Diffstat (limited to 'include')
(format of each entry: file mode, path, lines changed)
-rw-r--r--include/acpi/acbuffer.h12
-rw-r--r--include/acpi/acconfig.h8
-rw-r--r--include/acpi/acexcep.h16
-rw-r--r--include/acpi/acnames.h4
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h357
-rw-r--r--include/acpi/acpi_drivers.h33
-rw-r--r--include/acpi/acpi_io.h2
-rw-r--r--include/acpi/acpi_numa.h25
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h141
-rw-r--r--include/acpi/acrestyp.h48
-rw-r--r--include/acpi/actbl.h5
-rw-r--r--include/acpi/actbl1.h337
-rw-r--r--include/acpi/actbl2.h1420
-rw-r--r--include/acpi/actbl3.h91
-rw-r--r--include/acpi/actypes.h50
-rw-r--r--include/acpi/acuuid.h18
-rw-r--r--include/acpi/apei.h9
-rw-r--r--include/acpi/battery.h6
-rw-r--r--include/acpi/cppc_acpi.h101
-rw-r--r--include/acpi/ghes.h46
-rw-r--r--include/acpi/pcc.h34
-rw-r--r--include/acpi/pdc_intel.h36
-rw-r--r--include/acpi/platform/acenv.h9
-rw-r--r--include/acpi/platform/acenvex.h4
-rw-r--r--include/acpi/platform/acgcc.h44
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h55
-rw-r--r--include/acpi/platform/aclinux.h18
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/acpi/platform/aczephyr.h45
-rw-r--r--include/acpi/proc_cap_intel.h40
-rw-r--r--include/acpi/processor.h21
-rw-r--r--include/acpi/video.h31
-rw-r--r--include/asm-generic/Kbuild11
-rw-r--r--include/asm-generic/access_ok.h48
-rw-r--r--include/asm-generic/agp.h11
-rw-r--r--include/asm-generic/archrandom.h15
-rw-r--r--include/asm-generic/atomic-instrumented.h1789
-rw-r--r--include/asm-generic/atomic-long.h1014
-rw-r--r--include/asm-generic/atomic.h117
-rw-r--r--include/asm-generic/atomic64.h45
-rw-r--r--include/asm-generic/barrier.h76
-rw-r--r--include/asm-generic/bitops.h1
-rw-r--r--include/asm-generic/bitops/atomic.h38
-rw-r--r--include/asm-generic/bitops/builtin-ffs.h7
-rw-r--r--include/asm-generic/bitops/ffs.h2
-rw-r--r--include/asm-generic/bitops/find.h100
-rw-r--r--include/asm-generic/bitops/generic-non-atomic.h175
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h21
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h33
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h77
-rw-r--r--include/asm-generic/bitops/le.h34
-rw-r--r--include/asm-generic/bitops/lock.h45
-rw-r--r--include/asm-generic/bitops/non-atomic.h109
-rw-r--r--include/asm-generic/bitops/non-instrumented-non-atomic.h17
-rw-r--r--include/asm-generic/bitsperlong.h12
-rw-r--r--include/asm-generic/bug.h56
-rw-r--r--include/asm-generic/bugs.h11
-rw-r--r--include/asm-generic/cacheflush.h29
-rw-r--r--include/asm-generic/cfi.h5
-rw-r--r--include/asm-generic/checksum.h18
-rw-r--r--include/asm-generic/cmpxchg-local.h16
-rw-r--r--include/asm-generic/cmpxchg.h48
-rw-r--r--include/asm-generic/compat.h143
-rw-r--r--include/asm-generic/current.h2
-rw-r--r--include/asm-generic/div64.h24
-rw-r--r--include/asm-generic/dma-contiguous.h10
-rw-r--r--include/asm-generic/dma-mapping.h2
-rw-r--r--include/asm-generic/early_ioremap.h6
-rw-r--r--include/asm-generic/error-injection.h11
-rw-r--r--include/asm-generic/export.h89
-rw-r--r--include/asm-generic/fb.h126
-rw-r--r--include/asm-generic/futex.h31
-rw-r--r--include/asm-generic/getorder.h2
-rw-r--r--include/asm-generic/gpio.h172
-rw-r--r--include/asm-generic/hardirq.h6
-rw-r--r--include/asm-generic/hugetlb.h40
-rw-r--r--include/asm-generic/hyperv-tlfs.h406
-rw-r--r--include/asm-generic/ide_iops.h39
-rw-r--r--include/asm-generic/io.h176
-rw-r--r--include/asm-generic/iomap.h19
-rw-r--r--include/asm-generic/kmap_size.h12
-rw-r--r--include/asm-generic/kmap_types.h11
-rw-r--r--include/asm-generic/kprobes.h4
-rw-r--r--include/asm-generic/local.h1
-rw-r--r--include/asm-generic/local64.h12
-rw-r--r--include/asm-generic/logic_io.h78
-rw-r--r--include/asm-generic/memory_model.h45
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/asm-generic/mmu_context.h58
-rw-r--r--include/asm-generic/module.lds.h10
-rw-r--r--include/asm-generic/mshyperv.h172
-rw-r--r--include/asm-generic/msi.h8
-rw-r--r--include/asm-generic/nommu_context.h19
-rw-r--r--include/asm-generic/numa.h54
-rw-r--r--include/asm-generic/page.h18
-rw-r--r--include/asm-generic/pci.h39
-rw-r--r--include/asm-generic/pci_iomap.h7
-rw-r--r--include/asm-generic/percpu.h219
-rw-r--r--include/asm-generic/pgalloc.h115
-rw-r--r--include/asm-generic/pgtable-nop4d.h3
-rw-r--r--include/asm-generic/pgtable-nopmd.h4
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/preempt.h16
-rw-r--r--include/asm-generic/qrwlock.h63
-rw-r--r--include/asm-generic/qrwlock_types.h2
-rw-r--r--include/asm-generic/qspinlock.h35
-rw-r--r--include/asm-generic/sections.h113
-rw-r--r--include/asm-generic/signal.h2
-rw-r--r--include/asm-generic/softirq_stack.h14
-rw-r--r--include/asm-generic/spinlock.h96
-rw-r--r--include/asm-generic/spinlock_types.h17
-rw-r--r--include/asm-generic/syscall.h36
-rw-r--r--include/asm-generic/termios-base.h78
-rw-r--r--include/asm-generic/termios.h108
-rw-r--r--include/asm-generic/tlb.h186
-rw-r--r--include/asm-generic/topology.h2
-rw-r--r--include/asm-generic/uaccess.h192
-rw-r--r--include/asm-generic/unaligned.h167
-rw-r--r--include/asm-generic/vmlinux.lds.h501
-rw-r--r--include/asm-generic/word-at-a-time.h5
-rw-r--r--include/asm-generic/xor.h84
-rw-r--r--include/clocksource/arm_arch_timer.h5
-rw-r--r--include/clocksource/hyperv_timer.h45
-rw-r--r--include/clocksource/samsung_pwm.h3
-rw-r--r--include/clocksource/timer-goldfish.h31
-rw-r--r--include/clocksource/timer-riscv.h16
-rw-r--r--include/clocksource/timer-sp804.h29
-rw-r--r--include/clocksource/timer-ti-dm.h257
-rw-r--r--include/clocksource/timer-xilinx.h73
-rw-r--r--include/crypto/acompress.h141
-rw-r--r--include/crypto/aead.h67
-rw-r--r--include/crypto/akcipher.h147
-rw-r--r--include/crypto/algapi.h181
-rw-r--r--include/crypto/aria.h458
-rw-r--r--include/crypto/asym_tpm_subtype.h19
-rw-r--r--include/crypto/b128ops.h14
-rw-r--r--include/crypto/blake2b.h66
-rw-r--r--include/crypto/blake2s.h68
-rw-r--r--include/crypto/cbc.h141
-rw-r--r--include/crypto/chacha.h20
-rw-r--r--include/crypto/cryptd.h3
-rw-r--r--include/crypto/curve25519.h2
-rw-r--r--include/crypto/dh.h26
-rw-r--r--include/crypto/drbg.h11
-rw-r--r--include/crypto/ecc_curve.h60
-rw-r--r--include/crypto/ecdh.h3
-rw-r--r--include/crypto/engine.h121
-rw-r--r--include/crypto/gcm.h22
-rw-r--r--include/crypto/hash.h177
-rw-r--r--include/crypto/hash_info.h4
-rw-r--r--include/crypto/if_alg.h19
-rw-r--r--include/crypto/internal/acompress.h47
-rw-r--r--include/crypto/internal/aead.h49
-rw-r--r--include/crypto/internal/akcipher.h28
-rw-r--r--include/crypto/internal/blake2b.h115
-rw-r--r--include/crypto/internal/blake2s.h27
-rw-r--r--include/crypto/internal/cipher.h220
-rw-r--r--include/crypto/internal/ecc.h281
-rw-r--r--include/crypto/internal/engine.h74
-rw-r--r--include/crypto/internal/hash.h68
-rw-r--r--include/crypto/internal/kdf_selftest.h71
-rw-r--r--include/crypto/internal/kpp.h190
-rw-r--r--include/crypto/internal/poly1305.h3
-rw-r--r--include/crypto/internal/scompress.h18
-rw-r--r--include/crypto/internal/sig.h17
-rw-r--r--include/crypto/internal/skcipher.h152
-rw-r--r--include/crypto/kdf_sp800108.h61
-rw-r--r--include/crypto/kpp.h86
-rw-r--r--include/crypto/pcrypt.h2
-rw-r--r--include/crypto/poly1305.h6
-rw-r--r--include/crypto/polyval.h22
-rw-r--r--include/crypto/public_key.h47
-rw-r--r--include/crypto/rng.h67
-rw-r--r--include/crypto/scatterwalk.h21
-rw-r--r--include/crypto/sha1.h46
-rw-r--r--include/crypto/sha1_base.h5
-rw-r--r--include/crypto/sha2.h (renamed from include/crypto/sha.h)43
-rw-r--r--include/crypto/sha256_base.h55
-rw-r--r--include/crypto/sha512_base.h5
-rw-r--r--include/crypto/sig.h140
-rw-r--r--include/crypto/skcipher.h417
-rw-r--r--include/crypto/sm2.h28
-rw-r--r--include/crypto/sm3.h34
-rw-r--r--include/crypto/sm3_base.h3
-rw-r--r--include/crypto/sm4.h29
-rw-r--r--include/crypto/utils.h73
-rw-r--r--include/crypto/xts.h25
-rw-r--r--include/drm/amd_asic_type.h22
-rw-r--r--include/drm/bridge/aux-bridge.h52
-rw-r--r--include/drm/bridge/dw_hdmi.h17
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h21
-rw-r--r--include/drm/bridge/samsung-dsim.h125
-rw-r--r--include/drm/display/drm_dp.h (renamed from include/drm/drm_dp_helper.h)1043
-rw-r--r--include/drm/display/drm_dp_aux_bus.h85
-rw-r--r--include/drm/display/drm_dp_dual_mode_helper.h (renamed from include/drm/drm_dp_dual_mode_helper.h)14
-rw-r--r--include/drm/display/drm_dp_helper.h826
-rw-r--r--include/drm/display/drm_dp_mst_helper.h (renamed from include/drm/drm_dp_mst_helper.h)327
-rw-r--r--include/drm/display/drm_dp_tunnel.h248
-rw-r--r--include/drm/display/drm_dsc.h (renamed from include/drm/drm_dsc.h)12
-rw-r--r--include/drm/display/drm_dsc_helper.h33
-rw-r--r--include/drm/display/drm_hdcp.h (renamed from include/drm/drm_hdcp.h)27
-rw-r--r--include/drm/display/drm_hdcp_helper.h22
-rw-r--r--include/drm/display/drm_hdmi_helper.h27
-rw-r--r--include/drm/display/drm_scdc.h (renamed from include/drm/drm_scdc_helper.h)52
-rw-r--r--include/drm/display/drm_scdc_helper.h80
-rw-r--r--include/drm/drm_accel.h103
-rw-r--r--include/drm/drm_agpsupport.h135
-rw-r--r--include/drm/drm_aperture.h38
-rw-r--r--include/drm/drm_atomic.h195
-rw-r--r--include/drm/drm_atomic_helper.h51
-rw-r--r--include/drm/drm_atomic_state_helper.h4
-rw-r--r--include/drm/drm_atomic_uapi.h2
-rw-r--r--include/drm/drm_audio_component.h7
-rw-r--r--include/drm/drm_auth.h90
-rw-r--r--include/drm/drm_bridge.h157
-rw-r--r--include/drm/drm_bridge_connector.h2
-rw-r--r--include/drm/drm_buddy.h159
-rw-r--r--include/drm/drm_cache.h15
-rw-r--r--include/drm/drm_client.h23
-rw-r--r--include/drm/drm_color_mgmt.h20
-rw-r--r--include/drm/drm_connector.h508
-rw-r--r--include/drm/drm_crtc.h102
-rw-r--r--include/drm/drm_crtc_helper.h18
-rw-r--r--include/drm/drm_damage_helper.h18
-rw-r--r--include/drm/drm_debugfs.h91
-rw-r--r--include/drm/drm_device.h121
-rw-r--r--include/drm/drm_displayid.h143
-rw-r--r--include/drm/drm_drv.h258
-rw-r--r--include/drm/drm_edid.h339
-rw-r--r--include/drm/drm_eld.h164
-rw-r--r--include/drm/drm_encoder.h73
-rw-r--r--include/drm/drm_encoder_slave.h2
-rw-r--r--include/drm/drm_exec.h150
-rw-r--r--include/drm/drm_fb_cma_helper.h18
-rw-r--r--include/drm/drm_fb_dma_helper.h23
-rw-r--r--include/drm/drm_fb_helper.h216
-rw-r--r--include/drm/drm_fbdev_dma.h15
-rw-r--r--include/drm/drm_fbdev_generic.h15
-rw-r--r--include/drm/drm_file.h128
-rw-r--r--include/drm/drm_fixed.h9
-rw-r--r--include/drm/drm_flip_work.h20
-rw-r--r--include/drm/drm_format_helper.h130
-rw-r--r--include/drm/drm_fourcc.h29
-rw-r--r--include/drm/drm_framebuffer.h33
-rw-r--r--include/drm/drm_gem.h255
-rw-r--r--include/drm/drm_gem_atomic_helper.h154
-rw-r--r--include/drm/drm_gem_cma_helper.h195
-rw-r--r--include/drm/drm_gem_dma_helper.h273
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h16
-rw-r--r--include/drm/drm_gem_shmem_helper.h222
-rw-r--r--include/drm/drm_gem_ttm_helper.h16
-rw-r--r--include/drm/drm_gem_vram_helper.h84
-rw-r--r--include/drm/drm_gpuvm.h1247
-rw-r--r--include/drm/drm_hashtab.h79
-rw-r--r--include/drm/drm_ioctl.h13
-rw-r--r--include/drm/drm_irq.h32
-rw-r--r--include/drm/drm_kunit_helpers.h123
-rw-r--r--include/drm/drm_legacy.h235
-rw-r--r--include/drm/drm_managed.h23
-rw-r--r--include/drm/drm_mipi_dbi.h68
-rw-r--r--include/drm/drm_mipi_dsi.h72
-rw-r--r--include/drm/drm_mm.h6
-rw-r--r--include/drm/drm_mode_config.h80
-rw-r--r--include/drm/drm_mode_object.h9
-rw-r--r--include/drm/drm_modes.h103
-rw-r--r--include/drm/drm_modeset_helper_vtables.h256
-rw-r--r--include/drm/drm_modeset_lock.h9
-rw-r--r--include/drm/drm_module.h125
-rw-r--r--include/drm/drm_of.h41
-rw-r--r--include/drm/drm_panel.h146
-rw-r--r--include/drm/drm_pciids.h112
-rw-r--r--include/drm/drm_plane.h223
-rw-r--r--include/drm/drm_plane_helper.h39
-rw-r--r--include/drm/drm_prime.h17
-rw-r--r--include/drm/drm_print.h328
-rw-r--r--include/drm/drm_privacy_screen_consumer.h65
-rw-r--r--include/drm/drm_privacy_screen_driver.h95
-rw-r--r--include/drm/drm_privacy_screen_machine.h46
-rw-r--r--include/drm/drm_probe_helper.h15
-rw-r--r--include/drm/drm_property.h17
-rw-r--r--include/drm/drm_rect.h36
-rw-r--r--include/drm/drm_simple_kms_helper.h105
-rw-r--r--include/drm/drm_suballoc.h108
-rw-r--r--include/drm/drm_syncobj.h6
-rw-r--r--include/drm/drm_sysfs.h5
-rw-r--r--include/drm/drm_vblank.h2
-rw-r--r--include/drm/drm_vma_manager.h3
-rw-r--r--include/drm/drm_writeback.h11
-rw-r--r--include/drm/gpu_scheduler.h440
-rw-r--r--include/drm/gud.h335
-rw-r--r--include/drm/i915_component.h2
-rw-r--r--include/drm/i915_drm.h5
-rw-r--r--include/drm/i915_gsc_proxy_mei_interface.h53
-rw-r--r--include/drm/i915_hdcp_interface.h539
-rw-r--r--include/drm/i915_mei_hdcp_interface.h184
-rw-r--r--include/drm/i915_pciids.h298
-rw-r--r--include/drm/i915_pxp_tee_interface.h49
-rw-r--r--include/drm/intel-gtt.h35
-rw-r--r--include/drm/task_barrier.h4
-rw-r--r--include/drm/ttm/ttm_bo.h427
-rw-r--r--include/drm/ttm/ttm_bo_api.h736
-rw-r--r--include/drm/ttm/ttm_bo_driver.h859
-rw-r--r--include/drm/ttm/ttm_caching.h55
-rw-r--r--include/drm/ttm/ttm_device.h297
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h23
-rw-r--r--include/drm/ttm/ttm_kmap_iter.h61
-rw-r--r--include/drm/ttm/ttm_memory.h97
-rw-r--r--include/drm/ttm/ttm_module.h40
-rw-r--r--include/drm/ttm/ttm_page_alloc.h122
-rw-r--r--include/drm/ttm/ttm_placement.h49
-rw-r--r--include/drm/ttm/ttm_pool.h94
-rw-r--r--include/drm/ttm/ttm_range_manager.h56
-rw-r--r--include/drm/ttm/ttm_resource.h424
-rw-r--r--include/drm/ttm/ttm_set_memory.h150
-rw-r--r--include/drm/ttm/ttm_tt.h253
-rw-r--r--include/drm/xe_pciids.h190
-rw-r--r--include/dt-bindings/arm/qcom,ids.h287
-rw-r--r--include/dt-bindings/ata/ahci.h20
-rw-r--r--include/dt-bindings/bus/moxtet.h2
-rw-r--r--include/dt-bindings/clock/actions,s500-cmu.h6
-rw-r--r--include/dt-bindings/clock/alphascale,asm9260.h2
-rw-r--r--include/dt-bindings/clock/am3.h93
-rw-r--r--include/dt-bindings/clock/am4.h99
-rw-r--r--include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h168
-rw-r--r--include/dt-bindings/clock/amlogic,a1-pll-clkc.h25
-rw-r--r--include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h236
-rw-r--r--include/dt-bindings/clock/amlogic,s4-pll-clkc.h43
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h1
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h19
-rw-r--r--include/dt-bindings/clock/at91.h13
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h65
-rw-r--r--include/dt-bindings/clock/axg-clkc.h74
-rw-r--r--include/dt-bindings/clock/axis,artpec6-clkctrl.h2
-rw-r--r--include/dt-bindings/clock/bcm21664.h10
-rw-r--r--include/dt-bindings/clock/bcm281xx.h10
-rw-r--r--include/dt-bindings/clock/bcm63268-clock.h13
-rw-r--r--include/dt-bindings/clock/boston-clock.h3
-rw-r--r--include/dt-bindings/clock/cirrus,cs2000-cp.h14
-rw-r--r--include/dt-bindings/clock/dra7.h179
-rw-r--r--include/dt-bindings/clock/efm32-cmu.h43
-rw-r--r--include/dt-bindings/clock/en7523-clk.h17
-rw-r--r--include/dt-bindings/clock/exynos3250.h18
-rw-r--r--include/dt-bindings/clock/exynos4.h7
-rw-r--r--include/dt-bindings/clock/exynos5250.h7
-rw-r--r--include/dt-bindings/clock/exynos5260-clk.h25
-rw-r--r--include/dt-bindings/clock/exynos5410.h2
-rw-r--r--include/dt-bindings/clock/exynos5420.h9
-rw-r--r--include/dt-bindings/clock/exynos5433.h42
-rw-r--r--include/dt-bindings/clock/exynos7885.h147
-rw-r--r--include/dt-bindings/clock/exynos850.h393
-rw-r--r--include/dt-bindings/clock/fsd-clk.h150
-rw-r--r--include/dt-bindings/clock/fsl,qoriq-clockgen.h15
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h140
-rw-r--r--include/dt-bindings/clock/google,gs101.h521
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h65
-rw-r--r--include/dt-bindings/clock/hi3559av100-clock.h165
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h2
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h7
-rw-r--r--include/dt-bindings/clock/imx8-clock.h154
-rw-r--r--include/dt-bindings/clock/imx8-lpcg.h14
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h9
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h49
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h30
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h35
-rw-r--r--include/dt-bindings/clock/imx8ulp-clock.h258
-rw-r--r--include/dt-bindings/clock/imx93-clock.h209
-rw-r--r--include/dt-bindings/clock/imxrt1050-clock.h72
-rw-r--r--include/dt-bindings/clock/ingenic,jz4725b-cgu.h (renamed from include/dt-bindings/clock/jz4725b-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,jz4740-cgu.h (renamed from include/dt-bindings/clock/jz4740-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,jz4755-cgu.h49
-rw-r--r--include/dt-bindings/clock/ingenic,jz4760-cgu.h56
-rw-r--r--include/dt-bindings/clock/ingenic,jz4770-cgu.h (renamed from include/dt-bindings/clock/jz4770-cgu.h)1
-rw-r--r--include/dt-bindings/clock/ingenic,jz4780-cgu.h (renamed from include/dt-bindings/clock/jz4780-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,sysost.h29
-rw-r--r--include/dt-bindings/clock/ingenic,x1000-cgu.h (renamed from include/dt-bindings/clock/x1000-cgu.h)4
-rw-r--r--include/dt-bindings/clock/ingenic,x1830-cgu.h (renamed from include/dt-bindings/clock/x1830-cgu.h)0
-rw-r--r--include/dt-bindings/clock/intel,agilex5-clkmgr.h100
-rw-r--r--include/dt-bindings/clock/k210-clk.h55
-rw-r--r--include/dt-bindings/clock/lochnagar.h (renamed from include/dt-bindings/clk/lochnagar.h)0
-rw-r--r--include/dt-bindings/clock/loongson,ls1x-clk.h19
-rw-r--r--include/dt-bindings/clock/loongson,ls2k-clk.h30
-rw-r--r--include/dt-bindings/clock/marvell,mmp2-audio.h1
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h5
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h11
-rw-r--r--include/dt-bindings/clock/marvell,pxa1928.h3
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h5
-rw-r--r--include/dt-bindings/clock/mediatek,mt6795-clk.h275
-rw-r--r--include/dt-bindings/clock/mediatek,mt7981-clk.h215
-rw-r--r--include/dt-bindings/clock/mediatek,mt7988-clk.h280
-rw-r--r--include/dt-bindings/clock/mediatek,mt8188-clk.h726
-rw-r--r--include/dt-bindings/clock/mediatek,mt8365-clk.h373
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h109
-rw-r--r--include/dt-bindings/clock/microchip,lan966x.h34
-rw-r--r--include/dt-bindings/clock/microchip,mpfs-clock.h76
-rw-r--r--include/dt-bindings/clock/mobileye,eyeq5-clk.h22
-rw-r--r--include/dt-bindings/clock/mstar-msc313-mpll.h19
-rw-r--r--include/dt-bindings/clock/mt7621-clk.h41
-rw-r--r--include/dt-bindings/clock/mt7986-clk.h169
-rw-r--r--include/dt-bindings/clock/mt8167-clk.h131
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h1
-rw-r--r--include/dt-bindings/clock/mt8186-clk.h445
-rw-r--r--include/dt-bindings/clock/mt8192-clk.h585
-rw-r--r--include/dt-bindings/clock/mt8195-clk.h866
-rw-r--r--include/dt-bindings/clock/nuvoton,ma35d1-clk.h253
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm7xx-clock.h2
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm845-clk.h49
-rw-r--r--include/dt-bindings/clock/omap5.h2
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sc7180.h121
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sc7280.h127
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sm8250.h138
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-qcm2290.h38
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sc7280.h55
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sc8280xp.h100
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6125.h41
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6350.h48
l---------include/dt-bindings/clock/qcom,dispcc-sm8150.h1
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm8250.h76
l---------include/dt-bindings/clock/qcom,dispcc-sm8350.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-apq8084.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq4019.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq5018.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h5
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h18
-rw-r--r--include/dt-bindings/clock/qcom,gcc-mdm9607.h104
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8909.h218
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8917.h191
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8939.h7
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8953.h238
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8976.h241
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8994.h49
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h9
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcm2290.h188
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7280.h226
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h311
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8280xp.h508
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdx55.h117
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdx65.h122
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6115.h201
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6125.h240
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6350.h178
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8150.h8
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8350.h265
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8450.h243
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc7280.h35
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc8280xp.h35
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sdm660.h28
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm6350.h37
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8350.h52
-rw-r--r--include/dt-bindings/clock/qcom,ipq5332-gcc.h356
-rw-r--r--include/dt-bindings/clock/qcom,ipq9574-gcc.h219
-rw-r--r--include/dt-bindings/clock/qcom,lcc-ipq806x.h2
-rw-r--r--include/dt-bindings/clock/qcom,lcc-mdm9615.h44
-rw-r--r--include/dt-bindings/clock/qcom,lpass-sc7280.h16
-rw-r--r--include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h48
-rw-r--r--include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h28
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8974.h1
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8994.h155
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-sdm660.h162
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-ecpricc.h147
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-gcc.h177
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h21
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h12
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-gcc.h320
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-gpucc.h50
-rw-r--r--include/dt-bindings/clock/qcom,sc8280xp-camcc.h179
-rw-r--r--include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h17
-rw-r--r--include/dt-bindings/clock/qcom,sdx75-gcc.h193
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-gcc.h197
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-dispcc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-gpucc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm6125-gpucc.h31
-rw-r--r--include/dt-bindings/clock/qcom,sm6350-camcc.h109
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-dispcc.h42
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-gcc.h234
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-gpucc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm7150-gcc.h186
-rw-r--r--include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h11
-rw-r--r--include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h13
-rw-r--r--include/dt-bindings/clock/qcom,sm8350-videocc.h35
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-camcc.h159
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-dispcc.h103
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-gpucc.h48
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-videocc.h38
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-camcc.h187
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-dispcc.h101
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-gcc.h231
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-gpucc.h48
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-tcsr.h18
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-dispcc.h102
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-gcc.h254
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-gpucc.h43
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-tcsr.h18
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sc7280.h27
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8150.h29
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8250.h36
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-camcc.h135
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-dispcc.h98
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gcc.h485
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gpucc.h41
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-tcsr.h23
-rw-r--r--include/dt-bindings/clock/r8a7779-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a779a0-cpg-mssr.h55
-rw-r--r--include/dt-bindings/clock/r8a779f0-cpg-mssr.h64
-rw-r--r--include/dt-bindings/clock/r8a779g0-cpg-mssr.h91
-rw-r--r--include/dt-bindings/clock/r9a06g032-sysctrl.h1
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h204
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h220
-rw-r--r--include/dt-bindings/clock/r9a07g054-cpg.h229
-rw-r--r--include/dt-bindings/clock/r9a08g045-cpg.h242
-rw-r--r--include/dt-bindings/clock/r9a09g011-cpg.h352
-rw-r--r--include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h96
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h1
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h3
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h6
-rw-r--r--include/dt-bindings/clock/rk3568-cru.h926
-rw-r--r--include/dt-bindings/clock/rockchip,rk3588-cru.h765
-rw-r--r--include/dt-bindings/clock/rockchip,rv1126-cru.h632
-rw-r--r--include/dt-bindings/clock/s3c2410.h59
-rw-r--r--include/dt-bindings/clock/s3c2412.h70
-rw-r--r--include/dt-bindings/clock/s3c2443.h91
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h349
-rw-r--r--include/dt-bindings/clock/sifive-fu540-prci.h8
-rw-r--r--include/dt-bindings/clock/sifive-fu740-prci.h24
-rw-r--r--include/dt-bindings/clock/sophgo,cv1800.h176
-rw-r--r--include/dt-bindings/clock/sprd,ums512-clk.h397
-rw-r--r--include/dt-bindings/clock/st,stm32mp25-rcc.h492
-rw-r--r--include/dt-bindings/clock/starfive,jh7110-crg.h301
-rw-r--r--include/dt-bindings/clock/starfive-jh7100-audio.h41
-rw-r--r--include/dt-bindings/clock/starfive-jh7100.h202
-rw-r--r--include/dt-bindings/clock/ste-db8500-clkout.h17
-rw-r--r--include/dt-bindings/clock/stih416-clks.h17
-rw-r--r--include/dt-bindings/clock/stm32fx-clock.h4
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h25
-rw-r--r--include/dt-bindings/clock/stm32mp13-clks.h229
-rw-r--r--include/dt-bindings/clock/stratix10-clock.h2
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h158
-rw-r--r--include/dt-bindings/clock/sun20i-d1-r-ccu.h19
-rw-r--r--include/dt-bindings/clock/sun50i-a100-ccu.h116
-rw-r--r--include/dt-bindings/clock/sun50i-a100-r-ccu.h23
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h3
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h116
-rw-r--r--include/dt-bindings/clock/sun6i-rtc.h10
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/dt-bindings/clock/suniv-ccu-f1c100s.h2
-rw-r--r--include/dt-bindings/clock/sunplus,sp7021-clkc.h88
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/dt-bindings/clock/tegra234-clock.h903
-rw-r--r--include/dt-bindings/clock/ti-dra7-atl.h10
-rw-r--r--include/dt-bindings/clock/toshiba,tmpv770x.h181
-rw-r--r--include/dt-bindings/clock/versaclock.h (renamed from include/dt-bindings/clk/versaclock.h)0
-rw-r--r--include/dt-bindings/clock/vf610-clock.h3
-rw-r--r--include/dt-bindings/clock/xlnx-vcu.h15
-rw-r--r--include/dt-bindings/clock/zx296702-clock.h180
-rw-r--r--include/dt-bindings/clock/zx296718-clock.h164
-rw-r--r--include/dt-bindings/display/sdtv-standards.h2
-rw-r--r--include/dt-bindings/dma/fsl-edma.h21
-rw-r--r--include/dt-bindings/dma/jz4775-dma.h44
-rw-r--r--include/dt-bindings/dma/qcom-gpi.h11
-rw-r--r--include/dt-bindings/dma/x2000-dma.h54
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h310
-rw-r--r--include/dt-bindings/firmware/qcom,scm.h39
-rw-r--r--include/dt-bindings/gce/mediatek,mt6795-gce.h123
-rw-r--r--include/dt-bindings/gce/mt8186-gce.h421
-rw-r--r--include/dt-bindings/gce/mt8192-gce.h335
-rw-r--r--include/dt-bindings/gce/mt8195-gce.h812
-rw-r--r--include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h179
-rw-r--r--include/dt-bindings/gpio/amlogic-c3-gpio.h72
-rw-r--r--include/dt-bindings/gpio/gpio.h5
-rw-r--r--include/dt-bindings/gpio/meson-g12a-gpio.h2
-rw-r--r--include/dt-bindings/gpio/meson-s4-gpio.h99
-rw-r--r--include/dt-bindings/gpio/msc313-gpio.h124
-rw-r--r--include/dt-bindings/gpio/tegra186-gpio.h4
-rw-r--r--include/dt-bindings/gpio/tegra234-gpio.h59
-rw-r--r--include/dt-bindings/gpio/tegra241-gpio.h42
-rw-r--r--include/dt-bindings/iio/adc/at91-sama5d2_adc.h3
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h1
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6370_adc.h18
-rw-r--r--include/dt-bindings/iio/addac/adi,ad74413r.h21
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h69
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h124
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h50
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h19
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h3
-rw-r--r--include/dt-bindings/input/atmel-maxtouch.h10
-rw-r--r--include/dt-bindings/input/cros-ec-keyboard.h103
-rw-r--r--include/dt-bindings/interconnect/fsl,imx8mp.h59
-rw-r--r--include/dt-bindings/interconnect/qcom,icc.h26
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8909.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8939.h105
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996-cbf.h12
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996.h163
-rw-r--r--include/dt-bindings/interconnect/qcom,osm-l3.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,qcm2290.h94
-rw-r--r--include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h98
-rw-r--r--include/dt-bindings/interconnect/qcom,rpm-icc.h13
-rw-r--r--include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h231
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7180.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7280.h165
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8180x.h189
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8280xp.h232
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm660.h116
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm670-rpmh.h136
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm845.h2
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx55.h74
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx65.h67
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx75.h102
-rw-r--r--include/dt-bindings/interconnect/qcom,sm6115.h111
-rw-r--r--include/dt-bindings/interconnect/qcom,sm6350.h148
-rw-r--r--include/dt-bindings/interconnect/qcom,sm7150-rpmh.h150
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8150.h159
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h176
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8350.h172
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8450.h171
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8550-rpmh.h189
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8650-rpmh.h154
-rw-r--r--include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h183
-rw-r--r--include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h126
-rw-r--r--include/dt-bindings/interrupt-controller/apple-aic.h17
-rw-r--r--include/dt-bindings/interrupt-controller/irqc-rzg2l.h25
-rw-r--r--include/dt-bindings/leds/common.h18
-rw-r--r--include/dt-bindings/leds/leds-lp55xx.h10
-rw-r--r--include/dt-bindings/leds/rt4831-backlight.h23
-rw-r--r--include/dt-bindings/mailbox/mediatek,mt8188-gce.h967
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h4
-rw-r--r--include/dt-bindings/mailbox/tegra186-hsp.h5
-rw-r--r--include/dt-bindings/media/video-interfaces.h16
-rw-r--r--include/dt-bindings/memory/mediatek,mt8188-memory-port.h489
-rw-r--r--include/dt-bindings/memory/mediatek,mt8365-larb-port.h90
-rw-r--r--include/dt-bindings/memory/mt2701-larb-port.h4
-rw-r--r--include/dt-bindings/memory/mt2712-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt6779-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt6795-larb-port.h95
-rw-r--r--include/dt-bindings/memory/mt8167-larb-port.h51
-rw-r--r--include/dt-bindings/memory/mt8173-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8183-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8186-memory-port.h217
-rw-r--r--include/dt-bindings/memory/mt8192-larb-port.h243
-rw-r--r--include/dt-bindings/memory/mt8195-memory-port.h408
-rw-r--r--include/dt-bindings/memory/mtk-memory-port.h17
-rw-r--r--include/dt-bindings/memory/tegra124-mc.h68
-rw-r--r--include/dt-bindings/memory/tegra20-mc.h53
-rw-r--r--include/dt-bindings/memory/tegra210-mc.h10
-rw-r--r--include/dt-bindings/memory/tegra234-mc.h544
-rw-r--r--include/dt-bindings/memory/tegra30-mc.h67
-rw-r--r--include/dt-bindings/mfd/cros_ec.h18
-rw-r--r--include/dt-bindings/mfd/qcom-pm8008.h19
-rw-r--r--include/dt-bindings/mfd/stm32f4-rcc.h1
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h2
-rw-r--r--include/dt-bindings/mux/mux-j721e-wiz.h53
-rw-r--r--include/dt-bindings/mux/ti-serdes.h190
-rw-r--r--include/dt-bindings/net/pcs-rzn1-miic.h33
-rw-r--r--include/dt-bindings/nvmem/microchip,sama7g5-otpc.h12
-rw-r--r--include/dt-bindings/phy/phy-cadence.h23
-rw-r--r--include/dt-bindings/phy/phy-imx8-pcie.h14
-rw-r--r--include/dt-bindings/phy/phy-lan966x-serdes.h14
-rw-r--r--include/dt-bindings/phy/phy-qcom-qmp.h20
-rw-r--r--include/dt-bindings/phy/phy-ti.h21
-rw-r--r--include/dt-bindings/phy/phy.h4
-rw-r--r--include/dt-bindings/pinctrl/apple.h13
-rw-r--r--include/dt-bindings/pinctrl/hisi.h12
-rw-r--r--include/dt-bindings/pinctrl/k210-fpioa.h276
-rw-r--r--include/dt-bindings/pinctrl/k3.h38
-rw-r--r--include/dt-bindings/pinctrl/keystone.h10
-rw-r--r--include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h1280
-rw-r--r--include/dt-bindings/pinctrl/mt65xx.h9
-rw-r--r--include/dt-bindings/pinctrl/mt6795-pinfunc.h908
-rw-r--r--include/dt-bindings/pinctrl/mt8135-pinfunc.h1294
-rw-r--r--include/dt-bindings/pinctrl/mt8183-pinfunc.h1120
-rw-r--r--include/dt-bindings/pinctrl/mt8186-pinfunc.h1174
-rw-r--r--include/dt-bindings/pinctrl/mt8192-pinfunc.h1344
-rw-r--r--include/dt-bindings/pinctrl/mt8195-pinfunc.h962
-rw-r--r--include/dt-bindings/pinctrl/mt8365-pinfunc.h858
-rw-r--r--include/dt-bindings/pinctrl/omap.h2
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h275
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynq.h17
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynqmp.h19
-rw-r--r--include/dt-bindings/pinctrl/r7s9210-pinctrl.h2
-rw-r--r--include/dt-bindings/pinctrl/rzg2l-pinctrl.h23
-rw-r--r--include/dt-bindings/pinctrl/rzv2m-pinctrl.h23
-rw-r--r--include/dt-bindings/pinctrl/samsung.h20
-rw-r--r--include/dt-bindings/pinctrl/sppctl-sp7021.h179
-rw-r--r--include/dt-bindings/pinctrl/sppctl.h31
-rw-r--r--include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h137
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h3
-rw-r--r--include/dt-bindings/power/allwinner,sun20i-d1-ppu.h10
-rw-r--r--include/dt-bindings/power/amlogic,c3-pwrc.h25
-rw-r--r--include/dt-bindings/power/amlogic,t7-pwrc.h63
-rw-r--r--include/dt-bindings/power/fsl,imx93-power.h15
-rw-r--r--include/dt-bindings/power/imx8mm-power.h31
-rw-r--r--include/dt-bindings/power/imx8mn-power.h20
-rw-r--r--include/dt-bindings/power/imx8mp-power.h59
-rw-r--r--include/dt-bindings/power/imx8mq-power.h3
-rw-r--r--include/dt-bindings/power/imx8ulp-power.h26
-rw-r--r--include/dt-bindings/power/mediatek,mt8188-power.h44
-rw-r--r--include/dt-bindings/power/mediatek,mt8365-power.h19
-rw-r--r--include/dt-bindings/power/meson-a1-power.h2
-rw-r--r--include/dt-bindings/power/meson-axg-power.h14
-rw-r--r--include/dt-bindings/power/meson-g12a-power.h4
-rw-r--r--include/dt-bindings/power/meson-gxbb-power.h2
-rw-r--r--include/dt-bindings/power/meson-s4-power.h19
-rw-r--r--include/dt-bindings/power/meson-sm1-power.h2
-rw-r--r--include/dt-bindings/power/meson8-power.h2
-rw-r--r--include/dt-bindings/power/mt6795-power.h16
-rw-r--r--include/dt-bindings/power/mt6797-power.h9
-rw-r--r--include/dt-bindings/power/mt8167-power.h17
-rw-r--r--include/dt-bindings/power/mt8183-power.h26
-rw-r--r--include/dt-bindings/power/mt8186-power.h32
-rw-r--r--include/dt-bindings/power/mt8192-power.h32
-rw-r--r--include/dt-bindings/power/mt8195-power.h46
-rw-r--r--include/dt-bindings/power/qcom,rpmhpd.h32
-rw-r--r--include/dt-bindings/power/qcom-aoss-qmp.h14
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h324
-rw-r--r--include/dt-bindings/power/r8a7795-sysc.h1
-rw-r--r--include/dt-bindings/power/r8a779a0-sysc.h59
-rw-r--r--include/dt-bindings/power/r8a779f0-sysc.h30
-rw-r--r--include/dt-bindings/power/r8a779g0-sysc.h46
-rw-r--r--include/dt-bindings/power/renesas,r8a779h0-sysc.h49
-rw-r--r--include/dt-bindings/power/rk3568-power.h32
-rw-r--r--include/dt-bindings/power/rk3588-power.h69
-rw-r--r--include/dt-bindings/power/rockchip,rv1126-power.h35
-rw-r--r--include/dt-bindings/power/starfive,jh7110-pmu.h21
-rw-r--r--include/dt-bindings/power/summit,smb347-charger.h23
-rw-r--r--include/dt-bindings/power/tegra234-powergate.h39
-rw-r--r--include/dt-bindings/power/xlnx-zynqmp-power.h6
-rw-r--r--include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h13
-rw-r--r--include/dt-bindings/regulator/dlg,da9121-regulator.h22
-rw-r--r--include/dt-bindings/regulator/mediatek,mt6360-regulator.h16
-rw-r--r--include/dt-bindings/regulator/richtek,rt5190a-regulator.h15
-rw-r--r--include/dt-bindings/regulator/st,stm32mp13-regulator.h42
-rw-r--r--include/dt-bindings/regulator/ti,tps62864.h9
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-s10.h5
-rw-r--r--include/dt-bindings/reset/amlogic,c3-reset.h119
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-reset.h4
-rw-r--r--include/dt-bindings/reset/amlogic,meson-s4-reset.h125
-rw-r--r--include/dt-bindings/reset/bcm6318-reset.h20
-rw-r--r--include/dt-bindings/reset/bcm63268-reset.h30
-rw-r--r--include/dt-bindings/reset/bcm6328-reset.h18
-rw-r--r--include/dt-bindings/reset/bcm6358-reset.h15
-rw-r--r--include/dt-bindings/reset/bcm6362-reset.h22
-rw-r--r--include/dt-bindings/reset/bcm6368-reset.h16
-rw-r--r--include/dt-bindings/reset/bt1-ccu.h9
-rw-r--r--include/dt-bindings/reset/delta,tn48m-reset.h20
-rw-r--r--include/dt-bindings/reset/imx8mq-reset.h5
-rw-r--r--include/dt-bindings/reset/imx8ulp-pcc-reset.h59
-rw-r--r--include/dt-bindings/reset/k210-rst.h42
-rw-r--r--include/dt-bindings/reset/mediatek,mt6735-wdt.h17
-rw-r--r--include/dt-bindings/reset/mediatek,mt6795-resets.h53
-rw-r--r--include/dt-bindings/reset/mediatek,mt7988-resets.h19
-rw-r--r--include/dt-bindings/reset/mt2712-resets.h (renamed from include/dt-bindings/reset-controller/mt2712-resets.h)0
-rw-r--r--include/dt-bindings/reset/mt7621-reset.h37
-rw-r--r--include/dt-bindings/reset/mt7986-resets.h55
-rw-r--r--include/dt-bindings/reset/mt8173-resets.h2
-rw-r--r--include/dt-bindings/reset/mt8183-resets.h (renamed from include/dt-bindings/reset-controller/mt8183-resets.h)3
-rw-r--r--include/dt-bindings/reset/mt8186-resets.h41
-rw-r--r--include/dt-bindings/reset/mt8188-resets.h116
-rw-r--r--include/dt-bindings/reset/mt8192-resets.h41
-rw-r--r--include/dt-bindings/reset/mt8195-resets.h83
-rw-r--r--include/dt-bindings/reset/nuvoton,ma35d1-reset.h108
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq5018.h122
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h5
-rw-r--r--include/dt-bindings/reset/qcom,ipq9574-gcc.h165
-rw-r--r--include/dt-bindings/reset/qcom,sdm845-pdc.h2
-rw-r--r--include/dt-bindings/reset/qcom,sm8350-videocc.h18
-rw-r--r--include/dt-bindings/reset/qcom,sm8450-gpucc.h20
-rw-r--r--include/dt-bindings/reset/qcom,sm8650-gpucc.h20
-rw-r--r--include/dt-bindings/reset/qcom,x1e80100-gpucc.h19
-rw-r--r--include/dt-bindings/reset/raspberrypi,firmware-reset.h13
-rw-r--r--include/dt-bindings/reset/rockchip,rk3588-cru.h754
-rw-r--r--include/dt-bindings/reset/sama7g5-reset.h10
-rw-r--r--include/dt-bindings/reset/sophgo,sg2042-reset.h87
-rw-r--r--include/dt-bindings/reset/st,stm32mp25-rcc.h167
-rw-r--r--include/dt-bindings/reset/starfive,jh7110-crg.h214
-rw-r--r--include/dt-bindings/reset/starfive-jh7100.h126
-rw-r--r--include/dt-bindings/reset/stericsson,db8500-prcc-reset.h51
-rw-r--r--include/dt-bindings/reset/stih415-resets.h28
-rw-r--r--include/dt-bindings/reset/stih416-resets.h52
-rw-r--r--include/dt-bindings/reset/stm32mp1-resets.h17
-rw-r--r--include/dt-bindings/reset/stm32mp13-resets.h100
-rw-r--r--include/dt-bindings/reset/sun20i-d1-ccu.h79
-rw-r--r--include/dt-bindings/reset/sun20i-d1-r-ccu.h16
-rw-r--r--include/dt-bindings/reset/sun50i-a100-ccu.h68
-rw-r--r--include/dt-bindings/reset/sun50i-a100-r-ccu.h18
-rw-r--r--include/dt-bindings/reset/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h3
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h70
-rw-r--r--include/dt-bindings/reset/sunplus,sp7021-reset.h87
-rw-r--r--include/dt-bindings/reset/tegra234-reset.h182
-rw-r--r--include/dt-bindings/reset/toshiba,tmpv770x.h41
-rw-r--r--include/dt-bindings/reset/xlnx-versal-resets.h105
-rw-r--r--include/dt-bindings/soc/bcm-pmb.h12
-rw-r--r--include/dt-bindings/soc/bcm6318-pm.h17
-rw-r--r--include/dt-bindings/soc/bcm63268-pm.h21
-rw-r--r--include/dt-bindings/soc/bcm6328-pm.h17
-rw-r--r--include/dt-bindings/soc/bcm6362-pm.h21
-rw-r--r--include/dt-bindings/soc/cpm1-fsl,tsa.h13
-rw-r--r--include/dt-bindings/soc/qcom,gpr.h19
-rw-r--r--include/dt-bindings/soc/rockchip,vop2.h18
-rw-r--r--include/dt-bindings/soc/samsung,boot-mode.h18
-rw-r--r--include/dt-bindings/soc/samsung,exynos-usi.h17
-rw-r--r--include/dt-bindings/soc/zte,pm_domains.h24
-rw-r--r--include/dt-bindings/sound/adi,adau1977.h15
-rw-r--r--include/dt-bindings/sound/apq8016-lpass.h7
-rw-r--r--include/dt-bindings/sound/cs35l45.h77
-rw-r--r--include/dt-bindings/sound/microchip,pdmc.h13
-rw-r--r--include/dt-bindings/sound/qcom,lpass.h46
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h109
-rw-r--r--include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h234
-rw-r--r--include/dt-bindings/sound/qcom,wcd9335.h15
-rw-r--r--include/dt-bindings/sound/rt5640.h1
-rw-r--r--include/dt-bindings/sound/sc7180-lpass.h9
-rw-r--r--include/dt-bindings/sound/tlv320adc3xxx.h28
-rw-r--r--include/dt-bindings/sound/tlv320aic31xx-micbias.h9
-rw-r--r--include/dt-bindings/sound/tlv320aic31xx.h14
-rw-r--r--include/dt-bindings/thermal/mediatek,lvts-thermal.h57
-rw-r--r--include/dt-bindings/thermal/tegra234-bpmp-thermal.h19
-rw-r--r--include/dt-bindings/usb/pd.h382
-rw-r--r--include/dt-bindings/watchdog/aspeed-wdt.h92
-rw-r--r--include/keys/asymmetric-parser.h2
-rw-r--r--include/keys/asymmetric-type.h12
-rw-r--r--include/keys/encrypted-type.h2
-rw-r--r--include/keys/rxrpc-type.h56
-rw-r--r--include/keys/system_keyring.h64
-rw-r--r--include/keys/trusted-type.h55
-rw-r--r--include/keys/trusted_caam.h11
-rw-r--r--include/keys/trusted_tee.h16
-rw-r--r--include/keys/trusted_tpm.h29
-rw-r--r--include/kunit/assert.h256
-rw-r--r--include/kunit/attributes.h50
-rw-r--r--include/kunit/device.h80
-rw-r--r--include/kunit/resource.h503
-rw-r--r--include/kunit/skbuff.h56
-rw-r--r--include/kunit/static_stub.h113
-rw-r--r--include/kunit/test-bug.h71
-rw-r--r--include/kunit/test.h1689
-rw-r--r--include/kunit/visibility.h33
-rw-r--r--include/kvm/arm_arch_timer.h66
-rw-r--r--include/kvm/arm_hypercalls.h14
-rw-r--r--include/kvm/arm_pmu.h101
-rw-r--r--include/kvm/arm_psci.h18
-rw-r--r--include/kvm/arm_vgic.h78
-rw-r--r--include/kvm/iodev.h6
-rw-r--r--include/linux/a.out.h18
-rw-r--r--include/linux/acct.h1
-rw-r--r--include/linux/acpi.h497
-rw-r--r--include/linux/acpi_amd_wbrf.h91
-rw-r--r--include/linux/acpi_iort.h38
-rw-r--r--include/linux/acpi_mdio.h33
-rw-r--r--include/linux/acpi_viot.h21
-rw-r--r--include/linux/adreno-smmu-priv.h72
-rw-r--r--include/linux/aer.h30
-rw-r--r--include/linux/ahci_platform.h10
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/alcor_pci.h7
-rw-r--r--include/linux/align.h15
-rw-r--r--include/linux/amba/bus.h75
-rw-r--r--include/linux/amba/mmci.h6
-rw-r--r--include/linux/amba/pl022.h14
-rw-r--r--include/linux/amba/pl093.h77
-rw-r--r--include/linux/amba/serial.h261
-rw-r--r--include/linux/amd-iommu.h157
-rw-r--r--include/linux/amd-pmf-io.h50
-rw-r--r--include/linux/amd-pstate.h127
-rw-r--r--include/linux/anon_inodes.h9
-rw-r--r--include/linux/aperture.h62
-rw-r--r--include/linux/apple-gmux.h145
-rw-r--r--include/linux/apple_bl.h27
-rw-r--r--include/linux/arch_topology.h45
-rw-r--r--include/linux/args.h28
-rw-r--r--include/linux/arm-cci.h2
-rw-r--r--include/linux/arm-smccc.h320
-rw-r--r--include/linux/arm_ffa.h433
-rw-r--r--include/linux/arm_sdei.h4
-rw-r--r--include/linux/armada-37xx-rwtm-mailbox.h2
-rw-r--r--include/linux/array_size.h13
-rw-r--r--include/linux/ascii85.h3
-rw-r--r--include/linux/asn1_encoder.h32
-rw-r--r--include/linux/async.h4
-rw-r--r--include/linux/async_tx.h23
-rw-r--r--include/linux/ata.h155
-rw-r--r--include/linux/ata_platform.h2
-rw-r--r--include/linux/atalk.h2
-rw-r--r--include/linux/atm_suni.h12
-rw-r--r--include/linux/atm_tcp.h2
-rw-r--r--include/linux/atmdev.h5
-rw-r--r--include/linux/atmel-mci.h46
-rw-r--r--include/linux/atomic-arch-fallback.h2291
-rw-r--r--include/linux/atomic-fallback.h2525
-rw-r--r--include/linux/atomic.h11
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h4693
-rw-r--r--include/linux/atomic/atomic-instrumented.h5053
-rw-r--r--include/linux/atomic/atomic-long.h1812
-rw-r--r--include/linux/audit.h56
-rw-r--r--include/linux/audit_arch.h26
-rw-r--r--include/linux/auxiliary_bus.h251
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/avf/virtchnl.h1042
-rw-r--r--include/linux/backing-dev-defs.h47
-rw-r--r--include/linux/backing-dev.h210
-rw-r--r--include/linux/backing-file.h42
-rw-r--r--include/linux/badblocks.h30
-rw-r--r--include/linux/balloon_compaction.h28
-rw-r--r--include/linux/base64.h16
-rw-r--r--include/linux/bcd.h4
-rw-r--r--include/linux/bcm47xx_nvram.h6
-rw-r--r--include/linux/bcm47xx_sprom.h10
-rw-r--r--include/linux/bcm963xx_tag.h2
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h8
-rw-r--r--include/linux/binfmts.h29
-rw-r--r--include/linux/bio.h380
-rw-r--r--include/linux/bitfield.h51
-rw-r--r--include/linux/bitmap-str.h16
-rw-r--r--include/linux/bitmap.h433
-rw-r--r--include/linux/bitops.h197
-rw-r--r--include/linux/bits.h11
-rw-r--r--include/linux/blk-cgroup.h650
-rw-r--r--include/linux/blk-crypto-profile.h155
-rw-r--r--include/linux/blk-crypto.h36
-rw-r--r--include/linux/blk-integrity.h185
-rw-r--r--include/linux/blk-mq-pci.h4
-rw-r--r--include/linux/blk-mq-rdma.h11
-rw-r--r--include/linux/blk-mq-virtio.h2
-rw-r--r--include/linux/blk-mq.h793
-rw-r--r--include/linux/blk-pm.h3
-rw-r--r--include/linux/blk_types.h387
-rw-r--r--include/linux/blkdev.h1979
-rw-r--r--include/linux/blktrace_api.h34
-rw-r--r--include/linux/bma150.h4
-rw-r--r--include/linux/bootconfig.h106
-rw-r--r--include/linux/bootmem_info.h68
-rw-r--r--include/linux/bottom_half.h9
-rw-r--r--include/linux/bpf-cgroup-defs.h84
-rw-r--r--include/linux/bpf-cgroup.h410
-rw-r--r--include/linux/bpf-netns.h8
-rw-r--r--include/linux/bpf.h2270
-rw-r--r--include/linux/bpf_local_storage.h202
-rw-r--r--include/linux/bpf_lsm.h50
-rw-r--r--include/linux/bpf_mem_alloc.h48
-rw-r--r--include/linux/bpf_mprog.h343
-rw-r--r--include/linux/bpf_types.h30
-rw-r--r--include/linux/bpf_verifier.h610
-rw-r--r--include/linux/bpfilter.h25
-rw-r--r--include/linux/bpfptr.h88
-rw-r--r--include/linux/brcmphy.h116
-rw-r--r--include/linux/bsg-lib.h2
-rw-r--r--include/linux/bsg.h38
-rw-r--r--include/linux/btf.h460
-rw-r--r--include/linux/btf_ids.h175
-rw-r--r--include/linux/buffer_head.h264
-rw-r--r--include/linux/bug.h10
-rw-r--r--include/linux/build_bug.h9
-rw-r--r--include/linux/buildid.h21
-rw-r--r--include/linux/bvec.h133
-rw-r--r--include/linux/byteorder/generic.h4
-rw-r--r--include/linux/cache.h46
-rw-r--r--include/linux/cacheflush.h29
-rw-r--r--include/linux/cacheinfo.h69
-rw-r--r--include/linux/can/bittiming.h165
-rw-r--r--include/linux/can/can-ml.h12
-rw-r--r--include/linux/can/core.h9
-rw-r--r--include/linux/can/dev.h200
-rw-r--r--include/linux/can/dev/peak_canfd.h2
-rw-r--r--include/linux/can/led.h51
-rw-r--r--include/linux/can/length.h306
-rw-r--r--include/linux/can/platform/flexcan.h23
-rw-r--r--include/linux/can/platform/sja1000.h2
-rw-r--r--include/linux/can/rx-offload.h25
-rw-r--r--include/linux/can/skb.h108
-rw-r--r--include/linux/capability.h140
-rw-r--r--include/linux/cc_platform.h117
-rw-r--r--include/linux/ccp.h3
-rw-r--r--include/linux/cdrom.h20
-rw-r--r--include/linux/cdx/cdx_bus.h291
-rw-r--r--include/linux/ceph/auth.h72
-rw-r--r--include/linux/ceph/ceph_debug.h38
-rw-r--r--include/linux/ceph/ceph_features.h11
-rw-r--r--include/linux/ceph/ceph_fs.h106
-rw-r--r--include/linux/ceph/decode.h8
-rw-r--r--include/linux/ceph/libceph.h30
-rw-r--r--include/linux/ceph/mdsmap.h71
-rw-r--r--include/linux/ceph/messenger.h334
-rw-r--r--include/linux/ceph/mon_client.h4
-rw-r--r--include/linux/ceph/msgr.h66
-rw-r--r--include/linux/ceph/osd_client.h126
-rw-r--r--include/linux/ceph/osdmap.h18
-rw-r--r--include/linux/ceph/rados.h6
-rw-r--r--include/linux/cfag12864b.h2
-rw-r--r--include/linux/cfi.h53
-rw-r--r--include/linux/cfi_types.h45
-rw-r--r--include/linux/cgroup-defs.h229
-rw-r--r--include/linux/cgroup.h208
-rw-r--r--include/linux/cgroup_api.h1
-rw-r--r--include/linux/cgroup_refcnt.h96
-rw-r--r--include/linux/cgroup_subsys.h4
-rw-r--r--include/linux/cleancache.h124
-rw-r--r--include/linux/cleanup.h250
-rw-r--r--include/linux/clk-provider.h329
-rw-r--r--include/linux/clk.h309
-rw-r--r--include/linux/clk/at91_pmc.h36
-rw-r--r--include/linux/clk/davinci.h17
-rw-r--r--include/linux/clk/imx.h15
-rw-r--r--include/linux/clk/mmp.h18
-rw-r--r--include/linux/clk/pxa.h16
-rw-r--r--include/linux/clk/samsung.h24
-rw-r--r--include/linux/clk/spear.h37
-rw-r--r--include/linux/clk/sunxi-ng.h15
-rw-r--r--include/linux/clk/tegra.h126
-rw-r--r--include/linux/clk/ti.h35
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/clockchips.h4
-rw-r--r--include/linux/clocksource.h28
-rw-r--r--include/linux/clocksource_ids.h15
-rw-r--r--include/linux/closure.h415
-rw-r--r--include/linux/cm4000_cs.h11
-rw-r--r--include/linux/cma.h25
-rw-r--r--include/linux/cmdline-parser.h46
-rw-r--r--include/linux/comedi/comedi_8254.h161
-rw-r--r--include/linux/comedi/comedi_8255.h54
-rw-r--r--include/linux/comedi/comedi_isadma.h114
-rw-r--r--include/linux/comedi/comedi_pci.h56
-rw-r--r--include/linux/comedi/comedi_pcmcia.h48
-rw-r--r--include/linux/comedi/comedi_usb.h41
-rw-r--r--include/linux/comedi/comedidev.h1053
-rw-r--r--include/linux/comedi/comedilib.h26
-rw-r--r--include/linux/compaction.h132
-rw-r--r--include/linux/compat.h364
-rw-r--r--include/linux/compiler-clang.h89
-rw-r--r--include/linux/compiler-gcc.h120
-rw-r--r--include/linux/compiler-intel.h34
-rw-r--r--include/linux/compiler-version.h14
-rw-r--r--include/linux/compiler.h132
-rw-r--r--include/linux/compiler_attributes.h207
-rw-r--r--include/linux/compiler_types.h222
-rw-r--r--include/linux/completion.h7
-rw-r--r--include/linux/component.h24
-rw-r--r--include/linux/configfs.h7
-rw-r--r--include/linux/connector.h19
-rw-r--r--include/linux/console.h492
-rw-r--r--include/linux/console_struct.h11
-rw-r--r--include/linux/consolemap.h59
-rw-r--r--include/linux/container.h2
-rw-r--r--include/linux/container_of.h38
-rw-r--r--include/linux/context_tracking.h175
-rw-r--r--include/linux/context_tracking_irq.h21
-rw-r--r--include/linux/context_tracking_state.h121
-rw-r--r--include/linux/cookie.h51
-rw-r--r--include/linux/coredump.h39
-rw-r--r--include/linux/coresight-pmu.h56
-rw-r--r--include/linux/coresight.h486
-rw-r--r--include/linux/counter.h878
-rw-r--r--include/linux/counter_enum.h45
-rw-r--r--include/linux/cper.h56
-rw-r--r--include/linux/cpu.h78
-rw-r--r--include/linux/cpu_rmap.h4
-rw-r--r--include/linux/cpu_smt.h33
-rw-r--r--include/linux/cpufreq.h396
-rw-r--r--include/linux/cpuhotplug.h202
-rw-r--r--include/linux/cpuidle.h61
-rw-r--r--include/linux/cpumask.h765
-rw-r--r--include/linux/cpumask_api.h1
-rw-r--r--include/linux/cpuset.h69
-rw-r--r--include/linux/crash_core.h153
-rw-r--r--include/linux/crash_dump.h60
-rw-r--r--include/linux/crash_reserve.h48
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/crc-itu-t.h2
-rw-r--r--include/linux/crc32c.h1
-rw-r--r--include/linux/crc64.h7
-rw-r--r--include/linux/crc8.h2
-rw-r--r--include/linux/cred.h130
-rw-r--r--include/linux/crush/crush.h3
-rw-r--r--include/linux/crypto.h485
-rw-r--r--include/linux/ctype.h15
-rw-r--r--include/linux/cuda.h2
-rw-r--r--include/linux/cxl-event.h143
-rw-r--r--include/linux/cyclades.h364
-rw-r--r--include/linux/damon.h769
-rw-r--r--include/linux/dax.h229
-rw-r--r--include/linux/dcache.h218
-rw-r--r--include/linux/dccp.h6
-rw-r--r--include/linux/dcookies.h69
-rw-r--r--include/linux/debug_locks.h7
-rw-r--r--include/linux/debugfs.h105
-rw-r--r--include/linux/debugobjects.h34
-rw-r--r--include/linux/decompress/mm.h14
-rw-r--r--include/linux/delay.h17
-rw-r--r--include/linux/delayacct.h185
-rw-r--r--include/linux/dev_printk.h78
-rw-r--r--include/linux/devfreq-event.h14
-rw-r--r--include/linux/devfreq.h70
-rw-r--r--include/linux/devfreq_cooling.h27
-rw-r--r--include/linux/device-mapper.h217
-rw-r--r--include/linux/device.h536
-rw-r--r--include/linux/device/bus.h123
-rw-r--r--include/linux/device/class.h127
-rw-r--r--include/linux/device/driver.h35
-rw-r--r--include/linux/devm-helpers.h79
-rw-r--r--include/linux/dfl.h95
-rw-r--r--include/linux/dim.h5
-rw-r--r--include/linux/dio.h2
-rw-r--r--include/linux/dlm.h8
-rw-r--r--include/linux/dlm_plock.h2
-rw-r--r--include/linux/dm-bufio.h37
-rw-r--r--include/linux/dm-dirty-log.h9
-rw-r--r--include/linux/dm-io.h14
-rw-r--r--include/linux/dm-kcopyd.h24
-rw-r--r--include/linux/dm-region-hash.h9
-rw-r--r--include/linux/dm-verity-loadpin.h27
-rw-r--r--include/linux/dma-buf.h247
-rw-r--r--include/linux/dma-contiguous.h176
-rw-r--r--include/linux/dma-debug.h160
-rw-r--r--include/linux/dma-direct.h203
-rw-r--r--include/linux/dma-direction.h8
-rw-r--r--include/linux/dma-fence-array.h34
-rw-r--r--include/linux/dma-fence-chain.h72
-rw-r--r--include/linux/dma-fence-unwrap.h75
-rw-r--r--include/linux/dma-fence.h130
-rw-r--r--include/linux/dma-heap.h21
-rw-r--r--include/linux/dma-iommu.h82
-rw-r--r--include/linux/dma-map-ops.h513
-rw-r--r--include/linux/dma-mapping.h388
-rw-r--r--include/linux/dma-noncoherent.h114
-rw-r--r--include/linux/dma-resv.h386
-rw-r--r--include/linux/dma/amd_xdma.h16
-rw-r--r--include/linux/dma/edma.h83
-rw-r--r--include/linux/dma/hsu.h6
-rw-r--r--include/linux/dma/imx-dma.h (renamed from include/linux/platform_data/dma-imx.h)40
-rw-r--r--include/linux/dma/k3-event-router.h16
-rw-r--r--include/linux/dma/k3-psil.h25
-rw-r--r--include/linux/dma/k3-udma-glue.h22
-rw-r--r--include/linux/dma/mmp-pdma.h16
-rw-r--r--include/linux/dma/qcom-gpi-dma.h83
-rw-r--r--include/linux/dma/qcom_adm.h12
-rw-r--r--include/linux/dma/ti-cppi5.h5
-rw-r--r--include/linux/dma/xilinx_dpdma.h11
-rw-r--r--include/linux/dmaengine.h87
-rw-r--r--include/linux/dmar.h150
-rw-r--r--include/linux/dnotify.h7
-rw-r--r--include/linux/dpll.h182
-rw-r--r--include/linux/drbd.h7
-rw-r--r--include/linux/drbd_config.h16
-rw-r--r--include/linux/drbd_genl_api.h2
-rw-r--r--include/linux/drbd_limits.h204
-rw-r--r--include/linux/dsa/8021q.h111
-rw-r--r--include/linux/dsa/brcm.h16
-rw-r--r--include/linux/dsa/ksz_common.h53
-rw-r--r--include/linux/dsa/loop.h1
-rw-r--r--include/linux/dsa/mv88e6xxx.h13
-rw-r--r--include/linux/dsa/ocelot.h276
-rw-r--r--include/linux/dsa/sja1105.h54
-rw-r--r--include/linux/dsa/tag_qca.h87
-rw-r--r--include/linux/dtpm.h73
-rw-r--r--include/linux/dynamic_debug.h235
-rw-r--r--include/linux/dynamic_queue_limits.h45
-rw-r--r--include/linux/edac.h52
-rw-r--r--include/linux/eeprom_93xx46.h5
-rw-r--r--include/linux/efi.h491
-rw-r--r--include/linux/efi_embedded_fw.h6
-rw-r--r--include/linux/einj-cxl.h44
-rw-r--r--include/linux/elevator.h177
-rw-r--r--include/linux/elf-fdpic.h14
-rw-r--r--include/linux/elf.h10
-rw-r--r--include/linux/elfcore-compat.h20
-rw-r--r--include/linux/elfcore.h60
-rw-r--r--include/linux/elfnote-lto.h14
-rw-r--r--include/linux/energy_model.h271
-rw-r--r--include/linux/entry-common.h353
-rw-r--r--include/linux/entry-kvm.h27
-rw-r--r--include/linux/err.h48
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/error-injection.h3
-rw-r--r--include/linux/etherdevice.h94
-rw-r--r--include/linux/ethtool.h683
-rw-r--r--include/linux/ethtool_netlink.h51
-rw-r--r--include/linux/eventfd.h33
-rw-r--r--include/linux/eventpoll.h31
-rw-r--r--include/linux/evm.h72
-rw-r--r--include/linux/export-internal.h72
-rw-r--r--include/linux/export.h181
-rw-r--r--include/linux/exportfs.h104
-rw-r--r--include/linux/extcon.h39
-rw-r--r--include/linux/f2fs_fs.h153
-rw-r--r--include/linux/fanotify.h77
-rw-r--r--include/linux/fault-inject-usercopy.h22
-rw-r--r--include/linux/fault-inject.h38
-rw-r--r--include/linux/fb.h312
-rw-r--r--include/linux/fbcon.h4
-rw-r--r--include/linux/fcntl.h8
-rw-r--r--include/linux/fdtable.h46
-rw-r--r--include/linux/fiemap.h4
-rw-r--r--include/linux/file.h30
-rw-r--r--include/linux/fileattr.h59
-rw-r--r--include/linux/filelock.h510
-rw-r--r--include/linux/filter.h482
-rw-r--r--include/linux/find.h670
-rw-r--r--include/linux/firewire.h120
-rw-r--r--include/linux/firmware.h131
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h328
-rw-r--r--include/linux/firmware/cirrus/wmfw.h203
-rw-r--r--include/linux/firmware/imx/dsp.h16
-rw-r--r--include/linux/firmware/imx/ipc.h13
-rw-r--r--include/linux/firmware/imx/s4.h20
-rw-r--r--include/linux/firmware/imx/sci.h33
-rw-r--r--include/linux/firmware/imx/svc/misc.h19
-rw-r--r--include/linux/firmware/imx/svc/rm.h5
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h221
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h74
-rw-r--r--include/linux/firmware/mediatek/mtk-adsp-ipc.h59
-rw-r--r--include/linux/firmware/meson/meson_sm.h2
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h52
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h140
-rw-r--r--include/linux/firmware/trusted_foundations.h8
-rw-r--r--include/linux/firmware/xlnx-event-manager.h36
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h807
-rw-r--r--include/linux/fixp-arith.h20
-rw-r--r--include/linux/flex_proportions.h41
-rw-r--r--include/linux/font.h20
-rw-r--r--include/linux/fortify-string.h791
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h1
-rw-r--r--include/linux/fpga/fpga-bridge.h32
-rw-r--r--include/linux/fpga/fpga-mgr.h88
-rw-r--r--include/linux/fpga/fpga-region.h42
-rw-r--r--include/linux/fprobe.h118
-rw-r--r--include/linux/frame.h35
-rw-r--r--include/linux/framer/framer-provider.h193
-rw-r--r--include/linux/framer/framer.h205
-rw-r--r--include/linux/framer/pef2256.h31
-rw-r--r--include/linux/freezer.h246
-rw-r--r--include/linux/frontswap.h122
-rw-r--r--include/linux/fs.h2316
-rw-r--r--include/linux/fs_api.h1
-rw-r--r--include/linux/fs_context.h30
-rw-r--r--include/linux/fs_enet_pd.h165
-rw-r--r--include/linux/fs_parser.h5
-rw-r--r--include/linux/fs_stack.h8
-rw-r--r--include/linux/fs_uart_pd.h71
-rw-r--r--include/linux/fscache-cache.h617
-rw-r--r--include/linux/fscache.h1001
-rw-r--r--include/linux/fscrypt.h578
-rw-r--r--include/linux/fsi-occ.h2
-rw-r--r--include/linux/fsl/enetc_mdio.h21
-rw-r--r--include/linux/fsl/guts.h4
-rw-r--r--include/linux/fsl/mc.h82
-rw-r--r--include/linux/fsl/ptp_qoriq.h4
-rw-r--r--include/linux/fsnotify.h201
-rw-r--r--include/linux/fsnotify_backend.h390
-rw-r--r--include/linux/fsverity.h138
-rw-r--r--include/linux/ftrace.h359
-rw-r--r--include/linux/ftrace_irq.h13
-rw-r--r--include/linux/fw_table.h61
-rw-r--r--include/linux/fwnode.h146
-rw-r--r--include/linux/fwnode_mdio.h35
-rw-r--r--include/linux/gameport.h15
-rw-r--r--include/linux/generic-radix-tree.h90
-rw-r--r--include/linux/genetlink.h23
-rw-r--r--include/linux/genhd.h413
-rw-r--r--include/linux/genl_magic_func.h29
-rw-r--r--include/linux/genl_magic_struct.h11
-rw-r--r--include/linux/gfp.h510
-rw-r--r--include/linux/gfp_api.h1
-rw-r--r--include/linux/gfp_types.h375
-rw-r--r--include/linux/goldfish.h15
-rw-r--r--include/linux/gpio.h139
-rw-r--r--include/linux/gpio/aspeed.h4
-rw-r--r--include/linux/gpio/consumer.h171
-rw-r--r--include/linux/gpio/driver.h423
-rw-r--r--include/linux/gpio/gpio-nomadik.h294
-rw-r--r--include/linux/gpio/gpio-reg.h4
-rw-r--r--include/linux/gpio/legacy-of-mm-gpiochip.h36
-rw-r--r--include/linux/gpio/machine.h20
-rw-r--r--include/linux/gpio/property.h11
-rw-r--r--include/linux/gpio/regmap.h10
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/greybus.h46
-rw-r--r--include/linux/greybus/greybus_manifest.h4
-rw-r--r--include/linux/greybus/greybus_protocols.h8
-rw-r--r--include/linux/greybus/hd.h2
-rw-r--r--include/linux/greybus/module.h2
-rw-r--r--include/linux/greybus/svc.h3
-rw-r--r--include/linux/group_cpus.h14
-rw-r--r--include/linux/habanalabs/cpucp_if.h1423
-rw-r--r--include/linux/habanalabs/hl_boot_if.h792
-rw-r--r--include/linux/hardirq.h19
-rw-r--r--include/linux/hash.h5
-rw-r--r--include/linux/hashtable_api.h1
-rw-r--r--include/linux/hdlc.h4
-rw-r--r--include/linux/hdlcdrv.h2
-rw-r--r--include/linux/hdmi.h13
-rw-r--r--include/linux/hex.h35
-rw-r--r--include/linux/hid-roccat.h2
-rw-r--r--include/linux/hid-sensor-hub.h18
-rw-r--r--include/linux/hid-sensor-ids.h20
-rw-r--r--include/linux/hid.h259
-rw-r--r--include/linux/hid_bpf.h159
-rw-r--r--include/linux/hidden.h19
-rw-r--r--include/linux/highmem-internal.h290
-rw-r--r--include/linux/highmem.h756
-rw-r--r--include/linux/hil_mlc.h2
-rw-r--r--include/linux/hippidevice.h4
-rw-r--r--include/linux/hisi_acc_qm.h593
-rw-r--r--include/linux/hmm.h15
-rw-r--r--include/linux/host1x.h208
-rw-r--r--include/linux/host1x_context_bus.h15
-rw-r--r--include/linux/hp_sdc.h2
-rw-r--r--include/linux/hpet.h2
-rw-r--r--include/linux/hrtimer.h181
-rw-r--r--include/linux/hrtimer_api.h1
-rw-r--r--include/linux/hrtimer_defs.h104
-rw-r--r--include/linux/hrtimer_types.h50
-rw-r--r--include/linux/hsi/ssi_protocol.h1
-rw-r--r--include/linux/htcpld.h25
-rw-r--r--include/linux/hte.h271
-rw-r--r--include/linux/huge_mm.h492
-rw-r--r--include/linux/hugetlb.h685
-rw-r--r--include/linux/hugetlb_cgroup.h141
-rw-r--r--include/linux/hw_breakpoint.h17
-rw-r--r--include/linux/hw_random.h8
-rw-r--r--include/linux/hwmon-sysfs.h1
-rw-r--r--include/linux/hwmon.h66
-rw-r--r--include/linux/hyperv.h304
-rw-r--r--include/linux/hypervisor.h8
-rw-r--r--include/linux/i2c-algo-pca.h15
-rw-r--r--include/linux/i2c-atr.h116
-rw-r--r--include/linux/i2c-smbus.h12
-rw-r--r--include/linux/i2c.h128
-rw-r--r--include/linux/i3c/ccc.h6
-rw-r--r--include/linux/i3c/device.h33
-rw-r--r--include/linux/i3c/master.h32
-rw-r--r--include/linux/i8042.h1
-rw-r--r--include/linux/i8254.h21
-rw-r--r--include/linux/icmpv6.h49
-rw-r--r--include/linux/ide.h1628
-rw-r--r--include/linux/idle_inject.h5
-rw-r--r--include/linux/idr.h19
-rw-r--r--include/linux/ieee80211.h2048
-rw-r--r--include/linux/ieee802154.h112
-rw-r--r--include/linux/if_arp.h6
-rw-r--r--include/linux/if_bridge.h67
-rw-r--r--include/linux/if_eql.h2
-rw-r--r--include/linux/if_frad.h92
-rw-r--r--include/linux/if_hsr.h47
-rw-r--r--include/linux/if_macvlan.h11
-rw-r--r--include/linux/if_pppol2tp.h2
-rw-r--r--include/linux/if_pppox.h2
-rw-r--r--include/linux/if_rmnet.h95
-rw-r--r--include/linux/if_tap.h11
-rw-r--r--include/linux/if_team.h17
-rw-r--r--include/linux/if_tun.h29
-rw-r--r--include/linux/if_vlan.h91
-rw-r--r--include/linux/igmp.h12
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h50
-rw-r--r--include/linux/iio/adc/adi-axi-adc.h64
-rw-r--r--include/linux/iio/adc/qcom-vadc-common.h167
-rw-r--r--include/linux/iio/afe/rescale.h36
-rw-r--r--include/linux/iio/backend.h72
-rw-r--r--include/linux/iio/buffer-dma.h12
-rw-r--r--include/linux/iio/buffer-dmaengine.h10
-rw-r--r--include/linux/iio/buffer.h16
-rw-r--r--include/linux/iio/buffer_impl.h34
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h12
-rw-r--r--include/linux/iio/common/inv_sensors_timestamp.h95
-rw-r--r--include/linux/iio/common/st_sensors.h51
-rw-r--r--include/linux/iio/consumer.h82
-rw-r--r--include/linux/iio/dac/mcp4725.h2
-rw-r--r--include/linux/iio/driver.h14
-rw-r--r--include/linux/iio/gyro/itg3200.h2
-rw-r--r--include/linux/iio/iio-gts-helper.h206
-rw-r--r--include/linux/iio/iio-opaque.h50
-rw-r--r--include/linux/iio/iio.h207
-rw-r--r--include/linux/iio/imu/adis.h139
-rw-r--r--include/linux/iio/kfifo_buf.h11
-rw-r--r--include/linux/iio/sw_device.h3
-rw-r--r--include/linux/iio/sw_trigger.h3
-rw-r--r--include/linux/iio/sysfs.h14
-rw-r--r--include/linux/iio/trigger.h42
-rw-r--r--include/linux/iio/trigger_consumer.h2
-rw-r--r--include/linux/iio/triggered_buffer.h30
-rw-r--r--include/linux/iio/types.h10
-rw-r--r--include/linux/ima.h142
-rw-r--r--include/linux/indirect_call_wrapper.h12
-rw-r--r--include/linux/inet_diag.h3
-rw-r--r--include/linux/inetdevice.h36
-rw-r--r--include/linux/init.h165
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/initrd.h15
-rw-r--r--include/linux/inotify.h3
-rw-r--r--include/linux/input-polldev.h58
-rw-r--r--include/linux/input.h20
-rw-r--r--include/linux/input/adp5589.h7
-rw-r--r--include/linux/input/as5011.h1
-rw-r--r--include/linux/input/auo-pixcir-ts.h44
-rw-r--r--include/linux/input/cy8ctmg110_pdata.h11
-rw-r--r--include/linux/input/cyttsp.h29
-rw-r--r--include/linux/input/elan-i2c-ids.h5
-rw-r--r--include/linux/input/matrix_keypad.h5
-rw-r--r--include/linux/input/mt.h2
-rw-r--r--include/linux/input/navpoint.h9
-rw-r--r--include/linux/input/sparse-keymap.h1
-rw-r--r--include/linux/input/vivaldi-fmap.h27
-rw-r--r--include/linux/instruction_pointer.h13
-rw-r--r--include/linux/instrumentation.h26
-rw-r--r--include/linux/instrumented.h118
-rw-r--r--include/linux/int_log.h (renamed from include/media/dvb_math.h)18
-rw-r--r--include/linux/integrity.h28
-rw-r--r--include/linux/intel-iommu.h808
-rw-r--r--include/linux/intel-ish-client-if.h17
-rw-r--r--include/linux/intel-pti.h35
-rw-r--r--include/linux/intel-svm.h59
-rw-r--r--include/linux/intel_rapl.h75
-rw-r--r--include/linux/intel_tcc.h18
-rw-r--r--include/linux/intel_tpmi.h50
-rw-r--r--include/linux/interconnect-clk.h22
-rw-r--r--include/linux/interconnect-provider.h51
-rw-r--r--include/linux/interconnect.h59
-rw-r--r--include/linux/interrupt.h166
-rw-r--r--include/linux/interval_tree.h58
-rw-r--r--include/linux/io-mapping.h63
-rw-r--r--include/linux/io-pgtable.h114
-rw-r--r--include/linux/io.h42
-rw-r--r--include/linux/io_uring.h59
-rw-r--r--include/linux/io_uring/cmd.h77
-rw-r--r--include/linux/io_uring_types.h682
-rw-r--r--include/linux/ioam6.h13
-rw-r--r--include/linux/ioam6_genl.h13
-rw-r--r--include/linux/ioam6_iptunnel.h13
-rw-r--r--include/linux/ioasid.h76
-rw-r--r--include/linux/iocontext.h55
-rw-r--r--include/linux/iomap.h256
-rw-r--r--include/linux/iommu-helper.h4
-rw-r--r--include/linux/iommu.h1295
-rw-r--r--include/linux/iommufd.h114
-rw-r--r--include/linux/iopoll.h28
-rw-r--r--include/linux/ioport.h81
-rw-r--r--include/linux/ioprio.h81
-rw-r--r--include/linux/ioremap.h30
-rw-r--r--include/linux/iosys-map.h516
-rw-r--r--include/linux/iov_iter.h274
-rw-r--r--include/linux/iova.h124
-rw-r--r--include/linux/iova_bitmap.h52
-rw-r--r--include/linux/ip.h21
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipc_namespace.h60
-rw-r--r--include/linux/ipmi.h10
-rw-r--r--include/linux/ipmi_smi.h65
-rw-r--r--include/linux/ipv6.h132
-rw-r--r--include/linux/irq.h219
-rw-r--r--include/linux/irq_cpustat.h28
-rw-r--r--include/linux/irq_work.h44
-rw-r--r--include/linux/irqchip.h24
-rw-r--r--include/linux/irqchip/arm-gic-common.h25
-rw-r--r--include/linux/irqchip/arm-gic-v3.h62
-rw-r--r--include/linux/irqchip/arm-gic-v4.h6
-rw-r--r--include/linux/irqchip/arm-gic.h6
-rw-r--r--include/linux/irqchip/arm-vgic-info.h45
-rw-r--r--include/linux/irqchip/irq-ixp4xx.h12
-rw-r--r--include/linux/irqchip/mmp.h7
-rw-r--r--include/linux/irqchip/mxs.h11
-rw-r--r--include/linux/irqchip/versatile-fpga.h14
-rw-r--r--include/linux/irqdesc.h73
-rw-r--r--include/linux/irqdomain.h281
-rw-r--r--include/linux/irqdomain_defs.h33
-rw-r--r--include/linux/irqflags.h68
-rw-r--r--include/linux/irqflags_types.h22
-rw-r--r--include/linux/irqhandler.h2
-rw-r--r--include/linux/irqreturn.h8
-rw-r--r--include/linux/isa-dma.h14
-rw-r--r--include/linux/isa.h54
-rw-r--r--include/linux/isapnp.h6
-rw-r--r--include/linux/iscsi_ibft.h28
-rw-r--r--include/linux/isicom.h85
-rw-r--r--include/linux/ism.h92
-rw-r--r--include/linux/iversion.h129
-rw-r--r--include/linux/jbd2.h326
-rw-r--r--include/linux/jhash.h2
-rw-r--r--include/linux/jiffies.h209
-rw-r--r--include/linux/jump_label.h94
-rw-r--r--include/linux/kallsyms.h75
-rw-r--r--include/linux/kasan-checks.h8
-rw-r--r--include/linux/kasan-enabled.h35
-rw-r--r--include/linux/kasan-tags.h15
-rw-r--r--include/linux/kasan.h586
-rw-r--r--include/linux/kbd_kern.h13
-rw-r--r--include/linux/kconfig.h8
-rw-r--r--include/linux/kcore.h3
-rw-r--r--include/linux/kcov.h39
-rw-r--r--include/linux/kcsan-checks.h137
-rw-r--r--include/linux/kcsan.h18
-rw-r--r--include/linux/kd.h8
-rw-r--r--include/linux/kdb.h30
-rw-r--r--include/linux/kdev_t.h22
-rw-r--r--include/linux/kernel-page-flags.h2
-rw-r--r--include/linux/kernel.h773
-rw-r--r--include/linux/kernel_read_file.h55
-rw-r--r--include/linux/kernel_stat.h23
-rw-r--r--include/linux/kernfs.h134
-rw-r--r--include/linux/kexec.h187
-rw-r--r--include/linux/key-type.h2
-rw-r--r--include/linux/key.h23
-rw-r--r--include/linux/keyslot-manager.h106
-rw-r--r--include/linux/kfence.h253
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kgdb.h30
-rw-r--r--include/linux/khugepaged.h70
-rw-r--r--include/linux/kmemleak.h8
-rw-r--r--include/linux/kmsan-checks.h83
-rw-r--r--include/linux/kmsan.h334
-rw-r--r--include/linux/kmsan_string.h21
-rw-r--r--include/linux/kmsan_types.h37
-rw-r--r--include/linux/kmsg_dump.h47
-rw-r--r--include/linux/kobject.h107
-rw-r--r--include/linux/kobject_api.h1
-rw-r--r--include/linux/kobject_ns.h4
-rw-r--r--include/linux/kprobes.h252
-rw-r--r--include/linux/kref_api.h1
-rw-r--r--include/linux/ks0108.h2
-rw-r--r--include/linux/ksm.h85
-rw-r--r--include/linux/kstrtox.h150
-rw-r--r--include/linux/kthread.h64
-rw-r--r--include/linux/ktime.h9
-rw-r--r--include/linux/ktime_api.h1
-rw-r--r--include/linux/kvm_dirty_ring.h101
-rw-r--r--include/linux/kvm_host.h1432
-rw-r--r--include/linux/kvm_irqfd.h2
-rw-r--r--include/linux/kvm_types.h49
-rw-r--r--include/linux/lapb.h5
-rw-r--r--include/linux/latencytop.h3
-rw-r--r--include/linux/led-class-flash.h18
-rw-r--r--include/linux/led-class-multicolor.h41
-rw-r--r--include/linux/leds-expresswire.h38
-rw-r--r--include/linux/leds-tca6507.h21
-rw-r--r--include/linux/leds.h191
-rw-r--r--include/linux/libata.h441
-rw-r--r--include/linux/libgcc.h7
-rw-r--r--include/linux/libnvdimm.h41
-rw-r--r--include/linux/libps2.h62
-rw-r--r--include/linux/lightnvm.h699
-rw-r--r--include/linux/limits.h3
-rw-r--r--include/linux/linear_range.h13
-rw-r--r--include/linux/linkage.h77
-rw-r--r--include/linux/linkmode.h26
-rw-r--r--include/linux/list.h260
-rw-r--r--include/linux/list_lru.h97
-rw-r--r--include/linux/list_sort.h7
-rw-r--r--include/linux/litex.h83
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/livepatch_sched.h29
-rw-r--r--include/linux/llist.h73
-rw-r--r--include/linux/llist_api.h1
-rw-r--r--include/linux/local_lock_internal.h83
-rw-r--r--include/linux/lockd/bind.h5
-rw-r--r--include/linux/lockd/lockd.h62
-rw-r--r--include/linux/lockd/xdr.h38
-rw-r--r--include/linux/lockd/xdr4.h33
-rw-r--r--include/linux/lockdep.h282
-rw-r--r--include/linux/lockdep_api.h1
-rw-r--r--include/linux/lockdep_types.h98
-rw-r--r--include/linux/lockref.h1
-rw-r--r--include/linux/log2.h9
-rw-r--r--include/linux/logic_iomem.h62
-rw-r--r--include/linux/logic_pio.h3
-rw-r--r--include/linux/lru_cache.h11
-rw-r--r--include/linux/lsm_audit.h12
-rw-r--r--include/linux/lsm_hook_defs.h159
-rw-r--r--include/linux/lsm_hooks.h1564
-rw-r--r--include/linux/lwq.h124
-rw-r--r--include/linux/mISDNif.h3
-rw-r--r--include/linux/mailbox/arm_mhuv2_message.h20
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h24
-rw-r--r--include/linux/mailbox/zynqmp-ipi-message.h4
-rw-r--r--include/linux/mailbox_client.h1
-rw-r--r--include/linux/mailbox_controller.h1
-rw-r--r--include/linux/map_benchmark.h31
-rw-r--r--include/linux/maple.h1
-rw-r--r--include/linux/maple_tree.h864
-rw-r--r--include/linux/marvell_phy.h12
-rw-r--r--include/linux/math.h208
-rw-r--r--include/linux/math64.h54
-rw-r--r--include/linux/max17040_battery.h16
-rw-r--r--include/linux/mbcache.h42
-rw-r--r--include/linux/mc146818rtc.h9
-rw-r--r--include/linux/mcb.h8
-rw-r--r--include/linux/mdev.h165
-rw-r--r--include/linux/mdio-bitbang.h8
-rw-r--r--include/linux/mdio-xpcs.h41
-rw-r--r--include/linux/mdio.h397
-rw-r--r--include/linux/mdio/mdio-i2c.h24
-rw-r--r--include/linux/mdio/mdio-mscc-miim.h19
-rw-r--r--include/linux/mdio/mdio-regmap.h26
-rw-r--r--include/linux/mdio/mdio-xgene.h134
-rw-r--r--include/linux/mei_aux.h31
-rw-r--r--include/linux/mei_cl_bus.h34
-rw-r--r--include/linux/mem_encrypt.h4
-rw-r--r--include/linux/memblock.h196
-rw-r--r--include/linux/memcontrol.h1334
-rw-r--r--include/linux/memfd.h4
-rw-r--r--include/linux/memory-tiers.h140
-rw-r--r--include/linux/memory.h117
-rw-r--r--include/linux/memory_hotplug.h312
-rw-r--r--include/linux/mempolicy.h88
-rw-r--r--include/linux/mempool.h19
-rw-r--r--include/linux/memregion.h42
-rw-r--r--include/linux/memremap.h134
-rw-r--r--include/linux/memstick.h1
-rw-r--r--include/linux/mfd/88pm860x.h6
-rw-r--r--include/linux/mfd/ab3100.h128
-rw-r--r--include/linux/mfd/abx500.h276
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h476
-rw-r--r--include/linux/mfd/abx500/ab8500.h13
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h51
-rw-r--r--include/linux/mfd/asic3.h313
-rw-r--r--include/linux/mfd/atc260x/atc2603c.h281
-rw-r--r--include/linux/mfd/atc260x/atc2609a.h308
-rw-r--r--include/linux/mfd/atc260x/core.h58
-rw-r--r--include/linux/mfd/axp20x.h218
-rw-r--r--include/linux/mfd/bcm2835-pm.h1
-rw-r--r--include/linux/mfd/bd9571mwv.h45
-rw-r--r--include/linux/mfd/core.h36
-rw-r--r--include/linux/mfd/cs42l43-regs.h1184
-rw-r--r--include/linux/mfd/cs42l43.h103
-rw-r--r--include/linux/mfd/da9055/pdata.h13
-rw-r--r--include/linux/mfd/da9063/core.h1
-rw-r--r--include/linux/mfd/da9063/registers.h26
-rw-r--r--include/linux/mfd/db8500-prcmu.h2
-rw-r--r--include/linux/mfd/dbx500-prcmu.h46
-rw-r--r--include/linux/mfd/dm355evm_msp.h79
-rw-r--r--include/linux/mfd/hi655x-pmic.h7
-rw-r--r--include/linux/mfd/htc-pasic3.h54
-rw-r--r--include/linux/mfd/idt82p33_reg.h115
-rw-r--r--include/linux/mfd/idt8a340_reg.h768
-rw-r--r--include/linux/mfd/idtRC38xxx_reg.h273
-rw-r--r--include/linux/mfd/intel-m10-bmc.h308
-rw-r--r--include/linux/mfd/intel_msic.h453
-rw-r--r--include/linux/mfd/intel_soc_pmic.h9
-rw-r--r--include/linux/mfd/ipaq-micro.h4
-rw-r--r--include/linux/mfd/iqs62x.h18
-rw-r--r--include/linux/mfd/lp873x.h10
-rw-r--r--include/linux/mfd/lp87565.h44
-rw-r--r--include/linux/mfd/lp8788.h9
-rw-r--r--include/linux/mfd/lpc_ich.h9
-rw-r--r--include/linux/mfd/madera/core.h1
-rw-r--r--include/linux/mfd/madera/pdata.h2
-rw-r--r--include/linux/mfd/madera/registers.h635
-rw-r--r--include/linux/mfd/max5970.h96
-rw-r--r--include/linux/mfd/max77541.h91
-rw-r--r--include/linux/mfd/max77686-private.h32
-rw-r--r--include/linux/mfd/max77693-private.h2
-rw-r--r--include/linux/mfd/max77714.h60
-rw-r--r--include/linux/mfd/max77843-private.h2
-rw-r--r--include/linux/mfd/max8997.h12
-rw-r--r--include/linux/mfd/max8998.h6
-rw-r--r--include/linux/mfd/mt6331/core.h40
-rw-r--r--include/linux/mfd/mt6331/registers.h584
-rw-r--r--include/linux/mfd/mt6332/core.h65
-rw-r--r--include/linux/mfd/mt6332/registers.h642
-rw-r--r--include/linux/mfd/mt6357/core.h119
-rw-r--r--include/linux/mfd/mt6357/registers.h1574
-rw-r--r--include/linux/mfd/mt6358/core.h8
-rw-r--r--include/linux/mfd/mt6358/registers.h32
-rw-r--r--include/linux/mfd/mt6359/core.h133
-rw-r--r--include/linux/mfd/mt6359/registers.h531
-rw-r--r--include/linux/mfd/mt6359p/registers.h249
-rw-r--r--include/linux/mfd/mt6360.h240
-rw-r--r--include/linux/mfd/mt6397/core.h5
-rw-r--r--include/linux/mfd/mt6397/rtc.h2
-rw-r--r--include/linux/mfd/ntxec.h38
-rw-r--r--include/linux/mfd/ocelot.h62
-rw-r--r--include/linux/mfd/palmas.h9
-rw-r--r--include/linux/mfd/pcf50633/core.h6
-rw-r--r--include/linux/mfd/rk808.h590
-rw-r--r--include/linux/mfd/rn5t618.h10
-rw-r--r--include/linux/mfd/rohm-bd70528.h391
-rw-r--r--include/linux/mfd/rohm-bd71815.h562
-rw-r--r--include/linux/mfd/rohm-bd71828.h13
-rw-r--r--include/linux/mfd/rohm-bd718x7.h13
-rw-r--r--include/linux/mfd/rohm-bd957x.h140
-rw-r--r--include/linux/mfd/rohm-generic.h38
-rw-r--r--include/linux/mfd/rsmu.h39
-rw-r--r--include/linux/mfd/rt5033-private.h109
-rw-r--r--include/linux/mfd/rt5033.h25
-rw-r--r--include/linux/mfd/rz-mtu3.h191
-rw-r--r--include/linux/mfd/samsung/core.h35
-rw-r--r--include/linux/mfd/samsung/irq.h50
-rw-r--r--include/linux/mfd/samsung/s5m8763.h90
-rw-r--r--include/linux/mfd/si476x-core.h2
-rw-r--r--include/linux/mfd/si476x-platform.h2
-rw-r--r--include/linux/mfd/stm32-lptimer.h5
-rw-r--r--include/linux/mfd/stm32-timers.h16
-rw-r--r--include/linux/mfd/stmfx.h2
-rw-r--r--include/linux/mfd/stpmic1.h12
-rw-r--r--include/linux/mfd/sun4i-gpadc.h4
-rw-r--r--include/linux/mfd/sy7636a.h34
-rw-r--r--include/linux/mfd/syscon.h30
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h6
-rw-r--r--include/linux/mfd/syscon/xlnx-vcu.h39
-rw-r--r--include/linux/mfd/t7l66xb.h30
-rw-r--r--include/linux/mfd/tc3589x.h6
-rw-r--r--include/linux/mfd/tc6387xb.h20
-rw-r--r--include/linux/mfd/tc6393xb.h56
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h118
-rw-r--r--include/linux/mfd/tmio.h13
-rw-r--r--include/linux/mfd/tps65010.h11
-rw-r--r--include/linux/mfd/tps65086.h33
-rw-r--r--include/linux/mfd/tps65217.h10
-rw-r--r--include/linux/mfd/tps65218.h12
-rw-r--r--include/linux/mfd/tps65219.h345
-rw-r--r--include/linux/mfd/tps65910.h42
-rw-r--r--include/linux/mfd/tps65912.h12
-rw-r--r--include/linux/mfd/tps6594.h1020
-rw-r--r--include/linux/mfd/tps68470.h11
-rw-r--r--include/linux/mfd/tps80031.h637
-rw-r--r--include/linux/mfd/twl.h65
-rw-r--r--include/linux/mfd/twl6040.h32
-rw-r--r--include/linux/mfd/ucb1x00.h1
-rw-r--r--include/linux/mfd/wcd934x/registers.h57
-rw-r--r--include/linux/mhi.h182
-rw-r--r--include/linux/mhi_ep.h305
-rw-r--r--include/linux/mic_bus.h100
-rw-r--r--include/linux/micrel_phy.h30
-rw-r--r--include/linux/migrate.h189
-rw-r--r--include/linux/migrate_mode.h13
-rw-r--r--include/linux/mii.h87
-rw-r--r--include/linux/mii_timestamper.h4
-rw-r--r--include/linux/min_heap.h44
-rw-r--r--include/linux/minmax.h273
-rw-r--r--include/linux/misc_cgroup.h134
-rw-r--r--include/linux/miscdevice.h12
-rw-r--r--include/linux/mlx4/device.h24
-rw-r--r--include/linux/mlx4/driver.h64
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mlx5/accel.h156
-rw-r--r--include/linux/mlx5/cq.h2
-rw-r--r--include/linux/mlx5/device.h309
-rw-r--r--include/linux/mlx5/driver.h550
-rw-r--r--include/linux/mlx5/eq.h4
-rw-r--r--include/linux/mlx5/eswitch.h123
-rw-r--r--include/linux/mlx5/fs.h101
-rw-r--r--include/linux/mlx5/fs_helpers.h48
-rw-r--r--include/linux/mlx5/macsec.h32
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2413
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h235
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h224
-rw-r--r--include/linux/mlx5/mpfs.h18
-rw-r--r--include/linux/mlx5/port.h50
-rw-r--r--include/linux/mlx5/qp.h44
-rw-r--r--include/linux/mlx5/transobj.h1
-rw-r--r--include/linux/mlx5/vport.h11
-rw-r--r--include/linux/mm-arch-hooks.h22
-rw-r--r--include/linux/mm.h2915
-rw-r--r--include/linux/mm_api.h1
-rw-r--r--include/linux/mm_inline.h603
-rw-r--r--include/linux/mm_types.h1116
-rw-r--r--include/linux/mm_types_task.h32
-rw-r--r--include/linux/mman.h66
-rw-r--r--include/linux/mmap_lock.h147
-rw-r--r--include/linux/mmc/card.h45
-rw-r--r--include/linux/mmc/core.h10
-rw-r--r--include/linux/mmc/host.h100
-rw-r--r--include/linux/mmc/mmc.h18
-rw-r--r--include/linux/mmc/sd.h4
-rw-r--r--include/linux/mmc/sdhci-pci-data.h18
-rw-r--r--include/linux/mmc/sdio.h7
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmc/sdio_ids.h17
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mmdebug.h66
-rw-r--r--include/linux/mmu_context.h30
-rw-r--r--include/linux/mmu_notifier.h162
-rw-r--r--include/linux/mmzone.h1137
-rw-r--r--include/linux/mnt_idmapping.h247
-rw-r--r--include/linux/mod_devicetable.h134
-rw-r--r--include/linux/module.h414
-rw-r--r--include/linux/module_symbol.h15
-rw-r--r--include/linux/moduleloader.h36
-rw-r--r--include/linux/moduleparam.h35
-rw-r--r--include/linux/mount.h55
-rw-r--r--include/linux/moxtet.h4
-rw-r--r--include/linux/mpage.h4
-rw-r--r--include/linux/mpi.h194
-rw-r--r--include/linux/mroute.h17
-rw-r--r--include/linux/mroute6.h35
-rw-r--r--include/linux/mroute_base.h17
-rw-r--r--include/linux/msi.h664
-rw-r--r--include/linux/msi_api.h73
-rw-r--r--include/linux/mtd/blktrans.h13
-rw-r--r--include/linux/mtd/cfi.h3
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/mtd/hyperbus.h17
-rw-r--r--include/linux/mtd/jedec.h3
-rw-r--r--include/linux/mtd/latch-addr-flash.h29
-rw-r--r--include/linux/mtd/lpc32xx_mlc.h2
-rw-r--r--include/linux/mtd/lpc32xx_slc.h2
-rw-r--r--include/linux/mtd/mtd.h41
-rw-r--r--include/linux/mtd/nand-ecc-mtk.h47
-rw-r--r--include/linux/mtd/nand-ecc-mxic.h49
-rw-r--r--include/linux/mtd/nand-ecc-sw-bch.h71
-rw-r--r--include/linux/mtd/nand-ecc-sw-hamming.h89
-rw-r--r--include/linux/mtd/nand.h316
-rw-r--r--include/linux/mtd/nand_bch.h66
-rw-r--r--include/linux/mtd/nand_ecc.h39
-rw-r--r--include/linux/mtd/onfi.h42
-rw-r--r--include/linux/mtd/pfow.h33
-rw-r--r--include/linux/mtd/qinfo.h2
-rw-r--r--include/linux/mtd/rawnand.h274
-rw-r--r--include/linux/mtd/sharpsl.h1
-rw-r--r--include/linux/mtd/spi-nor.h117
-rw-r--r--include/linux/mtd/spinand.h40
-rw-r--r--include/linux/mtd/ubi.h1
-rw-r--r--include/linux/mtd/xip.h2
-rw-r--r--include/linux/mutex.h152
-rw-r--r--include/linux/mutex_api.h1
-rw-r--r--include/linux/mutex_types.h71
-rw-r--r--include/linux/mux/consumer.h41
-rw-r--r--include/linux/mux/driver.h4
-rw-r--r--include/linux/mv643xx.h8
-rw-r--r--include/linux/mv643xx_eth.h2
-rw-r--r--include/linux/n_r3964.h175
-rw-r--r--include/linux/namei.h51
-rw-r--r--include/linux/nd.h83
-rw-r--r--include/linux/net.h62
-rw-r--r--include/linux/net/intel/i40e_client.h25
-rw-r--r--include/linux/net/intel/iidc.h107
-rw-r--r--include/linux/net_tstamp.h63
-rw-r--r--include/linux/netdev_features.h23
-rw-r--r--include/linux/netdevice.h2399
-rw-r--r--include/linux/netfilter.h63
-rw-r--r--include/linux/netfilter/ipset/ip_set.h24
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h13
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h113
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h38
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h73
-rw-r--r--include/linux/netfilter/x_tables.h26
-rw-r--r--include/linux/netfilter_arp/arp_tables.h13
-rw-r--r--include/linux/netfilter_bridge.h6
-rw-r--r--include/linux/netfilter_bridge/ebtables.h19
-rw-r--r--include/linux/netfilter_defs.h8
-rw-r--r--include/linux/netfilter_ingress.h58
-rw-r--r--include/linux/netfilter_ipv4.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h17
-rw-r--r--include/linux/netfilter_ipv6.h20
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h18
-rw-r--r--include/linux/netfilter_netdev.h150
-rw-r--r--include/linux/netfs.h514
-rw-r--r--include/linux/netlink.h147
-rw-r--r--include/linux/netpoll.h1
-rw-r--r--include/linux/nfs.h28
-rw-r--r--include/linux/nfs4.h314
-rw-r--r--include/linux/nfs_fs.h250
-rw-r--r--include/linux/nfs_fs_sb.h46
-rw-r--r--include/linux/nfs_iostat.h12
-rw-r--r--include/linux/nfs_page.h93
-rw-r--r--include/linux/nfs_ssc.h81
-rw-r--r--include/linux/nfs_xdr.h89
-rw-r--r--include/linux/nfsacl.h6
-rw-r--r--include/linux/nitro_enclaves.h11
-rw-r--r--include/linux/nl802154.h2
-rw-r--r--include/linux/nls.h2
-rw-r--r--include/linux/nmi.h114
-rw-r--r--include/linux/node.h71
-rw-r--r--include/linux/nodemask.h93
-rw-r--r--include/linux/nodemask_types.h10
-rw-r--r--include/linux/nospec.h4
-rw-r--r--include/linux/notifier.h24
-rw-r--r--include/linux/ns_common.h5
-rw-r--r--include/linux/nsproxy.h9
-rw-r--r--include/linux/nubus.h5
-rw-r--r--include/linux/numa.h44
-rw-r--r--include/linux/nvme-auth.h44
-rw-r--r--include/linux/nvme-fc-driver.h33
-rw-r--r--include/linux/nvme-keyring.h28
-rw-r--r--include/linux/nvme-rdma.h6
-rw-r--r--include/linux/nvme-tcp.h12
-rw-r--r--include/linux/nvme.h494
-rw-r--r--include/linux/nvmem-consumer.h36
-rw-r--r--include/linux/nvmem-provider.h142
-rw-r--r--include/linux/objpool.h181
-rw-r--r--include/linux/objtool.h174
-rw-r--r--include/linux/objtool_types.h57
-rw-r--r--include/linux/of.h751
-rw-r--r--include/linux/of_address.h94
-rw-r--r--include/linux/of_device.h73
-rw-r--r--include/linux/of_fdt.h15
-rw-r--r--include/linux/of_gpio.h121
-rw-r--r--include/linux/of_graph.h4
-rw-r--r--include/linux/of_iommu.h30
-rw-r--r--include/linux/of_irq.h21
-rw-r--r--include/linux/of_mdio.h45
-rw-r--r--include/linux/of_net.h20
-rw-r--r--include/linux/of_platform.h42
-rw-r--r--include/linux/of_reserved_mem.h14
-rw-r--r--include/linux/oid_registry.h56
-rw-r--r--include/linux/olpc-ec.h2
-rw-r--r--include/linux/omap-dma.h35
-rw-r--r--include/linux/omap-gpmc.h3
-rw-r--r--include/linux/once.h38
-rw-r--r--include/linux/once_lite.h36
-rw-r--r--include/linux/oom.h16
-rw-r--r--include/linux/oprofile.h209
-rw-r--r--include/linux/osq_lock.h5
-rw-r--r--include/linux/overflow.h516
-rw-r--r--include/linux/packing.h2
-rw-r--r--include/linux/padata.h17
-rw-r--r--include/linux/page-flags-layout.h68
-rw-r--r--include/linux/page-flags.h751
-rw-r--r--include/linux/page-isolation.h25
-rw-r--r--include/linux/page_counter.h25
-rw-r--r--include/linux/page_ext.h61
-rw-r--r--include/linux/page_idle.h123
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--include/linux/page_ref.h204
-rw-r--r--include/linux/page_reporting.h3
-rw-r--r--include/linux/page_table_check.h149
-rw-r--r--include/linux/pageblock-flags.h15
-rw-r--r--include/linux/pagemap.h1331
-rw-r--r--include/linux/pagevec.h114
-rw-r--r--include/linux/pagewalk.h41
-rw-r--r--include/linux/panic.h97
-rw-r--r--include/linux/panic_notifier.h12
-rw-r--r--include/linux/parport.h50
-rw-r--r--include/linux/parport_pc.h3
-rw-r--r--include/linux/parser.h1
-rw-r--r--include/linux/part_stat.h48
-rw-r--r--include/linux/pci-acpi.h11
-rw-r--r--include/linux/pci-dma-compat.h129
-rw-r--r--include/linux/pci-doe.h25
-rw-r--r--include/linux/pci-ecam.h33
-rw-r--r--include/linux/pci-ep-cfs.h6
-rw-r--r--include/linux/pci-epc.h155
-rw-r--r--include/linux/pci-epf.h81
-rw-r--r--include/linux/pci-p2pdma.h27
-rw-r--r--include/linux/pci.h656
-rw-r--r--include/linux/pci_hotplug.h4
-rw-r--r--include/linux/pci_ids.h268
-rw-r--r--include/linux/pcs-lynx.h17
-rw-r--r--include/linux/pcs-rzn1-miic.h18
-rw-r--r--include/linux/pcs/pcs-mtk-lynxi.h13
-rw-r--r--include/linux/pcs/pcs-xpcs.h51
-rw-r--r--include/linux/pda_power.h39
-rw-r--r--include/linux/pds/pds_adminq.h1269
-rw-r--r--include/linux/pds/pds_auxbus.h20
-rw-r--r--include/linux/pds/pds_common.h52
-rw-r--r--include/linux/pds/pds_core_if.h572
-rw-r--r--include/linux/pds/pds_intr.h163
-rw-r--r--include/linux/pe.h39
-rw-r--r--include/linux/peci-cpu.h40
-rw-r--r--include/linux/peci.h112
-rw-r--r--include/linux/percpu-defs.h51
-rw-r--r--include/linux/percpu-refcount.h87
-rw-r--r--include/linux/percpu-rwsem.h14
-rw-r--r--include/linux/percpu.h46
-rw-r--r--include/linux/percpu_counter.h110
-rw-r--r--include/linux/perf/arm_pmu.h51
-rw-r--r--include/linux/perf/arm_pmuv3.h312
-rw-r--r--include/linux/perf/riscv_pmu.h88
-rw-r--r--include/linux/perf_event.h646
-rw-r--r--include/linux/perf_event_api.h1
-rw-r--r--include/linux/perf_regs.h6
-rw-r--r--include/linux/pgtable.h842
-rw-r--r--include/linux/pgtable_api.h1
-rw-r--r--include/linux/phy.h1070
-rw-r--r--include/linux/phy/pcie.h12
-rw-r--r--include/linux/phy/phy-lvds.h32
-rw-r--r--include/linux/phy/phy-mipi-dphy.h3
-rw-r--r--include/linux/phy/phy.h85
-rw-r--r--include/linux/phy/tegra/xusb.h13
-rw-r--r--include/linux/phy_fixed.h3
-rw-r--r--include/linux/phylib_stubs.h68
-rw-r--r--include/linux/phylink.h307
-rw-r--r--include/linux/pid.h154
-rw-r--r--include/linux/pid_namespace.h41
-rw-r--r--include/linux/pid_types.h16
-rw-r--r--include/linux/pidfs.h8
-rw-r--r--include/linux/pinctrl/consumer.h88
-rw-r--r--include/linux/pinctrl/devinfo.h21
-rw-r--r--include/linux/pinctrl/machine.h10
-rw-r--r--include/linux/pinctrl/pinconf-generic.h68
-rw-r--r--include/linux/pinctrl/pinconf.h16
-rw-r--r--include/linux/pinctrl/pinctrl.h84
-rw-r--r--include/linux/pinctrl/pinmux.h27
-rw-r--r--include/linux/pipe_fs_i.h78
-rw-r--r--include/linux/pkeys.h6
-rw-r--r--include/linux/pktcdvd.h17
-rw-r--r--include/linux/pl353-smc.h30
-rw-r--r--include/linux/platform_data/ad5755.h102
-rw-r--r--include/linux/platform_data/ad7291.h13
-rw-r--r--include/linux/platform_data/ad7298.h19
-rw-r--r--include/linux/platform_data/ad7303.h20
-rw-r--r--include/linux/platform_data/ad7793.h2
-rw-r--r--include/linux/platform_data/ad7887.h4
-rw-r--r--include/linux/platform_data/adau1977.h44
-rw-r--r--include/linux/platform_data/adp5588.h171
-rw-r--r--include/linux/platform_data/amd_xdma.h34
-rw-r--r--include/linux/platform_data/asoc-mx27vis.h12
-rw-r--r--include/linux/platform_data/asoc-palm27x.h9
-rw-r--r--include/linux/platform_data/asoc-pxa.h32
-rw-r--r--include/linux/platform_data/asoc-s3c24xx_simtec.h30
-rw-r--r--include/linux/platform_data/asoc-ux500-msp.h20
-rw-r--r--include/linux/platform_data/at91_adc.h49
-rw-r--r--include/linux/platform_data/ata-samsung_cf.h31
-rw-r--r--include/linux/platform_data/atmel.h12
-rw-r--r--include/linux/platform_data/bcm7038_wdt.h8
-rw-r--r--include/linux/platform_data/bd6107.h2
-rw-r--r--include/linux/platform_data/brcmfmac.h6
-rw-r--r--include/linux/platform_data/brcmnand.h12
-rw-r--r--include/linux/platform_data/clk-fch.h2
-rw-r--r--include/linux/platform_data/clk-u300.h1
-rw-r--r--include/linux/platform_data/cros_ec_commands.h593
-rw-r--r--include/linux/platform_data/cros_ec_proto.h42
-rw-r--r--include/linux/platform_data/davinci-cpufreq.h6
-rw-r--r--include/linux/platform_data/davinci_asp.h11
-rw-r--r--include/linux/platform_data/dma-atmel.h61
-rw-r--r--include/linux/platform_data/dma-coh901318.h72
-rw-r--r--include/linux/platform_data/dma-dw.h23
-rw-r--r--include/linux/platform_data/dma-hsu.h2
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h71
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h36
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h48
-rw-r--r--include/linux/platform_data/dma-ste-dma40.h209
-rw-r--r--include/linux/platform_data/efm32-spi.h15
-rw-r--r--include/linux/platform_data/efm32-uart.h19
-rw-r--r--include/linux/platform_data/emc2305.h22
-rw-r--r--include/linux/platform_data/eth_ixp4xx.h19
-rw-r--r--include/linux/platform_data/gpio-davinci.h10
-rw-r--r--include/linux/platform_data/gpio-dwapb.h23
-rw-r--r--include/linux/platform_data/gpio-omap.h6
-rw-r--r--include/linux/platform_data/gpio/gpio-amd-fch.h2
-rw-r--r--include/linux/platform_data/gpio_backlight.h2
-rw-r--r--include/linux/platform_data/gpmc-omap.h8
-rw-r--r--include/linux/platform_data/gsc_hwmon.h6
-rw-r--r--include/linux/platform_data/hirschmann-hellcreek.h24
-rw-r--r--include/linux/platform_data/i2c-designware.h13
-rw-r--r--include/linux/platform_data/i2c-gpio.h9
-rw-r--r--include/linux/platform_data/i2c-hid.h41
-rw-r--r--include/linux/platform_data/i2c-mux-reg.h2
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h2
-rw-r--r--include/linux/platform_data/irda-pxaficp.h26
-rw-r--r--include/linux/platform_data/irda-sa11x0.h17
-rw-r--r--include/linux/platform_data/jz4740/jz4740_nand.h25
-rw-r--r--include/linux/platform_data/keyboard-pxa930_rotary.h21
-rw-r--r--include/linux/platform_data/keypad-omap.h3
-rw-r--r--include/linux/platform_data/lcd-mipid.h2
-rw-r--r--include/linux/platform_data/leds-lp55xx.h3
-rw-r--r--include/linux/platform_data/leds-omap.h19
-rw-r--r--include/linux/platform_data/leds-pca963x.h35
-rw-r--r--include/linux/platform_data/leds-s3c24xx.h18
-rw-r--r--include/linux/platform_data/lv5207lp.h2
-rw-r--r--include/linux/platform_data/macb.h20
-rw-r--r--include/linux/platform_data/max732x.h12
-rw-r--r--include/linux/platform_data/mdio-bcm-unimac.h3
-rw-r--r--include/linux/platform_data/media/camera-mx2.h31
-rw-r--r--include/linux/platform_data/media/camera-mx3.h43
-rw-r--r--include/linux/platform_data/media/coda.h14
-rw-r--r--include/linux/platform_data/media/s5p_hdmi.h32
-rw-r--r--include/linux/platform_data/microchip-ksz.h24
-rw-r--r--include/linux/platform_data/mlxcpld.h31
-rw-r--r--include/linux/platform_data/mlxreg.h122
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h42
-rw-r--r--include/linux/platform_data/mmc-omap.h5
-rw-r--r--include/linux/platform_data/mmc-s3cmci.h49
-rw-r--r--include/linux/platform_data/mmp_audio.h18
-rw-r--r--include/linux/platform_data/mouse-pxa930_trkball.h11
-rw-r--r--include/linux/platform_data/mtd-davinci.h9
-rw-r--r--include/linux/platform_data/mtd-mxc_nand.h19
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h10
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h2
-rw-r--r--include/linux/platform_data/net-cw1200.h4
-rw-r--r--include/linux/platform_data/nfcmrvl.h48
-rw-r--r--include/linux/platform_data/ntc_thermistor.h50
-rw-r--r--include/linux/platform_data/omap-twl4030.h3
-rw-r--r--include/linux/platform_data/pca953x.h13
-rw-r--r--include/linux/platform_data/pcf857x.h45
-rw-r--r--include/linux/platform_data/pcmcia-pxa2xx_viper.h12
-rw-r--r--include/linux/platform_data/pm33xx.h3
-rw-r--r--include/linux/platform_data/pxa2xx_udc.h6
-rw-r--r--include/linux/platform_data/rtc-ds2404.h20
-rw-r--r--include/linux/platform_data/rtc-v3020.h41
-rw-r--r--include/linux/platform_data/s3c-hsudc.h31
-rw-r--r--include/linux/platform_data/serial-imx.h15
-rw-r--r--include/linux/platform_data/sh_mmcif.h (renamed from include/linux/mmc/sh_mmcif.h)2
-rw-r--r--include/linux/platform_data/shmob_drm.h59
-rw-r--r--include/linux/platform_data/sht3x.h15
-rw-r--r--include/linux/platform_data/si5351.h2
-rw-r--r--include/linux/platform_data/simplefb.h2
-rw-r--r--include/linux/platform_data/spi-ath79.h16
-rw-r--r--include/linux/platform_data/spi-clps711x.h17
-rw-r--r--include/linux/platform_data/spi-mt65xx.h1
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h16
-rw-r--r--include/linux/platform_data/ssm2518.h21
-rw-r--r--include/linux/platform_data/st33zp24.h16
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h3
-rw-r--r--include/linux/platform_data/ti-sysc.h6
-rw-r--r--include/linux/platform_data/timer-ixp4xx.h11
-rw-r--r--include/linux/platform_data/tps68470.h40
-rw-r--r--include/linux/platform_data/tsl2563.h9
-rw-r--r--include/linux/platform_data/uio_dmem_genirq.h10
-rw-r--r--include/linux/platform_data/uio_pruss.h10
-rw-r--r--include/linux/platform_data/usb-ehci-mxc.h14
-rw-r--r--include/linux/platform_data/usb-mx2.h29
-rw-r--r--include/linux/platform_data/usb-omap.h16
-rw-r--r--include/linux/platform_data/usb-omap1.h4
-rw-r--r--include/linux/platform_data/usb-pxa3xx-ulpi.h32
-rw-r--r--include/linux/platform_data/usb-s3c2410_udc.h39
-rw-r--r--include/linux/platform_data/usb3503.h1
-rw-r--r--include/linux/platform_data/ux500_wdt.h18
-rw-r--r--include/linux/platform_data/video-imxfb.h70
-rw-r--r--include/linux/platform_data/video-mx3fb.h50
-rw-r--r--include/linux/platform_data/video-pxafb.h22
-rw-r--r--include/linux/platform_data/voltage-omap.h1
-rw-r--r--include/linux/platform_data/wan_ixp4xx_hss.h17
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h46
-rw-r--r--include/linux/platform_data/x86/clk-lpss.h2
-rw-r--r--include/linux/platform_data/x86/mlxcpld.h52
-rw-r--r--include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h76
-rw-r--r--include/linux/platform_data/x86/p2sb.h28
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h38
-rw-r--r--include/linux/platform_data/x86/pwm-lpss.h33
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h31
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h79
-rw-r--r--include/linux/platform_data/x86/soc.h70
-rw-r--r--include/linux/platform_data/x86/spi-intel.h (renamed from include/linux/platform_data/intel-spi.h)12
-rw-r--r--include/linux/platform_device.h72
-rw-r--r--include/linux/platform_profile.h41
-rw-r--r--include/linux/plist.h13
-rw-r--r--include/linux/plist_types.h17
-rw-r--r--include/linux/pm.h197
-rw-r--r--include/linux/pm2301_charger.h48
-rw-r--r--include/linux/pm_clock.h9
-rw-r--r--include/linux/pm_domain.h175
-rw-r--r--include/linux/pm_opp.h500
-rw-r--r--include/linux/pm_runtime.h110
-rw-r--r--include/linux/pm_wakeirq.h29
-rw-r--r--include/linux/pm_wakeup.h49
-rw-r--r--include/linux/pmbus.h39
-rw-r--r--include/linux/pmu.h2
-rw-r--r--include/linux/pnfs_osd_xdr.h317
-rw-r--r--include/linux/pnp.h10
-rw-r--r--include/linux/poison.h21
-rw-r--r--include/linux/poll.h6
-rw-r--r--include/linux/polynomial.h35
-rw-r--r--include/linux/posix-clock.h35
-rw-r--r--include/linux/posix-timers.h98
-rw-r--r--include/linux/posix-timers_types.h80
-rw-r--r--include/linux/posix_acl.h61
-rw-r--r--include/linux/posix_acl_xattr.h40
-rw-r--r--include/linux/power/ab8500.h16
-rw-r--r--include/linux/power/bq25890_charger.h15
-rw-r--r--include/linux/power/bq27xxx_battery.h9
-rw-r--r--include/linux/power/charger-manager.h41
-rw-r--r--include/linux/power/generic-adc-battery.h27
-rw-r--r--include/linux/power/gpio-charger.h6
-rw-r--r--include/linux/power/max17042_battery.h16
-rw-r--r--include/linux/power/max8903_charger.h43
-rw-r--r--include/linux/power/power_on_reason.h19
-rw-r--r--include/linux/power/smartreflex.h5
-rw-r--r--include/linux/power/smb347-charger.h114
-rw-r--r--include/linux/power_supply.h524
-rw-r--r--include/linux/powercap.h11
-rw-r--r--include/linux/ppp-comp.h2
-rw-r--r--include/linux/ppp_channel.h5
-rw-r--r--include/linux/ppp_defs.h14
-rw-r--r--include/linux/pps-gpio.h19
-rw-r--r--include/linux/pr.h25
-rw-r--r--include/linux/prandom.h29
-rw-r--r--include/linux/preempt.h201
-rw-r--r--include/linux/prefetch.h15
-rw-r--r--include/linux/printk.h265
-rw-r--r--include/linux/prmt.h7
-rw-r--r--include/linux/proc_fs.h41
-rw-r--r--include/linux/proc_ns.h3
-rw-r--r--include/linux/profile.h48
-rw-r--r--include/linux/property.h281
-rw-r--r--include/linux/pruss_driver.h177
-rw-r--r--include/linux/psci.h11
-rw-r--r--include/linux/pse-pd/pse.h129
-rw-r--r--include/linux/pseudo_fs.h2
-rw-r--r--include/linux/psi.h29
-rw-r--r--include/linux/psi_types.h90
-rw-r--r--include/linux/psp-platform-access.h69
-rw-r--r--include/linux/psp-sev.h385
-rw-r--r--include/linux/psp.h29
-rw-r--r--include/linux/pstore.h10
-rw-r--r--include/linux/pstore_blk.h69
-rw-r--r--include/linux/pstore_ram.h99
-rw-r--r--include/linux/ptdump.h10
-rw-r--r--include/linux/pti.h2
-rw-r--r--include/linux/ptp_classify.h175
-rw-r--r--include/linux/ptp_clock_kernel.h196
-rw-r--r--include/linux/ptp_kvm.h22
-rw-r--r--include/linux/ptp_mock.h38
-rw-r--r--include/linux/ptp_pch.h26
-rw-r--r--include/linux/ptrace.h125
-rw-r--r--include/linux/ptrace_api.h1
-rw-r--r--include/linux/purgatory.h2
-rw-r--r--include/linux/pwm.h250
-rw-r--r--include/linux/pwm_backlight.h1
-rw-r--r--include/linux/pxa2xx_ssp.h215
-rw-r--r--include/linux/qcom_scm.h167
-rw-r--r--include/linux/qed/common_hsi.h143
-rw-r--r--include/linux/qed/eth_common.h1
-rw-r--r--include/linux/qed/fcoe_common.h362
-rw-r--r--include/linux/qed/iscsi_common.h360
-rw-r--r--include/linux/qed/nvmetcp_common.h531
-rw-r--r--include/linux/qed/qed_chain.h103
-rw-r--r--include/linux/qed/qed_eth_if.h23
-rw-r--r--include/linux/qed/qed_fcoe_if.h7
-rw-r--r--include/linux/qed/qed_if.h380
-rw-r--r--include/linux/qed/qed_iscsi_if.h6
-rw-r--r--include/linux/qed/qed_ll2_if.h45
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h257
-rw-r--r--include/linux/qed/qed_rdma_if.h5
-rw-r--r--include/linux/qed/qede_rdma.h4
-rw-r--r--include/linux/qed/rdma_common.h1
-rw-r--r--include/linux/quota.h21
-rw-r--r--include/linux/quotaops.h35
-rw-r--r--include/linux/radix-tree.h7
-rw-r--r--include/linux/raid/pq.h16
-rw-r--r--include/linux/raid/xor.h21
-rw-r--r--include/linux/raid_class.h6
-rw-r--r--include/linux/ramfs.h1
-rw-r--r--include/linux/random.h197
-rw-r--r--include/linux/randomize_kstack.h92
-rw-r--r--include/linux/range.h19
-rw-r--r--include/linux/ras.h18
-rw-r--r--include/linux/ratelimit_types.h14
-rw-r--r--include/linux/rbtree.h239
-rw-r--r--include/linux/rbtree_augmented.h30
-rw-r--r--include/linux/rbtree_latch.h6
-rw-r--r--include/linux/rbtree_types.h34
-rw-r--r--include/linux/rcu_node_tree.h2
-rw-r--r--include/linux/rcu_notifier.h32
-rw-r--r--include/linux/rcu_segcblist.h144
-rw-r--r--include/linux/rcu_sync.h1
-rw-r--r--include/linux/rculist.h83
-rw-r--r--include/linux/rculist_nulls.h8
-rw-r--r--include/linux/rcupdate.h426
-rw-r--r--include/linux/rcupdate_trace.h23
-rw-r--r--include/linux/rcupdate_wait.h15
-rw-r--r--include/linux/rcuref.h155
-rw-r--r--include/linux/rcutiny.h100
-rw-r--r--include/linux/rcutree.h82
-rw-r--r--include/linux/rcuwait.h29
-rw-r--r--include/linux/rcuwait_api.h1
-rw-r--r--include/linux/reboot.h117
-rw-r--r--include/linux/ref_tracker.h100
-rw-r--r--include/linux/refcount.h127
-rw-r--r--include/linux/refcount_api.h1
-rw-r--r--include/linux/refcount_types.h19
-rw-r--r--include/linux/regmap.h363
-rw-r--r--include/linux/regset.h27
-rw-r--r--include/linux/regulator/ab8500.h166
-rw-r--r--include/linux/regulator/consumer.h139
-rw-r--r--include/linux/regulator/coupler.h5
-rw-r--r--include/linux/regulator/da9121.h36
-rw-r--r--include/linux/regulator/db8500-prcmu.h6
-rw-r--r--include/linux/regulator/driver.h277
-rw-r--r--include/linux/regulator/gpio-regulator.h2
-rw-r--r--include/linux/regulator/lp872x.h17
-rw-r--r--include/linux/regulator/machine.h44
-rw-r--r--include/linux/regulator/max8973-regulator.h6
-rw-r--r--include/linux/regulator/mt6315-regulator.h44
-rw-r--r--include/linux/regulator/mt6331-regulator.h46
-rw-r--r--include/linux/regulator/mt6332-regulator.h27
-rw-r--r--include/linux/regulator/mt6357-regulator.h51
-rw-r--r--include/linux/regulator/mt6358-regulator.h46
-rw-r--r--include/linux/regulator/mt6359-regulator.h59
-rw-r--r--include/linux/regulator/pca9450.h21
-rw-r--r--include/linux/regulator/pfuze100.h6
-rw-r--r--include/linux/regulator/tps62360.h6
-rw-r--r--include/linux/regulator/userspace-consumer.h1
-rw-r--r--include/linux/relay.h29
-rw-r--r--include/linux/remoteproc.h137
-rw-r--r--include/linux/remoteproc/mtk_scp.h2
-rw-r--r--include/linux/remoteproc/pruss.h83
-rw-r--r--include/linux/remoteproc/qcom_rproc.h4
-rw-r--r--include/linux/resctrl.h293
-rw-r--r--include/linux/reset-controller.h26
-rw-r--r--include/linux/reset.h360
-rw-r--r--include/linux/reset/bcm63xx_pmb.h10
-rw-r--r--include/linux/resource.h2
-rw-r--r--include/linux/restart_block.h3
-rw-r--r--include/linux/resume_user_mode.h65
-rw-r--r--include/linux/rethook.h98
-rw-r--r--include/linux/rfkill.h36
-rw-r--r--include/linux/rhashtable-types.h2
-rw-r--r--include/linux/rhashtable.h61
-rw-r--r--include/linux/ring_buffer.h34
-rw-r--r--include/linux/rio_drv.h3
-rw-r--r--include/linux/rio_ids.h13
-rw-r--r--include/linux/rmap.h574
-rw-r--r--include/linux/rmi.h11
-rw-r--r--include/linux/root_dev.h9
-rw-r--r--include/linux/rpmsg.h107
-rw-r--r--include/linux/rpmsg/byteorder.h67
-rw-r--r--include/linux/rpmsg/ns.h45
-rw-r--r--include/linux/rpmsg/qcom_glink.h20
-rw-r--r--include/linux/rpmsg/qcom_smd.h5
-rw-r--r--include/linux/rseq.h131
-rw-r--r--include/linux/rslib.h1
-rw-r--r--include/linux/rtc.h106
-rw-r--r--include/linux/rtc/ds1685.h1
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h21
-rw-r--r--include/linux/rtmutex.h107
-rw-r--r--include/linux/rtnetlink.h68
-rw-r--r--include/linux/rtsx_pci.h46
-rw-r--r--include/linux/rtsx_usb.h2
-rw-r--r--include/linux/rv.h70
-rw-r--r--include/linux/rw_hint.h24
-rw-r--r--include/linux/rwbase_rt.h44
-rw-r--r--include/linux/rwlock.h30
-rw-r--r--include/linux/rwlock_api_smp.h14
-rw-r--r--include/linux/rwlock_rt.h150
-rw-r--r--include/linux/rwlock_types.h53
-rw-r--r--include/linux/rwsem.h130
-rw-r--r--include/linux/s3c_adc_battery.h42
-rw-r--r--include/linux/sbitmap.h206
-rw-r--r--include/linux/scatterlist.h250
-rw-r--r--include/linux/sched.h1035
-rw-r--r--include/linux/sched/affinity.h1
-rw-r--r--include/linux/sched/clock.h25
-rw-r--r--include/linux/sched/cond_resched.h1
-rw-r--r--include/linux/sched/coredump.h35
-rw-r--r--include/linux/sched/cpufreq.h7
-rw-r--r--include/linux/sched/cputime.h14
-rw-r--r--include/linux/sched/deadline.h6
-rw-r--r--include/linux/sched/debug.h4
-rw-r--r--include/linux/sched/hotplug.h2
-rw-r--r--include/linux/sched/idle.h44
-rw-r--r--include/linux/sched/isolation.h57
-rw-r--r--include/linux/sched/jobctl.h12
-rw-r--r--include/linux/sched/mm.h336
-rw-r--r--include/linux/sched/numa_balancing.h16
-rw-r--r--include/linux/sched/posix-timers.h1
-rw-r--r--include/linux/sched/prio.h18
-rw-r--r--include/linux/sched/rseq_api.h1
-rw-r--r--include/linux/sched/rt.h12
-rw-r--r--include/linux/sched/sd_flags.h170
-rw-r--r--include/linux/sched/signal.h211
-rw-r--r--include/linux/sched/smt.h2
-rw-r--r--include/linux/sched/stat.h16
-rw-r--r--include/linux/sched/sysctl.h85
-rw-r--r--include/linux/sched/task.h79
-rw-r--r--include/linux/sched/task_flags.h1
-rw-r--r--include/linux/sched/task_stack.h11
-rw-r--r--include/linux/sched/thread_info_api.h1
-rw-r--r--include/linux/sched/topology.h90
-rw-r--r--include/linux/sched/types.h2
-rw-r--r--include/linux/sched/user.h16
-rw-r--r--include/linux/sched/vhost_task.h13
-rw-r--r--include/linux/sched/wake_q.h7
-rw-r--r--include/linux/sched_clock.h4
-rw-r--r--include/linux/scif.h1339
-rw-r--r--include/linux/scmi_protocol.h809
-rw-r--r--include/linux/scpi_protocol.h8
-rw-r--r--include/linux/screen_info.h126
-rw-r--r--include/linux/scs.h34
-rw-r--r--include/linux/sctp.h50
-rw-r--r--include/linux/sdb.h160
-rw-r--r--include/linux/sdla.h240
-rw-r--r--include/linux/seccomp.h35
-rw-r--r--include/linux/seccomp_types.h35
-rw-r--r--include/linux/secretmem.h53
-rw-r--r--include/linux/security.h507
-rw-r--r--include/linux/sed-opal-key.h26
-rw-r--r--include/linux/sed-opal.h11
-rw-r--r--include/linux/selection.h48
-rw-r--r--include/linux/sem.h10
-rw-r--r--include/linux/sem_types.h13
-rw-r--r--include/linux/semaphore.h10
-rw-r--r--include/linux/seq_buf.h59
-rw-r--r--include/linux/seq_file.h61
-rw-r--r--include/linux/seq_file_net.h4
-rw-r--r--include/linux/seqlock.h516
-rw-r--r--include/linux/seqlock_api.h1
-rw-r--r--include/linux/seqlock_types.h93
-rw-r--r--include/linux/seqno-fence.h109
-rw-r--r--include/linux/serdev.h67
-rw-r--r--include/linux/serial.h27
-rw-r--r--include/linux/serial_8250.h111
-rw-r--r--include/linux/serial_core.h737
-rw-r--r--include/linux/serial_pnx8xxx.h67
-rw-r--r--include/linux/serial_s3c.h23
-rw-r--r--include/linux/serio.h2
-rw-r--r--include/linux/set_memory.h37
-rw-r--r--include/linux/sfi.h210
-rw-r--r--include/linux/sfi_acpi.h93
-rw-r--r--include/linux/sfp.h223
-rw-r--r--include/linux/sh_intc.h11
-rw-r--r--include/linux/shm.h4
-rw-r--r--include/linux/shmem_fs.h120
-rw-r--r--include/linux/shrinker.h87
-rw-r--r--include/linux/signal.h31
-rw-r--r--include/linux/signal_types.h21
-rw-r--r--include/linux/siox.h2
-rw-r--r--include/linux/siphash.h49
-rw-r--r--include/linux/sirfsoc_dma.h7
-rw-r--r--include/linux/sizes.h14
-rw-r--r--include/linux/skbuff.h1474
-rw-r--r--include/linux/skmsg.h272
-rw-r--r--include/linux/slab.h620
-rw-r--r--include/linux/slab_def.h120
-rw-r--r--include/linux/slimbus.h2
-rw-r--r--include/linux/slub_def.h196
-rw-r--r--include/linux/smc911x.h14
-rw-r--r--include/linux/smp.h101
-rw-r--r--include/linux/smp_types.h3
-rw-r--r--include/linux/smscphy.h44
-rw-r--r--include/linux/soc/andes/irq.h18
-rw-r--r--include/linux/soc/apple/rtkit.h175
-rw-r--r--include/linux/soc/apple/sart.h53
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h16
-rw-r--r--include/linux/soc/ixp4xx/cpu.h120
-rw-r--r--include/linux/soc/ixp4xx/npe.h2
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h57
-rw-r--r--include/linux/soc/mediatek/infracfg.h413
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h232
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h95
-rw-r--r--include/linux/soc/mediatek/mtk-mutex.h90
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h3
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h332
-rw-r--r--include/linux/soc/mmp/cputype.h24
-rw-r--r--include/linux/soc/pxa/cpu.h252
-rw-r--r--include/linux/soc/pxa/mfp.h470
-rw-r--r--include/linux/soc/pxa/smemc.h29
-rw-r--r--include/linux/soc/qcom/apr.h76
-rw-r--r--include/linux/soc/qcom/geni-se.h (renamed from include/linux/qcom-geni-se.h)91
-rw-r--r--include/linux/soc/qcom/irq.h2
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h79
-rw-r--r--include/linux/soc/qcom/mdt_loader.h50
-rw-r--r--include/linux/soc/qcom/pmic_glink.h32
-rw-r--r--include/linux/soc/qcom/qcom-pbs.h30
-rw-r--r--include/linux/soc/qcom/qcom_aoss.h38
-rw-r--r--include/linux/soc/qcom/qmi.h24
-rw-r--r--include/linux/soc/qcom/smd-rpm.h30
-rw-r--r--include/linux/soc/qcom/smem.h3
-rw-r--r--include/linux/soc/qcom/smem_state.h8
-rw-r--r--include/linux/soc/qcom/socinfo.h77
-rw-r--r--include/linux/soc/renesas/r9a06g032-sysctrl.h11
-rw-r--r--include/linux/soc/renesas/rcar-rst.h2
-rw-r--r--include/linux/soc/samsung/exynos-chipid.h6
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h11
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h6
-rw-r--r--include/linux/soc/samsung/s3c-pm.h36
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h2
-rw-r--r--include/linux/soc/ti/k3-ringacc.h22
-rw-r--r--include/linux/soc/ti/knav_dma.h10
-rw-r--r--include/linux/soc/ti/knav_qmss.h10
-rw-r--r--include/linux/soc/ti/omap1-io.h143
-rw-r--r--include/linux/soc/ti/omap1-mux.h311
-rw-r--r--include/linux/soc/ti/omap1-soc.h163
-rw-r--r--include/linux/soc/ti/omap1-usb.h116
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h18
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h2
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h87
-rw-r--r--include/linux/sock_diag.h24
-rw-r--r--include/linux/socket.h59
-rw-r--r--include/linux/sockptr.h36
-rw-r--r--include/linux/softirq.h1
-rw-r--r--include/linux/sony-laptop.h2
-rw-r--r--include/linux/sort.h2
-rw-r--r--include/linux/soundwire/sdw.h270
-rw-r--r--include/linux/soundwire/sdw_amd.h168
-rw-r--r--include/linux/soundwire/sdw_intel.h268
-rw-r--r--include/linux/soundwire/sdw_registers.h50
-rw-r--r--include/linux/soundwire/sdw_type.h8
-rw-r--r--include/linux/spi/ads7846.h17
-rw-r--r--include/linux/spi/altera.h23
-rw-r--r--include/linux/spi/at86rf230.h20
-rw-r--r--include/linux/spi/cc2520.h21
-rw-r--r--include/linux/spi/corgi_lcd.h2
-rw-r--r--include/linux/spi/eeprom.h2
-rw-r--r--include/linux/spi/ifx_modem.h15
-rw-r--r--include/linux/spi/lms283gf05.h16
-rw-r--r--include/linux/spi/max7301.h4
-rw-r--r--include/linux/spi/mmc_spi.h9
-rw-r--r--include/linux/spi/pxa2xx_spi.h32
-rw-r--r--include/linux/spi/s3c24xx.h25
-rw-r--r--include/linux/spi/sh_msiof.h4
-rw-r--r--include/linux/spi/spi-mem.h41
-rw-r--r--include/linux/spi/spi.h758
-rw-r--r--include/linux/spi/spi_bitbang.h2
-rw-r--r--include/linux/spi/spi_gpio.h4
-rw-r--r--include/linux/spi/xilinx_spi.h1
-rw-r--r--include/linux/spinlock.h147
-rw-r--r--include/linux/spinlock_api.h1
-rw-r--r--include/linux/spinlock_api_smp.h14
-rw-r--r--include/linux/spinlock_api_up.h3
-rw-r--r--include/linux/spinlock_rt.h159
-rw-r--r--include/linux/spinlock_types.h89
-rw-r--r--include/linux/spinlock_types_raw.h73
-rw-r--r--include/linux/spinlock_types_up.h2
-rw-r--r--include/linux/spinlock_up.h3
-rw-r--r--include/linux/splice.h47
-rw-r--r--include/linux/spmi.h7
-rw-r--r--include/linux/sprintf.h27
-rw-r--r--include/linux/sram.h14
-rw-r--r--include/linux/srcu.h160
-rw-r--r--include/linux/srcutiny.h27
-rw-r--r--include/linux/srcutree.h111
-rw-r--r--include/linux/ssb/ssb.h4
-rw-r--r--include/linux/ssb/ssb_driver_extif.h2
-rw-r--r--include/linux/ssb/ssb_driver_gige.h16
-rw-r--r--include/linux/stackdepot.h240
-rw-r--r--include/linux/stackleak.h64
-rw-r--r--include/linux/stackprotector.h19
-rw-r--r--include/linux/stacktrace.h58
-rw-r--r--include/linux/start_kernel.h4
-rw-r--r--include/linux/stat.h17
-rw-r--r--include/linux/statfs.h14
-rw-r--r--include/linux/static_call.h346
-rw-r--r--include/linux/static_call_types.h103
-rw-r--r--include/linux/stdarg.h11
-rw-r--r--include/linux/stddef.h71
-rw-r--r--include/linux/stm.h2
-rw-r--r--include/linux/stmmac.h128
-rw-r--r--include/linux/stop_machine.h32
-rw-r--r--include/linux/string.h488
-rw-r--r--include/linux/string_choices.h56
-rw-r--r--include/linux/string_helpers.h60
-rw-r--r--include/linux/stringify.h2
-rw-r--r--include/linux/sungem_phy.h2
-rw-r--r--include/linux/sunrpc/auth.h3
-rw-r--r--include/linux/sunrpc/bc_xprt.h20
-rw-r--r--include/linux/sunrpc/cache.h39
-rw-r--r--include/linux/sunrpc/clnt.h31
-rw-r--r--include/linux/sunrpc/gss_krb5.h207
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h44
-rw-r--r--include/linux/sunrpc/msg_prot.h11
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h5
-rw-r--r--include/linux/sunrpc/sched.h56
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc.h396
-rw-r--r--include/linux/sunrpc/svc_rdma.h179
-rw-r--r--include/linux/sunrpc/svc_rdma_pcl.h128
-rw-r--r--include/linux/sunrpc/svc_xprt.h59
-rw-r--r--include/linux/sunrpc/svcauth.h60
-rw-r--r--include/linux/sunrpc/svcsock.h18
-rw-r--r--include/linux/sunrpc/xdr.h220
-rw-r--r--include/linux/sunrpc/xprt.h83
-rw-r--r--include/linux/sunrpc/xprtmultipath.h14
-rw-r--r--include/linux/sunrpc/xprtsock.h5
-rw-r--r--include/linux/sunxi-rsb.h2
-rw-r--r--include/linux/superhyway.h107
-rw-r--r--include/linux/surface_acpi_notify.h39
-rw-r--r--include/linux/surface_aggregator/controller.h994
-rw-r--r--include/linux/surface_aggregator/device.h632
-rw-r--r--include/linux/surface_aggregator/serial_hub.h691
-rw-r--r--include/linux/suspend.h170
-rw-r--r--include/linux/swab.h25
-rw-r--r--include/linux/swait.h2
-rw-r--r--include/linux/swait_api.h1
-rw-r--r--include/linux/swap.h396
-rw-r--r--include/linux/swap_cgroup.h4
-rw-r--r--include/linux/swap_slots.h4
-rw-r--r--include/linux/swapfile.h15
-rw-r--r--include/linux/swapops.h414
-rw-r--r--include/linux/swiotlb.h265
-rw-r--r--include/linux/switchtec.h4
-rw-r--r--include/linux/syscall_user_dispatch.h51
-rw-r--r--include/linux/syscall_user_dispatch_types.h22
-rw-r--r--include/linux/syscalls.h279
-rw-r--r--include/linux/syscalls_api.h1
-rw-r--r--include/linux/sysctl.h111
-rw-r--r--include/linux/sysfb.h114
-rw-r--r--include/linux/sysfs.h183
-rw-r--r--include/linux/syslog.h3
-rw-r--r--include/linux/sysrq.h18
-rw-r--r--include/linux/t10-pi.h31
-rw-r--r--include/linux/task_work.h19
-rw-r--r--include/linux/tboot.h2
-rw-r--r--include/linux/tc.h2
-rw-r--r--include/linux/tca6416_keypad.h1
-rw-r--r--include/linux/tcp.h387
-rw-r--r--include/linux/tee_drv.h204
-rw-r--r--include/linux/tegra-icc.h65
-rw-r--r--include/linux/termios_internal.h49
-rw-r--r--include/linux/textsearch.h2
-rw-r--r--include/linux/thermal.h315
-rw-r--r--include/linux/thread_info.h110
-rw-r--r--include/linux/threads.h2
-rw-r--r--include/linux/thunderbolt.h120
-rw-r--r--include/linux/ti-emif-sram.h10
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/tick.h73
-rw-r--r--include/linux/time.h13
-rw-r--r--include/linux/time64.h14
-rw-r--r--include/linux/time_namespace.h49
-rw-r--r--include/linux/timecounter.h2
-rw-r--r--include/linux/timekeeping.h44
-rw-r--r--include/linux/timekeeping32.h14
-rw-r--r--include/linux/timer.h80
-rw-r--r--include/linux/timer_types.h23
-rw-r--r--include/linux/timerqueue.h15
-rw-r--r--include/linux/timerqueue_types.h17
-rw-r--r--include/linux/timex.h13
-rw-r--r--include/linux/tnum.h24
-rw-r--r--include/linux/topology.h70
-rw-r--r--include/linux/torture.h48
-rw-r--r--include/linux/tpm.h41
-rw-r--r--include/linux/tpm_eventlog.h6
-rw-r--r--include/linux/trace.h67
-rw-r--r--include/linux/trace_events.h277
-rw-r--r--include/linux/trace_recursion.h228
-rw-r--r--include/linux/trace_seq.h27
-rw-r--r--include/linux/tracefs.h66
-rw-r--r--include/linux/tracehook.h201
-rw-r--r--include/linux/tracepoint-defs.h40
-rw-r--r--include/linux/tracepoint.h187
-rw-r--r--include/linux/transport_class.h8
-rw-r--r--include/linux/tsm.h69
-rw-r--r--include/linux/tty.h773
-rw-r--r--include/linux/tty_buffer.h57
-rw-r--r--include/linux/tty_driver.h632
-rw-r--r--include/linux/tty_flip.h93
-rw-r--r--include/linux/tty_ldisc.h380
-rw-r--r--include/linux/tty_port.h257
-rw-r--r--include/linux/typecheck.h9
-rw-r--r--include/linux/types.h24
-rw-r--r--include/linux/u64_stats_sync.h130
-rw-r--r--include/linux/u64_stats_sync_api.h1
-rw-r--r--include/linux/uacce.h24
-rw-r--r--include/linux/uaccess.h132
-rw-r--r--include/linux/ubsan.h9
-rw-r--r--include/linux/ucb1400.h162
-rw-r--r--include/linux/ucs2_string.h1
-rw-r--r--include/linux/udp.h97
-rw-r--r--include/linux/uidgid.h24
-rw-r--r--include/linux/uidgid_types.h15
-rw-r--r--include/linux/uio.h305
-rw-r--r--include/linux/uio_driver.h29
-rw-r--r--include/linux/ulpi/driver.h2
-rw-r--r--include/linux/umh.h11
-rw-r--r--include/linux/unaligned/access_ok.h68
-rw-r--r--include/linux/unaligned/be_byteshift.h71
-rw-r--r--include/linux/unaligned/be_memmove.h37
-rw-r--r--include/linux/unaligned/be_struct.h37
-rw-r--r--include/linux/unaligned/generic.h115
-rw-r--r--include/linux/unaligned/le_byteshift.h71
-rw-r--r--include/linux/unaligned/le_memmove.h37
-rw-r--r--include/linux/unaligned/le_struct.h37
-rw-r--r--include/linux/unaligned/memmove.h46
-rw-r--r--include/linux/unaligned/packed_struct.h2
-rw-r--r--include/linux/unicode.h52
-rw-r--r--include/linux/units.h37
-rw-r--r--include/linux/usb.h191
-rw-r--r--include/linux/usb/audio-v2.h21
-rw-r--r--include/linux/usb/audio.h3
-rw-r--r--include/linux/usb/c67x00.h15
-rw-r--r--include/linux/usb/cdc-wdm.h7
-rw-r--r--include/linux/usb/cdc.h4
-rw-r--r--include/linux/usb/cdc_ncm.h6
-rw-r--r--include/linux/usb/ch9.h70
-rw-r--r--include/linux/usb/chipidea.h9
-rw-r--r--include/linux/usb/composite.h75
-rw-r--r--include/linux/usb/ehci_def.h47
-rw-r--r--include/linux/usb/ehci_pdriver.h15
-rw-r--r--include/linux/usb/g_hid.h14
-rw-r--r--include/linux/usb/gadget.h108
-rw-r--r--include/linux/usb/hcd.h77
-rw-r--r--include/linux/usb/input.h4
-rw-r--r--include/linux/usb/isp1301.h10
-rw-r--r--include/linux/usb/isp1760.h19
-rw-r--r--include/linux/usb/ljca.h145
-rw-r--r--include/linux/usb/m66592.h14
-rw-r--r--include/linux/usb/midi-v2.h94
-rw-r--r--include/linux/usb/musb-ux500.h10
-rw-r--r--include/linux/usb/musb.h15
-rw-r--r--include/linux/usb/net2280.h14
-rw-r--r--include/linux/usb/of.h9
-rw-r--r--include/linux/usb/ohci_pdriver.h14
-rw-r--r--include/linux/usb/onboard_hub.h18
-rw-r--r--include/linux/usb/otg-fsm.h24
-rw-r--r--include/linux/usb/otg.h3
-rw-r--r--include/linux/usb/pd.h75
-rw-r--r--include/linux/usb/pd_bdo.h2
-rw-r--r--include/linux/usb/pd_ext_sdb.h4
-rw-r--r--include/linux/usb/pd_vdo.h327
-rw-r--r--include/linux/usb/phy_companion.h10
-rw-r--r--include/linux/usb/quirks.h5
-rw-r--r--include/linux/usb/r8152.h39
-rw-r--r--include/linux/usb/r8a66597.h14
-rw-r--r--include/linux/usb/renesas_usbhs.h10
-rw-r--r--include/linux/usb/rndis_host.h15
-rw-r--r--include/linux/usb/role.h12
-rw-r--r--include/linux/usb/rzv2m_usb3drd.h20
-rw-r--r--include/linux/usb/serial.h37
-rw-r--r--include/linux/usb/storage.h2
-rw-r--r--include/linux/usb/tcpci.h256
-rw-r--r--include/linux/usb/tcpm.h76
-rw-r--r--include/linux/usb/tegra_usb_phy.h17
-rw-r--r--include/linux/usb/typec.h107
-rw-r--r--include/linux/usb/typec_altmode.h60
-rw-r--r--include/linux/usb/typec_dp.h42
-rw-r--r--include/linux/usb/typec_mux.h63
-rw-r--r--include/linux/usb/typec_retimer.h45
-rw-r--r--include/linux/usb/typec_tbt.h12
-rw-r--r--include/linux/usb/ulpi.h4
-rw-r--r--include/linux/usb/usb338x.h11
-rw-r--r--include/linux/usb/usbnet.h34
-rw-r--r--include/linux/usb/uvc.h158
-rw-r--r--include/linux/usb/webusb.h80
-rw-r--r--include/linux/usb/xhci-dbgp.h6
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/user_events.h84
-rw-r--r--include/linux/user_namespace.h62
-rw-r--r--include/linux/userfaultfd_k.h283
-rw-r--r--include/linux/usermode_driver.h1
-rw-r--r--include/linux/util_macros.h14
-rw-r--r--include/linux/utsname.h10
-rw-r--r--include/linux/uuid.h18
-rw-r--r--include/linux/vdpa.h383
-rw-r--r--include/linux/verification.h11
-rw-r--r--include/linux/vermagic.h12
-rw-r--r--include/linux/vfio.h418
-rw-r--r--include/linux/vfio_pci_core.h161
-rw-r--r--include/linux/vgaarb.h143
-rw-r--r--include/linux/vhost_iotlb.h5
-rw-r--r--include/linux/via-core.h2
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/virtio.h77
-rw-r--r--include/linux/virtio_anchor.h19
-rw-r--r--include/linux/virtio_config.h98
-rw-r--r--include/linux/virtio_console.h38
-rw-r--r--include/linux/virtio_dma_buf.h37
-rw-r--r--include/linux/virtio_net.h90
-rw-r--r--include/linux/virtio_pci_admin.h23
-rw-r--r--include/linux/virtio_pci_legacy.h40
-rw-r--r--include/linux/virtio_pci_modern.h130
-rw-r--r--include/linux/virtio_ring.h27
-rw-r--r--include/linux/virtio_vsock.h151
-rw-r--r--include/linux/visorbus.h344
-rw-r--r--include/linux/vlynq.h149
-rw-r--r--include/linux/vm_event_item.h49
-rw-r--r--include/linux/vmacache.h28
-rw-r--r--include/linux/vmalloc.h180
-rw-r--r--include/linux/vmcore_info.h81
-rw-r--r--include/linux/vme.h190
-rw-r--r--include/linux/vmpressure.h2
-rw-r--r--include/linux/vmstat.h247
-rw-r--r--include/linux/vmw_vmci_defs.h88
-rw-r--r--include/linux/vringh.h48
-rw-r--r--include/linux/vt_buffer.h2
-rw-r--r--include/linux/vt_kern.h56
-rw-r--r--include/linux/vtime.h136
-rw-r--r--include/linux/w1-gpio.h22
-rw-r--r--include/linux/w1.h4
-rw-r--r--include/linux/wait.h108
-rw-r--r--include/linux/wait_api.h1
-rw-r--r--include/linux/wait_bit.h8
-rw-r--r--include/linux/watch_queue.h16
-rw-r--r--include/linux/watchdog.h10
-rw-r--r--include/linux/wimax/debug.h491
-rw-r--r--include/linux/win_minmax.h4
-rw-r--r--include/linux/wireless.h10
-rw-r--r--include/linux/wkup_m3_ipc.h23
-rw-r--r--include/linux/wl12xx.h44
-rw-r--r--include/linux/wm97xx.h5
-rw-r--r--include/linux/wmi.h58
-rw-r--r--include/linux/wordpart.h42
-rw-r--r--include/linux/workqueue.h367
-rw-r--r--include/linux/workqueue_api.h1
-rw-r--r--include/linux/workqueue_types.h25
-rw-r--r--include/linux/writeback.h103
-rw-r--r--include/linux/ww_mutex.h79
-rw-r--r--include/linux/wwan.h201
-rw-r--r--include/linux/xarray.h125
-rw-r--r--include/linux/xattr.h103
-rw-r--r--include/linux/xz.h106
-rw-r--r--include/linux/z2_battery.h18
-rw-r--r--include/linux/zbud.h23
-rw-r--r--include/linux/zorro.h1
-rw-r--r--include/linux/zpool.h23
-rw-r--r--include/linux/zsmalloc.h5
-rw-r--r--include/linux/zstd.h1252
-rw-r--r--include/linux/zstd_errors.h77
-rw-r--r--include/linux/zstd_lib.h2551
-rw-r--r--include/linux/zswap.h70
-rw-r--r--include/math-emu/op-common.h5
-rw-r--r--include/media/cec.h62
-rw-r--r--include/media/davinci/ccdc_types.h30
-rw-r--r--include/media/davinci/dm355_ccdc.h308
-rw-r--r--include/media/davinci/dm644x_ccdc.h171
-rw-r--r--include/media/davinci/isif.h518
-rw-r--r--include/media/davinci/vpbe.h184
-rw-r--r--include/media/davinci/vpbe_display.h122
-rw-r--r--include/media/davinci/vpbe_osd.h382
-rw-r--r--include/media/davinci/vpbe_types.h74
-rw-r--r--include/media/davinci/vpbe_venc.h37
-rw-r--r--include/media/davinci/vpfe_capture.h177
-rw-r--r--include/media/davinci/vpif_types.h4
-rw-r--r--include/media/davinci/vpss.h111
-rw-r--r--include/media/dmxdev.h1
-rw-r--r--include/media/drv-intf/s3c_camif.h4
-rw-r--r--include/media/drv-intf/saa7146_vv.h65
-rw-r--r--include/media/dvb-usb-ids.h630
-rw-r--r--include/media/dvb_frontend.h13
-rw-r--r--include/media/dvb_net.h10
-rw-r--r--include/media/dvb_ringbuffer.h2
-rw-r--r--include/media/dvbdev.h59
-rw-r--r--include/media/frame_vector.h47
-rw-r--r--include/media/fwht-ctrls.h31
-rw-r--r--include/media/h264-ctrls.h218
-rw-r--r--include/media/hevc-ctrls.h212
-rw-r--r--include/media/i2c/ad9389b.h37
-rw-r--r--include/media/i2c/adv7343.h12
-rw-r--r--include/media/i2c/adv7393.h10
-rw-r--r--include/media/i2c/ds90ub9xx.h22
-rw-r--r--include/media/i2c/ir-kbd-i2c.h1
-rw-r--r--include/media/i2c/m5mols.h29
-rw-r--r--include/media/i2c/mt9m032.h22
-rw-r--r--include/media/i2c/mt9p031.h1
-rw-r--r--include/media/i2c/mt9t001.h10
-rw-r--r--include/media/i2c/mt9t112.h4
-rw-r--r--include/media/i2c/noon010pc30.h24
-rw-r--r--include/media/i2c/ov2659.h14
-rw-r--r--include/media/i2c/ov772x.h2
-rw-r--r--include/media/i2c/ov9650.h24
-rw-r--r--include/media/i2c/s5c73m3.h55
-rw-r--r--include/media/i2c/s5k4ecgx.h33
-rw-r--r--include/media/i2c/s5k6aa.h47
-rw-r--r--include/media/i2c/sr030pc30.h17
-rw-r--r--include/media/i2c/ths7303.h4
-rw-r--r--include/media/i2c/tvp514x.h11
-rw-r--r--include/media/i2c/tw9910.h8
-rw-r--r--include/media/i2c/wm8775.h2
-rw-r--r--include/media/ipu-bridge.h181
-rw-r--r--include/media/jpeg.h20
-rw-r--r--include/media/media-dev-allocator.h2
-rw-r--r--include/media/media-device.h71
-rw-r--r--include/media/media-entity.h433
-rw-r--r--include/media/mipi-csi2.h47
-rw-r--r--include/media/mpeg2-ctrls.h82
-rw-r--r--include/media/rc-core.h30
-rw-r--r--include/media/rc-map.h17
-rw-r--r--include/media/tpg/v4l2-tpg.h17
-rw-r--r--include/media/tuner.h1
-rw-r--r--include/media/tveeprom.h2
-rw-r--r--include/media/v4l2-async.h342
-rw-r--r--include/media/v4l2-cci.h141
-rw-r--r--include/media/v4l2-clk.h73
-rw-r--r--include/media/v4l2-common.h95
-rw-r--r--include/media/v4l2-ctrls.h208
-rw-r--r--include/media/v4l2-dev.h111
-rw-r--r--include/media/v4l2-device.h6
-rw-r--r--include/media/v4l2-dv-timings.h2
-rw-r--r--include/media/v4l2-event.h15
-rw-r--r--include/media/v4l2-fwnode.h309
-rw-r--r--include/media/v4l2-h264.h36
-rw-r--r--include/media/v4l2-ioctl.h10
-rw-r--r--include/media/v4l2-jpeg.h22
-rw-r--r--include/media/v4l2-mc.h22
-rw-r--r--include/media/v4l2-mediabus.h144
-rw-r--r--include/media/v4l2-mem2mem.h66
-rw-r--r--include/media/v4l2-subdev.h1000
-rw-r--r--include/media/v4l2-vp9.h233
-rw-r--r--include/media/videobuf-core.h233
-rw-r--r--include/media/videobuf-dma-contig.h30
-rw-r--r--include/media/videobuf-dma-sg.h102
-rw-r--r--include/media/videobuf-vmalloc.h43
-rw-r--r--include/media/videobuf2-core.h167
-rw-r--r--include/media/videobuf2-dvb.h2
-rw-r--r--include/media/videobuf2-memops.h3
-rw-r--r--include/media/videobuf2-v4l2.h68
-rw-r--r--include/media/vp8-ctrls.h112
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/memory/renesas-rpc-if.h36
-rw-r--r--include/misc/cxl.h2
-rw-r--r--include/misc/ocxl.h12
-rw-r--r--include/net/9p/9p.h25
-rw-r--r--include/net/9p/client.h80
-rw-r--r--include/net/9p/transport.h33
-rw-r--r--include/net/Space.h18
-rw-r--r--include/net/act_api.h90
-rw-r--r--include/net/addrconf.h31
-rw-r--r--include/net/af_rxrpc.h46
-rw-r--r--include/net/af_unix.h68
-rw-r--r--include/net/af_vsock.h42
-rw-r--r--include/net/amt.h408
-rw-r--r--include/net/arp.h17
-rw-r--r--include/net/ax25.h47
-rw-r--r--include/net/ax88796.h9
-rw-r--r--include/net/bareudp.h13
-rw-r--r--include/net/bluetooth/bluetooth.h214
-rw-r--r--include/net/bluetooth/coredump.h116
-rw-r--r--include/net/bluetooth/hci.h615
-rw-r--r--include/net/bluetooth/hci_core.h795
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/bluetooth/hci_sync.h156
-rw-r--r--include/net/bluetooth/iso.h32
-rw-r--r--include/net/bluetooth/l2cap.h51
-rw-r--r--include/net/bluetooth/mgmt.h237
-rw-r--r--include/net/bluetooth/sco.h2
-rw-r--r--include/net/bond_3ad.h8
-rw-r--r--include/net/bond_alb.h6
-rw-r--r--include/net/bond_options.h52
-rw-r--r--include/net/bonding.h151
-rw-r--r--include/net/bpf_sk_storage.h15
-rw-r--r--include/net/busy_poll.h50
-rw-r--r--include/net/caif/caif_dev.h2
-rw-r--r--include/net/caif/caif_hsi.h200
-rw-r--r--include/net/caif/caif_spi.h155
-rw-r--r--include/net/caif/cfcnfg.h2
-rw-r--r--include/net/caif/cfserl.h1
-rw-r--r--include/net/caif/cfsrvl.h3
-rw-r--r--include/net/cfg80211-wext.h20
-rw-r--r--include/net/cfg80211.h2292
-rw-r--r--include/net/cfg802154.h211
-rw-r--r--include/net/checksum.h90
-rw-r--r--include/net/codel.h11
-rw-r--r--include/net/codel_impl.h20
-rw-r--r--include/net/codel_qdisc.h3
-rw-r--r--include/net/compat.h42
-rw-r--r--include/net/datalink.h11
-rw-r--r--include/net/dcbevent.h2
-rw-r--r--include/net/dcbnl.h24
-rw-r--r--include/net/devlink.h1066
-rw-r--r--include/net/dn.h231
-rw-r--r--include/net/dn_dev.h199
-rw-r--r--include/net/dn_fib.h167
-rw-r--r--include/net/dn_neigh.h30
-rw-r--r--include/net/dn_nsp.h195
-rw-r--r--include/net/dn_route.h115
-rw-r--r--include/net/drop_monitor.h36
-rw-r--r--include/net/dropreason-core.h449
-rw-r--r--include/net/dropreason.h49
-rw-r--r--include/net/dsa.h1090
-rw-r--r--include/net/dsa_stubs.h48
-rw-r--r--include/net/dst.h106
-rw-r--r--include/net/dst_cache.h11
-rw-r--r--include/net/dst_metadata.h60
-rw-r--r--include/net/dst_ops.h4
-rw-r--r--include/net/eee.h38
-rw-r--r--include/net/erspan.h3
-rw-r--r--include/net/esp.h1
-rw-r--r--include/net/ethoc.h3
-rw-r--r--include/net/failover.h1
-rw-r--r--include/net/fib_rules.h28
-rw-r--r--include/net/firewire.h5
-rw-r--r--include/net/flow.h50
-rw-r--r--include/net/flow_dissector.h131
-rw-r--r--include/net/flow_offload.h93
-rw-r--r--include/net/fou.h2
-rw-r--r--include/net/fq.h20
-rw-r--r--include/net/fq_impl.h186
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/gen_stats.h59
-rw-r--r--include/net/genetlink.h298
-rw-r--r--include/net/geneve.h2
-rw-r--r--include/net/gre.h19
-rw-r--r--include/net/gro.h500
-rw-r--r--include/net/gso.h109
-rw-r--r--include/net/gtp.h46
-rw-r--r--include/net/gue.h3
-rw-r--r--include/net/handshake.h49
-rw-r--r--include/net/hotdata.h52
-rw-r--r--include/net/hwbm.h2
-rw-r--r--include/net/icmp.h7
-rw-r--r--include/net/ieee80211_radiotap.h234
-rw-r--r--include/net/ieee802154_netdev.h181
-rw-r--r--include/net/if_inet6.h63
-rw-r--r--include/net/ila.h14
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet6_hashtables.h108
-rw-r--r--include/net/inet_common.h15
-rw-r--r--include/net/inet_connection_sock.h73
-rw-r--r--include/net/inet_dscp.h57
-rw-r--r--include/net/inet_ecn.h31
-rw-r--r--include/net/inet_frag.h25
-rw-r--r--include/net/inet_hashtables.h250
-rw-r--r--include/net/inet_sock.h154
-rw-r--r--include/net/inet_timewait_sock.h5
-rw-r--r--include/net/ioam6.h72
-rw-r--r--include/net/ip.h150
-rw-r--r--include/net/ip6_checksum.h20
-rw-r--r--include/net/ip6_fib.h98
-rw-r--r--include/net/ip6_route.h76
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_fib.h70
-rw-r--r--include/net/ip_tunnels.h123
-rw-r--r--include/net/ip_vs.h229
-rw-r--r--include/net/ipcomp.h4
-rw-r--r--include/net/ipconfig.h2
-rw-r--r--include/net/ipv6.h198
-rw-r--r--include/net/ipv6_frag.h37
-rw-r--r--include/net/ipv6_stubs.h18
-rw-r--r--include/net/ipx.h171
-rw-r--r--include/net/iucv/af_iucv.h13
-rw-r--r--include/net/iucv/iucv.h7
-rw-r--r--include/net/iw_handler.h11
-rw-r--r--include/net/kcm.h2
-rw-r--r--include/net/lapb.h2
-rw-r--r--include/net/llc.h6
-rw-r--r--include/net/llc_c_ac.h8
-rw-r--r--include/net/llc_c_ev.h1
-rw-r--r--include/net/llc_c_st.h4
-rw-r--r--include/net/llc_conn.h3
-rw-r--r--include/net/llc_if.h3
-rw-r--r--include/net/llc_pdu.h43
-rw-r--r--include/net/llc_s_ac.h4
-rw-r--r--include/net/llc_s_ev.h1
-rw-r--r--include/net/llc_s_st.h6
-rw-r--r--include/net/lwtunnel.h8
-rw-r--r--include/net/mac80211.h1699
-rw-r--r--include/net/mac802154.h32
-rw-r--r--include/net/macsec.h97
-rw-r--r--include/net/mana/gdma.h871
-rw-r--r--include/net/mana/hw_channel.h200
-rw-r--r--include/net/mana/mana.h799
-rw-r--r--include/net/mana/mana_auxiliary.h10
-rw-r--r--include/net/mana/shm_channel.h21
-rw-r--r--include/net/mctp.h301
-rw-r--r--include/net/mctpdevice.h56
-rw-r--r--include/net/mld.h3
-rw-r--r--include/net/mpls_iptunnel.h3
-rw-r--r--include/net/mptcp.h166
-rw-r--r--include/net/mrp.h5
-rw-r--r--include/net/ncsi.h2
-rw-r--r--include/net/ndisc.h44
-rw-r--r--include/net/neighbour.h99
-rw-r--r--include/net/net_debug.h157
-rw-r--r--include/net/net_namespace.h152
-rw-r--r--include/net/net_trackers.h18
-rw-r--r--include/net/netdev_queues.h229
-rw-r--r--include/net/netdev_rx_queue.h57
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_defrag_ipv4.h3
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h14
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h3
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h8
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h14
-rw-r--r--include/net/netfilter/nf_bpf_link.h15
-rw-r--r--include/net/netfilter/nf_conntrack.h85
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h3
-rw-r--r--include/net/netfilter/nf_conntrack_act_ct.h54
-rw-r--r--include/net/netfilter/nf_conntrack_bpf.h46
-rw-r--r--include/net/netfilter/nf_conntrack_core.h30
-rw-r--r--include/net/netfilter/nf_conntrack_count.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h129
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h20
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h49
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h7
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h36
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h16
-rw-r--r--include/net/netfilter/nf_conntrack_seqadj.h3
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h28
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h13
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h3
-rw-r--r--include/net/netfilter/nf_flow_table.h152
-rw-r--r--include/net/netfilter/nf_hooks_lwtunnel.h7
-rw-r--r--include/net/netfilter/nf_log.h24
-rw-r--r--include/net/netfilter/nf_nat.h6
-rw-r--r--include/net/netfilter/nf_nat_helper.h1
-rw-r--r--include/net/netfilter/nf_nat_redirect.h3
-rw-r--r--include/net/netfilter/nf_queue.h9
-rw-r--r--include/net/netfilter/nf_reject.h21
-rw-r--r--include/net/netfilter/nf_tables.h749
-rw-r--r--include/net/netfilter/nf_tables_core.h122
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h72
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h82
-rw-r--r--include/net/netfilter/nf_tables_offload.h22
-rw-r--r--include/net/netfilter/nf_tproxy.h7
-rw-r--r--include/net/netfilter/nft_fib.h7
-rw-r--r--include/net/netfilter/nft_meta.h17
-rw-r--r--include/net/netfilter/nft_reject.h3
-rw-r--r--include/net/netfilter/xt_rateest.h2
-rw-r--r--include/net/netkit.h44
-rw-r--r--include/net/netlabel.h7
-rw-r--r--include/net/netlink.h294
-rw-r--r--include/net/netmem.h41
-rw-r--r--include/net/netns/bpf.h9
-rw-r--r--include/net/netns/can.h2
-rw-r--r--include/net/netns/conntrack.h43
-rw-r--r--include/net/netns/core.h10
-rw-r--r--include/net/netns/dccp.h12
-rw-r--r--include/net/netns/flow_table.h14
-rw-r--r--include/net/netns/generic.h3
-rw-r--r--include/net/netns/ipv4.h206
-rw-r--r--include/net/netns/ipv6.h56
-rw-r--r--include/net/netns/mctp.h37
-rw-r--r--include/net/netns/mib.h30
-rw-r--r--include/net/netns/mpls.h2
-rw-r--r--include/net/netns/netfilter.h8
-rw-r--r--include/net/netns/nexthop.h3
-rw-r--r--include/net/netns/nftables.h9
-rw-r--r--include/net/netns/sctp.h18
-rw-r--r--include/net/netns/smc.h28
-rw-r--r--include/net/netns/unix.h8
-rw-r--r--include/net/netns/x_tables.h21
-rw-r--r--include/net/netns/xdp.h2
-rw-r--r--include/net/netns/xfrm.h10
-rw-r--r--include/net/netrom.h1
-rw-r--r--include/net/nexthop.h169
-rw-r--r--include/net/nfc/digital.h4
-rw-r--r--include/net/nfc/hci.h6
-rw-r--r--include/net/nfc/nci.h34
-rw-r--r--include/net/nfc/nci_core.h34
-rw-r--r--include/net/nfc/nfc.h18
-rw-r--r--include/net/nl802154.h139
-rw-r--r--include/net/nsh.h2
-rw-r--r--include/net/p8022.h8
-rw-r--r--include/net/page_pool.h218
-rw-r--r--include/net/page_pool/helpers.h409
-rw-r--r--include/net/page_pool/types.h252
-rw-r--r--include/net/phonet/pep.h3
-rw-r--r--include/net/phonet/phonet.h25
-rw-r--r--include/net/phonet/pn_dev.h5
-rw-r--r--include/net/pie.h2
-rw-r--r--include/net/ping.h13
-rw-r--r--include/net/pkt_cls.h235
-rw-r--r--include/net/pkt_sched.h156
-rw-r--r--include/net/pptp.h3
-rw-r--r--include/net/protocol.h8
-rw-r--r--include/net/psample.h23
-rw-r--r--include/net/psnap.h5
-rw-r--r--include/net/raw.h36
-rw-r--r--include/net/rawv6.h7
-rw-r--r--include/net/red.h21
-rw-r--r--include/net/regulatory.h17
-rw-r--r--include/net/request_sock.h57
-rw-r--r--include/net/rose.h12
-rw-r--r--include/net/route.h83
-rw-r--r--include/net/rpl.h6
-rw-r--r--include/net/rps.h125
-rw-r--r--include/net/rsi_91x.h2
-rw-r--r--include/net/rtnetlink.h43
-rw-r--r--include/net/sch_generic.h353
-rw-r--r--include/net/scm.h95
-rw-r--r--include/net/sctp/checksum.h2
-rw-r--r--include/net/sctp/command.h2
-rw-r--r--include/net/sctp/constants.h26
-rw-r--r--include/net/sctp/sctp.h103
-rw-r--r--include/net/sctp/sm.h19
-rw-r--r--include/net/sctp/stream_sched.h6
-rw-r--r--include/net/sctp/structs.h101
-rw-r--r--include/net/sctp/ulpqueue.h3
-rw-r--r--include/net/secure_seq.h6
-rw-r--r--include/net/seg6.h21
-rw-r--r--include/net/selftests.h31
-rw-r--r--include/net/smc.h48
-rw-r--r--include/net/sock.h1026
-rw-r--r--include/net/sock_reuseport.h22
-rw-r--r--include/net/stp.h2
-rw-r--r--include/net/strparser.h27
-rw-r--r--include/net/switchdev.h221
-rw-r--r--include/net/tc_act/tc_connmark.h9
-rw-r--r--include/net/tc_act/tc_ct.h11
-rw-r--r--include/net/tc_act/tc_gact.h15
-rw-r--r--include/net/tc_act/tc_gate.h5
-rw-r--r--include/net/tc_act/tc_ipt.h17
-rw-r--r--include/net/tc_act/tc_mirred.h2
-rw-r--r--include/net/tc_act/tc_nat.h10
-rw-r--r--include/net/tc_act/tc_pedit.h80
-rw-r--r--include/net/tc_act/tc_police.h82
-rw-r--r--include/net/tc_act/tc_skbedit.h42
-rw-r--r--include/net/tc_act/tc_tunnel_key.h5
-rw-r--r--include/net/tc_act/tc_vlan.h13
-rw-r--r--include/net/tc_wrapper.h232
-rw-r--r--include/net/tcp.h928
-rw-r--r--include/net/tcp_ao.h387
-rw-r--r--include/net/tcp_states.h2
-rw-r--r--include/net/tcx.h201
-rw-r--r--include/net/tls.h379
-rw-r--r--include/net/tls_prot.h68
-rw-r--r--include/net/transp_v6.h6
-rw-r--r--include/net/tso.h8
-rw-r--r--include/net/tun_proto.h3
-rw-r--r--include/net/udp.h91
-rw-r--r--include/net/udp_tunnel.h69
-rw-r--r--include/net/udplite.h68
-rw-r--r--include/net/vxlan.h135
-rw-r--r--include/net/wimax.h503
-rw-r--r--include/net/x25.h5
-rw-r--r--include/net/xdp.h321
-rw-r--r--include/net/xdp_priv.h2
-rw-r--r--include/net/xdp_sock.h176
-rw-r--r--include/net/xdp_sock_drv.h299
-rw-r--r--include/net/xfrm.h368
-rw-r--r--include/net/xsk_buff_pool.h140
-rw-r--r--include/pcmcia/soc_common.h125
-rw-r--r--include/ras/ras_event.h14
-rw-r--r--include/rdma/ib.h2
-rw-r--r--include/rdma/ib_addr.h24
-rw-r--r--include/rdma/ib_cache.h21
-rw-r--r--include/rdma/ib_cm.h13
-rw-r--r--include/rdma/ib_hdrs.h6
-rw-r--r--include/rdma/ib_mad.h32
-rw-r--r--include/rdma/ib_pack.h5
-rw-r--r--include/rdma/ib_sa.h44
-rw-r--r--include/rdma/ib_smi.h12
-rw-r--r--include/rdma/ib_sysfs.h37
-rw-r--r--include/rdma/ib_umem.h160
-rw-r--r--include/rdma/ib_umem_odp.h21
-rw-r--r--include/rdma/ib_verbs.h1073
-rw-r--r--include/rdma/iw_cm.h22
-rw-r--r--include/rdma/opa_vnic.h5
-rw-r--r--include/rdma/rdma_cm.h77
-rw-r--r--include/rdma/rdma_counter.h21
-rw-r--r--include/rdma/rdma_netlink.h2
-rw-r--r--include/rdma/rdma_vt.h20
-rw-r--r--include/rdma/rdmavt_qp.h2
-rw-r--r--include/rdma/restrack.h49
-rw-r--r--include/rdma/rw.h18
-rw-r--r--include/rdma/uverbs_ioctl.h139
-rw-r--r--include/rdma/uverbs_named_ioctl.h2
-rw-r--r--include/rdma/uverbs_types.h9
-rw-r--r--include/rv/automata.h75
-rw-r--r--include/rv/da_monitor.h544
-rw-r--r--include/rv/instrumentation.h29
-rw-r--r--include/scsi/fc/fc_ms.h63
-rw-r--r--include/scsi/fc_encode.h727
-rw-r--r--include/scsi/fc_frame.h30
-rw-r--r--include/scsi/iscsi_proto.h2
-rw-r--r--include/scsi/libfc.h17
-rw-r--r--include/scsi/libfcoe.h11
-rw-r--r--include/scsi/libiscsi.h64
-rw-r--r--include/scsi/libsas.h144
-rw-r--r--include/scsi/sas.h63
-rw-r--r--include/scsi/sas_ata.h46
-rw-r--r--include/scsi/scsi.h182
-rw-r--r--include/scsi/scsi_bsg_iscsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h150
-rw-r--r--include/scsi/scsi_common.h20
-rw-r--r--include/scsi/scsi_device.h188
-rw-r--r--include/scsi/scsi_devinfo.h10
-rw-r--r--include/scsi/scsi_dh.h5
-rw-r--r--include/scsi/scsi_driver.h9
-rw-r--r--include/scsi/scsi_eh.h6
-rw-r--r--include/scsi/scsi_host.h194
-rw-r--r--include/scsi/scsi_ioctl.h9
-rw-r--r--include/scsi/scsi_proto.h84
-rw-r--r--include/scsi/scsi_request.h33
-rw-r--r--include/scsi/scsi_status.h74
-rw-r--r--include/scsi/scsi_transport_fc.h72
-rw-r--r--include/scsi/scsi_transport_iscsi.h41
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/scsi_transport_srp.h2
-rw-r--r--include/scsi/sg.h39
-rw-r--r--include/scsi/srp.h26
-rw-r--r--include/scsi/viosrp.h17
-rw-r--r--include/soc/amlogic/meson_ddr_pmu.h66
-rw-r--r--include/soc/arc/timers.h4
-rw-r--r--include/soc/at91/atmel_tcb.h3
-rw-r--r--include/soc/at91/sama7-ddr.h88
-rw-r--r--include/soc/at91/sama7-sfrbu.h34
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h64
-rw-r--r--include/soc/brcmstb/common.h12
-rw-r--r--include/soc/canaan/k210-sysctl.h43
-rw-r--r--include/soc/fsl/caam-blob.h103
-rw-r--r--include/soc/fsl/dpaa2-fd.h3
-rw-r--r--include/soc/fsl/dpaa2-io.h9
-rw-r--r--include/soc/fsl/qe/immap_qe.h3
-rw-r--r--include/soc/fsl/qe/qe.h56
-rw-r--r--include/soc/fsl/qe/qe_tdm.h4
-rw-r--r--include/soc/fsl/qe/qmc.h96
-rw-r--r--include/soc/fsl/qe/ucc_fast.h3
-rw-r--r--include/soc/fsl/qe/ucc_slow.h2
-rw-r--r--include/soc/fsl/qman.h16
-rw-r--r--include/soc/imx/cpu.h1
-rw-r--r--include/soc/imx/revision.h1
-rw-r--r--include/soc/imx/timer.h23
-rw-r--r--include/soc/mediatek/smi.h29
-rw-r--r--include/soc/microchip/mpfs.h53
-rw-r--r--include/soc/mscc/ocelot.h700
-rw-r--r--include/soc/mscc/ocelot_ana.h18
-rw-r--r--include/soc/mscc/ocelot_dev.h23
-rw-r--r--include/soc/mscc/ocelot_ptp.h8
-rw-r--r--include/soc/mscc/ocelot_qsys.h7
-rw-r--r--include/soc/mscc/ocelot_vcap.h530
-rw-r--r--include/soc/mscc/vsc7514_regs.h19
-rw-r--r--include/soc/nps/common.h166
-rw-r--r--include/soc/nps/mtm.h59
-rw-r--r--include/soc/qcom/ice.h37
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h73
-rw-r--r--include/soc/qcom/spm.h22
-rw-r--r--include/soc/qcom/tcs.h9
-rw-r--r--include/soc/rockchip/pm_domains.h25
-rw-r--r--include/soc/rockchip/rk3399_grf.h9
-rw-r--r--include/soc/rockchip/rk3568_grf.h13
-rw-r--r--include/soc/rockchip/rk3588_grf.h18
-rw-r--r--include/soc/rockchip/rockchip_grf.h18
-rw-r--r--include/soc/sifive/sifive_ccache.h16
-rw-r--r--include/soc/sifive/sifive_l2_cache.h16
-rw-r--r--include/soc/starfive/reset-starfive-jh71x0.h17
-rw-r--r--include/soc/tegra/bpmp-abi.h1796
-rw-r--r--include/soc/tegra/bpmp.h23
-rw-r--r--include/soc/tegra/common.h46
-rw-r--r--include/soc/tegra/emc.h16
-rw-r--r--include/soc/tegra/fuse.h75
-rw-r--r--include/soc/tegra/irq.h9
-rw-r--r--include/soc/tegra/ivc.h12
-rw-r--r--include/soc/tegra/mc.h154
-rw-r--r--include/soc/tegra/pm.h8
-rw-r--r--include/soc/tegra/pmc.h29
-rw-r--r--include/soc/tegra/tegra-cbb.h47
-rw-r--r--include/sound/ac97/codec.h2
-rw-r--r--include/sound/ac97_codec.h5
-rw-r--r--include/sound/acp63_chip_offset_byte.h495
-rw-r--r--include/sound/ak4531_codec.h3
-rw-r--r--include/sound/asequencer.h4
-rw-r--r--include/sound/compress_driver.h20
-rw-r--r--include/sound/control.h73
-rw-r--r--include/sound/core.h96
-rw-r--r--include/sound/cs-amp-lib.h66
-rw-r--r--include/sound/cs35l41.h911
-rw-r--r--include/sound/cs35l56.h307
-rw-r--r--include/sound/cs4271.h1
-rw-r--r--include/sound/cs42l42.h815
-rw-r--r--include/sound/cs42l43.h17
-rw-r--r--include/sound/da7219-aad.h6
-rw-r--r--include/sound/designware_i2s.h3
-rw-r--r--include/sound/dmaengine_pcm.h11
-rw-r--r--include/sound/emu10k1.h1020
-rw-r--r--include/sound/emu8000.h3
-rw-r--r--include/sound/emux_synth.h4
-rw-r--r--include/sound/graph_card.h35
-rw-r--r--include/sound/hda-mlink.h184
-rw-r--r--include/sound/hda_codec.h41
-rw-r--r--include/sound/hda_register.h68
-rw-r--r--include/sound/hda_verbs.h2
-rw-r--r--include/sound/hdaudio.h108
-rw-r--r--include/sound/hdaudio_ext.h142
-rw-r--r--include/sound/hdmi-codec.h29
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/intel-dsp-config.h10
-rw-r--r--include/sound/intel-nhlt.h75
-rw-r--r--include/sound/jack.h2
-rw-r--r--include/sound/l3.h28
-rw-r--r--include/sound/madera-pdata.h2
-rw-r--r--include/sound/max9768.h4
-rw-r--r--include/sound/memalloc.h130
-rw-r--r--include/sound/opl3.h2
-rw-r--r--include/sound/pcm-indirect.h22
-rw-r--r--include/sound/pcm.h268
-rw-r--r--include/sound/pcm_iec958.h8
-rw-r--r--include/sound/pcm_params.h7
-rw-r--r--include/sound/pxa2xx-lib.h17
-rw-r--r--include/sound/rawmidi.h26
-rw-r--r--include/sound/rt1015.h15
-rw-r--r--include/sound/rt5645.h30
-rw-r--r--include/sound/rt5665.h2
-rw-r--r--include/sound/rt5668.h3
-rw-r--r--include/sound/rt5682.h4
-rw-r--r--include/sound/rt5682s.h54
-rw-r--r--include/sound/s3c24xx_uda134x.h14
-rw-r--r--include/sound/sb.h3
-rw-r--r--include/sound/sdw.h49
-rw-r--r--include/sound/seq_device.h1
-rw-r--r--include/sound/seq_kernel.h10
-rw-r--r--include/sound/simple_card.h6
-rw-r--r--include/sound/simple_card_utils.h226
-rw-r--r--include/sound/soc-acpi-intel-match.h12
-rw-r--r--include/sound/soc-acpi.h65
-rw-r--r--include/sound/soc-card.h68
-rw-r--r--include/sound/soc-component.h125
-rw-r--r--include/sound/soc-dai.h271
-rw-r--r--include/sound/soc-dapm.h337
-rw-r--r--include/sound/soc-dpcm.h24
-rw-r--r--include/sound/soc-jack.h132
-rw-r--r--include/sound/soc-link.h12
-rw-r--r--include/sound/soc-topology.h22
-rw-r--r--include/sound/soc.h521
-rw-r--r--include/sound/sof.h106
-rw-r--r--include/sound/sof/channel_map.h4
-rw-r--r--include/sound/sof/control.h6
-rw-r--r--include/sound/sof/dai-amd.h36
-rw-r--r--include/sound/sof/dai-imx.h7
-rw-r--r--include/sound/sof/dai-intel.h6
-rw-r--r--include/sound/sof/dai-mediatek.h23
-rw-r--r--include/sound/sof/dai.h62
-rw-r--r--include/sound/sof/debug.h43
-rw-r--r--include/sound/sof/ext_manifest.h32
-rw-r--r--include/sound/sof/ext_manifest4.h119
-rw-r--r--include/sound/sof/header.h16
-rw-r--r--include/sound/sof/info.h11
-rw-r--r--include/sound/sof/ipc4/header.h572
-rw-r--r--include/sound/sof/stream.h7
-rw-r--r--include/sound/sof/topology.h69
-rw-r--r--include/sound/sof/trace.h28
-rw-r--r--include/sound/tas2781-dsp.h188
-rw-r--r--include/sound/tas2781-tlv.h21
-rw-r--r--include/sound/tas2781.h173
-rw-r--r--include/sound/timer.h8
-rw-r--r--include/sound/tlv320aic3x.h65
-rw-r--r--include/sound/uda134x.h24
-rw-r--r--include/sound/ump.h269
-rw-r--r--include/sound/ump_convert.h46
-rw-r--r--include/sound/ump_msg.h765
-rw-r--r--include/sound/wavefront.h53
-rw-r--r--include/sound/wm0010.h6
-rw-r--r--include/sound/wm1250-ev1.h24
-rw-r--r--include/sound/wm2200.h2
-rw-r--r--include/sound/wm5100.h4
-rw-r--r--include/sound/wm8996.h3
-rw-r--r--include/target/iscsi/iscsi_target_core.h107
-rw-r--r--include/target/iscsi/iscsi_transport.h126
-rw-r--r--include/target/target_core_backend.h17
-rw-r--r--include/target/target_core_base.h151
-rw-r--r--include/target/target_core_fabric.h52
-rw-r--r--include/trace/bpf_probe.h52
-rw-r--r--include/trace/define_custom_trace.h77
-rw-r--r--include/trace/define_trace.h14
-rw-r--r--include/trace/events/9p.h59
-rw-r--r--include/trace/events/afs.h972
-rw-r--r--include/trace/events/asoc.h46
-rw-r--r--include/trace/events/avc.h53
-rw-r--r--include/trace/events/bcache.h14
-rw-r--r--include/trace/events/block.h319
-rw-r--r--include/trace/events/bridge.h58
-rw-r--r--include/trace/events/btrfs.h692
-rw-r--r--include/trace/events/cachefiles.h789
-rw-r--r--include/trace/events/cgroup.h12
-rw-r--r--include/trace/events/clk.h87
-rw-r--r--include/trace/events/cma.h92
-rw-r--r--include/trace/events/compaction.h61
-rw-r--r--include/trace/events/csd.h72
-rw-r--r--include/trace/events/damon.h85
-rw-r--r--include/trace/events/devfreq.h30
-rw-r--r--include/trace/events/devlink.h98
-rw-r--r--include/trace/events/dlm.h682
-rw-r--r--include/trace/events/dma_fence.h4
-rw-r--r--include/trace/events/erofs.h54
-rw-r--r--include/trace/events/error_report.h76
-rw-r--r--include/trace/events/ext4.h682
-rw-r--r--include/trace/events/f2fs.h708
-rw-r--r--include/trace/events/fib.h11
-rw-r--r--include/trace/events/fib6.h13
-rw-r--r--include/trace/events/filelock.h110
-rw-r--r--include/trace/events/filemap.h32
-rw-r--r--include/trace/events/fscache.h678
-rw-r--r--include/trace/events/fsi.h117
-rw-r--r--include/trace/events/fsi_master_aspeed.h12
-rw-r--r--include/trace/events/fsi_master_i2cr.h107
-rw-r--r--include/trace/events/habanalabs.h211
-rw-r--r--include/trace/events/handshake.h319
-rw-r--r--include/trace/events/hswadsp.h385
-rw-r--r--include/trace/events/huge_memory.h81
-rw-r--r--include/trace/events/i2c_slave.h67
-rw-r--r--include/trace/events/ib_mad.h13
-rw-r--r--include/trace/events/intel_ifs.h41
-rw-r--r--include/trace/events/intel_iommu.h142
-rw-r--r--include/trace/events/io_uring.h535
-rw-r--r--include/trace/events/iocost.h87
-rw-r--r--include/trace/events/iommu.h17
-rw-r--r--include/trace/events/ipi.h44
-rw-r--r--include/trace/events/irq.h47
-rw-r--r--include/trace/events/iscsi.h4
-rw-r--r--include/trace/events/jbd2.h153
-rw-r--r--include/trace/events/kmem.h200
-rw-r--r--include/trace/events/ksm.h284
-rw-r--r--include/trace/events/kvm.h165
-rw-r--r--include/trace/events/kyber.h27
-rw-r--r--include/trace/events/libata.h417
-rw-r--r--include/trace/events/lock.h63
-rw-r--r--include/trace/events/maple_tree.h123
-rw-r--r--include/trace/events/mctp.h78
-rw-r--r--include/trace/events/migrate.h81
-rw-r--r--include/trace/events/mmap.h75
-rw-r--r--include/trace/events/mmap_lock.h87
-rw-r--r--include/trace/events/mmflags.h186
-rw-r--r--include/trace/events/mptcp.h184
-rw-r--r--include/trace/events/napi.h33
-rw-r--r--include/trace/events/neigh.h6
-rw-r--r--include/trace/events/net.h17
-rw-r--r--include/trace/events/netfs.h463
-rw-r--r--include/trace/events/netlink.h29
-rw-r--r--include/trace/events/nilfs2.h4
-rw-r--r--include/trace/events/notifier.h69
-rw-r--r--include/trace/events/oom.h36
-rw-r--r--include/trace/events/osnoise.h142
-rw-r--r--include/trace/events/page_pool.h6
-rw-r--r--include/trace/events/page_ref.h4
-rw-r--r--include/trace/events/pagemap.h51
-rw-r--r--include/trace/events/percpu.h23
-rw-r--r--include/trace/events/power.h51
-rw-r--r--include/trace/events/pwm.h20
-rw-r--r--include/trace/events/qdisc.h48
-rw-r--r--include/trace/events/qla.h4
-rw-r--r--include/trace/events/qrtr.h33
-rw-r--r--include/trace/events/random.h330
-rw-r--r--include/trace/events/rcu.h126
-rw-r--r--include/trace/events/rpcgss.h46
-rw-r--r--include/trace/events/rpcrdma.h1235
-rw-r--r--include/trace/events/rpm.h42
-rw-r--r--include/trace/events/rseq.h7
-rw-r--r--include/trace/events/rv.h142
-rw-r--r--include/trace/events/rwmmio.h108
-rw-r--r--include/trace/events/rxrpc.h1702
-rw-r--r--include/trace/events/sched.h179
-rw-r--r--include/trace/events/scmi.h108
-rw-r--r--include/trace/events/scsi.h102
-rw-r--r--include/trace/events/skb.h41
-rw-r--r--include/trace/events/sock.h137
-rw-r--r--include/trace/events/sof.h121
-rw-r--r--include/trace/events/sof_intel.h148
-rw-r--r--include/trace/events/spi.h65
-rw-r--r--include/trace/events/spmi.h12
-rw-r--r--include/trace/events/sunrpc.h1192
-rw-r--r--include/trace/events/swiotlb.h29
-rw-r--r--include/trace/events/target.h12
-rw-r--r--include/trace/events/task.h2
-rw-r--r--include/trace/events/tcp.h158
-rw-r--r--include/trace/events/thermal.h212
-rw-r--r--include/trace/events/thermal_power_allocator.h88
-rw-r--r--include/trace/events/thermal_pressure.h29
-rw-r--r--include/trace/events/thp.h73
-rw-r--r--include/trace/events/timer.h54
-rw-r--r--include/trace/events/timer_migration.h298
-rw-r--r--include/trace/events/ufs.h176
-rw-r--r--include/trace/events/vmalloc.h123
-rw-r--r--include/trace/events/vmscan.h117
-rw-r--r--include/trace/events/vsock_virtio_transport_common.h17
-rw-r--r--include/trace/events/watchdog.h66
-rw-r--r--include/trace/events/wbt.h8
-rw-r--r--include/trace/events/workqueue.h14
-rw-r--r--include/trace/events/writeback.h76
-rw-r--r--include/trace/events/xdp.h84
-rw-r--r--include/trace/events/xen.h34
-rw-r--r--include/trace/misc/fs.h122
-rw-r--r--include/trace/misc/nfs.h422
-rw-r--r--include/trace/misc/rdma.h (renamed from include/trace/events/rdma.h)41
-rw-r--r--include/trace/misc/sunrpc.h18
-rw-r--r--include/trace/perf.h17
-rw-r--r--include/trace/stages/init.h37
-rw-r--r--include/trace/stages/stage1_struct_define.h60
-rw-r--r--include/trace/stages/stage2_data_offsets.h63
-rw-r--r--include/trace/stages/stage3_trace_output.h144
-rw-r--r--include/trace/stages/stage4_event_fields.h81
-rw-r--r--include/trace/stages/stage5_get_offsets.h127
-rw-r--r--include/trace/stages/stage6_event_callback.h141
-rw-r--r--include/trace/stages/stage7_class_define.h39
-rw-r--r--include/trace/syscall.h6
-rw-r--r--include/trace/trace_custom_events.h221
-rw-r--r--include/trace/trace_events.h374
-rw-r--r--include/uapi/asm-generic/bitsperlong.h17
-rw-r--r--include/uapi/asm-generic/fcntl.h28
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h25
-rw-r--r--include/uapi/asm-generic/mman-common.h7
-rw-r--r--include/uapi/asm-generic/poll.h2
-rw-r--r--include/uapi/asm-generic/sembuf.h6
-rw-r--r--include/uapi/asm-generic/shmbuf.h4
-rw-r--r--include/uapi/asm-generic/siginfo.h42
-rw-r--r--include/uapi/asm-generic/signal-defs.h64
-rw-r--r--include/uapi/asm-generic/signal.h31
-rw-r--r--include/uapi/asm-generic/socket.h16
-rw-r--r--include/uapi/asm-generic/termbits-common.h66
-rw-r--r--include/uapi/asm-generic/termbits.h239
-rw-r--r--include/uapi/asm-generic/types.h6
-rw-r--r--include/uapi/asm-generic/unistd.h208
-rw-r--r--include/uapi/drm/amdgpu_drm.h212
-rw-r--r--include/uapi/drm/drm.h489
-rw-r--r--include/uapi/drm/drm_fourcc.h532
-rw-r--r--include/uapi/drm/drm_mode.h513
-rw-r--r--include/uapi/drm/etnaviv_drm.h8
-rw-r--r--include/uapi/drm/habanalabs_accel.h2368
-rw-r--r--include/uapi/drm/i810_drm.h292
-rw-r--r--include/uapi/drm/i915_drm.h1991
-rw-r--r--include/uapi/drm/ivpu_accel.h343
-rw-r--r--include/uapi/drm/mga_drm.h427
-rw-r--r--include/uapi/drm/msm_drm.h83
-rw-r--r--include/uapi/drm/nouveau_drm.h293
-rw-r--r--include/uapi/drm/panfrost_drm.h56
-rw-r--r--include/uapi/drm/pvr_drm.h1295
-rw-r--r--include/uapi/drm/qaic_accel.h399
-rw-r--r--include/uapi/drm/r128_drm.h336
-rw-r--r--include/uapi/drm/savage_drm.h220
-rw-r--r--include/uapi/drm/sis_drm.h77
-rw-r--r--include/uapi/drm/tegra_drm.h425
-rw-r--r--include/uapi/drm/v3d_drm.h457
-rw-r--r--include/uapi/drm/via_drm.h282
-rw-r--r--include/uapi/drm/virtgpu_drm.h90
-rw-r--r--include/uapi/drm/vmwgfx_drm.h55
-rw-r--r--include/uapi/drm/xe_drm.h1358
-rw-r--r--include/uapi/linux/acct.h3
-rw-r--r--include/uapi/linux/acrn.h649
-rw-r--r--include/uapi/linux/affs_hardblocks.h68
-rw-r--r--include/uapi/linux/agpgart.h9
-rw-r--r--include/uapi/linux/amt.h62
-rw-r--r--include/uapi/linux/android/binder.h84
-rw-r--r--include/uapi/linux/aspeed-video.h14
-rw-r--r--include/uapi/linux/atmbr2684.h2
-rw-r--r--include/uapi/linux/atmdev.h4
-rw-r--r--include/uapi/linux/audit.h17
-rw-r--r--include/uapi/linux/auto_dev-ioctl.h2
-rw-r--r--include/uapi/linux/auxvec.h7
-rw-r--r--include/uapi/linux/batadv_packet.h49
-rw-r--r--include/uapi/linux/batman_adv.h28
-rw-r--r--include/uapi/linux/bcache.h445
-rw-r--r--include/uapi/linux/binfmts.h4
-rw-r--r--include/uapi/linux/bits.h15
-rw-r--r--include/uapi/linux/blkpg.h28
-rw-r--r--include/uapi/linux/blkzoned.h27
-rw-r--r--include/uapi/linux/bpf.h3562
-rw-r--r--include/uapi/linux/bpfilter.h21
-rw-r--r--include/uapi/linux/btf.h76
-rw-r--r--include/uapi/linux/btrfs.h230
-rw-r--r--include/uapi/linux/btrfs_tree.h389
-rw-r--r--include/uapi/linux/byteorder/big_endian.h1
-rw-r--r--include/uapi/linux/byteorder/little_endian.h1
-rw-r--r--include/uapi/linux/cachefiles.h68
-rw-r--r--include/uapi/linux/can.h108
-rw-r--r--include/uapi/linux/can/bcm.h2
-rw-r--r--include/uapi/linux/can/error.h20
-rw-r--r--include/uapi/linux/can/gw.h4
-rw-r--r--include/uapi/linux/can/isotp.h183
-rw-r--r--include/uapi/linux/can/j1939.h9
-rw-r--r--include/uapi/linux/can/netlink.h45
-rw-r--r--include/uapi/linux/can/raw.h22
-rw-r--r--include/uapi/linux/capability.h13
-rw-r--r--include/uapi/linux/ccs.h18
-rw-r--r--include/uapi/linux/cdrom.h28
-rw-r--r--include/uapi/linux/cec-funcs.h16
-rw-r--r--include/uapi/linux/cec.h27
-rw-r--r--include/uapi/linux/cfm_bridge.h64
-rw-r--r--include/uapi/linux/cgroupstats.h2
-rw-r--r--include/uapi/linux/cifs/cifs_mount.h1
-rw-r--r--include/uapi/linux/cifs/cifs_netlink.h63
-rw-r--r--include/uapi/linux/close_range.h3
-rw-r--r--include/uapi/linux/cm4000_cs.h64
-rw-r--r--include/uapi/linux/cn_proc.h62
-rw-r--r--include/uapi/linux/comedi.h1528
-rw-r--r--include/uapi/linux/connector.h2
-rw-r--r--include/uapi/linux/const.h5
-rw-r--r--include/uapi/linux/coresight-stm.h1
-rw-r--r--include/uapi/linux/counter.h170
-rw-r--r--include/uapi/linux/cxl_mem.h230
-rw-r--r--include/uapi/linux/cyclades.h497
-rw-r--r--include/uapi/linux/cycx_cfm.h2
-rw-r--r--include/uapi/linux/dcbnl.h10
-rw-r--r--include/uapi/linux/devlink.h199
-rw-r--r--include/uapi/linux/dlm.h2
-rw-r--r--include/uapi/linux/dlm_device.h4
-rw-r--r--include/uapi/linux/dlm_netlink.h60
-rw-r--r--include/uapi/linux/dlm_plock.h1
-rw-r--r--include/uapi/linux/dlmconstants.h5
-rw-r--r--include/uapi/linux/dm-ioctl.h32
-rw-r--r--include/uapi/linux/dm-log-userspace.h2
-rw-r--r--include/uapi/linux/dma-buf.h138
-rw-r--r--include/uapi/linux/dn.h149
-rw-r--r--include/uapi/linux/dpll.h238
-rw-r--r--include/uapi/linux/dqblk_xfs.h21
-rw-r--r--include/uapi/linux/dvb/audio.h15
-rw-r--r--include/uapi/linux/dvb/ca.h15
-rw-r--r--include/uapi/linux/dvb/dmx.h15
-rw-r--r--include/uapi/linux/dvb/frontend.h85
-rw-r--r--include/uapi/linux/dvb/net.h15
-rw-r--r--include/uapi/linux/dvb/osd.h15
-rw-r--r--include/uapi/linux/dvb/version.h17
-rw-r--r--include/uapi/linux/dvb/video.h15
-rw-r--r--include/uapi/linux/dw100.h14
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/elf-fdpic.h15
-rw-r--r--include/uapi/linux/elf.h49
-rw-r--r--include/uapi/linux/ethtool.h394
-rw-r--r--include/uapi/linux/ethtool_netlink.h367
-rw-r--r--include/uapi/linux/eventfd.h11
-rw-r--r--include/uapi/linux/eventpoll.h31
-rw-r--r--include/uapi/linux/ext4.h117
-rw-r--r--include/uapi/linux/f2fs.h99
-rw-r--r--include/uapi/linux/fanotify.h78
-rw-r--r--include/uapi/linux/fb.h10
-rw-r--r--include/uapi/linux/fcntl.h9
-rw-r--r--include/uapi/linux/fd.h46
-rw-r--r--include/uapi/linux/fiemap.h2
-rw-r--r--include/uapi/linux/firewire-cdev.h192
-rw-r--r--include/uapi/linux/fou.h56
-rw-r--r--include/uapi/linux/fs.h95
-rw-r--r--include/uapi/linux/fscrypt.h19
-rw-r--r--include/uapi/linux/fsi.h24
-rw-r--r--include/uapi/linux/fsl_mc.h34
-rw-r--r--include/uapi/linux/fsmap.h2
-rw-r--r--include/uapi/linux/fsverity.h63
-rw-r--r--include/uapi/linux/fuse.h322
-rw-r--r--include/uapi/linux/futex.h52
-rw-r--r--include/uapi/linux/genetlink.h12
-rw-r--r--include/uapi/linux/gfs2_ondisk.h5
-rw-r--r--include/uapi/linux/gpio.h398
-rw-r--r--include/uapi/linux/gsmmux.h117
-rw-r--r--include/uapi/linux/gtp.h5
-rw-r--r--include/uapi/linux/handshake.h74
-rw-r--r--include/uapi/linux/hash_info.h3
-rw-r--r--include/uapi/linux/hid.h26
-rw-r--r--include/uapi/linux/hidraw.h6
-rw-r--r--include/uapi/linux/hsi/cs-protocol.h14
-rw-r--r--include/uapi/linux/hsi/hsi_char.h14
-rw-r--r--include/uapi/linux/hw_breakpoint.h10
-rw-r--r--include/uapi/linux/hyperv.h13
-rw-r--r--include/uapi/linux/i2c-dev.h25
-rw-r--r--include/uapi/linux/i2c.h128
-rw-r--r--include/uapi/linux/icmp.h41
-rw-r--r--include/uapi/linux/icmpv6.h4
-rw-r--r--include/uapi/linux/idxd.h183
-rw-r--r--include/uapi/linux/if_addr.h9
-rw-r--r--include/uapi/linux/if_alg.h20
-rw-r--r--include/uapi/linux/if_arcnet.h6
-rw-r--r--include/uapi/linux/if_arp.h1
-rw-r--r--include/uapi/linux/if_bonding.h12
-rw-r--r--include/uapi/linux/if_bridge.h283
-rw-r--r--include/uapi/linux/if_ether.h11
-rw-r--r--include/uapi/linux/if_fddi.h2
-rw-r--r--include/uapi/linux/if_frad.h123
-rw-r--r--include/uapi/linux/if_link.h944
-rw-r--r--include/uapi/linux/if_macsec.h2
-rw-r--r--include/uapi/linux/if_packet.h15
-rw-r--r--include/uapi/linux/if_pppol2tp.h2
-rw-r--r--include/uapi/linux/if_pppox.h4
-rw-r--r--include/uapi/linux/if_tun.h6
-rw-r--r--include/uapi/linux/if_tunnel.h4
-rw-r--r--include/uapi/linux/if_xdp.h60
-rw-r--r--include/uapi/linux/igmp.h6
-rw-r--r--include/uapi/linux/iio/buffer.h10
-rw-r--r--include/uapi/linux/iio/types.h18
-rw-r--r--include/uapi/linux/in.h34
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/inet_diag.h20
-rw-r--r--include/uapi/linux/inotify.h6
-rw-r--r--include/uapi/linux/input-event-codes.h35
-rw-r--r--include/uapi/linux/input.h14
-rw-r--r--include/uapi/linux/io_uring.h556
-rw-r--r--include/uapi/linux/ioam6.h133
-rw-r--r--include/uapi/linux/ioam6_genl.h72
-rw-r--r--include/uapi/linux/ioam6_iptunnel.h58
-rw-r--r--include/uapi/linux/iommu.h333
-rw-r--r--include/uapi/linux/iommufd.h695
-rw-r--r--include/uapi/linux/ioprio.h127
-rw-r--r--include/uapi/linux/ip.h12
-rw-r--r--include/uapi/linux/ip_vs.h4
-rw-r--r--include/uapi/linux/ipmi.h16
-rw-r--r--include/uapi/linux/ipmi_msgdefs.h2
-rw-r--r--include/uapi/linux/ipmi_ssif_bmc.h18
-rw-r--r--include/uapi/linux/ipv6.h16
-rw-r--r--include/uapi/linux/ipx.h87
-rw-r--r--include/uapi/linux/iso_fs.h4
-rw-r--r--include/uapi/linux/isst_if.h303
-rw-r--r--include/uapi/linux/jffs2.h8
-rw-r--r--include/uapi/linux/kcov.h2
-rw-r--r--include/uapi/linux/kd.h12
-rw-r--r--include/uapi/linux/kernel.h9
-rw-r--r--include/uapi/linux/kexec.h8
-rw-r--r--include/uapi/linux/keyboard.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h1021
-rw-r--r--include/uapi/linux/kfd_sysfs.h123
-rw-r--r--include/uapi/linux/kvm.h939
-rw-r--r--include/uapi/linux/kvm_para.h1
-rw-r--r--include/uapi/linux/l2tp.h10
-rw-r--r--include/uapi/linux/landlock.h247
-rw-r--r--include/uapi/linux/lightnvm.h225
-rw-r--r--include/uapi/linux/lirc.h26
-rw-r--r--include/uapi/linux/loadpin.h22
-rw-r--r--include/uapi/linux/loop.h11
-rw-r--r--include/uapi/linux/lsm.h92
-rw-r--r--include/uapi/linux/lwtunnel.h11
-rw-r--r--include/uapi/linux/magic.h13
-rw-r--r--include/uapi/linux/major.h2
-rw-r--r--include/uapi/linux/map_to_14segment.h241
-rw-r--r--include/uapi/linux/map_to_7segment.h25
-rw-r--r--include/uapi/linux/mctp.h100
-rw-r--r--include/uapi/linux/mdio.h138
-rw-r--r--include/uapi/linux/media-bus-format.h22
-rw-r--r--include/uapi/linux/media.h31
-rw-r--r--include/uapi/linux/mei.h61
-rw-r--r--include/uapi/linux/mei_uuid.h29
-rw-r--r--include/uapi/linux/membarrier.h30
-rw-r--r--include/uapi/linux/memfd.h4
-rw-r--r--include/uapi/linux/mempolicy.h16
-rw-r--r--include/uapi/linux/meye.h65
-rw-r--r--include/uapi/linux/mic_common.h235
-rw-r--r--include/uapi/linux/mic_ioctl.h77
-rw-r--r--include/uapi/linux/minix_fs.h4
-rw-r--r--include/uapi/linux/misc/bcm_vk.h84
-rw-r--r--include/uapi/linux/mman.h15
-rw-r--r--include/uapi/linux/mmc/ioctl.h2
-rw-r--r--include/uapi/linux/module.h1
-rw-r--r--include/uapi/linux/mount.h95
-rw-r--r--include/uapi/linux/mptcp.h154
-rw-r--r--include/uapi/linux/mptcp_pm.h150
-rw-r--r--include/uapi/linux/mroute.h5
-rw-r--r--include/uapi/linux/mroute6.h3
-rw-r--r--include/uapi/linux/mrp_bridge.h87
-rw-r--r--include/uapi/linux/n_r3964.h99
-rw-r--r--include/uapi/linux/nbd-netlink.h1
-rw-r--r--include/uapi/linux/nbd.h25
-rw-r--r--include/uapi/linux/ndctl.h12
-rw-r--r--include/uapi/linux/neighbour.h47
-rw-r--r--include/uapi/linux/net_dropmon.h5
-rw-r--r--include/uapi/linux/net_tstamp.h35
-rw-r--r--include/uapi/linux/netdev.h175
-rw-r--r--include/uapi/linux/netfilter.h6
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h12
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h117
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h5
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cthelper.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h82
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_log.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h6
-rw-r--r--include/uapi/linux/netfilter/x_tables.h6
-rw-r--r--include/uapi/linux/netfilter/xt_AUDIT.h4
-rw-r--r--include/uapi/linux/netfilter/xt_IDLETIMER.h17
-rw-r--r--include/uapi/linux/netfilter/xt_SECMARK.h6
-rw-r--r--include/uapi/linux/netfilter/xt_connmark.h13
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h14
-rw-r--r--include/uapi/linux/netfilter_arp/arp_tables.h6
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_among.h2
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h22
-rw-r--r--include/uapi/linux/netfilter_decnet.h72
-rw-r--r--include/uapi/linux/netfilter_ipv4/ip_tables.h6
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6_tables.h4
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_LOG.h2
-rw-r--r--include/uapi/linux/netlink.h48
-rw-r--r--include/uapi/linux/nexthop.h92
-rw-r--r--include/uapi/linux/nfc.h6
-rw-r--r--include/uapi/linux/nfs3.h6
-rw-r--r--include/uapi/linux/nfs4.h10
-rw-r--r--include/uapi/linux/nfs_fs.h2
-rw-r--r--include/uapi/linux/nfsacl.h2
-rw-r--r--include/uapi/linux/nfsd/export.h13
-rw-r--r--include/uapi/linux/nfsd/nfsfh.h105
-rw-r--r--include/uapi/linux/nfsd_netlink.h39
-rw-r--r--include/uapi/linux/nitro_enclaves.h359
-rw-r--r--include/uapi/linux/nl80211-vnd-intel.h106
-rw-r--r--include/uapi/linux/nl80211.h1219
-rw-r--r--include/uapi/linux/npcm-video.h41
-rw-r--r--include/uapi/linux/nsm.h31
-rw-r--r--include/uapi/linux/nvme_ioctl.h34
-rw-r--r--include/uapi/linux/omap3isp.h21
-rw-r--r--include/uapi/linux/openat2.h4
-rw-r--r--include/uapi/linux/openvswitch.h52
-rw-r--r--include/uapi/linux/parport.h3
-rw-r--r--include/uapi/linux/pci_regs.h228
-rw-r--r--include/uapi/linux/pcitest.h5
-rw-r--r--include/uapi/linux/perf_event.h259
-rw-r--r--include/uapi/linux/pfkeyv2.h2
-rw-r--r--include/uapi/linux/pfrut.h262
-rw-r--r--include/uapi/linux/pidfd.h18
-rw-r--r--include/uapi/linux/pkt_cls.h108
-rw-r--r--include/uapi/linux/pkt_sched.h168
-rw-r--r--include/uapi/linux/pktcdvd.h12
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/pr.h17
-rw-r--r--include/uapi/linux/prctl.h68
-rw-r--r--include/uapi/linux/psample.h10
-rw-r--r--include/uapi/linux/psci.h18
-rw-r--r--include/uapi/linux/psp-dbc.h147
-rw-r--r--include/uapi/linux/psp-sev.h67
-rw-r--r--include/uapi/linux/ptp_clock.h18
-rw-r--r--include/uapi/linux/ptrace.h45
-rw-r--r--include/uapi/linux/quota.h1
-rw-r--r--include/uapi/linux/raid/md_p.h10
-rw-r--r--include/uapi/linux/raid/md_u.h11
-rw-r--r--include/uapi/linux/random.h2
-rw-r--r--include/uapi/linux/raw.h17
-rw-r--r--include/uapi/linux/reiserfs_xattr.h2
-rw-r--r--include/uapi/linux/resource.h15
-rw-r--r--include/uapi/linux/rfkill.h106
-rw-r--r--include/uapi/linux/rkisp1-config.h999
-rw-r--r--include/uapi/linux/romfs_fs.h4
-rw-r--r--include/uapi/linux/rpl.h10
-rw-r--r--include/uapi/linux/rpmsg.h33
-rw-r--r--include/uapi/linux/rpmsg_types.h11
-rw-r--r--include/uapi/linux/rseq.h42
-rw-r--r--include/uapi/linux/rtc.h35
-rw-r--r--include/uapi/linux/rtnetlink.h69
-rw-r--r--include/uapi/linux/rxrpc.h2
-rw-r--r--include/uapi/linux/sched/types.h6
-rw-r--r--include/uapi/linux/sctp.h29
-rw-r--r--include/uapi/linux/sdla.h117
-rw-r--r--include/uapi/linux/seccomp.h7
-rw-r--r--include/uapi/linux/sed-opal.h71
-rw-r--r--include/uapi/linux/seg6.h2
-rw-r--r--include/uapi/linux/seg6_iptunnel.h4
-rw-r--r--include/uapi/linux/seg6_local.h57
-rw-r--r--include/uapi/linux/serial.h75
-rw-r--r--include/uapi/linux/serial_core.h81
-rw-r--r--include/uapi/linux/serial_reg.h11
-rw-r--r--include/uapi/linux/sev-guest.h96
-rw-r--r--include/uapi/linux/smc.h271
-rw-r--r--include/uapi/linux/smc_diag.h2
-rw-r--r--include/uapi/linux/snmp.h18
-rw-r--r--include/uapi/linux/socket.h9
-rw-r--r--include/uapi/linux/soundcard.h2
-rw-r--r--include/uapi/linux/spi/spi.h43
-rw-r--r--include/uapi/linux/spi/spidev.h30
-rw-r--r--include/uapi/linux/stat.h14
-rw-r--r--include/uapi/linux/stddef.h52
-rw-r--r--include/uapi/linux/stm.h2
-rw-r--r--include/uapi/linux/surface_aggregator/cdev.h147
-rw-r--r--include/uapi/linux/surface_aggregator/dtx.h146
-rw-r--r--include/uapi/linux/swab.h8
-rw-r--r--include/uapi/linux/sync_file.h61
-rw-r--r--include/uapi/linux/sysctl.h41
-rw-r--r--include/uapi/linux/target_core_user.h6
-rw-r--r--include/uapi/linux/taskstats.h34
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h5
-rw-r--r--include/uapi/linux/tc_act/tc_ct.h3
-rw-r--r--include/uapi/linux/tc_act/tc_ipt.h20
-rw-r--r--include/uapi/linux/tc_act/tc_mirred.h1
-rw-r--r--include/uapi/linux/tc_act/tc_mpls.h1
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h15
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h8
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h6
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h9
-rw-r--r--include/uapi/linux/tcp.h151
-rw-r--r--include/uapi/linux/tdx-guest.h42
-rw-r--r--include/uapi/linux/tee.h19
-rw-r--r--include/uapi/linux/termios.h15
-rw-r--r--include/uapi/linux/thermal.h8
-rw-r--r--include/uapi/linux/thp7312.h19
-rw-r--r--include/uapi/linux/tipc.h2
-rw-r--r--include/uapi/linux/tipc_config.h28
-rw-r--r--include/uapi/linux/tipc_netlink.h2
-rw-r--r--include/uapi/linux/tls.h79
-rw-r--r--include/uapi/linux/tps6594_pfsm.h37
-rw-r--r--include/uapi/linux/tty.h8
-rw-r--r--include/uapi/linux/tty_flags.h8
-rw-r--r--include/uapi/linux/types.h13
-rw-r--r--include/uapi/linux/ublk_cmd.h400
-rw-r--r--include/uapi/linux/usb/audio.h2
-rw-r--r--include/uapi/linux/usb/cdc.h19
-rw-r--r--include/uapi/linux/usb/ch11.h6
-rw-r--r--include/uapi/linux/usb/ch9.h49
-rw-r--r--include/uapi/linux/usb/functionfs.h47
-rw-r--r--include/uapi/linux/usb/g_uvc.h3
-rw-r--r--include/uapi/linux/usb/raw_gadget.h18
-rw-r--r--include/uapi/linux/usb/tmc.h3
-rw-r--r--include/uapi/linux/usb/video.h37
-rw-r--r--include/uapi/linux/usbdevice_fs.h4
-rw-r--r--include/uapi/linux/usbip.h26
-rw-r--r--include/uapi/linux/user_events.h94
-rw-r--r--include/uapi/linux/userfaultfd.h134
-rw-r--r--include/uapi/linux/uuid.h43
-rw-r--r--include/uapi/linux/uvcvideo.h16
-rw-r--r--include/uapi/linux/v4l2-common.h39
-rw-r--r--include/uapi/linux/v4l2-controls.h2832
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h9
-rw-r--r--include/uapi/linux/v4l2-mediabus.h19
-rw-r--r--include/uapi/linux/v4l2-subdev.h141
-rw-r--r--include/uapi/linux/vdpa.h80
-rw-r--r--include/uapi/linux/vduse.h353
-rw-r--r--include/uapi/linux/vesa.h18
-rw-r--r--include/uapi/linux/vfio.h971
-rw-r--r--include/uapi/linux/vfio_zdev.h85
-rw-r--r--include/uapi/linux/vhost.h98
-rw-r--r--include/uapi/linux/vhost_types.h55
-rw-r--r--include/uapi/linux/videodev2.h266
-rw-r--r--include/uapi/linux/virtio_9p.h2
-rw-r--r--include/uapi/linux/virtio_blk.h124
-rw-r--r--include/uapi/linux/virtio_bt.h39
-rw-r--r--include/uapi/linux/virtio_config.h30
-rw-r--r--include/uapi/linux/virtio_crypto.h82
-rw-r--r--include/uapi/linux/virtio_fs.h3
-rw-r--r--include/uapi/linux/virtio_gpio.h72
-rw-r--r--include/uapi/linux/virtio_gpu.h121
-rw-r--r--include/uapi/linux/virtio_i2c.h47
-rw-r--r--include/uapi/linux/virtio_ids.h70
-rw-r--r--include/uapi/linux/virtio_iommu.h8
-rw-r--r--include/uapi/linux/virtio_mem.h9
-rw-r--r--include/uapi/linux/virtio_mmio.h11
-rw-r--r--include/uapi/linux/virtio_net.h53
-rw-r--r--include/uapi/linux/virtio_pci.h92
-rw-r--r--include/uapi/linux/virtio_pcidev.h65
-rw-r--r--include/uapi/linux/virtio_pmem.h7
-rw-r--r--include/uapi/linux/virtio_ring.h16
-rw-r--r--include/uapi/linux/virtio_scmi.h24
-rw-r--r--include/uapi/linux/virtio_snd.h488
-rw-r--r--include/uapi/linux/virtio_vsock.h10
-rw-r--r--include/uapi/linux/vm_sockets.h56
-rw-r--r--include/uapi/linux/wimax.h239
-rw-r--r--include/uapi/linux/wimax/i2400m.h572
-rw-r--r--include/uapi/linux/wireless.h2
-rw-r--r--include/uapi/linux/wwan.h16
-rw-r--r--include/uapi/linux/xfrm.h40
-rw-r--r--include/uapi/misc/fastrpc.h93
-rw-r--r--include/uapi/misc/habanalabs.h921
-rw-r--r--include/uapi/misc/uacce/hisi_qm.h18
-rw-r--r--include/uapi/mtd/mtd-abi.h70
-rw-r--r--include/uapi/mtd/ubi-user.h10
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h90
-rw-r--r--include/uapi/rdma/efa-abi.h42
-rw-r--r--include/uapi/rdma/erdma-abi.h49
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h2
-rw-r--r--include/uapi/rdma/hns-abi.h68
-rw-r--r--include/uapi/rdma/i40iw-abi.h107
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h30
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h23
-rw-r--r--include/uapi/rdma/ib_user_mad.h2
-rw-r--r--include/uapi/rdma/ib_user_verbs.h160
-rw-r--r--include/uapi/rdma/irdma-abi.h120
-rw-r--r--include/uapi/rdma/mana-abi.h66
-rw-r--r--include/uapi/rdma/mlx5-abi.h23
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h49
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h27
-rw-r--r--include/uapi/rdma/rdma_netlink.h25
-rw-r--r--include/uapi/rdma/rdma_user_cm.h2
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h2
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h69
-rw-r--r--include/uapi/rdma/siw-abi.h2
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h7
-rw-r--r--include/uapi/regulator/regulator.h90
-rw-r--r--include/uapi/scsi/fc/fc_els.h232
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h4
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h578
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h125
-rw-r--r--include/uapi/scsi/scsi_netlink_fc.h7
-rw-r--r--include/uapi/sound/asequencer.h116
-rw-r--r--include/uapi/sound/asoc.h36
-rw-r--r--include/uapi/sound/asound.h165
-rw-r--r--include/uapi/sound/asound_fm.h15
-rw-r--r--include/uapi/sound/compress_offload.h21
-rw-r--r--include/uapi/sound/compress_params.h44
-rw-r--r--include/uapi/sound/emu10k1.h174
-rw-r--r--include/uapi/sound/firewire.h182
-rw-r--r--include/uapi/sound/hdsp.h14
-rw-r--r--include/uapi/sound/hdspm.h15
-rw-r--r--include/uapi/sound/intel/avs/tokens.h139
-rw-r--r--include/uapi/sound/sb16_csp.h15
-rw-r--r--include/uapi/sound/scarlett2.h54
-rw-r--r--include/uapi/sound/sfnt_info.h15
-rw-r--r--include/uapi/sound/skl-tplg-interface.h5
-rw-r--r--include/uapi/sound/snd_ar_tokens.h235
-rw-r--r--include/uapi/sound/snd_sst_tokens.h16
-rw-r--r--include/uapi/sound/sof/abi.h6
-rw-r--r--include/uapi/sound/sof/header.h57
-rw-r--r--include/uapi/sound/sof/tokens.h88
-rw-r--r--include/uapi/sound/tlv.h11
-rw-r--r--include/uapi/sound/usb_stream.h16
-rw-r--r--include/uapi/xen/evtchn.h9
-rw-r--r--include/uapi/xen/gntalloc.h5
-rw-r--r--include/uapi/xen/gntdev.h8
-rw-r--r--include/uapi/xen/privcmd.h32
-rw-r--r--include/ufs/ufs.h614
-rw-r--r--include/ufs/ufs_quirks.h110
-rw-r--r--include/ufs/ufshcd.h1438
-rw-r--r--include/ufs/ufshci.h615
-rw-r--r--include/ufs/unipro.h321
-rw-r--r--include/vdso/bits.h1
-rw-r--r--include/vdso/datapage.h14
-rw-r--r--include/vdso/gettime.h23
-rw-r--r--include/vdso/helpers.h8
-rw-r--r--include/vdso/time64.h1
-rw-r--r--include/video/cmdline.h16
-rw-r--r--include/video/imx-ipu-v3.h5
-rw-r--r--include/video/kyro.h12
-rw-r--r--include/video/mbxfb.h99
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/nomodeset.h8
-rw-r--r--include/video/of_display_timing.h2
-rw-r--r--include/video/omap-panel-data.h71
-rw-r--r--include/video/radeon.h2
-rw-r--r--include/video/samsung_fimd.h4
-rw-r--r--include/video/sstfb.h4
-rw-r--r--include/video/sticore.h406
-rw-r--r--include/video/uvesafb.h2
-rw-r--r--include/video/vga.h20
-rw-r--r--include/video/w100fb.h147
-rw-r--r--include/xen/acpi.h35
-rw-r--r--include/xen/arm/hypercall.h15
-rw-r--r--include/xen/arm/hypervisor.h12
-rw-r--r--include/xen/arm/page-coherent.h20
-rw-r--r--include/xen/arm/page.h7
-rw-r--r--include/xen/arm/swiotlb-xen.h20
-rw-r--r--include/xen/arm/xen-ops.h16
-rw-r--r--include/xen/balloon.h8
-rw-r--r--include/xen/events.h57
-rw-r--r--include/xen/grant_table.h61
-rw-r--r--include/xen/hvm.h2
-rw-r--r--include/xen/interface/callback.h19
-rw-r--r--include/xen/interface/elfnote.h29
-rw-r--r--include/xen/interface/event_channel.h2
-rw-r--r--include/xen/interface/features.h16
-rw-r--r--include/xen/interface/grant_table.h180
-rw-r--r--include/xen/interface/hvm/dm_op.h19
-rw-r--r--include/xen/interface/hvm/hvm_op.h39
-rw-r--r--include/xen/interface/hvm/hvm_vcpu.h29
-rw-r--r--include/xen/interface/hvm/ioreq.h51
-rw-r--r--include/xen/interface/hvm/params.h20
-rw-r--r--include/xen/interface/hvm/start_info.h19
-rw-r--r--include/xen/interface/io/9pfs.h19
-rw-r--r--include/xen/interface/io/blkif.h2
-rw-r--r--include/xen/interface/io/console.h2
-rw-r--r--include/xen/interface/io/displif.h21
-rw-r--r--include/xen/interface/io/fbif.h19
-rw-r--r--include/xen/interface/io/kbdif.h19
-rw-r--r--include/xen/interface/io/netif.h19
-rw-r--r--include/xen/interface/io/pciif.h19
-rw-r--r--include/xen/interface/io/protocols.h2
-rw-r--r--include/xen/interface/io/pvcalls.h2
-rw-r--r--include/xen/interface/io/ring.h270
-rw-r--r--include/xen/interface/io/sndif.h21
-rw-r--r--include/xen/interface/io/usbif.h405
-rw-r--r--include/xen/interface/io/vscsiif.h152
-rw-r--r--include/xen/interface/io/xenbus.h12
-rw-r--r--include/xen/interface/io/xs_wire.h39
-rw-r--r--include/xen/interface/memory.h2
-rw-r--r--include/xen/interface/nmi.h2
-rw-r--r--include/xen/interface/physdev.h20
-rw-r--r--include/xen/interface/platform.h22
-rw-r--r--include/xen/interface/sched.h19
-rw-r--r--include/xen/interface/vcpu.h19
-rw-r--r--include/xen/interface/version.h2
-rw-r--r--include/xen/interface/xen-mca.h1
-rw-r--r--include/xen/interface/xen.h26
-rw-r--r--include/xen/interface/xenpmu.h2
-rw-r--r--include/xen/pci.h28
-rw-r--r--include/xen/swiotlb-xen.h2
-rw-r--r--include/xen/xen-ops.h46
-rw-r--r--include/xen/xen.h43
-rw-r--r--include/xen/xenbus.h39
-rw-r--r--include/xen/xenbus_dev.h2
4126 files changed, 340404 insertions, 120329 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 531c1e9a7d10..252b235dce5a 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -207,4 +207,14 @@ struct acpi_pld_info {
#define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK)
#define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */
+/* Panel position defined in _PLD section of ACPI Specification 6.3 */
+
+#define ACPI_PLD_PANEL_TOP 0
+#define ACPI_PLD_PANEL_BOTTOM 1
+#define ACPI_PLD_PANEL_LEFT 2
+#define ACPI_PLD_PANEL_RIGHT 3
+#define ACPI_PLD_PANEL_FRONT 4
+#define ACPI_PLD_PANEL_BACK 5
+#define ACPI_PLD_PANEL_UNKNOWN 6
+
#endif /* ACBUFFER_H */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 5940a3c68a96..d768d9c568cf 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -121,7 +121,7 @@
*
*****************************************************************************/
-/* Method info (in WALK_STATE), containing local variables and argumetns */
+/* Method info (in WALK_STATE), containing local variables and arguments */
#define ACPI_METHOD_NUM_LOCALS 8
#define ACPI_METHOD_MAX_LOCAL 7
@@ -188,6 +188,10 @@
#define ACPI_MAX_GSBUS_DATA_SIZE 255
#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
+#define ACPI_PRM_INPUT_BUFFER_SIZE 26
+
+#define ACPI_FFH_INPUT_BUFFER_SIZE 256
+
/* _sx_d and _sx_w control methods */
#define ACPI_NUM_sx_d_METHODS 4
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 436cd1411c3a..c5ecd0a0170c 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -40,12 +40,12 @@
struct acpi_exception_info {
char *name;
-#ifdef ACPI_HELP_APP
+#if defined (ACPI_HELP_APP) || defined (ACPI_ASL_COMPILER)
char *description;
#endif
};
-#ifdef ACPI_HELP_APP
+#if defined (ACPI_HELP_APP) || defined (ACPI_ASL_COMPILER)
#define EXCEP_TXT(name,description) {name, description}
#else
#define EXCEP_TXT(name,description) {name}
@@ -59,11 +59,11 @@ struct acpi_exception_info {
#define AE_OK (acpi_status) 0x0000
-#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
-#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
-#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
-#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
-#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
+#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
+#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML)
+#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
+#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
+#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
/*
* Environmental exceptions
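The move from a bitwise AND to a masked comparison above matters because the AE_CODE_* classes are numeric values, not independent flag bits. Assuming ACPICA's usual encoding (AE_CODE_PROGRAMMER 0x1000, AE_CODE_ACPI_TABLES 0x2000, AE_CODE_AML 0x3000, AE_CODE_MASK 0xF000), the old ACPI_AML_EXCEPTION(status) reduced to status & 0x3000 and so also fired for a programmer error such as 0x1001, while ACPI_ENV_EXCEPTION() degenerated to status & 0x0000, which is always false. The new (((status) & AE_CODE_MASK) == AE_CODE_xxx) form tests class membership exactly.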
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 8922edb32730..76aa6aa346ba 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -20,7 +20,9 @@
#define METHOD_NAME__CLS "_CLS"
#define METHOD_NAME__CRS "_CRS"
#define METHOD_NAME__DDN "_DDN"
+#define METHOD_NAME__DIS "_DIS"
#define METHOD_NAME__DMA "_DMA"
+#define METHOD_NAME__EVT "_EVT"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
#define METHOD_NAME__PLD "_PLD"
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index c5d900c0ecda..b1571dd96310 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -362,7 +362,7 @@
*
* A less-safe version of the macros is provided for optional use if the
* compiler uses excessive CPU stack (for example, this may happen in the
- * debug case if code optimzation is disabled.)
+ * debug case if code optimization is disabled.)
*/
/* Exit trace helper macro */
@@ -415,7 +415,7 @@
/* Conditional execution */
#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
+#define ACPI_DEBUG_ONLY_MEMBERS(a) a
#define _VERBOSE_STRUCTURES
/* Various object display routines for debug */
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index e3e8051d4812..8b4a497c1300 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index a3abcc4b7d9f..5de954e2b18a 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -12,11 +12,9 @@
#include <linux/device.h>
#include <linux/property.h>
-/* TBD: Make dynamic */
-#define ACPI_MAX_HANDLES 10
struct acpi_handle_list {
u32 count;
- acpi_handle handles[ACPI_MAX_HANDLES];
+ acpi_handle *handles;
};
/* acpi_utils.h */
@@ -27,11 +25,15 @@ acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list);
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
+bool acpi_handle_list_equal(struct acpi_handle_list *list1,
+ struct acpi_handle_list *list2);
+void acpi_handle_list_replace(struct acpi_handle_list *dst,
+ struct acpi_handle_list *src);
+void acpi_handle_list_free(struct acpi_handle_list *list);
+bool acpi_device_dep(acpi_handle target, acpi_handle match);
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
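With the conversion above, struct acpi_handle_list carries a dynamically allocated handles array and acpi_evaluate_reference() reports success as a bool, so each successful evaluation must now be paired with acpi_handle_list_free(). A minimal sketch under those assumptions (the _DEP evaluation and the logging are illustrative, not part of this patch):

	/* Assumes <linux/acpi.h>; error handling trimmed for brevity. */
	static void example_log_deps(acpi_handle handle)
	{
		struct acpi_handle_list deps = { 0 };
		u32 i;

		if (!acpi_evaluate_reference(handle, "_DEP", NULL, &deps))
			return;

		for (i = 0; i < deps.count; i++)
			acpi_handle_info(deps.handles[i], "dependency\n");

		acpi_handle_list_free(&deps);	/* releases the heap-allocated handles[] */
	}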
@@ -52,7 +54,7 @@ bool acpi_dock_match(acpi_handle handle);
bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
u64 rev, u64 func, union acpi_object *argv4);
-
+#ifdef CONFIG_ACPI
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
u64 func, union acpi_object *argv4,
@@ -68,6 +70,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
return obj;
}
+#endif
#define ACPI_INIT_DSM_ARGV4(cnt, eles) \
{ \
@@ -78,6 +81,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+bool acpi_reduced_hardware(void);
#ifdef CONFIG_ACPI
@@ -148,7 +152,7 @@ struct acpi_hotplug_context {
*/
typedef int (*acpi_op_add) (struct acpi_device * device);
-typedef int (*acpi_op_remove) (struct acpi_device * device);
+typedef void (*acpi_op_remove) (struct acpi_device *device);
typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
struct acpi_device_ops {
@@ -201,7 +205,8 @@ struct acpi_device_flags {
u32 coherent_dma:1;
u32 cca_seen:1;
u32 enumeration_by_parent:1;
- u32 reserved:19;
+ u32 honor_deps:1;
+ u32 reserved:18;
};
/* File System */
@@ -228,11 +233,13 @@ struct acpi_pnp_type {
u32 hardware_id:1;
u32 bus_address:1;
u32 platform_id:1;
- u32 reserved:29;
+ u32 backlight:1;
+ u32 reserved:28;
};
struct acpi_device_pnp {
acpi_bus_id bus_id; /* Object name */
+ int instance_no; /* Instance number of this object */
struct acpi_pnp_type type; /* ID type */
acpi_bus_address bus_address; /* _ADR */
char *unique_id; /* _UID */
@@ -276,6 +283,16 @@ struct acpi_device_power {
int state; /* Current state */
struct acpi_device_power_flags flags;
struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
+ u8 state_for_enumeration; /* Deepest power state for enumeration */
+};
+
+struct acpi_dep_data {
+ struct list_head node;
+ acpi_handle supplier;
+ acpi_handle consumer;
+ bool honor_dep;
+ bool met;
+ bool free_when_met;
};
/* Performance Management */
@@ -333,8 +350,9 @@ struct acpi_device_physical_node {
struct acpi_device_properties {
const guid_t *guid;
- const union acpi_object *properties;
+ union acpi_object *properties;
struct list_head list;
+ void **bufs;
};
/* ACPI Device Specific Data (_DSD) */
@@ -347,14 +365,104 @@ struct acpi_device_data {
struct acpi_gpio_mapping;
+#define ACPI_DEVICE_SWNODE_ROOT 0
+
+/*
+ * The maximum expected number of CSI-2 data lanes.
+ *
+ * This number is not expected to ever have to be equal to or greater than the
+ * number of bits in an unsigned long variable, but if it needs to be increased
+ * above that limit, code will need to be adjusted accordingly.
+ */
+#define ACPI_DEVICE_CSI2_DATA_LANES 8
+
+#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8
+
+enum acpi_device_swnode_dev_props {
+ ACPI_DEVICE_SWNODE_DEV_ROTATION,
+ ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY,
+ ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US,
+ ACPI_DEVICE_SWNODE_DEV_NUM_OF,
+ ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_port_props {
+ ACPI_DEVICE_SWNODE_PORT_REG,
+ ACPI_DEVICE_SWNODE_PORT_NUM_OF,
+ ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_ep_props {
+ ACPI_DEVICE_SWNODE_EP_REMOTE_EP,
+ ACPI_DEVICE_SWNODE_EP_BUS_TYPE,
+ ACPI_DEVICE_SWNODE_EP_REG,
+ ACPI_DEVICE_SWNODE_EP_CLOCK_LANES,
+ ACPI_DEVICE_SWNODE_EP_DATA_LANES,
+ ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES,
+ /* TX only */
+ ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES,
+ ACPI_DEVICE_SWNODE_EP_NUM_OF,
+ ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES
+};
+
+/*
+ * Each device has a root software node plus two times as many nodes as the
+ * number of CSI-2 ports.
+ */
+#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1)
+#define ACPI_DEVICE_SWNODE_EP(endpoint) \
+ (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1)
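Concretely, index 0 is the root node, ACPI_DEVICE_SWNODE_PORT(0) evaluates to 1 and ACPI_DEVICE_SWNODE_EP(0) to 2, port 1 maps to 3 and its endpoint to 4, and so on, for 2 * num_ports + 1 nodes per device in total.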
+
+/**
+ * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port
+ * @port_name: Port name.
+ * @data_lanes: "data-lanes" property values.
+ * @lane_polarities: "lane-polarities" property values.
+ * @link_frequencies: "link-frequencies" property values.
+ * @port_nr: Port number.
+ * @crs_csi2_local: _CRS CSI2 record present (i.e. this port is a transmitter).
+ * @port_props: Port properties.
+ * @ep_props: Endpoint properties.
+ * @remote_ep: Reference to the remote endpoint.
+ */
+struct acpi_device_software_node_port {
+ char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1];
+ u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES];
+ u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */];
+ u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES];
+ unsigned int port_nr;
+ bool crs_csi2_local;
+
+ struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES];
+ struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES];
+
+ struct software_node_ref_args remote_ep[1];
+};
+
+/**
+ * struct acpi_device_software_nodes - Software nodes for an ACPI device
+ * @dev_props: Device properties.
+ * @nodes: Software nodes for root as well as ports and endpoints.
+ * @nodeptrs: Array of software node pointers, for (un)registering them.
+ * @ports: Information related to each port and endpoint within a port.
+ * @num_ports: The number of ports.
+ */
+struct acpi_device_software_nodes {
+ struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES];
+ struct software_node *nodes;
+ const struct software_node **nodeptrs;
+ struct acpi_device_software_node_port *ports;
+ unsigned int num_ports;
+};
+
/* Device */
struct acpi_device {
+ u32 pld_crc;
int device_type;
acpi_handle handle; /* no handle for fixed hardware */
struct fwnode_handle fwnode;
- struct acpi_device *parent;
- struct list_head children;
- struct list_head node;
struct list_head wakeup_list;
struct list_head del_list;
struct acpi_device_status status;
@@ -367,7 +475,7 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
- struct acpi_driver *driver;
+ struct acpi_device_software_nodes *swnodes;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -448,6 +556,14 @@ static inline void *acpi_driver_data(struct acpi_device *d)
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
+{
+ if (adev->dev.parent)
+ return to_acpi_device(adev->dev.parent);
+
+ return NULL;
+}
+
static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
{
*((u32 *)&adev->status) = sta;
@@ -466,7 +582,14 @@ void acpi_initialize_hp_context(struct acpi_device *adev,
void (*uevent)(struct acpi_device *, u32));
/* acpi_device.dev.bus == &acpi_bus_type */
-extern struct bus_type acpi_bus_type;
+extern const struct bus_type acpi_bus_type;
+
+int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data);
+int acpi_dev_for_each_child(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *), void *data);
+int acpi_dev_for_each_child_reverse(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *),
+ void *data);
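With the children list gone from struct acpi_device (see the removal above), child traversal goes through these callback-based helpers; a nonzero return from the callback terminates the walk and is propagated to the caller. A sketch with a hypothetical counting callback:

	static int example_count_one(struct acpi_device *child, void *data)
	{
		(*(unsigned int *)data)++;
		return 0;	/* nonzero would stop the iteration early */
	}

	static unsigned int example_count_children(struct acpi_device *adev)
	{
		unsigned int count = 0;

		acpi_dev_for_each_child(adev, example_count_one, &count);
		return count;
	}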
/*
* Events
@@ -487,6 +610,12 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
int acpi_bus_attach_private_data(acpi_handle, void *);
void acpi_bus_detach_private_data(acpi_handle);
+int acpi_dev_install_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler, void *context);
+void acpi_dev_remove_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
@@ -495,9 +624,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
* External Functions
*/
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
-void acpi_bus_put_acpi_device(struct acpi_device *adev);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
@@ -507,9 +633,13 @@ const char *acpi_power_state_string(int state);
int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_device_fix_up_power(struct acpi_device *device);
+void acpi_device_fix_up_power_extended(struct acpi_device *adev);
+void acpi_device_fix_up_power_children(struct acpi_device *adev);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
+void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
+u8 acpi_dev_power_state_for_wake(struct acpi_device *adev);
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev);
void acpi_device_power_remove_dependent(struct acpi_device *adev,
@@ -535,8 +665,6 @@ int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids);
void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len);
-int acpi_create_dir(struct acpi_device *);
-void acpi_remove_dir(struct acpi_device *);
static inline bool acpi_device_enumerated(struct acpi_device *adev)
{
@@ -564,30 +692,41 @@ struct acpi_bus_type {
bool (*match)(struct device *dev);
struct acpi_device * (*find_companion)(struct device *);
void (*setup)(struct device *);
- void (*cleanup)(struct device *);
};
int register_acpi_bus_type(struct acpi_bus_type *);
int unregister_acpi_bus_type(struct acpi_bus_type *);
int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
+enum acpi_bridge_type {
+ ACPI_BRIDGE_TYPE_PCIE = 1,
+ ACPI_BRIDGE_TYPE_CXL,
+};
+
struct acpi_pci_root {
struct acpi_device * device;
struct pci_bus *bus;
u16 segment;
+ int bridge_type;
struct resource secondary; /* downstream bus range */
- u32 osc_support_set; /* _OSC state of support bits */
- u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_support_set; /* _OSC state of support bits */
+ u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_ext_support_set; /* _OSC state of extended support bits */
+ u32 osc_ext_control_set; /* _OSC state of extended control bits */
phys_addr_t mcfg_addr;
};
/* helper */
-bool acpi_dma_supported(struct acpi_device *adev);
+struct iommu_ops;
+
+bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
- u64 *size);
+int acpi_iommu_fwspec_init(struct device *dev, u32 id,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops);
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id);
static inline int acpi_dma_configure(struct device *dev,
@@ -597,6 +736,8 @@ static inline int acpi_dma_configure(struct device *dev,
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
+struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
+ acpi_bus_address adr);
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
@@ -604,9 +745,45 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_X86
-bool acpi_device_always_present(struct acpi_device *adev);
+bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
+bool acpi_quirk_skip_acpi_ac_and_battery(void);
+int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
+void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
+#else
+static inline bool acpi_device_override_status(struct acpi_device *adev,
+ unsigned long long *status)
+{
+ return false;
+}
+static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
+{
+ return false;
+}
+static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
+{
+ return 1;
+}
+static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
+{
+}
+static inline int
+acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ *skip = false;
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
+bool acpi_quirk_skip_gpio_event_handlers(void);
#else
-static inline bool acpi_device_always_present(struct acpi_device *adev)
+static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+{
+ return false;
+}
+static inline bool acpi_quirk_skip_gpio_event_handlers(void)
{
return false;
}
@@ -620,7 +797,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
bool acpi_pm_device_can_wakeup(struct device *dev);
int acpi_pm_device_sleep_state(struct device *, int *, int);
int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable);
#else
static inline void acpi_pm_wakeup_event(struct device *dev)
{
@@ -651,10 +827,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
return -ENODEV;
}
-static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
-{
- return -ENODEV;
-}
#endif
#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
@@ -686,14 +858,123 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
-bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+
+static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+
+ return hid1 && hid2 && !strcmp(hid1, hid2);
+}
+
+static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ const char *uid1 = acpi_device_uid(adev);
+
+ return uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+{
+ u64 uid1;
+
+ return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2;
+}
+
+#define TYPE_ENTRY(type, x) \
+ const type: x, \
+ type: x
+
+#define ACPI_STR_TYPES(match) \
+ TYPE_ENTRY(unsigned char *, match), \
+ TYPE_ENTRY(signed char *, match), \
+ TYPE_ENTRY(char *, match), \
+ TYPE_ENTRY(void *, match)
+
+/**
+ * acpi_dev_uid_match - Match device by supplied UID
+ * @adev: ACPI device to match.
+ * @uid2: Unique ID of the device.
+ *
+ * Matches UID in @adev with given @uid2.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+#define acpi_dev_uid_match(adev, uid2) \
+ _Generic(uid2, \
+ /* Treat @uid2 as a string for acpi string types */ \
+ ACPI_STR_TYPES(acpi_str_uid_match), \
+ /* Treat as an integer otherwise */ \
+ default: acpi_int_uid_match)(adev, uid2)
+
+/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * is treated as a match. If the caller wants to validate @uid2, it should be
+ * done before calling this function.
+ *
+ * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ */
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+ (!(uid2) || acpi_dev_uid_match(adev, uid2)))
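acpi_dev_uid_match() selects its helper with C11 _Generic on the type of its second argument, so a single macro serves both the string and the integer form of _UID, and acpi_dev_hid_uid_match() layers the HID check on top. A sketch with illustrative IDs (not taken from this patch):

	bool s = acpi_dev_uid_match(adev, "1");	/* string: acpi_str_uid_match() */
	bool n = acpi_dev_uid_match(adev, 1);	/* integer: acpi_int_uid_match() */
	bool h = acpi_dev_hid_uid_match(adev, "PNP0A08", 0);	/* HID only, _UID skipped */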
+
+void acpi_dev_clear_dependencies(struct acpi_device *supplier);
+bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+ struct acpi_device *start);
+
+/**
+ * for_each_acpi_consumer_dev - iterate over the consumer ACPI devices for a
+ * given supplier
+ * @supplier: Pointer to the supplier's ACPI device
+ * @consumer: Pointer to &struct acpi_device to hold the consumer, initially NULL
+ */
+#define for_each_acpi_consumer_dev(supplier, consumer) \
+ for (consumer = acpi_dev_get_next_consumer_dev(supplier, NULL); \
+ consumer; \
+ consumer = acpi_dev_get_next_consumer_dev(supplier, consumer))
struct acpi_device *
+acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
+struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
+/**
+ * for_each_acpi_dev_match - iterate over ACPI devices matching the criteria
+ * @adev: pointer to the matching ACPI device, NULL at the end of the loop
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * The caller is responsible for invoking acpi_dev_put() on the returned device.
+ */
+#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
+ for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \
+ adev; \
+ adev = acpi_dev_get_next_match_dev(adev, hid, uid, hrv))
+
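Per the kerneldoc above, the caller owns a reference on the device the iterator yields, so leaving the loop early means one reference is still held. A sketch with a made-up HID and a hypothetical predicate:

	struct acpi_device *adev;

	for_each_acpi_dev_match(adev, "ABCD0001", NULL, -1)
		if (example_wanted(adev))	/* hypothetical predicate */
			break;

	if (adev) {
		/* ... use adev, then balance the reference taken by the iterator */
		acpi_dev_put(adev);
	}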
+static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
+{
+ return adev ? to_acpi_device(get_device(&adev->dev)) : NULL;
+}
+
static inline void acpi_dev_put(struct acpi_device *adev)
{
- put_device(&adev->dev);
+ if (adev)
+ put_device(&adev->dev);
+}
+
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle);
+
+static inline void acpi_put_acpi_dev(struct acpi_device *adev)
+{
+ acpi_dev_put(adev);
}
#else /* CONFIG_ACPI */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 5eb175933a5b..b14d165632e7 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -12,25 +12,6 @@
#define ACPI_MAX_STRING 80
/*
- * Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst
- * if you add to this list.
- */
-#define ACPI_BUS_COMPONENT 0x00010000
-#define ACPI_AC_COMPONENT 0x00020000
-#define ACPI_BATTERY_COMPONENT 0x00040000
-#define ACPI_BUTTON_COMPONENT 0x00080000
-#define ACPI_SBS_COMPONENT 0x00100000
-#define ACPI_FAN_COMPONENT 0x00200000
-#define ACPI_PCI_COMPONENT 0x00400000
-#define ACPI_POWER_COMPONENT 0x00800000
-#define ACPI_CONTAINER_COMPONENT 0x01000000
-#define ACPI_SYSTEM_COMPONENT 0x02000000
-#define ACPI_THERMAL_COMPONENT 0x04000000
-#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000
-#define ACPI_VIDEO_COMPONENT 0x10000000
-#define ACPI_PROCESSOR_COMPONENT 0x20000000
-
-/*
* _HID definitions
* HIDs must conform to ACPI spec(6.1.4)
* Linux specific HIDs do not apply to this and begin with LNX:
@@ -46,6 +27,8 @@
#define ACPI_BAY_HID "LNXIOBAY"
#define ACPI_DOCK_HID "LNXDOCK"
#define ACPI_ECDT_HID "LNXEC"
+/* SMBUS HID definition as supported by Microsoft Windows */
+#define ACPI_SMBUS_MS_HID "SMB0001"
/* Quirk for broken IBM BIOSes */
#define ACPI_SMBUS_IBM_HID "SMBUSIBM"
@@ -64,14 +47,14 @@
-------------------------------------------------------------------------- */
-/* ACPI PCI Interrupt Link (pci_link.c) */
+/* ACPI PCI Interrupt Link */
int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
-/* ACPI PCI Device Binding (pci_bind.c) */
+/* ACPI PCI Device Binding */
struct pci_bus;
@@ -94,14 +77,6 @@ void pci_acpi_crs_quirks(void);
static inline void pci_acpi_crs_quirks(void) { }
#endif
-/* --------------------------------------------------------------------------
- Processor
- -------------------------------------------------------------------------- */
-
-#define ACPI_PROCESSOR_LIMIT_NONE 0x00
-#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01
-#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02
-
/*--------------------------------------------------------------------------
Dock Station
-------------------------------------------------------------------------- */
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 12d8bd333fe7..027faa8883aa 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -21,7 +21,7 @@ void __iomem __ref
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
#endif
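With the new return type, callers receive the mapping directly and must treat NULL as failure (presumably, for instance, when the GAS does not describe a system-memory address). A hedged sketch, assuming gas is a valid struct acpi_generic_address:

	void __iomem *base = acpi_os_map_generic_address(&gas);

	if (!base)
		return -ENXIO;

	/* ... access the register block, e.g. readl(base) ... */

	acpi_os_unmap_generic_address(&gas);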
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index fdebcfc6c8df..b5f594754a9e 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -3,7 +3,6 @@
#define __ACPI_NUMA_H
#ifdef CONFIG_ACPI_NUMA
-#include <linux/kernel.h>
#include <linux/numa.h>
/* Proximity bitmap length */
@@ -17,10 +16,30 @@ extern int pxm_to_node(int);
extern int node_to_pxm(int);
extern int acpi_map_pxm_to_node(int);
extern unsigned char acpi_srat_revision;
-extern int acpi_numa __initdata;
+extern void disable_srat(void);
extern void bad_srat(void);
extern int srat_disabled(void);
+#else /* CONFIG_ACPI_NUMA */
+static inline void disable_srat(void)
+{
+}
+static inline int pxm_to_node(int pxm)
+{
+ return 0;
+}
+static inline int node_to_pxm(int node)
+{
+ return 0;
+}
#endif /* CONFIG_ACPI_NUMA */
-#endif /* __ACP_NUMA_H */
+
+#ifdef CONFIG_ACPI_HMAT
+extern void disable_hmat(void);
+#else /* CONFIG_ACPI_HMAT */
+static inline void disable_hmat(void)
+{
+}
+#endif /* CONFIG_ACPI_HMAT */
+#endif /* __ACPI_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 33bb8c9a089d..914c029f64c9 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9dc816641286..3d90716f9522 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20200717
+#define ACPI_CA_VERSION 0x20230628
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -454,9 +454,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
* ACPI table load/unload interfaces
*/
ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
- acpi_install_table(acpi_physical_address address,
- u8 physical))
+ acpi_install_table(struct acpi_table_header *table))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_install_physical_table(acpi_physical_address
+ address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_load_table(struct acpi_table_header *table,
u32 *table_idx))
@@ -524,7 +526,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
struct acpi_buffer *ret_path_ptr))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_handle(acpi_handle parent,
- acpi_string pathname,
+ const char *pathname,
acpi_handle *ret_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_attach_data(acpi_handle object,
@@ -587,82 +589,92 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_install_initialization_handler
(acpi_init_handler handler, u32 function))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_sci_handler(acpi_sci_handler
- address,
- void *context))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_remove_sci_handler(acpi_sci_handler
- address))
+ acpi_install_sci_handler(acpi_sci_handler
+ address,
+ void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_global_event_handler
- (acpi_gbl_event_handler handler,
- void *context))
+ acpi_remove_sci_handler(acpi_sci_handler
+ address))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_fixed_event_handler(u32
- acpi_event,
- acpi_event_handler
- handler,
- void
- *context))
+ acpi_install_global_event_handler
+ (acpi_gbl_event_handler handler,
+ void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_remove_fixed_event_handler(u32 acpi_event,
+ acpi_install_fixed_event_handler(u32
+ acpi_event,
acpi_event_handler
- handler))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_gpe_handler(acpi_handle
- gpe_device,
- u32 gpe_number,
- u32 type,
- acpi_gpe_handler
- address,
- void *context))
+ handler,
+ void
+ *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_gpe_raw_handler(acpi_handle
- gpe_device,
- u32 gpe_number,
- u32 type,
- acpi_gpe_handler
- address,
- void *context))
+ acpi_remove_fixed_event_handler(u32 acpi_event,
+ acpi_event_handler
+ handler))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_remove_gpe_handler(acpi_handle gpe_device,
+ acpi_install_gpe_handler(acpi_handle
+ gpe_device,
u32 gpe_number,
+ u32 type,
acpi_gpe_handler
- address))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_notify_handler(acpi_handle device,
- u32 handler_type,
- acpi_notify_handler
- handler,
+ address,
void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_gpe_raw_handler(acpi_handle
+ gpe_device,
+ u32 gpe_number,
+ u32 type,
+ acpi_gpe_handler
+ address,
+ void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_remove_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ acpi_gpe_handler
+ address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_remove_notify_handler(acpi_handle device,
+ acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
acpi_notify_handler
- handler))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_address_space_handler(acpi_handle
- device,
- acpi_adr_space_type
- space_id,
- acpi_adr_space_handler
- handler,
- acpi_adr_space_setup
- setup,
- void *context))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_remove_address_space_handler(acpi_handle
+ handler,
+ void *context))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_remove_notify_handler(acpi_handle device,
+ u32 handler_type,
+ acpi_notify_handler
+ handler))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_address_space_handler(acpi_handle
device,
acpi_adr_space_type
space_id,
acpi_adr_space_handler
- handler))
+ handler,
+ acpi_adr_space_setup
+ setup,
+ void *context))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_address_space_handler_no_reg
+ (acpi_handle device, acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup,
+ void *context))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_exception_handler
- (acpi_exception_handler handler))
+ acpi_execute_reg_methods(acpi_handle device,
+ acpi_adr_space_type
+ space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_interface_handler
- (acpi_interface_handler handler))
+ acpi_remove_address_space_handler(acpi_handle
+ device,
+ acpi_adr_space_type
+ space_id,
+ acpi_adr_space_handler
+ handler))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_exception_handler
+ (acpi_exception_handler handler))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_interface_handler
+ (acpi_interface_handler handler))
/*
* Global Lock interfaces
@@ -749,6 +761,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_event_status
*event_status))
ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
@@ -957,8 +970,6 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
void **data,
void (*callback)(void *)))
-void acpi_run_debugger(char *batch_buffer);
-
void acpi_set_debugger_thread_id(acpi_thread_id thread_id);
#endif /* __ACXFACE_H__ */
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index d3521894ce6a..efef208b0324 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -142,7 +142,10 @@ struct acpi_resource_irq {
u8 shareable;
u8 wake_capable;
u8 interrupt_count;
- u8 interrupts[1];
+ union {
+ u8 interrupt;
+ ACPI_FLEX_ARRAY(u8, interrupts);
+ };
};
struct acpi_resource_dma {
@@ -150,7 +153,10 @@ struct acpi_resource_dma {
u8 bus_master;
u8 transfer;
u8 channel_count;
- u8 channels[1];
+ union {
+ u8 channel;
+ ACPI_FLEX_ARRAY(u8, channels);
+ };
};
struct acpi_resource_start_dependent {
@@ -194,7 +200,7 @@ struct acpi_resource_fixed_dma {
struct acpi_resource_vendor {
u16 byte_length;
- u8 byte_data[1];
+ u8 byte_data[];
};
/* Vendor resource with UUID info (introduced in ACPI 3.0) */
@@ -203,7 +209,7 @@ struct acpi_resource_vendor_typed {
u16 byte_length;
u8 uuid_subtype;
u8 uuid[ACPI_UUID_LENGTH];
- u8 byte_data[1];
+ u8 byte_data[];
};
struct acpi_resource_end_tag {
@@ -332,7 +338,10 @@ struct acpi_resource_extended_irq {
u8 wake_capable;
u8 interrupt_count;
struct acpi_resource_source resource_source;
- u32 interrupts[1];
+ union {
+ u32 interrupt;
+ ACPI_FLEX_ARRAY(u32, interrupts);
+ };
};
struct acpi_resource_generic_register {
@@ -381,7 +390,7 @@ struct acpi_resource_gpio {
#define ACPI_IO_RESTRICT_OUTPUT 2
#define ACPI_IO_RESTRICT_NONE_PRESERVE 3
-/* Common structure for I2C, SPI, and UART serial descriptors */
+/* Common structure for I2C, SPI, UART, and CSI2 serial descriptors */
#define ACPI_RESOURCE_SERIAL_COMMON \
u8 revision_id; \
@@ -403,6 +412,7 @@ ACPI_RESOURCE_SERIAL_COMMON};
#define ACPI_RESOURCE_SERIAL_TYPE_I2C 1
#define ACPI_RESOURCE_SERIAL_TYPE_SPI 2
#define ACPI_RESOURCE_SERIAL_TYPE_UART 3
+#define ACPI_RESOURCE_SERIAL_TYPE_CSI2 4
/* Values for slave_mode field above */
@@ -505,6 +515,11 @@ struct acpi_resource_uart_serialbus {
#define ACPI_UART_CLEAR_TO_SEND (1<<6)
#define ACPI_UART_REQUEST_TO_SEND (1<<7)
+struct acpi_resource_csi2_serialbus {
+ ACPI_RESOURCE_SERIAL_COMMON u8 local_port_instance;
+ u8 phy_type;
+};
+
struct acpi_resource_pin_function {
u8 revision_id;
u8 pin_config;
@@ -530,6 +545,15 @@ struct acpi_resource_pin_config {
u8 *vendor_data;
};
+struct acpi_resource_clock_input {
+ u8 revision_id;
+ u8 mode;
+ u8 scale;
+ u16 frequency_divisor;
+ u32 frequency_numerator;
+ struct acpi_resource_source resource_source;
+};
+
/* Values for pin_config_type field above */
#define ACPI_PIN_CONFIG_DEFAULT 0
@@ -607,7 +631,8 @@ struct acpi_resource_pin_group_config {
#define ACPI_RESOURCE_TYPE_PIN_GROUP 22 /* ACPI 6.2 */
#define ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION 23 /* ACPI 6.2 */
#define ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG 24 /* ACPI 6.2 */
-#define ACPI_RESOURCE_TYPE_MAX 24
+#define ACPI_RESOURCE_TYPE_CLOCK_INPUT 25 /* ACPI 6.5 */
+#define ACPI_RESOURCE_TYPE_MAX 25
/* Master union for resource descriptors */
@@ -634,12 +659,14 @@ union acpi_resource_data {
struct acpi_resource_i2c_serialbus i2c_serial_bus;
struct acpi_resource_spi_serialbus spi_serial_bus;
struct acpi_resource_uart_serialbus uart_serial_bus;
+ struct acpi_resource_csi2_serialbus csi2_serial_bus;
struct acpi_resource_common_serialbus common_serial_bus;
struct acpi_resource_pin_function pin_function;
struct acpi_resource_pin_config pin_config;
struct acpi_resource_pin_group pin_group;
struct acpi_resource_pin_group_function pin_group_function;
struct acpi_resource_pin_group_config pin_group_config;
+ struct acpi_resource_clock_input clock_input;
/* Common fields */
@@ -672,7 +699,10 @@ struct acpi_pci_routing_table {
u32 pin;
u64 address; /* here for 64-bit alignment */
u32 source_index;
- char source[4]; /* pad to 64 bits so sizeof() works in all cases */
+ union {
+ char pad[4]; /* pad to 64 bits so sizeof() works in all cases */
+ ACPI_FLEX_ARRAY(char, source);
+ };
};
#endif /* __ACRESTYP_H__ */
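The unions introduced above keep sizeof() of the affected structures unchanged (the lone named member preserves the old [1] footprint) while letting multi-element access go through a genuine flexible array member. A sketch of consuming an extended IRQ descriptor under that assumption, given a valid struct acpi_resource *res:

	if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
		struct acpi_resource_extended_irq *eirq = &res->data.extended_irq;
		u8 i;

		for (i = 0; i < eirq->interrupt_count; i++)
			pr_info("GSI %u\n", eirq->interrupts[i]);
	}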
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 5007c41f4d54..451f6276da49 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -307,7 +307,8 @@ enum acpi_preferred_pm_profiles {
PM_SOHO_SERVER = 5,
PM_APPLIANCE_PC = 6,
PM_PERFORMANCE_SERVER = 7,
- PM_TABLET = 8
+ PM_TABLET = 8,
+ NR_PM_PROFILES = 9
};
/* Values for sleep_status and sleep_control registers (V5+ FADT) */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 43549547ed3e..a33375e055ad 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -24,10 +24,13 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_AEST "AEST" /* Arm Error Source Table */
#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_ASPT "ASPT" /* AMD Secure Processor Table */
#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */
#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CEDT "CEDT" /* CXL Early Discovery Table */
#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */
#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */
@@ -43,10 +46,13 @@
#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
+#define ACPI_SIG_NBFT "NBFT" /* NVMe Boot Firmware Table */
+
/* Reserved table signatures */
#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
@@ -104,6 +110,51 @@ struct acpi_whea_header {
u64 mask; /* Bitmask required for this register instruction */
};
+/* https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/acpitabl/ns-acpitabl-aspt_table */
+#define ASPT_REVISION_ID 0x01
+struct acpi_table_aspt {
+ struct acpi_table_header header;
+ u32 num_entries;
+};
+
+struct acpi_aspt_header {
+ u16 type;
+ u16 length;
+};
+
+enum acpi_aspt_type {
+ ACPI_ASPT_TYPE_GLOBAL_REGS = 0,
+ ACPI_ASPT_TYPE_SEV_MBOX_REGS = 1,
+ ACPI_ASPT_TYPE_ACPI_MBOX_REGS = 2,
+};
+
+/* 0: ASPT Global Registers */
+struct acpi_aspt_global_regs {
+ struct acpi_aspt_header header;
+ u32 reserved;
+ u64 feature_reg_addr;
+ u64 irq_en_reg_addr;
+ u64 irq_st_reg_addr;
+};
+
+/* 1: ASPT SEV Mailbox Registers */
+struct acpi_aspt_sev_mbox_regs {
+ struct acpi_aspt_header header;
+ u8 mbox_irq_id;
+ u8 reserved[3];
+ u64 cmd_resp_reg_addr;
+ u64 cmd_buf_lo_reg_addr;
+ u64 cmd_buf_hi_reg_addr;
+};
+
+/* 2: ASPT ACPI Mailbox Registers */
+struct acpi_aspt_acpi_mbox_regs {
+ struct acpi_aspt_header header;
+ u32 reserved1;
+ u64 cmd_resp_reg_addr;
+ u64 reserved2[2];
+};
+
/*******************************************************************************
*
* ASF - Alert Standard Format table (Signature "ASF!")
@@ -303,6 +354,241 @@ struct acpi_table_boot {
/*******************************************************************************
*
+ * CDAT - Coherent Device Attribute Table
+ * Version 1
+ *
+ * Conforms to the "Coherent Device Attribute Table (CDAT) Specification
+ " (Revision 1.01, October 2020.)
+ *
+ ******************************************************************************/
+
+struct acpi_table_cdat {
+ u32 length; /* Length of table in bytes, including this header */
+ u8 revision; /* ACPI Specification minor version number */
+ u8 checksum; /* To make sum of entire table == 0 */
+ u8 reserved[6];
+ u32 sequence; /* Used to detect runtime CDAT table changes */
+};
+
+/* CDAT common subtable header */
+
+struct acpi_cdat_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_cdat_type {
+ ACPI_CDAT_TYPE_DSMAS = 0,
+ ACPI_CDAT_TYPE_DSLBIS = 1,
+ ACPI_CDAT_TYPE_DSMSCIS = 2,
+ ACPI_CDAT_TYPE_DSIS = 3,
+ ACPI_CDAT_TYPE_DSEMTS = 4,
+ ACPI_CDAT_TYPE_SSLBIS = 5,
+ ACPI_CDAT_TYPE_RESERVED = 6 /* 6 through 0xFF are reserved */
+};
+
+/* Subtable 0: Device Scoped Memory Affinity Structure (DSMAS) */
+
+struct acpi_cdat_dsmas {
+ u8 dsmad_handle;
+ u8 flags;
+ u16 reserved;
+ u64 dpa_base_address;
+ u64 dpa_length;
+};
+
+/* Flags for subtable above */
+
+#define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2)
+
+/* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
+
+struct acpi_cdat_dslbis {
+ u8 handle;
+ u8 flags; /* If Handle matches a DSMAS handle, the definition of this field matches
+ * Flags field in HMAT System Locality Latency */
+ u8 data_type;
+ u8 reserved;
+ u64 entry_base_unit;
+ u16 entry[3];
+ u16 reserved2;
+};
+
+/* Subtable 2: Device Scoped Memory Side Cache Information Structure (DSMSCIS) */
+
+struct acpi_cdat_dsmscis {
+ u8 dsmas_handle;
+ u8 reserved[3];
+ u64 side_cache_size;
+ u32 cache_attributes;
+};
+
+/* Subtable 3: Device Scoped Initiator Structure (DSIS) */
+
+struct acpi_cdat_dsis {
+ u8 flags;
+ u8 handle;
+ u16 reserved;
+};
+
+/* Flags for above subtable */
+
+#define ACPI_CDAT_DSIS_MEM_ATTACHED (1 << 0)
+
+/* Subtable 4: Device Scoped EFI Memory Type Structure (DSEMTS) */
+
+struct acpi_cdat_dsemts {
+ u8 dsmas_handle;
+ u8 memory_type;
+ u16 reserved;
+ u64 dpa_offset;
+ u64 range_length;
+};
+
+/* Subtable 5: Switch Scoped Latency and Bandwidth Information Structure (SSLBIS) */
+
+struct acpi_cdat_sslbis {
+ u8 data_type;
+ u8 reserved[3];
+ u64 entry_base_unit;
+};
+
+/* Sub-subtable for above, sslbe_entries field */
+
+struct acpi_cdat_sslbe {
+ u16 portx_id;
+ u16 porty_id;
+ u16 latency_or_bandwidth;
+ u16 reserved;
+};
+
+#define ACPI_CDAT_SSLBIS_US_PORT 0x0100
+#define ACPI_CDAT_SSLBIS_ANY_PORT 0xffff
+
+/*******************************************************************************
+ *
+ * CEDT - CXL Early Discovery Table
+ * Version 1
+ *
+ * Conforms to the "CXL Early Discovery Table" (CXL 2.0, October 2020)
+ *
+ ******************************************************************************/
+
+struct acpi_table_cedt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* CEDT subtable header (Performance Record Structure) */
+
+struct acpi_cedt_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_cedt_type {
+ ACPI_CEDT_TYPE_CHBS = 0,
+ ACPI_CEDT_TYPE_CFMWS = 1,
+ ACPI_CEDT_TYPE_CXIMS = 2,
+ ACPI_CEDT_TYPE_RDPAS = 3,
+ ACPI_CEDT_TYPE_RESERVED = 4,
+};
+
+/* Values for version field above */
+
+#define ACPI_CEDT_CHBS_VERSION_CXL11 (0)
+#define ACPI_CEDT_CHBS_VERSION_CXL20 (1)
+
+/* Values for length field above */
+
+#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000)
+#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000)
+
+/*
+ * CEDT subtables
+ */
+
+/* 0: CXL Host Bridge Structure */
+
+struct acpi_cedt_chbs {
+ struct acpi_cedt_header header;
+ u32 uid;
+ u32 cxl_version;
+ u32 reserved;
+ u64 base;
+ u64 length;
+};
+
+/* 1: CXL Fixed Memory Window Structure */
+
+struct acpi_cedt_cfmws {
+ struct acpi_cedt_header header;
+ u32 reserved1;
+ u64 base_hpa;
+ u64 window_size;
+ u8 interleave_ways;
+ u8 interleave_arithmetic;
+ u16 reserved2;
+ u32 granularity;
+ u16 restrictions;
+ u16 qtg_id;
+ u32 interleave_targets[];
+};
+
+struct acpi_cedt_cfmws_target_element {
+ u32 interleave_target;
+};
+
+/* Values for Interleave Arithmetic field above */
+
+#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0)
+#define ACPI_CEDT_CFMWS_ARITHMETIC_XOR (1)
+
+/* Values for Restrictions field above */
+
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2)
+#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
+#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+
+/* 2: CXL XOR Interleave Math Structure */
+
+struct acpi_cedt_cxims {
+ struct acpi_cedt_header header;
+ u16 reserved1;
+ u8 hbig;
+ u8 nr_xormaps;
+ u64 xormap_list[];
+};
+
+/* 3: CXL RCEC Downstream Port Association Structure */
+
+struct acpi_cedt_rdpas {
+ struct acpi_cedt_header header;
+ u8 reserved1;
+ u16 length;
+ u16 segment;
+ u16 bdf;
+ u8 protocol;
+ u64 address;
+};
+
+/* Masks for bdf field above */
+#define ACPI_CEDT_RDPAS_BUS_MASK 0xff00
+#define ACPI_CEDT_RDPAS_DEVICE_MASK 0x00f8
+#define ACPI_CEDT_RDPAS_FUNCTION_MASK 0x0007
+
+#define ACPI_CEDT_RDPAS_PROTOCOL_IO (0)
+#define ACPI_CEDT_RDPAS_PROTOCOL_CACHEMEM (1)
+
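A CEDT is a flat sequence of variable-length subtables behind the fixed header, each led by struct acpi_cedt_header; consumers advance by the length field. A hedged walking sketch (validation and bounds checks trimmed for brevity):

	static void example_walk_cedt(struct acpi_table_cedt *cedt)
	{
		char *p = (char *)cedt + sizeof(*cedt);
		char *end = (char *)cedt + cedt->header.length;

		while (p < end) {
			struct acpi_cedt_header *sub = (struct acpi_cedt_header *)p;

			if (sub->type == ACPI_CEDT_TYPE_CHBS) {
				struct acpi_cedt_chbs *chbs = (struct acpi_cedt_chbs *)sub;

				pr_info("CXL host bridge uid %u at %#llx\n",
					chbs->uid, chbs->base);
			}
			p += sub->length;	/* each subtable declares its own size */
		}
	}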
+/*******************************************************************************
+ *
* CPEP - Corrected Platform Error Polling table (ACPI 4.0)
* Version 1
*
@@ -399,7 +685,7 @@ struct acpi_csrt_descriptor {
* DBG2 - Debug Port Table 2
* Version 0 (Both main table and subtables)
*
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", September 21, 2020
*
******************************************************************************/
@@ -449,11 +735,24 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_COMPATIBLE 0x0000
#define ACPI_DBG2_16550_SUBSET 0x0001
+#define ACPI_DBG2_MAX311XE_SPI 0x0002
#define ACPI_DBG2_ARM_PL011 0x0003
+#define ACPI_DBG2_MSM8X60 0x0004
+#define ACPI_DBG2_16550_NVIDIA 0x0005
+#define ACPI_DBG2_TI_OMAP 0x0006
+#define ACPI_DBG2_APM88XXXX 0x0008
+#define ACPI_DBG2_MSM8974 0x0009
+#define ACPI_DBG2_SAM5250 0x000A
+#define ACPI_DBG2_INTEL_USIF 0x000B
+#define ACPI_DBG2_IMX6 0x000C
#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
#define ACPI_DBG2_ARM_DCC 0x000F
#define ACPI_DBG2_BCM2835 0x0010
+#define ACPI_DBG2_SDM845_1_8432MHZ 0x0011
+#define ACPI_DBG2_16550_WITH_GAS 0x0012
+#define ACPI_DBG2_SDM845_7_372MHZ 0x0013
+#define ACPI_DBG2_INTEL_LPSS 0x0014
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -514,7 +813,8 @@ enum acpi_dmar_type {
ACPI_DMAR_TYPE_ROOT_ATS = 2,
ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
ACPI_DMAR_TYPE_NAMESPACE = 4,
- ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_DMAR_TYPE_SATC = 5,
+ ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */
};
/* DMAR Device Scope structure */
@@ -553,7 +853,7 @@ struct acpi_dmar_pci_path {
struct acpi_dmar_hardware_unit {
struct acpi_dmar_header header;
u8 flags;
- u8 reserved;
+ u8 size; /* Size of the register set */
u16 segment;
u64 address; /* Register Base Address */
};
@@ -604,9 +904,20 @@ struct acpi_dmar_andd {
struct acpi_dmar_header header;
u8 reserved[3];
u8 device_number;
- char device_name[1];
+ union {
+ char __pad;
+ ACPI_FLEX_ARRAY(char, device_name);
+ };
};
+/* 5: SOC Integrated Address Translation Cache Reporting Structure */
+
+struct acpi_dmar_satc {
+ struct acpi_dmar_header header;
+ u8 flags;
+ u8 reserved;
+ u16 segment;
+};
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
@@ -639,7 +950,7 @@ struct acpi_table_drtm {
struct acpi_drtm_vtable_list {
u32 validated_table_count;
- u64 validated_tables[1];
+ u64 validated_tables[];
};
/* 2) Resources List (of Resource Descriptors) */
@@ -654,7 +965,7 @@ struct acpi_drtm_resource {
struct acpi_drtm_resource_list {
u32 resource_count;
- struct acpi_drtm_resource resources[1];
+ struct acpi_drtm_resource resources[];
};
/* 3) Platform-specific Identifiers List */
@@ -677,7 +988,7 @@ struct acpi_table_ecdt {
struct acpi_generic_address data; /* Address of EC data register */
u32 uid; /* Unique ID - must be same as the EC _UID method */
u8 gpe; /* The GPE for the EC */
- u8 id[1]; /* Full namepath of the EC in the ACPI namespace */
+ u8 id[]; /* Full namepath of the EC in the ACPI namespace */
};
/*******************************************************************************
@@ -1436,7 +1747,8 @@ struct acpi_hmat_locality {
struct acpi_hmat_structure header;
u8 flags;
u8 data_type;
- u16 reserved1;
+ u8 min_transfer_size;
+ u8 reserved1;
u32 number_of_initiator_Pds;
u32 number_of_target_Pds;
u32 reserved2;
@@ -1445,15 +1757,18 @@ struct acpi_hmat_locality {
/* Masks for Flags field above */
-#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F)
+#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) /* Bits 0-3 */
-/* Values for Memory Hierarchy flag */
+/* Values for Memory Hierarchy flags */
#define ACPI_HMAT_MEMORY 0
#define ACPI_HMAT_LAST_LEVEL_CACHE 1
#define ACPI_HMAT_1ST_LEVEL_CACHE 2
#define ACPI_HMAT_2ND_LEVEL_CACHE 3
#define ACPI_HMAT_3RD_LEVEL_CACHE 4
+#define ACPI_HMAT_MINIMUM_XFER_SIZE 0x10 /* Bit 4: ACPI 6.4 */
+#define ACPI_HMAT_NON_SEQUENTIAL_XFERS 0x20 /* Bit 5: ACPI 6.4 */
+
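As a small sketch (not part of the header), the hierarchy level occupies the low nibble of the flags field above, so it can be decoded like this:

	static unsigned int hmat_hierarchy(struct acpi_hmat_locality *hmat)
	{
		/* Bits 0-3 select the level; bits 4 and 5 are ACPI 6.4 flags */
		return hmat->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	}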
/* Values for data_type field above */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ec66779cb193..9775384d61c6 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -3,7 +3,7 @@
*
* Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -24,26 +24,36 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_AGDI "AGDI" /* Arm Generic Diagnostic Dump and Reset Device Interface */
+#define ACPI_SIG_APMT "APMT" /* Arm Performance Monitoring Unit table */
+#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
+#define ACPI_SIG_CCEL "CCEL" /* CC Event Log Table */
+#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
+#define ACPI_SIG_MPAM "MPAM" /* Memory System Resource Partitioning and Monitoring Table */
#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
-#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
-#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
+#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
+#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
+#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */
+#define ACPI_SIG_RHCT "RHCT" /* RISC-V Hart Capabilities Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
-#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */
+#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -65,10 +75,308 @@
/*******************************************************************************
*
+ * AEST - Arm Error Source Table
+ *
+ * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 Platform Design Document
+ * September 2020.
+ *
+ ******************************************************************************/
+
+struct acpi_table_aest {
+ struct acpi_table_header header;
+};
+
+/* Common Subtable header - one per Node Structure (Subtable) */
+
+struct acpi_aest_hdr {
+ u8 type;
+ u16 length;
+ u8 reserved;
+ u32 node_specific_offset;
+ u32 node_interface_offset;
+ u32 node_interrupt_offset;
+ u32 node_interrupt_count;
+ u64 timestamp_rate;
+ u64 reserved1;
+ u64 error_injection_rate;
+};
+
+/* Values for Type above */
+
+#define ACPI_AEST_PROCESSOR_ERROR_NODE 0
+#define ACPI_AEST_MEMORY_ERROR_NODE 1
+#define ACPI_AEST_SMMU_ERROR_NODE 2
+#define ACPI_AEST_VENDOR_ERROR_NODE 3
+#define ACPI_AEST_GIC_ERROR_NODE 4
+#define ACPI_AEST_NODE_TYPE_RESERVED 5 /* 5 and above are reserved */
+
+/*
+ * AEST subtables (Error nodes)
+ */
+
+/* 0: Processor Error */
+
+typedef struct acpi_aest_processor {
+ u32 processor_id;
+ u8 resource_type;
+ u8 reserved;
+ u8 flags;
+ u8 revision;
+ u64 processor_affinity;
+
+} acpi_aest_processor;
+
+/* Values for resource_type above, related structs below */
+
+#define ACPI_AEST_CACHE_RESOURCE 0
+#define ACPI_AEST_TLB_RESOURCE 1
+#define ACPI_AEST_GENERIC_RESOURCE 2
+#define ACPI_AEST_RESOURCE_RESERVED 3 /* 3 and above are reserved */
+
+/* 0R: Processor Cache Resource Substructure */
+
+typedef struct acpi_aest_processor_cache {
+ u32 cache_reference;
+ u32 reserved;
+
+} acpi_aest_processor_cache;
+
+/* Values for cache_type above */
+
+#define ACPI_AEST_CACHE_DATA 0
+#define ACPI_AEST_CACHE_INSTRUCTION 1
+#define ACPI_AEST_CACHE_UNIFIED 2
+#define ACPI_AEST_CACHE_RESERVED 3 /* 3 and above are reserved */
+
+/* 1R: Processor TLB Resource Substructure */
+
+typedef struct acpi_aest_processor_tlb {
+ u32 tlb_level;
+ u32 reserved;
+
+} acpi_aest_processor_tlb;
+
+/* 2R: Processor Generic Resource Substructure */
+
+typedef struct acpi_aest_processor_generic {
+ u32 resource;
+
+} acpi_aest_processor_generic;
+
+/* 1: Memory Error */
+
+typedef struct acpi_aest_memory {
+ u32 srat_proximity_domain;
+
+} acpi_aest_memory;
+
+/* 2: SMMU Error */
+
+typedef struct acpi_aest_smmu {
+ u32 iort_node_reference;
+ u32 subcomponent_reference;
+
+} acpi_aest_smmu;
+
+/* 3: Vendor Defined */
+
+typedef struct acpi_aest_vendor {
+ u32 acpi_hid;
+ u32 acpi_uid;
+ u8 vendor_specific_data[16];
+
+} acpi_aest_vendor;
+
+/* 4: GIC Error */
+
+typedef struct acpi_aest_gic {
+ u32 interface_type;
+ u32 instance_id;
+
+} acpi_aest_gic;
+
+/* Values for interface_type above */
+
+#define ACPI_AEST_GIC_CPU 0
+#define ACPI_AEST_GIC_DISTRIBUTOR 1
+#define ACPI_AEST_GIC_REDISTRIBUTOR 2
+#define ACPI_AEST_GIC_ITS 3
+#define ACPI_AEST_GIC_RESERVED 4 /* 4 and above are reserved */
+
+/* Node Interface Structure */
+
+typedef struct acpi_aest_node_interface {
+ u8 type;
+ u8 reserved[3];
+ u32 flags;
+ u64 address;
+ u32 error_record_index;
+ u32 error_record_count;
+ u64 error_record_implemented;
+ u64 error_status_reporting;
+ u64 addressing_mode;
+
+} acpi_aest_node_interface;
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_SYSTEM_REGISTER 0
+#define ACPI_AEST_NODE_MEMORY_MAPPED 1
+#define ACPI_AEST_XFACE_RESERVED 2 /* 2 and above are reserved */
+
+/* Node Interrupt Structure */
+
+typedef struct acpi_aest_node_interrupt {
+ u8 type;
+ u8 reserved[2];
+ u8 flags;
+ u32 gsiv;
+ u8 iort_id;
+ u8 reserved1[3];
+
+} acpi_aest_node_interrupt;
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_FAULT_HANDLING 0
+#define ACPI_AEST_NODE_ERROR_RECOVERY 1
+#define ACPI_AEST_XRUPT_RESERVED 2 /* 2 and above are reserved */
+
+/*******************************************************************************
+ * AGDI - Arm Generic Diagnostic Dump and Reset Device Interface
+ *
+ * Conforms to "ACPI for Arm Components 1.1, Platform Design Document"
+ * ARM DEN0093 v1.1
+ *
+ ******************************************************************************/
+struct acpi_table_agdi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 flags;
+ u8 reserved[3];
+ u32 sdei_event;
+ u32 gsiv;
+};
+
+/* Mask for Flags field above */
+
+#define ACPI_AGDI_SIGNALING_MODE (1)
+
+/*******************************************************************************
+ *
+ * APMT - ARM Performance Monitoring Unit Table
+ *
+ * Conforms to:
+ * ARM Performance Monitoring Unit Architecture 1.0 Platform Design Document
+ * ARM DEN0117 v1.0 November 25, 2021
+ *
+ ******************************************************************************/
+
+struct acpi_table_apmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+#define ACPI_APMT_NODE_ID_LENGTH 4
+
+/*
+ * APMT subtables
+ */
+struct acpi_apmt_node {
+ u16 length;
+ u8 flags;
+ u8 type;
+ u32 id;
+ u64 inst_primary;
+ u32 inst_secondary;
+ u64 base_address0;
+ u64 base_address1;
+ u32 ovflw_irq;
+ u32 reserved;
+ u32 ovflw_irq_flags;
+ u32 proc_affinity;
+ u32 impl_id;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE (1<<0)
+#define ACPI_APMT_FLAGS_AFFINITY (1<<1)
+#define ACPI_APMT_FLAGS_ATOMIC (1<<2)
+
+/* Values for Flags dual page field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE_NSUPP (0<<0)
+#define ACPI_APMT_FLAGS_DUAL_PAGE_SUPP (1<<0)
+
+/* Values for Flags processor affinity field above */
+#define ACPI_APMT_FLAGS_AFFINITY_PROC (0<<1)
+#define ACPI_APMT_FLAGS_AFFINITY_PROC_CONTAINER (1<<1)
+
+/* Values for Flags 64-bit atomic field above */
+#define ACPI_APMT_FLAGS_ATOMIC_NSUPP (0<<2)
+#define ACPI_APMT_FLAGS_ATOMIC_SUPP (1<<2)
+
+/* Values for Type field above */
+
+enum acpi_apmt_node_type {
+ ACPI_APMT_NODE_TYPE_MC = 0x00,
+ ACPI_APMT_NODE_TYPE_SMMU = 0x01,
+ ACPI_APMT_NODE_TYPE_PCIE_ROOT = 0x02,
+ ACPI_APMT_NODE_TYPE_ACPI = 0x03,
+ ACPI_APMT_NODE_TYPE_CACHE = 0x04,
+ ACPI_APMT_NODE_TYPE_COUNT
+};
+
+/* Masks for ovflw_irq_flags field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE (1<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE (1<<1)
+
+/* Values for ovflw_irq_flags mode field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_LEVEL (0<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_EDGE (1<<0)
+
+/* Values for ovflw_irq_flags type field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE_WIRED (0<<1)
+
+/*******************************************************************************
+ *
+ * BDAT - BIOS Data ACPI Table
+ *
+ * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5
+ * Nov 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_bdat {
+ struct acpi_table_header header;
+ struct acpi_generic_address gas;
+};
+
+/*******************************************************************************
+ *
+ * CCEL - CC-Event Log
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)". Feb 2022
+ *
+ ******************************************************************************/
+
+struct acpi_table_ccel {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 CCtype;
+ u8 Ccsub_type;
+ u16 reserved;
+ u64 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049D, March 2018
+ * Document number: ARM DEN 0049E.e, Sep 2022
*
******************************************************************************/
@@ -86,10 +394,10 @@ struct acpi_iort_node {
u8 type;
u16 length;
u8 revision;
- u32 reserved;
+ u32 identifier;
u32 mapping_count;
u32 mapping_offset;
- char node_data[1];
+ char node_data[];
};
/* Values for subtable Type above */
@@ -100,7 +408,8 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
ACPI_IORT_NODE_SMMU = 0x03,
ACPI_IORT_NODE_SMMU_V3 = 0x04,
- ACPI_IORT_NODE_PMCG = 0x05
+ ACPI_IORT_NODE_PMCG = 0x05,
+ ACPI_IORT_NODE_RMR = 0x06,
};
struct acpi_iort_id_mapping {
@@ -144,14 +453,14 @@ struct acpi_iort_memory_access {
*/
struct acpi_iort_its_group {
u32 its_count;
- u32 identifiers[1]; /* GIC ITS identifier array */
+ u32 identifiers[]; /* GIC ITS identifier array */
};
struct acpi_iort_named_component {
u32 node_flags;
u64 memory_properties; /* Memory access properties */
u8 memory_address_limit; /* Memory address size limit */
- char device_name[1]; /* Path of namespace object */
+ char device_name[]; /* Path of namespace object */
};
/* Masks for Flags field above */
@@ -164,13 +473,18 @@ struct acpi_iort_root_complex {
u32 ats_attribute;
u32 pci_segment_number;
u8 memory_address_limit; /* Memory address size limit */
- u8 reserved[3]; /* Reserved, must be zero */
+ u16 pasid_capabilities; /* PASID Capabilities */
+ u8 reserved[]; /* Reserved, must be zero */
};
-/* Values for ats_attribute field above */
+/* Masks for ats_attribute field above */
+
+#define ACPI_IORT_ATS_SUPPORTED (1)	/* The root complex supports ATS */
+#define ACPI_IORT_PRI_SUPPORTED (1<<1)	/* The root complex supports PRI */
+#define ACPI_IORT_PASID_FWD_SUPPORTED (1<<2)	/* The root complex supports PASID forwarding */
-#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */
-#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */
+/* Masks for pasid_capabilities field above */
+#define ACPI_IORT_PASID_MAX_WIDTH (0x1F) /* Bits 0-4 */
struct acpi_iort_smmu {
u64 base_address; /* SMMU base address */
@@ -182,7 +496,7 @@ struct acpi_iort_smmu {
u32 context_interrupt_offset;
u32 pmu_interrupt_count;
u32 pmu_interrupt_offset;
- u64 interrupts[1]; /* Interrupt array */
+ u64 interrupts[]; /* Interrupt array */
};
/* Values for Model field above */
@@ -233,6 +547,7 @@ struct acpi_iort_smmu_v3 {
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1)
#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3)
+#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1<<4)
struct acpi_iort_pmcg {
u64 page0_base_address;
@@ -241,6 +556,37 @@ struct acpi_iort_pmcg {
u64 page1_base_address;
};
+struct acpi_iort_rmr {
+ u32 flags;
+ u32 rmr_count;
+ u32 rmr_offset;
+};
+
+/* Masks for Flags field above */
+#define ACPI_IORT_RMR_REMAP_PERMITTED (1)
+#define ACPI_IORT_RMR_ACCESS_PRIVILEGE (1<<1)
+
+/*
+ * Macro to access the Access Attributes in the flags field above;
+ * the Access Attributes are encoded in bits 9:2
+ */
+#define ACPI_IORT_RMR_ACCESS_ATTRIBUTES(flags) (((flags) >> 2) & 0xFF)
+
+/* Values for above Access Attributes */
+
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRNE 0x00
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRE 0x01
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGRE 0x02
+#define ACPI_IORT_RMR_ATTR_DEVICE_GRE 0x03
+#define ACPI_IORT_RMR_ATTR_NORMAL_NC 0x04
+#define ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB 0x05
+
+struct acpi_iort_rmr_desc {
+ u64 base_address;
+ u64 length;
+ u32 reserved;
+};
+
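A short sketch (not part of the header) of decoding an RMR node's flags with the macro above; the helper name is illustrative only:

	static int iort_rmr_is_device_memory(struct acpi_iort_rmr *rmr)
	{
		unsigned int attr = ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags);

		/* 0x00-0x03 are the Device-nGnRnE .. Device-GRE encodings */
		return attr <= ACPI_IORT_RMR_ATTR_DEVICE_GRE;
	}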
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -277,6 +623,7 @@ struct acpi_ivrs_header {
enum acpi_ivrs_type {
ACPI_IVRS_TYPE_HARDWARE1 = 0x10,
ACPI_IVRS_TYPE_HARDWARE2 = 0x11,
+ ACPI_IVRS_TYPE_HARDWARE3 = 0x40,
ACPI_IVRS_TYPE_MEMORY1 = 0x20,
ACPI_IVRS_TYPE_MEMORY2 = 0x21,
ACPI_IVRS_TYPE_MEMORY3 = 0x22
@@ -365,7 +712,11 @@ enum acpi_ivrs_device_entry_type {
ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */
ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */
ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */
- ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */
+ ACPI_IVRS_TYPE_SPECIAL = 72, /* Uses struct acpi_ivrs_device8c */
+
+ /* Variable-length device entries */
+
+ ACPI_IVRS_TYPE_HID = 240 /* Uses ACPI_IVRS_DEVICE_HID */
};
/* Values for Data field above */
@@ -417,6 +768,22 @@ struct acpi_ivrs_device8c {
#define ACPI_IVHD_IOAPIC 1
#define ACPI_IVHD_HPET 2
+/* Type 240: variable-length device entry */
+
+struct acpi_ivrs_device_hid {
+ struct acpi_ivrs_de_header header;
+ u64 acpi_hid;
+ u64 acpi_cid;
+ u8 uid_type;
+ u8 uid_length;
+};
+
+/* Values for uid_type above */
+
+#define ACPI_IVRS_UID_NOT_PRESENT 0
+#define ACPI_IVRS_UID_IS_INTEGER 1
+#define ACPI_IVRS_UID_IS_STRING 2
+
/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
struct acpi_ivrs_memory {
@@ -517,7 +884,20 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
- ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
+ ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16,
+ ACPI_MADT_TYPE_CORE_PIC = 17,
+ ACPI_MADT_TYPE_LIO_PIC = 18,
+ ACPI_MADT_TYPE_HT_PIC = 19,
+ ACPI_MADT_TYPE_EIO_PIC = 20,
+ ACPI_MADT_TYPE_MSI_PIC = 21,
+ ACPI_MADT_TYPE_BIO_PIC = 22,
+ ACPI_MADT_TYPE_LPC_PIC = 23,
+ ACPI_MADT_TYPE_RINTC = 24,
+ ACPI_MADT_TYPE_IMSIC = 25,
+ ACPI_MADT_TYPE_APLIC = 26,
+ ACPI_MADT_TYPE_PLIC = 27,
+ ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */
+ ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
/*
@@ -598,7 +978,7 @@ struct acpi_madt_local_sapic {
u8 reserved[3]; /* Reserved, must be zero */
u32 lapic_flags;
u32 uid; /* Numeric UID - ACPI 3.0 */
- char uid_string[1]; /* String UID - ACPI 3.0 */
+ char uid_string[]; /* String UID - ACPI 3.0 */
};
/* 8: Platform Interrupt Source */
@@ -638,7 +1018,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 changes) */
+/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 + ACPI 6.5 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -658,6 +1038,7 @@ struct acpi_madt_generic_interrupt {
u8 efficiency_class;
u8 reserved2[1];
u16 spe_interrupt; /* ACPI 6.3 */
+ u16 trbe_interrupt; /* ACPI 6.5 */
};
/* Masks for Flags field above */
@@ -665,6 +1046,8 @@ struct acpi_madt_generic_interrupt {
/* ACPI_MADT_ENABLED (1) Processor is usable if set */
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
+#define ACPI_MADT_GICC_ONLINE_CAPABLE (1<<3) /* 03: Processor is online capable */
+#define ACPI_MADT_GICC_NON_COHERENT (1<<4) /* 04: GIC redistributor is not coherent */
/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
@@ -709,21 +1092,247 @@ struct acpi_madt_generic_msi_frame {
struct acpi_madt_generic_redistributor {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u64 base_address;
u32 length;
};
+#define ACPI_MADT_GICR_NON_COHERENT (1)
+
/* 15: Generic Translator (ACPI 6.0) */
struct acpi_madt_generic_translator {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u32 translation_id;
u64 base_address;
u32 reserved2;
};
+#define ACPI_MADT_ITS_NON_COHERENT (1)
+
+/* 16: Multiprocessor wakeup (ACPI 6.4) */
+
+struct acpi_madt_multiproc_wakeup {
+ struct acpi_subtable_header header;
+ u16 mailbox_version;
+ u32 reserved; /* reserved - must be zero */
+ u64 base_address;
+};
+
+#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032
+#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
+
+struct acpi_madt_multiproc_wakeup_mailbox {
+ u16 command;
+ u16 reserved; /* reserved - must be zero */
+ u32 apic_id;
+ u64 wakeup_vector;
+ u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */
+ u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */
+};
+
+#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+
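A hedged sketch (not part of the header) of the wakeup handshake the mailbox above describes: the OS fills in the target APIC ID and entry point, then writes the command last, since the waiting processor polls the command field. A real implementation would also need an uncached mapping and memory barriers.

	static void mp_wakeup_cpu(volatile struct acpi_madt_multiproc_wakeup_mailbox *mb,
				  u32 apic_id, u64 entry_point)
	{
		mb->apic_id = apic_id;
		mb->wakeup_vector = entry_point;
		mb->command = ACPI_MP_WAKE_COMMAND_WAKEUP;	/* must be written last */
	}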
+/* 17: CPU Core Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_core_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u32 processor_id;
+ u32 core_id;
+ u32 flags;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_core_pic_version {
+ ACPI_MADT_CORE_PIC_VERSION_NONE = 0,
+ ACPI_MADT_CORE_PIC_VERSION_V1 = 1,
+ ACPI_MADT_CORE_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 18: Legacy I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_lio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade[2];
+ u32 cascade_map[2];
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_lio_pic_version {
+ ACPI_MADT_LIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_LIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_LIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 19: HT Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_ht_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade[8];
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_ht_pic_version {
+ ACPI_MADT_HT_PIC_VERSION_NONE = 0,
+ ACPI_MADT_HT_PIC_VERSION_V1 = 1,
+ ACPI_MADT_HT_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 20: Extend I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_eio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 cascade;
+ u8 node;
+ u64 node_map;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_eio_pic_version {
+ ACPI_MADT_EIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_EIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_EIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 21: MSI Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_msi_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 msg_address;
+ u32 start;
+ u32 count;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_msi_pic_version {
+ ACPI_MADT_MSI_PIC_VERSION_NONE = 0,
+ ACPI_MADT_MSI_PIC_VERSION_V1 = 1,
+ ACPI_MADT_MSI_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 22: Bridge I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_bio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u16 id;
+ u16 gsi_base;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_bio_pic_version {
+ ACPI_MADT_BIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_BIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_BIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 23: LPC Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_lpc_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_lpc_pic_version {
+ ACPI_MADT_LPC_PIC_VERSION_NONE = 0,
+ ACPI_MADT_LPC_PIC_VERSION_V1 = 1,
+ ACPI_MADT_LPC_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 24: RISC-V INTC */
+struct acpi_madt_rintc {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 flags;
+ u64 hart_id;
+ u32 uid; /* ACPI processor UID */
+ u32 ext_intc_id; /* External INTC Id */
+ u64 imsic_addr; /* IMSIC base address */
+ u32 imsic_size; /* IMSIC size */
+};
+
+/* Values for RISC-V INTC Version field above */
+
+enum acpi_madt_rintc_version {
+ ACPI_MADT_RINTC_VERSION_NONE = 0,
+ ACPI_MADT_RINTC_VERSION_V1 = 1,
+ ACPI_MADT_RINTC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 25: RISC-V IMSIC */
+struct acpi_madt_imsic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 flags;
+ u16 num_ids;
+ u16 num_guest_ids;
+ u8 guest_index_bits;
+ u8 hart_index_bits;
+ u8 group_index_bits;
+ u8 group_index_shift;
+};
+
+/* 26: RISC-V APLIC */
+struct acpi_madt_aplic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u32 flags;
+ u8 hw_id[8];
+ u16 num_idcs;
+ u16 num_sources;
+ u32 gsi_base;
+ u64 base_addr;
+ u32 size;
+};
+
+/* 27: RISC-V PLIC */
+struct acpi_madt_plic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u8 hw_id[8];
+ u16 num_irqs;
+ u16 max_prio;
+ u32 flags;
+ u32 size;
+ u64 base_addr;
+ u32 gsi_base;
+};
+
+/* 80: OEM data */
+
+struct acpi_madt_oem_data {
+ ACPI_FLEX_ARRAY(u8, oem_data);
+};
+
/*
* Common flags fields for MADT subtables
*/
@@ -731,6 +1340,7 @@ struct acpi_madt_generic_translator {
/* MADT Local APIC flags */
#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
+#define ACPI_MADT_ONLINE_CAPABLE (2) /* 01: System HW supports enabling processor at runtime */
/* MADT MPS INTI flags (inti_flags) */
@@ -801,6 +1411,121 @@ struct acpi_table_mchi {
/*******************************************************************************
*
+ * MPAM - Memory System Resource Partitioning and Monitoring
+ *
+ * Conforms to "ACPI for Memory System Resource Partitioning and Monitoring 2.0"
+ * Document number: ARM DEN 0065, December 2022.
+ *
+ ******************************************************************************/
+
+/* MPAM RIS locator types. Table 11, Location types */
+enum acpi_mpam_locator_type {
+ ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE = 0,
+ ACPI_MPAM_LOCATION_TYPE_MEMORY = 1,
+ ACPI_MPAM_LOCATION_TYPE_SMMU = 2,
+ ACPI_MPAM_LOCATION_TYPE_MEMORY_CACHE = 3,
+ ACPI_MPAM_LOCATION_TYPE_ACPI_DEVICE = 4,
+ ACPI_MPAM_LOCATION_TYPE_INTERCONNECT = 5,
+ ACPI_MPAM_LOCATION_TYPE_UNKNOWN = 0xFF
+};
+
+/* MPAM Functional dependency descriptor. Table 10 */
+struct acpi_mpam_func_deps {
+ u32 producer;
+ u32 reserved;
+};
+
+/* MPAM Processor cache locator descriptor. Table 13 */
+struct acpi_mpam_resource_cache_locator {
+ u64 cache_reference;
+ u32 reserved;
+};
+
+/* MPAM Memory locator descriptor. Table 14 */
+struct acpi_mpam_resource_memory_locator {
+ u64 proximity_domain;
+ u32 reserved;
+};
+
+/* MPAM SMMU locator descriptor. Table 15 */
+struct acpi_mpam_resource_smmu_locator {
+ u64 smmu_interface;
+ u32 reserved;
+};
+
+/* MPAM Memory-side cache locator descriptor. Table 16 */
+struct acpi_mpam_resource_memcache_locator {
+ u8 reserved[7];
+ u8 level;
+ u32 reference;
+};
+
+/* MPAM ACPI device locator descriptor. Table 17 */
+struct acpi_mpam_resource_acpi_locator {
+ u64 acpi_hw_id;
+ u32 acpi_unique_id;
+};
+
+/* MPAM Interconnect locator descriptor. Table 18 */
+struct acpi_mpam_resource_interconnect_locator {
+ u64 inter_connect_desc_tbl_off;
+ u32 reserved;
+};
+
+/* MPAM Locator structure. Table 12 */
+struct acpi_mpam_resource_generic_locator {
+ u64 descriptor1;
+ u32 descriptor2;
+};
+
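The masks above unpack the standard PCI bus/device/function layout; a small sketch (not part of the header):

	static void cedt_rdpas_decode_bdf(u16 bdf, u8 *bus, u8 *dev, u8 *fn)
	{
		*bus = (bdf & ACPI_CEDT_RDPAS_BUS_MASK) >> 8;
		*dev = (bdf & ACPI_CEDT_RDPAS_DEVICE_MASK) >> 3;
		*fn = bdf & ACPI_CEDT_RDPAS_FUNCTION_MASK;
	}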
+union acpi_mpam_resource_locator {
+ struct acpi_mpam_resource_cache_locator cache_locator;
+ struct acpi_mpam_resource_memory_locator memory_locator;
+ struct acpi_mpam_resource_smmu_locator smmu_locator;
+ struct acpi_mpam_resource_memcache_locator mem_cache_locator;
+ struct acpi_mpam_resource_acpi_locator acpi_locator;
+ struct acpi_mpam_resource_interconnect_locator interconnect_ifc_locator;
+ struct acpi_mpam_resource_generic_locator generic_locator;
+};
+
+/* Memory System Component Resource Node Structure Table 9 */
+struct acpi_mpam_resource_node {
+ u32 identifier;
+ u8 ris_index;
+ u16 reserved1;
+ u8 locator_type;
+ union acpi_mpam_resource_locator locator;
+ u32 num_functional_deps;
+};
+
+/* Memory System Component (MSC) Node Structure. Table 4 */
+struct acpi_mpam_msc_node {
+ u16 length;
+ u8 interface_type;
+ u8 reserved;
+ u32 identifier;
+ u64 base_address;
+ u32 mmio_size;
+ u32 overflow_interrupt;
+ u32 overflow_interrupt_flags;
+ u32 reserved1;
+ u32 overflow_interrupt_affinity;
+ u32 error_interrupt;
+ u32 error_interrupt_flags;
+ u32 reserved2;
+ u32 error_interrupt_affinity;
+ u32 max_nrdy_usec;
+ u64 hardware_id_linked_device;
+ u32 instance_id_linked_device;
+ u32 num_resource_nodes;
+};
+
+struct acpi_table_mpam {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/*******************************************************************************
+ *
* MPST - Memory Power State Table (ACPI 5.0)
* Version 1
*
@@ -937,29 +1662,6 @@ struct acpi_table_msdm {
/*******************************************************************************
*
- * MTMR - MID Timer Table
- * Version 1
- *
- * Conforms to "Simple Firmware Interface Specification",
- * Draft 0.8.2, Oct 19, 2010
- * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table.
- *
- ******************************************************************************/
-
-struct acpi_table_mtmr {
- struct acpi_table_header header; /* Common ACPI table header */
-};
-
-/* MTMR entry */
-
-struct acpi_mtmr_entry {
- struct acpi_generic_address physical_address;
- u32 frequency;
- u32 irq;
-};
-
-/*******************************************************************************
- *
* NFIT - NVDIMM Interface Table (ACPI 6.0+)
* Version 1
*
@@ -1007,12 +1709,14 @@ struct acpi_nfit_system_address {
u64 address;
u64 length;
u64 memory_mapping;
+ u64 location_cookie; /* ACPI 6.4 */
};
/* Flags */
#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
+#define ACPI_NFIT_LOCATION_COOKIE_VALID (1<<2) /* 02: SPA location cookie valid (ACPI 6.4) */
/* Range Type GUIDs appear in the include/acuuid.h file */
@@ -1052,7 +1756,7 @@ struct acpi_nfit_interleave {
u16 reserved; /* Reserved, must be zero */
u32 line_count;
u32 line_size;
- u32 line_offset[1]; /* Variable length */
+ u32 line_offset[]; /* Variable length */
};
/* 3: SMBIOS Management Information Structure */
@@ -1060,7 +1764,7 @@ struct acpi_nfit_interleave {
struct acpi_nfit_smbios {
struct acpi_nfit_header header;
u32 reserved; /* Reserved, must be zero */
- u8 data[1]; /* Variable length */
+ u8 data[]; /* Variable length */
};
/* 4: NVDIMM Control Region Structure */
@@ -1117,7 +1821,7 @@ struct acpi_nfit_flush_address {
u32 device_handle;
u16 hint_count;
u8 reserved[6]; /* Reserved, must be zero */
- u64 hint_address[1]; /* Variable length */
+ u64 hint_address[]; /* Variable length */
};
/* 7: Platform Capabilities Structure */
@@ -1185,6 +1889,260 @@ struct nfit_device_handle {
/*******************************************************************************
*
+ * NHLT - Non HD Audio Link Table
+ *
+ * Conforms to: Intel Smart Sound Technology NHLT Specification
+ * Version 0.8.1, January 2020.
+ *
+ ******************************************************************************/
+
+/* Main table */
+
+struct acpi_table_nhlt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 endpoint_count;
+};
+
+struct acpi_nhlt_endpoint {
+ u32 descriptor_length;
+ u8 link_type;
+ u8 instance_id;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u32 subsystem_id;
+ u8 device_type;
+ u8 direction;
+ u8 virtual_bus_id;
+};
+
+/* Types for link_type field above */
+
+#define ACPI_NHLT_RESERVED_HD_AUDIO 0
+#define ACPI_NHLT_RESERVED_DSP 1
+#define ACPI_NHLT_PDM 2
+#define ACPI_NHLT_SSP 3
+#define ACPI_NHLT_RESERVED_SLIMBUS 4
+#define ACPI_NHLT_RESERVED_SOUNDWIRE 5
+#define ACPI_NHLT_TYPE_RESERVED 6 /* 6 and above are reserved */
+
+/* Values for device_id field above */
+
+#define ACPI_NHLT_PDM_DMIC 0xAE20
+#define ACPI_NHLT_BT_SIDEBAND 0xAE30
+#define ACPI_NHLT_I2S_TDM_CODECS 0xAE23
+
+/* Values for device_type field above */
+
+/* SSP Link */
+
+#define ACPI_NHLT_LINK_BT_SIDEBAND 0
+#define ACPI_NHLT_LINK_FM 1
+#define ACPI_NHLT_LINK_MODEM 2
+/* 3 is reserved */
+#define ACPI_NHLT_LINK_SSP_ANALOG_CODEC 4
+
+/* PDM Link */
+
+#define ACPI_NHLT_PDM_ON_CAVS_1P8 0
+#define ACPI_NHLT_PDM_ON_CAVS_1P5 1
+
+/* Values for Direction field above */
+
+#define ACPI_NHLT_DIR_RENDER 0
+#define ACPI_NHLT_DIR_CAPTURE 1
+#define ACPI_NHLT_DIR_RENDER_LOOPBACK 2
+#define ACPI_NHLT_DIR_RENDER_FEEDBACK 3
+#define ACPI_NHLT_DIR_RESERVED 4 /* 4 and above are reserved */
+
+struct acpi_nhlt_device_specific_config {
+ u32 capabilities_size;
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+struct acpi_nhlt_device_specific_config_a {
+ u32 capabilities_size;
+ u8 virtual_slot;
+ u8 config_type;
+ u8 array_type;
+};
+
+/* Values for Config Type above */
+
+#define ACPI_NHLT_CONFIG_TYPE_GENERIC 0x00
+#define ACPI_NHLT_CONFIG_TYPE_MIC_ARRAY 0x01
+#define ACPI_NHLT_CONFIG_TYPE_RENDER_FEEDBACK 0x03
+#define ACPI_NHLT_CONFIG_TYPE_RESERVED 0x04 /* 4 and above are reserved */
+
+struct acpi_nhlt_device_specific_config_b {
+ u32 capabilities_size;
+};
+
+struct acpi_nhlt_device_specific_config_c {
+ u32 capabilities_size;
+ u8 virtual_slot;
+};
+
+struct acpi_nhlt_render_device_specific_config {
+ u32 capabilities_size;
+ u8 virtual_slot;
+};
+
+struct acpi_nhlt_wave_extensible {
+ u16 format_tag;
+ u16 channel_count;
+ u32 samples_per_sec;
+ u32 avg_bytes_per_sec;
+ u16 block_align;
+ u16 bits_per_sample;
+ u16 extra_format_size;
+ u16 valid_bits_per_sample;
+ u32 channel_mask;
+ u8 sub_format_guid[16];
+};
+
+/* Values for channel_mask above */
+
+#define ACPI_NHLT_SPKR_FRONT_LEFT 0x1
+#define ACPI_NHLT_SPKR_FRONT_RIGHT 0x2
+#define ACPI_NHLT_SPKR_FRONT_CENTER 0x4
+#define ACPI_NHLT_SPKR_LOW_FREQ 0x8
+#define ACPI_NHLT_SPKR_BACK_LEFT 0x10
+#define ACPI_NHLT_SPKR_BACK_RIGHT 0x20
+#define ACPI_NHLT_SPKR_FRONT_LEFT_OF_CENTER 0x40
+#define ACPI_NHLT_SPKR_FRONT_RIGHT_OF_CENTER 0x80
+#define ACPI_NHLT_SPKR_BACK_CENTER 0x100
+#define ACPI_NHLT_SPKR_SIDE_LEFT 0x200
+#define ACPI_NHLT_SPKR_SIDE_RIGHT 0x400
+#define ACPI_NHLT_SPKR_TOP_CENTER 0x800
+#define ACPI_NHLT_SPKR_TOP_FRONT_LEFT 0x1000
+#define ACPI_NHLT_SPKR_TOP_FRONT_CENTER 0x2000
+#define ACPI_NHLT_SPKR_TOP_FRONT_RIGHT 0x4000
+#define ACPI_NHLT_SPKR_TOP_BACK_LEFT 0x8000
+#define ACPI_NHLT_SPKR_TOP_BACK_CENTER 0x10000
+#define ACPI_NHLT_SPKR_TOP_BACK_RIGHT 0x20000
+
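channel_mask follows the WAVEFORMATEXTENSIBLE speaker-bitmap convention, so the number of positioned channels is its population count; a sketch (not part of the header):

	static unsigned int nhlt_channel_count(u32 channel_mask)
	{
		unsigned int n = 0;

		while (channel_mask) {
			channel_mask &= channel_mask - 1;	/* clear lowest set bit */
			n++;
		}
		return n;
	}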
+struct acpi_nhlt_format_config {
+ struct acpi_nhlt_wave_extensible format;
+ u32 capability_size;
+ u8 capabilities[];
+};
+
+struct acpi_nhlt_formats_config {
+ u8 formats_count;
+};
+
+struct acpi_nhlt_device_specific_hdr {
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+/* Types for config_type above */
+
+#define ACPI_NHLT_GENERIC 0
+#define ACPI_NHLT_MIC 1
+#define ACPI_NHLT_RENDER 3
+
+struct acpi_nhlt_mic_device_specific_config {
+ struct acpi_nhlt_device_specific_hdr device_config;
+ u8 array_type_ext;
+};
+
+/* Values for array_type_ext above */
+
+#define ACPI_NHLT_ARRAY_TYPE_RESERVED 0x09 /* 9 and below are reserved */
+#define ACPI_NHLT_SMALL_LINEAR_2ELEMENT 0x0A
+#define ACPI_NHLT_BIG_LINEAR_2ELEMENT 0x0B
+#define ACPI_NHLT_FIRST_GEOMETRY_LINEAR_4ELEMENT 0x0C
+#define ACPI_NHLT_PLANAR_LSHAPED_4ELEMENT 0x0D
+#define ACPI_NHLT_SECOND_GEOMETRY_LINEAR_4ELEMENT 0x0E
+#define ACPI_NHLT_VENDOR_DEFINED 0x0F
+#define ACPI_NHLT_ARRAY_TYPE_MASK 0x0F
+#define ACPI_NHLT_ARRAY_TYPE_EXT_MASK 0x10
+
+#define ACPI_NHLT_NO_EXTENSION 0x0
+#define ACPI_NHLT_MIC_SNR_SENSITIVITY_EXT (1<<4)
+
+struct acpi_nhlt_vendor_mic_count {
+ u8 microphone_count;
+};
+
+struct acpi_nhlt_vendor_mic_config {
+ u8 type;
+ u8 panel;
+ u16 speaker_position_distance; /* mm */
+ u16 horizontal_offset; /* mm */
+ u16 vertical_offset; /* mm */
+ u8 frequency_low_band; /* in units of 5 Hz */
+ u8 frequency_high_band; /* in units of 500 Hz */
+ u16 direction_angle; /* -180 to +180 degrees */
+ u16 elevation_angle; /* -180 to +180 degrees */
+ u16 work_vertical_angle_begin; /* -180 to +180 degrees, 2-degree steps */
+ u16 work_vertical_angle_end; /* -180 to +180 degrees, 2-degree steps */
+ u16 work_horizontal_angle_begin; /* -180 to +180 degrees, 2-degree steps */
+ u16 work_horizontal_angle_end; /* -180 to +180 degrees, 2-degree steps */
+};
+
+/* Values for Type field above */
+
+#define ACPI_NHLT_MIC_OMNIDIRECTIONAL 0
+#define ACPI_NHLT_MIC_SUBCARDIOID 1
+#define ACPI_NHLT_MIC_CARDIOID 2
+#define ACPI_NHLT_MIC_SUPER_CARDIOID 3
+#define ACPI_NHLT_MIC_HYPER_CARDIOID 4
+#define ACPI_NHLT_MIC_8_SHAPED 5
+#define ACPI_NHLT_MIC_RESERVED6 6 /* 6 is reserved */
+#define ACPI_NHLT_MIC_VENDOR_DEFINED 7
+#define ACPI_NHLT_MIC_RESERVED 8 /* 8 and above are reserved */
+
+/* Values for Panel field above */
+
+#define ACPI_NHLT_MIC_POSITION_TOP 0
+#define ACPI_NHLT_MIC_POSITION_BOTTOM 1
+#define ACPI_NHLT_MIC_POSITION_LEFT 2
+#define ACPI_NHLT_MIC_POSITION_RIGHT 3
+#define ACPI_NHLT_MIC_POSITION_FRONT 4
+#define ACPI_NHLT_MIC_POSITION_BACK 5
+#define ACPI_NHLT_MIC_POSITION_RESERVED 6 /* 6 and above are reserved */
+
+struct acpi_nhlt_vendor_mic_device_specific_config {
+ struct acpi_nhlt_mic_device_specific_config mic_array_device_config;
+ u8 number_of_microphones;
+ struct acpi_nhlt_vendor_mic_config mic_config[]; /* Indexed by number_of_microphones */
+};
+
+/* Microphone SNR and Sensitivity extension */
+
+struct acpi_nhlt_mic_snr_sensitivity_extension {
+ u32 SNR;
+ u32 sensitivity;
+};
+
+/* Render device with feedback */
+
+struct acpi_nhlt_render_feedback_device_specific_config {
+ u8 feedback_virtual_slot; /* Render slot in case of capture */
+ u16 feedback_channels; /* Informative only */
+ u16 feedback_valid_bits_per_sample;
+};
+
+/* Structures not documented in the NHLT specification */
+
+struct acpi_nhlt_device_info_count {
+ u8 structure_count;
+};
+
+struct acpi_nhlt_device_info {
+ u8 device_id[16];
+ u8 device_instance_id;
+ u8 device_port_id;
+};
+
+/*******************************************************************************
+ *
* PCCT - Platform Communications Channel Table (ACPI 5.0)
* Version 2 (ACPI 6.2)
*
@@ -1208,7 +2166,8 @@ enum acpi_pcct_type {
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */
ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */
- ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, /* ACPI 6.4 */
+ ACPI_PCCT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
};
/*
@@ -1323,6 +2282,24 @@ struct acpi_pcct_ext_pcc_slave {
u64 error_status_mask;
};
+/* 5: HW Registers based Communications Subspace */
+
+struct acpi_pcct_hw_reg {
+ struct acpi_subtable_header header;
+ u16 version;
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 doorbell_preserve;
+ u64 doorbell_write;
+ struct acpi_generic_address cmd_complete_register;
+ u64 cmd_complete_mask;
+ struct acpi_generic_address error_status_register;
+ u64 error_status_mask;
+ u32 nominal_latency;
+ u32 min_turnaround_time;
+};
+
/* Values for doorbell flags above */
#define ACPI_PCCT_INTERRUPT_POLARITY (1)
@@ -1381,6 +2358,66 @@ struct acpi_pdtt_channel {
/*******************************************************************************
*
+ * PHAT - Platform Health Assessment Table (ACPI 6.4)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_phat {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* Common header for PHAT subtables that follow main table */
+
+struct acpi_phat_header {
+ u16 type;
+ u16 length;
+ u8 revision;
+};
+
+/* Values for Type field above */
+
+#define ACPI_PHAT_TYPE_FW_VERSION_DATA 0
+#define ACPI_PHAT_TYPE_FW_HEALTH_DATA 1
+#define ACPI_PHAT_TYPE_RESERVED 2 /* 0x02-0xFFFF are reserved */
+
+/*
+ * PHAT subtables, correspond to Type in struct acpi_phat_header
+ */
+
+/* 0: Firmware Version Data Record */
+
+struct acpi_phat_version_data {
+ struct acpi_phat_header header;
+ u8 reserved[3];
+ u32 element_count;
+};
+
+struct acpi_phat_version_element {
+ u8 guid[16];
+ u64 version_value;
+ u32 producer_id;
+};
+
+/* 1: Firmware Health Data Record */
+
+struct acpi_phat_health_data {
+ struct acpi_phat_header header;
+ u8 reserved[2];
+ u8 health;
+ u8 device_guid[16];
+ u32 device_specific_offset; /* Zero if no Device-specific data */
+};
+
+/* Values for Health field above */
+
+#define ACPI_PHAT_ERRORS_FOUND 0
+#define ACPI_PHAT_NO_ERRORS 1
+#define ACPI_PHAT_UNKNOWN_ERRORS 2
+#define ACPI_PHAT_ADVISORY 3
+
+/*******************************************************************************
+ *
* PMTT - Platform Memory Topology Table (ACPI 5.0)
* Version 1
*
@@ -1388,7 +2425,11 @@ struct acpi_pdtt_channel {
struct acpi_table_pmtt {
struct acpi_table_header header; /* Common ACPI table header */
- u32 reserved;
+ u32 memory_device_count;
+ /*
+ * Immediately followed by:
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
};
/* Common header for PMTT subtables that follow main table */
@@ -1399,6 +2440,12 @@ struct acpi_pmtt_header {
u16 length;
u16 flags;
u16 reserved2;
+ u32 memory_device_count; /* Zero means no memory device structs follow */
+ /*
+ * Immediately followed by:
+ * u8 type_specific_data[]
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
};
/* Values for Type field above */
@@ -1406,7 +2453,8 @@ struct acpi_pmtt_header {
#define ACPI_PMTT_TYPE_SOCKET 0
#define ACPI_PMTT_TYPE_CONTROLLER 1
#define ACPI_PMTT_TYPE_DIMM 2
-#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */
+#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFE are reserved */
+#define ACPI_PMTT_TYPE_VENDOR 0xFF
/* Values for Flags field above */
@@ -1425,37 +2473,43 @@ struct acpi_pmtt_socket {
u16 socket_id;
u16 reserved;
};
+ /*
+ * Immediately followed by:
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
/* 1: Memory Controller subtable */
struct acpi_pmtt_controller {
struct acpi_pmtt_header header;
- u32 read_latency;
- u32 write_latency;
- u32 read_bandwidth;
- u32 write_bandwidth;
- u16 access_width;
- u16 alignment;
+ u16 controller_id;
u16 reserved;
- u16 domain_count;
-};
-
-/* 1a: Proximity Domain substructure */
-
-struct acpi_pmtt_domain {
- u32 proximity_domain;
};
+ /*
+ * Immediately followed by:
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
/* 2: Physical Component Identifier (DIMM) */
struct acpi_pmtt_physical_component {
struct acpi_pmtt_header header;
- u16 component_id;
- u16 reserved;
- u32 memory_size;
u32 bios_handle;
};
+/* 0xFF: Vendor Specific Data */
+
+struct acpi_pmtt_vendor_specific {
+ struct acpi_pmtt_header header;
+ u8 type_uuid[16];
+ u8 specific[];
+ /*
+ * Immediately followed by:
+ * u8 vendor_specific_data[];
+ * MEMORY_DEVICE memory_device_struct[memory_device_count];
+ */
+};
+
/*******************************************************************************
*
* PPTT - Processor Properties Topology Table (ACPI 6.2)
@@ -1509,6 +2563,12 @@ struct acpi_pptt_cache {
u16 line_size;
};
+/* 1: Cache Type Structure for PPTT version 3 */
+
+struct acpi_pptt_cache_v1 {
+ u32 cache_id;
+};
+
/* Flags */
#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */
@@ -1518,6 +2578,7 @@ struct acpi_pptt_cache {
#define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */
#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */
#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */
+#define ACPI_PPTT_CACHE_ID_VALID (1<<7) /* Cache ID valid */
/* Masks for Attributes */
@@ -1554,6 +2615,48 @@ struct acpi_pptt_id {
/*******************************************************************************
*
+ * PRMT - Platform Runtime Mechanism Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_prmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_table_prmt_header {
+ u8 platform_guid[16];
+ u32 module_info_offset;
+ u32 module_info_count;
+};
+
+struct acpi_prmt_module_header {
+ u16 revision;
+ u16 length;
+};
+
+struct acpi_prmt_module_info {
+ u16 revision;
+ u16 length;
+ u8 module_guid[16];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 handler_info_count;
+ u32 handler_info_offset;
+ u64 mmio_list_pointer;
+};
+
+struct acpi_prmt_handler_info {
+ u16 revision;
+ u16 length;
+ u8 handler_guid[16];
+ u64 handler_address;
+ u64 static_data_buffer_address;
+ u64 acpi_param_buffer_address;
+};
+
+/*******************************************************************************
+ *
* RASF - RAS Feature Table (ACPI 5.0)
* Version 1
*
@@ -1650,6 +2753,103 @@ enum acpi_rasf_status {
/*******************************************************************************
*
+ * RGRT - Regulatory Graphics Resource Table
+ * Version 1
+ *
+ * Conforms to "ACPI RGRT" available at:
+ * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/
+ *
+ ******************************************************************************/
+
+struct acpi_table_rgrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 version;
+ u8 image_type;
+ u8 reserved;
+ u8 image[];
+};
+
+/* image_type values */
+
+enum acpi_rgrt_image_type {
+ ACPI_RGRT_TYPE_RESERVED0 = 0,
+ ACPI_RGRT_IMAGE_TYPE_PNG = 1,
+ ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/*******************************************************************************
+ *
+ * RHCT - RISC-V Hart Capabilities Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_rhct {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 flags; /* RHCT flags */
+ u64 time_base_freq;
+ u32 node_count;
+ u32 node_offset;
+};
+
+/* RHCT Flags */
+
+#define ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU (1)
+/*
+ * RHCT subtables
+ */
+struct acpi_rhct_node_header {
+ u16 type;
+ u16 length;
+ u16 revision;
+};
+
+/* Values for RHCT subtable Type above */
+
+enum acpi_rhct_node_type {
+ ACPI_RHCT_NODE_TYPE_ISA_STRING = 0x0000,
+ ACPI_RHCT_NODE_TYPE_CMO = 0x0001,
+ ACPI_RHCT_NODE_TYPE_MMU = 0x0002,
+ ACPI_RHCT_NODE_TYPE_RESERVED = 0x0003,
+ ACPI_RHCT_NODE_TYPE_HART_INFO = 0xFFFF,
+};
+
+/*
+ * RHCT node specific subtables
+ */
+
+/* ISA string node structure */
+struct acpi_rhct_isa_string {
+ u16 isa_length;
+ char isa[];
+};
+
+struct acpi_rhct_cmo_node {
+ u8 reserved; /* Must be zero */
+ u8 cbom_size; /* CBOM block size as a power of 2 */
+ u8 cbop_size; /* CBOP block size as a power of 2 */
+ u8 cboz_size; /* CBOZ block size as a power of 2 */
+};
+
+struct acpi_rhct_mmu_node {
+ u8 reserved; /* Must be zero */
+ u8 mmu_type; /* Virtual Address Scheme */
+};
+
+enum acpi_rhct_mmu_type {
+ ACPI_RHCT_MMU_TYPE_SV39 = 0,
+ ACPI_RHCT_MMU_TYPE_SV48 = 1,
+ ACPI_RHCT_MMU_TYPE_SV57 = 2
+};
+
+/* Hart Info node structure */
+struct acpi_rhct_hart_info {
+ u16 num_offsets;
+ u32 uid; /* ACPI processor UID */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
@@ -1703,6 +2903,7 @@ enum acpi_sdev_type {
/* Values for flags above */
#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
+#define ACPI_SDEV_SECURE_COMPONENTS_PRESENT (1<<1)
/*
* SDEV subtables
@@ -1718,6 +2919,46 @@ struct acpi_sdev_namespace {
u16 vendor_data_length;
};
+struct acpi_sdev_secure_component {
+ u16 secure_component_offset;
+ u16 secure_component_length;
+};
+
+/*
+ * SDEV sub-subtables ("Components") for above
+ */
+struct acpi_sdev_component {
+ struct acpi_sdev_header header;
+};
+
+/* Values for sub-subtable type above */
+
+enum acpi_sac_type {
+ ACPI_SDEV_TYPE_ID_COMPONENT = 0,
+ ACPI_SDEV_TYPE_MEM_COMPONENT = 1
+};
+
+struct acpi_sdev_id_component {
+ struct acpi_sdev_header header;
+ u16 hardware_id_offset;
+ u16 hardware_id_length;
+ u16 subsystem_id_offset;
+ u16 subsystem_id_length;
+ u16 hardware_revision;
+ u8 hardware_rev_present;
+ u8 class_code_present;
+ u8 pci_base_class;
+ u8 pci_sub_class;
+ u8 pci_programming_xface;
+};
+
+struct acpi_sdev_mem_component {
+ struct acpi_sdev_header header;
+ u32 reserved;
+ u64 memory_base_address;
+ u64 memory_length;
+};
+
/* 1: PCIe Endpoint Device Based Device Structure */
struct acpi_sdev_pcie {
@@ -1737,6 +2978,53 @@ struct acpi_sdev_pcie_path {
u8 function;
};
+/*******************************************************************************
+ *
+ * SVKL - Storage Volume Key Location Table (ACPI 6.4)
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_svkl {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 count;
+};
+
+struct acpi_svkl_key {
+ u16 type;
+ u16 format;
+ u32 size;
+ u64 address;
+};
+
+enum acpi_svkl_type {
+ ACPI_SVKL_TYPE_MAIN_STORAGE = 0,
+ ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */
+};
+
+enum acpi_svkl_format {
+ ACPI_SVKL_FORMAT_RAW_BINARY = 0,
+ ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
+};
+
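A sketch (not part of the header) of iterating the keys, assuming the key structures are laid out back to back immediately after the main table, as the count field suggests:

	static void svkl_for_each_key(struct acpi_table_svkl *svkl)
	{
		struct acpi_svkl_key *key = (struct acpi_svkl_key *)(svkl + 1);
		u32 i;

		for (i = 0; i < svkl->count; i++, key++) {
			if (key->type == ACPI_SVKL_TYPE_MAIN_STORAGE &&
			    key->format == ACPI_SVKL_FORMAT_RAW_BINARY) {
				/* key->size bytes of key material live at
				 * physical address key->address */
			}
		}
	}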
+/*******************************************************************************
+ *
+ * TDEL - TD-Event Log
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * September 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_tdel {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+ u64 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index bdcac69fa6bd..c080d579a546 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -33,7 +33,7 @@
#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
-#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */
+#define ACPI_SIG_VIOT "VIOT" /* Virtual I/O Translation Table */
#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
@@ -86,7 +86,7 @@ struct acpi_table_slic {
struct acpi_table_slit {
struct acpi_table_header header; /* Common ACPI table header */
u64 locality_count;
- u8 entry[1]; /* Real size = localities^2 */
+ u8 entry[]; /* Real size = localities^2 */
};
/*******************************************************************************
@@ -191,7 +191,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */
- ACPI_SRAT_TYPE_RESERVED = 6 /* 5 and greater are reserved */
+ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */
+ ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
};
/*
@@ -272,21 +273,28 @@ struct acpi_srat_gic_its_affinity {
u32 its_id;
};
-/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */
+/*
+ * Common structure for SRAT subtable types:
+ * 5: ACPI_SRAT_TYPE_GENERIC_AFFINITY
+ * 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
+ */
+
+#define ACPI_SRAT_DEVICE_HANDLE_SIZE 16
struct acpi_srat_generic_affinity {
struct acpi_subtable_header header;
u8 reserved;
u8 device_handle_type;
u32 proximity_domain;
- u8 device_handle[16];
+ u8 device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
u32 flags;
u32 reserved1;
};
/* Flags for struct acpi_srat_generic_affinity */
-#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
+#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
+#define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */
/*******************************************************************************
*
@@ -437,6 +445,7 @@ struct acpi_tpm2_phy {
#define ACPI_TPM2_RESERVED10 10
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
#define ACPI_TPM2_RESERVED 12
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON 13
/* Optional trailer appears after any start_method subtables */
@@ -486,24 +495,68 @@ struct acpi_table_uefi {
/*******************************************************************************
*
- * VRTC - Virtual Real Time Clock Table
+ * VIOT - Virtual I/O Translation Table
* Version 1
*
- * Conforms to "Simple Firmware Interface Specification",
- * Draft 0.8.2, Oct 19, 2010
- * NOTE: The ACPI VRTC is equivalent to The SFI MRTC table.
- *
******************************************************************************/
-struct acpi_table_vrtc {
+struct acpi_table_viot {
struct acpi_table_header header; /* Common ACPI table header */
+ u16 node_count;
+ u16 node_offset;
+ u8 reserved[8];
+};
+
+/* VIOT subtable header */
+
+struct acpi_viot_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_viot_node_type {
+ ACPI_VIOT_NODE_PCI_RANGE = 0x01,
+ ACPI_VIOT_NODE_MMIO = 0x02,
+ ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI = 0x03,
+ ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO = 0x04,
+ ACPI_VIOT_RESERVED = 0x05
+};
+
+/* VIOT subtables */
+
+struct acpi_viot_pci_range {
+ struct acpi_viot_header header;
+ u32 endpoint_start;
+ u16 segment_start;
+ u16 segment_end;
+ u16 bdf_start;
+ u16 bdf_end;
+ u16 output_node;
+ u8 reserved[6];
};
-/* VRTC entry */
+struct acpi_viot_mmio {
+ struct acpi_viot_header header;
+ u32 endpoint;
+ u64 base_address;
+ u16 output_node;
+ u8 reserved[6];
+};
-struct acpi_vrtc_entry {
- struct acpi_generic_address physical_address;
- u32 irq;
+struct acpi_viot_virtio_iommu_pci {
+ struct acpi_viot_header header;
+ u16 segment;
+ u16 bdf;
+ u8 reserved[8];
+};
+
+struct acpi_viot_virtio_iommu_mmio {
+ struct acpi_viot_header header;
+ u8 reserved[4];
+ u64 base_address;
};
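For a PCI-range node, endpoint IDs map linearly onto the BDF range; a sketch (not part of the header) of the translation, valid only when bdf falls inside the node's segment and BDF window:

	static u32 viot_pci_endpoint(struct acpi_viot_pci_range *node, u16 bdf)
	{
		/* endpoint IDs grow linearly with the offset into the range */
		return node->endpoint_start + (bdf - node->bdf_start);
	}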
/*******************************************************************************
@@ -678,6 +731,10 @@ struct acpi_table_wpbt {
u16 arguments_length;
};
+struct acpi_wpbt_unicode {
+ u16 *unicode_string;
+};
+
/*******************************************************************************
*
* WSMT - Windows SMM Security Mitigations Table
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index d50e61384f1f..85c2dcf2b704 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -507,9 +507,12 @@ typedef u64 acpi_integer;
/* Pointer/Integer type conversions */
#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i))
+#ifndef ACPI_TO_INTEGER
#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
+#endif
+#ifndef ACPI_OFFSET
#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
-#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
+#endif
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
/* Optimizations for 4-character (32-bit) acpi_name manipulation */
@@ -536,8 +539,14 @@ typedef u64 acpi_integer;
* Can be used with access_width of struct acpi_generic_address and access_size of
* struct acpi_resource_generic_register.
*/
-#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
-#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
+#define ACPI_ACCESS_BIT_SHIFT 2
+#define ACPI_ACCESS_BYTE_SHIFT -1
+#define ACPI_ACCESS_BIT_MAX (31 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_MAX (31 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_DEFAULT (8 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_DEFAULT (8 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))
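
A quick worked example of the reworked macros: access_width encodings 1..4 mean byte, word, dword and qword access, so the shifted forms reproduce the previous behavior while the new *_MAX and *_DEFAULT values give callers something to clamp to or fall back on. A hedged compile-time check, using the kernel's BUILD_BUG_ON static assertion from <linux/build_bug.h>:

static inline void acpi_access_width_examples(void)
{
	BUILD_BUG_ON(ACPI_ACCESS_BIT_WIDTH(1) != 8);	/* byte access:   8 bits */
	BUILD_BUG_ON(ACPI_ACCESS_BYTE_WIDTH(1) != 1);	/* byte access:   1 byte */
	BUILD_BUG_ON(ACPI_ACCESS_BIT_WIDTH(3) != 32);	/* dword access: 32 bits */
	BUILD_BUG_ON(ACPI_ACCESS_BYTE_WIDTH(4) != 8);	/* qword access: 8 bytes */
}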
/*******************************************************************************
*
@@ -824,7 +833,7 @@ typedef u8 acpi_adr_space_type;
*
* Note: A Data Table region is a special type of operation region
* that has its own AML opcode. However, internally, the AML
- * interpreter simply creates an operation region with an an address
+ * interpreter simply creates an operation region with an address
* space type of ACPI_ADR_SPACE_DATA_TABLE.
*/
#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */
@@ -1098,6 +1107,21 @@ struct acpi_connection_info {
u8 access_length;
};
+/* Special Context data for PCC Opregion (ACPI 6.3) */
+
+struct acpi_pcc_info {
+ u8 subspace_id;
+ u16 length;
+ u8 *internal_buffer;
+};
+
+/* Special Context data for FFH Opregion (ACPI 6.5) */
+
+struct acpi_ffh_info {
+ u64 offset;
+ u64 length;
+};
+
typedef
acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
u32 function,
@@ -1215,6 +1239,10 @@ struct acpi_mem_space_context {
struct acpi_mem_mapping *first_mm;
};
+struct acpi_data_table_mapping {
+ void *pointer;
+};
+
/*
* struct acpi_memory_list is used only if the ACPICA local cache is enabled
*/
@@ -1281,9 +1309,21 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS4 0x12
#define ACPI_OSI_WIN_10_RS5 0x13
#define ACPI_OSI_WIN_10_19H1 0x14
+#define ACPI_OSI_WIN_10_20H1 0x15
+#define ACPI_OSI_WIN_11 0x16
/* Definitions of getopt */
#define ACPI_OPT_END -1
+/* Definitions for explicit fallthrough */
+
+#ifndef ACPI_FALLTHROUGH
+#define ACPI_FALLTHROUGH do {} while(0)
+#endif
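
The no-op fallback above keeps switch statements compiling on hosts that never define ACPI_FALLTHROUGH themselves; the GCC header further below maps it to __attribute__((__fallthrough__)). A hedged usage sketch with invented opcode names and helpers:

	switch (op) {
	case OP_LOAD:			/* hypothetical opcodes */
		prepare_load();		/* hypothetical helper  */
		ACPI_FALLTHROUGH;	/* intentional: shares the fetch path */
	case OP_FETCH:
		do_fetch();
		break;
	default:
		break;
	}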
+
+#ifndef ACPI_FLEX_ARRAY
+#define ACPI_FLEX_ARRAY(TYPE, NAME) TYPE NAME[0]
+#endif
+
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 9e1367b19069..52a84523bfac 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -27,6 +27,10 @@
#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766"
#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de"
#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
+#define UUID_MEMORY_DEVICE "03b19910-f473-11dd-87af-0800200c9a66"
+#define UUID_GENERIC_BUTTONS_DEVICE "fa6bd625-9ce8-470d-a2c7-b3ca36c4282e"
+#define UUID_NVDIMM_ROOT_DEVICE "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
+#define UUID_CONTROL_METHOD_BATTERY "f18fc78b-0f15-4978-b793-53f833a1d35b"
/* Interfaces */
@@ -35,6 +39,7 @@
/* NVDIMM - NFIT table */
+#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0"
#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c"
@@ -43,6 +48,10 @@
#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9"
#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d"
+#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
+#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
+#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e"
+#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
/* Processor Properties (ACPI 6.2) */
@@ -56,5 +65,10 @@
#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e"
#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
-
+#define UUID_DEVICE_GRAPHS "ab02a46b-74c7-45a2-bd68-f7d344ef2153"
+#define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b"
+#define UUID_CORESIGHT_GRAPH "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd"
+#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
+#define UUID_1ST_FUNCTION_ID "893f00a6-660c-494e-bcfd-3043f4fb67c0"
+#define UUID_2ND_FUNCTION_ID "107ededd-d381-4fd7-8da9-08e9a6c79644"
#endif /* __ACUUID_H__ */
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3d..dc60f7db5524 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -27,19 +27,18 @@ extern int hest_disable;
extern int erst_disable;
#ifdef CONFIG_ACPI_APEI_GHES
extern bool ghes_disable;
+void __init acpi_ghes_init(void);
#else
#define ghes_disable 1
+static inline void acpi_ghes_init(void) { }
#endif
#ifdef CONFIG_ACPI_APEI
void __init acpi_hest_init(void);
#else
-static inline void acpi_hest_init(void) { return; }
+static inline void acpi_hest_init(void) { }
#endif
-typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
-int apei_hest_parse(apei_hest_func_t func, void *data);
-
int erst_write(const struct cper_record_header *record);
ssize_t erst_get_record_count(void);
int erst_get_record_id_begin(int *pos);
@@ -47,6 +46,8 @@ int erst_get_record_id_next(int *pos, u64 *record_id);
void erst_get_record_id_end(void);
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen);
+ssize_t erst_read_record(u64 record_id, struct cper_record_header *record,
+ size_t buflen, size_t recordlen, const guid_t *creatorid);
int erst_clear(u64 record_id);
int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
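
erst_read_record(), added above, extends erst_read() with a minimum-length check and an optional creator-ID filter, so callers no longer have to validate the CPER envelope by hand. A minimal sketch of the call pattern; the function name, buffer size and GUID source are placeholder assumptions:

static ssize_t read_one_record(u64 record_id, size_t buflen,
			       const guid_t *creator)
{
	struct cper_record_header *rcd;
	ssize_t len;

	rcd = kmalloc(buflen, GFP_KERNEL);
	if (!rcd)
		return -ENOMEM;

	/* Short records and creator-ID mismatches come back as errors. */
	len = erst_read_record(record_id, rcd, buflen, sizeof(*rcd), creator);
	if (len >= 0) {
		/* ... consume 'rcd' ... */
	}
	kfree(rcd);
	return len;
}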
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
index 5d8f5d910c82..611a2561a014 100644
--- a/include/acpi/battery.h
+++ b/include/acpi/battery.h
@@ -2,6 +2,8 @@
#ifndef __ACPI_BATTERY_H
#define __ACPI_BATTERY_H
+#include <linux/power_supply.h>
+
#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_NOTIFY_STATUS 0x80
@@ -10,8 +12,8 @@
struct acpi_battery_hook {
const char *name;
- int (*add_battery)(struct power_supply *battery);
- int (*remove_battery)(struct power_supply *battery);
+ int (*add_battery)(struct power_supply *battery, struct acpi_battery_hook *hook);
+ int (*remove_battery)(struct power_supply *battery, struct acpi_battery_hook *hook);
struct list_head list;
};
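
With this change both callbacks also receive the registering hook, so a single pair of handlers can serve several hook instances. A minimal registration sketch, assuming the existing battery_hook_register() entry point and hypothetical callbacks:

static int example_add_battery(struct power_supply *battery,
			       struct acpi_battery_hook *hook)
{
	/* 'hook' identifies which registration fired; attach extras here */
	return 0;
}

static int example_remove_battery(struct power_supply *battery,
				  struct acpi_battery_hook *hook)
{
	return 0;
}

static struct acpi_battery_hook example_hook = {
	.name		= "example hook",	/* illustrative name */
	.add_battery	= example_add_battery,
	.remove_battery	= example_remove_battery,
};

/* battery_hook_register(&example_hook); typically called from module init */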
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index a6a9373ab863..930b6afba6f4 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -11,12 +11,13 @@
#define _CPPC_ACPI_H
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
#include <linux/types.h>
#include <acpi/pcc.h>
#include <acpi/processor.h>
-/* Support CPPCv2 and CPPCv3 */
+/* CPPCv2 and CPPCv3 support */
#define CPPC_V2_REV 2
#define CPPC_V3_REV 3
#define CPPC_V2_NUM_ENT 21
@@ -39,7 +40,7 @@ struct cpc_reg {
u8 bit_width;
u8 bit_offset;
u8 access_width;
- u64 __iomem address;
+ u64 address;
} __packed;
/*
@@ -107,12 +108,15 @@ struct cppc_perf_caps {
u32 lowest_nonlinear_perf;
u32 lowest_freq;
u32 nominal_freq;
+ u32 energy_perf;
+ bool auto_sel;
};
struct cppc_perf_ctrls {
u32 max_perf;
u32 min_perf;
u32 desired_perf;
+ u32 energy_perf;
};
struct cppc_perf_fb_ctrs {
@@ -124,23 +128,110 @@ struct cppc_perf_fb_ctrs {
/* Per CPU container for runtime CPPC management. */
struct cppc_cpudata {
- int cpu;
+ struct list_head node;
struct cppc_perf_caps perf_caps;
struct cppc_perf_ctrls perf_ctrls;
struct cppc_perf_fb_ctrs perf_fb_ctrs;
- struct cpufreq_policy *cur_policy;
unsigned int shared_type;
cpumask_var_t shared_cpu_map;
};
+#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
+extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
+extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
-extern int acpi_get_psd_map(struct cppc_cpudata **);
+extern bool cppc_perf_ctrs_in_pcc(void);
+extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
+extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
+extern bool acpi_cpc_valid(void);
+extern bool cppc_allow_fast_switch(void);
+extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
extern unsigned int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
+extern bool cpc_supported_by_cpu(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
+extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
+extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
+extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
+extern int cppc_set_auto_sel(int cpu, bool enable);
+#else /* !CONFIG_ACPI_CPPC_LIB */
+static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_set_enable(int cpu, bool enable)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
+{
+ return -ENOTSUPP;
+}
+static inline bool cppc_perf_ctrs_in_pcc(void)
+{
+ return false;
+}
+static inline bool acpi_cpc_valid(void)
+{
+ return false;
+}
+static inline bool cppc_allow_fast_switch(void)
+{
+ return false;
+}
+static inline unsigned int cppc_get_transition_latency(int cpu)
+{
+ return CPUFREQ_ETERNAL;
+}
+static inline bool cpc_ffh_supported(void)
+{
+ return false;
+}
+static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
+{
+ return -ENOTSUPP;
+}
+static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_set_auto_sel(int cpu, bool enable)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+{
+ return -ENOTSUPP;
+}
+#endif /* !CONFIG_ACPI_CPPC_LIB */
#endif /* _CPPC_ACPI_H*/
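
Taken together, the expanded API reads the capability set once and then writes only the control registers. A hedged sketch of the cpufreq-style call sequence; the function name and target level are placeholders, and every call can fail on platforms without valid _CPC objects:

static int example_set_nominal(int cpu)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = {};
	int ret;

	if (!acpi_cpc_valid())
		return -ENODEV;		/* no usable _CPC data */

	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	ctrls.desired_perf = caps.nominal_perf;	/* sustained performance level */
	return cppc_set_perf(cpu, &ctrls);
}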
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 517a5231cc1b..be1dd4c1a917 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -27,6 +27,8 @@ struct ghes {
struct timer_list timer;
unsigned int irq;
};
+ struct device *dev;
+ struct list_head elist;
};
struct ghes_estatus_node {
@@ -53,33 +55,34 @@ enum {
GHES_SEV_PANIC = 0x3,
};
-int ghes_estatus_pool_init(int num_ghes);
-
-/* From drivers/edac/ghes_edac.c */
-
-#ifdef CONFIG_EDAC_GHES
-void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
+#ifdef CONFIG_ACPI_APEI_GHES
+/**
+ * ghes_register_vendor_record_notifier - register a notifier for vendor
+ * records that the kernel would otherwise ignore.
+ * @nb: pointer to the notifier_block structure of the event handler.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int ghes_register_vendor_record_notifier(struct notifier_block *nb);
-int ghes_edac_register(struct ghes *ghes, struct device *dev);
+/**
+ * ghes_unregister_vendor_record_notifier - unregister the previously
+ * registered vendor record notifier.
+ * @nb: pointer to the notifier_block structure of the vendor record handler.
+ */
+void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
-void ghes_edac_unregister(struct ghes *ghes);
+struct list_head *ghes_get_devices(void);
+void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
#else
-static inline void ghes_edac_report_mem_error(int sev,
- struct cper_sec_mem_err *mem_err)
-{
-}
+static inline struct list_head *ghes_get_devices(void) { return NULL; }
-static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline void ghes_edac_unregister(struct ghes *ghes)
-{
-}
+static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
#endif
+int ghes_estatus_pool_init(unsigned int num_ghes);
+
static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata)
{
return gdata->revision >> 8;
@@ -127,4 +130,7 @@ int ghes_notify_sea(void);
static inline int ghes_notify_sea(void) { return -ENOENT; }
#endif
+struct notifier_block;
+extern void ghes_register_report_chain(struct notifier_block *nb);
+extern void ghes_unregister_report_chain(struct notifier_block *nb);
#endif /* GHES_H */
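
A hedged sketch of the vendor-record notifier pair declared above; the section GUID value and the handler body are illustrative assumptions, with the notifier data being the generic error data section as handed out by the GHES core:

static const guid_t example_section_guid =	/* placeholder GUID value */
	GUID_INIT(0x12345678, 0x1234, 0x1234,
		  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);

static int example_vendor_record_cb(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct acpi_hest_generic_data *gdata = data;

	/* filter on the vendor section type before touching the payload */
	if (!guid_equal((guid_t *)gdata->section_type, &example_section_guid))
		return NOTIFY_DONE;
	/* ... decode the vendor payload ... */
	return NOTIFY_OK;
}

static struct notifier_block example_vendor_nb = {
	.notifier_call = example_vendor_record_cb,
};

/* ghes_register_vendor_record_notifier(&example_vendor_nb) on probe,
 * ghes_unregister_vendor_record_notifier(&example_vendor_nb) on remove. */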
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 4dec4ed138cd..9b373d172a77 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -9,18 +9,40 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+struct pcc_mbox_chan {
+ struct mbox_chan *mchan;
+ u64 shmem_base_addr;
+ u64 shmem_size;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+};
+
+/* Generic Communications Channel Shared Memory Region */
+#define PCC_SIGNATURE 0x50434300
+/* Generic Communications Channel Command Field */
+#define PCC_CMD_GENERATE_DB_INTR BIT(15)
+/* Generic Communications Channel Status Field */
+#define PCC_STATUS_CMD_COMPLETE BIT(0)
+#define PCC_STATUS_SCI_DOORBELL BIT(1)
+#define PCC_STATUS_ERROR BIT(2)
+#define PCC_STATUS_PLATFORM_NOTIFY BIT(3)
+/* Initiator Responder Communications Channel Flags */
+#define PCC_CMD_COMPLETION_NOTIFY BIT(0)
+
#define MAX_PCC_SUBSPACES 256
+
#ifdef CONFIG_PCC
-extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id);
-extern void pcc_mbox_free_channel(struct mbox_chan *chan);
+extern struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
+extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
#else
-static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+static inline struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
return ERR_PTR(-ENODEV);
}
-static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
+static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
#endif
#endif /* _PCC_H */
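
The request call now hands back a struct pcc_mbox_chan, so clients get the shared-memory geometry and timing limits without re-parsing the PCCT themselves. A hedged usage sketch; this fragment is assumed to sit in a driver probe function, and 'dev' and the subspace ID are placeholders:

	struct mbox_client cl = {
		.dev		= dev,		/* placeholder device */
		.knows_txdone	= true,
	};
	struct pcc_mbox_chan *pchan;
	void __iomem *shmem;

	pchan = pcc_mbox_request_channel(&cl, 0 /* placeholder subspace */);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* map the communication space advertised by the PCCT subspace entry */
	shmem = ioremap(pchan->shmem_base_addr, pchan->shmem_size);
	if (!shmem) {
		pcc_mbox_free_channel(pchan);
		return -ENOMEM;
	}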
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
deleted file mode 100644
index 967c552d1cd3..000000000000
--- a/include/acpi/pdc_intel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* _PDC bit definition for Intel processors */
-
-#ifndef __PDC_INTEL_H__
-#define __PDC_INTEL_H__
-
-#define ACPI_PDC_P_FFH (0x0001)
-#define ACPI_PDC_C_C1_HALT (0x0002)
-#define ACPI_PDC_T_FFH (0x0004)
-#define ACPI_PDC_SMP_C1PT (0x0008)
-#define ACPI_PDC_SMP_C2C3 (0x0010)
-#define ACPI_PDC_SMP_P_SWCOORD (0x0020)
-#define ACPI_PDC_SMP_C_SWCOORD (0x0040)
-#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
-#define ACPI_PDC_C_C1_FFH (0x0100)
-#define ACPI_PDC_C_C2C3_FFH (0x0200)
-#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
-
-#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_SMP_P_SWCOORD | \
- ACPI_PDC_SMP_P_HWCOORD | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
- ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_C_C1_FFH | \
- ACPI_PDC_C_C2C3_FFH)
-
-#endif /* __PDC_INTEL_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 8f6b2654c0b3..337ffa931ee8 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -148,15 +148,12 @@
*
*****************************************************************************/
-#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#if defined(__GNUC__)
#include <acpi/platform/acgcc.h>
#elif defined(_MSC_VER)
#include "acmsvc.h"
-#elif defined(__INTEL_COMPILER)
-#include <acpi/platform/acintel.h>
-
#endif
#if defined(_LINUX) || defined(__linux__)
@@ -212,6 +209,8 @@
#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI)
#include "acefi.h"
+#elif defined(__ZEPHYR__)
+#include "aczephyr.h"
#else
/* Unknown environment */
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index c3facf5f8495..7e67e3503f7b 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -35,7 +35,7 @@
#endif
-#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#if defined(__GNUC__)
#include "acgccex.h"
#elif defined(_MSC_VER)
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 7d63d03cf507..04b4bf620517 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,28 +3,20 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#ifndef __ACGCC_H__
#define __ACGCC_H__
-/*
- * Use compiler specific <stdarg.h> is a good practice for even when
- * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
- */
#ifndef va_arg
-#ifdef ACPI_USE_BUILTIN_STDARG
-typedef __builtin_va_list va_list;
-#define va_start(v, l) __builtin_va_start(v, l)
-#define va_end(v) __builtin_va_end(v)
-#define va_arg(v, l) __builtin_va_arg(v, l)
-#define va_copy(d, s) __builtin_va_copy(d, s)
+#ifdef __KERNEL__
+#include <linux/stdarg.h>
#else
#include <stdarg.h>
-#endif
-#endif
+#endif /* __KERNEL__ */
+#endif /* ! va_arg */
#define ACPI_INLINE __inline__
@@ -54,4 +46,30 @@ typedef __builtin_va_list va_list;
#define ACPI_USE_NATIVE_MATH64
+/* GCC did not support __has_attribute until 5.1. */
+
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+
+/*
+ * Mark intentional fallthrough explicitly to silence
+ * -Wimplicit-fallthrough in GCC 7.1+.
+ */
+
+#if __has_attribute(__fallthrough__)
+#define ACPI_FALLTHROUGH __attribute__((__fallthrough__))
+#endif
+
+/*
+ * Flexible array members are not allowed to be part of a union under
+ * C99, but this is not for any technical reason. Work around the
+ * limitation.
+ */
+#define ACPI_FLEX_ARRAY(TYPE, NAME) \
+ struct { \
+ struct { } __Empty_ ## NAME; \
+ TYPE NAME[]; \
+ }
+
#endif /* __ACGCC_H__ */
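
The anonymous-struct wrapper above lets a flexible array member live inside a union, which plain C99 rejects; the zero-size __Empty_##NAME field merely keeps the containing struct non-empty. A short illustrative use with an invented layout:

struct example_pkg {				/* hypothetical layout */
	u16 length;
	union {
		u32 as_word;			/* fixed-size view    */
		ACPI_FLEX_ARRAY(u8, bytes);	/* variable-size view */
	};
};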
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 7c88fd1de955..7c9f10e9633a 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
deleted file mode 100644
index e7fd5e71be62..000000000000
--- a/include/acpi/platform/acintel.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/******************************************************************************
- *
- * Name: acintel.h - VC specific defines, etc.
- *
- * Copyright (C) 2000 - 2020, Intel Corp.
- *
- *****************************************************************************/
-
-#ifndef __ACINTEL_H__
-#define __ACINTEL_H__
-
-/*
- * Use compiler specific <stdarg.h> is a good practice for even when
- * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
- */
-#ifndef va_arg
-#include <stdarg.h>
-#endif
-
-/* Configuration specific to Intel 64-bit C compiler */
-
-#define COMPILER_DEPENDENT_INT64 __int64
-#define COMPILER_DEPENDENT_UINT64 unsigned __int64
-#define ACPI_INLINE __inline
-
-/*
- * Calling conventions:
- *
- * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
- * ACPI_EXTERNAL_XFACE - External ACPI interfaces
- * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
- * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
- */
-#define ACPI_SYSTEM_XFACE
-#define ACPI_EXTERNAL_XFACE
-#define ACPI_INTERNAL_XFACE
-#define ACPI_INTERNAL_VAR_XFACE
-
-/* remark 981 - operands evaluated in no particular order */
-#pragma warning(disable:981)
-
-/* warn C4100: unreferenced formal parameter */
-#pragma warning(disable:4100)
-
-/* warn C4127: conditional expression is constant */
-#pragma warning(disable:4127)
-
-/* warn C4706: assignment within conditional expression */
-#pragma warning(disable:4706)
-
-/* warn C4214: bit field types other than int */
-#pragma warning(disable:4214)
-
-#endif /* __ACINTEL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 987e2af7c335..565341c826e3 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
@@ -114,10 +114,19 @@
#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
+#define acpi_uintptr_t uintptr_t
+
+#define ACPI_TO_INTEGER(p) ((uintptr_t)(p))
+#define ACPI_OFFSET(d, f) offsetof(d, f)
+
/* Use native linux version of acpi_os_allocate_zeroed */
#define USE_NATIVE_ALLOCATE_ZEROED
+/* Use logical addresses for accessing GPE registers in system memory */
+
+#define ACPI_GPE_USE_LOGICAL_ADDRESSES
+
/*
* Overrides for in-kernel ACPICA
*/
@@ -171,7 +180,11 @@
#define ACPI_USE_STANDARD_HEADERS
#ifdef ACPI_USE_STANDARD_HEADERS
+#include <stddef.h>
#include <unistd.h>
+#include <stdint.h>
+
+#define ACPI_OFFSET(d, f) offsetof(d, f)
#endif
/* Define/disable kernel-specific declarators */
@@ -190,7 +203,8 @@
#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\
defined(__aarch64__) || defined(__PPC64__) ||\
- defined(__s390x__)
+ defined(__s390x__) || defined(__loongarch__) ||\
+ (defined(__riscv) && (defined(__LP64__) || defined(_LP64)))
#define ACPI_MACHINE_WIDTH 64
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 04f88f2de781..600d4e2641da 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h
new file mode 100644
index 000000000000..703db4dc740d
--- /dev/null
+++ b/include/acpi/platform/aczephyr.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Module Name: aczephyr.h - OS specific defines, etc.
+ *
+ * Copyright (C) 2000 - 2023, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACZEPHYR_H__
+#define __ACZEPHYR_H__
+
+#define ACPI_MACHINE_WIDTH 64
+
+#define ACPI_NO_ERROR_MESSAGES
+#undef ACPI_DEBUG_OUTPUT
+#define ACPI_USE_SYSTEM_CLIBRARY
+#undef ACPI_DBG_TRACK_ALLOCATIONS
+#define ACPI_SINGLE_THREADED
+#define ACPI_USE_NATIVE_RSDP_POINTER
+
+#include <zephyr/kernel.h>
+#include <zephyr/device.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <zephyr/fs/fs.h>
+#include <zephyr/sys/printk.h>
+#include <zephyr/sys/__assert.h>
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_enable_dbg_print
+ *
+ * PARAMETERS: enable - Enable/disable debug print
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Enable/disable debug print
+ *
+ *****************************************************************************/
+
+void acpi_enable_dbg_print(bool enable);
+#endif
diff --git a/include/acpi/proc_cap_intel.h b/include/acpi/proc_cap_intel.h
new file mode 100644
index 000000000000..ddcdc41d6c3e
--- /dev/null
+++ b/include/acpi/proc_cap_intel.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Vendor-specific processor capability bit definitions
+ * for Intel processors. These bits are used to convey OSPM
+ * power management capabilities to the platform.
+ */
+
+#ifndef __PROC_CAP_INTEL_H__
+#define __PROC_CAP_INTEL_H__
+
+#define ACPI_PROC_CAP_P_FFH (0x0001)
+#define ACPI_PROC_CAP_C_C1_HALT (0x0002)
+#define ACPI_PROC_CAP_T_FFH (0x0004)
+#define ACPI_PROC_CAP_SMP_C1PT (0x0008)
+#define ACPI_PROC_CAP_SMP_C2C3 (0x0010)
+#define ACPI_PROC_CAP_SMP_P_SWCOORD (0x0020)
+#define ACPI_PROC_CAP_SMP_C_SWCOORD (0x0040)
+#define ACPI_PROC_CAP_SMP_T_SWCOORD (0x0080)
+#define ACPI_PROC_CAP_C_C1_FFH (0x0100)
+#define ACPI_PROC_CAP_C_C2C3_FFH (0x0200)
+#define ACPI_PROC_CAP_SMP_P_HWCOORD (0x0800)
+#define ACPI_PROC_CAP_COLLAB_PROC_PERF (0x1000)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SWSMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_SMP_P_SWCOORD | \
+ ACPI_PROC_CAP_SMP_P_HWCOORD | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_C_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C2C3 | \
+ ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_C_C1_FFH | \
+ ACPI_PROC_CAP_C_C2C3_FFH)
+
+#endif /* __PROC_CAP_INTEL_H__ */
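
These are the same bit values the deleted pdc_intel.h carried, renamed from ACPI_PDC_* to ACPI_PROC_CAP_* to reflect that they describe processor capabilities conveyed through _OSC as well as _PDC. A hedged sketch of assembling a capability word; the feature test is a placeholder:

	u32 cap = ACPI_PROC_CAP_EST_CAPABILITY_SWSMP;	/* P-state coordination */

	if (cpu_has_ffh_cstates)			/* placeholder feature test */
		cap |= ACPI_PROC_CAP_C_CAPABILITY_SMP;	/* C-state FFH support */

	/* 'cap' is then placed in the buffer handed to _OSC/_PDC evaluation */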
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 683e124ad517..3f34ebb27525 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -2,11 +2,16 @@
#ifndef __ACPI_PROCESSOR_H
#define __ACPI_PROCESSOR_H
-#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/thermal.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <asm/acpi.h>
#define ACPI_PROCESSOR_CLASS "processor"
@@ -436,9 +441,12 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
-int acpi_processor_get_limit_info(struct acpi_processor *pr);
+int acpi_processor_thermal_init(struct acpi_processor *pr,
+ struct acpi_device *device);
+void acpi_processor_thermal_exit(struct acpi_processor *pr,
+ struct acpi_device *device);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
+#ifdef CONFIG_CPU_FREQ
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else
@@ -450,6 +458,11 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
return;
}
-#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+#endif /* CONFIG_CPU_FREQ */
+
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#endif
#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index db8548ff03ce..3d538d4178ab 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -48,15 +48,16 @@ enum acpi_backlight_type {
acpi_backlight_video,
acpi_backlight_vendor,
acpi_backlight_native,
+ acpi_backlight_nvidia_wmi_ec,
+ acpi_backlight_apple_gmux,
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
+extern void acpi_video_register_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
-extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
/*
* Note: The value returned by acpi_video_handles_brightness_key_presses()
* may change over time and should not be cached.
@@ -65,9 +66,32 @@ extern bool acpi_video_handles_brightness_key_presses(void);
extern int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level);
+
+extern enum acpi_backlight_type __acpi_video_get_backlight_type(bool native,
+ bool *auto_detect);
+
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+ return __acpi_video_get_backlight_type(false, NULL);
+}
+
+/*
+ * This function MUST only be called by GPU drivers to check if the driver
+ * should register a backlight class device. This function not only checks
+ * if a GPU native backlight device should be registered, it *also* tells
+ * the ACPI video-detect code that native GPU backlight control is available.
+ * Therefore, calling this from any place other than the GPU driver is wrong!
+ * To check whether GPU native backlight control is in use elsewhere, use:
+ * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... }
+ */
+static inline bool acpi_video_backlight_use_native(void)
+{
+ return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
+}
#else
static inline int acpi_video_register(void) { return -ENODEV; }
static inline void acpi_video_unregister(void) { return; }
+static inline void acpi_video_register_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
@@ -77,8 +101,9 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return acpi_backlight_vendor;
}
-static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+static inline bool acpi_video_backlight_use_native(void)
{
+ return true;
}
static inline bool acpi_video_handles_brightness_key_presses(void)
{
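
A hedged sketch of the intended call site for the helper above: a GPU driver consults acpi_video_backlight_use_native() during probe and only registers its own backlight device when native control is selected, falling back to the ACPI video interface otherwise. The branch bodies are illustrative, not taken from any specific driver:

	/* in a GPU driver's probe path */
	if (acpi_video_backlight_use_native()) {
		/* register the GPU's native backlight class device */
	} else {
		/* let the ACPI video driver provide the backlight */
		acpi_video_register_backlight();
	}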
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 74b0612601dd..d436bee4d129 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -2,21 +2,22 @@
#
# asm headers that all architectures except um should have
# (This file is not included when SRCARCH=um since UML borrows several
-# asm headers from the host architecutre.)
+# asm headers from the host architecture.)
mandatory-y += atomic.h
+mandatory-y += archrandom.h
mandatory-y += barrier.h
mandatory-y += bitops.h
mandatory-y += bug.h
mandatory-y += bugs.h
mandatory-y += cacheflush.h
+mandatory-y += cfi.h
mandatory-y += checksum.h
mandatory-y += compat.h
mandatory-y += current.h
mandatory-y += delay.h
mandatory-y += device.h
mandatory-y += div64.h
-mandatory-y += dma-contiguous.h
mandatory-y += dma-mapping.h
mandatory-y += dma.h
mandatory-y += emergency-restart.h
@@ -31,15 +32,16 @@ mandatory-y += irq.h
mandatory-y += irq_regs.h
mandatory-y += irq_work.h
mandatory-y += kdebug.h
-mandatory-y += kmap_types.h
+mandatory-y += kmap_size.h
mandatory-y += kprobes.h
mandatory-y += linkage.h
mandatory-y += local.h
-mandatory-y += mm-arch-hooks.h
+mandatory-y += local64.h
mandatory-y += mmiowb.h
mandatory-y += mmu.h
mandatory-y += mmu_context.h
mandatory-y += module.h
+mandatory-y += module.lds.h
mandatory-y += msi.h
mandatory-y += pci.h
mandatory-y += percpu.h
@@ -50,6 +52,7 @@ mandatory-y += sections.h
mandatory-y += serial.h
mandatory-y += shmparam.h
mandatory-y += simd.h
+mandatory-y += softirq_stack.h
mandatory-y += switch_to.h
mandatory-y += timex.h
mandatory-y += tlbflush.h
diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h
new file mode 100644
index 000000000000..2866ae61b1cd
--- /dev/null
+++ b/include/asm-generic/access_ok.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ACCESS_OK_H__
+#define __ASM_GENERIC_ACCESS_OK_H__
+
+/*
+ * Checking whether a pointer is valid for user space access.
+ * These definitions work on most architectures, but overrides can
+ * be used where necessary.
+ */
+
+/*
+ * Architectures with compat tasks have a variable TASK_SIZE and should
+ * override this to a constant.
+ */
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX TASK_SIZE
+#endif
+
+#ifndef __access_ok
+/*
+ * 'size' is a compile-time constant for most callers, so optimize for
+ * this case to turn the check into a single comparison against a constant
+ * limit and catch all possible overflows.
+ * On architectures with separate user address space (m68k, s390, parisc,
+ * sparc64) or those without an MMU, this should always return true.
+ *
+ * This version was originally contributed by Jonas Bonn for the
+ * OpenRISC architecture, and was found to be the most efficient
+ * for constant 'size' and 'limit' values.
+ */
+static inline int __access_ok(const void __user *ptr, unsigned long size)
+{
+ unsigned long limit = TASK_SIZE_MAX;
+ unsigned long addr = (unsigned long)ptr;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU))
+ return true;
+
+ return (size <= limit) && (addr <= (limit - size));
+}
+#endif
+
+#ifndef access_ok
+#define access_ok(addr, size) likely(__access_ok(addr, size))
+#endif
+
+#endif
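
The single-comparison trick above is worth a worked example: rewriting the naive addr + size <= limit as size <= limit && addr <= limit - size keeps every intermediate value in range, so a huge 'size' cannot wrap addr + size past zero and sneak through. A hedged user-space demonstration of the same arithmetic, with a made-up 47-bit limit:

#include <stdint.h>
#include <stdio.h>

/* Same shape as __access_ok(); the limit is chosen only for the demo. */
static int demo_access_ok(uint64_t addr, uint64_t size)
{
	const uint64_t limit = 1ULL << 47;

	return (size <= limit) && (addr <= (limit - size));
}

int main(void)
{
	/* naive 'addr + size <= limit' wraps and would wrongly pass here */
	printf("%d\n", demo_access_ok(0x1000, UINT64_MAX));	/* 0: rejected */
	printf("%d\n", demo_access_ok(0x1000, 4096));		/* 1: allowed  */
	return 0;
}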
diff --git a/include/asm-generic/agp.h b/include/asm-generic/agp.h
new file mode 100644
index 000000000000..10db92ede168
--- /dev/null
+++ b/include/asm-generic/agp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_AGP_H
+#define _ASM_GENERIC_AGP_H
+
+#include <asm/io.h>
+
+#define map_page_into_agp(page) do {} while (0)
+#define unmap_page_from_agp(page) do {} while (0)
+#define flush_agp_cache() mb()
+
+#endif /* _ASM_GENERIC_AGP_H */
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 000000000000..3cd7f980cfdc
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644
index 379986e40159..000000000000
--- a/include/asm-generic/atomic-instrumented.h
+++ /dev/null
@@ -1,1789 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-
-#include <linux/build_bug.h>
-#include <linux/compiler.h>
-#include <linux/instrumented.h>
-
-static __always_inline int
-atomic_read(const atomic_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_read(v);
-}
-#define atomic_read atomic_read
-
-#if defined(arch_atomic_read_acquire)
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_read_acquire(v);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-static __always_inline void
-atomic_set(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_set(v, i);
-}
-#define atomic_set atomic_set
-
-#if defined(arch_atomic_set_release)
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_set_release(v, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-static __always_inline void
-atomic_add(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_add(i, v);
-}
-#define atomic_add atomic_add
-
-#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_add_return(i, v);
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#if defined(arch_atomic_add_return_acquire)
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_add_return_acquire(i, v);
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#if defined(arch_atomic_add_return_release)
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_add_return_release(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#if defined(arch_atomic_add_return_relaxed)
-static __always_inline int
-atomic_add_return_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_add(i, v);
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#if defined(arch_atomic_fetch_add_acquire)
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_add_acquire(i, v);
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic_fetch_add_release)
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_add_release(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#if defined(arch_atomic_fetch_add_relaxed)
-static __always_inline int
-atomic_fetch_add_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#endif
-
-static __always_inline void
-atomic_sub(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_sub(i, v);
-}
-#define atomic_sub atomic_sub
-
-#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_sub_return(i, v);
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#if defined(arch_atomic_sub_return_acquire)
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_sub_return_acquire(i, v);
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#if defined(arch_atomic_sub_return_release)
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_sub_return_release(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#if defined(arch_atomic_sub_return_relaxed)
-static __always_inline int
-atomic_sub_return_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_sub(i, v);
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#if defined(arch_atomic_fetch_sub_acquire)
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_acquire(i, v);
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic_fetch_sub_release)
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_release(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#if defined(arch_atomic_fetch_sub_relaxed)
-static __always_inline int
-atomic_fetch_sub_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic_inc)
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_inc(v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#if defined(arch_atomic_inc_return)
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_return(v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#if defined(arch_atomic_inc_return_acquire)
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_return_acquire(v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#if defined(arch_atomic_inc_return_release)
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_return_release(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#if defined(arch_atomic_inc_return_relaxed)
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_inc)
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_inc(v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#if defined(arch_atomic_fetch_inc_acquire)
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_acquire(v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic_fetch_inc_release)
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_release(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#if defined(arch_atomic_fetch_inc_relaxed)
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic_dec)
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_dec(v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#if defined(arch_atomic_dec_return)
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_return(v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#if defined(arch_atomic_dec_return_acquire)
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_return_acquire(v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#if defined(arch_atomic_dec_return_release)
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_return_release(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#if defined(arch_atomic_dec_return_relaxed)
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_dec)
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_dec(v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#if defined(arch_atomic_fetch_dec_acquire)
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_acquire(v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic_fetch_dec_release)
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_release(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#if defined(arch_atomic_fetch_dec_relaxed)
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-static __always_inline void
-atomic_and(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_and(i, v);
-}
-#define atomic_and atomic_and
-
-#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_and(i, v);
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#if defined(arch_atomic_fetch_and_acquire)
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_and_acquire(i, v);
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic_fetch_and_release)
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_and_release(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#if defined(arch_atomic_fetch_and_relaxed)
-static __always_inline int
-atomic_fetch_and_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic_andnot)
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_andnot(i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot)
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot(i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot_acquire)
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_acquire(i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic_fetch_andnot_release)
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_release(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic_fetch_andnot_relaxed)
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-static __always_inline void
-atomic_or(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_or(i, v);
-}
-#define atomic_or atomic_or
-
-#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_or(i, v);
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#if defined(arch_atomic_fetch_or_acquire)
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_or_acquire(i, v);
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic_fetch_or_release)
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_or_release(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#if defined(arch_atomic_fetch_or_relaxed)
-static __always_inline int
-atomic_fetch_or_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#endif
-
-static __always_inline void
-atomic_xor(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_xor(i, v);
-}
-#define atomic_xor atomic_xor
-
-#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_xor(i, v);
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#if defined(arch_atomic_fetch_xor_acquire)
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_acquire(i, v);
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic_fetch_xor_release)
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_release(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#if defined(arch_atomic_fetch_xor_relaxed)
-static __always_inline int
-atomic_fetch_xor_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_xchg(v, i);
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#if defined(arch_atomic_xchg_acquire)
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_xchg_acquire(v, i);
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#if defined(arch_atomic_xchg_release)
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_xchg_release(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#if defined(arch_atomic_xchg_relaxed)
-static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_cmpxchg(v, old, new);
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#if defined(arch_atomic_cmpxchg_acquire)
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_acquire(v, old, new);
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_cmpxchg_release)
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_release(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_cmpxchg_relaxed)
-static __always_inline int
-atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_try_cmpxchg)
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg(v, old, new);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_acquire)
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_release)
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_relaxed)
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_sub_and_test)
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_sub_and_test(i, v);
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#if defined(arch_atomic_dec_and_test)
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_and_test(v);
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#if defined(arch_atomic_inc_and_test)
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_and_test(v);
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#if defined(arch_atomic_add_negative)
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_add_negative(i, v);
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#if defined(arch_atomic_fetch_add_unless)
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_fetch_add_unless(v, a, u);
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#if defined(arch_atomic_add_unless)
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_add_unless(v, a, u);
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#if defined(arch_atomic_inc_not_zero)
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_not_zero(v);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-
-#if defined(arch_atomic_inc_unless_negative)
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_inc_unless_negative(v);
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#if defined(arch_atomic_dec_unless_positive)
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_unless_positive(v);
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#if defined(arch_atomic_dec_if_positive)
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic_dec_if_positive(v);
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
-
-static __always_inline s64
-atomic64_read(const atomic64_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic64_read(v);
-}
-#define atomic64_read atomic64_read
-
-#if defined(arch_atomic64_read_acquire)
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic64_read_acquire(v);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-static __always_inline void
-atomic64_set(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_set(v, i);
-}
-#define atomic64_set atomic64_set
-
-#if defined(arch_atomic64_set_release)
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_set_release(v, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-static __always_inline void
-atomic64_add(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_add(i, v);
-}
-#define atomic64_add atomic64_add
-
-#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_add_return(i, v);
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#if defined(arch_atomic64_add_return_acquire)
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_add_return_acquire(i, v);
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#if defined(arch_atomic64_add_return_release)
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_add_return_release(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#if defined(arch_atomic64_add_return_relaxed)
-static __always_inline s64
-atomic64_add_return_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_add(i, v);
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#if defined(arch_atomic64_fetch_add_acquire)
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_acquire(i, v);
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_add_release)
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_release(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#if defined(arch_atomic64_fetch_add_relaxed)
-static __always_inline s64
-atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#endif
-
-static __always_inline void
-atomic64_sub(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_sub(i, v);
-}
-#define atomic64_sub atomic64_sub
-
-#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_sub_return(i, v);
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#if defined(arch_atomic64_sub_return_acquire)
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_sub_return_acquire(i, v);
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#if defined(arch_atomic64_sub_return_release)
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_sub_return_release(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#if defined(arch_atomic64_sub_return_relaxed)
-static __always_inline s64
-atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub(i, v);
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#if defined(arch_atomic64_fetch_sub_acquire)
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_acquire(i, v);
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_sub_release)
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_release(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#if defined(arch_atomic64_fetch_sub_relaxed)
-static __always_inline s64
-atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic64_inc)
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_inc(v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#if defined(arch_atomic64_inc_return)
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_return(v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#if defined(arch_atomic64_inc_return_acquire)
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_return_acquire(v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#if defined(arch_atomic64_inc_return_release)
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_return_release(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#if defined(arch_atomic64_inc_return_relaxed)
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_inc)
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc(v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#if defined(arch_atomic64_fetch_inc_acquire)
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_acquire(v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_inc_release)
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_release(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#if defined(arch_atomic64_fetch_inc_relaxed)
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic64_dec)
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_dec(v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#if defined(arch_atomic64_dec_return)
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_return(v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#if defined(arch_atomic64_dec_return_acquire)
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_return_acquire(v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#if defined(arch_atomic64_dec_return_release)
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_return_release(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#if defined(arch_atomic64_dec_return_relaxed)
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_dec)
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec(v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#if defined(arch_atomic64_fetch_dec_acquire)
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_acquire(v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_dec_release)
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_release(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#if defined(arch_atomic64_fetch_dec_relaxed)
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-static __always_inline void
-atomic64_and(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_and(i, v);
-}
-#define atomic64_and atomic64_and
-
-#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_and(i, v);
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#if defined(arch_atomic64_fetch_and_acquire)
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_acquire(i, v);
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_and_release)
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_release(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#if defined(arch_atomic64_fetch_and_relaxed)
-static __always_inline s64
-atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic64_andnot)
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_andnot(i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot)
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot(i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_acquire)
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_release)
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_release(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_relaxed)
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-static __always_inline void
-atomic64_or(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_or(i, v);
-}
-#define atomic64_or atomic64_or
-
-#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_or(i, v);
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#if defined(arch_atomic64_fetch_or_acquire)
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_acquire(i, v);
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_or_release)
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_release(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#if defined(arch_atomic64_fetch_or_relaxed)
-static __always_inline s64
-atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#endif
-
-static __always_inline void
-atomic64_xor(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_xor(i, v);
-}
-#define atomic64_xor atomic64_xor
-
-#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor(i, v);
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#if defined(arch_atomic64_fetch_xor_acquire)
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_acquire(i, v);
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_xor_release)
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_release(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#if defined(arch_atomic64_fetch_xor_relaxed)
-static __always_inline s64
-atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_xchg(v, i);
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#if defined(arch_atomic64_xchg_acquire)
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_xchg_acquire(v, i);
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#if defined(arch_atomic64_xchg_release)
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_xchg_release(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#if defined(arch_atomic64_xchg_relaxed)
-static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg(v, old, new);
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#if defined(arch_atomic64_cmpxchg_acquire)
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_cmpxchg_release)
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_release(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_cmpxchg_relaxed)
-static __always_inline s64
-atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg)
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg(v, old, new);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_acquire)
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_release)
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_sub_and_test)
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_sub_and_test(i, v);
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#if defined(arch_atomic64_dec_and_test)
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_and_test(v);
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#if defined(arch_atomic64_inc_and_test)
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_and_test(v);
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#if defined(arch_atomic64_add_negative)
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_add_negative(i, v);
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#if defined(arch_atomic64_fetch_add_unless)
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_unless(v, a, u);
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#if defined(arch_atomic64_add_unless)
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#if defined(arch_atomic64_inc_not_zero)
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_not_zero(v);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#if defined(arch_atomic64_inc_unless_negative)
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_inc_unless_negative(v);
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#if defined(arch_atomic64_dec_unless_positive)
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_unless_positive(v);
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#if defined(arch_atomic64_dec_if_positive)
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- instrument_atomic_write(v, sizeof(*v));
- return arch_atomic64_dec_if_positive(v);
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
-#define xchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_acquire)
-#define xchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_release)
-#define xchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_relaxed)
-#define xchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
-#define cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_acquire)
-#define cmpxchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_release)
-#define cmpxchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_relaxed)
-#define cmpxchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
-#define cmpxchg64(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_acquire)
-#define cmpxchg64_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_release)
-#define cmpxchg64_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_relaxed)
-#define cmpxchg64_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#define cmpxchg_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define sync_cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_double(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 89bf97f3a7509b740845e51ddf31055b48a81f40
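The file deleted above is generated (by scripts/atomic/gen-atomic-instrumented.sh), and every wrapper in it has the same shape: record the memory access for the checker, then defer to the arch_ implementation; in the kernel the instrument_atomic_read()/instrument_atomic_write() calls feed KASAN/KCSAN. A minimal user-space sketch of that shape, assuming C11 atomics; instrument_access(), arch_atomic_add_sketch() and atomic_add_sketch() are names invented for this sketch, not kernel APIs:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static void instrument_access(const volatile void *p, size_t size)
{
	/* a real checker (KASAN/KCSAN) would validate the access here */
	printf("atomic access: %p, %zu bytes\n", (void *)p, size);
}

static void arch_atomic_add_sketch(int i, atomic_int *v)
{
	atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

static void atomic_add_sketch(int i, atomic_int *v)
{
	instrument_access(v, sizeof(*v));	/* hook first ... */
	arch_atomic_add_sketch(i, v);		/* ... then the real op */
}

int main(void)
{
	atomic_int v = 0;

	atomic_add_sketch(3, &v);
	printf("v = %d\n", atomic_load(&v));
	return 0;
}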
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
deleted file mode 100644
index 073cf40f431b..000000000000
--- a/include/asm-generic/atomic-long.h
+++ /dev/null
@@ -1,1014 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-long.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
-
-#include <linux/compiler.h>
-#include <asm/types.h>
-
-#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
-#else
-typedef atomic_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-#define atomic_long_cond_read_acquire atomic_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
-#endif
-
-#ifdef CONFIG_64BIT
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic64_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic64_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic64_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic64_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic64_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic64_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic64_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic64_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic64_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic64_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic64_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic64_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic64_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic64_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic64_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic64_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic64_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic64_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic64_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic64_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic64_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic64_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic64_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic64_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic64_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic64_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic64_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic64_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic64_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_acquire(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// a624200981f552b2c6be4f32fe44da8289f30d87
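atomic-long.h, also generated, reduced atomic_long_t to whichever fixed-width atomic type matches the machine word and forwarded each operation one-to-one. A sketch of that compile-time selection, assuming C11 <stdatomic.h>; my_atomic_long_t and the UINTPTR_MAX test are stand-ins for the kernel's atomic types and CONFIG_64BIT:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#if UINTPTR_MAX == 0xffffffffffffffffULL	/* stand-in for CONFIG_64BIT */
typedef _Atomic int64_t my_atomic_long_t;
#else
typedef _Atomic int32_t my_atomic_long_t;
#endif

static long my_atomic_long_add_return(long i, my_atomic_long_t *v)
{
	/* fetch_add returns the old value; the kernel's op returns the new one */
	return atomic_fetch_add(v, i) + i;
}

int main(void)
{
	my_atomic_long_t v = 40;

	printf("%ld\n", my_atomic_long_add_return(2, &v));	/* 42 */
	return 0;
}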
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 11f96f40f4a7..22142c71d35a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic C implementation of atomic counter operations. Usable on
- * UP systems only. Do not include in machine independent code.
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,56 +12,39 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-/*
- * atomic_$op() - $op integer to atomic variable
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
- * smp_mb__{before,after}_atomic().
- */
-
-/*
- * atomic_$op_return() - $op integer to atomic variable and returns the result
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does imply a full memory barrier.
- */
-
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
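On SMP the rewritten macros above build every operation out of a single primitive, arch_cmpxchg(): read the counter, compute the new value, and retry if another CPU changed the counter in between. A sketch of that retry loop, assuming C11 atomics; generic_fetch_add_sketch() is an illustrative name:

#include <stdatomic.h>
#include <stdio.h>

static int generic_fetch_add_sketch(int i, _Atomic int *v)
{
	int c = atomic_load_explicit(v, memory_order_relaxed);

	/* on failure, compare_exchange reloads the current value into c */
	while (!atomic_compare_exchange_weak(v, &c, c + i))
		;
	return c;	/* the value observed before the update */
}

int main(void)
{
	_Atomic int v = 5;

	printf("old=%d new=%d\n", generic_fetch_add_sketch(3, &v),
	       atomic_load(&v));
	return 0;
}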
@@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -110,87 +93,41 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */
-#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
-#endif
-#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
-#endif
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
ATOMIC_OP(xor, ^)
-#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#ifndef atomic_read
-#define atomic_read(v) READ_ONCE((v)->counter)
-#endif
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#include <linux/irqflags.h>
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#endif /* __ASM_GENERIC_ATOMIC_H */
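The net effect of this diff to atomic.h is a rename, not a behaviour change: the generic implementations gain a generic_ prefix and are exported under arch_atomic_* names, leaving the unprefixed atomic_*() namespace free for the instrumentation layer to define. A toy sketch of that two-level layering, assuming C11 atomics; my_atomic_read() and its printf() hook are illustrative stand-ins:

#include <stdatomic.h>
#include <stdio.h>

/* "arch" layer: the uninstrumented primitive */
#define arch_atomic_read(v)	atomic_load_explicit((v), memory_order_relaxed)

/* instrumentation layer: run the checker hook, then the arch op */
#define my_atomic_read(v) \
	(printf("read %p\n", (void *)(v)), arch_atomic_read(v))

int main(void)
{
	_Atomic int v = 7;

	printf("%d\n", my_atomic_read(&v));
	return 0;
}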
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..100d24b02e52 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
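The generic_atomic64_*() externs above are backed by lib/atomic64.c, which serializes each operation with one of a small array of hashed spinlocks; roughly (a sketch, not the verbatim implementation):

	s64 generic_atomic64_add_return(s64 a, atomic64_t *v)
	{
		unsigned long flags;
		raw_spinlock_t *lock = lock_addr(v);	/* hash v to one of the locks */
		s64 val;

		raw_spin_lock_irqsave(lock, flags);
		val = (v->counter += a);
		raw_spin_unlock_irqrestore(lock, flags);
		return val;
	}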
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 798027bb89be..0c0695763bea 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -13,6 +13,8 @@
#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>
#ifndef nop
@@ -20,6 +22,35 @@
#endif
/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
@@ -38,6 +69,10 @@
#define wmb() mb()
#endif
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
@@ -61,15 +96,15 @@
#ifdef CONFIG_SMP
#ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif
#ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif
#ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif
#else /* !CONFIG_SMP */
@@ -122,19 +157,19 @@ do { \
#ifdef CONFIG_SMP
#ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif
#ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif
#ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif
#ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif
#ifndef smp_load_acquire
@@ -158,7 +193,6 @@ do { \
#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
- compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
@@ -168,7 +202,6 @@ do { \
#define smp_load_acquire(p) \
({ \
__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
- compiletime_assert_atomic_type(*p); \
barrier(); \
(typeof(*p))___p1; \
})
@@ -177,13 +210,13 @@ do { \
#endif /* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
/**
@@ -250,5 +283,16 @@ do { \
#define pmem_wmb() wmb()
#endif
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory access, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situations, such waiting hurts
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.

+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
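The opt-in works by an architecture providing only the __-prefixed primitive; the generic header then layers the KCSAN hook on top. A hypothetical arch header (the asm string is illustrative only, not any particular architecture's real barrier):

	/* hypothetical <asm/barrier.h> snippet */
	#define __smp_mb()	asm volatile("fence rw,rw" ::: "memory")

	/*
	 * asm-generic/barrier.h then yields the instrumented wrapper:
	 *   #define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
	 */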
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index df9b5bc3d282..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -20,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dd90c9792909..e076e079f6b2 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,58 +11,60 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
- return 1;
-
- old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (!(READ_ONCE(*p) & mask))
- return 0;
-
- old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
return !!(old & mask);
}
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
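All of these helpers rely on the word/mask split from linux/bits.h; a worked example for a 64-bit build:

	/* For nr = 70 on a 64-bit kernel: */
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)		/* 70 / 64 == 1 */
	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* 1UL << 6     */
	/* so arch_set_bit(70, p) atomically ORs 0x40 into p[1]. */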
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
index 458c85ebcd15..7b129329046b 100644
--- a/include/asm-generic/bitops/builtin-ffs.h
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -8,11 +8,8 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
-static __always_inline int ffs(int x)
-{
- return __builtin_ffs(x);
-}
+#define ffs(x) __builtin_ffs(x)
#endif
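As the comment notes, ffs() follows the libc convention (1-based bit position, 0 when no bit is set), which is what distinguishes it from __ffs() and ffz():

	/*
	 * libc-style ffs() semantics:
	 *   ffs(0)    == 0   (no bit set)
	 *   ffs(1)    == 1   (bit 0 -> position 1)
	 *   ffs(0x80) == 8   (bit 7 -> position 8)
	 */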
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index e81868b2c0f0..323fd5d6ae26 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -8,7 +8,7 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
static inline int ffs(int x)
{
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
deleted file mode 100644
index 9fdf21302fdf..000000000000
--- a/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
- size, unsigned long offset);
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset);
-#endif
-
-#ifndef find_next_zero_bit
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
- long size, unsigned long offset);
-#endif
-
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-extern unsigned long find_first_bit(const unsigned long *addr,
- unsigned long size);
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-extern unsigned long find_first_zero_bit(const unsigned long *addr,
- unsigned long size);
-#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_first_bit
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#endif
-#ifndef find_first_zero_bit
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-#endif
-
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
- const unsigned long *addr,
- unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
- find_next_clump8((clump), (bits), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000000..564a8c675d85
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations; they should not be used directly
+ * in regular code.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ /*
+ * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+ * so `volatile` must always stay here with no cast-aways. See
+ * `Documentation/atomic_bitops.txt` for the details.
+ */
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit generic___set_bit
+#define const___clear_bit generic___clear_bit
+#define const___change_bit generic___change_bit
+#define const___test_and_set_bit generic___test_and_set_bit
+#define const___test_and_clear_bit generic___test_and_clear_bit
+#define const___test_and_change_bit generic___test_and_change_bit
+#define const_test_bit_acquire generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long val = *p;
+
+ return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
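The const_*() aliases pay off once linux/bitops.h dispatches on __builtin_constant_p(): a test against a compile-time-constant word can fold away entirely. A minimal sketch of the effect (the demo_ names are illustrative):

	static const unsigned long demo_features = BIT(3);

	static inline bool demo_has_feature(void)
	{
		/* resolves via const_test_bit() and can fold to "return true" */
		return test_bit(3, &demo_features);
	}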
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index fb2cb33a4013..4225a8ca9c1a 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -23,7 +23,7 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
@@ -36,7 +36,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*
* This is a relaxed atomic operation (no implied memory barriers).
*/
-static inline void clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
@@ -52,7 +52,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
@@ -65,9 +65,10 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -78,9 +79,10 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -91,9 +93,10 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index b9bec468ae03..542d3727ee4e 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -22,6 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,6 +38,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,30 +54,29 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
-#if defined(arch_clear_bit_unlock_is_negative_byte)
/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
+ * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
+ * it is negative, for unlock.
+ * @mask: Change the bits which are set in this mask.
+ * @addr: The address of the word containing the byte to change.
*
+ * Changes some of bits 0-6 in the word pointed to by @addr.
* This operation is atomic and provides release barrier semantics.
+ * Used to optimise some folio operations which are commonly paired
+ * with an unlock or end of writeback. Bit 7 is used as PG_waiters to
+ * indicate whether anybody is waiting for the unlock.
*
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
+ * Return: Whether the top bit of the byte is set.
*/
-static inline bool
-clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+ kcsan_release();
+ instrument_atomic_write(addr, sizeof(long));
+ return arch_xor_unlock_is_negative_byte(mask, addr);
}
-/* Let everybody know we have it. */
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-#endif
-
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
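The intended caller is the folio unlock path: clear PG_locked with release semantics and learn, in the same atomic operation, whether PG_waiters (bit 7 of the same byte) is set. A sketch modelled on folio_unlock():

	if (xor_unlock_is_negative_byte(1UL << PG_locked, &folio->flags))
		folio_wake_bit(folio, PG_locked);	/* someone is waiting */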
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 20f788a25ef9..2b238b161a62 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -14,7 +14,7 @@
#include <linux/instrumented.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -22,14 +22,15 @@
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
- * __clear_bit - Clears a bit in memory
+ * ___clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -37,14 +38,15 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -52,63 +54,104 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
+static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+ /*
+ * We treat non-atomic read-write bitops a little more specially.
+ * Given the operations here only modify a single bit, assuming
+ * non-atomicity of the writer is sufficient may be reasonable
+ * for certain usage (and follows the permissible nature of the
+ * assume-plain-writes-atomic rule):
+ * 1. report read-modify-write races -> check read;
+ * 2. do not report races with marked readers, but do report
+ * races with unmarked readers -> check "atomic" write.
+ */
+ kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ /*
+ * Use generic write instrumentation, in case other sanitizers
+ * or tools are enabled alongside KCSAN.
+ */
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ } else {
+ instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+ }
+}
+
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * ___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit_acquire(nr, addr);
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 188d3eba3ace..d51beff60375 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -9,46 +9,12 @@
#define BITOP_LE_SWIZZLE 0
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_zero_bit(addr, size, offset);
-}
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_bit(addr, size, offset);
-}
-
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
-{
- return find_first_zero_bit(addr, size);
-}
-
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-#ifndef find_next_zero_bit_le
-extern unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
-#endif
-
-#ifndef find_next_bit_le
-extern unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
#endif
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-#endif
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
static inline int test_bit_le(int nr, const void *addr)
{
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 3ae021368f48..14d4ec8c5152 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,7 @@
#include <asm/barrier.h>
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
@@ -15,8 +15,8 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(unsigned int nr,
- volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
if (READ_ONCE(*p) & mask)
return 1;
- old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
return !!(old & mask);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -54,38 +55,28 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
*
* See for example x86's implementation.
*/
-static inline void __clear_bit_unlock(unsigned int nr,
- volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
unsigned long old;
p += BIT_WORD(nr);
old = READ_ONCE(*p);
old &= ~BIT_MASK(nr);
- atomic_long_set_release((atomic_long_t *)p, old);
+ raw_atomic_long_set_release((atomic_long_t *)p, old);
}
-/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
- */
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
- volatile unsigned long *p)
+#ifndef arch_xor_unlock_is_negative_byte
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *p)
{
long old;
- unsigned long mask = BIT_MASK(nr);
- p += BIT_WORD(nr);
- old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
return !!(old & BIT(7));
}
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
#endif
+#include <asm-generic/bitops/instrumented-lock.h>
+
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
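The _lock/_unlock pair is enough to build a simple bit spinlock, which is the use case the kerneldoc above alludes to (sketch):

	while (test_and_set_bit_lock(0, &word))	/* acquire: spin until bit 0 was clear */
		cpu_relax();
	/* ... critical section ... */
	clear_bit_unlock(0, &word);		/* release */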
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..71f8d54a5195 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -2,108 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <asm-generic/bitops/generic-non-atomic.h>
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
- *p |= mask;
-}
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
- *p &= ~mask;
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-
- *p ^= mask;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 000000000000..0ddc78dfc358
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit arch___set_bit
+#define ___clear_bit arch___clear_bit
+#define ___change_bit arch___change_bit
+
+#define ___test_and_set_bit arch___test_and_set_bit
+#define ___test_and_clear_bit arch___test_and_clear_bit
+#define ___test_and_change_bit arch___test_and_change_bit
+
+#define _test_bit arch_test_bit
+#define _test_bit_acquire arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 3905c1c93dc2..1023e2a4bd37 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -23,4 +23,16 @@
#define BITS_PER_LONG_LONG 64
#endif
+/*
+ * small_const_nbits(n) is true precisely when it is known at compile-time
+ * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows
+ * various bit/bitmap APIs to provide a fast inline implementation. Bitmaps
+ * of size 0 are very rare, and a compile-time-known-size 0 is most likely
+ * a sign of error. They will be handled correctly by the bit/bitmap APIs,
+ * but using the out-of-line functions, so that the inline implementations
+ * can unconditionally dereference the pointer(s).
+ */
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
+
#endif /* __ASM_GENERIC_BITS_PER_LONG */
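How the predicate is typically used, in the spirit of linux/bitmap.h (a sketch; the helper name is illustrative):

	static inline void demo_bitmap_zero(unsigned long *dst, unsigned int nbits)
	{
		if (small_const_nbits(nbits))
			*dst = 0UL;	/* single-word fast path, no loop */
		else
			memset(dst, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
	}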
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 18b0f4eee8cb..6e794420bd39 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -4,6 +4,7 @@
#include <linux/compiler.h>
#include <linux/instrumentation.h>
+#include <linux/once_lite.h>
#define CUT_HERE "------------[ cut here ]------------\n"
@@ -17,7 +18,14 @@
#endif
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
+
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
#ifdef CONFIG_BUG
@@ -79,10 +87,12 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_FLAGS
extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+
+#ifndef __WARN_FLAGS
#define __WARN() __WARN_printf(TAINT_WARN, NULL)
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
@@ -90,7 +100,6 @@ void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
instrumentation_end(); \
} while (0)
#else
-extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
@@ -108,11 +117,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#endif
/* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
-
-void __warn(const char *file, int line, void *caller, unsigned taint,
- struct pt_regs *regs, struct warn_args *args);
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
@@ -140,39 +144,15 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_ON(1); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ON_ONCE(condition) \
+ DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
-#define WARN_ONCE(condition, format...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN(1, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ONCE(condition, format...) \
+ DO_ONCE_LITE_IF(condition, WARN, 1, format)
-#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_TAINT(1, taint, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_TAINT_ONCE(condition, taint, format...) \
+ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
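The three once-macros now share one helper from linux/once_lite.h; its shape is roughly the following (paraphrased, not the verbatim kernel definition):

	#define DO_ONCE_LITE_IF(condition, func, ...)			\
		({							\
			static bool __section(".data.once") __already_done; \
			bool __ret_cond = !!(condition);		\
									\
			if (unlikely(__ret_cond && !__already_done)) {	\
				__already_done = true;			\
				func(__VA_ARGS__);			\
			}						\
			unlikely(__ret_cond);				\
		})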
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
deleted file mode 100644
index 69021830f078..000000000000
--- a/include/asm-generic/bugs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_BUGS_H
-#define __ASM_GENERIC_BUGS_H
-/*
- * This file is included by 'init/main.c' to check for
- * architecture-dependent bugs.
- */
-
-static inline void check_bugs(void) { }
-
-#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4a674db4e1fa..7ee8a179d103 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -2,6 +2,8 @@
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+#include <linux/instrumented.h>
+
struct mm_struct;
struct vm_area_struct;
struct page;
@@ -49,10 +51,10 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
static inline void flush_dcache_page(struct page *page)
{
}
+
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
-
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
@@ -75,13 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
#define flush_icache_user_range flush_icache_range
#endif
-#ifndef flush_icache_page
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-#endif
-
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
@@ -96,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
}
#endif
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
@@ -105,14 +106,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
memcpy(dst, src, len); \
flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
+
#ifndef copy_from_user_page
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
#endif
#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h
new file mode 100644
index 000000000000..41fac3537bf9
--- /dev/null
+++ b/include/asm-generic/cfi.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CFI_H
+#define __ASM_GENERIC_CFI_H
+
+#endif /* __ASM_GENERIC_CFI_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index cd8b75aa770d..ad928cce268b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -2,6 +2,8 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
+#include <linux/bitops.h>
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -16,18 +18,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-#ifndef csum_partial_copy_nocheck
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
-#endif
-
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
@@ -43,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __sum16)~sum;
+ return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
}
#endif
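The new csum_fold() is branch-free but computes the same 16-bit one's-complement fold; a worked check with sum = 0x0001ffff:

	/*
	 * old: (0xffff + 0x0001) = 0x10000; fold again: 0x0000 + 0x0001 = 0x1;
	 *      ~0x1 = 0xfffe
	 * new: ~sum           = 0xfffe0000
	 *      ror32(sum, 16) = 0xffff0001
	 *      difference     = 0xfffeffff; >> 16 = 0xfffe
	 * Both yield the complemented end-around-carry sum of the two halves.
	 */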
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index f17f14f84d09..f27d66fdc00a 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
@@ -26,16 +26,16 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
raw_local_irq_save(flags);
switch (size) {
case 1: prev = *(u8 *)ptr;
- if (prev == old)
- *(u8 *)ptr = (u8)new;
+ if (prev == (old & 0xffu))
+ *(u8 *)ptr = (new & 0xffu);
break;
case 2: prev = *(u16 *)ptr;
- if (prev == old)
- *(u16 *)ptr = (u16)new;
+ if (prev == (old & 0xffffu))
+ *(u16 *)ptr = (new & 0xffffu);
break;
case 4: prev = *(u32 *)ptr;
- if (prev == old)
- *(u32 *)ptr = (u32)new;
+ if (prev == (old & 0xffffffffu))
+ *(u32 *)ptr = (new & 0xffffffffu);
break;
case 8: prev = *(u64 *)ptr;
if (prev == old)
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
/*
* Generic version of __cmpxchg64_local. Takes a u64 parameter.
*/
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 9a24510cd8c1..848de25fc4bf 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-#ifndef xchg
-
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
@@ -34,7 +32,7 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#else
local_irq_save(flags);
ret = *(volatile u8 *)ptr;
- *(volatile u8 *)ptr = x;
+ *(volatile u8 *)ptr = (x & 0xffu);
local_irq_restore(flags);
return ret;
#endif /* __xchg_u8 */
@@ -45,7 +43,7 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#else
local_irq_save(flags);
ret = *(volatile u16 *)ptr;
- *(volatile u16 *)ptr = x;
+ *(volatile u16 *)ptr = (x & 0xffffu);
local_irq_restore(flags);
return ret;
#endif /* __xchg_u16 */
@@ -56,7 +54,7 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#else
local_irq_save(flags);
ret = *(volatile u32 *)ptr;
- *(volatile u32 *)ptr = x;
+ *(volatile u32 *)ptr = (x & 0xffffffffu);
local_irq_restore(flags);
return ret;
#endif /* __xchg_u32 */
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#endif /* CONFIG_64BIT */
default:
- __xchg_called_with_bad_pointer();
+ __generic_xchg_called_with_bad_pointer();
return x;
}
}
-#define xchg(ptr, x) ({ \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
-#endif /* xchg */
-
/*
* Atomic compare and exchange.
*/
#include <asm-generic/cmpxchg-local.h>
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
#endif
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
#endif
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
#endif /* __ASM_GENERIC_CMPXCHG_H */
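The new masking in the sub-word cases guards against sign-extended inputs: everything is funnelled through unsigned long, so an old value that arrived as a negative s8 would otherwise never compare equal to the zero-extended byte loaded from memory. A sketch of the failure mode the masks close (demo_ name is illustrative):

	static bool demo_old_matches(volatile u8 *ptr, unsigned long old)
	{
		u8 prev = *ptr;	/* zero-extended load, e.g. 0x80 */

		/* with the mask, a sign-extended old such as (s8)0x80
		 * (0xffffffffffffff80 on 64-bit) still matches */
		return prev == (old & 0xffu);
	}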
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index a86f65bffab8..8392caea398f 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -2,6 +2,30 @@
#ifndef __ASM_GENERIC_COMPAT_H
#define __ASM_GENERIC_COMPAT_H
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ 100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY 0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#endif
+
+#ifndef compat_arg_u64
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
+#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#else
+#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo
+#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo
+#endif
+#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
+ ((u64)name##_hi << 32))
+#endif /* compat_arg_u64 */
+
/* These types are common across all compat ABIs */
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -20,6 +44,125 @@ typedef u16 compat_ushort_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
+typedef u32 compat_caddr_t;
typedef u32 compat_aio_context_t;
+typedef u32 compat_old_sigset_t;
+
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
+#ifndef __compat_uid32_t
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+#endif
+
+#ifndef compat_mode_t
+typedef u32 compat_mode_t;
+#endif
+
+#ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+#else
+typedef s64 compat_s64;
+typedef u64 compat_u64;
+#endif
+
+#ifndef _COMPAT_NSIG
+typedef u32 compat_sigset_word;
+#define _COMPAT_NSIG _NSIG
+#define _COMPAT_NSIG_BPW 32
+#endif
+
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+ compat_int_t f_type;
+ compat_int_t f_bsize;
+ compat_int_t f_blocks;
+ compat_int_t f_bfree;
+ compat_int_t f_bavail;
+ compat_int_t f_files;
+ compat_int_t f_ffree;
+ compat_fsid_t f_fsid;
+ compat_int_t f_namelen;
+ compat_int_t f_frsize;
+ compat_int_t f_flags;
+ compat_int_t f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned char __pad1[4 - sizeof(compat_mode_t)];
+ compat_ushort_t seq;
+ compat_ushort_t __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_ctime_high;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_ulong_t msg_stime;
+ compat_ulong_t msg_stime_high;
+ compat_ulong_t msg_rtime;
+ compat_ulong_t msg_rtime_high;
+ compat_ulong_t msg_ctime;
+ compat_ulong_t msg_ctime_high;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_atime_high;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_dtime_high;
+ compat_ulong_t shm_ctime;
+ compat_ulong_t shm_ctime_high;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+#endif
#endif
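How the compat_arg_u64 helpers are meant to be used: a 64-bit syscall argument arrives as two 32-bit register halves (ordered by endianness) and is glued back together in the handler. A sketch modelled on the real truncate64 compat entry (the demo_ name is hypothetical):

	COMPAT_SYSCALL_DEFINE3(demo_truncate64, const char __user *, pathname,
			       compat_arg_u64_dual(length))
	{
		/* lo/hi halves arrive in separate registers; reassemble */
		return do_sys_truncate(pathname, compat_arg_u64_glue(length));
	}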
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
index 3a2e224b9fa0..9c2aeecbd05a 100644
--- a/include/asm-generic/current.h
+++ b/include/asm-generic/current.h
@@ -2,9 +2,11 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
+#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
+#endif
#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index a3b98c86f077..13f5aa68a455 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -8,12 +8,14 @@
* Optimization for constant divisors on 32-bit machines:
* Copyright (C) 2006-2015 Nicolas Pitre
*
- * The semantics of do_div() are:
+ * The semantics of do_div() are, in C++ notation (observing that the
+ * name is a function-like macro and the n parameter has the semantics
+ * of a C++ reference):
*
- * uint32_t do_div(uint64_t *n, uint32_t base)
+ * uint32_t do_div(uint64_t &n, uint32_t base)
* {
- * uint32_t remainder = *n % base;
- * *n = *n / base;
+ * uint32_t remainder = n % base;
+ * n = n / base;
* return remainder;
* }
*
@@ -55,17 +57,11 @@
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
- * multiplications which ought to be much faster. And yet only if compiling
- * with a sufficiently recent gcc version to perform proper 64-bit constant
- * propagation.
+ * multiplications which ought to be much faster.
*
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
-#ifndef __div64_const32_is_OK
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-#endif
-
#define __div64_const32(n, ___b) \
({ \
/* \
@@ -228,8 +224,7 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
is_power_of_2(__base)) { \
__rem = (n) & (__base - 1); \
(n) >>= ilog2(__base); \
- } else if (__div64_const32_is_OK && \
- __builtin_constant_p(__base) && \
+ } else if (__builtin_constant_p(__base) && \
__base != 0) { \
uint32_t __res_lo, __n_lo = (n); \
(n) = __div64_const32(n, __base); \
@@ -239,8 +234,9 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
} else if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
- } else \
+ } else { \
__rem = __div64_32(&(n), __base); \
+ } \
__rem; \
})
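
Since do_div() updates its argument in place and returns the remainder, a minimal usage sketch follows (helper name ours). With a compile-time-constant divisor like this, the __div64_const32() path above turns the division into a few multiplications:

static inline u32 example_ns_to_ms(u64 *ns)
{
	/* *ns becomes the quotient in milliseconds; the return value
	 * is the remainder, i.e. the nanoseconds left over. */
	return do_div(*ns, NSEC_PER_MSEC);
}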
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index f24b0f9a4f05..000000000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
-#define _ASM_GENERIC_DMA_CONTIGUOUS_H
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index c13f46109e88..46a0016efd81 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -2,7 +2,7 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
return NULL;
}
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9def22e6e2b3..9d0479f50f97 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -19,12 +19,6 @@ extern void *early_memremap_prot(resource_size_t phys_addr,
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
-/*
- * Weak function called by early_ioremap_reset(). It does nothing, but
- * architectures may provide their own version to do any needed cleanups.
- */
-extern void early_ioremap_shutdown(void);
-
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 80ca61058dd2..b05253f68eaa 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -4,7 +4,6 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
enum {
- EI_ETYPE_NONE, /* Dummy value for undefined case */
EI_ETYPE_NULL, /* Return NULL if failure */
EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
@@ -20,16 +19,18 @@ struct pt_regs;
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
- * Whitelist ganerating macro. Specify functions which can be
- * error-injectable using this macro.
+ * Whitelist generating macro. Specify functions which can be error-injectable
+ * using this macro. If you are unsure what is required of error-injectable
+ * functions, please read the 'Error Injectable Functions' section of
+ * Documentation/fault-injection/fault-injection.rst.
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
- __attribute__((__section__("_error_injection_whitelist"))) \
+ __section("_error_injection_whitelist") \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
- };
+ }
void override_function_with_return(struct pt_regs *regs);
#else
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 365345f9a9e3..570cd4da7210 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -2,93 +2,10 @@
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
-#ifndef KSYM_FUNC
-#define KSYM_FUNC(x) x
-#endif
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define KSYM_ALIGN 4
-#elif defined(CONFIG_64BIT)
-#define KSYM_ALIGN 8
-#else
-#define KSYM_ALIGN 4
-#endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 4
-#endif
-
-.macro __put, val, name
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
- .long \val - ., \name - ., 0
-#elif defined(CONFIG_64BIT)
- .quad \val, \name, 0
-#else
- .long \val, \name, 0
-#endif
-.endm
-
/*
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
+ * <asm/export.h> and <asm-generic/export.h> are deprecated.
+ * Please include <linux/export.h> directly.
*/
-
-.macro ___EXPORT_SYMBOL name,val,sec
-#ifdef CONFIG_MODULES
- .section ___ksymtab\sec+\name,"a"
- .balign KSYM_ALIGN
-__ksymtab_\name:
- __put \val, __kstrtab_\name
- .previous
- .section __ksymtab_strings,"aMS",%progbits,1
-__kstrtab_\name:
- .asciz "\name"
- .previous
-#ifdef CONFIG_MODVERSIONS
- .section ___kcrctab\sec+\name,"a"
- .balign KCRC_ALIGN
-#if defined(CONFIG_MODULE_REL_CRCS)
- .long __crc_\name - .
-#else
- .long __crc_\name
-#endif
- .weak __crc_\name
- .previous
-#endif
-#endif
-.endm
-
-#if defined(CONFIG_TRIM_UNUSED_KSYMS)
-
-#include <linux/kconfig.h>
-#include <generated/autoksyms.h>
-
-.macro __ksym_marker sym
- .section ".discard.ksym","a"
-__ksym_marker_\sym:
- .previous
-.endm
-
-#define __EXPORT_SYMBOL(sym, val, sec) \
- __ksym_marker sym; \
- __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, val, sec, conf) \
- ___cond_export_sym(sym, val, sec, conf)
-#define ___cond_export_sym(sym, val, sec, enabled) \
- __cond_export_sym_##enabled(sym, val, sec)
-#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#define __cond_export_sym_0(sym, val, sec) /* nothing */
-
-#else
-#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#endif
-
-#define EXPORT_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
-#define EXPORT_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
-#define EXPORT_DATA_SYMBOL(name) \
- __EXPORT_SYMBOL(name, name,)
-#define EXPORT_DATA_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, name,_gpl)
+#include <linux/export.h>
#endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
index f9f18101ed36..6ccabb400aa6 100644
--- a/include/asm-generic/fb.h
+++ b/include/asm-generic/fb.h
@@ -1,13 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef __ASM_GENERIC_FB_H_
#define __ASM_GENERIC_FB_H_
-#include <linux/fb.h>
-#define fb_pgprotect(...) do {} while (0)
+/*
+ * Only include this header file from your architecture's <asm/fb.h>.
+ */
+
+#include <linux/io.h>
+#include <linux/mm_types.h>
+#include <linux/pgtable.h>
+
+struct fb_info;
+
+#ifndef pgprot_framebuffer
+#define pgprot_framebuffer pgprot_framebuffer
+static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long offset)
+{
+ return pgprot_writecombine(prot);
+}
+#endif
+#ifndef fb_is_primary_device
+#define fb_is_primary_device fb_is_primary_device
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
+#endif
+
+/*
+ * I/O helpers for the framebuffer. Prefer these functions over their
+ * regular counterparts. The regular I/O functions provide in-order
+ * access and swap bytes to/from little-endian ordering. Neither is
+ * required for framebuffers. Instead, the helpers read and write
+ * raw framebuffer data. Independent operations can be reordered for
+ * improved performance.
+ */
+
+#ifndef fb_readb
+static inline u8 fb_readb(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
+#define fb_readb fb_readb
+#endif
+
+#ifndef fb_readw
+static inline u16 fb_readw(const volatile void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+#define fb_readw fb_readw
+#endif
+
+#ifndef fb_readl
+static inline u32 fb_readl(const volatile void __iomem *addr)
+{
+ return __raw_readl(addr);
+}
+#define fb_readl fb_readl
+#endif
+
+#ifndef fb_readq
+#if defined(__raw_readq)
+static inline u64 fb_readq(const volatile void __iomem *addr)
+{
+ return __raw_readq(addr);
+}
+#define fb_readq fb_readq
+#endif
+#endif
+
+#ifndef fb_writeb
+static inline void fb_writeb(u8 b, volatile void __iomem *addr)
+{
+ __raw_writeb(b, addr);
+}
+#define fb_writeb fb_writeb
+#endif
+
+#ifndef fb_writew
+static inline void fb_writew(u16 b, volatile void __iomem *addr)
+{
+ __raw_writew(b, addr);
+}
+#define fb_writew fb_writew
+#endif
+
+#ifndef fb_writel
+static inline void fb_writel(u32 b, volatile void __iomem *addr)
+{
+ __raw_writel(b, addr);
+}
+#define fb_writel fb_writel
+#endif
+
+#ifndef fb_writeq
+#if defined(__raw_writeq)
+static inline void fb_writeq(u64 b, volatile void __iomem *addr)
+{
+ __raw_writeq(b, addr);
+}
+#define fb_writeq fb_writeq
+#endif
+#endif
+
+#ifndef fb_memcpy_fromio
+static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+ memcpy_fromio(to, from, n);
+}
+#define fb_memcpy_fromio fb_memcpy_fromio
+#endif
+
+#ifndef fb_memcpy_toio
+static inline void fb_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+ memcpy_toio(to, from, n);
+}
+#define fb_memcpy_toio fb_memcpy_toio
+#endif
+
+#ifndef fb_memset
+static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n)
+{
+ memset_io(addr, c, n);
+}
+#define fb_memset fb_memset_io
+#endif
#endif /* __ASM_GENERIC_FB_H_ */
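
To make the contrast with readl()/writel() concrete, here is a hedged sketch of a driver filling one scanline through the raw, reorderable helpers (function and variable names are ours):

static void example_fill_scanline(struct fb_info *info, unsigned int y,
				  const u32 *pixels, unsigned int words)
{
	u32 __iomem *dst = (u32 __iomem *)(info->screen_base +
					   y * info->fix.line_length);
	unsigned int i;

	/* Raw stores: no byte swapping, no ordering barriers. */
	for (i = 0; i < words; i++)
		fb_writel(pixels[i], dst + i);
}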
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index f4c3470480c7..2a19215baae5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,15 +6,22 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifndef futex_atomic_cmpxchg_inatomic
#ifndef CONFIG_SMP
/*
 * The following implementation is only for uniprocessor machines.
* It relies on preempt_disable() ensuring mutual exclusion.
*
*/
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+ futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+ futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
/**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -28,7 +35,7 @@
* -ENOSYS - Operation not supported
*/
static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
int oldval, ret;
u32 tmp;
@@ -75,7 +82,7 @@ out_pagefault_enable:
}
/**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
* uaddr with newval if the current value is
* oldval.
* @uval: pointer to store content of @uaddr
@@ -87,10 +94,9 @@ out_pagefault_enable:
* 0 - On success
* -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
u32 val;
@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return 0;
}
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
- return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
#endif
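
The renamed helpers keep their calling convention: *uval receives the value observed at uaddr, and the return code is 0, -EFAULT or -EAGAIN. A hedged sketch of the usual retry loop built on top (names ours):

static int example_futex_inc(u32 __user *uaddr)
{
	u32 cur, prev;
	int ret;

	if (get_user(cur, uaddr))
		return -EFAULT;

	for (;;) {
		ret = futex_atomic_cmpxchg_inatomic(&prev, uaddr, cur, cur + 1);
		if (ret == -EAGAIN)
			continue;		/* transient contention */
		if (ret)
			return ret;		/* -EFAULT */
		if (prev == cur)
			return 0;		/* exchanged cur for cur + 1 */
		cur = prev;		/* raced with another writer; retry */
	}
}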
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index e9f20b813a69..f2979e3a96b6 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -26,7 +26,7 @@
*
* The result is undefined if the size is 0.
*/
-static inline __attribute_const__ int get_order(unsigned long size)
+static __always_inline __attribute_const__ int get_order(unsigned long size)
{
if (__builtin_constant_p(size)) {
if (!size)
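
The inline-to-__always_inline change matters because get_order() has a constant-folding fast path that must stay visible to the compiler. A sketch with worked values, assuming PAGE_SIZE == 4096 (names ours):

static struct page *example_alloc_mib(void)
{
	/* Constant sizes fold at compile time:
	 *   get_order(4096) == 0   exactly one page
	 *   get_order(4097) == 1   rounds up to two pages
	 *   get_order(SZ_1M) == 8  1 MiB is 256 == 2^8 pages
	 */
	return alloc_pages(GFP_KERNEL, get_order(SZ_1M));
}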
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
deleted file mode 100644
index aea9aee1f3e9..000000000000
--- a/include/asm-generic/gpio.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_GPIO_H
-#define _ASM_GENERIC_GPIO_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-
-#ifdef CONFIG_GPIOLIB
-
-#include <linux/compiler.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/consumer.h>
-
-/* Platforms may implement their GPIO interface with library code,
- * at a small performance cost for non-inlined operations and some
- * extra memory (for code and for per-GPIO table entries).
- *
- * While the GPIO programming interface defines valid GPIO numbers
- * to be in the range 0..MAX_INT, this library restricts them to the
- * smaller range 0..ARCH_NR_GPIOS-1.
- *
- * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
- * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
- * actually an estimate of a board-specific value.
- */
-
-#ifndef ARCH_NR_GPIOS
-#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#else
-#define ARCH_NR_GPIOS 512
-#endif
-#endif
-
-/*
- * "valid" GPIO numbers are nonnegative and may be passed to
- * setup routines like gpio_request(). only some valid numbers
- * can successfully be requested and used.
- *
- * Invalid GPIO numbers are useful for indicating no-such-GPIO in
- * platform data and other tables.
- */
-
-static inline bool gpio_is_valid(int number)
-{
- return number >= 0 && number < ARCH_NR_GPIOS;
-}
-
-struct device;
-struct gpio;
-struct seq_file;
-struct module;
-struct device_node;
-struct gpio_desc;
-
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
-{
- return gpiod_to_chip(gpio_to_desc(gpio));
-}
-
-/* Always use the library code for GPIO management calls,
- * or when sleeping may be involved.
- */
-extern int gpio_request(unsigned gpio, const char *label);
-extern void gpio_free(unsigned gpio);
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- return gpiod_direction_input(gpio_to_desc(gpio));
-}
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
- return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
-}
-
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
- return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
- return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
-}
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
- return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
-}
-
-
-/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
- * the GPIO is constant and refers to some always-present controller,
- * giving direct access to chip registers and tight bitbanging loops.
- */
-static inline int __gpio_get_value(unsigned gpio)
-{
- return gpiod_get_raw_value(gpio_to_desc(gpio));
-}
-static inline void __gpio_set_value(unsigned gpio, int value)
-{
- return gpiod_set_raw_value(gpio_to_desc(gpio), value);
-}
-
-static inline int __gpio_cansleep(unsigned gpio)
-{
- return gpiod_cansleep(gpio_to_desc(gpio));
-}
-
-static inline int __gpio_to_irq(unsigned gpio)
-{
- return gpiod_to_irq(gpio_to_desc(gpio));
-}
-
-extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(const struct gpio *array, size_t num);
-extern void gpio_free_array(const struct gpio *array, size_t num);
-
-/*
- * A sysfs interface can be exported by individual drivers if they want,
- * but more typically is configured entirely from userspace.
- */
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- return gpiod_export(gpio_to_desc(gpio), direction_may_change);
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- return gpiod_export_link(dev, name, gpio_to_desc(gpio));
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
- gpiod_unexport(gpio_to_desc(gpio));
-}
-
-#else /* !CONFIG_GPIOLIB */
-
-#include <linux/kernel.h>
-
-static inline bool gpio_is_valid(int number)
-{
- /* only non-negative numbers are valid */
- return number >= 0;
-}
-
-/* platforms that don't directly support access to GPIOs through I2C, SPI,
- * or other blocking infrastructure can use these wrappers.
- */
-
-static inline int gpio_cansleep(unsigned gpio)
-{
- return 0;
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
- might_sleep();
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
- might_sleep();
- __gpio_set_value(gpio, value);
-}
-
-#endif /* !CONFIG_GPIOLIB */
-
-#endif /* _ASM_GENERIC_GPIO_H */
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index d14214dfc10b..7317e8258b48 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -7,9 +7,13 @@
typedef struct {
unsigned int __softirq_pending;
+#ifdef ARCH_WANTS_NMI_IRQSTAT
+ unsigned int __nmi_count;
+#endif
} ____cacheline_aligned irq_cpustat_t;
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+
#include <linux/irq.h>
#ifndef ack_bad_irq
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 8e1e6244a89d..6dcf4d576970 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -2,6 +2,9 @@
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
return mk_pte(page, pgprot);
@@ -19,9 +22,16 @@ static inline unsigned long huge_pte_dirty(pte_t pte)
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite_novma(pte);
}
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+#endif
+
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
return pte_mkdirty(pte);
@@ -32,6 +42,21 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
+{
+ return huge_pte_wrprotect(pte_mkuffd_wp(pte));
+}
+
+static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_uffd_wp(pte);
+}
+
+static inline int huge_pte_uffd_wp(pte_t pte)
+{
+ return pte_uffd_wp(pte);
+}
+
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
@@ -51,7 +76,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
set_pte_at(mm, addr, ptep, pte);
}
@@ -66,10 +91,10 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- ptep_clear_flush(vma, addr, ptep);
+ return ptep_clear_flush(vma, addr, ptep);
}
#endif
@@ -80,12 +105,11 @@ static inline int huge_pte_none(pte_t pte)
}
#endif
-#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
-static inline pte_t huge_pte_wrprotect(pte_t pte)
+/* Please refer to the comments above pte_none_mostly() for usage */
+static inline int huge_pte_none_mostly(pte_t pte)
{
- return pte_wrprotect(pte);
+ return huge_pte_none(pte) || is_pte_marker(pte);
}
-#endif
#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index e73a11850055..87e3d49a4e29 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -88,8 +88,9 @@
#define HV_CONNECT_PORT BIT(7)
#define HV_ACCESS_STATS BIT(8)
#define HV_DEBUGGING BIT(11)
-#define HV_CPU_POWER_MANAGEMENT BIT(12)
-
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
/*
* TSC page layout.
@@ -101,6 +102,15 @@ struct ms_hyperv_tsc_page {
volatile s64 tsc_offset;
} __packed;
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:11;
+ u64 pfn:52;
+ } __packed;
+};
+
/*
* The guest OS needs to register the guest ID with the hypervisor.
* The guest ID is a 64 bit entity and the structure of this ID is
@@ -136,11 +146,15 @@ struct ms_hyperv_tsc_page {
/* Declare the various hypercall operations. */
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_ENABLE_VP_VTL 0x000f
#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
#define HVCALL_SEND_IPI 0x000b
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_CREATE_VP 0x004e
#define HVCALL_GET_VP_REGISTERS 0x0050
#define HVCALL_SET_VP_REGISTERS 0x0051
#define HVCALL_POST_MESSAGE 0x005c
@@ -148,15 +162,30 @@ struct ms_hyperv_tsc_page {
#define HVCALL_POST_DEBUG_DATA 0x0069
#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_START_VP 0x0099
+#define HVCALL_GET_VP_ID_FROM_APIC_ID 0x009a
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+#define HVCALL_MMIO_READ 0x0106
+#define HVCALL_MMIO_WRITE 0x0107
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
#define HV_FLUSH_ALL_PROCESSORS BIT(0)
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+/* Extended capability bits */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
enum HV_GENERIC_SET_FORMAT {
HV_GENERIC_SET_SPARSE_4K,
HV_GENERIC_SET_ALL,
@@ -168,11 +197,19 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
#define HV_HYPERCALL_FAST_BIT BIT(16)
#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_NESTED BIT_ULL(31)
#define HV_HYPERCALL_REP_COMP_OFFSET 32
#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
#define HV_HYPERCALL_REP_START_OFFSET 48
#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
/* hypercall status code */
#define HV_STATUS_SUCCESS 0
@@ -180,11 +217,14 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
#define HV_STATUS_INVALID_ALIGNMENT 4
#define HV_STATUS_INVALID_PARAMETER 5
+#define HV_STATUS_ACCESS_DENIED 6
#define HV_STATUS_OPERATION_DENIED 8
#define HV_STATUS_INSUFFICIENT_MEMORY 11
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_TIME_OUT 120
+#define HV_STATUS_VTL_ALREADY_ENABLED 134
/*
* The Hyper-V TimeRefCount register and the TSC
@@ -213,6 +253,41 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+/*
+ * Define hypervisor message types. Some of the message types
+ * are x86/x64 specific, but there's no good way to separate
+ * them out into the arch-specific version of hyperv-tlfs.h
+ * because C doesn't provide a way to extend enum types.
+ * Keeping them all in the arch neutral hyperv-tlfs.h seems
+ * the least messy compromise.
+ */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
+};
+
/* Define synthetic interrupt controller message flags. */
union hv_message_flags {
__u8 asu8;
@@ -341,6 +416,11 @@ struct hv_vpset {
u64 bank_contents[];
} __packed;
+/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */
+#define HV_MAX_SPARSE_VCPU_BANKS (64)
+/* The number of vCPUs in one sparse bank */
+#define HV_VCPUS_PER_SPARSE_BANK (64)
+
/* HvCallSendSyntheticClusterIpi hypercall */
struct hv_send_ipi {
u32 vector;
@@ -366,8 +446,10 @@ struct hv_guest_mapping_flush {
* by the bitwidth of "additional_pages" in union hv_gpa_page_range.
*/
#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
-/* HvFlushGuestPhysicalAddressList hypercall */
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
union hv_gpa_page_range {
u64 address_space;
struct {
@@ -375,6 +457,12 @@ union hv_gpa_page_range {
u64 largepage:1;
u64 basepfn:52;
} page;
+ struct {
+ u64 reserved:12;
+ u64 page_size:1;
+ u64 reserved1:8;
+ u64 base_large_pfn:43;
+ };
};
/*
@@ -407,19 +495,111 @@ struct hv_tlb_flush_ex {
u64 gva_list[];
} __packed;
-/* HvRetargetDeviceInterrupt hypercall */
-union hv_msi_entry {
- u64 as_uint64;
+/* HvGetPartitionId hypercall (output only) */
+struct hv_get_partition_id {
+ u64 partition_id;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory {
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+/* Not a union in Windows, but useful for zeroing */
+union hv_proximity_domain_info {
struct {
- u32 address;
- u32 data;
- } __packed;
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+ };
+ u64 as_uint64;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+/* HvAddLogicalProcessor hypercall */
+struct hv_add_logical_processor_in {
+ u32 lp_index;
+ u32 apic_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+struct hv_add_logical_processor_out {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum HV_SUBNODE_TYPE
+{
+ HvSubnodeAny = 0,
+ HvSubnodeSocket = 1,
+ HvSubnodeAmdNode = 2,
+ HvSubnodeL3 = 3,
+ HvSubnodeCount = 4,
+ HvSubnodeInvalid = -1
+};
+
+/* HvCreateVp hypercall */
+struct hv_create_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+enum hv_interrupt_source {
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
};
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector:8;
+ u32 delivery_mode:3;
+ u32 destination_mode:1;
+ u32 delivery_status:1;
+ u32 interrupt_polarity:1;
+ u32 remote_irr:1;
+ u32 trigger_mode:1;
+ u32 interrupt_mask:1;
+ u32 reserved1:15;
+
+ u32 reserved2:24;
+ u32 destination_id:8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
struct hv_interrupt_entry {
- u32 source; /* 1 for MSI(-X) */
+ u32 source;
u32 reserved1;
- union hv_msi_entry msi_entry;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
} __packed;
/*
@@ -445,6 +625,37 @@ struct hv_retarget_device_interrupt {
struct hv_device_interrupt_target int_target;
} __packed __aligned(8);
+/*
+ * These Hyper-V registers provide information equivalent to the CPUID
+ * instruction on x86/x64.
+ */
+#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */
+#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */
+#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */
+
+/*
+ * Synthetic register definitions equivalent to MSRs on x86/x64
+ */
+#define HV_REGISTER_GUEST_CRASH_P0 0x00000210
+#define HV_REGISTER_GUEST_CRASH_P1 0x00000211
+#define HV_REGISTER_GUEST_CRASH_P2 0x00000212
+#define HV_REGISTER_GUEST_CRASH_P3 0x00000213
+#define HV_REGISTER_GUEST_CRASH_P4 0x00000214
+#define HV_REGISTER_GUEST_CRASH_CTL 0x00000215
+
+#define HV_REGISTER_GUEST_OS_ID 0x00090002
+#define HV_REGISTER_VP_INDEX 0x00090003
+#define HV_REGISTER_TIME_REF_COUNT 0x00090004
+#define HV_REGISTER_REFERENCE_TSC 0x00090017
+
+#define HV_REGISTER_SINT0 0x000A0000
+#define HV_REGISTER_SCONTROL 0x000A0010
+#define HV_REGISTER_SIEFP 0x000A0012
+#define HV_REGISTER_SIMP 0x000A0013
+#define HV_REGISTER_EOM 0x000A0014
+
+#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
+#define HV_REGISTER_STIMER0_COUNT 0x000B0001
/* HvGetVpRegisters hypercall input with variable size reg name list */
struct hv_get_vp_registers_input {
@@ -460,7 +671,6 @@ struct hv_get_vp_registers_input {
} element[];
} __packed;
-
/* HvGetVpRegisters returns an array of these output elements */
struct hv_get_vp_registers_output {
union {
@@ -494,4 +704,176 @@ struct hv_set_vp_registers_input {
} element[];
} __packed;
+enum hv_device_type {
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+typedef u16 hv_pci_rid;
+typedef u16 hv_pci_segment;
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf {
+ u16 as_uint16;
+
+ struct {
+ u8 function:3;
+ u8 device:5;
+ u8 bus;
+ };
+} __packed;
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+union hv_device_id {
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0:62;
+ u64 device_type:2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id:62;
+ u64 device_type:2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits:2;
+ u16 source_shadow:1;
+
+ u16 rsvdz0:11;
+ u16 device_type:2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3:14;
+ u16 device_type:2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count:30;
+ u32 device_type:2;
+ } acpi;
+} __packed;
+
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u64 flags;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+/*
+ * Version info reported by hypervisor
+ */
+union hv_hypervisor_version_info {
+ struct {
+ u32 build_number;
+
+ u32 minor_version : 16;
+ u32 major_version : 16;
+
+ u32 service_pack;
+
+ u32 service_number : 24;
+ u32 service_branch : 8;
+ };
+ struct {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ };
+};
+
+/*
+ * The whole argument must fit in a single page so that it can be passed to
+ * the hypervisor in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint {
+ u64 type:2;
+ u64 reserved:62;
+ union hv_gpa_page_range ranges[];
+} __packed;
+
+/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
+#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64
+
+struct hv_mmio_read_input {
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+} __packed;
+
+struct hv_mmio_read_output {
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+struct hv_mmio_write_input {
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
#endif
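
As a hedged sketch (helper name ours) of how the new reserved-bit masks compose with the existing field offsets when building a rep hypercall control word:

static u64 example_build_rep_control(u16 code, unsigned int rep_count,
				     unsigned int rep_start)
{
	u64 control = code;

	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
	control |= (u64)rep_start << HV_HYPERCALL_REP_START_OFFSET;

	/* Bits 27-31, 44-47 and 60-63 must remain zero; an oversized
	 * rep count or start index would spill into a reserved region. */
	if (control & HV_HYPERCALL_RSVD_MASK)
		return 0;
	return control;
}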
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
deleted file mode 100644
index 81dfa3ee5e06..000000000000
--- a/include/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index dabf8cb7203b..bac63e874c7b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -10,6 +10,7 @@
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
+#include <linux/instruction_pointer.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
@@ -61,6 +62,44 @@
#define __io_par(v) __io_ar(v)
#endif
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ * #include <...>
+ * ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+
+#else
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
__io_br();
- val = __le64_to_cpu(__raw_readq(addr));
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__io_bw();
- __raw_writeq(__cpu_to_le64(value), addr);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
- __raw_writew(cpu_to_le16(value), addr);
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
- __raw_writel(__cpu_to_le32(value), addr);
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
- __raw_writeq(__cpu_to_le64(value), addr);
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -911,18 +994,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr,
#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))
-#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-}
-#endif
-#endif /* CONFIG_GENERIC_IOMAP */
-
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
@@ -954,7 +1025,9 @@ static inline void *phys_to_virt(unsigned long address)
*
* ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
- * are not implemented we fall back to plain ioremap.
+ * are not implemented we fall back to plain ioremap. Conversely, ioremap_np()
+ * can provide stricter non-posted write semantics if the architecture
+ * implements them.
*/
#ifndef CONFIG_MMU
#ifndef ioremap
@@ -967,21 +1040,29 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot);
+
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot);
void iounmap(volatile void __iomem *addr);
+void generic_iounmap(volatile void __iomem *addr);
+#ifndef ioremap
+#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
+#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
#ifndef ioremap_wc
@@ -1007,6 +1088,23 @@ static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
}
#endif
+/*
+ * ioremap_np needs an explicit architecture implementation, as it
+ * requests stronger semantics than regular ioremap(). Portable drivers
+ * should instead use one of the higher-level abstractions, like
+ * devm_ioremap_resource(), to choose the correct variant for any given
+ * device and bus. Portable drivers with a good reason to want non-posted
+ * write semantics should always provide an ioremap() fallback in case
+ * ioremap_np() is not available.
+ */
+#ifndef ioremap_np
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
@@ -1016,6 +1114,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@@ -1030,15 +1129,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#ifndef xlate_dev_kmem_ptr
-#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
-static inline void *xlate_dev_kmem_ptr(void *addr)
-{
- return addr;
-}
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef pci_iounmap
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
#ifndef xlate_dev_mem_ptr
@@ -1056,20 +1150,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
-#ifdef CONFIG_VIRT_TO_BUS
-#ifndef virt_to_bus
-static inline unsigned long virt_to_bus(void *address)
-{
- return (unsigned long)address;
-}
-
-static inline void *bus_to_virt(unsigned long address)
-{
- return (void *)address;
-}
-#endif
-#endif
-
#ifndef memset_io
#define memset_io memset_io
/**
@@ -1122,6 +1202,8 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
}
#endif
+extern int devmem_is_allowed(unsigned long pfn);
+
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_IO_H */
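
The fallback pattern the ioremap_np() comment above prescribes, as a hedged sketch (names ours): prefer a non-posted mapping where the architecture provides one, otherwise fall back to ordinary posted MMIO:

static void __iomem *example_map_device(phys_addr_t base, size_t size)
{
	void __iomem *regs;

	regs = ioremap_np(base, size);	/* NULL when not implemented */
	if (!regs)
		regs = ioremap(base, size);
	return regs;
}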
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 649224664969..196087a8126e 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -93,22 +93,21 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#endif
-#ifndef ARCH_HAS_IOREMAP_WC
+#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_WT
+#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
+#ifndef ioremap_np
+/* See the comment in asm-generic/io.h about ioremap_np(). */
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
#endif
#include <asm-generic/pci_iomap.h>
diff --git a/include/asm-generic/kmap_size.h b/include/asm-generic/kmap_size.h
new file mode 100644
index 000000000000..6e36b2443ece
--- /dev/null
+++ b/include/asm-generic/kmap_size.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KMAP_SIZE_H
+#define _ASM_GENERIC_KMAP_SIZE_H
+
+/* For debug this provides guard pages between the maps */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL
+# define KM_MAX_IDX 33
+#else
+# define KM_MAX_IDX 16
+#endif
+
+#endif
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
deleted file mode 100644
index 9f95b7b63d19..000000000000
--- a/include/asm-generic/kmap_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_KMAP_TYPES_H
-#define _ASM_GENERIC_KMAP_TYPES_H
-
-#ifdef __WITH_KM_FENCE
-# define KM_TYPE_NR 41
-#else
-# define KM_TYPE_NR 20
-#endif
-
-#endif
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
index 4a982089c95c..060eab094e5a 100644
--- a/include/asm-generic/kprobes.h
+++ b/include/asm-generic/kprobes.h
@@ -10,11 +10,11 @@
*/
# define __NOKPROBE_SYMBOL(fname) \
static unsigned long __used \
- __attribute__((__section__("_kprobe_blacklist"))) \
+ __section("_kprobe_blacklist") \
_kbl_addr_##fname = (unsigned long)fname;
# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
/* Use this to forbid a kprobes attach on very low level functions */
-# define __kprobes __attribute__((__section__(".kprobes.text")))
+# define __kprobes __section(".kprobes.text")
# define nokprobe_inline __always_inline
#else
# define NOKPROBE_SYMBOL(fname)
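
Usage sketch for the two annotations (function names ours): either form keeps a symbol off kprobes' radar, one via placement in .kprobes.text, the other via a generated blacklist entry:

static int __kprobes example_trap_handler(struct pt_regs *regs)
{
	return 0;	/* lives in .kprobes.text, never probed */
}

static void example_nmi_helper(void)
{
	/* blacklisted through a _kprobe_blacklist entry */
}
NOKPROBE_SYMBOL(example_nmi_helper);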
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fca7f1d84818..7f97018df66f 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -42,6 +42,7 @@ typedef struct
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_try_cmpxchg(l, po, n) atomic_long_try_cmpxchg((&(l)->a), (po), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
index 765be0b7d883..14963a7a6253 100644
--- a/include/asm-generic/local64.h
+++ b/include/asm-generic/local64.h
@@ -42,7 +42,16 @@ typedef struct {
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l) local_inc_return(&(l)->a)
-#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+static inline s64 local64_cmpxchg(local64_t *l, s64 old, s64 new)
+{
+ return local_cmpxchg(&l->a, old, new);
+}
+
+static inline bool local64_try_cmpxchg(local64_t *l, s64 *old, s64 new)
+{
+ return local_try_cmpxchg(&l->a, (long *)old, new);
+}
+
#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
@@ -81,6 +90,7 @@ typedef struct {
#define local64_inc_return(l) atomic64_inc_return(&(l)->a)
#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_try_cmpxchg(l, po, n) atomic64_try_cmpxchg((&(l)->a), (po), (n))
#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
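
The point of the _try_ variants added here is that a failed compare hands back the freshly observed value, so the classic cmpxchg loop no longer needs to re-read. A hedged sketch (names ours):

static void example_local64_add_clamped(local64_t *l, s64 inc, s64 max)
{
	s64 old = local64_read(l);

	do {
		if (old + inc > max)
			return;		/* would exceed the clamp */
		/* on failure, 'old' is updated to the current value */
	} while (!local64_try_cmpxchg(l, &old, old + inc));
}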
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
new file mode 100644
index 000000000000..8a59b6e567df
--- /dev/null
+++ b/include/asm-generic/logic_io.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef _LOGIC_IO_H
+#define _LOGIC_IO_H
+#include <linux/types.h>
+
+/* include this file into asm/io.h */
+
+#ifdef CONFIG_INDIRECT_IOMEM
+
+#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK
+/*
+ * If you want emulated IO memory to fall back to 'normal' IO memory
+ * if a region wasn't registered as emulated, then you need to have
+ * all of the real_* functions implemented.
+ */
+#if !defined(real_ioremap) || !defined(real_iounmap) || \
+ !defined(real_raw_readb) || !defined(real_raw_writeb) || \
+ !defined(real_raw_readw) || !defined(real_raw_writew) || \
+ !defined(real_raw_readl) || !defined(real_raw_writel) || \
+ (defined(CONFIG_64BIT) && \
+ (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \
+ !defined(real_memset_io) || \
+ !defined(real_memcpy_fromio) || \
+ !defined(real_memcpy_toio)
+#error "Must provide fallbacks for real IO memory access"
+#endif /* defined ... */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+#define iounmap iounmap
+void iounmap(void volatile __iomem *addr);
+
+#define __raw_readb __raw_readb
+u8 __raw_readb(const volatile void __iomem *addr);
+
+#define __raw_readw __raw_readw
+u16 __raw_readw(const volatile void __iomem *addr);
+
+#define __raw_readl __raw_readl
+u32 __raw_readl(const volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+u64 __raw_readq(const volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define __raw_writeb __raw_writeb
+void __raw_writeb(u8 value, volatile void __iomem *addr);
+
+#define __raw_writew __raw_writew
+void __raw_writew(u16 value, volatile void __iomem *addr);
+
+#define __raw_writel __raw_writel
+void __raw_writel(u32 value, volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+void __raw_writeq(u64 value, volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define memset_io memset_io
+void memset_io(volatile void __iomem *addr, int value, size_t size);
+
+#define memcpy_fromio memcpy_fromio
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size);
+
+#define memcpy_toio memcpy_toio
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size);
+
+#endif /* CONFIG_INDIRECT_IOMEM */
+#endif /* _LOGIC_IO_H */
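
A hedged sketch of the contract the #error check above enforces: before including this header, an architecture opting into CONFIG_INDIRECT_IOMEM_FALLBACK points each real_* hook at its native accessor. The native_* names here are hypothetical placeholders:

#define real_ioremap(phys, size)	native_ioremap(phys, size)
#define real_iounmap(addr)		native_iounmap(addr)
#define real_raw_readb(addr)		native_readb(addr)
#define real_raw_writeb(val, addr)	native_writeb(val, addr)
/* likewise for the w/l (and, on 64-bit, q) widths and for the
 * memset_io/memcpy_fromio/memcpy_toio fallbacks */
#include <asm-generic/logic_io.h>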
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 7637fb46ba4f..6796abe1900e 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -6,47 +6,30 @@
#ifndef __ASSEMBLY__
+/*
+ * supports 3 memory models.
+ */
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#ifndef arch_pfn_to_nid
-#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
-#endif
-
-#ifndef arch_local_page_offset
-#define arch_local_page_offset(pfn, nid) \
- ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#endif
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-/*
- * supports 3 memory models.
- */
-#if defined(CONFIG_FLATMEM)
-
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
-#elif defined(CONFIG_DISCONTIGMEM)
-#define __pfn_to_page(pfn) \
-({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(__pfn); \
- NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
-})
+#ifndef pfn_valid
+static inline int pfn_valid(unsigned long pfn)
+{
+ /* avoid <linux/mm.h> include hell */
+ extern unsigned long max_mapnr;
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
-#define __page_to_pfn(pg) \
-({ const struct page *__pg = (pg); \
- struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
- (unsigned long)(__pg - __pgdat->node_mem_map) + \
- __pgdat->node_start_pfn; \
-})
+ return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
+}
+#define pfn_valid pfn_valid
+#endif
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -70,7 +53,7 @@
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
-#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+#endif /* CONFIG_FLATMEM/SPARSEMEM */
/*
* Convert a physical address to a Page Frame Number and back
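The FLATMEM pfn_valid() added above is plain range arithmetic against ARCH_PFN_OFFSET and max_mapnr. A standalone userspace model of that check, with hypothetical offset and map-size values:

#include <stdio.h>
#include <stdbool.h>

#define ARCH_PFN_OFFSET 0x100UL		/* hypothetical */
static unsigned long max_mapnr = 0x400;	/* hypothetical */

static bool pfn_valid(unsigned long pfn)
{
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}

int main(void)
{
	/* below, at the base of, and past the top of the map: 0 1 0 */
	printf("%d %d %d\n", pfn_valid(0xff), pfn_valid(0x100),
	       pfn_valid(0x500));
	return 0;
}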
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
deleted file mode 100644
index 5ff0e5193f85..000000000000
--- a/include/asm-generic/mm-arch-hooks.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Architecture specific mm hooks
- */
-
-#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
-#define _ASM_GENERIC_MM_ARCH_HOOKS_H
-
-/*
- * This file should be included through arch/../include/asm/Kbuild for
- * the architecture which doesn't need specific mm hooks.
- *
- * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
- * are used.
- */
-
-#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index 6be9106fb6fb..91727065bacb 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -3,44 +3,74 @@
#define __ASM_GENERIC_MMU_CONTEXT_H
/*
- * Generic hooks for NOMMU architectures, which do not need to do
- * anything special here.
+ * Generic hooks to implement no-op functionality.
*/
-#include <asm-generic/mm_hooks.h>
-
struct task_struct;
struct mm_struct;
+/*
+ * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode.
+ *
+ * @mm: the currently active mm context which is becoming lazy
+ * @tsk: task which is entering lazy TLB mode
+ *
+ * tsk->mm will be NULL
+ */
+#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
+#endif
+/**
+ * init_new_context - Initialize context of a new mm_struct.
+ * @tsk: task struct for the mm
+ * @mm: the new mm struct
+ * @return: 0 on success, -errno on failure
+ */
+#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
return 0;
}
+#endif
+/**
+ * destroy_context - Undo init_new_context when the mm is going away
+ * @mm: old mm struct
+ */
+#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
}
+#endif
-static inline void deactivate_mm(struct task_struct *task,
- struct mm_struct *mm)
-{
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
+/**
+ * activate_mm - called after exec switches the current task to a new mm, to switch to it
+ * @prev_mm: previous mm of this task
+ * @next_mm: new mm
+ */
+#ifndef activate_mm
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
{
+ switch_mm(prev_mm, next_mm, current);
}
+#endif
-static inline void activate_mm(struct mm_struct *prev_mm,
- struct mm_struct *next_mm)
+/**
+ * deactivate_mm - called when an mm is released after exit or exec switches away from it
+ * @tsk: the task
+ * @mm: the old mm
+ */
+#ifndef deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
{
}
+#endif
#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
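Each hook above is wrapped in #ifndef so an architecture can replace any single one of them by defining the name before including the generic header. A standalone model of that claim-the-name pattern (the hook is reduced to a toy signature purely for illustration):

#include <stdio.h>

/* "arch" side: provide a custom hook and claim the name */
#define init_new_context init_new_context
static int init_new_context(void)
{
	puts("arch-specific hook");
	return 0;
}

/* "generic" side: the no-op fallback compiles out once claimed */
#ifndef init_new_context
static int init_new_context(void)
{
	return 0;
}
#endif

int main(void)
{
	return init_new_context();	/* runs the arch version */
}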
diff --git a/include/asm-generic/module.lds.h b/include/asm-generic/module.lds.h
new file mode 100644
index 000000000000..f210d5c1b78b
--- /dev/null
+++ b/include/asm-generic/module.lds.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_MODULE_LDS_H
+#define __ASM_GENERIC_MODULE_LDS_H
+
+/*
+ * <asm/module.lds.h> can specify arch-specific sections for linking modules.
+ * Empty for the asm-generic header.
+ */
+
+#endif /* __ASM_GENERIC_MODULE_LDS_H */
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index c5edc5e08b94..99935779682d 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -22,38 +22,110 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
+#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>
+#define VTPM_BASE_ADDRESS 0xfed40000
+
struct ms_hyperv_info {
u32 features;
+ u32 priv_high;
u32 misc_features;
u32 hints;
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
+ u8 vtl;
+ union {
+ u32 isolation_config_a;
+ struct {
+ u32 paravisor_present : 1;
+ u32 reserved_a1 : 31;
+ };
+ };
+ union {
+ u32 isolation_config_b;
+ struct {
+ u32 cvm_type : 4;
+ u32 reserved_b1 : 1;
+ u32 shared_gpa_boundary_active : 1;
+ u32 shared_gpa_boundary_bits : 6;
+ u32 reserved_b2 : 20;
+ };
+ };
+ u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
+extern bool hv_nested;
+
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
+
+/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
+static inline int hv_result(u64 status)
+{
+ return status & HV_HYPERCALL_RESULT_MASK;
+}
+
+static inline bool hv_result_success(u64 status)
+{
+ return hv_result(status) == HV_STATUS_SUCCESS;
+}
+
+static inline unsigned int hv_repcomp(u64 status)
+{
+ /* Bits [43:32] of status have 'Reps completed' data. */
+ return (status & HV_HYPERCALL_REP_COMP_MASK) >>
+ HV_HYPERCALL_REP_COMP_OFFSET;
+}
+
+/*
+ * Rep hypercalls. Callers of this function are expected to ensure that
+ * rep_count and varhead_size comply with the Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ u64 control = code;
+ u64 status;
+ u16 rep_comp;
+
+ control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+ control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+ do {
+ status = hv_do_hypercall(control, input, output);
+ if (!hv_result_success(status))
+ return status;
+
+ rep_comp = hv_repcomp(status);
+
+ control &= ~HV_HYPERCALL_REP_START_MASK;
+ control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+ touch_nmi_watchdog();
+ } while (rep_comp < rep_count);
+
+ return status;
+}
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
-static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
- __u64 d_info2)
+static inline u64 hv_generate_guest_id(u64 kernel_version)
{
- __u64 guest_id = 0;
+ u64 guest_id;
- guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
- guest_id |= (d_info1 << 48);
+ guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
guest_id |= (kernel_version << 16);
- guest_id |= d_info2;
return guest_id;
}
-
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
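hv_do_rep_hypercall() above feeds each returned "reps completed" count back into the "rep start" field of the control word until everything is done. A userspace model of that bookkeeping — the bit offsets mirror the HV_HYPERCALL_* layout the header assumes (count/completed at bit 32, start at bit 48), and the fake hypercall simply finishes three reps per call:

#include <stdint.h>
#include <stdio.h>

#define REP_COMP_OFFSET		32
#define REP_START_OFFSET	48
#define REP_START_MASK		(0xfffULL << REP_START_OFFSET)

/* stand-in for hv_do_hypercall(): completes at most 3 reps per call */
static uint64_t fake_hypercall(uint64_t control)
{
	uint16_t count = (control >> REP_COMP_OFFSET) & 0xfff;
	uint16_t start = (control >> REP_START_OFFSET) & 0xfff;
	uint16_t done = start + 3 > count ? count : start + 3;

	return (uint64_t)done << REP_COMP_OFFSET;	/* status word */
}

int main(void)
{
	uint64_t control = 0x42 | (10ULL << REP_COMP_OFFSET);	/* 10 reps */
	uint16_t rep_comp;

	do {
		uint64_t status = fake_hypercall(control);

		rep_comp = (status >> REP_COMP_OFFSET) & 0xfff;
		control &= ~REP_START_MASK;
		control |= (uint64_t)rep_comp << REP_START_OFFSET;
		printf("completed %u/10\n", rep_comp);
	} while (rep_comp < 10);
	return 0;
}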
@@ -85,20 +157,27 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
* possibly deliver another msg from the
* hypervisor
*/
- hv_signal_eom();
+ hv_set_msr(HV_MSR_EOM, 0);
}
}
-void hv_setup_vmbus_irq(void (*handler)(void));
-void hv_remove_vmbus_irq(void);
-void hv_enable_vmbus_irq(void);
-void hv_disable_vmbus_irq(void);
+int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
+
+void hv_setup_vmbus_handler(void (*handler)(void));
+void hv_remove_vmbus_handler(void);
+void hv_setup_stimer0_handler(void (*handler)(void));
+void hv_remove_stimer0_handler(void);
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
+extern int vmbus_interrupt;
+extern int vmbus_irq;
+
+extern bool hv_root_partition;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Hypervisor's notion of virtual processor ID is different from
@@ -109,9 +188,21 @@ void hv_remove_crash_handler(void);
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
+extern u64 (*hv_read_reference_counter)(void);
+
/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL U32_MAX
+int __init hv_common_init(void);
+void __init hv_common_free(void);
+void __init ms_hyperv_late_init(void);
+int hv_common_cpu_init(unsigned int cpu);
+int hv_common_cpu_die(unsigned int cpu);
+
+void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
+void hv_free_hyperv_page(void *addr);
+
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
* @cpu_number: CPU number in Linux terms
@@ -128,13 +219,15 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
return hv_vp_index[cpu_number];
}
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
- const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool (*func)(int cpu))
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;
- /* valid_bank_mask can represent up to 64 banks */
- if (hv_max_vp_index / 64 >= 64)
+ /* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */
+ if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
return 0;
/*
@@ -142,18 +235,20 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
* structs are not cleared between calls, we risk flushing unneeded
* vCPUs otherwise.
*/
- for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+ for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
vpset->bank_contents[vcpu_bank] = 0;
/*
* Some banks may end up being empty but this is acceptable.
*/
for_each_cpu(cpu, cpus) {
+ if (func && func(cpu))
+ continue;
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
- vcpu_bank = vcpu / 64;
- vcpu_offset = vcpu % 64;
+ vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
+ vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
__set_bit(vcpu_offset, (unsigned long *)
&vpset->bank_contents[vcpu_bank]);
if (vcpu_bank >= nr_bank)
@@ -163,20 +258,47 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
return nr_bank;
}
+/*
+ * Convert a Linux cpumask into a Hyper-V VPset. In the _skip variant,
+ * 'func' is called for each CPU present in cpumask. If 'func' returns
+ * true, that CPU is skipped -- i.e., that CPU from cpumask is *not*
+ * added to the Hyper-V VPset. If 'func' is NULL, no CPUs are
+ * skipped.
+ */
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ return __cpumask_to_vpset(vpset, cpus, NULL);
+}
+
+static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool (*func)(int cpu))
+{
+ return __cpumask_to_vpset(vpset, cpus, func);
+}
+
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
+enum hv_isolation_type hv_get_isolation_type(void);
+bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
+u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
+bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
+static inline void ms_hyperv_late_init(void) {}
+static inline bool hv_is_isolation_supported(void) { return false; }
+static inline enum hv_isolation_type hv_get_isolation_type(void)
+{
+ return HV_ISOLATION_TYPE_NONE;
+}
#endif /* CONFIG_HYPERV */
-#if IS_ENABLED(CONFIG_HYPERV)
-extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
-extern void hv_remove_stimer0_irq(int irq);
-#endif
-
#endif
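The bank/offset split in __cpumask_to_vpset() is easiest to see with concrete numbers; the diff replaces the literal 64 with HV_VCPUS_PER_SPARSE_BANK, so 64 VPs per bank is used below. A standalone model with hypothetical VP indices:

#include <stdint.h>
#include <stdio.h>

#define VCPUS_PER_BANK 64

int main(void)
{
	uint64_t bank_contents[2] = { 0, 0 };
	int vcpus[] = { 3, 64, 70 };	/* hypothetical VP indices */
	int nr_bank = 1;

	for (unsigned int i = 0; i < sizeof(vcpus) / sizeof(vcpus[0]); i++) {
		int bank = vcpus[i] / VCPUS_PER_BANK;
		int off = vcpus[i] % VCPUS_PER_BANK;

		bank_contents[bank] |= 1ULL << off;
		if (bank >= nr_bank)
			nr_bank = bank + 1;
	}
	printf("nr_bank=%d bank0=%#llx bank1=%#llx\n", nr_bank,
	       (unsigned long long)bank_contents[0],
	       (unsigned long long)bank_contents[1]);
	/* nr_bank=2 bank0=0x8 bank1=0x41 */
	return 0;
}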
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index e6795f088bdd..124c734ca5d9 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#ifdef CONFIG_GENERIC_MSI_IRQ
+
#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
#endif
@@ -22,12 +24,18 @@ struct msi_desc;
typedef struct msi_alloc_info {
struct msi_desc *desc;
irq_hw_number_t hwirq;
+ unsigned long flags;
union {
unsigned long ul;
void *ptr;
} scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
} msi_alloc_info_t;
+/* Device generating MSIs is proxying for another device */
+#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+
#define GENERIC_MSI_DOMAIN_OPS 1
+#endif /* CONFIG_GENERIC_MSI_IRQ */
+
#endif
diff --git a/include/asm-generic/nommu_context.h b/include/asm-generic/nommu_context.h
new file mode 100644
index 000000000000..4f916f9e16cd
--- /dev/null
+++ b/include/asm-generic/nommu_context.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NOMMU_H
+#define __ASM_GENERIC_NOMMU_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+#include <asm-generic/mm_hooks.h>
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_GENERIC_NOMMU_H */
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
new file mode 100644
index 000000000000..c32e0cf23c90
--- /dev/null
+++ b/include/asm-generic/numa.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NUMA_H
+#define __ASM_GENERIC_NUMA_H
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+int __node_distance(int from, int to);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern bool numa_off;
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+void numa_clear_node(unsigned int cpu);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ return node_to_cpumask_map[node];
+}
+#endif
+
+void __init arch_numa_init(void);
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_free_distance(void);
+void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+int __init early_cpu_to_node(int cpu);
+void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
+
+#else /* CONFIG_NUMA */
+
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void arch_numa_init(void) { }
+static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
+#endif /* CONFIG_NUMA */
+
+#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index fe801f01625e..9773582fd96e 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -63,11 +63,7 @@ extern unsigned long memory_end;
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
-#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
-#else
#define PAGE_OFFSET (0)
-#endif
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
@@ -78,8 +74,16 @@ extern unsigned long memory_end;
#define __va(x) ((void *)((unsigned long) (x)))
#define __pa(x) ((unsigned long) (x))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+#define virt_to_pfn virt_to_pfn
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+	return __va(pfn << PAGE_SHIFT);
+}
+#define pfn_to_virt pfn_to_virt
#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
@@ -88,8 +92,6 @@ extern unsigned long memory_end;
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-
#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
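With the shift applied to the PFN before __va() (rather than to the returned pointer, which would not even compile given the void * cast above), virt_to_pfn() and pfn_to_virt() are exact inverses for an identity-mapped kernel. A userspace model of the round trip, assuming a PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed */
#define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)(unsigned long)(x))

static unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

static void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}

int main(void)
{
	unsigned long pfn = 0x1234;

	printf("round trip: %#lx\n", virt_to_pfn(pfn_to_virt(pfn)));
	/* prints 0x1234 */
	return 0;
}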
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 6bb3cd3d695a..6869f1061528 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -1,17 +1,30 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-generic/pci.h
- *
- * Copyright (C) 2003 Russell King
- */
-#ifndef _ASM_GENERIC_PCI_H
-#define _ASM_GENERIC_PCI_H
+/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+#ifndef __ASM_GENERIC_PCI_H
+#define __ASM_GENERIC_PCI_H
+
+#ifndef PCIBIOS_MIN_IO
+#define PCIBIOS_MIN_IO 0
+#endif
+
+#ifndef PCIBIOS_MIN_MEM
+#define PCIBIOS_MIN_MEM 0
+#endif
+
+#ifndef pcibios_assign_all_busses
+/* For bootloaders that do not initialize the PCI bus */
+#define pcibios_assign_all_busses() 1
+#endif
+
+/* Enable generic resource mapping code in drivers/pci/ */
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
{
- return channel ? 15 : 14;
+ /* always show the domain in /proc */
+ return 1;
}
-#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+#endif /* CONFIG_PCI_DOMAINS */
-#endif /* _ASM_GENERIC_PCI_H */
+#endif /* __ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index d4f16dcc2ed7..8fbb0a55545d 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -18,12 +18,15 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
 * Do not call this directly; it exists to make it easier for architectures
* to override */
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr);
+#elif !defined(CONFIG_HAS_IOPORT_MAP)
+#define __pci_ioport_map(dev, port, nr) NULL
#else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif
@@ -50,6 +53,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
-#endif /* __ASM_GENERIC_IO_H */
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
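Declaring pci_iounmap() here lets drivers pair it with pci_iomap() from this header alone. A hedged sketch of that pairing — demo_probe() and the register offset are hypothetical, not taken from any real driver:

static int demo_probe(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0);	/* BAR 0, map the whole BAR */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical device register */
	pci_iounmap(pdev, regs);	/* now declared by this header */
	return 0;
}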
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 35e4a53b83e6..94cbd50cc870 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -89,46 +89,54 @@ do { \
__ret; \
})
-#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \
+({ \
+ typeof(pcp) __val, __old = *(ovalp); \
+ __val = _cmpxchg(pcp, __old, nval); \
+ if (__val != __old) \
+ *(ovalp) = __val; \
+ __val == __old; \
+})
+
+#define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __ret; \
- __ret = *__p; \
- if (__ret == (oval)) \
+ typeof(pcp) __val = *__p, ___old = *(ovalp); \
+ bool __ret; \
+ if (__val == ___old) { \
*__p = nval; \
+ __ret = true; \
+ } else { \
+ *(ovalp) = __val; \
+ __ret = false; \
+ } \
__ret; \
})
-#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \
- typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \
- int __ret = 0; \
- if (*__p1 == (oval1) && *__p2 == (oval2)) { \
- *__p1 = nval1; \
- *__p2 = nval2; \
- __ret = 1; \
- } \
- (__ret); \
+ typeof(pcp) __old = (oval); \
+ raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \
+ __old; \
})
#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
- typeof(pcp) __ret; \
+ typeof(pcp) ___ret; \
preempt_disable_notrace(); \
- __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
+ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
- __ret; \
+ ___ret; \
})
#define __this_cpu_generic_read_noirq(pcp) \
({ \
- typeof(pcp) __ret; \
- unsigned long __flags; \
- raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_read(pcp); \
- raw_local_irq_restore(__flags); \
- __ret; \
+ typeof(pcp) ___ret; \
+ unsigned long ___flags; \
+ raw_local_irq_save(___flags); \
+ ___ret = raw_cpu_generic_read(pcp); \
+ raw_local_irq_restore(___flags); \
+ ___ret; \
})
#define this_cpu_generic_read(pcp) \
@@ -170,23 +178,22 @@ do { \
__ret; \
})
-#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
- typeof(pcp) __ret; \
+ bool __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
+ __ret = raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
-#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- int __ret; \
+ typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
- oval1, oval2, nval1, nval2); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -282,6 +289,62 @@ do { \
#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
+#ifndef raw_cpu_try_cmpxchg_1
+#ifdef raw_cpu_cmpxchg_1
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_1)
+#else
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_2
+#ifdef raw_cpu_cmpxchg_2
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_2)
+#else
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_4
+#ifdef raw_cpu_cmpxchg_4
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_4)
+#else
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_8
+#ifdef raw_cpu_cmpxchg_8
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_8)
+#else
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
+#ifndef raw_cpu_try_cmpxchg64
+#ifdef raw_cpu_cmpxchg64
+#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg64)
+#else
+#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg128
+#ifdef raw_cpu_cmpxchg128
+#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg128)
+#else
+#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
#ifndef raw_cpu_cmpxchg_1
#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
@@ -299,21 +362,13 @@ do { \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef raw_cpu_cmpxchg_double_1
-#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_2
-#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_4
-#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg64
+#define raw_cpu_cmpxchg64(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef raw_cpu_cmpxchg_double_8
-#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg128
+#define raw_cpu_cmpxchg128(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_read_1
@@ -407,6 +462,62 @@ do { \
#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
+#ifndef this_cpu_try_cmpxchg_1
+#ifdef this_cpu_cmpxchg_1
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_1)
+#else
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_2
+#ifdef this_cpu_cmpxchg_2
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_2)
+#else
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_4
+#ifdef this_cpu_cmpxchg_4
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_4)
+#else
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_8
+#ifdef this_cpu_cmpxchg_8
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_8)
+#else
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
+#ifndef this_cpu_try_cmpxchg64
+#ifdef this_cpu_cmpxchg64
+#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg64)
+#else
+#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg128
+#ifdef this_cpu_cmpxchg128
+#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg128)
+#else
+#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
#ifndef this_cpu_cmpxchg_1
#define this_cpu_cmpxchg_1(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
@@ -424,21 +535,13 @@ do { \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef this_cpu_cmpxchg_double_1
-#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_2
-#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_4
-#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef this_cpu_cmpxchg64
+#define this_cpu_cmpxchg64(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef this_cpu_cmpxchg_double_8
-#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef this_cpu_cmpxchg128
+#define this_cpu_cmpxchg128(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#endif /* _ASM_GENERIC_PERCPU_H_ */
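The try_cmpxchg family introduced above differs from plain cmpxchg in two ways: it returns a boolean, and on failure it writes the observed value back through ovalp, so a retry loop needs no extra re-read. A standalone model of raw_cpu_generic_try_cmpxchg()'s semantics on an ordinary variable:

#include <stdbool.h>
#include <stdio.h>

static bool try_cmpxchg(int *p, int *oldp, int new)
{
	if (*p == *oldp) {
		*p = new;
		return true;
	}
	*oldp = *p;	/* failure: report what was really there */
	return false;
}

int main(void)
{
	int v = 5, expected = 3;

	if (!try_cmpxchg(&v, &expected, 7))
		printf("failed, expected now %d\n", expected);	/* 5 */
	if (try_cmpxchg(&v, &expected, 7))
		printf("succeeded, v = %d\n", v);		/* 7 */
	return 0;
}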
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 02932efad3ab..879e5f8aa5e9 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -8,7 +8,7 @@
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
/**
- * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* This function is intended for architectures that need
@@ -18,12 +18,17 @@
*/
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
- return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL &
+ ~__GFP_HIGHMEM, 0);
+
+ if (!ptdesc)
+ return NULL;
+ return ptdesc_address(ptdesc);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
- * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* Return: pointer to the allocated memory or %NULL on error
@@ -35,40 +40,40 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
#endif
/**
- * pte_free_kernel - free PTE-level kernel page table page
+ * pte_free_kernel - free PTE-level kernel page table memory
* @mm: the mm_struct of the current context
* @pte: pointer to the memory containing the page table
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ pagetable_free(virt_to_ptdesc(pte));
}
/**
- * __pte_alloc_one - allocate a page for PTE-level user page table
+ * __pte_alloc_one - allocate memory for a PTE-level user page table
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocate memory for a page table and ptdesc and run pagetable_pte_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
- struct page *pte;
+ struct ptdesc *ptdesc;
- pte = alloc_page(gfp);
- if (!pte)
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pte_page_ctor(pte)) {
- __free_page(pte);
+ if (!pagetable_pte_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return pte;
+ return ptdesc_page(ptdesc);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
@@ -76,9 +81,9 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocate memory for a page table and ptdesc and run pagetable_pte_ctor().
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
@@ -92,14 +97,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
/**
- * pte_free - free PTE-level user page table page
+ * pte_free - free PTE-level user page table memory
* @mm: the mm_struct of the current context
- * @pte_page: the `struct page` representing the page table
+ * @pte_page: the `struct page` referencing the ptdesc
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_pte_page_dtor(pte_page);
- __free_page(pte_page);
+ struct ptdesc *ptdesc = page_ptdesc(pte_page);
+
+ pagetable_pte_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
@@ -107,10 +114,11 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
- * pmd_alloc_one - allocate a page for PMD-level page table
+ * pmd_alloc_one - allocate memory for a PMD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocate memory for a page table and ptdesc and run pagetable_pmd_ctor().
+ *
* Allocations use %GFP_PGTABLE_USER in user context and
* %GFP_PGTABLE_KERNEL in kernel context.
*
@@ -118,28 +126,30 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
*/
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- struct page *page;
+ struct ptdesc *ptdesc;
gfp_t gfp = GFP_PGTABLE_USER;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- page = alloc_pages(gfp, 0);
- if (!page)
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pmd_page_ctor(page)) {
- __free_pages(page, 0);
+ if (!pagetable_pmd_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return (pmd_t *)page_address(page);
+ return ptdesc_address(ptdesc);
}
#endif
#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pgtable_pmd_page_dtor(virt_to_page(pmd));
- free_page((unsigned long)pmd);
+ pagetable_pmd_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
#endif
@@ -147,38 +157,61 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#if CONFIG_PGTABLE_LEVELS > 3
+static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ gfp &= ~__GFP_HIGHMEM;
+
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_pud_ctor(ptdesc);
+ return ptdesc_address(ptdesc);
+}
+
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
- * pud_alloc_one - allocate a page for PUD-level page table
+ * pud_alloc_one - allocate memory for a PUD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page using %GFP_PGTABLE_USER for user context and
- * %GFP_PGTABLE_KERNEL for kernel context.
+ * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
+ * and %GFP_PGTABLE_KERNEL for kernel context.
*
* Return: pointer to the allocated memory or %NULL on error
*/
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (pud_t *)get_zeroed_page(gfp);
+ return __pud_alloc_one(mm, addr);
}
#endif
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- free_page((unsigned long)pud);
+ pagetable_pud_dtor(ptdesc);
+ pagetable_free(ptdesc);
+}
+
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ __pud_free(mm, pud);
}
+#endif
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_page((unsigned long)pgd);
+ pagetable_free(virt_to_ptdesc(pgd));
}
#endif
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index ce2cbb3c380f..03b7dae47dd4 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -9,7 +9,6 @@
typedef struct { pgd_t pgd; } p4d_t;
#define P4D_SHIFT PGDIR_SHIFT
-#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define P4D_SIZE (1UL << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
@@ -42,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
#define __p4d(x) ((p4d_t) { __pgd(x) })
#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
-#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
/*
* allocating and freeing a p4d is trivial: the 1-entry p4d is
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 3e13acd019ae..8ffd64e7a24c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -30,6 +30,8 @@ typedef struct { pud_t pud; } pmd_t;
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_user(pud_t pud) { return 0; }
+static inline int pud_leaf(pud_t pud) { return 0; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
@@ -51,7 +53,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
#define __pmd(x) ((pmd_t) { __pud(x) } )
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
-#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud })))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index a9d751fbda9e..eb70c6d7ceff 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -49,7 +49,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
#define __pud(x) ((pud_t) { __p4d(x) })
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
-#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
+#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d683f5e6d791..51f8f3881523 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static __always_inline void set_preempt_need_resched(void)
@@ -80,9 +80,21 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
+
+#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 3aefde23dcea..75b8f4601b28 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -2,6 +2,10 @@
/*
* Queue read/write lock
*
+ * These use generic atomic and locking routines, but depend on a fair spinlock
+ * implementation in order to be fair themselves. The implementation in
+ * asm-generic/spinlock.h meets these requirements.
+ *
* (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
*
* Authors: Waiman Long <waiman.long@hp.com>
@@ -15,6 +19,8 @@
#include <asm-generic/qrwlock_types.h>
+/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
+
/*
* Writer states & reader shift and bias.
*/
@@ -31,13 +37,13 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queued_read_trylock - try to acquire read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_read_trylock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_read(&lock->cnts);
if (likely(!(cnts & _QW_WMASK))) {
@@ -50,13 +56,13 @@ static inline int queued_read_trylock(struct qrwlock *lock)
}
/**
- * queued_write_trylock - try to acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_write_trylock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_read(&lock->cnts);
if (unlikely(cnts))
@@ -66,12 +72,12 @@ static inline int queued_write_trylock(struct qrwlock *lock)
_QW_LOCKED));
}
/**
- * queued_read_lock - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
*/
static inline void queued_read_lock(struct qrwlock *lock)
{
- u32 cnts;
+ int cnts;
cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
@@ -82,12 +88,12 @@ static inline void queued_read_lock(struct qrwlock *lock)
}
/**
- * queued_write_lock - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_lock(struct qrwlock *lock)
{
- u32 cnts = 0;
+ int cnts = 0;
/* Optimize for the unfair lock case where the fair flag is 0. */
if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
return;
@@ -96,8 +102,8 @@ static inline void queued_write_lock(struct qrwlock *lock)
}
/**
- * queued_read_unlock - release read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_read_unlock(struct qrwlock *lock)
{
@@ -108,23 +114,34 @@ static inline void queued_read_unlock(struct qrwlock *lock)
}
/**
- * queued_write_unlock - release write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
smp_store_release(&lock->wlocked, 0);
}
+/**
+ * queued_rwlock_is_contended - check if the lock is contended
+ * @lock : Pointer to queued rwlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static inline int queued_rwlock_is_contended(struct qrwlock *lock)
+{
+ return arch_spin_is_locked(&lock->wait_lock);
+}
+
/*
* Remapping rwlock architecture specific functions to the corresponding
- * queue rwlock functions.
+ * queued rwlock functions.
*/
-#define arch_read_lock(l) queued_read_lock(l)
-#define arch_write_lock(l) queued_write_lock(l)
-#define arch_read_trylock(l) queued_read_trylock(l)
-#define arch_write_trylock(l) queued_write_trylock(l)
-#define arch_read_unlock(l) queued_read_unlock(l)
-#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l)
#endif /* __ASM_GENERIC_QRWLOCK_H */
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index c36f1d5a2572..12392c14c4d0 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -7,7 +7,7 @@
#include <asm/spinlock_types.h>
/*
- * The queue read/write lock data structure
+ * The queued read/write lock data structure
*/
typedef struct qrwlock {
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 4fe7fd0fe834..0655aa5b57b2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -2,6 +2,35 @@
/*
* Queued spinlock
*
+ * A 'generic' spinlock implementation that is based on MCS locks. For an
+ * architecture that's looking for a 'generic' spinlock, please first consider
+ * ticket-lock.h and only come looking here when you've considered all the
+ * constraints below and can show your hardware does actually perform better
+ * with qspinlock.
+ *
+ * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
+ * weaker than RCtso if you're Power), where regular code only expects atomic_t
+ * to be RCpc.
+ *
+ * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
+ * of atomic operations to behave well together, please audit them carefully to
+ * ensure they all have forward progress. Many atomic operations may default to
+ * cmpxchg() loops which will not have good forward progress properties on
+ * LL/SC architectures.
+ *
+ * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
+ * do. Carefully read the patches that introduced
+ * queued_fetch_set_pending_acquire().
+ *
+ * qspinlock also relies heavily on mixed-size atomic operations; specifically,
+ * it requires architectures to have xchg16, something which many LL/SC
+ * architectures need to implement as a 32bit and+or in order to satisfy the
+ * forward progress guarantees mentioned above.
+ *
+ * Further reading on mixed size atomics that might be relevant:
+ *
+ * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
+ *
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
*
@@ -41,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
- return !atomic_read(&lock.val);
+ return !lock.val.counter;
}
/**
@@ -60,7 +89,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
*/
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
- u32 val = atomic_read(&lock->val);
+ int val = atomic_read(&lock->val);
if (unlikely(val))
return 0;
@@ -77,7 +106,7 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
*/
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
- u32 val = 0;
+ int val = 0;
if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return;
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d16302d3eb59..db13bb620f52 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -59,40 +59,23 @@ extern char __noinstr_text_start[], __noinstr_text_end[];
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
-#ifndef dereference_function_descriptor
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+void *dereference_function_descriptor(void *ptr);
+void *dereference_kernel_function_descriptor(void *ptr);
+#else
#define dereference_function_descriptor(p) ((void *)(p))
#define dereference_kernel_function_descriptor(p) ((void *)(p))
-#endif
-
-/* random extra sections (if any). Override
- * in asm/sections.h */
-#ifndef arch_is_kernel_text
-static inline int arch_is_kernel_text(unsigned long addr)
-{
- return 0;
-}
-#endif
-#ifndef arch_is_kernel_data
-static inline int arch_is_kernel_data(unsigned long addr)
-{
- return 0;
-}
+/* An address is simply the address of the function. */
+typedef struct {
+ unsigned long addr;
+} func_desc_t;
#endif
-/*
- * Check if an address is part of freed initmem. This is needed on architectures
- * with virt == phys kernel mapping, for code that wants to check if an address
- * is part of a static object within [_stext, _end]. After initmem is freed,
- * memory can be allocated from it, and such allocations would then have
- * addresses within the range [_stext, _end].
- */
-#ifndef arch_is_kernel_initmem_freed
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+static inline bool have_function_descriptors(void)
{
- return 0;
+ return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS);
}
-#endif
/**
* memory_contains - checks if an object is contained within a memory region
@@ -114,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -127,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;
- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
}
/**
@@ -159,6 +145,28 @@ static inline bool init_section_intersects(void *virt, size_t size)
}
/**
+ * is_kernel_core_data - checks if the pointer address is located in the
+ * .data or .bss section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .data or .bss, false otherwise.
+ * Note: on some architectures this may return true for core RODATA and
+ * false on others, but it will always be true for core RW data.
+ */
+static inline bool is_kernel_core_data(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
+}
+
+/**
* is_kernel_rodata - checks if the pointer address is located in the
* .rodata section
*
@@ -172,4 +180,51 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+/**
+ * is_kernel_inittext - checks if the pointer address is located in the
+ * .init.text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .init.text, false otherwise.
+ */
+static inline bool is_kernel_inittext(unsigned long addr)
+{
+ return addr >= (unsigned long)_sinittext &&
+ addr < (unsigned long)_einittext;
+}
+
+/**
+ * __is_kernel_text - checks if the pointer address is located in the
+ * .text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .text, false otherwise.
+ * Note: an internal helper; it only checks the range of _stext to _etext.
+ */
+static inline bool __is_kernel_text(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_etext;
+}
+
+/**
+ * __is_kernel - checks if the pointer address is located in the kernel range
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in the kernel range, false otherwise.
+ * Note: an internal helper; it checks both the _stext to _end range
+ * and the __init_begin to __init_end range, which can lie outside
+ * of the _stext to _end range.
+ */
+static inline bool __is_kernel(unsigned long addr)
+{
+ return ((addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end) ||
+ (addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end));
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
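The rewritten memory_intersects() matters for objects that fully contain the region: the old endpoint checks saw neither virt nor vend inside [begin, end) and reported no overlap. A standalone comparison of the two predicates:

#include <stdbool.h>
#include <stdio.h>

static bool old_check(unsigned long begin, unsigned long end,
		      unsigned long virt, unsigned long vend)
{
	return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}

static bool new_check(unsigned long begin, unsigned long end,
		      unsigned long virt, unsigned long vend)
{
	return virt < end && vend > begin;
}

int main(void)
{
	/* object [50, 250) fully contains region [100, 200) */
	printf("old=%d new=%d\n", old_check(100, 200, 50, 250),
	       new_check(100, 200, 50, 250));	/* old=0 new=1 */
	return 0;
}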
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index c53984fa9761..663dd6d0795d 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -5,8 +5,6 @@
#include <uapi/asm-generic/signal.h>
#ifndef __ASSEMBLY__
-#ifdef SA_RESTORER
-#endif
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
new file mode 100644
index 000000000000..2a67aed9ac52
--- /dev/null
+++ b/include/asm-generic/softirq_stack.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+#define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
+#endif
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index adaf6acab172..90803a826ba0 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -1,12 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_SPINLOCK_H
-#define __ASM_GENERIC_SPINLOCK_H
+
/*
- * You need to implement asm/spinlock.h for SMP support. The generic
- * version does not handle SMP.
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE-like instructions to sleep instead of polling for word
+ * modifications, be sure to implement that (see ARM64 for example).
+ *
*/
-#ifdef CONFIG_SMP
-#error need an architecture specific asm/spinlock.h
-#endif
+
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ u32 val = atomic_fetch_add(1<<16, lock);
+ u16 ticket = val >> 16;
+
+ if (ticket == (u16)val)
+ return;
+
+ /*
+ * atomic_cond_read_acquire() is RCpc, but rather than defining a
+ * custom cond_read_rcsc() here we just emit a full fence. We only
+ * need the prior-reads-before-subsequent-writes ordering from
+ * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
+ * have no outstanding writes due to the atomic_fetch_add() the extra
+ * orderings are free.
+ */
+ atomic_cond_read_acquire(lock, ticket == (u16)VAL);
+ smp_mb();
+}
+
+static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 old = atomic_read(lock);
+
+ if ((old >> 16) != (old & 0xffff))
+ return false;
+
+ return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ u32 val = atomic_read(lock);
+
+ smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ u32 val = lock.counter;
+
+ return ((val >> 16) == (val & 0xffff));
+}
+
+static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ arch_spinlock_t val = READ_ONCE(*lock);
+
+ return !arch_spin_value_unlocked(val);
+}
+
+static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
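
To make the ticket encoding above concrete, a user-space model of the same algorithm follows. This is a hedged sketch only: C11 atomics stand in for atomic_t, a plain acquire-load spin stands in for smp_cond_load_acquire() plus the trailing smp_mb(), and a CAS loop replaces the kernel's 16-bit smp_store_release() subword store, since portable C11 atomics cannot store half of an atomic word.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* 'next' ticket lives in the high 16 bits, the current 'owner' in the low 16. */
typedef _Atomic uint32_t ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	uint32_t val = atomic_fetch_add_explicit(lock, 1u << 16,
						 memory_order_acq_rel);
	uint16_t ticket = val >> 16;

	/* Uncontended fast path: our ticket already equals the owner. */
	while ((uint16_t)atomic_load_explicit(lock,
					      memory_order_acquire) != ticket)
		;	/* spin; the kernel can sleep here via smp_cond_load_acquire() */
}

static bool ticket_trylock(ticket_lock_t *lock)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);

	if ((old >> 16) != (old & 0xffffu))
		return false;	/* a ticket is already outstanding */
	return atomic_compare_exchange_strong_explicit(lock, &old,
			old + (1u << 16),
			memory_order_acq_rel, memory_order_relaxed);
}

static void ticket_unlock(ticket_lock_t *lock)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t new;

	do {	/* bump only the owner half, leaving 'next' intact */
		new = (old & 0xffff0000u) | ((old + 1u) & 0xffffu);
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, new,
			memory_order_release, memory_order_relaxed));
}

The fetch-add in ticket_lock() hands every contender a unique ticket, so waiters are served FIFO; that is exactly the forward-progress guarantee the comment block above demands from the architecture's atomic_fetch_add().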
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 000000000000..8962bb730945
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+typedef atomic_t arch_spinlock_t;
+
+/*
+ * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
+ * include.
+ */
+#include <asm/qrwlock_types.h>
+
+#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0)
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index f3135e734387..5a80fe728dc8 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -43,9 +43,9 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
* @regs: task_pt_regs() of @task
*
* It's only valid to call this when @task is stopped for system
- * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
- * after tracehook_report_syscall_entry() returned nonzero to prevent
- * the system call from taking place.
+ * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry()
+ * returned nonzero to prevent the system call from taking place.
*
* This rolls back the register state in @regs so it's as if the
* system call instruction was a no-op. The registers containing
@@ -63,7 +63,8 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
* Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
@@ -76,7 +77,8 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
* This value is meaningless if syscall_get_error() returned nonzero.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
@@ -93,7 +95,8 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
* code; the user sees a failed system call with this errno code.
*
* It's only valid to call this when @task is stopped for tracing on exit
- * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
int error, long val);
@@ -108,34 +111,21 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
*/
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args);
/**
- * syscall_set_arguments - change system call parameter value
- * @task: task of interest, must be in system call entry tracing
- * @regs: task_pt_regs() of @task
- * @args: array of argument values to store
- *
- * Changes 6 arguments to the system call.
- * The first argument gets value @args[0], and so on.
- *
- * It's only valid to call this when @task is stopped for tracing on
- * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- */
-void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- const unsigned long *args);
-
-/**
* syscall_get_arch - return the AUDIT_ARCH for the current system call
* @task: task of interest, must be blocked
*
* Returns the AUDIT_ARCH_* based on the system call convention in use.
*
* It's only valid to call this when @task is stopped on entry to a system
- * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
+ * call, due to %SYSCALL_WORK_SYSCALL_TRACE, %SYSCALL_WORK_SYSCALL_AUDIT, or
+ * %SYSCALL_WORK_SECCOMP.
*
* Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
* provide an implementation of this.
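
A hedged sketch of a consumer of these accessors — a tracer callback that runs while @task is stopped at syscall entry, as the kernel-doc above requires. The function name and the pr_debug() formatting are illustrative; only the syscall_*() calls come from this header:

#include <linux/sched/task_stack.h>	/* task_pt_regs() */
#include <linux/printk.h>
#include <asm-generic/syscall.h>

static void trace_syscall_entry_sketch(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	/* Valid only at an entry-tracing stop (SYSCALL_WORK_SYSCALL_TRACE). */
	syscall_get_arguments(task, regs, args);
	pr_debug("syscall %d(%lx, %lx, ...)\n",
		 syscall_get_nr(task, regs), args[0], args[1]);
}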
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
deleted file mode 100644
index 59c5a3bd4a6e..000000000000
--- a/include/asm-generic/termios-base.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* termios.h: generic termios/termio user copying/translation
- */
-
-#ifndef _ASM_GENERIC_TERMIOS_BASE_H
-#define _ASM_GENERIC_TERMIOS_BASE_H
-
-#include <linux/uaccess.h>
-
-#ifndef __ARCH_TERMIO_GETPUT
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifndef user_termios_to_kernel_termios
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#endif
-
-#ifndef kernel_termios_to_user_termios
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif
-
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __ARCH_TERMIO_GETPUT */
-
-#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
deleted file mode 100644
index b1398d0d4a1d..000000000000
--- a/include/asm-generic/termios.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_TERMIOS_H
-#define _ASM_GENERIC_TERMIOS_H
-
-
-#include <linux/uaccess.h>
-#include <uapi/asm-generic/termios.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- const struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifdef TCGETS2
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#else /* TCGETS2 */
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#endif /* TCGETS2 */
-
-#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 6661ee1cff47..709830274b75 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -46,7 +46,9 @@
*
* The mmu_gather API consists of:
*
- * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ *
+ * start and finish a mmu_gather
*
* Finish in particular will issue a (final) TLB invalidate and free
* all (remaining) queued pages.
@@ -67,6 +69,7 @@
*
* - tlb_remove_page() / __tlb_remove_page()
* - tlb_remove_page_size() / __tlb_remove_page_size()
+ * - __tlb_remove_folio_pages()
*
* __tlb_remove_page_size() is the basic primitive that queues a page for
* freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
@@ -76,6 +79,11 @@
* tlb_remove_page() and tlb_remove_page_size() imply the call to
* tlb_flush_mmu() when required and has no return value.
*
+ * __tlb_remove_folio_pages() is similar to __tlb_remove_page(); however,
+ * instead of removing a single page, it removes the given number of
+ * consecutive pages that are all part of the same (large) folio, just like
+ * calling __tlb_remove_page() on each page individually.
+ *
* - tlb_change_page_size()
*
* call before __tlb_remove_page*() to set the current page-size; implies a
@@ -91,7 +99,7 @@
*
* - mmu_gather::fullmm
*
- * A flag set by tlb_gather_mmu() to indicate we're going to free
+ * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
* the entire mm; this allows a number of optimizations.
*
* - We can ignore tlb_{start,end}_vma(); because we don't
@@ -156,9 +164,24 @@
* Useful if your architecture doesn't use IPIs for remote TLB invalidates
* and therefore doesn't naturally serialize with software page-table walkers.
*
+ * MMU_GATHER_NO_FLUSH_CACHE
+ *
+ * Indicates the architecture has flush_cache_range() but it need *NOT* be called
+ * before unmapping a VMA.
+ *
+ * NOTE: strictly speaking we shouldn't have this knob and instead rely on
+ * flush_cache_range() being a NOP, except Sparc64 seems to be
+ * different here.
+ *
+ * MMU_GATHER_MERGE_VMAS
+ *
+ * Indicates the architecture wants to merge ranges over VMAs; typically the
+ * case when multiple range invalidates are more expensive than a full invalidate.
+ *
* MMU_GATHER_NO_RANGE
*
- * Use this if your architecture lacks an efficient flush_tlb_range().
+ * Use this if your architecture lacks an efficient flush_tlb_range(). This
+ * option implies MMU_GATHER_MERGE_VMAS above.
*
* MMU_GATHER_NO_GATHER
*
@@ -178,7 +201,7 @@ struct mmu_table_batch {
struct rcu_head rcu;
#endif
unsigned int nr;
- void *tables[0];
+ void *tables[];
};
#define MAX_TABLE_BATCH \
@@ -205,12 +228,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
@@ -225,7 +252,7 @@ struct mmu_gather_batch {
struct mmu_gather_batch *next;
unsigned int nr;
unsigned int max;
- struct page *pages[0];
+ struct encoded_page *encoded_pages[];
};
#define MAX_GATHER_BATCH \
@@ -240,7 +267,31 @@ struct mmu_gather_batch {
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
- int page_size);
+ bool delay_rmap, int page_size);
+bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
+ unsigned int nr_pages, bool delay_rmap);
+
+#ifdef CONFIG_SMP
+/*
+ * This both sets 'delayed_rmap', and returns true. It would be an inline
+ * function, except we define it before the 'struct mmu_gather'.
+ */
+#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
+extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
+#endif
+
+#endif
+
+/*
+ * We have a no-op version of the rmap removal that doesn't
+ * delay anything. That is used on S390, which flushes remote
+ * TLBs synchronously, and on UP, which doesn't have any
+ * remote TLBs to flush and is not preemptible due to this
+ * all happening under the page table lock.
+ */
+#ifndef tlb_delay_rmap
+#define tlb_delay_rmap(tlb) (false)
+static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
/*
@@ -274,6 +325,11 @@ struct mmu_gather {
unsigned int freed_tables : 1;
/*
+ * Do we have pending delayed rmap removals?
+ */
+ unsigned int delayed_rmap : 1;
+
+ /*
* at which levels have we cleared entries?
*/
unsigned int cleared_ptes : 1;
@@ -286,6 +342,7 @@ struct mmu_gather {
*/
unsigned int vma_exec : 1;
unsigned int vma_huge : 1;
+ unsigned int vma_pfn : 1;
unsigned int batch_count;
@@ -332,8 +389,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#ifdef CONFIG_MMU_GATHER_NO_RANGE
-#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
-#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif
/*
@@ -350,20 +407,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
-static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
-
-#if defined(tlb_start_vma) || defined(tlb_end_vma)
-#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
-#endif
-
/*
* When an architecture does not provide its own tlb_flush() implementation
* but does have a reasonably efficient flush_vma_range() implementation
@@ -383,6 +429,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_range(&vma, tlb->start, tlb->end);
}
}
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -400,17 +449,9 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
*/
tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+ tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
/*
@@ -422,20 +463,20 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
return;
tlb_flush(tlb);
- mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
__tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
- if (__tlb_remove_page_size(tlb, page, page_size))
+ if (__tlb_remove_page_size(tlb, page, false, page_size))
tlb_flush_mmu(tlb);
}
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
+ struct page *page, bool delay_rmap)
{
- return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+ return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
}
/* tlb_remove_page
@@ -447,6 +488,17 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+{
+ tlb_remove_table(tlb, pt);
+}
+
+/* Like tlb_remove_ptdesc, but for page-like page directories. */
+static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
+{
+ tlb_remove_page(tlb, ptdesc_page(pt));
+}
+
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
@@ -484,32 +536,36 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
-#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
tlb_update_vma_flags(tlb, vma);
+#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
#endif
+}
-#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
/*
- * Do a TLB flush and reset the range at VMA boundaries; this avoids
- * the ranges growing with the unused space between consecutive VMAs,
- * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
- * this.
+ * VM_PFNMAP is more fragile because the core mm will not track the
+ * page mapcount -- there might not be page-frames for these PFNs after
+ * all. Force flush TLBs for such ranges to avoid munmap() vs
+ * unmap_mapping_range() races.
*/
- tlb_flush_mmu_tlbonly(tlb);
+ if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing with the unused space between consecutive VMAs.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+ }
}
-#endif
/*
* tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
@@ -544,7 +600,9 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
}
#ifndef __tlb_remove_tlb_entry
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+}
#endif
/**
@@ -560,13 +618,37 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+/**
+ * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
+ * later tlb invalidation.
+ *
+ * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
+ * consecutive ptes instead of only a single one.
+ */
+static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
+ pte_t *ptep, unsigned int nr, unsigned long address)
+{
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
+ for (;;) {
+ __tlb_remove_tlb_entry(tlb, ptep, address);
+ if (--nr == 0)
+ break;
+ ptep++;
+ address += PAGE_SIZE;
+ }
+}
+
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- if (_sz == PMD_SIZE) \
- tlb_flush_pmd_range(tlb, address, _sz); \
- else if (_sz == PUD_SIZE) \
+ if (_sz >= P4D_SIZE) \
+ tlb_flush_p4d_range(tlb, address, _sz); \
+ else if (_sz >= PUD_SIZE) \
tlb_flush_pud_range(tlb, address, _sz); \
+ else if (_sz >= PMD_SIZE) \
+ tlb_flush_pmd_range(tlb, address, _sz); \
+ else \
+ tlb_flush_pte_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -652,6 +734,20 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
} while (0)
#endif
+#ifndef pte_needs_flush
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return true;
+}
+#endif
+
+#ifndef huge_pmd_needs_flush
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return true;
+}
+#endif
+
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */
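
Tying the API walk-through above together, the expected call ordering for a range unmap looks roughly like the sketch below. The page-table walk itself is elided, and the signatures follow the names documented in this header; treat it as indicative, not as a real unmap path:

#include <asm-generic/tlb.h>

/* Hedged sketch of the mmu_gather lifecycle. */
static void unmap_range_sketch(struct mm_struct *mm,
			       struct vm_area_struct *vma)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* tlb_gather_mmu_fullmm() on exit() */
	tlb_start_vma(&tlb, vma);	/* flushes caches, latches vma_* flags */

	/* For each present pte at addr:
	 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	// track the range
	 *	tlb_remove_page(&tlb, page);		// queue page, may flush
	 */

	tlb_end_vma(&tlb, vma);		/* may force a flush for VM_PFNMAP */
	tlb_finish_mmu(&tlb);		/* final invalidate + free queued pages */
}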
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 5aa8705df87e..4dbe715be65b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,7 +45,7 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #ifdef CONFIG_NUMA
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index ba68ee4dabfa..a5be9e61a2a2 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -8,100 +8,92 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/string.h>
+#include <asm-generic/access_ok.h>
#ifdef CONFIG_UACCESS_MEMCPY
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+#include <asm/unaligned.h>
+
+static __always_inline int
+__get_user_fn(size_t size, const void __user *from, void *to)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)to = *(u64 __force *)from;
- return 0;
-#endif
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 *)to = *((u8 __force *)from);
+ return 0;
+ case 2:
+ *(u16 *)to = get_unaligned((u16 __force *)from);
+ return 0;
+ case 4:
+ *(u32 *)to = get_unaligned((u32 __force *)from);
+ return 0;
+ case 8:
+ *(u64 *)to = get_unaligned((u64 __force *)from);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
}
- memcpy(to, (const void __force *)from, n);
- return 0;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
-static inline __must_check unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+static __always_inline int
+__put_user_fn(size_t size, void __user *to, void *from)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 __force *)to = *(u64 *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
- memcpy((void __force *)to, from, n);
- return 0;
+ switch (size) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ put_unaligned(*(u16 *)from, (u16 __force *)to);
+ return 0;
+ case 4:
+ put_unaligned(*(u32 *)from, (u32 __force *)to);
+ return 0;
+ case 8:
+ put_unaligned(*(u64 *)from, (u64 __force *)to);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-#endif /* CONFIG_UACCESS_MEMCPY */
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
-#ifndef KERNEL_DS
-#define KERNEL_DS MAKE_MM_SEG(~0UL)
-#endif
-
-#ifndef USER_DS
-#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-#endif
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
-#ifndef get_fs
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
- current_thread_info()->addr_limit = fs;
+ memcpy(to, (const void __force *)from, n);
+ return 0;
}
-#endif
-
-#ifndef uaccess_kernel
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-#endif
-
-#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
-/*
- * The architecture should really override this if possible, at least
- * doing a check on the get_fs()
- */
-#ifndef __access_ok
-static inline int __access_ok(unsigned long addr, unsigned long size)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return 1;
+ memcpy((void __force *)to, from, n);
+ return 0;
}
-#endif
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
/*
* These are the main single-value transfer routines. They automatically
@@ -213,50 +205,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
/*
- * Copy a null terminated string from userspace.
- */
-#ifndef __strncpy_from_user
-static inline long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
- char *tmp;
- strncpy(dst, (const char __force *)src, count);
- for (tmp = dst; *tmp && count > 0; tmp++, count--)
- ;
- return (tmp - dst);
-}
-#endif
-
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (!access_ok(src, 1))
- return -EFAULT;
- return __strncpy_from_user(dst, src, count);
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-#ifndef __strnlen_user
-#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
-#endif
-
-/*
- * Unlike strnlen, strnlen_user includes the nul terminator in
- * its returned count. Callers should check for a returned value
- * greater than N as an indication the string is too long.
- */
-static inline long strnlen_user(const char __user *src, long n)
-{
- if (!access_ok(src, 1))
- return 0;
- return __strnlen_user(src, n);
-}
-
-/*
* Zero Userspace
*/
#ifndef __clear_user
@@ -280,4 +228,8 @@ clear_user(void __user *to, unsigned long n)
#include <asm/extable.h>
+__must_check long strncpy_from_user(char *dst, const char __user *src,
+ long count);
+__must_check long strnlen_user(const char __user *src, long n);
+
#endif /* __ASM_GENERIC_UACCESS_H */
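
For context, a hedged sketch of what the constant-size dispatch above buys a caller: with a compile-time-known size, get_user()/put_user() boil down to a single __get_user_fn()/__put_user_fn() case. The ioctl-handler shape below is illustrative:

#include <linux/uaccess.h>

static long example_rw_u32(u32 __user *uptr)	/* illustrative name */
{
	u32 val;

	if (get_user(val, uptr))	/* one __get_user_fn(4, ...) case */
		return -EFAULT;

	val |= 0x1;

	if (put_user(val, uptr))	/* one __put_user_fn(4, ...) case */
		return -EFAULT;

	return 0;
}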
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 374c940e9be1..a84c64e5f11e 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -6,31 +6,150 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
+#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = (val >> 16) & 0xff;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
+
+static inline void __put_unaligned_be48(const u64 val, u8 *p)
+{
+ *p++ = (val >> 40) & 0xff;
+ *p++ = (val >> 32) & 0xff;
+ *p++ = (val >> 24) & 0xff;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
+}
+
+static inline void put_unaligned_be48(const u64 val, void *p)
+{
+ __put_unaligned_be48(val, p);
+}
+
+static inline u64 __get_unaligned_be48(const u8 *p)
+{
+ return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
+ p[3] << 16 | p[4] << 8 | p[5];
+}
+
+static inline u64 get_unaligned_be48(const void *p)
+{
+ return __get_unaligned_be48(p);
+}
#endif /* __ASM_GENERIC_UNALIGNED_H */
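
These helpers make wire-format parsing safe at any alignment; a hedged sketch decoding an invented little-endian record header from an arbitrary byte pointer (the field layout is purely illustrative):

#include <linux/types.h>
#include <asm-generic/unaligned.h>

/* Invented layout: u16 LE type, u32 LE length, 48-bit BE timestamp. */
static void parse_record_sketch(const u8 *buf)
{
	u16 type = get_unaligned_le16(buf);
	u32 len  = get_unaligned_le32(buf + 2);
	u64 ts   = get_unaligned_be48(buf + 6);	/* zero-extended to 64 bits */

	(void)type; (void)len; (void)ts;	/* ... validate and use ... */
}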
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5430febd34be..f7749d0f2562 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -34,6 +34,7 @@
*
* STABS_DEBUG
* DWARF_DEBUG
+ * ELF_DETAILS
*
* DISCARDS // must be the last
* }
@@ -80,8 +81,8 @@
#define RO_EXCEPTION_TABLE
#endif
-/* Align . to a 8 byte boundary equals to maximum function alignment. */
-#define ALIGN_FUNCTION() . = ALIGN(8)
+/* Align . to the function alignment. */
+#define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT)
/*
* LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
@@ -89,15 +90,18 @@
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
*
+ * With LTO_CLANG, the linker also splits sections by default, so we need
+ * these macros to combine the sections during the final link.
+ *
* RODATA_MAIN is not used because existing code already defines .rodata.x
* sections to be brought in with rodata.
*/
-#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
-#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
@@ -112,11 +116,7 @@
* GCC 4.5 and later have a 32-byte section alignment for structures.
* Except GCC 4.9, which felt the need to align on 64 bytes.
*/
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
-#define STRUCT_ALIGNMENT 64
-#else
#define STRUCT_ALIGNMENT 32
-#endif
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/*
@@ -126,25 +126,18 @@
*/
#define SCHED_DATA \
STRUCT_ALIGN(); \
- __begin_sched_classes = .; \
- *(__idle_sched_class) \
- *(__fair_sched_class) \
- *(__rt_sched_class) \
- *(__dl_sched_class) \
+ __sched_class_highest = .; \
*(__stop_sched_class) \
- __end_sched_classes = .;
+ *(__dl_sched_class) \
+ *(__rt_sched_class) \
+ *(__fair_sched_class) \
+ *(__idle_sched_class) \
+ __sched_class_lowest = .;
/* The actual configuration determine if the init/exit sections
* are handled as text/data or they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_KEEP(sec) *(.cpu##sec)
-#define CPU_DISCARD(sec)
-#else
-#define CPU_KEEP(sec)
-#define CPU_DISCARD(sec) *(.cpu##sec)
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
@@ -154,6 +147,24 @@
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
+#define PATCHABLE_DISCARDS
+#else
+#define KEEP_PATCHABLE
+#define PATCHABLE_DISCARDS *(__patchable_function_entries)
+#endif
+
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in asm-generic/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
@@ -161,118 +172,134 @@
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
+ * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func,
+ * as some archs have a different prototype for that function,
+ * while ftrace_ops_list_func() has a single prototype.
*/
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
- KEEP(*(__patchable_function_entries)) \
+ KEEP_PATCHABLE \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub;
+ FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# define MCOUNT_REC() FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
# endif
#endif
+#define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) \
+ _BEGIN_##_label_ = .; \
+ KEEP(*(_sec_)) \
+ _END_##_label_ = .;
+
+#define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) \
+ _label_##_BEGIN_ = .; \
+ KEEP(*(_sec_)) \
+ _label_##_END_ = .;
+
+#define BOUNDED_SECTION_BY(_sec_, _label_) \
+ BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)
+
+#define BOUNDED_SECTION(_sec) BOUNDED_SECTION_BY(_sec, _sec)
+
+#define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
+ _HDR_##_label_ = .; \
+ KEEP(*(.gnu.linkonce.##_sec_)) \
+ BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)
+
+#define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
+ _label_##_HDR_ = .; \
+ KEEP(*(.gnu.linkonce.##_sec_)) \
+ BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)
+
+#define HEADERED_SECTION_BY(_sec_, _label_) \
+ HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)
+
+#define HEADERED_SECTION(_sec) HEADERED_SECTION_BY(_sec, _sec)
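
On the C side, a bounded section is consumed by declaring the boundary symbols the macro emits and walking between them. A hedged sketch for a hypothetical table placed with BOUNDED_SECTION_BY(__foo_table, ___foo_table), which generates __start___foo_table and __stop___foo_table; both names are invented for illustration:

/* Entries land in the section via __section("__foo_table") at their
 * definition sites. */
struct foo_entry {
	const char *name;
	int (*fn)(void);
};

extern struct foo_entry __start___foo_table[];
extern struct foo_entry __stop___foo_table[];

static void run_foo_table(void)
{
	struct foo_entry *p;

	for (p = __start___foo_table; p < __stop___foo_table; p++)
		p->fn();
}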
+
#ifdef CONFIG_TRACE_BRANCH_PROFILING
-#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
- KEEP(*(_ftrace_annotated_branch)) \
- __stop_annotated_branch_profile = .;
+#define LIKELY_PROFILE() \
+ BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile)
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
-#define BRANCH_PROFILE() __start_branch_profile = .; \
- KEEP(*(_ftrace_branch)) \
- __stop_branch_profile = .;
+#define BRANCH_PROFILE() \
+ BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile)
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_KPROBES
-#define KPROBE_BLACKLIST() . = ALIGN(8); \
- __start_kprobe_blacklist = .; \
- KEEP(*(_kprobe_blacklist)) \
- __stop_kprobe_blacklist = .;
+#define KPROBE_BLACKLIST() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION(_kprobe_blacklist)
#else
#define KPROBE_BLACKLIST()
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
-#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
- __start_error_injection_whitelist = .; \
- KEEP(*(_error_injection_whitelist)) \
- __stop_error_injection_whitelist = .;
+#define ERROR_INJECT_WHITELIST() \
+ STRUCT_ALIGN(); \
+ BOUNDED_SECTION(_error_injection_whitelist)
#else
#define ERROR_INJECT_WHITELIST()
#endif
#ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() . = ALIGN(8); \
- __start_ftrace_events = .; \
- KEEP(*(_ftrace_events)) \
- __stop_ftrace_events = .; \
- __start_ftrace_eval_maps = .; \
- KEEP(*(_ftrace_eval_map)) \
- __stop_ftrace_eval_maps = .;
+#define FTRACE_EVENTS() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION(_ftrace_events) \
+ BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps)
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
-#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
- KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
- __stop___trace_bprintk_fmt = .;
-#define TRACEPOINT_STR() __start___tracepoint_str = .; \
- KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
- __stop___tracepoint_str = .;
+#define TRACE_PRINTKS() BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt)
+#define TRACEPOINT_STR() BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str)
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() . = ALIGN(8); \
- __start_syscalls_metadata = .; \
- KEEP(*(__syscalls_metadata)) \
- __stop_syscalls_metadata = .;
+#define TRACE_SYSCALLS() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata)
#else
#define TRACE_SYSCALLS()
#endif
#ifdef CONFIG_BPF_EVENTS
-#define BPF_RAW_TP() STRUCT_ALIGN(); \
- __start__bpf_raw_tp = .; \
- KEEP(*(__bpf_raw_tp_map)) \
- __stop__bpf_raw_tp = .;
+#define BPF_RAW_TP() STRUCT_ALIGN(); \
+ BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp)
#else
#define BPF_RAW_TP()
#endif
#ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() . = ALIGN(8); \
- __earlycon_table = .; \
- KEEP(*(__earlycon_table)) \
- __earlycon_table_end = .;
+#define EARLYCON_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end)
#else
#define EARLYCON_TABLE()
#endif
#ifdef CONFIG_SECURITY
-#define LSM_TABLE() . = ALIGN(8); \
- __start_lsm_info = .; \
- KEEP(*(.lsm_info.init)) \
- __end_lsm_info = .;
-#define EARLY_LSM_TABLE() . = ALIGN(8); \
- __start_early_lsm_info = .; \
- KEEP(*(.early_lsm_info.init)) \
- __end_early_lsm_info = .;
+#define LSM_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end)
+
+#define EARLY_LSM_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end)
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
@@ -298,9 +325,8 @@
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
. = ALIGN(8); \
- __##name##_acpi_probe_table = .; \
- KEEP(*(__##name##_acpi_probe_table)) \
- __##name##_acpi_probe_table_end = .;
+ BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table, \
+ __##name##_acpi_probe_table,, _end)
#else
#define ACPI_PROBE_TABLE(name)
#endif
@@ -308,9 +334,8 @@
#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name) \
. = ALIGN(8); \
- __##name##_thermal_table = .; \
- KEEP(*(__##name##_thermal_table)) \
- __##name##_thermal_table_end = .;
+ BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table, \
+ __##name##_thermal_table,, _end)
#else
#define THERMAL_TABLE(name)
#endif
@@ -327,10 +352,10 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
- MEM_KEEP(exit.data*) \
*(.data.unlikely) \
__start_once = .; \
*(.data.once) \
@@ -339,14 +364,14 @@
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- __start___dyndbg = .; \
- KEEP(*(__dyndbg)) \
- __stop___dyndbg = .; \
+ BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \
+ BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
BPF_RAW_TP() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ KUNIT_TABLE()
/*
* Data section helpers
@@ -384,9 +409,16 @@
#define JUMP_TABLE_DATA \
. = ALIGN(8); \
- __start___jump_table = .; \
- KEEP(*(__jump_table)) \
- __stop___jump_table = .;
+ BOUNDED_SECTION_BY(__jump_table, ___jump_table)
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+#define STATIC_CALL_DATA \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites) \
+ BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key)
+#else
+#define STATIC_CALL_DATA
+#endif
/*
* Allow architectures to handle ro_after_init data on their
@@ -398,10 +430,25 @@
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
+ STATIC_CALL_DATA \
__end_ro_after_init = .;
#endif
/*
+ * .kcfi_traps contains a list of KCFI trap locations.
+ */
+#ifndef KCFI_TRAPS
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+#define KCFI_TRAPS \
+ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \
+ BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps) \
+ }
+#else
+#define KCFI_TRAPS
+#endif
+#endif
+
+/*
* Read only Data
*/
#define RO_DATA(align) \
@@ -412,9 +459,7 @@
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
- __start___tracepoints_ptrs = .; \
- KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
- __stop___tracepoints_ptrs = .; \
+ BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@@ -424,41 +469,21 @@
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
- __start_pci_fixups_early = .; \
- KEEP(*(.pci_fixup_early)) \
- __end_pci_fixups_early = .; \
- __start_pci_fixups_header = .; \
- KEEP(*(.pci_fixup_header)) \
- __end_pci_fixups_header = .; \
- __start_pci_fixups_final = .; \
- KEEP(*(.pci_fixup_final)) \
- __end_pci_fixups_final = .; \
- __start_pci_fixups_enable = .; \
- KEEP(*(.pci_fixup_enable)) \
- __end_pci_fixups_enable = .; \
- __start_pci_fixups_resume = .; \
- KEEP(*(.pci_fixup_resume)) \
- __end_pci_fixups_resume = .; \
- __start_pci_fixups_resume_early = .; \
- KEEP(*(.pci_fixup_resume_early)) \
- __end_pci_fixups_resume_early = .; \
- __start_pci_fixups_suspend = .; \
- KEEP(*(.pci_fixup_suspend)) \
- __end_pci_fixups_suspend = .; \
- __start_pci_fixups_suspend_late = .; \
- KEEP(*(.pci_fixup_suspend_late)) \
- __end_pci_fixups_suspend_late = .; \
- } \
- \
- /* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
- __start_builtin_fw = .; \
- KEEP(*(.builtin_fw)) \
- __end_builtin_fw = .; \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early, _pci_fixups_early, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final, _pci_fixups_final, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \
} \
\
+ FW_LOADER_BUILT_IN_DATA \
TRACEDATA \
\
+ PRINTK_INDEX \
+ \
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
__start___ksymtab = .; \
@@ -473,27 +498,6 @@
__stop___ksymtab_gpl = .; \
} \
\
- /* Kernel symbol table: Normal unused symbols */ \
- __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
- __start___ksymtab_unused = .; \
- KEEP(*(SORT(___ksymtab_unused+*))) \
- __stop___ksymtab_unused = .; \
- } \
- \
- /* Kernel symbol table: GPL-only unused symbols */ \
- __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
- __start___ksymtab_unused_gpl = .; \
- KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
- __stop___ksymtab_unused_gpl = .; \
- } \
- \
- /* Kernel symbol table: GPL-future-only symbols */ \
- __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
- __start___ksymtab_gpl_future = .; \
- KEEP(*(SORT(___ksymtab_gpl_future+*))) \
- __stop___ksymtab_gpl_future = .; \
- } \
- \
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
__start___kcrctab = .; \
@@ -508,27 +512,6 @@
__stop___kcrctab_gpl = .; \
} \
\
- /* Kernel symbol table: Normal unused symbols */ \
- __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
- __start___kcrctab_unused = .; \
- KEEP(*(SORT(___kcrctab_unused+*))) \
- __stop___kcrctab_unused = .; \
- } \
- \
- /* Kernel symbol table: GPL-only unused symbols */ \
- __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
- __start___kcrctab_unused_gpl = .; \
- KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
- __stop___kcrctab_unused_gpl = .; \
- } \
- \
- /* Kernel symbol table: GPL-future-only symbols */ \
- __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
- __start___kcrctab_gpl_future = .; \
- KEEP(*(SORT(___kcrctab_gpl_future+*))) \
- __stop___kcrctab_gpl_future = .; \
- } \
- \
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
@@ -538,23 +521,20 @@
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
MEM_KEEP(init.rodata) \
- MEM_KEEP(exit.rodata) \
} \
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
- __start___param = .; \
- KEEP(*(__param)) \
- __stop___param = .; \
+ BOUNDED_SECTION_BY(__param, ___param) \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
- __start___modver = .; \
- KEEP(*(__modver)) \
- __stop___modver = .; \
+ BOUNDED_SECTION_BY(__modver, ___modver) \
} \
\
+ KCFI_TRAPS \
+ \
RO_EXCEPTION_TABLE \
NOTES \
BTF \
@@ -562,6 +542,7 @@
. = ALIGN((align)); \
__end_rodata = .;
+
/*
* Non-instrumentable text section
*/
@@ -569,6 +550,9 @@
ALIGN_FUNCTION(); \
__noinstr_text_start = .; \
*(.noinstr.text) \
+ __cpuidle_text_start = .; \
+ *(.cpuidle.text) \
+ __cpuidle_text_end = .; \
__noinstr_text_end = .;
/*
@@ -581,12 +565,14 @@
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
NOINSTR_TEXT \
- *(.text..refcount) \
*(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
- MEM_KEEP(exit.text*) \
/* sched.text is aling to function alignment to secure we have same
@@ -605,12 +591,6 @@
*(.spinlock.text) \
__lock_text_end = .;
-#define CPUIDLE_TEXT \
- ALIGN_FUNCTION(); \
- __cpuidle_text_start = .; \
- *(.cpuidle.text) \
- __cpuidle_text_end = .;
-
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
__kprobes_text_start = .; \
@@ -635,6 +615,12 @@
*(.softirqentry.text) \
__softirqentry_text_end = .;
+#define STATIC_CALL_TEXT \
+ ALIGN_FUNCTION(); \
+ __static_call_text_start = .; \
+ *(.static_call.text) \
+ __static_call_text_end = .;
+
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))
@@ -649,9 +635,7 @@
#define EXCEPTION_TABLE(align) \
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
- __start___ex_table = .; \
- KEEP(*(__ex_table)) \
- __stop___ex_table = .; \
+ BOUNDED_SECTION_BY(__ex_table, ___ex_table) \
}
/*
@@ -660,9 +644,7 @@
#ifdef CONFIG_DEBUG_INFO_BTF
#define BTF \
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
- __start_BTF = .; \
- *(.BTF) \
- __stop_BTF = .; \
+ BOUNDED_SECTION_BY(.BTF, _BTF) \
} \
. = ALIGN(4); \
.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
@@ -684,6 +666,7 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
__ctors_start = .; \
+ KEEP(*(SORT(.ctors.*))) \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
@@ -695,7 +678,7 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
- *(.init.data init.data.*) \
+ *(.init.data .init.data.*) \
MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
@@ -717,7 +700,8 @@
THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
LSM_TABLE() \
- EARLY_LSM_TABLE()
+ EARLY_LSM_TABLE() \
+ KUNIT_INIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -728,13 +712,10 @@
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
*(.dtors .dtors.*) \
- MEM_DISCARD(exit.data*) \
- MEM_DISCARD(exit.rodata*)
#define EXIT_TEXT \
*(.exit.text) \
*(.text.exit) \
- MEM_DISCARD(exit.text)
#define EXIT_CALL \
*(.exitcall.exit)
@@ -809,26 +790,35 @@
/* DWARF 4 */ \
.debug_types 0 : { *(.debug_types) } \
/* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
- .debug_addr 0 : { *(.debug_addr) }
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
- /* Stabs debugging sections. */
+/* Stabs debugging sections. */
#define STABS_DEBUG \
.stab 0 : { *(.stab) } \
.stabstr 0 : { *(.stabstr) } \
.stab.excl 0 : { *(.stab.excl) } \
.stab.exclstr 0 : { *(.stab.exclstr) } \
.stab.index 0 : { *(.stab.index) } \
- .stab.indexstr 0 : { *(.stab.indexstr) } \
- .comment 0 : { *(.comment) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+
+/* Required sections not related to debugging. */
+#define ELF_DETAILS \
+ .comment 0 : { *(.comment) } \
+ .symtab 0 : { *(.symtab) } \
+ .strtab 0 : { *(.strtab) } \
+ .shstrtab 0 : { *(.shstrtab) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
- __start___bug_table = .; \
- KEEP(*(__bug_table)) \
- __stop___bug_table = .; \
+ BOUNDED_SECTION_BY(__bug_table, ___bug_table) \
}
#else
#define BUG_TABLE
@@ -836,22 +826,22 @@
#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
+ .orc_header : AT(ADDR(.orc_header) - LOAD_OFFSET) { \
+ BOUNDED_SECTION_BY(.orc_header, _orc_header) \
+ } \
. = ALIGN(4); \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
- __start_orc_unwind_ip = .; \
- KEEP(*(.orc_unwind_ip)) \
- __stop_orc_unwind_ip = .; \
+ BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip) \
} \
. = ALIGN(2); \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
- __start_orc_unwind = .; \
- KEEP(*(.orc_unwind)) \
- __stop_orc_unwind = .; \
+ BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind) \
} \
+ text_size = _etext - _stext; \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
orc_lookup = .; \
- . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
orc_lookup_end = .; \
}
@@ -859,31 +849,56 @@
#define ORC_UNWIND_TABLE
#endif
+/* Built-in firmware blobs */
+#ifdef CONFIG_FW_LOADER
+#define FW_LOADER_BUILT_IN_DATA \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \
+ }
+#else
+#define FW_LOADER_BUILT_IN_DATA
+#endif
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
- __tracedata_start = .; \
- KEEP(*(.tracedata)) \
- __tracedata_end = .; \
+ BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \
}
#else
#define TRACEDATA
#endif
+#ifdef CONFIG_PRINTK_INDEX
+#define PRINTK_INDEX \
+ .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
+ BOUNDED_SECTION_BY(.printk_index, _printk_index) \
+ }
+#else
+#define PRINTK_INDEX
+#endif
+
+/*
+ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
+ * Otherwise, the type of the .notes section would become PROGBITS instead of NOTES.
+ *
+ * Also, discard .note.gnu.property, otherwise it forces the notes section to
+ * be 8-byte aligned which causes alignment mismatches with the kernel's custom
+ * 4-byte aligned notes.
+ */
#define NOTES \
+ /DISCARD/ : { \
+ *(.note.GNU-stack) \
+ *(.note.gnu.property) \
+ } \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
- __start_notes = .; \
- KEEP(*(.note.*)) \
- __stop_notes = .; \
+ BOUNDED_SECTION_BY(.note.*, _notes) \
} NOTES_HEADERS \
NOTES_HEADERS_RESTORE
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
- __setup_start = .; \
- KEEP(*(.init.setup)) \
- __setup_end = .;
+ BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end)
#define INIT_CALLS_LEVEL(level) \
__initcall##level##_start = .; \
@@ -905,9 +920,18 @@
__initcall_end = .;
#define CON_INITCALL \
- __con_initcall_start = .; \
- KEEP(*(.con_initcall.init)) \
- __con_initcall_end = .;
+ BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)
+
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)
+
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_INIT_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \
+ __kunit_init_suites, _start, _end)
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
@@ -955,13 +979,46 @@
EXIT_DATA
#endif
+/*
+ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
+ * -fsanitize=thread produce unwanted sections (.eh_frame
+ * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
+ * keep any .init_array.* sections.
+ * https://llvm.org/pr46478
+ */
+#ifdef CONFIG_UNWIND_TABLES
+#define DISCARD_EH_FRAME
+#else
+#define DISCARD_EH_FRAME *(.eh_frame)
+#endif
+#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+# ifdef CONFIG_CONSTRUCTORS
+# define SANITIZER_DISCARDS \
+ DISCARD_EH_FRAME
+# else
+# define SANITIZER_DISCARDS \
+ *(.init_array) *(.init_array.*) \
+ DISCARD_EH_FRAME
+# endif
+#else
+# define SANITIZER_DISCARDS
+#endif
+
+#define COMMON_DISCARDS \
+ SANITIZER_DISCARDS \
+ PATCHABLE_DISCARDS \
+ *(.discard) \
+ *(.discard.*) \
+ *(.export_symbol) \
+ *(.modinfo) \
+ /* ld.bfd warns about .gnu.version* even when not emitted */ \
+ *(.gnu.version*) \
+
#define DISCARDS \
/DISCARD/ : { \
EXIT_DISCARDS \
EXIT_CALL \
- *(.discard) \
- *(.discard.*) \
- *(.modinfo) \
+ COMMON_DISCARDS \
}
/**
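The BOUNDED_SECTION_* helpers used above replace the open-coded start/stop symbol boilerplate. A minimal sketch of plausible definitions, consistent with the symbol names these hunks emit (the real macros are defined earlier in vmlinux.lds.h):

#define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)	\
	_BEGIN_##_label_ = .;						\
	KEEP(*(_sec_))							\
	_END_##_label_ = .;

#define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)	\
	_label_##_BEGIN_ = .;						\
	KEEP(*(_sec_))							\
	_label_##_END_ = .;

/* Default bounds are __start_<label> .. __stop_<label>. */
#define BOUNDED_SECTION_BY(_sec_, _label_)				\
	BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)

Under these definitions, BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind) emits __start_orc_unwind/__stop_orc_unwind, matching the symbols the hunk deletes.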
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 20c93f08c993..ef3f841c6625 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -2,7 +2,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
#include <asm/byteorder.h>
#ifdef __BIG_ENDIAN
@@ -38,7 +39,7 @@ static inline long find_zero(unsigned long mask)
return (mask >> 8) ? byte : byte + 1;
}
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;
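Widening has_zero() from bool to unsigned long keeps the big- and little-endian variants' signatures consistent and avoids collapsing the computed per-byte information to 0/1. A sketch of the canonical caller pattern (cf. the name-hashing code in fs/namei.c); the helper name is illustrative and an aligned, NUL-terminated input is assumed:

static inline size_t waat_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;
	unsigned long a, bits;
	size_t len;

	for (len = 0; ; len += sizeof(long), p++) {
		a = *p;
		if (has_zero(a, &bits, &constants)) {
			/* bits carries per-byte zero information */
			bits = prep_zero_mask(a, bits, &constants);
			bits = create_zero_mask(bits);
			return len + find_zero(bits);
		}
	}
}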
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index b62a2a56a4d4..44509d48fca2 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -8,7 +8,8 @@
#include <linux/prefetch.h>
static void
-xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
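The __restrict qualifiers let the compiler assume the source and destination blocks never alias, so the unrolled loads and stores can be reordered and vectorized. For reference, a sketch of the full xor_8regs_2() body under the new signature (the hunks above show only the declarations):

static void
xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
	    const unsigned long * __restrict p2)
{
	long lines = bytes / (sizeof (long)) / 8;

	do {
		/* no-alias guarantee: all eight loads of p2 may be
		 * hoisted above the stores to p1 */
		p1[0] ^= p2[0];
		p1[1] ^= p2[1];
		p1[2] ^= p2[2];
		p1[3] ^= p2[3];
		p1[4] ^= p2[4];
		p1[5] ^= p2[5];
		p1[6] ^= p2[6];
		p1[7] ^= p2[7];
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);
}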
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 1d68d5613dae..cbbc9a6dc571 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -21,10 +21,11 @@
#define CNTHCTL_EVNTEN (1 << 2)
#define CNTHCTL_EVNTDIR (1 << 3)
#define CNTHCTL_EVNTI (0xF << 4)
+#define CNTHCTL_ECV (1 << 12)
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
- ARCH_TIMER_REG_TVAL,
+ ARCH_TIMER_REG_CVAL,
};
enum arch_timer_ppi_nr {
@@ -32,6 +33,7 @@ enum arch_timer_ppi_nr {
ARCH_TIMER_PHYS_NONSECURE_PPI,
ARCH_TIMER_VIRT_PPI,
ARCH_TIMER_HYP_PPI,
+ ARCH_TIMER_HYP_VIRT_PPI,
ARCH_TIMER_MAX_TIMER_PPI
};
@@ -55,6 +57,7 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
+#define ARCH_TIMER_EVT_INTERVAL_SCALE (1 << 17) /* EVNTIS in the ARMv8 ARM */
#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100
#define ARCH_TIMER_EVT_STREAM_FREQ \
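Switching ARCH_TIMER_REG_TVAL to ARCH_TIMER_REG_CVAL moves the driver from the 32-bit downcounter to the 64-bit comparator. A hedged sketch of the reprogramming this implies; the accessor names below are hypothetical, not the driver's real helpers:

/* CVAL programming: fire when the counter reaches now + delta. The
 * 64-bit comparator avoids TVAL's 32-bit limit on the maximum delta. */
static inline void arch_timer_set_next_event_sketch(u64 delta)
{
	u64 now = read_cntvct_sketch();		/* hypothetical CNTVCT read */

	write_cntv_cval_sketch(now + delta);	/* hypothetical CVAL write  */
}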
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 34eef083c988..6cdc873ac907 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -15,28 +15,32 @@
#include <linux/clocksource.h>
#include <linux/math64.h>
-#include <asm/mshyperv.h>
+#include <asm/hyperv-tlfs.h>
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
+#ifdef CONFIG_HYPERV_TIMER
+
+#include <asm/hyperv_timer.h>
+
/* Routines called by the VMbus driver */
-extern int hv_stimer_alloc(void);
-extern void hv_stimer_free(void);
+extern int hv_stimer_alloc(bool have_percpu_irqs);
extern int hv_stimer_cleanup(unsigned int cpu);
extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
extern void hv_stimer_legacy_cleanup(unsigned int cpu);
extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
-#ifdef CONFIG_HYPERV_TIMER
-extern u64 (*hv_read_reference_counter)(void);
extern void hv_init_clocksource(void);
+extern void hv_remap_tsc_clocksource(void);
+extern unsigned long hv_get_tsc_pfn(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
-static inline notrace u64
-hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc, u64 *time)
{
u64 scale, offset;
u32 sequence;
@@ -60,7 +64,7 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
do {
sequence = READ_ONCE(tsc_pg->tsc_sequence);
if (!sequence)
- return U64_MAX;
+ return false;
/*
* Make sure we read sequence before we read other values from
* TSC page.
@@ -79,28 +83,33 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
- return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+ *time = mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+ return true;
}
-static inline notrace u64
-hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
+#else /* CONFIG_HYPERV_TIMER */
+static inline unsigned long hv_get_tsc_pfn(void)
{
- u64 cur_tsc;
-
- return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
+ return 0;
}
-#else /* CONFIG_HYPERV_TIMER */
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return NULL;
}
-static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
- u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc, u64 *time)
{
- return U64_MAX;
+ return false;
}
+
+static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
+static inline void hv_stimer_legacy_init(unsigned int cpu, int sint) {}
+static inline void hv_stimer_legacy_cleanup(unsigned int cpu) {}
+static inline void hv_stimer_global_cleanup(void) {}
+static inline void hv_stimer0_isr(void) {}
+
#endif /* CONFIG_HYPERV_TIMER */
#endif
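With the new signature, validity is reported through the bool return and the computed reference time through *time, so U64_MAX no longer has to double as an error value. A minimal caller sketch (the wrapper name is illustrative):

static u64 read_hv_clock_sketch(void)
{
	u64 cur_tsc, time;

	if (hv_read_tsc_page_tsc(hv_get_tsc_page(), &cur_tsc, &time))
		return time;

	return U64_MAX;	/* TSC page invalid; caller falls back, e.g. to an MSR read */
}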
diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h
index c395238d0922..9b435caa95fe 100644
--- a/include/clocksource/samsung_pwm.h
+++ b/include/clocksource/samsung_pwm.h
@@ -27,6 +27,7 @@ struct samsung_pwm_variant {
};
void samsung_pwm_clocksource_init(void __iomem *base,
- unsigned int *irqs, struct samsung_pwm_variant *variant);
+ unsigned int *irqs,
+ const struct samsung_pwm_variant *variant);
#endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */
diff --git a/include/clocksource/timer-goldfish.h b/include/clocksource/timer-goldfish.h
new file mode 100644
index 000000000000..05a3a4f610d6
--- /dev/null
+++ b/include/clocksource/timer-goldfish.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * goldfish-timer clocksource
+ * Registers definition for the goldfish-timer device
+ */
+
+#ifndef _CLOCKSOURCE_TIMER_GOLDFISH_H
+#define _CLOCKSOURCE_TIMER_GOLDFISH_H
+
+/*
+ * TIMER_TIME_LOW get low bits of current time and update TIMER_TIME_HIGH
+ * TIMER_TIME_HIGH get high bits of time at last TIMER_TIME_LOW read
+ * TIMER_ALARM_LOW set low bits of alarm and activate it
+ * TIMER_ALARM_HIGH set high bits of next alarm
+ * TIMER_IRQ_ENABLED enable alarm interrupt
+ * TIMER_CLEAR_ALARM disarm an existing alarm
+ * TIMER_ALARM_STATUS alarm status (running or not)
+ * TIMER_CLEAR_INTERRUPT clear interrupt
+ */
+#define TIMER_TIME_LOW 0x00
+#define TIMER_TIME_HIGH 0x04
+#define TIMER_ALARM_LOW 0x08
+#define TIMER_ALARM_HIGH 0x0c
+#define TIMER_IRQ_ENABLED 0x10
+#define TIMER_CLEAR_ALARM 0x14
+#define TIMER_ALARM_STATUS 0x18
+#define TIMER_CLEAR_INTERRUPT 0x1c
+
+extern int goldfish_timer_init(int irq, void __iomem *base);
+
+#endif /* _CLOCKSOURCE_TIMER_GOLDFISH_H */
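Per the register notes above, reading TIMER_TIME_LOW latches the current time into TIMER_TIME_HIGH, which allows a tear-free 64-bit read. A minimal sketch under that assumption (needs <linux/io.h> for readl()):

static u64 goldfish_timer_read_sketch(void __iomem *base)
{
	u32 lo = readl(base + TIMER_TIME_LOW);	/* latches HIGH */
	u32 hi = readl(base + TIMER_TIME_HIGH);

	return ((u64)hi << 32) | lo;
}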
diff --git a/include/clocksource/timer-riscv.h b/include/clocksource/timer-riscv.h
new file mode 100644
index 000000000000..d7f455754e60
--- /dev/null
+++ b/include/clocksource/timer-riscv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __TIMER_RISCV_H
+#define __TIMER_RISCV_H
+
+#include <linux/types.h>
+
+extern void riscv_cs_get_mult_shift(u32 *mult, u32 *shift);
+
+#endif
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
deleted file mode 100644
index a5b41f31a1c2..000000000000
--- a/include/clocksource/timer-sp804.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLKSOURCE_TIMER_SP804_H
-#define __CLKSOURCE_TIMER_SP804_H
-
-struct clk;
-
-int __sp804_clocksource_and_sched_clock_init(void __iomem *,
- const char *, struct clk *, int);
-int __sp804_clockevents_init(void __iomem *, unsigned int,
- struct clk *, const char *);
-void sp804_timer_disable(void __iomem *);
-
-static inline void sp804_clocksource_init(void __iomem *base, const char *name)
-{
- __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
-}
-
-static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
- const char *name)
-{
- __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
-}
-
-static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
-{
- __sp804_clockevents_init(base, irq, NULL, name);
-
-}
-#endif
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 4c61dade8835..dcc1712f75e7 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -52,10 +52,6 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-/* posted mode types */
-#define OMAP_TIMER_NONPOSTED 0x00
-#define OMAP_TIMER_POSTED 0x01
-
/* timer capabilities used in hwmod database */
#define OMAP_TIMER_SECURE 0x80000000
#define OMAP_TIMER_ALWON 0x40000000
@@ -63,72 +59,11 @@
#define OMAP_TIMER_NEEDS_RESET 0x10000000
#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000
-/*
- * timer errata flags
- *
- * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
- * errata prevents us from using posted mode on these devices, unless the
- * timer counter register is never read. For more details please refer to
- * the OMAP3/4/5 errata documents.
- */
-#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
-
-struct timer_regs {
- u32 tidr;
- u32 tier;
- u32 twer;
- u32 tclr;
- u32 tcrr;
- u32 tldr;
- u32 ttrg;
- u32 twps;
- u32 tmar;
- u32 tcar1;
- u32 tsicr;
- u32 tcar2;
- u32 tpir;
- u32 tnir;
- u32 tcvr;
- u32 tocr;
- u32 towr;
-};
-
struct omap_dm_timer {
- int id;
- int irq;
- struct clk *fclk;
-
- void __iomem *io_base;
- void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */
- void __iomem *irq_ena; /* irq enable */
- void __iomem *irq_dis; /* irq disable, only on v2 ip */
- void __iomem *pend; /* write pending */
- void __iomem *func_base; /* function register base */
-
- atomic_t enabled;
- unsigned long rate;
- unsigned reserved:1;
- unsigned posted:1;
- struct timer_regs context;
- int revision;
- u32 capability;
- u32 errata;
- struct platform_device *pdev;
- struct list_head node;
- struct notifier_block nb;
};
-int omap_dm_timer_reserve_systimer(int id);
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap);
-
-int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
-
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
-int omap_dm_timer_trigger(struct omap_dm_timer *timer);
-
-int omap_dm_timers_active(void);
-
/*
* Do not use the defines below, they are not needed. They should be only
* used by dmtimer.c and sys_timer related code.
@@ -198,196 +133,4 @@ int omap_dm_timers_active(void);
#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
-/* register offsets with the write pending bit encoded */
-#define WPSHIFT 16
-
-#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
- | (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
- | (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
- | (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
- | (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
- | (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
- | (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
- | (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
- | (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
- (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
- (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
-/*
- * The below are inlined to optimize code size for system timers. Other code
- * should not need these at all.
- */
-#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS)
-static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
- int posted)
-{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
- cpu_relax();
-
- return readl_relaxed(timer->func_base + (reg & 0xff));
-}
-
-static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
- u32 reg, u32 val, int posted)
-{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
- cpu_relax();
-
- writel_relaxed(val, timer->func_base + (reg & 0xff));
-}
-
-static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
-{
- u32 tidr;
-
- /* Assume v1 ip if bits [31:16] are zero */
- tidr = readl_relaxed(timer->io_base);
- if (!(tidr >> 16)) {
- timer->revision = 1;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
- timer->func_base = timer->io_base;
- } else {
- timer->revision = 2;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
- timer->pend = timer->io_base +
- _OMAP_TIMER_WRITE_PEND_OFFSET +
- OMAP_TIMER_V2_FUNC_OFFSET;
- timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
- }
-}
-
-/*
- * __omap_dm_timer_enable_posted - enables write posted mode
- * @timer: pointer to timer instance handle
- *
- * Enables the write posted mode for the timer. When posted mode is enabled
- * writes to certain timer registers are immediately acknowledged by the
- * internal bus and hence prevents stalling the CPU waiting for the write to
- * complete. Enabling this feature can improve performance for writing to the
- * timer registers.
- */
-static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
-{
- if (timer->posted)
- return;
-
- if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
- timer->posted = OMAP_TIMER_NONPOSTED;
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
- return;
- }
-
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED, 0);
- timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
- timer->posted = OMAP_TIMER_POSTED;
-}
-
-/**
- * __omap_dm_timer_override_errata - override errata flags for a timer
- * @timer: pointer to timer handle
- * @errata: errata flags to be ignored
- *
- * For a given timer, override a timer errata by clearing the flags
- * specified by the errata argument. A specific erratum should only be
- * overridden for a timer if the timer is used in such a way the erratum
- * has no impact.
- */
-static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer,
- u32 errata)
-{
- timer->errata &= ~errata;
-}
-
-static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
- int posted, unsigned long rate)
-{
- u32 l;
-
- l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
- if (l & OMAP_TIMER_CTRL_ST) {
- l &= ~0x1;
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
-#ifdef CONFIG_ARCH_OMAP2PLUS
- /* Readback to make sure write has completed */
- __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
- /*
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
- udelay(3500000 / rate + 1);
-#endif
- }
-
- /* Ack possibly pending interrupt */
- writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
-}
-
-static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer,
- u32 ctrl, unsigned int load,
- int posted)
-{
- __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted);
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted);
-}
-
-static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
-{
- writel_relaxed(value, timer->irq_ena);
- __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
-}
-
-static inline unsigned int
-__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
-{
- return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
-}
-
-static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
- unsigned int value)
-{
- writel_relaxed(value, timer->irq_stat);
-}
-#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */
#endif /* __CLOCKSOURCE_DMTIMER_H */
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
new file mode 100644
index 000000000000..c0f56fe6d22a
--- /dev/null
+++ b/include/clocksource/timer-xilinx.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ */
+
+#ifndef XILINX_TIMER_H
+#define XILINX_TIMER_H
+
+#include <linux/compiler.h>
+
+#define TCSR0 0x00
+#define TLR0 0x04
+#define TCR0 0x08
+#define TCSR1 0x10
+#define TLR1 0x14
+#define TCR1 0x18
+
+#define TCSR_MDT BIT(0)
+#define TCSR_UDT BIT(1)
+#define TCSR_GENT BIT(2)
+#define TCSR_CAPT BIT(3)
+#define TCSR_ARHT BIT(4)
+#define TCSR_LOAD BIT(5)
+#define TCSR_ENIT BIT(6)
+#define TCSR_ENT BIT(7)
+#define TCSR_TINT BIT(8)
+#define TCSR_PWMA BIT(9)
+#define TCSR_ENALL BIT(10)
+#define TCSR_CASC BIT(11)
+
+struct clk;
+struct device_node;
+struct regmap;
+
+/**
+ * struct xilinx_timer_priv - Private data for Xilinx AXI timer drivers
+ * @map: Regmap of the device, possibly with an offset
+ * @clk: Parent clock
+ * @max: Maximum value of the counters
+ */
+struct xilinx_timer_priv {
+ struct regmap *map;
+ struct clk *clk;
+ u32 max;
+};
+
+/**
+ * xilinx_timer_tlr_cycles() - Calculate the TLR for a period specified
+ * in clock cycles
+ * @priv: The timer's private data
+ * @tcsr: The value of the TCSR register for this counter
+ * @cycles: The number of cycles in this period
+ *
+ * Callers of this function MUST ensure that @cycles is representable as
+ * a TLR.
+ *
+ * Return: The calculated value for TLR
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+ u64 cycles);
+
+/**
+ * xilinx_timer_get_period() - Get the current period of a counter
+ * @priv: The timer's private data
+ * @tlr: The value of TLR for this counter
+ * @tcsr: The value of TCSR for this counter
+ *
+ * Return: The period, in ns
+ */
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+ u32 tlr, u32 tcsr);
+
+#endif /* XILINX_TIMER_H */
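The TLR contract above leaves the count direction to TCSR: an up-counter interrupts on rollover and is therefore loaded with a value near (max - cycles), while a down-counter (TCSR_UDT set) is loaded with the cycle count itself. A sketch of that shape only; the exact off-by-one adjustments are deliberately omitted, as they are the driver's concern, and the caller has already guaranteed @cycles fits in a TLR:

static u32 xilinx_timer_tlr_sketch(const struct xilinx_timer_priv *priv,
				   u32 tcsr, u64 cycles)
{
	if (tcsr & TCSR_UDT)
		return cycles;			/* count down to zero      */

	return priv->max - cycles;		/* count up from here      */
}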
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index fcde59c65a81..80e243611fe2 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -8,9 +8,13 @@
*/
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
+#define CRYPTO_ACOMP_DST_MAX 131072
/**
* struct acomp_req - asynchronous (de)compression request
@@ -52,37 +56,35 @@ struct crypto_acomp {
struct crypto_tfm base;
};
-/**
- * struct acomp_alg - asynchronous compression algorithm
- *
- * @compress: Function performs a compress operation
- * @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the algorithm
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
- *
- * @reqsize: Context size for (de)compression requests
- * @base: Common crypto API algorithm data structure
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt: number of compress requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_cnt: number of decompress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @err_cnt: number of errors for compress requests
*/
-struct acomp_alg {
- int (*compress)(struct acomp_req *req);
- int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
- int (*init)(struct crypto_acomp *tfm);
- void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
- struct crypto_alg base;
+struct crypto_istat_compress {
+ atomic64_t compress_cnt;
+ atomic64_t compress_tlen;
+ atomic64_t decompress_cnt;
+ atomic64_t decompress_tlen;
+ atomic64_t err_cnt;
};
+#ifdef CONFIG_CRYPTO_STATS
+#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
+#else
+#define COMP_ALG_COMMON_STATS
+#endif
+
+#define COMP_ALG_COMMON { \
+ COMP_ALG_COMMON_STATS \
+ \
+ struct crypto_alg base; \
+}
+struct comp_alg_common COMP_ALG_COMMON;
+
/**
* DOC: Asynchronous Compression API
*
@@ -130,9 +132,10 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
return &tfm->base;
}
-static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+ struct crypto_alg *alg)
{
- return container_of(alg, struct acomp_alg, base);
+ return container_of(alg, struct comp_alg_common, base);
}
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
@@ -140,9 +143,10 @@ static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_acomp, base);
}
-static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+static inline struct comp_alg_common *crypto_comp_alg_common(
+ struct crypto_acomp *tfm)
{
- return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+ return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
@@ -156,6 +160,12 @@ static inline void acomp_request_set_tfm(struct acomp_req *req,
req->base.tfm = crypto_acomp_tfm(tfm);
}
+static inline bool acomp_is_async(struct crypto_acomp *tfm)
+{
+ return crypto_comp_alg_common(tfm)->base.cra_flags &
+ CRYPTO_ALG_ASYNC;
+}
+
static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
return __crypto_acomp_tfm(req->base.tfm);
@@ -165,6 +175,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
* crypto_free_acomp() -- free ACOMPRESS tfm handle
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
@@ -216,7 +228,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
{
req->base.complete = cmpl;
req->base.data = data;
- req->base.flags = flgs;
+ req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
}
/**
@@ -243,10 +256,32 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
+ req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
if (!req->dst)
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
+static inline struct crypto_istat_compress *comp_get_stat(
+ struct comp_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&comp_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -259,14 +294,18 @@ static inline void acomp_request_set_params(struct acomp_req *req,
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->compress(req);
- crypto_stats_compress(slen, ret, alg);
- return ret;
+ struct comp_alg_common *alg;
+
+ alg = crypto_comp_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+ atomic64_inc(&istat->compress_cnt);
+ atomic64_add(req->slen, &istat->compress_tlen);
+ }
+
+ return crypto_comp_errstat(alg, tfm->compress(req));
}
/**
@@ -281,14 +320,18 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->decompress(req);
- crypto_stats_decompress(slen, ret, alg);
- return ret;
+ struct comp_alg_common *alg;
+
+ alg = crypto_comp_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+ atomic64_inc(&istat->decompress_cnt);
+ atomic64_add(req->slen, &istat->decompress_tlen);
+ }
+
+ return crypto_comp_errstat(alg, tfm->decompress(req));
}
#endif
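The conversion away from crypto_stats_get()/crypto_stats_compress() relies on IS_ENABLED(CONFIG_CRYPTO_STATS) being a compile-time constant: with stats disabled the branch is dead code, so comp_get_stat()'s NULL return is never dereferenced and the counters cost nothing. The same shape, reduced to its essentials (the wrapper name is hypothetical):

static inline int acomp_do_op_sketch(struct comp_alg_common *alg,
				     struct acomp_req *req,
				     int (*op)(struct acomp_req *))
{
	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_compress *istat = comp_get_stat(alg);

		atomic64_inc(&istat->compress_cnt);
		atomic64_add(req->slen, &istat->compress_tlen);
	}

	/* crypto_comp_errstat() counts real errors, passes err through */
	return crypto_comp_errstat(alg, op(req));
}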
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index c32a6f5664e9..51382befbe37 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -8,9 +8,11 @@
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/types.h>
/**
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
@@ -26,15 +28,12 @@
*
* For example: authenc(hmac(sha256), cbc(aes))
*
- * The example code provided for the symmetric key cipher operation
- * applies here as well. Naturally all *skcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addition, for the AEAD
- * operation, the aead_request_set_ad function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
+ * The example code provided for the symmetric key cipher operation applies
+ * here as well. Naturally all *skcipher* symbols must be exchanged for the
+ * *aead* pendants discussed in the following. In addition, for the AEAD operation,
+ * the aead_request_set_ad function must be used to set the pointer to the
+ * associated data memory location before performing the encryption or
+ * decryption operation. Another deviation from the asynchronous block cipher
* operation is that the caller should explicitly check for -EBADMSG of the
* crypto_aead_decrypt. That error indicates an authentication error, i.e.
* a breach in the integrity of the message. In essence, that -EBADMSG error
@@ -48,7 +47,10 @@
*
* The destination scatterlist has the same layout, except that the plaintext
* (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
- * during encryption (resp. decryption).
+ * during encryption (resp. decryption). The authentication tag is generated
+ * during the encryption operation and appended to the ciphertext. During
+ * decryption, the authentication tag is consumed along with the ciphertext and
+ * used to verify the integrity of the plaintext and the associated data.
*
* In-place encryption/decryption is enabled by using the same scatterlist
* pointer for both the source and destination.
@@ -73,6 +75,7 @@
*/
struct crypto_aead;
+struct scatterlist;
/**
* struct aead_request - AEAD request
@@ -98,6 +101,22 @@ struct aead_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
@@ -116,6 +135,7 @@ struct aead_request {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
+ * @stat: statistics for AEAD algorithm
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -142,6 +162,10 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_aead stat;
+#endif
+
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
@@ -185,12 +209,31 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+/**
+ * crypto_has_aead() - Search for the availability of an aead.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * aead
+ * @type: specifies the type of the aead
+ * @mask: specifies the mask for the aead
+ *
+ * Return: true when the aead is known to the kernel crypto API; false
+ * otherwise
+ */
+int crypto_has_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
+}
+
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -483,7 +526,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
* The memory structure for cipher operation has the following structure:
*
* - AEAD encryption input: assoc data || plaintext
- * - AEAD encryption output: assoc data || cipherntext || auth tag
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
* - AEAD decryption input: assoc data || ciphertext || auth tag
* - AEAD decryption output: assoc data || plaintext
*
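A minimal encrypt setup consistent with the layout spelled out above; req, sg, iv and the lengths are assumed to have been prepared by the caller, and in-place operation simply passes the same scatterlist as source and destination:

	aead_request_set_ad(req, assoclen);		/* assoc data leads the sgl */
	aead_request_set_crypt(req, sg, sg, plen, iv);	/* in-place: src == dst     */
	err = crypto_aead_encrypt(req);			/* dst gains the auth tag   */

The destination scatterlist must therefore cover plen + crypto_aead_authsize(tfm) bytes after the associated data.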
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 1d3aa252caba..31c111bebb68 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -7,6 +7,8 @@
*/
#ifndef _CRYPTO_AKCIPHER_H
#define _CRYPTO_AKCIPHER_H
+
+#include <linux/atomic.h>
#include <linux/crypto.h>
/**
@@ -43,12 +45,35 @@ struct akcipher_request {
* struct crypto_akcipher - user-instantiated objects which encapsulate
* algorithms and core processing logic
*
+ * @reqsize: Request context size required by algorithm implementation
* @base: Common crypto API algorithm data structure
*/
struct crypto_akcipher {
+ unsigned int reqsize;
+
struct crypto_tfm base;
};
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @verify_cnt: number of verify operations
+ * @sign_cnt: number of sign requests
+ * @err_cnt: number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t verify_cnt;
+ atomic64_t sign_cnt;
+ atomic64_t err_cnt;
+};
+
/**
* struct akcipher_alg - generic public key algorithm
*
@@ -85,8 +110,8 @@ struct crypto_akcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
+ * @stat: Statistics for akcipher algorithm
*
- * @reqsize: Request context size required by algorithm implementation
* @base: Common crypto API algorithm data structure
*/
struct akcipher_alg {
@@ -102,7 +127,10 @@ struct akcipher_alg {
int (*init)(struct crypto_akcipher *tfm);
void (*exit)(struct crypto_akcipher *tfm);
- unsigned int reqsize;
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_akcipher stat;
+#endif
+
struct crypto_alg base;
};
@@ -155,7 +183,7 @@ static inline struct akcipher_alg *crypto_akcipher_alg(
static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
{
- return crypto_akcipher_alg(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void akcipher_request_set_tfm(struct akcipher_request *req,
@@ -174,6 +202,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
* crypto_free_akcipher() - free AKCIPHER tfm handle
*
* @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
{
@@ -272,6 +302,27 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
+static inline struct crypto_istat_akcipher *akcipher_get_stat(
+ struct akcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -286,14 +337,15 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->encrypt(req);
- crypto_stats_akcipher_encrypt(src_len, ret, calg);
- return ret;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->src_len, &istat->encrypt_tlen);
+ }
+
+ return crypto_akcipher_errstat(alg, alg->encrypt(req));
}
/**
@@ -310,17 +362,54 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->decrypt(req);
- crypto_stats_akcipher_decrypt(src_len, ret, calg);
- return ret;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->src_len, &istat->decrypt_tlen);
+ }
+
+ return crypto_akcipher_errstat(alg, alg->decrypt(req));
}
/**
+ * crypto_akcipher_sync_encrypt() - Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_akcipher_sync_decrypt() - Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: Output length on success; error code in case of error
+ */
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
* crypto_akcipher_sign() - Invoke public key sign operation
*
* Function invokes the specific public key sign operation for a given
@@ -334,13 +423,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->sign(req);
- crypto_stats_akcipher_sign(ret, calg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
+
+ return crypto_akcipher_errstat(alg, alg->sign(req));
}
/**
@@ -361,13 +448,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->verify(req);
- crypto_stats_akcipher_verify(ret, calg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
+
+ return crypto_akcipher_errstat(alg, alg->verify(req));
}
/**
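The new sync helpers trade the request object and scatterlists for flat buffers and an internal wait for completion. A usage sketch; "rsa" is just an example algorithm, and key setup (crypto_akcipher_set_pub_key()) is omitted for brevity:

static int akcipher_sync_demo(const void *src, unsigned int slen,
			      void *dst, unsigned int dlen)
{
	struct crypto_akcipher *tfm;
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* a real caller sets the key here before encrypting */
	err = crypto_akcipher_sync_encrypt(tfm, src, slen, dst, dlen);

	crypto_free_akcipher(tfm);
	return err;
}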
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 143d884d65c7..7a4a71af653f 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -7,10 +7,12 @@
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
+#include <crypto/utils.h>
+#include <linux/align.h>
+#include <linux/cache.h>
#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@@ -18,24 +20,50 @@
* algs and architectures. Ciphers have a lower maximum size.
*/
#define MAX_ALGAPI_BLOCKSIZE 160
-#define MAX_ALGAPI_ALIGNMASK 63
+#define MAX_ALGAPI_ALIGNMASK 127
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
+#ifdef ARCH_DMA_MINALIGN
+#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
+#else
+#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
+#endif
+
+#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
struct crypto_aead;
struct crypto_instance;
struct module;
+struct notifier_block;
struct rtattr;
+struct scatterlist;
struct seq_file;
+struct sk_buff;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
void (*free)(struct crypto_instance *inst);
+#ifdef CONFIG_CRYPTO_STATS
+ int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
+#endif
unsigned int type;
unsigned int maskclear;
@@ -55,6 +83,8 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
+ struct work_struct free_work;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -96,6 +126,23 @@ struct scatter_walk {
unsigned int offset;
};
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
+
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -118,7 +165,6 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
@@ -134,98 +180,45 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
}
void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
-static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s = (unsigned long *)src;
-
- while (size > 0) {
- *d++ ^= *s++;
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, dst, src, size);
- }
+ return tfm->__crt_ctx;
}
-static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
- unsigned int size)
+static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
+ unsigned int align)
{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s1 = (unsigned long *)src1;
- unsigned long *s2 = (unsigned long *)src2;
-
- while (size > 0) {
- *d++ = *s1++ ^ *s2++;
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, src1, src2, size);
- }
-}
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
-static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
-{
- return PTR_ALIGN(crypto_tfm_ctx(tfm),
- crypto_tfm_alg_alignmask(tfm) + 1);
-}
-
-static inline struct crypto_instance *crypto_tfm_alg_instance(
- struct crypto_tfm *tfm)
-{
- return container_of(tfm->__crt_alg, struct crypto_instance, alg);
-}
-
-static inline void *crypto_instance_ctx(struct crypto_instance *inst)
-{
- return inst->__ctx;
+ return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}
-struct crypto_cipher_spawn {
- struct crypto_spawn base;
-};
-
-static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
- struct crypto_instance *inst,
- const char *name, u32 type, u32 mask)
+static inline unsigned int crypto_dma_align(void)
{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
- return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+ return CRYPTO_DMA_ALIGN;
}
-static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+static inline unsigned int crypto_dma_padding(void)
{
- crypto_drop_spawn(&spawn->base);
+ return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}
-static inline struct crypto_alg *crypto_spawn_cipher_alg(
- struct crypto_cipher_spawn *spawn)
+static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
- return spawn->base.alg;
+ return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
-static inline struct crypto_cipher *crypto_spawn_cipher(
- struct crypto_cipher_spawn *spawn)
+static inline struct crypto_instance *crypto_tfm_alg_instance(
+ struct crypto_tfm *tfm)
{
- u32 type = CRYPTO_ALG_TYPE_CIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
+ return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}
-static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
- return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+ return inst->__ctx;
}
static inline struct crypto_async_request *crypto_get_backlog(
@@ -260,29 +253,6 @@ static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- * timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
- return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
-static inline void crypto_yield(u32 flags)
-{
- if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- cond_resched();
-}
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
@@ -293,4 +263,15 @@ enum {
CRYPTO_MSG_ALG_LOADED,
};
+static inline void crypto_request_complete(struct crypto_async_request *req,
+ int err)
+{
+ req->complete(req->data, err);
+}
+
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
#endif /* _CRYPTO_ALGAPI_H */
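Putting the new DMA helpers together: a driver pads its declared context size with CRYPTO_DMA_PADDING and then fetches a pointer aligned for DMA via crypto_tfm_ctx_dma(). A sketch with illustrative struct and function names:

struct mydrv_ctx {
	u8 key[32];
};

static int mydrv_init_tfm(struct crypto_tfm *tfm)
{
	struct mydrv_ctx *ctx = crypto_tfm_ctx_dma(tfm);	/* DMA-aligned */

	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

/* at registration time:
 *	.cra_ctxsize = sizeof(struct mydrv_ctx) + CRYPTO_DMA_PADDING,
 */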
diff --git a/include/crypto/aria.h b/include/crypto/aria.h
new file mode 100644
index 000000000000..73295146be11
--- /dev/null
+++ b/include/crypto/aria.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * ARIA Cipher Algorithm.
+ *
+ * Documentation of ARIA can be found in RFC 5794.
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ *
+ * Information for ARIA
+ * http://210.104.33.10/ARIA/index-e.html (English)
+ * http://seed.kisa.or.kr/ (Korean)
+ *
+ * Public domain version is distributed above.
+ */
+
+#ifndef _CRYPTO_ARIA_H
+#define _CRYPTO_ARIA_H
+
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+#define ARIA_MIN_KEY_SIZE 16
+#define ARIA_MAX_KEY_SIZE 32
+#define ARIA_BLOCK_SIZE 16
+#define ARIA_MAX_RD_KEYS 17
+#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32))
+
+struct aria_ctx {
+ u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ int rounds;
+ int key_length;
+};
+
+static const u32 s1[256] = {
+ 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b,
+ 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5,
+ 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b,
+ 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676,
+ 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d,
+ 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0,
+ 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf,
+ 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0,
+ 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626,
+ 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc,
+ 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1,
+ 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515,
+ 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3,
+ 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a,
+ 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2,
+ 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575,
+ 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a,
+ 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0,
+ 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3,
+ 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484,
+ 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed,
+ 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b,
+ 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939,
+ 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf,
+ 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb,
+ 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585,
+ 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f,
+ 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8,
+ 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f,
+ 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5,
+ 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121,
+ 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2,
+ 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec,
+ 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717,
+ 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d,
+ 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373,
+ 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc,
+ 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888,
+ 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414,
+ 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb,
+ 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a,
+ 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c,
+ 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262,
+ 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979,
+ 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d,
+ 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9,
+ 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea,
+ 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808,
+ 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e,
+ 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6,
+ 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f,
+ 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a,
+ 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666,
+ 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e,
+ 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9,
+ 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e,
+ 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111,
+ 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494,
+ 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9,
+ 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf,
+ 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d,
+ 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868,
+ 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f,
+ 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616
+};
+
+static const u32 s2[256] = {
+ 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc,
+ 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc,
+ 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646,
+ 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1,
+ 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb,
+ 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b,
+ 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303,
+ 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1,
+ 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b,
+ 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969,
+ 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae,
+ 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb,
+ 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb,
+ 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4,
+ 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa,
+ 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb,
+ 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929,
+ 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191,
+ 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595,
+ 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd,
+ 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838,
+ 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828,
+ 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7,
+ 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353,
+ 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131,
+ 0x36003636, 0x21002121, 0x58005858, 0x48004848,
+ 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474,
+ 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1,
+ 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7,
+ 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626,
+ 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9,
+ 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040,
+ 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd,
+ 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404,
+ 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f,
+ 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc,
+ 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757,
+ 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565,
+ 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e,
+ 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5,
+ 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717,
+ 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a,
+ 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767,
+ 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343,
+ 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5,
+ 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434,
+ 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a,
+ 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8,
+ 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b,
+ 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6,
+ 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424,
+ 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada,
+ 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef,
+ 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f,
+ 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a,
+ 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c,
+ 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333,
+ 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3,
+ 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0,
+ 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d,
+ 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5,
+ 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8,
+ 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a,
+ 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181
+};
+
+static const u32 x1[256] = {
+ 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5,
+ 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038,
+ 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e,
+ 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb,
+ 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082,
+ 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087,
+ 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044,
+ 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb,
+ 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032,
+ 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d,
+ 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b,
+ 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e,
+ 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066,
+ 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2,
+ 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049,
+ 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025,
+ 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064,
+ 0x86860086, 0x68680068, 0x98980098, 0x16160016,
+ 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc,
+ 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092,
+ 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050,
+ 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da,
+ 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057,
+ 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084,
+ 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000,
+ 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a,
+ 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005,
+ 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006,
+ 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f,
+ 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002,
+ 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003,
+ 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b,
+ 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041,
+ 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea,
+ 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce,
+ 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073,
+ 0x96960096, 0xacac00ac, 0x74740074, 0x22220022,
+ 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085,
+ 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8,
+ 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e,
+ 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071,
+ 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089,
+ 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e,
+ 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b,
+ 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b,
+ 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020,
+ 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe,
+ 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4,
+ 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033,
+ 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031,
+ 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059,
+ 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f,
+ 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9,
+ 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d,
+ 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f,
+ 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef,
+ 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d,
+ 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0,
+ 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c,
+ 0x83830083, 0x53530053, 0x99990099, 0x61610061,
+ 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e,
+ 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026,
+ 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063,
+ 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d
+};
+
+static const u32 x2[256] = {
+ 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00,
+ 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800,
+ 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100,
+ 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00,
+ 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00,
+ 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300,
+ 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600,
+ 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00,
+ 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900,
+ 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600,
+ 0x57575700, 0x43434300, 0x56565600, 0x17171700,
+ 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00,
+ 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300,
+ 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00,
+ 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800,
+ 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00,
+ 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00,
+ 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800,
+ 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300,
+ 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00,
+ 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00,
+ 0x02020200, 0x24242400, 0x75757500, 0x93939300,
+ 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200,
+ 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00,
+ 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00,
+ 0x12121200, 0x97979700, 0x32323200, 0xababab00,
+ 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300,
+ 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900,
+ 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100,
+ 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900,
+ 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600,
+ 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100,
+ 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500,
+ 0x86868600, 0x36363600, 0xbebebe00, 0x61616100,
+ 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00,
+ 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00,
+ 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00,
+ 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500,
+ 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00,
+ 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700,
+ 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00,
+ 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000,
+ 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100,
+ 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00,
+ 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600,
+ 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000,
+ 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00,
+ 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500,
+ 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500,
+ 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00,
+ 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300,
+ 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500,
+ 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00,
+ 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300,
+ 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900,
+ 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00,
+ 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400,
+ 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00,
+ 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00,
+ 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300,
+ 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700,
+ 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00,
+ 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300,
+ 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000
+};
+
+static inline u32 rotl32(u32 v, u32 r)
+{
+ return ((v << r) | (v >> (32 - r)));
+}
+
+static inline u32 rotr32(u32 v, u32 r)
+{
+ return ((v >> r) | (v << (32 - r)));
+}
+
+static inline u32 bswap32(u32 v)
+{
+ return ((v << 24) ^
+ (v >> 24) ^
+ ((v & 0x0000ff00) << 8) ^
+ ((v & 0x00ff0000) >> 8));
+}
+
+static inline u8 get_u8(u32 x, u32 y)
+{
+ return (x >> ((3 - y) * 8));
+}
+
+static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3)
+{
+ return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3);
+}
+
+static inline u32 aria_m(u32 t0)
+{
+ return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16);
+}
+
+/* S-Box Layer 1 + M */
+static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = s1[get_u8(*t0, 0)] ^
+ s2[get_u8(*t0, 1)] ^
+ x1[get_u8(*t0, 2)] ^
+ x2[get_u8(*t0, 3)];
+ *t1 = s1[get_u8(*t1, 0)] ^
+ s2[get_u8(*t1, 1)] ^
+ x1[get_u8(*t1, 2)] ^
+ x2[get_u8(*t1, 3)];
+ *t2 = s1[get_u8(*t2, 0)] ^
+ s2[get_u8(*t2, 1)] ^
+ x1[get_u8(*t2, 2)] ^
+ x2[get_u8(*t2, 3)];
+ *t3 = s1[get_u8(*t3, 0)] ^
+ s2[get_u8(*t3, 1)] ^
+ x1[get_u8(*t3, 2)] ^
+ x2[get_u8(*t3, 3)];
+}
+
+/* S-Box Layer 2 + M */
+static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = x1[get_u8(*t0, 0)] ^
+ x2[get_u8(*t0, 1)] ^
+ s1[get_u8(*t0, 2)] ^
+ s2[get_u8(*t0, 3)];
+ *t1 = x1[get_u8(*t1, 0)] ^
+ x2[get_u8(*t1, 1)] ^
+ s1[get_u8(*t1, 2)] ^
+ s2[get_u8(*t1, 3)];
+ *t2 = x1[get_u8(*t2, 0)] ^
+ x2[get_u8(*t2, 1)] ^
+ s1[get_u8(*t2, 2)] ^
+ s2[get_u8(*t2, 3)];
+ *t3 = x1[get_u8(*t3, 0)] ^
+ x2[get_u8(*t3, 1)] ^
+ s1[get_u8(*t3, 2)] ^
+ s2[get_u8(*t3, 3)];
+}
+
+/* Word-level diffusion */
+static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 ^= *t2;
+ *t2 ^= *t3;
+ *t0 ^= *t1;
+
+ *t3 ^= *t1;
+ *t2 ^= *t0;
+ *t1 ^= *t2;
+}
+
+/* Byte-level diffusion */
+static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff);
+ *t2 = rotr32(*t2, 16);
+ *t3 = bswap32(*t3);
+}
+
+/* Key XOR Layer */
+static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 ^= rk[0];
+ *t1 ^= rk[1];
+ *t2 ^= rk[2];
+ *t3 ^= rk[3];
+}
+/* Odd round Substitution & Diffusion */
+static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Even round Substitution & Diffusion */
+static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t3, t0, t1);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Q, R Macro expanded ARIA GSRK */
+static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
+{
+ int q = 4 - (n / 32);
+ int r = n % 32;
+
+ rk[0] = (x[0]) ^
+ ((y[q % 4]) >> r) ^
+ ((y[(q + 3) % 4]) << (32 - r));
+ rk[1] = (x[1]) ^
+ ((y[(q + 1) % 4]) >> r) ^
+ ((y[q % 4]) << (32 - r));
+ rk[2] = (x[2]) ^
+ ((y[(q + 2) % 4]) >> r) ^
+ ((y[(q + 1) % 4]) << (32 - r));
+ rk[3] = (x[3]) ^
+ ((y[(q + 3) % 4]) >> r) ^
+ ((y[(q + 2) % 4]) << (32 - r));
+}
+
+void aria_encrypt(void *ctx, u8 *out, const u8 *in);
+void aria_decrypt(void *ctx, u8 *out, const u8 *in);
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+
+#endif
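
The inline helpers above compose into ARIA's alternating round structure. A sketch of the first two encryption rounds, assuming 'ctx' was prepared by aria_set_key() and the 16-byte block has already been loaded big-endian into t0..t3 (block load/store and the final rounds are omitted):

	u32 t0, t1, t2, t3;

	aria_add_round_key(ctx->enc_key[0], &t0, &t1, &t2, &t3); /* round 1 */
	aria_subst_diff_odd(&t0, &t1, &t2, &t3);                  /* odd round */
	aria_add_round_key(ctx->enc_key[1], &t0, &t1, &t2, &t3); /* round 2 */
	aria_subst_diff_even(&t0, &t1, &t2, &t3);                 /* even round */
	/* ...alternate odd/even layers for ctx->rounds rounds... */
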
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
deleted file mode 100644
index 48198c36d6b9..000000000000
--- a/include/crypto/asym_tpm_subtype.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
-#define _LINUX_ASYM_TPM_SUBTYPE_H
-
-#include <linux/keyctl.h>
-
-struct tpm_key {
- void *blob;
- u32 blob_len;
- uint16_t key_len; /* Size in bits of the key */
- const void *pub_key; /* pointer inside blob to the public key bytes */
- uint16_t pub_key_len; /* length of the public key */
-};
-
-struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
-
-extern struct asymmetric_key_subtype asym_tpm_subtype;
-
-#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
index 0b8e6bc55301..f3b37cbb3131 100644
--- a/include/crypto/b128ops.h
+++ b/include/crypto/b128ops.h
@@ -50,10 +50,6 @@
#include <linux/types.h>
typedef struct {
- u64 a, b;
-} u128;
-
-typedef struct {
__be64 a, b;
} be128;
@@ -61,20 +57,16 @@ typedef struct {
__le64 b, a;
} le128;
-static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
{
r->a = p->a ^ q->a;
r->b = p->b ^ q->b;
}
-static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
-}
-
static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+ r->a = p->a ^ q->a;
+ r->b = p->b ^ q->b;
}
#endif /* _CRYPTO_B128OPS_H */
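
With the intermediate u128 type gone, each typed helper XORs its own fields directly. Trivial usage (operand values assumed):

	be128 a, b, r;

	/* ...fill a and b... */
	be128_xor(&r, &a, &b);	/* r.a = a.a ^ b.a; r.b = a.b ^ b.b */
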
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
new file mode 100644
index 000000000000..0c0176285349
--- /dev/null
+++ b/include/crypto/blake2b.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _CRYPTO_BLAKE2B_H
+#define _CRYPTO_BLAKE2B_H
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+enum blake2b_lengths {
+ BLAKE2B_BLOCK_SIZE = 128,
+ BLAKE2B_HASH_SIZE = 64,
+ BLAKE2B_KEY_SIZE = 64,
+
+ BLAKE2B_160_HASH_SIZE = 20,
+ BLAKE2B_256_HASH_SIZE = 32,
+ BLAKE2B_384_HASH_SIZE = 48,
+ BLAKE2B_512_HASH_SIZE = 64,
+};
+
+struct blake2b_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
+ u64 h[8];
+ u64 t[2];
+ u64 f[2];
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ unsigned int buflen;
+ unsigned int outlen;
+};
+
+enum blake2b_iv {
+ BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL,
+ BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL,
+ BLAKE2B_IV2 = 0x3C6EF372FE94F82BULL,
+ BLAKE2B_IV3 = 0xA54FF53A5F1D36F1ULL,
+ BLAKE2B_IV4 = 0x510E527FADE682D1ULL,
+ BLAKE2B_IV5 = 0x9B05688C2B3E6C1FULL,
+ BLAKE2B_IV6 = 0x1F83D9ABFB41BD6BULL,
+ BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL,
+};
+
+static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
+ const void *key, size_t keylen)
+{
+ state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ state->h[1] = BLAKE2B_IV1;
+ state->h[2] = BLAKE2B_IV2;
+ state->h[3] = BLAKE2B_IV3;
+ state->h[4] = BLAKE2B_IV4;
+ state->h[5] = BLAKE2B_IV5;
+ state->h[6] = BLAKE2B_IV6;
+ state->h[7] = BLAKE2B_IV7;
+ state->t[0] = 0;
+ state->t[1] = 0;
+ state->f[0] = 0;
+ state->f[1] = 0;
+ state->buflen = 0;
+ state->outlen = outlen;
+ if (keylen) {
+ memcpy(state->buf, key, keylen);
+ memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
+ state->buflen = BLAKE2B_BLOCK_SIZE;
+ }
+}
+
+#endif /* _CRYPTO_BLAKE2B_H */
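
__blake2b_init() folds the BLAKE2 parameter block into h[0] (digest length, key length << 8, and fanout/depth encoded as 0x01010000) and, for keyed hashing, stages a zero-padded key block in the buffer. A minimal sketch of a keyed BLAKE2b-256 init (the key value is assumed; a real key would be secret):

	struct blake2b_state state;
	static const u8 key[BLAKE2B_KEY_SIZE];	/* example key */

	__blake2b_init(&state, BLAKE2B_256_HASH_SIZE, key, sizeof(key));
	/* state.buflen == BLAKE2B_BLOCK_SIZE here: the padded key is
	 * consumed as the first block by the first compression call. */
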
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
index b471deac28ff..f9ffd39194eb 100644
--- a/include/crypto/blake2s.h
+++ b/include/crypto/blake2s.h
@@ -3,15 +3,14 @@
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
-#ifndef BLAKE2S_H
-#define BLAKE2S_H
+#ifndef _CRYPTO_BLAKE2S_H
+#define _CRYPTO_BLAKE2S_H
+#include <linux/bug.h>
+#include <linux/kconfig.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/string.h>
-#include <asm/bug.h>
-
enum blake2s_lengths {
BLAKE2S_BLOCK_SIZE = 64,
BLAKE2S_HASH_SIZE = 32,
@@ -24,6 +23,7 @@ enum blake2s_lengths {
};
struct blake2s_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u32 h[8];
u32 t[2];
u32 f[2];
@@ -43,29 +43,34 @@ enum blake2s_iv {
BLAKE2S_IV7 = 0x5BE0CD19UL,
};
-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
-void blake2s_final(struct blake2s_state *state, u8 *out);
-
-static inline void blake2s_init_param(struct blake2s_state *state,
- const u32 param)
+static inline void __blake2s_init(struct blake2s_state *state, size_t outlen,
+ const void *key, size_t keylen)
{
- *state = (struct blake2s_state){{
- BLAKE2S_IV0 ^ param,
- BLAKE2S_IV1,
- BLAKE2S_IV2,
- BLAKE2S_IV3,
- BLAKE2S_IV4,
- BLAKE2S_IV5,
- BLAKE2S_IV6,
- BLAKE2S_IV7,
- }};
+ state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ state->h[1] = BLAKE2S_IV1;
+ state->h[2] = BLAKE2S_IV2;
+ state->h[3] = BLAKE2S_IV3;
+ state->h[4] = BLAKE2S_IV4;
+ state->h[5] = BLAKE2S_IV5;
+ state->h[6] = BLAKE2S_IV6;
+ state->h[7] = BLAKE2S_IV7;
+ state->t[0] = 0;
+ state->t[1] = 0;
+ state->f[0] = 0;
+ state->f[1] = 0;
+ state->buflen = 0;
+ state->outlen = outlen;
+ if (keylen) {
+ memcpy(state->buf, key, keylen);
+ memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
+ state->buflen = BLAKE2S_BLOCK_SIZE;
+ }
}
static inline void blake2s_init(struct blake2s_state *state,
const size_t outlen)
{
- blake2s_init_param(state, 0x01010000 | outlen);
- state->outlen = outlen;
+ __blake2s_init(state, outlen, NULL, 0);
}
static inline void blake2s_init_key(struct blake2s_state *state,
@@ -75,12 +80,12 @@ static inline void blake2s_init_key(struct blake2s_state *state,
WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
!key || !keylen || keylen > BLAKE2S_KEY_SIZE));
- blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen);
- memcpy(state->buf, key, keylen);
- state->buflen = BLAKE2S_BLOCK_SIZE;
- state->outlen = outlen;
+ __blake2s_init(state, outlen, key, keylen);
}
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
+void blake2s_final(struct blake2s_state *state, u8 *out);
+
static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
const size_t outlen, const size_t inlen,
const size_t keylen)
@@ -91,16 +96,9 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
(!key && keylen)));
- if (keylen)
- blake2s_init_key(&state, outlen, key, keylen);
- else
- blake2s_init(&state, outlen);
-
+ __blake2s_init(&state, outlen, key, keylen);
blake2s_update(&state, in, inlen);
blake2s_final(&state, out);
}
-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
- const size_t keylen);
-
-#endif /* BLAKE2S_H */
+#endif /* _CRYPTO_BLAKE2S_H */
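
After this refactor, keyed and unkeyed initialization share __blake2s_init(), so the one-shot helper no longer branches on keylen. Usage of the declared blake2s() entry point (msg, msg_len, and key are assumed buffers):

	u8 digest[BLAKE2S_HASH_SIZE];

	blake2s(digest, msg, NULL, BLAKE2S_HASH_SIZE, msg_len, 0);	/* unkeyed */
	blake2s(digest, msg, key, BLAKE2S_HASH_SIZE, msg_len,
		BLAKE2S_KEY_SIZE);					/* keyed */
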
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
deleted file mode 100644
index 2b6422db42e2..000000000000
--- a/include/crypto/cbc.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CBC: Cipher Block Chaining mode
- *
- * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_CBC_H
-#define _CRYPTO_CBC_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-static inline int crypto_cbc_encrypt_segment(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(iv, src, bsize);
- fn(tfm, iv, dst);
- memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_inplace(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(src, iv, bsize);
- fn(tfm, src, src);
- iv = src;
-
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
- else
- err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
-}
-
-static inline int crypto_cbc_decrypt_segment(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- fn(tfm, src, dst);
- crypto_xor(dst, iv, bsize);
- iv = src;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_inplace(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- unsigned int bsize = crypto_skcipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-
- /* Start of the last block. */
- src += nbytes - (nbytes & (bsize - 1)) - bsize;
- memcpy(last_iv, src, bsize);
-
- for (;;) {
- fn(tfm, src, src);
- if ((nbytes -= bsize) < bsize)
- break;
- crypto_xor(src, src - bsize, bsize);
- src -= bsize;
- }
-
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
-
- return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_blocks(
- struct skcipher_walk *walk, struct crypto_skcipher *tfm,
- void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
- if (walk->src.virt.addr == walk->dst.virt.addr)
- return crypto_cbc_decrypt_inplace(walk, tfm, fn);
- else
- return crypto_cbc_decrypt_segment(walk, tfm, fn);
-}
-
-#endif /* _CRYPTO_CBC_H */
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 3a1c72fdb7cf..b3ea73b81944 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -47,13 +47,25 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
hchacha_block_generic(state, out, nrounds);
}
+enum chacha_constants { /* expand 32-byte k */
+ CHACHA_CONSTANT_EXPA = 0x61707865U,
+ CHACHA_CONSTANT_ND_3 = 0x3320646eU,
+ CHACHA_CONSTANT_2_BY = 0x79622d32U,
+ CHACHA_CONSTANT_TE_K = 0x6b206574U
+};
+
+static inline void chacha_init_consts(u32 *state)
+{
+ state[0] = CHACHA_CONSTANT_EXPA;
+ state[1] = CHACHA_CONSTANT_ND_3;
+ state[2] = CHACHA_CONSTANT_2_BY;
+ state[3] = CHACHA_CONSTANT_TE_K;
+}
+
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
{
- state[0] = 0x61707865; /* "expa" */
- state[1] = 0x3320646e; /* "nd 3" */
- state[2] = 0x79622d32; /* "2-by" */
- state[3] = 0x6b206574; /* "te k" */
+ chacha_init_consts(state);
state[4] = key[0];
state[5] = key[1];
state[6] = key[2];
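
The four enum values spell the ChaCha constant "expand 32-byte k" as little-endian words, which is what the names EXPA/ND_3/2_BY/TE_K abbreviate. A sketch of initializing a state with the generic helper (key and iv contents assumed):

	u32 state[16], key[8];	/* 256-bit key */
	u8 iv[16];

	chacha_init_generic(state, key, iv);
	/* state[0..3] now hold "expa" "nd 3" "2-by" "te k" as LE words,
	 * followed by the key words, per the code above. */
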
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 23169f4d87e6..796d986e58e1 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -13,7 +13,8 @@
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
index 4e6dc840b159..ece6a9b5fafc 100644
--- a/include/crypto/curve25519.h
+++ b/include/crypto/curve25519.h
@@ -28,6 +28,8 @@ void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE]);
+bool curve25519_selftest(void);
+
static inline
bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index d71e9858ab86..7b863e911cb4 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -24,21 +24,17 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
- * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
- * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
- void *key;
- void *p;
- void *q;
- void *g;
+ const void *key;
+ const void *p;
+ const void *g;
unsigned int key_size;
unsigned int p_size;
- unsigned int q_size;
unsigned int g_size;
};
@@ -83,4 +79,20 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
*/
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+/**
+ * __crypto_dh_decode_key() - decode a private key without parameter checks
+ * @buf: Buffer holding a packed key that should be decoded
+ * @len: Length of the packed private key buffer
+ * @params: Buffer allocated by the caller that is filled with the
+ * unpacked DH private key.
+ *
+ * Internal function providing the same services as the exported
+ * crypto_dh_decode_key(), but without any of those basic parameter
+ * checks conducted by the latter.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
+int __crypto_dh_decode_key(const char *buf, unsigned int len,
+ struct dh *params);
+
#endif
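
With parameter Q dropped and the pointers const-qualified, a caller now fills struct dh with key, P, and G only and packs it with crypto_dh_encode_key(). A sketch (priv, p, g and their lengths are assumed caller buffers):

	const struct dh params = {
		.key = priv,	.key_size = priv_len,
		.p = p,		.p_size = p_len,
		.g = g,		.g_size = g_len,
	};
	int err = crypto_dh_encode_key(buf, len, &params);
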
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index c4165126937e..af5ad51d3eef 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -105,6 +105,12 @@ struct drbg_test_data {
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};
+enum drbg_seed_state {
+ DRBG_SEED_STATE_UNSEEDED,
+ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
+ DRBG_SEED_STATE_FULL,
+};
+
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
@@ -127,16 +133,15 @@ struct drbg_state {
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
- bool seeded; /* DRBG fully seeded? */
+ enum drbg_seed_state seeded; /* DRBG fully seeded? */
+ unsigned long last_seed_time;
bool pr; /* Prediction resistance enabled? */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
- struct work_struct seed_work; /* asynchronous seeding support */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
struct drbg_string test_data;
- struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
new file mode 100644
index 000000000000..70964781eb68
--- /dev/null
+++ b/include/crypto/ecc_curve.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon */
+
+#ifndef _CRYPTO_ECC_CURVE_H
+#define _CRYPTO_ECC_CURVE_H
+
+#include <linux/types.h>
+
+/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x: X coordinate in vli form.
+ * @y: Y coordinate in vli form.
+ * @ndigits: Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name: Short name of the curve.
+ * @g: Generator point of the curve.
+ * @p: Prime number; if Barrett's reduction is used for this curve,
+ * the pre-calculated value 'mu' is appended to @p after ndigits.
+ * Use of Barrett's reduction is heuristically determined in
+ * vli_mmod_fast().
+ * @n: Order of the curve group.
+ * @a: Curve parameter a.
+ * @b: Curve parameter b.
+ */
+struct ecc_curve {
+ char *name;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+ u64 *a;
+ u64 *b;
+};
+
+/**
+ * ecc_get_curve() - get an elliptic curve
+ * @curve_id: curve ID, as defined in 'include/crypto/ecdh.h'
+ *
+ * Returns the curve if the lookup is successful, NULL otherwise
+ */
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id);
+
+/**
+ * ecc_get_curve25519() - get the curve25519 curve
+ *
+ * Returns the curve25519 definition
+ */
+const struct ecc_curve *ecc_get_curve25519(void);
+
+#endif
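
A sketch of looking up a curve via the new accessor, using the NIST P-256 ID from ecdh.h (error code choice is illustrative):

	const struct ecc_curve *curve = ecc_get_curve(ECC_CURVE_NIST_P256);

	if (!curve)
		return -EOPNOTSUPP;	/* curve not supported */
	/* curve->g is the generator; curve->p, curve->n, curve->a and
	 * curve->b are vli arrays of curve->g.ndigits u64 words each. */
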
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a5b805b5526d..a9f98078d29c 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -25,16 +25,15 @@
/* Curves IDs */
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
+#define ECC_CURVE_NIST_P384 0x0003
/**
* struct ecdh - define an ECDH private key
*
- * @curve_id: ECC curve the key is based on.
* @key: Private ECDH key
* @key_size: Size of the private ECDH key
*/
struct ecdh {
- unsigned short curve_id;
char *key;
unsigned short key_size;
};
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 3f06e40d063a..545dbefe3e13 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -7,86 +7,47 @@
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include <crypto/skcipher.h>
+#include <linux/types.h>
-#define ENGINE_NAME_LEN 30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @retry_support: indication that the hardware allows re-execution
- * of a failed backlog request
- * crypto-engine, in head position to keep order
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
- * @kworker: kthread worker struct for request pump
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
- char name[ENGINE_NAME_LEN];
- bool idling;
- bool busy;
- bool running;
-
- bool retry_support;
-
- struct list_head list;
- spinlock_t queue_lock;
- struct crypto_queue queue;
- struct device *dev;
-
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
- struct kthread_worker *kworker;
- struct kthread_work pump_requests;
-
- void *priv_data;
- struct crypto_async_request *cur_req;
-};
+struct crypto_engine;
+struct device;
/*
* struct crypto_engine_op - crypto hardware engine operations
- * @prepare__request: do some prepare if need before handle the current request
- * @unprepare_request: undo any work done by prepare_request()
* @do_one_request: do encryption for current request
*/
struct crypto_engine_op {
- int (*prepare_request)(struct crypto_engine *engine,
- void *areq);
- int (*unprepare_request)(struct crypto_engine *engine,
- void *areq);
int (*do_one_request)(struct crypto_engine *engine,
void *areq);
};
-struct crypto_engine_ctx {
+struct aead_engine_alg {
+ struct aead_alg base;
+ struct crypto_engine_op op;
+};
+
+struct ahash_engine_alg {
+ struct ahash_alg base;
+ struct crypto_engine_op op;
+};
+
+struct akcipher_engine_alg {
+ struct akcipher_alg base;
+ struct crypto_engine_op op;
+};
+
+struct kpp_engine_alg {
+ struct kpp_alg base;
+ struct crypto_engine_op op;
+};
+
+struct skcipher_engine_alg {
+ struct skcipher_alg base;
struct crypto_engine_op op;
};
@@ -96,6 +57,8 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
+int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
+ struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
void crypto_finalize_aead_request(struct crypto_engine *engine,
@@ -104,6 +67,8 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_kpp_request(struct crypto_engine *engine,
+ struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
@@ -113,6 +78,30 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
-int crypto_engine_exit(struct crypto_engine *engine);
+void crypto_engine_exit(struct crypto_engine *engine);
+
+int crypto_engine_register_aead(struct aead_engine_alg *alg);
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count);
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count);
#endif /* _CRYPTO_ENGINE_H */
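
Under the new layout, an algorithm embeds its crypto_engine_op next to the base algorithm and registers through the type-specific helpers. A sketch for a skcipher (my_do_one() and my_alg are hypothetical driver names; the synchronous-completion pattern shown is one option, hardware drivers typically finalize later from an IRQ):

static int my_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = areq;	/* request from the queue */
	int err;

	err = 0;	/* ...program the hardware, or run synchronously... */
	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}

static struct skcipher_engine_alg my_alg = {
	.base	= { /* usual struct skcipher_alg fields */ },
	.op	= { .do_one_request = my_do_one },
};

	/* at probe time: */
	err = crypto_engine_register_skcipher(&my_alg);
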
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index 9d7eff04f224..fd9df607a836 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -3,6 +3,9 @@
#include <linux/errno.h>
+#include <crypto/aes.h>
+#include <crypto/gf128mul.h>
+
#define GCM_AES_IV_SIZE 12
#define GCM_RFC4106_IV_SIZE 8
#define GCM_RFC4543_IV_SIZE 8
@@ -60,4 +63,23 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
return 0;
}
+
+struct aesgcm_ctx {
+ be128 ghash_key;
+ struct crypto_aes_ctx aes_ctx;
+ unsigned int authsize;
+};
+
+int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
+ unsigned int keysize, unsigned int authsize);
+
+void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int crypt_len, const u8 *assoc, int assoc_len,
+ const u8 iv[GCM_AES_IV_SIZE], u8 *authtag);
+
+bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
+ const u8 *src, int crypt_len, const u8 *assoc,
+ int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
+ const u8 *authtag);
+
#endif
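
The aesgcm_* declarations above form a synchronous, allocation-free AES-GCM library interface. A usage sketch with the signatures as declared (key, src, dst, out, assoc, iv and the lengths are assumed caller buffers; a 16-byte tag is one valid authsize):

	struct aesgcm_ctx ctx;
	u8 tag[16];

	if (aesgcm_expandkey(&ctx, key, key_len, sizeof(tag)))
		return -EINVAL;		/* bad key or tag size */

	aesgcm_encrypt(&ctx, dst, src, src_len, assoc, assoc_len, iv, tag);

	if (!aesgcm_decrypt(&ctx, out, dst, src_len, assoc, assoc_len,
			    iv, tag))
		return -EBADMSG;	/* authentication failed */
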
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0d1b403888c9..5d61f576cfc8 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,6 +8,7 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
+#include <linux/atomic.h>
#include <linux/crypto.h>
#include <linux/string.h>
@@ -22,8 +23,27 @@ struct crypto_ahash;
* crypto_unregister_shash().
*/
-/**
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt: number of hash requests
+ * @hash_tlen: total data size hashed
+ * @err_cnt: number of error for hash requests
+ */
+struct crypto_istat_hash {
+ atomic64_t hash_cnt;
+ atomic64_t hash_tlen;
+ atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
+#else
+#define HASH_ALG_COMMON_STAT
+#endif
+
+/*
* struct hash_alg_common - define properties of message digest
+ * @stat: Statistics for hash algorithm.
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
* store the resulting hash into it. For various predefined sizes,
@@ -39,12 +59,15 @@ struct crypto_ahash;
* The hash_alg_common data structure now adds the hash-specific
* information.
*/
-struct hash_alg_common {
- unsigned int digestsize;
- unsigned int statesize;
-
- struct crypto_alg base;
-};
+#define HASH_ALG_COMMON { \
+ HASH_ALG_COMMON_STAT \
+ \
+ unsigned int digestsize; \
+ unsigned int statesize; \
+ \
+ struct crypto_alg base; \
+}
+struct hash_alg_common HASH_ALG_COMMON;
struct ahash_request {
struct crypto_async_request base;
@@ -59,11 +82,6 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-#define AHASH_REQUEST_ON_STACK(name, ahash) \
- char __##name##_desc[sizeof(struct ahash_request) + \
- crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
- struct ahash_request *name = (void *)__##name##_desc
-
/**
* struct ahash_alg - asynchronous message digest definition
* @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
@@ -123,6 +141,18 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @init_tfm: Initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation
+ * time, right after the transformation context was
+ * allocated. In case the cryptographic hardware has
+ * some special requirements which need to be handled
+ * by software, this function shall check for the precise
+ * requirement of the transformation and put any software
+ * fallbacks in place.
+ * @exit_tfm: Deinitialize the cryptographic transformation object.
+ * This is a counterpart to @init_tfm, used to remove
+ * various changes set in @init_tfm.
+ * @clone_tfm: Copy transform into new object, may allocate memory.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -135,13 +165,16 @@ struct ahash_alg {
int (*import)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+ int (*init_tfm)(struct crypto_ahash *tfm);
+ void (*exit_tfm)(struct crypto_ahash *tfm);
+ int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
struct hash_alg_common halg;
};
struct shash_desc {
struct crypto_shash *tfm;
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
};
#define HASH_MAX_DIGESTSIZE 64
@@ -152,11 +185,9 @@ struct shash_desc {
*/
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
-#define HASH_MAX_STATESIZE 512
-
-#define SHASH_DESC_ON_STACK(shash, ctx) \
- char __##shash##_desc[sizeof(struct shash_desc) + \
- HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
+#define SHASH_DESC_ON_STACK(shash, ctx) \
+ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
+ __aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -180,12 +211,12 @@ struct shash_desc {
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
- * @digestsize: see struct ahash_alg
- * @statesize: see struct ahash_alg
+ * @clone_tfm: Copy transform into new object, may allocate memory.
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
- * @base: internally used
+ * @halg: see struct hash_alg_common
+ * @HASH_ALG_COMMON: see struct hash_alg_common
*/
struct shash_alg {
int (*init)(struct shash_desc *desc);
@@ -202,28 +233,21 @@ struct shash_alg {
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
void (*exit_tfm)(struct crypto_shash *tfm);
+ int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
unsigned int descsize;
- /* These fields must match hash_alg_common. */
- unsigned int digestsize
- __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
- unsigned int statesize;
-
- struct crypto_alg base;
+ union {
+ struct HASH_ALG_COMMON;
+ struct hash_alg_common halg;
+ };
};
+#undef HASH_ALG_COMMON
+#undef HASH_ALG_COMMON_STAT
struct crypto_ahash {
- int (*init)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*finup)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*export)(struct ahash_request *req, void *out);
- int (*import)(struct ahash_request *req, const void *in);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
+ bool using_shash; /* Underlying algorithm is shash, not ahash */
+ unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -265,6 +289,8 @@ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
+
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
@@ -273,6 +299,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
/**
* crypto_free_ahash() - zeroize and free the ahash handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
@@ -301,12 +329,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}
-static inline unsigned int crypto_ahash_alignmask(
- struct crypto_ahash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
-}
-
/**
* crypto_ahash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -360,7 +382,7 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
- return crypto_hash_alg_common(tfm)->statesize;
+ return tfm->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@@ -448,7 +470,7 @@ int crypto_ahash_finup(struct ahash_request *req);
*
* Return:
* 0 if the message digest was successfully calculated;
- * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
@@ -478,10 +500,7 @@ int crypto_ahash_digest(struct ahash_request *req);
*
* Return: 0 if the export was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_export(struct ahash_request *req, void *out)
-{
- return crypto_ahash_reqtfm(req)->export(req, out);
-}
+int crypto_ahash_export(struct ahash_request *req, void *out);
/**
* crypto_ahash_import() - import message digest state
@@ -494,15 +513,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->import(req, in);
-}
+int crypto_ahash_import(struct ahash_request *req, const void *in);
/**
* crypto_ahash_init() - (re)initialize message digest handle
@@ -515,15 +526,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->init(req);
-}
+int crypto_ahash_init(struct ahash_request *req);
/**
* crypto_ahash_update() - add data to message digest for processing
@@ -536,18 +539,7 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_update(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stats_ahash_update(nbytes, ret, alg);
- return ret;
-}
+int crypto_ahash_update(struct ahash_request *req);
/**
* DOC: Asynchronous Hash Request Handle
@@ -708,6 +700,10 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
+
+int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
return &tfm->base;
@@ -716,6 +712,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
/**
* crypto_free_shash() - zeroize and free the message digest handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
@@ -732,12 +730,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}
-static inline unsigned int crypto_shash_alignmask(
- struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
-}
-
/**
* crypto_shash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -886,10 +878,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* Context: Any context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_export(struct shash_desc *desc, void *out)
-{
- return crypto_shash_alg(desc->tfm)->export(desc, out);
-}
+int crypto_shash_export(struct shash_desc *desc, void *out);
/**
* crypto_shash_import() - import operational state
@@ -903,15 +892,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
* Context: Any context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
-{
- struct crypto_shash *tfm = desc->tfm;
-
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->import(desc, in);
-}
+int crypto_shash_import(struct shash_desc *desc, const void *in);
/**
* crypto_shash_init() - (re)initialize message digest
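
Despite the export/import helpers moving out of line, basic synchronous hashing with a stack descriptor is unchanged. A sketch assuming 'tfm' came from crypto_alloc_shash() and the usual crypto_shash_digest() entry point (data, len, and out are caller buffers):

	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, data, len, out);
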
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index eb9d2e368969..d6927739f8b2 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -8,7 +8,9 @@
#ifndef _CRYPTO_HASH_INFO_H
#define _CRYPTO_HASH_INFO_H
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
#include <crypto/md5.h>
#include <crypto/streebog.h>
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index ee6412314f8f..78ecaf5db04c 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -21,8 +21,6 @@
#define ALG_MAX_PAGES 16
-struct crypto_async_request;
-
struct alg_sock {
/* struct sock must be the first member of struct alg_sock */
struct sock sk;
@@ -46,6 +44,7 @@ struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*setentropy)(void *private, sockptr_t entropy, unsigned int len);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
@@ -57,9 +56,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES + 1];
- struct page *pages[ALG_MAX_PAGES];
- unsigned int npages;
+ struct sg_table sgt;
+ struct scatterlist sgl[ALG_MAX_PAGES + 1];
+ bool need_unpin;
};
/* TX SGL entry */
@@ -122,6 +121,7 @@ struct af_alg_async_req {
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
+ * @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @completion: Work queue for synchronous operation
* @used: TX bytes sent to kernel. This variable is used to
@@ -137,11 +137,13 @@ struct af_alg_async_req {
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
+ void *state;
size_t aead_assoclen;
struct crypto_wait wait;
@@ -155,6 +157,8 @@ struct af_alg_ctx {
bool init;
unsigned int len;
+
+ unsigned int inflight;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -164,7 +168,6 @@ int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)
@@ -231,10 +234,8 @@ void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
-void af_alg_async_cb(struct crypto_async_request *_req, int err);
+void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index cfc47e18820f..4ac46bafba9d 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -8,7 +8,47 @@
*/
#ifndef _CRYPTO_ACOMP_INT_H
#define _CRYPTO_ACOMP_INT_H
+
#include <crypto/acompress.h>
+#include <crypto/algapi.h>
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the algorithm
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Context size for (de)compression requests
+ * @stat: Statistics for compress algorithm
+ * @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with scomp
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+
+ unsigned int reqsize;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
+};
/*
* Transform internal helpers.
@@ -26,12 +66,7 @@ static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
static inline void acomp_request_complete(struct acomp_req *req,
int err)
{
- req->base.complete(&req->base, err);
-}
-
-static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
-{
- return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+ crypto_request_complete(&req->base, err);
}
static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6..28a95eb3182d 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -39,6 +39,11 @@ static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
return crypto_tfm_ctx(&tfm->base);
}
+static inline void *crypto_aead_ctx_dma(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
static inline struct crypto_instance *aead_crypto_instance(
struct aead_instance *inst)
{
@@ -65,9 +70,19 @@ static inline void *aead_request_ctx(struct aead_request *req)
return req->__ctx;
}
+static inline void *aead_request_ctx_dma(struct aead_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(aead_request_ctx(req), align);
+}
+
static inline void aead_request_complete(struct aead_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 aead_request_flags(struct aead_request *req)
@@ -108,35 +123,17 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
aead->reqsize = reqsize;
}
-static inline void aead_init_queue(struct aead_queue *queue,
- unsigned int max_qlen)
+static inline void crypto_aead_set_reqsize_dma(struct crypto_aead *aead,
+ unsigned int reqsize)
{
- crypto_init_queue(&queue->base, max_qlen);
-}
-
-static inline int aead_enqueue_request(struct aead_queue *queue,
- struct aead_request *request)
-{
- return crypto_enqueue_request(&queue->base, &request->base);
-}
-
-static inline struct aead_request *aead_dequeue_request(
- struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_dequeue_request(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ aead->reqsize = reqsize;
}
-static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
+static inline void aead_init_queue(struct aead_queue *queue,
+ unsigned int max_qlen)
{
- struct crypto_async_request *req;
-
- req = crypto_get_backlog(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
+ crypto_init_queue(&queue->base, max_qlen);
}
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
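The *_dma variants added above follow one pattern across the transform types: the init path reserves extra room with crypto_aead_set_reqsize_dma(), and the request handlers then fetch a DMA-aligned context pointer with aead_request_ctx_dma(). A sketch, with hypothetical driver names and request state:

	struct my_req_ctx {
		dma_addr_t desc_dma;	/* hypothetical per-request state */
	};

	static int my_aead_init(struct crypto_aead *tfm)
	{
		crypto_aead_set_reqsize_dma(tfm, sizeof(struct my_req_ctx));
		return 0;
	}

	static int my_aead_encrypt(struct aead_request *req)
	{
		struct my_req_ctx *rctx = aead_request_ctx_dma(req);

		/* rctx is aligned to crypto_dma_align() and safe for DMA */
		return 0;
	}

The same shape applies to the akcipher, ahash, kpp and skcipher helpers introduced elsewhere in this series.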
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index 8d3220c9ab77..a0fba4b2eccf 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -33,21 +33,43 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req)
return req->__ctx;
}
+static inline void *akcipher_request_ctx_dma(struct akcipher_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(akcipher_request_ctx(req), align);
+}
+
static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
unsigned int reqsize)
{
- crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+ akcipher->reqsize = reqsize;
+}
+
+static inline void akcipher_set_reqsize_dma(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ akcipher->reqsize = reqsize;
}
static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
{
- return tfm->base.__crt_ctx;
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *akcipher_tfm_ctx_dma(struct crypto_akcipher *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
}
static inline void akcipher_request_complete(struct akcipher_request *req,
int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
new file mode 100644
index 000000000000..982fe5e8471c
--- /dev/null
+++ b/include/crypto/internal/blake2b.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2b implementations.
+ * Keep this in sync with the corresponding BLAKE2s header.
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
+#define _CRYPTO_INTERNAL_BLAKE2B_H
+
+#include <crypto/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <linux/string.h>
+
+void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void blake2b_set_lastblock(struct blake2b_state *state)
+{
+ state->f[0] = -1;
+}
+
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void __blake2b_update(struct blake2b_state *state,
+ const u8 *in, size_t inlen,
+ blake2b_compress_t compress)
+{
+ const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2B_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
+ (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
+ in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+
+static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
+ blake2b_compress_t compress)
+{
+ int i;
+
+ blake2b_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
+ (*compress)(state, state->buf, 1, state->buflen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, state->outlen);
+}
+
+/* Helper functions for shash implementations of BLAKE2b */
+
+struct blake2b_tfm_ctx {
+ u8 key[BLAKE2B_KEY_SIZE];
+ unsigned int keylen;
+};
+
+static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static inline int crypto_blake2b_init(struct shash_desc *desc)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ unsigned int outlen = crypto_shash_digestsize(desc->tfm);
+
+ __blake2b_init(state, outlen, tctx->key, tctx->keylen);
+ return 0;
+}
+
+static inline int crypto_blake2b_update(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_update(state, in, inlen, compress);
+ return 0;
+}
+
+static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_final(state, out, compress);
+ return 0;
+}
+
+#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
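A sketch of how an implementation wires these helpers into a struct shash_alg, here using the generic compress function declared above; an arch implementation would substitute its own blake2b_compress_t:

	static int my_blake2b_update(struct shash_desc *desc, const u8 *in,
				     unsigned int inlen)
	{
		return crypto_blake2b_update(desc, in, inlen,
					     blake2b_compress_generic);
	}

	static int my_blake2b_final(struct shash_desc *desc, u8 *out)
	{
		return crypto_blake2b_final(desc, out, blake2b_compress_generic);
	}

	/*
	 * The remaining shash_alg fields come straight from the helpers:
	 * .setkey = crypto_blake2b_setkey, .init = crypto_blake2b_init,
	 * .descsize = sizeof(struct blake2b_state).
	 */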
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
index 74ff77032e52..506d56530ca9 100644
--- a/include/crypto/internal/blake2s.h
+++ b/include/crypto/internal/blake2s.h
@@ -1,24 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2s implementations.
+ * Keep this in sync with the corresponding BLAKE2b header.
+ */
-#ifndef BLAKE2S_INTERNAL_H
-#define BLAKE2S_INTERNAL_H
+#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
+#define _CRYPTO_INTERNAL_BLAKE2S_H
#include <crypto/blake2s.h>
+#include <linux/string.h>
-struct blake2s_tfm_ctx {
- u8 key[BLAKE2S_KEY_SIZE];
- unsigned int keylen;
-};
-
-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);
-void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
- size_t nblocks, const u32 inc);
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
-static inline void blake2s_set_lastblock(struct blake2s_state *state)
-{
- state->f[0] = -1;
-}
+bool blake2s_selftest(void);
-#endif /* BLAKE2S_INTERNAL_H */
+#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
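With the shash glue removed, BLAKE2s is consumed through the library interface in <crypto/blake2s.h>; a one-shot keyed hash might look like this (sketch):

	static void my_blake2s_mac(u8 out[BLAKE2S_HASH_SIZE],
				   const u8 *data, size_t len,
				   const u8 key[BLAKE2S_KEY_SIZE])
	{
		/* out = BLAKE2s-256(key, data) */
		blake2s(out, data, key, BLAKE2S_HASH_SIZE, len,
			BLAKE2S_KEY_SIZE);
	}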
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
new file mode 100644
index 000000000000..5030f6d2df31
--- /dev/null
+++ b/include/crypto/internal/cipher.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Möller.
+ */
+
+#ifndef _CRYPTO_INTERNAL_CIPHER_H
+#define _CRYPTO_INTERNAL_CIPHER_H
+
+#include <crypto/algapi.h>
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
+static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_cipher *)tfm;
+}
+
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_cipher(struct crypto_cipher *tfm)
+{
+ crypto_free_tfm(crypto_cipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
+static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
+}
+
+static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
+}
+
+static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
+
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
+static inline struct crypto_cipher *crypto_spawn_cipher(
+ struct crypto_cipher_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_CIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
+}
+
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+#endif
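A usage sketch of the single-block API as seen from a template or helper; "aes" serves as the example algorithm name and error handling is kept minimal:

	static int my_encrypt_one_block(const u8 key[16], u8 block[16])
	{
		struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_cipher_setkey(tfm, key, 16);	/* AES-128 */
		if (!err)
			crypto_cipher_encrypt_one(tfm, block, block); /* in place */

		crypto_free_cipher(tfm);
		return err;
	}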
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
new file mode 100644
index 000000000000..4f6c1a68882f
--- /dev/null
+++ b/include/crypto/internal/ecc.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
+
+/* One digit is u64 qword. */
+#define ECC_CURVE_NIST_P192_DIGITS 3
+#define ECC_CURVE_NIST_P256_DIGITS 4
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/**
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
+ */
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+}
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key to be used for the given curve
+ * @private_key_len: private key length
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, unsigned int private_key_len);
+
+/**
+ * ecc_gen_privkey() - Generates an ECC private key.
+ * The private key is a random integer in the range 0 < random < n, where n is a
+ * prime that is the order of the cyclic subgroup generated by the distinguished
+ * point G.
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: buffer for storing the generated private key
+ *
+ * Returns 0 if the private key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+		    u64 *private_key);
+
+/**
+ * ecc_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: pregenerated private key for the given curve
+ * @public_key: buffer for storing the generated public key
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, u64 *public_key);
+
+/**
+ * crypto_ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key of part A
+ * @public_key: public key of counterpart B
+ * @secret: buffer for storing the calculated shared secret
+ *
+ * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, const u64 *public_key,
+ u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine whether vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: carry bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that @mod is a sufficiently large curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * vli_num_bits() - Counts the number of bits required for vli.
+ *
+ * @vli: vli to check.
+ * @ndigits: Length of the @vli
+ *
+ * Return: The number of bits required to represent @vli.
+ */
+unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
+
+/**
+ * ecc_alloc_point() - Allocate ECC point.
+ *
+ * @ndigits: Length of vlis in u64 qwords.
+ *
+ * Return: Pointer to the allocated point or NULL if allocation failed.
+ */
+struct ecc_point *ecc_alloc_point(unsigned int ndigits);
+
+/**
+ * ecc_free_point() - Free ECC point.
+ *
+ * @p: The point to free.
+ */
+void ecc_free_point(struct ecc_point *p);
+
+/**
+ * ecc_point_is_zero() - Check if point is zero.
+ *
+ * @point: Point to check for zero.
+ *
+ * Return: true if point is the point at infinity, false otherwise.
+ */
+bool ecc_point_is_zero(const struct ecc_point *point);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This is faster than doing the two multiplications and the addition
+ * separately.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
+
+#endif
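A sketch of the typical key-agreement flow built from these helpers, assuming NIST P-256. The ECC_CURVE_NIST_P256 curve id constant comes from <crypto/ecc_curve.h>, and peer_pub stands for the counterpart's public key (2 * ndigits digits):

	static int my_ecdh_p256(const u64 *peer_pub, u64 *shared)
	{
		unsigned int nd = ECC_CURVE_NIST_P256_DIGITS;
		u64 priv[ECC_CURVE_NIST_P256_DIGITS];
		u64 pub[2 * ECC_CURVE_NIST_P256_DIGITS];
		int err;

		err = ecc_gen_privkey(ECC_CURVE_NIST_P256, nd, priv);
		if (err)
			return err;

		err = ecc_make_pub_key(ECC_CURVE_NIST_P256, nd, priv, pub);
		if (err)
			return err;

		/* pub would be sent to the peer; then: */
		return crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256, nd,
						 priv, peer_pub, shared);
	}

As the kernel-doc above recommends, the resulting shared secret should be hashed before use as a symmetric key or HMAC key.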
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644
index 000000000000..fbf4be56cf12
--- /dev/null
+++ b/include/crypto/internal/engine.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN 30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is currently handling requests
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
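+ * @dev: the device for this engine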
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on multiple
+ * requests support.
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool idling;
+ bool busy;
+ bool running;
+
+ bool retry_support;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+ struct device *dev;
+
+ bool rt;
+
+ int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
+ struct kthread_worker *kworker;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+#endif
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 89f6f46ab2b8..58967593b6b4 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -18,15 +18,13 @@ struct crypto_hash_walk {
char *data;
unsigned int offset;
- unsigned int alignmask;
+ unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
-
- unsigned int flags;
};
struct ahash_instance {
@@ -62,25 +60,12 @@ struct crypto_shash_spawn {
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
-int crypto_ahash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk);
-
-static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
- int err)
-{
- return crypto_hash_walk_done(walk, err);
-}
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
-static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
-{
- return crypto_hash_walk_last(walk);
-}
-
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
@@ -102,8 +87,6 @@ static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
-bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
-
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -146,25 +129,47 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
-
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
}
+static inline void *crypto_ahash_ctx_dma(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx_dma(crypto_ahash_tfm(tfm));
+}
+
static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
{
return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
halg);
}
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
+ unsigned int size)
+{
+ tfm->statesize = size;
+}
+
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
tfm->reqsize = reqsize;
}
+static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ ahash->reqsize = reqsize;
+}
+
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
@@ -177,14 +182,30 @@ static inline struct ahash_instance *ahash_instance(
return container_of(inst, struct ahash_instance, s.base);
}
+static inline struct ahash_instance *ahash_alg_instance(
+ struct crypto_ahash *ahash)
+{
+ return ahash_instance(crypto_tfm_alg_instance(&ahash->base));
+}
+
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
{
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
+static inline void *ahash_request_ctx_dma(struct ahash_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(ahash_request_ctx(req), align);
+}
+
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 ahash_request_flags(struct ahash_request *req)
@@ -244,11 +265,6 @@ static inline struct crypto_shash *crypto_spawn_shash(
return crypto_spawn_tfm2(&spawn->base);
}
-static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_shash, base);
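The walk helpers above are consumed in a loop of a characteristic shape; a sketch modelled on shash_ahash_update() in crypto/ahash.c, where my_hash_blocks() is a hypothetical stand-in for the actual block processing:

	static int my_hash_blocks(const u8 *data, unsigned int len); /* 0 or -errno */

	static int my_ahash_update(struct ahash_request *req)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
		     nbytes = crypto_hash_walk_done(&walk, nbytes))
			nbytes = my_hash_blocks(walk.data, nbytes);

		return nbytes;
	}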
diff --git a/include/crypto/internal/kdf_selftest.h b/include/crypto/internal/kdf_selftest.h
new file mode 100644
index 000000000000..4d03d2af57b7
--- /dev/null
+++ b/include/crypto/internal/kdf_selftest.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF_SELFTEST_H
+#define _CRYPTO_KDF_SELFTEST_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+struct kdf_testvec {
+ unsigned char *key;
+ size_t keylen;
+ unsigned char *ikm;
+ size_t ikmlen;
+ struct kvec info;
+ unsigned char *expected;
+ size_t expectedlen;
+};
+
+static inline int
+kdf_test(const struct kdf_testvec *test, const char *name,
+ int (*crypto_kdf_setkey)(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen),
+ int (*crypto_kdf_generate)(struct crypto_shash *kmd,
+ const struct kvec *info,
+ unsigned int info_nvec,
+ u8 *dst, unsigned int dlen))
+{
+ struct crypto_shash *kmd;
+ int ret;
+ u8 *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ kmd = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(kmd)) {
+ pr_err("alg: kdf: could not allocate hash handle for %s\n",
+ name);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ ret = crypto_kdf_setkey(kmd, test->key, test->keylen,
+ test->ikm, test->ikmlen);
+ if (ret) {
+ pr_err("alg: kdf: could not set key derivation key\n");
+ goto err;
+ }
+
+ ret = crypto_kdf_generate(kmd, &test->info, 1, buf, test->expectedlen);
+ if (ret) {
+ pr_err("alg: kdf: could not obtain key data\n");
+ goto err;
+ }
+
+ ret = memcmp(test->expected, buf, test->expectedlen);
+ if (ret)
+ ret = -EINVAL;
+
+err:
+ crypto_free_shash(kmd);
+ kfree(buf);
+ return ret;
+}
+
+#endif /* _CRYPTO_KDF_SELFTEST_H */
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
index 659b642efada..0a6db8c4a9a0 100644
--- a/include/crypto/internal/kpp.h
+++ b/include/crypto/internal/kpp.h
@@ -10,6 +10,38 @@
#include <crypto/kpp.h>
#include <crypto/algapi.h>
+/**
+ * struct kpp_instance - KPP template instance
+ * @free: Callback getting invoked upon instance destruction. Must be set.
+ * @s: Internal. Generic crypto core instance state properly laid out
+ * to alias with @alg as needed.
+ * @alg: The &struct kpp_alg implementation provided by the instance.
+ */
+struct kpp_instance {
+ void (*free)(struct kpp_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct kpp_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct kpp_alg alg;
+ };
+};
+
+/**
+ * struct crypto_kpp_spawn - KPP algorithm spawn
+ * @base: Internal. Generic crypto core spawn state.
+ *
+ * Template instances can get a hold on some inner KPP algorithm by
+ * binding a &struct crypto_kpp_spawn via
+ * crypto_grab_kpp(). Transforms may subsequently get instantiated
+ * from the referenced inner &struct kpp_alg by means of
+ * crypto_spawn_kpp().
+ */
+struct crypto_kpp_spawn {
+ struct crypto_spawn base;
+};
+
/*
* Transform internal helpers.
*/
@@ -18,14 +50,42 @@ static inline void *kpp_request_ctx(struct kpp_request *req)
return req->__ctx;
}
+static inline void *kpp_request_ctx_dma(struct kpp_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(kpp_request_ctx(req), align);
+}
+
+static inline void kpp_set_reqsize(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ kpp->reqsize = reqsize;
+}
+
+static inline void kpp_set_reqsize_dma(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ kpp->reqsize = reqsize;
+}
+
static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
{
- return tfm->base.__crt_ctx;
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *kpp_tfm_ctx_dma(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
}
static inline void kpp_request_complete(struct kpp_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
@@ -33,6 +93,62 @@ static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
}
+/*
+ * Template instance internal helpers.
+ */
+/**
+ * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding
+ * generic &struct crypto_instance.
+ * @inst: Pointer to the &struct kpp_instance to be cast.
+ * Return: A pointer to the &struct crypto_instance embedded in @inst.
+ */
+static inline struct crypto_instance *kpp_crypto_instance(
+ struct kpp_instance *inst)
+{
+ return &inst->s.base;
+}
+
+/**
+ * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding
+ * &struct kpp_instance.
+ * @inst: Pointer to the &struct crypto_instance to be cast.
+ * Return: A pointer to the &struct kpp_instance @inst is embedded in.
+ */
+static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst)
+{
+ return container_of(inst, struct kpp_instance, s.base);
+}
+
+/**
+ * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has
+ * been instantiated from.
+ * @kpp: The KPP transform instantiated from some &struct kpp_instance.
+ * Return: The &struct kpp_instance associated with @kpp.
+ */
+static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp)
+{
+ return kpp_instance(crypto_tfm_alg_instance(&kpp->base));
+}
+
+/**
+ * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation
+ * specific context data.
+ * @inst: The &struct kpp_instance whose context data to access.
+ *
+ * A KPP template implementation may allocate extra memory beyond the
+ * end of a &struct kpp_instance instantiated from &crypto_template.create().
+ * This function provides a means to obtain a pointer to this area.
+ *
+ * Return: A pointer to the implementation specific context data.
+ */
+static inline void *kpp_instance_ctx(struct kpp_instance *inst)
+{
+ return crypto_instance_ctx(kpp_crypto_instance(inst));
+}
+
+/*
+ * KPP algorithm (un)registration functions.
+ */
/**
* crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
*
@@ -56,4 +172,74 @@ int crypto_register_kpp(struct kpp_alg *alg);
*/
void crypto_unregister_kpp(struct kpp_alg *alg);
+/**
+ * kpp_register_instance() - Register a KPP template instance.
+ * @tmpl: The instantiating template.
+ * @inst: The KPP template instance to be registered.
+ * Return: %0 on success, negative error code otherwise.
+ */
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst);
+
+/*
+ * KPP spawn related functions.
+ */
+/**
+ * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it.
+ * @spawn: The KPP spawn to bind.
+ * @inst: The template instance owning @spawn.
+ * @name: The KPP algorithm name to look up.
+ * @type: The type bitset to pass on to the lookup.
+ * @mask: The bitmask to pass on to the lookup.
+ * Return: %0 on success, a negative error code otherwise.
+ */
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+/**
+ * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp().
+ * @spawn: The spawn to release.
+ */
+static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+/**
+ * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to.
+ * @spawn: The spawn to get the referenced &struct kpp_alg for.
+ *
+ * This function as well as the returned result are safe to use only
+ * after @spawn has been successfully bound via crypto_grab_kpp() and
+ * up until the template instance owning @spawn has either been
+ * registered successfully or the spawn has been released again via
+ * crypto_drop_spawn().
+ *
+ * Return: A pointer to the &struct kpp_alg referenced from the spawn.
+ */
+static inline struct kpp_alg *crypto_spawn_kpp_alg(
+ struct crypto_kpp_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct kpp_alg, base);
+}
+
+/**
+ * crypto_spawn_kpp() - Create a transform from a KPP spawn.
+ * @spawn: The spawn previously bound to some &struct kpp_alg via
+ * crypto_grab_kpp().
+ *
+ * Once a &struct crypto_kpp_spawn has been successfully bound to a
+ * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter
+ * may get instantiated from the former by means of this function.
+ *
+ * Return: A pointer to the freshly created KPP transform on success
+ * or an ``ERR_PTR()`` otherwise.
+ */
+static inline struct crypto_kpp *crypto_spawn_kpp(
+ struct crypto_kpp_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
#endif
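A compressed sketch of how a template's ->create() uses the spawn helpers above. In a real template the inner algorithm name would come from the rtattr parameters rather than a literal, and the instance setup is elided:

	static int my_kpp_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct crypto_kpp_spawn *spawn;
		struct kpp_instance *inst;
		int err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;

		spawn = kpp_instance_ctx(inst);
		err = crypto_grab_kpp(spawn, kpp_crypto_instance(inst),
				      "ecdh-nist-p256", 0, 0);
		if (err) {
			kfree(inst);
			return err;
		}

		/* ... fill inst->alg from crypto_spawn_kpp_alg(spawn) ... */
		return kpp_register_instance(tmpl, inst);
	}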
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 064e52ca5248..196aa769f296 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -18,7 +18,8 @@
* only the ε-almost-∆-universal hash function (not the full MAC) is computed.
*/
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
static inline void poly1305_core_init(struct poly1305_state *state)
{
*state = (struct poly1305_state){};
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f834274c2493..858fe3965ae3 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -8,10 +8,14 @@
*/
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <linux/crypto.h>
+
+#include <crypto/acompress.h>
+#include <crypto/algapi.h>
#define SCOMP_SCRATCH_SIZE 131072
+struct acomp_req;
+
struct crypto_scomp {
struct crypto_tfm base;
};
@@ -23,7 +27,9 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
+ * @stat: Statistics for compress algorithm
* @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -34,7 +40,11 @@ struct scomp_alg {
int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
- struct crypto_alg base;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
};
static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
@@ -89,10 +99,6 @@ static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
ctx);
}
-int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
-
/**
* crypto_register_scomp() -- Register synchronous compression algorithm
*
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
new file mode 100644
index 000000000000..97cb26ef8115
--- /dev/null
+++ b/include/crypto/internal/sig.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_SIG_H
+#define _CRYPTO_INTERNAL_SIG_H
+
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+
+static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+#endif
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 10226c12c5df..7ae42afdcf3e 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -9,10 +9,19 @@
#define _CRYPTO_INTERNAL_SKCIPHER_H
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Set this if your algorithm is sync but needs a reqsize larger
+ * than MAX_SYNC_SKCIPHER_REQSIZE.
+ *
+ * This reuses a bit that is specific to hash algorithms.
+ */
+#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY
+
struct aead_request;
struct rtattr;
@@ -27,10 +36,25 @@ struct skcipher_instance {
};
};
+struct lskcipher_instance {
+ void (*free)(struct lskcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct lskcipher_alg, co.base)];
+ struct crypto_instance base;
+ } s;
+ struct lskcipher_alg alg;
+ };
+};
+
struct crypto_skcipher_spawn {
struct crypto_spawn base;
};
+struct crypto_lskcipher_spawn {
+ struct crypto_spawn base;
+};
+
struct skcipher_walk {
union {
struct {
@@ -71,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance(
return &inst->s.base;
}
+static inline struct crypto_instance *lskcipher_crypto_instance(
+ struct lskcipher_instance *inst)
+{
+ return &inst->s.base;
+}
+
static inline struct skcipher_instance *skcipher_alg_instance(
struct crypto_skcipher *skcipher)
{
@@ -78,35 +108,62 @@ static inline struct skcipher_instance *skcipher_alg_instance(
struct skcipher_instance, alg);
}
+static inline struct lskcipher_instance *lskcipher_alg_instance(
+ struct crypto_lskcipher *lskcipher)
+{
+ return container_of(crypto_lskcipher_alg(lskcipher),
+ struct lskcipher_instance, alg);
+}
+
static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
return crypto_instance_ctx(skcipher_crypto_instance(inst));
}
+static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst)
+{
+ return crypto_instance_ctx(lskcipher_crypto_instance(inst));
+}
+
static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
+int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
- struct crypto_skcipher_spawn *spawn)
+static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn)
{
- return container_of(spawn->base.alg, struct skcipher_alg, base);
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct lskcipher_alg, co.base);
}
-static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common(
struct crypto_skcipher_spawn *spawn)
{
- return crypto_skcipher_spawn_alg(spawn);
+ return container_of(spawn->base.alg, struct skcipher_alg_common, base);
+}
+
+static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_lskcipher_spawn_alg(spawn);
}
static inline struct crypto_skcipher *crypto_spawn_skcipher(
@@ -115,12 +172,25 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(
return crypto_spawn_tfm2(&spawn->base);
}
+static inline struct crypto_lskcipher *crypto_spawn_lskcipher(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
static inline void crypto_skcipher_set_reqsize(
struct crypto_skcipher *skcipher, unsigned int reqsize)
{
skcipher->reqsize = reqsize;
}
+static inline void crypto_skcipher_set_reqsize_dma(
+ struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ skcipher->reqsize = reqsize;
+}
+
int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
@@ -128,11 +198,17 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst);
+int crypto_register_lskcipher(struct lskcipher_alg *alg);
+void crypto_unregister_lskcipher(struct lskcipher_alg *alg);
+int crypto_register_lskciphers(struct lskcipher_alg *algs, int count);
+void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
+int lskcipher_register_instance(struct crypto_template *tmpl,
+ struct lskcipher_instance *inst);
+
int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
bool atomic);
-void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
@@ -151,49 +227,34 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
return crypto_tfm_ctx(&tfm->base);
}
-static inline void *skcipher_request_ctx(struct skcipher_request *req)
+static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm)
{
- return req->__ctx;
+ return crypto_tfm_ctx(&tfm->base);
}
-static inline u32 skcipher_request_flags(struct skcipher_request *req)
+static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
{
- return req->base.flags;
+ return crypto_tfm_ctx_dma(&tfm->base);
}
-static inline unsigned int crypto_skcipher_alg_min_keysize(
- struct skcipher_alg *alg)
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
- return alg->min_keysize;
+ return req->__ctx;
}
-static inline unsigned int crypto_skcipher_alg_max_keysize(
- struct skcipher_alg *alg)
+static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
{
- return alg->max_keysize;
-}
+ unsigned int align = crypto_dma_align();
-static inline unsigned int crypto_skcipher_alg_walksize(
- struct skcipher_alg *alg)
-{
- return alg->walksize;
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(skcipher_request_ctx(req), align);
}
-/**
- * crypto_skcipher_walksize() - obtain walk size
- * @tfm: cipher handle
- *
- * In some cases, algorithms can only perform optimally when operating on
- * multiple blocks in parallel. This is reflected by the walksize, which
- * must be a multiple of the chunksize (or equal if the concern does not
- * apply)
- *
- * Return: walk size in bytes
- */
-static inline unsigned int crypto_skcipher_walksize(
- struct crypto_skcipher *tfm)
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
- return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+ return req->base.flags;
}
/* Helpers for simple block cipher modes of operation */
@@ -219,5 +280,24 @@ static inline struct crypto_alg *skcipher_ialg_simple(
return crypto_spawn_cipher_alg(spawn);
}
+static inline struct crypto_lskcipher *lskcipher_cipher_simple(
+ struct crypto_lskcipher *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ return *ctx;
+}
+
+struct lskcipher_instance *lskcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct lskcipher_alg *lskcipher_ialg_simple(
+ struct lskcipher_instance *inst)
+{
+ struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst);
+
+ return crypto_lskcipher_spawn_alg(spawn);
+}
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
diff --git a/include/crypto/kdf_sp800108.h b/include/crypto/kdf_sp800108.h
new file mode 100644
index 000000000000..b7b20a778fb7
--- /dev/null
+++ b/include/crypto/kdf_sp800108.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF108_H
+#define _CRYPTO_KDF108_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+/**
+ * Counter KDF generate operation according to SP800-108 section 5.1
+ * as well as SP800-56A section 5.8.1 (Single-step KDF).
+ *
+ * @kmd Keyed message digest whose key was set with crypto_kdf108_setkey or
+ * unkeyed message digest
+ * @info optional context and application specific information - this may be
+ * NULL
+ * @info_nvec number of optional context/application specific information entries
+ * @dst destination buffer that the caller already allocated
+ * @dlen length of the destination buffer - the KDF derives this number of
+ * bytes.
+ *
+ * To comply with SP800-108, the caller must provide Label || 0x00 || Context
+ * in the info parameter.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_ctr_generate(struct crypto_shash *kmd,
+ const struct kvec *info, unsigned int info_nvec,
+ u8 *dst, unsigned int dlen);
+
+/**
+ * Counter KDF setkey operation
+ *
+ * @kmd Keyed message digest allocated by the caller. The key should not have
+ * been set.
+ * @key Seed key to be used to initialize the keyed message digest context.
+ * @keylen Length of the key buffer.
+ * @ikm The SP800-108 KDF does not support IKM - this parameter must be NULL
+ * @ikmlen This parameter must be 0.
+ *
+ * According to SP800-108 section 7.2, the seed key must be at least as large
+ * as the message digest size of the keyed message digest in use. This
+ * limitation is enforced by the implementation.
+ *
+ * SP800-108 allows the use of either an HMAC or a hash primitive. When
+ * the caller intends to use a hash primitive, the call to
+ * crypto_kdf108_setkey is not required and the key derivation operation can
+ * be performed immediately with crypto_kdf108_ctr_generate after allocating
+ * a handle.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_setkey(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen);
+
+#endif /* _CRYPTO_KDF108_H */
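A consumer sketch of the two calls together, using HMAC-SHA256 as the keyed message digest; label_ctx is a caller-assembled buffer holding Label || 0x00 || Context, as the kernel-doc above requires:

	static int my_kdf_derive(const u8 *seed, size_t seedlen,
				 const void *label_ctx, size_t label_ctx_len,
				 u8 *out, unsigned int outlen)
	{
		struct crypto_shash *kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
		struct kvec info = {
			.iov_base = (void *)label_ctx,
			.iov_len  = label_ctx_len,
		};
		int err;

		if (IS_ERR(kmd))
			return PTR_ERR(kmd);

		err = crypto_kdf108_setkey(kmd, seed, seedlen, NULL, 0);
		if (!err)
			err = crypto_kdf108_ctr_generate(kmd, &info, 1,
							 out, outlen);

		crypto_free_shash(kmd);
		return err;
	}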
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 88b591215d5c..1988e24a0d1d 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -8,7 +8,11 @@
#ifndef _CRYPTO_KPP_
#define _CRYPTO_KPP_
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/slab.h>
/**
* struct kpp_request
@@ -37,12 +41,30 @@ struct kpp_request {
* struct crypto_kpp - user-instantiated object which encapsulate
* algorithms and core processing logic
*
+ * @reqsize: Request context size required by algorithm
+ * implementation
* @base: Common crypto API algorithm data structure
*/
struct crypto_kpp {
+ unsigned int reqsize;
+
struct crypto_tfm base;
};
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt: number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+ atomic64_t setsecret_cnt;
+ atomic64_t generate_public_key_cnt;
+ atomic64_t compute_shared_secret_cnt;
+ atomic64_t err_cnt;
+};
+
/**
* struct kpp_alg - generic key-agreement protocol primitives
*
@@ -64,9 +86,8 @@ struct crypto_kpp {
* put in place here.
* @exit: Undo everything @init did.
*
- * @reqsize: Request context size required by algorithm
- * implementation
* @base: Common crypto API algorithm data structure
+ * @stat: Statistics for KPP algorithm
*/
struct kpp_alg {
int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -79,7 +100,10 @@ struct kpp_alg {
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
- unsigned int reqsize;
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_kpp stat;
+#endif
+
struct crypto_alg base;
};
@@ -104,6 +128,8 @@ struct kpp_alg {
*/
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+int crypto_has_kpp(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
{
return &tfm->base;
@@ -126,7 +152,7 @@ static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
{
- return crypto_kpp_alg(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void kpp_request_set_tfm(struct kpp_request *req,
@@ -154,6 +180,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
* crypto_free_kpp() - free KPP tfm handle
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
{
@@ -263,6 +291,26 @@ struct kpp_secret {
unsigned short len;
};
+static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&kpp_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -282,13 +330,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->set_secret(tfm, buffer, len);
- crypto_stats_kpp_set_secret(calg, ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
+
+ return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
}
/**
@@ -308,13 +354,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->generate_public_key(req);
- crypto_stats_kpp_generate_public_key(calg, ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
+
+ return crypto_kpp_errstat(alg, alg->generate_public_key(req));
}
/**
@@ -331,13 +375,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->compute_shared_secret(req);
- crypto_stats_kpp_compute_shared_secret(calg, ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
+
+ return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
}
/**
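
For context, a minimal sketch of how the new crypto_has_kpp() pairs with the existing allocation path; "dh" as the algorithm name and the trimmed error handling are assumptions of the example, not part of the patch:

	#include <crypto/kpp.h>
	#include <linux/err.h>

	/* Sketch: probe for a KPP algorithm before allocating a tfm. */
	static int example_kpp_probe(void)
	{
		struct crypto_kpp *tfm;

		if (!crypto_has_kpp("dh", 0, 0))	/* "dh" assumed present */
			return -ENOENT;

		tfm = crypto_alloc_kpp("dh", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* The set_secret/generate_public_key/compute_shared_secret
		 * wrappers above now also maintain the per-alg counters. */

		crypto_free_kpp(tfm);
		return 0;
	}
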
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
index b9bc3436196a..234d7cf3cf5e 100644
--- a/include/crypto/pcrypt.h
+++ b/include/crypto/pcrypt.h
@@ -9,8 +9,8 @@
#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/padata.h>
struct pcrypt_request {
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index f1f67fc749cf..090692ec3bc7 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
+void poly1305_init_arch(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
{
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
new file mode 100644
index 000000000000..1d630f371f77
--- /dev/null
+++ b/include/crypto/polyval.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Polyval hash algorithm
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#ifndef _CRYPTO_POLYVAL_H
+#define _CRYPTO_POLYVAL_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLYVAL_BLOCK_SIZE 16
+#define POLYVAL_DIGEST_SIZE 16
+
+void polyval_mul_non4k(u8 *op1, const u8 *op2);
+
+void polyval_update_non4k(const u8 *key, const u8 *in,
+ size_t nblocks, u8 *accumulator);
+
+#endif
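
A hedged usage sketch of the new helpers; the exact key/accumulator layout is defined by the corresponding lib code, so treat this purely as an illustration of the calling convention:

	#include <crypto/polyval.h>

	/* Sketch: fold nblocks 16-byte blocks of msg into acc under key h. */
	static void example_polyval(const u8 *h, const u8 *msg, size_t nblocks)
	{
		u8 acc[POLYVAL_BLOCK_SIZE] = {};

		polyval_update_non4k(h, msg, nblocks, acc);
		/* acc now holds the running POLYVAL accumulator */
	}
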
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 11f535cfb810..b7f308977c84 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -10,6 +10,7 @@
#ifndef _LINUX_PUBLIC_KEY_H
#define _LINUX_PUBLIC_KEY_H
+#include <linux/errno.h>
#include <linux/keyctl.h>
#include <linux/oid_registry.h>
@@ -28,6 +29,10 @@ struct public_key {
bool key_is_private;
const char *id_type;
const char *pkey_algo;
+ unsigned long key_eflags; /* key extension flags */
+#define KEY_EFLAG_CA 0 /* set if the CA basic constraints is set */
+#define KEY_EFLAG_DIGITALSIG 1 /* set if the digitalSignature usage is set */
+#define KEY_EFLAG_KEYCERTSIGN 2 /* set if the keyCertSign usage is set */
};
extern void public_key_free(struct public_key *key);
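
The KEY_EFLAG_* constants are bit indices into @key_eflags, so the usual bitops apply. A minimal sketch (pkey is a hypothetical struct public_key *):

	if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags) &&
	    test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
		pr_debug("key may sign certificates\n");
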
@@ -36,11 +41,11 @@ extern void public_key_free(struct public_key *key);
* Public key cryptography signature data
*/
struct public_key_signature {
- struct asymmetric_key_id *auth_ids[2];
+ struct asymmetric_key_id *auth_ids[3];
u8 *s; /* Signature */
- u32 s_size; /* Number of bytes in signature */
u8 *digest;
- u8 digest_size; /* Number of bytes in digest */
+ u32 s_size; /* Number of bytes in signature */
+ u32 digest_size; /* Number of bytes in digest */
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
@@ -69,6 +74,33 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
const union key_payload *payload,
struct key *trusted);
+#if IS_REACHABLE(CONFIG_ASYMMETRIC_KEY_TYPE)
+extern int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
+#else
+static inline int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
+
+static inline int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
+#endif
+
extern int query_asymmetric_key(const struct kernel_pkey_params *,
struct kernel_pkey_query *);
@@ -78,7 +110,16 @@ extern int create_signature(struct kernel_pkey_params *, const void *, void *);
extern int verify_signature(const struct key *,
const struct public_key_signature *);
+#if IS_REACHABLE(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE)
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
+#else
+static inline
+int public_key_verify_signature(const struct public_key *pkey,
+ const struct public_key_signature *sig)
+{
+ return -EINVAL;
+}
+#endif
#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 8b4b844b4eef..6abe5102e5fb 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -9,10 +9,26 @@
#ifndef _CRYPTO_RNG_H
#define _CRYPTO_RNG_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
struct crypto_rng;
+/*
+ * struct crypto_istat_rng: statistics for RNG algorithm
+ * @generate_cnt: number of RNG generate requests
+ * @generate_tlen: total size of data generated by the RNG
+ * @seed_cnt: number of times the RNG was seeded
+ * @err_cnt: number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+ atomic64_t generate_cnt;
+ atomic64_t generate_tlen;
+ atomic64_t seed_cnt;
+ atomic64_t err_cnt;
+};
+
/**
* struct rng_alg - random number generator definition
*
@@ -30,6 +46,7 @@ struct crypto_rng;
* size of the seed is defined with @seedsize .
* @set_ent: Set entropy that would otherwise be obtained from
* entropy source. Internal use only.
+ * @stat: Statistics for RNG algorithm
* @seedsize: The seed size required for a random number generator
* initialization defined with this variable. Some
* random number generators do not require a seed
@@ -46,6 +63,10 @@ struct rng_alg {
void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
unsigned int len);
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_rng stat;
+#endif
+
unsigned int seedsize;
struct crypto_alg base;
@@ -94,6 +115,11 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
return &tfm->base;
}
+static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct rng_alg, base);
+}
+
/**
* crypto_rng_alg - obtain name of RNG
* @tfm: cipher handle
@@ -104,19 +130,40 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return container_of(crypto_rng_tfm(tfm)->__crt_alg,
- struct rng_alg, base);
+ return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg);
}
/**
* crypto_free_rng() - zeroize and free RNG handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
+static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&rng_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -135,13 +182,17 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
- int ret;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_rng *istat = rng_get_stat(alg);
+
+ atomic64_inc(&istat->generate_cnt);
+ atomic64_add(dlen, &istat->generate_tlen);
+ }
- crypto_stats_get(alg);
- ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stats_rng_generate(alg, dlen, ret);
- return ret;
+ return crypto_rng_errstat(alg,
+ alg->generate(tfm, src, slen, dst, dlen));
}
/**
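
For context, a minimal sketch of the generate path whose statistics the new code maintains; "stdrng" as the algorithm name is an assumption of the example:

	#include <crypto/rng.h>
	#include <linux/err.h>

	/* Sketch: fill buf with len random bytes via the crypto_rng API. */
	static int example_rng(u8 *buf, unsigned int len)
	{
		struct crypto_rng *rng;
		int err;

		rng = crypto_alloc_rng("stdrng", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		err = crypto_rng_generate(rng, NULL, 0, buf, len);
		crypto_free_rng(rng);
		return err;
	}
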
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index c837d0775474..32fc4473175b 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -12,8 +12,9 @@
#define _CRYPTO_SCATTERWALK_H
#include <crypto/algapi.h>
+
#include <linux/highmem.h>
-#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
@@ -45,12 +46,6 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
walk->offset += nbytes;
}
-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
- unsigned int alignmask)
-{
- return !(walk->offset & alignmask);
-}
-
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
@@ -58,7 +53,7 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
static inline void scatterwalk_unmap(void *vaddr)
{
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
}
static inline void scatterwalk_start(struct scatter_walk *walk,
@@ -70,7 +65,7 @@ static inline void scatterwalk_start(struct scatter_walk *walk,
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
- return kmap_atomic(scatterwalk_page(walk)) +
+ return kmap_local_page(scatterwalk_page(walk)) +
offset_in_page(walk->offset);
}
@@ -81,12 +76,7 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
- * PageSlab cannot be optimised away per se due to
- * use of volatile pointer.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
- flush_dcache_page(page);
+ flush_dcache_page(page);
}
if (more && walk->offset >= walk->sg->offset + walk->sg->length)
@@ -103,7 +93,6 @@ static inline void scatterwalk_done(struct scatter_walk *walk, int out,
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out);
-void *scatterwalk_map(struct scatter_walk *walk);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out);
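
With scatterwalk_map()/scatterwalk_unmap() now built on kmap_local_page()/kunmap_local(), a typical walk step looks like the following sketch (walk, nbytes and more are placeholders; span-length handling is elided):

	void *vaddr = scatterwalk_map(&walk);
	/* ... process up to a page worth of bytes at vaddr ... */
	scatterwalk_unmap(vaddr);
	scatterwalk_advance(&walk, nbytes);
	scatterwalk_done(&walk, 0, more);
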
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
new file mode 100644
index 000000000000..044ecea60ac8
--- /dev/null
+++ b/include/crypto/sha1.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for SHA-1 algorithms
+ */
+
+#ifndef _CRYPTO_SHA1_H
+#define _CRYPTO_SHA1_H
+
+#include <linux/types.h>
+
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
+
+struct sha1_state {
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buffer[SHA1_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+/*
+ * An implementation of SHA-1's compression function. Don't use in new code!
+ * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
+ * the correct way to hash something with SHA-1 (use crypto_shash instead).
+ */
+#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
+#define SHA1_WORKSPACE_WORDS 16
+void sha1_init(__u32 *buf);
+void sha1_transform(__u32 *digest, const char *data, __u32 *W);
+
+#endif /* _CRYPTO_SHA1_H */
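
A minimal sketch of the bare compression-function contract declared above; as the comment warns, real users should go through crypto_shash instead:

	#include <crypto/sha1.h>
	#include <linux/string.h>

	/* Sketch: run SHA-1's compression function over one 64-byte block. */
	static void example_sha1_block(const char block[SHA1_BLOCK_SIZE],
				       __u32 digest[SHA1_DIGEST_WORDS])
	{
		__u32 W[SHA1_WORKSPACE_WORDS];

		sha1_init(digest);
		sha1_transform(digest, block, W);
		memzero_explicit(W, sizeof(W));	/* scrub the workspace */
	}
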
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 20fd1f7468af..2e0e7c3827d1 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA1_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -101,7 +102,7 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sha1_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sha.h b/include/crypto/sha2.h
index 4ff3da816630..b9e9281d76c9 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha2.h
@@ -1,16 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Common values for SHA algorithms
+ * Common values for SHA-2 algorithms
*/
-#ifndef _CRYPTO_SHA_H
-#define _CRYPTO_SHA_H
+#ifndef _CRYPTO_SHA2_H
+#define _CRYPTO_SHA2_H
#include <linux/types.h>
-#define SHA1_DIGEST_SIZE 20
-#define SHA1_BLOCK_SIZE 64
-
#define SHA224_DIGEST_SIZE 28
#define SHA224_BLOCK_SIZE 64
@@ -23,12 +20,6 @@
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
-#define SHA1_H0 0x67452301UL
-#define SHA1_H1 0xefcdab89UL
-#define SHA1_H2 0x98badcfeUL
-#define SHA1_H3 0x10325476UL
-#define SHA1_H4 0xc3d2e1f0UL
-
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
#define SHA224_H2 0x3070dd17UL
@@ -65,8 +56,6 @@
#define SHA512_H6 0x1f83d9abfb41bd6bULL
#define SHA512_H7 0x5be0cd19137e2179ULL
-extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
-
extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
@@ -75,12 +64,6 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha1_state {
- u32 state[SHA1_DIGEST_SIZE / 4];
- u64 count;
- u8 buffer[SHA1_BLOCK_SIZE];
-};
-
struct sha256_state {
u32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
@@ -95,12 +78,6 @@ struct sha512_state {
struct shash_desc;
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
@@ -114,16 +91,6 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash);
/*
- * An implementation of SHA-1's compression function. Don't use in new code!
- * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
- * the correct way to hash something with SHA-1 (use crypto_shash instead).
- */
-#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
-#define SHA1_WORKSPACE_WORDS 16
-void sha1_init(__u32 *buf);
-void sha1_transform(__u32 *digest, const char *data, __u32 *W);
-
-/*
* Stand-alone implementation of the SHA256 algorithm. It is designed to
* have as little dependencies as possible so it can be used in the
* kexec_file purgatory. In other cases you should generally use the
@@ -161,7 +128,7 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
-void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+/* Simply use sha256_update as it is equivalent to sha224_update. */
void sha224_final(struct sha256_state *sctx, u8 *out);
-#endif
+#endif /* _CRYPTO_SHA2_H */
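
Per the comment above, sha256_update() doubles as sha224_update(), so a one-shot SHA-224 with the stand-alone helpers reduces to the following sketch (the update/final declarations are assumed to come from this header's lib declarations):

	#include <crypto/sha2.h>

	/* Sketch: one-shot SHA-224 using the stand-alone helpers. */
	static void example_sha224(const u8 *data, unsigned int len,
				   u8 out[SHA224_DIGEST_SIZE])
	{
		struct sha256_state sctx;

		sha224_init(&sctx);
		sha256_update(&sctx, data, len);	/* == sha224_update */
		sha224_final(&sctx, out);
	}
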
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index 6ded110783ae..ab904d82236f 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -8,12 +8,12 @@
#ifndef _CRYPTO_SHA256_BASE_H
#define _CRYPTO_SHA256_BASE_H
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
+#include <asm/byteorder.h>
#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <linux/string.h>
+#include <linux/types.h>
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
int blocks);
@@ -34,12 +34,11 @@ static inline int sha256_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
+static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len;
@@ -72,11 +71,20 @@ static inline int sha256_base_do_update(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_update(sctx, data, len, block_fn);
+}
+
+static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
@@ -95,18 +103,33 @@ static inline int sha256_base_do_finalize(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_finalize(sctx, block_fn);
+}
+
+static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
+ unsigned int digest_size)
+{
__be32 *digest = (__be32 *)out;
int i;
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sha256_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_finish(sctx, out, digest_size);
+}
+
#endif /* _CRYPTO_SHA256_BASE_H */
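
The lib_-prefixed helpers split the pure state handling away from the shash glue so code without a shash_desc (for instance lib/crypto users) can share the padding logic. A hedged sketch, with my_block_fn standing in for whatever sha256_block_fn the caller links in:

	/* Sketch: finalize a raw sha256_state without a shash_desc. */
	static void example_lib_sha256_final(struct sha256_state *sctx, u8 *out,
					     sha256_block_fn *my_block_fn)
	{
		lib_sha256_base_do_finalize(sctx, my_block_fn);
		lib_sha256_base_finish(sctx, out, SHA256_DIGEST_SIZE);
	}
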
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index fb19c77494dc..b370b3340b16 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -9,9 +9,10 @@
#define _CRYPTO_SHA512_BASE_H
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
@@ -126,7 +127,7 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
put_unaligned_be64(sctx->state[i], digest++);
- *sctx = (struct sha512_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
new file mode 100644
index 000000000000..d25186bb2be3
--- /dev/null
+++ b/include/crypto/sig.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_SIG_H
+#define _CRYPTO_SIG_H
+
+#include <linux/crypto.h>
+
+/**
+ * struct crypto_sig - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_sig {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Generic Public Key Signature API
+ *
+ * The Public Key Signature API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_SIG (listed as type "sig" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_sig() - allocate signature tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * signing algorithm e.g. "ecdsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a public key signature algorithm. The returned struct
+ * crypto_sig is the handle that is required for any subsequent
+ * API invocation for signature operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_sig() - free signature tfm handle
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_sig(struct crypto_sig *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_sig_tfm(tfm));
+}
+
+/**
+ * crypto_sig_maxsize() - Get len for output buffer
+ *
+ * Returns the destination buffer size required for a given key. The key
+ * must already have been set on the transformation; calling this without
+ * a prior successful setkey results in a NULL pointer dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+int crypto_sig_maxsize(struct crypto_sig *tfm);
+
+/**
+ * crypto_sig_sign() - Invoke signing operation
+ *
+ * Function invokes the specific signing operation for a given algorithm
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_sig_verify() - Invoke signature verification
+ *
+ * Function invokes the specific signature verification operation
+ * for a given algorithm.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @digest: message digest to verify against
+ * @dlen: digest length
+ *
+ * Return: zero on verification success; error code in case of error.
+ */
+int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+
+/**
+ * crypto_sig_set_pubkey() - Invoke set public key operation
+ *
+ * Function invokes the algorithm-specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+
+/**
+ * crypto_sig_set_privkey() - Invoke set private key operation
+ *
+ * Function invokes the algorithm-specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+#endif
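
For context, a minimal verification sketch against the new API; the algorithm name and key encoding are assumptions of the example:

	#include <crypto/sig.h>
	#include <linux/err.h>

	/* Sketch: verify a detached signature over a precomputed digest. */
	static int example_sig_verify(const void *key, unsigned int keylen,
				      const void *sig, unsigned int siglen,
				      const void *digest, unsigned int dlen)
	{
		struct crypto_sig *tfm;
		int err;

		tfm = crypto_alloc_sig("ecdsa-nist-p256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_sig_set_pubkey(tfm, key, keylen);
		if (!err)
			err = crypto_sig_verify(tfm, sig, siglen, digest, dlen);

		crypto_free_sig(tfm);
		return err;
	}
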
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 6a733b171a5d..c8857d7bdb37 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -8,9 +8,25 @@
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/* Set this bit if the lskcipher operation is a continuation. */
+#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
+/* Set this bit if the lskcipher operation is final. */
+#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
+/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
+
+/* Set this bit if the skcipher operation is a continuation. */
+#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
+/* Set this bit if the skcipher operation is not final. */
+#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
+
+struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request
@@ -44,8 +60,34 @@ struct crypto_sync_skcipher {
struct crypto_skcipher base;
};
-/**
- * struct skcipher_alg - symmetric key cipher definition
+struct crypto_lskcipher {
+ struct crypto_tfm base;
+};
+
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat;
+#else
+#define SKCIPHER_ALG_COMMON_STAT
+#endif
+
+/*
+ * struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
@@ -56,6 +98,29 @@ struct crypto_sync_skcipher {
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ * @chunksize: Equal to the block size except for stream ciphers such as
+ * CTR where it is set to the underlying block size.
+ * @statesize: Size of the internal state for the algorithm.
+ * @stat: Statistics for cipher algorithm
+ * @base: Definition of a generic crypto algorithm.
+ */
+#define SKCIPHER_ALG_COMMON { \
+ unsigned int min_keysize; \
+ unsigned int max_keysize; \
+ unsigned int ivsize; \
+ unsigned int chunksize; \
+ unsigned int statesize; \
+ \
+ SKCIPHER_ALG_COMMON_STAT \
+ \
+ struct crypto_alg base; \
+}
+struct skcipher_alg_common SKCIPHER_ALG_COMMON;
+
+/**
+ * struct skcipher_alg - symmetric key cipher definition
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
@@ -79,6 +144,17 @@ struct crypto_sync_skcipher {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save a partial result of the transformation after
+ * processing a certain amount of data and reload that partial result
+ * multiple times later on. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -90,14 +166,10 @@ struct crypto_sync_skcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- * @chunksize: Equal to the block size except for stream ciphers such as
- * CTR where it is set to the underlying block size.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
- * @base: Definition of a generic crypto algorithm.
+ * @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
@@ -106,16 +178,63 @@ struct skcipher_alg {
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
+ int (*export)(struct skcipher_request *req, void *out);
+ int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
- unsigned int chunksize;
unsigned int walksize;
- struct crypto_alg base;
+ union {
+ struct SKCIPHER_ALG_COMMON;
+ struct skcipher_alg_common co;
+ };
+};
+
+/**
+ * struct lskcipher_alg - linear symmetric key cipher definition
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a number of bytes. This function is used to encrypt
+ * the supplied data. This function shall not modify
+ * the transformation context, as this function may be called
+ * in parallel with the same transformation object. Data
+ * may be left over if length is not a multiple of blocks
+ * and there is more to come (final == false). The number of
+ * left-over bytes should be returned in case of success.
+ * The siv field shall be as long as ivsize + statesize with
+ * the IV placed at the front. The state will be used by the
+ * algorithm internally.
+ * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
+ * @encrypt and the conditions are exactly the same.
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @co: see struct skcipher_alg_common
+ */
+struct lskcipher_alg {
+ int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*init)(struct crypto_lskcipher *tfm);
+ void (*exit)(struct crypto_lskcipher *tfm);
+
+ struct skcipher_alg_common co;
};
#define MAX_SYNC_SKCIPHER_REQSIZE 384
@@ -187,15 +306,41 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
u32 type, u32 mask);
+
+/**
+ * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * lskcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an lskcipher. The returned struct
+ * crypto_lskcipher is the cipher handle that is required for any subsequent
+ * API invocation for that lskcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_lskcipher_tfm(
+ struct crypto_lskcipher *tfm)
+{
+ return &tfm->base;
+}
+
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
@@ -208,6 +353,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
}
/**
+ * crypto_free_lskcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm));
+}
+
+/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
@@ -225,6 +381,19 @@ static inline const char *crypto_skcipher_driver_name(
return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
}
+static inline const char *crypto_lskcipher_driver_name(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm));
+}
+
+static inline struct skcipher_alg_common *crypto_skcipher_alg_common(
+ struct crypto_skcipher *tfm)
+{
+ return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+ struct skcipher_alg_common, base);
+}
+
static inline struct skcipher_alg *crypto_skcipher_alg(
struct crypto_skcipher *tfm)
{
@@ -232,9 +401,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
struct skcipher_alg, base);
}
-static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
+static inline struct lskcipher_alg *crypto_lskcipher_alg(
+ struct crypto_lskcipher *tfm)
{
- return alg->ivsize;
+ return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg,
+ struct lskcipher_alg, co.base);
}
/**
@@ -248,7 +419,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->ivsize;
+ return crypto_skcipher_alg_common(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -258,6 +429,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
}
/**
+ * crypto_lskcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the lskcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_lskcipher_ivsize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.ivsize;
+}
+
+/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
@@ -273,10 +459,20 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
-static inline unsigned int crypto_skcipher_alg_chunksize(
- struct skcipher_alg *alg)
+/**
+ * crypto_lskcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the lskcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_lskcipher_blocksize(
+ struct crypto_lskcipher *tfm)
{
- return alg->chunksize;
+ return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm));
}
/**
@@ -293,7 +489,58 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+ return crypto_skcipher_alg_common(tfm)->chunksize;
+}
+
+/**
+ * crypto_lskcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_lskcipher_chunksize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.chunksize;
+}
+
+/**
+ * crypto_skcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_skcipher_statesize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_common(tfm)->statesize;
+}
+
+/**
+ * crypto_lskcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_lskcipher_statesize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.statesize;
}
static inline unsigned int crypto_sync_skcipher_blocksize(
@@ -308,6 +555,12 @@ static inline unsigned int crypto_skcipher_alignmask(
return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_lskcipher_alignmask(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm));
+}
+
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
@@ -343,6 +596,23 @@ static inline void crypto_sync_skcipher_clear_flags(
crypto_skcipher_clear_flags(&tfm->base, flags);
}
+static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm));
+}
+
+static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -368,16 +638,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
+/**
+ * crypto_lskcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the lskcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher variants depending on the key size, such as AES-128 vs.
+ * AES-192 vs. AES-256. When providing a 16-byte key for an AES cipher handle,
+ * AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen);
+
static inline unsigned int crypto_skcipher_min_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->min_keysize;
+ return crypto_skcipher_alg_common(tfm)->min_keysize;
}
static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->max_keysize;
+ return crypto_skcipher_alg_common(tfm)->max_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_min_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.min_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_max_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.max_keysize;
}
/**
@@ -430,6 +731,78 @@ int crypto_skcipher_encrypt(struct skcipher_request *req);
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
+ * crypto_skcipher_export() - export partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @out: output buffer of sufficient size that can hold the state
+ *
+ * Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save partial result of the transformation after
+ * processing certain amount of data and reload this partial result
+ * multiple times later on for multiple re-use. No data processing
+ * happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_export(struct skcipher_request *req, void *out);
+
+/**
+ * crypto_skcipher_import() - import partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @in: buffer holding the state
+ *
+ * Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_import(struct skcipher_request *req, const void *in);
+
+/**
+ * crypto_lskcipher_encrypt() - encrypt plaintext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Encrypt plaintext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful, if positive
+ * then this many bytes have been left unprocessed;
+ * < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
+/**
+ * crypto_lskcipher_decrypt() - decrypt ciphertext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Decrypt ciphertext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful, if positive
+ * then this many bytes have been left unprocessed;
+ * < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
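
Tying the lskcipher pieces together, a hedged one-shot sketch; the algorithm name is an assumption of the example, and the siv buffer carries the IV followed by the internal state as described above:

	/* Sketch: one-shot lskcipher encryption (includes elided). */
	static int example_lskcipher(const u8 *key, unsigned int keylen,
				     const u8 *src, u8 *dst, unsigned int len)
	{
		struct crypto_lskcipher *tfm;
		u8 *siv;
		int err;

		tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
			      crypto_lskcipher_statesize(tfm), GFP_KERNEL);
		if (!siv) {
			crypto_free_lskcipher(tfm);
			return -ENOMEM;
		}

		err = crypto_lskcipher_setkey(tfm, key, keylen);
		if (!err)
			err = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);

		kfree_sensitive(siv);
		crypto_free_lskcipher(tfm);
		return err;
	}
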
+/**
* DOC: Symmetric Key Cipher Request Handle
*
* The skcipher_request data structure contains all pointers to data
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
new file mode 100644
index 000000000000..04a92c1013c8
--- /dev/null
+++ b/include/crypto/sm2.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * sm2.h - SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2020, Alibaba Group.
+ * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#ifndef _CRYPTO_SM2_H
+#define _CRYPTO_SM2_H
+
+struct shash_desc;
+
+#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
+int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen, void *dgst);
+#else
+static inline int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen,
+ void *dgst)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* _CRYPTO_SM2_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1438942dc773..1f021ad0533f 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -1,5 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Common values for SM3 algorithm
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM3_H
@@ -30,11 +35,30 @@ struct sm3_state {
u8 buffer[SM3_BLOCK_SIZE];
};
-struct shash_desc;
+/*
+ * Stand-alone implementation of the SM3 algorithm. It is designed to
+ * have as little dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should generally use the
+ * hash APIs from include/crypto/hash.h. Especially when hashing large
+ * amounts of data as those APIs may be hw-accelerated.
+ *
+ * For details see lib/crypto/sm3.c
+ */
+
+static inline void sm3_init(struct sm3_state *sctx)
+{
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+}
-extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
+void sm3_final(struct sm3_state *sctx, u8 *out);
-extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
#endif
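
With the header now carrying the stand-alone helpers, a one-shot SM3 reduces to the following sketch:

	#include <crypto/sm3.h>

	/* Sketch: one-shot SM3 using the stand-alone helpers above. */
	static void example_sm3(const u8 *data, unsigned int len,
				u8 out[SM3_DIGEST_SIZE])
	{
		struct sm3_state sctx;

		sm3_init(&sctx);
		sm3_update(&sctx, data, len);
		sm3_final(&sctx, out);
	}
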
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 1cbf9aa1fe52..2f3a32ab97bb 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -13,6 +13,7 @@
#include <crypto/sm3.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
@@ -104,7 +105,7 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- *sctx = (struct sm3_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index 7afd730d16ff..9656a9a40326 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -3,6 +3,7 @@
/*
* Common values for the SM4 algorithm
* Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM4_H
@@ -15,17 +16,33 @@
#define SM4_BLOCK_SIZE 16
#define SM4_RKEY_WORDS 32
-struct crypto_sm4_ctx {
+struct sm4_ctx {
u32 rkey_enc[SM4_RKEY_WORDS];
u32 rkey_dec[SM4_RKEY_WORDS];
};
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+extern const u32 crypto_sm4_fk[];
+extern const u32 crypto_sm4_ck[];
+extern const u8 crypto_sm4_sbox[];
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
unsigned int key_len);
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk: The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out: Buffer to store output data
+ * @in: Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
#endif
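
A minimal sketch of the renamed API; SM4_KEY_SIZE (16 bytes) is defined in this header but not visible in the hunks above:

	#include <crypto/sm4.h>
	#include <linux/string.h>

	/* Sketch: expand an SM4 key, then encrypt one 16-byte block. */
	static int example_sm4(const u8 *key, const u8 in[SM4_BLOCK_SIZE],
			       u8 out[SM4_BLOCK_SIZE])
	{
		struct sm4_ctx ctx;
		int err;

		err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
		if (err)
			return err;

		sm4_crypt_block(ctx.rkey_enc, out, in);	/* rkey_dec decrypts */
		memzero_explicit(&ctx, sizeof(ctx));
		return 0;
	}
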
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
new file mode 100644
index 000000000000..acbb917a00c6
--- /dev/null
+++ b/include/crypto/utils.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic utilities
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_UTILS_H
+#define _CRYPTO_UTILS_H
+
+#include <asm/unaligned.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s = (unsigned long *)src;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(d) ^ get_unaligned(s++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, dst, src, size);
+ }
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+ unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s1 = (unsigned long *)src1;
+ unsigned long *s2 = (unsigned long *)src2;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(s1++) ^ get_unaligned(s2++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, src1, src2, size);
+ }
+}
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
+#endif /* _CRYPTO_UTILS_H */
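
The canonical use of crypto_memneq() is comparing authentication tags, where memcmp()'s early exit would leak how many leading bytes matched. A sketch (identifiers are placeholders):

	if (crypto_memneq(computed_tag, received_tag, authsize))
		return -EBADMSG;	/* reject without a timing side channel */
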
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 0f8dba69feb4..15b16c4853d8 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,8 +8,8 @@
#define XTS_BLOCK_SIZE 16
-static inline int xts_check_key(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
/*
* key consists of keys of equal size concatenated, therefore
@@ -18,24 +18,17 @@ static inline int xts_check_key(struct crypto_tfm *tfm,
if (keylen % 2)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
- if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
- return -EINVAL;
-
- return 0;
-}
-
-static inline int xts_verify_key(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
/*
- * key consists of keys of equal size concatenated, therefore
- * the length must be even.
+ * In FIPS mode only a combined key length of either 256 or
+ * 512 bits is allowed, c.f. FIPS 140-3 IG C.I.
*/
- if (keylen % 2)
+ if (fips_enabled && keylen != 32 && keylen != 64)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
+ /*
+ * Ensure that the AES and tweak key are not identical when
+ * in FIPS mode or the FORBID_WEAK_KEYS flag is set.
+ */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
!crypto_memneq(key, key + (keylen / 2), keylen / 2))
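
A hedged sketch of how a mode implementation might call the consolidated helper from its setkey path; example_program_keys() is a hypothetical helper:

	/* Sketch: validate, then split the XTS key into its two halves. */
	static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
				      unsigned int keylen)
	{
		int err = xts_verify_key(tfm, key, keylen);

		if (err)
			return err;

		/* first half: data key; second half: tweak key */
		return example_program_keys(tfm, key, key + keylen / 2,
					    keylen / 2);
	}
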
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 8712e14991ed..724c45e3e9a7 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -51,14 +51,26 @@ enum amd_asic_type {
CHIP_RAVEN, /* 22 */
CHIP_ARCTURUS, /* 23 */
CHIP_RENOIR, /* 24 */
- CHIP_NAVI10, /* 25 */
- CHIP_NAVI14, /* 26 */
- CHIP_NAVI12, /* 27 */
- CHIP_SIENNA_CICHLID, /* 28 */
- CHIP_NAVY_FLOUNDER, /* 29 */
+ CHIP_ALDEBARAN, /* 25 */
+ CHIP_NAVI10, /* 26 */
+ CHIP_CYAN_SKILLFISH, /* 27 */
+ CHIP_NAVI14, /* 28 */
+ CHIP_NAVI12, /* 29 */
+ CHIP_SIENNA_CICHLID, /* 30 */
+ CHIP_NAVY_FLOUNDER, /* 31 */
+ CHIP_VANGOGH, /* 32 */
+ CHIP_DIMGREY_CAVEFISH, /* 33 */
+ CHIP_BEIGE_GOBY, /* 34 */
+ CHIP_YELLOW_CARP, /* 35 */
+ CHIP_IP_DISCOVERY, /* 36 */
CHIP_LAST,
};
extern const char *amdgpu_asic_name[];
+struct amdgpu_asic_type_quirk {
+ unsigned short device; /* PCI device ID */
+ u8 revision; /* revision ID */
+ unsigned short type; /* real ASIC type */
+};
#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
new file mode 100644
index 000000000000..4453906105ca
--- /dev/null
+++ b/include/drm/bridge/aux-bridge.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#ifndef DRM_AUX_BRIDGE_H
+#define DRM_AUX_BRIDGE_H
+
+#include <drm/drm_connector.h>
+
+struct auxiliary_device;
+
+#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
+int drm_aux_bridge_register(struct device *parent);
+#else
+static inline int drm_aux_bridge_register(struct device *parent)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np);
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
+#else
+static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev,
+					      struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+}
+#endif
+
+#endif
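
For context, a hedged sketch of how a Type-C/DP controller driver would use the HPD bridge (parent and np are the caller's device and OF node; identifiers are illustrative):

	struct device *hpd_dev;

	hpd_dev = drm_dp_hpd_bridge_register(parent, np);
	if (IS_ERR(hpd_dev))
		return PTR_ERR(hpd_dev);

	/* later, from the hotplug notification path: */
	drm_aux_hpd_bridge_notify(hpd_dev, connector_status_connected);
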
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ea34ca146b82..6a46baa0737c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -126,6 +126,8 @@ struct dw_hdmi_phy_ops {
struct dw_hdmi_plat_data {
struct regmap *regm;
+ unsigned int output_port;
+
unsigned long input_bus_encoding;
bool use_drm_infoframe;
bool ycbcr_420_allowed;
@@ -141,6 +143,11 @@ struct dw_hdmi_plat_data {
const struct drm_display_info *info,
const struct drm_display_mode *mode);
+ /* Platform-specific audio enable/disable (optional) */
+ void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
+ int width, int rate, int non_pcm);
+ void (*disable_audio)(struct dw_hdmi *hdmi);
+
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
@@ -153,6 +160,8 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_config *phy_config;
int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock);
+
+ unsigned int disable_cec : 1;
};
struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
@@ -169,6 +178,8 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev);
+void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
@@ -183,9 +194,11 @@ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr);
+void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi);
+
void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
-void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi);
enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
void *data);
@@ -193,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
bool force, bool disabled, bool rxsense);
void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
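The new optional enable_audio/disable_audio hooks let platform glue chain the sample-format setters added above. A sketch of what such glue might look like; the callback body and field values are assumptions, not copied from any in-tree platform:

	static void example_hdmi_enable_audio(struct dw_hdmi *hdmi, int channel,
					      int width, int rate, int non_pcm)
	{
		dw_hdmi_set_sample_non_pcm(hdmi, non_pcm);
		dw_hdmi_set_sample_width(hdmi, width);
		dw_hdmi_set_sample_rate(hdmi, rate);
		dw_hdmi_set_channel_count(hdmi, channel);
	}

	static const struct dw_hdmi_plat_data example_hdmi_plat_data = {
		.output_port  = 1,	/* which OF port carries the HDMI output */
		.enable_audio = example_hdmi_enable_audio,
		.disable_cec  = 1,	/* suppress the CEC interface on this SoC */
	};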
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index b0e390b3288e..65d5e68065e3 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -11,6 +11,10 @@
#include <linux/types.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
struct drm_display_mode;
@@ -36,6 +40,7 @@ struct dw_mipi_dsi_phy_ops {
unsigned int *lane_mbps);
int (*get_timing)(void *priv_data, unsigned int lane_mbps,
struct dw_mipi_dsi_dphy_timing *timing);
+ int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate);
};
struct dw_mipi_dsi_host_ops {
@@ -50,7 +55,20 @@ struct dw_mipi_dsi_plat_data {
unsigned int max_data_lanes;
enum drm_mode_status (*mode_valid)(void *priv_data,
- const struct drm_display_mode *mode);
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
+
+ bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ u32 *(*get_input_bus_fmts)(void *priv_data,
+ struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
const struct dw_mipi_dsi_phy_ops *phy_ops;
const struct dw_mipi_dsi_host_ops *host_ops;
@@ -65,5 +83,6 @@ void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
+struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi);
#endif /* __DW_MIPI_DSI__ */
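mode_valid now also receives the negotiated mode flags, lane count and pixel format, so vendor code can validate bandwidth per configuration. A hedged sketch of such a hook; the clock limit is made up:

	static enum drm_mode_status
	example_dsi_mode_valid(void *priv_data, const struct drm_display_mode *mode,
			       unsigned long mode_flags, u32 lanes, u32 format)
	{
		/* illustrative limit only: assume fewer than four lanes
		 * cannot carry pixel clocks above 150 MHz */
		if (lanes < 4 && mode->clock > 150000)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}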
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
new file mode 100644
index 000000000000..e0c105051246
--- /dev/null
+++ b/include/drm/bridge/samsung-dsim.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2022 Amarula Solutions(India)
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+#ifndef __SAMSUNG_DSIM__
+#define __SAMSUNG_DSIM__
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_mipi_dsi.h>
+
+struct samsung_dsim;
+
+#define DSIM_STATE_ENABLED BIT(0)
+#define DSIM_STATE_INITIALIZED BIT(1)
+#define DSIM_STATE_CMD_LPM BIT(2)
+#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
+
+enum samsung_dsim_type {
+ DSIM_TYPE_EXYNOS3250,
+ DSIM_TYPE_EXYNOS4210,
+ DSIM_TYPE_EXYNOS5410,
+ DSIM_TYPE_EXYNOS5422,
+ DSIM_TYPE_EXYNOS5433,
+ DSIM_TYPE_IMX8MM,
+ DSIM_TYPE_IMX8MP,
+ DSIM_TYPE_COUNT,
+};
+
+#define samsung_dsim_hw_is_exynos(hw) \
+ ((hw) >= DSIM_TYPE_EXYNOS3250 && (hw) <= DSIM_TYPE_EXYNOS5433)
+
+struct samsung_dsim_transfer {
+ struct list_head list;
+ struct completion completed;
+ int result;
+ struct mipi_dsi_packet packet;
+ u16 flags;
+ u16 tx_done;
+
+ u8 *rx_payload;
+ u16 rx_len;
+ u16 rx_done;
+};
+
+struct samsung_dsim_driver_data {
+ const unsigned int *reg_ofs;
+ unsigned int plltmr_reg;
+ unsigned int has_freqband:1;
+ unsigned int has_clklane_stop:1;
+ unsigned int has_broken_fifoctrl_emptyhdr:1;
+ unsigned int num_clks;
+ unsigned int min_freq;
+ unsigned int max_freq;
+ unsigned int wait_for_reset;
+ unsigned int num_bits_resol;
+ unsigned int pll_p_offset;
+ const unsigned int *reg_values;
+ unsigned int pll_fin_min;
+ unsigned int pll_fin_max;
+ u16 m_min;
+ u16 m_max;
+};
+
+struct samsung_dsim_host_ops {
+ int (*register_host)(struct samsung_dsim *dsim);
+ void (*unregister_host)(struct samsung_dsim *dsim);
+ int (*attach)(struct samsung_dsim *dsim, struct mipi_dsi_device *device);
+ void (*detach)(struct samsung_dsim *dsim, struct mipi_dsi_device *device);
+ irqreturn_t (*te_irq_handler)(struct samsung_dsim *dsim);
+};
+
+struct samsung_dsim_plat_data {
+ enum samsung_dsim_type hw_type;
+ const struct samsung_dsim_host_ops *host_ops;
+};
+
+struct samsung_dsim {
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct drm_bridge *out_bridge;
+ struct device *dev;
+ struct drm_display_mode mode;
+
+ void __iomem *reg_base;
+ struct phy *phy;
+ struct clk **clks;
+ struct clk *pll_clk;
+ struct regulator_bulk_data supplies[2];
+ int irq;
+ struct gpio_desc *te_gpio;
+
+ u32 pll_clk_rate;
+ u32 burst_clk_rate;
+ u32 hs_clock;
+ u32 esc_clk_rate;
+ u32 lanes;
+ u32 mode_flags;
+ u32 format;
+
+ bool swap_dn_dp_clk;
+ bool swap_dn_dp_data;
+ int state;
+ struct drm_property *brightness;
+ struct completion completed;
+
+ spinlock_t transfer_lock; /* protects transfer_list */
+ struct list_head transfer_list;
+
+ const struct samsung_dsim_driver_data *driver_data;
+ const struct samsung_dsim_plat_data *plat_data;
+
+ void *priv;
+};
+
+extern int samsung_dsim_probe(struct platform_device *pdev);
+extern void samsung_dsim_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops samsung_dsim_pm_ops;
+
+#endif /* __SAMSUNG_DSIM__ */
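Platform glue selects the hardware variant via hw_type and supplies host_ops. A stub-level sketch in the shape an i.MX8M Mini user might take; the op body is a placeholder:

	static int example_dsim_register_host(struct samsung_dsim *dsim)
	{
		/* wire the bridge into this SoC's encoder chain here */
		return 0;
	}

	static const struct samsung_dsim_host_ops example_dsim_host_ops = {
		.register_host = example_dsim_register_host,
	};

	static const struct samsung_dsim_plat_data example_dsim_plat_data = {
		.hw_type  = DSIM_TYPE_IMX8MM,
		.host_ops = &example_dsim_host_ops,
	};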
diff --git a/include/drm/drm_dp_helper.h b/include/drm/display/drm_dp.h
index e47dc22ebf50..4891bd916d26 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/display/drm_dp.h
@@ -20,11 +20,9 @@
* OF THIS SOFTWARE.
*/
-#ifndef _DRM_DP_HELPER_H_
-#define _DRM_DP_HELPER_H_
+#ifndef _DRM_DP_H_
+#define _DRM_DP_H_
-#include <linux/delay.h>
-#include <linux/i2c.h>
#include <linux/types.h>
/*
@@ -103,8 +101,9 @@
#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2)
#define DP_AUX_I2C_REPLY_MASK (0x3 << 2)
-/* AUX CH addresses */
-/* DPCD */
+/* DPCD Field Address Mapping */
+
+/* Receiver Capability */
#define DP_DPCD_REV 0x000
# define DP_DPCD_REV_10 0x10
# define DP_DPCD_REV_11 0x11
@@ -121,6 +120,7 @@
#define DP_MAX_DOWNSPREAD 0x003
# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
+# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
# define DP_TPS4_SUPPORTED (1 << 7)
@@ -138,6 +138,7 @@
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
# define DP_CAP_ANSI_8B10B (1 << 0)
+# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
@@ -147,6 +148,7 @@
#define DP_RECEIVE_PORT_0_CAP_0 0x008
# define DP_LOCAL_EDID_PRESENT (1 << 1)
# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3)
#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
@@ -181,8 +183,14 @@
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
+#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */
+# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0)
+# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1)
+# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2)
+
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
+# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */
#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
@@ -232,6 +240,9 @@
#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1)
+# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP (1 << 2)
+# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP (1 << 3)
#define DP_DSC_REV 0x061
# define DP_DSC_MAJOR_MASK (0xf << 0)
@@ -270,12 +281,14 @@
#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066
# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1)
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
+# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK (0x3 << 5) /* eDP 1.5 & DP 2.0 */
+# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY (1 << 7) /* eDP 1.5 & DP 2.0 */
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -337,16 +350,19 @@
# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2)
#define DP_DSC_BITS_PER_PIXEL_INC 0x06F
+# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f
+# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0
# define DP_DSC_BITS_PER_PIXEL_1_16 0x0
# define DP_DSC_BITS_PER_PIXEL_1_8 0x1
# define DP_DSC_BITS_PER_PIXEL_1_4 0x2
# define DP_DSC_BITS_PER_PIXEL_1_2 0x3
-# define DP_DSC_BITS_PER_PIXEL_1 0x4
+# define DP_DSC_BITS_PER_PIXEL_1_1 0x4
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
+# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
@@ -361,6 +377,7 @@
# define DP_PSR_SETUP_TIME_SHIFT 1
# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
+# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6)/* eDP 1.5, adopted eDP 1.4b SCR */
#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */
#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */
@@ -384,13 +401,52 @@
# define DP_DS_PORT_TYPE_DP_DUALMODE 5
# define DP_DS_PORT_TYPE_WIRELESS 6
# define DP_DS_PORT_HPD (1 << 3)
+# define DP_DS_NON_EDID_MASK (0xf << 4)
+# define DP_DS_NON_EDID_720x480i_60 (1 << 4)
+# define DP_DS_NON_EDID_720x480i_50 (2 << 4)
+# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4)
+# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4)
+# define DP_DS_NON_EDID_1280x720_60 (5 << 4)
+# define DP_DS_NON_EDID_1280x720_50 (7 << 4)
/* offset 1 for VGA is maximum megapixels per second / 8 */
-/* offset 2 */
+/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */
+/* offset 2 for VGA/DVI/HDMI */
# define DP_DS_MAX_BPC_MASK (3 << 0)
# define DP_DS_8BPC 0
# define DP_DS_10BPC 1
# define DP_DS_12BPC 2
# define DP_DS_16BPC 3
+/* HDMI2.1 PCON FRL CONFIGURATION */
+# define DP_PCON_MAX_FRL_BW (7 << 2)
+# define DP_PCON_MAX_0GBPS (0 << 2)
+# define DP_PCON_MAX_9GBPS (1 << 2)
+# define DP_PCON_MAX_18GBPS (2 << 2)
+# define DP_PCON_MAX_24GBPS (3 << 2)
+# define DP_PCON_MAX_32GBPS (4 << 2)
+# define DP_PCON_MAX_40GBPS (5 << 2)
+# define DP_PCON_MAX_48GBPS (6 << 2)
+# define DP_PCON_SOURCE_CTL_MODE (1 << 5)
+
+/* offset 3 for DVI */
+# define DP_DS_DVI_DUAL_LINK (1 << 1)
+# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2)
+/* offset 3 for HDMI */
+# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0)
+# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1)
+# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2)
+# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3)
+# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4)
+
+/*
+ * The VESA DP-to-HDMI PCON specification adds caps for colorspace
+ * conversion in the DFP capability field at DPCD 83h (Sec. 6.1, Table 3).
+ * Based on the advertised support, the source can enable color
+ * conversion by writing PROTOCOL_CONVERTER_CONTROL_2 at DPCD 3052h.
+ */
+# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV (1 << 5)
+# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV (1 << 6)
+# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV (1 << 7)
#define DP_MAX_DOWNSTREAM_PORTS 0x10
@@ -400,19 +456,108 @@
# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
+
+/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
+#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */
+#define DP_PCON_DSC_ENCODER 0x092
+# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0)
+# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1)
+
+/* DP-HDMI2.1 PCON DSC Version */
+#define DP_PCON_DSC_VERSION 0x093
+# define DP_PCON_DSC_MAJOR_MASK (0xF << 0)
+# define DP_PCON_DSC_MINOR_MASK (0xF << 4)
+# define DP_PCON_DSC_MAJOR_SHIFT 0
+# define DP_PCON_DSC_MINOR_SHIFT 4
+
+/* DP-HDMI2.1 PCON DSC RC Buffer block size */
+#define DP_PCON_DSC_RC_BUF_BLK_INFO 0x094
+# define DP_PCON_DSC_RC_BUF_BLK_SIZE (0x3 << 0)
+# define DP_PCON_DSC_RC_BUF_BLK_1KB 0
+# define DP_PCON_DSC_RC_BUF_BLK_4KB 1
+# define DP_PCON_DSC_RC_BUF_BLK_16KB 2
+# define DP_PCON_DSC_RC_BUF_BLK_64KB 3
+
+/* DP-HDMI2.1 PCON DSC RC Buffer size */
+#define DP_PCON_DSC_RC_BUF_SIZE 0x095
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */
+#define DP_PCON_DSC_SLICE_CAP_1 0x096
+# define DP_PCON_DSC_1_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_2_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_4_PER_DSC_ENC (0x1 << 3)
+# define DP_PCON_DSC_6_PER_DSC_ENC (0x1 << 4)
+# define DP_PCON_DSC_8_PER_DSC_ENC (0x1 << 5)
+# define DP_PCON_DSC_10_PER_DSC_ENC (0x1 << 6)
+# define DP_PCON_DSC_12_PER_DSC_ENC (0x1 << 7)
+
+#define DP_PCON_DSC_BUF_BIT_DEPTH 0x097
+# define DP_PCON_DSC_BIT_DEPTH_MASK (0xF << 0)
+# define DP_PCON_DSC_DEPTH_9_BITS 0
+# define DP_PCON_DSC_DEPTH_10_BITS 1
+# define DP_PCON_DSC_DEPTH_11_BITS 2
+# define DP_PCON_DSC_DEPTH_12_BITS 3
+# define DP_PCON_DSC_DEPTH_13_BITS 4
+# define DP_PCON_DSC_DEPTH_14_BITS 5
+# define DP_PCON_DSC_DEPTH_15_BITS 6
+# define DP_PCON_DSC_DEPTH_16_BITS 7
+# define DP_PCON_DSC_DEPTH_8_BITS 8
+
+#define DP_PCON_DSC_BLOCK_PREDICTION 0x098
+# define DP_PCON_DSC_BLOCK_PRED_SUPPORT (0x1 << 0)
+
+#define DP_PCON_DSC_ENC_COLOR_FMT_CAP 0x099
+# define DP_PCON_DSC_ENC_RGB (0x1 << 0)
+# define DP_PCON_DSC_ENC_YUV444 (0x1 << 1)
+# define DP_PCON_DSC_ENC_YUV422_S (0x1 << 2)
+# define DP_PCON_DSC_ENC_YUV422_N (0x1 << 3)
+# define DP_PCON_DSC_ENC_YUV420_N (0x1 << 4)
+
+#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP 0x09A
+# define DP_PCON_DSC_ENC_8BPC (0x1 << 1)
+# define DP_PCON_DSC_ENC_10BPC (0x1 << 2)
+# define DP_PCON_DSC_ENC_12BPC (0x1 << 3)
+
+#define DP_PCON_DSC_MAX_SLICE_WIDTH 0x09B
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */
+#define DP_PCON_DSC_SLICE_CAP_2 0x09C
+# define DP_PCON_DSC_16_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_20_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_24_PER_DSC_ENC (0x1 << 2)
+
+/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */
+#define DP_PCON_DSC_BPP_INCR 0x09E
+# define DP_PCON_DSC_BPP_INCR_MASK (0x7 << 0)
+# define DP_PCON_DSC_ONE_16TH_BPP 0
+# define DP_PCON_DSC_ONE_8TH_BPP 1
+# define DP_PCON_DSC_ONE_4TH_BPP 2
+# define DP_PCON_DSC_ONE_HALF_BPP 3
+# define DP_PCON_DSC_ONE_BPP 4
/* DP Extended DSC Capabilities */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
-/* link configuration */
+/* DFP Capability Extension */
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+
+#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
+# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
+
+/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
# define DP_LINK_BW_8_1 0x1e /* 1.4 */
+# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
@@ -422,6 +567,7 @@
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
# define DP_TRAINING_PATTERN_MASK 0x3
@@ -464,12 +610,16 @@
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */
+
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE (1 << 6)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
+# define DP_SET_ANSI_128B132B (1 << 1)
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
@@ -488,8 +638,22 @@
# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
# define DP_LINK_QUAL_PATTERN_PRBS7 3
# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
-# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
-# define DP_LINK_QUAL_PATTERN_MASK 7
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7
+/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10
+# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18
+# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20
+# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28
+# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30
+# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
+# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
+# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49
+# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
@@ -535,20 +699,26 @@
# define DP_FEC_LANE_2_SELECT (2 << 4)
# define DP_FEC_LANE_3_SELECT (3 << 4)
+#define DP_SDP_ERROR_DETECTION_CONFIGURATION 0x121 /* DP 2.0 E11 */
+#define DP_SDP_CRC16_128B132B_EN BIT(0)
+
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
-
-#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
-# define DP_PSR_ENABLE (1 << 0)
-# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
-# define DP_PSR_CRC_VERIFICATION (1 << 2)
-# define DP_PSR_FRAME_CAPTURE (1 << 3)
-# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
-# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
-# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
+# define DP_DSC_PASSTHROUGH_EN (1 << 1)
+#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
+
+#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
+# define DP_PSR_ENABLE BIT(0)
+# define DP_PSR_MAIN_LINK_ACTIVE BIT(1)
+# define DP_PSR_CRC_VERIFICATION BIT(2)
+# define DP_PSR_FRAME_CAPTURE BIT(3)
+# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */
+# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */
+# define DP_PSR_ENABLE_SU_REGION_ET BIT(7) /* eDP 1.5 */
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -556,10 +726,18 @@
#define DP_BRANCH_DEVICE_CTRL 0x1a1
# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
+#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_ENABLE (1 << 0)
+# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4)
+# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5)
+# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6)
+
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
+/* Link/Sink Device Status */
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
@@ -584,16 +762,19 @@
DP_LANE_CHANNEL_EQ_DONE | \
DP_LANE_SYMBOL_LOCKED)
-#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
-
-#define DP_INTERLANE_ALIGN_DONE (1 << 0)
-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
-#define DP_LINK_STATUS_UPDATED (1 << 7)
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
+#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
+#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
-
-#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
-#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
+# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
@@ -606,6 +787,12 @@
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+/* DP 2.0 128b/132b Link Layer */
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0)
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4)
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4
+
#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
@@ -710,6 +897,8 @@
# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
# define DP_PHY_TEST_PATTERN_CP2520 0x5
+#define DP_PHY_SQUARE_PATTERN 0x249
+
#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
@@ -759,20 +948,27 @@
#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
/* up to ID_SLOT_63 at 0x2ff */
+/* Source Device-specific */
#define DP_SOURCE_OUI 0x300
+
+/* Sink Device-specific */
#define DP_SINK_OUI 0x400
+
+/* Branch Device-specific */
#define DP_BRANCH_OUI 0x500
#define DP_BRANCH_ID 0x503
#define DP_BRANCH_REVISION_START 0x509
#define DP_BRANCH_HW_REV 0x509
#define DP_BRANCH_SW_REV 0x50A
+/* Link/Sink Device Power Control */
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
# define DP_SET_POWER_D3_AUX_ON 0x5
+/* eDP-specific */
#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
# define DP_EDP_11 0x00
# define DP_EDP_12 0x01
@@ -803,6 +999,7 @@
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
@@ -828,6 +1025,7 @@
# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE (1 << 7)
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
@@ -852,20 +1050,25 @@
#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+#define DP_EDP_PANEL_TARGET_LUMINANCE_VALUE 0x734
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */
+# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0)
+# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0
+# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3)
+
+/* Sideband MSG Buffers */
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
-#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
-/* 0-5 sink count */
-# define DP_SINK_COUNT_CP_READY (1 << 6)
-
-#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
+/* DPRX Event Status Indicator */
+#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0)
@@ -873,6 +1076,12 @@
# define DP_CEC_IRQ (1 << 2)
#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
+# define RX_CAP_CHANGED (1 << 0)
+# define LINK_STATUS_CHANGED (1 << 1)
+# define STREAM_STATUS_CHANGED (1 << 2)
+# define HDMI_LINK_STATUS_CHANGED (1 << 3)
+# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4)
+# define DP_TUNNELING_IRQ (1 << 5)
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
@@ -914,8 +1123,20 @@
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1 */
+# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2)
+
+#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */
+# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0)
+# define DP_SINK_FRAME_LOCKED_SHIFT 3
+# define DP_SINK_FRAME_LOCKED_MASK (3 << 3)
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5)
+
+/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
#define DP_DP13_DPCD_REV 0x2200
-#define DP_DP13_MAX_LINK_RATE 0x2201
#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
@@ -927,6 +1148,40 @@
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
+# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
+# define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1)
+# define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4)
+
+#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
+# define DP_UHBR10 (1 << 0)
+# define DP_UHBR20 (1 << 1)
+# define DP_UHBR13_5 (1 << 2)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7)
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06
+
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+
+/* DSC Extended Capability Branch Total DSC Resources */
+#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
+# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+# define DP_DSC_DECODER_COUNT_SHIFT 5
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
+# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+
+/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
#define DP_CEC_TUNNELING_CAPABILITY 0x3000
# define DP_CEC_TUNNELING_CAPABLE (1 << 0)
@@ -983,6 +1238,106 @@
#define DP_CEC_TX_MESSAGE_BUFFER 0x3020
#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10
+/* PCON CONFIGURE-1 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_1 0x305A
+# define DP_PCON_ENABLE_MAX_FRL_BW (7 << 0)
+# define DP_PCON_ENABLE_MAX_BW_0GBPS 0
+# define DP_PCON_ENABLE_MAX_BW_9GBPS 1
+# define DP_PCON_ENABLE_MAX_BW_18GBPS 2
+# define DP_PCON_ENABLE_MAX_BW_24GBPS 3
+# define DP_PCON_ENABLE_MAX_BW_32GBPS 4
+# define DP_PCON_ENABLE_MAX_BW_40GBPS 5
+# define DP_PCON_ENABLE_MAX_BW_48GBPS 6
+# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3)
+# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4)
+# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4)
+# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5)
+# define DP_PCON_ENABLE_HPD_READY (1 << 6)
+# define DP_PCON_ENABLE_HDMI_LINK (1 << 7)
+
+/* PCON CONFIGURE-2 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_2 0x305B
+# define DP_PCON_MAX_LINK_BW_MASK (0x3F << 0)
+# define DP_PCON_FRL_BW_MASK_9GBPS (1 << 0)
+# define DP_PCON_FRL_BW_MASK_18GBPS (1 << 1)
+# define DP_PCON_FRL_BW_MASK_24GBPS (1 << 2)
+# define DP_PCON_FRL_BW_MASK_32GBPS (1 << 3)
+# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4)
+# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5)
+# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6)
+# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6)
+
+/* PCON HDMI LINK STATUS */
+#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B
+# define DP_PCON_HDMI_TX_LINK_ACTIVE (1 << 0)
+# define DP_PCON_FRL_READY (1 << 1)
+
+/* PCON HDMI POST FRL STATUS */
+#define DP_PCON_HDMI_POST_FRL_STATUS 0x3036
+# define DP_PCON_HDMI_LINK_MODE (1 << 0)
+# define DP_PCON_HDMI_MODE_TMDS 0
+# define DP_PCON_HDMI_MODE_FRL 1
+# define DP_PCON_HDMI_FRL_TRAINED_BW (0x3F << 1)
+# define DP_PCON_FRL_TRAINED_BW_9GBPS (1 << 1)
+# define DP_PCON_FRL_TRAINED_BW_18GBPS (1 << 2)
+# define DP_PCON_FRL_TRAINED_BW_24GBPS (1 << 3)
+# define DP_PCON_FRL_TRAINED_BW_32GBPS (1 << 4)
+# define DP_PCON_FRL_TRAINED_BW_40GBPS (1 << 5)
+# define DP_PCON_FRL_TRAINED_BW_48GBPS (1 << 6)
+
+#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */
+# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */
+#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */
+# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */
+# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */
+# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */
+# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */
+#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */
+# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */
+# define DP_PCON_ENABLE_DSC_ENCODER (1 << 1)
+# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK (0x3 << 2)
+# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED 0
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS 1
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER 2
+# define DP_CONVERSION_RGB_YCBCR_MASK (7 << 4)
+# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE (1 << 4)
+# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE (1 << 5)
+# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6)
+
+/* PCON Downstream HDMI ERROR Status per Lane */
+#define DP_PCON_HDMI_ERROR_STATUS_LN0 0x3037
+#define DP_PCON_HDMI_ERROR_STATUS_LN1 0x3038
+#define DP_PCON_HDMI_ERROR_STATUS_LN2 0x3039
+#define DP_PCON_HDMI_ERROR_STATUS_LN3 0x303A
+# define DP_PCON_HDMI_ERROR_COUNT_MASK (0x7 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS (1 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS (1 << 1)
+# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2)
+
+/* PCON HDMI CONFIG PPS Override Buffer
+ * Valid offsets to be added to the base: 0-127
+ */
+#define DP_PCON_HDMI_PPS_OVERRIDE_BASE 0x3100
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice height
+ * Offset-0 8LSBs of the Slice height.
+ * Offset-1 8MSBs of the Slice height.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT 0x3180
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice width
+ * Offset-0 8LSBs of the Slice width.
+ * Offset-1 8MSBs of the Slice width.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH 0x3182
+
+/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel
+ * Offset-0 8LSBs of the bits_per_pixel.
+ * Offset-1 2MSBs of the bits_per_pixel.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_BPP 0x3184
+
+/* HDCP 1.3 and HDCP 2.2 */
#define DP_AUX_HDCP_BKSV 0x68000
#define DP_AUX_HDCP_RI_PRIME 0x68005
#define DP_AUX_HDCP_AKSV 0x68007
@@ -1028,7 +1383,67 @@
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
-/* Link Training (LT)-tunable PHY Repeaters */
+/* DP-tunneling */
+#define DP_TUNNELING_OUI 0xe0000
+#define DP_TUNNELING_OUI_BYTES 3
+
+#define DP_TUNNELING_DEV_ID 0xe0003
+#define DP_TUNNELING_DEV_ID_BYTES 6
+
+#define DP_TUNNELING_HW_REV 0xe0009
+#define DP_TUNNELING_HW_REV_MAJOR_SHIFT 4
+#define DP_TUNNELING_HW_REV_MAJOR_MASK (0xf << DP_TUNNELING_HW_REV_MAJOR_SHIFT)
+#define DP_TUNNELING_HW_REV_MINOR_SHIFT 0
+#define DP_TUNNELING_HW_REV_MINOR_MASK (0xf << DP_TUNNELING_HW_REV_MINOR_SHIFT)
+
+#define DP_TUNNELING_SW_REV_MAJOR 0xe000a
+#define DP_TUNNELING_SW_REV_MINOR 0xe000b
+
+#define DP_TUNNELING_CAPABILITIES 0xe000d
+#define DP_IN_BW_ALLOCATION_MODE_SUPPORT (1 << 7)
+#define DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT (1 << 6)
+#define DP_TUNNELING_SUPPORT (1 << 0)
+
+#define DP_IN_ADAPTER_INFO 0xe000e
+#define DP_IN_ADAPTER_NUMBER_BITS 7
+#define DP_IN_ADAPTER_NUMBER_MASK ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1)
+
+#define DP_USB4_DRIVER_ID 0xe000f
+#define DP_USB4_DRIVER_ID_BITS 4
+#define DP_USB4_DRIVER_ID_MASK ((1 << DP_USB4_DRIVER_ID_BITS) - 1)
+
+#define DP_USB4_DRIVER_BW_CAPABILITY 0xe0020
+#define DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT (1 << 7)
+
+#define DP_IN_ADAPTER_TUNNEL_INFORMATION 0xe0021
+#define DP_GROUP_ID_BITS 3
+#define DP_GROUP_ID_MASK ((1 << DP_GROUP_ID_BITS) - 1)
+
+#define DP_BW_GRANULARITY 0xe0022
+#define DP_BW_GRANULARITY_MASK 0x3
+
+#define DP_ESTIMATED_BW 0xe0023
+#define DP_ALLOCATED_BW 0xe0024
+
+#define DP_TUNNELING_STATUS 0xe0025
+#define DP_BW_ALLOCATION_CAPABILITY_CHANGED (1 << 3)
+#define DP_ESTIMATED_BW_CHANGED (1 << 2)
+#define DP_BW_REQUEST_SUCCEEDED (1 << 1)
+#define DP_BW_REQUEST_FAILED (1 << 0)
+
+#define DP_TUNNELING_MAX_LINK_RATE 0xe0028
+
+#define DP_TUNNELING_MAX_LANE_COUNT 0xe0029
+#define DP_TUNNELING_MAX_LANE_COUNT_MASK 0x1f
+
+#define DP_DPTX_BW_ALLOCATION_MODE_CONTROL 0xe0030
+#define DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE (1 << 7)
+#define DP_UNMASK_BW_ALLOCATION_IRQ (1 << 6)
+
+#define DP_REQUEST_BW 0xe0031
+#define MAX_DP_REQUEST_BW 255
+
+/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
@@ -1036,15 +1451,68 @@
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
+# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
+/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
+#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */
+
+enum drm_dp_phy {
+ DP_PHY_DPRX,
+
+ DP_PHY_LTTPR1,
+ DP_PHY_LTTPR2,
+ DP_PHY_LTTPR3,
+ DP_PHY_LTTPR4,
+ DP_PHY_LTTPR5,
+ DP_PHY_LTTPR6,
+ DP_PHY_LTTPR7,
+ DP_PHY_LTTPR8,
+
+ DP_MAX_LTTPR_COUNT = DP_PHY_LTTPR8,
+};
+
+#define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i))
+
+#define __DP_LTTPR1_BASE 0xf0010 /* 1.3 */
+#define __DP_LTTPR2_BASE 0xf0060 /* 1.3 */
+#define DP_LTTPR_BASE(dp_phy) \
+ (__DP_LTTPR1_BASE + (__DP_LTTPR2_BASE - __DP_LTTPR1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1))
+
+#define DP_LTTPR_REG(dp_phy, lttpr1_reg) \
+ (DP_LTTPR_BASE(dp_phy) - DP_LTTPR_BASE(DP_PHY_LTTPR1) + (lttpr1_reg))
+
#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_PATTERN_SET_PHY_REPEATER1)
+
#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
+#define DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_LANE0_SET_PHY_REPEATER1)
+
#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
+#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
+
#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
+# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0)
+# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
+/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */
+
#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
+#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1)
+
#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */
+
#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
@@ -1052,10 +1520,27 @@
#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+
+#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
+#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
+#define DP_FEC_BASE(dp_phy) \
+ (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1)))
+
+#define DP_FEC_REG(dp_phy, fec1_reg) \
+ (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg)
+
#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \
+ DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1)
+
#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
+#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */
+
+#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */
+
/* Repeater modes */
#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
@@ -1108,6 +1593,9 @@
#define DP_POWER_DOWN_PHY 0x25
#define DP_SINK_EVENT_NOTIFY 0x30
#define DP_QUERY_STREAM_ENC_STATUS 0x38
+#define DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0
+#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1
+#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2
/* DP 1.2 MST sideband reply types */
#define DP_SIDEBAND_REPLY_ACK 0x00
@@ -1134,29 +1622,16 @@
#define DP_MST_PHYSICAL_PORT_0 0
#define DP_MST_LOGICAL_PORT_0 8
+#define DP_LINK_CONSTANT_N_VALUE 0x8000
#define DP_LINK_STATUS_SIZE 6
-bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count);
-bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count);
-u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane);
-u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
- unsigned int lane);
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
-
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-
-u8 drm_dp_link_rate_to_bw_code(int link_rate);
-int drm_dp_bw_code_to_link_rate(u8 link_bw);
+#define DP_LTTPR_COMMON_CAP_SIZE 8
+#define DP_LTTPR_PHY_CAP_SIZE 3
#define DP_SDP_AUDIO_TIMESTAMP 0x01
#define DP_SDP_AUDIO_STREAM 0x02
@@ -1170,6 +1645,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw);
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
/* 0x80+ CEA-861 infoframe types */
+#define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b
+
/**
* struct dp_sdp_header - DP secondary data packet header
* @HB0: Secondary Data Packet ID
@@ -1248,7 +1725,7 @@ enum dp_pixelformat {
*
* This enum is used to indicate DP VSC SDP Colorimetry formats.
* It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
- * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition.
+ * DB18], and the enum member names follow the enum drm_colorimetry definitions.
*
* @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or
* ITU-R BT.601 colorimetry format
@@ -1321,460 +1798,4 @@ enum dp_content_type {
DP_CONTENT_TYPE_GAME = 0x04,
};
-/**
- * struct drm_dp_vsc_sdp - drm DP VSC SDP
- *
- * This structure represents a DP VSC SDP of drm
- * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and
- * [Table 2-117: VSC SDP Payload for DB16 through DB18]
- *
- * @sdp_type: secondary-data packet type
- * @revision: revision number
- * @length: number of valid data bytes
- * @pixelformat: pixel encoding format
- * @colorimetry: colorimetry format
- * @bpc: bit per color
- * @dynamic_range: dynamic range information
- * @content_type: CTA-861-G defines content types and expected processing by a sink device
- */
-struct drm_dp_vsc_sdp {
- unsigned char sdp_type;
- unsigned char revision;
- unsigned char length;
- enum dp_pixelformat pixelformat;
- enum dp_colorimetry colorimetry;
- int bpc;
- enum dp_dynamic_range dynamic_range;
- enum dp_content_type content_type;
-};
-
-void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
- const struct drm_dp_vsc_sdp *vsc);
-
-int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
-
-static inline int
-drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
-}
-
-static inline u8
-drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static inline bool
-drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x11 &&
- (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
-}
-
-static inline bool
-drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x11 &&
- (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
-}
-
-static inline bool
-drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x12 &&
- dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
-}
-
-static inline bool
-drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x14 &&
- dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
-}
-
-static inline u8
-drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
- DP_TRAINING_PATTERN_MASK;
-}
-
-static inline bool
-drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
-}
-
-/* DP/eDP DSC support */
-u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
- bool is_edp);
-u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
-int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
- u8 dsc_bpc[3]);
-
-static inline bool
-drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
- DP_DSC_DECOMPRESSION_IS_SUPPORTED;
-}
-
-static inline u16
-drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
- (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
- DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
- DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
-}
-
-static inline u32
-drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- /* Max Slicewidth = Number of Pixels * 320 */
- return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
- DP_DSC_SLICE_WIDTH_MULTIPLIER;
-}
-
-/* Forward Error Correction Support on DP 1.4 */
-static inline bool
-drm_dp_sink_supports_fec(const u8 fec_capable)
-{
- return fec_capable & DP_FEC_CAPABLE;
-}
-
-static inline bool
-drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
-}
-
-static inline bool
-drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_EDP_CONFIGURATION_CAP] &
- DP_ALTERNATE_SCRAMBLER_RESET_CAP;
-}
-
-/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */
-static inline bool
-drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
- DP_MSA_TIMING_PAR_IGNORED;
-}
-
-/*
- * DisplayPort AUX channel
- */
-
-/**
- * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
- * @address: address of the (first) register to access
- * @request: contains the type of transaction (see DP_AUX_* macros)
- * @reply: upon completion, contains the reply type of the transaction
- * @buffer: pointer to a transmission or reception buffer
- * @size: size of @buffer
- */
-struct drm_dp_aux_msg {
- unsigned int address;
- u8 request;
- u8 reply;
- void *buffer;
- size_t size;
-};
-
-struct cec_adapter;
-struct edid;
-struct drm_connector;
-
-/**
- * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
- * @lock: mutex protecting this struct
- * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
- * @connector: the connector this CEC adapter is associated with
- * @unregister_work: unregister the CEC adapter
- */
-struct drm_dp_aux_cec {
- struct mutex lock;
- struct cec_adapter *adap;
- struct drm_connector *connector;
- struct delayed_work unregister_work;
-};
-
-/**
- * struct drm_dp_aux - DisplayPort AUX channel
- * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
- * @ddc: I2C adapter that can be used for I2C-over-AUX communication
- * @dev: pointer to struct device that is the parent for this AUX channel
- * @crtc: backpointer to the crtc that is currently using this AUX channel
- * @hw_mutex: internal mutex used for locking transfers
- * @crc_work: worker that captures CRCs for each frame
- * @crc_count: counter of captured frame CRCs
- * @transfer: transfers a message representing a single AUX transaction
- *
- * The .dev field should be set to a pointer to the device that implements
- * the AUX channel.
- *
- * The .name field may be used to specify the name of the I2C adapter. If set to
- * NULL, dev_name() of .dev will be used.
- *
- * Drivers provide a hardware-specific implementation of how transactions
- * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
- * structure describing the transaction is passed into this function. Upon
- * success, the implementation should return the number of payload bytes
- * that were transferred, or a negative error-code on failure. Helpers
- * propagate errors from the .transfer() function, with the exception of
- * the -EBUSY error, which causes a transaction to be retried. On a short,
- * helpers will return -EPROTO to make it simpler to check for failure.
- *
- * An AUX channel can also be used to transport I2C messages to a sink. A
- * typical application of that is to access an EDID that's present in the
- * sink device. The .transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register() function registers an I2C
- * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
- * should call drm_dp_aux_unregister() to remove the I2C adapter.
- * The I2C adapter uses long transfers by default; if a partial response is
- * received, the adapter will drop down to the size given by the partial
- * response for this transaction only.
- *
- * Note that the aux helper code assumes that the .transfer() function
- * only modifies the reply field of the drm_dp_aux_msg structure. The
- * retry logic and i2c helpers assume this is the case.
- */
-struct drm_dp_aux {
- const char *name;
- struct i2c_adapter ddc;
- struct device *dev;
- struct drm_crtc *crtc;
- struct mutex hw_mutex;
- struct work_struct crc_work;
- u8 crc_count;
- ssize_t (*transfer)(struct drm_dp_aux *aux,
- struct drm_dp_aux_msg *msg);
- /**
- * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
- */
- unsigned i2c_nack_count;
- /**
- * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
- */
- unsigned i2c_defer_count;
- /**
- * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
- */
- struct drm_dp_aux_cec cec;
- /**
- * @is_remote: Is this AUX CH actually using sideband messaging.
- */
- bool is_remote;
-};
-
-ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size);
-ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size);
-
-/**
- * drm_dp_dpcd_readb() - read a single byte from the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to read
- * @valuep: location where the value of the register will be stored
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
- */
-static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
- unsigned int offset, u8 *valuep)
-{
- return drm_dp_dpcd_read(aux, offset, valuep, 1);
-}
-
-/**
- * drm_dp_dpcd_writeb() - write a single byte to the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to write
- * @value: value to write to the register
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
- */
-static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
- unsigned int offset, u8 value)
-{
- return drm_dp_dpcd_write(aux, offset, &value, 1);
-}
-
-int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
- u8 status[DP_LINK_STATUS_SIZE]);
-
-bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
- u8 real_edid_checksum);
-
-int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
-void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4], struct drm_dp_aux *aux);
-
-void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
-void drm_dp_aux_init(struct drm_dp_aux *aux);
-int drm_dp_aux_register(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister(struct drm_dp_aux *aux);
-
-int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
-int drm_dp_stop_crc(struct drm_dp_aux *aux);
-
-struct drm_dp_dpcd_ident {
- u8 oui[3];
- u8 device_id[6];
- u8 hw_rev;
- u8 sw_major_rev;
- u8 sw_minor_rev;
-} __packed;
-
-/**
- * struct drm_dp_desc - DP branch/sink device descriptor
- * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
- * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
- */
-struct drm_dp_desc {
- struct drm_dp_dpcd_ident ident;
- u32 quirks;
-};
-
-int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
- bool is_branch);
-u32 drm_dp_get_edid_quirks(const struct edid *edid);
-
-/**
- * enum drm_dp_quirk - Display Port sink/branch device specific quirks
- *
- * Display Port sink and branch devices in the wild have a variety of bugs, try
- * to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them. Note that because some devices have
- * unreliable OUIDs, the EDID of sinks should also be checked for quirks using
- * drm_dp_get_edid_quirks().
- */
-enum drm_dp_quirk {
- /**
- * @DP_DPCD_QUIRK_CONSTANT_N:
- *
- * The device requires main link attributes Mvid and Nvid to be limited
- * to 16 bits, so a constant value (0x8000) is given for compatibility.
- */
- DP_DPCD_QUIRK_CONSTANT_N,
- /**
- * @DP_DPCD_QUIRK_NO_PSR:
- *
- * The device does not support PSR even if it reports that it does, or
- * the driver still needs to implement proper handling for such a device.
- */
- DP_DPCD_QUIRK_NO_PSR,
- /**
- * @DP_DPCD_QUIRK_NO_SINK_COUNT:
- *
- * The device does not set SINK_COUNT to a non-zero value.
- * The driver should ignore SINK_COUNT during detection.
- */
- DP_DPCD_QUIRK_NO_SINK_COUNT,
- /**
- * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
- *
- * The device supports MST DSC despite not supporting Virtual DPCD.
- * The DSC caps can be read from the physical aux instead.
- */
- DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
- /**
- * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
- *
- * The device is telling the truth when it says that it uses DPCD
- * backlight controls, even if the system's firmware disagrees. This
- * quirk should be checked against both the ident and panel EDID.
- * When present, the driver should honor the DPCD backlight
- * capabilities advertised.
- */
- DP_QUIRK_FORCE_DPCD_BACKLIGHT,
- /**
- * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
- *
- * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
- * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
- */
- DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
-};
-
-/**
- * drm_dp_has_quirk() - does the DP device have a specific quirk
- * @desc: Device descriptor filled by drm_dp_read_desc()
- * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
- * @quirk: Quirk to query for
- *
- * Return true if DP device identified by @desc has @quirk.
- */
-static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
- enum drm_dp_quirk quirk)
-{
- return (desc->quirks | edid_quirks) & BIT(quirk);
-}
-
-#ifdef CONFIG_DRM_DP_CEC
-void drm_dp_cec_irq(struct drm_dp_aux *aux);
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- struct drm_connector *connector);
-void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
-void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
-void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
-#else
-static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
-{
-}
-
-static inline void
-drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- struct drm_connector *connector)
-{
-}
-
-static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
-{
-}
-
-static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
- const struct edid *edid)
-{
-}
-
-static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
-{
-}
-
-#endif
-
-/**
- * struct drm_dp_phy_test_params - DP Phy Compliance parameters
- * @link_rate: Requested Link rate from DPCD 0x219
- * @num_lanes: Number of lanes requested by the sink through DPCD 0x220
- * @phy_pattern: DP Phy test pattern from DPCD 0x248
- * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B
- * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
- * @enhanced_frame_cap: flag for enhanced frame capability.
- */
-struct drm_dp_phy_test_params {
- int link_rate;
- u8 num_lanes;
- u8 phy_pattern;
- u8 hbr2_reset[2];
- u8 custom80[10];
- bool enhanced_frame_cap;
-};
-
-int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
- struct drm_dp_phy_test_params *data);
-int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
- struct drm_dp_phy_test_params *data, u8 dp_rev);
-#endif /* _DRM_DP_HELPER_H_ */
+#endif /* _DRM_DP_H_ */
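The LTTPR accessor macros above rely on each repeater's register block repeating at a fixed 0x50-byte stride, so DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) expands to 0xf0010 + 0x50 = 0xf0060. A read sketch, assuming drm_dp_dpcd_readb() from the relocated drm_dp_helper.h:

	static int example_read_lttpr_pattern(struct drm_dp_aux *aux,
					      enum drm_dp_phy dp_phy, u8 *val)
	{
		/* dp_phy must be one of DP_PHY_LTTPR1..DP_PHY_LTTPR8 */
		return drm_dp_dpcd_readb(aux,
				DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy),
				val);
	}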
diff --git a/include/drm/display/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h
new file mode 100644
index 000000000000..8a0a486383c5
--- /dev/null
+++ b/include/drm/display/drm_dp_aux_bus.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * The DP AUX bus is used for devices that are connected over a DisplayPort
+ * AUX bus. The devices on the far side of the bus are referred to as
+ * endpoints in this code.
+ */
+
+#ifndef _DP_AUX_BUS_H_
+#define _DP_AUX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints
+ *
+ * This is used to instantiate devices that are connected via a DP AUX
+ * bus. Usually the device is a panel, but conceivable other devices could
+ * be hooked up there.
+ */
+struct dp_aux_ep_device {
+ /** @dev: The normal dev pointer */
+ struct device dev;
+ /** @aux: Pointer to the aux bus */
+ struct drm_dp_aux *aux;
+};
+
+struct dp_aux_ep_driver {
+ int (*probe)(struct dp_aux_ep_device *aux_ep);
+ void (*remove)(struct dp_aux_ep_device *aux_ep);
+ void (*shutdown)(struct dp_aux_ep_device *aux_ep);
+ struct device_driver driver;
+};
+
+static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev)
+{
+ return container_of(dev, struct dp_aux_ep_device, dev);
+}
+
+static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct dp_aux_ep_driver, driver);
+}
+
+int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux));
+void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux);
+int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux));
+
+/* Deprecated versions of the above functions. To be removed when there are no callers left. */
+static inline int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = of_dp_aux_populate_bus(aux, NULL);
+
+	/* The new API returns -ENODEV when there are no children; keep the old behavior of returning 0 */
+ return (ret != -ENODEV) ? ret : 0;
+}
+
+static inline int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = devm_of_dp_aux_populate_bus(aux, NULL);
+
+	/* The new API returns -ENODEV when there are no children; keep the old behavior of returning 0 */
+ return (ret != -ENODEV) ? ret : 0;
+}
+
+static inline void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux)
+{
+ of_dp_aux_depopulate_bus(aux);
+}
+
+#define dp_aux_dp_driver_register(aux_ep_drv) \
+ __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE)
+int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv,
+ struct module *owner);
+void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv);
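+
+/*
+ * Example: a minimal, hypothetical endpoint driver (illustrative sketch
+ * only; the "sample" names are made up, and a real driver would also
+ * provide an of_match_table so it binds to its devicetree node):
+ *
+ *	static int sample_ep_probe(struct dp_aux_ep_device *aux_ep)
+ *	{
+ *		... aux_ep->aux is usable for DPCD access from here on ...
+ *		return 0;
+ *	}
+ *
+ *	static struct dp_aux_ep_driver sample_ep_driver = {
+ *		.probe = sample_ep_probe,
+ *		.driver = { .name = "sample_dp_aux_ep" },
+ *	};
+ *
+ *	dp_aux_dp_driver_register(&sample_ep_driver);
+ */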
+
+#endif /* _DP_AUX_BUS_H_ */
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h
index 4c42db81fcb4..7ee482265087 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/display/drm_dp_dual_mode_helper.h
@@ -62,6 +62,7 @@
#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+struct drm_device;
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -103,17 +104,18 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_LSPCON,
};
-enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
-int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+enum drm_dp_dual_mode_type
+drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter);
-int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool *enabled);
-int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
-int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *current_mode);
-int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode reqd_mode);
#endif
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
new file mode 100644
index 000000000000..a62fcd051d4d
--- /dev/null
+++ b/include/drm/display/drm_dp_helper.h
@@ -0,0 +1,826 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+
+#include <drm/display/drm_dp.h>
+#include <drm/drm_connector.h>
+
+struct drm_device;
+struct drm_dp_aux;
+struct drm_panel;
+
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr);
+int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr);
+
+void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_clock_recovery_delay(void);
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux);
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+const char *drm_dp_phy_name(enum drm_dp_phy dp_phy);
+
+/**
+ * struct drm_dp_vsc_sdp - drm DP VSC SDP
+ *
+ * This structure represents a DP VSC SDP in drm.
+ * It is based on the DP 1.4 spec, [Table 2-116: VSC SDP Header Bytes] and
+ * [Table 2-117: VSC SDP Payload for DB16 through DB18].
+ *
+ * @sdp_type: secondary-data packet type
+ * @revision: revision number
+ * @length: number of valid data bytes
+ * @pixelformat: pixel encoding format
+ * @colorimetry: colorimetry format
+ * @bpc: bits per color
+ * @dynamic_range: dynamic range information
+ * @content_type: CTA-861-G defines content types and expected processing by a sink device
+ */
+struct drm_dp_vsc_sdp {
+ unsigned char sdp_type;
+ unsigned char revision;
+ unsigned char length;
+ enum dp_pixelformat pixelformat;
+ enum dp_colorimetry colorimetry;
+ int bpc;
+ enum dp_dynamic_range dynamic_range;
+ enum dp_content_type content_type;
+};
+
+void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc);
+
+bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
+
+static inline int
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
+static inline bool
+drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static inline bool
+drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x12 &&
+ dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
+}
+
+static inline bool
+drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 ||
+ dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5;
+}
+
+static inline bool
+drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x14 &&
+ dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
+}
+
+static inline u8
+drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
+ DP_TRAINING_PATTERN_MASK;
+}
+
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
+
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max Slicewidth = Number of Pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/**
+ * drm_dp_dsc_sink_supports_format() - check if sink supports DSC with given output format
+ * @dsc_dpcd: DSC capability DPCDs of the sink
+ * @output_format: output format to be checked
+ *
+ * Returns true if the sink supports DSC with the given output_format, false otherwise.
+ */
+static inline bool
+drm_dp_dsc_sink_supports_format(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], u8 output_format)
+{
+ return dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & output_format;
+}
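+
+/*
+ * Example (illustrative sketch, error handling elided): gating DSC on
+ * RGB support before querying the slice count. Here dsc_dpcd is assumed
+ * to hold the sink's DSC capability registers starting at DP_DSC_SUPPORT,
+ * and DP_DSC_RGB is the RGB format bit from drm_dp.h.
+ *
+ *	if (drm_dp_sink_supports_dsc(dsc_dpcd) &&
+ *	    drm_dp_dsc_sink_supports_format(dsc_dpcd, DP_DSC_RGB))
+ *		max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
+ */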
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
+
+static inline bool
+drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
+}
+
+static inline bool
+drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_EDP_CONFIGURATION_CAP] &
+ DP_ALTERNATE_SCRAMBLER_RESET_CAP;
+}
+
+/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */
+static inline bool
+drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_MSA_TIMING_PAR_IGNORED;
+}
+
+/**
+ * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support
+ * @edp_dpcd: The DPCD to check
+ *
+ * Note that currently this function will return %false for panels which support various DPCD
+ * backlight features but which require the brightness be set through PWM, and don't support setting
+ * the brightness level via the DPCD.
+ *
+ * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false
+ * otherwise
+ */
+static inline bool
+drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
+{
+ return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP);
+}
+
+/**
+ * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR
+ * @link_rate: link rate in 10kbits/s units
+ *
+ * Determine if the provided link rate is a UHBR rate.
+ *
+ * Returns: %True if @link_rate is a UHBR rate.
+ */
+static inline bool drm_dp_is_uhbr_rate(int link_rate)
+{
+ return link_rate >= 1000000;
+}
+
+/*
+ * DisplayPort AUX channel
+ */
+
+/**
+ * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
+ * @address: address of the (first) register to access
+ * @request: contains the type of transaction (see DP_AUX_* macros)
+ * @reply: upon completion, contains the reply type of the transaction
+ * @buffer: pointer to a transmission or reception buffer
+ * @size: size of @buffer
+ */
+struct drm_dp_aux_msg {
+ unsigned int address;
+ u8 request;
+ u8 reply;
+ void *buffer;
+ size_t size;
+};
+
+struct cec_adapter;
+struct drm_connector;
+struct drm_edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @connector: the connector this CEC adapter is associated with
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ struct drm_connector *connector;
+ struct delayed_work unregister_work;
+};
+
+/**
+ * struct drm_dp_aux - DisplayPort AUX channel
+ *
+ * An AUX channel can also be used to transport I2C messages to a sink. A
+ * typical application of that is to access an EDID that's present in the sink
+ * device. The @transfer() function can also be used to execute such
+ * transactions. The drm_dp_aux_register() function registers an I2C adapter
+ * that can be passed to drm_probe_ddc(). Upon removal, drivers should call
+ * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long
+ * transfers by default; if a partial response is received, the adapter will
+ * drop down to the size given by the partial response for this transaction
+ * only.
+ */
+struct drm_dp_aux {
+ /**
+ * @name: user-visible name of this AUX channel and the
+ * I2C-over-AUX adapter.
+ *
+ * It's also used to specify the name of the I2C adapter. If set
+ * to %NULL, dev_name() of @dev will be used.
+ */
+ const char *name;
+
+ /**
+ * @ddc: I2C adapter that can be used for I2C-over-AUX
+ * communication
+ */
+ struct i2c_adapter ddc;
+
+ /**
+ * @dev: pointer to struct device that is the parent for this
+ * AUX channel.
+ */
+ struct device *dev;
+
+ /**
+ * @drm_dev: pointer to the &drm_device that owns this AUX channel.
+ * Beware, this may be %NULL before drm_dp_aux_register() has been
+ * called.
+ *
+ * It should be set to the &drm_device that will be using this AUX
+ * channel as early as possible. For many graphics drivers this should
+ * happen before drm_dp_aux_init(), however it's perfectly fine to set
+ * this field later so long as it's assigned before calling
+ * drm_dp_aux_register().
+ */
+ struct drm_device *drm_dev;
+
+ /**
+ * @crtc: backpointer to the crtc that is currently using this
+ * AUX channel
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @hw_mutex: internal mutex used for locking transfers.
+ *
+ * Note that if the underlying hardware is shared among multiple
+ * channels, the driver needs to do additional locking to
+ * prevent concurrent access.
+ */
+ struct mutex hw_mutex;
+
+ /**
+ * @crc_work: worker that captures CRCs for each frame
+ */
+ struct work_struct crc_work;
+
+ /**
+ * @crc_count: counter of captured frame CRCs
+ */
+ u8 crc_count;
+
+ /**
+ * @transfer: transfers a message representing a single AUX
+ * transaction.
+ *
+ * This is a hardware-specific implementation of how
+ * transactions are executed that the drivers must provide.
+ *
+ * A pointer to a &drm_dp_aux_msg structure describing the
+ * transaction is passed into this function. Upon success, the
+ * implementation should return the number of payload bytes that
+ * were transferred, or a negative error-code on failure.
+ *
+ * Helpers will propagate these errors, with the exception of
+ * the %-EBUSY error, which causes a transaction to be retried.
+ * On a short transfer, helpers will return %-EPROTO to make it simpler
+ * to check for failure.
+ *
+ * The @transfer() function must only modify the reply field of
+ * the &drm_dp_aux_msg structure. The retry logic and i2c
+ * helpers assume this is the case.
+ *
+ * Also note that this callback can be called no matter the
+ * state @dev is in and also no matter what state the panel is
+ * in. It's expected:
+ *
+ * - If the @dev providing the AUX bus is currently unpowered then
+ * it will power itself up for the transfer.
+ *
+ * - If we're on eDP (using a drm_panel) and the panel is not in a
+ * state where it can respond (it's not powered or it's in a
+ * low power state) then this function may return an error, but
+ * not crash. It's up to the caller of this code to make sure that
+ * the panel is powered on if getting an error back is not OK. If a
+ * drm_panel driver is initiating a DP AUX transfer it may power
+ * itself up however it wants. All other code should ensure that
+ * the pre_enable() bridge chain (which eventually calls the
+ * drm_panel prepare function) has powered the panel.
+ */
+ ssize_t (*transfer)(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg);
+
+ /**
+ * @wait_hpd_asserted: wait for HPD to be asserted
+ *
+ * This is mainly useful for eDP panels drivers to wait for an eDP
+ * panel to finish powering on. This is an optional function.
+ *
+ * This function will efficiently wait for the HPD signal to be
+ * asserted. The `wait_us` parameter that is passed in says that we
+ * know that the HPD signal is expected to be asserted within `wait_us`
+ * microseconds. This function could wait for longer than `wait_us` if
+ * the logic in the DP controller has a long debouncing time. The
+ * important thing is that if this function returns success that the
+ * DP controller is ready to send AUX transactions.
+ *
+ * This function returns 0 if HPD was asserted or -ETIMEDOUT if time
+ * expired and HPD wasn't asserted. This function should not print
+ * timeout errors to the log.
+ *
+ * The semantics of this function are designed to match the
+ * readx_poll_timeout() function. That means a `wait_us` of 0 means
+ * to wait forever. Like readx_poll_timeout(), this function may sleep.
+ *
+ * NOTE: this function specifically reports the state of the HPD pin
+ * that's associated with the DP AUX channel. This is different from
+ * the HPD concept in much of the rest of DRM which is more about
+ * physical presence of a display. For eDP, for instance, a display is
+ * assumed always present even if the HPD pin is deasserted.
+ */
+ int (*wait_hpd_asserted)(struct drm_dp_aux *aux, unsigned long wait_us);
+
+ /**
+ * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+ */
+ unsigned i2c_nack_count;
+ /**
+ * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+ */
+ unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
+ /**
+ * @is_remote: Is this AUX CH actually using sideband messaging.
+ */
+ bool is_remote;
+
+ /**
+ * @powered_down: If true then the remote endpoint is powered down.
+ */
+ bool powered_down;
+};
+
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+
+/**
+ * drm_dp_dpcd_readb() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure.
+ */
+static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_writeb() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure.
+ */
+static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write(aux, offset, &value, 1);
+}
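+
+/*
+ * Example (illustrative sketch): reading the DPCD revision with the
+ * single-byte helper above.
+ *
+ *	u8 rev;
+ *	ssize_t ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */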
+
+int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
+ u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+ u8 status[DP_LINK_STATUS_SIZE]);
+
+int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
+ enum drm_dp_phy dp_phy,
+ u8 link_status[DP_LINK_STATUS_SIZE]);
+
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum);
+
+int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
+bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 type);
+bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid,
+ struct drm_dp_aux *aux);
+enum drm_mode_subconnector
+drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+void drm_dp_set_subconnector_property(struct drm_connector *connector,
+ enum drm_connector_status status,
+ const u8 *dpcd,
+ const u8 port_cap[4]);
+
+struct drm_dp_desc;
+bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const struct drm_dp_desc *desc);
+int drm_dp_read_sink_count(struct drm_dp_aux *aux);
+
+int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy,
+ u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
+void drm_dp_aux_init(struct drm_dp_aux *aux);
+int drm_dp_aux_register(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+
+int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
+int drm_dp_stop_crc(struct drm_dp_aux *aux);
+
+struct drm_dp_dpcd_ident {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+ struct drm_dp_dpcd_ident ident;
+ u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+ bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs, try
+ * to collect them here. The quirks are shared, but it's up to the drivers to
+ * implement workarounds for them.
+ */
+enum drm_dp_quirk {
+ /**
+ * @DP_DPCD_QUIRK_CONSTANT_N:
+ *
+ * The device requires main link attributes Mvid and Nvid to be limited
+ * to 16 bits, so a constant value (0x8000) is used for compatibility.
+ */
+ DP_DPCD_QUIRK_CONSTANT_N,
+ /**
+ * @DP_DPCD_QUIRK_NO_PSR:
+ *
+ * The device does not support PSR even though it reports PSR support,
+ * or the driver still needs to implement proper handling for such a device.
+ */
+ DP_DPCD_QUIRK_NO_PSR,
+ /**
+ * @DP_DPCD_QUIRK_NO_SINK_COUNT:
+ *
+ * The device does not set SINK_COUNT to a non-zero value.
+ * The driver should ignore SINK_COUNT during detection. Note that
+ * drm_dp_read_sink_count_cap() automatically checks for this quirk.
+ */
+ DP_DPCD_QUIRK_NO_SINK_COUNT,
+ /**
+ * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+ *
+ * The device supports MST DSC despite not supporting Virtual DPCD.
+ * The DSC caps can be read from the physical aux instead.
+ */
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+ /**
+ * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
+ *
+ * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
+ * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
+ */
+ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
+ /**
+ * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC:
+ *
+ * The device applies HBLANK expansion for some modes, but this
+ * requires enabling DSC.
+ */
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+ return desc->quirks & BIT(quirk);
+}
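+
+/*
+ * Example (illustrative sketch, error handling elided): reading the
+ * descriptor once and then checking a quirk before computing Mvid/Nvid.
+ *
+ *	struct drm_dp_desc desc;
+ *
+ *	drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd));
+ *	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N))
+ *		constant_n = true;
+ */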
+
+/**
+ * struct drm_edp_backlight_info - Probed eDP backlight info struct
+ * @pwmgen_bit_count: The pwmgen bit count
+ * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any
+ * @max: The maximum backlight level that may be set
+ * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
+ * @aux_enable: Does the panel support the AUX enable cap?
+ * @aux_set: Does the panel support setting the brightness through AUX?
+ *
+ * This structure contains various data about an eDP backlight, which can be populated by using
+ * drm_edp_backlight_init().
+ */
+struct drm_edp_backlight_info {
+ u8 pwmgen_bit_count;
+ u8 pwm_freq_pre_divider;
+ u16 max;
+
+ bool lsb_reg_used : 1;
+ bool aux_enable : 1;
+ bool aux_set : 1;
+};
+
+int
+drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
+ u16 *current_level, u8 *current_mode);
+int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u16 level);
+int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u16 level);
+int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
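+
+/*
+ * Typical call sequence (illustrative sketch; a driver_pwm_freq_hz of 0
+ * is assumed to mean "no PWM frequency hint", and error handling is
+ * elided): probe the capability, initialize the info struct, then
+ * enable the backlight at the current level.
+ *
+ *	struct drm_edp_backlight_info bl;
+ *	u16 level;
+ *	u8 mode;
+ *
+ *	if (drm_edp_backlight_supported(edp_dpcd)) {
+ *		drm_edp_backlight_init(aux, &bl, 0, edp_dpcd, &level, &mode);
+ *		drm_edp_backlight_enable(aux, &bl, level);
+ *	}
+ */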
+
+#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
+
+int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel,
+ struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void
+drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_attach(struct drm_dp_aux *aux,
+ u16 source_physical_address)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
+/**
+ * struct drm_dp_phy_test_params - DP Phy Compliance parameters
+ * @link_rate: Requested Link rate from DPCD 0x219
+ * @num_lanes: Number of lanes requested by the sink through DPCD 0x220
+ * @phy_pattern: DP Phy test pattern from DPCD 0x248
+ * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B
+ * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
+ * @enhanced_frame_cap: flag for enhanced frame capability.
+ */
+struct drm_dp_phy_test_params {
+ int link_rate;
+ u8 num_lanes;
+ u8 phy_pattern;
+ u8 hbr2_reset[2];
+ u8 custom80[10];
+ bool enhanced_frame_cap;
+};
+
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data);
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data, u8 dp_rev);
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ u8 frl_mode);
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ u8 frl_type);
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
+
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux);
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask);
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux);
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]);
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]);
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 color_spc);
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+
+#define DRM_DP_BW_OVERHEAD_MST BIT(0)
+#define DRM_DP_BW_OVERHEAD_UHBR BIT(1)
+#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2)
+#define DRM_DP_BW_OVERHEAD_FEC BIT(3)
+#define DRM_DP_BW_OVERHEAD_DSC BIT(4)
+
+int drm_dp_bw_overhead(int lane_count, int hactive,
+ int dsc_slice_count,
+ int bpp_x16, unsigned long flags);
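+
+/*
+ * Example (illustrative sketch): overhead for a 4-lane, FEC-protected
+ * MST stream with 1920 active pixels per line at 24 bpp, no DSC
+ * (bpp_x16 is bpp in .4 binary fixed point, i.e. bpp * 16):
+ *
+ *	overhead = drm_dp_bw_overhead(4, 1920, 0, 24 * 16,
+ *				      DRM_DP_BW_OVERHEAD_MST |
+ *				      DRM_DP_BW_OVERHEAD_FEC);
+ */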
+int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes);
+
+ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp);
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 8b9eb4db3381..9b19d8bd520a 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -23,8 +23,9 @@
#define _DRM_DP_MST_HELPER_H_
#include <linux/types.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_fixed.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
@@ -46,22 +47,15 @@ struct drm_dp_mst_topology_ref_history {
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
-struct drm_dp_mst_branch;
-
-/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifier
- * @vcpi: Virtual channel ID.
- * @pbn: Payload Bandwidth Number for this channel
- * @aligned_pbn: PBN aligned with slot size
- * @num_slots: number of slots for this PBN
- */
-struct drm_dp_vcpi {
- int vcpi;
- int pbn;
- int aligned_pbn;
- int num_slots;
+enum drm_dp_mst_payload_allocation {
+ DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
};
+struct drm_dp_mst_branch;
+
/**
* struct drm_dp_mst_port - MST port
* @port_num: port number
@@ -86,6 +80,8 @@ struct drm_dp_vcpi {
* @next: link to next port on this branch device
* @aux: i2c aux transport to talk to device connected to this port, protected
* by &drm_dp_mst_topology_mgr.base.lock.
+ * @passthrough_aux: parent aux to which DSC pass-through requests should be
+ * sent, only set if DSC pass-through is possible.
* @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port.
* @connector: DRM connector this port is connected to. Protected by
@@ -140,9 +136,9 @@ struct drm_dp_mst_port {
*/
struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */
+ struct drm_dp_aux *passthrough_aux;
struct drm_dp_mst_branch *parent;
- struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
@@ -150,12 +146,7 @@ struct drm_dp_mst_port {
* @cached_edid: for DP logical ports - make tiling work by ensuring
* that the EDID for all connectors is read immediately.
*/
- struct edid *cached_edid;
- /**
- * @has_audio: Tracks whether the sink connector to this port is
- * audio-capable.
- */
- bool has_audio;
+ const struct drm_edid *cached_edid;
/**
* @fec_capable: bool indicating if FEC can be supported up to that
@@ -313,6 +304,34 @@ struct drm_dp_remote_i2c_write_ack_reply {
u8 port_number;
};
+struct drm_dp_query_stream_enc_status_ack_reply {
+ /* Bit[23:16]- Stream Id */
+ u8 stream_id;
+
+ /* Bit[15]- Signed */
+ bool reply_signed;
+
+ /* Bit[10:8]- Stream Output Sink Type */
+ bool unauthorizable_device_present;
+ bool legacy_device_present;
+ bool query_capable_device_present;
+
+ /* Bit[12:11]- Stream Output CP Type */
+ bool hdcp_1x_device_present;
+ bool hdcp_2x_device_present;
+
+ /* Bit[4]- Stream Authentication */
+ bool auth_completed;
+
+ /* Bit[3]- Stream Encryption */
+ bool encryption_enabled;
+
+ /* Bit[2]- Stream Repeater Function Present */
+ bool repeater_present;
+
+ /* Bit[1:0]- Stream State */
+ u8 state;
+};
#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
@@ -374,6 +393,15 @@ struct drm_dp_remote_i2c_write {
u8 *bytes;
};
+struct drm_dp_query_stream_enc_status {
+ u8 stream_id;
+ u8 client_id[7]; /* 56-bit nonce */
+ u8 stream_event;
+ bool valid_stream_event;
+ u8 stream_behavior;
+ u8 valid_stream_behavior;
+};
+
/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
u8 port_number;
@@ -422,6 +450,8 @@ struct drm_dp_sideband_msg_req_body {
struct drm_dp_remote_i2c_read i2c_read;
struct drm_dp_remote_i2c_write i2c_write;
+
+ struct drm_dp_query_stream_enc_status enc_status;
} u;
};
@@ -444,6 +474,8 @@ struct drm_dp_sideband_msg_reply_body {
struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
+
+ struct drm_dp_query_stream_enc_status_ack_reply enc_status;
} u;
};
@@ -486,33 +518,107 @@ struct drm_dp_mst_topology_cbs {
void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
-#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
-
-#define DP_PAYLOAD_LOCAL 1
-#define DP_PAYLOAD_REMOTE 2
-#define DP_PAYLOAD_DELETE_LOCAL 3
-
-struct drm_dp_payload {
- int payload_state;
- int start_slot;
- int num_slots;
- int vcpi;
-};
-
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
-struct drm_dp_vcpi_allocation {
+/**
+ * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
+ *
+ * The primary atomic state structure for a given MST payload. Stores information like current
+ * bandwidth allocation, intended action for this payload, etc.
+ */
+struct drm_dp_mst_atomic_payload {
+ /** @port: The MST port assigned to this payload */
struct drm_dp_mst_port *port;
- int vcpi;
+
+ /**
+ * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
+ * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
+ * check time. This shouldn't usually matter, as the start slot should never be relevant for
+ * atomic state computations.
+ *
+ * Since this value is determined at commit time instead of check time, this value is
+ * protected by the MST helpers ensuring that async commits operating on the given topology
+ * never run in parallel. In the event that a driver does need to read this value (e.g. to
+ * inform hardware of the starting timeslot for a payload), the driver may either:
+ *
+ * * Read this field during the atomic commit after
+ * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
+ * previous MST states payload start slots have been copied over to the new state. Note
+ * that a new start slot won't be assigned/removed from this payload until
+ * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
+ * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
+ * get committed to hardware by calling drm_crtc_commit_wait() on each of the
+ * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
+ *
+ * If neither of the two above solutions suffice (e.g. the driver needs to read the start
+ * slot in the middle of an atomic commit without waiting for some reason), then drivers
+ * should cache this value themselves after changing payloads.
+ */
+ s8 vc_start_slot;
+
+ /** @vcpi: The Virtual Channel Payload Identifier */
+ u8 vcpi;
+ /**
+ * @time_slots:
+ * The number of timeslots allocated to this payload from the source DP Tx to
+ * the immediate downstream DP Rx
+ */
+ int time_slots;
+ /** @pbn: The payload bandwidth for this payload */
int pbn;
- bool dsc_enabled;
+
+ /** @delete: Whether or not we intend to delete this payload during this atomic commit */
+ bool delete : 1;
+ /** @dsc_enabled: Whether or not this payload has DSC enabled */
+ bool dsc_enabled : 1;
+
+ /** @payload_allocation_status: The allocation status of this payload */
+ enum drm_dp_mst_payload_allocation payload_allocation_status;
+
+ /** @next: The list node for this payload */
struct list_head next;
};
+/**
+ * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
+ *
+ * This struct represents the atomic state of the toplevel DisplayPort MST manager
+ */
struct drm_dp_mst_topology_state {
+ /** @base: Base private state for atomic */
struct drm_private_state base;
- struct list_head vcpis;
+
+ /** @mgr: The topology manager */
struct drm_dp_mst_topology_mgr *mgr;
+
+ /**
+	 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches; drivers may
+ * modify this to add additional dependencies if needed.
+ */
+ u32 pending_crtc_mask;
+ /**
+ * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
+ * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
+ */
+ struct drm_crtc_commit **commit_deps;
+ /** @num_commit_deps: The number of CRTC commits in @commit_deps */
+ size_t num_commit_deps;
+
+ /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
+ u32 payload_mask;
+ /** @payloads: The list of payloads being created/destroyed in this state */
+ struct list_head payloads;
+
+ /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
+ u8 total_avail_slots;
+ /** @start_slot: The first usable time slot in this topology (1 or 0) */
+ u8 start_slot;
+
+ /**
+ * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
+ * out itself.
+ */
+ fixed20_12 pbn_div;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -595,6 +701,20 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+ * @payload_count: The number of currently active payloads in hardware. This value is only
+ * intended to be used internally by MST helpers for payload tracking, and is only safe to
+ * read/write from the atomic commit (not check) context.
+ */
+ u8 payload_count;
+
+ /**
+ * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
+ * internally by MST helpers for payload tracking, and is only safe to read/write from the
+ * atomic commit (not check) context.
+ */
+ u8 next_start_slot;
+
+ /**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
@@ -607,10 +727,6 @@ struct drm_dp_mst_topology_mgr {
* @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
*/
u8 sink_count;
- /**
- * @pbn_div: PBN to slots divisor.
- */
- int pbn_div;
/**
* @funcs: Atomic helper callbacks
@@ -628,32 +744,6 @@ struct drm_dp_mst_topology_mgr {
struct list_head tx_msg_downq;
/**
- * @payload_lock: Protect payload information.
- */
- struct mutex payload_lock;
- /**
- * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
- * this array is determined by @max_payloads.
- */
- struct drm_dp_vcpi **proposed_vcpis;
- /**
- * @payloads: Array of payloads. The size of this array is determined
- * by @max_payloads.
- */
- struct drm_dp_payload *payloads;
- /**
- * @payload_mask: Elements of @payloads actually in use. Since
- * reallocation of active outputs isn't possible gaps can be created by
- * disabling outputs out of order compared to how they've been enabled.
- */
- unsigned long payload_mask;
- /**
- * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
- */
- unsigned long vcpi_mask;
-
- /**
* @tx_waitq: Wait to queue stall for the tx worker.
*/
wait_queue_head_t tx_waitq;
@@ -728,12 +818,14 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
-
+bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
-
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
+ const u8 *esi,
+ u8 *ack,
+ bool *handled);
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
int
drm_dp_mst_detect_port(struct drm_connector *connector,
@@ -741,32 +833,33 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
-struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
-
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn, int slots);
-
-int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-
-
-int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
- int pbn);
+const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count);
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_calc_pbn_mode(int clock, int bpp);
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
-int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_atomic_state *state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
@@ -788,33 +881,61 @@ int drm_dp_mst_connector_late_register(struct drm_connector *connector,
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
struct drm_dp_mst_port *port);
-struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_atomic_payload *
+drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port);
+bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent);
int __must_check
-drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn,
- int pbn_div);
+ struct drm_dp_mst_port *port, int pbn);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
struct drm_dp_mst_port *port,
- int pbn, int pbn_div,
- bool enable);
+ int pbn, bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int __must_check
-drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
+void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up);
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_query_stream_enc_status_ack_reply *status);
+int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_port **failing_port);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+static inline struct drm_dp_mst_topology_state *
+to_drm_dp_mst_topology_state(struct drm_private_state *state)
+{
+ return container_of(state, struct drm_dp_mst_topology_state, base);
+}
+
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
diff --git a/include/drm/display/drm_dp_tunnel.h b/include/drm/display/drm_dp_tunnel.h
new file mode 100644
index 000000000000..87212c847915
--- /dev/null
+++ b/include/drm/display/drm_dp_tunnel.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_DP_TUNNEL_H__
+#define __DRM_DP_TUNNEL_H__
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_dp_aux;
+
+struct drm_device;
+
+struct drm_atomic_state;
+struct drm_dp_tunnel_mgr;
+struct drm_dp_tunnel_state;
+
+struct ref_tracker;
+
+struct drm_dp_tunnel_ref {
+ struct drm_dp_tunnel *tunnel;
+ struct ref_tracker *tracker;
+};
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL
+
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);
+
+void
+drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);
+
+static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_ref *tunnel_ref)
+{
+ tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker);
+}
+
+static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref)
+{
+ drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker);
+ tunnel_ref->tunnel = NULL;
+}
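+
+/*
+ * Example (illustrative sketch): holding a tracked tunnel reference for
+ * the lifetime of some user.
+ *
+ *	struct drm_dp_tunnel_ref ref;
+ *
+ *	drm_dp_tunnel_ref_get(tunnel, &ref);
+ *	... use ref.tunnel ...
+ *	drm_dp_tunnel_ref_put(&ref);
+ */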
+
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux);
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel);
+bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw);
+int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel);
+
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux);
+
+int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel);
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ u8 stream_id, int bw);
+int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel,
+ u32 *stream_mask);
+
+int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+ u32 *failed_stream_mask);
+
+int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state);
+
+struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count);
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr);
+
+#else
+
+static inline struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
+{
+ return NULL;
+}
+
+static inline void
+drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {}
+
+static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_ref *tunnel_ref) {}
+
+static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {}
+
+static inline struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int
+drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+ return 0;
+}
+
+static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
+{
+ return false;
+}
+
+static inline int
+drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
+{
+ return -1;
+}
+
+static inline int
+drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {}
+
+static inline int
+drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
+{
+ return 0;
+}
+
+static inline int
+drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
+{
+ return 0;
+}
+
+static inline int
+drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return -1;
+}
+
+static inline const char *
+drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+ return NULL;
+}
+
+static inline struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int
+drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ u8 stream_id, int bw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel,
+ u32 *stream_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+ u32 *failed_stream_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
+{
+ return 0;
+}
+
+static inline struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {}
+
+#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */
+
+#endif /* __DRM_DP_TUNNEL_H__ */
diff --git a/include/drm/drm_dsc.h b/include/drm/display/drm_dsc.h
index 887954cbfc60..bc90273d06a6 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/display/drm_dsc.h
@@ -8,7 +8,7 @@
#ifndef DRM_DSC_H_
#define DRM_DSC_H_
-#include <drm/drm_dp_helper.h>
+#include <drm/display/drm_dp.h>
/* VESA Display Stream Compression DSC 1.2 constants */
#define DSC_NUM_BUF_RANGES 15
@@ -273,7 +273,8 @@ struct drm_dsc_config {
};
/**
- * struct picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of
+ * Picture Parameter Set
*
* The VESA DSC standard defines picture parameter set (PPS) which display
* stream compression encoders must communicate to decoders.
@@ -588,7 +589,7 @@ struct drm_dsc_picture_parameter_set {
* This structure represents the DSC PPS infoframe required to send the Picture
* Parameter Set metadata required before enabling VESA Display Stream
* Compression. This is based on the DP Secondary Data Packet structure and
- * comprises of SDP Header as defined &struct struct dp_sdp_header in drm_dp_helper.h
+ * comprises the SDP Header as defined in &struct dp_sdp_header in drm_dp_helper.h
* and PPS payload defined in &struct drm_dsc_picture_parameter_set.
*
* @pps_header: Header for PPS as per DP SDP header format of type
@@ -601,9 +602,4 @@ struct drm_dsc_pps_infoframe {
struct drm_dsc_picture_parameter_set pps_payload;
} __packed;
-void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
-void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
- const struct drm_dsc_config *dsc_cfg);
-int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
-
#endif /* _DRM_DSC_H_ */
diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h
new file mode 100644
index 000000000000..913aa2071232
--- /dev/null
+++ b/include/drm/display/drm_dsc_helper.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_HELPER_H_
+#define DRM_DSC_HELPER_H_
+
+#include <drm/display/drm_dsc.h>
+
+enum drm_dsc_params_type {
+ DRM_DSC_1_2_444,
+ DRM_DSC_1_1_PRE_SCR, /* legacy params from DSC 1.1 */
+ DRM_DSC_1_2_422,
+ DRM_DSC_1_2_420,
+};
+
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+void drm_dsc_set_const_params(struct drm_dsc_config *vdsc_cfg);
+void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg);
+int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type);
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
+u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc);
+u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc);
+u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg);
+
+#endif /* DRM_DSC_HELPER_H_ */
+
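
The helper split above implies a call sequence for building a PPS SDP. A minimal sketch, assuming a DSC 1.2 4:4:4 configuration; example_build_pps() is a hypothetical wrapper, all drm_dsc_* calls follow the declarations above.

#include <drm/display/drm_dsc.h>
#include <drm/display/drm_dsc_helper.h>

static int example_build_pps(struct drm_dsc_config *cfg,
			     struct drm_dsc_pps_infoframe *pps)
{
	int ret;

	/* Fill in the spec-mandated constants and RC buffer thresholds. */
	drm_dsc_set_const_params(cfg);
	drm_dsc_set_rc_buf_thresh(cfg);

	ret = drm_dsc_setup_rc_params(cfg, DRM_DSC_1_2_444);
	if (ret)
		return ret;

	ret = drm_dsc_compute_rc_parameters(cfg);
	if (ret)
		return ret;

	/* Pack the SDP header and the 128-byte PPS payload. */
	drm_dsc_dp_pps_header_init(&pps->pps_header);
	drm_dsc_pps_payload_pack(&pps->pps_payload, cfg);

	return 0;
}
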
diff --git a/include/drm/drm_hdcp.h b/include/drm/display/drm_hdcp.h
index fe58dbb46962..96a99b1377c0 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/display/drm_hdcp.h
@@ -6,8 +6,8 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#ifndef _DRM_HDCP_H_INCLUDED_
-#define _DRM_HDCP_H_INCLUDED_
+#ifndef _DRM_HDCP_H_
+#define _DRM_HDCP_H_
#include <linux/types.h>
@@ -101,11 +101,11 @@
/* Following Macros take a byte at a time for bit(s) masking */
/*
- * TODO: This has to be changed for DP MST, as multiple stream on
- * same port is possible.
- * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ * TODO: HDCP_2_2_MAX_CONTENT_STREAMS_CNT is based upon the actual
+ * H/W MST stream capacity.
+ * It needs to be moved out to a platform-specific header.
*/
-#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 4
#define HDCP_2_2_TXCAP_MASK_LEN 2
#define HDCP_2_2_RXCAPS_LEN 3
#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
@@ -224,11 +224,14 @@ struct hdcp2_rep_stream_ready {
/* HDCP2.2 TIMEOUTs in mSec */
#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_DP_CERT_READ_TIMEOUT_MS 110
#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS 7
#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS 5
#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
-#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 16
#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
@@ -288,16 +291,6 @@ struct hdcp_srm_header {
u8 srm_gen_no;
} __packed;
-struct drm_device;
-struct drm_connector;
-
-int drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
- u8 *ksvs, u32 ksv_count);
-int drm_connector_attach_content_protection_property(
- struct drm_connector *connector, bool hdcp_content_type);
-void drm_hdcp_update_content_protection(struct drm_connector *connector,
- u64 val);
-
/* Content Type classification for HDCP2.2 vs others */
#define DRM_MODE_HDCP_CONTENT_TYPE0 0
#define DRM_MODE_HDCP_CONTENT_TYPE1 1
diff --git a/include/drm/display/drm_hdcp_helper.h b/include/drm/display/drm_hdcp_helper.h
new file mode 100644
index 000000000000..8aaf87bf2735
--- /dev/null
+++ b/include/drm/display/drm_hdcp_helper.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#ifndef _DRM_HDCP_HELPER_H_INCLUDED_
+#define _DRM_HDCP_HELPER_H_INCLUDED_
+
+#include <drm/display/drm_hdcp.h>
+
+struct drm_device;
+struct drm_connector;
+
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count);
+int drm_connector_attach_content_protection_property(struct drm_connector *connector,
+ bool hdcp_content_type);
+void drm_hdcp_update_content_protection(struct drm_connector *connector, u64 val);
+
+#endif
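
A sketch of how a connector driver picks up the relocated helper during connector initialization; the wrapper is hypothetical, and passing true additionally exposes the HDCP Content Type property alongside "Content Protection".

#include <drm/display/drm_hdcp_helper.h>

static int example_connector_init_hdcp(struct drm_connector *connector)
{
	/* Attach the "Content Protection" (and Content Type) properties. */
	return drm_connector_attach_content_protection_property(connector,
								true);
}
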
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
new file mode 100644
index 000000000000..76d234826e22
--- /dev/null
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_HDMI_HELPER
+#define DRM_HDMI_HELPER
+
+#include <linux/hdmi.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+
+void
+drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+#endif
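
These helpers refine an AVI infoframe after the mode-based setup. A sketch under the assumption that drm_hdmi_avi_infoframe_from_display_mode() from <drm/drm_edid.h> is used first; the wrapper name is hypothetical.

#include <drm/drm_edid.h>
#include <drm/display/drm_hdmi_helper.h>

static int example_fill_avi(struct drm_connector *connector,
			    const struct drm_connector_state *conn_state,
			    const struct drm_display_mode *mode,
			    struct hdmi_avi_infoframe *frame)
{
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
	if (ret)
		return ret;

	/* Refine the frame from the committed connector state. */
	drm_hdmi_avi_infoframe_colorimetry(frame, conn_state);
	drm_hdmi_avi_infoframe_bars(frame, conn_state);
	drm_hdmi_avi_infoframe_content_type(frame, conn_state);

	return 0;
}
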
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/display/drm_scdc.h
index 6a483533aae4..3d58f37e8ed8 100644
--- a/include/drm/drm_scdc_helper.h
+++ b/include/drm/display/drm_scdc.h
@@ -21,11 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#ifndef DRM_SCDC_HELPER_H
-#define DRM_SCDC_HELPER_H
-
-#include <linux/i2c.h>
-#include <linux/types.h>
+#ifndef DRM_SCDC_H
+#define DRM_SCDC_H
#define SCDC_SINK_VERSION 0x01
@@ -88,49 +85,4 @@
#define SCDC_MANUFACTURER_SPECIFIC 0xde
#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
-ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
- size_t size);
-ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
- const void *buffer, size_t size);
-
-/**
- * drm_scdc_readb - read a single byte from SCDC
- * @adapter: I2C adapter
- * @offset: offset of register to read
- * @value: return location for the register value
- *
- * Reads a single byte from SCDC. This is a convenience wrapper around the
- * drm_scdc_read() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
- u8 *value)
-{
- return drm_scdc_read(adapter, offset, value, sizeof(*value));
-}
-
-/**
- * drm_scdc_writeb - write a single byte to SCDC
- * @adapter: I2C adapter
- * @offset: offset of register to read
- * @value: return location for the register value
- *
- * Writes a single byte to SCDC. This is a convenience wrapper around the
- * drm_scdc_write() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
- u8 value)
-{
- return drm_scdc_write(adapter, offset, &value, sizeof(value));
-}
-
-bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
-
-bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
-bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
#endif
diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h
new file mode 100644
index 000000000000..34600476a1b9
--- /dev/null
+++ b/include/drm/display/drm_scdc_helper.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/types.h>
+
+#include <drm/display/drm_scdc.h>
+
+struct drm_connector;
+struct i2c_adapter;
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+ size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+ const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+ u8 *value)
+{
+ return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to write
+ * @value: value to write to the register
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
+ u8 value)
+{
+ return drm_scdc_write(adapter, offset, &value, sizeof(value));
+}
+
+bool drm_scdc_get_scrambling_status(struct drm_connector *connector);
+
+bool drm_scdc_set_scrambling(struct drm_connector *connector, bool enable);
+bool drm_scdc_set_high_tmds_clock_ratio(struct drm_connector *connector, bool set);
+
+#endif
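
With the connector-based signatures above, a scrambling toggle might look like this sketch; the 340 MHz threshold is the HDMI 2.0 high-TMDS boundary, and the wrapper is hypothetical.

#include <drm/display/drm_scdc_helper.h>

static void example_set_scrambling(struct drm_connector *connector,
				   unsigned long long tmds_rate)
{
	bool high = tmds_rate > 340 * 1000 * 1000;

	/* Set the clock ratio before toggling scrambling. */
	drm_scdc_set_high_tmds_clock_ratio(connector, high);
	drm_scdc_set_scrambling(connector, high);
}
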
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
new file mode 100644
index 000000000000..f4d3784b1dce
--- /dev/null
+++ b/include/drm/drm_accel.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef DRM_ACCEL_H_
+#define DRM_ACCEL_H_
+
+#include <drm/drm_file.h>
+
+#define ACCEL_MAJOR 261
+#define ACCEL_MAX_MINORS 256
+
+/**
+ * DRM_ACCEL_FOPS - Default DRM accelerator file operations
+ *
+ * This macro provides a shorthand for setting the accelerator file ops in the
+ * &file_operations structure. If all you need are the default ops, use
+ * DEFINE_DRM_ACCEL_FOPS instead.
+ */
+#define DRM_ACCEL_FOPS \
+ .open = accel_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek, \
+ .mmap = drm_gem_mmap
+
+/**
+ * DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerator drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for accelerator
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_ACCEL_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ DRM_ACCEL_FOPS,\
+ }
+
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+
+void accel_core_exit(void);
+int accel_core_init(void);
+void accel_minor_remove(int index);
+int accel_minor_alloc(void);
+void accel_minor_replace(struct drm_minor *minor, int index);
+void accel_set_device_instance_params(struct device *kdev, int index);
+int accel_open(struct inode *inode, struct file *filp);
+void accel_debugfs_init(struct drm_device *dev);
+void accel_debugfs_register(struct drm_device *dev);
+
+#else
+
+static inline void accel_core_exit(void)
+{
+}
+
+static inline int __init accel_core_init(void)
+{
+ /* Return 0 to allow drm_core_init to complete successfully */
+ return 0;
+}
+
+static inline void accel_minor_remove(int index)
+{
+}
+
+static inline int accel_minor_alloc(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void accel_minor_replace(struct drm_minor *minor, int index)
+{
+}
+
+static inline void accel_set_device_instance_params(struct device *kdev, int index)
+{
+}
+
+static inline void accel_debugfs_init(struct drm_device *dev)
+{
+}
+
+static inline void accel_debugfs_register(struct drm_device *dev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_ACCEL) */
+
+#endif /* DRM_ACCEL_H_ */
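
A sketch of the intended use of DEFINE_DRM_ACCEL_FOPS(); the driver name is hypothetical, and DRIVER_COMPUTE_ACCEL is assumed to be the matching feature flag from <drm/drm_drv.h>.

#include <drm/drm_accel.h>
#include <drm/drm_drv.h>

/* Generates a static struct file_operations named example_accel_fops. */
DEFINE_DRM_ACCEL_FOPS(example_accel_fops);

static const struct drm_driver example_accel_driver = {
	.driver_features = DRIVER_COMPUTE_ACCEL,
	.fops = &example_accel_fops,
	.name = "example_accel",
};
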
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
deleted file mode 100644
index 664e120b93e6..000000000000
--- a/include/drm/drm_agpsupport.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DRM_AGPSUPPORT_H_
-#define _DRM_AGPSUPPORT_H_
-
-#include <linux/agp_backend.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/drm/drm.h>
-
-struct drm_device;
-struct drm_file;
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_AGP)
-
-void drm_free_agp(struct agp_memory * handle, int pages);
-int drm_bind_agp(struct agp_memory * handle, unsigned int start);
-int drm_unbind_agp(struct agp_memory * handle);
-
-struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_legacy_agp_clear(struct drm_device *dev);
-int drm_agp_acquire(struct drm_device *dev);
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_release(struct drm_device *dev);
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#else /* CONFIG_AGP */
-
-static inline void drm_free_agp(struct agp_memory * handle, int pages)
-{
-}
-
-static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
-{
- return -ENODEV;
-}
-
-static inline int drm_unbind_agp(struct agp_memory * handle)
-{
- return -ENODEV;
-}
-
-static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline void drm_legacy_agp_clear(struct drm_device *dev)
-{
-}
-
-static inline int drm_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_AGP */
-
-#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
new file mode 100644
index 000000000000..cbe33b49fd5d
--- /dev/null
+++ b/include/drm/drm_aperture.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_APERTURE_H_
+#define _DRM_APERTURE_H_
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_driver;
+struct pci_dev;
+
+int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
+ resource_size_t size);
+
+int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
+ const struct drm_driver *req_driver);
+
+int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ const struct drm_driver *req_driver);
+
+/**
+ * drm_aperture_remove_framebuffers - remove all existing framebuffers
+ * @req_driver: requesting DRM driver
+ *
+ * This function removes all existing framebuffer drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int
+drm_aperture_remove_framebuffers(const struct drm_driver *req_driver)
+{
+ return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1,
+ req_driver);
+}
+
+#endif
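
A sketch of the expected call site: a PCI DRM driver removes conflicting firmware framebuffers at the top of its probe function before taking over the device. The wrapper is hypothetical.

#include <drm/drm_aperture.h>

static int example_probe_prologue(struct pci_dev *pdev,
				  const struct drm_driver *drv)
{
	/* Kick out generic drivers (e.g. efifb) using this device's BARs. */
	return drm_aperture_remove_conflicting_pci_framebuffers(pdev, drv);
}
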
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d07c851d255b..4d7f4c5f2001 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -66,6 +66,8 @@
*
* For an implementation of how to use this look at
* drm_atomic_helper_setup_commit() from the atomic helper library.
+ *
+ * See also drm_crtc_commit_wait().
*/
struct drm_crtc_commit {
/**
@@ -225,6 +227,18 @@ struct drm_private_state_funcs {
*/
void (*atomic_destroy_state)(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses &struct drm_private_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_private_obj_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_private_state *state);
};
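
A sketch of a driver implementing the new @atomic_print_state hook for a subclassed private state; the example_state type and its field are hypothetical, and the mandatory duplicate/destroy hooks are omitted for brevity.

#include <linux/container_of.h>

#include <drm/drm_atomic.h>
#include <drm/drm_print.h>

struct example_state {
	struct drm_private_state base;
	unsigned int bandwidth;
};

static void example_print_state(struct drm_printer *p,
				const struct drm_private_state *state)
{
	const struct example_state *s =
		container_of(state, struct example_state, base);

	/* Dump the driver-specific fields below the core's output. */
	drm_printf(p, "\tbandwidth=%u\n", s->bandwidth);
}

static const struct drm_private_state_funcs example_state_funcs = {
	/* .atomic_duplicate_state and .atomic_destroy_state omitted */
	.atomic_print_state = example_print_state,
};
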
/**
@@ -248,6 +262,26 @@ struct drm_private_state_funcs {
* drm_dev_register()
* 2/ all calls to drm_atomic_private_obj_fini() must be done after calling
* drm_dev_unregister()
+ *
+ * If that private object is used to store a state shared by multiple
+ * CRTCs, proper care must be taken to ensure that non-blocking commits are
+ * properly ordered to avoid a use-after-free issue.
+ *
+ * Indeed, assuming a sequence of two non-blocking &drm_atomic_commit on two
+ * different &drm_crtc using different &drm_plane and &drm_connector, so with no
+ * resources shared, there's no guarantee on which commit is going to happen
+ * first. However, the second &drm_atomic_commit will consider the
+ * &drm_private_state produced by the first one as its old state, and will be
+ * in charge of freeing it whenever the second &drm_atomic_commit is done.
+ *
+ * If the first &drm_atomic_commit completes after that, it will still consider
+ * that &drm_private_state its new state and is likely to access it, resulting
+ * in an access to a freed memory region. Drivers should store (and get a
+ * reference to) the &drm_crtc_commit structure in their private state in
+ * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that
+ * commit to complete as the first step of
+ * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to
+ * drm_atomic_helper_wait_for_dependencies().
*/
struct drm_private_obj {
/**
@@ -289,14 +323,21 @@ struct drm_private_obj {
/**
* struct drm_private_state - base struct for driver private object state
- * @state: backpointer to global drm_atomic_state
*
- * Currently only contains a backpointer to the overall atomic update, but in
- * the future also might hold synchronization information similar to e.g.
- * &drm_crtc.commit.
+ * Currently only contains a backpointer to the overall atomic update,
+ * and the relevant private object but in the future also might hold
+ * synchronization information similar to e.g. &drm_crtc.commit.
*/
struct drm_private_state {
+ /**
+ * @state: backpointer to global drm_atomic_state
+ */
struct drm_atomic_state *state;
+
+ /**
+ * @obj: backpointer to the private object
+ */
+ struct drm_private_obj *obj;
};
struct __drm_private_objs_state {
@@ -305,26 +346,29 @@ struct __drm_private_objs_state {
};
/**
- * struct drm_atomic_state - the global state object for atomic updates
- * @ref: count of all references to this state (will not be freed until zero)
- * @dev: parent DRM device
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @async_update: hint for asynchronous plane update
- * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
- * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of structures with per-connector data
- * @num_private_objs: size of the @private_objs array
- * @private_objs: pointer to array of private object pointers
- * @acquire_ctx: acquire context for this atomic modeset state update
+ * struct drm_atomic_state - Atomic commit structure
+ *
+ * This structure is the kernel counterpart of @drm_mode_atomic and represents
+ * an atomic commit that transitions from an old to a new display state. It
+ * contains all the objects affected by the atomic commit and both the new
+ * state structures and pointers to the old state structures for
+ * these.
*
* States are added to an atomic update by calling drm_atomic_get_crtc_state(),
* drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
* private state structures, drm_atomic_get_private_obj_state().
*/
struct drm_atomic_state {
+ /**
+ * @ref:
+ *
+ * Count of all references to this update (will not be freed until zero).
+ */
struct kref ref;
+ /**
+ * @dev: Parent DRM Device.
+ */
struct drm_device *dev;
/**
@@ -336,8 +380,24 @@ struct drm_atomic_state {
* drm_atomic_crtc_needs_modeset().
*/
bool allow_modeset : 1;
+ /**
+ * @legacy_cursor_update:
+ *
+ * Hint to enforce legacy cursor IOCTL semantics.
+ *
+ * WARNING: This is thoroughly broken and pretty much impossible to
+ * implement correctly. Drivers must ignore this and should instead
+ * implement &drm_plane_helper_funcs.atomic_async_check and
+ * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this
+ * flag are not allowed.
+ */
bool legacy_cursor_update : 1;
+
+ /**
+ * @async_update: hint for asynchronous plane update
+ */
bool async_update : 1;
+
/**
* @duplicated:
*
@@ -347,13 +407,52 @@ struct drm_atomic_state {
* states.
*/
bool duplicated : 1;
+
+ /**
+ * @planes:
+ *
+ * Pointer to array of @drm_plane and @drm_plane_state part of this
+ * update.
+ */
struct __drm_planes_state *planes;
+
+ /**
+ * @crtcs:
+ *
+ * Pointer to array of @drm_crtc and @drm_crtc_state part of this
+ * update.
+ */
struct __drm_crtcs_state *crtcs;
+
+ /**
+ * @num_connector: size of the @connectors array
+ */
int num_connector;
+
+ /**
+ * @connectors:
+ *
+ * Pointer to array of @drm_connector and @drm_connector_state part of
+ * this update.
+ */
struct __drm_connnectors_state *connectors;
+
+ /**
+ * @num_private_objs: size of the @private_objs array
+ */
int num_private_objs;
+
+ /**
+ * @private_objs:
+ *
+ * Pointer to array of @drm_private_obj and @drm_private_obj_state part
+ * of this update.
+ */
struct __drm_private_objs_state *private_objs;
+ /**
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
struct drm_modeset_acquire_ctx *acquire_ctx;
/**
@@ -406,6 +505,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
kref_put(&commit->ref, __drm_crtc_commit_free);
}
+int drm_crtc_commit_wait(struct drm_crtc_commit *commit);
+
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -462,17 +563,24 @@ struct drm_private_state * __must_check
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+
+struct drm_crtc *
+drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+struct drm_crtc *
+drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
@@ -487,7 +595,7 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
* @drm_atomic_get_new_crtc_state should be used instead.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].state;
@@ -502,7 +610,7 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_old_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].old_state;
@@ -516,7 +624,7 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].new_state;
@@ -534,7 +642,7 @@ drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
* @drm_atomic_get_new_plane_state should be used instead.
*/
static inline struct drm_plane_state *
-drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].state;
@@ -549,7 +657,7 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_old_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].old_state;
@@ -564,7 +672,7 @@ drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].new_state;
@@ -582,7 +690,7 @@ drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
* @drm_atomic_get_new_connector_state should be used instead.
*/
static inline struct drm_connector_state *
-drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -602,7 +710,7 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -622,7 +730,7 @@ drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -660,7 +768,7 @@ drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
* Read-only pointer to the current plane state.
*/
static inline const struct drm_plane_state *
-__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+__drm_atomic_get_current_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
if (state->planes[drm_plane_index(plane)].state)
@@ -773,7 +881,8 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
(old_crtc_state) = (__state)->crtcs[__i].old_state, \
(void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \
- (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, \
+ (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -792,6 +901,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->crtcs[__i].ptr && \
((crtc) = (__state)->crtcs[__i].ptr, \
+ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
(old_crtc_state) = (__state)->crtcs[__i].old_state, 1))
/**
@@ -860,6 +970,22 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_new_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order, tracking only the new state
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
@@ -951,6 +1077,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
for ((__i) = 0; \
(__i) < (__state)->num_private_objs && \
((obj) = (__state)->private_objs[__i].ptr, \
+ (void)(obj) /* Only to avoid unused-but-set-variable warning */, \
(new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
(__i)++)
@@ -1047,7 +1174,7 @@ struct drm_bridge_state {
struct drm_bus_cfg input_bus_cfg;
/**
- * @output_bus_cfg: input bus configuration
+ * @output_bus_cfg: output bus configuration
*/
struct drm_bus_cfg output_bus_cfg;
};
@@ -1062,10 +1189,10 @@ struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
#endif /* DRM_ATOMIC_H_ */
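
A sketch of the commit-ordering scheme recommended by the &drm_private_obj documentation above. The example_* state type and lookup helpers are hypothetical; drm_crtc_commit_get(), drm_crtc_commit_wait() and the &drm_mode_config_helper_funcs hooks are from the DRM core.

#include <linux/err.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

struct example_shared_state {
	struct drm_private_state base;
	/* Commit that produced this state; the next commit waits on it. */
	struct drm_crtc_commit *commit;
};

static int example_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct example_shared_state *s;

	s = example_get_new_shared_state(state);	/* hypothetical lookup */
	if (IS_ERR(s))
		return PTR_ERR(s);

	/* Remember (and reference) the commit owning this new state. */
	s->commit = drm_crtc_commit_get(example_owning_commit(state));

	return 0;
}

static void example_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct example_shared_state *old_s;

	old_s = example_get_old_shared_state(state);	/* hypothetical lookup */

	/* First step: wait for the commit that produced the old state. */
	if (old_s && old_s->commit)
		drm_crtc_commit_wait(old_s->commit);

	drm_atomic_helper_commit_tail(state);
}
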
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index b268180c97eb..9aa0a05aa072 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -34,12 +34,23 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
+/*
+ * Drivers that don't allow primary plane scaling may pass this macro in place
+ * of the min/max scale parameters of the plane-state checker function.
+ *
+ * Due to src being in 16.16 fixed point and dest being in integer pixels,
+ * 1<<16 represents no scaling.
+ */
+#define DRM_PLANE_NO_SCALING (1<<16)
+
struct drm_atomic_state;
struct drm_private_obj;
struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
@@ -48,6 +59,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
bool can_update_disabled);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
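
A sketch of a plane that cannot scale passing the new macro as both bounds of the plane-state checker, so any src/dest size mismatch fails the check; the wrapper is hypothetical, and the trailing bools are can_position and can_update_disabled.

#include <drm/drm_atomic_helper.h>

static int example_plane_check(struct drm_plane_state *new_state,
			       const struct drm_crtc_state *crtc_state)
{
	/* min == max == 1<<16 rejects any scaling of the plane. */
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   false, true);
}
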
@@ -74,6 +86,9 @@ void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state);
+void
+drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state);
+
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
@@ -81,6 +96,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
@@ -144,10 +161,6 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -164,7 +177,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
/**
- * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
* @plane: the loop cursor
* @crtc_state: the incoming CRTC state
*
@@ -177,7 +190,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
/**
- * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
+ * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
* @plane: the loop cursor
* @plane_state: loop cursor for the plane's state, must be const
* @crtc_state: the incoming CRTC state
@@ -198,6 +211,32 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
plane)))
/**
+ * drm_atomic_plane_enabling - check whether a plane is being enabled
+ * @old_plane_state: old atomic plane state
+ * @new_plane_state: new atomic plane state
+ *
+ * Checks the atomic state of a plane to determine whether it's being enabled
+ * or not. This also WARNs if it detects an invalid state (both CRTC and FB
+ * need to either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being enabled, false otherwise.
+ */
+static inline bool drm_atomic_plane_enabling(struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ /*
+ * When enabling a plane, CRTC and FB should always be set together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((!new_plane_state->crtc && new_plane_state->fb) ||
+ (new_plane_state->crtc && !new_plane_state->fb));
+
+ return !old_plane_state->crtc && new_plane_state->crtc;
+}
+
+/**
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @old_plane_state: old atomic plane state
* @new_plane_state: new atomic plane state
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 3f8f1d627f7c..b9740edb2658 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
+struct drm_atomic_state;
struct drm_bridge;
struct drm_bridge_state;
struct drm_crtc;
@@ -71,6 +72,9 @@ void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector);
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state);
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
index 8cec52ad1277..4c6d39d7bdb2 100644
--- a/include/drm/drm_atomic_uapi.h
+++ b/include/drm/drm_atomic_uapi.h
@@ -49,8 +49,6 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
-void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
- struct dma_fence *fence);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index a45f93487039..5a4cd1fa8e2a 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -4,6 +4,9 @@
#ifndef _DRM_AUDIO_COMPONENT_H_
#define _DRM_AUDIO_COMPONENT_H_
+#include <linux/completion.h>
+#include <linux/types.h>
+
struct drm_audio_component;
struct device;
@@ -117,6 +120,10 @@ struct drm_audio_component {
* @audio_ops: Ops implemented by hda driver, called by DRM driver
*/
const struct drm_audio_component_audio_ops *audio_ops;
+ /**
+ * @master_bind_complete: completion held during component master binding
+ */
+ struct completion master_bind_complete;
};
#endif /* _DRM_AUDIO_COMPONENT_H_ */
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 6bf8b2b78991..50131383ed81 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,24 +33,6 @@
#include <linux/wait.h>
struct drm_file;
-struct drm_hw_lock;
-
-/*
- * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
- * include ordering reasons.
- *
- * DO NOT USE.
- */
-struct drm_lock_data {
- struct drm_hw_lock *hw_lock;
- struct drm_file *file_priv;
- wait_queue_head_t lock_queue;
- unsigned long lock_time;
- spinlock_t spinlock;
- uint32_t kernel_waiters;
- uint32_t user_waiters;
- int idle_has_lock;
-};
/**
* struct drm_master - drm master structure
@@ -58,12 +40,6 @@ struct drm_lock_data {
* @refcount: Refcount for this master object.
* @dev: Link back to the DRM device
* @driver_priv: Pointer to driver-private information.
- * @lessor: Lease holder
- * @lessee_id: id for lessees. Owners always have id 0
- * @lessee_list: other lessees of the same master
- * @lessees: drm_masters leasing from this one
- * @leases: Objects leased to this drm_master.
- * @lessee_idr: All lessees under this owner (only used where lessor == NULL)
*
* Note that master structures are only relevant for the legacy/primary device
* nodes, hence there can only be one per device, not one per drm_minor.
@@ -88,25 +64,73 @@ struct drm_master {
struct idr magic_map;
void *driver_priv;
- /* Tree of display resource leases, each of which is a drm_master struct
- * All of these get activated simultaneously, so drm_device master points
- * at the top of the tree (for which lessor is NULL). Protected by
- * &drm_device.mode_config.idr_mutex.
+ /**
+ * @lessor:
+ *
+ * Lease grantor, only set if this &struct drm_master represents a
+ * lessee holding a lease of objects from @lessor. Full owners of the
+ * device have this set to NULL.
+ *
+ * The lessor does not change once it's set in drm_lease_create(), and
+ * each lessee holds a reference to its lessor that it releases upon
+ * being destroyed in drm_lease_destroy().
+ *
+ * See also the :ref:`section on display resource leasing
+ * <drm_leasing>`.
*/
-
struct drm_master *lessor;
+
+ /**
+ * @lessee_id:
+ *
+ * ID for lessees. Owners (i.e. @lessor is NULL) always have ID 0.
+ * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ */
int lessee_id;
+
+ /**
+ * @lessee_list:
+ *
+ * List entry of lessees of @lessor, where they are linked to @lessees.
+ * Not used for owners. Protected by &drm_device.mode_config's
+ * &drm_mode_config.idr_mutex.
+ */
struct list_head lessee_list;
+
+ /**
+ * @lessees:
+ *
+ * List of drm_masters leasing from this one. Protected by
+ * &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ *
+ * This list is empty if no leases have been granted, or if all lessees
+ * have been destroyed. Since lessors are referenced by all their
+ * lessees, this master cannot be destroyed unless the list is empty.
+ */
struct list_head lessees;
+
+ /**
+ * @leases:
+ *
+ * Objects leased to this drm_master. Protected by
+ * &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ *
+ * Objects are leased all together in drm_lease_create(), and are
+ * removed all together when the lease is revoked.
+ */
struct idr leases;
+
+ /**
+ * @lessee_idr:
+ *
+ * All lessees under this owner (only used where @lessor is NULL).
+ * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ */
struct idr lessee_idr;
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- struct drm_lock_data lock;
-#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
+struct drm_master *drm_file_get_master(struct drm_file *file_priv);
void drm_master_put(struct drm_master **master);
bool drm_is_current_master(struct drm_file *fpriv);
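
A sketch of the new drm_file_get_master() accessor: it returns a referenced master (or NULL) that must be balanced with drm_master_put(). The wrapper is hypothetical; @lessor being non-NULL marks a lessee per the documentation above.

#include <drm/drm_auth.h>

static bool example_file_is_lessee(struct drm_file *file_priv)
{
	struct drm_master *master = drm_file_get_master(file_priv);
	bool lessee;

	if (!master)
		return false;

	lessee = master->lessor != NULL;
	drm_master_put(&master);

	return lessee;
}
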
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 2195daa289d2..3606e1a7f965 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -32,10 +32,13 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct device_node;
+
struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
struct drm_display_info;
+struct drm_minor;
struct drm_panel;
struct edid;
struct i2c_adapter;
@@ -104,7 +107,7 @@ struct drm_bridge_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup.
*
@@ -171,6 +174,11 @@ struct drm_bridge_funcs {
* signals) feeding it is still running when this callback is called.
*
* The @disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_disable.
*/
void (*disable)(struct drm_bridge *bridge);
@@ -186,10 +194,15 @@ struct drm_bridge_funcs {
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
- * singals) feeding it is no longer running when this callback is
+ * signals) feeding it is no longer running when this callback is
* called.
*
* The @post_disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_post_disable.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -215,9 +228,9 @@ struct drm_bridge_funcs {
*
* NOTE:
*
- * If a need arises to store and access modes adjusted for other
- * locations than the connection between the CRTC and the first bridge,
- * the DRM framework will have to be extended with DRM bridge states.
+ * This is deprecated, do not use!
+ * New drivers shall set their mode in the
+ * &drm_bridge_funcs.atomic_enable operation.
*/
void (*mode_set)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -239,6 +252,11 @@ struct drm_bridge_funcs {
* there is one) when this callback is called.
*
* The @pre_enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_pre_enable.
*/
void (*pre_enable)(struct drm_bridge *bridge);
@@ -259,6 +277,11 @@ struct drm_bridge_funcs {
* chain if there is one.
*
* The @enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_enable.
*/
void (*enable)(struct drm_bridge *bridge);
@@ -277,12 +300,6 @@ struct drm_bridge_funcs {
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
- * implementation of @pre_enable if you are expecting driver calls into
- * &drm_bridge_chain_pre_enable.
- *
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
@@ -303,11 +320,6 @@ struct drm_bridge_funcs {
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
- * It would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_chain_enable.
- *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@@ -325,12 +337,6 @@ struct drm_bridge_funcs {
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_disable. It would be prudent to also provide an
- * implementation of @disable if you are expecting driver calls into
- * &drm_bridge_chain_disable.
- *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@@ -350,13 +356,6 @@ struct drm_bridge_funcs {
* signals) feeding it is no longer running when this callback is
* called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_post_disable.
- * It would be prudent to also provide an implementation of
- * @post_disable if you are expecting driver calls into
- * &drm_bridge_chain_post_disable.
- *
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
@@ -427,11 +426,11 @@ struct drm_bridge_funcs {
*
* The returned array must be allocated with kmalloc() and will be
* freed by the caller. If the allocation fails, NULL should be
- * returned. num_output_fmts must be set to the returned array size.
+ * returned. num_input_fmts must be set to the returned array size.
* Formats listed in the returned array should be listed in decreasing
* preference order (the core will try all formats until it finds one
* that works). When the format is not supported NULL should be
- * returned and num_output_fmts should be set to 0.
+ * returned and num_input_fmts should be set to 0.
*
* This method is called on all elements of the bridge chain as part of
* the bus format negotiation process that happens in
@@ -559,11 +558,11 @@ struct drm_bridge_funcs {
struct drm_connector *connector);
/**
- * @get_edid:
+ * @edid_read:
*
- * Read and parse the EDID data of the connected display.
+ * Read the EDID data of the connected display.
*
- * The @get_edid callback is the preferred way of reporting mode
+ * The @edid_read callback is the preferred way of reporting mode
* information for a display connected to the bridge output. Bridges
* that support reading EDID shall implement this callback and leave
* the @get_modes callback unimplemented.
@@ -576,17 +575,18 @@ struct drm_bridge_funcs {
* DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
*
* The connector parameter shall be used for the sole purpose of EDID
- * retrieval and parsing, and shall not be stored internally by bridge
- * drivers for future usage.
+ * retrieval, and shall not be stored internally by bridge drivers for
+ * future usage.
*
* RETURNS:
*
- * An edid structure newly allocated with kmalloc() (or similar) on
- * success, or NULL otherwise. The caller is responsible for freeing
- * the returned edid structure with kfree().
+ * An edid structure newly allocated with drm_edid_alloc() or returned
+ * from drm_edid_read() family of functions on success, or NULL
+ * otherwise. The caller is responsible for freeing the returned edid
+ * structure with drm_edid_free().
*/
- struct edid *(*get_edid)(struct drm_bridge *bridge,
- struct drm_connector *connector);
+ const struct drm_edid *(*edid_read)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @hpd_notify:
@@ -629,6 +629,13 @@ struct drm_bridge_funcs {
* the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
*/
void (*hpd_disable)(struct drm_bridge *bridge);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows bridges to create bridge-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_bridge *bridge, struct dentry *root);
};
/**
@@ -712,10 +719,8 @@ struct drm_bridge {
struct drm_encoder *encoder;
/** @chain_node: used to form a bridge chain */
struct list_head chain_node;
-#ifdef CONFIG_OF
/** @of_node: device node pointer to the bridge */
struct device_node *of_node;
-#endif
/** @list: to keep track of all added bridges */
struct list_head list;
/**
@@ -742,6 +747,14 @@ struct drm_bridge {
*/
bool interlace_allowed;
/**
+ * @pre_enable_prev_first: The bridge requires that the previous bridge's
+ * @pre_enable function is called before its own @pre_enable, and
+ * conversely for @post_disable. This is most frequently a
+ * requirement for DSI devices which need the host to be initialised
+ * before the peripheral.
+ */
+ bool pre_enable_prev_first;
+ /**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
@@ -769,12 +782,21 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
}
void drm_bridge_add(struct drm_bridge *bridge);
+int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
-struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous,
enum drm_bridge_attach_flags flags);
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+#else
+static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
@@ -840,13 +862,9 @@ enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
-void drm_bridge_chain_disable(struct drm_bridge *bridge);
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_chain_enable(struct drm_bridge *bridge);
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
@@ -871,8 +889,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector);
-struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector);
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector);
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
void (*cb)(void *data,
enum drm_connector_status status),
@@ -882,16 +900,55 @@ void drm_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status);
#ifdef CONFIG_DRM_PANEL_BRIDGE
+bool drm_bridge_is_panel(const struct drm_bridge *bridge);
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
u32 connector_type);
void drm_panel_bridge_remove(struct drm_bridge *bridge);
+int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge);
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel);
struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_panel *panel,
u32 connector_type);
+struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
+ struct drm_panel *panel);
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
+#else
+static inline bool drm_bridge_is_panel(const struct drm_bridge *bridge)
+{
+ return false;
+}
+
+static inline int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
+ u32 port, u32 endpoint);
+struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, struct device_node *node,
+ u32 port, u32 endpoint);
+#else
+static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
#endif
#endif
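A sketch of the devm_ lookup in a probe path. The choice of port 0, endpoint 0 is an assumption about the device's OF graph; error handling follows the ERR_PTR convention of the stubs above.

#include <drm/drm_bridge.h>
#include <linux/device.h>

static int my_probe_get_next_bridge(struct device *dev,
				    struct drm_bridge **out)
{
	struct drm_bridge *next;

	/* port 0 / endpoint 0 are assumptions about this OF graph */
	next = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(next))
		return dev_err_probe(dev, PTR_ERR(next),
				     "failed to get next bridge\n");

	*out = next;
	return 0;
}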
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
index 33f6c3bbdb4a..69630815fb09 100644
--- a/include/drm/drm_bridge_connector.h
+++ b/include/drm/drm_bridge_connector.h
@@ -10,8 +10,6 @@ struct drm_connector;
struct drm_device;
struct drm_encoder;
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_encoder *encoder);
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
new file mode 100644
index 000000000000..a5b39fc01003
--- /dev/null
+++ b/include/drm/drm_buddy.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __DRM_BUDDY_H__
+#define __DRM_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <drm/drm_print.h>
+
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ >= max__ || size__ > max__ - start__; \
+})
+
+#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
+
+struct drm_buddy_block {
+#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define DRM_BUDDY_ALLOCATED (1 << 10)
+#define DRM_BUDDY_FREE (2 << 10)
+#define DRM_BUDDY_SPLIT (3 << 10)
+/* Free bits, reserved for future use */
+#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
+ u64 header;
+
+ struct drm_buddy_block *left;
+ struct drm_buddy_block *right;
+ struct drm_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through drm_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * drm_buddy_free* ownership is given back to the mm.
+ */
+ struct list_head link;
+ struct list_head tmp_link;
+};
+
+/* Order-zero must be at least PAGE_SIZE */
+#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * drm_buddy_alloc* and drm_buddy_free* should suffice.
+ */
+struct drm_buddy {
+ /* Maintain a free list for each order. */
+ struct list_head *free_list;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free, in which case they will
+ * also exist on the respective free list.
+ */
+ struct drm_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least PAGE_SIZE */
+ u64 chunk_size;
+ u64 size;
+ u64 avail;
+};
+
+static inline u64
+drm_buddy_block_offset(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+drm_buddy_block_order(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+drm_buddy_block_state(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+drm_buddy_block_is_allocated(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED;
+}
+
+static inline bool
+drm_buddy_block_is_free(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
+}
+
+static inline bool
+drm_buddy_block_is_split(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT;
+}
+
+static inline u64
+drm_buddy_block_size(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ return mm->chunk_size << drm_buddy_block_order(block);
+}
+
+int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
+
+void drm_buddy_fini(struct drm_buddy *mm);
+
+struct drm_buddy_block *
+drm_get_buddy(struct drm_buddy_block *block);
+
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags);
+
+int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 new_size,
+ struct list_head *blocks);
+
+void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
+
+void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects);
+
+void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
+void drm_buddy_block_print(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ struct drm_printer *p);
+#endif
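A sketch of the allocator lifecycle under the locking rule documented above (a caller-provided mutex around alloc/free); the sizes are illustrative and assume a 64 MiB space managed in 4 KiB chunks.

#include <drm/drm_buddy.h>
#include <linux/sizes.h>

static int my_buddy_demo(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, 64 << 20, SZ_4K);
	if (err)
		return err;

	/* Allocate 1 MiB anywhere in the managed range. */
	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_1M, SZ_4K,
				     &blocks, 0);
	if (!err)
		drm_buddy_free_list(&mm, &blocks);

	drm_buddy_fini(&mm);
	return err;
}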
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d915..08e0e3ffad13 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@
#include <linux/scatterlist.h>
+struct iosys_map;
+
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -65,9 +67,22 @@ static inline bool drm_arch_can_wc_memory(void)
* optimization entirely for ARM and arm64.
*/
return false;
+#elif defined(CONFIG_LOONGARCH)
+ /*
+ * LoongArch maintains cache coherency in hardware, but its WUC attribute
+ * (Weak-ordered UnCached, which is similar to WC) is out of the scope of
+	 * cache coherency mechanism. This means WUC can only be used for write-only
+ * memory regions.
+ */
+ return false;
#else
return true;
#endif
}
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct iosys_map *dst,
+ const struct iosys_map *src,
+ unsigned long len);
#endif
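A sketch of the new iosys_map-based copy helper; buffer pointers and length are placeholders. The helper can pick an accelerated path for write-combined sources where one is available.

#include <drm/drm_cache.h>
#include <linux/iosys-map.h>

static void my_copy_from_vram(void *dst_vaddr, void __iomem *src_vram,
			      unsigned long len)
{
	struct iosys_map dst, src;

	iosys_map_set_vaddr(&dst, dst_vaddr);
	iosys_map_set_vaddr_iomem(&src, src_vram);

	drm_memcpy_from_wc(&dst, &src, len);
}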
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 7aaea665bfc2..d47458ecdac4 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -3,6 +3,7 @@
#ifndef _DRM_CLIENT_H_
#define _DRM_CLIENT_H_
+#include <linux/iosys-map.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -105,6 +106,14 @@ struct drm_client_dev {
* @modesets: CRTC configurations
*/
struct drm_mode_set *modesets;
+
+ /**
+ * @hotplug_failed:
+ *
+	 * Set by client hotplug helpers if hotplugging has failed
+	 * before; it is usually not retried.
+ */
+ bool hotplug_failed;
};
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
@@ -126,11 +135,6 @@ struct drm_client_buffer {
struct drm_client_dev *client;
/**
- * @handle: Buffer handle
- */
- u32 handle;
-
- /**
* @pitch: Buffer pitch
*/
u32 pitch;
@@ -141,9 +145,9 @@ struct drm_client_buffer {
struct drm_gem_object *gem;
/**
- * @vaddr: Virtual address for the buffer
+ * @map: Virtual address for the buffer
*/
- void *vaddr;
+ struct iosys_map map;
/**
* @fb: DRM framebuffer
@@ -155,7 +159,8 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
-void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
int drm_client_modeset_create(struct drm_client_dev *client);
@@ -190,6 +195,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-void drm_client_debugfs_init(struct drm_minor *minor);
+void drm_client_debugfs_init(struct drm_device *dev);
#endif
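With @vaddr replaced by an iosys_map, clients map a buffer as in the sketch below; the buffer is assumed to come from drm_client_framebuffer_create().

#include <drm/drm_client.h>
#include <linux/iosys-map.h>

static int my_client_fill(struct drm_client_buffer *buffer, u8 value)
{
	struct iosys_map map;
	int ret;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		return ret;

	/* Works for both system-memory and I/O-memory mappings. */
	iosys_map_memset(&map, 0, value, buffer->pitch);

	drm_client_buffer_vunmap(buffer);
	return 0;
}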
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 81c298488b0c..ed81741036d7 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -24,6 +24,7 @@
#define __DRM_COLOR_MGMT_H__
#include <linux/ctype.h>
+#include <linux/math64.h>
#include <drm/drm_property.h>
struct drm_crtc;
@@ -36,20 +37,17 @@ struct drm_plane;
*
* Extract a degamma/gamma LUT value provided by user (in the form of
* &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * hardware, following OpenGL int<->float conversion rules
+ * (see e.g. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions).
*/
static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
{
- u32 val = user_input;
- u32 max = 0xffff >> (16 - bit_precision);
-
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
-
- return clamp_val(val, 0, max);
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1),
+ (1 << 16) - 1);
+ else
+ return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1),
+ (1 << 16) - 1);
}
u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
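Worked examples of the rounding rule above: with the OpenGL-style conversion, full-scale input maps exactly to full scale at the target precision, and intermediate values round to nearest rather than truncating. A sketch; expected results are in the comments.

#include <drm/drm_color_mgmt.h>

static void my_lut_extract_examples(void)
{
	/* Full-scale 16-bit input -> full-scale 10-bit output. */
	u32 a = drm_color_lut_extract(0xffff, 10);	/* == 0x3ff */

	/* 0x8000 * 255 / 65535 = 127.5..., rounds up. */
	u32 b = drm_color_lut_extract(0x8000, 8);	/* == 0x80 */

	(void)a;
	(void)b;
}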
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index af145608b5ed..fe88d7fc6b8f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -27,8 +27,10 @@
#include <linux/llist.h>
#include <linux/ctype.h>
#include <linux/hdmi.h>
+#include <linux/notifier.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_util.h>
+#include <drm/drm_property.h>
#include <uapi/drm/drm_mode.h>
@@ -37,9 +39,11 @@ struct drm_modeset_acquire_ctx;
struct drm_device;
struct drm_crtc;
struct drm_encoder;
+struct drm_panel;
struct drm_property;
struct drm_property_blob;
struct drm_printer;
+struct drm_privacy_screen;
struct edid;
struct i2c_adapter;
@@ -84,7 +88,7 @@ enum drm_connector_status {
};
/**
- * enum drm_connector_registration_status - userspace registration status for
+ * enum drm_connector_registration_state - userspace registration status for
* a &drm_connector
*
* This enum is used to track the status of initializing a connector and
@@ -141,6 +145,70 @@ enum subpixel_order {
};
/**
+ * enum drm_connector_tv_mode - Analog TV output mode
+ *
+ * This enum is used to indicate the TV output mode used on an analog TV
+ * connector.
+ *
+ * WARNING: The values of this enum are uABI since they're exposed in the
+ * "TV mode" connector property.
+ */
+enum drm_connector_tv_mode {
+ /**
+ * @DRM_MODE_TV_MODE_NTSC: CCIR System M (aka 525-lines)
+ * together with the NTSC Color Encoding.
+ */
+ DRM_MODE_TV_MODE_NTSC,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_443: Variant of
+ * @DRM_MODE_TV_MODE_NTSC. Uses a color subcarrier frequency
+ * of 4.43 MHz.
+ */
+ DRM_MODE_TV_MODE_NTSC_443,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_J: Variant of @DRM_MODE_TV_MODE_NTSC
+	 * used in Japan. Uses a black level equal to the blanking
+ * level.
+ */
+ DRM_MODE_TV_MODE_NTSC_J,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL: CCIR System B together with the PAL
+ * color system.
+ */
+ DRM_MODE_TV_MODE_PAL,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_M: CCIR System M (aka 525-lines)
+ * together with the PAL color encoding
+ */
+ DRM_MODE_TV_MODE_PAL_M,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_N: CCIR System N together with the PAL
+ * color encoding. It uses 625 lines, but has a color subcarrier
+ * frequency of 3.58MHz, the SECAM color space, and narrower
+ * channels compared to most of the other PAL variants.
+ */
+ DRM_MODE_TV_MODE_PAL_N,
+
+ /**
+ * @DRM_MODE_TV_MODE_SECAM: CCIR System B together with the
+ * SECAM color system.
+ */
+ DRM_MODE_TV_MODE_SECAM,
+
+ /**
+ * @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes.
+ *
+ * Internal implementation detail; this is not uABI.
+ */
+ DRM_MODE_TV_MODE_MAX,
+};
+
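With the enum above, the reworked drm_mode_create_tv_properties() (declared later in this file's diff) takes a bitmask of supported modes. A sketch for a hypothetical driver whose hardware can only output NTSC and PAL.

#include <drm/drm_connector.h>

static int my_create_tv_props(struct drm_device *dev)
{
	/* Hypothetical hardware supporting only NTSC and PAL output. */
	return drm_mode_create_tv_properties(dev,
					     BIT(DRM_MODE_TV_MODE_NTSC) |
					     BIT(DRM_MODE_TV_MODE_PAL));
}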
+/**
* struct drm_scrambling: sink's scrambling support.
*/
struct drm_scrambling {
@@ -175,6 +243,46 @@ struct drm_scdc {
struct drm_scrambling scrambling;
};
+/**
+ * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink
+ *
+ * Describes the DSC support provided by HDMI 2.1 sink.
+ * The information is fetched from additional HFVSDB blocks defined
+ * for HDMI 2.1.
+ */
+struct drm_hdmi_dsc_cap {
+ /** @v_1p2: flag for dsc1.2 version support by sink */
+ bool v_1p2;
+
+ /** @native_420: Does sink support DSC with 4:2:0 compression */
+ bool native_420;
+
+ /**
+	 * @all_bpp: Does sink support all bpp with 4:4:4 or 4:2:2
+ * compressed formats
+ */
+ bool all_bpp;
+
+ /**
+ * @bpc_supported: compressed bpc supported by sink : 10, 12 or 16 bpc
+ */
+ u8 bpc_supported;
+
+	/** @max_slices: maximum number of horizontal slices supported by the sink */
+	u8 max_slices;
+
+	/** @clk_per_slice: max pixel clock in MHz supported per slice */
+	int clk_per_slice;
+
+	/** @max_lanes: DSC max lanes supported for Fixed Rate Link training */
+	u8 max_lanes;
+
+	/** @max_frl_rate_per_lane: maximum FRL rate with DSC per lane */
+	u8 max_frl_rate_per_lane;
+
+	/** @total_chunk_kbytes: max size of chunks in KBs supported per line */
+	u8 total_chunk_kbytes;
+};
/**
* struct drm_hdmi_info - runtime information about the connected HDMI sink
@@ -202,11 +310,17 @@ struct drm_hdmi_info {
*/
unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)];
- /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */
- u64 y420_cmdb_map;
-
/** @y420_dc_modes: bitmap of deep color support index */
u8 y420_dc_modes;
+
+	/** @max_frl_rate_per_lane: maximum Fixed Rate Link (FRL) rate per lane supported by the sink */
+	u8 max_frl_rate_per_lane;
+
+	/** @max_lanes: maximum number of FRL lanes supported by the sink */
+	u8 max_lanes;
+
+ /** @dsc_cap: DSC capabilities of the sink */
+ struct drm_hdmi_dsc_cap dsc_cap;
};
/**
@@ -267,41 +381,151 @@ enum drm_panel_orientation {
* EDID's detailed monitor range
*/
struct drm_monitor_range_info {
- u8 min_vfreq;
- u8 max_vfreq;
+ u16 min_vfreq;
+ u16 max_vfreq;
};
-/*
- * This is a consolidated colorimetry list supported by HDMI and
+/**
+ * struct drm_luminance_range_info - Panel's luminance range for
+ * &drm_display_info, calculated using data in the EDID
+ *
+ * This struct is used to store the luminance range supported by the panel,
+ * as calculated from the EDID's static HDR metadata.
+ *
+ * @min_luminance: minimum supported luminance value
+ *
+ * @max_luminance: maximum supported luminance value
+ */
+struct drm_luminance_range_info {
+ u32 min_luminance;
+ u32 max_luminance;
+};
+
+/**
+ * enum drm_privacy_screen_status - privacy screen status
+ *
+ * This enum is used to track and control the state of the integrated privacy
+ * screen present on some display panels, via the "privacy-screen sw-state"
+ * and "privacy-screen hw-state" properties. Note the _LOCKED enum values
+ * are only valid for the "privacy-screen hw-state" property.
+ *
+ * @PRIVACY_SCREEN_DISABLED:
+ * The privacy-screen on the panel is disabled
+ * @PRIVACY_SCREEN_ENABLED:
+ * The privacy-screen on the panel is enabled
+ * @PRIVACY_SCREEN_DISABLED_LOCKED:
+ * The privacy-screen on the panel is disabled and locked (cannot be changed)
+ * @PRIVACY_SCREEN_ENABLED_LOCKED:
+ * The privacy-screen on the panel is enabled and locked (cannot be changed)
+ */
+enum drm_privacy_screen_status {
+ PRIVACY_SCREEN_DISABLED = 0,
+ PRIVACY_SCREEN_ENABLED,
+ PRIVACY_SCREEN_DISABLED_LOCKED,
+ PRIVACY_SCREEN_ENABLED_LOCKED,
+};
+
+/**
+ * enum drm_colorspace - color space
+ *
+ * This enum is a consolidated colorimetry list supported by HDMI and
* DP protocol standard. The respective connectors will register
* a property with the subset of this list (supported by that
* respective protocol). Userspace will set the colorspace through
* a colorspace property which will be created and exposed to
* userspace.
+ *
+ * DP definitions come from the DP v2.0 spec.
+ * HDMI definitions come from the CTA-861-H spec.
+ *
+ * A note on YCC and RGB variants:
+ *
+ * Since userspace is not aware of the encoding on the wire
+ * (RGB or YCbCr), drivers are free to pick the appropriate
+ * variant, regardless of what userspace selects. E.g., if
+ * BT2020_RGB is selected by userspace, a driver will pick
+ * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
+ *
+ * @DRM_MODE_COLORIMETRY_DEFAULT:
+ * Driver specific behavior.
+ * @DRM_MODE_COLORIMETRY_NO_DATA:
+ * Driver specific behavior.
+ * @DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
+ * (HDMI)
+ * SMPTE ST 170M colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT709_YCC:
+ * (HDMI, DP)
+ * ITU-R BT.709 colorimetry format
+ * @DRM_MODE_COLORIMETRY_XVYCC_601:
+ * (HDMI, DP)
+ * xvYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_XVYCC_709:
+ * (HDMI, DP)
+ * xvYCC709 colorimetry format
+ * @DRM_MODE_COLORIMETRY_SYCC_601:
+ * (HDMI, DP)
+ * sYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_OPYCC_601:
+ * (HDMI, DP)
+ * opYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_OPRGB:
+ * (HDMI, DP)
+ * opRGB colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ * (HDMI, DP)
+ * ITU-R BT.2020 Y'c C'bc C'rc (constant luminance) colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_RGB:
+ * (HDMI, DP)
+ * ITU-R BT.2020 R' G' B' colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_YCC:
+ * (HDMI, DP)
+ * ITU-R BT.2020 Y' C'b C'r colorimetry format
+ * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ * (HDMI)
+ * SMPTE ST 2113 P3D65 colorimetry format
+ * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ * (HDMI)
+ * SMPTE ST 2113 P3DCI colorimetry format
+ * @DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED:
+ * (DP)
+ * RGB wide gamut fixed point colorimetry format
+ * @DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT:
+ * (DP)
+ * RGB wide gamut floating point
+ * (scRGB (IEC 61966-2-2)) colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT601_YCC:
+ * (DP)
+ * ITU-R BT.601 colorimetry format
+ * The DP spec does not say whether this is the 525 or the 625
+ * line version.
+ * @DRM_MODE_COLORIMETRY_COUNT:
+ *	Not a valid value; merely used for counting
*/
-
-/* For Default case, driver will set the colorspace */
-#define DRM_MODE_COLORIMETRY_DEFAULT 0
-/* CEA 861 Normal Colorimetry options */
-#define DRM_MODE_COLORIMETRY_NO_DATA 0
-#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1
-#define DRM_MODE_COLORIMETRY_BT709_YCC 2
-/* CEA 861 Extended Colorimetry Options */
-#define DRM_MODE_COLORIMETRY_XVYCC_601 3
-#define DRM_MODE_COLORIMETRY_XVYCC_709 4
-#define DRM_MODE_COLORIMETRY_SYCC_601 5
-#define DRM_MODE_COLORIMETRY_OPYCC_601 6
-#define DRM_MODE_COLORIMETRY_OPRGB 7
-#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8
-#define DRM_MODE_COLORIMETRY_BT2020_RGB 9
-#define DRM_MODE_COLORIMETRY_BT2020_YCC 10
-/* Additional Colorimetry extension added as part of CTA 861.G */
-#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11
-#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12
-/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
-#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13
-#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14
-#define DRM_MODE_COLORIMETRY_BT601_YCC 15
+enum drm_colorspace {
+ /* For Default case, driver will set the colorspace */
+ DRM_MODE_COLORIMETRY_DEFAULT = 0,
+ /* CEA 861 Normal Colorimetry options */
+ DRM_MODE_COLORIMETRY_NO_DATA = 0,
+ DRM_MODE_COLORIMETRY_SMPTE_170M_YCC = 1,
+ DRM_MODE_COLORIMETRY_BT709_YCC = 2,
+ /* CEA 861 Extended Colorimetry Options */
+ DRM_MODE_COLORIMETRY_XVYCC_601 = 3,
+ DRM_MODE_COLORIMETRY_XVYCC_709 = 4,
+ DRM_MODE_COLORIMETRY_SYCC_601 = 5,
+ DRM_MODE_COLORIMETRY_OPYCC_601 = 6,
+ DRM_MODE_COLORIMETRY_OPRGB = 7,
+ DRM_MODE_COLORIMETRY_BT2020_CYCC = 8,
+ DRM_MODE_COLORIMETRY_BT2020_RGB = 9,
+ DRM_MODE_COLORIMETRY_BT2020_YCC = 10,
+ /* Additional Colorimetry extension added as part of CTA 861.G */
+ DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 = 11,
+ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12,
+ /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13,
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14,
+ DRM_MODE_COLORIMETRY_BT601_YCC = 15,
+ DRM_MODE_COLORIMETRY_COUNT
+};
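Drivers now pass a mask of the colorimetry values they can actually signal; a sketch for an HDMI connector limited to the BT.2020 RGB/YCC variants, using the property helpers declared further down in this file's diff. The function names are taken from this diff; the selection is illustrative.

#include <drm/drm_connector.h>

static int my_attach_colorspace(struct drm_connector *connector)
{
	u32 supported = BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
			BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
	int ret;

	ret = drm_mode_create_hdmi_colorspace_property(connector, supported);
	if (ret)
		return ret;

	return drm_connector_attach_colorspace_property(connector);
}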
/**
* enum drm_bus_flags - bus_flags info for &drm_display_info
@@ -447,9 +671,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
#define DRM_COLOR_FORMAT_RGB444 (1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
-#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
+#define DRM_COLOR_FORMAT_YCBCR444 (1<<1)
+#define DRM_COLOR_FORMAT_YCBCR422 (1<<2)
+#define DRM_COLOR_FORMAT_YCBCR420 (1<<3)
/**
* @panel_orientation: Read only connector property for built-in panels,
@@ -506,6 +730,14 @@ struct drm_display_info {
bool is_hdmi;
/**
+ * @has_audio: True if the sink supports audio.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_monitor_audio() when possible.
+ */
+ bool has_audio;
+
+ /**
* @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
*/
bool has_hdmi_infoframe;
@@ -517,10 +749,16 @@ struct drm_display_info {
bool rgb_quant_range_selectable;
/**
- * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
- * more stuff redundant with @bus_formats.
+ * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes
+ * in RGB 4:4:4. Even more stuff redundant with @bus_formats.
*/
- u8 edid_hdmi_dc_modes;
+ u8 edid_hdmi_rgb444_dc_modes;
+
+ /**
+ * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color
+ * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats.
+ */
+ u8 edid_hdmi_ycbcr444_dc_modes;
/**
* @cea_rev: CEA revision of the HDMI sink.
@@ -541,6 +779,52 @@ struct drm_display_info {
* @monitor_range: Frequency range supported by monitor range descriptor
*/
struct drm_monitor_range_info monitor_range;
+
+ /**
+ * @luminance_range: Luminance range supported by panel
+ */
+ struct drm_luminance_range_info luminance_range;
+
+ /**
+ * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
+ * the DisplayID VESA vendor block. 0 for conventional Single-Stream
+ * Transport (SST), or 2 or 4 MSO streams.
+ */
+ u8 mso_stream_count;
+
+ /**
+ * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ */
+ u8 mso_pixel_overlap;
+
+ /**
+ * @max_dsc_bpp: Maximum DSC target bitrate, if it is set to 0 the
+ * monitor's default value is used instead.
+ */
+ u32 max_dsc_bpp;
+
+ /**
+ * @vics: Array of vics_len VICs. Internal to EDID parsing.
+ */
+ u8 *vics;
+
+ /**
+ * @vics_len: Number of elements in vics. Internal to EDID parsing.
+ */
+ int vics_len;
+
+ /**
+ * @quirks: EDID based quirks. Internal to EDID parsing.
+ */
+ u32 quirks;
+
+ /**
+ * @source_physical_address: Source Physical Address from HDMI
+ * Vendor-Specific Data Block, for CEC usage.
+ *
+ * Defaults to CEC_PHYS_ADDR_INVALID (0xffff).
+ */
+ u16 source_physical_address;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -577,8 +861,10 @@ struct drm_connector_tv_margins {
/**
* struct drm_tv_connector_state - TV connector related states
- * @subconnector: selected subconnector
+ * @select_subconnector: selected subconnector
+ * @subconnector: detected subconnector
* @margins: TV margins
+ * @legacy_mode: Legacy TV mode, driver specific value
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -588,8 +874,10 @@ struct drm_connector_tv_margins {
* @hue: hue in percent
*/
struct drm_tv_connector_state {
+ enum drm_mode_subconnector select_subconnector;
enum drm_mode_subconnector subconnector;
struct drm_connector_tv_margins margins;
+ unsigned int legacy_mode;
unsigned int mode;
unsigned int brightness;
unsigned int contrast;
@@ -705,7 +993,7 @@ struct drm_connector_state {
* colorspace change on Sink. This is most commonly used to switch
* to wider color gamuts like BT2020.
*/
- u32 colorspace;
+ enum drm_colorspace colorspace;
/**
* @writeback_job: Writeback job for writeback connectors
@@ -733,6 +1021,12 @@ struct drm_connector_state {
u8 max_bpc;
/**
+ * @privacy_screen_sw_state: See :ref:`Standard Connector
+ * Properties<standard_connector_properties>`
+ */
+ enum drm_privacy_screen_status privacy_screen_sw_state;
+
+ /**
* @hdr_output_metadata:
* DRM blob property for HDR output metadata
*/
@@ -799,6 +1093,11 @@ struct drm_connector_funcs {
* locks to avoid races with concurrent modeset changes need to use
* &drm_connector_helper_funcs.detect_ctx instead.
*
+	 * Also note that this callback can be called regardless of the
+	 * state the connector is in. Drivers that need the underlying
+ * device to be powered to perform the detection will first need
+ * to make sure it's been properly enabled.
+ *
* RETURNS:
*
* drm_connector_status indicating the connector's status.
@@ -1030,6 +1329,22 @@ struct drm_connector_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_connector_state *state);
+
+ /**
+ * @oob_hotplug_event:
+ *
+ * This will get called when a hotplug-event for a drm-connector
+ * has been received from a source outside the display driver / device.
+ */
+ void (*oob_hotplug_event)(struct drm_connector *connector,
+ enum drm_connector_status status);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows connectors to create connector-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_connector *connector, struct dentry *root);
};
/**
@@ -1070,6 +1385,13 @@ struct drm_cmdline_mode {
bool bpp_specified;
/**
+ * @pixel_clock:
+ *
+ * Pixel Clock in kHz. Optional.
+ */
+ unsigned int pixel_clock;
+
+ /**
* @xres:
*
* Active resolution on the X axis, in pixels.
@@ -1157,6 +1479,18 @@ struct drm_cmdline_mode {
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
+
+ /**
+ * @tv_mode: TV mode standard. See DRM_MODE_TV_MODE_*.
+ */
+ enum drm_connector_tv_mode tv_mode;
+
+ /**
+ * @tv_mode_specified:
+ *
+	 * Was a TV mode explicitly specified on the command line?
+ */
+ bool tv_mode_specified;
};
/**
@@ -1174,6 +1508,14 @@ struct drm_connector {
struct device *kdev;
/** @attr: sysfs attributes */
struct device_attribute *attr;
+ /**
+ * @fwnode: associated fwnode supplied by platform firmware
+ *
+	 * Drivers can set this to associate a fwnode with a connector; drivers
+	 * are expected to get a reference on the fwnode when setting this.
+ * drm_connector_cleanup() will call fwnode_handle_put() on this.
+ */
+ struct fwnode_handle *fwnode;
/**
* @head:
@@ -1185,6 +1527,14 @@ struct drm_connector {
*/
struct list_head head;
+ /**
+ * @global_connector_list_entry:
+ *
+ * Connector entry in the global connector-list, used by
+ * drm_connector_find_by_fwnode().
+ */
+ struct list_head global_connector_list_entry;
+
/** @base: base KMS object */
struct drm_mode_object base;
@@ -1331,6 +1681,24 @@ struct drm_connector {
*/
struct drm_property *max_bpc_property;
+ /** @privacy_screen: drm_privacy_screen for this connector, or NULL. */
+ struct drm_privacy_screen *privacy_screen;
+
+ /** @privacy_screen_notifier: privacy-screen notifier_block */
+ struct notifier_block privacy_screen_notifier;
+
+ /**
+ * @privacy_screen_sw_state_property: Optional atomic property for the
+ * connector to control the integrated privacy screen.
+ */
+ struct drm_property *privacy_screen_sw_state_property;
+
+ /**
+ * @privacy_screen_hw_state_property: Optional atomic property for the
+ * connector to report the actual integrated privacy screen state.
+ */
+ struct drm_property *privacy_screen_hw_state_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1373,8 +1741,20 @@ struct drm_connector {
struct drm_cmdline_mode cmdline_mode;
/** @force: a DRM_FORCE_<foo> state for forced mode sets */
enum drm_connector_force force;
- /** @override_edid: has the EDID been overwritten through debugfs for testing? */
- bool override_edid;
+
+ /**
+ * @edid_override: Override EDID set via debugfs.
+ *
+ * Do not modify or access outside of the drm_edid_override_* family of
+ * functions.
+ */
+ const struct drm_edid *edid_override;
+
+ /**
+ * @edid_override_mutex: Protect access to edid_override.
+ */
+ struct mutex edid_override_mutex;
+
/** @epoch_counter: used to detect any other changes in connector, besides status */
u64 epoch_counter;
@@ -1519,6 +1899,11 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc);
+int drmm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
@@ -1596,22 +1981,32 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode,
+ enum drm_connector_status status);
const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_mode_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
+const char *drm_get_dp_subconnector_name(int val);
const char *drm_get_content_protection_name(int val);
const char *drm_get_hdcp_content_type_name(int val);
+int drm_get_tv_mode_from_name(const char *name, size_t len);
+
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector);
+
int drm_mode_create_tv_margin_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[]);
int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[]);
+ unsigned int supported_tv_modes);
void drm_connector_attach_tv_margin_properties(struct drm_connector *conn);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
@@ -1619,13 +2014,16 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+int drm_connector_attach_colorspace_property(struct drm_connector *connector);
+int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
-int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces);
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces);
int drm_mode_create_content_type_property(struct drm_device *dev);
-void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
int drm_connector_set_path_property(struct drm_connector *connector,
@@ -1644,8 +2042,16 @@ int drm_connector_set_panel_orientation_with_quirk(
struct drm_connector *connector,
enum drm_panel_orientation panel_orientation,
int width, int height);
+int drm_connector_set_orientation_from_panel(
+ struct drm_connector *connector,
+ struct drm_panel *panel);
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max);
+void drm_connector_create_privacy_screen_properties(struct drm_connector *conn);
+void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn);
+void drm_connector_attach_privacy_screen_provider(
+ struct drm_connector *connector, struct drm_privacy_screen *priv);
+void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state);
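A sketch of wiring a privacy-screen provider into a connector at init time. drm_privacy_screen_get() belongs to the separate drm_privacy_screen consumer API, and treating -ENODEV as "no privacy screen present" is an assumption about the driver's policy.

#include <drm/drm_connector.h>
#include <drm/drm_privacy_screen_consumer.h>

static int my_attach_privacy_screen(struct drm_connector *connector,
				    struct device *dev)
{
	struct drm_privacy_screen *priv;

	priv = drm_privacy_screen_get(dev, NULL);
	if (IS_ERR(priv))
		return PTR_ERR(priv) == -ENODEV ? 0 : PTR_ERR(priv);

	drm_connector_attach_privacy_screen_provider(connector, priv);
	return 0;
}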
/**
* struct drm_tile_group - Tile group metadata
@@ -1679,6 +2085,11 @@ void drm_mode_put_tile_group(struct drm_device *dev,
* drm_connector_list_iter_begin(), drm_connector_list_iter_end() and
* drm_connector_list_iter_next() respectively the convenience macro
* drm_for_each_connector_iter().
+ *
+ * Note that the return value of drm_connector_list_iter_next() is only valid
+ * up to the next drm_connector_list_iter_next() or
+ * drm_connector_list_iter_end() call. If you want to use the connector later,
+ * then you need to grab your own reference first using drm_connector_get().
*/
struct drm_connector_list_iter {
/* private: */
@@ -1694,6 +2105,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
+const char *drm_get_colorspace_name(enum drm_colorspace colorspace);
/**
* drm_for_each_connector_iter - connector_list iterator macro
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 59b51a09cae6..8b48a1974da3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -25,37 +25,24 @@
#ifndef __DRM_CRTC_H__
#define __DRM_CRTC_H__
-#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/fb.h>
-#include <linux/hdmi.h>
-#include <linux/media-bus-format.h>
-#include <uapi/drm/drm_mode.h>
-#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
-#include <drm/drm_rect.h>
#include <drm/drm_mode_object.h>
-#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
-#include <drm/drm_connector.h>
#include <drm/drm_device.h>
-#include <drm/drm_property.h>
-#include <drm/drm_edid.h>
#include <drm/drm_plane.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_color_mgmt.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_mode_config.h>
+struct drm_connector;
struct drm_device;
+struct drm_framebuffer;
struct drm_mode_set;
struct drm_file;
-struct drm_clip_rect;
struct drm_printer;
struct drm_self_refresh_data;
struct device_node;
-struct dma_fence;
struct edid;
static inline int64_t U642I64(uint64_t val)
@@ -90,11 +77,6 @@ struct drm_plane_helper_funcs;
* intended to indicate whether a full modeset is needed, rather than strictly
* describing what has changed in a commit. See also:
* drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
*/
struct drm_crtc_state {
/** @crtc: backpointer to the CRTC */
@@ -285,6 +267,10 @@ struct drm_crtc_state {
* Lookup table for converting pixel data after the color conversion
* matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not
* NULL) is an array of &struct drm_color_lut.
+ *
+ * Note that for mostly historical reasons stemming from Xorg heritage,
+ * this is also used to store the color map (also sometimes color lut,
+ * CLUT or color palette) for indexed formats like DRM_FORMAT_C8.
*/
struct drm_property_blob *gamma_lut;
@@ -325,6 +311,13 @@ struct drm_crtc_state {
bool self_refresh_active;
/**
+ * @scaling_filter:
+ *
+ * Scaling filter to be applied
+ */
+ enum drm_scaling_filter scaling_filter;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -1068,12 +1061,18 @@ struct drm_crtc {
/**
* @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
* by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint32_t gamma_size;
/**
* @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
* GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint16_t *gamma_store;
@@ -1084,6 +1083,12 @@ struct drm_crtc {
struct drm_object_properties properties;
/**
+ * @scaling_filter_property: property to apply a particular filter while
+ * scaling.
+ */
+ struct drm_property *scaling_filter_property;
+
+ /**
* @state:
*
* Current atomic state for this CRTC.
@@ -1122,14 +1127,12 @@ struct drm_crtc {
*/
spinlock_t commit_lock;
-#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
*
* Debugfs directory for this CRTC.
*/
struct dentry *debugfs_entry;
-#endif
/**
* @crc:
@@ -1208,8 +1211,50 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
+
+__printf(6, 7)
+int drmm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
void drm_crtc_cleanup(struct drm_crtc *crtc);
+__printf(7, 8)
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+ size_t size, size_t offset,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
+/**
+ * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_crtc
+ * @member: the name of the &drm_crtc within @type.
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Allocates and initializes a new crtc object. Cleanup is automatically
+ * handled through registering drmm_crtc_cleanup() with drmm_add_action().
+ *
+ * The @drm_crtc_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new crtc, or ERR_PTR on failure.
+ */
+#define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) \
+ ((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \
+ offsetof(type, member), \
+ primary, cursor, funcs, \
+ name, ##__VA_ARGS__))
+
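A usage sketch for the managed allocator above; my_crtc, the plane pointers and the name format are placeholders. Per the kernel-doc, the funcs passed in must not provide a destroy hook, since cleanup is drmm-managed.

#include <drm/drm_crtc.h>

struct my_crtc {
	struct drm_crtc base;
	/* driver-private fields would follow */
};

static struct my_crtc *my_crtc_create(struct drm_device *dev,
				      struct drm_plane *primary,
				      struct drm_plane *cursor,
				      const struct drm_crtc_funcs *funcs)
{
	/* funcs->destroy must be NULL, cleanup is drmm-managed */
	return drmm_crtc_alloc_with_planes(dev, struct my_crtc, base,
					   primary, cursor, funcs,
					   "my-crtc-%d", 0);
}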
/**
* drm_crtc_index - find the index of a registered CRTC
* @crtc: CRTC to find index for
@@ -1266,4 +1311,17 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
#define drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+/**
+ * drm_for_each_crtc_reverse - iterate over all CRTCs in reverse order
+ * @crtc: a &struct drm_crtc as the loop cursor
+ * @dev: the &struct drm_device
+ *
+ * Iterate over all CRTCs of @dev.
+ */
+#define drm_for_each_crtc_reverse(crtc, dev) \
+ list_for_each_entry_reverse(crtc, &(dev)->mode_config.crtc_list, head)
+
+int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
+ unsigned int supported_filters);
+
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a6d520d5b6ca..8c886fc46ef2 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -33,15 +33,17 @@
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/idr.h>
-#include <linux/fb.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_framebuffer;
+struct drm_mode_set;
+struct drm_modeset_acquire_ctx;
void drm_helper_disable_unused_functions(struct drm_device *dev);
int drm_crtc_helper_set_config(struct drm_mode_set *set,
@@ -50,6 +52,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
+int drm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index 40c34a5bf149..effda42cce31 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -64,7 +64,6 @@ struct drm_atomic_helper_damage_iter {
bool full_update;
};
-void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
struct drm_plane_state *plane_state);
int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
@@ -82,21 +81,4 @@ bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
struct drm_plane_state *state,
struct drm_rect *rect);
-/**
- * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
- * @state: Plane state.
- *
- * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
- * can be obtained by simply typecasting &drm_mode_rect. This is because both
- * are signed 32 and during drm_atomic_check_only() it is verified that damage
- * clips are inside fb.
- *
- * Return: Clips in plane fb_damage_clips blob property.
- */
-static inline struct drm_rect *
-drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
-{
- return (struct drm_rect *)drm_plane_get_damage_clips(state);
-}
-
#endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 2188dc83957f..cf06cee4343f 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -34,6 +34,22 @@
#include <linux/types.h>
#include <linux/seq_file.h>
+
+#include <drm/drm_gpuvm.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
+ */
+#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data}
+
/**
* struct drm_info_list - debugfs info list entry
*
@@ -79,12 +95,64 @@ struct drm_info_node {
struct dentry *dent;
};
+/**
+ * struct drm_debugfs_info - debugfs info list entry
+ *
+ * This structure represents a debugfs file to be created by the drm
+ * core.
+ */
+struct drm_debugfs_info {
+ /** @name: File name */
+ const char *name;
+
+ /**
+ * @show:
+ *
+ * Show callback. &seq_file->private will be set to the &struct
+ * drm_debugfs_entry corresponding to the instance of this info
+ * on a given &struct drm_device.
+ */
+ int (*show)(struct seq_file*, void*);
+
+ /** @driver_features: Required driver features for this entry. */
+ u32 driver_features;
+
+ /** @data: Driver-private data, should not be device-specific. */
+ void *data;
+};
+
+/**
+ * struct drm_debugfs_entry - Per-device debugfs node structure
+ *
+ * This structure represents a debugfs file, as an instantiation of a &struct
+ * drm_debugfs_info on a &struct drm_device.
+ */
+struct drm_debugfs_entry {
+ /** @dev: &struct drm_device for this node. */
+ struct drm_device *dev;
+
+ /** @file: Template for this node. */
+ struct drm_debugfs_info file;
+
+ /** @list: Linked list of all device nodes. */
+ struct list_head list;
+};
+
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
-int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor);
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor);
+
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data);
+
+void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files, int count);
+
+int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuvm *gpuvm);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -92,7 +160,24 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files,
{}
static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor)
+ int count, struct dentry *root,
+ struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{}
+
+static inline void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files,
+ int count)
+{}
+
+static inline int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuvm *gpuvm)
{
return 0;
}
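A sketch of the new per-device registration: the show callback receives the &struct drm_debugfs_entry via &seq_file.private, from which the device can be recovered. The file name and output are placeholders.

#include <drm/drm_debugfs.h>

static int my_info_show(struct seq_file *m, void *unused)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;

	seq_printf(m, "device: %s\n", dev->unique);
	return 0;
}

static void my_debugfs_init(struct drm_device *dev)
{
	drm_debugfs_add_file(dev, "my-info", my_info_show, NULL);
}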
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 0988351d743c..63767cf24371 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,16 +6,12 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
struct drm_minor;
struct drm_master;
-struct drm_device_dma;
struct drm_vblank_crtc;
-struct drm_sg_mem;
-struct drm_local_map;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;
@@ -27,7 +23,7 @@ struct pci_controller;
/**
- * enum drm_switch_power - power state of drm device
+ * enum switch_power_state - power state of drm device
*/
enum switch_power_state {
@@ -51,13 +47,6 @@ enum switch_power_state {
* may contain multiple heads.
*/
struct drm_device {
- /**
- * @legacy_dev_list:
- *
- * List of devices per driver for stealth attach cleanup
- */
- struct list_head legacy_dev_list;
-
/** @if_version: Highest interface version set */
int if_version;
@@ -83,7 +72,7 @@ struct drm_device {
} managed;
/** @driver: DRM driver managing the device */
- struct drm_driver *driver;
+ const struct drm_driver *driver;
/**
* @dev_private:
@@ -92,17 +81,33 @@ struct drm_device {
* NULL.
*
* Instead of using this pointer it is recommended that drivers use
- * drm_dev_init() and embed struct &drm_device in their larger
+ * devm_drm_dev_alloc() and embed struct &drm_device in their larger
* per-device structure.
*/
void *dev_private;
- /** @primary: Primary node */
+ /**
+ * @primary:
+ *
+ * Primary node. Drivers should not interact with this
+ * directly. debugfs interfaces can be registered with
+	 * drm_debugfs_add_file(), and sysfs files should be added directly to
+	 * the hardware's struct device @dev (not to the character device node).
+ */
struct drm_minor *primary;
- /** @render: Render node */
+ /**
+ * @render:
+ *
+ * Render node. Drivers should not interact with this directly ever.
+ * Drivers should not expose any additional interfaces in debugfs or
+ * sysfs on this node.
+ */
struct drm_minor *render;
+ /** @accel: Compute Acceleration node */
+ struct drm_minor *accel;
+
/**
* @registered:
*
@@ -147,8 +152,8 @@ struct drm_device {
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
*
- * WARNING:
- * Only drivers annotated with DRIVER_LEGACY should be using this.
+ * TODO: This lock used to be the BKL of the DRM subsystem. Move the
+ * lock into i915, which is the only remaining user.
*/
struct mutex struct_mutex;
@@ -199,20 +204,6 @@ struct drm_device {
struct list_head clientlist;
/**
- * @irq_enabled:
- *
- * Indicates that interrupt handling is enabled, specifically vblank
- * handling. Drivers which don't use drm_irq_install() need to set this
- * to true manually.
- */
- bool irq_enabled;
-
- /**
- * @irq: Used by the drm_irq_install() and drm_irq_unistall() helpers.
- */
- int irq;
-
- /**
* @vblank_disable_immediate:
*
* If true, vblank interrupt will be disabled immediately when the
@@ -283,16 +274,6 @@ struct drm_device {
*/
spinlock_t event_lock;
- /** @agp: AGP data */
- struct drm_agp_head *agp;
-
- /** @pdev: PCI device structure */
- struct pci_dev *pdev;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
/** @num_crtcs: Number of CRTCs on this device */
unsigned int num_crtcs;
@@ -329,56 +310,12 @@ struct drm_device {
*/
struct drm_fb_helper *fb_helper;
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- /* Context handle management - linked list of context handles */
- struct list_head ctxlist;
-
- /* Context handle management - mutex for &ctxlist */
- struct mutex ctxlist_mutex;
-
- /* Context handle management */
- struct idr ctx_idr;
-
- /* Memory management - linked list of regions */
- struct list_head maplist;
-
- /* Memory management - user token hash table for maps */
- struct drm_open_hash map_hash;
-
- /* Context handle management - list of vmas (for debugging) */
- struct list_head vmalist;
-
- /* Optional pointer for DMA support */
- struct drm_device_dma *dma;
-
- /* Context swapping flag */
- __volatile__ long context_flag;
-
- /* Last current context */
- int last_context;
-
- /* Lock for &buf_use and a few other things. */
- spinlock_t buf_lock;
-
- /* Usage counter for buffers in use -- cannot alloc */
- int buf_use;
-
- /* Buffer allocation in progress */
- atomic_t buf_alloc;
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
-
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
-
- /* Scatter gather memory */
- struct drm_sg_mem *sg;
-#endif
+ /**
+ * @debugfs_root:
+ *
+ * Root directory for debugfs files.
+ */
+ struct dentry *debugfs_root;
};
#endif
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 77941efb5426..566497eeb3b8 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -22,37 +22,74 @@
#ifndef DRM_DISPLAYID_H
#define DRM_DISPLAYID_H
-#define DATA_BLOCK_PRODUCT_ID 0x00
-#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
-#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
-#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
-#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
-#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
-#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
-#define DATA_BLOCK_VESA_TIMING 0x07
-#define DATA_BLOCK_CEA_TIMING 0x08
-#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
-#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
-#define DATA_BLOCK_GP_ASCII_STRING 0x0b
-#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
-#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
-#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
-#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
-#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
-#define DATA_BLOCK_TILED_DISPLAY 0x12
-#define DATA_BLOCK_CTA 0x81
-
-#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
-
-#define PRODUCT_TYPE_EXTENSION 0
-#define PRODUCT_TYPE_TEST 1
-#define PRODUCT_TYPE_PANEL 2
-#define PRODUCT_TYPE_MONITOR 3
-#define PRODUCT_TYPE_TV 4
-#define PRODUCT_TYPE_REPEATER 5
-#define PRODUCT_TYPE_DIRECT_DRIVE 6
-
-struct displayid_hdr {
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct drm_edid;
+
+#define VESA_IEEE_OUI 0x3a0292
+
+/* DisplayID Structure versions */
+#define DISPLAY_ID_STRUCTURE_VER_12 0x12
+#define DISPLAY_ID_STRUCTURE_VER_20 0x20
+
+/* DisplayID Structure v1r2 Data Blocks */
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+#define DATA_BLOCK_CTA 0x81
+
+/* DisplayID Structure v2r0 Data Blocks */
+#define DATA_BLOCK_2_PRODUCT_ID 0x20
+#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21
+#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22
+#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23
+#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24
+#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25
+#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26
+#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
+#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
+#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
+#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
+
+/* DisplayID Structure v1r2 Product Type */
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */
+#define PRIMARY_USE_EXTENSION 0
+#define PRIMARY_USE_TEST 1
+#define PRIMARY_USE_GENERIC 2
+#define PRIMARY_USE_TV 3
+#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4
+#define PRIMARY_USE_DESKTOP_GAMING 5
+#define PRIMARY_USE_PRESENTATION 6
+#define PRIMARY_USE_HEAD_MOUNTED_VR 7
+#define PRIMARY_USE_HEAD_MOUNTED_AR 8
+
+struct displayid_header {
u8 rev;
u8 bytes;
u8 prod_id;
@@ -92,12 +129,42 @@ struct displayid_detailed_timing_block {
struct displayid_detailed_timings_1 timings[];
};
-#define for_each_displayid_db(displayid, block, idx, length) \
- for ((block) = (struct displayid_block *)&(displayid)[idx]; \
- (idx) + sizeof(struct displayid_block) <= (length) && \
- (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
- (block)->num_bytes > 0; \
- (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \
- (block) = (struct displayid_block *)&(displayid)[idx])
+#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
+#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
+
+struct displayid_vesa_vendor_specific_block {
+ struct displayid_block base;
+ u8 oui[3];
+ u8 data_structure_type;
+ u8 mso;
+} __packed;
+
+/*
+ * DisplayID iteration.
+ *
+ * Do not access directly, this is private.
+ */
+struct displayid_iter {
+ const struct drm_edid *drm_edid;
+
+ const u8 *section;
+ int length;
+ int idx;
+ int ext_index;
+
+ u8 version;
+ u8 primary_use;
+};
+
+void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
+ struct displayid_iter *iter);
+const struct displayid_block *
+__displayid_iter_next(struct displayid_iter *iter);
+#define displayid_iter_for_each(__block, __iter) \
+ while (((__block) = __displayid_iter_next(__iter)))
+void displayid_iter_end(struct displayid_iter *iter);
+
+u8 displayid_version(const struct displayid_iter *iter);
+u8 displayid_primary_use(const struct displayid_iter *iter);
#endif
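A sketch of the iterator API; the drm_edid pointer is assumed to come from the drm_edid_read() family. The loop visits each DisplayID data block across all EDID extensions.

#include <drm/drm_displayid.h>
#include <linux/printk.h>

static void my_dump_displayid(const struct drm_edid *drm_edid)
{
	const struct displayid_block *block;
	struct displayid_iter iter;

	displayid_iter_edid_begin(drm_edid, &iter);
	displayid_iter_for_each(block, &iter) {
		/* tag identifies the block, e.g. DATA_BLOCK_TILED_DISPLAY */
		pr_info("DisplayID block tag 0x%02x, %u bytes\n",
			block->tag, block->num_bytes);
	}
	displayid_iter_end(&iter);
}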
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 7116abc1a04e..8878260d7529 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -30,16 +30,20 @@
#include <linux/list.h>
#include <linux/irqreturn.h>
+#include <video/nomodeset.h>
+
#include <drm/drm_device.h>
struct drm_file;
struct drm_gem_object;
struct drm_master;
struct drm_minor;
+struct dma_buf;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
struct drm_printer;
+struct sg_table;
/**
* enum drm_driver_feature - feature flags
@@ -72,7 +76,7 @@ enum drm_driver_feature {
* @DRIVER_ATOMIC:
*
* Driver supports the full atomic modesetting userspace API. Drivers
- * which only use atomic internally, but do not the support the full
+ * which only use atomic internally, but do not support the full
* userspace API (e.g. not all properties converted to atomic, or
* multi-plane updates are not guaranteed to be tear-free) should not
* set this flag.
@@ -92,6 +96,29 @@ enum drm_driver_feature {
* synchronization of command submission.
*/
DRIVER_SYNCOBJ_TIMELINE = BIT(6),
+ /**
+ * @DRIVER_COMPUTE_ACCEL:
+ *
+ * Driver supports compute acceleration devices. This flag is mutually exclusive with
+ * @DRIVER_RENDER and @DRIVER_MODESET. Devices that support both graphics and compute
+ * acceleration should be handled by two drivers that are connected using auxiliary bus.
+ */
+ DRIVER_COMPUTE_ACCEL = BIT(7),
+ /**
+ * @DRIVER_GEM_GPUVA:
+ *
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
+ /**
+ * @DRIVER_CURSOR_HOTSPOT:
+ *
+ * Driver supports and requires cursor hotspot information in the
+ * cursor plane (e.g. the cursor plane has to actually track the mouse
+ * cursor, and clients are required to set the hotspot in order for
+ * the cursor plane to work correctly).
+ */
+ DRIVER_CURSOR_HOTSPOT = BIT(9),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -135,19 +162,8 @@ enum drm_driver_feature {
* @DRIVER_HAVE_IRQ:
*
* Legacy irq support. Only for legacy drivers. Do not use.
- *
- * New drivers can either use the drm_irq_install() and
- * drm_irq_uninstall() helper functions, or roll their own irq support
- * code by calling request_irq() directly.
*/
DRIVER_HAVE_IRQ = BIT(30),
- /**
- * @DRIVER_KMS_LEGACY_CONTEXT:
- *
- * Used only by nouveau for backwards compatibility with existing
- * userspace. Do not use.
- */
- DRIVER_KMS_LEGACY_CONTEXT = BIT(31),
};
/**
@@ -163,13 +179,12 @@ struct drm_driver {
/**
* @load:
*
- * Backward-compatible driver callback to complete
- * initialization steps after the driver is registered. For
- * this reason, may suffer from race conditions and its use is
- * deprecated for new drivers. It is therefore only supported
- * for existing drivers not yet converted to the new scheme.
- * See drm_dev_init() and drm_dev_register() for proper and
- * race-free way to set up a &struct drm_device.
+ * Backward-compatible driver callback to complete initialization steps
+ * after the driver is registered. For this reason, it may suffer from
+ * race conditions and its use is deprecated for new drivers. It is
+ * therefore only supported for existing drivers not yet converted to
+ * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for
+ * proper and race-free way to set up a &struct drm_device.
*
* This is deprecated, do not use!
*
@@ -271,42 +286,6 @@ struct drm_driver {
void (*release) (struct drm_device *);
/**
- * @irq_handler:
- *
- * Interrupt handler called when using drm_irq_install(). Not used by
- * drivers which implement their own interrupt handling.
- */
- irqreturn_t(*irq_handler) (int irq, void *arg);
-
- /**
- * @irq_preinstall:
- *
- * Optional callback used by drm_irq_install() which is called before
- * the interrupt handler is registered. This should be used to clear out
- * any pending interrupts (from e.g. firmware based drives) and reset
- * the interrupt handling registers.
- */
- void (*irq_preinstall) (struct drm_device *dev);
-
- /**
- * @irq_postinstall:
- *
- * Optional callback used by drm_irq_install() which is called after
- * the interrupt handler is registered. This should be used to enable
- * interrupt generation in the hardware.
- */
- int (*irq_postinstall) (struct drm_device *dev);
-
- /**
- * @irq_uninstall:
- *
- * Optional callback used by drm_irq_uninstall() which is called before
- * the interrupt handler is unregistered. This should be used to disable
- * interrupt generation in the hardware.
- */
- void (*irq_uninstall) (struct drm_device *dev);
-
- /**
* @master_set:
*
* Called whenever the minor master is set. Only used by vmwgfx.
@@ -328,69 +307,30 @@ struct drm_driver {
void (*debugfs_init)(struct drm_minor *minor);
/**
- * @gem_free_object_unlocked: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * &drm_gem_object_funcs.free instead.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- /**
- * @gem_open_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.open.
- *
- * Driver hook called upon gem handle creation
- */
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * @gem_close_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.close.
- *
- * Driver hook called upon gem handle release
- */
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
* @gem_create_object: constructor for gem objects
*
- * Hook for allocating the GEM object struct, for use by the CMA and
- * SHMEM GEM helpers.
+ * Hook for allocating the GEM object struct, for use by the CMA
+ * and SHMEM GEM helpers. Returns a GEM object on success, or an
+ * ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
+
/**
* @prime_handle_to_fd:
*
- * Main PRIME export function. Should be implemented with
- * drm_gem_prime_handle_to_fd() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME export function. Only used by vmwgfx.
*/
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/**
* @prime_fd_to_handle:
*
- * Main PRIME import function. Should be implemented with
- * drm_gem_prime_fd_to_handle() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME import function. Only used by vmwgfx.
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
- /**
- * @gem_prime_export:
- *
- * Export hook for GEM drivers. Deprecated in favour of
- * &drm_gem_object_funcs.export.
- */
- struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
- int flags);
+
/**
* @gem_prime_import:
*
@@ -400,29 +340,6 @@ struct drm_driver {
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
-
- /**
- * @gem_prime_pin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.pin.
- */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_unpin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
- */
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
-
-
- /**
- * @gem_prime_get_sg_table:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
- */
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
-
/**
* @gem_prime_import_sg_table:
*
@@ -433,33 +350,6 @@ struct drm_driver {
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
- /**
- * @gem_prime_vmap:
- *
- * Deprecated vmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vmap instead.
- */
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_vunmap:
- *
- * Deprecated vunmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vunmap instead.
- */
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
-
- /**
- * @gem_prime_mmap:
- *
- * mmap hook for GEM drivers, used to implement dma-buf mmap in the
- * PRIME helpers.
- *
- * FIXME: There's way too much duplication going on here, and also moved
- * to &drm_gem_object_funcs.
- */
- int (*gem_prime_mmap)(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
/**
* @dumb_create:
@@ -503,33 +393,13 @@ struct drm_driver {
int (*dumb_map_offset)(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
- /**
- * @dumb_destroy:
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
- *
- * Called by the user via ioctl.
- *
- * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
- * must not overwrite this.
- *
- * Returns:
- *
- * Zero on success, negative errno on failure.
- */
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
/**
- * @gem_vm_ops: Driver private ops for this object
+ * @show_fdinfo:
*
- * For GEM drivers this is deprecated in favour of
- * &drm_gem_object_funcs.vm_ops.
+ * Print device-specific fdinfo. See Documentation/gpu/drm-usage-stats.rst.
*/
- const struct vm_operations_struct *gem_vm_ops;
+ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f);
/** @major: driver major number */
int major;
@@ -572,31 +442,10 @@ struct drm_driver {
* some examples.
*/
const struct file_operations *fops;
-
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-
- /* List of devices hanging off this driver with stealth attach. */
- struct list_head legacy_dev_list;
- int (*firstopen) (struct drm_device *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
- int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
- void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
- int dev_priv_size;
};
-int drm_dev_init(struct drm_device *dev,
- struct drm_driver *driver,
- struct device *parent);
-int devm_drm_dev_init(struct device *parent,
- struct drm_device *dev,
- struct drm_driver *driver);
-
-void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+void *__devm_drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
size_t size, size_t offset);
/**
@@ -629,7 +478,7 @@ void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
offsetof(type, member)))
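
[Editor's note: the macro is meant to be used with the driver's subclassed device structure. A hedged sketch — struct my_device and my_driver are hypothetical names:

	struct my_device {
		struct drm_device drm;	/* must be embedded, not a pointer */
		/* driver private state follows */
	};

	struct my_device *my;

	my = devm_drm_dev_alloc(parent, &my_driver, struct my_device, drm);
	if (IS_ERR(my))
		return PTR_ERR(my);
]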
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
@@ -716,7 +565,18 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
}
-int drm_dev_set_unique(struct drm_device *dev, const char *name);
+/* TODO: Inline drm_firmware_drivers_only() in all its callers. */
+static inline bool drm_firmware_drivers_only(void)
+{
+ return video_firmware_drivers_only();
+}
+#if defined(CONFIG_DEBUG_FS)
+void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root);
+#else
+static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+{
+}
+#endif
#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index cfa4f5af49af..7923bc00dc7a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -24,10 +24,14 @@
#define __DRM_EDID_H__
#include <linux/types.h>
-#include <linux/hdmi.h>
-#include <drm/drm_mode.h>
+enum hdmi_quantization_range;
+struct drm_connector;
struct drm_device;
+struct drm_display_mode;
+struct drm_edid;
+struct hdmi_avi_infoframe;
+struct hdmi_vendor_infoframe;
struct i2c_adapter;
#define EDID_LENGTH 128
@@ -45,7 +49,7 @@ struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
-} __attribute__((packed));
+} __packed;
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
@@ -58,7 +62,7 @@ struct est_timings {
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect;
-} __attribute__((packed));
+} __packed;
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
@@ -84,17 +88,25 @@ struct detailed_pixel_timing {
u8 hborder;
u8 vborder;
u8 misc;
-} __attribute__((packed));
+} __packed;
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
-} __attribute__((packed));
+} __packed;
+
+#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */
-#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00
-#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01
-#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
-#define DRM_EDID_CVT_SUPPORT_FLAG 0x04
+#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 /* 1.3 */
+#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 /* 1.4 */
+#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 /* 1.3 */
+#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 /* 1.4 */
+
+#define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
+#define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4)
struct detailed_data_monitor_range {
u8 min_vfreq;
@@ -111,7 +123,7 @@ struct detailed_data_monitor_range {
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
- } __attribute__((packed)) gtf2;
+ } __packed gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
@@ -120,27 +132,27 @@ struct detailed_data_monitor_range {
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
- } __attribute__((packed)) cvt;
- } formula;
-} __attribute__((packed));
+ } __packed cvt;
+ } __packed formula;
+} __packed;
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
-} __attribute__((packed));
+} __packed;
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
-} __attribute__((packed));
+} __packed;
struct cvt_timing {
u8 code[3];
-} __attribute__((packed));
+} __packed;
struct detailed_non_pixel {
u8 pad1;
@@ -154,8 +166,8 @@ struct detailed_non_pixel {
struct detailed_data_wpindex color;
struct std_timing timings[6];
struct cvt_timing cvt[4];
- } data;
-} __attribute__((packed));
+ } __packed data;
+} __packed;
#define EDID_DETAIL_EST_TIMINGS 0xf7
#define EDID_DETAIL_CVT_3BYTE 0xf8
@@ -172,8 +184,8 @@ struct detailed_timing {
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
- } data;
-} __attribute__((packed));
+ } __packed data;
+} __packed;
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
@@ -200,7 +212,8 @@ struct detailed_timing {
#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */
-#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) /* 1.2 */
+#define DRM_EDID_FEATURE_CONTINUOUS_FREQ (1 << 0) /* 1.4 */
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
/* If analog */
@@ -229,63 +242,35 @@ struct detailed_timing {
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
-/* ELD Header Block */
-#define DRM_ELD_HEADER_BLOCK_SIZE 4
-
-#define DRM_ELD_VER 0
-# define DRM_ELD_VER_SHIFT 3
-# define DRM_ELD_VER_MASK (0x1f << 3)
-# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
-# define DRM_ELD_VER_CANNED (0x1f << 3)
-
-#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
-
-/* ELD Baseline Block for ELD_Ver == 2 */
-#define DRM_ELD_CEA_EDID_VER_MNL 4
-# define DRM_ELD_CEA_EDID_VER_SHIFT 5
-# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
-# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
-# define DRM_ELD_MNL_SHIFT 0
-# define DRM_ELD_MNL_MASK (0x1f << 0)
-
-#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
-# define DRM_ELD_SAD_COUNT_SHIFT 4
-# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
-# define DRM_ELD_CONN_TYPE_SHIFT 2
-# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
-# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
-# define DRM_ELD_CONN_TYPE_DP (1 << 2)
-# define DRM_ELD_SUPPORTS_AI (1 << 1)
-# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
-
-#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
-# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
-
-#define DRM_ELD_SPEAKER 7
-# define DRM_ELD_SPEAKER_MASK 0x7f
-# define DRM_ELD_SPEAKER_RLRC (1 << 6)
-# define DRM_ELD_SPEAKER_FLRC (1 << 5)
-# define DRM_ELD_SPEAKER_RC (1 << 4)
-# define DRM_ELD_SPEAKER_RLR (1 << 3)
-# define DRM_ELD_SPEAKER_FC (1 << 2)
-# define DRM_ELD_SPEAKER_LFE (1 << 1)
-# define DRM_ELD_SPEAKER_FLR (1 << 0)
-
-#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
-# define DRM_ELD_PORT_ID_LEN 8
-
-#define DRM_ELD_MANUFACTURER_NAME0 16
-#define DRM_ELD_MANUFACTURER_NAME1 17
-
-#define DRM_ELD_PRODUCT_CODE0 18
-#define DRM_ELD_PRODUCT_CODE1 19
-
-#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
-
-#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+/* HDMI 2.1 additional fields */
+#define DRM_EDID_MAX_FRL_RATE_MASK 0xf0
+#define DRM_EDID_FAPA_START_LOCATION (1 << 0)
+#define DRM_EDID_ALLM (1 << 1)
+#define DRM_EDID_FVA (1 << 2)
+
+/* Deep Color specific */
+#define DRM_EDID_DC_30BIT_420 (1 << 0)
+#define DRM_EDID_DC_36BIT_420 (1 << 1)
+#define DRM_EDID_DC_48BIT_420 (1 << 2)
+
+/* VRR specific */
+#define DRM_EDID_CNMVRR (1 << 3)
+#define DRM_EDID_CINEMA_VRR (1 << 4)
+#define DRM_EDID_MDELTA (1 << 5)
+#define DRM_EDID_VRR_MAX_UPPER_MASK 0xc0
+#define DRM_EDID_VRR_MAX_LOWER_MASK 0xff
+#define DRM_EDID_VRR_MIN_MASK 0x3f
+
+/* DSC specific */
+#define DRM_EDID_DSC_10BPC (1 << 0)
+#define DRM_EDID_DSC_12BPC (1 << 1)
+#define DRM_EDID_DSC_16BPC (1 << 2)
+#define DRM_EDID_DSC_ALL_BPP (1 << 3)
+#define DRM_EDID_DSC_NATIVE_420 (1 << 6)
+#define DRM_EDID_DSC_1P2 (1 << 7)
+#define DRM_EDID_DSC_MAX_FRL_RATE_MASK 0xf0
+#define DRM_EDID_DSC_MAX_SLICES 0xf
+#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
struct edid {
u8 header[8];
@@ -306,7 +291,7 @@ struct edid {
u8 features;
/* Color characteristics */
u8 red_green_lo;
- u8 black_white_lo;
+ u8 blue_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
@@ -325,7 +310,7 @@ struct edid {
u8 extensions;
/* Checksum */
u8 checksum;
-} __attribute__((packed));
+} __packed;
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
@@ -337,35 +322,11 @@ struct cea_sad {
u8 byte2; /* meaning depends on format */
};
-struct drm_encoder;
-struct drm_connector;
-struct drm_connector_state;
-struct drm_display_mode;
-
-int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
-int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
+int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
-struct edid *drm_load_edid_firmware(struct drm_connector *connector);
-int __drm_set_edid_firmware_path(const char *path);
-int __drm_get_edid_firmware_path(char *buf, size_t bufsize);
-#else
-static inline struct edid *
-drm_load_edid_firmware(struct drm_connector *connector)
-{
- return ERR_PTR(-ENOENT);
-}
-#endif
-
-/**
- * drm_edid_are_equal - compare two edid blobs.
- * @edid1: pointer to first blob
- * @edid2: pointer to second blob
- * This helper can be used during probing to determine if
- * edid had changed.
- */
bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
int
@@ -378,111 +339,68 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
void
-drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-void
-drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
-int
-drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-/**
- * drm_eld_mnl - Get ELD monitor name length in bytes.
- * @eld: pointer to an eld memory structure with mnl set
- */
-static inline int drm_eld_mnl(const uint8_t *eld)
-{
- return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
-}
-
/**
- * drm_eld_sad - Get ELD SAD structures.
- * @eld: pointer to an eld memory structure with sad_count set
+ * drm_edid_decode_mfg_id - Decode the manufacturer ID
+ * @mfg_id: The manufacturer ID
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
*/
-static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
+static inline const char *drm_edid_decode_mfg_id(u16 mfg_id, char vend[4])
{
- unsigned int ver, mnl;
-
- ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
- if (ver != 2 && ver != 31)
- return NULL;
-
- mnl = drm_eld_mnl(eld);
- if (mnl > 16)
- return NULL;
+ vend[0] = '@' + ((mfg_id >> 10) & 0x1f);
+ vend[1] = '@' + ((mfg_id >> 5) & 0x1f);
+ vend[2] = '@' + ((mfg_id >> 0) & 0x1f);
+ vend[3] = '\0';
- return eld + DRM_ELD_CEA_SAD(mnl, 0);
-}
-
-/**
- * drm_eld_sad_count - Get ELD SAD count.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline int drm_eld_sad_count(const uint8_t *eld)
-{
- return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
- DRM_ELD_SAD_COUNT_SHIFT;
-}
-
-/**
- * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
- * @eld: pointer to an eld memory structure with mnl and sad_count set
- *
- * This is a helper for determining the payload size of the baseline block, in
- * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
- */
-static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
-{
- return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
- drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+ return vend;
}
/**
- * drm_eld_size - Get ELD size in bytes
- * @eld: pointer to a complete eld memory structure
+ * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
+ * @vend_chr_0: First character of the vendor string.
+ * @vend_chr_1: Second character of the vendor string.
+ * @vend_chr_2: Third character of the vendor string.
+ * @product_id: The 16-bit product ID.
*
- * The returned value does not include the vendor block. It's vendor specific,
- * and comprises of the remaining bytes in the ELD memory buffer after
- * drm_eld_size() bytes of header and baseline block.
+ * This is a macro so that it can be calculated at compile time and used
+ * as an initializer.
*
- * The returned value is guaranteed to be a multiple of 4.
- */
-static inline int drm_eld_size(const uint8_t *eld)
-{
- return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
-}
-
-/**
- * drm_eld_get_spk_alloc - Get speaker allocation
- * @eld: pointer to an ELD memory structure
+ * For instance:
+ * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
*
- * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER
- * field definitions to identify speakers.
+ * Return: a 32-bit ID per panel.
*/
-static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld)
-{
- return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
-}
+#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
+ ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
+ (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
+ (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
+ ((product_id) & 0xffff))
/**
- * drm_eld_get_conn_type - Get device type hdmi/dp connected
- * @eld: pointer to an ELD memory structure
+ * drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
+ * @panel_id: The panel ID to decode.
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
+ * @product_id: The product ID will be returned here.
*
- * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
- * identify the display type connected.
+ * For instance, after:
+ * drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
+ * These will be true:
+ * vend[0] = 'B'
+ * vend[1] = 'O'
+ * vend[2] = 'E'
+ * vend[3] = '\0'
+ * product_id = 0x2d08
*/
-static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
+static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
{
- return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
+ *product_id = (u16)(panel_id & 0xffff);
+ drm_edid_decode_mfg_id(panel_id >> 16, vend);
}
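
[Editor's note: as a sketch, a driver could use these helpers to key quirks off a panel ID read via drm_edid_get_panel_id(), declared below; apply_boe_quirk() is a hypothetical hook:

	char vend[4];
	u16 product_id;
	u32 panel_id = drm_edid_get_panel_id(adapter);

	drm_edid_decode_panel_id(panel_id, vend, &product_id);

	if (panel_id == drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08))
		apply_boe_quirk();	/* hypothetical quirk hook */
]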
bool drm_probe_ddc(struct i2c_adapter *adapter);
@@ -492,29 +410,54 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
-int drm_add_override_edid_modes(struct drm_connector *connector);
+int drm_edid_override_connector_update(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_detect_hdmi_monitor(const struct edid *edid);
+bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
-void drm_set_preferred_mode(struct drm_connector *connector,
- int hpref, int vpref);
-int drm_edid_header_is_valid(const u8 *raw_edid);
+int drm_edid_header_is_valid(const void *edid);
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
+void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
+struct drm_display_mode *
+drm_display_mode_from_cea_vic(struct drm_device *dev,
+ u8 video_code);
+
+/* Interface based on struct drm_edid */
+const struct drm_edid *drm_edid_alloc(const void *edid, size_t size);
+const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid);
+void drm_edid_free(const struct drm_edid *drm_edid);
+bool drm_edid_valid(const struct drm_edid *drm_edid);
+const struct edid *drm_edid_raw(const struct drm_edid *drm_edid);
+const struct drm_edid *drm_edid_read(struct drm_connector *connector);
+const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
+ int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len),
+ void *context);
+const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+int drm_edid_connector_update(struct drm_connector *connector,
+ const struct drm_edid *edid);
+int drm_edid_connector_add_modes(struct drm_connector *connector);
+bool drm_edid_is_digital(const struct drm_edid *drm_edid);
+
+const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
+ int ext_id, int *ext_index);
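+
+/*
+ * Editor's note: a hedged sketch of how the drm_edid-based interface fits
+ * together in a connector's probe path (error handling kept minimal; assumes
+ * the connector has a DDC adapter set up):
+ *
+ *	const struct drm_edid *drm_edid;
+ *	int count;
+ *
+ *	drm_edid = drm_edid_read(connector);
+ *	drm_edid_connector_update(connector, drm_edid);
+ *	count = drm_edid_connector_add_modes(connector);
+ *	drm_edid_free(drm_edid);
+ */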
+
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h
new file mode 100644
index 000000000000..0a88d10b28b0
--- /dev/null
+++ b/include/drm/drm_eld.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_ELD_H__
+#define __DRM_ELD_H__
+
+#include <linux/types.h>
+
+struct cea_sad;
+
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_MASK 0x7f
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const u8 *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad);
+int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad);
+
+/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const u8 *drm_eld_sad(const u8 *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const u8 *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const u8 *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block. It's vendor specific,
+ * and comprises the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const u8 *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
+/**
+ * drm_eld_get_spk_alloc - Get speaker allocation
+ * @eld: pointer to an ELD memory structure
+ *
+ * The returned value is the speaker mask. The user has to use the
+ * %DRM_ELD_SPEAKER field definitions to identify speakers.
+ */
+static inline u8 drm_eld_get_spk_alloc(const u8 *eld)
+{
+ return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
+}
+
+/**
+ * drm_eld_get_conn_type - Get device type hdmi/dp connected
+ * @eld: pointer to an ELD memory structure
+ *
+ * The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
+ * identify the connected display type.
+ */
+static inline u8 drm_eld_get_conn_type(const u8 *eld)
+{
+ return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
+}
+
+#endif /* __DRM_ELD_H__ */
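
[Editor's note: a minimal sketch of walking the SADs of a connector's cached ELD with these helpers; it assumes connector->eld holds a valid, complete ELD:

	const u8 *eld = connector->eld;
	int i;

	for (i = 0; i < drm_eld_sad_count(eld); i++) {
		struct cea_sad sad;

		if (drm_eld_sad_get(eld, i, &sad) < 0)
			break;
		/* sad.format, sad.channels, sad.freq describe one descriptor */
	}
]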
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index a60f5f1555ac..977a9381c8ba 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -60,7 +60,7 @@ struct drm_encoder_funcs {
* @late_register:
*
* This optional hook can be used to register additional userspace
- * interfaces attached to the encoder like debugfs interfaces.
+ * interfaces attached to the encoder.
* It is called late in the driver load sequence from drm_dev_register().
* Everything added from this callback should be unregistered in
* the early_unregister callback.
@@ -81,6 +81,13 @@ struct drm_encoder_funcs {
 * before data structures are torn down.
*/
void (*early_unregister)(struct drm_encoder *encoder);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows encoders to create encoder-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root);
};
/**
@@ -89,8 +96,7 @@ struct drm_encoder_funcs {
* @head: list management
* @base: base KMS object
* @name: human readable name, can be overwritten by the driver
- * @bridge: bridge associated to the encoder
- * @funcs: control functions
+ * @funcs: control functions, can be NULL for simple managed encoders
* @helper_private: mid-layer private data
*
* CRTCs drive pixels to encoders, which convert them into signals
@@ -185,6 +191,13 @@ struct drm_encoder {
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
+
+ /**
+ * @debugfs_entry:
+ *
+	 * Debugfs directory for this encoder.
+ */
+ struct dentry *debugfs_entry;
};
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -195,6 +208,60 @@ int drm_encoder_init(struct drm_device *dev,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
+__printf(5, 6)
+int drmm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...);
+
+__printf(6, 7)
+void *__drmm_encoder_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type,
+ const char *name, ...);
+
+/**
+ * drmm_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Allocates and initializes an encoder. Encoder should be subclassed as part of
+ * driver encoder objects. Cleanup is automatically handled through registering
+ * drm_encoder_cleanup() with drmm_add_action().
+ *
+ * The @drm_encoder_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) \
+ ((type *)__drmm_encoder_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ encoder_type, name, ##__VA_ARGS__))
+
+/**
+ * drmm_plain_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * This is a simplified version of drmm_encoder_alloc(), which only allocates
+ * and returns a struct drm_encoder instance, with no subclassing.
+ *
+ * Returns:
+ * Pointer to the new drm_encoder struct, or ERR_PTR on failure.
+ */
+#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \
+ ((struct drm_encoder *) \
+ __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \
+ 0, funcs, encoder_type, name, ##__VA_ARGS__))
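+
+/*
+ * Editor's note: a short sketch of the plain variant (no subclassing, NULL
+ * funcs and name for the defaults; DRM_MODE_ENCODER_TMDS is just an example
+ * encoder type):
+ *
+ *	struct drm_encoder *encoder;
+ *
+ *	encoder = drmm_plain_encoder_alloc(dev, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ *	if (IS_ERR(encoder))
+ *		return PTR_ERR(encoder);
+ */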
+
/**
* drm_encoder_index - find the index of a registered encoder
* @encoder: encoder to find index for
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index a09864f6d684..7214101fd731 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -27,6 +27,8 @@
#ifndef __DRM_ENCODER_SLAVE_H__
#define __DRM_ENCODER_SLAVE_H__
+#include <linux/i2c.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
new file mode 100644
index 000000000000..aa786b828a0a
--- /dev/null
+++ b/include/drm/drm_exec.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef __DRM_EXEC_H__
+#define __DRM_EXEC_H__
+
+#include <linux/compiler.h>
+#include <linux/ww_mutex.h>
+
+#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
+#define DRM_EXEC_IGNORE_DUPLICATES BIT(1)
+
+struct drm_gem_object;
+
+/**
+ * struct drm_exec - Execution context
+ */
+struct drm_exec {
+ /**
+ * @flags: Flags to control locking behavior
+ */
+ u32 flags;
+
+ /**
+ * @ticket: WW ticket used for acquiring locks
+ */
+ struct ww_acquire_ctx ticket;
+
+ /**
+ * @num_objects: number of objects locked
+ */
+ unsigned int num_objects;
+
+ /**
+ * @max_objects: maximum objects in array
+ */
+ unsigned int max_objects;
+
+ /**
+ * @objects: array of the locked objects
+ */
+ struct drm_gem_object **objects;
+
+ /**
+ * @contended: contended GEM object we backed off for
+ */
+ struct drm_gem_object *contended;
+
+ /**
+ * @prelocked: already locked GEM object due to contention
+ */
+ struct drm_gem_object *prelocked;
+};
+
+/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
+ * drm_exec_for_each_locked_object - iterate over all the locked objects
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object.
+ */
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
+
+/**
+ * drm_exec_until_all_locked - loop until all GEM objects are locked
+ * @exec: drm_exec object
+ *
+ * Core functionality of the drm_exec object. Loops until all GEM objects are
+ * locked and no more contention exists. At the beginning of the loop it is
+ * guaranteed that no GEM object is locked.
+ *
+ * Since labels can't be defined local to the loop's body, we use a jump pointer
+ * to make sure that the retry is only used from within the loop's body.
+ */
+#define drm_exec_until_all_locked(exec) \
+__PASTE(__drm_exec_, __LINE__): \
+ for (void *__drm_exec_retry_ptr; ({ \
+ __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+ (void)__drm_exec_retry_ptr; \
+ drm_exec_cleanup(exec); \
+ });)
+
+/**
+ * drm_exec_retry_on_contention - restart the loop to grab all locks
+ * @exec: drm_exec object
+ *
+ * Control flow helper to continue when contention was detected and we need to
+ * clean up and restart the loop to prepare all GEM objects.
+ */
+#define drm_exec_retry_on_contention(exec) \
+ do { \
+ if (unlikely(drm_exec_is_contended(exec))) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
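+
+/*
+ * Editor's note: putting the pieces together, the intended calling pattern
+ * looks roughly like the sketch below; boA and boB stand in for the caller's
+ * GEM objects:
+ *
+ *	struct drm_exec exec;
+ *	int ret;
+ *
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ *	drm_exec_until_all_locked(&exec) {
+ *		ret = drm_exec_prepare_obj(&exec, boA, 1);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (ret)
+ *			goto error;
+ *
+ *		ret = drm_exec_prepare_obj(&exec, boB, 1);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (ret)
+ *			goto error;
+ *	}
+ *
+ *	// all objects are locked here; submit work, add fences, etc.
+ * error:
+ *	drm_exec_fini(&exec);
+ */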
+
+/**
+ * drm_exec_is_contended - check for contention
+ * @exec: drm_exec object
+ *
+ * Returns true if the drm_exec object has run into some contention while
+ * locking a GEM object and needs to clean up.
+ */
+static inline bool drm_exec_is_contended(struct drm_exec *exec)
+{
+ return !!exec->contended;
+}
+
+void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned nr);
+void drm_exec_fini(struct drm_exec *exec);
+bool drm_exec_cleanup(struct drm_exec *exec);
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences);
+int drm_exec_prepare_array(struct drm_exec *exec,
+ struct drm_gem_object **objects,
+ unsigned int num_objects,
+ unsigned int num_fences);
+
+#endif
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
deleted file mode 100644
index 795aea1d0a25..000000000000
--- a/include/drm/drm_fb_cma_helper.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_FB_CMA_HELPER_H__
-#define __DRM_FB_CMA_HELPER_H__
-
-#include <linux/types.h>
-
-struct drm_framebuffer;
-struct drm_plane_state;
-
-struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
-
-dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
- struct drm_plane_state *state,
- unsigned int plane);
-
-#endif
-
diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h
new file mode 100644
index 000000000000..d5e036c57801
--- /dev/null
+++ b/include/drm/drm_fb_dma_helper.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_FB_DMA_HELPER_H__
+#define __DRM_FB_DMA_HELPER_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_framebuffer;
+struct drm_plane_state;
+
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+
+dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
+ struct drm_plane_state *state,
+ unsigned int plane);
+
+void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state);
+
+#endif
+
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 306aa3a60be9..375737fd6c36 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -30,18 +30,12 @@
#ifndef DRM_FB_HELPER_H
#define DRM_FB_HELPER_H
+struct drm_clip_rect;
struct drm_fb_helper;
+#include <linux/fb.h>
+
#include <drm/drm_client.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_device.h>
-#include <linux/kgdb.h>
-#include <linux/vgaarb.h>
-
-enum mode_set_atomic {
- LEAVE_ATOMIC_MODE_SET,
- ENTER_ATOMIC_MODE_SET,
-};
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
@@ -91,6 +85,20 @@ struct drm_fb_helper_funcs {
*/
int (*fb_probe)(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
+
+ /**
+ * @fb_dirty:
+ *
+ * Driver callback to update the framebuffer memory. If set, fbdev
+	 * emulation will invoke this callback at regular intervals after
+ * the framebuffer has been written.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ * 0 on success, or an error code otherwise.
+ */
+ int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip);
};
/**
@@ -98,12 +106,12 @@ struct drm_fb_helper_funcs {
* @fb: Scanout framebuffer object
* @dev: DRM device
* @funcs: driver callbacks for fb helper
- * @fbdev: emulated fbdev device info struct
+ * @info: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
- * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
- * the screen buffer
- * @dirty_lock: spinlock protecting @dirty_clip
- * @dirty_work: worker used to flush the framebuffer
+ * @damage_clip: clip rectangle used with deferred_io to accumulate damage to
+ * the screen buffer
+ * @damage_lock: spinlock protecting @damage_clip
+ * @damage_work: worker used to flush the framebuffer
* @resume_work: worker used during resume if the console lock is already taken
*
* This is the main structure used by the fbdev helpers. Drivers supporting
@@ -129,11 +137,11 @@ struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
const struct drm_fb_helper_funcs *funcs;
- struct fb_info *fbdev;
+ struct fb_info *info;
u32 pseudo_palette[17];
- struct drm_clip_rect dirty_clip;
- spinlock_t dirty_lock;
- struct work_struct dirty_work;
+ struct drm_clip_rect damage_clip;
+ spinlock_t damage_lock;
+ struct work_struct damage_work;
struct work_struct resume_work;
/**
@@ -186,6 +194,18 @@ struct drm_fb_helper {
* See also: @deferred_setup
*/
int preferred_bpp;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+ /**
+ * @fbdefio:
+ *
+ * Temporary storage for the driver's FB deferred I/O handler. If the
+ * driver uses the DRM fbdev emulation layer, this is set by the core
+	 * to a generic deferred I/O handler if the driver prefers to use
+	 * a shadow buffer.
+ */
+ struct fb_deferred_io fbdefio;
+#endif
};
static inline struct drm_fb_helper *
@@ -212,7 +232,9 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
const struct drm_fb_helper_funcs *funcs);
+void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper);
int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
@@ -224,33 +246,17 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
-struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
-void drm_fb_helper_deferred_io(struct fb_info *info,
- struct list_head *pagelist);
-
-ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos);
-ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos);
-
-void drm_fb_helper_sys_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-void drm_fb_helper_sys_copyarea(struct fb_info *info,
- const struct fb_copyarea *area);
-void drm_fb_helper_sys_imageblit(struct fb_info *info,
- const struct fb_image *image);
+void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len);
+void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height);
-void drm_fb_helper_cfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-void drm_fb_helper_cfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area);
-void drm_fb_helper_cfb_imageblit(struct fb_info *info,
- const struct fb_image *image);
+void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
@@ -262,19 +268,21 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
-int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
-
-void drm_fbdev_generic_setup(struct drm_device *dev,
- unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
- struct drm_fb_helper *helper,
- const struct drm_fb_helper_funcs *funcs)
+ struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
+ const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
{
}
@@ -323,12 +331,16 @@ drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
}
static inline struct fb_info *
-drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
return NULL;
}
-static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+static inline void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
}
@@ -356,55 +368,6 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info,
{
}
-static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
- char __user *buf, size_t count,
- loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
-}
-
-static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
-}
-
-static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
-}
-
-static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
-}
-
-static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
-}
-
-static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
-}
-
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
bool suspend)
{
@@ -420,8 +383,7 @@ static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
-static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
- int bpp_sel)
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
return 0;
}
@@ -443,62 +405,6 @@ static inline void drm_fb_helper_lastclose(struct drm_device *dev)
static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
}
-
-static inline void
-drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{
-}
-
#endif
-/**
- * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
- * @a: memory range, users of which are to be removed
- * @name: requesting driver name
- * @primary: also kick vga16fb if present
- *
- * This function removes framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a is NULL all such devices are
- * removed.
- */
-static inline int
-drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
-{
-#if IS_REACHABLE(CONFIG_FB)
- return remove_conflicting_framebuffers(a, name, primary);
-#else
- return 0;
-#endif
-}
-
-/**
- * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
- * @pdev: PCI device
- * @name: requesting driver name
- *
- * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for any of @pdev's memory bars.
- *
- * The function assumes that PCI device with shadowed ROM drives a primary
- * display and so kicks out vga16fb.
- */
-static inline int
-drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name)
-{
- int ret = 0;
-
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, name);
-#endif
- if (ret == 0)
- ret = vga_remove_vgacon(pdev);
- return ret;
-}
-
#endif
diff --git a/include/drm/drm_fbdev_dma.h b/include/drm/drm_fbdev_dma.h
new file mode 100644
index 000000000000..2da7ee784133
--- /dev/null
+++ b/include/drm/drm_fbdev_dma.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_DMA_H
+#define DRM_FBDEV_DMA_H
+
+struct drm_device;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp);
+#else
+static inline void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{ }
+#endif
+
+#endif
diff --git a/include/drm/drm_fbdev_generic.h b/include/drm/drm_fbdev_generic.h
new file mode 100644
index 000000000000..75799342098d
--- /dev/null
+++ b/include/drm/drm_fbdev_generic.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_GENERIC_H
+#define DRM_FBDEV_GENERIC_H
+
+struct drm_device;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
+#else
+static inline void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{ }
+#endif
+
+#endif
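
[Editor's note: both fbdev setup helpers are intended to be called once, after drm_dev_register(). A one-line usage sketch:

	drm_fbdev_generic_setup(dev, 32);	/* preferred bpp; 0 picks the default */
]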
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 716990bace10..ab230d3af138 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -41,6 +41,7 @@
struct dma_fence;
struct drm_file;
struct drm_device;
+struct drm_printer;
struct device;
struct file;
@@ -49,13 +50,17 @@ struct file;
* header include loops we need it here for now.
*/
-/* Note that the order of this enum is ABI (it determines
+/* Note that the values of this enum are ABI (it determines
* /dev/dri/renderD* numbers).
+ *
+ * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to
+ * be implemented before we hit any future limit.
*/
enum drm_minor_type {
- DRM_MINOR_PRIMARY,
- DRM_MINOR_CONTROL,
- DRM_MINOR_RENDER,
+ DRM_MINOR_PRIMARY = 0,
+ DRM_MINOR_CONTROL = 1,
+ DRM_MINOR_RENDER = 2,
+ DRM_MINOR_ACCEL = 32,
};
/**
@@ -70,14 +75,12 @@ enum drm_minor_type {
struct drm_minor {
/* private: */
int index; /* Minor device number */
- int type; /* Control or render */
+ int type; /* Control or render or accel */
struct device *kdev; /* Linux device */
struct drm_device *dev;
+ struct dentry *debugfs_symlink;
struct dentry *debugfs_root;
-
- struct list_head debugfs_list;
- struct mutex debugfs_lock; /* Protects debugfs_list. */
};
/**
@@ -224,19 +227,57 @@ struct drm_file {
bool is_master;
/**
+ * @supports_virtualized_cursor_plane:
+ *
+ * This client is capable of handling the cursor plane with the
+ * restrictions imposed on it by the virtualized drivers.
+ *
+	 * This implies that the cursor plane has to behave like a cursor,
+	 * i.e. track cursor movement. It also requires the client to set the
+	 * hotspot properties on the cursor plane.
+ */
+ bool supports_virtualized_cursor_plane;
+
+ /**
* @master:
*
- * Master this node is currently associated with. Only relevant if
- * drm_is_primary_client() returns true. Note that this only
- * matches &drm_device.master if the master is the currently active one.
+ * Master this node is currently associated with. Protected by struct
+ * &drm_device.master_mutex, and serialized by @master_lookup_lock.
+ *
+ * Only relevant if drm_is_primary_client() returns true. Note that
+ * this only matches &drm_device.master if the master is the currently
+ * active one.
+ *
+ * To update @master, both &drm_device.master_mutex and
+ * @master_lookup_lock need to be held, therefore holding either of
+ * them is safe and enough for the read side.
+ *
+ * When dereferencing this pointer, either hold struct
+ * &drm_device.master_mutex for the duration of the pointer's use, or
+ * use drm_file_get_master() if struct &drm_device.master_mutex is not
+ * currently held and there is no other need to hold it. This prevents
+ * @master from being freed during use.
*
* See also @authentication and @is_master and the :ref:`section on
* primary nodes and authentication <drm_primary_node>`.
*/
struct drm_master *master;
- /** @pid: Process that opened this file. */
- struct pid *pid;
+ /** @master_lookup_lock: Serializes @master. */
+ spinlock_t master_lookup_lock;
+
+ /**
+ * @pid: Process that is using this file.
+ *
+ * Must only be dereferenced under a rcu_read_lock or equivalent.
+ *
+ * Updates are guarded with dev->filelist_mutex and reference must be
+ * dropped after a RCU grace period to accommodate lockless readers.
+ */
+ struct pid __rcu *pid;
+
+ /** @client_id: A unique id for fdinfo */
+ u64 client_id;
/** @magic: Authentication magic, see @authenticated. */
drm_magic_t magic;
@@ -345,11 +386,6 @@ struct drm_file {
* Per-file buffer caches used by the PRIME buffer sharing code.
*/
struct drm_prime_file_private prime;
-
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
};
/**
@@ -381,7 +417,25 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_RENDER;
}
+/**
+ * drm_is_accel_client - is this an open file of the compute acceleration node
+ * @file_priv: DRM file
+ *
+ * Returns true if this is an open file of the compute acceleration node, i.e.
+ * &drm_file.minor of @file_priv is an accel minor.
+ *
+ * See also :doc:`Introduction to compute accelerators subsystem
+ * </accel/introduction>`.
+ */
+static inline bool drm_is_accel_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_ACCEL;
+}
+
+void drm_file_update_pid(struct drm_file *);
+
int drm_open(struct inode *inode, struct file *filp);
+int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
@@ -399,16 +453,38 @@ void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+ struct drm_pending_event *e,
+ ktime_t timestamp);
-struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+/**
+ * struct drm_memory_stats - GEM object stats associated
+ * @shared: Total size of GEM objects shared between processes
+ * @private: Total size of GEM objects
+ * @resident: Total size of GEM objects backing pages
+ * @purgeable: Total size of GEM objects that can be purged (resident and not active)
+ * @active: Total size of GEM objects active on one or more engines
+ *
+ * Used by drm_print_memory_stats()
+ */
+struct drm_memory_stats {
+ u64 shared;
+ u64 private;
+ u64 resident;
+ u64 purgeable;
+ u64 active;
+};
+
+enum drm_gem_object_status;
-#ifdef CONFIG_MMU
-struct drm_vma_offset_manager;
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr);
-#endif /* CONFIG_MMU */
+void drm_print_memory_stats(struct drm_printer *p,
+ const struct drm_memory_stats *stats,
+ enum drm_gem_object_status supported_status,
+ const char *region);
+void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file);
+void drm_show_fdinfo(struct seq_file *m, struct file *f);
+
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
#endif /* _DRM_FILE_H_ */
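
The fdinfo plumbing added above has three layers: drivers fill a struct
drm_memory_stats (or let drm_show_memory_stats() aggregate it from their GEM
objects' status/rss callbacks), and drm_show_fdinfo() is the seq_file entry
point. A minimal sketch of the driver side, assuming hypothetical foo_*
names, an assumed to_foo_bo() upcast and a madvise-style purgeable flag:

    /* hypothetical &drm_gem_object_funcs.status implementation */
    static enum drm_gem_object_status foo_gem_status(struct drm_gem_object *obj)
    {
            struct foo_bo *bo = to_foo_bo(obj);     /* assumed upcast helper */
            enum drm_gem_object_status status = 0;

            if (bo->pages)                          /* backing pages present */
                    status |= DRM_GEM_OBJECT_RESIDENT;
            if (bo->madv > 0)                       /* marked purgeable by userspace */
                    status |= DRM_GEM_OBJECT_PURGEABLE;
            return status;
    }

    /* hypothetical &drm_driver.show_fdinfo implementation; the common
     * per-client memory counters come from the helper */
    static void foo_show_fdinfo(struct drm_printer *p, struct drm_file *file)
    {
            drm_show_memory_stats(p, file);
    }
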
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 553210c02ee0..0c9f917a4d4b 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -25,6 +25,7 @@
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
+#include <linux/kernel.h>
#include <linux/math64.h>
typedef union dfixed {
@@ -70,6 +71,7 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
}
#define DRM_FIXED_POINT 32
+#define DRM_FIXED_POINT_HALF 16
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
@@ -86,9 +88,14 @@ static inline int drm_fixp2int(s64 a)
return ((s64)a) >> DRM_FIXED_POINT;
}
+static inline int drm_fixp2int_round(s64 a)
+{
+	return drm_fixp2int(a + DRM_FIXED_ONE / 2);
+}
+
static inline int drm_fixp2int_ceil(s64 a)
{
- if (a > 0)
+ if (a >= 0)
return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
else
return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
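
For reference, a standalone userspace sketch of the 32.32 arithmetic behind
drm_fixp2int() and drm_fixp2int_round(), reimplemented here so it compiles
outside the kernel: rounding adds half of one integer step before truncating.

    #include <stdio.h>
    #include <stdint.h>

    #define FIXED_POINT 32
    #define FIXED_ONE   (1ULL << FIXED_POINT)

    static int fixp2int(int64_t a)       { return a >> FIXED_POINT; }
    static int fixp2int_round(int64_t a) { return fixp2int(a + FIXED_ONE / 2); }

    int main(void)
    {
            int64_t v = 5 * (int64_t)FIXED_ONE / 2;  /* 2.5 in 32.32 */

            /* prints "trunc=2 round=3" */
            printf("trunc=%d round=%d\n", fixp2int(v), fixp2int_round(v));
            return 0;
    }
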
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 21c3d512d25c..1eef3283a109 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -31,11 +31,10 @@
/**
* DOC: flip utils
*
- * Util to queue up work to run from work-queue context after flip/vblank.
+ * Utility to queue up work to run from work-queue context after flip/vblank.
 * Typically this can be used to defer unref of framebuffers, cursor
- * bo's, etc until after vblank. The APIs are all thread-safe.
- * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
- * in atomic context.
+ * BOs, etc. until after vblank. The APIs are all thread-safe. Moreover,
+ * drm_flip_work_commit() can be called in atomic context.
*/
struct drm_flip_work;
@@ -52,16 +51,6 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
- * struct drm_flip_task - flip work task
- * @node: list entry element
- * @data: data to pass to &drm_flip_work.func
- */
-struct drm_flip_task {
- struct list_head node;
- void *data;
-};
-
-/**
* struct drm_flip_work - flip work queue
* @name: debug name
* @func: callback fxn called for each committed item
@@ -79,9 +68,6 @@ struct drm_flip_work {
spinlock_t lock;
};
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
-void drm_flip_work_queue_task(struct drm_flip_work *work,
- struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
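
A hedged sketch of the surviving API in use, deferring a framebuffer unref
until after vblank; the foo_* names and the choice of system_unbound_wq are
illustrative only:

    static void foo_unref_fb(struct drm_flip_work *work, void *val)
    {
            drm_framebuffer_put(val);    /* runs later, in work-queue context */
    }

    /* driver setup */
    drm_flip_work_init(&foo->unref_work, "fb unref", foo_unref_fb);

    /* atomic/IRQ context: stash the old framebuffer for deferred release */
    drm_flip_work_queue(&foo->unref_work, old_fb);

    /* vblank handler: hand all queued items to the workqueue */
    drm_flip_work_commit(&foo->unref_work, system_unbound_wq);
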
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 5f9e37032468..f13b34e0b752 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -6,26 +6,120 @@
#ifndef __LINUX_DRM_FORMAT_HELPER_H
#define __LINUX_DRM_FORMAT_HELPER_H
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_rect;
-void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool cached);
-void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
+struct iosys_map;
+
+/**
+ * struct drm_format_conv_state - Stores format-conversion state
+ *
+ * DRM helpers for format conversion store temporary state in
+ * struct drm_format_conv_state. The buffer's resources can be reused
+ * among multiple conversion operations.
+ *
+ * All fields are considered private.
+ */
+struct drm_format_conv_state {
+ struct {
+ void *mem;
+ size_t size;
+ bool preallocated;
+ } tmp;
+};
+
+#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \
+ .tmp = { \
+ .mem = (_mem), \
+ .size = (_size), \
+ .preallocated = (_preallocated), \
+ } \
+ }
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state
+ *
+ * Initializes an instance of struct drm_format_conv_state to default values.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT \
+ __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false)
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state
+ * @_mem: The preallocated memory area
+ * @_size: The number of bytes in _mem
+ *
+ * Initializes an instance of struct drm_format_conv_state to preallocated
+ * storage. The caller is responsible for releasing the provided memory range.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \
+ __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true)
+
+void drm_format_conv_state_init(struct drm_format_conv_state *state);
+void drm_format_conv_state_copy(struct drm_format_conv_state *state,
+ const struct drm_format_conv_state *old_state);
+void *drm_format_conv_state_reserve(struct drm_format_conv_state *state,
+ size_t new_size, gfp_t flags);
+void drm_format_conv_state_release(struct drm_format_conv_state *state);
+
+unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
+ const struct drm_rect *clip);
+
+void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool cached,
+ struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state,
+ bool swab);
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+
+int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+
+void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+
+size_t drm_fb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
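
A sketch of how the conversion state threads through a blit, assuming a
hypothetical driver with a static scratch buffer and src/dst mappings that
are already set up (SZ_64K comes from linux/sizes.h):

    static u8 foo_conv_buf[SZ_64K];    /* hypothetical scratch storage */

    struct drm_format_conv_state state =
            DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(foo_conv_buf, sizeof(foo_conv_buf));

    /* convert one damage rectangle from XRGB8888 to RGB565, no byte swap */
    drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, &src_map, fb, &clip, &state, false);

    /* frees only what the helpers allocated; preallocated memory is kept */
    drm_format_conv_state_release(&state);
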
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 156b122c0ad5..ccf91daa4307 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -22,9 +22,15 @@
#ifndef __DRM_FOURCC_H__
#define __DRM_FOURCC_H__
+#include <linux/math.h>
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/**
+ * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have
+ */
+#define DRM_FORMAT_MAX_PLANES 4u
+
/*
* DRM formats are little endian. Define host endian variants for the
* most common formats here, to reduce the #ifdefs needed in drivers.
@@ -78,7 +84,7 @@ struct drm_format_info {
* triplet @char_per_block, @block_w, @block_h for better
* describing the pixel format.
*/
- u8 cpp[4];
+ u8 cpp[DRM_FORMAT_MAX_PLANES];
/**
* @char_per_block:
@@ -104,7 +110,7 @@ struct drm_format_info {
* information from their drm_mode_config.get_format_info hook
* if they want the core to be validating the pitch.
*/
- u8 char_per_block[4];
+ u8 char_per_block[DRM_FORMAT_MAX_PLANES];
};
/**
@@ -113,7 +119,7 @@ struct drm_format_info {
* Block width in pixels, this is intended to be accessed through
* drm_format_info_block_width()
*/
- u8 block_w[4];
+ u8 block_w[DRM_FORMAT_MAX_PLANES];
/**
* @block_h:
@@ -121,7 +127,7 @@ struct drm_format_info {
* Block height in pixels, this is intended to be accessed through
* drm_format_info_block_height()
*/
- u8 block_h[4];
+ u8 block_h[DRM_FORMAT_MAX_PLANES];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
@@ -133,14 +139,9 @@ struct drm_format_info {
/** @is_yuv: Is it a YUV format? */
bool is_yuv;
-};
-/**
- * struct drm_format_name_buf - name of a DRM format
- * @str: string buffer containing the format name
- */
-struct drm_format_name_buf {
- char str[32];
+ /** @is_color_indexed: Is it a color-indexed format? */
+ bool is_color_indexed;
};
/**
@@ -279,7 +280,7 @@ int drm_format_info_plane_width(const struct drm_format_info *info, int width,
if (plane == 0)
return width;
- return width / info->hsub;
+ return DIV_ROUND_UP(width, info->hsub);
}
/**
@@ -301,7 +302,7 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height,
if (plane == 0)
return height;
- return height / info->vsub;
+ return DIV_ROUND_UP(height, info->vsub);
}
const struct drm_format_info *__drm_format_info(u32 format);
@@ -316,8 +317,8 @@ unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
+unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
-const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
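
The switch from plain division to DIV_ROUND_UP() matters for odd-sized
buffers of subsampled formats. A standalone illustration (the macro is
restated so this compiles in userspace):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* NV12-style 2x2 chroma subsampling with an odd width */
            int width = 101, hsub = 2;

            /* truncation loses the last chroma sample, DIV_ROUND_UP keeps it:
             * prints "trunc=50 ceil=51" */
            printf("trunc=%d ceil=%d\n", width / hsub, DIV_ROUND_UP(width, hsub));
            return 0;
    }
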
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index be658ebbec72..668077009fce 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -27,12 +27,12 @@
#include <linux/list.h>
#include <linux/sched.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_mode_object.h>
struct drm_clip_rect;
struct drm_device;
struct drm_file;
-struct drm_format_info;
struct drm_framebuffer;
struct drm_gem_object;
@@ -147,17 +147,17 @@ struct drm_framebuffer {
* @pitches: Line stride per buffer. For userspace created object this
* is copied from drm_mode_fb_cmd2.
*/
- unsigned int pitches[4];
+ unsigned int pitches[DRM_FORMAT_MAX_PLANES];
/**
* @offsets: Offset from buffer start to the actual pixel data in bytes,
* per buffer. For userspace created object this is copied from
* drm_mode_fb_cmd2.
*
* Note that this is a linear offset and does not take into account
- * tiling or buffer laytou per @modifier. It meant to be used when the
- * actual pixel data for this framebuffer plane starts at an offset,
- * e.g. when multiple planes are allocated within the same backing
- * storage buffer object. For tiled layouts this generally means it
+ * tiling or buffer layout per @modifier. It is meant to be used when
+ * the actual pixel data for this framebuffer plane starts at an offset,
+ * e.g. when multiple planes are allocated within the same backing
+ * storage buffer object. For tiled layouts this generally means that
* @offsets must at least be tile-size aligned, but hardware often has
* stricter requirements.
*
@@ -165,7 +165,7 @@ struct drm_framebuffer {
* data (even for linear buffers). Specifying an x/y pixel offset is
* instead done through the source rectangle in &struct drm_plane_state.
*/
- unsigned int offsets[4];
+ unsigned int offsets[DRM_FORMAT_MAX_PLANES];
/**
* @modifier: Data layout modifier. This is used to describe
* tiling, or also special layouts (like compression) of auxiliary
@@ -189,18 +189,6 @@ struct drm_framebuffer {
*/
int flags;
/**
- * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_x;
- /**
- * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_y;
- /**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
@@ -210,7 +198,7 @@ struct drm_framebuffer {
* This is used by the GEM framebuffer helpers, see e.g.
* drm_gem_fb_create().
*/
- struct drm_gem_object *obj[4];
+ struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES];
};
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
@@ -292,11 +280,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
-int drm_framebuffer_plane_width(int width,
- const struct drm_framebuffer *fb, int plane);
-int drm_framebuffer_plane_height(int height,
- const struct drm_framebuffer *fb, int plane);
-
/**
* struct drm_afbc_framebuffer - a special afbc frame buffer object
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 337a48321705..2ebec3984cd4 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -36,12 +36,34 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <drm/drm_vma_manager.h>
+struct iosys_map;
struct drm_gem_object;
/**
+ * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
+ * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (i.e. not unpinned)
+ * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
+ *
+ * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
+ * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if
+ * it still active or not resident, in which case drm_show_fdinfo() will not
+ * account for it as purgeable. So drivers do not need to check if the buffer
+ * is idle and resident to return this bit. (Ie. userspace can mark a buffer
+ * as purgeable even while it is still busy on the GPU.. it does not _actually_
+ * become puregeable until it becomes idle. The status gem object func does
+ * not need to consider this.)
+ */
+enum drm_gem_object_status {
+ DRM_GEM_OBJECT_RESIDENT = BIT(0),
+ DRM_GEM_OBJECT_PURGEABLE = BIT(1),
+};
+
+/**
* struct drm_gem_object_funcs - GEM object functions
*/
struct drm_gem_object_funcs {
@@ -138,7 +160,7 @@ struct drm_gem_object_funcs {
*
* This callback is optional.
*/
- void *(*vmap)(struct drm_gem_object *obj);
+ int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);
/**
* @vunmap:
@@ -148,7 +170,7 @@ struct drm_gem_object_funcs {
*
* This callback is optional.
*/
- void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+ void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);
/**
* @mmap:
@@ -164,6 +186,38 @@ struct drm_gem_object_funcs {
int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
+ * @evict:
+ *
+	 * Evicts the GEM object from memory. Used by the drm_gem_evict()
+	 * helper. Returns 0 on success, -errno otherwise.
+ *
+ * This callback is optional.
+ */
+ int (*evict)(struct drm_gem_object *obj);
+
+ /**
+ * @status:
+ *
+ * The optional status callback can return additional object state
+ * which determines which stats the object is counted against. The
+ * callback is called under table_lock. Racing against object status
+ * change is "harmless", and the callback can expect to not race
+ * against object destruction.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ enum drm_gem_object_status (*status)(struct drm_gem_object *obj);
+
+ /**
+ * @rss:
+ *
+ * Return resident size of the object in physical memory.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ size_t (*rss)(struct drm_gem_object *obj);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
@@ -174,6 +228,41 @@ struct drm_gem_object_funcs {
};
/**
+ * struct drm_gem_lru - A simple LRU helper
+ *
+ * A helper for tracking GEM objects in a given state, to aid in
+ * a driver's shrinker implementation. Tracks the count of pages
+ * for lockless &shrinker.count_objects, and provides
+ * &drm_gem_lru_scan for a driver's &shrinker.scan_objects
+ * implementation.
+ */
+struct drm_gem_lru {
+ /**
+ * @lock:
+ *
+ * Lock protecting movement of GEM objects between LRUs. All
+ * LRUs that the object can move between should be protected
+ * by the same lock.
+ */
+ struct mutex *lock;
+
+ /**
+ * @count:
+ *
+ * The total number of backing pages of the GEM objects in
+ * this LRU.
+ */
+ long count;
+
+ /**
+ * @list:
+ *
+ * The LRU list.
+ */
+ struct list_head list;
+};
+
+/**
* struct drm_gem_object - GEM buffer object
*
* This structure defines the generic parts for GEM buffer objects, which are
@@ -216,7 +305,7 @@ struct drm_gem_object {
*
* SHMEM file node used as backing storage for swappable buffer objects.
* GEM also supports driver private objects with driver-specific backing
- * storage (contiguous CMA memory, special reserved blocks). In this
+ * storage (contiguous DMA memory, special reserved blocks). In this
* case @filp is NULL.
*/
struct file *filp;
@@ -272,7 +361,7 @@ struct drm_gem_object {
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
*
- * The &drm_driver.gem_free_object_unlocked callback is responsible for
+ * The &drm_gem_object_funcs.free callback is responsible for
* cleaning up the dma_buf attachment and references acquired at import
* time.
*
@@ -302,6 +391,22 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
+ * @gpuva:
+ *
+ * Provides the list of GPU VAs attached to this GEM object.
+ *
+	 * Drivers should lock list accesses with the GEM's &dma_resv lock
+ * (&drm_gem_object.resv) or a custom lock if one is provided.
+ */
+ struct {
+ struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *lock_dep_map;
+#endif
+ } gpuva;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -311,9 +416,40 @@ struct drm_gem_object {
*
*/
const struct drm_gem_object_funcs *funcs;
+
+ /**
+ * @lru_node:
+ *
+ * List node in a &drm_gem_lru.
+ */
+ struct list_head lru_node;
+
+ /**
+ * @lru:
+ *
+ * The current LRU list that the GEM object is on.
+ */
+ struct drm_gem_lru *lru;
};
/**
+ * DRM_GEM_FOPS - Default drm GEM file operations
+ *
+ * This macro provides a shorthand for setting the GEM file ops in the
+ * &file_operations structure. If all you need are the default ops, use
+ * DEFINE_DRM_GEM_FOPS instead.
+ */
+#define DRM_GEM_FOPS \
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap
+
+/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
* @name: name for the generated structure
*
@@ -329,14 +465,7 @@ struct drm_gem_object {
#define DEFINE_DRM_GEM_FOPS(name) \
static const struct file_operations name = {\
.owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_mmap,\
+ DRM_GEM_FOPS,\
}
void drm_gem_object_release(struct drm_gem_object *obj);
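
Factoring the defaults out into DRM_GEM_FOPS lets a driver that needs one
extra entry spell the table out without repeating the boilerplate. A
hypothetical driver overriding .mmap might look like this (later designated
initializers win in C, so foo_gem_mmap replaces the drm_gem_mmap entry
pulled in by the macro):

    static const struct file_operations foo_fops = {
            .owner = THIS_MODULE,
            DRM_GEM_FOPS,
            .mmap = foo_gem_mmap,    /* hypothetical driver-specific mmap */
    };
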
@@ -345,6 +474,7 @@ int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
@@ -383,8 +513,6 @@ drm_gem_object_put(struct drm_gem_object *obj)
__drm_gem_object_put(obj);
}
-void drm_gem_object_put_locked(struct drm_gem_object *obj);
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -399,6 +527,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
+int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
@@ -408,15 +539,95 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
-int drm_gem_fence_array_add(struct xarray *fence_array,
- struct dma_fence *fence);
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
- struct drm_gem_object *obj,
- bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
+
+void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+void drm_gem_lru_remove(struct drm_gem_object *obj);
+void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj));
+
+int drm_gem_evict(struct drm_gem_object *obj);
+
+/**
+ * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
+ * @obj: the GEM object in question
+ *
+ * This helper should only be used for fdinfo shared memory stats to determine
+ * if a GEM object is shared.
+ */
+static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
+{
+ return (obj->handle_count > 1) || obj->dma_buf;
+}
+
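
A hedged sketch of the shrinker half these LRU helpers are designed for;
the foo_* names, the purgeable LRU and SHRINK_STOP (from linux/shrinker.h)
are wired up here only for illustration:

    static bool foo_purge(struct drm_gem_object *obj)
    {
            return drm_gem_evict(obj) == 0;    /* calls &drm_gem_object_funcs.evict */
    }

    static unsigned long foo_scan(struct shrinker *shrinker,
                                  struct shrink_control *sc)
    {
            unsigned long remaining = 0;
            unsigned long freed;

            freed = drm_gem_lru_scan(&foo->lru_purgeable, sc->nr_to_scan,
                                     &remaining, foo_purge);

            return freed ?: SHRINK_STOP;
    }
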
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock, but with a custom lock.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+ if (!WARN((obj)->gpuva.lock_dep_map, \
+ "GEM GPUVA lock should be set only once.")) \
+ (obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+ lockdep_assert((obj)->gpuva.lock_dep_map ? \
+ lock_is_held((obj)->gpuva.lock_dep_map) : \
+ dma_resv_held((obj)->resv))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+ INIT_LIST_HEAD(&obj->gpuva.list);
+}
+
+/**
+ * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
+
+/**
+ * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
+ * &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @next__: &drm_gpuvm_bo to store the next iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
#endif /* __DRM_GEM_H__ */
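
A sketch of the gpuva plumbing under a driver-private lock; foo_vm and the
loop body are assumptions:

    /* once, when the object is created */
    drm_gem_gpuva_init(obj);
    drm_gem_gpuva_set_lock(obj, &foo_vm->lock);

    /* later, when walking the mappings */
    mutex_lock(&foo_vm->lock);
    drm_gem_gpuva_assert_lock_held(obj);
    drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
            /* inspect or update each VM's view of this BO */
    }
    mutex_unlock(&foo_vm->lock);
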
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
new file mode 100644
index 000000000000..3e01c619a25e
--- /dev/null
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DRM_GEM_ATOMIC_HELPER_H__
+#define __DRM_GEM_ATOMIC_HELPER_H__
+
+#include <linux/iosys-map.h>
+
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+
+struct drm_simple_display_pipe;
+
+/*
+ * Plane Helpers
+ */
+
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
+
+/*
+ * Helpers for planes with shadow buffers
+ */
+
+/**
+ * DRM_SHADOW_PLANE_MAX_WIDTH - Maximum width of a plane's shadow buffer in pixels
+ *
+ * For drivers with shadow planes, the maximum width of the framebuffer is
+ * usually independent of hardware limitations. Drivers can initialize struct
+ * drm_mode_config.max_width from DRM_SHADOW_PLANE_MAX_WIDTH.
+ */
+#define DRM_SHADOW_PLANE_MAX_WIDTH (4096u)
+
+/**
+ * DRM_SHADOW_PLANE_MAX_HEIGHT - Maximum height of a plane's shadow buffer in scanlines
+ *
+ * For drivers with shadow planes, the maximum height of the framebuffer is
+ * usually independent of hardware limitations. Drivers can initialize struct
+ * drm_mode_config.max_height from DRM_SHADOW_PLANE_MAX_HEIGHT.
+ */
+#define DRM_SHADOW_PLANE_MAX_HEIGHT (4096u)
+
+/**
+ * struct drm_shadow_plane_state - plane state for planes with shadow buffers
+ *
+ * For planes that use a shadow buffer, struct drm_shadow_plane_state
+ * provides the regular plane state plus mappings of the shadow buffer
+ * into kernel address space.
+ */
+struct drm_shadow_plane_state {
+ /** @base: plane state */
+ struct drm_plane_state base;
+
+ /**
+ * @fmtcnv_state: Format-conversion state
+ *
+ * Per-plane state for format conversion.
+ * Flags for copying shadow buffers into backend storage. Also holds
+ * temporary storage for format conversion.
+ */
+ struct drm_format_conv_state fmtcnv_state;
+
+ /* Transitional state - do not export or duplicate */
+
+ /**
+	 * @map: Mappings of the plane's framebuffer BOs into kernel address space
+ *
+ * The memory mappings stored in map should be established in the plane's
+ * prepare_fb callback and removed in the cleanup_fb callback.
+ */
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+
+ /**
+ * @data: Address of each framebuffer BO's data
+ *
+ * The address of the data stored in each mapping. This is different
+ * for framebuffers with non-zero offset fields.
+ */
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
+};
+
+/**
+ * to_drm_shadow_plane_state - upcasts from struct drm_plane_state
+ * @state: the plane state
+ */
+static inline struct drm_shadow_plane_state *
+to_drm_shadow_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct drm_shadow_plane_state, base);
+}
+
+void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
+ struct drm_shadow_plane_state *new_shadow_plane_state);
+void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state);
+void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
+ struct drm_shadow_plane_state *shadow_plane_state);
+
+void drm_gem_reset_shadow_plane(struct drm_plane *plane);
+struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
+void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_plane_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_FUNCS \
+ .reset = drm_gem_reset_shadow_plane, \
+ .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
+ .atomic_destroy_state = drm_gem_destroy_shadow_plane_state
+
+int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective
+ * helper functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
+ .begin_fb_access = drm_gem_begin_shadow_fb_access, \
+ .end_fb_access = drm_gem_end_shadow_fb_access
+
+int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *
+drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
+void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_simple_display_pipe_funcs to use the
+ * respective helper functions.
+ */
+#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
+ .begin_fb_access = drm_gem_simple_kms_begin_shadow_fb_access, \
+ .end_fb_access = drm_gem_simple_kms_end_shadow_fb_access, \
+ .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
+ .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
+ .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
+
+#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */
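
A hedged sketch of an atomic_update consuming the shadow mappings;
foo_hw_blit() and the omitted damage handling are stand-ins for driver
specifics:

    static void foo_plane_atomic_update(struct drm_plane *plane,
                                        struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);
            struct drm_shadow_plane_state *shadow =
                    to_drm_shadow_plane_state(new_state);

            /* data[0] already accounts for fb->offsets[0]; the mapping was
             * established by the shadow-plane fb-access helpers */
            foo_hw_blit(&shadow->data[0], new_state->fb);
    }
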
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
deleted file mode 100644
index 2bfa2502607a..000000000000
--- a/include/drm/drm_gem_cma_helper.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_GEM_CMA_HELPER_H__
-#define __DRM_GEM_CMA_HELPER_H__
-
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_gem.h>
-
-struct drm_mode_create_dumb;
-
-/**
- * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
- * @base: base GEM object
- * @paddr: physical address of the backing memory
- * @sgt: scatter/gather table for imported PRIME buffers. The table can have
- * more than one entry but they are guaranteed to have contiguous
- * DMA addresses.
- * @vaddr: kernel virtual address of the backing memory
- */
-struct drm_gem_cma_object {
- struct drm_gem_object base;
- dma_addr_t paddr;
- struct sg_table *sgt;
-
- /* For objects with DMA memory allocated by GEM CMA */
- void *vaddr;
-};
-
-#define to_drm_gem_cma_obj(gem_obj) \
- container_of(gem_obj, struct drm_gem_cma_object, base)
-
-#ifndef CONFIG_MMU
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- .get_unmapped_area = drm_gem_cma_get_unmapped_area,
-#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
-#endif
-
-/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for CMA based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_cma_mmap,\
- DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- }
-
-/* free GEM object */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* set vm_flags and we can change the VM attribute to other one at here */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-
-/* allocate physical memory */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- size_t size);
-
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
-
-#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags);
-#endif
-
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
-
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
-void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-
-struct drm_gem_object *
-drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size);
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
- * @dumb_create_func: callback function for .dumb_create
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure.
- *
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that
- * override the default implementation of &struct rm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
- .gem_create_object = drm_gem_cma_create_object_default_funcs, \
- .dumb_create = (dumb_create_func), \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_cma_prime_mmap
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure.
- *
- * Drivers that come with their own implementation of
- * &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS \
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations
- * ensuring a virtual address
- * on the buffer
- * @dumb_create_func: callback function for .dumb_create
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- *
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that
- * override the default implementation of &struct drm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
- .gem_create_object = drm_gem_cma_create_object_default_funcs, \
- .dumb_create = dumb_create_func, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual
- * address on the buffer
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- *
- * Drivers that come with their own implementation of
- * &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS
- * instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP \
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
-
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
-#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
new file mode 100644
index 000000000000..a827bde494f6
--- /dev/null
+++ b/include/drm/drm_gem_dma_helper.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_GEM_DMA_HELPER_H__
+#define __DRM_GEM_DMA_HELPER_H__
+
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_gem.h>
+
+struct drm_mode_create_dumb;
+
+/**
+ * struct drm_gem_dma_object - GEM object backed by DMA memory allocations
+ * @base: base GEM object
+ * @dma_addr: DMA address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers. The table can have
+ * more than one entry but they are guaranteed to have contiguous
+ * DMA addresses.
+ * @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
+ */
+struct drm_gem_dma_object {
+ struct drm_gem_object base;
+ dma_addr_t dma_addr;
+ struct sg_table *sgt;
+
+ /* For objects with DMA memory allocated by GEM DMA */
+ void *vaddr;
+
+ bool map_noncoherent;
+};
+
+#define to_drm_gem_dma_obj(gem_obj) \
+ container_of(gem_obj, struct drm_gem_dma_object, base)
+
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
+ size_t size);
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj);
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
+ struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj);
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
+ struct iosys_map *map);
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_dma_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_dma_object_free - GEM object function for drm_gem_dma_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_dma_free(). Drivers that employ the DMA helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_dma_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_free(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_print_info(). Drivers that employ the DMA helpers
+ * should use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_dma_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_print_info(dma_obj, p, indent);
+}
+
+/**
+ * drm_gem_dma_object_get_sg_table - GEM object function for drm_gem_dma_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_get_sg_table(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+static inline struct sg_table *drm_gem_dma_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_get_sg_table(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_vmap - GEM object function for drm_gem_dma_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing store.
+ *
+ * This function wraps drm_gem_dma_vmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_vmap(dma_obj, map);
+}
+
+/**
+ * drm_gem_dma_object_mmap - GEM object function for drm_gem_dma_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_dma_mmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_mmap(dma_obj, vma);
+}
+
+/*
+ * Driver ops
+ */
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE - DMA GEM driver operations
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use DRM_GEM_DMA_DRIVER_OPS_VMAP instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS \
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - DMA GEM driver operations
+ * ensuring a virtual address
+ * on the buffer
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS_VMAP for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use DRM_GEM_DMA_DRIVER_OPS
+ * instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP \
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/*
+ * File ops
+ */
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ .get_unmapped_area = drm_gem_dma_get_unmapped_area,
+#else
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_DMA_FOPS() - macro to generate file operations for DMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for DMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_DMA_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap,\
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ }
+
+#endif /* __DRM_GEM_DMA_HELPER_H__ */
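
The minimal glue for a driver built on these helpers might look as follows;
all foo_* identifiers are illustrative:

    DEFINE_DRM_GEM_DMA_FOPS(foo_fops);

    static const struct drm_driver foo_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
            .fops = &foo_fops,
            DRM_GEM_DMA_DRIVER_OPS,    /* dumb_create + PRIME import */
            .name = "foo",
            .desc = "illustrative DMA-helper driver",
    };
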
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 6b013154911d..d302521f3dd4 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -1,6 +1,9 @@
#ifndef __DRM_GEM_FB_HELPER_H__
#define __DRM_GEM_FB_HELPER_H__
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+
struct drm_afbc_framebuffer;
struct drm_device;
struct drm_fb_helper_surface_size;
@@ -9,9 +12,6 @@ struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
-struct drm_plane;
-struct drm_plane_state;
-struct drm_simple_display_pipe;
#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52)
@@ -37,6 +37,12 @@ struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
+int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
+ struct iosys_map *data);
+void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map);
+int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
+void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
+
#define drm_is_afbc(modifier) \
(((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
@@ -44,8 +50,4 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
-int drm_gem_fb_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state);
-int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
#endif
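
Taken together, the new helpers bracket CPU access to a framebuffer's
backing storage. A sketch with error handling compressed; fb is assumed to
be an already-created &struct drm_framebuffer:

    struct iosys_map map[DRM_FORMAT_MAX_PLANES];
    struct iosys_map data[DRM_FORMAT_MAX_PLANES];
    int ret;

    ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
    if (ret)
            return ret;

    ret = drm_gem_fb_vmap(fb, map, data);    /* data[] includes fb->offsets[] */
    if (!ret) {
            /* read pixels through data[0] ... */
            drm_gem_fb_vunmap(fb, map);
    }

    drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
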
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 5381f0c8cf6f..bf0c31aa8fbe 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
struct drm_gem_object base;
/**
- * @pages_lock: Protects the page table and use count
- */
- struct mutex pages_lock;
-
- /**
* @pages: Page table
*/
struct page **pages;
@@ -61,30 +56,11 @@ struct drm_gem_shmem_object {
struct list_head madv_list;
/**
- * @pages_mark_dirty_on_put:
- *
- * Mark pages as dirty when they are put.
- */
- unsigned int pages_mark_dirty_on_put : 1;
-
- /**
- * @pages_mark_accessed_on_put:
- *
- * Mark pages as accessed when they are put.
- */
- unsigned int pages_mark_accessed_on_put : 1;
-
- /**
* @sgt: Scatter/gather table for imported PRIME buffers
*/
struct sg_table *sgt;
/**
- * @vmap_lock: Protects the vmap address and use count
- */
- struct mutex vmap_lock;
-
- /**
* @vaddr: Kernel virtual address of the backing memory
*/
void *vaddr;
@@ -98,25 +74,41 @@ struct drm_gem_shmem_object {
unsigned int vmap_use_count;
/**
- * @map_cached: map object cached (instead of using writecombine).
+ * @pages_mark_dirty_on_put:
+ *
+ * Mark pages as dirty when they are put.
+ */
+ bool pages_mark_dirty_on_put : 1;
+
+ /**
+ * @pages_mark_accessed_on_put:
+ *
+ * Mark pages as accessed when they are put.
+ */
+ bool pages_mark_accessed_on_put : 1;
+
+ /**
+ * @map_wc: map object write-combined (instead of using shmem defaults).
*/
- bool map_cached;
+ bool map_wc : 1;
};
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
-void drm_gem_shmem_free_object(struct drm_gem_object *obj);
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_pin(struct drm_gem_object *obj);
-void drm_gem_shmem_unpin(struct drm_gem_object *obj);
-void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
-void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
+int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
-int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
@@ -125,32 +117,159 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
!shmem->base.dma_buf && !shmem->base.import_attach;
}
-void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
-bool drm_gem_shmem_purge(struct drm_gem_object *obj);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
-struct drm_gem_shmem_object *
-drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- uint32_t *handle);
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
-struct drm_gem_object *
-drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size);
+void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
+ struct drm_printer *p, unsigned int indent);
-int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_free(shmem);
+}
-int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+/**
+ * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
+ * use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
+ drm_gem_shmem_print_info(shmem, p, indent);
+}
+
+/**
+ * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.pin handler.
+ */
+static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_pin(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.unpin handler.
+ */
+static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_unpin(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
+ */
+static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_get_sg_table(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
+ *
+ * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_vmap(shmem, map);
+}
+
+/**
+ * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap()
+ * @obj: GEM object
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
+ *
+ * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.vunmap handler.
+ */
+static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_vunmap(shmem, map);
+}
+
+/**
+ * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_mmap(shmem, vma);
+}
+
+/*
+ * Driver ops
+ */
-struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/**
* DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
@@ -159,10 +278,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
* the &drm_driver structure.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
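
As an editorial sketch (not part of the patch): with the helpers now taking struct drm_gem_shmem_object directly, a driver wires its GEM object callbacks through the drm_gem_shmem_object_*() wrappers, while DRM_GEM_SHMEM_DRIVER_OPS covers the &drm_driver side. The table below is illustrative.

#include <drm/drm_gem_shmem_helper.h>

/* Illustrative GEM object callbacks built from the wrappers above. */
static const struct drm_gem_object_funcs example_shmem_funcs = {
	.free		= drm_gem_shmem_object_free,
	.print_info	= drm_gem_shmem_object_print_info,
	.pin		= drm_gem_shmem_object_pin,
	.unpin		= drm_gem_shmem_object_unpin,
	.get_sg_table	= drm_gem_shmem_object_get_sg_table,
	.vmap		= drm_gem_shmem_object_vmap,
	.vunmap		= drm_gem_shmem_object_vunmap,
	.mmap		= drm_gem_shmem_object_mmap,
	.vm_ops		= &drm_gem_shmem_vm_ops,
};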
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 118cef76f84f..7b53d673ae7e 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -3,19 +3,27 @@
#ifndef DRM_GEM_TTM_HELPER_H
#define DRM_GEM_TTM_HELPER_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
-#include <drm/drm_gem.h>
#include <drm/drm_device.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo.h>
+
+struct iosys_map;
#define drm_gem_ttm_of_gem(gem_obj) \
container_of(gem_obj, struct ttm_buffer_object, base)
void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *gem);
+int drm_gem_ttm_vmap(struct drm_gem_object *gem,
+ struct iosys_map *map);
+void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
+ struct iosys_map *map);
int drm_gem_ttm_mmap(struct drm_gem_object *gem,
struct vm_area_struct *vma);
+int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
#endif
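
A hedged sketch (not part of the patch): the TTM-backed GEM helpers, including the new vmap/vunmap pair and drm_gem_ttm_dumb_map_offset(), are meant to be plugged in as callbacks; both tables below are illustrative and omit driver-specific handlers.

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

static const struct drm_gem_object_funcs example_ttm_gem_funcs = {
	.print_info	= drm_gem_ttm_print_info,
	.vmap		= drm_gem_ttm_vmap,
	.vunmap		= drm_gem_ttm_vunmap,
	.mmap		= drm_gem_ttm_mmap,
	/* .free is driver-specific and omitted here */
};

static const struct drm_driver example_ttm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
	/* remaining driver callbacks omitted */
};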
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 035332f3723f..2938ba80750d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -5,13 +5,14 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modes.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <linux/kernel.h> /* for container_of() */
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
struct drm_mode_create_dumb;
struct drm_plane;
@@ -20,9 +21,9 @@ struct drm_simple_display_pipe;
struct filp;
struct vm_area_struct;
-#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM
-#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM
-#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN TTM_PL_FLAG_TOPDOWN
+#define DRM_GEM_VRAM_PL_FLAG_SYSTEM (1 << 0)
+#define DRM_GEM_VRAM_PL_FLAG_VRAM (1 << 1)
+#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN (1 << 2)
/*
* Buffer-object helpers
@@ -30,13 +31,11 @@ struct vm_area_struct;
/**
* struct drm_gem_vram_object - GEM object backed by VRAM
- * @gem: GEM object
* @bo: TTM buffer object
- * @kmap: Mapping information for @bo
- * @placement: TTM placement information. Supported placements are \
- %TTM_PL_VRAM and %TTM_PL_SYSTEM
+ * @map: Mapping information for @bo
+ * @placement: TTM placement information. Supported placements are %TTM_PL_VRAM
+ * and %TTM_PL_SYSTEM
* @placements: TTM placement information.
- * @pin_count: Pin counter
*
* The type struct drm_gem_vram_object represents a GEM object that is
* backed by VRAM. It can be used for simple framebuffer devices with
@@ -52,26 +51,24 @@ struct vm_area_struct;
*/
struct drm_gem_vram_object {
struct ttm_buffer_object bo;
- struct ttm_bo_kmap_obj kmap;
+ struct iosys_map map;
/**
- * @kmap_use_count:
+ * @vmap_use_count:
*
* Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
*/
- unsigned int kmap_use_count;
+ unsigned int vmap_use_count;
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
-
- int pin_count;
};
/**
- * Returns the container of type &struct drm_gem_vram_object
- * for field bo.
+ * drm_gem_vram_of_bo - Returns the container of type
+ * &struct drm_gem_vram_object for field bo.
* @bo: the VRAM buffer object
* Returns: The containing GEM VRAM object
*/
@@ -82,8 +79,8 @@ static inline struct drm_gem_vram_object *drm_gem_vram_of_bo(
}
/**
- * Returns the container of type &struct drm_gem_vram_object
- * for field gem.
+ * drm_gem_vram_of_gem - Returns the container of type
+ * &struct drm_gem_vram_object for field gem.
* @gem: the GEM object
* Returns: The containing GEM VRAM object
*/
@@ -97,15 +94,12 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
size_t size,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
-u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
-void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
- bool *is_iomem);
-void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
-void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo);
-void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
+int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
+ struct iosys_map *map);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
@@ -120,9 +114,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
/*
* Helpers for struct drm_plane_helper_funcs
@@ -134,6 +125,18 @@ void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
+/**
+ * DRM_GEM_VRAM_PLANE_HELPER_FUNCS - Initializes struct drm_plane_helper_funcs
+ * for VRAM handling
+ *
+ * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_VRAM_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb
+
/*
* Helpers for struct drm_simple_display_pipe_funcs
*/
@@ -147,17 +150,16 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
struct drm_plane_state *old_state);
/**
- * define DRM_GEM_VRAM_DRIVER - default callback functions for \
- &struct drm_driver
+ * define DRM_GEM_VRAM_DRIVER - default callback functions for
+ * &struct drm_driver
*
* Drivers that use VRAM MM and GEM VRAM can use this macro to initialize
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
- .debugfs_init = drm_vram_mm_debugfs_init, \
- .dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .debugfs_init = drm_vram_mm_debugfs_init, \
+ .dumb_create = drm_gem_vram_driver_dumb_create, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset
/*
* VRAM memory manager
@@ -179,19 +181,19 @@ struct drm_vram_mm {
uint64_t vram_base;
size_t vram_size;
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
/**
- * drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
+ * drm_vram_mm_of_bdev() - Returns the container of type &struct ttm_device for
+ * field bdev.
* @bdev: the TTM BO device
*
* Returns:
* The containing instance of &struct drm_vram_mm
*/
static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
+ struct ttm_device *bdev)
{
return container_of(bdev, struct drm_vram_mm, bdev);
}
@@ -202,10 +204,6 @@ void drm_vram_mm_debugfs_init(struct drm_minor *minor);
* Helpers for integration with struct drm_device
*/
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size);
-void drm_vram_helper_release_mm(struct drm_device *dev);
-
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
size_t vram_size);
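
For illustration (not part of the patch), the two initializer macros above wire the VRAM helpers into a driver. The probe fragment assumes a PCI device whose BAR 0 provides the VRAM aperture; that assumption and all names are purely illustrative.

#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_helper_funcs example_primary_helper_funcs = {
	DRM_GEM_VRAM_PLANE_HELPER_FUNCS, /* .prepare_fb and .cleanup_fb */
};

static const struct drm_driver example_vram_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_VRAM_DRIVER, /* debugfs, dumb-buffer and mmap-offset callbacks */
};

/* Illustrative probe step: manage all of BAR 0 as VRAM. */
static int example_init_vram(struct drm_device *dev, struct pci_dev *pdev)
{
	return drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
}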
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
new file mode 100644
index 000000000000..00d4e43b76b6
--- /dev/null
+++ b/include/drm/drm_gpuvm.h
@@ -0,0 +1,1247 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __DRM_GPUVM_H__
+#define __DRM_GPUVM_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_exec.h>
+
+struct drm_gpuvm;
+struct drm_gpuvm_bo;
+struct drm_gpuvm_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+ /**
+ * @DRM_GPUVA_INVALIDATED:
+ *
+ * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+ */
+ DRM_GPUVA_INVALIDATED = (1 << 0),
+
+ /**
+ * @DRM_GPUVA_SPARSE:
+ *
+ * Flag indicating that the &drm_gpuva is a sparse mapping.
+ */
+ DRM_GPUVA_SPARSE = (1 << 1),
+
+ /**
+ * @DRM_GPUVA_USERBITS: user defined bits
+ */
+ DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuvm.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+ /**
+ * @vm: the &drm_gpuvm this object is associated with
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
+ * &drm_gem_object
+ */
+ struct drm_gpuvm_bo *vm_bo;
+
+ /**
+ * @flags: the &drm_gpuva_flags for this mapping
+ */
+ enum drm_gpuva_flags flags;
+
+ /**
+ * @va: structure containing the address and range of the &drm_gpuva
+ */
+ struct {
+ /**
+ * @va.addr: the start address
+ */
+ u64 addr;
+
+		/**
+		 * @va.range: the range
+ */
+ u64 range;
+ } va;
+
+ /**
+	 * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @gem.offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @gem.obj: the mapped &drm_gem_object
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
+ */
+ struct list_head entry;
+ } gem;
+
+ /**
+ * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+ */
+ struct {
+ /**
+ * @rb.node: the rb-tree node
+ */
+ struct rb_node node;
+
+ /**
+ * @rb.entry: The &list_head to additionally connect &drm_gpuvas
+ * in the same order they appear in the interval tree. This is
+ * useful to keep iterating &drm_gpuvas from a start node found
+ * through the rb-tree while doing modifications on the rb-tree
+ * itself.
+ */
+ struct list_head entry;
+
+ /**
+ * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
+ */
+ u64 __subtree_last;
+ } rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset)
+{
+ va->va.addr = addr;
+ va->va.range = range;
+ va->gem.obj = obj;
+ va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+ if (invalidate)
+ va->flags |= DRM_GPUVA_INVALIDATED;
+ else
+ va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+ return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * enum drm_gpuvm_flags - flags for struct drm_gpuvm
+ */
+enum drm_gpuvm_flags {
+ /**
+ * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
+ * GPUVM's &dma_resv lock
+ */
+ DRM_GPUVM_RESV_PROTECTED = BIT(0),
+
+ /**
+ * @DRM_GPUVM_USERBITS: user defined bits
+ */
+ DRM_GPUVM_USERBITS = BIT(1),
+};
+
+/**
+ * struct drm_gpuvm - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space by
+ * tracking &drm_gpuva entries in an interval tree. Typically, this structure
+ * is embedded in bigger driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuvm {
+ /**
+ * @name: the name of the DRM GPU VA space
+ */
+ const char *name;
+
+ /**
+ * @flags: the &drm_gpuvm_flags of this GPUVM
+ */
+ enum drm_gpuvm_flags flags;
+
+ /**
+ * @drm: the &drm_device this VM lives in
+ */
+ struct drm_device *drm;
+
+ /**
+ * @mm_start: start of the VA space
+ */
+ u64 mm_start;
+
+ /**
+ * @mm_range: length of the VA space
+ */
+ u64 mm_range;
+
+ /**
+ * @rb: structures to track &drm_gpuva entries
+ */
+ struct {
+ /**
+ * @rb.tree: the rb-tree to track GPU VA mappings
+ */
+ struct rb_root_cached tree;
+
+ /**
+ * @rb.list: the &list_head to track GPU VA mappings
+ */
+ struct list_head list;
+ } rb;
+
+ /**
+ * @kref: reference count of this object
+ */
+ struct kref kref;
+
+ /**
+ * @kernel_alloc_node:
+ *
+ * &drm_gpuva representing the address space cutout reserved for
+ * the kernel
+ */
+ struct drm_gpuva kernel_alloc_node;
+
+ /**
+ * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
+ */
+ const struct drm_gpuvm_ops *ops;
+
+ /**
+	 * @r_obj: Resv GEM object representing the GPUVM's common &dma_resv.
+ */
+ struct drm_gem_object *r_obj;
+
+ /**
+ * @extobj: structure holding the extobj list
+ */
+ struct {
+ /**
+ * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
+		 * external objects
+ */
+ struct list_head list;
+
+ /**
+ * @extobj.local_list: pointer to the local list temporarily
+ * storing entries from the external object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @extobj.lock: spinlock to protect the extobj list
+ */
+ spinlock_t lock;
+ } extobj;
+
+ /**
+ * @evict: structure holding the evict list and evict list lock
+ */
+ struct {
+ /**
+ * @evict.list: &list_head storing &drm_gpuvm_bos currently
+ * being evicted
+ */
+ struct list_head list;
+
+ /**
+ * @evict.local_list: pointer to the local list temporarily
+ * storing entries from the evicted object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @evict.lock: spinlock to protect the evict list
+ */
+ spinlock_t lock;
+ } evict;
+};
+
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ enum drm_gpuvm_flags flags,
+ struct drm_device *drm,
+ struct drm_gem_object *r_obj,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuvm_ops *ops);
+
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm pointer
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+ kref_get(&gpuvm->kref);
+
+ return gpuvm;
+}
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm);
+
+/**
+ * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
+}
+
+/**
+ * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
+ */
+#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)
+
+/**
+ * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
+ * &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
+ * &dma_resv
+ */
+#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
+
+#define drm_gpuvm_resv_held(gpuvm__) \
+ dma_resv_held(drm_gpuvm_resv(gpuvm__))
+
+#define drm_gpuvm_resv_assert_held(gpuvm__) \
+ dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
+
+/**
+ * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
+ * external object
+ * @gpuvm: the &drm_gpuvm to check
+ * @obj: the &drm_gem_object to check
+ *
+ * Returns: true if the &drm_gem_object's &dma_resv differs from the
+ * &drm_gpuvm's &dma_resv, false otherwise
+ */
+static inline bool
+drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ return obj && obj->resv != drm_gpuvm_resv(gpuvm);
+}
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+ if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
+ return list_next_entry(va, rb.entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but is using the &drm_gpuvm's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
+ * @kernel_alloc_node.
+ */
+#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
+ * &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to
+ * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
+ * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
+ * against removal of elements. It assumes that @end__ is within (or is the
+ * upper limit of) the &drm_gpuvm. This iterator does not skip over the
+ * &drm_gpuvm's @kernel_alloc_node.
+ */
+#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
+ next__ = __drm_gpuva_next(va__); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuvm.
+ */
+#define drm_gpuvm_for_each_va(va__, gpuvm__) \
+ list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @gpuvm__: &drm_gpuvm to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
+ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
+
+/**
+ * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
+ *
+ * This structure should be created on the stack as &drm_exec should be.
+ *
+ * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
+ */
+struct drm_gpuvm_exec {
+ /**
+ * @exec: the &drm_exec structure
+ */
+ struct drm_exec exec;
+
+ /**
+ * @flags: the flags for the struct drm_exec
+ */
+ u32 flags;
+
+ /**
+	 * @vm: the &drm_gpuvm whose DMA reservations are to be locked
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @num_fences: the number of fences to reserve for the &dma_resv of the
+ * locked &drm_gem_objects
+ */
+ unsigned int num_fences;
+
+ /**
+ * @extra: Callback and corresponding private data for the driver to
+ * lock arbitrary additional &drm_gem_objects.
+ */
+ struct {
+ /**
+ * @extra.fn: The driver callback to lock additional
+ * &drm_gem_objects.
+ */
+ int (*fn)(struct drm_gpuvm_exec *vm_exec);
+
+ /**
+ * @extra.priv: driver private data for the @fn callback
+ */
+ void *priv;
+ } extra;
+};
+
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ u64 addr, u64 range,
+ unsigned int num_fences);
+
+int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);
+
+int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+ struct drm_gem_object **objs,
+ unsigned int num_objs);
+
+int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+ u64 addr, u64 range);
+
+/**
+ * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Releases all dma-resv locks of all &drm_gem_objects previously acquired
+ * through drm_gpuvm_exec_lock() or its variants.
+ */
+static inline void
+drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+ drm_exec_fini(&vm_exec->exec);
+}
+
+int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
+void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage);
+
+/**
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ *
+ * See drm_gpuvm_resv_add_fence().
+ */
+static inline void
+drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
+ private_usage, extobj_usage);
+}
+
+/**
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static inline int
+drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
+{
+ return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+}
+
+/**
+ * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
+ * &drm_gem_object combination
+ *
+ * This structure is an abstraction representing a &drm_gpuvm and
+ * &drm_gem_object combination. It serves as an indirection to accelerate
+ * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
+ * &drm_gem_object.
+ *
+ * Furthermore, it is used to cache evicted GEM objects for a certain GPU-VM to
+ * accelerate validation.
+ *
+ * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
+ * a GEM object is first mapped in a GPU-VM and release the instance once the
+ * last mapping of the GEM object in this GPU-VM is unmapped.
+ */
+struct drm_gpuvm_bo {
+ /**
+ * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
+ * counted pointer.
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @obj: The &drm_gem_object being mapped in @vm. This is a reference
+ * counted pointer.
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @evicted: Indicates whether the &drm_gem_object is evicted; field
+ * protected by the &drm_gem_object's dma-resv lock.
+ */
+ bool evicted;
+
+ /**
+ * @kref: The reference count for this &drm_gpuvm_bo.
+ */
+ struct kref kref;
+
+ /**
+ * @list: Structure containing all &list_heads.
+ */
+ struct {
+ /**
+ * @list.gpuva: The list of linked &drm_gpuvas.
+ *
+ * It is safe to access entries from this list as long as the
+ * GEM's gpuva lock is held. See also struct drm_gem_object.
+ */
+ struct list_head gpuva;
+
+ /**
+ * @list.entry: Structure containing all &list_heads serving as
+ * entry.
+ */
+ struct {
+ /**
+ * @list.entry.gem: List entry to attach to the
+			 * &drm_gem_object's gpuva list.
+ */
+ struct list_head gem;
+
+ /**
+			 * @list.entry.extobj: List entry to attach to the
+			 * &drm_gpuvm's extobj list.
+ */
+ struct list_head extobj;
+
+ /**
+ * @list.entry.evict: List entry to attach to the
+			 * &drm_gpuvm's evict list.
+ */
+ struct list_head evict;
+ } entry;
+ } list;
+};
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
+ *
+ * This function acquires an additional reference to @vm_bo. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm_bo pointer
+ */
+static inline struct drm_gpuvm_bo *
+drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+ kref_get(&vm_bo->kref);
+ return vm_bo;
+}
+
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
+
+/**
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bos in the list
+ * to/from the &drm_gpuvm's evict list
+ * @obj: the &drm_gem_object
+ * @evict: indicates whether @obj is evicted
+ *
+ * See drm_gpuvm_bo_evict().
+ */
+static inline void
+drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ drm_gpuvm_bo_evict(vm_bo, evict);
+}
+
+void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
+	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
+ * &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
+	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
+ */
+enum drm_gpuva_op_type {
+ /**
+ * @DRM_GPUVA_OP_MAP: the map op type
+ */
+ DRM_GPUVA_OP_MAP,
+
+ /**
+ * @DRM_GPUVA_OP_REMAP: the remap op type
+ */
+ DRM_GPUVA_OP_REMAP,
+
+ /**
+ * @DRM_GPUVA_OP_UNMAP: the unmap op type
+ */
+ DRM_GPUVA_OP_UNMAP,
+
+ /**
+ * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+ */
+ DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+ /**
+ * @va: structure containing address and range of a map
+ * operation
+ */
+ struct {
+ /**
+ * @va.addr: the base address of the new mapping
+ */
+ u64 addr;
+
+ /**
+ * @va.range: the range of the new mapping
+ */
+ u64 range;
+ } va;
+
+ /**
+	 * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @gem.offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @gem.obj: the &drm_gem_object to map
+ */
+ struct drm_gem_object *obj;
+ } gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+ /**
+ * @va: the &drm_gpuva to unmap
+ */
+ struct drm_gpuva *va;
+
+ /**
+ * @keep:
+ *
+ * Indicates whether this &drm_gpuva is physically contiguous with the
+ * original mapping request.
+ *
+ * Optionally, if &keep is set, drivers may keep the actual page table
+ * mappings for this &drm_gpuva, adding the missing page table entries
+	 * only and updating the &drm_gpuvm accordingly.
+ */
+ bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existent
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If either a new mapping's start address is aligned with the start address
+ * of the old mapping or the new mapping's end address is aligned with the
+ * end address of the old mapping, either @prev or @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+ /**
+ * @prev: the preceding part of a split mapping
+ */
+ struct drm_gpuva_op_map *prev;
+
+ /**
+ * @next: the subsequent part of a split mapping
+ */
+ struct drm_gpuva_op_map *next;
+
+ /**
+ * @unmap: the unmap operation for the original existing mapping
+ */
+ struct drm_gpuva_op_unmap *unmap;
+};
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+ /**
+ * @va: the &drm_gpuva to prefetch
+ */
+ struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+ /**
+ * @entry:
+ *
+ * The &list_head used to distribute instances of this struct within
+ * &drm_gpuva_ops.
+ */
+ struct list_head entry;
+
+ /**
+ * @op: the type of the operation
+ */
+ enum drm_gpuva_op_type op;
+
+ union {
+ /**
+ * @map: the map operation
+ */
+ struct drm_gpuva_op_map map;
+
+ /**
+ * @remap: the remap operation
+ */
+ struct drm_gpuva_op_remap remap;
+
+ /**
+ * @unmap: the unmap operation
+ */
+ struct drm_gpuva_op_unmap unmap;
+
+ /**
+ * @prefetch: the prefetch operation
+ */
+ struct drm_gpuva_op_prefetch prefetch;
+ };
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+ /**
+ * @list: the &list_head
+ */
+ struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: &next &drm_gpuva_op to store the next step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_entry_safe(), so it is safe against removal
+ * of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+ list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+ list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_reverse - iterator to walk over &drm_gpuva_ops in reverse
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations in
+ * reverse order.
+ */
+#define drm_gpuva_for_each_op_reverse(op, ops) \
+ list_for_each_entry_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+ list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+ list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);
+
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
+ struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op)
+{
+ drm_gpuva_init(va, op->va.addr, op->va.range,
+ op->gem.obj, op->gem.offset);
+}
+
+/**
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuvm_ops {
+ /**
+ * @vm_free: called when the last reference of a struct drm_gpuvm is
+ * dropped
+ *
+ * This callback is mandatory.
+ */
+ void (*vm_free)(struct drm_gpuvm *gpuvm);
+
+ /**
+ * @op_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuva_op *(*op_alloc)(void);
+
+ /**
+ * @op_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*op_free)(struct drm_gpuva_op *op);
+
+ /**
+ * @vm_bo_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
+
+ /**
+ * @vm_bo_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
+
+ /**
+ * @vm_bo_validate: called from drm_gpuvm_validate()
+ *
+ * Drivers receive this callback for every evicted &drm_gem_object being
+ * mapped in the corresponding &drm_gpuvm.
+ *
+ * Typically, drivers would call their driver specific variant of
+ * ttm_bo_validate() from within this callback.
+ */
+ int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
+ struct drm_exec *exec);
+
+ /**
+ * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
+ * mapping once all previous steps were completed
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+	 * Can be NULL if &drm_gpuvm_sm_map is not used.
+ */
+ int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_remap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to split up an existent mapping
+ *
+	 * This callback is called when an existent mapping needs to be split up.
+ * This is the case when either a newly requested mapping overlaps or
+ * is enclosed by an existent mapping or a partial unmap of an existent
+ * mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+ * used.
+ */
+ int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to unmap an existent mapping
+ *
+	 * This callback is called when an existent mapping needs to be unmapped.
+ * This is the case when either a newly requested mapping encloses an
+ * existent mapping or an unmap of an existent mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+ * used.
+ */
+ int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
+ u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
+ struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+ struct drm_gpuva *next,
+ struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+/**
+ * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
+ * the unmap stage of a remap op.
+ * @op: Remap op.
+ * @start_addr: Output pointer for the start of the required unmap.
+ * @range: Output pointer for the length of the required unmap.
+ *
+ * The given start address and range will be set such that they represent the
+ * range of the address space that was previously covered by the mapping being
+ * re-mapped, but is now empty.
+ */
+static inline void
+drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
+ u64 *start_addr, u64 *range)
+{
+ const u64 va_start = op->prev ?
+ op->prev->va.addr + op->prev->va.range :
+ op->unmap->va->va.addr;
+ const u64 va_end = op->next ?
+ op->next->va.addr :
+ op->unmap->va->va.addr + op->unmap->va->va.range;
+
+ if (start_addr)
+ *start_addr = va_start;
+ if (range)
+ *range = va_end - va_start;
+}
+
+#endif /* __DRM_GPUVM_H__ */
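
For illustration, a minimal bring-up and bind sketch (not part of the patch), driving the split/merge logic through the ops-list interface and the &drm_gpuvm_exec wrapper. All names, the 48-bit VA-space layout, the reserved kernel page and the locking flags are illustrative assumptions, not part of the API contract.

#include <linux/err.h>
#include <linux/slab.h>

#include <drm/drm_gpuvm.h>

/* Called once the last reference is dropped via drm_gpuvm_put(). */
static void example_vm_free(struct drm_gpuvm *gpuvm)
{
	kfree(gpuvm);
}

static const struct drm_gpuvm_ops example_gpuvm_ops = {
	.vm_free = example_vm_free,
};

static struct drm_gpuvm *example_vm_create(struct drm_device *drm)
{
	struct drm_gem_object *r_obj;
	struct drm_gpuvm *gpuvm;

	gpuvm = kzalloc(sizeof(*gpuvm), GFP_KERNEL);
	if (!gpuvm)
		return ERR_PTR(-ENOMEM);

	r_obj = drm_gpuvm_resv_object_alloc(drm);
	if (!r_obj) {
		kfree(gpuvm);
		return ERR_PTR(-ENOMEM);
	}

	/* 48-bit VA space with one reserved kernel page; purely illustrative. */
	drm_gpuvm_init(gpuvm, "example", DRM_GPUVM_RESV_PROTECTED, drm, r_obj,
		       0, 1ull << 48, 0, PAGE_SIZE, &example_gpuvm_ops);
	drm_gem_object_put(r_obj); /* the GPUVM holds its own reference */

	return gpuvm;
}

/* Walk the generated split/merge steps for a map request. */
static int example_vm_bind(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
			   struct drm_gem_object *obj, u64 offset)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* program page tables per op->op: MAP, REMAP or UNMAP */
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}

/* Lock, validate and fence all BOs of the VM around job submission. */
static int example_vm_lock_and_validate(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	ret = drm_gpuvm_exec_validate(&vm_exec);
	/* ...on success, add job fences with drm_gpuvm_exec_resv_add_fence()... */
	drm_gpuvm_exec_unlock(&vm_exec);
	return ret;
}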
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
deleted file mode 100644
index bb95ff011baf..000000000000
--- a/include/drm/drm_hashtab.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef DRM_HASHTAB_H
-#define DRM_HASHTAB_H
-
-#include <linux/list.h>
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-struct drm_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct drm_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
-
-#endif
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 10100a4bbe2a..171760b6c4a1 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_MAJOR 226
/**
@@ -109,17 +110,6 @@ enum drm_ioctl_flags {
*/
DRM_ROOT_ONLY = BIT(2),
/**
- * @DRM_UNLOCKED:
- *
- * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
- * or not. Enforced as the default for all modern drivers, hence there
- * should never be a need to set this flag.
- *
- * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
- * only legacy IOCTL which needs this.
- */
- DRM_UNLOCKED = BIT(4),
- /**
* @DRM_RENDER_ALLOW:
*
* This is used for all ioctl needed for rendering only, for drivers
@@ -166,7 +156,6 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT
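
For illustration (not part of the patch), the new DRM_IOCTL_TYPE() complements DRM_IOCTL_NR() when decoding a raw ioctl command word; the helper below is hypothetical.

#include <linux/types.h>

#include <drm/drm.h>
#include <drm/drm_ioctl.h>

/* Hypothetical check: does this ioctl command word target DRM? */
static bool example_is_drm_ioctl(unsigned int cmd)
{
	return DRM_IOCTL_TYPE(cmd) == DRM_IOCTL_BASE;
}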
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
deleted file mode 100644
index d77f6e65b1c6..000000000000
--- a/include/drm/drm_irq.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2016 Intel Corp.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_IRQ_H_
-#define _DRM_IRQ_H_
-
-struct drm_device;
-
-int drm_irq_install(struct drm_device *dev, int irq);
-int drm_irq_uninstall(struct drm_device *dev);
-
-#endif
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
new file mode 100644
index 000000000000..6e99627edf45
--- /dev/null
+++ b/include/drm/drm_kunit_helpers.h
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef DRM_KUNIT_HELPERS_H_
+#define DRM_KUNIT_HELPERS_H_
+
+#include <drm/drm_drv.h>
+
+#include <linux/device.h>
+
+#include <kunit/test.h>
+
+struct drm_crtc_funcs;
+struct drm_crtc_helper_funcs;
+struct drm_device;
+struct drm_plane_funcs;
+struct drm_plane_helper_funcs;
+struct kunit;
+
+struct device *drm_kunit_helper_alloc_device(struct kunit *test);
+void drm_kunit_helper_free_device(struct kunit *test, struct device *dev);
+
+struct drm_device *
+__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ const struct drm_driver *driver);
+
+/**
+ * drm_kunit_helper_alloc_drm_device_with_driver - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_drv: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_device from @_dev and @_drv.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will get cleaned up at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, _type, _member, _drv) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _drv))
+
+static inline struct drm_device *
+__drm_kunit_helper_alloc_drm_device(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ u32 features)
+{
+ struct drm_driver *driver;
+
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, driver);
+
+ driver->driver_features = features;
+
+ return __drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
+ size, offset,
+ driver);
+}
+
+/**
+ * drm_kunit_helper_alloc_drm_device - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_feat: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_driver and will create a struct
+ * &drm_device from @_dev and that driver.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will get cleaned up at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device(_test, _dev, _type, _member, _feat) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _feat))
+
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
+
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_modeset_acquire_ctx *ctx);
+
+struct drm_plane *
+drm_kunit_helper_create_primary_plane(struct kunit *test,
+ struct drm_device *drm,
+ const struct drm_plane_funcs *funcs,
+ const struct drm_plane_helper_funcs *helper_funcs,
+ const uint32_t *formats,
+ unsigned int num_formats,
+ const uint64_t *modifiers);
+
+struct drm_crtc *
+drm_kunit_helper_create_crtc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const struct drm_crtc_helper_funcs *helper_funcs);
+
+#endif // DRM_KUNIT_HELPERS_H_
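Taken together, these helpers let a test spin up a complete mock DRM device in a few lines. A minimal usage sketch, with a hypothetical test_priv wrapper struct and test body (not part of this patch):

struct test_priv {
	struct drm_device drm;
};

static void drm_test_mock_device(struct kunit *test)
{
	struct test_priv *priv;
	struct device *dev;

	/* Parent device for the mock DRM device; see
	 * drm_kunit_helper_free_device() for explicit teardown. */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	priv = drm_kunit_helper_alloc_drm_device(test, dev,
						 struct test_priv, drm,
						 DRIVER_MODESET);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

	KUNIT_EXPECT_TRUE(test, drm_core_check_feature(&priv->drm,
						       DRIVER_MODESET));
}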
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
deleted file mode 100644
index 852d7451eeb1..000000000000
--- a/include/drm/drm_legacy.h
+++ /dev/null
@@ -1,235 +0,0 @@
-#ifndef __DRM_DRM_LEGACY_H__
-#define __DRM_DRM_LEGACY_H__
-/*
- * Legacy driver interfaces for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- * Copyright © 2014 Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm.h>
-#include <drm/drm_auth.h>
-#include <drm/drm_hashtab.h>
-
-struct drm_device;
-struct drm_driver;
-struct file;
-struct pci_driver;
-
-/*
- * Legacy Support for paleontologic DRM drivers
- *
- * If you add a new driver and it uses any of these functions or structures,
- * you're doing it terribly wrong.
- */
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
- int idx; /**< Index into master buflist */
- int total; /**< Buffer size */
- int order; /**< log-base-2(total) */
- int used; /**< Amount of buffer in use (for DMA) */
- unsigned long offset; /**< Byte offset (used internally) */
- void *address; /**< Address of buffer */
- unsigned long bus_address; /**< Bus address of buffer */
- struct drm_buf *next; /**< Kernel-only: used for free list */
- __volatile__ int waiting; /**< On kernel DMA queue */
- __volatile__ int pending; /**< On hardware DMA queue */
- struct drm_file *file_priv; /**< Private of holding file descr */
- int context; /**< Kernel queue for this buffer */
- int while_locked; /**< Dispatch this buffer while locked */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /**< Which list we're on */
-
- int dev_priv_size; /**< Size of buffer private storage */
- void *dev_private; /**< Per-buffer private storage */
-};
-
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry. There is one of this for each buffer size order.
- */
-struct drm_buf_entry {
- int buf_size; /**< size */
- int buf_count; /**< number of buffers */
- struct drm_buf *buflist; /**< buffer list */
- int seg_count;
- int page_order;
- struct drm_dma_handle **seglist;
-
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
-};
-
-/**
- * DMA data.
- */
-struct drm_device_dma {
-
- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
- int buf_count; /**< total number of buffers */
- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
- int seg_count;
- int page_count; /**< number of pages */
- unsigned long *pagelist; /**< page list */
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04,
- _DRM_DMA_USE_PCI_RO = 0x08
- } flags;
-
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- struct page **pagelist;
- dma_addr_t *busaddr;
-};
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
- dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- void *handle; /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- int mtrr; /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
- struct list_head head; /**< list head */
- struct drm_hash_item hash;
- struct drm_local_map *map; /**< mapping */
- uint64_t user_token;
- struct drm_master *master;
-};
-
-int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_p);
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
-void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
-int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
-int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
-
-/**
- * Test that the hardware lock is held by the caller, returning otherwise.
- *
- * \param dev DRM device.
- * \param filp file pointer of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
- _file_priv->master->lock.file_priv != _file_priv) { \
- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
- _file_priv->master->lock.file_priv, _file_priv); \
- return -EINVAL; \
- } \
-} while (0)
-
-void drm_legacy_idlelock_take(struct drm_lock_data *lock);
-void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-
-/* drm_pci.c */
-
-#ifdef CONFIG_PCI
-
-struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
-void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
-
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
- size_t size, size_t align)
-{
- return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
- struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_legacy_pci_init(struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_exit(struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
-}
-
-#endif
-
-/* drm_memory.c */
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index ca4114633bf9..f547b09ca023 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
struct drm_device;
+struct mutex;
typedef void (*drmres_release_t)(struct drm_device *dev, void *res);
@@ -44,7 +45,9 @@ int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
drmres_release_t action,
void *data, const char *name);
-void drmm_add_final_kfree(struct drm_device *dev, void *container);
+void drmm_release_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data);
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
@@ -106,4 +109,22 @@ char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
void drmm_kfree(struct drm_device *dev, void *data);
+void __drmm_mutex_release(struct drm_device *dev, void *res);
+
+/**
+ * drmm_mutex_init - &drm_device-managed mutex_init()
+ * @dev: DRM device
+ * @lock: lock to be initialized
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ *
+ * This is a &drm_device-managed version of mutex_init(). The initialized
+ * lock is automatically destroyed on the final drm_dev_put().
+ */
+#define drmm_mutex_init(dev, lock) ({ \
+ mutex_init(lock); \
+ drmm_add_action_or_reset(dev, __drmm_mutex_release, lock); \
+})
+
#endif
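As a usage sketch (hypothetical driver structure, not from this patch), a driver embedding its state around struct drm_device would initialize a managed mutex like this:

struct my_device {
	struct drm_device drm;
	struct mutex lock; /* protects hypothetical driver state */
};

static int my_device_init(struct my_device *mdev)
{
	int ret;

	/* The mutex is destroyed automatically on the final drm_dev_put(). */
	ret = drmm_mutex_init(&mdev->drm, &mdev->lock);
	if (ret)
		return ret;

	return 0;
}

Note that drmm_mutex_init() evaluates to the return value of drmm_add_action_or_reset(), so it can fail with -ENOMEM and the result must be checked.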
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index c2827ceaba0d..e8e0f8d39f3a 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -12,10 +12,12 @@
#include <drm/drm_device.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_format_conv_state;
struct drm_rect;
-struct spi_device;
struct gpio_desc;
+struct iosys_map;
struct regulator;
+struct spi_device;
/**
* struct mipi_dbi - MIPI DBI interface
@@ -122,14 +124,27 @@ struct mipi_dbi_dev {
struct backlight_device *backlight;
/**
- * @regulator: power regulator (optional)
+ * @regulator: power regulator (Vdd) (optional)
*/
struct regulator *regulator;
/**
+ * @io_regulator: I/O power regulator (Vddi) (optional)
+ */
+ struct regulator *io_regulator;
+
+ /**
* @dbi: MIPI DBI interface
*/
struct mipi_dbi dbi;
+
+ /**
+ * @driver_private: Driver private data.
+ * Necessary for drivers with private data since devm_drm_dev_alloc()
+ * can't allocate structures that embed a structure which then again
+ * embeds drm_device.
+ */
+ void *driver_private;
};
static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm)
@@ -147,12 +162,23 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation);
+enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode);
void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe);
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
void mipi_dbi_hw_reset(struct mipi_dbi *dbi);
bool mipi_dbi_display_is_on(struct mipi_dbi *dbi);
int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev);
@@ -166,13 +192,15 @@ int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len);
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap);
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swap,
+ struct drm_format_conv_state *fmtcnv_state);
+
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @dbi: MIPI DBI structure
* @cmd: Command
- * @seq...: Optional parameter(s)
+ * @seq: Optional parameter(s)
*
* Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
* get/read.
@@ -183,13 +211,39 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
#define mipi_dbi_command(dbi, cmd, seq...) \
({ \
const u8 d[] = { seq }; \
- mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ struct device *dev = &(dbi)->spi->dev; \
+ int ret; \
+ ret = mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret) \
+ dev_err_ratelimited(dev, "error %d when sending command %#02x\n", ret, cmd); \
+ ret; \
})
#ifdef CONFIG_DEBUG_FS
void mipi_dbi_debugfs_init(struct drm_minor *minor);
#else
-#define mipi_dbi_debugfs_init NULL
+static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
+/**
+ * DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS - Initializes struct drm_simple_display_pipe_funcs
+ * for MIPI-DBI devices
+ * @enable_: Enable-callback implementation
+ *
+ * This macro initializes struct drm_simple_display_pipe_funcs with default
+ * values for MIPI-DBI-based devices. The only callback that depends on the
+ * hardware is @enable, for which the driver has to provide an implementation.
+ * MIPI-based drivers are encouraged to use this macro for initialization.
+ */
+#define DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(enable_) \
+ .mode_valid = mipi_dbi_pipe_mode_valid, \
+ .enable = (enable_), \
+ .disable = mipi_dbi_pipe_disable, \
+ .update = mipi_dbi_pipe_update, \
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access, \
+ .end_fb_access = mipi_dbi_pipe_end_fb_access, \
+ .reset_plane = mipi_dbi_pipe_reset_plane, \
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state, \
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state
+
#endif /* __LINUX_MIPI_DBI_H */
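A sketch of how a tiny DBI panel driver would combine the new pieces: the pipe-funcs initializer macro plus mipi_dbi_command(), which now logs failures on its own. The driver and its command sequence are hypothetical; the DCS opcodes come from <video/mipi_display.h>:

static void my_panel_enable(struct drm_simple_display_pipe *pipe,
			    struct drm_crtc_state *crtc_state,
			    struct drm_plane_state *plane_state)
{
	struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
	struct mipi_dbi *dbi = &dbidev->dbi;

	mipi_dbi_hw_reset(dbi);
	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(120);
	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);

	mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
}

static const struct drm_simple_display_pipe_funcs my_panel_pipe_funcs = {
	DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(my_panel_enable),
};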
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 360e6377e84b..c0aec0d4d664 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -80,6 +80,11 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
* Note that typically DSI packet transmission is atomic, so the .transfer()
* function will seldom return anything other than the number of bytes
* contained in the transmit buffer on success.
+ *
+ * Also note that those callbacks can be called no matter the state the
+ * host is in. Drivers that need the underlying device to be powered to
+ * perform these operations will first need to make sure it's been
+ * properly enabled.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
@@ -119,19 +124,21 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
/* enable hsync-end packets in vsync-pulse and v-porch area */
#define MIPI_DSI_MODE_VIDEO_HSE BIT(4)
/* disable hfront-porch area */
-#define MIPI_DSI_MODE_VIDEO_HFP BIT(5)
+#define MIPI_DSI_MODE_VIDEO_NO_HFP BIT(5)
/* disable hback-porch area */
-#define MIPI_DSI_MODE_VIDEO_HBP BIT(6)
+#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6)
/* disable hsync-active area */
-#define MIPI_DSI_MODE_VIDEO_HSA BIT(7)
+#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7)
/* flush display FIFO on vsync pulse */
#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
/* disable EoT packets in HS mode */
-#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
+#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
/* transmit data in low power */
#define MIPI_DSI_MODE_LPM BIT(11)
+/* transmit data ending at the same time for all lanes within one hsync */
+#define MIPI_DSI_HS_PKT_END_ALIGNED BIT(12)
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
@@ -161,6 +168,7 @@ struct mipi_dsi_device_info {
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @attached: the DSI device has been successfully attached
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
@@ -172,10 +180,12 @@ struct mipi_dsi_device_info {
* @lp_rate: maximum lane frequency for low power mode in hertz, this should
* be set to the real limits of the hardware, zero is only accepted for
* legacy drivers
+ * @dsc: panel/bridge DSC pps payload to be sent
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ bool attached;
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
@@ -184,14 +194,12 @@ struct mipi_dsi_device {
unsigned long mode_flags;
unsigned long hs_rate;
unsigned long lp_rate;
+ struct drm_dsc_config *dsc;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
-static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
-{
- return container_of(dev, struct mipi_dsi_device, dev);
-}
+#define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev)
/**
* mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
@@ -222,9 +230,13 @@ struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
@@ -283,6 +295,48 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
u16 brightness);
int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness);
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness);
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness);
+
+/**
+ * mipi_dsi_generic_write_seq - transmit data using a generic write packet
+ * @dsi: DSI peripheral device
+ * @seq: buffer containing the payload
+ */
+#define mipi_dsi_generic_write_seq(dsi, seq...) \
+ do { \
+ static const u8 d[] = { seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited(dev, "transmit data failed: %d\n", \
+ ret); \
+ return ret; \
+ } \
+ } while (0)
+
+/**
+ * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
+ * @dsi: DSI peripheral device
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
+ do { \
+ static const u8 d[] = { cmd, seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited( \
+ dev, "sending command %#02x failed: %d\n", \
+ cmd, ret); \
+ return ret; \
+ } \
+ } while (0)
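Because both macros return from the enclosing function on failure, they may only be used inside functions that return int. A hypothetical panel init sequence (the generic-write opcode and payload are made up for illustration):

static int my_panel_init_sequence(struct mipi_dsi_device *dsi)
{
	/* Each macro sends one packet and returns from this function
	 * with a negative errno if the transfer fails. */
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x01, 0x02);

	return 0;
}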
/**
* struct mipi_dsi_driver - DSI driver
@@ -294,7 +348,7 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
struct mipi_dsi_driver {
struct device_driver driver;
int(*probe)(struct mipi_dsi_device *dsi);
- int(*remove)(struct mipi_dsi_device *dsi);
+ void (*remove)(struct mipi_dsi_device *dsi);
void (*shutdown)(struct mipi_dsi_device *dsi);
};
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index a01bc6fac83c..ac33ba1b18bc 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -39,13 +39,15 @@
*/
#include <linux/bug.h>
#include <linux/rbtree.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
+#include <linux/types.h>
+
#include <drm/drm_print.h>
#ifdef CONFIG_DRM_DEBUG_MM
@@ -338,7 +340,7 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
/**
* drm_mm_nodes - list of nodes under the drm_mm range manager
- * @mm: the struct drm_mm range manger
+ * @mm: the struct drm_mm range manager
*
* As the drm_mm range manager hides its node_list deep with its
* structure, extracting it looks painful and repetitive. This is
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index c2d3d71d133c..973119a9176b 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -58,6 +58,12 @@ struct drm_mode_config_funcs {
* actual modifier used if the request doesn't have it specified,
* ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
*
+ * IMPORTANT: These implied modifiers for legacy userspace must be
+ * stored in &struct drm_framebuffer, including all relevant metadata
+ * like &drm_framebuffer.pitches and &drm_framebuffer.offsets if the
+ * modifier enables additional planes beyond the fourcc pixel format
+ * code. This is required by the GETFB2 ioctl.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -97,14 +103,13 @@ struct drm_mode_config_funcs {
* Callback used by helpers to inform the driver of output configuration
* changes.
*
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
- *
- * FIXME:
+ * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
+ * to call this hook to inform the fbdev helper of output changes.
*
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
+ * This hook is deprecated; drivers should instead use
+ * drm_fbdev_generic_setup() which takes care of any necessary
+ * hotplug event forwarding already without further involvement by
+ * the driver.
*/
void (*output_poll_changed)(struct drm_device *dev);
@@ -340,7 +345,6 @@ struct drm_mode_config_funcs {
* @max_width: maximum fb pixel width on this device
* @max_height: maximum fb pixel height on this device
* @funcs: core driver provided mode setting functions
- * @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
* @poll_running: track polling status for this device
* @delayed_event: track delayed poll uevent deliver for this device
@@ -354,6 +358,19 @@ struct drm_mode_config_funcs {
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
* global restrictions are also here, e.g. dimension restrictions.
+ *
+ * Framebuffer sizes refer to the virtual screen that can be displayed by
+ * the CRTC. This can be different from the physical resolution programmed.
+ * The minimum width and height, stored in @min_width and @min_height,
+ * describe the smallest size of the framebuffer. It correlates to the
+ * minimum programmable resolution.
+ * The maximum width, stored in @max_width, is typically limited by the
+ * maximum pitch between two adjacent scanlines. The maximum height, stored
+ * in @max_height, is usually only limited by the amount of addressable video
+ * memory. For hardware that has no real maximum, drivers should pick a
+ * reasonable default.
+ *
+ * See also @DRM_SHADOW_PLANE_MAX_WIDTH and @DRM_SHADOW_PLANE_MAX_HEIGHT.
*/
struct drm_mode_config {
/**
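In driver code these limits are filled in right after mode-config initialization. A hedged sketch, with hypothetical names and hardware limits:

static int my_modeset_init(struct drm_device *drm)
{
	int ret;

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 4096;  /* maximum pitch, in pixels */
	drm->mode_config.max_height = 4096; /* bounded by addressable VRAM */
	drm->mode_config.funcs = &my_mode_config_funcs;

	return 0;
}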
@@ -524,7 +541,6 @@ struct drm_mode_config {
int min_width, min_height;
int max_width, max_height;
const struct drm_mode_config_funcs *funcs;
- resource_size_t fb_base;
/* output poll support */
bool poll_enabled;
@@ -681,6 +697,12 @@ struct drm_mode_config {
struct drm_property *dvi_i_select_subconnector_property;
/**
+ * @dp_subconnector_property: Optional DP property to differentiate
+ * between different DP downstream port types.
+ */
+ struct drm_property *dp_subconnector_property;
+
+ /**
* @tv_subconnector_property: Optional TV property to differentiate
* between different TV connector types.
*/
@@ -690,11 +712,21 @@ struct drm_mode_config {
* between different TV connector types.
*/
struct drm_property *tv_select_subconnector_property;
+
/**
- * @tv_mode_property: Optional TV property to select
+ * @legacy_tv_mode_property: Optional TV property to select
* the output TV mode.
+ *
+ * Superseded by @tv_mode_property
+ */
+ struct drm_property *legacy_tv_mode_property;
+
+ /**
+ * @tv_mode_property: Optional TV property to select the TV
+ * standard output on the connector.
*/
struct drm_property *tv_mode_property;
+
/**
* @tv_left_margin_property: Optional TV property to set the left
* margin (expressed in pixels).
@@ -859,25 +891,6 @@ struct drm_mode_config {
uint32_t preferred_depth, prefer_shadow;
/**
- * @prefer_shadow_fbdev:
- *
- * Hint to framebuffer emulation to prefer shadow-fb rendering.
- */
- bool prefer_shadow_fbdev;
-
- /**
- * @fbdev_use_iomem:
- *
- * Set to true if framebuffer reside in iomem.
- * When set to true memcpy_toio() is used when copying the framebuffer in
- * drm_fb_helper.drm_fb_helper_dirty_blit_real().
- *
- * FIXME: This should be replaced with a per-mapping is_iomem
- * flag (like ttm does), and then used everywhere in fbdev code.
- */
- bool fbdev_use_iomem;
-
- /**
* @quirk_addfb_prefer_xbgr_30bpp:
*
* Special hack for legacy ADDFB to keep nouveau userspace happy. Should
@@ -906,11 +919,14 @@ struct drm_mode_config {
bool async_page_flip;
/**
- * @allow_fb_modifiers:
+ * @fb_modifiers_not_supported:
*
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ * When this flag is set, the DRM device will not expose modifier
+ * support to userspace. This is only used by legacy drivers that infer
+ * the buffer layout through heuristics without using modifiers. New
+ * drivers shall not set this flag.
*/
- bool allow_fb_modifiers;
+ bool fb_modifiers_not_supported;
/**
* @normalize_zpos:
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c34a3e8030e1..08d7a7f0188f 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@ struct drm_mode_object {
void (*free_cb)(struct kref *kref);
};
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
/**
* struct drm_object_properties - property tracking for &drm_mode_object
*/
@@ -98,6 +98,10 @@ struct drm_object_properties {
* Hence atomic drivers should not use drm_object_property_set_value()
* and drm_object_property_get_value() on mutable objects, i.e. those
* without the DRM_MODE_PROP_IMMUTABLE flag set.
+ *
+ * For atomic drivers the default value of properties is stored in this
+ * array, so drm_object_property_get_default_value can be used to
+ * retrieve it.
*/
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
@@ -126,6 +130,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
+int drm_object_property_get_default_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
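This mirrors how atomic helpers can restore a property to its default during state reset. A sketch, assuming the plane has a zpos property attached:

static void my_plane_reset_zpos(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	uint64_t val;

	/* Look up the default stored in the object's values array. */
	if (plane->zpos_property &&
	    !drm_object_property_get_default_value(&plane->base,
						   plane->zpos_property,
						   &val))
		state->zpos = val;
}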
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index eee3c9de6c4f..b9bb92e4b029 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -139,6 +139,35 @@ enum drm_mode_status {
.vscan = (vs), .flags = (f)
/**
+ * DRM_MODE_RES_MM - Calculates the display size from resolution and DPI
+ * @res: The resolution in pixels
+ * @dpi: The number of dots per inch
+ */
+#define DRM_MODE_RES_MM(res, dpi) \
+ (((res) * 254ul) / ((dpi) * 10ul))
+
+#define __DRM_MODE_INIT(pix, hd, vd, hd_mm, vd_mm) \
+ .type = DRM_MODE_TYPE_DRIVER, .clock = (pix), \
+ .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
+ .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
+ .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
+ .height_mm = (vd_mm)
+
+/**
+ * DRM_MODE_INIT - Initialize display mode
+ * @hz: Vertical refresh rate in Hertz
+ * @hd: Horizontal resolution, width
+ * @vd: Vertical resolution, height
+ * @hd_mm: Display width in millimeters
+ * @vd_mm: Display height in millimeters
+ *
+ * This macro initializes a &drm_display_mode that contains information about
+ * refresh rate, resolution and physical size.
+ */
+#define DRM_MODE_INIT(hz, hd, vd, hd_mm, vd_mm) \
+ __DRM_MODE_INIT((hd) * (vd) * (hz) / 1000 /* kHz */, hd, vd, hd_mm, vd_mm)
+
+/**
* DRM_SIMPLE_MODE - Simple display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -149,11 +178,7 @@ enum drm_mode_status {
* resolution and physical size.
*/
#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \
- .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \
- .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
- .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
- .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
- .height_mm = (vd_mm)
+ __DRM_MODE_INIT(1 /* pass validation */, hd, vd, hd_mm, vd_mm)
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
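With these macros a driver can declare a complete fixed mode, including a physical size derived from the panel's pixel density. A sketch for a hypothetical 640x480 panel at 60 Hz and a nominal 245 DPI:

static const struct drm_display_mode my_panel_mode = {
	/* The 18432 kHz dot clock follows from 640 * 480 * 60 / 1000. */
	DRM_MODE_INIT(60, 640, 480,
		      DRM_MODE_RES_MM(640, 245),
		      DRM_MODE_RES_MM(480, 245)),
};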
@@ -195,6 +220,9 @@ enum drm_mode_status {
* @crtc_vsync_end: hardware mode vertical sync end
* @crtc_vtotal: hardware mode vertical total size
*
+ * This is the kernel API display mode information structure. For the
+ * user-space version see struct drm_mode_modeinfo.
+ *
* The horizontal and vertical timings are defined per the following diagram.
*
* ::
@@ -350,14 +378,15 @@ struct drm_display_mode {
u8 type;
/**
- * @private_flags:
+ * @expose_to_userspace:
*
- * Driver private flags. private_flags can only be used for mode
- * objects passed to drivers in modeset operations. It shouldn't be used
- * by atomic drivers since they can store any additional data by
- * subclassing state structures.
+ * Indicates whether the mode is to be exposed to userspace.
+ * This is to maintain a set of exposed modes while preparing
+ * the user-mode list in the drm_mode_getconnector ioctl. It is
+ * only meaningful within that ioctl and must not be used
+ * outside of it.
*/
- int private_flags;
+ bool expose_to_userspace;
/**
* @head:
@@ -367,19 +396,6 @@ struct drm_display_mode {
struct list_head head;
/**
- * @export_head:
- *
- * struct list_head for modes to be exposed to the userspace.
- * This is to maintain a list of exposed modes while preparing
- * user-mode's list in drm_mode_getconnector ioctl. The purpose of this
- * list_head only lies in the ioctl function, and is not expected to be
- * used outside the function.
- * Once used, the stale pointers are not reset, but left as it is, to
- * avoid overhead of protecting it by mode_config.mutex.
- */
- struct list_head export_head;
-
- /**
* @name:
*
* Human-readable name of the mode, filled out with drm_mode_set_name().
@@ -451,6 +467,25 @@ bool drm_mode_is_420_also(const struct drm_display_info *display,
const struct drm_display_mode *mode);
bool drm_mode_is_420(const struct drm_display_info *display,
const struct drm_display_mode *mode);
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref);
+
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace);
+
+static inline struct drm_display_mode *drm_mode_analog_ntsc_480i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_NTSC, 13500000, 720, 480, true);
+}
+
+static inline struct drm_display_mode *drm_mode_analog_pal_576i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL, 13500000, 720, 576, true);
+}
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
@@ -470,9 +505,27 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm);
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
+
+#if defined(CONFIG_OF)
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, u32 *bus_flags,
int index);
+int of_get_drm_panel_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, u32 *bus_flags);
+#else
+static inline int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode,
+ u32 *bus_flags, int index)
+{
+ return -EINVAL;
+}
+
+static inline int of_get_drm_panel_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, u32 *bus_flags)
+{
+ return -EINVAL;
+}
+#endif
void drm_mode_set_name(struct drm_display_mode *mode);
int drm_mode_vrefresh(const struct drm_display_mode *mode);
@@ -483,6 +536,8 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
void drm_mode_copy(struct drm_display_mode *dst,
const struct drm_display_mode *src);
+void drm_mode_init(struct drm_display_mode *dst,
+ const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
bool drm_mode_match(const struct drm_display_mode *mode1,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 4efec30f8bad..881b03e4dc28 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -48,15 +48,19 @@
* To make this clear all the helper vtables are pulled together in this location here.
*/
-enum mode_set_atomic;
struct drm_writeback_connector;
struct drm_writeback_job;
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_crtc_helper_funcs {
/**
@@ -130,7 +134,7 @@ struct drm_crtc_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -212,9 +216,7 @@ struct drm_crtc_helper_funcs {
*
* This callback is used to update the display mode of a CRTC without
* changing anything of the primary plane configuration. This fits the
- * requirement of atomic and hence is used by the atomic helpers. It is
- * also used by the transitional plane helpers to implement a
- * @mode_set hook in drm_helper_crtc_mode_set().
+ * requirement of atomic and hence is used by the atomic helpers.
*
* Note that the display pipe is completely off when this function is
* called. Atomic drivers which need hardware to be running before they
@@ -329,15 +331,14 @@ struct drm_crtc_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
* This function is called in the check phase of an atomic update. The
* driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * state object passed-in.
*
* Also beware that userspace can request its own custom modes, neither
* core nor helpers filter modes to the list of probe modes reported by
@@ -353,7 +354,7 @@ struct drm_crtc_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_begin:
@@ -370,11 +371,11 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_flush:
*
@@ -394,11 +395,11 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_enable:
@@ -417,14 +418,10 @@ struct drm_crtc_helper_funcs {
* @atomic_enable must be the inverse of @atomic_disable for atomic
* drivers.
*
- * Drivers can use the @old_crtc_state input parameter if the operations
- * needed to enable the CRTC don't depend solely on the new state but
- * also on the transition between the old state and the new state.
- *
* This function is optional.
*/
void (*atomic_enable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
@@ -441,15 +438,10 @@ struct drm_crtc_helper_funcs {
* need to implement it if there's no need to disable anything at the
* CRTC level.
*
- * Comparing to @disable, this one provides the additional input
- * parameter @old_crtc_state which could be used to access the old
- * state. Atomic drivers should consider to use this one instead
- * of @disable.
- *
* This function is optional.
*/
void (*atomic_disable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @get_scanout_position:
@@ -513,8 +505,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_encoder_helper_funcs {
/**
@@ -558,7 +550,7 @@ struct drm_encoder_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -876,13 +868,19 @@ struct drm_connector_helper_funcs {
* The usual way to implement this is to cache the EDID retrieved in the
* probe callback somewhere in the driver-private connector structure.
* In this function drivers then parse the modes in the EDID and add
- * them by calling drm_add_edid_modes(). But connectors that driver a
+ * them by calling drm_add_edid_modes(). But connectors that drive a
* fixed panel can also manually add specific modes using
* drm_mode_probed_add(). Drivers which manually add modes should also
* make sure that the &drm_connector.display_info,
* &drm_connector.width_mm and &drm_connector.height_mm fields are
* filled in.
*
+ * Note that the caller function will automatically add standard VESA
+ * DMT modes up to 1024x768 if the .get_modes() helper operation returns
+ * no modes and if the connector status is connector_status_connected or
+ * connector_status_unknown. There is no need to call
+ * drm_add_modes_noedid() manually in that case.
+ *
* Virtual drivers that just want some standard VESA mode with a given
* resolution can call drm_add_modes_noedid(), and mark the preferred
* one using drm_set_preferred_mode().
@@ -1054,9 +1052,8 @@ struct drm_connector_helper_funcs {
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure passed in.
*
* RETURNS:
*
@@ -1066,7 +1063,7 @@ struct drm_connector_helper_funcs {
* for this.
*/
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
- struct drm_connector_state *connector_state);
+ struct drm_atomic_state *state);
/**
* @atomic_check:
@@ -1107,15 +1104,15 @@ struct drm_connector_helper_funcs {
*
* This hook is to be used by drivers implementing writeback connectors
* that need a point at which to commit the writeback job to the hardware.
- * The writeback_job to commit is available in
- * &drm_connector_state.writeback_job.
+ * The writeback_job to commit is available in the new connector state,
+ * in &drm_connector_state.writeback_job.
*
* This hook is optional.
*
* This callback is used by the atomic modeset helpers.
*/
void (*atomic_commit)(struct drm_connector *connector,
- struct drm_connector_state *state);
+ struct drm_atomic_state *state);
/**
* @prepare_writeback_job:
@@ -1148,6 +1145,38 @@ struct drm_connector_helper_funcs {
*/
void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
+
+ /**
+ * @enable_hpd:
+ *
+ * Enable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_enable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
+ */
+ void (*enable_hpd)(struct drm_connector *connector);
+
+ /**
+ * @disable_hpd:
+ *
+ * Disable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_disable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
+ */
+ void (*disable_hpd)(struct drm_connector *connector);
};
/**
@@ -1164,8 +1193,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
/**
* struct drm_plane_helper_funcs - helper operations for planes
*
- * These functions are used by the atomic helpers and by the transitional plane
- * helpers.
+ * These functions are used by the atomic helpers.
*/
struct drm_plane_helper_funcs {
/**
@@ -1183,14 +1211,25 @@ struct drm_plane_helper_funcs {
* equivalent functionality should be implemented through private
* members in the plane structure.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_fb_prepare_fb() for this hook.
+ * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook
+ * set drm_gem_plane_helper_prepare_fb() is called automatically to
+ * implement this. Other drivers which need additional plane processing
+ * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb
+ * hook.
+ *
+ * The resources acquired in @prepare_fb persist after the end of
+ * the atomic commit. Resources that can be released at the commit's end
+ * should be acquired in @begin_fb_access and released in @end_fb_access.
+ * For example, a GEM buffer's pin operation belongs into @prepare_fb to
+ * keep the buffer pinned after the commit. But a vmap operation for
+ * shadow-plane helpers belongs into @begin_fb_access, so that atomic
+ * helpers remove the mapping at the end of the commit.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. See @begin_fb_access for preparing per-commit resources.
*
* RETURNS:
*
@@ -1207,13 +1246,43 @@ struct drm_plane_helper_funcs {
* This hook is called to clean up any resources allocated for the given
* framebuffer and plane configuration in @prepare_fb.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*cleanup_fb)(struct drm_plane *plane,
struct drm_plane_state *old_state);
/**
+ * @begin_fb_access:
+ *
+ * This hook prepares the plane for access during an atomic commit.
+ * In contrast to @prepare_fb, resources acquired in @begin_fb_access
+ * are released at the end of the atomic commit in @end_fb_access.
+ *
+ * For example, with shadow-plane helpers, the GEM buffer's vmap
+ * operation belongs into @begin_fb_access, so that the buffer's
+ * memory will be unmapped at the end of the commit in @end_fb_access.
+ * But a GEM buffer's pin operation belongs into @prepare_fb
+ * to keep the buffer pinned after the commit.
+ *
+ * The callback is used by the atomic modeset helpers, but it is optional.
+ * See @end_fb_access for undoing the effects of @begin_fb_access and
+ * @prepare_fb for acquiring resources until the next pageflip.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+ int (*begin_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state);
+
+ /**
+ * @end_fb_access:
+ *
+ * This hook cleans up resources allocated by @begin_fb_access. It is called
+ * at the end of a commit for the new plane state.
+ */
+ void (*end_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state);
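The intended split, as a hedged sketch: pinning stays in @prepare_fb, while short-lived mappings move into the new hooks. The my_driver_* helpers are hypothetical placeholders:

static int my_plane_begin_fb_access(struct drm_plane *plane,
				    struct drm_plane_state *new_plane_state)
{
	/* Map the framebuffer only for the duration of this commit. */
	return my_driver_vmap_fb(new_plane_state->fb);
}

static void my_plane_end_fb_access(struct drm_plane *plane,
				   struct drm_plane_state *new_plane_state)
{
	/* Drop the mapping created in @begin_fb_access. */
	my_driver_vunmap_fb(new_plane_state->fb);
}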
+
+ /**
* @atomic_check:
*
* Drivers should check plane specific constraints in this hook.
@@ -1232,15 +1301,14 @@ struct drm_plane_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure.
*
* RETURNS:
*
@@ -1250,7 +1318,7 @@ struct drm_plane_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_update:
@@ -1264,11 +1332,36 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_enable:
+ *
+ * Drivers should use this function to unconditionally enable a plane.
+ * This hook is called in-between the &drm_crtc_helper_funcs.atomic_begin
+ * and &drm_crtc_helper_funcs.atomic_flush callbacks. It is called after
+ * @atomic_update, which will be called for all enabled planes. Drivers
+ * that use @atomic_enable should set up a plane in @atomic_update and
+ * afterwards enable the plane in @atomic_enable. If a plane needs to be
+ * enabled before installing the scanout buffer, drivers can still do
+ * so in @atomic_update.
+ *
+ * Note that the power state of the display pipe when this function is
+ * called depends upon the exact helpers and calling sequence the driver
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
+ *
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. If implemented, @atomic_enable should be the inverse of
+ * @atomic_disable. Drivers that don't want to use either can still
+ * implement the complete plane update in @atomic_update.
+ */
+ void (*atomic_enable)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+
/**
* @atomic_disable:
*
@@ -1288,18 +1381,18 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. It's intended to reverse the effects of @atomic_enable.
*/
void (*atomic_disable)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_check:
*
- * Drivers should set this function pointer to check if the plane state
- * can be updated in a async fashion. Here async means "not vblank
- * synchronized".
+ * Drivers should set this function pointer to check if the plane's
+ * atomic state can be updated in an async fashion. Here async means
+ * "not vblank synchronized".
*
* This hook is called by drm_atomic_async_check() to establish if a
* given update can be committed asynchronously, that is, if it can
@@ -1311,7 +1404,7 @@ struct drm_plane_helper_funcs {
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_update:
@@ -1327,11 +1420,9 @@ struct drm_plane_helper_funcs {
* update won't happen if there is an outstanding commit modifying
* the same plane.
*
- * Note that unlike &drm_plane_helper_funcs.atomic_update this hook
- * takes the new &drm_plane_state as parameter. When doing async_update
- * drivers shouldn't replace the &drm_plane_state but update the
- * current one with the new plane configurations in the new
- * plane_state.
+ * When doing async_update drivers shouldn't replace the
+ * &drm_plane_state but update the current one with the new plane
+ * configurations in the new plane_state.
*
* Drivers should also swap the framebuffers between current plane
* state (&drm_plane.state) and new_state.
@@ -1350,7 +1441,7 @@ struct drm_plane_helper_funcs {
* for deferring if needed, until a common solution is created.
*/
void (*atomic_async_update)(struct drm_plane *plane,
- struct drm_plane_state *new_state);
+ struct drm_atomic_state *state);
};
/**
@@ -1383,13 +1474,13 @@ struct drm_mode_config_helper_funcs {
* swapped into the various state pointers. The passed in state
* therefore contains copies of the old/previous state. This hook should
* commit the new state into hardware. Note that the helpers have
- * already waited for preceeding atomic commits and fences, but drivers
+ * already waited for preceding atomic commits and fences, but drivers
* can add more waiting calls at the start of their implementation, e.g.
* to wait for driver-internal request for implicit syncing, before
* starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
- * to call drm_atomic_helper_commit_hw_done(). Then wait for the upate
+ * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
* to be executed by the hardware, for example using
* drm_atomic_helper_wait_for_vblanks() or
* drm_atomic_helper_wait_for_flip_done(), and then clean up the old
@@ -1406,6 +1497,27 @@ struct drm_mode_config_helper_funcs {
* drm_atomic_helper_commit_tail().
*/
void (*atomic_commit_tail)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit_setup:
+ *
+ * This hook is used by the default atomic_commit() hook implemented in
+ * drm_atomic_helper_commit() together with the nonblocking helpers (see
+ * drm_atomic_helper_setup_commit()) to extend the DRM commit setup. It
+ * is not used by the atomic helpers.
+ *
+ * This function is called at the end of
+ * drm_atomic_helper_setup_commit(), so once the commit has been
+ * properly setup across the generic DRM object states. It allows
+ * drivers to do some additional commit tracking that isn't related to a
+ * CRTC, plane or connector, tracked in a &drm_private_obj structure.
+ *
+ * Note that the documentation of &drm_private_obj has more details on
+ * how one should implement this.
+ *
+ * This hook is optional.
+ */
+ int (*atomic_commit_setup)(struct drm_atomic_state *state);
};
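A minimal sketch of wiring up the optional hook in a driver that otherwise uses the stock helpers; my_atomic_commit_setup() is hypothetical, drm_atomic_helper_commit_tail() is the real default tail:

static int my_atomic_commit_setup(struct drm_atomic_state *state)
{
        /* Driver-global commit tracking, e.g. for a &drm_private_obj, goes
         * here; called at the end of drm_atomic_helper_setup_commit().
         */
        return 0;
}

static const struct drm_mode_config_helper_funcs my_mode_config_helpers = {
        .atomic_commit_setup = my_atomic_commit_setup,
        .atomic_commit_tail = drm_atomic_helper_commit_tail,
};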
#endif
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index aafd07388eb7..ec4f543c3d95 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -24,6 +24,8 @@
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_
+#include <linux/types.h> /* stackdepot.h is not self-contained */
+#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>
struct drm_modeset_lock;
@@ -32,6 +34,7 @@ struct drm_modeset_lock;
* struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
+ * @stack_depot: used internally for contention debugging
* @locked: list of held locks
* @trylock_only: trylock mode used in atomic contexts/panic notifiers
* @interruptible: whether interruptible locking should be used.
@@ -52,6 +55,12 @@ struct drm_modeset_acquire_ctx {
struct drm_modeset_lock *contended;
/*
+ * Stack depot for debugging when a contended lock was not backed off
+ * from.
+ */
+ depot_stack_handle_t stack_depot;
+
+ /*
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
diff --git a/include/drm/drm_module.h b/include/drm/drm_module.h
new file mode 100644
index 000000000000..4db1ae03d9a5
--- /dev/null
+++ b/include/drm/drm_module.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_MODULE_H
+#define DRM_MODULE_H
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for registering DRM drivers during module
+ * initialization and shutdown. The provided helpers act like bus-specific
+ * module helpers, such as module_pci_driver(), but respect additional
+ * parameters that control DRM driver registration.
+ *
+ * Below is an example of initializing a DRM driver for a device on the
+ * PCI bus.
+ *
+ * .. code-block:: c
+ *
+ * struct pci_driver my_pci_drv = {
+ * };
+ *
+ * drm_module_pci_driver(my_pci_drv);
+ *
+ * The generated code will test if DRM drivers are enabled and register
+ * the PCI driver my_pci_drv. For more complex module initialization, you
+ * can still use module_init() and module_exit() in your driver.
+ */
+
+/*
+ * PCI drivers
+ */
+
+static inline int __init drm_pci_register_driver(struct pci_driver *pci_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper
+ * macro behaves like module_pci_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_pci_driver(__pci_drv) \
+ module_driver(__pci_drv, drm_pci_register_driver, pci_unregister_driver)
+
+static inline int __init
+drm_pci_register_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ if (drm_firmware_drivers_only() && modeset == -1)
+ return -ENODEV;
+ if (modeset == 0)
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+static inline void __exit
+drm_pci_unregister_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ pci_unregister_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver_if_modeset - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ * @__modeset: an additional parameter that disables the driver
+ *
+ * This macro is deprecated and only provided for existing drivers. For
+ * new drivers, use drm_module_pci_driver().
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper macro
+ * behaves like drm_module_pci_driver() with an additional driver-specific
+ * flag. If __modeset is 0, the driver has been disabled; if __modeset is
+ * -1, the driver state depends on the global DRM state. For all other
+ * values, the PCI driver has been enabled. The default should be -1.
+ */
+#define drm_module_pci_driver_if_modeset(__pci_drv, __modeset) \
+ module_driver(__pci_drv, drm_pci_register_driver_if_modeset, \
+ drm_pci_unregister_driver_if_modeset, __modeset)
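A sketch of the typical use in an existing driver, assuming a hypothetical "my-drm" PCI driver with the conventional modeset module parameter defaulting to -1:

static int my_modeset = -1;
module_param_named(modeset, my_modeset, int, 0400);
MODULE_PARM_DESC(modeset, "Enable/disable modesetting (-1 = auto)");

static struct pci_driver my_pci_drv = {
        .name = "my-drm",
        .id_table = my_pci_ids,         /* hypothetical PCI ID table */
        .probe = my_pci_probe,          /* hypothetical */
        .remove = my_pci_remove,        /* hypothetical */
};

drm_module_pci_driver_if_modeset(my_pci_drv, my_modeset);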
+
+/*
+ * Platform drivers
+ */
+
+static inline int __init
+drm_platform_driver_register(struct platform_driver *platform_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return platform_driver_register(platform_drv);
+}
+
+/**
+ * drm_module_platform_driver - Register a DRM driver for platform devices
+ * @__platform_drv: the platform driver structure
+ *
+ * Registers a DRM driver for devices on the platform bus. The helper
+ * macro behaves like module_platform_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_platform_driver(__platform_drv) \
+ module_driver(__platform_drv, drm_platform_driver_register, \
+ platform_driver_unregister)
+
+#endif
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b9b093add92e..082a6e980d01 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -15,6 +15,8 @@ struct drm_encoder;
struct drm_panel;
struct drm_bridge;
struct device_node;
+struct mipi_dsi_device_info;
+struct mipi_dsi_host;
/**
* enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection
@@ -49,6 +51,13 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_bridge **bridge);
int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
const struct device_node *port2);
+int drm_of_lvds_get_data_mapping(const struct device_node *port);
+int drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max);
+int drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max);
#else
static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port)
@@ -98,8 +107,40 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
{
return -EINVAL;
}
+
+static inline int
+drm_of_lvds_get_data_mapping(const struct device_node *port)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max)
+{
+ return -EINVAL;
+}
#endif
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev);
+#else
+static inline struct mipi_dsi_host *
+drm_of_get_dsi_bus(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_OF && CONFIG_DRM_MIPI_DSI */
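A sketch of how a bridge or panel driver might use the new lookup; drm_of_get_dsi_bus() and dev_err_probe() are real interfaces, the surrounding probe function is hypothetical:

static int my_dsi_child_probe(struct device *dev)
{
        struct mipi_dsi_host *host;

        host = drm_of_get_dsi_bus(dev);
        if (IS_ERR(host))
                return dev_err_probe(dev, PTR_ERR(host),
                                     "failed to find DSI host\n");

        /* ... create and attach a struct mipi_dsi_device on @host ... */
        return 0;
}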
+
/*
* drm_of_panel_bridge_remove - remove panel bridge
* @np: device tree node containing panel bridge output ports
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 6193cb555acc..10015891b056 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -27,14 +27,19 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/mutex.h>
struct backlight_device;
+struct dentry;
struct device_node;
struct drm_connector;
struct drm_device;
+struct drm_panel_follower;
struct drm_panel;
struct display_timing;
+enum drm_panel_orientation;
+
/**
* struct drm_panel_funcs - perform operations on a given panel
*
@@ -62,8 +67,8 @@ struct display_timing;
* the panel. This is the job of the .unprepare() function.
*
* Backlight can be handled automatically if configured using
- * drm_panel_of_backlight(). Then the driver does not need to implement the
- * functionality to enable/disable backlight.
+ * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver
+ * does not need to implement the functionality to enable/disable backlight.
*/
struct drm_panel_funcs {
/**
@@ -114,6 +119,15 @@ struct drm_panel_funcs {
struct drm_connector *connector);
/**
+ * @get_orientation:
+ *
+ * Return the panel orientation set by device tree or EDID.
+ *
+ * This function is optional.
+ */
+ enum drm_panel_orientation (*get_orientation)(struct drm_panel *panel);
+
+ /**
* @get_timings:
*
* Copy display timings into the provided array and return
@@ -123,6 +137,52 @@ struct drm_panel_funcs {
*/
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows panels to create panel-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
+};
+
+struct drm_panel_follower_funcs {
+ /**
+ * @panel_prepared:
+ *
+ * Called after the panel has been powered on.
+ */
+ int (*panel_prepared)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_unpreparing:
+ *
+ * Called before the panel is powered off.
+ */
+ int (*panel_unpreparing)(struct drm_panel_follower *follower);
+};
+
+struct drm_panel_follower {
+ /**
+ * @funcs:
+ *
+ * Dependent device callbacks; should be initialized by the caller.
+ */
+ const struct drm_panel_follower_funcs *funcs;
+
+ /**
+ * @list:
+ *
+ * Used for linking into panel's list; set by drm_panel_add_follower().
+ */
+ struct list_head list;
+
+ /**
+ * @panel:
+ *
+ * The panel we're dependent on; set by drm_panel_add_follower().
+ */
+ struct drm_panel *panel;
};
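A sketch of a device that follows the panel's power sequence, e.g. a touchscreen; the my_* container and power helpers are hypothetical, the follower plumbing is the API declared further below:

static int my_ts_panel_prepared(struct drm_panel_follower *follower)
{
        struct my_ts *ts = container_of(follower, struct my_ts, follower);

        return my_ts_power_on(ts);      /* hypothetical */
}

static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
{
        struct my_ts *ts = container_of(follower, struct my_ts, follower);

        return my_ts_power_off(ts);     /* hypothetical */
}

static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
        .panel_prepared = my_ts_panel_prepared,
        .panel_unpreparing = my_ts_panel_unpreparing,
};

/* In probe:
 *      ts->follower.funcs = &my_ts_follower_funcs;
 *      return devm_drm_panel_add_follower(dev, &ts->follower);
 */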
/**
@@ -142,8 +202,8 @@ struct drm_panel {
* Backlight device, used to turn on backlight after the call
* to enable(), and to turn off backlight before the call to
* disable().
- * backlight is set by drm_panel_of_backlight() and drivers
- * shall not assign it.
+ * backlight is set by drm_panel_of_backlight() or
+ * drm_panel_dp_aux_backlight() and drivers shall not assign it.
*/
struct backlight_device *backlight;
@@ -169,18 +229,53 @@ struct drm_panel {
* Panel entry in registry.
*/
struct list_head list;
+
+ /**
+ * @followers:
+ *
+ * A list of struct drm_panel_follower dependent on this panel.
+ */
+ struct list_head followers;
+
+ /**
+ * @follower_lock:
+ *
+ * Lock for followers list.
+ */
+ struct mutex follower_lock;
+
+ /**
+ * @prepare_prev_first:
+ *
+ * The previous controller should be prepared first, before the prepare
+ * for the panel is called. This is largely required for DSI panels
+ * where the DSI host controller should be initialised to LP-11 before
+ * the panel is powered up.
+ */
+ bool prepare_prev_first;
+
+ /**
+ * @prepared:
+ *
+ * If true then the panel has been prepared.
+ */
+ bool prepared;
+
+ /**
+ * @enabled:
+ *
+ * If true then the panel has been enabled.
+ */
+ bool enabled;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
-int drm_panel_add(struct drm_panel *panel);
+void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
-int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
-void drm_panel_detach(struct drm_panel *panel);
-
int drm_panel_prepare(struct drm_panel *panel);
int drm_panel_unprepare(struct drm_panel *panel);
@@ -191,11 +286,46 @@ int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
+int of_drm_get_panel_orientation(const struct device_node *np,
+ enum drm_panel_orientation *orientation);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
+
+static inline int of_drm_get_panel_orientation(const struct device_node *np,
+ enum drm_panel_orientation *orientation)
+{
+ return -ENODEV;
+}
+#endif
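A sketch of a panel driver consuming the DT orientation at probe time; DRM_MODE_PANEL_ORIENTATION_UNKNOWN is the real fallback value, the surrounding function is hypothetical:

static int my_panel_probe(struct device *dev)
{
        enum drm_panel_orientation orientation;

        if (of_drm_get_panel_orientation(dev->of_node, &orientation) < 0)
                orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;

        /* Store @orientation and return it from the optional
         * &drm_panel_funcs.get_orientation hook.
         */
        return 0;
}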
+
+#if defined(CONFIG_DRM_PANEL)
+bool drm_is_panel_follower(struct device *dev);
+int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+void drm_panel_remove_follower(struct drm_panel_follower *follower);
+int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+#else
+static inline bool drm_is_panel_follower(struct device *dev)
+{
+ return false;
+}
+
+static inline int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+
+static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { }
+static inline int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
#endif
#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index b7e899ce44f0..90e8abc08653 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -700,115 +700,3 @@
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
-
-#define r128_PCI_IDS \
- {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define mga_PCI_IDS \
- {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
- {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
- {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
- {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
- {0, 0, 0}
-
-#define sisdrv_PCI_IDS \
- {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0, 0, 0}
-
-#define tdfx_PCI_IDS \
- {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define viadrv_PCI_IDS \
- {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
- {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
- {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
- {0, 0, 0}
-
-#define i810_PCI_IDS \
- {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define savage_PCI_IDS \
- {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
- {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
- {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
- {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
- {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
- {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
- {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
- {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
- {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
- {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
- {0, 0, 0}
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 3f396d94afe4..641fe298052d 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -35,10 +35,15 @@ struct drm_crtc;
struct drm_printer;
struct drm_modeset_acquire_ctx;
+enum drm_scaling_filter {
+ DRM_SCALING_FILTER_DEFAULT,
+ DRM_SCALING_FILTER_NEAREST_NEIGHBOR,
+};
+
/**
* struct drm_plane_state - mutable plane state
*
- * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
* @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
* raw coordinates provided by userspace. Drivers should use
* drm_atomic_helper_check_plane_state() and only use the derived rectangles in
@@ -51,7 +56,7 @@ struct drm_plane_state {
/**
* @crtc:
*
- * Currently bound CRTC, NULL if disabled. Do not this write directly,
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
* use drm_atomic_set_crtc_for_plane()
*/
struct drm_crtc *crtc;
@@ -69,13 +74,11 @@ struct drm_plane_state {
*
* Optional fence to wait for before scanning out @fb. The core atomic
* code will set this when userspace is using explicit fencing. Do not
- * write this field directly for a driver's implicit fence, use
- * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
- * preserved.
+ * write this field directly for a driver's implicit fence.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
- * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See
+ * drm_gem_plane_helper_prepare_fb() for a suitable helper.
*/
struct dma_fence *fence;
@@ -113,6 +116,10 @@ struct drm_plane_state {
/** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
+ /** @hotspot_x: x offset to mouse cursor hotspot */
+ /** @hotspot_y: y offset to mouse cursor hotspot */
+ int32_t hotspot_x, hotspot_y;
+
/**
* @alpha:
* Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -181,10 +188,23 @@ struct drm_plane_state {
* since last plane update) as an array of &drm_mode_rect in framebuffer
* coordinates of the attached framebuffer. Note that unlike plane src,
* damage clips are not in 16.16 fixed point.
+ *
+ * See drm_plane_get_damage_clips() and
+ * drm_plane_get_damage_clips_count() for accessing these.
*/
struct drm_property_blob *fb_damage_clips;
/**
+ * @ignore_damage_clips:
+ *
+ * Set by drivers to indicate to the drm_atomic_helper_damage_iter_init()
+ * helper that the @fb_damage_clips blob property should be ignored.
+ *
+ * See :ref:`damage_tracking_properties` for more information.
+ */
+ bool ignore_damage_clips;
+
+ /**
* @src:
*
* source coordinates of the plane (in 16.16).
@@ -215,6 +235,13 @@ struct drm_plane_state {
bool visible;
/**
+ * @scaling_filter:
+ *
+ * Scaling filter to be applied
+ */
+ enum drm_scaling_filter scaling_filter;
+
+ /**
* @commit: Tracks the pending commit to prevent use-after-free conditions,
* and for async plane updates.
*
@@ -224,6 +251,13 @@ struct drm_plane_state {
/** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
+
+ /**
+ * @color_mgmt_changed: Color management properties have changed. Used
+ * by the atomic helpers and drivers to steer the atomic commit control
+ * flow.
+ */
+ bool color_mgmt_changed : 1;
};
static inline struct drm_rect
@@ -501,7 +535,7 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier), and to valdiate modifiers at atomic_check time.
+ * which modifier), and to validate modifiers at atomic_check time.
*
* If not present, then any modifier in the plane's modifier
* list is allowed with any of the plane's formats.
@@ -526,10 +560,14 @@ struct drm_plane_funcs {
*
* For compatibility with legacy userspace, only overlay planes are made
* available to userspace by default. Userspace clients may set the
- * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
* wish to receive a universal plane list containing all plane types. See also
* drm_for_each_legacy_plane().
*
+ * In addition to setting each plane's type, drivers need to set up the
+ * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy
+ * IOCTLs. See drm_crtc_init_with_planes().
+ *
* WARNING: The values of this enum are UABI since they're exposed in the "type"
* property.
*/
@@ -545,19 +583,20 @@ enum drm_plane_type {
/**
* @DRM_PLANE_TYPE_PRIMARY:
*
- * Primary planes represent a "main" plane for a CRTC. Primary planes
- * are the planes operated upon by CRTC modesetting and flipping
- * operations described in the &drm_crtc_funcs.page_flip and
- * &drm_crtc_funcs.set_config hooks.
+ * A primary plane attached to a CRTC is the one most likely to be able to
+ * light up the CRTC when no scaling/cropping is used and the plane
+ * covers the whole CRTC.
*/
DRM_PLANE_TYPE_PRIMARY,
/**
* @DRM_PLANE_TYPE_CURSOR:
*
- * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes
- * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
- * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+ * A cursor plane attached to a CRTC is more likely to be enabled when
+ * no scaling/cropping is used and the framebuffer has the
+ * size indicated by &drm_mode_config.cursor_width and
+ * &drm_mode_config.cursor_height. Additionally, if the driver doesn't
+ * support modifiers, the framebuffer should have a linear layout.
*/
DRM_PLANE_TYPE_CURSOR,
};
@@ -613,7 +652,7 @@ struct drm_plane {
unsigned int format_count;
/**
* @format_default: driver hasn't supplied supported formats for the
- * plane. Used by the drm_plane_init compatibility wrapper only.
+ * plane. Used by the non-atomic driver compatibility wrapper only.
*/
bool format_default;
@@ -724,6 +763,22 @@ struct drm_plane {
* See drm_plane_create_color_properties().
*/
struct drm_property *color_range_property;
+
+ /**
+ * @scaling_filter_property: property to apply a particular filter while
+ * scaling.
+ */
+ struct drm_property *scaling_filter_property;
+
+ /**
+ * @hotspot_x_property: property to set mouse hotspot x offset.
+ */
+ struct drm_property *hotspot_x_property;
+
+ /**
+ * @hotspot_y_property: property to set mouse hotspot y offset.
+ */
+ struct drm_property *hotspot_y_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
@@ -738,14 +793,97 @@ int drm_universal_plane_init(struct drm_device *dev,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...);
-int drm_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary);
void drm_plane_cleanup(struct drm_plane *plane);
+__printf(10, 11)
+void *__drmm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drmm_universal_plane_alloc - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. Cleanup is
+ * automatically handled through registering drm_plane_cleanup() with
+ * drmm_add_action().
+ *
+ * The @drm_plane_funcs.destroy hook must be NULL.
+ *
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
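A sketch of embedding a plane in driver-private state with the managed allocator; my_plane, my_plane_funcs and my_formats are hypothetical:

struct my_plane {
        struct drm_plane base;
        void __iomem *regs;             /* hypothetical driver state */
};

static struct my_plane *my_plane_create(struct drm_device *drm)
{
        struct my_plane *p;

        p = drmm_universal_plane_alloc(drm, struct my_plane, base,
                                       BIT(0) /* CRTC 0 */, &my_plane_funcs,
                                       my_formats, ARRAY_SIZE(my_formats),
                                       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
        if (IS_ERR(p))
                return p;

        /* No explicit cleanup needed; drm_plane_cleanup() is managed. */
        return p;
}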
+
+__printf(10, 11)
+void *__drm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drm_universal_plane_alloc() - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. The caller
+ * is responsible for releasing the allocated memory with kfree().
+ *
+ * Drivers are encouraged to use drmm_universal_plane_alloc() instead.
+ *
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
+
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
@@ -829,37 +967,14 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
bool drm_any_plane_has_format(struct drm_device *dev,
u32 format, u64 modifier);
-/**
- * drm_plane_get_damage_clips_count - Returns damage clips count.
- * @state: Plane state.
- *
- * Simple helper to get the number of &drm_mode_rect clips set by user-space
- * during plane update.
- *
- * Return: Number of clips in plane fb_damage_clips blob property.
- */
-static inline unsigned int
-drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
-{
- return (state && state->fb_damage_clips) ?
- state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
-}
-/**
- * drm_plane_get_damage_clips - Returns damage clips.
- * @state: Plane state.
- *
- * Note that this function returns uapi type &drm_mode_rect. Drivers might
- * instead be interested in internal &drm_rect which can be obtained by calling
- * drm_helper_get_plane_damage_clips().
- *
- * Return: Damage clips in plane fb_damage_clips blob property.
- */
-static inline struct drm_mode_rect *
-drm_plane_get_damage_clips(const struct drm_plane_state *state)
-{
- return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
- state->fb_damage_clips->data : NULL);
-}
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state);
+struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state);
+
+int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
+ unsigned int supported_filters);
#endif
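A sketch of an atomic_update path walking the user-supplied damage clips through the now out-of-line accessors; my_flush_rect() is hypothetical:

static void my_plane_flush_damage(struct drm_plane *plane,
                                  struct drm_plane_state *new_state)
{
        struct drm_mode_rect *clips = drm_plane_get_damage_clips(new_state);
        unsigned int i, count = drm_plane_get_damage_clips_count(new_state);

        for (i = 0; i < count; i++)
                my_flush_rect(plane, &clips[i]);        /* hypothetical */
}

Note that drm_plane_enable_fb_damage_clips() must have been called at plane-init time for userspace to be able to set the property at all.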
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 331ebd60b3a3..75f9c4830564 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -24,21 +24,34 @@
#ifndef DRM_PLANE_HELPER_H
#define DRM_PLANE_HELPER_H
-#include <drm/drm_rect.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+#include <linux/types.h>
-/*
- * Drivers that don't allow primary plane scaling may pass this macro in place
- * of the min/max scale parameters of the update checker function.
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+
+int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable_primary(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
+void drm_plane_helper_destroy(struct drm_plane *plane);
+
+/**
+ * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
*
- * Due to src being in 16.16 fixed point and dest being in integer pixels,
- * 1<<16 represents no scaling.
+ * This macro initializes plane functions for non-atomic drivers to default
+ * values. Non-atomic interfaces are deprecated and should not be used in new
+ * drivers.
*/
-#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-
-void drm_primary_helper_destroy(struct drm_plane *plane);
-extern const struct drm_plane_funcs drm_primary_helper_funcs;
+#define DRM_PLANE_NON_ATOMIC_FUNCS \
+ .update_plane = drm_plane_helper_update_primary, \
+ .disable_plane = drm_plane_helper_disable_primary, \
+ .destroy = drm_plane_helper_destroy
#endif
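A sketch of the intended use in a legacy, non-atomic driver:

static const struct drm_plane_funcs my_legacy_primary_funcs = {
        DRM_PLANE_NON_ATOMIC_FUNCS,
};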
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 9af7422b44cf..2a1d01e5b56b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -54,6 +54,7 @@ struct device;
struct dma_buf_export_info;
struct dma_buf;
struct dma_buf_attachment;
+struct iosys_map;
enum dma_data_direction;
@@ -82,16 +83,19 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir);
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+ struct page **pages, unsigned int nr_pages);
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags);
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
+
/* helper functions for importing */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
@@ -101,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages);
-
+int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages,
+ int max_pages);
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+ int max_pages);
#endif /* __DRM_PRIME_H__ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 1c9417430d08..9cc473e5d353 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -31,11 +31,14 @@
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/debugfs.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
+struct drm_device;
+
/* Do *not* use outside of drm_print.[ch]! */
-extern unsigned int __drm_debug;
+extern unsigned long __drm_debug;
/**
* DOC: print
@@ -67,6 +70,101 @@ extern unsigned int __drm_debug;
*/
/**
+ * enum drm_debug_category - The DRM debug categories
+ *
+ * Each of the DRM debug logging macros uses a specific category, and the logging
+ * is filtered by the drm.debug module parameter. This enum specifies the values
+ * for the interface.
+ *
+ * Each DRM_DEBUG_<CATEGORY> macro logs to the DRM_UT_<CATEGORY> category, except
+ * DRM_DEBUG() logs to DRM_UT_CORE.
+ *
+ * Enabling verbose debug messages is done through the drm.debug parameter, each
+ * category being enabled by a bit:
+ *
+ * - drm.debug=0x1 will enable CORE messages
+ * - drm.debug=0x2 will enable DRIVER messages
+ * - drm.debug=0x3 will enable CORE and DRIVER messages
+ * - ...
+ * - drm.debug=0x1ff will enable all messages
+ *
+ * An interesting feature is that it's possible to enable verbose logging at
+ * run-time by echoing the debug value into its sysfs node::
+ *
+ * # echo 0xf > /sys/module/drm/parameters/debug
+ *
+ */
+enum drm_debug_category {
+ /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */
+ /**
+ * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
+ * drm_memory.c, ...
+ */
+ DRM_UT_CORE,
+ /**
+ * @DRM_UT_DRIVER: Used in the vendor-specific part of the driver: i915,
+ * radeon, etc.
+ */
+ DRM_UT_DRIVER,
+ /**
+ * @DRM_UT_KMS: Used in the modesetting code.
+ */
+ DRM_UT_KMS,
+ /**
+ * @DRM_UT_PRIME: Used in the prime code.
+ */
+ DRM_UT_PRIME,
+ /**
+ * @DRM_UT_ATOMIC: Used in the atomic code.
+ */
+ DRM_UT_ATOMIC,
+ /**
+ * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
+ */
+ DRM_UT_VBL,
+ /**
+ * @DRM_UT_STATE: Used for verbose atomic state debugging.
+ */
+ DRM_UT_STATE,
+ /**
+ * @DRM_UT_LEASE: Used in the lease code.
+ */
+ DRM_UT_LEASE,
+ /**
+ * @DRM_UT_DP: Used in the DP code.
+ */
+ DRM_UT_DP,
+ /**
+ * @DRM_UT_DRMRES: Used in the drm managed resources code.
+ */
+ DRM_UT_DRMRES
+};
+
+static inline bool drm_debug_enabled_raw(enum drm_debug_category category)
+{
+ return unlikely(__drm_debug & BIT(category));
+}
+
+#define drm_debug_enabled_instrumented(category) \
+ ({ \
+ pr_debug("todo: is this frequent enough to optimize ?\n"); \
+ drm_debug_enabled_raw(category); \
+ })
+
+#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+/*
+ * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets
+ * a descriptor, and only enabled callsites are reachable. They use
+ * the private macro to avoid re-testing the enable-bit.
+ */
+#define __drm_debug_enabled(category) true
+#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category)
+#else
+#define __drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#define drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#endif
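A sketch of the usual pattern of gating an expensive diagnostic on the category bit; my_dump_pipe_config() is hypothetical:

static void my_check_pipe(struct drm_crtc *crtc)
{
        if (drm_debug_enabled(DRM_UT_KMS))
                my_dump_pipe_config(crtc);      /* hypothetical, expensive */
}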
+
+/**
* struct drm_printer - drm output "stream"
*
* Do not use struct members directly. Use drm_printer_seq_file(),
@@ -78,6 +176,7 @@ struct drm_printer {
void (*puts)(struct drm_printer *p, const char *str);
void *arg;
const char *prefix;
+ enum drm_debug_category category;
};
void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
@@ -85,7 +184,7 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str);
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
-void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
@@ -218,127 +317,64 @@ static inline struct drm_printer drm_info_printer(struct device *dev)
}
/**
- * drm_debug_printer - construct a &drm_printer that outputs to pr_debug()
- * @prefix: debug output prefix
+ * drm_dbg_printer - construct a &drm_printer for drm device specific output
+ * @drm: the &struct drm_device pointer, or NULL
+ * @category: the debug category to use
+ * @prefix: debug output prefix, or NULL for no prefix
*
* RETURNS:
* The &drm_printer object
*/
-static inline struct drm_printer drm_debug_printer(const char *prefix)
+static inline struct drm_printer drm_dbg_printer(struct drm_device *drm,
+ enum drm_debug_category category,
+ const char *prefix)
{
struct drm_printer p = {
- .printfn = __drm_printfn_debug,
- .prefix = prefix
+ .printfn = __drm_printfn_dbg,
+ .arg = drm,
+ .prefix = prefix,
+ .category = category,
};
return p;
}
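A sketch of feeding a multi-line dump through the new device-aware debug printer; drm_printf() is the real output call, my_read_status() is hypothetical:

static void my_dump_state(struct drm_device *drm)
{
        struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DRIVER, "my-unit");

        drm_printf(&p, "status: %08x\n", my_read_status(drm));  /* hypothetical */
}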
/**
- * drm_err_printer - construct a &drm_printer that outputs to pr_err()
- * @prefix: debug output prefix
+ * drm_err_printer - construct a &drm_printer that outputs to drm_err()
+ * @drm: the &struct drm_device pointer
+ * @prefix: debug output prefix, or NULL for no prefix
*
* RETURNS:
* The &drm_printer object
*/
-static inline struct drm_printer drm_err_printer(const char *prefix)
+static inline struct drm_printer drm_err_printer(struct drm_device *drm,
+ const char *prefix)
{
struct drm_printer p = {
.printfn = __drm_printfn_err,
+ .arg = drm,
.prefix = prefix
};
return p;
}
-/**
- * enum drm_debug_category - The DRM debug categories
- *
- * Each of the DRM debug logging macros use a specific category, and the logging
- * is filtered by the drm.debug module parameter. This enum specifies the values
- * for the interface.
- *
- * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except
- * DRM_DEBUG() logs to DRM_UT_CORE.
- *
- * Enabling verbose debug messages is done through the drm.debug parameter, each
- * category being enabled by a bit:
- *
- * - drm.debug=0x1 will enable CORE messages
- * - drm.debug=0x2 will enable DRIVER messages
- * - drm.debug=0x3 will enable CORE and DRIVER messages
- * - ...
- * - drm.debug=0x1ff will enable all messages
- *
- * An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node::
- *
- * # echo 0xf > /sys/module/drm/parameters/debug
- *
- */
-enum drm_debug_category {
- /**
- * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
- * drm_memory.c, ...
- */
- DRM_UT_CORE = 0x01,
- /**
- * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
- * radeon, ... macro.
- */
- DRM_UT_DRIVER = 0x02,
- /**
- * @DRM_UT_KMS: Used in the modesetting code.
- */
- DRM_UT_KMS = 0x04,
- /**
- * @DRM_UT_PRIME: Used in the prime code.
- */
- DRM_UT_PRIME = 0x08,
- /**
- * @DRM_UT_ATOMIC: Used in the atomic code.
- */
- DRM_UT_ATOMIC = 0x10,
- /**
- * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
- */
- DRM_UT_VBL = 0x20,
- /**
- * @DRM_UT_STATE: Used for verbose atomic state debugging.
- */
- DRM_UT_STATE = 0x40,
- /**
- * @DRM_UT_LEASE: Used in the lease code.
- */
- DRM_UT_LEASE = 0x80,
- /**
- * @DRM_UT_DP: Used in the DP code.
- */
- DRM_UT_DP = 0x100,
- /**
- * @DRM_UT_DRMRES: Used in the drm managed resources code.
- */
- DRM_UT_DRMRES = 0x200,
-};
-
-static inline bool drm_debug_enabled(enum drm_debug_category category)
-{
- return unlikely(__drm_debug & category);
-}
-
/*
* struct device based logging
*
- * Prefer drm_device based logging over device or prink based logging.
+ * Prefer drm_device based logging over device or printk based logging.
*/
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
-__printf(3, 4)
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...);
+struct _ddebug;
+__printf(4, 5)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...);
/**
- * Error output.
+ * DRM_DEV_ERROR() - Error output.
+ *
+ * NOTE: this is deprecated in favor of drm_err() or dev_err().
*
* @dev: device pointer
* @fmt: printf() like format string.
@@ -347,10 +383,15 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
/**
- * Rate limited error output. Like DRM_ERROR() but won't flood the log.
+ * DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
+ *
+ * NOTE: this is deprecated in favor of drm_err_ratelimited() or
+ * dev_err_ratelimited().
*
* @dev: device pointer
* @fmt: printf() like format string.
+ *
+ * Like DRM_ERROR() but won't flood the log.
*/
#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \
({ \
@@ -362,9 +403,11 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
static bool __print_once __read_mostly; \
@@ -374,16 +417,43 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
} \
})
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__)
+#else
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \
+ dev, cat, fmt, ##__VA_ARGS__)
+#endif
+
/**
- * Debug output.
+ * DRM_DEV_DEBUG() - Debug output for generic drm code
+ *
+ * NOTE: this is deprecated in favor of drm_dbg_core().
*
* @dev: device pointer
* @fmt: printf() like format string.
*/
#define DRM_DEV_DEBUG(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/**
+ * DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
+ *
+ * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/**
+ * DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
+ *
+ * NOTE: this is deprecated in favor of drm_dbg_kms().
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
@@ -395,7 +465,7 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
@@ -429,26 +499,27 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg_driver(drm, fmt, ...) \
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__)
#define drm_dbg_drmres(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
/*
* printk based logging
@@ -456,70 +527,99 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
* Prefer drm_device based logging over device or printk based logging.
*/
-__printf(2, 3)
-void __drm_dbg(enum drm_debug_category category, const char *format, ...);
+__printf(3, 4)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...);
__printf(1, 2)
void __drm_err(const char *format, ...);
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define __drm_dbg(cat, fmt, ...) ___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__)
+#else
+#define __drm_dbg(cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, ___drm_dbg, \
+ cat, fmt, ##__VA_ARGS__)
+#endif
+
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info(). */
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice(). */
#define DRM_NOTE(fmt, ...) \
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn(). */
#define DRM_WARN(fmt, ...) \
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info_once(). */
#define DRM_INFO_ONCE(fmt, ...) \
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice_once(). */
#define DRM_NOTE_ONCE(fmt, ...) \
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn_once(). */
#define DRM_WARN_ONCE(fmt, ...) \
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err(). */
#define DRM_ERROR(fmt, ...) \
__drm_err(fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
#define DRM_ERROR_RATELIMITED(fmt, ...) \
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
#define DRM_DEBUG(fmt, ...) \
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
#define DRM_DEBUG_DRIVER(fmt, ...) \
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
#define DRM_DEBUG_KMS(fmt, ...) \
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
#define DRM_DEBUG_PRIME(fmt, ...) \
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
#define DRM_DEBUG_ATOMIC(fmt, ...) \
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
#define DRM_DEBUG_VBL(fmt, ...) \
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
#define DRM_DEBUG_LEASE(fmt, ...) \
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
-
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \
+#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
+ const struct drm_device *drm_ = (drm); \
+ \
+ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
+ drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
+#define drm_dbg_ratelimited(drm, fmt, ...) \
+ __DRM_DEFINE_DBG_RATELIMITED(DRIVER, drm, fmt, ## __VA_ARGS__)
+
+#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
+ __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
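A sketch of a caller on a hot path, where the ratelimit keeps a persistent failure from flooding the log; the surrounding function is hypothetical:

static void my_handle_link_error(struct drm_device *drm, int err)
{
        drm_dbg_kms_ratelimited(drm, "link training failed: %d\n", err);
}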
+
/*
* struct drm_device based WARNs
*
diff --git a/include/drm/drm_privacy_screen_consumer.h b/include/drm/drm_privacy_screen_consumer.h
new file mode 100644
index 000000000000..7f66a90d15b7
--- /dev/null
+++ b/include/drm/drm_privacy_screen_consumer.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_CONSUMER_H__
+#define __DRM_PRIVACY_SCREEN_CONSUMER_H__
+
+#include <linux/device.h>
+#include <drm/drm_connector.h>
+
+struct drm_privacy_screen;
+
+#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN)
+struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id);
+void drm_privacy_screen_put(struct drm_privacy_screen *priv);
+
+int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state);
+void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret);
+
+int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb);
+int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb);
+#else
+static inline struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void drm_privacy_screen_put(struct drm_privacy_screen *priv)
+{
+}
+static inline int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state)
+{
+ return -ENODEV;
+}
+static inline void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret)
+{
+ *sw_state_ret = PRIVACY_SCREEN_DISABLED;
+ *hw_state_ret = PRIVACY_SCREEN_DISABLED;
+}
+static inline int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
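A sketch of a consumer, e.g. a KMS driver binding a privacy screen to its eDP connector at init time; drm_connector_attach_privacy_screen_provider() is the real attach helper from drm_connector.h, the surrounding function is hypothetical:

static int my_connector_init(struct drm_connector *connector,
                             struct device *dev)
{
        struct drm_privacy_screen *priv;

        priv = drm_privacy_screen_get(dev, NULL);
        if (IS_ERR(priv))
                /* No privacy screen on this machine is not an error here. */
                return PTR_ERR(priv) == -ENODEV ? 0 : PTR_ERR(priv);

        /* Takes over the reference; no drm_privacy_screen_put() needed. */
        drm_connector_attach_privacy_screen_provider(connector, priv);
        return 0;
}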
diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h
new file mode 100644
index 000000000000..4ef246d5706f
--- /dev/null
+++ b/include/drm/drm_privacy_screen_driver.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_DRIVER_H__
+#define __DRM_PRIVACY_SCREEN_DRIVER_H__
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <drm/drm_connector.h>
+
+struct drm_privacy_screen;
+
+/**
+ * struct drm_privacy_screen_ops - drm_privacy_screen operations
+ *
+ * Defines the operations which the privacy-screen class code may call.
+ * These functions should be implemented by the privacy-screen driver.
+ */
+struct drm_privacy_screen_ops {
+ /**
+ * @set_sw_state: Called to request a change of the privacy-screen
+ * state. The privacy-screen class code contains a check to avoid this
+ * getting called when the hw_state reports the state is locked.
+ * It is the driver's responsibility to update sw_state and hw_state.
+ * This is always called with the drm_privacy_screen's lock held.
+ */
+ int (*set_sw_state)(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state);
+ /**
+ * @get_hw_state: Called to request that the driver gets the current
+ * privacy-screen state from the hardware and then updates sw_state and
+ * hw_state accordingly. This will be called by the core just before
+ * the privacy-screen is registered in sysfs.
+ */
+ void (*get_hw_state)(struct drm_privacy_screen *priv);
+};
+
+/**
+ * struct drm_privacy_screen - central privacy-screen structure
+ *
+ * Central privacy-screen structure; it contains the struct device used
+ * to register the screen in sysfs, the screen's state, ops, etc.
+ */
+struct drm_privacy_screen {
+ /** @dev: device used to register the privacy-screen in sysfs. */
+ struct device dev;
+ /** @lock: mutex protecting all fields in this struct. */
+ struct mutex lock;
+ /** @list: list-entry in the privacy-screen devices list. */
+ struct list_head list;
+ /** @notifier_head: privacy-screen notifier head. */
+ struct blocking_notifier_head notifier_head;
+ /**
+ * @ops: &struct drm_privacy_screen_ops for this privacy-screen.
+ * This is NULL if the driver has unregistered the privacy-screen.
+ */
+ const struct drm_privacy_screen_ops *ops;
+ /**
+ * @sw_state: The privacy-screen's software state, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`
+ * for more info.
+ */
+ enum drm_privacy_screen_status sw_state;
+ /**
+ * @hw_state: The privacy-screen's hardware state, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`
+ * for more info.
+ */
+ enum drm_privacy_screen_status hw_state;
+ /**
+ * @drvdata: Private data owned by the privacy screen provider
+ */
+ void *drvdata;
+};
+
+static inline
+void *drm_privacy_screen_get_drvdata(struct drm_privacy_screen *priv)
+{
+ return priv->drvdata;
+}
+
+struct drm_privacy_screen *drm_privacy_screen_register(
+ struct device *parent, const struct drm_privacy_screen_ops *ops,
+ void *data);
+void drm_privacy_screen_unregister(struct drm_privacy_screen *priv);
+
+void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv);
+
+#endif
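
A provider-side sketch of the ops above (the foo_* names are hypothetical): set_sw_state runs with @lock held and must update both sw_state and hw_state; the screen is registered from probe with driver data attached.

	#include <drm/drm_privacy_screen_driver.h>

	struct foo_data {
		bool enabled;
	};

	static int foo_set_sw_state(struct drm_privacy_screen *priv,
				    enum drm_privacy_screen_status sw_state)
	{
		struct foo_data *foo = drm_privacy_screen_get_drvdata(priv);

		foo->enabled = sw_state == PRIVACY_SCREEN_ENABLED;
		/* poke the hardware here, then update both states */
		priv->sw_state = sw_state;
		priv->hw_state = sw_state;
		return 0;
	}

	static void foo_get_hw_state(struct drm_privacy_screen *priv)
	{
		struct foo_data *foo = drm_privacy_screen_get_drvdata(priv);

		priv->sw_state = priv->hw_state = foo->enabled ?
			PRIVACY_SCREEN_ENABLED : PRIVACY_SCREEN_DISABLED;
	}

	static const struct drm_privacy_screen_ops foo_ops = {
		.set_sw_state = foo_set_sw_state,
		.get_hw_state = foo_get_hw_state,
	};

	/* in probe(): screen = drm_privacy_screen_register(parent, &foo_ops, foo); */
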
diff --git a/include/drm/drm_privacy_screen_machine.h b/include/drm/drm_privacy_screen_machine.h
new file mode 100644
index 000000000000..02e5371904d3
--- /dev/null
+++ b/include/drm/drm_privacy_screen_machine.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_MACHINE_H__
+#define __DRM_PRIVACY_SCREEN_MACHINE_H__
+
+#include <linux/list.h>
+
+/**
+ * struct drm_privacy_screen_lookup - static privacy-screen lookup list entry
+ *
+ * Used for the static lookup-list for mapping privacy-screen consumer
+ * dev-connector pairs to a privacy-screen provider.
+ */
+struct drm_privacy_screen_lookup {
+ /** @list: List-entry in the lookup list. */
+ struct list_head list;
+ /** @dev_id: Consumer device name or NULL to match all devices. */
+ const char *dev_id;
+ /** @con_id: Consumer connector name or NULL to match all connectors. */
+ const char *con_id;
+ /** @provider: dev_name() of the privacy_screen provider. */
+ const char *provider;
+};
+
+void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup);
+void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup);
+
+#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) && IS_ENABLED(CONFIG_X86)
+void drm_privacy_screen_lookup_init(void);
+void drm_privacy_screen_lookup_exit(void);
+#else
+static inline void drm_privacy_screen_lookup_init(void)
+{
+}
+static inline void drm_privacy_screen_lookup_exit(void)
+{
+}
+#endif
+
+#endif
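
A sketch of how platform quirk code would use the lookup list (the con_id and provider strings are illustrative assumptions): a NULL dev_id matches any consumer device, and entries are typically added from drm_privacy_screen_lookup_init().

	#include <drm/drm_privacy_screen_machine.h>

	static struct drm_privacy_screen_lookup foo_lookup = {
		.dev_id = NULL,		/* match any consumer device */
		.con_id = "eDP-1",	/* internal panel connector */
		.provider = "privacy_screen-thinkpad_acpi",
	};

	static void foo_add_lookup(void)
	{
		drm_privacy_screen_lookup_add(&foo_lookup);
	}
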
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8d3ed2834d34..62741a88796b 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -3,9 +3,10 @@
#ifndef __DRM_PROBE_HELPER_H__
#define __DRM_PROBE_HELPER_H__
-#include <linux/types.h>
+#include <drm/drm_modes.h>
struct drm_connector;
+struct drm_crtc;
struct drm_device;
struct drm_modeset_acquire_ctx;
@@ -18,10 +19,22 @@ int drm_helper_probe_detect(struct drm_connector *connector,
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
void drm_kms_helper_hotplug_event(struct drm_device *dev);
+void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
+void drm_kms_helper_poll_reschedule(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
+enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *fixed_mode);
+
+int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
+ const struct drm_display_mode *fixed_mode);
+int drm_connector_helper_get_modes(struct drm_connector *connector);
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector);
+
#endif
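
A sketch of the new fixed-mode helpers added above (the foo_device type and to_foo_device() are hypothetical): a driver whose panel supports exactly one mode can implement both .get_modes and .mode_valid in terms of that mode.

	#include <drm/drm_probe_helper.h>

	/* Hypothetical driver state carrying the panel's single mode. */
	static int foo_get_modes(struct drm_connector *connector)
	{
		struct foo_device *foo = to_foo_device(connector->dev);

		return drm_connector_helper_get_modes_fixed(connector,
							    &foo->fixed_mode);
	}

	static enum drm_mode_status
	foo_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
	{
		struct foo_device *foo = to_foo_device(crtc->dev);

		return drm_crtc_helper_mode_valid_fixed(crtc, mode,
							&foo->fixed_mode);
	}
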
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 4a0a80d658c7..082f29156b3e 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -31,7 +31,6 @@
/**
* struct drm_property_enum - symbolic values for enumerations
- * @value: numeric property value for this enum entry
* @head: list of enum values, linked to &drm_property.enum_list
* @name: symbolic name for the enum
*
@@ -39,6 +38,14 @@
* decoding for each value. This is used for example for the rotation property.
*/
struct drm_property_enum {
+ /**
+ * @value: numeric property value for this enum entry
+ *
+ * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a
+ * bitshift, not a bitmask. In other words, the enum entry is enabled
+ * if the bit number @value is set in the property's value. This enum
+ * entry has the bitmask ``1 << value``.
+ */
uint64_t value;
struct list_head head;
char name[DRM_PROP_NAME_LEN];
@@ -114,7 +121,7 @@ struct drm_property {
* by the property. Bitmask properties are created using
* drm_property_create_bitmask().
*
- * DRM_MODE_PROB_OBJECT
+ * DRM_MODE_PROP_OBJECT
* Object properties are used to link modeset objects. This is used
* extensively in the atomic support to create the display pipeline,
* by linking &drm_framebuffer to &drm_plane, &drm_plane to
@@ -272,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
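
To make the bitshift semantics documented above concrete, a sketch of creating a bitmask property (the property and entry names are illustrative): each drm_prop_enum_list entry carries a bit number, and supported_bits is the OR of 1 << value over the supported entries.

	/* Sketch: a bitmask property; each enum entry stores a bit number. */
	static const struct drm_prop_enum_list foo_caps_list[] = {
		{ 0, "cap-a" },		/* active when bit 0 of the value is set */
		{ 1, "cap-b" },		/* active when bit 1 of the value is set */
	};

	static struct drm_property *foo_create_caps_prop(struct drm_device *dev)
	{
		return drm_property_create_bitmask(dev, 0, "FOO_CAPS",
						   foo_caps_list,
						   ARRAY_SIZE(foo_caps_list),
						   BIT(0) | BIT(1));
	}
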
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index e7f4d24cdd00..73fcb899a01d 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -39,12 +39,31 @@
* @x2: horizontal ending coordinate (exclusive)
* @y1: vertical starting coordinate (inclusive)
* @y2: vertical ending coordinate (exclusive)
+ *
+ * Note that this must match the layout of struct drm_mode_rect or the damage
+ * helpers like drm_atomic_helper_damage_iter_init() break.
*/
struct drm_rect {
int x1, y1, x2, y2;
};
/**
+ * DRM_RECT_INIT - initialize a rectangle from x/y/w/h
+ * @x: x coordinate
+ * @y: y coordinate
+ * @w: width
+ * @h: height
+ *
+ * RETURNS:
+ * A new rectangle of the specified size.
+ */
+#define DRM_RECT_INIT(x, y, w, h) ((struct drm_rect){ \
+ .x1 = (x), \
+ .y1 = (y), \
+ .x2 = (x) + (w), \
+ .y2 = (y) + (h) })
+
+/**
* DRM_RECT_FMT - printf string for &struct drm_rect
*/
#define DRM_RECT_FMT "%dx%d%+d%+d"
@@ -110,7 +129,7 @@ static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
/**
* drm_rect_translate - translate the rectangle
- * @r: rectangle to be tranlated
+ * @r: rectangle to be translated
* @dx: horizontal translation
* @dy: vertical translation
*
@@ -127,7 +146,7 @@ static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
/**
* drm_rect_translate_to - translate the rectangle to an absolute position
- * @r: rectangle to be tranlated
+ * @r: rectangle to be translated
* @x: horizontal position
* @y: vertical position
*
@@ -206,6 +225,19 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
r1->y1 == r2->y1 && r1->y2 == r2->y2;
}
+/**
+ * drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form.
+ * @dst: rect to be stored the converted value
+ * @src: rect in 16.16 fixed point form
+ */
+static inline void drm_rect_fp_to_int(struct drm_rect *dst,
+ const struct drm_rect *src)
+{
+ drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16,
+ drm_rect_width(src) >> 16,
+ drm_rect_height(src) >> 16);
+}
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
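
A short sketch combining the new DRM_RECT_INIT macro with drm_rect_fp_to_int (values illustrative): plane source rectangles are carried in 16.16 fixed point, and the conversion truncates the fractional part of the origin while preserving width and height.

	#include <drm/drm_rect.h>

	static void foo_rect_example(void)
	{
		/* 16.16 fixed point: origin (10.5, 20.25), 100x50 pixels */
		struct drm_rect src = DRM_RECT_INIT((10 << 16) + 0x8000,
						    (20 << 16) + 0x4000,
						    100 << 16, 50 << 16);
		struct drm_rect dst;

		drm_rect_fp_to_int(&dst, &src);
		/* dst == { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 } */
	}
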
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index a026375464ff..b2486d073763 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -116,8 +116,11 @@ struct drm_simple_display_pipe_funcs {
* the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
* more details.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
+ * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook
+ * set, drm_gem_plane_helper_prepare_fb() is called automatically
+ * to implement this. Other drivers which need additional plane
+ * processing can call drm_gem_plane_helper_prepare_fb() from
+ * their @prepare_fb hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -133,6 +136,26 @@ struct drm_simple_display_pipe_funcs {
struct drm_plane_state *plane_state);
/**
+ * @begin_fb_access:
+ *
+ * Optional, called by &drm_plane_helper_funcs.begin_fb_access. Please read
+ * the documentation for the &drm_plane_helper_funcs.begin_fb_access hook for
+ * more details.
+ */
+ int (*begin_fb_access)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_plane_state);
+
+ /**
+ * @end_fb_access:
+ *
+ * Optional, called by &drm_plane_helper_funcs.end_fb_access. Please read
+ * the documentation for the &drm_plane_helper_funcs.end_fb_access hook for
+ * more details.
+ */
+ void (*end_fb_access)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+ /**
* @enable_vblank:
*
* Optional, called by &drm_crtc_funcs.enable_vblank. Please read
@@ -149,6 +172,60 @@ struct drm_simple_display_pipe_funcs {
* more details.
*/
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @reset_crtc:
+ *
+ * Optional, called by &drm_crtc_funcs.reset. Please read the
+ * documentation for the &drm_crtc_funcs.reset hook for more details.
+ */
+ void (*reset_crtc)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_crtc_state:
+ *
+ * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_crtc_state:
+ *
+ * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_crtc_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state);
+
+ /**
+ * @reset_plane:
+ *
+ * Optional, called by &drm_plane_funcs.reset. Please read the
+ * documentation for the &drm_plane_funcs.reset hook for more details.
+ */
+ void (*reset_plane)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
};
/**
@@ -185,4 +262,28 @@ int drm_simple_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
int encoder_type);
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+ size_t offset, int encoder_type);
+
+/**
+ * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic
+ * functionality.
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type.
+ * @encoder_type: user visible type of the encoder
+ *
+ * Allocates and initializes an encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * Cleanup is automatically handled through registering drm_encoder_cleanup()
+ * with drmm_add_action().
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \
+ ((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ encoder_type))
+
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
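
A usage sketch for drmm_simple_encoder_alloc() (the foo_output struct is hypothetical): the driver embeds struct drm_encoder in its own type and relies on DRM-managed cleanup instead of calling drm_encoder_cleanup() itself.

	struct foo_output {
		struct drm_encoder encoder;
		/* further driver state ... */
	};

	static int foo_create_output(struct drm_device *dev)
	{
		struct foo_output *out;

		out = drmm_simple_encoder_alloc(dev, struct foo_output, encoder,
						DRM_MODE_ENCODER_NONE);
		if (IS_ERR(out))
			return PTR_ERR(out);

		/* no explicit cleanup needed; drmm handles drm_encoder_cleanup() */
		return 0;
	}
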
diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h
new file mode 100644
index 000000000000..c2188bb0b157
--- /dev/null
+++ b/include/drm/drm_suballoc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef _DRM_SUBALLOC_H_
+#define _DRM_SUBALLOC_H_
+
+#include <drm/drm_mm.h>
+
+#include <linux/dma-fence.h>
+#include <linux/types.h>
+
+#define DRM_SUBALLOC_MAX_QUEUES 32
+/**
+ * struct drm_suballoc_manager - fenced range allocations
+ * @wq: Wait queue for sleeping allocations on contention.
+ * @hole: Pointer to first hole node.
+ * @olist: List of allocated ranges.
+ * @flist: Array[fence context hash] of queues of fenced allocated ranges.
+ * @size: Size of the managed range.
+ * @align: Default alignment for the managed range.
+ */
+struct drm_suballoc_manager {
+ wait_queue_head_t wq;
+ struct list_head *hole;
+ struct list_head olist;
+ struct list_head flist[DRM_SUBALLOC_MAX_QUEUES];
+ size_t size;
+ size_t align;
+};
+
+/**
+ * struct drm_suballoc - Sub-allocated range
+ * @olist: List link for list of allocated ranges.
+ * @flist: List link for the manager's queues of fenced allocated ranges.
+ * @manager: The drm_suballoc_manager.
+ * @soffset: Start offset.
+ * @eoffset: End offset + 1 so that @eoffset - @soffset = size.
+ * @fence: The fence protecting the allocation.
+ */
+struct drm_suballoc {
+ struct list_head olist;
+ struct list_head flist;
+ struct drm_suballoc_manager *manager;
+ size_t soffset;
+ size_t eoffset;
+ struct dma_fence *fence;
+};
+
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align);
+
+void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager);
+
+struct drm_suballoc *
+drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
+ gfp_t gfp, bool intr, size_t align);
+
+void drm_suballoc_free(struct drm_suballoc *sa, struct dma_fence *fence);
+
+/**
+ * drm_suballoc_soffset - Range start.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The start of the allocated range.
+ */
+static inline size_t drm_suballoc_soffset(struct drm_suballoc *sa)
+{
+ return sa->soffset;
+}
+
+/**
+ * drm_suballoc_eoffset - Range end.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The end of the allocated range + 1.
+ */
+static inline size_t drm_suballoc_eoffset(struct drm_suballoc *sa)
+{
+ return sa->eoffset;
+}
+
+/**
+ * drm_suballoc_size - Range size.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The size of the allocated range.
+ */
+static inline size_t drm_suballoc_size(struct drm_suballoc *sa)
+{
+ return sa->eoffset - sa->soffset;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base);
+#else
+static inline void
+drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base)
+{ }
+
+#endif
+
+#endif /* _DRM_SUBALLOC_H_ */
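
A lifetime sketch for the sub-allocator (sizes illustrative; manager setup normally happens once at device init, not per allocation): allocation may sleep on fences under contention, and freeing returns the range once the passed fence signals.

	#include <linux/sizes.h>

	static int foo_suballoc_example(struct drm_suballoc_manager *mgr,
					struct dma_fence *job_fence)
	{
		struct drm_suballoc *sa;

		/* manage a 256 KiB backing buffer, 256-byte default alignment */
		drm_suballoc_manager_init(mgr, SZ_256K, 256);

		/* interruptible wait on fences if the range is contended */
		sa = drm_suballoc_new(mgr, SZ_4K, GFP_KERNEL, true, 0);
		if (IS_ERR(sa))
			return PTR_ERR(sa);

		/* use [drm_suballoc_soffset(sa), drm_suballoc_eoffset(sa)) here */

		/* the range becomes reusable once job_fence signals */
		drm_suballoc_free(sa, job_fence);
		return 0;
	}
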
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
*/
struct list_head cb_list;
/**
- * @lock: Protects &cb_list and write-locks &fence.
+ * @ev_fd_list: List of registered eventfd.
+ */
+ struct list_head ev_fd_list;
+ /**
+ * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
*/
spinlock_t lock;
/**
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index d454ef617b2c..96a5d858404b 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -11,6 +11,7 @@ int drm_class_device_register(struct device *dev);
void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
- struct drm_property *property);
+void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+ struct drm_property *property);
#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd125f8c766c..7f3957943dd1 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -230,6 +230,7 @@ bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
+int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime);
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
@@ -247,7 +248,6 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
-void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 76ac5e97a559..6c2a2f21dbf0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -53,7 +53,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
- bool readonly:1;
+ void *driver_private;
};
struct drm_vma_offset_manager {
@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node);
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
index 9697d2714d2a..17e576c80169 100644
--- a/include/drm/drm_writeback.h
+++ b/include/drm/drm_writeback.h
@@ -30,6 +30,8 @@ struct drm_writeback_connector {
* @drm_writeback_connector control the behaviour of the @encoder
* by passing the @enc_funcs parameter to drm_writeback_connector_init()
* function.
+ * For users of drm_writeback_connector_init_with_encoder(), this field
+ * is not valid as the encoder is managed within their drivers.
*/
struct drm_encoder encoder;
@@ -150,7 +152,14 @@ int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
- const u32 *formats, int n_formats);
+ const u32 *formats, int n_formats,
+ u32 possible_crtcs);
+
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs, const u32 *formats,
+ int n_formats);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb);
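
A sketch of the updated writeback init call (the foo_state struct, funcs, and format list are hypothetical): the possible_crtcs mask is now passed directly; drivers that create and manage their own encoder use the _with_encoder variant instead.

	static const u32 foo_wb_formats[] = { DRM_FORMAT_XRGB8888 };

	static int foo_wb_init(struct drm_device *dev, struct foo_state *foo,
			       struct drm_crtc *crtc)
	{
		return drm_writeback_connector_init(dev, &foo->wb_connector,
						    &foo_wb_connector_funcs,
						    &foo_wb_enc_helper_funcs,
						    foo_wb_formats,
						    ARRAY_SIZE(foo_wb_formats),
						    BIT(drm_crtc_index(crtc)));
	}
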
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index b9780ae9dd26..5acc64954a88 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,79 +27,217 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
+#include <linux/xarray.h>
+#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words, we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
+/**
+ * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
+ *
+ * Because a deadline hint can be set before the backing hw fence is
+ * created, we need to keep track of whether a deadline has already
+ * been set.
+ */
+#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
+
+enum dma_resv_usage;
+struct dma_resv;
+struct drm_gem_object;
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
+struct drm_file;
+
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH_SW,
- DRM_SCHED_PRIORITY_HIGH_HW,
DRM_SCHED_PRIORITY_KERNEL,
- DRM_SCHED_PRIORITY_MAX,
- DRM_SCHED_PRIORITY_INVALID = -1,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
+
+ DRM_SCHED_PRIORITY_COUNT
};
+/* Used to choose between FIFO and RR job scheduling */
+extern int drm_sched_policy;
+
+#define DRM_SCHED_POLICY_RR 0
+#define DRM_SCHED_POLICY_FIFO 1
+
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
*
- * @list: used to append this struct to the list of entities in the
- * runqueue.
- * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: A list of schedulers (drm_gpu_schedulers).
- * Jobs from this entity can be scheduled on any scheduler
- * on this list.
- * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
- * @priority: priority of the entity
- * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @job_queue: the list of jobs of this entity.
- * @fence_seq: a linearly increasing seqno incremented with each
- * new &drm_sched_fence which is part of the entity.
- * @fence_context: a unique context for all the fences which belong
- * to this entity.
- * The &drm_sched_fence.scheduled uses the
- * fence_context but &drm_sched_fence.finished uses
- * fence_context + 1.
- * @dependency: the dependency fence of the job which is on the top
- * of the job queue.
- * @cb: callback for the dependency fence above.
- * @guilty: points to ctx's guilty.
- * @fini_status: contains the exit status in case the process was signalled.
- * @last_scheduled: points to the finished fence of the last scheduled job.
- * @last_user: last group leader pushing a job into the entity.
- * @stopped: Marks the enity as removed from rq and destined for termination.
- * @entity_idle: Signals when enityt is not in use
- *
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
*/
struct drm_sched_entity {
+ /**
+ * @list:
+ *
+ * Used to append this struct to the list of entities in the runqueue
+ * @rq under &drm_sched_rq.entities.
+ *
+ * Protected by &drm_sched_rq.lock of @rq.
+ */
struct list_head list;
+
+ /**
+ * @rq:
+ *
+ * Runqueue on which this entity is currently scheduled.
+ *
+ * FIXME: Locking is very unclear for this. Writers are protected by
+ * @rq_lock, but readers are generally lockless and seem to just race
+ * with not even a READ_ONCE.
+ */
struct drm_sched_rq *rq;
+
+ /**
+ * @sched_list:
+ *
+ * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
+ * be scheduled on any scheduler on this list.
+ *
+ * This can be modified by calling drm_sched_entity_modify_sched().
+ * Locking is entirely up to the driver, see the above function for more
+ * details.
+ *
+ * This will be set to NULL if @num_sched_list equals 1 and @rq has been
+ * set already.
+ *
+ * FIXME: This means priority changes through
+ * drm_sched_entity_set_priority() will be lost henceforth in this case.
+ */
struct drm_gpu_scheduler **sched_list;
+
+ /**
+ * @num_sched_list:
+ *
+ * Number of drm_gpu_schedulers in the @sched_list.
+ */
unsigned int num_sched_list;
+
+ /**
+ * @priority:
+ *
+ * Priority of the entity. This can be modified by calling
+ * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ */
enum drm_sched_priority priority;
+
+ /**
+ * @rq_lock:
+ *
+ * Lock to modify the runqueue to which this entity belongs.
+ */
spinlock_t rq_lock;
+ /**
+ * @job_queue: the list of jobs of this entity.
+ */
struct spsc_queue job_queue;
+ /**
+ * @fence_seq:
+ *
+ * A linearly increasing seqno incremented with each new
+ * &drm_sched_fence which is part of the entity.
+ *
+ * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
+ * this doesn't need to be atomic.
+ */
atomic_t fence_seq;
+
+ /**
+ * @fence_context:
+ *
+ * A unique context for all the fences which belong to this entity. The
+ * &drm_sched_fence.scheduled uses the fence_context but
+ * &drm_sched_fence.finished uses fence_context + 1.
+ */
uint64_t fence_context;
+ /**
+ * @dependency:
+ *
+ * The dependency fence of the job which is on the top of the job queue.
+ */
struct dma_fence *dependency;
+
+ /**
+ * @cb:
+ *
+ * Callback for the dependency fence above.
+ */
struct dma_fence_cb cb;
+
+ /**
+ * @guilty:
+ *
+ * Points to the entity's guilty flag.
+ */
atomic_t *guilty;
- struct dma_fence *last_scheduled;
+
+ /**
+ * @last_scheduled:
+ *
+ * Points to the finished fence of the last scheduled job. Only written
+ * by the scheduler thread, can be accessed locklessly from
+ * drm_sched_job_arm() iff the queue is empty.
+ */
+ struct dma_fence __rcu *last_scheduled;
+
+ /**
+ * @last_user: last group leader pushing a job into the entity.
+ */
struct task_struct *last_user;
+
+ /**
+ * @stopped:
+ *
+ * Marks the entity as removed from the rq and destined for
+ * termination. This is set by calling drm_sched_entity_flush() and by
+ * drm_sched_fini().
+ */
bool stopped;
+
+ /**
+ * @entity_idle:
+ *
+ * Signals when entity is not in use, used to sequence entity cleanup in
+ * drm_sched_entity_fini().
+ */
struct completion entity_idle;
+
+ /**
+ * @oldest_job_waiting:
+ *
+ * Marks the earliest job waiting in the SW queue.
+ */
+ ktime_t oldest_job_waiting;
+
+ /**
+ * @rb_tree_node:
+ *
+ * The node used to insert this entity into the time-based priority queue.
+ */
+ struct rb_node rb_tree_node;
+
};
/**
@@ -109,6 +247,7 @@ struct drm_sched_entity {
* @sched: the scheduler to which this rq belongs to.
* @entities: list of the entities to be scheduled.
* @current_entity: the entity which is to be scheduled.
+ * @rb_tree_root: root of the time-based priority queue of entities for FIFO scheduling
*
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
@@ -119,6 +258,7 @@ struct drm_sched_rq {
struct drm_gpu_scheduler *sched;
struct list_head entities;
struct drm_sched_entity *current_entity;
+ struct rb_root_cached rb_tree_root;
};
/**
@@ -143,6 +283,12 @@ struct drm_sched_fence {
*/
struct dma_fence finished;
+ /**
+ * @deadline: deadline set on &drm_sched_fence.finished which
+ * potentially needs to be propagated to &drm_sched_fence.parent
+ */
+ ktime_t deadline;
+
/**
* @parent: the fence returned by &drm_sched_backend_ops.run_job
* when scheduling the job on hardware. We signal the
@@ -170,10 +316,12 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* struct drm_sched_job - A job to be run by an entity.
*
* @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @list: list-entry used while the job is on the "pending" and "done" lists.
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @credits: the number of credits this job contributes to the scheduler
+ * @work: Helper to reschedule the job kill to a different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -188,37 +336,77 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
*/
struct drm_sched_job {
struct spsc_node queue_node;
+ struct list_head list;
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- struct list_head node;
+
+ u32 credits;
+
+ /*
+ * work is used only after finish_cb has been used and will not be
+ * accessed anymore.
+ */
+ union {
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
+ };
+
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
+ struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+ /**
+ * @dependencies:
+ *
+ * Contains the dependencies as struct dma_fence for this job, see
+ * drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies().
+ */
+ struct xarray dependencies;
+
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned long last_dependency;
+
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
+enum drm_gpu_sched_stat {
+ DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_ENODEV,
+};
+
/**
- * struct drm_sched_backend_ops
+ * struct drm_sched_backend_ops - Define the backend operations
+ * called by the scheduler
*
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side.
+ * These functions should be implemented in the driver side.
*/
struct drm_sched_backend_ops {
/**
- * @dependency: Called when the scheduler is considering scheduling
- * this job next, to get another struct dma_fence for this job to
- * block on. Once it returns NULL, run_job() may be called.
+ * @prepare_job:
+ *
+ * Called when the scheduler is considering scheduling this job next, to
+ * get another struct dma_fence for this job to block on. Once it
+ * returns NULL, run_job() may be called.
+ *
+ * Can be NULL if no additional preparation of the dependencies is
+ * necessary. Skipped when jobs are killed instead of run.
*/
- struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity);
+ struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity);
/**
* @run_job: Called to execute the job once all of the dependencies
@@ -229,95 +417,184 @@ struct drm_sched_backend_ops {
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
+ *
+ * This method is called in a workqueue context.
+ *
+ * Drivers typically issue a reset to recover from GPU hangs, and this
+ * procedure usually follows the following workflow:
+ *
+ * 1. Stop the scheduler using drm_sched_stop(). This will park the
+ * scheduler thread and cancel the timeout work, guaranteeing that
+ * nothing is queued while we reset the hardware queue
+ * 2. Try to gracefully stop non-faulty jobs (optional)
+ * 3. Issue a GPU reset (driver-specific)
+ * 4. Re-submit jobs using drm_sched_resubmit_jobs()
+ * 5. Restart the scheduler using drm_sched_start(). At that point, new
+ * jobs can be queued, and the scheduler thread is unblocked
+ *
+ * Note that some GPUs have distinct hardware queues but need to reset
+ * the GPU globally, which requires extra synchronization between the
+ * timeout handler of the different &drm_gpu_scheduler. One way to
+ * achieve this synchronization is to create an ordered workqueue
+ * (using alloc_ordered_workqueue()) at the driver level, and pass this
+ * queue to drm_sched_init(), to guarantee that timeout handlers are
+ * executed sequentially. The above workflow needs to be slightly
+ * adjusted in that case:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
+ * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
+ * the reset (optional)
+ * 3. Issue a GPU reset on all faulty queues (driver-specific)
+ * 4. Re-submit jobs on all schedulers impacted by the reset using
+ * drm_sched_resubmit_jobs()
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start()
+ *
+ * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
+ * and the underlying driver has started or completed recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
+ * available, i.e. has been unplugged.
*/
- void (*timedout_job)(struct drm_sched_job *sched_job);
+ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
+
+ /**
+ * @update_job_credits: Called when the scheduler is considering this
+ * job for execution.
+ *
+ * This callback returns the number of credits the job would take if
+ * pushed to the hardware. Drivers may use this to dynamically update
+ * the job's credit count. For instance, deduct the number of credits
+ * for already signalled native fences.
+ *
+ * This callback is optional.
+ */
+ u32 (*update_job_credits)(struct drm_sched_job *sched_job);
};
/**
- * struct drm_gpu_scheduler
+ * struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
* @timeout: the time after which a job is removed from the scheduler.
* @name: name of the ring for which this scheduler is being used.
- * @sched_rq: priority wise array of run queues.
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- * is ready to be scheduled.
+ * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but could be less.
+ * @sched_rq: An allocated array of run-queues of size @num_rqs.
* @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
+ * @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
- * guilty and it will be considered for scheduling further.
+ * guilty and it will no longer be considered for scheduling.
 * @score: score to help the loadbalancer pick an idle sched
+ * @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
+ * @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
+ * @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
+ u32 credit_limit;
+ atomic_t credit_count;
long timeout;
const char *name;
- struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
- wait_queue_head_t wake_up_worker;
+ u32 num_rqs;
+ struct drm_sched_rq **sched_rq;
wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ struct work_struct work_run_job;
+ struct work_struct work_free_job;
struct delayed_work work_tdr;
- struct task_struct *thread;
- struct list_head ring_mirror_list;
+ struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
+ atomic_t *score;
+ atomic_t _score;
bool ready;
bool free_guilty;
+ bool pause_submit;
+ bool own_submit_wq;
+ struct device *dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout,
- const char *name);
+ struct workqueue_struct *submit_wq,
+ u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
+ long timeout, struct workqueue_struct *timeout_wq,
+ atomic_t *score, const char *name, struct device *dev);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- void *owner);
+ u32 credits, void *owner);
+void drm_sched_job_arm(struct drm_sched_job *job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point);
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write);
+
+
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
+void drm_sched_reset_karma(struct drm_sched_job *bad);
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
+
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
struct drm_gpu_scheduler **sched_list,
@@ -328,16 +605,21 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+int drm_sched_entity_error(struct drm_sched_entity *entity);
-struct drm_sched_fence *drm_sched_fence_create(
+struct drm_sched_fence *drm_sched_fence_alloc(
struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-void drm_sched_fence_finished(struct drm_sched_fence *fence);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+ struct dma_fence *parent);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
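
Pulling the reworked scheduler API together, a sketch of a driver's submission path (the foo_* names are hypothetical and the ops bodies are elided): jobs are initialized with a credit count, armed, given explicit dependencies, then pushed to the entity.

	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job);
	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job);
	static void foo_free_job(struct drm_sched_job *sched_job);

	static const struct drm_sched_backend_ops foo_sched_ops = {
		.run_job	= foo_run_job,
		.timedout_job	= foo_timedout_job,
		.free_job	= foo_free_job,
	};

	/*
	 * At device init, assuming no ordered timeout workqueue is needed:
	 * drm_sched_init(&sched, &foo_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
	 *		  credit_limit, hang_limit, timeout, NULL, NULL,
	 *		  "foo-ring0", dev);
	 */

	struct foo_job {
		struct drm_sched_job base;
		/* driver payload ... */
	};

	static int foo_submit(struct foo_job *job, struct drm_sched_entity *entity,
			      struct drm_gem_object *bo, void *owner)
	{
		int ret;

		ret = drm_sched_job_init(&job->base, entity, 1 /* credit */, owner);
		if (ret)
			return ret;

		drm_sched_job_arm(&job->base);

		/* depend on the BO's reservations relevant for a write access */
		ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
		if (ret) {
			drm_sched_job_cleanup(&job->base);
			return ret;
		}

		drm_sched_entity_push_job(&job->base);
		return 0;
	}
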
diff --git a/include/drm/gud.h b/include/drm/gud.h
new file mode 100644
index 000000000000..c52a8ba4ae4e
--- /dev/null
+++ b/include/drm/gud.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2020 Noralf Trønnes
+ */
+
+#ifndef __LINUX_GUD_H
+#define __LINUX_GUD_H
+
+#include <linux/types.h>
+
+/*
+ * struct gud_display_descriptor_req - Display descriptor
+ * @magic: Magic value GUD_DISPLAY_MAGIC
+ * @version: Protocol version
+ * @flags: Flags
+ * - STATUS_ON_SET: Always do a status request after a SET request.
+ * This is used by the Linux gadget driver since it has
+ * no way to control the status stage of a control OUT
+ * request that has a payload.
+ * - FULL_UPDATE: Always send the entire framebuffer when flushing changes.
+ * The GUD_REQ_SET_BUFFER request will not be sent
+ * before each bulk transfer, it will only be sent if the
+ * previous bulk transfer had failed. This gives the device
+ * a chance to reset its state machine if needed.
+ * This flag can not be used in combination with compression.
+ * @compression: Supported compression types
+ * - GUD_COMPRESSION_LZ4: LZ4 lossless compression.
+ * @max_buffer_size: Maximum buffer size the device can handle (optional).
+ * This is useful for devices that don't have a big enough
+ * buffer to decompress the entire framebuffer in one go.
+ * @min_width: Minimum pixel width the controller can handle
+ * @max_width: Maximum width
+ * @min_height: Minimum height
+ * @max_height: Maximum height
+ *
+ * Devices that have only one display mode will have min_width == max_width
+ * and min_height == max_height.
+ */
+struct gud_display_descriptor_req {
+ __le32 magic;
+#define GUD_DISPLAY_MAGIC 0x1d50614d
+ __u8 version;
+ __le32 flags;
+#define GUD_DISPLAY_FLAG_STATUS_ON_SET BIT(0)
+#define GUD_DISPLAY_FLAG_FULL_UPDATE BIT(1)
+ __u8 compression;
+#define GUD_COMPRESSION_LZ4 BIT(0)
+ __le32 max_buffer_size;
+ __le32 min_width;
+ __le32 max_width;
+ __le32 min_height;
+ __le32 max_height;
+} __packed;
+
+/*
+ * struct gud_property_req - Property
+ * @prop: Property
+ * @val: Value
+ */
+struct gud_property_req {
+ __le16 prop;
+ __le64 val;
+} __packed;
+
+/*
+ * struct gud_display_mode_req - Display mode
+ * @clock: Pixel clock in kHz
+ * @hdisplay: Horizontal display size
+ * @hsync_start: Horizontal sync start
+ * @hsync_end: Horizontal sync end
+ * @htotal: Horizontal total size
+ * @vdisplay: Vertical display size
+ * @vsync_start: Vertical sync start
+ * @vsync_end: Vertical sync end
+ * @vtotal: Vertical total size
+ * @flags: Bits 0-13 are the same as in the RandR protocol and also what DRM uses.
+ * The deprecated bits are reused for internal protocol flags leaving us
+ * free to follow DRM for the other bits in the future.
+ * - FLAG_PREFERRED: Set on the preferred display mode.
+ */
+struct gud_display_mode_req {
+ __le32 clock;
+ __le16 hdisplay;
+ __le16 hsync_start;
+ __le16 hsync_end;
+ __le16 htotal;
+ __le16 vdisplay;
+ __le16 vsync_start;
+ __le16 vsync_end;
+ __le16 vtotal;
+ __le32 flags;
+#define GUD_DISPLAY_MODE_FLAG_PHSYNC BIT(0)
+#define GUD_DISPLAY_MODE_FLAG_NHSYNC BIT(1)
+#define GUD_DISPLAY_MODE_FLAG_PVSYNC BIT(2)
+#define GUD_DISPLAY_MODE_FLAG_NVSYNC BIT(3)
+#define GUD_DISPLAY_MODE_FLAG_INTERLACE BIT(4)
+#define GUD_DISPLAY_MODE_FLAG_DBLSCAN BIT(5)
+#define GUD_DISPLAY_MODE_FLAG_CSYNC BIT(6)
+#define GUD_DISPLAY_MODE_FLAG_PCSYNC BIT(7)
+#define GUD_DISPLAY_MODE_FLAG_NCSYNC BIT(8)
+#define GUD_DISPLAY_MODE_FLAG_HSKEW BIT(9)
+/* BCast and PixelMultiplex are deprecated */
+#define GUD_DISPLAY_MODE_FLAG_DBLCLK BIT(12)
+#define GUD_DISPLAY_MODE_FLAG_CLKDIV2 BIT(13)
+#define GUD_DISPLAY_MODE_FLAG_USER_MASK \
+ (GUD_DISPLAY_MODE_FLAG_PHSYNC | GUD_DISPLAY_MODE_FLAG_NHSYNC | \
+ GUD_DISPLAY_MODE_FLAG_PVSYNC | GUD_DISPLAY_MODE_FLAG_NVSYNC | \
+ GUD_DISPLAY_MODE_FLAG_INTERLACE | GUD_DISPLAY_MODE_FLAG_DBLSCAN | \
+ GUD_DISPLAY_MODE_FLAG_CSYNC | GUD_DISPLAY_MODE_FLAG_PCSYNC | \
+ GUD_DISPLAY_MODE_FLAG_NCSYNC | GUD_DISPLAY_MODE_FLAG_HSKEW | \
+ GUD_DISPLAY_MODE_FLAG_DBLCLK | GUD_DISPLAY_MODE_FLAG_CLKDIV2)
+/* Internal protocol flags */
+#define GUD_DISPLAY_MODE_FLAG_PREFERRED BIT(10)
+} __packed;
+
+/*
+ * struct gud_connector_descriptor_req - Connector descriptor
+ * @connector_type: Connector type (GUD_CONNECTOR_TYPE_*).
+ * If the host doesn't support the type it should fall back to PANEL.
+ * @flags: Flags
+ * - POLL_STATUS: Connector status can change (polled every 10 seconds)
+ * - INTERLACE: Interlaced modes are supported
+ * - DOUBLESCAN: Doublescan modes are supported
+ */
+struct gud_connector_descriptor_req {
+ __u8 connector_type;
+#define GUD_CONNECTOR_TYPE_PANEL 0
+#define GUD_CONNECTOR_TYPE_VGA 1
+#define GUD_CONNECTOR_TYPE_COMPOSITE 2
+#define GUD_CONNECTOR_TYPE_SVIDEO 3
+#define GUD_CONNECTOR_TYPE_COMPONENT 4
+#define GUD_CONNECTOR_TYPE_DVI 5
+#define GUD_CONNECTOR_TYPE_DISPLAYPORT 6
+#define GUD_CONNECTOR_TYPE_HDMI 7
+ __le32 flags;
+#define GUD_CONNECTOR_FLAGS_POLL_STATUS BIT(0)
+#define GUD_CONNECTOR_FLAGS_INTERLACE BIT(1)
+#define GUD_CONNECTOR_FLAGS_DOUBLESCAN BIT(2)
+} __packed;
+
+/*
+ * struct gud_set_buffer_req - Set buffer transfer info
+ * @x: X position of rectangle
+ * @y: Y position
+ * @width: Pixel width of rectangle
+ * @height: Pixel height
+ * @length: Buffer length in bytes
+ * @compression: Transfer compression
+ * @compressed_length: Compressed buffer length
+ *
+ * This request is issued right before the bulk transfer.
+ * @x, @y, @width and @height specifies the rectangle where the buffer should be
+ * placed inside the framebuffer.
+ */
+struct gud_set_buffer_req {
+ __le32 x;
+ __le32 y;
+ __le32 width;
+ __le32 height;
+ __le32 length;
+ __u8 compression;
+ __le32 compressed_length;
+} __packed;
+
+/*
+ * struct gud_state_req - Display state
+ * @mode: Display mode
+ * @format: Pixel format GUD_PIXEL_FORMAT_*
+ * @connector: Connector index
+ * @properties: Array of properties
+ *
+ * The entire state is transferred each time there's a change.
+ */
+struct gud_state_req {
+ struct gud_display_mode_req mode;
+ __u8 format;
+ __u8 connector;
+ struct gud_property_req properties[];
+} __packed;
+
+/* List of supported connector properties: */
+
+/* Margins in pixels to deal with overscan, range 0-100 */
+#define GUD_PROPERTY_TV_LEFT_MARGIN 1
+#define GUD_PROPERTY_TV_RIGHT_MARGIN 2
+#define GUD_PROPERTY_TV_TOP_MARGIN 3
+#define GUD_PROPERTY_TV_BOTTOM_MARGIN 4
+#define GUD_PROPERTY_TV_MODE 5
+/* Brightness in percent, range 0-100 */
+#define GUD_PROPERTY_TV_BRIGHTNESS 6
+/* Contrast in percent, range 0-100 */
+#define GUD_PROPERTY_TV_CONTRAST 7
+/* Flicker reduction in percent, range 0-100 */
+#define GUD_PROPERTY_TV_FLICKER_REDUCTION 8
+/* Overscan in percent, range 0-100 */
+#define GUD_PROPERTY_TV_OVERSCAN 9
+/* Saturation in percent, range 0-100 */
+#define GUD_PROPERTY_TV_SATURATION 10
+/* Hue in percent, range 0-100 */
+#define GUD_PROPERTY_TV_HUE 11
+
+/*
+ * Backlight brightness is in the range 0-100 inclusive. The value represents the human perceptual
+ * brightness and not a linear PWM value. 0 is minimum brightness which should not turn the
+ * backlight completely off. The DPMS connector property should be used to control power which will
+ * trigger a GUD_REQ_SET_DISPLAY_ENABLE request.
+ *
+ * This does not map to a DRM property, it is used with the backlight device.
+ */
+#define GUD_PROPERTY_BACKLIGHT_BRIGHTNESS 12
+
+/* List of supported properties that are not connector properties: */
+
+/*
+ * Plane rotation. Should return the supported bitmask on
+ * GUD_REQ_GET_PROPERTIES. GUD_ROTATION_0 is mandatory.
+ *
+ * Note: This is not display rotation, so 90/270 will need scaling to make it fit (unless the mode is square).
+ */
+#define GUD_PROPERTY_ROTATION 50
+ #define GUD_ROTATION_0 BIT(0)
+ #define GUD_ROTATION_90 BIT(1)
+ #define GUD_ROTATION_180 BIT(2)
+ #define GUD_ROTATION_270 BIT(3)
+ #define GUD_ROTATION_REFLECT_X BIT(4)
+ #define GUD_ROTATION_REFLECT_Y BIT(5)
+ #define GUD_ROTATION_MASK (GUD_ROTATION_0 | GUD_ROTATION_90 | \
+ GUD_ROTATION_180 | GUD_ROTATION_270 | \
+ GUD_ROTATION_REFLECT_X | GUD_ROTATION_REFLECT_Y)
+
+/* USB Control requests: */
+
+/* Get status from the last GET/SET control request. Value is u8. */
+#define GUD_REQ_GET_STATUS 0x00
+ /* Status values: */
+ #define GUD_STATUS_OK 0x00
+ #define GUD_STATUS_BUSY 0x01
+ #define GUD_STATUS_REQUEST_NOT_SUPPORTED 0x02
+ #define GUD_STATUS_PROTOCOL_ERROR 0x03
+ #define GUD_STATUS_INVALID_PARAMETER 0x04
+ #define GUD_STATUS_ERROR 0x05
+
+/* Get display descriptor as a &gud_display_descriptor_req */
+#define GUD_REQ_GET_DESCRIPTOR 0x01
+
+/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
+#define GUD_REQ_GET_FORMATS 0x40
+ #define GUD_FORMATS_MAX_NUM 32
+ #define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
+ #define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
+ #define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB332 0x30
+ #define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_RGB888 0x50
+ #define GUD_PIXEL_FORMAT_XRGB8888 0x80
+ #define GUD_PIXEL_FORMAT_ARGB8888 0x81
+
+/*
+ * Get supported properties that are not connector properties as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
+ */
+#define GUD_REQ_GET_PROPERTIES 0x41
+ #define GUD_PROPERTIES_MAX_NUM 32
+
+/* Connector requests have the connector index passed in the wValue field */
+
+/* Get connector descriptors as an array of &gud_connector_descriptor_req */
+#define GUD_REQ_GET_CONNECTORS 0x50
+ #define GUD_CONNECTORS_MAX_NUM 32
+
+/*
+ * Get properties supported by the connector as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
+ */
+#define GUD_REQ_GET_CONNECTOR_PROPERTIES 0x51
+ #define GUD_CONNECTOR_PROPERTIES_MAX_NUM 32
+
+/*
+ * Issued when there's a TV_MODE property present.
+ * Gets an array of the supported TV_MODE names each entry of length
+ * GUD_CONNECTOR_TV_MODE_NAME_LEN. Names must be NUL-terminated.
+ */
+#define GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES 0x52
+ #define GUD_CONNECTOR_TV_MODE_NAME_LEN 16
+ #define GUD_CONNECTOR_TV_MODE_MAX_NUM 16
+
+/* When userspace checks connector status, this is issued first, not used for poll requests. */
+#define GUD_REQ_SET_CONNECTOR_FORCE_DETECT 0x53
+
+/*
+ * Get connector status. Value is u8.
+ *
+ * Userspace will get a HOTPLUG uevent if one of the following is true:
+ * - Connection status has changed since the last status request
+ * - CHANGED is set
+ */
+#define GUD_REQ_GET_CONNECTOR_STATUS 0x54
+ #define GUD_CONNECTOR_STATUS_DISCONNECTED 0x00
+ #define GUD_CONNECTOR_STATUS_CONNECTED 0x01
+ #define GUD_CONNECTOR_STATUS_UNKNOWN 0x02
+ #define GUD_CONNECTOR_STATUS_CONNECTED_MASK 0x03
+ #define GUD_CONNECTOR_STATUS_CHANGED BIT(7)
+
+/*
+ * Display modes can be fetched as either EDID data or an array of &gud_display_mode_req.
+ *
+ * If GUD_REQ_GET_CONNECTOR_MODES returns zero, EDID is used to create display modes.
+ * If both display modes and EDID are returned, EDID is just passed on to userspace
+ * in the EDID connector property.
+ */
+
+/* Get &gud_display_mode_req array of supported display modes */
+#define GUD_REQ_GET_CONNECTOR_MODES 0x55
+ #define GUD_CONNECTOR_MAX_NUM_MODES 128
+
+/* Get Extended Display Identification Data */
+#define GUD_REQ_GET_CONNECTOR_EDID 0x56
+ #define GUD_CONNECTOR_MAX_EDID_LEN 2048
+
+/* Set buffer properties before bulk transfer as &gud_set_buffer_req */
+#define GUD_REQ_SET_BUFFER 0x60
+
+/* Check display configuration as &gud_state_req */
+#define GUD_REQ_SET_STATE_CHECK 0x61
+
+/* Apply the previous STATE_CHECK configuration */
+#define GUD_REQ_SET_STATE_COMMIT 0x62
+
+/* Enable/disable the display controller, value is u8: 0/1 */
+#define GUD_REQ_SET_CONTROLLER_ENABLE 0x63
+
+/* Enable/disable display/output (DPMS), value is u8: 0/1 */
+#define GUD_REQ_SET_DISPLAY_ENABLE 0x64
+
+#endif
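
On the host side, a sketch of filling the set-buffer request that precedes each bulk transfer (the foo_* name and the values are illustrative): all multi-byte fields are little-endian on the wire.

	/* Sketch: describe a 64x32 damage rectangle at (16, 8), uncompressed. */
	static void foo_fill_set_buffer_req(struct gud_set_buffer_req *req, u32 len)
	{
		req->x = cpu_to_le32(16);
		req->y = cpu_to_le32(8);
		req->width = cpu_to_le32(64);
		req->height = cpu_to_le32(32);
		req->length = cpu_to_le32(len);
		req->compression = 0;			/* or GUD_COMPRESSION_LZ4 */
		req->compressed_length = cpu_to_le32(0);
	}
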
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 55c3b123581b..56a84ee1c64c 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -29,6 +29,8 @@
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
I915_COMPONENT_HDCP,
+ I915_COMPONENT_PXP,
+ I915_COMPONENT_GSC_PROXY,
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 6722005884db..adff68538484 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,8 +26,7 @@
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
-#include <drm/i915_pciids.h>
-#include <uapi/drm/i915_drm.h>
+#include <linux/types.h>
/* For use by IPS driver */
unsigned long i915_read_mch_val(void);
@@ -43,7 +42,7 @@ extern struct resource intel_graphics_stolen_res;
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
* This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
diff --git a/include/drm/i915_gsc_proxy_mei_interface.h b/include/drm/i915_gsc_proxy_mei_interface.h
new file mode 100644
index 000000000000..9462341d3ae1
--- /dev/null
+++ b/include/drm/i915_gsc_proxy_mei_interface.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+#ifndef _I915_GSC_PROXY_MEI_INTERFACE_H_
+#define _I915_GSC_PROXY_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+struct module;
+
+/**
+ * struct i915_gsc_proxy_component_ops - ops for GSC Proxy services.
+ * @owner: Module providing the ops
+ * @send: sends a proxy message from GSC FW to ME FW
+ * @recv: receives a proxy message for GSC FW from ME FW
+ */
+struct i915_gsc_proxy_component_ops {
+ struct module *owner;
+
+ /**
+ * send - Sends a proxy message to ME FW.
+ * @dev: device struct corresponding to the mei device
+ * @buf: message buffer to send
+ * @size: size of the message
+ * Return: bytes sent on success, negative errno value on failure
+ */
+ int (*send)(struct device *dev, const void *buf, size_t size);
+
+ /**
+ * recv - Receives a proxy message from ME FW.
+ * @dev: device struct corresponding to the mei device
+ * @buf: message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on success, negative errno value on failure
+ */
+ int (*recv)(struct device *dev, void *buf, size_t size);
+};
+
+/**
+ * struct i915_gsc_proxy_component - Used for communication between i915 and
+ * MEI drivers for GSC proxy services
+ * @mei_dev: device that provide the GSC proxy service.
+ * @ops: Ops implemented by GSC proxy driver, used by i915 driver.
+ */
+struct i915_gsc_proxy_component {
+ struct device *mei_dev;
+ const struct i915_gsc_proxy_component_ops *ops;
+};
+
+#endif /* _I915_GSC_PROXY_MEI_INTERFACE_H_ */
diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/i915_hdcp_interface.h
new file mode 100644
index 000000000000..4c9c8167c2d5
--- /dev/null
+++ b/include/drm/i915_hdcp_interface.h
@@ -0,0 +1,539 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef _I915_HDCP_INTERFACE_H_
+#define _I915_HDCP_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <drm/display/drm_hdcp.h>
+
+/**
+ * enum hdcp_port_type - HDCP port implementation type defined by ME/GSC FW
+ * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
+ * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
+ * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
+ * (HDMI 2.0) solution
+ * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
+ * solution
+ */
+enum hdcp_port_type {
+ HDCP_PORT_TYPE_INVALID,
+ HDCP_PORT_TYPE_INTEGRATED,
+ HDCP_PORT_TYPE_LSPCON,
+ HDCP_PORT_TYPE_CPDP
+};
+
+/**
+ * enum hdcp_wired_protocol - HDCP adaptation used on the port
+ * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
+ * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
+ * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
+ */
+enum hdcp_wired_protocol {
+ HDCP_PROTOCOL_INVALID,
+ HDCP_PROTOCOL_HDMI,
+ HDCP_PROTOCOL_DP
+};
+
+enum hdcp_ddi {
+ HDCP_DDI_INVALID_PORT = 0x0,
+
+ HDCP_DDI_B = 1,
+ HDCP_DDI_C,
+ HDCP_DDI_D,
+ HDCP_DDI_E,
+ HDCP_DDI_F,
+ HDCP_DDI_A = 7,
+ HDCP_DDI_RANGE_END = HDCP_DDI_A,
+};
+
+/**
+ * enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders
+ * @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder
+ * @HDCP_TRANSCODER_EDP: Index for EDP Transcoder
+ * @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder
+ * @HDCP_TRANSCODER_DSI1: Index for DSI1 Transcoder
+ * @HDCP_TRANSCODER_A: Index for Transcoder A
+ * @HDCP_TRANSCODER_B: Index for Transcoder B
+ * @HDCP_TRANSCODER_C: Index for Transcoder C
+ * @HDCP_TRANSCODER_D: Index for Transcoder D
+ */
+enum hdcp_transcoder {
+ HDCP_INVALID_TRANSCODER = 0x00,
+ HDCP_TRANSCODER_EDP,
+ HDCP_TRANSCODER_DSI0,
+ HDCP_TRANSCODER_DSI1,
+ HDCP_TRANSCODER_A = 0x10,
+ HDCP_TRANSCODER_B,
+ HDCP_TRANSCODER_C,
+ HDCP_TRANSCODER_D
+};
+
+/**
+ * struct hdcp_port_data - intel specific HDCP port data
+ * @hdcp_ddi: ddi index as per ME/GSC FW
+ * @hdcp_transcoder: transcoder index as per ME/GSC FW
+ * @port_type: HDCP port type as per ME/GSC FW classification
+ * @protocol: HDCP adaptation as per ME/GSC FW
+ * @k: Number of streams transmitted on a port. Only on DP MST is this != 1
+ * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over re-Auth has to be triggered.
+ * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
+ * streams
+ */
+struct hdcp_port_data {
+ enum hdcp_ddi hdcp_ddi;
+ enum hdcp_transcoder hdcp_transcoder;
+ u8 port_type;
+ u8 protocol;
+ u16 k;
+ u32 seq_num_m;
+ struct hdcp2_streamid_type *streams;
+};
+
+/**
+ * struct i915_hdcp_ops - ops for HDCP2.2 services.
+ * @owner: Module providing the ops
+ * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
+ * And Prepare AKE_Init.
+ * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
+ * AKE_Send_Cert and prepare
+ * AKE_Stored_Km/AKE_No_Stored_Km
+ * @verify_hprime: Verify AKE_Send_H_prime
+ * @store_pairing_info: Store pairing info received
+ * @initiate_locality_check: Prepare LC_Init
+ * @verify_lprime: Verify lprime
+ * @get_session_key: Prepare SKE_Send_Eks
+ * @repeater_check_flow_prepare_ack: Validate the Downstream topology
+ * and prepare rep_ack
+ * @verify_mprime: Verify mprime
+ * @enable_hdcp_authentication: Mark a port as authenticated.
+ * @close_hdcp_session: Close the Wired HDCP Tx session per port.
+ * This also disables the authenticated state of the port.
+ */
+struct i915_hdcp_ops {
+ /**
+ * @owner: hdcp module
+ */
+ struct module *owner;
+
+ int (*initiate_hdcp2_session)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data);
+ int (*verify_receiver_cert_prepare_km)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert
+ *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz);
+ int (*verify_hprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime);
+ int (*store_pairing_info)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info
+ *pairing_info);
+ int (*initiate_locality_check)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data);
+ int (*verify_lprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime);
+ int (*get_session_key)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data);
+ int (*repeater_check_flow_prepare_ack)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack);
+ int (*verify_mprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready);
+ int (*enable_hdcp_authentication)(struct device *dev,
+ struct hdcp_port_data *data);
+ int (*close_hdcp_session)(struct device *dev,
+ struct hdcp_port_data *data);
+};
+
+/**
+ * struct i915_hdcp_arbiter - Used for communication between i915
+ * and hdcp drivers for the HDCP2.2 services
+ * @hdcp_dev: device that provides the HDCP2.2 service from the MEI bus.
+ * @ops: Ops implemented by the hdcp driver or intel_hdcp_gsc, used by the i915 driver.
+ */
+struct i915_hdcp_arbiter {
+ struct device *hdcp_dev;
+ const struct i915_hdcp_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+/* fw_hdcp_status: Enumeration of all HDCP Status Codes */
+enum fw_hdcp_status {
+ FW_HDCP_STATUS_SUCCESS = 0x0000,
+
+ /* WiDi Generic Status Codes */
+ FW_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
+ FW_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
+ FW_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
+ FW_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
+ FW_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
+ FW_HDCP_STATUS_INVALID_PARAMS = 0x1005,
+ FW_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
+
+ /* WiDi Status Codes */
+ FW_HDCP_INVALID_SESSION_STATE = 0x6000,
+ FW_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
+ FW_HDCP_SRM_INVALID_LENGTH = 0x6002,
+ FW_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
+ FW_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
+ FW_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
+ FW_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
+ FW_HDCP_RX_REVOKED = 0x6007,
+ FW_HDCP_H_VERIFICATION_FAILED = 0x6008,
+ FW_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
+ FW_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
+ FW_HDCP_V_VERIFICATION_FAILED = 0x600B,
+ FW_HDCP_L_VERIFICATION_FAILED = 0x600C,
+ FW_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
+ FW_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
+ FW_HDCP_NONCE_GENERATION_FAILED = 0x600F,
+ FW_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
+ FW_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
+ FW_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
+ FW_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
+ FW_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
+ FW_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
+
+ /* New status for HDCP 2.1 */
+ FW_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
+
+ /* New status code for HDCP 2.2 Rx */
+ FW_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
+ FW_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
+ FW_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
+ FW_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
+ FW_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
+ FW_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
+ FW_HDCP_FAIL_NOT_EXPECTED = 0x6023,
+ FW_HDCP_FAIL_HDCP_OFF = 0x6024,
+ FW_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
+ FW_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
+ FW_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
+ FW_HDCP_DMA_READ_ERROR = 0x6028,
+ FW_HDCP_DMA_WRITE_ERROR = 0x6029,
+ FW_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
+ FW_HDCP_H264_PARSING_ERROR = 0x6031,
+ FW_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
+ FW_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
+ FW_HDCP_TX_ACTIVE_ERROR = 0x6034,
+ FW_HDCP_MODE_CHANGE_ERROR = 0x6035,
+ FW_HDCP_STREAM_TYPE_ERROR = 0x6036,
+ FW_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
+
+ FW_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
+ FW_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
+ FW_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
+ FW_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
+ FW_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
+ FW_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
+
+ /* hdcp capable bit is not set in rx_caps (error is unique to DP) */
+ FW_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
+
+ FW_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
+};
+
+#define HDCP_API_VERSION 0x00010000
+
+#define HDCP_M_LEN 16
+#define HDCP_KH_LEN 16
+
+/* Payload buffer size (excluding header) for CMDs and corresponding responses */
+/* Wired_Tx_AKE */
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
+
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
+
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
+
+/* Wired_Tx_LC */
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
+
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
+
+/* Wired_Tx_SKE */
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
+
+/* Wired_Tx_Enable_Auth */
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
+
+/* Wired_Tx_Repeater */
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
+ 32 + 2 + 2)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
+
+/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
+enum hdcp_command_id {
+ _WIDI_COMMAND_BASE = 0x00030000,
+ WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
+ HDCP_GET_SRM_STATUS,
+ HDCP_SEND_SRM_FRAGMENT,
+
+ /* The wired HDCP Tx commands */
+ _WIRED_COMMAND_BASE = 0x00031000,
+ WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
+ WIRED_VERIFY_RECEIVER_CERT,
+ WIRED_AKE_SEND_HPRIME,
+ WIRED_AKE_SEND_PAIRING_INFO,
+ WIRED_INIT_LOCALITY_CHECK,
+ WIRED_VALIDATE_LOCALITY,
+ WIRED_GET_SESSION_KEY,
+ WIRED_ENABLE_AUTH,
+ WIRED_VERIFY_REPEATER,
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ WIRED_CLOSE_SESSION,
+
+ _WIRED_COMMANDS_COUNT,
+};
+
+union encrypted_buff {
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+ struct {
+ u8 e_kh_km[HDCP_KH_LEN];
+ u8 m[HDCP_M_LEN];
+ } __packed;
+};
+
+/* HDCP HECI message header. All header values are little endian. */
+struct hdcp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ enum fw_hdcp_status status;
+ /* Length of the HECI message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* Empty command request or response. No data follows the header. */
+struct hdcp_cmd_no_data {
+ struct hdcp_cmd_header header;
+} __packed;
+
+/* Uniquely identifies the hdcp port being addressed for a given command. */
+struct hdcp_port_id {
+ u8 integrated_port_type;
+ /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
+ u8 physical_port;
+ /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
+ u8 attached_transcoder;
+ u8 reserved;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in
+ * support of the AKE protocol
+ */
+/* HECI struct for integrated wired HDCP Tx session initiation. */
+struct wired_cmd_initiate_hdcp2_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 protocol; /* for HDMI vs DP */
+} __packed;
+
+struct wired_cmd_initiate_hdcp2_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+/* HECI struct for ending an integrated wired HDCP Tx session. */
+struct wired_cmd_close_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_close_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
+struct wired_cmd_verify_receiver_cert_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct wired_cmd_verify_receiver_cert_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 km_stored;
+ u8 reserved[3];
+ union encrypted_buff ekm_buff;
+} __packed;
+
+/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
+struct wired_cmd_ake_send_hprime_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_hprime_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * HECI struct for sending in AKE pairing data generated by the Rx in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_ake_send_pairing_info_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_pairing_info_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol */
+/*
+ * HECI struct for initiating locality check with an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_init_locality_check_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_init_locality_check_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+/*
+ * HECI struct for validating an Rx's LPrime value in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_validate_locality_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_validate_locality_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of the
+ * SKE protocol
+ */
+/* HECI struct for creating session key */
+struct wired_cmd_get_session_key_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_get_session_key_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 r_iv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+/* HECI struct for the Tx enable authentication command */
+struct wired_cmd_enable_auth_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 stream_type;
+} __packed;
+
+struct wired_cmd_enable_auth_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of
+ * the repeater protocols
+ */
+/*
+ * HECI struct for verifying the downstream repeater's HDCP topology in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_verify_repeater_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct wired_cmd_verify_repeater_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 content_type_supported;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+/*
+ * HECI struct in support of stream management in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_repeater_auth_stream_req_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[];
+} __packed;
+
+struct wired_cmd_repeater_auth_stream_req_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+#endif /* _I915_HDCP_INTERFACE_H_ */
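
On the consumer side, every call through struct i915_hdcp_arbiter has to hold the embedded mutex, since the firmware backend (mei_hdcp or intel_hdcp_gsc) can unbind at any time. A hedged sketch of the call pattern for the first AKE step; the helper name is illustrative:

#include <linux/mutex.h>
#include <drm/i915_hdcp_interface.h>

static int example_initiate_session(struct i915_hdcp_arbiter *arbiter,
				    struct hdcp_port_data *data,
				    struct hdcp2_ake_init *ake_data)
{
	int ret;

	mutex_lock(&arbiter->mutex);
	if (!arbiter->ops || !arbiter->hdcp_dev) {
		/* Backend not bound (yet, or any more). */
		mutex_unlock(&arbiter->mutex);
		return -ENODEV;
	}

	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev,
						   data, ake_data);
	mutex_unlock(&arbiter->mutex);

	return ret;
}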
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
deleted file mode 100644
index 702f613243bb..000000000000
--- a/include/drm/i915_mei_hdcp_interface.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+) */
-/*
- * Copyright © 2017-2019 Intel Corporation
- *
- * Authors:
- * Ramalingam C <ramalingam.c@intel.com>
- */
-
-#ifndef _I915_MEI_HDCP_INTERFACE_H_
-#define _I915_MEI_HDCP_INTERFACE_H_
-
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <drm/drm_hdcp.h>
-
-/**
- * enum hdcp_port_type - HDCP port implementation type defined by ME FW
- * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
- * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
- * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
- * (HDMI 2.0) solution
- * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
- * solution
- */
-enum hdcp_port_type {
- HDCP_PORT_TYPE_INVALID,
- HDCP_PORT_TYPE_INTEGRATED,
- HDCP_PORT_TYPE_LSPCON,
- HDCP_PORT_TYPE_CPDP
-};
-
-/**
- * enum hdcp_wired_protocol - HDCP adaptation used on the port
- * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
- * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
- * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
- */
-enum hdcp_wired_protocol {
- HDCP_PROTOCOL_INVALID,
- HDCP_PROTOCOL_HDMI,
- HDCP_PROTOCOL_DP
-};
-
-enum mei_fw_ddi {
- MEI_DDI_INVALID_PORT = 0x0,
-
- MEI_DDI_B = 1,
- MEI_DDI_C,
- MEI_DDI_D,
- MEI_DDI_E,
- MEI_DDI_F,
- MEI_DDI_A = 7,
- MEI_DDI_RANGE_END = MEI_DDI_A,
-};
-
-/**
- * enum mei_fw_tc - ME Firmware defined index for transcoders
- * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder
- * @MEI_TRANSCODER_EDP: Index for EDP Transcoder
- * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder
- * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder
- * @MEI_TRANSCODER_A: Index for Transcoder A
- * @MEI_TRANSCODER_B: Index for Transcoder B
- * @MEI_TRANSCODER_C: Index for Transcoder C
- * @MEI_TRANSCODER_D: Index for Transcoder D
- */
-enum mei_fw_tc {
- MEI_INVALID_TRANSCODER = 0x00,
- MEI_TRANSCODER_EDP,
- MEI_TRANSCODER_DSI0,
- MEI_TRANSCODER_DSI1,
- MEI_TRANSCODER_A = 0x10,
- MEI_TRANSCODER_B,
- MEI_TRANSCODER_C,
- MEI_TRANSCODER_D
-};
-
-/**
- * struct hdcp_port_data - intel specific HDCP port data
- * @fw_ddi: ddi index as per ME FW
- * @fw_tc: transcoder index as per ME FW
- * @port_type: HDCP port type as per ME FW classification
- * @protocol: HDCP adaptation as per ME FW
- * @k: No of streams transmitted on a port. Only on DP MST this is != 1
- * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
- * Initialized to 0 on AKE_INIT. Incremented after every successful
- * transmission of RepeaterAuth_Stream_Manage message. When it rolls
- * over re-Auth has to be triggered.
- * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
- * streams
- */
-struct hdcp_port_data {
- enum mei_fw_ddi fw_ddi;
- enum mei_fw_tc fw_tc;
- u8 port_type;
- u8 protocol;
- u16 k;
- u32 seq_num_m;
- struct hdcp2_streamid_type *streams;
-};
-
-/**
- * struct i915_hdcp_component_ops- ops for HDCP2.2 services.
- * @owner: Module providing the ops
- * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
- * And Prepare AKE_Init.
- * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
- * AKE_Send_Cert and prepare
- AKE_Stored_Km/AKE_No_Stored_Km
- * @verify_hprime: Verify AKE_Send_H_prime
- * @store_pairing_info: Store pairing info received
- * @initiate_locality_check: Prepare LC_Init
- * @verify_lprime: Verify lprime
- * @get_session_key: Prepare SKE_Send_Eks
- * @repeater_check_flow_prepare_ack: Validate the Downstream topology
- * and prepare rep_ack
- * @verify_mprime: Verify mprime
- * @enable_hdcp_authentication: Mark a port as authenticated.
- * @close_hdcp_session: Close the Wired HDCP Tx session per port.
- * This also disables the authenticated state of the port.
- */
-struct i915_hdcp_component_ops {
- /**
- * @owner: mei_hdcp module
- */
- struct module *owner;
-
- int (*initiate_hdcp2_session)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_init *ake_data);
- int (*verify_receiver_cert_prepare_km)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_cert
- *rx_cert,
- bool *km_stored,
- struct hdcp2_ake_no_stored_km
- *ek_pub_km,
- size_t *msg_sz);
- int (*verify_hprime)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_hprime *rx_hprime);
- int (*store_pairing_info)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_pairing_info
- *pairing_info);
- int (*initiate_locality_check)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_init *lc_init_data);
- int (*verify_lprime)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_send_lprime *rx_lprime);
- int (*get_session_key)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data);
- int (*repeater_check_flow_prepare_ack)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_send_receiverid_list
- *rep_topology,
- struct hdcp2_rep_send_ack
- *rep_send_ack);
- int (*verify_mprime)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready);
- int (*enable_hdcp_authentication)(struct device *dev,
- struct hdcp_port_data *data);
- int (*close_hdcp_session)(struct device *dev,
- struct hdcp_port_data *data);
-};
-
-/**
- * struct i915_hdcp_component_master - Used for communication between i915
- * and mei_hdcp drivers for the HDCP2.2 services
- * @mei_dev: device that provide the HDCP2.2 service from MEI Bus.
- * @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver.
- */
-struct i915_hdcp_comp_master {
- struct device *mei_dev;
- const struct i915_hdcp_component_ops *ops;
-
- /* To protect the above members. */
- struct mutex mutex;
-};
-
-#endif /* _I915_MEI_HDCP_INTERFACE_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 96e408b4bdc9..28a96aa1e08f 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -170,9 +170,9 @@
#define INTEL_HSW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
- INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0A06, info) /* ULT GT1 mobile */
+ INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */
#define INTEL_HSW_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
@@ -181,26 +181,26 @@
INTEL_HSW_ULT_GT1_IDS(info), \
INTEL_HSW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+ INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */
#define INTEL_HSW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
- INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0A16, info) /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */
#define INTEL_HSW_ULX_GT2_IDS(info) \
 INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */
@@ -209,45 +209,45 @@
INTEL_HSW_ULT_GT2_IDS(info), \
INTEL_HSW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+ INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */
#define INTEL_HSW_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
#define INTEL_HSW_GT3_IDS(info) \
INTEL_HSW_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
- INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+ INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
+ INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */
#define INTEL_HSW_IDS(info) \
INTEL_HSW_GT1_IDS(info), \
@@ -258,9 +258,7 @@
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
INTEL_VGA_DEVICE(0x0f32, info), \
- INTEL_VGA_DEVICE(0x0f33, info), \
- INTEL_VGA_DEVICE(0x0157, info), \
- INTEL_VGA_DEVICE(0x0155, info)
+ INTEL_VGA_DEVICE(0x0f33, info)
#define INTEL_BDW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
@@ -331,17 +329,20 @@
INTEL_VGA_DEVICE(0x22b3, info)
#define INTEL_SKL_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1906, info) /* ULT GT1 */
+ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */
#define INTEL_SKL_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x190E, info) /* ULX GT1 */
+ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */
#define INTEL_SKL_GT1_IDS(info) \
INTEL_SKL_ULT_GT1_IDS(info), \
INTEL_SKL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+ INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */
#define INTEL_SKL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
@@ -354,26 +355,26 @@
INTEL_SKL_ULT_GT2_IDS(info), \
INTEL_SKL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
#define INTEL_SKL_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x1926, info) /* ULT GT3 */
+ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \
+ INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */
#define INTEL_SKL_GT3_IDS(info) \
INTEL_SKL_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
+ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \
+ INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
- INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
- INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \
- INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */
+ INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \
+ INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \
+ INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
@@ -405,8 +406,8 @@
INTEL_KBL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
+ INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */
#define INTEL_KBL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
@@ -418,10 +419,10 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_KBL_ULT_GT2_IDS(info), \
INTEL_KBL_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
#define INTEL_KBL_ULT_GT3_IDS(info) \
@@ -446,10 +447,10 @@
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9BA5, info), \
- INTEL_VGA_DEVICE(0x9BA8, info), \
+ INTEL_VGA_DEVICE(0x9BA2, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
- INTEL_VGA_DEVICE(0x9BA2, info)
+ INTEL_VGA_DEVICE(0x9BA5, info), \
+ INTEL_VGA_DEVICE(0x9BA8, info)
#define INTEL_CML_U_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9B21, info), \
@@ -458,11 +459,11 @@
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9BC5, info), \
- INTEL_VGA_DEVICE(0x9BC8, info), \
- INTEL_VGA_DEVICE(0x9BC4, info), \
INTEL_VGA_DEVICE(0x9BC2, info), \
+ INTEL_VGA_DEVICE(0x9BC4, info), \
+ INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC6, info), \
+ INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
@@ -496,8 +497,8 @@
INTEL_VGA_DEVICE(0x3E9C, info)
#define INTEL_CFL_H_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
+ INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
@@ -542,73 +543,82 @@
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
- INTEL_VGA_DEVICE(0x5A54, info), \
- INTEL_VGA_DEVICE(0x5A5C, info), \
INTEL_VGA_DEVICE(0x5A44, info), \
- INTEL_VGA_DEVICE(0x5A4C, info)
+ INTEL_VGA_DEVICE(0x5A4C, info), \
+ INTEL_VGA_DEVICE(0x5A54, info), \
+ INTEL_VGA_DEVICE(0x5A5C, info)
#define INTEL_CNL_IDS(info) \
INTEL_CNL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x5A51, info), \
- INTEL_VGA_DEVICE(0x5A59, info), \
+ INTEL_VGA_DEVICE(0x5A40, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
- INTEL_VGA_DEVICE(0x5A49, info), \
- INTEL_VGA_DEVICE(0x5A52, info), \
- INTEL_VGA_DEVICE(0x5A5A, info), \
INTEL_VGA_DEVICE(0x5A42, info), \
+ INTEL_VGA_DEVICE(0x5A49, info), \
INTEL_VGA_DEVICE(0x5A4A, info), \
INTEL_VGA_DEVICE(0x5A50, info), \
- INTEL_VGA_DEVICE(0x5A40, info)
+ INTEL_VGA_DEVICE(0x5A51, info), \
+ INTEL_VGA_DEVICE(0x5A52, info), \
+ INTEL_VGA_DEVICE(0x5A59, info), \
+ INTEL_VGA_DEVICE(0x5A5A, info)
/* ICL */
#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
- INTEL_VGA_DEVICE(0x8A5C, info), \
- INTEL_VGA_DEVICE(0x8A59, info), \
- INTEL_VGA_DEVICE(0x8A58, info), \
INTEL_VGA_DEVICE(0x8A52, info), \
+ INTEL_VGA_DEVICE(0x8A53, info), \
+ INTEL_VGA_DEVICE(0x8A54, info), \
+ INTEL_VGA_DEVICE(0x8A56, info), \
+ INTEL_VGA_DEVICE(0x8A57, info), \
+ INTEL_VGA_DEVICE(0x8A58, info), \
+ INTEL_VGA_DEVICE(0x8A59, info), \
INTEL_VGA_DEVICE(0x8A5A, info), \
INTEL_VGA_DEVICE(0x8A5B, info), \
- INTEL_VGA_DEVICE(0x8A57, info), \
- INTEL_VGA_DEVICE(0x8A56, info), \
- INTEL_VGA_DEVICE(0x8A71, info), \
+ INTEL_VGA_DEVICE(0x8A5C, info), \
INTEL_VGA_DEVICE(0x8A70, info), \
- INTEL_VGA_DEVICE(0x8A53, info), \
- INTEL_VGA_DEVICE(0x8A54, info)
+ INTEL_VGA_DEVICE(0x8A71, info)
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
-/* EHL/JSL */
+/* EHL */
#define INTEL_EHL_IDS(info) \
- INTEL_VGA_DEVICE(0x4500, info), \
- INTEL_VGA_DEVICE(0x4571, info), \
- INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4541, info), \
- INTEL_VGA_DEVICE(0x4E71, info), \
- INTEL_VGA_DEVICE(0x4557, info), \
+ INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4555, info), \
- INTEL_VGA_DEVICE(0x4E61, info), \
- INTEL_VGA_DEVICE(0x4E57, info), \
+ INTEL_VGA_DEVICE(0x4557, info), \
+ INTEL_VGA_DEVICE(0x4570, info), \
+ INTEL_VGA_DEVICE(0x4571, info)
+
+/* JSL */
+#define INTEL_JSL_IDS(info) \
+ INTEL_VGA_DEVICE(0x4E51, info), \
INTEL_VGA_DEVICE(0x4E55, info), \
- INTEL_VGA_DEVICE(0x4E51, info)
+ INTEL_VGA_DEVICE(0x4E57, info), \
+ INTEL_VGA_DEVICE(0x4E61, info), \
+ INTEL_VGA_DEVICE(0x4E71, info)
/* TGL */
-#define INTEL_TGL_12_IDS(info) \
+#define INTEL_TGL_12_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x9A60, info), \
+ INTEL_VGA_DEVICE(0x9A68, info), \
+ INTEL_VGA_DEVICE(0x9A70, info)
+
+#define INTEL_TGL_12_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x9A40, info), \
INTEL_VGA_DEVICE(0x9A49, info), \
INTEL_VGA_DEVICE(0x9A59, info), \
- INTEL_VGA_DEVICE(0x9A60, info), \
- INTEL_VGA_DEVICE(0x9A68, info), \
- INTEL_VGA_DEVICE(0x9A70, info), \
INTEL_VGA_DEVICE(0x9A78, info), \
INTEL_VGA_DEVICE(0x9AC0, info), \
INTEL_VGA_DEVICE(0x9AC9, info), \
INTEL_VGA_DEVICE(0x9AD9, info), \
INTEL_VGA_DEVICE(0x9AF8, info)
+#define INTEL_TGL_12_IDS(info) \
+ INTEL_TGL_12_GT1_IDS(info), \
+ INTEL_TGL_12_GT2_IDS(info)
+
/* RKL */
#define INTEL_RKL_IDS(info) \
INTEL_VGA_DEVICE(0x4C80, info), \
@@ -620,6 +630,136 @@
/* DG1 */
#define INTEL_DG1_IDS(info) \
- INTEL_VGA_DEVICE(0x4905, info)
+ INTEL_VGA_DEVICE(0x4905, info), \
+ INTEL_VGA_DEVICE(0x4906, info), \
+ INTEL_VGA_DEVICE(0x4907, info), \
+ INTEL_VGA_DEVICE(0x4908, info), \
+ INTEL_VGA_DEVICE(0x4909, info)
+
+/* ADL-S */
+#define INTEL_ADLS_IDS(info) \
+ INTEL_VGA_DEVICE(0x4680, info), \
+ INTEL_VGA_DEVICE(0x4682, info), \
+ INTEL_VGA_DEVICE(0x4688, info), \
+ INTEL_VGA_DEVICE(0x468A, info), \
+ INTEL_VGA_DEVICE(0x468B, info), \
+ INTEL_VGA_DEVICE(0x4690, info), \
+ INTEL_VGA_DEVICE(0x4692, info), \
+ INTEL_VGA_DEVICE(0x4693, info)
+
+/* ADL-P */
+#define INTEL_ADLP_IDS(info) \
+ INTEL_VGA_DEVICE(0x46A0, info), \
+ INTEL_VGA_DEVICE(0x46A1, info), \
+ INTEL_VGA_DEVICE(0x46A2, info), \
+ INTEL_VGA_DEVICE(0x46A3, info), \
+ INTEL_VGA_DEVICE(0x46A6, info), \
+ INTEL_VGA_DEVICE(0x46A8, info), \
+ INTEL_VGA_DEVICE(0x46AA, info), \
+ INTEL_VGA_DEVICE(0x462A, info), \
+ INTEL_VGA_DEVICE(0x4626, info), \
+ INTEL_VGA_DEVICE(0x4628, info), \
+ INTEL_VGA_DEVICE(0x46B0, info), \
+ INTEL_VGA_DEVICE(0x46B1, info), \
+ INTEL_VGA_DEVICE(0x46B2, info), \
+ INTEL_VGA_DEVICE(0x46B3, info), \
+ INTEL_VGA_DEVICE(0x46C0, info), \
+ INTEL_VGA_DEVICE(0x46C1, info), \
+ INTEL_VGA_DEVICE(0x46C2, info), \
+ INTEL_VGA_DEVICE(0x46C3, info)
+
+/* ADL-N */
+#define INTEL_ADLN_IDS(info) \
+ INTEL_VGA_DEVICE(0x46D0, info), \
+ INTEL_VGA_DEVICE(0x46D1, info), \
+ INTEL_VGA_DEVICE(0x46D2, info), \
+ INTEL_VGA_DEVICE(0x46D3, info), \
+ INTEL_VGA_DEVICE(0x46D4, info)
+
+/* RPL-S */
+#define INTEL_RPLS_IDS(info) \
+ INTEL_VGA_DEVICE(0xA780, info), \
+ INTEL_VGA_DEVICE(0xA781, info), \
+ INTEL_VGA_DEVICE(0xA782, info), \
+ INTEL_VGA_DEVICE(0xA783, info), \
+ INTEL_VGA_DEVICE(0xA788, info), \
+ INTEL_VGA_DEVICE(0xA789, info), \
+ INTEL_VGA_DEVICE(0xA78A, info), \
+ INTEL_VGA_DEVICE(0xA78B, info)
+
+/* RPL-U */
+#define INTEL_RPLU_IDS(info) \
+ INTEL_VGA_DEVICE(0xA721, info), \
+ INTEL_VGA_DEVICE(0xA7A1, info), \
+ INTEL_VGA_DEVICE(0xA7A9, info), \
+ INTEL_VGA_DEVICE(0xA7AC, info), \
+ INTEL_VGA_DEVICE(0xA7AD, info)
+
+/* RPL-P */
+#define INTEL_RPLP_IDS(info) \
+ INTEL_RPLU_IDS(info), \
+ INTEL_VGA_DEVICE(0xA720, info), \
+ INTEL_VGA_DEVICE(0xA7A0, info), \
+ INTEL_VGA_DEVICE(0xA7A8, info), \
+ INTEL_VGA_DEVICE(0xA7AA, info), \
+ INTEL_VGA_DEVICE(0xA7AB, info)
+
+/* DG2 */
+#define INTEL_DG2_G10_IDS(info) \
+ INTEL_VGA_DEVICE(0x5690, info), \
+ INTEL_VGA_DEVICE(0x5691, info), \
+ INTEL_VGA_DEVICE(0x5692, info), \
+ INTEL_VGA_DEVICE(0x56A0, info), \
+ INTEL_VGA_DEVICE(0x56A1, info), \
+ INTEL_VGA_DEVICE(0x56A2, info)
+
+#define INTEL_DG2_G11_IDS(info) \
+ INTEL_VGA_DEVICE(0x5693, info), \
+ INTEL_VGA_DEVICE(0x5694, info), \
+ INTEL_VGA_DEVICE(0x5695, info), \
+ INTEL_VGA_DEVICE(0x56A5, info), \
+ INTEL_VGA_DEVICE(0x56A6, info), \
+ INTEL_VGA_DEVICE(0x56B0, info), \
+ INTEL_VGA_DEVICE(0x56B1, info), \
+ INTEL_VGA_DEVICE(0x56BA, info), \
+ INTEL_VGA_DEVICE(0x56BB, info), \
+ INTEL_VGA_DEVICE(0x56BC, info), \
+ INTEL_VGA_DEVICE(0x56BD, info)
+
+#define INTEL_DG2_G12_IDS(info) \
+ INTEL_VGA_DEVICE(0x5696, info), \
+ INTEL_VGA_DEVICE(0x5697, info), \
+ INTEL_VGA_DEVICE(0x56A3, info), \
+ INTEL_VGA_DEVICE(0x56A4, info), \
+ INTEL_VGA_DEVICE(0x56B2, info), \
+ INTEL_VGA_DEVICE(0x56B3, info)
+
+#define INTEL_DG2_IDS(info) \
+ INTEL_DG2_G10_IDS(info), \
+ INTEL_DG2_G11_IDS(info), \
+ INTEL_DG2_G12_IDS(info)
+
+#define INTEL_ATS_M150_IDS(info) \
+ INTEL_VGA_DEVICE(0x56C0, info), \
+ INTEL_VGA_DEVICE(0x56C2, info)
+
+#define INTEL_ATS_M75_IDS(info) \
+ INTEL_VGA_DEVICE(0x56C1, info)
+
+#define INTEL_ATS_M_IDS(info) \
+ INTEL_ATS_M150_IDS(info), \
+ INTEL_ATS_M75_IDS(info)
+
+/* MTL */
+#define INTEL_MTL_IDS(info) \
+ INTEL_VGA_DEVICE(0x7D40, info), \
+ INTEL_VGA_DEVICE(0x7D41, info), \
+ INTEL_VGA_DEVICE(0x7D45, info), \
+ INTEL_VGA_DEVICE(0x7D51, info), \
+ INTEL_VGA_DEVICE(0x7D55, info), \
+ INTEL_VGA_DEVICE(0x7D60, info), \
+ INTEL_VGA_DEVICE(0x7D67, info), \
+ INTEL_VGA_DEVICE(0x7DD1, info), \
+ INTEL_VGA_DEVICE(0x7DD5, info)
#endif /* _I915_PCIIDS_H */
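
These macros are meant to be expanded into a struct pci_device_id table; each INTEL_VGA_DEVICE() entry matches vendor 0x8086 plus the given device ID and stashes the info pointer in driver_data. A minimal sketch of a match table built from the new groups; the info payloads here are hypothetical placeholders, where i915 itself passes pointers to its intel_device_info structures:

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

static const int example_mtl_info;	/* hypothetical per-platform data */
static const int example_dg2_info;

static const struct pci_device_id example_pciidlist[] = {
	INTEL_MTL_IDS(&example_mtl_info),
	INTEL_DG2_IDS(&example_dg2_info),
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pciidlist);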
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
new file mode 100644
index 000000000000..7d96985f2d05
--- /dev/null
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I915_PXP_TEE_INTERFACE_H_
+#define _I915_PXP_TEE_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+struct scatterlist;
+
+/**
+ * struct i915_pxp_component_ops - ops for PXP services.
+ * @owner: Module providing the ops
+ * @send: sends data to PXP
+ * @recv: receives data from PXP
+ */
+struct i915_pxp_component_ops {
+ /**
+ * @owner: owner of the module providing the ops
+ */
+ struct module *owner;
+
+ int (*send)(struct device *dev, const void *message, size_t size,
+ unsigned long timeout_ms);
+ int (*recv)(struct device *dev, void *buffer, size_t size,
+ unsigned long timeout_ms);
+ ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id,
+ struct scatterlist *sg_in, size_t total_in_len,
+ struct scatterlist *sg_out);
+
+};
+
+/**
+ * struct i915_pxp_component - Used for communication between i915 and TEE
+ * drivers for the PXP services
+ * @tee_dev: device that provides the PXP service from the TEE bus.
+ * @ops: Ops implemented by the TEE driver, used by the i915 driver.
+ */
+struct i915_pxp_component {
+ struct device *tee_dev;
+ const struct i915_pxp_component_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+#endif /* _I915_PXP_TEE_INTERFACE_H_ */
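
Usage mirrors the HDCP arbiter: i915 takes the component mutex, checks the TEE backend is still bound, then calls through the ops. A hedged sketch, with the 1000 ms timeout purely illustrative:

#include <linux/mutex.h>
#include <drm/i915_pxp_tee_interface.h>

static int example_pxp_send(struct i915_pxp_component *pxp,
			    const void *msg, size_t size)
{
	int ret;

	mutex_lock(&pxp->mutex);
	if (!pxp->ops || !pxp->tee_dev)
		ret = -ENODEV;
	else
		ret = pxp->ops->send(pxp->tee_dev, msg, size, 1000);
	mutex_unlock(&pxp->mutex);

	return ret;
}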
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 71d81923e6b0..cb0d5b7200c7 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -4,27 +4,30 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-#include <linux/agp_backend.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
-void intel_gtt_get(u64 *gtt_total,
- phys_addr_t *mappable_base,
- resource_size_t *mappable_end);
+struct agp_bridge_data;
+struct pci_dev;
+struct sg_table;
+
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(void);
-bool intel_enable_gtt(void);
+bool intel_gmch_enable_gtt(void);
-void intel_gtt_chipset_flush(void);
-void intel_gtt_insert_page(dma_addr_t addr,
- unsigned int pg,
- unsigned int flags);
-void intel_gtt_insert_sg_entries(struct sg_table *st,
- unsigned int pg_start,
- unsigned int flags);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+void intel_gmch_gtt_flush(void);
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags);
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
@@ -33,8 +36,4 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
-#ifdef CONFIG_INTEL_IOMMU
-extern int intel_iommu_gfx_mapped;
-#endif
-
#endif
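
The rename is mechanical (intel_gtt_* becomes intel_gmch_gtt_*), so callers only need a rename pass plus the slimmer header. As a rough sketch, the classic insert-and-flush sequence under the new names; the DMA address, page index and AGP caching flag are illustrative:

#include <linux/agp_backend.h>
#include <drm/intel-gtt.h>

static void example_map_one_page(dma_addr_t addr, unsigned int pg_index)
{
	if (!intel_gmch_enable_gtt())
		return;

	intel_gmch_gtt_insert_page(addr, pg_index, AGP_USER_MEMORY);
	intel_gmch_gtt_flush();	/* make the PTE write visible to the GPU */
}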
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
index 087e3f649c52..f6e6ed529681 100644
--- a/include/drm/task_barrier.h
+++ b/include/drm/task_barrier.h
@@ -24,8 +24,8 @@
#include <linux/atomic.h>
/*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
*/
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
new file mode 100644
index 000000000000..0223a41a64b2
--- /dev/null
+++ b/include/drm/ttm/ttm_bo.h
@@ -0,0 +1,427 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include <drm/drm_gem.h>
+
+#include <linux/kref.h>
+#include <linux/list.h>
+
+#include "ttm_device.h"
+
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+struct iosys_map;
+
+struct ttm_global;
+struct ttm_device;
+struct ttm_placement;
+struct ttm_place;
+struct ttm_resource;
+struct ttm_resource_manager;
+struct ttm_tt;
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device: These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupies a slot in the
+ * device address space that can be used for normal vm operations.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
+ */
+enum ttm_bo_type {
+ ttm_bo_type_device,
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
+};
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @base: drm_gem_object superclass data.
+ * @bdev: Pointer to the buffer object device structure.
+ * @type: The bo type.
+ * @page_alignment: Page alignment.
+ * @destroy: Destruction function. If NULL, kfree is used.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is destroyed or put on the delayed delete list.
+ * @resource: structure describing current placement.
+ * @ttm: TTM structure holding system pages.
+ * @deleted: True if the object is only a zombie and already deleted.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member and the API visibility of this object make it
+ * possible to derive driver-specific types.
+ */
+struct ttm_buffer_object {
+ struct drm_gem_object base;
+
+ /*
+ * Members constant at init.
+ */
+ struct ttm_device *bdev;
+ enum ttm_bo_type type;
+ uint32_t page_alignment;
+ void (*destroy) (struct ttm_buffer_object *);
+
+ /*
+ * Members not needing protection.
+ */
+ struct kref kref;
+
+ /*
+ * Members protected by the bo::resv::reserved lock.
+ */
+ struct ttm_resource *resource;
+ struct ttm_tt *ttm;
+ bool deleted;
+ struct ttm_lru_bulk_move *bulk_move;
+ unsigned priority;
+ unsigned pin_count;
+
+ /**
+ * @delayed_delete: Work item used when we can't delete the BO
+ * immediately
+ */
+ struct work_struct delayed_delete;
+
+ /**
+ * Special members that are protected by the reserve lock
+ * and the bo::lock when written to. Can be read with
+ * either of these locks held.
+ */
+ struct sg_table *sg;
+};
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+struct ttm_bo_kmap_obj {
+ void *virtual;
+ struct page *page;
+ enum {
+ ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
+ ttm_bo_map_vmap = 2,
+ ttm_bo_map_kmap = 3,
+ ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
+ } bo_kmap_type;
+ struct ttm_buffer_object *bo;
+};
+
+/**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
+ * BOs share the same reservation object.
+ * @force_alloc: Don't check the memory account during suspend or CPU page
+ * faults. Should only be used by TTM internally.
+ * @resv: Reservation object to allow reserved evictions with.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+ bool interruptible;
+ bool no_wait_gpu;
+ bool gfp_retry_mayfail;
+ bool allow_res_evict;
+ bool force_alloc;
+ struct dma_resv *resv;
+ uint64_t bytes_moved;
+};
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @ticket: ticket used to acquire the ww_mutex.
+ *
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again.
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @ticket is provided.
+ */
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret = 0;
+
+ if (no_wait) {
+ bool success;
+
+ if (WARN_ON(ticket))
+ return -EBUSY;
+
+ success = dma_resv_trylock(bo->base.resv);
+ return success ? 0 : -EBUSY;
+ }
+
+ if (interruptible)
+ ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
+ else
+ ret = dma_resv_lock(bo->base.resv, ticket);
+ if (ret == -EINTR)
+ return -ERESTARTSYS;
+ return ret;
+}
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @ticket: ticket used to acquire the ww_mutex.
+ *
+ * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible,
+ struct ww_acquire_ctx *ticket)
+{
+ if (interruptible) {
+ int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ dma_resv_lock_slow(bo->base.resv, ticket);
+ return 0;
+}
+
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(bo);
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
+}
+
+/**
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
+ *
+ * Assign the memory from new_mem to the memory of the buffer object bo.
+ */
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
+}
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to a boolean that on return indicates whether the
+ * virtual map is io memory or normal memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is true on return, the virtual address points to an io
+ * memory area that should strictly be accessed by iowriteXX() and similar functions.
+ */
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+ bool *is_iomem)
+{
+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+ return map->virtual;
+}
+
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx);
+void ttm_bo_put(struct ttm_buffer_object *bo);
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
+int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
+int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags);
+void ttm_bo_pin(struct ttm_buffer_object *bo);
+void ttm_bo_unpin(struct ttm_buffer_object *bo);
+int ttm_mem_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket);
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf);
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault);
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
+void ttm_bo_vm_open(struct vm_area_struct *vma);
+void ttm_bo_vm_close(struct vm_area_struct *vma);
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **mem,
+ struct ttm_operation_ctx *ctx);
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
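+
+/*
+ * Editor's illustrative sketch (not part of the original header): a typical
+ * creation flow using ttm_bo_init_reserved(); on success (with @resv == NULL)
+ * the caller holds the reservation and must drop it with ttm_bo_unreserve().
+ * "my_bo_destroy" and the zero alignment are hypothetical.
+ */
+#if 0	/* example only */
+static int my_create_bo(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+			struct ttm_placement *placement)
+{
+	struct ttm_operation_ctx ctx = { .interruptible = true };
+	int ret;
+
+	ret = ttm_bo_init_reserved(bdev, bo, ttm_bo_type_kernel, placement,
+				   0, &ctx, NULL, NULL, my_bo_destroy);
+	if (ret)
+		return ret;	/* @bo has already been destroyed on failure */
+	/* ... fill initial content while the BO is still reserved ... */
+	ttm_bo_unreserve(bo);
+	return 0;
+}
+#endif
+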
+/*
+ * ttm_bo_util.c
+ */
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_move_memcpy(bool clear, u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem);
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence, bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+
+#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
deleted file mode 100644
index a9e13b252820..000000000000
--- a/include/drm/ttm/ttm_bo_api.h
+++ /dev/null
@@ -1,736 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _TTM_BO_API_H_
-#define _TTM_BO_API_H_
-
-#include <drm/drm_gem.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/dma-resv.h>
-
-struct ttm_bo_global;
-
-struct ttm_bo_device;
-
-struct drm_mm_node;
-
-struct ttm_placement;
-
-struct ttm_place;
-
-struct ttm_lru_bulk_move;
-
-/**
- * struct ttm_bus_placement
- *
- * @addr: mapped virtual address
- * @base: bus base address
- * @is_iomem: is this io memory?
- * @size: size in bytes
- * @offset: offset from the base address
- * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
- * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
- *
- * Structure indicating the bus placement of an object.
- */
-struct ttm_bus_placement {
- void *addr;
- phys_addr_t base;
- unsigned long size;
- unsigned long offset;
- bool is_iomem;
- bool io_reserved_vm;
- uint64_t io_reserved_count;
-};
-
-
-/**
- * struct ttm_mem_reg
- *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
- * @page_alignment: Page alignment.
- * @placement: Placement flags.
- * @bus: Placement on io bus accessible to the CPU
- *
- * Structure indicating the placement and space resources used by a
- * buffer object.
- */
-
-struct ttm_mem_reg {
- void *mm_node;
- unsigned long start;
- unsigned long size;
- unsigned long num_pages;
- uint32_t page_alignment;
- uint32_t mem_type;
- uint32_t placement;
- struct ttm_bus_placement bus;
-};
-
-/**
- * enum ttm_bo_type
- *
- * @ttm_bo_type_device: These are 'normal' buffers that can
- * be mmapped by user space. Each of these bos occupy a slot in the
- * device address space, that can be used for normal vm operations.
- *
- * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
- * but they cannot be accessed from user-space. For kernel-only use.
- *
- * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
- * driver.
- */
-
-enum ttm_bo_type {
- ttm_bo_type_device,
- ttm_bo_type_kernel,
- ttm_bo_type_sg
-};
-
-struct ttm_tt;
-
-/**
- * struct ttm_buffer_object
- *
- * @base: drm_gem_object superclass data.
- * @bdev: Pointer to the buffer object device structure.
- * @type: The bo type.
- * @destroy: Destruction function. If NULL, kfree is used.
- * @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
- * @kref: Reference count of this buffer object. When this refcount reaches
- * zero, the object is destroyed or put on the delayed delete list.
- * @mem: structure describing current placement.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object.
- * @ttm: TTM structure holding system pages.
- * @evicted: Whether the object was evicted without user-space knowing.
- * @deleted: True if the object is only a zombie and already deleted.
- * @lru: List head for the lru list.
- * @ddestroy: List head for the delayed destroy list.
- * @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
- * @offset: The current GPU offset, which can have different meanings
- * depending on the memory type. For SYSTEM type memory, it should be 0.
- * @cur_placement: Hint of current placement.
- *
- * Base class for TTM buffer object, that deals with data placement and CPU
- * mappings. GPU mappings are really up to the driver, but for simpler GPUs
- * the driver can usually use the placement offset @offset directly as the
- * GPU virtual address. For drivers implementing multiple
- * GPU memory manager contexts, the driver should manage the address space
- * in these contexts separately and use these objects to get the correct
- * placement and caching for these GPU maps. This makes it possible to use
- * these objects for even quite elaborate memory management schemes.
- * Through the destroy member and the API visibility of this object it is
- * possible to derive driver specific types.
- */
-
-struct ttm_buffer_object {
- struct drm_gem_object base;
-
- /**
- * Members constant at init.
- */
-
- struct ttm_bo_device *bdev;
- enum ttm_bo_type type;
- void (*destroy) (struct ttm_buffer_object *);
- unsigned long num_pages;
- size_t acc_size;
-
- /**
- * Members not needing protection.
- */
- struct kref kref;
-
- /**
- * Members protected by the bo::resv::reserved lock.
- */
-
- struct ttm_mem_reg mem;
- struct file *persistent_swap_storage;
- struct ttm_tt *ttm;
- bool evicted;
- bool deleted;
-
- /**
- * Members protected by the bdev::lru_lock.
- */
-
- struct list_head lru;
- struct list_head ddestroy;
- struct list_head swap;
- struct list_head io_reserve_lru;
-
- /**
- * Members protected by a bo reservation.
- */
-
- struct dma_fence *moving;
- unsigned priority;
-
- /**
- * Special members that are protected by the reserve lock
- * and the bo::lock when written to. Can be read with
- * either of these locks held.
- */
-
- struct sg_table *sg;
-};
-
-/**
- * struct ttm_bo_kmap_obj
- *
- * @virtual: The current kernel virtual address.
- * @page: The page when kmap'ing a single page.
- * @bo_kmap_type: Type of bo_kmap.
- *
- * Object describing a kernel mapping. Since a TTM bo may be located
- * in various memory types with various caching policies, the
- * mapping can either be an ioremap, a vmap, a kmap or part of a
- * premapped region.
- */
-
-#define TTM_BO_MAP_IOMEM_MASK 0x80
-struct ttm_bo_kmap_obj {
- void *virtual;
- struct page *page;
- enum {
- ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
- ttm_bo_map_vmap = 2,
- ttm_bo_map_kmap = 3,
- ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
- } bo_kmap_type;
- struct ttm_buffer_object *bo;
-};
-
-/**
- * struct ttm_operation_ctx
- *
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @resv: Reservation object to allow reserved evictions with.
- * @flags: Operation flags, including the TTM_OPT_FLAG_* flags defined below.
- *
- * Context for TTM operations like changing buffer placement or general memory
- * allocation.
- */
-struct ttm_operation_ctx {
- bool interruptible;
- bool no_wait_gpu;
- struct dma_resv *resv;
- uint64_t bytes_moved;
- uint32_t flags;
-};
-
-/* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
-/* when serving page fault or suspend, allow alloc anyway */
-#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
-
-/**
- * ttm_bo_get - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- */
-static inline void ttm_bo_get(struct ttm_buffer_object *bo)
-{
- kref_get(&bo->kref);
-}
-
-/**
- * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
- * its refcount has already reached zero.
- * @bo: The buffer object.
- *
- * Used to reference a TTM buffer object in lookups where the object is removed
- * from the lookup structure during the destructor and for RCU lookups.
- *
- * Returns: @bo if the referencing was successful, NULL otherwise.
- */
-static inline __must_check struct ttm_buffer_object *
-ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
-{
- if (!kref_get_unless_zero(&bo->kref))
- return NULL;
- return bo;
-}
-
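-/*
- * Editor's illustrative sketch (not part of the original header): a lookup
- * that only takes a reference while the refcount is still non-zero; the
- * lookup structure and helper names are hypothetical.
- */
-#if 0	/* example only */
-static struct ttm_buffer_object *my_lookup(struct my_table *tbl, u32 handle)
-{
-	struct ttm_buffer_object *bo;
-
-	rcu_read_lock();
-	bo = my_table_find(tbl, handle);	/* RCU-safe lookup */
-	if (bo)
-		bo = ttm_bo_get_unless_zero(bo);	/* NULL if dying */
-	rcu_read_unlock();
-	return bo;	/* caller owns a reference, drop with ttm_bo_put() */
-}
-#endif
-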
-/**
- * ttm_bo_wait - wait for buffer idle.
- *
- * @bo: The buffer object.
- * @interruptible: Use interruptible wait.
- * @no_wait: Return immediately if buffer is busy.
- *
- * This function must be called with the bo::mutex held, and makes
- * sure any previous rendering to the buffer is completed.
- * Note: It might be necessary to block validations before the
- * wait by reserving the buffer.
- * Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
-
-/**
- * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
- *
- * @placement: The proposed placement to check @mem against.
- * @mem: The struct ttm_mem_reg indicating the region where the bo resides
- * @new_flags: Describes compatible placement found
- *
- * Returns true if the placement is compatible
- */
-bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
- uint32_t *new_flags);
-
-/**
- * ttm_bo_validate
- *
- * @bo: The buffer object.
- * @placement: Proposed placement for the buffer object.
- * @ctx: validation parameters.
- *
- * Changes placement and caching policy of the buffer object
- * according to the proposed placement.
- * Returns
- * -EINVAL on invalid proposed placement.
- * -ENOMEM on out-of-memory condition.
- * -EBUSY if no_wait is true and buffer busy.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
- */
-void ttm_bo_put(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_move_to_lru_tail
- *
- * @bo: The buffer object.
- * @bulk: optional bulk move structure to remember BO positions
- *
- * Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_bo_global::lru_lock
- * held, and is used to make a BO less likely to be considered for eviction.
- */
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
- struct ttm_lru_bulk_move *bulk);
-
-/**
- * ttm_bo_bulk_move_lru_tail
- *
- * @bulk: bulk move structure
- *
- * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
- */
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
-
-/**
- * ttm_bo_lock_delayed_workqueue
- *
- * Prevent the delayed workqueue from running.
- * Returns
- * True if the workqueue was queued at the time
- */
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_unlock_delayed_workqueue
- *
- * Allows the delayed workqueue to run.
- */
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
-
-/**
- * ttm_bo_eviction_valuable
- *
- * @bo: The buffer object to evict
- * @place: the placement we need to make room for
- *
- * Check if it is valuable to evict the BO to make room for the given placement.
- */
-bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
-
-/**
- * ttm_bo_acc_size
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo_size: size of the buffer object in bytes.
- * @struct_size: size of the structure holding buffer object data
- *
- * Returns size to account for a buffer object
- */
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-
-/**
- * ttm_bo_init_reserved
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
- * and it is the caller's responsibility to call ttm_bo_unreserve.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-
-int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
- size_t acc_size,
- struct sg_table *sg,
- struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_bo_init
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep to wait for GPU resources,
- * sleep interruptible.
- * @acc_size: Accounted size for this object.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
- unsigned long size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment, bool interruptible, size_t acc_size,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_bo_create
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep while waiting for GPU resources,
- * sleep interruptible.
- * @p_bo: On successful completion *p_bo points to the created object.
- *
- * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
- * on that object. The destroy function is set to kfree().
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while waiting for resources.
- */
-int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t page_alignment, bool interruptible,
- struct ttm_buffer_object **p_bo);
-
-/**
- * ttm_bo_init_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- * @p_size: size of the managed area in pages.
- *
- * Initialize a manager for a given memory type.
- * Note: if part of driver firstopen, it must be protected from a
- * potentially racing lastclose.
- * Returns:
- * -EINVAL: invalid size or memory type.
- * -ENOMEM: Not enough memory.
- * May also return driver-specified errors.
- */
-int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_size);
-
-/**
- * ttm_bo_clean_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Take down a manager for a given memory type after first walking
- * the LRU list to evict any buffers left alive.
- *
- * Normally, this function is part of lastclose() or unload(), and at that
- * point there shouldn't be any buffers left created by user-space, since
- * they should have been removed by the file descriptor release() method.
- * However, before this function is run, make sure to signal all sync objects,
- * and verify that the delayed delete queue is empty. The driver must also
- * make sure that there are no NO_EVICT buffers present in this memory type
- * when the call is made.
- *
- * If this function is part of a VT switch, the caller must make sure that
- * there are no applications currently validating buffers before this
- * function is called. The caller can do that by first taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: invalid or uninitialized memory type.
- * -EBUSY: There are still buffers left in this memory type.
- */
-int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
-
-/**
- * ttm_bo_evict_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Evicts all buffers on the lru list of the memory type.
- * This is normally part of a VT switch or an
- * out-of-memory-space-due-to-fragmentation handler.
- * The caller must make sure that there are no other processes
- * currently validating buffers, and can do that by taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: Invalid or uninitialized memory type.
- * -ERESTARTSYS: The call was interrupted by a signal while waiting to
- * evict a buffer.
- */
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
-
-/**
- * ttm_kmap_obj_virtual
- *
- * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
- * @is_iomem: Pointer to a boolean that on return is true if the
- * virtual map is io memory, false if normal memory.
- *
- * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
- * If *is_iomem is true on return, the virtual address points to an io memory
- * area that must only be accessed using iowriteXX() and similar functions.
- */
-static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
- bool *is_iomem)
-{
- *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
- return map->virtual;
-}
-
-/**
- * ttm_bo_kmap
- *
- * @bo: The buffer object.
- * @start_page: The first page to map.
- * @num_pages: Number of pages to map.
- * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
- * data in the buffer object. The ttm_kmap_obj_virtual function can then be
- * used to obtain a virtual address to the data.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
-int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
- unsigned long num_pages, struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_kunmap
- *
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_kmap.
- */
-void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
- *
- * @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space.
- *
- * Maps a buffer object.
- */
-int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_mmap - mmap out of the ttm device address space.
- *
- * @filp: filp as input from the mmap method.
- * @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_bo_device with the address space manager.
- *
- * This function is intended to be called by the device mmap method.
- * if the device address space is to be backed by the bo manager.
- */
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_io
- *
- * @bdev: Pointer to the struct ttm_bo_device.
- * @filp: Pointer to the struct file attempting to read / write.
- * @wbuf: User-space pointer to address of buffer to write. NULL on read.
- * @rbuf: User-space pointer to address of buffer to read into.
- * Null on write.
- * @count: Number of bytes to read / write.
- * @f_pos: Pointer to current file position.
- * @write: 1 for write, 0 for read.
- *
- * This function implements read / write into ttm buffer objects, and is
- * intended to be called from the fops::read and fops::write method.
- * Returns:
- * See man (2) write, man(2) read. In particular,
- * the function may return -ERESTARTSYS if
- * interrupted by a signal.
- */
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf,
- size_t count, loff_t *f_pos, bool write);
-
-int ttm_bo_swapout(struct ttm_bo_global *glob,
- struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(void);
-
-/**
- * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
- * embedded drm_gem_object.
- *
- * Most ttm drivers are using gem too, so the embedded
- * ttm_buffer_object.base will be initialized by the driver (before
- * calling ttm_bo_init). It is also possible to use ttm without gem
- * though (vmwgfx does that).
- *
- * This helper figures out whether a given ttm bo is backed by a gem
- * object too or not.
- *
- * @bo: The bo to check.
- */
-static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
-{
- return bo->base.dev != NULL;
-}
-
-/* Default number of pre-faulted pages in the TTM fault handler */
-#define TTM_BO_VM_NUM_PREFAULT 16
-
-vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
- struct vm_fault *vmf);
-
-vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
- pgprot_t prot,
- pgoff_t num_prefault,
- pgoff_t fault_page_size);
-
-vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
-
-void ttm_bo_vm_open(struct vm_area_struct *vma);
-
-void ttm_bo_vm_close(struct vm_area_struct *vma);
-
-int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write);
-
-#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
deleted file mode 100644
index 5a37f1cc057e..000000000000
--- a/include/drm/ttm/ttm_bo_driver.h
+++ /dev/null
@@ -1,859 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-#ifndef _TTM_BO_DRIVER_H_
-#define _TTM_BO_DRIVER_H_
-
-#include <drm/drm_mm.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/workqueue.h>
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/dma-resv.h>
-
-#include "ttm_bo_api.h"
-#include "ttm_memory.h"
-#include "ttm_module.h"
-#include "ttm_placement.h"
-#include "ttm_tt.h"
-
-#define TTM_MAX_BO_PRIORITY 4U
-
-#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
-#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
-
-struct ttm_mem_type_manager;
-
-struct ttm_mem_type_manager_func {
- /**
- * struct ttm_mem_type_manager member init
- *
- * @man: Pointer to a memory type manager.
- * @p_size: Implementation dependent, but typically the size of the
- * range to be managed in pages.
- *
- * Called to initialize a private range manager. The function is
- * expected to initialize the man::priv member.
- * Returns 0 on success, negative error code on failure.
- */
- int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
-
- /**
- * struct ttm_mem_type_manager member takedown
- *
- * @man: Pointer to a memory type manager.
- *
- * Called to undo the setup done in init. All allocated resources
- * should be freed.
- */
- int (*takedown)(struct ttm_mem_type_manager *man);
-
- /**
- * struct ttm_mem_type_manager member get_node
- *
- * @man: Pointer to a memory type manager.
- * @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_mem_reg to be filled in.
- *
- * This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
- * of the range allocated, and the function should return zero.
- * If the memory region cannot accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
- * If a system error occurred, preventing the request to be fulfilled,
- * the function should return a negative error code.
- *
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_mem_type_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation must use either a mutex or a spinlock to
- * protect any data structures managing the space.
- */
- int (*get_node)(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem);
-
- /**
- * struct ttm_mem_type_manager member put_node
- *
- * @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_mem_reg to be filled in.
- *
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
- */
- void (*put_node)(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem);
-
- /**
- * struct ttm_mem_type_manager member debug
- *
- * @man: Pointer to a memory type manager.
- * @printer: The drm_printer used to emit the output.
- *
- * This function is called to print out the state of the memory
- * type manager to aid debugging of out-of-memory conditions.
- * It may not be called from within atomic context.
- */
- void (*debug)(struct ttm_mem_type_manager *man,
- struct drm_printer *printer);
-};
-
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @func: structure pointer implementing the range manager. See above
- * @priv: Driver private closure for @func.
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- * reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
- * @move_lock: lock for move fence
- * @lru: The lru list for this memory type.
- * @move: The fence of the last pipelined move operation.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
-struct ttm_mem_type_manager {
- struct ttm_bo_device *bdev;
-
- /*
- * No protection. Constant from start.
- */
-
- bool has_type;
- bool use_type;
- uint32_t flags;
- uint64_t size;
- uint32_t available_caching;
- uint32_t default_caching;
- const struct ttm_mem_type_manager_func *func;
- void *priv;
- struct mutex io_reserve_mutex;
- bool use_io_reserve_lru;
- spinlock_t move_lock;
-
- /*
- * Protected by @io_reserve_mutex:
- */
-
- struct list_head io_reserve_lru;
-
- /*
- * Protected by the global->lru_lock.
- */
-
- struct list_head lru[TTM_MAX_BO_PRIORITY];
-
- /*
- * Protected by @move_lock.
- */
- struct dma_fence *move;
-};
-
-/**
- * struct ttm_bo_driver
- *
- * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
- * structure.
- * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
- * @move: Callback for a driver to hook in accelerated functions to
- * move a buffer.
- * If set to NULL, a potentially slow memcpy() move is used.
- */
-
-struct ttm_bo_driver {
- /**
- * ttm_tt_create
- *
- * @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
- struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
- uint32_t page_flags);
-
- /**
- * ttm_tt_populate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Allocate all backing pages
- * Returns:
- * -ENOMEM: Out of memory.
- */
- int (*ttm_tt_populate)(struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx);
-
- /**
- * ttm_tt_unpopulate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Free all backing pages
- */
- void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
-
- int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man);
-
- /**
- * struct ttm_bo_driver member eviction_valuable
- *
- * @bo: the buffer object to be evicted
- * @place: placement we need room for
- *
- * Check with the driver if it is valuable to evict a BO to make room
- * for a certain placement.
- */
- bool (*eviction_valuable)(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
- /**
- * struct ttm_bo_driver member evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- */
-
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
-
- /**
- * struct ttm_bo_driver member move:
- *
- * @bo: the buffer to move
- * @evict: whether this motion is evicting the buffer from
- * the graphics address space
- * @ctx: context for this move with parameters
- * @new_mem: the new memory region receiving the buffer
- *
- * Move a buffer between two memory regions.
- */
- int (*move)(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem);
-
- /**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
- * Hook to notify driver about a driver move so it
- * can do tiling things and book-keeping.
- *
- * @evict: whether this move is evicting the buffer from the graphics
- * address space
- */
- void (*move_notify)(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_mem_reg *new_mem);
- /* notify the driver we are taking a fault on this BO
- * and have reserved it */
- int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
-
- /**
- * notify the driver that we're about to swap out this bo
- */
- void (*swap_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Driver callback invoked when mapping io memory (for bo_move_memcpy,
- * for instance). TTM will take care to call io_mem_free whenever
- * the mapping is no longer used. io_mem_reserve & io_mem_free
- * are balanced.
- */
- int (*io_mem_reserve)(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-
- /**
- * Return the pfn for a given page_offset inside the BO.
- *
- * @bo: the BO to look up the pfn for
- * @page_offset: the offset to look up
- */
- unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
- unsigned long page_offset);
-
- /**
- * Read/write memory buffers for ptrace access
- *
- * @bo: the BO to access
- * @offset: the offset from the start of the BO
- * @buf: pointer to source/destination buffer
- * @len: number of bytes to copy
- * @write: whether to read (0) from or write (non-0) to BO
- *
- * If successful, this function should return the number of
- * bytes copied, -EIO otherwise. If the number of bytes
- * returned is < len, the function may be called again with
- * the remainder of the buffer to copy.
- */
- int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
- void *buf, int len, int write);
-
- /**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Notify the driver that we're about to release a BO
- *
- * @bo: BO that is about to be released
- *
- * Gives the driver a chance to do any cleanup, including
- * adding fences that may force a delayed delete
- */
- void (*release_notify)(struct ttm_buffer_object *bo);
-};
-
-/**
- * struct ttm_bo_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
- */
-
-extern struct ttm_bo_global {
-
- /**
- * Constant after init.
- */
-
- struct kobject kobj;
- struct page *dummy_read_page;
- spinlock_t lru_lock;
-
- /**
- * Protected by ttm_global_mutex.
- */
- struct list_head device_list;
-
- /**
- * Protected by the lru_lock.
- */
- struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
-
- /**
- * Internal protection.
- */
- atomic_t bo_count;
-} ttm_bo_glob;
-
-
-#define TTM_NUM_MEM_TYPES 8
-
-/**
- * struct ttm_bo_device - Buffer object driver device-specific data.
- *
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of mem_type_managers.
- * @vma_manager: Address space manager (pointer)
- * @lru_lock: Spinlock that protects the buffer+device lru lists and
- * ddestroy lists.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
- * @no_retry: Don't retry allocation if it fails
- *
- */
-
-struct ttm_bo_device {
-
- /*
- * Constant after bo device init / atomic.
- */
- struct list_head device_list;
- struct ttm_bo_driver *driver;
- struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-
- /*
- * Protected by internal locks.
- */
- struct drm_vma_offset_manager *vma_manager;
-
- /*
- * Protected by the global:lru lock.
- */
- struct list_head ddestroy;
-
- /*
- * Protected by load / firstopen / lastclose /unload sync.
- */
-
- struct address_space *dev_mapping;
-
- /*
- * Internal protection.
- */
-
- struct delayed_work wq;
-
- bool need_dma32;
-
- bool no_retry;
-};
-
-/**
- * struct ttm_lru_bulk_move_pos
- *
- * @first: first BO in the bulk move range
- * @last: last BO in the bulk move range
- *
- * Positions for a lru bulk move.
- */
-struct ttm_lru_bulk_move_pos {
- struct ttm_buffer_object *first;
- struct ttm_buffer_object *last;
-};
-
-/**
- * struct ttm_lru_bulk_move
- *
- * @tt: first/last lru entry for BOs in the TT domain
- * @vram: first/last lru entry for BOs in the VRAM domain
- * @swap: first/last lru entry for BOs on the swap list
- *
- * Helper structure for bulk moves on the LRU list.
- */
-struct ttm_lru_bulk_move {
- struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
-};
-
-/**
- * ttm_flag_masked
- *
- * @old: Pointer to the result and original value.
- * @new: New value of bits.
- * @mask: Mask of bits to change.
- *
- * Convenience function to change a number of bits identified by a mask.
- */
-
-static inline uint32_t
-ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
-{
- *old ^= (*old ^ new) & mask;
- return *old;
-}
-
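-/*
- * Editor's illustrative sketch (not part of the original header): replacing
- * only the caching bits of a placement word with ttm_flag_masked(); the
- * TTM_PL_* values are those from the old ttm_placement.h and are shown for
- * illustration only.
- */
-#if 0	/* example only */
-	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
-
-	/* Swap the caching field to cached; TTM_PL_FLAG_VRAM is untouched. */
-	ttm_flag_masked(&flags, TTM_PL_FLAG_CACHED, TTM_PL_MASK_CACHING);
-#endif
-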
-/*
- * ttm_bo.c
- */
-
-/**
- * ttm_bo_mem_space
- *
- * @bo: Pointer to a struct ttm_buffer_object, the data of which
- * we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_mem_reg.
- * @interruptible: Sleep interruptible while waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- *
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @mem, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
- * Returns:
- * -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
- * fragmentation or concurrent allocators.
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- struct ttm_operation_ctx *ctx);
-
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
-
-int ttm_bo_device_release(struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_device_init
- *
- * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @glob: A pointer to an initialized struct ttm_bo_global.
- * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
- * @mapping: The address space to use for this bo.
- * @vma_manager: A pointer to a vma manager.
- * @file_page_offset: Offset into the device address space that is available
- * for buffer data. This ensures compatibility with other users of the
- * address space.
- *
- * Initializes a struct ttm_bo_device:
- * Returns:
- * !0: Failure.
- */
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_driver *driver,
- struct address_space *mapping,
- struct drm_vma_offset_manager *vma_manager,
- bool need_dma32);
-
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- */
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_unmap_virtual_locked
- *
- * @bo: tear down the virtual mappings for this BO
- *
- * The caller must take ttm_mem_io_lock before calling this function.
- */
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-
-/**
- * __ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Will not remove reserved buffers from the lru lists.
- * Otherwise identical to ttm_bo_reserve.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
-{
- int ret = 0;
-
- if (no_wait) {
- bool success;
- if (WARN_ON(ticket))
- return -EBUSY;
-
- success = dma_resv_trylock(bo->base.resv);
- return success ? 0 : -EBUSY;
- }
-
- if (interruptible)
- ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
- else
- ret = dma_resv_lock(bo->base.resv, ticket);
- if (ret == -EINTR)
- return -ERESTARTSYS;
- return ret;
-}
-
-/**
- * ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation) and removes it from lru lists, while taking
- * a number of measures to prevent deadlocks.
- *
- * Deadlocks may occur when two processes try to reserve multiple buffers in
- * different order, either by will or as a result of a buffer being evicted
- * to make room for a buffer already reserved. (Buffers are reserved before
- * they are evicted). The following algorithm prevents such deadlocks from
- * occurring:
- * Processes attempting to reserve multiple buffers other than for eviction,
- * (typically execbuf), should first obtain a unique 32-bit
- * validation sequence number,
- * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
- * sequence number. If upon call of this function, the buffer object is already
- * reserved, the validation sequence is checked against the validation
- * sequence of the process currently reserving the buffer,
- * and if the current validation sequence is greater than that of the process
- * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
- * waiting for the buffer to become unreserved, after which it retries
- * reserving.
- * The caller should, when receiving an -EDEADLK error
- * release all its buffer reservations, wait for @bo to become unreserved, and
- * then rerun the validation with the same validation sequence. This procedure
- * will always guarantee that the process with the lowest validation sequence
- * will eventually succeed, preventing both deadlocks and starvation.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
-{
- WARN_ON(!kref_read(&bo->kref));
-
- return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-}
-
-/**
- * ttm_bo_reserve_slowpath:
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @sequence: Set (@bo)->sequence to this value after lock
- *
- * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
- * from all our other reservations. Because there are no other reservations
- * held by us, this function cannot deadlock any more.
- */
-static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
- bool interruptible,
- struct ww_acquire_ctx *ticket)
-{
- int ret = 0;
-
- WARN_ON(!kref_read(&bo->kref));
-
- if (interruptible)
- ret = dma_resv_lock_slow_interruptible(bo->base.resv,
- ticket);
- else
- dma_resv_lock_slow(bo->base.resv, ticket);
-
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
-
- return ret;
-}
-
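-/*
- * Editor's illustrative sketch (not part of the original header): the
- * backoff protocol described above for two BOs sharing one ticket. A
- * production loop would retry on a repeated -EDEADLK; the function name
- * is hypothetical.
- */
-#if 0	/* example only */
-static int my_reserve_pair(struct ttm_buffer_object *a,
-			   struct ttm_buffer_object *b)
-{
-	struct ww_acquire_ctx ticket;
-	int ret;
-
-	ww_acquire_init(&ticket, &reservation_ww_class);
-	ret = ttm_bo_reserve(a, true, false, &ticket);
-	if (ret)
-		goto out_fini;
-	ret = ttm_bo_reserve(b, true, false, &ticket);
-	if (ret == -EDEADLK) {
-		/* Back off: release @a, sleep-lock @b, then retake @a. */
-		ttm_bo_unreserve(a);
-		ret = ttm_bo_reserve_slowpath(b, true, &ticket);
-		if (ret)
-			goto out_fini;
-		ret = ttm_bo_reserve(a, true, false, &ticket);
-		if (ret) {
-			ttm_bo_unreserve(b);
-			goto out_fini;
-		}
-	} else if (ret) {
-		ttm_bo_unreserve(a);
-		goto out_fini;
-	}
-	ww_acquire_done(&ticket);
-	return 0;	/* both BOs reserved, release with ttm_bo_unreserve() */
-
-out_fini:
-	ww_acquire_fini(&ticket);
-	return ret;
-}
-#endif
-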
-/**
- * ttm_bo_unreserve
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unreserve a previous reservation of @bo.
- */
-static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
- dma_resv_unlock(bo->base.resv);
-}
-
-/*
- * ttm_bo_util.c
- */
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-/**
- * ttm_bo_move_ttm
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_mem_reg indicating where to move.
- *
- * Optimized move function for a buffer object with both old and
- * new placement backed by a TTM. The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem);
-
-/**
- * ttm_bo_move_memcpy
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_mem_reg indicating where to move.
- *
- * Fallback move function for a mappable buffer object in mappable memory.
- * The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem);
-
-/**
- * ttm_bo_free_old_node
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Utility function to free an old placement after a successful move.
- */
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_move_accel_cleanup.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
- *
- * Accelerated move function to be called when an accelerated move
- * has been scheduled. The function will create a new temporary buffer object
- * representing the old placement, and put the sync object on both buffer
- * objects. After that the newly created buffer object is unref'd to be
- * destroyed when the move is complete. This will help pipeline
- * buffer moves.
- */
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem);
-
-/**
- * ttm_bo_pipeline_move.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
- *
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
- */
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem);
-
-/**
- * ttm_bo_pipeline_gutting.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Pipelined gutting a BO of its backing store.
- */
-int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
-
-/**
- * ttm_io_prot
- *
- * @caching_flags: Caching flags of the mapping.
- * @tmp: Page protection flag for a normal, cached mapping.
- *
- * Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching model indicated by @caching_flags.
- */
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
-
-extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
-
-#endif
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
new file mode 100644
index 000000000000..235a743d90e1
--- /dev/null
+++ b/include/drm/ttm/ttm_caching.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_CACHING_H_
+#define _TTM_CACHING_H_
+
+#define TTM_NUM_CACHING_TYPES 3
+
+/**
+ * enum ttm_caching - CPU caching and BUS snooping behavior.
+ */
+enum ttm_caching {
+ /**
+ * @ttm_uncached: Most defensive option for device mappings,
+ * don't even allow write combining.
+ */
+ ttm_uncached,
+
+ /**
+ * @ttm_write_combined: Don't cache read accesses, but allow at least
+ * writes to be combined.
+ */
+ ttm_write_combined,
+
+ /**
+ * @ttm_cached: Fully cached like normal system memory, requires that
+ * devices snoop the CPU cache on accesses.
+ */
+ ttm_cached
+};
+
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp);
+
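+/*
+ * Editor's illustrative sketch (not part of the original header): deriving
+ * the page protection for a userspace mapping from the caching mode; the
+ * helper name is hypothetical and error handling is elided.
+ */
+#if 0	/* example only */
+static pgprot_t my_vma_prot(enum ttm_caching caching,
+			    struct vm_area_struct *vma)
+{
+	/* ttm_cached leaves the protection bits untouched; write-combined
+	 * and uncached mappings get the corresponding attributes set. */
+	return ttm_prot_from_caching(caching, vma->vm_page_prot);
+}
+#endif
+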
+#endif
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
new file mode 100644
index 000000000000..c22f30535c84
--- /dev/null
+++ b/include/drm/ttm/ttm_device.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_DEVICE_H_
+#define _TTM_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_pool.h>
+
+struct ttm_device;
+struct ttm_placement;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_global - Buffer object driver global data.
+ */
+extern struct ttm_global {
+
+ /**
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages. Constant after init.
+ */
+ struct page *dummy_read_page;
+
+ /**
+ * @device_list: List of buffer object devices. Protected by
+ * ttm_global_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * @bo_count: Number of buffer objects allocated by devices.
+ */
+ atomic_t bo_count;
+} ttm_glob;
+
+struct ttm_device_funcs {
+ /**
+ * ttm_tt_create
+ *
+ * @bo: The buffer object to create the ttm for.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+	 * Free all backing pages.
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_destroy
+ *
+ * @bdev: Pointer to a ttm device
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+	 * Destroy the backend. This will be called back from ttm_tt_destroy,
+	 * so don't call ttm_tt_destroy from the callback or you will get an
+	 * infinite loop.
+ */
+ void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ * This should not cause multihop evictions, and the core will warn
+ * if one is proposed.
+ */
+
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @ctx: context for this move with parameters
+ * @new_mem: the new memory region receiving the buffer
+ * @hop: placement for driver directed intermediate hop
+ *
+ * Move a buffer between two memory regions.
+ * Returns errno -EMULTIHOP if driver requests a hop
+ */
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop);
+
+ /**
+ * Hook to notify driver about a resource delete.
+ */
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback on when mapping io memory (for bo_move_memcpy
+	 * Driver callback when mapping io memory (for bo_move_memcpy,
+	 * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
+ */
+ int (*io_mem_reserve)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+ void (*io_mem_free)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+ /**
+ * Read/write memory buffers for ptrace access
+ *
+ * @bo: the BO to access
+ * @offset: the offset from the start of the BO
+ * @buf: pointer to source/destination buffer
+ * @len: number of bytes to copy
+	 * @write: whether to read (0) from or write (non-0) to the BO
+ *
+ * If successful, this function should return the number of
+ * bytes copied, -EIO otherwise. If the number of bytes
+ * returned is < len, the function may be called again with
+ * the remainder of the buffer to copy.
+ */
+ int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
+
+ /**
+ * Notify the driver that we're about to release a BO
+ *
+ * @bo: BO that is about to be released
+ *
+ * Gives the driver a chance to do any cleanup, including
+ * adding fences that may force a delayed delete
+ */
+ void (*release_notify)(struct ttm_buffer_object *bo);
+};
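+
+/*
+ * Illustrative sketch: a driver typically provides a static instance of
+ * this table. The foo_* callbacks below are hypothetical driver code,
+ * while ttm_bo_eviction_valuable() is the common TTM default:
+ *
+ *	static const struct ttm_device_funcs foo_ttm_funcs = {
+ *		.ttm_tt_create = foo_ttm_tt_create,
+ *		.ttm_tt_populate = foo_ttm_tt_populate,
+ *		.ttm_tt_unpopulate = foo_ttm_tt_unpopulate,
+ *		.ttm_tt_destroy = foo_ttm_tt_destroy,
+ *		.eviction_valuable = ttm_bo_eviction_valuable,
+ *		.evict_flags = foo_evict_flags,
+ *		.move = foo_bo_move,
+ *	};
+ */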
+
+/**
+ * struct ttm_device - Buffer object driver device-specific data.
+ */
+struct ttm_device {
+ /**
+ * @device_list: Our entry in the global device list.
+ * Constant after bo device init
+ */
+ struct list_head device_list;
+
+ /**
+ * @funcs: Function table for the device.
+ * Constant after bo device init
+ */
+ const struct ttm_device_funcs *funcs;
+
+ /**
+ * @sysman: Resource manager for the system domain.
+ * Access via ttm_manager_type.
+ */
+ struct ttm_resource_manager sysman;
+
+ /**
+ * @man_drv: An array of resource_managers, one per resource type.
+ */
+ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
+
+ /**
+ * @vma_manager: Address space manager for finding BOs to mmap.
+ */
+ struct drm_vma_offset_manager *vma_manager;
+
+ /**
+ * @pool: page pool for the device.
+ */
+ struct ttm_pool pool;
+
+ /**
+ * @lru_lock: Protection for the per manager LRU and ddestroy lists.
+ */
+ spinlock_t lru_lock;
+
+ /**
+ * @pinned: Buffer objects which are pinned and so not on any LRU list.
+ */
+ struct list_head pinned;
+
+ /**
+ * @dev_mapping: A pointer to the struct address_space for invalidating
+ * CPU mappings on buffer move. Protected by load/unload sync.
+ */
+ struct address_space *dev_mapping;
+
+ /**
+ * @wq: Work queue structure for the delayed delete workqueue.
+ */
+ struct workqueue_struct *wq;
+};
+
+int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
+int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags);
+
+static inline struct ttm_resource_manager *
+ttm_manager_type(struct ttm_device *bdev, int mem_type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(mem_type)
+ && mem_type >= TTM_NUM_MEM_TYPES);
+ return bdev->man_drv[mem_type];
+}
+
+static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
+ struct ttm_resource_manager *manager)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ bdev->man_drv[type] = manager;
+}
+
+int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_device_fini(struct ttm_device *bdev);
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
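+
+/*
+ * Illustrative init sequence (sketch; foo is hypothetical driver state
+ * embedding a struct drm_device named drm):
+ *
+ *	ret = ttm_device_init(&foo->bdev, &foo_ttm_funcs, foo->dev,
+ *			      foo->drm.anon_inode->i_mapping,
+ *			      foo->drm.vma_offset_manager,
+ *			      false, true);
+ *	if (ret)
+ *		return ret;
+ */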
+
+#endif
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 5a19843bb80d..03aca29d3ce4 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -33,7 +33,9 @@
#include <linux/list.h>
-#include "ttm_bo_api.h"
+struct ww_acquire_ctx;
+struct dma_fence;
+struct ttm_buffer_object;
/**
* struct ttm_validate_buffer
@@ -58,9 +60,8 @@ struct ttm_validate_buffer {
* Undoes all buffer validation reservations for bos pointed to by
* the list entries.
*/
-
-extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct list_head *list);
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct list_head *list);
/**
* function ttm_eu_reserve_buffers
@@ -96,10 +97,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* ttm_eu_fence_buffer_objects() when command submission is complete or
* has failed.
*/
-
-extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr,
- struct list_head *dups);
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+ struct list_head *list, bool intr,
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
@@ -113,9 +113,8 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
* It also unreserves all buffers, putting them on lru lists.
*
*/
-
-extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list,
- struct dma_fence *fence);
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+ struct list_head *list,
+ struct dma_fence *fence);
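+
+/*
+ * Illustrative reservation flow (sketch):
+ *
+ *	struct ww_acquire_ctx ticket;
+ *
+ *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ *	if (ret)
+ *		return ret;
+ *	... submit commands, obtain fence ...
+ *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
+ */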
#endif
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
new file mode 100644
index 000000000000..cc5c09a211b4
--- /dev/null
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __TTM_KMAP_ITER_H__
+#define __TTM_KMAP_ITER_H__
+
+#include <linux/types.h>
+
+struct ttm_kmap_iter;
+struct iosys_map;
+
+/**
+ * struct ttm_kmap_iter_ops - Ops structure for a struct
+ * ttm_kmap_iter.
+ * @maps_tt: Whether the iterator maps TT memory directly, as opposed
+ * to mapping a TT through an aperture. Both these modes have
+ * struct ttm_resource_manager::use_tt set, but the latter typically
+ * returns is_iomem == true from ttm_mem_io_reserve.
+ */
+struct ttm_kmap_iter_ops {
+ /**
+ * kmap_local() - Map a PAGE_SIZE part of the resource using
+ * kmap_local semantics.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct iosys_map holding the virtual address after
+ * the operation.
+ * @i: The location within the resource to map. PAGE_SIZE granularity.
+ */
+ void (*map_local)(struct ttm_kmap_iter *res_iter,
+ struct iosys_map *dmap, pgoff_t i);
+ /**
+ * unmap_local() - Unmap a PAGE_SIZE part of the resource previously
+ * mapped using kmap_local.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct iosys_map holding the virtual address after
+ * the operation.
+ */
+ void (*unmap_local)(struct ttm_kmap_iter *res_iter,
+ struct iosys_map *dmap);
+ bool maps_tt;
+};
+
+/**
+ * struct ttm_kmap_iter - Iterator for kmap_local type operations on a
+ * resource.
+ * @ops: Pointer to the operations struct.
+ *
+ * This struct is intended to be embedded in a resource-specific specialization
+ * implementing operations for the resource.
+ *
+ * Nothing stops us from extending the operations to vmap, vmap_pfn, etc.,
+ * replacing some or all of the ttm_bo_util cpu-map functionality.
+ */
+struct ttm_kmap_iter {
+ const struct ttm_kmap_iter_ops *ops;
+};
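+
+/*
+ * Illustrative specialization (sketch; the example_* names are
+ * hypothetical):
+ *
+ *	struct example_kmap_iter {
+ *		struct ttm_kmap_iter base;
+ *		struct page **pages;
+ *	};
+ *
+ * whose @map_local op upcasts with container_of():
+ *
+ *	static void example_map_local(struct ttm_kmap_iter *iter,
+ *				      struct iosys_map *dmap, pgoff_t i)
+ *	{
+ *		struct example_kmap_iter *it =
+ *			container_of(iter, typeof(*it), base);
+ *
+ *		iosys_map_set_vaddr(dmap, kmap_local_page(it->pages[i]));
+ *	}
+ */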
+
+#endif /* __TTM_KMAP_ITER_H__ */
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
deleted file mode 100644
index c78ea99c42cf..000000000000
--- a/include/drm/ttm/ttm_memory.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-#include "ttm_bo_api.h"
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @lower_mem_limit: include lower limit of swap space and lower limit of
- * system memory.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-extern int ttm_mem_global_init(struct ttm_mem_global *glob);
-extern void ttm_mem_global_release(struct ttm_mem_global *glob);
-extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-extern void ttm_mem_global_free(struct ttm_mem_global *glob,
- uint64_t amount);
-extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-extern size_t ttm_round_pot(size_t size);
-extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
-extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
- uint64_t num_pages, struct ttm_operation_ctx *ctx);
-#endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
deleted file mode 100644
index 45fa318c1585..000000000000
--- a/include/drm/ttm/ttm_module.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _TTM_MODULE_H_
-#define _TTM_MODULE_H_
-
-#include <linux/kernel.h>
-struct kobject;
-
-#define TTM_PFX "[TTM] "
-extern struct kobject *ttm_get_kobj(void);
-
-#endif /* _TTM_MODULE_H_ */
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
deleted file mode 100644
index a6b6ef5f9bf4..000000000000
--- a/include/drm/ttm/ttm_page_alloc.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- */
-#ifndef TTM_PAGE_ALLOC
-#define TTM_PAGE_ALLOC
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_memory.h>
-
-struct device;
-
-/**
- * Initialize pool allocator.
- */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-/**
- * Free pool allocator.
- */
-void ttm_page_alloc_fini(void);
-
-/**
- * ttm_pool_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_pool_unpopulate:
- *
- * @ttm: The struct ttm_tt which to free backing pages.
- *
- * Free all pages of @ttm
- */
-void ttm_pool_unpopulate(struct ttm_tt *ttm);
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
- struct ttm_operation_ctx *ctx);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
-#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
-/**
- * Initialize pool allocator.
- */
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-
-/**
- * Free pool allocator.
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
- struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
- unsigned max_pages)
-{
- return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- return 0;
-}
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
- struct device *dev,
- struct ttm_operation_ctx *ctx)
-{
- return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
-{
-}
-#endif
-
-#endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index e88a8e39767b..b510a4812609 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -35,6 +35,17 @@
/*
* Memory regions for data placement.
+ *
+ * Buffers placed in TTM_PL_SYSTEM are considered under TTM's control and can
+ * be swapped out whenever TTM thinks it is a good idea.
+ * In cases where drivers would like to use TTM_PL_SYSTEM as a valid
+ * placement they need to be able to handle the issues that arise from the
+ * above manually.
+ *
+ * For BOs which reside in system memory but for which the accelerator
+ * requires direct access (i.e. their usage needs to be synchronized
+ * between the CPU and accelerator via fences), a new, driver-private
+ * placement that can handle such scenarios is a good idea.
*/
#define TTM_PL_SYSTEM 0
@@ -42,42 +53,29 @@
#define TTM_PL_VRAM 2
#define TTM_PL_PRIV 3
-#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
-#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
-#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV (1 << TTM_PL_PRIV)
-#define TTM_PL_MASK_MEM 0x0000FFFF
-
/*
- * Other flags that affects data placement.
- * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
- * if available.
- * TTM_PL_FLAG_SHARED means that another application may
- * reference the buffer.
- * TTM_PL_FLAG_NO_EVICT means that the buffer may never
- * be evicted to make room for other buffers.
* TTM_PL_FLAG_TOPDOWN requests to be placed from the
* top of the memory area, instead of the bottom.
*/
-#define TTM_PL_FLAG_CACHED (1 << 16)
-#define TTM_PL_FLAG_UNCACHED (1 << 17)
-#define TTM_PL_FLAG_WC (1 << 18)
-#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
-#define TTM_PL_FLAG_NO_EVICT (1 << 21)
-#define TTM_PL_FLAG_TOPDOWN (1 << 22)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 0)
+#define TTM_PL_FLAG_TOPDOWN (1 << 1)
+
+/* For multihop handling */
+#define TTM_PL_FLAG_TEMPORARY (1 << 2)
-#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
- TTM_PL_FLAG_UNCACHED | \
- TTM_PL_FLAG_WC)
+/* Placement is never used during eviction */
+#define TTM_PL_FLAG_DESIRED (1 << 3)
-#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
+/* Placement is only used during eviction */
+#define TTM_PL_FLAG_FALLBACK (1 << 4)
/**
* struct ttm_place
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
+ * @mem_type: One of TTM_PL_* where the resource should be allocated from.
* @flags: memory domain and caching flags for the object
*
* Structure indicating a possible place to put an object.
@@ -85,6 +83,7 @@
struct ttm_place {
unsigned fpfn;
unsigned lpfn;
+ uint32_t mem_type;
uint32_t flags;
};
@@ -93,16 +92,12 @@ struct ttm_place {
*
* @num_placement: number of preferred placements
* @placement: preferred placements
- * @num_busy_placement: number of preferred placements when need to evict buffer
- * @busy_placement: preferred placements when need to evict buffer
*
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
unsigned num_placement;
const struct ttm_place *placement;
- unsigned num_busy_placement;
- const struct ttm_place *busy_placement;
};
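+
+/*
+ * Illustrative placement (sketch): a BO that should preferably live in
+ * VRAM but may fall back to GTT under memory pressure could be
+ * described as:
+ *
+ *	static const struct ttm_place places[] = {
+ *		{ .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_DESIRED },
+ *		{ .mem_type = TTM_PL_TT, .flags = TTM_PL_FLAG_FALLBACK },
+ *	};
+ *	static const struct ttm_placement placement = {
+ *		.num_placement = ARRAY_SIZE(places),
+ *		.placement = places,
+ *	};
+ */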
#endif
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
new file mode 100644
index 000000000000..4490d43c63e3
--- /dev/null
+++ b/include/drm/ttm/ttm_pool.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_PAGE_POOL_H_
+#define _TTM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+#include <drm/ttm/ttm_caching.h>
+
+struct device;
+struct ttm_tt;
+struct ttm_pool;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_pool_type - Pool for a certain memory type
+ *
+ * @pool: the pool we belong to, might be NULL for the global ones
+ * @order: the allocation order our pages have
+ * @caching: the caching type our pages have
+ * @shrinker_list: our place on the global shrinker list
+ * @lock: protection of the page list
+ * @pages: the list of pages in the pool
+ */
+struct ttm_pool_type {
+ struct ttm_pool *pool;
+ unsigned int order;
+ enum ttm_caching caching;
+
+ struct list_head shrinker_list;
+
+ spinlock_t lock;
+ struct list_head pages;
+};
+
+/**
+ * struct ttm_pool - Pool for all caching and orders
+ *
+ * @dev: the device we allocate pages for
+ * @nid: which numa node to use
+ * @use_dma_alloc: if coherent DMA allocations should be used
+ * @use_dma32: if GFP_DMA32 should be used
+ * @caching: pools for each caching/order
+ */
+struct ttm_pool {
+ struct device *dev;
+ int nid;
+
+ bool use_dma_alloc;
+ bool use_dma32;
+
+ struct {
+ struct ttm_pool_type orders[NR_PAGE_ORDERS];
+ } caching[TTM_NUM_CACHING_TYPES];
+};
+
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx);
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
+
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ int nid, bool use_dma_alloc, bool use_dma32);
+void ttm_pool_fini(struct ttm_pool *pool);
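+
+/*
+ * Typical lifecycle (illustrative sketch): a pool embedded in the
+ * driver's device structure is initialized once, used to back ttm_tt
+ * objects, and torn down on driver unload:
+ *
+ *	ttm_pool_init(&pool, dev, NUMA_NO_NODE, true, false);
+ *	ret = ttm_pool_alloc(&pool, tt, &ctx);
+ *	...
+ *	ttm_pool_free(&pool, tt);
+ *	ttm_pool_fini(&pool);
+ */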
+
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+
+int ttm_pool_mgr_init(unsigned long num_pages);
+void ttm_pool_mgr_fini(void);
+
+#endif
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
new file mode 100644
index 000000000000..7963b957e9ef
--- /dev/null
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _TTM_RANGE_MANAGER_H_
+#define _TTM_RANGE_MANAGER_H_
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/drm_mm.h>
+
+/**
+ * struct ttm_range_mgr_node
+ *
+ * @base: base class we extend
+ * @mm_nodes: MM nodes, usually 1
+ *
+ * Extending the ttm_resource object to manage an address space allocation with
+ * one or more drm_mm_nodes.
+ */
+struct ttm_range_mgr_node {
+ struct ttm_resource base;
+ struct drm_mm_node mm_nodes[];
+};
+
+/**
+ * to_ttm_range_mgr_node
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the ttm_resource object into a ttm_range_mgr_node object.
+ */
+static inline struct ttm_range_mgr_node *
+to_ttm_range_mgr_node(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_range_mgr_node, base);
+}
+
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size);
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
+ unsigned type);
+static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned int type, bool use_tt,
+ unsigned long p_size)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
+}
+
+static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned int type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_fini_nocheck(bdev, type);
+}
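+
+/*
+ * Illustrative use (sketch): a driver managing its VRAM as a simple
+ * range could set up and tear down the manager like this, with
+ * vram_size being hypothetical driver state in bytes:
+ *
+ *	ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
+ *				 vram_size >> PAGE_SHIFT);
+ *	...
+ *	ret = ttm_range_man_fini(bdev, TTM_PL_VRAM);
+ */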
+#endif
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
new file mode 100644
index 000000000000..1afa13f0c22b
--- /dev/null
+++ b/include/drm/ttm/ttm_resource.h
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_RESOURCE_H_
+#define _TTM_RESOURCE_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/iosys-map.h>
+#include <linux/dma-fence.h>
+
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
+
+#define TTM_MAX_BO_PRIORITY 4U
+#define TTM_NUM_MEM_TYPES 8
+
+struct ttm_device;
+struct ttm_resource_manager;
+struct ttm_resource;
+struct ttm_place;
+struct ttm_buffer_object;
+struct ttm_placement;
+struct iosys_map;
+struct io_mapping;
+struct sg_table;
+struct scatterlist;
+
+struct ttm_resource_manager_func {
+ /**
+ * struct ttm_resource_manager_func member alloc
+ *
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
+ *
+ * This function should allocate space in the memory type managed
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
+ * of the range allocated, and the function should return zero.
+ * If the manager can't fulfill the request -ENOSPC should be returned.
+	 * If a system error occurred, preventing the request from being
+	 * fulfilled,
+ * the function should return a negative error code.
+ *
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
+ */
+ int (*alloc)(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res);
+
+ /**
+ * struct ttm_resource_manager_func member free
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be freed.
+ *
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
+ */
+ void (*free)(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
+ /**
+ * struct ttm_resource_manager_func member intersects
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res intersects with @place + @size. Used to judge if
+	 * evictions are valuable or not.
+ */
+ bool (*intersects)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
+ * struct ttm_resource_manager_func member compatible
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+	 * Test if @res is compatible with @place + @size. Used to check
+	 * whether the backing store needs to be moved or not.
+ */
+ bool (*compatible)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
+ * struct ttm_resource_manager_func member debug
+ *
+ * @man: Pointer to a memory type manager.
+ * @printer: Prefix to be used in printout to identify the caller.
+ *
+ * This function is called to print out the state of the memory
+ * type manager to aid debugging of out-of-memory conditions.
+ * It may not be called from within atomic context.
+ */
+ void (*debug)(struct ttm_resource_manager *man,
+ struct drm_printer *printer);
+};
+
+/**
+ * struct ttm_resource_manager
+ *
+ * @use_type: The memory type is enabled.
+ * @use_tt: If a TT object should be used for the backing store.
+ * @size: Size of the managed region.
+ * @bdev: ttm device this manager belongs to
+ * @func: structure pointer implementing the range manager. See above
+ * @move_lock: lock for move fence
+ * @move: The fence of the last pipelined move operation.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ */
+struct ttm_resource_manager {
+ /*
+ * No protection. Constant from start.
+ */
+ bool use_type;
+ bool use_tt;
+ struct ttm_device *bdev;
+ uint64_t size;
+ const struct ttm_resource_manager_func *func;
+ spinlock_t move_lock;
+
+ /*
+ * Protected by @move_lock.
+ */
+ struct dma_fence *move;
+
+ /*
+ * Protected by the bdev->lru_lock.
+ */
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
+
+ /**
+ * @usage: How much of the resources are used, protected by the
+ * bdev->lru_lock.
+ */
+ uint64_t usage;
+};
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @offset: physical addr
+ * @is_iomem: is this io memory?
+ * @caching: See enum ttm_caching
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ phys_addr_t offset;
+ bool is_iomem;
+ enum ttm_caching caching;
+};
+
+/**
+ * struct ttm_resource
+ *
+ * @start: Start of the allocation.
+ * @size: Actual size of resource in bytes.
+ * @mem_type: Resource type of the allocation.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+struct ttm_resource {
+ unsigned long start;
+ size_t size;
+ uint32_t mem_type;
+ uint32_t placement;
+ struct ttm_bus_placement bus;
+ struct ttm_buffer_object *bo;
+
+ /**
+ * @lru: Least recently used list, see &ttm_resource_manager.lru
+ */
+ struct list_head lru;
+};
+
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ unsigned int priority;
+};
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Range of resources for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+ struct ttm_resource *first;
+ struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @pos: first/last lru entry for resources in the each domain/priority
+ *
+ * Container for the current bulk move state. Should be used with
+ * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
+ */
+struct ttm_lru_bulk_move {
+ struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+};
+
+/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+ struct ttm_kmap_iter base;
+ struct io_mapping *iomap;
+ struct sg_table *st;
+ resource_size_t start;
+ struct {
+ struct scatterlist *sg;
+ pgoff_t i;
+ pgoff_t end;
+ pgoff_t offs;
+ } cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+ struct ttm_kmap_iter base;
+ struct iosys_map dmap;
+ bool needs_unmap;
+};
+
+/**
+ * ttm_resource_manager_set_used
+ *
+ * @man: A memory manager object.
+ * @used: usage state to set.
+ *
+ * Set the manager in use flag. If disabled the manager is no longer
+ * used for object placement.
+ */
+static inline void
+ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
+{
+ int i;
+
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+ WARN_ON(!list_empty(&man->lru[i]));
+ man->use_type = used;
+}
+
+/**
+ * ttm_resource_manager_used
+ *
+ * @man: Manager to get used state for
+ *
+ * Get the in use flag for a manager.
+ * Returns:
+ * true if used, false if not.
+ */
+static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
+{
+ return man->use_type;
+}
+
+/**
+ * ttm_resource_manager_cleanup
+ *
+ * @man: A memory manager object.
+ *
+ * Cleanup the move fences from the memory manager object.
+ */
+static inline void
+ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
+{
+ dma_fence_put(man->move);
+ man->move = NULL;
+}
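+
+/*
+ * Illustrative teardown of a driver-private manager (sketch): disable
+ * it first so no new allocations are placed there, evict what is left,
+ * then clean up:
+ *
+ *	ttm_resource_manager_set_used(man, false);
+ *	ret = ttm_resource_manager_evict_all(bdev, man);
+ *	if (ret)
+ *		return ret;
+ *	ttm_resource_manager_cleanup(man);
+ *	ttm_set_driver_manager(bdev, type, NULL);
+ */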
+
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
+
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool ttm_resource_compatible(struct ttm_resource *res,
+ struct ttm_placement *placement);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
+ uint64_t size);
+
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
+ struct ttm_resource_manager *man);
+
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+ struct drm_printer *p);
+
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor,
+ struct ttm_resource *res);
+
+/**
+ * ttm_resource_manager_for_each_res - iterate over all resources
+ * @man: the resource manager
+ * @cursor: struct ttm_resource_cursor for the current position
+ * @res: the current resource
+ *
+ * Iterate over all the evictable resources in a resource manager.
+ */
+#define ttm_resource_manager_for_each_res(man, cursor, res) \
+ for (res = ttm_resource_manager_first(man, cursor); res; \
+ res = ttm_resource_manager_next(man, cursor, res))
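+
+/*
+ * Illustrative use (sketch), dumping all resources while holding the
+ * LRU lock:
+ *
+ *	spin_lock(&bdev->lru_lock);
+ *	ttm_resource_manager_for_each_res(man, &cursor, res)
+ *		pr_debug("start %lu, %zu bytes\n", res->start, res->size);
+ *	spin_unlock(&bdev->lru_lock);
+ */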
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
+
+struct ttm_kmap_iter_linear_io;
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry * parent,
+ const char *name);
+#endif
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
deleted file mode 100644
index 7c492b49e38c..000000000000
--- a/include/drm/ttm/ttm_set_memory.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2018 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Huang Rui <ray.huang@amd.com>
- */
-
-#ifndef TTM_SET_MEMORY
-#define TTM_SET_MEMORY
-
-#include <linux/mm.h>
-
-#ifdef CONFIG_X86
-
-#include <asm/set_memory.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return set_pages_array_wb(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return set_pages_array_wc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return set_pages_array_uc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return set_pages_wb(page, numpages);
-}
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- unsigned long addr = (unsigned long)page_address(page);
-
- return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return set_pages_uc(page, numpages);
-}
-
-#else /* for CONFIG_X86 */
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#include <asm/agp.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
- return 0;
-}
-
-#else /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_X86 */
-
-#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 5e2393fe42c6..2b9d856ff388 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -27,114 +27,108 @@
#ifndef _TTM_TT_H_
#define _TTM_TT_H_
+#include <linux/pagemap.h>
#include <linux/types.h>
+#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
+struct ttm_device;
struct ttm_tt;
-struct ttm_mem_reg;
+struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
-struct ttm_backend_func {
+/**
+ * struct ttm_tt - This is a structure holding the pages, caching- and aperture
+ * binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_tt {
+ /** @pages: Array of pages backing the data. */
+ struct page **pages;
/**
- * struct ttm_backend_func member bind
+ * @page_flags: The page flags.
*
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_mem_reg describing the
- * memory type and location for binding.
+ * Supported values:
*
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
- /**
- * struct ttm_backend_func member unbind
+ * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
+ * and swapped out by TTM. Calling ttm_tt_populate() will then swap the
+ * pages back in, and unset the flag. Drivers should in general never
+ * need to touch this.
*
- * @ttm: Pointer to a struct ttm_tt.
+ * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
+ * allocation.
*
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- void (*unbind) (struct ttm_tt *ttm);
-
- /**
- * struct ttm_backend_func member destroy
+ * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
+ * externally, like with dma-buf or userptr. This effectively disables
+ * TTM swapping out such pages. Also important is to prevent TTM from
+ * ever directly mapping these pages.
*
- * @ttm: Pointer to a struct ttm_tt.
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
*
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+	 * implementing a ttm_tt backend which still allocates driver-owned
+	 * pages underneath (say, with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as
+ * not encrypted. The framework will try to match what the dma layer
+ * is doing, but note that it is a little fragile because ttm page
+ * fault handling abuses the DMA api a bit and dma_map_attrs can't be
+ * used to assure pgprot always matches.
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
*/
- void (*destroy) (struct ttm_tt *ttm);
-};
+#define TTM_TT_FLAG_SWAPPED BIT(0)
+#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
+#define TTM_TT_FLAG_EXTERNAL BIT(2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
+#define TTM_TT_FLAG_DECRYPTED BIT(4)
-/**
- * struct ttm_tt
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * pointer.
- * @pages: Array of pages backing the data.
- * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_tt {
- struct ttm_bo_device *bdev;
- struct ttm_backend_func *func;
- struct page **pages;
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(5)
uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
+ /** @num_pages: Number of pages in the page array. */
+ uint32_t num_pages;
+ /** @sg: for SG objects via dma-buf. */
+ struct sg_table *sg;
+ /** @dma_address: The DMA (bus) addresses of the pages. */
+ dma_addr_t *dma_address;
+ /** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
- enum ttm_caching_state caching_state;
- enum {
- tt_bound,
- tt_unbound,
- tt_unpopulated,
- } state;
+ /**
+ * @caching: The current caching state of the pages, see enum
+ * ttm_caching.
+ */
+ enum ttm_caching caching;
};
/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
*/
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
+struct ttm_kmap_iter_tt {
+ struct ttm_kmap_iter base;
+ struct ttm_tt *tt;
+ pgprot_t prot;
};
+static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
+}
+
/**
* ttm_tt_create
*
@@ -151,7 +145,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
+ * @caching: the desired caching state of the pages
+ * @extra_pages: Extra pages needed for the driver.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -159,11 +155,10 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages);
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching);
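+
+/*
+ * Illustrative driver backend (sketch; the foo_* name is hypothetical):
+ *
+ *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
+ *						uint32_t page_flags)
+ *	{
+ *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ *
+ *		if (!tt)
+ *			return NULL;
+ *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
+ *			kfree(tt);
+ *			return NULL;
+ *		}
+ *		return tt;
+ *	}
+ */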
/**
* ttm_tt_fini
@@ -173,36 +168,16 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
* Free memory of ttm_tt structure
*/
void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
-
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
- struct ttm_operation_ctx *ctx);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_destroy:
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
-void ttm_tt_destroy(struct ttm_tt *ttm);
-
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-void ttm_tt_unbind(struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
@@ -212,41 +187,49 @@ void ttm_tt_unbind(struct ttm_tt *ttm);
* Swap in a previously swap out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags);
/**
- * ttm_tt_set_placement_caching:
+ * ttm_tt_populate - allocate pages for a ttm
*
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
+ * @bdev: the ttm_device this object belongs to
+ * @ttm: Pointer to the ttm_tt structure
+ * @ctx: operation context for populating the tt object.
*
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
+ * Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
- * ttm_tt_populate - allocate pages for a ttm
+ * ttm_tt_unpopulate - free pages from a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
*
- * Calls the driver method to allocate pages for a ttm
+ * Calls the driver method to free all pages from a ttm
*/
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
- * ttm_tt_unpopulate - free pages from a ttm
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
*
* @ttm: Pointer to the ttm_tt structure
*
- * Calls the driver method to free all pages from a ttm
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
*/
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
+static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+ ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
+}
+
+void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
+struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt);
+unsigned long ttm_tt_pages_limit(void);
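
As an illustrative driver-side sketch of the revised ttm_tt_init() signature (the driver name "mydrv" and the trivial error handling are hypothetical; assumes <drm/ttm/ttm_tt.h>, <drm/ttm/ttm_caching.h> for ttm_cached, and <linux/slab.h>):

	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
						  uint32_t page_flags)
	{
		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

		if (!tt)
			return NULL;
		/* caching mode and extra page count are now explicit parameters */
		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
			kfree(tt);
			return NULL;
		}
		return tt;
	}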
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
@@ -255,7 +238,7 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
@@ -265,8 +248,10 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
uint32_t page_flags);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
+void ttm_agp_unbind(struct ttm_tt *ttm);
+void ttm_agp_destroy(struct ttm_tt *ttm);
+bool ttm_agp_is_bound(struct ttm_tt *ttm);
#endif
#endif
diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h
new file mode 100644
index 000000000000..de1a344737bc
--- /dev/null
+++ b/include/drm/xe_pciids.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PCIIDS_H_
+#define _XE_PCIIDS_H_
+
+/*
+ * The lists below can be turned into struct pci_device_id initializers
+ * by defining INTEL_VGA_DEVICE:
+ *
+ * #define INTEL_VGA_DEVICE(id, info) { \
+ * 0x8086, id, \
+ * ~0, ~0, \
+ * 0x030000, 0xff0000, \
+ * (unsigned long) info }
+ *
+ * And then invoking one of the lists like:
+ *
+ * XE_TGL_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__)
+ *
+ * To turn the lists into something else, pass a different macro as the
+ * first argument.
+ */
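
For illustration, a hypothetical driver table built from these lists might look like the following sketch (tgl_desc and dg2_desc stand in for made-up per-platform descriptors used as driver_data; they are not part of this header):

	#define INTEL_VGA_DEVICE(id, info) {		\
		0x8086, id,				\
		~0, ~0,					\
		0x030000, 0xff0000,			\
		(unsigned long) info }

	/* tgl_desc/dg2_desc: hypothetical per-platform descriptors */
	static const struct pci_device_id pciidlist[] = {
		XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
		XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
		{ }
	};
	MODULE_DEVICE_TABLE(pci, pciidlist);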
+
+/* TGL */
+#define XE_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+#define XE_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+#define XE_TGL_IDS(MACRO__, ...) \
+ XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* RKL */
+#define XE_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+/* DG1 */
+#define XE_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+/* ADL-S */
+#define XE_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+/* ADL-P */
+#define XE_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+/* ADL-N */
+#define XE_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__)
+
+/* RPL-S */
+#define XE_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+/* RPL-U */
+#define XE_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+/* RPL-P */
+#define XE_RPLP_IDS(MACRO__, ...) \
+ XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+/* DG2 */
+#define XE_DG2_G10_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__), \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define XE_DG2_G11_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__), \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__), \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+#define XE_DG2_G12_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__), \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+#define XE_DG2_IDS(MACRO__, ...) \
+ XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+#define XE_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+#define XE_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+#define XE_ATS_M_IDS(MACRO__, ...) \
+ XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL / ARL */
+#define XE_MTL_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+#define XE_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+#endif
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
new file mode 100644
index 000000000000..19ac7b36f608
--- /dev/null
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>, based on previous work by Kumar Gala.
+ */
+#ifndef _DT_BINDINGS_ARM_QCOM_IDS_H
+#define _DT_BINDINGS_ARM_QCOM_IDS_H
+
+/*
+ * The MSM chipset and hardware revision used by Qualcomm bootloaders, DTS for
+ * older chipsets (qcom,msm-id) and in socinfo driver:
+ */
+#define QCOM_ID_MSM8260 70
+#define QCOM_ID_MSM8660 71
+#define QCOM_ID_APQ8060 86
+#define QCOM_ID_MSM8960 87
+#define QCOM_ID_APQ8064 109
+#define QCOM_ID_MSM8930 116
+#define QCOM_ID_MSM8630 117
+#define QCOM_ID_MSM8230 118
+#define QCOM_ID_APQ8030 119
+#define QCOM_ID_MSM8627 120
+#define QCOM_ID_MSM8227 121
+#define QCOM_ID_MSM8660A 122
+#define QCOM_ID_MSM8260A 123
+#define QCOM_ID_APQ8060A 124
+#define QCOM_ID_MSM8974 126
+#define QCOM_ID_MSM8225 127
+#define QCOM_ID_MSM8625 129
+#define QCOM_ID_MPQ8064 130
+#define QCOM_ID_MSM8960AB 138
+#define QCOM_ID_APQ8060AB 139
+#define QCOM_ID_MSM8260AB 140
+#define QCOM_ID_MSM8660AB 141
+#define QCOM_ID_MSM8930AA 142
+#define QCOM_ID_MSM8630AA 143
+#define QCOM_ID_MSM8230AA 144
+#define QCOM_ID_MSM8626 145
+#define QCOM_ID_MSM8610 147
+#define QCOM_ID_APQ8064AB 153
+#define QCOM_ID_MSM8930AB 154
+#define QCOM_ID_MSM8630AB 155
+#define QCOM_ID_MSM8230AB 156
+#define QCOM_ID_APQ8030AB 157
+#define QCOM_ID_MSM8226 158
+#define QCOM_ID_MSM8526 159
+#define QCOM_ID_APQ8030AA 160
+#define QCOM_ID_MSM8110 161
+#define QCOM_ID_MSM8210 162
+#define QCOM_ID_MSM8810 163
+#define QCOM_ID_MSM8212 164
+#define QCOM_ID_MSM8612 165
+#define QCOM_ID_MSM8112 166
+#define QCOM_ID_MSM8125 167
+#define QCOM_ID_MSM8225Q 168
+#define QCOM_ID_MSM8625Q 169
+#define QCOM_ID_MSM8125Q 170
+#define QCOM_ID_APQ8064AA 172
+#define QCOM_ID_APQ8084 178
+#define QCOM_ID_MSM8130 179
+#define QCOM_ID_MSM8130AA 180
+#define QCOM_ID_MSM8130AB 181
+#define QCOM_ID_MSM8627AA 182
+#define QCOM_ID_MSM8227AA 183
+#define QCOM_ID_APQ8074 184
+#define QCOM_ID_MSM8274 185
+#define QCOM_ID_MSM8674 186
+#define QCOM_ID_MDM9635 187
+#define QCOM_ID_MSM8974PRO_AC 194
+#define QCOM_ID_MSM8126 198
+#define QCOM_ID_APQ8026 199
+#define QCOM_ID_MSM8926 200
+#define QCOM_ID_IPQ8062 201
+#define QCOM_ID_IPQ8064 202
+#define QCOM_ID_IPQ8066 203
+#define QCOM_ID_IPQ8068 204
+#define QCOM_ID_MSM8326 205
+#define QCOM_ID_MSM8916 206
+#define QCOM_ID_MSM8994 207
+#define QCOM_ID_APQ8074PRO_AA 208
+#define QCOM_ID_APQ8074PRO_AB 209
+#define QCOM_ID_APQ8074PRO_AC 210
+#define QCOM_ID_MSM8274PRO_AA 211
+#define QCOM_ID_MSM8274PRO_AB 212
+#define QCOM_ID_MSM8274PRO_AC 213
+#define QCOM_ID_MSM8674PRO_AA 214
+#define QCOM_ID_MSM8674PRO_AB 215
+#define QCOM_ID_MSM8674PRO_AC 216
+#define QCOM_ID_MSM8974PRO_AA 217
+#define QCOM_ID_MSM8974PRO_AB 218
+#define QCOM_ID_APQ8028 219
+#define QCOM_ID_MSM8128 220
+#define QCOM_ID_MSM8228 221
+#define QCOM_ID_MSM8528 222
+#define QCOM_ID_MSM8628 223
+#define QCOM_ID_MSM8928 224
+#define QCOM_ID_MSM8510 225
+#define QCOM_ID_MSM8512 226
+#define QCOM_ID_MSM8936 233
+#define QCOM_ID_MDM9640 234
+#define QCOM_ID_MSM8939 239
+#define QCOM_ID_APQ8036 240
+#define QCOM_ID_APQ8039 241
+#define QCOM_ID_MSM8236 242
+#define QCOM_ID_MSM8636 243
+#define QCOM_ID_MSM8909 245
+#define QCOM_ID_MSM8996 246
+#define QCOM_ID_APQ8016 247
+#define QCOM_ID_MSM8216 248
+#define QCOM_ID_MSM8116 249
+#define QCOM_ID_MSM8616 250
+#define QCOM_ID_MSM8992 251
+#define QCOM_ID_APQ8092 252
+#define QCOM_ID_APQ8094 253
+#define QCOM_ID_MSM8209 258
+#define QCOM_ID_MSM8208 259
+#define QCOM_ID_MDM9209 260
+#define QCOM_ID_MDM9309 261
+#define QCOM_ID_MDM9609 262
+#define QCOM_ID_MSM8239 263
+#define QCOM_ID_MSM8952 264
+#define QCOM_ID_APQ8009 265
+#define QCOM_ID_MSM8956 266
+#define QCOM_ID_MSM8929 268
+#define QCOM_ID_MSM8629 269
+#define QCOM_ID_MSM8229 270
+#define QCOM_ID_APQ8029 271
+#define QCOM_ID_APQ8056 274
+#define QCOM_ID_MSM8609 275
+#define QCOM_ID_APQ8076 277
+#define QCOM_ID_MSM8976 278
+#define QCOM_ID_MDM9650 279
+#define QCOM_ID_IPQ8065 280
+#define QCOM_ID_IPQ8069 281
+#define QCOM_ID_MDM9655 283
+#define QCOM_ID_MDM9250 284
+#define QCOM_ID_MDM9255 285
+#define QCOM_ID_MDM9350 286
+#define QCOM_ID_APQ8052 289
+#define QCOM_ID_MDM9607 290
+#define QCOM_ID_APQ8096 291
+#define QCOM_ID_MSM8998 292
+#define QCOM_ID_MSM8953 293
+#define QCOM_ID_MSM8937 294
+#define QCOM_ID_APQ8037 295
+#define QCOM_ID_MDM8207 296
+#define QCOM_ID_MDM9207 297
+#define QCOM_ID_MDM9307 298
+#define QCOM_ID_MDM9628 299
+#define QCOM_ID_MSM8909W 300
+#define QCOM_ID_APQ8009W 301
+#define QCOM_ID_MSM8996L 302
+#define QCOM_ID_MSM8917 303
+#define QCOM_ID_APQ8053 304
+#define QCOM_ID_MSM8996SG 305
+#define QCOM_ID_APQ8017 307
+#define QCOM_ID_MSM8217 308
+#define QCOM_ID_MSM8617 309
+#define QCOM_ID_MSM8996AU 310
+#define QCOM_ID_APQ8096AU 311
+#define QCOM_ID_APQ8096SG 312
+#define QCOM_ID_MSM8940 313
+#define QCOM_ID_SDX201 314
+#define QCOM_ID_SDM660 317
+#define QCOM_ID_SDM630 318
+#define QCOM_ID_APQ8098 319
+#define QCOM_ID_MSM8920 320
+#define QCOM_ID_SDM845 321
+#define QCOM_ID_MDM9206 322
+#define QCOM_ID_IPQ8074 323
+#define QCOM_ID_SDA660 324
+#define QCOM_ID_SDM658 325
+#define QCOM_ID_SDA658 326
+#define QCOM_ID_SDA630 327
+#define QCOM_ID_MSM8905 331
+#define QCOM_ID_SDX202 333
+#define QCOM_ID_SDM450 338
+#define QCOM_ID_SM8150 339
+#define QCOM_ID_SDA845 341
+#define QCOM_ID_IPQ8072 342
+#define QCOM_ID_IPQ8076 343
+#define QCOM_ID_IPQ8078 344
+#define QCOM_ID_SDM636 345
+#define QCOM_ID_SDA636 346
+#define QCOM_ID_SDM632 349
+#define QCOM_ID_SDA632 350
+#define QCOM_ID_SDA450 351
+#define QCOM_ID_SDM439 353
+#define QCOM_ID_SDM429 354
+#define QCOM_ID_SM8250 356
+#define QCOM_ID_SA8155 362
+#define QCOM_ID_SDA439 363
+#define QCOM_ID_SDA429 364
+#define QCOM_ID_SM7150 365
+#define QCOM_ID_SM7150P 366
+#define QCOM_ID_IPQ8070 375
+#define QCOM_ID_IPQ8071 376
+#define QCOM_ID_QM215 386
+#define QCOM_ID_IPQ8072A 389
+#define QCOM_ID_IPQ8074A 390
+#define QCOM_ID_IPQ8076A 391
+#define QCOM_ID_IPQ8078A 392
+#define QCOM_ID_SM6125 394
+#define QCOM_ID_IPQ8070A 395
+#define QCOM_ID_IPQ8071A 396
+#define QCOM_ID_IPQ8172 397
+#define QCOM_ID_IPQ8173 398
+#define QCOM_ID_IPQ8174 399
+#define QCOM_ID_IPQ6018 402
+#define QCOM_ID_IPQ6028 403
+#define QCOM_ID_SDM429W 416
+#define QCOM_ID_SM4250 417
+#define QCOM_ID_IPQ6000 421
+#define QCOM_ID_IPQ6010 422
+#define QCOM_ID_SC7180 425
+#define QCOM_ID_SM6350 434
+#define QCOM_ID_QCM2150 436
+#define QCOM_ID_SDA429W 437
+#define QCOM_ID_SM8350 439
+#define QCOM_ID_QCM2290 441
+#define QCOM_ID_SM7125 443
+#define QCOM_ID_SM6115 444
+#define QCOM_ID_IPQ5010 446
+#define QCOM_ID_IPQ5018 447
+#define QCOM_ID_IPQ5028 448
+#define QCOM_ID_SC8280XP 449
+#define QCOM_ID_IPQ6005 453
+#define QCOM_ID_QRB5165 455
+#define QCOM_ID_SM8450 457
+#define QCOM_ID_SM7225 459
+#define QCOM_ID_SA8295P 460
+#define QCOM_ID_SA8540P 461
+#define QCOM_ID_QCM4290 469
+#define QCOM_ID_QCS4290 470
+#define QCOM_ID_SM8450_2 480
+#define QCOM_ID_SM8450_3 482
+#define QCOM_ID_SC7280 487
+#define QCOM_ID_SC7180P 495
+#define QCOM_ID_QCM6490 497
+#define QCOM_ID_IPQ5000 503
+#define QCOM_ID_IPQ0509 504
+#define QCOM_ID_IPQ0518 505
+#define QCOM_ID_SM6375 507
+#define QCOM_ID_IPQ9514 510
+#define QCOM_ID_IPQ9550 511
+#define QCOM_ID_IPQ9554 512
+#define QCOM_ID_IPQ9570 513
+#define QCOM_ID_IPQ9574 514
+#define QCOM_ID_SM8550 519
+#define QCOM_ID_IPQ5016 520
+#define QCOM_ID_IPQ9510 521
+#define QCOM_ID_QRB4210 523
+#define QCOM_ID_QRB2210 524
+#define QCOM_ID_SM8475 530
+#define QCOM_ID_SM8475P 531
+#define QCOM_ID_SA8775P 534
+#define QCOM_ID_QRU1000 539
+#define QCOM_ID_SM8475_2 540
+#define QCOM_ID_QDU1000 545
+#define QCOM_ID_SM8650 557
+#define QCOM_ID_SM4450 568
+#define QCOM_ID_QDU1010 587
+#define QCOM_ID_QRU1032 588
+#define QCOM_ID_QRU1052 589
+#define QCOM_ID_QRU1062 590
+#define QCOM_ID_IPQ5332 592
+#define QCOM_ID_IPQ5322 593
+#define QCOM_ID_IPQ5312 594
+#define QCOM_ID_IPQ5302 595
+#define QCOM_ID_QCS8550 603
+#define QCOM_ID_QCM8550 604
+#define QCOM_ID_IPQ5300 624
+
+/*
+ * The board type and revision information, used by Qualcomm bootloaders and in
+ * DTS for older chipsets (qcom,board-id):
+ */
+#define QCOM_BOARD_ID(a, major, minor) \
+ (((major & 0xff) << 16) | ((minor & 0xff) << 8) | QCOM_BOARD_ID_##a)
+
+#define QCOM_BOARD_ID_MTP 8
+#define QCOM_BOARD_ID_DRAGONBOARD 10
+#define QCOM_BOARD_ID_QRD 11
+#define QCOM_BOARD_ID_SBC 24
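
As a quick sketch of the packing arithmetic (board and revision values chosen purely for illustration):

	/*
	 * QCOM_BOARD_ID(MTP, 1, 0)
	 *   = ((1 & 0xff) << 16) | ((0 & 0xff) << 8) | QCOM_BOARD_ID_MTP
	 *   = 0x010000 | 0x000000 | 8
	 *   = 0x010008
	 */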
+
+#endif /* _DT_BINDINGS_ARM_QCOM_IDS_H */
diff --git a/include/dt-bindings/ata/ahci.h b/include/dt-bindings/ata/ahci.h
new file mode 100644
index 000000000000..b3f3b7cf9af8
--- /dev/null
+++ b/include/dt-bindings/ata/ahci.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides constants for most AHCI bindings.
+ */
+
+#ifndef _DT_BINDINGS_ATA_AHCI_H
+#define _DT_BINDINGS_ATA_AHCI_H
+
+/* Host Bus Adapter generic platform capabilities */
+#define HBA_SSS (1 << 27)
+#define HBA_SMPS (1 << 28)
+
+/* Host Bus Adapter port-specific platform capabilities */
+#define HBA_PORT_HPCP (1 << 18)
+#define HBA_PORT_MPSP (1 << 19)
+#define HBA_PORT_CPD (1 << 20)
+#define HBA_PORT_ESP (1 << 21)
+#define HBA_PORT_FBSCP (1 << 22)
+
+#endif
diff --git a/include/dt-bindings/bus/moxtet.h b/include/dt-bindings/bus/moxtet.h
index dc9345440ebe..10528de7b3ef 100644
--- a/include/dt-bindings/bus/moxtet.h
+++ b/include/dt-bindings/bus/moxtet.h
@@ -2,7 +2,7 @@
/*
* Constant for device tree bindings for Turris Mox module configuration bus
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#ifndef _DT_BINDINGS_BUS_MOXTET_H
diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h
index a250a52a6192..a237eb26accb 100644
--- a/include/dt-bindings/clock/actions,s500-cmu.h
+++ b/include/dt-bindings/clock/actions,s500-cmu.h
@@ -74,10 +74,12 @@
#define CLK_RMII_REF 54
#define CLK_GPIO 55
-/* system clock (part 2) */
+/* additional clocks */
#define CLK_APB 56
#define CLK_DMAC 57
+#define CLK_NIC 58
+#define CLK_ETHERNET 59
-#define CLK_NR_CLKS (CLK_DMAC + 1)
+#define CLK_NR_CLKS (CLK_ETHERNET + 1)
#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
index d3871c63308b..f53f8b16883d 100644
--- a/include/dt-bindings/clock/alphascale,asm9260.h
+++ b/include/dt-bindings/clock/alphascale,asm9260.h
@@ -55,7 +55,7 @@
#define CLKID_AHB_I2S1 45
#define CLKID_AHB_MAC1 46
-/* devider */
+/* divider */
#define CLKID_SYS_CPU 47
#define CLKID_SYS_AHB 48
#define CLKID_SYS_I2S0M 49
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
index 894951541276..dfbad5c87933 100644
--- a/include/dt-bindings/clock/am3.h
+++ b/include/dt-bindings/clock/am3.h
@@ -8,99 +8,6 @@
#define AM3_CLKCTRL_OFFSET 0x0
#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_per clocks */
-#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
-#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
-#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14)
-#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18)
-#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c)
-#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24)
-#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28)
-#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c)
-#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30)
-#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34)
-#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38)
-#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c)
-#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40)
-#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44)
-#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48)
-#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c)
-#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50)
-#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60)
-#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68)
-#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c)
-#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70)
-#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74)
-#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78)
-#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c)
-#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80)
-#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84)
-#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88)
-#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90)
-#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94)
-#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0)
-#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac)
-#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0)
-#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4)
-#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc)
-#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0)
-#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4)
-#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc)
-#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4)
-#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8)
-#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc)
-#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0)
-#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8)
-#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec)
-#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0)
-#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4)
-#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8)
-#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc)
-#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100)
-#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c)
-#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110)
-#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120)
-#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130)
-#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c)
-
-/* l4_wkup clocks */
-#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4
-#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET)
-#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4)
-#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8)
-#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc)
-#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14)
-#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0)
-#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4)
-#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8)
-#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc)
-#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0)
-#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4)
-#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8)
-#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4)
-
-/* mpu clocks */
-#define AM3_MPU_CLKCTRL_OFFSET 0x4
-#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET)
-#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4)
-
-/* l4_rtc clocks */
-#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
-
-/* gfx_l3 clocks */
-#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4
-#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET)
-#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4)
-
-/* l4_cefuse clocks */
-#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20
-#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
-#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
-
-/* XXX: Compatibility part end */
-
/* l4ls clocks */
#define AM3_L4LS_CLKCTRL_OFFSET 0x38
#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET)
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
index d961e7cb3682..a65b082e9cff 100644
--- a/include/dt-bindings/clock/am4.h
+++ b/include/dt-bindings/clock/am4.h
@@ -8,104 +8,6 @@
#define AM4_CLKCTRL_OFFSET 0x20
#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_wkup clocks */
-#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
-#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
-#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228)
-#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230)
-#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328)
-#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338)
-#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340)
-#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348)
-#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350)
-#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358)
-#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360)
-#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368)
-
-/* mpu clocks */
-#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* gfx_l3 clocks */
-#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_rtc clocks */
-#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_per clocks */
-#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
-#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
-#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
-#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
-#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
-#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68)
-#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70)
-#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
-#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
-#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
-#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
-#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
-#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
-#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238)
-#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240)
-#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248)
-#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258)
-#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260)
-#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268)
-#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320)
-#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420)
-#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428)
-#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430)
-#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438)
-#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440)
-#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448)
-#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450)
-#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458)
-#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460)
-#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468)
-#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478)
-#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480)
-#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488)
-#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490)
-#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498)
-#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0)
-#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8)
-#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0)
-#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8)
-#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0)
-#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8)
-#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0)
-#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500)
-#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508)
-#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510)
-#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518)
-#define AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520)
-#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528)
-#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530)
-#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538)
-#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540)
-#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548)
-#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550)
-#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558)
-#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560)
-#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568)
-#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570)
-#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578)
-#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580)
-#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588)
-#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590)
-#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598)
-#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0)
-#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8)
-#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0)
-#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720)
-#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
-#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
-
-/* XXX: Compatibility part end. */
-
/* l3s_tsc clocks */
#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120
#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET)
@@ -158,6 +60,7 @@
#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
+#define AM4_L3S_ADC1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x230)
#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
diff --git a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
new file mode 100644
index 000000000000..06f198ee7623
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#ifndef __A1_PERIPHERALS_CLKC_H
+#define __A1_PERIPHERALS_CLKC_H
+
+#define CLKID_XTAL_IN 0
+#define CLKID_FIXPLL_IN 1
+#define CLKID_USB_PHY_IN 2
+#define CLKID_USB_CTRL_IN 3
+#define CLKID_HIFIPLL_IN 4
+#define CLKID_SYSPLL_IN 5
+#define CLKID_DDS_IN 6
+#define CLKID_SYS 7
+#define CLKID_CLKTREE 8
+#define CLKID_RESET_CTRL 9
+#define CLKID_ANALOG_CTRL 10
+#define CLKID_PWR_CTRL 11
+#define CLKID_PAD_CTRL 12
+#define CLKID_SYS_CTRL 13
+#define CLKID_TEMP_SENSOR 14
+#define CLKID_AM2AXI_DIV 15
+#define CLKID_SPICC_B 16
+#define CLKID_SPICC_A 17
+#define CLKID_MSR 18
+#define CLKID_AUDIO 19
+#define CLKID_JTAG_CTRL 20
+#define CLKID_SARADC_EN 21
+#define CLKID_PWM_EF 22
+#define CLKID_PWM_CD 23
+#define CLKID_PWM_AB 24
+#define CLKID_CEC 25
+#define CLKID_I2C_S 26
+#define CLKID_IR_CTRL 27
+#define CLKID_I2C_M_D 28
+#define CLKID_I2C_M_C 29
+#define CLKID_I2C_M_B 30
+#define CLKID_I2C_M_A 31
+#define CLKID_ACODEC 32
+#define CLKID_OTP 33
+#define CLKID_SD_EMMC_A 34
+#define CLKID_USB_PHY 35
+#define CLKID_USB_CTRL 36
+#define CLKID_SYS_DSPB 37
+#define CLKID_SYS_DSPA 38
+#define CLKID_DMA 39
+#define CLKID_IRQ_CTRL 40
+#define CLKID_NIC 41
+#define CLKID_GIC 42
+#define CLKID_UART_C 43
+#define CLKID_UART_B 44
+#define CLKID_UART_A 45
+#define CLKID_SYS_PSRAM 46
+#define CLKID_RSA 47
+#define CLKID_CORESIGHT 48
+#define CLKID_AM2AXI_VAD 49
+#define CLKID_AUDIO_VAD 50
+#define CLKID_AXI_DMC 51
+#define CLKID_AXI_PSRAM 52
+#define CLKID_RAMB 53
+#define CLKID_RAMA 54
+#define CLKID_AXI_SPIFC 55
+#define CLKID_AXI_NIC 56
+#define CLKID_AXI_DMA 57
+#define CLKID_CPU_CTRL 58
+#define CLKID_ROM 59
+#define CLKID_PROC_I2C 60
+#define CLKID_DSPA_SEL 61
+#define CLKID_DSPB_SEL 62
+#define CLKID_DSPA_EN 63
+#define CLKID_DSPA_EN_NIC 64
+#define CLKID_DSPB_EN 65
+#define CLKID_DSPB_EN_NIC 66
+#define CLKID_RTC 67
+#define CLKID_CECA_32K 68
+#define CLKID_CECB_32K 69
+#define CLKID_24M 70
+#define CLKID_12M 71
+#define CLKID_FCLK_DIV2_DIVN 72
+#define CLKID_GEN 73
+#define CLKID_SARADC_SEL 74
+#define CLKID_SARADC 75
+#define CLKID_PWM_A 76
+#define CLKID_PWM_B 77
+#define CLKID_PWM_C 78
+#define CLKID_PWM_D 79
+#define CLKID_PWM_E 80
+#define CLKID_PWM_F 81
+#define CLKID_SPICC 82
+#define CLKID_TS 83
+#define CLKID_SPIFC 84
+#define CLKID_USB_BUS 85
+#define CLKID_SD_EMMC 86
+#define CLKID_PSRAM 87
+#define CLKID_DMC 88
+#define CLKID_SYS_A_SEL 89
+#define CLKID_SYS_A_DIV 90
+#define CLKID_SYS_A 91
+#define CLKID_SYS_B_SEL 92
+#define CLKID_SYS_B_DIV 93
+#define CLKID_SYS_B 94
+#define CLKID_DSPA_A_SEL 95
+#define CLKID_DSPA_A_DIV 96
+#define CLKID_DSPA_A 97
+#define CLKID_DSPA_B_SEL 98
+#define CLKID_DSPA_B_DIV 99
+#define CLKID_DSPA_B 100
+#define CLKID_DSPB_A_SEL 101
+#define CLKID_DSPB_A_DIV 102
+#define CLKID_DSPB_A 103
+#define CLKID_DSPB_B_SEL 104
+#define CLKID_DSPB_B_DIV 105
+#define CLKID_DSPB_B 106
+#define CLKID_RTC_32K_IN 107
+#define CLKID_RTC_32K_DIV 108
+#define CLKID_RTC_32K_XTAL 109
+#define CLKID_RTC_32K_SEL 110
+#define CLKID_CECB_32K_IN 111
+#define CLKID_CECB_32K_DIV 112
+#define CLKID_CECB_32K_SEL_PRE 113
+#define CLKID_CECB_32K_SEL 114
+#define CLKID_CECA_32K_IN 115
+#define CLKID_CECA_32K_DIV 116
+#define CLKID_CECA_32K_SEL_PRE 117
+#define CLKID_CECA_32K_SEL 118
+#define CLKID_DIV2_PRE 119
+#define CLKID_24M_DIV2 120
+#define CLKID_GEN_SEL 121
+#define CLKID_GEN_DIV 122
+#define CLKID_SARADC_DIV 123
+#define CLKID_PWM_A_SEL 124
+#define CLKID_PWM_A_DIV 125
+#define CLKID_PWM_B_SEL 126
+#define CLKID_PWM_B_DIV 127
+#define CLKID_PWM_C_SEL 128
+#define CLKID_PWM_C_DIV 129
+#define CLKID_PWM_D_SEL 130
+#define CLKID_PWM_D_DIV 131
+#define CLKID_PWM_E_SEL 132
+#define CLKID_PWM_E_DIV 133
+#define CLKID_PWM_F_SEL 134
+#define CLKID_PWM_F_DIV 135
+#define CLKID_SPICC_SEL 136
+#define CLKID_SPICC_DIV 137
+#define CLKID_SPICC_SEL2 138
+#define CLKID_TS_DIV 139
+#define CLKID_SPIFC_SEL 140
+#define CLKID_SPIFC_DIV 141
+#define CLKID_SPIFC_SEL2 142
+#define CLKID_USB_BUS_SEL 143
+#define CLKID_USB_BUS_DIV 144
+#define CLKID_SD_EMMC_SEL 145
+#define CLKID_SD_EMMC_DIV 146
+#define CLKID_SD_EMMC_SEL2 147
+#define CLKID_PSRAM_SEL 148
+#define CLKID_PSRAM_DIV 149
+#define CLKID_PSRAM_SEL2 150
+#define CLKID_DMC_SEL 151
+#define CLKID_DMC_DIV 152
+#define CLKID_DMC_SEL2 153
+
+#endif /* __A1_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
new file mode 100644
index 000000000000..2b660c0f2c9f
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#ifndef __A1_PLL_CLKC_H
+#define __A1_PLL_CLKC_H
+
+#define CLKID_FIXED_PLL_DCO 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV3_DIV 3
+#define CLKID_FCLK_DIV5_DIV 4
+#define CLKID_FCLK_DIV7_DIV 5
+#define CLKID_FCLK_DIV2 6
+#define CLKID_FCLK_DIV3 7
+#define CLKID_FCLK_DIV5 8
+#define CLKID_FCLK_DIV7 9
+#define CLKID_HIFI_PLL 10
+
+#endif /* __A1_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h
new file mode 100644
index 000000000000..861a331963ac
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved.
+ * Author: Yu Tu <yu.tu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H
+
+#define CLKID_RTC_32K_CLKIN 0
+#define CLKID_RTC_32K_DIV 1
+#define CLKID_RTC_32K_SEL 2
+#define CLKID_RTC_32K_XATL 3
+#define CLKID_RTC 4
+#define CLKID_SYS_CLK_B_SEL 5
+#define CLKID_SYS_CLK_B_DIV 6
+#define CLKID_SYS_CLK_B 7
+#define CLKID_SYS_CLK_A_SEL 8
+#define CLKID_SYS_CLK_A_DIV 9
+#define CLKID_SYS_CLK_A 10
+#define CLKID_SYS 11
+#define CLKID_CECA_32K_CLKIN 12
+#define CLKID_CECA_32K_DIV 13
+#define CLKID_CECA_32K_SEL_PRE 14
+#define CLKID_CECA_32K_SEL 15
+#define CLKID_CECA_32K_CLKOUT 16
+#define CLKID_CECB_32K_CLKIN 17
+#define CLKID_CECB_32K_DIV 18
+#define CLKID_CECB_32K_SEL_PRE 19
+#define CLKID_CECB_32K_SEL 20
+#define CLKID_CECB_32K_CLKOUT 21
+#define CLKID_SC_CLK_SEL 22
+#define CLKID_SC_CLK_DIV 23
+#define CLKID_SC 24
+#define CLKID_12_24M 25
+#define CLKID_12M_CLK_DIV 26
+#define CLKID_12_24M_CLK_SEL 27
+#define CLKID_VID_PLL_DIV 28
+#define CLKID_VID_PLL_SEL 29
+#define CLKID_VID_PLL 30
+#define CLKID_VCLK_SEL 31
+#define CLKID_VCLK2_SEL 32
+#define CLKID_VCLK_INPUT 33
+#define CLKID_VCLK2_INPUT 34
+#define CLKID_VCLK_DIV 35
+#define CLKID_VCLK2_DIV 36
+#define CLKID_VCLK 37
+#define CLKID_VCLK2 38
+#define CLKID_VCLK_DIV1 39
+#define CLKID_VCLK_DIV2_EN 40
+#define CLKID_VCLK_DIV4_EN 41
+#define CLKID_VCLK_DIV6_EN 42
+#define CLKID_VCLK_DIV12_EN 43
+#define CLKID_VCLK2_DIV1 44
+#define CLKID_VCLK2_DIV2_EN 45
+#define CLKID_VCLK2_DIV4_EN 46
+#define CLKID_VCLK2_DIV6_EN 47
+#define CLKID_VCLK2_DIV12_EN 48
+#define CLKID_VCLK_DIV2 49
+#define CLKID_VCLK_DIV4 50
+#define CLKID_VCLK_DIV6 51
+#define CLKID_VCLK_DIV12 52
+#define CLKID_VCLK2_DIV2 53
+#define CLKID_VCLK2_DIV4 54
+#define CLKID_VCLK2_DIV6 55
+#define CLKID_VCLK2_DIV12 56
+#define CLKID_CTS_ENCI_SEL 57
+#define CLKID_CTS_ENCP_SEL 58
+#define CLKID_CTS_VDAC_SEL 59
+#define CLKID_HDMI_TX_SEL 60
+#define CLKID_CTS_ENCI 61
+#define CLKID_CTS_ENCP 62
+#define CLKID_CTS_VDAC 63
+#define CLKID_HDMI_TX 64
+#define CLKID_HDMI_SEL 65
+#define CLKID_HDMI_DIV 66
+#define CLKID_HDMI 67
+#define CLKID_TS_CLK_DIV 68
+#define CLKID_TS 69
+#define CLKID_MALI_0_SEL 70
+#define CLKID_MALI_0_DIV 71
+#define CLKID_MALI_0 72
+#define CLKID_MALI_1_SEL 73
+#define CLKID_MALI_1_DIV 74
+#define CLKID_MALI_1 75
+#define CLKID_MALI_SEL 76
+#define CLKID_VDEC_P0_SEL 77
+#define CLKID_VDEC_P0_DIV 78
+#define CLKID_VDEC_P0 79
+#define CLKID_VDEC_P1_SEL 80
+#define CLKID_VDEC_P1_DIV 81
+#define CLKID_VDEC_P1 82
+#define CLKID_VDEC_SEL 83
+#define CLKID_HEVCF_P0_SEL 84
+#define CLKID_HEVCF_P0_DIV 85
+#define CLKID_HEVCF_P0 86
+#define CLKID_HEVCF_P1_SEL 87
+#define CLKID_HEVCF_P1_DIV 88
+#define CLKID_HEVCF_P1 89
+#define CLKID_HEVCF_SEL 90
+#define CLKID_VPU_0_SEL 91
+#define CLKID_VPU_0_DIV 92
+#define CLKID_VPU_0 93
+#define CLKID_VPU_1_SEL 94
+#define CLKID_VPU_1_DIV 95
+#define CLKID_VPU_1 96
+#define CLKID_VPU 97
+#define CLKID_VPU_CLKB_TMP_SEL 98
+#define CLKID_VPU_CLKB_TMP_DIV 99
+#define CLKID_VPU_CLKB_TMP 100
+#define CLKID_VPU_CLKB_DIV 101
+#define CLKID_VPU_CLKB 102
+#define CLKID_VPU_CLKC_P0_SEL 103
+#define CLKID_VPU_CLKC_P0_DIV 104
+#define CLKID_VPU_CLKC_P0 105
+#define CLKID_VPU_CLKC_P1_SEL 106
+#define CLKID_VPU_CLKC_P1_DIV 107
+#define CLKID_VPU_CLKC_P1 108
+#define CLKID_VPU_CLKC_SEL 109
+#define CLKID_VAPB_0_SEL 110
+#define CLKID_VAPB_0_DIV 111
+#define CLKID_VAPB_0 112
+#define CLKID_VAPB_1_SEL 113
+#define CLKID_VAPB_1_DIV 114
+#define CLKID_VAPB_1 115
+#define CLKID_VAPB 116
+#define CLKID_GE2D 117
+#define CLKID_VDIN_MEAS_SEL 118
+#define CLKID_VDIN_MEAS_DIV 119
+#define CLKID_VDIN_MEAS 120
+#define CLKID_SD_EMMC_C_CLK_SEL 121
+#define CLKID_SD_EMMC_C_CLK_DIV 122
+#define CLKID_SD_EMMC_C 123
+#define CLKID_SD_EMMC_A_CLK_SEL 124
+#define CLKID_SD_EMMC_A_CLK_DIV 125
+#define CLKID_SD_EMMC_A 126
+#define CLKID_SD_EMMC_B_CLK_SEL 127
+#define CLKID_SD_EMMC_B_CLK_DIV 128
+#define CLKID_SD_EMMC_B 129
+#define CLKID_SPICC0_SEL 130
+#define CLKID_SPICC0_DIV 131
+#define CLKID_SPICC0_EN 132
+#define CLKID_PWM_A_SEL 133
+#define CLKID_PWM_A_DIV 134
+#define CLKID_PWM_A 135
+#define CLKID_PWM_B_SEL 136
+#define CLKID_PWM_B_DIV 137
+#define CLKID_PWM_B 138
+#define CLKID_PWM_C_SEL 139
+#define CLKID_PWM_C_DIV 140
+#define CLKID_PWM_C 141
+#define CLKID_PWM_D_SEL 142
+#define CLKID_PWM_D_DIV 143
+#define CLKID_PWM_D 144
+#define CLKID_PWM_E_SEL 145
+#define CLKID_PWM_E_DIV 146
+#define CLKID_PWM_E 147
+#define CLKID_PWM_F_SEL 148
+#define CLKID_PWM_F_DIV 149
+#define CLKID_PWM_F 150
+#define CLKID_PWM_G_SEL 151
+#define CLKID_PWM_G_DIV 152
+#define CLKID_PWM_G 153
+#define CLKID_PWM_H_SEL 154
+#define CLKID_PWM_H_DIV 155
+#define CLKID_PWM_H 156
+#define CLKID_PWM_I_SEL 157
+#define CLKID_PWM_I_DIV 158
+#define CLKID_PWM_I 159
+#define CLKID_PWM_J_SEL 160
+#define CLKID_PWM_J_DIV 161
+#define CLKID_PWM_J 162
+#define CLKID_SARADC_SEL 163
+#define CLKID_SARADC_DIV 164
+#define CLKID_SARADC 165
+#define CLKID_GEN_SEL 166
+#define CLKID_GEN_DIV 167
+#define CLKID_GEN 168
+#define CLKID_DDR 169
+#define CLKID_DOS 170
+#define CLKID_ETHPHY 171
+#define CLKID_MALI 172
+#define CLKID_AOCPU 173
+#define CLKID_AUCPU 174
+#define CLKID_CEC 175
+#define CLKID_SDEMMC_A 176
+#define CLKID_SDEMMC_B 177
+#define CLKID_NAND 178
+#define CLKID_SMARTCARD 179
+#define CLKID_ACODEC 180
+#define CLKID_SPIFC 181
+#define CLKID_MSR 182
+#define CLKID_IR_CTRL 183
+#define CLKID_AUDIO 184
+#define CLKID_ETH 185
+#define CLKID_UART_A 186
+#define CLKID_UART_B 187
+#define CLKID_UART_C 188
+#define CLKID_UART_D 189
+#define CLKID_UART_E 190
+#define CLKID_AIFIFO 191
+#define CLKID_TS_DDR 192
+#define CLKID_TS_PLL 193
+#define CLKID_G2D 194
+#define CLKID_SPICC0 195
+#define CLKID_SPICC1 196
+#define CLKID_USB 197
+#define CLKID_I2C_M_A 198
+#define CLKID_I2C_M_B 199
+#define CLKID_I2C_M_C 200
+#define CLKID_I2C_M_D 201
+#define CLKID_I2C_M_E 202
+#define CLKID_HDMITX_APB 203
+#define CLKID_I2C_S_A 204
+#define CLKID_USB1_TO_DDR 205
+#define CLKID_HDCP22 206
+#define CLKID_MMC_APB 207
+#define CLKID_RSA 208
+#define CLKID_CPU_DEBUG 209
+#define CLKID_VPU_INTR 210
+#define CLKID_DEMOD 211
+#define CLKID_SAR_ADC 212
+#define CLKID_GIC 213
+#define CLKID_PWM_AB 214
+#define CLKID_PWM_CD 215
+#define CLKID_PWM_EF 216
+#define CLKID_PWM_GH 217
+#define CLKID_PWM_IJ 218
+#define CLKID_HDCP22_ESMCLK_SEL 219
+#define CLKID_HDCP22_ESMCLK_DIV 220
+#define CLKID_HDCP22_ESMCLK 221
+#define CLKID_HDCP22_SKPCLK_SEL 222
+#define CLKID_HDCP22_SKPCLK_DIV 223
+#define CLKID_HDCP22_SKPCLK 224
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,s4-pll-clkc.h b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h
new file mode 100644
index 000000000000..af9f110f8b62
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved.
+ * Author: Yu Tu <yu.tu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H
+
+#define CLKID_FIXED_PLL_DCO 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV2 3
+#define CLKID_FCLK_DIV3_DIV 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4_DIV 6
+#define CLKID_FCLK_DIV4 7
+#define CLKID_FCLK_DIV5_DIV 8
+#define CLKID_FCLK_DIV5 9
+#define CLKID_FCLK_DIV7_DIV 10
+#define CLKID_FCLK_DIV7 11
+#define CLKID_FCLK_DIV2P5_DIV 12
+#define CLKID_FCLK_DIV2P5 13
+#define CLKID_GP0_PLL_DCO 14
+#define CLKID_GP0_PLL 15
+#define CLKID_HIFI_PLL_DCO 16
+#define CLKID_HIFI_PLL 17
+#define CLKID_HDMI_PLL_DCO 18
+#define CLKID_HDMI_PLL_OD 19
+#define CLKID_HDMI_PLL 20
+#define CLKID_MPLL_50M_DIV 21
+#define CLKID_MPLL_50M 22
+#define CLKID_MPLL_PREDIV 23
+#define CLKID_MPLL0_DIV 24
+#define CLKID_MPLL0 25
+#define CLKID_MPLL1_DIV 26
+#define CLKID_MPLL1 27
+#define CLKID_MPLL2_DIV 28
+#define CLKID_MPLL2 29
+#define CLKID_MPLL3_DIV 30
+#define CLKID_MPLL3 31
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 9ff4f6e4558c..06d568382c77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -52,5 +52,6 @@
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
#define ASPEED_RESET_CRT1 9
+#define ASPEED_RESET_HACE 10
#endif
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 62b9520a00fd..7ae96c7bd72f 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -57,8 +57,6 @@
#define ASPEED_CLK_GATE_I3C3CLK 40
#define ASPEED_CLK_GATE_I3C4CLK 41
#define ASPEED_CLK_GATE_I3C5CLK 42
-#define ASPEED_CLK_GATE_I3C6CLK 43
-#define ASPEED_CLK_GATE_I3C7CLK 44
#define ASPEED_CLK_GATE_FSICLK 45
@@ -87,11 +85,25 @@
#define ASPEED_CLK_MAC2RCLK 68
#define ASPEED_CLK_MAC3RCLK 69
#define ASPEED_CLK_MAC4RCLK 70
+#define ASPEED_CLK_I3C 71
+#define ASPEED_CLK_FSI 72
-/* Only list resets here that are not part of a gate */
+/* Only list resets here that are not part of a clock gate + reset pair */
#define ASPEED_RESET_ADC 55
#define ASPEED_RESET_JTAG_MASTER2 54
+
+#define ASPEED_RESET_MAC4 53
+#define ASPEED_RESET_MAC3 52
+
+#define ASPEED_RESET_I3C5 45
+#define ASPEED_RESET_I3C4 44
+#define ASPEED_RESET_I3C3 43
+#define ASPEED_RESET_I3C2 42
+#define ASPEED_RESET_I3C1 41
+#define ASPEED_RESET_I3C0 40
+#define ASPEED_RESET_I3C 39
#define ASPEED_RESET_I3C_DMA 39
+
#define ASPEED_RESET_PWM 37
#define ASPEED_RESET_PECI 36
#define ASPEED_RESET_MII 35
@@ -111,6 +123,7 @@
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
#define ASPEED_RESET_PCI_DP 5
+#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
#define ASPEED_RESET_SDRAM 0
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index eba17106608b..3e3972a814c1 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -24,6 +24,19 @@
#define PMC_PLLACK 7
#define PMC_PLLBCK 8
#define PMC_AUDIOPLLCK 9
+#define PMC_AUDIOPINCK 10
+
+/* SAMA7G5 */
+#define PMC_CPUPLL (PMC_MAIN + 1)
+#define PMC_SYSPLL (PMC_MAIN + 2)
+#define PMC_DDRPLL (PMC_MAIN + 3)
+#define PMC_IMGPLL (PMC_MAIN + 4)
+#define PMC_BAUDPLL (PMC_MAIN + 5)
+#define PMC_AUDIOPMCPLL (PMC_MAIN + 6)
+#define PMC_AUDIOIOPLL (PMC_MAIN + 7)
+#define PMC_ETHPLL (PMC_MAIN + 8)
+#define PMC_CPU (PMC_MAIN + 9)
+#define PMC_MCK1 (PMC_MAIN + 10)
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index f561f5c5ef8f..08c82c22fa5f 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -37,6 +37,26 @@
#define AUD_CLKID_SPDIFIN_CLK 56
#define AUD_CLKID_PDM_DCLK 57
#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
#define AUD_CLKID_MST_A_SCLK 79
#define AUD_CLKID_MST_B_SCLK 80
#define AUD_CLKID_MST_C_SCLK 81
@@ -49,6 +69,30 @@
#define AUD_CLKID_MST_D_LRCLK 89
#define AUD_CLKID_MST_E_LRCLK 90
#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
@@ -70,8 +114,24 @@
#define AUD_CLKID_TDMOUT_A_LRCLK 134
#define AUD_CLKID_TDMOUT_B_LRCLK 135
#define AUD_CLKID_TDMOUT_C_LRCLK 136
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
#define AUD_CLKID_SPDIFOUT_B 151
#define AUD_CLKID_SPDIFOUT_B_CLK 152
+#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
+#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
#define AUD_CLKID_TDM_MCLK_PAD0 155
#define AUD_CLKID_TDM_MCLK_PAD1 156
#define AUD_CLKID_TDM_LRCLK_PAD0 157
@@ -90,5 +150,10 @@
#define AUD_CLKID_FRDDR_D 170
#define AUD_CLKID_TODDR_D 171
#define AUD_CLKID_LOOPBACK_B 172
+#define AUD_CLKID_CLK81_EN 173
+#define AUD_CLKID_SYSCLK_A_DIV 174
+#define AUD_CLKID_SYSCLK_B_DIV 175
+#define AUD_CLKID_SYSCLK_A_EN 176
+#define AUD_CLKID_SYSCLK_B_EN 177
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index fd1f938c38d1..442162822b88 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -67,10 +69,80 @@
#define CLKID_AO_I2C 58
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
+#define CLKID_SD_EMMC_B_CLK0_SEL 61
+#define CLKID_SD_EMMC_B_CLK0_DIV 62
+#define CLKID_SD_EMMC_C_CLK0_SEL 63
+#define CLKID_SD_EMMC_C_CLK0_DIV 64
+#define CLKID_MPLL0_DIV 65
+#define CLKID_MPLL1_DIV 66
+#define CLKID_MPLL2_DIV 67
+#define CLKID_MPLL3_DIV 68
#define CLKID_HIFI_PLL 69
+#define CLKID_MPLL_PREDIV 70
+#define CLKID_FCLK_DIV2_DIV 71
+#define CLKID_FCLK_DIV3_DIV 72
+#define CLKID_FCLK_DIV4_DIV 73
+#define CLKID_FCLK_DIV5_DIV 74
+#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
#define CLKID_PCIE_CML_EN0 79
#define CLKID_PCIE_CML_EN1 80
-#define CLKID_MIPI_ENABLE 81
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
#define CLKID_GEN_CLK 84
+#define CLKID_SYS_PLL_DCO 85
+#define CLKID_FIXED_PLL_DCO 86
+#define CLKID_GP0_PLL_DCO 87
+#define CLKID_HIFI_PLL_DCO 88
+#define CLKID_PCIE_PLL_DCO 89
+#define CLKID_PCIE_PLL_OD 90
+#define CLKID_VPU_0_DIV 91
+#define CLKID_VPU_0_SEL 92
+#define CLKID_VPU_0 93
+#define CLKID_VPU_1_DIV 94
+#define CLKID_VPU_1_SEL 95
+#define CLKID_VPU_1 96
+#define CLKID_VPU 97
+#define CLKID_VAPB_0_DIV 98
+#define CLKID_VAPB_0_SEL 99
+#define CLKID_VAPB_0 100
+#define CLKID_VAPB_1_DIV 101
+#define CLKID_VAPB_1_SEL 102
+#define CLKID_VAPB_1 103
+#define CLKID_VAPB_SEL 104
+#define CLKID_VAPB 105
+#define CLKID_VCLK 106
+#define CLKID_VCLK2 107
+#define CLKID_VCLK_SEL 108
+#define CLKID_VCLK2_SEL 109
+#define CLKID_VCLK_INPUT 110
+#define CLKID_VCLK2_INPUT 111
+#define CLKID_VCLK_DIV 112
+#define CLKID_VCLK2_DIV 113
+#define CLKID_VCLK_DIV2_EN 114
+#define CLKID_VCLK_DIV4_EN 115
+#define CLKID_VCLK_DIV6_EN 116
+#define CLKID_VCLK_DIV12_EN 117
+#define CLKID_VCLK2_DIV2_EN 118
+#define CLKID_VCLK2_DIV4_EN 119
+#define CLKID_VCLK2_DIV6_EN 120
+#define CLKID_VCLK2_DIV12_EN 121
+#define CLKID_VCLK_DIV1 122
+#define CLKID_VCLK_DIV2 123
+#define CLKID_VCLK_DIV4 124
+#define CLKID_VCLK_DIV6 125
+#define CLKID_VCLK_DIV12 126
+#define CLKID_VCLK2_DIV1 127
+#define CLKID_VCLK2_DIV2 128
+#define CLKID_VCLK2_DIV4 129
+#define CLKID_VCLK2_DIV6 130
+#define CLKID_VCLK2_DIV12 131
+#define CLKID_CTS_ENCL_SEL 132
+#define CLKID_CTS_ENCL 133
+#define CLKID_VDIN_MEAS_SEL 134
+#define CLKID_VDIN_MEAS_DIV 135
+#define CLKID_VDIN_MEAS 136
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
index b1f4971642e6..14e424a7c08c 100644
--- a/include/dt-bindings/clock/axis,artpec6-clkctrl.h
+++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
@@ -2,7 +2,7 @@
/*
* ARTPEC-6 clock controller indexes
*
- * Copyright 2016 Axis Comunications AB.
+ * Copyright 2016 Axis Communications AB.
*/
#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
diff --git a/include/dt-bindings/clock/bcm21664.h b/include/dt-bindings/clock/bcm21664.h
index 5a7f0e4750a8..7c7492742f3d 100644
--- a/include/dt-bindings/clock/bcm21664.h
+++ b/include/dt-bindings/clock/bcm21664.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLOCK_BCM21664_H
diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h
index a763460cf1af..d74ca42112e7 100644
--- a/include/dt-bindings/clock/bcm281xx.h
+++ b/include/dt-bindings/clock/bcm281xx.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLOCK_BCM281XX_H
diff --git a/include/dt-bindings/clock/bcm63268-clock.h b/include/dt-bindings/clock/bcm63268-clock.h
index da23e691d359..dea8adc8510e 100644
--- a/include/dt-bindings/clock/bcm63268-clock.h
+++ b/include/dt-bindings/clock/bcm63268-clock.h
@@ -27,4 +27,17 @@
#define BCM63268_CLK_TBUS 27
#define BCM63268_CLK_ROBOSW250 31
+#define BCM63268_TCLK_EPHY1 0
+#define BCM63268_TCLK_EPHY2 1
+#define BCM63268_TCLK_EPHY3 2
+#define BCM63268_TCLK_GPHY1 3
+#define BCM63268_TCLK_DSL 4
+#define BCM63268_TCLK_WAKEON_EPHY 6
+#define BCM63268_TCLK_WAKEON_DSL 7
+#define BCM63268_TCLK_FAP1 11
+#define BCM63268_TCLK_FAP2 15
+#define BCM63268_TCLK_UTO_50 16
+#define BCM63268_TCLK_UTO_EXTIN 17
+#define BCM63268_TCLK_USB_REF 18
+
#endif /* __DT_BINDINGS_CLOCK_BCM63268_H */
diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h
index a6f009821137..38140fa87b09 100644
--- a/include/dt-bindings/clock/boston-clock.h
+++ b/include/dt-bindings/clock/boston-clock.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Imagination Technologies
- *
- * SPDX-License-Identifier: GPL-2.0
*/
#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
diff --git a/include/dt-bindings/clock/cirrus,cs2000-cp.h b/include/dt-bindings/clock/cirrus,cs2000-cp.h
new file mode 100644
index 000000000000..fe3ac71750a8
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,cs2000-cp.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Daniel Mack
+ */
+
+#ifndef __DT_BINDINGS_CS2000CP_CLK_H
+#define __DT_BINDINGS_CS2000CP_CLK_H
+
+#define CS2000CP_AUX_OUTPUT_REF_CLK 0
+#define CS2000CP_AUX_OUTPUT_CLK_IN 1
+#define CS2000CP_AUX_OUTPUT_CLK_OUT 2
+#define CS2000CP_AUX_OUTPUT_PLL_LOCK 3
+
+#endif /* __DT_BINDINGS_CS2000CP_CLK_H */
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 8cec5a1e1806..8a903c78c5a5 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -8,177 +8,6 @@
#define DRA7_CLKCTRL_OFFSET 0x20
#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* mpu clocks */
-#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* ipu clocks */
-#define _DRA7_IPU_CLKCTRL_OFFSET 0x40
-#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET)
-#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50)
-#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58)
-#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60)
-#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68)
-#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70)
-#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78)
-#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80)
-
-/* rtc clocks */
-#define DRA7_RTC_CLKCTRL_OFFSET 0x40
-#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
-#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44)
-
-/* vip clocks */
-#define DRA7_VIP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_VIP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_VIP3_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-
-/* vpe clocks */
-#define DRA7_VPE_CLKCTRL_OFFSET 0x60
-#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
-#define DRA7_VPE_CLKCTRL DRA7_VPE_CLKCTRL_INDEX(0x64)
-
-/* coreaon clocks */
-#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
-
-/* l3main1 clocks */
-#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-
-/* dma clocks */
-#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* emif clocks */
-#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* atl clocks */
-#define DRA7_ATL_CLKCTRL_OFFSET 0x0
-#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
-#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
-
-/* l4cfg clocks */
-#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
-#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
-#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
-#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
-#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* l3instr clocks */
-#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
-/* dss clocks */
-#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* l3init clocks */
-#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0)
-#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8)
-#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0)
-#define DRA7_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
-#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
-#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
-
-/* l4per clocks */
-#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0
-#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET)
-#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc)
-#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14)
-#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28)
-#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30)
-#define DRA7_TIMER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48)
-#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50)
-#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58)
-#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60)
-#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68)
-#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70)
-#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78)
-#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80)
-#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88)
-#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90)
-#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98)
-#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0)
-#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8)
-#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0)
-#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8)
-#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0)
-#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4)
-#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8)
-#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0)
-#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8)
-#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0)
-#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8)
-#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100)
-#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108)
-#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110)
-#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118)
-#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120)
-#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128)
-#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130)
-#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138)
-#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140)
-#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148)
-#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150)
-#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158)
-#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160)
-#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168)
-#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170)
-#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178)
-#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190)
-#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198)
-#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
-#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
-#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
-#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
-#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
-#define DRA7_UART7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
-#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
-#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
-#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
-#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204)
-#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208)
-
-/* wkupaon clocks */
-#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* XXX: Compatibility part end. */
-
/* mpu clocks */
#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
@@ -263,10 +92,17 @@
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
@@ -332,6 +168,7 @@
#define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0)
#define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0)
#define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8)
+#define DRA7_L4SEC_SHAM2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1f8)
/* l4per2 clocks */
#define DRA7_L4PER2_CLKCTRL_OFFSET 0xc
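With the compatibility aliases gone, every remaining ID keeps the same arithmetic: DRA7_<domain>_<module>_CLKCTRL is the module's CM_*_CLKCTRL register offset minus the domain's base offset, and it lands in the first cell of a TI clkctrl specifier. A sketch under those assumptions; the consumer node and provider label are illustrative:

    #include <dt-bindings/clock/dra7.h>

    /* DRA7_MPU_MPU_CLKCTRL = DRA7_CLKCTRL_INDEX(0x20) = 0x20 - 0x20 = 0;
     * the second cell (0 here) selects the clock bit within that register. */
    mpu: mpu {
            clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>;
    };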
diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h
deleted file mode 100644
index 4b48d15fe194..000000000000
--- a/include/dt-bindings/clock/efm32-cmu.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H
-#define __DT_BINDINGS_CLOCK_EFM32_CMU_H
-
-#define clk_HFXO 0
-#define clk_HFRCO 1
-#define clk_LFXO 2
-#define clk_LFRCO 3
-#define clk_ULFRCO 4
-#define clk_AUXHFRCO 5
-#define clk_HFCLKNODIV 6
-#define clk_HFCLK 7
-#define clk_HFPERCLK 8
-#define clk_HFCORECLK 9
-#define clk_LFACLK 10
-#define clk_LFBCLK 11
-#define clk_WDOGCLK 12
-#define clk_HFCORECLKDMA 13
-#define clk_HFCORECLKAES 14
-#define clk_HFCORECLKUSBC 15
-#define clk_HFCORECLKUSB 16
-#define clk_HFCORECLKLE 17
-#define clk_HFCORECLKEBI 18
-#define clk_HFPERCLKUSART0 19
-#define clk_HFPERCLKUSART1 20
-#define clk_HFPERCLKUSART2 21
-#define clk_HFPERCLKUART0 22
-#define clk_HFPERCLKUART1 23
-#define clk_HFPERCLKTIMER0 24
-#define clk_HFPERCLKTIMER1 25
-#define clk_HFPERCLKTIMER2 26
-#define clk_HFPERCLKTIMER3 27
-#define clk_HFPERCLKACMP0 28
-#define clk_HFPERCLKACMP1 29
-#define clk_HFPERCLKI2C0 30
-#define clk_HFPERCLKI2C1 31
-#define clk_HFPERCLKGPIO 32
-#define clk_HFPERCLKVCMP 33
-#define clk_HFPERCLKPRS 34
-#define clk_HFPERCLKADC0 35
-#define clk_HFPERCLKDAC0 36
-
-#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */
diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h
new file mode 100644
index 000000000000..717d23a5e5ae
--- /dev/null
+++ b/include/dt-bindings/clock/en7523-clk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+#define _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+
+#define EN7523_CLK_GSW 0
+#define EN7523_CLK_EMI 1
+#define EN7523_CLK_BUS 2
+#define EN7523_CLK_SLIC 3
+#define EN7523_CLK_SPI 4
+#define EN7523_CLK_NPU 5
+#define EN7523_CLK_CRYPTO 6
+#define EN7523_CLK_PCIE 7
+
+#define EN7523_NUM_CLOCKS 8
+
+#endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
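EN7523_NUM_CLOCKS is the provider's clock count rather than a clock ID, so consumers only ever reference the 0-7 values. A minimal sketch with a hypothetical consumer node and an illustrative &scuclk provider label:

    #include <dt-bindings/clock/en7523-clk.h>

    pcie0: pcie@1fa91000 {
            clocks = <&scuclk EN7523_CLK_PCIE>;
    };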
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index fe8214017b46..cc7268151843 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -257,12 +257,6 @@
#define CLK_SCLK_MMC2 249
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define CLK_NR_CLKS 250
-
-/*
* CMU DMC
*/
@@ -284,12 +278,6 @@
#define CLK_DIV_DMCD 20
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_DMC 21
-
-/*
* CMU ISP
*/
@@ -344,10 +332,4 @@
#define CLK_ASYNCAXIM 46
#define CLK_SCLK_MPWM_ISP 47
-/*
- * Total number of clocks of CMU_ISP.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_ISP 48
-
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
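The removed CLK_NR_CLKS/NR_CLKS_* totals were driver bookkeeping, not clock IDs; device trees never reference them, which is why they can be dropped from the binding header without breaking consumers. For illustration, a typical consumer (hypothetical node, illustrative &cmu label) keeps working unchanged:

    #include <dt-bindings/clock/exynos3250.h>

    mmc2: mmc@12530000 {
            clocks = <&cmu CLK_SCLK_MMC2>;
    };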
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 88ec3968b90a..4ebff79ed9e2 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -209,6 +209,7 @@
#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */
#define CLK_MOUT_HDMI 396
#define CLK_MOUT_MIXER 397
+#define CLK_MOUT_VPLLSRC 398
/* gate clocks - ppmu */
#define CLK_PPMULEFT 400
@@ -236,9 +237,7 @@
#define CLK_DIV_C2C 458 /* Exynos4x12 only */
#define CLK_DIV_GDL 459
#define CLK_DIV_GDR 460
-
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 461
+#define CLK_DIV_CORE2 461
/* Exynos4x12 ISP clocks */
#define CLK_ISP_FIMC_ISP 1
@@ -273,6 +272,4 @@
#define CLK_ISP_DIV_MCUISP0 29
#define CLK_ISP_DIV_MCUISP1 30
-#define CLK_NR_ISP_CLKS 31
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index bc8a3c53a54b..2337c028bbe1 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -19,6 +19,7 @@
#define CLK_FOUT_EPLL 7
#define CLK_FOUT_VPLL 8
#define CLK_ARM_CLK 9
+#define CLK_DIV_ARM2 10
/* gate for special clocks (sclk) */
#define CLK_SCLK_CAM_BAYER 128
@@ -172,8 +173,8 @@
#define CLK_MOUT_GPLL 1025
#define CLK_MOUT_ACLK200_DISP1_SUB 1026
#define CLK_MOUT_ACLK300_DISP1_SUB 1027
-
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 1028
+#define CLK_MOUT_APLL 1028
+#define CLK_MOUT_MPLL 1029
+#define CLK_MOUT_VPLLSRC 1030
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h
index 98a58cbd81b2..dfde40ea40f0 100644
--- a/include/dt-bindings/clock/exynos5260-clk.h
+++ b/include/dt-bindings/clock/exynos5260-clk.h
@@ -137,8 +137,6 @@
#define PHYCLK_USBHOST20_PHY_CLK48MOHCI 122
#define PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 123
#define PHYCLK_USBDRD30_UDRD30_PHYCLOCK 124
-#define TOP_NR_CLK 125
-
/* List Of Clocks For CMU_EGL */
@@ -153,8 +151,6 @@
#define EGL_DOUT_ACLK_EGL 9
#define EGL_DOUT_EGL2 10
#define EGL_DOUT_EGL1 11
-#define EGL_NR_CLK 12
-
/* List Of Clocks For CMU_KFC */
@@ -168,8 +164,6 @@
#define KFC_DOUT_KFC_ATCLK 8
#define KFC_DOUT_KFC2 9
#define KFC_DOUT_KFC1 10
-#define KFC_NR_CLK 11
-
/* List Of Clocks For CMU_MIF */
@@ -200,8 +194,6 @@
#define MIF_CLK_INTMEM 25
#define MIF_SCLK_LPDDR3PHY_WRAP_U1 26
#define MIF_SCLK_LPDDR3PHY_WRAP_U0 27
-#define MIF_NR_CLK 28
-
/* List Of Clocks For CMU_G3D */
@@ -211,8 +203,6 @@
#define G3D_DOUT_ACLK_G3D 4
#define G3D_CLK_G3D_HPM 5
#define G3D_CLK_G3D 6
-#define G3D_NR_CLK 7
-
/* List Of Clocks For CMU_AUD */
@@ -231,8 +221,6 @@
#define AUD_SCLK_AUD_UART 13
#define AUD_SCLK_PCM 14
#define AUD_SCLK_I2S 15
-#define AUD_NR_CLK 16
-
/* List Of Clocks For CMU_MFC */
@@ -241,8 +229,6 @@
#define MFC_CLK_MFC 3
#define MFC_CLK_SMMU2_MFCM1 4
#define MFC_CLK_SMMU2_MFCM0 5
-#define MFC_NR_CLK 6
-
/* List Of Clocks For CMU_GSCL */
@@ -272,8 +258,6 @@
#define GSCL_CLK_SMMU3_MSCL1 24
#define GSCL_SCLK_CSIS1_WRAP 25
#define GSCL_SCLK_CSIS0_WRAP 26
-#define GSCL_NR_CLK 27
-
/* List Of Clocks For CMU_FSYS */
@@ -295,8 +279,6 @@
#define FSYS_CLK_SMMU_RTIC 16
#define FSYS_PHYCLK_USBDRD30 17
#define FSYS_PHYCLK_USBHOST20 18
-#define FSYS_NR_CLK 19
-
/* List Of Clocks For CMU_PERI */
@@ -366,8 +348,6 @@
#define PERI_SCLK_SPDIF 64
#define PERI_SCLK_I2S 65
#define PERI_SCLK_PCM1 66
-#define PERI_NR_CLK 67
-
/* List Of Clocks For CMU_DISP */
@@ -406,8 +386,6 @@
#define DISP_CLK_DP 33
#define DISP_SCLK_PIXEL 34
#define DISP_MOUT_HDMI_PHY_PIXEL_USER 35
-#define DISP_NR_CLK 36
-
/* List Of Clocks For CMU_G2D */
@@ -423,8 +401,6 @@
#define G2D_CLK_SMMU_SSS 10
#define G2D_CLK_SMMU_MDMA 11
#define G2D_CLK_SMMU3_G2D 12
-#define G2D_NR_CLK 13
-
/* List Of Clocks For CMU_ISP */
@@ -461,6 +437,5 @@
#define ISP_SCLK_SPI0_EXT 31
#define ISP_SCLK_SPI1_EXT 32
#define ISP_SCLK_UART_EXT 33
-#define ISP_NR_CLK 34
#endif
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 86c2ad56c5ef..7a1a93f8df6c 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -61,6 +61,4 @@
#define CLK_USBD301 367
#define CLK_SSS 471
-#define CLK_NR_CLKS 512
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5410_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 02d5ac469a3d..73e82527a9e9 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -230,6 +230,12 @@
#define CLK_MOUT_USER_MAU_EPLL 659
#define CLK_MOUT_SCLK_SPLL 660
#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661
+#define CLK_MOUT_SW_ACLK_G3D 662
+#define CLK_MOUT_APLL 663
+#define CLK_MOUT_MSPLL_CPU 664
+#define CLK_MOUT_KPLL 665
+#define CLK_MOUT_MSPLL_KFC 666
+
/* divider clocks */
#define CLK_DOUT_PIXEL 768
@@ -265,7 +271,4 @@
#define CLK_DOUT_PCLK_DREX0 798
#define CLK_DOUT_PCLK_DREX1 799
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 800
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 25ffa53573a5..d12c1a963fa1 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -188,8 +188,6 @@
#define CLK_SCLK_ISP_SPI0_CAM1 252
#define CLK_SCLK_HDMI_SPDIF_DISP 253
-#define TOP_NR_CLK 254
-
/* CMU_CPIF */
#define CLK_FOUT_MPHY_PLL 1
@@ -200,8 +198,6 @@
#define CLK_SCLK_MPHY_PLL 11
#define CLK_SCLK_UFS_MPHY 11
-#define CPIF_NR_CLK 12
-
/* CMU_MIF */
#define CLK_FOUT_MEM0_PLL 1
#define CLK_FOUT_MEM1_PLL 2
@@ -396,8 +392,6 @@
#define CLK_SCLK_BUS_PLL_APOLLO 199
#define CLK_SCLK_BUS_PLL_ATLAS 200
-#define MIF_NR_CLK 201
-
/* CMU_PERIC */
#define CLK_PCLK_SPI2 1
#define CLK_PCLK_SPI1 2
@@ -468,8 +462,6 @@
#define CLK_DIV_SCLK_SCI 70
#define CLK_DIV_SCLK_SC_IN 71
-#define PERIC_NR_CLK 72
-
/* CMU_PERIS */
#define CLK_PCLK_HPM_APBIF 1
#define CLK_PCLK_TMU1_APBIF 2
@@ -513,8 +505,6 @@
#define CLK_SCLK_ANTIRBK_CNT 40
#define CLK_SCLK_OTP_CON 41
-#define PERIS_NR_CLK 42
-
/* CMU_FSYS */
#define CLK_MOUT_ACLK_FSYS_200_USER 1
#define CLK_MOUT_SCLK_MMC2_USER 2
@@ -621,8 +611,6 @@
#define CLK_SCLK_USBDRD30 114
#define CLK_PCIE 115
-#define FSYS_NR_CLK 116
-
/* CMU_G2D */
#define CLK_MUX_ACLK_G2D_266_USER 1
#define CLK_MUX_ACLK_G2D_400_USER 2
@@ -653,8 +641,6 @@
#define CLK_PCLK_G2D 25
#define CLK_PCLK_SMMU_G2D 26
-#define G2D_NR_CLK 27
-
/* CMU_DISP */
#define CLK_FOUT_DISP_PLL 1
@@ -771,8 +757,6 @@
#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
-#define DISP_NR_CLK 116
-
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
#define CLK_MOUT_SCLK_AUD_PCM 2
@@ -824,8 +808,6 @@
#define CLK_SCLK_I2S_BCLK 46
#define CLK_SCLK_AUD_I2S 47
-#define AUD_NR_CLK 48
-
/* CMU_BUS{0|1|2} */
#define CLK_DIV_PCLK_BUS_133 1
@@ -840,8 +822,6 @@
#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
-#define BUSx_NR_CLK 11
-
/* CMU_G3D */
#define CLK_FOUT_G3D_PLL 1
@@ -865,8 +845,6 @@
#define CLK_PCLK_SYSREG_G3D 18
#define CLK_SCLK_HPM_G3D 19
-#define G3D_NR_CLK 20
-
/* CMU_GSCL */
#define CLK_MOUT_ACLK_GSCL_111_USER 1
#define CLK_MOUT_ACLK_GSCL_333_USER 2
@@ -898,8 +876,6 @@
#define CLK_PCLK_SMMU_GSCL1 27
#define CLK_PCLK_SMMU_GSCL2 28
-#define GSCL_NR_CLK 29
-
/* CMU_APOLLO */
#define CLK_FOUT_APOLLO_PLL 1
@@ -935,8 +911,6 @@
#define CLK_SCLK_HPM_APOLLO 29
#define CLK_SCLK_APOLLO 30
-#define APOLLO_NR_CLK 31
-
/* CMU_ATLAS */
#define CLK_FOUT_ATLAS_PLL 1
@@ -981,8 +955,6 @@
#define CLK_ATCLK 38
#define CLK_SCLK_ATLAS 39
-#define ATLAS_NR_CLK 40
-
/* CMU_MSCL */
#define CLK_MOUT_SCLK_JPEG_USER 1
#define CLK_MOUT_ACLK_MSCL_400_USER 2
@@ -1016,8 +988,6 @@
#define CLK_PCLK_SMMU_JPEG 28
#define CLK_SCLK_JPEG 29
-#define MSCL_NR_CLK 30
-
/* CMU_MFC */
#define CLK_MOUT_ACLK_MFC_400_USER 1
@@ -1040,8 +1010,6 @@
#define CLK_PCLK_SMMU_MFC_1 17
#define CLK_PCLK_SMMU_MFC_0 18
-#define MFC_NR_CLK 19
-
/* CMU_HEVC */
#define CLK_MOUT_ACLK_HEVC_400_USER 1
@@ -1064,8 +1032,6 @@
#define CLK_PCLK_SMMU_HEVC_1 17
#define CLK_PCLK_SMMU_HEVC_0 18
-#define HEVC_NR_CLK 19
-
/* CMU_ISP */
#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
#define CLK_MOUT_ACLK_ISP_400_USER 2
@@ -1147,8 +1113,6 @@
#define CLK_SCLK_PIXELASYNCS_ISPC 76
#define CLK_SCLK_PIXELASYNCM_ISPC 77
-#define ISP_NR_CLK 78
-
/* CMU_CAM0 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
@@ -1285,8 +1249,6 @@
#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
-#define CAM0_NR_CLK 134
-
/* CMU_CAM1 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
@@ -1404,12 +1366,8 @@
#define CLK_ATCLK_ISP 111
#define CLK_SCLK_ISP_CA5 112
-#define CAM1_NR_CLK 113
-
/* CMU_IMEM */
#define CLK_ACLK_SLIMSSS 2
#define CLK_PCLK_SLIMSSS 35
-#define IMEM_NR_CLK 36
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
new file mode 100644
index 000000000000..255e3aa94323
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Dávid Virág
+ *
+ * Device Tree binding constants for Exynos7885 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_DOUT_SHARED0_DIV2 3
+#define CLK_DOUT_SHARED0_DIV3 4
+#define CLK_DOUT_SHARED0_DIV4 5
+#define CLK_DOUT_SHARED0_DIV5 6
+#define CLK_DOUT_SHARED1_DIV2 7
+#define CLK_DOUT_SHARED1_DIV3 8
+#define CLK_DOUT_SHARED1_DIV4 9
+#define CLK_MOUT_CORE_BUS 10
+#define CLK_MOUT_CORE_CCI 11
+#define CLK_MOUT_CORE_G3D 12
+#define CLK_DOUT_CORE_BUS 13
+#define CLK_DOUT_CORE_CCI 14
+#define CLK_DOUT_CORE_G3D 15
+#define CLK_GOUT_CORE_BUS 16
+#define CLK_GOUT_CORE_CCI 17
+#define CLK_GOUT_CORE_G3D 18
+#define CLK_MOUT_PERI_BUS 19
+#define CLK_MOUT_PERI_SPI0 20
+#define CLK_MOUT_PERI_SPI1 21
+#define CLK_MOUT_PERI_UART0 22
+#define CLK_MOUT_PERI_UART1 23
+#define CLK_MOUT_PERI_UART2 24
+#define CLK_MOUT_PERI_USI0 25
+#define CLK_MOUT_PERI_USI1 26
+#define CLK_MOUT_PERI_USI2 27
+#define CLK_DOUT_PERI_BUS 28
+#define CLK_DOUT_PERI_SPI0 29
+#define CLK_DOUT_PERI_SPI1 30
+#define CLK_DOUT_PERI_UART0 31
+#define CLK_DOUT_PERI_UART1 32
+#define CLK_DOUT_PERI_UART2 33
+#define CLK_DOUT_PERI_USI0 34
+#define CLK_DOUT_PERI_USI1 35
+#define CLK_DOUT_PERI_USI2 36
+#define CLK_GOUT_PERI_BUS 37
+#define CLK_GOUT_PERI_SPI0 38
+#define CLK_GOUT_PERI_SPI1 39
+#define CLK_GOUT_PERI_UART0 40
+#define CLK_GOUT_PERI_UART1 41
+#define CLK_GOUT_PERI_UART2 42
+#define CLK_GOUT_PERI_USI0 43
+#define CLK_GOUT_PERI_USI1 44
+#define CLK_GOUT_PERI_USI2 45
+#define CLK_MOUT_FSYS_BUS 46
+#define CLK_MOUT_FSYS_MMC_CARD 47
+#define CLK_MOUT_FSYS_MMC_EMBD 48
+#define CLK_MOUT_FSYS_MMC_SDIO 49
+#define CLK_MOUT_FSYS_USB30DRD 50
+#define CLK_DOUT_FSYS_BUS 51
+#define CLK_DOUT_FSYS_MMC_CARD 52
+#define CLK_DOUT_FSYS_MMC_EMBD 53
+#define CLK_DOUT_FSYS_MMC_SDIO 54
+#define CLK_DOUT_FSYS_USB30DRD 55
+#define CLK_GOUT_FSYS_BUS 56
+#define CLK_GOUT_FSYS_MMC_CARD 57
+#define CLK_GOUT_FSYS_MMC_EMBD 58
+#define CLK_GOUT_FSYS_MMC_SDIO 59
+#define CLK_GOUT_FSYS_USB30DRD 60
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_G3D_USER 3
+#define CLK_MOUT_CORE_GIC 4
+#define CLK_DOUT_CORE_BUSP 5
+#define CLK_GOUT_CCI_ACLK 6
+#define CLK_GOUT_GIC400_CLK 7
+#define CLK_GOUT_TREX_D_CORE_ACLK 8
+#define CLK_GOUT_TREX_D_CORE_GCLK 9
+#define CLK_GOUT_TREX_D_CORE_PCLK 10
+#define CLK_GOUT_TREX_P_CORE_ACLK_P_CORE 11
+#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12
+#define CLK_GOUT_TREX_P_CORE_PCLK 13
+#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_SPI0_USER 2
+#define CLK_MOUT_PERI_SPI1_USER 3
+#define CLK_MOUT_PERI_UART0_USER 4
+#define CLK_MOUT_PERI_UART1_USER 5
+#define CLK_MOUT_PERI_UART2_USER 6
+#define CLK_MOUT_PERI_USI0_USER 7
+#define CLK_MOUT_PERI_USI1_USER 8
+#define CLK_MOUT_PERI_USI2_USER 9
+#define CLK_GOUT_GPIO_TOP_PCLK 10
+#define CLK_GOUT_HSI2C0_PCLK 11
+#define CLK_GOUT_HSI2C1_PCLK 12
+#define CLK_GOUT_HSI2C2_PCLK 13
+#define CLK_GOUT_HSI2C3_PCLK 14
+#define CLK_GOUT_I2C0_PCLK 15
+#define CLK_GOUT_I2C1_PCLK 16
+#define CLK_GOUT_I2C2_PCLK 17
+#define CLK_GOUT_I2C3_PCLK 18
+#define CLK_GOUT_I2C4_PCLK 19
+#define CLK_GOUT_I2C5_PCLK 20
+#define CLK_GOUT_I2C6_PCLK 21
+#define CLK_GOUT_I2C7_PCLK 22
+#define CLK_GOUT_PWM_MOTOR_PCLK 23
+#define CLK_GOUT_SPI0_PCLK 24
+#define CLK_GOUT_SPI0_EXT_CLK 25
+#define CLK_GOUT_SPI1_PCLK 26
+#define CLK_GOUT_SPI1_EXT_CLK 27
+#define CLK_GOUT_UART0_EXT_UCLK 28
+#define CLK_GOUT_UART0_PCLK 29
+#define CLK_GOUT_UART1_EXT_UCLK 30
+#define CLK_GOUT_UART1_PCLK 31
+#define CLK_GOUT_UART2_EXT_UCLK 32
+#define CLK_GOUT_UART2_PCLK 33
+#define CLK_GOUT_USI0_PCLK 34
+#define CLK_GOUT_USI0_SCLK 35
+#define CLK_GOUT_USI1_PCLK 36
+#define CLK_GOUT_USI1_SCLK 37
+#define CLK_GOUT_USI2_PCLK 38
+#define CLK_GOUT_USI2_SCLK 39
+#define CLK_GOUT_MCT_PCLK 40
+#define CLK_GOUT_SYSREG_PERI_PCLK 41
+#define CLK_GOUT_WDT0_PCLK 42
+#define CLK_GOUT_WDT1_PCLK 43
+
+/* CMU_FSYS */
+#define CLK_MOUT_FSYS_BUS_USER 1
+#define CLK_MOUT_FSYS_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+#define CLK_GOUT_MMC_CARD_ACLK 5
+#define CLK_GOUT_MMC_CARD_SDCLKIN 6
+#define CLK_GOUT_MMC_EMBD_ACLK 7
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
+#define CLK_GOUT_MMC_SDIO_ACLK 9
+#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_USB30DRD_USER 11
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
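The prefixes follow one scheme per CMU: CLK_FOUT_* are PLL outputs, CLK_MOUT_* mux outputs, CLK_DOUT_* divider outputs and CLK_GOUT_* gate outputs, and the numbering restarts in every CMU block, so the provider phandle picks the namespace. A sketch assuming an illustrative &cmu_peri label and node address; the clock-names follow the usual Samsung UART convention:

    #include <dt-bindings/clock/exynos7885.h>

    serial1: serial@13810000 {
            clocks = <&cmu_peri CLK_GOUT_UART1_PCLK>,
                     <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>;
            clock-names = "uart", "clk_uart_baud0";
    };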
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
new file mode 100644
index 000000000000..7666241520f8
--- /dev/null
+++ b/include/dt-bindings/clock/exynos850.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree binding constants for Exynos850 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_850_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_850_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_MMC_PLL 3
+#define CLK_MOUT_SHARED0_PLL 4
+#define CLK_MOUT_SHARED1_PLL 5
+#define CLK_MOUT_MMC_PLL 6
+#define CLK_MOUT_CORE_BUS 7
+#define CLK_MOUT_CORE_CCI 8
+#define CLK_MOUT_CORE_MMC_EMBD 9
+#define CLK_MOUT_CORE_SSS 10
+#define CLK_MOUT_DPU 11
+#define CLK_MOUT_HSI_BUS 12
+#define CLK_MOUT_HSI_MMC_CARD 13
+#define CLK_MOUT_HSI_USB20DRD 14
+#define CLK_MOUT_PERI_BUS 15
+#define CLK_MOUT_PERI_UART 16
+#define CLK_MOUT_PERI_IP 17
+#define CLK_DOUT_SHARED0_DIV3 18
+#define CLK_DOUT_SHARED0_DIV2 19
+#define CLK_DOUT_SHARED1_DIV3 20
+#define CLK_DOUT_SHARED1_DIV2 21
+#define CLK_DOUT_SHARED0_DIV4 22
+#define CLK_DOUT_SHARED1_DIV4 23
+#define CLK_DOUT_CORE_BUS 24
+#define CLK_DOUT_CORE_CCI 25
+#define CLK_DOUT_CORE_MMC_EMBD 26
+#define CLK_DOUT_CORE_SSS 27
+#define CLK_DOUT_DPU 28
+#define CLK_DOUT_HSI_BUS 29
+#define CLK_DOUT_HSI_MMC_CARD 30
+#define CLK_DOUT_HSI_USB20DRD 31
+#define CLK_DOUT_PERI_BUS 32
+#define CLK_DOUT_PERI_UART 33
+#define CLK_DOUT_PERI_IP 34
+#define CLK_GOUT_CORE_BUS 35
+#define CLK_GOUT_CORE_CCI 36
+#define CLK_GOUT_CORE_MMC_EMBD 37
+#define CLK_GOUT_CORE_SSS 38
+#define CLK_GOUT_DPU 39
+#define CLK_GOUT_HSI_BUS 40
+#define CLK_GOUT_HSI_MMC_CARD 41
+#define CLK_GOUT_HSI_USB20DRD 42
+#define CLK_GOUT_PERI_BUS 43
+#define CLK_GOUT_PERI_UART 44
+#define CLK_GOUT_PERI_IP 45
+#define CLK_MOUT_CLKCMU_APM_BUS 46
+#define CLK_DOUT_CLKCMU_APM_BUS 47
+#define CLK_GOUT_CLKCMU_APM_BUS 48
+#define CLK_MOUT_AUD 49
+#define CLK_GOUT_AUD 50
+#define CLK_DOUT_AUD 51
+#define CLK_MOUT_IS_BUS 52
+#define CLK_MOUT_IS_ITP 53
+#define CLK_MOUT_IS_VRA 54
+#define CLK_MOUT_IS_GDC 55
+#define CLK_GOUT_IS_BUS 56
+#define CLK_GOUT_IS_ITP 57
+#define CLK_GOUT_IS_VRA 58
+#define CLK_GOUT_IS_GDC 59
+#define CLK_DOUT_IS_BUS 60
+#define CLK_DOUT_IS_ITP 61
+#define CLK_DOUT_IS_VRA 62
+#define CLK_DOUT_IS_GDC 63
+#define CLK_MOUT_MFCMSCL_MFC 64
+#define CLK_MOUT_MFCMSCL_M2M 65
+#define CLK_MOUT_MFCMSCL_MCSC 66
+#define CLK_MOUT_MFCMSCL_JPEG 67
+#define CLK_GOUT_MFCMSCL_MFC 68
+#define CLK_GOUT_MFCMSCL_M2M 69
+#define CLK_GOUT_MFCMSCL_MCSC 70
+#define CLK_GOUT_MFCMSCL_JPEG 71
+#define CLK_DOUT_MFCMSCL_MFC 72
+#define CLK_DOUT_MFCMSCL_M2M 73
+#define CLK_DOUT_MFCMSCL_MCSC 74
+#define CLK_DOUT_MFCMSCL_JPEG 75
+#define CLK_MOUT_G3D_SWITCH 76
+#define CLK_GOUT_G3D_SWITCH 77
+#define CLK_DOUT_G3D_SWITCH 78
+#define CLK_MOUT_CPUCL0_DBG 79
+#define CLK_MOUT_CPUCL0_SWITCH 80
+#define CLK_GOUT_CPUCL0_DBG 81
+#define CLK_GOUT_CPUCL0_SWITCH 82
+#define CLK_DOUT_CPUCL0_DBG 83
+#define CLK_DOUT_CPUCL0_SWITCH 84
+#define CLK_MOUT_CPUCL1_DBG 85
+#define CLK_MOUT_CPUCL1_SWITCH 86
+#define CLK_GOUT_CPUCL1_DBG 87
+#define CLK_GOUT_CPUCL1_SWITCH 88
+#define CLK_DOUT_CPUCL1_DBG 89
+#define CLK_DOUT_CPUCL1_SWITCH 90
+
+/* CMU_APM */
+#define CLK_RCO_I3C_PMIC 1
+#define OSCCLK_RCO_APM 2
+#define CLK_RCO_APM__ALV 3
+#define CLK_DLL_DCO 4
+#define CLK_MOUT_APM_BUS_USER 5
+#define CLK_MOUT_RCO_APM_I3C_USER 6
+#define CLK_MOUT_RCO_APM_USER 7
+#define CLK_MOUT_DLL_USER 8
+#define CLK_MOUT_CLKCMU_CHUB_BUS 9
+#define CLK_MOUT_APM_BUS 10
+#define CLK_MOUT_APM_I3C 11
+#define CLK_DOUT_CLKCMU_CHUB_BUS 12
+#define CLK_DOUT_APM_BUS 13
+#define CLK_DOUT_APM_I3C 14
+#define CLK_GOUT_CLKCMU_CMGP_BUS 15
+#define CLK_GOUT_CLKCMU_CHUB_BUS 16
+#define CLK_GOUT_RTC_PCLK 17
+#define CLK_GOUT_TOP_RTC_PCLK 18
+#define CLK_GOUT_I3C_PCLK 19
+#define CLK_GOUT_I3C_SCLK 20
+#define CLK_GOUT_SPEEDY_PCLK 21
+#define CLK_GOUT_GPIO_ALIVE_PCLK 22
+#define CLK_GOUT_PMU_ALIVE_PCLK 23
+#define CLK_GOUT_SYSREG_APM_PCLK 24
+
+/* CMU_AUD */
+#define CLK_DOUT_AUD_AUDIF 1
+#define CLK_DOUT_AUD_BUSD 2
+#define CLK_DOUT_AUD_BUSP 3
+#define CLK_DOUT_AUD_CNT 4
+#define CLK_DOUT_AUD_CPU 5
+#define CLK_DOUT_AUD_CPU_ACLK 6
+#define CLK_DOUT_AUD_CPU_PCLKDBG 7
+#define CLK_DOUT_AUD_FM 8
+#define CLK_DOUT_AUD_FM_SPDY 9
+#define CLK_DOUT_AUD_MCLK 10
+#define CLK_DOUT_AUD_UAIF0 11
+#define CLK_DOUT_AUD_UAIF1 12
+#define CLK_DOUT_AUD_UAIF2 13
+#define CLK_DOUT_AUD_UAIF3 14
+#define CLK_DOUT_AUD_UAIF4 15
+#define CLK_DOUT_AUD_UAIF5 16
+#define CLK_DOUT_AUD_UAIF6 17
+#define CLK_FOUT_AUD_PLL 18
+#define CLK_GOUT_AUD_ABOX_ACLK 19
+#define CLK_GOUT_AUD_ASB_CCLK 20
+#define CLK_GOUT_AUD_CA32_CCLK 21
+#define CLK_GOUT_AUD_CNT_BCLK 22
+#define CLK_GOUT_AUD_CODEC_MCLK 23
+#define CLK_GOUT_AUD_DAP_CCLK 24
+#define CLK_GOUT_AUD_GPIO_PCLK 25
+#define CLK_GOUT_AUD_PPMU_ACLK 26
+#define CLK_GOUT_AUD_PPMU_PCLK 27
+#define CLK_GOUT_AUD_SPDY_BCLK 28
+#define CLK_GOUT_AUD_SYSMMU_CLK 29
+#define CLK_GOUT_AUD_SYSREG_PCLK 30
+#define CLK_GOUT_AUD_TZPC_PCLK 31
+#define CLK_GOUT_AUD_UAIF0_BCLK 32
+#define CLK_GOUT_AUD_UAIF1_BCLK 33
+#define CLK_GOUT_AUD_UAIF2_BCLK 34
+#define CLK_GOUT_AUD_UAIF3_BCLK 35
+#define CLK_GOUT_AUD_UAIF4_BCLK 36
+#define CLK_GOUT_AUD_UAIF5_BCLK 37
+#define CLK_GOUT_AUD_UAIF6_BCLK 38
+#define CLK_GOUT_AUD_WDT_PCLK 39
+#define CLK_MOUT_AUD_CPU 40
+#define CLK_MOUT_AUD_CPU_HCH 41
+#define CLK_MOUT_AUD_CPU_USER 42
+#define CLK_MOUT_AUD_FM 43
+#define CLK_MOUT_AUD_PLL 44
+#define CLK_MOUT_AUD_TICK_USB_USER 45
+#define CLK_MOUT_AUD_UAIF0 46
+#define CLK_MOUT_AUD_UAIF1 47
+#define CLK_MOUT_AUD_UAIF2 48
+#define CLK_MOUT_AUD_UAIF3 49
+#define CLK_MOUT_AUD_UAIF4 50
+#define CLK_MOUT_AUD_UAIF5 51
+#define CLK_MOUT_AUD_UAIF6 52
+#define IOCLK_AUDIOCDCLK0 53
+#define IOCLK_AUDIOCDCLK1 54
+#define IOCLK_AUDIOCDCLK2 55
+#define IOCLK_AUDIOCDCLK3 56
+#define IOCLK_AUDIOCDCLK4 57
+#define IOCLK_AUDIOCDCLK5 58
+#define IOCLK_AUDIOCDCLK6 59
+#define TICK_USB 60
+#define CLK_GOUT_AUD_CMU_AUD_PCLK 61
+
+/* CMU_CMGP */
+#define CLK_RCO_CMGP 1
+#define CLK_MOUT_CMGP_ADC 2
+#define CLK_MOUT_CMGP_USI0 3
+#define CLK_MOUT_CMGP_USI1 4
+#define CLK_DOUT_CMGP_ADC 5
+#define CLK_DOUT_CMGP_USI0 6
+#define CLK_DOUT_CMGP_USI1 7
+#define CLK_GOUT_CMGP_ADC_S0_PCLK 8
+#define CLK_GOUT_CMGP_ADC_S1_PCLK 9
+#define CLK_GOUT_CMGP_GPIO_PCLK 10
+#define CLK_GOUT_CMGP_USI0_IPCLK 11
+#define CLK_GOUT_CMGP_USI0_PCLK 12
+#define CLK_GOUT_CMGP_USI1_IPCLK 13
+#define CLK_GOUT_CMGP_USI1_PCLK 14
+#define CLK_GOUT_SYSREG_CMGP_PCLK 15
+
+/* CMU_CPUCL0 */
+#define CLK_FOUT_CPUCL0_PLL 1
+#define CLK_MOUT_PLL_CPUCL0 2
+#define CLK_MOUT_CPUCL0_SWITCH_USER 3
+#define CLK_MOUT_CPUCL0_DBG_USER 4
+#define CLK_MOUT_CPUCL0_PLL 5
+#define CLK_DOUT_CPUCL0_CPU 6
+#define CLK_DOUT_CPUCL0_CMUREF 7
+#define CLK_DOUT_CPUCL0_PCLK 8
+#define CLK_DOUT_CLUSTER0_ACLK 9
+#define CLK_DOUT_CLUSTER0_ATCLK 10
+#define CLK_DOUT_CLUSTER0_PCLKDBG 11
+#define CLK_DOUT_CLUSTER0_PERIPHCLK 12
+#define CLK_GOUT_CLUSTER0_ATCLK 13
+#define CLK_GOUT_CLUSTER0_PCLK 14
+#define CLK_GOUT_CLUSTER0_PERIPHCLK 15
+#define CLK_GOUT_CLUSTER0_SCLK 16
+#define CLK_GOUT_CPUCL0_CMU_CPUCL0_PCLK 17
+#define CLK_GOUT_CLUSTER0_CPU 18
+#define CLK_CLUSTER0_SCLK 19
+
+/* CMU_CPUCL1 */
+#define CLK_FOUT_CPUCL1_PLL 1
+#define CLK_MOUT_PLL_CPUCL1 2
+#define CLK_MOUT_CPUCL1_SWITCH_USER 3
+#define CLK_MOUT_CPUCL1_DBG_USER 4
+#define CLK_MOUT_CPUCL1_PLL 5
+#define CLK_DOUT_CPUCL1_CPU 6
+#define CLK_DOUT_CPUCL1_CMUREF 7
+#define CLK_DOUT_CPUCL1_PCLK 8
+#define CLK_DOUT_CLUSTER1_ACLK 9
+#define CLK_DOUT_CLUSTER1_ATCLK 10
+#define CLK_DOUT_CLUSTER1_PCLKDBG 11
+#define CLK_DOUT_CLUSTER1_PERIPHCLK 12
+#define CLK_GOUT_CLUSTER1_ATCLK 13
+#define CLK_GOUT_CLUSTER1_PCLK 14
+#define CLK_GOUT_CLUSTER1_PERIPHCLK 15
+#define CLK_GOUT_CLUSTER1_SCLK 16
+#define CLK_GOUT_CPUCL1_CMU_CPUCL1_PCLK 17
+#define CLK_GOUT_CLUSTER1_CPU 18
+#define CLK_CLUSTER1_SCLK 19
+
+/* CMU_G3D */
+#define CLK_FOUT_G3D_PLL 1
+#define CLK_MOUT_G3D_PLL 2
+#define CLK_MOUT_G3D_SWITCH_USER 3
+#define CLK_MOUT_G3D_BUSD 4
+#define CLK_DOUT_G3D_BUSP 5
+#define CLK_GOUT_G3D_CMU_G3D_PCLK 6
+#define CLK_GOUT_G3D_GPU_CLK 7
+#define CLK_GOUT_G3D_TZPC_PCLK 8
+#define CLK_GOUT_G3D_GRAY2BIN_CLK 9
+#define CLK_GOUT_G3D_BUSD_CLK 10
+#define CLK_GOUT_G3D_BUSP_CLK 11
+#define CLK_GOUT_G3D_SYSREG_PCLK 12
+
+/* CMU_HSI */
+#define CLK_MOUT_HSI_BUS_USER 1
+#define CLK_MOUT_HSI_MMC_CARD_USER 2
+#define CLK_MOUT_HSI_USB20DRD_USER 3
+#define CLK_MOUT_HSI_RTC 4
+#define CLK_GOUT_USB_RTC_CLK 5
+#define CLK_GOUT_USB_REF_CLK 6
+#define CLK_GOUT_USB_PHY_REF_CLK 7
+#define CLK_GOUT_USB_PHY_ACLK 8
+#define CLK_GOUT_USB_BUS_EARLY_CLK 9
+#define CLK_GOUT_GPIO_HSI_PCLK 10
+#define CLK_GOUT_MMC_CARD_ACLK 11
+#define CLK_GOUT_MMC_CARD_SDCLKIN 12
+#define CLK_GOUT_SYSREG_HSI_PCLK 13
+#define CLK_GOUT_HSI_PPMU_ACLK 14
+#define CLK_GOUT_HSI_PPMU_PCLK 15
+#define CLK_GOUT_HSI_CMU_HSI_PCLK 16
+
+/* CMU_IS */
+#define CLK_MOUT_IS_BUS_USER 1
+#define CLK_MOUT_IS_ITP_USER 2
+#define CLK_MOUT_IS_VRA_USER 3
+#define CLK_MOUT_IS_GDC_USER 4
+#define CLK_DOUT_IS_BUSP 5
+#define CLK_GOUT_IS_CMU_IS_PCLK 6
+#define CLK_GOUT_IS_CSIS0_ACLK 7
+#define CLK_GOUT_IS_CSIS1_ACLK 8
+#define CLK_GOUT_IS_CSIS2_ACLK 9
+#define CLK_GOUT_IS_TZPC_PCLK 10
+#define CLK_GOUT_IS_CSIS_DMA_CLK 11
+#define CLK_GOUT_IS_GDC_CLK 12
+#define CLK_GOUT_IS_IPP_CLK 13
+#define CLK_GOUT_IS_ITP_CLK 14
+#define CLK_GOUT_IS_MCSC_CLK 15
+#define CLK_GOUT_IS_VRA_CLK 16
+#define CLK_GOUT_IS_PPMU_IS0_ACLK 17
+#define CLK_GOUT_IS_PPMU_IS0_PCLK 18
+#define CLK_GOUT_IS_PPMU_IS1_ACLK 19
+#define CLK_GOUT_IS_PPMU_IS1_PCLK 20
+#define CLK_GOUT_IS_SYSMMU_IS0_CLK 21
+#define CLK_GOUT_IS_SYSMMU_IS1_CLK 22
+#define CLK_GOUT_IS_SYSREG_PCLK 23
+
+/* CMU_MFCMSCL */
+#define CLK_MOUT_MFCMSCL_MFC_USER 1
+#define CLK_MOUT_MFCMSCL_M2M_USER 2
+#define CLK_MOUT_MFCMSCL_MCSC_USER 3
+#define CLK_MOUT_MFCMSCL_JPEG_USER 4
+#define CLK_DOUT_MFCMSCL_BUSP 5
+#define CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK 6
+#define CLK_GOUT_MFCMSCL_TZPC_PCLK 7
+#define CLK_GOUT_MFCMSCL_JPEG_ACLK 8
+#define CLK_GOUT_MFCMSCL_M2M_ACLK 9
+#define CLK_GOUT_MFCMSCL_MCSC_CLK 10
+#define CLK_GOUT_MFCMSCL_MFC_ACLK 11
+#define CLK_GOUT_MFCMSCL_PPMU_ACLK 12
+#define CLK_GOUT_MFCMSCL_PPMU_PCLK 13
+#define CLK_GOUT_MFCMSCL_SYSMMU_CLK 14
+#define CLK_GOUT_MFCMSCL_SYSREG_PCLK 15
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_UART_USER 2
+#define CLK_MOUT_PERI_HSI2C_USER 3
+#define CLK_MOUT_PERI_SPI_USER 4
+#define CLK_DOUT_PERI_HSI2C0 5
+#define CLK_DOUT_PERI_HSI2C1 6
+#define CLK_DOUT_PERI_HSI2C2 7
+#define CLK_DOUT_PERI_SPI0 8
+#define CLK_GOUT_PERI_HSI2C0 9
+#define CLK_GOUT_PERI_HSI2C1 10
+#define CLK_GOUT_PERI_HSI2C2 11
+#define CLK_GOUT_GPIO_PERI_PCLK 12
+#define CLK_GOUT_HSI2C0_IPCLK 13
+#define CLK_GOUT_HSI2C0_PCLK 14
+#define CLK_GOUT_HSI2C1_IPCLK 15
+#define CLK_GOUT_HSI2C1_PCLK 16
+#define CLK_GOUT_HSI2C2_IPCLK 17
+#define CLK_GOUT_HSI2C2_PCLK 18
+#define CLK_GOUT_I2C0_PCLK 19
+#define CLK_GOUT_I2C1_PCLK 20
+#define CLK_GOUT_I2C2_PCLK 21
+#define CLK_GOUT_I2C3_PCLK 22
+#define CLK_GOUT_I2C4_PCLK 23
+#define CLK_GOUT_I2C5_PCLK 24
+#define CLK_GOUT_I2C6_PCLK 25
+#define CLK_GOUT_MCT_PCLK 26
+#define CLK_GOUT_PWM_MOTOR_PCLK 27
+#define CLK_GOUT_SPI0_IPCLK 28
+#define CLK_GOUT_SPI0_PCLK 29
+#define CLK_GOUT_SYSREG_PERI_PCLK 30
+#define CLK_GOUT_UART_IPCLK 31
+#define CLK_GOUT_UART_PCLK 32
+#define CLK_GOUT_WDT0_PCLK 33
+#define CLK_GOUT_WDT1_PCLK 34
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_MMC_EMBD_USER 3
+#define CLK_MOUT_CORE_SSS_USER 4
+#define CLK_MOUT_CORE_GIC 5
+#define CLK_DOUT_CORE_BUSP 6
+#define CLK_GOUT_CCI_ACLK 7
+#define CLK_GOUT_GIC_CLK 8
+#define CLK_GOUT_MMC_EMBD_ACLK 9
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 10
+#define CLK_GOUT_SSS_ACLK 11
+#define CLK_GOUT_SSS_PCLK 12
+#define CLK_GOUT_GPIO_CORE_PCLK 13
+#define CLK_GOUT_SYSREG_CORE_PCLK 14
+#define CLK_GOUT_PDMA_CORE_ACLK 15
+#define CLK_GOUT_SPDMA_CORE_ACLK 16
+
+/* CMU_DPU */
+#define CLK_MOUT_DPU_USER 1
+#define CLK_DOUT_DPU_BUSP 2
+#define CLK_GOUT_DPU_CMU_DPU_PCLK 3
+#define CLK_GOUT_DPU_DECON0_ACLK 4
+#define CLK_GOUT_DPU_DMA_ACLK 5
+#define CLK_GOUT_DPU_DPP_ACLK 6
+#define CLK_GOUT_DPU_PPMU_ACLK 7
+#define CLK_GOUT_DPU_PPMU_PCLK 8
+#define CLK_GOUT_DPU_SMMU_CLK 9
+#define CLK_GOUT_DPU_SYSREG_PCLK 10
+#define DPU_NR_CLK 11
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_850_H */
diff --git a/include/dt-bindings/clock/fsd-clk.h b/include/dt-bindings/clock/fsd-clk.h
new file mode 100644
index 000000000000..c8a2af1dd1ad
--- /dev/null
+++ b/include/dt-bindings/clock/fsd-clk.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2022 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2017-2022 Tesla, Inc.
+ * https://www.tesla.com
+ *
+ * The constants defined in this header are used in the device
+ * tree sources and in the fsd platform driver.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_FSD_H
+#define _DT_BINDINGS_CLOCK_FSD_H
+
+/* CMU */
+#define DOUT_CMU_PLL_SHARED0_DIV4 1
+#define DOUT_CMU_PERIC_SHARED1DIV36 2
+#define DOUT_CMU_PERIC_SHARED0DIV3_TBUCLK 3
+#define DOUT_CMU_PERIC_SHARED0DIV20 4
+#define DOUT_CMU_PERIC_SHARED1DIV4_DMACLK 5
+#define DOUT_CMU_PLL_SHARED0_DIV6 6
+#define DOUT_CMU_FSYS0_SHARED1DIV4 7
+#define DOUT_CMU_FSYS0_SHARED0DIV4 8
+#define DOUT_CMU_FSYS1_SHARED0DIV8 9
+#define DOUT_CMU_FSYS1_SHARED0DIV4 10
+#define CMU_CPUCL_SWITCH_GATE 11
+#define DOUT_CMU_IMEM_TCUCLK 12
+#define DOUT_CMU_IMEM_ACLK 13
+#define DOUT_CMU_IMEM_DMACLK 14
+#define GAT_CMU_FSYS0_SHARED0DIV4 15
+#define CMU_NR_CLK 16
+
+/* PERIC */
+#define PERIC_SCLK_UART0 1
+#define PERIC_PCLK_UART0 2
+#define PERIC_SCLK_UART1 3
+#define PERIC_PCLK_UART1 4
+#define PERIC_DMA0_IPCLKPORT_ACLK 5
+#define PERIC_DMA1_IPCLKPORT_ACLK 6
+#define PERIC_PWM0_IPCLKPORT_I_PCLK_S0 7
+#define PERIC_PWM1_IPCLKPORT_I_PCLK_S0 8
+#define PERIC_PCLK_SPI0 9
+#define PERIC_SCLK_SPI0 10
+#define PERIC_PCLK_SPI1 11
+#define PERIC_SCLK_SPI1 12
+#define PERIC_PCLK_SPI2 13
+#define PERIC_SCLK_SPI2 14
+#define PERIC_PCLK_TDM0 15
+#define PERIC_PCLK_HSI2C0 16
+#define PERIC_PCLK_HSI2C1 17
+#define PERIC_PCLK_HSI2C2 18
+#define PERIC_PCLK_HSI2C3 19
+#define PERIC_PCLK_HSI2C4 20
+#define PERIC_PCLK_HSI2C5 21
+#define PERIC_PCLK_HSI2C6 22
+#define PERIC_PCLK_HSI2C7 23
+#define PERIC_MCAN0_IPCLKPORT_CCLK 24
+#define PERIC_MCAN0_IPCLKPORT_PCLK 25
+#define PERIC_MCAN1_IPCLKPORT_CCLK 26
+#define PERIC_MCAN1_IPCLKPORT_PCLK 27
+#define PERIC_MCAN2_IPCLKPORT_CCLK 28
+#define PERIC_MCAN2_IPCLKPORT_PCLK 29
+#define PERIC_MCAN3_IPCLKPORT_CCLK 30
+#define PERIC_MCAN3_IPCLKPORT_PCLK 31
+#define PERIC_PCLK_ADCIF 32
+#define PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I 33
+#define PERIC_EQOS_TOP_IPCLKPORT_ACLK_I 34
+#define PERIC_EQOS_TOP_IPCLKPORT_HCLK_I 35
+#define PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I 36
+#define PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I 37
+#define PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK 38
+#define PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK 39
+#define PERIC_HCLK_TDM0 40
+#define PERIC_PCLK_TDM1 41
+#define PERIC_HCLK_TDM1 42
+#define PERIC_EQOS_PHYRXCLK_MUX 43
+#define PERIC_EQOS_PHYRXCLK 44
+#define PERIC_DOUT_RGMII_CLK 45
+#define PERIC_NR_CLK 46
+
+/* FSYS0 */
+#define UFS0_MPHY_REFCLK_IXTAL24 1
+#define UFS0_MPHY_REFCLK_IXTAL26 2
+#define UFS1_MPHY_REFCLK_IXTAL24 3
+#define UFS1_MPHY_REFCLK_IXTAL26 4
+#define UFS0_TOP0_HCLK_BUS 5
+#define UFS0_TOP0_ACLK 6
+#define UFS0_TOP0_CLK_UNIPRO 7
+#define UFS0_TOP0_FMP_CLK 8
+#define UFS1_TOP1_HCLK_BUS 9
+#define UFS1_TOP1_ACLK 10
+#define UFS1_TOP1_CLK_UNIPRO 11
+#define UFS1_TOP1_FMP_CLK 12
+#define PCIE_SUBCTRL_INST0_DBI_ACLK_SOC 13
+#define PCIE_SUBCTRL_INST0_AUX_CLK_SOC 14
+#define PCIE_SUBCTRL_INST0_MSTR_ACLK_SOC 15
+#define PCIE_SUBCTRL_INST0_SLV_ACLK_SOC 16
+#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I 17
+#define FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 18
+#define FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I 19
+#define FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 20
+#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 21
+#define FSYS0_DOUT_FSYS0_PERIBUS_GRP 22
+#define FSYS0_NR_CLK 23
+
+/* FSYS1 */
+#define PCIE_LINK0_IPCLKPORT_DBI_ACLK 1
+#define PCIE_LINK0_IPCLKPORT_AUX_ACLK 2
+#define PCIE_LINK0_IPCLKPORT_MSTR_ACLK 3
+#define PCIE_LINK0_IPCLKPORT_SLV_ACLK 4
+#define PCIE_LINK1_IPCLKPORT_DBI_ACLK 5
+#define PCIE_LINK1_IPCLKPORT_AUX_ACLK 6
+#define PCIE_LINK1_IPCLKPORT_MSTR_ACLK 7
+#define PCIE_LINK1_IPCLKPORT_SLV_ACLK 8
+#define FSYS1_NR_CLK 9
+
+/* IMEM */
+#define IMEM_DMA0_IPCLKPORT_ACLK 1
+#define IMEM_DMA1_IPCLKPORT_ACLK 2
+#define IMEM_WDT0_IPCLKPORT_PCLK 3
+#define IMEM_WDT1_IPCLKPORT_PCLK 4
+#define IMEM_WDT2_IPCLKPORT_PCLK 5
+#define IMEM_MCT_PCLK 6
+#define IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS 7
+#define IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS 8
+#define IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 9
+#define IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 10
+#define IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 11
+#define IMEM_NR_CLK 12
+
+/* MFC */
+#define MFC_MFC_IPCLKPORT_ACLK 1
+#define MFC_NR_CLK 2
+
+/* CAM_CSI */
+#define CAM_CSI0_0_IPCLKPORT_I_ACLK 1
+#define CAM_CSI0_1_IPCLKPORT_I_ACLK 2
+#define CAM_CSI0_2_IPCLKPORT_I_ACLK 3
+#define CAM_CSI0_3_IPCLKPORT_I_ACLK 4
+#define CAM_CSI1_0_IPCLKPORT_I_ACLK 5
+#define CAM_CSI1_1_IPCLKPORT_I_ACLK 6
+#define CAM_CSI1_2_IPCLKPORT_I_ACLK 7
+#define CAM_CSI1_3_IPCLKPORT_I_ACLK 8
+#define CAM_CSI2_0_IPCLKPORT_I_ACLK 9
+#define CAM_CSI2_1_IPCLKPORT_I_ACLK 10
+#define CAM_CSI2_2_IPCLKPORT_I_ACLK 11
+#define CAM_CSI2_3_IPCLKPORT_I_ACLK 12
+#define CAM_CSI_NR_CLK 13
+
+#endif /* _DT_BINDINGS_CLOCK_FSD_H */
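As the header comment says, these constants are shared between the device tree sources and the platform driver. A minimal consumer sketch; the node, address and &clock_peric label are illustrative, and the clock-names follow the usual Samsung UART convention:

    #include <dt-bindings/clock/fsd-clk.h>

    serial0: serial@14180000 {
            clocks = <&clock_peric PERIC_PCLK_UART0>,
                     <&clock_peric PERIC_SCLK_UART0>;
            clock-names = "uart", "clk_uart_baud0";
    };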
diff --git a/include/dt-bindings/clock/fsl,qoriq-clockgen.h b/include/dt-bindings/clock/fsl,qoriq-clockgen.h
new file mode 100644
index 000000000000..ddec7d0bdc7f
--- /dev/null
+++ b/include/dt-bindings/clock/fsl,qoriq-clockgen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DT_CLOCK_FSL_QORIQ_CLOCKGEN_H
+#define DT_CLOCK_FSL_QORIQ_CLOCKGEN_H
+
+#define QORIQ_CLK_SYSCLK 0
+#define QORIQ_CLK_CMUX 1
+#define QORIQ_CLK_HWACCEL 2
+#define QORIQ_CLK_FMAN 3
+#define QORIQ_CLK_PLATFORM_PLL 4
+#define QORIQ_CLK_CORECLK 5
+
+#define QORIQ_CLK_PLL_DIV(x) ((x) - 1)
+
+#endif /* DT_CLOCK_FSL_QORIQ_CLOCKGEN_H */
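QORIQ_CLK_PLL_DIV(x) turns "PLL divided by x" into the zero-based output index the clockgen binding expects, hence the (x) - 1. A sketch of the two-cell <type index> specifier form, with an illustrative consumer node:

    #include <dt-bindings/clock/fsl,qoriq-clockgen.h>

    /* Platform PLL divided by 2: QORIQ_CLK_PLL_DIV(2) expands to 1 */
    dspi0: spi@2100000 {
            clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(2)>;
    };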
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
index e916e49ff288..8fe7712fb12d 100644
--- a/include/dt-bindings/clock/g12a-aoclkc.h
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -26,10 +26,17 @@
#define CLKID_AO_M4_FCLK 13
#define CLKID_AO_M4_HCLK 14
#define CLKID_AO_CLK81 15
#define CLKID_AO_SAR_ADC_SEL 16
+#define CLKID_AO_SAR_ADC_DIV 17
#define CLKID_AO_SAR_ADC_CLK 18
#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K_PRE 20
+#define CLKID_AO_32K_DIV 21
+#define CLKID_AO_32K_SEL 22
#define CLKID_AO_32K 23
+#define CLKID_AO_CEC_PRE 24
+#define CLKID_AO_CEC_DIV 25
+#define CLKID_AO_CEC_SEL 26
#define CLKID_AO_CEC 27
#define CLKID_AO_CTS_RTC_OSCIN 28
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index 40d49940d8a8..fd09819da2ec 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -69,7 +71,23 @@
#define CLKID_SD_EMMC_A_CLK0 60
#define CLKID_SD_EMMC_B_CLK0 61
#define CLKID_SD_EMMC_C_CLK0 62
+#define CLKID_SD_EMMC_A_CLK0_SEL 63
+#define CLKID_SD_EMMC_A_CLK0_DIV 64
+#define CLKID_SD_EMMC_B_CLK0_SEL 65
+#define CLKID_SD_EMMC_B_CLK0_DIV 66
+#define CLKID_SD_EMMC_C_CLK0_SEL 67
+#define CLKID_SD_EMMC_C_CLK0_DIV 68
+#define CLKID_MPLL0_DIV 69
+#define CLKID_MPLL1_DIV 70
+#define CLKID_MPLL2_DIV 71
+#define CLKID_MPLL3_DIV 72
+#define CLKID_MPLL_PREDIV 73
#define CLKID_HIFI_PLL 74
+#define CLKID_FCLK_DIV2_DIV 75
+#define CLKID_FCLK_DIV3_DIV 76
+#define CLKID_FCLK_DIV4_DIV 77
+#define CLKID_FCLK_DIV5_DIV 78
+#define CLKID_FCLK_DIV7_DIV 79
#define CLKID_VCLK2_VENCI0 80
#define CLKID_VCLK2_VENCI1 81
#define CLKID_VCLK2_VENCP0 82
@@ -90,26 +108,54 @@
#define CLKID_VCLK2_VENCL 97
#define CLKID_VCLK2_OTHER1 98
#define CLKID_FCLK_DIV2P5 99
+#define CLKID_FCLK_DIV2P5_DIV 100
+#define CLKID_FIXED_PLL_DCO 101
+#define CLKID_SYS_PLL_DCO 102
+#define CLKID_GP0_PLL_DCO 103
+#define CLKID_HIFI_PLL_DCO 104
#define CLKID_DMA 105
#define CLKID_EFUSE 106
#define CLKID_ROM_BOOT 107
#define CLKID_RESET_SEC 108
#define CLKID_SEC_AHB_APB3 109
#define CLKID_VPU_0_SEL 110
+#define CLKID_VPU_0_DIV 111
#define CLKID_VPU_0 112
#define CLKID_VPU_1_SEL 113
+#define CLKID_VPU_1_DIV 114
#define CLKID_VPU_1 115
#define CLKID_VPU 116
#define CLKID_VAPB_0_SEL 117
+#define CLKID_VAPB_0_DIV 118
#define CLKID_VAPB_0 119
#define CLKID_VAPB_1_SEL 120
+#define CLKID_VAPB_1_DIV 121
#define CLKID_VAPB_1 122
#define CLKID_VAPB_SEL 123
#define CLKID_VAPB 124
+#define CLKID_HDMI_PLL_DCO 125
+#define CLKID_HDMI_PLL_OD 126
+#define CLKID_HDMI_PLL_OD2 127
#define CLKID_HDMI_PLL 128
#define CLKID_VID_PLL 129
+#define CLKID_VID_PLL_SEL 130
+#define CLKID_VID_PLL_DIV 131
+#define CLKID_VCLK_SEL 132
+#define CLKID_VCLK2_SEL 133
+#define CLKID_VCLK_INPUT 134
+#define CLKID_VCLK2_INPUT 135
+#define CLKID_VCLK_DIV 136
+#define CLKID_VCLK2_DIV 137
#define CLKID_VCLK 138
#define CLKID_VCLK2 139
+#define CLKID_VCLK_DIV2_EN 140
+#define CLKID_VCLK_DIV4_EN 141
+#define CLKID_VCLK_DIV6_EN 142
+#define CLKID_VCLK_DIV12_EN 143
+#define CLKID_VCLK2_DIV2_EN 144
+#define CLKID_VCLK2_DIV4_EN 145
+#define CLKID_VCLK2_DIV6_EN 146
+#define CLKID_VCLK2_DIV12_EN 147
#define CLKID_VCLK_DIV1 148
#define CLKID_VCLK_DIV2 149
#define CLKID_VCLK_DIV4 150
@@ -120,32 +166,126 @@
#define CLKID_VCLK2_DIV4 155
#define CLKID_VCLK2_DIV6 156
#define CLKID_VCLK2_DIV12 157
+#define CLKID_CTS_ENCI_SEL 158
+#define CLKID_CTS_ENCP_SEL 159
+#define CLKID_CTS_VDAC_SEL 160
+#define CLKID_HDMI_TX_SEL 161
#define CLKID_CTS_ENCI 162
#define CLKID_CTS_ENCP 163
#define CLKID_CTS_VDAC 164
#define CLKID_HDMI_TX 165
+#define CLKID_HDMI_SEL 166
+#define CLKID_HDMI_DIV 167
#define CLKID_HDMI 168
#define CLKID_MALI_0_SEL 169
+#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_0 171
#define CLKID_MALI_1_SEL 172
+#define CLKID_MALI_1_DIV 173
#define CLKID_MALI_1 174
#define CLKID_MALI 175
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_MPLL_50M 177
+#define CLKID_SYS_PLL_DIV16_EN 178
+#define CLKID_SYS_PLL_DIV16 179
+#define CLKID_CPU_CLK_DYN0_SEL 180
+#define CLKID_CPU_CLK_DYN0_DIV 181
+#define CLKID_CPU_CLK_DYN0 182
+#define CLKID_CPU_CLK_DYN1_SEL 183
+#define CLKID_CPU_CLK_DYN1_DIV 184
+#define CLKID_CPU_CLK_DYN1 185
+#define CLKID_CPU_CLK_DYN 186
#define CLKID_CPU_CLK 187
+#define CLKID_CPU_CLK_DIV16_EN 188
+#define CLKID_CPU_CLK_DIV16 189
+#define CLKID_CPU_CLK_APB_DIV 190
+#define CLKID_CPU_CLK_APB 191
+#define CLKID_CPU_CLK_ATB_DIV 192
+#define CLKID_CPU_CLK_ATB 193
+#define CLKID_CPU_CLK_AXI_DIV 194
+#define CLKID_CPU_CLK_AXI 195
+#define CLKID_CPU_CLK_TRACE_DIV 196
+#define CLKID_CPU_CLK_TRACE 197
+#define CLKID_PCIE_PLL_DCO 198
+#define CLKID_PCIE_PLL_DCO_DIV2 199
+#define CLKID_PCIE_PLL_OD 200
#define CLKID_PCIE_PLL 201
+#define CLKID_VDEC_1_SEL 202
+#define CLKID_VDEC_1_DIV 203
#define CLKID_VDEC_1 204
+#define CLKID_VDEC_HEVC_SEL 205
+#define CLKID_VDEC_HEVC_DIV 206
#define CLKID_VDEC_HEVC 207
+#define CLKID_VDEC_HEVCF_SEL 208
+#define CLKID_VDEC_HEVCF_DIV 209
#define CLKID_VDEC_HEVCF 210
+#define CLKID_TS_DIV 211
#define CLKID_TS 212
+#define CLKID_SYS1_PLL_DCO 213
+#define CLKID_SYS1_PLL 214
+#define CLKID_SYS1_PLL_DIV16_EN 215
+#define CLKID_SYS1_PLL_DIV16 216
+#define CLKID_CPUB_CLK_DYN0_SEL 217
+#define CLKID_CPUB_CLK_DYN0_DIV 218
+#define CLKID_CPUB_CLK_DYN0 219
+#define CLKID_CPUB_CLK_DYN1_SEL 220
+#define CLKID_CPUB_CLK_DYN1_DIV 221
+#define CLKID_CPUB_CLK_DYN1 222
+#define CLKID_CPUB_CLK_DYN 223
#define CLKID_CPUB_CLK 224
+#define CLKID_CPUB_CLK_DIV16_EN 225
+#define CLKID_CPUB_CLK_DIV16 226
+#define CLKID_CPUB_CLK_DIV2 227
+#define CLKID_CPUB_CLK_DIV3 228
+#define CLKID_CPUB_CLK_DIV4 229
+#define CLKID_CPUB_CLK_DIV5 230
+#define CLKID_CPUB_CLK_DIV6 231
+#define CLKID_CPUB_CLK_DIV7 232
+#define CLKID_CPUB_CLK_DIV8 233
+#define CLKID_CPUB_CLK_APB_SEL 234
+#define CLKID_CPUB_CLK_APB 235
+#define CLKID_CPUB_CLK_ATB_SEL 236
+#define CLKID_CPUB_CLK_ATB 237
+#define CLKID_CPUB_CLK_AXI_SEL 238
+#define CLKID_CPUB_CLK_AXI 239
+#define CLKID_CPUB_CLK_TRACE_SEL 240
+#define CLKID_CPUB_CLK_TRACE 241
+#define CLKID_GP1_PLL_DCO 242
#define CLKID_GP1_PLL 243
+#define CLKID_DSU_CLK_DYN0_SEL 244
+#define CLKID_DSU_CLK_DYN0_DIV 245
+#define CLKID_DSU_CLK_DYN0 246
+#define CLKID_DSU_CLK_DYN1_SEL 247
+#define CLKID_DSU_CLK_DYN1_DIV 248
+#define CLKID_DSU_CLK_DYN1 249
+#define CLKID_DSU_CLK_DYN 250
+#define CLKID_DSU_CLK_FINAL 251
#define CLKID_DSU_CLK 252
#define CLKID_CPU1_CLK 253
#define CLKID_CPU2_CLK 254
#define CLKID_CPU3_CLK 255
+#define CLKID_SPICC0_SCLK_SEL 256
+#define CLKID_SPICC0_SCLK_DIV 257
#define CLKID_SPICC0_SCLK 258
+#define CLKID_SPICC1_SCLK_SEL 259
+#define CLKID_SPICC1_SCLK_DIV 260
#define CLKID_SPICC1_SCLK 261
+#define CLKID_NNA_AXI_CLK_SEL 262
+#define CLKID_NNA_AXI_CLK_DIV 263
#define CLKID_NNA_AXI_CLK 264
+#define CLKID_NNA_CORE_CLK_SEL 265
+#define CLKID_NNA_CORE_CLK_DIV 266
#define CLKID_NNA_CORE_CLK 267
+#define CLKID_MIPI_DSI_PXCLK_DIV 268
+#define CLKID_MIPI_DSI_PXCLK_SEL 269
+#define CLKID_MIPI_DSI_PXCLK 270
+#define CLKID_CTS_ENCL 271
+#define CLKID_CTS_ENCL_SEL 272
+#define CLKID_MIPI_ISP_DIV 273
+#define CLKID_MIPI_ISP_SEL 274
+#define CLKID_MIPI_ISP 275
+#define CLKID_MIPI_ISP_GATE 276
+#define CLKID_MIPI_ISP_CSI_PHY0 277
+#define CLKID_MIPI_ISP_CSI_PHY1 278
#endif /* __G12A_CLKC_H */
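The bulk of these additions (and the g12a-aoclkc ones above) expose the internal _SEL mux and _DIV divider stages of composite clocks whose gate output was already public. That lets a device tree pin a particular stage instead of relying on runtime rate propagation. A sketch with illustrative labels and an assumed parent choice:

    #include <dt-bindings/clock/g12a-clkc.h>

    spicc0: spi@13000 {
            clocks = <&clkc CLKID_SPICC0_SCLK>;
            assigned-clocks = <&clkc CLKID_SPICC0_SCLK_SEL>;
            assigned-clock-parents = <&clkc CLKID_FCLK_DIV4>;
    };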
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
new file mode 100644
index 000000000000..3dac3577788a
--- /dev/null
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Device Tree binding constants for Google gs101 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+
+/* CMU_TOP PLL */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SPARE_PLL 5
+
+/* CMU_TOP MUX */
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SPARE 10
+#define CLK_MOUT_CMU_BO_BUS 11
+#define CLK_MOUT_CMU_BUS0_BUS 12
+#define CLK_MOUT_CMU_BUS1_BUS 13
+#define CLK_MOUT_CMU_BUS2_BUS 14
+#define CLK_MOUT_CMU_CIS_CLK0 15
+#define CLK_MOUT_CMU_CIS_CLK1 16
+#define CLK_MOUT_CMU_CIS_CLK2 17
+#define CLK_MOUT_CMU_CIS_CLK3 18
+#define CLK_MOUT_CMU_CIS_CLK4 19
+#define CLK_MOUT_CMU_CIS_CLK5 20
+#define CLK_MOUT_CMU_CIS_CLK6 21
+#define CLK_MOUT_CMU_CIS_CLK7 22
+#define CLK_MOUT_CMU_CMU_BOOST 23
+#define CLK_MOUT_CMU_BOOST_OPTION1 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_DBG 26
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 27
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 28
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 29
+#define CLK_MOUT_CMU_CSIS_BUS 30
+#define CLK_MOUT_CMU_DISP_BUS 31
+#define CLK_MOUT_CMU_DNS_BUS 32
+#define CLK_MOUT_CMU_DPU_BUS 33
+#define CLK_MOUT_CMU_EH_BUS 34
+#define CLK_MOUT_CMU_G2D_G2D 35
+#define CLK_MOUT_CMU_G2D_MSCL 36
+#define CLK_MOUT_CMU_G3AA_G3AA 37
+#define CLK_MOUT_CMU_G3D_BUSD 38
+#define CLK_MOUT_CMU_G3D_GLB 39
+#define CLK_MOUT_CMU_G3D_SWITCH 40
+#define CLK_MOUT_CMU_GDC_GDC0 41
+#define CLK_MOUT_CMU_GDC_GDC1 42
+#define CLK_MOUT_CMU_GDC_SCSC 43
+#define CLK_MOUT_CMU_HPM 44
+#define CLK_MOUT_CMU_HSI0_BUS 45
+#define CLK_MOUT_CMU_HSI0_DPGTC 46
+#define CLK_MOUT_CMU_HSI0_USB31DRD 47
+#define CLK_MOUT_CMU_HSI0_USBDPDBG 48
+#define CLK_MOUT_CMU_HSI1_BUS 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI2_BUS 51
+#define CLK_MOUT_CMU_HSI2_MMC_CARD 52
+#define CLK_MOUT_CMU_HSI2_PCIE 53
+#define CLK_MOUT_CMU_HSI2_UFS_EMBD 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_ITSC 57
+#define CLK_MOUT_CMU_MCSC_MCSC 58
+#define CLK_MOUT_CMU_MFC_MFC 59
+#define CLK_MOUT_CMU_MIF_BUSP 60
+#define CLK_MOUT_CMU_MIF_SWITCH 61
+#define CLK_MOUT_CMU_MISC_BUS 62
+#define CLK_MOUT_CMU_MISC_SSS 63
+#define CLK_MOUT_CMU_PDP_BUS 64
+#define CLK_MOUT_CMU_PDP_VRA 65
+#define CLK_MOUT_CMU_PERIC0_BUS 66
+#define CLK_MOUT_CMU_PERIC0_IP 67
+#define CLK_MOUT_CMU_PERIC1_BUS 68
+#define CLK_MOUT_CMU_PERIC1_IP 69
+#define CLK_MOUT_CMU_TNR_BUS 70
+#define CLK_MOUT_CMU_TOP_BOOST_OPTION1 71
+#define CLK_MOUT_CMU_TOP_CMUREF 72
+#define CLK_MOUT_CMU_TPU_BUS 73
+#define CLK_MOUT_CMU_TPU_TPU 74
+#define CLK_MOUT_CMU_TPU_TPUCTL 75
+#define CLK_MOUT_CMU_TPU_UART 76
+#define CLK_MOUT_CMU_CMUREF 77
+
+/* CMU_TOP Dividers */
+#define CLK_DOUT_CMU_BO_BUS 78
+#define CLK_DOUT_CMU_BUS0_BUS 79
+#define CLK_DOUT_CMU_BUS1_BUS 80
+#define CLK_DOUT_CMU_BUS2_BUS 81
+#define CLK_DOUT_CMU_CIS_CLK0 82
+#define CLK_DOUT_CMU_CIS_CLK1 83
+#define CLK_DOUT_CMU_CIS_CLK2 84
+#define CLK_DOUT_CMU_CIS_CLK3 85
+#define CLK_DOUT_CMU_CIS_CLK4 86
+#define CLK_DOUT_CMU_CIS_CLK5 87
+#define CLK_DOUT_CMU_CIS_CLK6 88
+#define CLK_DOUT_CMU_CIS_CLK7 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_DBG 91
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 92
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 93
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 94
+#define CLK_DOUT_CMU_CSIS_BUS 95
+#define CLK_DOUT_CMU_DISP_BUS 96
+#define CLK_DOUT_CMU_DNS_BUS 97
+#define CLK_DOUT_CMU_DPU_BUS 98
+#define CLK_DOUT_CMU_EH_BUS 99
+#define CLK_DOUT_CMU_G2D_G2D 100
+#define CLK_DOUT_CMU_G2D_MSCL 101
+#define CLK_DOUT_CMU_G3AA_G3AA 102
+#define CLK_DOUT_CMU_G3D_BUSD 103
+#define CLK_DOUT_CMU_G3D_GLB 104
+#define CLK_DOUT_CMU_G3D_SWITCH 105
+#define CLK_DOUT_CMU_GDC_GDC0 106
+#define CLK_DOUT_CMU_GDC_GDC1 107
+#define CLK_DOUT_CMU_GDC_SCSC 108
+#define CLK_DOUT_CMU_CMU_HPM 109
+#define CLK_DOUT_CMU_HSI0_BUS 110
+#define CLK_DOUT_CMU_HSI0_DPGTC 111
+#define CLK_DOUT_CMU_HSI0_USB31DRD 112
+#define CLK_DOUT_CMU_HSI0_USBDPDBG 113
+#define CLK_DOUT_CMU_HSI1_BUS 114
+#define CLK_DOUT_CMU_HSI1_PCIE 115
+#define CLK_DOUT_CMU_HSI2_BUS 116
+#define CLK_DOUT_CMU_HSI2_MMC_CARD 117
+#define CLK_DOUT_CMU_HSI2_PCIE 118
+#define CLK_DOUT_CMU_HSI2_UFS_EMBD 119
+#define CLK_DOUT_CMU_IPP_BUS 120
+#define CLK_DOUT_CMU_ITP_BUS 121
+#define CLK_DOUT_CMU_MCSC_ITSC 122
+#define CLK_DOUT_CMU_MCSC_MCSC 123
+#define CLK_DOUT_CMU_MFC_MFC 124
+#define CLK_DOUT_CMU_MIF_BUSP 125
+#define CLK_DOUT_CMU_MISC_BUS 126
+#define CLK_DOUT_CMU_MISC_SSS 127
+#define CLK_DOUT_CMU_OTP 128
+#define CLK_DOUT_CMU_PDP_BUS 129
+#define CLK_DOUT_CMU_PDP_VRA 130
+#define CLK_DOUT_CMU_PERIC0_BUS 131
+#define CLK_DOUT_CMU_PERIC0_IP 132
+#define CLK_DOUT_CMU_PERIC1_BUS 133
+#define CLK_DOUT_CMU_PERIC1_IP 134
+#define CLK_DOUT_CMU_TNR_BUS 135
+#define CLK_DOUT_CMU_TPU_BUS 136
+#define CLK_DOUT_CMU_TPU_TPU 137
+#define CLK_DOUT_CMU_TPU_TPUCTL 138
+#define CLK_DOUT_CMU_TPU_UART 139
+#define CLK_DOUT_CMU_CMU_BOOST 140
+#define CLK_DOUT_CMU_CMU_CMUREF 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV3 143
+#define CLK_DOUT_CMU_SHARED0_DIV4 144
+#define CLK_DOUT_CMU_SHARED0_DIV5 145
+#define CLK_DOUT_CMU_SHARED1_DIV2 146
+#define CLK_DOUT_CMU_SHARED1_DIV3 147
+#define CLK_DOUT_CMU_SHARED1_DIV4 148
+#define CLK_DOUT_CMU_SHARED2_DIV2 149
+#define CLK_DOUT_CMU_SHARED3_DIV2 150
+
+/* CMU_TOP Gates */
+#define CLK_GOUT_CMU_BUS0_BOOST 151
+#define CLK_GOUT_CMU_BUS1_BOOST 152
+#define CLK_GOUT_CMU_BUS2_BOOST 153
+#define CLK_GOUT_CMU_CORE_BOOST 154
+#define CLK_GOUT_CMU_CPUCL0_BOOST 155
+#define CLK_GOUT_CMU_CPUCL1_BOOST 156
+#define CLK_GOUT_CMU_CPUCL2_BOOST 157
+#define CLK_GOUT_CMU_MIF_BOOST 158
+#define CLK_GOUT_CMU_MIF_SWITCH 159
+#define CLK_GOUT_CMU_BO_BUS 160
+#define CLK_GOUT_CMU_BUS0_BUS 161
+#define CLK_GOUT_CMU_BUS1_BUS 162
+#define CLK_GOUT_CMU_BUS2_BUS 163
+#define CLK_GOUT_CMU_CIS_CLK0 164
+#define CLK_GOUT_CMU_CIS_CLK1 165
+#define CLK_GOUT_CMU_CIS_CLK2 166
+#define CLK_GOUT_CMU_CIS_CLK3 167
+#define CLK_GOUT_CMU_CIS_CLK4 168
+#define CLK_GOUT_CMU_CIS_CLK5 169
+#define CLK_GOUT_CMU_CIS_CLK6 170
+#define CLK_GOUT_CMU_CIS_CLK7 171
+#define CLK_GOUT_CMU_CMU_BOOST 172
+#define CLK_GOUT_CMU_CORE_BUS 173
+#define CLK_GOUT_CMU_CPUCL0_DBG 174
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 175
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 176
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 177
+#define CLK_GOUT_CMU_CSIS_BUS 178
+#define CLK_GOUT_CMU_DISP_BUS 179
+#define CLK_GOUT_CMU_DNS_BUS 180
+#define CLK_GOUT_CMU_DPU_BUS 181
+#define CLK_GOUT_CMU_EH_BUS 182
+#define CLK_GOUT_CMU_G2D_G2D 183
+#define CLK_GOUT_CMU_G2D_MSCL 184
+#define CLK_GOUT_CMU_G3AA_G3AA 185
+#define CLK_GOUT_CMU_G3D_BUSD 186
+#define CLK_GOUT_CMU_G3D_GLB 187
+#define CLK_GOUT_CMU_G3D_SWITCH 188
+#define CLK_GOUT_CMU_GDC_GDC0 189
+#define CLK_GOUT_CMU_GDC_GDC1 190
+#define CLK_GOUT_CMU_GDC_SCSC 191
+#define CLK_GOUT_CMU_HPM 192
+#define CLK_GOUT_CMU_HSI0_BUS 193
+#define CLK_GOUT_CMU_HSI0_DPGTC 194
+#define CLK_GOUT_CMU_HSI0_USB31DRD 195
+#define CLK_GOUT_CMU_HSI0_USBDPDBG 196
+#define CLK_GOUT_CMU_HSI1_BUS 197
+#define CLK_GOUT_CMU_HSI1_PCIE 198
+#define CLK_GOUT_CMU_HSI2_BUS 199
+#define CLK_GOUT_CMU_HSI2_MMC_CARD 200
+#define CLK_GOUT_CMU_HSI2_PCIE 201
+#define CLK_GOUT_CMU_HSI2_UFS_EMBD 202
+#define CLK_GOUT_CMU_IPP_BUS 203
+#define CLK_GOUT_CMU_ITP_BUS 204
+#define CLK_GOUT_CMU_MCSC_ITSC 205
+#define CLK_GOUT_CMU_MCSC_MCSC 206
+#define CLK_GOUT_CMU_MFC_MFC 207
+#define CLK_GOUT_CMU_MIF_BUSP 208
+#define CLK_GOUT_CMU_MISC_BUS 209
+#define CLK_GOUT_CMU_MISC_SSS 210
+#define CLK_GOUT_CMU_PDP_BUS 211
+#define CLK_GOUT_CMU_PDP_VRA 212
+#define CLK_GOUT_CMU_G3AA 213
+#define CLK_GOUT_CMU_PERIC0_BUS 214
+#define CLK_GOUT_CMU_PERIC0_IP 215
+#define CLK_GOUT_CMU_PERIC1_BUS 216
+#define CLK_GOUT_CMU_PERIC1_IP 217
+#define CLK_GOUT_CMU_TNR_BUS 218
+#define CLK_GOUT_CMU_TOP_CMUREF 219
+#define CLK_GOUT_CMU_TPU_BUS 220
+#define CLK_GOUT_CMU_TPU_TPU 221
+#define CLK_GOUT_CMU_TPU_TPUCTL 222
+#define CLK_GOUT_CMU_TPU_UART 223
+
+/* CMU_APM */
+#define CLK_MOUT_APM_FUNC 1
+#define CLK_MOUT_APM_FUNCSRC 2
+#define CLK_DOUT_APM_BOOST 3
+#define CLK_DOUT_APM_USI0_UART 4
+#define CLK_DOUT_APM_USI0_USI 5
+#define CLK_DOUT_APM_USI1_UART 6
+#define CLK_GOUT_APM_APM_CMU_APM_PCLK 7
+#define CLK_GOUT_BUS0_BOOST_OPTION1 8
+#define CLK_GOUT_CMU_BOOST_OPTION1 9
+#define CLK_GOUT_CORE_BOOST_OPTION1 10
+#define CLK_GOUT_APM_FUNC 11
+#define CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 12
+#define CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK 13
+#define CLK_GOUT_APM_APBIF_PMU_ALIVE_PCLK 14
+#define CLK_GOUT_APM_APBIF_RTC_PCLK 15
+#define CLK_GOUT_APM_APBIF_TRTC_PCLK 16
+#define CLK_GOUT_APM_APM_USI0_UART_IPCLK 17
+#define CLK_GOUT_APM_APM_USI0_UART_PCLK 18
+#define CLK_GOUT_APM_APM_USI0_USI_IPCLK 19
+#define CLK_GOUT_APM_APM_USI0_USI_PCLK 20
+#define CLK_GOUT_APM_APM_USI1_UART_IPCLK 21
+#define CLK_GOUT_APM_APM_USI1_UART_PCLK 22
+#define CLK_GOUT_APM_D_TZPC_APM_PCLK 23
+#define CLK_GOUT_APM_GPC_APM_PCLK 24
+#define CLK_GOUT_APM_GREBEINTEGRATION_HCLK 25
+#define CLK_GOUT_APM_INTMEM_ACLK 26
+#define CLK_GOUT_APM_INTMEM_PCLK 27
+#define CLK_GOUT_APM_LHM_AXI_G_SWD_I_CLK 28
+#define CLK_GOUT_APM_LHM_AXI_P_AOCAPM_I_CLK 29
+#define CLK_GOUT_APM_LHM_AXI_P_APM_I_CLK 30
+#define CLK_GOUT_APM_LHS_AXI_D_APM_I_CLK 31
+#define CLK_GOUT_APM_LHS_AXI_G_DBGCORE_I_CLK 32
+#define CLK_GOUT_APM_LHS_AXI_G_SCAN2DRAM_I_CLK 33
+#define CLK_GOUT_APM_MAILBOX_APM_AOC_PCLK 34
+#define CLK_GOUT_APM_MAILBOX_APM_AP_PCLK 35
+#define CLK_GOUT_APM_MAILBOX_APM_GSA_PCLK 36
+#define CLK_GOUT_APM_MAILBOX_APM_SWD_PCLK 37
+#define CLK_GOUT_APM_MAILBOX_APM_TPU_PCLK 38
+#define CLK_GOUT_APM_MAILBOX_AP_AOC_PCLK 39
+#define CLK_GOUT_APM_MAILBOX_AP_DBGCORE_PCLK 40
+#define CLK_GOUT_APM_PMU_INTR_GEN_PCLK 41
+#define CLK_GOUT_APM_ROM_CRC32_HOST_ACLK 42
+#define CLK_GOUT_APM_ROM_CRC32_HOST_PCLK 43
+#define CLK_GOUT_APM_CLK_APM_BUS_CLK 44
+#define CLK_GOUT_APM_CLK_APM_USI0_UART_CLK 45
+#define CLK_GOUT_APM_CLK_APM_USI0_USI_CLK 46
+#define CLK_GOUT_APM_CLK_APM_USI1_UART_CLK 47
+#define CLK_GOUT_APM_SPEEDY_APM_PCLK 48
+#define CLK_GOUT_APM_SPEEDY_SUB_APM_PCLK 49
+#define CLK_GOUT_APM_SSMT_D_APM_ACLK 50
+#define CLK_GOUT_APM_SSMT_D_APM_PCLK 51
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_ACLK 52
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_PCLK 53
+#define CLK_GOUT_APM_SS_DBGCORE_SS_DBGCORE_HCLK 54
+#define CLK_GOUT_APM_SYSMMU_D_APM_CLK_S2 55
+#define CLK_GOUT_APM_SYSREG_APM_PCLK 56
+#define CLK_GOUT_APM_UASC_APM_ACLK 57
+#define CLK_GOUT_APM_UASC_APM_PCLK 58
+#define CLK_GOUT_APM_UASC_DBGCORE_ACLK 59
+#define CLK_GOUT_APM_UASC_DBGCORE_PCLK 60
+#define CLK_GOUT_APM_UASC_G_SWD_ACLK 61
+#define CLK_GOUT_APM_UASC_G_SWD_PCLK 62
+#define CLK_GOUT_APM_UASC_P_AOCAPM_ACLK 63
+#define CLK_GOUT_APM_UASC_P_AOCAPM_PCLK 64
+#define CLK_GOUT_APM_UASC_P_APM_ACLK 65
+#define CLK_GOUT_APM_UASC_P_APM_PCLK 66
+#define CLK_GOUT_APM_WDT_APM_PCLK 67
+#define CLK_GOUT_APM_XIU_DP_APM_ACLK 68
+#define CLK_APM_PLL_DIV2_APM 69
+#define CLK_APM_PLL_DIV4_APM 70
+#define CLK_APM_PLL_DIV16_APM 71
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_BUS_USER 1
+#define CLK_MOUT_MISC_SSS_USER 2
+#define CLK_MOUT_MISC_GIC 3
+#define CLK_DOUT_MISC_BUSP 4
+#define CLK_DOUT_MISC_GIC 5
+#define CLK_GOUT_MISC_MISC_CMU_MISC_PCLK 6
+#define CLK_GOUT_MISC_OTP_CON_BIRA_I_OSCCLK 7
+#define CLK_GOUT_MISC_OTP_CON_BISR_I_OSCCLK 8
+#define CLK_GOUT_MISC_OTP_CON_TOP_I_OSCCLK 9
+#define CLK_GOUT_MISC_CLK_MISC_OSCCLK_CLK 10
+#define CLK_GOUT_MISC_ADM_AHB_SSS_HCLKM 11
+#define CLK_GOUT_MISC_AD_APB_DIT_PCLKM 12
+#define CLK_GOUT_MISC_AD_APB_PUF_PCLKM 13
+#define CLK_GOUT_MISC_DIT_ICLKL2A 14
+#define CLK_GOUT_MISC_D_TZPC_MISC_PCLK 15
+#define CLK_GOUT_MISC_GIC_GICCLK 16
+#define CLK_GOUT_MISC_GPC_MISC_PCLK 17
+#define CLK_GOUT_MISC_LHM_AST_ICC_CPUGIC_I_CLK 18
+#define CLK_GOUT_MISC_LHM_AXI_D_SSS_I_CLK 19
+#define CLK_GOUT_MISC_LHM_AXI_P_GIC_I_CLK 20
+#define CLK_GOUT_MISC_LHM_AXI_P_MISC_I_CLK 21
+#define CLK_GOUT_MISC_LHS_ACEL_D_MISC_I_CLK 22
+#define CLK_GOUT_MISC_LHS_AST_IRI_GICCPU_I_CLK 23
+#define CLK_GOUT_MISC_LHS_AXI_D_SSS_I_CLK 24
+#define CLK_GOUT_MISC_MCT_PCLK 25
+#define CLK_GOUT_MISC_OTP_CON_BIRA_PCLK 26
+#define CLK_GOUT_MISC_OTP_CON_BISR_PCLK 27
+#define CLK_GOUT_MISC_OTP_CON_TOP_PCLK 28
+#define CLK_GOUT_MISC_PDMA_ACLK 29
+#define CLK_GOUT_MISC_PPMU_DMA_ACLK 30
+#define CLK_GOUT_MISC_PPMU_MISC_ACLK 31
+#define CLK_GOUT_MISC_PPMU_MISC_PCLK 32
+#define CLK_GOUT_MISC_PUF_I_CLK 33
+#define CLK_GOUT_MISC_QE_DIT_ACLK 34
+#define CLK_GOUT_MISC_QE_DIT_PCLK 35
+#define CLK_GOUT_MISC_QE_PDMA_ACLK 36
+#define CLK_GOUT_MISC_QE_PDMA_PCLK 37
+#define CLK_GOUT_MISC_QE_PPMU_DMA_ACLK 38
+#define CLK_GOUT_MISC_QE_PPMU_DMA_PCLK 39
+#define CLK_GOUT_MISC_QE_RTIC_ACLK 40
+#define CLK_GOUT_MISC_QE_RTIC_PCLK 41
+#define CLK_GOUT_MISC_QE_SPDMA_ACLK 42
+#define CLK_GOUT_MISC_QE_SPDMA_PCLK 43
+#define CLK_GOUT_MISC_QE_SSS_ACLK 44
+#define CLK_GOUT_MISC_QE_SSS_PCLK 45
+#define CLK_GOUT_MISC_CLK_MISC_BUSD_CLK 46
+#define CLK_GOUT_MISC_CLK_MISC_BUSP_CLK 47
+#define CLK_GOUT_MISC_CLK_MISC_GIC_CLK 48
+#define CLK_GOUT_MISC_CLK_MISC_SSS_CLK 49
+#define CLK_GOUT_MISC_RTIC_I_ACLK 50
+#define CLK_GOUT_MISC_RTIC_I_PCLK 51
+#define CLK_GOUT_MISC_SPDMA_ACLK 52
+#define CLK_GOUT_MISC_SSMT_DIT_ACLK 53
+#define CLK_GOUT_MISC_SSMT_DIT_PCLK 54
+#define CLK_GOUT_MISC_SSMT_PDMA_ACLK 55
+#define CLK_GOUT_MISC_SSMT_PDMA_PCLK 56
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_ACLK 57
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_PCLK 58
+#define CLK_GOUT_MISC_SSMT_RTIC_ACLK 59
+#define CLK_GOUT_MISC_SSMT_RTIC_PCLK 60
+#define CLK_GOUT_MISC_SSMT_SPDMA_ACLK 61
+#define CLK_GOUT_MISC_SSMT_SPDMA_PCLK 62
+#define CLK_GOUT_MISC_SSMT_SSS_ACLK 63
+#define CLK_GOUT_MISC_SSMT_SSS_PCLK 64
+#define CLK_GOUT_MISC_SSS_I_ACLK 65
+#define CLK_GOUT_MISC_SSS_I_PCLK 66
+#define CLK_GOUT_MISC_SYSMMU_MISC_CLK_S2 67
+#define CLK_GOUT_MISC_SYSMMU_SSS_CLK_S1 68
+#define CLK_GOUT_MISC_SYSREG_MISC_PCLK 69
+#define CLK_GOUT_MISC_TMU_SUB_PCLK 70
+#define CLK_GOUT_MISC_TMU_TOP_PCLK 71
+#define CLK_GOUT_MISC_WDT_CLUSTER0_PCLK 72
+#define CLK_GOUT_MISC_WDT_CLUSTER1_PCLK 73
+#define CLK_GOUT_MISC_XIU_D_MISC_ACLK 74
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_I3C_USER 2
+#define CLK_MOUT_PERIC0_USI0_UART_USER 3
+#define CLK_MOUT_PERIC0_USI14_USI_USER 4
+#define CLK_MOUT_PERIC0_USI1_USI_USER 5
+#define CLK_MOUT_PERIC0_USI2_USI_USER 6
+#define CLK_MOUT_PERIC0_USI3_USI_USER 7
+#define CLK_MOUT_PERIC0_USI4_USI_USER 8
+#define CLK_MOUT_PERIC0_USI5_USI_USER 9
+#define CLK_MOUT_PERIC0_USI6_USI_USER 10
+#define CLK_MOUT_PERIC0_USI7_USI_USER 11
+#define CLK_MOUT_PERIC0_USI8_USI_USER 12
+#define CLK_DOUT_PERIC0_I3C 13
+#define CLK_DOUT_PERIC0_USI0_UART 14
+#define CLK_DOUT_PERIC0_USI14_USI 15
+#define CLK_DOUT_PERIC0_USI1_USI 16
+#define CLK_DOUT_PERIC0_USI2_USI 17
+#define CLK_DOUT_PERIC0_USI3_USI 18
+#define CLK_DOUT_PERIC0_USI4_USI 19
+#define CLK_DOUT_PERIC0_USI5_USI 20
+#define CLK_DOUT_PERIC0_USI6_USI 21
+#define CLK_DOUT_PERIC0_USI7_USI 22
+#define CLK_DOUT_PERIC0_USI8_USI 23
+#define CLK_GOUT_PERIC0_IP 24
+#define CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK 25
+#define CLK_GOUT_PERIC0_CLK_PERIC0_OSCCLK_CLK 26
+#define CLK_GOUT_PERIC0_D_TZPC_PERIC0_PCLK 27
+#define CLK_GOUT_PERIC0_GPC_PERIC0_PCLK 28
+#define CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK 29
+#define CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK 30
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0 31
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1 32
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10 33
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_11 34
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_12 35
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_13 36
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_14 37
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_15 38
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2 39
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3 40
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4 41
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5 42
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6 43
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7 44
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8 45
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_9 46
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0 47
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1 48
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_10 49
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_11 50
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_12 51
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_13 52
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_14 53
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_15 54
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2 55
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3 56
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4 57
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5 58
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6 59
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7 60
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_8 61
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_9 62
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0 63
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2 64
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0 65
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2 66
+#define CLK_GOUT_PERIC0_CLK_PERIC0_BUSP_CLK 67
+#define CLK_GOUT_PERIC0_CLK_PERIC0_I3C_CLK 68
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI0_UART_CLK 69
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI14_USI_CLK 70
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI1_USI_CLK 71
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI2_USI_CLK 72
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI3_USI_CLK 73
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI4_USI_CLK 74
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI5_USI_CLK 75
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI6_USI_CLK 76
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI7_USI_CLK 77
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI8_USI_CLK 78
+#define CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK 79
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_I3C_USER 2
+#define CLK_MOUT_PERIC1_USI0_USI_USER 3
+#define CLK_MOUT_PERIC1_USI10_USI_USER 4
+#define CLK_MOUT_PERIC1_USI11_USI_USER 5
+#define CLK_MOUT_PERIC1_USI12_USI_USER 6
+#define CLK_MOUT_PERIC1_USI13_USI_USER 7
+#define CLK_MOUT_PERIC1_USI9_USI_USER 8
+#define CLK_DOUT_PERIC1_I3C 9
+#define CLK_DOUT_PERIC1_USI0_USI 10
+#define CLK_DOUT_PERIC1_USI10_USI 11
+#define CLK_DOUT_PERIC1_USI11_USI 12
+#define CLK_DOUT_PERIC1_USI12_USI 13
+#define CLK_DOUT_PERIC1_USI13_USI 14
+#define CLK_DOUT_PERIC1_USI9_USI 15
+#define CLK_GOUT_PERIC1_IP 16
+#define CLK_GOUT_PERIC1_PCLK 17
+#define CLK_GOUT_PERIC1_CLK_PERIC1_I3C_CLK 18
+#define CLK_GOUT_PERIC1_CLK_PERIC1_OSCCLK_CLK 19
+#define CLK_GOUT_PERIC1_D_TZPC_PERIC1_PCLK 20
+#define CLK_GOUT_PERIC1_GPC_PERIC1_PCLK 21
+#define CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK 22
+#define CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK 23
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1 24
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2 25
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3 26
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4 27
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5 28
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6 29
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8 30
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1 31
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_15 32
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2 33
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3 34
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4 35
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5 36
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6 37
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_8 38
+#define CLK_GOUT_PERIC1_CLK_PERIC1_BUSP_CLK 39
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI0_USI_CLK 40
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI10_USI_CLK 41
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI11_USI_CLK 42
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI12_USI_CLK 43
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI13_USI_CLK 44
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI9_USI_CLK 45
+#define CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK 46
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_H */
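
These clock indices are consumed from device tree source rather than from driver code: each value is the index passed in a provider's clock-specifier cell. A minimal consumer sketch, assuming the header is installed as dt-bindings/clock/google,gs101.h (as the include guard suggests); the cmu_apm label, unit address, and clock-names below are illustrative placeholders, not taken from this patch:

    #include <dt-bindings/clock/google,gs101.h>

    serial@17400000 {
            /* hypothetical APM UART consumer */
            clocks = <&cmu_apm CLK_GOUT_APM_APM_USI0_UART_PCLK>,
                     <&cmu_apm CLK_GOUT_APM_APM_USI0_UART_IPCLK>;
            clock-names = "pclk", "ipclk";
    };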
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 4073eb7a9da1..c0ce5e9c4151 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -15,6 +15,8 @@
#define CLKID_FCLK_DIV5 7
#define CLKID_FCLK_DIV7 8
#define CLKID_GP0_PLL 9
+#define CLKID_MPEG_SEL 10
+#define CLKID_MPEG_DIV 11
#define CLKID_CLK81 12
#define CLKID_MPLL0 13
#define CLKID_MPLL1 14
@@ -102,35 +104,92 @@
#define CLKID_SD_EMMC_C 96
#define CLKID_SAR_ADC_CLK 97
#define CLKID_SAR_ADC_SEL 98
+#define CLKID_SAR_ADC_DIV 99
#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0_DIV 101
#define CLKID_MALI_0 102
#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1_DIV 104
#define CLKID_MALI_1 105
#define CLKID_MALI 106
#define CLKID_CTS_AMCLK 107
+#define CLKID_CTS_AMCLK_SEL 108
+#define CLKID_CTS_AMCLK_DIV 109
#define CLKID_CTS_MCLK_I958 110
+#define CLKID_CTS_MCLK_I958_SEL 111
+#define CLKID_CTS_MCLK_I958_DIV 112
#define CLKID_CTS_I958 113
#define CLKID_32K_CLK 114
+#define CLKID_32K_CLK_SEL 115
+#define CLKID_32K_CLK_DIV 116
+#define CLKID_SD_EMMC_A_CLK0_SEL 117
+#define CLKID_SD_EMMC_A_CLK0_DIV 118
#define CLKID_SD_EMMC_A_CLK0 119
+#define CLKID_SD_EMMC_B_CLK0_SEL 120
+#define CLKID_SD_EMMC_B_CLK0_DIV 121
#define CLKID_SD_EMMC_B_CLK0 122
+#define CLKID_SD_EMMC_C_CLK0_SEL 123
+#define CLKID_SD_EMMC_C_CLK0_DIV 124
#define CLKID_SD_EMMC_C_CLK0 125
#define CLKID_VPU_0_SEL 126
+#define CLKID_VPU_0_DIV 127
#define CLKID_VPU_0 128
#define CLKID_VPU_1_SEL 129
+#define CLKID_VPU_1_DIV 130
#define CLKID_VPU_1 131
#define CLKID_VPU 132
#define CLKID_VAPB_0_SEL 133
+#define CLKID_VAPB_0_DIV 134
#define CLKID_VAPB_0 135
#define CLKID_VAPB_1_SEL 136
+#define CLKID_VAPB_1_DIV 137
#define CLKID_VAPB_1 138
#define CLKID_VAPB_SEL 139
#define CLKID_VAPB 140
+#define CLKID_HDMI_PLL_PRE_MULT 141
+#define CLKID_MPLL0_DIV 142
+#define CLKID_MPLL1_DIV 143
+#define CLKID_MPLL2_DIV 144
+#define CLKID_MPLL_PREDIV 145
+#define CLKID_FCLK_DIV2_DIV 146
+#define CLKID_FCLK_DIV3_DIV 147
+#define CLKID_FCLK_DIV4_DIV 148
+#define CLKID_FCLK_DIV5_DIV 149
+#define CLKID_FCLK_DIV7_DIV 150
+#define CLKID_VDEC_1_SEL 151
+#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC_SEL 154
+#define CLKID_VDEC_HEVC_DIV 155
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
#define CLKID_GEN_CLK 159
+#define CLKID_FIXED_PLL_DCO 160
+#define CLKID_HDMI_PLL_DCO 161
+#define CLKID_HDMI_PLL_OD 162
+#define CLKID_HDMI_PLL_OD2 163
+#define CLKID_SYS_PLL_DCO 164
+#define CLKID_GP0_PLL_DCO 165
#define CLKID_VID_PLL 166
+#define CLKID_VID_PLL_SEL 167
+#define CLKID_VID_PLL_DIV 168
+#define CLKID_VCLK_SEL 169
+#define CLKID_VCLK2_SEL 170
+#define CLKID_VCLK_INPUT 171
+#define CLKID_VCLK2_INPUT 172
+#define CLKID_VCLK_DIV 173
+#define CLKID_VCLK2_DIV 174
#define CLKID_VCLK 175
#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV2_EN 177
+#define CLKID_VCLK_DIV4_EN 178
+#define CLKID_VCLK_DIV6_EN 179
+#define CLKID_VCLK_DIV12_EN 180
+#define CLKID_VCLK2_DIV2_EN 181
+#define CLKID_VCLK2_DIV4_EN 182
+#define CLKID_VCLK2_DIV6_EN 183
+#define CLKID_VCLK2_DIV12_EN 184
#define CLKID_VCLK_DIV1 185
#define CLKID_VCLK_DIV2 186
#define CLKID_VCLK_DIV4 187
@@ -141,10 +200,16 @@
#define CLKID_VCLK2_DIV4 192
#define CLKID_VCLK2_DIV6 193
#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI_SEL 195
+#define CLKID_CTS_ENCP_SEL 196
+#define CLKID_CTS_VDAC_SEL 197
+#define CLKID_HDMI_TX_SEL 198
#define CLKID_CTS_ENCI 199
#define CLKID_CTS_ENCP 200
#define CLKID_CTS_VDAC 201
#define CLKID_HDMI_TX 202
+#define CLKID_HDMI_SEL 203
+#define CLKID_HDMI_DIV 204
#define CLKID_HDMI 205
#define CLKID_ACODEC 206
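
Most of these gxbb additions expose _SEL mux and _DIV divider intermediates that were previously internal to the driver; their practical use is pinning a composite clock's parent and rate from DT. A hedged sketch — the clkc label, the mali node, and the chosen parent and rate are illustrative only:

    &mali {
            /* route mali_0 from fclk_div5, then fix the divider output */
            assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
                              <&clkc CLKID_MALI_0_DIV>;
            assigned-clock-parents = <&clkc CLKID_FCLK_DIV5>, <0>;
            assigned-clock-rates = <0>, <400000000>;
    };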
diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h
new file mode 100644
index 000000000000..a4f0e997546c
--- /dev/null
+++ b/include/dt-bindings/clock/hi3559av100-clock.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2020, Huawei Tech. Co., Ltd.
+ *
+ * Author: Dongjiu Geng <gengdongjiu@huawei.com>
+ */
+
+#ifndef __DTS_HI3559AV100_CLOCK_H
+#define __DTS_HI3559AV100_CLOCK_H
+
+/* fixed rate */
+#define HI3559AV100_FIXED_1188M 1
+#define HI3559AV100_FIXED_1000M 2
+#define HI3559AV100_FIXED_842M 3
+#define HI3559AV100_FIXED_792M 4
+#define HI3559AV100_FIXED_750M 5
+#define HI3559AV100_FIXED_710M 6
+#define HI3559AV100_FIXED_680M 7
+#define HI3559AV100_FIXED_667M 8
+#define HI3559AV100_FIXED_631M 9
+#define HI3559AV100_FIXED_600M 10
+#define HI3559AV100_FIXED_568M 11
+#define HI3559AV100_FIXED_500M 12
+#define HI3559AV100_FIXED_475M 13
+#define HI3559AV100_FIXED_428M 14
+#define HI3559AV100_FIXED_400M 15
+#define HI3559AV100_FIXED_396M 16
+#define HI3559AV100_FIXED_300M 17
+#define HI3559AV100_FIXED_250M 18
+#define HI3559AV100_FIXED_198M 19
+#define HI3559AV100_FIXED_187p5M 20
+#define HI3559AV100_FIXED_150M 21
+#define HI3559AV100_FIXED_148p5M 22
+#define HI3559AV100_FIXED_125M 23
+#define HI3559AV100_FIXED_107M 24
+#define HI3559AV100_FIXED_100M 25
+#define HI3559AV100_FIXED_99M 26
+#define HI3559AV100_FIXED_74p25M 27
+#define HI3559AV100_FIXED_72M 28
+#define HI3559AV100_FIXED_60M 29
+#define HI3559AV100_FIXED_54M 30
+#define HI3559AV100_FIXED_50M 31
+#define HI3559AV100_FIXED_49p5M 32
+#define HI3559AV100_FIXED_37p125M 33
+#define HI3559AV100_FIXED_36M 34
+#define HI3559AV100_FIXED_32p4M 35
+#define HI3559AV100_FIXED_27M 36
+#define HI3559AV100_FIXED_25M 37
+#define HI3559AV100_FIXED_24M 38
+#define HI3559AV100_FIXED_12M 39
+#define HI3559AV100_FIXED_3M 40
+#define HI3559AV100_FIXED_1p6M 41
+#define HI3559AV100_FIXED_400K 42
+#define HI3559AV100_FIXED_100K 43
+#define HI3559AV100_FIXED_200M 44
+#define HI3559AV100_FIXED_75M 75
+
+#define HI3559AV100_I2C0_CLK 50
+#define HI3559AV100_I2C1_CLK 51
+#define HI3559AV100_I2C2_CLK 52
+#define HI3559AV100_I2C3_CLK 53
+#define HI3559AV100_I2C4_CLK 54
+#define HI3559AV100_I2C5_CLK 55
+#define HI3559AV100_I2C6_CLK 56
+#define HI3559AV100_I2C7_CLK 57
+#define HI3559AV100_I2C8_CLK 58
+#define HI3559AV100_I2C9_CLK 59
+#define HI3559AV100_I2C10_CLK 60
+#define HI3559AV100_I2C11_CLK 61
+
+#define HI3559AV100_SPI0_CLK 62
+#define HI3559AV100_SPI1_CLK 63
+#define HI3559AV100_SPI2_CLK 64
+#define HI3559AV100_SPI3_CLK 65
+#define HI3559AV100_SPI4_CLK 66
+#define HI3559AV100_SPI5_CLK 67
+#define HI3559AV100_SPI6_CLK 68
+
+#define HI3559AV100_EDMAC_CLK 69
+#define HI3559AV100_EDMAC_AXICLK 70
+#define HI3559AV100_EDMAC1_CLK 71
+#define HI3559AV100_EDMAC1_AXICLK 72
+#define HI3559AV100_VDMAC_CLK 73
+
+/* mux clocks */
+#define HI3559AV100_FMC_MUX 80
+#define HI3559AV100_SYSAPB_MUX 81
+#define HI3559AV100_UART_MUX 82
+#define HI3559AV100_SYSBUS_MUX 83
+#define HI3559AV100_A73_MUX 84
+#define HI3559AV100_MMC0_MUX 85
+#define HI3559AV100_MMC1_MUX 86
+#define HI3559AV100_MMC2_MUX 87
+#define HI3559AV100_MMC3_MUX 88
+
+/* gate clocks */
+#define HI3559AV100_FMC_CLK 90
+#define HI3559AV100_UART0_CLK 91
+#define HI3559AV100_UART1_CLK 92
+#define HI3559AV100_UART2_CLK 93
+#define HI3559AV100_UART3_CLK 94
+#define HI3559AV100_UART4_CLK 95
+#define HI3559AV100_MMC0_CLK 96
+#define HI3559AV100_MMC1_CLK 97
+#define HI3559AV100_MMC2_CLK 98
+#define HI3559AV100_MMC3_CLK 99
+
+#define HI3559AV100_ETH_CLK 100
+#define HI3559AV100_ETH_MACIF_CLK 101
+#define HI3559AV100_ETH1_CLK 102
+#define HI3559AV100_ETH1_MACIF_CLK 103
+
+/* complex */
+#define HI3559AV100_MAC0_CLK 110
+#define HI3559AV100_MAC1_CLK 111
+#define HI3559AV100_SATA_CLK 112
+#define HI3559AV100_USB_CLK 113
+#define HI3559AV100_USB1_CLK 114
+
+/* pll clocks */
+#define HI3559AV100_APLL_CLK 250
+#define HI3559AV100_GPLL_CLK 251
+
+#define HI3559AV100_CRG_NR_CLKS 256
+
+#define HI3559AV100_SHUB_SOURCE_SOC_24M 0
+#define HI3559AV100_SHUB_SOURCE_SOC_200M 1
+#define HI3559AV100_SHUB_SOURCE_SOC_300M 2
+#define HI3559AV100_SHUB_SOURCE_PLL 3
+#define HI3559AV100_SHUB_SOURCE_CLK 4
+
+#define HI3559AV100_SHUB_I2C0_CLK 10
+#define HI3559AV100_SHUB_I2C1_CLK 11
+#define HI3559AV100_SHUB_I2C2_CLK 12
+#define HI3559AV100_SHUB_I2C3_CLK 13
+#define HI3559AV100_SHUB_I2C4_CLK 14
+#define HI3559AV100_SHUB_I2C5_CLK 15
+#define HI3559AV100_SHUB_I2C6_CLK 16
+#define HI3559AV100_SHUB_I2C7_CLK 17
+
+#define HI3559AV100_SHUB_SPI_SOURCE_CLK 20
+#define HI3559AV100_SHUB_SPI4_SOURCE_CLK 21
+#define HI3559AV100_SHUB_SPI0_CLK 22
+#define HI3559AV100_SHUB_SPI1_CLK 23
+#define HI3559AV100_SHUB_SPI2_CLK 24
+#define HI3559AV100_SHUB_SPI3_CLK 25
+#define HI3559AV100_SHUB_SPI4_CLK 26
+
+#define HI3559AV100_SHUB_UART_CLK_32K 30
+#define HI3559AV100_SHUB_UART_SOURCE_CLK 31
+#define HI3559AV100_SHUB_UART_DIV_CLK 32
+#define HI3559AV100_SHUB_UART0_CLK 33
+#define HI3559AV100_SHUB_UART1_CLK 34
+#define HI3559AV100_SHUB_UART2_CLK 35
+#define HI3559AV100_SHUB_UART3_CLK 36
+#define HI3559AV100_SHUB_UART4_CLK 37
+#define HI3559AV100_SHUB_UART5_CLK 38
+#define HI3559AV100_SHUB_UART6_CLK 39
+
+#define HI3559AV100_SHUB_EDMAC_CLK 40
+
+#define HI3559AV100_SHUB_NR_CLKS 50
+
+#endif /* __DTS_HI3559AV100_CLOCK_H */
+
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index e20c43cc36f6..e5b2a1ba02bc 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -273,6 +273,8 @@
#define IMX6QDL_CLK_MMDC_P0_IPG 263
#define IMX6QDL_CLK_DCIC1 264
#define IMX6QDL_CLK_DCIC2 265
-#define IMX6QDL_CLK_END 266
+#define IMX6QDL_CLK_ENET_REF_SEL 266
+#define IMX6QDL_CLK_ENET_REF_PAD 267
+#define IMX6QDL_CLK_END 268
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index f446710fe63d..494fd0c37fb5 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2018 NXP.
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 79094338e6f1..66239ebc0e23 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -256,7 +256,12 @@
#define IMX6UL_CLK_GPIO4 247
#define IMX6UL_CLK_GPIO5 248
#define IMX6UL_CLK_MMDC_P1_IPG 249
+#define IMX6UL_CLK_ENET1_REF_125M 250
+#define IMX6UL_CLK_ENET1_REF_SEL 251
+#define IMX6UL_CLK_ENET1_REF_PAD 252
+#define IMX6UL_CLK_ENET2_REF_SEL 253
+#define IMX6UL_CLK_ENET2_REF_PAD 254
-#define IMX6UL_CLK_END 250
+#define IMX6UL_CLK_END 255
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
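
The new IMX6UL_CLK_ENET*_REF_SEL and _REF_PAD IDs model the ENET reference-clock mux and the clock arriving on the pad, so a board whose PHY supplies the 50 MHz reference can select it from DT. A sketch assuming the customary clks CCM label and a fec1 consumer node (both illustrative here):

    &fec1 {
            /* take the ENET1 reference from the external pad clock */
            assigned-clocks = <&clks IMX6UL_CLK_ENET1_REF_SEL>;
            assigned-clock-parents = <&clks IMX6UL_CLK_ENET1_REF_PAD>;
    };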
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
index 673a8c662340..2242ff54fc5e 100644
--- a/include/dt-bindings/clock/imx8-clock.h
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -7,132 +7,6 @@
#ifndef __DT_BINDINGS_CLOCK_IMX_H
#define __DT_BINDINGS_CLOCK_IMX_H
-/* SCU Clocks */
-
-#define IMX_CLK_DUMMY 0
-
-/* CPU */
-#define IMX_A35_CLK 1
-
-/* LSIO SS */
-#define IMX_LSIO_MEM_CLK 2
-#define IMX_LSIO_BUS_CLK 3
-#define IMX_LSIO_PWM0_CLK 10
-#define IMX_LSIO_PWM1_CLK 11
-#define IMX_LSIO_PWM2_CLK 12
-#define IMX_LSIO_PWM3_CLK 13
-#define IMX_LSIO_PWM4_CLK 14
-#define IMX_LSIO_PWM5_CLK 15
-#define IMX_LSIO_PWM6_CLK 16
-#define IMX_LSIO_PWM7_CLK 17
-#define IMX_LSIO_GPT0_CLK 18
-#define IMX_LSIO_GPT1_CLK 19
-#define IMX_LSIO_GPT2_CLK 20
-#define IMX_LSIO_GPT3_CLK 21
-#define IMX_LSIO_GPT4_CLK 22
-#define IMX_LSIO_FSPI0_CLK 23
-#define IMX_LSIO_FSPI1_CLK 24
-
-/* Connectivity SS */
-#define IMX_CONN_AXI_CLK_ROOT 30
-#define IMX_CONN_AHB_CLK_ROOT 31
-#define IMX_CONN_IPG_CLK_ROOT 32
-#define IMX_CONN_SDHC0_CLK 40
-#define IMX_CONN_SDHC1_CLK 41
-#define IMX_CONN_SDHC2_CLK 42
-#define IMX_CONN_ENET0_ROOT_CLK 43
-#define IMX_CONN_ENET0_BYPASS_CLK 44
-#define IMX_CONN_ENET0_RGMII_CLK 45
-#define IMX_CONN_ENET1_ROOT_CLK 46
-#define IMX_CONN_ENET1_BYPASS_CLK 47
-#define IMX_CONN_ENET1_RGMII_CLK 48
-#define IMX_CONN_GPMI_BCH_IO_CLK 49
-#define IMX_CONN_GPMI_BCH_CLK 50
-#define IMX_CONN_USB2_ACLK 51
-#define IMX_CONN_USB2_BUS_CLK 52
-#define IMX_CONN_USB2_LPM_CLK 53
-
-/* HSIO SS */
-#define IMX_HSIO_AXI_CLK 60
-#define IMX_HSIO_PER_CLK 61
-
-/* Display controller SS */
-#define IMX_DC_AXI_EXT_CLK 70
-#define IMX_DC_AXI_INT_CLK 71
-#define IMX_DC_CFG_CLK 72
-#define IMX_DC0_PLL0_CLK 80
-#define IMX_DC0_PLL1_CLK 81
-#define IMX_DC0_DISP0_CLK 82
-#define IMX_DC0_DISP1_CLK 83
-
-/* MIPI-LVDS SS */
-#define IMX_MIPI_IPG_CLK 90
-#define IMX_MIPI0_PIXEL_CLK 100
-#define IMX_MIPI0_BYPASS_CLK 101
-#define IMX_MIPI0_LVDS_PIXEL_CLK 102
-#define IMX_MIPI0_LVDS_BYPASS_CLK 103
-#define IMX_MIPI0_LVDS_PHY_CLK 104
-#define IMX_MIPI0_I2C0_CLK 105
-#define IMX_MIPI0_I2C1_CLK 106
-#define IMX_MIPI0_PWM0_CLK 107
-#define IMX_MIPI1_PIXEL_CLK 108
-#define IMX_MIPI1_BYPASS_CLK 109
-#define IMX_MIPI1_LVDS_PIXEL_CLK 110
-#define IMX_MIPI1_LVDS_BYPASS_CLK 111
-#define IMX_MIPI1_LVDS_PHY_CLK 112
-#define IMX_MIPI1_I2C0_CLK 113
-#define IMX_MIPI1_I2C1_CLK 114
-#define IMX_MIPI1_PWM0_CLK 115
-
-/* IMG SS */
-#define IMX_IMG_AXI_CLK 120
-#define IMX_IMG_IPG_CLK 121
-#define IMX_IMG_PXL_CLK 122
-
-/* MIPI-CSI SS */
-#define IMX_CSI0_CORE_CLK 130
-#define IMX_CSI0_ESC_CLK 131
-#define IMX_CSI0_PWM0_CLK 132
-#define IMX_CSI0_I2C0_CLK 133
-
-/* PARALLER CSI SS */
-#define IMX_PARALLEL_CSI_DPLL_CLK 140
-#define IMX_PARALLEL_CSI_PIXEL_CLK 141
-#define IMX_PARALLEL_CSI_MCLK_CLK 142
-
-/* VPU SS */
-#define IMX_VPU_ENC_CLK 150
-#define IMX_VPU_DEC_CLK 151
-
-/* GPU SS */
-#define IMX_GPU0_CORE_CLK 160
-#define IMX_GPU0_SHADER_CLK 161
-
-/* ADMA SS */
-#define IMX_ADMA_IPG_CLK_ROOT 165
-#define IMX_ADMA_UART0_CLK 170
-#define IMX_ADMA_UART1_CLK 171
-#define IMX_ADMA_UART2_CLK 172
-#define IMX_ADMA_UART3_CLK 173
-#define IMX_ADMA_SPI0_CLK 174
-#define IMX_ADMA_SPI1_CLK 175
-#define IMX_ADMA_SPI2_CLK 176
-#define IMX_ADMA_SPI3_CLK 177
-#define IMX_ADMA_CAN0_CLK 178
-#define IMX_ADMA_CAN1_CLK 179
-#define IMX_ADMA_CAN2_CLK 180
-#define IMX_ADMA_I2C0_CLK 181
-#define IMX_ADMA_I2C1_CLK 182
-#define IMX_ADMA_I2C2_CLK 183
-#define IMX_ADMA_I2C3_CLK 184
-#define IMX_ADMA_FTM0_CLK 185
-#define IMX_ADMA_FTM1_CLK 186
-#define IMX_ADMA_ADC0_CLK 187
-#define IMX_ADMA_PWM_CLK 188
-#define IMX_ADMA_LCD_CLK 189
-
-#define IMX_SCU_CLK_END 190
-
/* LPCG clocks */
/* LSIO SS LPCG */
@@ -290,4 +164,32 @@
#define IMX_ADMA_LPCG_CLK_END 45
+#define IMX_ADMA_ACM_AUD_CLK0_SEL 0
+#define IMX_ADMA_ACM_AUD_CLK1_SEL 1
+#define IMX_ADMA_ACM_MCLKOUT0_SEL 2
+#define IMX_ADMA_ACM_MCLKOUT1_SEL 3
+#define IMX_ADMA_ACM_ESAI0_MCLK_SEL 4
+#define IMX_ADMA_ACM_ESAI1_MCLK_SEL 5
+#define IMX_ADMA_ACM_GPT0_MUX_CLK_SEL 6
+#define IMX_ADMA_ACM_GPT1_MUX_CLK_SEL 7
+#define IMX_ADMA_ACM_GPT2_MUX_CLK_SEL 8
+#define IMX_ADMA_ACM_GPT3_MUX_CLK_SEL 9
+#define IMX_ADMA_ACM_GPT4_MUX_CLK_SEL 10
+#define IMX_ADMA_ACM_GPT5_MUX_CLK_SEL 11
+#define IMX_ADMA_ACM_SAI0_MCLK_SEL 12
+#define IMX_ADMA_ACM_SAI1_MCLK_SEL 13
+#define IMX_ADMA_ACM_SAI2_MCLK_SEL 14
+#define IMX_ADMA_ACM_SAI3_MCLK_SEL 15
+#define IMX_ADMA_ACM_SAI4_MCLK_SEL 16
+#define IMX_ADMA_ACM_SAI5_MCLK_SEL 17
+#define IMX_ADMA_ACM_SAI6_MCLK_SEL 18
+#define IMX_ADMA_ACM_SAI7_MCLK_SEL 19
+#define IMX_ADMA_ACM_SPDIF0_TX_CLK_SEL 20
+#define IMX_ADMA_ACM_SPDIF1_TX_CLK_SEL 21
+#define IMX_ADMA_ACM_MQS_TX_CLK_SEL 22
+#define IMX_ADMA_ACM_ASRC0_MUX_CLK_SEL 23
+#define IMX_ADMA_ACM_ASRC1_MUX_CLK_SEL 24
+
+#define IMX_ADMA_ACM_CLK_END 25
+
#endif /* __DT_BINDINGS_CLOCK_IMX_H */
diff --git a/include/dt-bindings/clock/imx8-lpcg.h b/include/dt-bindings/clock/imx8-lpcg.h
new file mode 100644
index 000000000000..d202715652c3
--- /dev/null
+++ b/include/dt-bindings/clock/imx8-lpcg.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#define IMX_LPCG_CLK_0 0
+#define IMX_LPCG_CLK_1 4
+#define IMX_LPCG_CLK_2 8
+#define IMX_LPCG_CLK_3 12
+#define IMX_LPCG_CLK_4 16
+#define IMX_LPCG_CLK_5 20
+#define IMX_LPCG_CLK_6 24
+#define IMX_LPCG_CLK_7 28
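
The stride of 4 is deliberate: each LPCG gate is controlled by a small field at a 4-bit-aligned offset within the 32-bit LPCG register, and the binding uses that bit offset, not a sequential index, as the clock-specifier value. Consumer usage then looks like this sketch (provider label, unit address, and clock-names are illustrative):

    mmc@5b010000 {
            clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
                     <&sdhc0_lpcg IMX_LPCG_CLK_0>;
            clock-names = "per", "ipg";
    };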
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index e63a5530aed7..1f768b2eeb1a 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -274,6 +274,13 @@
#define IMX8MM_CLK_A53_CORE 251
-#define IMX8MM_CLK_END 252
+#define IMX8MM_CLK_CLKOUT1_SEL 252
+#define IMX8MM_CLK_CLKOUT1_DIV 253
+#define IMX8MM_CLK_CLKOUT1 254
+#define IMX8MM_CLK_CLKOUT2_SEL 255
+#define IMX8MM_CLK_CLKOUT2_DIV 256
+#define IMX8MM_CLK_CLKOUT2 257
+
+#define IMX8MM_CLK_END 258
#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index 621ea0e87c67..04809edab33c 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -16,40 +16,48 @@
#define IMX8MN_CLK_EXT4 7
#define IMX8MN_AUDIO_PLL1_REF_SEL 8
#define IMX8MN_AUDIO_PLL2_REF_SEL 9
-#define IMX8MN_VIDEO_PLL1_REF_SEL 10
+#define IMX8MN_VIDEO_PLL_REF_SEL 10
+#define IMX8MN_VIDEO_PLL1_REF_SEL IMX8MN_VIDEO_PLL_REF_SEL
#define IMX8MN_DRAM_PLL_REF_SEL 11
#define IMX8MN_GPU_PLL_REF_SEL 12
-#define IMX8MN_VPU_PLL_REF_SEL 13
+#define IMX8MN_M7_ALT_PLL_REF_SEL 13
+#define IMX8MN_VPU_PLL_REF_SEL IMX8MN_M7_ALT_PLL_REF_SEL
#define IMX8MN_ARM_PLL_REF_SEL 14
#define IMX8MN_SYS_PLL1_REF_SEL 15
#define IMX8MN_SYS_PLL2_REF_SEL 16
#define IMX8MN_SYS_PLL3_REF_SEL 17
#define IMX8MN_AUDIO_PLL1 18
#define IMX8MN_AUDIO_PLL2 19
-#define IMX8MN_VIDEO_PLL1 20
+#define IMX8MN_VIDEO_PLL 20
+#define IMX8MN_VIDEO_PLL1 IMX8MN_VIDEO_PLL
#define IMX8MN_DRAM_PLL 21
#define IMX8MN_GPU_PLL 22
-#define IMX8MN_VPU_PLL 23
+#define IMX8MN_M7_ALT_PLL 23
+#define IMX8MN_VPU_PLL IMX8MN_M7_ALT_PLL
#define IMX8MN_ARM_PLL 24
#define IMX8MN_SYS_PLL1 25
#define IMX8MN_SYS_PLL2 26
#define IMX8MN_SYS_PLL3 27
#define IMX8MN_AUDIO_PLL1_BYPASS 28
#define IMX8MN_AUDIO_PLL2_BYPASS 29
-#define IMX8MN_VIDEO_PLL1_BYPASS 30
+#define IMX8MN_VIDEO_PLL_BYPASS 30
+#define IMX8MN_VIDEO_PLL1_BYPASS IMX8MN_VIDEO_PLL_BYPASS
#define IMX8MN_DRAM_PLL_BYPASS 31
#define IMX8MN_GPU_PLL_BYPASS 32
-#define IMX8MN_VPU_PLL_BYPASS 33
+#define IMX8MN_M7_ALT_PLL_BYPASS 33
+#define IMX8MN_VPU_PLL_BYPASS IMX8MN_M7_ALT_PLL_BYPASS
#define IMX8MN_ARM_PLL_BYPASS 34
#define IMX8MN_SYS_PLL1_BYPASS 35
#define IMX8MN_SYS_PLL2_BYPASS 36
#define IMX8MN_SYS_PLL3_BYPASS 37
#define IMX8MN_AUDIO_PLL1_OUT 38
#define IMX8MN_AUDIO_PLL2_OUT 39
-#define IMX8MN_VIDEO_PLL1_OUT 40
+#define IMX8MN_VIDEO_PLL_OUT 40
+#define IMX8MN_VIDEO_PLL1_OUT IMX8MN_VIDEO_PLL_OUT
#define IMX8MN_DRAM_PLL_OUT 41
#define IMX8MN_GPU_PLL_OUT 42
-#define IMX8MN_VPU_PLL_OUT 43
+#define IMX8MN_M7_ALT_PLL_OUT 43
+#define IMX8MN_VPU_PLL_OUT IMX8MN_M7_ALT_PLL_OUT
#define IMX8MN_ARM_PLL_OUT 44
#define IMX8MN_SYS_PLL1_OUT 45
#define IMX8MN_SYS_PLL2_OUT 46
@@ -234,6 +242,29 @@
#define IMX8MN_CLK_A53_CORE 214
-#define IMX8MN_CLK_END 215
+#define IMX8MN_CLK_CLKOUT1_SEL 215
+#define IMX8MN_CLK_CLKOUT1_DIV 216
+#define IMX8MN_CLK_CLKOUT1 217
+#define IMX8MN_CLK_CLKOUT2_SEL 218
+#define IMX8MN_CLK_CLKOUT2_DIV 219
+#define IMX8MN_CLK_CLKOUT2 220
+
+#define IMX8MN_CLK_M7_CORE 221
+
+#define IMX8MN_CLK_GPT_3M 222
+#define IMX8MN_CLK_GPT1 223
+#define IMX8MN_CLK_GPT1_ROOT 224
+#define IMX8MN_CLK_GPT2 225
+#define IMX8MN_CLK_GPT2_ROOT 226
+#define IMX8MN_CLK_GPT3 227
+#define IMX8MN_CLK_GPT3_ROOT 228
+#define IMX8MN_CLK_GPT4 229
+#define IMX8MN_CLK_GPT4_ROOT 230
+#define IMX8MN_CLK_GPT5 231
+#define IMX8MN_CLK_GPT5_ROOT 232
+#define IMX8MN_CLK_GPT6 233
+#define IMX8MN_CLK_GPT6_ROOT 234
+
+#define IMX8MN_CLK_END 235
#endif
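
The imx8mn hunk shows the usual binding-compatibility pattern for renames: what was called VPU_PLL is really the M7 alternate PLL, and VIDEO_PLL1 is the only video PLL, so the corrected names take over the numeric slots while the old spellings stay behind as aliases. Existing DTs keep building unchanged; for example (provider label illustrative):

    /* legacy spelling -- expands to the same specifier cell (40)
     * as the current IMX8MN_VIDEO_PLL_OUT */
    clocks = <&clk IMX8MN_VIDEO_PLL1_OUT>;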
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 7a23f289b27f..7da4243984b2 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -117,7 +117,6 @@
#define IMX8MP_CLK_AUDIO_AHB 108
#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
#define IMX8MP_CLK_IPG_ROOT 110
-#define IMX8MP_CLK_IPG_AUDIO_ROOT 111
#define IMX8MP_CLK_DRAM_ALT 112
#define IMX8MP_CLK_DRAM_APB 113
#define IMX8MP_CLK_VPU_G1 114
@@ -125,14 +124,13 @@
#define IMX8MP_CLK_CAN1 116
#define IMX8MP_CLK_CAN2 117
#define IMX8MP_CLK_MEMREPAIR 118
-#define IMX8MP_CLK_PCIE_PHY 119
#define IMX8MP_CLK_PCIE_AUX 120
#define IMX8MP_CLK_I2C5 121
#define IMX8MP_CLK_I2C6 122
#define IMX8MP_CLK_SAI1 123
#define IMX8MP_CLK_SAI2 124
#define IMX8MP_CLK_SAI3 125
-#define IMX8MP_CLK_SAI4 126
+/* #define IMX8MP_CLK_SAI4 126 */
#define IMX8MP_CLK_SAI5 127
#define IMX8MP_CLK_SAI6 128
#define IMX8MP_CLK_ENET_QOS 129
@@ -180,10 +178,8 @@
#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF 171
#define IMX8MP_CLK_MEDIA_DISP1_PIX 172
#define IMX8MP_CLK_MEDIA_CAM2_PIX 173
-#define IMX8MP_CLK_MEDIA_MIPI_PHY2_REF 174
+#define IMX8MP_CLK_MEDIA_LDB 174
#define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC 175
-#define IMX8MP_CLK_PCIE2_CTRL 176
-#define IMX8MP_CLK_PCIE2_PHY 177
#define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE 178
#define IMX8MP_CLK_ECSPI3 179
#define IMX8MP_CLK_PDM 180
@@ -321,8 +317,25 @@
#define IMX8MP_CLK_AUDIO_AXI 310
#define IMX8MP_CLK_HSIO_AXI 311
#define IMX8MP_CLK_MEDIA_ISP 312
-
-#define IMX8MP_CLK_END 313
+#define IMX8MP_CLK_MEDIA_DISP2_PIX 313
+#define IMX8MP_CLK_CLKOUT1_SEL 314
+#define IMX8MP_CLK_CLKOUT1_DIV 315
+#define IMX8MP_CLK_CLKOUT1 316
+#define IMX8MP_CLK_CLKOUT2_SEL 317
+#define IMX8MP_CLK_CLKOUT2_DIV 318
+#define IMX8MP_CLK_CLKOUT2 319
+#define IMX8MP_CLK_USB_SUSP 320
+#define IMX8MP_CLK_AUDIO_AHB_ROOT IMX8MP_CLK_AUDIO_ROOT
+#define IMX8MP_CLK_AUDIO_AXI_ROOT 321
+#define IMX8MP_CLK_SAI1_ROOT 322
+#define IMX8MP_CLK_SAI2_ROOT 323
+#define IMX8MP_CLK_SAI3_ROOT 324
+#define IMX8MP_CLK_SAI5_ROOT 325
+#define IMX8MP_CLK_SAI6_ROOT 326
+#define IMX8MP_CLK_SAI7_ROOT 327
+#define IMX8MP_CLK_PDM_ROOT 328
+#define IMX8MP_CLK_MEDIA_LDB_ROOT 329
+#define IMX8MP_CLK_END 330
#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
@@ -363,7 +376,6 @@
#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
-#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 9b8045d75b8b..afa74d7ba100 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -405,25 +405,6 @@
#define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
-#define IMX8MQ_SYS1_PLL_40M_CG 267
-#define IMX8MQ_SYS1_PLL_80M_CG 268
-#define IMX8MQ_SYS1_PLL_100M_CG 269
-#define IMX8MQ_SYS1_PLL_133M_CG 270
-#define IMX8MQ_SYS1_PLL_160M_CG 271
-#define IMX8MQ_SYS1_PLL_200M_CG 272
-#define IMX8MQ_SYS1_PLL_266M_CG 273
-#define IMX8MQ_SYS1_PLL_400M_CG 274
-#define IMX8MQ_SYS1_PLL_800M_CG 275
-#define IMX8MQ_SYS2_PLL_50M_CG 276
-#define IMX8MQ_SYS2_PLL_100M_CG 277
-#define IMX8MQ_SYS2_PLL_125M_CG 278
-#define IMX8MQ_SYS2_PLL_166M_CG 279
-#define IMX8MQ_SYS2_PLL_200M_CG 280
-#define IMX8MQ_SYS2_PLL_250M_CG 281
-#define IMX8MQ_SYS2_PLL_333M_CG 282
-#define IMX8MQ_SYS2_PLL_500M_CG 283
-#define IMX8MQ_SYS2_PLL_1000M_CG 284
-
#define IMX8MQ_CLK_GPU_CORE 285
#define IMX8MQ_CLK_GPU_SHADER 286
#define IMX8MQ_CLK_M4_CORE 287
@@ -431,6 +412,20 @@
#define IMX8MQ_CLK_A53_CORE 289
-#define IMX8MQ_CLK_END 290
+#define IMX8MQ_CLK_MON_AUDIO_PLL1_DIV 290
+#define IMX8MQ_CLK_MON_AUDIO_PLL2_DIV 291
+#define IMX8MQ_CLK_MON_VIDEO_PLL1_DIV 292
+#define IMX8MQ_CLK_MON_GPU_PLL_DIV 293
+#define IMX8MQ_CLK_MON_VPU_PLL_DIV 294
+#define IMX8MQ_CLK_MON_ARM_PLL_DIV 295
+#define IMX8MQ_CLK_MON_SYS_PLL1_DIV 296
+#define IMX8MQ_CLK_MON_SYS_PLL2_DIV 297
+#define IMX8MQ_CLK_MON_SYS_PLL3_DIV 298
+#define IMX8MQ_CLK_MON_DRAM_PLL_DIV 299
+#define IMX8MQ_CLK_MON_VIDEO_PLL2_DIV 300
+#define IMX8MQ_CLK_MON_SEL 301
+#define IMX8MQ_CLK_MON_CLK2_OUT 302
+
+#define IMX8MQ_CLK_END 303
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/imx8ulp-clock.h b/include/dt-bindings/clock/imx8ulp-clock.h
new file mode 100644
index 000000000000..827404fadf5c
--- /dev/null
+++ b/include/dt-bindings/clock/imx8ulp-clock.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8ULP_H
+#define __DT_BINDINGS_CLOCK_IMX8ULP_H
+
+#define IMX8ULP_CLK_DUMMY 0
+
+/* CGC1 */
+#define IMX8ULP_CLK_SPLL2 5
+#define IMX8ULP_CLK_SPLL3 6
+#define IMX8ULP_CLK_A35_SEL 7
+#define IMX8ULP_CLK_A35_DIV 8
+#define IMX8ULP_CLK_SPLL2_PRE_SEL 9
+#define IMX8ULP_CLK_SPLL3_PRE_SEL 10
+#define IMX8ULP_CLK_SPLL3_PFD0 11
+#define IMX8ULP_CLK_SPLL3_PFD1 12
+#define IMX8ULP_CLK_SPLL3_PFD2 13
+#define IMX8ULP_CLK_SPLL3_PFD3 14
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1 15
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2 16
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1 17
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2 18
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1 19
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2 20
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1 21
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2 22
+#define IMX8ULP_CLK_NIC_SEL 23
+#define IMX8ULP_CLK_NIC_AD_DIVPLAT 24
+#define IMX8ULP_CLK_NIC_PER_DIVPLAT 25
+#define IMX8ULP_CLK_XBAR_SEL 26
+#define IMX8ULP_CLK_XBAR_AD_DIVPLAT 27
+#define IMX8ULP_CLK_XBAR_DIVBUS 28
+#define IMX8ULP_CLK_XBAR_AD_SLOW 29
+#define IMX8ULP_CLK_SOSC_DIV1 30
+#define IMX8ULP_CLK_SOSC_DIV2 31
+#define IMX8ULP_CLK_SOSC_DIV3 32
+#define IMX8ULP_CLK_FROSC_DIV1 33
+#define IMX8ULP_CLK_FROSC_DIV2 34
+#define IMX8ULP_CLK_FROSC_DIV3 35
+#define IMX8ULP_CLK_SPLL3_VCODIV 36
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1_GATE 37
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2_GATE 38
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1_GATE 39
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2_GATE 40
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1_GATE 41
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2_GATE 42
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1_GATE 43
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2_GATE 44
+#define IMX8ULP_CLK_SOSC_DIV1_GATE 45
+#define IMX8ULP_CLK_SOSC_DIV2_GATE 46
+#define IMX8ULP_CLK_SOSC_DIV3_GATE 47
+#define IMX8ULP_CLK_FROSC_DIV1_GATE 48
+#define IMX8ULP_CLK_FROSC_DIV2_GATE 49
+#define IMX8ULP_CLK_FROSC_DIV3_GATE 50
+#define IMX8ULP_CLK_SAI4_SEL 51
+#define IMX8ULP_CLK_SAI5_SEL 52
+#define IMX8ULP_CLK_AUD_CLK1 53
+#define IMX8ULP_CLK_ARM 54
+#define IMX8ULP_CLK_ENET_TS_SEL 55
+
+#define IMX8ULP_CLK_CGC1_END 56
+
+/* CGC2 */
+#define IMX8ULP_CLK_PLL4_PRE_SEL 0
+#define IMX8ULP_CLK_PLL4 1
+#define IMX8ULP_CLK_PLL4_VCODIV 2
+#define IMX8ULP_CLK_DDR_SEL 3
+#define IMX8ULP_CLK_DDR_DIV 4
+#define IMX8ULP_CLK_LPAV_AXI_SEL 5
+#define IMX8ULP_CLK_LPAV_AXI_DIV 6
+#define IMX8ULP_CLK_LPAV_AHB_DIV 7
+#define IMX8ULP_CLK_LPAV_BUS_DIV 8
+#define IMX8ULP_CLK_PLL4_PFD0 9
+#define IMX8ULP_CLK_PLL4_PFD1 10
+#define IMX8ULP_CLK_PLL4_PFD2 11
+#define IMX8ULP_CLK_PLL4_PFD3 12
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1_GATE 13
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2_GATE 14
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1_GATE 15
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2_GATE 16
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1_GATE 17
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE 18
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE 19
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE 20
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1 21
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2 22
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1 23
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2 24
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1 25
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2 26
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1 27
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2 28
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE 29
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE 30
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3_GATE 31
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1 32
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2 33
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3 34
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1_GATE 35
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2_GATE 36
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3_GATE 37
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1 38
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2 39
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3 40
+#define IMX8ULP_CLK_AUD_CLK2 41
+#define IMX8ULP_CLK_SAI6_SEL 42
+#define IMX8ULP_CLK_SAI7_SEL 43
+#define IMX8ULP_CLK_SPDIF_SEL 44
+#define IMX8ULP_CLK_HIFI_SEL 45
+#define IMX8ULP_CLK_HIFI_DIVCORE 46
+#define IMX8ULP_CLK_HIFI_DIVPLAT 47
+#define IMX8ULP_CLK_DSI_PHY_REF 48
+
+#define IMX8ULP_CLK_CGC2_END 49
+
+/* PCC3 */
+#define IMX8ULP_CLK_WDOG3 0
+#define IMX8ULP_CLK_WDOG4 1
+#define IMX8ULP_CLK_LPIT1 2
+#define IMX8ULP_CLK_TPM4 3
+#define IMX8ULP_CLK_TPM5 4
+#define IMX8ULP_CLK_FLEXIO1 5
+#define IMX8ULP_CLK_I3C2 6
+#define IMX8ULP_CLK_LPI2C4 7
+#define IMX8ULP_CLK_LPI2C5 8
+#define IMX8ULP_CLK_LPUART4 9
+#define IMX8ULP_CLK_LPUART5 10
+#define IMX8ULP_CLK_LPSPI4 11
+#define IMX8ULP_CLK_LPSPI5 12
+#define IMX8ULP_CLK_DMA1_MP 13
+#define IMX8ULP_CLK_DMA1_CH0 14
+#define IMX8ULP_CLK_DMA1_CH1 15
+#define IMX8ULP_CLK_DMA1_CH2 16
+#define IMX8ULP_CLK_DMA1_CH3 17
+#define IMX8ULP_CLK_DMA1_CH4 18
+#define IMX8ULP_CLK_DMA1_CH5 19
+#define IMX8ULP_CLK_DMA1_CH6 20
+#define IMX8ULP_CLK_DMA1_CH7 21
+#define IMX8ULP_CLK_DMA1_CH8 22
+#define IMX8ULP_CLK_DMA1_CH9 23
+#define IMX8ULP_CLK_DMA1_CH10 24
+#define IMX8ULP_CLK_DMA1_CH11 25
+#define IMX8ULP_CLK_DMA1_CH12 26
+#define IMX8ULP_CLK_DMA1_CH13 27
+#define IMX8ULP_CLK_DMA1_CH14 28
+#define IMX8ULP_CLK_DMA1_CH15 29
+#define IMX8ULP_CLK_DMA1_CH16 30
+#define IMX8ULP_CLK_DMA1_CH17 31
+#define IMX8ULP_CLK_DMA1_CH18 32
+#define IMX8ULP_CLK_DMA1_CH19 33
+#define IMX8ULP_CLK_DMA1_CH20 34
+#define IMX8ULP_CLK_DMA1_CH21 35
+#define IMX8ULP_CLK_DMA1_CH22 36
+#define IMX8ULP_CLK_DMA1_CH23 37
+#define IMX8ULP_CLK_DMA1_CH24 38
+#define IMX8ULP_CLK_DMA1_CH25 39
+#define IMX8ULP_CLK_DMA1_CH26 40
+#define IMX8ULP_CLK_DMA1_CH27 41
+#define IMX8ULP_CLK_DMA1_CH28 42
+#define IMX8ULP_CLK_DMA1_CH29 43
+#define IMX8ULP_CLK_DMA1_CH30 44
+#define IMX8ULP_CLK_DMA1_CH31 45
+#define IMX8ULP_CLK_MU3_A 46
+#define IMX8ULP_CLK_MU0_B 47
+
+#define IMX8ULP_CLK_PCC3_END 48
+
+/* PCC4 */
+#define IMX8ULP_CLK_FLEXSPI2 0
+#define IMX8ULP_CLK_TPM6 1
+#define IMX8ULP_CLK_TPM7 2
+#define IMX8ULP_CLK_LPI2C6 3
+#define IMX8ULP_CLK_LPI2C7 4
+#define IMX8ULP_CLK_LPUART6 5
+#define IMX8ULP_CLK_LPUART7 6
+#define IMX8ULP_CLK_SAI4 7
+#define IMX8ULP_CLK_SAI5 8
+#define IMX8ULP_CLK_PCTLE 9
+#define IMX8ULP_CLK_PCTLF 10
+#define IMX8ULP_CLK_USDHC0 11
+#define IMX8ULP_CLK_USDHC1 12
+#define IMX8ULP_CLK_USDHC2 13
+#define IMX8ULP_CLK_USB0 14
+#define IMX8ULP_CLK_USB0_PHY 15
+#define IMX8ULP_CLK_USB1 16
+#define IMX8ULP_CLK_USB1_PHY 17
+#define IMX8ULP_CLK_USB_XBAR 18
+#define IMX8ULP_CLK_ENET 19
+#define IMX8ULP_CLK_SFA1 20
+#define IMX8ULP_CLK_RGPIOE 21
+#define IMX8ULP_CLK_RGPIOF 22
+
+#define IMX8ULP_CLK_PCC4_END 23
+
+/* PCC5 */
+#define IMX8ULP_CLK_TPM8 0
+#define IMX8ULP_CLK_SAI6 1
+#define IMX8ULP_CLK_SAI7 2
+#define IMX8ULP_CLK_SPDIF 3
+#define IMX8ULP_CLK_ISI 4
+#define IMX8ULP_CLK_CSI_REGS 5
+#define IMX8ULP_CLK_PCTLD 6
+#define IMX8ULP_CLK_CSI 7
+#define IMX8ULP_CLK_DSI 8
+#define IMX8ULP_CLK_WDOG5 9
+#define IMX8ULP_CLK_EPDC 10
+#define IMX8ULP_CLK_PXP 11
+#define IMX8ULP_CLK_SFA2 12
+#define IMX8ULP_CLK_GPU2D 13
+#define IMX8ULP_CLK_GPU3D 14
+#define IMX8ULP_CLK_DC_NANO 15
+#define IMX8ULP_CLK_CSI_CLK_UI 16
+#define IMX8ULP_CLK_CSI_CLK_ESC 17
+#define IMX8ULP_CLK_RGPIOD 18
+#define IMX8ULP_CLK_DMA2_MP 19
+#define IMX8ULP_CLK_DMA2_CH0 20
+#define IMX8ULP_CLK_DMA2_CH1 21
+#define IMX8ULP_CLK_DMA2_CH2 22
+#define IMX8ULP_CLK_DMA2_CH3 23
+#define IMX8ULP_CLK_DMA2_CH4 24
+#define IMX8ULP_CLK_DMA2_CH5 25
+#define IMX8ULP_CLK_DMA2_CH6 26
+#define IMX8ULP_CLK_DMA2_CH7 27
+#define IMX8ULP_CLK_DMA2_CH8 28
+#define IMX8ULP_CLK_DMA2_CH9 29
+#define IMX8ULP_CLK_DMA2_CH10 30
+#define IMX8ULP_CLK_DMA2_CH11 31
+#define IMX8ULP_CLK_DMA2_CH12 32
+#define IMX8ULP_CLK_DMA2_CH13 33
+#define IMX8ULP_CLK_DMA2_CH14 34
+#define IMX8ULP_CLK_DMA2_CH15 35
+#define IMX8ULP_CLK_DMA2_CH16 36
+#define IMX8ULP_CLK_DMA2_CH17 37
+#define IMX8ULP_CLK_DMA2_CH18 38
+#define IMX8ULP_CLK_DMA2_CH19 39
+#define IMX8ULP_CLK_DMA2_CH20 40
+#define IMX8ULP_CLK_DMA2_CH21 41
+#define IMX8ULP_CLK_DMA2_CH22 42
+#define IMX8ULP_CLK_DMA2_CH23 43
+#define IMX8ULP_CLK_DMA2_CH24 44
+#define IMX8ULP_CLK_DMA2_CH25 45
+#define IMX8ULP_CLK_DMA2_CH26 46
+#define IMX8ULP_CLK_DMA2_CH27 47
+#define IMX8ULP_CLK_DMA2_CH28 48
+#define IMX8ULP_CLK_DMA2_CH29 49
+#define IMX8ULP_CLK_DMA2_CH30 50
+#define IMX8ULP_CLK_DMA2_CH31 51
+#define IMX8ULP_CLK_MU2_B 52
+#define IMX8ULP_CLK_MU3_B 53
+#define IMX8ULP_CLK_AVD_SIM 54
+#define IMX8ULP_CLK_DSI_TX_ESC 55
+
+#define IMX8ULP_CLK_PCC5_END 56
+
+#endif
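
Worth noting in this header: it defines five independent, zero-based ID spaces (CGC1, CGC2, PCC3, PCC4, PCC5), one per clock-controller node, so a macro is only meaningful when paired with the matching provider phandle. For example (labels and unit addresses illustrative):

    serial@29390010 {
            clocks = <&pcc3 IMX8ULP_CLK_LPUART5>;   /* PCC3 space, index 10 */
    };

    serial@29870010 {
            clocks = <&pcc4 IMX8ULP_CLK_LPUART7>;   /* PCC4 space, index 6 */
    };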
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
new file mode 100644
index 000000000000..787c9e74dc96
--- /dev/null
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX93_CLK_H
+#define __DT_BINDINGS_CLOCK_IMX93_CLK_H
+
+#define IMX93_CLK_DUMMY 0
+#define IMX93_CLK_24M 1
+#define IMX93_CLK_EXT1 2
+#define IMX93_CLK_SYS_PLL_PFD0 3
+#define IMX93_CLK_SYS_PLL_PFD0_DIV2 4
+#define IMX93_CLK_SYS_PLL_PFD1 5
+#define IMX93_CLK_SYS_PLL_PFD1_DIV2 6
+#define IMX93_CLK_SYS_PLL_PFD2 7
+#define IMX93_CLK_SYS_PLL_PFD2_DIV2 8
+#define IMX93_CLK_AUDIO_PLL 9
+#define IMX93_CLK_VIDEO_PLL 10
+#define IMX93_CLK_A55_PERIPH 11
+#define IMX93_CLK_A55_MTR_BUS 12
+#define IMX93_CLK_A55 13
+#define IMX93_CLK_M33 14
+#define IMX93_CLK_BUS_WAKEUP 15
+#define IMX93_CLK_BUS_AON 16
+#define IMX93_CLK_WAKEUP_AXI 17
+#define IMX93_CLK_SWO_TRACE 18
+#define IMX93_CLK_M33_SYSTICK 19
+#define IMX93_CLK_FLEXIO1 20
+#define IMX93_CLK_FLEXIO2 21
+#define IMX93_CLK_LPTMR1 24
+#define IMX93_CLK_LPTMR2 25
+#define IMX93_CLK_TPM2 27
+#define IMX93_CLK_TPM4 29
+#define IMX93_CLK_TPM5 30
+#define IMX93_CLK_TPM6 31
+#define IMX93_CLK_FLEXSPI1 32
+#define IMX93_CLK_CAN1 33
+#define IMX93_CLK_CAN2 34
+#define IMX93_CLK_LPUART1 35
+#define IMX93_CLK_LPUART2 36
+#define IMX93_CLK_LPUART3 37
+#define IMX93_CLK_LPUART4 38
+#define IMX93_CLK_LPUART5 39
+#define IMX93_CLK_LPUART6 40
+#define IMX93_CLK_LPUART7 41
+#define IMX93_CLK_LPUART8 42
+#define IMX93_CLK_LPI2C1 43
+#define IMX93_CLK_LPI2C2 44
+#define IMX93_CLK_LPI2C3 45
+#define IMX93_CLK_LPI2C4 46
+#define IMX93_CLK_LPI2C5 47
+#define IMX93_CLK_LPI2C6 48
+#define IMX93_CLK_LPI2C7 49
+#define IMX93_CLK_LPI2C8 50
+#define IMX93_CLK_LPSPI1 51
+#define IMX93_CLK_LPSPI2 52
+#define IMX93_CLK_LPSPI3 53
+#define IMX93_CLK_LPSPI4 54
+#define IMX93_CLK_LPSPI5 55
+#define IMX93_CLK_LPSPI6 56
+#define IMX93_CLK_LPSPI7 57
+#define IMX93_CLK_LPSPI8 58
+#define IMX93_CLK_I3C1 59
+#define IMX93_CLK_I3C2 60
+#define IMX93_CLK_USDHC1 61
+#define IMX93_CLK_USDHC2 62
+#define IMX93_CLK_USDHC3 63
+#define IMX93_CLK_SAI1 64
+#define IMX93_CLK_SAI2 65
+#define IMX93_CLK_SAI3 66
+#define IMX93_CLK_CCM_CKO1 67
+#define IMX93_CLK_CCM_CKO2 68
+#define IMX93_CLK_CCM_CKO3 69
+#define IMX93_CLK_CCM_CKO4 70
+#define IMX93_CLK_HSIO 71
+#define IMX93_CLK_HSIO_USB_TEST_60M 72
+#define IMX93_CLK_HSIO_ACSCAN_80M 73
+#define IMX93_CLK_HSIO_ACSCAN_480M 74
+#define IMX93_CLK_ML_APB 75
+#define IMX93_CLK_ML 76
+#define IMX93_CLK_MEDIA_AXI 77
+#define IMX93_CLK_MEDIA_APB 78
+#define IMX93_CLK_MEDIA_LDB 79
+#define IMX93_CLK_MEDIA_DISP_PIX 80
+#define IMX93_CLK_CAM_PIX 81
+#define IMX93_CLK_MIPI_TEST_BYTE 82
+#define IMX93_CLK_MIPI_PHY_CFG 83
+#define IMX93_CLK_ADC 84
+#define IMX93_CLK_PDM 85
+#define IMX93_CLK_TSTMR1 86
+#define IMX93_CLK_TSTMR2 87
+#define IMX93_CLK_MQS1 88
+#define IMX93_CLK_MQS2 89
+#define IMX93_CLK_AUDIO_XCVR 90
+#define IMX93_CLK_SPDIF 91
+#define IMX93_CLK_ENET 92
+#define IMX93_CLK_ENET_TIMER1 93
+#define IMX93_CLK_ENET_TIMER2 94
+#define IMX93_CLK_ENET_REF 95
+#define IMX93_CLK_ENET_REF_PHY 96
+#define IMX93_CLK_I3C1_SLOW 97
+#define IMX93_CLK_I3C2_SLOW 98
+#define IMX93_CLK_USB_PHY_BURUNIN 99
+#define IMX93_CLK_PAL_CAME_SCAN 100
+#define IMX93_CLK_A55_GATE 101
+#define IMX93_CLK_CM33_GATE 102
+#define IMX93_CLK_ADC1_GATE 103
+#define IMX93_CLK_WDOG1_GATE 104
+#define IMX93_CLK_WDOG2_GATE 105
+#define IMX93_CLK_WDOG3_GATE 106
+#define IMX93_CLK_WDOG4_GATE 107
+#define IMX93_CLK_WDOG5_GATE 108
+#define IMX93_CLK_SEMA1_GATE 109
+#define IMX93_CLK_SEMA2_GATE 110
+#define IMX93_CLK_MU_A_GATE 111
+#define IMX93_CLK_MU_B_GATE 112
+#define IMX93_CLK_EDMA1_GATE 113
+#define IMX93_CLK_EDMA2_GATE 114
+#define IMX93_CLK_FLEXSPI1_GATE 115
+#define IMX93_CLK_GPIO1_GATE 116
+#define IMX93_CLK_GPIO2_GATE 117
+#define IMX93_CLK_GPIO3_GATE 118
+#define IMX93_CLK_GPIO4_GATE 119
+#define IMX93_CLK_FLEXIO1_GATE 120
+#define IMX93_CLK_FLEXIO2_GATE 121
+#define IMX93_CLK_LPIT1_GATE 122
+#define IMX93_CLK_LPIT2_GATE 123
+#define IMX93_CLK_LPTMR1_GATE 124
+#define IMX93_CLK_LPTMR2_GATE 125
+#define IMX93_CLK_TPM1_GATE 126
+#define IMX93_CLK_TPM2_GATE 127
+#define IMX93_CLK_TPM3_GATE 128
+#define IMX93_CLK_TPM4_GATE 129
+#define IMX93_CLK_TPM5_GATE 130
+#define IMX93_CLK_TPM6_GATE 131
+#define IMX93_CLK_CAN1_GATE 132
+#define IMX93_CLK_CAN2_GATE 133
+#define IMX93_CLK_LPUART1_GATE 134
+#define IMX93_CLK_LPUART2_GATE 135
+#define IMX93_CLK_LPUART3_GATE 136
+#define IMX93_CLK_LPUART4_GATE 137
+#define IMX93_CLK_LPUART5_GATE 138
+#define IMX93_CLK_LPUART6_GATE 139
+#define IMX93_CLK_LPUART7_GATE 140
+#define IMX93_CLK_LPUART8_GATE 141
+#define IMX93_CLK_LPI2C1_GATE 142
+#define IMX93_CLK_LPI2C2_GATE 143
+#define IMX93_CLK_LPI2C3_GATE 144
+#define IMX93_CLK_LPI2C4_GATE 145
+#define IMX93_CLK_LPI2C5_GATE 146
+#define IMX93_CLK_LPI2C6_GATE 147
+#define IMX93_CLK_LPI2C7_GATE 148
+#define IMX93_CLK_LPI2C8_GATE 149
+#define IMX93_CLK_LPSPI1_GATE 150
+#define IMX93_CLK_LPSPI2_GATE 151
+#define IMX93_CLK_LPSPI3_GATE 152
+#define IMX93_CLK_LPSPI4_GATE 153
+#define IMX93_CLK_LPSPI5_GATE 154
+#define IMX93_CLK_LPSPI6_GATE 155
+#define IMX93_CLK_LPSPI7_GATE 156
+#define IMX93_CLK_LPSPI8_GATE 157
+#define IMX93_CLK_I3C1_GATE 158
+#define IMX93_CLK_I3C2_GATE 159
+#define IMX93_CLK_USDHC1_GATE 160
+#define IMX93_CLK_USDHC2_GATE 161
+#define IMX93_CLK_USDHC3_GATE 162
+#define IMX93_CLK_SAI1_GATE 163
+#define IMX93_CLK_SAI2_GATE 164
+#define IMX93_CLK_SAI3_GATE 165
+#define IMX93_CLK_MIPI_CSI_GATE 166
+#define IMX93_CLK_MIPI_DSI_GATE 167
+#define IMX93_CLK_LVDS_GATE 168
+#define IMX93_CLK_LCDIF_GATE 169
+#define IMX93_CLK_PXP_GATE 170
+#define IMX93_CLK_ISI_GATE 171
+#define IMX93_CLK_NIC_MEDIA_GATE 172
+#define IMX93_CLK_USB_CONTROLLER_GATE 173
+#define IMX93_CLK_USB_TEST_60M_GATE 174
+#define IMX93_CLK_HSIO_TROUT_24M_GATE 175
+#define IMX93_CLK_PDM_GATE 176
+#define IMX93_CLK_MQS1_GATE 177
+#define IMX93_CLK_MQS2_GATE 178
+#define IMX93_CLK_AUD_XCVR_GATE 179
+#define IMX93_CLK_SPDIF_GATE 180
+#define IMX93_CLK_HSIO_32K_GATE 181
+#define IMX93_CLK_ENET1_GATE 182
+#define IMX93_CLK_ENET_QOS_GATE 183
+#define IMX93_CLK_SYS_CNT_GATE 184
+#define IMX93_CLK_TSTMR1_GATE 185
+#define IMX93_CLK_TSTMR2_GATE 186
+#define IMX93_CLK_TMC_GATE 187
+#define IMX93_CLK_PMRO_GATE 188
+#define IMX93_CLK_32K 189
+#define IMX93_CLK_SAI1_IPG 190
+#define IMX93_CLK_SAI2_IPG 191
+#define IMX93_CLK_SAI3_IPG 192
+#define IMX93_CLK_MU1_A_GATE 193
+#define IMX93_CLK_MU1_B_GATE 194
+#define IMX93_CLK_MU2_A_GATE 195
+#define IMX93_CLK_MU2_B_GATE 196
+#define IMX93_CLK_NIC_AXI 197
+#define IMX93_CLK_ARM_PLL 198
+#define IMX93_CLK_A55_SEL 199
+#define IMX93_CLK_A55_CORE 200
+#define IMX93_CLK_PDM_IPG 201
+#define IMX93_CLK_END 202
+
+#endif
diff --git a/include/dt-bindings/clock/imxrt1050-clock.h b/include/dt-bindings/clock/imxrt1050-clock.h
new file mode 100644
index 000000000000..93bef0832d16
--- /dev/null
+++ b/include/dt-bindings/clock/imxrt1050-clock.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright(C) 2019
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMXRT1050_H
+#define __DT_BINDINGS_CLOCK_IMXRT1050_H
+
+#define IMXRT1050_CLK_DUMMY 0
+#define IMXRT1050_CLK_CKIL 1
+#define IMXRT1050_CLK_CKIH 2
+#define IMXRT1050_CLK_OSC 3
+#define IMXRT1050_CLK_PLL2_PFD0_352M 4
+#define IMXRT1050_CLK_PLL2_PFD1_594M 5
+#define IMXRT1050_CLK_PLL2_PFD2_396M 6
+#define IMXRT1050_CLK_PLL3_PFD0_720M 7
+#define IMXRT1050_CLK_PLL3_PFD1_664_62M 8
+#define IMXRT1050_CLK_PLL3_PFD2_508_24M 9
+#define IMXRT1050_CLK_PLL3_PFD3_454_74M 10
+#define IMXRT1050_CLK_PLL2_198M 11
+#define IMXRT1050_CLK_PLL3_120M 12
+#define IMXRT1050_CLK_PLL3_80M 13
+#define IMXRT1050_CLK_PLL3_60M 14
+#define IMXRT1050_CLK_PLL1_BYPASS 15
+#define IMXRT1050_CLK_PLL2_BYPASS 16
+#define IMXRT1050_CLK_PLL3_BYPASS 17
+#define IMXRT1050_CLK_PLL5_BYPASS 19
+#define IMXRT1050_CLK_PLL1_REF_SEL 20
+#define IMXRT1050_CLK_PLL2_REF_SEL 21
+#define IMXRT1050_CLK_PLL3_REF_SEL 22
+#define IMXRT1050_CLK_PLL5_REF_SEL 23
+#define IMXRT1050_CLK_PRE_PERIPH_SEL 24
+#define IMXRT1050_CLK_PERIPH_SEL 25
+#define IMXRT1050_CLK_SEMC_ALT_SEL 26
+#define IMXRT1050_CLK_SEMC_SEL 27
+#define IMXRT1050_CLK_USDHC1_SEL 28
+#define IMXRT1050_CLK_USDHC2_SEL 29
+#define IMXRT1050_CLK_LPUART_SEL 30
+#define IMXRT1050_CLK_LCDIF_SEL 31
+#define IMXRT1050_CLK_VIDEO_POST_DIV_SEL 32
+#define IMXRT1050_CLK_VIDEO_DIV 33
+#define IMXRT1050_CLK_ARM_PODF 34
+#define IMXRT1050_CLK_LPUART_PODF 35
+#define IMXRT1050_CLK_USDHC1_PODF 36
+#define IMXRT1050_CLK_USDHC2_PODF 37
+#define IMXRT1050_CLK_SEMC_PODF 38
+#define IMXRT1050_CLK_AHB_PODF 39
+#define IMXRT1050_CLK_LCDIF_PRED 40
+#define IMXRT1050_CLK_LCDIF_PODF 41
+#define IMXRT1050_CLK_USDHC1 42
+#define IMXRT1050_CLK_USDHC2 43
+#define IMXRT1050_CLK_LPUART1 44
+#define IMXRT1050_CLK_SEMC 45
+#define IMXRT1050_CLK_LCDIF_APB 46
+#define IMXRT1050_CLK_PLL1_ARM 47
+#define IMXRT1050_CLK_PLL2_SYS 48
+#define IMXRT1050_CLK_PLL3_USB_OTG 49
+#define IMXRT1050_CLK_PLL4_AUDIO 50
+#define IMXRT1050_CLK_PLL5_VIDEO 51
+#define IMXRT1050_CLK_PLL6_ENET 52
+#define IMXRT1050_CLK_PLL7_USB_HOST 53
+#define IMXRT1050_CLK_LCDIF_PIX 54
+#define IMXRT1050_CLK_USBOH3 55
+#define IMXRT1050_CLK_IPG_PDOF 56
+#define IMXRT1050_CLK_PER_CLK_SEL 57
+#define IMXRT1050_CLK_PER_PDOF 58
+#define IMXRT1050_CLK_DMA 59
+#define IMXRT1050_CLK_DMA_MUX 60
+#define IMXRT1050_CLK_END 61
+
+#endif /* __DT_BINDINGS_CLOCK_IMXRT1050_H */
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
index 31f1ab0fe42c..31f1ab0fe42c 100644
--- a/include/dt-bindings/clock/jz4725b-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
index e82d77028581..e82d77028581 100644
--- a/include/dt-bindings/clock/jz4740-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
diff --git a/include/dt-bindings/clock/ingenic,jz4755-cgu.h b/include/dt-bindings/clock/ingenic,jz4755-cgu.h
new file mode 100644
index 000000000000..10098494e7df
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4755-cgu.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides clock numbers for the ingenic,jz4755-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4755_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4755_CGU_H__
+
+#define JZ4755_CLK_EXT 0
+#define JZ4755_CLK_OSC32K 1
+#define JZ4755_CLK_PLL 2
+#define JZ4755_CLK_PLL_HALF 3
+#define JZ4755_CLK_EXT_HALF 4
+#define JZ4755_CLK_CCLK 5
+#define JZ4755_CLK_H0CLK 6
+#define JZ4755_CLK_PCLK 7
+#define JZ4755_CLK_MCLK 8
+#define JZ4755_CLK_H1CLK 9
+#define JZ4755_CLK_UDC 10
+#define JZ4755_CLK_LCD 11
+#define JZ4755_CLK_UART0 12
+#define JZ4755_CLK_UART1 13
+#define JZ4755_CLK_UART2 14
+#define JZ4755_CLK_DMA 15
+#define JZ4755_CLK_MMC 16
+#define JZ4755_CLK_MMC0 17
+#define JZ4755_CLK_MMC1 18
+#define JZ4755_CLK_EXT512 19
+#define JZ4755_CLK_RTC 20
+#define JZ4755_CLK_UDC_PHY 21
+#define JZ4755_CLK_I2S 22
+#define JZ4755_CLK_SPI 23
+#define JZ4755_CLK_AIC 24
+#define JZ4755_CLK_ADC 25
+#define JZ4755_CLK_TCU 26
+#define JZ4755_CLK_BCH 27
+#define JZ4755_CLK_I2C 28
+#define JZ4755_CLK_TVE 29
+#define JZ4755_CLK_CIM 30
+#define JZ4755_CLK_AUX_CPU 31
+#define JZ4755_CLK_AHB1 32
+#define JZ4755_CLK_IDCT 33
+#define JZ4755_CLK_DB 34
+#define JZ4755_CLK_ME 35
+#define JZ4755_CLK_MC 36
+#define JZ4755_CLK_TSSI 37
+#define JZ4755_CLK_IPU 38
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4755_CGU_H__ */
diff --git a/include/dt-bindings/clock/ingenic,jz4760-cgu.h b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
new file mode 100644
index 000000000000..9fb04ebac6de
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4760-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4760_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4760_CGU_H__
+
+#define JZ4760_CLK_EXT 0
+#define JZ4760_CLK_OSC32K 1
+#define JZ4760_CLK_PLL0 2
+#define JZ4760_CLK_PLL0_HALF 3
+#define JZ4760_CLK_PLL1 4
+#define JZ4760_CLK_CCLK 5
+#define JZ4760_CLK_HCLK 6
+#define JZ4760_CLK_SCLK 7
+#define JZ4760_CLK_H2CLK 8
+#define JZ4760_CLK_MCLK 9
+#define JZ4760_CLK_PCLK 10
+#define JZ4760_CLK_MMC_MUX 11
+#define JZ4760_CLK_MMC0 12
+#define JZ4760_CLK_MMC1 13
+#define JZ4760_CLK_MMC2 14
+#define JZ4760_CLK_CIM 15
+#define JZ4760_CLK_UHC 16
+#define JZ4760_CLK_GPU 17
+#define JZ4760_CLK_GPS 18
+#define JZ4760_CLK_SSI_MUX 19
+#define JZ4760_CLK_PCM 20
+#define JZ4760_CLK_I2S 21
+#define JZ4760_CLK_OTG 22
+#define JZ4760_CLK_SSI0 23
+#define JZ4760_CLK_SSI1 24
+#define JZ4760_CLK_SSI2 25
+#define JZ4760_CLK_DMA 26
+#define JZ4760_CLK_I2C0 27
+#define JZ4760_CLK_I2C1 28
+#define JZ4760_CLK_UART0 29
+#define JZ4760_CLK_UART1 30
+#define JZ4760_CLK_UART2 31
+#define JZ4760_CLK_UART3 32
+#define JZ4760_CLK_IPU 33
+#define JZ4760_CLK_ADC 34
+#define JZ4760_CLK_AIC 35
+#define JZ4760_CLK_VPU 36
+#define JZ4760_CLK_UHC_PHY 37
+#define JZ4760_CLK_OTG_PHY 38
+#define JZ4760_CLK_EXT512 39
+#define JZ4760_CLK_RTC 40
+#define JZ4760_CLK_LPCLK_DIV 41
+#define JZ4760_CLK_TVE 42
+#define JZ4760_CLK_LPCLK 43
+#define JZ4760_CLK_MDMA 44
+#define JZ4760_CLK_BDMA 45
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ */
diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
index d68a7695a1f8..0b475e8ae321 100644
--- a/include/dt-bindings/clock/jz4770-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
@@ -54,5 +54,6 @@
#define JZ4770_CLK_OTG_PHY 45
#define JZ4770_CLK_EXT512 46
#define JZ4770_CLK_RTC 47
+#define JZ4770_CLK_BDMA 48

#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
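Note the hunk only appends: JZ4770_CLK_BDMA takes the next free value instead of renumbering, so device trees compiled against the old header keep resolving correctly. A small sketch of why that matters on the driver side (the count macro below is hypothetical, not from the kernel):

#include <dt-bindings/clock/ingenic,jz4770-cgu.h>

/* Hypothetical driver-private count: because binding IDs are
 * append-only, "highest ID + 1" stays a valid table size across
 * binding updates. */
#define JZ4770_NR_CLKS	(JZ4770_CLK_BDMA + 1)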
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
index 85cf8eb5081b..85cf8eb5081b 100644
--- a/include/dt-bindings/clock/jz4780-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h
index 9ac88e90babf..d7aa42c08ded 100644
--- a/include/dt-bindings/clock/ingenic,sysost.h
+++ b/include/dt-bindings/clock/ingenic,sysost.h
@@ -1,12 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * This header provides clock numbers for the ingenic,tcu DT binding.
+ * This header provides clock numbers for the Ingenic OST DT binding.
 */

#ifndef __DT_BINDINGS_CLOCK_INGENIC_OST_H__
#define __DT_BINDINGS_CLOCK_INGENIC_OST_H__

-#define OST_CLK_PERCPU_TIMER 0
-#define OST_CLK_GLOBAL_TIMER 1
+#define OST_CLK_PERCPU_TIMER 1
+#define OST_CLK_GLOBAL_TIMER 0
+#define OST_CLK_PERCPU_TIMER0 1
+#define OST_CLK_PERCPU_TIMER1 2
+#define OST_CLK_PERCPU_TIMER2 3
+#define OST_CLK_PERCPU_TIMER3 4
+
+#define OST_CLK_EVENT_TIMER 1
+
+#define OST_CLK_EVENT_TIMER0 0
+#define OST_CLK_EVENT_TIMER1 1
+#define OST_CLK_EVENT_TIMER2 2
+#define OST_CLK_EVENT_TIMER3 3
+#define OST_CLK_EVENT_TIMER4 4
+#define OST_CLK_EVENT_TIMER5 5
+#define OST_CLK_EVENT_TIMER6 6
+#define OST_CLK_EVENT_TIMER7 7
+#define OST_CLK_EVENT_TIMER8 8
+#define OST_CLK_EVENT_TIMER9 9
+#define OST_CLK_EVENT_TIMER10 10
+#define OST_CLK_EVENT_TIMER11 11
+#define OST_CLK_EVENT_TIMER12 12
+#define OST_CLK_EVENT_TIMER13 13
+#define OST_CLK_EVENT_TIMER14 14
+#define OST_CLK_EVENT_TIMER15 15

#endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
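This hunk is unusual in that it renumbers existing IDs: OST_CLK_PERCPU_TIMER and OST_CLK_GLOBAL_TIMER swap values, and the old names become aliases into the expanded per-CPU and event-timer lists, so DTBs and the driver must move in lockstep. A compile-time sketch of the aliasing a consumer may rely on (illustrative only):

#include <linux/build_bug.h>
#include <dt-bindings/clock/ingenic,sysost.h>

/* Pin down the equivalences introduced by the renumbering above. */
static_assert(OST_CLK_GLOBAL_TIMER == 0);
static_assert(OST_CLK_PERCPU_TIMER == OST_CLK_PERCPU_TIMER0);
static_assert(OST_CLK_EVENT_TIMER == OST_CLK_EVENT_TIMER1);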
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/ingenic,x1000-cgu.h
index f187e0719fd3..78daf44b3514 100644
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ b/include/dt-bindings/clock/ingenic,x1000-cgu.h
@@ -50,5 +50,9 @@
#define X1000_CLK_PDMA 35
#define X1000_CLK_EXCLK_DIV512 36
#define X1000_CLK_RTC 37
+#define X1000_CLK_AIC 38
+#define X1000_CLK_I2SPLLMUX 39
+#define X1000_CLK_I2SPLL 40
+#define X1000_CLK_I2S 41

#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/ingenic,x1830-cgu.h
index 88455376a950..88455376a950 100644
--- a/include/dt-bindings/clock/x1830-cgu.h
+++ b/include/dt-bindings/clock/ingenic,x1830-cgu.h
diff --git a/include/dt-bindings/clock/intel,agilex5-clkmgr.h b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
new file mode 100644
index 000000000000..2f3a23b31c5c
--- /dev/null
+++ b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ */
+
+#ifndef __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+#define __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+
+/* fixed rate clocks */
+#define AGILEX5_OSC1 0
+#define AGILEX5_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX5_CB_INTOSC_LS_CLK 2
+#define AGILEX5_F2S_FREE_CLK 3
+
+/* PLL clocks */
+#define AGILEX5_MAIN_PLL_CLK 4
+#define AGILEX5_MAIN_PLL_C0_CLK 5
+#define AGILEX5_MAIN_PLL_C1_CLK 6
+#define AGILEX5_MAIN_PLL_C2_CLK 7
+#define AGILEX5_MAIN_PLL_C3_CLK 8
+#define AGILEX5_PERIPH_PLL_CLK 9
+#define AGILEX5_PERIPH_PLL_C0_CLK 10
+#define AGILEX5_PERIPH_PLL_C1_CLK 11
+#define AGILEX5_PERIPH_PLL_C2_CLK 12
+#define AGILEX5_PERIPH_PLL_C3_CLK 13
+#define AGILEX5_CORE0_FREE_CLK 14
+#define AGILEX5_CORE1_FREE_CLK 15
+#define AGILEX5_CORE2_FREE_CLK 16
+#define AGILEX5_CORE3_FREE_CLK 17
+#define AGILEX5_DSU_FREE_CLK 18
+#define AGILEX5_BOOT_CLK 19
+
+/* fixed factor clocks */
+#define AGILEX5_L3_MAIN_FREE_CLK 20
+#define AGILEX5_NOC_FREE_CLK 21
+#define AGILEX5_S2F_USR0_CLK 22
+#define AGILEX5_NOC_CLK 23
+#define AGILEX5_EMAC_A_FREE_CLK 24
+#define AGILEX5_EMAC_B_FREE_CLK 25
+#define AGILEX5_EMAC_PTP_FREE_CLK 26
+#define AGILEX5_GPIO_DB_FREE_CLK 27
+#define AGILEX5_S2F_USER0_FREE_CLK 28
+#define AGILEX5_S2F_USER1_FREE_CLK 29
+#define AGILEX5_PSI_REF_FREE_CLK 30
+#define AGILEX5_USB31_FREE_CLK 31
+
+/* Gate clocks */
+#define AGILEX5_CORE0_CLK 32
+#define AGILEX5_CORE1_CLK 33
+#define AGILEX5_CORE2_CLK 34
+#define AGILEX5_CORE3_CLK 35
+#define AGILEX5_MPU_CLK 36
+#define AGILEX5_MPU_PERIPH_CLK 37
+#define AGILEX5_MPU_CCU_CLK 38
+#define AGILEX5_L4_MAIN_CLK 39
+#define AGILEX5_L4_MP_CLK 40
+#define AGILEX5_L4_SYS_FREE_CLK 41
+#define AGILEX5_L4_SP_CLK 42
+#define AGILEX5_CS_AT_CLK 43
+#define AGILEX5_CS_TRACE_CLK 44
+#define AGILEX5_CS_PDBG_CLK 45
+#define AGILEX5_EMAC0_CLK 46
+#define AGILEX5_EMAC1_CLK 47
+#define AGILEX5_EMAC2_CLK 48
+#define AGILEX5_EMAC_PTP_CLK 49
+#define AGILEX5_GPIO_DB_CLK 50
+#define AGILEX5_S2F_USER0_CLK 51
+#define AGILEX5_S2F_USER1_CLK 52
+#define AGILEX5_PSI_REF_CLK 53
+#define AGILEX5_USB31_SUSPEND_CLK 54
+#define AGILEX5_USB31_BUS_CLK_EARLY 55
+#define AGILEX5_USB2OTG_HCLK 56
+#define AGILEX5_SPIM_0_CLK 57
+#define AGILEX5_SPIM_1_CLK 58
+#define AGILEX5_SPIS_0_CLK 59
+#define AGILEX5_SPIS_1_CLK 60
+#define AGILEX5_DMA_CORE_CLK 61
+#define AGILEX5_DMA_HS_CLK 62
+#define AGILEX5_I3C_0_CORE_CLK 63
+#define AGILEX5_I3C_1_CORE_CLK 64
+#define AGILEX5_I2C_0_PCLK 65
+#define AGILEX5_I2C_1_PCLK 66
+#define AGILEX5_I2C_EMAC0_PCLK 67
+#define AGILEX5_I2C_EMAC1_PCLK 68
+#define AGILEX5_I2C_EMAC2_PCLK 69
+#define AGILEX5_UART_0_PCLK 70
+#define AGILEX5_UART_1_PCLK 71
+#define AGILEX5_SPTIMER_0_PCLK 72
+#define AGILEX5_SPTIMER_1_PCLK 73
+#define AGILEX5_DFI_CLK 74
+#define AGILEX5_NAND_NF_CLK 75
+#define AGILEX5_NAND_BCH_CLK 76
+#define AGILEX5_SDMMC_SDPHY_REG_CLK 77
+#define AGILEX5_SDMCLK 78
+#define AGILEX5_SOFTPHY_REG_PCLK 79
+#define AGILEX5_SOFTPHY_PHY_CLK 80
+#define AGILEX5_SOFTPHY_CTRL_CLK 81
+#define AGILEX5_NUM_CLKS 82
+
+#endif /* __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H */
diff --git a/include/dt-bindings/clock/k210-clk.h b/include/dt-bindings/clock/k210-clk.h
index 5a2fd64d1a49..b2de702cbf75 100644
--- a/include/dt-bindings/clock/k210-clk.h
+++ b/include/dt-bindings/clock/k210-clk.h
@@ -3,18 +3,51 @@
* Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
-#ifndef K210_CLK_H
-#define K210_CLK_H
+#ifndef CLOCK_K210_CLK_H
+#define CLOCK_K210_CLK_H

/*
- * Arbitrary identifiers for clocks.
- * The structure is: in0 -> pll0 -> aclk -> cpu
- *
- * Since we use the hardware defaults for now, set all these to the same clock.
+ * Kendryte K210 SoC clock identifiers (arbitrary values).
*/
-#define K210_CLK_PLL0 0
-#define K210_CLK_PLL1 0
-#define K210_CLK_ACLK 0
-#define K210_CLK_CPU 0
+#define K210_CLK_CPU 0
+#define K210_CLK_SRAM0 1
+#define K210_CLK_SRAM1 2
+#define K210_CLK_AI 3
+#define K210_CLK_DMA 4
+#define K210_CLK_FFT 5
+#define K210_CLK_ROM 6
+#define K210_CLK_DVP 7
+#define K210_CLK_APB0 8
+#define K210_CLK_APB1 9
+#define K210_CLK_APB2 10
+#define K210_CLK_I2S0 11
+#define K210_CLK_I2S1 12
+#define K210_CLK_I2S2 13
+#define K210_CLK_I2S0_M 14
+#define K210_CLK_I2S1_M 15
+#define K210_CLK_I2S2_M 16
+#define K210_CLK_WDT0 17
+#define K210_CLK_WDT1 18
+#define K210_CLK_SPI0 19
+#define K210_CLK_SPI1 20
+#define K210_CLK_SPI2 21
+#define K210_CLK_I2C0 22
+#define K210_CLK_I2C1 23
+#define K210_CLK_I2C2 24
+#define K210_CLK_SPI3 25
+#define K210_CLK_TIMER0 26
+#define K210_CLK_TIMER1 27
+#define K210_CLK_TIMER2 28
+#define K210_CLK_GPIO 29
+#define K210_CLK_UART1 30
+#define K210_CLK_UART2 31
+#define K210_CLK_UART3 32
+#define K210_CLK_FPIOA 33
+#define K210_CLK_SHA 34
+#define K210_CLK_AES 35
+#define K210_CLK_OTP 36
+#define K210_CLK_RTC 37
+
+#define K210_NUM_CLKS 38

-#endif /* K210_CLK_H */
+#endif /* CLOCK_K210_CLK_H */
diff --git a/include/dt-bindings/clk/lochnagar.h b/include/dt-bindings/clock/lochnagar.h
index 8fa20551ff17..8fa20551ff17 100644
--- a/include/dt-bindings/clk/lochnagar.h
+++ b/include/dt-bindings/clock/lochnagar.h
diff --git a/include/dt-bindings/clock/loongson,ls1x-clk.h b/include/dt-bindings/clock/loongson,ls1x-clk.h
new file mode 100644
index 000000000000..d400e9ac6002
--- /dev/null
+++ b/include/dt-bindings/clock/loongson,ls1x-clk.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Loongson-1 clock tree IDs
+ *
+ * Copyright (C) 2023 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_LS1X_CLK_H__
+#define __DT_BINDINGS_CLOCK_LS1X_CLK_H__
+
+#define LS1X_CLKID_PLL 0
+#define LS1X_CLKID_CPU 1
+#define LS1X_CLKID_DC 2
+#define LS1X_CLKID_AHB 3
+#define LS1X_CLKID_APB 4
+
+#define CLK_NR_CLKS (LS1X_CLKID_APB + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_LS1X_CLK_H__ */
diff --git a/include/dt-bindings/clock/loongson,ls2k-clk.h b/include/dt-bindings/clock/loongson,ls2k-clk.h
new file mode 100644
index 000000000000..3bc4dfc193c2
--- /dev/null
+++ b/include/dt-bindings/clock/loongson,ls2k-clk.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Author: Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_LOONGSON2_H
+#define __DT_BINDINGS_CLOCK_LOONGSON2_H
+
+#define LOONGSON2_REF_100M 0
+#define LOONGSON2_NODE_PLL 1
+#define LOONGSON2_DDR_PLL 2
+#define LOONGSON2_DC_PLL 3
+#define LOONGSON2_PIX0_PLL 4
+#define LOONGSON2_PIX1_PLL 5
+#define LOONGSON2_NODE_CLK 6
+#define LOONGSON2_HDA_CLK 7
+#define LOONGSON2_GPU_CLK 8
+#define LOONGSON2_DDR_CLK 9
+#define LOONGSON2_GMAC_CLK 10
+#define LOONGSON2_DC_CLK 11
+#define LOONGSON2_APB_CLK 12
+#define LOONGSON2_USB_CLK 13
+#define LOONGSON2_SATA_CLK 14
+#define LOONGSON2_PIX0_CLK 15
+#define LOONGSON2_PIX1_CLK 16
+#define LOONGSON2_BOOT_CLK 17
+#define LOONGSON2_CLK_END 18
+
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
index 20664776f497..9653e04dedc3 100644
--- a/include/dt-bindings/clock/marvell,mmp2-audio.h
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -6,5 +6,4 @@
#define MMP2_CLK_AUDIO_SSPA0 1
#define MMP2_CLK_AUDIO_SSPA1 2
-#define MMP2_CLK_AUDIO_NR_CLKS 3

#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 87f5ad5df72f..88c2d716476f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -32,7 +32,7 @@
#define MMP2_CLK_I2S0 31
#define MMP2_CLK_I2S1 32

-/* apb periphrals */
+/* apb peripherals */
#define MMP2_CLK_TWSI0 60
#define MMP2_CLK_TWSI1 61
#define MMP2_CLK_TWSI2 62
@@ -60,7 +60,7 @@
#define MMP3_CLK_THERMAL2 84
#define MMP3_CLK_THERMAL3 85

-/* axi periphrals */
+/* axi peripherals */
#define MMP2_CLK_SDH0 101
#define MMP2_CLK_SDH1 102
#define MMP2_CLK_SDH2 103
@@ -91,5 +91,4 @@
#define MMP3_CLK_SDH4 126
#define MMP2_CLK_AUDIO 127
-#define MMP2_NR_CLKS 200

#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index caf90436b848..d1bb59187e1d 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -20,10 +20,13 @@
#define PXA168_CLK_PLL1_2_1_5 19
#define PXA168_CLK_PLL1_3_16 20
#define PXA168_CLK_PLL1_192 21
+#define PXA168_CLK_PLL1_2_1_10 22
+#define PXA168_CLK_PLL1_2_3_16 23
#define PXA168_CLK_UART_PLL 27
#define PXA168_CLK_USB_PLL 28
+#define PXA168_CLK_CLK32_2 50

-/* apb periphrals */
+/* apb peripherals */
#define PXA168_CLK_TWSI0 60
#define PXA168_CLK_TWSI1 61
#define PXA168_CLK_TWSI2 62
@@ -45,7 +48,7 @@
#define PXA168_CLK_SSP4 78
#define PXA168_CLK_TIMER 79

-/* axi periphrals */
+/* axi peripherals */
#define PXA168_CLK_DFC 100
#define PXA168_CLK_SDH0 101
#define PXA168_CLK_SDH1 102
@@ -56,6 +59,8 @@
#define PXA168_CLK_CCIC0 107
#define PXA168_CLK_CCIC0_PHY 108
#define PXA168_CLK_CCIC0_SPHY 109
+#define PXA168_CLK_SDH3 110
+#define PXA168_CLK_SDH01_AXI 111
+#define PXA168_CLK_SDH23_AXI 112
-#define PXA168_NR_CLKS 200

#endif
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
index 5dca4820297f..0c708d3d3314 100644
--- a/include/dt-bindings/clock/marvell,pxa1928.h
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -36,7 +36,6 @@
#define PXA1928_CLK_THSENS_CPU 0x26
#define PXA1928_CLK_THSENS_VPU 0x27
#define PXA1928_CLK_THSENS_GC 0x28

-#define PXA1928_APBC_NR_CLKS 0x30
/* axi peripherals */
@@ -53,6 +52,4 @@
#define PXA1928_CLK_GC3D 0x5d
#define PXA1928_CLK_GC2D 0x5f
-#define PXA1928_APMU_NR_CLKS 0x60
-
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index 7bf46238946e..6caa231de0c1 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -23,7 +23,7 @@
#define PXA910_CLK_UART_PLL 27
#define PXA910_CLK_USB_PLL 28

-/* apb periphrals */
+/* apb peripherals */
#define PXA910_CLK_TWSI0 60
#define PXA910_CLK_TWSI1 61
#define PXA910_CLK_TWSI2 62
@@ -43,7 +43,7 @@
#define PXA910_CLK_TIMER0 76
#define PXA910_CLK_TIMER1 77

-/* axi periphrals */
+/* axi peripherals */
#define PXA910_CLK_DFC 100
#define PXA910_CLK_SDH0 101
#define PXA910_CLK_SDH1 102
@@ -55,5 +55,4 @@
#define PXA910_CLK_CCIC0_PHY 108
#define PXA910_CLK_CCIC0_SPHY 109
-#define PXA910_NR_CLKS 200

#endif
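The Marvell hunks above share one theme: the *_NR_CLKS identifiers are dropped from the binding headers because a clock count is a Linux driver detail, not part of the DT ABI, and keeping it in a shared header invites treating it as one. A hedged sketch of where the count lives afterwards (the define below is illustrative, not the actual clk-mmp driver source):

#include <linux/clk-provider.h>
#include <dt-bindings/clock/marvell,pxa910.h>

/* Driver-private bound: the ID space above is sparse, so the driver
 * keeps the generous 200 that used to sit in the binding header. */
#define PXA910_NR_CLKS	200

static struct clk_hw *pxa910_hws[PXA910_NR_CLKS];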
diff --git a/include/dt-bindings/clock/mediatek,mt6795-clk.h b/include/dt-bindings/clock/mediatek,mt6795-clk.h
new file mode 100644
index 000000000000..9902906ac902
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6795-clk.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6795_H
+#define _DT_BINDINGS_CLK_MT6795_H
+
+/* TOPCKGEN */
+#define CLK_TOP_ADSYS_26M 0
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_USB_SYSPLL_125M 2
+#define CLK_TOP_DSI0_DIG 3
+#define CLK_TOP_DSI1_DIG 4
+#define CLK_TOP_ARMCA53PLL_754M 5
+#define CLK_TOP_ARMCA53PLL_502M 6
+#define CLK_TOP_MAIN_H546M 7
+#define CLK_TOP_MAIN_H364M 8
+#define CLK_TOP_MAIN_H218P4M 9
+#define CLK_TOP_MAIN_H156M 10
+#define CLK_TOP_TVDPLL_445P5M 11
+#define CLK_TOP_TVDPLL_594M 12
+#define CLK_TOP_UNIV_624M 13
+#define CLK_TOP_UNIV_416M 14
+#define CLK_TOP_UNIV_249P6M 15
+#define CLK_TOP_UNIV_178P3M 16
+#define CLK_TOP_UNIV_48M 17
+#define CLK_TOP_CLKRTC_EXT 18
+#define CLK_TOP_CLKRTC_INT 19
+#define CLK_TOP_FPC 20
+#define CLK_TOP_HDMITXPLL_D2 21
+#define CLK_TOP_HDMITXPLL_D3 22
+#define CLK_TOP_ARMCA53PLL_D2 23
+#define CLK_TOP_ARMCA53PLL_D3 24
+#define CLK_TOP_APLL1 25
+#define CLK_TOP_APLL2 26
+#define CLK_TOP_DMPLL 27
+#define CLK_TOP_DMPLL_D2 28
+#define CLK_TOP_DMPLL_D4 29
+#define CLK_TOP_DMPLL_D8 30
+#define CLK_TOP_DMPLL_D16 31
+#define CLK_TOP_MMPLL 32
+#define CLK_TOP_MMPLL_D2 33
+#define CLK_TOP_MSDCPLL 34
+#define CLK_TOP_MSDCPLL_D2 35
+#define CLK_TOP_MSDCPLL_D4 36
+#define CLK_TOP_MSDCPLL2 37
+#define CLK_TOP_MSDCPLL2_D2 38
+#define CLK_TOP_MSDCPLL2_D4 39
+#define CLK_TOP_SYSPLL_D2 40
+#define CLK_TOP_SYSPLL1_D2 41
+#define CLK_TOP_SYSPLL1_D4 42
+#define CLK_TOP_SYSPLL1_D8 43
+#define CLK_TOP_SYSPLL1_D16 44
+#define CLK_TOP_SYSPLL_D3 45
+#define CLK_TOP_SYSPLL2_D2 46
+#define CLK_TOP_SYSPLL2_D4 47
+#define CLK_TOP_SYSPLL_D5 48
+#define CLK_TOP_SYSPLL3_D2 49
+#define CLK_TOP_SYSPLL3_D4 50
+#define CLK_TOP_SYSPLL_D7 51
+#define CLK_TOP_SYSPLL4_D2 52
+#define CLK_TOP_SYSPLL4_D4 53
+#define CLK_TOP_TVDPLL 54
+#define CLK_TOP_TVDPLL_D2 55
+#define CLK_TOP_TVDPLL_D4 56
+#define CLK_TOP_TVDPLL_D8 57
+#define CLK_TOP_TVDPLL_D16 58
+#define CLK_TOP_UNIVPLL_D2 59
+#define CLK_TOP_UNIVPLL1_D2 60
+#define CLK_TOP_UNIVPLL1_D4 61
+#define CLK_TOP_UNIVPLL1_D8 62
+#define CLK_TOP_UNIVPLL_D3 63
+#define CLK_TOP_UNIVPLL2_D2 64
+#define CLK_TOP_UNIVPLL2_D4 65
+#define CLK_TOP_UNIVPLL2_D8 66
+#define CLK_TOP_UNIVPLL_D5 67
+#define CLK_TOP_UNIVPLL3_D2 68
+#define CLK_TOP_UNIVPLL3_D4 69
+#define CLK_TOP_UNIVPLL3_D8 70
+#define CLK_TOP_UNIVPLL_D7 71
+#define CLK_TOP_UNIVPLL_D26 72
+#define CLK_TOP_UNIVPLL_D52 73
+#define CLK_TOP_VCODECPLL 74
+#define CLK_TOP_VCODECPLL_370P5 75
+#define CLK_TOP_VENCPLL 76
+#define CLK_TOP_VENCPLL_D2 77
+#define CLK_TOP_VENCPLL_D4 78
+#define CLK_TOP_AXI_SEL 79
+#define CLK_TOP_MEM_SEL 80
+#define CLK_TOP_DDRPHYCFG_SEL 81
+#define CLK_TOP_MM_SEL 82
+#define CLK_TOP_PWM_SEL 83
+#define CLK_TOP_VDEC_SEL 84
+#define CLK_TOP_VENC_SEL 85
+#define CLK_TOP_MFG_SEL 86
+#define CLK_TOP_CAMTG_SEL 87
+#define CLK_TOP_UART_SEL 88
+#define CLK_TOP_SPI_SEL 89
+#define CLK_TOP_USB20_SEL 90
+#define CLK_TOP_USB30_SEL 91
+#define CLK_TOP_MSDC50_0_H_SEL 92
+#define CLK_TOP_MSDC50_0_SEL 93
+#define CLK_TOP_MSDC30_1_SEL 94
+#define CLK_TOP_MSDC30_2_SEL 95
+#define CLK_TOP_MSDC30_3_SEL 96
+#define CLK_TOP_AUDIO_SEL 97
+#define CLK_TOP_AUD_INTBUS_SEL 98
+#define CLK_TOP_PMICSPI_SEL 99
+#define CLK_TOP_SCP_SEL 100
+#define CLK_TOP_MJC_SEL 101
+#define CLK_TOP_DPI0_SEL 102
+#define CLK_TOP_IRDA_SEL 103
+#define CLK_TOP_CCI400_SEL 104
+#define CLK_TOP_AUD_1_SEL 105
+#define CLK_TOP_AUD_2_SEL 106
+#define CLK_TOP_MEM_MFG_IN_SEL 107
+#define CLK_TOP_AXI_MFG_IN_SEL 108
+#define CLK_TOP_SCAM_SEL 109
+#define CLK_TOP_I2S0_M_SEL 110
+#define CLK_TOP_I2S1_M_SEL 111
+#define CLK_TOP_I2S2_M_SEL 112
+#define CLK_TOP_I2S3_M_SEL 113
+#define CLK_TOP_I2S3_B_SEL 114
+#define CLK_TOP_APLL1_DIV0 115
+#define CLK_TOP_APLL1_DIV1 116
+#define CLK_TOP_APLL1_DIV2 117
+#define CLK_TOP_APLL1_DIV3 118
+#define CLK_TOP_APLL1_DIV4 119
+#define CLK_TOP_APLL1_DIV5 120
+#define CLK_TOP_APLL2_DIV0 121
+#define CLK_TOP_APLL2_DIV1 122
+#define CLK_TOP_APLL2_DIV2 123
+#define CLK_TOP_APLL2_DIV3 124
+#define CLK_TOP_APLL2_DIV4 125
+#define CLK_TOP_APLL2_DIV5 126
+#define CLK_TOP_NR_CLK 127
+
+/* APMIXED_SYS */
+#define CLK_APMIXED_ARMCA53PLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_VENCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_MPLL 7
+#define CLK_APMIXED_VCODECPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_REF2USB_TX 11
+#define CLK_APMIXED_NR_CLK 12
+
+/* INFRA_SYS */
+#define CLK_INFRA_DBGCLK 0
+#define CLK_INFRA_SMI 1
+#define CLK_INFRA_AUDIO 2
+#define CLK_INFRA_GCE 3
+#define CLK_INFRA_L2C_SRAM 4
+#define CLK_INFRA_M4U 5
+#define CLK_INFRA_MD1MCU 6
+#define CLK_INFRA_MD1BUS 7
+#define CLK_INFRA_MD1DBB 8
+#define CLK_INFRA_DEVICE_APC 9
+#define CLK_INFRA_TRNG 10
+#define CLK_INFRA_MD1LTE 11
+#define CLK_INFRA_CPUM 12
+#define CLK_INFRA_KP 13
+#define CLK_INFRA_CA53_C0_SEL 14
+#define CLK_INFRA_CA53_C1_SEL 15
+#define CLK_INFRA_NR_CLK 16
+
+/* PERI_SYS */
+#define CLK_PERI_NFI 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM1 2
+#define CLK_PERI_PWM2 3
+#define CLK_PERI_PWM3 4
+#define CLK_PERI_PWM4 5
+#define CLK_PERI_PWM5 6
+#define CLK_PERI_PWM6 7
+#define CLK_PERI_PWM7 8
+#define CLK_PERI_PWM 9
+#define CLK_PERI_USB0 10
+#define CLK_PERI_USB1 11
+#define CLK_PERI_AP_DMA 12
+#define CLK_PERI_MSDC30_0 13
+#define CLK_PERI_MSDC30_1 14
+#define CLK_PERI_MSDC30_2 15
+#define CLK_PERI_MSDC30_3 16
+#define CLK_PERI_NLI_ARB 17
+#define CLK_PERI_IRDA 18
+#define CLK_PERI_UART0 19
+#define CLK_PERI_UART1 20
+#define CLK_PERI_UART2 21
+#define CLK_PERI_UART3 22
+#define CLK_PERI_I2C0 23
+#define CLK_PERI_I2C1 24
+#define CLK_PERI_I2C2 25
+#define CLK_PERI_I2C3 26
+#define CLK_PERI_I2C4 27
+#define CLK_PERI_AUXADC 28
+#define CLK_PERI_SPI0 29
+#define CLK_PERI_UART0_SEL 30
+#define CLK_PERI_UART1_SEL 31
+#define CLK_PERI_UART2_SEL 32
+#define CLK_PERI_UART3_SEL 33
+#define CLK_PERI_NR_CLK 34
+
+/* MFG */
+#define CLK_MFG_BAXI 0
+#define CLK_MFG_BMEM 1
+#define CLK_MFG_BG3D 2
+#define CLK_MFG_B26M 3
+#define CLK_MFG_NR_CLK 4
+
+/* MM_SYS */
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA0 3
+#define CLK_MM_MDP_RDMA1 4
+#define CLK_MM_MDP_RSZ0 5
+#define CLK_MM_MDP_RSZ1 6
+#define CLK_MM_MDP_RSZ2 7
+#define CLK_MM_MDP_TDSHP0 8
+#define CLK_MM_MDP_TDSHP1 9
+#define CLK_MM_MDP_CROP 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_SPLIT1 29
+#define CLK_MM_DISP_MERGE 30
+#define CLK_MM_DISP_OD 31
+#define CLK_MM_DISP_PWM0MM 32
+#define CLK_MM_DISP_PWM026M 33
+#define CLK_MM_DISP_PWM1MM 34
+#define CLK_MM_DISP_PWM126M 35
+#define CLK_MM_DSI0_ENGINE 36
+#define CLK_MM_DSI0_DIGITAL 37
+#define CLK_MM_DSI1_ENGINE 38
+#define CLK_MM_DSI1_DIGITAL 39
+#define CLK_MM_DPI_PIXEL 40
+#define CLK_MM_DPI_ENGINE 41
+#define CLK_MM_NR_CLK 42
+
+/* VDEC_SYS */
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB_CKEN 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENC_SYS */
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT6795_H */
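Each section of this header (TOPCKGEN, APMIXED_SYS, INFRA_SYS, ...) restarts at 0 because every clock controller is a separate DT provider node: an ID is only meaningful relative to the phandle it follows, and each section carries its own *_NR_CLK terminator. A short sketch of per-provider table sizing under that layout (illustrative, not the MediaTek driver):

#include <linux/clk-provider.h>
#include <dt-bindings/clock/mediatek,mt6795-clk.h>

/* One table per provider; overlapping numeric IDs never collide
 * because consumers always pair them with a provider phandle. */
static struct clk_hw *topckgen_hws[CLK_TOP_NR_CLK];
static struct clk_hw *apmixed_hws[CLK_APMIXED_NR_CLK];
static struct clk_hw *infra_hws[CLK_INFRA_NR_CLK];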
diff --git a/include/dt-bindings/clock/mediatek,mt7981-clk.h b/include/dt-bindings/clock/mediatek,mt7981-clk.h
new file mode 100644
index 000000000000..192f8cefb589
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7981-clk.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Wenzhen.Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7981_H
+#define _DT_BINDINGS_CLK_MT7981_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CB_CKSQ_40M 0
+#define CLK_TOP_CB_M_416M 1
+#define CLK_TOP_CB_M_D2 2
+#define CLK_TOP_CB_M_D3 3
+#define CLK_TOP_M_D3_D2 4
+#define CLK_TOP_CB_M_D4 5
+#define CLK_TOP_CB_M_D8 6
+#define CLK_TOP_M_D8_D2 7
+#define CLK_TOP_CB_MM_720M 8
+#define CLK_TOP_CB_MM_D2 9
+#define CLK_TOP_CB_MM_D3 10
+#define CLK_TOP_CB_MM_D3_D5 11
+#define CLK_TOP_CB_MM_D4 12
+#define CLK_TOP_CB_MM_D6 13
+#define CLK_TOP_MM_D6_D2 14
+#define CLK_TOP_CB_MM_D8 15
+#define CLK_TOP_CB_APLL2_196M 16
+#define CLK_TOP_APLL2_D2 17
+#define CLK_TOP_APLL2_D4 18
+#define CLK_TOP_NET1_2500M 19
+#define CLK_TOP_CB_NET1_D4 20
+#define CLK_TOP_CB_NET1_D5 21
+#define CLK_TOP_NET1_D5_D2 22
+#define CLK_TOP_NET1_D5_D4 23
+#define CLK_TOP_CB_NET1_D8 24
+#define CLK_TOP_NET1_D8_D2 25
+#define CLK_TOP_NET1_D8_D4 26
+#define CLK_TOP_CB_NET2_800M 27
+#define CLK_TOP_CB_NET2_D2 28
+#define CLK_TOP_CB_NET2_D4 29
+#define CLK_TOP_NET2_D4_D2 30
+#define CLK_TOP_NET2_D4_D4 31
+#define CLK_TOP_CB_NET2_D6 32
+#define CLK_TOP_CB_WEDMCU_208M 33
+#define CLK_TOP_CB_SGM_325M 34
+#define CLK_TOP_CKSQ_40M_D2 35
+#define CLK_TOP_CB_RTC_32K 36
+#define CLK_TOP_CB_RTC_32P7K 37
+#define CLK_TOP_USB_TX250M 38
+#define CLK_TOP_FAUD 39
+#define CLK_TOP_NFI1X 40
+#define CLK_TOP_USB_EQ_RX250M 41
+#define CLK_TOP_USB_CDR_CK 42
+#define CLK_TOP_USB_LN0_CK 43
+#define CLK_TOP_SPINFI_BCK 44
+#define CLK_TOP_SPI 45
+#define CLK_TOP_SPIM_MST 46
+#define CLK_TOP_UART_BCK 47
+#define CLK_TOP_PWM_BCK 48
+#define CLK_TOP_I2C_BCK 49
+#define CLK_TOP_PEXTP_TL 50
+#define CLK_TOP_EMMC_208M 51
+#define CLK_TOP_EMMC_400M 52
+#define CLK_TOP_DRAMC_REF 53
+#define CLK_TOP_DRAMC_MD32 54
+#define CLK_TOP_SYSAXI 55
+#define CLK_TOP_SYSAPB 56
+#define CLK_TOP_ARM_DB_MAIN 57
+#define CLK_TOP_AP2CNN_HOST 58
+#define CLK_TOP_NETSYS 59
+#define CLK_TOP_NETSYS_500M 60
+#define CLK_TOP_NETSYS_WED_MCU 61
+#define CLK_TOP_NETSYS_2X 62
+#define CLK_TOP_SGM_325M 63
+#define CLK_TOP_SGM_REG 64
+#define CLK_TOP_F26M 65
+#define CLK_TOP_EIP97B 66
+#define CLK_TOP_USB3_PHY 67
+#define CLK_TOP_AUD 68
+#define CLK_TOP_A1SYS 69
+#define CLK_TOP_AUD_L 70
+#define CLK_TOP_A_TUNER 71
+#define CLK_TOP_U2U3_REF 72
+#define CLK_TOP_U2U3_SYS 73
+#define CLK_TOP_U2U3_XHCI 74
+#define CLK_TOP_USB_FRMCNT 75
+#define CLK_TOP_NFI1X_SEL 76
+#define CLK_TOP_SPINFI_SEL 77
+#define CLK_TOP_SPI_SEL 78
+#define CLK_TOP_SPIM_MST_SEL 79
+#define CLK_TOP_UART_SEL 80
+#define CLK_TOP_PWM_SEL 81
+#define CLK_TOP_I2C_SEL 82
+#define CLK_TOP_PEXTP_TL_SEL 83
+#define CLK_TOP_EMMC_208M_SEL 84
+#define CLK_TOP_EMMC_400M_SEL 85
+#define CLK_TOP_F26M_SEL 86
+#define CLK_TOP_DRAMC_SEL 87
+#define CLK_TOP_DRAMC_MD32_SEL 88
+#define CLK_TOP_SYSAXI_SEL 89
+#define CLK_TOP_SYSAPB_SEL 90
+#define CLK_TOP_ARM_DB_MAIN_SEL 91
+#define CLK_TOP_AP2CNN_HOST_SEL 92
+#define CLK_TOP_NETSYS_SEL 93
+#define CLK_TOP_NETSYS_500M_SEL 94
+#define CLK_TOP_NETSYS_MCU_SEL 95
+#define CLK_TOP_NETSYS_2X_SEL 96
+#define CLK_TOP_SGM_325M_SEL 97
+#define CLK_TOP_SGM_REG_SEL 98
+#define CLK_TOP_EIP97B_SEL 99
+#define CLK_TOP_USB3_PHY_SEL 100
+#define CLK_TOP_AUD_SEL 101
+#define CLK_TOP_A1SYS_SEL 102
+#define CLK_TOP_AUD_L_SEL 103
+#define CLK_TOP_A_TUNER_SEL 104
+#define CLK_TOP_U2U3_SEL 105
+#define CLK_TOP_U2U3_SYS_SEL 106
+#define CLK_TOP_U2U3_XHCI_SEL 107
+#define CLK_TOP_USB_FRMCNT_SEL 108
+#define CLK_TOP_AUD_I2S_M 109
+
+/* INFRACFG */
+#define CLK_INFRA_66M_MCK 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_SPI2_SEL 6
+#define CLK_INFRA_PWM1_SEL 7
+#define CLK_INFRA_PWM2_SEL 8
+#define CLK_INFRA_PWM3_SEL 9
+#define CLK_INFRA_PWM_BSEL 10
+#define CLK_INFRA_PCIE_SEL 11
+#define CLK_INFRA_GPT_STA 12
+#define CLK_INFRA_PWM_HCK 13
+#define CLK_INFRA_PWM_STA 14
+#define CLK_INFRA_PWM1_CK 15
+#define CLK_INFRA_PWM2_CK 16
+#define CLK_INFRA_PWM3_CK 17
+#define CLK_INFRA_CQ_DMA_CK 18
+#define CLK_INFRA_AUD_BUS_CK 19
+#define CLK_INFRA_AUD_26M_CK 20
+#define CLK_INFRA_AUD_L_CK 21
+#define CLK_INFRA_AUD_AUD_CK 22
+#define CLK_INFRA_AUD_EG2_CK 23
+#define CLK_INFRA_DRAMC_26M_CK 24
+#define CLK_INFRA_DBG_CK 25
+#define CLK_INFRA_AP_DMA_CK 26
+#define CLK_INFRA_SEJ_CK 27
+#define CLK_INFRA_SEJ_13M_CK 28
+#define CLK_INFRA_THERM_CK 29
+#define CLK_INFRA_I2C0_CK 30
+#define CLK_INFRA_UART0_CK 31
+#define CLK_INFRA_UART1_CK 32
+#define CLK_INFRA_UART2_CK 33
+#define CLK_INFRA_SPI2_CK 34
+#define CLK_INFRA_SPI2_HCK_CK 35
+#define CLK_INFRA_NFI1_CK 36
+#define CLK_INFRA_SPINFI1_CK 37
+#define CLK_INFRA_NFI_HCK_CK 38
+#define CLK_INFRA_SPI0_CK 39
+#define CLK_INFRA_SPI1_CK 40
+#define CLK_INFRA_SPI0_HCK_CK 41
+#define CLK_INFRA_SPI1_HCK_CK 42
+#define CLK_INFRA_FRTC_CK 43
+#define CLK_INFRA_MSDC_CK 44
+#define CLK_INFRA_MSDC_HCK_CK 45
+#define CLK_INFRA_MSDC_133M_CK 46
+#define CLK_INFRA_MSDC_66M_CK 47
+#define CLK_INFRA_ADC_26M_CK 48
+#define CLK_INFRA_ADC_FRC_CK 49
+#define CLK_INFRA_FBIST2FPC_CK 50
+#define CLK_INFRA_I2C_MCK_CK 51
+#define CLK_INFRA_I2C_PCK_CK 52
+#define CLK_INFRA_IUSB_133_CK 53
+#define CLK_INFRA_IUSB_66M_CK 54
+#define CLK_INFRA_IUSB_SYS_CK 55
+#define CLK_INFRA_IUSB_CK 56
+#define CLK_INFRA_IPCIE_CK 57
+#define CLK_INFRA_IPCIE_PIPE_CK 58
+#define CLK_INFRA_IPCIER_CK 59
+#define CLK_INFRA_IPCIEB_CK 60
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* SGMIISYS_0 */
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGM0_CK0_EN 2
+#define CLK_SGM0_CDR_CK0_EN 3
+
+/* SGMIISYS_1 */
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGM1_CK1_EN 2
+#define CLK_SGM1_CDR_CK1_EN 3
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU0_EN 3
+
+#endif /* _DT_BINDINGS_CLK_MT7981_H */
diff --git a/include/dt-bindings/clock/mediatek,mt7988-clk.h b/include/dt-bindings/clock/mediatek,mt7988-clk.h
new file mode 100644
index 000000000000..63376e40f14d
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7988-clk.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7988_H
+#define _DT_BINDINGS_CLK_MT7988_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NETSYSPLL 0
+#define CLK_APMIXED_MPLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_APLL2 3
+#define CLK_APMIXED_NET1PLL 4
+#define CLK_APMIXED_NET2PLL 5
+#define CLK_APMIXED_WEDMCUPLL 6
+#define CLK_APMIXED_SGMPLL 7
+#define CLK_APMIXED_ARM_B 8
+#define CLK_APMIXED_CCIPLL2_B 9
+#define CLK_APMIXED_USXGMIIPLL 10
+#define CLK_APMIXED_MSDCPLL 11
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D3_D2 5
+#define CLK_TOP_MPLL_D4 6
+#define CLK_TOP_MPLL_D8 7
+#define CLK_TOP_MPLL_D8_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D3_D5 10
+#define CLK_TOP_MMPLL_D4 11
+#define CLK_TOP_MMPLL_D6_D2 12
+#define CLK_TOP_MMPLL_D8 13
+#define CLK_TOP_APLL2_D4 14
+#define CLK_TOP_NET1PLL_D4 15
+#define CLK_TOP_NET1PLL_D5 16
+#define CLK_TOP_NET1PLL_D5_D2 17
+#define CLK_TOP_NET1PLL_D5_D4 18
+#define CLK_TOP_NET1PLL_D8 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET1PLL_D8_D8 22
+#define CLK_TOP_NET1PLL_D8_D16 23
+#define CLK_TOP_NET2PLL_D2 24
+#define CLK_TOP_NET2PLL_D4 25
+#define CLK_TOP_NET2PLL_D4_D4 26
+#define CLK_TOP_NET2PLL_D4_D8 27
+#define CLK_TOP_NET2PLL_D6 28
+#define CLK_TOP_NET2PLL_D8 29
+#define CLK_TOP_NETSYS_SEL 30
+#define CLK_TOP_NETSYS_500M_SEL 31
+#define CLK_TOP_NETSYS_2X_SEL 32
+#define CLK_TOP_NETSYS_GSW_SEL 33
+#define CLK_TOP_ETH_GMII_SEL 34
+#define CLK_TOP_NETSYS_MCU_SEL 35
+#define CLK_TOP_NETSYS_PAO_2X_SEL 36
+#define CLK_TOP_EIP197_SEL 37
+#define CLK_TOP_AXI_INFRA_SEL 38
+#define CLK_TOP_UART_SEL 39
+#define CLK_TOP_EMMC_250M_SEL 40
+#define CLK_TOP_EMMC_400M_SEL 41
+#define CLK_TOP_SPI_SEL 42
+#define CLK_TOP_SPIM_MST_SEL 43
+#define CLK_TOP_NFI1X_SEL 44
+#define CLK_TOP_SPINFI_SEL 45
+#define CLK_TOP_PWM_SEL 46
+#define CLK_TOP_I2C_SEL 47
+#define CLK_TOP_PCIE_MBIST_250M_SEL 48
+#define CLK_TOP_PEXTP_TL_SEL 49
+#define CLK_TOP_PEXTP_TL_P1_SEL 50
+#define CLK_TOP_PEXTP_TL_P2_SEL 51
+#define CLK_TOP_PEXTP_TL_P3_SEL 52
+#define CLK_TOP_USB_SYS_SEL 53
+#define CLK_TOP_USB_SYS_P1_SEL 54
+#define CLK_TOP_USB_XHCI_SEL 55
+#define CLK_TOP_USB_XHCI_P1_SEL 56
+#define CLK_TOP_USB_FRMCNT_SEL 57
+#define CLK_TOP_USB_FRMCNT_P1_SEL 58
+#define CLK_TOP_AUD_SEL 59
+#define CLK_TOP_A1SYS_SEL 60
+#define CLK_TOP_AUD_L_SEL 61
+#define CLK_TOP_A_TUNER_SEL 62
+#define CLK_TOP_SSPXTP_SEL 63
+#define CLK_TOP_USB_PHY_SEL 64
+#define CLK_TOP_USXGMII_SBUS_0_SEL 65
+#define CLK_TOP_USXGMII_SBUS_1_SEL 66
+#define CLK_TOP_SGM_0_SEL 67
+#define CLK_TOP_SGM_SBUS_0_SEL 68
+#define CLK_TOP_SGM_1_SEL 69
+#define CLK_TOP_SGM_SBUS_1_SEL 70
+#define CLK_TOP_XFI_PHY_0_XTAL_SEL 71
+#define CLK_TOP_XFI_PHY_1_XTAL_SEL 72
+#define CLK_TOP_SYSAXI_SEL 73
+#define CLK_TOP_SYSAPB_SEL 74
+#define CLK_TOP_ETH_REFCK_50M_SEL 75
+#define CLK_TOP_ETH_SYS_200M_SEL 76
+#define CLK_TOP_ETH_SYS_SEL 77
+#define CLK_TOP_ETH_XGMII_SEL 78
+#define CLK_TOP_BUS_TOPS_SEL 79
+#define CLK_TOP_NPU_TOPS_SEL 80
+#define CLK_TOP_DRAMC_SEL 81
+#define CLK_TOP_DRAMC_MD32_SEL 82
+#define CLK_TOP_INFRA_F26M_SEL 83
+#define CLK_TOP_PEXTP_P0_SEL 84
+#define CLK_TOP_PEXTP_P1_SEL 85
+#define CLK_TOP_PEXTP_P2_SEL 86
+#define CLK_TOP_PEXTP_P3_SEL 87
+#define CLK_TOP_DA_XTP_GLB_P0_SEL 88
+#define CLK_TOP_DA_XTP_GLB_P1_SEL 89
+#define CLK_TOP_DA_XTP_GLB_P2_SEL 90
+#define CLK_TOP_DA_XTP_GLB_P3_SEL 91
+#define CLK_TOP_CKM_SEL 92
+#define CLK_TOP_DA_SEL 93
+#define CLK_TOP_PEXTP_SEL 94
+#define CLK_TOP_TOPS_P2_26M_SEL 95
+#define CLK_TOP_MCUSYS_BACKUP_625M_SEL 96
+#define CLK_TOP_NETSYS_SYNC_250M_SEL 97
+#define CLK_TOP_MACSEC_SEL 98
+#define CLK_TOP_NETSYS_TOPS_400M_SEL 99
+#define CLK_TOP_NETSYS_PPEFB_250M_SEL 100
+#define CLK_TOP_NETSYS_WARP_SEL 101
+#define CLK_TOP_ETH_MII_SEL 102
+#define CLK_TOP_NPU_SEL 103
+#define CLK_TOP_AUD_I2S_M 104
+
+/* MCUSYS */
+
+#define CLK_MCU_BUS_DIV_SEL 0
+#define CLK_MCU_ARM_DIV_SEL 1
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_MUX_UART0_SEL 0
+#define CLK_INFRA_MUX_UART1_SEL 1
+#define CLK_INFRA_MUX_UART2_SEL 2
+#define CLK_INFRA_MUX_SPI0_SEL 3
+#define CLK_INFRA_MUX_SPI1_SEL 4
+#define CLK_INFRA_MUX_SPI2_SEL 5
+#define CLK_INFRA_PWM_SEL 6
+#define CLK_INFRA_PWM_CK1_SEL 7
+#define CLK_INFRA_PWM_CK2_SEL 8
+#define CLK_INFRA_PWM_CK3_SEL 9
+#define CLK_INFRA_PWM_CK4_SEL 10
+#define CLK_INFRA_PWM_CK5_SEL 11
+#define CLK_INFRA_PWM_CK6_SEL 12
+#define CLK_INFRA_PWM_CK7_SEL 13
+#define CLK_INFRA_PWM_CK8_SEL 14
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P0_SEL 15
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P1_SEL 16
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P2_SEL 17
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P3_SEL 18
+
+/* INFRACFG */
+
+#define CLK_INFRA_PCIE_PERI_26M_CK_P0 19
+#define CLK_INFRA_PCIE_PERI_26M_CK_P1 20
+#define CLK_INFRA_PCIE_PERI_26M_CK_P2 21
+#define CLK_INFRA_PCIE_PERI_26M_CK_P3 22
+#define CLK_INFRA_66M_GPT_BCK 23
+#define CLK_INFRA_66M_PWM_HCK 24
+#define CLK_INFRA_66M_PWM_BCK 25
+#define CLK_INFRA_66M_PWM_CK1 26
+#define CLK_INFRA_66M_PWM_CK2 27
+#define CLK_INFRA_66M_PWM_CK3 28
+#define CLK_INFRA_66M_PWM_CK4 29
+#define CLK_INFRA_66M_PWM_CK5 30
+#define CLK_INFRA_66M_PWM_CK6 31
+#define CLK_INFRA_66M_PWM_CK7 32
+#define CLK_INFRA_66M_PWM_CK8 33
+#define CLK_INFRA_133M_CQDMA_BCK 34
+#define CLK_INFRA_66M_AUD_SLV_BCK 35
+#define CLK_INFRA_AUD_26M 36
+#define CLK_INFRA_AUD_L 37
+#define CLK_INFRA_AUD_AUD 38
+#define CLK_INFRA_AUD_EG2 39
+#define CLK_INFRA_DRAMC_F26M 40
+#define CLK_INFRA_133M_DBG_ACKM 41
+#define CLK_INFRA_66M_AP_DMA_BCK 42
+#define CLK_INFRA_66M_SEJ_BCK 43
+#define CLK_INFRA_PRE_CK_SEJ_F13M 44
+#define CLK_INFRA_26M_THERM_SYSTEM 45
+#define CLK_INFRA_I2C_BCK 46
+#define CLK_INFRA_52M_UART0_CK 47
+#define CLK_INFRA_52M_UART1_CK 48
+#define CLK_INFRA_52M_UART2_CK 49
+#define CLK_INFRA_NFI 50
+#define CLK_INFRA_SPINFI 51
+#define CLK_INFRA_66M_NFI_HCK 52
+#define CLK_INFRA_104M_SPI0 53
+#define CLK_INFRA_104M_SPI1 54
+#define CLK_INFRA_104M_SPI2_BCK 55
+#define CLK_INFRA_66M_SPI0_HCK 56
+#define CLK_INFRA_66M_SPI1_HCK 57
+#define CLK_INFRA_66M_SPI2_HCK 58
+#define CLK_INFRA_66M_FLASHIF_AXI 59
+#define CLK_INFRA_RTC 60
+#define CLK_INFRA_26M_ADC_BCK 61
+#define CLK_INFRA_RC_ADC 62
+#define CLK_INFRA_MSDC400 63
+#define CLK_INFRA_MSDC2_HCK 64
+#define CLK_INFRA_133M_MSDC_0_HCK 65
+#define CLK_INFRA_66M_MSDC_0_HCK 66
+#define CLK_INFRA_133M_CPUM_BCK 67
+#define CLK_INFRA_BIST2FPC 68
+#define CLK_INFRA_I2C_X16W_MCK_CK_P1 69
+#define CLK_INFRA_I2C_X16W_PCK_CK_P1 70
+#define CLK_INFRA_133M_USB_HCK 71
+#define CLK_INFRA_133M_USB_HCK_CK_P1 72
+#define CLK_INFRA_66M_USB_HCK 73
+#define CLK_INFRA_66M_USB_HCK_CK_P1 74
+#define CLK_INFRA_USB_SYS 75
+#define CLK_INFRA_USB_SYS_CK_P1 76
+#define CLK_INFRA_USB_REF 77
+#define CLK_INFRA_USB_CK_P1 78
+#define CLK_INFRA_USB_FRMCNT 79
+#define CLK_INFRA_USB_FRMCNT_CK_P1 80
+#define CLK_INFRA_USB_PIPE 81
+#define CLK_INFRA_USB_PIPE_CK_P1 82
+#define CLK_INFRA_USB_UTMI 83
+#define CLK_INFRA_USB_UTMI_CK_P1 84
+#define CLK_INFRA_USB_XHCI 85
+#define CLK_INFRA_USB_XHCI_CK_P1 86
+#define CLK_INFRA_PCIE_GFMUX_TL_P0 87
+#define CLK_INFRA_PCIE_GFMUX_TL_P1 88
+#define CLK_INFRA_PCIE_GFMUX_TL_P2 89
+#define CLK_INFRA_PCIE_GFMUX_TL_P3 90
+#define CLK_INFRA_PCIE_PIPE_P0 91
+#define CLK_INFRA_PCIE_PIPE_P1 92
+#define CLK_INFRA_PCIE_PIPE_P2 93
+#define CLK_INFRA_PCIE_PIPE_P3 94
+#define CLK_INFRA_133M_PCIE_CK_P0 95
+#define CLK_INFRA_133M_PCIE_CK_P1 96
+#define CLK_INFRA_133M_PCIE_CK_P2 97
+#define CLK_INFRA_133M_PCIE_CK_P3 98
+
+/* ETHDMA */
+
+#define CLK_ETHDMA_XGP1_EN 0
+#define CLK_ETHDMA_XGP2_EN 1
+#define CLK_ETHDMA_XGP3_EN 2
+#define CLK_ETHDMA_FE_EN 3
+#define CLK_ETHDMA_GP2_EN 4
+#define CLK_ETHDMA_GP1_EN 5
+#define CLK_ETHDMA_GP3_EN 6
+#define CLK_ETHDMA_ESW_EN 7
+#define CLK_ETHDMA_CRYPT0_EN 8
+#define CLK_ETHDMA_NR_CLK 9
+
+/* SGMIISYS_0 */
+
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGMII0_NR_CLK 2
+
+/* SGMIISYS_1 */
+
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGMII1_NR_CLK 2
+
+/* ETHWARP */
+
+#define CLK_ETHWARP_WOCPU2_EN 0
+#define CLK_ETHWARP_WOCPU1_EN 1
+#define CLK_ETHWARP_WOCPU0_EN 2
+#define CLK_ETHWARP_NR_CLK 3
+
+/* XFIPLL */
+#define CLK_XFIPLL_PLL 0
+#define CLK_XFIPLL_PLL_EN 1
+
+#endif /* _DT_BINDINGS_CLK_MT7988_H */
diff --git a/include/dt-bindings/clock/mediatek,mt8188-clk.h b/include/dt-bindings/clock/mediatek,mt8188-clk.h
new file mode 100644
index 000000000000..bd5cd100b796
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8188-clk.h
@@ -0,0 +1,726 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8188_H
+#define _DT_BINDINGS_CLK_MT8188_H
+
+/* TOPCKGEN */
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SPM 1
+#define CLK_TOP_SCP 2
+#define CLK_TOP_BUS_AXIMEM 3
+#define CLK_TOP_VPP 4
+#define CLK_TOP_ETHDR 5
+#define CLK_TOP_IPE 6
+#define CLK_TOP_CAM 7
+#define CLK_TOP_CCU 8
+#define CLK_TOP_CCU_AHB 9
+#define CLK_TOP_IMG 10
+#define CLK_TOP_CAMTM 11
+#define CLK_TOP_DSP 12
+#define CLK_TOP_DSP1 13
+#define CLK_TOP_DSP2 14
+#define CLK_TOP_DSP3 15
+#define CLK_TOP_DSP4 16
+#define CLK_TOP_DSP5 17
+#define CLK_TOP_DSP6 18
+#define CLK_TOP_DSP7 19
+#define CLK_TOP_MFG_CORE_TMP 20
+#define CLK_TOP_CAMTG 21
+#define CLK_TOP_CAMTG2 22
+#define CLK_TOP_CAMTG3 23
+#define CLK_TOP_UART 24
+#define CLK_TOP_SPI 25
+#define CLK_TOP_MSDC50_0_HCLK 26
+#define CLK_TOP_MSDC50_0 27
+#define CLK_TOP_MSDC30_1 28
+#define CLK_TOP_MSDC30_2 29
+#define CLK_TOP_INTDIR 30
+#define CLK_TOP_AUD_INTBUS 31
+#define CLK_TOP_AUDIO_H 32
+#define CLK_TOP_PWRAP_ULPOSC 33
+#define CLK_TOP_ATB 34
+#define CLK_TOP_SSPM 35
+#define CLK_TOP_DP 36
+#define CLK_TOP_EDP 37
+#define CLK_TOP_DPI 38
+#define CLK_TOP_DISP_PWM0 39
+#define CLK_TOP_DISP_PWM1 40
+#define CLK_TOP_USB_TOP 41
+#define CLK_TOP_SSUSB_XHCI 42
+#define CLK_TOP_USB_TOP_2P 43
+#define CLK_TOP_SSUSB_XHCI_2P 44
+#define CLK_TOP_USB_TOP_3P 45
+#define CLK_TOP_SSUSB_XHCI_3P 46
+#define CLK_TOP_I2C 47
+#define CLK_TOP_SENINF 48
+#define CLK_TOP_SENINF1 49
+#define CLK_TOP_GCPU 50
+#define CLK_TOP_VENC 51
+#define CLK_TOP_VDEC 52
+#define CLK_TOP_PWM 53
+#define CLK_TOP_MCUPM 54
+#define CLK_TOP_SPMI_P_MST 55
+#define CLK_TOP_SPMI_M_MST 56
+#define CLK_TOP_DVFSRC 57
+#define CLK_TOP_TL 58
+#define CLK_TOP_AES_MSDCFDE 59
+#define CLK_TOP_DSI_OCC 60
+#define CLK_TOP_WPE_VPP 61
+#define CLK_TOP_HDCP 62
+#define CLK_TOP_HDCP_24M 63
+#define CLK_TOP_HDMI_APB 64
+#define CLK_TOP_SNPS_ETH_250M 65
+#define CLK_TOP_SNPS_ETH_62P4M_PTP 66
+#define CLK_TOP_SNPS_ETH_50M_RMII 67
+#define CLK_TOP_ADSP 68
+#define CLK_TOP_AUDIO_LOCAL_BUS 69
+#define CLK_TOP_ASM_H 70
+#define CLK_TOP_ASM_L 71
+#define CLK_TOP_APLL1 72
+#define CLK_TOP_APLL2 73
+#define CLK_TOP_APLL3 74
+#define CLK_TOP_APLL4 75
+#define CLK_TOP_APLL5 76
+#define CLK_TOP_I2SO1 77
+#define CLK_TOP_I2SO2 78
+#define CLK_TOP_I2SI1 79
+#define CLK_TOP_I2SI2 80
+#define CLK_TOP_DPTX 81
+#define CLK_TOP_AUD_IEC 82
+#define CLK_TOP_A1SYS_HP 83
+#define CLK_TOP_A2SYS 84
+#define CLK_TOP_A3SYS 85
+#define CLK_TOP_A4SYS 86
+#define CLK_TOP_ECC 87
+#define CLK_TOP_SPINOR 88
+#define CLK_TOP_ULPOSC 89
+#define CLK_TOP_SRCK 90
+#define CLK_TOP_MFG_CK_FAST_REF 91
+#define CLK_TOP_MAINPLL_D3 92
+#define CLK_TOP_MAINPLL_D4 93
+#define CLK_TOP_MAINPLL_D4_D2 94
+#define CLK_TOP_MAINPLL_D4_D4 95
+#define CLK_TOP_MAINPLL_D4_D8 96
+#define CLK_TOP_MAINPLL_D5 97
+#define CLK_TOP_MAINPLL_D5_D2 98
+#define CLK_TOP_MAINPLL_D5_D4 99
+#define CLK_TOP_MAINPLL_D5_D8 100
+#define CLK_TOP_MAINPLL_D6 101
+#define CLK_TOP_MAINPLL_D6_D2 102
+#define CLK_TOP_MAINPLL_D6_D4 103
+#define CLK_TOP_MAINPLL_D6_D8 104
+#define CLK_TOP_MAINPLL_D7 105
+#define CLK_TOP_MAINPLL_D7_D2 106
+#define CLK_TOP_MAINPLL_D7_D4 107
+#define CLK_TOP_MAINPLL_D7_D8 108
+#define CLK_TOP_MAINPLL_D9 109
+#define CLK_TOP_UNIVPLL_D2 110
+#define CLK_TOP_UNIVPLL_D3 111
+#define CLK_TOP_UNIVPLL_D4 112
+#define CLK_TOP_UNIVPLL_D4_D2 113
+#define CLK_TOP_UNIVPLL_D4_D4 114
+#define CLK_TOP_UNIVPLL_D4_D8 115
+#define CLK_TOP_UNIVPLL_D5 116
+#define CLK_TOP_UNIVPLL_D5_D2 117
+#define CLK_TOP_UNIVPLL_D5_D4 118
+#define CLK_TOP_UNIVPLL_D5_D8 119
+#define CLK_TOP_UNIVPLL_D6 120
+#define CLK_TOP_UNIVPLL_D6_D2 121
+#define CLK_TOP_UNIVPLL_D6_D4 122
+#define CLK_TOP_UNIVPLL_D6_D8 123
+#define CLK_TOP_UNIVPLL_D7 124
+#define CLK_TOP_UNIVPLL_192M 125
+#define CLK_TOP_UNIVPLL_192M_D4 126
+#define CLK_TOP_UNIVPLL_192M_D8 127
+#define CLK_TOP_UNIVPLL_192M_D10 128
+#define CLK_TOP_UNIVPLL_192M_D16 129
+#define CLK_TOP_UNIVPLL_192M_D32 130
+#define CLK_TOP_APLL1_D3 131
+#define CLK_TOP_APLL1_D4 132
+#define CLK_TOP_APLL2_D3 133
+#define CLK_TOP_APLL2_D4 134
+#define CLK_TOP_APLL3_D4 135
+#define CLK_TOP_APLL4_D4 136
+#define CLK_TOP_APLL5_D4 137
+#define CLK_TOP_MMPLL_D4 138
+#define CLK_TOP_MMPLL_D4_D2 139
+#define CLK_TOP_MMPLL_D5 140
+#define CLK_TOP_MMPLL_D5_D2 141
+#define CLK_TOP_MMPLL_D5_D4 142
+#define CLK_TOP_MMPLL_D6 143
+#define CLK_TOP_MMPLL_D6_D2 144
+#define CLK_TOP_MMPLL_D7 145
+#define CLK_TOP_MMPLL_D9 146
+#define CLK_TOP_TVDPLL1 147
+#define CLK_TOP_TVDPLL1_D2 148
+#define CLK_TOP_TVDPLL1_D4 149
+#define CLK_TOP_TVDPLL1_D8 150
+#define CLK_TOP_TVDPLL1_D16 151
+#define CLK_TOP_TVDPLL2 152
+#define CLK_TOP_TVDPLL2_D2 153
+#define CLK_TOP_TVDPLL2_D4 154
+#define CLK_TOP_TVDPLL2_D8 155
+#define CLK_TOP_TVDPLL2_D16 156
+#define CLK_TOP_MSDCPLL_D2 157
+#define CLK_TOP_MSDCPLL_D16 158
+#define CLK_TOP_ETHPLL 159
+#define CLK_TOP_ETHPLL_D2 160
+#define CLK_TOP_ETHPLL_D4 161
+#define CLK_TOP_ETHPLL_D8 162
+#define CLK_TOP_ETHPLL_D10 163
+#define CLK_TOP_ADSPPLL_D2 164
+#define CLK_TOP_ADSPPLL_D4 165
+#define CLK_TOP_ADSPPLL_D8 166
+#define CLK_TOP_ULPOSC1 167
+#define CLK_TOP_ULPOSC1_D2 168
+#define CLK_TOP_ULPOSC1_D4 169
+#define CLK_TOP_ULPOSC1_D8 170
+#define CLK_TOP_ULPOSC1_D7 171
+#define CLK_TOP_ULPOSC1_D10 172
+#define CLK_TOP_ULPOSC1_D16 173
+#define CLK_TOP_MPHONE_SLAVE_BCK 174
+#define CLK_TOP_PAD_FPC 175
+#define CLK_TOP_466M_FMEM 176
+#define CLK_TOP_PEXTP_PIPE 177
+#define CLK_TOP_DSI_PHY 178
+#define CLK_TOP_APLL12_CK_DIV0 179
+#define CLK_TOP_APLL12_CK_DIV1 180
+#define CLK_TOP_APLL12_CK_DIV2 181
+#define CLK_TOP_APLL12_CK_DIV3 182
+#define CLK_TOP_APLL12_CK_DIV4 183
+#define CLK_TOP_APLL12_CK_DIV9 184
+#define CLK_TOP_CFGREG_CLOCK_EN_VPP0 185
+#define CLK_TOP_CFGREG_CLOCK_EN_VPP1 186
+#define CLK_TOP_CFGREG_CLOCK_EN_VDO0 187
+#define CLK_TOP_CFGREG_CLOCK_EN_VDO1 188
+#define CLK_TOP_CFGREG_CLOCK_ISP_AXI_GALS 189
+#define CLK_TOP_CFGREG_F26M_VPP0 190
+#define CLK_TOP_CFGREG_F26M_VPP1 191
+#define CLK_TOP_CFGREG_F26M_VDO0 192
+#define CLK_TOP_CFGREG_F26M_VDO1 193
+#define CLK_TOP_CFGREG_AUD_F26M_AUD 194
+#define CLK_TOP_CFGREG_UNIPLL_SES 195
+#define CLK_TOP_CFGREG_F_PCIE_PHY_REF 196
+#define CLK_TOP_SSUSB_TOP_REF 197
+#define CLK_TOP_SSUSB_PHY_REF 198
+#define CLK_TOP_SSUSB_TOP_P1_REF 199
+#define CLK_TOP_SSUSB_PHY_P1_REF 200
+#define CLK_TOP_SSUSB_TOP_P2_REF 201
+#define CLK_TOP_SSUSB_PHY_P2_REF 202
+#define CLK_TOP_SSUSB_TOP_P3_REF 203
+#define CLK_TOP_SSUSB_PHY_P3_REF 204
+#define CLK_TOP_NR_CLK 205
+
+/* INFRACFG_AO */
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SEJ 4
+#define CLK_INFRA_AO_APXGPT 5
+#define CLK_INFRA_AO_GCE 6
+#define CLK_INFRA_AO_GCE2 7
+#define CLK_INFRA_AO_THERM 8
+#define CLK_INFRA_AO_PWM_HCLK 9
+#define CLK_INFRA_AO_PWM1 10
+#define CLK_INFRA_AO_PWM2 11
+#define CLK_INFRA_AO_PWM3 12
+#define CLK_INFRA_AO_PWM4 13
+#define CLK_INFRA_AO_PWM 14
+#define CLK_INFRA_AO_UART0 15
+#define CLK_INFRA_AO_UART1 16
+#define CLK_INFRA_AO_UART2 17
+#define CLK_INFRA_AO_UART3 18
+#define CLK_INFRA_AO_UART4 19
+#define CLK_INFRA_AO_GCE_26M 20
+#define CLK_INFRA_AO_CQ_DMA_FPC 21
+#define CLK_INFRA_AO_UART5 22
+#define CLK_INFRA_AO_HDMI_26M 23
+#define CLK_INFRA_AO_SPI0 24
+#define CLK_INFRA_AO_MSDC0 25
+#define CLK_INFRA_AO_MSDC1 26
+#define CLK_INFRA_AO_MSDC2 27
+#define CLK_INFRA_AO_MSDC0_SRC 28
+#define CLK_INFRA_AO_DVFSRC 29
+#define CLK_INFRA_AO_TRNG 30
+#define CLK_INFRA_AO_AUXADC 31
+#define CLK_INFRA_AO_CPUM 32
+#define CLK_INFRA_AO_HDMI_32K 33
+#define CLK_INFRA_AO_CEC_66M_HCLK 34
+#define CLK_INFRA_AO_PCIE_TL_26M 35
+#define CLK_INFRA_AO_MSDC1_SRC 36
+#define CLK_INFRA_AO_CEC_66M_BCLK 37
+#define CLK_INFRA_AO_PCIE_TL_96M 38
+#define CLK_INFRA_AO_DEVICE_APC 39
+#define CLK_INFRA_AO_ECC_66M_HCLK 40
+#define CLK_INFRA_AO_DEBUGSYS 41
+#define CLK_INFRA_AO_AUDIO 42
+#define CLK_INFRA_AO_PCIE_TL_32K 43
+#define CLK_INFRA_AO_DBG_TRACE 44
+#define CLK_INFRA_AO_DRAMC_F26M 45
+#define CLK_INFRA_AO_IRTX 46
+#define CLK_INFRA_AO_DISP_PWM 47
+#define CLK_INFRA_AO_CLDMA_BCLK 48
+#define CLK_INFRA_AO_AUDIO_26M_BCLK 49
+#define CLK_INFRA_AO_SPI1 50
+#define CLK_INFRA_AO_SPI2 51
+#define CLK_INFRA_AO_SPI3 52
+#define CLK_INFRA_AO_FSSPM 53
+#define CLK_INFRA_AO_SSPM_BUS_HCLK 54
+#define CLK_INFRA_AO_APDMA_BCLK 55
+#define CLK_INFRA_AO_SPI4 56
+#define CLK_INFRA_AO_SPI5 57
+#define CLK_INFRA_AO_CQ_DMA 58
+#define CLK_INFRA_AO_MSDC0_SELF 59
+#define CLK_INFRA_AO_MSDC1_SELF 60
+#define CLK_INFRA_AO_MSDC2_SELF 61
+#define CLK_INFRA_AO_I2S_DMA 62
+#define CLK_INFRA_AO_AP_MSDC0 63
+#define CLK_INFRA_AO_MD_MSDC0 64
+#define CLK_INFRA_AO_MSDC30_2 65
+#define CLK_INFRA_AO_GCPU 66
+#define CLK_INFRA_AO_PCIE_PERI_26M 67
+#define CLK_INFRA_AO_GCPU_66M_BCLK 68
+#define CLK_INFRA_AO_GCPU_133M_BCLK 69
+#define CLK_INFRA_AO_DISP_PWM1 70
+#define CLK_INFRA_AO_FBIST2FPC 71
+#define CLK_INFRA_AO_DEVICE_APC_SYNC 72
+#define CLK_INFRA_AO_PCIE_P1_PERI_26M 73
+#define CLK_INFRA_AO_133M_MCLK_CK 74
+#define CLK_INFRA_AO_66M_MCLK_CK 75
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 76
+#define CLK_INFRA_AO_RG_AES_MSDCFDE_CK_0P 77
+#define CLK_INFRA_AO_NR_CLK 78
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ETHPLL 0
+#define CLK_APMIXED_MSDCPLL 1
+#define CLK_APMIXED_TVDPLL1 2
+#define CLK_APMIXED_TVDPLL2 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_MAINPLL 5
+#define CLK_APMIXED_IMGPLL 6
+#define CLK_APMIXED_UNIVPLL 7
+#define CLK_APMIXED_ADSPPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_APLL3 11
+#define CLK_APMIXED_APLL4 12
+#define CLK_APMIXED_APLL5 13
+#define CLK_APMIXED_MFGPLL 14
+#define CLK_APMIXED_PLL_SSUSB26M_EN 15
+#define CLK_APMIXED_NR_CLK 16
+
+/* AUDIODSP */
+#define CLK_AUDIODSP_AUDIO26M 0
+#define CLK_AUDIODSP_NR_CLK 1
+
+/* PERICFG_AO */
+#define CLK_PERI_AO_ETHERNET 0
+#define CLK_PERI_AO_ETHERNET_BUS 1
+#define CLK_PERI_AO_FLASHIF_BUS 2
+#define CLK_PERI_AO_FLASHIF_26M 3
+#define CLK_PERI_AO_FLASHIFLASHCK 4
+#define CLK_PERI_AO_SSUSB_2P_BUS 5
+#define CLK_PERI_AO_SSUSB_2P_XHCI 6
+#define CLK_PERI_AO_SSUSB_3P_BUS 7
+#define CLK_PERI_AO_SSUSB_3P_XHCI 8
+#define CLK_PERI_AO_SSUSB_BUS 9
+#define CLK_PERI_AO_SSUSB_XHCI 10
+#define CLK_PERI_AO_ETHERNET_MAC 11
+#define CLK_PERI_AO_PCIE_P0_FMEM 12
+#define CLK_PERI_AO_NR_CLK 13
+
+/* IMP_IIC_WRAP_C */
+#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C0 0
+#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C2 1
+#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C3 2
+#define CLK_IMP_IIC_WRAP_C_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+#define CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C1 0
+#define CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C4 1
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 2
+
+/* IMP_IIC_WRAP_EN */
+#define CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C5 0
+#define CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C6 1
+#define CLK_IMP_IIC_WRAP_EN_NR_CLK 2
+
+/* MFGCFG */
+#define CLK_MFGCFG_BG3D 0
+#define CLK_MFGCFG_NR_CLK 1
+
+/* VPPSYS0 */
+#define CLK_VPP0_MDP_FG 0
+#define CLK_VPP0_STITCH 1
+#define CLK_VPP0_PADDING 2
+#define CLK_VPP0_MDP_TCC 3
+#define CLK_VPP0_WARP0_ASYNC_TX 4
+#define CLK_VPP0_WARP1_ASYNC_TX 5
+#define CLK_VPP0_MUTEX 6
+#define CLK_VPP02VPP1_RELAY 7
+#define CLK_VPP0_VPP12VPP0_ASYNC 8
+#define CLK_VPP0_MMSYSRAM_TOP 9
+#define CLK_VPP0_MDP_AAL 10
+#define CLK_VPP0_MDP_RSZ 11
+#define CLK_VPP0_SMI_COMMON_MMSRAM 12
+#define CLK_VPP0_GALS_VDO0_LARB0_MMSRAM 13
+#define CLK_VPP0_GALS_VDO0_LARB1_MMSRAM 14
+#define CLK_VPP0_GALS_VENCSYS_MMSRAM 15
+#define CLK_VPP0_GALS_VENCSYS_CORE1_MMSRAM 16
+#define CLK_VPP0_GALS_INFRA_MMSRAM 17
+#define CLK_VPP0_GALS_CAMSYS_MMSRAM 18
+#define CLK_VPP0_GALS_VPP1_LARB5_MMSRAM 19
+#define CLK_VPP0_GALS_VPP1_LARB6_MMSRAM 20
+#define CLK_VPP0_SMI_REORDER_MMSRAM 21
+#define CLK_VPP0_SMI_IOMMU 22
+#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23
+#define CLK_VPP0_MDP_RDMA 24
+#define CLK_VPP0_MDP_WROT 25
+#define CLK_VPP0_GALS_EMI0_EMI1 26
+#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27
+#define CLK_VPP0_SMI_RSI 28
+#define CLK_VPP0_SMI_COMMON_LARB4 29
+#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30
+#define CLK_VPP0_GALS_VPP1_WPESYS 31
+#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32
+#define CLK_VPP0_FAKE_ENG 33
+#define CLK_VPP0_MDP_HDR 34
+#define CLK_VPP0_MDP_TDSHP 35
+#define CLK_VPP0_MDP_COLOR 36
+#define CLK_VPP0_MDP_OVL 37
+#define CLK_VPP0_DSIP_RDMA 38
+#define CLK_VPP0_DISP_WDMA 39
+#define CLK_VPP0_MDP_HMS 40
+#define CLK_VPP0_WARP0_RELAY 41
+#define CLK_VPP0_WARP0_ASYNC 42
+#define CLK_VPP0_WARP1_RELAY 43
+#define CLK_VPP0_WARP1_ASYNC 44
+#define CLK_VPP0_NR_CLK 45
+
+/* WPESYS */
+#define CLK_WPE_TOP_WPE_VPP0 0
+#define CLK_WPE_TOP_SMI_LARB7 1
+#define CLK_WPE_TOP_WPESYS_EVENT_TX 2
+#define CLK_WPE_TOP_SMI_LARB7_PCLK_EN 3
+#define CLK_WPE_TOP_NR_CLK 4
+
+/* WPESYS_VPP0 */
+#define CLK_WPE_VPP0_VECI 0
+#define CLK_WPE_VPP0_VEC2I 1
+#define CLK_WPE_VPP0_VEC3I 2
+#define CLK_WPE_VPP0_WPEO 3
+#define CLK_WPE_VPP0_MSKO 4
+#define CLK_WPE_VPP0_VGEN 5
+#define CLK_WPE_VPP0_EXT 6
+#define CLK_WPE_VPP0_VFC 7
+#define CLK_WPE_VPP0_CACH0_TOP 8
+#define CLK_WPE_VPP0_CACH0_DMA 9
+#define CLK_WPE_VPP0_CACH1_TOP 10
+#define CLK_WPE_VPP0_CACH1_DMA 11
+#define CLK_WPE_VPP0_CACH2_TOP 12
+#define CLK_WPE_VPP0_CACH2_DMA 13
+#define CLK_WPE_VPP0_CACH3_TOP 14
+#define CLK_WPE_VPP0_CACH3_DMA 15
+#define CLK_WPE_VPP0_PSP 16
+#define CLK_WPE_VPP0_PSP2 17
+#define CLK_WPE_VPP0_SYNC 18
+#define CLK_WPE_VPP0_C24 19
+#define CLK_WPE_VPP0_MDP_CROP 20
+#define CLK_WPE_VPP0_ISP_CROP 21
+#define CLK_WPE_VPP0_TOP 22
+#define CLK_WPE_VPP0_NR_CLK 23
+
+/* VPPSYS1 */
+#define CLK_VPP1_SVPP1_MDP_OVL 0
+#define CLK_VPP1_SVPP1_MDP_TCC 1
+#define CLK_VPP1_SVPP1_MDP_WROT 2
+#define CLK_VPP1_SVPP1_VPP_PAD 3
+#define CLK_VPP1_SVPP2_MDP_WROT 4
+#define CLK_VPP1_SVPP2_VPP_PAD 5
+#define CLK_VPP1_SVPP3_MDP_WROT 6
+#define CLK_VPP1_SVPP3_VPP_PAD 7
+#define CLK_VPP1_SVPP1_MDP_RDMA 8
+#define CLK_VPP1_SVPP1_MDP_FG 9
+#define CLK_VPP1_SVPP2_MDP_RDMA 10
+#define CLK_VPP1_SVPP2_MDP_FG 11
+#define CLK_VPP1_SVPP3_MDP_RDMA 12
+#define CLK_VPP1_SVPP3_MDP_FG 13
+#define CLK_VPP1_VPP_SPLIT 14
+#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15
+#define CLK_VPP1_SVPP1_MDP_RSZ 16
+#define CLK_VPP1_SVPP1_MDP_TDSHP 17
+#define CLK_VPP1_SVPP1_MDP_COLOR 18
+#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 19
+#define CLK_VPP1_SVPP2_MDP_RSZ 20
+#define CLK_VPP1_SVPP2_VPP_MERGE 21
+#define CLK_VPP1_SVPP2_MDP_TDSHP 22
+#define CLK_VPP1_SVPP2_MDP_COLOR 23
+#define CLK_VPP1_SVPP3_MDP_RSZ 24
+#define CLK_VPP1_SVPP3_VPP_MERGE 25
+#define CLK_VPP1_SVPP3_MDP_TDSHP 26
+#define CLK_VPP1_SVPP3_MDP_COLOR 27
+#define CLK_VPP1_GALS5 28
+#define CLK_VPP1_GALS6 29
+#define CLK_VPP1_LARB5 30
+#define CLK_VPP1_LARB6 31
+#define CLK_VPP1_SVPP1_MDP_HDR 32
+#define CLK_VPP1_SVPP1_MDP_AAL 33
+#define CLK_VPP1_SVPP2_MDP_HDR 34
+#define CLK_VPP1_SVPP2_MDP_AAL 35
+#define CLK_VPP1_SVPP3_MDP_HDR 36
+#define CLK_VPP1_SVPP3_MDP_AAL 37
+#define CLK_VPP1_DISP_MUTEX 38
+#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 39
+#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 40
+#define CLK_VPP1_VPP0_DL_ASYNC 41
+#define CLK_VPP1_VPP0_DL1_RELAY 42
+#define CLK_VPP1_LARB5_FAKE_ENG 43
+#define CLK_VPP1_LARB6_FAKE_ENG 44
+#define CLK_VPP1_HDMI_META 45
+#define CLK_VPP1_VPP_SPLIT_HDMI 46
+#define CLK_VPP1_DGI_IN 47
+#define CLK_VPP1_DGI_OUT 48
+#define CLK_VPP1_VPP_SPLIT_DGI 49
+#define CLK_VPP1_DL_CON_OCC 50
+#define CLK_VPP1_VPP_SPLIT_26M 51
+#define CLK_VPP1_NR_CLK 52
+
+/* IMGSYS */
+#define CLK_IMGSYS_MAIN_LARB9 0
+#define CLK_IMGSYS_MAIN_TRAW0 1
+#define CLK_IMGSYS_MAIN_TRAW1 2
+#define CLK_IMGSYS_MAIN_VCORE_GALS 3
+#define CLK_IMGSYS_MAIN_DIP0 4
+#define CLK_IMGSYS_MAIN_WPE0 5
+#define CLK_IMGSYS_MAIN_IPE 6
+#define CLK_IMGSYS_MAIN_WPE1 7
+#define CLK_IMGSYS_MAIN_WPE2 8
+#define CLK_IMGSYS_MAIN_GALS 9
+#define CLK_IMGSYS_MAIN_NR_CLK 10
+
+/* IMGSYS1_DIP_TOP */
+#define CLK_IMGSYS1_DIP_TOP_LARB10 0
+#define CLK_IMGSYS1_DIP_TOP_DIP_TOP 1
+#define CLK_IMGSYS1_DIP_TOP_NR_CLK 2
+
+/* IMGSYS1_DIP_NR */
+#define CLK_IMGSYS1_DIP_NR_LARB15 0
+#define CLK_IMGSYS1_DIP_NR_DIP_NR 1
+#define CLK_IMGSYS1_DIP_NR_NR_CLK 2
+
+/* IMGSYS_WPE1 */
+#define CLK_IMGSYS_WPE1_LARB11 0
+#define CLK_IMGSYS_WPE1 1
+#define CLK_IMGSYS_WPE1_NR_CLK 2
+
+/* IPESYS */
+#define CLK_IPE_DPE 0
+#define CLK_IPE_FDVT 1
+#define CLK_IPE_ME 2
+#define CLK_IPESYS_TOP 3
+#define CLK_IPE_SMI_LARB12 4
+#define CLK_IPE_NR_CLK 5
+
+/* IMGSYS_WPE2 */
+#define CLK_IMGSYS_WPE2_LARB11 0
+#define CLK_IMGSYS_WPE2 1
+#define CLK_IMGSYS_WPE2_NR_CLK 2
+
+/* IMGSYS_WPE3 */
+#define CLK_IMGSYS_WPE3_LARB11 0
+#define CLK_IMGSYS_WPE3 1
+#define CLK_IMGSYS_WPE3_NR_CLK 2
+
+/* CAMSYS */
+#define CLK_CAM_MAIN_LARB13 0
+#define CLK_CAM_MAIN_LARB14 1
+#define CLK_CAM_MAIN_CAM 2
+#define CLK_CAM_MAIN_CAM_SUBA 3
+#define CLK_CAM_MAIN_CAM_SUBB 4
+#define CLK_CAM_MAIN_CAMTG 5
+#define CLK_CAM_MAIN_SENINF 6
+#define CLK_CAM_MAIN_GCAMSVA 7
+#define CLK_CAM_MAIN_GCAMSVB 8
+#define CLK_CAM_MAIN_GCAMSVC 9
+#define CLK_CAM_MAIN_GCAMSVD 10
+#define CLK_CAM_MAIN_GCAMSVE 11
+#define CLK_CAM_MAIN_GCAMSVF 12
+#define CLK_CAM_MAIN_GCAMSVG 13
+#define CLK_CAM_MAIN_GCAMSVH 14
+#define CLK_CAM_MAIN_GCAMSVI 15
+#define CLK_CAM_MAIN_GCAMSVJ 16
+#define CLK_CAM_MAIN_CAMSV_TOP 17
+#define CLK_CAM_MAIN_CAMSV_CQ_A 18
+#define CLK_CAM_MAIN_CAMSV_CQ_B 19
+#define CLK_CAM_MAIN_CAMSV_CQ_C 20
+#define CLK_CAM_MAIN_FAKE_ENG 21
+#define CLK_CAM_MAIN_CAM2MM0_GALS 22
+#define CLK_CAM_MAIN_CAM2MM1_GALS 23
+#define CLK_CAM_MAIN_CAM2SYS_GALS 24
+#define CLK_CAM_MAIN_NR_CLK 25
+
+/* CAMSYS_RAWA */
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_YUVA */
+#define CLK_CAM_YUVA_LARBX 0
+#define CLK_CAM_YUVA_CAM 1
+#define CLK_CAM_YUVA_CAMTG 2
+#define CLK_CAM_YUVA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_YUVB */
+#define CLK_CAM_YUVB_LARBX 0
+#define CLK_CAM_YUVB_CAM 1
+#define CLK_CAM_YUVB_CAMTG 2
+#define CLK_CAM_YUVB_NR_CLK 3
+
+/* CCUSYS */
+#define CLK_CCU_LARB27 0
+#define CLK_CCU_AHB 1
+#define CLK_CCU_CCU0 2
+#define CLK_CCU_NR_CLK 3
+
+/* VDECSYS_SOC */
+#define CLK_VDEC1_SOC_LARB1 0
+#define CLK_VDEC1_SOC_LAT 1
+#define CLK_VDEC1_SOC_LAT_ACTIVE 2
+#define CLK_VDEC1_SOC_LAT_ENG 3
+#define CLK_VDEC1_SOC_VDEC 4
+#define CLK_VDEC1_SOC_VDEC_ACTIVE 5
+#define CLK_VDEC1_SOC_VDEC_ENG 6
+#define CLK_VDEC1_NR_CLK 7
+
+/* VDECSYS */
+#define CLK_VDEC2_LARB1 0
+#define CLK_VDEC2_LAT 1
+#define CLK_VDEC2_VDEC 2
+#define CLK_VDEC2_VDEC_ACTIVE 3
+#define CLK_VDEC2_VDEC_ENG 4
+#define CLK_VDEC2_NR_CLK 5
+
+/* VENCSYS */
+#define CLK_VENC1_LARB 0
+#define CLK_VENC1_VENC 1
+#define CLK_VENC1_JPGENC 2
+#define CLK_VENC1_JPGDEC 3
+#define CLK_VENC1_JPGDEC_C1 4
+#define CLK_VENC1_GALS 5
+#define CLK_VENC1_GALS_SRAM 6
+#define CLK_VENC1_NR_CLK 7
+
+/* VDOSYS0 */
+#define CLK_VDO0_DISP_OVL0 0
+#define CLK_VDO0_FAKE_ENG0 1
+#define CLK_VDO0_DISP_CCORR0 2
+#define CLK_VDO0_DISP_MUTEX0 3
+#define CLK_VDO0_DISP_GAMMA0 4
+#define CLK_VDO0_DISP_DITHER0 5
+#define CLK_VDO0_DISP_WDMA0 6
+#define CLK_VDO0_DISP_RDMA0 7
+#define CLK_VDO0_DSI0 8
+#define CLK_VDO0_DSI1 9
+#define CLK_VDO0_DSC_WRAP0 10
+#define CLK_VDO0_VPP_MERGE0 11
+#define CLK_VDO0_DP_INTF0 12
+#define CLK_VDO0_DISP_AAL0 13
+#define CLK_VDO0_INLINEROT0 14
+#define CLK_VDO0_APB_BUS 15
+#define CLK_VDO0_DISP_COLOR0 16
+#define CLK_VDO0_MDP_WROT0 17
+#define CLK_VDO0_DISP_RSZ0 18
+#define CLK_VDO0_DISP_POSTMASK0 19
+#define CLK_VDO0_FAKE_ENG1 20
+#define CLK_VDO0_DL_ASYNC2 21
+#define CLK_VDO0_DL_RELAY3 22
+#define CLK_VDO0_DL_RELAY4 23
+#define CLK_VDO0_SMI_GALS 24
+#define CLK_VDO0_SMI_COMMON 25
+#define CLK_VDO0_SMI_EMI 26
+#define CLK_VDO0_SMI_IOMMU 27
+#define CLK_VDO0_SMI_LARB 28
+#define CLK_VDO0_SMI_RSI 29
+#define CLK_VDO0_DSI0_DSI 30
+#define CLK_VDO0_DSI1_DSI 31
+#define CLK_VDO0_DP_INTF0_DP_INTF 32
+#define CLK_VDO0_NR_CLK 33
+
+/* VDOSYS1 */
+#define CLK_VDO1_SMI_LARB2 0
+#define CLK_VDO1_SMI_LARB3 1
+#define CLK_VDO1_GALS 2
+#define CLK_VDO1_FAKE_ENG0 3
+#define CLK_VDO1_FAKE_ENG1 4
+#define CLK_VDO1_MDP_RDMA0 5
+#define CLK_VDO1_MDP_RDMA1 6
+#define CLK_VDO1_MDP_RDMA2 7
+#define CLK_VDO1_MDP_RDMA3 8
+#define CLK_VDO1_VPP_MERGE0 9
+#define CLK_VDO1_VPP_MERGE1 10
+#define CLK_VDO1_VPP_MERGE2 11
+#define CLK_VDO1_VPP_MERGE3 12
+#define CLK_VDO1_VPP_MERGE4 13
+#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14
+#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15
+#define CLK_VDO1_DISP_MUTEX 16
+#define CLK_VDO1_MDP_RDMA4 17
+#define CLK_VDO1_MDP_RDMA5 18
+#define CLK_VDO1_MDP_RDMA6 19
+#define CLK_VDO1_MDP_RDMA7 20
+#define CLK_VDO1_DP_INTF0_MMCK 21
+#define CLK_VDO1_DPI0_MM 22
+#define CLK_VDO1_DPI1_MM 23
+#define CLK_VDO1_MERGE0_DL_ASYNC 24
+#define CLK_VDO1_MERGE1_DL_ASYNC 25
+#define CLK_VDO1_MERGE2_DL_ASYNC 26
+#define CLK_VDO1_MERGE3_DL_ASYNC 27
+#define CLK_VDO1_MERGE4_DL_ASYNC 28
+#define CLK_VDO1_DSC_VDO1_DL_ASYNC 29
+#define CLK_VDO1_MERGE_VDO1_DL_ASYNC 30
+#define CLK_VDO1_PADDING0 31
+#define CLK_VDO1_PADDING1 32
+#define CLK_VDO1_PADDING2 33
+#define CLK_VDO1_PADDING3 34
+#define CLK_VDO1_PADDING4 35
+#define CLK_VDO1_PADDING5 36
+#define CLK_VDO1_PADDING6 37
+#define CLK_VDO1_PADDING7 38
+#define CLK_VDO1_DISP_RSZ0 39
+#define CLK_VDO1_DISP_RSZ1 40
+#define CLK_VDO1_DISP_RSZ2 41
+#define CLK_VDO1_DISP_RSZ3 42
+#define CLK_VDO1_HDR_VDO_FE0 43
+#define CLK_VDO1_HDR_GFX_FE0 44
+#define CLK_VDO1_HDR_VDO_BE 45
+#define CLK_VDO1_HDR_VDO_FE1 46
+#define CLK_VDO1_HDR_GFX_FE1 47
+#define CLK_VDO1_DISP_MIXER 48
+#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 49
+#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 50
+#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 51
+#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 52
+#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 53
+#define CLK_VDO1_DPI0 54
+#define CLK_VDO1_DISP_MONITOR_DPI0 55
+#define CLK_VDO1_DPI1 56
+#define CLK_VDO1_DISP_MONITOR_DPI1 57
+#define CLK_VDO1_DPINTF 58
+#define CLK_VDO1_DISP_MONITOR_DPINTF 59
+#define CLK_VDO1_26M_SLOW 60
+#define CLK_VDO1_NR_CLK 61
+
+#endif /* _DT_BINDINGS_CLK_MT8188_H */
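
Each *_NR_CLK value above is a sentinel, not a clock: it is one past the highest ID in its block, so a provider driver can use it to size the lookup table it hands to the clock framework. The sketch below shows the generic onecell pattern under that assumption; the function name and the exact include path are illustrative, and the real MT8188 driver may structure this differently.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <dt-bindings/clock/mediatek,mt8188-clk.h>

static int vdo1_clk_probe_sketch(struct device_node *np)
{
	struct clk_hw_onecell_data *clk_data;

	/* One slot per binding ID; the sentinel is the table size. */
	clk_data = kzalloc(struct_size(clk_data, hws, CLK_VDO1_NR_CLK),
			   GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = CLK_VDO1_NR_CLK;

	/*
	 * Each registered clock is stored at its binding index, e.g.
	 * clk_data->hws[CLK_VDO1_DPI0] = <registered clk_hw>;
	 */

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
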
diff --git a/include/dt-bindings/clock/mediatek,mt8365-clk.h b/include/dt-bindings/clock/mediatek,mt8365-clk.h
new file mode 100644
index 000000000000..f9aff1775810
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8365-clk.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8365_H
+#define _DT_BINDINGS_CLK_MT8365_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CLK_NULL 0
+#define CLK_TOP_I2S0_BCK 1
+#define CLK_TOP_DSI0_LNTC_DSICK 2
+#define CLK_TOP_VPLL_DPIX 3
+#define CLK_TOP_LVDSTX_CLKDIG_CTS 4
+#define CLK_TOP_MFGPLL 5
+#define CLK_TOP_SYSPLL_D2 6
+#define CLK_TOP_SYSPLL1_D2 7
+#define CLK_TOP_SYSPLL1_D4 8
+#define CLK_TOP_SYSPLL1_D8 9
+#define CLK_TOP_SYSPLL1_D16 10
+#define CLK_TOP_SYSPLL_D3 11
+#define CLK_TOP_SYSPLL2_D2 12
+#define CLK_TOP_SYSPLL2_D4 13
+#define CLK_TOP_SYSPLL2_D8 14
+#define CLK_TOP_SYSPLL_D5 15
+#define CLK_TOP_SYSPLL3_D2 16
+#define CLK_TOP_SYSPLL3_D4 17
+#define CLK_TOP_SYSPLL_D7 18
+#define CLK_TOP_SYSPLL4_D2 19
+#define CLK_TOP_SYSPLL4_D4 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_LVDSPLL_D2 35
+#define CLK_TOP_LVDSPLL_D4 36
+#define CLK_TOP_LVDSPLL_D8 37
+#define CLK_TOP_LVDSPLL_D16 38
+#define CLK_TOP_USB20_192M 39
+#define CLK_TOP_USB20_192M_D4 40
+#define CLK_TOP_USB20_192M_D8 41
+#define CLK_TOP_USB20_192M_D16 42
+#define CLK_TOP_USB20_192M_D32 43
+#define CLK_TOP_APLL1 44
+#define CLK_TOP_APLL1_D2 45
+#define CLK_TOP_APLL1_D4 46
+#define CLK_TOP_APLL1_D8 47
+#define CLK_TOP_APLL2 48
+#define CLK_TOP_APLL2_D2 49
+#define CLK_TOP_APLL2_D4 50
+#define CLK_TOP_APLL2_D8 51
+#define CLK_TOP_SYS_26M_D2 52
+#define CLK_TOP_MSDCPLL 53
+#define CLK_TOP_MSDCPLL_D2 54
+#define CLK_TOP_DSPPLL 55
+#define CLK_TOP_DSPPLL_D2 56
+#define CLK_TOP_DSPPLL_D4 57
+#define CLK_TOP_DSPPLL_D8 58
+#define CLK_TOP_APUPLL 59
+#define CLK_TOP_CLK26M_D52 60
+#define CLK_TOP_AXI_SEL 61
+#define CLK_TOP_MEM_SEL 62
+#define CLK_TOP_MM_SEL 63
+#define CLK_TOP_SCP_SEL 64
+#define CLK_TOP_MFG_SEL 65
+#define CLK_TOP_ATB_SEL 66
+#define CLK_TOP_CAMTG_SEL 67
+#define CLK_TOP_CAMTG1_SEL 68
+#define CLK_TOP_UART_SEL 69
+#define CLK_TOP_SPI_SEL 70
+#define CLK_TOP_MSDC50_0_HC_SEL 71
+#define CLK_TOP_MSDC2_2_HC_SEL 72
+#define CLK_TOP_MSDC50_0_SEL 73
+#define CLK_TOP_MSDC50_2_SEL 74
+#define CLK_TOP_MSDC30_1_SEL 75
+#define CLK_TOP_AUDIO_SEL 76
+#define CLK_TOP_AUD_INTBUS_SEL 77
+#define CLK_TOP_AUD_1_SEL 78
+#define CLK_TOP_AUD_2_SEL 79
+#define CLK_TOP_AUD_ENGEN1_SEL 80
+#define CLK_TOP_AUD_ENGEN2_SEL 81
+#define CLK_TOP_AUD_SPDIF_SEL 82
+#define CLK_TOP_DISP_PWM_SEL 83
+#define CLK_TOP_DXCC_SEL 84
+#define CLK_TOP_SSUSB_SYS_SEL 85
+#define CLK_TOP_SSUSB_XHCI_SEL 86
+#define CLK_TOP_SPM_SEL 87
+#define CLK_TOP_I2C_SEL 88
+#define CLK_TOP_PWM_SEL 89
+#define CLK_TOP_SENIF_SEL 90
+#define CLK_TOP_AES_FDE_SEL 91
+#define CLK_TOP_CAMTM_SEL 92
+#define CLK_TOP_DPI0_SEL 93
+#define CLK_TOP_DPI1_SEL 94
+#define CLK_TOP_DSP_SEL 95
+#define CLK_TOP_NFI2X_SEL 96
+#define CLK_TOP_NFIECC_SEL 97
+#define CLK_TOP_ECC_SEL 98
+#define CLK_TOP_ETH_SEL 99
+#define CLK_TOP_GCPU_SEL 100
+#define CLK_TOP_GCPU_CPM_SEL 101
+#define CLK_TOP_APU_SEL 102
+#define CLK_TOP_APU_IF_SEL 103
+#define CLK_TOP_MBIST_DIAG_SEL 104
+#define CLK_TOP_APLL_I2S0_SEL 105
+#define CLK_TOP_APLL_I2S1_SEL 106
+#define CLK_TOP_APLL_I2S2_SEL 107
+#define CLK_TOP_APLL_I2S3_SEL 108
+#define CLK_TOP_APLL_TDMOUT_SEL 109
+#define CLK_TOP_APLL_TDMIN_SEL 110
+#define CLK_TOP_APLL_SPDIF_SEL 111
+#define CLK_TOP_APLL12_CK_DIV0 112
+#define CLK_TOP_APLL12_CK_DIV1 113
+#define CLK_TOP_APLL12_CK_DIV2 114
+#define CLK_TOP_APLL12_CK_DIV3 115
+#define CLK_TOP_APLL12_CK_DIV4 116
+#define CLK_TOP_APLL12_CK_DIV4B 117
+#define CLK_TOP_APLL12_CK_DIV5 118
+#define CLK_TOP_APLL12_CK_DIV5B 119
+#define CLK_TOP_APLL12_CK_DIV6 120
+#define CLK_TOP_AUD_I2S0_M 121
+#define CLK_TOP_AUD_I2S1_M 122
+#define CLK_TOP_AUD_I2S2_M 123
+#define CLK_TOP_AUD_I2S3_M 124
+#define CLK_TOP_AUD_TDMOUT_M 125
+#define CLK_TOP_AUD_TDMOUT_B 126
+#define CLK_TOP_AUD_TDMIN_M 127
+#define CLK_TOP_AUD_TDMIN_B 128
+#define CLK_TOP_AUD_SPDIF_M 129
+#define CLK_TOP_USB20_48M_EN 130
+#define CLK_TOP_UNIVPLL_48M_EN 131
+#define CLK_TOP_LVDSTX_CLKDIG_EN 132
+#define CLK_TOP_VPLL_DPIX_EN 133
+#define CLK_TOP_SSUSB_TOP_CK_EN 134
+#define CLK_TOP_SSUSB_PHY_CK_EN 135
+#define CLK_TOP_CONN_32K 136
+#define CLK_TOP_CONN_26M 137
+#define CLK_TOP_DSP_32K 138
+#define CLK_TOP_DSP_26M 139
+#define CLK_TOP_NR_CLK 140
+
+/* INFRACFG */
+#define CLK_IFR_PMIC_TMR 0
+#define CLK_IFR_PMIC_AP 1
+#define CLK_IFR_PMIC_MD 2
+#define CLK_IFR_PMIC_CONN 3
+#define CLK_IFR_ICUSB 4
+#define CLK_IFR_GCE 5
+#define CLK_IFR_THERM 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_UART2 16
+#define CLK_IFR_DSP_UART 17
+#define CLK_IFR_GCE_26M 18
+#define CLK_IFR_CQ_DMA_FPC 19
+#define CLK_IFR_BTIF 20
+#define CLK_IFR_SPI0 21
+#define CLK_IFR_MSDC0_HCLK 22
+#define CLK_IFR_MSDC2_HCLK 23
+#define CLK_IFR_MSDC1_HCLK 24
+#define CLK_IFR_DVFSRC 25
+#define CLK_IFR_GCPU 26
+#define CLK_IFR_TRNG 27
+#define CLK_IFR_AUXADC 28
+#define CLK_IFR_CPUM 29
+#define CLK_IFR_AUXADC_MD 30
+#define CLK_IFR_AP_DMA 31
+#define CLK_IFR_DEBUGSYS 32
+#define CLK_IFR_AUDIO 33
+#define CLK_IFR_PWM_FBCLK6 34
+#define CLK_IFR_DISP_PWM 35
+#define CLK_IFR_AUD_26M_BK 36
+#define CLK_IFR_CQ_DMA 37
+#define CLK_IFR_MSDC0_SF 38
+#define CLK_IFR_MSDC1_SF 39
+#define CLK_IFR_MSDC2_SF 40
+#define CLK_IFR_AP_MSDC0 41
+#define CLK_IFR_MD_MSDC0 42
+#define CLK_IFR_MSDC0_SRC 43
+#define CLK_IFR_MSDC1_SRC 44
+#define CLK_IFR_MSDC2_SRC 45
+#define CLK_IFR_PWRAP_TMR 46
+#define CLK_IFR_PWRAP_SPI 47
+#define CLK_IFR_PWRAP_SYS 48
+#define CLK_IFR_MCU_PM_BK 49
+#define CLK_IFR_IRRX_26M 50
+#define CLK_IFR_IRRX_32K 51
+#define CLK_IFR_I2C0_AXI 52
+#define CLK_IFR_I2C1_AXI 53
+#define CLK_IFR_I2C2_AXI 54
+#define CLK_IFR_I2C3_AXI 55
+#define CLK_IFR_NIC_AXI 56
+#define CLK_IFR_NIC_SLV_AXI 57
+#define CLK_IFR_APU_AXI 58
+#define CLK_IFR_NFIECC 59
+#define CLK_IFR_NFIECC_BK 60
+#define CLK_IFR_NFI1X_BK 61
+#define CLK_IFR_NFI_BK 62
+#define CLK_IFR_MSDC2_AP_BK 63
+#define CLK_IFR_MSDC2_MD_BK 64
+#define CLK_IFR_MSDC2_BK 65
+#define CLK_IFR_SUSB_133_BK 66
+#define CLK_IFR_SUSB_66_BK 67
+#define CLK_IFR_SSUSB_SYS 68
+#define CLK_IFR_SSUSB_REF 69
+#define CLK_IFR_SSUSB_XHCI 70
+#define CLK_IFR_NR_CLK 71
+
+/* PERICFG */
+#define CLK_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MFGPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_APLL1 6
+#define CLK_APMIXED_APLL2 7
+#define CLK_APMIXED_LVDSPLL 8
+#define CLK_APMIXED_DSPPLL 9
+#define CLK_APMIXED_APUPLL 10
+#define CLK_APMIXED_UNIV_EN 11
+#define CLK_APMIXED_USB20_EN 12
+#define CLK_APMIXED_NR_CLK 13
+
+/* GCE */
+#define CLK_GCE_FAXI 0
+#define CLK_GCE_NR_CLK 1
+
+/* AUDIOTOP */
+#define CLK_AUD_AFE 0
+#define CLK_AUD_I2S 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_INTDIR 4
+#define CLK_AUD_APLL2_TUNER 5
+#define CLK_AUD_APLL_TUNER 6
+#define CLK_AUD_SPDF 7
+#define CLK_AUD_HDMI 8
+#define CLK_AUD_HDMI_IN 9
+#define CLK_AUD_ADC 10
+#define CLK_AUD_DAC 11
+#define CLK_AUD_DAC_PREDIS 12
+#define CLK_AUD_TML 13
+#define CLK_AUD_I2S1_BK 14
+#define CLK_AUD_I2S2_BK 15
+#define CLK_AUD_I2S3_BK 16
+#define CLK_AUD_I2S4_BK 17
+#define CLK_AUD_NR_CLK 18
+
+/* MIPI_CSI0A */
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI_RX_ANA_CSI0A_NR_CLK 1
+
+/* MIPI_CSI0B */
+#define CLK_MIPI0B_CSR_CSI_EN_0B 0
+#define CLK_MIPI_RX_ANA_CSI0B_NR_CLK 1
+
+/* MIPI_CSI1A */
+#define CLK_MIPI1A_CSR_CSI_EN_1A 0
+#define CLK_MIPI_RX_ANA_CSI1A_NR_CLK 1
+
+/* MIPI_CSI1B */
+#define CLK_MIPI1B_CSR_CSI_EN_1B 0
+#define CLK_MIPI_RX_ANA_CSI1B_NR_CLK 1
+
+/* MIPI_CSI2A */
+#define CLK_MIPI2A_CSR_CSI_EN_2A 0
+#define CLK_MIPI_RX_ANA_CSI2A_NR_CLK 1
+
+/* MIPI_CSI2B */
+#define CLK_MIPI2B_CSR_CSI_EN_2B 0
+#define CLK_MIPI_RX_ANA_CSI2B_NR_CLK 1
+
+/* MCUCFG */
+#define CLK_MCU_BUS_SEL 0
+#define CLK_MCU_NR_CLK 1
+
+/* MFGCFG */
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_MBIST_DIAG 1
+#define CLK_MFG_NR_CLK 2
+
+/* MMSYS */
+#define CLK_MM_MM_MDP_RDMA0 0
+#define CLK_MM_MM_MDP_CCORR0 1
+#define CLK_MM_MM_MDP_RSZ0 2
+#define CLK_MM_MM_MDP_RSZ1 3
+#define CLK_MM_MM_MDP_TDSHP0 4
+#define CLK_MM_MM_MDP_WROT0 5
+#define CLK_MM_MM_MDP_WDMA0 6
+#define CLK_MM_MM_DISP_OVL0 7
+#define CLK_MM_MM_DISP_OVL0_2L 8
+#define CLK_MM_MM_DISP_RSZ0 9
+#define CLK_MM_MM_DISP_RDMA0 10
+#define CLK_MM_MM_DISP_WDMA0 11
+#define CLK_MM_MM_DISP_COLOR0 12
+#define CLK_MM_MM_DISP_CCORR0 13
+#define CLK_MM_MM_DISP_AAL0 14
+#define CLK_MM_MM_DISP_GAMMA0 15
+#define CLK_MM_MM_DISP_DITHER0 16
+#define CLK_MM_MM_DSI0 17
+#define CLK_MM_MM_DISP_RDMA1 18
+#define CLK_MM_MM_MDP_RDMA1 19
+#define CLK_MM_DPI0_DPI0 20
+#define CLK_MM_MM_FAKE 21
+#define CLK_MM_MM_SMI_COMMON 22
+#define CLK_MM_MM_SMI_LARB0 23
+#define CLK_MM_MM_SMI_COMM0 24
+#define CLK_MM_MM_SMI_COMM1 25
+#define CLK_MM_MM_CAM_MDP 26
+#define CLK_MM_MM_SMI_IMG 27
+#define CLK_MM_MM_SMI_CAM 28
+#define CLK_MM_IMG_IMG_DL_RELAY 29
+#define CLK_MM_IMG_IMG_DL_ASYNC_TOP 30
+#define CLK_MM_DSI0_DIG_DSI 31
+#define CLK_MM_26M_HRTWT 32
+#define CLK_MM_MM_DPI0 33
+#define CLK_MM_LVDSTX_PXL 34
+#define CLK_MM_LVDSTX_CTS 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+#define CLK_CAM_LARB2 0
+#define CLK_CAM 1
+#define CLK_CAMTG 2
+#define CLK_CAM_SENIF 3
+#define CLK_CAMSV0 4
+#define CLK_CAMSV1 5
+#define CLK_CAM_FDVT 6
+#define CLK_CAM_WPE 7
+#define CLK_CAM_NR_CLK 8
+
+/* VDECSYS */
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_LARB1 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENCSYS */
+#define CLK_VENC 0
+#define CLK_VENC_JPGENC 1
+#define CLK_VENC_NR_CLK 2
+
+/* APUSYS */
+#define CLK_APU_IPU_CK 0
+#define CLK_APU_AXI 1
+#define CLK_APU_JTAG 2
+#define CLK_APU_IF_CK 3
+#define CLK_APU_EDMA 4
+#define CLK_APU_AHB 5
+#define CLK_APU_NR_CLK 6
+
+#endif /* _DT_BINDINGS_CLK_MT8365_H */
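
Note that every controller block in this header (TOPCKGEN, INFRACFG, APMIXEDSYS, and so on) restarts numbering at 0. The IDs are only meaningful relative to the provider phandle, so a consumer node would carry something like clocks = <&topckgen CLK_TOP_UART_SEL>, <&infracfg CLK_IFR_UART0>; (the node labels here are illustrative). A driver can also pin each sentinel against the last real ID at compile time; a minimal sketch, assuming the header path this patch adds:

#include <linux/build_bug.h>
#include <dt-bindings/clock/mediatek,mt8365-clk.h>

/* The sentinel must sit exactly one past the last ID in each block. */
static_assert(CLK_TOP_DSP_26M == CLK_TOP_NR_CLK - 1);
static_assert(CLK_IFR_SSUSB_XHCI == CLK_IFR_NR_CLK - 1);
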
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 4c5965ae1df4..385bf243c56c 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -6,8 +6,6 @@
#ifndef __MESON8B_CLKC_H
#define __MESON8B_CLKC_H
-#define CLKID_UNUSED 0
-#define CLKID_XTAL 1
#define CLKID_PLL_FIXED 2
#define CLKID_PLL_VID 3
#define CLKID_PLL_SYS 4
@@ -102,19 +100,126 @@
#define CLKID_MPLL0 93
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
+#define CLKID_MPLL0_DIV 96
+#define CLKID_MPLL1_DIV 97
+#define CLKID_MPLL2_DIV 98
+#define CLKID_CPU_IN_SEL 99
+#define CLKID_CPU_IN_DIV2 100
+#define CLKID_CPU_IN_DIV3 101
+#define CLKID_CPU_SCALE_DIV 102
+#define CLKID_CPU_SCALE_OUT_SEL 103
+#define CLKID_MPLL_PREDIV 104
+#define CLKID_FCLK_DIV2_DIV 105
+#define CLKID_FCLK_DIV3_DIV 106
+#define CLKID_FCLK_DIV4_DIV 107
+#define CLKID_FCLK_DIV5_DIV 108
+#define CLKID_FCLK_DIV7_DIV 109
+#define CLKID_NAND_SEL 110
+#define CLKID_NAND_DIV 111
#define CLKID_NAND_CLK 112
+#define CLKID_PLL_FIXED_DCO 113
+#define CLKID_HDMI_PLL_DCO 114
+#define CLKID_PLL_SYS_DCO 115
+#define CLKID_CPU_CLK_DIV2 116
+#define CLKID_CPU_CLK_DIV3 117
+#define CLKID_CPU_CLK_DIV4 118
+#define CLKID_CPU_CLK_DIV5 119
+#define CLKID_CPU_CLK_DIV6 120
+#define CLKID_CPU_CLK_DIV7 121
+#define CLKID_CPU_CLK_DIV8 122
+#define CLKID_APB_SEL 123
#define CLKID_APB 124
+#define CLKID_PERIPH_SEL 125
#define CLKID_PERIPH 126
+#define CLKID_AXI_SEL 127
#define CLKID_AXI 128
+#define CLKID_L2_DRAM_SEL 129
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_PLL_LVDS_OUT 131
+#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_IN_SEL 133
+#define CLKID_VID_PLL_IN_EN 134
+#define CLKID_VID_PLL_PRE_DIV 135
+#define CLKID_VID_PLL_POST_DIV 136
+#define CLKID_VID_PLL_FINAL_DIV 137
+#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK_IN_EN 139
+#define CLKID_VCLK_DIV1 140
+#define CLKID_VCLK_DIV2_DIV 141
+#define CLKID_VCLK_DIV2 142
+#define CLKID_VCLK_DIV4_DIV 143
+#define CLKID_VCLK_DIV4 144
+#define CLKID_VCLK_DIV6_DIV 145
+#define CLKID_VCLK_DIV6 146
+#define CLKID_VCLK_DIV12_DIV 147
+#define CLKID_VCLK_DIV12 148
+#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_VCLK2_IN_EN 150
+#define CLKID_VCLK2_DIV1 151
+#define CLKID_VCLK2_DIV2_DIV 152
+#define CLKID_VCLK2_DIV2 153
+#define CLKID_VCLK2_DIV4_DIV 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6_DIV 156
+#define CLKID_VCLK2_DIV6 157
+#define CLKID_VCLK2_DIV12_DIV 158
+#define CLKID_VCLK2_DIV12 159
+#define CLKID_CTS_ENCT_SEL 160
+#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP_SEL 162
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI_SEL 164
+#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL_SEL 166
+#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL_SEL 168
+#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0_SEL 170
+#define CLKID_CTS_VDAC0 171
+#define CLKID_HDMI_SYS_SEL 172
+#define CLKID_HDMI_SYS_DIV 173
#define CLKID_HDMI_SYS 174
+#define CLKID_MALI_0_SEL 175
+#define CLKID_MALI_0_DIV 176
+#define CLKID_MALI_0 177
+#define CLKID_MALI_1_SEL 178
+#define CLKID_MALI_1_DIV 179
+#define CLKID_MALI_1 180
+#define CLKID_GP_PLL_DCO 181
+#define CLKID_GP_PLL 182
+#define CLKID_VPU_0_SEL 183
+#define CLKID_VPU_0_DIV 184
+#define CLKID_VPU_0 185
+#define CLKID_VPU_1_SEL 186
+#define CLKID_VPU_1_DIV 187
+#define CLKID_VPU_1 189
#define CLKID_VPU 190
+#define CLKID_VDEC_1_SEL 191
+#define CLKID_VDEC_1_1_DIV 192
+#define CLKID_VDEC_1_1 193
+#define CLKID_VDEC_1_2_DIV 194
+#define CLKID_VDEC_1_2 195
#define CLKID_VDEC_1 196
+#define CLKID_VDEC_HCODEC_SEL 197
+#define CLKID_VDEC_HCODEC_DIV 198
#define CLKID_VDEC_HCODEC 199
+#define CLKID_VDEC_2_SEL 200
+#define CLKID_VDEC_2_DIV 201
#define CLKID_VDEC_2 202
+#define CLKID_VDEC_HEVC_SEL 203
+#define CLKID_VDEC_HEVC_DIV 204
+#define CLKID_VDEC_HEVC_EN 205
#define CLKID_VDEC_HEVC 206
+#define CLKID_CTS_AMCLK_SEL 207
+#define CLKID_CTS_AMCLK_DIV 208
#define CLKID_CTS_AMCLK 209
+#define CLKID_CTS_MCLK_I958_SEL 210
+#define CLKID_CTS_MCLK_I958_DIV 211
#define CLKID_CTS_MCLK_I958 212
#define CLKID_CTS_I958 213
+#define CLKID_VCLK_EN 214
+#define CLKID_VCLK2_EN 215
+#define CLKID_VID_PLL_LVDS_EN 216
+#define CLKID_HDMI_PLL_DCO_IN 217
#endif /* __MESON8B_CLKC_H */
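
This hunk promotes IDs that were previously private to the meson8b clock driver into the shared binding header, so device trees can now reference intermediate muxes and dividers such as CLKID_MALI_0 directly. When a consumer cell is resolved, the generic onecell callback translates the number into a clk_hw with a bounds check; the sketch below paraphrases that helper rather than quoting it.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>

static struct clk_hw *onecell_xlate_sketch(struct of_phandle_args *clkspec,
					   void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];	/* e.g. CLKID_MALI_0 */

	if (idx >= hw_data->num)	/* ID beyond the provider's table */
		return ERR_PTR(-EINVAL);

	return hw_data->hws[idx];
}
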
diff --git a/include/dt-bindings/clock/microchip,lan966x.h b/include/dt-bindings/clock/microchip,lan966x.h
new file mode 100644
index 000000000000..6f9d43d76d5a
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,lan966x.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Microchip Inc.
+ *
+ * Author: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_LAN966X_H
+#define _DT_BINDINGS_CLK_LAN966X_H
+
+#define GCK_ID_QSPI0 0
+#define GCK_ID_QSPI1 1
+#define GCK_ID_QSPI2 2
+#define GCK_ID_SDMMC0 3
+#define GCK_ID_PI 4
+#define GCK_ID_MCAN0 5
+#define GCK_ID_MCAN1 6
+#define GCK_ID_FLEXCOM0 7
+#define GCK_ID_FLEXCOM1 8
+#define GCK_ID_FLEXCOM2 9
+#define GCK_ID_FLEXCOM3 10
+#define GCK_ID_FLEXCOM4 11
+#define GCK_ID_TIMER 12
+#define GCK_ID_USB_REFCLK 13
+
+/* Gate clocks */
+#define GCK_GATE_UHPHS 14
+#define GCK_GATE_UDPHS 15
+#define GCK_GATE_MCRAMC 16
+#define GCK_GATE_HMATRIX 17
+
+#define N_CLOCKS 18
+
+#endif
diff --git a/include/dt-bindings/clock/microchip,mpfs-clock.h b/include/dt-bindings/clock/microchip,mpfs-clock.h
new file mode 100644
index 000000000000..b52f19a2b480
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,mpfs-clock.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Daire McNamara, <daire.mcnamara@microchip.com>
+ * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
+#define _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
+
+#define CLK_CPU 0
+#define CLK_AXI 1
+#define CLK_AHB 2
+
+#define CLK_ENVM 3
+#define CLK_MAC0 4
+#define CLK_MAC1 5
+#define CLK_MMC 6
+#define CLK_TIMER 7
+#define CLK_MMUART0 8
+#define CLK_MMUART1 9
+#define CLK_MMUART2 10
+#define CLK_MMUART3 11
+#define CLK_MMUART4 12
+#define CLK_SPI0 13
+#define CLK_SPI1 14
+#define CLK_I2C0 15
+#define CLK_I2C1 16
+#define CLK_CAN0 17
+#define CLK_CAN1 18
+#define CLK_USB 19
+#define CLK_RESERVED 20
+#define CLK_RTC 21
+#define CLK_QSPI 22
+#define CLK_GPIO0 23
+#define CLK_GPIO1 24
+#define CLK_GPIO2 25
+#define CLK_DDRC 26
+#define CLK_FIC0 27
+#define CLK_FIC1 28
+#define CLK_FIC2 29
+#define CLK_FIC3 30
+#define CLK_ATHENA 31
+#define CLK_CFM 32
+
+#define CLK_RTCREF 33
+#define CLK_MSSPLL 34
+#define CLK_MSSPLL0 34
+#define CLK_MSSPLL1 35
+#define CLK_MSSPLL2 36
+#define CLK_MSSPLL3 37
+/* 38 is reserved for MSS PLL internals */
+
+/* Clock Conditioning Circuitry Clock IDs */
+
+#define CLK_CCC_PLL0 0
+#define CLK_CCC_PLL1 1
+#define CLK_CCC_DLL0 2
+#define CLK_CCC_DLL1 3
+
+#define CLK_CCC_PLL0_OUT0 4
+#define CLK_CCC_PLL0_OUT1 5
+#define CLK_CCC_PLL0_OUT2 6
+#define CLK_CCC_PLL0_OUT3 7
+
+#define CLK_CCC_PLL1_OUT0 8
+#define CLK_CCC_PLL1_OUT1 9
+#define CLK_CCC_PLL1_OUT2 10
+#define CLK_CCC_PLL1_OUT3 11
+
+#define CLK_CCC_DLL0_OUT0 12
+#define CLK_CCC_DLL0_OUT1 13
+
+#define CLK_CCC_DLL1_OUT0 14
+#define CLK_CCC_DLL1_OUT1 15
+
+#endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */
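
CLK_MSSPLL and CLK_MSSPLL0 intentionally share value 34; the older single-output name appears to be kept as a compatibility alias so device trees written before the per-output MSSPLL0..3 names keep resolving to the same provider slot. A one-line compile-time guard, assuming the header path this patch adds:

#include <linux/build_bug.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>

/* Both names must keep resolving to the same provider slot. */
static_assert(CLK_MSSPLL == CLK_MSSPLL0);
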
diff --git a/include/dt-bindings/clock/mobileye,eyeq5-clk.h b/include/dt-bindings/clock/mobileye,eyeq5-clk.h
new file mode 100644
index 000000000000..26d8930335e4
--- /dev/null
+++ b/include/dt-bindings/clock/mobileye,eyeq5-clk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H
+#define _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H
+
+#define EQ5C_PLL_CPU 0
+#define EQ5C_PLL_VMP 1
+#define EQ5C_PLL_PMA 2
+#define EQ5C_PLL_VDI 3
+#define EQ5C_PLL_DDR0 4
+#define EQ5C_PLL_PCI 5
+#define EQ5C_PLL_PER 6
+#define EQ5C_PLL_PMAC 7
+#define EQ5C_PLL_MPC 8
+#define EQ5C_PLL_DDR1 9
+
+#define EQ5C_DIV_OSPI 10
+
+#endif
diff --git a/include/dt-bindings/clock/mstar-msc313-mpll.h b/include/dt-bindings/clock/mstar-msc313-mpll.h
new file mode 100644
index 000000000000..1b30b02317b6
--- /dev/null
+++ b/include/dt-bindings/clock/mstar-msc313-mpll.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Output definitions for the MStar/SigmaStar MPLL
+ *
+ * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MSTAR_MSC313_MPLL_H
+#define _DT_BINDINGS_CLOCK_MSTAR_MSC313_MPLL_H
+
+#define MSTAR_MSC313_MPLL_DIV2 1
+#define MSTAR_MSC313_MPLL_DIV3 2
+#define MSTAR_MSC313_MPLL_DIV4 3
+#define MSTAR_MSC313_MPLL_DIV5 4
+#define MSTAR_MSC313_MPLL_DIV6 5
+#define MSTAR_MSC313_MPLL_DIV7 6
+#define MSTAR_MSC313_MPLL_DIV10 7
+
+#endif
diff --git a/include/dt-bindings/clock/mt7621-clk.h b/include/dt-bindings/clock/mt7621-clk.h
new file mode 100644
index 000000000000..1422badcf9de
--- /dev/null
+++ b/include/dt-bindings/clock/mt7621-clk.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7621_H
+#define _DT_BINDINGS_CLK_MT7621_H
+
+#define MT7621_CLK_XTAL 0
+#define MT7621_CLK_CPU 1
+#define MT7621_CLK_BUS 2
+#define MT7621_CLK_50M 3
+#define MT7621_CLK_125M 4
+#define MT7621_CLK_150M 5
+#define MT7621_CLK_250M 6
+#define MT7621_CLK_270M 7
+
+#define MT7621_CLK_HSDMA 8
+#define MT7621_CLK_FE 9
+#define MT7621_CLK_SP_DIVTX 10
+#define MT7621_CLK_TIMER 11
+#define MT7621_CLK_PCM 12
+#define MT7621_CLK_PIO 13
+#define MT7621_CLK_GDMA 14
+#define MT7621_CLK_NAND 15
+#define MT7621_CLK_I2C 16
+#define MT7621_CLK_I2S 17
+#define MT7621_CLK_SPI 18
+#define MT7621_CLK_UART1 19
+#define MT7621_CLK_UART2 20
+#define MT7621_CLK_UART3 21
+#define MT7621_CLK_ETH 22
+#define MT7621_CLK_PCIE0 23
+#define MT7621_CLK_PCIE1 24
+#define MT7621_CLK_PCIE2 25
+#define MT7621_CLK_CRYPTO 26
+#define MT7621_CLK_SHXC 27
+
+#define MT7621_CLK_MAX 28
+
+#endif /* _DT_BINDINGS_CLK_MT7621_H */
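
These macros are consumed by device tree sources rather than by driver code: a node carries something like clocks = <&sysc MT7621_CLK_UART1>; clock-names = "uart1"; (the &sysc label is illustrative), and the driver then fetches the clock by name through the common clk API. A minimal consumer sketch under those assumptions:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int uart_clk_enable_sketch(struct platform_device *pdev)
{
	/* "uart1" matches the hypothetical clock-names entry above. */
	struct clk *clk = devm_clk_get(&pdev->dev, "uart1");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}
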
diff --git a/include/dt-bindings/clock/mt7986-clk.h b/include/dt-bindings/clock/mt7986-clk.h
new file mode 100644
index 000000000000..5a9b169324b0
--- /dev/null
+++ b/include/dt-bindings/clock/mt7986-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7986_H
+#define _DT_BINDINGS_CLK_MT7986_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D4 5
+#define CLK_TOP_MPLL_D8 6
+#define CLK_TOP_MPLL_D8_D2 7
+#define CLK_TOP_MPLL_D3_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D4 10
+#define CLK_TOP_MMPLL_D8 11
+#define CLK_TOP_MMPLL_D8_D2 12
+#define CLK_TOP_MMPLL_D3_D8 13
+#define CLK_TOP_MMPLL_U2PHY 14
+#define CLK_TOP_APLL2_D4 15
+#define CLK_TOP_NET1PLL_D4 16
+#define CLK_TOP_NET1PLL_D5 17
+#define CLK_TOP_NET1PLL_D5_D2 18
+#define CLK_TOP_NET1PLL_D5_D4 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET2PLL_D4 22
+#define CLK_TOP_NET2PLL_D4_D2 23
+#define CLK_TOP_NET2PLL_D3_D2 24
+#define CLK_TOP_WEDMCUPLL_D5_D2 25
+#define CLK_TOP_NFI1X_SEL 26
+#define CLK_TOP_SPINFI_SEL 27
+#define CLK_TOP_SPI_SEL 28
+#define CLK_TOP_SPIM_MST_SEL 29
+#define CLK_TOP_UART_SEL 30
+#define CLK_TOP_PWM_SEL 31
+#define CLK_TOP_I2C_SEL 32
+#define CLK_TOP_PEXTP_TL_SEL 33
+#define CLK_TOP_EMMC_250M_SEL 34
+#define CLK_TOP_EMMC_416M_SEL 35
+#define CLK_TOP_F_26M_ADC_SEL 36
+#define CLK_TOP_DRAMC_SEL 37
+#define CLK_TOP_DRAMC_MD32_SEL 38
+#define CLK_TOP_SYSAXI_SEL 39
+#define CLK_TOP_SYSAPB_SEL 40
+#define CLK_TOP_ARM_DB_MAIN_SEL 41
+#define CLK_TOP_ARM_DB_JTSEL 42
+#define CLK_TOP_NETSYS_SEL 43
+#define CLK_TOP_NETSYS_500M_SEL 44
+#define CLK_TOP_NETSYS_MCU_SEL 45
+#define CLK_TOP_NETSYS_2X_SEL 46
+#define CLK_TOP_SGM_325M_SEL 47
+#define CLK_TOP_SGM_REG_SEL 48
+#define CLK_TOP_A1SYS_SEL 49
+#define CLK_TOP_CONN_MCUSYS_SEL 50
+#define CLK_TOP_EIP_B_SEL 51
+#define CLK_TOP_PCIE_PHY_SEL 52
+#define CLK_TOP_USB3_PHY_SEL 53
+#define CLK_TOP_F26M_SEL 54
+#define CLK_TOP_AUD_L_SEL 55
+#define CLK_TOP_A_TUNER_SEL 56
+#define CLK_TOP_U2U3_SEL 57
+#define CLK_TOP_U2U3_SYS_SEL 58
+#define CLK_TOP_U2U3_XHCI_SEL 59
+#define CLK_TOP_DA_U2_REFSEL 60
+#define CLK_TOP_DA_U2_CK_1P_SEL 61
+#define CLK_TOP_AP2CNN_HOST_SEL 62
+#define CLK_TOP_JTAG 63
+
+/* INFRACFG */
+
+#define CLK_INFRA_SYSAXI_D2 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_PWM1_SEL 6
+#define CLK_INFRA_PWM2_SEL 7
+#define CLK_INFRA_PWM_BSEL 8
+#define CLK_INFRA_PCIE_SEL 9
+#define CLK_INFRA_GPT_STA 10
+#define CLK_INFRA_PWM_HCK 11
+#define CLK_INFRA_PWM_STA 12
+#define CLK_INFRA_PWM1_CK 13
+#define CLK_INFRA_PWM2_CK 14
+#define CLK_INFRA_CQ_DMA_CK 15
+#define CLK_INFRA_EIP97_CK 16
+#define CLK_INFRA_AUD_BUS_CK 17
+#define CLK_INFRA_AUD_26M_CK 18
+#define CLK_INFRA_AUD_L_CK 19
+#define CLK_INFRA_AUD_AUD_CK 20
+#define CLK_INFRA_AUD_EG2_CK 21
+#define CLK_INFRA_DRAMC_26M_CK 22
+#define CLK_INFRA_DBG_CK 23
+#define CLK_INFRA_AP_DMA_CK 24
+#define CLK_INFRA_SEJ_CK 25
+#define CLK_INFRA_SEJ_13M_CK 26
+#define CLK_INFRA_THERM_CK 27
+#define CLK_INFRA_I2C0_CK 28
+#define CLK_INFRA_UART0_CK 29
+#define CLK_INFRA_UART1_CK 30
+#define CLK_INFRA_UART2_CK 31
+#define CLK_INFRA_NFI1_CK 32
+#define CLK_INFRA_SPINFI1_CK 33
+#define CLK_INFRA_NFI_HCK_CK 34
+#define CLK_INFRA_SPI0_CK 35
+#define CLK_INFRA_SPI1_CK 36
+#define CLK_INFRA_SPI0_HCK_CK 37
+#define CLK_INFRA_SPI1_HCK_CK 38
+#define CLK_INFRA_FRTC_CK 39
+#define CLK_INFRA_MSDC_CK 40
+#define CLK_INFRA_MSDC_HCK_CK 41
+#define CLK_INFRA_MSDC_133M_CK 42
+#define CLK_INFRA_MSDC_66M_CK 43
+#define CLK_INFRA_ADC_26M_CK 44
+#define CLK_INFRA_ADC_FRC_CK 45
+#define CLK_INFRA_FBIST2FPC_CK 46
+#define CLK_INFRA_IUSB_133_CK 47
+#define CLK_INFRA_IUSB_66M_CK 48
+#define CLK_INFRA_IUSB_SYS_CK 49
+#define CLK_INFRA_IUSB_CK 50
+#define CLK_INFRA_IPCIE_CK 51
+#define CLK_INFRA_IPCIE_PIPE_CK 52
+#define CLK_INFRA_IPCIER_CK 53
+#define CLK_INFRA_IPCIEB_CK 54
+#define CLK_INFRA_TRNG_CK 55
+
+/* SGMIISYS_0 */
+
+#define CLK_SGMII0_TX250M_EN 0
+#define CLK_SGMII0_RX250M_EN 1
+#define CLK_SGMII0_CDR_REF 2
+#define CLK_SGMII0_CDR_FB 3
+
+/* SGMIISYS_1 */
+
+#define CLK_SGMII1_TX250M_EN 0
+#define CLK_SGMII1_RX250M_EN 1
+#define CLK_SGMII1_CDR_REF 2
+#define CLK_SGMII1_CDR_FB 3
+
+/* ETHSYS */
+
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU1_EN 3
+#define CLK_ETH_WOCPU0_EN 4
+
+#endif /* _DT_BINDINGS_CLK_MT7986_H */
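
SGMIISYS_0 and SGMIISYS_1 deliberately use identical indices, since each hardware instance gets its own provider node; a single driver can therefore reuse one table for both instances it probes. An illustrative sketch (the array and its entries are hypothetical, not from the kernel tree):

#include <dt-bindings/clock/mt7986-clk.h>

static const char * const sgmii_gate_names[] = {
	[CLK_SGMII0_TX250M_EN]	= "tx250m",
	[CLK_SGMII0_RX250M_EN]	= "rx250m",
	[CLK_SGMII0_CDR_REF]	= "cdr_ref",
	[CLK_SGMII0_CDR_FB]	= "cdr_fb",
};
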
diff --git a/include/dt-bindings/clock/mt8167-clk.h b/include/dt-bindings/clock/mt8167-clk.h
new file mode 100644
index 000000000000..a96158edd817
--- /dev/null
+++ b/include/dt-bindings/clock/mt8167-clk.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8167_H
+#define _DT_BINDINGS_CLK_MT8167_H
+
+/* MT8167 is based on MT8516 */
+#include <dt-bindings/clock/mt8516-clk.h>
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_TVDPLL (CLK_APMIXED_NR_CLK + 0)
+#define CLK_APMIXED_LVDSPLL (CLK_APMIXED_NR_CLK + 1)
+#define CLK_APMIXED_HDMI_REF (CLK_APMIXED_NR_CLK + 2)
+#define MT8167_CLK_APMIXED_NR_CLK (CLK_APMIXED_NR_CLK + 3)
+
+/* TOPCKGEN */
+
+#define CLK_TOP_DSI0_LNTC_DSICK (CLK_TOP_NR_CLK + 0)
+#define CLK_TOP_VPLL_DPIX (CLK_TOP_NR_CLK + 1)
+#define CLK_TOP_LVDSTX_CLKDIG_CTS (CLK_TOP_NR_CLK + 2)
+#define CLK_TOP_HDMTX_CLKDIG_CTS (CLK_TOP_NR_CLK + 3)
+#define CLK_TOP_LVDSPLL (CLK_TOP_NR_CLK + 4)
+#define CLK_TOP_LVDSPLL_D2 (CLK_TOP_NR_CLK + 5)
+#define CLK_TOP_LVDSPLL_D4 (CLK_TOP_NR_CLK + 6)
+#define CLK_TOP_LVDSPLL_D8 (CLK_TOP_NR_CLK + 7)
+#define CLK_TOP_MIPI_26M (CLK_TOP_NR_CLK + 8)
+#define CLK_TOP_TVDPLL (CLK_TOP_NR_CLK + 9)
+#define CLK_TOP_TVDPLL_D2 (CLK_TOP_NR_CLK + 10)
+#define CLK_TOP_TVDPLL_D4 (CLK_TOP_NR_CLK + 11)
+#define CLK_TOP_TVDPLL_D8 (CLK_TOP_NR_CLK + 12)
+#define CLK_TOP_TVDPLL_D16 (CLK_TOP_NR_CLK + 13)
+#define CLK_TOP_PWM_MM (CLK_TOP_NR_CLK + 14)
+#define CLK_TOP_CAM_MM (CLK_TOP_NR_CLK + 15)
+#define CLK_TOP_MFG_MM (CLK_TOP_NR_CLK + 16)
+#define CLK_TOP_SPM_52M (CLK_TOP_NR_CLK + 17)
+#define CLK_TOP_MIPI_26M_DBG (CLK_TOP_NR_CLK + 18)
+#define CLK_TOP_SCAM_MM (CLK_TOP_NR_CLK + 19)
+#define CLK_TOP_SMI_MM (CLK_TOP_NR_CLK + 20)
+#define CLK_TOP_26M_HDMI_SIFM (CLK_TOP_NR_CLK + 21)
+#define CLK_TOP_26M_CEC (CLK_TOP_NR_CLK + 22)
+#define CLK_TOP_32K_CEC (CLK_TOP_NR_CLK + 23)
+#define CLK_TOP_GCPU_B (CLK_TOP_NR_CLK + 24)
+#define CLK_TOP_RG_VDEC (CLK_TOP_NR_CLK + 25)
+#define CLK_TOP_RG_FDPI0 (CLK_TOP_NR_CLK + 26)
+#define CLK_TOP_RG_FDPI1 (CLK_TOP_NR_CLK + 27)
+#define CLK_TOP_RG_AXI_MFG (CLK_TOP_NR_CLK + 28)
+#define CLK_TOP_RG_SLOW_MFG (CLK_TOP_NR_CLK + 29)
+#define CLK_TOP_GFMUX_EMI1X_SEL (CLK_TOP_NR_CLK + 30)
+#define CLK_TOP_CSW_MUX_MFG_SEL (CLK_TOP_NR_CLK + 31)
+#define CLK_TOP_CAMTG_MM_SEL (CLK_TOP_NR_CLK + 32)
+#define CLK_TOP_PWM_MM_SEL (CLK_TOP_NR_CLK + 33)
+#define CLK_TOP_SPM_52M_SEL (CLK_TOP_NR_CLK + 34)
+#define CLK_TOP_MFG_MM_SEL (CLK_TOP_NR_CLK + 35)
+#define CLK_TOP_SMI_MM_SEL (CLK_TOP_NR_CLK + 36)
+#define CLK_TOP_SCAM_MM_SEL (CLK_TOP_NR_CLK + 37)
+#define CLK_TOP_VDEC_MM_SEL (CLK_TOP_NR_CLK + 38)
+#define CLK_TOP_DPI0_MM_SEL (CLK_TOP_NR_CLK + 39)
+#define CLK_TOP_DPI1_MM_SEL (CLK_TOP_NR_CLK + 40)
+#define CLK_TOP_AXI_MFG_IN_SEL (CLK_TOP_NR_CLK + 41)
+#define CLK_TOP_SLOW_MFG_SEL (CLK_TOP_NR_CLK + 42)
+#define MT8167_CLK_TOP_NR_CLK (CLK_TOP_NR_CLK + 43)
+
+/* MFGCFG */
+
+#define CLK_MFG_BAXI 0
+#define CLK_MFG_BMEM 1
+#define CLK_MFG_BG3D 2
+#define CLK_MFG_B26M 3
+#define CLK_MFG_NR_CLK 4
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA 3
+#define CLK_MM_MDP_RSZ0 4
+#define CLK_MM_MDP_RSZ1 5
+#define CLK_MM_MDP_TDSHP 6
+#define CLK_MM_MDP_WDMA 7
+#define CLK_MM_MDP_WROT 8
+#define CLK_MM_FAKE_ENG 9
+#define CLK_MM_DISP_OVL0 10
+#define CLK_MM_DISP_RDMA0 11
+#define CLK_MM_DISP_RDMA1 12
+#define CLK_MM_DISP_WDMA 13
+#define CLK_MM_DISP_COLOR 14
+#define CLK_MM_DISP_CCORR 15
+#define CLK_MM_DISP_AAL 16
+#define CLK_MM_DISP_GAMMA 17
+#define CLK_MM_DISP_DITHER 18
+#define CLK_MM_DISP_UFOE 19
+#define CLK_MM_DISP_PWM_MM 20
+#define CLK_MM_DISP_PWM_26M 21
+#define CLK_MM_DSI_ENGINE 22
+#define CLK_MM_DSI_DIGITAL 23
+#define CLK_MM_DPI0_ENGINE 24
+#define CLK_MM_DPI0_PXL 25
+#define CLK_MM_LVDS_PXL 26
+#define CLK_MM_LVDS_CTS 27
+#define CLK_MM_DPI1_ENGINE 28
+#define CLK_MM_DPI1_PXL 29
+#define CLK_MM_HDMI_PXL 30
+#define CLK_MM_HDMI_SPDIF 31
+#define CLK_MM_HDMI_ADSP_BCK 32
+#define CLK_MM_HDMI_PLL 33
+#define CLK_MM_NR_CLK 34
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB1_SMI 0
+#define CLK_IMG_CAM_SMI 1
+#define CLK_IMG_CAM_CAM 2
+#define CLK_IMG_SEN_TG 3
+#define CLK_IMG_SEN_CAM 4
+#define CLK_IMG_VENC 5
+#define CLK_IMG_NR_CLK 6
+
+/* VDECSYS */
+
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB1_CKEN 1
+#define CLK_VDEC_NR_CLK 2
+
+#endif /* _DT_BINDINGS_CLK_MT8167_H */
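
Unlike the other headers in this series, mt8167-clk.h extends an existing ID space arithmetically: every new ID is expressed as CLK_TOP_NR_CLK + n on top of the inherited mt8516-clk.h values, and MT8167_CLK_TOP_NR_CLK becomes the combined sentinel. A sketch of how a driver would size its table accordingly:

#include <linux/clk-provider.h>
#include <dt-bindings/clock/mt8167-clk.h>	/* also pulls in mt8516-clk.h */

/*
 * A table sized with the combined sentinel has room for the inherited
 * MT8516 IDs and the offset MT8167 additions alike. The array name is
 * a hypothetical stand-in for the driver's real clock table.
 */
static struct clk_hw *mt8167_top_hws[MT8167_CLK_TOP_NR_CLK];
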
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 3acebe937bfc..3d00c98b9654 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -186,7 +186,6 @@
#define CLK_INFRA_PMICWRAP 11
#define CLK_INFRA_CLK_13M 12
#define CLK_INFRA_CA53SEL 13
-#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */
#define CLK_INFRA_CA72SEL 14
#define CLK_INFRA_NR_CLK 15
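
The removed CLK_INFRA_CA57SEL had been kept as a deprecated duplicate of value 14 after the macro was renamed to CLK_INFRA_CA72SEL, so compiled DTBs carry the same cell either way and only sources still using the old name are affected by this deletion. The generic form of that deprecation idiom, with purely illustrative names:

/*
 * Keep the old name as an equal-valued alias for a while, then delete
 * it once users have migrated. Names below are not from the kernel.
 */
#define EXAMPLE_CLK_CA72SEL	14
#define EXAMPLE_CLK_CA57SEL	EXAMPLE_CLK_CA72SEL	/* deprecated alias */
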
diff --git a/include/dt-bindings/clock/mt8186-clk.h b/include/dt-bindings/clock/mt8186-clk.h
new file mode 100644
index 000000000000..a70bf67af47d
--- /dev/null
+++ b/include/dt-bindings/clock/mt8186-clk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8186_H
+#define _DT_BINDINGS_CLK_MT8186_H
+
+/* MCUSYS */
+
+#define CLK_MCU_ARMPLL_LL_SEL 0
+#define CLK_MCU_ARMPLL_BL_SEL 1
+#define CLK_MCU_ARMPLL_BUS_SEL 2
+#define CLK_MCU_NR_CLK 3
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SCP 1
+#define CLK_TOP_MFG 2
+#define CLK_TOP_CAMTG 3
+#define CLK_TOP_CAMTG1 4
+#define CLK_TOP_CAMTG2 5
+#define CLK_TOP_CAMTG3 6
+#define CLK_TOP_CAMTG4 7
+#define CLK_TOP_CAMTG5 8
+#define CLK_TOP_CAMTG6 9
+#define CLK_TOP_UART 10
+#define CLK_TOP_SPI 11
+#define CLK_TOP_MSDC50_0_HCLK 12
+#define CLK_TOP_MSDC50_0 13
+#define CLK_TOP_MSDC30_1 14
+#define CLK_TOP_AUDIO 15
+#define CLK_TOP_AUD_INTBUS 16
+#define CLK_TOP_AUD_1 17
+#define CLK_TOP_AUD_2 18
+#define CLK_TOP_AUD_ENGEN1 19
+#define CLK_TOP_AUD_ENGEN2 20
+#define CLK_TOP_DISP_PWM 21
+#define CLK_TOP_SSPM 22
+#define CLK_TOP_DXCC 23
+#define CLK_TOP_USB_TOP 24
+#define CLK_TOP_SRCK 25
+#define CLK_TOP_SPM 26
+#define CLK_TOP_I2C 27
+#define CLK_TOP_PWM 28
+#define CLK_TOP_SENINF 29
+#define CLK_TOP_SENINF1 30
+#define CLK_TOP_SENINF2 31
+#define CLK_TOP_SENINF3 32
+#define CLK_TOP_AES_MSDCFDE 33
+#define CLK_TOP_PWRAP_ULPOSC 34
+#define CLK_TOP_CAMTM 35
+#define CLK_TOP_VENC 36
+#define CLK_TOP_CAM 37
+#define CLK_TOP_IMG1 38
+#define CLK_TOP_IPE 39
+#define CLK_TOP_DPMAIF 40
+#define CLK_TOP_VDEC 41
+#define CLK_TOP_DISP 42
+#define CLK_TOP_MDP 43
+#define CLK_TOP_AUDIO_H 44
+#define CLK_TOP_UFS 45
+#define CLK_TOP_AES_FDE 46
+#define CLK_TOP_AUDIODSP 47
+#define CLK_TOP_DVFSRC 48
+#define CLK_TOP_DSI_OCC 49
+#define CLK_TOP_SPMI_MST 50
+#define CLK_TOP_SPINOR 51
+#define CLK_TOP_NNA 52
+#define CLK_TOP_NNA1 53
+#define CLK_TOP_NNA2 54
+#define CLK_TOP_SSUSB_XHCI 55
+#define CLK_TOP_SSUSB_TOP_1P 56
+#define CLK_TOP_SSUSB_XHCI_1P 57
+#define CLK_TOP_WPE 58
+#define CLK_TOP_DPI 59
+#define CLK_TOP_U3_OCC_250M 60
+#define CLK_TOP_U3_OCC_500M 61
+#define CLK_TOP_ADSP_BUS 62
+#define CLK_TOP_APLL_I2S0_MCK_SEL 63
+#define CLK_TOP_APLL_I2S1_MCK_SEL 64
+#define CLK_TOP_APLL_I2S2_MCK_SEL 65
+#define CLK_TOP_APLL_I2S4_MCK_SEL 66
+#define CLK_TOP_APLL_TDMOUT_MCK_SEL 67
+#define CLK_TOP_MAINPLL_D2 68
+#define CLK_TOP_MAINPLL_D2_D2 69
+#define CLK_TOP_MAINPLL_D2_D4 70
+#define CLK_TOP_MAINPLL_D2_D16 71
+#define CLK_TOP_MAINPLL_D3 72
+#define CLK_TOP_MAINPLL_D3_D2 73
+#define CLK_TOP_MAINPLL_D3_D4 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D7 78
+#define CLK_TOP_MAINPLL_D7_D2 79
+#define CLK_TOP_MAINPLL_D7_D4 80
+#define CLK_TOP_UNIVPLL 81
+#define CLK_TOP_UNIVPLL_D2 82
+#define CLK_TOP_UNIVPLL_D2_D2 83
+#define CLK_TOP_UNIVPLL_D2_D4 84
+#define CLK_TOP_UNIVPLL_D3 85
+#define CLK_TOP_UNIVPLL_D3_D2 86
+#define CLK_TOP_UNIVPLL_D3_D4 87
+#define CLK_TOP_UNIVPLL_D3_D8 88
+#define CLK_TOP_UNIVPLL_D3_D32 89
+#define CLK_TOP_UNIVPLL_D5 90
+#define CLK_TOP_UNIVPLL_D5_D2 91
+#define CLK_TOP_UNIVPLL_D5_D4 92
+#define CLK_TOP_UNIVPLL_D7 93
+#define CLK_TOP_UNIVPLL_192M 94
+#define CLK_TOP_UNIVPLL_192M_D4 95
+#define CLK_TOP_UNIVPLL_192M_D8 96
+#define CLK_TOP_UNIVPLL_192M_D16 97
+#define CLK_TOP_UNIVPLL_192M_D32 98
+#define CLK_TOP_APLL1_D2 99
+#define CLK_TOP_APLL1_D4 100
+#define CLK_TOP_APLL1_D8 101
+#define CLK_TOP_APLL2_D2 102
+#define CLK_TOP_APLL2_D4 103
+#define CLK_TOP_APLL2_D8 104
+#define CLK_TOP_MMPLL_D2 105
+#define CLK_TOP_TVDPLL_D2 106
+#define CLK_TOP_TVDPLL_D4 107
+#define CLK_TOP_TVDPLL_D8 108
+#define CLK_TOP_TVDPLL_D16 109
+#define CLK_TOP_TVDPLL_D32 110
+#define CLK_TOP_MSDCPLL_D2 111
+#define CLK_TOP_ULPOSC1 112
+#define CLK_TOP_ULPOSC1_D2 113
+#define CLK_TOP_ULPOSC1_D4 114
+#define CLK_TOP_ULPOSC1_D8 115
+#define CLK_TOP_ULPOSC1_D10 116
+#define CLK_TOP_ULPOSC1_D16 117
+#define CLK_TOP_ULPOSC1_D32 118
+#define CLK_TOP_ADSPPLL_D2 119
+#define CLK_TOP_ADSPPLL_D4 120
+#define CLK_TOP_ADSPPLL_D8 121
+#define CLK_TOP_NNAPLL_D2 122
+#define CLK_TOP_NNAPLL_D4 123
+#define CLK_TOP_NNAPLL_D8 124
+#define CLK_TOP_NNA2PLL_D2 125
+#define CLK_TOP_NNA2PLL_D4 126
+#define CLK_TOP_NNA2PLL_D8 127
+#define CLK_TOP_F_BIST2FPC 128
+#define CLK_TOP_466M_FMEM 129
+#define CLK_TOP_MPLL 130
+#define CLK_TOP_APLL12_CK_DIV0 131
+#define CLK_TOP_APLL12_CK_DIV1 132
+#define CLK_TOP_APLL12_CK_DIV2 133
+#define CLK_TOP_APLL12_CK_DIV4 134
+#define CLK_TOP_APLL12_CK_DIV_TDMOUT_M 135
+#define CLK_TOP_NR_CLK 136
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SCP_CORE 4
+#define CLK_INFRA_AO_SEJ 5
+#define CLK_INFRA_AO_APXGPT 6
+#define CLK_INFRA_AO_ICUSB 7
+#define CLK_INFRA_AO_GCE 8
+#define CLK_INFRA_AO_THERM 9
+#define CLK_INFRA_AO_I2C_AP 10
+#define CLK_INFRA_AO_I2C_CCU 11
+#define CLK_INFRA_AO_I2C_SSPM 12
+#define CLK_INFRA_AO_I2C_RSV 13
+#define CLK_INFRA_AO_PWM_HCLK 14
+#define CLK_INFRA_AO_PWM1 15
+#define CLK_INFRA_AO_PWM2 16
+#define CLK_INFRA_AO_PWM3 17
+#define CLK_INFRA_AO_PWM4 18
+#define CLK_INFRA_AO_PWM5 19
+#define CLK_INFRA_AO_PWM 20
+#define CLK_INFRA_AO_UART0 21
+#define CLK_INFRA_AO_UART1 22
+#define CLK_INFRA_AO_UART2 23
+#define CLK_INFRA_AO_GCE_26M 24
+#define CLK_INFRA_AO_CQ_DMA_FPC 25
+#define CLK_INFRA_AO_BTIF 26
+#define CLK_INFRA_AO_SPI0 27
+#define CLK_INFRA_AO_MSDC0 28
+#define CLK_INFRA_AO_MSDCFDE 29
+#define CLK_INFRA_AO_MSDC1 30
+#define CLK_INFRA_AO_DVFSRC 31
+#define CLK_INFRA_AO_GCPU 32
+#define CLK_INFRA_AO_TRNG 33
+#define CLK_INFRA_AO_AUXADC 34
+#define CLK_INFRA_AO_CPUM 35
+#define CLK_INFRA_AO_CCIF1_AP 36
+#define CLK_INFRA_AO_CCIF1_MD 37
+#define CLK_INFRA_AO_AUXADC_MD 38
+#define CLK_INFRA_AO_AP_DMA 39
+#define CLK_INFRA_AO_XIU 40
+#define CLK_INFRA_AO_DEVICE_APC 41
+#define CLK_INFRA_AO_CCIF_AP 42
+#define CLK_INFRA_AO_DEBUGTOP 43
+#define CLK_INFRA_AO_AUDIO 44
+#define CLK_INFRA_AO_CCIF_MD 45
+#define CLK_INFRA_AO_DXCC_SEC_CORE 46
+#define CLK_INFRA_AO_DXCC_AO 47
+#define CLK_INFRA_AO_IMP_IIC 48
+#define CLK_INFRA_AO_DRAMC_F26M 49
+#define CLK_INFRA_AO_RG_PWM_FBCLK6 50
+#define CLK_INFRA_AO_SSUSB_TOP_HCLK 51
+#define CLK_INFRA_AO_DISP_PWM 52
+#define CLK_INFRA_AO_CLDMA_BCLK 53
+#define CLK_INFRA_AO_AUDIO_26M_BCLK 54
+#define CLK_INFRA_AO_SSUSB_TOP_P1_HCLK 55
+#define CLK_INFRA_AO_SPI1 56
+#define CLK_INFRA_AO_I2C4 57
+#define CLK_INFRA_AO_MODEM_TEMP_SHARE 58
+#define CLK_INFRA_AO_SPI2 59
+#define CLK_INFRA_AO_SPI3 60
+#define CLK_INFRA_AO_SSUSB_TOP_REF 61
+#define CLK_INFRA_AO_SSUSB_TOP_XHCI 62
+#define CLK_INFRA_AO_SSUSB_TOP_P1_REF 63
+#define CLK_INFRA_AO_SSUSB_TOP_P1_XHCI 64
+#define CLK_INFRA_AO_SSPM 65
+#define CLK_INFRA_AO_SSUSB_TOP_P1_SYS 66
+#define CLK_INFRA_AO_I2C5 67
+#define CLK_INFRA_AO_I2C5_ARBITER 68
+#define CLK_INFRA_AO_I2C5_IMM 69
+#define CLK_INFRA_AO_I2C1_ARBITER 70
+#define CLK_INFRA_AO_I2C1_IMM 71
+#define CLK_INFRA_AO_I2C2_ARBITER 72
+#define CLK_INFRA_AO_I2C2_IMM 73
+#define CLK_INFRA_AO_SPI4 74
+#define CLK_INFRA_AO_SPI5 75
+#define CLK_INFRA_AO_CQ_DMA 76
+#define CLK_INFRA_AO_BIST2FPC 77
+#define CLK_INFRA_AO_MSDC0_SELF 78
+#define CLK_INFRA_AO_SPINOR 79
+#define CLK_INFRA_AO_SSPM_26M_SELF 80
+#define CLK_INFRA_AO_SSPM_32K_SELF 81
+#define CLK_INFRA_AO_I2C6 82
+#define CLK_INFRA_AO_AP_MSDC0 83
+#define CLK_INFRA_AO_MD_MSDC0 84
+#define CLK_INFRA_AO_MSDC0_SRC 85
+#define CLK_INFRA_AO_MSDC1_SRC 86
+#define CLK_INFRA_AO_SEJ_F13M 87
+#define CLK_INFRA_AO_AES_TOP0_BCLK 88
+#define CLK_INFRA_AO_MCU_PM_BCLK 89
+#define CLK_INFRA_AO_CCIF2_AP 90
+#define CLK_INFRA_AO_CCIF2_MD 91
+#define CLK_INFRA_AO_CCIF3_AP 92
+#define CLK_INFRA_AO_CCIF3_MD 93
+#define CLK_INFRA_AO_FADSP_26M 94
+#define CLK_INFRA_AO_FADSP_32K 95
+#define CLK_INFRA_AO_CCIF4_AP 96
+#define CLK_INFRA_AO_CCIF4_MD 97
+#define CLK_INFRA_AO_FADSP 98
+#define CLK_INFRA_AO_FLASHIF_133M 99
+#define CLK_INFRA_AO_FLASHIF_66M 100
+#define CLK_INFRA_AO_NR_CLK 101
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL_LL 0
+#define CLK_APMIXED_ARMPLL_BL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIV2PLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_NNAPLL 7
+#define CLK_APMIXED_NNA2PLL 8
+#define CLK_APMIXED_ADSPPLL 9
+#define CLK_APMIXED_MFGPLL 10
+#define CLK_APMIXED_TVDPLL 11
+#define CLK_APMIXED_APLL1 12
+#define CLK_APMIXED_APLL2 13
+#define CLK_APMIXED_NR_CLK 14
+
+/* IMP_IIC_WRAP */
+
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0 0
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1 1
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2 2
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3 3
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4 4
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5 5
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6 6
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7 7
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8 8
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9 9
+#define CLK_IMP_IIC_WRAP_NR_CLK 10
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_APB_MM_BUS 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_RSZ0 6
+#define CLK_MM_DISP_AAL0 7
+#define CLK_MM_DISP_CCORR0 8
+#define CLK_MM_DISP_COLOR0 9
+#define CLK_MM_SMI_INFRA 10
+#define CLK_MM_DISP_DSC_WRAP0 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DITHER0 14
+#define CLK_MM_SMI_COMMON 15
+#define CLK_MM_DSI0 16
+#define CLK_MM_DISP_FAKE_ENG0 17
+#define CLK_MM_DISP_FAKE_ENG1 18
+#define CLK_MM_SMI_GALS 19
+#define CLK_MM_SMI_IOMMU 20
+#define CLK_MM_DISP_RDMA1 21
+#define CLK_MM_DISP_DPI 22
+#define CLK_MM_DSI0_DSI_CK_DOMAIN 23
+#define CLK_MM_DISP_26M 24
+#define CLK_MM_NR_CLK 25
+
+/* WPESYS */
+
+#define CLK_WPE_CK_EN 0
+#define CLK_WPE_SMI_LARB8_CK_EN 1
+#define CLK_WPE_SYS_EVENT_TX_CK_EN 2
+#define CLK_WPE_SMI_LARB8_PCLK_EN 3
+#define CLK_WPE_NR_CLK 4
+
+/* IMGSYS1 */
+
+#define CLK_IMG1_LARB9_IMG1 0
+#define CLK_IMG1_LARB10_IMG1 1
+#define CLK_IMG1_DIP 2
+#define CLK_IMG1_GALS_IMG1 3
+#define CLK_IMG1_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB9_IMG2 0
+#define CLK_IMG2_LARB10_IMG2 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS_IMG2 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1_CKEN 0
+#define CLK_VDEC_LAT_CKEN 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_LAT_CKEN_ENG 3
+#define CLK_VDEC_MINI_MDP_CKEN_CFG_RG 4
+#define CLK_VDEC_CKEN 5
+#define CLK_VDEC_ACTIVE 6
+#define CLK_VDEC_CKEN_ENG 7
+#define CLK_VDEC_NR_CLK 8
+
+/* VENCSYS */
+
+#define CLK_VENC_CKE0_LARB 0
+#define CLK_VENC_CKE1_VENC 1
+#define CLK_VENC_CKE2_JPGENC 2
+#define CLK_VENC_CKE5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM 3
+#define CLK_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAMSV3 8
+#define CLK_CAM_CCU0 9
+#define CLK_CAM_CCU1 10
+#define CLK_CAM_MRAW0 11
+#define CLK_CAM_FAKE_ENG 12
+#define CLK_CAM_CCU_GALS 13
+#define CLK_CAM2MM_GALS 14
+#define CLK_CAM_NR_CLK 15
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX_RAWA 0
+#define CLK_CAM_RAWA 1
+#define CLK_CAM_RAWA_CAMTG_RAWA 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX_RAWB 0
+#define CLK_CAM_RAWB 1
+#define CLK_CAM_RAWB_CAMTG_RAWB 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_DISP_RDMA 4
+#define CLK_MDP_HMS 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_FAKE_ENG0 14
+#define CLK_MDP_AAL0 15
+#define CLK_MDP_DISP_WDMA 16
+#define CLK_MDP_COLOR 17
+#define CLK_MDP_IMG_DL_ASYNC2 18
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 19
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 20
+#define CLK_MDP_IMG_DL_RELAY2_ASYNC2 21
+#define CLK_MDP_NR_CLK 22
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS_IPE 7
+#define CLK_IPE_NR_CLK 8
+
+#endif /* _DT_BINDINGS_CLK_MT8186_H */
diff --git a/include/dt-bindings/clock/mt8192-clk.h b/include/dt-bindings/clock/mt8192-clk.h
new file mode 100644
index 000000000000..5ab68f15a256
--- /dev/null
+++ b/include/dt-bindings/clock/mt8192-clk.h
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8192_H
+#define _DT_BINDINGS_CLK_MT8192_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI_SEL 0
+#define CLK_TOP_SPM_SEL 1
+#define CLK_TOP_SCP_SEL 2
+#define CLK_TOP_BUS_AXIMEM_SEL 3
+#define CLK_TOP_DISP_SEL 4
+#define CLK_TOP_MDP_SEL 5
+#define CLK_TOP_IMG1_SEL 6
+#define CLK_TOP_IMG2_SEL 7
+#define CLK_TOP_IPE_SEL 8
+#define CLK_TOP_DPE_SEL 9
+#define CLK_TOP_CAM_SEL 10
+#define CLK_TOP_CCU_SEL 11
+#define CLK_TOP_DSP7_SEL 12
+#define CLK_TOP_MFG_REF_SEL 13
+#define CLK_TOP_MFG_PLL_SEL 14
+#define CLK_TOP_CAMTG_SEL 15
+#define CLK_TOP_CAMTG2_SEL 16
+#define CLK_TOP_CAMTG3_SEL 17
+#define CLK_TOP_CAMTG4_SEL 18
+#define CLK_TOP_CAMTG5_SEL 19
+#define CLK_TOP_CAMTG6_SEL 20
+#define CLK_TOP_UART_SEL 21
+#define CLK_TOP_SPI_SEL 22
+#define CLK_TOP_MSDC50_0_H_SEL 23
+#define CLK_TOP_MSDC50_0_SEL 24
+#define CLK_TOP_MSDC30_1_SEL 25
+#define CLK_TOP_MSDC30_2_SEL 26
+#define CLK_TOP_AUDIO_SEL 27
+#define CLK_TOP_AUD_INTBUS_SEL 28
+#define CLK_TOP_PWRAP_ULPOSC_SEL 29
+#define CLK_TOP_ATB_SEL 30
+#define CLK_TOP_DPI_SEL 31
+#define CLK_TOP_SCAM_SEL 32
+#define CLK_TOP_DISP_PWM_SEL 33
+#define CLK_TOP_USB_TOP_SEL 34
+#define CLK_TOP_SSUSB_XHCI_SEL 35
+#define CLK_TOP_I2C_SEL 36
+#define CLK_TOP_SENINF_SEL 37
+#define CLK_TOP_SENINF1_SEL 38
+#define CLK_TOP_SENINF2_SEL 39
+#define CLK_TOP_SENINF3_SEL 40
+#define CLK_TOP_TL_SEL 41
+#define CLK_TOP_DXCC_SEL 42
+#define CLK_TOP_AUD_ENGEN1_SEL 43
+#define CLK_TOP_AUD_ENGEN2_SEL 44
+#define CLK_TOP_AES_UFSFDE_SEL 45
+#define CLK_TOP_UFS_SEL 46
+#define CLK_TOP_AUD_1_SEL 47
+#define CLK_TOP_AUD_2_SEL 48
+#define CLK_TOP_ADSP_SEL 49
+#define CLK_TOP_DPMAIF_MAIN_SEL 50
+#define CLK_TOP_VENC_SEL 51
+#define CLK_TOP_VDEC_SEL 52
+#define CLK_TOP_CAMTM_SEL 53
+#define CLK_TOP_PWM_SEL 54
+#define CLK_TOP_AUDIO_H_SEL 55
+#define CLK_TOP_SPMI_MST_SEL 56
+#define CLK_TOP_AES_MSDCFDE_SEL 57
+#define CLK_TOP_SFLASH_SEL 58
+#define CLK_TOP_APLL_I2S0_M_SEL 59
+#define CLK_TOP_APLL_I2S1_M_SEL 60
+#define CLK_TOP_APLL_I2S2_M_SEL 61
+#define CLK_TOP_APLL_I2S3_M_SEL 62
+#define CLK_TOP_APLL_I2S4_M_SEL 63
+#define CLK_TOP_APLL_I2S5_M_SEL 64
+#define CLK_TOP_APLL_I2S6_M_SEL 65
+#define CLK_TOP_APLL_I2S7_M_SEL 66
+#define CLK_TOP_APLL_I2S8_M_SEL 67
+#define CLK_TOP_APLL_I2S9_M_SEL 68
+#define CLK_TOP_MAINPLL_D3 69
+#define CLK_TOP_MAINPLL_D4 70
+#define CLK_TOP_MAINPLL_D4_D2 71
+#define CLK_TOP_MAINPLL_D4_D4 72
+#define CLK_TOP_MAINPLL_D4_D8 73
+#define CLK_TOP_MAINPLL_D4_D16 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D5_D8 78
+#define CLK_TOP_MAINPLL_D6 79
+#define CLK_TOP_MAINPLL_D6_D2 80
+#define CLK_TOP_MAINPLL_D6_D4 81
+#define CLK_TOP_MAINPLL_D7 82
+#define CLK_TOP_MAINPLL_D7_D2 83
+#define CLK_TOP_MAINPLL_D7_D4 84
+#define CLK_TOP_MAINPLL_D7_D8 85
+#define CLK_TOP_UNIVPLL_D3 86
+#define CLK_TOP_UNIVPLL_D4 87
+#define CLK_TOP_UNIVPLL_D4_D2 88
+#define CLK_TOP_UNIVPLL_D4_D4 89
+#define CLK_TOP_UNIVPLL_D4_D8 90
+#define CLK_TOP_UNIVPLL_D5 91
+#define CLK_TOP_UNIVPLL_D5_D2 92
+#define CLK_TOP_UNIVPLL_D5_D4 93
+#define CLK_TOP_UNIVPLL_D5_D8 94
+#define CLK_TOP_UNIVPLL_D6 95
+#define CLK_TOP_UNIVPLL_D6_D2 96
+#define CLK_TOP_UNIVPLL_D6_D4 97
+#define CLK_TOP_UNIVPLL_D6_D8 98
+#define CLK_TOP_UNIVPLL_D6_D16 99
+#define CLK_TOP_UNIVPLL_D7 100
+#define CLK_TOP_APLL1 101
+#define CLK_TOP_APLL1_D2 102
+#define CLK_TOP_APLL1_D4 103
+#define CLK_TOP_APLL1_D8 104
+#define CLK_TOP_APLL2 105
+#define CLK_TOP_APLL2_D2 106
+#define CLK_TOP_APLL2_D4 107
+#define CLK_TOP_APLL2_D8 108
+#define CLK_TOP_MMPLL_D4 109
+#define CLK_TOP_MMPLL_D4_D2 110
+#define CLK_TOP_MMPLL_D5 111
+#define CLK_TOP_MMPLL_D5_D2 112
+#define CLK_TOP_MMPLL_D6 113
+#define CLK_TOP_MMPLL_D6_D2 114
+#define CLK_TOP_MMPLL_D7 115
+#define CLK_TOP_MMPLL_D9 116
+#define CLK_TOP_APUPLL 117
+#define CLK_TOP_NPUPLL 118
+#define CLK_TOP_TVDPLL 119
+#define CLK_TOP_TVDPLL_D2 120
+#define CLK_TOP_TVDPLL_D4 121
+#define CLK_TOP_TVDPLL_D8 122
+#define CLK_TOP_TVDPLL_D16 123
+#define CLK_TOP_MSDCPLL 124
+#define CLK_TOP_MSDCPLL_D2 125
+#define CLK_TOP_MSDCPLL_D4 126
+#define CLK_TOP_ULPOSC 127
+#define CLK_TOP_OSC_D2 128
+#define CLK_TOP_OSC_D4 129
+#define CLK_TOP_OSC_D8 130
+#define CLK_TOP_OSC_D10 131
+#define CLK_TOP_OSC_D16 132
+#define CLK_TOP_OSC_D20 133
+#define CLK_TOP_CSW_F26M_D2 134
+#define CLK_TOP_ADSPPLL 135
+#define CLK_TOP_UNIVPLL_192M 136
+#define CLK_TOP_UNIVPLL_192M_D2 137
+#define CLK_TOP_UNIVPLL_192M_D4 138
+#define CLK_TOP_UNIVPLL_192M_D8 139
+#define CLK_TOP_UNIVPLL_192M_D16 140
+#define CLK_TOP_UNIVPLL_192M_D32 141
+#define CLK_TOP_APLL12_DIV0 142
+#define CLK_TOP_APLL12_DIV1 143
+#define CLK_TOP_APLL12_DIV2 144
+#define CLK_TOP_APLL12_DIV3 145
+#define CLK_TOP_APLL12_DIV4 146
+#define CLK_TOP_APLL12_DIVB 147
+#define CLK_TOP_APLL12_DIV5 148
+#define CLK_TOP_APLL12_DIV6 149
+#define CLK_TOP_APLL12_DIV7 150
+#define CLK_TOP_APLL12_DIV8 151
+#define CLK_TOP_APLL12_DIV9 152
+#define CLK_TOP_SSUSB_TOP_REF 153
+#define CLK_TOP_SSUSB_PHY_REF 154
+#define CLK_TOP_NR_CLK 155
+
+/* INFRACFG */
+
+#define CLK_INFRA_PMIC_TMR 0
+#define CLK_INFRA_PMIC_AP 1
+#define CLK_INFRA_PMIC_MD 2
+#define CLK_INFRA_PMIC_CONN 3
+#define CLK_INFRA_SCPSYS 4
+#define CLK_INFRA_SEJ 5
+#define CLK_INFRA_APXGPT 6
+#define CLK_INFRA_GCE 7
+#define CLK_INFRA_GCE2 8
+#define CLK_INFRA_THERM 9
+#define CLK_INFRA_I2C0 10
+#define CLK_INFRA_AP_DMA_PSEUDO 11
+#define CLK_INFRA_I2C2 12
+#define CLK_INFRA_I2C3 13
+#define CLK_INFRA_PWM_H 14
+#define CLK_INFRA_PWM1 15
+#define CLK_INFRA_PWM2 16
+#define CLK_INFRA_PWM3 17
+#define CLK_INFRA_PWM4 18
+#define CLK_INFRA_PWM 19
+#define CLK_INFRA_UART0 20
+#define CLK_INFRA_UART1 21
+#define CLK_INFRA_UART2 22
+#define CLK_INFRA_UART3 23
+#define CLK_INFRA_GCE_26M 24
+#define CLK_INFRA_CQ_DMA_FPC 25
+#define CLK_INFRA_BTIF 26
+#define CLK_INFRA_SPI0 27
+#define CLK_INFRA_MSDC0 28
+#define CLK_INFRA_MSDC1 29
+#define CLK_INFRA_MSDC2 30
+#define CLK_INFRA_MSDC0_SRC 31
+#define CLK_INFRA_GCPU 32
+#define CLK_INFRA_TRNG 33
+#define CLK_INFRA_AUXADC 34
+#define CLK_INFRA_CPUM 35
+#define CLK_INFRA_CCIF1_AP 36
+#define CLK_INFRA_CCIF1_MD 37
+#define CLK_INFRA_AUXADC_MD 38
+#define CLK_INFRA_PCIE_TL_26M 39
+#define CLK_INFRA_MSDC1_SRC 40
+#define CLK_INFRA_MSDC2_SRC 41
+#define CLK_INFRA_PCIE_TL_96M 42
+#define CLK_INFRA_PCIE_PL_P_250M 43
+#define CLK_INFRA_DEVICE_APC 44
+#define CLK_INFRA_CCIF_AP 45
+#define CLK_INFRA_DEBUGSYS 46
+#define CLK_INFRA_AUDIO 47
+#define CLK_INFRA_CCIF_MD 48
+#define CLK_INFRA_DXCC_SEC_CORE 49
+#define CLK_INFRA_DXCC_AO 50
+#define CLK_INFRA_DBG_TRACE 51
+#define CLK_INFRA_DEVMPU_B 52
+#define CLK_INFRA_DRAMC_F26M 53
+#define CLK_INFRA_IRTX 54
+#define CLK_INFRA_SSUSB 55
+#define CLK_INFRA_DISP_PWM 56
+#define CLK_INFRA_CLDMA_B 57
+#define CLK_INFRA_AUDIO_26M_B 58
+#define CLK_INFRA_MODEM_TEMP_SHARE 59
+#define CLK_INFRA_SPI1 60
+#define CLK_INFRA_I2C4 61
+#define CLK_INFRA_SPI2 62
+#define CLK_INFRA_SPI3 63
+#define CLK_INFRA_UNIPRO_SYS 64
+#define CLK_INFRA_UNIPRO_TICK 65
+#define CLK_INFRA_UFS_MP_SAP_B 66
+#define CLK_INFRA_MD32_B 67
+#define CLK_INFRA_UNIPRO_MBIST 68
+#define CLK_INFRA_I2C5 69
+#define CLK_INFRA_I2C5_ARBITER 70
+#define CLK_INFRA_I2C5_IMM 71
+#define CLK_INFRA_I2C1_ARBITER 72
+#define CLK_INFRA_I2C1_IMM 73
+#define CLK_INFRA_I2C2_ARBITER 74
+#define CLK_INFRA_I2C2_IMM 75
+#define CLK_INFRA_SPI4 76
+#define CLK_INFRA_SPI5 77
+#define CLK_INFRA_CQ_DMA 78
+#define CLK_INFRA_UFS 79
+#define CLK_INFRA_AES_UFSFDE 80
+#define CLK_INFRA_UFS_TICK 81
+#define CLK_INFRA_SSUSB_XHCI 82
+#define CLK_INFRA_MSDC0_SELF 83
+#define CLK_INFRA_MSDC1_SELF 84
+#define CLK_INFRA_MSDC2_SELF 85
+#define CLK_INFRA_UFS_AXI 86
+#define CLK_INFRA_I2C6 87
+#define CLK_INFRA_AP_MSDC0 88
+#define CLK_INFRA_MD_MSDC0 89
+#define CLK_INFRA_CCIF5_AP 90
+#define CLK_INFRA_CCIF5_MD 91
+#define CLK_INFRA_PCIE_TOP_H_133M 92
+#define CLK_INFRA_FLASHIF_TOP_H_133M 93
+#define CLK_INFRA_PCIE_PERI_26M 94
+#define CLK_INFRA_CCIF2_AP 95
+#define CLK_INFRA_CCIF2_MD 96
+#define CLK_INFRA_CCIF3_AP 97
+#define CLK_INFRA_CCIF3_MD 98
+#define CLK_INFRA_SEJ_F13M 99
+#define CLK_INFRA_AES 100
+#define CLK_INFRA_I2C7 101
+#define CLK_INFRA_I2C8 102
+#define CLK_INFRA_FBIST2FPC 103
+#define CLK_INFRA_DEVICE_APC_SYNC 104
+#define CLK_INFRA_DPMAIF_MAIN 105
+#define CLK_INFRA_PCIE_TL_32K 106
+#define CLK_INFRA_CCIF4_AP 107
+#define CLK_INFRA_CCIF4_MD 108
+#define CLK_INFRA_SPI6 109
+#define CLK_INFRA_SPI7 110
+#define CLK_INFRA_133M 111
+#define CLK_INFRA_66M 112
+#define CLK_INFRA_66M_PERI_BUS 113
+#define CLK_INFRA_FREE_DCM_133M 114
+#define CLK_INFRA_FREE_DCM_66M 115
+#define CLK_INFRA_PERI_BUS_DCM_133M 116
+#define CLK_INFRA_PERI_BUS_DCM_66M 117
+#define CLK_INFRA_FLASHIF_PERI_26M 118
+#define CLK_INFRA_FLASHIF_SFLASH 119
+#define CLK_INFRA_AP_DMA 120
+#define CLK_INFRA_NR_CLK 121
+
+/* PERICFG */
+
+#define CLK_PERI_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_MAINPLL 0
+#define CLK_APMIXED_UNIVPLL 1
+#define CLK_APMIXED_USBPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_ADSPPLL 5
+#define CLK_APMIXED_MFGPLL 6
+#define CLK_APMIXED_TVDPLL 7
+#define CLK_APMIXED_APLL1 8
+#define CLK_APMIXED_APLL2 9
+#define CLK_APMIXED_MIPID26M 10
+#define CLK_APMIXED_NR_CLK 11
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* IMP_IIC_WRAP_C */
+
+#define CLK_IMP_IIC_WRAP_C_I2C10 0
+#define CLK_IMP_IIC_WRAP_C_I2C11 1
+#define CLK_IMP_IIC_WRAP_C_I2C12 2
+#define CLK_IMP_IIC_WRAP_C_I2C13 3
+#define CLK_IMP_IIC_WRAP_C_NR_CLK 4
+
+/* AUDSYS */
+
+#define CLK_AUD_AFE 0
+#define CLK_AUD_22M 1
+#define CLK_AUD_24M 2
+#define CLK_AUD_APLL2_TUNER 3
+#define CLK_AUD_APLL_TUNER 4
+#define CLK_AUD_TDM 5
+#define CLK_AUD_ADC 6
+#define CLK_AUD_DAC 7
+#define CLK_AUD_DAC_PREDIS 8
+#define CLK_AUD_TML 9
+#define CLK_AUD_NLE 10
+#define CLK_AUD_I2S1_B 11
+#define CLK_AUD_I2S2_B 12
+#define CLK_AUD_I2S3_B 13
+#define CLK_AUD_I2S4_B 14
+#define CLK_AUD_CONNSYS_I2S_ASRC 15
+#define CLK_AUD_GENERAL1_ASRC 16
+#define CLK_AUD_GENERAL2_ASRC 17
+#define CLK_AUD_DAC_HIRES 18
+#define CLK_AUD_ADC_HIRES 19
+#define CLK_AUD_ADC_HIRES_TML 20
+#define CLK_AUD_ADDA6_ADC 21
+#define CLK_AUD_ADDA6_ADC_HIRES 22
+#define CLK_AUD_3RD_DAC 23
+#define CLK_AUD_3RD_DAC_PREDIS 24
+#define CLK_AUD_3RD_DAC_TML 25
+#define CLK_AUD_3RD_DAC_HIRES 26
+#define CLK_AUD_I2S5_B 27
+#define CLK_AUD_I2S6_B 28
+#define CLK_AUD_I2S7_B 29
+#define CLK_AUD_I2S8_B 30
+#define CLK_AUD_I2S9_B 31
+#define CLK_AUD_NR_CLK 32
+
+/* IMP_IIC_WRAP_E */
+
+#define CLK_IMP_IIC_WRAP_E_I2C3 0
+#define CLK_IMP_IIC_WRAP_E_NR_CLK 1
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C7 0
+#define CLK_IMP_IIC_WRAP_S_I2C8 1
+#define CLK_IMP_IIC_WRAP_S_I2C9 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_WS */
+
+#define CLK_IMP_IIC_WRAP_WS_I2C1 0
+#define CLK_IMP_IIC_WRAP_WS_I2C2 1
+#define CLK_IMP_IIC_WRAP_WS_I2C4 2
+#define CLK_IMP_IIC_WRAP_WS_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C5 0
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 1
+
+/* IMP_IIC_WRAP_N */
+
+#define CLK_IMP_IIC_WRAP_N_I2C0 0
+#define CLK_IMP_IIC_WRAP_N_I2C6 1
+#define CLK_IMP_IIC_WRAP_N_NR_CLK 2
+
+/* MSDC_TOP */
+
+#define CLK_MSDC_TOP_AES_0P 0
+#define CLK_MSDC_TOP_SRC_0P 1
+#define CLK_MSDC_TOP_SRC_1P 2
+#define CLK_MSDC_TOP_SRC_2P 3
+#define CLK_MSDC_TOP_P_MSDC0 4
+#define CLK_MSDC_TOP_P_MSDC1 5
+#define CLK_MSDC_TOP_P_MSDC2 6
+#define CLK_MSDC_TOP_P_CFG 7
+#define CLK_MSDC_TOP_AXI 8
+#define CLK_MSDC_TOP_H_MST_0P 9
+#define CLK_MSDC_TOP_H_MST_1P 10
+#define CLK_MSDC_TOP_H_MST_2P 11
+#define CLK_MSDC_TOP_MEM_OFF_DLY_26M 12
+#define CLK_MSDC_TOP_32K 13
+#define CLK_MSDC_TOP_AHB2AXI_BRG_AXI 14
+#define CLK_MSDC_TOP_NR_CLK 15
+
+/* MSDC */
+
+#define CLK_MSDC_AXI_WRAP 0
+#define CLK_MSDC_NR_CLK 1
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_DISP_CONFIG 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_UFBC_WDMA0 6
+#define CLK_MM_DISP_RSZ0 7
+#define CLK_MM_DISP_AAL0 8
+#define CLK_MM_DISP_CCORR0 9
+#define CLK_MM_DISP_DITHER0 10
+#define CLK_MM_SMI_INFRA 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DSC_WRAP0 14
+#define CLK_MM_DSI0 15
+#define CLK_MM_DISP_COLOR0 16
+#define CLK_MM_SMI_COMMON 17
+#define CLK_MM_DISP_FAKE_ENG0 18
+#define CLK_MM_DISP_FAKE_ENG1 19
+#define CLK_MM_MDP_TDSHP4 20
+#define CLK_MM_MDP_RSZ4 21
+#define CLK_MM_MDP_AAL4 22
+#define CLK_MM_MDP_HDR4 23
+#define CLK_MM_MDP_RDMA4 24
+#define CLK_MM_MDP_COLOR4 25
+#define CLK_MM_DISP_Y2R0 26
+#define CLK_MM_SMI_GALS 27
+#define CLK_MM_DISP_OVL2_2L 28
+#define CLK_MM_DISP_RDMA4 29
+#define CLK_MM_DISP_DPI0 30
+#define CLK_MM_SMI_IOMMU 31
+#define CLK_MM_DSI_DSI0 32
+#define CLK_MM_DPI_DPI0 33
+#define CLK_MM_26MHZ 34
+#define CLK_MM_32KHZ 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_LARB10 1
+#define CLK_IMG_DIP 2
+#define CLK_IMG_GALS 3
+#define CLK_IMG_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB11 0
+#define CLK_IMG2_LARB12 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_LAT_ACTIVE 2
+#define CLK_VDEC_SOC_VDEC 3
+#define CLK_VDEC_SOC_VDEC_ACTIVE 4
+#define CLK_VDEC_SOC_NR_CLK 5
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_VDEC 3
+#define CLK_VDEC_ACTIVE 4
+#define CLK_VDEC_NR_CLK 5
+
+/* VENCSYS */
+
+#define CLK_VENC_SET0_LARB 0
+#define CLK_VENC_SET1_VENC 1
+#define CLK_VENC_SET2_JPGENC 2
+#define CLK_VENC_SET5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM_CAM 3
+#define CLK_CAM_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAM_CAMSV0 6
+#define CLK_CAM_CAMSV1 7
+#define CLK_CAM_CAMSV2 8
+#define CLK_CAM_CAMSV3 9
+#define CLK_CAM_CCU0 10
+#define CLK_CAM_CCU1 11
+#define CLK_CAM_MRAW0 12
+#define CLK_CAM_FAKE_ENG 13
+#define CLK_CAM_CCU_GALS 14
+#define CLK_CAM_CAM2MM_GALS 15
+#define CLK_CAM_NR_CLK 16
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_RAWC */
+
+#define CLK_CAM_RAWC_LARBX 0
+#define CLK_CAM_RAWC_CAM 1
+#define CLK_CAM_RAWC_CAMTG 2
+#define CLK_CAM_RAWC_NR_CLK 3
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS 7
+#define CLK_IPE_NR_CLK 8
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_RDMA1 4
+#define CLK_MDP_TDSHP1 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_HDR1 14
+#define CLK_MDP_FAKE_ENG0 15
+#define CLK_MDP_AAL0 16
+#define CLK_MDP_AAL1 17
+#define CLK_MDP_COLOR0 18
+#define CLK_MDP_COLOR1 19
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 20
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 21
+#define CLK_MDP_NR_CLK 22
+
+#endif /* _DT_BINDINGS_CLK_MT8192_H */
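(Editor's note: each macro above is the single clock-specifier cell a consumer node passes to the matching MT8192 clock controller. A minimal consumer sketch follows; the unit address, compatible strings, and the clk26m/infracfg phandle labels are illustrative assumptions, not lifted from the MT8192 device tree.)

  #include <dt-bindings/clock/mt8192-clk.h>

  serial@11002000 {
          compatible = "mediatek,mt8192-uart", "mediatek,mt6577-uart";
          reg = <0x11002000 0x1000>;
          /* "bus" gate is CLK_INFRA_UART0 from the INFRACFG list above;
           * the baud source is shown as a fixed 26 MHz clock for brevity */
          clocks = <&clk26m>, <&infracfg CLK_INFRA_UART0>;
          clock-names = "baud", "bus";
  };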
diff --git a/include/dt-bindings/clock/mt8195-clk.h b/include/dt-bindings/clock/mt8195-clk.h
new file mode 100644
index 000000000000..d70d017ad69c
--- /dev/null
+++ b/include/dt-bindings/clock/mt8195-clk.h
@@ -0,0 +1,866 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8195_H
+#define _DT_BINDINGS_CLK_MT8195_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SPM 1
+#define CLK_TOP_SCP 2
+#define CLK_TOP_BUS_AXIMEM 3
+#define CLK_TOP_VPP 4
+#define CLK_TOP_ETHDR 5
+#define CLK_TOP_IPE 6
+#define CLK_TOP_CAM 7
+#define CLK_TOP_CCU 8
+#define CLK_TOP_IMG 9
+#define CLK_TOP_CAMTM 10
+#define CLK_TOP_DSP 11
+#define CLK_TOP_DSP1 12
+#define CLK_TOP_DSP2 13
+#define CLK_TOP_DSP3 14
+#define CLK_TOP_DSP4 15
+#define CLK_TOP_DSP5 16
+#define CLK_TOP_DSP6 17
+#define CLK_TOP_DSP7 18
+#define CLK_TOP_IPU_IF 19
+#define CLK_TOP_MFG_CORE_TMP 20
+#define CLK_TOP_CAMTG 21
+#define CLK_TOP_CAMTG2 22
+#define CLK_TOP_CAMTG3 23
+#define CLK_TOP_CAMTG4 24
+#define CLK_TOP_CAMTG5 25
+#define CLK_TOP_UART 26
+#define CLK_TOP_SPI 27
+#define CLK_TOP_SPIS 28
+#define CLK_TOP_MSDC50_0_HCLK 29
+#define CLK_TOP_MSDC50_0 30
+#define CLK_TOP_MSDC30_1 31
+#define CLK_TOP_MSDC30_2 32
+#define CLK_TOP_INTDIR 33
+#define CLK_TOP_AUD_INTBUS 34
+#define CLK_TOP_AUDIO_H 35
+#define CLK_TOP_PWRAP_ULPOSC 36
+#define CLK_TOP_ATB 37
+#define CLK_TOP_PWRMCU 38
+#define CLK_TOP_DP 39
+#define CLK_TOP_EDP 40
+#define CLK_TOP_DPI 41
+#define CLK_TOP_DISP_PWM0 42
+#define CLK_TOP_DISP_PWM1 43
+#define CLK_TOP_USB_TOP 44
+#define CLK_TOP_SSUSB_XHCI 45
+#define CLK_TOP_USB_TOP_1P 46
+#define CLK_TOP_SSUSB_XHCI_1P 47
+#define CLK_TOP_USB_TOP_2P 48
+#define CLK_TOP_SSUSB_XHCI_2P 49
+#define CLK_TOP_USB_TOP_3P 50
+#define CLK_TOP_SSUSB_XHCI_3P 51
+#define CLK_TOP_I2C 52
+#define CLK_TOP_SENINF 53
+#define CLK_TOP_SENINF1 54
+#define CLK_TOP_SENINF2 55
+#define CLK_TOP_SENINF3 56
+#define CLK_TOP_GCPU 57
+#define CLK_TOP_DXCC 58
+#define CLK_TOP_DPMAIF_MAIN 59
+#define CLK_TOP_AES_UFSFDE 60
+#define CLK_TOP_UFS 61
+#define CLK_TOP_UFS_TICK1US 62
+#define CLK_TOP_UFS_MP_SAP_CFG 63
+#define CLK_TOP_VENC 64
+#define CLK_TOP_VDEC 65
+#define CLK_TOP_PWM 66
+#define CLK_TOP_MCUPM 67
+#define CLK_TOP_SPMI_P_MST 68
+#define CLK_TOP_SPMI_M_MST 69
+#define CLK_TOP_DVFSRC 70
+#define CLK_TOP_TL 71
+#define CLK_TOP_TL_P1 72
+#define CLK_TOP_AES_MSDCFDE 73
+#define CLK_TOP_DSI_OCC 74
+#define CLK_TOP_WPE_VPP 75
+#define CLK_TOP_HDCP 76
+#define CLK_TOP_HDCP_24M 77
+#define CLK_TOP_HD20_DACR_REF_CLK 78
+#define CLK_TOP_HD20_HDCP_CCLK 79
+#define CLK_TOP_HDMI_XTAL 80
+#define CLK_TOP_HDMI_APB 81
+#define CLK_TOP_SNPS_ETH_250M 82
+#define CLK_TOP_SNPS_ETH_62P4M_PTP 83
+#define CLK_TOP_SNPS_ETH_50M_RMII 84
+#define CLK_TOP_DGI_OUT 85
+#define CLK_TOP_NNA0 86
+#define CLK_TOP_NNA1 87
+#define CLK_TOP_ADSP 88
+#define CLK_TOP_ASM_H 89
+#define CLK_TOP_ASM_M 90
+#define CLK_TOP_ASM_L 91
+#define CLK_TOP_APLL1 92
+#define CLK_TOP_APLL2 93
+#define CLK_TOP_APLL3 94
+#define CLK_TOP_APLL4 95
+#define CLK_TOP_APLL5 96
+#define CLK_TOP_I2SO1_MCK 97
+#define CLK_TOP_I2SO2_MCK 98
+#define CLK_TOP_I2SI1_MCK 99
+#define CLK_TOP_I2SI2_MCK 100
+#define CLK_TOP_DPTX_MCK 101
+#define CLK_TOP_AUD_IEC_CLK 102
+#define CLK_TOP_A1SYS_HP 103
+#define CLK_TOP_A2SYS_HF 104
+#define CLK_TOP_A3SYS_HF 105
+#define CLK_TOP_A4SYS_HF 106
+#define CLK_TOP_SPINFI_BCLK 107
+#define CLK_TOP_NFI1X 108
+#define CLK_TOP_ECC 109
+#define CLK_TOP_AUDIO_LOCAL_BUS 110
+#define CLK_TOP_SPINOR 111
+#define CLK_TOP_DVIO_DGI_REF 112
+#define CLK_TOP_ULPOSC 113
+#define CLK_TOP_ULPOSC_CORE 114
+#define CLK_TOP_SRCK 115
+#define CLK_TOP_MFG_CK_FAST_REF 116
+#define CLK_TOP_CLK26M_D2 117
+#define CLK_TOP_CLK26M_D52 118
+#define CLK_TOP_IN_DGI 119
+#define CLK_TOP_IN_DGI_D2 120
+#define CLK_TOP_IN_DGI_D4 121
+#define CLK_TOP_IN_DGI_D6 122
+#define CLK_TOP_IN_DGI_D8 123
+#define CLK_TOP_MAINPLL_D3 124
+#define CLK_TOP_MAINPLL_D4 125
+#define CLK_TOP_MAINPLL_D4_D2 126
+#define CLK_TOP_MAINPLL_D4_D4 127
+#define CLK_TOP_MAINPLL_D4_D8 128
+#define CLK_TOP_MAINPLL_D5 129
+#define CLK_TOP_MAINPLL_D5_D2 130
+#define CLK_TOP_MAINPLL_D5_D4 131
+#define CLK_TOP_MAINPLL_D5_D8 132
+#define CLK_TOP_MAINPLL_D6 133
+#define CLK_TOP_MAINPLL_D6_D2 134
+#define CLK_TOP_MAINPLL_D6_D4 135
+#define CLK_TOP_MAINPLL_D6_D8 136
+#define CLK_TOP_MAINPLL_D7 137
+#define CLK_TOP_MAINPLL_D7_D2 138
+#define CLK_TOP_MAINPLL_D7_D4 139
+#define CLK_TOP_MAINPLL_D7_D8 140
+#define CLK_TOP_MAINPLL_D9 141
+#define CLK_TOP_UNIVPLL_D2 142
+#define CLK_TOP_UNIVPLL_D3 143
+#define CLK_TOP_UNIVPLL_D4 144
+#define CLK_TOP_UNIVPLL_D4_D2 145
+#define CLK_TOP_UNIVPLL_D4_D4 146
+#define CLK_TOP_UNIVPLL_D4_D8 147
+#define CLK_TOP_UNIVPLL_D5 148
+#define CLK_TOP_UNIVPLL_D5_D2 149
+#define CLK_TOP_UNIVPLL_D5_D4 150
+#define CLK_TOP_UNIVPLL_D5_D8 151
+#define CLK_TOP_UNIVPLL_D6 152
+#define CLK_TOP_UNIVPLL_D6_D2 153
+#define CLK_TOP_UNIVPLL_D6_D4 154
+#define CLK_TOP_UNIVPLL_D6_D8 155
+#define CLK_TOP_UNIVPLL_D6_D16 156
+#define CLK_TOP_UNIVPLL_D7 157
+#define CLK_TOP_UNIVPLL_192M 158
+#define CLK_TOP_UNIVPLL_192M_D4 159
+#define CLK_TOP_UNIVPLL_192M_D8 160
+#define CLK_TOP_UNIVPLL_192M_D16 161
+#define CLK_TOP_UNIVPLL_192M_D32 162
+#define CLK_TOP_APLL1_D3 163
+#define CLK_TOP_APLL1_D4 164
+#define CLK_TOP_APLL2_D3 165
+#define CLK_TOP_APLL2_D4 166
+#define CLK_TOP_APLL3_D4 167
+#define CLK_TOP_APLL4_D4 168
+#define CLK_TOP_APLL5_D4 169
+#define CLK_TOP_HDMIRX_APLL_D3 170
+#define CLK_TOP_HDMIRX_APLL_D4 171
+#define CLK_TOP_HDMIRX_APLL_D6 172
+#define CLK_TOP_MMPLL_D4 173
+#define CLK_TOP_MMPLL_D4_D2 174
+#define CLK_TOP_MMPLL_D4_D4 175
+#define CLK_TOP_MMPLL_D5 176
+#define CLK_TOP_MMPLL_D5_D2 177
+#define CLK_TOP_MMPLL_D5_D4 178
+#define CLK_TOP_MMPLL_D6 179
+#define CLK_TOP_MMPLL_D6_D2 180
+#define CLK_TOP_MMPLL_D7 181
+#define CLK_TOP_MMPLL_D9 182
+#define CLK_TOP_TVDPLL1_D2 183
+#define CLK_TOP_TVDPLL1_D4 184
+#define CLK_TOP_TVDPLL1_D8 185
+#define CLK_TOP_TVDPLL1_D16 186
+#define CLK_TOP_TVDPLL2_D2 187
+#define CLK_TOP_TVDPLL2_D4 188
+#define CLK_TOP_TVDPLL2_D8 189
+#define CLK_TOP_TVDPLL2_D16 190
+#define CLK_TOP_MSDCPLL_D2 191
+#define CLK_TOP_MSDCPLL_D4 192
+#define CLK_TOP_MSDCPLL_D16 193
+#define CLK_TOP_ETHPLL_D2 194
+#define CLK_TOP_ETHPLL_D8 195
+#define CLK_TOP_ETHPLL_D10 196
+#define CLK_TOP_DGIPLL_D2 197
+#define CLK_TOP_ULPOSC1 198
+#define CLK_TOP_ULPOSC1_D2 199
+#define CLK_TOP_ULPOSC1_D4 200
+#define CLK_TOP_ULPOSC1_D7 201
+#define CLK_TOP_ULPOSC1_D8 202
+#define CLK_TOP_ULPOSC1_D10 203
+#define CLK_TOP_ULPOSC1_D16 204
+#define CLK_TOP_ULPOSC2 205
+#define CLK_TOP_ADSPPLL_D2 206
+#define CLK_TOP_ADSPPLL_D4 207
+#define CLK_TOP_ADSPPLL_D8 208
+#define CLK_TOP_MEM_466M 209
+#define CLK_TOP_MPHONE_SLAVE_B 210
+#define CLK_TOP_PEXTP_PIPE 211
+#define CLK_TOP_UFS_RX_SYMBOL 212
+#define CLK_TOP_UFS_TX_SYMBOL 213
+#define CLK_TOP_SSUSB_U3PHY_P1_P_P0 214
+#define CLK_TOP_UFS_RX_SYMBOL1 215
+#define CLK_TOP_FPC 216
+#define CLK_TOP_HDMIRX_P 217
+#define CLK_TOP_APLL12_DIV0 218
+#define CLK_TOP_APLL12_DIV1 219
+#define CLK_TOP_APLL12_DIV2 220
+#define CLK_TOP_APLL12_DIV3 221
+#define CLK_TOP_APLL12_DIV4 222
+#define CLK_TOP_APLL12_DIV9 223
+#define CLK_TOP_CFG_VPP0 224
+#define CLK_TOP_CFG_VPP1 225
+#define CLK_TOP_CFG_VDO0 226
+#define CLK_TOP_CFG_VDO1 227
+#define CLK_TOP_CFG_UNIPLL_SES 228
+#define CLK_TOP_CFG_26M_VPP0 229
+#define CLK_TOP_CFG_26M_VPP1 230
+#define CLK_TOP_CFG_26M_AUD 231
+#define CLK_TOP_CFG_AXI_EAST 232
+#define CLK_TOP_CFG_AXI_EAST_NORTH 233
+#define CLK_TOP_CFG_AXI_NORTH 234
+#define CLK_TOP_CFG_AXI_SOUTH 235
+#define CLK_TOP_CFG_EXT_TEST 236
+#define CLK_TOP_SSUSB_REF 237
+#define CLK_TOP_SSUSB_PHY_REF 238
+#define CLK_TOP_SSUSB_P1_REF 239
+#define CLK_TOP_SSUSB_PHY_P1_REF 240
+#define CLK_TOP_SSUSB_P2_REF 241
+#define CLK_TOP_SSUSB_PHY_P2_REF 242
+#define CLK_TOP_SSUSB_P3_REF 243
+#define CLK_TOP_SSUSB_PHY_P3_REF 244
+#define CLK_TOP_NR_CLK 245
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SEJ 4
+#define CLK_INFRA_AO_APXGPT 5
+#define CLK_INFRA_AO_GCE 6
+#define CLK_INFRA_AO_GCE2 7
+#define CLK_INFRA_AO_THERM 8
+#define CLK_INFRA_AO_PWM_H 9
+#define CLK_INFRA_AO_PWM1 10
+#define CLK_INFRA_AO_PWM2 11
+#define CLK_INFRA_AO_PWM3 12
+#define CLK_INFRA_AO_PWM4 13
+#define CLK_INFRA_AO_PWM 14
+#define CLK_INFRA_AO_UART0 15
+#define CLK_INFRA_AO_UART1 16
+#define CLK_INFRA_AO_UART2 17
+#define CLK_INFRA_AO_UART3 18
+#define CLK_INFRA_AO_UART4 19
+#define CLK_INFRA_AO_GCE_26M 20
+#define CLK_INFRA_AO_CQ_DMA_FPC 21
+#define CLK_INFRA_AO_UART5 22
+#define CLK_INFRA_AO_HDMI_26M 23
+#define CLK_INFRA_AO_SPI0 24
+#define CLK_INFRA_AO_MSDC0 25
+#define CLK_INFRA_AO_MSDC1 26
+#define CLK_INFRA_AO_CG1_MSDC2 27
+#define CLK_INFRA_AO_MSDC0_SRC 28
+#define CLK_INFRA_AO_TRNG 29
+#define CLK_INFRA_AO_AUXADC 30
+#define CLK_INFRA_AO_CPUM 31
+#define CLK_INFRA_AO_HDMI_32K 32
+#define CLK_INFRA_AO_CEC_66M_H 33
+#define CLK_INFRA_AO_IRRX 34
+#define CLK_INFRA_AO_PCIE_TL_26M 35
+#define CLK_INFRA_AO_MSDC1_SRC 36
+#define CLK_INFRA_AO_CEC_66M_B 37
+#define CLK_INFRA_AO_PCIE_TL_96M 38
+#define CLK_INFRA_AO_DEVICE_APC 39
+#define CLK_INFRA_AO_ECC_66M_H 40
+#define CLK_INFRA_AO_DEBUGSYS 41
+#define CLK_INFRA_AO_AUDIO 42
+#define CLK_INFRA_AO_PCIE_TL_32K 43
+#define CLK_INFRA_AO_DBG_TRACE 44
+#define CLK_INFRA_AO_DRAMC_F26M 45
+#define CLK_INFRA_AO_IRTX 46
+#define CLK_INFRA_AO_SSUSB 47
+#define CLK_INFRA_AO_DISP_PWM 48
+#define CLK_INFRA_AO_CLDMA_B 49
+#define CLK_INFRA_AO_AUDIO_26M_B 50
+#define CLK_INFRA_AO_SPI1 51
+#define CLK_INFRA_AO_SPI2 52
+#define CLK_INFRA_AO_SPI3 53
+#define CLK_INFRA_AO_UNIPRO_SYS 54
+#define CLK_INFRA_AO_UNIPRO_TICK 55
+#define CLK_INFRA_AO_UFS_MP_SAP_B 56
+#define CLK_INFRA_AO_PWRMCU 57
+#define CLK_INFRA_AO_PWRMCU_BUS_H 58
+#define CLK_INFRA_AO_APDMA_B 59
+#define CLK_INFRA_AO_SPI4 60
+#define CLK_INFRA_AO_SPI5 61
+#define CLK_INFRA_AO_CQ_DMA 62
+#define CLK_INFRA_AO_AES_UFSFDE 63
+#define CLK_INFRA_AO_AES 64
+#define CLK_INFRA_AO_UFS_TICK 65
+#define CLK_INFRA_AO_SSUSB_XHCI 66
+#define CLK_INFRA_AO_MSDC0_SELF 67
+#define CLK_INFRA_AO_MSDC1_SELF 68
+#define CLK_INFRA_AO_MSDC2_SELF 69
+#define CLK_INFRA_AO_I2S_DMA 70
+#define CLK_INFRA_AO_AP_MSDC0 71
+#define CLK_INFRA_AO_MD_MSDC0 72
+#define CLK_INFRA_AO_CG3_MSDC2 73
+#define CLK_INFRA_AO_GCPU 74
+#define CLK_INFRA_AO_PCIE_PERI_26M 75
+#define CLK_INFRA_AO_GCPU_66M_B 76
+#define CLK_INFRA_AO_GCPU_133M_B 77
+#define CLK_INFRA_AO_DISP_PWM1 78
+#define CLK_INFRA_AO_FBIST2FPC 79
+#define CLK_INFRA_AO_DEVICE_APC_SYNC 80
+#define CLK_INFRA_AO_PCIE_P1_PERI_26M 81
+#define CLK_INFRA_AO_SPIS0 82
+#define CLK_INFRA_AO_SPIS1 83
+#define CLK_INFRA_AO_133M_M_PERI 84
+#define CLK_INFRA_AO_66M_M_PERI 85
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 86
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P1 87
+#define CLK_INFRA_AO_PCIE_P1_TL_96M 88
+#define CLK_INFRA_AO_AES_MSDCFDE_0P 89
+#define CLK_INFRA_AO_UFS_TX_SYMBOL 90
+#define CLK_INFRA_AO_UFS_RX_SYMBOL 91
+#define CLK_INFRA_AO_UFS_RX_SYMBOL1 92
+#define CLK_INFRA_AO_PERI_UFS_MEM_SUB 93
+#define CLK_INFRA_AO_NR_CLK 94
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NNAPLL 0
+#define CLK_APMIXED_RESPLL 1
+#define CLK_APMIXED_ETHPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_TVDPLL1 4
+#define CLK_APMIXED_TVDPLL2 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_MAINPLL 7
+#define CLK_APMIXED_VDECPLL 8
+#define CLK_APMIXED_IMGPLL 9
+#define CLK_APMIXED_UNIVPLL 10
+#define CLK_APMIXED_HDMIPLL1 11
+#define CLK_APMIXED_HDMIPLL2 12
+#define CLK_APMIXED_HDMIRX_APLL 13
+#define CLK_APMIXED_USB1PLL 14
+#define CLK_APMIXED_ADSPPLL 15
+#define CLK_APMIXED_APLL1 16
+#define CLK_APMIXED_APLL2 17
+#define CLK_APMIXED_APLL3 18
+#define CLK_APMIXED_APLL4 19
+#define CLK_APMIXED_APLL5 20
+#define CLK_APMIXED_MFGPLL 21
+#define CLK_APMIXED_DGIPLL 22
+#define CLK_APMIXED_PLL_SSUSB26M 23
+#define CLK_APMIXED_NR_CLK 24
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* PERICFG_AO */
+
+#define CLK_PERI_AO_ETHERNET 0
+#define CLK_PERI_AO_ETHERNET_BUS 1
+#define CLK_PERI_AO_FLASHIF_BUS 2
+#define CLK_PERI_AO_FLASHIF_FLASH 3
+#define CLK_PERI_AO_SSUSB_1P_BUS 4
+#define CLK_PERI_AO_SSUSB_1P_XHCI 5
+#define CLK_PERI_AO_SSUSB_2P_BUS 6
+#define CLK_PERI_AO_SSUSB_2P_XHCI 7
+#define CLK_PERI_AO_SSUSB_3P_BUS 8
+#define CLK_PERI_AO_SSUSB_3P_XHCI 9
+#define CLK_PERI_AO_SPINFI 10
+#define CLK_PERI_AO_ETHERNET_MAC 11
+#define CLK_PERI_AO_NFI_H 12
+#define CLK_PERI_AO_FNFI1X 13
+#define CLK_PERI_AO_PCIE_P0_MEM 14
+#define CLK_PERI_AO_PCIE_P1_MEM 15
+#define CLK_PERI_AO_NR_CLK 16
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C5 0
+#define CLK_IMP_IIC_WRAP_S_I2C6 1
+#define CLK_IMP_IIC_WRAP_S_I2C7 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C0 0
+#define CLK_IMP_IIC_WRAP_W_I2C1 1
+#define CLK_IMP_IIC_WRAP_W_I2C2 2
+#define CLK_IMP_IIC_WRAP_W_I2C3 3
+#define CLK_IMP_IIC_WRAP_W_I2C4 4
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 5
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* VPPSYS0 */
+
+#define CLK_VPP0_MDP_FG 0
+#define CLK_VPP0_STITCH 1
+#define CLK_VPP0_PADDING 2
+#define CLK_VPP0_MDP_TCC 3
+#define CLK_VPP0_WARP0_ASYNC_TX 4
+#define CLK_VPP0_WARP1_ASYNC_TX 5
+#define CLK_VPP0_MUTEX 6
+#define CLK_VPP0_VPP02VPP1_RELAY 7
+#define CLK_VPP0_VPP12VPP0_ASYNC 8
+#define CLK_VPP0_MMSYSRAM_TOP 9
+#define CLK_VPP0_MDP_AAL 10
+#define CLK_VPP0_MDP_RSZ 11
+#define CLK_VPP0_SMI_COMMON 12
+#define CLK_VPP0_GALS_VDO0_LARB0 13
+#define CLK_VPP0_GALS_VDO0_LARB1 14
+#define CLK_VPP0_GALS_VENCSYS 15
+#define CLK_VPP0_GALS_VENCSYS_CORE1 16
+#define CLK_VPP0_GALS_INFRA 17
+#define CLK_VPP0_GALS_CAMSYS 18
+#define CLK_VPP0_GALS_VPP1_LARB5 19
+#define CLK_VPP0_GALS_VPP1_LARB6 20
+#define CLK_VPP0_SMI_REORDER 21
+#define CLK_VPP0_SMI_IOMMU 22
+#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23
+#define CLK_VPP0_MDP_RDMA 24
+#define CLK_VPP0_MDP_WROT 25
+#define CLK_VPP0_GALS_EMI0_EMI1 26
+#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27
+#define CLK_VPP0_SMI_RSI 28
+#define CLK_VPP0_SMI_COMMON_LARB4 29
+#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30
+#define CLK_VPP0_GALS_VPP1_WPE 31
+#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32
+#define CLK_VPP0_FAKE_ENG 33
+#define CLK_VPP0_MDP_HDR 34
+#define CLK_VPP0_MDP_TDSHP 35
+#define CLK_VPP0_MDP_COLOR 36
+#define CLK_VPP0_MDP_OVL 37
+#define CLK_VPP0_WARP0_RELAY 38
+#define CLK_VPP0_WARP0_MDP_DL_ASYNC 39
+#define CLK_VPP0_WARP1_RELAY 40
+#define CLK_VPP0_WARP1_MDP_DL_ASYNC 41
+#define CLK_VPP0_NR_CLK 42
+
+/* WPESYS */
+
+#define CLK_WPE_VPP0 0
+#define CLK_WPE_VPP1 1
+#define CLK_WPE_SMI_LARB7 2
+#define CLK_WPE_SMI_LARB8 3
+#define CLK_WPE_EVENT_TX 4
+#define CLK_WPE_SMI_LARB7_P 5
+#define CLK_WPE_SMI_LARB8_P 6
+#define CLK_WPE_NR_CLK 7
+
+/* WPESYS_VPP0 */
+
+#define CLK_WPE_VPP0_VECI 0
+#define CLK_WPE_VPP0_VEC2I 1
+#define CLK_WPE_VPP0_VEC3I 2
+#define CLK_WPE_VPP0_WPEO 3
+#define CLK_WPE_VPP0_MSKO 4
+#define CLK_WPE_VPP0_VGEN 5
+#define CLK_WPE_VPP0_EXT 6
+#define CLK_WPE_VPP0_VFC 7
+#define CLK_WPE_VPP0_CACH0_TOP 8
+#define CLK_WPE_VPP0_CACH0_DMA 9
+#define CLK_WPE_VPP0_CACH1_TOP 10
+#define CLK_WPE_VPP0_CACH1_DMA 11
+#define CLK_WPE_VPP0_CACH2_TOP 12
+#define CLK_WPE_VPP0_CACH2_DMA 13
+#define CLK_WPE_VPP0_CACH3_TOP 14
+#define CLK_WPE_VPP0_CACH3_DMA 15
+#define CLK_WPE_VPP0_PSP 16
+#define CLK_WPE_VPP0_PSP2 17
+#define CLK_WPE_VPP0_SYNC 18
+#define CLK_WPE_VPP0_C24 19
+#define CLK_WPE_VPP0_MDP_CROP 20
+#define CLK_WPE_VPP0_ISP_CROP 21
+#define CLK_WPE_VPP0_TOP 22
+#define CLK_WPE_VPP0_NR_CLK 23
+
+/* WPESYS_VPP1 */
+
+#define CLK_WPE_VPP1_VECI 0
+#define CLK_WPE_VPP1_VEC2I 1
+#define CLK_WPE_VPP1_VEC3I 2
+#define CLK_WPE_VPP1_WPEO 3
+#define CLK_WPE_VPP1_MSKO 4
+#define CLK_WPE_VPP1_VGEN 5
+#define CLK_WPE_VPP1_EXT 6
+#define CLK_WPE_VPP1_VFC 7
+#define CLK_WPE_VPP1_CACH0_TOP 8
+#define CLK_WPE_VPP1_CACH0_DMA 9
+#define CLK_WPE_VPP1_CACH1_TOP 10
+#define CLK_WPE_VPP1_CACH1_DMA 11
+#define CLK_WPE_VPP1_CACH2_TOP 12
+#define CLK_WPE_VPP1_CACH2_DMA 13
+#define CLK_WPE_VPP1_CACH3_TOP 14
+#define CLK_WPE_VPP1_CACH3_DMA 15
+#define CLK_WPE_VPP1_PSP 16
+#define CLK_WPE_VPP1_PSP2 17
+#define CLK_WPE_VPP1_SYNC 18
+#define CLK_WPE_VPP1_C24 19
+#define CLK_WPE_VPP1_MDP_CROP 20
+#define CLK_WPE_VPP1_ISP_CROP 21
+#define CLK_WPE_VPP1_TOP 22
+#define CLK_WPE_VPP1_NR_CLK 23
+
+/* VPPSYS1 */
+
+#define CLK_VPP1_SVPP1_MDP_OVL 0
+#define CLK_VPP1_SVPP1_MDP_TCC 1
+#define CLK_VPP1_SVPP1_MDP_WROT 2
+#define CLK_VPP1_SVPP1_VPP_PAD 3
+#define CLK_VPP1_SVPP2_MDP_WROT 4
+#define CLK_VPP1_SVPP2_VPP_PAD 5
+#define CLK_VPP1_SVPP3_MDP_WROT 6
+#define CLK_VPP1_SVPP3_VPP_PAD 7
+#define CLK_VPP1_SVPP1_MDP_RDMA 8
+#define CLK_VPP1_SVPP1_MDP_FG 9
+#define CLK_VPP1_SVPP2_MDP_RDMA 10
+#define CLK_VPP1_SVPP2_MDP_FG 11
+#define CLK_VPP1_SVPP3_MDP_RDMA 12
+#define CLK_VPP1_SVPP3_MDP_FG 13
+#define CLK_VPP1_VPP_SPLIT 14
+#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15
+#define CLK_VPP1_SVPP1_MDP_TDSHP 16
+#define CLK_VPP1_SVPP1_MDP_COLOR 17
+#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 18
+#define CLK_VPP1_SVPP2_VPP_MERGE 19
+#define CLK_VPP1_SVPP2_MDP_COLOR 20
+#define CLK_VPP1_VPPSYS1_GALS 21
+#define CLK_VPP1_SVPP3_VPP_MERGE 22
+#define CLK_VPP1_SVPP3_MDP_COLOR 23
+#define CLK_VPP1_VPPSYS1_LARB 24
+#define CLK_VPP1_SVPP1_MDP_RSZ 25
+#define CLK_VPP1_SVPP1_MDP_HDR 26
+#define CLK_VPP1_SVPP1_MDP_AAL 27
+#define CLK_VPP1_SVPP2_MDP_HDR 28
+#define CLK_VPP1_SVPP2_MDP_AAL 29
+#define CLK_VPP1_DL_ASYNC 30
+#define CLK_VPP1_LARB5_FAKE_ENG 31
+#define CLK_VPP1_SVPP3_MDP_HDR 32
+#define CLK_VPP1_SVPP3_MDP_AAL 33
+#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 34
+#define CLK_VPP1_LARB6_FAKE_ENG 35
+#define CLK_VPP1_SVPP2_MDP_RSZ 36
+#define CLK_VPP1_SVPP3_MDP_RSZ 37
+#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 38
+#define CLK_VPP1_DISP_MUTEX 39
+#define CLK_VPP1_SVPP2_MDP_TDSHP 40
+#define CLK_VPP1_SVPP3_MDP_TDSHP 41
+#define CLK_VPP1_VPP0_DL1_RELAY 42
+#define CLK_VPP1_HDMI_META 43
+#define CLK_VPP1_VPP_SPLIT_HDMI 44
+#define CLK_VPP1_DGI_IN 45
+#define CLK_VPP1_DGI_OUT 46
+#define CLK_VPP1_VPP_SPLIT_DGI 47
+#define CLK_VPP1_VPP0_DL_ASYNC 48
+#define CLK_VPP1_VPP0_DL_RELAY 49
+#define CLK_VPP1_VPP_SPLIT_26M 50
+#define CLK_VPP1_NR_CLK 51
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_TRAW0 1
+#define CLK_IMG_TRAW1 2
+#define CLK_IMG_TRAW2 3
+#define CLK_IMG_TRAW3 4
+#define CLK_IMG_DIP0 5
+#define CLK_IMG_WPE0 6
+#define CLK_IMG_IPE 7
+#define CLK_IMG_DIP1 8
+#define CLK_IMG_WPE1 9
+#define CLK_IMG_GALS 10
+#define CLK_IMG_NR_CLK 11
+
+/* IMGSYS1_DIP_TOP */
+
+#define CLK_IMG1_DIP_TOP_LARB10 0
+#define CLK_IMG1_DIP_TOP_DIP_TOP 1
+#define CLK_IMG1_DIP_TOP_NR_CLK 2
+
+/* IMGSYS1_DIP_NR */
+
+#define CLK_IMG1_DIP_NR_RESERVE 0
+#define CLK_IMG1_DIP_NR_DIP_NR 1
+#define CLK_IMG1_DIP_NR_NR_CLK 2
+
+/* IMGSYS1_WPE */
+
+#define CLK_IMG1_WPE_LARB11 0
+#define CLK_IMG1_WPE_WPE 1
+#define CLK_IMG1_WPE_NR_CLK 2
+
+/* IPESYS */
+
+#define CLK_IPE_DPE 0
+#define CLK_IPE_FDVT 1
+#define CLK_IPE_ME 2
+#define CLK_IPE_TOP 3
+#define CLK_IPE_SMI_LARB12 4
+#define CLK_IPE_NR_CLK 5
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_LARB14 1
+#define CLK_CAM_MAIN_CAM 2
+#define CLK_CAM_MAIN_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAM_GCAMSVA 5
+#define CLK_CAM_GCAMSVB 6
+#define CLK_CAM_GCAMSVC 7
+#define CLK_CAM_SCAMSA 8
+#define CLK_CAM_SCAMSB 9
+#define CLK_CAM_CAMSV_TOP 10
+#define CLK_CAM_CAMSV_CQ 11
+#define CLK_CAM_ADL 12
+#define CLK_CAM_ASG 13
+#define CLK_CAM_PDA 14
+#define CLK_CAM_FAKE_ENG 15
+#define CLK_CAM_MAIN_MRAW0 16
+#define CLK_CAM_MAIN_MRAW1 17
+#define CLK_CAM_MAIN_MRAW2 18
+#define CLK_CAM_MAIN_MRAW3 19
+#define CLK_CAM_CAM2MM0_GALS 20
+#define CLK_CAM_CAM2MM1_GALS 21
+#define CLK_CAM_CAM2SYS_GALS 22
+#define CLK_CAM_NR_CLK 23
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_YUVA */
+
+#define CLK_CAM_YUVA_LARBX 0
+#define CLK_CAM_YUVA_CAM 1
+#define CLK_CAM_YUVA_CAMTG 2
+#define CLK_CAM_YUVA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_YUVB */
+
+#define CLK_CAM_YUVB_LARBX 0
+#define CLK_CAM_YUVB_CAM 1
+#define CLK_CAM_YUVB_CAMTG 2
+#define CLK_CAM_YUVB_NR_CLK 3
+
+/* CAMSYS_MRAW */
+
+#define CLK_CAM_MRAW_LARBX 0
+#define CLK_CAM_MRAW_CAMTG 1
+#define CLK_CAM_MRAW_MRAW0 2
+#define CLK_CAM_MRAW_MRAW1 3
+#define CLK_CAM_MRAW_MRAW2 4
+#define CLK_CAM_MRAW_MRAW3 5
+#define CLK_CAM_MRAW_NR_CLK 6
+
+/* CCUSYS */
+
+#define CLK_CCU_LARB18 0
+#define CLK_CCU_AHB 1
+#define CLK_CCU_CCU0 2
+#define CLK_CCU_CCU1 3
+#define CLK_CCU_NR_CLK 4
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_VDEC 2
+#define CLK_VDEC_SOC_NR_CLK 3
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_VDEC 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VDECSYS_CORE1 */
+
+#define CLK_VDEC_CORE1_LARB1 0
+#define CLK_VDEC_CORE1_LAT 1
+#define CLK_VDEC_CORE1_VDEC 2
+#define CLK_VDEC_CORE1_NR_CLK 3
+
+/* APUSYS_PLL */
+
+#define CLK_APUSYS_PLL_APUPLL 0
+#define CLK_APUSYS_PLL_NPUPLL 1
+#define CLK_APUSYS_PLL_APUPLL1 2
+#define CLK_APUSYS_PLL_APUPLL2 3
+#define CLK_APUSYS_PLL_NR_CLK 4
+
+/* VENCSYS */
+
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_JPGDEC_C1 4
+#define CLK_VENC_GALS 5
+#define CLK_VENC_NR_CLK 6
+
+/* VENCSYS_CORE1 */
+
+#define CLK_VENC_CORE1_LARB 0
+#define CLK_VENC_CORE1_VENC 1
+#define CLK_VENC_CORE1_JPGENC 2
+#define CLK_VENC_CORE1_JPGDEC 3
+#define CLK_VENC_CORE1_JPGDEC_C1 4
+#define CLK_VENC_CORE1_GALS 5
+#define CLK_VENC_CORE1_NR_CLK 6
+
+/* VDOSYS0 */
+
+#define CLK_VDO0_DISP_OVL0 0
+#define CLK_VDO0_DISP_COLOR0 1
+#define CLK_VDO0_DISP_COLOR1 2
+#define CLK_VDO0_DISP_CCORR0 3
+#define CLK_VDO0_DISP_CCORR1 4
+#define CLK_VDO0_DISP_AAL0 5
+#define CLK_VDO0_DISP_AAL1 6
+#define CLK_VDO0_DISP_GAMMA0 7
+#define CLK_VDO0_DISP_GAMMA1 8
+#define CLK_VDO0_DISP_DITHER0 9
+#define CLK_VDO0_DISP_DITHER1 10
+#define CLK_VDO0_DISP_OVL1 11
+#define CLK_VDO0_DISP_WDMA0 12
+#define CLK_VDO0_DISP_WDMA1 13
+#define CLK_VDO0_DISP_RDMA0 14
+#define CLK_VDO0_DISP_RDMA1 15
+#define CLK_VDO0_DSI0 16
+#define CLK_VDO0_DSI1 17
+#define CLK_VDO0_DSC_WRAP0 18
+#define CLK_VDO0_VPP_MERGE0 19
+#define CLK_VDO0_DP_INTF0 20
+#define CLK_VDO0_DISP_MUTEX0 21
+#define CLK_VDO0_DISP_IL_ROT0 22
+#define CLK_VDO0_APB_BUS 23
+#define CLK_VDO0_FAKE_ENG0 24
+#define CLK_VDO0_FAKE_ENG1 25
+#define CLK_VDO0_DL_ASYNC0 26
+#define CLK_VDO0_DL_ASYNC1 27
+#define CLK_VDO0_DL_ASYNC2 28
+#define CLK_VDO0_DL_ASYNC3 29
+#define CLK_VDO0_DL_ASYNC4 30
+#define CLK_VDO0_DISP_MONITOR0 31
+#define CLK_VDO0_DISP_MONITOR1 32
+#define CLK_VDO0_DISP_MONITOR2 33
+#define CLK_VDO0_DISP_MONITOR3 34
+#define CLK_VDO0_DISP_MONITOR4 35
+#define CLK_VDO0_SMI_GALS 36
+#define CLK_VDO0_SMI_COMMON 37
+#define CLK_VDO0_SMI_EMI 38
+#define CLK_VDO0_SMI_IOMMU 39
+#define CLK_VDO0_SMI_LARB 40
+#define CLK_VDO0_SMI_RSI 41
+#define CLK_VDO0_DSI0_DSI 42
+#define CLK_VDO0_DSI1_DSI 43
+#define CLK_VDO0_DP_INTF0_DP_INTF 44
+#define CLK_VDO0_NR_CLK 45
+
+/* VDOSYS1 */
+
+#define CLK_VDO1_SMI_LARB2 0
+#define CLK_VDO1_SMI_LARB3 1
+#define CLK_VDO1_GALS 2
+#define CLK_VDO1_FAKE_ENG0 3
+#define CLK_VDO1_FAKE_ENG 4
+#define CLK_VDO1_MDP_RDMA0 5
+#define CLK_VDO1_MDP_RDMA1 6
+#define CLK_VDO1_MDP_RDMA2 7
+#define CLK_VDO1_MDP_RDMA3 8
+#define CLK_VDO1_VPP_MERGE0 9
+#define CLK_VDO1_VPP_MERGE1 10
+#define CLK_VDO1_VPP_MERGE2 11
+#define CLK_VDO1_VPP_MERGE3 12
+#define CLK_VDO1_VPP_MERGE4 13
+#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14
+#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15
+#define CLK_VDO1_DISP_MUTEX 16
+#define CLK_VDO1_MDP_RDMA4 17
+#define CLK_VDO1_MDP_RDMA5 18
+#define CLK_VDO1_MDP_RDMA6 19
+#define CLK_VDO1_MDP_RDMA7 20
+#define CLK_VDO1_DP_INTF0_MM 21
+#define CLK_VDO1_DPI0_MM 22
+#define CLK_VDO1_DPI1_MM 23
+#define CLK_VDO1_DISP_MONITOR 24
+#define CLK_VDO1_MERGE0_DL_ASYNC 25
+#define CLK_VDO1_MERGE1_DL_ASYNC 26
+#define CLK_VDO1_MERGE2_DL_ASYNC 27
+#define CLK_VDO1_MERGE3_DL_ASYNC 28
+#define CLK_VDO1_MERGE4_DL_ASYNC 29
+#define CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define CLK_VDO1_HDR_VDO_FE0 32
+#define CLK_VDO1_HDR_GFX_FE0 33
+#define CLK_VDO1_HDR_VDO_BE 34
+#define CLK_VDO1_HDR_VDO_FE1 35
+#define CLK_VDO1_HDR_GFX_FE1 36
+#define CLK_VDO1_DISP_MIXER 37
+#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 38
+#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 39
+#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 40
+#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 41
+#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 42
+#define CLK_VDO1_DPI0 43
+#define CLK_VDO1_DISP_MONITOR_DPI0 44
+#define CLK_VDO1_DPI1 45
+#define CLK_VDO1_DISP_MONITOR_DPI1 46
+#define CLK_VDO1_DPINTF 47
+#define CLK_VDO1_DISP_MONITOR_DPINTF 48
+#define CLK_VDO1_26M_SLOW 49
+#define CLK_VDO1_DPI1_HDMI 50
+#define CLK_VDO1_NR_CLK 51
+
+#endif /* _DT_BINDINGS_CLK_MT8195_H */
diff --git a/include/dt-bindings/clock/nuvoton,ma35d1-clk.h b/include/dt-bindings/clock/nuvoton,ma35d1-clk.h
new file mode 100644
index 000000000000..ba2d70f776a6
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,ma35d1-clk.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Nuvoton Technologies.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H
+#define __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H
+
+/* external and internal oscillator clocks */
+#define HXT 0
+#define HXT_GATE 1
+#define LXT 2
+#define LXT_GATE 3
+#define HIRC 4
+#define HIRC_GATE 5
+#define LIRC 6
+#define LIRC_GATE 7
+/* PLLs */
+#define CAPLL 8
+#define SYSPLL 9
+#define DDRPLL 10
+#define APLL 11
+#define EPLL 12
+#define VPLL 13
+/* EPLL divider */
+#define EPLL_DIV2 14
+#define EPLL_DIV4 15
+#define EPLL_DIV8 16
+/* CPU clock, system clock, AXI, HCLK and PCLK */
+#define CA35CLK_MUX 17
+#define AXICLK_DIV2 18
+#define AXICLK_DIV4 19
+#define AXICLK_MUX 20
+#define SYSCLK0_MUX 21
+#define SYSCLK1_MUX 22
+#define SYSCLK1_DIV2 23
+#define HCLK0 24
+#define HCLK1 25
+#define HCLK2 26
+#define PCLK0 27
+#define PCLK1 28
+#define PCLK2 29
+#define HCLK3 30
+#define PCLK3 31
+#define PCLK4 32
+/* AXI and AHB peripheral clocks */
+#define USBPHY0 33
+#define USBPHY1 34
+#define DDR0_GATE 35
+#define DDR6_GATE 36
+#define CAN0_MUX 37
+#define CAN0_DIV 38
+#define CAN0_GATE 39
+#define CAN1_MUX 40
+#define CAN1_DIV 41
+#define CAN1_GATE 42
+#define CAN2_MUX 43
+#define CAN2_DIV 44
+#define CAN2_GATE 45
+#define CAN3_MUX 46
+#define CAN3_DIV 47
+#define CAN3_GATE 48
+#define SDH0_MUX 49
+#define SDH0_GATE 50
+#define SDH1_MUX 51
+#define SDH1_GATE 52
+#define NAND_GATE 53
+#define USBD_GATE 54
+#define USBH_GATE 55
+#define HUSBH0_GATE 56
+#define HUSBH1_GATE 57
+#define GFX_MUX 58
+#define GFX_GATE 59
+#define VC8K_GATE 60
+#define DCU_MUX 61
+#define DCU_GATE 62
+#define DCUP_DIV 63
+#define EMAC0_GATE 64
+#define EMAC1_GATE 65
+#define CCAP0_MUX 66
+#define CCAP0_DIV 67
+#define CCAP0_GATE 68
+#define CCAP1_MUX 69
+#define CCAP1_DIV 70
+#define CCAP1_GATE 71
+#define PDMA0_GATE 72
+#define PDMA1_GATE 73
+#define PDMA2_GATE 74
+#define PDMA3_GATE 75
+#define WH0_GATE 76
+#define WH1_GATE 77
+#define HWS_GATE 78
+#define EBI_GATE 79
+#define SRAM0_GATE 80
+#define SRAM1_GATE 81
+#define ROM_GATE 82
+#define TRA_GATE 83
+#define DBG_MUX 84
+#define DBG_GATE 85
+#define CKO_MUX 86
+#define CKO_DIV 87
+#define CKO_GATE 88
+#define GTMR_GATE 89
+#define GPA_GATE 90
+#define GPB_GATE 91
+#define GPC_GATE 92
+#define GPD_GATE 93
+#define GPE_GATE 94
+#define GPF_GATE 95
+#define GPG_GATE 96
+#define GPH_GATE 97
+#define GPI_GATE 98
+#define GPJ_GATE 99
+#define GPK_GATE 100
+#define GPL_GATE 101
+#define GPM_GATE 102
+#define GPN_GATE 103
+/* APB peripheral clocks */
+#define TMR0_MUX 104
+#define TMR0_GATE 105
+#define TMR1_MUX 106
+#define TMR1_GATE 107
+#define TMR2_MUX 108
+#define TMR2_GATE 109
+#define TMR3_MUX 110
+#define TMR3_GATE 111
+#define TMR4_MUX 112
+#define TMR4_GATE 113
+#define TMR5_MUX 114
+#define TMR5_GATE 115
+#define TMR6_MUX 116
+#define TMR6_GATE 117
+#define TMR7_MUX 118
+#define TMR7_GATE 119
+#define TMR8_MUX 120
+#define TMR8_GATE 121
+#define TMR9_MUX 122
+#define TMR9_GATE 123
+#define TMR10_MUX 124
+#define TMR10_GATE 125
+#define TMR11_MUX 126
+#define TMR11_GATE 127
+#define UART0_MUX 128
+#define UART0_DIV 129
+#define UART0_GATE 130
+#define UART1_MUX 131
+#define UART1_DIV 132
+#define UART1_GATE 133
+#define UART2_MUX 134
+#define UART2_DIV 135
+#define UART2_GATE 136
+#define UART3_MUX 137
+#define UART3_DIV 138
+#define UART3_GATE 139
+#define UART4_MUX 140
+#define UART4_DIV 141
+#define UART4_GATE 142
+#define UART5_MUX 143
+#define UART5_DIV 144
+#define UART5_GATE 145
+#define UART6_MUX 146
+#define UART6_DIV 147
+#define UART6_GATE 148
+#define UART7_MUX 149
+#define UART7_DIV 150
+#define UART7_GATE 151
+#define UART8_MUX 152
+#define UART8_DIV 153
+#define UART8_GATE 154
+#define UART9_MUX 155
+#define UART9_DIV 156
+#define UART9_GATE 157
+#define UART10_MUX 158
+#define UART10_DIV 159
+#define UART10_GATE 160
+#define UART11_MUX 161
+#define UART11_DIV 162
+#define UART11_GATE 163
+#define UART12_MUX 164
+#define UART12_DIV 165
+#define UART12_GATE 166
+#define UART13_MUX 167
+#define UART13_DIV 168
+#define UART13_GATE 169
+#define UART14_MUX 170
+#define UART14_DIV 171
+#define UART14_GATE 172
+#define UART15_MUX 173
+#define UART15_DIV 174
+#define UART15_GATE 175
+#define UART16_MUX 176
+#define UART16_DIV 177
+#define UART16_GATE 178
+#define RTC_GATE 179
+#define DDR_GATE 180
+#define KPI_MUX 181
+#define KPI_DIV 182
+#define KPI_GATE 183
+#define I2C0_GATE 184
+#define I2C1_GATE 185
+#define I2C2_GATE 186
+#define I2C3_GATE 187
+#define I2C4_GATE 188
+#define I2C5_GATE 189
+#define QSPI0_MUX 190
+#define QSPI0_GATE 191
+#define QSPI1_MUX 192
+#define QSPI1_GATE 193
+#define SMC0_MUX 194
+#define SMC0_DIV 195
+#define SMC0_GATE 196
+#define SMC1_MUX 197
+#define SMC1_DIV 198
+#define SMC1_GATE 199
+#define WDT0_MUX 200
+#define WDT0_GATE 201
+#define WDT1_MUX 202
+#define WDT1_GATE 203
+#define WDT2_MUX 204
+#define WDT2_GATE 205
+#define WWDT0_MUX 206
+#define WWDT1_MUX 207
+#define WWDT2_MUX 208
+#define EPWM0_GATE 209
+#define EPWM1_GATE 210
+#define EPWM2_GATE 211
+#define I2S0_MUX 212
+#define I2S0_GATE 213
+#define I2S1_MUX 214
+#define I2S1_GATE 215
+#define SSMCC_GATE 216
+#define SSPCC_GATE 217
+#define SPI0_MUX 218
+#define SPI0_GATE 219
+#define SPI1_MUX 220
+#define SPI1_GATE 221
+#define SPI2_MUX 222
+#define SPI2_GATE 223
+#define SPI3_MUX 224
+#define SPI3_GATE 225
+#define ECAP0_GATE 226
+#define ECAP1_GATE 227
+#define ECAP2_GATE 228
+#define QEI0_GATE 229
+#define QEI1_GATE 230
+#define QEI2_GATE 231
+#define ADC_DIV 232
+#define ADC_GATE 233
+#define EADC_DIV 234
+#define EADC_GATE 235
+#define CLK_MAX_IDX 236
+
+#endif /* __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H */
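(Editor's note: the MA35D1 IDs above form one flat space published by a single clock-controller node with #clock-cells = <1>, with CLK_MAX_IDX as the upper bound of that space. A hypothetical provider/consumer sketch, assuming illustrative register addresses and a "nuvoton,ma35d1-serial" consumer compatible:)

  #include <dt-bindings/clock/nuvoton,ma35d1-clk.h>

  clk: clock-controller@40460200 {
          compatible = "nuvoton,ma35d1-clk";
          reg = <0x40460200 0x100>;
          #clock-cells = <1>;
  };

  serial@40700000 {
          compatible = "nuvoton,ma35d1-serial";
          reg = <0x40700000 0x100>;
          /* UART0_GATE (130) selects the UART0 bus gate defined above */
          clocks = <&clk UART0_GATE>;
  };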
diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
index f21522605b94..3e0a9b68933d 100644
--- a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
+++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Nuvoton NPCM7xx Clock Generator binding
- * clock binding number for all clocks supportted by nuvoton,npcm7xx-clk
+ * clock binding number for all clocks supported by nuvoton,npcm7xx-clk
*
* Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com
*
diff --git a/include/dt-bindings/clock/nuvoton,npcm845-clk.h b/include/dt-bindings/clock/nuvoton,npcm845-clk.h
new file mode 100644
index 000000000000..e5cce08b00e1
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,npcm845-clk.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Nuvoton Technologies.
+ * Author: Tomer Maimon <tomer.maimon@nuvoton.com>
+ *
+ * Device Tree binding constants for NPCM8XX clock controller.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NPCM8XX_H
+#define __DT_BINDINGS_CLOCK_NPCM8XX_H
+
+#define NPCM8XX_CLK_CPU 0
+#define NPCM8XX_CLK_GFX_PIXEL 1
+#define NPCM8XX_CLK_MC 2
+#define NPCM8XX_CLK_ADC 3
+#define NPCM8XX_CLK_AHB 4
+#define NPCM8XX_CLK_TIMER 5
+#define NPCM8XX_CLK_UART 6
+#define NPCM8XX_CLK_UART2 7
+#define NPCM8XX_CLK_MMC 8
+#define NPCM8XX_CLK_SPI3 9
+#define NPCM8XX_CLK_PCI 10
+#define NPCM8XX_CLK_AXI 11
+#define NPCM8XX_CLK_APB4 12
+#define NPCM8XX_CLK_APB3 13
+#define NPCM8XX_CLK_APB2 14
+#define NPCM8XX_CLK_APB1 15
+#define NPCM8XX_CLK_APB5 16
+#define NPCM8XX_CLK_CLKOUT 17
+#define NPCM8XX_CLK_GFX 18
+#define NPCM8XX_CLK_SU 19
+#define NPCM8XX_CLK_SU48 20
+#define NPCM8XX_CLK_SDHC 21
+#define NPCM8XX_CLK_SPI0 22
+#define NPCM8XX_CLK_SPI1 23
+#define NPCM8XX_CLK_SPIX 24
+#define NPCM8XX_CLK_RG 25
+#define NPCM8XX_CLK_RCP 26
+#define NPCM8XX_CLK_PRE_ADC 27
+#define NPCM8XX_CLK_ATB 28
+#define NPCM8XX_CLK_PRE_CLK 29
+#define NPCM8XX_CLK_TH 30
+#define NPCM8XX_CLK_REFCLK 31
+#define NPCM8XX_CLK_SYSBYPCK 32
+#define NPCM8XX_CLK_MCBYPCK 33
+
+#define NPCM8XX_NUM_CLOCKS (NPCM8XX_CLK_MCBYPCK + 1)
+
+#endif
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
index 41775272fd27..90e0d4b00127 100644
--- a/include/dt-bindings/clock/omap5.h
+++ b/include/dt-bindings/clock/omap5.h
@@ -32,6 +32,8 @@
/* l3main2 clocks */
#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_L3_MAIN_2_GPMC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_L3_MAIN_2_OCMC_RAM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
/* ipu clocks */
#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
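(Editor's note: unlike the single-cell bindings elsewhere in this patch, TI clkctrl providers take two clock cells — the CLKCTRL register index defined here plus a bit offset, where 0 selects the module's main functional clock. A hedged sketch for the newly added GPMC entry; the target-module wrapper, addresses, and the l3main2_clkctrl label are illustrative:)

  #include <dt-bindings/clock/omap5.h>

  target-module@50000000 {
          compatible = "ti,sysc-omap2", "ti,sysc";
          reg = <0x50000000 0x4>;
          /* main functional clock: GPMC CLKCTRL register, bit offset 0 */
          clocks = <&l3main2_clkctrl OMAP5_L3_MAIN_2_GPMC_CLKCTRL 0>;
          clock-names = "fck";
  };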
diff --git a/include/dt-bindings/clock/qcom,camcc-sc7180.h b/include/dt-bindings/clock/qcom,camcc-sc7180.h
new file mode 100644
index 000000000000..ef7d3a041b88
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sc7180.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7180_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL2_OUT_EARLY 0
+#define CAM_CC_PLL0 1
+#define CAM_CC_PLL1 2
+#define CAM_CC_PLL2 3
+#define CAM_CC_PLL2_OUT_AUX 4
+#define CAM_CC_PLL3 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_0_CLK 7
+#define CAM_CC_CCI_0_CLK_SRC 8
+#define CAM_CC_CCI_1_CLK 9
+#define CAM_CC_CCI_1_CLK_SRC 10
+#define CAM_CC_CORE_AHB_CLK 11
+#define CAM_CC_CPAS_AHB_CLK 12
+#define CAM_CC_CPHY_RX_CLK_SRC 13
+#define CAM_CC_CSI0PHYTIMER_CLK 14
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 15
+#define CAM_CC_CSI1PHYTIMER_CLK 16
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 17
+#define CAM_CC_CSI2PHYTIMER_CLK 18
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 19
+#define CAM_CC_CSI3PHYTIMER_CLK 20
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 21
+#define CAM_CC_CSIPHY0_CLK 22
+#define CAM_CC_CSIPHY1_CLK 23
+#define CAM_CC_CSIPHY2_CLK 24
+#define CAM_CC_CSIPHY3_CLK 25
+#define CAM_CC_FAST_AHB_CLK_SRC 26
+#define CAM_CC_ICP_APB_CLK 27
+#define CAM_CC_ICP_ATB_CLK 28
+#define CAM_CC_ICP_CLK 29
+#define CAM_CC_ICP_CLK_SRC 30
+#define CAM_CC_ICP_CTI_CLK 31
+#define CAM_CC_ICP_TS_CLK 32
+#define CAM_CC_IFE_0_AXI_CLK 33
+#define CAM_CC_IFE_0_CLK 34
+#define CAM_CC_IFE_0_CLK_SRC 35
+#define CAM_CC_IFE_0_CPHY_RX_CLK 36
+#define CAM_CC_IFE_0_CSID_CLK 37
+#define CAM_CC_IFE_0_CSID_CLK_SRC 38
+#define CAM_CC_IFE_0_DSP_CLK 39
+#define CAM_CC_IFE_1_AXI_CLK 40
+#define CAM_CC_IFE_1_CLK 41
+#define CAM_CC_IFE_1_CLK_SRC 42
+#define CAM_CC_IFE_1_CPHY_RX_CLK 43
+#define CAM_CC_IFE_1_CSID_CLK 44
+#define CAM_CC_IFE_1_CSID_CLK_SRC 45
+#define CAM_CC_IFE_1_DSP_CLK 46
+#define CAM_CC_IFE_LITE_CLK 47
+#define CAM_CC_IFE_LITE_CLK_SRC 48
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49
+#define CAM_CC_IFE_LITE_CSID_CLK 50
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51
+#define CAM_CC_IPE_0_AHB_CLK 52
+#define CAM_CC_IPE_0_AREG_CLK 53
+#define CAM_CC_IPE_0_AXI_CLK 54
+#define CAM_CC_IPE_0_CLK 55
+#define CAM_CC_IPE_0_CLK_SRC 56
+#define CAM_CC_JPEG_CLK 57
+#define CAM_CC_JPEG_CLK_SRC 58
+#define CAM_CC_LRME_CLK 59
+#define CAM_CC_LRME_CLK_SRC 60
+#define CAM_CC_MCLK0_CLK 61
+#define CAM_CC_MCLK0_CLK_SRC 62
+#define CAM_CC_MCLK1_CLK 63
+#define CAM_CC_MCLK1_CLK_SRC 64
+#define CAM_CC_MCLK2_CLK 65
+#define CAM_CC_MCLK2_CLK_SRC 66
+#define CAM_CC_MCLK3_CLK 67
+#define CAM_CC_MCLK3_CLK_SRC 68
+#define CAM_CC_MCLK4_CLK 69
+#define CAM_CC_MCLK4_CLK_SRC 70
+#define CAM_CC_BPS_AHB_CLK 71
+#define CAM_CC_BPS_AREG_CLK 72
+#define CAM_CC_BPS_AXI_CLK 73
+#define CAM_CC_BPS_CLK 74
+#define CAM_CC_BPS_CLK_SRC 75
+#define CAM_CC_SLOW_AHB_CLK_SRC 76
+#define CAM_CC_SOC_AHB_CLK 77
+#define CAM_CC_SYS_TMR_CLK 78
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define TITAN_TOP_GDSC 4
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_0_BCR 2
+#define CAM_CC_CCI_1_BCR 3
+#define CAM_CC_CPAS_BCR 4
+#define CAM_CC_CSI0PHY_BCR 5
+#define CAM_CC_CSI1PHY_BCR 6
+#define CAM_CC_CSI2PHY_BCR 7
+#define CAM_CC_CSI3PHY_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_LITE_BCR 12
+#define CAM_CC_IPE_0_BCR 13
+#define CAM_CC_JPEG_BCR 14
+#define CAM_CC_LRME_BCR 15
+#define CAM_CC_MCLK0_BCR 16
+#define CAM_CC_MCLK1_BCR 17
+#define CAM_CC_MCLK2_BCR 18
+#define CAM_CC_MCLK3_BCR 19
+#define CAM_CC_MCLK4_BCR 20
+#define CAM_CC_TITAN_TOP_BCR 21
+
+#endif
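(Editor's note: this one header carries three specifier spaces for the same controller — clock indices, GDSC power-domain indices, and BCR reset indices — so a consumer wires all three against the camcc phandle. A sketch under assumed node names and addresses; the consumer compatible is purely illustrative:)

  #include <dt-bindings/clock/qcom,camcc-sc7180.h>

  camera-frontend@acaf000 {
          compatible = "qcom,sc7180-camss";       /* illustrative */
          reg = <0xacaf000 0x1000>;
          clocks = <&camcc CAM_CC_IFE_0_CLK>,
                   <&camcc CAM_CC_IFE_0_AXI_CLK>;
          clock-names = "ife0", "ife0_axi";
          resets = <&camcc CAM_CC_IFE_0_BCR>;
          power-domains = <&camcc IFE_0_GDSC>;
  };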
diff --git a/include/dt-bindings/clock/qcom,camcc-sc7280.h b/include/dt-bindings/clock/qcom,camcc-sc7280.h
new file mode 100644
index 000000000000..56640f407309
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sc7280.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_AUX 6
+#define CAM_CC_PLL2_OUT_AUX2 7
+#define CAM_CC_PLL3 8
+#define CAM_CC_PLL3_OUT_EVEN 9
+#define CAM_CC_PLL4 10
+#define CAM_CC_PLL4_OUT_EVEN 11
+#define CAM_CC_PLL5 12
+#define CAM_CC_PLL5_OUT_EVEN 13
+#define CAM_CC_PLL6 14
+#define CAM_CC_PLL6_OUT_EVEN 15
+#define CAM_CC_PLL6_OUT_ODD 16
+#define CAM_CC_BPS_AHB_CLK 17
+#define CAM_CC_BPS_AREG_CLK 18
+#define CAM_CC_BPS_AXI_CLK 19
+#define CAM_CC_BPS_CLK 20
+#define CAM_CC_BPS_CLK_SRC 21
+#define CAM_CC_CAMNOC_AXI_CLK 22
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 23
+#define CAM_CC_CAMNOC_DCD_XO_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CSI0PHYTIMER_CLK 32
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI1PHYTIMER_CLK 34
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI2PHYTIMER_CLK 36
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI3PHYTIMER_CLK 38
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI4PHYTIMER_CLK 40
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSIPHY0_CLK 42
+#define CAM_CC_CSIPHY1_CLK 43
+#define CAM_CC_CSIPHY2_CLK 44
+#define CAM_CC_CSIPHY3_CLK 45
+#define CAM_CC_CSIPHY4_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_AHB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_IFE_0_AXI_CLK 52
+#define CAM_CC_IFE_0_CLK 53
+#define CAM_CC_IFE_0_CLK_SRC 54
+#define CAM_CC_IFE_0_CPHY_RX_CLK 55
+#define CAM_CC_IFE_0_CSID_CLK 56
+#define CAM_CC_IFE_0_CSID_CLK_SRC 57
+#define CAM_CC_IFE_0_DSP_CLK 58
+#define CAM_CC_IFE_1_AXI_CLK 59
+#define CAM_CC_IFE_1_CLK 60
+#define CAM_CC_IFE_1_CLK_SRC 61
+#define CAM_CC_IFE_1_CPHY_RX_CLK 62
+#define CAM_CC_IFE_1_CSID_CLK 63
+#define CAM_CC_IFE_1_CSID_CLK_SRC 64
+#define CAM_CC_IFE_1_DSP_CLK 65
+#define CAM_CC_IFE_2_AXI_CLK 66
+#define CAM_CC_IFE_2_CLK 67
+#define CAM_CC_IFE_2_CLK_SRC 68
+#define CAM_CC_IFE_2_CPHY_RX_CLK 69
+#define CAM_CC_IFE_2_CSID_CLK 70
+#define CAM_CC_IFE_2_CSID_CLK_SRC 71
+#define CAM_CC_IFE_2_DSP_CLK 72
+#define CAM_CC_IFE_LITE_0_CLK 73
+#define CAM_CC_IFE_LITE_0_CLK_SRC 74
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 75
+#define CAM_CC_IFE_LITE_0_CSID_CLK 76
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 77
+#define CAM_CC_IFE_LITE_1_CLK 78
+#define CAM_CC_IFE_LITE_1_CLK_SRC 79
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 80
+#define CAM_CC_IFE_LITE_1_CSID_CLK 81
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 82
+#define CAM_CC_IPE_0_AHB_CLK 83
+#define CAM_CC_IPE_0_AREG_CLK 84
+#define CAM_CC_IPE_0_AXI_CLK 85
+#define CAM_CC_IPE_0_CLK 86
+#define CAM_CC_IPE_0_CLK_SRC 87
+#define CAM_CC_JPEG_CLK 88
+#define CAM_CC_JPEG_CLK_SRC 89
+#define CAM_CC_LRME_CLK 90
+#define CAM_CC_LRME_CLK_SRC 91
+#define CAM_CC_MCLK0_CLK 92
+#define CAM_CC_MCLK0_CLK_SRC 93
+#define CAM_CC_MCLK1_CLK 94
+#define CAM_CC_MCLK1_CLK_SRC 95
+#define CAM_CC_MCLK2_CLK 96
+#define CAM_CC_MCLK2_CLK_SRC 97
+#define CAM_CC_MCLK3_CLK 98
+#define CAM_CC_MCLK3_CLK_SRC 99
+#define CAM_CC_MCLK4_CLK 100
+#define CAM_CC_MCLK4_CLK_SRC 101
+#define CAM_CC_MCLK5_CLK 102
+#define CAM_CC_MCLK5_CLK_SRC 103
+#define CAM_CC_SLEEP_CLK 104
+#define CAM_CC_SLEEP_CLK_SRC 105
+#define CAM_CC_SLOW_AHB_CLK_SRC 106
+#define CAM_CC_XO_CLK_SRC 107
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sm8250.h b/include/dt-bindings/clock/qcom,camcc-sm8250.h
new file mode 100644
index 000000000000..383ead17608d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sm8250.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CORE_AHB_CLK 12
+#define CAM_CC_CPAS_AHB_CLK 13
+#define CAM_CC_CPHY_RX_CLK_SRC 14
+#define CAM_CC_CSI0PHYTIMER_CLK 15
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16
+#define CAM_CC_CSI1PHYTIMER_CLK 17
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18
+#define CAM_CC_CSI2PHYTIMER_CLK 19
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI3PHYTIMER_CLK 21
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI4PHYTIMER_CLK 23
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI5PHYTIMER_CLK 25
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSIPHY0_CLK 27
+#define CAM_CC_CSIPHY1_CLK 28
+#define CAM_CC_CSIPHY2_CLK 29
+#define CAM_CC_CSIPHY3_CLK 30
+#define CAM_CC_CSIPHY4_CLK 31
+#define CAM_CC_CSIPHY5_CLK 32
+#define CAM_CC_FAST_AHB_CLK_SRC 33
+#define CAM_CC_FD_CORE_CLK 34
+#define CAM_CC_FD_CORE_CLK_SRC 35
+#define CAM_CC_FD_CORE_UAR_CLK 36
+#define CAM_CC_GDSC_CLK 37
+#define CAM_CC_ICP_AHB_CLK 38
+#define CAM_CC_ICP_CLK 39
+#define CAM_CC_ICP_CLK_SRC 40
+#define CAM_CC_IFE_0_AHB_CLK 41
+#define CAM_CC_IFE_0_AREG_CLK 42
+#define CAM_CC_IFE_0_AXI_CLK 43
+#define CAM_CC_IFE_0_CLK 44
+#define CAM_CC_IFE_0_CLK_SRC 45
+#define CAM_CC_IFE_0_CPHY_RX_CLK 46
+#define CAM_CC_IFE_0_CSID_CLK 47
+#define CAM_CC_IFE_0_CSID_CLK_SRC 48
+#define CAM_CC_IFE_0_DSP_CLK 49
+#define CAM_CC_IFE_1_AHB_CLK 50
+#define CAM_CC_IFE_1_AREG_CLK 51
+#define CAM_CC_IFE_1_AXI_CLK 52
+#define CAM_CC_IFE_1_CLK 53
+#define CAM_CC_IFE_1_CLK_SRC 54
+#define CAM_CC_IFE_1_CPHY_RX_CLK 55
+#define CAM_CC_IFE_1_CSID_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK_SRC 57
+#define CAM_CC_IFE_1_DSP_CLK 58
+#define CAM_CC_IFE_LITE_AHB_CLK 59
+#define CAM_CC_IFE_LITE_AXI_CLK 60
+#define CAM_CC_IFE_LITE_CLK 61
+#define CAM_CC_IFE_LITE_CLK_SRC 62
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 63
+#define CAM_CC_IFE_LITE_CSID_CLK 64
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 65
+#define CAM_CC_IPE_0_AHB_CLK 66
+#define CAM_CC_IPE_0_AREG_CLK 67
+#define CAM_CC_IPE_0_AXI_CLK 68
+#define CAM_CC_IPE_0_CLK 69
+#define CAM_CC_IPE_0_CLK_SRC 70
+#define CAM_CC_JPEG_CLK 71
+#define CAM_CC_JPEG_CLK_SRC 72
+#define CAM_CC_MCLK0_CLK 73
+#define CAM_CC_MCLK0_CLK_SRC 74
+#define CAM_CC_MCLK1_CLK 75
+#define CAM_CC_MCLK1_CLK_SRC 76
+#define CAM_CC_MCLK2_CLK 77
+#define CAM_CC_MCLK2_CLK_SRC 78
+#define CAM_CC_MCLK3_CLK 79
+#define CAM_CC_MCLK3_CLK_SRC 80
+#define CAM_CC_MCLK4_CLK 81
+#define CAM_CC_MCLK4_CLK_SRC 82
+#define CAM_CC_MCLK5_CLK 83
+#define CAM_CC_MCLK5_CLK_SRC 84
+#define CAM_CC_MCLK6_CLK 85
+#define CAM_CC_MCLK6_CLK_SRC 86
+#define CAM_CC_PLL0 87
+#define CAM_CC_PLL0_OUT_EVEN 88
+#define CAM_CC_PLL0_OUT_ODD 89
+#define CAM_CC_PLL1 90
+#define CAM_CC_PLL1_OUT_EVEN 91
+#define CAM_CC_PLL2 92
+#define CAM_CC_PLL2_OUT_MAIN 93
+#define CAM_CC_PLL3 94
+#define CAM_CC_PLL3_OUT_EVEN 95
+#define CAM_CC_PLL4 96
+#define CAM_CC_PLL4_OUT_EVEN 97
+#define CAM_CC_SBI_AHB_CLK 98
+#define CAM_CC_SBI_AXI_CLK 99
+#define CAM_CC_SBI_CLK 100
+#define CAM_CC_SBI_CPHY_RX_CLK 101
+#define CAM_CC_SBI_CSID_CLK 102
+#define CAM_CC_SBI_CSID_CLK_SRC 103
+#define CAM_CC_SBI_DIV_CLK_SRC 104
+#define CAM_CC_SBI_IFE_0_CLK 105
+#define CAM_CC_SBI_IFE_1_CLK 106
+#define CAM_CC_SLEEP_CLK 107
+#define CAM_CC_SLEEP_CLK_SRC 108
+#define CAM_CC_SLOW_AHB_CLK_SRC 109
+#define CAM_CC_XO_CLK_SRC 110
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IPE_0_BCR 4
+#define CAM_CC_SBI_BCR 5
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define SBI_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-qcm2290.h b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
new file mode 100644
index 000000000000..cb687949be41
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP_CLK 9
+#define DISP_CC_MDSS_MDP_CLK_SRC 10
+#define DISP_CC_MDSS_MDP_LUT_CLK 11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12
+#define DISP_CC_MDSS_PCLK0_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 14
+#define DISP_CC_MDSS_VSYNC_CLK 15
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 16
+#define DISP_CC_SLEEP_CLK 17
+#define DISP_CC_SLEEP_CLK_SRC 18
+#define DISP_CC_XO_CLK 19
+#define DISP_CC_XO_CLK_SRC 20
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7280.h b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
new file mode 100644
index 000000000000..a4a692c20acf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10
+#define DISP_CC_MDSS_DP_LINK_CLK 11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16
+#define DISP_CC_MDSS_EDP_AUX_CLK 17
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 18
+#define DISP_CC_MDSS_EDP_LINK_CLK 19
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 20
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 21
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 23
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 24
+#define DISP_CC_MDSS_ESC0_CLK 25
+#define DISP_CC_MDSS_ESC0_CLK_SRC 26
+#define DISP_CC_MDSS_MDP_CLK 27
+#define DISP_CC_MDSS_MDP_CLK_SRC 28
+#define DISP_CC_MDSS_MDP_LUT_CLK 29
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 30
+#define DISP_CC_MDSS_PCLK0_CLK 31
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 32
+#define DISP_CC_MDSS_ROT_CLK 33
+#define DISP_CC_MDSS_ROT_CLK_SRC 34
+#define DISP_CC_MDSS_RSCC_AHB_CLK 35
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 36
+#define DISP_CC_MDSS_VSYNC_CLK 37
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 38
+#define DISP_CC_SLEEP_CLK 39
+#define DISP_CC_XO_CLK 40
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc8280xp.h b/include/dt-bindings/clock/qcom,dispcc-sc8280xp.h
new file mode 100644
index 000000000000..2831c61fa979
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc8280xp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC8280XP_H
+
+/* DISPCC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL1 1
+#define DISP_CC_PLL1_OUT_EVEN 2
+#define DISP_CC_PLL2 3
+#define DISP_CC_MDSS_AHB1_CLK 4
+#define DISP_CC_MDSS_AHB_CLK 5
+#define DISP_CC_MDSS_AHB_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_CLK 7
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 10
+#define DISP_CC_MDSS_BYTE1_CLK 11
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 12
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 14
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 15
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 17
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 18
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 23
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 25
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 26
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 28
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 30
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 32
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 34
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 35
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 37
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 52
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC0_CLK 55
+#define DISP_CC_MDSS_ESC0_CLK_SRC 56
+#define DISP_CC_MDSS_ESC1_CLK 57
+#define DISP_CC_MDSS_ESC1_CLK_SRC 58
+#define DISP_CC_MDSS_MDP1_CLK 59
+#define DISP_CC_MDSS_MDP_CLK 60
+#define DISP_CC_MDSS_MDP_CLK_SRC 61
+#define DISP_CC_MDSS_MDP_LUT1_CLK 62
+#define DISP_CC_MDSS_MDP_LUT_CLK 63
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 64
+#define DISP_CC_MDSS_PCLK0_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 66
+#define DISP_CC_MDSS_PCLK1_CLK 67
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 68
+#define DISP_CC_MDSS_ROT1_CLK 69
+#define DISP_CC_MDSS_ROT_CLK 70
+#define DISP_CC_MDSS_ROT_CLK_SRC 71
+#define DISP_CC_MDSS_RSCC_AHB_CLK 72
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC1_CLK 74
+#define DISP_CC_MDSS_VSYNC_CLK 75
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISPCC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* DISPCC GDSCs */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
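
The reset (BCR) indices work the same way against #reset-cells = <1> on the same controller node. A hedged sketch of a consumer asserting the MDSS core reset (label and node name are illustrative):

	display-subsystem {
		resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
		reset-names = "mdss";
	};
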
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6125.h b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
new file mode 100644
index 000000000000..4ff974f4fcc3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 12
+#define DISP_CC_MDSS_DP_PIXEL_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 14
+#define DISP_CC_MDSS_ESC0_CLK 15
+#define DISP_CC_MDSS_ESC0_CLK_SRC 16
+#define DISP_CC_MDSS_MDP_CLK 17
+#define DISP_CC_MDSS_MDP_CLK_SRC 18
+#define DISP_CC_MDSS_MDP_LUT_CLK 19
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 20
+#define DISP_CC_MDSS_PCLK0_CLK 21
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 22
+#define DISP_CC_MDSS_ROT_CLK 23
+#define DISP_CC_MDSS_ROT_CLK_SRC 24
+#define DISP_CC_MDSS_VSYNC_CLK 25
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 26
+#define DISP_CC_XO_CLK 27
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6350.h b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
new file mode 100644
index 000000000000..cb54aae2723e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10
+#define DISP_CC_MDSS_DP_LINK_CLK 11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16
+#define DISP_CC_MDSS_ESC0_CLK 17
+#define DISP_CC_MDSS_ESC0_CLK_SRC 18
+#define DISP_CC_MDSS_MDP_CLK 19
+#define DISP_CC_MDSS_MDP_CLK_SRC 20
+#define DISP_CC_MDSS_MDP_LUT_CLK 21
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 22
+#define DISP_CC_MDSS_PCLK0_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 24
+#define DISP_CC_MDSS_ROT_CLK 25
+#define DISP_CC_MDSS_ROT_CLK_SRC 26
+#define DISP_CC_MDSS_RSCC_AHB_CLK 27
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 28
+#define DISP_CC_MDSS_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 30
+#define DISP_CC_SLEEP_CLK 31
+#define DISP_CC_XO_CLK 32
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8150.h b/include/dt-bindings/clock/qcom,dispcc-sm8150.h
new file mode 120000
index 000000000000..0312b4544acb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8150.h
@@ -0,0 +1 @@
+qcom,dispcc-sm8250.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8250.h b/include/dt-bindings/clock/qcom,dispcc-sm8250.h
new file mode 100644
index 000000000000..767fdb27e514
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8250.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8250_H
+
+/* DISP_CC clock registers */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AHB_CLK_SRC 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK 6
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 9
+#define DISP_CC_MDSS_DP_AUX1_CLK 10
+#define DISP_CC_MDSS_DP_AUX1_CLK_SRC 11
+#define DISP_CC_MDSS_DP_AUX_CLK 12
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK1_CLK 14
+#define DISP_CC_MDSS_DP_LINK1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DP_LINK1_INTF_CLK 17
+#define DISP_CC_MDSS_DP_LINK_CLK 18
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 19
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 20
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 21
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 22
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 23
+#define DISP_CC_MDSS_DP_PIXEL2_CLK 24
+#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC 25
+#define DISP_CC_MDSS_DP_PIXEL_CLK 26
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 27
+#define DISP_CC_MDSS_ESC0_CLK 28
+#define DISP_CC_MDSS_ESC0_CLK_SRC 29
+#define DISP_CC_MDSS_ESC1_CLK 30
+#define DISP_CC_MDSS_ESC1_CLK_SRC 31
+#define DISP_CC_MDSS_MDP_CLK 32
+#define DISP_CC_MDSS_MDP_CLK_SRC 33
+#define DISP_CC_MDSS_MDP_LUT_CLK 34
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 35
+#define DISP_CC_MDSS_PCLK0_CLK 36
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 37
+#define DISP_CC_MDSS_PCLK1_CLK 38
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 39
+#define DISP_CC_MDSS_ROT_CLK 40
+#define DISP_CC_MDSS_ROT_CLK_SRC 41
+#define DISP_CC_MDSS_RSCC_AHB_CLK 42
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 43
+#define DISP_CC_MDSS_VSYNC_CLK 44
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 45
+#define DISP_CC_PLL0 46
+#define DISP_CC_PLL1 47
+#define DISP_CC_MDSS_EDP_AUX_CLK 48
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 49
+#define DISP_CC_MDSS_EDP_GTC_CLK 50
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 51
+#define DISP_CC_MDSS_EDP_LINK_CLK 52
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 53
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 54
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 55
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 56
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 57
+
+/* DISP_CC Reset */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8350.h b/include/dt-bindings/clock/qcom,dispcc-sm8350.h
new file mode 120000
index 000000000000..0312b4544acb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8350.h
@@ -0,0 +1 @@
+qcom,dispcc-sm8250.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
index 7f657cf8cc8a..a985248d6332 100644
--- a/include/dt-bindings/clock/qcom,gcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h
@@ -339,6 +339,7 @@
#define GCC_PCIE_1_MSTR_AXI_CLK 330
#define GCC_PCIE_1_PIPE_CLK 331
#define GCC_PCIE_1_SLV_AXI_CLK 332
+#define GCC_MMSS_GPLL0_CLK_SRC 333
/* gdscs */
#define USB_HS_HSIC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
index 7e8a7be6dcda..fa0587857547 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
@@ -165,5 +165,11 @@
#define GCC_QDSS_BCR 69
#define GCC_MPM_BCR 70
#define GCC_SPDM_BCR 71
+#define ESS_MAC1_ARES 72
+#define ESS_MAC2_ARES 73
+#define ESS_MAC3_ARES 74
+#define ESS_MAC4_ARES 75
+#define ESS_MAC5_ARES 76
+#define ESS_PSGMII_ARES 77
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq5018.h b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..f3de2fdfeea1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define UBI32_PLL_MAIN 6
+#define UBI32_PLL 7
+#define ADSS_PWM_CLK_SRC 8
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP1_UART1_APPS_CLK_SRC 15
+#define BLSP1_UART2_APPS_CLK_SRC 16
+#define CRYPTO_CLK_SRC 17
+#define GCC_ADSS_PWM_CLK 18
+#define GCC_BLSP1_AHB_CLK 19
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART2_APPS_CLK 27
+#define GCC_BTSS_LPO_CLK 28
+#define GCC_CMN_BLK_AHB_CLK 29
+#define GCC_CMN_BLK_SYS_CLK 30
+#define GCC_CRYPTO_AHB_CLK 31
+#define GCC_CRYPTO_AXI_CLK 32
+#define GCC_CRYPTO_CLK 33
+#define GCC_CRYPTO_PPE_CLK 34
+#define GCC_DCC_CLK 35
+#define GCC_GEPHY_RX_CLK 36
+#define GCC_GEPHY_TX_CLK 37
+#define GCC_GMAC0_CFG_CLK 38
+#define GCC_GMAC0_PTP_CLK 39
+#define GCC_GMAC0_RX_CLK 40
+#define GCC_GMAC0_SYS_CLK 41
+#define GCC_GMAC0_TX_CLK 42
+#define GCC_GMAC1_CFG_CLK 43
+#define GCC_GMAC1_PTP_CLK 44
+#define GCC_GMAC1_RX_CLK 45
+#define GCC_GMAC1_SYS_CLK 46
+#define GCC_GMAC1_TX_CLK 47
+#define GCC_GP1_CLK 48
+#define GCC_GP2_CLK 49
+#define GCC_GP3_CLK 50
+#define GCC_LPASS_CORE_AXIM_CLK 51
+#define GCC_LPASS_SWAY_CLK 52
+#define GCC_MDIO0_AHB_CLK 53
+#define GCC_MDIO1_AHB_CLK 54
+#define GCC_PCIE0_AHB_CLK 55
+#define GCC_PCIE0_AUX_CLK 56
+#define GCC_PCIE0_AXI_M_CLK 57
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 58
+#define GCC_PCIE0_AXI_S_CLK 59
+#define GCC_PCIE0_PIPE_CLK 60
+#define GCC_PCIE1_AHB_CLK 61
+#define GCC_PCIE1_AUX_CLK 62
+#define GCC_PCIE1_AXI_M_CLK 63
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 64
+#define GCC_PCIE1_AXI_S_CLK 65
+#define GCC_PCIE1_PIPE_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_Q6_AXIM_CLK 68
+#define GCC_Q6_AXIM2_CLK 69
+#define GCC_Q6_AXIS_CLK 70
+#define GCC_Q6_AHB_CLK 71
+#define GCC_Q6_AHB_S_CLK 72
+#define GCC_Q6_TSCTR_1TO2_CLK 73
+#define GCC_Q6SS_ATBM_CLK 74
+#define GCC_Q6SS_PCLKDBG_CLK 75
+#define GCC_Q6SS_TRIG_CLK 76
+#define GCC_QDSS_AT_CLK 77
+#define GCC_QDSS_CFG_AHB_CLK 78
+#define GCC_QDSS_DAP_AHB_CLK 79
+#define GCC_QDSS_DAP_CLK 80
+#define GCC_QDSS_ETR_USB_CLK 81
+#define GCC_QDSS_EUD_AT_CLK 82
+#define GCC_QDSS_STM_CLK 83
+#define GCC_QDSS_TRACECLKIN_CLK 84
+#define GCC_QDSS_TSCTR_DIV8_CLK 85
+#define GCC_QPIC_AHB_CLK 86
+#define GCC_QPIC_CLK 87
+#define GCC_QPIC_IO_MACRO_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SLEEP_CLK_SRC 91
+#define GCC_SNOC_GMAC0_AHB_CLK 92
+#define GCC_SNOC_GMAC0_AXI_CLK 93
+#define GCC_SNOC_GMAC1_AHB_CLK 94
+#define GCC_SNOC_GMAC1_AXI_CLK 95
+#define GCC_SNOC_LPASS_AXIM_CLK 96
+#define GCC_SNOC_LPASS_SWAY_CLK 97
+#define GCC_SNOC_UBI0_AXI_CLK 98
+#define GCC_SYS_NOC_PCIE0_AXI_CLK 99
+#define GCC_SYS_NOC_PCIE1_AXI_CLK 100
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 101
+#define GCC_SYS_NOC_USB0_AXI_CLK 102
+#define GCC_SYS_NOC_WCSS_AHB_CLK 103
+#define GCC_UBI0_AXI_CLK 104
+#define GCC_UBI0_CFG_CLK 105
+#define GCC_UBI0_CORE_CLK 106
+#define GCC_UBI0_DBG_CLK 107
+#define GCC_UBI0_NC_AXI_CLK 108
+#define GCC_UBI0_UTCM_CLK 109
+#define GCC_UNIPHY_AHB_CLK 110
+#define GCC_UNIPHY_RX_CLK 111
+#define GCC_UNIPHY_SYS_CLK 112
+#define GCC_UNIPHY_TX_CLK 113
+#define GCC_USB0_AUX_CLK 114
+#define GCC_USB0_EUD_AT_CLK 115
+#define GCC_USB0_LFPS_CLK 116
+#define GCC_USB0_MASTER_CLK 117
+#define GCC_USB0_MOCK_UTMI_CLK 118
+#define GCC_USB0_PHY_CFG_AHB_CLK 119
+#define GCC_USB0_SLEEP_CLK 120
+#define GCC_WCSS_ACMT_CLK 121
+#define GCC_WCSS_AHB_S_CLK 122
+#define GCC_WCSS_AXI_M_CLK 123
+#define GCC_WCSS_AXI_S_CLK 124
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 125
+#define GCC_WCSS_DBG_IFC_APB_CLK 126
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 127
+#define GCC_WCSS_DBG_IFC_ATB_CLK 128
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK 129
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 130
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 131
+#define GCC_WCSS_DBG_IFC_NTS_CLK 132
+#define GCC_WCSS_ECAHB_CLK 133
+#define GCC_XO_CLK 134
+#define GCC_XO_CLK_SRC 135
+#define GMAC0_RX_CLK_SRC 136
+#define GMAC0_TX_CLK_SRC 137
+#define GMAC1_RX_CLK_SRC 138
+#define GMAC1_TX_CLK_SRC 139
+#define GMAC_CLK_SRC 140
+#define GP1_CLK_SRC 141
+#define GP2_CLK_SRC 142
+#define GP3_CLK_SRC 143
+#define LPASS_AXIM_CLK_SRC 144
+#define LPASS_SWAY_CLK_SRC 145
+#define PCIE0_AUX_CLK_SRC 146
+#define PCIE0_AXI_CLK_SRC 147
+#define PCIE1_AUX_CLK_SRC 148
+#define PCIE1_AXI_CLK_SRC 149
+#define PCNOC_BFDCD_CLK_SRC 150
+#define Q6_AXI_CLK_SRC 151
+#define QDSS_AT_CLK_SRC 152
+#define QDSS_STM_CLK_SRC 153
+#define QDSS_TSCTR_CLK_SRC 154
+#define QDSS_TRACECLKIN_CLK_SRC 155
+#define QPIC_IO_MACRO_CLK_SRC 156
+#define SDCC1_APPS_CLK_SRC 157
+#define SYSTEM_NOC_BFDCD_CLK_SRC 158
+#define UBI0_AXI_CLK_SRC 159
+#define UBI0_CORE_CLK_SRC 160
+#define USB0_AUX_CLK_SRC 161
+#define USB0_LFPS_CLK_SRC 162
+#define USB0_MASTER_CLK_SRC 163
+#define USB0_MOCK_UTMI_CLK_SRC 164
+#define WCSS_AHB_CLK_SRC 165
+#define PCIE0_PIPE_CLK_SRC 166
+#define PCIE1_PIPE_CLK_SRC 167
+#define USB0_PIPE_CLK_SRC 168
+#define GCC_USB0_PIPE_CLK 169
+#define GMAC0_RX_DIV_CLK_SRC 170
+#define GMAC0_TX_DIV_CLK_SRC 171
+#define GMAC1_RX_DIV_CLK_SRC 172
+#define GMAC1_TX_DIV_CLK_SRC 173
+#endif
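
Besides plain clock references, DTS can pin one of the RCG (_CLK_SRC) indices to a boot-time rate with the assigned-clocks idiom. A hypothetical sketch (the &gcc label, unit address, and the 192 MHz rate are illustrative only):

	sdhci@7804000 {
		clocks = <&gcc GCC_SDCC1_AHB_CLK>, <&gcc GCC_SDCC1_APPS_CLK>;
		clock-names = "iface", "core";
		/* request a rate on the RCG feeding the branch clock */
		assigned-clocks = <&gcc SDCC1_APPS_CLK_SRC>;
		assigned-clock-rates = <192000000>;
	};
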
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index 7deec14a6dee..02262d2ac899 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -240,7 +240,7 @@
#define PLL14 232
#define PLL14_VOTE 233
#define PLL18 234
-#define CE5_SRC 235
+#define CE5_A_CLK 235
#define CE5_H_CLK 236
#define CE5_CORE_CLK 237
#define CE3_SLEEP_CLK 238
@@ -283,5 +283,8 @@
#define EBI2_AON_CLK 281
#define NSSTCM_CLK_SRC 282
#define NSSTCM_CLK 283
+#define CE5_A_CLK_SRC 285
+#define CE5_H_CLK_SRC 286
+#define CE5_CORE_CLK_SRC 287
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 8e2bec1c91bf..f9ea55811104 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -233,6 +233,7 @@
#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
#define GCC_PCIE0_RCHNG_CLK_SRC 225
#define GCC_PCIE0_RCHNG_CLK 226
+#define GCC_CRYPTO_PPE_CLK 227
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
@@ -366,5 +367,22 @@
#define GCC_PCIE1_AHB_ARES 129
#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130
#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131
+#define GCC_PPE_FULL_RESET 132
+#define GCC_UNIPHY0_SOFT_RESET 133
+#define GCC_UNIPHY0_XPCS_RESET 134
+#define GCC_UNIPHY1_SOFT_RESET 135
+#define GCC_UNIPHY1_XPCS_RESET 136
+#define GCC_UNIPHY2_SOFT_RESET 137
+#define GCC_UNIPHY2_XPCS_RESET 138
+#define GCC_EDMA_HW_RESET 139
+#define GCC_NSSPORT1_RESET 140
+#define GCC_NSSPORT2_RESET 141
+#define GCC_NSSPORT3_RESET 142
+#define GCC_NSSPORT4_RESET 143
+#define GCC_NSSPORT5_RESET 144
+#define GCC_NSSPORT6_RESET 145
+
+#define USB0_GDSC 0
+#define USB1_GDSC 1
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9607.h b/include/dt-bindings/clock/qcom,gcc-mdm9607.h
new file mode 100644
index 000000000000..357a680a40da
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-mdm9607.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_9607_H
+#define _DT_BINDINGS_CLK_MSM_GCC_9607_H
+
+#define GPLL0 0
+#define GPLL0_EARLY 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2 4
+#define GPLL2_EARLY 5
+#define PCNOC_BFDCD_CLK_SRC 6
+#define SYSTEM_NOC_BFDCD_CLK_SRC 7
+#define GCC_SMMU_CFG_CLK 8
+#define APSS_AHB_CLK_SRC 9
+#define GCC_QDSS_DAP_CLK 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define CRYPTO_CLK_SRC 25
+#define GP1_CLK_SRC 26
+#define GP2_CLK_SRC 27
+#define GP3_CLK_SRC 28
+#define PDM2_CLK_SRC 29
+#define SDCC1_APPS_CLK_SRC 30
+#define SDCC2_APPS_CLK_SRC 31
+#define APSS_TCU_CLK_SRC 32
+#define USB_HS_SYSTEM_CLK_SRC 33
+#define GCC_BLSP1_AHB_CLK 34
+#define GCC_BLSP1_SLEEP_CLK 35
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 37
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 43
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 44
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 45
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 46
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 47
+#define GCC_BLSP1_UART1_APPS_CLK 48
+#define GCC_BLSP1_UART2_APPS_CLK 49
+#define GCC_BOOT_ROM_AHB_CLK 50
+#define GCC_CRYPTO_AHB_CLK 51
+#define GCC_CRYPTO_AXI_CLK 52
+#define GCC_CRYPTO_CLK 53
+#define GCC_GP1_CLK 54
+#define GCC_GP2_CLK 55
+#define GCC_GP3_CLK 56
+#define GCC_MSS_CFG_AHB_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM_AHB_CLK 59
+#define GCC_PRNG_AHB_CLK 60
+#define GCC_SDCC1_AHB_CLK 61
+#define GCC_SDCC1_APPS_CLK 62
+#define GCC_SDCC2_AHB_CLK 63
+#define GCC_SDCC2_APPS_CLK 64
+#define GCC_USB2A_PHY_SLEEP_CLK 65
+#define GCC_USB_HS_AHB_CLK 66
+#define GCC_USB_HS_SYSTEM_CLK 67
+#define GCC_APSS_TCU_CLK 68
+#define GCC_MSS_Q6_BIMC_AXI_CLK 69
+#define BIMC_PLL 70
+#define BIMC_PLL_VOTE 71
+#define BIMC_DDR_CLK_SRC 72
+#define BLSP1_UART3_APPS_CLK_SRC 73
+#define BLSP1_UART4_APPS_CLK_SRC 74
+#define BLSP1_UART5_APPS_CLK_SRC 75
+#define BLSP1_UART6_APPS_CLK_SRC 76
+#define GCC_BLSP1_UART3_APPS_CLK 77
+#define GCC_BLSP1_UART4_APPS_CLK 78
+#define GCC_BLSP1_UART5_APPS_CLK 79
+#define GCC_BLSP1_UART6_APPS_CLK 80
+#define GCC_APSS_AHB_CLK 81
+#define GCC_APSS_AXI_CLK 82
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 83
+#define GCC_USB_HSIC_CLK_SRC 84
+#define GCC_USB_HSIC_IO_CAL_CLK_SRC 85
+#define GCC_USB_HSIC_SYSTEM_CLK_SRC 86
+
+/* Resets */
+#define USB2_HS_PHY_ONLY_BCR 0
+#define QUSB2_PHY_BCR 1
+#define GCC_MSS_RESTART 2
+#define USB_HS_HSIC_BCR 3
+#define USB_HS_BCR 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8909.h b/include/dt-bindings/clock/qcom,gcc-msm8909.h
new file mode 100644
index 000000000000..4394ba003425
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8909.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Kernkonzept GmbH.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+
+/* PLLs */
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2_EARLY 4
+#define GPLL2 5
+#define BIMC_PLL_EARLY 6
+#define BIMC_PLL 7
+
+/* RCGs */
+#define APSS_AHB_CLK_SRC 8
+#define BIMC_DDR_CLK_SRC 9
+#define BIMC_GPU_CLK_SRC 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define BYTE0_CLK_SRC 25
+#define CAMSS_GP0_CLK_SRC 26
+#define CAMSS_GP1_CLK_SRC 27
+#define CAMSS_TOP_AHB_CLK_SRC 28
+#define CODEC_DIGCODEC_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0_CLK_SRC 31
+#define CSI0PHYTIMER_CLK_SRC 32
+#define CSI1_CLK_SRC 33
+#define ESC0_CLK_SRC 34
+#define GFX3D_CLK_SRC 35
+#define GP1_CLK_SRC 36
+#define GP2_CLK_SRC 37
+#define GP3_CLK_SRC 38
+#define MCLK0_CLK_SRC 39
+#define MCLK1_CLK_SRC 40
+#define MDP_CLK_SRC 41
+#define PCLK0_CLK_SRC 42
+#define PCNOC_BFDCD_CLK_SRC 43
+#define PDM2_CLK_SRC 44
+#define SDCC1_APPS_CLK_SRC 45
+#define SDCC2_APPS_CLK_SRC 46
+#define SYSTEM_NOC_BFDCD_CLK_SRC 47
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 48
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 49
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 50
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 51
+#define ULTAUDIO_XO_CLK_SRC 52
+#define USB_HS_SYSTEM_CLK_SRC 53
+#define VCODEC0_CLK_SRC 54
+#define VFE0_CLK_SRC 55
+#define VSYNC_CLK_SRC 56
+
+/* Voteable Clocks */
+#define GCC_APSS_TCU_CLK 57
+#define GCC_BLSP1_AHB_CLK 58
+#define GCC_BLSP1_SLEEP_CLK 59
+#define GCC_BOOT_ROM_AHB_CLK 60
+#define GCC_CRYPTO_CLK 61
+#define GCC_CRYPTO_AHB_CLK 62
+#define GCC_CRYPTO_AXI_CLK 63
+#define GCC_GFX_TBU_CLK 64
+#define GCC_GFX_TCU_CLK 65
+#define GCC_GTCU_AHB_CLK 66
+#define GCC_MDP_TBU_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_SMMU_CFG_CLK 69
+#define GCC_VENUS_TBU_CLK 70
+#define GCC_VFE_TBU_CLK 71
+
+/* Branches */
+#define GCC_BIMC_GFX_CLK 72
+#define GCC_BIMC_GPU_CLK 73
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 74
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 75
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 76
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 77
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 78
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 79
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 80
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 81
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 82
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 83
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 84
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 85
+#define GCC_BLSP1_UART1_APPS_CLK 86
+#define GCC_BLSP1_UART2_APPS_CLK 87
+#define GCC_CAMSS_AHB_CLK 88
+#define GCC_CAMSS_CSI0_CLK 89
+#define GCC_CAMSS_CSI0_AHB_CLK 90
+#define GCC_CAMSS_CSI0PHY_CLK 91
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI0PIX_CLK 93
+#define GCC_CAMSS_CSI0RDI_CLK 94
+#define GCC_CAMSS_CSI1_CLK 95
+#define GCC_CAMSS_CSI1_AHB_CLK 96
+#define GCC_CAMSS_CSI1PHY_CLK 97
+#define GCC_CAMSS_CSI1PIX_CLK 98
+#define GCC_CAMSS_CSI1RDI_CLK 99
+#define GCC_CAMSS_CSI_VFE0_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_MCLK0_CLK 104
+#define GCC_CAMSS_MCLK1_CLK 105
+#define GCC_CAMSS_TOP_AHB_CLK 106
+#define GCC_CAMSS_VFE0_CLK 107
+#define GCC_CAMSS_VFE_AHB_CLK 108
+#define GCC_CAMSS_VFE_AXI_CLK 109
+#define GCC_CODEC_DIGCODEC_CLK 110
+#define GCC_GP1_CLK 111
+#define GCC_GP2_CLK 112
+#define GCC_GP3_CLK 113
+#define GCC_MDSS_AHB_CLK 114
+#define GCC_MDSS_AXI_CLK 115
+#define GCC_MDSS_BYTE0_CLK 116
+#define GCC_MDSS_ESC0_CLK 117
+#define GCC_MDSS_MDP_CLK 118
+#define GCC_MDSS_PCLK0_CLK 119
+#define GCC_MDSS_VSYNC_CLK 120
+#define GCC_MSS_CFG_AHB_CLK 121
+#define GCC_MSS_Q6_BIMC_AXI_CLK 122
+#define GCC_OXILI_AHB_CLK 123
+#define GCC_OXILI_GFX3D_CLK 124
+#define GCC_PDM2_CLK 125
+#define GCC_PDM_AHB_CLK 126
+#define GCC_SDCC1_AHB_CLK 127
+#define GCC_SDCC1_APPS_CLK 128
+#define GCC_SDCC2_AHB_CLK 129
+#define GCC_SDCC2_APPS_CLK 130
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 131
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 132
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 133
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 134
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 135
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 136
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 137
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 138
+#define GCC_ULTAUDIO_STC_XO_CLK 139
+#define GCC_USB2A_PHY_SLEEP_CLK 140
+#define GCC_USB_HS_AHB_CLK 141
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 142
+#define GCC_USB_HS_SYSTEM_CLK 143
+#define GCC_VENUS0_AHB_CLK 144
+#define GCC_VENUS0_AXI_CLK 145
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 146
+#define GCC_VENUS0_VCODEC0_CLK 147
+
+/* Resets */
+#define GCC_AUDIO_CORE_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_QUP4_BCR 5
+#define GCC_BLSP1_QUP5_BCR 6
+#define GCC_BLSP1_QUP6_BCR 7
+#define GCC_BLSP1_UART1_BCR 8
+#define GCC_BLSP1_UART2_BCR 9
+#define GCC_CAMSS_CSI0_BCR 10
+#define GCC_CAMSS_CSI0PHY_BCR 11
+#define GCC_CAMSS_CSI0PIX_BCR 12
+#define GCC_CAMSS_CSI0RDI_BCR 13
+#define GCC_CAMSS_CSI1_BCR 14
+#define GCC_CAMSS_CSI1PHY_BCR 15
+#define GCC_CAMSS_CSI1PIX_BCR 16
+#define GCC_CAMSS_CSI1RDI_BCR 17
+#define GCC_CAMSS_CSI_VFE0_BCR 18
+#define GCC_CAMSS_GP0_BCR 19
+#define GCC_CAMSS_GP1_BCR 20
+#define GCC_CAMSS_ISPIF_BCR 21
+#define GCC_CAMSS_MCLK0_BCR 22
+#define GCC_CAMSS_MCLK1_BCR 23
+#define GCC_CAMSS_PHY0_BCR 24
+#define GCC_CAMSS_TOP_BCR 25
+#define GCC_CAMSS_TOP_AHB_BCR 26
+#define GCC_CAMSS_VFE_BCR 27
+#define GCC_CRYPTO_BCR 28
+#define GCC_MDSS_BCR 29
+#define GCC_OXILI_BCR 30
+#define GCC_PDM_BCR 31
+#define GCC_PRNG_BCR 32
+#define GCC_QUSB2_PHY_BCR 33
+#define GCC_SDCC1_BCR 34
+#define GCC_SDCC2_BCR 35
+#define GCC_ULT_AUDIO_BCR 36
+#define GCC_USB2A_PHY_BCR 37
+#define GCC_USB2_HS_PHY_ONLY_BCR 38
+#define GCC_USB_HS_BCR 39
+#define GCC_VENUS0_BCR 40
+
+/* Subsystem Restart */
+#define GCC_MSS_RESTART 41
+
+/* Power Domains */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+#define VENUS_GDSC 2
+#define VENUS_CORE0_GDSC 3
+#define VFE_GDSC 4
+
+#endif
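
The GDSC indices are generic power-domain specifiers: the same controller also acts as a genpd provider with #power-domain-cells = <1>. A hypothetical consumer sketch (label and node name are illustrative):

	mdss {
		power-domains = <&gcc MDSS_GDSC>;
	};
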
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
new file mode 100644
index 000000000000..4b421e7414b5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8917_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8917_H
+
+/* Clocks */
+#define APSS_AHB_CLK_SRC 0
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 1
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 2
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 3
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 4
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 5
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 6
+#define BLSP1_UART1_APPS_CLK_SRC 7
+#define BLSP1_UART2_APPS_CLK_SRC 8
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP2_UART1_APPS_CLK_SRC 15
+#define BLSP2_UART2_APPS_CLK_SRC 16
+#define BYTE0_CLK_SRC 17
+#define CAMSS_GP0_CLK_SRC 18
+#define CAMSS_GP1_CLK_SRC 19
+#define CAMSS_TOP_AHB_CLK_SRC 20
+#define CCI_CLK_SRC 21
+#define CPP_CLK_SRC 22
+#define CRYPTO_CLK_SRC 23
+#define CSI0PHYTIMER_CLK_SRC 24
+#define CSI0_CLK_SRC 25
+#define CSI1PHYTIMER_CLK_SRC 26
+#define CSI1_CLK_SRC 27
+#define CSI2_CLK_SRC 28
+#define ESC0_CLK_SRC 29
+#define GCC_APSS_TCU_CLK 30
+#define GCC_BIMC_GFX_CLK 31
+#define GCC_BIMC_GPU_CLK 32
+#define GCC_BLSP1_AHB_CLK 33
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 34
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 35
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 37
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 39
+#define GCC_BLSP1_UART1_APPS_CLK 40
+#define GCC_BLSP1_UART2_APPS_CLK 41
+#define GCC_BLSP2_AHB_CLK 42
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 44
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 45
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 46
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 47
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 48
+#define GCC_BLSP2_UART1_APPS_CLK 49
+#define GCC_BLSP2_UART2_APPS_CLK 50
+#define GCC_BOOT_ROM_AHB_CLK 51
+#define GCC_CAMSS_AHB_CLK 52
+#define GCC_CAMSS_CCI_AHB_CLK 53
+#define GCC_CAMSS_CCI_CLK 54
+#define GCC_CAMSS_CPP_AHB_CLK 55
+#define GCC_CAMSS_CPP_CLK 56
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 57
+#define GCC_CAMSS_CSI0PHY_CLK 58
+#define GCC_CAMSS_CSI0PIX_CLK 59
+#define GCC_CAMSS_CSI0RDI_CLK 60
+#define GCC_CAMSS_CSI0_AHB_CLK 61
+#define GCC_CAMSS_CSI0_CLK 62
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 63
+#define GCC_CAMSS_CSI1PHY_CLK 64
+#define GCC_CAMSS_CSI1PIX_CLK 65
+#define GCC_CAMSS_CSI1RDI_CLK 66
+#define GCC_CAMSS_CSI1_AHB_CLK 67
+#define GCC_CAMSS_CSI1_CLK 68
+#define GCC_CAMSS_CSI2PHY_CLK 69
+#define GCC_CAMSS_CSI2PIX_CLK 70
+#define GCC_CAMSS_CSI2RDI_CLK 71
+#define GCC_CAMSS_CSI2_AHB_CLK 72
+#define GCC_CAMSS_CSI2_CLK 73
+#define GCC_CAMSS_CSI_VFE0_CLK 74
+#define GCC_CAMSS_CSI_VFE1_CLK 75
+#define GCC_CAMSS_GP0_CLK 76
+#define GCC_CAMSS_GP1_CLK 77
+#define GCC_CAMSS_ISPIF_AHB_CLK 78
+#define GCC_CAMSS_JPEG0_CLK 79
+#define GCC_CAMSS_JPEG_AHB_CLK 80
+#define GCC_CAMSS_JPEG_AXI_CLK 81
+#define GCC_CAMSS_MCLK0_CLK 82
+#define GCC_CAMSS_MCLK1_CLK 83
+#define GCC_CAMSS_MCLK2_CLK 84
+#define GCC_CAMSS_MICRO_AHB_CLK 85
+#define GCC_CAMSS_TOP_AHB_CLK 86
+#define GCC_CAMSS_VFE0_AHB_CLK 87
+#define GCC_CAMSS_VFE0_AXI_CLK 88
+#define GCC_CAMSS_VFE0_CLK 89
+#define GCC_CAMSS_VFE1_AHB_CLK 90
+#define GCC_CAMSS_VFE1_AXI_CLK 91
+#define GCC_CAMSS_VFE1_CLK 92
+#define GCC_CPP_TBU_CLK 93
+#define GCC_CRYPTO_AHB_CLK 94
+#define GCC_CRYPTO_AXI_CLK 95
+#define GCC_CRYPTO_CLK 96
+#define GCC_DCC_CLK 97
+#define GCC_GFX_TBU_CLK 98
+#define GCC_GFX_TCU_CLK 99
+#define GCC_GP1_CLK 100
+#define GCC_GP2_CLK 101
+#define GCC_GP3_CLK 102
+#define GCC_GTCU_AHB_CLK 103
+#define GCC_JPEG_TBU_CLK 104
+#define GCC_MDP_TBU_CLK 105
+#define GCC_MDSS_AHB_CLK 106
+#define GCC_MDSS_AXI_CLK 107
+#define GCC_MDSS_BYTE0_CLK 108
+#define GCC_MDSS_ESC0_CLK 109
+#define GCC_MDSS_MDP_CLK 110
+#define GCC_MDSS_PCLK0_CLK 111
+#define GCC_MDSS_VSYNC_CLK 112
+#define GCC_MSS_CFG_AHB_CLK 113
+#define GCC_MSS_Q6_BIMC_AXI_CLK 114
+#define GCC_OXILI_AHB_CLK 115
+#define GCC_OXILI_GFX3D_CLK 116
+#define GCC_PDM2_CLK 117
+#define GCC_PDM_AHB_CLK 118
+#define GCC_PRNG_AHB_CLK 119
+#define GCC_QDSS_DAP_CLK 120
+#define GCC_SDCC1_AHB_CLK 121
+#define GCC_SDCC1_APPS_CLK 122
+#define GCC_SDCC1_ICE_CORE_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_SMMU_CFG_CLK 126
+#define GCC_USB2A_PHY_SLEEP_CLK 127
+#define GCC_USB_HS_AHB_CLK 128
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 129
+#define GCC_USB_HS_SYSTEM_CLK 130
+#define GCC_VENUS0_AHB_CLK 131
+#define GCC_VENUS0_AXI_CLK 132
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 133
+#define GCC_VENUS0_VCODEC0_CLK 134
+#define GCC_VENUS_TBU_CLK 135
+#define GCC_VFE1_TBU_CLK 136
+#define GCC_VFE_TBU_CLK 137
+#define GFX3D_CLK_SRC 138
+#define GP1_CLK_SRC 139
+#define GP2_CLK_SRC 140
+#define GP3_CLK_SRC 141
+#define GPLL0 142
+#define GPLL0_EARLY 143
+#define GPLL3 144
+#define GPLL3_EARLY 145
+#define GPLL4 146
+#define GPLL4_EARLY 147
+#define GPLL6 148
+#define GPLL6_EARLY 149
+#define JPEG0_CLK_SRC 150
+#define MCLK0_CLK_SRC 151
+#define MCLK1_CLK_SRC 152
+#define MCLK2_CLK_SRC 153
+#define MDP_CLK_SRC 154
+#define PCLK0_CLK_SRC 155
+#define PDM2_CLK_SRC 156
+#define SDCC1_APPS_CLK_SRC 157
+#define SDCC1_ICE_CORE_CLK_SRC 158
+#define SDCC2_APPS_CLK_SRC 159
+#define USB_HS_SYSTEM_CLK_SRC 160
+#define VCODEC0_CLK_SRC 161
+#define VFE0_CLK_SRC 162
+#define VFE1_CLK_SRC 163
+#define VSYNC_CLK_SRC 164
+#define GPLL0_SLEEP_CLK_SRC 165
+
+/* GCC block resets */
+#define GCC_CAMSS_MICRO_BCR 0
+#define GCC_MSS_BCR 1
+#define GCC_QUSB2_PHY_BCR 2
+#define GCC_USB_HS_BCR 3
+#define GCC_USB2_HS_PHY_ONLY_BCR 4
+
+/* GDSCs */
+#define CPP_GDSC 0
+#define JPEG_GDSC 1
+#define MDSS_GDSC 2
+#define OXILI_GX_GDSC 3
+#define VENUS_CORE0_GDSC 4
+#define VENUS_GDSC 5
+#define VFE0_GDSC 6
+#define VFE1_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
index 0634467c4ce5..9a9bc55b49af 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8939.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -192,6 +192,13 @@
#define GCC_VENUS0_CORE0_VCODEC0_CLK 183
#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
#define GCC_OXILI_TIMER_CLK 185
+#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
+#define CSI2_CLK_SRC 187
+#define GCC_CAMSS_CSI2_AHB_CLK 188
+#define GCC_CAMSS_CSI2_CLK 189
+#define GCC_CAMSS_CSI2PHY_CLK 190
+#define GCC_CAMSS_CSI2PIX_CLK 191
+#define GCC_CAMSS_CSI2RDI_CLK 192
/* Indexes for GDSCs */
#define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8953.h b/include/dt-bindings/clock/qcom,gcc-msm8953.h
new file mode 100644
index 000000000000..13b4a62877e5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8953.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8953_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8953_H
+
+/* Clocks */
+#define APC0_DROOP_DETECTOR_CLK_SRC 0
+#define APC1_DROOP_DETECTOR_CLK_SRC 1
+#define APSS_AHB_CLK_SRC 2
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define BLSP1_UART1_APPS_CLK_SRC 11
+#define BLSP1_UART2_APPS_CLK_SRC 12
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 13
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 14
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 15
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 16
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 17
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 18
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 19
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 20
+#define BLSP2_UART1_APPS_CLK_SRC 21
+#define BLSP2_UART2_APPS_CLK_SRC 22
+#define BYTE0_CLK_SRC 23
+#define BYTE1_CLK_SRC 24
+#define CAMSS_GP0_CLK_SRC 25
+#define CAMSS_GP1_CLK_SRC 26
+#define CAMSS_TOP_AHB_CLK_SRC 27
+#define CCI_CLK_SRC 28
+#define CPP_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0PHYTIMER_CLK_SRC 31
+#define CSI0P_CLK_SRC 32
+#define CSI0_CLK_SRC 33
+#define CSI1PHYTIMER_CLK_SRC 34
+#define CSI1P_CLK_SRC 35
+#define CSI1_CLK_SRC 36
+#define CSI2PHYTIMER_CLK_SRC 37
+#define CSI2P_CLK_SRC 38
+#define CSI2_CLK_SRC 39
+#define ESC0_CLK_SRC 40
+#define ESC1_CLK_SRC 41
+#define GCC_APC0_DROOP_DETECTOR_GPLL0_CLK 42
+#define GCC_APC1_DROOP_DETECTOR_GPLL0_CLK 43
+#define GCC_APSS_AHB_CLK 44
+#define GCC_APSS_AXI_CLK 45
+#define GCC_APSS_TCU_ASYNC_CLK 46
+#define GCC_BIMC_GFX_CLK 47
+#define GCC_BIMC_GPU_CLK 48
+#define GCC_BLSP1_AHB_CLK 49
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 50
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 51
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 52
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 53
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 54
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 55
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 57
+#define GCC_BLSP1_UART1_APPS_CLK 58
+#define GCC_BLSP1_UART2_APPS_CLK 59
+#define GCC_BLSP2_AHB_CLK 60
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 61
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 62
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 63
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 64
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 65
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 66
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 67
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 68
+#define GCC_BLSP2_UART1_APPS_CLK 69
+#define GCC_BLSP2_UART2_APPS_CLK 70
+#define GCC_BOOT_ROM_AHB_CLK 71
+#define GCC_CAMSS_AHB_CLK 72
+#define GCC_CAMSS_CCI_AHB_CLK 73
+#define GCC_CAMSS_CCI_CLK 74
+#define GCC_CAMSS_CPP_AHB_CLK 75
+#define GCC_CAMSS_CPP_AXI_CLK 76
+#define GCC_CAMSS_CPP_CLK 77
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 78
+#define GCC_CAMSS_CSI0PHY_CLK 79
+#define GCC_CAMSS_CSI0PIX_CLK 80
+#define GCC_CAMSS_CSI0RDI_CLK 81
+#define GCC_CAMSS_CSI0_AHB_CLK 82
+#define GCC_CAMSS_CSI0_CLK 83
+#define GCC_CAMSS_CSI0_CSIPHY_3P_CLK 84
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 85
+#define GCC_CAMSS_CSI1PHY_CLK 86
+#define GCC_CAMSS_CSI1PIX_CLK 87
+#define GCC_CAMSS_CSI1RDI_CLK 88
+#define GCC_CAMSS_CSI1_AHB_CLK 89
+#define GCC_CAMSS_CSI1_CLK 90
+#define GCC_CAMSS_CSI1_CSIPHY_3P_CLK 91
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI2PHY_CLK 93
+#define GCC_CAMSS_CSI2PIX_CLK 94
+#define GCC_CAMSS_CSI2RDI_CLK 95
+#define GCC_CAMSS_CSI2_AHB_CLK 96
+#define GCC_CAMSS_CSI2_CLK 97
+#define GCC_CAMSS_CSI2_CSIPHY_3P_CLK 98
+#define GCC_CAMSS_CSI_VFE0_CLK 99
+#define GCC_CAMSS_CSI_VFE1_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_JPEG0_CLK 104
+#define GCC_CAMSS_JPEG_AHB_CLK 105
+#define GCC_CAMSS_JPEG_AXI_CLK 106
+#define GCC_CAMSS_MCLK0_CLK 107
+#define GCC_CAMSS_MCLK1_CLK 108
+#define GCC_CAMSS_MCLK2_CLK 109
+#define GCC_CAMSS_MCLK3_CLK 110
+#define GCC_CAMSS_MICRO_AHB_CLK 111
+#define GCC_CAMSS_TOP_AHB_CLK 112
+#define GCC_CAMSS_VFE0_AHB_CLK 113
+#define GCC_CAMSS_VFE0_AXI_CLK 114
+#define GCC_CAMSS_VFE0_CLK 115
+#define GCC_CAMSS_VFE1_AHB_CLK 116
+#define GCC_CAMSS_VFE1_AXI_CLK 117
+#define GCC_CAMSS_VFE1_CLK 118
+#define GCC_CPP_TBU_CLK 119
+#define GCC_CRYPTO_AHB_CLK 120
+#define GCC_CRYPTO_AXI_CLK 121
+#define GCC_CRYPTO_CLK 122
+#define GCC_DCC_CLK 123
+#define GCC_GP1_CLK 124
+#define GCC_GP2_CLK 125
+#define GCC_GP3_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_MDSS_AHB_CLK 129
+#define GCC_MDSS_AXI_CLK 130
+#define GCC_MDSS_BYTE0_CLK 131
+#define GCC_MDSS_BYTE1_CLK 132
+#define GCC_MDSS_ESC0_CLK 133
+#define GCC_MDSS_ESC1_CLK 134
+#define GCC_MDSS_MDP_CLK 135
+#define GCC_MDSS_PCLK0_CLK 136
+#define GCC_MDSS_PCLK1_CLK 137
+#define GCC_MDSS_VSYNC_CLK 138
+#define GCC_MSS_CFG_AHB_CLK 139
+#define GCC_MSS_Q6_BIMC_AXI_CLK 140
+#define GCC_OXILI_AHB_CLK 141
+#define GCC_OXILI_AON_CLK 142
+#define GCC_OXILI_GFX3D_CLK 143
+#define GCC_OXILI_TIMER_CLK 144
+#define GCC_PCNOC_USB3_AXI_CLK 145
+#define GCC_PDM2_CLK 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PRNG_AHB_CLK 148
+#define GCC_QDSS_DAP_CLK 149
+#define GCC_QUSB_REF_CLK 150
+#define GCC_RBCPR_GFX_CLK 151
+#define GCC_SDCC1_AHB_CLK 152
+#define GCC_SDCC1_APPS_CLK 153
+#define GCC_SDCC1_ICE_CORE_CLK 154
+#define GCC_SDCC2_AHB_CLK 155
+#define GCC_SDCC2_APPS_CLK 156
+#define GCC_SMMU_CFG_CLK 157
+#define GCC_USB30_MASTER_CLK 158
+#define GCC_USB30_MOCK_UTMI_CLK 159
+#define GCC_USB30_SLEEP_CLK 160
+#define GCC_USB3_AUX_CLK 161
+#define GCC_USB3_PIPE_CLK 162
+#define GCC_USB_PHY_CFG_AHB_CLK 163
+#define GCC_USB_SS_REF_CLK 164
+#define GCC_VENUS0_AHB_CLK 165
+#define GCC_VENUS0_AXI_CLK 166
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 167
+#define GCC_VENUS0_VCODEC0_CLK 168
+#define GCC_VENUS_TBU_CLK 169
+#define GCC_VFE1_TBU_CLK 170
+#define GCC_VFE_TBU_CLK 171
+#define GFX3D_CLK_SRC 172
+#define GP1_CLK_SRC 173
+#define GP2_CLK_SRC 174
+#define GP3_CLK_SRC 175
+#define GPLL0 176
+#define GPLL0_EARLY 177
+#define GPLL2 178
+#define GPLL2_EARLY 179
+#define GPLL3 180
+#define GPLL3_EARLY 181
+#define GPLL4 182
+#define GPLL4_EARLY 183
+#define GPLL6 184
+#define GPLL6_EARLY 185
+#define JPEG0_CLK_SRC 186
+#define MCLK0_CLK_SRC 187
+#define MCLK1_CLK_SRC 188
+#define MCLK2_CLK_SRC 189
+#define MCLK3_CLK_SRC 190
+#define MDP_CLK_SRC 191
+#define PCLK0_CLK_SRC 192
+#define PCLK1_CLK_SRC 193
+#define PDM2_CLK_SRC 194
+#define RBCPR_GFX_CLK_SRC 195
+#define SDCC1_APPS_CLK_SRC 196
+#define SDCC1_ICE_CORE_CLK_SRC 197
+#define SDCC2_APPS_CLK_SRC 198
+#define USB30_MASTER_CLK_SRC 199
+#define USB30_MOCK_UTMI_CLK_SRC 200
+#define USB3_AUX_CLK_SRC 201
+#define VCODEC0_CLK_SRC 202
+#define VFE0_CLK_SRC 203
+#define VFE1_CLK_SRC 204
+#define VSYNC_CLK_SRC 205
+
+/* GCC block resets */
+#define GCC_CAMSS_MICRO_BCR 0
+#define GCC_MSS_BCR 1
+#define GCC_QUSB2_PHY_BCR 2
+#define GCC_USB3PHY_PHY_BCR 3
+#define GCC_USB3_PHY_BCR 4
+#define GCC_USB_30_BCR 5
+#define GCC_MDSS_BCR 6
+#define GCC_CRYPTO_BCR 7
+#define GCC_SDCC1_BCR 8
+#define GCC_SDCC2_BCR 9
+
+/* GDSCs */
+#define CPP_GDSC 0
+#define JPEG_GDSC 1
+#define MDSS_GDSC 2
+#define OXILI_CX_GDSC 3
+#define OXILI_GX_GDSC 4
+#define USB30_GDSC 5
+#define VENUS_CORE0_GDSC 6
+#define VENUS_GDSC 7
+#define VFE0_GDSC 8
+#define VFE1_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8976.h b/include/dt-bindings/clock/qcom,gcc-msm8976.h
new file mode 100644
index 000000000000..5351f48b2068
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8976.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2021, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8976_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8976_H
+
+#define GPLL0 0
+#define GPLL2 1
+#define GPLL3 2
+#define GPLL4 3
+#define GPLL6 4
+#define GPLL0_CLK_SRC 5
+#define GPLL2_CLK_SRC 6
+#define GPLL3_CLK_SRC 7
+#define GPLL4_CLK_SRC 8
+#define GPLL6_CLK_SRC 9
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 11
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 13
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 17
+#define GCC_BLSP1_UART1_APPS_CLK 18
+#define GCC_BLSP1_UART2_APPS_CLK 19
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 26
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 27
+#define GCC_BLSP2_UART1_APPS_CLK 28
+#define GCC_BLSP2_UART2_APPS_CLK 29
+#define GCC_CAMSS_CCI_AHB_CLK 30
+#define GCC_CAMSS_CCI_CLK 31
+#define GCC_CAMSS_CPP_AHB_CLK 32
+#define GCC_CAMSS_CPP_AXI_CLK 33
+#define GCC_CAMSS_CPP_CLK 34
+#define GCC_CAMSS_CSI0_AHB_CLK 35
+#define GCC_CAMSS_CSI0_CLK 36
+#define GCC_CAMSS_CSI0PHY_CLK 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1PHY_CLK 42
+#define GCC_CAMSS_CSI1PIX_CLK 43
+#define GCC_CAMSS_CSI1RDI_CLK 44
+#define GCC_CAMSS_CSI2_AHB_CLK 45
+#define GCC_CAMSS_CSI2_CLK 46
+#define GCC_CAMSS_CSI2PHY_CLK 47
+#define GCC_CAMSS_CSI2PIX_CLK 48
+#define GCC_CAMSS_CSI2RDI_CLK 49
+#define GCC_CAMSS_CSI_VFE0_CLK 50
+#define GCC_CAMSS_CSI_VFE1_CLK 51
+#define GCC_CAMSS_GP0_CLK 52
+#define GCC_CAMSS_GP1_CLK 53
+#define GCC_CAMSS_ISPIF_AHB_CLK 54
+#define GCC_CAMSS_JPEG0_CLK 55
+#define GCC_CAMSS_JPEG_AHB_CLK 56
+#define GCC_CAMSS_JPEG_AXI_CLK 57
+#define GCC_CAMSS_MCLK0_CLK 58
+#define GCC_CAMSS_MCLK1_CLK 59
+#define GCC_CAMSS_MCLK2_CLK 60
+#define GCC_CAMSS_MICRO_AHB_CLK 61
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 62
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 63
+#define GCC_CAMSS_AHB_CLK 64
+#define GCC_CAMSS_TOP_AHB_CLK 65
+#define GCC_CAMSS_VFE0_CLK 66
+#define GCC_CAMSS_VFE_AHB_CLK 67
+#define GCC_CAMSS_VFE_AXI_CLK 68
+#define GCC_CAMSS_VFE1_AHB_CLK 69
+#define GCC_CAMSS_VFE1_AXI_CLK 70
+#define GCC_CAMSS_VFE1_CLK 71
+#define GCC_DCC_CLK 72
+#define GCC_GP1_CLK 73
+#define GCC_GP2_CLK 74
+#define GCC_GP3_CLK 75
+#define GCC_MDSS_AHB_CLK 76
+#define GCC_MDSS_AXI_CLK 77
+#define GCC_MDSS_ESC0_CLK 78
+#define GCC_MDSS_ESC1_CLK 79
+#define GCC_MDSS_MDP_CLK 80
+#define GCC_MDSS_VSYNC_CLK 81
+#define GCC_MSS_CFG_AHB_CLK 82
+#define GCC_MSS_Q6_BIMC_AXI_CLK 83
+#define GCC_PDM2_CLK 84
+#define GCC_PRNG_AHB_CLK 85
+#define GCC_PDM_AHB_CLK 86
+#define GCC_RBCPR_GFX_AHB_CLK 87
+#define GCC_RBCPR_GFX_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SDCC1_ICE_CORE_CLK 91
+#define GCC_SDCC2_AHB_CLK 92
+#define GCC_SDCC2_APPS_CLK 93
+#define GCC_SDCC3_AHB_CLK 94
+#define GCC_SDCC3_APPS_CLK 95
+#define GCC_USB2A_PHY_SLEEP_CLK 96
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 97
+#define GCC_USB_FS_AHB_CLK 98
+#define GCC_USB_FS_IC_CLK 99
+#define GCC_USB_FS_SYSTEM_CLK 100
+#define GCC_USB_HS_AHB_CLK 101
+#define GCC_USB_HS_SYSTEM_CLK 102
+#define GCC_VENUS0_AHB_CLK 103
+#define GCC_VENUS0_AXI_CLK 104
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 105
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 106
+#define GCC_VENUS0_VCODEC0_CLK 107
+#define GCC_APSS_AHB_CLK 108
+#define GCC_APSS_AXI_CLK 109
+#define GCC_BLSP1_AHB_CLK 110
+#define GCC_BLSP2_AHB_CLK 111
+#define GCC_BOOT_ROM_AHB_CLK 112
+#define GCC_CRYPTO_AHB_CLK 113
+#define GCC_CRYPTO_AXI_CLK 114
+#define GCC_CRYPTO_CLK 115
+#define GCC_CPP_TBU_CLK 116
+#define GCC_APSS_TCU_CLK 117
+#define GCC_JPEG_TBU_CLK 118
+#define GCC_MDP_RT_TBU_CLK 119
+#define GCC_MDP_TBU_CLK 120
+#define GCC_SMMU_CFG_CLK 121
+#define GCC_VENUS_1_TBU_CLK 122
+#define GCC_VENUS_TBU_CLK 123
+#define GCC_VFE1_TBU_CLK 124
+#define GCC_VFE_TBU_CLK 125
+#define GCC_APS_0_CLK 126
+#define GCC_APS_1_CLK 127
+#define APS_0_CLK_SRC 128
+#define APS_1_CLK_SRC 129
+#define APSS_AHB_CLK_SRC 130
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 131
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 132
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 133
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 134
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 135
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 136
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 137
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 138
+#define BLSP1_UART1_APPS_CLK_SRC 139
+#define BLSP1_UART2_APPS_CLK_SRC 140
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 141
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 142
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 143
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 144
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 145
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 146
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 147
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 148
+#define BLSP2_UART1_APPS_CLK_SRC 149
+#define BLSP2_UART2_APPS_CLK_SRC 150
+#define CCI_CLK_SRC 151
+#define CPP_CLK_SRC 152
+#define CSI0_CLK_SRC 153
+#define CSI1_CLK_SRC 154
+#define CSI2_CLK_SRC 155
+#define CAMSS_GP0_CLK_SRC 156
+#define CAMSS_GP1_CLK_SRC 157
+#define JPEG0_CLK_SRC 158
+#define MCLK0_CLK_SRC 159
+#define MCLK1_CLK_SRC 160
+#define MCLK2_CLK_SRC 161
+#define CSI0PHYTIMER_CLK_SRC 162
+#define CSI1PHYTIMER_CLK_SRC 163
+#define CAMSS_TOP_AHB_CLK_SRC 164
+#define VFE0_CLK_SRC 165
+#define VFE1_CLK_SRC 166
+#define CRYPTO_CLK_SRC 167
+#define GP1_CLK_SRC 168
+#define GP2_CLK_SRC 169
+#define GP3_CLK_SRC 170
+#define ESC0_CLK_SRC 171
+#define ESC1_CLK_SRC 172
+#define MDP_CLK_SRC 173
+#define VSYNC_CLK_SRC 174
+#define PDM2_CLK_SRC 175
+#define RBCPR_GFX_CLK_SRC 176
+#define SDCC1_APPS_CLK_SRC 177
+#define SDCC1_ICE_CORE_CLK_SRC 178
+#define SDCC2_APPS_CLK_SRC 179
+#define SDCC3_APPS_CLK_SRC 180
+#define USB_FS_IC_CLK_SRC 181
+#define USB_FS_SYSTEM_CLK_SRC 182
+#define USB_HS_SYSTEM_CLK_SRC 183
+#define VCODEC0_CLK_SRC 184
+#define GCC_MDSS_BYTE0_CLK_SRC 185
+#define GCC_MDSS_BYTE1_CLK_SRC 186
+#define GCC_MDSS_BYTE0_CLK 187
+#define GCC_MDSS_BYTE1_CLK 188
+#define GCC_MDSS_PCLK0_CLK_SRC 189
+#define GCC_MDSS_PCLK1_CLK_SRC 190
+#define GCC_MDSS_PCLK0_CLK 191
+#define GCC_MDSS_PCLK1_CLK 192
+#define GCC_GFX3D_CLK_SRC 193
+#define GCC_GFX3D_OXILI_CLK 194
+#define GCC_GFX3D_BIMC_CLK 195
+#define GCC_GFX3D_OXILI_AHB_CLK 196
+#define GCC_GFX3D_OXILI_AON_CLK 197
+#define GCC_GFX3D_OXILI_GMEM_CLK 198
+#define GCC_GFX3D_OXILI_TIMER_CLK 199
+#define GCC_GFX3D_TBU0_CLK 200
+#define GCC_GFX3D_TBU1_CLK 201
+#define GCC_GFX3D_TCU_CLK 202
+#define GCC_GFX3D_GTCU_AHB_CLK 203
+
+/* GCC block resets */
+#define RST_CAMSS_MICRO_BCR 0
+#define RST_USB_HS_BCR 1
+#define RST_QUSB2_PHY_BCR 2
+#define RST_USB2_HS_PHY_ONLY_BCR 3
+#define RST_USB_HS_PHY_CFG_AHB_BCR 4
+#define RST_USB_FS_BCR 5
+#define RST_CAMSS_CSI1PIX_BCR 6
+#define RST_CAMSS_CSI_VFE1_BCR 7
+#define RST_CAMSS_VFE1_BCR 8
+#define RST_CAMSS_CPP_BCR 9
+#define RST_MSS_BCR 10
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define MDSS_GDSC 3
+#define JPEG_GDSC 4
+#define VFE0_GDSC 5
+#define VFE1_GDSC 6
+#define CPP_GDSC 7
+#define OXILI_GX_GDSC 8
+#define OXILI_CX_GDSC 9
+
+#endif /* _DT_BINDINGS_CLK_MSM_GCC_8976_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h
index 938969309e00..f6836f430bb5 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8994.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h
@@ -126,5 +126,54 @@
#define GCC_USB3_PHY_AUX_CLK 116
#define GCC_USB_HS_SYSTEM_CLK 117
#define GCC_SDCC1_AHB_CLK 118
+#define GCC_LPASS_Q6_AXI_CLK 119
+#define GCC_MSS_Q6_BIMC_AXI_CLK 120
+#define GCC_PCIE_0_CFG_AHB_CLK 121
+#define GCC_PCIE_0_MSTR_AXI_CLK 122
+#define GCC_PCIE_0_SLV_AXI_CLK 123
+#define GCC_PCIE_1_CFG_AHB_CLK 124
+#define GCC_PCIE_1_MSTR_AXI_CLK 125
+#define GCC_PCIE_1_SLV_AXI_CLK 126
+#define GCC_PDM_AHB_CLK 127
+#define GCC_SDCC2_AHB_CLK 128
+#define GCC_SDCC3_AHB_CLK 129
+#define GCC_SDCC4_AHB_CLK 130
+#define GCC_TSIF_AHB_CLK 131
+#define GCC_UFS_AHB_CLK 132
+#define GCC_UFS_RX_SYMBOL_0_CLK 133
+#define GCC_UFS_RX_SYMBOL_1_CLK 134
+#define GCC_UFS_TX_SYMBOL_0_CLK 135
+#define GCC_UFS_TX_SYMBOL_1_CLK 136
+#define GCC_USB2_HS_PHY_SLEEP_CLK 137
+#define GCC_USB30_SLEEP_CLK 138
+#define GCC_USB_HS_AHB_CLK 139
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 140
+#define CONFIG_NOC_CLK_SRC 141
+#define PERIPH_NOC_CLK_SRC 142
+#define SYSTEM_NOC_CLK_SRC 143
+#define GPLL0_OUT_MMSSCC 144
+#define GPLL0_OUT_MSSCC 145
+#define PCIE_0_PHY_LDO 146
+#define PCIE_1_PHY_LDO 147
+#define UFS_PHY_LDO 148
+#define USB_SS_PHY_LDO 149
+#define GCC_BOOT_ROM_AHB_CLK 150
+#define GCC_PRNG_AHB_CLK 151
+#define GCC_USB3_PHY_PIPE_CLK 152
+
+/* GDSCs */
+#define PCIE_GDSC 0
+#define PCIE_0_GDSC 1
+#define PCIE_1_GDSC 2
+#define USB30_GDSC 3
+#define UFS_GDSC 4
+
+/* Resets */
+#define USB3_PHY_RESET 0
+#define USB3PHY_PHY_RESET 1
+#define PCIE_PHY_0_RESET 2
+#define PCIE_PHY_1_RESET 3
+#define QUSB2_PHY_RESET 4
+#define MSS_RESET 5
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 6a73a174f049..b5456a64d421 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -184,6 +184,15 @@
#define GCC_MSS_MNOC_BIMC_AXI_CLK 175
#define GCC_BIMC_GFX_CLK 176
#define UFS_UNIPRO_CORE_CLK_SRC 177
+#define GCC_MMSS_GPLL0_CLK 178
+#define HMSS_GPLL0_CLK_SRC 179
+#define GCC_IM_SLEEP 180
+#define AGGRE2_SNOC_NORTH_AXI 181
+#define SSC_XO 182
+#define SSC_CNOC_AHBS_CLK 183
+#define GCC_MMSS_GPLL0_DIV_CLK 184
+#define GCC_GPU_GPLL0_DIV_CLK 185
+#define GCC_GPU_GPLL0_CLK 186
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
diff --git a/include/dt-bindings/clock/qcom,gcc-qcm2290.h b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
new file mode 100644
index 000000000000..8d907035f9e4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL1 2
+#define GPLL10 3
+#define GPLL11 4
+#define GPLL3 5
+#define GPLL3_OUT_MAIN 6
+#define GPLL4 7
+#define GPLL5 8
+#define GPLL6 9
+#define GPLL6_OUT_MAIN 10
+#define GPLL7 11
+#define GPLL8 12
+#define GPLL8_OUT_MAIN 13
+#define GPLL9 14
+#define GPLL9_OUT_MAIN 15
+#define GCC_AHB2PHY_CSI_CLK 16
+#define GCC_AHB2PHY_USB_CLK 17
+#define GCC_APC_VS_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_CLK_SRC 30
+#define GCC_CAMSS_CPHY_0_CLK 31
+#define GCC_CAMSS_CPHY_1_CLK 32
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 33
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 34
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 35
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 36
+#define GCC_CAMSS_MCLK0_CLK 37
+#define GCC_CAMSS_MCLK0_CLK_SRC 38
+#define GCC_CAMSS_MCLK1_CLK 39
+#define GCC_CAMSS_MCLK1_CLK_SRC 40
+#define GCC_CAMSS_MCLK2_CLK 41
+#define GCC_CAMSS_MCLK2_CLK_SRC 42
+#define GCC_CAMSS_MCLK3_CLK 43
+#define GCC_CAMSS_MCLK3_CLK_SRC 44
+#define GCC_CAMSS_NRT_AXI_CLK 45
+#define GCC_CAMSS_OPE_AHB_CLK 46
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 47
+#define GCC_CAMSS_OPE_CLK 48
+#define GCC_CAMSS_OPE_CLK_SRC 49
+#define GCC_CAMSS_RT_AXI_CLK 50
+#define GCC_CAMSS_TFE_0_CLK 51
+#define GCC_CAMSS_TFE_0_CLK_SRC 52
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 53
+#define GCC_CAMSS_TFE_0_CSID_CLK 54
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 55
+#define GCC_CAMSS_TFE_1_CLK 56
+#define GCC_CAMSS_TFE_1_CLK_SRC 57
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 58
+#define GCC_CAMSS_TFE_1_CSID_CLK 59
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 60
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 61
+#define GCC_CAMSS_TOP_AHB_CLK 62
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 63
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 64
+#define GCC_CPUSS_AHB_CLK 65
+#define GCC_CPUSS_AHB_CLK_SRC 66
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 67
+#define GCC_CPUSS_GNOC_CLK 68
+#define GCC_CPUSS_THROTTLE_CORE_CLK 69
+#define GCC_CPUSS_THROTTLE_XO_CLK 70
+#define GCC_DISP_AHB_CLK 71
+#define GCC_DISP_GPLL0_CLK_SRC 72
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 73
+#define GCC_DISP_HF_AXI_CLK 74
+#define GCC_DISP_THROTTLE_CORE_CLK 75
+#define GCC_DISP_XO_CLK 76
+#define GCC_GP1_CLK 77
+#define GCC_GP1_CLK_SRC 78
+#define GCC_GP2_CLK 79
+#define GCC_GP2_CLK_SRC 80
+#define GCC_GP3_CLK 81
+#define GCC_GP3_CLK_SRC 82
+#define GCC_GPU_CFG_AHB_CLK 83
+#define GCC_GPU_GPLL0_CLK_SRC 84
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 85
+#define GCC_GPU_IREF_CLK 86
+#define GCC_GPU_MEMNOC_GFX_CLK 87
+#define GCC_GPU_SNOC_DVM_GFX_CLK 88
+#define GCC_GPU_THROTTLE_CORE_CLK 89
+#define GCC_GPU_THROTTLE_XO_CLK 90
+#define GCC_PDM2_CLK 91
+#define GCC_PDM2_CLK_SRC 92
+#define GCC_PDM_AHB_CLK 93
+#define GCC_PDM_XO4_CLK 94
+#define GCC_PWM0_XO512_CLK 95
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 96
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 97
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK 98
+#define GCC_QMIP_DISP_AHB_CLK 99
+#define GCC_QMIP_GPU_CFG_AHB_CLK 100
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 101
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP0_CORE_CLK 103
+#define GCC_QUPV3_WRAP0_S0_CLK 104
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP0_S1_CLK 106
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP0_S2_CLK 108
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP0_S3_CLK 110
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP0_S4_CLK 112
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP0_S5_CLK 114
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 116
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 117
+#define GCC_SDCC1_AHB_CLK 118
+#define GCC_SDCC1_APPS_CLK 119
+#define GCC_SDCC1_APPS_CLK_SRC 120
+#define GCC_SDCC1_ICE_CORE_CLK 121
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 122
+#define GCC_SDCC2_AHB_CLK 123
+#define GCC_SDCC2_APPS_CLK 124
+#define GCC_SDCC2_APPS_CLK_SRC 125
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 126
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+
+/* GCC resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_SDCC1_BCR 7
+#define GCC_SDCC2_BCR 8
+#define GCC_USB30_PRIM_BCR 9
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 10
+#define GCC_VCODEC0_BCR 11
+#define GCC_VENUS_BCR 12
+#define GCC_VIDEO_INTERFACE_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_USB3_PHY_PRIM_SP0_BCR 15
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 16
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_USB30_PRIM_GDSC 1
+#define GCC_VCODEC0_GDSC 2
+#define GCC_VENUS_GDSC 3
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 7
+
+#endif
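As a usage sketch: constants in a header like this are consumed from board device trees rather than driver code. In the following hypothetical fragment the &gcc and &sdhc_1 labels, and the "iface"/"core" clock-names, are illustrative assumptions and do not come from this diff; the clock, reset, and GDSC indices are the ones defined above.

    #include <dt-bindings/clock/qcom,gcc-qcm2290.h>

    &sdhc_1 {
            /* AHB interface clock plus SDCC core clock, by index */
            clocks = <&gcc GCC_SDCC1_AHB_CLK>,
                     <&gcc GCC_SDCC1_APPS_CLK>;
            clock-names = "iface", "core";
            /* block reset, from the "GCC resets" list above */
            resets = <&gcc GCC_SDCC1_BCR>;
    };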
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
index bc3051543347..126a51898571 100644
--- a/include/dt-bindings/clock/qcom,gcc-qcs404.h
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -177,4 +177,8 @@
#define GCC_PCIE_0_PIPE_ARES 21
#define GCC_WDSP_RESTART 22
+/* Indexes for GDSCs */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7280.h b/include/dt-bindings/clock/qcom,gcc-sc7280.h
new file mode 100644
index 000000000000..3d5724b79bff
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc7280.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SC7280_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL0_OUT_ODD 2
+#define GCC_GPLL1 3
+#define GCC_GPLL10 4
+#define GCC_GPLL4 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 7
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 9
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10
+#define GCC_CAMERA_AHB_CLK 11
+#define GCC_CAMERA_HF_AXI_CLK 12
+#define GCC_CAMERA_SF_AXI_CLK 13
+#define GCC_CAMERA_XO_CLK 14
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 15
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 16
+#define GCC_CPUSS_AHB_CLK 17
+#define GCC_CPUSS_AHB_CLK_SRC 18
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 19
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DDRSS_PCIE_SF_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_GPLL0_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_SF_AXI_CLK 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_IREF_EN 36
+#define GCC_GPU_MEMNOC_GFX_CLK 37
+#define GCC_GPU_SNOC_DVM_GFX_CLK 38
+#define GCC_PCIE0_PHY_RCHNG_CLK 39
+#define GCC_PCIE1_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_MSTR_AXI_CLK 44
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_PIPE_CLK_SRC 47
+#define GCC_PCIE_0_SLV_AXI_CLK 48
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 49
+#define GCC_PCIE_1_AUX_CLK 50
+#define GCC_PCIE_1_AUX_CLK_SRC 51
+#define GCC_PCIE_1_CFG_AHB_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54
+#define GCC_PCIE_1_PIPE_CLK 55
+#define GCC_PCIE_1_PIPE_CLK_SRC 56
+#define GCC_PCIE_1_SLV_AXI_CLK 57
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 58
+#define GCC_PCIE_THROTTLE_CORE_CLK 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 64
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S5_CLK 80
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S6_CLK 82
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S7_CLK 84
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 86
+#define GCC_QUPV3_WRAP1_CORE_CLK 87
+#define GCC_QUPV3_WRAP1_S0_CLK 88
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S1_CLK 90
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S2_CLK 92
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S3_CLK 94
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S4_CLK 96
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S5_CLK 98
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S6_CLK 100
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S7_CLK 102
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 103
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 106
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 107
+#define GCC_SDCC1_AHB_CLK 108
+#define GCC_SDCC1_APPS_CLK 109
+#define GCC_SDCC1_APPS_CLK_SRC 110
+#define GCC_SDCC1_ICE_CORE_CLK 111
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 112
+#define GCC_SDCC2_AHB_CLK 113
+#define GCC_SDCC2_APPS_CLK 114
+#define GCC_SDCC2_APPS_CLK_SRC 115
+#define GCC_SDCC4_AHB_CLK 116
+#define GCC_SDCC4_APPS_CLK 117
+#define GCC_SDCC4_APPS_CLK_SRC 118
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 119
+#define GCC_THROTTLE_PCIE_AHB_CLK 120
+#define GCC_TITAN_NRT_THROTTLE_CORE_CLK 121
+#define GCC_TITAN_RT_THROTTLE_CORE_CLK 122
+#define GCC_UFS_1_CLKREF_EN 123
+#define GCC_UFS_PHY_AHB_CLK 124
+#define GCC_UFS_PHY_AXI_CLK 125
+#define GCC_UFS_PHY_AXI_CLK_SRC 126
+#define GCC_UFS_PHY_ICE_CORE_CLK 127
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 128
+#define GCC_UFS_PHY_PHY_AUX_CLK 129
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 130
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 131
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 132
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 133
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 134
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 135
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 136
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 137
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 138
+#define GCC_USB30_PRIM_MASTER_CLK 139
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 140
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 141
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 143
+#define GCC_USB30_PRIM_SLEEP_CLK 144
+#define GCC_USB30_SEC_MASTER_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK_SRC 146
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 148
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_PHY_AUX_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 152
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 153
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 155
+#define GCC_USB3_SEC_PHY_AUX_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 157
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 160
+#define GCC_VIDEO_AHB_CLK 161
+#define GCC_VIDEO_AXI0_CLK 162
+#define GCC_VIDEO_MVP_THROTTLE_CORE_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GCC_GPLL0_MAIN_DIV_CDIV 165
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 166
+#define GCC_QSPI_CORE_CLK 167
+#define GCC_QSPI_CORE_CLK_SRC 168
+#define GCC_CFG_NOC_LPASS_CLK 169
+#define GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC 170
+#define GCC_MSS_CFG_AHB_CLK 171
+#define GCC_MSS_OFFLINE_AXI_CLK 172
+#define GCC_MSS_SNOC_AXI_CLK 173
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 174
+#define GCC_MSS_Q6SS_BOOT_CLK_SRC 175
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 176
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 177
+#define GCC_AGGRE_NOC_PCIE_CENTER_SF_AXI_CLK 178
+#define GCC_PCIE_CLKREF_EN 179
+#define GCC_WPSS_AHB_CLK 180
+#define GCC_WPSS_AHB_BDG_MST_CLK 181
+#define GCC_WPSS_RSCP_CLK 182
+#define GCC_EDP_CLKREF_EN 183
+#define GCC_SEC_CTRL_CLK_SRC 184
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_1_GDSC 1
+#define GCC_UFS_PHY_GDSC 2
+#define GCC_USB30_PRIM_GDSC 3
+#define GCC_USB30_SEC_GDSC 4
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 5
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 9
+
+/* GCC resets */
+#define GCC_PCIE_0_BCR 0
+#define GCC_PCIE_0_PHY_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_1_PHY_BCR 3
+#define GCC_QUSB2PHY_PRIM_BCR 4
+#define GCC_QUSB2PHY_SEC_BCR 5
+#define GCC_SDCC1_BCR 6
+#define GCC_SDCC2_BCR 7
+#define GCC_SDCC4_BCR 8
+#define GCC_UFS_PHY_BCR 9
+#define GCC_USB30_PRIM_BCR 10
+#define GCC_USB30_SEC_BCR 11
+#define GCC_USB3_DP_PHY_PRIM_BCR 12
+#define GCC_USB3_PHY_PRIM_BCR 13
+#define GCC_USB3PHY_PHY_PRIM_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
new file mode 100644
index 000000000000..90c6e021a035
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC8180X_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SC8180X_H
+
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4
+#define GCC_AGGRE_USB3_MP_AXI_CLK 5
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 7
+#define GCC_BOOT_ROM_AHB_CLK 8
+#define GCC_CAMERA_HF_AXI_CLK 9
+#define GCC_CAMERA_SF_AXI_CLK 10
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_DDRSS_GPU_AXI_CLK 17
+#define GCC_DISP_HF_AXI_CLK 18
+#define GCC_DISP_SF_AXI_CLK 19
+#define GCC_EMAC_AXI_CLK 20
+#define GCC_EMAC_PTP_CLK 21
+#define GCC_EMAC_PTP_CLK_SRC 22
+#define GCC_EMAC_RGMII_CLK 23
+#define GCC_EMAC_RGMII_CLK_SRC 24
+#define GCC_EMAC_SLV_AHB_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GP4_CLK 32
+#define GCC_GP4_CLK_SRC 33
+#define GCC_GP5_CLK 34
+#define GCC_GP5_CLK_SRC 35
+#define GCC_GPU_GPLL0_CLK_SRC 36
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 37
+#define GCC_GPU_MEMNOC_GFX_CLK 38
+#define GCC_GPU_SNOC_DVM_GFX_CLK 39
+#define GCC_NPU_AT_CLK 40
+#define GCC_NPU_AXI_CLK 41
+#define GCC_NPU_AXI_CLK_SRC 42
+#define GCC_NPU_GPLL0_CLK_SRC 43
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 44
+#define GCC_NPU_TRIG_CLK 45
+#define GCC_PCIE0_PHY_REFGEN_CLK 46
+#define GCC_PCIE1_PHY_REFGEN_CLK 47
+#define GCC_PCIE2_PHY_REFGEN_CLK 48
+#define GCC_PCIE3_PHY_REFGEN_CLK 49
+#define GCC_PCIE_0_AUX_CLK 50
+#define GCC_PCIE_0_AUX_CLK_SRC 51
+#define GCC_PCIE_0_CFG_AHB_CLK 52
+#define GCC_PCIE_0_MSTR_AXI_CLK 53
+#define GCC_PCIE_0_PIPE_CLK 54
+#define GCC_PCIE_0_SLV_AXI_CLK 55
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_1_AUX_CLK 57
+#define GCC_PCIE_1_AUX_CLK_SRC 58
+#define GCC_PCIE_1_CFG_AHB_CLK 59
+#define GCC_PCIE_1_MSTR_AXI_CLK 60
+#define GCC_PCIE_1_PIPE_CLK 61
+#define GCC_PCIE_1_SLV_AXI_CLK 62
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 63
+#define GCC_PCIE_2_AUX_CLK 64
+#define GCC_PCIE_2_AUX_CLK_SRC 65
+#define GCC_PCIE_2_CFG_AHB_CLK 66
+#define GCC_PCIE_2_MSTR_AXI_CLK 67
+#define GCC_PCIE_2_PIPE_CLK 68
+#define GCC_PCIE_2_SLV_AXI_CLK 69
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 70
+#define GCC_PCIE_3_AUX_CLK 71
+#define GCC_PCIE_3_AUX_CLK_SRC 72
+#define GCC_PCIE_3_CFG_AHB_CLK 73
+#define GCC_PCIE_3_MSTR_AXI_CLK 74
+#define GCC_PCIE_3_PIPE_CLK 75
+#define GCC_PCIE_3_SLV_AXI_CLK 76
+#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 77
+#define GCC_PCIE_PHY_AUX_CLK 78
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 79
+#define GCC_PDM2_CLK 80
+#define GCC_PDM2_CLK_SRC 81
+#define GCC_PDM_AHB_CLK 82
+#define GCC_PDM_XO4_CLK 83
+#define GCC_PRNG_AHB_CLK 84
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 86
+#define GCC_QMIP_DISP_AHB_CLK 87
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 88
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89
+#define GCC_QSPI_1_CNOC_PERIPH_AHB_CLK 90
+#define GCC_QSPI_1_CORE_CLK 91
+#define GCC_QSPI_1_CORE_CLK_SRC 92
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 93
+#define GCC_QSPI_CORE_CLK 94
+#define GCC_QSPI_CORE_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S0_CLK 96
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S1_CLK 98
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S2_CLK 100
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 101
+#define GCC_QUPV3_WRAP0_S3_CLK 102
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 103
+#define GCC_QUPV3_WRAP0_S4_CLK 104
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 105
+#define GCC_QUPV3_WRAP0_S5_CLK 106
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 107
+#define GCC_QUPV3_WRAP0_S6_CLK 108
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 109
+#define GCC_QUPV3_WRAP0_S7_CLK 110
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 111
+#define GCC_QUPV3_WRAP1_S0_CLK 112
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 113
+#define GCC_QUPV3_WRAP1_S1_CLK 114
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 115
+#define GCC_QUPV3_WRAP1_S2_CLK 116
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 117
+#define GCC_QUPV3_WRAP1_S3_CLK 118
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 119
+#define GCC_QUPV3_WRAP1_S4_CLK 120
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 121
+#define GCC_QUPV3_WRAP1_S5_CLK 122
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S0_CLK 124
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S1_CLK 126
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S2_CLK 128
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S3_CLK 130
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S4_CLK 132
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 133
+#define GCC_QUPV3_WRAP2_S5_CLK 134
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 135
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 136
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 137
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 138
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 139
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 141
+#define GCC_SDCC2_AHB_CLK 142
+#define GCC_SDCC2_APPS_CLK 143
+#define GCC_SDCC2_APPS_CLK_SRC 144
+#define GCC_SDCC4_AHB_CLK 145
+#define GCC_SDCC4_APPS_CLK 146
+#define GCC_SDCC4_APPS_CLK_SRC 147
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 148
+#define GCC_TSIF_AHB_CLK 149
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 150
+#define GCC_TSIF_REF_CLK 151
+#define GCC_TSIF_REF_CLK_SRC 152
+#define GCC_UFS_CARD_2_AHB_CLK 153
+#define GCC_UFS_CARD_2_AXI_CLK 154
+#define GCC_UFS_CARD_2_AXI_CLK_SRC 155
+#define GCC_UFS_CARD_2_ICE_CORE_CLK 156
+#define GCC_UFS_CARD_2_ICE_CORE_CLK_SRC 157
+#define GCC_UFS_CARD_2_PHY_AUX_CLK 158
+#define GCC_UFS_CARD_2_PHY_AUX_CLK_SRC 159
+#define GCC_UFS_CARD_2_RX_SYMBOL_0_CLK 160
+#define GCC_UFS_CARD_2_RX_SYMBOL_1_CLK 161
+#define GCC_UFS_CARD_2_TX_SYMBOL_0_CLK 162
+#define GCC_UFS_CARD_2_UNIPRO_CORE_CLK 163
+#define GCC_UFS_CARD_2_UNIPRO_CORE_CLK_SRC 164
+#define GCC_UFS_CARD_AHB_CLK 165
+#define GCC_UFS_CARD_AXI_CLK 166
+#define GCC_UFS_CARD_AXI_CLK_SRC 167
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 168
+#define GCC_UFS_CARD_ICE_CORE_CLK 169
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 170
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 171
+#define GCC_UFS_CARD_PHY_AUX_CLK 172
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 173
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 174
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 175
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 176
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 177
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 178
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 179
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 180
+#define GCC_UFS_PHY_AHB_CLK 181
+#define GCC_UFS_PHY_AXI_CLK 182
+#define GCC_UFS_PHY_AXI_CLK_SRC 183
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 184
+#define GCC_UFS_PHY_ICE_CORE_CLK 185
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 186
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 187
+#define GCC_UFS_PHY_PHY_AUX_CLK 188
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 189
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 190
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 191
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 192
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 193
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 194
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 195
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 196
+#define GCC_USB30_MP_MASTER_CLK 197
+#define GCC_USB30_MP_MASTER_CLK_SRC 198
+#define GCC_USB30_MP_MOCK_UTMI_CLK 199
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 200
+#define GCC_USB30_MP_SLEEP_CLK 201
+#define GCC_USB30_PRIM_MASTER_CLK 202
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 203
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 204
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 205
+#define GCC_USB30_PRIM_SLEEP_CLK 206
+#define GCC_USB30_SEC_MASTER_CLK 207
+#define GCC_USB30_SEC_MASTER_CLK_SRC 208
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 209
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 210
+#define GCC_USB30_SEC_SLEEP_CLK 211
+#define GCC_USB3_MP_PHY_AUX_CLK 212
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 213
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 214
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 215
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 216
+#define GCC_USB3_PRIM_PHY_AUX_CLK 217
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 218
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 219
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 220
+#define GCC_USB3_SEC_PHY_AUX_CLK 221
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 222
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 223
+#define GCC_USB3_SEC_PHY_PIPE_CLK 224
+#define GCC_VIDEO_AXI0_CLK 225
+#define GCC_VIDEO_AXI1_CLK 226
+#define GCC_VIDEO_AXIC_CLK 227
+#define GPLL0 228
+#define GPLL0_OUT_EVEN 229
+#define GPLL1 230
+#define GPLL4 231
+#define GPLL7 232
+#define GCC_PCIE_0_CLKREF_CLK 233
+#define GCC_PCIE_1_CLKREF_CLK 234
+#define GCC_PCIE_2_CLKREF_CLK 235
+#define GCC_PCIE_3_CLKREF_CLK 236
+#define GCC_USB3_PRIM_CLKREF_CLK 237
+#define GCC_USB3_SEC_CLKREF_CLK 238
+#define GCC_UFS_MEM_CLKREF_EN 239
+#define GCC_UFS_CARD_CLKREF_EN 240
+
+#define GCC_EMAC_BCR 0
+#define GCC_GPU_BCR 1
+#define GCC_MMSS_BCR 2
+#define GCC_NPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_PHY_BCR 5
+#define GCC_PCIE_1_BCR 6
+#define GCC_PCIE_1_PHY_BCR 7
+#define GCC_PCIE_2_BCR 8
+#define GCC_PCIE_2_PHY_BCR 9
+#define GCC_PCIE_3_BCR 10
+#define GCC_PCIE_3_PHY_BCR 11
+#define GCC_PCIE_PHY_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_QSPI_1_BCR 15
+#define GCC_QSPI_BCR 16
+#define GCC_QUPV3_WRAPPER_0_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUPV3_WRAPPER_2_BCR 19
+#define GCC_QUSB2PHY_5_BCR 20
+#define GCC_QUSB2PHY_MP0_BCR 21
+#define GCC_QUSB2PHY_MP1_BCR 22
+#define GCC_QUSB2PHY_PRIM_BCR 23
+#define GCC_QUSB2PHY_SEC_BCR 24
+#define GCC_USB3_PHY_PRIM_SP0_BCR 25
+#define GCC_USB3_PHY_PRIM_SP1_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_SP0_BCR 27
+#define GCC_USB3_DP_PHY_PRIM_SP1_BCR 28
+#define GCC_USB3_PHY_SEC_BCR 29
+#define GCC_USB3PHY_PHY_SEC_BCR 30
+#define GCC_SDCC2_BCR 31
+#define GCC_SDCC4_BCR 32
+#define GCC_TSIF_BCR 33
+#define GCC_UFS_CARD_2_BCR 34
+#define GCC_UFS_CARD_BCR 35
+#define GCC_UFS_PHY_BCR 36
+#define GCC_USB30_MP_BCR 37
+#define GCC_USB30_PRIM_BCR 38
+#define GCC_USB30_SEC_BCR 39
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 40
+#define GCC_VIDEO_AXIC_CLK_BCR 41
+#define GCC_VIDEO_AXI0_CLK_BCR 42
+#define GCC_VIDEO_AXI1_CLK_BCR 43
+#define GCC_USB3_DP_PHY_SEC_BCR 44
+
+/* GCC GDSCRs */
+#define EMAC_GDSC 0
+#define PCIE_0_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_2_GDSC 3
+#define PCIE_3_GDSC 4
+#define UFS_CARD_2_GDSC 5
+#define UFS_CARD_GDSC 6
+#define UFS_PHY_GDSC 7
+#define USB30_MP_GDSC 8
+#define USB30_PRIM_GDSC 9
+#define USB30_SEC_GDSC 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
new file mode 100644
index 000000000000..845491591784
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL7 4
+#define GCC_GPLL8 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK 7
+#define GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_4_AXI_CLK 9
+#define GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK 10
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 11
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 12
+#define GCC_AGGRE_USB3_MP_AXI_CLK 13
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 14
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 15
+#define GCC_AGGRE_USB4_1_AXI_CLK 16
+#define GCC_AGGRE_USB4_AXI_CLK 17
+#define GCC_AGGRE_USB_NOC_AXI_CLK 18
+#define GCC_AGGRE_USB_NOC_NORTH_AXI_CLK 19
+#define GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK 20
+#define GCC_AHB2PHY0_CLK 21
+#define GCC_AHB2PHY2_CLK 22
+#define GCC_BOOT_ROM_AHB_CLK 23
+#define GCC_CAMERA_AHB_CLK 24
+#define GCC_CAMERA_HF_AXI_CLK 25
+#define GCC_CAMERA_SF_AXI_CLK 26
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 27
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 28
+#define GCC_CAMERA_THROTTLE_XO_CLK 29
+#define GCC_CAMERA_XO_CLK 30
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 31
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 32
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 33
+#define GCC_CNOC_PCIE0_TUNNEL_CLK 34
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 35
+#define GCC_CNOC_PCIE4_QX_CLK 36
+#define GCC_DDRSS_GPU_AXI_CLK 37
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 38
+#define GCC_DISP1_AHB_CLK 39
+#define GCC_DISP1_HF_AXI_CLK 40
+#define GCC_DISP1_SF_AXI_CLK 41
+#define GCC_DISP1_THROTTLE_NRT_AXI_CLK 42
+#define GCC_DISP1_THROTTLE_RT_AXI_CLK 43
+#define GCC_DISP1_XO_CLK 44
+#define GCC_DISP_AHB_CLK 45
+#define GCC_DISP_HF_AXI_CLK 46
+#define GCC_DISP_SF_AXI_CLK 47
+#define GCC_DISP_THROTTLE_NRT_AXI_CLK 48
+#define GCC_DISP_THROTTLE_RT_AXI_CLK 49
+#define GCC_DISP_XO_CLK 50
+#define GCC_EMAC0_AXI_CLK 51
+#define GCC_EMAC0_PTP_CLK 52
+#define GCC_EMAC0_PTP_CLK_SRC 53
+#define GCC_EMAC0_RGMII_CLK 54
+#define GCC_EMAC0_RGMII_CLK_SRC 55
+#define GCC_EMAC0_SLV_AHB_CLK 56
+#define GCC_EMAC1_AXI_CLK 57
+#define GCC_EMAC1_PTP_CLK 58
+#define GCC_EMAC1_PTP_CLK_SRC 59
+#define GCC_EMAC1_RGMII_CLK 60
+#define GCC_EMAC1_RGMII_CLK_SRC 61
+#define GCC_EMAC1_SLV_AHB_CLK 62
+#define GCC_GP1_CLK 63
+#define GCC_GP1_CLK_SRC 64
+#define GCC_GP2_CLK 65
+#define GCC_GP2_CLK_SRC 66
+#define GCC_GP3_CLK 67
+#define GCC_GP3_CLK_SRC 68
+#define GCC_GP4_CLK 69
+#define GCC_GP4_CLK_SRC 70
+#define GCC_GP5_CLK 71
+#define GCC_GP5_CLK_SRC 72
+#define GCC_GPU_CFG_AHB_CLK 73
+#define GCC_GPU_GPLL0_CLK_SRC 74
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 75
+#define GCC_GPU_IREF_EN 76
+#define GCC_GPU_MEMNOC_GFX_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 79
+#define GCC_GPU_TCU_THROTTLE_CLK 80
+#define GCC_PCIE0_PHY_RCHNG_CLK 81
+#define GCC_PCIE1_PHY_RCHNG_CLK 82
+#define GCC_PCIE2A_PHY_RCHNG_CLK 83
+#define GCC_PCIE2B_PHY_RCHNG_CLK 84
+#define GCC_PCIE3A_PHY_RCHNG_CLK 85
+#define GCC_PCIE3B_PHY_RCHNG_CLK 86
+#define GCC_PCIE4_PHY_RCHNG_CLK 87
+#define GCC_PCIE_0_AUX_CLK 88
+#define GCC_PCIE_0_AUX_CLK_SRC 89
+#define GCC_PCIE_0_CFG_AHB_CLK 90
+#define GCC_PCIE_0_MSTR_AXI_CLK 91
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 92
+#define GCC_PCIE_0_PIPE_CLK 93
+#define GCC_PCIE_0_SLV_AXI_CLK 94
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_1_AUX_CLK 96
+#define GCC_PCIE_1_AUX_CLK_SRC 97
+#define GCC_PCIE_1_CFG_AHB_CLK 98
+#define GCC_PCIE_1_MSTR_AXI_CLK 99
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 100
+#define GCC_PCIE_1_PIPE_CLK 101
+#define GCC_PCIE_1_SLV_AXI_CLK 102
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 103
+#define GCC_PCIE_2A2B_CLKREF_CLK 104
+#define GCC_PCIE_2A_AUX_CLK 105
+#define GCC_PCIE_2A_AUX_CLK_SRC 106
+#define GCC_PCIE_2A_CFG_AHB_CLK 107
+#define GCC_PCIE_2A_MSTR_AXI_CLK 108
+#define GCC_PCIE_2A_PHY_RCHNG_CLK_SRC 109
+#define GCC_PCIE_2A_PIPE_CLK 110
+#define GCC_PCIE_2A_PIPE_CLK_SRC 111
+#define GCC_PCIE_2A_PIPE_DIV_CLK_SRC 112
+#define GCC_PCIE_2A_PIPEDIV2_CLK 113
+#define GCC_PCIE_2A_SLV_AXI_CLK 114
+#define GCC_PCIE_2A_SLV_Q2A_AXI_CLK 115
+#define GCC_PCIE_2B_AUX_CLK 116
+#define GCC_PCIE_2B_AUX_CLK_SRC 117
+#define GCC_PCIE_2B_CFG_AHB_CLK 118
+#define GCC_PCIE_2B_MSTR_AXI_CLK 119
+#define GCC_PCIE_2B_PHY_RCHNG_CLK_SRC 120
+#define GCC_PCIE_2B_PIPE_CLK 121
+#define GCC_PCIE_2B_PIPE_CLK_SRC 122
+#define GCC_PCIE_2B_PIPE_DIV_CLK_SRC 123
+#define GCC_PCIE_2B_PIPEDIV2_CLK 124
+#define GCC_PCIE_2B_SLV_AXI_CLK 125
+#define GCC_PCIE_2B_SLV_Q2A_AXI_CLK 126
+#define GCC_PCIE_3A3B_CLKREF_CLK 127
+#define GCC_PCIE_3A_AUX_CLK 128
+#define GCC_PCIE_3A_AUX_CLK_SRC 129
+#define GCC_PCIE_3A_CFG_AHB_CLK 130
+#define GCC_PCIE_3A_MSTR_AXI_CLK 131
+#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 132
+#define GCC_PCIE_3A_PIPE_CLK 133
+#define GCC_PCIE_3A_PIPE_CLK_SRC 134
+#define GCC_PCIE_3A_PIPE_DIV_CLK_SRC 135
+#define GCC_PCIE_3A_PIPEDIV2_CLK 136
+#define GCC_PCIE_3A_SLV_AXI_CLK 137
+#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 138
+#define GCC_PCIE_3B_AUX_CLK 139
+#define GCC_PCIE_3B_AUX_CLK_SRC 140
+#define GCC_PCIE_3B_CFG_AHB_CLK 141
+#define GCC_PCIE_3B_MSTR_AXI_CLK 142
+#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 143
+#define GCC_PCIE_3B_PIPE_CLK 144
+#define GCC_PCIE_3B_PIPE_CLK_SRC 145
+#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 146
+#define GCC_PCIE_3B_PIPEDIV2_CLK 147
+#define GCC_PCIE_3B_SLV_AXI_CLK 148
+#define GCC_PCIE_3B_SLV_Q2A_AXI_CLK 149
+#define GCC_PCIE_4_AUX_CLK 150
+#define GCC_PCIE_4_AUX_CLK_SRC 151
+#define GCC_PCIE_4_CFG_AHB_CLK 152
+#define GCC_PCIE_4_CLKREF_CLK 153
+#define GCC_PCIE_4_MSTR_AXI_CLK 154
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 155
+#define GCC_PCIE_4_PIPE_CLK 156
+#define GCC_PCIE_4_PIPE_CLK_SRC 157
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 158
+#define GCC_PCIE_4_PIPEDIV2_CLK 159
+#define GCC_PCIE_4_SLV_AXI_CLK 160
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 161
+#define GCC_PCIE_RSCC_AHB_CLK 162
+#define GCC_PCIE_RSCC_XO_CLK 163
+#define GCC_PCIE_RSCC_XO_CLK_SRC 164
+#define GCC_PCIE_THROTTLE_CFG_CLK 165
+#define GCC_PDM2_CLK 166
+#define GCC_PDM2_CLK_SRC 167
+#define GCC_PDM_AHB_CLK 168
+#define GCC_PDM_XO4_CLK 169
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 170
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 171
+#define GCC_QMIP_DISP1_AHB_CLK 172
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 173
+#define GCC_QMIP_DISP_AHB_CLK 174
+#define GCC_QMIP_DISP_ROT_AHB_CLK 175
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 177
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 178
+#define GCC_QUPV3_WRAP0_CORE_CLK 179
+#define GCC_QUPV3_WRAP0_QSPI0_CLK 180
+#define GCC_QUPV3_WRAP0_S0_CLK 181
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 182
+#define GCC_QUPV3_WRAP0_S1_CLK 183
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 184
+#define GCC_QUPV3_WRAP0_S2_CLK 185
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 186
+#define GCC_QUPV3_WRAP0_S3_CLK 187
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 188
+#define GCC_QUPV3_WRAP0_S4_CLK 189
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 190
+#define GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC 191
+#define GCC_QUPV3_WRAP0_S5_CLK 192
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 193
+#define GCC_QUPV3_WRAP0_S6_CLK 194
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 195
+#define GCC_QUPV3_WRAP0_S7_CLK 196
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 198
+#define GCC_QUPV3_WRAP1_CORE_CLK 199
+#define GCC_QUPV3_WRAP1_QSPI0_CLK 200
+#define GCC_QUPV3_WRAP1_S0_CLK 201
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 202
+#define GCC_QUPV3_WRAP1_S1_CLK 203
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 204
+#define GCC_QUPV3_WRAP1_S2_CLK 205
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 206
+#define GCC_QUPV3_WRAP1_S3_CLK 207
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 208
+#define GCC_QUPV3_WRAP1_S4_CLK 209
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 210
+#define GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC 211
+#define GCC_QUPV3_WRAP1_S5_CLK 212
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 213
+#define GCC_QUPV3_WRAP1_S6_CLK 214
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 215
+#define GCC_QUPV3_WRAP1_S7_CLK 216
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 218
+#define GCC_QUPV3_WRAP2_CORE_CLK 219
+#define GCC_QUPV3_WRAP2_QSPI0_CLK 220
+#define GCC_QUPV3_WRAP2_S0_CLK 221
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 222
+#define GCC_QUPV3_WRAP2_S1_CLK 223
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 224
+#define GCC_QUPV3_WRAP2_S2_CLK 225
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 226
+#define GCC_QUPV3_WRAP2_S3_CLK 227
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 228
+#define GCC_QUPV3_WRAP2_S4_CLK 229
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 230
+#define GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC 231
+#define GCC_QUPV3_WRAP2_S5_CLK 232
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 233
+#define GCC_QUPV3_WRAP2_S6_CLK 234
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 235
+#define GCC_QUPV3_WRAP2_S7_CLK 236
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 237
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 238
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 239
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 240
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 241
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 242
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 243
+#define GCC_SDCC2_AHB_CLK 244
+#define GCC_SDCC2_APPS_CLK 245
+#define GCC_SDCC2_APPS_CLK_SRC 246
+#define GCC_SDCC4_AHB_CLK 247
+#define GCC_SDCC4_APPS_CLK 248
+#define GCC_SDCC4_APPS_CLK_SRC 249
+#define GCC_SYS_NOC_USB_AXI_CLK 250
+#define GCC_UFS_1_CARD_CLKREF_CLK 251
+#define GCC_UFS_CARD_AHB_CLK 252
+#define GCC_UFS_CARD_AXI_CLK 253
+#define GCC_UFS_CARD_AXI_CLK_SRC 254
+#define GCC_UFS_CARD_CLKREF_CLK 255
+#define GCC_UFS_CARD_ICE_CORE_CLK 256
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 257
+#define GCC_UFS_CARD_PHY_AUX_CLK 258
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 259
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 260
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 261
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 262
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 263
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 264
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 265
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 266
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 267
+#define GCC_UFS_PHY_AHB_CLK 268
+#define GCC_UFS_PHY_AXI_CLK 269
+#define GCC_UFS_PHY_AXI_CLK_SRC 270
+#define GCC_UFS_PHY_ICE_CORE_CLK 271
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 272
+#define GCC_UFS_PHY_PHY_AUX_CLK 273
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 274
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 275
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 276
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 277
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 278
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 279
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 280
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 281
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 282
+#define GCC_UFS_REF_CLKREF_CLK 283
+#define GCC_USB2_HS0_CLKREF_CLK 284
+#define GCC_USB2_HS1_CLKREF_CLK 285
+#define GCC_USB2_HS2_CLKREF_CLK 286
+#define GCC_USB2_HS3_CLKREF_CLK 287
+#define GCC_USB30_MP_MASTER_CLK 288
+#define GCC_USB30_MP_MASTER_CLK_SRC 289
+#define GCC_USB30_MP_MOCK_UTMI_CLK 290
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 291
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 292
+#define GCC_USB30_MP_SLEEP_CLK 293
+#define GCC_USB30_PRIM_MASTER_CLK 294
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 295
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 296
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 297
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 298
+#define GCC_USB30_PRIM_SLEEP_CLK 299
+#define GCC_USB30_SEC_MASTER_CLK 300
+#define GCC_USB30_SEC_MASTER_CLK_SRC 301
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 302
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 303
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 304
+#define GCC_USB30_SEC_SLEEP_CLK 305
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 306
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 307
+#define GCC_USB3_MP0_CLKREF_CLK 308
+#define GCC_USB3_MP1_CLKREF_CLK 309
+#define GCC_USB3_MP_PHY_AUX_CLK 310
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 311
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 312
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 313
+#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 314
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 315
+#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 316
+#define GCC_USB3_PRIM_PHY_AUX_CLK 317
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 318
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 319
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 320
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 321
+#define GCC_USB3_SEC_PHY_AUX_CLK 322
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 323
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 324
+#define GCC_USB3_SEC_PHY_PIPE_CLK 325
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 326
+#define GCC_USB4_1_CFG_AHB_CLK 327
+#define GCC_USB4_1_DP_CLK 328
+#define GCC_USB4_1_MASTER_CLK 329
+#define GCC_USB4_1_MASTER_CLK_SRC 330
+#define GCC_USB4_1_PHY_DP_CLK_SRC 331
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 332
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 333
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 336
+#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 337
+#define GCC_USB4_1_PHY_RX0_CLK 338
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 339
+#define GCC_USB4_1_PHY_RX1_CLK 340
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 341
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 342
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 343
+#define GCC_USB4_1_SB_IF_CLK 344
+#define GCC_USB4_1_SB_IF_CLK_SRC 345
+#define GCC_USB4_1_SYS_CLK 346
+#define GCC_USB4_1_TMU_CLK 347
+#define GCC_USB4_1_TMU_CLK_SRC 348
+#define GCC_USB4_CFG_AHB_CLK 349
+#define GCC_USB4_CLKREF_CLK 350
+#define GCC_USB4_DP_CLK 351
+#define GCC_USB4_EUD_CLKREF_CLK 352
+#define GCC_USB4_MASTER_CLK 353
+#define GCC_USB4_MASTER_CLK_SRC 354
+#define GCC_USB4_PHY_DP_CLK_SRC 355
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK 356
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC 357
+#define GCC_USB4_PHY_PCIE_PIPE_CLK 358
+#define GCC_USB4_PHY_PCIE_PIPE_CLK_SRC 359
+#define GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC 360
+#define GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC 361
+#define GCC_USB4_PHY_RX0_CLK 362
+#define GCC_USB4_PHY_RX0_CLK_SRC 363
+#define GCC_USB4_PHY_RX1_CLK 364
+#define GCC_USB4_PHY_RX1_CLK_SRC 365
+#define GCC_USB4_PHY_SYS_CLK_SRC 366
+#define GCC_USB4_PHY_USB_PIPE_CLK 367
+#define GCC_USB4_SB_IF_CLK 368
+#define GCC_USB4_SB_IF_CLK_SRC 369
+#define GCC_USB4_SYS_CLK 370
+#define GCC_USB4_TMU_CLK 371
+#define GCC_USB4_TMU_CLK_SRC 372
+#define GCC_VIDEO_AHB_CLK 373
+#define GCC_VIDEO_AXI0_CLK 374
+#define GCC_VIDEO_AXI1_CLK 375
+#define GCC_VIDEO_CVP_THROTTLE_CLK 376
+#define GCC_VIDEO_VCODEC_THROTTLE_CLK 377
+#define GCC_VIDEO_XO_CLK 378
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 379
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 380
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 381
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 382
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 383
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 384
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 385
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 386
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 387
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 388
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_PCIE_0_LINK_DOWN_BCR 2
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_0_PHY_BCR 4
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_TUNNEL_BCR 6
+#define GCC_PCIE_1_LINK_DOWN_BCR 7
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_PHY_BCR 9
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_TUNNEL_BCR 11
+#define GCC_PCIE_2A_BCR 12
+#define GCC_PCIE_2A_LINK_DOWN_BCR 13
+#define GCC_PCIE_2A_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_2A_PHY_BCR 15
+#define GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2B_BCR 17
+#define GCC_PCIE_2B_LINK_DOWN_BCR 18
+#define GCC_PCIE_2B_NOCSR_COM_PHY_BCR 19
+#define GCC_PCIE_2B_PHY_BCR 20
+#define GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3A_BCR 22
+#define GCC_PCIE_3A_LINK_DOWN_BCR 23
+#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 24
+#define GCC_PCIE_3A_PHY_BCR 25
+#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_3B_BCR 27
+#define GCC_PCIE_3B_LINK_DOWN_BCR 28
+#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 29
+#define GCC_PCIE_3B_PHY_BCR 30
+#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_4_BCR 32
+#define GCC_PCIE_4_LINK_DOWN_BCR 33
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 34
+#define GCC_PCIE_4_PHY_BCR 35
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_PHY_CFG_AHB_BCR 37
+#define GCC_PCIE_PHY_COM_BCR 38
+#define GCC_PCIE_RSCC_BCR 39
+#define GCC_QUSB2PHY_HS0_MP_BCR 40
+#define GCC_QUSB2PHY_HS1_MP_BCR 41
+#define GCC_QUSB2PHY_HS2_MP_BCR 42
+#define GCC_QUSB2PHY_HS3_MP_BCR 43
+#define GCC_QUSB2PHY_PRIM_BCR 44
+#define GCC_QUSB2PHY_SEC_BCR 45
+#define GCC_SDCC2_BCR 46
+#define GCC_SDCC4_BCR 47
+#define GCC_UFS_CARD_BCR 48
+#define GCC_UFS_PHY_BCR 49
+#define GCC_USB2_PHY_PRIM_BCR 50
+#define GCC_USB2_PHY_SEC_BCR 51
+#define GCC_USB30_MP_BCR 52
+#define GCC_USB30_PRIM_BCR 53
+#define GCC_USB30_SEC_BCR 54
+#define GCC_USB3_DP_PHY_PRIM_BCR 55
+#define GCC_USB3_DP_PHY_SEC_BCR 56
+#define GCC_USB3_PHY_PRIM_BCR 57
+#define GCC_USB3_PHY_SEC_BCR 58
+#define GCC_USB3_UNIPHY_MP0_BCR 59
+#define GCC_USB3_UNIPHY_MP1_BCR 60
+#define GCC_USB3PHY_PHY_PRIM_BCR 61
+#define GCC_USB3PHY_PHY_SEC_BCR 62
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 63
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 64
+#define GCC_USB4_1_BCR 65
+#define GCC_USB4_1_DP_PHY_PRIM_BCR 66
+#define GCC_USB4_1_DPPHY_AUX_BCR 67
+#define GCC_USB4_1_PHY_PRIM_BCR 68
+#define GCC_USB4_BCR 69
+#define GCC_USB4_DP_PHY_PRIM_BCR 70
+#define GCC_USB4_DPPHY_AUX_BCR 71
+#define GCC_USB4_PHY_PRIM_BCR 72
+#define GCC_USB4PHY_1_PHY_PRIM_BCR 73
+#define GCC_USB4PHY_PHY_PRIM_BCR 74
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 75
+#define GCC_VIDEO_BCR 76
+#define GCC_VIDEO_AXI0_CLK_ARES 77
+#define GCC_VIDEO_AXI1_CLK_ARES 78
+
+/* GCC GDSCs */
+#define PCIE_0_TUNNEL_GDSC 0
+#define PCIE_1_TUNNEL_GDSC 1
+#define PCIE_2A_GDSC 2
+#define PCIE_2B_GDSC 3
+#define PCIE_3A_GDSC 4
+#define PCIE_3B_GDSC 5
+#define PCIE_4_GDSC 6
+#define UFS_CARD_GDSC 7
+#define UFS_PHY_GDSC 8
+#define USB30_MP_GDSC 9
+#define USB30_PRIM_GDSC 10
+#define USB30_SEC_GDSC 11
+#define EMAC_0_GDSC 12
+#define EMAC_1_GDSC 13
+#define USB4_1_GDSC 14
+#define USB4_GDSC 15
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 16
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 17
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 18
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 19
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 20
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 21
+#define HLOS1_VOTE_TURING_MMU_TBU2_GDSC 22
+#define HLOS1_VOTE_TURING_MMU_TBU3_GDSC 23
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 968fa65b9c42..d78b899263a2 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -199,6 +199,7 @@
#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
#define GCC_LPASS_Q6_AXI_CLK 190
#define GCC_LPASS_SWAY_CLK 191
+#define GPLL6 192
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdx55.h b/include/dt-bindings/clock/qcom,gcc-sdx55.h
new file mode 100644
index 000000000000..fb9a5942f793
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdx55.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX55_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX55_H
+
+#define GPLL0 3
+#define GPLL0_OUT_EVEN 4
+#define GPLL4 5
+#define GPLL4_OUT_EVEN 6
+#define GPLL5 7
+#define GCC_AHB_PCIE_LINK_CLK 8
+#define GCC_BLSP1_AHB_CLK 9
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 12
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 15
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 16
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 17
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 18
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 19
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 20
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 23
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 24
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 27
+#define GCC_BLSP1_UART2_APPS_CLK 28
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 29
+#define GCC_BLSP1_UART3_APPS_CLK 30
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 31
+#define GCC_BLSP1_UART4_APPS_CLK 32
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 33
+#define GCC_BOOT_ROM_AHB_CLK 34
+#define GCC_CE1_AHB_CLK 35
+#define GCC_CE1_AXI_CLK 36
+#define GCC_CE1_CLK 37
+#define GCC_CPUSS_AHB_CLK 38
+#define GCC_CPUSS_AHB_CLK_SRC 39
+#define GCC_CPUSS_GNOC_CLK 40
+#define GCC_CPUSS_RBCPR_CLK 41
+#define GCC_CPUSS_RBCPR_CLK_SRC 42
+#define GCC_EMAC_CLK_SRC 43
+#define GCC_EMAC_PTP_CLK_SRC 44
+#define GCC_ETH_AXI_CLK 45
+#define GCC_ETH_PTP_CLK 46
+#define GCC_ETH_RGMII_CLK 47
+#define GCC_ETH_SLAVE_AHB_CLK 48
+#define GCC_GP1_CLK 49
+#define GCC_GP1_CLK_SRC 50
+#define GCC_GP2_CLK 51
+#define GCC_GP2_CLK_SRC 52
+#define GCC_GP3_CLK 53
+#define GCC_GP3_CLK_SRC 54
+#define GCC_PCIE_0_CLKREF_CLK 55
+#define GCC_PCIE_AUX_CLK 56
+#define GCC_PCIE_AUX_PHY_CLK_SRC 57
+#define GCC_PCIE_CFG_AHB_CLK 58
+#define GCC_PCIE_MSTR_AXI_CLK 59
+#define GCC_PCIE_PIPE_CLK 60
+#define GCC_PCIE_RCHNG_PHY_CLK 61
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 62
+#define GCC_PCIE_SLEEP_CLK 63
+#define GCC_PCIE_SLV_AXI_CLK 64
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 65
+#define GCC_PDM2_CLK 66
+#define GCC_PDM2_CLK_SRC 67
+#define GCC_PDM_AHB_CLK 68
+#define GCC_PDM_XO4_CLK 69
+#define GCC_SDCC1_AHB_CLK 70
+#define GCC_SDCC1_APPS_CLK 71
+#define GCC_SDCC1_APPS_CLK_SRC 72
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 73
+#define GCC_USB30_MASTER_CLK 74
+#define GCC_USB30_MASTER_CLK_SRC 75
+#define GCC_USB30_MOCK_UTMI_CLK 76
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 77
+#define GCC_USB30_MSTR_AXI_CLK 78
+#define GCC_USB30_SLEEP_CLK 79
+#define GCC_USB30_SLV_AHB_CLK 80
+#define GCC_USB3_PHY_AUX_CLK 81
+#define GCC_USB3_PHY_AUX_CLK_SRC 82
+#define GCC_USB3_PHY_PIPE_CLK 83
+#define GCC_USB3_PRIM_CLKREF_CLK 84
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 85
+#define GCC_XO_DIV4_CLK 86
+#define GCC_XO_PCIE_LINK_CLK 87
+
+#define GCC_EMAC_BCR 0
+#define GCC_PCIE_BCR 1
+#define GCC_PCIE_LINK_DOWN_BCR 2
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_PHY_BCR 4
+#define GCC_PCIE_PHY_CFG_AHB_BCR 5
+#define GCC_PCIE_PHY_COM_BCR 6
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PDM_BCR 8
+#define GCC_QUSB2PHY_BCR 9
+#define GCC_TCSR_PCIE_BCR 10
+#define GCC_USB30_BCR 11
+#define GCC_USB3_PHY_BCR 12
+#define GCC_USB3PHY_PHY_BCR 13
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 14
+
+/* GCC power domains */
+#define USB30_GDSC 0
+#define PCIE_GDSC 1
+#define EMAC_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdx65.h b/include/dt-bindings/clock/qcom,gcc-sdx65.h
new file mode 100644
index 000000000000..75ecc9237d8f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdx65.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX65_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX65_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GCC_AHB_PCIE_LINK_CLK 2
+#define GCC_BLSP1_AHB_CLK 3
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 4
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 5
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 6
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 7
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 8
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 9
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 11
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 18
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 19
+#define GCC_BLSP1_SLEEP_CLK 20
+#define GCC_BLSP1_UART1_APPS_CLK 21
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 22
+#define GCC_BLSP1_UART2_APPS_CLK 23
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 24
+#define GCC_BLSP1_UART3_APPS_CLK 25
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 26
+#define GCC_BLSP1_UART4_APPS_CLK 27
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 28
+#define GCC_BOOT_ROM_AHB_CLK 29
+#define GCC_CPUSS_AHB_CLK 30
+#define GCC_CPUSS_AHB_CLK_SRC 31
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 32
+#define GCC_CPUSS_GNOC_CLK 33
+#define GCC_GP1_CLK 34
+#define GCC_GP1_CLK_SRC 35
+#define GCC_GP2_CLK 36
+#define GCC_GP2_CLK_SRC 37
+#define GCC_GP3_CLK 38
+#define GCC_GP3_CLK_SRC 39
+#define GCC_PCIE_0_CLKREF_EN 40
+#define GCC_PCIE_AUX_CLK 41
+#define GCC_PCIE_AUX_CLK_SRC 42
+#define GCC_PCIE_AUX_PHY_CLK_SRC 43
+#define GCC_PCIE_CFG_AHB_CLK 44
+#define GCC_PCIE_MSTR_AXI_CLK 45
+#define GCC_PCIE_PIPE_CLK 46
+#define GCC_PCIE_PIPE_CLK_SRC 47
+#define GCC_PCIE_RCHNG_PHY_CLK 48
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 49
+#define GCC_PCIE_SLEEP_CLK 50
+#define GCC_PCIE_SLV_AXI_CLK 51
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 52
+#define GCC_PDM2_CLK 53
+#define GCC_PDM2_CLK_SRC 54
+#define GCC_PDM_AHB_CLK 55
+#define GCC_PDM_XO4_CLK 56
+#define GCC_RX1_USB2_CLKREF_EN 57
+#define GCC_SDCC1_AHB_CLK 58
+#define GCC_SDCC1_APPS_CLK 59
+#define GCC_SDCC1_APPS_CLK_SRC 60
+#define GCC_SPMI_FETCHER_AHB_CLK 61
+#define GCC_SPMI_FETCHER_CLK 62
+#define GCC_SPMI_FETCHER_CLK_SRC 63
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 64
+#define GCC_USB30_MASTER_CLK 65
+#define GCC_USB30_MASTER_CLK_SRC 66
+#define GCC_USB30_MOCK_UTMI_CLK 67
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 68
+#define GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC 69
+#define GCC_USB30_MSTR_AXI_CLK 70
+#define GCC_USB30_SLEEP_CLK 71
+#define GCC_USB30_SLV_AHB_CLK 72
+#define GCC_USB3_PHY_AUX_CLK 73
+#define GCC_USB3_PHY_AUX_CLK_SRC 74
+#define GCC_USB3_PHY_PIPE_CLK 75
+#define GCC_USB3_PHY_PIPE_CLK_SRC 76
+#define GCC_USB3_PRIM_CLKREF_EN 77
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 78
+#define GCC_XO_DIV4_CLK 79
+#define GCC_XO_PCIE_LINK_CLK 80
+
+/* GCC resets */
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_UART1_BCR 4
+#define GCC_BLSP1_UART2_BCR 5
+#define GCC_BLSP1_UART3_BCR 6
+#define GCC_BLSP1_UART4_BCR 7
+#define GCC_PCIE_BCR 8
+#define GCC_PCIE_LINK_DOWN_BCR 9
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_PHY_BCR 11
+#define GCC_PCIE_PHY_CFG_AHB_BCR 12
+#define GCC_PCIE_PHY_COM_BCR 13
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 14
+#define GCC_PDM_BCR 15
+#define GCC_QUSB2PHY_BCR 16
+#define GCC_SDCC1_BCR 17
+#define GCC_SPMI_FETCHER_BCR 18
+#define GCC_TCSR_PCIE_BCR 19
+#define GCC_USB30_BCR 20
+#define GCC_USB3_PHY_BCR 21
+#define GCC_USB3PHY_PHY_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+
+/* GCC power domains */
+#define USB30_GDSC 0
+#define PCIE_GDSC 1
+
+#endif
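The SDX65 header is consumed the same way; a hypothetical serial node (the &gcc and &blsp1_uart3 labels and clock-names are assumed for illustration) would select the BLSP clocks defined above:

    #include <dt-bindings/clock/qcom,gcc-sdx65.h>

    &blsp1_uart3 {
            /* UART core clock and the shared BLSP1 AHB clock */
            clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
                     <&gcc GCC_BLSP1_AHB_CLK>;
            clock-names = "core", "iface";
    };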
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6115.h b/include/dt-bindings/clock/qcom,gcc-sm6115.h
new file mode 100644
index 000000000000..b91a7b460433
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6115.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL0_OUT_MAIN 2
+#define GPLL10 3
+#define GPLL10_OUT_MAIN 4
+#define GPLL11 5
+#define GPLL11_OUT_MAIN 6
+#define GPLL3 7
+#define GPLL4 8
+#define GPLL4_OUT_MAIN 9
+#define GPLL6 10
+#define GPLL6_OUT_MAIN 11
+#define GPLL7 12
+#define GPLL7_OUT_MAIN 13
+#define GPLL8 14
+#define GPLL8_OUT_MAIN 15
+#define GPLL9 16
+#define GPLL9_OUT_MAIN 17
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 18
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 19
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 20
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 21
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 22
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 23
+#define GCC_CAMSS_MCLK0_CLK 24
+#define GCC_CAMSS_MCLK0_CLK_SRC 25
+#define GCC_CAMSS_MCLK1_CLK 26
+#define GCC_CAMSS_MCLK1_CLK_SRC 27
+#define GCC_CAMSS_MCLK2_CLK 28
+#define GCC_CAMSS_MCLK2_CLK_SRC 29
+#define GCC_CAMSS_MCLK3_CLK 30
+#define GCC_CAMSS_MCLK3_CLK_SRC 31
+#define GCC_CAMSS_NRT_AXI_CLK 32
+#define GCC_CAMSS_OPE_AHB_CLK 33
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 34
+#define GCC_CAMSS_OPE_CLK 35
+#define GCC_CAMSS_OPE_CLK_SRC 36
+#define GCC_CAMSS_RT_AXI_CLK 37
+#define GCC_CAMSS_TFE_0_CLK 38
+#define GCC_CAMSS_TFE_0_CLK_SRC 39
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 40
+#define GCC_CAMSS_TFE_0_CSID_CLK 41
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 42
+#define GCC_CAMSS_TFE_1_CLK 43
+#define GCC_CAMSS_TFE_1_CLK_SRC 44
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 45
+#define GCC_CAMSS_TFE_1_CSID_CLK 46
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 47
+#define GCC_CAMSS_TFE_2_CLK 48
+#define GCC_CAMSS_TFE_2_CLK_SRC 49
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 50
+#define GCC_CAMSS_TFE_2_CSID_CLK 51
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 52
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 53
+#define GCC_CAMSS_TOP_AHB_CLK 54
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 55
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 56
+#define GCC_CPUSS_AHB_CLK 57
+#define GCC_CPUSS_GNOC_CLK 60
+#define GCC_DISP_AHB_CLK 61
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 62
+#define GCC_DISP_HF_AXI_CLK 63
+#define GCC_DISP_THROTTLE_CORE_CLK 64
+#define GCC_DISP_XO_CLK 65
+#define GCC_GP1_CLK 66
+#define GCC_GP1_CLK_SRC 67
+#define GCC_GP2_CLK 68
+#define GCC_GP2_CLK_SRC 69
+#define GCC_GP3_CLK 70
+#define GCC_GP3_CLK_SRC 71
+#define GCC_GPU_CFG_AHB_CLK 72
+#define GCC_GPU_GPLL0_CLK_SRC 73
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 74
+#define GCC_GPU_IREF_CLK 75
+#define GCC_GPU_MEMNOC_GFX_CLK 76
+#define GCC_GPU_SNOC_DVM_GFX_CLK 77
+#define GCC_GPU_THROTTLE_CORE_CLK 78
+#define GCC_GPU_THROTTLE_XO_CLK 79
+#define GCC_PDM2_CLK 80
+#define GCC_PDM2_CLK_SRC 81
+#define GCC_PDM_AHB_CLK 82
+#define GCC_PDM_XO4_CLK 83
+#define GCC_PRNG_AHB_CLK 84
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 86
+#define GCC_QMIP_DISP_AHB_CLK 87
+#define GCC_QMIP_GPU_CFG_AHB_CLK 88
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 90
+#define GCC_QUPV3_WRAP0_CORE_CLK 91
+#define GCC_QUPV3_WRAP0_S0_CLK 92
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 93
+#define GCC_QUPV3_WRAP0_S1_CLK 94
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S2_CLK 96
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S3_CLK 98
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S4_CLK 100
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 101
+#define GCC_QUPV3_WRAP0_S5_CLK 102
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 103
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105
+#define GCC_SDCC1_AHB_CLK 106
+#define GCC_SDCC1_APPS_CLK 107
+#define GCC_SDCC1_APPS_CLK_SRC 108
+#define GCC_SDCC1_ICE_CORE_CLK 109
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 110
+#define GCC_SDCC2_AHB_CLK 111
+#define GCC_SDCC2_APPS_CLK 112
+#define GCC_SDCC2_APPS_CLK_SRC 113
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 114
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 115
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 116
+#define GCC_UFS_PHY_AHB_CLK 117
+#define GCC_UFS_PHY_AXI_CLK 118
+#define GCC_UFS_PHY_AXI_CLK_SRC 119
+#define GCC_UFS_PHY_ICE_CORE_CLK 120
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 121
+#define GCC_UFS_PHY_PHY_AUX_CLK 122
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 123
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 124
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 126
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+#define GCC_AHB2PHY_CSI_CLK 148
+#define GCC_AHB2PHY_USB_CLK 149
+#define GCC_BIMC_GPU_AXI_CLK 150
+#define GCC_BOOT_ROM_AHB_CLK 151
+#define GCC_CAM_THROTTLE_NRT_CLK 152
+#define GCC_CAM_THROTTLE_RT_CLK 153
+#define GCC_CAMERA_AHB_CLK 154
+#define GCC_CAMERA_XO_CLK 155
+#define GCC_CAMSS_AXI_CLK 156
+#define GCC_CAMSS_AXI_CLK_SRC 157
+#define GCC_CAMSS_CAMNOC_ATB_CLK 158
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 159
+#define GCC_CAMSS_CCI_0_CLK 160
+#define GCC_CAMSS_CCI_CLK_SRC 161
+#define GCC_CAMSS_CPHY_0_CLK 162
+#define GCC_CAMSS_CPHY_1_CLK 163
+#define GCC_CAMSS_CPHY_2_CLK 164
+#define GCC_UFS_CLKREF_CLK 165
+#define GCC_DISP_GPLL0_CLK_SRC 166
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_SDCC1_BCR 2
+#define GCC_UFS_PHY_BCR 3
+#define GCC_USB30_PRIM_BCR 4
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 5
+#define GCC_VCODEC0_BCR 6
+#define GCC_VENUS_BCR 7
+#define GCC_VIDEO_INTERFACE_BCR 8
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 9
+#define GCC_USB3_PHY_PRIM_SP0_BCR 10
+#define GCC_SDCC2_BCR 11
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 5
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8
+
+#endif
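
For reference, these identifiers are consumed from device trees rather than from C code: a .dtsi includes the binding header and passes the constant as the single cell of a clock specifier. A minimal consumer sketch follows; the include path (assumed to be the SM6115 GCC header, consistent with the constants above), node name, and unit address are illustrative assumptions, and only the GCC_* indexes come from the header itself:

	#include <dt-bindings/clock/qcom,gcc-sm6115.h>	/* assumed path */

	sdhc_1: mmc@4744000 {
		/* register block and remaining properties omitted */
		clocks = <&gcc GCC_SDCC1_AHB_CLK>,	/* bus interface clock */
			 <&gcc GCC_SDCC1_APPS_CLK>;	/* SD/MMC core clock */
		clock-names = "iface", "core";
	};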
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6125.h b/include/dt-bindings/clock/qcom,gcc-sm6125.h
new file mode 100644
index 000000000000..08ea18086824
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6125.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H
+
+#define GPLL0_OUT_AUX2 0
+#define GPLL0_OUT_MAIN 1
+#define GPLL6_OUT_MAIN 2
+#define GPLL7_OUT_MAIN 3
+#define GPLL8_OUT_MAIN 4
+#define GPLL9_OUT_MAIN 5
+#define GPLL0_OUT_EARLY 6
+#define GPLL3_OUT_EARLY 7
+#define GPLL4_OUT_MAIN 8
+#define GPLL5_OUT_MAIN 9
+#define GPLL6_OUT_EARLY 10
+#define GPLL7_OUT_EARLY 11
+#define GPLL8_OUT_EARLY 12
+#define GPLL9_OUT_EARLY 13
+#define GCC_AHB2PHY_CSI_CLK 14
+#define GCC_AHB2PHY_USB_CLK 15
+#define GCC_APC_VS_CLK 16
+#define GCC_BOOT_ROM_AHB_CLK 17
+#define GCC_CAMERA_AHB_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CAMSS_AHB_CLK_SRC 20
+#define GCC_CAMSS_CCI_AHB_CLK 21
+#define GCC_CAMSS_CCI_CLK 22
+#define GCC_CAMSS_CCI_CLK_SRC 23
+#define GCC_CAMSS_CPHY_CSID0_CLK 24
+#define GCC_CAMSS_CPHY_CSID1_CLK 25
+#define GCC_CAMSS_CPHY_CSID2_CLK 26
+#define GCC_CAMSS_CPHY_CSID3_CLK 27
+#define GCC_CAMSS_CPP_AHB_CLK 28
+#define GCC_CAMSS_CPP_AXI_CLK 29
+#define GCC_CAMSS_CPP_CLK 30
+#define GCC_CAMSS_CPP_CLK_SRC 31
+#define GCC_CAMSS_CPP_VBIF_AHB_CLK 32
+#define GCC_CAMSS_CSI0_AHB_CLK 33
+#define GCC_CAMSS_CSI0_CLK 34
+#define GCC_CAMSS_CSI0_CLK_SRC 35
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1_CLK_SRC 42
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_CSI1PIX_CLK 45
+#define GCC_CAMSS_CSI1RDI_CLK 46
+#define GCC_CAMSS_CSI2_AHB_CLK 47
+#define GCC_CAMSS_CSI2_CLK 48
+#define GCC_CAMSS_CSI2_CLK_SRC 49
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 50
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 51
+#define GCC_CAMSS_CSI2PIX_CLK 52
+#define GCC_CAMSS_CSI2RDI_CLK 53
+#define GCC_CAMSS_CSI3_AHB_CLK 54
+#define GCC_CAMSS_CSI3_CLK 55
+#define GCC_CAMSS_CSI3_CLK_SRC 56
+#define GCC_CAMSS_CSI3PIX_CLK 57
+#define GCC_CAMSS_CSI3RDI_CLK 58
+#define GCC_CAMSS_CSI_VFE0_CLK 59
+#define GCC_CAMSS_CSI_VFE1_CLK 60
+#define GCC_CAMSS_CSIPHY0_CLK 61
+#define GCC_CAMSS_CSIPHY1_CLK 62
+#define GCC_CAMSS_CSIPHY2_CLK 63
+#define GCC_CAMSS_CSIPHY_CLK_SRC 64
+#define GCC_CAMSS_GP0_CLK 65
+#define GCC_CAMSS_GP0_CLK_SRC 66
+#define GCC_CAMSS_GP1_CLK 67
+#define GCC_CAMSS_GP1_CLK_SRC 68
+#define GCC_CAMSS_ISPIF_AHB_CLK 69
+#define GCC_CAMSS_JPEG_AHB_CLK 70
+#define GCC_CAMSS_JPEG_AXI_CLK 71
+#define GCC_CAMSS_JPEG_CLK 72
+#define GCC_CAMSS_JPEG_CLK_SRC 73
+#define GCC_CAMSS_MCLK0_CLK 74
+#define GCC_CAMSS_MCLK0_CLK_SRC 75
+#define GCC_CAMSS_MCLK1_CLK 76
+#define GCC_CAMSS_MCLK1_CLK_SRC 77
+#define GCC_CAMSS_MCLK2_CLK 78
+#define GCC_CAMSS_MCLK2_CLK_SRC 79
+#define GCC_CAMSS_MCLK3_CLK 80
+#define GCC_CAMSS_MCLK3_CLK_SRC 81
+#define GCC_CAMSS_MICRO_AHB_CLK 82
+#define GCC_CAMSS_THROTTLE_NRT_AXI_CLK 83
+#define GCC_CAMSS_THROTTLE_RT_AXI_CLK 84
+#define GCC_CAMSS_TOP_AHB_CLK 85
+#define GCC_CAMSS_VFE0_AHB_CLK 86
+#define GCC_CAMSS_VFE0_CLK 87
+#define GCC_CAMSS_VFE0_CLK_SRC 88
+#define GCC_CAMSS_VFE0_STREAM_CLK 89
+#define GCC_CAMSS_VFE1_AHB_CLK 90
+#define GCC_CAMSS_VFE1_CLK 91
+#define GCC_CAMSS_VFE1_CLK_SRC 92
+#define GCC_CAMSS_VFE1_STREAM_CLK 93
+#define GCC_CAMSS_VFE_TSCTR_CLK 94
+#define GCC_CAMSS_VFE_VBIF_AHB_CLK 95
+#define GCC_CAMSS_VFE_VBIF_AXI_CLK 96
+#define GCC_CE1_AHB_CLK 97
+#define GCC_CE1_AXI_CLK 98
+#define GCC_CE1_CLK 99
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 100
+#define GCC_CPUSS_GNOC_CLK 101
+#define GCC_DISP_AHB_CLK 102
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 103
+#define GCC_DISP_HF_AXI_CLK 104
+#define GCC_DISP_THROTTLE_CORE_CLK 105
+#define GCC_DISP_XO_CLK 106
+#define GCC_GP1_CLK 107
+#define GCC_GP1_CLK_SRC 108
+#define GCC_GP2_CLK 109
+#define GCC_GP2_CLK_SRC 110
+#define GCC_GP3_CLK 111
+#define GCC_GP3_CLK_SRC 112
+#define GCC_GPU_CFG_AHB_CLK 113
+#define GCC_GPU_GPLL0_CLK_SRC 114
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 115
+#define GCC_GPU_MEMNOC_GFX_CLK 116
+#define GCC_GPU_SNOC_DVM_GFX_CLK 117
+#define GCC_GPU_THROTTLE_CORE_CLK 118
+#define GCC_GPU_THROTTLE_XO_CLK 119
+#define GCC_MSS_VS_CLK 120
+#define GCC_PDM2_CLK 121
+#define GCC_PDM2_CLK_SRC 122
+#define GCC_PDM_AHB_CLK 123
+#define GCC_PDM_XO4_CLK 124
+#define GCC_PRNG_AHB_CLK 125
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 126
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 127
+#define GCC_QMIP_DISP_AHB_CLK 128
+#define GCC_QMIP_GPU_CFG_AHB_CLK 129
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 130
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 131
+#define GCC_QUPV3_WRAP0_CORE_CLK 132
+#define GCC_QUPV3_WRAP0_S0_CLK 133
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 134
+#define GCC_QUPV3_WRAP0_S1_CLK 135
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 136
+#define GCC_QUPV3_WRAP0_S2_CLK 137
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 138
+#define GCC_QUPV3_WRAP0_S3_CLK 139
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 140
+#define GCC_QUPV3_WRAP0_S4_CLK 141
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 142
+#define GCC_QUPV3_WRAP0_S5_CLK 143
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 144
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 145
+#define GCC_QUPV3_WRAP1_CORE_CLK 146
+#define GCC_QUPV3_WRAP1_S0_CLK 147
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 148
+#define GCC_QUPV3_WRAP1_S1_CLK 149
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 150
+#define GCC_QUPV3_WRAP1_S2_CLK 151
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 152
+#define GCC_QUPV3_WRAP1_S3_CLK 153
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 154
+#define GCC_QUPV3_WRAP1_S4_CLK 155
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 156
+#define GCC_QUPV3_WRAP1_S5_CLK 157
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 158
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 159
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 160
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 161
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 162
+#define GCC_SDCC1_AHB_CLK 163
+#define GCC_SDCC1_APPS_CLK 164
+#define GCC_SDCC1_APPS_CLK_SRC 165
+#define GCC_SDCC1_ICE_CORE_CLK 166
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 167
+#define GCC_SDCC2_AHB_CLK 168
+#define GCC_SDCC2_APPS_CLK 169
+#define GCC_SDCC2_APPS_CLK_SRC 170
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 171
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 172
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 173
+#define GCC_UFS_PHY_AHB_CLK 174
+#define GCC_UFS_PHY_AXI_CLK 175
+#define GCC_UFS_PHY_AXI_CLK_SRC 176
+#define GCC_UFS_PHY_ICE_CORE_CLK 177
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 178
+#define GCC_UFS_PHY_PHY_AUX_CLK 179
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 180
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 181
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 182
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 183
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 184
+#define GCC_USB30_PRIM_MASTER_CLK 185
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 186
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 187
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 188
+#define GCC_USB30_PRIM_SLEEP_CLK 189
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 190
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 191
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 192
+#define GCC_VDDA_VS_CLK 193
+#define GCC_VDDCX_VS_CLK 194
+#define GCC_VDDMX_VS_CLK 195
+#define GCC_VIDEO_AHB_CLK 196
+#define GCC_VIDEO_AXI0_CLK 197
+#define GCC_VIDEO_THROTTLE_CORE_CLK 198
+#define GCC_VIDEO_XO_CLK 199
+#define GCC_VS_CTRL_AHB_CLK 200
+#define GCC_VS_CTRL_CLK 201
+#define GCC_VS_CTRL_CLK_SRC 202
+#define GCC_VSENSOR_CLK_SRC 203
+#define GCC_WCSS_VS_CLK 204
+#define GCC_USB3_PRIM_CLKREF_CLK 205
+#define GCC_SYS_NOC_COMPUTE_SF_AXI_CLK 206
+#define GCC_BIMC_GPU_AXI_CLK 207
+#define GCC_UFS_MEM_CLKREF_CLK 208
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_VFE0_GDSC 2
+#define CAMSS_VFE1_GDSC 3
+#define CAMSS_TOP_GDSC 4
+#define CAM_CPP_GDSC 5
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 9
+
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_PHY_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 4
+#define GCC_USB3_PHY_PRIM_SP0_BCR 5
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 6
+#define GCC_CAMSS_MICRO_BCR 7
+
+#endif
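
The reset indexes above follow the same consumer pattern: the BCR constant becomes the single cell of a reset specifier. A minimal sketch, where only the binding constants are taken from qcom,gcc-sm6125.h and the node shape is an illustrative assumption:

	#include <dt-bindings/clock/qcom,gcc-sm6125.h>

	usb: usb@4ef8800 {
		/* register block and remaining properties omitted */
		clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>,
			 <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
		clock-names = "core", "sleep";
		/* Asserting this reset pulls the whole USB30 block out of operation */
		resets = <&gcc GCC_USB30_PRIM_BCR>;
	};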
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6350.h b/include/dt-bindings/clock/qcom,gcc-sm6350.h
new file mode 100644
index 000000000000..ba584ca33c39
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6350.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL6 3
+#define GPLL6_OUT_EVEN 4
+#define GPLL7 5
+#define GCC_AGGRE_CNOC_PERIPH_CENTER_AHB_CLK 6
+#define GCC_AGGRE_NOC_CENTER_AHB_CLK 7
+#define GCC_AGGRE_NOC_PCIE_SF_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 9
+#define GCC_AGGRE_NOC_WLAN_AXI_CLK 10
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_BOOT_ROM_AHB_CLK 13
+#define GCC_CAMERA_AHB_CLK 14
+#define GCC_CAMERA_AXI_CLK 15
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 16
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CE1_AHB_CLK 19
+#define GCC_CE1_AXI_CLK 20
+#define GCC_CE1_CLK 21
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 22
+#define GCC_CPUSS_AHB_CLK 23
+#define GCC_CPUSS_AHB_CLK_SRC 24
+#define GCC_CPUSS_AHB_DIV_CLK_SRC 25
+#define GCC_CPUSS_GNOC_CLK 26
+#define GCC_CPUSS_RBCPR_CLK 27
+#define GCC_DDRSS_GPU_AXI_CLK 28
+#define GCC_DISP_AHB_CLK 29
+#define GCC_DISP_AXI_CLK 30
+#define GCC_DISP_CC_SLEEP_CLK 31
+#define GCC_DISP_CC_XO_CLK 32
+#define GCC_DISP_GPLL0_CLK 33
+#define GCC_DISP_THROTTLE_AXI_CLK 34
+#define GCC_DISP_XO_CLK 35
+#define GCC_GP1_CLK 36
+#define GCC_GP1_CLK_SRC 37
+#define GCC_GP2_CLK 38
+#define GCC_GP2_CLK_SRC 39
+#define GCC_GP3_CLK 40
+#define GCC_GP3_CLK_SRC 41
+#define GCC_GPU_CFG_AHB_CLK 42
+#define GCC_GPU_GPLL0_CLK 43
+#define GCC_GPU_GPLL0_DIV_CLK 44
+#define GCC_GPU_MEMNOC_GFX_CLK 45
+#define GCC_GPU_SNOC_DVM_GFX_CLK 46
+#define GCC_NPU_AXI_CLK 47
+#define GCC_NPU_BWMON_AXI_CLK 48
+#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK 49
+#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK 50
+#define GCC_NPU_CFG_AHB_CLK 51
+#define GCC_NPU_DMA_CLK 52
+#define GCC_NPU_GPLL0_CLK 53
+#define GCC_NPU_GPLL0_DIV_CLK 54
+#define GCC_PCIE_0_AUX_CLK 55
+#define GCC_PCIE_0_AUX_CLK_SRC 56
+#define GCC_PCIE_0_CFG_AHB_CLK 57
+#define GCC_PCIE_0_MSTR_AXI_CLK 58
+#define GCC_PCIE_0_PIPE_CLK 59
+#define GCC_PCIE_0_SLV_AXI_CLK 60
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 61
+#define GCC_PCIE_PHY_RCHNG_CLK 62
+#define GCC_PCIE_PHY_RCHNG_CLK_SRC 63
+#define GCC_PDM2_CLK 64
+#define GCC_PDM2_CLK_SRC 65
+#define GCC_PDM_AHB_CLK 66
+#define GCC_PDM_XO4_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 69
+#define GCC_QUPV3_WRAP0_CORE_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 83
+#define GCC_QUPV3_WRAP1_CORE_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK 85
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S1_CLK 87
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S2_CLK 89
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S3_CLK 91
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S4_CLK 93
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S5_CLK 95
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 96
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 97
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 98
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 99
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 100
+#define GCC_SDCC1_AHB_CLK 101
+#define GCC_SDCC1_APPS_CLK 102
+#define GCC_SDCC1_APPS_CLK_SRC 103
+#define GCC_SDCC1_ICE_CORE_CLK 104
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 105
+#define GCC_SDCC2_AHB_CLK 106
+#define GCC_SDCC2_APPS_CLK 107
+#define GCC_SDCC2_APPS_CLK_SRC 108
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 109
+#define GCC_UFS_MEM_CLKREF_CLK 110
+#define GCC_UFS_PHY_AHB_CLK 111
+#define GCC_UFS_PHY_AXI_CLK 112
+#define GCC_UFS_PHY_AXI_CLK_SRC 113
+#define GCC_UFS_PHY_ICE_CORE_CLK 114
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 115
+#define GCC_UFS_PHY_PHY_AUX_CLK 116
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 122
+#define GCC_USB30_PRIM_MASTER_CLK 123
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 124
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 125
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_DIV_CLK_SRC 127
+#define GCC_USB3_PRIM_CLKREF_CLK 128
+#define GCC_USB30_PRIM_SLEEP_CLK 129
+#define GCC_USB3_PRIM_PHY_AUX_CLK 130
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 131
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 133
+#define GCC_VIDEO_AHB_CLK 134
+#define GCC_VIDEO_AXI_CLK 135
+#define GCC_VIDEO_THROTTLE_AXI_CLK 136
+#define GCC_VIDEO_XO_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 138
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 139
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 142
+#define GCC_RX5_PCIE_CLKREF_CLK 143
+#define GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC 144
+#define GCC_NPU_PLL0_MAIN_DIV_CLK_SRC 145
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_SDCC1_BCR 2
+#define GCC_SDCC2_BCR 3
+#define GCC_UFS_PHY_BCR 4
+#define GCC_USB30_PRIM_BCR 5
+#define GCC_PCIE_0_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_QUPV3_WRAPPER_0_BCR 8
+#define GCC_QUPV3_WRAPPER_1_BCR 9
+#define GCC_USB3_PHY_PRIM_BCR 10
+#define GCC_USB3_DP_PHY_PRIM_BCR 11
+
+/* GCC GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 2
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 3
+
+#endif
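
GDSC indexes are consumed through power-domain specifiers, so a peripheral that needs its power rail, clocks, and reset picks all three out of the tables above. A minimal sketch, assuming a UFS host controller node; everything other than the binding constants is illustrative:

	#include <dt-bindings/clock/qcom,gcc-sm6350.h>

	ufs_mem_hc: ufs@1d84000 {
		/* register block and remaining properties omitted */
		power-domains = <&gcc UFS_PHY_GDSC>;	/* GDSC index from this header */
		clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
			 <&gcc GCC_UFS_PHY_AHB_CLK>;
		clock-names = "core_clk", "iface_clk";
		resets = <&gcc GCC_UFS_PHY_BCR>;	/* reset index from this header */
	};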
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h
index 3e1a91876610..921a33f24d33 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h
@@ -239,9 +239,17 @@
#define GCC_USB30_PRIM_BCR 26
#define GCC_USB30_SEC_BCR 27
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28
+#define GCC_VIDEO_AXIC_CLK_BCR 29
+#define GCC_VIDEO_AXI0_CLK_BCR 30
+#define GCC_VIDEO_AXI1_CLK_BCR 31

/* GCC GDSCRs */

+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
#define USB30_PRIM_GDSC 4
#define USB30_SEC_GDSC 5
+#define EMAC_GDSC 6

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8350.h b/include/dt-bindings/clock/qcom,gcc-sm8350.h
new file mode 100644
index 000000000000..529c1b8b0417
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8350.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H
+
+/* GCC HW clocks */
+#define PCIE_0_PIPE_CLK 1
+#define PCIE_1_PIPE_CLK 2
+#define UFS_CARD_RX_SYMBOL_0_CLK 3
+#define UFS_CARD_RX_SYMBOL_1_CLK 4
+#define UFS_CARD_TX_SYMBOL_0_CLK 5
+#define UFS_PHY_RX_SYMBOL_0_CLK 6
+#define UFS_PHY_RX_SYMBOL_1_CLK 7
+#define UFS_PHY_TX_SYMBOL_0_CLK 8
+#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK 9
+#define USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK 10
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 11
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 12
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 13
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 14
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 15
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 16
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 17
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 18
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAMERA_HF_AXI_CLK 21
+#define GCC_CAMERA_SF_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 26
+#define GCC_DISP_HF_AXI_CLK 27
+#define GCC_DISP_SF_AXI_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP1_CLK_SRC 30
+#define GCC_GP2_CLK 31
+#define GCC_GP2_CLK_SRC 32
+#define GCC_GP3_CLK 33
+#define GCC_GP3_CLK_SRC 34
+#define GCC_GPLL0 35
+#define GCC_GPLL0_OUT_EVEN 36
+#define GCC_GPLL4 37
+#define GCC_GPLL9 38
+#define GCC_GPU_GPLL0_CLK_SRC 39
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 40
+#define GCC_GPU_IREF_EN 41
+#define GCC_GPU_MEMNOC_GFX_CLK 42
+#define GCC_GPU_SNOC_DVM_GFX_CLK 43
+#define GCC_PCIE0_PHY_RCHNG_CLK 44
+#define GCC_PCIE1_PHY_RCHNG_CLK 45
+#define GCC_PCIE_0_AUX_CLK 46
+#define GCC_PCIE_0_AUX_CLK_SRC 47
+#define GCC_PCIE_0_CFG_AHB_CLK 48
+#define GCC_PCIE_0_CLKREF_EN 49
+#define GCC_PCIE_0_MSTR_AXI_CLK 50
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_0_PIPE_CLK 52
+#define GCC_PCIE_0_PIPE_CLK_SRC 53
+#define GCC_PCIE_0_SLV_AXI_CLK 54
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 55
+#define GCC_PCIE_1_AUX_CLK 56
+#define GCC_PCIE_1_AUX_CLK_SRC 57
+#define GCC_PCIE_1_CFG_AHB_CLK 58
+#define GCC_PCIE_1_CLKREF_EN 59
+#define GCC_PCIE_1_MSTR_AXI_CLK 60
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 61
+#define GCC_PCIE_1_PIPE_CLK 62
+#define GCC_PCIE_1_PIPE_CLK_SRC 63
+#define GCC_PCIE_1_SLV_AXI_CLK 64
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65
+#define GCC_PDM2_CLK 66
+#define GCC_PDM2_CLK_SRC 67
+#define GCC_PDM_AHB_CLK 68
+#define GCC_PDM_XO4_CLK 69
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 70
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 71
+#define GCC_QMIP_DISP_AHB_CLK 72
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 74
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 75
+#define GCC_QUPV3_WRAP0_CORE_CLK 76
+#define GCC_QUPV3_WRAP0_S0_CLK 77
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S1_CLK 79
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S2_CLK 81
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S3_CLK 83
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S4_CLK 85
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 86
+#define GCC_QUPV3_WRAP0_S5_CLK 87
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 88
+#define GCC_QUPV3_WRAP0_S6_CLK 89
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 90
+#define GCC_QUPV3_WRAP0_S7_CLK 91
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_S0_CLK 95
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S1_CLK 97
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S2_CLK 99
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S3_CLK 101
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S4_CLK 103
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S5_CLK 105
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 106
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 107
+#define GCC_QUPV3_WRAP2_CORE_CLK 108
+#define GCC_QUPV3_WRAP2_S0_CLK 109
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110
+#define GCC_QUPV3_WRAP2_S1_CLK 111
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S2_CLK 113
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S3_CLK 115
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S4_CLK 117
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S5_CLK 119
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126
+#define GCC_SDCC2_AHB_CLK 127
+#define GCC_SDCC2_APPS_CLK 128
+#define GCC_SDCC2_APPS_CLK_SRC 129
+#define GCC_SDCC4_AHB_CLK 130
+#define GCC_SDCC4_APPS_CLK 131
+#define GCC_SDCC4_APPS_CLK_SRC 132
+#define GCC_THROTTLE_PCIE_AHB_CLK 133
+#define GCC_UFS_1_CLKREF_EN 134
+#define GCC_UFS_CARD_AHB_CLK 135
+#define GCC_UFS_CARD_AXI_CLK 136
+#define GCC_UFS_CARD_AXI_CLK_SRC 137
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 138
+#define GCC_UFS_CARD_ICE_CORE_CLK 139
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 140
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 141
+#define GCC_UFS_CARD_PHY_AUX_CLK 142
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 143
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 144
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 145
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 146
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 147
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 148
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 149
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 150
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 151
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 152
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK__FORCE_MEM_CORE_ON 174
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 176
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 177
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 178
+#define GCC_USB30_PRIM_SLEEP_CLK 179
+#define GCC_USB30_SEC_MASTER_CLK 180
+#define GCC_USB30_SEC_MASTER_CLK__FORCE_MEM_CORE_ON 181
+#define GCC_USB30_SEC_MASTER_CLK_SRC 182
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 183
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 184
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 185
+#define GCC_USB30_SEC_SLEEP_CLK 186
+#define GCC_USB3_PRIM_PHY_AUX_CLK 187
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 188
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 189
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 190
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 191
+#define GCC_USB3_SEC_CLKREF_EN 192
+#define GCC_USB3_SEC_PHY_AUX_CLK 193
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 194
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 195
+#define GCC_USB3_SEC_PHY_PIPE_CLK 196
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 197
+#define GCC_VIDEO_AXI0_CLK 198
+#define GCC_VIDEO_AXI1_CLK 199
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_MMSS_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_0_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUPV3_WRAPPER_2_BCR 19
+#define GCC_QUSB2PHY_PRIM_BCR 20
+#define GCC_QUSB2PHY_SEC_BCR 21
+#define GCC_SDCC2_BCR 22
+#define GCC_SDCC4_BCR 23
+#define GCC_UFS_CARD_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB30_SEC_BCR 27
+#define GCC_USB3_DP_PHY_PRIM_BCR 28
+#define GCC_USB3_DP_PHY_SEC_BCR 29
+#define GCC_USB3_PHY_PRIM_BCR 30
+#define GCC_USB3_PHY_SEC_BCR 31
+#define GCC_USB3PHY_PHY_PRIM_BCR 32
+#define GCC_USB3PHY_PHY_SEC_BCR 33
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 34
+#define GCC_VIDEO_AXI0_CLK_ARES 35
+#define GCC_VIDEO_AXI1_CLK_ARES 36
+#define GCC_VIDEO_BCR 37
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 7
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8450.h b/include/dt-bindings/clock/qcom,gcc-sm8450.h
new file mode 100644
index 000000000000..9679410843a0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8450.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
+
+/* GCC HW clocks */
+#define PCIE_0_PIPE_CLK 1
+#define PCIE_1_PHY_AUX_CLK 2
+#define PCIE_1_PIPE_CLK 3
+#define UFS_PHY_RX_SYMBOL_0_CLK 4
+#define UFS_PHY_RX_SYMBOL_1_CLK 5
+#define UFS_PHY_TX_SYMBOL_0_CLK 6
+#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK 7
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 9
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 10
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_ANOC_PCIE_PWRCTL_CLK 13
+#define GCC_BOOT_ROM_AHB_CLK 14
+#define GCC_CAMERA_AHB_CLK 15
+#define GCC_CAMERA_HF_AXI_CLK 16
+#define GCC_CAMERA_SF_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 19
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 20
+#define GCC_CPUSS_AHB_CLK 21
+#define GCC_CPUSS_AHB_CLK_SRC 22
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 23
+#define GCC_CPUSS_CONFIG_NOC_SF_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 26
+#define GCC_DISP_AHB_CLK 27
+#define GCC_DISP_HF_AXI_CLK 28
+#define GCC_DISP_SF_AXI_CLK 29
+#define GCC_DISP_XO_CLK 30
+#define GCC_EUSB3_0_CLKREF_EN 31
+#define GCC_GP1_CLK 32
+#define GCC_GP1_CLK_SRC 33
+#define GCC_GP2_CLK 34
+#define GCC_GP2_CLK_SRC 35
+#define GCC_GP3_CLK 36
+#define GCC_GP3_CLK_SRC 37
+#define GCC_GPLL0 38
+#define GCC_GPLL0_OUT_EVEN 39
+#define GCC_GPLL4 40
+#define GCC_GPLL9 41
+#define GCC_GPU_CFG_AHB_CLK 42
+#define GCC_GPU_GPLL0_CLK_SRC 43
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 44
+#define GCC_GPU_MEMNOC_GFX_CLK 45
+#define GCC_GPU_SNOC_DVM_GFX_CLK 46
+#define GCC_PCIE_0_AUX_CLK 47
+#define GCC_PCIE_0_AUX_CLK_SRC 48
+#define GCC_PCIE_0_CFG_AHB_CLK 49
+#define GCC_PCIE_0_CLKREF_EN 50
+#define GCC_PCIE_0_MSTR_AXI_CLK 51
+#define GCC_PCIE_0_PHY_RCHNG_CLK 52
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_CLK 54
+#define GCC_PCIE_0_PIPE_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PCIE_1_AUX_CLK 58
+#define GCC_PCIE_1_AUX_CLK_SRC 59
+#define GCC_PCIE_1_CFG_AHB_CLK 60
+#define GCC_PCIE_1_CLKREF_EN 61
+#define GCC_PCIE_1_MSTR_AXI_CLK 62
+#define GCC_PCIE_1_PHY_AUX_CLK 63
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 64
+#define GCC_PCIE_1_PHY_RCHNG_CLK 65
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 66
+#define GCC_PCIE_1_PIPE_CLK 67
+#define GCC_PCIE_1_PIPE_CLK_SRC 68
+#define GCC_PCIE_1_SLV_AXI_CLK 69
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 70
+#define GCC_PDM2_CLK 71
+#define GCC_PDM2_CLK_SRC 72
+#define GCC_PDM_AHB_CLK 73
+#define GCC_PDM_XO4_CLK 74
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 75
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 76
+#define GCC_QMIP_DISP_AHB_CLK 77
+#define GCC_QMIP_GPU_AHB_CLK 78
+#define GCC_QMIP_PCIE_AHB_CLK 79
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 80
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 81
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 82
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 83
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 84
+#define GCC_QUPV3_WRAP0_CORE_CLK 85
+#define GCC_QUPV3_WRAP0_S0_CLK 86
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S1_CLK 88
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S2_CLK 90
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S3_CLK 92
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 93
+#define GCC_QUPV3_WRAP0_S4_CLK 94
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S5_CLK 96
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S6_CLK 98
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S7_CLK 100
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP1_CORE_CLK 103
+#define GCC_QUPV3_WRAP1_S0_CLK 104
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S1_CLK 106
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S2_CLK 108
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S3_CLK 110
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP1_S4_CLK 112
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP1_S5_CLK 114
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP1_S6_CLK 116
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 117
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 118
+#define GCC_QUPV3_WRAP2_CORE_CLK 119
+#define GCC_QUPV3_WRAP2_S0_CLK 120
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S1_CLK 122
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S2_CLK 124
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S3_CLK 126
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S4_CLK 128
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S5_CLK 130
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S6_CLK 132
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 133
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 134
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 135
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 136
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 137
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 138
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 139
+#define GCC_SDCC2_AHB_CLK 140
+#define GCC_SDCC2_APPS_CLK 141
+#define GCC_SDCC2_APPS_CLK_SRC 142
+#define GCC_SDCC2_AT_CLK 143
+#define GCC_SDCC4_AHB_CLK 144
+#define GCC_SDCC4_APPS_CLK 145
+#define GCC_SDCC4_APPS_CLK_SRC 146
+#define GCC_SDCC4_AT_CLK 147
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 148
+#define GCC_UFS_0_CLKREF_EN 149
+#define GCC_UFS_PHY_AHB_CLK 150
+#define GCC_UFS_PHY_AXI_CLK 151
+#define GCC_UFS_PHY_AXI_CLK_SRC 152
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 153
+#define GCC_UFS_PHY_ICE_CORE_CLK 154
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 155
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 156
+#define GCC_UFS_PHY_PHY_AUX_CLK 157
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 158
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 159
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 160
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 161
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 162
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 163
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK 169
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 171
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 172
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 173
+#define GCC_USB30_PRIM_SLEEP_CLK 174
+#define GCC_USB3_0_CLKREF_EN 175
+#define GCC_USB3_PRIM_PHY_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 177
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 178
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 179
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_AXI1_CLK 183
+#define GCC_VIDEO_XO_CLK 184
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_MMSS_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_BCR 14
+#define GCC_PCIE_PHY_CFG_AHB_BCR 15
+#define GCC_PCIE_PHY_COM_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_0_BCR 18
+#define GCC_QUPV3_WRAPPER_1_BCR 19
+#define GCC_QUPV3_WRAPPER_2_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 33
+#define GCC_VIDEO_AXI0_CLK_ARES 34
+#define GCC_VIDEO_AXI1_CLK_ARES 35
+#define GCC_VIDEO_BCR 36
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_PHY_GDSC 2
+#define USB30_PRIM_GDSC 3
+
+#endif
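
On the provider side, a single clock-controller node exports all three index spaces defined above, each behind a one-cell specifier; this is why the clock, reset, and power-domain indexes can overlap without ambiguity. A sketch of such a node, where the unit address and register size are illustrative assumptions rather than values from this patch:

	gcc: clock-controller@100000 {
		compatible = "qcom,gcc-sm8450";
		reg = <0x00100000 0x1f4200>;	/* illustrative */
		#clock-cells = <1>;		/* indexes from the clock table */
		#reset-cells = <1>;		/* indexes from the reset table */
		#power-domain-cells = <1>;	/* indexes from the GDSC table */
	};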
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7280.h b/include/dt-bindings/clock/qcom,gpucc-sc7280.h
new file mode 100644
index 000000000000..669b23b606ba
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7280.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_GX_GMU_CLK 10
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 12
+#define GPU_CC_HUB_AON_CLK 13
+#define GPU_CC_HUB_CLK_SRC 14
+#define GPU_CC_HUB_CX_INT_CLK 15
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 16
+#define GPU_CC_MND1X_0_GFX3D_CLK 17
+#define GPU_CC_MND1X_1_GFX3D_CLK 18
+#define GPU_CC_SLEEP_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
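
The GPU clock controller headers follow the same scheme behind a separate provider. A minimal consumer sketch, assuming an Adreno GMU node; only the GPU_CC_* constants come from qcom,gpucc-sc7280.h, the rest is illustrative:

	#include <dt-bindings/clock/qcom,gpucc-sc7280.h>

	gmu: gmu@3d6a000 {
		/* register block and remaining properties omitted */
		clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
			 <&gpucc GPU_CC_CXO_CLK>;
		clock-names = "gmu", "cxo";
		power-domains = <&gpucc GPU_CC_CX_GDSC>,
				<&gpucc GPU_CC_GX_GDSC>;
		power-domain-names = "cx", "gx";
	};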
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
new file mode 100644
index 000000000000..bb7da46333b0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_GX_GMU_CLK 11
+#define GPU_CC_GX_VSENSE_CLK 12
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 13
+#define GPU_CC_HUB_AON_CLK 14
+#define GPU_CC_HUB_CLK_SRC 15
+#define GPU_CC_HUB_CX_INT_CLK 16
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 17
+#define GPU_CC_SLEEP_CLK 18
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm660.h b/include/dt-bindings/clock/qcom,gpucc-sdm660.h
new file mode 100644
index 000000000000..7ea3e53df58c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm660.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GPUCC_660_H
+#define _DT_BINDINGS_CLK_SDM_GPUCC_660_H
+
+#define GPUCC_CXO_CLK 0
+#define GPU_PLL0_PLL 1
+#define GPU_PLL1_PLL 2
+#define GFX3D_CLK_SRC 3
+#define RBCPR_CLK_SRC 4
+#define RBBMTIMER_CLK_SRC 5
+#define GPUCC_RBCPR_CLK 6
+#define GPUCC_GFX3D_CLK 7
+#define GPUCC_RBBMTIMER_CLK 8
+
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#define GPU_CX_BCR 0
+#define GPU_GX_BCR 1
+#define RBCPR_BCR 2
+#define SPDM_BCR 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm6350.h b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
new file mode 100644
index 000000000000..68e814fc8acd
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_ACD_AHB_CLK 2
+#define GPU_CC_ACD_CXO_CLK 3
+#define GPU_CC_AHB_CLK 4
+#define GPU_CC_CRC_AHB_CLK 5
+#define GPU_CC_CX_GFX3D_CLK 6
+#define GPU_CC_CX_GFX3D_SLV_CLK 7
+#define GPU_CC_CX_GMU_CLK 8
+#define GPU_CC_CX_SNOC_DVM_CLK 9
+#define GPU_CC_CXO_AON_CLK 10
+#define GPU_CC_CXO_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_CXO_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_CLK_SRC 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+
+/* CLK_HW */
+#define GPU_CC_CRC_DIV 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8350.h b/include/dt-bindings/clock/qcom,gpucc-sm8350.h
new file mode 100644
index 000000000000..2ca857f5bfd2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8350.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_APB_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_QDSS_AT_CLK 5
+#define GPU_CC_CX_QDSS_TRIG_CLK 6
+#define GPU_CC_CX_QDSS_TSCTR_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_FREQ_MEASURE_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_GX_QDSS_TSCTR_CLK 14
+#define GPU_CC_GX_VSENSE_CLK 15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 17
+#define GPU_CC_HUB_AON_CLK 18
+#define GPU_CC_HUB_CLK_SRC 19
+#define GPU_CC_HUB_CX_INT_CLK 20
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 21
+#define GPU_CC_MND1X_0_GFX3D_CLK 22
+#define GPU_CC_MND1X_1_GFX3D_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5332-gcc.h b/include/dt-bindings/clock/qcom,ipq5332-gcc.h
new file mode 100644
index 000000000000..8a405a0a96d0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5332-gcc.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_IPQ5332_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_IPQ5332_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define GCC_ADSS_PWM_CLK 6
+#define GCC_ADSS_PWM_CLK_SRC 7
+#define GCC_AHB_CLK 8
+#define GCC_APSS_AXI_CLK_SRC 9
+#define GCC_BLSP1_AHB_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 11
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 12
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 17
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 18
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 19
+#define GCC_BLSP1_SLEEP_CLK 20
+#define GCC_BLSP1_UART1_APPS_CLK 21
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 22
+#define GCC_BLSP1_UART2_APPS_CLK 23
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 24
+#define GCC_BLSP1_UART3_APPS_CLK 25
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 26
+#define GCC_CE_AHB_CLK 27
+#define GCC_CE_AXI_CLK 28
+#define GCC_CE_PCNOC_AHB_CLK 29
+#define GCC_CMN_12GPLL_AHB_CLK 30
+#define GCC_CMN_12GPLL_APU_CLK 31
+#define GCC_CMN_12GPLL_SYS_CLK 32
+#define GCC_GP1_CLK 33
+#define GCC_GP1_CLK_SRC 34
+#define GCC_GP2_CLK 35
+#define GCC_GP2_CLK_SRC 36
+#define GCC_LPASS_CORE_AXIM_CLK 37
+#define GCC_LPASS_SWAY_CLK 38
+#define GCC_LPASS_SWAY_CLK_SRC 39
+#define GCC_MDIO_AHB_CLK 40
+#define GCC_MDIO_SLAVE_AHB_CLK 41
+#define GCC_MEM_NOC_Q6_AXI_CLK 42
+#define GCC_MEM_NOC_TS_CLK 43
+#define GCC_NSS_TS_CLK 44
+#define GCC_NSS_TS_CLK_SRC 45
+#define GCC_NSSCC_CLK 46
+#define GCC_NSSCFG_CLK 47
+#define GCC_NSSNOC_ATB_CLK 48
+#define GCC_NSSNOC_NSSCC_CLK 49
+#define GCC_NSSNOC_QOSGEN_REF_CLK 50
+#define GCC_NSSNOC_SNOC_1_CLK 51
+#define GCC_NSSNOC_SNOC_CLK 52
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 53
+#define GCC_NSSNOC_XO_DCD_CLK 54
+#define GCC_PCIE3X1_0_AHB_CLK 55
+#define GCC_PCIE3X1_0_AUX_CLK 56
+#define GCC_PCIE3X1_0_AXI_CLK_SRC 57
+#define GCC_PCIE3X1_0_AXI_M_CLK 58
+#define GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK 59
+#define GCC_PCIE3X1_0_AXI_S_CLK 60
+#define GCC_PCIE3X1_0_PIPE_CLK 61
+#define GCC_PCIE3X1_0_RCHG_CLK 62
+#define GCC_PCIE3X1_0_RCHG_CLK_SRC 63
+#define GCC_PCIE3X1_1_AHB_CLK 64
+#define GCC_PCIE3X1_1_AUX_CLK 65
+#define GCC_PCIE3X1_1_AXI_CLK_SRC 66
+#define GCC_PCIE3X1_1_AXI_M_CLK 67
+#define GCC_PCIE3X1_1_AXI_S_BRIDGE_CLK 68
+#define GCC_PCIE3X1_1_AXI_S_CLK 69
+#define GCC_PCIE3X1_1_PIPE_CLK 70
+#define GCC_PCIE3X1_1_RCHG_CLK 71
+#define GCC_PCIE3X1_1_RCHG_CLK_SRC 72
+#define GCC_PCIE3X1_PHY_AHB_CLK 73
+#define GCC_PCIE3X2_AHB_CLK 74
+#define GCC_PCIE3X2_AUX_CLK 75
+#define GCC_PCIE3X2_AXI_M_CLK 76
+#define GCC_PCIE3X2_AXI_M_CLK_SRC 77
+#define GCC_PCIE3X2_AXI_S_BRIDGE_CLK 78
+#define GCC_PCIE3X2_AXI_S_CLK 79
+#define GCC_PCIE3X2_AXI_S_CLK_SRC 80
+#define GCC_PCIE3X2_PHY_AHB_CLK 81
+#define GCC_PCIE3X2_PIPE_CLK 82
+#define GCC_PCIE3X2_RCHG_CLK 83
+#define GCC_PCIE3X2_RCHG_CLK_SRC 84
+#define GCC_PCIE_AUX_CLK_SRC 85
+#define GCC_PCNOC_AT_CLK 86
+#define GCC_PCNOC_BFDCD_CLK_SRC 87
+#define GCC_PCNOC_LPASS_CLK 88
+#define GCC_PRNG_AHB_CLK 89
+#define GCC_Q6_AHB_CLK 90
+#define GCC_Q6_AHB_S_CLK 91
+#define GCC_Q6_AXIM_CLK 92
+#define GCC_Q6_AXIM_CLK_SRC 93
+#define GCC_Q6_AXIS_CLK 94
+#define GCC_Q6_TSCTR_1TO2_CLK 95
+#define GCC_Q6SS_ATBM_CLK 96
+#define GCC_Q6SS_PCLKDBG_CLK 97
+#define GCC_Q6SS_TRIG_CLK 98
+#define GCC_QDSS_AT_CLK 99
+#define GCC_QDSS_AT_CLK_SRC 100
+#define GCC_QDSS_CFG_AHB_CLK 101
+#define GCC_QDSS_DAP_AHB_CLK 102
+#define GCC_QDSS_DAP_CLK 103
+#define GCC_QDSS_DAP_DIV_CLK_SRC 104
+#define GCC_QDSS_ETR_USB_CLK 105
+#define GCC_QDSS_EUD_AT_CLK 106
+#define GCC_QDSS_TSCTR_CLK_SRC 107
+#define GCC_QPIC_AHB_CLK 108
+#define GCC_QPIC_CLK 109
+#define GCC_QPIC_IO_MACRO_CLK 110
+#define GCC_QPIC_IO_MACRO_CLK_SRC 111
+#define GCC_QPIC_SLEEP_CLK 112
+#define GCC_SDCC1_AHB_CLK 113
+#define GCC_SDCC1_APPS_CLK 114
+#define GCC_SDCC1_APPS_CLK_SRC 115
+#define GCC_SLEEP_CLK_SRC 116
+#define GCC_SNOC_LPASS_CFG_CLK 117
+#define GCC_SNOC_NSSNOC_1_CLK 118
+#define GCC_SNOC_NSSNOC_CLK 119
+#define GCC_SNOC_PCIE3_1LANE_1_M_CLK 120
+#define GCC_SNOC_PCIE3_1LANE_1_S_CLK 121
+#define GCC_SNOC_PCIE3_1LANE_M_CLK 122
+#define GCC_SNOC_PCIE3_1LANE_S_CLK 123
+#define GCC_SNOC_PCIE3_2LANE_M_CLK 124
+#define GCC_SNOC_PCIE3_2LANE_S_CLK 125
+#define GCC_SNOC_USB_CLK 126
+#define GCC_SYS_NOC_AT_CLK 127
+#define GCC_SYS_NOC_WCSS_AHB_CLK 128
+#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 129
+#define GCC_UNIPHY0_AHB_CLK 130
+#define GCC_UNIPHY0_SYS_CLK 131
+#define GCC_UNIPHY1_AHB_CLK 132
+#define GCC_UNIPHY1_SYS_CLK 133
+#define GCC_UNIPHY_SYS_CLK_SRC 134
+#define GCC_USB0_AUX_CLK 135
+#define GCC_USB0_AUX_CLK_SRC 136
+#define GCC_USB0_EUD_AT_CLK 137
+#define GCC_USB0_LFPS_CLK 138
+#define GCC_USB0_LFPS_CLK_SRC 139
+#define GCC_USB0_MASTER_CLK 140
+#define GCC_USB0_MASTER_CLK_SRC 141
+#define GCC_USB0_MOCK_UTMI_CLK 142
+#define GCC_USB0_MOCK_UTMI_CLK_SRC 143
+#define GCC_USB0_MOCK_UTMI_DIV_CLK_SRC 144
+#define GCC_USB0_PHY_CFG_AHB_CLK 145
+#define GCC_USB0_PIPE_CLK 146
+#define GCC_USB0_SLEEP_CLK 147
+#define GCC_WCSS_AHB_CLK_SRC 148
+#define GCC_WCSS_AXIM_CLK 149
+#define GCC_WCSS_AXIS_CLK 150
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 151
+#define GCC_WCSS_DBG_IFC_APB_CLK 152
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 153
+#define GCC_WCSS_DBG_IFC_ATB_CLK 154
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 155
+#define GCC_WCSS_DBG_IFC_NTS_CLK 156
+#define GCC_WCSS_ECAHB_CLK 157
+#define GCC_WCSS_MST_ASYNC_BDG_CLK 158
+#define GCC_WCSS_SLV_ASYNC_BDG_CLK 159
+#define GCC_XO_CLK 160
+#define GCC_XO_CLK_SRC 161
+#define GCC_XO_DIV4_CLK 162
+#define GCC_IM_SLEEP_CLK 163
+#define GCC_NSSNOC_PCNOC_1_CLK 164
+#define GCC_MEM_NOC_AHB_CLK 165
+#define GCC_MEM_NOC_APSS_AXI_CLK 166
+#define GCC_SNOC_QOSGEN_EXTREF_DIV_CLK_SRC 167
+#define GCC_MEM_NOC_QOSGEN_EXTREF_CLK 168
+#define GCC_PCIE3X2_PIPE_CLK_SRC 169
+#define GCC_PCIE3X1_0_PIPE_CLK_SRC 170
+#define GCC_PCIE3X1_1_PIPE_CLK_SRC 171
+#define GCC_USB0_PIPE_CLK_SRC 172
+
+#define GCC_ADSS_BCR 0
+#define GCC_ADSS_PWM_CLK_ARES 1
+#define GCC_AHB_CLK_ARES 2
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 3
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_GPLL0_CLK_ARES 4
+#define GCC_APSS_AHB_CLK_ARES 5
+#define GCC_APSS_AXI_CLK_ARES 6
+#define GCC_BLSP1_AHB_CLK_ARES 7
+#define GCC_BLSP1_BCR 8
+#define GCC_BLSP1_QUP1_BCR 9
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_ARES 10
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_ARES 11
+#define GCC_BLSP1_QUP2_BCR 12
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_ARES 13
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_ARES 14
+#define GCC_BLSP1_QUP3_BCR 15
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_ARES 16
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_ARES 17
+#define GCC_BLSP1_SLEEP_CLK_ARES 18
+#define GCC_BLSP1_UART1_APPS_CLK_ARES 19
+#define GCC_BLSP1_UART1_BCR 20
+#define GCC_BLSP1_UART2_APPS_CLK_ARES 21
+#define GCC_BLSP1_UART2_BCR 22
+#define GCC_BLSP1_UART3_APPS_CLK_ARES 23
+#define GCC_BLSP1_UART3_BCR 24
+#define GCC_CE_BCR 25
+#define GCC_CMN_BLK_BCR 26
+#define GCC_CMN_LDO0_BCR 27
+#define GCC_CMN_LDO1_BCR 28
+#define GCC_DCC_BCR 29
+#define GCC_GP1_CLK_ARES 30
+#define GCC_GP2_CLK_ARES 31
+#define GCC_LPASS_BCR 32
+#define GCC_LPASS_CORE_AXIM_CLK_ARES 33
+#define GCC_LPASS_SWAY_CLK_ARES 34
+#define GCC_MDIOM_BCR 35
+#define GCC_MDIOS_BCR 36
+#define GCC_NSS_BCR 37
+#define GCC_NSS_TS_CLK_ARES 38
+#define GCC_NSSCC_CLK_ARES 39
+#define GCC_NSSCFG_CLK_ARES 40
+#define GCC_NSSNOC_ATB_CLK_ARES 41
+#define GCC_NSSNOC_NSSCC_CLK_ARES 42
+#define GCC_NSSNOC_QOSGEN_REF_CLK_ARES 43
+#define GCC_NSSNOC_SNOC_1_CLK_ARES 44
+#define GCC_NSSNOC_SNOC_CLK_ARES 45
+#define GCC_NSSNOC_TIMEOUT_REF_CLK_ARES 46
+#define GCC_NSSNOC_XO_DCD_CLK_ARES 47
+#define GCC_PCIE3X1_0_AHB_CLK_ARES 48
+#define GCC_PCIE3X1_0_AUX_CLK_ARES 49
+#define GCC_PCIE3X1_0_AXI_M_CLK_ARES 50
+#define GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK_ARES 51
+#define GCC_PCIE3X1_0_AXI_S_CLK_ARES 52
+#define GCC_PCIE3X1_0_BCR 53
+#define GCC_PCIE3X1_0_LINK_DOWN_BCR 54
+#define GCC_PCIE3X1_0_PHY_BCR 55
+#define GCC_PCIE3X1_0_PHY_PHY_BCR 56
+#define GCC_PCIE3X1_1_AHB_CLK_ARES 57
+#define GCC_PCIE3X1_1_AUX_CLK_ARES 58
+#define GCC_PCIE3X1_1_AXI_M_CLK_ARES 59
+#define GCC_PCIE3X1_1_AXI_S_BRIDGE_CLK_ARES 60
+#define GCC_PCIE3X1_1_AXI_S_CLK_ARES 61
+#define GCC_PCIE3X1_1_BCR 62
+#define GCC_PCIE3X1_1_LINK_DOWN_BCR 63
+#define GCC_PCIE3X1_1_PHY_BCR 64
+#define GCC_PCIE3X1_1_PHY_PHY_BCR 65
+#define GCC_PCIE3X1_PHY_AHB_CLK_ARES 66
+#define GCC_PCIE3X2_AHB_CLK_ARES 67
+#define GCC_PCIE3X2_AUX_CLK_ARES 68
+#define GCC_PCIE3X2_AXI_M_CLK_ARES 69
+#define GCC_PCIE3X2_AXI_S_BRIDGE_CLK_ARES 70
+#define GCC_PCIE3X2_AXI_S_CLK_ARES 71
+#define GCC_PCIE3X2_BCR 72
+#define GCC_PCIE3X2_LINK_DOWN_BCR 73
+#define GCC_PCIE3X2_PHY_AHB_CLK_ARES 74
+#define GCC_PCIE3X2_PHY_BCR 75
+#define GCC_PCIE3X2PHY_PHY_BCR 76
+#define GCC_PCNOC_BCR 77
+#define GCC_PCNOC_LPASS_CLK_ARES 78
+#define GCC_PRNG_AHB_CLK_ARES 79
+#define GCC_PRNG_BCR 80
+#define GCC_Q6_AHB_CLK_ARES 81
+#define GCC_Q6_AHB_S_CLK_ARES 82
+#define GCC_Q6_AXIM_CLK_ARES 83
+#define GCC_Q6_AXIS_CLK_ARES 84
+#define GCC_Q6_TSCTR_1TO2_CLK_ARES 85
+#define GCC_Q6SS_ATBM_CLK_ARES 86
+#define GCC_Q6SS_PCLKDBG_CLK_ARES 87
+#define GCC_Q6SS_TRIG_CLK_ARES 88
+#define GCC_QDSS_APB2JTAG_CLK_ARES 89
+#define GCC_QDSS_AT_CLK_ARES 90
+#define GCC_QDSS_BCR 91
+#define GCC_QDSS_CFG_AHB_CLK_ARES 92
+#define GCC_QDSS_DAP_AHB_CLK_ARES 93
+#define GCC_QDSS_DAP_CLK_ARES 94
+#define GCC_QDSS_ETR_USB_CLK_ARES 95
+#define GCC_QDSS_EUD_AT_CLK_ARES 96
+#define GCC_QDSS_STM_CLK_ARES 97
+#define GCC_QDSS_TRACECLKIN_CLK_ARES 98
+#define GCC_QDSS_TS_CLK_ARES 99
+#define GCC_QDSS_TSCTR_DIV16_CLK_ARES 100
+#define GCC_QDSS_TSCTR_DIV2_CLK_ARES 101
+#define GCC_QDSS_TSCTR_DIV3_CLK_ARES 102
+#define GCC_QDSS_TSCTR_DIV4_CLK_ARES 103
+#define GCC_QDSS_TSCTR_DIV8_CLK_ARES 104
+#define GCC_QPIC_AHB_CLK_ARES 105
+#define GCC_QPIC_CLK_ARES 106
+#define GCC_QPIC_BCR 107
+#define GCC_QPIC_IO_MACRO_CLK_ARES 108
+#define GCC_QPIC_SLEEP_CLK_ARES 109
+#define GCC_QUSB2_0_PHY_BCR 110
+#define GCC_SDCC1_AHB_CLK_ARES 111
+#define GCC_SDCC1_APPS_CLK_ARES 112
+#define GCC_SDCC_BCR 113
+#define GCC_SNOC_BCR 114
+#define GCC_SNOC_LPASS_CFG_CLK_ARES 115
+#define GCC_SNOC_NSSNOC_1_CLK_ARES 116
+#define GCC_SNOC_NSSNOC_CLK_ARES 117
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK_ARES 118
+#define GCC_SYS_NOC_WCSS_AHB_CLK_ARES 119
+#define GCC_UNIPHY0_AHB_CLK_ARES 120
+#define GCC_UNIPHY0_BCR 121
+#define GCC_UNIPHY0_SYS_CLK_ARES 122
+#define GCC_UNIPHY1_AHB_CLK_ARES 123
+#define GCC_UNIPHY1_BCR 124
+#define GCC_UNIPHY1_SYS_CLK_ARES 125
+#define GCC_USB0_AUX_CLK_ARES 126
+#define GCC_USB0_EUD_AT_CLK_ARES 127
+#define GCC_USB0_LFPS_CLK_ARES 128
+#define GCC_USB0_MASTER_CLK_ARES 129
+#define GCC_USB0_MOCK_UTMI_CLK_ARES 130
+#define GCC_USB0_PHY_BCR 131
+#define GCC_USB0_PHY_CFG_AHB_CLK_ARES 132
+#define GCC_USB0_SLEEP_CLK_ARES 133
+#define GCC_USB3PHY_0_PHY_BCR 134
+#define GCC_USB_BCR 135
+#define GCC_WCSS_AXIM_CLK_ARES 136
+#define GCC_WCSS_AXIS_CLK_ARES 137
+#define GCC_WCSS_BCR 138
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK_ARES 139
+#define GCC_WCSS_DBG_IFC_APB_CLK_ARES 140
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK_ARES 141
+#define GCC_WCSS_DBG_IFC_ATB_CLK_ARES 142
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK_ARES 143
+#define GCC_WCSS_DBG_IFC_NTS_CLK_ARES 144
+#define GCC_WCSS_ECAHB_CLK_ARES 145
+#define GCC_WCSS_MST_ASYNC_BDG_CLK_ARES 146
+#define GCC_WCSS_Q6_BCR 147
+#define GCC_WCSS_SLV_ASYNC_BDG_CLK_ARES 148
+#define GCC_XO_CLK_ARES 149
+#define GCC_XO_DIV4_CLK_ARES 150
+#define GCC_Q6SS_DBG_ARES 151
+#define GCC_WCSS_DBG_BDG_ARES 152
+#define GCC_WCSS_DBG_ARES 153
+#define GCC_WCSS_AXI_S_ARES 154
+#define GCC_WCSS_AXI_M_ARES 155
+#define GCC_WCSSAON_ARES 156
+#define GCC_PCIE3X2_PIPE_ARES 157
+#define GCC_PCIE3X2_CORE_STICKY_ARES 158
+#define GCC_PCIE3X2_AXI_S_STICKY_ARES 159
+#define GCC_PCIE3X2_AXI_M_STICKY_ARES 160
+#define GCC_PCIE3X1_0_PIPE_ARES 161
+#define GCC_PCIE3X1_0_CORE_STICKY_ARES 162
+#define GCC_PCIE3X1_0_AXI_S_STICKY_ARES 163
+#define GCC_PCIE3X1_0_AXI_M_STICKY_ARES 164
+#define GCC_PCIE3X1_1_PIPE_ARES 165
+#define GCC_PCIE3X1_1_CORE_STICKY_ARES 166
+#define GCC_PCIE3X1_1_AXI_S_STICKY_ARES 167
+#define GCC_PCIE3X1_1_AXI_M_STICKY_ARES 168
+#define GCC_IM_SLEEP_CLK_ARES 169
+#define GCC_NSSNOC_PCNOC_1_CLK_ARES 170
+#define GCC_UNIPHY0_XPCS_ARES 171
+#define GCC_UNIPHY1_XPCS_ARES 172
+#endif
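
As in the SM8350 reset list above, this table mixes block resets (*_BCR) and per-clock asynchronous resets (*_ARES) in a single index space, so a consumer selects either kind through the same one-cell specifier. An illustrative sketch; the node itself is an assumption:

	#include <dt-bindings/clock/qcom,ipq5332-gcc.h>

	nand: nand-controller@79b0000 {
		/* register block and remaining properties omitted */
		resets = <&gcc GCC_QPIC_BCR>;	/* whole-block reset */
	};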
diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
new file mode 100644
index 000000000000..08fd3a37acaa
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2023 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_9574_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_9574_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define GCC_SLEEP_CLK_SRC 6
+#define APSS_AHB_CLK_SRC 7
+#define APSS_AXI_CLK_SRC 8
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 20
+#define BLSP1_UART1_APPS_CLK_SRC 21
+#define BLSP1_UART2_APPS_CLK_SRC 22
+#define BLSP1_UART3_APPS_CLK_SRC 23
+#define BLSP1_UART4_APPS_CLK_SRC 24
+#define BLSP1_UART5_APPS_CLK_SRC 25
+#define BLSP1_UART6_APPS_CLK_SRC 26
+#define GCC_APSS_AHB_CLK 27
+#define GCC_APSS_AXI_CLK 28
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 29
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 30
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 31
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 32
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 33
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 34
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 35
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 36
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 37
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 38
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 39
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 40
+#define GCC_BLSP1_UART1_APPS_CLK 41
+#define GCC_BLSP1_UART2_APPS_CLK 42
+#define GCC_BLSP1_UART3_APPS_CLK 43
+#define GCC_BLSP1_UART4_APPS_CLK 44
+#define GCC_BLSP1_UART5_APPS_CLK 45
+#define GCC_BLSP1_UART6_APPS_CLK 46
+#define PCIE0_AXI_M_CLK_SRC 47
+#define GCC_PCIE0_AXI_M_CLK 48
+#define PCIE1_AXI_M_CLK_SRC 49
+#define GCC_PCIE1_AXI_M_CLK 50
+#define PCIE2_AXI_M_CLK_SRC 51
+#define GCC_PCIE2_AXI_M_CLK 52
+#define PCIE3_AXI_M_CLK_SRC 53
+#define GCC_PCIE3_AXI_M_CLK 54
+#define PCIE0_AXI_S_CLK_SRC 55
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 56
+#define GCC_PCIE0_AXI_S_CLK 57
+#define PCIE1_AXI_S_CLK_SRC 58
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 59
+#define GCC_PCIE1_AXI_S_CLK 60
+#define PCIE2_AXI_S_CLK_SRC 61
+#define GCC_PCIE2_AXI_S_BRIDGE_CLK 62
+#define GCC_PCIE2_AXI_S_CLK 63
+#define PCIE3_AXI_S_CLK_SRC 64
+#define GCC_PCIE3_AXI_S_BRIDGE_CLK 65
+#define GCC_PCIE3_AXI_S_CLK 66
+#define PCIE0_PIPE_CLK_SRC 67
+#define PCIE1_PIPE_CLK_SRC 68
+#define PCIE2_PIPE_CLK_SRC 69
+#define PCIE3_PIPE_CLK_SRC 70
+#define PCIE_AUX_CLK_SRC 71
+#define GCC_PCIE0_AUX_CLK 72
+#define GCC_PCIE1_AUX_CLK 73
+#define GCC_PCIE2_AUX_CLK 74
+#define GCC_PCIE3_AUX_CLK 75
+#define PCIE0_RCHNG_CLK_SRC 76
+#define GCC_PCIE0_RCHNG_CLK 77
+#define PCIE1_RCHNG_CLK_SRC 78
+#define GCC_PCIE1_RCHNG_CLK 79
+#define PCIE2_RCHNG_CLK_SRC 80
+#define GCC_PCIE2_RCHNG_CLK 81
+#define PCIE3_RCHNG_CLK_SRC 82
+#define GCC_PCIE3_RCHNG_CLK 83
+#define GCC_PCIE0_AHB_CLK 84
+#define GCC_PCIE1_AHB_CLK 85
+#define GCC_PCIE2_AHB_CLK 86
+#define GCC_PCIE3_AHB_CLK 87
+#define USB0_AUX_CLK_SRC 88
+#define GCC_USB0_AUX_CLK 89
+#define USB0_MASTER_CLK_SRC 90
+#define GCC_USB0_MASTER_CLK 91
+#define GCC_SNOC_USB_CLK 92
+#define GCC_ANOC_USB_AXI_CLK 93
+#define USB0_MOCK_UTMI_CLK_SRC 94
+#define USB0_MOCK_UTMI_DIV_CLK_SRC 95
+#define GCC_USB0_MOCK_UTMI_CLK 96
+#define USB0_PIPE_CLK_SRC 97
+#define GCC_USB0_PHY_CFG_AHB_CLK 98
+#define SDCC1_APPS_CLK_SRC 99
+#define GCC_SDCC1_APPS_CLK 100
+#define SDCC1_ICE_CORE_CLK_SRC 101
+#define GCC_SDCC1_ICE_CORE_CLK 102
+#define GCC_SDCC1_AHB_CLK 103
+#define PCNOC_BFDCD_CLK_SRC 104
+#define GCC_NSSCFG_CLK 105
+#define GCC_NSSNOC_NSSCC_CLK 106
+#define GCC_NSSCC_CLK 107
+#define GCC_NSSNOC_PCNOC_1_CLK 108
+#define GCC_QDSS_DAP_AHB_CLK 109
+#define GCC_QDSS_CFG_AHB_CLK 110
+#define GCC_QPIC_AHB_CLK 111
+#define GCC_QPIC_CLK 112
+#define GCC_BLSP1_AHB_CLK 113
+#define GCC_MDIO_AHB_CLK 114
+#define GCC_PRNG_AHB_CLK 115
+#define GCC_UNIPHY0_AHB_CLK 116
+#define GCC_UNIPHY1_AHB_CLK 117
+#define GCC_UNIPHY2_AHB_CLK 118
+#define GCC_CMN_12GPLL_AHB_CLK 119
+#define GCC_CMN_12GPLL_APU_CLK 120
+#define SYSTEM_NOC_BFDCD_CLK_SRC 121
+#define GCC_NSSNOC_SNOC_CLK 122
+#define GCC_NSSNOC_SNOC_1_CLK 123
+#define GCC_QDSS_ETR_USB_CLK 124
+#define WCSS_AHB_CLK_SRC 125
+#define GCC_Q6_AHB_CLK 126
+#define GCC_Q6_AHB_S_CLK 127
+#define GCC_WCSS_ECAHB_CLK 128
+#define GCC_WCSS_ACMT_CLK 129
+#define GCC_SYS_NOC_WCSS_AHB_CLK 130
+#define WCSS_AXI_M_CLK_SRC 131
+#define GCC_ANOC_WCSS_AXI_M_CLK 132
+#define QDSS_AT_CLK_SRC 133
+#define GCC_Q6SS_ATBM_CLK 134
+#define GCC_WCSS_DBG_IFC_ATB_CLK 135
+#define GCC_NSSNOC_ATB_CLK 136
+#define GCC_QDSS_AT_CLK 137
+#define GCC_SYS_NOC_AT_CLK 138
+#define GCC_PCNOC_AT_CLK 139
+#define GCC_USB0_EUD_AT_CLK 140
+#define GCC_QDSS_EUD_AT_CLK 141
+#define QDSS_STM_CLK_SRC 142
+#define GCC_QDSS_STM_CLK 143
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 144
+#define QDSS_TRACECLKIN_CLK_SRC 145
+#define GCC_QDSS_TRACECLKIN_CLK 146
+#define QDSS_TSCTR_CLK_SRC 147
+#define GCC_Q6_TSCTR_1TO2_CLK 148
+#define GCC_WCSS_DBG_IFC_NTS_CLK 149
+#define GCC_QDSS_TSCTR_DIV2_CLK 150
+#define GCC_QDSS_TS_CLK 151
+#define GCC_QDSS_TSCTR_DIV4_CLK 152
+#define GCC_NSS_TS_CLK 153
+#define GCC_QDSS_TSCTR_DIV8_CLK 154
+#define GCC_QDSS_TSCTR_DIV16_CLK 155
+#define GCC_Q6SS_PCLKDBG_CLK 156
+#define GCC_Q6SS_TRIG_CLK 157
+#define GCC_WCSS_DBG_IFC_APB_CLK 158
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 159
+#define GCC_QDSS_DAP_CLK 160
+#define GCC_QDSS_APB2JTAG_CLK 161
+#define GCC_QDSS_TSCTR_DIV3_CLK 162
+#define QPIC_IO_MACRO_CLK_SRC 163
+#define GCC_QPIC_IO_MACRO_CLK 164
+#define Q6_AXI_CLK_SRC 165
+#define GCC_Q6_AXIM_CLK 166
+#define GCC_WCSS_Q6_TBU_CLK 167
+#define GCC_MEM_NOC_Q6_AXI_CLK 168
+#define Q6_AXIM2_CLK_SRC 169
+#define NSSNOC_MEMNOC_BFDCD_CLK_SRC 170
+#define GCC_NSSNOC_MEMNOC_CLK 171
+#define GCC_NSSNOC_MEM_NOC_1_CLK 172
+#define GCC_NSS_TBU_CLK 173
+#define GCC_MEM_NOC_NSSNOC_CLK 174
+#define LPASS_AXIM_CLK_SRC 175
+#define LPASS_SWAY_CLK_SRC 176
+#define ADSS_PWM_CLK_SRC 177
+#define GCC_ADSS_PWM_CLK 178
+#define GP1_CLK_SRC 179
+#define GP2_CLK_SRC 180
+#define GP3_CLK_SRC 181
+#define DDRSS_SMS_SLOW_CLK_SRC 182
+#define GCC_XO_CLK_SRC 183
+#define GCC_XO_CLK 184
+#define GCC_NSSNOC_QOSGEN_REF_CLK 185
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 186
+#define GCC_XO_DIV4_CLK 187
+#define GCC_UNIPHY0_SYS_CLK 188
+#define GCC_UNIPHY1_SYS_CLK 189
+#define GCC_UNIPHY2_SYS_CLK 190
+#define GCC_CMN_12GPLL_SYS_CLK 191
+#define GCC_NSSNOC_XO_DCD_CLK 192
+#define GCC_Q6SS_BOOT_CLK 193
+#define UNIPHY_SYS_CLK_SRC 194
+#define NSS_TS_CLK_SRC 195
+#define GCC_ANOC_PCIE0_1LANE_M_CLK 196
+#define GCC_ANOC_PCIE1_1LANE_M_CLK 197
+#define GCC_ANOC_PCIE2_2LANE_M_CLK 198
+#define GCC_ANOC_PCIE3_2LANE_M_CLK 199
+#define GCC_SNOC_PCIE0_1LANE_S_CLK 200
+#define GCC_SNOC_PCIE1_1LANE_S_CLK 201
+#define GCC_SNOC_PCIE2_2LANE_S_CLK 202
+#define GCC_SNOC_PCIE3_2LANE_S_CLK 203
+#define GCC_CRYPTO_CLK_SRC 204
+#define GCC_CRYPTO_CLK 205
+#define GCC_CRYPTO_AXI_CLK 206
+#define GCC_CRYPTO_AHB_CLK 207
+#define GCC_USB0_PIPE_CLK 208
+#define GCC_USB0_SLEEP_CLK 209
+#endif
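A hedged sketch of how these indices are consumed, using one of the BLSP UARTs; the unit address, compatible and clock-names follow the common Qualcomm UARTDM pattern but are assumptions here, while the two clock indices come from this header:

    #include <dt-bindings/clock/qcom,ipq9574-gcc.h>

    serial@78af000 {                    /* address hypothetical */
            compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
            clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
                     <&gcc GCC_BLSP1_AHB_CLK>;
            clock-names = "core", "iface";
    };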
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
index 25b92bbf0ab4..e0fb4acf4ba8 100644
--- a/include/dt-bindings/clock/qcom,lcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -19,4 +19,6 @@
#define SPDIF_CLK 10
#define AHBIX_CLK 11

+#define LCC_PCM_RESET 0
+
#endif
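The hunk above adds a reset identifier to a header that previously carried only clock indices; clock and reset cells are independent number spaces, so LCC_PCM_RESET can start again at 0 without clashing with any clock ID. A hedged fragment (the consumer node is hypothetical):

    audio@28000 {                       /* node and address hypothetical */
            resets = <&lcc LCC_PCM_RESET>;
            reset-names = "pcm";        /* name illustrative */
    };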
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
deleted file mode 100644
index 299338ee1d88..000000000000
--- a/include/dt-bindings/clock/qcom,lcc-mdm9615.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- * Copyright (c) BayLibre, SAS.
- * Author : Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
-#define _DT_BINDINGS_CLK_LCC_MDM9615_H
-
-#define PLL4 0
-#define MI2S_OSR_SRC 1
-#define MI2S_OSR_CLK 2
-#define MI2S_DIV_CLK 3
-#define MI2S_BIT_DIV_CLK 4
-#define MI2S_BIT_CLK 5
-#define PCM_SRC 6
-#define PCM_CLK_OUT 7
-#define PCM_CLK 8
-#define SLIMBUS_SRC 9
-#define AUDIO_SLIMBUS_CLK 10
-#define SPS_SLIMBUS_CLK 11
-#define CODEC_I2S_MIC_OSR_SRC 12
-#define CODEC_I2S_MIC_OSR_CLK 13
-#define CODEC_I2S_MIC_DIV_CLK 14
-#define CODEC_I2S_MIC_BIT_DIV_CLK 15
-#define CODEC_I2S_MIC_BIT_CLK 16
-#define SPARE_I2S_MIC_OSR_SRC 17
-#define SPARE_I2S_MIC_OSR_CLK 18
-#define SPARE_I2S_MIC_DIV_CLK 19
-#define SPARE_I2S_MIC_BIT_DIV_CLK 20
-#define SPARE_I2S_MIC_BIT_CLK 21
-#define CODEC_I2S_SPKR_OSR_SRC 22
-#define CODEC_I2S_SPKR_OSR_CLK 23
-#define CODEC_I2S_SPKR_DIV_CLK 24
-#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
-#define CODEC_I2S_SPKR_BIT_CLK 26
-#define SPARE_I2S_SPKR_OSR_SRC 27
-#define SPARE_I2S_SPKR_OSR_CLK 28
-#define SPARE_I2S_SPKR_DIV_CLK 29
-#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
-#define SPARE_I2S_SPKR_BIT_CLK 31
-
-#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sc7280.h b/include/dt-bindings/clock/qcom,lpass-sc7280.h
new file mode 100644
index 000000000000..e71ccac3a375
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sc7280.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+
+#define LPASS_Q6SS_AHBM_CLK 0
+#define LPASS_Q6SS_AHBS_CLK 1
+#define LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK 2
+#define LPASS_QDSP6SS_XO_CLK 3
+#define LPASS_QDSP6SS_SLEEP_CLK 4
+#define LPASS_QDSP6SS_CORE_CLK 5
+
+#endif
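On the provider side, the controller that owns these IDs advertises #clock-cells = <1> and consumers pass the index as the cell value. A minimal provider sketch; the register address and size are hypothetical, and the compatible follows the usual qcom,<soc>-lpasscc naming, so treat it as an assumption:

    lpasscc: clock-controller@3000000 { /* address hypothetical */
            compatible = "qcom,sc7280-lpasscc"; /* assumed naming */
            reg = <0x03000000 0x40>;    /* size illustrative */
            #clock-cells = <1>;
    };

A consumer then refers to, for example, clocks = <&lpasscc LPASS_Q6SS_AHBM_CLK>;.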
diff --git a/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
new file mode 100644
index 000000000000..22dcd47d4513
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H
+
+/* LPASS_AUDIO_CC clocks */
+#define LPASS_AUDIO_CC_PLL 0
+#define LPASS_AUDIO_CC_PLL_OUT_AUX2 1
+#define LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC 2
+#define LPASS_AUDIO_CC_PLL_OUT_MAIN_DIV_CLK_SRC 3
+#define LPASS_AUDIO_CC_CDIV_RX_MCLK_DIV_CLK_SRC 4
+#define LPASS_AUDIO_CC_CODEC_MEM0_CLK 5
+#define LPASS_AUDIO_CC_CODEC_MEM1_CLK 6
+#define LPASS_AUDIO_CC_CODEC_MEM2_CLK 7
+#define LPASS_AUDIO_CC_CODEC_MEM_CLK 8
+#define LPASS_AUDIO_CC_EXT_MCLK0_CLK 9
+#define LPASS_AUDIO_CC_EXT_MCLK0_CLK_SRC 10
+#define LPASS_AUDIO_CC_EXT_MCLK1_CLK 11
+#define LPASS_AUDIO_CC_EXT_MCLK1_CLK_SRC 12
+#define LPASS_AUDIO_CC_RX_MCLK_2X_CLK 13
+#define LPASS_AUDIO_CC_RX_MCLK_CLK 14
+#define LPASS_AUDIO_CC_RX_MCLK_CLK_SRC 15
+
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_TX_CGCR 1
+#define LPASS_AUDIO_SWR_WSA_CGCR 2
+
+/* LPASS_AON_CC clocks */
+#define LPASS_AON_CC_PLL 0
+#define LPASS_AON_CC_PLL_OUT_EVEN 1
+#define LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC 2
+#define LPASS_AON_CC_PLL_OUT_ODD 3
+#define LPASS_AON_CC_AUDIO_HM_H_CLK 4
+#define LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC 5
+#define LPASS_AON_CC_MAIN_RCG_CLK_SRC 6
+#define LPASS_AON_CC_TX_MCLK_2X_CLK 7
+#define LPASS_AON_CC_TX_MCLK_CLK 8
+#define LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC 9
+#define LPASS_AON_CC_VA_MEM0_CLK 10
+
+/* LPASS_AON_CC power domains */
+#define LPASS_AON_CC_LPASS_AUDIO_HM_GDSC 0
+
+#endif
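This header carries three independent index spaces (LPASS_AUDIO_CC clocks, the CSR reset block, and LPASS_AON_CC clocks plus a power domain), each restarting at 0; they are disambiguated by which provider phandle and cell type they travel with, not by the numbers themselves. A hedged fragment, with hypothetical node labels:

    clocks = <&lpass_audiocc LPASS_AUDIO_CC_RX_MCLK_CLK>;
    resets = <&lpass_audiocc LPASS_AUDIO_SWR_RX_CGCR>;
    power-domains = <&lpass_aoncc LPASS_AON_CC_LPASS_AUDIO_HM_GDSC>;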
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
new file mode 100644
index 000000000000..0324c69ce968
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H
+
+/* LPASS_CORE_CC clocks */
+#define LPASS_CORE_CC_DIG_PLL 0
+#define LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC 1
+#define LPASS_CORE_CC_DIG_PLL_OUT_ODD 2
+#define LPASS_CORE_CC_CORE_CLK 3
+#define LPASS_CORE_CC_CORE_CLK_SRC 4
+#define LPASS_CORE_CC_EXT_IF0_CLK_SRC 5
+#define LPASS_CORE_CC_EXT_IF0_IBIT_CLK 6
+#define LPASS_CORE_CC_EXT_IF1_CLK_SRC 7
+#define LPASS_CORE_CC_EXT_IF1_IBIT_CLK 8
+#define LPASS_CORE_CC_LPM_CORE_CLK 9
+#define LPASS_CORE_CC_LPM_MEM0_CORE_CLK 10
+#define LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK 11
+#define LPASS_CORE_CC_EXT_MCLK0_CLK 12
+#define LPASS_CORE_CC_EXT_MCLK0_CLK_SRC 13
+
+/* LPASS_CORE_CC power domains */
+#define LPASS_CORE_CC_LPASS_CORE_HM_GDSC 0
+
+#endif
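GDSC indices such as LPASS_CORE_CC_LPASS_CORE_HM_GDSC are generic power-domain identifiers and travel through power-domains rather than clocks. A hedged consumer fragment; everything except the two indices is an assumption:

    audio@3987000 {                     /* node and address hypothetical */
            clocks = <&lpass_core_cc LPASS_CORE_CC_EXT_MCLK0_CLK>;
            power-domains = <&lpass_core_cc LPASS_CORE_CC_LPASS_CORE_HM_GDSC>;
    };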
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index a62cb0629a7a..743ee60632eb 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -121,7 +121,6 @@
#define MMSS_MMSSNOC_BTO_AHB_CLK 112
#define MMSS_MMSSNOC_AXI_CLK 113
#define MMSS_S0_AXI_CLK 114
-#define OCMEMCX_AHB_CLK 115
#define OCMEMCX_OCMEMNOC_CLK 116
#define OXILI_OCMEMGX_CLK 117
#define OCMEMNOC_CLK 118
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8994.h b/include/dt-bindings/clock/qcom,mmcc-msm8994.h
new file mode 100644
index 000000000000..4b289092f5a2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8994.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, Konrad Dybcio
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8994_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_8994_H
+
+/* Clocks */
+#define MMPLL0_EARLY 0
+#define MMPLL0_PLL 1
+#define MMPLL1_EARLY 2
+#define MMPLL1_PLL 3
+#define MMPLL3_EARLY 4
+#define MMPLL3_PLL 5
+#define MMPLL4_EARLY 6
+#define MMPLL4_PLL 7
+#define MMPLL5_EARLY 8
+#define MMPLL5_PLL 9
+#define AXI_CLK_SRC 10
+#define RBBMTIMER_CLK_SRC 11
+#define PCLK0_CLK_SRC 12
+#define PCLK1_CLK_SRC 13
+#define MDP_CLK_SRC 14
+#define VSYNC_CLK_SRC 15
+#define BYTE0_CLK_SRC 16
+#define BYTE1_CLK_SRC 17
+#define ESC0_CLK_SRC 18
+#define ESC1_CLK_SRC 19
+#define MDSS_AHB_CLK 20
+#define MDSS_PCLK0_CLK 21
+#define MDSS_PCLK1_CLK 22
+#define MDSS_VSYNC_CLK 23
+#define MDSS_BYTE0_CLK 24
+#define MDSS_BYTE1_CLK 25
+#define MDSS_ESC0_CLK 26
+#define MDSS_ESC1_CLK 27
+#define CSI0_CLK_SRC 28
+#define CSI1_CLK_SRC 29
+#define CSI2_CLK_SRC 30
+#define CSI3_CLK_SRC 31
+#define VFE0_CLK_SRC 32
+#define VFE1_CLK_SRC 33
+#define CPP_CLK_SRC 34
+#define JPEG0_CLK_SRC 35
+#define JPEG1_CLK_SRC 36
+#define JPEG2_CLK_SRC 37
+#define CSI2PHYTIMER_CLK_SRC 38
+#define FD_CORE_CLK_SRC 39
+#define OCMEMNOC_CLK_SRC 40
+#define CCI_CLK_SRC 41
+#define MMSS_GP0_CLK_SRC 42
+#define MMSS_GP1_CLK_SRC 43
+#define JPEG_DMA_CLK_SRC 44
+#define MCLK0_CLK_SRC 45
+#define MCLK1_CLK_SRC 46
+#define MCLK2_CLK_SRC 47
+#define MCLK3_CLK_SRC 48
+#define CSI0PHYTIMER_CLK_SRC 49
+#define CSI1PHYTIMER_CLK_SRC 50
+#define EXTPCLK_CLK_SRC 51
+#define HDMI_CLK_SRC 52
+#define CAMSS_AHB_CLK 53
+#define CAMSS_CCI_CCI_AHB_CLK 54
+#define CAMSS_CCI_CCI_CLK 55
+#define CAMSS_VFE_CPP_AHB_CLK 56
+#define CAMSS_VFE_CPP_AXI_CLK 57
+#define CAMSS_VFE_CPP_CLK 58
+#define CAMSS_CSI0_AHB_CLK 59
+#define CAMSS_CSI0_CLK 60
+#define CAMSS_CSI0PHY_CLK 61
+#define CAMSS_CSI0PIX_CLK 62
+#define CAMSS_CSI0RDI_CLK 63
+#define CAMSS_CSI1_AHB_CLK 64
+#define CAMSS_CSI1_CLK 65
+#define CAMSS_CSI1PHY_CLK 66
+#define CAMSS_CSI1PIX_CLK 67
+#define CAMSS_CSI1RDI_CLK 68
+#define CAMSS_CSI2_AHB_CLK 69
+#define CAMSS_CSI2_CLK 70
+#define CAMSS_CSI2PHY_CLK 71
+#define CAMSS_CSI2PIX_CLK 72
+#define CAMSS_CSI2RDI_CLK 73
+#define CAMSS_CSI3_AHB_CLK 74
+#define CAMSS_CSI3_CLK 75
+#define CAMSS_CSI3PHY_CLK 76
+#define CAMSS_CSI3PIX_CLK 77
+#define CAMSS_CSI3RDI_CLK 78
+#define CAMSS_CSI_VFE0_CLK 79
+#define CAMSS_CSI_VFE1_CLK 80
+#define CAMSS_GP0_CLK 81
+#define CAMSS_GP1_CLK 82
+#define CAMSS_ISPIF_AHB_CLK 83
+#define CAMSS_JPEG_DMA_CLK 84
+#define CAMSS_JPEG_JPEG0_CLK 85
+#define CAMSS_JPEG_JPEG1_CLK 86
+#define CAMSS_JPEG_JPEG2_CLK 87
+#define CAMSS_JPEG_JPEG_AHB_CLK 88
+#define CAMSS_JPEG_JPEG_AXI_CLK 89
+#define CAMSS_MCLK0_CLK 90
+#define CAMSS_MCLK1_CLK 91
+#define CAMSS_MCLK2_CLK 92
+#define CAMSS_MCLK3_CLK 93
+#define CAMSS_MICRO_AHB_CLK 94
+#define CAMSS_PHY0_CSI0PHYTIMER_CLK 95
+#define CAMSS_PHY1_CSI1PHYTIMER_CLK 96
+#define CAMSS_PHY2_CSI2PHYTIMER_CLK 97
+#define CAMSS_TOP_AHB_CLK 98
+#define CAMSS_VFE_VFE0_CLK 99
+#define CAMSS_VFE_VFE1_CLK 100
+#define CAMSS_VFE_VFE_AHB_CLK 101
+#define CAMSS_VFE_VFE_AXI_CLK 102
+#define FD_AXI_CLK 103
+#define FD_CORE_CLK 104
+#define FD_CORE_UAR_CLK 105
+#define MDSS_AXI_CLK 106
+#define MDSS_EXTPCLK_CLK 107
+#define MDSS_HDMI_AHB_CLK 108
+#define MDSS_HDMI_CLK 109
+#define MDSS_MDP_CLK 110
+#define MMSS_MISC_AHB_CLK 111
+#define MMSS_MMSSNOC_AXI_CLK 112
+#define MMSS_S0_AXI_CLK 113
+#define OCMEMCX_OCMEMNOC_CLK 114
+#define OXILI_GFX3D_CLK 115
+#define OXILI_RBBMTIMER_CLK 116
+#define OXILICX_AHB_CLK 117
+#define VENUS0_AHB_CLK 118
+#define VENUS0_AXI_CLK 119
+#define VENUS0_OCMEMNOC_CLK 120
+#define VENUS0_VCODEC0_CLK 121
+#define VENUS0_CORE0_VCODEC_CLK 122
+#define VENUS0_CORE1_VCODEC_CLK 123
+#define VENUS0_CORE2_VCODEC_CLK 124
+#define AHB_CLK_SRC 125
+#define FD_AHB_CLK 126
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define VENUS_CORE2_GDSC 3
+#define CAMSS_TOP_GDSC 4
+#define MDSS_GDSC 5
+#define JPEG_GDSC 6
+#define VFE_GDSC 7
+#define CPP_GDSC 8
+#define OXILI_GX_GDSC 9
+#define OXILI_CX_GDSC 10
+#define FD_GDSC 11
+
+/* Resets */
+#define CAMSS_MICRO_BCR 0
+
+#endif
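A hedged sketch tying the header's sections together: branch clocks and a GDSC are both cells of the same mmcc node, selected by property type. The video-codec node below is hypothetical; only the four indices come from this header:

    video-codec@fdc00000 {              /* node and address hypothetical */
            clocks = <&mmcc VENUS0_VCODEC0_CLK>,
                     <&mmcc VENUS0_AHB_CLK>,
                     <&mmcc VENUS0_AXI_CLK>;
            clock-names = "core", "iface", "bus"; /* names illustrative */
            power-domains = <&mmcc VENUS_GDSC>;
    };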
diff --git a/include/dt-bindings/clock/qcom,mmcc-sdm660.h b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
new file mode 100644
index 000000000000..f9dbc21cb5c7
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_660_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_660_H
+
+#define AHB_CLK_SRC 0
+#define BYTE0_CLK_SRC 1
+#define BYTE1_CLK_SRC 2
+#define CAMSS_GP0_CLK_SRC 3
+#define CAMSS_GP1_CLK_SRC 4
+#define CCI_CLK_SRC 5
+#define CPP_CLK_SRC 6
+#define CSI0_CLK_SRC 7
+#define CSI0PHYTIMER_CLK_SRC 8
+#define CSI1_CLK_SRC 9
+#define CSI1PHYTIMER_CLK_SRC 10
+#define CSI2_CLK_SRC 11
+#define CSI2PHYTIMER_CLK_SRC 12
+#define CSI3_CLK_SRC 13
+#define CSIPHY_CLK_SRC 14
+#define DP_AUX_CLK_SRC 15
+#define DP_CRYPTO_CLK_SRC 16
+#define DP_GTC_CLK_SRC 17
+#define DP_LINK_CLK_SRC 18
+#define DP_PIXEL_CLK_SRC 19
+#define ESC0_CLK_SRC 20
+#define ESC1_CLK_SRC 21
+#define JPEG0_CLK_SRC 22
+#define MCLK0_CLK_SRC 23
+#define MCLK1_CLK_SRC 24
+#define MCLK2_CLK_SRC 25
+#define MCLK3_CLK_SRC 26
+#define MDP_CLK_SRC 27
+#define MMPLL0_PLL 28
+#define MMPLL10_PLL 29
+#define MMPLL1_PLL 30
+#define MMPLL3_PLL 31
+#define MMPLL4_PLL 32
+#define MMPLL5_PLL 33
+#define MMPLL6_PLL 34
+#define MMPLL7_PLL 35
+#define MMPLL8_PLL 36
+#define BIMC_SMMU_AHB_CLK 37
+#define BIMC_SMMU_AXI_CLK 38
+#define CAMSS_AHB_CLK 39
+#define CAMSS_CCI_AHB_CLK 40
+#define CAMSS_CCI_CLK 41
+#define CAMSS_CPHY_CSID0_CLK 42
+#define CAMSS_CPHY_CSID1_CLK 43
+#define CAMSS_CPHY_CSID2_CLK 44
+#define CAMSS_CPHY_CSID3_CLK 45
+#define CAMSS_CPP_AHB_CLK 46
+#define CAMSS_CPP_AXI_CLK 47
+#define CAMSS_CPP_CLK 48
+#define CAMSS_CPP_VBIF_AHB_CLK 49
+#define CAMSS_CSI0_AHB_CLK 50
+#define CAMSS_CSI0_CLK 51
+#define CAMSS_CSI0PHYTIMER_CLK 52
+#define CAMSS_CSI0PIX_CLK 53
+#define CAMSS_CSI0RDI_CLK 54
+#define CAMSS_CSI1_AHB_CLK 55
+#define CAMSS_CSI1_CLK 56
+#define CAMSS_CSI1PHYTIMER_CLK 57
+#define CAMSS_CSI1PIX_CLK 58
+#define CAMSS_CSI1RDI_CLK 59
+#define CAMSS_CSI2_AHB_CLK 60
+#define CAMSS_CSI2_CLK 61
+#define CAMSS_CSI2PHYTIMER_CLK 62
+#define CAMSS_CSI2PIX_CLK 63
+#define CAMSS_CSI2RDI_CLK 64
+#define CAMSS_CSI3_AHB_CLK 65
+#define CAMSS_CSI3_CLK 66
+#define CAMSS_CSI3PIX_CLK 67
+#define CAMSS_CSI3RDI_CLK 68
+#define CAMSS_CSI_VFE0_CLK 69
+#define CAMSS_CSI_VFE1_CLK 70
+#define CAMSS_CSIPHY0_CLK 71
+#define CAMSS_CSIPHY1_CLK 72
+#define CAMSS_CSIPHY2_CLK 73
+#define CAMSS_GP0_CLK 74
+#define CAMSS_GP1_CLK 75
+#define CAMSS_ISPIF_AHB_CLK 76
+#define CAMSS_JPEG0_CLK 77
+#define CAMSS_JPEG_AHB_CLK 78
+#define CAMSS_JPEG_AXI_CLK 79
+#define CAMSS_MCLK0_CLK 80
+#define CAMSS_MCLK1_CLK 81
+#define CAMSS_MCLK2_CLK 82
+#define CAMSS_MCLK3_CLK 83
+#define CAMSS_MICRO_AHB_CLK 84
+#define CAMSS_TOP_AHB_CLK 85
+#define CAMSS_VFE0_AHB_CLK 86
+#define CAMSS_VFE0_CLK 87
+#define CAMSS_VFE0_STREAM_CLK 88
+#define CAMSS_VFE1_AHB_CLK 89
+#define CAMSS_VFE1_CLK 90
+#define CAMSS_VFE1_STREAM_CLK 91
+#define CAMSS_VFE_VBIF_AHB_CLK 92
+#define CAMSS_VFE_VBIF_AXI_CLK 93
+#define CSIPHY_AHB2CRIF_CLK 94
+#define CXO_CLK 95
+#define MDSS_AHB_CLK 96
+#define MDSS_AXI_CLK 97
+#define MDSS_BYTE0_CLK 98
+#define MDSS_BYTE0_INTF_CLK 99
+#define MDSS_BYTE0_INTF_DIV_CLK 100
+#define MDSS_BYTE1_CLK 101
+#define MDSS_BYTE1_INTF_CLK 102
+#define MDSS_DP_AUX_CLK 103
+#define MDSS_DP_CRYPTO_CLK 104
+#define MDSS_DP_GTC_CLK 105
+#define MDSS_DP_LINK_CLK 106
+#define MDSS_DP_LINK_INTF_CLK 107
+#define MDSS_DP_PIXEL_CLK 108
+#define MDSS_ESC0_CLK 109
+#define MDSS_ESC1_CLK 110
+#define MDSS_HDMI_DP_AHB_CLK 111
+#define MDSS_MDP_CLK 112
+#define MDSS_PCLK0_CLK 113
+#define MDSS_PCLK1_CLK 114
+#define MDSS_ROT_CLK 115
+#define MDSS_VSYNC_CLK 116
+#define MISC_AHB_CLK 117
+#define MISC_CXO_CLK 118
+#define MNOC_AHB_CLK 119
+#define SNOC_DVM_AXI_CLK 120
+#define THROTTLE_CAMSS_AHB_CLK 121
+#define THROTTLE_CAMSS_AXI_CLK 122
+#define THROTTLE_MDSS_AHB_CLK 123
+#define THROTTLE_MDSS_AXI_CLK 124
+#define THROTTLE_VIDEO_AHB_CLK 125
+#define THROTTLE_VIDEO_AXI_CLK 126
+#define VIDEO_AHB_CLK 127
+#define VIDEO_AXI_CLK 128
+#define VIDEO_CORE_CLK 129
+#define VIDEO_SUBCORE0_CLK 130
+#define PCLK0_CLK_SRC 131
+#define PCLK1_CLK_SRC 132
+#define ROT_CLK_SRC 133
+#define VFE0_CLK_SRC 134
+#define VFE1_CLK_SRC 135
+#define VIDEO_CORE_CLK_SRC 136
+#define VSYNC_CLK_SRC 137
+#define MDSS_BYTE1_INTF_DIV_CLK 138
+#define AXI_CLK_SRC 139
+
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define MDSS_GDSC 2
+#define CAMSS_TOP_GDSC 3
+#define CAMSS_VFE0_GDSC 4
+#define CAMSS_VFE1_GDSC 5
+#define CAMSS_CPP_GDSC 6
+#define BIMC_SMMU_GDSC 7
+
+#define CAMSS_MICRO_BCR 0
+
+#endif
+
diff --git a/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
new file mode 100644
index 000000000000..731e404a2ce6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+
+/* ECPRI_CC clocks */
+#define ECPRI_CC_PLL0 0
+#define ECPRI_CC_PLL1 1
+#define ECPRI_CC_ECPRI_CG_CLK 2
+#define ECPRI_CC_ECPRI_CLK_SRC 3
+#define ECPRI_CC_ECPRI_DMA_CLK 4
+#define ECPRI_CC_ECPRI_DMA_CLK_SRC 5
+#define ECPRI_CC_ECPRI_DMA_NOC_CLK 6
+#define ECPRI_CC_ECPRI_FAST_CLK 7
+#define ECPRI_CC_ECPRI_FAST_CLK_SRC 8
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK 9
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK_SRC 10
+#define ECPRI_CC_ECPRI_FAST_DIV2_NOC_CLK 11
+#define ECPRI_CC_ECPRI_FR_CLK 12
+#define ECPRI_CC_ECPRI_ORAN_CLK_SRC 13
+#define ECPRI_CC_ECPRI_ORAN_DIV2_CLK 14
+#define ECPRI_CC_ETH_100G_C2C0_HM_FF_CLK_SRC 15
+#define ECPRI_CC_ETH_100G_C2C0_UDP_FIFO_CLK 16
+#define ECPRI_CC_ETH_100G_C2C1_UDP_FIFO_CLK 17
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_0_CLK 18
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_1_CLK 19
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_0_DIV_CLK_SRC 20
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_1_DIV_CLK_SRC 21
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK 22
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK_SRC 23
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_CLK 24
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_DIV_CLK_SRC 25
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_CLK 26
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_DIV_CLK_SRC 27
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_CLK_SRC 28
+#define ECPRI_CC_ETH_100G_DBG_C2C_UDP_FIFO_CLK 29
+#define ECPRI_CC_ETH_100G_FH0_HM_FF_CLK_SRC 30
+#define ECPRI_CC_ETH_100G_FH0_MACSEC_CLK_SRC 31
+#define ECPRI_CC_ETH_100G_FH1_HM_FF_CLK_SRC 32
+#define ECPRI_CC_ETH_100G_FH1_MACSEC_CLK_SRC 33
+#define ECPRI_CC_ETH_100G_FH2_HM_FF_CLK_SRC 34
+#define ECPRI_CC_ETH_100G_FH2_MACSEC_CLK_SRC 35
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_CLK 36
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_DIV_CLK_SRC 37
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_CLK 38
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_DIV_CLK_SRC 39
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_CLK 40
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_DIV_CLK_SRC 41
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_CLK 42
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_DIV_CLK_SRC 43
+#define ECPRI_CC_ETH_100G_FH_0_UDP_FIFO_CLK 44
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_CLK 45
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_DIV_CLK_SRC 46
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_CLK 47
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_DIV_CLK_SRC 48
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_CLK 49
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_DIV_CLK_SRC 50
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_CLK 51
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_DIV_CLK_SRC 52
+#define ECPRI_CC_ETH_100G_FH_1_UDP_FIFO_CLK 53
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_CLK 54
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_DIV_CLK_SRC 55
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_CLK 56
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_DIV_CLK_SRC 57
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_CLK 58
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_DIV_CLK_SRC 59
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_CLK 60
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_DIV_CLK_SRC 61
+#define ECPRI_CC_ETH_100G_FH_2_UDP_FIFO_CLK 62
+#define ECPRI_CC_ETH_100G_FH_MACSEC_0_CLK 63
+#define ECPRI_CC_ETH_100G_FH_MACSEC_1_CLK 64
+#define ECPRI_CC_ETH_100G_FH_MACSEC_2_CLK 65
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK 66
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK_SRC 67
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK 68
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK_SRC 69
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK 70
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK_SRC 71
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK 72
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK_SRC 73
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK 74
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK_SRC 75
+#define ECPRI_CC_ETH_DBG_NFAPI_AXI_CLK 76
+#define ECPRI_CC_ETH_DBG_NOC_AXI_CLK 77
+#define ECPRI_CC_ETH_PHY_0_OCK_SRAM_CLK 78
+#define ECPRI_CC_ETH_PHY_1_OCK_SRAM_CLK 79
+#define ECPRI_CC_ETH_PHY_2_OCK_SRAM_CLK 80
+#define ECPRI_CC_ETH_PHY_3_OCK_SRAM_CLK 81
+#define ECPRI_CC_ETH_PHY_4_OCK_SRAM_CLK 82
+#define ECPRI_CC_MSS_EMAC_CLK 83
+#define ECPRI_CC_MSS_EMAC_CLK_SRC 84
+#define ECPRI_CC_MSS_ORAN_CLK 85
+#define ECPRI_CC_PHY0_LANE0_RX_CLK 86
+#define ECPRI_CC_PHY0_LANE0_TX_CLK 87
+#define ECPRI_CC_PHY0_LANE1_RX_CLK 88
+#define ECPRI_CC_PHY0_LANE1_TX_CLK 89
+#define ECPRI_CC_PHY0_LANE2_RX_CLK 90
+#define ECPRI_CC_PHY0_LANE2_TX_CLK 91
+#define ECPRI_CC_PHY0_LANE3_RX_CLK 92
+#define ECPRI_CC_PHY0_LANE3_TX_CLK 93
+#define ECPRI_CC_PHY1_LANE0_RX_CLK 94
+#define ECPRI_CC_PHY1_LANE0_TX_CLK 95
+#define ECPRI_CC_PHY1_LANE1_RX_CLK 96
+#define ECPRI_CC_PHY1_LANE1_TX_CLK 97
+#define ECPRI_CC_PHY1_LANE2_RX_CLK 98
+#define ECPRI_CC_PHY1_LANE2_TX_CLK 99
+#define ECPRI_CC_PHY1_LANE3_RX_CLK 100
+#define ECPRI_CC_PHY1_LANE3_TX_CLK 101
+#define ECPRI_CC_PHY2_LANE0_RX_CLK 102
+#define ECPRI_CC_PHY2_LANE0_TX_CLK 103
+#define ECPRI_CC_PHY2_LANE1_RX_CLK 104
+#define ECPRI_CC_PHY2_LANE1_TX_CLK 105
+#define ECPRI_CC_PHY2_LANE2_RX_CLK 106
+#define ECPRI_CC_PHY2_LANE2_TX_CLK 107
+#define ECPRI_CC_PHY2_LANE3_RX_CLK 108
+#define ECPRI_CC_PHY2_LANE3_TX_CLK 109
+#define ECPRI_CC_PHY3_LANE0_RX_CLK 110
+#define ECPRI_CC_PHY3_LANE0_TX_CLK 111
+#define ECPRI_CC_PHY3_LANE1_RX_CLK 112
+#define ECPRI_CC_PHY3_LANE1_TX_CLK 113
+#define ECPRI_CC_PHY3_LANE2_RX_CLK 114
+#define ECPRI_CC_PHY3_LANE2_TX_CLK 115
+#define ECPRI_CC_PHY3_LANE3_RX_CLK 116
+#define ECPRI_CC_PHY3_LANE3_TX_CLK 117
+#define ECPRI_CC_PHY4_LANE0_RX_CLK 118
+#define ECPRI_CC_PHY4_LANE0_TX_CLK 119
+#define ECPRI_CC_PHY4_LANE1_RX_CLK 120
+#define ECPRI_CC_PHY4_LANE1_TX_CLK 121
+#define ECPRI_CC_PHY4_LANE2_RX_CLK 122
+#define ECPRI_CC_PHY4_LANE2_TX_CLK 123
+#define ECPRI_CC_PHY4_LANE3_RX_CLK 124
+#define ECPRI_CC_PHY4_LANE3_TX_CLK 125
+
+/* ECPRI_CC resets */
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR 0
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_C2C_BCR 1
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH0_BCR 2
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH1_BCR 3
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH2_BCR 4
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_WRAPPER_TOP_BCR 5
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR 6
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_NOC_BCR 7
+
+#endif
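Beyond plain consumption, DTS can pin one of these RCG sources to a boot-time rate with assigned-clocks. A hedged fragment; the rate shown is purely illustrative and the surrounding node is omitted, while ECPRI_CC_ECPRI_CLK_SRC is from this header:

    assigned-clocks = <&ecpricc ECPRI_CC_ECPRI_CLK_SRC>;
    assigned-clock-rates = <466500000>; /* rate illustrative */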
diff --git a/include/dt-bindings/clock/qcom,qdu1000-gcc.h b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
new file mode 100644
index 000000000000..2fd36cbfddbb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QDU1000_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL2 3
+#define GCC_GPLL2_OUT_EVEN 4
+#define GCC_GPLL3 5
+#define GCC_GPLL4 6
+#define GCC_GPLL5 7
+#define GCC_GPLL5_OUT_EVEN 8
+#define GCC_GPLL6 9
+#define GCC_GPLL7 10
+#define GCC_GPLL8 11
+#define GCC_AGGRE_NOC_ECPRI_DMA_CLK 12
+#define GCC_AGGRE_NOC_ECPRI_DMA_CLK_SRC 13
+#define GCC_AGGRE_NOC_ECPRI_GSI_CLK_SRC 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CFG_NOC_ECPRI_CC_AHB_CLK 16
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 17
+#define GCC_DDRSS_ECPRI_DMA_CLK 18
+#define GCC_ECPRI_AHB_CLK 19
+#define GCC_ECPRI_CC_GPLL0_CLK_SRC 20
+#define GCC_ECPRI_CC_GPLL1_EVEN_CLK_SRC 21
+#define GCC_ECPRI_CC_GPLL2_EVEN_CLK_SRC 22
+#define GCC_ECPRI_CC_GPLL3_CLK_SRC 23
+#define GCC_ECPRI_CC_GPLL4_CLK_SRC 24
+#define GCC_ECPRI_CC_GPLL5_EVEN_CLK_SRC 25
+#define GCC_ECPRI_XO_CLK 26
+#define GCC_ETH_DBG_SNOC_AXI_CLK 27
+#define GCC_GEMNOC_PCIE_QX_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP1_CLK_SRC 30
+#define GCC_GP2_CLK 31
+#define GCC_GP2_CLK_SRC 32
+#define GCC_GP3_CLK 33
+#define GCC_GP3_CLK_SRC 34
+#define GCC_PCIE_0_AUX_CLK 35
+#define GCC_PCIE_0_AUX_CLK_SRC 36
+#define GCC_PCIE_0_CFG_AHB_CLK 37
+#define GCC_PCIE_0_CLKREF_EN 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_AUX_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PDM2_CLK 46
+#define GCC_PDM2_CLK_SRC 47
+#define GCC_PDM_AHB_CLK 48
+#define GCC_PDM_XO4_CLK 49
+#define GCC_QMIP_ANOC_PCIE_CLK 50
+#define GCC_QMIP_ECPRI_DMA0_CLK 51
+#define GCC_QMIP_ECPRI_DMA1_CLK 52
+#define GCC_QMIP_ECPRI_GSI_CLK 53
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 54
+#define GCC_QUPV3_WRAP0_CORE_CLK 55
+#define GCC_QUPV3_WRAP0_S0_CLK 56
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 57
+#define GCC_QUPV3_WRAP0_S1_CLK 58
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 59
+#define GCC_QUPV3_WRAP0_S2_CLK 60
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 61
+#define GCC_QUPV3_WRAP0_S3_CLK 62
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 63
+#define GCC_QUPV3_WRAP0_S4_CLK 64
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 65
+#define GCC_QUPV3_WRAP0_S5_CLK 66
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 67
+#define GCC_QUPV3_WRAP0_S6_CLK 68
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S7_CLK 70
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 71
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 72
+#define GCC_QUPV3_WRAP1_CORE_CLK 73
+#define GCC_QUPV3_WRAP1_S0_CLK 74
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 75
+#define GCC_QUPV3_WRAP1_S1_CLK 76
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_S2_CLK 78
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_S3_CLK 80
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S4_CLK 82
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S5_CLK 84
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S6_CLK 86
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S7_CLK 88
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 89
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 90
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 91
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 93
+#define GCC_SDCC5_AHB_CLK 94
+#define GCC_SDCC5_APPS_CLK 95
+#define GCC_SDCC5_APPS_CLK_SRC 96
+#define GCC_SDCC5_ICE_CORE_CLK 97
+#define GCC_SDCC5_ICE_CORE_CLK_SRC 98
+#define GCC_SNOC_CNOC_GEMNOC_PCIE_QX_CLK 99
+#define GCC_SNOC_CNOC_GEMNOC_PCIE_SOUTH_QX_CLK 100
+#define GCC_SNOC_CNOC_PCIE_QX_CLK 101
+#define GCC_SNOC_PCIE_SF_CENTER_QX_CLK 102
+#define GCC_SNOC_PCIE_SF_SOUTH_QX_CLK 103
+#define GCC_TSC_CFG_AHB_CLK 104
+#define GCC_TSC_CLK_SRC 105
+#define GCC_TSC_CNTR_CLK 106
+#define GCC_TSC_ETU_CLK 107
+#define GCC_USB2_CLKREF_EN 108
+#define GCC_USB30_PRIM_MASTER_CLK 109
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 110
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 111
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 112
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 113
+#define GCC_USB30_PRIM_SLEEP_CLK 114
+#define GCC_USB3_PRIM_PHY_AUX_CLK 115
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 116
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 117
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 118
+#define GCC_SM_BUS_AHB_CLK 119
+#define GCC_SM_BUS_XO_CLK 120
+#define GCC_SM_BUS_XO_CLK_SRC 121
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 122
+#define GCC_ETH_100G_C2C_HM_APB_CLK 123
+#define GCC_ETH_100G_FH_HM_APB_0_CLK 124
+#define GCC_ETH_100G_FH_HM_APB_1_CLK 125
+#define GCC_ETH_100G_FH_HM_APB_2_CLK 126
+#define GCC_ETH_DBG_C2C_HM_APB_CLK 127
+#define GCC_AGGRE_NOC_ECPRI_GSI_CLK 128
+#define GCC_PCIE_0_PIPE_CLK_SRC 129
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 130
+#define GCC_GPLL1_OUT_EVEN 131
+#define GCC_DDRSS_ECPRI_GSI_CLK 132
+
+/* GCC resets */
+#define GCC_ECPRI_CC_BCR 0
+#define GCC_ECPRI_SS_BCR 1
+#define GCC_ETH_WRAPPER_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_PHY_CFG_AHB_BCR 8
+#define GCC_PCIE_PHY_COM_BCR 9
+#define GCC_PDM_BCR 10
+#define GCC_QUPV3_WRAPPER_0_BCR 11
+#define GCC_QUPV3_WRAPPER_1_BCR 12
+#define GCC_QUSB2PHY_PRIM_BCR 13
+#define GCC_QUSB2PHY_SEC_BCR 14
+#define GCC_SDCC5_BCR 15
+#define GCC_TCSR_PCIE_BCR 16
+#define GCC_TSC_BCR 17
+#define GCC_USB30_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_DP_PHY_SEC_BCR 20
+#define GCC_USB3_PHY_PRIM_BCR 21
+#define GCC_USB3_PHY_SEC_BCR 22
+#define GCC_USB3PHY_PHY_PRIM_BCR 23
+#define GCC_USB3PHY_PHY_SEC_BCR 24
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 25
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define USB30_PRIM_GDSC 2
+
+#endif
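A single GCC header typically feeds all three provider interfaces of one consumer. A hedged PCIe fragment, heavily trimmed; everything except the three indices is an assumption:

    pcie@1c00000 {                      /* node and address hypothetical */
            clocks = <&gcc GCC_PCIE_0_AUX_CLK>;
            resets = <&gcc GCC_PCIE_0_BCR>;
            power-domains = <&gcc PCIE_0_GDSC>;
    };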
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 8aaba7cd9589..46309c9953b2 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -149,5 +149,26 @@
#define RPM_SMD_CE2_A_CLK 103
#define RPM_SMD_CE3_CLK 104
#define RPM_SMD_CE3_A_CLK 105
+#define RPM_SMD_QUP_CLK 106
+#define RPM_SMD_QUP_A_CLK 107
+#define RPM_SMD_MMRT_CLK 108
+#define RPM_SMD_MMRT_A_CLK 109
+#define RPM_SMD_MMNRT_CLK 110
+#define RPM_SMD_MMNRT_A_CLK 111
+#define RPM_SMD_SNOC_PERIPH_CLK 112
+#define RPM_SMD_SNOC_PERIPH_A_CLK 113
+#define RPM_SMD_SNOC_LPASS_CLK 114
+#define RPM_SMD_SNOC_LPASS_A_CLK 115
+#define RPM_SMD_HWKM_CLK 116
+#define RPM_SMD_HWKM_A_CLK 117
+#define RPM_SMD_PKA_CLK 118
+#define RPM_SMD_PKA_A_CLK 119
+#define RPM_SMD_CPUSS_GNOC_CLK 120
+#define RPM_SMD_CPUSS_GNOC_A_CLK 121
+#define RPM_SMD_MSS_CFG_AHB_CLK 122
+#define RPM_SMD_MSS_CFG_AHB_A_CLK 123
+#define RPM_SMD_BIMC_FREQ_LOG 124
+#define RPM_SMD_LN_BB_CLK_PIN 125
+#define RPM_SMD_LN_BB_A_CLK_PIN 126

#endif
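The additions keep the rpmcc convention of pairing each clock with an active-only twin (the ..._A_CLK entries), which RPM is asked to keep running only while the application processor is awake; consumers usually vote on both. A hedged fragment with the surrounding node omitted:

    clocks = <&rpmcc RPM_SMD_QUP_CLK>, <&rpmcc RPM_SMD_QUP_A_CLK>;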
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 2e6c54e65455..0a7d1be0d124 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -21,5 +21,17 @@
#define RPMH_IPA_CLK 12
#define RPMH_LN_BB_CLK1 13
#define RPMH_LN_BB_CLK1_A 14
+#define RPMH_CE_CLK 15
+#define RPMH_QPIC_CLK 16
+#define RPMH_DIV_CLK1 17
+#define RPMH_DIV_CLK1_A 18
+#define RPMH_RF_CLK4 19
+#define RPMH_RF_CLK4_A 20
+#define RPMH_RF_CLK5 21
+#define RPMH_RF_CLK5_A 22
+#define RPMH_PKA_CLK 23
+#define RPMH_HWKM_CLK 24
+#define RPMH_QLINK_CLK 25
+#define RPMH_QLINK_CLK_A 26

#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-gcc.h b/include/dt-bindings/clock/qcom,sa8775p-gcc.h
new file mode 100644
index 000000000000..01f54234963d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-gcc.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL4 3
+#define GCC_GPLL5 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_QUPV3_AXI_CLK 7
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 9
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 10
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 11
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 12
+#define GCC_AHB2PHY0_CLK 13
+#define GCC_AHB2PHY2_CLK 14
+#define GCC_AHB2PHY3_CLK 15
+#define GCC_BOOT_ROM_AHB_CLK 16
+#define GCC_CAMERA_AHB_CLK 17
+#define GCC_CAMERA_HF_AXI_CLK 18
+#define GCC_CAMERA_SF_AXI_CLK 19
+#define GCC_CAMERA_THROTTLE_XO_CLK 20
+#define GCC_CAMERA_XO_CLK 21
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DISP1_AHB_CLK 26
+#define GCC_DISP1_HF_AXI_CLK 27
+#define GCC_DISP1_XO_CLK 28
+#define GCC_DISP_AHB_CLK 29
+#define GCC_DISP_HF_AXI_CLK 30
+#define GCC_DISP_XO_CLK 31
+#define GCC_EDP_REF_CLKREF_EN 32
+#define GCC_EMAC0_AXI_CLK 33
+#define GCC_EMAC0_PHY_AUX_CLK 34
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 35
+#define GCC_EMAC0_PTP_CLK 36
+#define GCC_EMAC0_PTP_CLK_SRC 37
+#define GCC_EMAC0_RGMII_CLK 38
+#define GCC_EMAC0_RGMII_CLK_SRC 39
+#define GCC_EMAC0_SLV_AHB_CLK 40
+#define GCC_EMAC1_AXI_CLK 41
+#define GCC_EMAC1_PHY_AUX_CLK 42
+#define GCC_EMAC1_PHY_AUX_CLK_SRC 43
+#define GCC_EMAC1_PTP_CLK 44
+#define GCC_EMAC1_PTP_CLK_SRC 45
+#define GCC_EMAC1_RGMII_CLK 46
+#define GCC_EMAC1_RGMII_CLK_SRC 47
+#define GCC_EMAC1_SLV_AHB_CLK 48
+#define GCC_GP1_CLK 49
+#define GCC_GP1_CLK_SRC 50
+#define GCC_GP2_CLK 51
+#define GCC_GP2_CLK_SRC 52
+#define GCC_GP3_CLK 53
+#define GCC_GP3_CLK_SRC 54
+#define GCC_GP4_CLK 55
+#define GCC_GP4_CLK_SRC 56
+#define GCC_GP5_CLK 57
+#define GCC_GP5_CLK_SRC 58
+#define GCC_GPU_CFG_AHB_CLK 59
+#define GCC_GPU_GPLL0_CLK_SRC 60
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 61
+#define GCC_GPU_MEMNOC_GFX_CLK 62
+#define GCC_GPU_SNOC_DVM_GFX_CLK 63
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 64
+#define GCC_GPU_TCU_THROTTLE_CLK 65
+#define GCC_PCIE_0_AUX_CLK 66
+#define GCC_PCIE_0_AUX_CLK_SRC 67
+#define GCC_PCIE_0_CFG_AHB_CLK 68
+#define GCC_PCIE_0_MSTR_AXI_CLK 69
+#define GCC_PCIE_0_PHY_AUX_CLK 70
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 71
+#define GCC_PCIE_0_PHY_RCHNG_CLK 72
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 73
+#define GCC_PCIE_0_PIPE_CLK 74
+#define GCC_PCIE_0_PIPE_CLK_SRC 75
+#define GCC_PCIE_0_PIPE_DIV_CLK_SRC 76
+#define GCC_PCIE_0_PIPEDIV2_CLK 77
+#define GCC_PCIE_0_SLV_AXI_CLK 78
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 79
+#define GCC_PCIE_1_AUX_CLK 80
+#define GCC_PCIE_1_AUX_CLK_SRC 81
+#define GCC_PCIE_1_CFG_AHB_CLK 82
+#define GCC_PCIE_1_MSTR_AXI_CLK 83
+#define GCC_PCIE_1_PHY_AUX_CLK 84
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 85
+#define GCC_PCIE_1_PHY_RCHNG_CLK 86
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 87
+#define GCC_PCIE_1_PIPE_CLK 88
+#define GCC_PCIE_1_PIPE_CLK_SRC 89
+#define GCC_PCIE_1_PIPE_DIV_CLK_SRC 90
+#define GCC_PCIE_1_PIPEDIV2_CLK 91
+#define GCC_PCIE_1_SLV_AXI_CLK 92
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 93
+#define GCC_PCIE_CLKREF_EN 94
+#define GCC_PCIE_THROTTLE_CFG_CLK 95
+#define GCC_PDM2_CLK 96
+#define GCC_PDM2_CLK_SRC 97
+#define GCC_PDM_AHB_CLK 98
+#define GCC_PDM_XO4_CLK 99
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 100
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 101
+#define GCC_QMIP_DISP1_AHB_CLK 102
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 103
+#define GCC_QMIP_DISP_AHB_CLK 104
+#define GCC_QMIP_DISP_ROT_AHB_CLK 105
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 106
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 107
+#define GCC_QMIP_VIDEO_VCPU_AHB_CLK 108
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 109
+#define GCC_QUPV3_WRAP0_CORE_CLK 110
+#define GCC_QUPV3_WRAP0_S0_CLK 111
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 112
+#define GCC_QUPV3_WRAP0_S1_CLK 113
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 114
+#define GCC_QUPV3_WRAP0_S2_CLK 115
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 116
+#define GCC_QUPV3_WRAP0_S3_CLK 117
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 118
+#define GCC_QUPV3_WRAP0_S4_CLK 119
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 120
+#define GCC_QUPV3_WRAP0_S5_CLK 121
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 122
+#define GCC_QUPV3_WRAP0_S6_CLK 123
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 124
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 125
+#define GCC_QUPV3_WRAP1_CORE_CLK 126
+#define GCC_QUPV3_WRAP1_S0_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 128
+#define GCC_QUPV3_WRAP1_S1_CLK 129
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 130
+#define GCC_QUPV3_WRAP1_S2_CLK 131
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 132
+#define GCC_QUPV3_WRAP1_S3_CLK 133
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 134
+#define GCC_QUPV3_WRAP1_S4_CLK 135
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 136
+#define GCC_QUPV3_WRAP1_S5_CLK 137
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 138
+#define GCC_QUPV3_WRAP1_S6_CLK 139
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 140
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 141
+#define GCC_QUPV3_WRAP2_CORE_CLK 142
+#define GCC_QUPV3_WRAP2_S0_CLK 143
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 144
+#define GCC_QUPV3_WRAP2_S1_CLK 145
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 146
+#define GCC_QUPV3_WRAP2_S2_CLK 147
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 148
+#define GCC_QUPV3_WRAP2_S3_CLK 149
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 150
+#define GCC_QUPV3_WRAP2_S4_CLK 151
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 152
+#define GCC_QUPV3_WRAP2_S5_CLK 153
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 154
+#define GCC_QUPV3_WRAP2_S6_CLK 155
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 156
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 157
+#define GCC_QUPV3_WRAP3_CORE_CLK 158
+#define GCC_QUPV3_WRAP3_QSPI_CLK 159
+#define GCC_QUPV3_WRAP3_S0_CLK 160
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 161
+#define GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC 162
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 163
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 164
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 165
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 166
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 167
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 168
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 169
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 170
+#define GCC_SDCC1_AHB_CLK 171
+#define GCC_SDCC1_APPS_CLK 172
+#define GCC_SDCC1_APPS_CLK_SRC 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 175
+#define GCC_SGMI_CLKREF_EN 176
+#define GCC_TSCSS_AHB_CLK 177
+#define GCC_TSCSS_CNTR_CLK_SRC 178
+#define GCC_TSCSS_ETU_CLK 179
+#define GCC_TSCSS_GLOBAL_CNTR_CLK 180
+#define GCC_UFS_CARD_AHB_CLK 181
+#define GCC_UFS_CARD_AXI_CLK 182
+#define GCC_UFS_CARD_AXI_CLK_SRC 183
+#define GCC_UFS_CARD_ICE_CORE_CLK 184
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 185
+#define GCC_UFS_CARD_PHY_AUX_CLK 186
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 187
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 188
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 189
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 190
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 191
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 192
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 193
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 194
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 195
+#define GCC_UFS_PHY_AHB_CLK 196
+#define GCC_UFS_PHY_AXI_CLK 197
+#define GCC_UFS_PHY_AXI_CLK_SRC 198
+#define GCC_UFS_PHY_ICE_CORE_CLK 199
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 200
+#define GCC_UFS_PHY_PHY_AUX_CLK 201
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 202
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 203
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 204
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 205
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 206
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 207
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 208
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 209
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 210
+#define GCC_USB20_MASTER_CLK 211
+#define GCC_USB20_MASTER_CLK_SRC 212
+#define GCC_USB20_MOCK_UTMI_CLK 213
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 214
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 215
+#define GCC_USB20_SLEEP_CLK 216
+#define GCC_USB30_PRIM_MASTER_CLK 217
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 218
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 219
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 220
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 221
+#define GCC_USB30_PRIM_SLEEP_CLK 222
+#define GCC_USB30_SEC_MASTER_CLK 223
+#define GCC_USB30_SEC_MASTER_CLK_SRC 224
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 225
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 226
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 227
+#define GCC_USB30_SEC_SLEEP_CLK 228
+#define GCC_USB3_PRIM_PHY_AUX_CLK 229
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 230
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 231
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 232
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 233
+#define GCC_USB3_SEC_PHY_AUX_CLK 234
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 235
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 236
+#define GCC_USB3_SEC_PHY_PIPE_CLK 237
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 238
+#define GCC_USB_CLKREF_EN 239
+#define GCC_VIDEO_AHB_CLK 240
+#define GCC_VIDEO_AXI0_CLK 241
+#define GCC_VIDEO_AXI1_CLK 242
+#define GCC_VIDEO_XO_CLK 243
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 244
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 245
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 246
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 248
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY1_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_EMAC0_BCR 3
+#define GCC_EMAC1_BCR 4
+#define GCC_GPU_BCR 5
+#define GCC_MMSS_BCR 6
+#define GCC_PCIE_0_BCR 7
+#define GCC_PCIE_0_LINK_DOWN_BCR 8
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 9
+#define GCC_PCIE_0_PHY_BCR 10
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_BCR 12
+#define GCC_PCIE_1_LINK_DOWN_BCR 13
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_1_PHY_BCR 15
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_0_BCR 18
+#define GCC_QUPV3_WRAPPER_1_BCR 19
+#define GCC_QUPV3_WRAPPER_2_BCR 20
+#define GCC_QUPV3_WRAPPER_3_BCR 21
+#define GCC_SDCC1_BCR 22
+#define GCC_TSCSS_BCR 23
+#define GCC_UFS_CARD_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB20_PRIM_BCR 26
+#define GCC_USB2_PHY_PRIM_BCR 27
+#define GCC_USB2_PHY_SEC_BCR 28
+#define GCC_USB30_PRIM_BCR 29
+#define GCC_USB30_SEC_BCR 30
+#define GCC_USB3_DP_PHY_PRIM_BCR 31
+#define GCC_USB3_DP_PHY_SEC_BCR 32
+#define GCC_USB3_PHY_PRIM_BCR 33
+#define GCC_USB3_PHY_SEC_BCR 34
+#define GCC_USB3_PHY_TERT_BCR 35
+#define GCC_USB3_UNIPHY_MP0_BCR 36
+#define GCC_USB3_UNIPHY_MP1_BCR 37
+#define GCC_USB3PHY_PHY_PRIM_BCR 38
+#define GCC_USB3PHY_PHY_SEC_BCR 39
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 40
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 41
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 42
+#define GCC_VIDEO_BCR 43
+#define GCC_VIDEO_AXI0_CLK_ARES 44
+#define GCC_VIDEO_AXI1_CLK_ARES 45
+
+/* GCC GDSCs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB20_PRIM_GDSC 4
+#define USB30_PRIM_GDSC 5
+#define USB30_SEC_GDSC 6
+#define EMAC0_GDSC 7
+#define EMAC1_GDSC 8
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H */
diff --git a/include/dt-bindings/clock/qcom,sa8775p-gpucc.h b/include/dt-bindings/clock/qcom,sa8775p-gpucc.h
new file mode 100644
index 000000000000..a5fd784b1ea2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-gpucc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPUCC_SA8775P_H
+#define _DT_BINDINGS_CLK_QCOM_GPUCC_SA8775P_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_FF_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CX_SNOC_DVM_CLK 7
+#define GPU_CC_CXO_AON_CLK 8
+#define GPU_CC_CXO_CLK 9
+#define GPU_CC_DEMET_CLK 10
+#define GPU_CC_DEMET_DIV_CLK_SRC 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_GMU_CLK_SRC 13
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 15
+#define GPU_CC_HUB_AON_CLK 16
+#define GPU_CC_HUB_CLK_SRC 17
+#define GPU_CC_HUB_CX_INT_CLK 18
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 19
+#define GPU_CC_MEMNOC_GFX_CLK 20
+#define GPU_CC_SLEEP_CLK 21
+#define GPU_CC_XO_CLK_SRC 22
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_FF_BCR 4
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 5
+#define GPUCC_GPU_CC_GMU_BCR 6
+#define GPUCC_GPU_CC_GX_BCR 7
+#define GPUCC_GPU_CC_XO_BCR 8
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GPUCC_SA8775P_H */
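Sub-controllers such as GPU_CC are themselves clock consumers: their sources hang off GCC and the always-on XO, so the provider node mixes indices from several headers. A hedged provider sketch; the address, reg size and clock order are assumptions, while the two GCC indices appear in the sa8775p GCC header above and RPMH_CXO_CLK in qcom,rpmh.h:

    gpucc: clock-controller@3d90000 {   /* address hypothetical */
            compatible = "qcom,sa8775p-gpucc"; /* assumed naming */
            reg = <0x03d90000 0xa000>;  /* size illustrative */
            clocks = <&rpmhcc RPMH_CXO_CLK>,
                     <&gcc GCC_GPU_GPLL0_CLK_SRC>,
                     <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
            #clock-cells = <1>;
            #reset-cells = <1>;
            #power-domain-cells = <1>;
    };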
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-camcc.h b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
new file mode 100644
index 000000000000..ea5ec73c8c6a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+#define __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+
+/* CAMCC clocks */
+#define CAMCC_PLL0 0
+#define CAMCC_PLL0_OUT_EVEN 1
+#define CAMCC_PLL0_OUT_ODD 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL3 6
+#define CAMCC_PLL3_OUT_EVEN 7
+#define CAMCC_PLL4 8
+#define CAMCC_PLL4_OUT_EVEN 9
+#define CAMCC_PLL5 10
+#define CAMCC_PLL5_OUT_EVEN 11
+#define CAMCC_PLL6 12
+#define CAMCC_PLL6_OUT_EVEN 13
+#define CAMCC_PLL7 14
+#define CAMCC_PLL7_OUT_EVEN 15
+#define CAMCC_PLL7_OUT_ODD 16
+#define CAMCC_BPS_AHB_CLK 17
+#define CAMCC_BPS_AREG_CLK 18
+#define CAMCC_BPS_AXI_CLK 19
+#define CAMCC_BPS_CLK 20
+#define CAMCC_BPS_CLK_SRC 21
+#define CAMCC_CAMNOC_AXI_CLK 22
+#define CAMCC_CAMNOC_AXI_CLK_SRC 23
+#define CAMCC_CAMNOC_DCD_XO_CLK 24
+#define CAMCC_CCI_0_CLK 25
+#define CAMCC_CCI_0_CLK_SRC 26
+#define CAMCC_CCI_1_CLK 27
+#define CAMCC_CCI_1_CLK_SRC 28
+#define CAMCC_CCI_2_CLK 29
+#define CAMCC_CCI_2_CLK_SRC 30
+#define CAMCC_CCI_3_CLK 31
+#define CAMCC_CCI_3_CLK_SRC 32
+#define CAMCC_CORE_AHB_CLK 33
+#define CAMCC_CPAS_AHB_CLK 34
+#define CAMCC_CPHY_RX_CLK_SRC 35
+#define CAMCC_CSI0PHYTIMER_CLK 36
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 37
+#define CAMCC_CSI1PHYTIMER_CLK 38
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 39
+#define CAMCC_CSI2PHYTIMER_CLK 40
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 41
+#define CAMCC_CSI3PHYTIMER_CLK 42
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 43
+#define CAMCC_CSIPHY0_CLK 44
+#define CAMCC_CSIPHY1_CLK 45
+#define CAMCC_CSIPHY2_CLK 46
+#define CAMCC_CSIPHY3_CLK 47
+#define CAMCC_FAST_AHB_CLK_SRC 48
+#define CAMCC_GDSC_CLK 49
+#define CAMCC_ICP_AHB_CLK 50
+#define CAMCC_ICP_CLK 51
+#define CAMCC_ICP_CLK_SRC 52
+#define CAMCC_IFE_0_AXI_CLK 53
+#define CAMCC_IFE_0_CLK 54
+#define CAMCC_IFE_0_CLK_SRC 55
+#define CAMCC_IFE_0_CPHY_RX_CLK 56
+#define CAMCC_IFE_0_CSID_CLK 57
+#define CAMCC_IFE_0_CSID_CLK_SRC 58
+#define CAMCC_IFE_0_DSP_CLK 59
+#define CAMCC_IFE_1_AXI_CLK 60
+#define CAMCC_IFE_1_CLK 61
+#define CAMCC_IFE_1_CLK_SRC 62
+#define CAMCC_IFE_1_CPHY_RX_CLK 63
+#define CAMCC_IFE_1_CSID_CLK 64
+#define CAMCC_IFE_1_CSID_CLK_SRC 65
+#define CAMCC_IFE_1_DSP_CLK 66
+#define CAMCC_IFE_2_AXI_CLK 67
+#define CAMCC_IFE_2_CLK 68
+#define CAMCC_IFE_2_CLK_SRC 69
+#define CAMCC_IFE_2_CPHY_RX_CLK 70
+#define CAMCC_IFE_2_CSID_CLK 71
+#define CAMCC_IFE_2_CSID_CLK_SRC 72
+#define CAMCC_IFE_2_DSP_CLK 73
+#define CAMCC_IFE_3_AXI_CLK 74
+#define CAMCC_IFE_3_CLK 75
+#define CAMCC_IFE_3_CLK_SRC 76
+#define CAMCC_IFE_3_CPHY_RX_CLK 77
+#define CAMCC_IFE_3_CSID_CLK 78
+#define CAMCC_IFE_3_CSID_CLK_SRC 79
+#define CAMCC_IFE_3_DSP_CLK 80
+#define CAMCC_IFE_LITE_0_CLK 81
+#define CAMCC_IFE_LITE_0_CLK_SRC 82
+#define CAMCC_IFE_LITE_0_CPHY_RX_CLK 83
+#define CAMCC_IFE_LITE_0_CSID_CLK 84
+#define CAMCC_IFE_LITE_0_CSID_CLK_SRC 85
+#define CAMCC_IFE_LITE_1_CLK 86
+#define CAMCC_IFE_LITE_1_CLK_SRC 87
+#define CAMCC_IFE_LITE_1_CPHY_RX_CLK 88
+#define CAMCC_IFE_LITE_1_CSID_CLK 89
+#define CAMCC_IFE_LITE_1_CSID_CLK_SRC 90
+#define CAMCC_IFE_LITE_2_CLK 91
+#define CAMCC_IFE_LITE_2_CLK_SRC 92
+#define CAMCC_IFE_LITE_2_CPHY_RX_CLK 93
+#define CAMCC_IFE_LITE_2_CSID_CLK 94
+#define CAMCC_IFE_LITE_2_CSID_CLK_SRC 95
+#define CAMCC_IFE_LITE_3_CLK 96
+#define CAMCC_IFE_LITE_3_CLK_SRC 97
+#define CAMCC_IFE_LITE_3_CPHY_RX_CLK 98
+#define CAMCC_IFE_LITE_3_CSID_CLK 99
+#define CAMCC_IFE_LITE_3_CSID_CLK_SRC 100
+#define CAMCC_IPE_0_AHB_CLK 101
+#define CAMCC_IPE_0_AREG_CLK 102
+#define CAMCC_IPE_0_AXI_CLK 103
+#define CAMCC_IPE_0_CLK 104
+#define CAMCC_IPE_0_CLK_SRC 105
+#define CAMCC_IPE_1_AHB_CLK 106
+#define CAMCC_IPE_1_AREG_CLK 107
+#define CAMCC_IPE_1_AXI_CLK 108
+#define CAMCC_IPE_1_CLK 109
+#define CAMCC_JPEG_CLK 110
+#define CAMCC_JPEG_CLK_SRC 111
+#define CAMCC_LRME_CLK 112
+#define CAMCC_LRME_CLK_SRC 113
+#define CAMCC_MCLK0_CLK 114
+#define CAMCC_MCLK0_CLK_SRC 115
+#define CAMCC_MCLK1_CLK 116
+#define CAMCC_MCLK1_CLK_SRC 117
+#define CAMCC_MCLK2_CLK 118
+#define CAMCC_MCLK2_CLK_SRC 119
+#define CAMCC_MCLK3_CLK 120
+#define CAMCC_MCLK3_CLK_SRC 121
+#define CAMCC_MCLK4_CLK 122
+#define CAMCC_MCLK4_CLK_SRC 123
+#define CAMCC_MCLK5_CLK 124
+#define CAMCC_MCLK5_CLK_SRC 125
+#define CAMCC_MCLK6_CLK 126
+#define CAMCC_MCLK6_CLK_SRC 127
+#define CAMCC_MCLK7_CLK 128
+#define CAMCC_MCLK7_CLK_SRC 129
+#define CAMCC_SLEEP_CLK 130
+#define CAMCC_SLEEP_CLK_SRC 131
+#define CAMCC_SLOW_AHB_CLK_SRC 132
+#define CAMCC_XO_CLK_SRC 133
+
+/* CAMCC resets */
+#define CAMCC_BPS_BCR 0
+#define CAMCC_CAMNOC_BCR 1
+#define CAMCC_CCI_BCR 2
+#define CAMCC_CPAS_BCR 3
+#define CAMCC_CSI0PHY_BCR 4
+#define CAMCC_CSI1PHY_BCR 5
+#define CAMCC_CSI2PHY_BCR 6
+#define CAMCC_CSI3PHY_BCR 7
+#define CAMCC_ICP_BCR 8
+#define CAMCC_IFE_0_BCR 9
+#define CAMCC_IFE_1_BCR 10
+#define CAMCC_IFE_2_BCR 11
+#define CAMCC_IFE_3_BCR 12
+#define CAMCC_IFE_LITE_0_BCR 13
+#define CAMCC_IFE_LITE_1_BCR 14
+#define CAMCC_IFE_LITE_2_BCR 15
+#define CAMCC_IFE_LITE_3_BCR 16
+#define CAMCC_IPE_0_BCR 17
+#define CAMCC_IPE_1_BCR 18
+#define CAMCC_JPEG_BCR 19
+#define CAMCC_LRME_BCR 20
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+#endif /* __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__ */
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h b/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h
new file mode 100644
index 000000000000..d190d57fc81a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASSCC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_LPASSCC_SC8280XP_H
+
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_WSA_CGCR 1
+#define LPASS_AUDIO_SWR_WSA2_CGCR 2
+
+/* LPASS TCSR */
+#define LPASS_AUDIO_SWR_TX_CGCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sdx75-gcc.h b/include/dt-bindings/clock/qcom,sdx75-gcc.h
new file mode 100644
index 000000000000..a470e8c4fd41
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sdx75-gcc.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL4 2
+#define GPLL5 3
+#define GPLL6 4
+#define GPLL8 5
+#define GCC_AHB_PCIE_LINK_CLK 6
+#define GCC_BOOT_ROM_AHB_CLK 7
+#define GCC_EEE_EMAC0_CLK 8
+#define GCC_EEE_EMAC0_CLK_SRC 9
+#define GCC_EEE_EMAC1_CLK 10
+#define GCC_EEE_EMAC1_CLK_SRC 11
+#define GCC_EMAC0_AXI_CLK 12
+#define GCC_EMAC0_CC_SGMIIPHY_RX_CLK 13
+#define GCC_EMAC0_CC_SGMIIPHY_RX_CLK_SRC 14
+#define GCC_EMAC0_CC_SGMIIPHY_TX_CLK 15
+#define GCC_EMAC0_CC_SGMIIPHY_TX_CLK_SRC 16
+#define GCC_EMAC0_PHY_AUX_CLK 17
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 18
+#define GCC_EMAC0_PTP_CLK 19
+#define GCC_EMAC0_PTP_CLK_SRC 20
+#define GCC_EMAC0_RGMII_CLK 21
+#define GCC_EMAC0_RGMII_CLK_SRC 22
+#define GCC_EMAC0_RPCS_RX_CLK 23
+#define GCC_EMAC0_RPCS_TX_CLK 24
+#define GCC_EMAC0_SGMIIPHY_MAC_RCLK_SRC 25
+#define GCC_EMAC0_SGMIIPHY_MAC_TCLK_SRC 26
+#define GCC_EMAC0_SLV_AHB_CLK 27
+#define GCC_EMAC0_XGXS_RX_CLK 28
+#define GCC_EMAC0_XGXS_TX_CLK 29
+#define GCC_EMAC1_AXI_CLK 30
+#define GCC_EMAC1_CC_SGMIIPHY_RX_CLK 31
+#define GCC_EMAC1_CC_SGMIIPHY_RX_CLK_SRC 32
+#define GCC_EMAC1_CC_SGMIIPHY_TX_CLK 33
+#define GCC_EMAC1_CC_SGMIIPHY_TX_CLK_SRC 34
+#define GCC_EMAC1_PHY_AUX_CLK 35
+#define GCC_EMAC1_PHY_AUX_CLK_SRC 36
+#define GCC_EMAC1_PTP_CLK 37
+#define GCC_EMAC1_PTP_CLK_SRC 38
+#define GCC_EMAC1_RGMII_CLK 39
+#define GCC_EMAC1_RGMII_CLK_SRC 40
+#define GCC_EMAC1_RPCS_RX_CLK 41
+#define GCC_EMAC1_RPCS_TX_CLK 42
+#define GCC_EMAC1_SGMIIPHY_MAC_RCLK_SRC 43
+#define GCC_EMAC1_SGMIIPHY_MAC_TCLK_SRC 44
+#define GCC_EMAC1_SLV_AHB_CLK 45
+#define GCC_EMAC1_XGXS_RX_CLK 46
+#define GCC_EMAC1_XGXS_TX_CLK 47
+#define GCC_EMAC_0_CLKREF_EN 48
+#define GCC_EMAC_1_CLKREF_EN 49
+#define GCC_GP1_CLK 50
+#define GCC_GP1_CLK_SRC 51
+#define GCC_GP2_CLK 52
+#define GCC_GP2_CLK_SRC 53
+#define GCC_GP3_CLK 54
+#define GCC_GP3_CLK_SRC 55
+#define GCC_PCIE_0_CLKREF_EN 56
+#define GCC_PCIE_1_AUX_CLK 57
+#define GCC_PCIE_1_AUX_PHY_CLK_SRC 58
+#define GCC_PCIE_1_CFG_AHB_CLK 59
+#define GCC_PCIE_1_CLKREF_EN 60
+#define GCC_PCIE_1_MSTR_AXI_CLK 61
+#define GCC_PCIE_1_PHY_RCHNG_CLK 62
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 63
+#define GCC_PCIE_1_PIPE_CLK 64
+#define GCC_PCIE_1_PIPE_CLK_SRC 65
+#define GCC_PCIE_1_PIPE_DIV2_CLK 66
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 67
+#define GCC_PCIE_1_SLV_AXI_CLK 68
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 69
+#define GCC_PCIE_2_AUX_CLK 70
+#define GCC_PCIE_2_AUX_PHY_CLK_SRC 71
+#define GCC_PCIE_2_CFG_AHB_CLK 72
+#define GCC_PCIE_2_CLKREF_EN 73
+#define GCC_PCIE_2_MSTR_AXI_CLK 74
+#define GCC_PCIE_2_PHY_RCHNG_CLK 75
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 76
+#define GCC_PCIE_2_PIPE_CLK 77
+#define GCC_PCIE_2_PIPE_CLK_SRC 78
+#define GCC_PCIE_2_PIPE_DIV2_CLK 79
+#define GCC_PCIE_2_PIPE_DIV2_CLK_SRC 80
+#define GCC_PCIE_2_SLV_AXI_CLK 81
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 82
+#define GCC_PCIE_AUX_CLK 83
+#define GCC_PCIE_AUX_CLK_SRC 84
+#define GCC_PCIE_AUX_PHY_CLK_SRC 85
+#define GCC_PCIE_CFG_AHB_CLK 86
+#define GCC_PCIE_MSTR_AXI_CLK 87
+#define GCC_PCIE_PIPE_CLK 88
+#define GCC_PCIE_PIPE_CLK_SRC 89
+#define GCC_PCIE_RCHNG_PHY_CLK 90
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 91
+#define GCC_PCIE_SLEEP_CLK 92
+#define GCC_PCIE_SLV_AXI_CLK 93
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 94
+#define GCC_PDM2_CLK 95
+#define GCC_PDM2_CLK_SRC 96
+#define GCC_PDM_AHB_CLK 97
+#define GCC_PDM_XO4_CLK 98
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 99
+#define GCC_QUPV3_WRAP0_CORE_CLK 100
+#define GCC_QUPV3_WRAP0_S0_CLK 101
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S1_CLK 103
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S2_CLK 105
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 106
+#define GCC_QUPV3_WRAP0_S3_CLK 107
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 108
+#define GCC_QUPV3_WRAP0_S4_CLK 109
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 110
+#define GCC_QUPV3_WRAP0_S5_CLK 111
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 112
+#define GCC_QUPV3_WRAP0_S6_CLK 113
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 114
+#define GCC_QUPV3_WRAP0_S7_CLK 115
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 116
+#define GCC_QUPV3_WRAP0_S8_CLK 117
+#define GCC_QUPV3_WRAP0_S8_CLK_SRC 118
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 119
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 120
+#define GCC_SDCC1_AHB_CLK 121
+#define GCC_SDCC1_APPS_CLK 122
+#define GCC_SDCC1_APPS_CLK_SRC 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_SDCC2_APPS_CLK_SRC 126
+#define GCC_USB2_CLKREF_EN 127
+#define GCC_USB30_MASTER_CLK 128
+#define GCC_USB30_MASTER_CLK_SRC 129
+#define GCC_USB30_MOCK_UTMI_CLK 130
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC 132
+#define GCC_USB30_MSTR_AXI_CLK 133
+#define GCC_USB30_SLEEP_CLK 134
+#define GCC_USB30_SLV_AHB_CLK 135
+#define GCC_USB3_PHY_AUX_CLK 136
+#define GCC_USB3_PHY_AUX_CLK_SRC 137
+#define GCC_USB3_PHY_PIPE_CLK 138
+#define GCC_USB3_PHY_PIPE_CLK_SRC 139
+#define GCC_USB3_PRIM_CLKREF_EN 140
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 141
+#define GCC_XO_PCIE_LINK_CLK 142
+
+/* GCC power domains */
+#define GCC_EMAC0_GDSC 0
+#define GCC_EMAC1_GDSC 1
+#define GCC_PCIE_1_GDSC 2
+#define GCC_PCIE_1_PHY_GDSC 3
+#define GCC_PCIE_2_GDSC 4
+#define GCC_PCIE_2_PHY_GDSC 5
+#define GCC_PCIE_GDSC 6
+#define GCC_PCIE_PHY_GDSC 7
+#define GCC_USB30_GDSC 8
+#define GCC_USB3_PHY_GDSC 9
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_EMMC_BCR 2
+#define GCC_PCIE_1_BCR 3
+#define GCC_PCIE_1_LINK_DOWN_BCR 4
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_1_PHY_BCR 6
+#define GCC_PCIE_2_BCR 7
+#define GCC_PCIE_2_LINK_DOWN_BCR 8
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 9
+#define GCC_PCIE_2_PHY_BCR 10
+#define GCC_PCIE_BCR 11
+#define GCC_PCIE_LINK_DOWN_BCR 12
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_BCR 14
+#define GCC_PCIE_PHY_CFG_AHB_BCR 15
+#define GCC_PCIE_PHY_COM_BCR 16
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_QUSB2PHY_BCR 18
+#define GCC_TCSR_PCIE_BCR 19
+#define GCC_USB30_BCR 20
+#define GCC_USB3_PHY_BCR 21
+#define GCC_USB3PHY_PHY_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_EMAC0_RGMII_CLK_ARES 24
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H */
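
Clock, power-domain, and reset indices are all resolved against the same
GCC node. A hypothetical SDCC consumer sketch; the node name, unit
address, and the pairing of GCC_EMMC_BCR with SDCC1 are assumptions for
illustration:

    #include <dt-bindings/clock/qcom,sdx75-gcc.h>

    mmc@8804000 {
            reg = <0x08804000 0x1000>;
            /* bus interface clock and the card clock */
            clocks = <&gcc GCC_SDCC1_AHB_CLK>,
                     <&gcc GCC_SDCC1_APPS_CLK>;
            clock-names = "iface", "core";
            /* block reset for the eMMC controller (assumed pairing) */
            resets = <&gcc GCC_EMMC_BCR>;
    };
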
diff --git a/include/dt-bindings/clock/qcom,sm4450-gcc.h b/include/dt-bindings/clock/qcom,sm4450-gcc.h
new file mode 100644
index 000000000000..c18e47a86f40
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gcc.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_SLEEP_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_EUSB3_0_CLKREF_EN 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPLL0 24
+#define GCC_GPLL0_OUT_EVEN 25
+#define GCC_GPLL0_OUT_ODD 26
+#define GCC_GPLL1 27
+#define GCC_GPLL3 28
+#define GCC_GPLL4 29
+#define GCC_GPLL9 30
+#define GCC_GPLL10 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_CLK 37
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_CLK 38
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_CLK 39
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_CLK 40
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF0_CLK 41
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF1_CLK 42
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_SF0_CLK 43
+#define GCC_HLOS1_VOTE_MMU_TCU_CLK 44
+#define GCC_PCIE_0_AUX_CLK 45
+#define GCC_PCIE_0_AUX_CLK_SRC 46
+#define GCC_PCIE_0_CFG_AHB_CLK 47
+#define GCC_PCIE_0_CLKREF_EN 48
+#define GCC_PCIE_0_MSTR_AXI_CLK 49
+#define GCC_PCIE_0_PHY_RCHNG_CLK 50
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_0_PIPE_CLK 52
+#define GCC_PCIE_0_PIPE_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_DIV2_CLK 54
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 80
+#define GCC_QUPV3_WRAP1_CORE_CLK 81
+#define GCC_QUPV3_WRAP1_S0_CLK 82
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S1_CLK 84
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S2_CLK 86
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S3_CLK 88
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S4_CLK 90
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 91
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95
+#define GCC_SDCC1_AHB_CLK 96
+#define GCC_SDCC1_APPS_CLK 97
+#define GCC_SDCC1_APPS_CLK_SRC 98
+#define GCC_SDCC1_ICE_CORE_CLK 99
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC2_APPS_CLK_SRC 103
+#define GCC_UFS_0_CLKREF_EN 104
+#define GCC_UFS_PAD_CLKREF_EN 105
+#define GCC_UFS_PHY_AHB_CLK 106
+#define GCC_UFS_PHY_AXI_CLK 107
+#define GCC_UFS_PHY_AXI_CLK_SRC 108
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 109
+#define GCC_UFS_PHY_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 111
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 112
+#define GCC_UFS_PHY_PHY_AUX_CLK 113
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 114
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 115
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 116
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 122
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 123
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 124
+#define GCC_USB30_PRIM_MASTER_CLK 125
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 127
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 128
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 129
+#define GCC_USB30_PRIM_SLEEP_CLK 130
+#define GCC_USB3_0_CLKREF_EN 131
+#define GCC_USB3_PRIM_PHY_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 133
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 134
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 135
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 136
+#define GCC_VCODEC0_AXI_CLK 137
+#define GCC_VENUS_CTL_AXI_CLK 138
+#define GCC_VIDEO_AHB_CLK 139
+#define GCC_VIDEO_THROTTLE_CORE_CLK 140
+#define GCC_VIDEO_VCODEC0_SYS_CLK 141
+#define GCC_VIDEO_VENUS_CLK_SRC 142
+#define GCC_VIDEO_VENUS_CTL_CLK 143
+#define GCC_VIDEO_XO_CLK 144
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_PHY_BCR 8
+#define GCC_PCIE_PHY_CFG_AHB_BCR 9
+#define GCC_PCIE_PHY_COM_BCR 10
+#define GCC_PDM_BCR 11
+#define GCC_QUPV3_WRAPPER_0_BCR 12
+#define GCC_QUPV3_WRAPPER_1_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_QUSB2PHY_SEC_BCR 15
+#define GCC_SDCC1_BCR 16
+#define GCC_SDCC2_BCR 17
+#define GCC_UFS_PHY_BCR 18
+#define GCC_USB30_PRIM_BCR 19
+#define GCC_USB3_DP_PHY_PRIM_BCR 20
+#define GCC_USB3_DP_PHY_SEC_BCR 21
+#define GCC_USB3_PHY_PRIM_BCR 22
+#define GCC_USB3_PHY_SEC_BCR 23
+#define GCC_USB3PHY_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_SEC_BCR 25
+#define GCC_VCODEC0_BCR 26
+#define GCC_VENUS_BCR 27
+#define GCC_VIDEO_BCR 28
+#define GCC_VIDEO_VENUS_BCR 29
+#define GCC_VENUS_CTL_AXI_CLK_ARES 30
+#define GCC_VIDEO_VENUS_CTL_CLK_ARES 31
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H */
diff --git a/include/dt-bindings/clock/qcom,sm6115-dispcc.h b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
new file mode 100644
index 000000000000..d1a6c45b5029
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL0_OUT_MAIN 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT_CLK 12
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 15
+#define DISP_CC_MDSS_ROT_CLK 16
+#define DISP_CC_MDSS_ROT_CLK_SRC 17
+#define DISP_CC_MDSS_VSYNC_CLK 18
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 19
+#define DISP_CC_SLEEP_CLK 20
+#define DISP_CC_SLEEP_CLK_SRC 21
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif /* _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H */
diff --git a/include/dt-bindings/clock/qcom,sm6115-gpucc.h b/include/dt-bindings/clock/qcom,sm6115-gpucc.h
new file mode 100644
index 000000000000..945f21a7d745
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-gpucc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6115_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_AUX2 1
+#define GPU_CC_PLL1 2
+#define GPU_CC_PLL1_OUT_AUX 3
+#define GPU_CC_AHB_CLK 4
+#define GPU_CC_CRC_AHB_CLK 5
+#define GPU_CC_CX_GFX3D_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_CXO_CLK 12
+#define GPU_CC_GX_GFX3D_CLK 13
+#define GPU_CC_GX_GFX3D_CLK_SRC 14
+#define GPU_CC_SLEEP_CLK 15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16
+
+/* Resets */
+#define GPU_GX_BCR 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6115_H */
diff --git a/include/dt-bindings/clock/qcom,sm6125-gpucc.h b/include/dt-bindings/clock/qcom,sm6125-gpucc.h
new file mode 100644
index 000000000000..ce5bd920f2c4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6125-gpucc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6125_H
+
+/* Clocks */
+#define GPU_CC_PLL0_OUT_AUX2 0
+#define GPU_CC_PLL1_OUT_AUX2 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_APB_CLK 3
+#define GPU_CC_CX_GFX3D_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_SLEEP_CLK 10
+#define GPU_CC_GX_GFX3D_CLK 11
+#define GPU_CC_GX_GFX3D_CLK_SRC 12
+#define GPU_CC_AHB_CLK 13
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6125_H */
diff --git a/include/dt-bindings/clock/qcom,sm6350-camcc.h b/include/dt-bindings/clock/qcom,sm6350-camcc.h
new file mode 100644
index 000000000000..c6bcdc8fd485
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6350-camcc.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAMCC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_CAMCC_SM6350_H
+
+/* CAMCC clocks */
+#define CAMCC_PLL2_OUT_EARLY 0
+#define CAMCC_PLL0 1
+#define CAMCC_PLL0_OUT_EVEN 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL2_OUT_MAIN 6
+#define CAMCC_PLL3 7
+#define CAMCC_BPS_AHB_CLK 8
+#define CAMCC_BPS_AREG_CLK 9
+#define CAMCC_BPS_AXI_CLK 10
+#define CAMCC_BPS_CLK 11
+#define CAMCC_BPS_CLK_SRC 12
+#define CAMCC_CAMNOC_ATB_CLK 13
+#define CAMCC_CAMNOC_AXI_CLK 14
+#define CAMCC_CCI_0_CLK 15
+#define CAMCC_CCI_0_CLK_SRC 16
+#define CAMCC_CCI_1_CLK 17
+#define CAMCC_CCI_1_CLK_SRC 18
+#define CAMCC_CORE_AHB_CLK 19
+#define CAMCC_CPAS_AHB_CLK 20
+#define CAMCC_CPHY_RX_CLK_SRC 21
+#define CAMCC_CSI0PHYTIMER_CLK 22
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 23
+#define CAMCC_CSI1PHYTIMER_CLK 24
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 25
+#define CAMCC_CSI2PHYTIMER_CLK 26
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 27
+#define CAMCC_CSI3PHYTIMER_CLK 28
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 29
+#define CAMCC_CSIPHY0_CLK 30
+#define CAMCC_CSIPHY1_CLK 31
+#define CAMCC_CSIPHY2_CLK 32
+#define CAMCC_CSIPHY3_CLK 33
+#define CAMCC_FAST_AHB_CLK_SRC 34
+#define CAMCC_ICP_APB_CLK 35
+#define CAMCC_ICP_ATB_CLK 36
+#define CAMCC_ICP_CLK 37
+#define CAMCC_ICP_CLK_SRC 38
+#define CAMCC_ICP_CTI_CLK 39
+#define CAMCC_ICP_TS_CLK 40
+#define CAMCC_IFE_0_AXI_CLK 41
+#define CAMCC_IFE_0_CLK 42
+#define CAMCC_IFE_0_CLK_SRC 43
+#define CAMCC_IFE_0_CPHY_RX_CLK 44
+#define CAMCC_IFE_0_CSID_CLK 45
+#define CAMCC_IFE_0_CSID_CLK_SRC 46
+#define CAMCC_IFE_0_DSP_CLK 47
+#define CAMCC_IFE_1_AXI_CLK 48
+#define CAMCC_IFE_1_CLK 49
+#define CAMCC_IFE_1_CLK_SRC 50
+#define CAMCC_IFE_1_CPHY_RX_CLK 51
+#define CAMCC_IFE_1_CSID_CLK 52
+#define CAMCC_IFE_1_CSID_CLK_SRC 53
+#define CAMCC_IFE_1_DSP_CLK 54
+#define CAMCC_IFE_2_AXI_CLK 55
+#define CAMCC_IFE_2_CLK 56
+#define CAMCC_IFE_2_CLK_SRC 57
+#define CAMCC_IFE_2_CPHY_RX_CLK 58
+#define CAMCC_IFE_2_CSID_CLK 59
+#define CAMCC_IFE_2_CSID_CLK_SRC 60
+#define CAMCC_IFE_2_DSP_CLK 61
+#define CAMCC_IFE_LITE_CLK 62
+#define CAMCC_IFE_LITE_CLK_SRC 63
+#define CAMCC_IFE_LITE_CPHY_RX_CLK 64
+#define CAMCC_IFE_LITE_CSID_CLK 65
+#define CAMCC_IFE_LITE_CSID_CLK_SRC 66
+#define CAMCC_IPE_0_AHB_CLK 67
+#define CAMCC_IPE_0_AREG_CLK 68
+#define CAMCC_IPE_0_AXI_CLK 69
+#define CAMCC_IPE_0_CLK 70
+#define CAMCC_IPE_0_CLK_SRC 71
+#define CAMCC_JPEG_CLK 72
+#define CAMCC_JPEG_CLK_SRC 73
+#define CAMCC_LRME_CLK 74
+#define CAMCC_LRME_CLK_SRC 75
+#define CAMCC_MCLK0_CLK 76
+#define CAMCC_MCLK0_CLK_SRC 77
+#define CAMCC_MCLK1_CLK 78
+#define CAMCC_MCLK1_CLK_SRC 79
+#define CAMCC_MCLK2_CLK 80
+#define CAMCC_MCLK2_CLK_SRC 81
+#define CAMCC_MCLK3_CLK 82
+#define CAMCC_MCLK3_CLK_SRC 83
+#define CAMCC_MCLK4_CLK 84
+#define CAMCC_MCLK4_CLK_SRC 85
+#define CAMCC_SLOW_AHB_CLK_SRC 86
+#define CAMCC_SOC_AHB_CLK 87
+#define CAMCC_SYS_TMR_CLK 88
+
+/* GDSCs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define IFE_0_GDSC 2
+#define IFE_1_GDSC 3
+#define IFE_2_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif /* _DT_BINDINGS_CLK_QCOM_CAMCC_SM6350_H */
diff --git a/include/dt-bindings/clock/qcom,sm6375-dispcc.h b/include/dt-bindings/clock/qcom,sm6375-dispcc.h
new file mode 100644
index 000000000000..1cb0bed004bd
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-dispcc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H
+
+/* Clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP_CLK 9
+#define DISP_CC_MDSS_MDP_CLK_SRC 10
+#define DISP_CC_MDSS_MDP_LUT_CLK 11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12
+#define DISP_CC_MDSS_PCLK0_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 14
+#define DISP_CC_MDSS_ROT_CLK 15
+#define DISP_CC_MDSS_ROT_CLK_SRC 16
+#define DISP_CC_MDSS_RSCC_AHB_CLK 17
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 18
+#define DISP_CC_MDSS_VSYNC_CLK 19
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 20
+#define DISP_CC_SLEEP_CLK 21
+#define DISP_CC_XO_CLK 22
+
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+#endif /* _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H */
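
Clocks, the two BCR resets, and MDSS_GDSC are all provided by the same
DISP_CC node, so one phandle covers all three specifier types. A
hypothetical MDSS consumer sketch (label and unit address are
illustrative):

    #include <dt-bindings/clock/qcom,sm6375-dispcc.h>

    display-subsystem@5e00000 {
            reg = <0x05e00000 0x1000>;
            clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
                     <&dispcc DISP_CC_MDSS_MDP_CLK>,
                     <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
            clock-names = "iface", "core", "vsync";
            resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
            power-domains = <&dispcc MDSS_GDSC>;
    };
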
diff --git a/include/dt-bindings/clock/qcom,sm6375-gcc.h b/include/dt-bindings/clock/qcom,sm6375-gcc.h
new file mode 100644
index 000000000000..1e9801e1cedf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-gcc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+
+/* Clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL1 3
+#define GPLL10 4
+#define GPLL11 5
+#define GPLL3 6
+#define GPLL3_OUT_EVEN 7
+#define GPLL4 8
+#define GPLL5 9
+#define GPLL6 10
+#define GPLL6_OUT_EVEN 11
+#define GPLL7 12
+#define GPLL8 13
+#define GPLL8_OUT_EVEN 14
+#define GPLL9 15
+#define GPLL9_OUT_MAIN 16
+#define GCC_AHB2PHY_CSI_CLK 17
+#define GCC_AHB2PHY_USB_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_0_CLK_SRC 30
+#define GCC_CAMSS_CCI_1_CLK 31
+#define GCC_CAMSS_CCI_1_CLK_SRC 32
+#define GCC_CAMSS_CPHY_0_CLK 33
+#define GCC_CAMSS_CPHY_1_CLK 34
+#define GCC_CAMSS_CPHY_2_CLK 35
+#define GCC_CAMSS_CPHY_3_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 37
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 38
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 39
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 40
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 41
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 42
+#define GCC_CAMSS_CSI3PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI3PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_MCLK0_CLK 45
+#define GCC_CAMSS_MCLK0_CLK_SRC 46
+#define GCC_CAMSS_MCLK1_CLK 47
+#define GCC_CAMSS_MCLK1_CLK_SRC 48
+#define GCC_CAMSS_MCLK2_CLK 49
+#define GCC_CAMSS_MCLK2_CLK_SRC 50
+#define GCC_CAMSS_MCLK3_CLK 51
+#define GCC_CAMSS_MCLK3_CLK_SRC 52
+#define GCC_CAMSS_MCLK4_CLK 53
+#define GCC_CAMSS_MCLK4_CLK_SRC 54
+#define GCC_CAMSS_NRT_AXI_CLK 55
+#define GCC_CAMSS_OPE_AHB_CLK 56
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 57
+#define GCC_CAMSS_OPE_CLK 58
+#define GCC_CAMSS_OPE_CLK_SRC 59
+#define GCC_CAMSS_RT_AXI_CLK 60
+#define GCC_CAMSS_TFE_0_CLK 61
+#define GCC_CAMSS_TFE_0_CLK_SRC 62
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 63
+#define GCC_CAMSS_TFE_0_CSID_CLK 64
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 65
+#define GCC_CAMSS_TFE_1_CLK 66
+#define GCC_CAMSS_TFE_1_CLK_SRC 67
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 68
+#define GCC_CAMSS_TFE_1_CSID_CLK 69
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 70
+#define GCC_CAMSS_TFE_2_CLK 71
+#define GCC_CAMSS_TFE_2_CLK_SRC 72
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 73
+#define GCC_CAMSS_TFE_2_CSID_CLK 74
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 75
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 76
+#define GCC_CAMSS_TOP_AHB_CLK 77
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 78
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 79
+#define GCC_CPUSS_AHB_CLK_SRC 80
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 81
+#define GCC_CPUSS_GNOC_CLK 82
+#define GCC_DISP_AHB_CLK 83
+#define GCC_DISP_GPLL0_CLK_SRC 84
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 85
+#define GCC_DISP_HF_AXI_CLK 86
+#define GCC_DISP_SLEEP_CLK 87
+#define GCC_DISP_THROTTLE_CORE_CLK 88
+#define GCC_DISP_XO_CLK 89
+#define GCC_GP1_CLK 90
+#define GCC_GP1_CLK_SRC 91
+#define GCC_GP2_CLK 92
+#define GCC_GP2_CLK_SRC 93
+#define GCC_GP3_CLK 94
+#define GCC_GP3_CLK_SRC 95
+#define GCC_GPU_CFG_AHB_CLK 96
+#define GCC_GPU_GPLL0_CLK_SRC 97
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 98
+#define GCC_GPU_MEMNOC_GFX_CLK 99
+#define GCC_GPU_SNOC_DVM_GFX_CLK 100
+#define GCC_GPU_THROTTLE_CORE_CLK 101
+#define GCC_PDM2_CLK 102
+#define GCC_PDM2_CLK_SRC 103
+#define GCC_PDM_AHB_CLK 104
+#define GCC_PDM_XO4_CLK 105
+#define GCC_PRNG_AHB_CLK 106
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 107
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 108
+#define GCC_QMIP_DISP_AHB_CLK 109
+#define GCC_QMIP_GPU_CFG_AHB_CLK 110
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 111
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 112
+#define GCC_QUPV3_WRAP0_CORE_CLK 113
+#define GCC_QUPV3_WRAP0_S0_CLK 114
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 115
+#define GCC_QUPV3_WRAP0_S1_CLK 116
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 117
+#define GCC_QUPV3_WRAP0_S2_CLK 118
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 119
+#define GCC_QUPV3_WRAP0_S3_CLK 120
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 121
+#define GCC_QUPV3_WRAP0_S4_CLK 122
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 123
+#define GCC_QUPV3_WRAP0_S5_CLK 124
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 125
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 126
+#define GCC_QUPV3_WRAP1_CORE_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK 128
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 129
+#define GCC_QUPV3_WRAP1_S1_CLK 130
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 131
+#define GCC_QUPV3_WRAP1_S2_CLK 132
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 133
+#define GCC_QUPV3_WRAP1_S3_CLK 134
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 135
+#define GCC_QUPV3_WRAP1_S4_CLK 136
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 137
+#define GCC_QUPV3_WRAP1_S5_CLK 138
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 139
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 142
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 143
+#define GCC_RX5_PCIE_CLKREF_EN_CLK 144
+#define GCC_SDCC1_AHB_CLK 145
+#define GCC_SDCC1_APPS_CLK 146
+#define GCC_SDCC1_APPS_CLK_SRC 147
+#define GCC_SDCC1_ICE_CORE_CLK 148
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 149
+#define GCC_SDCC2_AHB_CLK 150
+#define GCC_SDCC2_APPS_CLK 151
+#define GCC_SDCC2_APPS_CLK_SRC 152
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 153
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 154
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 155
+#define GCC_UFS_MEM_CLKREF_CLK 156
+#define GCC_UFS_PHY_AHB_CLK 157
+#define GCC_UFS_PHY_AXI_CLK 158
+#define GCC_UFS_PHY_AXI_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_CLK 160
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161
+#define GCC_UFS_PHY_PHY_AUX_CLK 162
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_USB30_PRIM_MASTER_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 169
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172
+#define GCC_USB30_PRIM_SLEEP_CLK 173
+#define GCC_USB3_PRIM_CLKREF_CLK 174
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 177
+#define GCC_VCODEC0_AXI_CLK 178
+#define GCC_VENUS_AHB_CLK 179
+#define GCC_VENUS_CTL_AXI_CLK 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_THROTTLE_CORE_CLK 183
+#define GCC_VIDEO_VCODEC0_SYS_CLK 184
+#define GCC_VIDEO_VENUS_CLK_SRC 185
+#define GCC_VIDEO_VENUS_CTL_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+
+/* Resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_PRNG_BCR 6
+#define GCC_QUPV3_WRAPPER_0_BCR 7
+#define GCC_QUPV3_WRAPPER_1_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_QUSB2PHY_SEC_BCR 10
+#define GCC_SDCC1_BCR 11
+#define GCC_SDCC2_BCR 12
+#define GCC_UFS_PHY_BCR 13
+#define GCC_USB30_PRIM_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+#define GCC_VCODEC0_BCR 16
+#define GCC_VENUS_BCR 17
+#define GCC_VIDEO_INTERFACE_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_PRIM_SP0_BCR 20
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_TOP_GDSC 2
+#define VENUS_GDSC 3
+#define VCODEC0_GDSC 4
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 8
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H */
diff --git a/include/dt-bindings/clock/qcom,sm6375-gpucc.h b/include/dt-bindings/clock/qcom,sm6375-gpucc.h
new file mode 100644
index 000000000000..0887ac03825e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-gpucc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H
+
+/* GPU CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CX_GFX3D_CLK 3
+#define GPU_CC_CX_GFX3D_SLV_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_GX_CXO_CLK 10
+#define GPU_CC_GX_GFX3D_CLK 11
+#define GPU_CC_GX_GFX3D_CLK_SRC 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_SLEEP_CLK 14
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+/* Resets */
+#define GPU_GX_BCR 0
+#define GPU_ACD_BCR 1
+#define GPU_GX_ACD_MISC_BCR 2
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H */
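
On the provider side, the controller node advertises one cell per
specifier so consumers can pass these indices directly. A hypothetical
provider sketch (the compatible string, label, and register range are
assumptions, not taken from this patch):

    gpucc: clock-controller@5990000 {
            compatible = "qcom,sm6375-gpucc";
            reg = <0x05990000 0x9000>;
            #clock-cells = <1>;
            #reset-cells = <1>;
            #power-domain-cells = <1>;
    };

Consumers then index it with the macros above, e.g.
clocks = <&gpucc GPU_CC_CX_GMU_CLK>;.
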
diff --git a/include/dt-bindings/clock/qcom,sm7150-gcc.h b/include/dt-bindings/clock/qcom,sm7150-gcc.h
new file mode 100644
index 000000000000..7719ffc86139
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-gcc.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Danila Tikhonov <danila@jiaxyga.com>
+ * Copyright (c) 2023, David Wronek <davidwronek@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM7150_H
+
+/* GCC clock registers */
+#define GCC_GPLL0_MAIN_DIV_CDIV 0
+#define GPLL0 1
+#define GPLL0_OUT_EVEN 2
+#define GPLL6 3
+#define GPLL7 4
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 5
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 6
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 7
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 8
+#define GCC_APC_VS_CLK 9
+#define GCC_BOOT_ROM_AHB_CLK 10
+#define GCC_CAMERA_HF_AXI_CLK 11
+#define GCC_CAMERA_SF_AXI_CLK 12
+#define GCC_CE1_AHB_CLK 13
+#define GCC_CE1_AXI_CLK 14
+#define GCC_CE1_CLK 15
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 16
+#define GCC_CPUSS_AHB_CLK 17
+#define GCC_CPUSS_AHB_CLK_SRC 18
+#define GCC_CPUSS_RBCPR_CLK 19
+#define GCC_CPUSS_RBCPR_CLK_SRC 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_GPLL0_CLK_SRC 22
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_SF_AXI_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_GPU_VS_CLK 36
+#define GCC_NPU_AXI_CLK 37
+#define GCC_NPU_CFG_AHB_CLK 38
+#define GCC_NPU_GPLL0_CLK_SRC 39
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_PHY_AUX_CLK 49
+#define GCC_PCIE_PHY_REFGEN_CLK 50
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 51
+#define GCC_PDM2_CLK 52
+#define GCC_PDM2_CLK_SRC 53
+#define GCC_PDM_AHB_CLK 54
+#define GCC_PDM_XO4_CLK 55
+#define GCC_PRNG_AHB_CLK 56
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 57
+#define GCC_QUPV3_WRAP0_CORE_CLK 58
+#define GCC_QUPV3_WRAP0_S0_CLK 59
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 60
+#define GCC_QUPV3_WRAP0_S1_CLK 61
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 62
+#define GCC_QUPV3_WRAP0_S2_CLK 63
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 64
+#define GCC_QUPV3_WRAP0_S3_CLK 65
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 66
+#define GCC_QUPV3_WRAP0_S4_CLK 67
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 68
+#define GCC_QUPV3_WRAP0_S5_CLK 69
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 70
+#define GCC_QUPV3_WRAP0_S6_CLK 71
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S7_CLK 73
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 74
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 75
+#define GCC_QUPV3_WRAP1_CORE_CLK 76
+#define GCC_QUPV3_WRAP1_S0_CLK 77
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 78
+#define GCC_QUPV3_WRAP1_S1_CLK 79
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 80
+#define GCC_QUPV3_WRAP1_S2_CLK 81
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 82
+#define GCC_QUPV3_WRAP1_S3_CLK 83
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 84
+#define GCC_QUPV3_WRAP1_S4_CLK 85
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S5_CLK 87
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S6_CLK 89
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S7_CLK 91
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 92
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 93
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 95
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 96
+#define GCC_SDCC1_AHB_CLK 97
+#define GCC_SDCC1_APPS_CLK 98
+#define GCC_SDCC1_APPS_CLK_SRC 99
+#define GCC_SDCC1_ICE_CORE_CLK 100
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 101
+#define GCC_SDCC2_AHB_CLK 102
+#define GCC_SDCC2_APPS_CLK 103
+#define GCC_SDCC2_APPS_CLK_SRC 104
+#define GCC_SDCC4_AHB_CLK 105
+#define GCC_SDCC4_APPS_CLK 106
+#define GCC_SDCC4_APPS_CLK_SRC 107
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 108
+#define GCC_TSIF_AHB_CLK 109
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 110
+#define GCC_TSIF_REF_CLK 111
+#define GCC_TSIF_REF_CLK_SRC 112
+#define GCC_UFS_MEM_CLKREF_CLK 113
+#define GCC_UFS_PHY_AHB_CLK 114
+#define GCC_UFS_PHY_AXI_CLK 115
+#define GCC_UFS_PHY_AXI_CLK_SRC 116
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 117
+#define GCC_UFS_PHY_ICE_CORE_CLK 118
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 119
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 120
+#define GCC_UFS_PHY_PHY_AUX_CLK 121
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 123
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 124
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 126
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK 129
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 131
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK 135
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 136
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 137
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 138
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 139
+#define GCC_VDDA_VS_CLK 140
+#define GCC_VDDCX_VS_CLK 141
+#define GCC_VDDMX_VS_CLK 142
+#define GCC_VIDEO_AXI_CLK 143
+#define GCC_VS_CTRL_AHB_CLK 144
+#define GCC_VS_CTRL_CLK 145
+#define GCC_VS_CTRL_CLK_SRC 146
+#define GCC_VSENSOR_CLK_SRC 147
+
+/* GCC Resets */
+#define GCC_PCIE_0_BCR 0
+#define GCC_PCIE_PHY_BCR 1
+#define GCC_PCIE_PHY_COM_BCR 2
+#define GCC_UFS_PHY_BCR 3
+#define GCC_USB30_PRIM_BCR 4
+#define GCC_USB3_DP_PHY_PRIM_BCR 5
+#define GCC_USB3_DP_PHY_SEC_BCR 6
+#define GCC_USB3_PHY_PRIM_BCR 7
+#define GCC_USB3_PHY_SEC_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_VIDEO_AXI_CLK_BCR 10
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define UFS_PHY_GDSC 1
+#define USB30_PRIM_GDSC 2
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 3
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 4
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 7
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 9
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SM7150_H */
diff --git a/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h b/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h
new file mode 100644
index 000000000000..f5a1cfac8612
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H
+#define _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H
+
+/* from AOCC */
+#define LPASS_CDC_VA_MCLK 0
+#define LPASS_CDC_TX_NPL 1
+#define LPASS_CDC_TX_MCLK 2
+
+#endif /* _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H */
diff --git a/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h b/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h
new file mode 100644
index 000000000000..a1aa6cb5d840
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H
+#define _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H
+
+/* From AudioCC */
+#define LPASS_CDC_WSA_NPL 0
+#define LPASS_CDC_WSA_MCLK 1
+#define LPASS_CDC_RX_MCLK 2
+#define LPASS_CDC_RX_NPL 3
+#define LPASS_CDC_RX_MCLK_MCLK2 4
+
+#endif /* _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H */
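
The AONCC and AudioCC headers above define overlapping index values
(both start at 0), which is fine because each index is only meaningful
behind its own provider phandle. A hypothetical codec consumer sketch
(the &audiocc label and node are illustrative):

    #include <dt-bindings/clock/qcom,sm8250-lpass-audiocc.h>

    wsa-codec {
            /* WSA macro clock and its NPL companion from the AudioCC */
            clocks = <&audiocc LPASS_CDC_WSA_MCLK>,
                     <&audiocc LPASS_CDC_WSA_NPL>;
            clock-names = "mclk", "npl";
    };
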
diff --git a/include/dt-bindings/clock/qcom,sm8350-videocc.h b/include/dt-bindings/clock/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..b6945a448676
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8350-videocc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+
+/* Clocks */
+#define VIDEO_CC_AHB_CLK_SRC 0
+#define VIDEO_CC_MVS0_CLK 1
+#define VIDEO_CC_MVS0_CLK_SRC 2
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 3
+#define VIDEO_CC_MVS0C_CLK 4
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS1_CLK 6
+#define VIDEO_CC_MVS1_CLK_SRC 7
+#define VIDEO_CC_MVS1_DIV2_CLK 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_SLEEP_CLK 12
+#define VIDEO_CC_SLEEP_CLK_SRC 13
+#define VIDEO_CC_XO_CLK_SRC 14
+#define VIDEO_PLL0 15
+#define VIDEO_PLL1 16
+
+/* GDSCs */
+#define MVS0C_GDSC 0
+#define MVS1C_GDSC 1
+#define MVS0_GDSC 2
+#define MVS1_GDSC 3
+
+#endif /* _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H */
diff --git a/include/dt-bindings/clock/qcom,sm8450-camcc.h b/include/dt-bindings/clock/qcom,sm8450-camcc.h
new file mode 100644
index 000000000000..7ff67acf301a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-camcc.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CCI_0_CLK 7
+#define CAM_CC_CCI_0_CLK_SRC 8
+#define CAM_CC_CCI_1_CLK 9
+#define CAM_CC_CCI_1_CLK_SRC 10
+#define CAM_CC_CORE_AHB_CLK 11
+#define CAM_CC_CPAS_AHB_CLK 12
+#define CAM_CC_CPAS_BPS_CLK 13
+#define CAM_CC_CPAS_FAST_AHB_CLK 14
+#define CAM_CC_CPAS_IFE_0_CLK 15
+#define CAM_CC_CPAS_IFE_1_CLK 16
+#define CAM_CC_CPAS_IFE_2_CLK 17
+#define CAM_CC_CPAS_IFE_LITE_CLK 18
+#define CAM_CC_CPAS_IPE_NPS_CLK 19
+#define CAM_CC_CPAS_SBI_CLK 20
+#define CAM_CC_CPAS_SFE_0_CLK 21
+#define CAM_CC_CPAS_SFE_1_CLK 22
+#define CAM_CC_CPHY_RX_CLK_SRC 23
+#define CAM_CC_CSI0PHYTIMER_CLK 24
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 25
+#define CAM_CC_CSI1PHYTIMER_CLK 26
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI2PHYTIMER_CLK 28
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI3PHYTIMER_CLK 30
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI4PHYTIMER_CLK 32
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI5PHYTIMER_CLK 34
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSID_CLK 36
+#define CAM_CC_CSID_CLK_SRC 37
+#define CAM_CC_CSID_CSIPHY_RX_CLK 38
+#define CAM_CC_CSIPHY0_CLK 39
+#define CAM_CC_CSIPHY1_CLK 40
+#define CAM_CC_CSIPHY2_CLK 41
+#define CAM_CC_CSIPHY3_CLK 42
+#define CAM_CC_CSIPHY4_CLK 43
+#define CAM_CC_CSIPHY5_CLK 44
+#define CAM_CC_FAST_AHB_CLK_SRC 45
+#define CAM_CC_GDSC_CLK 46
+#define CAM_CC_ICP_AHB_CLK 47
+#define CAM_CC_ICP_CLK 48
+#define CAM_CC_ICP_CLK_SRC 49
+#define CAM_CC_IFE_0_CLK 50
+#define CAM_CC_IFE_0_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_0_FAST_AHB_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_DSP_CLK 56
+#define CAM_CC_IFE_1_FAST_AHB_CLK 57
+#define CAM_CC_IFE_2_CLK 58
+#define CAM_CC_IFE_2_CLK_SRC 59
+#define CAM_CC_IFE_2_DSP_CLK 60
+#define CAM_CC_IFE_2_FAST_AHB_CLK 61
+#define CAM_CC_IFE_LITE_AHB_CLK 62
+#define CAM_CC_IFE_LITE_CLK 63
+#define CAM_CC_IFE_LITE_CLK_SRC 64
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 65
+#define CAM_CC_IFE_LITE_CSID_CLK 66
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 67
+#define CAM_CC_IPE_NPS_AHB_CLK 68
+#define CAM_CC_IPE_NPS_CLK 69
+#define CAM_CC_IPE_NPS_CLK_SRC 70
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 71
+#define CAM_CC_IPE_PPS_CLK 72
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 73
+#define CAM_CC_JPEG_CLK 74
+#define CAM_CC_JPEG_CLK_SRC 75
+#define CAM_CC_MCLK0_CLK 76
+#define CAM_CC_MCLK0_CLK_SRC 77
+#define CAM_CC_MCLK1_CLK 78
+#define CAM_CC_MCLK1_CLK_SRC 79
+#define CAM_CC_MCLK2_CLK 80
+#define CAM_CC_MCLK2_CLK_SRC 81
+#define CAM_CC_MCLK3_CLK 82
+#define CAM_CC_MCLK3_CLK_SRC 83
+#define CAM_CC_MCLK4_CLK 84
+#define CAM_CC_MCLK4_CLK_SRC 85
+#define CAM_CC_MCLK5_CLK 86
+#define CAM_CC_MCLK5_CLK_SRC 87
+#define CAM_CC_MCLK6_CLK 88
+#define CAM_CC_MCLK6_CLK_SRC 89
+#define CAM_CC_MCLK7_CLK 90
+#define CAM_CC_MCLK7_CLK_SRC 91
+#define CAM_CC_PLL0 92
+#define CAM_CC_PLL0_OUT_EVEN 93
+#define CAM_CC_PLL0_OUT_ODD 94
+#define CAM_CC_PLL1 95
+#define CAM_CC_PLL1_OUT_EVEN 96
+#define CAM_CC_PLL2 97
+#define CAM_CC_PLL3 98
+#define CAM_CC_PLL3_OUT_EVEN 99
+#define CAM_CC_PLL4 100
+#define CAM_CC_PLL4_OUT_EVEN 101
+#define CAM_CC_PLL5 102
+#define CAM_CC_PLL5_OUT_EVEN 103
+#define CAM_CC_PLL6 104
+#define CAM_CC_PLL6_OUT_EVEN 105
+#define CAM_CC_PLL7 106
+#define CAM_CC_PLL7_OUT_EVEN 107
+#define CAM_CC_PLL8 108
+#define CAM_CC_PLL8_OUT_EVEN 109
+#define CAM_CC_QDSS_DEBUG_CLK 110
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 111
+#define CAM_CC_QDSS_DEBUG_XO_CLK 112
+#define CAM_CC_SBI_AHB_CLK 113
+#define CAM_CC_SBI_CLK 114
+#define CAM_CC_SFE_0_CLK 115
+#define CAM_CC_SFE_0_CLK_SRC 116
+#define CAM_CC_SFE_0_FAST_AHB_CLK 117
+#define CAM_CC_SFE_1_CLK 118
+#define CAM_CC_SFE_1_CLK_SRC 119
+#define CAM_CC_SFE_1_FAST_AHB_CLK 120
+#define CAM_CC_SLEEP_CLK 121
+#define CAM_CC_SLEEP_CLK_SRC 122
+#define CAM_CC_SLOW_AHB_CLK_SRC 123
+#define CAM_CC_XO_CLK_SRC 124
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IFE_2_BCR 4
+#define CAM_CC_IPE_0_BCR 5
+#define CAM_CC_QDSS_DEBUG_BCR 6
+#define CAM_CC_SBI_BCR 7
+#define CAM_CC_SFE_0_BCR 8
+#define CAM_CC_SFE_1_BCR 9
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define SBI_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define IFE_2_GDSC 5
+#define SFE_0_GDSC 6
+#define SFE_1_GDSC 7
+#define TITAN_TOP_GDSC 8
+
+#endif /* _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H */
diff --git a/include/dt-bindings/clock/qcom,sm8450-dispcc.h b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
new file mode 100644
index 000000000000..fd16ca894971
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_BYTE1_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 15
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 17
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 25
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 29
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 31
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 36
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 37
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 39
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 41
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 47
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 48
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 52
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC0_CLK 55
+#define DISP_CC_MDSS_ESC0_CLK_SRC 56
+#define DISP_CC_MDSS_ESC1_CLK 57
+#define DISP_CC_MDSS_ESC1_CLK_SRC 58
+#define DISP_CC_MDSS_MDP1_CLK 59
+#define DISP_CC_MDSS_MDP_CLK 60
+#define DISP_CC_MDSS_MDP_CLK_SRC 61
+#define DISP_CC_MDSS_MDP_LUT1_CLK 62
+#define DISP_CC_MDSS_MDP_LUT_CLK 63
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 64
+#define DISP_CC_MDSS_PCLK0_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 66
+#define DISP_CC_MDSS_PCLK1_CLK 67
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 68
+#define DISP_CC_MDSS_ROT1_CLK 69
+#define DISP_CC_MDSS_ROT_CLK 70
+#define DISP_CC_MDSS_ROT_CLK_SRC 71
+#define DISP_CC_MDSS_RSCC_AHB_CLK 72
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC1_CLK 74
+#define DISP_CC_MDSS_VSYNC_CLK 75
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 76
+#define DISP_CC_PLL0 77
+#define DISP_CC_PLL1 78
+#define DISP_CC_SLEEP_CLK 79
+#define DISP_CC_SLEEP_CLK_SRC 80
+#define DISP_CC_XO_CLK 81
+#define DISP_CC_XO_CLK_SRC 82
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif /* _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H */
diff --git a/include/dt-bindings/clock/qcom,sm8450-gpucc.h b/include/dt-bindings/clock/qcom,sm8450-gpucc.h
new file mode 100644
index 000000000000..712b171503d6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-gpucc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H
+
+/* Clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_SNOC_DVM_CLK 5
+#define GPU_CC_CXO_AON_CLK 6
+#define GPU_CC_CXO_CLK 7
+#define GPU_CC_DEMET_CLK 8
+#define GPU_CC_DEMET_DIV_CLK_SRC 9
+#define GPU_CC_FF_CLK_SRC 10
+#define GPU_CC_FREQ_MEASURE_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 19
+#define GPU_CC_HUB_AON_CLK 20
+#define GPU_CC_HUB_CLK_SRC 21
+#define GPU_CC_HUB_CX_INT_CLK 22
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 23
+#define GPU_CC_MEMNOC_GFX_CLK 24
+#define GPU_CC_MND1X_0_GFX3D_CLK 25
+#define GPU_CC_MND1X_1_GFX3D_CLK 26
+#define GPU_CC_PLL0 27
+#define GPU_CC_PLL1 28
+#define GPU_CC_SLEEP_CLK 29
+#define GPU_CC_XO_CLK_SRC 30
+#define GPU_CC_XO_DIV_CLK_SRC 31
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H */
diff --git a/include/dt-bindings/clock/qcom,sm8450-videocc.h b/include/dt-bindings/clock/qcom,sm8450-videocc.h
new file mode 100644
index 000000000000..9d795adfe4eb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-videocc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_CLK 0
+#define VIDEO_CC_MVS0_CLK_SRC 1
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 2
+#define VIDEO_CC_MVS0C_CLK 3
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS1_CLK 5
+#define VIDEO_CC_MVS1_CLK_SRC 6
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 7
+#define VIDEO_CC_MVS1C_CLK 8
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 9
+#define VIDEO_CC_PLL0 10
+#define VIDEO_CC_PLL1 11
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0C_GDSC 0
+#define VIDEO_CC_MVS0_GDSC 1
+#define VIDEO_CC_MVS1C_GDSC 2
+#define VIDEO_CC_MVS1_GDSC 3
+
+/* VIDEO_CC resets */
+#define CVP_VIDEO_CC_INTERFACE_BCR 0
+#define CVP_VIDEO_CC_MVS0_BCR 1
+#define CVP_VIDEO_CC_MVS0C_BCR 2
+#define CVP_VIDEO_CC_MVS1_BCR 3
+#define CVP_VIDEO_CC_MVS1C_BCR 4
+#define VIDEO_CC_MVS0C_CLK_ARES 5
+#define VIDEO_CC_MVS1C_CLK_ARES 6
+
+#endif /* _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H */
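
The reset list mixes block resets (*_BCR) with clock assert/deassert
entries (*_CLK_ARES) in one index space, so both are requested through
the same specifier. A hypothetical video codec consumer sketch (label,
node name, and address are illustrative):

    #include <dt-bindings/clock/qcom,sm8450-videocc.h>

    video-codec@aa00000 {
            reg = <0x0aa00000 0x100000>;
            clocks = <&videocc VIDEO_CC_MVS0C_CLK>,
                     <&videocc VIDEO_CC_MVS0_CLK>;
            clock-names = "core", "vcodec";
            /* controller-core and codec-core power domains */
            power-domains = <&videocc VIDEO_CC_MVS0C_GDSC>,
                            <&videocc VIDEO_CC_MVS0_GDSC>;
            power-domain-names = "venus", "vcodec0";
            resets = <&videocc CVP_VIDEO_CC_INTERFACE_BCR>;
    };
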
diff --git a/include/dt-bindings/clock/qcom,sm8550-camcc.h b/include/dt-bindings/clock/qcom,sm8550-camcc.h
new file mode 100644
index 000000000000..a2a256691c2b
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-camcc.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CAMNOC_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CCI_2_CLK 12
+#define CAM_CC_CCI_2_CLK_SRC 13
+#define CAM_CC_CORE_AHB_CLK 14
+#define CAM_CC_CPAS_AHB_CLK 15
+#define CAM_CC_CPAS_BPS_CLK 16
+#define CAM_CC_CPAS_CRE_CLK 17
+#define CAM_CC_CPAS_FAST_AHB_CLK 18
+#define CAM_CC_CPAS_IFE_0_CLK 19
+#define CAM_CC_CPAS_IFE_1_CLK 20
+#define CAM_CC_CPAS_IFE_2_CLK 21
+#define CAM_CC_CPAS_IFE_LITE_CLK 22
+#define CAM_CC_CPAS_IPE_NPS_CLK 23
+#define CAM_CC_CPAS_SBI_CLK 24
+#define CAM_CC_CPAS_SFE_0_CLK 25
+#define CAM_CC_CPAS_SFE_1_CLK 26
+#define CAM_CC_CPHY_RX_CLK_SRC 27
+#define CAM_CC_CRE_AHB_CLK 28
+#define CAM_CC_CRE_CLK 29
+#define CAM_CC_CRE_CLK_SRC 30
+#define CAM_CC_CSI0PHYTIMER_CLK 31
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 32
+#define CAM_CC_CSI1PHYTIMER_CLK 33
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 34
+#define CAM_CC_CSI2PHYTIMER_CLK 35
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI3PHYTIMER_CLK 37
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI4PHYTIMER_CLK 39
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI5PHYTIMER_CLK 41
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSI6PHYTIMER_CLK 43
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 44
+#define CAM_CC_CSI7PHYTIMER_CLK 45
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 46
+#define CAM_CC_CSID_CLK 47
+#define CAM_CC_CSID_CLK_SRC 48
+#define CAM_CC_CSID_CSIPHY_RX_CLK 49
+#define CAM_CC_CSIPHY0_CLK 50
+#define CAM_CC_CSIPHY1_CLK 51
+#define CAM_CC_CSIPHY2_CLK 52
+#define CAM_CC_CSIPHY3_CLK 53
+#define CAM_CC_CSIPHY4_CLK 54
+#define CAM_CC_CSIPHY5_CLK 55
+#define CAM_CC_CSIPHY6_CLK 56
+#define CAM_CC_CSIPHY7_CLK 57
+#define CAM_CC_DRV_AHB_CLK 58
+#define CAM_CC_DRV_XO_CLK 59
+#define CAM_CC_FAST_AHB_CLK_SRC 60
+#define CAM_CC_GDSC_CLK 61
+#define CAM_CC_ICP_AHB_CLK 62
+#define CAM_CC_ICP_CLK 63
+#define CAM_CC_ICP_CLK_SRC 64
+#define CAM_CC_IFE_0_CLK 65
+#define CAM_CC_IFE_0_CLK_SRC 66
+#define CAM_CC_IFE_0_DSP_CLK 67
+#define CAM_CC_IFE_0_DSP_CLK_SRC 68
+#define CAM_CC_IFE_0_FAST_AHB_CLK 69
+#define CAM_CC_IFE_1_CLK 70
+#define CAM_CC_IFE_1_CLK_SRC 71
+#define CAM_CC_IFE_1_DSP_CLK 72
+#define CAM_CC_IFE_1_DSP_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_2_CLK 75
+#define CAM_CC_IFE_2_CLK_SRC 76
+#define CAM_CC_IFE_2_DSP_CLK 77
+#define CAM_CC_IFE_2_DSP_CLK_SRC 78
+#define CAM_CC_IFE_2_FAST_AHB_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_JPEG_1_CLK 92
+#define CAM_CC_JPEG_CLK 93
+#define CAM_CC_JPEG_CLK_SRC 94
+#define CAM_CC_MCLK0_CLK 95
+#define CAM_CC_MCLK0_CLK_SRC 96
+#define CAM_CC_MCLK1_CLK 97
+#define CAM_CC_MCLK1_CLK_SRC 98
+#define CAM_CC_MCLK2_CLK 99
+#define CAM_CC_MCLK2_CLK_SRC 100
+#define CAM_CC_MCLK3_CLK 101
+#define CAM_CC_MCLK3_CLK_SRC 102
+#define CAM_CC_MCLK4_CLK 103
+#define CAM_CC_MCLK4_CLK_SRC 104
+#define CAM_CC_MCLK5_CLK 105
+#define CAM_CC_MCLK5_CLK_SRC 106
+#define CAM_CC_MCLK6_CLK 107
+#define CAM_CC_MCLK6_CLK_SRC 108
+#define CAM_CC_MCLK7_CLK 109
+#define CAM_CC_MCLK7_CLK_SRC 110
+#define CAM_CC_PLL0 111
+#define CAM_CC_PLL0_OUT_EVEN 112
+#define CAM_CC_PLL0_OUT_ODD 113
+#define CAM_CC_PLL1 114
+#define CAM_CC_PLL1_OUT_EVEN 115
+#define CAM_CC_PLL2 116
+#define CAM_CC_PLL3 117
+#define CAM_CC_PLL3_OUT_EVEN 118
+#define CAM_CC_PLL4 119
+#define CAM_CC_PLL4_OUT_EVEN 120
+#define CAM_CC_PLL5 121
+#define CAM_CC_PLL5_OUT_EVEN 122
+#define CAM_CC_PLL6 123
+#define CAM_CC_PLL6_OUT_EVEN 124
+#define CAM_CC_PLL7 125
+#define CAM_CC_PLL7_OUT_EVEN 126
+#define CAM_CC_PLL8 127
+#define CAM_CC_PLL8_OUT_EVEN 128
+#define CAM_CC_PLL9 129
+#define CAM_CC_PLL9_OUT_EVEN 130
+#define CAM_CC_PLL10 131
+#define CAM_CC_PLL10_OUT_EVEN 132
+#define CAM_CC_PLL11 133
+#define CAM_CC_PLL11_OUT_EVEN 134
+#define CAM_CC_PLL12 135
+#define CAM_CC_PLL12_OUT_EVEN 136
+#define CAM_CC_QDSS_DEBUG_CLK 137
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 138
+#define CAM_CC_QDSS_DEBUG_XO_CLK 139
+#define CAM_CC_SBI_CLK 140
+#define CAM_CC_SBI_FAST_AHB_CLK 141
+#define CAM_CC_SFE_0_CLK 142
+#define CAM_CC_SFE_0_CLK_SRC 143
+#define CAM_CC_SFE_0_FAST_AHB_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SLEEP_CLK 148
+#define CAM_CC_SLEEP_CLK_SRC 149
+#define CAM_CC_SLOW_AHB_CLK_SRC 150
+#define CAM_CC_XO_CLK_SRC 151
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_SBI_GDSC 5
+#define CAM_CC_SFE_0_GDSC 6
+#define CAM_CC_SFE_1_GDSC 7
+#define CAM_CC_TITAN_TOP_GDSC 8
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-dispcc.h b/include/dt-bindings/clock/qcom,sm8550-dispcc.h
new file mode 100644
index 000000000000..ed3094c694e0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-dispcc.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8550_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8550_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
+#define DISP_CC_MDSS_ESC0_CLK 56
+#define DISP_CC_MDSS_ESC0_CLK_SRC 57
+#define DISP_CC_MDSS_ESC1_CLK 58
+#define DISP_CC_MDSS_ESC1_CLK_SRC 59
+#define DISP_CC_MDSS_MDP1_CLK 60
+#define DISP_CC_MDSS_MDP_CLK 61
+#define DISP_CC_MDSS_MDP_CLK_SRC 62
+#define DISP_CC_MDSS_MDP_LUT1_CLK 63
+#define DISP_CC_MDSS_MDP_LUT_CLK 64
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK 66
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
+#define DISP_CC_MDSS_PCLK1_CLK 68
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
+#define DISP_CC_MDSS_RSCC_AHB_CLK 70
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
+#define DISP_CC_MDSS_VSYNC1_CLK 72
+#define DISP_CC_MDSS_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
+#define DISP_CC_PLL0 75
+#define DISP_CC_PLL1 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
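
The indices above are only meaningful together with a one-cell provider reference in a board device tree. As a hedged sketch of a consumer node; the node name, unit address, clock-names and the &dispcc label are illustrative assumptions, not taken from this patch:

    display-subsystem@ae00000 {
        /* clock specifiers are <provider-phandle, DISP_CC_* index> */
        clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
                 <&dispcc DISP_CC_MDSS_MDP_CLK>;
        clock-names = "iface", "core";
        /* GDSC and BCR indices follow the same one-cell convention */
        power-domains = <&dispcc MDSS_GDSC>;
        resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
    };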
diff --git a/include/dt-bindings/clock/qcom,sm8550-gcc.h b/include/dt-bindings/clock/qcom,sm8550-gcc.h
new file mode 100644
index 000000000000..3bf6f2b75c99
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-gcc.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8550_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AHB2PHY_0_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_HF_AXI_CLK 7
+#define GCC_CAMERA_SF_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_CNOC_PCIE_SF_AXI_CLK 12
+#define GCC_DDRSS_GPU_AXI_CLK 13
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 14
+#define GCC_DISP_AHB_CLK 15
+#define GCC_DISP_HF_AXI_CLK 16
+#define GCC_DISP_XO_CLK 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPLL0 24
+#define GCC_GPLL0_OUT_EVEN 25
+#define GCC_GPLL4 26
+#define GCC_GPLL7 27
+#define GCC_GPLL9 28
+#define GCC_GPU_CFG_AHB_CLK 29
+#define GCC_GPU_GPLL0_CLK_SRC 30
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 31
+#define GCC_GPU_MEMNOC_GFX_CLK 32
+#define GCC_GPU_SNOC_DVM_GFX_CLK 33
+#define GCC_PCIE_0_AUX_CLK 34
+#define GCC_PCIE_0_AUX_CLK_SRC 35
+#define GCC_PCIE_0_CFG_AHB_CLK 36
+#define GCC_PCIE_0_MSTR_AXI_CLK 37
+#define GCC_PCIE_0_PHY_RCHNG_CLK 38
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 39
+#define GCC_PCIE_0_PIPE_CLK 40
+#define GCC_PCIE_0_PIPE_CLK_SRC 41
+#define GCC_PCIE_0_SLV_AXI_CLK 42
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 43
+#define GCC_PCIE_1_AUX_CLK 44
+#define GCC_PCIE_1_AUX_CLK_SRC 45
+#define GCC_PCIE_1_CFG_AHB_CLK 46
+#define GCC_PCIE_1_MSTR_AXI_CLK 47
+#define GCC_PCIE_1_PHY_AUX_CLK 48
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 49
+#define GCC_PCIE_1_PHY_RCHNG_CLK 50
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_1_PIPE_CLK 52
+#define GCC_PCIE_1_PIPE_CLK_SRC 53
+#define GCC_PCIE_1_SLV_AXI_CLK 54
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 55
+#define GCC_PDM2_CLK 56
+#define GCC_PDM2_CLK_SRC 57
+#define GCC_PDM_AHB_CLK 58
+#define GCC_PDM_XO4_CLK 59
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 60
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 61
+#define GCC_QMIP_DISP_AHB_CLK 62
+#define GCC_QMIP_GPU_AHB_CLK 63
+#define GCC_QMIP_PCIE_AHB_CLK 64
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 65
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 68
+#define GCC_QUPV3_I2C_CORE_CLK 69
+#define GCC_QUPV3_I2C_S0_CLK 70
+#define GCC_QUPV3_I2C_S0_CLK_SRC 71
+#define GCC_QUPV3_I2C_S1_CLK 72
+#define GCC_QUPV3_I2C_S1_CLK_SRC 73
+#define GCC_QUPV3_I2C_S2_CLK 74
+#define GCC_QUPV3_I2C_S2_CLK_SRC 75
+#define GCC_QUPV3_I2C_S3_CLK 76
+#define GCC_QUPV3_I2C_S3_CLK_SRC 77
+#define GCC_QUPV3_I2C_S4_CLK 78
+#define GCC_QUPV3_I2C_S4_CLK_SRC 79
+#define GCC_QUPV3_I2C_S5_CLK 80
+#define GCC_QUPV3_I2C_S5_CLK_SRC 81
+#define GCC_QUPV3_I2C_S6_CLK 82
+#define GCC_QUPV3_I2C_S6_CLK_SRC 83
+#define GCC_QUPV3_I2C_S7_CLK 84
+#define GCC_QUPV3_I2C_S7_CLK_SRC 85
+#define GCC_QUPV3_I2C_S8_CLK 86
+#define GCC_QUPV3_I2C_S8_CLK_SRC 87
+#define GCC_QUPV3_I2C_S9_CLK 88
+#define GCC_QUPV3_I2C_S9_CLK_SRC 89
+#define GCC_QUPV3_I2C_S_AHB_CLK 90
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 91
+#define GCC_QUPV3_WRAP1_CORE_CLK 92
+#define GCC_QUPV3_WRAP1_S0_CLK 93
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S1_CLK 95
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S2_CLK 97
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S3_CLK 99
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S4_CLK 101
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S5_CLK 103
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S6_CLK 105
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S7_CLK 107
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 108
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 109
+#define GCC_QUPV3_WRAP2_CORE_CLK 110
+#define GCC_QUPV3_WRAP2_S0_CLK 111
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S1_CLK 113
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S2_CLK 115
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S3_CLK 117
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S4_CLK 119
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_S5_CLK 121
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 122
+#define GCC_QUPV3_WRAP2_S6_CLK 123
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 124
+#define GCC_QUPV3_WRAP2_S7_CLK 125
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 126
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 127
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 128
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 129
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 130
+#define GCC_SDCC2_AHB_CLK 131
+#define GCC_SDCC2_APPS_CLK 132
+#define GCC_SDCC2_APPS_CLK_SRC 133
+#define GCC_SDCC4_AHB_CLK 134
+#define GCC_SDCC4_APPS_CLK 135
+#define GCC_SDCC4_APPS_CLK_SRC 136
+#define GCC_UFS_PHY_AHB_CLK 137
+#define GCC_UFS_PHY_AXI_CLK 138
+#define GCC_UFS_PHY_AXI_CLK_SRC 139
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_ICE_CORE_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 142
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 143
+#define GCC_UFS_PHY_PHY_AUX_CLK 144
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 145
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 147
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 148
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 149
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 150
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 151
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 152
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 153
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 154
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 155
+#define GCC_USB30_PRIM_MASTER_CLK 156
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 157
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 158
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 159
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 160
+#define GCC_USB30_PRIM_SLEEP_CLK 161
+#define GCC_USB3_PRIM_PHY_AUX_CLK 162
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 163
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 164
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 166
+#define GCC_VIDEO_AHB_CLK 167
+#define GCC_VIDEO_AXI0_CLK 168
+#define GCC_VIDEO_AXI1_CLK 169
+#define GCC_VIDEO_XO_CLK 170
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_1_BCR 17
+#define GCC_QUPV3_WRAPPER_2_BCR 18
+#define GCC_QUPV3_WRAPPER_I2C_BCR 19
+#define GCC_QUSB2PHY_PRIM_BCR 20
+#define GCC_QUSB2PHY_SEC_BCR 21
+#define GCC_SDCC2_BCR 22
+#define GCC_SDCC4_BCR 23
+#define GCC_UFS_PHY_BCR 24
+#define GCC_USB30_PRIM_BCR 25
+#define GCC_USB3_DP_PHY_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_SEC_BCR 27
+#define GCC_USB3_PHY_PRIM_BCR 28
+#define GCC_USB3_PHY_SEC_BCR 29
+#define GCC_USB3PHY_PHY_PRIM_BCR 30
+#define GCC_USB3PHY_PHY_SEC_BCR 31
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
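
On the provider side, the SM8550 GCC node advertises one-cell specifiers so consumers can pass the GCC_* indices directly. A minimal sketch; the unit address and reg values are assumed for illustration:

    gcc: clock-controller@100000 {
        compatible = "qcom,sm8550-gcc";
        reg = <0x0 0x00100000 0x0 0x1f4200>;
        #clock-cells = <1>;        /* one cell: a GCC_* clock index */
        #reset-cells = <1>;        /* one cell: a GCC_* BCR/ARES index */
        #power-domain-cells = <1>; /* one cell: a *_GDSC index */
    };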
diff --git a/include/dt-bindings/clock/qcom,sm8550-gpucc.h b/include/dt-bindings/clock/qcom,sm8550-gpucc.h
new file mode 100644
index 000000000000..a6760547a3ab
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-gpucc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8550_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_FF_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CXO_AON_CLK 4
+#define GPU_CC_CXO_CLK 5
+#define GPU_CC_DEMET_CLK 6
+#define GPU_CC_DEMET_DIV_CLK_SRC 7
+#define GPU_CC_FF_CLK_SRC 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11
+#define GPU_CC_HUB_AON_CLK 12
+#define GPU_CC_HUB_CLK_SRC 13
+#define GPU_CC_HUB_CX_INT_CLK 14
+#define GPU_CC_MEMNOC_GFX_CLK 15
+#define GPU_CC_MND1X_0_GFX3D_CLK 16
+#define GPU_CC_MND1X_1_GFX3D_CLK 17
+#define GPU_CC_PLL0 18
+#define GPU_CC_PLL1 19
+#define GPU_CC_SLEEP_CLK 20
+#define GPU_CC_XO_CLK_SRC 21
+#define GPU_CC_XO_DIV_CLK_SRC 22
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+
+#endif
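
Both GDSCs are commonly referenced from the GPU side as named power domains; a sketch under assumed node names and labels:

    gpu@3d00000 {
        power-domains = <&gpucc GPU_CC_CX_GDSC>,
                        <&gpucc GPU_CC_GX_GDSC>;
        power-domain-names = "cx", "gx";
    };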
diff --git a/include/dt-bindings/clock/qcom,sm8550-tcsr.h b/include/dt-bindings/clock/qcom,sm8550-tcsr.h
new file mode 100644
index 000000000000..091cb76f953a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-tcsr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8550_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_UFS_CLKREF_EN 2
+#define TCSR_UFS_PAD_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
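
These TCSR entries model reference-clock enable bits rather than full clock trees; a PHY consumer might take one next to its ordinary clocks. Sketch with assumed labels, unit address and clock-names:

    pcie0_phy: phy@1c0e000 {
        clocks = <&gcc GCC_PCIE_0_AUX_CLK>,
                 <&tcsr TCSR_PCIE_0_CLKREF_EN>;
        clock-names = "aux", "ref";
    };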
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
new file mode 100644
index 000000000000..b0a668b395a5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
+#define DISP_CC_MDSS_ESC0_CLK 56
+#define DISP_CC_MDSS_ESC0_CLK_SRC 57
+#define DISP_CC_MDSS_ESC1_CLK 58
+#define DISP_CC_MDSS_ESC1_CLK_SRC 59
+#define DISP_CC_MDSS_MDP1_CLK 60
+#define DISP_CC_MDSS_MDP_CLK 61
+#define DISP_CC_MDSS_MDP_CLK_SRC 62
+#define DISP_CC_MDSS_MDP_LUT1_CLK 63
+#define DISP_CC_MDSS_MDP_LUT_CLK 64
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK 66
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
+#define DISP_CC_MDSS_PCLK1_CLK 68
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
+#define DISP_CC_MDSS_RSCC_AHB_CLK 70
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
+#define DISP_CC_MDSS_VSYNC1_CLK 72
+#define DISP_CC_MDSS_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
+#define DISP_CC_PLL0 75
+#define DISP_CC_PLL1 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-gcc.h b/include/dt-bindings/clock/qcom,sm8650-gcc.h
new file mode 100644
index 000000000000..0c543ba46079
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gcc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_GP1_CLK 17
+#define GCC_GP1_CLK_SRC 18
+#define GCC_GP2_CLK 19
+#define GCC_GP2_CLK_SRC 20
+#define GCC_GP3_CLK 21
+#define GCC_GP3_CLK_SRC 22
+#define GCC_GPLL0 23
+#define GCC_GPLL0_OUT_EVEN 24
+#define GCC_GPLL1 25
+#define GCC_GPLL3 26
+#define GCC_GPLL4 27
+#define GCC_GPLL6 28
+#define GCC_GPLL7 29
+#define GCC_GPLL9 30
+#define GCC_GPU_CFG_AHB_CLK 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_PCIE_0_AUX_CLK 36
+#define GCC_PCIE_0_AUX_CLK_SRC 37
+#define GCC_PCIE_0_CFG_AHB_CLK 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 41
+#define GCC_PCIE_0_PIPE_CLK 42
+#define GCC_PCIE_0_PIPE_CLK_SRC 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PCIE_1_AUX_CLK 46
+#define GCC_PCIE_1_AUX_CLK_SRC 47
+#define GCC_PCIE_1_CFG_AHB_CLK 48
+#define GCC_PCIE_1_MSTR_AXI_CLK 49
+#define GCC_PCIE_1_PHY_AUX_CLK 50
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 51
+#define GCC_PCIE_1_PHY_RCHNG_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_PIPE_CLK_SRC 55
+#define GCC_PCIE_1_SLV_AXI_CLK 56
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 69
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 70
+#define GCC_QUPV3_I2C_CORE_CLK 71
+#define GCC_QUPV3_I2C_S0_CLK 72
+#define GCC_QUPV3_I2C_S0_CLK_SRC 73
+#define GCC_QUPV3_I2C_S1_CLK 74
+#define GCC_QUPV3_I2C_S1_CLK_SRC 75
+#define GCC_QUPV3_I2C_S2_CLK 76
+#define GCC_QUPV3_I2C_S2_CLK_SRC 77
+#define GCC_QUPV3_I2C_S3_CLK 78
+#define GCC_QUPV3_I2C_S3_CLK_SRC 79
+#define GCC_QUPV3_I2C_S4_CLK 80
+#define GCC_QUPV3_I2C_S4_CLK_SRC 81
+#define GCC_QUPV3_I2C_S5_CLK 82
+#define GCC_QUPV3_I2C_S5_CLK_SRC 83
+#define GCC_QUPV3_I2C_S6_CLK 84
+#define GCC_QUPV3_I2C_S6_CLK_SRC 85
+#define GCC_QUPV3_I2C_S7_CLK 86
+#define GCC_QUPV3_I2C_S7_CLK_SRC 87
+#define GCC_QUPV3_I2C_S8_CLK 88
+#define GCC_QUPV3_I2C_S8_CLK_SRC 89
+#define GCC_QUPV3_I2C_S9_CLK 90
+#define GCC_QUPV3_I2C_S9_CLK_SRC 91
+#define GCC_QUPV3_I2C_S_AHB_CLK 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S0_CLK 97
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S1_CLK 99
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S2_CLK 101
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S3_CLK 103
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S4_CLK 105
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S5_CLK 107
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S6_CLK 109
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S7_CLK 111
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 113
+#define GCC_QUPV3_WRAP2_CORE_CLK 114
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 115
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 116
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 117
+#define GCC_QUPV3_WRAP2_S0_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 119
+#define GCC_QUPV3_WRAP2_S1_CLK 120
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S2_CLK 122
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S3_CLK 124
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S4_CLK 126
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S5_CLK 128
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S6_CLK 130
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S7_CLK 132
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 134
+#define GCC_QUPV3_WRAP3_CORE_CLK 135
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK 136
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK_SRC 137
+#define GCC_QUPV3_WRAP3_S0_CLK 138
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 139
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 142
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 143
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 144
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 145
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 146
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 147
+#define GCC_SDCC2_AHB_CLK 148
+#define GCC_SDCC2_APPS_CLK 149
+#define GCC_SDCC2_APPS_CLK_SRC 150
+#define GCC_SDCC4_AHB_CLK 151
+#define GCC_SDCC4_APPS_CLK 152
+#define GCC_SDCC4_APPS_CLK_SRC 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB3_PRIM_PHY_AUX_CLK 179
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 180
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 181
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 182
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 183
+#define GCC_VIDEO_AHB_CLK 184
+#define GCC_VIDEO_AXI0_CLK 185
+#define GCC_VIDEO_AXI1_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+#define GCC_GPLL0_AO 188
+#define GCC_GPLL0_OUT_EVEN_AO 189
+#define GCC_GPLL1_AO 190
+#define GCC_GPLL3_AO 191
+#define GCC_GPLL4_AO 192
+#define GCC_GPLL6_AO 193
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_1_BCR 17
+#define GCC_QUPV3_WRAPPER_2_BCR 18
+#define GCC_QUPV3_WRAPPER_3_BCR 19
+#define GCC_QUPV3_WRAPPER_I2C_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
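
The per-serial-engine *_S*_CLK indices are consumed one per QUP peripheral; a sketch of an SPI controller node, with the unit address, label and clock-name assumed for illustration:

    spi@884000 {
        clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
        clock-names = "se";
    };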
diff --git a/include/dt-bindings/clock/qcom,sm8650-gpucc.h b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
new file mode 100644
index 000000000000..d0dc457cfe75
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_ACCU_SHIFT_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_DEMET_CLK 7
+#define GPU_CC_DPM_CLK 8
+#define GPU_CC_FF_CLK_SRC 9
+#define GPU_CC_FREQ_MEASURE_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_ACCU_SHIFT_CLK 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AON_CLK 19
+#define GPU_CC_HUB_CLK_SRC 20
+#define GPU_CC_HUB_CX_INT_CLK 21
+#define GPU_CC_HUB_DIV_CLK_SRC 22
+#define GPU_CC_MEMNOC_GFX_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-tcsr.h b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
new file mode 100644
index 000000000000..b2c72d492f1f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_UFS_CLKREF_EN 2
+#define TCSR_UFS_PAD_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sc7280.h b/include/dt-bindings/clock/qcom,videocc-sc7280.h
new file mode 100644
index 000000000000..9e00c3a5f75e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sc7280.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0 0
+#define VIDEO_CC_IRIS_AHB_CLK 1
+#define VIDEO_CC_IRIS_CLK_SRC 2
+#define VIDEO_CC_MVS0_AXI_CLK 3
+#define VIDEO_CC_MVS0_CORE_CLK 4
+#define VIDEO_CC_MVSC_CORE_CLK 5
+#define VIDEO_CC_MVSC_CTL_AXI_CLK 6
+#define VIDEO_CC_SLEEP_CLK 7
+#define VIDEO_CC_SLEEP_CLK_SRC 8
+#define VIDEO_CC_VENUS_AHB_CLK 9
+#define VIDEO_CC_XO_CLK 10
+#define VIDEO_CC_XO_CLK_SRC 11
+
+/* VIDEO_CC power domains */
+#define MVS0_GDSC 0
+#define MVSC_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
new file mode 100644
index 000000000000..c557b78dc572
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8150_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_IRIS_AHB_CLK 0
+#define VIDEO_CC_IRIS_CLK_SRC 1
+#define VIDEO_CC_MVS0_CORE_CLK 2
+#define VIDEO_CC_MVS1_CORE_CLK 3
+#define VIDEO_CC_MVSC_CORE_CLK 4
+#define VIDEO_CC_PLL0 5
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_MVSC_CORE_CLK_BCR 0
+#define VIDEO_CC_INTERFACE_BCR 1
+#define VIDEO_CC_MVS0_BCR 2
+#define VIDEO_CC_MVS1_BCR 3
+#define VIDEO_CC_MVSC_BCR 4
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+#define VCODEC1_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8250.h b/include/dt-bindings/clock/qcom,videocc-sm8250.h
new file mode 100644
index 000000000000..8d321ac3b1fa
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sm8250.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8250_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_CLK_SRC 0
+#define VIDEO_CC_MVS0C_CLK 1
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 2
+#define VIDEO_CC_MVS1_CLK_SRC 3
+#define VIDEO_CC_MVS1_DIV2_CLK 4
+#define VIDEO_CC_MVS1C_CLK 5
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 6
+#define VIDEO_CC_PLL0 7
+#define VIDEO_CC_PLL1 8
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS0_CLK 10
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_CVP_INTERFACE_BCR 0
+#define VIDEO_CC_CVP_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_CVP_MVS0C_BCR 3
+#define VIDEO_CC_CVP_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_CVP_MVS1C_BCR 6
+
+/* VIDEO_CC power domains */
+#define MVS0C_GDSC 0
+#define MVS1C_GDSC 1
+#define MVS0_GDSC 2
+#define MVS1_GDSC 3
+
+#endif
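
By convention in these headers the *_BCR indices map to block resets while the *_CLK_ARES indices expose a single clock's assert/deassert bit; both go through the same one-cell resets property. Sketch with assumed node names and labels:

    venus: video-codec@aa00000 {
        clocks = <&videocc VIDEO_CC_MVS0_CLK>;
        power-domains = <&videocc MVS0C_GDSC>, <&videocc MVS0_GDSC>;
        resets = <&videocc VIDEO_CC_MVS0C_CLK_ARES>;
    };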
diff --git a/include/dt-bindings/clock/qcom,x1e80100-camcc.h b/include/dt-bindings/clock/qcom,x1e80100-camcc.h
new file mode 100644
index 000000000000..d72fdfb06a7c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-camcc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_X1E80100_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_X1E80100_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_NRT_CLK 4
+#define CAM_CC_CAMNOC_AXI_RT_CLK 5
+#define CAM_CC_CAMNOC_AXI_RT_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CAMNOC_XO_CLK 8
+#define CAM_CC_CCI_0_CLK 9
+#define CAM_CC_CCI_0_CLK_SRC 10
+#define CAM_CC_CCI_1_CLK 11
+#define CAM_CC_CCI_1_CLK_SRC 12
+#define CAM_CC_CORE_AHB_CLK 13
+#define CAM_CC_CPAS_AHB_CLK 14
+#define CAM_CC_CPAS_BPS_CLK 15
+#define CAM_CC_CPAS_FAST_AHB_CLK 16
+#define CAM_CC_CPAS_IFE_0_CLK 17
+#define CAM_CC_CPAS_IFE_1_CLK 18
+#define CAM_CC_CPAS_IFE_LITE_CLK 19
+#define CAM_CC_CPAS_IPE_NPS_CLK 20
+#define CAM_CC_CPAS_SFE_0_CLK 21
+#define CAM_CC_CPHY_RX_CLK_SRC 22
+#define CAM_CC_CSI0PHYTIMER_CLK 23
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI1PHYTIMER_CLK 25
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSI2PHYTIMER_CLK 27
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 28
+#define CAM_CC_CSI3PHYTIMER_CLK 29
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 30
+#define CAM_CC_CSI4PHYTIMER_CLK 31
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 32
+#define CAM_CC_CSI5PHYTIMER_CLK 33
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 34
+#define CAM_CC_CSID_CLK 35
+#define CAM_CC_CSID_CLK_SRC 36
+#define CAM_CC_CSID_CSIPHY_RX_CLK 37
+#define CAM_CC_CSIPHY0_CLK 38
+#define CAM_CC_CSIPHY1_CLK 39
+#define CAM_CC_CSIPHY2_CLK 40
+#define CAM_CC_CSIPHY3_CLK 41
+#define CAM_CC_CSIPHY4_CLK 42
+#define CAM_CC_CSIPHY5_CLK 43
+#define CAM_CC_FAST_AHB_CLK_SRC 44
+#define CAM_CC_GDSC_CLK 45
+#define CAM_CC_ICP_AHB_CLK 46
+#define CAM_CC_ICP_CLK 47
+#define CAM_CC_ICP_CLK_SRC 48
+#define CAM_CC_IFE_0_CLK 49
+#define CAM_CC_IFE_0_CLK_SRC 50
+#define CAM_CC_IFE_0_DSP_CLK 51
+#define CAM_CC_IFE_0_FAST_AHB_CLK 52
+#define CAM_CC_IFE_1_CLK 53
+#define CAM_CC_IFE_1_CLK_SRC 54
+#define CAM_CC_IFE_1_DSP_CLK 55
+#define CAM_CC_IFE_1_FAST_AHB_CLK 56
+#define CAM_CC_IFE_LITE_AHB_CLK 57
+#define CAM_CC_IFE_LITE_CLK 58
+#define CAM_CC_IFE_LITE_CLK_SRC 59
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 60
+#define CAM_CC_IFE_LITE_CSID_CLK 61
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 62
+#define CAM_CC_IPE_NPS_AHB_CLK 63
+#define CAM_CC_IPE_NPS_CLK 64
+#define CAM_CC_IPE_NPS_CLK_SRC 65
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 66
+#define CAM_CC_IPE_PPS_CLK 67
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 68
+#define CAM_CC_JPEG_CLK 69
+#define CAM_CC_JPEG_CLK_SRC 70
+#define CAM_CC_MCLK0_CLK 71
+#define CAM_CC_MCLK0_CLK_SRC 72
+#define CAM_CC_MCLK1_CLK 73
+#define CAM_CC_MCLK1_CLK_SRC 74
+#define CAM_CC_MCLK2_CLK 75
+#define CAM_CC_MCLK2_CLK_SRC 76
+#define CAM_CC_MCLK3_CLK 77
+#define CAM_CC_MCLK3_CLK_SRC 78
+#define CAM_CC_MCLK4_CLK 79
+#define CAM_CC_MCLK4_CLK_SRC 80
+#define CAM_CC_MCLK5_CLK 81
+#define CAM_CC_MCLK5_CLK_SRC 82
+#define CAM_CC_MCLK6_CLK 83
+#define CAM_CC_MCLK6_CLK_SRC 84
+#define CAM_CC_MCLK7_CLK 85
+#define CAM_CC_MCLK7_CLK_SRC 86
+#define CAM_CC_PLL0 87
+#define CAM_CC_PLL0_OUT_EVEN 88
+#define CAM_CC_PLL0_OUT_ODD 89
+#define CAM_CC_PLL1 90
+#define CAM_CC_PLL1_OUT_EVEN 91
+#define CAM_CC_PLL2 92
+#define CAM_CC_PLL3 93
+#define CAM_CC_PLL3_OUT_EVEN 94
+#define CAM_CC_PLL4 95
+#define CAM_CC_PLL4_OUT_EVEN 96
+#define CAM_CC_PLL6 97
+#define CAM_CC_PLL6_OUT_EVEN 98
+#define CAM_CC_PLL8 99
+#define CAM_CC_PLL8_OUT_EVEN 100
+#define CAM_CC_SFE_0_CLK 101
+#define CAM_CC_SFE_0_CLK_SRC 102
+#define CAM_CC_SFE_0_FAST_AHB_CLK 103
+#define CAM_CC_SLEEP_CLK 104
+#define CAM_CC_SLEEP_CLK_SRC 105
+#define CAM_CC_SLOW_AHB_CLK_SRC 106
+#define CAM_CC_XO_CLK_SRC 107
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IPE_0_GDSC 3
+#define CAM_CC_SFE_0_GDSC 4
+#define CAM_CC_TITAN_TOP_GDSC 5
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IPE_0_BCR 4
+#define CAM_CC_SFE_0_BCR 5
+
+#endif
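
Camera sensors are the usual consumers of the MCLK indices; a sketch with an assumed I2C address, label and clock-name:

    camera-sensor@10 {
        clocks = <&camcc CAM_CC_MCLK0_CLK>;
        clock-names = "xvclk"; /* illustrative; the real name is sensor-specific */
    };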
diff --git a/include/dt-bindings/clock/qcom,x1e80100-dispcc.h b/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
new file mode 100644
index 000000000000..d4a83e4fd0d1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_X1E80100_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_X1E80100_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 15
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 17
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 25
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 26
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 28
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 29
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 30
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 33
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 34
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 35
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 36
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 39
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 40
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK 44
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 45
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 47
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 49
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 50
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 51
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 52
+#define DISP_CC_MDSS_ESC0_CLK 53
+#define DISP_CC_MDSS_ESC0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC1_CLK 55
+#define DISP_CC_MDSS_ESC1_CLK_SRC 56
+#define DISP_CC_MDSS_MDP1_CLK 57
+#define DISP_CC_MDSS_MDP_CLK 58
+#define DISP_CC_MDSS_MDP_CLK_SRC 59
+#define DISP_CC_MDSS_MDP_LUT1_CLK 60
+#define DISP_CC_MDSS_MDP_LUT_CLK 61
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 62
+#define DISP_CC_MDSS_PCLK0_CLK 63
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 64
+#define DISP_CC_MDSS_PCLK1_CLK 65
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 66
+#define DISP_CC_MDSS_RSCC_AHB_CLK 67
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 68
+#define DISP_CC_MDSS_VSYNC1_CLK 69
+#define DISP_CC_MDSS_VSYNC_CLK 70
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 71
+#define DISP_CC_PLL0 72
+#define DISP_CC_PLL1 73
+#define DISP_CC_SLEEP_CLK 74
+#define DISP_CC_SLEEP_CLK_SRC 75
+#define DISP_CC_XO_CLK 76
+#define DISP_CC_XO_CLK_SRC 77
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
new file mode 100644
index 000000000000..24ba9e2a5cf6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_USB_NORTH_AXI_CLK 0
+#define GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_MP_AXI_CLK 4
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 6
+#define GCC_AGGRE_USB3_TERT_AXI_CLK 7
+#define GCC_AGGRE_USB4_0_AXI_CLK 8
+#define GCC_AGGRE_USB4_1_AXI_CLK 9
+#define GCC_AGGRE_USB4_2_AXI_CLK 10
+#define GCC_AGGRE_USB_NOC_AXI_CLK 11
+#define GCC_AV1E_AHB_CLK 12
+#define GCC_AV1E_AXI_CLK 13
+#define GCC_AV1E_XO_CLK 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CAMERA_AHB_CLK 16
+#define GCC_CAMERA_HF_AXI_CLK 17
+#define GCC_CAMERA_SF_AXI_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 20
+#define GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK 21
+#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 22
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 24
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 25
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 26
+#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 27
+#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 28
+#define GCC_CFG_NOC_USB_ANOC_NORTH_AHB_CLK 29
+#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 30
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 31
+#define GCC_CNOC_PCIE2_TUNNEL_CLK 32
+#define GCC_CNOC_PCIE_NORTH_SF_AXI_CLK 33
+#define GCC_CNOC_PCIE_SOUTH_SF_AXI_CLK 34
+#define GCC_CNOC_PCIE_TUNNEL_CLK 35
+#define GCC_DDRSS_GPU_AXI_CLK 36
+#define GCC_DISP_AHB_CLK 37
+#define GCC_DISP_HF_AXI_CLK 38
+#define GCC_DISP_XO_CLK 39
+#define GCC_GP1_CLK 40
+#define GCC_GP1_CLK_SRC 41
+#define GCC_GP2_CLK 42
+#define GCC_GP2_CLK_SRC 43
+#define GCC_GP3_CLK 44
+#define GCC_GP3_CLK_SRC 45
+#define GCC_GPLL0 46
+#define GCC_GPLL0_OUT_EVEN 47
+#define GCC_GPLL4 48
+#define GCC_GPLL7 49
+#define GCC_GPLL8 50
+#define GCC_GPLL9 51
+#define GCC_GPU_CFG_AHB_CLK 52
+#define GCC_GPU_GPLL0_CPH_CLK_SRC 53
+#define GCC_GPU_GPLL0_DIV_CPH_CLK_SRC 54
+#define GCC_GPU_MEMNOC_GFX_CLK 55
+#define GCC_GPU_SNOC_DVM_GFX_CLK 56
+#define GCC_PCIE0_PHY_RCHNG_CLK 57
+#define GCC_PCIE1_PHY_RCHNG_CLK 58
+#define GCC_PCIE2_PHY_RCHNG_CLK 59
+#define GCC_PCIE_0_AUX_CLK 60
+#define GCC_PCIE_0_AUX_CLK_SRC 61
+#define GCC_PCIE_0_CFG_AHB_CLK 62
+#define GCC_PCIE_0_MSTR_AXI_CLK 63
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 64
+#define GCC_PCIE_0_PIPE_CLK 65
+#define GCC_PCIE_0_SLV_AXI_CLK 66
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 67
+#define GCC_PCIE_1_AUX_CLK 68
+#define GCC_PCIE_1_AUX_CLK_SRC 69
+#define GCC_PCIE_1_CFG_AHB_CLK 70
+#define GCC_PCIE_1_MSTR_AXI_CLK 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72
+#define GCC_PCIE_1_PIPE_CLK 73
+#define GCC_PCIE_1_SLV_AXI_CLK 74
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75
+#define GCC_PCIE_2_AUX_CLK 76
+#define GCC_PCIE_2_AUX_CLK_SRC 77
+#define GCC_PCIE_2_CFG_AHB_CLK 78
+#define GCC_PCIE_2_MSTR_AXI_CLK 79
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 80
+#define GCC_PCIE_2_PIPE_CLK 81
+#define GCC_PCIE_2_SLV_AXI_CLK 82
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 83
+#define GCC_PCIE_3_AUX_CLK 84
+#define GCC_PCIE_3_AUX_CLK_SRC 85
+#define GCC_PCIE_3_CFG_AHB_CLK 86
+#define GCC_PCIE_3_MSTR_AXI_CLK 87
+#define GCC_PCIE_3_PHY_AUX_CLK 88
+#define GCC_PCIE_3_PHY_RCHNG_CLK 89
+#define GCC_PCIE_3_PHY_RCHNG_CLK_SRC 90
+#define GCC_PCIE_3_PIPE_CLK 91
+#define GCC_PCIE_3_PIPE_DIV_CLK_SRC 92
+#define GCC_PCIE_3_PIPEDIV2_CLK 93
+#define GCC_PCIE_3_SLV_AXI_CLK 94
+#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_4_AUX_CLK 96
+#define GCC_PCIE_4_AUX_CLK_SRC 97
+#define GCC_PCIE_4_CFG_AHB_CLK 98
+#define GCC_PCIE_4_MSTR_AXI_CLK 99
+#define GCC_PCIE_4_PHY_RCHNG_CLK 100
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 101
+#define GCC_PCIE_4_PIPE_CLK 102
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 103
+#define GCC_PCIE_4_PIPEDIV2_CLK 104
+#define GCC_PCIE_4_SLV_AXI_CLK 105
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 106
+#define GCC_PCIE_5_AUX_CLK 107
+#define GCC_PCIE_5_AUX_CLK_SRC 108
+#define GCC_PCIE_5_CFG_AHB_CLK 109
+#define GCC_PCIE_5_MSTR_AXI_CLK 110
+#define GCC_PCIE_5_PHY_RCHNG_CLK 111
+#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 112
+#define GCC_PCIE_5_PIPE_CLK 113
+#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 114
+#define GCC_PCIE_5_PIPEDIV2_CLK 115
+#define GCC_PCIE_5_SLV_AXI_CLK 116
+#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 117
+#define GCC_PCIE_6A_AUX_CLK 118
+#define GCC_PCIE_6A_AUX_CLK_SRC 119
+#define GCC_PCIE_6A_CFG_AHB_CLK 120
+#define GCC_PCIE_6A_MSTR_AXI_CLK 121
+#define GCC_PCIE_6A_PHY_AUX_CLK 122
+#define GCC_PCIE_6A_PHY_RCHNG_CLK 123
+#define GCC_PCIE_6A_PHY_RCHNG_CLK_SRC 124
+#define GCC_PCIE_6A_PIPE_CLK 125
+#define GCC_PCIE_6A_PIPE_DIV_CLK_SRC 126
+#define GCC_PCIE_6A_PIPEDIV2_CLK 127
+#define GCC_PCIE_6A_SLV_AXI_CLK 128
+#define GCC_PCIE_6A_SLV_Q2A_AXI_CLK 129
+#define GCC_PCIE_6B_AUX_CLK 130
+#define GCC_PCIE_6B_AUX_CLK_SRC 131
+#define GCC_PCIE_6B_CFG_AHB_CLK 132
+#define GCC_PCIE_6B_MSTR_AXI_CLK 133
+#define GCC_PCIE_6B_PHY_AUX_CLK 134
+#define GCC_PCIE_6B_PHY_RCHNG_CLK 135
+#define GCC_PCIE_6B_PHY_RCHNG_CLK_SRC 136
+#define GCC_PCIE_6B_PIPE_CLK 137
+#define GCC_PCIE_6B_PIPE_DIV_CLK_SRC 138
+#define GCC_PCIE_6B_PIPEDIV2_CLK 139
+#define GCC_PCIE_6B_SLV_AXI_CLK 140
+#define GCC_PCIE_6B_SLV_Q2A_AXI_CLK 141
+#define GCC_PCIE_RSCC_AHB_CLK 142
+#define GCC_PCIE_RSCC_XO_CLK 143
+#define GCC_PCIE_RSCC_XO_CLK_SRC 144
+#define GCC_PDM2_CLK 145
+#define GCC_PDM2_CLK_SRC 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PDM_XO4_CLK 148
+#define GCC_QMIP_AV1E_AHB_CLK 149
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 150
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 151
+#define GCC_QMIP_DISP_AHB_CLK 152
+#define GCC_QMIP_GPU_AHB_CLK 153
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 154
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 155
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 156
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 157
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 158
+#define GCC_QUPV3_WRAP0_CORE_CLK 159
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 160
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 161
+#define GCC_QUPV3_WRAP0_S0_CLK 162
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 163
+#define GCC_QUPV3_WRAP0_S1_CLK 164
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 165
+#define GCC_QUPV3_WRAP0_S2_CLK 166
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 167
+#define GCC_QUPV3_WRAP0_S2_DIV_CLK_SRC 168
+#define GCC_QUPV3_WRAP0_S3_CLK 169
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 170
+#define GCC_QUPV3_WRAP0_S3_DIV_CLK_SRC 171
+#define GCC_QUPV3_WRAP0_S4_CLK 172
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 173
+#define GCC_QUPV3_WRAP0_S5_CLK 174
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 175
+#define GCC_QUPV3_WRAP0_S6_CLK 176
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 177
+#define GCC_QUPV3_WRAP0_S7_CLK 178
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 179
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 180
+#define GCC_QUPV3_WRAP1_CORE_CLK 181
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 182
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 183
+#define GCC_QUPV3_WRAP1_S0_CLK 184
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 185
+#define GCC_QUPV3_WRAP1_S1_CLK 186
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 187
+#define GCC_QUPV3_WRAP1_S2_CLK 188
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 189
+#define GCC_QUPV3_WRAP1_S2_DIV_CLK_SRC 190
+#define GCC_QUPV3_WRAP1_S3_CLK 191
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 192
+#define GCC_QUPV3_WRAP1_S3_DIV_CLK_SRC 193
+#define GCC_QUPV3_WRAP1_S4_CLK 194
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 195
+#define GCC_QUPV3_WRAP1_S5_CLK 196
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_S6_CLK 198
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 199
+#define GCC_QUPV3_WRAP1_S7_CLK 200
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 201
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 202
+#define GCC_QUPV3_WRAP2_CORE_CLK 203
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 204
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 205
+#define GCC_QUPV3_WRAP2_S0_CLK 206
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 207
+#define GCC_QUPV3_WRAP2_S1_CLK 208
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 209
+#define GCC_QUPV3_WRAP2_S2_CLK 210
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 211
+#define GCC_QUPV3_WRAP2_S2_DIV_CLK_SRC 212
+#define GCC_QUPV3_WRAP2_S3_CLK 213
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 214
+#define GCC_QUPV3_WRAP2_S3_DIV_CLK_SRC 215
+#define GCC_QUPV3_WRAP2_S4_CLK 216
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_S5_CLK 218
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 219
+#define GCC_QUPV3_WRAP2_S6_CLK 220
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 221
+#define GCC_QUPV3_WRAP2_S7_CLK 222
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 223
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 224
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 225
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 226
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 227
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 228
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 229
+#define GCC_SDCC2_AHB_CLK 230
+#define GCC_SDCC2_APPS_CLK 231
+#define GCC_SDCC2_APPS_CLK_SRC 232
+#define GCC_SDCC4_AHB_CLK 233
+#define GCC_SDCC4_APPS_CLK 234
+#define GCC_SDCC4_APPS_CLK_SRC 235
+#define GCC_SYS_NOC_USB_AXI_CLK 236
+#define GCC_UFS_PHY_AHB_CLK 237
+#define GCC_UFS_PHY_AXI_CLK 238
+#define GCC_UFS_PHY_AXI_CLK_SRC 239
+#define GCC_UFS_PHY_ICE_CORE_CLK 240
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 241
+#define GCC_UFS_PHY_PHY_AUX_CLK 242
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 243
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 244
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 245
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 246
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 248
+#define GCC_USB20_MASTER_CLK 249
+#define GCC_USB20_MASTER_CLK_SRC 250
+#define GCC_USB20_MOCK_UTMI_CLK 251
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 252
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 253
+#define GCC_USB20_SLEEP_CLK 254
+#define GCC_USB30_MP_MASTER_CLK 255
+#define GCC_USB30_MP_MASTER_CLK_SRC 256
+#define GCC_USB30_MP_MOCK_UTMI_CLK 257
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 258
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 259
+#define GCC_USB30_MP_SLEEP_CLK 260
+#define GCC_USB30_PRIM_MASTER_CLK 261
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 262
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 263
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 264
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 265
+#define GCC_USB30_PRIM_SLEEP_CLK 266
+#define GCC_USB30_SEC_MASTER_CLK 267
+#define GCC_USB30_SEC_MASTER_CLK_SRC 268
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 269
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 270
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 271
+#define GCC_USB30_SEC_SLEEP_CLK 272
+#define GCC_USB30_TERT_MASTER_CLK 273
+#define GCC_USB30_TERT_MASTER_CLK_SRC 274
+#define GCC_USB30_TERT_MOCK_UTMI_CLK 275
+#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 276
+#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 277
+#define GCC_USB30_TERT_SLEEP_CLK 278
+#define GCC_USB3_MP_PHY_AUX_CLK 279
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 280
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 281
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 282
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 283
+#define GCC_USB3_PRIM_PHY_AUX_CLK 284
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 285
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 286
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 287
+#define GCC_USB3_SEC_PHY_AUX_CLK 288
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 289
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 290
+#define GCC_USB3_SEC_PHY_PIPE_CLK 291
+#define GCC_USB3_TERT_PHY_AUX_CLK 292
+#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 293
+#define GCC_USB3_TERT_PHY_COM_AUX_CLK 294
+#define GCC_USB3_TERT_PHY_PIPE_CLK 295
+#define GCC_USB4_0_CFG_AHB_CLK 296
+#define GCC_USB4_0_DP0_CLK 297
+#define GCC_USB4_0_DP1_CLK 298
+#define GCC_USB4_0_MASTER_CLK 299
+#define GCC_USB4_0_MASTER_CLK_SRC 300
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 301
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 302
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 303
+#define GCC_USB4_0_PHY_RX0_CLK 304
+#define GCC_USB4_0_PHY_RX1_CLK 305
+#define GCC_USB4_0_PHY_USB_PIPE_CLK 306
+#define GCC_USB4_0_SB_IF_CLK 307
+#define GCC_USB4_0_SB_IF_CLK_SRC 308
+#define GCC_USB4_0_SYS_CLK 309
+#define GCC_USB4_0_TMU_CLK 310
+#define GCC_USB4_0_TMU_CLK_SRC 311
+#define GCC_USB4_1_CFG_AHB_CLK 312
+#define GCC_USB4_1_DP0_CLK 313
+#define GCC_USB4_1_DP1_CLK 314
+#define GCC_USB4_1_MASTER_CLK 315
+#define GCC_USB4_1_MASTER_CLK_SRC 316
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 317
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 318
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 319
+#define GCC_USB4_1_PHY_RX0_CLK 320
+#define GCC_USB4_1_PHY_RX1_CLK 321
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 322
+#define GCC_USB4_1_SB_IF_CLK 323
+#define GCC_USB4_1_SB_IF_CLK_SRC 324
+#define GCC_USB4_1_SYS_CLK 325
+#define GCC_USB4_1_TMU_CLK 326
+#define GCC_USB4_1_TMU_CLK_SRC 327
+#define GCC_USB4_2_CFG_AHB_CLK 328
+#define GCC_USB4_2_DP0_CLK 329
+#define GCC_USB4_2_DP1_CLK 330
+#define GCC_USB4_2_MASTER_CLK 331
+#define GCC_USB4_2_MASTER_CLK_SRC 332
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 333
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_2_PHY_RX0_CLK 336
+#define GCC_USB4_2_PHY_RX1_CLK 337
+#define GCC_USB4_2_PHY_USB_PIPE_CLK 338
+#define GCC_USB4_2_SB_IF_CLK 339
+#define GCC_USB4_2_SB_IF_CLK_SRC 340
+#define GCC_USB4_2_SYS_CLK 341
+#define GCC_USB4_2_TMU_CLK 342
+#define GCC_USB4_2_TMU_CLK_SRC 343
+#define GCC_VIDEO_AHB_CLK 344
+#define GCC_VIDEO_AXI0_CLK 345
+#define GCC_VIDEO_AXI1_CLK 346
+#define GCC_VIDEO_XO_CLK 347
+#define GCC_PCIE_3_PIPE_CLK_SRC 348
+#define GCC_PCIE_4_PIPE_CLK_SRC 349
+#define GCC_PCIE_5_PIPE_CLK_SRC 350
+#define GCC_PCIE_6A_PIPE_CLK_SRC 351
+#define GCC_PCIE_6B_PIPE_CLK_SRC 352
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 353
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 354
+#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 355
+
+/* GCC power domains */
+#define GCC_PCIE_0_TUNNEL_GDSC 0
+#define GCC_PCIE_1_TUNNEL_GDSC 1
+#define GCC_PCIE_2_TUNNEL_GDSC 2
+#define GCC_PCIE_3_GDSC 3
+#define GCC_PCIE_3_PHY_GDSC 4
+#define GCC_PCIE_4_GDSC 5
+#define GCC_PCIE_4_PHY_GDSC 6
+#define GCC_PCIE_5_GDSC 7
+#define GCC_PCIE_5_PHY_GDSC 8
+#define GCC_PCIE_6_PHY_GDSC 9
+#define GCC_PCIE_6A_GDSC 10
+#define GCC_PCIE_6B_GDSC 11
+#define GCC_UFS_MEM_PHY_GDSC 12
+#define GCC_UFS_PHY_GDSC 13
+#define GCC_USB20_PRIM_GDSC 14
+#define GCC_USB30_MP_GDSC 15
+#define GCC_USB30_PRIM_GDSC 16
+#define GCC_USB30_SEC_GDSC 17
+#define GCC_USB30_TERT_GDSC 18
+#define GCC_USB3_MP_SS0_PHY_GDSC 19
+#define GCC_USB3_MP_SS1_PHY_GDSC 20
+#define GCC_USB4_0_GDSC 21
+#define GCC_USB4_1_GDSC 22
+#define GCC_USB4_2_GDSC 23
+#define GCC_USB_0_PHY_GDSC 24
+#define GCC_USB_1_PHY_GDSC 25
+#define GCC_USB_2_PHY_GDSC 26
+
+/* GCC resets */
+#define GCC_AV1E_BCR 0
+#define GCC_CAMERA_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_0_TUNNEL_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_1_TUNNEL_BCR 13
+#define GCC_PCIE_2_LINK_DOWN_BCR 14
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 15
+#define GCC_PCIE_2_PHY_BCR 16
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_PCIE_2_TUNNEL_BCR 18
+#define GCC_PCIE_3_BCR 19
+#define GCC_PCIE_3_LINK_DOWN_BCR 20
+#define GCC_PCIE_3_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3_PHY_BCR 22
+#define GCC_PCIE_3_PHY_NOCSR_COM_PHY_BCR 23
+#define GCC_PCIE_4_BCR 24
+#define GCC_PCIE_4_LINK_DOWN_BCR 25
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_4_PHY_BCR 27
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 28
+#define GCC_PCIE_5_BCR 29
+#define GCC_PCIE_5_LINK_DOWN_BCR 30
+#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_5_PHY_BCR 32
+#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 33
+#define GCC_PCIE_6A_BCR 34
+#define GCC_PCIE_6A_LINK_DOWN_BCR 35
+#define GCC_PCIE_6A_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_6A_PHY_BCR 37
+#define GCC_PCIE_6A_PHY_NOCSR_COM_PHY_BCR 38
+#define GCC_PCIE_6B_BCR 39
+#define GCC_PCIE_6B_LINK_DOWN_BCR 40
+#define GCC_PCIE_6B_NOCSR_COM_PHY_BCR 41
+#define GCC_PCIE_6B_PHY_BCR 42
+#define GCC_PCIE_6B_PHY_NOCSR_COM_PHY_BCR 43
+#define GCC_PCIE_PHY_BCR 44
+#define GCC_PCIE_PHY_CFG_AHB_BCR 45
+#define GCC_PCIE_PHY_COM_BCR 46
+#define GCC_PCIE_RSCC_BCR 47
+#define GCC_PDM_BCR 48
+#define GCC_QUPV3_WRAPPER_0_BCR 49
+#define GCC_QUPV3_WRAPPER_1_BCR 50
+#define GCC_QUPV3_WRAPPER_2_BCR 51
+#define GCC_QUSB2PHY_HS0_MP_BCR 52
+#define GCC_QUSB2PHY_HS1_MP_BCR 53
+#define GCC_QUSB2PHY_PRIM_BCR 54
+#define GCC_QUSB2PHY_SEC_BCR 55
+#define GCC_QUSB2PHY_TERT_BCR 56
+#define GCC_QUSB2PHY_USB20_HS_BCR 57
+#define GCC_SDCC2_BCR 58
+#define GCC_SDCC4_BCR 59
+#define GCC_UFS_PHY_BCR 60
+#define GCC_USB20_PRIM_BCR 61
+#define GCC_USB30_MP_BCR 62
+#define GCC_USB30_PRIM_BCR 63
+#define GCC_USB30_SEC_BCR 64
+#define GCC_USB30_TERT_BCR 65
+#define GCC_USB3_MP_SS0_PHY_BCR 66
+#define GCC_USB3_MP_SS1_PHY_BCR 67
+#define GCC_USB3_PHY_PRIM_BCR 68
+#define GCC_USB3_PHY_SEC_BCR 69
+#define GCC_USB3_PHY_TERT_BCR 70
+#define GCC_USB3_UNIPHY_MP0_BCR 71
+#define GCC_USB3_UNIPHY_MP1_BCR 72
+#define GCC_USB3PHY_PHY_PRIM_BCR 73
+#define GCC_USB3PHY_PHY_SEC_BCR 74
+#define GCC_USB3PHY_PHY_TERT_BCR 75
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 76
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 77
+#define GCC_USB4_0_BCR 78
+#define GCC_USB4_0_DP0_PHY_PRIM_BCR 79
+#define GCC_USB4_1_DP0_PHY_SEC_BCR 80
+#define GCC_USB4_2_DP0_PHY_TERT_BCR 81
+#define GCC_USB4_1_BCR 82
+#define GCC_USB4_2_BCR 83
+#define GCC_USB_0_PHY_BCR 84
+#define GCC_USB_1_PHY_BCR 85
+#define GCC_USB_2_PHY_BCR 86
+#define GCC_VIDEO_BCR 87
+#endif
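
Usage sketch: consumers reference these GCC indices through the controller node's specifier cells, and the three lists above are separate index spaces — clocks, GDSC power domains, and BCR resets each count from their own zero. A minimal DTS fragment, assuming a GCC node labeled &gcc; the PHY node, its address, and the clock-names strings below are illustrative, not taken from the bindings:

	usb_1_qmpphy: phy@fd5000 {
		/* clock indices come from the "GCC clocks" list above */
		clocks = <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
			 <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
		clock-names = "com_aux", "pipe";
		/* reset index from the "GCC resets" list */
		resets = <&gcc GCC_USB3_PHY_PRIM_BCR>;
		/* power-domain index from the "GCC power domains" list */
		power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
	};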
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gpucc.h b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
new file mode 100644
index 000000000000..61a3a8f3ac43
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_X1E80100_GPU_CC_H
+#define _DT_BINDINGS_CLK_QCOM_X1E80100_GPU_CC_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_DEMET_CLK 7
+#define GPU_CC_DEMET_DIV_CLK_SRC 8
+#define GPU_CC_FF_CLK_SRC 9
+#define GPU_CC_FREQ_MEASURE_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_GMU_CLK 12
+#define GPU_CC_GX_VSENSE_CLK 13
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14
+#define GPU_CC_HUB_AON_CLK 15
+#define GPU_CC_HUB_CLK_SRC 16
+#define GPU_CC_HUB_CX_INT_CLK 17
+#define GPU_CC_MEMNOC_GFX_CLK 18
+#define GPU_CC_MND1X_0_GFX3D_CLK 19
+#define GPU_CC_MND1X_1_GFX3D_CLK 20
+#define GPU_CC_PLL0 21
+#define GPU_CC_PLL1 22
+#define GPU_CC_SLEEP_CLK 23
+#define GPU_CC_XO_CLK_SRC 24
+#define GPU_CC_XO_DIV_CLK_SRC 25
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
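
As with the GCC header, the GDSC indices here form a one-cell power-domain space separate from the clock indices. A hypothetical consumer sketch, assuming a GPU clock controller labeled &gpucc (node name and address are illustrative):

	gpu@3d00000 {
		/* GPU_CX_GDSC is index 0 of the GDSC list, not a clock index */
		power-domains = <&gpucc GPU_CX_GDSC>;
	};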
diff --git a/include/dt-bindings/clock/qcom,x1e80100-tcsr.h b/include/dt-bindings/clock/qcom,x1e80100-tcsr.h
new file mode 100644
index 000000000000..bae2c4654ee2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-tcsr.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_X1E80100_TCSR_CC_H
+#define _DT_BINDINGS_CLK_QCOM_X1E80100_TCSR_CC_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_2L_4_CLKREF_EN 0
+#define TCSR_PCIE_2L_5_CLKREF_EN 1
+#define TCSR_PCIE_8L_CLKREF_EN 2
+#define TCSR_USB3_MP0_CLKREF_EN 3
+#define TCSR_USB3_MP1_CLKREF_EN 4
+#define TCSR_USB2_1_CLKREF_EN 5
+#define TCSR_UFS_PHY_CLKREF_EN 6
+#define TCSR_USB4_1_CLKREF_EN 7
+#define TCSR_USB4_2_CLKREF_EN 8
+#define TCSR_USB2_2_CLKREF_EN 9
+#define TCSR_PCIE_4L_CLKREF_EN 10
+#define TCSR_EDP_CLKREF_EN 11
+
+#endif
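
The TCSR indices are plain one-cell clock specifiers for reference-clock enables, typically picked up by PHY nodes. A hedged sketch, assuming a TCSR clock node labeled &tcsr; the consumer node and its clock-names string are illustrative:

	pcie4_phy: phy@1c0e000 {
		clocks = <&tcsr TCSR_PCIE_4L_CLKREF_EN>;
		clock-names = "refgen";
	};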
diff --git a/include/dt-bindings/clock/r8a7779-clock.h b/include/dt-bindings/clock/r8a7779-clock.h
index f0549234b7d8..342a60b11934 100644
--- a/include/dt-bindings/clock/r8a7779-clock.h
+++ b/include/dt-bindings/clock/r8a7779-clock.h
@@ -19,6 +19,7 @@
#define R8A7779_CLK_OUT 7

/* MSTP 0 */
+#define R8A7779_CLK_PWM 5
#define R8A7779_CLK_HSPI 7
#define R8A7779_CLK_TMU2 14
#define R8A7779_CLK_TMU1 15
diff --git a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
new file mode 100644
index 000000000000..f1d737ca7ca1
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779A0 CPG Core Clocks */
+#define R8A779A0_CLK_Z0 0
+#define R8A779A0_CLK_ZX 1
+#define R8A779A0_CLK_Z1 2
+#define R8A779A0_CLK_ZR 3
+#define R8A779A0_CLK_ZS 4
+#define R8A779A0_CLK_ZT 5
+#define R8A779A0_CLK_ZTR 6
+#define R8A779A0_CLK_S1D1 7
+#define R8A779A0_CLK_S1D2 8
+#define R8A779A0_CLK_S1D4 9
+#define R8A779A0_CLK_S1D8 10
+#define R8A779A0_CLK_S1D12 11
+#define R8A779A0_CLK_S3D1 12
+#define R8A779A0_CLK_S3D2 13
+#define R8A779A0_CLK_S3D4 14
+#define R8A779A0_CLK_LB 15
+#define R8A779A0_CLK_CP 16
+#define R8A779A0_CLK_CL 17
+#define R8A779A0_CLK_CL16MCK 18
+#define R8A779A0_CLK_ZB30 19
+#define R8A779A0_CLK_ZB30D2 20
+#define R8A779A0_CLK_ZB30D4 21
+#define R8A779A0_CLK_ZB31 22
+#define R8A779A0_CLK_ZB31D2 23
+#define R8A779A0_CLK_ZB31D4 24
+#define R8A779A0_CLK_SD0H 25
+#define R8A779A0_CLK_SD0 26
+#define R8A779A0_CLK_RPC 27
+#define R8A779A0_CLK_RPCD2 28
+#define R8A779A0_CLK_MSO 29
+#define R8A779A0_CLK_CANFD 30
+#define R8A779A0_CLK_CSI0 31
+#define R8A779A0_CLK_FRAY 32
+#define R8A779A0_CLK_DSI 33
+#define R8A779A0_CLK_VIP 34
+#define R8A779A0_CLK_ADGH 35
+#define R8A779A0_CLK_CNNDSP 36
+#define R8A779A0_CLK_ICU 37
+#define R8A779A0_CLK_ICUD2 38
+#define R8A779A0_CLK_VCBUS 39
+#define R8A779A0_CLK_CBFUSA 40
+#define R8A779A0_CLK_R 41
+#define R8A779A0_CLK_OSC 42
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ */
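
For R-Car CPG/MSSR providers such as this one, only the core clocks carry macros; module clocks are addressed by raw MSTP number through the CPG_CORE/CPG_MOD clock types supplied by the included renesas-cpg-mssr.h. A hedged DTS sketch — the module number 702, the choice of S1D2, and the &scif_clk reference are illustrative, not taken from this header:

	serial@e6e60000 {
		clocks = <&cpg CPG_MOD 702>,
			 <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
			 <&scif_clk>;
		clock-names = "fck", "brg_int", "scif_clk";
	};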
diff --git a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
new file mode 100644
index 000000000000..c34be5624954
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779f0 CPG Core Clocks */
+
+#define R8A779F0_CLK_ZX 0
+#define R8A779F0_CLK_ZS 1
+#define R8A779F0_CLK_ZT 2
+#define R8A779F0_CLK_ZTR 3
+#define R8A779F0_CLK_S0D2 4
+#define R8A779F0_CLK_S0D3 5
+#define R8A779F0_CLK_S0D4 6
+#define R8A779F0_CLK_S0D2_MM 7
+#define R8A779F0_CLK_S0D3_MM 8
+#define R8A779F0_CLK_S0D4_MM 9
+#define R8A779F0_CLK_S0D2_RT 10
+#define R8A779F0_CLK_S0D3_RT 11
+#define R8A779F0_CLK_S0D4_RT 12
+#define R8A779F0_CLK_S0D6_RT 13
+#define R8A779F0_CLK_S0D3_PER 14
+#define R8A779F0_CLK_S0D6_PER 15
+#define R8A779F0_CLK_S0D12_PER 16
+#define R8A779F0_CLK_S0D24_PER 17
+#define R8A779F0_CLK_S0D2_HSC 18
+#define R8A779F0_CLK_S0D3_HSC 19
+#define R8A779F0_CLK_S0D4_HSC 20
+#define R8A779F0_CLK_S0D6_HSC 21
+#define R8A779F0_CLK_S0D12_HSC 22
+#define R8A779F0_CLK_S0D2_CC 23
+#define R8A779F0_CLK_CL 24
+#define R8A779F0_CLK_CL16M 25
+#define R8A779F0_CLK_CL16M_MM 26
+#define R8A779F0_CLK_CL16M_RT 27
+#define R8A779F0_CLK_CL16M_PER 28
+#define R8A779F0_CLK_CL16M_HSC 29
+#define R8A779F0_CLK_Z0 30
+#define R8A779F0_CLK_Z1 31
+#define R8A779F0_CLK_ZB3 32
+#define R8A779F0_CLK_ZB3D2 33
+#define R8A779F0_CLK_ZB3D4 34
+#define R8A779F0_CLK_SD0H 35
+#define R8A779F0_CLK_SD0 36
+#define R8A779F0_CLK_RPC 37
+#define R8A779F0_CLK_RPCD2 38
+#define R8A779F0_CLK_MSO 39
+#define R8A779F0_CLK_SASYNCRT 40
+#define R8A779F0_CLK_SASYNCPERD1 41
+#define R8A779F0_CLK_SASYNCPERD2 42
+#define R8A779F0_CLK_SASYNCPERD4 43
+#define R8A779F0_CLK_DBGSOC_HSC 44
+#define R8A779F0_CLK_RSW2 45
+#define R8A779F0_CLK_OSC 46
+#define R8A779F0_CLK_ZR 47
+#define R8A779F0_CLK_CPEX 48
+#define R8A779F0_CLK_CBFUSA 49
+#define R8A779F0_CLK_R 50
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
new file mode 100644
index 000000000000..7850cdc62e28
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779g0 CPG Core Clocks */
+
+#define R8A779G0_CLK_ZX 0
+#define R8A779G0_CLK_ZS 1
+#define R8A779G0_CLK_ZT 2
+#define R8A779G0_CLK_ZTR 3
+#define R8A779G0_CLK_S0D2 4
+#define R8A779G0_CLK_S0D3 5
+#define R8A779G0_CLK_S0D4 6
+#define R8A779G0_CLK_S0D1_VIO 7
+#define R8A779G0_CLK_S0D2_VIO 8
+#define R8A779G0_CLK_S0D4_VIO 9
+#define R8A779G0_CLK_S0D8_VIO 10
+#define R8A779G0_CLK_S0D1_VC 11
+#define R8A779G0_CLK_S0D2_VC 12
+#define R8A779G0_CLK_S0D4_VC 13
+#define R8A779G0_CLK_S0D2_MM 14
+#define R8A779G0_CLK_S0D4_MM 15
+#define R8A779G0_CLK_S0D2_U3DG 16
+#define R8A779G0_CLK_S0D4_U3DG 17
+#define R8A779G0_CLK_S0D2_RT 18
+#define R8A779G0_CLK_S0D3_RT 19
+#define R8A779G0_CLK_S0D4_RT 20
+#define R8A779G0_CLK_S0D6_RT 21
+#define R8A779G0_CLK_S0D24_RT 22
+#define R8A779G0_CLK_S0D2_PER 23
+#define R8A779G0_CLK_S0D3_PER 24
+#define R8A779G0_CLK_S0D4_PER 25
+#define R8A779G0_CLK_S0D6_PER 26
+#define R8A779G0_CLK_S0D12_PER 27
+#define R8A779G0_CLK_S0D24_PER 28
+#define R8A779G0_CLK_S0D1_HSC 29
+#define R8A779G0_CLK_S0D2_HSC 30
+#define R8A779G0_CLK_S0D4_HSC 31
+#define R8A779G0_CLK_S0D2_CC 32
+#define R8A779G0_CLK_SVD1_IR 33
+#define R8A779G0_CLK_SVD2_IR 34
+#define R8A779G0_CLK_SVD1_VIP 35
+#define R8A779G0_CLK_SVD2_VIP 36
+#define R8A779G0_CLK_CL 37
+#define R8A779G0_CLK_CL16M 38
+#define R8A779G0_CLK_CL16M_MM 39
+#define R8A779G0_CLK_CL16M_RT 40
+#define R8A779G0_CLK_CL16M_PER 41
+#define R8A779G0_CLK_CL16M_HSC 42
+#define R8A779G0_CLK_Z0 43
+#define R8A779G0_CLK_ZB3 44
+#define R8A779G0_CLK_ZB3D2 45
+#define R8A779G0_CLK_ZB3D4 46
+#define R8A779G0_CLK_ZG 47
+#define R8A779G0_CLK_SD0H 48
+#define R8A779G0_CLK_SD0 49
+#define R8A779G0_CLK_RPC 50
+#define R8A779G0_CLK_RPCD2 51
+#define R8A779G0_CLK_MSO 52
+#define R8A779G0_CLK_CANFD 53
+#define R8A779G0_CLK_CSI 54
+#define R8A779G0_CLK_FRAY 55
+#define R8A779G0_CLK_IPC 56
+#define R8A779G0_CLK_SASYNCRT 57
+#define R8A779G0_CLK_SASYNCPERD1 58
+#define R8A779G0_CLK_SASYNCPERD2 59
+#define R8A779G0_CLK_SASYNCPERD4 60
+#define R8A779G0_CLK_VIOBUS 61
+#define R8A779G0_CLK_VIOBUSD2 62
+#define R8A779G0_CLK_VCBUS 63
+#define R8A779G0_CLK_VCBUSD2 64
+#define R8A779G0_CLK_DSIEXT 65
+#define R8A779G0_CLK_DSIREF 66
+#define R8A779G0_CLK_ADGH 67
+#define R8A779G0_CLK_OSC 68
+#define R8A779G0_CLK_ZR0 69
+#define R8A779G0_CLK_ZR1 70
+#define R8A779G0_CLK_ZR2 71
+#define R8A779G0_CLK_IMPA 72
+#define R8A779G0_CLK_IMPAD4 73
+#define R8A779G0_CLK_CPEX 74
+#define R8A779G0_CLK_CBFUSA 75
+#define R8A779G0_CLK_R 76
+#define R8A779G0_CLK_CP 77
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
index 90c0f3dc1ba1..d9d7b8b4f426 100644
--- a/include/dt-bindings/clock/r9a06g032-sysctrl.h
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -74,6 +74,7 @@
#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_WATCHDOG 82 /* AKA CLK_REF_SYNC_D8 */
#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
#define R9A06G032_HCLK_CAN0 85
#define R9A06G032_HCLK_CAN1 86
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
new file mode 100644
index 000000000000..77cde8effdc7
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G043 CPG Core Clocks */
+#define R9A07G043_CLK_I 0
+#define R9A07G043_CLK_I2 1
+#define R9A07G043_CLK_S0 2
+#define R9A07G043_CLK_SPI0 3
+#define R9A07G043_CLK_SPI1 4
+#define R9A07G043_CLK_SD0 5
+#define R9A07G043_CLK_SD1 6
+#define R9A07G043_CLK_M0 7
+#define R9A07G043_CLK_M2 8
+#define R9A07G043_CLK_M3 9
+#define R9A07G043_CLK_HP 10
+#define R9A07G043_CLK_TSU 11
+#define R9A07G043_CLK_ZT 12
+#define R9A07G043_CLK_P0 13
+#define R9A07G043_CLK_P1 14
+#define R9A07G043_CLK_P2 15
+#define R9A07G043_CLK_AT 16
+#define R9A07G043_OSCCLK 17
+#define R9A07G043_CLK_P0_DIV2 18
+
+/* R9A07G043 Module Clocks */
+#define R9A07G043_CA55_SCLK 0 /* RZ/G2UL Only */
+#define R9A07G043_CA55_PCLK 1 /* RZ/G2UL Only */
+#define R9A07G043_CA55_ATCLK 2 /* RZ/G2UL Only */
+#define R9A07G043_CA55_GICCLK 3 /* RZ/G2UL Only */
+#define R9A07G043_CA55_PERICLK 4 /* RZ/G2UL Only */
+#define R9A07G043_CA55_ACLK 5 /* RZ/G2UL Only */
+#define R9A07G043_CA55_TSCLK 6 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_GICCLK 7 /* RZ/G2UL Only */
+#define R9A07G043_IA55_CLK 8 /* RZ/G2UL Only */
+#define R9A07G043_IA55_PCLK 9 /* RZ/G2UL Only */
+#define R9A07G043_MHU_PCLK 10 /* RZ/G2UL Only */
+#define R9A07G043_SYC_CNT_CLK 11
+#define R9A07G043_DMAC_ACLK 12
+#define R9A07G043_DMAC_PCLK 13
+#define R9A07G043_OSTM0_PCLK 14
+#define R9A07G043_OSTM1_PCLK 15
+#define R9A07G043_OSTM2_PCLK 16
+#define R9A07G043_MTU_X_MCK_MTU3 17
+#define R9A07G043_POE3_CLKM_POE 18
+#define R9A07G043_WDT0_PCLK 19
+#define R9A07G043_WDT0_CLK 20
+#define R9A07G043_WDT2_PCLK 21 /* RZ/G2UL Only */
+#define R9A07G043_WDT2_CLK 22 /* RZ/G2UL Only */
+#define R9A07G043_SPI_CLK2 23
+#define R9A07G043_SPI_CLK 24
+#define R9A07G043_SDHI0_IMCLK 25
+#define R9A07G043_SDHI0_IMCLK2 26
+#define R9A07G043_SDHI0_CLK_HS 27
+#define R9A07G043_SDHI0_ACLK 28
+#define R9A07G043_SDHI1_IMCLK 29
+#define R9A07G043_SDHI1_IMCLK2 30
+#define R9A07G043_SDHI1_CLK_HS 31
+#define R9A07G043_SDHI1_ACLK 32
+#define R9A07G043_ISU_ACLK 33 /* RZ/G2UL Only */
+#define R9A07G043_ISU_PCLK 34 /* RZ/G2UL Only */
+#define R9A07G043_CRU_SYSCLK 35 /* RZ/G2UL Only */
+#define R9A07G043_CRU_VCLK 36 /* RZ/G2UL Only */
+#define R9A07G043_CRU_PCLK 37 /* RZ/G2UL Only */
+#define R9A07G043_CRU_ACLK 38 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_A 39 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_P 40 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_D 41 /* RZ/G2UL Only */
+#define R9A07G043_SSI0_PCLK2 42
+#define R9A07G043_SSI0_PCLK_SFR 43
+#define R9A07G043_SSI1_PCLK2 44
+#define R9A07G043_SSI1_PCLK_SFR 45
+#define R9A07G043_SSI2_PCLK2 46
+#define R9A07G043_SSI2_PCLK_SFR 47
+#define R9A07G043_SSI3_PCLK2 48
+#define R9A07G043_SSI3_PCLK_SFR 49
+#define R9A07G043_SRC_CLKP 50 /* RZ/G2UL Only */
+#define R9A07G043_USB_U2H0_HCLK 51
+#define R9A07G043_USB_U2H1_HCLK 52
+#define R9A07G043_USB_U2P_EXR_CPUCLK 53
+#define R9A07G043_USB_PCLK 54
+#define R9A07G043_ETH0_CLK_AXI 55
+#define R9A07G043_ETH0_CLK_CHI 56
+#define R9A07G043_ETH1_CLK_AXI 57
+#define R9A07G043_ETH1_CLK_CHI 58
+#define R9A07G043_I2C0_PCLK 59
+#define R9A07G043_I2C1_PCLK 60
+#define R9A07G043_I2C2_PCLK 61
+#define R9A07G043_I2C3_PCLK 62
+#define R9A07G043_SCIF0_CLK_PCK 63
+#define R9A07G043_SCIF1_CLK_PCK 64
+#define R9A07G043_SCIF2_CLK_PCK 65
+#define R9A07G043_SCIF3_CLK_PCK 66
+#define R9A07G043_SCIF4_CLK_PCK 67
+#define R9A07G043_SCI0_CLKP 68
+#define R9A07G043_SCI1_CLKP 69
+#define R9A07G043_IRDA_CLKP 70
+#define R9A07G043_RSPI0_CLKB 71
+#define R9A07G043_RSPI1_CLKB 72
+#define R9A07G043_RSPI2_CLKB 73
+#define R9A07G043_CANFD_PCLK 74
+#define R9A07G043_GPIO_HCLK 75
+#define R9A07G043_ADC_ADCLK 76
+#define R9A07G043_ADC_PCLK 77
+#define R9A07G043_TSU_PCLK 78
+#define R9A07G043_NCEPLDM_DM_CLK 79 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ACLK 80 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_TCK 81 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ACLK 82 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ACLK 83 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_CLK 84 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ACLK 85 /* RZ/Five Only */
+#define R9A07G043_IAX45_CLK 86 /* RZ/Five Only */
+#define R9A07G043_IAX45_PCLK 87 /* RZ/Five Only */
+
+/* R9A07G043 Resets */
+#define R9A07G043_CA55_RST_1_0 0 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_1_1 1 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_3_0 2 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_3_1 3 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_4 4 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_5 5 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_6 6 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_7 7 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_8 8 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_9 9 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_10 10 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_11 11 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_12 12 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_GICRESET_N 13 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_DBG_GICRESET_N 14 /* RZ/G2UL Only */
+#define R9A07G043_IA55_RESETN 15 /* RZ/G2UL Only */
+#define R9A07G043_MHU_RESETN 16 /* RZ/G2UL Only */
+#define R9A07G043_DMAC_ARESETN 17
+#define R9A07G043_DMAC_RST_ASYNC 18
+#define R9A07G043_SYC_RESETN 19
+#define R9A07G043_OSTM0_PRESETZ 20
+#define R9A07G043_OSTM1_PRESETZ 21
+#define R9A07G043_OSTM2_PRESETZ 22
+#define R9A07G043_MTU_X_PRESET_MTU3 23
+#define R9A07G043_POE3_RST_M_REG 24
+#define R9A07G043_WDT0_PRESETN 25
+#define R9A07G043_WDT2_PRESETN 26 /* RZ/G2UL Only */
+#define R9A07G043_SPI_RST 27
+#define R9A07G043_SDHI0_IXRST 28
+#define R9A07G043_SDHI1_IXRST 29
+#define R9A07G043_ISU_ARESETN 30 /* RZ/G2UL Only */
+#define R9A07G043_ISU_PRESETN 31 /* RZ/G2UL Only */
+#define R9A07G043_CRU_CMN_RSTB 32 /* RZ/G2UL Only */
+#define R9A07G043_CRU_PRESETN 33 /* RZ/G2UL Only */
+#define R9A07G043_CRU_ARESETN 34 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_RESET_N 35 /* RZ/G2UL Only */
+#define R9A07G043_SSI0_RST_M2_REG 36
+#define R9A07G043_SSI1_RST_M2_REG 37
+#define R9A07G043_SSI2_RST_M2_REG 38
+#define R9A07G043_SSI3_RST_M2_REG 39
+#define R9A07G043_SRC_RST 40 /* RZ/G2UL Only */
+#define R9A07G043_USB_U2H0_HRESETN 41
+#define R9A07G043_USB_U2H1_HRESETN 42
+#define R9A07G043_USB_U2P_EXL_SYSRST 43
+#define R9A07G043_USB_PRESETN 44
+#define R9A07G043_ETH0_RST_HW_N 45
+#define R9A07G043_ETH1_RST_HW_N 46
+#define R9A07G043_I2C0_MRST 47
+#define R9A07G043_I2C1_MRST 48
+#define R9A07G043_I2C2_MRST 49
+#define R9A07G043_I2C3_MRST 50
+#define R9A07G043_SCIF0_RST_SYSTEM_N 51
+#define R9A07G043_SCIF1_RST_SYSTEM_N 52
+#define R9A07G043_SCIF2_RST_SYSTEM_N 53
+#define R9A07G043_SCIF3_RST_SYSTEM_N 54
+#define R9A07G043_SCIF4_RST_SYSTEM_N 55
+#define R9A07G043_SCI0_RST 56
+#define R9A07G043_SCI1_RST 57
+#define R9A07G043_IRDA_RST 58
+#define R9A07G043_RSPI0_RST 59
+#define R9A07G043_RSPI1_RST 60
+#define R9A07G043_RSPI2_RST 61
+#define R9A07G043_CANFD_RSTP_N 62
+#define R9A07G043_CANFD_RSTC_N 63
+#define R9A07G043_GPIO_RSTN 64
+#define R9A07G043_GPIO_PORT_RESETN 65
+#define R9A07G043_GPIO_SPARE_RESETN 66
+#define R9A07G043_ADC_PRESETN 67
+#define R9A07G043_ADC_ADRST_N 68
+#define R9A07G043_TSU_PRESETN 69
+#define R9A07G043_NCEPLDM_DTM_PWR_RST_N 70 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ARESETN 71 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_POR_RSTN 72 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ARESETN 73 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ARESETN 74 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNM 75 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNS 76 /* RZ/Five Only */
+#define R9A07G043_AX45MP_L2_RESETN 77 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
+#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
+
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
new file mode 100644
index 000000000000..0bb17ff1a01a
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G044 CPG Core Clocks */
+#define R9A07G044_CLK_I 0
+#define R9A07G044_CLK_I2 1
+#define R9A07G044_CLK_G 2
+#define R9A07G044_CLK_S0 3
+#define R9A07G044_CLK_S1 4
+#define R9A07G044_CLK_SPI0 5
+#define R9A07G044_CLK_SPI1 6
+#define R9A07G044_CLK_SD0 7
+#define R9A07G044_CLK_SD1 8
+#define R9A07G044_CLK_M0 9
+#define R9A07G044_CLK_M1 10
+#define R9A07G044_CLK_M2 11
+#define R9A07G044_CLK_M3 12
+#define R9A07G044_CLK_M4 13
+#define R9A07G044_CLK_HP 14
+#define R9A07G044_CLK_TSU 15
+#define R9A07G044_CLK_ZT 16
+#define R9A07G044_CLK_P0 17
+#define R9A07G044_CLK_P1 18
+#define R9A07G044_CLK_P2 19
+#define R9A07G044_CLK_AT 20
+#define R9A07G044_OSCCLK 21
+#define R9A07G044_CLK_P0_DIV2 22
+
+/* R9A07G044 Module Clocks */
+#define R9A07G044_CA55_SCLK 0
+#define R9A07G044_CA55_PCLK 1
+#define R9A07G044_CA55_ATCLK 2
+#define R9A07G044_CA55_GICCLK 3
+#define R9A07G044_CA55_PERICLK 4
+#define R9A07G044_CA55_ACLK 5
+#define R9A07G044_CA55_TSCLK 6
+#define R9A07G044_GIC600_GICCLK 7
+#define R9A07G044_IA55_CLK 8
+#define R9A07G044_IA55_PCLK 9
+#define R9A07G044_MHU_PCLK 10
+#define R9A07G044_SYC_CNT_CLK 11
+#define R9A07G044_DMAC_ACLK 12
+#define R9A07G044_DMAC_PCLK 13
+#define R9A07G044_OSTM0_PCLK 14
+#define R9A07G044_OSTM1_PCLK 15
+#define R9A07G044_OSTM2_PCLK 16
+#define R9A07G044_MTU_X_MCK_MTU3 17
+#define R9A07G044_POE3_CLKM_POE 18
+#define R9A07G044_GPT_PCLK 19
+#define R9A07G044_POEG_A_CLKP 20
+#define R9A07G044_POEG_B_CLKP 21
+#define R9A07G044_POEG_C_CLKP 22
+#define R9A07G044_POEG_D_CLKP 23
+#define R9A07G044_WDT0_PCLK 24
+#define R9A07G044_WDT0_CLK 25
+#define R9A07G044_WDT1_PCLK 26
+#define R9A07G044_WDT1_CLK 27
+#define R9A07G044_WDT2_PCLK 28
+#define R9A07G044_WDT2_CLK 29
+#define R9A07G044_SPI_CLK2 30
+#define R9A07G044_SPI_CLK 31
+#define R9A07G044_SDHI0_IMCLK 32
+#define R9A07G044_SDHI0_IMCLK2 33
+#define R9A07G044_SDHI0_CLK_HS 34
+#define R9A07G044_SDHI0_ACLK 35
+#define R9A07G044_SDHI1_IMCLK 36
+#define R9A07G044_SDHI1_IMCLK2 37
+#define R9A07G044_SDHI1_CLK_HS 38
+#define R9A07G044_SDHI1_ACLK 39
+#define R9A07G044_GPU_CLK 40
+#define R9A07G044_GPU_AXI_CLK 41
+#define R9A07G044_GPU_ACE_CLK 42
+#define R9A07G044_ISU_ACLK 43
+#define R9A07G044_ISU_PCLK 44
+#define R9A07G044_H264_CLK_A 45
+#define R9A07G044_H264_CLK_P 46
+#define R9A07G044_CRU_SYSCLK 47
+#define R9A07G044_CRU_VCLK 48
+#define R9A07G044_CRU_PCLK 49
+#define R9A07G044_CRU_ACLK 50
+#define R9A07G044_MIPI_DSI_PLLCLK 51
+#define R9A07G044_MIPI_DSI_SYSCLK 52
+#define R9A07G044_MIPI_DSI_ACLK 53
+#define R9A07G044_MIPI_DSI_PCLK 54
+#define R9A07G044_MIPI_DSI_VCLK 55
+#define R9A07G044_MIPI_DSI_LPCLK 56
+#define R9A07G044_LCDC_CLK_A 57
+#define R9A07G044_LCDC_CLK_P 58
+#define R9A07G044_LCDC_CLK_D 59
+#define R9A07G044_SSI0_PCLK2 60
+#define R9A07G044_SSI0_PCLK_SFR 61
+#define R9A07G044_SSI1_PCLK2 62
+#define R9A07G044_SSI1_PCLK_SFR 63
+#define R9A07G044_SSI2_PCLK2 64
+#define R9A07G044_SSI2_PCLK_SFR 65
+#define R9A07G044_SSI3_PCLK2 66
+#define R9A07G044_SSI3_PCLK_SFR 67
+#define R9A07G044_SRC_CLKP 68
+#define R9A07G044_USB_U2H0_HCLK 69
+#define R9A07G044_USB_U2H1_HCLK 70
+#define R9A07G044_USB_U2P_EXR_CPUCLK 71
+#define R9A07G044_USB_PCLK 72
+#define R9A07G044_ETH0_CLK_AXI 73
+#define R9A07G044_ETH0_CLK_CHI 74
+#define R9A07G044_ETH1_CLK_AXI 75
+#define R9A07G044_ETH1_CLK_CHI 76
+#define R9A07G044_I2C0_PCLK 77
+#define R9A07G044_I2C1_PCLK 78
+#define R9A07G044_I2C2_PCLK 79
+#define R9A07G044_I2C3_PCLK 80
+#define R9A07G044_SCIF0_CLK_PCK 81
+#define R9A07G044_SCIF1_CLK_PCK 82
+#define R9A07G044_SCIF2_CLK_PCK 83
+#define R9A07G044_SCIF3_CLK_PCK 84
+#define R9A07G044_SCIF4_CLK_PCK 85
+#define R9A07G044_SCI0_CLKP 86
+#define R9A07G044_SCI1_CLKP 87
+#define R9A07G044_IRDA_CLKP 88
+#define R9A07G044_RSPI0_CLKB 89
+#define R9A07G044_RSPI1_CLKB 90
+#define R9A07G044_RSPI2_CLKB 91
+#define R9A07G044_CANFD_PCLK 92
+#define R9A07G044_GPIO_HCLK 93
+#define R9A07G044_ADC_ADCLK 94
+#define R9A07G044_ADC_PCLK 95
+#define R9A07G044_TSU_PCLK 96
+
+/* R9A07G044 Resets */
+#define R9A07G044_CA55_RST_1_0 0
+#define R9A07G044_CA55_RST_1_1 1
+#define R9A07G044_CA55_RST_3_0 2
+#define R9A07G044_CA55_RST_3_1 3
+#define R9A07G044_CA55_RST_4 4
+#define R9A07G044_CA55_RST_5 5
+#define R9A07G044_CA55_RST_6 6
+#define R9A07G044_CA55_RST_7 7
+#define R9A07G044_CA55_RST_8 8
+#define R9A07G044_CA55_RST_9 9
+#define R9A07G044_CA55_RST_10 10
+#define R9A07G044_CA55_RST_11 11
+#define R9A07G044_CA55_RST_12 12
+#define R9A07G044_GIC600_GICRESET_N 13
+#define R9A07G044_GIC600_DBG_GICRESET_N 14
+#define R9A07G044_IA55_RESETN 15
+#define R9A07G044_MHU_RESETN 16
+#define R9A07G044_DMAC_ARESETN 17
+#define R9A07G044_DMAC_RST_ASYNC 18
+#define R9A07G044_SYC_RESETN 19
+#define R9A07G044_OSTM0_PRESETZ 20
+#define R9A07G044_OSTM1_PRESETZ 21
+#define R9A07G044_OSTM2_PRESETZ 22
+#define R9A07G044_MTU_X_PRESET_MTU3 23
+#define R9A07G044_POE3_RST_M_REG 24
+#define R9A07G044_GPT_RST_C 25
+#define R9A07G044_POEG_A_RST 26
+#define R9A07G044_POEG_B_RST 27
+#define R9A07G044_POEG_C_RST 28
+#define R9A07G044_POEG_D_RST 29
+#define R9A07G044_WDT0_PRESETN 30
+#define R9A07G044_WDT1_PRESETN 31
+#define R9A07G044_WDT2_PRESETN 32
+#define R9A07G044_SPI_RST 33
+#define R9A07G044_SDHI0_IXRST 34
+#define R9A07G044_SDHI1_IXRST 35
+#define R9A07G044_GPU_RESETN 36
+#define R9A07G044_GPU_AXI_RESETN 37
+#define R9A07G044_GPU_ACE_RESETN 38
+#define R9A07G044_ISU_ARESETN 39
+#define R9A07G044_ISU_PRESETN 40
+#define R9A07G044_H264_X_RESET_VCP 41
+#define R9A07G044_H264_CP_PRESET_P 42
+#define R9A07G044_CRU_CMN_RSTB 43
+#define R9A07G044_CRU_PRESETN 44
+#define R9A07G044_CRU_ARESETN 45
+#define R9A07G044_MIPI_DSI_CMN_RSTB 46
+#define R9A07G044_MIPI_DSI_ARESET_N 47
+#define R9A07G044_MIPI_DSI_PRESET_N 48
+#define R9A07G044_LCDC_RESET_N 49
+#define R9A07G044_SSI0_RST_M2_REG 50
+#define R9A07G044_SSI1_RST_M2_REG 51
+#define R9A07G044_SSI2_RST_M2_REG 52
+#define R9A07G044_SSI3_RST_M2_REG 53
+#define R9A07G044_SRC_RST 54
+#define R9A07G044_USB_U2H0_HRESETN 55
+#define R9A07G044_USB_U2H1_HRESETN 56
+#define R9A07G044_USB_U2P_EXL_SYSRST 57
+#define R9A07G044_USB_PRESETN 58
+#define R9A07G044_ETH0_RST_HW_N 59
+#define R9A07G044_ETH1_RST_HW_N 60
+#define R9A07G044_I2C0_MRST 61
+#define R9A07G044_I2C1_MRST 62
+#define R9A07G044_I2C2_MRST 63
+#define R9A07G044_I2C3_MRST 64
+#define R9A07G044_SCIF0_RST_SYSTEM_N 65
+#define R9A07G044_SCIF1_RST_SYSTEM_N 66
+#define R9A07G044_SCIF2_RST_SYSTEM_N 67
+#define R9A07G044_SCIF3_RST_SYSTEM_N 68
+#define R9A07G044_SCIF4_RST_SYSTEM_N 69
+#define R9A07G044_SCI0_RST 70
+#define R9A07G044_SCI1_RST 71
+#define R9A07G044_IRDA_RST 72
+#define R9A07G044_RSPI0_RST 73
+#define R9A07G044_RSPI1_RST 74
+#define R9A07G044_RSPI2_RST 75
+#define R9A07G044_CANFD_RSTP_N 76
+#define R9A07G044_CANFD_RSTC_N 77
+#define R9A07G044_GPIO_RSTN 78
+#define R9A07G044_GPIO_PORT_RESETN 79
+#define R9A07G044_GPIO_SPARE_RESETN 80
+#define R9A07G044_ADC_PRESETN 81
+#define R9A07G044_ADC_ADRST_N 82
+#define R9A07G044_TSU_PRESETN 83
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
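
Unlike R-Car above, the RZ/G2L-family headers define macros for module clocks as well, and the reset indices are consumed directly with the same CPG node acting as reset controller. A minimal sketch, with the node name and address illustrative:

	serial@1004b800 {
		/* module clocks still go through the CPG_MOD type */
		clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
		clock-names = "fck";
		/* resets use the bare index from the "Resets" list */
		resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>;
	};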
diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h
new file mode 100644
index 000000000000..43f4dbda872c
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g054-cpg.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G054 CPG Core Clocks */
+#define R9A07G054_CLK_I 0
+#define R9A07G054_CLK_I2 1
+#define R9A07G054_CLK_G 2
+#define R9A07G054_CLK_S0 3
+#define R9A07G054_CLK_S1 4
+#define R9A07G054_CLK_SPI0 5
+#define R9A07G054_CLK_SPI1 6
+#define R9A07G054_CLK_SD0 7
+#define R9A07G054_CLK_SD1 8
+#define R9A07G054_CLK_M0 9
+#define R9A07G054_CLK_M1 10
+#define R9A07G054_CLK_M2 11
+#define R9A07G054_CLK_M3 12
+#define R9A07G054_CLK_M4 13
+#define R9A07G054_CLK_HP 14
+#define R9A07G054_CLK_TSU 15
+#define R9A07G054_CLK_ZT 16
+#define R9A07G054_CLK_P0 17
+#define R9A07G054_CLK_P1 18
+#define R9A07G054_CLK_P2 19
+#define R9A07G054_CLK_AT 20
+#define R9A07G054_OSCCLK 21
+#define R9A07G054_CLK_P0_DIV2 22
+#define R9A07G054_CLK_DRP_M 23
+#define R9A07G054_CLK_DRP_D 24
+#define R9A07G054_CLK_DRP_A 25
+
+/* R9A07G054 Module Clocks */
+#define R9A07G054_CA55_SCLK 0
+#define R9A07G054_CA55_PCLK 1
+#define R9A07G054_CA55_ATCLK 2
+#define R9A07G054_CA55_GICCLK 3
+#define R9A07G054_CA55_PERICLK 4
+#define R9A07G054_CA55_ACLK 5
+#define R9A07G054_CA55_TSCLK 6
+#define R9A07G054_GIC600_GICCLK 7
+#define R9A07G054_IA55_CLK 8
+#define R9A07G054_IA55_PCLK 9
+#define R9A07G054_MHU_PCLK 10
+#define R9A07G054_SYC_CNT_CLK 11
+#define R9A07G054_DMAC_ACLK 12
+#define R9A07G054_DMAC_PCLK 13
+#define R9A07G054_OSTM0_PCLK 14
+#define R9A07G054_OSTM1_PCLK 15
+#define R9A07G054_OSTM2_PCLK 16
+#define R9A07G054_MTU_X_MCK_MTU3 17
+#define R9A07G054_POE3_CLKM_POE 18
+#define R9A07G054_GPT_PCLK 19
+#define R9A07G054_POEG_A_CLKP 20
+#define R9A07G054_POEG_B_CLKP 21
+#define R9A07G054_POEG_C_CLKP 22
+#define R9A07G054_POEG_D_CLKP 23
+#define R9A07G054_WDT0_PCLK 24
+#define R9A07G054_WDT0_CLK 25
+#define R9A07G054_WDT1_PCLK 26
+#define R9A07G054_WDT1_CLK 27
+#define R9A07G054_WDT2_PCLK 28
+#define R9A07G054_WDT2_CLK 29
+#define R9A07G054_SPI_CLK2 30
+#define R9A07G054_SPI_CLK 31
+#define R9A07G054_SDHI0_IMCLK 32
+#define R9A07G054_SDHI0_IMCLK2 33
+#define R9A07G054_SDHI0_CLK_HS 34
+#define R9A07G054_SDHI0_ACLK 35
+#define R9A07G054_SDHI1_IMCLK 36
+#define R9A07G054_SDHI1_IMCLK2 37
+#define R9A07G054_SDHI1_CLK_HS 38
+#define R9A07G054_SDHI1_ACLK 39
+#define R9A07G054_GPU_CLK 40
+#define R9A07G054_GPU_AXI_CLK 41
+#define R9A07G054_GPU_ACE_CLK 42
+#define R9A07G054_ISU_ACLK 43
+#define R9A07G054_ISU_PCLK 44
+#define R9A07G054_H264_CLK_A 45
+#define R9A07G054_H264_CLK_P 46
+#define R9A07G054_CRU_SYSCLK 47
+#define R9A07G054_CRU_VCLK 48
+#define R9A07G054_CRU_PCLK 49
+#define R9A07G054_CRU_ACLK 50
+#define R9A07G054_MIPI_DSI_PLLCLK 51
+#define R9A07G054_MIPI_DSI_SYSCLK 52
+#define R9A07G054_MIPI_DSI_ACLK 53
+#define R9A07G054_MIPI_DSI_PCLK 54
+#define R9A07G054_MIPI_DSI_VCLK 55
+#define R9A07G054_MIPI_DSI_LPCLK 56
+#define R9A07G054_LCDC_CLK_A 57
+#define R9A07G054_LCDC_CLK_P 58
+#define R9A07G054_LCDC_CLK_D 59
+#define R9A07G054_SSI0_PCLK2 60
+#define R9A07G054_SSI0_PCLK_SFR 61
+#define R9A07G054_SSI1_PCLK2 62
+#define R9A07G054_SSI1_PCLK_SFR 63
+#define R9A07G054_SSI2_PCLK2 64
+#define R9A07G054_SSI2_PCLK_SFR 65
+#define R9A07G054_SSI3_PCLK2 66
+#define R9A07G054_SSI3_PCLK_SFR 67
+#define R9A07G054_SRC_CLKP 68
+#define R9A07G054_USB_U2H0_HCLK 69
+#define R9A07G054_USB_U2H1_HCLK 70
+#define R9A07G054_USB_U2P_EXR_CPUCLK 71
+#define R9A07G054_USB_PCLK 72
+#define R9A07G054_ETH0_CLK_AXI 73
+#define R9A07G054_ETH0_CLK_CHI 74
+#define R9A07G054_ETH1_CLK_AXI 75
+#define R9A07G054_ETH1_CLK_CHI 76
+#define R9A07G054_I2C0_PCLK 77
+#define R9A07G054_I2C1_PCLK 78
+#define R9A07G054_I2C2_PCLK 79
+#define R9A07G054_I2C3_PCLK 80
+#define R9A07G054_SCIF0_CLK_PCK 81
+#define R9A07G054_SCIF1_CLK_PCK 82
+#define R9A07G054_SCIF2_CLK_PCK 83
+#define R9A07G054_SCIF3_CLK_PCK 84
+#define R9A07G054_SCIF4_CLK_PCK 85
+#define R9A07G054_SCI0_CLKP 86
+#define R9A07G054_SCI1_CLKP 87
+#define R9A07G054_IRDA_CLKP 88
+#define R9A07G054_RSPI0_CLKB 89
+#define R9A07G054_RSPI1_CLKB 90
+#define R9A07G054_RSPI2_CLKB 91
+#define R9A07G054_CANFD_PCLK 92
+#define R9A07G054_GPIO_HCLK 93
+#define R9A07G054_ADC_ADCLK 94
+#define R9A07G054_ADC_PCLK 95
+#define R9A07G054_TSU_PCLK 96
+#define R9A07G054_STPAI_INITCLK 97
+#define R9A07G054_STPAI_ACLK 98
+#define R9A07G054_STPAI_MCLK 99
+#define R9A07G054_STPAI_DCLKIN 100
+#define R9A07G054_STPAI_ACLK_DRP 101
+
+/* R9A07G054 Resets */
+#define R9A07G054_CA55_RST_1_0 0
+#define R9A07G054_CA55_RST_1_1 1
+#define R9A07G054_CA55_RST_3_0 2
+#define R9A07G054_CA55_RST_3_1 3
+#define R9A07G054_CA55_RST_4 4
+#define R9A07G054_CA55_RST_5 5
+#define R9A07G054_CA55_RST_6 6
+#define R9A07G054_CA55_RST_7 7
+#define R9A07G054_CA55_RST_8 8
+#define R9A07G054_CA55_RST_9 9
+#define R9A07G054_CA55_RST_10 10
+#define R9A07G054_CA55_RST_11 11
+#define R9A07G054_CA55_RST_12 12
+#define R9A07G054_GIC600_GICRESET_N 13
+#define R9A07G054_GIC600_DBG_GICRESET_N 14
+#define R9A07G054_IA55_RESETN 15
+#define R9A07G054_MHU_RESETN 16
+#define R9A07G054_DMAC_ARESETN 17
+#define R9A07G054_DMAC_RST_ASYNC 18
+#define R9A07G054_SYC_RESETN 19
+#define R9A07G054_OSTM0_PRESETZ 20
+#define R9A07G054_OSTM1_PRESETZ 21
+#define R9A07G054_OSTM2_PRESETZ 22
+#define R9A07G054_MTU_X_PRESET_MTU3 23
+#define R9A07G054_POE3_RST_M_REG 24
+#define R9A07G054_GPT_RST_C 25
+#define R9A07G054_POEG_A_RST 26
+#define R9A07G054_POEG_B_RST 27
+#define R9A07G054_POEG_C_RST 28
+#define R9A07G054_POEG_D_RST 29
+#define R9A07G054_WDT0_PRESETN 30
+#define R9A07G054_WDT1_PRESETN 31
+#define R9A07G054_WDT2_PRESETN 32
+#define R9A07G054_SPI_RST 33
+#define R9A07G054_SDHI0_IXRST 34
+#define R9A07G054_SDHI1_IXRST 35
+#define R9A07G054_GPU_RESETN 36
+#define R9A07G054_GPU_AXI_RESETN 37
+#define R9A07G054_GPU_ACE_RESETN 38
+#define R9A07G054_ISU_ARESETN 39
+#define R9A07G054_ISU_PRESETN 40
+#define R9A07G054_H264_X_RESET_VCP 41
+#define R9A07G054_H264_CP_PRESET_P 42
+#define R9A07G054_CRU_CMN_RSTB 43
+#define R9A07G054_CRU_PRESETN 44
+#define R9A07G054_CRU_ARESETN 45
+#define R9A07G054_MIPI_DSI_CMN_RSTB 46
+#define R9A07G054_MIPI_DSI_ARESET_N 47
+#define R9A07G054_MIPI_DSI_PRESET_N 48
+#define R9A07G054_LCDC_RESET_N 49
+#define R9A07G054_SSI0_RST_M2_REG 50
+#define R9A07G054_SSI1_RST_M2_REG 51
+#define R9A07G054_SSI2_RST_M2_REG 52
+#define R9A07G054_SSI3_RST_M2_REG 53
+#define R9A07G054_SRC_RST 54
+#define R9A07G054_USB_U2H0_HRESETN 55
+#define R9A07G054_USB_U2H1_HRESETN 56
+#define R9A07G054_USB_U2P_EXL_SYSRST 57
+#define R9A07G054_USB_PRESETN 58
+#define R9A07G054_ETH0_RST_HW_N 59
+#define R9A07G054_ETH1_RST_HW_N 60
+#define R9A07G054_I2C0_MRST 61
+#define R9A07G054_I2C1_MRST 62
+#define R9A07G054_I2C2_MRST 63
+#define R9A07G054_I2C3_MRST 64
+#define R9A07G054_SCIF0_RST_SYSTEM_N 65
+#define R9A07G054_SCIF1_RST_SYSTEM_N 66
+#define R9A07G054_SCIF2_RST_SYSTEM_N 67
+#define R9A07G054_SCIF3_RST_SYSTEM_N 68
+#define R9A07G054_SCIF4_RST_SYSTEM_N 69
+#define R9A07G054_SCI0_RST 70
+#define R9A07G054_SCI1_RST 71
+#define R9A07G054_IRDA_RST 72
+#define R9A07G054_RSPI0_RST 73
+#define R9A07G054_RSPI1_RST 74
+#define R9A07G054_RSPI2_RST 75
+#define R9A07G054_CANFD_RSTP_N 76
+#define R9A07G054_CANFD_RSTC_N 77
+#define R9A07G054_GPIO_RSTN 78
+#define R9A07G054_GPIO_PORT_RESETN 79
+#define R9A07G054_GPIO_SPARE_RESETN 80
+#define R9A07G054_ADC_PRESETN 81
+#define R9A07G054_ADC_ADRST_N 82
+#define R9A07G054_TSU_PRESETN 83
+#define R9A07G054_STPAI_ARESETN 84
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
new file mode 100644
index 000000000000..410725b778a8
--- /dev/null
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A08G045 CPG Core Clocks */
+#define R9A08G045_CLK_I 0
+#define R9A08G045_CLK_I2 1
+#define R9A08G045_CLK_I3 2
+#define R9A08G045_CLK_S0 3
+#define R9A08G045_CLK_SPI0 4
+#define R9A08G045_CLK_SPI1 5
+#define R9A08G045_CLK_SD0 6
+#define R9A08G045_CLK_SD1 7
+#define R9A08G045_CLK_SD2 8
+#define R9A08G045_CLK_M0 9
+#define R9A08G045_CLK_HP 10
+#define R9A08G045_CLK_TSU 11
+#define R9A08G045_CLK_ZT 12
+#define R9A08G045_CLK_P0 13
+#define R9A08G045_CLK_P1 14
+#define R9A08G045_CLK_P2 15
+#define R9A08G045_CLK_P3 16
+#define R9A08G045_CLK_P4 17
+#define R9A08G045_CLK_P5 18
+#define R9A08G045_CLK_AT 19
+#define R9A08G045_CLK_OC0 20
+#define R9A08G045_CLK_OC1 21
+#define R9A08G045_OSCCLK 22
+#define R9A08G045_OSCCLK2 23
+#define R9A08G045_SWD 24
+
+/* R9A08G045 Module Clocks */
+#define R9A08G045_OCTA_ACLK 0
+#define R9A08G045_OCTA_MCLK 1
+#define R9A08G045_CA55_SCLK 2
+#define R9A08G045_CA55_PCLK 3
+#define R9A08G045_CA55_ATCLK 4
+#define R9A08G045_CA55_GICCLK 5
+#define R9A08G045_CA55_PERICLK 6
+#define R9A08G045_CA55_ACLK 7
+#define R9A08G045_CA55_TSCLK 8
+#define R9A08G045_SRAM_ACPU_ACLK0 9
+#define R9A08G045_SRAM_ACPU_ACLK1 10
+#define R9A08G045_SRAM_ACPU_ACLK2 11
+#define R9A08G045_GIC600_GICCLK 12
+#define R9A08G045_IA55_CLK 13
+#define R9A08G045_IA55_PCLK 14
+#define R9A08G045_MHU_PCLK 15
+#define R9A08G045_SYC_CNT_CLK 16
+#define R9A08G045_DMAC_ACLK 17
+#define R9A08G045_DMAC_PCLK 18
+#define R9A08G045_OSTM0_PCLK 19
+#define R9A08G045_OSTM1_PCLK 20
+#define R9A08G045_OSTM2_PCLK 21
+#define R9A08G045_OSTM3_PCLK 22
+#define R9A08G045_OSTM4_PCLK 23
+#define R9A08G045_OSTM5_PCLK 24
+#define R9A08G045_OSTM6_PCLK 25
+#define R9A08G045_OSTM7_PCLK 26
+#define R9A08G045_MTU_X_MCK_MTU3 27
+#define R9A08G045_POE3_CLKM_POE 28
+#define R9A08G045_GPT_PCLK 29
+#define R9A08G045_POEG_A_CLKP 30
+#define R9A08G045_POEG_B_CLKP 31
+#define R9A08G045_POEG_C_CLKP 32
+#define R9A08G045_POEG_D_CLKP 33
+#define R9A08G045_WDT0_PCLK 34
+#define R9A08G045_WDT0_CLK 35
+#define R9A08G045_WDT1_PCLK 36
+#define R9A08G045_WDT1_CLK 37
+#define R9A08G045_WDT2_PCLK 38
+#define R9A08G045_WDT2_CLK 39
+#define R9A08G045_SPI_HCLK 40
+#define R9A08G045_SPI_ACLK 41
+#define R9A08G045_SPI_CLK 42
+#define R9A08G045_SPI_CLKX2 43
+#define R9A08G045_SDHI0_IMCLK 44
+#define R9A08G045_SDHI0_IMCLK2 45
+#define R9A08G045_SDHI0_CLK_HS 46
+#define R9A08G045_SDHI0_ACLK 47
+#define R9A08G045_SDHI1_IMCLK 48
+#define R9A08G045_SDHI1_IMCLK2 49
+#define R9A08G045_SDHI1_CLK_HS 50
+#define R9A08G045_SDHI1_ACLK 51
+#define R9A08G045_SDHI2_IMCLK 52
+#define R9A08G045_SDHI2_IMCLK2 53
+#define R9A08G045_SDHI2_CLK_HS 54
+#define R9A08G045_SDHI2_ACLK 55
+#define R9A08G045_SSI0_PCLK2 56
+#define R9A08G045_SSI0_PCLK_SFR 57
+#define R9A08G045_SSI1_PCLK2 58
+#define R9A08G045_SSI1_PCLK_SFR 59
+#define R9A08G045_SSI2_PCLK2 60
+#define R9A08G045_SSI2_PCLK_SFR 61
+#define R9A08G045_SSI3_PCLK2 62
+#define R9A08G045_SSI3_PCLK_SFR 63
+#define R9A08G045_SRC_CLKP 64
+#define R9A08G045_USB_U2H0_HCLK 65
+#define R9A08G045_USB_U2H1_HCLK 66
+#define R9A08G045_USB_U2P_EXR_CPUCLK 67
+#define R9A08G045_USB_PCLK 68
+#define R9A08G045_ETH0_CLK_AXI 69
+#define R9A08G045_ETH0_CLK_CHI 70
+#define R9A08G045_ETH0_REFCLK 71
+#define R9A08G045_ETH1_CLK_AXI 72
+#define R9A08G045_ETH1_CLK_CHI 73
+#define R9A08G045_ETH1_REFCLK 74
+#define R9A08G045_I2C0_PCLK 75
+#define R9A08G045_I2C1_PCLK 76
+#define R9A08G045_I2C2_PCLK 77
+#define R9A08G045_I2C3_PCLK 78
+#define R9A08G045_SCIF0_CLK_PCK 79
+#define R9A08G045_SCIF1_CLK_PCK 80
+#define R9A08G045_SCIF2_CLK_PCK 81
+#define R9A08G045_SCIF3_CLK_PCK 82
+#define R9A08G045_SCIF4_CLK_PCK 83
+#define R9A08G045_SCIF5_CLK_PCK 84
+#define R9A08G045_SCI0_CLKP 85
+#define R9A08G045_SCI1_CLKP 86
+#define R9A08G045_IRDA_CLKP 87
+#define R9A08G045_RSPI0_CLKB 88
+#define R9A08G045_RSPI1_CLKB 89
+#define R9A08G045_RSPI2_CLKB 90
+#define R9A08G045_RSPI3_CLKB 91
+#define R9A08G045_RSPI4_CLKB 92
+#define R9A08G045_CANFD_PCLK 93
+#define R9A08G045_CANFD_CLK_RAM 94
+#define R9A08G045_GPIO_HCLK 95
+#define R9A08G045_ADC_ADCLK 96
+#define R9A08G045_ADC_PCLK 97
+#define R9A08G045_TSU_PCLK 98
+#define R9A08G045_PDM_PCLK 99
+#define R9A08G045_PDM_CCLK 100
+#define R9A08G045_PCI_ACLK 101
+#define R9A08G045_PCI_CLKL1PM 102
+#define R9A08G045_SPDIF_PCLK 103
+#define R9A08G045_I3C_PCLK 104
+#define R9A08G045_I3C_TCLK 105
+#define R9A08G045_VBAT_BCLK 106
+
+/* R9A08G045 Resets */
+#define R9A08G045_CA55_RST_1_0 0
+#define R9A08G045_CA55_RST_3_0 1
+#define R9A08G045_CA55_RST_4 2
+#define R9A08G045_CA55_RST_5 3
+#define R9A08G045_CA55_RST_6 4
+#define R9A08G045_CA55_RST_7 5
+#define R9A08G045_CA55_RST_8 6
+#define R9A08G045_CA55_RST_9 7
+#define R9A08G045_CA55_RST_10 8
+#define R9A08G045_CA55_RST_11 9
+#define R9A08G045_CA55_RST_12 10
+#define R9A08G045_SRAM_ACPU_ARESETN0 11
+#define R9A08G045_SRAM_ACPU_ARESETN1 12
+#define R9A08G045_SRAM_ACPU_ARESETN2 13
+#define R9A08G045_GIC600_GICRESET_N 14
+#define R9A08G045_GIC600_DBG_GICRESET_N 15
+#define R9A08G045_IA55_RESETN 16
+#define R9A08G045_MHU_RESETN 17
+#define R9A08G045_DMAC_ARESETN 18
+#define R9A08G045_DMAC_RST_ASYNC 19
+#define R9A08G045_SYC_RESETN 20
+#define R9A08G045_OSTM0_PRESETZ 21
+#define R9A08G045_OSTM1_PRESETZ 22
+#define R9A08G045_OSTM2_PRESETZ 23
+#define R9A08G045_OSTM3_PRESETZ 24
+#define R9A08G045_OSTM4_PRESETZ 25
+#define R9A08G045_OSTM5_PRESETZ 26
+#define R9A08G045_OSTM6_PRESETZ 27
+#define R9A08G045_OSTM7_PRESETZ 28
+#define R9A08G045_MTU_X_PRESET_MTU3 29
+#define R9A08G045_POE3_RST_M_REG 30
+#define R9A08G045_GPT_RST_C 31
+#define R9A08G045_POEG_A_RST 32
+#define R9A08G045_POEG_B_RST 33
+#define R9A08G045_POEG_C_RST 34
+#define R9A08G045_POEG_D_RST 35
+#define R9A08G045_WDT0_PRESETN 36
+#define R9A08G045_WDT1_PRESETN 37
+#define R9A08G045_WDT2_PRESETN 38
+#define R9A08G045_SPI_HRESETN 39
+#define R9A08G045_SPI_ARESETN 40
+#define R9A08G045_SDHI0_IXRST 41
+#define R9A08G045_SDHI1_IXRST 42
+#define R9A08G045_SDHI2_IXRST 43
+#define R9A08G045_SSI0_RST_M2_REG 44
+#define R9A08G045_SSI1_RST_M2_REG 45
+#define R9A08G045_SSI2_RST_M2_REG 46
+#define R9A08G045_SSI3_RST_M2_REG 47
+#define R9A08G045_SRC_RST 48
+#define R9A08G045_USB_U2H0_HRESETN 49
+#define R9A08G045_USB_U2H1_HRESETN 50
+#define R9A08G045_USB_U2P_EXL_SYSRST 51
+#define R9A08G045_USB_PRESETN 52
+#define R9A08G045_ETH0_RST_HW_N 53
+#define R9A08G045_ETH1_RST_HW_N 54
+#define R9A08G045_I2C0_MRST 55
+#define R9A08G045_I2C1_MRST 56
+#define R9A08G045_I2C2_MRST 57
+#define R9A08G045_I2C3_MRST 58
+#define R9A08G045_SCIF0_RST_SYSTEM_N 59
+#define R9A08G045_SCIF1_RST_SYSTEM_N 60
+#define R9A08G045_SCIF2_RST_SYSTEM_N 61
+#define R9A08G045_SCIF3_RST_SYSTEM_N 62
+#define R9A08G045_SCIF4_RST_SYSTEM_N 63
+#define R9A08G045_SCIF5_RST_SYSTEM_N 64
+#define R9A08G045_SCI0_RST 65
+#define R9A08G045_SCI1_RST 66
+#define R9A08G045_IRDA_RST 67
+#define R9A08G045_RSPI0_RST 68
+#define R9A08G045_RSPI1_RST 69
+#define R9A08G045_RSPI2_RST 70
+#define R9A08G045_RSPI3_RST 71
+#define R9A08G045_RSPI4_RST 72
+#define R9A08G045_CANFD_RSTP_N 73
+#define R9A08G045_CANFD_RSTC_N 74
+#define R9A08G045_GPIO_RSTN 75
+#define R9A08G045_GPIO_PORT_RESETN 76
+#define R9A08G045_GPIO_SPARE_RESETN 77
+#define R9A08G045_ADC_PRESETN 78
+#define R9A08G045_ADC_ADRST_N 79
+#define R9A08G045_TSU_PRESETN 80
+#define R9A08G045_OCTA_ARESETN 81
+#define R9A08G045_PDM0_PRESETNT 82
+#define R9A08G045_PCI_ARESETN 83
+#define R9A08G045_PCI_RST_B 84
+#define R9A08G045_PCI_RST_GP_B 85
+#define R9A08G045_PCI_RST_PS_B 86
+#define R9A08G045_PCI_RST_RSM_B 87
+#define R9A08G045_PCI_RST_CFG_B 88
+#define R9A08G045_PCI_RST_LOAD_B 89
+#define R9A08G045_SPDIF_RST 90
+#define R9A08G045_I3C_TRESETN 91
+#define R9A08G045_I3C_PRESETN 92
+#define R9A08G045_VBAT_BRESETN 93
+
+#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a09g011-cpg.h b/include/dt-bindings/clock/r9a09g011-cpg.h
new file mode 100644
index 000000000000..41dd585d7115
--- /dev/null
+++ b/include/dt-bindings/clock/r9a09g011-cpg.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Module Clocks */
+#define R9A09G011_SYS_CLK 0
+#define R9A09G011_PFC_PCLK 1
+#define R9A09G011_PMC_CORE_CLOCK 2
+#define R9A09G011_GIC_CLK 3
+#define R9A09G011_RAMA_ACLK 4
+#define R9A09G011_ROMA_ACLK 5
+#define R9A09G011_SEC_ACLK 6
+#define R9A09G011_SEC_PCLK 7
+#define R9A09G011_SEC_TCLK 8
+#define R9A09G011_DMAA_ACLK 9
+#define R9A09G011_TSU0_PCLK 10
+#define R9A09G011_TSU1_PCLK 11
+
+#define R9A09G011_CST_TRACECLK 12
+#define R9A09G011_CST_SB_CLK 13
+#define R9A09G011_CST_AHB_CLK 14
+#define R9A09G011_CST_ATB_SB_CLK 15
+#define R9A09G011_CST_TS_SB_CLK 16
+
+#define R9A09G011_SDI0_ACLK 17
+#define R9A09G011_SDI0_IMCLK 18
+#define R9A09G011_SDI0_IMCLK2 19
+#define R9A09G011_SDI0_CLK_HS 20
+#define R9A09G011_SDI1_ACLK 21
+#define R9A09G011_SDI1_IMCLK 22
+#define R9A09G011_SDI1_IMCLK2 23
+#define R9A09G011_SDI1_CLK_HS 24
+#define R9A09G011_EMM_ACLK 25
+#define R9A09G011_EMM_IMCLK 26
+#define R9A09G011_EMM_IMCLK2 27
+#define R9A09G011_EMM_CLK_HS 28
+#define R9A09G011_NFI_ACLK 29
+#define R9A09G011_NFI_NF_CLK 30
+
+#define R9A09G011_PCI_ACLK 31
+#define R9A09G011_PCI_CLK_PMU 32
+#define R9A09G011_PCI_APB_CLK 33
+#define R9A09G011_USB_ACLK_H 34
+#define R9A09G011_USB_ACLK_P 35
+#define R9A09G011_USB_PCLK 36
+#define R9A09G011_ETH0_CLK_AXI 37
+#define R9A09G011_ETH0_CLK_CHI 38
+#define R9A09G011_ETH0_GPTP_EXT 39
+
+#define R9A09G011_SDT_CLK 40
+#define R9A09G011_SDT_CLKAPB 41
+#define R9A09G011_SDT_CLK48 42
+#define R9A09G011_GRP_CLK 43
+#define R9A09G011_CIF_P0_CLK 44
+#define R9A09G011_CIF_P1_CLK 45
+#define R9A09G011_CIF_APB_CLK 46
+#define R9A09G011_DCI_CLKAXI 47
+#define R9A09G011_DCI_CLKAPB 48
+#define R9A09G011_DCI_CLKDCI2 49
+
+#define R9A09G011_HMI_PCLK 50
+#define R9A09G011_LCI_PCLK 51
+#define R9A09G011_LCI_ACLK 52
+#define R9A09G011_LCI_VCLK 53
+#define R9A09G011_LCI_LPCLK 54
+
+#define R9A09G011_AUI_CLK 55
+#define R9A09G011_AUI_CLKAXI 56
+#define R9A09G011_AUI_CLKAPB 57
+#define R9A09G011_AUMCLK 58
+#define R9A09G011_GMCLK0 59
+#define R9A09G011_GMCLK1 60
+#define R9A09G011_MTR_CLK0 61
+#define R9A09G011_MTR_CLK1 62
+#define R9A09G011_MTR_CLKAPB 63
+#define R9A09G011_GFT_CLK 64
+#define R9A09G011_GFT_CLKAPB 65
+#define R9A09G011_GFT_MCLK 66
+
+#define R9A09G011_ATGA_CLK 67
+#define R9A09G011_ATGA_CLKAPB 68
+#define R9A09G011_ATGB_CLK 69
+#define R9A09G011_ATGB_CLKAPB 70
+#define R9A09G011_SYC_CNT_CLK 71
+
+#define R9A09G011_CPERI_GRPA_PCLK 72
+#define R9A09G011_TIM0_CLK 73
+#define R9A09G011_TIM1_CLK 74
+#define R9A09G011_TIM2_CLK 75
+#define R9A09G011_TIM3_CLK 76
+#define R9A09G011_TIM4_CLK 77
+#define R9A09G011_TIM5_CLK 78
+#define R9A09G011_TIM6_CLK 79
+#define R9A09G011_TIM7_CLK 80
+#define R9A09G011_IIC_PCLK0 81
+
+#define R9A09G011_CPERI_GRPB_PCLK 82
+#define R9A09G011_TIM8_CLK 83
+#define R9A09G011_TIM9_CLK 84
+#define R9A09G011_TIM10_CLK 85
+#define R9A09G011_TIM11_CLK 86
+#define R9A09G011_TIM12_CLK 87
+#define R9A09G011_TIM13_CLK 88
+#define R9A09G011_TIM14_CLK 89
+#define R9A09G011_TIM15_CLK 90
+#define R9A09G011_IIC_PCLK1 91
+
+#define R9A09G011_CPERI_GRPC_PCLK 92
+#define R9A09G011_TIM16_CLK 93
+#define R9A09G011_TIM17_CLK 94
+#define R9A09G011_TIM18_CLK 95
+#define R9A09G011_TIM19_CLK 96
+#define R9A09G011_TIM20_CLK 97
+#define R9A09G011_TIM21_CLK 98
+#define R9A09G011_TIM22_CLK 99
+#define R9A09G011_TIM23_CLK 100
+#define R9A09G011_WDT0_PCLK 101
+#define R9A09G011_WDT0_CLK 102
+#define R9A09G011_WDT1_PCLK 103
+#define R9A09G011_WDT1_CLK 104
+
+#define R9A09G011_CPERI_GRPD_PCLK 105
+#define R9A09G011_TIM24_CLK 106
+#define R9A09G011_TIM25_CLK 107
+#define R9A09G011_TIM26_CLK 108
+#define R9A09G011_TIM27_CLK 109
+#define R9A09G011_TIM28_CLK 110
+#define R9A09G011_TIM29_CLK 111
+#define R9A09G011_TIM30_CLK 112
+#define R9A09G011_TIM31_CLK 113
+
+#define R9A09G011_CPERI_GRPE_PCLK 114
+#define R9A09G011_PWM0_CLK 115
+#define R9A09G011_PWM1_CLK 116
+#define R9A09G011_PWM2_CLK 117
+#define R9A09G011_PWM3_CLK 118
+#define R9A09G011_PWM4_CLK 119
+#define R9A09G011_PWM5_CLK 120
+#define R9A09G011_PWM6_CLK 121
+#define R9A09G011_PWM7_CLK 122
+
+#define R9A09G011_CPERI_GRPF_PCLK 123
+#define R9A09G011_PWM8_CLK 124
+#define R9A09G011_PWM9_CLK 125
+#define R9A09G011_PWM10_CLK 126
+#define R9A09G011_PWM11_CLK 127
+#define R9A09G011_PWM12_CLK 128
+#define R9A09G011_PWM13_CLK 129
+#define R9A09G011_PWM14_CLK 130
+#define R9A09G011_PWM15_CLK 131
+
+#define R9A09G011_CPERI_GRPG_PCLK 132
+#define R9A09G011_CPERI_GRPH_PCLK 133
+#define R9A09G011_URT_PCLK 134
+#define R9A09G011_URT0_CLK 135
+#define R9A09G011_URT1_CLK 136
+#define R9A09G011_CSI0_CLK 137
+#define R9A09G011_CSI1_CLK 138
+#define R9A09G011_CSI2_CLK 139
+#define R9A09G011_CSI3_CLK 140
+#define R9A09G011_CSI4_CLK 141
+#define R9A09G011_CSI5_CLK 142
+
+#define R9A09G011_ICB_ACLK1 143
+#define R9A09G011_ICB_GIC_CLK 144
+#define R9A09G011_ICB_MPCLK1 145
+#define R9A09G011_ICB_SPCLK1 146
+#define R9A09G011_ICB_CLK48 147
+#define R9A09G011_ICB_CLK48_2 148
+#define R9A09G011_ICB_CLK48_3 149
+#define R9A09G011_ICB_CLK48_4L 150
+#define R9A09G011_ICB_CLK48_4R 151
+#define R9A09G011_ICB_CLK48_5 152
+#define R9A09G011_ICB_CST_ATB_SB_CLK 153
+#define R9A09G011_ICB_CST_CS_CLK 154
+#define R9A09G011_ICB_CLK100_1 155
+#define R9A09G011_ICB_ETH0_CLK_AXI 156
+#define R9A09G011_ICB_DCI_CLKAXI 157
+#define R9A09G011_ICB_SYC_CNT_CLK 158
+
+#define R9A09G011_ICB_DRPA_ACLK 159
+#define R9A09G011_ICB_RFX_ACLK 160
+#define R9A09G011_ICB_RFX_PCLK5 161
+#define R9A09G011_ICB_MMC_ACLK 162
+
+#define R9A09G011_ICB_MPCLK3 163
+#define R9A09G011_ICB_CIMA_CLK 164
+#define R9A09G011_ICB_CIMB_CLK 165
+#define R9A09G011_ICB_BIMA_CLK 166
+#define R9A09G011_ICB_FCD_CLKAXI 167
+#define R9A09G011_ICB_VD_ACLK4 168
+#define R9A09G011_ICB_MPCLK4 169
+#define R9A09G011_ICB_VCD_PCLK4 170
+
+#define R9A09G011_CA53_CLK 171
+#define R9A09G011_CA53_ACLK 172
+#define R9A09G011_CA53_APCLK_DBG 173
+#define R9A09G011_CST_APB_CA53_CLK 174
+#define R9A09G011_CA53_ATCLK 175
+#define R9A09G011_CST_CS_CLK 176
+#define R9A09G011_CA53_TSCLK 177
+#define R9A09G011_CST_TS_CLK 178
+#define R9A09G011_CA53_APCLK_REG 179
+
+#define R9A09G011_DRPA_ACLK 180
+#define R9A09G011_DRPA_DCLK 181
+#define R9A09G011_DRPA_INITCLK 182
+
+#define R9A09G011_RAMB0_ACLK 183
+#define R9A09G011_RAMB1_ACLK 184
+#define R9A09G011_RAMB2_ACLK 185
+#define R9A09G011_RAMB3_ACLK 186
+
+#define R9A09G011_CIMA_CLKAPB 187
+#define R9A09G011_CIMA_CLK 188
+#define R9A09G011_CIMB_CLK 189
+#define R9A09G011_FAFA_CLK 190
+#define R9A09G011_STG_CLKAXI 191
+#define R9A09G011_STG_CLK0 192
+
+#define R9A09G011_BIMA_CLKAPB 193
+#define R9A09G011_BIMA_CLK 194
+#define R9A09G011_FAFB_CLK 195
+#define R9A09G011_FCD_CLK 196
+#define R9A09G011_FCD_CLKAXI 197
+
+#define R9A09G011_RIM_CLK 198
+#define R9A09G011_VCD_ACLK 199
+#define R9A09G011_VCD_PCLK 200
+#define R9A09G011_JPG0_CLK 201
+#define R9A09G011_JPG0_ACLK 202
+
+#define R9A09G011_MMC_CORE_DDRC_CLK 203
+#define R9A09G011_MMC_ACLK 204
+#define R9A09G011_MMC_PCLK 205
+#define R9A09G011_DDI_APBCLK 206
+
+/* Resets */
+#define R9A09G011_SYS_RST_N 0
+#define R9A09G011_PFC_PRESETN 1
+#define R9A09G011_RAMA_ARESETN 2
+#define R9A09G011_ROM_ARESETN 3
+#define R9A09G011_DMAA_ARESETN 4
+#define R9A09G011_SEC_ARESETN 5
+#define R9A09G011_SEC_PRESETN 6
+#define R9A09G011_SEC_RSTB 7
+#define R9A09G011_TSU0_RESETN 8
+#define R9A09G011_TSU1_RESETN 9
+#define R9A09G011_PMC_RESET_N 10
+
+#define R9A09G011_CST_NTRST 11
+#define R9A09G011_CST_NPOTRST 12
+#define R9A09G011_CST_NTRST2 13
+#define R9A09G011_CST_CS_RESETN 14
+#define R9A09G011_CST_TS_RESETN 15
+#define R9A09G011_CST_TRESETN 16
+#define R9A09G011_CST_SB_RESETN 17
+#define R9A09G011_CST_AHB_RESETN 18
+#define R9A09G011_CST_TS_SB_RESETN 19
+#define R9A09G011_CST_APB_CA53_RESETN 20
+#define R9A09G011_CST_ATB_SB_RESETN 21
+
+#define R9A09G011_SDI0_IXRST 22
+#define R9A09G011_SDI1_IXRST 23
+#define R9A09G011_EMM_IXRST 24
+#define R9A09G011_NFI_MARESETN 25
+#define R9A09G011_NFI_REG_RST_N 26
+#define R9A09G011_USB_PRESET_N 27
+#define R9A09G011_USB_DRD_RESET 28
+#define R9A09G011_USB_ARESETN_P 29
+#define R9A09G011_USB_ARESETN_H 30
+#define R9A09G011_ETH0_RST_HW_N 31
+#define R9A09G011_PCI_ARESETN 32
+
+#define R9A09G011_SDT_RSTSYSAX 33
+#define R9A09G011_GRP_RESETN 34
+#define R9A09G011_CIF_RST_N 35
+#define R9A09G011_DCU_RSTSYSAX 36
+#define R9A09G011_HMI_RST_N 37
+#define R9A09G011_HMI_PRESETN 38
+#define R9A09G011_LCI_PRESETN 39
+#define R9A09G011_LCI_ARESETN 40
+
+#define R9A09G011_AUI_RSTSYSAX 41
+#define R9A09G011_MTR_RSTSYSAX 42
+#define R9A09G011_GFT_RSTSYSAX 43
+#define R9A09G011_ATGA_RSTSYSAX 44
+#define R9A09G011_ATGB_RSTSYSAX 45
+#define R9A09G011_SYC_RST_N 46
+
+#define R9A09G011_TIM_GPA_PRESETN 47
+#define R9A09G011_TIM_GPB_PRESETN 48
+#define R9A09G011_TIM_GPC_PRESETN 49
+#define R9A09G011_TIM_GPD_PRESETN 50
+#define R9A09G011_PWM_GPE_PRESETN 51
+#define R9A09G011_PWM_GPF_PRESETN 52
+#define R9A09G011_CSI_GPG_PRESETN 53
+#define R9A09G011_CSI_GPH_PRESETN 54
+#define R9A09G011_IIC_GPA_PRESETN 55
+#define R9A09G011_IIC_GPB_PRESETN 56
+#define R9A09G011_URT_PRESETN 57
+#define R9A09G011_WDT0_PRESETN 58
+#define R9A09G011_WDT1_PRESETN 59
+
+#define R9A09G011_ICB_PD_AWO_RST_N 60
+#define R9A09G011_ICB_PD_MMC_RST_N 61
+#define R9A09G011_ICB_PD_VD0_RST_N 62
+#define R9A09G011_ICB_PD_VD1_RST_N 63
+#define R9A09G011_ICB_PD_RFX_RST_N 64
+
+#define R9A09G011_CA53_NCPUPORESET0 65
+#define R9A09G011_CA53_NCPUPORESET1 66
+#define R9A09G011_CA53_NCORERESET0 67
+#define R9A09G011_CA53_NCORERESET1 68
+#define R9A09G011_CA53_NPRESETDBG 69
+#define R9A09G011_CA53_L2RESET 70
+#define R9A09G011_CA53_NMISCRESET_HM 71
+#define R9A09G011_CA53_NMISCRESET_SM 72
+#define R9A09G011_CA53_NARESET 73
+
+#define R9A09G011_DRPA_ARESETN 74
+
+#define R9A09G011_RAMB0_ARESETN 75
+#define R9A09G011_RAMB1_ARESETN 76
+#define R9A09G011_RAMB2_ARESETN 77
+#define R9A09G011_RAMB3_ARESETN 78
+
+#define R9A09G011_CIMA_RSTSYSAX 79
+#define R9A09G011_CIMB_RSTSYSAX 80
+#define R9A09G011_FAFA_RSTSYSAX 81
+#define R9A09G011_STG_RSTSYSAX 82
+
+#define R9A09G011_BIMA_RSTSYSAX 83
+#define R9A09G011_FAFB_RSTSYSAX 84
+#define R9A09G011_FCD_RSTSYSAX 85
+#define R9A09G011_RIM_RSTSYSAX 86
+#define R9A09G011_VCD_RESETN 87
+#define R9A09G011_JPG_XRESET 88
+
+#define R9A09G011_MMC_CORE_DDRC_RSTN 89
+#define R9A09G011_MMC_ARESETN_N 90
+#define R9A09G011_MMC_PRESETN 91
+#define R9A09G011_DDI_PWROK 92
+#define R9A09G011_DDI_RESET 93
+#define R9A09G011_DDI_RESETN_APB 94
+
+#endif /* __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h b/include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h
new file mode 100644
index 000000000000..7ab6cfbaf901
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R8A779H0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R8A779H0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779h0 CPG Core Clocks */
+
+#define R8A779H0_CLK_ZX 0
+#define R8A779H0_CLK_ZD 1
+#define R8A779H0_CLK_ZS 2
+#define R8A779H0_CLK_ZT 3
+#define R8A779H0_CLK_ZTR 4
+#define R8A779H0_CLK_S0D2 5
+#define R8A779H0_CLK_S0D3 6
+#define R8A779H0_CLK_S0D4 7
+#define R8A779H0_CLK_S0D1_VIO 8
+#define R8A779H0_CLK_S0D2_VIO 9
+#define R8A779H0_CLK_S0D4_VIO 10
+#define R8A779H0_CLK_S0D8_VIO 11
+#define R8A779H0_CLK_VIOBUSD1 12
+#define R8A779H0_CLK_VIOBUSD2 13
+#define R8A779H0_CLK_S0D1_VC 14
+#define R8A779H0_CLK_S0D2_VC 15
+#define R8A779H0_CLK_S0D4_VC 16
+#define R8A779H0_CLK_VCBUSD1 17
+#define R8A779H0_CLK_VCBUSD2 18
+#define R8A779H0_CLK_S0D2_MM 19
+#define R8A779H0_CLK_S0D4_MM 20
+#define R8A779H0_CLK_S0D2_U3DG 21
+#define R8A779H0_CLK_S0D4_U3DG 22
+#define R8A779H0_CLK_S0D2_RT 23
+#define R8A779H0_CLK_S0D3_RT 24
+#define R8A779H0_CLK_S0D4_RT 25
+#define R8A779H0_CLK_S0D6_RT 26
+#define R8A779H0_CLK_S0D2_PER 27
+#define R8A779H0_CLK_S0D3_PER 28
+#define R8A779H0_CLK_S0D4_PER 29
+#define R8A779H0_CLK_S0D6_PER 30
+#define R8A779H0_CLK_S0D12_PER 31
+#define R8A779H0_CLK_S0D24_PER 32
+#define R8A779H0_CLK_S0D1_HSC 33
+#define R8A779H0_CLK_S0D2_HSC 34
+#define R8A779H0_CLK_S0D4_HSC 35
+#define R8A779H0_CLK_S0D8_HSC 36
+#define R8A779H0_CLK_SVD1_IR 37
+#define R8A779H0_CLK_SVD2_IR 38
+#define R8A779H0_CLK_IMPAD1 39
+#define R8A779H0_CLK_IMPAD4 40
+#define R8A779H0_CLK_IMPB 41
+#define R8A779H0_CLK_SVD1_VIP 42
+#define R8A779H0_CLK_SVD2_VIP 43
+#define R8A779H0_CLK_CL 44
+#define R8A779H0_CLK_CL16M 45
+#define R8A779H0_CLK_CL16M_MM 46
+#define R8A779H0_CLK_CL16M_RT 47
+#define R8A779H0_CLK_CL16M_PER 48
+#define R8A779H0_CLK_CL16M_HSC 49
+#define R8A779H0_CLK_ZC0 50
+#define R8A779H0_CLK_ZC1 51
+#define R8A779H0_CLK_ZC2 52
+#define R8A779H0_CLK_ZC3 53
+#define R8A779H0_CLK_ZB3 54
+#define R8A779H0_CLK_ZB3D2 55
+#define R8A779H0_CLK_ZB3D4 56
+#define R8A779H0_CLK_ZG 57
+#define R8A779H0_CLK_SD0H 58
+#define R8A779H0_CLK_SD0 59
+#define R8A779H0_CLK_RPC 60
+#define R8A779H0_CLK_RPCD2 61
+#define R8A779H0_CLK_MSO 62
+#define R8A779H0_CLK_CANFD 63
+#define R8A779H0_CLK_CSI 64
+#define R8A779H0_CLK_FRAY 65
+#define R8A779H0_CLK_IPC 66
+#define R8A779H0_CLK_SASYNCRT 67
+#define R8A779H0_CLK_SASYNCPERD1 68
+#define R8A779H0_CLK_SASYNCPERD2 69
+#define R8A779H0_CLK_SASYNCPERD4 70
+#define R8A779H0_CLK_DSIEXT 71
+#define R8A779H0_CLK_DSIREF 72
+#define R8A779H0_CLK_ADGH 73
+#define R8A779H0_CLK_OSC 74
+#define R8A779H0_CLK_ZR0 75
+#define R8A779H0_CLK_ZR1 76
+#define R8A779H0_CLK_ZR2 77
+#define R8A779H0_CLK_RGMII 78
+#define R8A779H0_CLK_CPEX 79
+#define R8A779H0_CLK_CP 80
+#define R8A779H0_CLK_CBFUSA 81
+#define R8A779H0_CLK_R 82
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R8A779H0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index 35a5a01f9697..a96a9870ad59 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -81,6 +81,7 @@
#define HCLK_OTG0 449
#define HCLK_OTG1 450
#define HCLK_NANDC 453
+#define HCLK_SFC 454
#define HCLK_SDMMC 456
#define HCLK_SDIO 457
#define HCLK_EMMC 459
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 0a06c5f514d7..83c72a163fd3 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -78,6 +78,7 @@
#define SCLK_TIMER13 136
#define SCLK_TIMER14 137
#define SCLK_TIMER15 138
+#define SCLK_VIP_OUT 139

#define DCLK_VOP 190
#define MCLK_CRYPTO 191
@@ -148,6 +149,8 @@
#define PCLK_VIP 367
#define PCLK_WDT 368
#define PCLK_EFUSE256 369
+#define PCLK_DPHYRX 370
+#define PCLK_DPHYTX0 371

/* hclk gates */
#define HCLK_SFC 448
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 44e0a319f077..39169d94a44e 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -547,8 +547,8 @@
#define SRST_H_PERILP0 171
#define SRST_H_PERILP0_NOC 172
#define SRST_ROM 173
-#define SRST_CRYPTO_S 174
-#define SRST_CRYPTO_M 175
+#define SRST_CRYPTO0_S 174
+#define SRST_CRYPTO0_M 175

/* cru_softrst_con11 */
#define SRST_P_DCF 176
@@ -556,7 +556,7 @@
#define SRST_CM0S 178
#define SRST_CM0S_DBG 179
#define SRST_CM0S_PO 180
-#define SRST_CRYPTO 181
+#define SRST_CRYPTO0 181
#define SRST_P_PERILP1_SGRF 182
#define SRST_P_PERILP1_GRF 183
#define SRST_CRYPTO1_S 184
diff --git a/include/dt-bindings/clock/rk3568-cru.h b/include/dt-bindings/clock/rk3568-cru.h
new file mode 100644
index 000000000000..d29890865150
--- /dev/null
+++ b/include/dt-bindings/clock/rk3568-cru.h
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3568_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3568_H
+
+/* pmucru-clocks indices */
+
+/* pmucru plls */
+#define PLL_PPLL 1
+#define PLL_HPLL 2
+
+/* pmucru clocks */
+#define XIN_OSC0_DIV 4
+#define CLK_RTC_32K 5
+#define CLK_PMU 6
+#define CLK_I2C0 7
+#define CLK_RTC32K_FRAC 8
+#define CLK_UART0_DIV 9
+#define CLK_UART0_FRAC 10
+#define SCLK_UART0 11
+#define DBCLK_GPIO0 12
+#define CLK_PWM0 13
+#define CLK_CAPTURE_PWM0_NDFT 14
+#define CLK_PMUPVTM 15
+#define CLK_CORE_PMUPVTM 16
+#define CLK_REF24M 17
+#define XIN_OSC0_USBPHY0_G 18
+#define CLK_USBPHY0_REF 19
+#define XIN_OSC0_USBPHY1_G 20
+#define CLK_USBPHY1_REF 21
+#define XIN_OSC0_MIPIDSIPHY0_G 22
+#define CLK_MIPIDSIPHY0_REF 23
+#define XIN_OSC0_MIPIDSIPHY1_G 24
+#define CLK_MIPIDSIPHY1_REF 25
+#define CLK_WIFI_DIV 26
+#define CLK_WIFI_OSC0 27
+#define CLK_WIFI 28
+#define CLK_PCIEPHY0_DIV 29
+#define CLK_PCIEPHY0_OSC0 30
+#define CLK_PCIEPHY0_REF 31
+#define CLK_PCIEPHY1_DIV 32
+#define CLK_PCIEPHY1_OSC0 33
+#define CLK_PCIEPHY1_REF 34
+#define CLK_PCIEPHY2_DIV 35
+#define CLK_PCIEPHY2_OSC0 36
+#define CLK_PCIEPHY2_REF 37
+#define CLK_PCIE30PHY_REF_M 38
+#define CLK_PCIE30PHY_REF_N 39
+#define CLK_HDMI_REF 40
+#define XIN_OSC0_EDPPHY_G 41
+#define PCLK_PDPMU 42
+#define PCLK_PMU 43
+#define PCLK_UART0 44
+#define PCLK_I2C0 45
+#define PCLK_GPIO0 46
+#define PCLK_PMUPVTM 47
+#define PCLK_PWM0 48
+#define CLK_PDPMU 49
+#define SCLK_32K_IOE 50
+
+#define CLKPMU_NR_CLKS (SCLK_32K_IOE + 1)
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_GPLL 4
+#define PLL_VPLL 5
+#define PLL_NPLL 6
+
+/* cru clocks */
+#define CPLL_333M 9
+#define ARMCLK 10
+#define USB480M 11
+#define ACLK_CORE_NIU2BUS 18
+#define CLK_CORE_PVTM 19
+#define CLK_CORE_PVTM_CORE 20
+#define CLK_CORE_PVTPLL 21
+#define CLK_GPU_SRC 22
+#define CLK_GPU_PRE_NDFT 23
+#define CLK_GPU_PRE_MUX 24
+#define ACLK_GPU_PRE 25
+#define PCLK_GPU_PRE 26
+#define CLK_GPU 27
+#define CLK_GPU_NP5 28
+#define PCLK_GPU_PVTM 29
+#define CLK_GPU_PVTM 30
+#define CLK_GPU_PVTM_CORE 31
+#define CLK_GPU_PVTPLL 32
+#define CLK_NPU_SRC 33
+#define CLK_NPU_PRE_NDFT 34
+#define CLK_NPU 35
+#define CLK_NPU_NP5 36
+#define HCLK_NPU_PRE 37
+#define PCLK_NPU_PRE 38
+#define ACLK_NPU_PRE 39
+#define ACLK_NPU 40
+#define HCLK_NPU 41
+#define PCLK_NPU_PVTM 42
+#define CLK_NPU_PVTM 43
+#define CLK_NPU_PVTM_CORE 44
+#define CLK_NPU_PVTPLL 45
+#define CLK_DDRPHY1X_SRC 46
+#define CLK_DDRPHY1X_HWFFC_SRC 47
+#define CLK_DDR1X 48
+#define CLK_MSCH 49
+#define CLK24_DDRMON 50
+#define ACLK_GIC_AUDIO 51
+#define HCLK_GIC_AUDIO 52
+#define HCLK_SDMMC_BUFFER 53
+#define DCLK_SDMMC_BUFFER 54
+#define ACLK_GIC600 55
+#define ACLK_SPINLOCK 56
+#define HCLK_I2S0_8CH 57
+#define HCLK_I2S1_8CH 58
+#define HCLK_I2S2_2CH 59
+#define HCLK_I2S3_2CH 60
+#define CLK_I2S0_8CH_TX_SRC 61
+#define CLK_I2S0_8CH_TX_FRAC 62
+#define MCLK_I2S0_8CH_TX 63
+#define I2S0_MCLKOUT_TX 64
+#define CLK_I2S0_8CH_RX_SRC 65
+#define CLK_I2S0_8CH_RX_FRAC 66
+#define MCLK_I2S0_8CH_RX 67
+#define I2S0_MCLKOUT_RX 68
+#define CLK_I2S1_8CH_TX_SRC 69
+#define CLK_I2S1_8CH_TX_FRAC 70
+#define MCLK_I2S1_8CH_TX 71
+#define I2S1_MCLKOUT_TX 72
+#define CLK_I2S1_8CH_RX_SRC 73
+#define CLK_I2S1_8CH_RX_FRAC 74
+#define MCLK_I2S1_8CH_RX 75
+#define I2S1_MCLKOUT_RX 76
+#define CLK_I2S2_2CH_SRC 77
+#define CLK_I2S2_2CH_FRAC 78
+#define MCLK_I2S2_2CH 79
+#define I2S2_MCLKOUT 80
+#define CLK_I2S3_2CH_TX_SRC 81
+#define CLK_I2S3_2CH_TX_FRAC 82
+#define MCLK_I2S3_2CH_TX 83
+#define I2S3_MCLKOUT_TX 84
+#define CLK_I2S3_2CH_RX_SRC 85
+#define CLK_I2S3_2CH_RX_FRAC 86
+#define MCLK_I2S3_2CH_RX 87
+#define I2S3_MCLKOUT_RX 88
+#define HCLK_PDM 89
+#define MCLK_PDM 90
+#define HCLK_VAD 91
+#define HCLK_SPDIF_8CH 92
+#define MCLK_SPDIF_8CH_SRC 93
+#define MCLK_SPDIF_8CH_FRAC 94
+#define MCLK_SPDIF_8CH 95
+#define HCLK_AUDPWM 96
+#define SCLK_AUDPWM_SRC 97
+#define SCLK_AUDPWM_FRAC 98
+#define SCLK_AUDPWM 99
+#define HCLK_ACDCDIG 100
+#define CLK_ACDCDIG_I2C 101
+#define CLK_ACDCDIG_DAC 102
+#define CLK_ACDCDIG_ADC 103
+#define ACLK_SECURE_FLASH 104
+#define HCLK_SECURE_FLASH 105
+#define ACLK_CRYPTO_NS 106
+#define HCLK_CRYPTO_NS 107
+#define CLK_CRYPTO_NS_CORE 108
+#define CLK_CRYPTO_NS_PKA 109
+#define CLK_CRYPTO_NS_RNG 110
+#define HCLK_TRNG_NS 111
+#define CLK_TRNG_NS 112
+#define PCLK_OTPC_NS 113
+#define CLK_OTPC_NS_SBPI 114
+#define CLK_OTPC_NS_USR 115
+#define HCLK_NANDC 116
+#define NCLK_NANDC 117
+#define HCLK_SFC 118
+#define HCLK_SFC_XIP 119
+#define SCLK_SFC 120
+#define ACLK_EMMC 121
+#define HCLK_EMMC 122
+#define BCLK_EMMC 123
+#define CCLK_EMMC 124
+#define TCLK_EMMC 125
+#define ACLK_PIPE 126
+#define PCLK_PIPE 127
+#define PCLK_PIPE_GRF 128
+#define ACLK_PCIE20_MST 129
+#define ACLK_PCIE20_SLV 130
+#define ACLK_PCIE20_DBI 131
+#define PCLK_PCIE20 132
+#define CLK_PCIE20_AUX_NDFT 133
+#define CLK_PCIE20_AUX_DFT 134
+#define CLK_PCIE20_PIPE_DFT 135
+#define ACLK_PCIE30X1_MST 136
+#define ACLK_PCIE30X1_SLV 137
+#define ACLK_PCIE30X1_DBI 138
+#define PCLK_PCIE30X1 139
+#define CLK_PCIE30X1_AUX_NDFT 140
+#define CLK_PCIE30X1_AUX_DFT 141
+#define CLK_PCIE30X1_PIPE_DFT 142
+#define ACLK_PCIE30X2_MST 143
+#define ACLK_PCIE30X2_SLV 144
+#define ACLK_PCIE30X2_DBI 145
+#define PCLK_PCIE30X2 146
+#define CLK_PCIE30X2_AUX_NDFT 147
+#define CLK_PCIE30X2_AUX_DFT 148
+#define CLK_PCIE30X2_PIPE_DFT 149
+#define ACLK_SATA0 150
+#define CLK_SATA0_PMALIVE 151
+#define CLK_SATA0_RXOOB 152
+#define CLK_SATA0_PIPE_NDFT 153
+#define CLK_SATA0_PIPE_DFT 154
+#define ACLK_SATA1 155
+#define CLK_SATA1_PMALIVE 156
+#define CLK_SATA1_RXOOB 157
+#define CLK_SATA1_PIPE_NDFT 158
+#define CLK_SATA1_PIPE_DFT 159
+#define ACLK_SATA2 160
+#define CLK_SATA2_PMALIVE 161
+#define CLK_SATA2_RXOOB 162
+#define CLK_SATA2_PIPE_NDFT 163
+#define CLK_SATA2_PIPE_DFT 164
+#define ACLK_USB3OTG0 165
+#define CLK_USB3OTG0_REF 166
+#define CLK_USB3OTG0_SUSPEND 167
+#define ACLK_USB3OTG1 168
+#define CLK_USB3OTG1_REF 169
+#define CLK_USB3OTG1_SUSPEND 170
+#define CLK_XPCS_EEE 171
+#define PCLK_XPCS 172
+#define ACLK_PHP 173
+#define HCLK_PHP 174
+#define PCLK_PHP 175
+#define HCLK_SDMMC0 176
+#define CLK_SDMMC0 177
+#define HCLK_SDMMC1 178
+#define CLK_SDMMC1 179
+#define ACLK_GMAC0 180
+#define PCLK_GMAC0 181
+#define CLK_MAC0_2TOP 182
+#define CLK_MAC0_OUT 183
+#define CLK_MAC0_REFOUT 184
+#define CLK_GMAC0_PTP_REF 185
+#define ACLK_USB 186
+#define HCLK_USB 187
+#define PCLK_USB 188
+#define HCLK_USB2HOST0 189
+#define HCLK_USB2HOST0_ARB 190
+#define HCLK_USB2HOST1 191
+#define HCLK_USB2HOST1_ARB 192
+#define HCLK_SDMMC2 193
+#define CLK_SDMMC2 194
+#define ACLK_GMAC1 195
+#define PCLK_GMAC1 196
+#define CLK_MAC1_2TOP 197
+#define CLK_MAC1_OUT 198
+#define CLK_MAC1_REFOUT 199
+#define CLK_GMAC1_PTP_REF 200
+#define ACLK_PERIMID 201
+#define HCLK_PERIMID 202
+#define ACLK_VI 203
+#define HCLK_VI 204
+#define PCLK_VI 205
+#define ACLK_VICAP 206
+#define HCLK_VICAP 207
+#define DCLK_VICAP 208
+#define ICLK_VICAP_G 209
+#define ACLK_ISP 210
+#define HCLK_ISP 211
+#define CLK_ISP 212
+#define PCLK_CSI2HOST1 213
+#define CLK_CIF_OUT 214
+#define CLK_CAM0_OUT 215
+#define CLK_CAM1_OUT 216
+#define ACLK_VO 217
+#define HCLK_VO 218
+#define PCLK_VO 219
+#define ACLK_VOP_PRE 220
+#define ACLK_VOP 221
+#define HCLK_VOP 222
+#define DCLK_VOP0 223
+#define DCLK_VOP1 224
+#define DCLK_VOP2 225
+#define CLK_VOP_PWM 226
+#define ACLK_HDCP 227
+#define HCLK_HDCP 228
+#define PCLK_HDCP 229
+#define PCLK_HDMI_HOST 230
+#define CLK_HDMI_SFR 231
+#define PCLK_DSITX_0 232
+#define PCLK_DSITX_1 233
+#define PCLK_EDP_CTRL 234
+#define CLK_EDP_200M 235
+#define ACLK_VPU_PRE 236
+#define HCLK_VPU_PRE 237
+#define ACLK_VPU 238
+#define HCLK_VPU 239
+#define ACLK_RGA_PRE 240
+#define HCLK_RGA_PRE 241
+#define PCLK_RGA_PRE 242
+#define ACLK_RGA 243
+#define HCLK_RGA 244
+#define CLK_RGA_CORE 245
+#define ACLK_IEP 246
+#define HCLK_IEP 247
+#define CLK_IEP_CORE 248
+#define HCLK_EBC 249
+#define DCLK_EBC 250
+#define ACLK_JDEC 251
+#define HCLK_JDEC 252
+#define ACLK_JENC 253
+#define HCLK_JENC 254
+#define PCLK_EINK 255
+#define HCLK_EINK 256
+#define ACLK_RKVENC_PRE 257
+#define HCLK_RKVENC_PRE 258
+#define ACLK_RKVENC 259
+#define HCLK_RKVENC 260
+#define CLK_RKVENC_CORE 261
+#define ACLK_RKVDEC_PRE 262
+#define HCLK_RKVDEC_PRE 263
+#define ACLK_RKVDEC 264
+#define HCLK_RKVDEC 265
+#define CLK_RKVDEC_CA 266
+#define CLK_RKVDEC_CORE 267
+#define CLK_RKVDEC_HEVC_CA 268
+#define ACLK_BUS 269
+#define PCLK_BUS 270
+#define PCLK_TSADC 271
+#define CLK_TSADC_TSEN 272
+#define CLK_TSADC 273
+#define PCLK_SARADC 274
+#define CLK_SARADC 275
+#define PCLK_SCR 276
+#define PCLK_WDT_NS 277
+#define TCLK_WDT_NS 278
+#define ACLK_DMAC0 279
+#define ACLK_DMAC1 280
+#define ACLK_MCU 281
+#define PCLK_INTMUX 282
+#define PCLK_MAILBOX 283
+#define PCLK_UART1 284
+#define CLK_UART1_SRC 285
+#define CLK_UART1_FRAC 286
+#define SCLK_UART1 287
+#define PCLK_UART2 288
+#define CLK_UART2_SRC 289
+#define CLK_UART2_FRAC 290
+#define SCLK_UART2 291
+#define PCLK_UART3 292
+#define CLK_UART3_SRC 293
+#define CLK_UART3_FRAC 294
+#define SCLK_UART3 295
+#define PCLK_UART4 296
+#define CLK_UART4_SRC 297
+#define CLK_UART4_FRAC 298
+#define SCLK_UART4 299
+#define PCLK_UART5 300
+#define CLK_UART5_SRC 301
+#define CLK_UART5_FRAC 302
+#define SCLK_UART5 303
+#define PCLK_UART6 304
+#define CLK_UART6_SRC 305
+#define CLK_UART6_FRAC 306
+#define SCLK_UART6 307
+#define PCLK_UART7 308
+#define CLK_UART7_SRC 309
+#define CLK_UART7_FRAC 310
+#define SCLK_UART7 311
+#define PCLK_UART8 312
+#define CLK_UART8_SRC 313
+#define CLK_UART8_FRAC 314
+#define SCLK_UART8 315
+#define PCLK_UART9 316
+#define CLK_UART9_SRC 317
+#define CLK_UART9_FRAC 318
+#define SCLK_UART9 319
+#define PCLK_CAN0 320
+#define CLK_CAN0 321
+#define PCLK_CAN1 322
+#define CLK_CAN1 323
+#define PCLK_CAN2 324
+#define CLK_CAN2 325
+#define CLK_I2C 326
+#define PCLK_I2C1 327
+#define CLK_I2C1 328
+#define PCLK_I2C2 329
+#define CLK_I2C2 330
+#define PCLK_I2C3 331
+#define CLK_I2C3 332
+#define PCLK_I2C4 333
+#define CLK_I2C4 334
+#define PCLK_I2C5 335
+#define CLK_I2C5 336
+#define PCLK_SPI0 337
+#define CLK_SPI0 338
+#define PCLK_SPI1 339
+#define CLK_SPI1 340
+#define PCLK_SPI2 341
+#define CLK_SPI2 342
+#define PCLK_SPI3 343
+#define CLK_SPI3 344
+#define PCLK_PWM1 345
+#define CLK_PWM1 346
+#define CLK_PWM1_CAPTURE 347
+#define PCLK_PWM2 348
+#define CLK_PWM2 349
+#define CLK_PWM2_CAPTURE 350
+#define PCLK_PWM3 351
+#define CLK_PWM3 352
+#define CLK_PWM3_CAPTURE 353
+#define DBCLK_GPIO 354
+#define PCLK_GPIO1 355
+#define DBCLK_GPIO1 356
+#define PCLK_GPIO2 357
+#define DBCLK_GPIO2 358
+#define PCLK_GPIO3 359
+#define DBCLK_GPIO3 360
+#define PCLK_GPIO4 361
+#define DBCLK_GPIO4 362
+#define OCC_SCAN_CLK_GPIO 363
+#define PCLK_TIMER 364
+#define CLK_TIMER0 365
+#define CLK_TIMER1 366
+#define CLK_TIMER2 367
+#define CLK_TIMER3 368
+#define CLK_TIMER4 369
+#define CLK_TIMER5 370
+#define ACLK_TOP_HIGH 371
+#define ACLK_TOP_LOW 372
+#define HCLK_TOP 373
+#define PCLK_TOP 374
+#define PCLK_PCIE30PHY 375
+#define CLK_OPTC_ARB 376
+#define PCLK_MIPICSIPHY 377
+#define PCLK_MIPIDSIPHY0 378
+#define PCLK_MIPIDSIPHY1 379
+#define PCLK_PIPEPHY0 380
+#define PCLK_PIPEPHY1 381
+#define PCLK_PIPEPHY2 382
+#define PCLK_CPU_BOOST 383
+#define CLK_CPU_BOOST 384
+#define PCLK_OTPPHY 385
+#define SCLK_GMAC0 386
+#define SCLK_GMAC0_RGMII_SPEED 387
+#define SCLK_GMAC0_RMII_SPEED 388
+#define SCLK_GMAC0_RX_TX 389
+#define SCLK_GMAC1 390
+#define SCLK_GMAC1_RGMII_SPEED 391
+#define SCLK_GMAC1_RMII_SPEED 392
+#define SCLK_GMAC1_RX_TX 393
+#define SCLK_SDMMC0_DRV 394
+#define SCLK_SDMMC0_SAMPLE 395
+#define SCLK_SDMMC1_DRV 396
+#define SCLK_SDMMC1_SAMPLE 397
+#define SCLK_SDMMC2_DRV 398
+#define SCLK_SDMMC2_SAMPLE 399
+#define SCLK_EMMC_DRV 400
+#define SCLK_EMMC_SAMPLE 401
+#define PCLK_EDPPHY_GRF 402
+#define CLK_HDMI_CEC 403
+#define CLK_I2S0_8CH_TX 404
+#define CLK_I2S0_8CH_RX 405
+#define CLK_I2S1_8CH_TX 406
+#define CLK_I2S1_8CH_RX 407
+#define CLK_I2S2_2CH 408
+#define CLK_I2S3_2CH_TX 409
+#define CLK_I2S3_2CH_RX 410
+#define CPLL_500M 411
+#define CPLL_250M 412
+#define CPLL_125M 413
+#define CPLL_62P5M 414
+#define CPLL_50M 415
+#define CPLL_25M 416
+#define CPLL_100M 417
+#define SCLK_DDRCLK 418
+
+#define PCLK_CORE_PVTM 450
+
+#define CLK_NR_CLKS (PCLK_CORE_PVTM + 1)
+
+/* pmu soft-reset indices */
+/* pmucru_softrst_con0 */
+#define SRST_P_PDPMU_NIU 0
+#define SRST_P_PMUCRU 1
+#define SRST_P_PMUGRF 2
+#define SRST_P_I2C0 3
+#define SRST_I2C0 4
+#define SRST_P_UART0 5
+#define SRST_S_UART0 6
+#define SRST_P_PWM0 7
+#define SRST_PWM0 8
+#define SRST_P_GPIO0 9
+#define SRST_GPIO0 10
+#define SRST_P_PMUPVTM 11
+#define SRST_PMUPVTM 12
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_NCORERESET0 0
+#define SRST_NCORERESET1 1
+#define SRST_NCORERESET2 2
+#define SRST_NCORERESET3 3
+#define SRST_NCPUPORESET0 4
+#define SRST_NCPUPORESET1 5
+#define SRST_NCPUPORESET2 6
+#define SRST_NCPUPORESET3 7
+#define SRST_NSRESET 8
+#define SRST_NSPORESET 9
+#define SRST_NATRESET 10
+#define SRST_NGICRESET 11
+#define SRST_NPRESET 12
+#define SRST_NPERIPHRESET 13
+
+/* cru_softrst_con1 */
+#define SRST_A_CORE_NIU2DDR 16
+#define SRST_A_CORE_NIU2BUS 17
+#define SRST_P_DBG_NIU 18
+#define SRST_P_DBG 19
+#define SRST_P_DBG_DAPLITE 20
+#define SRST_DAP 21
+#define SRST_A_ADB400_CORE2GIC 22
+#define SRST_A_ADB400_GIC2CORE 23
+#define SRST_P_CORE_GRF 24
+#define SRST_P_CORE_PVTM 25
+#define SRST_CORE_PVTM 26
+#define SRST_CORE_PVTPLL 27
+
+/* cru_softrst_con2 */
+#define SRST_GPU 32
+#define SRST_A_GPU_NIU 33
+#define SRST_P_GPU_NIU 34
+#define SRST_P_GPU_PVTM 35
+#define SRST_GPU_PVTM 36
+#define SRST_GPU_PVTPLL 37
+#define SRST_A_NPU_NIU 40
+#define SRST_H_NPU_NIU 41
+#define SRST_P_NPU_NIU 42
+#define SRST_A_NPU 43
+#define SRST_H_NPU 44
+#define SRST_P_NPU_PVTM 45
+#define SRST_NPU_PVTM 46
+#define SRST_NPU_PVTPLL 47
+
+/* cru_softrst_con3 */
+#define SRST_A_MSCH 51
+#define SRST_HWFFC_CTRL 52
+#define SRST_DDR_ALWAYSON 53
+#define SRST_A_DDRSPLIT 54
+#define SRST_DDRDFI_CTL 55
+#define SRST_A_DMA2DDR 57
+
+/* cru_softrst_con4 */
+#define SRST_A_PERIMID_NIU 64
+#define SRST_H_PERIMID_NIU 65
+#define SRST_A_GIC_AUDIO_NIU 66
+#define SRST_H_GIC_AUDIO_NIU 67
+#define SRST_A_GIC600 68
+#define SRST_A_GIC600_DEBUG 69
+#define SRST_A_GICADB_CORE2GIC 70
+#define SRST_A_GICADB_GIC2CORE 71
+#define SRST_A_SPINLOCK 72
+#define SRST_H_SDMMC_BUFFER 73
+#define SRST_D_SDMMC_BUFFER 74
+#define SRST_H_I2S0_8CH 75
+#define SRST_H_I2S1_8CH 76
+#define SRST_H_I2S2_2CH 77
+#define SRST_H_I2S3_2CH 78
+
+/* cru_softrst_con5 */
+#define SRST_M_I2S0_8CH_TX 80
+#define SRST_M_I2S0_8CH_RX 81
+#define SRST_M_I2S1_8CH_TX 82
+#define SRST_M_I2S1_8CH_RX 83
+#define SRST_M_I2S2_2CH 84
+#define SRST_M_I2S3_2CH_TX 85
+#define SRST_M_I2S3_2CH_RX 86
+#define SRST_H_PDM 87
+#define SRST_M_PDM 88
+#define SRST_H_VAD 89
+#define SRST_H_SPDIF_8CH 90
+#define SRST_M_SPDIF_8CH 91
+#define SRST_H_AUDPWM 92
+#define SRST_S_AUDPWM 93
+#define SRST_H_ACDCDIG 94
+#define SRST_ACDCDIG 95
+
+/* cru_softrst_con6 */
+#define SRST_A_SECURE_FLASH_NIU 96
+#define SRST_H_SECURE_FLASH_NIU 97
+#define SRST_A_CRYPTO_NS 103
+#define SRST_H_CRYPTO_NS 104
+#define SRST_CRYPTO_NS_CORE 105
+#define SRST_CRYPTO_NS_PKA 106
+#define SRST_CRYPTO_NS_RNG 107
+#define SRST_H_TRNG_NS 108
+#define SRST_TRNG_NS 109
+
+/* cru_softrst_con7 */
+#define SRST_H_NANDC 112
+#define SRST_N_NANDC 113
+#define SRST_H_SFC 114
+#define SRST_H_SFC_XIP 115
+#define SRST_S_SFC 116
+#define SRST_A_EMMC 117
+#define SRST_H_EMMC 118
+#define SRST_B_EMMC 119
+#define SRST_C_EMMC 120
+#define SRST_T_EMMC 121
+
+/* cru_softrst_con8 */
+#define SRST_A_PIPE_NIU 128
+#define SRST_P_PIPE_NIU 130
+#define SRST_P_PIPE_GRF 133
+#define SRST_A_SATA0 134
+#define SRST_SATA0_PIPE 135
+#define SRST_SATA0_PMALIVE 136
+#define SRST_SATA0_RXOOB 137
+#define SRST_A_SATA1 138
+#define SRST_SATA1_PIPE 139
+#define SRST_SATA1_PMALIVE 140
+#define SRST_SATA1_RXOOB 141
+
+/* cru_softrst_con9 */
+#define SRST_A_SATA2 144
+#define SRST_SATA2_PIPE 145
+#define SRST_SATA2_PMALIVE 146
+#define SRST_SATA2_RXOOB 147
+#define SRST_USB3OTG0 148
+#define SRST_USB3OTG1 149
+#define SRST_XPCS 150
+#define SRST_XPCS_TX_DIV10 151
+#define SRST_XPCS_RX_DIV10 152
+#define SRST_XPCS_XGXS_RX 153
+
+/* cru_softrst_con10 */
+#define SRST_P_PCIE20 160
+#define SRST_PCIE20_POWERUP 161
+#define SRST_MSTR_ARESET_PCIE20 162
+#define SRST_SLV_ARESET_PCIE20 163
+#define SRST_DBI_ARESET_PCIE20 164
+#define SRST_BRESET_PCIE20 165
+#define SRST_PERST_PCIE20 166
+#define SRST_CORE_RST_PCIE20 167
+#define SRST_NSTICKY_RST_PCIE20 168
+#define SRST_STICKY_RST_PCIE20 169
+#define SRST_PWR_RST_PCIE20 170
+
+/* cru_softrst_con11 */
+#define SRST_P_PCIE30X1 176
+#define SRST_PCIE30X1_POWERUP 177
+#define SRST_M_ARESET_PCIE30X1 178
+#define SRST_S_ARESET_PCIE30X1 179
+#define SRST_D_ARESET_PCIE30X1 180
+#define SRST_BRESET_PCIE30X1 181
+#define SRST_PERST_PCIE30X1 182
+#define SRST_CORE_RST_PCIE30X1 183
+#define SRST_NSTC_RST_PCIE30X1 184
+#define SRST_STC_RST_PCIE30X1 185
+#define SRST_PWR_RST_PCIE30X1 186
+
+/* cru_softrst_con12 */
+#define SRST_P_PCIE30X2 192
+#define SRST_PCIE30X2_POWERUP 193
+#define SRST_M_ARESET_PCIE30X2 194
+#define SRST_S_ARESET_PCIE30X2 195
+#define SRST_D_ARESET_PCIE30X2 196
+#define SRST_BRESET_PCIE30X2 197
+#define SRST_PERST_PCIE30X2 198
+#define SRST_CORE_RST_PCIE30X2 199
+#define SRST_NSTC_RST_PCIE30X2 200
+#define SRST_STC_RST_PCIE30X2 201
+#define SRST_PWR_RST_PCIE30X2 202
+
+/* cru_softrst_con13 */
+#define SRST_A_PHP_NIU 208
+#define SRST_H_PHP_NIU 209
+#define SRST_P_PHP_NIU 210
+#define SRST_H_SDMMC0 211
+#define SRST_SDMMC0 212
+#define SRST_H_SDMMC1 213
+#define SRST_SDMMC1 214
+#define SRST_A_GMAC0 215
+#define SRST_GMAC0_TIMESTAMP 216
+
+/* cru_softrst_con14 */
+#define SRST_A_USB_NIU 224
+#define SRST_H_USB_NIU 225
+#define SRST_P_USB_NIU 226
+#define SRST_P_USB_GRF 227
+#define SRST_H_USB2HOST0 228
+#define SRST_H_USB2HOST0_ARB 229
+#define SRST_USB2HOST0_UTMI 230
+#define SRST_H_USB2HOST1 231
+#define SRST_H_USB2HOST1_ARB 232
+#define SRST_USB2HOST1_UTMI 233
+#define SRST_H_SDMMC2 234
+#define SRST_SDMMC2 235
+#define SRST_A_GMAC1 236
+#define SRST_GMAC1_TIMESTAMP 237
+
+/* cru_softrst_con15 */
+#define SRST_A_VI_NIU 240
+#define SRST_H_VI_NIU 241
+#define SRST_P_VI_NIU 242
+#define SRST_A_VICAP 247
+#define SRST_H_VICAP 248
+#define SRST_D_VICAP 249
+#define SRST_I_VICAP 250
+#define SRST_P_VICAP 251
+#define SRST_H_ISP 252
+#define SRST_ISP 253
+#define SRST_P_CSI2HOST1 255
+
+/* cru_softrst_con16 */
+#define SRST_A_VO_NIU 256
+#define SRST_H_VO_NIU 257
+#define SRST_P_VO_NIU 258
+#define SRST_A_VOP_NIU 259
+#define SRST_A_VOP 260
+#define SRST_H_VOP 261
+#define SRST_VOP0 262
+#define SRST_VOP1 263
+#define SRST_VOP2 264
+#define SRST_VOP_PWM 265
+#define SRST_A_HDCP 266
+#define SRST_H_HDCP 267
+#define SRST_P_HDCP 268
+#define SRST_P_HDMI_HOST 270
+#define SRST_HDMI_HOST 271
+
+/* cru_softrst_con17 */
+#define SRST_P_DSITX_0 272
+#define SRST_P_DSITX_1 273
+#define SRST_P_EDP_CTRL 274
+#define SRST_EDP_24M 275
+#define SRST_A_VPU_NIU 280
+#define SRST_H_VPU_NIU 281
+#define SRST_A_VPU 282
+#define SRST_H_VPU 283
+#define SRST_H_EINK 286
+#define SRST_P_EINK 287
+
+/* cru_softrst_con18 */
+#define SRST_A_RGA_NIU 288
+#define SRST_H_RGA_NIU 289
+#define SRST_P_RGA_NIU 290
+#define SRST_A_RGA 292
+#define SRST_H_RGA 293
+#define SRST_RGA_CORE 294
+#define SRST_A_IEP 295
+#define SRST_H_IEP 296
+#define SRST_IEP_CORE 297
+#define SRST_H_EBC 298
+#define SRST_D_EBC 299
+#define SRST_A_JDEC 300
+#define SRST_H_JDEC 301
+#define SRST_A_JENC 302
+#define SRST_H_JENC 303
+
+/* cru_softrst_con19 */
+#define SRST_A_VENC_NIU 304
+#define SRST_H_VENC_NIU 305
+#define SRST_A_RKVENC 307
+#define SRST_H_RKVENC 308
+#define SRST_RKVENC_CORE 309
+
+/* cru_softrst_con20 */
+#define SRST_A_RKVDEC_NIU 320
+#define SRST_H_RKVDEC_NIU 321
+#define SRST_A_RKVDEC 322
+#define SRST_H_RKVDEC 323
+#define SRST_RKVDEC_CA 324
+#define SRST_RKVDEC_CORE 325
+#define SRST_RKVDEC_HEVC_CA 326
+
+/* cru_softrst_con21 */
+#define SRST_A_BUS_NIU 336
+#define SRST_P_BUS_NIU 338
+#define SRST_P_CAN0 340
+#define SRST_CAN0 341
+#define SRST_P_CAN1 342
+#define SRST_CAN1 343
+#define SRST_P_CAN2 344
+#define SRST_CAN2 345
+#define SRST_P_GPIO1 346
+#define SRST_GPIO1 347
+#define SRST_P_GPIO2 348
+#define SRST_GPIO2 349
+#define SRST_P_GPIO3 350
+#define SRST_GPIO3 351
+
+/* cru_softrst_con22 */
+#define SRST_P_GPIO4 352
+#define SRST_GPIO4 353
+#define SRST_P_I2C1 354
+#define SRST_I2C1 355
+#define SRST_P_I2C2 356
+#define SRST_I2C2 357
+#define SRST_P_I2C3 358
+#define SRST_I2C3 359
+#define SRST_P_I2C4 360
+#define SRST_I2C4 361
+#define SRST_P_I2C5 362
+#define SRST_I2C5 363
+#define SRST_P_OTPC_NS 364
+#define SRST_OTPC_NS_SBPI 365
+#define SRST_OTPC_NS_USR 366
+
+/* cru_softrst_con23 */
+#define SRST_P_PWM1 368
+#define SRST_PWM1 369
+#define SRST_P_PWM2 370
+#define SRST_PWM2 371
+#define SRST_P_PWM3 372
+#define SRST_PWM3 373
+#define SRST_P_SPI0 374
+#define SRST_SPI0 375
+#define SRST_P_SPI1 376
+#define SRST_SPI1 377
+#define SRST_P_SPI2 378
+#define SRST_SPI2 379
+#define SRST_P_SPI3 380
+#define SRST_SPI3 381
+
+/* cru_softrst_con24 */
+#define SRST_P_SARADC 384
+#define SRST_P_TSADC 385
+#define SRST_TSADC 386
+#define SRST_P_TIMER 387
+#define SRST_TIMER0 388
+#define SRST_TIMER1 389
+#define SRST_TIMER2 390
+#define SRST_TIMER3 391
+#define SRST_TIMER4 392
+#define SRST_TIMER5 393
+#define SRST_P_UART1 394
+#define SRST_S_UART1 395
+
+/* cru_softrst_con25 */
+#define SRST_P_UART2 400
+#define SRST_S_UART2 401
+#define SRST_P_UART3 402
+#define SRST_S_UART3 403
+#define SRST_P_UART4 404
+#define SRST_S_UART4 405
+#define SRST_P_UART5 406
+#define SRST_S_UART5 407
+#define SRST_P_UART6 408
+#define SRST_S_UART6 409
+#define SRST_P_UART7 410
+#define SRST_S_UART7 411
+#define SRST_P_UART8 412
+#define SRST_S_UART8 413
+#define SRST_P_UART9 414
+#define SRST_S_UART9 415
+
+/* cru_softrst_con26 */
+#define SRST_P_GRF 416
+#define SRST_P_GRF_VCCIO12 417
+#define SRST_P_GRF_VCCIO34 418
+#define SRST_P_GRF_VCCIO567 419
+#define SRST_P_SCR 420
+#define SRST_P_WDT_NS 421
+#define SRST_T_WDT_NS 422
+#define SRST_P_DFT2APB 423
+#define SRST_A_MCU 426
+#define SRST_P_INTMUX 427
+#define SRST_P_MAILBOX 428
+
+/* cru_softrst_con27 */
+#define SRST_A_TOP_HIGH_NIU 432
+#define SRST_A_TOP_LOW_NIU 433
+#define SRST_H_TOP_NIU 434
+#define SRST_P_TOP_NIU 435
+#define SRST_P_TOP_CRU 438
+#define SRST_P_DDRPHY 439
+#define SRST_DDRPHY 440
+#define SRST_P_MIPICSIPHY 442
+#define SRST_P_MIPIDSIPHY0 443
+#define SRST_P_MIPIDSIPHY1 444
+#define SRST_P_PCIE30PHY 445
+#define SRST_PCIE30PHY 446
+#define SRST_P_PCIE30PHY_GRF 447
+
+/* cru_softrst_con28 */
+#define SRST_P_APB2ASB_LEFT 448
+#define SRST_P_APB2ASB_BOTTOM 449
+#define SRST_P_ASB2APB_LEFT 450
+#define SRST_P_ASB2APB_BOTTOM 451
+#define SRST_P_PIPEPHY0 452
+#define SRST_PIPEPHY0 453
+#define SRST_P_PIPEPHY1 454
+#define SRST_PIPEPHY1 455
+#define SRST_P_PIPEPHY2 456
+#define SRST_PIPEPHY2 457
+#define SRST_P_USB2PHY0_GRF 458
+#define SRST_P_USB2PHY1_GRF 459
+#define SRST_P_CPU_BOOST 460
+#define SRST_CPU_BOOST 461
+#define SRST_P_OTPPHY 462
+#define SRST_OTPPHY 463
+
+/* cru_softrst_con29 */
+#define SRST_USB2PHY0_POR 464
+#define SRST_USB2PHY0_USB3OTG0 465
+#define SRST_USB2PHY0_USB3OTG1 466
+#define SRST_USB2PHY1_POR 467
+#define SRST_USB2PHY1_USB2HOST0 468
+#define SRST_USB2PHY1_USB2HOST1 469
+#define SRST_P_EDPPHY_GRF 470
+#define SRST_TSADCPHY 471
+#define SRST_GMAC0_DELAYLINE 472
+#define SRST_GMAC1_DELAYLINE 473
+#define SRST_OTPC_ARB 474
+#define SRST_P_PIPEPHY0_GRF 475
+#define SRST_P_PIPEPHY1_GRF 476
+#define SRST_P_PIPEPHY2_GRF 477
+
+#endif
diff --git a/include/dt-bindings/clock/rockchip,rk3588-cru.h b/include/dt-bindings/clock/rockchip,rk3588-cru.h
new file mode 100644
index 000000000000..0c7d3ca2d5bc
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3588-cru.h
@@ -0,0 +1,765 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2022 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3588_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3588_H
+
+/* cru-clocks indices */
+
+#define PLL_B0PLL 0
+#define PLL_B1PLL 1
+#define PLL_LPLL 2
+#define PLL_V0PLL 3
+#define PLL_AUPLL 4
+#define PLL_CPLL 5
+#define PLL_GPLL 6
+#define PLL_NPLL 7
+#define PLL_PPLL 8
+#define ARMCLK_L 9
+#define ARMCLK_B01 10
+#define ARMCLK_B23 11
+#define PCLK_BIGCORE0_ROOT 12
+#define PCLK_BIGCORE0_PVTM 13
+#define PCLK_BIGCORE1_ROOT 14
+#define PCLK_BIGCORE1_PVTM 15
+#define PCLK_DSU_S_ROOT 16
+#define PCLK_DSU_ROOT 17
+#define PCLK_DSU_NS_ROOT 18
+#define PCLK_LITCORE_PVTM 19
+#define PCLK_DBG 20
+#define PCLK_DSU 21
+#define PCLK_S_DAPLITE 22
+#define PCLK_M_DAPLITE 23
+#define MBIST_MCLK_PDM1 24
+#define MBIST_CLK_ACDCDIG 25
+#define HCLK_I2S2_2CH 26
+#define HCLK_I2S3_2CH 27
+#define CLK_I2S2_2CH_SRC 28
+#define CLK_I2S2_2CH_FRAC 29
+#define CLK_I2S2_2CH 30
+#define MCLK_I2S2_2CH 31
+#define I2S2_2CH_MCLKOUT 32
+#define CLK_DAC_ACDCDIG 33
+#define CLK_I2S3_2CH_SRC 34
+#define CLK_I2S3_2CH_FRAC 35
+#define CLK_I2S3_2CH 36
+#define MCLK_I2S3_2CH 37
+#define I2S3_2CH_MCLKOUT 38
+#define PCLK_ACDCDIG 39
+#define HCLK_I2S0_8CH 40
+#define CLK_I2S0_8CH_TX_SRC 41
+#define CLK_I2S0_8CH_TX_FRAC 42
+#define MCLK_I2S0_8CH_TX 43
+#define CLK_I2S0_8CH_TX 44
+#define CLK_I2S0_8CH_RX_SRC 45
+#define CLK_I2S0_8CH_RX_FRAC 46
+#define MCLK_I2S0_8CH_RX 47
+#define CLK_I2S0_8CH_RX 48
+#define I2S0_8CH_MCLKOUT 49
+#define HCLK_PDM1 50
+#define MCLK_PDM1 51
+#define HCLK_AUDIO_ROOT 52
+#define PCLK_AUDIO_ROOT 53
+#define HCLK_SPDIF0 54
+#define CLK_SPDIF0_SRC 55
+#define CLK_SPDIF0_FRAC 56
+#define MCLK_SPDIF0 57
+#define CLK_SPDIF0 58
+#define CLK_SPDIF1 59
+#define HCLK_SPDIF1 60
+#define CLK_SPDIF1_SRC 61
+#define CLK_SPDIF1_FRAC 62
+#define MCLK_SPDIF1 63
+#define ACLK_AV1_ROOT 64
+#define ACLK_AV1 65
+#define PCLK_AV1_ROOT 66
+#define PCLK_AV1 67
+#define PCLK_MAILBOX0 68
+#define PCLK_MAILBOX1 69
+#define PCLK_MAILBOX2 70
+#define PCLK_PMU2 71
+#define PCLK_PMUCM0_INTMUX 72
+#define PCLK_DDRCM0_INTMUX 73
+#define PCLK_TOP 74
+#define PCLK_PWM1 75
+#define CLK_PWM1 76
+#define CLK_PWM1_CAPTURE 77
+#define PCLK_PWM2 78
+#define CLK_PWM2 79
+#define CLK_PWM2_CAPTURE 80
+#define PCLK_PWM3 81
+#define CLK_PWM3 82
+#define CLK_PWM3_CAPTURE 83
+#define PCLK_BUSTIMER0 84
+#define PCLK_BUSTIMER1 85
+#define CLK_BUS_TIMER_ROOT 86
+#define CLK_BUSTIMER0 87
+#define CLK_BUSTIMER1 88
+#define CLK_BUSTIMER2 89
+#define CLK_BUSTIMER3 90
+#define CLK_BUSTIMER4 91
+#define CLK_BUSTIMER5 92
+#define CLK_BUSTIMER6 93
+#define CLK_BUSTIMER7 94
+#define CLK_BUSTIMER8 95
+#define CLK_BUSTIMER9 96
+#define CLK_BUSTIMER10 97
+#define CLK_BUSTIMER11 98
+#define PCLK_WDT0 99
+#define TCLK_WDT0 100
+#define PCLK_CAN0 101
+#define CLK_CAN0 102
+#define PCLK_CAN1 103
+#define CLK_CAN1 104
+#define PCLK_CAN2 105
+#define CLK_CAN2 106
+#define ACLK_DECOM 107
+#define PCLK_DECOM 108
+#define DCLK_DECOM 109
+#define ACLK_DMAC0 110
+#define ACLK_DMAC1 111
+#define ACLK_DMAC2 112
+#define ACLK_BUS_ROOT 113
+#define ACLK_GIC 114
+#define PCLK_GPIO1 115
+#define DBCLK_GPIO1 116
+#define PCLK_GPIO2 117
+#define DBCLK_GPIO2 118
+#define PCLK_GPIO3 119
+#define DBCLK_GPIO3 120
+#define PCLK_GPIO4 121
+#define DBCLK_GPIO4 122
+#define PCLK_I2C1 123
+#define PCLK_I2C2 124
+#define PCLK_I2C3 125
+#define PCLK_I2C4 126
+#define PCLK_I2C5 127
+#define PCLK_I2C6 128
+#define PCLK_I2C7 129
+#define PCLK_I2C8 130
+#define CLK_I2C1 131
+#define CLK_I2C2 132
+#define CLK_I2C3 133
+#define CLK_I2C4 134
+#define CLK_I2C5 135
+#define CLK_I2C6 136
+#define CLK_I2C7 137
+#define CLK_I2C8 138
+#define PCLK_OTPC_NS 139
+#define CLK_OTPC_NS 140
+#define CLK_OTPC_ARB 141
+#define CLK_OTPC_AUTO_RD_G 142
+#define CLK_OTP_PHY_G 143
+#define PCLK_SARADC 144
+#define CLK_SARADC 145
+#define PCLK_SPI0 146
+#define PCLK_SPI1 147
+#define PCLK_SPI2 148
+#define PCLK_SPI3 149
+#define PCLK_SPI4 150
+#define CLK_SPI0 151
+#define CLK_SPI1 152
+#define CLK_SPI2 153
+#define CLK_SPI3 154
+#define CLK_SPI4 155
+#define ACLK_SPINLOCK 156
+#define PCLK_TSADC 157
+#define CLK_TSADC 158
+#define PCLK_UART1 159
+#define PCLK_UART2 160
+#define PCLK_UART3 161
+#define PCLK_UART4 162
+#define PCLK_UART5 163
+#define PCLK_UART6 164
+#define PCLK_UART7 165
+#define PCLK_UART8 166
+#define PCLK_UART9 167
+#define CLK_UART1_SRC 168
+#define CLK_UART1_FRAC 169
+#define CLK_UART1 170
+#define SCLK_UART1 171
+#define CLK_UART2_SRC 172
+#define CLK_UART2_FRAC 173
+#define CLK_UART2 174
+#define SCLK_UART2 175
+#define CLK_UART3_SRC 176
+#define CLK_UART3_FRAC 177
+#define CLK_UART3 178
+#define SCLK_UART3 179
+#define CLK_UART4_SRC 180
+#define CLK_UART4_FRAC 181
+#define CLK_UART4 182
+#define SCLK_UART4 183
+#define CLK_UART5_SRC 184
+#define CLK_UART5_FRAC 185
+#define CLK_UART5 186
+#define SCLK_UART5 187
+#define CLK_UART6_SRC 188
+#define CLK_UART6_FRAC 189
+#define CLK_UART6 190
+#define SCLK_UART6 191
+#define CLK_UART7_SRC 192
+#define CLK_UART7_FRAC 193
+#define CLK_UART7 194
+#define SCLK_UART7 195
+#define CLK_UART8_SRC 196
+#define CLK_UART8_FRAC 197
+#define CLK_UART8 198
+#define SCLK_UART8 199
+#define CLK_UART9_SRC 200
+#define CLK_UART9_FRAC 201
+#define CLK_UART9 202
+#define SCLK_UART9 203
+#define ACLK_CENTER_ROOT 204
+#define ACLK_CENTER_LOW_ROOT 205
+#define HCLK_CENTER_ROOT 206
+#define PCLK_CENTER_ROOT 207
+#define ACLK_DMA2DDR 208
+#define ACLK_DDR_SHAREMEM 209
+#define ACLK_CENTER_S200_ROOT 210
+#define ACLK_CENTER_S400_ROOT 211
+#define FCLK_DDR_CM0_CORE 212
+#define CLK_DDR_TIMER_ROOT 213
+#define CLK_DDR_TIMER0 214
+#define CLK_DDR_TIMER1 215
+#define TCLK_WDT_DDR 216
+#define CLK_DDR_CM0_RTC 217
+#define PCLK_WDT 218
+#define PCLK_TIMER 219
+#define PCLK_DMA2DDR 220
+#define PCLK_SHAREMEM 221
+#define CLK_50M_SRC 222
+#define CLK_100M_SRC 223
+#define CLK_150M_SRC 224
+#define CLK_200M_SRC 225
+#define CLK_250M_SRC 226
+#define CLK_300M_SRC 227
+#define CLK_350M_SRC 228
+#define CLK_400M_SRC 229
+#define CLK_450M_SRC 230
+#define CLK_500M_SRC 231
+#define CLK_600M_SRC 232
+#define CLK_650M_SRC 233
+#define CLK_700M_SRC 234
+#define CLK_800M_SRC 235
+#define CLK_1000M_SRC 236
+#define CLK_1200M_SRC 237
+#define ACLK_TOP_M300_ROOT 238
+#define ACLK_TOP_M500_ROOT 239
+#define ACLK_TOP_M400_ROOT 240
+#define ACLK_TOP_S200_ROOT 241
+#define ACLK_TOP_S400_ROOT 242
+#define CLK_MIPI_CAMARAOUT_M0 243
+#define CLK_MIPI_CAMARAOUT_M1 244
+#define CLK_MIPI_CAMARAOUT_M2 245
+#define CLK_MIPI_CAMARAOUT_M3 246
+#define CLK_MIPI_CAMARAOUT_M4 247
+#define MCLK_GMAC0_OUT 248
+#define REFCLKO25M_ETH0_OUT 249
+#define REFCLKO25M_ETH1_OUT 250
+#define CLK_CIFOUT_OUT 251
+#define PCLK_MIPI_DCPHY0 252
+#define PCLK_MIPI_DCPHY1 253
+#define PCLK_CSIPHY0 254
+#define PCLK_CSIPHY1 255
+#define ACLK_TOP_ROOT 256
+#define PCLK_TOP_ROOT 257
+#define ACLK_LOW_TOP_ROOT 258
+#define PCLK_CRU 259
+#define PCLK_GPU_ROOT 260
+#define CLK_GPU_SRC 261
+#define CLK_GPU 262
+#define CLK_GPU_COREGROUP 263
+#define CLK_GPU_STACKS 264
+#define PCLK_GPU_PVTM 265
+#define CLK_GPU_PVTM 266
+#define CLK_CORE_GPU_PVTM 267
+#define PCLK_GPU_GRF 268
+#define ACLK_ISP1_ROOT 269
+#define HCLK_ISP1_ROOT 270
+#define CLK_ISP1_CORE 271
+#define CLK_ISP1_CORE_MARVIN 272
+#define CLK_ISP1_CORE_VICAP 273
+#define ACLK_ISP1 274
+#define HCLK_ISP1 275
+#define ACLK_NPU1 276
+#define HCLK_NPU1 277
+#define ACLK_NPU2 278
+#define HCLK_NPU2 279
+#define HCLK_NPU_CM0_ROOT 280
+#define FCLK_NPU_CM0_CORE 281
+#define CLK_NPU_CM0_RTC 282
+#define PCLK_NPU_PVTM 283
+#define PCLK_NPU_GRF 284
+#define CLK_NPU_PVTM 285
+#define CLK_CORE_NPU_PVTM 286
+#define ACLK_NPU0 287
+#define HCLK_NPU0 288
+#define HCLK_NPU_ROOT 289
+#define CLK_NPU_DSU0 290
+#define PCLK_NPU_ROOT 291
+#define PCLK_NPU_TIMER 292
+#define CLK_NPUTIMER_ROOT 293
+#define CLK_NPUTIMER0 294
+#define CLK_NPUTIMER1 295
+#define PCLK_NPU_WDT 296
+#define TCLK_NPU_WDT 297
+#define HCLK_EMMC 298
+#define ACLK_EMMC 299
+#define CCLK_EMMC 300
+#define BCLK_EMMC 301
+#define TMCLK_EMMC 302
+#define SCLK_SFC 303
+#define HCLK_SFC 304
+#define HCLK_SFC_XIP 305
+#define HCLK_NVM_ROOT 306
+#define ACLK_NVM_ROOT 307
+#define CLK_GMAC0_PTP_REF 308
+#define CLK_GMAC1_PTP_REF 309
+#define CLK_GMAC_125M 310
+#define CLK_GMAC_50M 311
+#define ACLK_PHP_GIC_ITS 312
+#define ACLK_MMU_PCIE 313
+#define ACLK_MMU_PHP 314
+#define ACLK_PCIE_4L_DBI 315
+#define ACLK_PCIE_2L_DBI 316
+#define ACLK_PCIE_1L0_DBI 317
+#define ACLK_PCIE_1L1_DBI 318
+#define ACLK_PCIE_1L2_DBI 319
+#define ACLK_PCIE_4L_MSTR 320
+#define ACLK_PCIE_2L_MSTR 321
+#define ACLK_PCIE_1L0_MSTR 322
+#define ACLK_PCIE_1L1_MSTR 323
+#define ACLK_PCIE_1L2_MSTR 324
+#define ACLK_PCIE_4L_SLV 325
+#define ACLK_PCIE_2L_SLV 326
+#define ACLK_PCIE_1L0_SLV 327
+#define ACLK_PCIE_1L1_SLV 328
+#define ACLK_PCIE_1L2_SLV 329
+#define PCLK_PCIE_4L 330
+#define PCLK_PCIE_2L 331
+#define PCLK_PCIE_1L0 332
+#define PCLK_PCIE_1L1 333
+#define PCLK_PCIE_1L2 334
+#define CLK_PCIE_AUX0 335
+#define CLK_PCIE_AUX1 336
+#define CLK_PCIE_AUX2 337
+#define CLK_PCIE_AUX3 338
+#define CLK_PCIE_AUX4 339
+#define CLK_PIPEPHY0_REF 340
+#define CLK_PIPEPHY1_REF 341
+#define CLK_PIPEPHY2_REF 342
+#define PCLK_PHP_ROOT 343
+#define PCLK_GMAC0 344
+#define PCLK_GMAC1 345
+#define ACLK_PCIE_ROOT 346
+#define ACLK_PHP_ROOT 347
+#define ACLK_PCIE_BRIDGE 348
+#define ACLK_GMAC0 349
+#define ACLK_GMAC1 350
+#define CLK_PMALIVE0 351
+#define CLK_PMALIVE1 352
+#define CLK_PMALIVE2 353
+#define ACLK_SATA0 354
+#define ACLK_SATA1 355
+#define ACLK_SATA2 356
+#define CLK_RXOOB0 357
+#define CLK_RXOOB1 358
+#define CLK_RXOOB2 359
+#define ACLK_USB3OTG2 360
+#define SUSPEND_CLK_USB3OTG2 361
+#define REF_CLK_USB3OTG2 362
+#define CLK_UTMI_OTG2 363
+#define CLK_PIPEPHY0_PIPE_G 364
+#define CLK_PIPEPHY1_PIPE_G 365
+#define CLK_PIPEPHY2_PIPE_G 366
+#define CLK_PIPEPHY0_PIPE_ASIC_G 367
+#define CLK_PIPEPHY1_PIPE_ASIC_G 368
+#define CLK_PIPEPHY2_PIPE_ASIC_G 369
+#define CLK_PIPEPHY2_PIPE_U3_G 370
+#define CLK_PCIE1L2_PIPE 371
+#define CLK_PCIE4L_PIPE 372
+#define CLK_PCIE2L_PIPE 373
+#define PCLK_PCIE_COMBO_PIPE_PHY0 374
+#define PCLK_PCIE_COMBO_PIPE_PHY1 375
+#define PCLK_PCIE_COMBO_PIPE_PHY2 376
+#define PCLK_PCIE_COMBO_PIPE_PHY 377
+#define HCLK_RGA3_1 378
+#define ACLK_RGA3_1 379
+#define CLK_RGA3_1_CORE 380
+#define ACLK_RGA3_ROOT 381
+#define HCLK_RGA3_ROOT 382
+#define ACLK_RKVDEC_CCU 383
+#define HCLK_RKVDEC0 384
+#define ACLK_RKVDEC0 385
+#define CLK_RKVDEC0_CA 386
+#define CLK_RKVDEC0_HEVC_CA 387
+#define CLK_RKVDEC0_CORE 388
+#define HCLK_RKVDEC1 389
+#define ACLK_RKVDEC1 390
+#define CLK_RKVDEC1_CA 391
+#define CLK_RKVDEC1_HEVC_CA 392
+#define CLK_RKVDEC1_CORE 393
+#define HCLK_SDIO 394
+#define CCLK_SRC_SDIO 395
+#define ACLK_USB_ROOT 396
+#define HCLK_USB_ROOT 397
+#define HCLK_HOST0 398
+#define HCLK_HOST_ARB0 399
+#define HCLK_HOST1 400
+#define HCLK_HOST_ARB1 401
+#define ACLK_USB3OTG0 402
+#define SUSPEND_CLK_USB3OTG0 403
+#define REF_CLK_USB3OTG0 404
+#define ACLK_USB3OTG1 405
+#define SUSPEND_CLK_USB3OTG1 406
+#define REF_CLK_USB3OTG1 407
+#define UTMI_OHCI_CLK48_HOST0 408
+#define UTMI_OHCI_CLK48_HOST1 409
+#define HCLK_IEP2P0 410
+#define ACLK_IEP2P0 411
+#define CLK_IEP2P0_CORE 412
+#define ACLK_JPEG_ENCODER0 413
+#define HCLK_JPEG_ENCODER0 414
+#define ACLK_JPEG_ENCODER1 415
+#define HCLK_JPEG_ENCODER1 416
+#define ACLK_JPEG_ENCODER2 417
+#define HCLK_JPEG_ENCODER2 418
+#define ACLK_JPEG_ENCODER3 419
+#define HCLK_JPEG_ENCODER3 420
+#define ACLK_JPEG_DECODER 421
+#define HCLK_JPEG_DECODER 422
+#define HCLK_RGA2 423
+#define ACLK_RGA2 424
+#define CLK_RGA2_CORE 425
+#define HCLK_RGA3_0 426
+#define ACLK_RGA3_0 427
+#define CLK_RGA3_0_CORE 428
+#define ACLK_VDPU_ROOT 429
+#define ACLK_VDPU_LOW_ROOT 430
+#define HCLK_VDPU_ROOT 431
+#define ACLK_JPEG_DECODER_ROOT 432
+#define ACLK_VPU 433
+#define HCLK_VPU 434
+#define HCLK_RKVENC0_ROOT 435
+#define ACLK_RKVENC0_ROOT 436
+#define HCLK_RKVENC0 437
+#define ACLK_RKVENC0 438
+#define CLK_RKVENC0_CORE 439
+#define HCLK_RKVENC1_ROOT 440
+#define ACLK_RKVENC1_ROOT 441
+#define HCLK_RKVENC1 442
+#define ACLK_RKVENC1 443
+#define CLK_RKVENC1_CORE 444
+#define ICLK_CSIHOST01 445
+#define ICLK_CSIHOST0 446
+#define ICLK_CSIHOST1 447
+#define PCLK_CSI_HOST_0 448
+#define PCLK_CSI_HOST_1 449
+#define PCLK_CSI_HOST_2 450
+#define PCLK_CSI_HOST_3 451
+#define PCLK_CSI_HOST_4 452
+#define PCLK_CSI_HOST_5 453
+#define ACLK_FISHEYE0 454
+#define HCLK_FISHEYE0 455
+#define CLK_FISHEYE0_CORE 456
+#define ACLK_FISHEYE1 457
+#define HCLK_FISHEYE1 458
+#define CLK_FISHEYE1_CORE 459
+#define CLK_ISP0_CORE 460
+#define CLK_ISP0_CORE_MARVIN 461
+#define CLK_ISP0_CORE_VICAP 462
+#define ACLK_ISP0 463
+#define HCLK_ISP0 464
+#define ACLK_VI_ROOT 465
+#define HCLK_VI_ROOT 466
+#define PCLK_VI_ROOT 467
+#define DCLK_VICAP 468
+#define ACLK_VICAP 469
+#define HCLK_VICAP 470
+#define PCLK_DP0 471
+#define PCLK_DP1 472
+#define PCLK_S_DP0 473
+#define PCLK_S_DP1 474
+#define CLK_DP0 475
+#define CLK_DP1 476
+#define HCLK_HDCP_KEY0 477
+#define ACLK_HDCP0 478
+#define HCLK_HDCP0 479
+#define PCLK_HDCP0 480
+#define HCLK_I2S4_8CH 481
+#define ACLK_TRNG0 482
+#define PCLK_TRNG0 483
+#define ACLK_VO0_ROOT 484
+#define HCLK_VO0_ROOT 485
+#define HCLK_VO0_S_ROOT 486
+#define PCLK_VO0_ROOT 487
+#define PCLK_VO0_S_ROOT 488
+#define PCLK_VO0GRF 489
+#define CLK_I2S4_8CH_TX_SRC 490
+#define CLK_I2S4_8CH_TX_FRAC 491
+#define MCLK_I2S4_8CH_TX 492
+#define CLK_I2S4_8CH_TX 493
+#define HCLK_I2S8_8CH 494
+#define CLK_I2S8_8CH_TX_SRC 495
+#define CLK_I2S8_8CH_TX_FRAC 496
+#define MCLK_I2S8_8CH_TX 497
+#define CLK_I2S8_8CH_TX 498
+#define HCLK_SPDIF2_DP0 499
+#define CLK_SPDIF2_DP0_SRC 500
+#define CLK_SPDIF2_DP0_FRAC 501
+#define MCLK_SPDIF2_DP0 502
+#define CLK_SPDIF2_DP0 503
+#define MCLK_SPDIF2 504
+#define HCLK_SPDIF5_DP1 505
+#define CLK_SPDIF5_DP1_SRC 506
+#define CLK_SPDIF5_DP1_FRAC 507
+#define MCLK_SPDIF5_DP1 508
+#define CLK_SPDIF5_DP1 509
+#define MCLK_SPDIF5 510
+#define PCLK_EDP0 511
+#define CLK_EDP0_24M 512
+#define CLK_EDP0_200M 513
+#define PCLK_EDP1 514
+#define CLK_EDP1_24M 515
+#define CLK_EDP1_200M 516
+#define HCLK_HDCP_KEY1 517
+#define ACLK_HDCP1 518
+#define HCLK_HDCP1 519
+#define PCLK_HDCP1 520
+#define ACLK_HDMIRX 521
+#define PCLK_HDMIRX 522
+#define CLK_HDMIRX_REF 523
+#define CLK_HDMIRX_AUD_SRC 524
+#define CLK_HDMIRX_AUD_FRAC 525
+#define CLK_HDMIRX_AUD 526
+#define CLK_HDMIRX_AUD_P_MUX 527
+#define PCLK_HDMITX0 528
+#define CLK_HDMITX0_EARC 529
+#define CLK_HDMITX0_REF 530
+#define PCLK_HDMITX1 531
+#define CLK_HDMITX1_EARC 532
+#define CLK_HDMITX1_REF 533
+#define CLK_HDMITRX_REFSRC 534
+#define ACLK_TRNG1 535
+#define PCLK_TRNG1 536
+#define ACLK_HDCP1_ROOT 537
+#define ACLK_HDMIRX_ROOT 538
+#define HCLK_VO1_ROOT 539
+#define HCLK_VO1_S_ROOT 540
+#define PCLK_VO1_ROOT 541
+#define PCLK_VO1_S_ROOT 542
+#define PCLK_S_EDP0 543
+#define PCLK_S_EDP1 544
+#define PCLK_S_HDMIRX 545
+#define HCLK_I2S10_8CH 546
+#define CLK_I2S10_8CH_RX_SRC 547
+#define CLK_I2S10_8CH_RX_FRAC 548
+#define CLK_I2S10_8CH_RX 549
+#define MCLK_I2S10_8CH_RX 550
+#define HCLK_I2S7_8CH 551
+#define CLK_I2S7_8CH_RX_SRC 552
+#define CLK_I2S7_8CH_RX_FRAC 553
+#define CLK_I2S7_8CH_RX 554
+#define MCLK_I2S7_8CH_RX 555
+#define HCLK_I2S9_8CH 556
+#define CLK_I2S9_8CH_RX_SRC 557
+#define CLK_I2S9_8CH_RX_FRAC 558
+#define CLK_I2S9_8CH_RX 559
+#define MCLK_I2S9_8CH_RX 560
+#define CLK_I2S5_8CH_TX_SRC 561
+#define CLK_I2S5_8CH_TX_FRAC 562
+#define CLK_I2S5_8CH_TX 563
+#define MCLK_I2S5_8CH_TX 564
+#define HCLK_I2S5_8CH 565
+#define CLK_I2S6_8CH_TX_SRC 566
+#define CLK_I2S6_8CH_TX_FRAC 567
+#define CLK_I2S6_8CH_TX 568
+#define MCLK_I2S6_8CH_TX 569
+#define CLK_I2S6_8CH_RX_SRC 570
+#define CLK_I2S6_8CH_RX_FRAC 571
+#define CLK_I2S6_8CH_RX 572
+#define MCLK_I2S6_8CH_RX 573
+#define I2S6_8CH_MCLKOUT 574
+#define HCLK_I2S6_8CH 575
+#define HCLK_SPDIF3 576
+#define CLK_SPDIF3_SRC 577
+#define CLK_SPDIF3_FRAC 578
+#define CLK_SPDIF3 579
+#define MCLK_SPDIF3 580
+#define HCLK_SPDIF4 581
+#define CLK_SPDIF4_SRC 582
+#define CLK_SPDIF4_FRAC 583
+#define CLK_SPDIF4 584
+#define MCLK_SPDIF4 585
+#define HCLK_SPDIFRX0 586
+#define MCLK_SPDIFRX0 587
+#define HCLK_SPDIFRX1 588
+#define MCLK_SPDIFRX1 589
+#define HCLK_SPDIFRX2 590
+#define MCLK_SPDIFRX2 591
+#define ACLK_VO1USB_TOP_ROOT 592
+#define HCLK_VO1USB_TOP_ROOT 593
+#define CLK_HDMIHDP0 594
+#define CLK_HDMIHDP1 595
+#define PCLK_HDPTX0 596
+#define PCLK_HDPTX1 597
+#define PCLK_USBDPPHY0 598
+#define PCLK_USBDPPHY1 599
+#define ACLK_VOP_ROOT 600
+#define ACLK_VOP_LOW_ROOT 601
+#define HCLK_VOP_ROOT 602
+#define PCLK_VOP_ROOT 603
+#define HCLK_VOP 604
+#define ACLK_VOP 605
+#define DCLK_VOP0_SRC 606
+#define DCLK_VOP1_SRC 607
+#define DCLK_VOP2_SRC 608
+#define DCLK_VOP0 609
+#define DCLK_VOP1 610
+#define DCLK_VOP2 611
+#define DCLK_VOP3 612
+#define PCLK_DSIHOST0 613
+#define PCLK_DSIHOST1 614
+#define CLK_DSIHOST0 615
+#define CLK_DSIHOST1 616
+#define CLK_VOP_PMU 617
+#define ACLK_VOP_DOBY 618
+#define ACLK_VOP_SUB_SRC 619
+#define CLK_USBDP_PHY0_IMMORTAL 620
+#define CLK_USBDP_PHY1_IMMORTAL 621
+#define CLK_PMU0 622
+#define PCLK_PMU0 623
+#define PCLK_PMU0IOC 624
+#define PCLK_GPIO0 625
+#define DBCLK_GPIO0 626
+#define PCLK_I2C0 627
+#define CLK_I2C0 628
+#define HCLK_I2S1_8CH 629
+#define CLK_I2S1_8CH_TX_SRC 630
+#define CLK_I2S1_8CH_TX_FRAC 631
+#define CLK_I2S1_8CH_TX 632
+#define MCLK_I2S1_8CH_TX 633
+#define CLK_I2S1_8CH_RX_SRC 634
+#define CLK_I2S1_8CH_RX_FRAC 635
+#define CLK_I2S1_8CH_RX 636
+#define MCLK_I2S1_8CH_RX 637
+#define I2S1_8CH_MCLKOUT 638
+#define CLK_PMU1_50M_SRC 639
+#define CLK_PMU1_100M_SRC 640
+#define CLK_PMU1_200M_SRC 641
+#define CLK_PMU1_300M_SRC 642
+#define CLK_PMU1_400M_SRC 643
+#define HCLK_PMU1_ROOT 644
+#define PCLK_PMU1_ROOT 645
+#define PCLK_PMU0_ROOT 646
+#define HCLK_PMU_CM0_ROOT 647
+#define PCLK_PMU1 648
+#define CLK_DDR_FAIL_SAFE 649
+#define CLK_PMU1 650
+#define HCLK_PDM0 651
+#define MCLK_PDM0 652
+#define HCLK_VAD 653
+#define FCLK_PMU_CM0_CORE 654
+#define CLK_PMU_CM0_RTC 655
+#define PCLK_PMU1_IOC 656
+#define PCLK_PMU1PWM 657
+#define CLK_PMU1PWM 658
+#define CLK_PMU1PWM_CAPTURE 659
+#define PCLK_PMU1TIMER 660
+#define CLK_PMU1TIMER_ROOT 661
+#define CLK_PMU1TIMER0 662
+#define CLK_PMU1TIMER1 663
+#define CLK_UART0_SRC 664
+#define CLK_UART0_FRAC 665
+#define CLK_UART0 666
+#define SCLK_UART0 667
+#define PCLK_UART0 668
+#define PCLK_PMU1WDT 669
+#define TCLK_PMU1WDT 670
+#define CLK_CR_PARA 671
+#define CLK_USB2PHY_HDPTXRXPHY_REF 672
+#define CLK_USBDPPHY_MIPIDCPPHY_REF 673
+#define CLK_REF_PIPE_PHY0_OSC_SRC 674
+#define CLK_REF_PIPE_PHY1_OSC_SRC 675
+#define CLK_REF_PIPE_PHY2_OSC_SRC 676
+#define CLK_REF_PIPE_PHY0_PLL_SRC 677
+#define CLK_REF_PIPE_PHY1_PLL_SRC 678
+#define CLK_REF_PIPE_PHY2_PLL_SRC 679
+#define CLK_REF_PIPE_PHY0 680
+#define CLK_REF_PIPE_PHY1 681
+#define CLK_REF_PIPE_PHY2 682
+#define SCLK_SDIO_DRV 683
+#define SCLK_SDIO_SAMPLE 684
+#define SCLK_SDMMC_DRV 685
+#define SCLK_SDMMC_SAMPLE 686
+#define CLK_PCIE1L0_PIPE 687
+#define CLK_PCIE1L1_PIPE 688
+#define CLK_BIGCORE0_PVTM 689
+#define CLK_CORE_BIGCORE0_PVTM 690
+#define CLK_BIGCORE1_PVTM 691
+#define CLK_CORE_BIGCORE1_PVTM 692
+#define CLK_LITCORE_PVTM 693
+#define CLK_CORE_LITCORE_PVTM 694
+#define CLK_AUX16M_0 695
+#define CLK_AUX16M_1 696
+#define CLK_PHY0_REF_ALT_P 697
+#define CLK_PHY0_REF_ALT_M 698
+#define CLK_PHY1_REF_ALT_P 699
+#define CLK_PHY1_REF_ALT_M 700
+#define ACLK_ISP1_PRE 701
+#define HCLK_ISP1_PRE 702
+#define HCLK_NVM 703
+#define ACLK_USB 704
+#define HCLK_USB 705
+#define ACLK_JPEG_DECODER_PRE 706
+#define ACLK_VDPU_LOW_PRE 707
+#define ACLK_RKVENC1_PRE 708
+#define HCLK_RKVENC1_PRE 709
+#define HCLK_RKVDEC0_PRE 710
+#define ACLK_RKVDEC0_PRE 711
+#define HCLK_RKVDEC1_PRE 712
+#define ACLK_RKVDEC1_PRE 713
+#define ACLK_HDCP0_PRE 714
+#define HCLK_VO0 715
+#define ACLK_HDCP1_PRE 716
+#define HCLK_VO1 717
+#define ACLK_AV1_PRE 718
+#define PCLK_AV1_PRE 719
+#define HCLK_SDIO_PRE 720
+#define PCLK_VO1GRF 721
+
+/* scmi-clocks indices */
+
+#define SCMI_CLK_CPUL 0
+#define SCMI_CLK_DSU 1
+#define SCMI_CLK_CPUB01 2
+#define SCMI_CLK_CPUB23 3
+#define SCMI_CLK_DDR 4
+#define SCMI_CLK_GPU 5
+#define SCMI_CLK_NPU 6
+#define SCMI_CLK_SBUS 7
+#define SCMI_PCLK_SBUS 8
+#define SCMI_CCLK_SD 9
+#define SCMI_DCLK_SD 10
+#define SCMI_ACLK_SECURE_NS 11
+#define SCMI_HCLK_SECURE_NS 12
+#define SCMI_TCLK_WDT 13
+#define SCMI_KEYLADDER_CORE 14
+#define SCMI_KEYLADDER_RNG 15
+#define SCMI_ACLK_SECURE_S 16
+#define SCMI_HCLK_SECURE_S 17
+#define SCMI_PCLK_SECURE_S 18
+#define SCMI_CRYPTO_RNG 19
+#define SCMI_CRYPTO_CORE 20
+#define SCMI_CRYPTO_PKA 21
+#define SCMI_SPLL 22
+#define SCMI_HCLK_SD 23
+
+#endif
diff --git a/include/dt-bindings/clock/rockchip,rv1126-cru.h b/include/dt-bindings/clock/rockchip,rv1126-cru.h
new file mode 100644
index 000000000000..e89a3a5a4a34
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1126-cru.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+
+/* pmucru-clocks indices */
+
+/* pll clocks */
+#define PLL_GPLL 1
+
+/* sclk (special clocks) */
+#define CLK_OSC0_DIV32K 2
+#define CLK_RTC32K 3
+#define CLK_WIFI_DIV 4
+#define CLK_WIFI_OSC0 5
+#define CLK_WIFI 6
+#define CLK_PMU 7
+#define SCLK_UART1_DIV 8
+#define SCLK_UART1_FRACDIV 9
+#define SCLK_UART1_MUX 10
+#define SCLK_UART1 11
+#define CLK_I2C0 12
+#define CLK_I2C2 13
+#define CLK_CAPTURE_PWM0 14
+#define CLK_PWM0 15
+#define CLK_CAPTURE_PWM1 16
+#define CLK_PWM1 17
+#define CLK_SPI0 18
+#define DBCLK_GPIO0 19
+#define CLK_PMUPVTM 20
+#define CLK_CORE_PMUPVTM 21
+#define CLK_REF12M 22
+#define CLK_USBPHY_OTG_REF 23
+#define CLK_USBPHY_HOST_REF 24
+#define CLK_REF24M 25
+#define CLK_MIPIDSIPHY_REF 26
+
+/* pclk */
+#define PCLK_PDPMU 30
+#define PCLK_PMU 31
+#define PCLK_UART1 32
+#define PCLK_I2C0 33
+#define PCLK_I2C2 34
+#define PCLK_PWM0 35
+#define PCLK_PWM1 36
+#define PCLK_SPI0 37
+#define PCLK_GPIO0 38
+#define PCLK_PMUSGRF 39
+#define PCLK_PMUGRF 40
+#define PCLK_PMUCRU 41
+#define PCLK_CHIPVEROTP 42
+#define PCLK_PDPMU_NIU 43
+#define PCLK_PMUPVTM 44
+#define PCLK_SCRKEYGEN 45
+
+#define CLKPMU_NR_CLKS (PCLK_SCRKEYGEN + 1)
+
+/* cru-clocks indices */
+
+/* pll clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_HPLL 4
+
+/* sclk (special clocks) */
+#define ARMCLK 5
+#define USB480M 6
+#define CLK_CORE_CPUPVTM 7
+#define CLK_CPUPVTM 8
+#define CLK_SCR1 9
+#define CLK_SCR1_CORE 10
+#define CLK_SCR1_RTC 11
+#define CLK_SCR1_JTAG 12
+#define SCLK_UART0_DIV 13
+#define SCLK_UART0_FRAC 14
+#define SCLK_UART0_MUX 15
+#define SCLK_UART0 16
+#define SCLK_UART2_DIV 17
+#define SCLK_UART2_FRAC 18
+#define SCLK_UART2_MUX 19
+#define SCLK_UART2 20
+#define SCLK_UART3_DIV 21
+#define SCLK_UART3_FRAC 22
+#define SCLK_UART3_MUX 23
+#define SCLK_UART3 24
+#define SCLK_UART4_DIV 25
+#define SCLK_UART4_FRAC 26
+#define SCLK_UART4_MUX 27
+#define SCLK_UART4 28
+#define SCLK_UART5_DIV 29
+#define SCLK_UART5_FRAC 30
+#define SCLK_UART5_MUX 31
+#define SCLK_UART5 32
+#define CLK_I2C1 33
+#define CLK_I2C3 34
+#define CLK_I2C4 35
+#define CLK_I2C5 36
+#define CLK_SPI1 37
+#define CLK_CAPTURE_PWM2 38
+#define CLK_PWM2 39
+#define DBCLK_GPIO1 40
+#define DBCLK_GPIO2 41
+#define DBCLK_GPIO3 42
+#define DBCLK_GPIO4 43
+#define CLK_SARADC 44
+#define CLK_TIMER0 45
+#define CLK_TIMER1 46
+#define CLK_TIMER2 47
+#define CLK_TIMER3 48
+#define CLK_TIMER4 49
+#define CLK_TIMER5 50
+#define CLK_CAN 51
+#define CLK_NPU_TSADC 52
+#define CLK_NPU_TSADCPHY 53
+#define CLK_CPU_TSADC 54
+#define CLK_CPU_TSADCPHY 55
+#define CLK_CRYPTO_CORE 56
+#define CLK_CRYPTO_PKA 57
+#define MCLK_I2S0_TX_DIV 58
+#define MCLK_I2S0_TX_FRACDIV 59
+#define MCLK_I2S0_TX_MUX 60
+#define MCLK_I2S0_TX 61
+#define MCLK_I2S0_RX_DIV 62
+#define MCLK_I2S0_RX_FRACDIV 63
+#define MCLK_I2S0_RX_MUX 64
+#define MCLK_I2S0_RX 65
+#define MCLK_I2S0_TX_OUT2IO 66
+#define MCLK_I2S0_RX_OUT2IO 67
+#define MCLK_I2S1_DIV 68
+#define MCLK_I2S1_FRACDIV 69
+#define MCLK_I2S1_MUX 70
+#define MCLK_I2S1 71
+#define MCLK_I2S1_OUT2IO 72
+#define MCLK_I2S2_DIV 73
+#define MCLK_I2S2_FRACDIV 74
+#define MCLK_I2S2_MUX 75
+#define MCLK_I2S2 76
+#define MCLK_I2S2_OUT2IO 77
+#define MCLK_PDM 78
+#define SCLK_ADUPWM_DIV 79
+#define SCLK_AUDPWM_FRACDIV 80
+#define SCLK_AUDPWM_MUX 81
+#define SCLK_AUDPWM 82
+#define CLK_ACDCDIG_ADC 83
+#define CLK_ACDCDIG_DAC 84
+#define CLK_ACDCDIG_I2C 85
+#define CLK_VENC_CORE 86
+#define CLK_VDEC_CORE 87
+#define CLK_VDEC_CA 88
+#define CLK_VDEC_HEVC_CA 89
+#define CLK_RGA_CORE 90
+#define CLK_IEP_CORE 91
+#define CLK_ISP_DIV 92
+#define CLK_ISP_NP5 93
+#define CLK_ISP_NUX 94
+#define CLK_ISP 95
+#define CLK_CIF_OUT_DIV 96
+#define CLK_CIF_OUT_FRACDIV 97
+#define CLK_CIF_OUT_MUX 98
+#define CLK_CIF_OUT 99
+#define CLK_MIPICSI_OUT_DIV 100
+#define CLK_MIPICSI_OUT_FRACDIV 101
+#define CLK_MIPICSI_OUT_MUX 102
+#define CLK_MIPICSI_OUT 103
+#define CLK_ISPP_DIV 104
+#define CLK_ISPP_NP5 105
+#define CLK_ISPP_NUX 106
+#define CLK_ISPP 107
+#define CLK_SDMMC 108
+#define SCLK_SDMMC_DRV 109
+#define SCLK_SDMMC_SAMPLE 110
+#define CLK_SDIO 111
+#define SCLK_SDIO_DRV 112
+#define SCLK_SDIO_SAMPLE 113
+#define CLK_EMMC 114
+#define SCLK_EMMC_DRV 115
+#define SCLK_EMMC_SAMPLE 116
+#define CLK_NANDC 117
+#define SCLK_SFC 118
+#define CLK_USBHOST_UTMI_OHCI 119
+#define CLK_USBOTG_REF 120
+#define CLK_GMAC_DIV 121
+#define CLK_GMAC_RGMII_M0 122
+#define CLK_GMAC_SRC_M0 123
+#define CLK_GMAC_RGMII_M1 124
+#define CLK_GMAC_SRC_M1 125
+#define CLK_GMAC_SRC 126
+#define CLK_GMAC_REF 127
+#define CLK_GMAC_TX_SRC 128
+#define CLK_GMAC_TX_DIV5 129
+#define CLK_GMAC_TX_DIV50 130
+#define RGMII_MODE_CLK 131
+#define CLK_GMAC_RX_SRC 132
+#define CLK_GMAC_RX_DIV2 133
+#define CLK_GMAC_RX_DIV20 134
+#define RMII_MODE_CLK 135
+#define CLK_GMAC_TX_RX 136
+#define CLK_GMAC_PTPREF 137
+#define CLK_GMAC_ETHERNET_OUT 138
+#define CLK_DDRPHY 139
+#define CLK_DDR_MON 140
+#define TMCLK_DDR_MON 141
+#define CLK_NPU_DIV 142
+#define CLK_NPU_NP5 143
+#define CLK_CORE_NPU 144
+#define CLK_CORE_NPUPVTM 145
+#define CLK_NPUPVTM 146
+#define SCLK_DDRCLK 147
+#define CLK_OTP 148
+
+/* dclk */
+#define DCLK_DECOM 150
+#define DCLK_VOP_DIV 151
+#define DCLK_VOP_FRACDIV 152
+#define DCLK_VOP_MUX 153
+#define DCLK_VOP 154
+#define DCLK_CIF 155
+#define DCLK_CIFLITE 156
+
+/* aclk */
+#define ACLK_PDBUS 160
+#define ACLK_DMAC 161
+#define ACLK_DCF 162
+#define ACLK_SPINLOCK 163
+#define ACLK_DECOM 164
+#define ACLK_PDCRYPTO 165
+#define ACLK_CRYPTO 166
+#define ACLK_PDVEPU 167
+#define ACLK_VENC 168
+#define ACLK_PDVDEC 169
+#define ACLK_PDJPEG 170
+#define ACLK_VDEC 171
+#define ACLK_JPEG 172
+#define ACLK_PDVO 173
+#define ACLK_RGA 174
+#define ACLK_VOP 175
+#define ACLK_IEP 176
+#define ACLK_PDVI_DIV 177
+#define ACLK_PDVI_NP5 178
+#define ACLK_PDVI 179
+#define ACLK_ISP 180
+#define ACLK_CIF 181
+#define ACLK_CIFLITE 182
+#define ACLK_PDISPP_DIV 183
+#define ACLK_PDISPP_NP5 184
+#define ACLK_PDISPP 185
+#define ACLK_ISPP 186
+#define ACLK_PDPHP 187
+#define ACLK_PDUSB 188
+#define ACLK_USBOTG 189
+#define ACLK_PDGMAC 190
+#define ACLK_GMAC 191
+#define ACLK_PDNPU_DIV 192
+#define ACLK_PDNPU_NP5 193
+#define ACLK_PDNPU 194
+#define ACLK_NPU 195
+
+/* hclk */
+#define HCLK_PDCORE_NIU 200
+#define HCLK_PDUSB 201
+#define HCLK_PDCRYPTO 202
+#define HCLK_CRYPTO 203
+#define HCLK_PDAUDIO 204
+#define HCLK_I2S0 205
+#define HCLK_I2S1 206
+#define HCLK_I2S2 207
+#define HCLK_PDM 208
+#define HCLK_AUDPWM 209
+#define HCLK_PDVEPU 210
+#define HCLK_VENC 211
+#define HCLK_PDVDEC 212
+#define HCLK_PDJPEG 213
+#define HCLK_VDEC 214
+#define HCLK_JPEG 215
+#define HCLK_PDVO 216
+#define HCLK_RGA 217
+#define HCLK_VOP 218
+#define HCLK_IEP 219
+#define HCLK_PDVI 220
+#define HCLK_ISP 221
+#define HCLK_CIF 222
+#define HCLK_CIFLITE 223
+#define HCLK_PDISPP 224
+#define HCLK_ISPP 225
+#define HCLK_PDPHP 226
+#define HCLK_PDSDMMC 227
+#define HCLK_SDMMC 228
+#define HCLK_PDSDIO 229
+#define HCLK_SDIO 230
+#define HCLK_PDNVM 231
+#define HCLK_EMMC 232
+#define HCLK_NANDC 233
+#define HCLK_SFC 234
+#define HCLK_SFCXIP 235
+#define HCLK_PDBUS 236
+#define HCLK_USBHOST 237
+#define HCLK_USBHOST_ARB 238
+#define HCLK_PDNPU 239
+#define HCLK_NPU 240
+
+/* pclk */
+#define PCLK_CPUPVTM 245
+#define PCLK_PDBUS 246
+#define PCLK_DCF 247
+#define PCLK_WDT 248
+#define PCLK_MAILBOX 249
+#define PCLK_UART0 250
+#define PCLK_UART2 251
+#define PCLK_UART3 252
+#define PCLK_UART4 253
+#define PCLK_UART5 254
+#define PCLK_I2C1 255
+#define PCLK_I2C3 256
+#define PCLK_I2C4 257
+#define PCLK_I2C5 258
+#define PCLK_SPI1 259
+#define PCLK_PWM2 261
+#define PCLK_GPIO1 262
+#define PCLK_GPIO2 263
+#define PCLK_GPIO3 264
+#define PCLK_GPIO4 265
+#define PCLK_SARADC 266
+#define PCLK_TIMER 267
+#define PCLK_DECOM 268
+#define PCLK_CAN 269
+#define PCLK_NPU_TSADC 270
+#define PCLK_CPU_TSADC 271
+#define PCLK_ACDCDIG 272
+#define PCLK_PDVO 273
+#define PCLK_DSIHOST 274
+#define PCLK_PDVI 275
+#define PCLK_CSIHOST 276
+#define PCLK_PDGMAC 277
+#define PCLK_GMAC 278
+#define PCLK_PDDDR 279
+#define PCLK_DDR_MON 280
+#define PCLK_PDNPU 281
+#define PCLK_NPUPVTM 282
+#define PCLK_PDTOP 283
+#define PCLK_TOPCRU 284
+#define PCLK_TOPGRF 285
+#define PCLK_CPUEMADET 286
+#define PCLK_DDRPHY 287
+#define PCLK_DSIPHY 289
+#define PCLK_CSIPHY0 290
+#define PCLK_CSIPHY1 291
+#define PCLK_USBPHY_HOST 292
+#define PCLK_USBPHY_OTG 293
+#define PCLK_OTP 294
+
+#define CLK_NR_CLKS (PCLK_OTP + 1)
+
+/* pmu soft-reset indices */
+
+/* pmu_cru_softrst_con0 */
+#define SRST_PDPMU_NIU_P 0
+#define SRST_PMU_SGRF_P 1
+#define SRST_PMU_SGRF_REMAP_P 2
+#define SRST_I2C0_P 3
+#define SRST_I2C0 4
+#define SRST_I2C2_P 7
+#define SRST_I2C2 8
+#define SRST_UART1_P 9
+#define SRST_UART1 10
+#define SRST_PWM0_P 11
+#define SRST_PWM0 12
+#define SRST_PWM1_P 13
+#define SRST_PWM1 14
+#define SRST_DDR_FAIL_SAFE 15
+
+/* pmu_cru_softrst_con1 */
+#define SRST_GPIO0_P 17
+#define SRST_GPIO0_DB 18
+#define SRST_SPI0_P 19
+#define SRST_SPI0 20
+#define SRST_PMUGRF_P 21
+#define SRST_CHIPVEROTP_P 22
+#define SRST_PMUPVTM 24
+#define SRST_PMUPVTM_P 25
+#define SRST_PMUCRU_P 30
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_NL2 12
+#define SRST_CORE_NIU_A 13
+#define SRST_DBG_DAPLITE_P 14
+#define SRST_DAPLITE_P 15
+
+/* cru_softrst_con1 */
+#define SRST_PDBUS_NIU1_A 16
+#define SRST_PDBUS_NIU1_H 17
+#define SRST_PDBUS_NIU1_P 18
+#define SRST_PDBUS_NIU2_A 19
+#define SRST_PDBUS_NIU2_H 20
+#define SRST_PDBUS_NIU3_A 21
+#define SRST_PDBUS_NIU3_H 22
+#define SRST_PDBUS_HOLD_NIU1_A 23
+#define SRST_DBG_NIU_P 24
+#define SRST_PDCORE_NIIU_H 25
+#define SRST_MUC_NIU 26
+#define SRST_DCF_A 29
+#define SRST_DCF_P 30
+#define SRST_SYSTEM_SRAM_A 31
+
+/* cru_softrst_con2 */
+#define SRST_I2C1_P 32
+#define SRST_I2C1 33
+#define SRST_I2C3_P 34
+#define SRST_I2C3 35
+#define SRST_I2C4_P 36
+#define SRST_I2C4 37
+#define SRST_I2C5_P 38
+#define SRST_I2C5 39
+#define SRST_SPI1_P 40
+#define SRST_SPI1 41
+#define SRST_MCU_CORE 42
+#define SRST_PWM2_P 44
+#define SRST_PWM2 45
+#define SRST_SPINLOCK_A 46
+
+/* cru_softrst_con3 */
+#define SRST_UART0_P 48
+#define SRST_UART0 49
+#define SRST_UART2_P 50
+#define SRST_UART2 51
+#define SRST_UART3_P 52
+#define SRST_UART3 53
+#define SRST_UART4_P 54
+#define SRST_UART4 55
+#define SRST_UART5_P 56
+#define SRST_UART5 57
+#define SRST_WDT_P 58
+#define SRST_SARADC_P 59
+#define SRST_GRF_P 61
+#define SRST_TIMER_P 62
+#define SRST_MAILBOX_P 63
+
+/* cru_softrst_con4 */
+#define SRST_TIMER0 64
+#define SRST_TIMER1 65
+#define SRST_TIMER2 66
+#define SRST_TIMER3 67
+#define SRST_TIMER4 68
+#define SRST_TIMER5 69
+#define SRST_INTMUX_P 70
+#define SRST_GPIO1_P 72
+#define SRST_GPIO1_DB 73
+#define SRST_GPIO2_P 74
+#define SRST_GPIO2_DB 75
+#define SRST_GPIO3_P 76
+#define SRST_GPIO3_DB 77
+#define SRST_GPIO4_P 78
+#define SRST_GPIO4_DB 79
+
+/* cru_softrst_con5 */
+#define SRST_CAN_P 80
+#define SRST_CAN 81
+#define SRST_DECOM_A 85
+#define SRST_DECOM_P 86
+#define SRST_DECOM_D 87
+#define SRST_PDCRYPTO_NIU_A 88
+#define SRST_PDCRYPTO_NIU_H 89
+#define SRST_CRYPTO_A 90
+#define SRST_CRYPTO_H 91
+#define SRST_CRYPTO_CORE 92
+#define SRST_CRYPTO_PKA 93
+#define SRST_SGRF_P 95
+
+/* cru_softrst_con6 */
+#define SRST_PDAUDIO_NIU_H 96
+#define SRST_PDAUDIO_NIU_P 97
+#define SRST_I2S0_H 98
+#define SRST_I2S0_TX_M 99
+#define SRST_I2S0_RX_M 100
+#define SRST_I2S1_H 101
+#define SRST_I2S1_M 102
+#define SRST_I2S2_H 103
+#define SRST_I2S2_M 104
+#define SRST_PDM_H 105
+#define SRST_PDM_M 106
+#define SRST_AUDPWM_H 107
+#define SRST_AUDPWM 108
+#define SRST_ACDCDIG_P 109
+#define SRST_ACDCDIG 110
+
+/* cru_softrst_con7 */
+#define SRST_PDVEPU_NIU_A 112
+#define SRST_PDVEPU_NIU_H 113
+#define SRST_VENC_A 114
+#define SRST_VENC_H 115
+#define SRST_VENC_CORE 116
+#define SRST_PDVDEC_NIU_A 117
+#define SRST_PDVDEC_NIU_H 118
+#define SRST_VDEC_A 119
+#define SRST_VDEC_H 120
+#define SRST_VDEC_CORE 121
+#define SRST_VDEC_CA 122
+#define SRST_VDEC_HEVC_CA 123
+#define SRST_PDJPEG_NIU_A 124
+#define SRST_PDJPEG_NIU_H 125
+#define SRST_JPEG_A 126
+#define SRST_JPEG_H 127
+
+/* cru_softrst_con8 */
+#define SRST_PDVO_NIU_A 128
+#define SRST_PDVO_NIU_H 129
+#define SRST_PDVO_NIU_P 130
+#define SRST_RGA_A 131
+#define SRST_RGA_H 132
+#define SRST_RGA_CORE 133
+#define SRST_VOP_A 134
+#define SRST_VOP_H 135
+#define SRST_VOP_D 136
+#define SRST_TXBYTEHS_DSIHOST 137
+#define SRST_DSIHOST_P 138
+#define SRST_IEP_A 139
+#define SRST_IEP_H 140
+#define SRST_IEP_CORE 141
+#define SRST_ISP_RX_P 142
+
+/* cru_softrst_con9 */
+#define SRST_PDVI_NIU_A 144
+#define SRST_PDVI_NIU_H 145
+#define SRST_PDVI_NIU_P 146
+#define SRST_ISP 147
+#define SRST_CIF_A 148
+#define SRST_CIF_H 149
+#define SRST_CIF_D 150
+#define SRST_CIF_P 151
+#define SRST_CIF_I 152
+#define SRST_CIF_RX_P 153
+#define SRST_PDISPP_NIU_A 154
+#define SRST_PDISPP_NIU_H 155
+#define SRST_ISPP_A 156
+#define SRST_ISPP_H 157
+#define SRST_ISPP 158
+#define SRST_CSIHOST_P 159
+
+/* cru_softrst_con10 */
+#define SRST_PDPHPMID_NIU_A 160
+#define SRST_PDPHPMID_NIU_H 161
+#define SRST_PDNVM_NIU_H 163
+#define SRST_SDMMC_H 164
+#define SRST_SDIO_H 165
+#define SRST_EMMC_H 166
+#define SRST_SFC_H 167
+#define SRST_SFCXIP_H 168
+#define SRST_SFC 169
+#define SRST_NANDC_H 170
+#define SRST_NANDC 171
+#define SRST_PDSDMMC_H 173
+#define SRST_PDSDIO_H 174
+
+/* cru_softrst_con11 */
+#define SRST_PDUSB_NIU_A 176
+#define SRST_PDUSB_NIU_H 177
+#define SRST_USBHOST_H 178
+#define SRST_USBHOST_ARB_H 179
+#define SRST_USBHOST_UTMI 180
+#define SRST_USBOTG_A 181
+#define SRST_USBPHY_OTG_P 182
+#define SRST_USBPHY_HOST_P 183
+#define SRST_USBPHYPOR_OTG 184
+#define SRST_USBPHYPOR_HOST 185
+#define SRST_PDGMAC_NIU_A 188
+#define SRST_PDGMAC_NIU_P 189
+#define SRST_GMAC_A 190
+
+/* cru_softrst_con12 */
+#define SRST_DDR_DFICTL_P 193
+#define SRST_DDR_MON_P 194
+#define SRST_DDR_STANDBY_P 195
+#define SRST_DDR_GRF_P 196
+#define SRST_DDR_MSCH_P 197
+#define SRST_DDR_SPLIT_A 198
+#define SRST_DDR_MSCH 199
+#define SRST_DDR_DFICTL 202
+#define SRST_DDR_STANDBY 203
+#define SRST_NPUMCU_NIU 205
+#define SRST_DDRPHY_P 206
+#define SRST_DDRPHY 207
+
+/* cru_softrst_con13 */
+#define SRST_PDNPU_NIU_A 208
+#define SRST_PDNPU_NIU_H 209
+#define SRST_PDNPU_NIU_P 210
+#define SRST_NPU_A 211
+#define SRST_NPU_H 212
+#define SRST_NPU 213
+#define SRST_NPUPVTM_P 214
+#define SRST_NPUPVTM 215
+#define SRST_NPU_TSADC_P 216
+#define SRST_NPU_TSADC 217
+#define SRST_NPU_TSADCPHY 218
+#define SRST_CIFLITE_A 220
+#define SRST_CIFLITE_H 221
+#define SRST_CIFLITE_D 222
+#define SRST_CIFLITE_RX_P 223
+
+/* cru_softrst_con14 */
+#define SRST_TOPNIU_P 224
+#define SRST_TOPCRU_P 225
+#define SRST_TOPGRF_P 226
+#define SRST_CPUEMADET_P 227
+#define SRST_CSIPHY0_P 228
+#define SRST_CSIPHY1_P 229
+#define SRST_DSIPHY_P 230
+#define SRST_CPU_TSADC_P 232
+#define SRST_CPU_TSADC 233
+#define SRST_CPU_TSADCPHY 234
+#define SRST_CPUPVTM_P 235
+#define SRST_CPUPVTM 236
+
+#endif
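
The clock and soft-reset indices in the CRU header above are consumed from device tree source rather than from C: each number is the specifier cell a consumer node passes alongside the clock controller's phandle. A minimal consumer sketch follows; the &cru label, node name, unit address, unit names, and compatible string are placeholders, and only the HCLK_SDMMC/CLK_SDMMC/SRST_SDMMC_H indices come from the header itself.

	/* Illustrative consumer only; everything except the
	 * header macros is a placeholder. */
	sdmmc: mmc@ffc50000 {
		compatible = "vendor,example-mmc";
		clocks = <&cru HCLK_SDMMC>, <&cru CLK_SDMMC>;
		clock-names = "biu", "ciu";
		resets = <&cru SRST_SDMMC_H>;
		reset-names = "reset";
	};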
diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h
deleted file mode 100644
index 0fb65c3f2f59..000000000000
--- a/include/dt-bindings/clock/s3c2410.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Device Tree binding constants clock controllers of Samsung S3C2410 and later.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H
-#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H
-
-/*
- * Let each exported clock get a unique index, which is used on DT-enabled
- * platforms to lookup the clock from a clock specifier. These indices are
- * therefore considered an ABI and so must not be changed. This implies
- * that new clocks should be added either in free spaces between clock groups
- * or at the end.
- */
-
-/* Core clocks. */
-
-/* id 1 is reserved */
-#define MPLL 2
-#define UPLL 3
-#define FCLK 4
-#define HCLK 5
-#define PCLK 6
-#define UCLK 7
-#define ARMCLK 8
-
-/* pclk-gates */
-#define PCLK_UART0 16
-#define PCLK_UART1 17
-#define PCLK_UART2 18
-#define PCLK_I2C 19
-#define PCLK_SDI 20
-#define PCLK_SPI 21
-#define PCLK_ADC 22
-#define PCLK_AC97 23
-#define PCLK_I2S 24
-#define PCLK_PWM 25
-#define PCLK_RTC 26
-#define PCLK_GPIO 27
-
-
-/* hclk-gates */
-#define HCLK_LCD 32
-#define HCLK_USBH 33
-#define HCLK_USBD 34
-#define HCLK_NAND 35
-#define HCLK_CAM 36
-
-
-#define CAMIF 40
-
-
-/* Total number of clocks. */
-#define NR_CLKS (CAMIF + 1)
-
-#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H */
diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h
deleted file mode 100644
index b4656156cc0f..000000000000
--- a/include/dt-bindings/clock/s3c2412.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Device Tree binding constants clock controllers of Samsung S3C2412.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H
-#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H
-
-/*
- * Let each exported clock get a unique index, which is used on DT-enabled
- * platforms to lookup the clock from a clock specifier. These indices are
- * therefore considered an ABI and so must not be changed. This implies
- * that new clocks should be added either in free spaces between clock groups
- * or at the end.
- */
-
-/* Core clocks. */
-
-/* id 1 is reserved */
-#define MPLL 2
-#define UPLL 3
-#define MDIVCLK 4
-#define MSYSCLK 5
-#define USYSCLK 6
-#define HCLK 7
-#define PCLK 8
-#define ARMDIV 9
-#define ARMCLK 10
-
-
-/* Special clocks */
-#define SCLK_CAM 16
-#define SCLK_UART 17
-#define SCLK_I2S 18
-#define SCLK_USBD 19
-#define SCLK_USBH 20
-
-/* pclk-gates */
-#define PCLK_WDT 32
-#define PCLK_SPI 33
-#define PCLK_I2S 34
-#define PCLK_I2C 35
-#define PCLK_ADC 36
-#define PCLK_RTC 37
-#define PCLK_GPIO 38
-#define PCLK_UART2 39
-#define PCLK_UART1 40
-#define PCLK_UART0 41
-#define PCLK_SDI 42
-#define PCLK_PWM 43
-#define PCLK_USBD 44
-
-/* hclk-gates */
-#define HCLK_HALF 48
-#define HCLK_X2 49
-#define HCLK_SDRAM 50
-#define HCLK_USBH 51
-#define HCLK_LCD 52
-#define HCLK_NAND 53
-#define HCLK_DMA3 54
-#define HCLK_DMA2 55
-#define HCLK_DMA1 56
-#define HCLK_DMA0 57
-
-/* Total number of clocks. */
-#define NR_CLKS (HCLK_DMA0 + 1)
-
-#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H */
diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h
deleted file mode 100644
index a9d2f105d536..000000000000
--- a/include/dt-bindings/clock/s3c2443.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Device Tree binding constants clock controllers of Samsung S3C2443 and later.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H
-#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H
-
-/*
- * Let each exported clock get a unique index, which is used on DT-enabled
- * platforms to lookup the clock from a clock specifier. These indices are
- * therefore considered an ABI and so must not be changed. This implies
- * that new clocks should be added either in free spaces between clock groups
- * or at the end.
- */
-
-/* Core clocks. */
-#define MSYSCLK 1
-#define ESYSCLK 2
-#define ARMDIV 3
-#define ARMCLK 4
-#define HCLK 5
-#define PCLK 6
-#define MPLL 7
-#define EPLL 8
-
-/* Special clocks */
-#define SCLK_HSSPI0 16
-#define SCLK_FIMD 17
-#define SCLK_I2S0 18
-#define SCLK_I2S1 19
-#define SCLK_HSMMC1 20
-#define SCLK_HSMMC_EXT 21
-#define SCLK_CAM 22
-#define SCLK_UART 23
-#define SCLK_USBH 24
-
-/* Muxes */
-#define MUX_HSSPI0 32
-#define MUX_HSSPI1 33
-#define MUX_HSMMC0 34
-#define MUX_HSMMC1 35
-
-/* hclk-gates */
-#define HCLK_DMA0 48
-#define HCLK_DMA1 49
-#define HCLK_DMA2 50
-#define HCLK_DMA3 51
-#define HCLK_DMA4 52
-#define HCLK_DMA5 53
-#define HCLK_DMA6 54
-#define HCLK_DMA7 55
-#define HCLK_CAM 56
-#define HCLK_LCD 57
-#define HCLK_USBH 58
-#define HCLK_USBD 59
-#define HCLK_IROM 60
-#define HCLK_HSMMC0 61
-#define HCLK_HSMMC1 62
-#define HCLK_CFC 63
-#define HCLK_SSMC 64
-#define HCLK_DRAM 65
-#define HCLK_2D 66
-
-/* pclk-gates */
-#define PCLK_UART0 72
-#define PCLK_UART1 73
-#define PCLK_UART2 74
-#define PCLK_UART3 75
-#define PCLK_I2C0 76
-#define PCLK_SDI 77
-#define PCLK_SPI0 78
-#define PCLK_ADC 79
-#define PCLK_AC97 80
-#define PCLK_I2S0 81
-#define PCLK_PWM 82
-#define PCLK_WDT 83
-#define PCLK_RTC 84
-#define PCLK_GPIO 85
-#define PCLK_SPI1 86
-#define PCLK_CHIPID 87
-#define PCLK_I2C1 88
-#define PCLK_I2S1 89
-#define PCLK_PCM 90
-
-/* Total number of clocks. */
-#define NR_CLKS (PCLK_PCM + 1)
-
-#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H */
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
new file mode 100644
index 000000000000..3065375c2d8b
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Device Tree binding constants for Exynos Auto V9 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 6
+#define MOUT_SHARED1_PLL 7
+#define MOUT_SHARED2_PLL 8
+#define MOUT_SHARED3_PLL 9
+#define MOUT_SHARED4_PLL 10
+#define MOUT_CLKCMU_CMU_BOOST 11
+#define MOUT_CLKCMU_CMU_CMUREF 12
+#define MOUT_CLKCMU_ACC_BUS 13
+#define MOUT_CLKCMU_APM_BUS 14
+#define MOUT_CLKCMU_AUD_CPU 15
+#define MOUT_CLKCMU_AUD_BUS 16
+#define MOUT_CLKCMU_BUSC_BUS 17
+#define MOUT_CLKCMU_BUSMC_BUS 19
+#define MOUT_CLKCMU_CORE_BUS 20
+#define MOUT_CLKCMU_CPUCL0_SWITCH 21
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 22
+#define MOUT_CLKCMU_CPUCL1_SWITCH 24
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 25
+#define MOUT_CLKCMU_DPTX_BUS 26
+#define MOUT_CLKCMU_DPTX_DPGTC 27
+#define MOUT_CLKCMU_DPUM_BUS 28
+#define MOUT_CLKCMU_DPUS0_BUS 29
+#define MOUT_CLKCMU_DPUS1_BUS 30
+#define MOUT_CLKCMU_FSYS0_BUS 31
+#define MOUT_CLKCMU_FSYS0_PCIE 32
+#define MOUT_CLKCMU_FSYS1_BUS 33
+#define MOUT_CLKCMU_FSYS1_USBDRD 34
+#define MOUT_CLKCMU_FSYS1_MMC_CARD 35
+#define MOUT_CLKCMU_FSYS2_BUS 36
+#define MOUT_CLKCMU_FSYS2_UFS_EMBD 37
+#define MOUT_CLKCMU_FSYS2_ETHERNET 38
+#define MOUT_CLKCMU_G2D_G2D 39
+#define MOUT_CLKCMU_G2D_MSCL 40
+#define MOUT_CLKCMU_G3D00_SWITCH 41
+#define MOUT_CLKCMU_G3D01_SWITCH 42
+#define MOUT_CLKCMU_G3D1_SWITCH 43
+#define MOUT_CLKCMU_ISPB_BUS 44
+#define MOUT_CLKCMU_MFC_MFC 45
+#define MOUT_CLKCMU_MFC_WFD 46
+#define MOUT_CLKCMU_MIF_SWITCH 47
+#define MOUT_CLKCMU_MIF_BUSP 48
+#define MOUT_CLKCMU_NPU_BUS 49
+#define MOUT_CLKCMU_PERIC0_BUS 50
+#define MOUT_CLKCMU_PERIC0_IP 51
+#define MOUT_CLKCMU_PERIC1_BUS 52
+#define MOUT_CLKCMU_PERIC1_IP 53
+#define MOUT_CLKCMU_PERIS_BUS 54
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV3 101
+#define DOUT_SHARED0_DIV2 102
+#define DOUT_SHARED1_DIV3 103
+#define DOUT_SHARED1_DIV2 104
+#define DOUT_SHARED1_DIV4 105
+#define DOUT_SHARED2_DIV3 106
+#define DOUT_SHARED2_DIV2 107
+#define DOUT_SHARED2_DIV4 108
+#define DOUT_SHARED4_DIV2 109
+#define DOUT_SHARED4_DIV4 110
+#define DOUT_CLKCMU_CMU_BOOST 111
+#define DOUT_CLKCMU_ACC_BUS 112
+#define DOUT_CLKCMU_APM_BUS 113
+#define DOUT_CLKCMU_AUD_CPU 114
+#define DOUT_CLKCMU_AUD_BUS 115
+#define DOUT_CLKCMU_BUSC_BUS 116
+#define DOUT_CLKCMU_BUSMC_BUS 118
+#define DOUT_CLKCMU_CORE_BUS 119
+#define DOUT_CLKCMU_CPUCL0_SWITCH 120
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 121
+#define DOUT_CLKCMU_CPUCL1_SWITCH 123
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 124
+#define DOUT_CLKCMU_DPTX_BUS 125
+#define DOUT_CLKCMU_DPTX_DPGTC 126
+#define DOUT_CLKCMU_DPUM_BUS 127
+#define DOUT_CLKCMU_DPUS0_BUS 128
+#define DOUT_CLKCMU_DPUS1_BUS 129
+#define DOUT_CLKCMU_FSYS0_BUS 130
+#define DOUT_CLKCMU_FSYS0_PCIE 131
+#define DOUT_CLKCMU_FSYS1_BUS 132
+#define DOUT_CLKCMU_FSYS1_USBDRD 133
+#define DOUT_CLKCMU_FSYS2_BUS 134
+#define DOUT_CLKCMU_FSYS2_UFS_EMBD 135
+#define DOUT_CLKCMU_FSYS2_ETHERNET 136
+#define DOUT_CLKCMU_G2D_G2D 137
+#define DOUT_CLKCMU_G2D_MSCL 138
+#define DOUT_CLKCMU_G3D00_SWITCH 139
+#define DOUT_CLKCMU_G3D01_SWITCH 140
+#define DOUT_CLKCMU_G3D1_SWITCH 141
+#define DOUT_CLKCMU_ISPB_BUS 142
+#define DOUT_CLKCMU_MFC_MFC 143
+#define DOUT_CLKCMU_MFC_WFD 144
+#define DOUT_CLKCMU_MIF_SWITCH 145
+#define DOUT_CLKCMU_MIF_BUSP 146
+#define DOUT_CLKCMU_NPU_BUS 147
+#define DOUT_CLKCMU_PERIC0_BUS 148
+#define DOUT_CLKCMU_PERIC0_IP 149
+#define DOUT_CLKCMU_PERIC1_BUS 150
+#define DOUT_CLKCMU_PERIC1_IP 151
+#define DOUT_CLKCMU_PERIS_BUS 152
+
+/* GAT in CMU_TOP */
+#define GOUT_CLKCMU_CMU_BOOST 201
+#define GOUT_CLKCMU_CPUCL0_BOOST 202
+#define GOUT_CLKCMU_CPUCL1_BOOST 203
+#define GOUT_CLKCMU_CORE_BOOST 204
+#define GOUT_CLKCMU_BUSC_BOOST 205
+#define GOUT_CLKCMU_BUSMC_BOOST 206
+#define GOUT_CLKCMU_MIF_BOOST 207
+#define GOUT_CLKCMU_ACC_BUS 208
+#define GOUT_CLKCMU_APM_BUS 209
+#define GOUT_CLKCMU_AUD_CPU 210
+#define GOUT_CLKCMU_AUD_BUS 211
+#define GOUT_CLKCMU_BUSC_BUS 212
+#define GOUT_CLKCMU_BUSMC_BUS 214
+#define GOUT_CLKCMU_CORE_BUS 215
+#define GOUT_CLKCMU_CPUCL0_SWITCH 216
+#define GOUT_CLKCMU_CPUCL0_CLUSTER 217
+#define GOUT_CLKCMU_CPUCL1_SWITCH 219
+#define GOUT_CLKCMU_CPUCL1_CLUSTER 220
+#define GOUT_CLKCMU_DPTX_BUS 221
+#define GOUT_CLKCMU_DPTX_DPGTC 222
+#define GOUT_CLKCMU_DPUM_BUS 223
+#define GOUT_CLKCMU_DPUS0_BUS 224
+#define GOUT_CLKCMU_DPUS1_BUS 225
+#define GOUT_CLKCMU_FSYS0_BUS 226
+#define GOUT_CLKCMU_FSYS0_PCIE 227
+#define GOUT_CLKCMU_FSYS1_BUS 228
+#define GOUT_CLKCMU_FSYS1_USBDRD 229
+#define GOUT_CLKCMU_FSYS1_MMC_CARD 230
+#define GOUT_CLKCMU_FSYS2_BUS 231
+#define GOUT_CLKCMU_FSYS2_UFS_EMBD 232
+#define GOUT_CLKCMU_FSYS2_ETHERNET 233
+#define GOUT_CLKCMU_G2D_G2D 234
+#define GOUT_CLKCMU_G2D_MSCL 235
+#define GOUT_CLKCMU_G3D00_SWITCH 236
+#define GOUT_CLKCMU_G3D01_SWITCH 237
+#define GOUT_CLKCMU_G3D1_SWITCH 238
+#define GOUT_CLKCMU_ISPB_BUS 239
+#define GOUT_CLKCMU_MFC_MFC 240
+#define GOUT_CLKCMU_MFC_WFD 241
+#define GOUT_CLKCMU_MIF_SWITCH 242
+#define GOUT_CLKCMU_MIF_BUSP 243
+#define GOUT_CLKCMU_NPU_BUS 244
+#define GOUT_CLKCMU_PERIC0_BUS 245
+#define GOUT_CLKCMU_PERIC0_IP 246
+#define GOUT_CLKCMU_PERIC1_BUS 247
+#define GOUT_CLKCMU_PERIC1_IP 248
+#define GOUT_CLKCMU_PERIS_BUS 249
+
+/* CMU_BUSMC */
+#define CLK_MOUT_BUSMC_BUS_USER 1
+#define CLK_DOUT_BUSMC_BUSP 2
+#define CLK_GOUT_BUSMC_PDMA0_PCLK 3
+#define CLK_GOUT_BUSMC_SPDMA_PCLK 4
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_DOUT_CORE_BUSP 2
+#define CLK_GOUT_CORE_CCI_CLK 3
+#define CLK_GOUT_CORE_CCI_PCLK 4
+#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_PCIE_USER 2
+#define CLK_GOUT_FSYS0_BUS_PCLK 3
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_REFCLK 4
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_REFCLK 5
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_DBI_ACLK 6
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_MSTR_ACLK 7
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_SLV_ACLK 8
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_DBI_ACLK 9
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_MSTR_ACLK 10
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_SLV_ACLK 11
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_PIPE_CLK 12
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L0_CLK 13
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L0_CLK 14
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_REFCLK 15
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_REFCLK 16
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_DBI_ACLK 17
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_MSTR_ACLK 18
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_SLV_ACLK 19
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_DBI_ACLK 20
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_MSTR_ACLK 21
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_SLV_ACLK 22
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_PIPE_CLK 23
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L1_CLK 24
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L1_CLK 25
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_REFCLK 26
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_REFCLK 27
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_DBI_ACLK 28
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_MSTR_ACLK 29
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_SLV_ACLK 30
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_DBI_ACLK 31
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_MSTR_ACLK 32
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_SLV_ACLK 33
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_PIPE_CLK 34
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK 35
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK 36
+
+/* CMU_FSYS1 */
+#define FOUT_MMC_PLL 1
+
+#define CLK_MOUT_FSYS1_BUS_USER 2
+#define CLK_MOUT_FSYS1_MMC_PLL 3
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 4
+#define CLK_MOUT_FSYS1_USBDRD_USER 5
+#define CLK_MOUT_FSYS1_MMC_CARD 6
+
+#define CLK_DOUT_FSYS1_MMC_CARD 7
+
+#define CLK_GOUT_FSYS1_PCLK 8
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 9
+#define CLK_GOUT_FSYS1_MMC_CARD_ACLK 10
+#define CLK_GOUT_FSYS1_USB20DRD_0_REFCLK 11
+#define CLK_GOUT_FSYS1_USB20DRD_1_REFCLK 12
+#define CLK_GOUT_FSYS1_USB30DRD_0_REFCLK 13
+#define CLK_GOUT_FSYS1_USB30DRD_1_REFCLK 14
+#define CLK_GOUT_FSYS1_USB20_0_ACLK 15
+#define CLK_GOUT_FSYS1_USB20_1_ACLK 16
+#define CLK_GOUT_FSYS1_USB30_0_ACLK 17
+#define CLK_GOUT_FSYS1_USB30_1_ACLK 18
+
+/* CMU_FSYS2 */
+#define CLK_MOUT_FSYS2_BUS_USER 1
+#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
+#define CLK_MOUT_FSYS2_ETHERNET_USER 3
+#define CLK_GOUT_FSYS2_UFS_EMBD0_ACLK 4
+#define CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO 5
+#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
+#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_IP_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI_I2C 9
+
+#define CLK_DOUT_PERIC0_USI00_USI 10
+#define CLK_DOUT_PERIC0_USI01_USI 11
+#define CLK_DOUT_PERIC0_USI02_USI 12
+#define CLK_DOUT_PERIC0_USI03_USI 13
+#define CLK_DOUT_PERIC0_USI04_USI 14
+#define CLK_DOUT_PERIC0_USI05_USI 15
+#define CLK_DOUT_PERIC0_USI_I2C 16
+
+#define CLK_GOUT_PERIC0_IPCLK_0 20
+#define CLK_GOUT_PERIC0_IPCLK_1 21
+#define CLK_GOUT_PERIC0_IPCLK_2 22
+#define CLK_GOUT_PERIC0_IPCLK_3 23
+#define CLK_GOUT_PERIC0_IPCLK_4 24
+#define CLK_GOUT_PERIC0_IPCLK_5 25
+#define CLK_GOUT_PERIC0_IPCLK_6 26
+#define CLK_GOUT_PERIC0_IPCLK_7 27
+#define CLK_GOUT_PERIC0_IPCLK_8 28
+#define CLK_GOUT_PERIC0_IPCLK_9 29
+#define CLK_GOUT_PERIC0_IPCLK_10 30
+#define CLK_GOUT_PERIC0_IPCLK_11 31
+#define CLK_GOUT_PERIC0_PCLK_0 32
+#define CLK_GOUT_PERIC0_PCLK_1 33
+#define CLK_GOUT_PERIC0_PCLK_2 34
+#define CLK_GOUT_PERIC0_PCLK_3 35
+#define CLK_GOUT_PERIC0_PCLK_4 36
+#define CLK_GOUT_PERIC0_PCLK_5 37
+#define CLK_GOUT_PERIC0_PCLK_6 38
+#define CLK_GOUT_PERIC0_PCLK_7 39
+#define CLK_GOUT_PERIC0_PCLK_8 40
+#define CLK_GOUT_PERIC0_PCLK_9 41
+#define CLK_GOUT_PERIC0_PCLK_10 42
+#define CLK_GOUT_PERIC0_PCLK_11 43
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_IP_USER 2
+#define CLK_MOUT_PERIC1_USI06_USI 3
+#define CLK_MOUT_PERIC1_USI07_USI 4
+#define CLK_MOUT_PERIC1_USI08_USI 5
+#define CLK_MOUT_PERIC1_USI09_USI 6
+#define CLK_MOUT_PERIC1_USI10_USI 7
+#define CLK_MOUT_PERIC1_USI11_USI 8
+#define CLK_MOUT_PERIC1_USI_I2C 9
+
+#define CLK_DOUT_PERIC1_USI06_USI 10
+#define CLK_DOUT_PERIC1_USI07_USI 11
+#define CLK_DOUT_PERIC1_USI08_USI 12
+#define CLK_DOUT_PERIC1_USI09_USI 13
+#define CLK_DOUT_PERIC1_USI10_USI 14
+#define CLK_DOUT_PERIC1_USI11_USI 15
+#define CLK_DOUT_PERIC1_USI_I2C 16
+
+#define CLK_GOUT_PERIC1_IPCLK_0 20
+#define CLK_GOUT_PERIC1_IPCLK_1 21
+#define CLK_GOUT_PERIC1_IPCLK_2 22
+#define CLK_GOUT_PERIC1_IPCLK_3 23
+#define CLK_GOUT_PERIC1_IPCLK_4 24
+#define CLK_GOUT_PERIC1_IPCLK_5 25
+#define CLK_GOUT_PERIC1_IPCLK_6 26
+#define CLK_GOUT_PERIC1_IPCLK_7 27
+#define CLK_GOUT_PERIC1_IPCLK_8 28
+#define CLK_GOUT_PERIC1_IPCLK_9 29
+#define CLK_GOUT_PERIC1_IPCLK_10 30
+#define CLK_GOUT_PERIC1_IPCLK_11 31
+#define CLK_GOUT_PERIC1_PCLK_0 32
+#define CLK_GOUT_PERIC1_PCLK_1 33
+#define CLK_GOUT_PERIC1_PCLK_2 34
+#define CLK_GOUT_PERIC1_PCLK_3 35
+#define CLK_GOUT_PERIC1_PCLK_4 36
+#define CLK_GOUT_PERIC1_PCLK_5 37
+#define CLK_GOUT_PERIC1_PCLK_6 38
+#define CLK_GOUT_PERIC1_PCLK_7 39
+#define CLK_GOUT_PERIC1_PCLK_8 40
+#define CLK_GOUT_PERIC1_PCLK_9 41
+#define CLK_GOUT_PERIC1_PCLK_10 42
+#define CLK_GOUT_PERIC1_PCLK_11 43
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_GOUT_SYSREG_PERIS_PCLK 2
+#define CLK_GOUT_WDT_CLUSTER0 3
+#define CLK_GOUT_WDT_CLUSTER1 4
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */
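
Each CMU block in the header above is a separate clock provider with its own index space, which is why the numbering restarts at 1 under every "CMU_..." heading: index 1 means FOUT_SHARED0_PLL under the CMU_TOP node but CLK_MOUT_BUSMC_BUS_USER under the CMU_BUSMC node. A consumer sketch under stated assumptions — the &cmu_peric0 label, node name, unit address, and clock-names are illustrative, not taken from this header:

	serial_0: serial@10300000 {
		clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PCLK_0>,
			 <&cmu_peric0 CLK_GOUT_PERIC0_IPCLK_0>;
		clock-names = "uart", "clk_uart_baud0";
	};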
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
index 3b21d0522c91..5af372e8385f 100644
--- a/include/dt-bindings/clock/sifive-fu540-prci.h
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -10,9 +10,9 @@
/* Clock indexes for use by Device Tree data and the PRCI driver */
-#define PRCI_CLK_COREPLL 0
-#define PRCI_CLK_DDRPLL 1
-#define PRCI_CLK_GEMGXLPLL 2
-#define PRCI_CLK_TLCLK 3
+#define FU540_PRCI_CLK_COREPLL 0
+#define FU540_PRCI_CLK_DDRPLL 1
+#define FU540_PRCI_CLK_GEMGXLPLL 2
+#define FU540_PRCI_CLK_TLCLK 3
#endif
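
The hunk above renames the four FU540 PRCI indices to a SoC-prefixed form, presumably so they can coexist with the FU740 macros introduced in the next file. The numeric values are unchanged, so compiled device trees are unaffected, but any device tree source or driver that spells out the old names must be updated in step. A sketch of the required change, assuming a PRCI node labeled &prci:

	/* before */
	clocks = <&prci PRCI_CLK_TLCLK>;
	/* after */
	clocks = <&prci FU540_PRCI_CLK_TLCLK>;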
diff --git a/include/dt-bindings/clock/sifive-fu740-prci.h b/include/dt-bindings/clock/sifive-fu740-prci.h
new file mode 100644
index 000000000000..672bdadbf6c0
--- /dev/null
+++ b/include/dt-bindings/clock/sifive-fu740-prci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ * Zong Li
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define FU740_PRCI_CLK_COREPLL 0
+#define FU740_PRCI_CLK_DDRPLL 1
+#define FU740_PRCI_CLK_GEMGXLPLL 2
+#define FU740_PRCI_CLK_DVFSCOREPLL 3
+#define FU740_PRCI_CLK_HFPCLKPLL 4
+#define FU740_PRCI_CLK_CLTXPLL 5
+#define FU740_PRCI_CLK_TLCLK 6
+#define FU740_PRCI_CLK_PCLK 7
+#define FU740_PRCI_CLK_PCIE_AUX 8
+
+#endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */
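
The FU740 header mirrors the FU540 one but grows the index space from four clocks to nine, adding among others a DVFS core PLL and a PCIe auxiliary clock. A consumer sketch, with the &prci label, node details, and clock-names assumed for illustration:

	pcie@e00000000 {
		clocks = <&prci FU740_PRCI_CLK_PCIE_AUX>;
		clock-names = "pcie_aux";
	};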
diff --git a/include/dt-bindings/clock/sophgo,cv1800.h b/include/dt-bindings/clock/sophgo,cv1800.h
new file mode 100644
index 000000000000..cfbeca25a650
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,cv1800.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+#define __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+
+#define CLK_MPLL 0
+#define CLK_TPLL 1
+#define CLK_FPLL 2
+#define CLK_MIPIMPLL 3
+#define CLK_A0PLL 4
+#define CLK_DISPPLL 5
+#define CLK_CAM0PLL 6
+#define CLK_CAM1PLL 7
+
+#define CLK_MIPIMPLL_D3 8
+#define CLK_CAM0PLL_D2 9
+#define CLK_CAM0PLL_D3 10
+
+#define CLK_TPU 11
+#define CLK_TPU_FAB 12
+#define CLK_AHB_ROM 13
+#define CLK_DDR_AXI_REG 14
+#define CLK_RTC_25M 15
+#define CLK_SRC_RTC_SYS_0 16
+#define CLK_TEMPSEN 17
+#define CLK_SARADC 18
+#define CLK_EFUSE 19
+#define CLK_APB_EFUSE 20
+#define CLK_DEBUG 21
+#define CLK_AP_DEBUG 22
+#define CLK_XTAL_MISC 23
+#define CLK_AXI4_EMMC 24
+#define CLK_EMMC 25
+#define CLK_EMMC_100K 26
+#define CLK_AXI4_SD0 27
+#define CLK_SD0 28
+#define CLK_SD0_100K 29
+#define CLK_AXI4_SD1 30
+#define CLK_SD1 31
+#define CLK_SD1_100K 32
+#define CLK_SPI_NAND 33
+#define CLK_ETH0_500M 34
+#define CLK_AXI4_ETH0 35
+#define CLK_ETH1_500M 36
+#define CLK_AXI4_ETH1 37
+#define CLK_APB_GPIO 38
+#define CLK_APB_GPIO_INTR 39
+#define CLK_GPIO_DB 40
+#define CLK_AHB_SF 41
+#define CLK_AHB_SF1 42
+#define CLK_A24M 43
+#define CLK_AUDSRC 44
+#define CLK_APB_AUDSRC 45
+#define CLK_SDMA_AXI 46
+#define CLK_SDMA_AUD0 47
+#define CLK_SDMA_AUD1 48
+#define CLK_SDMA_AUD2 49
+#define CLK_SDMA_AUD3 50
+#define CLK_I2C 51
+#define CLK_APB_I2C 52
+#define CLK_APB_I2C0 53
+#define CLK_APB_I2C1 54
+#define CLK_APB_I2C2 55
+#define CLK_APB_I2C3 56
+#define CLK_APB_I2C4 57
+#define CLK_APB_WDT 58
+#define CLK_PWM_SRC 59
+#define CLK_PWM 60
+#define CLK_SPI 61
+#define CLK_APB_SPI0 62
+#define CLK_APB_SPI1 63
+#define CLK_APB_SPI2 64
+#define CLK_APB_SPI3 65
+#define CLK_1M 66
+#define CLK_CAM0_200 67
+#define CLK_PM 68
+#define CLK_TIMER0 69
+#define CLK_TIMER1 70
+#define CLK_TIMER2 71
+#define CLK_TIMER3 72
+#define CLK_TIMER4 73
+#define CLK_TIMER5 74
+#define CLK_TIMER6 75
+#define CLK_TIMER7 76
+#define CLK_UART0 77
+#define CLK_APB_UART0 78
+#define CLK_UART1 79
+#define CLK_APB_UART1 80
+#define CLK_UART2 81
+#define CLK_APB_UART2 82
+#define CLK_UART3 83
+#define CLK_APB_UART3 84
+#define CLK_UART4 85
+#define CLK_APB_UART4 86
+#define CLK_APB_I2S0 87
+#define CLK_APB_I2S1 88
+#define CLK_APB_I2S2 89
+#define CLK_APB_I2S3 90
+#define CLK_AXI4_USB 91
+#define CLK_APB_USB 92
+#define CLK_USB_125M 93
+#define CLK_USB_33K 94
+#define CLK_USB_12M 95
+#define CLK_AXI4 96
+#define CLK_AXI6 97
+#define CLK_DSI_ESC 98
+#define CLK_AXI_VIP 99
+#define CLK_SRC_VIP_SYS_0 100
+#define CLK_SRC_VIP_SYS_1 101
+#define CLK_SRC_VIP_SYS_2 102
+#define CLK_SRC_VIP_SYS_3 103
+#define CLK_SRC_VIP_SYS_4 104
+#define CLK_CSI_BE_VIP 105
+#define CLK_CSI_MAC0_VIP 106
+#define CLK_CSI_MAC1_VIP 107
+#define CLK_CSI_MAC2_VIP 108
+#define CLK_CSI0_RX_VIP 109
+#define CLK_CSI1_RX_VIP 110
+#define CLK_ISP_TOP_VIP 111
+#define CLK_IMG_D_VIP 112
+#define CLK_IMG_V_VIP 113
+#define CLK_SC_TOP_VIP 114
+#define CLK_SC_D_VIP 115
+#define CLK_SC_V1_VIP 116
+#define CLK_SC_V2_VIP 117
+#define CLK_SC_V3_VIP 118
+#define CLK_DWA_VIP 119
+#define CLK_BT_VIP 120
+#define CLK_DISP_VIP 121
+#define CLK_DSI_MAC_VIP 122
+#define CLK_LVDS0_VIP 123
+#define CLK_LVDS1_VIP 124
+#define CLK_PAD_VI_VIP 125
+#define CLK_PAD_VI1_VIP 126
+#define CLK_PAD_VI2_VIP 127
+#define CLK_CFG_REG_VIP 128
+#define CLK_VIP_IP0 129
+#define CLK_VIP_IP1 130
+#define CLK_VIP_IP2 131
+#define CLK_VIP_IP3 132
+#define CLK_IVE_VIP 133
+#define CLK_RAW_VIP 134
+#define CLK_OSDC_VIP 135
+#define CLK_CAM0_VIP 136
+#define CLK_AXI_VIDEO_CODEC 137
+#define CLK_VC_SRC0 138
+#define CLK_VC_SRC1 139
+#define CLK_VC_SRC2 140
+#define CLK_H264C 141
+#define CLK_APB_H264C 142
+#define CLK_H265C 143
+#define CLK_APB_H265C 144
+#define CLK_JPEG 145
+#define CLK_APB_JPEG 146
+#define CLK_CAM0 147
+#define CLK_CAM1 148
+#define CLK_WGN 149
+#define CLK_WGN0 150
+#define CLK_WGN1 151
+#define CLK_WGN2 152
+#define CLK_KEYSCAN 153
+#define CLK_CFG_REG_VC 154
+#define CLK_C906_0 155
+#define CLK_C906_1 156
+#define CLK_A53 157
+#define CLK_CPU_AXI0 158
+#define CLK_CPU_GIC 159
+#define CLK_XTAL_AP 160
+
+// Only for CV181x
+#define CLK_DISP_SRC_VIP 161
+
+#endif /* __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ */
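
Unlike the per-CMU Samsung headers, these CV1800 indices form one flat space under a single clock-controller node, with index 161 reserved for the CV181x variant only. A consumer sketch; the &clk label, node name, unit address, and clock-names are assumptions:

	uart0: serial@4140000 {
		clocks = <&clk CLK_UART0>, <&clk CLK_APB_UART0>;
		clock-names = "baudclk", "apb_pclk";
	};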
diff --git a/include/dt-bindings/clock/sprd,ums512-clk.h b/include/dt-bindings/clock/sprd,ums512-clk.h
new file mode 100644
index 000000000000..4f1d90849944
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,ums512-clk.h
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Unisoc UMS512 SoC DTS file
+ *
+ * Copyright (C) 2022, Unisoc Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_UMS512_H_
+#define _DT_BINDINGS_CLK_UMS512_H_
+
+#define CLK_26M_AUD 0
+#define CLK_13M 1
+#define CLK_6M5 2
+#define CLK_4M3 3
+#define CLK_2M 4
+#define CLK_1M 5
+#define CLK_250K 6
+#define CLK_RCO_25M 7
+#define CLK_RCO_4M 8
+#define CLK_RCO_2M 9
+#define CLK_ISPPLL_GATE 10
+#define CLK_DPLL0_GATE 11
+#define CLK_DPLL1_GATE 12
+#define CLK_LPLL_GATE 13
+#define CLK_TWPLL_GATE 14
+#define CLK_GPLL_GATE 15
+#define CLK_RPLL_GATE 16
+#define CLK_CPPLL_GATE 17
+#define CLK_MPLL0_GATE 18
+#define CLK_MPLL1_GATE 19
+#define CLK_MPLL2_GATE 20
+#define CLK_PMU_GATE_NUM (CLK_MPLL2_GATE + 1)
+
+#define CLK_DPLL0 0
+#define CLK_DPLL0_58M31 1
+#define CLK_ANLG_PHY_G0_NUM (CLK_DPLL0_58M31 + 1)
+
+#define CLK_MPLL1 0
+#define CLK_MPLL1_63M38 1
+#define CLK_ANLG_PHY_G2_NUM (CLK_MPLL1_63M38 + 1)
+
+#define CLK_RPLL 0
+#define CLK_AUDIO_GATE 1
+#define CLK_MPLL0 2
+#define CLK_MPLL0_56M88 3
+#define CLK_MPLL2 4
+#define CLK_MPLL2_47M13 5
+#define CLK_ANLG_PHY_G3_NUM (CLK_MPLL2_47M13 + 1)
+
+#define CLK_TWPLL 0
+#define CLK_TWPLL_768M 1
+#define CLK_TWPLL_384M 2
+#define CLK_TWPLL_192M 3
+#define CLK_TWPLL_96M 4
+#define CLK_TWPLL_48M 5
+#define CLK_TWPLL_24M 6
+#define CLK_TWPLL_12M 7
+#define CLK_TWPLL_512M 8
+#define CLK_TWPLL_256M 9
+#define CLK_TWPLL_128M 10
+#define CLK_TWPLL_64M 11
+#define CLK_TWPLL_307M2 12
+#define CLK_TWPLL_219M4 13
+#define CLK_TWPLL_170M6 14
+#define CLK_TWPLL_153M6 15
+#define CLK_TWPLL_76M8 16
+#define CLK_TWPLL_51M2 17
+#define CLK_TWPLL_38M4 18
+#define CLK_TWPLL_19M2 19
+#define CLK_TWPLL_12M29 20
+#define CLK_LPLL 21
+#define CLK_LPLL_614M4 22
+#define CLK_LPLL_409M6 23
+#define CLK_LPLL_245M76 24
+#define CLK_LPLL_30M72 25
+#define CLK_ISPPLL 26
+#define CLK_ISPPLL_468M 27
+#define CLK_ISPPLL_78M 28
+#define CLK_GPLL 29
+#define CLK_GPLL_40M 30
+#define CLK_CPPLL 31
+#define CLK_CPPLL_39M32 32
+#define CLK_ANLG_PHY_GC_NUM (CLK_CPPLL_39M32 + 1)
+
+#define CLK_AP_APB 0
+#define CLK_IPI 1
+#define CLK_AP_UART0 2
+#define CLK_AP_UART1 3
+#define CLK_AP_UART2 4
+#define CLK_AP_I2C0 5
+#define CLK_AP_I2C1 6
+#define CLK_AP_I2C2 7
+#define CLK_AP_I2C3 8
+#define CLK_AP_I2C4 9
+#define CLK_AP_SPI0 10
+#define CLK_AP_SPI1 11
+#define CLK_AP_SPI2 12
+#define CLK_AP_SPI3 13
+#define CLK_AP_IIS0 14
+#define CLK_AP_IIS1 15
+#define CLK_AP_IIS2 16
+#define CLK_AP_SIM 17
+#define CLK_AP_CE 18
+#define CLK_SDIO0_2X 19
+#define CLK_SDIO1_2X 20
+#define CLK_EMMC_2X 21
+#define CLK_VSP 22
+#define CLK_DISPC0 23
+#define CLK_DISPC0_DPI 24
+#define CLK_DSI_APB 25
+#define CLK_DSI_RXESC 26
+#define CLK_DSI_LANEBYTE 27
+#define CLK_VDSP 28
+#define CLK_VDSP_M 29
+#define CLK_AP_CLK_NUM (CLK_VDSP_M + 1)
+
+#define CLK_DSI_EB 0
+#define CLK_DISPC_EB 1
+#define CLK_VSP_EB 2
+#define CLK_VDMA_EB 3
+#define CLK_DMA_PUB_EB 4
+#define CLK_DMA_SEC_EB 5
+#define CLK_IPI_EB 6
+#define CLK_AHB_CKG_EB 7
+#define CLK_BM_CLK_EB 8
+#define CLK_AP_AHB_GATE_NUM (CLK_BM_CLK_EB + 1)
+
+#define CLK_AON_APB 0
+#define CLK_ADI 1
+#define CLK_AUX0 2
+#define CLK_AUX1 3
+#define CLK_AUX2 4
+#define CLK_PROBE 5
+#define CLK_PWM0 6
+#define CLK_PWM1 7
+#define CLK_PWM2 8
+#define CLK_PWM3 9
+#define CLK_EFUSE 10
+#define CLK_UART0 11
+#define CLK_UART1 12
+#define CLK_THM0 13
+#define CLK_THM1 14
+#define CLK_THM2 15
+#define CLK_THM3 16
+#define CLK_AON_I2C 17
+#define CLK_AON_IIS 18
+#define CLK_SCC 19
+#define CLK_APCPU_DAP 20
+#define CLK_APCPU_DAP_MTCK 21
+#define CLK_APCPU_TS 22
+#define CLK_DEBUG_TS 23
+#define CLK_DSI_TEST_S 24
+#define CLK_DJTAG_TCK 25
+#define CLK_DJTAG_TCK_HW 26
+#define CLK_AON_TMR 27
+#define CLK_AON_PMU 28
+#define CLK_DEBOUNCE 29
+#define CLK_APCPU_PMU 30
+#define CLK_TOP_DVFS 31
+#define CLK_OTG_UTMI 32
+#define CLK_OTG_REF 33
+#define CLK_CSSYS 34
+#define CLK_CSSYS_PUB 35
+#define CLK_CSSYS_APB 36
+#define CLK_AP_AXI 37
+#define CLK_AP_MM 38
+#define CLK_SDIO2_2X 39
+#define CLK_ANALOG_IO_APB 40
+#define CLK_DMC_REF_CLK 41
+#define CLK_EMC 42
+#define CLK_USB 43
+#define CLK_26M_PMU 44
+#define CLK_AON_APB_NUM (CLK_26M_PMU + 1)
+
+#define CLK_MM_AHB 0
+#define CLK_MM_MTX 1
+#define CLK_SENSOR0 2
+#define CLK_SENSOR1 3
+#define CLK_SENSOR2 4
+#define CLK_CPP 5
+#define CLK_JPG 6
+#define CLK_FD 7
+#define CLK_DCAM_IF 8
+#define CLK_DCAM_AXI 9
+#define CLK_ISP 10
+#define CLK_MIPI_CSI0 11
+#define CLK_MIPI_CSI1 12
+#define CLK_MIPI_CSI2 13
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI2 + 1)
+
+#define CLK_RC100M_CAL_EB 0
+#define CLK_DJTAG_TCK_EB 1
+#define CLK_DJTAG_EB 2
+#define CLK_AUX0_EB 3
+#define CLK_AUX1_EB 4
+#define CLK_AUX2_EB 5
+#define CLK_PROBE_EB 6
+#define CLK_MM_EB 7
+#define CLK_GPU_EB 8
+#define CLK_MSPI_EB 9
+#define CLK_APCPU_DAP_EB 10
+#define CLK_AON_CSSYS_EB 11
+#define CLK_CSSYS_APB_EB 12
+#define CLK_CSSYS_PUB_EB 13
+#define CLK_SDPHY_CFG_EB 14
+#define CLK_SDPHY_REF_EB 15
+#define CLK_EFUSE_EB 16
+#define CLK_GPIO_EB 17
+#define CLK_MBOX_EB 18
+#define CLK_KPD_EB 19
+#define CLK_AON_SYST_EB 20
+#define CLK_AP_SYST_EB 21
+#define CLK_AON_TMR_EB 22
+#define CLK_OTG_UTMI_EB 23
+#define CLK_OTG_PHY_EB 24
+#define CLK_SPLK_EB 25
+#define CLK_PIN_EB 26
+#define CLK_ANA_EB 27
+#define CLK_APCPU_TS0_EB 28
+#define CLK_APB_BUSMON_EB 29
+#define CLK_AON_IIS_EB 30
+#define CLK_SCC_EB 31
+#define CLK_THM0_EB 32
+#define CLK_THM1_EB 33
+#define CLK_THM2_EB 34
+#define CLK_ASIM_TOP_EB 35
+#define CLK_I2C_EB 36
+#define CLK_PMU_EB 37
+#define CLK_ADI_EB 38
+#define CLK_EIC_EB 39
+#define CLK_AP_INTC0_EB 40
+#define CLK_AP_INTC1_EB 41
+#define CLK_AP_INTC2_EB 42
+#define CLK_AP_INTC3_EB 43
+#define CLK_AP_INTC4_EB 44
+#define CLK_AP_INTC5_EB 45
+#define CLK_AUDCP_INTC_EB 46
+#define CLK_AP_TMR0_EB 47
+#define CLK_AP_TMR1_EB 48
+#define CLK_AP_TMR2_EB 49
+#define CLK_PWM0_EB 50
+#define CLK_PWM1_EB 51
+#define CLK_PWM2_EB 52
+#define CLK_PWM3_EB 53
+#define CLK_AP_WDG_EB 54
+#define CLK_APCPU_WDG_EB 55
+#define CLK_SERDES_EB 56
+#define CLK_ARCH_RTC_EB 57
+#define CLK_KPD_RTC_EB 58
+#define CLK_AON_SYST_RTC_EB 59
+#define CLK_AP_SYST_RTC_EB 60
+#define CLK_AON_TMR_RTC_EB 61
+#define CLK_EIC_RTC_EB 62
+#define CLK_EIC_RTCDV5_EB 63
+#define CLK_AP_WDG_RTC_EB 64
+#define CLK_AC_WDG_RTC_EB 65
+#define CLK_AP_TMR0_RTC_EB 66
+#define CLK_AP_TMR1_RTC_EB 67
+#define CLK_AP_TMR2_RTC_EB 68
+#define CLK_DCXO_LC_RTC_EB 69
+#define CLK_BB_CAL_RTC_EB 70
+#define CLK_AP_EMMC_RTC_EB 71
+#define CLK_AP_SDIO0_RTC_EB 72
+#define CLK_AP_SDIO1_RTC_EB 73
+#define CLK_AP_SDIO2_RTC_EB 74
+#define CLK_DSI_CSI_TEST_EB 75
+#define CLK_DJTAG_TCK_EN 76
+#define CLK_DPHY_REF_EB 77
+#define CLK_DMC_REF_EB 78
+#define CLK_OTG_REF_EB 79
+#define CLK_TSEN_EB 80
+#define CLK_TMR_EB 81
+#define CLK_RC100M_REF_EB 82
+#define CLK_RC100M_FDK_EB 83
+#define CLK_DEBOUNCE_EB 84
+#define CLK_DET_32K_EB 85
+#define CLK_TOP_CSSYS_EB 86
+#define CLK_AP_AXI_EN 87
+#define CLK_SDIO0_2X_EN 88
+#define CLK_SDIO0_1X_EN 89
+#define CLK_SDIO1_2X_EN 90
+#define CLK_SDIO1_1X_EN 91
+#define CLK_SDIO2_2X_EN 92
+#define CLK_SDIO2_1X_EN 93
+#define CLK_EMMC_2X_EN 94
+#define CLK_EMMC_1X_EN 95
+#define CLK_PLL_TEST_EN 96
+#define CLK_CPHY_CFG_EN 97
+#define CLK_DEBUG_TS_EN 98
+#define CLK_ACCESS_AUD_EN 99
+#define CLK_AON_APB_GATE_NUM (CLK_ACCESS_AUD_EN + 1)
+
+#define CLK_MM_CPP_EB 0
+#define CLK_MM_JPG_EB 1
+#define CLK_MM_DCAM_EB 2
+#define CLK_MM_ISP_EB 3
+#define CLK_MM_CSI2_EB 4
+#define CLK_MM_CSI1_EB 5
+#define CLK_MM_CSI0_EB 6
+#define CLK_MM_CKG_EB 7
+#define CLK_ISP_AHB_EB 8
+#define CLK_MM_DVFS_EB 9
+#define CLK_MM_FD_EB 10
+#define CLK_MM_SENSOR2_EB 11
+#define CLK_MM_SENSOR1_EB 12
+#define CLK_MM_SENSOR0_EB 13
+#define CLK_MM_MIPI_CSI2_EB 14
+#define CLK_MM_MIPI_CSI1_EB 15
+#define CLK_MM_MIPI_CSI0_EB 16
+#define CLK_DCAM_AXI_EB 17
+#define CLK_ISP_AXI_EB 18
+#define CLK_MM_CPHY_EB 19
+#define CLK_MM_GATE_CLK_NUM (CLK_MM_CPHY_EB + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_APB_REG_EB 4
+#define CLK_SPI0_EB 5
+#define CLK_SPI1_EB 6
+#define CLK_SPI2_EB 7
+#define CLK_SPI3_EB 8
+#define CLK_I2C0_EB 9
+#define CLK_I2C1_EB 10
+#define CLK_I2C2_EB 11
+#define CLK_I2C3_EB 12
+#define CLK_I2C4_EB 13
+#define CLK_UART0_EB 14
+#define CLK_UART1_EB 15
+#define CLK_UART2_EB 16
+#define CLK_SIM0_32K_EB 17
+#define CLK_SPI0_LFIN_EB 18
+#define CLK_SPI1_LFIN_EB 19
+#define CLK_SPI2_LFIN_EB 20
+#define CLK_SPI3_LFIN_EB 21
+#define CLK_SDIO0_EB 22
+#define CLK_SDIO1_EB 23
+#define CLK_SDIO2_EB 24
+#define CLK_EMMC_EB 25
+#define CLK_SDIO0_32K_EB 26
+#define CLK_SDIO1_32K_EB 27
+#define CLK_SDIO2_32K_EB 28
+#define CLK_EMMC_32K_EB 29
+#define CLK_AP_APB_GATE_NUM (CLK_EMMC_32K_EB + 1)
+
+#define CLK_GPU_CORE_EB 0
+#define CLK_GPU_CORE 1
+#define CLK_GPU_MEM_EB 2
+#define CLK_GPU_MEM 3
+#define CLK_GPU_SYS_EB 4
+#define CLK_GPU_SYS 5
+#define CLK_GPU_CLK_NUM (CLK_GPU_SYS + 1)
+
+#define CLK_AUDCP_IIS0_EB 0
+#define CLK_AUDCP_IIS1_EB 1
+#define CLK_AUDCP_IIS2_EB 2
+#define CLK_AUDCP_UART_EB 3
+#define CLK_AUDCP_DMA_CP_EB 4
+#define CLK_AUDCP_DMA_AP_EB 5
+#define CLK_AUDCP_SRC48K_EB 6
+#define CLK_AUDCP_MCDT_EB 7
+#define CLK_AUDCP_VBCIFD_EB 8
+#define CLK_AUDCP_VBC_EB 9
+#define CLK_AUDCP_SPLK_EB 10
+#define CLK_AUDCP_ICU_EB 11
+#define CLK_AUDCP_DMA_AP_ASHB_EB 12
+#define CLK_AUDCP_DMA_CP_ASHB_EB 13
+#define CLK_AUDCP_AUD_EB 14
+#define CLK_AUDCP_VBC_24M_EB 15
+#define CLK_AUDCP_TMR_26M_EB 16
+#define CLK_AUDCP_DVFS_ASHB_EB 17
+#define CLK_AUDCP_AHB_GATE_NUM (CLK_AUDCP_DVFS_ASHB_EB + 1)
+
+#define CLK_AUDCP_WDG_EB 0
+#define CLK_AUDCP_RTC_WDG_EB 1
+#define CLK_AUDCP_TMR0_EB 2
+#define CLK_AUDCP_TMR1_EB 3
+#define CLK_AUDCP_APB_GATE_NUM (CLK_AUDCP_TMR1_EB + 1)
+
+#define CLK_ACORE0 0
+#define CLK_ACORE1 1
+#define CLK_ACORE2 2
+#define CLK_ACORE3 3
+#define CLK_ACORE4 4
+#define CLK_ACORE5 5
+#define CLK_PCORE0 6
+#define CLK_PCORE1 7
+#define CLK_SCU 8
+#define CLK_ACE 9
+#define CLK_PERIPH 10
+#define CLK_GIC 11
+#define CLK_ATB 12
+#define CLK_DEBUG_APB 13
+#define CLK_APCPU_SEC_NUM (CLK_DEBUG_APB + 1)
+
+#endif /* _DT_BINDINGS_CLK_UMS512_H_ */
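
Like the Exynos Auto V9 header, the UMS512 header defines one index space per clock controller, each closed by a *_NUM count macro (for example CLK_AP_CLK_NUM) that a provider driver can use to size its clock table; the numbering therefore restarts at 0 in every block, and a consumer must pair each index with the matching provider phandle. A sketch, with the &ap_clk label and node details assumed:

	uart0: serial@70100000 {
		clocks = <&ap_clk CLK_AP_UART0>;
		clock-names = "uart";
	};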
diff --git a/include/dt-bindings/clock/st,stm32mp25-rcc.h b/include/dt-bindings/clock/st,stm32mp25-rcc.h
new file mode 100644
index 000000000000..b6cf05ad4be6
--- /dev/null
+++ b/include/dt-bindings/clock/st,stm32mp25-rcc.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_CLKS_H_
+#define _DT_BINDINGS_STM32MP25_CLKS_H_
+
+/* INTERNAL/EXTERNAL OSCILLATORS */
+#define HSI_CK 0
+#define HSE_CK 1
+#define MSI_CK 2
+#define LSI_CK 3
+#define LSE_CK 4
+#define I2S_CK 5
+#define RTC_CK 6
+#define SPDIF_CK_SYMB 7
+
+/* PLL CLOCKS */
+#define PLL1_CK 8
+#define PLL2_CK 9
+#define PLL3_CK 10
+#define PLL4_CK 11
+#define PLL5_CK 12
+#define PLL6_CK 13
+#define PLL7_CK 14
+#define PLL8_CK 15
+
+#define CK_CPU1 16
+
+/* APB DIV CLOCKS */
+#define CK_ICN_APB1 17
+#define CK_ICN_APB2 18
+#define CK_ICN_APB3 19
+#define CK_ICN_APB4 20
+#define CK_ICN_APBDBG 21
+
+/* GLOBAL TIMER */
+#define TIMG1_CK 22
+#define TIMG2_CK 23
+
+/* FLEXGEN CLOCKS */
+#define CK_ICN_HS_MCU 24
+#define CK_ICN_SDMMC 25
+#define CK_ICN_DDR 26
+#define CK_ICN_DISPLAY 27
+#define CK_ICN_HSL 28
+#define CK_ICN_NIC 29
+#define CK_ICN_VID 30
+#define CK_FLEXGEN_07 31
+#define CK_FLEXGEN_08 32
+#define CK_FLEXGEN_09 33
+#define CK_FLEXGEN_10 34
+#define CK_FLEXGEN_11 35
+#define CK_FLEXGEN_12 36
+#define CK_FLEXGEN_13 37
+#define CK_FLEXGEN_14 38
+#define CK_FLEXGEN_15 39
+#define CK_FLEXGEN_16 40
+#define CK_FLEXGEN_17 41
+#define CK_FLEXGEN_18 42
+#define CK_FLEXGEN_19 43
+#define CK_FLEXGEN_20 44
+#define CK_FLEXGEN_21 45
+#define CK_FLEXGEN_22 46
+#define CK_FLEXGEN_23 47
+#define CK_FLEXGEN_24 48
+#define CK_FLEXGEN_25 49
+#define CK_FLEXGEN_26 50
+#define CK_FLEXGEN_27 51
+#define CK_FLEXGEN_28 52
+#define CK_FLEXGEN_29 53
+#define CK_FLEXGEN_30 54
+#define CK_FLEXGEN_31 55
+#define CK_FLEXGEN_32 56
+#define CK_FLEXGEN_33 57
+#define CK_FLEXGEN_34 58
+#define CK_FLEXGEN_35 59
+#define CK_FLEXGEN_36 60
+#define CK_FLEXGEN_37 61
+#define CK_FLEXGEN_38 62
+#define CK_FLEXGEN_39 63
+#define CK_FLEXGEN_40 64
+#define CK_FLEXGEN_41 65
+#define CK_FLEXGEN_42 66
+#define CK_FLEXGEN_43 67
+#define CK_FLEXGEN_44 68
+#define CK_FLEXGEN_45 69
+#define CK_FLEXGEN_46 70
+#define CK_FLEXGEN_47 71
+#define CK_FLEXGEN_48 72
+#define CK_FLEXGEN_49 73
+#define CK_FLEXGEN_50 74
+#define CK_FLEXGEN_51 75
+#define CK_FLEXGEN_52 76
+#define CK_FLEXGEN_53 77
+#define CK_FLEXGEN_54 78
+#define CK_FLEXGEN_55 79
+#define CK_FLEXGEN_56 80
+#define CK_FLEXGEN_57 81
+#define CK_FLEXGEN_58 82
+#define CK_FLEXGEN_59 83
+#define CK_FLEXGEN_60 84
+#define CK_FLEXGEN_61 85
+#define CK_FLEXGEN_62 86
+#define CK_FLEXGEN_63 87
+
+/* LOW SPEED MCU CLOCK */
+#define CK_ICN_LS_MCU 88
+
+#define CK_BUS_STM500 89
+#define CK_BUS_FMC 90
+#define CK_BUS_GPU 91
+#define CK_BUS_ETH1 92
+#define CK_BUS_ETH2 93
+#define CK_BUS_PCIE 94
+#define CK_BUS_DDRPHYC 95
+#define CK_BUS_SYSCPU1 96
+#define CK_BUS_ETHSW 97
+#define CK_BUS_HPDMA1 98
+#define CK_BUS_HPDMA2 99
+#define CK_BUS_HPDMA3 100
+#define CK_BUS_ADC12 101
+#define CK_BUS_ADC3 102
+#define CK_BUS_IPCC1 103
+#define CK_BUS_CCI 104
+#define CK_BUS_CRC 105
+#define CK_BUS_MDF1 106
+#define CK_BUS_OSPIIOM 107
+#define CK_BUS_BKPSRAM 108
+#define CK_BUS_HASH 109
+#define CK_BUS_RNG 110
+#define CK_BUS_CRYP1 111
+#define CK_BUS_CRYP2 112
+#define CK_BUS_SAES 113
+#define CK_BUS_PKA 114
+#define CK_BUS_GPIOA 115
+#define CK_BUS_GPIOB 116
+#define CK_BUS_GPIOC 117
+#define CK_BUS_GPIOD 118
+#define CK_BUS_GPIOE 119
+#define CK_BUS_GPIOF 120
+#define CK_BUS_GPIOG 121
+#define CK_BUS_GPIOH 122
+#define CK_BUS_GPIOI 123
+#define CK_BUS_GPIOJ 124
+#define CK_BUS_GPIOK 125
+#define CK_BUS_LPSRAM1 126
+#define CK_BUS_LPSRAM2 127
+#define CK_BUS_LPSRAM3 128
+#define CK_BUS_GPIOZ 129
+#define CK_BUS_LPDMA 130
+#define CK_BUS_HSEM 131
+#define CK_BUS_IPCC2 132
+#define CK_BUS_RTC 133
+#define CK_BUS_SPI8 134
+#define CK_BUS_LPUART1 135
+#define CK_BUS_I2C8 136
+#define CK_BUS_LPTIM3 137
+#define CK_BUS_LPTIM4 138
+#define CK_BUS_LPTIM5 139
+#define CK_BUS_IWDG5 140
+#define CK_BUS_WWDG2 141
+#define CK_BUS_I3C4 142
+#define CK_BUS_TIM2 143
+#define CK_BUS_TIM3 144
+#define CK_BUS_TIM4 145
+#define CK_BUS_TIM5 146
+#define CK_BUS_TIM6 147
+#define CK_BUS_TIM7 148
+#define CK_BUS_TIM10 149
+#define CK_BUS_TIM11 150
+#define CK_BUS_TIM12 151
+#define CK_BUS_TIM13 152
+#define CK_BUS_TIM14 153
+#define CK_BUS_LPTIM1 154
+#define CK_BUS_LPTIM2 155
+#define CK_BUS_SPI2 156
+#define CK_BUS_SPI3 157
+#define CK_BUS_SPDIFRX 158
+#define CK_BUS_USART2 159
+#define CK_BUS_USART3 160
+#define CK_BUS_UART4 161
+#define CK_BUS_UART5 162
+#define CK_BUS_I2C1 163
+#define CK_BUS_I2C2 164
+#define CK_BUS_I2C3 165
+#define CK_BUS_I2C4 166
+#define CK_BUS_I2C5 167
+#define CK_BUS_I2C6 168
+#define CK_BUS_I2C7 169
+#define CK_BUS_I3C1 170
+#define CK_BUS_I3C2 171
+#define CK_BUS_I3C3 172
+#define CK_BUS_TIM1 173
+#define CK_BUS_TIM8 174
+#define CK_BUS_TIM15 175
+#define CK_BUS_TIM16 176
+#define CK_BUS_TIM17 177
+#define CK_BUS_TIM20 178
+#define CK_BUS_SAI1 179
+#define CK_BUS_SAI2 180
+#define CK_BUS_SAI3 181
+#define CK_BUS_SAI4 182
+#define CK_BUS_USART1 183
+#define CK_BUS_USART6 184
+#define CK_BUS_UART7 185
+#define CK_BUS_UART8 186
+#define CK_BUS_UART9 187
+#define CK_BUS_FDCAN 188
+#define CK_BUS_SPI1 189
+#define CK_BUS_SPI4 190
+#define CK_BUS_SPI5 191
+#define CK_BUS_SPI6 192
+#define CK_BUS_SPI7 193
+#define CK_BUS_BSEC 194
+#define CK_BUS_IWDG1 195
+#define CK_BUS_IWDG2 196
+#define CK_BUS_IWDG3 197
+#define CK_BUS_IWDG4 198
+#define CK_BUS_WWDG1 199
+#define CK_BUS_VREF 200
+#define CK_BUS_DTS 201
+#define CK_BUS_SERC 202
+#define CK_BUS_HDP 203
+#define CK_BUS_IS2M 204
+#define CK_BUS_DSI 205
+#define CK_BUS_LTDC 206
+#define CK_BUS_CSI 207
+#define CK_BUS_DCMIPP 208
+#define CK_BUS_DDRC 209
+#define CK_BUS_DDRCFG 210
+#define CK_BUS_GICV2M 211
+#define CK_BUS_USBTC 212
+#define CK_BUS_USB3PCIEPHY 214
+#define CK_BUS_STGEN 215
+#define CK_BUS_VDEC 216
+#define CK_BUS_VENC 217
+#define CK_SYSDBG 218
+#define CK_KER_TIM2 219
+#define CK_KER_TIM3 220
+#define CK_KER_TIM4 221
+#define CK_KER_TIM5 222
+#define CK_KER_TIM6 223
+#define CK_KER_TIM7 224
+#define CK_KER_TIM10 225
+#define CK_KER_TIM11 226
+#define CK_KER_TIM12 227
+#define CK_KER_TIM13 228
+#define CK_KER_TIM14 229
+#define CK_KER_TIM1 230
+#define CK_KER_TIM8 231
+#define CK_KER_TIM15 232
+#define CK_KER_TIM16 233
+#define CK_KER_TIM17 234
+#define CK_KER_TIM20 235
+#define CK_BUS_SYSRAM 236
+#define CK_BUS_VDERAM 237
+#define CK_BUS_RETRAM 238
+#define CK_BUS_OSPI1 239
+#define CK_BUS_OSPI2 240
+#define CK_BUS_OTFD1 241
+#define CK_BUS_OTFD2 242
+#define CK_BUS_SRAM1 243
+#define CK_BUS_SRAM2 244
+#define CK_BUS_SDMMC1 245
+#define CK_BUS_SDMMC2 246
+#define CK_BUS_SDMMC3 247
+#define CK_BUS_DDR 248
+#define CK_BUS_RISAF4 249
+#define CK_BUS_USB2OHCI 250
+#define CK_BUS_USB2EHCI 251
+#define CK_BUS_USB3DR 252
+#define CK_KER_LPTIM1 253
+#define CK_KER_LPTIM2 254
+#define CK_KER_USART2 255
+#define CK_KER_UART4 256
+#define CK_KER_USART3 257
+#define CK_KER_UART5 258
+#define CK_KER_SPI2 259
+#define CK_KER_SPI3 260
+#define CK_KER_SPDIFRX 261
+#define CK_KER_I2C1 262
+#define CK_KER_I2C2 263
+#define CK_KER_I3C1 264
+#define CK_KER_I3C2 265
+#define CK_KER_I2C3 266
+#define CK_KER_I2C5 267
+#define CK_KER_I3C3 268
+#define CK_KER_I2C4 269
+#define CK_KER_I2C6 270
+#define CK_KER_I2C7 271
+#define CK_KER_SPI1 272
+#define CK_KER_SPI4 273
+#define CK_KER_SPI5 274
+#define CK_KER_SPI6 275
+#define CK_KER_SPI7 276
+#define CK_KER_USART1 277
+#define CK_KER_USART6 278
+#define CK_KER_UART7 279
+#define CK_KER_UART8 280
+#define CK_KER_UART9 281
+#define CK_KER_MDF1 282
+#define CK_KER_SAI1 283
+#define CK_KER_SAI2 284
+#define CK_KER_SAI3 285
+#define CK_KER_SAI4 286
+#define CK_KER_FDCAN 287
+#define CK_KER_DSIBLANE 288
+#define CK_KER_DSIPHY 289
+#define CK_KER_CSI 290
+#define CK_KER_CSITXESC 291
+#define CK_KER_CSIPHY 292
+#define CK_KER_LVDSPHY 293
+#define CK_KER_STGEN 294
+#define CK_KER_USB3PCIEPHY 295
+#define CK_KER_USB2PHY2EN 296
+#define CK_KER_I3C4 297
+#define CK_KER_SPI8 298
+#define CK_KER_I2C8 299
+#define CK_KER_LPUART1 300
+#define CK_KER_LPTIM3 301
+#define CK_KER_LPTIM4 302
+#define CK_KER_LPTIM5 303
+#define CK_KER_TSDBG 304
+#define CK_KER_TPIU 305
+#define CK_BUS_ETR 306
+#define CK_BUS_SYSATB 307
+#define CK_KER_ADC12 308
+#define CK_KER_ADC3 309
+#define CK_KER_OSPI1 310
+#define CK_KER_OSPI2 311
+#define CK_KER_FMC 312
+#define CK_KER_SDMMC1 313
+#define CK_KER_SDMMC2 314
+#define CK_KER_SDMMC3 315
+#define CK_KER_ETH1 316
+#define CK_KER_ETH2 317
+#define CK_KER_ETH1PTP 318
+#define CK_KER_ETH2PTP 319
+#define CK_KER_USB2PHY1 320
+#define CK_KER_USB2PHY2 321
+#define CK_KER_ETHSW 322
+#define CK_KER_ETHSWREF 323
+#define CK_MCO1 324
+#define CK_MCO2 325
+#define CK_KER_DTS 326
+#define CK_ETH1_RX 327
+#define CK_ETH1_TX 328
+#define CK_ETH1_MAC 329
+#define CK_ETH2_RX 330
+#define CK_ETH2_TX 331
+#define CK_ETH2_MAC 332
+#define CK_ETH1_STP 333
+#define CK_ETH2_STP 334
+#define CK_KER_USBTC 335
+#define CK_BUS_ADF1 336
+#define CK_KER_ADF1 337
+#define CK_BUS_LVDS 338
+#define CK_KER_LTDC 339
+#define CK_KER_GPU 340
+#define CK_BUS_ETHSWACMCFG 341
+#define CK_BUS_ETHSWACMMSG 342
+#define HSE_DIV2_CK 343
+
+#define STM32MP25_LAST_CLK 344
+
+#define CK_SCMI_ICN_HS_MCU 0
+#define CK_SCMI_ICN_SDMMC 1
+#define CK_SCMI_ICN_DDR 2
+#define CK_SCMI_ICN_DISPLAY 3
+#define CK_SCMI_ICN_HSL 4
+#define CK_SCMI_ICN_NIC 5
+#define CK_SCMI_ICN_VID 6
+#define CK_SCMI_FLEXGEN_07 7
+#define CK_SCMI_FLEXGEN_08 8
+#define CK_SCMI_FLEXGEN_09 9
+#define CK_SCMI_FLEXGEN_10 10
+#define CK_SCMI_FLEXGEN_11 11
+#define CK_SCMI_FLEXGEN_12 12
+#define CK_SCMI_FLEXGEN_13 13
+#define CK_SCMI_FLEXGEN_14 14
+#define CK_SCMI_FLEXGEN_15 15
+#define CK_SCMI_FLEXGEN_16 16
+#define CK_SCMI_FLEXGEN_17 17
+#define CK_SCMI_FLEXGEN_18 18
+#define CK_SCMI_FLEXGEN_19 19
+#define CK_SCMI_FLEXGEN_20 20
+#define CK_SCMI_FLEXGEN_21 21
+#define CK_SCMI_FLEXGEN_22 22
+#define CK_SCMI_FLEXGEN_23 23
+#define CK_SCMI_FLEXGEN_24 24
+#define CK_SCMI_FLEXGEN_25 25
+#define CK_SCMI_FLEXGEN_26 26
+#define CK_SCMI_FLEXGEN_27 27
+#define CK_SCMI_FLEXGEN_28 28
+#define CK_SCMI_FLEXGEN_29 29
+#define CK_SCMI_FLEXGEN_30 30
+#define CK_SCMI_FLEXGEN_31 31
+#define CK_SCMI_FLEXGEN_32 32
+#define CK_SCMI_FLEXGEN_33 33
+#define CK_SCMI_FLEXGEN_34 34
+#define CK_SCMI_FLEXGEN_35 35
+#define CK_SCMI_FLEXGEN_36 36
+#define CK_SCMI_FLEXGEN_37 37
+#define CK_SCMI_FLEXGEN_38 38
+#define CK_SCMI_FLEXGEN_39 39
+#define CK_SCMI_FLEXGEN_40 40
+#define CK_SCMI_FLEXGEN_41 41
+#define CK_SCMI_FLEXGEN_42 42
+#define CK_SCMI_FLEXGEN_43 43
+#define CK_SCMI_FLEXGEN_44 44
+#define CK_SCMI_FLEXGEN_45 45
+#define CK_SCMI_FLEXGEN_46 46
+#define CK_SCMI_FLEXGEN_47 47
+#define CK_SCMI_FLEXGEN_48 48
+#define CK_SCMI_FLEXGEN_49 49
+#define CK_SCMI_FLEXGEN_50 50
+#define CK_SCMI_FLEXGEN_51 51
+#define CK_SCMI_FLEXGEN_52 52
+#define CK_SCMI_FLEXGEN_53 53
+#define CK_SCMI_FLEXGEN_54 54
+#define CK_SCMI_FLEXGEN_55 55
+#define CK_SCMI_FLEXGEN_56 56
+#define CK_SCMI_FLEXGEN_57 57
+#define CK_SCMI_FLEXGEN_58 58
+#define CK_SCMI_FLEXGEN_59 59
+#define CK_SCMI_FLEXGEN_60 60
+#define CK_SCMI_FLEXGEN_61 61
+#define CK_SCMI_FLEXGEN_62 62
+#define CK_SCMI_FLEXGEN_63 63
+#define CK_SCMI_ICN_LS_MCU 64
+#define CK_SCMI_HSE 65
+#define CK_SCMI_LSE 66
+#define CK_SCMI_HSI 67
+#define CK_SCMI_LSI 68
+#define CK_SCMI_MSI 69
+#define CK_SCMI_HSE_DIV2 70
+#define CK_SCMI_CPU1 71
+#define CK_SCMI_SYSCPU1 72
+#define CK_SCMI_PLL2 73
+#define CK_SCMI_PLL3 74
+#define CK_SCMI_RTC 75
+#define CK_SCMI_RTCCK 76
+#define CK_SCMI_ICN_APB1 77
+#define CK_SCMI_ICN_APB2 78
+#define CK_SCMI_ICN_APB3 79
+#define CK_SCMI_ICN_APB4 80
+#define CK_SCMI_ICN_APBDBG 81
+#define CK_SCMI_TIMG1 82
+#define CK_SCMI_TIMG2 83
+#define CK_SCMI_BKPSRAM 84
+#define CK_SCMI_BSEC 85
+#define CK_SCMI_ETR 87
+#define CK_SCMI_FMC 88
+#define CK_SCMI_GPIOA 89
+#define CK_SCMI_GPIOB 90
+#define CK_SCMI_GPIOC 91
+#define CK_SCMI_GPIOD 92
+#define CK_SCMI_GPIOE 93
+#define CK_SCMI_GPIOF 94
+#define CK_SCMI_GPIOG 95
+#define CK_SCMI_GPIOH 96
+#define CK_SCMI_GPIOI 97
+#define CK_SCMI_GPIOJ 98
+#define CK_SCMI_GPIOK 99
+#define CK_SCMI_GPIOZ 100
+#define CK_SCMI_HPDMA1 101
+#define CK_SCMI_HPDMA2 102
+#define CK_SCMI_HPDMA3 103
+#define CK_SCMI_HSEM 104
+#define CK_SCMI_IPCC1 105
+#define CK_SCMI_IPCC2 106
+#define CK_SCMI_LPDMA 107
+#define CK_SCMI_RETRAM 108
+#define CK_SCMI_SRAM1 109
+#define CK_SCMI_SRAM2 110
+#define CK_SCMI_LPSRAM1 111
+#define CK_SCMI_LPSRAM2 112
+#define CK_SCMI_LPSRAM3 113
+#define CK_SCMI_VDERAM 114
+#define CK_SCMI_SYSRAM 115
+#define CK_SCMI_OSPI1 116
+#define CK_SCMI_OSPI2 117
+#define CK_SCMI_TPIU 118
+#define CK_SCMI_SYSDBG 119
+#define CK_SCMI_SYSATB 120
+#define CK_SCMI_TSDBG 121
+#define CK_SCMI_STM500 122
+
+#endif /* _DT_BINDINGS_STM32MP25_CLKS_H_ */
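
This header carries two parallel index spaces: the RCC indices, bounded by STM32MP25_LAST_CLK, address the RCC clock controller directly, while the CK_SCMI_* indices address clocks exposed through SCMI firmware and so belong with the SCMI clock-protocol phandle, not the RCC node. A sketch of the distinction, with both provider labels assumed:

	/* clock served directly by the RCC node */
	clocks = <&rcc CK_KER_USART2>;
	/* clock on the same SoC mediated by SCMI firmware */
	clocks = <&scmi_clk CK_SCMI_GPIOA>;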
diff --git a/include/dt-bindings/clock/starfive,jh7110-crg.h b/include/dt-bindings/clock/starfive,jh7110-crg.h
new file mode 100644
index 000000000000..467ccab3bfaa
--- /dev/null
+++ b/include/dt-bindings/clock/starfive,jh7110-crg.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
+
+/* PLL clocks */
+#define JH7110_PLLCLK_PLL0_OUT 0
+#define JH7110_PLLCLK_PLL1_OUT 1
+#define JH7110_PLLCLK_PLL2_OUT 2
+#define JH7110_PLLCLK_END 3
+
+/* SYSCRG clocks */
+#define JH7110_SYSCLK_CPU_ROOT 0
+#define JH7110_SYSCLK_CPU_CORE 1
+#define JH7110_SYSCLK_CPU_BUS 2
+#define JH7110_SYSCLK_GPU_ROOT 3
+#define JH7110_SYSCLK_PERH_ROOT 4
+#define JH7110_SYSCLK_BUS_ROOT 5
+#define JH7110_SYSCLK_NOCSTG_BUS 6
+#define JH7110_SYSCLK_AXI_CFG0 7
+#define JH7110_SYSCLK_STG_AXIAHB 8
+#define JH7110_SYSCLK_AHB0 9
+#define JH7110_SYSCLK_AHB1 10
+#define JH7110_SYSCLK_APB_BUS 11
+#define JH7110_SYSCLK_APB0 12
+#define JH7110_SYSCLK_PLL0_DIV2 13
+#define JH7110_SYSCLK_PLL1_DIV2 14
+#define JH7110_SYSCLK_PLL2_DIV2 15
+#define JH7110_SYSCLK_AUDIO_ROOT 16
+#define JH7110_SYSCLK_MCLK_INNER 17
+#define JH7110_SYSCLK_MCLK 18
+#define JH7110_SYSCLK_MCLK_OUT 19
+#define JH7110_SYSCLK_ISP_2X 20
+#define JH7110_SYSCLK_ISP_AXI 21
+#define JH7110_SYSCLK_GCLK0 22
+#define JH7110_SYSCLK_GCLK1 23
+#define JH7110_SYSCLK_GCLK2 24
+#define JH7110_SYSCLK_CORE 25
+#define JH7110_SYSCLK_CORE1 26
+#define JH7110_SYSCLK_CORE2 27
+#define JH7110_SYSCLK_CORE3 28
+#define JH7110_SYSCLK_CORE4 29
+#define JH7110_SYSCLK_DEBUG 30
+#define JH7110_SYSCLK_RTC_TOGGLE 31
+#define JH7110_SYSCLK_TRACE0 32
+#define JH7110_SYSCLK_TRACE1 33
+#define JH7110_SYSCLK_TRACE2 34
+#define JH7110_SYSCLK_TRACE3 35
+#define JH7110_SYSCLK_TRACE4 36
+#define JH7110_SYSCLK_TRACE_COM 37
+#define JH7110_SYSCLK_NOC_BUS_CPU_AXI 38
+#define JH7110_SYSCLK_NOC_BUS_AXICFG0_AXI 39
+#define JH7110_SYSCLK_OSC_DIV2 40
+#define JH7110_SYSCLK_PLL1_DIV4 41
+#define JH7110_SYSCLK_PLL1_DIV8 42
+#define JH7110_SYSCLK_DDR_BUS 43
+#define JH7110_SYSCLK_DDR_AXI 44
+#define JH7110_SYSCLK_GPU_CORE 45
+#define JH7110_SYSCLK_GPU_CORE_CLK 46
+#define JH7110_SYSCLK_GPU_SYS_CLK 47
+#define JH7110_SYSCLK_GPU_APB 48
+#define JH7110_SYSCLK_GPU_RTC_TOGGLE 49
+#define JH7110_SYSCLK_NOC_BUS_GPU_AXI 50
+#define JH7110_SYSCLK_ISP_TOP_CORE 51
+#define JH7110_SYSCLK_ISP_TOP_AXI 52
+#define JH7110_SYSCLK_NOC_BUS_ISP_AXI 53
+#define JH7110_SYSCLK_HIFI4_CORE 54
+#define JH7110_SYSCLK_HIFI4_AXI 55
+#define JH7110_SYSCLK_AXI_CFG1_MAIN 56
+#define JH7110_SYSCLK_AXI_CFG1_AHB 57
+#define JH7110_SYSCLK_VOUT_SRC 58
+#define JH7110_SYSCLK_VOUT_AXI 59
+#define JH7110_SYSCLK_NOC_BUS_DISP_AXI 60
+#define JH7110_SYSCLK_VOUT_TOP_AHB 61
+#define JH7110_SYSCLK_VOUT_TOP_AXI 62
+#define JH7110_SYSCLK_VOUT_TOP_HDMITX0_MCLK 63
+#define JH7110_SYSCLK_VOUT_TOP_MIPIPHY_REF 64
+#define JH7110_SYSCLK_JPEGC_AXI 65
+#define JH7110_SYSCLK_CODAJ12_AXI 66
+#define JH7110_SYSCLK_CODAJ12_CORE 67
+#define JH7110_SYSCLK_CODAJ12_APB 68
+#define JH7110_SYSCLK_VDEC_AXI 69
+#define JH7110_SYSCLK_WAVE511_AXI 70
+#define JH7110_SYSCLK_WAVE511_BPU 71
+#define JH7110_SYSCLK_WAVE511_VCE 72
+#define JH7110_SYSCLK_WAVE511_APB 73
+#define JH7110_SYSCLK_VDEC_JPG 74
+#define JH7110_SYSCLK_VDEC_MAIN 75
+#define JH7110_SYSCLK_NOC_BUS_VDEC_AXI 76
+#define JH7110_SYSCLK_VENC_AXI 77
+#define JH7110_SYSCLK_WAVE420L_AXI 78
+#define JH7110_SYSCLK_WAVE420L_BPU 79
+#define JH7110_SYSCLK_WAVE420L_VCE 80
+#define JH7110_SYSCLK_WAVE420L_APB 81
+#define JH7110_SYSCLK_NOC_BUS_VENC_AXI 82
+#define JH7110_SYSCLK_AXI_CFG0_MAIN_DIV 83
+#define JH7110_SYSCLK_AXI_CFG0_MAIN 84
+#define JH7110_SYSCLK_AXI_CFG0_HIFI4 85
+#define JH7110_SYSCLK_AXIMEM2_AXI 86
+#define JH7110_SYSCLK_QSPI_AHB 87
+#define JH7110_SYSCLK_QSPI_APB 88
+#define JH7110_SYSCLK_QSPI_REF_SRC 89
+#define JH7110_SYSCLK_QSPI_REF 90
+#define JH7110_SYSCLK_SDIO0_AHB 91
+#define JH7110_SYSCLK_SDIO1_AHB 92
+#define JH7110_SYSCLK_SDIO0_SDCARD 93
+#define JH7110_SYSCLK_SDIO1_SDCARD 94
+#define JH7110_SYSCLK_USB_125M 95
+#define JH7110_SYSCLK_NOC_BUS_STG_AXI 96
+#define JH7110_SYSCLK_GMAC1_AHB 97
+#define JH7110_SYSCLK_GMAC1_AXI 98
+#define JH7110_SYSCLK_GMAC_SRC 99
+#define JH7110_SYSCLK_GMAC1_GTXCLK 100
+#define JH7110_SYSCLK_GMAC1_RMII_RTX 101
+#define JH7110_SYSCLK_GMAC1_PTP 102
+#define JH7110_SYSCLK_GMAC1_RX 103
+#define JH7110_SYSCLK_GMAC1_RX_INV 104
+#define JH7110_SYSCLK_GMAC1_TX 105
+#define JH7110_SYSCLK_GMAC1_TX_INV 106
+#define JH7110_SYSCLK_GMAC1_GTXC 107
+#define JH7110_SYSCLK_GMAC0_GTXCLK 108
+#define JH7110_SYSCLK_GMAC0_PTP 109
+#define JH7110_SYSCLK_GMAC_PHY 110
+#define JH7110_SYSCLK_GMAC0_GTXC 111
+#define JH7110_SYSCLK_IOMUX_APB 112
+#define JH7110_SYSCLK_MAILBOX_APB 113
+#define JH7110_SYSCLK_INT_CTRL_APB 114
+#define JH7110_SYSCLK_CAN0_APB 115
+#define JH7110_SYSCLK_CAN0_TIMER 116
+#define JH7110_SYSCLK_CAN0_CAN 117
+#define JH7110_SYSCLK_CAN1_APB 118
+#define JH7110_SYSCLK_CAN1_TIMER 119
+#define JH7110_SYSCLK_CAN1_CAN 120
+#define JH7110_SYSCLK_PWM_APB 121
+#define JH7110_SYSCLK_WDT_APB 122
+#define JH7110_SYSCLK_WDT_CORE 123
+#define JH7110_SYSCLK_TIMER_APB 124
+#define JH7110_SYSCLK_TIMER0 125
+#define JH7110_SYSCLK_TIMER1 126
+#define JH7110_SYSCLK_TIMER2 127
+#define JH7110_SYSCLK_TIMER3 128
+#define JH7110_SYSCLK_TEMP_APB 129
+#define JH7110_SYSCLK_TEMP_CORE 130
+#define JH7110_SYSCLK_SPI0_APB 131
+#define JH7110_SYSCLK_SPI1_APB 132
+#define JH7110_SYSCLK_SPI2_APB 133
+#define JH7110_SYSCLK_SPI3_APB 134
+#define JH7110_SYSCLK_SPI4_APB 135
+#define JH7110_SYSCLK_SPI5_APB 136
+#define JH7110_SYSCLK_SPI6_APB 137
+#define JH7110_SYSCLK_I2C0_APB 138
+#define JH7110_SYSCLK_I2C1_APB 139
+#define JH7110_SYSCLK_I2C2_APB 140
+#define JH7110_SYSCLK_I2C3_APB 141
+#define JH7110_SYSCLK_I2C4_APB 142
+#define JH7110_SYSCLK_I2C5_APB 143
+#define JH7110_SYSCLK_I2C6_APB 144
+#define JH7110_SYSCLK_UART0_APB 145
+#define JH7110_SYSCLK_UART0_CORE 146
+#define JH7110_SYSCLK_UART1_APB 147
+#define JH7110_SYSCLK_UART1_CORE 148
+#define JH7110_SYSCLK_UART2_APB 149
+#define JH7110_SYSCLK_UART2_CORE 150
+#define JH7110_SYSCLK_UART3_APB 151
+#define JH7110_SYSCLK_UART3_CORE 152
+#define JH7110_SYSCLK_UART4_APB 153
+#define JH7110_SYSCLK_UART4_CORE 154
+#define JH7110_SYSCLK_UART5_APB 155
+#define JH7110_SYSCLK_UART5_CORE 156
+#define JH7110_SYSCLK_PWMDAC_APB 157
+#define JH7110_SYSCLK_PWMDAC_CORE 158
+#define JH7110_SYSCLK_SPDIF_APB 159
+#define JH7110_SYSCLK_SPDIF_CORE 160
+#define JH7110_SYSCLK_I2STX0_APB 161
+#define JH7110_SYSCLK_I2STX0_BCLK_MST 162
+#define JH7110_SYSCLK_I2STX0_BCLK_MST_INV 163
+#define JH7110_SYSCLK_I2STX0_LRCK_MST 164
+#define JH7110_SYSCLK_I2STX0_BCLK 165
+#define JH7110_SYSCLK_I2STX0_BCLK_INV 166
+#define JH7110_SYSCLK_I2STX0_LRCK 167
+#define JH7110_SYSCLK_I2STX1_APB 168
+#define JH7110_SYSCLK_I2STX1_BCLK_MST 169
+#define JH7110_SYSCLK_I2STX1_BCLK_MST_INV 170
+#define JH7110_SYSCLK_I2STX1_LRCK_MST 171
+#define JH7110_SYSCLK_I2STX1_BCLK 172
+#define JH7110_SYSCLK_I2STX1_BCLK_INV 173
+#define JH7110_SYSCLK_I2STX1_LRCK 174
+#define JH7110_SYSCLK_I2SRX_APB 175
+#define JH7110_SYSCLK_I2SRX_BCLK_MST 176
+#define JH7110_SYSCLK_I2SRX_BCLK_MST_INV 177
+#define JH7110_SYSCLK_I2SRX_LRCK_MST 178
+#define JH7110_SYSCLK_I2SRX_BCLK 179
+#define JH7110_SYSCLK_I2SRX_BCLK_INV 180
+#define JH7110_SYSCLK_I2SRX_LRCK 181
+#define JH7110_SYSCLK_PDM_DMIC 182
+#define JH7110_SYSCLK_PDM_APB 183
+#define JH7110_SYSCLK_TDM_AHB 184
+#define JH7110_SYSCLK_TDM_APB 185
+#define JH7110_SYSCLK_TDM_INTERNAL 186
+#define JH7110_SYSCLK_TDM_TDM 187
+#define JH7110_SYSCLK_TDM_TDM_INV 188
+#define JH7110_SYSCLK_JTAG_CERTIFICATION_TRNG 189
+
+#define JH7110_SYSCLK_END 190
+
+/* AONCRG clocks */
+#define JH7110_AONCLK_OSC_DIV4 0
+#define JH7110_AONCLK_APB_FUNC 1
+#define JH7110_AONCLK_GMAC0_AHB 2
+#define JH7110_AONCLK_GMAC0_AXI 3
+#define JH7110_AONCLK_GMAC0_RMII_RTX 4
+#define JH7110_AONCLK_GMAC0_TX 5
+#define JH7110_AONCLK_GMAC0_TX_INV 6
+#define JH7110_AONCLK_GMAC0_RX 7
+#define JH7110_AONCLK_GMAC0_RX_INV 8
+#define JH7110_AONCLK_OTPC_APB 9
+#define JH7110_AONCLK_RTC_APB 10
+#define JH7110_AONCLK_RTC_INTERNAL 11
+#define JH7110_AONCLK_RTC_32K 12
+#define JH7110_AONCLK_RTC_CAL 13
+
+#define JH7110_AONCLK_END 14
+
+/* STGCRG clocks */
+#define JH7110_STGCLK_HIFI4_CLK_CORE 0
+#define JH7110_STGCLK_USB0_APB 1
+#define JH7110_STGCLK_USB0_UTMI_APB 2
+#define JH7110_STGCLK_USB0_AXI 3
+#define JH7110_STGCLK_USB0_LPM 4
+#define JH7110_STGCLK_USB0_STB 5
+#define JH7110_STGCLK_USB0_APP_125 6
+#define JH7110_STGCLK_USB0_REFCLK 7
+#define JH7110_STGCLK_PCIE0_AXI_MST0 8
+#define JH7110_STGCLK_PCIE0_APB 9
+#define JH7110_STGCLK_PCIE0_TL 10
+#define JH7110_STGCLK_PCIE1_AXI_MST0 11
+#define JH7110_STGCLK_PCIE1_APB 12
+#define JH7110_STGCLK_PCIE1_TL 13
+#define JH7110_STGCLK_PCIE_SLV_MAIN 14
+#define JH7110_STGCLK_SEC_AHB 15
+#define JH7110_STGCLK_SEC_MISC_AHB 16
+#define JH7110_STGCLK_GRP0_MAIN 17
+#define JH7110_STGCLK_GRP0_BUS 18
+#define JH7110_STGCLK_GRP0_STG 19
+#define JH7110_STGCLK_GRP1_MAIN 20
+#define JH7110_STGCLK_GRP1_BUS 21
+#define JH7110_STGCLK_GRP1_STG 22
+#define JH7110_STGCLK_GRP1_HIFI 23
+#define JH7110_STGCLK_E2_RTC 24
+#define JH7110_STGCLK_E2_CORE 25
+#define JH7110_STGCLK_E2_DBG 26
+#define JH7110_STGCLK_DMA1P_AXI 27
+#define JH7110_STGCLK_DMA1P_AHB 28
+
+#define JH7110_STGCLK_END 29
+
+/* ISPCRG clocks */
+#define JH7110_ISPCLK_DOM4_APB_FUNC 0
+#define JH7110_ISPCLK_MIPI_RX0_PXL 1
+#define JH7110_ISPCLK_DVP_INV 2
+#define JH7110_ISPCLK_M31DPHY_CFG_IN 3
+#define JH7110_ISPCLK_M31DPHY_REF_IN 4
+#define JH7110_ISPCLK_M31DPHY_TX_ESC_LAN0 5
+#define JH7110_ISPCLK_VIN_APB 6
+#define JH7110_ISPCLK_VIN_SYS 7
+#define JH7110_ISPCLK_VIN_PIXEL_IF0 8
+#define JH7110_ISPCLK_VIN_PIXEL_IF1 9
+#define JH7110_ISPCLK_VIN_PIXEL_IF2 10
+#define JH7110_ISPCLK_VIN_PIXEL_IF3 11
+#define JH7110_ISPCLK_VIN_P_AXI_WR 12
+#define JH7110_ISPCLK_ISPV2_TOP_WRAPPER_C 13
+
+#define JH7110_ISPCLK_END 14
+
+/* VOUTCRG clocks */
+#define JH7110_VOUTCLK_APB 0
+#define JH7110_VOUTCLK_DC8200_PIX 1
+#define JH7110_VOUTCLK_DSI_SYS 2
+#define JH7110_VOUTCLK_TX_ESC 3
+#define JH7110_VOUTCLK_DC8200_AXI 4
+#define JH7110_VOUTCLK_DC8200_CORE 5
+#define JH7110_VOUTCLK_DC8200_AHB 6
+#define JH7110_VOUTCLK_DC8200_PIX0 7
+#define JH7110_VOUTCLK_DC8200_PIX1 8
+#define JH7110_VOUTCLK_DOM_VOUT_TOP_LCD 9
+#define JH7110_VOUTCLK_DSITX_APB 10
+#define JH7110_VOUTCLK_DSITX_SYS 11
+#define JH7110_VOUTCLK_DSITX_DPI 12
+#define JH7110_VOUTCLK_DSITX_TXESC 13
+#define JH7110_VOUTCLK_MIPITX_DPHY_TXESC 14
+#define JH7110_VOUTCLK_HDMI_TX_MCLK 15
+#define JH7110_VOUTCLK_HDMI_TX_BCLK 16
+#define JH7110_VOUTCLK_HDMI_TX_SYS 17
+
+#define JH7110_VOUTCLK_END 18
+
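+/*
+ * Example consumer (illustrative only): a peripheral node references
+ * its CRG clocks by these indices. The "syscrg" label is an assumption:
+ *
+ *	clocks = <&syscrg JH7110_SYSCLK_UART0_CORE>,
+ *		 <&syscrg JH7110_SYSCLK_UART0_APB>;
+ */
+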
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__ */
diff --git a/include/dt-bindings/clock/starfive-jh7100-audio.h b/include/dt-bindings/clock/starfive-jh7100-audio.h
new file mode 100644
index 000000000000..fbb4eae6572b
--- /dev/null
+++ b/include/dt-bindings/clock/starfive-jh7100-audio.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+
+#define JH7100_AUDCLK_ADC_MCLK 0
+#define JH7100_AUDCLK_I2S1_MCLK 1
+#define JH7100_AUDCLK_I2SADC_APB 2
+#define JH7100_AUDCLK_I2SADC_BCLK 3
+#define JH7100_AUDCLK_I2SADC_BCLK_N 4
+#define JH7100_AUDCLK_I2SADC_LRCLK 5
+#define JH7100_AUDCLK_PDM_APB 6
+#define JH7100_AUDCLK_PDM_MCLK 7
+#define JH7100_AUDCLK_I2SVAD_APB 8
+#define JH7100_AUDCLK_SPDIF 9
+#define JH7100_AUDCLK_SPDIF_APB 10
+#define JH7100_AUDCLK_PWMDAC_APB 11
+#define JH7100_AUDCLK_DAC_MCLK 12
+#define JH7100_AUDCLK_I2SDAC_APB 13
+#define JH7100_AUDCLK_I2SDAC_BCLK 14
+#define JH7100_AUDCLK_I2SDAC_BCLK_N 15
+#define JH7100_AUDCLK_I2SDAC_LRCLK 16
+#define JH7100_AUDCLK_I2S1_APB 17
+#define JH7100_AUDCLK_I2S1_BCLK 18
+#define JH7100_AUDCLK_I2S1_BCLK_N 19
+#define JH7100_AUDCLK_I2S1_LRCLK 20
+#define JH7100_AUDCLK_I2SDAC16K_APB 21
+#define JH7100_AUDCLK_APB0_BUS 22
+#define JH7100_AUDCLK_DMA1P_AHB 23
+#define JH7100_AUDCLK_USB_APB 24
+#define JH7100_AUDCLK_USB_LPM 25
+#define JH7100_AUDCLK_USB_STB 26
+#define JH7100_AUDCLK_APB_EN 27
+#define JH7100_AUDCLK_VAD_MEM 28
+
+#define JH7100_AUDCLK_END 29
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__ */
diff --git a/include/dt-bindings/clock/starfive-jh7100.h b/include/dt-bindings/clock/starfive-jh7100.h
new file mode 100644
index 000000000000..aa0863b9728d
--- /dev/null
+++ b/include/dt-bindings/clock/starfive-jh7100.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__
+
+#define JH7100_CLK_CPUNDBUS_ROOT 0
+#define JH7100_CLK_DLA_ROOT 1
+#define JH7100_CLK_DSP_ROOT 2
+#define JH7100_CLK_GMACUSB_ROOT 3
+#define JH7100_CLK_PERH0_ROOT 4
+#define JH7100_CLK_PERH1_ROOT 5
+#define JH7100_CLK_VIN_ROOT 6
+#define JH7100_CLK_VOUT_ROOT 7
+#define JH7100_CLK_AUDIO_ROOT 8
+#define JH7100_CLK_CDECHIFI4_ROOT 9
+#define JH7100_CLK_CDEC_ROOT 10
+#define JH7100_CLK_VOUTBUS_ROOT 11
+#define JH7100_CLK_CPUNBUS_ROOT_DIV 12
+#define JH7100_CLK_DSP_ROOT_DIV 13
+#define JH7100_CLK_PERH0_SRC 14
+#define JH7100_CLK_PERH1_SRC 15
+#define JH7100_CLK_PLL0_TESTOUT 16
+#define JH7100_CLK_PLL1_TESTOUT 17
+#define JH7100_CLK_PLL2_TESTOUT 18
+#define JH7100_CLK_PLL2_REF 19
+#define JH7100_CLK_CPU_CORE 20
+#define JH7100_CLK_CPU_AXI 21
+#define JH7100_CLK_AHB_BUS 22
+#define JH7100_CLK_APB1_BUS 23
+#define JH7100_CLK_APB2_BUS 24
+#define JH7100_CLK_DOM3AHB_BUS 25
+#define JH7100_CLK_DOM7AHB_BUS 26
+#define JH7100_CLK_U74_CORE0 27
+#define JH7100_CLK_U74_CORE1 28
+#define JH7100_CLK_U74_AXI 29
+#define JH7100_CLK_U74RTC_TOGGLE 30
+#define JH7100_CLK_SGDMA2P_AXI 31
+#define JH7100_CLK_DMA2PNOC_AXI 32
+#define JH7100_CLK_SGDMA2P_AHB 33
+#define JH7100_CLK_DLA_BUS 34
+#define JH7100_CLK_DLA_AXI 35
+#define JH7100_CLK_DLANOC_AXI 36
+#define JH7100_CLK_DLA_APB 37
+#define JH7100_CLK_VP6_CORE 38
+#define JH7100_CLK_VP6BUS_SRC 39
+#define JH7100_CLK_VP6_AXI 40
+#define JH7100_CLK_VCDECBUS_SRC 41
+#define JH7100_CLK_VDEC_BUS 42
+#define JH7100_CLK_VDEC_AXI 43
+#define JH7100_CLK_VDECBRG_MAIN 44
+#define JH7100_CLK_VDEC_BCLK 45
+#define JH7100_CLK_VDEC_CCLK 46
+#define JH7100_CLK_VDEC_APB 47
+#define JH7100_CLK_JPEG_AXI 48
+#define JH7100_CLK_JPEG_CCLK 49
+#define JH7100_CLK_JPEG_APB 50
+#define JH7100_CLK_GC300_2X 51
+#define JH7100_CLK_GC300_AHB 52
+#define JH7100_CLK_JPCGC300_AXIBUS 53
+#define JH7100_CLK_GC300_AXI 54
+#define JH7100_CLK_JPCGC300_MAIN 55
+#define JH7100_CLK_VENC_BUS 56
+#define JH7100_CLK_VENC_AXI 57
+#define JH7100_CLK_VENCBRG_MAIN 58
+#define JH7100_CLK_VENC_BCLK 59
+#define JH7100_CLK_VENC_CCLK 60
+#define JH7100_CLK_VENC_APB 61
+#define JH7100_CLK_DDRPLL_DIV2 62
+#define JH7100_CLK_DDRPLL_DIV4 63
+#define JH7100_CLK_DDRPLL_DIV8 64
+#define JH7100_CLK_DDROSC_DIV2 65
+#define JH7100_CLK_DDRC0 66
+#define JH7100_CLK_DDRC1 67
+#define JH7100_CLK_DDRPHY_APB 68
+#define JH7100_CLK_NOC_ROB 69
+#define JH7100_CLK_NOC_COG 70
+#define JH7100_CLK_NNE_AHB 71
+#define JH7100_CLK_NNEBUS_SRC1 72
+#define JH7100_CLK_NNE_BUS 73
+#define JH7100_CLK_NNE_AXI 74
+#define JH7100_CLK_NNENOC_AXI 75
+#define JH7100_CLK_DLASLV_AXI 76
+#define JH7100_CLK_DSPX2C_AXI 77
+#define JH7100_CLK_HIFI4_SRC 78
+#define JH7100_CLK_HIFI4_COREFREE 79
+#define JH7100_CLK_HIFI4_CORE 80
+#define JH7100_CLK_HIFI4_BUS 81
+#define JH7100_CLK_HIFI4_AXI 82
+#define JH7100_CLK_HIFI4NOC_AXI 83
+#define JH7100_CLK_SGDMA1P_BUS 84
+#define JH7100_CLK_SGDMA1P_AXI 85
+#define JH7100_CLK_DMA1P_AXI 86
+#define JH7100_CLK_X2C_AXI 87
+#define JH7100_CLK_USB_BUS 88
+#define JH7100_CLK_USB_AXI 89
+#define JH7100_CLK_USBNOC_AXI 90
+#define JH7100_CLK_USBPHY_ROOTDIV 91
+#define JH7100_CLK_USBPHY_125M 92
+#define JH7100_CLK_USBPHY_PLLDIV25M 93
+#define JH7100_CLK_USBPHY_25M 94
+#define JH7100_CLK_AUDIO_DIV 95
+#define JH7100_CLK_AUDIO_SRC 96
+#define JH7100_CLK_AUDIO_12288 97
+#define JH7100_CLK_VIN_SRC 98
+#define JH7100_CLK_ISP0_BUS 99
+#define JH7100_CLK_ISP0_AXI 100
+#define JH7100_CLK_ISP0NOC_AXI 101
+#define JH7100_CLK_ISPSLV_AXI 102
+#define JH7100_CLK_ISP1_BUS 103
+#define JH7100_CLK_ISP1_AXI 104
+#define JH7100_CLK_ISP1NOC_AXI 105
+#define JH7100_CLK_VIN_BUS 106
+#define JH7100_CLK_VIN_AXI 107
+#define JH7100_CLK_VINNOC_AXI 108
+#define JH7100_CLK_VOUT_SRC 109
+#define JH7100_CLK_DISPBUS_SRC 110
+#define JH7100_CLK_DISP_BUS 111
+#define JH7100_CLK_DISP_AXI 112
+#define JH7100_CLK_DISPNOC_AXI 113
+#define JH7100_CLK_SDIO0_AHB 114
+#define JH7100_CLK_SDIO0_CCLKINT 115
+#define JH7100_CLK_SDIO0_CCLKINT_INV 116
+#define JH7100_CLK_SDIO1_AHB 117
+#define JH7100_CLK_SDIO1_CCLKINT 118
+#define JH7100_CLK_SDIO1_CCLKINT_INV 119
+#define JH7100_CLK_GMAC_AHB 120
+#define JH7100_CLK_GMAC_ROOT_DIV 121
+#define JH7100_CLK_GMAC_PTP_REF 122
+#define JH7100_CLK_GMAC_GTX 123
+#define JH7100_CLK_GMAC_RMII_TX 124
+#define JH7100_CLK_GMAC_RMII_RX 125
+#define JH7100_CLK_GMAC_TX 126
+#define JH7100_CLK_GMAC_TX_INV 127
+#define JH7100_CLK_GMAC_RX_PRE 128
+#define JH7100_CLK_GMAC_RX_INV 129
+#define JH7100_CLK_GMAC_RMII 130
+#define JH7100_CLK_GMAC_TOPHYREF 131
+#define JH7100_CLK_SPI2AHB_AHB 132
+#define JH7100_CLK_SPI2AHB_CORE 133
+#define JH7100_CLK_EZMASTER_AHB 134
+#define JH7100_CLK_E24_AHB 135
+#define JH7100_CLK_E24RTC_TOGGLE 136
+#define JH7100_CLK_QSPI_AHB 137
+#define JH7100_CLK_QSPI_APB 138
+#define JH7100_CLK_QSPI_REF 139
+#define JH7100_CLK_SEC_AHB 140
+#define JH7100_CLK_AES 141
+#define JH7100_CLK_SHA 142
+#define JH7100_CLK_PKA 143
+#define JH7100_CLK_TRNG_APB 144
+#define JH7100_CLK_OTP_APB 145
+#define JH7100_CLK_UART0_APB 146
+#define JH7100_CLK_UART0_CORE 147
+#define JH7100_CLK_UART1_APB 148
+#define JH7100_CLK_UART1_CORE 149
+#define JH7100_CLK_SPI0_APB 150
+#define JH7100_CLK_SPI0_CORE 151
+#define JH7100_CLK_SPI1_APB 152
+#define JH7100_CLK_SPI1_CORE 153
+#define JH7100_CLK_I2C0_APB 154
+#define JH7100_CLK_I2C0_CORE 155
+#define JH7100_CLK_I2C1_APB 156
+#define JH7100_CLK_I2C1_CORE 157
+#define JH7100_CLK_GPIO_APB 158
+#define JH7100_CLK_UART2_APB 159
+#define JH7100_CLK_UART2_CORE 160
+#define JH7100_CLK_UART3_APB 161
+#define JH7100_CLK_UART3_CORE 162
+#define JH7100_CLK_SPI2_APB 163
+#define JH7100_CLK_SPI2_CORE 164
+#define JH7100_CLK_SPI3_APB 165
+#define JH7100_CLK_SPI3_CORE 166
+#define JH7100_CLK_I2C2_APB 167
+#define JH7100_CLK_I2C2_CORE 168
+#define JH7100_CLK_I2C3_APB 169
+#define JH7100_CLK_I2C3_CORE 170
+#define JH7100_CLK_WDTIMER_APB 171
+#define JH7100_CLK_WDT_CORE 172
+#define JH7100_CLK_TIMER0_CORE 173
+#define JH7100_CLK_TIMER1_CORE 174
+#define JH7100_CLK_TIMER2_CORE 175
+#define JH7100_CLK_TIMER3_CORE 176
+#define JH7100_CLK_TIMER4_CORE 177
+#define JH7100_CLK_TIMER5_CORE 178
+#define JH7100_CLK_TIMER6_CORE 179
+#define JH7100_CLK_VP6INTC_APB 180
+#define JH7100_CLK_PWM_APB 181
+#define JH7100_CLK_MSI_APB 182
+#define JH7100_CLK_TEMP_APB 183
+#define JH7100_CLK_TEMP_SENSE 184
+#define JH7100_CLK_SYSERR_APB 185
+
+#define JH7100_CLK_PLL0_OUT 186
+#define JH7100_CLK_PLL1_OUT 187
+#define JH7100_CLK_PLL2_OUT 188
+
+#define JH7100_CLK_END 189
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/clock/ste-db8500-clkout.h b/include/dt-bindings/clock/ste-db8500-clkout.h
new file mode 100644
index 000000000000..ca07cb2bd1bc
--- /dev/null
+++ b/include/dt-bindings/clock/ste-db8500-clkout.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STE_CLK_DB8500_CLKOUT_H__
+#define __STE_CLK_DB8500_CLKOUT_H__
+
+#define DB8500_CLKOUT_1 0
+#define DB8500_CLKOUT_2 1
+
+#define DB8500_CLKOUT_SRC_CLK38M 0
+#define DB8500_CLKOUT_SRC_ACLK 1
+#define DB8500_CLKOUT_SRC_SYSCLK 2
+#define DB8500_CLKOUT_SRC_LCDCLK 3
+#define DB8500_CLKOUT_SRC_SDMMCCLK 4
+#define DB8500_CLKOUT_SRC_TVCLK 5
+#define DB8500_CLKOUT_SRC_TIMCLK 6
+#define DB8500_CLKOUT_SRC_CLK009 7
+
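+/*
+ * Illustrative only: a consumer selects one of the two CLKOUT pins and
+ * a source via the provider's clock specifier. The "clkout" label and
+ * the three-cell <pin source divider> layout are assumptions:
+ *
+ *	clocks = <&clkout DB8500_CLKOUT_1 DB8500_CLKOUT_SRC_SYSCLK 1>;
+ */
+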
+#endif
diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h
deleted file mode 100644
index 74302278024e..000000000000
--- a/include/dt-bindings/clock/stih416-clks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants clk index STMicroelectronics
- * STiH416 SoC.
- */
-#ifndef _CLK_STIH416
-#define _CLK_STIH416
-
-/* CLOCKGEN A0 */
-#define CLK_ICN_REG 0
-#define CLK_ETH1_PHY 4
-
-/* CLOCKGEN A1 */
-#define CLK_ICN_IF_2 0
-#define CLK_GMAC0_PHY 3
-
-#endif
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
index 1cc89c548578..e5dad050d518 100644
--- a/include/dt-bindings/clock/stm32fx-clock.h
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -7,10 +7,10 @@
*/
/*
- * List of clocks wich are not derived from system clock (SYSCLOCK)
+ * List of clocks which are not derived from system clock (SYSCLOCK)
*
* The index of these clocks is the secondary index of DT bindings
- * (see Documentatoin/devicetree/bindings/clock/st,stm32-rcc.txt)
+ * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt)
*
* e.g:
<assigned-clocks = <&rcc 1 CLK_LSE>;
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 4cdaf135829c..0a5324bcdbda 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
@@ -248,4 +248,27 @@
#define STM32MP1_LAST_CLK 232
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_PLL2_Q 5
+#define CK_SCMI_PLL2_R 6
+#define CK_SCMI_MPU 7
+#define CK_SCMI_AXI 8
+#define CK_SCMI_BSEC 9
+#define CK_SCMI_CRYP1 10
+#define CK_SCMI_GPIOZ 11
+#define CK_SCMI_HASH1 12
+#define CK_SCMI_I2C4 13
+#define CK_SCMI_I2C6 14
+#define CK_SCMI_IWDG1 15
+#define CK_SCMI_RNG1 16
+#define CK_SCMI_RTC 17
+#define CK_SCMI_RTCAPB 18
+#define CK_SCMI_SPI6 19
+#define CK_SCMI_USART1 20
+
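+/*
+ * Illustrative provider sketch: the identifiers above index clocks
+ * exposed by the SCMI clock protocol node (protocol ID 0x14); the
+ * label is an assumption:
+ *
+ *	scmi_clk: protocol@14 {
+ *		reg = <0x14>;
+ *		#clock-cells = <1>;
+ *	};
+ */
+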
#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
diff --git a/include/dt-bindings/clock/stm32mp13-clks.h b/include/dt-bindings/clock/stm32mp13-clks.h
new file mode 100644
index 000000000000..0bd7b54c65ff
--- /dev/null
+++ b/include/dt-bindings/clock/stm32mp13-clks.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2020 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_CLKS_H_
+#define _DT_BINDINGS_STM32MP13_CLKS_H_
+
+/* OSCILLATOR clocks */
+#define CK_HSE 0
+#define CK_CSI 1
+#define CK_LSI 2
+#define CK_LSE 3
+#define CK_HSI 4
+#define CK_HSE_DIV2 5
+
+/* PLL */
+#define PLL1 6
+#define PLL2 7
+#define PLL3 8
+#define PLL4 9
+
+/* ODF */
+#define PLL1_P 10
+#define PLL1_Q 11
+#define PLL1_R 12
+#define PLL2_P 13
+#define PLL2_Q 14
+#define PLL2_R 15
+#define PLL3_P 16
+#define PLL3_Q 17
+#define PLL3_R 18
+#define PLL4_P 19
+#define PLL4_Q 20
+#define PLL4_R 21
+
+#define PCLK1 22
+#define PCLK2 23
+#define PCLK3 24
+#define PCLK4 25
+#define PCLK5 26
+#define PCLK6 27
+
+/* SYSTEM CLOCK */
+#define CK_PER 28
+#define CK_MPU 29
+#define CK_AXI 30
+#define CK_MLAHB 31
+
+/* BASE TIMER */
+#define CK_TIMG1 32
+#define CK_TIMG2 33
+#define CK_TIMG3 34
+
+/* AUX */
+#define RTC 35
+
+/* TRACE & DEBUG clocks */
+#define CK_DBG 36
+#define CK_TRACE 37
+
+/* MCO clocks */
+#define CK_MCO1 38
+#define CK_MCO2 39
+
+/* IP clocks */
+#define SYSCFG 40
+#define VREF 41
+#define DTS 42
+#define PMBCTRL 43
+#define HDP 44
+#define IWDG2 45
+#define STGENRO 46
+#define USART1 47
+#define RTCAPB 48
+#define TZC 49
+#define TZPC 50
+#define IWDG1 51
+#define BSEC 52
+#define DMA1 53
+#define DMA2 54
+#define DMAMUX1 55
+#define DMAMUX2 56
+#define GPIOA 57
+#define GPIOB 58
+#define GPIOC 59
+#define GPIOD 60
+#define GPIOE 61
+#define GPIOF 62
+#define GPIOG 63
+#define GPIOH 64
+#define GPIOI 65
+#define CRYP1 66
+#define HASH1 67
+#define BKPSRAM 68
+#define MDMA 69
+#define CRC1 70
+#define USBH 71
+#define DMA3 72
+#define TSC 73
+#define PKA 74
+#define AXIMC 75
+#define MCE 76
+#define ETH1TX 77
+#define ETH2TX 78
+#define ETH1RX 79
+#define ETH2RX 80
+#define ETH1MAC 81
+#define ETH2MAC 82
+#define ETH1STP 83
+#define ETH2STP 84
+
+/* IP clocks with parents */
+#define SDMMC1_K 85
+#define SDMMC2_K 86
+#define ADC1_K 87
+#define ADC2_K 88
+#define FMC_K 89
+#define QSPI_K 90
+#define RNG1_K 91
+#define USBPHY_K 92
+#define STGEN_K 93
+#define SPDIF_K 94
+#define SPI1_K 95
+#define SPI2_K 96
+#define SPI3_K 97
+#define SPI4_K 98
+#define SPI5_K 99
+#define I2C1_K 100
+#define I2C2_K 101
+#define I2C3_K 102
+#define I2C4_K 103
+#define I2C5_K 104
+#define TIM2_K 105
+#define TIM3_K 106
+#define TIM4_K 107
+#define TIM5_K 108
+#define TIM6_K 109
+#define TIM7_K 110
+#define TIM12_K 111
+#define TIM13_K 112
+#define TIM14_K 113
+#define TIM1_K 114
+#define TIM8_K 115
+#define TIM15_K 116
+#define TIM16_K 117
+#define TIM17_K 118
+#define LPTIM1_K 119
+#define LPTIM2_K 120
+#define LPTIM3_K 121
+#define LPTIM4_K 122
+#define LPTIM5_K 123
+#define USART1_K 124
+#define USART2_K 125
+#define USART3_K 126
+#define UART4_K 127
+#define UART5_K 128
+#define USART6_K 129
+#define UART7_K 130
+#define UART8_K 131
+#define DFSDM_K 132
+#define FDCAN_K 133
+#define SAI1_K 134
+#define SAI2_K 135
+#define ADFSDM_K 136
+#define USBO_K 137
+#define LTDC_PX 138
+#define ETH1CK_K 139
+#define ETH1PTP_K 140
+#define ETH2CK_K 141
+#define ETH2PTP_K 142
+#define DCMIPP_K 143
+#define SAES_K 144
+#define DTS_K 145
+
+/* DDR */
+#define DDRC1 146
+#define DDRC1LP 147
+#define DDRC2 148
+#define DDRC2LP 149
+#define DDRPHYC 150
+#define DDRPHYCLP 151
+#define DDRCAPB 152
+#define DDRCAPBLP 153
+#define AXIDCG 154
+#define DDRPHYCAPB 155
+#define DDRPHYCAPBLP 156
+#define DDRPERFM 157
+
+#define ADC1 158
+#define ADC2 159
+#define SAI1 160
+#define SAI2 161
+
+#define STM32MP1_LAST_CLK 162
+
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_HSE_DIV2 5
+#define CK_SCMI_PLL2_Q 6
+#define CK_SCMI_PLL2_R 7
+#define CK_SCMI_PLL3_P 8
+#define CK_SCMI_PLL3_Q 9
+#define CK_SCMI_PLL3_R 10
+#define CK_SCMI_PLL4_P 11
+#define CK_SCMI_PLL4_Q 12
+#define CK_SCMI_PLL4_R 13
+#define CK_SCMI_MPU 14
+#define CK_SCMI_AXI 15
+#define CK_SCMI_MLAHB 16
+#define CK_SCMI_CKPER 17
+#define CK_SCMI_PCLK1 18
+#define CK_SCMI_PCLK2 19
+#define CK_SCMI_PCLK3 20
+#define CK_SCMI_PCLK4 21
+#define CK_SCMI_PCLK5 22
+#define CK_SCMI_PCLK6 23
+#define CK_SCMI_CKTIMG1 24
+#define CK_SCMI_CKTIMG2 25
+#define CK_SCMI_CKTIMG3 26
+#define CK_SCMI_RTC 27
+#define CK_SCMI_RTCAPB 28
+
+#endif /* _DT_BINDINGS_STM32MP13_CLKS_H_ */
diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h
index 08b98e20b7cc..636498f9e08e 100644
--- a/include/dt-bindings/clock/stratix10-clock.h
+++ b/include/dt-bindings/clock/stratix10-clock.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017, Intel Corporation
*/
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..fdbfb404f92a
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+
+#define CLK_PLL_CPUX 0
+#define CLK_PLL_DDR0 1
+#define CLK_PLL_PERIPH0_4X 2
+#define CLK_PLL_PERIPH0_2X 3
+#define CLK_PLL_PERIPH0_800M 4
+#define CLK_PLL_PERIPH0 5
+#define CLK_PLL_PERIPH0_DIV3 6
+#define CLK_PLL_VIDEO0_4X 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VIDEO0 9
+#define CLK_PLL_VIDEO1_4X 10
+#define CLK_PLL_VIDEO1_2X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VE 13
+#define CLK_PLL_AUDIO0_4X 14
+#define CLK_PLL_AUDIO0_2X 15
+#define CLK_PLL_AUDIO0 16
+#define CLK_PLL_AUDIO1 17
+#define CLK_PLL_AUDIO1_DIV2 18
+#define CLK_PLL_AUDIO1_DIV5 19
+#define CLK_CPUX 20
+#define CLK_CPUX_AXI 21
+#define CLK_CPUX_APB 22
+#define CLK_PSI_AHB 23
+#define CLK_APB0 24
+#define CLK_APB1 25
+#define CLK_MBUS 26
+#define CLK_DE 27
+#define CLK_BUS_DE 28
+#define CLK_DI 29
+#define CLK_BUS_DI 30
+#define CLK_G2D 31
+#define CLK_BUS_G2D 32
+#define CLK_CE 33
+#define CLK_BUS_CE 34
+#define CLK_VE 35
+#define CLK_BUS_VE 36
+#define CLK_BUS_DMA 37
+#define CLK_BUS_MSGBOX0 38
+#define CLK_BUS_MSGBOX1 39
+#define CLK_BUS_MSGBOX2 40
+#define CLK_BUS_SPINLOCK 41
+#define CLK_BUS_HSTIMER 42
+#define CLK_AVS 43
+#define CLK_BUS_DBG 44
+#define CLK_BUS_PWM 45
+#define CLK_BUS_IOMMU 46
+#define CLK_DRAM 47
+#define CLK_MBUS_DMA 48
+#define CLK_MBUS_VE 49
+#define CLK_MBUS_CE 50
+#define CLK_MBUS_TVIN 51
+#define CLK_MBUS_CSI 52
+#define CLK_MBUS_G2D 53
+#define CLK_MBUS_RISCV 54
+#define CLK_BUS_DRAM 55
+#define CLK_MMC0 56
+#define CLK_MMC1 57
+#define CLK_MMC2 58
+#define CLK_BUS_MMC0 59
+#define CLK_BUS_MMC1 60
+#define CLK_BUS_MMC2 61
+#define CLK_BUS_UART0 62
+#define CLK_BUS_UART1 63
+#define CLK_BUS_UART2 64
+#define CLK_BUS_UART3 65
+#define CLK_BUS_UART4 66
+#define CLK_BUS_UART5 67
+#define CLK_BUS_I2C0 68
+#define CLK_BUS_I2C1 69
+#define CLK_BUS_I2C2 70
+#define CLK_BUS_I2C3 71
+#define CLK_SPI0 72
+#define CLK_SPI1 73
+#define CLK_BUS_SPI0 74
+#define CLK_BUS_SPI1 75
+#define CLK_EMAC_25M 76
+#define CLK_BUS_EMAC 77
+#define CLK_IR_TX 78
+#define CLK_BUS_IR_TX 79
+#define CLK_BUS_GPADC 80
+#define CLK_BUS_THS 81
+#define CLK_I2S0 82
+#define CLK_I2S1 83
+#define CLK_I2S2 84
+#define CLK_I2S2_ASRC 85
+#define CLK_BUS_I2S0 86
+#define CLK_BUS_I2S1 87
+#define CLK_BUS_I2S2 88
+#define CLK_SPDIF_TX 89
+#define CLK_SPDIF_RX 90
+#define CLK_BUS_SPDIF 91
+#define CLK_DMIC 92
+#define CLK_BUS_DMIC 93
+#define CLK_AUDIO_DAC 94
+#define CLK_AUDIO_ADC 95
+#define CLK_BUS_AUDIO 96
+#define CLK_USB_OHCI0 97
+#define CLK_USB_OHCI1 98
+#define CLK_BUS_OHCI0 99
+#define CLK_BUS_OHCI1 100
+#define CLK_BUS_EHCI0 101
+#define CLK_BUS_EHCI1 102
+#define CLK_BUS_OTG 103
+#define CLK_BUS_LRADC 104
+#define CLK_BUS_DPSS_TOP 105
+#define CLK_HDMI_24M 106
+#define CLK_HDMI_CEC_32K 107
+#define CLK_HDMI_CEC 108
+#define CLK_BUS_HDMI 109
+#define CLK_MIPI_DSI 110
+#define CLK_BUS_MIPI_DSI 111
+#define CLK_TCON_LCD0 112
+#define CLK_BUS_TCON_LCD0 113
+#define CLK_TCON_TV 114
+#define CLK_BUS_TCON_TV 115
+#define CLK_TVE 116
+#define CLK_BUS_TVE_TOP 117
+#define CLK_BUS_TVE 118
+#define CLK_TVD 119
+#define CLK_BUS_TVD_TOP 120
+#define CLK_BUS_TVD 121
+#define CLK_LEDC 122
+#define CLK_BUS_LEDC 123
+#define CLK_CSI_TOP 124
+#define CLK_CSI_MCLK 125
+#define CLK_BUS_CSI 126
+#define CLK_TPADC 127
+#define CLK_BUS_TPADC 128
+#define CLK_BUS_TZMA 129
+#define CLK_DSP 130
+#define CLK_BUS_DSP_CFG 131
+#define CLK_RISCV 132
+#define CLK_RISCV_AXI 133
+#define CLK_BUS_RISCV_CFG 134
+#define CLK_FANOUT_24M 135
+#define CLK_FANOUT_12M 136
+#define CLK_FANOUT_16M 137
+#define CLK_FANOUT_25M 138
+#define CLK_FANOUT_32K 139
+#define CLK_FANOUT_27M 140
+#define CLK_FANOUT_PCLK 141
+#define CLK_FANOUT0 142
+#define CLK_FANOUT1 143
+#define CLK_FANOUT2 144
+#define CLK_BUS_CAN0 145
+#define CLK_BUS_CAN1 146
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun20i-d1-r-ccu.h b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..f95c170711e5
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+
+#define CLK_R_AHB 0
+
+#define CLK_BUS_R_TIMER 2
+#define CLK_BUS_R_TWD 3
+#define CLK_BUS_R_PPU 4
+#define CLK_R_IR_RX 5
+#define CLK_BUS_R_IR_RX 6
+#define CLK_BUS_R_RTC 7
+#define CLK_BUS_R_CPUCFG 8
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a100-ccu.h b/include/dt-bindings/clock/sun50i-a100-ccu.h
new file mode 100644
index 000000000000..06a2031d466b
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-a100-ccu.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_A100_H_
+#define _DT_BINDINGS_CLK_SUN50I_A100_H_
+
+#define CLK_PLL_PERIPH0 3
+
+#define CLK_CPUX 24
+
+#define CLK_APB1 29
+
+#define CLK_MBUS 31
+#define CLK_DE 32
+#define CLK_BUS_DE 33
+#define CLK_G2D 34
+#define CLK_BUS_G2D 35
+#define CLK_GPU 36
+#define CLK_BUS_GPU 37
+#define CLK_CE 38
+#define CLK_BUS_CE 39
+#define CLK_VE 40
+#define CLK_BUS_VE 41
+#define CLK_BUS_DMA 42
+#define CLK_BUS_MSGBOX 43
+#define CLK_BUS_SPINLOCK 44
+#define CLK_BUS_HSTIMER 45
+#define CLK_AVS 46
+#define CLK_BUS_DBG 47
+#define CLK_BUS_PSI 48
+#define CLK_BUS_PWM 49
+#define CLK_BUS_IOMMU 50
+#define CLK_MBUS_DMA 51
+#define CLK_MBUS_VE 52
+#define CLK_MBUS_CE 53
+#define CLK_MBUS_NAND 54
+#define CLK_MBUS_CSI 55
+#define CLK_MBUS_ISP 56
+#define CLK_MBUS_G2D 57
+
+#define CLK_NAND0 59
+#define CLK_NAND1 60
+#define CLK_BUS_NAND 61
+#define CLK_MMC0 62
+#define CLK_MMC1 63
+#define CLK_MMC2 64
+#define CLK_MMC3 65
+#define CLK_BUS_MMC0 66
+#define CLK_BUS_MMC1 67
+#define CLK_BUS_MMC2 68
+#define CLK_BUS_UART0 69
+#define CLK_BUS_UART1 70
+#define CLK_BUS_UART2 71
+#define CLK_BUS_UART3 72
+#define CLK_BUS_UART4 73
+#define CLK_BUS_I2C0 74
+#define CLK_BUS_I2C1 75
+#define CLK_BUS_I2C2 76
+#define CLK_BUS_I2C3 77
+#define CLK_SPI0 78
+#define CLK_SPI1 79
+#define CLK_SPI2 80
+#define CLK_BUS_SPI0 81
+#define CLK_BUS_SPI1 82
+#define CLK_BUS_SPI2 83
+#define CLK_EMAC_25M 84
+#define CLK_BUS_EMAC 85
+#define CLK_IR_RX 86
+#define CLK_BUS_IR_RX 87
+#define CLK_IR_TX 88
+#define CLK_BUS_IR_TX 89
+#define CLK_BUS_GPADC 90
+#define CLK_BUS_THS 91
+#define CLK_I2S0 92
+#define CLK_I2S1 93
+#define CLK_I2S2 94
+#define CLK_I2S3 95
+#define CLK_BUS_I2S0 96
+#define CLK_BUS_I2S1 97
+#define CLK_BUS_I2S2 98
+#define CLK_BUS_I2S3 99
+#define CLK_SPDIF 100
+#define CLK_BUS_SPDIF 101
+#define CLK_DMIC 102
+#define CLK_BUS_DMIC 103
+#define CLK_AUDIO_DAC 104
+#define CLK_AUDIO_ADC 105
+#define CLK_AUDIO_4X 106
+#define CLK_BUS_AUDIO_CODEC 107
+#define CLK_USB_OHCI0 108
+#define CLK_USB_PHY0 109
+#define CLK_USB_OHCI1 110
+#define CLK_USB_PHY1 111
+#define CLK_BUS_OHCI0 112
+#define CLK_BUS_OHCI1 113
+#define CLK_BUS_EHCI0 114
+#define CLK_BUS_EHCI1 115
+#define CLK_BUS_OTG 116
+#define CLK_BUS_LRADC 117
+#define CLK_BUS_DPSS_TOP0 118
+#define CLK_BUS_DPSS_TOP1 119
+#define CLK_MIPI_DSI 120
+#define CLK_BUS_MIPI_DSI 121
+#define CLK_TCON_LCD 122
+#define CLK_BUS_TCON_LCD 123
+#define CLK_LEDC 124
+#define CLK_BUS_LEDC 125
+#define CLK_CSI_TOP 126
+#define CLK_CSI0_MCLK 127
+#define CLK_CSI1_MCLK 128
+#define CLK_BUS_CSI 129
+#define CLK_CSI_ISP 130
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_A100_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a100-r-ccu.h b/include/dt-bindings/clock/sun50i-a100-r-ccu.h
new file mode 100644
index 000000000000..07312e7264fb
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-a100-r-ccu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_
+
+#define CLK_R_APB1 2
+
+#define CLK_R_APB1_TIMER 4
+#define CLK_R_APB1_TWD 5
+#define CLK_R_APB1_PWM 6
+#define CLK_R_APB1_BUS_PWM 7
+#define CLK_R_APB1_PPU 8
+#define CLK_R_APB2_UART 9
+#define CLK_R_APB2_I2C0 10
+#define CLK_R_APB2_I2C1 11
+#define CLK_R_APB1_IR 12
+#define CLK_R_APB1_BUS_IR 13
+#define CLK_R_AHB_BUS_RTC 14
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index 318eb15c414c..175892189e9d 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -113,7 +113,7 @@
#define CLK_USB_OHCI0 91
#define CLK_USB_OHCI1 93
-
+#define CLK_DRAM 94
#define CLK_DRAM_VE 95
#define CLK_DRAM_CSI 96
#define CLK_DRAM_DEINTERLACE 97
diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h
index a1545cd60e75..ef9123d81937 100644
--- a/include/dt-bindings/clock/sun50i-h6-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
index 76136132a13e..a96087abc86f 100644
--- a/include/dt-bindings/clock/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
@@ -21,4 +21,7 @@
#define CLK_IR 11
#define CLK_W1 12
+#define CLK_R_APB2_RSB 13
+#define CLK_R_APB1_RTC 14
+
#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
new file mode 100644
index 000000000000..6f8f01e67628
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2020 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_H616_H_
+#define _DT_BINDINGS_CLK_SUN50I_H616_H_
+
+#define CLK_PLL_PERIPH0 4
+
+#define CLK_CPUX 21
+
+#define CLK_APB1 26
+
+#define CLK_DE 29
+#define CLK_BUS_DE 30
+#define CLK_DEINTERLACE 31
+#define CLK_BUS_DEINTERLACE 32
+#define CLK_G2D 33
+#define CLK_BUS_G2D 34
+#define CLK_GPU0 35
+#define CLK_BUS_GPU 36
+#define CLK_GPU1 37
+#define CLK_CE 38
+#define CLK_BUS_CE 39
+#define CLK_VE 40
+#define CLK_BUS_VE 41
+#define CLK_BUS_DMA 42
+#define CLK_BUS_HSTIMER 43
+#define CLK_AVS 44
+#define CLK_BUS_DBG 45
+#define CLK_BUS_PSI 46
+#define CLK_BUS_PWM 47
+#define CLK_BUS_IOMMU 48
+
+#define CLK_MBUS_DMA 50
+#define CLK_MBUS_VE 51
+#define CLK_MBUS_CE 52
+#define CLK_MBUS_TS 53
+#define CLK_MBUS_NAND 54
+#define CLK_MBUS_G2D 55
+
+#define CLK_NAND0 57
+#define CLK_NAND1 58
+#define CLK_BUS_NAND 59
+#define CLK_MMC0 60
+#define CLK_MMC1 61
+#define CLK_MMC2 62
+#define CLK_BUS_MMC0 63
+#define CLK_BUS_MMC1 64
+#define CLK_BUS_MMC2 65
+#define CLK_BUS_UART0 66
+#define CLK_BUS_UART1 67
+#define CLK_BUS_UART2 68
+#define CLK_BUS_UART3 69
+#define CLK_BUS_UART4 70
+#define CLK_BUS_UART5 71
+#define CLK_BUS_I2C0 72
+#define CLK_BUS_I2C1 73
+#define CLK_BUS_I2C2 74
+#define CLK_BUS_I2C3 75
+#define CLK_BUS_I2C4 76
+#define CLK_SPI0 77
+#define CLK_SPI1 78
+#define CLK_BUS_SPI0 79
+#define CLK_BUS_SPI1 80
+#define CLK_EMAC_25M 81
+#define CLK_BUS_EMAC0 82
+#define CLK_BUS_EMAC1 83
+#define CLK_TS 84
+#define CLK_BUS_TS 85
+#define CLK_BUS_THS 86
+#define CLK_SPDIF 87
+#define CLK_BUS_SPDIF 88
+#define CLK_DMIC 89
+#define CLK_BUS_DMIC 90
+#define CLK_AUDIO_CODEC_1X 91
+#define CLK_AUDIO_CODEC_4X 92
+#define CLK_BUS_AUDIO_CODEC 93
+#define CLK_AUDIO_HUB 94
+#define CLK_BUS_AUDIO_HUB 95
+#define CLK_USB_OHCI0 96
+#define CLK_USB_PHY0 97
+#define CLK_USB_OHCI1 98
+#define CLK_USB_PHY1 99
+#define CLK_USB_OHCI2 100
+#define CLK_USB_PHY2 101
+#define CLK_USB_OHCI3 102
+#define CLK_USB_PHY3 103
+#define CLK_BUS_OHCI0 104
+#define CLK_BUS_OHCI1 105
+#define CLK_BUS_OHCI2 106
+#define CLK_BUS_OHCI3 107
+#define CLK_BUS_EHCI0 108
+#define CLK_BUS_EHCI1 109
+#define CLK_BUS_EHCI2 110
+#define CLK_BUS_EHCI3 111
+#define CLK_BUS_OTG 112
+#define CLK_BUS_KEYADC 113
+#define CLK_HDMI 114
+#define CLK_HDMI_SLOW 115
+#define CLK_HDMI_CEC 116
+#define CLK_BUS_HDMI 117
+#define CLK_BUS_TCON_TOP 118
+#define CLK_TCON_TV0 119
+#define CLK_TCON_TV1 120
+#define CLK_BUS_TCON_TV0 121
+#define CLK_BUS_TCON_TV1 122
+#define CLK_TVE0 123
+#define CLK_BUS_TVE_TOP 124
+#define CLK_BUS_TVE0 125
+#define CLK_HDCP 126
+#define CLK_BUS_HDCP 127
+#define CLK_PLL_SYSTEM_32K 128
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644
index 000000000000..3bd3aa3d57ce
--- /dev/null
+++ b/include/dt-bindings/clock/sun6i-rtc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K 0
+#define CLK_OSC32K_FANOUT 1
+#define CLK_IOSC 2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index 30d2d15373a2..5d4ada2c22e6 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -126,7 +126,7 @@
#define CLK_USB_OHCI1 93
#define CLK_USB_OHCI2 94
#define CLK_USB_OHCI3 95
-
+#define CLK_DRAM 96
#define CLK_DRAM_VE 97
#define CLK_DRAM_CSI 98
#define CLK_DRAM_DEINTERLACE 99
diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
index f5ac155c9c70..d7570765f424 100644
--- a/include/dt-bindings/clock/suniv-ccu-f1c100s.h
+++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
@@ -67,4 +67,6 @@
#define CLK_CODEC 65
#define CLK_AVS 66
+#define CLK_IR 67
+
#endif
diff --git a/include/dt-bindings/clock/sunplus,sp7021-clkc.h b/include/dt-bindings/clock/sunplus,sp7021-clkc.h
new file mode 100644
index 000000000000..cd84321eb2b5
--- /dev/null
+++ b/include/dt-bindings/clock/sunplus,sp7021-clkc.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#ifndef _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H
+#define _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H
+
+/* gates */
+#define CLK_RTC 0
+#define CLK_OTPRX 1
+#define CLK_NOC 2
+#define CLK_BR 3
+#define CLK_SPIFL 4
+#define CLK_PERI0 5
+#define CLK_PERI1 6
+#define CLK_STC0 7
+#define CLK_STC_AV0 8
+#define CLK_STC_AV1 9
+#define CLK_STC_AV2 10
+#define CLK_UA0 11
+#define CLK_UA1 12
+#define CLK_UA2 13
+#define CLK_UA3 14
+#define CLK_UA4 15
+#define CLK_HWUA 16
+#define CLK_DDC0 17
+#define CLK_UADMA 18
+#define CLK_CBDMA0 19
+#define CLK_CBDMA1 20
+#define CLK_SPI_COMBO_0 21
+#define CLK_SPI_COMBO_1 22
+#define CLK_SPI_COMBO_2 23
+#define CLK_SPI_COMBO_3 24
+#define CLK_AUD 25
+#define CLK_USBC0 26
+#define CLK_USBC1 27
+#define CLK_UPHY0 28
+#define CLK_UPHY1 29
+#define CLK_I2CM0 30
+#define CLK_I2CM1 31
+#define CLK_I2CM2 32
+#define CLK_I2CM3 33
+#define CLK_PMC 34
+#define CLK_CARD_CTL0 35
+#define CLK_CARD_CTL1 36
+#define CLK_CARD_CTL4 37
+#define CLK_BCH 38
+#define CLK_DDFCH 39
+#define CLK_CSIIW0 40
+#define CLK_CSIIW1 41
+#define CLK_MIPICSI0 42
+#define CLK_MIPICSI1 43
+#define CLK_HDMI_TX 44
+#define CLK_VPOST 45
+#define CLK_TGEN 46
+#define CLK_DMIX 47
+#define CLK_TCON 48
+#define CLK_GPIO 49
+#define CLK_MAILBOX 50
+#define CLK_SPIND 51
+#define CLK_I2C2CBUS 52
+#define CLK_SEC 53
+#define CLK_DVE 54
+#define CLK_GPOST0 55
+#define CLK_OSD0 56
+#define CLK_DISP_PWM 57
+#define CLK_UADBG 58
+#define CLK_FIO_CTL 59
+#define CLK_FPGA 60
+#define CLK_L2SW 61
+#define CLK_ICM 62
+#define CLK_AXI_GLOBAL 63
+
+/* plls */
+#define PLL_A 64
+#define PLL_E 65
+#define PLL_E_2P5 66
+#define PLL_E_25 67
+#define PLL_E_112P5 68
+#define PLL_F 69
+#define PLL_TV 70
+#define PLL_TV_A 71
+#define PLL_SYS 72
+
+#define CLK_MAX 73
+
+#endif
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index ab8b8a737a0a..9cfcc3baa52c 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -307,7 +307,7 @@
#define TEGRA210_CLK_AUDIO4 275
#define TEGRA210_CLK_SPDIF 276
/* 277 */
-/* 278 */
+#define TEGRA210_CLK_QSPI_PM 278
/* 279 */
/* 280 */
#define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */
diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h
new file mode 100644
index 000000000000..c360455d02ee
--- /dev/null
+++ b/include/dt-bindings/clock/tegra234-clock.h
@@ -0,0 +1,903 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
+#define DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
+
+/**
+ * @file
+ * @defgroup bpmp_clock_ids Clock ID's
+ * @{
+ */
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ACTMON */
+#define TEGRA234_CLK_ACTMON 1U
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA234_CLK_ADSP 2U
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA234_CLK_ADSPNEON 3U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA234_CLK_AHUB 4U
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA234_CLK_APB2APE 5U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA234_CLK_APE 6U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA234_CLK_AUD_MCLK 7U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA234_CLK_AXI_CBB 8U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */
+#define TEGRA234_CLK_CAN1 9U
+/** @brief output of gate CLK_ENB_CAN1_HOST */
+#define TEGRA234_CLK_CAN1_HOST 10U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */
+#define TEGRA234_CLK_CAN2 11U
+/** @brief output of gate CLK_ENB_CAN2_HOST */
+#define TEGRA234_CLK_CAN2_HOST 12U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */
+#define TEGRA234_CLK_CLK_M 14U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA234_CLK_DMIC1 15U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA234_CLK_DMIC2 16U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA234_CLK_DMIC3 17U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA234_CLK_DMIC4 18U
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA234_CLK_DPAUX 19U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG1 */
+#define TEGRA234_CLK_NVJPG1 20U
+/**
+ * @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY
+ * divided by the divider controlled by ACLK_CLK_DIVISOR in
+ * CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER
+ */
+#define TEGRA234_CLK_ACLK 21U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT switch divider output */
+#define TEGRA234_CLK_MSS_ENCRYPT 22U
+/** @brief clock recovered from EAVB input */
+#define TEGRA234_CLK_EQOS_RX_INPUT 23U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB switch divider output */
+#define TEGRA234_CLK_AON_APB 25U
+/** @brief CLK_RST_CONTROLLER_AON_NIC_RATE divider output */
+#define TEGRA234_CLK_AON_NIC 26U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC switch divider output */
+#define TEGRA234_CLK_AON_CPU_NIC 27U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */
+#define TEGRA234_CLK_PLLA1 28U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA234_CLK_DSPK1 29U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA234_CLK_DSPK2 30U
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA234_CLK_EMC 31U
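+/*
+ * Illustrative only (not part of this header): a board DTS could pin an
+ * initial EMC rate through assigned clocks, which triggers the
+ * clk_set_rate() sequence described above. The "bpmp" phandle and the
+ * rate value are assumptions:
+ *
+ *	assigned-clocks = <&bpmp TEGRA234_CLK_EMC>;
+ *	assigned-clock-rates = <204000000>;
+ */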
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_AXI_CLK_0 divider gated output */
+#define TEGRA234_CLK_EQOS_AXI 32U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 divider gated output */
+#define TEGRA234_CLK_EQOS_PTP_REF 33U
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA234_CLK_EQOS_RX 34U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK divider gated output */
+#define TEGRA234_CLK_EQOS_TX 35U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA234_CLK_EXTPERIPH1 36U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA234_CLK_EXTPERIPH2 37U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA234_CLK_EXTPERIPH3 38U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA234_CLK_EXTPERIPH4 39U
+/** @brief output of gate CLK_ENB_FUSE */
+#define TEGRA234_CLK_FUSE 40U
+/** @brief output of GPU GPC0 clkGen (in 1x mode same rate as GPC0 MUX2 out) */
+#define TEGRA234_CLK_GPC0CLK 41U
+/** @brief TODO */
+#define TEGRA234_CLK_GPU_PWR 42U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA234_CLK_HOST1X 46U
+/** @brief xusb_hs_hsicp_clk */
+#define TEGRA234_CLK_XUSB_HS_HSICP 47U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */
+#define TEGRA234_CLK_I2C1 48U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
+#define TEGRA234_CLK_I2C2 49U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA234_CLK_I2C3 50U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA234_CLK_I2C4 51U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA234_CLK_I2C6 52U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define TEGRA234_CLK_I2C7 53U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */
+#define TEGRA234_CLK_I2C8 54U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */
+#define TEGRA234_CLK_I2C9 55U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA234_CLK_I2S1 56U
+/** @brief clock recovered from I2S1 input */
+#define TEGRA234_CLK_I2S1_SYNC_INPUT 57U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */
+#define TEGRA234_CLK_I2S2 58U
+/** @brief clock recovered from I2S2 input */
+#define TEGRA234_CLK_I2S2_SYNC_INPUT 59U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */
+#define TEGRA234_CLK_I2S3 60U
+/** @brief clock recovered from I2S3 input */
+#define TEGRA234_CLK_I2S3_SYNC_INPUT 61U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA234_CLK_I2S4 62U
+/** @brief clock recovered from I2S4 input */
+#define TEGRA234_CLK_I2S4_SYNC_INPUT 63U
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA234_CLK_I2S5 64U
+/** @brief clock recovered from I2S5 input */
+#define TEGRA234_CLK_I2S5_SYNC_INPUT 65U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */
+#define TEGRA234_CLK_I2S6 66U
+/** @brief clock recovered from I2S6 input */
+#define TEGRA234_CLK_I2S6_SYNC_INPUT 67U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */
+#define TEGRA234_CLK_ISP 69U
+/** @brief Monitored branch of EQOS_RX clock */
+#define TEGRA234_CLK_EQOS_RX_M 70U
+/** @brief CLK_RST_CONTROLLER_MAUDCLK_OUT_SWITCH_DIVIDER switch divider output (maudclk) */
+#define TEGRA234_CLK_MAUD 71U
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA234_CLK_MIPI_CAL 72U
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA234_CLK_MPHY_CORE_PLL_FIXED 73U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA234_CLK_MPHY_L0_RX_ANA 74U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_BIT 75U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_SYMB */
+#define TEGRA234_CLK_MPHY_L0_RX_SYMB 76U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_3XBIT 77U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_SYMB 78U
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA234_CLK_MPHY_L1_RX_ANA 79U
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA234_CLK_MPHY_TX_1MHZ_REF 80U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA234_CLK_NVCSI 81U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA234_CLK_NVCSILP 82U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA234_CLK_NVDEC 83U
+/** @brief CLK_RST_CONTROLLER_HUBCLK_OUT_SWITCH_DIVIDER switch divider output (hubclk) */
+#define TEGRA234_CLK_HUB 84U
+/** @brief CLK_RST_CONTROLLER_DISPCLK_SWITCH_DIVIDER switch divider output (dispclk) */
+#define TEGRA234_CLK_DISP 85U
+/** @brief RG_CLK_CTRL__0_DIV divider output (nvdisplay_p0_clk) */
+#define TEGRA234_CLK_NVDISPLAY_P0 86U
+/** @brief RG_CLK_CTRL__1_DIV divider output (nvdisplay_p1_clk) */
+#define TEGRA234_CLK_NVDISPLAY_P1 87U
+/** @brief DSC_CLK (DISPCLK ÷ 3) */
+#define TEGRA234_CLK_DSC 88U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA234_CLK_NVENC 89U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA234_CLK_NVJPG 90U
+/** @brief input from Tegra's XTAL_IN */
+#define TEGRA234_CLK_OSC 91U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH switch divider output */
+#define TEGRA234_CLK_AON_TOUCH 92U
+/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA234_CLK_PLLA 93U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */
+#define TEGRA234_CLK_PLLAON 94U
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA234_CLK_PLLE 100U
+/** @brief PLLP vco output */
+#define TEGRA234_CLK_PLLP 101U
+/** @brief PLLP clk output */
+#define TEGRA234_CLK_PLLP_OUT0 102U
+/** Fixed frequency 960MHz PLL for USB and EAVB */
+#define TEGRA234_CLK_UTMIP_PLL 103U
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
+#define TEGRA234_CLK_PLLA_OUT0 104U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
+#define TEGRA234_CLK_PWM1 105U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */
+#define TEGRA234_CLK_PWM2 106U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */
+#define TEGRA234_CLK_PWM3 107U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */
+#define TEGRA234_CLK_PWM4 108U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */
+#define TEGRA234_CLK_PWM5 109U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */
+#define TEGRA234_CLK_PWM6 110U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */
+#define TEGRA234_CLK_PWM7 111U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
+#define TEGRA234_CLK_PWM8 112U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_RCE_CPU_NIC output */
+#define TEGRA234_CLK_RCE_CPU_NIC 113U
+/** @brief CLK_RST_CONTROLLER_RCE_NIC_RATE divider output */
+#define TEGRA234_CLK_RCE_NIC 114U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW switch divider output */
+#define TEGRA234_CLK_AON_I2C_SLOW 117U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */
+#define TEGRA234_CLK_SCE_CPU_NIC 118U
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA234_CLK_SCE_NIC 119U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA234_CLK_SDMMC1 120U
+/** @brief Logical clk for setting the UPHY PLL3 rate */
+#define TEGRA234_CLK_UPHY_PLL3 121U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
+#define TEGRA234_CLK_SDMMC4 123U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SE switch divider gated output */
+#define TEGRA234_CLK_SE 124U
+/** @brief VPLL select for sor0_ref clk driven by disp_2clk_sor0_head_sel signal */
+#define TEGRA234_CLK_SOR0_PLL_REF 125U
+/** @brief Output of mux controlled by disp_2clk_sor0_pll_ref_clk_safe signal (sor0_ref_clk) */
+#define TEGRA234_CLK_SOR0_REF 126U
+/** @brief VPLL select for sor1_ref clk driven by disp_2clk_sor0_head_sel signal */
+#define TEGRA234_CLK_SOR1_PLL_REF 127U
+/** @brief SOR_PLL_REF_CLK_CTRL__0_DIV divider output */
+#define TEGRA234_CLK_PRE_SOR0_REF 128U
+/** @brief Output of mux controlled by disp_2clk_sor1_pll_ref_clk_safe signal (sor1_ref_clk) */
+#define TEGRA234_CLK_SOR1_REF 129U
+/** @brief SOR_PLL_REF_CLK_CTRL__1_DIV divider output */
+#define TEGRA234_CLK_PRE_SOR1_REF 130U
+/** @brief output of gate CLK_ENB_SOR_SAFE */
+#define TEGRA234_CLK_SOR_SAFE 131U
+/** @brief SOR_CLK_CTRL__0_DIV divider output */
+#define TEGRA234_CLK_SOR0_DIV 132U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */
+#define TEGRA234_CLK_DMIC5 134U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */
+#define TEGRA234_CLK_SPI1 135U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */
+#define TEGRA234_CLK_SPI2 136U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI3 */
+#define TEGRA234_CLK_SPI3 137U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA234_CLK_I2C_SLOW 138U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
+#define TEGRA234_CLK_SYNC_DMIC1 139U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
+#define TEGRA234_CLK_SYNC_DMIC2 140U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA234_CLK_SYNC_DMIC3 141U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA234_CLK_SYNC_DMIC4 142U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */
+#define TEGRA234_CLK_SYNC_DSPK1 143U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */
+#define TEGRA234_CLK_SYNC_DSPK2 144U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */
+#define TEGRA234_CLK_SYNC_I2S1 145U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */
+#define TEGRA234_CLK_SYNC_I2S2 146U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */
+#define TEGRA234_CLK_SYNC_I2S3 147U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */
+#define TEGRA234_CLK_SYNC_I2S4 148U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */
+#define TEGRA234_CLK_SYNC_I2S5 149U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
+#define TEGRA234_CLK_SYNC_I2S6 150U
+/** @brief controls MPHY_FORCE_LS_MODE upon enable & disable */
+#define TEGRA234_CLK_MPHY_FORCE_LS_MODE 151U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH0 */
+#define TEGRA234_CLK_TACH0 152U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA234_CLK_TSEC 153U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PKA */
+#define TEGRA234_CLK_TSEC_PKA 154U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
+#define TEGRA234_CLK_UARTA 155U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA234_CLK_UARTB 156U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */
+#define TEGRA234_CLK_UARTC 157U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA234_CLK_UARTD 158U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */
+#define TEGRA234_CLK_UARTE 159U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */
+#define TEGRA234_CLK_UARTF 160U
+/** @brief output of gate CLK_ENB_PEX1_CORE_6 */
+#define TEGRA234_CLK_PEX1_C6_CORE 161U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA234_CLK_UART_FST_MIPI_CAL 162U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA234_CLK_UFSDEV_REF 163U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA234_CLK_UFSHC 164U
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA234_CLK_USB2_TRK 165U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */
+#define TEGRA234_CLK_VI 166U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA234_CLK_VIC 167U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_CSITE switch divider output */
+#define TEGRA234_CLK_CSITE 168U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_IST switch divider output */
+#define TEGRA234_CLK_IST 169U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_IST_JTAG_REG_CLK_SEL */
+#define TEGRA234_CLK_JTAG_INTFC_PRE_CG 170U
+/** @brief output of gate CLK_ENB_PEX2_CORE_7 */
+#define TEGRA234_CLK_PEX2_C7_CORE 171U
+/** @brief output of gate CLK_ENB_PEX2_CORE_8 */
+#define TEGRA234_CLK_PEX2_C8_CORE 172U
+/** @brief output of gate CLK_ENB_PEX2_CORE_9 */
+#define TEGRA234_CLK_PEX2_C9_CORE 173U
+/** @brief dla0_falcon_clk */
+#define TEGRA234_CLK_DLA0_FALCON 174U
+/** @brief dla0_core_clk */
+#define TEGRA234_CLK_DLA0_CORE 175U
+/** @brief dla1_falcon_clk */
+#define TEGRA234_CLK_DLA1_FALCON 176U
+/** @brief dla1_core_clk */
+#define TEGRA234_CLK_DLA1_CORE 177U
+/** @brief Output of mux controlled by disp_2clk_sor0_clk_safe signal (sor0_clk) */
+#define TEGRA234_CLK_SOR0 178U
+/** @brief Output of mux controlled by disp_2clk_sor1_clk_safe signal (sor1_clk) */
+#define TEGRA234_CLK_SOR1 179U
+/** @brief DP macro feedback clock (same as LINKA_SYM CLKOUT) */
+#define TEGRA234_CLK_SOR_PAD_INPUT 180U
+/** @brief Output of mux controlled by disp_2clk_h0_dsi_sel signal in sf0_clk path */
+#define TEGRA234_CLK_PRE_SF0 181U
+/** @brief Output of mux controlled by disp_2clk_sf0_clk_safe signal (sf0_clk) */
+#define TEGRA234_CLK_SF0 182U
+/** @brief Output of mux controlled by disp_2clk_sf1_clk_safe signal (sf1_clk) */
+#define TEGRA234_CLK_SF1 183U
+/** @brief CLKOUT_AB output from DSI BRICK A (dsi_clkout_ab) */
+#define TEGRA234_CLK_DSI_PAD_INPUT 184U
+/** @brief output of gate CLK_ENB_PEX2_CORE_10 */
+#define TEGRA234_CLK_PEX2_C10_CORE 187U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTI switch divider output (uarti_r_clk) */
+#define TEGRA234_CLK_UARTI 188U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTJ switch divider output (uartj_r_clk) */
+#define TEGRA234_CLK_UARTJ 189U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTH switch divider output */
+#define TEGRA234_CLK_UARTH 190U
+/** @brief ungated version of fuse clk */
+#define TEGRA234_CLK_FUSE_SERIAL 191U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 switch divider output (qspi0_2x_pm_clk) */
+#define TEGRA234_CLK_QSPI0_2X_PM 192U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 switch divider output (qspi1_2x_pm_clk) */
+#define TEGRA234_CLK_QSPI1_2X_PM 193U
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 (qspi0_pm_clk) */
+#define TEGRA234_CLK_QSPI0_PM 194U
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 (qspi1_pm_clk) */
+#define TEGRA234_CLK_QSPI1_PM 195U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_VI_CONST switch divider output */
+#define TEGRA234_CLK_VI_CONST 196U
+/** @brief NAFLL clock source for BPMP */
+#define TEGRA234_CLK_NAFLL_BPMP 197U
+/** @brief NAFLL clock source for SCE */
+#define TEGRA234_CLK_NAFLL_SCE 198U
+/** @brief NAFLL clock source for NVDEC */
+#define TEGRA234_CLK_NAFLL_NVDEC 199U
+/** @brief NAFLL clock source for NVJPG */
+#define TEGRA234_CLK_NAFLL_NVJPG 200U
+/** @brief NAFLL clock source for TSEC */
+#define TEGRA234_CLK_NAFLL_TSEC 201U
+/** @brief NAFLL clock source for VI */
+#define TEGRA234_CLK_NAFLL_VI 203U
+/** @brief NAFLL clock source for SE */
+#define TEGRA234_CLK_NAFLL_SE 204U
+/** @brief NAFLL clock source for NVENC */
+#define TEGRA234_CLK_NAFLL_NVENC 205U
+/** @brief NAFLL clock source for ISP */
+#define TEGRA234_CLK_NAFLL_ISP 206U
+/** @brief NAFLL clock source for VIC */
+#define TEGRA234_CLK_NAFLL_VIC 207U
+/** @brief NAFLL clock source for AXICBB */
+#define TEGRA234_CLK_NAFLL_AXICBB 209U
+/** @brief NAFLL clock source for NVJPG1 */
+#define TEGRA234_CLK_NAFLL_NVJPG1 210U
+/** @brief NAFLL clock source for PVA core */
+#define TEGRA234_CLK_NAFLL_PVA0_CORE 211U
+/** @brief NAFLL clock source for PVA VPS */
+#define TEGRA234_CLK_NAFLL_PVA0_VPS 212U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_DBGAPB_0 switch divider output (dbgapb_clk) */
+#define TEGRA234_CLK_DBGAPB 213U
+/** @brief NAFLL clock source for RCE */
+#define TEGRA234_CLK_NAFLL_RCE 214U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_LA switch divider output (la_r_clk) */
+#define TEGRA234_CLK_LA 215U
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTD */
+#define TEGRA234_CLK_PLLP_OUT_JTAG 216U
+/** @brief AXI_CBB branch sharing gate control with SDMMC4 */
+#define TEGRA234_CLK_SDMMC4_AXICIF 217U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM switch divider output */
+#define TEGRA234_CLK_SDMMC_LEGACY_TM 219U
+/** @brief output of gate CLK_ENB_PEX0_CORE_0 */
+#define TEGRA234_CLK_PEX0_C0_CORE 220U
+/** @brief output of gate CLK_ENB_PEX0_CORE_1 */
+#define TEGRA234_CLK_PEX0_C1_CORE 221U
+/** @brief output of gate CLK_ENB_PEX0_CORE_2 */
+#define TEGRA234_CLK_PEX0_C2_CORE 222U
+/** @brief output of gate CLK_ENB_PEX0_CORE_3 */
+#define TEGRA234_CLK_PEX0_C3_CORE 223U
+/** @brief output of gate CLK_ENB_PEX0_CORE_4 */
+#define TEGRA234_CLK_PEX0_C4_CORE 224U
+/** @brief output of gate CLK_ENB_PEX1_CORE_5 */
+#define TEGRA234_CLK_PEX1_C5_CORE 225U
+/** @brief Monitored branch of PEX0_C0_CORE clock */
+#define TEGRA234_CLK_PEX0_C0_CORE_M 229U
+/** @brief Monitored branch of PEX0_C1_CORE clock */
+#define TEGRA234_CLK_PEX0_C1_CORE_M 230U
+/** @brief Monitored branch of PEX0_C2_CORE clock */
+#define TEGRA234_CLK_PEX0_C2_CORE_M 231U
+/** @brief Monitored branch of PEX0_C3_CORE clock */
+#define TEGRA234_CLK_PEX0_C3_CORE_M 232U
+/** @brief Monitored branch of PEX0_C4_CORE clock */
+#define TEGRA234_CLK_PEX0_C4_CORE_M 233U
+/** @brief Monitored branch of PEX1_C5_CORE clock */
+#define TEGRA234_CLK_PEX1_C5_CORE_M 234U
+/** @brief Monitored branch of PEX1_C6_CORE clock */
+#define TEGRA234_CLK_PEX1_C6_CORE_M 235U
+/** @brief output of GPU GPC1 clkGen (in 1x mode same rate as GPC1 MUX2 out) */
+#define TEGRA234_CLK_GPC1CLK 236U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
+#define TEGRA234_CLK_PLLC4 237U
+/** @brief PLLC4 VCO followed by DIV3 path */
+#define TEGRA234_CLK_PLLC4_OUT1 239U
+/** @brief PLLC4 VCO followed by DIV5 path */
+#define TEGRA234_CLK_PLLC4_OUT2 240U
+/** @brief output of the mux controlled by PLLC4_CLK_SEL */
+#define TEGRA234_CLK_PLLC4_MUXED 241U
+/** @brief PLLC4 VCO followed by DIV2 path */
+#define TEGRA234_CLK_PLLC4_VCO_DIV2 242U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVHS_BASE */
+#define TEGRA234_CLK_PLLNVHS 243U
+/** @brief Monitored branch of PEX2_C7_CORE clock */
+#define TEGRA234_CLK_PEX2_C7_CORE_M 244U
+/** @brief Monitored branch of PEX2_C8_CORE clock */
+#define TEGRA234_CLK_PEX2_C8_CORE_M 245U
+/** @brief Monitored branch of PEX2_C9_CORE clock */
+#define TEGRA234_CLK_PEX2_C9_CORE_M 246U
+/** @brief Monitored branch of PEX2_C10_CORE clock */
+#define TEGRA234_CLK_PEX2_C10_CORE_M 247U
+/** @brief RX clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_INPUT 248U
+/** @brief RX clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_INPUT 249U
+/** @brief RX clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_INPUT 250U
+/** @brief RX clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_INPUT 251U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP switch divider output */
+#define TEGRA234_CLK_PEX_SATA_USB_RX_BYP 254U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL0_MGMT 255U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL1_MGMT 256U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL2_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL2_MGMT 257U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL3_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL3_MGMT 258U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_RX_BYP switch divider output */
+#define TEGRA234_CLK_NVHS_RX_BYP_REF 263U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_NVHS_PLL0_MGMT 264U
+/** @brief xusb_core_dev_clk */
+#define TEGRA234_CLK_XUSB_CORE_DEV 265U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_CORE_HOST switch divider output */
+#define TEGRA234_CLK_XUSB_CORE_MUX 266U
+/** @brief xusb_core_host_clk */
+#define TEGRA234_CLK_XUSB_CORE_HOST 267U
+/** @brief xusb_core_superspeed_clk */
+#define TEGRA234_CLK_XUSB_CORE_SS 268U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_FALCON switch divider output */
+#define TEGRA234_CLK_XUSB_FALCON 269U
+/** @brief xusb_falcon_host_clk */
+#define TEGRA234_CLK_XUSB_FALCON_HOST 270U
+/** @brief xusb_falcon_superspeed_clk */
+#define TEGRA234_CLK_XUSB_FALCON_SS 271U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_FS switch divider output */
+#define TEGRA234_CLK_XUSB_FS 272U
+/** @brief xusb_fs_host_clk */
+#define TEGRA234_CLK_XUSB_FS_HOST 273U
+/** @brief xusb_fs_dev_clk */
+#define TEGRA234_CLK_XUSB_FS_DEV 274U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_SS switch divider output */
+#define TEGRA234_CLK_XUSB_SS 275U
+/** @brief xusb_ss_dev_clk */
+#define TEGRA234_CLK_XUSB_SS_DEV 276U
+/** @brief xusb_ss_superspeed_clk */
+#define TEGRA234_CLK_XUSB_SS_SUPERSPEED 277U
+/** @brief NAFLL clock source for CPU cluster 0 */
+#define TEGRA234_CLK_NAFLL_CLUSTER0 280U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER0_CORE 280U
+/** @brief NAFLL clock source for CPU cluster 1 */
+#define TEGRA234_CLK_NAFLL_CLUSTER1 281U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER1_CORE 281U
+/** @brief NAFLL clock source for CPU cluster 2 */
+#define TEGRA234_CLK_NAFLL_CLUSTER2 282U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER2_CORE 282U
+/** @brief CLK_RST_CONTROLLER_CAN1_CORE_RATE divider output */
+#define TEGRA234_CLK_CAN1_CORE 284U
+/** @brief CLK_RST_CONTROLLER_CAN2_CORE_RATE divider output */
+#define TEGRA234_CLK_CAN2_CORE 285U
+/** @brief CLK_RST_CONTROLLER_PLLA1_OUT1 switch divider output */
+#define TEGRA234_CLK_PLLA1_OUT1 286U
+/** @brief NVHS PLL hardware power sequencer (overrides 'manual' programming of PLL) */
+#define TEGRA234_CLK_PLLNVHS_HPS 287U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE */
+#define TEGRA234_CLK_PLLREFE_VCOOUT 288U
+/** @brief 32K input clock provided by PMIC */
+#define TEGRA234_CLK_CLK_32K 289U
+/** @brief Fixed 48MHz clock divided down from utmipll */
+#define TEGRA234_CLK_UTMIPLL_CLKOUT48 291U
+/** @brief Fixed 480MHz clock divided down from utmipll */
+#define TEGRA234_CLK_UTMIPLL_CLKOUT480 292U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */
+#define TEGRA234_CLK_PLLNVCSI 294U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PVA0_CPU_AXI switch divider output */
+#define TEGRA234_CLK_PVA0_CPU_AXI 295U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PVA0_VPS switch divider output */
+#define TEGRA234_CLK_PVA0_VPS 297U
+/** @brief DLA0_CORE_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA0_CORE 299U
+/** @brief DLA0_FALCON_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA0_FALCON 300U
+/** @brief DLA1_CORE_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA1_CORE 301U
+/** @brief DLA1_FALCON_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA1_FALCON 302U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */
+#define TEGRA234_CLK_AON_UART_FST_MIPI_CAL 303U
+/** @brief GPU system clock */
+#define TEGRA234_CLK_GPUSYS 304U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C5 */
+#define TEGRA234_CLK_I2C5 305U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SE switch divider free running clk */
+#define TEGRA234_CLK_FR_SE 306U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC switch divider output */
+#define TEGRA234_CLK_BPMP_CPU_NIC 307U
+/** @brief output of gate CLK_ENB_BPMP_CPU */
+#define TEGRA234_CLK_BPMP_CPU 308U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_TSC switch divider output */
+#define TEGRA234_CLK_TSC 309U
+/** @brief output of mem pll A sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define TEGRA234_CLK_EMCSA_MPLL 310U
+/** @brief output of mem pll B sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSB */
+#define TEGRA234_CLK_EMCSB_MPLL 311U
+/** @brief output of mem pll C sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSC */
+#define TEGRA234_CLK_EMCSC_MPLL 312U
+/** @brief output of mem pll D sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSD */
+#define TEGRA234_CLK_EMCSD_MPLL 313U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA234_CLK_PLLC 314U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */
+#define TEGRA234_CLK_PLLC2 315U
+/** @brief CLK_RST_CONTROLLER_TSC_HS_SUPER_CLK_DIVIDER skip divider output */
+#define TEGRA234_CLK_TSC_REF 317U
+/** @brief Dummy clock to ensure minimum SoC voltage for fuse burning */
+#define TEGRA234_CLK_FUSE_BURN 318U
+/** @brief GBE PLL */
+#define TEGRA234_CLK_PLLGBE 319U
+/** @brief GBE PLL hardware power sequencer */
+#define TEGRA234_CLK_PLLGBE_HPS 320U
+/** @brief output of EMC CDB side A fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSA_EMC 321U
+/** @brief output of EMC CDB side B fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSB_EMC 322U
+/** @brief output of EMC CDB side C fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSC_EMC 323U
+/** @brief output of EMC CDB side D fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSD_EMC 324U
+/** @brief PLLE hardware power sequencer (overrides 'manual' programming of PLL) */
+#define TEGRA234_CLK_PLLE_HPS 326U
+/** @brief CLK_ENB_PLLREFE_OUT gate output */
+#define TEGRA234_CLK_PLLREFE_VCOOUT_GATED 327U
+/** @brief TEGRA234_CLK_SOR_SAFE clk source (PLLP_OUT0 divided by 17) */
+#define TEGRA234_CLK_PLLP_DIV17 328U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SOC_THERM switch divider output */
+#define TEGRA234_CLK_SOC_THERM 329U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_TSENSE switch divider output */
+#define TEGRA234_CLK_TSENSE 330U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SEU1 switch divider free running clk */
+#define TEGRA234_CLK_FR_SEU1 331U
+/** @brief NAFLL clock source for OFA */
+#define TEGRA234_CLK_NAFLL_OFA 333U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_OFA switch divider output */
+#define TEGRA234_CLK_OFA 334U
+/** @brief NAFLL clock source for SEU1 */
+#define TEGRA234_CLK_NAFLL_SEU1 335U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SEU1 switch divider gated output */
+#define TEGRA234_CLK_SEU1 336U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA234_CLK_SPI4 337U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI5 */
+#define TEGRA234_CLK_SPI5 338U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DCE_CPU_NIC */
+#define TEGRA234_CLK_DCE_CPU_NIC 339U
+/** @brief output of divider CLK_RST_CONTROLLER_DCE_NIC_RATE */
+#define TEGRA234_CLK_DCE_NIC 340U
+/** @brief NAFLL clock source for DCE */
+#define TEGRA234_CLK_NAFLL_DCE 341U
+/** @brief Monitored branch of MPHY_L0_RX_ANA clock */
+#define TEGRA234_CLK_MPHY_L0_RX_ANA_M 342U
+/** @brief Monitored branch of MPHY_L1_RX_ANA clock */
+#define TEGRA234_CLK_MPHY_L1_RX_ANA_M 343U
+/** @brief ungated version of TX symbol clock after fixed 1/2 divider */
+#define TEGRA234_CLK_MPHY_L0_TX_PRE_SYMB 344U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_SYMB_DIV 345U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_2X_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_2X_SYMB 346U
+/** @brief output of SW_MPHY_L0_TX_HS_SYMB divider in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_HS_SYMB_DIV 347U
+/** @brief output of SW_MPHY_L0_TX_LS_3XBIT divider in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_3XBIT_DIV 348U
+/** @brief LS/HS divider mux SW_MPHY_L0_TX_LS_HS_SEL in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_MUX_SYMB_DIV 349U
+/** @brief Monitored branch of MPHY_L0_TX_SYMB clock */
+#define TEGRA234_CLK_MPHY_L0_TX_SYMB_M 350U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_SYMB_DIV 351U
+/** @brief output of SW_MPHY_L0_RX_HS_SYMB divider in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_HS_SYMB_DIV 352U
+/** @brief output of SW_MPHY_L0_RX_LS_BIT divider in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_BIT_DIV 353U
+/** @brief LS/HS divider mux SW_MPHY_L0_RX_LS_HS_SEL in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_MUX_SYMB_DIV 354U
+/** @brief Monitored branch of MPHY_L0_RX_SYMB clock */
+#define TEGRA234_CLK_MPHY_L0_RX_SYMB_M 355U
+/** @brief Monitored branch of MGBE0 RX input clock */
+#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U
+/** @brief Monitored branch of MGBE1 RX input clock */
+#define TEGRA234_CLK_MGBE1_RX_INPUT_M 358U
+/** @brief Monitored branch of MGBE2 RX input clock */
+#define TEGRA234_CLK_MGBE2_RX_INPUT_M 359U
+/** @brief Monitored branch of MGBE3 RX input clock */
+#define TEGRA234_CLK_MGBE3_RX_INPUT_M 360U
+/** @brief Monitored branch of MGBE0 RX PCS mux output */
+#define TEGRA234_CLK_MGBE0_RX_PCS_M 361U
+/** @brief Monitored branch of MGBE1 RX PCS mux output */
+#define TEGRA234_CLK_MGBE1_RX_PCS_M 362U
+/** @brief Monitored branch of MGBE2 RX PCS mux output */
+#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U
+/** @brief Monitored branch of MGBE3 RX PCS mux output */
+#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH1 */
+#define TEGRA234_CLK_TACH1 365U
+/** @brief GBE_UPHY_MGBES_APP_CLK switch divider gated output */
+#define TEGRA234_CLK_MGBES_APP 366U
+/** @brief Logical clk for setting GBE UPHY PLL2 TX_REF rate */
+#define TEGRA234_CLK_UPHY_GBE_PLL2_TX_REF 367U
+/** @brief Logical clk for setting GBE UPHY PLL2 XDIG rate */
+#define TEGRA234_CLK_UPHY_GBE_PLL2_XDIG 368U
+/** @brief RX PCS clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U
+/** @brief RX PCS clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_PCS_INPUT 370U
+/** @brief RX PCS clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_PCS_INPUT 371U
+/** @brief RX PCS clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_PCS_INPUT 372U
+/** @brief output of mux controlled by GBE_UPHY_MGBE0_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE0_RX_PCS 373U
+/** @brief GBE_UPHY_MGBE0_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX 374U
+/** @brief GBE_UPHY_MGBE0_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX_PCS 375U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE0_MAC_DIVIDER 376U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MAC 377U
+/** @brief GBE_UPHY_MGBE0_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MACSEC 378U
+/** @brief GBE_UPHY_MGBE0_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE0_EEE_PCS 379U
+/** @brief GBE_UPHY_MGBE0_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE0_APP 380U
+/** @brief GBE_UPHY_MGBE0_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_PTP_REF 381U
+/** @brief output of mux controlled by GBE_UPHY_MGBE1_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE1_RX_PCS 382U
+/** @brief GBE_UPHY_MGBE1_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX 383U
+/** @brief GBE_UPHY_MGBE1_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX_PCS 384U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MAC 386U
+/** @brief GBE_UPHY_MGBE1_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MACSEC 387U
+/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE1_EEE_PCS 388U
+/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE1_APP 389U
+/** @brief GBE_UPHY_MGBE1_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_PTP_REF 390U
+/** @brief output of mux controlled by GBE_UPHY_MGBE2_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE2_RX_PCS 391U
+/** @brief GBE_UPHY_MGBE2_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX 392U
+/** @brief GBE_UPHY_MGBE2_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX_PCS 393U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MAC 395U
+/** @brief GBE_UPHY_MGBE2_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MACSEC 396U
+/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE2_EEE_PCS 397U
+/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE2_APP 398U
+/** @brief GBE_UPHY_MGBE2_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_PTP_REF 399U
+/** @brief output of mux controlled by GBE_UPHY_MGBE3_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE3_RX_PCS 400U
+/** @brief GBE_UPHY_MGBE3_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX 401U
+/** @brief GBE_UPHY_MGBE3_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX_PCS 402U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE3_MAC_DIVIDER 403U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MAC 404U
+/** @brief GBE_UPHY_MGBE3_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MACSEC 405U
+/** @brief GBE_UPHY_MGBE3_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE3_EEE_PCS 406U
+/** @brief GBE_UPHY_MGBE3_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE3_APP 407U
+/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_PTP_REF 408U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_RX_BYP switch divider output */
+#define TEGRA234_CLK_GBE_RX_BYP_REF 409U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL0_MGMT 410U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL1_MGMT 411U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL2_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL2_MGMT 412U
+/** @brief output of gate CLK_ENB_EQOS_MACSEC_RX */
+#define TEGRA234_CLK_EQOS_MACSEC_RX 413U
+/** @brief output of gate CLK_ENB_EQOS_MACSEC_TX */
+#define TEGRA234_CLK_EQOS_MACSEC_TX 414U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK divider ungated output */
+#define TEGRA234_CLK_EQOS_TX_DIVIDER 415U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_NVHS_PLL1_MGMT 416U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EMCHUB mux output */
+#define TEGRA234_CLK_EMCHUB 417U
+/** @brief clock recovered from I2S7 input */
+#define TEGRA234_CLK_I2S7_SYNC_INPUT 418U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S7 */
+#define TEGRA234_CLK_SYNC_I2S7 419U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S7 */
+#define TEGRA234_CLK_I2S7 420U
+/** @brief Monitored output of I2S7 pad macro mux */
+#define TEGRA234_CLK_I2S7_PAD_M 421U
+/** @brief clock recovered from I2S8 input */
+#define TEGRA234_CLK_I2S8_SYNC_INPUT 422U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S8 */
+#define TEGRA234_CLK_SYNC_I2S8 423U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S8 */
+#define TEGRA234_CLK_I2S8 424U
+/** @brief Monitored output of I2S8 pad macro mux */
+#define TEGRA234_CLK_I2S8_PAD_M 425U
+/** @brief NAFLL clock source for GPU GPC0 */
+#define TEGRA234_CLK_NAFLL_GPC0 426U
+/** @brief NAFLL clock source for GPU GPC1 */
+#define TEGRA234_CLK_NAFLL_GPC1 427U
+/** @brief NAFLL clock source for GPU SYSCLK */
+#define TEGRA234_CLK_NAFLL_GPUSYS 428U
+/** @brief NAFLL clock source for CPU cluster 0 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU0 429U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER0_DSU 429U
+/** @brief NAFLL clock source for CPU cluster 1 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU1 430U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER1_DSU 430U
+/** @brief NAFLL clock source for CPU cluster 2 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU2 431U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER2_DSU 431U
+/** @brief output of gate CLK_ENB_SCE_CPU */
+#define TEGRA234_CLK_SCE_CPU 432U
+/** @brief output of gate CLK_ENB_RCE_CPU */
+#define TEGRA234_CLK_RCE_CPU 433U
+/** @brief output of gate CLK_ENB_DCE_CPU */
+#define TEGRA234_CLK_DCE_CPU 434U
+/** @brief DSIPLL VCO output */
+#define TEGRA234_CLK_DSIPLL_VCO 435U
+/** @brief DSIPLL SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_DSIPLL_CLKOUTPN 436U
+/** @brief DSIPLL SYNC_CLKOUTA output */
+#define TEGRA234_CLK_DSIPLL_CLKOUTA 437U
+/** @brief SPPLL0 VCO output */
+#define TEGRA234_CLK_SPPLL0_VCO 438U
+/** @brief SPPLL0 SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTPN 439U
+/** @brief SPPLL0 SYNC_CLKOUTA output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTA 440U
+/** @brief SPPLL0 SYNC_CLKOUTB output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTB 441U
+/** @brief SPPLL0 CLKOUT_DIVBY10 output */
+#define TEGRA234_CLK_SPPLL0_DIV10 442U
+/** @brief SPPLL0 CLKOUT_DIVBY25 output */
+#define TEGRA234_CLK_SPPLL0_DIV25 443U
+/** @brief SPPLL0 CLKOUT_DIVBY27P/N differential output */
+#define TEGRA234_CLK_SPPLL0_DIV27PN 444U
+/** @brief SPPLL1 VCO output */
+#define TEGRA234_CLK_SPPLL1_VCO 445U
+/** @brief SPPLL1 SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_SPPLL1_CLKOUTPN 446U
+/** @brief SPPLL1 CLKOUT_DIVBY27P/N differential output */
+#define TEGRA234_CLK_SPPLL1_DIV27PN 447U
+/** @brief VPLL0 reference clock */
+#define TEGRA234_CLK_VPLL0_REF 448U
+/** @brief VPLL0 */
+#define TEGRA234_CLK_VPLL0 449U
+/** @brief VPLL1 */
+#define TEGRA234_CLK_VPLL1 450U
+/** @brief NVDISPLAY_P0_CLK reference select */
+#define TEGRA234_CLK_NVDISPLAY_P0_REF 451U
+/** @brief RG0_PCLK */
+#define TEGRA234_CLK_RG0 452U
+/** @brief RG1_PCLK */
+#define TEGRA234_CLK_RG1 453U
+/** @brief DISPPLL output */
+#define TEGRA234_CLK_DISPPLL 454U
+/** @brief DISPHUBPLL output */
+#define TEGRA234_CLK_DISPHUBPLL 455U
+/** @brief CLK_RST_CONTROLLER_DSI_LP_SWITCH_DIVIDER switch divider output (dsi_lp_clk) */
+#define TEGRA234_CLK_DSI_LP 456U
+/** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */
+#define TEGRA234_CLK_AZA_2XBIT 457U
+/** @brief aza_2xbitclk / 2 (aza_bitclk) */
+#define TEGRA234_CLK_AZA_BIT 458U
+/** @brief SWITCH_DSI_CORE_PIXEL_MISC_DSI_CORE_CLK_SRC switch output (dsi_core_clk) */
+#define TEGRA234_CLK_DSI_CORE 459U
+/** @brief Output of mux controlled by pkt_wr_fifo_signal from dsi (dsi_pixel_clk) */
+#define TEGRA234_CLK_DSI_PIXEL 460U
+/** @brief Output of mux controlled by disp_2clk_sor0_dp_sel (pre_sor0_clk) */
+#define TEGRA234_CLK_PRE_SOR0 461U
+/** @brief Output of mux controlled by disp_2clk_sor1_dp_sel (pre_sor1_clk) */
+#define TEGRA234_CLK_PRE_SOR1 462U
+/** @brief CLK_RST_CONTROLLER_LINK_REFCLK_CFG__0 output */
+#define TEGRA234_CLK_DP_LINK_REF 463U
+/** @brief Link clock input from DP macro brick PLL */
+#define TEGRA234_CLK_SOR_LINKA_INPUT 464U
+/** @brief SOR AFIFO clock output */
+#define TEGRA234_CLK_SOR_LINKA_AFIFO 465U
+/** @brief Monitored branch of linka_afifo_clk */
+#define TEGRA234_CLK_SOR_LINKA_AFIFO_M 466U
+/** @brief Monitored branch of rg0_pclk */
+#define TEGRA234_CLK_RG0_M 467U
+/** @brief Monitored branch of rg1_pclk */
+#define TEGRA234_CLK_RG1_M 468U
+/** @brief Monitored branch of sor0_clk */
+#define TEGRA234_CLK_SOR0_M 469U
+/** @brief Monitored branch of sor1_clk */
+#define TEGRA234_CLK_SOR1_M 470U
+/** @brief EMC PLLHUB output */
+#define TEGRA234_CLK_PLLHUB 471U
+/** @brief output of fixed (DIV2) MC HUB divider */
+#define TEGRA234_CLK_MCHUB 472U
+/** @brief output of divider controlled by EMC side A MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSA_MC 473U
+/** @brief output of divider controlled by EMC side B MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSB_MC 474U
+/** @brief output of divider controlled by EMC side C MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSC_MC 475U
+/** @brief output of divider controlled by EMC side D MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSD_MC 476U
+
+/** @} */
+
+#endif
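
These Tegra234 clock IDs are consumed from device tree sources. On Tegra234 the clocks are exposed by the BPMP firmware node, so a consumer passes an ID through the BPMP phandle. A minimal sketch, assuming a &bpmp provider label and an illustrative UART node (neither is part of this patch):

#include <dt-bindings/clock/tegra234-clock.h>

serial@3100000 {
	/* UARTA module clock, supplied by the BPMP clock provider */
	clocks = <&bpmp TEGRA234_CLK_UARTA>;
	clock-names = "serial";
};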
diff --git a/include/dt-bindings/clock/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h
index 42dd4164f6f4..b0e71e3cce95 100644
--- a/include/dt-bindings/clock/ti-dra7-atl.h
+++ b/include/dt-bindings/clock/ti-dra7-atl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for DRA7 ATL (Audio Tracking Logic)
*
@@ -6,15 +7,6 @@
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H
diff --git a/include/dt-bindings/clock/toshiba,tmpv770x.h b/include/dt-bindings/clock/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..5fce713001fd
--- /dev/null
+++ b/include/dt-bindings/clock/toshiba,tmpv770x.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+
+/* PLL */
+#define TMPV770X_PLL_PIPLL0 0
+#define TMPV770X_PLL_PIPLL1 1
+#define TMPV770X_PLL_PIDNNPLL 2
+#define TMPV770X_PLL_PIETHERPLL 3
+#define TMPV770X_PLL_PIDDRCPLL 4
+#define TMPV770X_PLL_PIVOIFPLL 5
+#define TMPV770X_PLL_PIIMGERPLL 6
+#define TMPV770X_NR_PLL 7
+
+/* Clocks */
+#define TMPV770X_CLK_PIPLL1_DIV1 0
+#define TMPV770X_CLK_PIPLL1_DIV2 1
+#define TMPV770X_CLK_PIPLL1_DIV4 2
+#define TMPV770X_CLK_PIDNNPLL_DIV1 3
+#define TMPV770X_CLK_DDRC_PHY_PLL0 4
+#define TMPV770X_CLK_DDRC_PHY_PLL1 5
+#define TMPV770X_CLK_D_PHYPLL 6
+#define TMPV770X_CLK_PHY_PCIEPLL 7
+#define TMPV770X_CLK_CA53CL0 8
+#define TMPV770X_CLK_CA53CL1 9
+#define TMPV770X_CLK_PISDMAC 10
+#define TMPV770X_CLK_PIPDMAC0 11
+#define TMPV770X_CLK_PIPDMAC1 12
+#define TMPV770X_CLK_PIWRAM 13
+#define TMPV770X_CLK_DDRC0 14
+#define TMPV770X_CLK_DDRC0_SCLK 15
+#define TMPV770X_CLK_DDRC0_NCLK 16
+#define TMPV770X_CLK_DDRC0_MCLK 17
+#define TMPV770X_CLK_DDRC0_APBCLK 18
+#define TMPV770X_CLK_DDRC1 19
+#define TMPV770X_CLK_DDRC1_SCLK 20
+#define TMPV770X_CLK_DDRC1_NCLK 21
+#define TMPV770X_CLK_DDRC1_MCLK 22
+#define TMPV770X_CLK_DDRC1_APBCLK 23
+#define TMPV770X_CLK_HOX 24
+#define TMPV770X_CLK_PCIE_MSTR 25
+#define TMPV770X_CLK_PCIE_AUX 26
+#define TMPV770X_CLK_PIINTC 27
+#define TMPV770X_CLK_PIETHER_BUS 28
+#define TMPV770X_CLK_PISPI0 29
+#define TMPV770X_CLK_PISPI1 30
+#define TMPV770X_CLK_PISPI2 31
+#define TMPV770X_CLK_PISPI3 32
+#define TMPV770X_CLK_PISPI4 33
+#define TMPV770X_CLK_PISPI5 34
+#define TMPV770X_CLK_PISPI6 35
+#define TMPV770X_CLK_PIUART0 36
+#define TMPV770X_CLK_PIUART1 37
+#define TMPV770X_CLK_PIUART2 38
+#define TMPV770X_CLK_PIUART3 39
+#define TMPV770X_CLK_PII2C0 40
+#define TMPV770X_CLK_PII2C1 41
+#define TMPV770X_CLK_PII2C2 42
+#define TMPV770X_CLK_PII2C3 43
+#define TMPV770X_CLK_PII2C4 44
+#define TMPV770X_CLK_PII2C5 45
+#define TMPV770X_CLK_PII2C6 46
+#define TMPV770X_CLK_PII2C7 47
+#define TMPV770X_CLK_PII2C8 48
+#define TMPV770X_CLK_PIGPIO 49
+#define TMPV770X_CLK_PIPGM 50
+#define TMPV770X_CLK_PIPCMIF 51
+#define TMPV770X_CLK_PIPCMIF_AUDIO_O 52
+#define TMPV770X_CLK_PIPCMIF_AUDIO_I 53
+#define TMPV770X_CLK_PICMPT0 54
+#define TMPV770X_CLK_PICMPT1 55
+#define TMPV770X_CLK_PITSC 56
+#define TMPV770X_CLK_PIUWDT 57
+#define TMPV770X_CLK_PISWDT 58
+#define TMPV770X_CLK_WDTCLK 59
+#define TMPV770X_CLK_PISUBUS_150M 60
+#define TMPV770X_CLK_PISUBUS_300M 61
+#define TMPV770X_CLK_PIPMU 62
+#define TMPV770X_CLK_PIGPMU 63
+#define TMPV770X_CLK_PITMU 64
+#define TMPV770X_CLK_WRCK 65
+#define TMPV770X_CLK_PIEMM 66
+#define TMPV770X_CLK_PIMISC 67
+#define TMPV770X_CLK_PIGCOMM 68
+#define TMPV770X_CLK_PIDCOMM 69
+#define TMPV770X_CLK_PICKMON 70
+#define TMPV770X_CLK_PIMBUS 71
+#define TMPV770X_CLK_SBUSCLK 72
+#define TMPV770X_CLK_DDR0_APBCLKCLK 73
+#define TMPV770X_CLK_DDR1_APBCLKCLK 74
+#define TMPV770X_CLK_DSP0_PBCLK 75
+#define TMPV770X_CLK_DSP1_PBCLK 76
+#define TMPV770X_CLK_DSP2_PBCLK 77
+#define TMPV770X_CLK_DSP3_PBCLK 78
+#define TMPV770X_CLK_DSVIIF0_APBCLK 79
+#define TMPV770X_CLK_VIIF0_APBCLK 80
+#define TMPV770X_CLK_VIIF0_CFGCLK 81
+#define TMPV770X_CLK_VIIF1_APBCLK 82
+#define TMPV770X_CLK_VIIF1_CFGCLK 83
+#define TMPV770X_CLK_VIIF2_APBCLK 84
+#define TMPV770X_CLK_VIIF2_CFGCLK 85
+#define TMPV770X_CLK_VIIF3_APBCLK 86
+#define TMPV770X_CLK_VIIF3_CFGCLK 87
+#define TMPV770X_CLK_VIIF4_APBCLK 88
+#define TMPV770X_CLK_VIIF4_CFGCLK 89
+#define TMPV770X_CLK_VIIF5_APBCLK 90
+#define TMPV770X_CLK_VIIF5_CFGCLK 91
+#define TMPV770X_CLK_VOIF_SBUSCLK 92
+#define TMPV770X_CLK_VOIF_PROCCLK 93
+#define TMPV770X_CLK_VOIF_DPHYCFGCLK 94
+#define TMPV770X_CLK_DNN0 95
+#define TMPV770X_CLK_STMAT 96
+#define TMPV770X_CLK_HWA0 97
+#define TMPV770X_CLK_AFFINE0 98
+#define TMPV770X_CLK_HAMAT 99
+#define TMPV770X_CLK_SMLDB 100
+#define TMPV770X_CLK_HWA0_ASYNC 101
+#define TMPV770X_CLK_HWA2 102
+#define TMPV770X_CLK_FLMAT 103
+#define TMPV770X_CLK_PYRAMID 104
+#define TMPV770X_CLK_HWA2_ASYNC 105
+#define TMPV770X_CLK_DSP0 106
+#define TMPV770X_CLK_VIIFBS0 107
+#define TMPV770X_CLK_VIIFBS0_L2ISP 108
+#define TMPV770X_CLK_VIIFBS0_L1ISP 109
+#define TMPV770X_CLK_VIIFBS0_PROC 110
+#define TMPV770X_CLK_VIIFBS1 111
+#define TMPV770X_CLK_VIIFBS2 112
+#define TMPV770X_CLK_VIIFOP_MBUS 113
+#define TMPV770X_CLK_VIIFOP0_PROC 114
+#define TMPV770X_CLK_PIETHER_2P5M 115
+#define TMPV770X_CLK_PIETHER_25M 116
+#define TMPV770X_CLK_PIETHER_50M 117
+#define TMPV770X_CLK_PIETHER_125M 118
+#define TMPV770X_CLK_VOIF0_DPHYCFG 119
+#define TMPV770X_CLK_VOIF0_PROC 120
+#define TMPV770X_CLK_VOIF0_SBUS 121
+#define TMPV770X_CLK_VOIF0_DSIREF 122
+#define TMPV770X_CLK_VOIF0_PIXEL 123
+#define TMPV770X_CLK_PIREFCLK 124
+#define TMPV770X_CLK_SBUS 125
+#define TMPV770X_CLK_BUSLCK 126
+#define TMPV770X_NR_CLK 127
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_NR_RESET 32
+
+#endif /*_DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_ */
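
The TMPV770x values above are indices into the SoC's clock and reset provider rather than register values. A hedged consumer sketch, assuming a &pismu provider label and an illustrative UART node:

#include <dt-bindings/clock/toshiba,tmpv770x.h>

serial@28200000 {
	clocks = <&pismu TMPV770X_CLK_PIUART0>;
	resets = <&pismu TMPV770X_RESET_PIUART0>;
};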
diff --git a/include/dt-bindings/clk/versaclock.h b/include/dt-bindings/clock/versaclock.h
index c6a6a0946564..c6a6a0946564 100644
--- a/include/dt-bindings/clk/versaclock.h
+++ b/include/dt-bindings/clock/versaclock.h
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 0f2d60e884dc..373644e46747 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -196,6 +196,7 @@
#define VF610_CLK_TCON0 187
#define VF610_CLK_TCON1 188
#define VF610_CLK_CAAM 189
-#define VF610_CLK_END 190
+#define VF610_CLK_CRC 190
+#define VF610_CLK_END 191
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/clock/xlnx-vcu.h b/include/dt-bindings/clock/xlnx-vcu.h
new file mode 100644
index 000000000000..1ed76b9563b6
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx-vcu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_XLNX_VCU_H
+#define _DT_BINDINGS_CLOCK_XLNX_VCU_H
+
+#define CLK_XVCU_ENC_CORE 0
+#define CLK_XVCU_ENC_MCU 1
+#define CLK_XVCU_DEC_CORE 2
+#define CLK_XVCU_DEC_MCU 3
+#define CLK_XVCU_NUM_CLOCKS 4
+
+#endif /* _DT_BINDINGS_CLOCK_XLNX_VCU_H */
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
deleted file mode 100644
index e04126111aae..000000000000
--- a/include/dt-bindings/clock/zx296702-clock.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#ifndef __DT_BINDINGS_CLOCK_ZX296702_H
-#define __DT_BINDINGS_CLOCK_ZX296702_H
-
-#define ZX296702_OSC 0
-#define ZX296702_PLL_A9 1
-#define ZX296702_PLL_A9_350M 2
-#define ZX296702_PLL_MAC_1000M 3
-#define ZX296702_PLL_MAC_333M 4
-#define ZX296702_PLL_MM0_1188M 5
-#define ZX296702_PLL_MM0_396M 6
-#define ZX296702_PLL_MM0_198M 7
-#define ZX296702_PLL_MM1_108M 8
-#define ZX296702_PLL_MM1_72M 9
-#define ZX296702_PLL_MM1_54M 10
-#define ZX296702_PLL_LSP_104M 11
-#define ZX296702_PLL_LSP_26M 12
-#define ZX296702_PLL_AUDIO_294M912 13
-#define ZX296702_PLL_DDR_266M 14
-#define ZX296702_CLK_148M5 15
-#define ZX296702_MATRIX_ACLK 16
-#define ZX296702_MAIN_HCLK 17
-#define ZX296702_MAIN_PCLK 18
-#define ZX296702_CLK_500 19
-#define ZX296702_CLK_250 20
-#define ZX296702_CLK_125 21
-#define ZX296702_CLK_74M25 22
-#define ZX296702_A9_WCLK 23
-#define ZX296702_A9_AS1_ACLK_MUX 24
-#define ZX296702_A9_TRACE_CLKIN_MUX 25
-#define ZX296702_A9_AS1_ACLK_DIV 26
-#define ZX296702_CLK_2 27
-#define ZX296702_CLK_27 28
-#define ZX296702_DECPPU_ACLK_MUX 29
-#define ZX296702_PPU_ACLK_MUX 30
-#define ZX296702_MALI400_ACLK_MUX 31
-#define ZX296702_VOU_ACLK_MUX 32
-#define ZX296702_VOU_MAIN_WCLK_MUX 33
-#define ZX296702_VOU_AUX_WCLK_MUX 34
-#define ZX296702_VOU_SCALER_WCLK_MUX 35
-#define ZX296702_R2D_ACLK_MUX 36
-#define ZX296702_R2D_WCLK_MUX 37
-#define ZX296702_CLK_50 38
-#define ZX296702_CLK_25 39
-#define ZX296702_CLK_12 40
-#define ZX296702_CLK_16M384 41
-#define ZX296702_CLK_32K768 42
-#define ZX296702_SEC_WCLK_DIV 43
-#define ZX296702_DDR_WCLK_MUX 44
-#define ZX296702_NAND_WCLK_MUX 45
-#define ZX296702_LSP_26_WCLK_MUX 46
-#define ZX296702_A9_AS0_ACLK 47
-#define ZX296702_A9_AS1_ACLK 48
-#define ZX296702_A9_TRACE_CLKIN 49
-#define ZX296702_DECPPU_AXI_M_ACLK 50
-#define ZX296702_DECPPU_AHB_S_HCLK 51
-#define ZX296702_PPU_AXI_M_ACLK 52
-#define ZX296702_PPU_AHB_S_HCLK 53
-#define ZX296702_VOU_AXI_M_ACLK 54
-#define ZX296702_VOU_APB_PCLK 55
-#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56
-#define ZX296702_VOU_AUX_CHANNEL_WCLK 57
-#define ZX296702_VOU_HDMI_OSCLK_CEC 58
-#define ZX296702_VOU_SCALER_WCLK 59
-#define ZX296702_MALI400_AXI_M_ACLK 60
-#define ZX296702_MALI400_APB_PCLK 61
-#define ZX296702_R2D_WCLK 62
-#define ZX296702_R2D_AXI_M_ACLK 63
-#define ZX296702_R2D_AHB_HCLK 64
-#define ZX296702_DDR3_AXI_S0_ACLK 65
-#define ZX296702_DDR3_APB_PCLK 66
-#define ZX296702_DDR3_WCLK 67
-#define ZX296702_USB20_0_AHB_HCLK 68
-#define ZX296702_USB20_0_EXTREFCLK 69
-#define ZX296702_USB20_1_AHB_HCLK 70
-#define ZX296702_USB20_1_EXTREFCLK 71
-#define ZX296702_USB20_2_AHB_HCLK 72
-#define ZX296702_USB20_2_EXTREFCLK 73
-#define ZX296702_GMAC_AXI_M_ACLK 74
-#define ZX296702_GMAC_APB_PCLK 75
-#define ZX296702_GMAC_125_CLKIN 76
-#define ZX296702_GMAC_RMII_CLKIN 77
-#define ZX296702_GMAC_25M_CLK 78
-#define ZX296702_NANDFLASH_AHB_HCLK 79
-#define ZX296702_NANDFLASH_WCLK 80
-#define ZX296702_LSP0_APB_PCLK 81
-#define ZX296702_LSP0_AHB_HCLK 82
-#define ZX296702_LSP0_26M_WCLK 83
-#define ZX296702_LSP0_104M_WCLK 84
-#define ZX296702_LSP0_16M384_WCLK 85
-#define ZX296702_LSP1_APB_PCLK 86
-#define ZX296702_LSP1_26M_WCLK 87
-#define ZX296702_LSP1_104M_WCLK 88
-#define ZX296702_LSP1_32K_CLK 89
-#define ZX296702_AON_HCLK 90
-#define ZX296702_SYS_CTRL_PCLK 91
-#define ZX296702_DMA_PCLK 92
-#define ZX296702_DMA_ACLK 93
-#define ZX296702_SEC_HCLK 94
-#define ZX296702_AES_WCLK 95
-#define ZX296702_DES_WCLK 96
-#define ZX296702_IRAM_ACLK 97
-#define ZX296702_IROM_ACLK 98
-#define ZX296702_BOOT_CTRL_HCLK 99
-#define ZX296702_EFUSE_CLK_30 100
-#define ZX296702_VOU_MAIN_CHANNEL_DIV 101
-#define ZX296702_VOU_AUX_CHANNEL_DIV 102
-#define ZX296702_VOU_TV_ENC_HD_DIV 103
-#define ZX296702_VOU_TV_ENC_SD_DIV 104
-#define ZX296702_VL0_MUX 105
-#define ZX296702_VL1_MUX 106
-#define ZX296702_VL2_MUX 107
-#define ZX296702_GL0_MUX 108
-#define ZX296702_GL1_MUX 109
-#define ZX296702_GL2_MUX 110
-#define ZX296702_WB_MUX 111
-#define ZX296702_HDMI_MUX 112
-#define ZX296702_VOU_TV_ENC_HD_MUX 113
-#define ZX296702_VOU_TV_ENC_SD_MUX 114
-#define ZX296702_VL0_CLK 115
-#define ZX296702_VL1_CLK 116
-#define ZX296702_VL2_CLK 117
-#define ZX296702_GL0_CLK 118
-#define ZX296702_GL1_CLK 119
-#define ZX296702_GL2_CLK 120
-#define ZX296702_WB_CLK 121
-#define ZX296702_CL_CLK 122
-#define ZX296702_MAIN_MIX_CLK 123
-#define ZX296702_AUX_MIX_CLK 124
-#define ZX296702_HDMI_CLK 125
-#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126
-#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127
-#define ZX296702_A9_PERIPHCLK 128
-#define ZX296702_TOPCLK_END 129
-
-#define ZX296702_SDMMC1_WCLK_MUX 0
-#define ZX296702_SDMMC1_WCLK_DIV 1
-#define ZX296702_SDMMC1_WCLK 2
-#define ZX296702_SDMMC1_PCLK 3
-#define ZX296702_SPDIF0_WCLK_MUX 4
-#define ZX296702_SPDIF0_WCLK 5
-#define ZX296702_SPDIF0_PCLK 6
-#define ZX296702_SPDIF0_DIV 7
-#define ZX296702_I2S0_WCLK_MUX 8
-#define ZX296702_I2S0_WCLK 9
-#define ZX296702_I2S0_PCLK 10
-#define ZX296702_I2S0_DIV 11
-#define ZX296702_I2S1_WCLK_MUX 12
-#define ZX296702_I2S1_WCLK 13
-#define ZX296702_I2S1_PCLK 14
-#define ZX296702_I2S1_DIV 15
-#define ZX296702_I2S2_WCLK_MUX 16
-#define ZX296702_I2S2_WCLK 17
-#define ZX296702_I2S2_PCLK 18
-#define ZX296702_I2S2_DIV 19
-#define ZX296702_GPIO_CLK 20
-#define ZX296702_LSP0CLK_END 21
-
-#define ZX296702_UART0_WCLK_MUX 0
-#define ZX296702_UART0_WCLK 1
-#define ZX296702_UART0_PCLK 2
-#define ZX296702_UART1_WCLK_MUX 3
-#define ZX296702_UART1_WCLK 4
-#define ZX296702_UART1_PCLK 5
-#define ZX296702_SDMMC0_WCLK_MUX 6
-#define ZX296702_SDMMC0_WCLK_DIV 7
-#define ZX296702_SDMMC0_WCLK 8
-#define ZX296702_SDMMC0_PCLK 9
-#define ZX296702_SPDIF1_WCLK_MUX 10
-#define ZX296702_SPDIF1_WCLK 11
-#define ZX296702_SPDIF1_PCLK 12
-#define ZX296702_SPDIF1_DIV 13
-#define ZX296702_LSP1CLK_END 14
-
-#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h
deleted file mode 100644
index bf2ff6d2ee23..000000000000
--- a/include/dt-bindings/clock/zx296718-clock.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 - 2016 ZTE Corporation.
- */
-#ifndef __DT_BINDINGS_CLOCK_ZX296718_H
-#define __DT_BINDINGS_CLOCK_ZX296718_H
-
-/* PLL */
-#define ZX296718_PLL_CPU 1
-#define ZX296718_PLL_MAC 2
-#define ZX296718_PLL_MM0 3
-#define ZX296718_PLL_MM1 4
-#define ZX296718_PLL_VGA 5
-#define ZX296718_PLL_DDR 6
-#define ZX296718_PLL_AUDIO 7
-#define ZX296718_PLL_HSIC 8
-#define CPU_DBG_GATE 9
-#define A72_GATE 10
-#define CPU_PERI_GATE 11
-#define A53_GATE 12
-#define DDR1_GATE 13
-#define DDR0_GATE 14
-#define SD1_WCLK 15
-#define SD1_AHB 16
-#define SD0_WCLK 17
-#define SD0_AHB 18
-#define EMMC_WCLK 19
-#define EMMC_NAND_AXI 20
-#define NAND_WCLK 21
-#define EMMC_NAND_AHB 22
-#define LSP1_148M5 23
-#define LSP1_99M 24
-#define LSP1_24M 25
-#define LSP0_74M25 26
-#define LSP0_32K 27
-#define LSP0_148M5 28
-#define LSP0_99M 29
-#define LSP0_24M 30
-#define DEMUX_AXI 31
-#define DEMUX_APB 32
-#define DEMUX_148M5 33
-#define DEMUX_108M 34
-#define AUDIO_APB 35
-#define AUDIO_99M 36
-#define AUDIO_24M 37
-#define AUDIO_16M384 38
-#define AUDIO_32K 39
-#define WDT_WCLK 40
-#define TIMER_WCLK 41
-#define VDE_ACLK 42
-#define VCE_ACLK 43
-#define HDE_ACLK 44
-#define GPU_ACLK 45
-#define SAPPU_ACLK 46
-#define SAPPU_WCLK 47
-#define VOU_ACLK 48
-#define VOU_MAIN_WCLK 49
-#define VOU_AUX_WCLK 50
-#define VOU_PPU_WCLK 51
-#define MIPI_CFG_CLK 52
-#define VGA_I2C_WCLK 53
-#define MIPI_REF_CLK 54
-#define HDMI_OSC_CEC 55
-#define HDMI_OSC_CLK 56
-#define HDMI_XCLK 57
-#define VIU_M0_ACLK 58
-#define VIU_M1_ACLK 59
-#define VIU_WCLK 60
-#define VIU_JPEG_WCLK 61
-#define VIU_CFG_CLK 62
-#define TS_SYS_WCLK 63
-#define TS_SYS_108M 64
-#define USB20_HCLK 65
-#define USB20_PHY_CLK 66
-#define USB21_HCLK 67
-#define USB21_PHY_CLK 68
-#define GMAC_RMIICLK 69
-#define GMAC_PCLK 70
-#define GMAC_ACLK 71
-#define GMAC_RFCLK 72
-#define TEMPSENSOR_GATE 73
-
-#define TOP_NR_CLKS 74
-
-
-#define LSP0_TIMER3_PCLK 1
-#define LSP0_TIMER3_WCLK 2
-#define LSP0_TIMER4_PCLK 3
-#define LSP0_TIMER4_WCLK 4
-#define LSP0_TIMER5_PCLK 5
-#define LSP0_TIMER5_WCLK 6
-#define LSP0_UART3_PCLK 7
-#define LSP0_UART3_WCLK 8
-#define LSP0_UART1_PCLK 9
-#define LSP0_UART1_WCLK 10
-#define LSP0_UART2_PCLK 11
-#define LSP0_UART2_WCLK 12
-#define LSP0_SPIFC0_PCLK 13
-#define LSP0_SPIFC0_WCLK 14
-#define LSP0_I2C4_PCLK 15
-#define LSP0_I2C4_WCLK 16
-#define LSP0_I2C5_PCLK 17
-#define LSP0_I2C5_WCLK 18
-#define LSP0_SSP0_PCLK 19
-#define LSP0_SSP0_WCLK 20
-#define LSP0_SSP1_PCLK 21
-#define LSP0_SSP1_WCLK 22
-#define LSP0_USIM_PCLK 23
-#define LSP0_USIM_WCLK 24
-#define LSP0_GPIO_PCLK 25
-#define LSP0_GPIO_WCLK 26
-#define LSP0_I2C3_PCLK 27
-#define LSP0_I2C3_WCLK 28
-
-#define LSP0_NR_CLKS 29
-
-
-#define LSP1_UART4_PCLK 1
-#define LSP1_UART4_WCLK 2
-#define LSP1_UART5_PCLK 3
-#define LSP1_UART5_WCLK 4
-#define LSP1_PWM_PCLK 5
-#define LSP1_PWM_WCLK 6
-#define LSP1_I2C2_PCLK 7
-#define LSP1_I2C2_WCLK 8
-#define LSP1_SSP2_PCLK 9
-#define LSP1_SSP2_WCLK 10
-#define LSP1_SSP3_PCLK 11
-#define LSP1_SSP3_WCLK 12
-#define LSP1_SSP4_PCLK 13
-#define LSP1_SSP4_WCLK 14
-#define LSP1_USIM1_PCLK 15
-#define LSP1_USIM1_WCLK 16
-
-#define LSP1_NR_CLKS 17
-
-
-#define AUDIO_I2S0_WCLK 1
-#define AUDIO_I2S0_PCLK 2
-#define AUDIO_I2S1_WCLK 3
-#define AUDIO_I2S1_PCLK 4
-#define AUDIO_I2S2_WCLK 5
-#define AUDIO_I2S2_PCLK 6
-#define AUDIO_I2S3_WCLK 7
-#define AUDIO_I2S3_PCLK 8
-#define AUDIO_I2C0_WCLK 9
-#define AUDIO_I2C0_PCLK 10
-#define AUDIO_SPDIF0_WCLK 11
-#define AUDIO_SPDIF0_PCLK 12
-#define AUDIO_SPDIF1_WCLK 13
-#define AUDIO_SPDIF1_PCLK 14
-#define AUDIO_TIMER_WCLK 15
-#define AUDIO_TIMER_PCLK 16
-#define AUDIO_TDM_WCLK 17
-#define AUDIO_TDM_PCLK 18
-#define AUDIO_TS_PCLK 19
-#define I2S0_WCLK_MUX 20
-#define I2S1_WCLK_MUX 21
-#define I2S2_WCLK_MUX 22
-#define I2S3_WCLK_MUX 23
-
-#define AUDIO_NR_CLKS 24
-
-#endif
diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h
index fbc1a3db2ea7..8249a2b47b79 100644
--- a/include/dt-bindings/display/sdtv-standards.h
+++ b/include/dt-bindings/display/sdtv-standards.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only or X11 */
+/* SPDX-License-Identifier: GPL-2.0-only OR X11 */
/*
* Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
*/
diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
new file mode 100644
index 000000000000..fd11478cfe9c
--- /dev/null
+++ b/include/dt-bindings/dma/fsl-edma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _FSL_EDMA_DT_BINDING_H_
+#define _FSL_EDMA_DT_BINDING_H_
+
+/* Receive Channel */
+#define FSL_EDMA_RX 0x1
+
+/* i.MX8 audio remote DMA */
+#define FSL_EDMA_REMOTE 0x2
+
+/* FIFO is a contiguous memory region */
+#define FSL_EDMA_MULTI_FIFO 0x4
+
+/* Channel must be allocated on an even-numbered channel */
+#define FSL_EDMA_EVEN_CH 0x8
+
+/* Channel must be allocated on an odd-numbered channel */
+#define FSL_EDMA_ODD_CH 0x10
+
+#endif
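
The FSL_EDMA_* values are bit flags, so a client ORs them together in the flags cell of its DMA specifier. A minimal sketch, assuming the three-cell specifier form (request id, priority, flags) and illustrative node and label names; the eDMA binding document is authoritative for the exact cell layout:

#include <dt-bindings/dma/fsl-edma.h>

audio@59040000 {
	/* Multi-FIFO transfers; the RX flag marks the receive channel */
	dmas = <&edma0 17 0 (FSL_EDMA_MULTI_FIFO | FSL_EDMA_RX)>,
	       <&edma0 16 0 FSL_EDMA_MULTI_FIFO>;
	dma-names = "rx", "tx";
};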
diff --git a/include/dt-bindings/dma/jz4775-dma.h b/include/dt-bindings/dma/jz4775-dma.h
new file mode 100644
index 000000000000..8d27e2c69dca
--- /dev/null
+++ b/include/dt-bindings/dma/jz4775-dma.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for JZ4775 DMA bindings.
+ *
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_JZ4775_DMA_H__
+#define __DT_BINDINGS_DMA_JZ4775_DMA_H__
+
+/*
+ * Request type numbers for the JZ4775 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define JZ4775_DMA_I2S0_TX 0x6
+#define JZ4775_DMA_I2S0_RX 0x7
+#define JZ4775_DMA_AUTO 0x8
+#define JZ4775_DMA_SADC_RX 0x9
+#define JZ4775_DMA_UART3_TX 0x0e
+#define JZ4775_DMA_UART3_RX 0x0f
+#define JZ4775_DMA_UART2_TX 0x10
+#define JZ4775_DMA_UART2_RX 0x11
+#define JZ4775_DMA_UART1_TX 0x12
+#define JZ4775_DMA_UART1_RX 0x13
+#define JZ4775_DMA_UART0_TX 0x14
+#define JZ4775_DMA_UART0_RX 0x15
+#define JZ4775_DMA_SSI0_TX 0x16
+#define JZ4775_DMA_SSI0_RX 0x17
+#define JZ4775_DMA_MSC0_TX 0x1a
+#define JZ4775_DMA_MSC0_RX 0x1b
+#define JZ4775_DMA_MSC1_TX 0x1c
+#define JZ4775_DMA_MSC1_RX 0x1d
+#define JZ4775_DMA_MSC2_TX 0x1e
+#define JZ4775_DMA_MSC2_RX 0x1f
+#define JZ4775_DMA_PCM0_TX 0x20
+#define JZ4775_DMA_PCM0_RX 0x21
+#define JZ4775_DMA_SMB0_TX 0x24
+#define JZ4775_DMA_SMB0_RX 0x25
+#define JZ4775_DMA_SMB1_TX 0x26
+#define JZ4775_DMA_SMB1_RX 0x27
+#define JZ4775_DMA_SMB2_TX 0x28
+#define JZ4775_DMA_SMB2_RX 0x29
+
+#endif /* __DT_BINDINGS_DMA_JZ4775_DMA_H__ */
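
Each constant above is the request type the driver programs into the channel's DRTn register; in device tree it is passed as the first cell of the DMA specifier. A sketch following the convention of the existing Ingenic DMA bindings (the labels and the 0xffffffff "any channel" mask are assumptions here); the X2000 header added later in this patch is used the same way:

#include <dt-bindings/dma/jz4775-dma.h>

serial@10030000 {
	/* 0xffffffff lets the driver pick any free channel */
	dmas = <&dmac JZ4775_DMA_UART0_TX 0xffffffff>,
	       <&dmac JZ4775_DMA_UART0_RX 0xffffffff>;
	dma-names = "tx", "rx";
};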
diff --git a/include/dt-bindings/dma/qcom-gpi.h b/include/dt-bindings/dma/qcom-gpi.h
new file mode 100644
index 000000000000..ebda2a37f52a
--- /dev/null
+++ b/include/dt-bindings/dma/qcom-gpi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* Copyright (c) 2020, Linaro Ltd. */
+
+#ifndef __DT_BINDINGS_DMA_QCOM_GPI_H__
+#define __DT_BINDINGS_DMA_QCOM_GPI_H__
+
+#define QCOM_GPI_SPI 1
+#define QCOM_GPI_UART 2
+#define QCOM_GPI_I2C 3
+
+#endif /* __DT_BINDINGS_DMA_QCOM_GPI_H__ */
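
These constants identify the client protocol in the last cell of a GPI DMA specifier. A hedged sketch of an I2C serial engine using a GPI TX/RX channel pair (the channel and serial-engine indices are illustrative assumptions):

#include <dt-bindings/dma/qcom-gpi.h>

i2c@880000 {
	dmas = <&gpi_dma0 0 0 QCOM_GPI_I2C>,
	       <&gpi_dma0 1 0 QCOM_GPI_I2C>;
	dma-names = "tx", "rx";
};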
diff --git a/include/dt-bindings/dma/x2000-dma.h b/include/dt-bindings/dma/x2000-dma.h
new file mode 100644
index 000000000000..db2cd4830b00
--- /dev/null
+++ b/include/dt-bindings/dma/x2000-dma.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X2000 DMA bindings.
+ *
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X2000_DMA_H__
+#define __DT_BINDINGS_DMA_X2000_DMA_H__
+
+/*
+ * Request type numbers for the X2000 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X2000_DMA_AUTO 0x8
+#define X2000_DMA_UART5_TX 0xa
+#define X2000_DMA_UART5_RX 0xb
+#define X2000_DMA_UART4_TX 0xc
+#define X2000_DMA_UART4_RX 0xd
+#define X2000_DMA_UART3_TX 0xe
+#define X2000_DMA_UART3_RX 0xf
+#define X2000_DMA_UART2_TX 0x10
+#define X2000_DMA_UART2_RX 0x11
+#define X2000_DMA_UART1_TX 0x12
+#define X2000_DMA_UART1_RX 0x13
+#define X2000_DMA_UART0_TX 0x14
+#define X2000_DMA_UART0_RX 0x15
+#define X2000_DMA_SSI0_TX 0x16
+#define X2000_DMA_SSI0_RX 0x17
+#define X2000_DMA_SSI1_TX 0x18
+#define X2000_DMA_SSI1_RX 0x19
+#define X2000_DMA_I2C0_TX 0x24
+#define X2000_DMA_I2C0_RX 0x25
+#define X2000_DMA_I2C1_TX 0x26
+#define X2000_DMA_I2C1_RX 0x27
+#define X2000_DMA_I2C2_TX 0x28
+#define X2000_DMA_I2C2_RX 0x29
+#define X2000_DMA_I2C3_TX 0x2a
+#define X2000_DMA_I2C3_RX 0x2b
+#define X2000_DMA_I2C4_TX 0x2c
+#define X2000_DMA_I2C4_RX 0x2d
+#define X2000_DMA_I2C5_TX 0x2e
+#define X2000_DMA_I2C5_RX 0x2f
+#define X2000_DMA_UART6_TX 0x30
+#define X2000_DMA_UART6_RX 0x31
+#define X2000_DMA_UART7_TX 0x32
+#define X2000_DMA_UART7_RX 0x33
+#define X2000_DMA_UART8_TX 0x34
+#define X2000_DMA_UART8_RX 0x35
+#define X2000_DMA_UART9_TX 0x36
+#define X2000_DMA_UART9_RX 0x37
+#define X2000_DMA_SADC_RX 0x38
+
+#endif /* __DT_BINDINGS_DMA_X2000_DMA_H__ */
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
index 54278d5c1856..1a8c025d77b8 100644
--- a/include/dt-bindings/firmware/imx/rsrc.h
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -13,34 +13,38 @@
* never be changed or removed (only added to at the end of the list).
*/
-#define IMX_SC_R_A53 0
-#define IMX_SC_R_A53_0 1
-#define IMX_SC_R_A53_1 2
-#define IMX_SC_R_A53_2 3
-#define IMX_SC_R_A53_3 4
-#define IMX_SC_R_A72 5
-#define IMX_SC_R_A72_0 6
-#define IMX_SC_R_A72_1 7
-#define IMX_SC_R_A72_2 8
-#define IMX_SC_R_A72_3 9
+#define IMX_SC_R_AP_0 0
+#define IMX_SC_R_AP_0_0 1
+#define IMX_SC_R_AP_0_1 2
+#define IMX_SC_R_AP_0_2 3
+#define IMX_SC_R_AP_0_3 4
+#define IMX_SC_R_AP_1 5
+#define IMX_SC_R_AP_1_0 6
+#define IMX_SC_R_AP_1_1 7
+#define IMX_SC_R_AP_1_2 8
+#define IMX_SC_R_AP_1_3 9
#define IMX_SC_R_CCI 10
#define IMX_SC_R_DB 11
#define IMX_SC_R_DRC_0 12
#define IMX_SC_R_DRC_1 13
#define IMX_SC_R_GIC_SMMU 14
-#define IMX_SC_R_IRQSTR_M4_0 15
-#define IMX_SC_R_IRQSTR_M4_1 16
-#define IMX_SC_R_SMMU 17
-#define IMX_SC_R_GIC 18
+#define IMX_SC_R_IRQSTR_MCU_0 15
+#define IMX_SC_R_IRQSTR_MCU_1 16
+#define IMX_SC_R_SMMU_0 17
+#define IMX_SC_R_GIC_0 18
#define IMX_SC_R_DC_0_BLIT0 19
#define IMX_SC_R_DC_0_BLIT1 20
#define IMX_SC_R_DC_0_BLIT2 21
#define IMX_SC_R_DC_0_BLIT_OUT 22
-#define IMX_SC_R_PERF 23
+#define IMX_SC_R_PERF_0 23
+#define IMX_SC_R_USB_1_PHY 24
#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_V2X_MU_0 26
+#define IMX_SC_R_V2X_MU_1 27
#define IMX_SC_R_DC_0_VIDEO0 28
#define IMX_SC_R_DC_0_VIDEO1 29
#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_V2X_MU_2 31
#define IMX_SC_R_DC_0 32
#define IMX_SC_R_GPU_2_PID0 33
#define IMX_SC_R_DC_0_PLL_0 34
@@ -49,11 +53,17 @@
#define IMX_SC_R_DC_1_BLIT1 37
#define IMX_SC_R_DC_1_BLIT2 38
#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_V2X_MU_3 40
+#define IMX_SC_R_V2X_MU_4 41
#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_STM 43
+#define IMX_SC_R_SECVIO 44
#define IMX_SC_R_DC_1_VIDEO0 45
#define IMX_SC_R_DC_1_VIDEO1 46
#define IMX_SC_R_DC_1_FRAC0 47
+#define IMX_SC_R_V2X 48
#define IMX_SC_R_DC_1 49
+#define IMX_SC_R_UNUSED14 50
#define IMX_SC_R_DC_1_PLL_0 51
#define IMX_SC_R_DC_1_PLL_1 52
#define IMX_SC_R_SPI_0 53
@@ -111,6 +121,7 @@
#define IMX_SC_R_CAN_0 105
#define IMX_SC_R_CAN_1 106
#define IMX_SC_R_CAN_2 107
+#define IMX_SC_R_CAN(x) (IMX_SC_R_CAN_0 + (x))
#define IMX_SC_R_DMA_1_CH0 108
#define IMX_SC_R_DMA_1_CH1 109
#define IMX_SC_R_DMA_1_CH2 110
@@ -143,10 +154,10 @@
#define IMX_SC_R_DMA_1_CH29 137
#define IMX_SC_R_DMA_1_CH30 138
#define IMX_SC_R_DMA_1_CH31 139
-#define IMX_SC_R_UNUSED1 140
-#define IMX_SC_R_UNUSED2 141
-#define IMX_SC_R_UNUSED3 142
-#define IMX_SC_R_UNUSED4 143
+#define IMX_SC_R_V2X_PID0 140
+#define IMX_SC_R_V2X_PID1 141
+#define IMX_SC_R_V2X_PID2 142
+#define IMX_SC_R_V2X_PID3 143
#define IMX_SC_R_GPU_0_PID0 144
#define IMX_SC_R_GPU_0_PID1 145
#define IMX_SC_R_GPU_0_PID2 146
@@ -175,7 +186,7 @@
#define IMX_SC_R_PCIE_B 169
#define IMX_SC_R_SATA_0 170
#define IMX_SC_R_SERDES_1 171
-#define IMX_SC_R_HSIO_GPIO 172
+#define IMX_SC_R_HSIO_GPIO_0 172
#define IMX_SC_R_MATCH_15 173
#define IMX_SC_R_MATCH_16 174
#define IMX_SC_R_MATCH_17 175
@@ -242,15 +253,15 @@
#define IMX_SC_R_ROM_0 236
#define IMX_SC_R_FSPI_0 237
#define IMX_SC_R_FSPI_1 238
-#define IMX_SC_R_IEE 239
-#define IMX_SC_R_IEE_R0 240
-#define IMX_SC_R_IEE_R1 241
-#define IMX_SC_R_IEE_R2 242
-#define IMX_SC_R_IEE_R3 243
-#define IMX_SC_R_IEE_R4 244
-#define IMX_SC_R_IEE_R5 245
-#define IMX_SC_R_IEE_R6 246
-#define IMX_SC_R_IEE_R7 247
+#define IMX_SC_R_IEE_0 239
+#define IMX_SC_R_IEE_0_R0 240
+#define IMX_SC_R_IEE_0_R1 241
+#define IMX_SC_R_IEE_0_R2 242
+#define IMX_SC_R_IEE_0_R3 243
+#define IMX_SC_R_IEE_0_R4 244
+#define IMX_SC_R_IEE_0_R5 245
+#define IMX_SC_R_IEE_0_R6 246
+#define IMX_SC_R_IEE_0_R7 247
#define IMX_SC_R_SDHC_0 248
#define IMX_SC_R_SDHC_1 249
#define IMX_SC_R_SDHC_2 250
@@ -281,46 +292,50 @@
#define IMX_SC_R_LVDS_2_PWM_0 275
#define IMX_SC_R_LVDS_2_I2C_0 276
#define IMX_SC_R_LVDS_2_I2C_1 277
-#define IMX_SC_R_M4_0_PID0 278
-#define IMX_SC_R_M4_0_PID1 279
-#define IMX_SC_R_M4_0_PID2 280
-#define IMX_SC_R_M4_0_PID3 281
-#define IMX_SC_R_M4_0_PID4 282
-#define IMX_SC_R_M4_0_RGPIO 283
-#define IMX_SC_R_M4_0_SEMA42 284
-#define IMX_SC_R_M4_0_TPM 285
-#define IMX_SC_R_M4_0_PIT 286
-#define IMX_SC_R_M4_0_UART 287
-#define IMX_SC_R_M4_0_I2C 288
-#define IMX_SC_R_M4_0_INTMUX 289
-#define IMX_SC_R_M4_0_MU_0B 292
-#define IMX_SC_R_M4_0_MU_0A0 293
-#define IMX_SC_R_M4_0_MU_0A1 294
-#define IMX_SC_R_M4_0_MU_0A2 295
-#define IMX_SC_R_M4_0_MU_0A3 296
-#define IMX_SC_R_M4_0_MU_1A 297
-#define IMX_SC_R_M4_1_PID0 298
-#define IMX_SC_R_M4_1_PID1 299
-#define IMX_SC_R_M4_1_PID2 300
-#define IMX_SC_R_M4_1_PID3 301
-#define IMX_SC_R_M4_1_PID4 302
-#define IMX_SC_R_M4_1_RGPIO 303
-#define IMX_SC_R_M4_1_SEMA42 304
-#define IMX_SC_R_M4_1_TPM 305
-#define IMX_SC_R_M4_1_PIT 306
-#define IMX_SC_R_M4_1_UART 307
-#define IMX_SC_R_M4_1_I2C 308
-#define IMX_SC_R_M4_1_INTMUX 309
-#define IMX_SC_R_M4_1_MU_0B 312
-#define IMX_SC_R_M4_1_MU_0A0 313
-#define IMX_SC_R_M4_1_MU_0A1 314
-#define IMX_SC_R_M4_1_MU_0A2 315
-#define IMX_SC_R_M4_1_MU_0A3 316
-#define IMX_SC_R_M4_1_MU_1A 317
+#define IMX_SC_R_MCU_0_PID0 278
+#define IMX_SC_R_MCU_0_PID1 279
+#define IMX_SC_R_MCU_0_PID2 280
+#define IMX_SC_R_MCU_0_PID3 281
+#define IMX_SC_R_MCU_0_PID4 282
+#define IMX_SC_R_MCU_0_RGPIO 283
+#define IMX_SC_R_MCU_0_SEMA42 284
+#define IMX_SC_R_MCU_0_TPM 285
+#define IMX_SC_R_MCU_0_PIT 286
+#define IMX_SC_R_MCU_0_UART 287
+#define IMX_SC_R_MCU_0_I2C 288
+#define IMX_SC_R_MCU_0_INTMUX 289
+#define IMX_SC_R_ENET_0_A0 290
+#define IMX_SC_R_ENET_0_A1 291
+#define IMX_SC_R_MCU_0_MU_0B 292
+#define IMX_SC_R_MCU_0_MU_0A0 293
+#define IMX_SC_R_MCU_0_MU_0A1 294
+#define IMX_SC_R_MCU_0_MU_0A2 295
+#define IMX_SC_R_MCU_0_MU_0A3 296
+#define IMX_SC_R_MCU_0_MU_1A 297
+#define IMX_SC_R_MCU_1_PID0 298
+#define IMX_SC_R_MCU_1_PID1 299
+#define IMX_SC_R_MCU_1_PID2 300
+#define IMX_SC_R_MCU_1_PID3 301
+#define IMX_SC_R_MCU_1_PID4 302
+#define IMX_SC_R_MCU_1_RGPIO 303
+#define IMX_SC_R_MCU_1_SEMA42 304
+#define IMX_SC_R_MCU_1_TPM 305
+#define IMX_SC_R_MCU_1_PIT 306
+#define IMX_SC_R_MCU_1_UART 307
+#define IMX_SC_R_MCU_1_I2C 308
+#define IMX_SC_R_MCU_1_INTMUX 309
+#define IMX_SC_R_UNUSED17 310
+#define IMX_SC_R_UNUSED18 311
+#define IMX_SC_R_MCU_1_MU_0B 312
+#define IMX_SC_R_MCU_1_MU_0A0 313
+#define IMX_SC_R_MCU_1_MU_0A1 314
+#define IMX_SC_R_MCU_1_MU_0A2 315
+#define IMX_SC_R_MCU_1_MU_0A3 316
+#define IMX_SC_R_MCU_1_MU_1A 317
#define IMX_SC_R_SAI_0 318
#define IMX_SC_R_SAI_1 319
#define IMX_SC_R_SAI_2 320
-#define IMX_SC_R_IRQSTR_SCU2 321
+#define IMX_SC_R_IRQSTR_AP_0 321
#define IMX_SC_R_IRQSTR_DSP 322
#define IMX_SC_R_ELCDIF_PLL 323
#define IMX_SC_R_OCRAM 324
@@ -365,33 +380,33 @@
#define IMX_SC_R_VPU_PID5 363
#define IMX_SC_R_VPU_PID6 364
#define IMX_SC_R_VPU_PID7 365
-#define IMX_SC_R_VPU_UART 366
-#define IMX_SC_R_VPUCORE 367
-#define IMX_SC_R_VPUCORE_0 368
-#define IMX_SC_R_VPUCORE_1 369
-#define IMX_SC_R_VPUCORE_2 370
-#define IMX_SC_R_VPUCORE_3 371
+#define IMX_SC_R_ENET_0_A2 366
+#define IMX_SC_R_ENET_1_A0 367
+#define IMX_SC_R_ENET_1_A1 368
+#define IMX_SC_R_ENET_1_A2 369
+#define IMX_SC_R_ENET_1_A3 370
+#define IMX_SC_R_ENET_1_A4 371
#define IMX_SC_R_DMA_4_CH0 372
#define IMX_SC_R_DMA_4_CH1 373
#define IMX_SC_R_DMA_4_CH2 374
#define IMX_SC_R_DMA_4_CH3 375
#define IMX_SC_R_DMA_4_CH4 376
-#define IMX_SC_R_ISI_CH0 377
-#define IMX_SC_R_ISI_CH1 378
-#define IMX_SC_R_ISI_CH2 379
-#define IMX_SC_R_ISI_CH3 380
-#define IMX_SC_R_ISI_CH4 381
-#define IMX_SC_R_ISI_CH5 382
-#define IMX_SC_R_ISI_CH6 383
-#define IMX_SC_R_ISI_CH7 384
-#define IMX_SC_R_MJPEG_DEC_S0 385
-#define IMX_SC_R_MJPEG_DEC_S1 386
-#define IMX_SC_R_MJPEG_DEC_S2 387
-#define IMX_SC_R_MJPEG_DEC_S3 388
-#define IMX_SC_R_MJPEG_ENC_S0 389
-#define IMX_SC_R_MJPEG_ENC_S1 390
-#define IMX_SC_R_MJPEG_ENC_S2 391
-#define IMX_SC_R_MJPEG_ENC_S3 392
+#define IMX_SC_R_ISI_0_CH0 377
+#define IMX_SC_R_ISI_0_CH1 378
+#define IMX_SC_R_ISI_0_CH2 379
+#define IMX_SC_R_ISI_0_CH3 380
+#define IMX_SC_R_ISI_0_CH4 381
+#define IMX_SC_R_ISI_0_CH5 382
+#define IMX_SC_R_ISI_0_CH6 383
+#define IMX_SC_R_ISI_0_CH7 384
+#define IMX_SC_R_MJPEG_0_DEC_S0 385
+#define IMX_SC_R_MJPEG_0_DEC_S1 386
+#define IMX_SC_R_MJPEG_0_DEC_S2 387
+#define IMX_SC_R_MJPEG_0_DEC_S3 388
+#define IMX_SC_R_MJPEG_0_ENC_S0 389
+#define IMX_SC_R_MJPEG_0_ENC_S1 390
+#define IMX_SC_R_MJPEG_0_ENC_S2 391
+#define IMX_SC_R_MJPEG_0_ENC_S3 392
#define IMX_SC_R_MIPI_0 393
#define IMX_SC_R_MIPI_0_PWM_0 394
#define IMX_SC_R_MIPI_0_I2C_0 395
@@ -506,11 +521,11 @@
#define IMX_SC_R_SECO_MU_3 504
#define IMX_SC_R_SECO_MU_4 505
#define IMX_SC_R_HDMI_RX_PWM_0 506
-#define IMX_SC_R_A35 507
-#define IMX_SC_R_A35_0 508
-#define IMX_SC_R_A35_1 509
-#define IMX_SC_R_A35_2 510
-#define IMX_SC_R_A35_3 511
+#define IMX_SC_R_AP_2 507
+#define IMX_SC_R_AP_2_0 508
+#define IMX_SC_R_AP_2_1 509
+#define IMX_SC_R_AP_2_2 510
+#define IMX_SC_R_AP_2_3 511
#define IMX_SC_R_DSP 512
#define IMX_SC_R_DSP_RAM 513
#define IMX_SC_R_CAAM_JR1_OUT 514
@@ -531,8 +546,8 @@
#define IMX_SC_R_BOARD_R5 529
#define IMX_SC_R_BOARD_R6 530
#define IMX_SC_R_BOARD_R7 531
-#define IMX_SC_R_MJPEG_DEC_MP 532
-#define IMX_SC_R_MJPEG_ENC_MP 533
+#define IMX_SC_R_MJPEG_0_DEC_MP 532
+#define IMX_SC_R_MJPEG_0_ENC_MP 533
#define IMX_SC_R_VPU_TS_0 534
#define IMX_SC_R_VPU_MU_0 535
#define IMX_SC_R_VPU_MU_1 536
@@ -565,6 +580,105 @@
#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
/*
+ * Compatibility defines for sc_rsrc_t
+ */
+#define IMX_SC_R_A35 IMX_SC_R_AP_2
+#define IMX_SC_R_A35_0 IMX_SC_R_AP_2_0
+#define IMX_SC_R_A35_1 IMX_SC_R_AP_2_1
+#define IMX_SC_R_A35_2 IMX_SC_R_AP_2_2
+#define IMX_SC_R_A35_3 IMX_SC_R_AP_2_3
+#define IMX_SC_R_A53 IMX_SC_R_AP_0
+#define IMX_SC_R_A53_0 IMX_SC_R_AP_0_0
+#define IMX_SC_R_A53_1 IMX_SC_R_AP_0_1
+#define IMX_SC_R_A53_2 IMX_SC_R_AP_0_2
+#define IMX_SC_R_A53_3 IMX_SC_R_AP_0_3
+#define IMX_SC_R_A72 IMX_SC_R_AP_1
+#define IMX_SC_R_A72_0 IMX_SC_R_AP_1_0
+#define IMX_SC_R_A72_1 IMX_SC_R_AP_1_1
+#define IMX_SC_R_A72_2 IMX_SC_R_AP_1_2
+#define IMX_SC_R_A72_3 IMX_SC_R_AP_1_3
+#define IMX_SC_R_GIC IMX_SC_R_GIC_0
+#define IMX_SC_R_HSIO_GPIO IMX_SC_R_HSIO_GPIO_0
+#define IMX_SC_R_IEE IMX_SC_R_IEE_0
+#define IMX_SC_R_IEE_R0 IMX_SC_R_IEE_0_R0
+#define IMX_SC_R_IEE_R1 IMX_SC_R_IEE_0_R1
+#define IMX_SC_R_IEE_R2 IMX_SC_R_IEE_0_R2
+#define IMX_SC_R_IEE_R3 IMX_SC_R_IEE_0_R3
+#define IMX_SC_R_IEE_R4 IMX_SC_R_IEE_0_R4
+#define IMX_SC_R_IEE_R5 IMX_SC_R_IEE_0_R5
+#define IMX_SC_R_IEE_R6 IMX_SC_R_IEE_0_R6
+#define IMX_SC_R_IEE_R7 IMX_SC_R_IEE_0_R7
+#define IMX_SC_R_IRQSTR_M4_0 IMX_SC_R_IRQSTR_MCU_0
+#define IMX_SC_R_IRQSTR_M4_1 IMX_SC_R_IRQSTR_MCU_1
+#define IMX_SC_R_IRQSTR_SCU2 IMX_SC_R_IRQSTR_AP_0
+#define IMX_SC_R_ISI_CH0 IMX_SC_R_ISI_0_CH0
+#define IMX_SC_R_ISI_CH1 IMX_SC_R_ISI_0_CH1
+#define IMX_SC_R_ISI_CH2 IMX_SC_R_ISI_0_CH2
+#define IMX_SC_R_ISI_CH3 IMX_SC_R_ISI_0_CH3
+#define IMX_SC_R_ISI_CH4 IMX_SC_R_ISI_0_CH4
+#define IMX_SC_R_ISI_CH5 IMX_SC_R_ISI_0_CH5
+#define IMX_SC_R_ISI_CH6 IMX_SC_R_ISI_0_CH6
+#define IMX_SC_R_ISI_CH7 IMX_SC_R_ISI_0_CH7
+#define IMX_SC_R_M4_0_I2C IMX_SC_R_MCU_0_I2C
+#define IMX_SC_R_M4_0_INTMUX IMX_SC_R_MCU_0_INTMUX
+#define IMX_SC_R_M4_0_MU_0A0 IMX_SC_R_MCU_0_MU_0A0
+#define IMX_SC_R_M4_0_MU_0A1 IMX_SC_R_MCU_0_MU_0A1
+#define IMX_SC_R_M4_0_MU_0A2 IMX_SC_R_MCU_0_MU_0A2
+#define IMX_SC_R_M4_0_MU_0A3 IMX_SC_R_MCU_0_MU_0A3
+#define IMX_SC_R_M4_0_MU_0B IMX_SC_R_MCU_0_MU_0B
+#define IMX_SC_R_M4_0_MU_1A IMX_SC_R_MCU_0_MU_1A
+#define IMX_SC_R_M4_0_PID0 IMX_SC_R_MCU_0_PID0
+#define IMX_SC_R_M4_0_PID1 IMX_SC_R_MCU_0_PID1
+#define IMX_SC_R_M4_0_PID2 IMX_SC_R_MCU_0_PID2
+#define IMX_SC_R_M4_0_PID3 IMX_SC_R_MCU_0_PID3
+#define IMX_SC_R_M4_0_PID4 IMX_SC_R_MCU_0_PID4
+#define IMX_SC_R_M4_0_PIT IMX_SC_R_MCU_0_PIT
+#define IMX_SC_R_M4_0_RGPIO IMX_SC_R_MCU_0_RGPIO
+#define IMX_SC_R_M4_0_SEMA42 IMX_SC_R_MCU_0_SEMA42
+#define IMX_SC_R_M4_0_TPM IMX_SC_R_MCU_0_TPM
+#define IMX_SC_R_M4_0_UART IMX_SC_R_MCU_0_UART
+#define IMX_SC_R_M4_1_I2C IMX_SC_R_MCU_1_I2C
+#define IMX_SC_R_M4_1_INTMUX IMX_SC_R_MCU_1_INTMUX
+#define IMX_SC_R_M4_1_MU_0A0 IMX_SC_R_MCU_1_MU_0A0
+#define IMX_SC_R_M4_1_MU_0A1 IMX_SC_R_MCU_1_MU_0A1
+#define IMX_SC_R_M4_1_MU_0A2 IMX_SC_R_MCU_1_MU_0A2
+#define IMX_SC_R_M4_1_MU_0A3 IMX_SC_R_MCU_1_MU_0A3
+#define IMX_SC_R_M4_1_MU_0B IMX_SC_R_MCU_1_MU_0B
+#define IMX_SC_R_M4_1_MU_1A IMX_SC_R_MCU_1_MU_1A
+#define IMX_SC_R_M4_1_PID0 IMX_SC_R_MCU_1_PID0
+#define IMX_SC_R_M4_1_PID1 IMX_SC_R_MCU_1_PID1
+#define IMX_SC_R_M4_1_PID2 IMX_SC_R_MCU_1_PID2
+#define IMX_SC_R_M4_1_PID3 IMX_SC_R_MCU_1_PID3
+#define IMX_SC_R_M4_1_PID4 IMX_SC_R_MCU_1_PID4
+#define IMX_SC_R_M4_1_PIT IMX_SC_R_MCU_1_PIT
+#define IMX_SC_R_M4_1_RGPIO IMX_SC_R_MCU_1_RGPIO
+#define IMX_SC_R_M4_1_SEMA42 IMX_SC_R_MCU_1_SEMA42
+#define IMX_SC_R_M4_1_TPM IMX_SC_R_MCU_1_TPM
+#define IMX_SC_R_M4_1_UART IMX_SC_R_MCU_1_UART
+#define IMX_SC_R_MJPEG_DEC_MP IMX_SC_R_MJPEG_0_DEC_MP
+#define IMX_SC_R_MJPEG_DEC_S0 IMX_SC_R_MJPEG_0_DEC_S0
+#define IMX_SC_R_MJPEG_DEC_S1 IMX_SC_R_MJPEG_0_DEC_S1
+#define IMX_SC_R_MJPEG_DEC_S2 IMX_SC_R_MJPEG_0_DEC_S2
+#define IMX_SC_R_MJPEG_DEC_S3 IMX_SC_R_MJPEG_0_DEC_S3
+#define IMX_SC_R_MJPEG_ENC_MP IMX_SC_R_MJPEG_0_ENC_MP
+#define IMX_SC_R_MJPEG_ENC_S0 IMX_SC_R_MJPEG_0_ENC_S0
+#define IMX_SC_R_MJPEG_ENC_S1 IMX_SC_R_MJPEG_0_ENC_S1
+#define IMX_SC_R_MJPEG_ENC_S2 IMX_SC_R_MJPEG_0_ENC_S2
+#define IMX_SC_R_MJPEG_ENC_S3 IMX_SC_R_MJPEG_0_ENC_S3
+#define IMX_SC_R_PERF IMX_SC_R_PERF_0
+#define IMX_SC_R_SMMU IMX_SC_R_SMMU_0
+#define IMX_SC_R_VPU_UART IMX_SC_R_ENET_0_A2
+#define IMX_SC_R_VPUCORE IMX_SC_R_ENET_1_A0
+#define IMX_SC_R_VPUCORE_0 IMX_SC_R_ENET_1_A1
+#define IMX_SC_R_VPUCORE_1 IMX_SC_R_ENET_1_A2
+#define IMX_SC_R_VPUCORE_2 IMX_SC_R_ENET_1_A3
+#define IMX_SC_R_VPUCORE_3 IMX_SC_R_ENET_1_A4
+#define IMX_SC_R_UNUSED1 IMX_SC_R_V2X_PID0
+#define IMX_SC_R_UNUSED2 IMX_SC_R_V2X_PID1
+#define IMX_SC_R_UNUSED3 IMX_SC_R_V2X_PID2
+#define IMX_SC_R_UNUSED4 IMX_SC_R_V2X_PID3
+
+/*
* Defines for SC CONTROL
*/
#define IMX_SC_C_TEMP 0
@@ -629,6 +743,10 @@
#define IMX_SC_C_INTF_SEL 59
#define IMX_SC_C_RXC_DLY 60
#define IMX_SC_C_TIMER_SEL 61
-#define IMX_SC_C_LAST 62
+#define IMX_SC_C_MISC0 62
+#define IMX_SC_C_MISC1 63
+#define IMX_SC_C_MISC2 64
+#define IMX_SC_C_MISC3 65
+#define IMX_SC_C_LAST 66
#endif /* __DT_BINDINGS_RSCRC_IMX_H */
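These resource IDs are most commonly consumed as SCU power-domain specifiers, and because dt-bindings headers pass through the C preprocessor, the new IMX_SC_R_CAN(x) helper can be used directly inside cells. A sketch with an illustrative node:

	#include <dt-bindings/firmware/imx/rsrc.h>

	flexcan1: can@5a8d0000 {
		/* ... */
		/* IMX_SC_R_CAN(0) expands to IMX_SC_R_CAN_0 + 0, i.e. 105 */
		power-domains = <&pd IMX_SC_R_CAN(0)>;
	};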
diff --git a/include/dt-bindings/firmware/qcom,scm.h b/include/dt-bindings/firmware/qcom,scm.h
new file mode 100644
index 000000000000..6de8b08e1e79
--- /dev/null
+++ b/include/dt-bindings/firmware/qcom,scm.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+#define _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+
+#define QCOM_SCM_VMID_TZ 0x1
+#define QCOM_SCM_VMID_HLOS 0x3
+#define QCOM_SCM_VMID_SSC_Q6 0x5
+#define QCOM_SCM_VMID_ADSP_Q6 0x6
+#define QCOM_SCM_VMID_CP_TOUCH 0x8
+#define QCOM_SCM_VMID_CP_BITSTREAM 0x9
+#define QCOM_SCM_VMID_CP_PIXEL 0xA
+#define QCOM_SCM_VMID_CP_NON_PIXEL 0xB
+#define QCOM_SCM_VMID_CP_CAMERA 0xD
+#define QCOM_SCM_VMID_HLOS_FREE 0xE
+#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_MSS_NONMSA 0x10
+#define QCOM_SCM_VMID_CP_SEC_DISPLAY 0x11
+#define QCOM_SCM_VMID_CP_APP 0x12
+#define QCOM_SCM_VMID_LPASS 0x16
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
+#define QCOM_SCM_VMID_CP_SPSS_SP 0x1A
+#define QCOM_SCM_VMID_CP_CAMERA_PREVIEW 0x1D
+#define QCOM_SCM_VMID_CDSP 0x1E
+#define QCOM_SCM_VMID_CP_SPSS_SP_SHARED 0x22
+#define QCOM_SCM_VMID_CP_SPSS_HLOS_SHARED 0x24
+#define QCOM_SCM_VMID_ADSP_HEAP 0x25
+#define QCOM_SCM_VMID_CP_CDSP 0x2A
+#define QCOM_SCM_VMID_NAV 0x2B
+#define QCOM_SCM_VMID_TVM 0x2D
+#define QCOM_SCM_VMID_OEMVM 0x31
+
+#endif
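These VMIDs name the virtual machines to which memory can be assigned through SCM calls. A sketch of a typical consumer, a remote-filesystem shared-memory node; the address, size, and client ID are illustrative:

	#include <dt-bindings/firmware/qcom,scm.h>

	rmtfs_mem: memory@88f00000 {
		compatible = "qcom,rmtfs-mem";
		reg = <0x0 0x88f00000 0x0 0x200000>;
		no-map;
		qcom,client-id = <1>;
		qcom,vmid = <QCOM_SCM_VMID_MSS_MSA>;
	};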
diff --git a/include/dt-bindings/gce/mediatek,mt6795-gce.h b/include/dt-bindings/gce/mediatek,mt6795-gce.h
new file mode 100644
index 000000000000..97d5ba2d2b44
--- /dev/null
+++ b/include/dt-bindings/gce/mediatek,mt6795-gce.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+#ifndef _DT_BINDINGS_GCE_MT6795_H
+#define _DT_BINDINGS_GCE_MT6795_H
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_NORMAL 1
+#define CMDQ_THR_PRIO_NORMAL_2 2
+#define CMDQ_THR_PRIO_MEDIUM 3
+#define CMDQ_THR_PRIO_MEDIUM_2 4
+#define CMDQ_THR_PRIO_HIGH 5
+#define CMDQ_THR_PRIO_HIGHER 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* GCE SUBSYS */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1500XXXX 4
+#define SUBSYS_1600XXXX 5
+#define SUBSYS_1700XXXX 6
+#define SUBSYS_1800XXXX 7
+#define SUBSYS_1000XXXX 8
+#define SUBSYS_1001XXXX 9
+#define SUBSYS_1002XXXX 10
+#define SUBSYS_1003XXXX 11
+#define SUBSYS_1004XXXX 12
+#define SUBSYS_1005XXXX 13
+#define SUBSYS_1020XXXX 14
+#define SUBSYS_1021XXXX 15
+#define SUBSYS_1120XXXX 16
+#define SUBSYS_1121XXXX 17
+#define SUBSYS_1122XXXX 18
+#define SUBSYS_1123XXXX 19
+#define SUBSYS_1124XXXX 20
+#define SUBSYS_1125XXXX 21
+#define SUBSYS_1126XXXX 22
+
+/* GCE HW EVENT */
+#define CMDQ_EVENT_MDP_RDMA0_SOF 0
+#define CMDQ_EVENT_MDP_RDMA1_SOF 1
+#define CMDQ_EVENT_MDP_DSI0_TE_SOF 2
+#define CMDQ_EVENT_MDP_DSI1_TE_SOF 3
+#define CMDQ_EVENT_MDP_MVW_SOF 4
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 5
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 6
+#define CMDQ_EVENT_MDP_WDMA_SOF 7
+#define CMDQ_EVENT_MDP_WROT0_SOF 8
+#define CMDQ_EVENT_MDP_WROT1_SOF 9
+#define CMDQ_EVENT_MDP_CROP_SOF 10
+#define CMDQ_EVENT_DISP_OVL0_SOF 11
+#define CMDQ_EVENT_DISP_OVL1_SOF 12
+#define CMDQ_EVENT_DISP_RDMA0_SOF 13
+#define CMDQ_EVENT_DISP_RDMA1_SOF 14
+#define CMDQ_EVENT_DISP_RDMA2_SOF 15
+#define CMDQ_EVENT_DISP_WDMA0_SOF 16
+#define CMDQ_EVENT_DISP_WDMA1_SOF 17
+#define CMDQ_EVENT_DISP_COLOR0_SOF 18
+#define CMDQ_EVENT_DISP_COLOR1_SOF 19
+#define CMDQ_EVENT_DISP_AAL_SOF 20
+#define CMDQ_EVENT_DISP_GAMMA_SOF 21
+#define CMDQ_EVENT_DISP_UFOE_SOF 22
+#define CMDQ_EVENT_DISP_PWM0_SOF 23
+#define CMDQ_EVENT_DISP_PWM1_SOF 24
+#define CMDQ_EVENT_DISP_OD_SOF 25
+#define CMDQ_EVENT_MDP_RDMA0_EOF 26
+#define CMDQ_EVENT_MDP_RDMA1_EOF 27
+#define CMDQ_EVENT_MDP_RSZ0_EOF 28
+#define CMDQ_EVENT_MDP_RSZ1_EOF 29
+#define CMDQ_EVENT_MDP_RSZ2_EOF 30
+#define CMDQ_EVENT_MDP_TDSHP0_EOF 31
+#define CMDQ_EVENT_MDP_TDSHP1_EOF 32
+#define CMDQ_EVENT_MDP_WDMA_EOF 33
+#define CMDQ_EVENT_MDP_WROT0_WRITE_EOF 34
+#define CMDQ_EVENT_MDP_WROT0_READ_EOF 35
+#define CMDQ_EVENT_MDP_WROT1_WRITE_EOF 36
+#define CMDQ_EVENT_MDP_WROT1_READ_EOF 37
+#define CMDQ_EVENT_MDP_CROP_EOF 38
+#define CMDQ_EVENT_DISP_OVL0_EOF 39
+#define CMDQ_EVENT_DISP_OVL1_EOF 40
+#define CMDQ_EVENT_DISP_RDMA0_EOF 41
+#define CMDQ_EVENT_DISP_RDMA1_EOF 42
+#define CMDQ_EVENT_DISP_RDMA2_EOF 43
+#define CMDQ_EVENT_DISP_WDMA0_EOF 44
+#define CMDQ_EVENT_DISP_WDMA1_EOF 45
+#define CMDQ_EVENT_DISP_COLOR0_EOF 46
+#define CMDQ_EVENT_DISP_COLOR1_EOF 47
+#define CMDQ_EVENT_DISP_AAL_EOF 48
+#define CMDQ_EVENT_DISP_GAMMA_EOF 49
+#define CMDQ_EVENT_DISP_UFOE_EOF 50
+#define CMDQ_EVENT_DISP_DPI0_EOF 51
+#define CMDQ_EVENT_MUTEX0_STREAM_EOF 52
+#define CMDQ_EVENT_MUTEX1_STREAM_EOF 53
+#define CMDQ_EVENT_MUTEX2_STREAM_EOF 54
+#define CMDQ_EVENT_MUTEX3_STREAM_EOF 55
+#define CMDQ_EVENT_MUTEX4_STREAM_EOF 56
+#define CMDQ_EVENT_MUTEX5_STREAM_EOF 57
+#define CMDQ_EVENT_MUTEX6_STREAM_EOF 58
+#define CMDQ_EVENT_MUTEX7_STREAM_EOF 59
+#define CMDQ_EVENT_MUTEX8_STREAM_EOF 60
+#define CMDQ_EVENT_MUTEX9_STREAM_EOF 61
+#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 62
+#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 63
+#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 64
+#define CMDQ_EVENT_ISP_PASS2_2_EOF 129
+#define CMDQ_EVENT_ISP_PASS2_1_EOF 130
+#define CMDQ_EVENT_ISP_PASS2_0_EOF 131
+#define CMDQ_EVENT_ISP_PASS1_1_EOF 132
+#define CMDQ_EVENT_ISP_PASS1_0_EOF 133
+#define CMDQ_EVENT_CAMSV_2_PASS1_EOF 134
+#define CMDQ_EVENT_CAMSV_1_PASS1_EOF 135
+#define CMDQ_EVENT_SENINF_CAM1_2_3_FIFO_FULL 136
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 137
+#define CMDQ_EVENT_JPGENC_PASS2_EOF 257
+#define CMDQ_EVENT_JPGENC_PASS1_EOF 258
+#define CMDQ_EVENT_JPGDEC_EOF 259
+
+#endif
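GCE consumers typically reference these constants in two places: the mailbox specifier (channel plus thread priority) and the mediatek,gce-client-reg property (subsys ID plus an offset and size within that subsys). A sketch with illustrative addresses and offsets, assuming the common two-cell GCE mailbox specifier:

	#include <dt-bindings/gce/mediatek,mt6795-gce.h>

	mmsys: syscon@14000000 {
		compatible = "mediatek,mt6795-mmsys", "syscon";
		reg = <0 0x14000000 0 0x1000>;
		mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>;
		mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
	};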
diff --git a/include/dt-bindings/gce/mt8186-gce.h b/include/dt-bindings/gce/mt8186-gce.h
new file mode 100644
index 000000000000..f12e3cb586ce
--- /dev/null
+++ b/include/dt-bindings/gce/mt8186-gce.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Yongqiang Niu <yongqiang.niu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8186_H
+#define _DT_BINDINGS_GCE_MT8186_H
+
+/* Assigning a timeout of 0 also selects the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* CPR count in 32-bit registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1582XXXX 5
+#define SUBSYS_1B00XXXX 6
+#define SUBSYS_1C00XXXX 7
+#define SUBSYS_1C10XXXX 8
+#define SUBSYS_1000XXXX 9
+#define SUBSYS_1001XXXX 10
+#define SUBSYS_1020XXXX 11
+#define SUBSYS_1021XXXX 12
+#define SUBSYS_1022XXXX 13
+#define SUBSYS_1023XXXX 14
+#define SUBSYS_1060XXXX 15
+#define SUBSYS_1602XXXX 16
+#define SUBSYS_1608XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1706XXXX 22
+#define SUBSYS_1A00XXXX 23
+#define SUBSYS_1A01XXXX 24
+#define SUBSYS_1A02XXXX 25
+#define SUBSYS_1A03XXXX 26
+#define SUBSYS_1A04XXXX 27
+#define SUBSYS_1A05XXXX 28
+#define SUBSYS_1A06XXXX 29
+#define SUBSYS_NO_SUPPORT 99
+
+/* GCE General Purpose Register (GPR) support.
+ * Notes on per-scenario usage are kept inline below.
+ */
+/* GCE: write mask */
+#define GCE_GPR_R00 0x00
+#define GCE_GPR_R01 0x01
+/* MDP: P1: JPEG dest */
+#define GCE_GPR_R02 0x02
+#define GCE_GPR_R03 0x03
+/* MDP: PQ color */
+#define GCE_GPR_R04 0x04
+/* MDP: 2D sharpness */
+#define GCE_GPR_R05 0x05
+/* DISP: poll esd */
+#define GCE_GPR_R06 0x06
+#define GCE_GPR_R07 0x07
+/* MDP: P4: 2D sharpness dst */
+#define GCE_GPR_R08 0x08
+#define GCE_GPR_R09 0x09
+/* VCU: poll with timeout for GPR timer */
+#define GCE_GPR_R10 0x0A
+#define GCE_GPR_R11 0x0B
+/* CMDQ: debug */
+#define GCE_GPR_R12 0x0C
+#define GCE_GPR_R13 0x0D
+/* CMDQ: P7: debug */
+#define GCE_GPR_R14 0x0E
+#define GCE_GPR_R15 0x0F
+
+/* GCE hardware events */
+/* VDEC */
+#define CMDQ_EVENT_LINE_COUNT_THRESHOLD_INTERRUPT 0
+#define CMDQ_EVENT_VDEC_INT 1
+#define CMDQ_EVENT_VDEC_PAUSE 2
+#define CMDQ_EVENT_VDEC_DEC_ERROR 3
+#define CMDQ_EVENT_MDEC_TIMEOUT 4
+#define CMDQ_EVENT_DRAM_ACCESS_DONE 5
+#define CMDQ_EVENT_INI_FETCH_RDY 6
+#define CMDQ_EVENT_PROCESS_FLAG 7
+#define CMDQ_EVENT_SEARCH_START_CODE_DONE 8
+#define CMDQ_EVENT_REF_REORDER_DONE 9
+#define CMDQ_EVENT_WP_TBLE_DONE 10
+#define CMDQ_EVENT_COUNT_SRAM_CLR_DONE 11
+#define CMDQ_EVENT_GCE_CNT_OP_THRESHOLD 15
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_0 16
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_1 17
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_2 18
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_3 19
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_4 20
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_5 21
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_6 22
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_7 23
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_8 24
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_9 25
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_10 26
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_11 27
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_12 28
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_13 29
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_14 30
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_15 31
+#define CMDQ_EVENT_WPE_GCE_FRAME_DONE 32
+
+/* CAM */
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 65
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 66
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71
+#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87
+#define CMDQ_EVENT_TG_OVRUN_A_INT 88
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89
+#define CMDQ_EVENT_TG_OVRUN_B_INT 90
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91
+#define CMDQ_EVENT_TG_OVRUN_M0_INT 94
+#define CMDQ_EVENT_R1_ERROR_M0_INT 95
+#define CMDQ_EVENT_TG_GRABERR_M0_INT 96
+#define CMDQ_EVENT_TG_GRABERR_A_INT 98
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99
+#define CMDQ_EVENT_TG_GRABERR_B_INT 100
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101
+/* VENC */
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130
+#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133
+#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136
+#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137
+#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138
+/* IPE */
+#define CMDQ_EVENT_FDVT_DONE 161
+#define CMDQ_EVENT_FE_DONE 162
+#define CMDQ_EVENT_RSC_DONE 163
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 164
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 165
+/* IMG2 */
+#define CMDQ_EVENT_GCE_IMG2_EVENT0 193
+#define CMDQ_EVENT_GCE_IMG2_EVENT1 194
+#define CMDQ_EVENT_GCE_IMG2_EVENT2 195
+#define CMDQ_EVENT_GCE_IMG2_EVENT3 196
+#define CMDQ_EVENT_GCE_IMG2_EVENT4 197
+#define CMDQ_EVENT_GCE_IMG2_EVENT5 198
+#define CMDQ_EVENT_GCE_IMG2_EVENT6 199
+#define CMDQ_EVENT_GCE_IMG2_EVENT7 200
+#define CMDQ_EVENT_GCE_IMG2_EVENT8 201
+#define CMDQ_EVENT_GCE_IMG2_EVENT9 202
+#define CMDQ_EVENT_GCE_IMG2_EVENT10 203
+#define CMDQ_EVENT_GCE_IMG2_EVENT11 204
+#define CMDQ_EVENT_GCE_IMG2_EVENT12 205
+#define CMDQ_EVENT_GCE_IMG2_EVENT13 206
+#define CMDQ_EVENT_GCE_IMG2_EVENT14 207
+#define CMDQ_EVENT_GCE_IMG2_EVENT15 208
+#define CMDQ_EVENT_GCE_IMG2_EVENT16 209
+#define CMDQ_EVENT_GCE_IMG2_EVENT17 210
+#define CMDQ_EVENT_GCE_IMG2_EVENT18 211
+#define CMDQ_EVENT_GCE_IMG2_EVENT19 212
+#define CMDQ_EVENT_GCE_IMG2_EVENT20 213
+#define CMDQ_EVENT_GCE_IMG2_EVENT21 214
+#define CMDQ_EVENT_GCE_IMG2_EVENT22 215
+#define CMDQ_EVENT_GCE_IMG2_EVENT23 216
+/* IMG1 */
+#define CMDQ_EVENT_GCE_IMG1_EVENT0 225
+#define CMDQ_EVENT_GCE_IMG1_EVENT1 226
+#define CMDQ_EVENT_GCE_IMG1_EVENT2 227
+#define CMDQ_EVENT_GCE_IMG1_EVENT3 228
+#define CMDQ_EVENT_GCE_IMG1_EVENT4 229
+#define CMDQ_EVENT_GCE_IMG1_EVENT5 230
+#define CMDQ_EVENT_GCE_IMG1_EVENT6 231
+#define CMDQ_EVENT_GCE_IMG1_EVENT7 232
+#define CMDQ_EVENT_GCE_IMG1_EVENT8 233
+#define CMDQ_EVENT_GCE_IMG1_EVENT9 234
+#define CMDQ_EVENT_GCE_IMG1_EVENT10 235
+#define CMDQ_EVENT_GCE_IMG1_EVENT11 236
+#define CMDQ_EVENT_GCE_IMG1_EVENT12 237
+#define CMDQ_EVENT_GCE_IMG1_EVENT13 238
+#define CMDQ_EVENT_GCE_IMG1_EVENT14 239
+#define CMDQ_EVENT_GCE_IMG1_EVENT15 240
+#define CMDQ_EVENT_GCE_IMG1_EVENT16 241
+#define CMDQ_EVENT_GCE_IMG1_EVENT17 242
+#define CMDQ_EVENT_GCE_IMG1_EVENT18 243
+#define CMDQ_EVENT_GCE_IMG1_EVENT19 244
+#define CMDQ_EVENT_GCE_IMG1_EVENT20 245
+#define CMDQ_EVENT_GCE_IMG1_EVENT21 246
+#define CMDQ_EVENT_GCE_IMG1_EVENT22 247
+#define CMDQ_EVENT_GCE_IMG1_EVENT23 248
+/* MDP */
+#define CMDQ_EVENT_MDP_RDMA0_SOF 256
+#define CMDQ_EVENT_MDP_RDMA1_SOF 257
+#define CMDQ_EVENT_MDP_AAL0_SOF 258
+#define CMDQ_EVENT_MDP_AAL1_SOF 259
+#define CMDQ_EVENT_MDP_HDR0_SOF 260
+#define CMDQ_EVENT_MDP_RSZ0_SOF 261
+#define CMDQ_EVENT_MDP_RSZ1_SOF 262
+#define CMDQ_EVENT_MDP_WROT0_SOF 263
+#define CMDQ_EVENT_MDP_WROT1_SOF 264
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 265
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 266
+#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 267
+#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 268
+#define CMDQ_EVENT_MDP_COLOR0_SOF 269
+#define CMDQ_EVENT_MDP_WROT3_FRAME_DONE 288
+#define CMDQ_EVENT_MDP_WROT2_FRAME_DONE 289
+#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290
+#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291
+#define CMDQ_EVENT_MDP_TDSHP3_FRAME_DONE 292
+#define CMDQ_EVENT_MDP_TDSHP2_FRAME_DONE 293
+#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294
+#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295
+#define CMDQ_EVENT_MDP_RSZ3_FRAME_DONE 296
+#define CMDQ_EVENT_MDP_RSZ2_FRAME_DONE 297
+#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 298
+#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 299
+#define CMDQ_EVENT_MDP_RDMA3_FRAME_DONE 300
+#define CMDQ_EVENT_MDP_RDMA2_FRAME_DONE 301
+#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 302
+#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 303
+#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 304
+#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 305
+#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 306
+#define CMDQ_EVENT_MDP_AAL3_FRAME_DONE 307
+#define CMDQ_EVENT_MDP_AAL2_FRAME_DONE 308
+#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 309
+#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 310
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335
+#define CMDQ_EVENT_MDP_WROT3_SW_RST_DONE_ENG_EVENT 336
+#define CMDQ_EVENT_MDP_WROT2_SW_RST_DONE_ENG_EVENT 337
+#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339
+#define CMDQ_EVENT_MDP_RDMA3_SW_RST_DONE_ENG_EVENT 340
+#define CMDQ_EVENT_MDP_RDMA2_SW_RST_DONE_ENG_EVENT 341
+#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343
+/* DISP */
+#define CMDQ_EVENT_DISP_OVL0_SOF 384
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385
+#define CMDQ_EVENT_DISP_RDMA0_SOF 386
+#define CMDQ_EVENT_DISP_RSZ0_SOF 387
+#define CMDQ_EVENT_DISP_COLOR0_SOF 388
+#define CMDQ_EVENT_DISP_CCORR0_SOF 389
+#define CMDQ_EVENT_DISP_CCORR1_SOF 390
+#define CMDQ_EVENT_DISP_AAL0_SOF 391
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 392
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 393
+#define CMDQ_EVENT_DISP_DITHER0_SOF 394
+#define CMDQ_EVENT_DISP_CM0_SOF 395
+#define CMDQ_EVENT_DISP_SPR0_SOF 396
+#define CMDQ_EVENT_DISP_DSC_WRAP0_SOF 397
+#define CMDQ_EVENT_DSI0_SOF 398
+#define CMDQ_EVENT_DISP_WDMA0_SOF 399
+#define CMDQ_EVENT_DISP_PWM0_SOF 400
+#define CMDQ_EVENT_DSI0_FRAME_DONE 410
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 411
+#define CMDQ_EVENT_DISP_SPR0_FRAME_DONE 412
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 413
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 414
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 415
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_CM0_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_CCORR1_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 465
+#define CMDQ_EVENT_OUT_EVENT_0 898
+
+/* CMDQ SW tokens
+ * The following definitions are GCE SW tokens that clients may use
+ * through the event operation API.
+ * Note that tokens 512 to 639 may be set as secure.
+ */
+
+/* End of HW events and beginning of SW tokens */
+#define CMDQ_MAX_HW_EVENT 512
+
+/* Config thread notifies the trigger thread */
+#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 640
+/* Trigger thread notifies the config thread */
+#define CMDQ_SYNC_TOKEN_STREAM_EOF 641
+/* Block Trigger thread until the ESD check finishes. */
+#define CMDQ_SYNC_TOKEN_ESD_EOF 642
+#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 643
+/* Check that the CABC setup has finished */
+#define CMDQ_SYNC_TOKEN_CABC_EOF 644
+
+/* Notifies normal CMDQ that some secure tasks are done.
+ * MUST NOT CHANGE: this token is synchronized with the secure world.
+ */
+#define CMDQ_SYNC_SECURE_THR_EOF 647
+
+/* SW tokens for CMDQ use */
+#define CMDQ_SYNC_TOKEN_USER_0 649
+#define CMDQ_SYNC_TOKEN_USER_1 650
+#define CMDQ_SYNC_TOKEN_POLL_MONITOR 651
+#define CMDQ_SYNC_TOKEN_TPR_LOCK 652
+
+/* ISP sw token */
+#define CMDQ_SYNC_TOKEN_MSS 665
+#define CMDQ_SYNC_TOKEN_MSF 666
+
+/* DISP sw token */
+#define CMDQ_SYNC_TOKEN_SODI 671
+
+/* GPR access tokens (for register backup).
+ * There are 15 32-bit GPRs; 3 GPRs form a set
+ * (64 bits for the address, 32 bits for the value).
+ * MUST NOT CHANGE: these tokens are synchronized with MDP.
+ */
+#define CMDQ_SYNC_TOKEN_GPR_SET_0 700
+#define CMDQ_SYNC_TOKEN_GPR_SET_1 701
+#define CMDQ_SYNC_TOKEN_GPR_SET_2 702
+#define CMDQ_SYNC_TOKEN_GPR_SET_3 703
+#define CMDQ_SYNC_TOKEN_GPR_SET_4 704
+
+/* Resource lock events to control resources in GCE threads */
+#define CMDQ_SYNC_RESOURCE_WROT0 710
+#define CMDQ_SYNC_RESOURCE_WROT1 711
+
+/* Events for the GPR timer, used by sleep and poll-with-timeout */
+#define CMDQ_TOKEN_GPR_TIMER_R0 994
+#define CMDQ_TOKEN_GPR_TIMER_R1 995
+#define CMDQ_TOKEN_GPR_TIMER_R2 996
+#define CMDQ_TOKEN_GPR_TIMER_R3 997
+#define CMDQ_TOKEN_GPR_TIMER_R4 998
+#define CMDQ_TOKEN_GPR_TIMER_R5 999
+#define CMDQ_TOKEN_GPR_TIMER_R6 1000
+#define CMDQ_TOKEN_GPR_TIMER_R7 1001
+#define CMDQ_TOKEN_GPR_TIMER_R8 1002
+#define CMDQ_TOKEN_GPR_TIMER_R9 1003
+#define CMDQ_TOKEN_GPR_TIMER_R10 1004
+#define CMDQ_TOKEN_GPR_TIMER_R11 1005
+#define CMDQ_TOKEN_GPR_TIMER_R12 1006
+#define CMDQ_TOKEN_GPR_TIMER_R13 1007
+#define CMDQ_TOKEN_GPR_TIMER_R14 1008
+#define CMDQ_TOKEN_GPR_TIMER_R15 1009
+
+#define CMDQ_EVENT_MAX 0x3FF
+/* CMDQ sw tokens END */
+
+#endif
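Besides mailbox channels, the hardware event IDs above can be handed to GCE-aware blocks through the mediatek,gce-events property. A sketch, assuming a display mutex node consumes the stream-done events (node address illustrative):

	#include <dt-bindings/gce/mt8186-gce.h>

	mutex: mutex@14001000 {
		/* ... */
		mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
				      <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
	};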
diff --git a/include/dt-bindings/gce/mt8192-gce.h b/include/dt-bindings/gce/mt8192-gce.h
new file mode 100644
index 000000000000..9e5a0eb040a0
--- /dev/null
+++ b/include/dt-bindings/gce/mt8192-gce.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yongqiang Niu <yongqiang.niu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8192_H
+#define _DT_BINDINGS_GCE_MT8192_H
+
+/* Assigning a timeout of 0 also selects the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* CPR count in 32-bit registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1880XXXX 5
+#define SUBSYS_1881XXXX 6
+#define SUBSYS_1882XXXX 7
+#define SUBSYS_1883XXXX 8
+#define SUBSYS_1884XXXX 9
+#define SUBSYS_1000XXXX 10
+#define SUBSYS_1001XXXX 11
+#define SUBSYS_1002XXXX 12
+#define SUBSYS_1003XXXX 13
+#define SUBSYS_1004XXXX 14
+#define SUBSYS_1005XXXX 15
+#define SUBSYS_1020XXXX 16
+#define SUBSYS_1028XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1800XXXX 22
+#define SUBSYS_1801XXXX 23
+#define SUBSYS_1802XXXX 24
+#define SUBSYS_1804XXXX 25
+#define SUBSYS_1805XXXX 26
+#define SUBSYS_1808XXXX 27
+#define SUBSYS_180aXXXX 28
+#define SUBSYS_180bXXXX 29
+
+#define CMDQ_EVENT_VDEC_LAT_SOF_0 0
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_0 1
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_1 2
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_2 3
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_3 4
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_4 5
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_5 6
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_6 7
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_0 8
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_1 9
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_2 10
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_3 11
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_4 12
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_5 13
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_6 14
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_7 15
+
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 65
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 66
+#define CMDQ_EVENT_ISP_FRAME_DONE_C 67
+#define CMDQ_EVENT_CAMSV0_PASS1_DONE 68
+#define CMDQ_EVENT_CAMSV02_PASS1_DONE 69
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71
+#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73
+#define CMDQ_EVENT_MRAW_1_PASS1_DONE 74
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87
+#define CMDQ_EVENT_TG_OVRUN_A_INT 88
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89
+#define CMDQ_EVENT_TG_OVRUN_B_INT 90
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91
+#define CMDQ_EVENT_TG_OVRUN_C_INT 92
+#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 93
+#define CMDQ_EVENT_TG_OVRUN_M0_INT 94
+#define CMDQ_EVENT_DMA_R1_ERROR_M0_INT 95
+#define CMDQ_EVENT_TG_GRABERR_M0_INT 96
+#define CMDQ_EVENT_TG_GRABERR_M1_INT 97
+#define CMDQ_EVENT_TG_GRABERR_A_INT 98
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99
+#define CMDQ_EVENT_TG_GRABERR_B_INT 100
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101
+#define CMDQ_EVENT_TG_GRABERR_C_INT 102
+#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 103
+
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130
+#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133
+#define CMDQ_EVENT_VENC_C0_CMDQ_WP_2ND_STAGE_DONE 134
+#define CMDQ_EVENT_VENC_C0_CMDQ_WP_3RD_STAGE_DONE 135
+#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136
+#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137
+#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138
+
+#define CMDQ_EVENT_VDEC_CORE0_SOF_0 160
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_0 161
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_1 162
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_2 163
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_3 164
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_4 165
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_5 166
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_6 167
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_0 168
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_1 169
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_2 170
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_3 171
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_4 172
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_5 173
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_6 174
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_7 175
+#define CMDQ_EVENT_FDVT_DONE 177
+#define CMDQ_EVENT_FE_DONE 178
+#define CMDQ_EVENT_RSC_DONE 179
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 180
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 181
+
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_0 193
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_1 194
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_2 195
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_3 196
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_4 197
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_5 198
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_6 199
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_7 200
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_8 201
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_9 202
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_10 203
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_11 204
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_12 205
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_13 206
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_14 207
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_15 208
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_16 209
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_17 210
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_18 211
+#define CMDQ_EVENT_IMG2_DIP_DMA_ERR_EVENT 212
+#define CMDQ_EVENT_IMG2_AMD_FRAME_DONE 213
+#define CMDQ_EVENT_IMG2_MFB_DONE_LINK_MISC 214
+#define CMDQ_EVENT_IMG2_WPE_A_DONE_LINK_MISC 215
+#define CMDQ_EVENT_IMG2_MSS_DONE_LINK_MISC 216
+
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_0 225
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_1 226
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_2 227
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_3 228
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_4 229
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_5 230
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_6 231
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_7 232
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_8 233
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_9 234
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_10 235
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_11 236
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_12 237
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_13 238
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_14 239
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_15 240
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_16 241
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_17 242
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_18 243
+#define CMDQ_EVENT_IMG1_DIP_DMA_ERR_EVENT 244
+#define CMDQ_EVENT_IMG1_AMD_FRAME_DONE 245
+#define CMDQ_EVENT_IMG1_MFB_DONE_LINK_MISC 246
+#define CMDQ_EVENT_IMG1_WPE_A_DONE_LINK_MISC 247
+#define CMDQ_EVENT_IMG1_MSS_DONE_LINK_MISC 248
+
+#define CMDQ_EVENT_MDP_RDMA0_SOF 256
+#define CMDQ_EVENT_MDP_RDMA1_SOF 257
+#define CMDQ_EVENT_MDP_AAL0_SOF 258
+#define CMDQ_EVENT_MDP_AAL1_SOF 259
+#define CMDQ_EVENT_MDP_HDR0_SOF 260
+#define CMDQ_EVENT_MDP_HDR1_SOF 261
+#define CMDQ_EVENT_MDP_RSZ0_SOF 262
+#define CMDQ_EVENT_MDP_RSZ1_SOF 263
+#define CMDQ_EVENT_MDP_WROT0_SOF 264
+#define CMDQ_EVENT_MDP_WROT1_SOF 265
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 266
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 267
+#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 268
+#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 269
+#define CMDQ_EVENT_MDP_COLOR0_SOF 270
+#define CMDQ_EVENT_MDP_COLOR1_SOF 271
+#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290
+#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291
+#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294
+#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295
+#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 302
+#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 303
+#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 306
+#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 307
+#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 308
+#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 309
+#define CMDQ_EVENT_MDP_COLOR1_FRAME_DONE 312
+#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 313
+#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 316
+#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 317
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335
+#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339
+#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343
+
+#define CMDQ_EVENT_DISP_OVL0_SOF 384
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385
+#define CMDQ_EVENT_DISP_RDMA0_SOF 386
+#define CMDQ_EVENT_DISP_RSZ0_SOF 387
+#define CMDQ_EVENT_DISP_COLOR0_SOF 388
+#define CMDQ_EVENT_DISP_CCORR0_SOF 389
+#define CMDQ_EVENT_DISP_AAL0_SOF 390
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 391
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 392
+#define CMDQ_EVENT_DISP_DITHER0_SOF 393
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_SOF 394
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_SOF 395
+#define CMDQ_EVENT_DSI0_SOF 396
+#define CMDQ_EVENT_DISP_WDMA0_SOF 397
+#define CMDQ_EVENT_DISP_UFBC_WDMA0_SOF 398
+#define CMDQ_EVENT_DISP_PWM0_SOF 399
+#define CMDQ_EVENT_DISP_OVL2_2L_SOF 400
+#define CMDQ_EVENT_DISP_RDMA4_SOF 401
+#define CMDQ_EVENT_DISP_DPI0_SOF 402
+#define CMDQ_EVENT_MDP_RDMA4_SOF 403
+#define CMDQ_EVENT_MDP_HDR4_SOF 404
+#define CMDQ_EVENT_MDP_RSZ4_SOF 405
+#define CMDQ_EVENT_MDP_AAL4_SOF 406
+#define CMDQ_EVENT_MDP_TDSHP4_SOF 407
+#define CMDQ_EVENT_MDP_COLOR4_SOF 408
+#define CMDQ_EVENT_DISP_Y2R0_SOF 409
+#define CMDQ_EVENT_MDP_TDSHP4_FRAME_DONE 410
+#define CMDQ_EVENT_MDP_RSZ4_FRAME_DONE 411
+#define CMDQ_EVENT_MDP_RDMA4_FRAME_DONE 412
+#define CMDQ_EVENT_MDP_HDR4_FRAME_DONE 413
+#define CMDQ_EVENT_MDP_COLOR4_FRAME_DONE 414
+#define CMDQ_EVENT_MDP_AAL4_FRAME_DONE 415
+#define CMDQ_EVENT_DSI0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_UFBC_WDMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 419
+#define CMDQ_EVENT_DISP_RDMA4_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_OVL2_2L_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_FRAME_DONE 427
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 428
+#define CMDQ_EVENT_DISP_DPI0_FRAME_DONE 429
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 430
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 431
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 432
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 433
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL2_2L_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 465
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 466
+#define CMDQ_MAX_HW_EVENT 512
+
+#endif
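The consumer pattern for these constants is the same as for the other GCE headers above; for example (labels and offsets illustrative):

	mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>;
	mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;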
diff --git a/include/dt-bindings/gce/mt8195-gce.h b/include/dt-bindings/gce/mt8195-gce.h
new file mode 100644
index 000000000000..dcfb302b8a5b
--- /dev/null
+++ b/include/dt-bindings/gce/mt8195-gce.h
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Jason-JH Lin <jason0jh.lin@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8195_H
+#define _DT_BINDINGS_GCE_MT8195_H
+
+/* Assigning a timeout of 0 also selects the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* CPR count in 32-bit registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1400XXXX 0
+#define SUBSYS_1401XXXX 1
+#define SUBSYS_1402XXXX 2
+#define SUBSYS_1c00XXXX 3
+#define SUBSYS_1c01XXXX 4
+#define SUBSYS_1c02XXXX 5
+#define SUBSYS_1c10XXXX 6
+#define SUBSYS_1c11XXXX 7
+#define SUBSYS_1c12XXXX 8
+#define SUBSYS_14f0XXXX 9
+#define SUBSYS_14f1XXXX 10
+#define SUBSYS_14f2XXXX 11
+#define SUBSYS_1800XXXX 12
+#define SUBSYS_1801XXXX 13
+#define SUBSYS_1802XXXX 14
+#define SUBSYS_1803XXXX 15
+#define SUBSYS_1032XXXX 16
+#define SUBSYS_1033XXXX 17
+#define SUBSYS_1600XXXX 18
+#define SUBSYS_1601XXXX 19
+#define SUBSYS_14e0XXXX 20
+#define SUBSYS_1c20XXXX 21
+#define SUBSYS_1c30XXXX 22
+#define SUBSYS_1c40XXXX 23
+#define SUBSYS_1c50XXXX 24
+#define SUBSYS_1c60XXXX 25
+
+/* GCE General Purpose Register (GPR) support */
+#define GCE_GPR_R00 0x0
+#define GCE_GPR_R01 0x1
+#define GCE_GPR_R02 0x2
+#define GCE_GPR_R03 0x3
+#define GCE_GPR_R04 0x4
+#define GCE_GPR_R05 0x5
+#define GCE_GPR_R06 0x6
+#define GCE_GPR_R07 0x7
+#define GCE_GPR_R08 0x8
+#define GCE_GPR_R09 0x9
+#define GCE_GPR_R10 0xa
+#define GCE_GPR_R11 0xb
+#define GCE_GPR_R12 0xc
+#define GCE_GPR_R13 0xd
+#define GCE_GPR_R14 0xe
+#define GCE_GPR_R15 0xf
+
+/* GCE hw event id */
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_0 1
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_1 2
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_2 3
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_3 4
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_4 5
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_5 6
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_6 7
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_7 8
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_8 9
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_9 10
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_10 11
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_11 12
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_12 13
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_13 14
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_14 15
+#define CMDQ_EVENT_TRAW0_DMA_ERROR_INT 16
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_0 17
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_1 18
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_2 19
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_3 20
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_4 21
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_5 22
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_6 23
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_7 24
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_8 25
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_9 26
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_10 27
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_11 28
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_12 29
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_13 30
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_14 31
+#define CMDQ_EVENT_TRAW1_DMA_ERROR_INT 32
+
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_0 65
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_1 66
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_2 67
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_3 68
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_4 69
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_5 70
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_6 71
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_7 72
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_8 73
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_9 74
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_10 75
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_11 76
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_12 77
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_13 78
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_14 79
+#define CMDQ_EVENT_DIP0_DMA_ERR 80
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_0 81
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_1 82
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_2 83
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_3 84
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_4 85
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_5 86
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_6 87
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_7 88
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_8 89
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_9 90
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_10 91
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_11 92
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_12 93
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_13 94
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_14 95
+#define CMDQ_EVENT_PQA0_DMA_ERR 96
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_0 97
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_1 98
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_2 99
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_3 100
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_4 101
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_5 102
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_6 103
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_7 104
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_8 105
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_9 106
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_10 107
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_11 108
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_12 109
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_13 110
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_14 111
+#define CMDQ_EVENT_PQB0_DMA_ERR 112
+#define CMDQ_EVENT_DIP0_DUMMY_0 113
+#define CMDQ_EVENT_DIP0_DUMMY_1 114
+#define CMDQ_EVENT_DIP0_DUMMY_2 115
+#define CMDQ_EVENT_DIP0_DUMMY_3 116
+#define CMDQ_EVENT_WPE0_EIS_GCE_FRAME_DONE 117
+#define CMDQ_EVENT_WPE0_EIS_DONE_SYNC_OUT 118
+#define CMDQ_EVENT_WPE0_TNR_GCE_FRAME_DONE 119
+#define CMDQ_EVENT_WPE0_TNR_DONE_SYNC_OUT 120
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_0 121
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_1 122
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_2 123
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_3 124
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_4 125
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_5 126
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_6 127
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_7 128
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_8 129
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_9 130
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_10 131
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_11 132
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_12 133
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_13 134
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_14 135
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_0 136
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_1 137
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_2 138
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_3 139
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_4 140
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_5 141
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_6 142
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_7 143
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_8 144
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_9 145
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_10 146
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_11 147
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_12 148
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_13 149
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_14 150
+#define CMDQ_EVENT_WPE0_DUMMY_0 151
+#define CMDQ_EVENT_IMGSYS_IPE_DUMMY 152
+#define CMDQ_EVENT_IMGSYS_IPE_FDVT_DONE 153
+#define CMDQ_EVENT_IMGSYS_IPE_ME_DONE 154
+#define CMDQ_EVENT_IMGSYS_IPE_DVS_DONE 155
+#define CMDQ_EVENT_IMGSYS_IPE_DVP_DONE 156
+
+#define CMDQ_EVENT_TPR_0 194
+#define CMDQ_EVENT_TPR_1 195
+#define CMDQ_EVENT_TPR_2 196
+#define CMDQ_EVENT_TPR_3 197
+#define CMDQ_EVENT_TPR_4 198
+#define CMDQ_EVENT_TPR_5 199
+#define CMDQ_EVENT_TPR_6 200
+#define CMDQ_EVENT_TPR_7 201
+#define CMDQ_EVENT_TPR_8 202
+#define CMDQ_EVENT_TPR_9 203
+#define CMDQ_EVENT_TPR_10 204
+#define CMDQ_EVENT_TPR_11 205
+#define CMDQ_EVENT_TPR_12 206
+#define CMDQ_EVENT_TPR_13 207
+#define CMDQ_EVENT_TPR_14 208
+#define CMDQ_EVENT_TPR_15 209
+#define CMDQ_EVENT_TPR_16 210
+#define CMDQ_EVENT_TPR_17 211
+#define CMDQ_EVENT_TPR_18 212
+#define CMDQ_EVENT_TPR_19 213
+#define CMDQ_EVENT_TPR_20 214
+#define CMDQ_EVENT_TPR_21 215
+#define CMDQ_EVENT_TPR_22 216
+#define CMDQ_EVENT_TPR_23 217
+#define CMDQ_EVENT_TPR_24 218
+#define CMDQ_EVENT_TPR_25 219
+#define CMDQ_EVENT_TPR_26 220
+#define CMDQ_EVENT_TPR_27 221
+#define CMDQ_EVENT_TPR_28 222
+#define CMDQ_EVENT_TPR_29 223
+#define CMDQ_EVENT_TPR_30 224
+#define CMDQ_EVENT_TPR_31 225
+#define CMDQ_EVENT_TPR_TIMEOUT_0 226
+#define CMDQ_EVENT_TPR_TIMEOUT_1 227
+#define CMDQ_EVENT_TPR_TIMEOUT_2 228
+#define CMDQ_EVENT_TPR_TIMEOUT_3 229
+#define CMDQ_EVENT_TPR_TIMEOUT_4 230
+#define CMDQ_EVENT_TPR_TIMEOUT_5 231
+#define CMDQ_EVENT_TPR_TIMEOUT_6 232
+#define CMDQ_EVENT_TPR_TIMEOUT_7 233
+#define CMDQ_EVENT_TPR_TIMEOUT_8 234
+#define CMDQ_EVENT_TPR_TIMEOUT_9 235
+#define CMDQ_EVENT_TPR_TIMEOUT_10 236
+#define CMDQ_EVENT_TPR_TIMEOUT_11 237
+#define CMDQ_EVENT_TPR_TIMEOUT_12 238
+#define CMDQ_EVENT_TPR_TIMEOUT_13 239
+#define CMDQ_EVENT_TPR_TIMEOUT_14 240
+#define CMDQ_EVENT_TPR_TIMEOUT_15 241
+
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256
+#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257
+#define CMDQ_EVENT_VPP0_STITCH_SOF 258
+#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259
+#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260
+#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262
+#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265
+#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266
+#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267
+
+#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269
+#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270
+#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271
+#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272
+
+#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288
+#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289
+#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290
+#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291
+#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292
+#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294
+#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297
+#define CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298
+#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299
+
+#define CMDQ_EVENT_VPP0_STREAM_DONE_0 320
+#define CMDQ_EVENT_VPP0_STREAM_DONE_1 321
+#define CMDQ_EVENT_VPP0_STREAM_DONE_2 322
+#define CMDQ_EVENT_VPP0_STREAM_DONE_3 323
+#define CMDQ_EVENT_VPP0_STREAM_DONE_4 324
+#define CMDQ_EVENT_VPP0_STREAM_DONE_5 325
+#define CMDQ_EVENT_VPP0_STREAM_DONE_6 326
+#define CMDQ_EVENT_VPP0_STREAM_DONE_7 327
+#define CMDQ_EVENT_VPP0_STREAM_DONE_8 328
+#define CMDQ_EVENT_VPP0_STREAM_DONE_9 329
+#define CMDQ_EVENT_VPP0_STREAM_DONE_10 330
+#define CMDQ_EVENT_VPP0_STREAM_DONE_11 331
+#define CMDQ_EVENT_VPP0_STREAM_DONE_12 332
+#define CMDQ_EVENT_VPP0_STREAM_DONE_13 333
+#define CMDQ_EVENT_VPP0_STREAM_DONE_14 334
+#define CMDQ_EVENT_VPP0_STREAM_DONE_15 335
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_0 336
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_1 337
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_2 338
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_3 339
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_4 340
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_5 341
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_6 342
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_7 343
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_8 344
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_9 345
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_10 346
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_11 347
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_12 348
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_13 349
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_14 350
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_15 351
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352
+#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID 353
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354
+#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355
+
+#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384
+#define CMDQ_EVENT_VPP1_DGI_SOF 385
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402
+#define CMDQ_EVENT_VPP1_SVPP1_TDSHP_SOF 403
+#define CMDQ_EVENT_VPP1_SVPP2_TDSHP_SOF 404
+#define CMDQ_EVENT_VPP1_SVPP3_TDSHP_SOF 405
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417
+#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418
+#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433
+#define CMDQ_EVENT_VPP1_FRAME_DONE_10 434
+#define CMDQ_EVENT_VPP1_FRAME_DONE_11 435
+#define CMDQ_EVENT_VPP1_FRAME_DONE_12 436
+#define CMDQ_EVENT_VPP1_FRAME_DONE_13 437
+#define CMDQ_EVENT_VPP1_FRAME_DONE_14 438
+#define CMDQ_EVENT_VPP1_STREAM_DONE_0 439
+#define CMDQ_EVENT_VPP1_STREAM_DONE_1 440
+#define CMDQ_EVENT_VPP1_STREAM_DONE_2 441
+#define CMDQ_EVENT_VPP1_STREAM_DONE_3 442
+#define CMDQ_EVENT_VPP1_STREAM_DONE_4 443
+#define CMDQ_EVENT_VPP1_STREAM_DONE_5 444
+#define CMDQ_EVENT_VPP1_STREAM_DONE_6 445
+#define CMDQ_EVENT_VPP1_STREAM_DONE_7 446
+#define CMDQ_EVENT_VPP1_STREAM_DONE_8 447
+#define CMDQ_EVENT_VPP1_STREAM_DONE_9 448
+#define CMDQ_EVENT_VPP1_STREAM_DONE_10 449
+#define CMDQ_EVENT_VPP1_STREAM_DONE_11 450
+#define CMDQ_EVENT_VPP1_STREAM_DONE_12 451
+#define CMDQ_EVENT_VPP1_STREAM_DONE_13 452
+#define CMDQ_EVENT_VPP1_STREAM_DONE_14 453
+#define CMDQ_EVENT_VPP1_STREAM_DONE_15 454
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_0 455
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_1 456
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_2 457
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_3 458
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_4 459
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_5 460
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_6 461
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_7 462
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_8 463
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_9 464
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_10 465
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_11 466
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_12 467
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_13 468
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_14 469
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_15 470
+#define CMDQ_EVENT_VPP1_DGI_0 471
+#define CMDQ_EVENT_VPP1_DGI_1 472
+#define CMDQ_EVENT_VPP1_DGI_2 473
+#define CMDQ_EVENT_VPP1_DGI_3 474
+#define CMDQ_EVENT_VPP1_DGI_4 475
+#define CMDQ_EVENT_VPP1_DGI_5 476
+#define CMDQ_EVENT_VPP1_DGI_6 477
+#define CMDQ_EVENT_VPP1_DGI_7 478
+#define CMDQ_EVENT_VPP1_DGI_8 479
+#define CMDQ_EVENT_VPP1_DGI_9 480
+#define CMDQ_EVENT_VPP1_DGI_10 481
+#define CMDQ_EVENT_VPP1_DGI_11 482
+#define CMDQ_EVENT_VPP1_DGI_12 483
+#define CMDQ_EVENT_VPP1_DGI_13 484
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE 485
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE 486
+#define CMDQ_EVENT_VPP1_MDP_OVL_FRAME_RESET_DONE_PULSE 487
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI 488
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI 489
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE 490
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE 491
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE 492
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 493
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 494
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 495
+
+#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516
+#define CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519
+#define CMDQ_EVENT_VDO0_DSI0_SOF 520
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521
+#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526
+#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529
+#define CMDQ_EVENT_VDO0_DSI1_SOF 530
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532
+#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 534
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 535
+#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 536
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 537
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 538
+#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 539
+#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 540
+
+#define CMDQ_EVENT_VDO0_DISP_OVL0_FRAME_DONE 544
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 545
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 546
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_FRAME_DONE 547
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_FRAME_DONE 548
+#define CMDQ_EVENT_VDO0_DISP_AAL0_FRAME_DONE 549
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_FRAME_DONE 550
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_FRAME_DONE 551
+#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 552
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_FRAME_DONE 553
+#define CMDQ_EVENT_VDO0_DISP_OVL1_FRAME_DONE 554
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_FRAME_DONE 555
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_FRAME_DONE 556
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_FRAME_DONE 557
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_FRAME_DONE 558
+#define CMDQ_EVENT_VDO0_DISP_AAL1_FRAME_DONE 559
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_FRAME_DONE 560
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_FRAME_DONE 561
+#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 562
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_FRAME_DONE 563
+
+#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 565
+
+#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG 576
+#define CMDQ_EVENT_VDO0_DSI0_IRQ_ENG_EVENT_MM 577
+#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 578
+#define CMDQ_EVENT_VDO0_DSI0_DONE_ENG_EVENT_MM 579
+#define CMDQ_EVENT_VDO0_DSI0_SOF_ENG_EVENT_MM 580
+#define CMDQ_EVENT_VDO0_DSI0_VACTL_ENG_EVENT_MM 581
+#define CMDQ_EVENT_VDO0_DSI1_IRQ_ENG_EVENT_MM 582
+#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 583
+#define CMDQ_EVENT_VDO0_DSI1_DONE_ENG_EVENT_MM 584
+#define CMDQ_EVENT_VDO0_DSI1_SOF_ENG_EVENT_MM 585
+#define CMDQ_EVENT_VDO0_DSI1_VACTL_ENG_EVENT_MM 586
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SW_RST_DONE_ENG 587
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SW_RST_DONE_ENG 588
+#define CMDQ_EVENT_VDO0_DISP_OVL0_RST_DONE_ENG 589
+#define CMDQ_EVENT_VDO0_DISP_OVL1_RST_DONE_ENG 590
+#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_START_ENG_EVENT_MM 591
+#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_END_ENG_EVENT_MM 592
+#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_START_ENG_EVENT_MM 593
+#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_END_ENG_EVENT_MM 594
+#define CMDQ_EVENT_VDO0_DP_INTF0_TARGET_LINE_ENG_EVENT_MM 595
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_ENG 596
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 597
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 598
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 599
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 600
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 601
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 602
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 603
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 604
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 605
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 606
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 607
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 608
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 609
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 610
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 611
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 612
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_0 613
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_1 614
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_2 615
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_3 616
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_4 617
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_5 618
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_6 619
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_7 620
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_8 621
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_9 622
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_10 623
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_11 624
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_12 625
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_13 626
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_14 627
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_15 628
+
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 648
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 649
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 650
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 651
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 652
+#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 653
+#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 654
+#define CMDQ_EVENT_VDO1_VDO0_DSC_DL_ASYNC_SOF 655
+#define CMDQ_EVENT_VDO1_VDO0_MERGE_DL_ASYNC_SOF 656
+#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 657
+#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 658
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 659
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 660
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 661
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 662
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 663
+#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 664
+
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 672
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 673
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 674
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 675
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 676
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 677
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 678
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 679
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 680
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 681
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 682
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 683
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 684
+#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 685
+#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 686
+#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 687
+#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 688
+
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 704
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 705
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 706
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 707
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 708
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 709
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 710
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 711
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 712
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 713
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 714
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 715
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 716
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 717
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 718
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 719
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_0 720
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_1 721
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_2 722
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_3 723
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_4 724
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_5 725
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_6 726
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_7 727
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_8 728
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_9 729
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_10 730
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_11 731
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_12 732
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_13 733
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_14 734
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_15 735
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 736
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 737
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 738
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 739
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 740
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 741
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 742
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 743
+
+#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 745
+#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 746
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 747
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 748
+#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 749
+#define CMDQ_EVENT_VDO1_VPP_MERGE0 750
+#define CMDQ_EVENT_VDO1_VPP_MERGE1 751
+#define CMDQ_EVENT_VDO1_VPP_MERGE2 752
+#define CMDQ_EVENT_VDO1_VPP_MERGE3 753
+#define CMDQ_EVENT_VDO1_VPP_MERGE4 754
+#define CMDQ_EVENT_VDO1_HDMITX 755
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 756
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 757
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 758
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 759
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 760
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 761
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 762
+
+#define CMDQ_EVENT_CAM_A_PASS1_DONE 769
+#define CMDQ_EVENT_CAM_B_PASS1_DONE 770
+#define CMDQ_EVENT_GCAMSV_A_PASS1_DONE 771
+#define CMDQ_EVENT_GCAMSV_B_PASS1_DONE 772
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 773
+#define CMDQ_EVENT_MRAW_1_PASS1_DONE 774
+#define CMDQ_EVENT_MRAW_2_PASS1_DONE 775
+#define CMDQ_EVENT_MRAW_3_PASS1_DONE 776
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL_X 777
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL_X 778
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 779
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 780
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 781
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 782
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 783
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 784
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 785
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 786
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL_X 787
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL_X 788
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL_X 789
+#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL_X 790
+#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT_X0 791
+#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT_X0 792
+#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 793
+#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 794
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 795
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 796
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 797
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 798
+#define CMDQ_EVENT_U_CAMSYS_PDA_IRQO_EVENT_DONE_D1 799
+#define CMDQ_EVENT_SUBB_TG_INT4 800
+#define CMDQ_EVENT_SUBB_TG_INT3 801
+#define CMDQ_EVENT_SUBB_TG_INT2 802
+#define CMDQ_EVENT_SUBB_TG_INT1 803
+#define CMDQ_EVENT_SUBA_TG_INT4 804
+#define CMDQ_EVENT_SUBA_TG_INT3 805
+#define CMDQ_EVENT_SUBA_TG_INT2 806
+#define CMDQ_EVENT_SUBA_TG_INT1 807
+#define CMDQ_EVENT_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 808
+#define CMDQ_EVENT_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 809
+#define CMDQ_EVENT_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 810
+#define CMDQ_EVENT_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 811
+#define CMDQ_EVENT_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 812
+#define CMDQ_EVENT_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 813
+#define CMDQ_EVENT_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 814
+#define CMDQ_EVENT_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 815
+#define CMDQ_EVENT_GCE1_SOF_0 816
+#define CMDQ_EVENT_GCE1_SOF_1 817
+#define CMDQ_EVENT_GCE1_SOF_2 818
+#define CMDQ_EVENT_GCE1_SOF_3 819
+#define CMDQ_EVENT_GCE1_SOF_4 820
+#define CMDQ_EVENT_GCE1_SOF_5 821
+#define CMDQ_EVENT_GCE1_SOF_6 822
+#define CMDQ_EVENT_GCE1_SOF_7 823
+#define CMDQ_EVENT_GCE1_SOF_8 824
+#define CMDQ_EVENT_GCE1_SOF_9 825
+#define CMDQ_EVENT_GCE1_SOF_10 826
+#define CMDQ_EVENT_GCE1_SOF_11 827
+#define CMDQ_EVENT_GCE1_SOF_12 828
+#define CMDQ_EVENT_GCE1_SOF_13 829
+#define CMDQ_EVENT_GCE1_SOF_14 830
+#define CMDQ_EVENT_GCE1_SOF_15 831
+
+#define CMDQ_EVENT_VDEC_LAT_LINE_COUNT_THRESHOLD_INTERRUPT 832
+#define CMDQ_EVENT_VDEC_LAT_VDEC_INT 833
+#define CMDQ_EVENT_VDEC_LAT_VDEC_PAUSE 834
+#define CMDQ_EVENT_VDEC_LAT_VDEC_DEC_ERROR 835
+#define CMDQ_EVENT_VDEC_LAT_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 836
+#define CMDQ_EVENT_VDEC_LAT_VDEC_FRAME_DONE 837
+#define CMDQ_EVENT_VDEC_LAT_INI_FETCH_RDY 838
+#define CMDQ_EVENT_VDEC_LAT_PROCESS_FLAG 839
+#define CMDQ_EVENT_VDEC_LAT_SEARCH_START_CODE_DONE 840
+#define CMDQ_EVENT_VDEC_LAT_REF_REORDER_DONE 841
+#define CMDQ_EVENT_VDEC_LAT_WP_TBLE_DONE 842
+#define CMDQ_EVENT_VDEC_LAT_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 843
+#define CMDQ_EVENT_VDEC_LAT_GCE_CNT_OP_THRESHOLD 847
+
+#define CMDQ_EVENT_VDEC_LAT1_LINE_COUNT_THRESHOLD_INTERRUPT 848
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_INT 849
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_PAUSE 850
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_DEC_ERROR 851
+#define CMDQ_EVENT_VDEC_LAT1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 852
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_FRAME_DONE 853
+#define CMDQ_EVENT_VDEC_LAT1_INI_FETCH_RDY 854
+#define CMDQ_EVENT_VDEC_LAT1_PROCESS_FLAG 855
+#define CMDQ_EVENT_VDEC_LAT1_SEARCH_START_CODE_DONE 856
+#define CMDQ_EVENT_VDEC_LAT1_REF_REORDER_DONE 857
+#define CMDQ_EVENT_VDEC_LAT1_WP_TBLE_DONE 858
+#define CMDQ_EVENT_VDEC_LAT1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 859
+#define CMDQ_EVENT_VDEC_LAT1_GCE_CNT_OP_THRESHOLD 863
+
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_0 864
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_1 865
+
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_8 872
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_9 873
+
+#define CMDQ_EVENT_VDEC_CORE_LINE_COUNT_THRESHOLD_INTERRUPT 896
+#define CMDQ_EVENT_VDEC_CORE_VDEC_INT 897
+#define CMDQ_EVENT_VDEC_CORE_VDEC_PAUSE 898
+#define CMDQ_EVENT_VDEC_CORE_VDEC_DEC_ERROR 899
+#define CMDQ_EVENT_VDEC_CORE_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 900
+#define CMDQ_EVENT_VDEC_CORE_VDEC_FRAME_DONE 901
+#define CMDQ_EVENT_VDEC_CORE_INI_FETCH_RDY 902
+#define CMDQ_EVENT_VDEC_CORE_PROCESS_FLAG 903
+#define CMDQ_EVENT_VDEC_CORE_SEARCH_START_CODE_DONE 904
+#define CMDQ_EVENT_VDEC_CORE_REF_REORDER_DONE 905
+#define CMDQ_EVENT_VDEC_CORE_WP_TBLE_DONE 906
+#define CMDQ_EVENT_VDEC_CORE_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 907
+#define CMDQ_EVENT_VDEC_CORE_GCE_CNT_OP_THRESHOLD 911
+
+#define CMDQ_EVENT_VDEC_CORE1_LINE_COUNT_THRESHOLD_INTERRUPT 912
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_INT 913
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_PAUSE 914
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_DEC_ERROR 915
+#define CMDQ_EVENT_VDEC_CORE1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 916
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_FRAME_DONE 917
+#define CMDQ_EVENT_VDEC_CORE1_INI_FETCH_RDY 918
+#define CMDQ_EVENT_VDEC_CORE1_PROCESS_FLAG 919
+#define CMDQ_EVENT_VDEC_CORE1_SEARCH_START_CODE_DONE 920
+#define CMDQ_EVENT_VDEC_CORE1_REF_REORDER_DONE 921
+#define CMDQ_EVENT_VDEC_CORE1_WP_TBLE_DONE 922
+#define CMDQ_EVENT_VDEC_CORE1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 923
+#define CMDQ_EVENT_VDEC_CORE1_CNT_OP_THRESHOLD 927
+
+#define CMDQ_EVENT_VENC_TOP_FRAME_DONE 929
+#define CMDQ_EVENT_VENC_TOP_PAUSE_DONE 930
+#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 931
+#define CMDQ_EVENT_VENC_TOP_MB_DONE 932
+#define CMDQ_EVENT_VENC_TOP_128BYTE_DONE 933
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 934
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_DONE 935
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 936
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_INSUFF_DONE 937
+#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 938
+#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 939
+#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 940
+#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 941
+#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 942
+
+#define CMDQ_EVENT_VENC_CORE1_TOP_FRAME_DONE 945
+#define CMDQ_EVENT_VENC_CORE1_TOP_PAUSE_DONE 946
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGENC_DONE 947
+#define CMDQ_EVENT_VENC_CORE1_TOP_MB_DONE 948
+#define CMDQ_EVENT_VENC_CORE1_TOP_128BYTE_DONE 949
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_DONE 950
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_DONE 951
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_INSUFF_DONE 952
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_INSUFF_DONE 953
+#define CMDQ_EVENT_VENC_CORE1_TOP_WP_2ND_STAGE_DONE 954
+#define CMDQ_EVENT_VENC_CORE1_TOP_WP_3RD_STAGE_DONE 955
+#define CMDQ_EVENT_VENC_CORE1_TOP_PPS_HEADER_DONE 956
+#define CMDQ_EVENT_VENC_CORE1_TOP_SPS_HEADER_DONE 957
+#define CMDQ_EVENT_VENC_CORE1_TOP_VPS_HEADER_DONE 958
+
+#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 962
+#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 963
+
+#define CMDQ_EVENT_WPE_VPP1_WPE_GCE_FRAME_DONE 969
+#define CMDQ_EVENT_WPE_VPP1_WPE_DONE_SYNC_OUT 970
+
+#define CMDQ_EVENT_DP_TX_VBLANK_FALLING 994
+#define CMDQ_EVENT_DP_TX_VSC_FINISH 995
+
+#define CMDQ_EVENT_OUTPIN_0 1018
+#define CMDQ_EVENT_OUTPIN_1 1019
+
+/* end of hw event */
+#define CMDQ_MAX_HW_EVENT 1019
+
+#endif
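
The event IDs above are consumed from device tree sources. A minimal usage sketch follows; the header path, node name, compatible string and the mediatek,gce-events property are assumptions based on typical MediaTek GCE consumer bindings, not taken from this diff:

#include <dt-bindings/gce/mt8195-gce.h>	/* assumed path for the header above */

mutex: mutex@1c016000 {
	compatible = "mediatek,mt8195-disp-mutex";	/* assumed consumer */
	mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
};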
diff --git a/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
new file mode 100644
index 000000000000..4e16d31a71c9
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+
+#define GPIOC_0 13
+#define GPIOC_1 14
+#define GPIOC_2 15
+#define GPIOC_3 16
+#define GPIOC_4 17
+#define GPIOC_5 18
+#define GPIOC_6 19
+
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOX_17 37
+#define GPIOX_18 38
+#define GPIOX_19 39
+
+#define GPIOW_0 40
+#define GPIOW_1 41
+#define GPIOW_2 42
+#define GPIOW_3 43
+#define GPIOW_4 44
+#define GPIOW_5 45
+#define GPIOW_6 46
+#define GPIOW_7 47
+#define GPIOW_8 48
+#define GPIOW_9 49
+#define GPIOW_10 50
+#define GPIOW_11 51
+#define GPIOW_12 52
+#define GPIOW_13 53
+#define GPIOW_14 54
+#define GPIOW_15 55
+#define GPIOW_16 56
+
+#define GPIOD_0 57
+#define GPIOD_1 58
+#define GPIOD_2 59
+#define GPIOD_3 60
+#define GPIOD_4 61
+#define GPIOD_5 62
+#define GPIOD_6 63
+#define GPIOD_7 64
+#define GPIOD_8 65
+#define GPIOD_9 66
+#define GPIOD_10 67
+#define GPIOD_11 68
+#define GPIOD_12 69
+
+#define GPIOE_0 70
+#define GPIOE_1 71
+#define GPIOE_2 72
+#define GPIOE_3 73
+#define GPIOE_4 74
+#define GPIOE_5 75
+#define GPIOE_6 76
+
+#define GPIOZ_0 77
+#define GPIOZ_1 78
+#define GPIOZ_2 79
+#define GPIOZ_3 80
+#define GPIOZ_4 81
+#define GPIOZ_5 82
+#define GPIOZ_6 83
+#define GPIOZ_7 84
+#define GPIOZ_8 85
+#define GPIOZ_9 86
+#define GPIOZ_10 87
+#define GPIOZ_11 88
+#define GPIOZ_12 89
+#define GPIOZ_13 90
+
+#define GPIOT_0 91
+#define GPIOT_1 92
+#define GPIOT_2 93
+#define GPIOT_3 94
+#define GPIOT_4 95
+#define GPIOT_5 96
+#define GPIOT_6 97
+#define GPIOT_7 98
+#define GPIOT_8 99
+#define GPIOT_9 100
+#define GPIOT_10 101
+#define GPIOT_11 102
+#define GPIOT_12 103
+#define GPIOT_13 104
+#define GPIOT_14 105
+#define GPIOT_15 106
+#define GPIOT_16 107
+#define GPIOT_17 108
+#define GPIOT_18 109
+#define GPIOT_19 110
+#define GPIOT_20 111
+#define GPIOT_21 112
+#define GPIOT_22 113
+#define GPIOT_23 114
+
+#define GPIOM_0 115
+#define GPIOM_1 116
+#define GPIOM_2 117
+#define GPIOM_3 118
+#define GPIOM_4 119
+#define GPIOM_5 120
+#define GPIOM_6 121
+#define GPIOM_7 122
+#define GPIOM_8 123
+#define GPIOM_9 124
+#define GPIOM_10 125
+#define GPIOM_11 126
+#define GPIOM_12 127
+#define GPIOM_13 128
+
+#define GPIOY_0 129
+#define GPIOY_1 130
+#define GPIOY_2 131
+#define GPIOY_3 132
+#define GPIOY_4 133
+#define GPIOY_5 134
+#define GPIOY_6 135
+#define GPIOY_7 136
+#define GPIOY_8 137
+#define GPIOY_9 138
+#define GPIOY_10 139
+#define GPIOY_11 140
+#define GPIOY_12 141
+#define GPIOY_13 142
+#define GPIOY_14 143
+#define GPIOY_15 144
+#define GPIOY_16 145
+#define GPIOY_17 146
+#define GPIOY_18 147
+
+#define GPIOH_0 148
+#define GPIOH_1 149
+#define GPIOH_2 150
+#define GPIOH_3 151
+#define GPIOH_4 152
+#define GPIOH_5 153
+#define GPIOH_6 154
+#define GPIOH_7 155
+
+#define GPIO_TEST_N 156
+
+#endif /* _DT_BINDINGS_AMLOGIC_T7_GPIO_H */
diff --git a/include/dt-bindings/gpio/amlogic-c3-gpio.h b/include/dt-bindings/gpio/amlogic-c3-gpio.h
new file mode 100644
index 000000000000..75c8da6f505f
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic-c3-gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+
+#define GPIOE_0 0
+#define GPIOE_1 1
+#define GPIOE_2 2
+#define GPIOE_3 3
+#define GPIOE_4 4
+
+#define GPIOB_0 5
+#define GPIOB_1 6
+#define GPIOB_2 7
+#define GPIOB_3 8
+#define GPIOB_4 9
+#define GPIOB_5 10
+#define GPIOB_6 11
+#define GPIOB_7 12
+#define GPIOB_8 13
+#define GPIOB_9 14
+#define GPIOB_10 15
+#define GPIOB_11 16
+#define GPIOB_12 17
+#define GPIOB_13 18
+#define GPIOB_14 19
+
+#define GPIOC_0 20
+#define GPIOC_1 21
+#define GPIOC_2 22
+#define GPIOC_3 23
+#define GPIOC_4 24
+#define GPIOC_5 25
+#define GPIOC_6 26
+
+#define GPIOX_0 27
+#define GPIOX_1 28
+#define GPIOX_2 29
+#define GPIOX_3 30
+#define GPIOX_4 31
+#define GPIOX_5 32
+#define GPIOX_6 33
+#define GPIOX_7 34
+#define GPIOX_8 35
+#define GPIOX_9 36
+#define GPIOX_10 37
+#define GPIOX_11 38
+#define GPIOX_12 39
+#define GPIOX_13 40
+
+#define GPIOD_0 41
+#define GPIOD_1 42
+#define GPIOD_2 43
+#define GPIOD_3 44
+#define GPIOD_4 45
+#define GPIOD_5 46
+#define GPIOD_6 47
+
+#define GPIOA_0 48
+#define GPIOA_1 49
+#define GPIOA_2 50
+#define GPIOA_3 51
+#define GPIOA_4 52
+#define GPIOA_5 53
+
+#define GPIO_TEST_N 54
+
+#endif /* _DT_BINDINGS_AMLOGIC_C3_GPIO_H */
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index c029467e828b..b5d531237448 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
* This header provides constants for most GPIO bindings.
*
@@ -39,4 +39,7 @@
/* Bit 5 expresses pull down */
#define GPIO_PULL_DOWN 32
+/* Bit 6 expresses pull disable */
+#define GPIO_PULL_DISABLE 64
+
#endif
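
The pull flags are distinct bits, so they can be combined with the polarity flags from this same header using a bitwise OR in a GPIO specifier. A minimal sketch, with the controller label and consumer node as placeholders:

#include <dt-bindings/gpio/gpio.h>

button {
	gpios = <&gpio0 42 (GPIO_ACTIVE_LOW | GPIO_PULL_DISABLE)>;
};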
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
index f7bd69350d18..fa7bb0bbf010 100644
--- a/include/dt-bindings/gpio/meson-g12a-gpio.h
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2018 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
diff --git a/include/dt-bindings/gpio/meson-s4-gpio.h b/include/dt-bindings/gpio/meson-s4-gpio.h
new file mode 100644
index 000000000000..35aee21b94f1
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-s4-gpio.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_S4_GPIO_H
+#define _DT_BINDINGS_MESON_S4_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+#define GPIOB_13 13
+
+#define GPIOC_0 14
+#define GPIOC_1 15
+#define GPIOC_2 16
+#define GPIOC_3 17
+#define GPIOC_4 18
+#define GPIOC_5 19
+#define GPIOC_6 20
+#define GPIOC_7 21
+
+#define GPIOE_0 22
+#define GPIOE_1 23
+
+#define GPIOD_0 24
+#define GPIOD_1 25
+#define GPIOD_2 26
+#define GPIOD_3 27
+#define GPIOD_4 28
+#define GPIOD_5 29
+#define GPIOD_6 30
+#define GPIOD_7 31
+#define GPIOD_8 32
+#define GPIOD_9 33
+#define GPIOD_10 34
+#define GPIOD_11 35
+
+#define GPIOH_0 36
+#define GPIOH_1 37
+#define GPIOH_2 38
+#define GPIOH_3 39
+#define GPIOH_4 40
+#define GPIOH_5 41
+#define GPIOH_6 42
+#define GPIOH_7 43
+#define GPIOH_8 44
+#define GPIOH_9 45
+#define GPIOH_10 46
+#define GPIOH_11 47
+
+#define GPIOX_0 48
+#define GPIOX_1 49
+#define GPIOX_2 50
+#define GPIOX_3 51
+#define GPIOX_4 52
+#define GPIOX_5 53
+#define GPIOX_6 54
+#define GPIOX_7 55
+#define GPIOX_8 56
+#define GPIOX_9 57
+#define GPIOX_10 58
+#define GPIOX_11 59
+#define GPIOX_12 60
+#define GPIOX_13 61
+#define GPIOX_14 62
+#define GPIOX_15 63
+#define GPIOX_16 64
+#define GPIOX_17 65
+#define GPIOX_18 66
+#define GPIOX_19 67
+
+#define GPIOZ_0 68
+#define GPIOZ_1 69
+#define GPIOZ_2 70
+#define GPIOZ_3 71
+#define GPIOZ_4 72
+#define GPIOZ_5 73
+#define GPIOZ_6 74
+#define GPIOZ_7 75
+#define GPIOZ_8 76
+#define GPIOZ_9 77
+#define GPIOZ_10 78
+#define GPIOZ_11 79
+#define GPIOZ_12 80
+
+#define GPIO_TEST_N 81
+#endif /* _DT_BINDINGS_MESON_S4_GPIO_H */
diff --git a/include/dt-bindings/gpio/msc313-gpio.h b/include/dt-bindings/gpio/msc313-gpio.h
new file mode 100644
index 000000000000..5458c6580a02
--- /dev/null
+++ b/include/dt-bindings/gpio/msc313-gpio.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * GPIO definitions for MStar/SigmaStar MSC313 and later SoCs
+ *
+ * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp>
+ */
+
+#ifndef _DT_BINDINGS_MSC313_GPIO_H
+#define _DT_BINDINGS_MSC313_GPIO_H
+
+#define MSC313_GPIO_FUART 0
+#define MSC313_GPIO_FUART_RX (MSC313_GPIO_FUART + 0)
+#define MSC313_GPIO_FUART_TX (MSC313_GPIO_FUART + 1)
+#define MSC313_GPIO_FUART_CTS (MSC313_GPIO_FUART + 2)
+#define MSC313_GPIO_FUART_RTS (MSC313_GPIO_FUART + 3)
+
+#define MSC313_GPIO_SR (MSC313_GPIO_FUART_RTS + 1)
+#define MSC313_GPIO_SR_IO2 (MSC313_GPIO_SR + 0)
+#define MSC313_GPIO_SR_IO3 (MSC313_GPIO_SR + 1)
+#define MSC313_GPIO_SR_IO4 (MSC313_GPIO_SR + 2)
+#define MSC313_GPIO_SR_IO5 (MSC313_GPIO_SR + 3)
+#define MSC313_GPIO_SR_IO6 (MSC313_GPIO_SR + 4)
+#define MSC313_GPIO_SR_IO7 (MSC313_GPIO_SR + 5)
+#define MSC313_GPIO_SR_IO8 (MSC313_GPIO_SR + 6)
+#define MSC313_GPIO_SR_IO9 (MSC313_GPIO_SR + 7)
+#define MSC313_GPIO_SR_IO10 (MSC313_GPIO_SR + 8)
+#define MSC313_GPIO_SR_IO11 (MSC313_GPIO_SR + 9)
+#define MSC313_GPIO_SR_IO12 (MSC313_GPIO_SR + 10)
+#define MSC313_GPIO_SR_IO13 (MSC313_GPIO_SR + 11)
+#define MSC313_GPIO_SR_IO14 (MSC313_GPIO_SR + 12)
+#define MSC313_GPIO_SR_IO15 (MSC313_GPIO_SR + 13)
+#define MSC313_GPIO_SR_IO16 (MSC313_GPIO_SR + 14)
+#define MSC313_GPIO_SR_IO17 (MSC313_GPIO_SR + 15)
+
+#define MSC313_GPIO_SD (MSC313_GPIO_SR_IO17 + 1)
+#define MSC313_GPIO_SD_CLK (MSC313_GPIO_SD + 0)
+#define MSC313_GPIO_SD_CMD (MSC313_GPIO_SD + 1)
+#define MSC313_GPIO_SD_D0 (MSC313_GPIO_SD + 2)
+#define MSC313_GPIO_SD_D1 (MSC313_GPIO_SD + 3)
+#define MSC313_GPIO_SD_D2 (MSC313_GPIO_SD + 4)
+#define MSC313_GPIO_SD_D3 (MSC313_GPIO_SD + 5)
+
+#define MSC313_GPIO_I2C1 (MSC313_GPIO_SD_D3 + 1)
+#define MSC313_GPIO_I2C1_SCL (MSC313_GPIO_I2C1 + 0)
+#define MSC313_GPIO_I2C1_SDA (MSC313_GPIO_I2C1 + 1)
+
+#define MSC313_GPIO_SPI0 (MSC313_GPIO_I2C1_SDA + 1)
+#define MSC313_GPIO_SPI0_CZ (MSC313_GPIO_SPI0 + 0)
+#define MSC313_GPIO_SPI0_CK (MSC313_GPIO_SPI0 + 1)
+#define MSC313_GPIO_SPI0_DI (MSC313_GPIO_SPI0 + 2)
+#define MSC313_GPIO_SPI0_DO (MSC313_GPIO_SPI0 + 3)
+
+/* SSD20x */
+#define SSD20XD_GPIO_FUART 0
+#define SSD20XD_GPIO_FUART_RX (SSD20XD_GPIO_FUART + 0)
+#define SSD20XD_GPIO_FUART_TX (SSD20XD_GPIO_FUART + 1)
+#define SSD20XD_GPIO_FUART_CTS (SSD20XD_GPIO_FUART + 2)
+#define SSD20XD_GPIO_FUART_RTS (SSD20XD_GPIO_FUART + 3)
+
+#define SSD20XD_GPIO_SD (SSD20XD_GPIO_FUART_RTS + 1)
+#define SSD20XD_GPIO_SD_CLK (SSD20XD_GPIO_SD + 0)
+#define SSD20XD_GPIO_SD_CMD (SSD20XD_GPIO_SD + 1)
+#define SSD20XD_GPIO_SD_D0 (SSD20XD_GPIO_SD + 2)
+#define SSD20XD_GPIO_SD_D1 (SSD20XD_GPIO_SD + 3)
+#define SSD20XD_GPIO_SD_D2 (SSD20XD_GPIO_SD + 4)
+#define SSD20XD_GPIO_SD_D3 (SSD20XD_GPIO_SD + 5)
+
+#define SSD20XD_GPIO_UART0 (SSD20XD_GPIO_SD_D3 + 1)
+#define SSD20XD_GPIO_UART0_RX (SSD20XD_GPIO_UART0 + 0)
+#define SSD20XD_GPIO_UART0_TX (SSD20XD_GPIO_UART0 + 1)
+
+#define SSD20XD_GPIO_UART1 (SSD20XD_GPIO_UART0_TX + 1)
+#define SSD20XD_GPIO_UART1_RX (SSD20XD_GPIO_UART1 + 0)
+#define SSD20XD_GPIO_UART1_TX (SSD20XD_GPIO_UART1 + 1)
+
+#define SSD20XD_GPIO_TTL (SSD20XD_GPIO_UART1_TX + 1)
+#define SSD20XD_GPIO_TTL0 (SSD20XD_GPIO_TTL + 0)
+#define SSD20XD_GPIO_TTL1 (SSD20XD_GPIO_TTL + 1)
+#define SSD20XD_GPIO_TTL2 (SSD20XD_GPIO_TTL + 2)
+#define SSD20XD_GPIO_TTL3 (SSD20XD_GPIO_TTL + 3)
+#define SSD20XD_GPIO_TTL4 (SSD20XD_GPIO_TTL + 4)
+#define SSD20XD_GPIO_TTL5 (SSD20XD_GPIO_TTL + 5)
+#define SSD20XD_GPIO_TTL6 (SSD20XD_GPIO_TTL + 6)
+#define SSD20XD_GPIO_TTL7 (SSD20XD_GPIO_TTL + 7)
+#define SSD20XD_GPIO_TTL8 (SSD20XD_GPIO_TTL + 8)
+#define SSD20XD_GPIO_TTL9 (SSD20XD_GPIO_TTL + 9)
+#define SSD20XD_GPIO_TTL10 (SSD20XD_GPIO_TTL + 10)
+#define SSD20XD_GPIO_TTL11 (SSD20XD_GPIO_TTL + 11)
+#define SSD20XD_GPIO_TTL12 (SSD20XD_GPIO_TTL + 12)
+#define SSD20XD_GPIO_TTL13 (SSD20XD_GPIO_TTL + 13)
+#define SSD20XD_GPIO_TTL14 (SSD20XD_GPIO_TTL + 14)
+#define SSD20XD_GPIO_TTL15 (SSD20XD_GPIO_TTL + 15)
+#define SSD20XD_GPIO_TTL16 (SSD20XD_GPIO_TTL + 16)
+#define SSD20XD_GPIO_TTL17 (SSD20XD_GPIO_TTL + 17)
+#define SSD20XD_GPIO_TTL18 (SSD20XD_GPIO_TTL + 18)
+#define SSD20XD_GPIO_TTL19 (SSD20XD_GPIO_TTL + 19)
+#define SSD20XD_GPIO_TTL20 (SSD20XD_GPIO_TTL + 20)
+#define SSD20XD_GPIO_TTL21 (SSD20XD_GPIO_TTL + 21)
+#define SSD20XD_GPIO_TTL22 (SSD20XD_GPIO_TTL + 22)
+#define SSD20XD_GPIO_TTL23 (SSD20XD_GPIO_TTL + 23)
+#define SSD20XD_GPIO_TTL24 (SSD20XD_GPIO_TTL + 24)
+#define SSD20XD_GPIO_TTL25 (SSD20XD_GPIO_TTL + 25)
+#define SSD20XD_GPIO_TTL26 (SSD20XD_GPIO_TTL + 26)
+#define SSD20XD_GPIO_TTL27 (SSD20XD_GPIO_TTL + 27)
+
+#define SSD20XD_GPIO_GPIO (SSD20XD_GPIO_TTL27 + 1)
+#define SSD20XD_GPIO_GPIO0 (SSD20XD_GPIO_GPIO + 0)
+#define SSD20XD_GPIO_GPIO1 (SSD20XD_GPIO_GPIO + 1)
+#define SSD20XD_GPIO_GPIO2 (SSD20XD_GPIO_GPIO + 2)
+#define SSD20XD_GPIO_GPIO3 (SSD20XD_GPIO_GPIO + 3)
+#define SSD20XD_GPIO_GPIO4 (SSD20XD_GPIO_GPIO + 4)
+#define SSD20XD_GPIO_GPIO5 (SSD20XD_GPIO_GPIO + 5)
+#define SSD20XD_GPIO_GPIO6 (SSD20XD_GPIO_GPIO + 6)
+#define SSD20XD_GPIO_GPIO7 (SSD20XD_GPIO_GPIO + 7)
+#define SSD20XD_GPIO_GPIO10 (SSD20XD_GPIO_GPIO + 8)
+#define SSD20XD_GPIO_GPIO11 (SSD20XD_GPIO_GPIO + 9)
+#define SSD20XD_GPIO_GPIO12 (SSD20XD_GPIO_GPIO + 10)
+#define SSD20XD_GPIO_GPIO13 (SSD20XD_GPIO_GPIO + 11)
+#define SSD20XD_GPIO_GPIO14 (SSD20XD_GPIO_GPIO + 12)
+#define SSD20XD_GPIO_GPIO85 (SSD20XD_GPIO_GPIO + 13)
+#define SSD20XD_GPIO_GPIO86 (SSD20XD_GPIO_GPIO + 14)
+#define SSD20XD_GPIO_GPIO90 (SSD20XD_GPIO_GPIO + 15)
+
+#endif /* _DT_BINDINGS_MSC313_GPIO_H */
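
Note the design of this header: each pin group's base index is defined relative to the last pin of the preceding group, so absolute pin numbers are derived by the preprocessor instead of being maintained by hand. A minimal usage sketch, with the controller label as a placeholder:

#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/gpio/msc313-gpio.h>

led {
	gpios = <&gpio SSD20XD_GPIO_GPIO4 GPIO_ACTIVE_HIGH>;
};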
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
index 0782b05e2775..af0d9583be70 100644
--- a/include/dt-bindings/gpio/tegra186-gpio.h
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -8,8 +8,8 @@
* The second cell contains standard flag values specified in gpio.h.
*/
-#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H
-#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+#ifndef _DT_BINDINGS_GPIO_TEGRA186_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA186_GPIO_H
#include <dt-bindings/gpio/gpio.h>
diff --git a/include/dt-bindings/gpio/tegra234-gpio.h b/include/dt-bindings/gpio/tegra234-gpio.h
new file mode 100644
index 000000000000..784673c2c752
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra234-gpio.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra234-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA234_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA234_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA234_MAIN_GPIO_PORT_A 0
+#define TEGRA234_MAIN_GPIO_PORT_B 1
+#define TEGRA234_MAIN_GPIO_PORT_C 2
+#define TEGRA234_MAIN_GPIO_PORT_D 3
+#define TEGRA234_MAIN_GPIO_PORT_E 4
+#define TEGRA234_MAIN_GPIO_PORT_F 5
+#define TEGRA234_MAIN_GPIO_PORT_G 6
+#define TEGRA234_MAIN_GPIO_PORT_H 7
+#define TEGRA234_MAIN_GPIO_PORT_I 8
+#define TEGRA234_MAIN_GPIO_PORT_J 9
+#define TEGRA234_MAIN_GPIO_PORT_K 10
+#define TEGRA234_MAIN_GPIO_PORT_L 11
+#define TEGRA234_MAIN_GPIO_PORT_M 12
+#define TEGRA234_MAIN_GPIO_PORT_N 13
+#define TEGRA234_MAIN_GPIO_PORT_P 14
+#define TEGRA234_MAIN_GPIO_PORT_Q 15
+#define TEGRA234_MAIN_GPIO_PORT_R 16
+#define TEGRA234_MAIN_GPIO_PORT_X 17
+#define TEGRA234_MAIN_GPIO_PORT_Y 18
+#define TEGRA234_MAIN_GPIO_PORT_Z 19
+#define TEGRA234_MAIN_GPIO_PORT_AC 20
+#define TEGRA234_MAIN_GPIO_PORT_AD 21
+#define TEGRA234_MAIN_GPIO_PORT_AE 22
+#define TEGRA234_MAIN_GPIO_PORT_AF 23
+#define TEGRA234_MAIN_GPIO_PORT_AG 24
+
+#define TEGRA234_MAIN_GPIO(port, offset) \
+ ((TEGRA234_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA234_AON_GPIO_PORT_AA 0
+#define TEGRA234_AON_GPIO_PORT_BB 1
+#define TEGRA234_AON_GPIO_PORT_CC 2
+#define TEGRA234_AON_GPIO_PORT_DD 3
+#define TEGRA234_AON_GPIO_PORT_EE 4
+#define TEGRA234_AON_GPIO_PORT_GG 5
+
+#define TEGRA234_AON_GPIO(port, offset) \
+ ((TEGRA234_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
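
As the comment above states, the first specifier cell is the GPIO ID; TEGRA234_MAIN_GPIO() and TEGRA234_AON_GPIO() compute it as port * 8 + offset. A minimal consumer sketch, with the controller label as a placeholder:

#include <dt-bindings/gpio/tegra234-gpio.h>

power-key {
	gpios = <&gpio TEGRA234_MAIN_GPIO(H, 3) GPIO_ACTIVE_LOW>;	/* port H, pin 3 -> ID 59 */
};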
diff --git a/include/dt-bindings/gpio/tegra241-gpio.h b/include/dt-bindings/gpio/tegra241-gpio.h
new file mode 100644
index 000000000000..80cee3016be6
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra241-gpio.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for the nvidia,tegra241-gpio DT binding.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA241_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA241_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA241_MAIN_GPIO_PORT_A 0
+#define TEGRA241_MAIN_GPIO_PORT_B 1
+#define TEGRA241_MAIN_GPIO_PORT_C 2
+#define TEGRA241_MAIN_GPIO_PORT_D 3
+#define TEGRA241_MAIN_GPIO_PORT_E 4
+#define TEGRA241_MAIN_GPIO_PORT_F 5
+#define TEGRA241_MAIN_GPIO_PORT_G 6
+#define TEGRA241_MAIN_GPIO_PORT_H 7
+#define TEGRA241_MAIN_GPIO_PORT_I 8
+#define TEGRA241_MAIN_GPIO_PORT_J 9
+#define TEGRA241_MAIN_GPIO_PORT_K 10
+#define TEGRA241_MAIN_GPIO_PORT_L 11
+
+#define TEGRA241_MAIN_GPIO(port, offset) \
+ ((TEGRA241_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA241_AON_GPIO_PORT_AA 0
+#define TEGRA241_AON_GPIO_PORT_BB 1
+
+#define TEGRA241_AON_GPIO(port, offset) \
+ ((TEGRA241_AON_GPIO_PORT_##port * 8) + (offset))
+
+#endif
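
The Tegra241 helpers follow the same port * 8 + offset scheme as the Tegra234 ones above, with the AON controller in its own port namespace. A minimal sketch, labels assumed:

#include <dt-bindings/gpio/tegra241-gpio.h>

recovery-button {
	gpios = <&gpio_aon TEGRA241_AON_GPIO(BB, 1) GPIO_ACTIVE_LOW>;	/* port BB, pin 1 -> ID 9 */
};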
diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
index 70f99dbdbb42..866d36530583 100644
--- a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
+++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
@@ -13,4 +13,7 @@
/* pressure channel index */
#define AT91_SAMA5D2_ADC_P_CHANNEL 26
+/* SAMA7G5 Temperature sensor channel index. */
+#define AT91_SAMA7G5_ADC_TEMP_CHANNEL 31
+
#endif
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
index 4627a00e369e..a6ccc031635b 100644
--- a/include/dt-bindings/iio/adc/ingenic,adc.h
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -13,5 +13,6 @@
#define INGENIC_ADC_TOUCH_YN 6
#define INGENIC_ADC_TOUCH_XD 7
#define INGENIC_ADC_TOUCH_YD 8
+#define INGENIC_ADC_AUX0 9
#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
new file mode 100644
index 000000000000..6ee725547763
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+#define __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+
+/* ADC Channel Index */
+#define MT6370_CHAN_VBUSDIV5 0
+#define MT6370_CHAN_VBUSDIV2 1
+#define MT6370_CHAN_VSYS 2
+#define MT6370_CHAN_VBAT 3
+#define MT6370_CHAN_TS_BAT 4
+#define MT6370_CHAN_IBUS 5
+#define MT6370_CHAN_IBAT 6
+#define MT6370_CHAN_CHG_VDDP 7
+#define MT6370_CHAN_TEMP_JC 8
+#define MT6370_CHAN_MAX 9
+
+#endif
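
These channel indices plug into the generic IIO consumer binding. A minimal sketch referencing two channels, with the ADC label and consumer node as placeholders:

#include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>

charger {
	io-channels = <&mt6370_adc MT6370_CHAN_VBAT>,
		      <&mt6370_adc MT6370_CHAN_IBUS>;
	io-channel-names = "vbat", "ibus";
};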
diff --git a/include/dt-bindings/iio/addac/adi,ad74413r.h b/include/dt-bindings/iio/addac/adi,ad74413r.h
new file mode 100644
index 000000000000..204f92bbd79f
--- /dev/null
+++ b/include/dt-bindings/iio/addac/adi,ad74413r.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_ADI_AD74413R_H
+#define _DT_BINDINGS_ADI_AD74413R_H
+
+#define CH_FUNC_HIGH_IMPEDANCE 0x0
+#define CH_FUNC_VOLTAGE_OUTPUT 0x1
+#define CH_FUNC_CURRENT_OUTPUT 0x2
+#define CH_FUNC_VOLTAGE_INPUT 0x3
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER 0x4
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER 0x5
+#define CH_FUNC_RESISTANCE_INPUT 0x6
+#define CH_FUNC_DIGITAL_INPUT_LOGIC 0x7
+#define CH_FUNC_DIGITAL_INPUT_LOOP_POWER 0x8
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER_HART 0x9
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART 0xA
+
+#define CH_FUNC_MIN CH_FUNC_HIGH_IMPEDANCE
+#define CH_FUNC_MAX CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART
+
+#endif /* _DT_BINDINGS_ADI_AD74413R_H */
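
Each AD74413R channel is assigned one of these functions from the device tree. A minimal channel-node sketch; the adi,ch-func property name follows the corresponding ADI binding, and the surrounding node layout is an assumption:

#include <dt-bindings/iio/addac/adi,ad74413r.h>

channel@0 {
	reg = <0>;
	adi,ch-func = <CH_FUNC_VOLTAGE_OUTPUT>;
};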
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
new file mode 100644
index 000000000000..96908014e09e
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+
+#ifndef PM7325_SID
+#define PM7325_SID 1
+#endif
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+/* ADC channels for PM7325_ADC for PMIC7 */
+#define PM7325_ADC7_REF_GND (PM7325_SID << 8 | ADC7_REF_GND)
+#define PM7325_ADC7_1P25VREF (PM7325_SID << 8 | ADC7_1P25VREF)
+#define PM7325_ADC7_VREF_VADC (PM7325_SID << 8 | ADC7_VREF_VADC)
+#define PM7325_ADC7_DIE_TEMP (PM7325_SID << 8 | ADC7_DIE_TEMP)
+
+#define PM7325_ADC7_AMUX_THM1 (PM7325_SID << 8 | ADC7_AMUX_THM1)
+#define PM7325_ADC7_AMUX_THM2 (PM7325_SID << 8 | ADC7_AMUX_THM2)
+#define PM7325_ADC7_AMUX_THM3 (PM7325_SID << 8 | ADC7_AMUX_THM3)
+#define PM7325_ADC7_AMUX_THM4 (PM7325_SID << 8 | ADC7_AMUX_THM4)
+#define PM7325_ADC7_AMUX_THM5 (PM7325_SID << 8 | ADC7_AMUX_THM5)
+#define PM7325_ADC7_GPIO1 (PM7325_SID << 8 | ADC7_GPIO1)
+#define PM7325_ADC7_GPIO2 (PM7325_SID << 8 | ADC7_GPIO2)
+#define PM7325_ADC7_GPIO3 (PM7325_SID << 8 | ADC7_GPIO3)
+#define PM7325_ADC7_GPIO4 (PM7325_SID << 8 | ADC7_GPIO4)
+
+/* 30k pull-up1 */
+#define PM7325_ADC7_AMUX_THM1_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM7325_ADC7_AMUX_THM2_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM7325_ADC7_AMUX_THM3_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM7325_ADC7_AMUX_THM4_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM7325_ADC7_AMUX_THM5_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM7325_ADC7_GPIO1_30K_PU (PM7325_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM7325_ADC7_GPIO2_30K_PU (PM7325_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM7325_ADC7_GPIO3_30K_PU (PM7325_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM7325_ADC7_GPIO4_30K_PU (PM7325_SID << 8 | ADC7_GPIO4_30K_PU)
+
+/* 100k pull-up2 */
+#define PM7325_ADC7_AMUX_THM1_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM7325_ADC7_AMUX_THM2_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM7325_ADC7_AMUX_THM3_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM7325_ADC7_AMUX_THM4_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM7325_ADC7_AMUX_THM5_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM7325_ADC7_GPIO1_100K_PU (PM7325_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM7325_ADC7_GPIO2_100K_PU (PM7325_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM7325_ADC7_GPIO3_100K_PU (PM7325_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM7325_ADC7_GPIO4_100K_PU (PM7325_SID << 8 | ADC7_GPIO4_100K_PU)
+
+/* 400k pull-up3 */
+#define PM7325_ADC7_AMUX_THM1_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM7325_ADC7_AMUX_THM2_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM7325_ADC7_AMUX_THM3_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM7325_ADC7_AMUX_THM4_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM7325_ADC7_AMUX_THM5_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM7325_ADC7_GPIO1_400K_PU (PM7325_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM7325_ADC7_GPIO2_400K_PU (PM7325_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM7325_ADC7_GPIO3_400K_PU (PM7325_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM7325_ADC7_GPIO4_400K_PU (PM7325_SID << 8 | ADC7_GPIO4_400K_PU)
+
+/* 1/3 Divider */
+#define PM7325_ADC7_GPIO4_DIV3 (PM7325_SID << 8 | ADC7_GPIO4_DIV3)
+
+#define PM7325_ADC7_VPH_PWR (PM7325_SID << 8 | ADC7_VPH_PWR)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H */
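
Each macro packs the PMIC slave ID into the upper byte and the ADC7 channel number from qcom,spmi-vadc.h into the lower byte; defining PM7325_SID before the include overrides the default of 1. A minimal channel-node sketch, layout assumed from typical SPMI ADC bindings:

#include <dt-bindings/iio/qcom,spmi-adc7-pm7325.h>

channel@103 {
	reg = <PM7325_ADC7_DIE_TEMP>;	/* SID 1 << 8 | die-temp channel 0x03 */
	label = "pm7325_die_temp";
};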
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
index 9426f27a1946..5d98f7d48a1e 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -6,62 +6,60 @@
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
-#ifndef PM8350_SID
-#define PM8350_SID 1
-#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
/* ADC channels for PM8350_ADC for PMIC7 */
-#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0)
-#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01)
-#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02)
-#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03)
+#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | ADC7_REF_GND)
+#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | ADC7_1P25VREF)
+#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | ADC7_VREF_VADC)
+#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | ADC7_DIE_TEMP)
-#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04)
-#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05)
-#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06)
-#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07)
-#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08)
-#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a)
-#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b)
-#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c)
-#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d)
+#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | ADC7_AMUX_THM1)
+#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | ADC7_AMUX_THM2)
+#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | ADC7_AMUX_THM3)
+#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | ADC7_AMUX_THM4)
+#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | ADC7_AMUX_THM5)
+#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | ADC7_GPIO1)
+#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | ADC7_GPIO2)
+#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | ADC7_GPIO3)
+#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | ADC7_GPIO4)
/* 30k pull-up1 */
-#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24)
-#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25)
-#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26)
-#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27)
-#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28)
-#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a)
-#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b)
-#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c)
-#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d)
+#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | ADC7_GPIO4_30K_PU)
/* 100k pull-up2 */
-#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44)
-#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45)
-#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46)
-#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47)
-#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48)
-#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a)
-#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b)
-#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c)
-#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d)
+#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | ADC7_GPIO4_100K_PU)
/* 400k pull-up3 */
-#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64)
-#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65)
-#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66)
-#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID << 8 | 0x67)
-#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68)
-#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a)
-#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b)
-#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c)
-#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d)
+#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | ADC7_GPIO4_400K_PU)
/* 1/3 Divider */
-#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d)
+#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | ADC7_GPIO4_DIV3)
-#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e)
+#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | ADC7_VPH_PWR)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
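
Unlike the fixed-SID headers that follow, the PM8350 channel macros above now take the SPMI slave ID as an argument, so one header serves boards with the PMIC strapped at any SID. A board would consume the new form roughly as in this sketch; the ADC node label, the SID value of 1 and the channel label are illustrative, not part of this patch:

	#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>

	&spmi_adc {
		/* PM8350 at SID 1: (1 << 8) | 0x03 = 0x103 */
		channel@103 {
			reg = <PM8350_ADC7_DIE_TEMP(1)>;
			label = "pm8350_die_temp";
		};
	};
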
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
index dc2497c27e16..57c7977666d3 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -10,79 +10,81 @@
#define PM8350B_SID 3
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350B_ADC for PMIC7 */
-#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
-#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
-#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
-#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | ADC7_REF_GND)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | ADC7_1P25VREF)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | ADC7_VREF_VADC)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | ADC7_DIE_TEMP)
-#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
-#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
-#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
-#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
-#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
-#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
-#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
-#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
-#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
-#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | ADC7_AMUX_THM1)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | ADC7_AMUX_THM2)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | ADC7_AMUX_THM3)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | ADC7_AMUX_THM4)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | ADC7_AMUX_THM5)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | ADC7_AMUX_THM6)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | ADC7_GPIO1)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | ADC7_GPIO2)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | ADC7_GPIO3)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | ADC7_GPIO4)
-#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
-#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
-#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
-#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
-#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
-#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | ADC7_CHG_TEMP)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | ADC7_USB_IN_V_16)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | ADC7_VDC_16)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | ADC7_CC1_ID)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | ADC7_VREF_BAT_THERM)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | ADC7_IIN_FB)
/* 30k pull-up1 */
-#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
-#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
-#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
-#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
-#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
-#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
-#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
-#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
-#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
-#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
-#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_30K_PU)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | ADC7_GPIO4_30K_PU)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_30K_PU)
/* 100k pull-up2 */
-#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
-#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
-#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
-#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
-#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
-#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
-#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
-#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
-#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
-#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
-#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_100K_PU)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | ADC7_GPIO4_100K_PU)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_100K_PU)
/* 400k pull-up3 */
-#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
-#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
-#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
-#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
-#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
-#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
-#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
-#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
-#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
-#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
-#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_400K_PU)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | ADC7_GPIO4_400K_PU)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_400K_PU)
/* 1/3 Divider */
-#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
-#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
-#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
-#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | ADC7_GPIO1_DIV3)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | ADC7_GPIO2_DIV3)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | ADC7_GPIO3_DIV3)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | ADC7_GPIO4_DIV3)
-#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
-#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | ADC7_VPH_PWR)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | ADC7_VBAT_SNS)
-#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
-#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | ADC7_SBU)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | ADC7_VBAT_2S_MID)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
index 6c296870e95b..3d1a41a22cef 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -10,37 +10,39 @@
#define PMK8350_SID 0
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMK8350_ADC for PMIC7 */
-#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
-#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
-#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
-#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | ADC7_REF_GND)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | ADC7_1P25VREF)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | ADC7_VREF_VADC)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | ADC7_DIE_TEMP)
-#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
-#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
-#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
-#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
-#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | ADC7_AMUX_THM1)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | ADC7_AMUX_THM2)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | ADC7_AMUX_THM3)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | ADC7_AMUX_THM4)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | ADC7_AMUX_THM5)
/* 30k pull-up1 */
-#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
-#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
-#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
-#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
-#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_30K_PU)
/* 100k pull-up2 */
-#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
-#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
-#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
-#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
-#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_100K_PU)
/* 400k pull-up3 */
-#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
-#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
-#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
-#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
-#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_400K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
index d6df1b19e5ff..c5adfa82b20d 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -10,19 +10,21 @@
#define PMR735A_SID 4
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735A_ADC for PMIC7 */
-#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
-#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
-#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
-#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | ADC7_REF_GND)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | ADC7_1P25VREF)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | ADC7_VREF_VADC)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
-#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
-#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | ADC7_GPIO1)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | ADC7_GPIO2)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
-#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
-#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
index 8da0e7dab315..fdb8dd9ae541 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -10,19 +10,21 @@
#define PMR735B_SID 5
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735B_ADC for PMIC7 */
-#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
-#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
-#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
-#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | ADC7_REF_GND)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | ADC7_1P25VREF)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | ADC7_VREF_VADC)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
-#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
-#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | ADC7_GPIO1)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | ADC7_GPIO2)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
-#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
-#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
new file mode 100644
index 000000000000..c0680d1285cf
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+#define SMB139x_1_ADC7_SMB_TEMP (SMB139x_1_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_1_ADC7_ICHG_SMB (SMB139x_1_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_1_ADC7_IIN_SMB (SMB139x_1_SID << 8 | ADC7_IIN_SMB)
+
+#define SMB139x_2_ADC7_SMB_TEMP (SMB139x_2_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_2_ADC7_ICHG_SMB (SMB139x_2_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_2_ADC7_IIN_SMB (SMB139x_2_SID << 8 | ADC7_IIN_SMB)
+
+#endif
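
Note that, unlike the PMIC headers above, this header provides no #ifndef fallback for SMB139x_1_SID or SMB139x_2_SID, so the including DTS must define both before the #include. A minimal sketch; the SID values here are illustrative, board-specific assumptions:

	#define SMB139x_1_SID	0x0b	/* illustrative SPMI slave IDs */
	#define SMB139x_2_SID	0x0c
	#include <dt-bindings/iio/qcom,spmi-adc7-smb139x.h>
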
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 08adfe25964c..ef07ecd4d585 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -239,12 +239,15 @@
#define ADC7_GPIO3 0x0c
#define ADC7_GPIO4 0x0d
+#define ADC7_SMB_TEMP 0x06
#define ADC7_CHG_TEMP 0x10
#define ADC7_USB_IN_V_16 0x11
#define ADC7_VDC_16 0x12
#define ADC7_CC1_ID 0x13
#define ADC7_VREF_BAT_THERM 0x15
#define ADC7_IIN_FB 0x17
+#define ADC7_ICHG_SMB 0x18
+#define ADC7_IIN_SMB 0x19
/* 30k pull-up1 */
#define ADC7_AMUX_THM1_30K_PU 0x24
diff --git a/include/dt-bindings/input/atmel-maxtouch.h b/include/dt-bindings/input/atmel-maxtouch.h
new file mode 100644
index 000000000000..7345ab32224d
--- /dev/null
+++ b/include/dt-bindings/input/atmel-maxtouch.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DT_BINDINGS_ATMEL_MAXTOUCH_H
+#define _DT_BINDINGS_ATMEL_MAXTOUCH_H
+
+#define ATMEL_MXT_WAKEUP_NONE 0
+#define ATMEL_MXT_WAKEUP_I2C_SCL 1
+#define ATMEL_MXT_WAKEUP_GPIO 2
+
+#endif /* _DT_BINDINGS_ATMEL_MAXTOUCH_H */
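
These constants select how the maXTouch controller is woken from deep sleep via the atmel,wakeup-method property. A minimal consumer sketch; the I2C address is illustrative:

	#include <dt-bindings/input/atmel-maxtouch.h>

	touchscreen@4a {
		compatible = "atmel,maxtouch";
		reg = <0x4a>;
		/* wake the controller through the I2C SCL line */
		atmel,wakeup-method = <ATMEL_MXT_WAKEUP_I2C_SCL>;
	};
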
diff --git a/include/dt-bindings/input/cros-ec-keyboard.h b/include/dt-bindings/input/cros-ec-keyboard.h
new file mode 100644
index 000000000000..f0ae03634a96
--- /dev/null
+++ b/include/dt-bindings/input/cros-ec-keyboard.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides the constants of the standard Chrome OS key matrix
+ * for cros-ec keyboard-controller bindings.
+ *
+ * Copyright (c) 2021 Google, Inc
+ */
+
+#ifndef _CROS_EC_KEYBOARD_H
+#define _CROS_EC_KEYBOARD_H
+
+#define CROS_STD_TOP_ROW_KEYMAP \
+ MATRIX_KEY(0x00, 0x02, KEY_F1) \
+ MATRIX_KEY(0x03, 0x02, KEY_F2) \
+ MATRIX_KEY(0x02, 0x02, KEY_F3) \
+ MATRIX_KEY(0x01, 0x02, KEY_F4) \
+ MATRIX_KEY(0x03, 0x04, KEY_F5) \
+ MATRIX_KEY(0x02, 0x04, KEY_F6) \
+ MATRIX_KEY(0x01, 0x04, KEY_F7) \
+ MATRIX_KEY(0x02, 0x09, KEY_F8) \
+ MATRIX_KEY(0x01, 0x09, KEY_F9) \
+ MATRIX_KEY(0x00, 0x04, KEY_F10)
+
+#define CROS_STD_MAIN_KEYMAP \
+ MATRIX_KEY(0x00, 0x01, KEY_LEFTMETA) \
+ MATRIX_KEY(0x00, 0x03, KEY_B) \
+ MATRIX_KEY(0x00, 0x05, KEY_RO) \
+ MATRIX_KEY(0x00, 0x06, KEY_N) \
+ MATRIX_KEY(0x00, 0x08, KEY_EQUAL) \
+ MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) \
+ MATRIX_KEY(0x01, 0x01, KEY_ESC) \
+ MATRIX_KEY(0x01, 0x03, KEY_G) \
+ MATRIX_KEY(0x01, 0x06, KEY_H) \
+ MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) \
+ MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) \
+ MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) \
+ \
+ MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) \
+ MATRIX_KEY(0x02, 0x01, KEY_TAB) \
+ MATRIX_KEY(0x02, 0x03, KEY_T) \
+ MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) \
+ MATRIX_KEY(0x02, 0x06, KEY_Y) \
+ MATRIX_KEY(0x02, 0x07, KEY_102ND) \
+ MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) \
+ MATRIX_KEY(0x02, 0x0a, KEY_YEN) \
+ \
+ MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) \
+ MATRIX_KEY(0x03, 0x01, KEY_GRAVE) \
+ MATRIX_KEY(0x03, 0x03, KEY_5) \
+ MATRIX_KEY(0x03, 0x06, KEY_6) \
+ MATRIX_KEY(0x03, 0x08, KEY_MINUS) \
+ MATRIX_KEY(0x03, 0x09, KEY_SLEEP) \
+ MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) \
+ MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) \
+ \
+ MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) \
+ MATRIX_KEY(0x04, 0x01, KEY_A) \
+ MATRIX_KEY(0x04, 0x02, KEY_D) \
+ MATRIX_KEY(0x04, 0x03, KEY_F) \
+ MATRIX_KEY(0x04, 0x04, KEY_S) \
+ MATRIX_KEY(0x04, 0x05, KEY_K) \
+ MATRIX_KEY(0x04, 0x06, KEY_J) \
+ MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) \
+ MATRIX_KEY(0x04, 0x09, KEY_L) \
+ MATRIX_KEY(0x04, 0x0a, KEY_BACKSLASH) \
+ MATRIX_KEY(0x04, 0x0b, KEY_ENTER) \
+ \
+ MATRIX_KEY(0x05, 0x01, KEY_Z) \
+ MATRIX_KEY(0x05, 0x02, KEY_C) \
+ MATRIX_KEY(0x05, 0x03, KEY_V) \
+ MATRIX_KEY(0x05, 0x04, KEY_X) \
+ MATRIX_KEY(0x05, 0x05, KEY_COMMA) \
+ MATRIX_KEY(0x05, 0x06, KEY_M) \
+ MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) \
+ MATRIX_KEY(0x05, 0x08, KEY_SLASH) \
+ MATRIX_KEY(0x05, 0x09, KEY_DOT) \
+ MATRIX_KEY(0x05, 0x0b, KEY_SPACE) \
+ \
+ MATRIX_KEY(0x06, 0x01, KEY_1) \
+ MATRIX_KEY(0x06, 0x02, KEY_3) \
+ MATRIX_KEY(0x06, 0x03, KEY_4) \
+ MATRIX_KEY(0x06, 0x04, KEY_2) \
+ MATRIX_KEY(0x06, 0x05, KEY_8) \
+ MATRIX_KEY(0x06, 0x06, KEY_7) \
+ MATRIX_KEY(0x06, 0x08, KEY_0) \
+ MATRIX_KEY(0x06, 0x09, KEY_9) \
+ MATRIX_KEY(0x06, 0x0a, KEY_LEFTALT) \
+ MATRIX_KEY(0x06, 0x0b, KEY_DOWN) \
+ MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) \
+ \
+ MATRIX_KEY(0x07, 0x01, KEY_Q) \
+ MATRIX_KEY(0x07, 0x02, KEY_E) \
+ MATRIX_KEY(0x07, 0x03, KEY_R) \
+ MATRIX_KEY(0x07, 0x04, KEY_W) \
+ MATRIX_KEY(0x07, 0x05, KEY_I) \
+ MATRIX_KEY(0x07, 0x06, KEY_U) \
+ MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) \
+ MATRIX_KEY(0x07, 0x08, KEY_P) \
+ MATRIX_KEY(0x07, 0x09, KEY_O) \
+ MATRIX_KEY(0x07, 0x0b, KEY_UP) \
+ MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
+
+#endif /* _CROS_EC_KEYBOARD_H */
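
Both macros expand to a flat list of MATRIX_KEY() entries, so a board can splice the standard matrix into linux,keymap and spell out only what differs. MATRIX_KEY() and the KEY_* codes live in separate dt-bindings headers that the including DTS must pull in itself; a sketch, with the node's placement in the tree left out:

	#include <dt-bindings/input/input.h>
	#include <dt-bindings/input/matrix-keymap.h>
	#include <dt-bindings/input/cros-ec-keyboard.h>

	keyboard-controller {
		compatible = "google,cros-ec-keyb";
		linux,keymap = <CROS_STD_TOP_ROW_KEYMAP
				CROS_STD_MAIN_KEYMAP>;
	};
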
diff --git a/include/dt-bindings/interconnect/fsl,imx8mp.h b/include/dt-bindings/interconnect/fsl,imx8mp.h
new file mode 100644
index 000000000000..7357d417529a
--- /dev/null
+++ b/include/dt-bindings/interconnect/fsl,imx8mp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright 2022 NXP
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MP_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MP_H
+
+#define IMX8MP_ICN_NOC 0
+#define IMX8MP_ICN_MAIN 1
+#define IMX8MP_ICS_DRAM 2
+#define IMX8MP_ICS_OCRAM 3
+#define IMX8MP_ICM_A53 4
+#define IMX8MP_ICM_SUPERMIX 5
+#define IMX8MP_ICM_GIC 6
+#define IMX8MP_ICM_MLMIX 7
+
+#define IMX8MP_ICN_AUDIO 8
+#define IMX8MP_ICM_DSP 9
+#define IMX8MP_ICM_SDMA2PER 10
+#define IMX8MP_ICM_SDMA2BURST 11
+#define IMX8MP_ICM_SDMA3PER 12
+#define IMX8MP_ICM_SDMA3BURST 13
+#define IMX8MP_ICM_EDMA 14
+
+#define IMX8MP_ICN_GPU 15
+#define IMX8MP_ICM_GPU2D 16
+#define IMX8MP_ICM_GPU3D 17
+
+#define IMX8MP_ICN_HDMI 18
+#define IMX8MP_ICM_HRV 19
+#define IMX8MP_ICM_LCDIF_HDMI 20
+#define IMX8MP_ICM_HDCP 21
+
+#define IMX8MP_ICN_HSIO 22
+#define IMX8MP_ICM_NOC_PCIE 23
+#define IMX8MP_ICM_USB1 24
+#define IMX8MP_ICM_USB2 25
+#define IMX8MP_ICM_PCIE 26
+
+#define IMX8MP_ICN_MEDIA 27
+#define IMX8MP_ICM_LCDIF_RD 28
+#define IMX8MP_ICM_LCDIF_WR 29
+#define IMX8MP_ICM_ISI0 30
+#define IMX8MP_ICM_ISI1 31
+#define IMX8MP_ICM_ISI2 32
+#define IMX8MP_ICM_ISP0 33
+#define IMX8MP_ICM_ISP1 34
+#define IMX8MP_ICM_DWE 35
+
+#define IMX8MP_ICN_VIDEO 36
+#define IMX8MP_ICM_VPU_G1 37
+#define IMX8MP_ICM_VPU_G2 38
+#define IMX8MP_ICM_VPU_H1 39
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MP_H */
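
Like the Qualcomm headers below, these IDs name endpoints that a consumer pairs up in an interconnects property to request bandwidth on a path. A sketch under stated assumptions: the provider label &noc, the unit address and the endpoint pairing are all illustrative:

	#include <dt-bindings/interconnect/fsl,imx8mp.h>

	display-controller@32e80000 {
		/* LCDIF read master -> DRAM slave */
		interconnects = <&noc IMX8MP_ICM_LCDIF_RD &noc IMX8MP_ICS_DRAM>;
		interconnect-names = "dram";
	};
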
diff --git a/include/dt-bindings/interconnect/qcom,icc.h b/include/dt-bindings/interconnect/qcom,icc.h
new file mode 100644
index 000000000000..cd34f36daaaa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,icc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_ICC_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_ICC_H
+
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low-power modes.
+ */
+#define QCOM_ICC_BUCKET_AMC 0
+#define QCOM_ICC_BUCKET_WAKE 1
+#define QCOM_ICC_BUCKET_SLEEP 2
+#define QCOM_ICC_NUM_BUCKETS 3
+
+#define QCOM_ICC_TAG_AMC (1 << QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE (1 << QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP (1 << QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+ QCOM_ICC_TAG_SLEEP)
+
+#endif
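
On providers that expose two interconnect cells, the tag rides in the second cell of each endpoint specifier and selects which bucket a bandwidth vote lands in. A consumer sketch, assuming such a two-cell provider; the labels and endpoints are illustrative:

	#include <dt-bindings/interconnect/qcom,icc.h>

	interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
			 &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>;
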
diff --git a/include/dt-bindings/interconnect/qcom,msm8909.h b/include/dt-bindings/interconnect/qcom,msm8909.h
new file mode 100644
index 000000000000..76365d8aec21
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8909.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8909 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8909_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8909_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_1 3
+#define MAS_TCU_0 4
+#define MAS_TCU_1 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_AUDIO 0
+#define MAS_SPDM 1
+#define MAS_DEHR 2
+#define MAS_QPIC 3
+#define MAS_BLSP_1 4
+#define MAS_USB_HS 5
+#define MAS_CRYPTO 6
+#define MAS_SDCC_1 7
+#define MAS_SDCC_2 8
+#define MAS_SNOC_PCNOC 9
+#define PCNOC_M_0 10
+#define PCNOC_M_1 11
+#define PCNOC_INT_0 12
+#define PCNOC_INT_1 13
+#define PCNOC_S_0 14
+#define PCNOC_S_1 15
+#define PCNOC_S_2 16
+#define PCNOC_S_3 17
+#define PCNOC_S_4 18
+#define PCNOC_S_5 19
+#define PCNOC_S_7 20
+#define SLV_TCSR 21
+#define SLV_SDCC_1 22
+#define SLV_BLSP_1 23
+#define SLV_CRYPTO_0_CFG 24
+#define SLV_MESSAGE_RAM 25
+#define SLV_PDM 26
+#define SLV_PRNG 27
+#define SLV_USB_HS 28
+#define SLV_QPIC 29
+#define SLV_SPDM 30
+#define SLV_SDCC_2 31
+#define SLV_AUDIO 32
+#define SLV_DEHR_CFG 33
+#define SLV_SNOC_CFG 34
+#define SLV_QDSS_CFG 35
+#define SLV_USB_PHY 36
+#define SLV_CAMERA_SS_CFG 37
+#define SLV_DISP_SS_CFG 38
+#define SLV_VENUS_CFG 39
+#define SLV_TLMM 40
+#define SLV_GPU_CFG 41
+#define SLV_IMEM_CFG 42
+#define SLV_BIMC_CFG 43
+#define SLV_PMIC_ARB 44
+#define SLV_TCU 45
+#define SLV_PCNOC_SNOC 46
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_MDP 2
+#define MAS_PCNOC_SNOC 3
+#define MAS_VENUS 4
+#define MAS_VFE 5
+#define MAS_QDSS_ETR 6
+#define MM_INT_0 7
+#define MM_INT_1 8
+#define MM_INT_2 9
+#define MM_INT_BIMC 10
+#define QDSS_INT 11
+#define SNOC_INT_0 12
+#define SNOC_INT_1 13
+#define SNOC_INT_BIMC 14
+#define SLV_KPSS_AHB 15
+#define SLV_SNOC_BIMC_0 16
+#define SLV_SNOC_BIMC_1 17
+#define SLV_IMEM 18
+#define SLV_SNOC_PCNOC 19
+#define SLV_QDSS_STM 20
+#define SLV_CATS_0 21
+#define SLV_CATS_1 22
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8909_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8939.h b/include/dt-bindings/interconnect/qcom,msm8939.h
new file mode 100644
index 000000000000..c22369a4b9f5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8939.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm interconnect IDs
+ *
+ * Copyright (c) 2020, Linaro Ltd.
+ * Author: Jun Nie <jun.nie@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H
+
+#define BIMC_SNOC_SLV 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QDSS_ETR 2
+#define MASTER_SNOC_CFG 3
+#define PCNOC_SNOC_SLV 4
+#define SLAVE_APSS 5
+#define SLAVE_CATS_128 6
+#define SLAVE_OCMEM_64 7
+#define SLAVE_IMEM 8
+#define SLAVE_QDSS_STM 9
+#define SLAVE_SRVC_SNOC 10
+#define SNOC_BIMC_0_MAS 11
+#define SNOC_BIMC_1_MAS 12
+#define SNOC_BIMC_2_MAS 13
+#define SNOC_INT_0 14
+#define SNOC_INT_1 15
+#define SNOC_INT_BIMC 16
+#define SNOC_PCNOC_MAS 17
+#define SNOC_QDSS_INT 18
+
+#define MASTER_VIDEO_P0 0
+#define MASTER_JPEG 1
+#define MASTER_VFE 2
+#define MASTER_MDP_PORT0 3
+#define MASTER_MDP_PORT1 4
+#define MASTER_CPP 5
+#define SNOC_MM_INT_0 6
+#define SNOC_MM_INT_1 7
+#define SNOC_MM_INT_2 8
+
+#define BIMC_SNOC_MAS 0
+#define MASTER_AMPSS_M0 1
+#define MASTER_GRAPHICS_3D 2
+#define MASTER_TCU0 3
+#define SLAVE_AMPSS_L2 4
+#define SLAVE_EBI_CH0 5
+#define SNOC_BIMC_0_SLV 6
+#define SNOC_BIMC_1_SLV 7
+#define SNOC_BIMC_2_SLV 8
+
+#define MASTER_BLSP_1 0
+#define MASTER_DEHR 1
+#define MASTER_LPASS 2
+#define MASTER_CRYPTO_CORE0 3
+#define MASTER_SDCC_1 4
+#define MASTER_SDCC_2 5
+#define MASTER_SPDM 6
+#define MASTER_USB_HS1 7
+#define MASTER_USB_HS2 8
+#define PCNOC_INT_0 9
+#define PCNOC_INT_1 10
+#define PCNOC_MAS_0 11
+#define PCNOC_MAS_1 12
+#define PCNOC_SLV_0 13
+#define PCNOC_SLV_1 14
+#define PCNOC_SLV_2 15
+#define PCNOC_SLV_3 16
+#define PCNOC_SLV_4 17
+#define PCNOC_SLV_8 18
+#define PCNOC_SLV_9 19
+#define PCNOC_SNOC_MAS 20
+#define SLAVE_BIMC_CFG 21
+#define SLAVE_BLSP_1 22
+#define SLAVE_BOOT_ROM 23
+#define SLAVE_CAMERA_CFG 24
+#define SLAVE_CLK_CTL 25
+#define SLAVE_CRYPTO_0_CFG 26
+#define SLAVE_DEHR_CFG 27
+#define SLAVE_DISPLAY_CFG 28
+#define SLAVE_GRAPHICS_3D_CFG 29
+#define SLAVE_IMEM_CFG 30
+#define SLAVE_LPASS 31
+#define SLAVE_MPM 32
+#define SLAVE_MSG_RAM 33
+#define SLAVE_MSS 34
+#define SLAVE_PDM 35
+#define SLAVE_PMIC_ARB 36
+#define SLAVE_PCNOC_CFG 37
+#define SLAVE_PRNG 38
+#define SLAVE_QDSS_CFG 39
+#define SLAVE_RBCPR_CFG 40
+#define SLAVE_SDCC_1 41
+#define SLAVE_SDCC_2 42
+#define SLAVE_SECURITY 43
+#define SLAVE_SNOC_CFG 44
+#define SLAVE_SPDM 45
+#define SLAVE_TCSR 46
+#define SLAVE_TLMM 47
+#define SLAVE_USB_HS1 48
+#define SLAVE_USB_HS2 49
+#define SLAVE_VENUS_CFG 50
+#define SNOC_PCNOC_SLV 51
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8996-cbf.h b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
new file mode 100644
index 000000000000..aac5e69f6bd5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+
+#define MASTER_CBF_M4M 0
+#define SLAVE_CBF_M4M 1
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8996.h b/include/dt-bindings/interconnect/qcom,msm8996.h
new file mode 100644
index 000000000000..a0b7c0ec7bed
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Qualcomm MSM8996 interconnect IDs
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+
+/* A0NOC */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+
+/* A1NOC */
+#define MASTER_CNOC_A1NOC 0
+#define MASTER_CRYPTO_CORE0 1
+#define MASTER_PNOC_A1NOC 2
+
+/* A2NOC */
+#define MASTER_USB3 0
+#define MASTER_IPA 1
+#define MASTER_UFS 2
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_GRAPHICS_3D 1
+#define MASTER_MNOC_BIMC 2
+#define MASTER_SNOC_BIMC 3
+#define SLAVE_EBI_CH0 4
+#define SLAVE_HMSS_L3 5
+#define SLAVE_BIMC_SNOC_0 6
+#define SLAVE_BIMC_SNOC_1 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_CNOC_A1NOC 2
+#define SLAVE_CLK_CTL 3
+#define SLAVE_TCSR 4
+#define SLAVE_TLMM 5
+#define SLAVE_CRYPTO_0_CFG 6
+#define SLAVE_MPM 7
+#define SLAVE_PIMEM_CFG 8
+#define SLAVE_IMEM_CFG 9
+#define SLAVE_MESSAGE_RAM 10
+#define SLAVE_BIMC_CFG 11
+#define SLAVE_PMIC_ARB 12
+#define SLAVE_PRNG 13
+#define SLAVE_DCC_CFG 14
+#define SLAVE_RBCPR_MX 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_RBCPR_CX 17
+#define SLAVE_QDSS_RBCPR_APU 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_SNOC_CFG 20
+#define SLAVE_SNOC_MPU_CFG 21
+#define SLAVE_EBI1_PHY_CFG 22
+#define SLAVE_A0NOC_CFG 23
+#define SLAVE_PCIE_1_CFG 24
+#define SLAVE_PCIE_2_CFG 25
+#define SLAVE_PCIE_0_CFG 26
+#define SLAVE_PCIE20_AHB2PHY 27
+#define SLAVE_A0NOC_MPU_CFG 28
+#define SLAVE_UFS_CFG 29
+#define SLAVE_A1NOC_CFG 30
+#define SLAVE_A1NOC_MPU_CFG 31
+#define SLAVE_A2NOC_CFG 32
+#define SLAVE_A2NOC_MPU_CFG 33
+#define SLAVE_SSC_CFG 34
+#define SLAVE_A0NOC_SMMU_CFG 35
+#define SLAVE_A1NOC_SMMU_CFG 36
+#define SLAVE_A2NOC_SMMU_CFG 37
+#define SLAVE_LPASS_SMMU_CFG 38
+#define SLAVE_CNOC_MNOC_MMSS_CFG 39
+
+/* MNOC */
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CPP 1
+#define MASTER_JPEG 2
+#define MASTER_MDP_PORT0 3
+#define MASTER_MDP_PORT1 4
+#define MASTER_ROTATOR 5
+#define MASTER_VIDEO_P0 6
+#define MASTER_VFE 7
+#define MASTER_SNOC_VMEM 8
+#define MASTER_VIDEO_P0_OCMEM 9
+#define MASTER_CNOC_MNOC_MMSS_CFG 10
+#define SLAVE_MNOC_BIMC 11
+#define SLAVE_VMEM 12
+#define SLAVE_SERVICE_MNOC 13
+#define SLAVE_MMAGIC_CFG 14
+#define SLAVE_CPR_CFG 15
+#define SLAVE_MISC_CFG 16
+#define SLAVE_VENUS_THROTTLE_CFG 17
+#define SLAVE_VENUS_CFG 18
+#define SLAVE_VMEM_CFG 19
+#define SLAVE_DSA_CFG 20
+#define SLAVE_MMSS_CLK_CFG 21
+#define SLAVE_DSA_MPU_CFG 22
+#define SLAVE_MNOC_MPU_CFG 23
+#define SLAVE_DISPLAY_CFG 24
+#define SLAVE_DISPLAY_THROTTLE_CFG 25
+#define SLAVE_CAMERA_CFG 26
+#define SLAVE_CAMERA_THROTTLE_CFG 27
+#define SLAVE_GRAPHICS_3D_CFG 28
+#define SLAVE_SMMU_MDP_CFG 29
+#define SLAVE_SMMU_ROT_CFG 30
+#define SLAVE_SMMU_VENUS_CFG 31
+#define SLAVE_SMMU_CPP_CFG 32
+#define SLAVE_SMMU_JPEG_CFG 33
+#define SLAVE_SMMU_VFE_CFG 34
+
+/* PNOC */
+#define MASTER_SNOC_PNOC 0
+#define MASTER_SDCC_1 1
+#define MASTER_SDCC_2 2
+#define MASTER_SDCC_4 3
+#define MASTER_USB_HS 4
+#define MASTER_BLSP_1 5
+#define MASTER_BLSP_2 6
+#define MASTER_TSIF 7
+#define SLAVE_PNOC_A1NOC 8
+#define SLAVE_USB_HS 9
+#define SLAVE_SDCC_2 10
+#define SLAVE_SDCC_4 11
+#define SLAVE_TSIF 12
+#define SLAVE_BLSP_2 13
+#define SLAVE_SDCC_1 14
+#define SLAVE_BLSP_1 15
+#define SLAVE_PDM 16
+#define SLAVE_AHB2PHY 17
+
+/* SNOC */
+#define MASTER_HMSS 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_BIMC_SNOC_0 3
+#define MASTER_BIMC_SNOC_1 4
+#define MASTER_A0NOC_SNOC 5
+#define MASTER_A1NOC_SNOC 6
+#define MASTER_A2NOC_SNOC 7
+#define MASTER_QDSS_ETR 8
+#define SLAVE_A0NOC_SNOC 9
+#define SLAVE_A1NOC_SNOC 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_HMSS 12
+#define SLAVE_LPASS 13
+#define SLAVE_USB3 14
+#define SLAVE_SNOC_BIMC 15
+#define SLAVE_SNOC_CNOC 16
+#define SLAVE_IMEM 17
+#define SLAVE_PIMEM 18
+#define SLAVE_SNOC_VMEM 19
+#define SLAVE_SNOC_PNOC 20
+#define SLAVE_QDSS_STM 21
+#define SLAVE_PCIE_0 22
+#define SLAVE_PCIE_1 23
+#define SLAVE_PCIE_2 24
+#define SLAVE_SERVICE_SNOC 25
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,osm-l3.h b/include/dt-bindings/interconnect/qcom,osm-l3.h
index 54858ff7674d..61ef649ae565 100644
--- a/include/dt-bindings/interconnect/qcom,osm-l3.h
+++ b/include/dt-bindings/interconnect/qcom,osm-l3.h
@@ -9,4 +9,7 @@
#define MASTER_OSM_L3_APPS 0
#define SLAVE_OSM_L3 1
+#define MASTER_EPSS_L3_APPS 0
+#define SLAVE_EPSS_L3_SHARED 1
+
#endif
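
The EPSS defines can safely reuse the values 0 and 1 because each provider has its own ID space; newer SoCs expose the same L3 scaling interface through the EPSS block instead of OSM. CPU nodes vote on L3 bandwidth through it roughly as below; the provider label is an illustrative assumption:

	cpu@0 {
		interconnects = <&epss_l3 MASTER_EPSS_L3_APPS
				 &epss_l3 SLAVE_EPSS_L3_SHARED>;
	};
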
diff --git a/include/dt-bindings/interconnect/qcom,qcm2290.h b/include/dt-bindings/interconnect/qcom,qcm2290.h
new file mode 100644
index 000000000000..6cbbb7fe0bd3
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcm2290.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* QCM2290 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+
+/* BIMC */
+#define MASTER_APPSS_PROC 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define MASTER_SNOC_BIMC 3
+#define MASTER_TCU_0 4
+#define MASTER_GFX3D 5
+#define SLAVE_EBI1 6
+#define SLAVE_BIMC_SNOC 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_BIMC_CFG 2
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 3
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_DISPLAY_THROTTLE_CFG 9
+#define SLAVE_GPU_CFG 10
+#define SLAVE_HWKM 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_IPA_CFG 13
+#define SLAVE_LPASS 14
+#define SLAVE_MESSAGE_RAM 15
+#define SLAVE_PDM 16
+#define SLAVE_PIMEM_CFG 17
+#define SLAVE_PKA_WRAPPER 18
+#define SLAVE_PMIC_ARB 19
+#define SLAVE_PRNG 20
+#define SLAVE_QDSS_CFG 21
+#define SLAVE_QM_CFG 22
+#define SLAVE_QM_MPU_CFG 23
+#define SLAVE_QPIC 24
+#define SLAVE_QUP_0 25
+#define SLAVE_SDCC_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SNOC_CFG 28
+#define SLAVE_TCSR 29
+#define SLAVE_USB3 30
+#define SLAVE_VENUS_CFG 31
+#define SLAVE_VENUS_THROTTLE_CFG 32
+#define SLAVE_VSENSE_CTRL_CFG 33
+#define SLAVE_SERVICE_CNOC 34
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define MASTER_BIMC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QUP_0 7
+#define MASTER_IPA 8
+#define MASTER_QDSS_ETR 9
+#define MASTER_SDCC_1 10
+#define MASTER_SDCC_2 11
+#define MASTER_QPIC 12
+#define MASTER_USB3_0 13
+#define SLAVE_APPSS 14
+#define SLAVE_SNOC_CNOC 15
+#define SLAVE_IMEM 16
+#define SLAVE_PIMEM 17
+#define SLAVE_SNOC_BIMC 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* QUP Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h b/include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h
new file mode 100644
index 000000000000..7f0ad1571128
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QDU1000_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QDU1000_H
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_SYS_TCU 0
+#define MASTER_APPSS_PROC 1
+#define MASTER_GEMNOC_ECPRI_DMA 2
+#define MASTER_FEC_2_GEMNOC 3
+#define MASTER_ANOC_PCIE_GEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_MSS_PROC 7
+#define SLAVE_GEM_NOC_CNOC 8
+#define SLAVE_LLCC 9
+#define SLAVE_GEMNOC_MODEM_CNOC 10
+#define SLAVE_MEM_NOC_PCIE_SNOC 11
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_GIC_AHB 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QPIC 2
+#define MASTER_QSPI_0 3
+#define MASTER_QUP_0 4
+#define MASTER_QUP_1 5
+#define MASTER_SNOC_CFG 6
+#define MASTER_ANOC_SNOC 7
+#define MASTER_ANOC_GSI 8
+#define MASTER_GEM_NOC_CNOC 9
+#define MASTER_GEMNOC_MODEM_CNOC 10
+#define MASTER_GEM_NOC_PCIE_SNOC 11
+#define MASTER_CRYPTO 12
+#define MASTER_ECPRI_GSI 13
+#define MASTER_PIMEM 14
+#define MASTER_SNOC_ECPRI_DMA 15
+#define MASTER_GIC 16
+#define MASTER_PCIE 17
+#define MASTER_QDSS_ETR 18
+#define MASTER_QDSS_ETR_1 19
+#define MASTER_SDCC_1 20
+#define MASTER_USB3 21
+#define SLAVE_AHB2PHY_SOUTH 22
+#define SLAVE_AHB2PHY_NORTH 23
+#define SLAVE_AHB2PHY_EAST 24
+#define SLAVE_AOSS 25
+#define SLAVE_CLK_CTL 26
+#define SLAVE_RBCPR_CX_CFG 27
+#define SLAVE_RBCPR_MX_CFG 28
+#define SLAVE_CRYPTO_0_CFG 29
+#define SLAVE_ECPRI_CFG 30
+#define SLAVE_IMEM_CFG 31
+#define SLAVE_IPC_ROUTER_CFG 32
+#define SLAVE_CNOC_MSS 33
+#define SLAVE_PCIE_CFG 34
+#define SLAVE_PDM 35
+#define SLAVE_PIMEM_CFG 36
+#define SLAVE_PRNG 37
+#define SLAVE_QDSS_CFG 38
+#define SLAVE_QPIC 40
+#define SLAVE_QSPI_0 41
+#define SLAVE_QUP_0 42
+#define SLAVE_QUP_1 43
+#define SLAVE_SDCC_2 44
+#define SLAVE_SMBUS_CFG 45
+#define SLAVE_SNOC_CFG 46
+#define SLAVE_TCSR 47
+#define SLAVE_TLMM 48
+#define SLAVE_TME_CFG 49
+#define SLAVE_TSC_CFG 50
+#define SLAVE_USB3_0 51
+#define SLAVE_VSENSE_CTRL_CFG 52
+#define SLAVE_A1NOC_SNOC 53
+#define SLAVE_ANOC_SNOC_GSI 54
+#define SLAVE_DDRSS_CFG 55
+#define SLAVE_ECPRI_GEMNOC 56
+#define SLAVE_SNOC_GEM_NOC_GC 57
+#define SLAVE_SNOC_GEM_NOC_SF 58
+#define SLAVE_MODEM_OFFLINE 59
+#define SLAVE_ANOC_PCIE_GEM_NOC 60
+#define SLAVE_IMEM 61
+#define SLAVE_PIMEM 62
+#define SLAVE_SERVICE_SNOC 63
+#define SLAVE_ETHERNET_SS 64
+#define SLAVE_PCIE_0 65
+#define SLAVE_QDSS_STM 66
+#define SLAVE_TCU 67
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,rpm-icc.h b/include/dt-bindings/interconnect/qcom,rpm-icc.h
new file mode 100644
index 000000000000..2cd56f91e5c5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,rpm-icc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+
+#define RPM_ACTIVE_TAG (1 << 0)
+#define RPM_SLEEP_TAG (1 << 1)
+#define RPM_ALWAYS_TAG (RPM_ACTIVE_TAG | RPM_SLEEP_TAG)
+
+#endif
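
These are the SMD-RPM counterparts of the RPMh bucket tags in qcom,icc.h above: with a two-cell provider, the tag cell picks whether a vote applies to the active set, the sleep set, or both. A sketch pairing them with the MSM8996 IDs added earlier; the provider labels are illustrative:

	#include <dt-bindings/interconnect/qcom,msm8996.h>
	#include <dt-bindings/interconnect/qcom,rpm-icc.h>

	interconnects = <&a2noc MASTER_USB3 RPM_ALWAYS_TAG
			 &bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>;
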
diff --git a/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h b/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h
new file mode 100644
index 000000000000..f21c39d0928e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H
+
+/* aggre1_noc */
+#define MASTER_QUP_3 0
+#define MASTER_EMAC 1
+#define MASTER_EMAC_1 2
+#define MASTER_SDC 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB2 5
+#define MASTER_USB3_0 6
+#define MASTER_USB3_1 7
+#define SLAVE_A1NOC_SNOC 8
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_CNOC_A2NOC 4
+#define MASTER_CRYPTO_CORE0 5
+#define MASTER_CRYPTO_CORE1 6
+#define MASTER_IPA 7
+#define MASTER_QDSS_ETR_0 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_UFS_CARD 10
+#define SLAVE_A2NOC_SNOC 11
+
+/* clk_virt */
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define MASTER_QUP_CORE_3 3
+#define SLAVE_QUP_CORE_0 4
+#define SLAVE_QUP_CORE_1 5
+#define SLAVE_QUP_CORE_2 6
+#define SLAVE_QUP_CORE_3 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AHB2PHY_3 5
+#define SLAVE_ANOC_THROTTLE_CFG 6
+#define SLAVE_AOSS 7
+#define SLAVE_APPSS 8
+#define SLAVE_BOOT_ROM 9
+#define SLAVE_CAMERA_CFG 10
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 11
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 12
+#define SLAVE_CLK_CTL 13
+#define SLAVE_CDSP_CFG 14
+#define SLAVE_CDSP1_CFG 15
+#define SLAVE_RBCPR_CX_CFG 16
+#define SLAVE_RBCPR_MMCX_CFG 17
+#define SLAVE_RBCPR_MX_CFG 18
+#define SLAVE_CPR_NSPCX 19
+#define SLAVE_CRYPTO_0_CFG 20
+#define SLAVE_CX_RDPM 21
+#define SLAVE_DISPLAY_CFG 22
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 23
+#define SLAVE_DISPLAY1_CFG 24
+#define SLAVE_DISPLAY1_RT_THROTTLE_CFG 25
+#define SLAVE_EMAC_CFG 26
+#define SLAVE_EMAC1_CFG 27
+#define SLAVE_GP_DSP0_CFG 28
+#define SLAVE_GP_DSP1_CFG 29
+#define SLAVE_GPDSP0_THROTTLE_CFG 30
+#define SLAVE_GPDSP1_THROTTLE_CFG 31
+#define SLAVE_GPU_TCU_THROTTLE_CFG 32
+#define SLAVE_GFX3D_CFG 33
+#define SLAVE_HWKM 34
+#define SLAVE_IMEM_CFG 35
+#define SLAVE_IPA_CFG 36
+#define SLAVE_IPC_ROUTER_CFG 37
+#define SLAVE_LPASS 38
+#define SLAVE_LPASS_THROTTLE_CFG 39
+#define SLAVE_MX_RDPM 40
+#define SLAVE_MXC_RDPM 41
+#define SLAVE_PCIE_0_CFG 42
+#define SLAVE_PCIE_1_CFG 43
+#define SLAVE_PCIE_RSC_CFG 44
+#define SLAVE_PCIE_TCU_THROTTLE_CFG 45
+#define SLAVE_PCIE_THROTTLE_CFG 46
+#define SLAVE_PDM 47
+#define SLAVE_PIMEM_CFG 48
+#define SLAVE_PKA_WRAPPER_CFG 49
+#define SLAVE_QDSS_CFG 50
+#define SLAVE_QM_CFG 51
+#define SLAVE_QM_MPU_CFG 52
+#define SLAVE_QUP_0 53
+#define SLAVE_QUP_1 54
+#define SLAVE_QUP_2 55
+#define SLAVE_QUP_3 56
+#define SLAVE_SAIL_THROTTLE_CFG 57
+#define SLAVE_SDC1 58
+#define SLAVE_SECURITY 59
+#define SLAVE_SNOC_THROTTLE_CFG 60
+#define SLAVE_TCSR 61
+#define SLAVE_TLMM 62
+#define SLAVE_TSC_CFG 63
+#define SLAVE_UFS_CARD_CFG 64
+#define SLAVE_UFS_MEM_CFG 65
+#define SLAVE_USB2 66
+#define SLAVE_USB3_0 67
+#define SLAVE_USB3_1 68
+#define SLAVE_VENUS_CFG 69
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 70
+#define SLAVE_VENUS_V_CPU_THROTTLE_CFG 71
+#define SLAVE_VENUS_VCODEC_THROTTLE_CFG 72
+#define SLAVE_DDRSS_CFG 73
+#define SLAVE_GPDSP_NOC_CFG 74
+#define SLAVE_CNOC_MNOC_HF_CFG 75
+#define SLAVE_CNOC_MNOC_SF_CFG 76
+#define SLAVE_PCIE_ANOC_CFG 77
+#define SLAVE_SNOC_CFG 78
+#define SLAVE_BOOT_IMEM 79
+#define SLAVE_IMEM 80
+#define SLAVE_PIMEM 81
+#define SLAVE_PCIE_0 82
+#define SLAVE_PCIE_1 83
+#define SLAVE_QDSS_STM 84
+#define SLAVE_TCU 85
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GPDSP_SAIL 7
+#define MASTER_GFX3D 8
+#define MASTER_MNOC_HF_MEM_NOC 9
+#define MASTER_MNOC_SF_MEM_NOC 10
+#define MASTER_ANOC_PCIE_GEM_NOC 11
+#define MASTER_SNOC_GC_MEM_NOC 12
+#define MASTER_SNOC_SF_MEM_NOC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_GEM_NOC_PCIE_CNOC 16
+#define SLAVE_SERVICE_GEM_NOC_1 17
+#define SLAVE_SERVICE_GEM_NOC_2 18
+#define SLAVE_SERVICE_GEM_NOC 19
+#define SLAVE_SERVICE_GEM_NOC2 20
+
+/* gpdsp_anoc */
+#define MASTER_DSP0 0
+#define MASTER_DSP1 1
+#define SLAVE_GP_DSP_SAIL_NOC 2
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP0 3
+#define MASTER_MDP1 4
+#define MASTER_MDP_CORE1_0 5
+#define MASTER_MDP_CORE1_1 6
+#define MASTER_CNOC_MNOC_HF_CFG 7
+#define MASTER_CNOC_MNOC_SF_CFG 8
+#define MASTER_VIDEO_P0 9
+#define MASTER_VIDEO_P1 10
+#define MASTER_VIDEO_PROC 11
+#define MASTER_VIDEO_V_PROC 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC_HF 15
+#define SLAVE_SERVICE_MNOC_SF 16
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_HCP_A 2
+#define SLAVE_CDSP_MEM_NOC 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_HCP_B 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* pcie_anoc */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+/* system_noc */
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H */
diff --git a/include/dt-bindings/interconnect/qcom,sc7180.h b/include/dt-bindings/interconnect/qcom,sc7180.h
index f9970f6032eb..de5d5867bd67 100644
--- a/include/dt-bindings/interconnect/qcom,sc7180.h
+++ b/include/dt-bindings/interconnect/qcom,sc7180.h
@@ -108,9 +108,6 @@
#define SLAVE_LLCC 11
#define SLAVE_SERVICE_GEM_NOC 12
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
diff --git a/include/dt-bindings/interconnect/qcom,sc7280.h b/include/dt-bindings/interconnect/qcom,sc7280.h
new file mode 100644
index 000000000000..21b000443999
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc7280.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC7280 interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_PCIE_0 4
+#define MASTER_PCIE_1 5
+#define MASTER_SDCC_1 6
+#define MASTER_SDCC_2 7
+#define MASTER_SDCC_4 8
+#define MASTER_UFS_MEM 9
+#define MASTER_USB2 10
+#define MASTER_USB3_0 11
+#define SLAVE_A1NOC_SNOC 12
+#define SLAVE_ANOC_PCIE_GEM_NOC 13
+#define SLAVE_SERVICE_A1NOC 14
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_A2NOC_CFG 1
+#define MASTER_CNOC_A2NOC 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define SLAVE_A2NOC_SNOC 6
+#define SLAVE_SERVICE_A2NOC 7
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_CNOC3_CNOC2 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_CAMERA_CFG 4
+#define SLAVE_CLK_CTL 5
+#define SLAVE_CDSP_CFG 6
+#define SLAVE_RBCPR_CX_CFG 7
+#define SLAVE_RBCPR_MX_CFG 8
+#define SLAVE_CRYPTO_0_CFG 9
+#define SLAVE_CX_RDPM 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_DISPLAY_CFG 12
+#define SLAVE_GFX3D_CFG 13
+#define SLAVE_HWKM 14
+#define SLAVE_IMEM_CFG 15
+#define SLAVE_IPA_CFG 16
+#define SLAVE_IPC_ROUTER_CFG 17
+#define SLAVE_LPASS 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PKA_WRAPPER_CFG 25
+#define SLAVE_PMU_WRAPPER_CFG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_SDCC_1 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SECURITY 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB2 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_CNOC2_CNOC3 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_SNOC_CFG 46
+
+#define MASTER_CNOC2_CNOC3 0
+#define MASTER_GEM_NOC_CNOC 1
+#define MASTER_GEM_NOC_PCIE_SNOC 2
+#define SLAVE_AOSS 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC3_CNOC2 5
+#define SLAVE_CNOC_A2NOC 6
+#define SLAVE_DDRSS_CFG 7
+#define SLAVE_BOOT_IMEM 8
+#define SLAVE_IMEM 9
+#define SLAVE_PIMEM 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_MSS_PROC_MS_MPU_CFG 11
+#define SLAVE_MCDMA_MS_MPU_CFG 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define SLAVE_LPASS_CORE_CFG 1
+#define SLAVE_LPASS_LPI_CFG 2
+#define SLAVE_LPASS_MPU_CFG 3
+#define SLAVE_LPASS_TOP_CFG 4
+#define SLAVE_SERVICES_LPASS_AML_NOC 5
+#define SLAVE_SERVICE_LPASS_AG_NOC 6
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_PIMEM 3
+#define MASTER_GIC 4
+#define SLAVE_SNOC_GEM_NOC_GC 5
+#define SLAVE_SNOC_GEM_NOC_SF 6
+#define SLAVE_SERVICE_SNOC 7
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h
new file mode 100644
index 000000000000..0bdc8d6cb401
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8180x.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC8180x interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_UFS_CARD 1
+#define MASTER_UFS_GEN4 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3 4
+#define MASTER_USB3_1 5
+#define MASTER_USB3_2 6
+#define A1NOC_SNOC_SLV 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI_0 2
+#define MASTER_QSPI_1 3
+#define MASTER_QUP_0 4
+#define MASTER_QUP_1 5
+#define MASTER_QUP_2 6
+#define MASTER_SENSORS_AHB 7
+#define MASTER_CRYPTO_CORE_0 8
+#define MASTER_IPA 9
+#define MASTER_EMAC 10
+#define MASTER_PCIE 11
+#define MASTER_PCIE_1 12
+#define MASTER_PCIE_2 13
+#define MASTER_PCIE_3 14
+#define MASTER_QDSS_ETR 15
+#define MASTER_SDCC_2 16
+#define MASTER_SDCC_4 17
+#define A2NOC_SNOC_SLV 18
+#define SLAVE_ANOC_PCIE_GEM_NOC 19
+#define SLAVE_SERVICE_A2NOC 20
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define SNOC_CNOC_MAS 0
+#define SLAVE_A1NOC_CFG 1
+#define SLAVE_A2NOC_CFG 2
+#define SLAVE_AHB2PHY_CENTER 3
+#define SLAVE_AHB2PHY_EAST 4
+#define SLAVE_AHB2PHY_WEST 5
+#define SLAVE_AHB2PHY_SOUTH 6
+#define SLAVE_AOP 7
+#define SLAVE_AOSS 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CLK_CTL 10
+#define SLAVE_CDSP_CFG 11
+#define SLAVE_RBCPR_CX_CFG 12
+#define SLAVE_RBCPR_MMCX_CFG 13
+#define SLAVE_RBCPR_MX_CFG 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_EMAC_CFG 18
+#define SLAVE_GLM 19
+#define SLAVE_GRAPHICS_3D_CFG 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_CNOC_MNOC_CFG 23
+#define SLAVE_NPU_CFG 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PCIE_2_CFG 27
+#define SLAVE_PCIE_3_CFG 28
+#define SLAVE_PDM 29
+#define SLAVE_PIMEM_CFG 30
+#define SLAVE_PRNG 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QSPI_0 33
+#define SLAVE_QSPI_1 34
+#define SLAVE_QUP_1 35
+#define SLAVE_QUP_2 36
+#define SLAVE_QUP_0 37
+#define SLAVE_SDCC_2 38
+#define SLAVE_SDCC_4 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_SPSS_CFG 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM_EAST 44
+#define SLAVE_TLMM_SOUTH 45
+#define SLAVE_TLMM_WEST 46
+#define SLAVE_TSIF 47
+#define SLAVE_UFS_CARD_CFG 48
+#define SLAVE_UFS_MEM_0_CFG 49
+#define SLAVE_UFS_MEM_1_CFG 50
+#define SLAVE_USB3 51
+#define SLAVE_USB3_1 52
+#define SLAVE_USB3_2 53
+#define SLAVE_VENUS_CFG 54
+#define SLAVE_VSENSE_CTRL_CFG 55
+#define SLAVE_SERVICE_CNOC 56
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GPU_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_ECC 11
+#define SLAVE_MSS_PROC_MS_MPU_CFG 12
+#define SLAVE_ECC 13
+#define SLAVE_GEM_NOC_SNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_SERVICE_GEM_NOC 16
+#define SLAVE_SERVICE_GEM_NOC_1 17
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_PCIE_2 15
+#define SLAVE_PCIE_3 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#define MASTER_MNOC_HF_MEM_NOC_DISPLAY 0
+#define MASTER_MNOC_SF_MEM_NOC_DISPLAY 1
+#define SLAVE_LLCC_DISPLAY 2
+
+#define MASTER_LLCC_DISPLAY 0
+#define SLAVE_EBI_CH0_DISPLAY 1
+
+#define MASTER_MDP_PORT0_DISPLAY 0
+#define MASTER_MDP_PORT1_DISPLAY 1
+#define MASTER_ROTATOR_DISPLAY 2
+#define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3
+#define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#endif
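
A note for readers of these headers: the ID space restarts at 0 for every NoC provider, so a value such as 1 is only meaningful together with the provider phandle it follows. A minimal, hypothetical consumer sketch of the SC8180x IDs above (the provider labels aggre1_noc/mc_virt and the UFS node are assumptions for illustration, and the number of cells per endpoint depends on each provider's #interconnect-cells):

/* Hypothetical SC8180x consumer; labels and the node are illustrative only. */
#include <dt-bindings/interconnect/qcom,sc8180x.h>

&ufs_mem_hc {
	/* MASTER_UFS_MEM (3) is an aggre1_noc ID; SLAVE_EBI_CH0 (1) is an mc_virt ID */
	interconnects = <&aggre1_noc MASTER_UFS_MEM &mc_virt SLAVE_EBI_CH0>;
	interconnect-names = "ufs-ddr";
};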
diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h
new file mode 100644
index 000000000000..f89f47e99c6d
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+
+/* aggre1_noc */
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_2 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_IPA 4
+#define MASTER_EMAC_1 5
+#define MASTER_SDCC_4 6
+#define MASTER_UFS_MEM 7
+#define MASTER_USB3_0 8
+#define MASTER_USB3_1 9
+#define MASTER_USB3_MP 10
+#define MASTER_USB4_0 11
+#define MASTER_USB4_1 12
+#define SLAVE_A1NOC_SNOC 13
+#define SLAVE_USB_NOC_SNOC 14
+#define SLAVE_SERVICE_A1NOC 15
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_A2NOC_CFG 2
+#define MASTER_CRYPTO 3
+#define MASTER_SENSORS_PROC 4
+#define MASTER_SP 5
+#define MASTER_EMAC 6
+#define MASTER_PCIE_0 7
+#define MASTER_PCIE_1 8
+#define MASTER_PCIE_2A 9
+#define MASTER_PCIE_2B 10
+#define MASTER_PCIE_3A 11
+#define MASTER_PCIE_3B 12
+#define MASTER_PCIE_4 13
+#define MASTER_QDSS_ETR 14
+#define MASTER_SDCC_2 15
+#define MASTER_UFS_CARD 16
+#define SLAVE_A2NOC_SNOC 17
+#define SLAVE_ANOC_PCIE_GEM_NOC 18
+#define SLAVE_SERVICE_A2NOC 19
+
+/* clk_virt */
+/* 0 was used by MASTER_IPA_CORE, now represented as RPMh clock */
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+/* 4 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_CDSP1_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MMCX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CPR_NSPCX 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CX_RDPM 16
+#define SLAVE_DCC_CFG 17
+#define SLAVE_DISPLAY_CFG 18
+#define SLAVE_DISPLAY1_CFG 19
+#define SLAVE_EMAC_CFG 20
+#define SLAVE_EMAC1_CFG 21
+#define SLAVE_GFX3D_CFG 22
+#define SLAVE_HWKM 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_IPC_ROUTER_CFG 26
+#define SLAVE_LPASS 27
+#define SLAVE_MX_RDPM 28
+#define SLAVE_MXC_RDPM 29
+#define SLAVE_PCIE_0_CFG 30
+#define SLAVE_PCIE_1_CFG 31
+#define SLAVE_PCIE_2A_CFG 32
+#define SLAVE_PCIE_2B_CFG 33
+#define SLAVE_PCIE_3A_CFG 34
+#define SLAVE_PCIE_3B_CFG 35
+#define SLAVE_PCIE_4_CFG 36
+#define SLAVE_PCIE_RSC_CFG 37
+#define SLAVE_PDM 38
+#define SLAVE_PIMEM_CFG 39
+#define SLAVE_PKA_WRAPPER_CFG 40
+#define SLAVE_PMU_WRAPPER_CFG 41
+#define SLAVE_QDSS_CFG 42
+#define SLAVE_QSPI_0 43
+#define SLAVE_QUP_0 44
+#define SLAVE_QUP_1 45
+#define SLAVE_QUP_2 46
+#define SLAVE_SDCC_2 47
+#define SLAVE_SDCC_4 48
+#define SLAVE_SECURITY 49
+#define SLAVE_SMMUV3_CFG 50
+#define SLAVE_SMSS_CFG 51
+#define SLAVE_SPSS_CFG 52
+#define SLAVE_TCSR 53
+#define SLAVE_TLMM 54
+#define SLAVE_UFS_CARD_CFG 55
+#define SLAVE_UFS_MEM_CFG 56
+#define SLAVE_USB3_0 57
+#define SLAVE_USB3_1 58
+#define SLAVE_USB3_MP 59
+#define SLAVE_USB4_0 60
+#define SLAVE_USB4_1 61
+#define SLAVE_VENUS_CFG 62
+#define SLAVE_VSENSE_CTRL_CFG 63
+#define SLAVE_VSENSE_CTRL_R_CFG 64
+#define SLAVE_A1NOC_CFG 65
+#define SLAVE_A2NOC_CFG 66
+#define SLAVE_ANOC_PCIE_BRIDGE_CFG 67
+#define SLAVE_DDRSS_CFG 68
+#define SLAVE_CNOC_MNOC_CFG 69
+#define SLAVE_SNOC_CFG 70
+#define SLAVE_SNOC_SF_BRIDGE_CFG 71
+#define SLAVE_IMEM 72
+#define SLAVE_PIMEM 73
+#define SLAVE_SERVICE_CNOC 74
+#define SLAVE_PCIE_0 75
+#define SLAVE_PCIE_1 76
+#define SLAVE_PCIE_2A 77
+#define SLAVE_PCIE_2B 78
+#define SLAVE_PCIE_3A 79
+#define SLAVE_PCIE_3B 80
+#define SLAVE_PCIE_4 81
+#define SLAVE_QDSS_STM 82
+#define SLAVE_SMSS 83
+#define SLAVE_TCU 84
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GFX3D 7
+#define MASTER_MNOC_HF_MEM_NOC 8
+#define MASTER_MNOC_SF_MEM_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_GC_MEM_NOC 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_GEM_NOC_PCIE_CNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define MASTER_MDP1 2
+#define MASTER_MDP_CORE1_0 3
+#define MASTER_MDP_CORE1_1 4
+#define MASTER_CNOC_MNOC_CFG 5
+#define MASTER_ROTATOR 6
+#define MASTER_ROTATOR_1 7
+#define MASTER_VIDEO_P0 8
+#define MASTER_VIDEO_P1 9
+#define MASTER_VIDEO_PROC 10
+#define MASTER_CAMNOC_ICP 11
+#define MASTER_CAMNOC_SF 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC 15
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_NSP_XFR 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_NSPB_XFR 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* system_noc */
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_USB_NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
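
The two "was used by" comments in the clk_virt block above illustrate the general rule for these headers: the numbers are devicetree ABI, so when a node such as MASTER_IPA_CORE is dropped its value is retired rather than reused, and every later entry keeps its ID. A hedged sketch of a QUP core-path vote that works unchanged across that removal (the qup_i2c0 node and the clk_virt label are assumptions for illustration):

/* Hypothetical sketch; qup_i2c0 and clk_virt are illustrative labels. */
#include <dt-bindings/interconnect/qcom,sc8280xp.h>

&qup_i2c0 {
	/* MASTER_QUP_CORE_0 stays 1 even though ID 0 was retired */
	interconnects = <&clk_virt MASTER_QUP_CORE_0 &clk_virt SLAVE_QUP_CORE_0>;
	interconnect-names = "qup-core";
};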
diff --git a/include/dt-bindings/interconnect/qcom,sdm660.h b/include/dt-bindings/interconnect/qcom,sdm660.h
new file mode 100644
index 000000000000..62e8d8670d5e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm660.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* SDM660 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H
+
+/* A2NOC */
+#define MASTER_IPA 0
+#define MASTER_CNOC_A2NOC 1
+#define MASTER_SDCC_1 2
+#define MASTER_SDCC_2 3
+#define MASTER_BLSP_1 4
+#define MASTER_BLSP_2 5
+#define MASTER_UFS 6
+#define MASTER_USB_HS 7
+#define MASTER_USB3 8
+#define MASTER_CRYPTO_C0 9
+#define SLAVE_A2NOC_SNOC 10
+
+/* BIMC */
+#define MASTER_GNOC_BIMC 0
+#define MASTER_OXILI 1
+#define MASTER_MNOC_BIMC 2
+#define MASTER_SNOC_BIMC 3
+#define MASTER_PIMEM 4
+#define SLAVE_EBI 5
+#define SLAVE_HMSS_L3 6
+#define SLAVE_BIMC_SNOC 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_CNOC_A2NOC 2
+#define SLAVE_MPM 3
+#define SLAVE_PMIC_ARB 4
+#define SLAVE_TLMM_NORTH 5
+#define SLAVE_TCSR 6
+#define SLAVE_PIMEM_CFG 7
+#define SLAVE_IMEM_CFG 8
+#define SLAVE_MESSAGE_RAM 9
+#define SLAVE_GLM 10
+#define SLAVE_BIMC_CFG 11
+#define SLAVE_PRNG 12
+#define SLAVE_SPDM 13
+#define SLAVE_QDSS_CFG 14
+#define SLAVE_CNOC_MNOC_CFG 15
+#define SLAVE_SNOC_CFG 16
+#define SLAVE_QM_CFG 17
+#define SLAVE_CLK_CTL 18
+#define SLAVE_MSS_CFG 19
+#define SLAVE_TLMM_SOUTH 20
+#define SLAVE_UFS_CFG 21
+#define SLAVE_A2NOC_CFG 22
+#define SLAVE_A2NOC_SMMU_CFG 23
+#define SLAVE_GPUSS_CFG 24
+#define SLAVE_AHB2PHY 25
+#define SLAVE_BLSP_1 26
+#define SLAVE_SDCC_1 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_TLMM_CENTER 29
+#define SLAVE_BLSP_2 30
+#define SLAVE_PDM 31
+#define SLAVE_CNOC_MNOC_MMSS_CFG 32
+#define SLAVE_USB_HS 33
+#define SLAVE_USB3_0 34
+#define SLAVE_SRVC_CNOC 35
+
+/* GNOC */
+#define MASTER_APSS_PROC 0
+#define SLAVE_GNOC_BIMC 1
+#define SLAVE_GNOC_SNOC 2
+
+/* MNOC */
+#define MASTER_CPP 0
+#define MASTER_JPEG 1
+#define MASTER_MDP_P0 2
+#define MASTER_MDP_P1 3
+#define MASTER_VENUS 4
+#define MASTER_VFE 5
+#define SLAVE_MNOC_BIMC 6
+#define MASTER_CNOC_MNOC_MMSS_CFG 7
+#define MASTER_CNOC_MNOC_CFG 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CAMERA_THROTTLE_CFG 10
+#define SLAVE_MISC_CFG 11
+#define SLAVE_VENUS_THROTTLE_CFG 12
+#define SLAVE_VENUS_CFG 13
+#define SLAVE_MMSS_CLK_XPU_CFG 14
+#define SLAVE_MMSS_CLK_CFG 15
+#define SLAVE_MNOC_MPU_CFG 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_CSI_PHY_CFG 18
+#define SLAVE_DISPLAY_THROTTLE_CFG 19
+#define SLAVE_SMMU_CFG 20
+#define SLAVE_SRVC_MNOC 21
+
+/* SNOC */
+#define MASTER_QDSS_ETR 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_BIMC_SNOC 3
+#define MASTER_A2NOC_SNOC 4
+#define MASTER_GNOC_SNOC 5
+#define SLAVE_HMSS 6
+#define SLAVE_LPASS 7
+#define SLAVE_WLAN 8
+#define SLAVE_CDSP 9
+#define SLAVE_IPA 10
+#define SLAVE_SNOC_BIMC 11
+#define SLAVE_SNOC_CNOC 12
+#define SLAVE_IMEM 13
+#define SLAVE_PIMEM 14
+#define SLAVE_QDSS_STM 15
+#define SLAVE_SRVC_SNOC 16
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h b/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h
new file mode 100644
index 000000000000..9b516cc360bb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Qualcomm SDM670 interconnect IDs
+ *
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM670_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM670_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_BLSP_1 1
+#define MASTER_TSIF 2
+#define MASTER_EMMC 3
+#define MASTER_SDCC_2 4
+#define MASTER_SDCC_4 5
+#define MASTER_UFS_MEM 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_BLSP_2 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_USB3 7
+#define SLAVE_A2NOC_SNOC 8
+#define SLAVE_SERVICE_A2NOC 9
+
+
+#define MASTER_SPDM 0
+#define MASTER_SNOC_CNOC 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AOP 4
+#define SLAVE_AOSS 5
+#define SLAVE_CAMERA_CFG 6
+#define SLAVE_CLK_CTL 7
+#define SLAVE_CDSP_CFG 8
+#define SLAVE_RBCPR_CX_CFG 9
+#define SLAVE_CRYPTO_0_CFG 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_CNOC_DDRSS 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_EMMC_CFG 14
+#define SLAVE_GLM 15
+#define SLAVE_GRAPHICS_3D_CFG 16
+#define SLAVE_IMEM_CFG 17
+#define SLAVE_IPA_CFG 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_PDM 20
+#define SLAVE_SOUTH_PHY_CFG 21
+#define SLAVE_PIMEM_CFG 22
+#define SLAVE_PRNG 23
+#define SLAVE_QDSS_CFG 24
+#define SLAVE_BLSP_2 25
+#define SLAVE_BLSP_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SDCC_4 28
+#define SLAVE_SNOC_CFG 29
+#define SLAVE_SPDM_WRAPPER 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM_NORTH 32
+#define SLAVE_TLMM_SOUTH 33
+#define SLAVE_TSIF 34
+#define SLAVE_UFS_MEM_CFG 35
+#define SLAVE_USB3 36
+#define SLAVE_VENUS_CFG 37
+#define SLAVE_VSENSE_CTRL_CFG 38
+#define SLAVE_CNOC_A2NOC 39
+#define SLAVE_SERVICE_CNOC 40
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_MEM_NOC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GNOC_CFG 1
+#define SLAVE_GNOC_SNOC 2
+#define SLAVE_GNOC_MEM_NOC 3
+#define SLAVE_SERVICE_GNOC 4
+
+#define MASTER_TCU_0 0
+#define MASTER_MEM_NOC_CFG 1
+#define MASTER_GNOC_MEM_NOC 2
+#define MASTER_MNOC_HF_MEM_NOC 3
+#define MASTER_MNOC_SF_MEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_GRAPHICS_3D 7
+#define SLAVE_MSS_PROC_MS_MPU_CFG 8
+#define SLAVE_MEM_NOC_GNOC 9
+#define SLAVE_LLCC 10
+#define SLAVE_MEM_NOC_SNOC 11
+#define SLAVE_SERVICE_MEM_NOC 12
+#define MASTER_LLCC 13
+#define SLAVE_EBI_CH0 14
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GNOC_SNOC 3
+#define MASTER_MEM_NOC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_APPSS 7
+#define SLAVE_SNOC_CNOC 8
+#define SLAVE_SNOC_MEM_NOC_GC 9
+#define SLAVE_SNOC_MEM_NOC_SF 10
+#define SLAVE_OCIMEM 11
+#define SLAVE_PIMEM 12
+#define SLAVE_SERVICE_SNOC 13
+#define SLAVE_QDSS_STM 14
+#define SLAVE_TCU 15
+#define MASTER_CAMNOC_HF0_UNCOMP 16
+#define MASTER_CAMNOC_HF1_UNCOMP 17
+#define MASTER_CAMNOC_SF_UNCOMP 18
+#define SLAVE_CAMNOC_UNCOMP 19
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h
index 290be38f40e6..67b500e24915 100644
--- a/include/dt-bindings/interconnect/qcom,sdm845.h
+++ b/include/dt-bindings/interconnect/qcom,sdm845.h
@@ -19,6 +19,7 @@
#define SLAVE_A1NOC_SNOC 7
#define SLAVE_SERVICE_A1NOC 8
#define SLAVE_ANOC_PCIE_A1NOC_SNOC 9
+#define MASTER_QUP_1 10

#define MASTER_A2NOC_CFG 0
#define MASTER_QDSS_BAM 1
@@ -32,6 +33,7 @@
#define SLAVE_A2NOC_SNOC 9
#define SLAVE_ANOC_PCIE_SNOC 10
#define SLAVE_SERVICE_A2NOC 11
+#define MASTER_QUP_2 12

#define MASTER_SPDM 0
#define MASTER_TIC 1
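
This sdm845 change shows the same ABI rule from the additive side: MASTER_QUP_1 and MASTER_QUP_2 are appended with the next free values (10 and 12) rather than slotted in numerical order, so the SLAVE_* values that deployed device trees already encode do not shift. A hypothetical use of one of the new IDs (the provider labels and the i2c node are assumptions for illustration):

/* Hypothetical sketch; aggre1_noc and mem_noc labels are illustrative. */
#include <dt-bindings/interconnect/qcom,sdm845.h>

&i2c10 {
	interconnects = <&aggre1_noc MASTER_QUP_1 &mem_noc SLAVE_EBI1>;
	interconnect-names = "qup-memory";
};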
diff --git a/include/dt-bindings/interconnect/qcom,sdx55.h b/include/dt-bindings/interconnect/qcom,sdx55.h
new file mode 100644
index 000000000000..1925f0784ab2
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx55.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SDX55 interconnect IDs
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_AMPSS_M0 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO_CORE_0 10
+#define MASTER_EMAC 11
+#define MASTER_IPA_PCIE 12
+#define MASTER_PCIE 13
+#define MASTER_QDSS_ETR 14
+#define MASTER_SDCC_1 15
+#define MASTER_USB3 16
+#define SLAVE_AOP 17
+#define SLAVE_AOSS 18
+#define SLAVE_APPSS 19
+#define SLAVE_AUDIO 20
+#define SLAVE_BLSP_1 21
+#define SLAVE_CLK_CTL 22
+#define SLAVE_CRYPTO_0_CFG 23
+#define SLAVE_CNOC_DDRSS 24
+#define SLAVE_ECC_CFG 25
+#define SLAVE_EMAC_CFG 26
+#define SLAVE_IMEM_CFG 27
+#define SLAVE_IPA_CFG 28
+#define SLAVE_CNOC_MSS 29
+#define SLAVE_PCIE_PARF 30
+#define SLAVE_PDM 31
+#define SLAVE_PRNG 32
+#define SLAVE_QDSS_CFG 33
+#define SLAVE_QPIC 34
+#define SLAVE_SDCC_1 35
+#define SLAVE_SNOC_CFG 36
+#define SLAVE_SPMI_FETCHER 37
+#define SLAVE_SPMI_VGI_COEX 38
+#define SLAVE_TCSR 39
+#define SLAVE_TLMM 40
+#define SLAVE_USB3 41
+#define SLAVE_USB3_PHY_CFG 42
+#define SLAVE_ANOC_SNOC 43
+#define SLAVE_SNOC_MEM_NOC_GC 44
+#define SLAVE_OCIMEM 45
+#define SLAVE_SERVICE_SNOC 46
+#define SLAVE_PCIE_0 47
+#define SLAVE_QDSS_STM 48
+#define SLAVE_TCU 49
+
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h
new file mode 100644
index 000000000000..b25288aa7d74
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx65.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_APPSS_PROC 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO 10
+#define MASTER_IPA_PCIE 11
+#define MASTER_PCIE_0 12
+#define MASTER_QDSS_ETR 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3 15
+#define SLAVE_AOSS 16
+#define SLAVE_APPSS 17
+#define SLAVE_AUDIO 18
+#define SLAVE_BLSP_1 19
+#define SLAVE_CLK_CTL 20
+#define SLAVE_CRYPTO_0_CFG 21
+#define SLAVE_CNOC_DDRSS 22
+#define SLAVE_ECC_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MSS 26
+#define SLAVE_PCIE_PARF 27
+#define SLAVE_PDM 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QPIC 31
+#define SLAVE_SDCC_1 32
+#define SLAVE_SNOC_CFG 33
+#define SLAVE_SPMI_FETCHER 34
+#define SLAVE_SPMI_VGI_COEX 35
+#define SLAVE_TCSR 36
+#define SLAVE_TLMM 37
+#define SLAVE_USB3 38
+#define SLAVE_USB3_PHY_CFG 39
+#define SLAVE_ANOC_SNOC 40
+#define SLAVE_SNOC_MEM_NOC_GC 41
+#define SLAVE_IMEM 42
+#define SLAVE_SERVICE_SNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h
new file mode 100644
index 000000000000..e903f5f3dd8f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx75.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+
+#define MASTER_QPIC_CORE 0
+#define MASTER_QUP_CORE_0 1
+#define SLAVE_QPIC_CORE 2
+#define SLAVE_QUP_CORE_0 3
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LAGG_CFG 1
+#define SLAVE_MCCC_MASTER 2
+#define SLAVE_GEM_NOC_CFG 3
+#define SLAVE_SNOOP_BWMON 4
+
+#define MASTER_SYS_TCU 0
+#define MASTER_APPSS_PROC 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_MSS_PROC 3
+#define MASTER_ANOC_PCIE_GEM_NOC 4
+#define MASTER_SNOC_SF_MEM_NOC 5
+#define MASTER_GIC 6
+#define MASTER_IPA_PCIE 7
+#define SLAVE_GEM_NOC_CNOC 8
+#define SLAVE_LLCC 9
+#define SLAVE_MEM_NOC_PCIE_SNOC 10
+#define SLAVE_SERVICE_GEM_NOC 11
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+
+#define MASTER_AUDIO 0
+#define MASTER_GIC_AHB 1
+#define MASTER_PCIE_RSCC 2
+#define MASTER_QDSS_BAM 3
+#define MASTER_QPIC 4
+#define MASTER_QUP_0 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_GEM_NOC_CNOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_CFG 9
+#define MASTER_PCIE_ANOC_CFG 10
+#define MASTER_CRYPTO 11
+#define MASTER_IPA 12
+#define MASTER_MVMSS 13
+#define MASTER_EMAC_0 14
+#define MASTER_EMAC_1 15
+#define MASTER_QDSS_ETR 16
+#define MASTER_QDSS_ETR_1 17
+#define MASTER_SDCC_1 18
+#define MASTER_SDCC_4 19
+#define MASTER_USB3_0 20
+#define SLAVE_ETH0_CFG 21
+#define SLAVE_ETH1_CFG 22
+#define SLAVE_AUDIO 23
+#define SLAVE_CLK_CTL 24
+#define SLAVE_CRYPTO_0_CFG 25
+#define SLAVE_IMEM_CFG 26
+#define SLAVE_IPA_CFG 27
+#define SLAVE_IPC_ROUTER_CFG 28
+#define SLAVE_CNOC_MSS 29
+#define SLAVE_ICBDI_MVMSS_CFG 30
+#define SLAVE_PCIE_0_CFG 31
+#define SLAVE_PCIE_1_CFG 32
+#define SLAVE_PCIE_2_CFG 33
+#define SLAVE_PCIE_RSC_CFG 34
+#define SLAVE_PDM 35
+#define SLAVE_PRNG 36
+#define SLAVE_QDSS_CFG 37
+#define SLAVE_QPIC 38
+#define SLAVE_QUP_0 39
+#define SLAVE_SDCC_1 40
+#define SLAVE_SDCC_4 41
+#define SLAVE_SPMI_VGI_COEX 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM 44
+#define SLAVE_USB3 45
+#define SLAVE_USB3_PHY_CFG 46
+#define SLAVE_A1NOC_CFG 47
+#define SLAVE_DDRSS_CFG 48
+#define SLAVE_SNOC_GEM_NOC_SF 49
+#define SLAVE_SNOC_CFG 50
+#define SLAVE_PCIE_ANOC_CFG 51
+#define SLAVE_IMEM 52
+#define SLAVE_SERVICE_PCIE_ANOC 53
+#define SLAVE_SERVICE_SNOC 54
+#define SLAVE_PCIE_0 55
+#define SLAVE_PCIE_1 56
+#define SLAVE_PCIE_2 57
+#define SLAVE_QDSS_STM 58
+#define SLAVE_TCU 59
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm6115.h b/include/dt-bindings/interconnect/qcom,sm6115.h
new file mode 100644
index 000000000000..21090e585f05
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6115.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define SNOC_BIMC_MAS 3
+#define MASTER_GRAPHICS_3D 4
+#define MASTER_TCU_0 5
+#define SLAVE_EBI_CH0 6
+#define BIMC_SNOC_SLV 7
+
+/* CNOC */
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_USB 2
+#define SLAVE_APSS_THROTTLE_CFG 3
+#define SLAVE_BIMC_CFG 4
+#define SLAVE_BOOT_ROM 5
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 6
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MX_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_DCC_CFG 13
+#define SLAVE_DDR_PHY_CFG 14
+#define SLAVE_DDR_SS_CFG 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_DISPLAY_THROTTLE_CFG 17
+#define SLAVE_GPU_CFG 18
+#define SLAVE_GPU_THROTTLE_CFG 19
+#define SLAVE_HWKM_CORE 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_LPASS 23
+#define SLAVE_MAPSS 24
+#define SLAVE_MDSP_MPU_CFG 25
+#define SLAVE_MESSAGE_RAM 26
+#define SLAVE_CNOC_MSS 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PKA_CORE 30
+#define SLAVE_PMIC_ARB 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QM_CFG 33
+#define SLAVE_QM_MPU_CFG 34
+#define SLAVE_QPIC 35
+#define SLAVE_QUP_0 36
+#define SLAVE_RPM 37
+#define SLAVE_SDCC_1 38
+#define SLAVE_SDCC_2 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_TCSR 42
+#define SLAVE_TLMM 43
+#define SLAVE_USB3 44
+#define SLAVE_VENUS_CFG 45
+#define SLAVE_VENUS_THROTTLE_CFG 46
+#define SLAVE_VSENSE_CTRL_CFG 47
+#define SLAVE_SERVICE_CNOC 48
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define BIMC_SNOC_MAS 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QPIC 7
+#define MASTER_QUP_0 8
+#define MASTER_IPA 9
+#define MASTER_QDSS_ETR 10
+#define MASTER_SDCC_1 11
+#define MASTER_SDCC_2 12
+#define MASTER_USB3 13
+#define SLAVE_APPSS 14
+#define SNOC_CNOC_SLV 15
+#define SLAVE_OCIMEM 16
+#define SLAVE_PIMEM 17
+#define SNOC_BIMC_SLV 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* CLK Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP_PORT0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+#endif
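
SM6115 is an RPM (not RPMh) platform, and the three Virtual blocks above have no bus hardware behind them: they exist so that QUP core votes and real-time (MMRT) versus non-real-time (MMNRT) multimedia traffic can be aggregated separately before reaching BIMC. A hypothetical sketch of that split (mmrt_virt, mmnrt_virt, bimc and both consumer nodes are illustrative assumptions; cell count depends on each provider's #interconnect-cells):

/* Hypothetical SM6115 sketch; all labels below are illustrative. */
#include <dt-bindings/interconnect/qcom,sm6115.h>

&mdss_mdp {
	/* display is real-time traffic: route the vote through MMRT */
	interconnects = <&mmrt_virt MASTER_MDP_PORT0 &bimc SLAVE_EBI_CH0>;
	interconnect-names = "mdp0-mem";
};

&venus {
	/* video is non-real-time traffic: route it through MMNRT */
	interconnects = <&mmnrt_virt MASTER_VIDEO_P0 &bimc SLAVE_EBI_CH0>;
	interconnect-names = "video-mem";
};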
diff --git a/include/dt-bindings/interconnect/qcom,sm6350.h b/include/dt-bindings/interconnect/qcom,sm6350.h
new file mode 100644
index 000000000000..e662cede9aaa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6350.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Qualcomm SM6350 interconnect IDs
+ *
+ * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_EMMC 2
+#define MASTER_UFS_MEM 3
+#define A1NOC_SNOC_SLV 4
+#define SLAVE_SERVICE_A1NOC 5
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_CRYPTO_CORE_0 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_SDCC_2 6
+#define MASTER_USB3 7
+#define A2NOC_SNOC_SLV 8
+#define SLAVE_SERVICE_A2NOC 9
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_ICP_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define MASTER_QUP_CORE_0 3
+#define MASTER_QUP_CORE_1 4
+#define MASTER_LLCC 5
+#define SLAVE_CAMNOC_UNCOMP 6
+#define SLAVE_QUP_CORE_0 7
+#define SLAVE_QUP_CORE_1 8
+#define SLAVE_EBI_CH0 9
+
+#define MASTER_NPU 0
+#define MASTER_NPU_PROC 1
+#define SLAVE_CDSP_GEM_NOC 2
+
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY 4
+#define SLAVE_AHB2PHY_2 5
+#define SLAVE_AOSS 6
+#define SLAVE_BOOT_ROM 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 10
+#define SLAVE_CLK_CTL 11
+#define SLAVE_RBCPR_CX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CRYPTO_0_CFG 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_DISPLAY_THROTTLE_CFG 18
+#define SLAVE_EMMC_CFG 19
+#define SLAVE_GLM 20
+#define SLAVE_GRAPHICS_3D_CFG 21
+#define SLAVE_IMEM_CFG 22
+#define SLAVE_IPA_CFG 23
+#define SLAVE_CNOC_MNOC_CFG 24
+#define SLAVE_CNOC_MSS 25
+#define SLAVE_NPU_CFG 26
+#define SLAVE_PDM 27
+#define SLAVE_PIMEM_CFG 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QM_CFG 31
+#define SLAVE_QM_MPU_CFG 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_SDCC_2 35
+#define SLAVE_SECURITY 36
+#define SLAVE_SNOC_CFG 37
+#define SLAVE_TCSR 38
+#define SLAVE_UFS_MEM_CFG 39
+#define SLAVE_USB3 40
+#define SLAVE_VENUS_CFG 41
+#define SLAVE_VENUS_THROTTLE_CFG 42
+#define SLAVE_VSENSE_CTRL_CFG 43
+#define SLAVE_SERVICE_CNOC 44
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_SNOC_GC_MEM_NOC 6
+#define MASTER_SNOC_SF_MEM_NOC 7
+#define MASTER_GRAPHICS_3D 8
+#define SLAVE_MCDMA_MS_MPU_CFG 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_SERVICE_GEM_NOC 13
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP_PORT0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_NOC_CFG 1
+#define SLAVE_NPU_CAL_DP0 2
+#define SLAVE_NPU_CP 3
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 4
+#define SLAVE_NPU_DPM 5
+#define SLAVE_ISENSE_CFG 6
+#define SLAVE_NPU_LLM_CFG 7
+#define SLAVE_NPU_TCM 8
+#define SLAVE_NPU_COMPUTE_NOC 9
+#define SLAVE_SERVICE_NPU_NOC 10
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm7150-rpmh.h b/include/dt-bindings/interconnect/qcom,sm7150-rpmh.h
new file mode 100644
index 000000000000..1f610eb832aa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm7150-rpmh.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Qualcomm SM7150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM7150_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM7150_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_TSIF 2
+#define MASTER_EMMC 3
+#define MASTER_SDCC_2 4
+#define MASTER_SDCC_4 5
+#define MASTER_UFS_MEM 6
+#define A1NOC_SNOC_SLV 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_PCIE 6
+#define MASTER_QDSS_ETR 7
+#define MASTER_USB3 8
+#define A2NOC_SNOC_SLV 9
+#define SLAVE_ANOC_PCIE_GEM_NOC 10
+#define SLAVE_SERVICE_A2NOC 11
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_RT_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define MASTER_CAMNOC_NRT_UNCOMP 3
+#define SLAVE_CAMNOC_UNCOMP 4
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_GEM_NOC 1
+
+#define MASTER_SPDM 0
+#define SNOC_CNOC_MAS 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_A1NOC_CFG 3
+#define SLAVE_A2NOC_CFG 4
+#define SLAVE_AHB2PHY_NORTH 5
+#define SLAVE_AHB2PHY_SOUTH 6
+#define SLAVE_AHB2PHY_WEST 7
+#define SLAVE_AOP 8
+#define SLAVE_AOSS 9
+#define SLAVE_CAMERA_CFG 10
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 11
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 12
+#define SLAVE_CLK_CTL 13
+#define SLAVE_CDSP_CFG 14
+#define SLAVE_RBCPR_CX_CFG 15
+#define SLAVE_RBCPR_MX_CFG 16
+#define SLAVE_CRYPTO_0_CFG 17
+#define SLAVE_CNOC_DDRSS 18
+#define SLAVE_DISPLAY_CFG 19
+#define SLAVE_DISPLAY_THROTTLE_CFG 20
+#define SLAVE_EMMC_CFG 21
+#define SLAVE_GLM 22
+#define SLAVE_GRAPHICS_3D_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MNOC_CFG 26
+#define SLAVE_PCIE_CFG 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PRNG 30
+#define SLAVE_QDSS_CFG 31
+#define SLAVE_QUP_0 32
+#define SLAVE_QUP_1 33
+#define SLAVE_SDCC_2 34
+#define SLAVE_SDCC_4 35
+#define SLAVE_SNOC_CFG 36
+#define SLAVE_SPDM_WRAPPER 37
+#define SLAVE_TCSR 38
+#define SLAVE_TLMM_NORTH 39
+#define SLAVE_TLMM_SOUTH 40
+#define SLAVE_TLMM_WEST 41
+#define SLAVE_TSIF 42
+#define SLAVE_UFS_MEM_CFG 43
+#define SLAVE_USB3 44
+#define SLAVE_VENUS_CFG 45
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 46
+#define SLAVE_VENUS_THROTTLE_CFG 47
+#define SLAVE_VSENSE_CTRL_CFG 48
+#define SLAVE_CNOC_A2NOC 49
+#define SLAVE_SERVICE_CNOC 50
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_GEM_NOC_PCIE_SNOC 6
+#define MASTER_SNOC_GC_MEM_NOC 7
+#define MASTER_SNOC_SF_MEM_NOC 8
+#define MASTER_GRAPHICS_3D 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_SERVICE_GEM_NOC 13
+
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_NRT 2
+#define MASTER_CAMNOC_RT 3
+#define MASTER_CAMNOC_SF 4
+#define MASTER_MDP_PORT0 5
+#define MASTER_MDP_PORT1 6
+#define MASTER_ROTATOR 7
+#define MASTER_VIDEO_P0 8
+#define MASTER_VIDEO_P1 9
+#define MASTER_VIDEO_PROC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_MNOC_HF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8150.h b/include/dt-bindings/interconnect/qcom,sm8150.h
new file mode 100644
index 000000000000..ef292791f52e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8150.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8150_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8150_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_EMAC 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3 4
+#define MASTER_USB3_1 5
+#define A1NOC_SNOC_SLV 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI 2
+#define MASTER_QUP_1 3
+#define MASTER_QUP_2 4
+#define MASTER_SENSORS_AHB 5
+#define MASTER_TSIF 6
+#define MASTER_CNOC_A2NOC 7
+#define MASTER_CRYPTO_CORE_0 8
+#define MASTER_IPA 9
+#define MASTER_PCIE 10
+#define MASTER_PCIE_1 11
+#define MASTER_QDSS_ETR 12
+#define MASTER_SDCC_2 13
+#define MASTER_SDCC_4 14
+#define A2NOC_SNOC_SLV 15
+#define SLAVE_ANOC_PCIE_GEM_NOC 16
+#define SLAVE_SERVICE_A2NOC 17
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_SPDM 0
+#define SNOC_CNOC_MAS 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_A1NOC_CFG 3
+#define SLAVE_A2NOC_CFG 4
+#define SLAVE_AHB2PHY_SOUTH 5
+#define SLAVE_AOP 6
+#define SLAVE_AOSS 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_CDSP_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MMCX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CRYPTO_0_CFG 14
+#define SLAVE_CNOC_DDRSS 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_EMAC_CFG 17
+#define SLAVE_GLM 18
+#define SLAVE_GRAPHICS_3D_CFG 19
+#define SLAVE_IMEM_CFG 20
+#define SLAVE_IPA_CFG 21
+#define SLAVE_CNOC_MNOC_CFG 22
+#define SLAVE_NPU_CFG 23
+#define SLAVE_PCIE_0_CFG 24
+#define SLAVE_PCIE_1_CFG 25
+#define SLAVE_NORTH_PHY_CFG 26
+#define SLAVE_PIMEM_CFG 27
+#define SLAVE_PRNG 28
+#define SLAVE_QDSS_CFG 29
+#define SLAVE_QSPI 30
+#define SLAVE_QUP_2 31
+#define SLAVE_QUP_1 32
+#define SLAVE_QUP_0 33
+#define SLAVE_SDCC_2 34
+#define SLAVE_SDCC_4 35
+#define SLAVE_SNOC_CFG 36
+#define SLAVE_SPDM_WRAPPER 37
+#define SLAVE_SPSS_CFG 38
+#define SLAVE_SSC_CFG 39
+#define SLAVE_TCSR 40
+#define SLAVE_TLMM_EAST 41
+#define SLAVE_TLMM_NORTH 42
+#define SLAVE_TLMM_SOUTH 43
+#define SLAVE_TLMM_WEST 44
+#define SLAVE_TSIF 45
+#define SLAVE_UFS_CARD_CFG 46
+#define SLAVE_UFS_MEM_CFG 47
+#define SLAVE_USB3 48
+#define SLAVE_USB3_1 49
+#define SLAVE_VENUS_CFG 50
+#define SLAVE_VSENSE_CTRL_CFG 51
+#define SLAVE_CNOC_A2NOC 52
+#define SLAVE_SERVICE_CNOC 53
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GPU_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_ECC 11
+#define SLAVE_MSS_PROC_MS_MPU_CFG 12
+#define SLAVE_ECC 13
+#define SLAVE_GEM_NOC_SNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_SERVICE_GEM_NOC 16
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_QDSS_STM 15
+#define SLAVE_TCU 16
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
new file mode 100644
index 000000000000..2a656c02df4b
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8250 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8250_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8250_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QSPI_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_TSIF 4
+#define MASTER_PCIE_2 5
+#define MASTER_SDCC_4 6
+#define MASTER_UFS_MEM 7
+#define MASTER_USB3 8
+#define MASTER_USB3_1 9
+#define A1NOC_SNOC_SLV 10
+#define SLAVE_ANOC_PCIE_GEM_NOC_1 11
+#define SLAVE_SERVICE_A1NOC 12
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_0 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_PCIE 6
+#define MASTER_PCIE_1 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_SDCC_2 9
+#define MASTER_UFS_CARD 10
+#define A2NOC_SNOC_SLV 11
+#define SLAVE_ANOC_PCIE_GEM_NOC 12
+#define SLAVE_SERVICE_A2NOC 13
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY_SOUTH 4
+#define SLAVE_AHB2PHY_NORTH 5
+#define SLAVE_AOSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MMCX_CFG 11
+#define SLAVE_RBCPR_MX_CFG 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CX_RDPM 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_GRAPHICS_3D_CFG 18
+#define SLAVE_IMEM_CFG 19
+#define SLAVE_IPA_CFG 20
+#define SLAVE_IPC_ROUTER_CFG 21
+#define SLAVE_LPASS 22
+#define SLAVE_CNOC_MNOC_CFG 23
+#define SLAVE_NPU_CFG 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PCIE_2_CFG 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PRNG 30
+#define SLAVE_QDSS_CFG 31
+#define SLAVE_QSPI_0 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_QUP_2 35
+#define SLAVE_SDCC_2 36
+#define SLAVE_SDCC_4 37
+#define SLAVE_SNOC_CFG 38
+#define SLAVE_TCSR 39
+#define SLAVE_TLMM_NORTH 40
+#define SLAVE_TLMM_SOUTH 41
+#define SLAVE_TLMM_WEST 42
+#define SLAVE_TSIF 43
+#define SLAVE_UFS_CARD_CFG 44
+#define SLAVE_UFS_MEM_CFG 45
+#define SLAVE_USB3 46
+#define SLAVE_USB3_1 47
+#define SLAVE_VENUS_CFG 48
+#define SLAVE_VSENSE_CTRL_CFG 49
+#define SLAVE_CNOC_A2NOC 50
+#define SLAVE_SERVICE_CNOC 51
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_AMPSS_M0 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define SLAVE_SERVICE_GEM_NOC_1 14
+#define SLAVE_SERVICE_GEM_NOC_2 15
+#define SLAVE_SERVICE_GEM_NOC 16
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_VIDEO_P0 4
+#define MASTER_VIDEO_P1 5
+#define MASTER_VIDEO_PROC 6
+#define MASTER_MDP_PORT0 7
+#define MASTER_MDP_PORT1 8
+#define MASTER_ROTATOR 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_CDP 1
+#define MASTER_NPU_NOC_CFG 2
+#define SLAVE_NPU_CAL_DP0 3
+#define SLAVE_NPU_CAL_DP1 4
+#define SLAVE_NPU_CP 5
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 6
+#define SLAVE_NPU_DPM 7
+#define SLAVE_ISENSE_CFG 8
+#define SLAVE_NPU_LLM_CFG 9
+#define SLAVE_NPU_TCM 10
+#define SLAVE_NPU_COMPUTE_NOC 11
+#define SLAVE_SERVICE_NPU_NOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_GEM_NOC_PCIE_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_APPSS 7
+#define SNOC_CNOC_SLV 8
+#define SLAVE_SNOC_GEM_NOC_GC 9
+#define SLAVE_SNOC_GEM_NOC_SF 10
+#define SLAVE_OCIMEM 11
+#define SLAVE_PIMEM 12
+#define SLAVE_SERVICE_SNOC 13
+#define SLAVE_PCIE_0 14
+#define SLAVE_PCIE_1 15
+#define SLAVE_PCIE_2 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
new file mode 100644
index 000000000000..c7f7ed315aeb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SM8350 interconnect IDs
+ *
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_A1NOC_CFG 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define MASTER_USB3_1 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_2 2
+#define MASTER_A2NOC_CFG 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_PCIE_0 6
+#define MASTER_PCIE_1 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_SDCC_2 9
+#define MASTER_UFS_CARD 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_ANOC_PCIE_GEM_NOC 12
+#define SLAVE_SERVICE_A2NOC 13
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_AHB2PHY_SOUTH 3
+#define SLAVE_AHB2PHY_NORTH 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MMCX_CFG 11
+#define SLAVE_RBCPR_MX_CFG 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CX_RDPM 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_GFX3D_CFG 17
+#define SLAVE_HWKM 18
+#define SLAVE_IMEM_CFG 19
+#define SLAVE_IPA_CFG 20
+#define SLAVE_IPC_ROUTER_CFG 21
+#define SLAVE_LPASS 22
+#define SLAVE_CNOC_MSS 23
+#define SLAVE_MX_RDPM 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PDM 27
+#define SLAVE_PIMEM_CFG 28
+#define SLAVE_PKA_WRAPPER_CFG 29
+#define SLAVE_PMU_WRAPPER_CFG 30
+#define SLAVE_QDSS_CFG 31
+#define SLAVE_QSPI_0 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_QUP_2 35
+#define SLAVE_SDCC_2 36
+#define SLAVE_SDCC_4 37
+#define SLAVE_SECURITY 38
+#define SLAVE_SPSS_CFG 39
+#define SLAVE_TCSR 40
+#define SLAVE_TLMM 41
+#define SLAVE_UFS_CARD_CFG 42
+#define SLAVE_UFS_MEM_CFG 43
+#define SLAVE_USB3_0 44
+#define SLAVE_USB3_1 45
+#define SLAVE_VENUS_CFG 46
+#define SLAVE_VSENSE_CTRL_CFG 47
+#define SLAVE_A1NOC_CFG 48
+#define SLAVE_A2NOC_CFG 49
+#define SLAVE_DDRSS_CFG 50
+#define SLAVE_CNOC_MNOC_CFG 51
+#define SLAVE_SNOC_CFG 52
+#define SLAVE_BOOT_IMEM 53
+#define SLAVE_IMEM 54
+#define SLAVE_PIMEM 55
+#define SLAVE_SERVICE_CNOC 56
+#define SLAVE_PCIE_0 57
+#define SLAVE_PCIE_1 58
+#define SLAVE_QDSS_STM 59
+#define SLAVE_TCU 60
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_MSS_PROC_MS_MPU_CFG 11
+#define SLAVE_MCDMA_MS_MPU_CFG 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+#define MASTER_MNOC_HF_MEM_NOC_DISP 19
+#define MASTER_MNOC_SF_MEM_NOC_DISP 20
+#define SLAVE_LLCC_DISP 21
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define SLAVE_LPASS_CORE_CFG 1
+#define SLAVE_LPASS_LPI_CFG 2
+#define SLAVE_LPASS_MPU_CFG 3
+#define SLAVE_LPASS_TOP_CFG 4
+#define SLAVE_SERVICES_LPASS_AML_NOC 5
+#define SLAVE_SERVICE_LPASS_AG_NOC 6
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_CNOC_MNOC_CFG 3
+#define MASTER_VIDEO_P0 4
+#define MASTER_VIDEO_P1 5
+#define MASTER_VIDEO_PROC 6
+#define MASTER_MDP0 7
+#define MASTER_MDP1 8
+#define MASTER_ROTATOR 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_MDP0_DISP 13
+#define MASTER_MDP1_DISP 14
+#define MASTER_ROTATOR_DISP 15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_PIMEM 3
+#define MASTER_GIC 4
+#define SLAVE_SNOC_GEM_NOC_GC 5
+#define SLAVE_SNOC_GEM_NOC_SF 6
+#define SLAVE_SERVICE_SNOC 7
+
+#endif
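
New in SM8350 relative to the earlier headers in this commit: selected endpoints are duplicated with a _DISP suffix (MASTER_MDP0_DISP, SLAVE_LLCC_DISP, SLAVE_EBI1_DISP and friends). These name the same physical ports a second time so that display bandwidth can be voted as a separate path and aggregated independently of the main vote. Whether a board references the _DISP IDs directly depends on the provider driver; the sketch below is purely illustrative and the labels are assumptions:

/* Hypothetical sketch; mmss_noc/mc_virt labels and the dual-path
 * wiring are illustrative, not taken from a shipped sm8350 dtsi. */
#include <dt-bindings/interconnect/qcom,sm8350.h>

&mdss_mdp {
	interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>,
			<&mmss_noc MASTER_MDP0_DISP &mc_virt SLAVE_EBI1_DISP>;
	interconnect-names = "mdp0-mem", "mdp0-mem-disp";
};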
diff --git a/include/dt-bindings/interconnect/qcom,sm8450.h b/include/dt-bindings/interconnect/qcom,sm8450.h
new file mode 100644
index 000000000000..8f3c5e1fb4c4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8450.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_A1NOC_CFG 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_2 2
+#define MASTER_A2NOC_CFG 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_SENSORS_PROC 6
+#define MASTER_SP 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_SDCC_2 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_SERVICE_A2NOC 12
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_AOSS 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CDSP_CFG 7
+#define SLAVE_RBCPR_CX_CFG 8
+#define SLAVE_RBCPR_MMCX_CFG 9
+#define SLAVE_RBCPR_MXA_CFG 10
+#define SLAVE_RBCPR_MXC_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_CX_RDPM 13
+#define SLAVE_DISPLAY_CFG 14
+#define SLAVE_GFX3D_CFG 15
+#define SLAVE_IMEM_CFG 16
+#define SLAVE_IPA_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_LPASS 19
+#define SLAVE_CNOC_MSS 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PDM 24
+#define SLAVE_PIMEM_CFG 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_TME_CFG 37
+#define SLAVE_UFS_MEM_CFG 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_DDRSS_CFG 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_PCIE_ANOC_CFG 46
+#define SLAVE_SNOC_CFG 47
+#define SLAVE_IMEM 48
+#define SLAVE_PIMEM 49
+#define SLAVE_SERVICE_CNOC 50
+#define SLAVE_PCIE_0 51
+#define SLAVE_PCIE_1 52
+#define SLAVE_QDSS_STM 53
+#define SLAVE_TCU 54
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_MSS_PROC 4
+#define MASTER_MNOC_HF_MEM_NOC 5
+#define MASTER_MNOC_SF_MEM_NOC 6
+#define MASTER_COMPUTE_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_GEM_NOC_CNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define MASTER_MNOC_HF_MEM_NOC_DISP 14
+#define MASTER_MNOC_SF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CNOC_MNOC_CFG 4
+#define MASTER_ROTATOR 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_PROC 9
+#define MASTER_VIDEO_V_PROC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+#define MASTER_MDP_DISP 14
+#define MASTER_ROTATOR_DISP 15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8550-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8550-rpmh.h
new file mode 100644
index 000000000000..b38d0da7886f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8550-rpmh.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8550_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8550_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_SDCC_4 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3_0 4
+#define SLAVE_A1NOC_SNOC 5
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SP 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_APPSS 3
+#define SLAVE_CAMERA_CFG 4
+#define SLAVE_CLK_CTL 5
+#define SLAVE_RBCPR_CX_CFG 6
+#define SLAVE_RBCPR_MMCX_CFG 7
+#define SLAVE_RBCPR_MXA_CFG 8
+#define SLAVE_RBCPR_MXC_CFG 9
+#define SLAVE_CPR_NSPCX 10
+#define SLAVE_CRYPTO_0_CFG 11
+#define SLAVE_CX_RDPM 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_GFX3D_CFG 14
+#define SLAVE_I2C 15
+#define SLAVE_IMEM_CFG 16
+#define SLAVE_IPA_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PRNG 25
+#define SLAVE_QDSS_CFG 26
+#define SLAVE_QSPI_0 27
+#define SLAVE_QUP_1 28
+#define SLAVE_QUP_2 29
+#define SLAVE_SDCC_2 30
+#define SLAVE_SDCC_4 31
+#define SLAVE_SPSS_CFG 32
+#define SLAVE_TCSR 33
+#define SLAVE_TLMM 34
+#define SLAVE_UFS_MEM_CFG 35
+#define SLAVE_USB3_0 36
+#define SLAVE_VENUS_CFG 37
+#define SLAVE_VSENSE_CTRL_CFG 38
+#define SLAVE_LPASS_QTB_CFG 39
+#define SLAVE_CNOC_MNOC_CFG 40
+#define SLAVE_NSP_QTB_CFG 41
+#define SLAVE_PCIE_ANOC_CFG 42
+#define SLAVE_QDSS_STM 43
+#define SLAVE_TCU 44
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_TME_CFG 3
+#define SLAVE_CNOC_CFG 4
+#define SLAVE_DDRSS_CFG 5
+#define SLAVE_BOOT_IMEM 6
+#define SLAVE_IMEM 7
+#define SLAVE_PCIE_0 8
+#define SLAVE_PCIE_1 9
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_GC_MEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define SLAVE_GEM_NOC_CNOC 12
+#define SLAVE_LLCC 13
+#define SLAVE_MEM_NOC_PCIE_SNOC 14
+#define MASTER_MNOC_HF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+#define MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 18
+#define MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 19
+#define MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0 20
+#define SLAVE_LLCC_CAM_IFE_0 21
+#define MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 22
+#define MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 23
+#define MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1 24
+#define SLAVE_LLCC_CAM_IFE_1 25
+#define MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 26
+#define MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 27
+#define MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2 28
+#define SLAVE_LLCC_CAM_IFE_2 29
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+#define MASTER_LLCC_CAM_IFE_0 4
+#define SLAVE_EBI1_CAM_IFE_0 5
+#define MASTER_LLCC_CAM_IFE_1 6
+#define SLAVE_EBI1_CAM_IFE_1 7
+#define MASTER_LLCC_CAM_IFE_2 8
+#define SLAVE_EBI1_CAM_IFE_2 9
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CDSP_HCP 4
+#define MASTER_VIDEO 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_MDP_DISP 13
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 14
+#define MASTER_CAMNOC_HF_CAM_IFE_0 15
+#define MASTER_CAMNOC_ICP_CAM_IFE_0 16
+#define MASTER_CAMNOC_SF_CAM_IFE_0 17
+#define SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 18
+#define SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 19
+#define MASTER_CAMNOC_HF_CAM_IFE_1 20
+#define MASTER_CAMNOC_ICP_CAM_IFE_1 21
+#define MASTER_CAMNOC_SF_CAM_IFE_1 22
+#define SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 23
+#define SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 24
+#define MASTER_CAMNOC_HF_CAM_IFE_2 25
+#define MASTER_CAMNOC_ICP_CAM_IFE_2 26
+#define MASTER_CAMNOC_SF_CAM_IFE_2 27
+#define SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 28
+#define SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 29
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GIC 3
+#define SLAVE_SNOC_GEM_NOC_GC 4
+#define SLAVE_SNOC_GEM_NOC_SF 5
+
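+/*
+ * Illustrative consumer usage (node labels and the path tag value are
+ * placeholders, not defined by this header); each path pairs a master
+ * on a source NoC with a slave on a destination NoC:
+ *
+ *   interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>;
+ *   interconnect-names = "ufs-ddr";
+ */
+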
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
new file mode 100644
index 000000000000..6c1eaf04e241
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SP 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_RBCPR_CX_CFG 5
+#define SLAVE_CPR_HMX 6
+#define SLAVE_RBCPR_MMCX_CFG 7
+#define SLAVE_RBCPR_MXA_CFG 8
+#define SLAVE_RBCPR_MXC_CFG 9
+#define SLAVE_CPR_NSPCX 10
+#define SLAVE_CRYPTO_0_CFG 11
+#define SLAVE_CX_RDPM 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_GFX3D_CFG 14
+#define SLAVE_I2C 15
+#define SLAVE_I3C_IBI0_CFG 16
+#define SLAVE_I3C_IBI1_CFG 17
+#define SLAVE_IMEM_CFG 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_2_RDPM 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PCIE_RSCC 24
+#define SLAVE_PDM 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_3 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB3_0 38
+#define SLAVE_VENUS_CFG 39
+#define SLAVE_VSENSE_CTRL_CFG 40
+#define SLAVE_CNOC_MNOC_CFG 41
+#define SLAVE_NSP_QTB_CFG 42
+#define SLAVE_PCIE_ANOC_CFG 43
+#define SLAVE_SERVICE_CNOC_CFG 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_TME_CFG 5
+#define SLAVE_APPSS 6
+#define SLAVE_CNOC_CFG 7
+#define SLAVE_DDRSS_CFG 8
+#define SLAVE_IMEM 9
+#define SLAVE_SERVICE_CNOC 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_UBWC_P_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MSS_PROC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_UBWC_P 12
+#define MASTER_GIC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CDSP_HCP 4
+#define MASTER_VIDEO 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
new file mode 100644
index 000000000000..7d9710881149
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_SDCC_4 2
+#define MASTER_UFS_MEM 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_QUP_0 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_SP 3
+#define MASTER_QDSS_ETR 4
+#define MASTER_QDSS_ETR_1 5
+#define MASTER_SDCC_2 6
+#define SLAVE_A2NOC_SNOC 7
+
+#define MASTER_DDR_PERF_MODE 0
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+#define SLAVE_DDR_PERF_MODE 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AV1_ENC_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_GFX3D_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_IPC_ROUTER_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3_CFG 15
+#define SLAVE_PCIE_4_CFG 16
+#define SLAVE_PCIE_5_CFG 17
+#define SLAVE_PCIE_6A_CFG 18
+#define SLAVE_PCIE_6B_CFG 19
+#define SLAVE_PCIE_RSC_CFG 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_LPASS_QTB_CFG 43
+#define SLAVE_CNOC_MNOC_CFG 44
+#define SLAVE_NSP_QTB_CFG 45
+#define SLAVE_QDSS_STM 46
+#define SLAVE_TCU 47
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_TME_CFG 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC_CFG 5
+#define SLAVE_BOOT_IMEM 6
+#define SLAVE_IMEM 7
+#define SLAVE_PCIE_0 8
+#define SLAVE_PCIE_1 9
+#define SLAVE_PCIE_2 10
+#define SLAVE_PCIE_3 11
+#define SLAVE_PCIE_4 12
+#define SLAVE_PCIE_5 13
+#define SLAVE_PCIE_6A 14
+#define SLAVE_PCIE_6B 15
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_GIC2 11
+#define SLAVE_GEM_NOC_CNOC 12
+#define SLAVE_LLCC 13
+#define SLAVE_MEM_NOC_PCIE_SNOC 14
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_VIDEO 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_NORTH 0
+#define MASTER_PCIE_SOUTH 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+#define MASTER_PCIE_3 0
+#define MASTER_PCIE_4 1
+#define MASTER_PCIE_5 2
+#define SLAVE_PCIE_NORTH 3
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define MASTER_PCIE_6A 3
+#define MASTER_PCIE_6B 4
+#define SLAVE_PCIE_SOUTH 5
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_GIC1 2
+#define MASTER_USB_NOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#define MASTER_AGGRE_USB_NORTH 0
+#define MASTER_AGGRE_USB_SOUTH 1
+#define SLAVE_USB_NOC_SNOC 2
+
+#define MASTER_USB2 0
+#define MASTER_USB3_MP 1
+#define SLAVE_AGGRE_USB_NORTH 2
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB3_2 2
+#define MASTER_USB4_0 3
+#define MASTER_USB4_1 4
+#define MASTER_USB4_2 5
+#define SLAVE_AGGRE_USB_SOUTH 6
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
new file mode 100644
index 000000000000..bd415cb7b669
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+#define _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+
+/* IRQID[11:0] - GPIOAO[11:0] */
+#define IRQID_GPIOAO_0 0
+#define IRQID_GPIOAO_1 1
+#define IRQID_GPIOAO_2 2
+#define IRQID_GPIOAO_3 3
+#define IRQID_GPIOAO_4 4
+#define IRQID_GPIOAO_5 5
+#define IRQID_GPIOAO_6 6
+#define IRQID_GPIOAO_7 7
+#define IRQID_GPIOAO_8 8
+#define IRQID_GPIOAO_9 9
+#define IRQID_GPIOAO_10 10
+#define IRQID_GPIOAO_11 11
+
+/* IRQID[27:12] - GPIOZ[15:0] */
+#define IRQID_GPIOZ_0 12
+#define IRQID_GPIOZ_1 13
+#define IRQID_GPIOZ_2 14
+#define IRQID_GPIOZ_3 15
+#define IRQID_GPIOZ_4 16
+#define IRQID_GPIOZ_5 17
+#define IRQID_GPIOZ_6 18
+#define IRQID_GPIOZ_7 19
+#define IRQID_GPIOZ_8 20
+#define IRQID_GPIOZ_9 21
+#define IRQID_GPIOZ_10 22
+#define IRQID_GPIOZ_11 23
+#define IRQID_GPIOZ_12 24
+#define IRQID_GPIOZ_13 25
+#define IRQID_GPIOZ_14 26
+#define IRQID_GPIOZ_15 27
+
+/* IRQID[36:28] - GPIOH[8:0] */
+#define IRQID_GPIOH_0 28
+#define IRQID_GPIOH_1 29
+#define IRQID_GPIOH_2 30
+#define IRQID_GPIOH_3 31
+#define IRQID_GPIOH_4 32
+#define IRQID_GPIOH_5 33
+#define IRQID_GPIOH_6 34
+#define IRQID_GPIOH_7 35
+#define IRQID_GPIOH_8 36
+
+/* IRQID[52:37] - BOOT[15:0] */
+#define IRQID_BOOT_0 37
+#define IRQID_BOOT_1 38
+#define IRQID_BOOT_2 39
+#define IRQID_BOOT_3 40
+#define IRQID_BOOT_4 41
+#define IRQID_BOOT_5 42
+#define IRQID_BOOT_6 43
+#define IRQID_BOOT_7 44
+#define IRQID_BOOT_8 45
+#define IRQID_BOOT_9 46
+#define IRQID_BOOT_10 47
+#define IRQID_BOOT_11 48
+#define IRQID_BOOT_12 49
+#define IRQID_BOOT_13 50
+#define IRQID_BOOT_14 51
+#define IRQID_BOOT_15 52
+
+/* IRQID[60:53] - GPIOC[7:0] */
+#define IRQID_GPIOC_0 53
+#define IRQID_GPIOC_1 54
+#define IRQID_GPIOC_2 55
+#define IRQID_GPIOC_3 56
+#define IRQID_GPIOC_4 57
+#define IRQID_GPIOC_5 58
+#define IRQID_GPIOC_6 59
+#define IRQID_GPIOC_7 60
+
+/* IRQID[76:61] - GPIOA[15:0] */
+#define IRQID_GPIOA_0 61
+#define IRQID_GPIOA_1 62
+#define IRQID_GPIOA_2 63
+#define IRQID_GPIOA_3 64
+#define IRQID_GPIOA_4 65
+#define IRQID_GPIOA_5 66
+#define IRQID_GPIOA_6 67
+#define IRQID_GPIOA_7 68
+#define IRQID_GPIOA_8 69
+#define IRQID_GPIOA_9 70
+#define IRQID_GPIOA_10 71
+#define IRQID_GPIOA_11 72
+#define IRQID_GPIOA_12 73
+#define IRQID_GPIOA_13 74
+#define IRQID_GPIOA_14 75
+#define IRQID_GPIOA_15 76
+
+/* IRQID[96:77] - GPIOX[19:0] */
+#define IRQID_GPIOX_0 77
+#define IRQID_GPIOX_1 78
+#define IRQID_GPIOX_2 79
+#define IRQID_GPIOX_3 80
+#define IRQID_GPIOX_4 81
+#define IRQID_GPIOX_5 82
+#define IRQID_GPIOX_6 83
+#define IRQID_GPIOX_7 84
+#define IRQID_GPIOX_8 85
+#define IRQID_GPIOX_9 86
+#define IRQID_GPIOX_10 87
+#define IRQID_GPIOX_11 88
+#define IRQID_GPIOX_12 89
+#define IRQID_GPIOX_13 90
+#define IRQID_GPIOX_14 91
+#define IRQID_GPIOX_15 92
+#define IRQID_GPIOX_16 93
+#define IRQID_GPIOX_17 94
+#define IRQID_GPIOX_18 95
+#define IRQID_GPIOX_19 96
+
+/* IRQID[99:97] - GPIOE[2:0] */
+#define IRQID_GPIOE_0 97
+#define IRQID_GPIOE_1 98
+#define IRQID_GPIOE_2 99
+
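+/*
+ * Illustrative consumer usage (&gpio_intc is a placeholder label for the
+ * GPIO interrupt controller node; IRQ_TYPE_* flags come from
+ * dt-bindings/interrupt-controller/irq.h):
+ *
+ *   interrupts-extended = <&gpio_intc IRQID_GPIOH_4 IRQ_TYPE_LEVEL_LOW>;
+ */
+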
+#endif /* _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H */
diff --git a/include/dt-bindings/interrupt-controller/apple-aic.h b/include/dt-bindings/interrupt-controller/apple-aic.h
new file mode 100644
index 000000000000..bf3aac0e5491
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/apple-aic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_APPLE_AIC_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_APPLE_AIC_H
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#define AIC_IRQ 0
+#define AIC_FIQ 1
+
+#define AIC_TMR_HV_PHYS 0
+#define AIC_TMR_HV_VIRT 1
+#define AIC_TMR_GUEST_PHYS 2
+#define AIC_TMR_GUEST_VIRT 3
+#define AIC_CPU_PMU_E 4
+#define AIC_CPU_PMU_P 5
+
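+/*
+ * Illustrative consumer usage: cell 0 selects the interrupt kind, cell 1
+ * the interrupt number (for AIC_FIQ, one of the indices above; the 0x100
+ * used with AIC_IRQ is a placeholder), cell 2 the trigger flags:
+ *
+ *   interrupts = <AIC_IRQ 0x100 IRQ_TYPE_LEVEL_HIGH>;
+ *   interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>;
+ */
+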
+#endif
diff --git a/include/dt-bindings/interrupt-controller/irqc-rzg2l.h b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
new file mode 100644
index 000000000000..34ce778885a1
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family IRQC bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_IRQC_RZG2L_H
+#define __DT_BINDINGS_IRQC_RZG2L_H
+
+/* NMI maps to SPI0 */
+#define RZG2L_NMI 0
+
+/* IRQ0-7 map to SPI1-8 */
+#define RZG2L_IRQ0 1
+#define RZG2L_IRQ1 2
+#define RZG2L_IRQ2 3
+#define RZG2L_IRQ3 4
+#define RZG2L_IRQ4 5
+#define RZG2L_IRQ5 6
+#define RZG2L_IRQ6 7
+#define RZG2L_IRQ7 8
+
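+/*
+ * Illustrative consumer usage (&irqc is a placeholder label for the IRQC
+ * node; IRQ_TYPE_* flags come from dt-bindings/interrupt-controller/irq.h):
+ *
+ *   interrupt-parent = <&irqc>;
+ *   interrupts = <RZG2L_IRQ2 IRQ_TYPE_EDGE_FALLING>;
+ */
+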
+#endif /* __DT_BINDINGS_IRQC_RZG2L_H */
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
index 52b619d44ba2..ecea167930d9 100644
--- a/include/dt-bindings/leds/common.h
+++ b/include/dt-bindings/leds/common.h
@@ -33,7 +33,12 @@
#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */
#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color,
so this would include RGBW and similar */
-#define LED_COLOR_ID_MAX 10
+#define LED_COLOR_ID_PURPLE 10
+#define LED_COLOR_ID_ORANGE 11
+#define LED_COLOR_ID_PINK 12
+#define LED_COLOR_ID_CYAN 13
+#define LED_COLOR_ID_LIME 14
+#define LED_COLOR_ID_MAX 15

/* Standard LED functions */
/* Keyboard LEDs, usually it would be input4::capslock etc. */
@@ -60,6 +65,13 @@
#define LED_FUNCTION_MICMUTE "micmute"
#define LED_FUNCTION_MUTE "mute"

+/* Used for player LEDs as found on game controllers from e.g. Nintendo, Sony. */
+#define LED_FUNCTION_PLAYER1 "player-1"
+#define LED_FUNCTION_PLAYER2 "player-2"
+#define LED_FUNCTION_PLAYER3 "player-3"
+#define LED_FUNCTION_PLAYER4 "player-4"
+#define LED_FUNCTION_PLAYER5 "player-5"
+
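+/*
+ * Illustrative usage in an LED node (the node name is a placeholder):
+ *
+ *   led-0 {
+ *           color = <LED_COLOR_ID_WHITE>;
+ *           function = LED_FUNCTION_PLAYER1;
+ *   };
+ */
+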
/* Miscellaneous functions. Use functions above if you can. */
#define LED_FUNCTION_ACTIVITY "activity"
#define LED_FUNCTION_ALARM "alarm"
@@ -88,7 +100,11 @@
#define LED_FUNCTION_TX "tx"
#define LED_FUNCTION_USB "usb"
#define LED_FUNCTION_WAN "wan"
+#define LED_FUNCTION_WAN_ONLINE "wan-online"
#define LED_FUNCTION_WLAN "wlan"
+#define LED_FUNCTION_WLAN_2GHZ "wlan-2ghz"
+#define LED_FUNCTION_WLAN_5GHZ "wlan-5ghz"
+#define LED_FUNCTION_WLAN_6GHZ "wlan-6ghz"
#define LED_FUNCTION_WPS "wps"

#endif /* __DT_BINDINGS_LEDS_H */
diff --git a/include/dt-bindings/leds/leds-lp55xx.h b/include/dt-bindings/leds/leds-lp55xx.h
new file mode 100644
index 000000000000..a4fb4567715d
--- /dev/null
+++ b/include/dt-bindings/leds/leds-lp55xx.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_LEDS_LP55XX_H
+#define _DT_BINDINGS_LEDS_LP55XX_H
+
+#define LP55XX_CP_OFF 0
+#define LP55XX_CP_BYPASS 1
+#define LP55XX_CP_BOOST 2
+#define LP55XX_CP_AUTO 3
+
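+/*
+ * Illustrative usage; the property name below is an assumption, check the
+ * lp55xx DT schema for the exact spelling:
+ *
+ *   ti,charge-pump-mode = <LP55XX_CP_AUTO>;
+ */
+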
+#endif /* _DT_BINDINGS_LEDS_LP55XX_H */
diff --git a/include/dt-bindings/leds/rt4831-backlight.h b/include/dt-bindings/leds/rt4831-backlight.h
new file mode 100644
index 000000000000..125c6351bba0
--- /dev/null
+++ b/include/dt-bindings/leds/rt4831-backlight.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for rt4831 backlight bindings.
+ *
+ * Copyright (C) 2020, Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#ifndef _DT_BINDINGS_RT4831_BACKLIGHT_H
+#define _DT_BINDINGS_RT4831_BACKLIGHT_H
+
+#define RT4831_BLOVPLVL_17V 0
+#define RT4831_BLOVPLVL_21V 1
+#define RT4831_BLOVPLVL_25V 2
+#define RT4831_BLOVPLVL_29V 3
+
+#define RT4831_BLED_CH1EN (1 << 0)
+#define RT4831_BLED_CH2EN (1 << 1)
+#define RT4831_BLED_CH3EN (1 << 2)
+#define RT4831_BLED_CH4EN (1 << 3)
+#define RT4831_BLED_ALLCHEN ((1 << 4) - 1)
+
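+/*
+ * The RT4831_BLED_CHxEN macros are single-bit flags that may be ORed
+ * together; RT4831_BLED_ALLCHEN expands to ((1 << 4) - 1) == 0xf, i.e.
+ * all four channel bits set. Illustrative usage (property names are
+ * assumptions, check the rt4831 backlight DT schema):
+ *
+ *   richtek,bled-ovp-sel = <RT4831_BLOVPLVL_21V>;
+ *   richtek,bled-channel-use = <(RT4831_BLED_CH1EN | RT4831_BLED_CH2EN)>;
+ */
+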
+#endif /* _DT_BINDINGS_RT4831_BACKLIGHT_H */
diff --git a/include/dt-bindings/mailbox/mediatek,mt8188-gce.h b/include/dt-bindings/mailbox/mediatek,mt8188-gce.h
new file mode 100644
index 000000000000..119865787b47
--- /dev/null
+++ b/include/dt-bindings/mailbox/mediatek,mt8188-gce.h
@@ -0,0 +1,967 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ */
+#ifndef _DT_BINDINGS_GCE_MT8188_H
+#define _DT_BINDINGS_GCE_MT8188_H
+
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+#define SUBSYS_1400XXXX 0
+#define SUBSYS_1401XXXX 1
+#define SUBSYS_1402XXXX 2
+#define SUBSYS_1c00XXXX 3
+#define SUBSYS_1c01XXXX 4
+#define SUBSYS_1c02XXXX 5
+#define SUBSYS_1c10XXXX 6
+#define SUBSYS_1c11XXXX 7
+#define SUBSYS_1c12XXXX 8
+#define SUBSYS_14f0XXXX 9
+#define SUBSYS_14f1XXXX 10
+#define SUBSYS_14f2XXXX 11
+#define SUBSYS_1800XXXX 12
+#define SUBSYS_1801XXXX 13
+#define SUBSYS_1802XXXX 14
+#define SUBSYS_1803XXXX 15
+#define SUBSYS_1032XXXX 16
+#define SUBSYS_1033XXXX 17
+#define SUBSYS_1600XXXX 18
+#define SUBSYS_1601XXXX 19
+#define SUBSYS_14e0XXXX 20
+#define SUBSYS_1c20XXXX 21
+#define SUBSYS_1c30XXXX 22
+#define SUBSYS_1c40XXXX 23
+#define SUBSYS_1c50XXXX 24
+#define SUBSYS_1c60XXXX 25
+#define SUBSYS_NO_SUPPORT 99
+
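+/*
+ * Each SUBSYS_* value is a GCE subsys ID whose name encodes the 64 KiB
+ * physical address range it covers (SUBSYS_1400XXXX maps bus addresses
+ * 0x1400_0000..0x1400_ffff, and so on). Illustrative client usage, with
+ * placeholder phandle, register offset and size:
+ *
+ *   mboxes = <&gce0 0 CMDQ_THR_PRIO_HIGHEST>;
+ *   mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x0000 0x1000>;
+ */
+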
+#define CMDQ_EVENT_IMG_SOF 0
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_0 1
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_1 2
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_2 3
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_3 4
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_4 5
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_5 6
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_6 7
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_7 8
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_8 9
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_9 10
+#define CMDQ_EVENT_IMG_TRAW0_DMA_ERROR_INT 11
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_0 12
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_1 13
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_2 14
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_3 15
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_4 16
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_5 17
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_6 18
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_7 19
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_8 20
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_9 21
+#define CMDQ_EVENT_IMG_TRAW1_DMA_ERROR_INT 22
+#define CMDQ_EVENT_IMG_ADL_RESERVED 23
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_0 24
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_1 25
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_2 26
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_3 27
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_4 28
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_5 29
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_6 30
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_7 31
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_8 32
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_9 33
+#define CMDQ_EVENT_IMG_DIP_DMA_ERR 34
+#define CMDQ_EVENT_IMG_DIP_NR_DMA_ERR 35
+#define CMDQ_EVENT_DIP_DUMMY_0 36
+#define CMDQ_EVENT_DIP_DUMMY_1 37
+#define CMDQ_EVENT_DIP_DUMMY_2 38
+#define CMDQ_EVENT_IMG_WPE_EIS_GCE_FRAME_DONE 39
+#define CMDQ_EVENT_IMG_WPE_EIS_DONE_SYNC_OUT 40
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_0 41
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_1 42
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_2 43
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_3 44
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_4 45
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_5 46
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_6 47
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_7 48
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_8 49
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_9 50
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_0 51
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_1 52
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_2 53
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_3 54
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_4 55
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_5 56
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_6 57
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_7 58
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_8 59
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_9 60
+#define CMDQ_EVENT_IMG_PQDIP_A_DMA_ERR 61
+#define CMDQ_EVENT_WPE0_DUMMY_0 62
+#define CMDQ_EVENT_WPE0_DUMMY_1 63
+#define CMDQ_EVENT_WPE0_DUMMY_2 64
+#define CMDQ_EVENT_IMG_WPE_TNR_GCE_FRAME_DONE 65
+#define CMDQ_EVENT_IMG_WPE_TNR_DONE_SYNC_OUT 66
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_0 67
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_1 68
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_2 69
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_3 70
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_4 71
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_5 72
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_6 73
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_7 74
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_8 75
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_9 76
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_0 77
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_1 78
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_2 79
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_3 80
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_4 81
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_5 82
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_6 83
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_7 84
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_8 85
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_9 86
+#define CMDQ_EVENT_IMG_PQDIP_B_DMA_ERR 87
+#define CMDQ_EVENT_WPE1_DUMMY_0 88
+#define CMDQ_EVENT_WPE1_DUMMY_1 89
+#define CMDQ_EVENT_WPE1_DUMMY_2 90
+#define CMDQ_EVENT_IMG_WPE_LITE_GCE_FRAME_DONE 91
+#define CMDQ_EVENT_IMG_WPE_LITE_DONE_SYNC_OUT 92
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_0 93
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_1 94
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_2 95
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_3 96
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_4 97
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_5 98
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_6 99
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_7 100
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_8 101
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_9 102
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_0 103
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_1 104
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_2 105
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_3 106
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_4 107
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_5 108
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_6 109
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_7 110
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_8 111
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_9 112
+#define CMDQ_EVENT_IMG_XTRAW_DMA_ERR_EVENT 113
+#define CMDQ_EVENT_WPE2_DUMMY_0 114
+#define CMDQ_EVENT_WPE2_DUMMY_1 115
+#define CMDQ_EVENT_WPE2_DUMMY_2 116
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_DUMMY 117
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_FDVT_DONE 118
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_ME_DONE 119
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_DVS_DONE 120
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_DVP_DONE 121
+#define CMDQ_EVENT_FDVT1_RESERVED 122
+#define CMDQ_EVENT_IMG_ENG_EVENT 123
+#define CMDQ_EVENT_CAMSUBA_SW_PASS1_DONE 129
+#define CMDQ_EVENT_CAMSUBB_SW_PASS1_DONE 130
+#define CMDQ_EVENT_CAMSUBC_SW_PASS1_DONE 131
+#define CMDQ_EVENT_GCAMSV_A_1_SW_PASS1_DONE 132
+#define CMDQ_EVENT_GCAMSV_A_2_SW_PASS1_DONE 133
+#define CMDQ_EVENT_GCAMSV_B_1_SW_PASS1_DONE 134
+#define CMDQ_EVENT_GCAMSV_B_2_SW_PASS1_DONE 135
+#define CMDQ_EVENT_GCAMSV_C_1_SW_PASS1_DONE 136
+#define CMDQ_EVENT_GCAMSV_C_2_SW_PASS1_DONE 137
+#define CMDQ_EVENT_GCAMSV_D_1_SW_PASS1_DONE 138
+#define CMDQ_EVENT_GCAMSV_D_2_SW_PASS1_DONE 139
+#define CMDQ_EVENT_GCAMSV_E_1_SW_PASS1_DONE 140
+#define CMDQ_EVENT_GCAMSV_E_2_SW_PASS1_DONE 141
+#define CMDQ_EVENT_GCAMSV_F_1_SW_PASS1_DONE 142
+#define CMDQ_EVENT_GCAMSV_F_2_SW_PASS1_DONE 143
+#define CMDQ_EVENT_GCAMSV_G_1_SW_PASS1_DONE 144
+#define CMDQ_EVENT_GCAMSV_G_2_SW_PASS1_DONE 145
+#define CMDQ_EVENT_GCAMSV_H_1_SW_PASS1_DONE 146
+#define CMDQ_EVENT_GCAMSV_H_2_SW_PASS1_DONE 147
+#define CMDQ_EVENT_GCAMSV_I_1_SW_PASS1_DONE 148
+#define CMDQ_EVENT_GCAMSV_I_2_SW_PASS1_DONE 149
+#define CMDQ_EVENT_GCAMSV_J_1_SW_PASS1_DONE 150
+#define CMDQ_EVENT_GCAMSV_J_2_SW_PASS1_DONE 151
+#define CMDQ_EVENT_MRAW_0_SW_PASS1_DONE 152
+#define CMDQ_EVENT_MRAW_1_SW_PASS1_DONE 153
+#define CMDQ_EVENT_MRAW_2_SW_PASS1_DONE 154
+#define CMDQ_EVENT_MRAW_3_SW_PASS1_DONE 155
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 156
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 157
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 158
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 159
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 160
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 161
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 162
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 163
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 164
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 165
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 166
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 167
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 168
+#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL 169
+#define CMDQ_EVENT_SENINF_CAM14_FIFO_FULL 170
+#define CMDQ_EVENT_SENINF_CAM15_FIFO_FULL 171
+#define CMDQ_EVENT_SENINF_CAM16_FIFO_FULL 172
+#define CMDQ_EVENT_SENINF_CAM17_FIFO_FULL 173
+#define CMDQ_EVENT_SENINF_CAM18_FIFO_FULL 174
+#define CMDQ_EVENT_SENINF_CAM19_FIFO_FULL 175
+#define CMDQ_EVENT_SENINF_CAM20_FIFO_FULL 176
+#define CMDQ_EVENT_SENINF_CAM21_FIFO_FULL 177
+#define CMDQ_EVENT_SENINF_CAM22_FIFO_FULL 178
+#define CMDQ_EVENT_SENINF_CAM23_FIFO_FULL 179
+#define CMDQ_EVENT_SENINF_CAM24_FIFO_FULL 180
+#define CMDQ_EVENT_SENINF_CAM25_FIFO_FULL 181
+#define CMDQ_EVENT_SENINF_CAM26_FIFO_FULL 182
+#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT 183
+#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT 184
+#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 185
+#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 186
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 187
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 188
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 189
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 190
+#define CMDQ_EVENT_PDA0_IRQO_EVENT_DONE_D1 191
+#define CMDQ_EVENT_PDA1_IRQO_EVENT_DONE_D1 192
+#define CMDQ_EVENT_CAM_SUBA_TG_INT1 193
+#define CMDQ_EVENT_CAM_SUBA_TG_INT2 194
+#define CMDQ_EVENT_CAM_SUBA_TG_INT3 195
+#define CMDQ_EVENT_CAM_SUBA_TG_INT4 196
+#define CMDQ_EVENT_CAM_SUBB_TG_INT1 197
+#define CMDQ_EVENT_CAM_SUBB_TG_INT2 198
+#define CMDQ_EVENT_CAM_SUBB_TG_INT3 199
+#define CMDQ_EVENT_CAM_SUBB_TG_INT4 200
+#define CMDQ_EVENT_CAM_SUBC_TG_INT1 201
+#define CMDQ_EVENT_CAM_SUBC_TG_INT2 202
+#define CMDQ_EVENT_CAM_SUBC_TG_INT3 203
+#define CMDQ_EVENT_CAM_SUBC_TG_INT4 204
+#define CMDQ_EVENT_CAM_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 205
+#define CMDQ_EVENT_CAM_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 206
+#define CMDQ_EVENT_CAM_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 207
+#define CMDQ_EVENT_CAM_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 208
+#define CMDQ_EVENT_CAM_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 209
+#define CMDQ_EVENT_CAM_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 210
+#define CMDQ_EVENT_CAM_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 211
+#define CMDQ_EVENT_CAM_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 212
+#define CMDQ_EVENT_CAM_SUBC_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 213
+#define CMDQ_EVENT_CAM_SUBC_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 214
+#define CMDQ_EVENT_CAM_SUBC_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 215
+#define CMDQ_EVENT_CAM_SUBC_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 216
+#define CMDQ_EVENT_RAW_SEL_SOF_SUBA 217
+#define CMDQ_EVENT_RAW_SEL_SOF_SUBB 218
+#define CMDQ_EVENT_RAW_SEL_SOF_SUBC 219
+#define CMDQ_EVENT_CAM_SUBA_RING_BUFFER_OVERFLOW_INT_IN 220
+#define CMDQ_EVENT_CAM_SUBB_RING_BUFFER_OVERFLOW_INT_IN 221
+#define CMDQ_EVENT_CAM_SUBC_RING_BUFFER_OVERFLOW_INT_IN 222
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256
+#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257
+#define CMDQ_EVENT_VPP0_STITCH_SOF 258
+#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259
+#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260
+#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262
+#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265
+#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266
+#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267
+#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269
+#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270
+#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271
+#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272
+#define CMDQ_EVENT_VPP0_DISP_RDMA_SOF 273
+#define CMDQ_EVENT_VPP0_DISP_WDMA_SOF 274
+#define CMDQ_EVENT_VPP0_MDP_HMS_SOF 275
+#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288
+#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289
+#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290
+#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291
+#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292
+#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294
+#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297
+#define CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298
+#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299
+#define CMDQ_EVENT_VPP0_DISP_RDMA_FRAME_DONE 305
+#define CMDQ_EVENT_VPP0_DISP_WDMA_FRAME_DONE 306
+#define CMDQ_EVENT_VPP0_MDP_HMS_FRAME_DONE 307
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_0 320
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_1 321
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_2 322
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_3 323
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_4 324
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_5 325
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_6 326
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_7 327
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_8 328
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_9 329
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_10 330
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_11 331
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_12 332
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_13 333
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_14 334
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_15 335
+#define CMDQ_EVENT_VPP0_DISP_RDMA_0_UNDERRUN 336
+#define CMDQ_EVENT_VPP0_DISP_RDMA_1_UNDERRUN 337
+#define CMDQ_EVENT_VPP0_U_MERGE4_UNDERRUN 338
+#define CMDQ_EVENT_VPP0_U_VPP_SPLIT_VIDEO_0_OVERFLOW 339
+#define CMDQ_EVENT_VPP0_U_VPP_SPLIT_VIDEO_1_OVERFLOW 340
+#define CMDQ_EVENT_VPP0_DSI_0_UNDERRUN 341
+#define CMDQ_EVENT_VPP0_DSI_1_UNDERRUN 342
+#define CMDQ_EVENT_VPP0_DP_INTF_0 343
+#define CMDQ_EVENT_VPP0_DP_INTF_1 344
+#define CMDQ_EVENT_VPP0_DPI_0 345
+#define CMDQ_EVENT_VPP0_DPI_1 346
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352
+#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID_EVENT 353
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354
+#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_0 356
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_1 357
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_2 358
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_3 359
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_4 360
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_5 361
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_6 362
+#define CMDQ_EVENT_VPP0_DISP_RDMA_DISP_RDMA_VALID_EVENT 363
+#define CMDQ_EVENT_VPP0_DISP_RDMA_DISP_RDMA_TARGET_LINE_EVENT 364
+#define CMDQ_EVENT_VPP0_DISP_WDMA_SW_RST_DONE 365
+#define CMDQ_EVENT_VPP0_DISP_WDMA_WDMA_VALID_EVENT 366
+#define CMDQ_EVENT_VPP0_DISP_WDMA_WDMA_TARGET_LINE_EVENT 367
+#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384
+#define CMDQ_EVENT_VPP1_DGI_SOF 385
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TDSHP_SOF 403
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_TDSHP_SOF 404
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_TDSHP_SOF 405
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417
+#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418
+#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 434
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 435
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 436
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_FRAME_DONE 437
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_FRAME_DONE 438
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_FRAME_DONE 439
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_FRAME_DONE 440
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_FRAME_DONE 441
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_FRAME_DONE 442
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TDSHP_FRAME_DONE 443
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_TDSHP_FRAME_DONE 444
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_TDSHP_FRAME_DONE 445
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_FRAME_DONE 446
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_FRAME_DONE 447
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_FRAME_DONE 448
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_FRAME_DONE 449
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_FRAME_DONE 450
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_FRAME_DONE 451
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_FRAME_DONE 452
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_0 456
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_1 457
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_2 458
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_3 459
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_4 460
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_5 461
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_6 462
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_7 463
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_8 464
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_9 465
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_10 466
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_11 467
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_12 468
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_13 469
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_14 470
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_15 471
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_0 472
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_1 473
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_2 474
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_3 475
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_4 476
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_5 477
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_6 478
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_7 479
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_8 480
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_9 481
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_10 482
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_11 483
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_12 484
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_13 485
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_14 486
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_15 487
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_0 488
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_1 489
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_2 490
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_3 491
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_4 492
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_5 493
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_6 494
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_7 495
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_8 496
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_9 497
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_10 498
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_11 499
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_12 500
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_13 501
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_GCE_EVENT 502
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_GCE_EVENT 503
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_GCE_EVENT 504
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI_GCE_EVENT 505
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI_GCE_EVENT 506
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE_GCE_EVENT 507
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE_GCE_EVENT 508
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE_GCE_EVENT 509
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_NEW_EVENT_0 510
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_NEW_EVENT_1 511
+#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516
+#define CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519
+#define CMDQ_EVENT_VDO0_DSI0_SOF 520
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521
+#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526
+#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529
+#define CMDQ_EVENT_VDO0_DSI1_SOF 530
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532
+#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533
+#define CMDQ_EVENT_VDO0_DISP_DPI0_SOF 534
+#define CMDQ_EVENT_VDO0_DISP_DPI1_SOF 535
+#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_SOF 536
+#define CMDQ_EVENT_VDO0_MDP_WROT0_SOF 537
+#define CMDQ_EVENT_VDO0_DISP_RSZ0_SOF 538
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 539
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 540
+#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 541
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 542
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 543
+#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 544
+#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 545
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_FRAME_DONE 546
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 547
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 548
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_O_FRAME_DONE 549
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_O_FRAME_DONE 550
+#define CMDQ_EVENT_VDO0_DISP_AAL0_O_FRAME_DONE 551
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_O_FRAME_DONE 552
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_O_FRAME_DONE 553
+#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 554
+#define CMDQ_EVENT_VDO0_DSC_WRAP0_O_FRAME_DONE_0 555
+#define CMDQ_EVENT_VDO0_DISP_OVL1_O_FRAME_DONE 556
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_O_FRAME_DONE 557
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_O_FRAME_DONE 558
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_O_FRAME_DONE 559
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_O_FRAME_DONE 560
+#define CMDQ_EVENT_VDO0_DISP_AAL1_O_FRAME_DONE 561
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_O_FRAME_DONE 562
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_O_FRAME_DONE 563
+#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 564
+#define CMDQ_EVENT_VDO0_DSC_WRAP0_O_FRAME_DONE_1 565
+#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 567
+#define CMDQ_EVENT_VDO0_DISP_DPI0_O_FRAME_DONE 568
+#define CMDQ_EVENT_VDO0_DISP_DPI1_O_FRAME_DONE 569
+#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_O_FRAME_DONE 570
+#define CMDQ_EVENT_VDO0_MDP_WROT0_O_FRAME_DONE 571
+#define CMDQ_EVENT_VDO0_DISP_RSZ0_O_FRAME_DONE 572
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 574
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 575
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 576
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 577
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 578
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 579
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 580
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 581
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 582
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 583
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 584
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 585
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 586
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 587
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 588
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 589
+#define CMDQ_EVENT_VDO0_DISP_RDMA_0_UNDERRUN 590
+#define CMDQ_EVENT_VDO0_DISP_RDMA_1_UNDERRUN 591
+#define CMDQ_EVENT_VDO0_U_MERGE4_UNDERRUN 592
+#define CMDQ_EVENT_VDO0_DSI_0_UNDERRUN 595
+#define CMDQ_EVENT_VDO0_DSI_1_UNDERRUN 596
+#define CMDQ_EVENT_VDO0_DP_INTF_0 597
+#define CMDQ_EVENT_VDO0_DP_INTF_1 598
+#define CMDQ_EVENT_VDO0_DPI_0 599
+#define CMDQ_EVENT_VDO0_DPI_1 600
+#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG_EVENT 606
+#define CMDQ_EVENT_VDO0_DSI0_O_DSI_IRQ_EVENT_MM 607
+#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 608
+#define CMDQ_EVENT_VDO0_DSI0_O_DSI_DONE_EVENT_MM 609
+#define CMDQ_EVENT_VDO0_DSI0_O_DSI_VACTL_EVENT_MM 610
+#define CMDQ_EVENT_VDO0_DSI1_O_DSI_IRQ_EVENT_MM 611
+#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 612
+#define CMDQ_EVENT_VDO0_DSI1_O_DSI_DONE_EVENT_MM 613
+#define CMDQ_EVENT_VDO0_DSI1_O_DSI_VACTL_EVENT_MM 614
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VSYNC_START_EVENT_MM 615
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VSYNC_END_EVENT_MM 616
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VDE_START_EVENT_MM 617
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VDE_END_EVENT_MM 618
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_VACT_TARGET_LINE_EVENT_MM 619
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_LAST_SAFE_BLANK_EVENT_MM 620
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_LAST_LINE_EVENT_MM 621
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_TRIGGER_LOOP_CLEAR_EVENT_MM 622
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_TARGET_LINE_0_EVENT_MM 623
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_TARGET_LINE_1_EVENT_MM 624
+#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_O_FRAME_RESET_DONE_PULSE 625
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_O_VPP_MERGE_EVENT 626
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_FRAME_RESET_DONE_PULSE 627
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_O_DISP_RDMA_TARGET_LINE_EVENT 628
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_O_WDMA_TARGET_LINE_EVENT 629
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_O_SW_RST_DONE 630
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_0 631
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_1 632
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_2 633
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_3 634
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_4 635
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_5 636
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_6 637
+#define CMDQ_EVENT_VDO0_MDP_WROT0_O_SW_RST_DONE 638
+#define CMDQ_EVENT_VDO0_RESERVED 639
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647
+#define CMDQ_EVENT_VDO1_DISP_PADDING0_SOF 648
+#define CMDQ_EVENT_VDO1_DISP_PADDING1_SOF 649
+#define CMDQ_EVENT_VDO1_DISP_PADDING2_SOF 650
+#define CMDQ_EVENT_VDO1_DISP_PADDING3_SOF 651
+#define CMDQ_EVENT_VDO1_DISP_PADDING4_SOF 652
+#define CMDQ_EVENT_VDO1_DISP_PADDING5_SOF 653
+#define CMDQ_EVENT_VDO1_DISP_PADDING6_SOF 654
+#define CMDQ_EVENT_VDO1_DISP_PADDING7_SOF 655
+#define CMDQ_EVENT_VDO1_DISP_RSZ0_SOF 656
+#define CMDQ_EVENT_VDO1_DISP_RSZ1_SOF 657
+#define CMDQ_EVENT_VDO1_DISP_RSZ2_SOF 658
+#define CMDQ_EVENT_VDO1_DISP_RSZ3_SOF 659
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 660
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 661
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 662
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 663
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 664
+#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 665
+#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 666
+#define CMDQ_EVENT_VDO0_DSC_DL_ASYNC_SOF 667
+#define CMDQ_EVENT_VDO0_MERGE_DL_ASYNC_SOF 668
+#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 669
+#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 670
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 671
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 672
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 673
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 674
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 675
+#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 676
+#define CMDQ_EVENT_VDO1_DPI0_EXT_SOF 677
+#define CMDQ_EVENT_VDO1_DPI1_EXT_SOF 678
+#define CMDQ_EVENT_VDO1_DP_INTF_EXT_EXT_SOF 679
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 680
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 681
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 682
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 683
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 684
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 685
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 686
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 687
+#define CMDQ_EVENT_VDO1_DISP_PADDING0_FRAME_DONE 688
+#define CMDQ_EVENT_VDO1_DISP_PADDING1_FRAME_DONE 689
+#define CMDQ_EVENT_VDO1_DISP_PADDING2_FRAME_DONE 690
+#define CMDQ_EVENT_VDO1_DISP_PADDING3_FRAME_DONE 691
+#define CMDQ_EVENT_VDO1_DISP_PADDING4_FRAME_DONE 692
+#define CMDQ_EVENT_VDO1_DISP_PADDING5_FRAME_DONE 693
+#define CMDQ_EVENT_VDO1_DISP_PADDING6_FRAME_DONE 694
+#define CMDQ_EVENT_VDO1_DISP_PADDING7_FRAME_DONE 695
+#define CMDQ_EVENT_VDO1_DISP_RSZ0_FRAME_DONE 696
+#define CMDQ_EVENT_VDO1_DISP_RSZ1_FRAME_DONE 697
+#define CMDQ_EVENT_VDO1_DISP_RSZ2_FRAME_DONE 698
+#define CMDQ_EVENT_VDO1_DISP_RSZ3_FRAME_DONE 699
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 700
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 701
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 702
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 703
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 704
+#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 705
+#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 706
+#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 707
+#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 708
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 709
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 710
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 711
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 712
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 713
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 714
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 715
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 716
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 717
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 718
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 719
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 720
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 721
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 722
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 723
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 724
+#define CMDQ_EVENT_VDO1_DISP_RDMA_0_UNDERRUN 725
+#define CMDQ_EVENT_VDO1_DISP_RDMA_1_UNDERRUN 726
+#define CMDQ_EVENT_VDO1_U_MERGE4_UNDERRUN 727
+#define CMDQ_EVENT_VDO1_U_VPP_SPLIT_VIDEO_0_OVERFLOW 728
+#define CMDQ_EVENT_VDO1_U_VPP_SPLIT_VIDEO_1_OVERFLOW 729
+#define CMDQ_EVENT_VDO1_DSI_0_UNDERRUN 730
+#define CMDQ_EVENT_VDO1_DSI_1_UNDERRUN 731
+#define CMDQ_EVENT_VDO1_DP_INTF_0 732
+#define CMDQ_EVENT_VDO1_DP_INTF_1 733
+#define CMDQ_EVENT_VDO1_DPI_0 734
+#define CMDQ_EVENT_VDO1_DPI_1 735
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 741
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 742
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 743
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 744
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 745
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 746
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 747
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 748
+#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 749
+#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 750
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 751
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 752
+#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 753
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_EVENT 754
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_EVENT 755
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_EVENT 756
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_EVENT 757
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_EVENT 758
+#define CMDQ_EVENT_VDO1_HDMITX_EVENT 759
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 760
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 761
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 762
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 763
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 764
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 765
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 766
+#define CMDQ_EVENT_VDO1_DPI0_TARGET_LINE_1_EVENT_MM 767
+#define CMDQ_EVENT_HANDSHAKE_0 768
+#define CMDQ_EVENT_HANDSHAKE_1 769
+#define CMDQ_EVENT_HANDSHAKE_2 770
+#define CMDQ_EVENT_HANDSHAKE_3 771
+#define CMDQ_EVENT_HANDSHAKE_4 772
+#define CMDQ_EVENT_HANDSHAKE_5 773
+#define CMDQ_EVENT_HANDSHAKE_6 774
+#define CMDQ_EVENT_HANDSHAKE_7 775
+#define CMDQ_EVENT_HANDSHAKE_8 776
+#define CMDQ_EVENT_HANDSHAKE_9 777
+#define CMDQ_EVENT_HANDSHAKE_10 778
+#define CMDQ_EVENT_HANDSHAKE_11 779
+#define CMDQ_EVENT_HANDSHAKE_12 780
+#define CMDQ_EVENT_HANDSHAKE_13 781
+#define CMDQ_EVENT_HANDSHAKE_14 782
+#define CMDQ_EVENT_HANDSHAKE_15 783
+#define CMDQ_EVENT_VDEC_SOC_EVENT_0 800
+#define CMDQ_EVENT_VDEC_SOC_EVENT_1 801
+#define CMDQ_EVENT_VDEC_SOC_EVENT_2 802
+#define CMDQ_EVENT_VDEC_SOC_EVENT_3 803
+#define CMDQ_EVENT_VDEC_SOC_EVENT_4 804
+#define CMDQ_EVENT_VDEC_SOC_EVENT_5 805
+#define CMDQ_EVENT_VDEC_SOC_EVENT_6 806
+#define CMDQ_EVENT_VDEC_SOC_EVENT_7 807
+#define CMDQ_EVENT_VDEC_SOC_EVENT_8 808
+#define CMDQ_EVENT_VDEC_SOC_EVENT_9 809
+#define CMDQ_EVENT_VDEC_SOC_EVENT_10 810
+#define CMDQ_EVENT_VDEC_SOC_EVENT_11 811
+#define CMDQ_EVENT_VDEC_SOC_EVENT_12 812
+#define CMDQ_EVENT_VDEC_SOC_EVENT_13 813
+#define CMDQ_EVENT_VDEC_SOC_EVENT_14 814
+#define CMDQ_EVENT_VDEC_SOC_EVENT_15 815
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_0 832
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_1 833
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_2 834
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_3 835
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_4 836
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_5 837
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_6 838
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_7 839
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_8 840
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_9 841
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_10 842
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_11 843
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_12 844
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_13 845
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_14 846
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_15 847
+#define CMDQ_EVENT_VENC_TOP_VENC_FRAME_DONE 865
+#define CMDQ_EVENT_VENC_TOP_VENC_PAUSE_DONE 866
+#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 867
+#define CMDQ_EVENT_VENC_TOP_VENC_MB_DONE 868
+#define CMDQ_EVENT_VENC_TOP_VENC_128BYTE_DONE 869
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 870
+#define CMDQ_EVENT_VENC_TOP_VENC_SLICE_DONE 871
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 872
+#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 874
+#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 875
+#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 876
+#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 877
+#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 878
+#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 882
+#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 883
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_2 896
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_3 897
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_4 898
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_5 899
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_6 900
+#define CMDQ_EVENT_VDO1_DPI0_TARGET_LINE_0_EVENT_MM 928
+#define CMDQ_EVENT_VDO1_DPI0_TRIGGER_LOOP_CLEAR_EVENT_MM 929
+#define CMDQ_EVENT_VDO1_DPI0_LAST_LINE_EVENT_MM 930
+#define CMDQ_EVENT_VDO1_DPI0_LAST_SAFE_BLANK_EVENT_MM 931
+#define CMDQ_EVENT_VDO1_DPI0_VSYNC_START_EVENT_MM 932
+#define CMDQ_EVENT_VDO1_DPI1_TARGET_LINE_1_EVENT_MM 933
+#define CMDQ_EVENT_VDO1_DPI1_TARGET_LINE_0_EVENT_MM 934
+#define CMDQ_EVENT_VDO1_DPI1_TRIGGER_LOOP_CLEAR_EVENT_MM 935
+#define CMDQ_EVENT_VDO1_DPI1_LAST_LINE_EVENT_MM 936
+#define CMDQ_EVENT_VDO1_DPI1_LAST_SAFE_BLANK_EVENT_MM 937
+#define CMDQ_EVENT_VDO1_DPI1_VSYNC_START_EVENT_MM 938
+#define CMDQ_EVENT_VDO1_DP_INTF_TARGET_LINE_1_EVENT_MM 939
+#define CMDQ_EVENT_VDO1_DP_INTF_TARGET_LINE_0_EVENT_MM 940
+#define CMDQ_EVENT_VDO1_DP_INTF_TRIGGER_LOOP_CLEAR_EVENT_MM 941
+#define CMDQ_EVENT_VDO1_DP_INTF_LAST_LINE_EVENT_MM 942
+#define CMDQ_EVENT_VDO1_DP_INTF_LAST_SAFE_BLANK_EVENT_MM 943
+#define CMDQ_EVENT_VBLANK_FALLING 946
+#define CMDQ_EVENT_VSC_FINISH 947
+#define CMDQ_EVENT_TPR_0 962
+#define CMDQ_EVENT_TPR_1 963
+#define CMDQ_EVENT_TPR_2 964
+#define CMDQ_EVENT_TPR_3 965
+#define CMDQ_EVENT_TPR_4 966
+#define CMDQ_EVENT_TPR_5 967
+#define CMDQ_EVENT_TPR_6 968
+#define CMDQ_EVENT_TPR_7 969
+#define CMDQ_EVENT_TPR_8 970
+#define CMDQ_EVENT_TPR_9 971
+#define CMDQ_EVENT_TPR_10 972
+#define CMDQ_EVENT_TPR_11 973
+#define CMDQ_EVENT_TPR_12 974
+#define CMDQ_EVENT_TPR_13 975
+#define CMDQ_EVENT_TPR_14 976
+#define CMDQ_EVENT_TPR_15 977
+#define CMDQ_EVENT_TPR_16 978
+#define CMDQ_EVENT_TPR_17 979
+#define CMDQ_EVENT_TPR_18 980
+#define CMDQ_EVENT_TPR_19 981
+#define CMDQ_EVENT_TPR_20 982
+#define CMDQ_EVENT_TPR_21 983
+#define CMDQ_EVENT_TPR_22 984
+#define CMDQ_EVENT_TPR_23 985
+#define CMDQ_EVENT_TPR_24 986
+#define CMDQ_EVENT_TPR_25 987
+#define CMDQ_EVENT_TPR_26 988
+#define CMDQ_EVENT_TPR_27 989
+#define CMDQ_EVENT_TPR_28 990
+#define CMDQ_EVENT_TPR_29 991
+#define CMDQ_EVENT_TPR_30 992
+#define CMDQ_EVENT_TPR_31 993
+#define CMDQ_EVENT_TPR_TIMEOUT_0 994
+#define CMDQ_EVENT_TPR_TIMEOUT_1 995
+#define CMDQ_EVENT_TPR_TIMEOUT_2 996
+#define CMDQ_EVENT_TPR_TIMEOUT_3 997
+#define CMDQ_EVENT_TPR_TIMEOUT_4 998
+#define CMDQ_EVENT_TPR_TIMEOUT_5 999
+#define CMDQ_EVENT_TPR_TIMEOUT_6 1000
+#define CMDQ_EVENT_TPR_TIMEOUT_7 1001
+#define CMDQ_EVENT_TPR_TIMEOUT_8 1002
+#define CMDQ_EVENT_TPR_TIMEOUT_9 1003
+#define CMDQ_EVENT_TPR_TIMEOUT_10 1004
+#define CMDQ_EVENT_TPR_TIMEOUT_11 1005
+#define CMDQ_EVENT_TPR_TIMEOUT_12 1006
+#define CMDQ_EVENT_TPR_TIMEOUT_13 1007
+#define CMDQ_EVENT_TPR_TIMEOUT_14 1008
+#define CMDQ_EVENT_TPR_TIMEOUT_15 1009
+#define CMDQ_EVENT_OUTPIN_0 1018
+#define CMDQ_EVENT_OUTPIN_1 1019
+
+#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_EIS 124
+#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_TNR 125
+#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_LITE 126
+#define CMDQ_SYNC_TOKEN_IMGSYS_TRAW 127
+#define CMDQ_SYNC_TOKEN_IMGSYS_LTRAW 128
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_1 223
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_2 224
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_3 225
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_4 226
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_5 227
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_6 228
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_7 229
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_8 230
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_9 231
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_10 232
+#define CMDQ_SYNC_TOKEN_IMGSYS_XTRAW 233
+#define CMDQ_SYNC_TOKEN_IMGSYS_DIP 234
+#define CMDQ_SYNC_TOKEN_IMGSYS_PQDIP_A 235
+#define CMDQ_SYNC_TOKEN_IMGSYS_PQDIP_B 236
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_1 237
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_2 238
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_3 239
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_4 240
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_5 241
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_6 242
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_7 243
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_8 244
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_9 245
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_10 246
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_11 247
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_12 248
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_13 249
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_14 250
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_15 251
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_16 252
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_17 253
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_18 254
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_19 255
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_20 276
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_21 277
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_22 278
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_23 279
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_24 280
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_25 281
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_26 282
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_27 283
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_28 284
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_29 285
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_30 286
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_31 287
+#define CMDQ_SYNC_TOKEN_IPESYS_ME 300
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_TRAW 301
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_LTRAW 302
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_XTRAW 303
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_DIP 304
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_32 308
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_33 309
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_34 310
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_35 311
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_36 312
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_37 313
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_38 314
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_39 315
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_40 316
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_41 370
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_42 371
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_43 372
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_44 373
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_45 374
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_46 375
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_47 376
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_48 377
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_49 378
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_50 379
+#define CMDQ_SYNC_TOKEN_TZMP_ISP_WAIT 380
+#define CMDQ_SYNC_TOKEN_TZMP_ISP_SET 381
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_51 790
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_52 791
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_53 792
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_54 793
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_55 794
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_56 795
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_57 796
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_58 797
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_59 798
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_60 799
+#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_WAIT 816
+#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_SET 817
+#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_LOCK 818
+#define CMDQ_SYNC_TOKEN_PREBUILT_MML_WAIT 819
+#define CMDQ_SYNC_TOKEN_PREBUILT_MML_SET 820
+#define CMDQ_SYNC_TOKEN_PREBUILT_MML_LOCK 821
+#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_WAIT 822
+#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_SET 823
+#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_LOCK 824
+#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_WAIT 825
+#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_SET 826
+#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_LOCK 827
+#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 848
+#define CMDQ_SYNC_TOKEN_STREAM_EOF 849
+#define CMDQ_SYNC_TOKEN_ESD_EOF 850
+#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 851
+#define CMDQ_SYNC_TOKEN_CABC_EOF 852
+#define CMDQ_SYNC_TOKEN_VENC_INPUT_READY 853
+#define CMDQ_SYNC_TOKEN_VENC_EOF 854
+#define CMDQ_SYNC_TOKEN_SECURE_THR_EOF 855
+#define CMDQ_SYNC_TOKEN_USER_0 856
+#define CMDQ_SYNC_TOKEN_USER_1 857
+#define CMDQ_SYNC_TOKEN_POLL_MONITOR 858
+#define CMDQ_TOKEN_TPR_LOCK 859
+#define CMDQ_SYNC_TOKEN_MSS 860
+#define CMDQ_SYNC_TOKEN_MSF 861
+#define CMDQ_SYNC_TOKEN_GPR_SET_0 884
+#define CMDQ_SYNC_TOKEN_GPR_SET_1 885
+#define CMDQ_SYNC_TOKEN_GPR_SET_2 886
+#define CMDQ_SYNC_TOKEN_GPR_SET_3 887
+#define CMDQ_SYNC_TOKEN_GPR_SET_4 888
+#define CMDQ_SYNC_RESOURCE_WROT0 889
+#define CMDQ_SYNC_RESOURCE_WROT1 890
+#define CMDQ_SYNC_TOKEN_DISP_VA_START 1012
+#define CMDQ_SYNC_TOKEN_DISP_VA_END 1013
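+
+/*
+ * Usage note (a sketch; the property and helper names come from the
+ * mediatek,gce bindings and the mtk-cmdq helper API, not this header):
+ * consumers reference these IDs through the mediatek,gce-events DT
+ * property, e.g. mediatek,gce-events = <CMDQ_EVENT_VDO1_HDMITX_EVENT>;,
+ * or pass them to helpers such as cmdq_pkt_wfe() to wait for an event.
+ */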
+
+#endif
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index 4c23eefed5f3..fbfa3febc66d 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -8,6 +8,7 @@
/* Signal IDs for MPROC protocol */
#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
+#define IPCC_MPROC_SIGNAL_TZ 1
#define IPCC_MPROC_SIGNAL_SMP2P 2
#define IPCC_MPROC_SIGNAL_PING 3
@@ -29,5 +30,8 @@
#define IPCC_CLIENT_PCIE1 14
#define IPCC_CLIENT_PCIE2 15
#define IPCC_CLIENT_SPSS 16
+#define IPCC_CLIENT_NSP1 18
+#define IPCC_CLIENT_TME 23
+#define IPCC_CLIENT_WPSS 24
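+
+/*
+ * Example specifier (a sketch, following the qcom,ipcc binding's
+ * <client-id signal-id> cell layout):
+ *
+ *   mboxes = <&ipcc IPCC_CLIENT_WPSS IPCC_MPROC_SIGNAL_GLINK_QMP>;
+ */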
#endif
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
index 3bdec7a84d35..b9ccae2aa9e2 100644
--- a/include/dt-bindings/mailbox/tegra186-hsp.h
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -16,6 +16,11 @@
#define TEGRA_HSP_MBOX_TYPE_AS 0x3
/*
+ * These define the types of shared mailboxes supported, based on data size.
+ */
+#define TEGRA_HSP_MBOX_TYPE_SM_128BIT (1 << 8)
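+
+/*
+ * Illustrative specifier (a sketch; TEGRA_HSP_MBOX_TYPE_SM is the
+ * 32-bit shared-mailbox type defined above): the 128-bit flag is ORed
+ * into the type cell of the mailbox specifier, e.g.
+ *
+ *   mboxes = <&hsp (TEGRA_HSP_MBOX_TYPE_SM |
+ *                   TEGRA_HSP_MBOX_TYPE_SM_128BIT) 0>;
+ */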
+
+/*
* These defines represent the bit associated with the given master ID in the
* doorbell registers.
*/
diff --git a/include/dt-bindings/media/video-interfaces.h b/include/dt-bindings/media/video-interfaces.h
new file mode 100644
index 000000000000..68ac4e05e37f
--- /dev/null
+++ b/include/dt-bindings/media/video-interfaces.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__
+#define __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__
+
+#define MEDIA_BUS_TYPE_CSI2_CPHY 1
+#define MEDIA_BUS_TYPE_CSI1 2
+#define MEDIA_BUS_TYPE_CCP2 3
+#define MEDIA_BUS_TYPE_CSI2_DPHY 4
+#define MEDIA_BUS_TYPE_PARALLEL 5
+#define MEDIA_BUS_TYPE_BT656 6
+
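+/*
+ * These values back the 'bus-type' endpoint property from the
+ * video-interfaces binding; a sketch:
+ *
+ *   endpoint {
+ *           bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ *   };
+ */
+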
+#endif /* __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__ */
diff --git a/include/dt-bindings/memory/mediatek,mt8188-memory-port.h b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
new file mode 100644
index 000000000000..337ab11262af
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chengci Xu <chengci.xu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * MM IOMMU larbs:
+ * The hardware larb indexes are not contiguous, and some larbs are
+ * split into sub-larbs (for example, larb11 consists of
+ * larb11a/larb11b/larb11c). The larbs below are therefore reindexed
+ * from a software point of view.
+ */
+#define SMI_L0_ID 0
+#define SMI_L1_ID 1
+#define SMI_L2_ID 2
+#define SMI_L3_ID 3
+#define SMI_L4_ID 4
+#define SMI_L5_ID 5
+#define SMI_L6_ID 6
+#define SMI_L7_ID 7
+#define SMI_L9_ID 8
+#define SMI_L10_ID 9
+#define SMI_L11A_ID 10
+#define SMI_L11B_ID 11
+#define SMI_L11C_ID 12
+#define SMI_L12_ID 13
+#define SMI_L13_ID 14
+#define SMI_L14_ID 15
+#define SMI_L15_ID 16
+#define SMI_L16A_ID 17
+#define SMI_L16B_ID 18
+#define SMI_L17A_ID 19
+#define SMI_L17B_ID 20
+#define SMI_L19_ID 21
+#define SMI_L21_ID 22
+#define SMI_L23_ID 23
+#define SMI_L27_ID 24
+#define SMI_L28_ID 25
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, which we split into
+ * four ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. The masters
+ * may be placed in any of these regions, BUT:
+ * a) All the ports inside one larb must be in the same range.
+ * b) The iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19(21)[1]/21(22)/23
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb27(24): port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb27(24): port 2/3
+ *
+ * This SoC has two MM IOMMU instances; they are connected as follows:
+ * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
+ * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
+ *
+ * [1]: This is larb19, but its index is 21 from the SW point of view.
+ */
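+
+/*
+ * Worked example of the port-ID encoding (a sketch; it relies on the
+ * MTK_M4U_ID() macro from mtk-memory-port.h, ((larb) << 5) | (port)):
+ *
+ *   M4U_PORT_L9_IMGI_T1_A = MTK_M4U_ID(SMI_L9_ID, 0)
+ *                         = (8 << 5) | 0 = 256
+ *
+ * The larb/port fields can be unpacked again with the matching
+ * MTK_M4U_TO_LARB()/MTK_M4U_TO_PORT() helpers, assumed to be provided
+ * by the same header.
+ */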
+
+/* MM IOMMU ports */
+/* LARB 0 -- VDO-0 */
+#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)
+
+/* LARB 1 -- VDO-0 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)
+
+/* LARB 2 -- VDO-1 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)
+
+/* LARB 3 -- VDO-1 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
+#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
+#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)
+
+/* LARB 4 -- VPP-0 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)
+
+/* LARB 5 -- VPP-1 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
+#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)
+
+/* LARB 6 -- VPP-1 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
+#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)
+
+/* LARB 7 -- WPE */
+#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)
+
+/* LARB 9 -- IMG-M */
+#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)
+
+/* LARB 10 -- IMG-D */
+#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
+#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
+#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
+#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
+#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
+#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
+#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
+#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
+#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
+#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
+#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
+#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
+#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
+#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
+#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
+#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
+#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
+#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
+#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
+#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)
+
+/* LARB 11A -- IMG-D */
+#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
+#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
+#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
+#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
+#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
+#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
+#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
+#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
+#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
+#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
+#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
+#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
+#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
+#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
+#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
+#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
+#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
+#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
+#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
+#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
+#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
+#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
+#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
+#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
+#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
+#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
+#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
+#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
+#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
+#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)
+
+/* LARB 11B -- IMG-D */
+#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
+#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
+#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
+#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
+#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
+#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
+#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
+#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
+#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
+#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
+#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
+#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
+#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
+#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
+#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
+#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
+#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
+#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
+#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
+#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
+#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
+#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
+#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
+#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
+#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
+#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
+#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
+#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
+#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
+#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)
+
+/* LARB 11C -- IMG-D */
+#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
+#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
+#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
+#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
+#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
+#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
+#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
+#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
+#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
+#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
+#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
+#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
+#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
+#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
+#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
+#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
+#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
+#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
+#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
+#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
+#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
+#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
+#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
+#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
+#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
+#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
+#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
+#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
+#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
+#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)
+
+/* LARB 12 -- IPE */
+#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
+#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
+#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
+#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
+#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
+#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
+#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
+#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
+#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
+#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
+#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
+#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
+#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
+#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
+#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
+#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)
+
+/* LARB 13 -- CAM-1 */
+#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
+#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
+#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)
+
+/* LARB 14 -- CAM-1 */
+#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
+#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
+#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
+#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
+#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
+#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)
+
+/* LARB 15 -- IMG-D */
+#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
+#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
+#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
+#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
+#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
+#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
+#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
+#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
+#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
+#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
+#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
+#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
+#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
+#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
+#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
+#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
+#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
+#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
+#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)
+
+/* LARB 16A -- CAM */
+#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
+#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
+#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
+#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
+#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
+#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
+#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
+#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
+#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
+#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
+#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
+#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
+#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
+#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
+#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
+#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
+#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)
+
+/* LARB 16B -- CAM */
+#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
+#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
+#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
+#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
+#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
+#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
+#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
+#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
+#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
+#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
+#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
+#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
+#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
+#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
+#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
+#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
+#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)
+
+/* LARB 17A -- CAM */
+#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
+#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
+#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
+#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
+#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
+#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
+#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)
+
+/* LARB 17B -- CAM */
+#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
+#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
+#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
+#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
+#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
+#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
+#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)
+
+/* LARB 19 -- VENC */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
+#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)
+
+/* LARB 21 -- VDEC-CORE0 */
+#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
+#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
+#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
+#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
+#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
+#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
+#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
+#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
+#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)
+
+/* LARB 23 -- VDEC-SOC */
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
+#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
+#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
+#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
+#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
+#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)
+
+/* LARB 27 -- CCU */
+#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
+#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
+#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
+#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)
+
+/* LARB 28 -- AXI-CCU */
+#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)
+
+#endif
diff --git a/include/dt-bindings/memory/mediatek,mt8365-larb-port.h b/include/dt-bindings/memory/mediatek,mt8365-larb-port.h
new file mode 100644
index 000000000000..56d5a5dd519e
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8365-larb-port.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8365_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8365_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 9)
+#define M4U_PORT_APU_READ MTK_M4U_ID(M4U_LARB0_ID, 10)
+#define M4U_PORT_APU_WRITE MTK_M4U_ID(M4U_LARB0_ID, 11)
+
+/* larb1 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 12)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB1_ID, 13)
+#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB1_ID, 14)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB1_ID, 15)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 16)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB1_ID, 17)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 18)
+
+/* larb2 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_CAM_LCS MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_CAM_CAM_SV0 MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_CAM_CAM_SV1 MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_CAM_AFO MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_CAM_SPARE MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_CAM_WPE0_I MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_CAM_WPE1_I MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_CAM_WPE_O MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_CAM_FD0_I MTK_M4U_ID(M4U_LARB2_ID, 20)
+#define M4U_PORT_CAM_FD1_I MTK_M4U_ID(M4U_LARB2_ID, 21)
+#define M4U_PORT_CAM_FD0_O MTK_M4U_ID(M4U_LARB2_ID, 22)
+#define M4U_PORT_CAM_FD1_O MTK_M4U_ID(M4U_LARB2_ID, 23)
+
+/* larb3 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB3_ID, 10)
+
+#endif
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h
index 2d85c2ec6cfd..25d03526f142 100644
--- a/include/dt-bindings/memory/mt2701-larb-port.h
+++ b/include/dt-bindings/memory/mt2701-larb-port.h
@@ -4,8 +4,8 @@
* Author: Honghui Zhang <honghui.zhang@mediatek.com>
*/
-#ifndef _MT2701_LARB_PORT_H_
-#define _MT2701_LARB_PORT_H_
+#ifndef _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_
/*
* Mediatek m4u generation 1 such as mt2701 has flat m4u port numbers,
diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h
index 6f9aa7349cef..e41a2841bcff 100644
--- a/include/dt-bindings/memory/mt2712-larb-port.h
+++ b/include/dt-bindings/memory/mt2712-larb-port.h
@@ -3,10 +3,10 @@
* Copyright (c) 2017 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#ifndef __DTS_IOMMU_PORT_MT2712_H
-#define __DTS_IOMMU_PORT_MT2712_H
+#ifndef _DT_BINDINGS_MEMORY_MT2712_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT2712_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h
index 2ad0899fbf2f..3fb438a96e35 100644
--- a/include/dt-bindings/memory/mt6779-larb-port.h
+++ b/include/dt-bindings/memory/mt6779-larb-port.h
@@ -4,10 +4,10 @@
* Author: Chao Hao <chao.hao@mediatek.com>
*/
-#ifndef _DTS_IOMMU_PORT_MT6779_H_
-#define _DTS_IOMMU_PORT_MT6779_H_
+#ifndef _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/dt-bindings/memory/mt6795-larb-port.h b/include/dt-bindings/memory/mt6795-larb-port.h
new file mode 100644
index 000000000000..58cf6a6b6372
--- /dev/null
+++ b/include/dt-bindings/memory/mt6795-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 9)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 10)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 11)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 12)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 13)
+
+/* larb1 */
+#define M4U_PORT_VDEC_MC MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_VDEC_PP MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_VDEC_UFO MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_VDEC_VLD MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_VDEC_VLD2 MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VDEC_AVC_MV MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VDEC_PRED_RD MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_VDEC_PRED_WR MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_VDEC_PPWRAP MTK_M4U_ID(M4U_LARB1_ID, 8)
+
+/* larb2 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_CAM_IMGO_S MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_CAM_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_CAM_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_CAM_RB MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_CAM_RP MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_CAM_WR MTK_M4U_ID(M4U_LARB2_ID, 20)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_REMDC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_REMDC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_REMDC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4 */
+#define M4U_PORT_MJC_MV_RD MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_MJC_MV_WR MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_MJC_DMA_RD MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_MJC_DMA_WR MTK_M4U_ID(M4U_LARB4_ID, 3)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8167-larb-port.h b/include/dt-bindings/memory/mt8167-larb-port.h
new file mode 100644
index 000000000000..aae57d4824ca
--- /dev/null
+++ b/include/dt-bindings/memory/mt8167-larb-port.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8167_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8167_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_MDP_RDMA MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WROT MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_FAKE MTK_M4U_ID(M4U_LARB0_ID, 7)
+
+/* larb1 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 12)
+
+/* larb2 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h
index 9f31ccfeca21..167a7fc51868 100644
--- a/include/dt-bindings/memory/mt8173-larb-port.h
+++ b/include/dt-bindings/memory/mt8173-larb-port.h
@@ -3,10 +3,10 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#ifndef __DTS_IOMMU_PORT_MT8173_H
-#define __DTS_IOMMU_PORT_MT8173_H
+#ifndef _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h
index 2c579f305162..36abdf0ce5a2 100644
--- a/include/dt-bindings/memory/mt8183-larb-port.h
+++ b/include/dt-bindings/memory/mt8183-larb-port.h
@@ -3,10 +3,10 @@
* Copyright (c) 2018 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#ifndef __DTS_IOMMU_PORT_MT8183_H
-#define __DTS_IOMMU_PORT_MT8183_H
+#ifndef _DT_BINDINGS_MEMORY_MT8183_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8183_LARB_PORT_H_
-#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/dt-bindings/memory/mt8186-memory-port.h b/include/dt-bindings/memory/mt8186-memory-port.h
new file mode 100644
index 000000000000..2bc6e4433048
--- /dev/null
+++ b/include/dt-bindings/memory/mt8186-memory-port.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ * Author: Anan Sun <anan.sun@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, which we split into
+ * four ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. The masters
+ * may be placed in any of these regions, BUT:
+ * a) All the ports inside one larb must be in the same range.
+ * b) The iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2
+ * vcodec 4G ~ 8G larb4/7
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb13: port 9/10
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb14: port 4/5
+ */
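+
+/*
+ * Typical consumer usage (a sketch; the node and phandle names are
+ * made up, only the port define comes from this header):
+ *
+ *   ovl0: ovl@14005000 {
+ *           iommus = <&iommu_mm IOMMU_PORT_L0_OVL_RDMA0>;
+ *   };
+ */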
+
+/* MM IOMMU ports */
+/* LARB 0 -- MMSYS */
+#define IOMMU_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define IOMMU_PORT_L0_REVERSED MTK_M4U_ID(0, 1)
+#define IOMMU_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define IOMMU_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 3)
+
+/* LARB 1 -- MMSYS */
+#define IOMMU_PORT_L1_DISP_RDMA1 MTK_M4U_ID(1, 0)
+#define IOMMU_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 1)
+#define IOMMU_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 2)
+#define IOMMU_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 3)
+#define IOMMU_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 4)
+
+/* LARB 2 -- MMSYS */
+#define IOMMU_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define IOMMU_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define IOMMU_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define IOMMU_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define IOMMU_PORT_L2_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* LARB 4 -- VDEC */
+#define IOMMU_PORT_L4_HW_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define IOMMU_PORT_L4_HW_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(4, 10)
+#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 11)
+#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT MTK_M4U_ID(4, 12)
+#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT MTK_M4U_ID(4, 13)
+
+/* LARB 7 -- VENC */
+#define IOMMU_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define IOMMU_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define IOMMU_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define IOMMU_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define IOMMU_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define IOMMU_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define IOMMU_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define IOMMU_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define IOMMU_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define IOMMU_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define IOMMU_PORT_L7_JPGENC_C_RDMA MTK_M4U_ID(7, 10)
+#define IOMMU_PORT_L7_JPGENC_Q_TABLE MTK_M4U_ID(7, 11)
+#define IOMMU_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+
+/* LARB 8 -- WPE */
+#define IOMMU_PORT_L8_WPE_RDMA_0 MTK_M4U_ID(8, 0)
+#define IOMMU_PORT_L8_WPE_RDMA_1 MTK_M4U_ID(8, 1)
+#define IOMMU_PORT_L8_WPE_WDMA_0 MTK_M4U_ID(8, 2)
+
+/* LARB 9 -- IMG-1 */
+#define IOMMU_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define IOMMU_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define IOMMU_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define IOMMU_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define IOMMU_PORT_L9_IMG_LCE_D1 MTK_M4U_ID(9, 4)
+#define IOMMU_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define IOMMU_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define IOMMU_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define IOMMU_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define IOMMU_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define IOMMU_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define IOMMU_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define IOMMU_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define IOMMU_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define IOMMU_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA1 MTK_M4U_ID(9, 15)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA0 MTK_M4U_ID(9, 16)
+#define IOMMU_PORT_L9_IMG_WPE_WDMA MTK_M4U_ID(9, 17)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA0 MTK_M4U_ID(9, 18)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA1 MTK_M4U_ID(9, 19)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA2 MTK_M4U_ID(9, 20)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA3 MTK_M4U_ID(9, 21)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA4 MTK_M4U_ID(9, 22)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA5 MTK_M4U_ID(9, 23)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA0 MTK_M4U_ID(9, 24)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA1 MTK_M4U_ID(9, 25)
+#define IOMMU_PORT_L9_IMG_RESERVE6 MTK_M4U_ID(9, 26)
+#define IOMMU_PORT_L9_IMG_RESERVE7 MTK_M4U_ID(9, 27)
+#define IOMMU_PORT_L9_IMG_RESERVE8 MTK_M4U_ID(9, 28)
+
+/* LARB 11 -- IMG-2 */
+#define IOMMU_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
+#define IOMMU_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
+#define IOMMU_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
+#define IOMMU_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
+#define IOMMU_PORT_L11_IMG_LCE_D1 MTK_M4U_ID(11, 4)
+#define IOMMU_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
+#define IOMMU_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
+#define IOMMU_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
+#define IOMMU_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
+#define IOMMU_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
+#define IOMMU_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
+#define IOMMU_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
+#define IOMMU_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
+#define IOMMU_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
+#define IOMMU_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
+#define IOMMU_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
+#define IOMMU_PORT_L11_IMG_RESERVE6 MTK_M4U_ID(11, 26)
+#define IOMMU_PORT_L11_IMG_RESERVE7 MTK_M4U_ID(11, 27)
+#define IOMMU_PORT_L11_IMG_RESERVE8 MTK_M4U_ID(11, 28)
+
+/* LARB 13 -- CAM */
+#define IOMMU_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
+#define IOMMU_PORT_L13_CAM_MRAWO_0 MTK_M4U_ID(13, 1)
+#define IOMMU_PORT_L13_CAM_MRAWO_1 MTK_M4U_ID(13, 2)
+#define IOMMU_PORT_L13_CAM_CAMSV_4 MTK_M4U_ID(13, 6)
+#define IOMMU_PORT_L13_CAM_CAMSV_5 MTK_M4U_ID(13, 7)
+#define IOMMU_PORT_L13_CAM_CAMSV_6 MTK_M4U_ID(13, 8)
+#define IOMMU_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
+#define IOMMU_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
+#define IOMMU_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
+
+/* LARB 14 -- CAM */
+#define IOMMU_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
+#define IOMMU_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
+
+/* LARB 16 -- RAW-A */
+#define IOMMU_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
+#define IOMMU_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
+#define IOMMU_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
+#define IOMMU_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
+#define IOMMU_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
+#define IOMMU_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
+#define IOMMU_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
+#define IOMMU_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
+#define IOMMU_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
+#define IOMMU_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
+#define IOMMU_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define IOMMU_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define IOMMU_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define IOMMU_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define IOMMU_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define IOMMU_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define IOMMU_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* LARB 17 -- RAW-B */
+#define IOMMU_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define IOMMU_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define IOMMU_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define IOMMU_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define IOMMU_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define IOMMU_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define IOMMU_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define IOMMU_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define IOMMU_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define IOMMU_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define IOMMU_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define IOMMU_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define IOMMU_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define IOMMU_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define IOMMU_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define IOMMU_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define IOMMU_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* LARB 19 -- IPE */
+#define IOMMU_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define IOMMU_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define IOMMU_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define IOMMU_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* LARB 20 -- IPE */
+#define IOMMU_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define IOMMU_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define IOMMU_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define IOMMU_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define IOMMU_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define IOMMU_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8192-larb-port.h b/include/dt-bindings/memory/mt8192-larb-port.h
new file mode 100644
index 000000000000..23035a52c675
--- /dev/null
+++ b/include/dt-bindings/memory/mt8192-larb-port.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ *
+ * Author: Chao Hao <chao.hao@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8192_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8192_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space.
+ *
+ * The address ranges are preassigned as follows:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1
+ * vcodec 4G ~ 8G larb4/5/7
+ * cam/mdp 8G ~ 12G larb2/9/11/13/14/16/17/18/19/20
+ * CCU0 0x4000_0000 ~ 0x43ff_ffff larb13: port 9/10
+ * CCU1 0x4400_0000 ~ 0x47ff_ffff larb14: port 4/5
+ *
+ * larb3/6/8/10/12/15 are null.
+ */
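
As an illustrative aside (not part of this header): each region in the table above is one 4G slot, so the region a master lands in follows directly from bits 32 and up of the iova. The helper below is a sketch; its name is an assumption, not a kernel API.

#include <stdint.h>

/* 0: disp, 1: vcodec, 2: cam/mdp -- per the preassignment table above. */
static inline unsigned int mt8192_iova_region(uint64_t iova)
{
	return (unsigned int)(iova >> 32);
}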
+
+/* larb0 */
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_OVL_RDMA0_HDR MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_OVL_2L_RDMA0_HDR MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_OVL_2L_RDMA2_HDR MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_OVL_2L_RDMA2 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_MDP_RDMA4 MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_RDMA4 MTK_M4U_ID(1, 5)
+#define M4U_PORT_L1_DISP_UFBC_WDMA0 MTK_M4U_ID(1, 6)
+#define M4U_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 7)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_MDP_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* larb3: null */
+
+/* larb4 */
+#define M4U_PORT_L4_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define M4U_PORT_L4_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define M4U_PORT_L4_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define M4U_PORT_L4_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define M4U_PORT_L4_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define M4U_PORT_L4_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define M4U_PORT_L4_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 10)
+
+/* larb5 */
+#define M4U_PORT_L5_VDEC_LAT0_VLD_EXT MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_VDEC_LAT0_TILE_EXT MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_VDEC_LAT0_RG_CTRL_DMA_EXT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_VDEC_UFO_ENC_EXT MTK_M4U_ID(5, 7)
+
+/* larb6: null */
+
+/* larb7 */
+#define M4U_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define M4U_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define M4U_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define M4U_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define M4U_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define M4U_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define M4U_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define M4U_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define M4U_PORT_L7_JPGENC_Q_RDMA MTK_M4U_ID(7, 10)
+#define M4U_PORT_L7_JPGENC_C_TABLE MTK_M4U_ID(7, 11)
+#define M4U_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+#define M4U_PORT_L7_VENC_SUB_R_LUMA MTK_M4U_ID(7, 13)
+#define M4U_PORT_L7_VENC_SUB_W_LUMA MTK_M4U_ID(7, 14)
+
+/* larb8: null */
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_ICE_D1 MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+
+/* larb10: null */
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_ICE_D1 MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
+#define M4U_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
+#define M4U_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
+#define M4U_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
+#define M4U_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
+#define M4U_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
+#define M4U_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
+#define M4U_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
+#define M4U_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
+#define M4U_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
+#define M4U_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
+#define M4U_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
+#define M4U_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
+#define M4U_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
+#define M4U_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
+#define M4U_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
+#define M4U_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
+
+/* larb12: null */
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_MRAWO0 MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_MRAWO1 MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_CAMSV1 MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_CAMSV2 MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_CAMSV3 MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_CAMSV4 MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_CAMSV5 MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_CAMSV6 MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
+#define M4U_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
+#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_RESERVE1 MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_RESERVE2 MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_RESERVE3 MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_CAMSV0 MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define M4U_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define M4U_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define M4U_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define M4U_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define M4U_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define M4U_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define M4U_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define M4U_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define M4U_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define M4U_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define M4U_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_IMGO_R1_C MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_RRZO_R1_C MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CQI_R1_C MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_BPCI_R1_C MTK_M4U_ID(18, 3)
+#define M4U_PORT_L18_CAM_YUVO_R1_C MTK_M4U_ID(18, 4)
+#define M4U_PORT_L18_CAM_UFDI_R2_C MTK_M4U_ID(18, 5)
+#define M4U_PORT_L18_CAM_RAWI_R2_C MTK_M4U_ID(18, 6)
+#define M4U_PORT_L18_CAM_RAWI_R3_C MTK_M4U_ID(18, 7)
+#define M4U_PORT_L18_CAM_AAO_R1_C MTK_M4U_ID(18, 8)
+#define M4U_PORT_L18_CAM_AFO_R1_C MTK_M4U_ID(18, 9)
+#define M4U_PORT_L18_CAM_FLKO_R1_C MTK_M4U_ID(18, 10)
+#define M4U_PORT_L18_CAM_LCESO_R1_C MTK_M4U_ID(18, 11)
+#define M4U_PORT_L18_CAM_CRZO_R1_C MTK_M4U_ID(18, 12)
+#define M4U_PORT_L18_CAM_LTMSO_R1_C MTK_M4U_ID(18, 13)
+#define M4U_PORT_L18_CAM_RSSO_R1_C MTK_M4U_ID(18, 14)
+#define M4U_PORT_L18_CAM_AAHO_R1_C MTK_M4U_ID(18, 15)
+#define M4U_PORT_L18_CAM_LSCI_R1_C MTK_M4U_ID(18, 16)
+
+/* larb19 */
+#define M4U_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* larb20 */
+#define M4U_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8195-memory-port.h b/include/dt-bindings/memory/mt8195-memory-port.h
new file mode 100644
index 000000000000..70ba9f498eeb
--- /dev/null
+++ b/include/dt-bindings/memory/mt8195-memory-port.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space. We separate it into
+ * four ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. Masters may be
+ * placed in any of these regions, BUT:
+ * a) all the ports inside a larb must be in one range;
+ * b) the iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19/20/21/22/23/24
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb18: port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb18: port 2/3
+ *
+ * This SoC has two IOMMU HWs; these are the detailed connections:
+ * iommu-vdo: larb0/2/5/7/9/10/11/13/17/19/21/24/25/28
+ * iommu-vpp: larb1/3/4/6/8/12/14/16/18/20/22/23/26/27
+ */
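
Constraint b) above can be checked mechanically: a window is legal when its first and last byte fall in the same 4G slot. A minimal sketch, not part of this header; the name and signature are assumptions.

#include <stdbool.h>
#include <stdint.h>

#define MTK_IOVA_REGION_SZ 0x100000000ULL	/* the 4G granule described above */

/* True if [iova, iova + size) stays inside one 4G/8G/12G-bounded slot. */
static bool mtk_iova_range_ok(uint64_t iova, uint64_t size)
{
	return size != 0 &&
	       iova / MTK_IOVA_REGION_SZ == (iova + size - 1) / MTK_IOVA_REGION_SZ;
}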
+
+/* MM IOMMU ports */
+/* larb0 */
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_DISP_OVL0_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_DISP_OVL0_RDMA1 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_OVL0_HDR MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_FAKE0 MTK_M4U_ID(1, 5)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(2, 4)
+
+/* larb3 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(3, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(3, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(3, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(3, 3)
+#define M4U_PORT_L3_HDR_DS MTK_M4U_ID(3, 4)
+#define M4U_PORT_L3_HDR_ADL MTK_M4U_ID(3, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(3, 6)
+
+/* larb4 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_FAKE MTK_M4U_ID(4, 4)
+
+/* larb5 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_FAKE MTK_M4U_ID(5, 7)
+
+/* larb6 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(6, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(6, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(6, 2)
+#define M4U_PORT_L6_FAKE MTK_M4U_ID(6, 3)
+
+/* larb7 */
+#define M4U_PORT_L7_IMG_WPE_RDMA0 MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_IMG_WPE_RDMA1 MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_IMG_WPE_WDMA0 MTK_M4U_ID(7, 2)
+
+/* larb8 */
+#define M4U_PORT_L8_IMG_WPE_RDMA0 MTK_M4U_ID(8, 0)
+#define M4U_PORT_L8_IMG_WPE_RDMA1 MTK_M4U_ID(8, 1)
+#define M4U_PORT_L8_IMG_WPE_WDMA0 MTK_M4U_ID(8, 2)
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_T1_A MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_T1_A MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_IMGCI_T1_A MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_SMTI_T1_A MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_TNCSTI_T1_A MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_TNCSTI_T4_A MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_YUVO_T1_A MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_TIMGO_T1_A MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_YUVO_T2_A MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMGI_T1_B MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_IMGBI_T1_B MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_IMGCI_T1_B MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_YUVO_T5_A MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_SMTI_T1_B MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_TNCSO_T1_A MTK_M4U_ID(9, 14)
+#define M4U_PORT_L9_IMG_SMTO_T1_A MTK_M4U_ID(9, 15)
+#define M4U_PORT_L9_IMG_TNCSTO_T1_A MTK_M4U_ID(9, 16)
+#define M4U_PORT_L9_IMG_YUVO_T2_B MTK_M4U_ID(9, 17)
+#define M4U_PORT_L9_IMG_YUVO_T5_B MTK_M4U_ID(9, 18)
+#define M4U_PORT_L9_IMG_SMTO_T1_B MTK_M4U_ID(9, 19)
+
+/* larb10 */
+#define M4U_PORT_L10_IMG_IMGI_D1_A MTK_M4U_ID(10, 0)
+#define M4U_PORT_L10_IMG_IMGCI_D1_A MTK_M4U_ID(10, 1)
+#define M4U_PORT_L10_IMG_DEPI_D1_A MTK_M4U_ID(10, 2)
+#define M4U_PORT_L10_IMG_DMGI_D1_A MTK_M4U_ID(10, 3)
+#define M4U_PORT_L10_IMG_VIPI_D1_A MTK_M4U_ID(10, 4)
+#define M4U_PORT_L10_IMG_TNRWI_D1_A MTK_M4U_ID(10, 5)
+#define M4U_PORT_L10_IMG_RECI_D1_A MTK_M4U_ID(10, 6)
+#define M4U_PORT_L10_IMG_SMTI_D1_A MTK_M4U_ID(10, 7)
+#define M4U_PORT_L10_IMG_SMTI_D6_A MTK_M4U_ID(10, 8)
+#define M4U_PORT_L10_IMG_PIMGI_P1_A MTK_M4U_ID(10, 9)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_A MTK_M4U_ID(10, 10)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_A MTK_M4U_ID(10, 11)
+#define M4U_PORT_L10_IMG_PIMGI_P1_B MTK_M4U_ID(10, 12)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_B MTK_M4U_ID(10, 13)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_B MTK_M4U_ID(10, 14)
+#define M4U_PORT_L10_IMG_IMG3O_D1_A MTK_M4U_ID(10, 15)
+#define M4U_PORT_L10_IMG_IMG4O_D1_A MTK_M4U_ID(10, 16)
+#define M4U_PORT_L10_IMG_IMG3CO_D1_A MTK_M4U_ID(10, 17)
+#define M4U_PORT_L10_IMG_FEO_D1_A MTK_M4U_ID(10, 18)
+#define M4U_PORT_L10_IMG_IMG2O_D1_A MTK_M4U_ID(10, 19)
+#define M4U_PORT_L10_IMG_TNRWO_D1_A MTK_M4U_ID(10, 20)
+#define M4U_PORT_L10_IMG_SMTO_D1_A MTK_M4U_ID(10, 21)
+#define M4U_PORT_L10_IMG_WROT_P1_A MTK_M4U_ID(10, 22)
+#define M4U_PORT_L10_IMG_WROT_P1_B MTK_M4U_ID(10, 23)
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA0_A MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA1_A MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_WPE_EIS_WDMA0_A MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA0_A MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA1_A MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_WPE_TNR_WDMA0_A MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ0_A MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ1_A MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ0_A MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ1_A MTK_M4U_ID(11, 9)
+
+/* larb12 */
+#define M4U_PORT_L12_IMG_FDVT_RDA MTK_M4U_ID(12, 0)
+#define M4U_PORT_L12_IMG_FDVT_RDB MTK_M4U_ID(12, 1)
+#define M4U_PORT_L12_IMG_FDVT_WRA MTK_M4U_ID(12, 2)
+#define M4U_PORT_L12_IMG_FDVT_WRB MTK_M4U_ID(12, 3)
+#define M4U_PORT_L12_IMG_ME_RDMA MTK_M4U_ID(12, 4)
+#define M4U_PORT_L12_IMG_ME_WDMA MTK_M4U_ID(12, 5)
+#define M4U_PORT_L12_IMG_DVS_RDMA MTK_M4U_ID(12, 6)
+#define M4U_PORT_L12_IMG_DVS_WDMA MTK_M4U_ID(12, 7)
+#define M4U_PORT_L12_IMG_DVP_RDMA MTK_M4U_ID(12, 8)
+#define M4U_PORT_L12_IMG_DVP_WDMA MTK_M4U_ID(12, 9)
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E1 MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E2 MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_GCAMSV_A_IMGO_0 MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_SCAMSV_A_IMGO_0 MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_GCAMSV_A_UFEO_0 MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_GCAMSV_B_UFEO_0 MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_PDAI_0 MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 9)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_GCAMSV_A_IMGO_1 MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_SCAMSV_A_IMGO_1 MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_0 MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_1 MTK_M4U_ID(14, 5)
+#define M4U_PORT_L14_CAM_IPUI MTK_M4U_ID(14, 6)
+#define M4U_PORT_L14_CAM_IPU2I MTK_M4U_ID(14, 7)
+#define M4U_PORT_L14_CAM_IPUO MTK_M4U_ID(14, 8)
+#define M4U_PORT_L14_CAM_IPU2O MTK_M4U_ID(14, 9)
+#define M4U_PORT_L14_CAM_IPU3O MTK_M4U_ID(14, 10)
+#define M4U_PORT_L14_CAM_GCAMSV_A_UFEO_1 MTK_M4U_ID(14, 11)
+#define M4U_PORT_L14_CAM_GCAMSV_B_UFEO_1 MTK_M4U_ID(14, 12)
+#define M4U_PORT_L14_CAM_PDAI_1 MTK_M4U_ID(14, 13)
+#define M4U_PORT_L14_CAM_PDAO MTK_M4U_ID(14, 14)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1 MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_CQI_R1 MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R2 MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1 MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_LSCI_R1 MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_RAWI_R2 MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R3 MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_UFDI_R2 MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_UFDI_R3 MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_RAWI_R4 MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_RAWI_R5 MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_AAI_R1 MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_FHO_R1 MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_AAO_R1 MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_TSFSO_R1 MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_FLKO_R1 MTK_M4U_ID(16, 15)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_YUVO_R1 MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_YUVO_R3 MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_YUVCO_R1 MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_YUVO_R2 MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_RZH1N2TO_R1 MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_DRZS4NO_R1 MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_TNCSO_R1 MTK_M4U_ID(17, 6)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_CCUI MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_CCUO MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CCUI2 MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_CCUO2 MTK_M4U_ID(18, 3)
+
+/* larb19 */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(19, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(19, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(19, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(19, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(19, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(19, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(19, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(19, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(19, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(19, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA0 MTK_M4U_ID(19, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA0 MTK_M4U_ID(19, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(19, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(19, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(19, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA1 MTK_M4U_ID(19, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA1 MTK_M4U_ID(19, 19)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(19, 20)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(19, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(19, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(19, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(19, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(19, 25)
+#define M4U_PORT_L19_VENC_SUB_R_CHROMA MTK_M4U_ID(19, 26)
+
+/* larb20 */
+#define M4U_PORT_L20_VENC_RCPU MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_VENC_REC MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_VENC_BSDMA MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_VENC_SV_COMV MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_VENC_RD_COMV MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_VENC_NBM_RDMA MTK_M4U_ID(20, 5)
+#define M4U_PORT_L20_VENC_NBM_RDMA_LITE MTK_M4U_ID(20, 6)
+#define M4U_PORT_L20_JPGENC_Y_RDMA MTK_M4U_ID(20, 7)
+#define M4U_PORT_L20_JPGENC_C_RDMA MTK_M4U_ID(20, 8)
+#define M4U_PORT_L20_JPGENC_Q_TABLE MTK_M4U_ID(20, 9)
+#define M4U_PORT_L20_VENC_SUB_W_LUMA MTK_M4U_ID(20, 10)
+#define M4U_PORT_L20_VENC_FCS_NBM_RDMA MTK_M4U_ID(20, 11)
+#define M4U_PORT_L20_JPGENC_BSDMA MTK_M4U_ID(20, 12)
+#define M4U_PORT_L20_JPGDEC_WDMA0 MTK_M4U_ID(20, 13)
+#define M4U_PORT_L20_JPGDEC_BSDMA0 MTK_M4U_ID(20, 14)
+#define M4U_PORT_L20_VENC_NBM_WDMA MTK_M4U_ID(20, 15)
+#define M4U_PORT_L20_VENC_NBM_WDMA_LITE MTK_M4U_ID(20, 16)
+#define M4U_PORT_L20_VENC_FCS_NBM_WDMA MTK_M4U_ID(20, 17)
+#define M4U_PORT_L20_JPGDEC_WDMA1 MTK_M4U_ID(20, 18)
+#define M4U_PORT_L20_JPGDEC_BSDMA1 MTK_M4U_ID(20, 19)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(20, 20)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(20, 21)
+#define M4U_PORT_L20_VENC_CUR_LUMA MTK_M4U_ID(20, 22)
+#define M4U_PORT_L20_VENC_CUR_CHROMA MTK_M4U_ID(20, 23)
+#define M4U_PORT_L20_VENC_REF_LUMA MTK_M4U_ID(20, 24)
+#define M4U_PORT_L20_VENC_REF_CHROMA MTK_M4U_ID(20, 25)
+#define M4U_PORT_L20_VENC_SUB_R_CHROMA MTK_M4U_ID(20, 26)
+
+/* larb21 */
+#define M4U_PORT_L21_VDEC_MC_EXT MTK_M4U_ID(21, 0)
+#define M4U_PORT_L21_VDEC_UFO_EXT MTK_M4U_ID(21, 1)
+#define M4U_PORT_L21_VDEC_PP_EXT MTK_M4U_ID(21, 2)
+#define M4U_PORT_L21_VDEC_PRED_RD_EXT MTK_M4U_ID(21, 3)
+#define M4U_PORT_L21_VDEC_PRED_WR_EXT MTK_M4U_ID(21, 4)
+#define M4U_PORT_L21_VDEC_PPWRAP_EXT MTK_M4U_ID(21, 5)
+#define M4U_PORT_L21_VDEC_TILE_EXT MTK_M4U_ID(21, 6)
+#define M4U_PORT_L21_VDEC_VLD_EXT MTK_M4U_ID(21, 7)
+#define M4U_PORT_L21_VDEC_VLD2_EXT MTK_M4U_ID(21, 8)
+#define M4U_PORT_L21_VDEC_AVC_MV_EXT MTK_M4U_ID(21, 9)
+
+/* larb22 */
+#define M4U_PORT_L22_VDEC_MC_EXT MTK_M4U_ID(22, 0)
+#define M4U_PORT_L22_VDEC_UFO_EXT MTK_M4U_ID(22, 1)
+#define M4U_PORT_L22_VDEC_PP_EXT MTK_M4U_ID(22, 2)
+#define M4U_PORT_L22_VDEC_PRED_RD_EXT MTK_M4U_ID(22, 3)
+#define M4U_PORT_L22_VDEC_PRED_WR_EXT MTK_M4U_ID(22, 4)
+#define M4U_PORT_L22_VDEC_PPWRAP_EXT MTK_M4U_ID(22, 5)
+#define M4U_PORT_L22_VDEC_TILE_EXT MTK_M4U_ID(22, 6)
+#define M4U_PORT_L22_VDEC_VLD_EXT MTK_M4U_ID(22, 7)
+#define M4U_PORT_L22_VDEC_VLD2_EXT MTK_M4U_ID(22, 8)
+#define M4U_PORT_L22_VDEC_AVC_MV_EXT MTK_M4U_ID(22, 9)
+
+/* larb23 */
+#define M4U_PORT_L23_VDEC_UFO_ENC_EXT MTK_M4U_ID(23, 0)
+#define M4U_PORT_L23_VDEC_RDMA_EXT MTK_M4U_ID(23, 1)
+
+/* larb24 */
+#define M4U_PORT_L24_VDEC_LAT0_VLD_EXT MTK_M4U_ID(24, 0)
+#define M4U_PORT_L24_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(24, 1)
+#define M4U_PORT_L24_VDEC_LAT0_AVC_MC_EXT MTK_M4U_ID(24, 2)
+#define M4U_PORT_L24_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(24, 3)
+#define M4U_PORT_L24_VDEC_LAT0_TILE_EXT MTK_M4U_ID(24, 4)
+#define M4U_PORT_L24_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(24, 5)
+#define M4U_PORT_L24_VDEC_LAT1_VLD_EXT MTK_M4U_ID(24, 6)
+#define M4U_PORT_L24_VDEC_LAT1_VLD2_EXT MTK_M4U_ID(24, 7)
+#define M4U_PORT_L24_VDEC_LAT1_AVC_MC_EXT MTK_M4U_ID(24, 8)
+#define M4U_PORT_L24_VDEC_LAT1_PRED_RD_EXT MTK_M4U_ID(24, 9)
+#define M4U_PORT_L24_VDEC_LAT1_TILE_EXT MTK_M4U_ID(24, 10)
+#define M4U_PORT_L24_VDEC_LAT1_WDMA_EXT MTK_M4U_ID(24, 11)
+
+/* larb25 */
+#define M4U_PORT_L25_CAM_MRAW0_LSCI_M1 MTK_M4U_ID(25, 0)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M1 MTK_M4U_ID(25, 1)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M2 MTK_M4U_ID(25, 2)
+#define M4U_PORT_L25_CAM_MRAW0_IMGO_M1 MTK_M4U_ID(25, 3)
+#define M4U_PORT_L25_CAM_MRAW0_IMGBO_M1 MTK_M4U_ID(25, 4)
+#define M4U_PORT_L25_CAM_MRAW2_LSCI_M1 MTK_M4U_ID(25, 5)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M1 MTK_M4U_ID(25, 6)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M2 MTK_M4U_ID(25, 7)
+#define M4U_PORT_L25_CAM_MRAW2_IMGO_M1 MTK_M4U_ID(25, 8)
+#define M4U_PORT_L25_CAM_MRAW2_IMGBO_M1 MTK_M4U_ID(25, 9)
+#define M4U_PORT_L25_CAM_MRAW0_AFO_M1 MTK_M4U_ID(25, 10)
+#define M4U_PORT_L25_CAM_MRAW2_AFO_M1 MTK_M4U_ID(25, 11)
+
+/* larb26 */
+#define M4U_PORT_L26_CAM_MRAW1_LSCI_M1 MTK_M4U_ID(26, 0)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M1 MTK_M4U_ID(26, 1)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M2 MTK_M4U_ID(26, 2)
+#define M4U_PORT_L26_CAM_MRAW1_IMGO_M1 MTK_M4U_ID(26, 3)
+#define M4U_PORT_L26_CAM_MRAW1_IMGBO_M1 MTK_M4U_ID(26, 4)
+#define M4U_PORT_L26_CAM_MRAW3_LSCI_M1 MTK_M4U_ID(26, 5)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M1 MTK_M4U_ID(26, 6)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M2 MTK_M4U_ID(26, 7)
+#define M4U_PORT_L26_CAM_MRAW3_IMGO_M1 MTK_M4U_ID(26, 8)
+#define M4U_PORT_L26_CAM_MRAW3_IMGBO_M1 MTK_M4U_ID(26, 9)
+#define M4U_PORT_L26_CAM_MRAW1_AFO_M1 MTK_M4U_ID(26, 10)
+#define M4U_PORT_L26_CAM_MRAW3_AFO_M1 MTK_M4U_ID(26, 11)
+
+/* larb27 */
+#define M4U_PORT_L27_CAM_IMGO_R1 MTK_M4U_ID(27, 0)
+#define M4U_PORT_L27_CAM_CQI_R1 MTK_M4U_ID(27, 1)
+#define M4U_PORT_L27_CAM_CQI_R2 MTK_M4U_ID(27, 2)
+#define M4U_PORT_L27_CAM_BPCI_R1 MTK_M4U_ID(27, 3)
+#define M4U_PORT_L27_CAM_LSCI_R1 MTK_M4U_ID(27, 4)
+#define M4U_PORT_L27_CAM_RAWI_R2 MTK_M4U_ID(27, 5)
+#define M4U_PORT_L27_CAM_RAWI_R3 MTK_M4U_ID(27, 6)
+#define M4U_PORT_L27_CAM_UFDI_R2 MTK_M4U_ID(27, 7)
+#define M4U_PORT_L27_CAM_UFDI_R3 MTK_M4U_ID(27, 8)
+#define M4U_PORT_L27_CAM_RAWI_R4 MTK_M4U_ID(27, 9)
+#define M4U_PORT_L27_CAM_RAWI_R5 MTK_M4U_ID(27, 10)
+#define M4U_PORT_L27_CAM_AAI_R1 MTK_M4U_ID(27, 11)
+#define M4U_PORT_L27_CAM_FHO_R1 MTK_M4U_ID(27, 12)
+#define M4U_PORT_L27_CAM_AAO_R1 MTK_M4U_ID(27, 13)
+#define M4U_PORT_L27_CAM_TSFSO_R1 MTK_M4U_ID(27, 14)
+#define M4U_PORT_L27_CAM_FLKO_R1 MTK_M4U_ID(27, 15)
+
+/* larb28 */
+#define M4U_PORT_L28_CAM_YUVO_R1 MTK_M4U_ID(28, 0)
+#define M4U_PORT_L28_CAM_YUVO_R3 MTK_M4U_ID(28, 1)
+#define M4U_PORT_L28_CAM_YUVCO_R1 MTK_M4U_ID(28, 2)
+#define M4U_PORT_L28_CAM_YUVO_R2 MTK_M4U_ID(28, 3)
+#define M4U_PORT_L28_CAM_RZH1N2TO_R1 MTK_M4U_ID(28, 4)
+#define M4U_PORT_L28_CAM_DRZS4NO_R1 MTK_M4U_ID(28, 5)
+#define M4U_PORT_L28_CAM_TNCSO_R1 MTK_M4U_ID(28, 6)
+
+/* Infra IOMMU ports */
+/* PCIe1: read: BIT16; write: BIT17. */
+#define IOMMU_PORT_INFRA_PCIE1 MTK_IFAIOMMU_PERI_ID(16)
+/* PCIe0: read: BIT18; write: BIT19. */
+#define IOMMU_PORT_INFRA_PCIE0 MTK_IFAIOMMU_PERI_ID(18)
+#define IOMMU_PORT_INFRA_SSUSB_P3_R MTK_IFAIOMMU_PERI_ID(20)
+#define IOMMU_PORT_INFRA_SSUSB_P3_W MTK_IFAIOMMU_PERI_ID(21)
+#define IOMMU_PORT_INFRA_SSUSB_P2_R MTK_IFAIOMMU_PERI_ID(22)
+#define IOMMU_PORT_INFRA_SSUSB_P2_W MTK_IFAIOMMU_PERI_ID(23)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_R MTK_IFAIOMMU_PERI_ID(24)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_W MTK_IFAIOMMU_PERI_ID(25)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_R MTK_IFAIOMMU_PERI_ID(26)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_W MTK_IFAIOMMU_PERI_ID(27)
+#define IOMMU_PORT_INFRA_SSUSB2_R MTK_IFAIOMMU_PERI_ID(28)
+#define IOMMU_PORT_INFRA_SSUSB2_W MTK_IFAIOMMU_PERI_ID(29)
+#define IOMMU_PORT_INFRA_SSUSB_R MTK_IFAIOMMU_PERI_ID(30)
+#define IOMMU_PORT_INFRA_SSUSB_W MTK_IFAIOMMU_PERI_ID(31)
+
+#endif
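
For the PCIe entries in the infra-IOMMU list above, a single port ID covers both directions: the comments pair each read enable bit with the write bit one position higher (PCIe1: read BIT16, write BIT17). A hedged sketch of that convention; the function names are assumptions, not part of the binding.

#include <stdint.h>

/* Read enable for an infra PCIe port, e.g. port 16 -> BIT(16). */
static inline uint32_t infra_pcie_read_mask(unsigned int port)
{
	return 1u << port;
}

/* The write enable sits one bit above the read enable, per the comments. */
static inline uint32_t infra_pcie_write_mask(unsigned int port)
{
	return 1u << (port + 1);
}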
diff --git a/include/dt-bindings/memory/mtk-memory-port.h b/include/dt-bindings/memory/mtk-memory-port.h
new file mode 100644
index 000000000000..2f68a0511a25
--- /dev/null
+++ b/include/dt-bindings/memory/mtk-memory-port.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef __DT_BINDINGS_MEMORY_MTK_MEMORY_PORT_H_
+#define __DT_BINDINGS_MEMORY_MTK_MEMORY_PORT_H_
+
+#define MTK_LARB_NR_MAX 32
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x1f)
+#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+
+#define MTK_IFAIOMMU_PERI_ID(port) MTK_M4U_ID(0, port)
+
+#endif
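
These three macros round-trip by construction: the low five bits carry the port and the next five the larb, so up to MTK_LARB_NR_MAX (32) larbs with 32 ports each can be encoded. A stand-alone check, illustrative only:

#include <assert.h>

#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))
#define MTK_M4U_TO_LARB(id)	(((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id)	((id) & 0x1f)

int main(void)
{
	int id = MTK_M4U_ID(13, 9);	/* e.g. the mt8192 larb13 CCUI port */

	assert(MTK_M4U_TO_LARB(id) == 13);
	assert(MTK_M4U_TO_PORT(id) == 9);
	return 0;
}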
diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h
index 186e6b7e9b35..7e73bb400eca 100644
--- a/include/dt-bindings/memory/tegra124-mc.h
+++ b/include/dt-bindings/memory/tegra124-mc.h
@@ -54,4 +54,72 @@
#define TEGRA124_MC_RESET_ISP2B 22
#define TEGRA124_MC_RESET_GPU 23
+#define TEGRA124_MC_PTCR 0
+#define TEGRA124_MC_DISPLAY0A 1
+#define TEGRA124_MC_DISPLAY0AB 2
+#define TEGRA124_MC_DISPLAY0B 3
+#define TEGRA124_MC_DISPLAY0BB 4
+#define TEGRA124_MC_DISPLAY0C 5
+#define TEGRA124_MC_DISPLAY0CB 6
+#define TEGRA124_MC_AFIR 14
+#define TEGRA124_MC_AVPCARM7R 15
+#define TEGRA124_MC_DISPLAYHC 16
+#define TEGRA124_MC_DISPLAYHCB 17
+#define TEGRA124_MC_HDAR 21
+#define TEGRA124_MC_HOST1XDMAR 22
+#define TEGRA124_MC_HOST1XR 23
+#define TEGRA124_MC_MSENCSRD 28
+#define TEGRA124_MC_PPCSAHBDMAR 29
+#define TEGRA124_MC_PPCSAHBSLVR 30
+#define TEGRA124_MC_SATAR 31
+#define TEGRA124_MC_VDEBSEVR 34
+#define TEGRA124_MC_VDEMBER 35
+#define TEGRA124_MC_VDEMCER 36
+#define TEGRA124_MC_VDETPER 37
+#define TEGRA124_MC_MPCORELPR 38
+#define TEGRA124_MC_MPCORER 39
+#define TEGRA124_MC_MSENCSWR 43
+#define TEGRA124_MC_AFIW 49
+#define TEGRA124_MC_AVPCARM7W 50
+#define TEGRA124_MC_HDAW 53
+#define TEGRA124_MC_HOST1XW 54
+#define TEGRA124_MC_MPCORELPW 56
+#define TEGRA124_MC_MPCOREW 57
+#define TEGRA124_MC_PPCSAHBDMAW 59
+#define TEGRA124_MC_PPCSAHBSLVW 60
+#define TEGRA124_MC_SATAW 61
+#define TEGRA124_MC_VDEBSEVW 62
+#define TEGRA124_MC_VDEDBGW 63
+#define TEGRA124_MC_VDEMBEW 64
+#define TEGRA124_MC_VDETPMW 65
+#define TEGRA124_MC_ISPRA 68
+#define TEGRA124_MC_ISPWA 70
+#define TEGRA124_MC_ISPWB 71
+#define TEGRA124_MC_XUSB_HOSTR 74
+#define TEGRA124_MC_XUSB_HOSTW 75
+#define TEGRA124_MC_XUSB_DEVR 76
+#define TEGRA124_MC_XUSB_DEVW 77
+#define TEGRA124_MC_ISPRAB 78
+#define TEGRA124_MC_ISPWAB 80
+#define TEGRA124_MC_ISPWBB 81
+#define TEGRA124_MC_TSECSRD 84
+#define TEGRA124_MC_TSECSWR 85
+#define TEGRA124_MC_A9AVPSCR 86
+#define TEGRA124_MC_A9AVPSCW 87
+#define TEGRA124_MC_GPUSRD 88
+#define TEGRA124_MC_GPUSWR 89
+#define TEGRA124_MC_DISPLAYT 90
+#define TEGRA124_MC_SDMMCRA 96
+#define TEGRA124_MC_SDMMCRAA 97
+#define TEGRA124_MC_SDMMCR 98
+#define TEGRA124_MC_SDMMCRAB 99
+#define TEGRA124_MC_SDMMCWA 100
+#define TEGRA124_MC_SDMMCWAA 101
+#define TEGRA124_MC_SDMMCW 102
+#define TEGRA124_MC_SDMMCWAB 103
+#define TEGRA124_MC_VICSRD 108
+#define TEGRA124_MC_VICSWR 109
+#define TEGRA124_MC_VIW 114
+#define TEGRA124_MC_DISPLAYD 115
+
#endif
diff --git a/include/dt-bindings/memory/tegra20-mc.h b/include/dt-bindings/memory/tegra20-mc.h
index 35e131eee198..6f8829508ad0 100644
--- a/include/dt-bindings/memory/tegra20-mc.h
+++ b/include/dt-bindings/memory/tegra20-mc.h
@@ -18,4 +18,57 @@
#define TEGRA20_MC_RESET_VDE 13
#define TEGRA20_MC_RESET_VI 14
+#define TEGRA20_MC_DISPLAY0A 0
+#define TEGRA20_MC_DISPLAY0AB 1
+#define TEGRA20_MC_DISPLAY0B 2
+#define TEGRA20_MC_DISPLAY0BB 3
+#define TEGRA20_MC_DISPLAY0C 4
+#define TEGRA20_MC_DISPLAY0CB 5
+#define TEGRA20_MC_DISPLAY1B 6
+#define TEGRA20_MC_DISPLAY1BB 7
+#define TEGRA20_MC_EPPUP 8
+#define TEGRA20_MC_G2PR 9
+#define TEGRA20_MC_G2SR 10
+#define TEGRA20_MC_MPEUNIFBR 11
+#define TEGRA20_MC_VIRUV 12
+#define TEGRA20_MC_AVPCARM7R 13
+#define TEGRA20_MC_DISPLAYHC 14
+#define TEGRA20_MC_DISPLAYHCB 15
+#define TEGRA20_MC_FDCDRD 16
+#define TEGRA20_MC_G2DR 17
+#define TEGRA20_MC_HOST1XDMAR 18
+#define TEGRA20_MC_HOST1XR 19
+#define TEGRA20_MC_IDXSRD 20
+#define TEGRA20_MC_MPCORER 21
+#define TEGRA20_MC_MPE_IPRED 22
+#define TEGRA20_MC_MPEAMEMRD 23
+#define TEGRA20_MC_MPECSRD 24
+#define TEGRA20_MC_PPCSAHBDMAR 25
+#define TEGRA20_MC_PPCSAHBSLVR 26
+#define TEGRA20_MC_TEXSRD 27
+#define TEGRA20_MC_VDEBSEVR 28
+#define TEGRA20_MC_VDEMBER 29
+#define TEGRA20_MC_VDEMCER 30
+#define TEGRA20_MC_VDETPER 31
+#define TEGRA20_MC_EPPU 32
+#define TEGRA20_MC_EPPV 33
+#define TEGRA20_MC_EPPY 34
+#define TEGRA20_MC_MPEUNIFBW 35
+#define TEGRA20_MC_VIWSB 36
+#define TEGRA20_MC_VIWU 37
+#define TEGRA20_MC_VIWV 38
+#define TEGRA20_MC_VIWY 39
+#define TEGRA20_MC_G2DW 40
+#define TEGRA20_MC_AVPCARM7W 41
+#define TEGRA20_MC_FDCDWR 42
+#define TEGRA20_MC_HOST1XW 43
+#define TEGRA20_MC_ISPW 44
+#define TEGRA20_MC_MPCOREW 45
+#define TEGRA20_MC_MPECSWR 46
+#define TEGRA20_MC_PPCSAHBDMAW 47
+#define TEGRA20_MC_PPCSAHBSLVW 48
+#define TEGRA20_MC_VDEBSEVW 49
+#define TEGRA20_MC_VDEMBEW 50
+#define TEGRA20_MC_VDETPMW 51
+
#endif
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
index cacf05617e03..5e082547f179 100644
--- a/include/dt-bindings/memory/tegra210-mc.h
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -33,6 +33,16 @@
#define TEGRA_SWGROUP_AXIAP 28
#define TEGRA_SWGROUP_ETR 29
#define TEGRA_SWGROUP_TSECB 30
+#define TEGRA_SWGROUP_NV 31
+#define TEGRA_SWGROUP_NV2 32
+#define TEGRA_SWGROUP_PPCS1 33
+#define TEGRA_SWGROUP_DC1 34
+#define TEGRA_SWGROUP_PPCS2 35
+#define TEGRA_SWGROUP_HC1 36
+#define TEGRA_SWGROUP_SE1 37
+#define TEGRA_SWGROUP_TSEC1 38
+#define TEGRA_SWGROUP_TSECB1 39
+#define TEGRA_SWGROUP_NVDEC1 40
#define TEGRA210_MC_RESET_AFI 0
#define TEGRA210_MC_RESET_AVPC 1
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
new file mode 100644
index 000000000000..6e60d55491b3
--- /dev/null
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_MEMORY_TEGRA234_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA234_MC_H
+
+/* special clients */
+#define TEGRA234_SID_INVALID 0x00
+#define TEGRA234_SID_PASSTHROUGH 0x7f
+
+/* ISO stream IDs */
+#define TEGRA234_SID_ISO_NVDISPLAY 0x01
+#define TEGRA234_SID_ISO_VI 0x02
+#define TEGRA234_SID_ISO_VIFALC 0x03
+#define TEGRA234_SID_ISO_VI2 0x04
+#define TEGRA234_SID_ISO_VI2FALC 0x05
+#define TEGRA234_SID_ISO_VI_VM2 0x06
+#define TEGRA234_SID_ISO_VI2_VM2 0x07
+
+/* NISO0 stream IDs */
+#define TEGRA234_SID_AON 0x01
+#define TEGRA234_SID_APE 0x02
+#define TEGRA234_SID_HDA 0x03
+#define TEGRA234_SID_GPCDMA 0x04
+#define TEGRA234_SID_ETR 0x05
+#define TEGRA234_SID_MGBE 0x06
+#define TEGRA234_SID_NVDISPLAY 0x07
+#define TEGRA234_SID_DCE 0x08
+#define TEGRA234_SID_PSC 0x09
+#define TEGRA234_SID_RCE 0x0a
+#define TEGRA234_SID_SCE 0x0b
+#define TEGRA234_SID_UFSHC 0x0c
+#define TEGRA234_SID_APE_1 0x0d
+#define TEGRA234_SID_GPCDMA_1 0x0e
+#define TEGRA234_SID_GPCDMA_2 0x0f
+#define TEGRA234_SID_GPCDMA_3 0x10
+#define TEGRA234_SID_GPCDMA_4 0x11
+#define TEGRA234_SID_PCIE0 0x12
+#define TEGRA234_SID_PCIE4 0x13
+#define TEGRA234_SID_PCIE5 0x14
+#define TEGRA234_SID_PCIE6 0x15
+#define TEGRA234_SID_RCE_VM2 0x16
+#define TEGRA234_SID_RCE_SERVER 0x17
+#define TEGRA234_SID_SMMU_TEST 0x18
+#define TEGRA234_SID_UFS_1 0x19
+#define TEGRA234_SID_UFS_2 0x1a
+#define TEGRA234_SID_UFS_3 0x1b
+#define TEGRA234_SID_UFS_4 0x1c
+#define TEGRA234_SID_UFS_5 0x1d
+#define TEGRA234_SID_UFS_6 0x1e
+#define TEGRA234_SID_PCIE9 0x1f
+#define TEGRA234_SID_VSE_GPCDMA_VM0 0x20
+#define TEGRA234_SID_VSE_GPCDMA_VM1 0x21
+#define TEGRA234_SID_VSE_GPCDMA_VM2 0x22
+#define TEGRA234_SID_NVDLA1 0x23
+#define TEGRA234_SID_NVENC 0x24
+#define TEGRA234_SID_NVJPG1 0x25
+#define TEGRA234_SID_OFA 0x26
+#define TEGRA234_SID_MGBE_VF1 0x49
+#define TEGRA234_SID_MGBE_VF2 0x4a
+#define TEGRA234_SID_MGBE_VF3 0x4b
+#define TEGRA234_SID_MGBE_VF4 0x4c
+#define TEGRA234_SID_MGBE_VF5 0x4d
+#define TEGRA234_SID_MGBE_VF6 0x4e
+#define TEGRA234_SID_MGBE_VF7 0x4f
+#define TEGRA234_SID_MGBE_VF8 0x50
+#define TEGRA234_SID_MGBE_VF9 0x51
+#define TEGRA234_SID_MGBE_VF10 0x52
+#define TEGRA234_SID_MGBE_VF11 0x53
+#define TEGRA234_SID_MGBE_VF12 0x54
+#define TEGRA234_SID_MGBE_VF13 0x55
+#define TEGRA234_SID_MGBE_VF14 0x56
+#define TEGRA234_SID_MGBE_VF15 0x57
+#define TEGRA234_SID_MGBE_VF16 0x58
+#define TEGRA234_SID_MGBE_VF17 0x59
+#define TEGRA234_SID_MGBE_VF18 0x5a
+#define TEGRA234_SID_MGBE_VF19 0x5b
+#define TEGRA234_SID_MGBE_VF20 0x5c
+#define TEGRA234_SID_APE_2 0x5e
+#define TEGRA234_SID_APE_3 0x5f
+#define TEGRA234_SID_UFS_7 0x60
+#define TEGRA234_SID_UFS_8 0x61
+#define TEGRA234_SID_UFS_9 0x62
+#define TEGRA234_SID_UFS_10 0x63
+#define TEGRA234_SID_UFS_11 0x64
+#define TEGRA234_SID_UFS_12 0x65
+#define TEGRA234_SID_UFS_13 0x66
+#define TEGRA234_SID_UFS_14 0x67
+#define TEGRA234_SID_UFS_15 0x68
+#define TEGRA234_SID_UFS_16 0x69
+#define TEGRA234_SID_UFS_17 0x6a
+#define TEGRA234_SID_UFS_18 0x6b
+#define TEGRA234_SID_UFS_19 0x6c
+#define TEGRA234_SID_UFS_20 0x6d
+#define TEGRA234_SID_GPCDMA_5 0x6e
+#define TEGRA234_SID_GPCDMA_6 0x6f
+#define TEGRA234_SID_GPCDMA_7 0x70
+#define TEGRA234_SID_GPCDMA_8 0x71
+#define TEGRA234_SID_GPCDMA_9 0x72
+
+/* NISO1 stream IDs */
+#define TEGRA234_SID_SDMMC1A 0x01
+#define TEGRA234_SID_SDMMC4 0x02
+#define TEGRA234_SID_EQOS 0x03
+#define TEGRA234_SID_HWMP_PMA 0x04
+#define TEGRA234_SID_PCIE1 0x05
+#define TEGRA234_SID_PCIE2 0x06
+#define TEGRA234_SID_PCIE3 0x07
+#define TEGRA234_SID_PCIE7 0x08
+#define TEGRA234_SID_PCIE8 0x09
+#define TEGRA234_SID_PCIE10 0x0b
+#define TEGRA234_SID_QSPI0 0x0c
+#define TEGRA234_SID_QSPI1 0x0d
+#define TEGRA234_SID_XUSB_HOST 0x0e
+#define TEGRA234_SID_XUSB_DEV 0x0f
+#define TEGRA234_SID_BPMP 0x10
+#define TEGRA234_SID_FSI 0x11
+#define TEGRA234_SID_PVA0_VM0 0x12
+#define TEGRA234_SID_PVA0_VM1 0x13
+#define TEGRA234_SID_PVA0_VM2 0x14
+#define TEGRA234_SID_PVA0_VM3 0x15
+#define TEGRA234_SID_PVA0_VM4 0x16
+#define TEGRA234_SID_PVA0_VM5 0x17
+#define TEGRA234_SID_PVA0_VM6 0x18
+#define TEGRA234_SID_PVA0_VM7 0x19
+#define TEGRA234_SID_XUSB_VF0 0x1a
+#define TEGRA234_SID_XUSB_VF1 0x1b
+#define TEGRA234_SID_XUSB_VF2 0x1c
+#define TEGRA234_SID_XUSB_VF3 0x1d
+#define TEGRA234_SID_EQOS_VF1 0x1e
+#define TEGRA234_SID_EQOS_VF2 0x1f
+#define TEGRA234_SID_EQOS_VF3 0x20
+#define TEGRA234_SID_EQOS_VF4 0x21
+#define TEGRA234_SID_ISP_VM2 0x22
+#define TEGRA234_SID_HOST1X 0x27
+#define TEGRA234_SID_ISP 0x28
+#define TEGRA234_SID_NVDEC 0x29
+#define TEGRA234_SID_NVJPG 0x2a
+#define TEGRA234_SID_NVDLA0 0x2b
+#define TEGRA234_SID_PVA0 0x2c
+#define TEGRA234_SID_SES_SE0 0x2d
+#define TEGRA234_SID_SES_SE1 0x2e
+#define TEGRA234_SID_SES_SE2 0x2f
+#define TEGRA234_SID_SEU1_SE0 0x30
+#define TEGRA234_SID_SEU1_SE1 0x31
+#define TEGRA234_SID_SEU1_SE2 0x32
+#define TEGRA234_SID_TSEC 0x33
+#define TEGRA234_SID_VIC 0x34
+#define TEGRA234_SID_HC_VM0 0x3d
+#define TEGRA234_SID_HC_VM1 0x3e
+#define TEGRA234_SID_HC_VM2 0x3f
+#define TEGRA234_SID_HC_VM3 0x40
+#define TEGRA234_SID_HC_VM4 0x41
+#define TEGRA234_SID_HC_VM5 0x42
+#define TEGRA234_SID_HC_VM6 0x43
+#define TEGRA234_SID_HC_VM7 0x44
+#define TEGRA234_SID_SE_VM0 0x45
+#define TEGRA234_SID_SE_VM1 0x46
+#define TEGRA234_SID_SE_VM2 0x47
+#define TEGRA234_SID_ISPFALC 0x48
+#define TEGRA234_SID_NISO1_SMMU_TEST 0x49
+#define TEGRA234_SID_TSEC_VM0 0x4a
+
+/* Shared stream IDs */
+#define TEGRA234_SID_HOST1X_CTX0 0x35
+#define TEGRA234_SID_HOST1X_CTX1 0x36
+#define TEGRA234_SID_HOST1X_CTX2 0x37
+#define TEGRA234_SID_HOST1X_CTX3 0x38
+#define TEGRA234_SID_HOST1X_CTX4 0x39
+#define TEGRA234_SID_HOST1X_CTX5 0x3a
+#define TEGRA234_SID_HOST1X_CTX6 0x3b
+#define TEGRA234_SID_HOST1X_CTX7 0x3c
+
+/*
+ * memory client IDs
+ */
+
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA234_MEMORY_CLIENT_PTCR 0x00
+/* MSS internal memqual MIU7 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU7R 0x01
+/* MSS internal memqual MIU7 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU7W 0x02
+/* MSS internal memqual MIU8 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU8R 0x03
+/* MSS internal memqual MIU8 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU8W 0x04
+/* MSS internal memqual MIU9 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU9R 0x05
+/* MSS internal memqual MIU9 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU9W 0x06
+/* MSS internal memqual MIU10 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU10R 0x07
+/* MSS internal memqual MIU10 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU10W 0x08
+/* MSS internal memqual MIU11 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU11R 0x09
+/* MSS internal memqual MIU11 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU11W 0x0a
+/* MSS internal memqual MIU12 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU12R 0x0b
+/* MSS internal memqual MIU12 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU12W 0x0c
+/* MSS internal memqual MIU13 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU13R 0x0d
+/* MSS internal memqual MIU13 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU13W 0x0e
+#define TEGRA234_MEMORY_CLIENT_NVL5RHP 0x13
+#define TEGRA234_MEMORY_CLIENT_NVL5R 0x14
+/* High-definition audio (HDA) read clients */
+#define TEGRA234_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data read clients */
+#define TEGRA234_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA234_MEMORY_CLIENT_NVL5W 0x17
+#define TEGRA234_MEMORY_CLIENT_NVL6RHP 0x18
+#define TEGRA234_MEMORY_CLIENT_NVL6R 0x19
+#define TEGRA234_MEMORY_CLIENT_NVL6W 0x1a
+#define TEGRA234_MEMORY_CLIENT_NVL7RHP 0x1b
+#define TEGRA234_MEMORY_CLIENT_NVENCSRD 0x1c
+#define TEGRA234_MEMORY_CLIENT_NVL7R 0x1d
+#define TEGRA234_MEMORY_CLIENT_NVL7W 0x1e
+#define TEGRA234_MEMORY_CLIENT_NVL8RHP 0x20
+#define TEGRA234_MEMORY_CLIENT_NVL8R 0x21
+#define TEGRA234_MEMORY_CLIENT_NVL8W 0x22
+#define TEGRA234_MEMORY_CLIENT_NVL9RHP 0x23
+#define TEGRA234_MEMORY_CLIENT_NVL9R 0x24
+#define TEGRA234_MEMORY_CLIENT_NVL9W 0x25
+/* PCIE6 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28
+/* PCIE6 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AW 0x29
+/* PCIE7 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AR 0x2a
+#define TEGRA234_MEMORY_CLIENT_NVENCSWR 0x2b
+/* DLA0ARDB read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDB 0x2c
+/* DLA0ARDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDB1 0x2d
+/* DLA0 writes */
+#define TEGRA234_MEMORY_CLIENT_DLA0WRB 0x2e
+/* DLA1ARDB read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDB 0x2f
+/* PCIE7 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AW 0x30
+/* PCIE8 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE8AR 0x32
+/* High-definition audio (HDA) write clients */
+#define TEGRA234_MEMORY_CLIENT_HDAW 0x35
+/* Writes from Cortex-A9 4 CPU cores via the L2 cache */
+#define TEGRA234_MEMORY_CLIENT_MPCOREW 0x39
+/* OFAA client */
+#define TEGRA234_MEMORY_CLIENT_OFAR1 0x3a
+/* PCIE8 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE8AW 0x3b
+/* PCIE9 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE9AR 0x3c
+/* PCIE6r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AR1 0x3d
+/* PCIE9 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE9AW 0x3e
+/* PCIE10 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AR 0x3f
+/* PCIE10 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AW 0x40
+/* ISP read client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPRA 0x44
+/* ISP read client 1 for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPFALR 0x45
+/* ISP Write client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPWA 0x46
+/* ISP Write client Crossbar B */
+#define TEGRA234_MEMORY_CLIENT_ISPWB 0x47
+/* PCIE10r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48
+/* PCIE7r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49
+/* XUSB_HOST read clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST write clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB read clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV write clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* TSEC Memory Return Data Client Description */
+#define TEGRA234_MEMORY_CLIENT_TSECSRD 0x54
+/* TSEC Memory Write Client Description */
+#define TEGRA234_MEMORY_CLIENT_TSECSWR 0x55
+/* XSPI writes */
+#define TEGRA234_MEMORY_CLIENT_XSPI1W 0x56
+/* MGBE0 read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58
+/* MGBEB read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBRD 0x59
+/* MGBEC read client */
+#define TEGRA234_MEMORY_CLIENT_MGBECRD 0x5a
+/* MGBED read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b
+/* MGBE0 write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c
+/* OFAA client */
+#define TEGRA234_MEMORY_CLIENT_OFAR 0x5d
+/* OFAA writes */
+#define TEGRA234_MEMORY_CLIENT_OFAW 0x5e
+/* MGBEB write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f
+/* sdmmca memory read client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCRA 0x60
+/* MGBEC write client */
+#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61
+/* sdmmcd memory read client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCWA 0x64
+/* MGBED write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65
+/* sdmmcd memory write client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67
+/* SE Memory Return Data Client Description */
+#define TEGRA234_MEMORY_CLIENT_SEU1RD 0x68
+/* SE Memory Write Client Description */
+#define TEGRA234_MEMORY_CLIENT_SUE1WR 0x69
+#define TEGRA234_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA234_MEMORY_CLIENT_VICSWR 0x6d
+/* DLA1ARDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDB1 0x6e
+/* DLA1 writes */
+#define TEGRA234_MEMORY_CLIENT_DLA1WRB 0x6f
+/* VI2 FALCON read clients */
+#define TEGRA234_MEMORY_CLIENT_VI2FALR 0x71
+/* VI2 write client */
+#define TEGRA234_MEMORY_CLIENT_VI2W 0x70
+/* VI Write client */
+#define TEGRA234_MEMORY_CLIENT_VIW 0x72
+/* NISO display read client */
+#define TEGRA234_MEMORY_CLIENT_NVDISPNISOR 0x73
+/* NVDISPNISO writes */
+#define TEGRA234_MEMORY_CLIENT_NVDISPNISOW 0x74
+/* XSPI client */
+#define TEGRA234_MEMORY_CLIENT_XSPI0R 0x75
+/* XSPI writes */
+#define TEGRA234_MEMORY_CLIENT_XSPI0W 0x76
+/* XSPI client */
+#define TEGRA234_MEMORY_CLIENT_XSPI1R 0x77
+#define TEGRA234_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA234_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine read clients */
+#define TEGRA234_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine write clients */
+#define TEGRA234_MEMORY_CLIENT_APEW 0x7b
+/* VI2FAL writes */
+#define TEGRA234_MEMORY_CLIENT_VI2FALW 0x7c
+#define TEGRA234_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA234_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* SE Memory Return Data Client Description */
+#define TEGRA234_MEMORY_CLIENT_SESRD 0x80
+/* SE Memory Write Client Description */
+#define TEGRA234_MEMORY_CLIENT_SESWR 0x81
+/* AXI AP and DFD-AUX0/1 read clients. Both share the same interface on the MSS. */
+#define TEGRA234_MEMORY_CLIENT_AXIAPR 0x82
+/* AXI AP and DFD-AUX0/1 write clients. Both share the same interface on the MSS. */
+#define TEGRA234_MEMORY_CLIENT_AXIAPW 0x83
+/* ETR read clients */
+#define TEGRA234_MEMORY_CLIENT_ETRR 0x84
+/* ETR write clients */
+#define TEGRA234_MEMORY_CLIENT_ETRW 0x85
+/* AXI Switch read client */
+#define TEGRA234_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA234_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA234_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA234_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA234_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA234_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA234_MEMORY_CLIENT_NVDISPLAYR 0x92
+/* BPMP read client */
+#define TEGRA234_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA234_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA234_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA234_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA234_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA234_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA234_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA234_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA234_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA234_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_SCEDMAW 0x9e
+/* APEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA234_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA234_MEMORY_CLIENT_VICSRD1 0xa2
+/* MSS internal memqual MIU0 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU0R 0xa6
+/* MSS internal memqual MIU0 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU0W 0xa7
+/* MSS internal memqual MIU1 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU1R 0xa8
+/* MSS internal memqual MIU1 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU1W 0xa9
+/* MSS internal memqual MIU2 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU2R 0xae
+/* MSS internal memqual MIU2 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU2W 0xaf
+/* MSS internal memqual MIU3 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU3R 0xb0
+/* MSS internal memqual MIU3 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU3W 0xb1
+/* MSS internal memqual MIU4 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU4R 0xb2
+/* MSS internal memqual MIU4 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU4W 0xb3
+#define TEGRA234_MEMORY_CLIENT_DPMUR 0xb4
+#define TEGRA234_MEMORY_CLIENT_DPMUW 0xb5
+#define TEGRA234_MEMORY_CLIENT_NVL0R 0xb6
+#define TEGRA234_MEMORY_CLIENT_NVL0W 0xb7
+#define TEGRA234_MEMORY_CLIENT_NVL1R 0xb8
+#define TEGRA234_MEMORY_CLIENT_NVL1W 0xb9
+#define TEGRA234_MEMORY_CLIENT_NVL2R 0xba
+#define TEGRA234_MEMORY_CLIENT_NVL2W 0xbb
+/* VI FALCON read clients */
+#define TEGRA234_MEMORY_CLIENT_VIFALR 0xbc
+/* VIFAL write clients */
+#define TEGRA234_MEMORY_CLIENT_VIFALW 0xbd
+/* DLA0ARDA read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDA 0xbe
+/* DLA0 Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0FALRDB 0xbf
+/* DLA0 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0WRA 0xc0
+/* DLA0 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0FALWRB 0xc1
+/* DLA1ARDA read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDA 0xc2
+/* DLA1 Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1FALRDB 0xc3
+/* DLA1 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1WRA 0xc4
+/* DLA1 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1FALWRB 0xc5
+/* PVA0RDA read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDA 0xc6
+/* PVA0RDB read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDB 0xc7
+/* PVA0RDC read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDC 0xc8
+/* PVA0WRA write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRA 0xc9
+/* PVA0WRB write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRB 0xca
+/* PVA0WRC write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRC 0xcb
+/* RCE read client */
+#define TEGRA234_MEMORY_CLIENT_RCER 0xd2
+/* RCE write client */
+#define TEGRA234_MEMORY_CLIENT_RCEW 0xd3
+/* RCEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_RCEDMAR 0xd4
+/* RCEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_RCEDMAW 0xd5
+/* PCIE0 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE0R 0xd8
+/* PCIE0 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5W 0xe3
+/* ISP Falcon write client */
+#define TEGRA234_MEMORY_CLIENT_ISPFALW 0xe4
+#define TEGRA234_MEMORY_CLIENT_NVL3R 0xe5
+#define TEGRA234_MEMORY_CLIENT_NVL3W 0xe6
+#define TEGRA234_MEMORY_CLIENT_NVL4R 0xe7
+#define TEGRA234_MEMORY_CLIENT_NVL4W 0xe8
+/* DLA0ARDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDA1 0xe9
+/* DLA1ARDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDA1 0xea
+/* PVA0RDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDA1 0xeb
+/* PVA0RDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDB1 0xec
+/* PCIE5r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5R1 0xef
+#define TEGRA234_MEMORY_CLIENT_NVENCSRD1 0xf0
+/* ISP read client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPRA1 0xf2
+#define TEGRA234_MEMORY_CLIENT_NVL0RHP 0xf4
+#define TEGRA234_MEMORY_CLIENT_NVL1RHP 0xf5
+#define TEGRA234_MEMORY_CLIENT_NVL2RHP 0xf6
+#define TEGRA234_MEMORY_CLIENT_NVL3RHP 0xf7
+#define TEGRA234_MEMORY_CLIENT_NVL4RHP 0xf8
+/* MSS internal memqual MIU5 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU5R 0xfc
+/* MSS internal memqual MIU5 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU5W 0xfd
+/* MSS internal memqual MIU6 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU6R 0xfe
+/* MSS internal memqual MIU6 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU6W 0xff
+#define TEGRA234_MEMORY_CLIENT_NVJPG1SRD 0x123
+#define TEGRA234_MEMORY_CLIENT_NVJPG1SWR 0x124
+
+/* ICC IDs for dummy MC clients used to represent CPU clusters */
+#define TEGRA_ICC_MC_CPU_CLUSTER0 1003
+#define TEGRA_ICC_MC_CPU_CLUSTER1 1004
+#define TEGRA_ICC_MC_CPU_CLUSTER2 1005
+
+#endif
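As a hedged usage sketch (the node, unit address, and choice of the APEDMA clients are illustrative, not taken from this patch): the memory controller acts as the interconnect provider, each client ID names one read or write path toward the external memory controller, and the dummy TEGRA_ICC_MC_CPU_CLUSTER* IDs stand in for the CPU complexes, which have no regular MC client ID.

        dma-controller@2930000 {
                interconnects = <&mc TEGRA234_MEMORY_CLIENT_APEDMAR &emc>,
                                <&mc TEGRA234_MEMORY_CLIENT_APEDMAW &emc>;
                interconnect-names = "dma-mem", "write";
        };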
diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h
index 169f005fbc78..930f708aca17 100644
--- a/include/dt-bindings/memory/tegra30-mc.h
+++ b/include/dt-bindings/memory/tegra30-mc.h
@@ -41,4 +41,71 @@
#define TEGRA30_MC_RESET_VDE 16
#define TEGRA30_MC_RESET_VI 17

+#define TEGRA30_MC_PTCR 0
+#define TEGRA30_MC_DISPLAY0A 1
+#define TEGRA30_MC_DISPLAY0AB 2
+#define TEGRA30_MC_DISPLAY0B 3
+#define TEGRA30_MC_DISPLAY0BB 4
+#define TEGRA30_MC_DISPLAY0C 5
+#define TEGRA30_MC_DISPLAY0CB 6
+#define TEGRA30_MC_DISPLAY1B 7
+#define TEGRA30_MC_DISPLAY1BB 8
+#define TEGRA30_MC_EPPUP 9
+#define TEGRA30_MC_G2PR 10
+#define TEGRA30_MC_G2SR 11
+#define TEGRA30_MC_MPEUNIFBR 12
+#define TEGRA30_MC_VIRUV 13
+#define TEGRA30_MC_AFIR 14
+#define TEGRA30_MC_AVPCARM7R 15
+#define TEGRA30_MC_DISPLAYHC 16
+#define TEGRA30_MC_DISPLAYHCB 17
+#define TEGRA30_MC_FDCDRD 18
+#define TEGRA30_MC_FDCDRD2 19
+#define TEGRA30_MC_G2DR 20
+#define TEGRA30_MC_HDAR 21
+#define TEGRA30_MC_HOST1XDMAR 22
+#define TEGRA30_MC_HOST1XR 23
+#define TEGRA30_MC_IDXSRD 24
+#define TEGRA30_MC_IDXSRD2 25
+#define TEGRA30_MC_MPE_IPRED 26
+#define TEGRA30_MC_MPEAMEMRD 27
+#define TEGRA30_MC_MPECSRD 28
+#define TEGRA30_MC_PPCSAHBDMAR 29
+#define TEGRA30_MC_PPCSAHBSLVR 30
+#define TEGRA30_MC_SATAR 31
+#define TEGRA30_MC_TEXSRD 32
+#define TEGRA30_MC_TEXSRD2 33
+#define TEGRA30_MC_VDEBSEVR 34
+#define TEGRA30_MC_VDEMBER 35
+#define TEGRA30_MC_VDEMCER 36
+#define TEGRA30_MC_VDETPER 37
+#define TEGRA30_MC_MPCORELPR 38
+#define TEGRA30_MC_MPCORER 39
+#define TEGRA30_MC_EPPU 40
+#define TEGRA30_MC_EPPV 41
+#define TEGRA30_MC_EPPY 42
+#define TEGRA30_MC_MPEUNIFBW 43
+#define TEGRA30_MC_VIWSB 44
+#define TEGRA30_MC_VIWU 45
+#define TEGRA30_MC_VIWV 46
+#define TEGRA30_MC_VIWY 47
+#define TEGRA30_MC_G2DW 48
+#define TEGRA30_MC_AFIW 49
+#define TEGRA30_MC_AVPCARM7W 50
+#define TEGRA30_MC_FDCDWR 51
+#define TEGRA30_MC_FDCDWR2 52
+#define TEGRA30_MC_HDAW 53
+#define TEGRA30_MC_HOST1XW 54
+#define TEGRA30_MC_ISPW 55
+#define TEGRA30_MC_MPCORELPW 56
+#define TEGRA30_MC_MPCOREW 57
+#define TEGRA30_MC_MPECSWR 58
+#define TEGRA30_MC_PPCSAHBDMAW 59
+#define TEGRA30_MC_PPCSAHBSLVW 60
+#define TEGRA30_MC_SATAW 61
+#define TEGRA30_MC_VDEBSEVW 62
+#define TEGRA30_MC_VDEDBGW 63
+#define TEGRA30_MC_VDEMBEW 64
+#define TEGRA30_MC_VDETPMW 65
+
#endif
diff --git a/include/dt-bindings/mfd/cros_ec.h b/include/dt-bindings/mfd/cros_ec.h
new file mode 100644
index 000000000000..3b29cd049578
--- /dev/null
+++ b/include/dt-bindings/mfd/cros_ec.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * DTS binding definitions used for the Chromium OS Embedded Controller.
+ *
+ * Copyright (c) 2022 The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_MFD_CROS_EC_H
+#define _DT_BINDINGS_MFD_CROS_EC_H
+
+/* Typed channel for keyboard backlight. */
+#define CROS_EC_PWM_DT_KB_LIGHT 0
+/* Typed channel for display backlight. */
+#define CROS_EC_PWM_DT_DISPLAY_LIGHT 1
+/* Number of typed channels. */
+#define CROS_EC_PWM_DT_COUNT 2
+
+#endif
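A hedged sketch of how the typed channels are meant to be consumed, assuming the single-cell EC PWM binding where the cell carries a CROS_EC_PWM_DT_* type rather than a raw channel number; node names are illustrative:

        cros_ec_pwm: pwm {
                compatible = "google,cros-ec-pwm-type";
                #pwm-cells = <1>;
        };

        keyboard-backlight {
                compatible = "pwm-backlight";
                pwms = <&cros_ec_pwm CROS_EC_PWM_DT_KB_LIGHT>;
        };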
diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h
new file mode 100644
index 000000000000..eca9448df228
--- /dev/null
+++ b/include/dt-bindings/mfd/qcom-pm8008.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H
+#define __DT_BINDINGS_MFD_QCOM_PM8008_H
+
+/* PM8008 IRQ numbers */
+#define PM8008_IRQ_MISC_UVLO 0
+#define PM8008_IRQ_MISC_OVLO 1
+#define PM8008_IRQ_MISC_OTST2 2
+#define PM8008_IRQ_MISC_OTST3 3
+#define PM8008_IRQ_MISC_LDO_OCP 4
+#define PM8008_IRQ_TEMP_ALARM 5
+#define PM8008_IRQ_GPIO1 6
+#define PM8008_IRQ_GPIO2 7
+
+#endif
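A hedged sketch of consuming these IRQ numbers, assuming the PM8008 node is modelled as an interrupt controller with two interrupt cells (number, trigger flags); the child node and trigger type are illustrative, and IRQ_TYPE_EDGE_BOTH comes from dt-bindings/interrupt-controller/irq.h:

        pm8008: pmic@8 {
                reg = <0x8>;
                interrupt-controller;
                #interrupt-cells = <2>;

                temp-alarm {
                        interrupts = <PM8008_IRQ_TEMP_ALARM IRQ_TYPE_EDGE_BOTH>;
                };
        };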
diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h
index 309e8c79f27b..36448a5619a1 100644
--- a/include/dt-bindings/mfd/stm32f4-rcc.h
+++ b/include/dt-bindings/mfd/stm32f4-rcc.h
@@ -34,7 +34,6 @@
#define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8))
#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit)
-

/* AHB2 */
#define STM32F4_RCC_AHB2_DCMI 0
#define STM32F4_RCC_AHB2_CRYP 4
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
index a90f3613c584..a4e4f9271395 100644
--- a/include/dt-bindings/mfd/stm32f7-rcc.h
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -64,6 +64,7 @@
#define STM32F7_RCC_APB1_TIM14 8
#define STM32F7_RCC_APB1_LPTIM1 9
#define STM32F7_RCC_APB1_WWDG 11
+#define STM32F7_RCC_APB1_CAN3 13
#define STM32F7_RCC_APB1_SPI2 14
#define STM32F7_RCC_APB1_SPI3 15
#define STM32F7_RCC_APB1_SPDIFRX 16
@@ -107,6 +108,7 @@
#define STM32F7_RCC_APB2_SAI1 22
#define STM32F7_RCC_APB2_SAI2 23
#define STM32F7_RCC_APB2_LTDC 26
+#define STM32F7_RCC_APB2_DSI 27

#define STM32F7_APB2_RESET(bit) (STM32F7_RCC_APB2_##bit + (0x24 * 8))
#define STM32F7_APB2_CLOCK(bit) (STM32F7_RCC_APB2_##bit + 0xA0)
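The RESET/CLOCK helpers above turn a bank bit index into an RCC specifier by adding the bank's register offset. A hedged consumer sketch for the newly added DSI bit (node name and unit address are illustrative; the two-cell clock specifier follows the STM32 RCC binding):

        dsi: dsi@5a000000 {
                clocks = <&rcc 0 STM32F7_APB2_CLOCK(DSI)>;
                resets = <&rcc STM32F7_APB2_RESET(DSI)>;
        };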
diff --git a/include/dt-bindings/mux/mux-j721e-wiz.h b/include/dt-bindings/mux/mux-j721e-wiz.h
deleted file mode 100644
index fd1c4ea9fc7f..000000000000
--- a/include/dt-bindings/mux/mux-j721e-wiz.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for J721E WIZ.
- */
-
-#ifndef _DT_BINDINGS_J721E_WIZ
-#define _DT_BINDINGS_J721E_WIZ
-
-#define SERDES0_LANE0_QSGMII_LANE1 0x0
-#define SERDES0_LANE0_PCIE0_LANE0 0x1
-#define SERDES0_LANE0_USB3_0_SWAP 0x2
-
-#define SERDES0_LANE1_QSGMII_LANE2 0x0
-#define SERDES0_LANE1_PCIE0_LANE1 0x1
-#define SERDES0_LANE1_USB3_0 0x2
-
-#define SERDES1_LANE0_QSGMII_LANE3 0x0
-#define SERDES1_LANE0_PCIE1_LANE0 0x1
-#define SERDES1_LANE0_USB3_1_SWAP 0x2
-#define SERDES1_LANE0_SGMII_LANE0 0x3
-
-#define SERDES1_LANE1_QSGMII_LANE4 0x0
-#define SERDES1_LANE1_PCIE1_LANE1 0x1
-#define SERDES1_LANE1_USB3_1 0x2
-#define SERDES1_LANE1_SGMII_LANE1 0x3
-
-#define SERDES2_LANE0_PCIE2_LANE0 0x1
-#define SERDES2_LANE0_SGMII_LANE0 0x3
-#define SERDES2_LANE0_USB3_1_SWAP 0x2
-
-#define SERDES2_LANE1_PCIE2_LANE1 0x1
-#define SERDES2_LANE1_USB3_1 0x2
-#define SERDES2_LANE1_SGMII_LANE1 0x3
-
-#define SERDES3_LANE0_PCIE3_LANE0 0x1
-#define SERDES3_LANE0_USB3_0_SWAP 0x2
-
-#define SERDES3_LANE1_PCIE3_LANE1 0x1
-#define SERDES3_LANE1_USB3_0 0x2
-
-#define SERDES4_LANE0_EDP_LANE0 0x0
-#define SERDES4_LANE0_QSGMII_LANE5 0x2
-
-#define SERDES4_LANE1_EDP_LANE1 0x0
-#define SERDES4_LANE1_QSGMII_LANE6 0x2
-
-#define SERDES4_LANE2_EDP_LANE2 0x0
-#define SERDES4_LANE2_QSGMII_LANE7 0x2
-
-#define SERDES4_LANE3_EDP_LANE3 0x0
-#define SERDES4_LANE3_QSGMII_LANE8 0x2
-
-#endif /* _DT_BINDINGS_J721E_WIZ */
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
new file mode 100644
index 000000000000..b0b1091aad6d
--- /dev/null
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the SERDES muxes on TI SoCs.
+ */
+
+#ifndef _DT_BINDINGS_MUX_TI_SERDES
+#define _DT_BINDINGS_MUX_TI_SERDES
+
+/*
+ * These bindings are deprecated, because they do not match the actual
+ * concept of bindings but rather contain pure constant values used only
+ * in DTS board files.
+ * Instead include the header in the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead, use the header in the DTS source directory."
+
+/* J721E */
+
+#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
+#define J721E_SERDES0_LANE0_PCIE0_LANE0 0x1
+#define J721E_SERDES0_LANE0_USB3_0_SWAP 0x2
+#define J721E_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J721E_SERDES0_LANE1_QSGMII_LANE2 0x0
+#define J721E_SERDES0_LANE1_PCIE0_LANE1 0x1
+#define J721E_SERDES0_LANE1_USB3_0 0x2
+#define J721E_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J721E_SERDES1_LANE0_QSGMII_LANE3 0x0
+#define J721E_SERDES1_LANE0_PCIE1_LANE0 0x1
+#define J721E_SERDES1_LANE0_USB3_1_SWAP 0x2
+#define J721E_SERDES1_LANE0_SGMII_LANE0 0x3
+
+#define J721E_SERDES1_LANE1_QSGMII_LANE4 0x0
+#define J721E_SERDES1_LANE1_PCIE1_LANE1 0x1
+#define J721E_SERDES1_LANE1_USB3_1 0x2
+#define J721E_SERDES1_LANE1_SGMII_LANE1 0x3
+
+#define J721E_SERDES2_LANE0_IP1_UNUSED 0x0
+#define J721E_SERDES2_LANE0_PCIE2_LANE0 0x1
+#define J721E_SERDES2_LANE0_USB3_1_SWAP 0x2
+#define J721E_SERDES2_LANE0_SGMII_LANE0 0x3
+
+#define J721E_SERDES2_LANE1_IP1_UNUSED 0x0
+#define J721E_SERDES2_LANE1_PCIE2_LANE1 0x1
+#define J721E_SERDES2_LANE1_USB3_1 0x2
+#define J721E_SERDES2_LANE1_SGMII_LANE1 0x3
+
+#define J721E_SERDES3_LANE0_IP1_UNUSED 0x0
+#define J721E_SERDES3_LANE0_PCIE3_LANE0 0x1
+#define J721E_SERDES3_LANE0_USB3_0_SWAP 0x2
+#define J721E_SERDES3_LANE0_IP4_UNUSED 0x3
+
+#define J721E_SERDES3_LANE1_IP1_UNUSED 0x0
+#define J721E_SERDES3_LANE1_PCIE3_LANE1 0x1
+#define J721E_SERDES3_LANE1_USB3_0 0x2
+#define J721E_SERDES3_LANE1_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE0_EDP_LANE0 0x0
+#define J721E_SERDES4_LANE0_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE0_QSGMII_LANE5 0x2
+#define J721E_SERDES4_LANE0_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE1_EDP_LANE1 0x0
+#define J721E_SERDES4_LANE1_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE1_QSGMII_LANE6 0x2
+#define J721E_SERDES4_LANE1_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE2_EDP_LANE2 0x0
+#define J721E_SERDES4_LANE2_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE2_QSGMII_LANE7 0x2
+#define J721E_SERDES4_LANE2_IP4_UNUSED 0x3
+
+#define J721E_SERDES4_LANE3_EDP_LANE3 0x0
+#define J721E_SERDES4_LANE3_IP2_UNUSED 0x1
+#define J721E_SERDES4_LANE3_QSGMII_LANE8 0x2
+#define J721E_SERDES4_LANE3_IP4_UNUSED 0x3
+
+/* J7200 */
+
+#define J7200_SERDES0_LANE0_QSGMII_LANE3 0x0
+#define J7200_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J7200_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J7200_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J7200_SERDES0_LANE1_QSGMII_LANE4 0x0
+#define J7200_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J7200_SERDES0_LANE1_IP3_UNUSED 0x2
+#define J7200_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J7200_SERDES0_LANE2_QSGMII_LANE1 0x0
+#define J7200_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J7200_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J7200_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J7200_SERDES0_LANE3_QSGMII_LANE2 0x0
+#define J7200_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J7200_SERDES0_LANE3_USB 0x2
+#define J7200_SERDES0_LANE3_IP4_UNUSED 0x3
+
+/* AM64 */
+
+#define AM64_SERDES0_LANE0_PCIE0 0x0
+#define AM64_SERDES0_LANE0_USB 0x1
+
+/* J721S2 */
+
+#define J721S2_SERDES0_LANE0_EDP_LANE0 0x0
+#define J721S2_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J721S2_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE1_EDP_LANE1 0x0
+#define J721S2_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J721S2_SERDES0_LANE1_USB 0x2
+#define J721S2_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE2_EDP_LANE2 0x0
+#define J721S2_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J721S2_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE3_EDP_LANE3 0x0
+#define J721S2_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J721S2_SERDES0_LANE3_USB 0x2
+#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
+
+/* J784S4 */
+
+#define J784S4_SERDES0_LANE0_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J784S4_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE1_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J784S4_SERDES0_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE2_PCIE3_LANE0 0x0
+#define J784S4_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J784S4_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE3_PCIE3_LANE1 0x0
+#define J784S4_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J784S4_SERDES0_LANE3_USB 0x2
+#define J784S4_SERDES0_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE0_QSGMII_LANE3 0x0
+#define J784S4_SERDES1_LANE0_PCIE0_LANE0 0x1
+#define J784S4_SERDES1_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE1_QSGMII_LANE4 0x0
+#define J784S4_SERDES1_LANE1_PCIE0_LANE1 0x1
+#define J784S4_SERDES1_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE2_QSGMII_LANE1 0x0
+#define J784S4_SERDES1_LANE2_PCIE0_LANE2 0x1
+#define J784S4_SERDES1_LANE2_PCIE2_LANE0 0x2
+#define J784S4_SERDES1_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE3_QSGMII_LANE2 0x0
+#define J784S4_SERDES1_LANE3_PCIE0_LANE3 0x1
+#define J784S4_SERDES1_LANE3_PCIE2_LANE1 0x2
+#define J784S4_SERDES1_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE0_QSGMII_LANE5 0x0
+#define J784S4_SERDES2_LANE0_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE1_QSGMII_LANE6 0x0
+#define J784S4_SERDES2_LANE1_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE2_QSGMII_LANE7 0x0
+#define J784S4_SERDES2_LANE2_QSGMII_LANE1 0x1
+#define J784S4_SERDES2_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE3_QSGMII_LANE8 0x0
+#define J784S4_SERDES2_LANE3_QSGMII_LANE2 0x1
+#define J784S4_SERDES2_LANE3_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE3_IP4_UNUSED 0x3
+
+#endif /* _DT_BINDINGS_MUX_TI_SERDES */
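These values are lane-mux idle states. A hedged sketch of the intended consumer, an mmio-mux controlling the SERDES lane-select register (the register offsets and masks are illustrative):

        serdes_ln_ctrl: mux-controller {
                compatible = "mmio-mux";
                #mux-control-cells = <1>;
                mux-reg-masks = <0x4080 0x3>, <0x4084 0x3>; /* SERDES0 lane0/lane1 */
                idle-states = <J721E_SERDES0_LANE0_PCIE0_LANE0>,
                              <J721E_SERDES0_LANE1_PCIE0_LANE1>;
        };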
diff --git a/include/dt-bindings/net/pcs-rzn1-miic.h b/include/dt-bindings/net/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..784782eaec9e
--- /dev/null
+++ b/include/dt-bindings/net/pcs-rzn1-miic.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef _DT_BINDINGS_PCS_RZN1_MIIC
+#define _DT_BINDINGS_PCS_RZN1_MIIC
+
+/*
+ * Refer to the datasheet [1], section 8.2.1 "Internal Connection of Ethernet
+ * Ports", to check the available combinations.
+ *
+ * [1] REN_r01uh0750ej0140-rzn1-introduction_MAT_20210228.pdf
+ */
+
+#define MIIC_GMAC1_PORT 0
+#define MIIC_GMAC2_PORT 1
+#define MIIC_RTOS_PORT 2
+#define MIIC_SERCOS_PORTA 3
+#define MIIC_SERCOS_PORTB 4
+#define MIIC_ETHERCAT_PORTA 5
+#define MIIC_ETHERCAT_PORTB 6
+#define MIIC_ETHERCAT_PORTC 7
+#define MIIC_SWITCH_PORTA 8
+#define MIIC_SWITCH_PORTB 9
+#define MIIC_SWITCH_PORTC 10
+#define MIIC_SWITCH_PORTD 11
+#define MIIC_HSR_PORTA 12
+#define MIIC_HSR_PORTB 13
+
+#endif
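A hedged sketch of how these port numbers are consumed: each MII converter child is addressed by its port constant (the compatible string, unit address, and choice of GMAC1 are assumptions for illustration):

        eth_miic: eth-miic@44030000 {
                compatible = "renesas,rzn1-miic";
                #address-cells = <1>;
                #size-cells = <0>;

                mii_conv1: mii-conv@1 {
                        reg = <MIIC_GMAC1_PORT>;
                };
        };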
diff --git a/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
new file mode 100644
index 000000000000..f570b23165a2
--- /dev/null
+++ b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+#define _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+
+/*
+ * The packet offset must be a multiple of 4, as the NVMEM memory is
+ * registered with stride = 4.
+ */
+#define OTP_PKT(id) ((id) * 4)
+
+#endif
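A hedged sketch of an NVMEM cell built with OTP_PKT(): since the stride is 4, the macro converts a packet index into a byte offset. The controller address, compatible, and cell size are illustrative:

        otpc: efuse@e8c00000 {
                compatible = "microchip,sama7g5-otpc", "syscon";
                reg = <0xe8c00000 0xec>;
                #address-cells = <1>;
                #size-cells = <1>;

                temperature_calib: calib@4 {
                        reg = <OTP_PKT(1) 0x4c>;
                };
        };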
diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h
new file mode 100644
index 000000000000..0671991208fc
--- /dev/null
+++ b/include/dt-bindings/phy/phy-cadence.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for Cadence SERDES.
+ */
+
+#ifndef _DT_BINDINGS_CADENCE_SERDES_H
+#define _DT_BINDINGS_CADENCE_SERDES_H
+
+#define CDNS_SERDES_NO_SSC 0
+#define CDNS_SERDES_EXTERNAL_SSC 1
+#define CDNS_SERDES_INTERNAL_SSC 2
+
+/* Torrent */
+#define CDNS_TORRENT_REFCLK_DRIVER 0
+#define CDNS_TORRENT_DERIVED_REFCLK 1
+#define CDNS_TORRENT_RECEIVED_REFCLK 2
+
+/* Sierra */
+#define CDNS_SIERRA_PLL_CMNLC 0
+#define CDNS_SIERRA_PLL_CMNLC1 1
+#define CDNS_SIERRA_DERIVED_REFCLK 2
+
+#endif /* _DT_BINDINGS_CADENCE_SERDES_H */
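A hedged sketch of a Cadence SERDES link subnode using the SSC constants (PHY_TYPE_PCIE comes from dt-bindings/phy/phy.h; the lane count is illustrative):

        serdes0_pcie_link: phy@0 {
                reg = <0>;
                #phy-cells = <0>;
                cdns,phy-type = <PHY_TYPE_PCIE>;
                cdns,num-lanes = <2>;
                cdns,ssc-mode = <CDNS_SERDES_NO_SSC>;
        };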
diff --git a/include/dt-bindings/phy/phy-imx8-pcie.h b/include/dt-bindings/phy/phy-imx8-pcie.h
new file mode 100644
index 000000000000..8bbe2d6538d8
--- /dev/null
+++ b/include/dt-bindings/phy/phy-imx8-pcie.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * This header provides constants for i.MX8 PCIe.
+ */
+
+#ifndef _DT_BINDINGS_IMX8_PCIE_H
+#define _DT_BINDINGS_IMX8_PCIE_H
+
+/* Reference clock PAD mode */
+#define IMX8_PCIE_REFCLK_PAD_UNUSED 0
+#define IMX8_PCIE_REFCLK_PAD_INPUT 1
+#define IMX8_PCIE_REFCLK_PAD_OUTPUT 2
+
+#endif /* _DT_BINDINGS_IMX8_PCIE_H */
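A hedged sketch of the PAD-mode constant in an i.MX8M PCIe PHY node (the compatible and register range are assumptions for illustration):

        pcie_phy: pcie-phy@32f00000 {
                compatible = "fsl,imx8mm-pcie-phy";
                reg = <0x32f00000 0x10000>;
                fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
                #phy-cells = <0>;
        };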
diff --git a/include/dt-bindings/phy/phy-lan966x-serdes.h b/include/dt-bindings/phy/phy-lan966x-serdes.h
new file mode 100644
index 000000000000..4330269a901e
--- /dev/null
+++ b/include/dt-bindings/phy/phy-lan966x-serdes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#ifndef __PHY_LAN966X_SERDES_H__
+#define __PHY_LAN966X_SERDES_H__
+
+#define CU(x) (x)
+#define CU_MAX CU(2)
+#define SERDES6G(x) (CU_MAX + 1 + (x))
+#define SERDES6G_MAX SERDES6G(3)
+#define RGMII(x) (SERDES6G_MAX + 1 + (x))
+#define RGMII_MAX RGMII(2)
+#define SERDES_MAX (RGMII_MAX + 1)
+
+#endif
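These macros pack the three SERDES families into one contiguous index space: CU(0..2) map to 0..2, SERDES6G(0..3) to 3..6, RGMII(0..2) to 7..9, so SERDES_MAX is 10. A hedged consumer sketch, assuming a two-cell PHY specifier of port index plus SERDES index:

        port0: port@0 {
                phys = <&serdes 0 CU(0)>;
        };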
diff --git a/include/dt-bindings/phy/phy-qcom-qmp.h b/include/dt-bindings/phy/phy-qcom-qmp.h
new file mode 100644
index 000000000000..4edec4c5b224
--- /dev/null
+++ b/include/dt-bindings/phy/phy-qcom-qmp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Qualcomm QMP PHY constants
+ *
+ * Copyright (C) 2022 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_PHY_QMP
+#define _DT_BINDINGS_PHY_QMP
+
+/* QMP USB4-USB3-DP clocks */
+#define QMP_USB43DP_USB3_PIPE_CLK 0
+#define QMP_USB43DP_DP_LINK_CLK 1
+#define QMP_USB43DP_DP_VCO_DIV_CLK 2
+
+/* QMP USB4-USB3-DP PHYs */
+#define QMP_USB43DP_USB3_PHY 0
+#define QMP_USB43DP_DP_PHY 1
+
+#endif /* _DT_BINDINGS_PHY_QMP */
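A hedged sketch of the combined USB4-USB3-DP PHY acting as both clock and PHY provider (labels, addresses, and the consumer node are illustrative):

        usb_1_qmpphy: phy@88e8000 {
                reg = <0x088e8000 0x3000>;
                #clock-cells = <1>;
                #phy-cells = <1>;
        };

        usb_1_dwc3: usb@a600000 {
                phys = <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
                phy-names = "usb3-phy";
                clocks = <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>;
        };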
diff --git a/include/dt-bindings/phy/phy-ti.h b/include/dt-bindings/phy/phy-ti.h
new file mode 100644
index 000000000000..ad955d3a56b4
--- /dev/null
+++ b/include/dt-bindings/phy/phy-ti.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for TI SERDES.
+ */
+
+#ifndef _DT_BINDINGS_TI_SERDES
+#define _DT_BINDINGS_TI_SERDES
+
+/* Clock index for output clocks from WIZ */
+
+/* MUX Clocks */
+#define TI_WIZ_PLL0_REFCLK 0
+#define TI_WIZ_PLL1_REFCLK 1
+#define TI_WIZ_REFCLK_DIG 2
+
+/* Reserve index here for future additions */
+
+/* MISC Clocks */
+#define TI_WIZ_PHY_EN_REFCLK 16
+
+#endif /* _DT_BINDINGS_TI_SERDES */
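A hedged sketch, assuming the WIZ wrapper is a clock provider with #clock-cells = <1> and the SERDES instance consumes its reference clocks by these indices (labels and unit address are illustrative):

        serdes0: serdes@5000000 {
                clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
                         <&serdes_wiz0 TI_WIZ_PHY_EN_REFCLK>;
        };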
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 36e8c241cf48..6b901b342348 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -19,5 +19,9 @@
#define PHY_TYPE_DP 6
#define PHY_TYPE_XPCS 7
#define PHY_TYPE_SGMII 8
+#define PHY_TYPE_QSGMII 9
+#define PHY_TYPE_DPHY 10
+#define PHY_TYPE_CPHY 11
+#define PHY_TYPE_USXGMII 12

#endif /* _DT_BINDINGS_PHY */
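The PHY_TYPE_* values select a protocol through the PHY specifier on multi-protocol PHYs. A hedged consumer sketch, assuming a combo-PHY provider with #phy-cells = <1> (labels and unit address are illustrative):

        usb_host: usb@fcc00000 {
                phys = <&combphy0 PHY_TYPE_USB3>;
                phy-names = "usb3-phy";
        };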
diff --git a/include/dt-bindings/pinctrl/apple.h b/include/dt-bindings/pinctrl/apple.h
new file mode 100644
index 000000000000..ea0a6f466592
--- /dev/null
+++ b/include/dt-bindings/pinctrl/apple.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * This header provides constants for Apple pinctrl bindings.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_APPLE_H
+#define _DT_BINDINGS_PINCTRL_APPLE_H
+
+#define APPLE_PINMUX(pin, func) ((pin) | ((func) << 16))
+#define APPLE_PIN(pinmux) ((pinmux) & 0xffff)
+#define APPLE_FUNC(pinmux) ((pinmux) >> 16)
+
+#endif /* _DT_BINDINGS_PINCTRL_APPLE_H */
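APPLE_PINMUX() packs the pin number into the low 16 bits and the function into the bits above; APPLE_PIN()/APPLE_FUNC() unpack it on the driver side. A hedged sketch (pin numbers and function index are illustrative):

        i2c0_pins: i2c0-pins {
                pinmux = <APPLE_PINMUX(192, 1)>,
                         <APPLE_PINMUX(188, 1)>;
        };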
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 0359bfdc9119..2175ec89c82f 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for hisilicon pinctrl bindings.
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
* Copyright (c) 2015 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
 */

#ifndef _DT_BINDINGS_PINCTRL_HISI_H
diff --git a/include/dt-bindings/pinctrl/k210-fpioa.h b/include/dt-bindings/pinctrl/k210-fpioa.h
new file mode 100644
index 000000000000..314285eab3a1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/k210-fpioa.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef PINCTRL_K210_FPIOA_H
+#define PINCTRL_K210_FPIOA_H
+
+/*
+ * Full list of FPIOA functions from
+ * kendryte-standalone-sdk/lib/drivers/include/fpioa.h
+ */
+#define K210_PCF_MASK GENMASK(7, 0)
+#define K210_PCF_JTAG_TCLK 0 /* JTAG Test Clock */
+#define K210_PCF_JTAG_TDI 1 /* JTAG Test Data In */
+#define K210_PCF_JTAG_TMS 2 /* JTAG Test Mode Select */
+#define K210_PCF_JTAG_TDO 3 /* JTAG Test Data Out */
+#define K210_PCF_SPI0_D0 4 /* SPI0 Data 0 */
+#define K210_PCF_SPI0_D1 5 /* SPI0 Data 1 */
+#define K210_PCF_SPI0_D2 6 /* SPI0 Data 2 */
+#define K210_PCF_SPI0_D3 7 /* SPI0 Data 3 */
+#define K210_PCF_SPI0_D4 8 /* SPI0 Data 4 */
+#define K210_PCF_SPI0_D5 9 /* SPI0 Data 5 */
+#define K210_PCF_SPI0_D6 10 /* SPI0 Data 6 */
+#define K210_PCF_SPI0_D7 11 /* SPI0 Data 7 */
+#define K210_PCF_SPI0_SS0 12 /* SPI0 Chip Select 0 */
+#define K210_PCF_SPI0_SS1 13 /* SPI0 Chip Select 1 */
+#define K210_PCF_SPI0_SS2 14 /* SPI0 Chip Select 2 */
+#define K210_PCF_SPI0_SS3 15 /* SPI0 Chip Select 3 */
+#define K210_PCF_SPI0_ARB 16 /* SPI0 Arbitration */
+#define K210_PCF_SPI0_SCLK 17 /* SPI0 Serial Clock */
+#define K210_PCF_UARTHS_RX 18 /* UART High speed Receiver */
+#define K210_PCF_UARTHS_TX 19 /* UART High speed Transmitter */
+#define K210_PCF_RESV6 20 /* Reserved function */
+#define K210_PCF_RESV7 21 /* Reserved function */
+#define K210_PCF_CLK_SPI1 22 /* Clock SPI1 */
+#define K210_PCF_CLK_I2C1 23 /* Clock I2C1 */
+#define K210_PCF_GPIOHS0 24 /* GPIO High speed 0 */
+#define K210_PCF_GPIOHS1 25 /* GPIO High speed 1 */
+#define K210_PCF_GPIOHS2 26 /* GPIO High speed 2 */
+#define K210_PCF_GPIOHS3 27 /* GPIO High speed 3 */
+#define K210_PCF_GPIOHS4 28 /* GPIO High speed 4 */
+#define K210_PCF_GPIOHS5 29 /* GPIO High speed 5 */
+#define K210_PCF_GPIOHS6 30 /* GPIO High speed 6 */
+#define K210_PCF_GPIOHS7 31 /* GPIO High speed 7 */
+#define K210_PCF_GPIOHS8 32 /* GPIO High speed 8 */
+#define K210_PCF_GPIOHS9 33 /* GPIO High speed 9 */
+#define K210_PCF_GPIOHS10 34 /* GPIO High speed 10 */
+#define K210_PCF_GPIOHS11 35 /* GPIO High speed 11 */
+#define K210_PCF_GPIOHS12 36 /* GPIO High speed 12 */
+#define K210_PCF_GPIOHS13 37 /* GPIO High speed 13 */
+#define K210_PCF_GPIOHS14 38 /* GPIO High speed 14 */
+#define K210_PCF_GPIOHS15 39 /* GPIO High speed 15 */
+#define K210_PCF_GPIOHS16 40 /* GPIO High speed 16 */
+#define K210_PCF_GPIOHS17 41 /* GPIO High speed 17 */
+#define K210_PCF_GPIOHS18 42 /* GPIO High speed 18 */
+#define K210_PCF_GPIOHS19 43 /* GPIO High speed 19 */
+#define K210_PCF_GPIOHS20 44 /* GPIO High speed 20 */
+#define K210_PCF_GPIOHS21 45 /* GPIO High speed 21 */
+#define K210_PCF_GPIOHS22 46 /* GPIO High speed 22 */
+#define K210_PCF_GPIOHS23 47 /* GPIO High speed 23 */
+#define K210_PCF_GPIOHS24 48 /* GPIO High speed 24 */
+#define K210_PCF_GPIOHS25 49 /* GPIO High speed 25 */
+#define K210_PCF_GPIOHS26 50 /* GPIO High speed 26 */
+#define K210_PCF_GPIOHS27 51 /* GPIO High speed 27 */
+#define K210_PCF_GPIOHS28 52 /* GPIO High speed 28 */
+#define K210_PCF_GPIOHS29 53 /* GPIO High speed 29 */
+#define K210_PCF_GPIOHS30 54 /* GPIO High speed 30 */
+#define K210_PCF_GPIOHS31 55 /* GPIO High speed 31 */
+#define K210_PCF_GPIO0 56 /* GPIO pin 0 */
+#define K210_PCF_GPIO1 57 /* GPIO pin 1 */
+#define K210_PCF_GPIO2 58 /* GPIO pin 2 */
+#define K210_PCF_GPIO3 59 /* GPIO pin 3 */
+#define K210_PCF_GPIO4 60 /* GPIO pin 4 */
+#define K210_PCF_GPIO5 61 /* GPIO pin 5 */
+#define K210_PCF_GPIO6 62 /* GPIO pin 6 */
+#define K210_PCF_GPIO7 63 /* GPIO pin 7 */
+#define K210_PCF_UART1_RX 64 /* UART1 Receiver */
+#define K210_PCF_UART1_TX 65 /* UART1 Transmitter */
+#define K210_PCF_UART2_RX 66 /* UART2 Receiver */
+#define K210_PCF_UART2_TX 67 /* UART2 Transmitter */
+#define K210_PCF_UART3_RX 68 /* UART3 Receiver */
+#define K210_PCF_UART3_TX 69 /* UART3 Transmitter */
+#define K210_PCF_SPI1_D0 70 /* SPI1 Data 0 */
+#define K210_PCF_SPI1_D1 71 /* SPI1 Data 1 */
+#define K210_PCF_SPI1_D2 72 /* SPI1 Data 2 */
+#define K210_PCF_SPI1_D3 73 /* SPI1 Data 3 */
+#define K210_PCF_SPI1_D4 74 /* SPI1 Data 4 */
+#define K210_PCF_SPI1_D5 75 /* SPI1 Data 5 */
+#define K210_PCF_SPI1_D6 76 /* SPI1 Data 6 */
+#define K210_PCF_SPI1_D7 77 /* SPI1 Data 7 */
+#define K210_PCF_SPI1_SS0 78 /* SPI1 Chip Select 0 */
+#define K210_PCF_SPI1_SS1 79 /* SPI1 Chip Select 1 */
+#define K210_PCF_SPI1_SS2 80 /* SPI1 Chip Select 2 */
+#define K210_PCF_SPI1_SS3 81 /* SPI1 Chip Select 3 */
+#define K210_PCF_SPI1_ARB 82 /* SPI1 Arbitration */
+#define K210_PCF_SPI1_SCLK 83 /* SPI1 Serial Clock */
+#define K210_PCF_SPI2_D0 84 /* SPI2 Data 0 */
+#define K210_PCF_SPI2_SS 85 /* SPI2 Select */
+#define K210_PCF_SPI2_SCLK 86 /* SPI2 Serial Clock */
+#define K210_PCF_I2S0_MCLK 87 /* I2S0 Master Clock */
+#define K210_PCF_I2S0_SCLK 88 /* I2S0 Serial Clock(BCLK) */
+#define K210_PCF_I2S0_WS 89 /* I2S0 Word Select(LRCLK) */
+#define K210_PCF_I2S0_IN_D0 90 /* I2S0 Serial Data Input 0 */
+#define K210_PCF_I2S0_IN_D1 91 /* I2S0 Serial Data Input 1 */
+#define K210_PCF_I2S0_IN_D2 92 /* I2S0 Serial Data Input 2 */
+#define K210_PCF_I2S0_IN_D3 93 /* I2S0 Serial Data Input 3 */
+#define K210_PCF_I2S0_OUT_D0 94 /* I2S0 Serial Data Output 0 */
+#define K210_PCF_I2S0_OUT_D1 95 /* I2S0 Serial Data Output 1 */
+#define K210_PCF_I2S0_OUT_D2 96 /* I2S0 Serial Data Output 2 */
+#define K210_PCF_I2S0_OUT_D3 97 /* I2S0 Serial Data Output 3 */
+#define K210_PCF_I2S1_MCLK 98 /* I2S1 Master Clock */
+#define K210_PCF_I2S1_SCLK 99 /* I2S1 Serial Clock(BCLK) */
+#define K210_PCF_I2S1_WS 100 /* I2S1 Word Select(LRCLK) */
+#define K210_PCF_I2S1_IN_D0 101 /* I2S1 Serial Data Input 0 */
+#define K210_PCF_I2S1_IN_D1 102 /* I2S1 Serial Data Input 1 */
+#define K210_PCF_I2S1_IN_D2 103 /* I2S1 Serial Data Input 2 */
+#define K210_PCF_I2S1_IN_D3 104 /* I2S1 Serial Data Input 3 */
+#define K210_PCF_I2S1_OUT_D0 105 /* I2S1 Serial Data Output 0 */
+#define K210_PCF_I2S1_OUT_D1 106 /* I2S1 Serial Data Output 1 */
+#define K210_PCF_I2S1_OUT_D2 107 /* I2S1 Serial Data Output 2 */
+#define K210_PCF_I2S1_OUT_D3 108 /* I2S1 Serial Data Output 3 */
+#define K210_PCF_I2S2_MCLK 109 /* I2S2 Master Clock */
+#define K210_PCF_I2S2_SCLK 110 /* I2S2 Serial Clock(BCLK) */
+#define K210_PCF_I2S2_WS 111 /* I2S2 Word Select(LRCLK) */
+#define K210_PCF_I2S2_IN_D0 112 /* I2S2 Serial Data Input 0 */
+#define K210_PCF_I2S2_IN_D1 113 /* I2S2 Serial Data Input 1 */
+#define K210_PCF_I2S2_IN_D2 114 /* I2S2 Serial Data Input 2 */
+#define K210_PCF_I2S2_IN_D3 115 /* I2S2 Serial Data Input 3 */
+#define K210_PCF_I2S2_OUT_D0 116 /* I2S2 Serial Data Output 0 */
+#define K210_PCF_I2S2_OUT_D1 117 /* I2S2 Serial Data Output 1 */
+#define K210_PCF_I2S2_OUT_D2 118 /* I2S2 Serial Data Output 2 */
+#define K210_PCF_I2S2_OUT_D3 119 /* I2S2 Serial Data Output 3 */
+#define K210_PCF_RESV0 120 /* Reserved function */
+#define K210_PCF_RESV1 121 /* Reserved function */
+#define K210_PCF_RESV2 122 /* Reserved function */
+#define K210_PCF_RESV3 123 /* Reserved function */
+#define K210_PCF_RESV4 124 /* Reserved function */
+#define K210_PCF_RESV5 125 /* Reserved function */
+#define K210_PCF_I2C0_SCLK 126 /* I2C0 Serial Clock */
+#define K210_PCF_I2C0_SDA 127 /* I2C0 Serial Data */
+#define K210_PCF_I2C1_SCLK 128 /* I2C1 Serial Clock */
+#define K210_PCF_I2C1_SDA 129 /* I2C1 Serial Data */
+#define K210_PCF_I2C2_SCLK 130 /* I2C2 Serial Clock */
+#define K210_PCF_I2C2_SDA 131 /* I2C2 Serial Data */
+#define K210_PCF_DVP_XCLK 132 /* DVP System Clock */
+#define K210_PCF_DVP_RST 133 /* DVP System Reset */
+#define K210_PCF_DVP_PWDN 134 /* DVP Power Down Mode */
+#define K210_PCF_DVP_VSYNC 135 /* DVP Vertical Sync */
+#define K210_PCF_DVP_HSYNC 136 /* DVP Horizontal Sync */
+#define K210_PCF_DVP_PCLK 137 /* Pixel Clock */
+#define K210_PCF_DVP_D0 138 /* Data Bit 0 */
+#define K210_PCF_DVP_D1 139 /* Data Bit 1 */
+#define K210_PCF_DVP_D2 140 /* Data Bit 2 */
+#define K210_PCF_DVP_D3 141 /* Data Bit 3 */
+#define K210_PCF_DVP_D4 142 /* Data Bit 4 */
+#define K210_PCF_DVP_D5 143 /* Data Bit 5 */
+#define K210_PCF_DVP_D6 144 /* Data Bit 6 */
+#define K210_PCF_DVP_D7 145 /* Data Bit 7 */
+#define K210_PCF_SCCB_SCLK 146 /* Serial Camera Control Bus Clock */
+#define K210_PCF_SCCB_SDA 147 /* Serial Camera Control Bus Data */
+#define K210_PCF_UART1_CTS 148 /* UART1 Clear To Send */
+#define K210_PCF_UART1_DSR 149 /* UART1 Data Set Ready */
+#define K210_PCF_UART1_DCD 150 /* UART1 Data Carrier Detect */
+#define K210_PCF_UART1_RI 151 /* UART1 Ring Indicator */
+#define K210_PCF_UART1_SIR_IN 152 /* UART1 Serial Infrared Input */
+#define K210_PCF_UART1_DTR 153 /* UART1 Data Terminal Ready */
+#define K210_PCF_UART1_RTS 154 /* UART1 Request To Send */
+#define K210_PCF_UART1_OUT2 155 /* UART1 User-designated Output 2 */
+#define K210_PCF_UART1_OUT1 156 /* UART1 User-designated Output 1 */
+#define K210_PCF_UART1_SIR_OUT 157 /* UART1 Serial Infrared Output */
+#define K210_PCF_UART1_BAUD 158 /* UART1 Transmit Clock Output */
+#define K210_PCF_UART1_RE 159 /* UART1 Receiver Output Enable */
+#define K210_PCF_UART1_DE 160 /* UART1 Driver Output Enable */
+#define K210_PCF_UART1_RS485_EN 161 /* UART1 RS485 Enable */
+#define K210_PCF_UART2_CTS 162 /* UART2 Clear To Send */
+#define K210_PCF_UART2_DSR 163 /* UART2 Data Set Ready */
+#define K210_PCF_UART2_DCD 164 /* UART2 Data Carrier Detect */
+#define K210_PCF_UART2_RI 165 /* UART2 Ring Indicator */
+#define K210_PCF_UART2_SIR_IN 166 /* UART2 Serial Infrared Input */
+#define K210_PCF_UART2_DTR 167 /* UART2 Data Terminal Ready */
+#define K210_PCF_UART2_RTS 168 /* UART2 Request To Send */
+#define K210_PCF_UART2_OUT2 169 /* UART2 User-designated Output 2 */
+#define K210_PCF_UART2_OUT1 170 /* UART2 User-designated Output 1 */
+#define K210_PCF_UART2_SIR_OUT 171 /* UART2 Serial Infrared Output */
+#define K210_PCF_UART2_BAUD 172 /* UART2 Transmit Clock Output */
+#define K210_PCF_UART2_RE 173 /* UART2 Receiver Output Enable */
+#define K210_PCF_UART2_DE 174 /* UART2 Driver Output Enable */
+#define K210_PCF_UART2_RS485_EN 175 /* UART2 RS485 Enable */
+#define K210_PCF_UART3_CTS 176 /* UART3 Clear To Send */
+#define K210_PCF_UART3_DSR 177 /* UART3 Data Set Ready */
+#define K210_PCF_UART3_DCD 178 /* UART3 Data Carrier Detect */
+#define K210_PCF_UART3_RI 179 /* UART3 Ring Indicator */
+#define K210_PCF_UART3_SIR_IN 180 /* UART3 Serial Infrared Input */
+#define K210_PCF_UART3_DTR 181 /* UART3 Data Terminal Ready */
+#define K210_PCF_UART3_RTS 182 /* UART3 Request To Send */
+#define K210_PCF_UART3_OUT2 183 /* UART3 User-designated Output 2 */
+#define K210_PCF_UART3_OUT1 184 /* UART3 User-designated Output 1 */
+#define K210_PCF_UART3_SIR_OUT 185 /* UART3 Serial Infrared Output */
+#define K210_PCF_UART3_BAUD 186 /* UART3 Transmit Clock Output */
+#define K210_PCF_UART3_RE 187 /* UART3 Receiver Output Enable */
+#define K210_PCF_UART3_DE 188 /* UART3 Driver Output Enable */
+#define K210_PCF_UART3_RS485_EN 189 /* UART3 RS485 Enable */
+#define K210_PCF_TIMER0_TOGGLE1 190 /* TIMER0 Toggle Output 1 */
+#define K210_PCF_TIMER0_TOGGLE2 191 /* TIMER0 Toggle Output 2 */
+#define K210_PCF_TIMER0_TOGGLE3 192 /* TIMER0 Toggle Output 3 */
+#define K210_PCF_TIMER0_TOGGLE4 193 /* TIMER0 Toggle Output 4 */
+#define K210_PCF_TIMER1_TOGGLE1 194 /* TIMER1 Toggle Output 1 */
+#define K210_PCF_TIMER1_TOGGLE2 195 /* TIMER1 Toggle Output 2 */
+#define K210_PCF_TIMER1_TOGGLE3 196 /* TIMER1 Toggle Output 3 */
+#define K210_PCF_TIMER1_TOGGLE4 197 /* TIMER1 Toggle Output 4 */
+#define K210_PCF_TIMER2_TOGGLE1 198 /* TIMER2 Toggle Output 1 */
+#define K210_PCF_TIMER2_TOGGLE2 199 /* TIMER2 Toggle Output 2 */
+#define K210_PCF_TIMER2_TOGGLE3 200 /* TIMER2 Toggle Output 3 */
+#define K210_PCF_TIMER2_TOGGLE4 201 /* TIMER2 Toggle Output 4 */
+#define K210_PCF_CLK_SPI2 202 /* Clock SPI2 */
+#define K210_PCF_CLK_I2C2 203 /* Clock I2C2 */
+#define K210_PCF_INTERNAL0 204 /* Internal function signal 0 */
+#define K210_PCF_INTERNAL1 205 /* Internal function signal 1 */
+#define K210_PCF_INTERNAL2 206 /* Internal function signal 2 */
+#define K210_PCF_INTERNAL3 207 /* Internal function signal 3 */
+#define K210_PCF_INTERNAL4 208 /* Internal function signal 4 */
+#define K210_PCF_INTERNAL5 209 /* Internal function signal 5 */
+#define K210_PCF_INTERNAL6 210 /* Internal function signal 6 */
+#define K210_PCF_INTERNAL7 211 /* Internal function signal 7 */
+#define K210_PCF_INTERNAL8 212 /* Internal function signal 8 */
+#define K210_PCF_INTERNAL9 213 /* Internal function signal 9 */
+#define K210_PCF_INTERNAL10 214 /* Internal function signal 10 */
+#define K210_PCF_INTERNAL11 215 /* Internal function signal 11 */
+#define K210_PCF_INTERNAL12 216 /* Internal function signal 12 */
+#define K210_PCF_INTERNAL13 217 /* Internal function signal 13 */
+#define K210_PCF_INTERNAL14 218 /* Internal function signal 14 */
+#define K210_PCF_INTERNAL15 219 /* Internal function signal 15 */
+#define K210_PCF_INTERNAL16 220 /* Internal function signal 16 */
+#define K210_PCF_INTERNAL17 221 /* Internal function signal 17 */
+#define K210_PCF_CONSTANT 222 /* Constant function */
+#define K210_PCF_INTERNAL18 223 /* Internal function signal 18 */
+#define K210_PCF_DEBUG0 224 /* Debug function 0 */
+#define K210_PCF_DEBUG1 225 /* Debug function 1 */
+#define K210_PCF_DEBUG2 226 /* Debug function 2 */
+#define K210_PCF_DEBUG3 227 /* Debug function 3 */
+#define K210_PCF_DEBUG4 228 /* Debug function 4 */
+#define K210_PCF_DEBUG5 229 /* Debug function 5 */
+#define K210_PCF_DEBUG6 230 /* Debug function 6 */
+#define K210_PCF_DEBUG7 231 /* Debug function 7 */
+#define K210_PCF_DEBUG8 232 /* Debug function 8 */
+#define K210_PCF_DEBUG9 233 /* Debug function 9 */
+#define K210_PCF_DEBUG10 234 /* Debug function 10 */
+#define K210_PCF_DEBUG11 235 /* Debug function 11 */
+#define K210_PCF_DEBUG12 236 /* Debug function 12 */
+#define K210_PCF_DEBUG13 237 /* Debug function 13 */
+#define K210_PCF_DEBUG14 238 /* Debug function 14 */
+#define K210_PCF_DEBUG15 239 /* Debug function 15 */
+#define K210_PCF_DEBUG16 240 /* Debug function 16 */
+#define K210_PCF_DEBUG17 241 /* Debug function 17 */
+#define K210_PCF_DEBUG18 242 /* Debug function 18 */
+#define K210_PCF_DEBUG19 243 /* Debug function 19 */
+#define K210_PCF_DEBUG20 244 /* Debug function 20 */
+#define K210_PCF_DEBUG21 245 /* Debug function 21 */
+#define K210_PCF_DEBUG22 246 /* Debug function 22 */
+#define K210_PCF_DEBUG23 247 /* Debug function 23 */
+#define K210_PCF_DEBUG24 248 /* Debug function 24 */
+#define K210_PCF_DEBUG25 249 /* Debug function 25 */
+#define K210_PCF_DEBUG26 250 /* Debug function 26 */
+#define K210_PCF_DEBUG27 251 /* Debug function 27 */
+#define K210_PCF_DEBUG28 252 /* Debug function 28 */
+#define K210_PCF_DEBUG29 253 /* Debug function 29 */
+#define K210_PCF_DEBUG30 254 /* Debug function 30 */
+#define K210_PCF_DEBUG31 255 /* Debug function 31 */
+
+#define K210_FPIOA(pin, func) (((pin) << 16) | (func))
+
+#define K210_PC_POWER_3V3 0
+#define K210_PC_POWER_1V8 1
+
+#endif /* PINCTRL_K210_FPIOA_H */
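K210_FPIOA() packs the pin in the high half-word and the 8-bit FPIOA function code in the low byte. A hedged sketch of the usual FPIOA pinmux layout (the controller address is illustrative):

        fpioa: pinmux@502b0000 {
                uarths_pinctrl: uarths-pinmux {
                        pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
                                 <K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
                };
        };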
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
deleted file mode 100644
index b0eea7cc6e23..000000000000
--- a/include/dt-bindings/pinctrl/k3.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for pinctrl bindings for TI's K3 SoC
- * family.
- *
- * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
- */
-#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H
-#define _DT_BINDINGS_PINCTRL_TI_K3_H
-
-#define PULLUDEN_SHIFT (16)
-#define PULLTYPESEL_SHIFT (17)
-#define RXACTIVE_SHIFT (18)
-
-#define PULL_DISABLE (1 << PULLUDEN_SHIFT)
-#define PULL_ENABLE (0 << PULLUDEN_SHIFT)
-
-#define PULL_UP (1 << PULLTYPESEL_SHIFT | PULL_ENABLE)
-#define PULL_DOWN (0 << PULLTYPESEL_SHIFT | PULL_ENABLE)
-
-#define INPUT_EN (1 << RXACTIVE_SHIFT)
-#define INPUT_DISABLE (0 << RXACTIVE_SHIFT)
-
-/* Only these macros are expected be used directly in device tree files */
-#define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE)
-#define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP)
-#define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN)
-#define PIN_INPUT (INPUT_EN | PULL_DISABLE)
-#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
-#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN)
-
-#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#endif
diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h
index 7f97d776a8ff..66f8aecada53 100644
--- a/include/dt-bindings/pinctrl/keystone.h
+++ b/include/dt-bindings/pinctrl/keystone.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for Keystone pinctrl bindings.
*
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
 */

#ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H
diff --git a/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
new file mode 100644
index 000000000000..2688da2f621f
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
@@ -0,0 +1,1280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ */
+
+#ifndef __MEDIATEK_MT8188_PINFUNC_H
+#define __MEDIATEK_MT8188_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_B_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_O_SPIM5_CSB (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_O_UTXD1 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_O_DMIC3_CLK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_O_I2SO2_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_B0_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_B_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_O_SPIM5_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I1_URXD1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_B0_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_B_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_O_URTS1 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_B0_I2SIN_WS (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_B0_I2SO2_WS (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_B0_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_B_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I1_UCTS1 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_O_DMIC4_CLK (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_O_I2SO2_D0 (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_B0_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_B_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_O_I2SO1_MCK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_O_I2SO2_D1 (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_B0_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_B_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_O_I2SO1_BCK (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_O_I2SO2_D2 (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_B0_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_B_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_O_I2SO1_WS (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_O_DMIC1_CLK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_O_I2SO2_D3 (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_B_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_O_SPIM3_CSB (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_O_CMVREF0 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_O_CLKM0 (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_B0_DBG_MON_A6 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_B_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_O_SPIM3_CLK (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_O_CMVREF1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_O_CLKM1 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_B0_DBG_MON_A7 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_B_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_O_DMIC2_CLK (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_O_CMFLASH0 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_O_PWM_0 (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_B0_DBG_MON_A8 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_B_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I0_TDMIN_DI (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_O_CMFLASH1 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_O_PWM_1 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_B0_DBG_MON_A9 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_B_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_O_SPDIF_OUT (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_O_I2SO1_D0 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_O_CMVREF6 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_B0_DBG_MON_A10 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_B_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_O_SPIM4_CSB (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_B1_JTMS_SEL3 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_B_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_O_SPIM4_CLK (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I0_JTCK_SEL3 (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_B_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_I1_JTDI_SEL3 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_B_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_O_JTDO_SEL3 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_B_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_O_UTXD3 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_I1_JTRSTn_SEL3 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_B_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_I1_URXD3 (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_O_CMFLASH2 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_O_CMVREF7 (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_B_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_O_CMFLASH0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_O_CMVREF4 (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_O_UTXD1 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_B0_DBG_MON_A11 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_B_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_O_CMFLASH1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_O_CMVREF5 (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_I1_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_B0_DBG_MON_A12 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_B_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_O_CMFLASH2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_O_CLKM2 (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_O_URTS1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_B0_DBG_MON_A13 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_B_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_O_CMFLASH3 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_O_CLKM3 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_I0_TDMIN_DI (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_I1_UCTS1 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_B0_DBG_MON_A14 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_B_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_O_CMMCLK0 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_B0_DBG_MON_A15 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_B_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_O_CMMCLK1 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_O_PWM_2 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_B0_DBG_MON_A16 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_B_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_O_CMMCLK2 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_O_PWM_3 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_B_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_O_LCM_RST (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_O_LCM1_RST (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(25) | 3)
+
+#define PINMUX_GPIO26__FUNC_B_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I0_DSI_TE (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_I0_DSI1_TE (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(26) | 3)
+
+#define PINMUX_GPIO27__FUNC_B_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_O_LCM1_RST (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_O_LCM_RST (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_O_CMVREF2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_O_PWM_2 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_B0_DBG_MON_A17 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_B_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I0_DSI1_TE (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_I0_DSI_TE (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_O_CMVREF3 (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_O_PWM_3 (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_B0_DBG_MON_A18 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_B_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM1 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_B_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM1 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_O_CMFLASH3 (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_O_PWM_1 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_B0_DBG_MON_A19 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_B_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_O_UTXD0 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_O_MD32_0_TXD (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_O_MD32_1_TXD (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_B_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I1_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_B_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_O_UTXD1 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_O_URTS2 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_O_MD32_0_TXD (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_B_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I1_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I1_UCTS2 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_B_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_O_UTXD2 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_O_URTS1 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_O_MD32_1_TXD (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_B0_DBG_MON_A20 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_B_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_I1_URXD2 (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I1_UCTS1 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_B0_DBG_MON_A21 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_B_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_B1_JTMS_SEL1 (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I0_UDI_TMS (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_B_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_I0_JTCK_SEL1 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I0_UDI_TCK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_B_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_I1_JTDI_SEL1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I0_UDI_TDI (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_B_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_O_JTDO_SEL1 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_O_UDI_TDO (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_B_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_I1_JTRSTn_SEL1 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_I0_UDI_NTRST (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_B_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_B1_KPCOL0 (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_B_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_B1_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_O_CMFLASH2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_B_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_B1_KPROW0 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_B_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_B1_KPROW1 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_O_CMFLASH3 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_B_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_O_PWM_0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_B0_DBG_MON_A22 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_B_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_I1_WAKEN (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_O_GDU_TROOPS_DET0 (MTK_PIN_NO(47) | 6)
+
+#define PINMUX_GPIO48__FUNC_B_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_O_PERSTN (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_O_GDU_TROOPS_DET1 (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_B_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_B1_CLKREQN (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_O_GDU_TROOPS_DET2 (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_B_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I1_IDDIG_1P (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_B_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_B_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_B_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I1_IDDIG_2P (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_B_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_B_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_B1_SCL0 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_B_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_B1_SDA0 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_B_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_B1_SCL1 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_B_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_B1_SDA1 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_B_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_B1_SCL2 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(59) | 3)
+
+#define PINMUX_GPIO60__FUNC_B_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_B1_SDA2 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(60) | 3)
+
+#define PINMUX_GPIO61__FUNC_B_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_B1_SCL3 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+#define PINMUX_GPIO61__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(61) | 4)
+
+#define PINMUX_GPIO62__FUNC_B_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_B1_SDA3 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+#define PINMUX_GPIO62__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(62) | 4)
+
+#define PINMUX_GPIO63__FUNC_B_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_B1_SCL4 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_B_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_B1_SDA4 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_B_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_B1_SCL5 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(65) | 3)
+
+#define PINMUX_GPIO66__FUNC_B_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_B1_SDA5 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(66) | 3)
+
+#define PINMUX_GPIO67__FUNC_B_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_B1_SCL6 (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(67) | 4)
+
+#define PINMUX_GPIO68__FUNC_B_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_B1_SDA6 (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(68) | 4)
+
+#define PINMUX_GPIO69__FUNC_B_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_O_SPIM0_CSB (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_O_SCP_SPI0_CS (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_O_DMIC3_CLK (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_O_CMVREF0 (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_O_GDU_SUM_TROOP0_0 (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_B0_DBG_MON_A23 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_B_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_O_SPIM0_CLK (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_O_SCP_SPI0_CK (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_O_CMVREF1 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_O_GDU_SUM_TROOP0_1 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_B0_DBG_MON_A24 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_B_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_O_SCP_SPI0_MO (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_O_CMVREF2 (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_O_GDU_SUM_TROOP0_2 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_B0_DBG_MON_A25 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_B_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_B0_SPIM0_MISO (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_I0_SCP_SPI0_MI (MTK_PIN_NO(72) | 2)
+#define PINMUX_GPIO72__FUNC_O_DMIC4_CLK (MTK_PIN_NO(72) | 3)
+#define PINMUX_GPIO72__FUNC_O_CMVREF3 (MTK_PIN_NO(72) | 5)
+#define PINMUX_GPIO72__FUNC_O_GDU_SUM_TROOP1_0 (MTK_PIN_NO(72) | 6)
+#define PINMUX_GPIO72__FUNC_B0_DBG_MON_A26 (MTK_PIN_NO(72) | 7)
+
+#define PINMUX_GPIO73__FUNC_B_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_B0_SPIM0_MIO2 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_O_UTXD3 (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(73) | 3)
+#define PINMUX_GPIO73__FUNC_O_CLKM0 (MTK_PIN_NO(73) | 4)
+#define PINMUX_GPIO73__FUNC_O_CMVREF4 (MTK_PIN_NO(73) | 5)
+#define PINMUX_GPIO73__FUNC_O_GDU_SUM_TROOP1_1 (MTK_PIN_NO(73) | 6)
+#define PINMUX_GPIO73__FUNC_B0_DBG_MON_A27 (MTK_PIN_NO(73) | 7)
+
+#define PINMUX_GPIO74__FUNC_B_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_B0_SPIM0_MIO3 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_I1_URXD3 (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(74) | 3)
+#define PINMUX_GPIO74__FUNC_O_CLKM1 (MTK_PIN_NO(74) | 4)
+#define PINMUX_GPIO74__FUNC_O_CMVREF5 (MTK_PIN_NO(74) | 5)
+#define PINMUX_GPIO74__FUNC_O_GDU_SUM_TROOP1_2 (MTK_PIN_NO(74) | 6)
+#define PINMUX_GPIO74__FUNC_B0_DBG_MON_A28 (MTK_PIN_NO(74) | 7)
+
+#define PINMUX_GPIO75__FUNC_B_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_O_SPIM1_CSB (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_O_SCP_SPI1_A_CS (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(75) | 3)
+#define PINMUX_GPIO75__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(75) | 4)
+#define PINMUX_GPIO75__FUNC_O_CMVREF6 (MTK_PIN_NO(75) | 5)
+#define PINMUX_GPIO75__FUNC_O_GDU_SUM_TROOP2_0 (MTK_PIN_NO(75) | 6)
+#define PINMUX_GPIO75__FUNC_B0_DBG_MON_A29 (MTK_PIN_NO(75) | 7)
+
+#define PINMUX_GPIO76__FUNC_B_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_O_SPIM1_CLK (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_O_SCP_SPI1_A_CK (MTK_PIN_NO(76) | 2)
+#define PINMUX_GPIO76__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(76) | 3)
+#define PINMUX_GPIO76__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(76) | 4)
+#define PINMUX_GPIO76__FUNC_O_CMVREF7 (MTK_PIN_NO(76) | 5)
+#define PINMUX_GPIO76__FUNC_O_GDU_SUM_TROOP2_1 (MTK_PIN_NO(76) | 6)
+#define PINMUX_GPIO76__FUNC_B0_DBG_MON_A30 (MTK_PIN_NO(76) | 7)
+
+#define PINMUX_GPIO77__FUNC_B_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_O_SCP_SPI1_A_MO (MTK_PIN_NO(77) | 2)
+#define PINMUX_GPIO77__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(77) | 3)
+#define PINMUX_GPIO77__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(77) | 4)
+#define PINMUX_GPIO77__FUNC_O_GDU_SUM_TROOP2_2 (MTK_PIN_NO(77) | 6)
+#define PINMUX_GPIO77__FUNC_B0_DBG_MON_A31 (MTK_PIN_NO(77) | 7)
+
+#define PINMUX_GPIO78__FUNC_B_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_B0_SPIM1_MISO (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_I0_SCP_SPI1_A_MI (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_I0_TDMIN_DI (MTK_PIN_NO(78) | 3)
+#define PINMUX_GPIO78__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(78) | 4)
+#define PINMUX_GPIO78__FUNC_B0_DBG_MON_A32 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_B_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_O_SPIM2_CSB (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_O_SCP_SPI2_CS (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_O_I2SO1_MCK (MTK_PIN_NO(79) | 3)
+#define PINMUX_GPIO79__FUNC_O_UTXD2 (MTK_PIN_NO(79) | 4)
+#define PINMUX_GPIO79__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(79) | 5)
+#define PINMUX_GPIO79__FUNC_B0_PCM_SYNC (MTK_PIN_NO(79) | 6)
+#define PINMUX_GPIO79__FUNC_B0_DBG_MON_B0 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_B_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_O_SPIM2_CLK (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_O_SCP_SPI2_CK (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_O_I2SO1_BCK (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_I1_URXD2 (MTK_PIN_NO(80) | 4)
+#define PINMUX_GPIO80__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(80) | 5)
+#define PINMUX_GPIO80__FUNC_B0_PCM_CLK (MTK_PIN_NO(80) | 6)
+#define PINMUX_GPIO80__FUNC_B0_DBG_MON_B1 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_B_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_O_SCP_SPI2_MO (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_O_I2SO1_WS (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_O_URTS2 (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(81) | 5)
+#define PINMUX_GPIO81__FUNC_O_PCM_DO (MTK_PIN_NO(81) | 6)
+#define PINMUX_GPIO81__FUNC_B0_DBG_MON_B2 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_B_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_B0_SPIM2_MISO (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_I0_SCP_SPI2_MI (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_O_I2SO1_D0 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_I1_UCTS2 (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(82) | 5)
+#define PINMUX_GPIO82__FUNC_I0_PCM_DI (MTK_PIN_NO(82) | 6)
+#define PINMUX_GPIO82__FUNC_B0_DBG_MON_B3 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_B_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_I1_IDDIG (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_B_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_O_USB_DRVVBUS (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_B_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_I0_VBUSVALID (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_B_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_I1_IDDIG_1P (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_O_UTXD1 (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_O_URTS2 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_O_PWM_2 (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_B0_DBG_MON_B4 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_B_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_I1_URXD1 (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I1_UCTS2 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_O_PWM_3 (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_B0_DBG_MON_B5 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_B_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_O_UTXD2 (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_O_URTS1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_O_CLKM2 (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_B0_DBG_MON_B6 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_B_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_I1_IDDIG_2P (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_I1_URXD2 (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I1_UCTS1 (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_O_CLKM3 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_B0_DBG_MON_B7 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_B_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_O_UTXD3 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_O_MD32_0_TXD (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_O_MD32_1_TXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_B0_DBG_MON_B8 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_B_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_I1_URXD3 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(91) | 6)
+#define PINMUX_GPIO91__FUNC_B0_DBG_MON_B9 (MTK_PIN_NO(91) | 7)
+
+#define PINMUX_GPIO92__FUNC_B_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_O_PWRAP_SPI0_CSN (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_B_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_O_PWRAP_SPI0_CK (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_B_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(94) | 2)
+
+#define PINMUX_GPIO95__FUNC_B_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(95) | 2)
+
+#define PINMUX_GPIO96__FUNC_B_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_O_SRCLKENA0 (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_B_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_O_SRCLKENA1 (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_B_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_O_SCP_VREQ_VAO (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(98) | 2)
+
+#define PINMUX_GPIO99__FUNC_B_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_I0_RTC32K_CK (MTK_PIN_NO(99) | 1)
+
+#define PINMUX_GPIO100__FUNC_B_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_O_WATCHDOG (MTK_PIN_NO(100) | 1)
+
+#define PINMUX_GPIO101__FUNC_B_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_O_I2SO1_MCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(101) | 3)
+
+#define PINMUX_GPIO102__FUNC_B_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_O_I2SO1_BCK (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_B0_I2SIN_WS (MTK_PIN_NO(102) | 3)
+
+#define PINMUX_GPIO103__FUNC_B_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_O_I2SO1_WS (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(103) | 3)
+
+#define PINMUX_GPIO104__FUNC_B_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_O_I2SO1_D0 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(104) | 3)
+
+#define PINMUX_GPIO105__FUNC_B_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_I0_VOW_DAT_MISO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(105) | 3)
+
+#define PINMUX_GPIO106__FUNC_B_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_I0_VOW_CLK_MISO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(106) | 3)
+
+#define PINMUX_GPIO107__FUNC_B_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_I0_SPLIN_MCK (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_O_CMVREF4 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_O_PGD_LV_LSC_PWR0 (MTK_PIN_NO(107) | 6)
+
+#define PINMUX_GPIO108__FUNC_B_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_I0_SPLIN_LRCK (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_O_DMIC4_CLK (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_O_CMVREF5 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_O_PGD_LV_LSC_PWR1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_B0_DBG_MON_B10 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_B_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_B0_I2SIN_WS (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I0_SPLIN_BCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_O_CMVREF6 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_O_PGD_LV_LSC_PWR2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_B0_DBG_MON_B11 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_B_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I0_SPLIN_D0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_O_CMVREF7 (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_O_PGD_LV_LSC_PWR3 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_B0_DBG_MON_B12 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_B_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I0_SPLIN_D1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_O_DMIC3_CLK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_O_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_O_PGD_LV_LSC_PWR4 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_B0_DBG_MON_B13 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_B_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I0_SPLIN_D2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_O_I2SO1_WS (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_O_PGD_LV_LSC_PWR5 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_B0_DBG_MON_B14 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_B_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I0_SPLIN_D3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_O_I2SO1_D0 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_B0_DBG_MON_B15 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_B_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_O_I2SO2_MCK (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_B0_DBG_MON_B16 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_B_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(115) | 6)
+#define PINMUX_GPIO115__FUNC_B0_DBG_MON_B17 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_B_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_B0_I2SO2_WS (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_B0_I2SIN_WS (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_B0_DBG_MON_B18 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_B_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_O_I2SO2_D0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(117) | 5)
+#define PINMUX_GPIO117__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_B0_DBG_MON_B19 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_B_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_O_I2SO2_D1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_B0_DBG_MON_B20 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_B_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_O_I2SO2_D2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_O_UTXD3 (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_O_I2SO1_MCK (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(119) | 6)
+#define PINMUX_GPIO119__FUNC_B0_DBG_MON_B21 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_B_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_O_I2SO2_D3 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_I1_URXD3 (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_I0_TDMIN_DI (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_O_I2SO1_BCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_B0_DBG_MON_B22 (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_B_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_B0_PCM_CLK (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_O_SPIM4_CSB (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_O_SCP_SPI1_B_CS (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_O_PGD_DA_EFUSE_RDY (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_B0_DBG_MON_B23 (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_B_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_B0_PCM_SYNC (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_O_SPIM4_CLK (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_O_SCP_SPI1_B_CK (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_O_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_B0_DBG_MON_B24 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_B_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_O_PCM_DO (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_O_SCP_SPI1_B_MO (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_O_PGD_DA_PWRGD_RESET (MTK_PIN_NO(123) | 6)
+#define PINMUX_GPIO123__FUNC_B0_DBG_MON_B25 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_B_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_I0_PCM_DI (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I0_SCP_SPI1_B_MI (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_O_PGD_DA_PWRGD_ENB (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_B0_DBG_MON_B26 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_B_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_O_DMIC1_CLK (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_O_SPINOR_CK (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_O_LVTS_FOUT (MTK_PIN_NO(125) | 6)
+#define PINMUX_GPIO125__FUNC_B0_DBG_MON_B27 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_B_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_O_SPINOR_CS (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_O_LVTS_SDO (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_B0_DBG_MON_B28 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_B_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_B0_SPINOR_IO0 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_I0_LVTS_26M (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_B0_DBG_MON_B29 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_B_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_O_DMIC2_CLK (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_B0_SPINOR_IO1 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_I0_TDMIN_DI (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_I0_LVTS_SCF (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_B0_DBG_MON_B30 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_B_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_B0_SPINOR_IO2 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_I0_LVTS_SCK (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_B0_DBG_MON_B31 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_B_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_B0_SPINOR_IO3 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_I0_LVTS_SDI (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_B0_DBG_MON_B32 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_B_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_O_DPI_D0 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_O_GBE_TXD3 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_O_DMIC1_CLK (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_O_I2SO2_MCK (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_O_SPIM5_CSB (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_O_PGD_LV_HSC_PWR0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_B_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_O_DPI_D1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_O_GBE_TXD2 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_O_SPIM5_CLK (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_O_PGD_LV_HSC_PWR1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_B_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_O_DPI_D2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_O_GBE_TXD1 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_B0_I2SO2_WS (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_O_PGD_LV_HSC_PWR2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_B_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_O_DPI_D3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_O_GBE_TXD0 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_O_DMIC2_CLK (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_O_I2SO2_D0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_O_PGD_LV_HSC_PWR3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_B_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_O_DPI_D4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_I0_GBE_RXD3 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_O_I2SO2_D1 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_I1_WAKEN (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_O_PGD_LV_HSC_PWR4 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_B_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_O_DPI_D5 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_I0_GBE_RXD2 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_O_I2SO2_D2 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_O_PERSTN (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_O_PGD_LV_HSC_PWR5 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_B_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_O_DPI_D6 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_I0_GBE_RXD1 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_O_DMIC3_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_O_I2SO2_D3 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_B1_CLKREQN (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_O_PWM_0 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_B_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_O_DPI_D7 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_I0_GBE_RXD0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_O_CLKM2 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_B_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_O_DPI_D8 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_B0_GBE_TXC (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_O_CLKM3 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_O_UTXD2 (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_B_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_O_DPI_D9 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_I0_GBE_RXC (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_O_DMIC4_CLK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_O_PWM_2 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_I1_URXD2 (MTK_PIN_NO(140) | 6)
+#define PINMUX_GPIO140__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_B_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_O_DPI_D10 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_I0_GBE_RXDV (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_O_PWM_3 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_O_URTS2 (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_B_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_O_DPI_D11 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_O_GBE_TXEN (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_O_PWM_1 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(142) | 5)
+#define PINMUX_GPIO142__FUNC_I1_UCTS2 (MTK_PIN_NO(142) | 6)
+#define PINMUX_GPIO142__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_B_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_O_DPI_D12 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_O_GBE_MDC (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_O_CLKM0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_O_SPIM3_CSB (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_O_UTXD1 (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_B_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_O_DPI_D13 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_B1_GBE_MDIO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_O_CLKM1 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_O_SPIM3_CLK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_I1_URXD1 (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_O_PGD_HV_HSC_PWR0 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_B_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_O_DPI_D14 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_O_GBE_TXER (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_O_CMFLASH0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_B0_GBE_AUX_PPS2 (MTK_PIN_NO(145) | 6)
+#define PINMUX_GPIO145__FUNC_O_PGD_HV_HSC_PWR1 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_B_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_O_DPI_D15 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I0_GBE_RXER (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_O_CMFLASH1 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_B0_GBE_AUX_PPS3 (MTK_PIN_NO(146) | 6)
+#define PINMUX_GPIO146__FUNC_O_PGD_HV_HSC_PWR2 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_B_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_O_DPI_HSYNC (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_I0_GBE_COL (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_O_I2SO1_MCK (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_O_CMVREF0 (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_O_SPDIF_OUT (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_O_URTS1 (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_O_PGD_HV_HSC_PWR3 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_B_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_O_DPI_VSYNC (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_I0_GBE_INTR (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_O_I2SO1_BCK (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_O_CMVREF1 (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_I1_UCTS1 (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_O_PGD_HV_HSC_PWR4 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_B_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_O_DPI_DE (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_B0_GBE_AUX_PPS0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_O_I2SO1_WS (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_O_CMVREF2 (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_O_UTXD3 (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_O_PGD_HV_HSC_PWR5 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_B_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_O_DPI_CK (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_B0_GBE_AUX_PPS1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_O_I2SO1_D0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_O_CMVREF3 (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_I1_URXD3 (MTK_PIN_NO(150) | 6)
+
+#define PINMUX_GPIO151__FUNC_B_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7 (MTK_PIN_NO(151) | 1)
+
+#define PINMUX_GPIO152__FUNC_B_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6 (MTK_PIN_NO(152) | 1)
+
+#define PINMUX_GPIO153__FUNC_B_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5 (MTK_PIN_NO(153) | 1)
+
+#define PINMUX_GPIO154__FUNC_B_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4 (MTK_PIN_NO(154) | 1)
+
+#define PINMUX_GPIO155__FUNC_B_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_O_MSDC0_RSTB (MTK_PIN_NO(155) | 1)
+
+#define PINMUX_GPIO156__FUNC_B_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_B1_MSDC0_CMD (MTK_PIN_NO(156) | 1)
+
+#define PINMUX_GPIO157__FUNC_B_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_B1_MSDC0_CLK (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_B_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_B_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_B_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_B_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_B_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_B0_MSDC0_DSL (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_B_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_B1_MSDC1_CMD (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_O_SPDIF_OUT (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(163) | 6)
+#define PINMUX_GPIO163__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_B_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_B1_MSDC1_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(164) | 6)
+#define PINMUX_GPIO164__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_B_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_B_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(166) | 6)
+#define PINMUX_GPIO166__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_B_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_O_PWM_0 (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(167) | 6)
+#define PINMUX_GPIO167__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_B_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_O_PWM_1 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_O_CLKM0 (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_B_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_B1_MSDC2_CMD (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_O_LVTS_FOUT (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_I0_UDI_TMS (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_B_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_B1_MSDC2_CLK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_O_LVTS_SDO (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I0_UDI_TCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_B_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I0_LVTS_26M (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I0_UDI_TDI (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_B_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I0_LVTS_SCF (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_O_UDI_TDO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_I0_TDMIN_DI (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_B_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I0_LVTS_SCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I0_UDI_NTRST (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_B_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3 (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I0_LVTS_SDI (MTK_PIN_NO(174) | 2)
+
+#define PINMUX_GPIO175__FUNC_B_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_B0_SPMI_M_SCL (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_B_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_B0_SPMI_M_SDA (MTK_PIN_NO(176) | 1)
+
+#endif /* __MEDIATEK_MT8188_PINFUNC_H */
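Each PINMUX_* macro above packs a pin index (via MTK_PIN_NO()) and a mux mode (0-7) into a single cell, which a board devicetree then hands to the pin controller. A minimal usage sketch, assuming a board .dts for this SoC that includes the header; the node labels and pin choices are illustrative, not taken from any in-tree board:

&pio {
	/* Route pins 31/32 to UART0 TX/RX (mux mode 1 on both pins) */
	uart0_pins: uart0-pins {
		pins {
			pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>,
				 <PINMUX_GPIO32__FUNC_I1_URXD0>;
		};
	};
};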
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
index 7e16e58fe1f7..f5934abcd1bd 100644
--- a/include/dt-bindings/pinctrl/mt65xx.h
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -16,6 +16,15 @@
 #define MTK_PUPD_SET_R1R0_10 102
 #define MTK_PUPD_SET_R1R0_11 103
 
+#define MTK_PULL_SET_RSEL_000 200
+#define MTK_PULL_SET_RSEL_001 201
+#define MTK_PULL_SET_RSEL_010 202
+#define MTK_PULL_SET_RSEL_011 203
+#define MTK_PULL_SET_RSEL_100 204
+#define MTK_PULL_SET_RSEL_101 205
+#define MTK_PULL_SET_RSEL_110 206
+#define MTK_PULL_SET_RSEL_111 207
+
 #define MTK_DRIVE_2mA 2
 #define MTK_DRIVE_4mA 4
 #define MTK_DRIVE_6mA 6
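The new MTK_PULL_SET_RSEL_* arguments mirror the existing MTK_PUPD_SET_R1R0_* scheme: the low three bits select one of eight pull-resistor settings, and the 200 offset keeps the values distinct from plain 0/1 bias arguments and from the R1R0 codes at 100-103, so the driver can dispatch on the value range. A hedged usage sketch, assuming a pin group on an SoC whose pads support RSEL pulls (node and pin names are illustrative):

&pio {
	i2c0_pins: i2c0-pins {
		pins {
			pinmux = <PINMUX_GPIO55__FUNC_B1_SCL0>,
				 <PINMUX_GPIO56__FUNC_B1_SDA0>;
			/* pick RSEL level 001 for the internal pull-up */
			bias-pull-up = <MTK_PULL_SET_RSEL_001>;
		};
	};
};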
diff --git a/include/dt-bindings/pinctrl/mt6795-pinfunc.h b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
new file mode 100644
index 000000000000..dfd3f6f13e0d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
@@ -0,0 +1,908 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DTS_MT6795_PINFUNC_H
+#define __DTS_MT6795_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_IRDA_PDN (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S1_WS (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDD_TMS (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_UTXD0 (MTK_PIN_NO(0) | 5)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_IRDA_RXD (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S1_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_SDA4 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDD_TCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_URXD0 (MTK_PIN_NO(1) | 5)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_IRDA_TXD (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S1_MCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_SCL4 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDD_TDI (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_UTXD3 (MTK_PIN_NO(2) | 5)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_DSI1_TE (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S1_DO_1 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_SDA3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDD_TDO (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_URXD3 (MTK_PIN_NO(3) | 5)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO_2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SCL3 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDD_TRSTN (MTK_PIN_NO(4) | 4)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_WS (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPI_CK_3 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(5) | 5)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_BCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_SPI_MI_3 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(6) | 5)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_PCM1_DI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI_1 (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SPI_MO_3 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(7) | 5)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PCM1_DO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI_2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPI_CS_3 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(8) | 5)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_I2S2_MCK (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(9) | 5)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_WS (MTK_PIN_NO(10) | 2)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 2)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_MCK (MTK_PIN_NO(12) | 2)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DO (MTK_PIN_NO(13) | 2)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_I2S0_DI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_DISP_PWM1 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_PWM4 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S1_BCK (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DSI1_TE (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM5 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S1_MCK (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_FLASH (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM5 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SIM1_SCLK (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SIM2_SCLK (MTK_PIN_NO(17) | 2)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SIM1_SRST (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SIM2_SRST (MTK_PIN_NO(18) | 2)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SIM1_SDAT (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SIM2_SDAT (MTK_PIN_NO(19) | 2)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SIM2_SCLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SIM1_SCLK (MTK_PIN_NO(20) | 2)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_SIM2_SRST (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SIM1_SRST (MTK_PIN_NO(21) | 2)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_SIM2_SDAT (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SIM1_SDAT (MTK_PIN_NO(22) | 2)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_MSDC3_DAT0 (MTK_PIN_NO(23) | 1)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_MSDC3_DAT1 (MTK_PIN_NO(24) | 1)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_MSDC3_DAT2 (MTK_PIN_NO(25) | 1)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_MSDC3_DAT3 (MTK_PIN_NO(26) | 1)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_MSDC3_CLK (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_MSDC3_CMD (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PTA_RXD (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_UCTS2 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PTA_TXD (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_URTS2 (MTK_PIN_NO(30) | 2)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_URXD2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_UTXD2 (MTK_PIN_NO(31) | 2)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_UTXD2 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_URXD2 (MTK_PIN_NO(32) | 2)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MRG_CLK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_PCM0_CLK (MTK_PIN_NO(33) | 2)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MRG_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 2)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MRG_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 2)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MRG_SYNC (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_PCM0_SYNC (MTK_PIN_NO(36) | 2)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_GPS_SYNC (MTK_PIN_NO(37) | 1)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_DAIRSTB (MTK_PIN_NO(38) | 1)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_CM2MCLK (MTK_PIN_NO(39) | 1)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_CM3MCLK (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_IRDA_PDN (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM6 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_I2S1_WS (MTK_PIN_NO(40) | 4)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_CMPCLK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_CMCSK (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_FLASH (MTK_PIN_NO(41) | 3)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_CMMCLK (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SDA2 (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SCL2 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SDA0 (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SCL0 (MTK_PIN_NO(46) | 1)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_BPI_BUS0 (MTK_PIN_NO(47) | 1)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_BPI_BUS1 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_BPI_BUS2 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_BPI_BUS3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_BPI_BUS4 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_BUS5 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_BUS6 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_BUS7 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_BPI_BUS11 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_BPI_BUS12 (MTK_PIN_NO(59) | 1)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_BPI_BUS13 (MTK_PIN_NO(60) | 1)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_BPI_BUS14 (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_RFIC1_BSI_CK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_RFIC1_BSI_D0 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_RFIC1_BSI_D1 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_RFIC1_BSI_D2 (MTK_PIN_NO(65) | 1)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_RFIC1_BSI_CS (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_TD_TXBPI (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_RFIC0_BSI_CS (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MISC_BSI_DO (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MISC_BSI_CK (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MISC_BSI_CS0B (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_MIPI1_SCLK (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MISC_BSI_CS1B (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MISC_BSI_DI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_MIPI1_SDATA (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_LTE_TXBPI (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS15 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS16 (MTK_PIN_NO(80) | 1)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS17 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS18 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS19 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS20 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS21 (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS22 (MTK_PIN_NO(86) | 1)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS23 (MTK_PIN_NO(87) | 1)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS24 (MTK_PIN_NO(88) | 1)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS25 (MTK_PIN_NO(89) | 1)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS26 (MTK_PIN_NO(90) | 1)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS27 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_PCM1_CLK (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_I2S0_BCK (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_NLD6 (MTK_PIN_NO(92) | 3)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_PCM1_SYNC (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_I2S0_WS (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_NLD7 (MTK_PIN_NO(93) | 3)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_PCM1_DI (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S0_DI (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_NREB (MTK_PIN_NO(94) | 3)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_PCM1_DO (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_I2S0_DO (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_NRNB0 (MTK_PIN_NO(95) | 3)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_URXD1 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_UTXD1 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_NWEB (MTK_PIN_NO(96) | 3)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UTXD1 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_URXD1 (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_NCEB0 (MTK_PIN_NO(97) | 3)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS1 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_UCTS1 (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_NALE (MTK_PIN_NO(98) | 3)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_UCTS1 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_URTS1 (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_NCLE (MTK_PIN_NO(99) | 3)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MSDC2_DAT0 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_URXD1 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_USB_DRVVBUS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_SDA4 (MTK_PIN_NO(100) | 4)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MSDC2_DAT1 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_UTXD1 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SCL4 (MTK_PIN_NO(101) | 4)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MSDC2_DAT2 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_URTS1 (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_UTXD0 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_PWM0 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_SPI_CK_1 (MTK_PIN_NO(102) | 6)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MSDC2_DAT3 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_UCTS1 (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_URXD0 (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_PWM1 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPI_MI_1 (MTK_PIN_NO(103) | 6)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MSDC2_CLK (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_NLD4 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UTXD3 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_SDA3 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_PWM2 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPI_MO_1 (MTK_PIN_NO(104) | 6)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_MSDC2_CMD (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_NLD5 (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URXD3 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_SCL3 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_PWM3 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPI_CS_1 (MTK_PIN_NO(105) | 6)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_LCM_RST (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DSI_TE (MTK_PIN_NO(107) | 1)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_JTMS (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_MFG_JTAG_TMS (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_TDD_TMS (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_DFD_TMS (MTK_PIN_NO(108) | 6)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_JTCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_MFG_JTAG_TCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_TDD_TCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_DFD_TCK (MTK_PIN_NO(109) | 6)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_JTDI (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_MFG_JTAG_TDI (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TDD_TDI (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_DFD_TDI (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_JTDO (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_MFG_JTAG_TDO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_TDD_TDO (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DFD_TDO (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_JTRST_B (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_MFG_JTAG_TRSTN (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TDD_TRSTN (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_DFD_NTRST (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_URXD0 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_UTXD0 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_MD_URXD (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_LTE_URXD (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TDD_TXD (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_I2S2_WS (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_UTXD0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_URXD0 (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_MD_UTXD (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_LTE_UTXD (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TDD_TXD (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I2S2_BCK (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_URTS0 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UCTS0 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_MD_URXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_LTE_URXD (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_TDD_TXD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I2S2_MCK (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_UCTS0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_URTS0 (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_MD_UTXD (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_LTE_UTXD (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_TDD_TXD (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I2S2_DI_1 (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_URXD3 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_UTXD3 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_MD_URXD (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_LTE_URXD (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_TDD_TXD (MTK_PIN_NO(117) | 5)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_UTXD3 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_URXD3 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_MD_UTXD (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_LTE_UTXD (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_TDD_TXD (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_KROW0 (MTK_PIN_NO(119) | 1)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_KROW1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_PWM6 (MTK_PIN_NO(120) | 3)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_KROW2 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_IRDA_PDN (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_I2S1_DO_1 (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_USB_DRVVBUS (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SPI_CK_2 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_PWM4 (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_KCOL0 (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_KCOL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_IRDA_RXD (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_I2S2_DI_2 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_PWM5 (MTK_PIN_NO(123) | 4)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_KCOL2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_IRDA_TXD (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I2S1_DO_2 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_USB_DRVVBUS (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_SPI_MI_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_PWM3 (MTK_PIN_NO(124) | 6)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA1 (MTK_PIN_NO(125) | 1)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_SCL1 (MTK_PIN_NO(126) | 1)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MD_EINT1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_DISP_PWM1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_SPI_MO_2 (MTK_PIN_NO(127) | 3)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD_EINT2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_DSI1_TE (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_SPI_CS_2 (MTK_PIN_NO(128) | 3)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I2S3_WS (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_I2S2_WS (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_PWM0 (MTK_PIN_NO(129) | 3)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I2S3_BCK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_I2S2_BCK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_PWM1 (MTK_PIN_NO(130) | 3)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_I2S2_MCK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_PWM2 (MTK_PIN_NO(131) | 3)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_I2S3_DO_1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_I2S2_DI_1 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_PWM3 (MTK_PIN_NO(132) | 3)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_I2S3_DO_2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_I2S2_DI_2 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_PWM4 (MTK_PIN_NO(133) | 3)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_I2S3_DO_3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_DISP_PWM1 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_I2S1_DO_1 (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_PWM5 (MTK_PIN_NO(134) | 4)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_I2S3_DO_4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_DSI1_TE (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I2S1_DO_2 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_PWM6 (MTK_PIN_NO(135) | 4)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA3 (MTK_PIN_NO(136) | 1)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL3 (MTK_PIN_NO(137) | 1)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_DPI_CK (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_NLD6 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_UTXD0 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_USB_DRVVBUS (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_IRDA_PDN (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_DPI_DE (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_NLD7 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_URXD0 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_MD_UTXD (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_IRDA_RXD (MTK_PIN_NO(139) | 5)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_DPI_D0 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_NREB (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_UCTS0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_MD_URXD (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_IRDA_TXD (MTK_PIN_NO(140) | 5)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_DPI_D1 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_NRNB0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_LTE_UTXD (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_I2S2_WS (MTK_PIN_NO(141) | 5)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_DPI_D2 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_NWEB (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_UTXD1 (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_LTE_URXD (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I2S2_BCK (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_DPI_D3 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_NCEB0 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_URXD1 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_TDD_TXD (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_I2S2_MCK (MTK_PIN_NO(143) | 5)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_DPI_D4 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_NALE (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_UCTS1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_TDD_TMS (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_I2S2_DI_1 (MTK_PIN_NO(144) | 5)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_DPI_D5 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_NCLE (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_URTS1 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_TDD_TCK (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_I2S2_DI_2 (MTK_PIN_NO(145) | 5)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_DPI_D6 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_NLD8 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_UTXD2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_TDD_TDI (MTK_PIN_NO(146) | 4)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_DPI_D7 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_NLD9 (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_URXD2 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TDD_TDO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_I2S1_WS (MTK_PIN_NO(147) | 5)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_DPI_D8 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_NLD10 (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_UCTS2 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TDD_TRSTN (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I2S1_BCK (MTK_PIN_NO(148) | 5)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_DPI_D9 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_NLD11 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_URTS2 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I2S1_MCK (MTK_PIN_NO(149) | 5)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_DPI_D10 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_NLD12 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UTXD3 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I2S1_DO_1 (MTK_PIN_NO(150) | 5)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_DPI_D11 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_NLD13 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URXD3 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_I2S1_DO_2 (MTK_PIN_NO(151) | 5)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_DPI_HSYNC (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_NLD14 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_UCTS3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(152) | 4)
+#define PINMUX_GPIO152__FUNC_DSI1_TE (MTK_PIN_NO(152) | 5)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_DPI_VSYNC (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_NLD15 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_URTS3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_DISP_PWM1 (MTK_PIN_NO(153) | 5)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_MSDC0_DAT0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_NLD8 (MTK_PIN_NO(154) | 2)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_MSDC0_DAT1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_NLD9 (MTK_PIN_NO(155) | 2)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_MSDC0_DAT2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_NLD10 (MTK_PIN_NO(156) | 2)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_MSDC0_DAT3 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_NLD11 (MTK_PIN_NO(157) | 2)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_MSDC0_DAT4 (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_NLD12 (MTK_PIN_NO(158) | 2)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_MSDC0_DAT5 (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_NLD13 (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_MSDC0_DAT6 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_NLD14 (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_MSDC0_DAT7 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_NLD15 (MTK_PIN_NO(161) | 2)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_MSDC0_CMD (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_MSDC0_CLK (MTK_PIN_NO(163) | 1)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_MSDC0_DSL (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_MSDC0_RSTB (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_SPI_CK_0 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_PWM0 (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_SPI_MI_0 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_PWM1 (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SPI_MO_0 (MTK_PIN_NO(167) | 4)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_SPI_MO_0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_MD_EINT3 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_PWM2 (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_SPI_MI_0 (MTK_PIN_NO(168) | 4)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_SPI_CS_0 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_MD_EINT4 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_PWM3 (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC1_CMD (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC1_DAT0 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC1_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC1_DAT2 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC1_DAT3 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC1_CLK (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMI (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMO (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMO (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMI (MTK_PIN_NO(177) | 2)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_PWRAP_SPICK (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_PWRAP_SPICS (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_I2S1_WS (MTK_PIN_NO(180) | 2)
+#define PINMUX_GPIO180__FUNC_I2S2_WS (MTK_PIN_NO(180) | 3)
+#define PINMUX_GPIO180__FUNC_I2S0_WS (MTK_PIN_NO(180) | 4)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_AUD_DAT_MISO_1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_I2S1_BCK (MTK_PIN_NO(181) | 2)
+#define PINMUX_GPIO181__FUNC_I2S2_BCK (MTK_PIN_NO(181) | 3)
+#define PINMUX_GPIO181__FUNC_I2S0_BCK (MTK_PIN_NO(181) | 4)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_AUD_DAT_MOSI_1 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_I2S1_MCK (MTK_PIN_NO(182) | 2)
+#define PINMUX_GPIO182__FUNC_I2S2_MCK (MTK_PIN_NO(182) | 3)
+#define PINMUX_GPIO182__FUNC_I2S0_MCK (MTK_PIN_NO(182) | 4)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_AUD_DAT_MISO_2 (MTK_PIN_NO(183) | 1)
+#define PINMUX_GPIO183__FUNC_I2S1_DO_1 (MTK_PIN_NO(183) | 2)
+#define PINMUX_GPIO183__FUNC_I2S2_DI_1 (MTK_PIN_NO(183) | 3)
+#define PINMUX_GPIO183__FUNC_I2S0_DO (MTK_PIN_NO(183) | 4)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_AUD_DAT_MOSI_2 (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_I2S1_DO_2 (MTK_PIN_NO(184) | 2)
+#define PINMUX_GPIO184__FUNC_I2S2_DI_2 (MTK_PIN_NO(184) | 3)
+#define PINMUX_GPIO184__FUNC_I2S0_DI (MTK_PIN_NO(184) | 4)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_RTC32K_CK (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_DISP_PWM0 (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_DISP_PWM1 (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_SRCLKENAI (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_SRCLKENAI2 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_SRCLKENA0 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_SRCLKENA1 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_WATCHDOG_AO (MTK_PIN_NO(191) | 1)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_I2S0_WS (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_WS (MTK_PIN_NO(192) | 2)
+#define PINMUX_GPIO192__FUNC_I2S2_WS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_NCEB1 (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_I2S0_BCK (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_I2S1_BCK (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_BCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_NRNB1 (MTK_PIN_NO(193) | 4)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_I2S0_MCK (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_I2S1_MCK (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_MCK (MTK_PIN_NO(194) | 3)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_I2S0_DO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_I2S1_DO_1 (MTK_PIN_NO(195) | 2)
+#define PINMUX_GPIO195__FUNC_I2S2_DI_1 (MTK_PIN_NO(195) | 3)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_I2S0_DI (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_I2S1_DO_2 (MTK_PIN_NO(196) | 2)
+#define PINMUX_GPIO196__FUNC_I2S2_DI_2 (MTK_PIN_NO(196) | 3)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/mt8135-pinfunc.h b/include/dt-bindings/pinctrl/mt8135-pinfunc.h
new file mode 100644
index 000000000000..ce0cb5a440eb
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8135-pinfunc.h
@@ -0,0 +1,1294 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+
+#ifndef __DTS_MT8135_PINFUNC_H
+#define __DTS_MT8135_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
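+
+/*
+ * Naming convention: MT8135_PIN_<number>_<pad name>__FUNC_<function>.
+ * Mode 0 is always the pad's GPIO function; modes 1..7 select the
+ * alternate functions wired to that pad (EINT, NAND, I2S, ...).
+ */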
+
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(0) | 1)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_EINT49 (MTK_PIN_NO(0) | 2)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_I2SOUT_DAT (MTK_PIN_NO(0) | 3)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_DAC_DAT_OUT (MTK_PIN_NO(0) | 4)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_PCM1_DO (MTK_PIN_NO(0) | 5)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_SPI1_MO (MTK_PIN_NO(0) | 6)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_NALE (MTK_PIN_NO(0) | 7)
+
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(1) | 1)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_EINT48 (MTK_PIN_NO(1) | 2)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_I2SIN_WS (MTK_PIN_NO(1) | 3)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_DAC_WS (MTK_PIN_NO(1) | 4)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_PCM1_WS (MTK_PIN_NO(1) | 5)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_SPI1_CSN (MTK_PIN_NO(1) | 6)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_NCLE (MTK_PIN_NO(1) | 7)
+
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(2) | 1)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_EINT47 (MTK_PIN_NO(2) | 2)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_I2SIN_CK (MTK_PIN_NO(2) | 3)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_DAC_CK (MTK_PIN_NO(2) | 4)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_PCM1_CK (MTK_PIN_NO(2) | 5)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_SPI1_CLK (MTK_PIN_NO(2) | 6)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(2) | 7)
+
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(3) | 1)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_EINT46 (MTK_PIN_NO(3) | 2)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_A_FUNC_CK (MTK_PIN_NO(3) | 3)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_LSCE1B_2X (MTK_PIN_NO(3) | 6)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_NLD5 (MTK_PIN_NO(3) | 7)
+
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(4) | 1)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_EINT41 (MTK_PIN_NO(4) | 2)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_A_FUNC_DOUT_0 (MTK_PIN_NO(4) | 3)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_USB_TEST_IO_0 (MTK_PIN_NO(4) | 5)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_LRSTB_2X (MTK_PIN_NO(4) | 6)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_NRNB (MTK_PIN_NO(4) | 7)
+
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(5) | 1)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_EINT40 (MTK_PIN_NO(5) | 2)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_A_FUNC_DOUT_1 (MTK_PIN_NO(5) | 3)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_USB_TEST_IO_1 (MTK_PIN_NO(5) | 5)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_LPTE (MTK_PIN_NO(5) | 6)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_NREB (MTK_PIN_NO(5) | 7)
+
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(6) | 1)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_EINT45 (MTK_PIN_NO(6) | 2)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_A_FUNC_DOUT_2 (MTK_PIN_NO(6) | 3)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_USB_TEST_IO_2 (MTK_PIN_NO(6) | 5)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_LSCE0B_2X (MTK_PIN_NO(6) | 6)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_NLD7 (MTK_PIN_NO(6) | 7)
+
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(7) | 1)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_EINT44 (MTK_PIN_NO(7) | 2)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_A_FUNC_DOUT_3 (MTK_PIN_NO(7) | 3)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_USB_TEST_IO_3 (MTK_PIN_NO(7) | 5)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_LSA0_2X (MTK_PIN_NO(7) | 6)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_NLD14 (MTK_PIN_NO(7) | 7)
+
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(8) | 1)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_EINT43 (MTK_PIN_NO(8) | 2)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_USB_TEST_IO_4 (MTK_PIN_NO(8) | 5)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_LSCK_2X (MTK_PIN_NO(8) | 6)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_NLD11 (MTK_PIN_NO(8) | 7)
+
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(9) | 1)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_EINT42 (MTK_PIN_NO(9) | 2)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_USB_TEST_IO_5 (MTK_PIN_NO(9) | 5)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_LSDA_2X (MTK_PIN_NO(9) | 6)
+
+#define MT8135_PIN_10_NCEB0__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8135_PIN_10_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(10) | 1)
+#define MT8135_PIN_10_NCEB0__FUNC_EINT139 (MTK_PIN_NO(10) | 2)
+#define MT8135_PIN_10_NCEB0__FUNC_TESTA_OUT4 (MTK_PIN_NO(10) | 7)
+
+#define MT8135_PIN_11_NCEB1__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8135_PIN_11_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(11) | 1)
+#define MT8135_PIN_11_NCEB1__FUNC_EINT140 (MTK_PIN_NO(11) | 2)
+#define MT8135_PIN_11_NCEB1__FUNC_USB_DRVVBUS (MTK_PIN_NO(11) | 6)
+#define MT8135_PIN_11_NCEB1__FUNC_TESTA_OUT5 (MTK_PIN_NO(11) | 7)
+
+#define MT8135_PIN_12_NRNB__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8135_PIN_12_NRNB__FUNC_NRNB (MTK_PIN_NO(12) | 1)
+#define MT8135_PIN_12_NRNB__FUNC_EINT141 (MTK_PIN_NO(12) | 2)
+#define MT8135_PIN_12_NRNB__FUNC_A_FUNC_DOUT_4 (MTK_PIN_NO(12) | 3)
+#define MT8135_PIN_12_NRNB__FUNC_TESTA_OUT6 (MTK_PIN_NO(12) | 7)
+
+#define MT8135_PIN_13_NCLE__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8135_PIN_13_NCLE__FUNC_NCLE (MTK_PIN_NO(13) | 1)
+#define MT8135_PIN_13_NCLE__FUNC_EINT142 (MTK_PIN_NO(13) | 2)
+#define MT8135_PIN_13_NCLE__FUNC_A_FUNC_DOUT_5 (MTK_PIN_NO(13) | 3)
+#define MT8135_PIN_13_NCLE__FUNC_CM2PDN_1X (MTK_PIN_NO(13) | 4)
+#define MT8135_PIN_13_NCLE__FUNC_NALE (MTK_PIN_NO(13) | 6)
+#define MT8135_PIN_13_NCLE__FUNC_TESTA_OUT7 (MTK_PIN_NO(13) | 7)
+
+#define MT8135_PIN_14_NALE__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8135_PIN_14_NALE__FUNC_NALE (MTK_PIN_NO(14) | 1)
+#define MT8135_PIN_14_NALE__FUNC_EINT143 (MTK_PIN_NO(14) | 2)
+#define MT8135_PIN_14_NALE__FUNC_A_FUNC_DOUT_6 (MTK_PIN_NO(14) | 3)
+#define MT8135_PIN_14_NALE__FUNC_CM2MCLK_1X (MTK_PIN_NO(14) | 4)
+#define MT8135_PIN_14_NALE__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)
+#define MT8135_PIN_14_NALE__FUNC_NCLE (MTK_PIN_NO(14) | 6)
+#define MT8135_PIN_14_NALE__FUNC_TESTA_OUT8 (MTK_PIN_NO(14) | 7)
+
+#define MT8135_PIN_15_NREB__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8135_PIN_15_NREB__FUNC_NREB (MTK_PIN_NO(15) | 1)
+#define MT8135_PIN_15_NREB__FUNC_EINT144 (MTK_PIN_NO(15) | 2)
+#define MT8135_PIN_15_NREB__FUNC_A_FUNC_DOUT_7 (MTK_PIN_NO(15) | 3)
+#define MT8135_PIN_15_NREB__FUNC_CM2RST_1X (MTK_PIN_NO(15) | 4)
+#define MT8135_PIN_15_NREB__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5)
+#define MT8135_PIN_15_NREB__FUNC_TESTA_OUT9 (MTK_PIN_NO(15) | 7)
+
+#define MT8135_PIN_16_NWEB__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8135_PIN_16_NWEB__FUNC_NWEB (MTK_PIN_NO(16) | 1)
+#define MT8135_PIN_16_NWEB__FUNC_EINT145 (MTK_PIN_NO(16) | 2)
+#define MT8135_PIN_16_NWEB__FUNC_A_FUNC_DIN_0 (MTK_PIN_NO(16) | 3)
+#define MT8135_PIN_16_NWEB__FUNC_CM2PCLK_1X (MTK_PIN_NO(16) | 4)
+#define MT8135_PIN_16_NWEB__FUNC_IRDA_PDN (MTK_PIN_NO(16) | 5)
+#define MT8135_PIN_16_NWEB__FUNC_TESTA_OUT10 (MTK_PIN_NO(16) | 7)
+
+#define MT8135_PIN_17_NLD0__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8135_PIN_17_NLD0__FUNC_NLD0 (MTK_PIN_NO(17) | 1)
+#define MT8135_PIN_17_NLD0__FUNC_EINT146 (MTK_PIN_NO(17) | 2)
+#define MT8135_PIN_17_NLD0__FUNC_A_FUNC_DIN_1 (MTK_PIN_NO(17) | 3)
+#define MT8135_PIN_17_NLD0__FUNC_CM2DAT_1X_0 (MTK_PIN_NO(17) | 4)
+#define MT8135_PIN_17_NLD0__FUNC_I2SIN_CK (MTK_PIN_NO(17) | 5)
+#define MT8135_PIN_17_NLD0__FUNC_DAC_CK (MTK_PIN_NO(17) | 6)
+#define MT8135_PIN_17_NLD0__FUNC_TESTA_OUT11 (MTK_PIN_NO(17) | 7)
+
+#define MT8135_PIN_18_NLD1__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8135_PIN_18_NLD1__FUNC_NLD1 (MTK_PIN_NO(18) | 1)
+#define MT8135_PIN_18_NLD1__FUNC_EINT147 (MTK_PIN_NO(18) | 2)
+#define MT8135_PIN_18_NLD1__FUNC_A_FUNC_DIN_2 (MTK_PIN_NO(18) | 3)
+#define MT8135_PIN_18_NLD1__FUNC_CM2DAT_1X_1 (MTK_PIN_NO(18) | 4)
+#define MT8135_PIN_18_NLD1__FUNC_I2SIN_WS (MTK_PIN_NO(18) | 5)
+#define MT8135_PIN_18_NLD1__FUNC_DAC_WS (MTK_PIN_NO(18) | 6)
+#define MT8135_PIN_18_NLD1__FUNC_TESTA_OUT12 (MTK_PIN_NO(18) | 7)
+
+#define MT8135_PIN_19_NLD2__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8135_PIN_19_NLD2__FUNC_NLD2 (MTK_PIN_NO(19) | 1)
+#define MT8135_PIN_19_NLD2__FUNC_EINT148 (MTK_PIN_NO(19) | 2)
+#define MT8135_PIN_19_NLD2__FUNC_A_FUNC_DIN_3 (MTK_PIN_NO(19) | 3)
+#define MT8135_PIN_19_NLD2__FUNC_CM2DAT_1X_2 (MTK_PIN_NO(19) | 4)
+#define MT8135_PIN_19_NLD2__FUNC_I2SOUT_DAT (MTK_PIN_NO(19) | 5)
+#define MT8135_PIN_19_NLD2__FUNC_DAC_DAT_OUT (MTK_PIN_NO(19) | 6)
+#define MT8135_PIN_19_NLD2__FUNC_TESTA_OUT13 (MTK_PIN_NO(19) | 7)
+
+#define MT8135_PIN_20_NLD3__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8135_PIN_20_NLD3__FUNC_NLD3 (MTK_PIN_NO(20) | 1)
+#define MT8135_PIN_20_NLD3__FUNC_EINT149 (MTK_PIN_NO(20) | 2)
+#define MT8135_PIN_20_NLD3__FUNC_A_FUNC_DIN_4 (MTK_PIN_NO(20) | 3)
+#define MT8135_PIN_20_NLD3__FUNC_CM2DAT_1X_3 (MTK_PIN_NO(20) | 4)
+#define MT8135_PIN_20_NLD3__FUNC_TESTA_OUT14 (MTK_PIN_NO(20) | 7)
+
+#define MT8135_PIN_21_NLD4__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8135_PIN_21_NLD4__FUNC_NLD4 (MTK_PIN_NO(21) | 1)
+#define MT8135_PIN_21_NLD4__FUNC_EINT150 (MTK_PIN_NO(21) | 2)
+#define MT8135_PIN_21_NLD4__FUNC_A_FUNC_DIN_5 (MTK_PIN_NO(21) | 3)
+#define MT8135_PIN_21_NLD4__FUNC_CM2DAT_1X_4 (MTK_PIN_NO(21) | 4)
+#define MT8135_PIN_21_NLD4__FUNC_TESTA_OUT15 (MTK_PIN_NO(21) | 7)
+
+#define MT8135_PIN_22_NLD5__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8135_PIN_22_NLD5__FUNC_NLD5 (MTK_PIN_NO(22) | 1)
+#define MT8135_PIN_22_NLD5__FUNC_EINT151 (MTK_PIN_NO(22) | 2)
+#define MT8135_PIN_22_NLD5__FUNC_A_FUNC_DIN_6 (MTK_PIN_NO(22) | 3)
+#define MT8135_PIN_22_NLD5__FUNC_CM2DAT_1X_5 (MTK_PIN_NO(22) | 4)
+#define MT8135_PIN_22_NLD5__FUNC_TESTA_OUT16 (MTK_PIN_NO(22) | 7)
+
+#define MT8135_PIN_23_NLD6__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8135_PIN_23_NLD6__FUNC_NLD6 (MTK_PIN_NO(23) | 1)
+#define MT8135_PIN_23_NLD6__FUNC_EINT152 (MTK_PIN_NO(23) | 2)
+#define MT8135_PIN_23_NLD6__FUNC_A_FUNC_DIN_7 (MTK_PIN_NO(23) | 3)
+#define MT8135_PIN_23_NLD6__FUNC_CM2DAT_1X_6 (MTK_PIN_NO(23) | 4)
+#define MT8135_PIN_23_NLD6__FUNC_TESTA_OUT17 (MTK_PIN_NO(23) | 7)
+
+#define MT8135_PIN_24_NLD7__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8135_PIN_24_NLD7__FUNC_NLD7 (MTK_PIN_NO(24) | 1)
+#define MT8135_PIN_24_NLD7__FUNC_EINT153 (MTK_PIN_NO(24) | 2)
+#define MT8135_PIN_24_NLD7__FUNC_A_FUNC_DIN_8 (MTK_PIN_NO(24) | 3)
+#define MT8135_PIN_24_NLD7__FUNC_CM2DAT_1X_7 (MTK_PIN_NO(24) | 4)
+#define MT8135_PIN_24_NLD7__FUNC_TESTA_OUT18 (MTK_PIN_NO(24) | 7)
+
+#define MT8135_PIN_25_NLD8__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8135_PIN_25_NLD8__FUNC_NLD8 (MTK_PIN_NO(25) | 1)
+#define MT8135_PIN_25_NLD8__FUNC_EINT154 (MTK_PIN_NO(25) | 2)
+#define MT8135_PIN_25_NLD8__FUNC_CM2DAT_1X_8 (MTK_PIN_NO(25) | 4)
+
+#define MT8135_PIN_26_NLD9__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8135_PIN_26_NLD9__FUNC_NLD9 (MTK_PIN_NO(26) | 1)
+#define MT8135_PIN_26_NLD9__FUNC_EINT155 (MTK_PIN_NO(26) | 2)
+#define MT8135_PIN_26_NLD9__FUNC_CM2DAT_1X_9 (MTK_PIN_NO(26) | 4)
+#define MT8135_PIN_26_NLD9__FUNC_PWM1 (MTK_PIN_NO(26) | 5)
+
+#define MT8135_PIN_27_NLD10__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8135_PIN_27_NLD10__FUNC_NLD10 (MTK_PIN_NO(27) | 1)
+#define MT8135_PIN_27_NLD10__FUNC_EINT156 (MTK_PIN_NO(27) | 2)
+#define MT8135_PIN_27_NLD10__FUNC_CM2VSYNC_1X (MTK_PIN_NO(27) | 4)
+#define MT8135_PIN_27_NLD10__FUNC_PWM2 (MTK_PIN_NO(27) | 5)
+
+#define MT8135_PIN_28_NLD11__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8135_PIN_28_NLD11__FUNC_NLD11 (MTK_PIN_NO(28) | 1)
+#define MT8135_PIN_28_NLD11__FUNC_EINT157 (MTK_PIN_NO(28) | 2)
+#define MT8135_PIN_28_NLD11__FUNC_CM2HSYNC_1X (MTK_PIN_NO(28) | 4)
+#define MT8135_PIN_28_NLD11__FUNC_PWM3 (MTK_PIN_NO(28) | 5)
+
+#define MT8135_PIN_29_NLD12__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8135_PIN_29_NLD12__FUNC_NLD12 (MTK_PIN_NO(29) | 1)
+#define MT8135_PIN_29_NLD12__FUNC_EINT158 (MTK_PIN_NO(29) | 2)
+#define MT8135_PIN_29_NLD12__FUNC_I2SIN_CK (MTK_PIN_NO(29) | 3)
+#define MT8135_PIN_29_NLD12__FUNC_DAC_CK (MTK_PIN_NO(29) | 4)
+#define MT8135_PIN_29_NLD12__FUNC_PCM1_CK (MTK_PIN_NO(29) | 5)
+
+#define MT8135_PIN_30_NLD13__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8135_PIN_30_NLD13__FUNC_NLD13 (MTK_PIN_NO(30) | 1)
+#define MT8135_PIN_30_NLD13__FUNC_EINT159 (MTK_PIN_NO(30) | 2)
+#define MT8135_PIN_30_NLD13__FUNC_I2SIN_WS (MTK_PIN_NO(30) | 3)
+#define MT8135_PIN_30_NLD13__FUNC_DAC_WS (MTK_PIN_NO(30) | 4)
+#define MT8135_PIN_30_NLD13__FUNC_PCM1_WS (MTK_PIN_NO(30) | 5)
+
+#define MT8135_PIN_31_NLD14__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8135_PIN_31_NLD14__FUNC_NLD14 (MTK_PIN_NO(31) | 1)
+#define MT8135_PIN_31_NLD14__FUNC_EINT160 (MTK_PIN_NO(31) | 2)
+#define MT8135_PIN_31_NLD14__FUNC_I2SOUT_DAT (MTK_PIN_NO(31) | 3)
+#define MT8135_PIN_31_NLD14__FUNC_DAC_DAT_OUT (MTK_PIN_NO(31) | 4)
+#define MT8135_PIN_31_NLD14__FUNC_PCM1_DO (MTK_PIN_NO(31) | 5)
+
+#define MT8135_PIN_32_NLD15__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8135_PIN_32_NLD15__FUNC_NLD15 (MTK_PIN_NO(32) | 1)
+#define MT8135_PIN_32_NLD15__FUNC_EINT161 (MTK_PIN_NO(32) | 2)
+#define MT8135_PIN_32_NLD15__FUNC_DISP_PWM (MTK_PIN_NO(32) | 3)
+#define MT8135_PIN_32_NLD15__FUNC_PWM4 (MTK_PIN_NO(32) | 4)
+#define MT8135_PIN_32_NLD15__FUNC_PCM1_DI (MTK_PIN_NO(32) | 5)
+
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(33) | 1)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_EINT50 (MTK_PIN_NO(33) | 2)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_I2SIN_DAT (MTK_PIN_NO(33) | 3)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_PCM1_DI (MTK_PIN_NO(33) | 5)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_SPI1_MI (MTK_PIN_NO(33) | 6)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_NLD10 (MTK_PIN_NO(33) | 7)
+
+#define MT8135_PIN_34_IDDIG__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8135_PIN_34_IDDIG__FUNC_IDDIG (MTK_PIN_NO(34) | 1)
+#define MT8135_PIN_34_IDDIG__FUNC_EINT34 (MTK_PIN_NO(34) | 2)
+
+#define MT8135_PIN_35_SCL3__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8135_PIN_35_SCL3__FUNC_SCL3 (MTK_PIN_NO(35) | 1)
+#define MT8135_PIN_35_SCL3__FUNC_EINT96 (MTK_PIN_NO(35) | 2)
+#define MT8135_PIN_35_SCL3__FUNC_CLKM6 (MTK_PIN_NO(35) | 3)
+#define MT8135_PIN_35_SCL3__FUNC_PWM6 (MTK_PIN_NO(35) | 4)
+
+#define MT8135_PIN_36_SDA3__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8135_PIN_36_SDA3__FUNC_SDA3 (MTK_PIN_NO(36) | 1)
+#define MT8135_PIN_36_SDA3__FUNC_EINT97 (MTK_PIN_NO(36) | 2)
+
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(37) | 1)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_ADC_CK (MTK_PIN_NO(37) | 2)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_HDMI_SDATA0 (MTK_PIN_NO(37) | 3)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_EINT19 (MTK_PIN_NO(37) | 4)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_USB_TEST_IO_6 (MTK_PIN_NO(37) | 5)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_TESTA_OUT19 (MTK_PIN_NO(37) | 7)
+
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(38) | 1)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_ADC_WS (MTK_PIN_NO(38) | 2)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_AUD_DAT_MISO (MTK_PIN_NO(38) | 3)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_EINT21 (MTK_PIN_NO(38) | 4)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_USB_TEST_IO_7 (MTK_PIN_NO(38) | 5)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_TESTA_OUT20 (MTK_PIN_NO(38) | 7)
+
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_AUD_DAT_MISO (MTK_PIN_NO(39) | 1)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_ADC_DAT_IN (MTK_PIN_NO(39) | 2)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(39) | 3)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_EINT20 (MTK_PIN_NO(39) | 4)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_USB_TEST_IO_8 (MTK_PIN_NO(39) | 5)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_TESTA_OUT21 (MTK_PIN_NO(39) | 7)
+
+#define MT8135_PIN_40_DAC_CLK__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8135_PIN_40_DAC_CLK__FUNC_DAC_CK (MTK_PIN_NO(40) | 1)
+#define MT8135_PIN_40_DAC_CLK__FUNC_EINT22 (MTK_PIN_NO(40) | 2)
+#define MT8135_PIN_40_DAC_CLK__FUNC_HDMI_SDATA1 (MTK_PIN_NO(40) | 3)
+#define MT8135_PIN_40_DAC_CLK__FUNC_USB_TEST_IO_9 (MTK_PIN_NO(40) | 5)
+#define MT8135_PIN_40_DAC_CLK__FUNC_TESTA_OUT22 (MTK_PIN_NO(40) | 7)
+
+#define MT8135_PIN_41_DAC_WS__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8135_PIN_41_DAC_WS__FUNC_DAC_WS (MTK_PIN_NO(41) | 1)
+#define MT8135_PIN_41_DAC_WS__FUNC_EINT24 (MTK_PIN_NO(41) | 2)
+#define MT8135_PIN_41_DAC_WS__FUNC_HDMI_SDATA2 (MTK_PIN_NO(41) | 3)
+#define MT8135_PIN_41_DAC_WS__FUNC_USB_TEST_IO_10 (MTK_PIN_NO(41) | 5)
+#define MT8135_PIN_41_DAC_WS__FUNC_TESTA_OUT23 (MTK_PIN_NO(41) | 7)
+
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_DAC_DAT_OUT (MTK_PIN_NO(42) | 1)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_EINT23 (MTK_PIN_NO(42) | 2)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_HDMI_SDATA3 (MTK_PIN_NO(42) | 3)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_USB_TEST_IO_11 (MTK_PIN_NO(42) | 5)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_TESTA_OUT24 (MTK_PIN_NO(42) | 7)
+
+#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_PWRAP_SPIDI (MTK_PIN_NO(43) | 1)
+#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_EINT29 (MTK_PIN_NO(43) | 2)
+
+#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_PWRAP_SPIDO (MTK_PIN_NO(44) | 1)
+#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_EINT28 (MTK_PIN_NO(44) | 2)
+
+#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(45) | 1)
+#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_EINT27 (MTK_PIN_NO(45) | 2)
+
+#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_PWRAP_SPICK_I (MTK_PIN_NO(46) | 1)
+#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_EINT26 (MTK_PIN_NO(46) | 2)
+
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_PWRAP_EVENT_IN (MTK_PIN_NO(47) | 1)
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_EINT25 (MTK_PIN_NO(47) | 2)
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_TESTA_OUT2 (MTK_PIN_NO(47) | 7)
+
+#define MT8135_PIN_48_RTC32K_CK__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8135_PIN_48_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(48) | 1)
+
+#define MT8135_PIN_49_WATCHDOG__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8135_PIN_49_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(49) | 1)
+#define MT8135_PIN_49_WATCHDOG__FUNC_EINT36 (MTK_PIN_NO(49) | 2)
+
+#define MT8135_PIN_50_SRCLKENA__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8135_PIN_50_SRCLKENA__FUNC_SRCLKENA (MTK_PIN_NO(50) | 1)
+#define MT8135_PIN_50_SRCLKENA__FUNC_EINT38 (MTK_PIN_NO(50) | 2)
+
+#define MT8135_PIN_51_SRCVOLTEN__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8135_PIN_51_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(51) | 1)
+#define MT8135_PIN_51_SRCVOLTEN__FUNC_EINT37 (MTK_PIN_NO(51) | 2)
+
+#define MT8135_PIN_52_EINT0__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8135_PIN_52_EINT0__FUNC_EINT0 (MTK_PIN_NO(52) | 1)
+#define MT8135_PIN_52_EINT0__FUNC_PWM1 (MTK_PIN_NO(52) | 2)
+#define MT8135_PIN_52_EINT0__FUNC_CLKM0 (MTK_PIN_NO(52) | 3)
+#define MT8135_PIN_52_EINT0__FUNC_SPDIF_OUT (MTK_PIN_NO(52) | 4)
+#define MT8135_PIN_52_EINT0__FUNC_USB_TEST_IO_12 (MTK_PIN_NO(52) | 5)
+#define MT8135_PIN_52_EINT0__FUNC_USB_SCL (MTK_PIN_NO(52) | 7)
+
+#define MT8135_PIN_53_URXD2__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8135_PIN_53_URXD2__FUNC_URXD2 (MTK_PIN_NO(53) | 1)
+#define MT8135_PIN_53_URXD2__FUNC_EINT83 (MTK_PIN_NO(53) | 2)
+#define MT8135_PIN_53_URXD2__FUNC_HDMI_LRCK (MTK_PIN_NO(53) | 4)
+#define MT8135_PIN_53_URXD2__FUNC_CLKM3 (MTK_PIN_NO(53) | 5)
+#define MT8135_PIN_53_URXD2__FUNC_UTXD2 (MTK_PIN_NO(53) | 7)
+
+#define MT8135_PIN_54_UTXD2__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8135_PIN_54_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(54) | 1)
+#define MT8135_PIN_54_UTXD2__FUNC_EINT82 (MTK_PIN_NO(54) | 2)
+#define MT8135_PIN_54_UTXD2__FUNC_HDMI_BCK_OUT (MTK_PIN_NO(54) | 4)
+#define MT8135_PIN_54_UTXD2__FUNC_CLKM2 (MTK_PIN_NO(54) | 5)
+#define MT8135_PIN_54_UTXD2__FUNC_URXD2 (MTK_PIN_NO(54) | 7)
+
+#define MT8135_PIN_55_UCTS2__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8135_PIN_55_UCTS2__FUNC_UCTS2 (MTK_PIN_NO(55) | 1)
+#define MT8135_PIN_55_UCTS2__FUNC_EINT84 (MTK_PIN_NO(55) | 2)
+#define MT8135_PIN_55_UCTS2__FUNC_PWM1 (MTK_PIN_NO(55) | 5)
+#define MT8135_PIN_55_UCTS2__FUNC_URTS2 (MTK_PIN_NO(55) | 7)
+
+#define MT8135_PIN_56_URTS2__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8135_PIN_56_URTS2__FUNC_URTS2 (MTK_PIN_NO(56) | 1)
+#define MT8135_PIN_56_URTS2__FUNC_EINT85 (MTK_PIN_NO(56) | 2)
+#define MT8135_PIN_56_URTS2__FUNC_PWM2 (MTK_PIN_NO(56) | 5)
+#define MT8135_PIN_56_URTS2__FUNC_UCTS2 (MTK_PIN_NO(56) | 7)
+
+#define MT8135_PIN_57_JTCK__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8135_PIN_57_JTCK__FUNC_JTCK (MTK_PIN_NO(57) | 1)
+#define MT8135_PIN_57_JTCK__FUNC_EINT188 (MTK_PIN_NO(57) | 2)
+#define MT8135_PIN_57_JTCK__FUNC_DSP1_ICK (MTK_PIN_NO(57) | 3)
+
+#define MT8135_PIN_58_JTDO__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8135_PIN_58_JTDO__FUNC_JTDO (MTK_PIN_NO(58) | 1)
+#define MT8135_PIN_58_JTDO__FUNC_EINT190 (MTK_PIN_NO(58) | 2)
+#define MT8135_PIN_58_JTDO__FUNC_DSP2_IMS (MTK_PIN_NO(58) | 3)
+
+#define MT8135_PIN_59_JTRST_B__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8135_PIN_59_JTRST_B__FUNC_JTRST_B (MTK_PIN_NO(59) | 1)
+#define MT8135_PIN_59_JTRST_B__FUNC_EINT0 (MTK_PIN_NO(59) | 2)
+#define MT8135_PIN_59_JTRST_B__FUNC_DSP2_ICK (MTK_PIN_NO(59) | 3)
+
+#define MT8135_PIN_60_JTDI__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8135_PIN_60_JTDI__FUNC_JTDI (MTK_PIN_NO(60) | 1)
+#define MT8135_PIN_60_JTDI__FUNC_EINT189 (MTK_PIN_NO(60) | 2)
+#define MT8135_PIN_60_JTDI__FUNC_DSP1_IMS (MTK_PIN_NO(60) | 3)
+
+#define MT8135_PIN_61_JRTCK__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8135_PIN_61_JRTCK__FUNC_JRTCK (MTK_PIN_NO(61) | 1)
+#define MT8135_PIN_61_JRTCK__FUNC_EINT187 (MTK_PIN_NO(61) | 2)
+#define MT8135_PIN_61_JRTCK__FUNC_DSP1_ID (MTK_PIN_NO(61) | 3)
+
+#define MT8135_PIN_62_JTMS__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8135_PIN_62_JTMS__FUNC_JTMS (MTK_PIN_NO(62) | 1)
+#define MT8135_PIN_62_JTMS__FUNC_EINT191 (MTK_PIN_NO(62) | 2)
+#define MT8135_PIN_62_JTMS__FUNC_DSP2_ID (MTK_PIN_NO(62) | 3)
+
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_MSDC1_INSI (MTK_PIN_NO(63) | 1)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_SCL5 (MTK_PIN_NO(63) | 3)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_PWM6 (MTK_PIN_NO(63) | 4)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_CLKM5 (MTK_PIN_NO(63) | 5)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_TESTB_OUT6 (MTK_PIN_NO(63) | 7)
+
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_MSDC1_SDWPI (MTK_PIN_NO(64) | 1)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_EINT58 (MTK_PIN_NO(64) | 2)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_SDA5 (MTK_PIN_NO(64) | 3)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_PWM7 (MTK_PIN_NO(64) | 4)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_CLKM6 (MTK_PIN_NO(64) | 5)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_TESTB_OUT7 (MTK_PIN_NO(64) | 7)
+
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_MSDC2_INSI (MTK_PIN_NO(65) | 1)
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_USB_TEST_IO_27 (MTK_PIN_NO(65) | 5)
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_TESTA_OUT3 (MTK_PIN_NO(65) | 7)
+
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_MSDC2_SDWPI (MTK_PIN_NO(66) | 1)
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_EINT66 (MTK_PIN_NO(66) | 2)
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_USB_TEST_IO_28 (MTK_PIN_NO(66) | 5)
+
+#define MT8135_PIN_67_URXD4__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8135_PIN_67_URXD4__FUNC_URXD4 (MTK_PIN_NO(67) | 1)
+#define MT8135_PIN_67_URXD4__FUNC_EINT89 (MTK_PIN_NO(67) | 2)
+#define MT8135_PIN_67_URXD4__FUNC_URXD1 (MTK_PIN_NO(67) | 3)
+#define MT8135_PIN_67_URXD4__FUNC_UTXD4 (MTK_PIN_NO(67) | 6)
+#define MT8135_PIN_67_URXD4__FUNC_TESTB_OUT10 (MTK_PIN_NO(67) | 7)
+
+#define MT8135_PIN_68_UTXD4__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8135_PIN_68_UTXD4__FUNC_UTXD4 (MTK_PIN_NO(68) | 1)
+#define MT8135_PIN_68_UTXD4__FUNC_EINT88 (MTK_PIN_NO(68) | 2)
+#define MT8135_PIN_68_UTXD4__FUNC_UTXD1 (MTK_PIN_NO(68) | 3)
+#define MT8135_PIN_68_UTXD4__FUNC_URXD4 (MTK_PIN_NO(68) | 6)
+#define MT8135_PIN_68_UTXD4__FUNC_TESTB_OUT11 (MTK_PIN_NO(68) | 7)
+
+#define MT8135_PIN_69_URXD1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8135_PIN_69_URXD1__FUNC_URXD1 (MTK_PIN_NO(69) | 1)
+#define MT8135_PIN_69_URXD1__FUNC_EINT79 (MTK_PIN_NO(69) | 2)
+#define MT8135_PIN_69_URXD1__FUNC_URXD4 (MTK_PIN_NO(69) | 3)
+#define MT8135_PIN_69_URXD1__FUNC_UTXD1 (MTK_PIN_NO(69) | 6)
+#define MT8135_PIN_69_URXD1__FUNC_TESTB_OUT24 (MTK_PIN_NO(69) | 7)
+
+#define MT8135_PIN_70_UTXD1__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8135_PIN_70_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(70) | 1)
+#define MT8135_PIN_70_UTXD1__FUNC_EINT78 (MTK_PIN_NO(70) | 2)
+#define MT8135_PIN_70_UTXD1__FUNC_UTXD4 (MTK_PIN_NO(70) | 3)
+#define MT8135_PIN_70_UTXD1__FUNC_URXD1 (MTK_PIN_NO(70) | 6)
+#define MT8135_PIN_70_UTXD1__FUNC_TESTB_OUT25 (MTK_PIN_NO(70) | 7)
+
+#define MT8135_PIN_71_UCTS1__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8135_PIN_71_UCTS1__FUNC_UCTS1 (MTK_PIN_NO(71) | 1)
+#define MT8135_PIN_71_UCTS1__FUNC_EINT80 (MTK_PIN_NO(71) | 2)
+#define MT8135_PIN_71_UCTS1__FUNC_CLKM0 (MTK_PIN_NO(71) | 5)
+#define MT8135_PIN_71_UCTS1__FUNC_URTS1 (MTK_PIN_NO(71) | 6)
+#define MT8135_PIN_71_UCTS1__FUNC_TESTB_OUT31 (MTK_PIN_NO(71) | 7)
+
+#define MT8135_PIN_72_URTS1__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8135_PIN_72_URTS1__FUNC_URTS1 (MTK_PIN_NO(72) | 1)
+#define MT8135_PIN_72_URTS1__FUNC_EINT81 (MTK_PIN_NO(72) | 2)
+#define MT8135_PIN_72_URTS1__FUNC_CLKM1 (MTK_PIN_NO(72) | 5)
+#define MT8135_PIN_72_URTS1__FUNC_UCTS1 (MTK_PIN_NO(72) | 6)
+#define MT8135_PIN_72_URTS1__FUNC_TESTB_OUT21 (MTK_PIN_NO(72) | 7)
+
+#define MT8135_PIN_73_PWM1__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8135_PIN_73_PWM1__FUNC_PWM1 (MTK_PIN_NO(73) | 1)
+#define MT8135_PIN_73_PWM1__FUNC_EINT73 (MTK_PIN_NO(73) | 2)
+#define MT8135_PIN_73_PWM1__FUNC_USB_DRVVBUS (MTK_PIN_NO(73) | 5)
+#define MT8135_PIN_73_PWM1__FUNC_DISP_PWM (MTK_PIN_NO(73) | 6)
+#define MT8135_PIN_73_PWM1__FUNC_TESTB_OUT8 (MTK_PIN_NO(73) | 7)
+
+#define MT8135_PIN_74_PWM2__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT8135_PIN_74_PWM2__FUNC_PWM2 (MTK_PIN_NO(74) | 1)
+#define MT8135_PIN_74_PWM2__FUNC_EINT74 (MTK_PIN_NO(74) | 2)
+#define MT8135_PIN_74_PWM2__FUNC_DPI33_CK (MTK_PIN_NO(74) | 3)
+#define MT8135_PIN_74_PWM2__FUNC_PWM5 (MTK_PIN_NO(74) | 4)
+#define MT8135_PIN_74_PWM2__FUNC_URXD2 (MTK_PIN_NO(74) | 5)
+#define MT8135_PIN_74_PWM2__FUNC_DISP_PWM (MTK_PIN_NO(74) | 6)
+#define MT8135_PIN_74_PWM2__FUNC_TESTB_OUT9 (MTK_PIN_NO(74) | 7)
+
+#define MT8135_PIN_75_PWM3__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT8135_PIN_75_PWM3__FUNC_PWM3 (MTK_PIN_NO(75) | 1)
+#define MT8135_PIN_75_PWM3__FUNC_EINT75 (MTK_PIN_NO(75) | 2)
+#define MT8135_PIN_75_PWM3__FUNC_DPI33_D0 (MTK_PIN_NO(75) | 3)
+#define MT8135_PIN_75_PWM3__FUNC_PWM6 (MTK_PIN_NO(75) | 4)
+#define MT8135_PIN_75_PWM3__FUNC_UTXD2 (MTK_PIN_NO(75) | 5)
+#define MT8135_PIN_75_PWM3__FUNC_DISP_PWM (MTK_PIN_NO(75) | 6)
+#define MT8135_PIN_75_PWM3__FUNC_TESTB_OUT12 (MTK_PIN_NO(75) | 7)
+
+#define MT8135_PIN_76_PWM4__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT8135_PIN_76_PWM4__FUNC_PWM4 (MTK_PIN_NO(76) | 1)
+#define MT8135_PIN_76_PWM4__FUNC_EINT76 (MTK_PIN_NO(76) | 2)
+#define MT8135_PIN_76_PWM4__FUNC_DPI33_D1 (MTK_PIN_NO(76) | 3)
+#define MT8135_PIN_76_PWM4__FUNC_PWM7 (MTK_PIN_NO(76) | 4)
+#define MT8135_PIN_76_PWM4__FUNC_DISP_PWM (MTK_PIN_NO(76) | 6)
+#define MT8135_PIN_76_PWM4__FUNC_TESTB_OUT13 (MTK_PIN_NO(76) | 7)
+
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(77) | 1)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_EINT63 (MTK_PIN_NO(77) | 2)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_DSP2_IMS (MTK_PIN_NO(77) | 4)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_DPI33_D6 (MTK_PIN_NO(77) | 6)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_TESTA_OUT25 (MTK_PIN_NO(77) | 7)
+
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(78) | 1)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_EINT64 (MTK_PIN_NO(78) | 2)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_DSP2_ID (MTK_PIN_NO(78) | 4)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_DPI33_D7 (MTK_PIN_NO(78) | 6)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_TESTA_OUT26 (MTK_PIN_NO(78) | 7)
+
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(79) | 1)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_EINT60 (MTK_PIN_NO(79) | 2)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_DSP1_IMS (MTK_PIN_NO(79) | 4)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_PCM1_WS (MTK_PIN_NO(79) | 5)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_DPI33_D3 (MTK_PIN_NO(79) | 6)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_TESTA_OUT0 (MTK_PIN_NO(79) | 7)
+
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(80) | 1)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_EINT59 (MTK_PIN_NO(80) | 2)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_DSP1_ICK (MTK_PIN_NO(80) | 4)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_PCM1_CK (MTK_PIN_NO(80) | 5)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_DPI33_D2 (MTK_PIN_NO(80) | 6)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_TESTA_OUT1 (MTK_PIN_NO(80) | 7)
+
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(81) | 1)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_EINT62 (MTK_PIN_NO(81) | 2)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_DSP2_ICK (MTK_PIN_NO(81) | 4)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_PCM1_DO (MTK_PIN_NO(81) | 5)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_DPI33_D5 (MTK_PIN_NO(81) | 6)
+
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_EINT61 (MTK_PIN_NO(82) | 2)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_DSP1_ID (MTK_PIN_NO(82) | 4)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_PCM1_DI (MTK_PIN_NO(82) | 5)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_DPI33_D4 (MTK_PIN_NO(82) | 6)
+
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(83) | 1)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_EINT53 (MTK_PIN_NO(83) | 2)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_SCL1 (MTK_PIN_NO(83) | 3)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_PWM2 (MTK_PIN_NO(83) | 4)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_CLKM1 (MTK_PIN_NO(83) | 5)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_TESTB_OUT2 (MTK_PIN_NO(83) | 7)
+
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(84) | 1)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_EINT54 (MTK_PIN_NO(84) | 2)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_SDA1 (MTK_PIN_NO(84) | 3)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_PWM3 (MTK_PIN_NO(84) | 4)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_CLKM2 (MTK_PIN_NO(84) | 5)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_TESTB_OUT3 (MTK_PIN_NO(84) | 7)
+
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(85) | 1)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_EINT52 (MTK_PIN_NO(85) | 2)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_SDA0 (MTK_PIN_NO(85) | 3)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_PWM1 (MTK_PIN_NO(85) | 4)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_CLKM0 (MTK_PIN_NO(85) | 5)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_TESTB_OUT1 (MTK_PIN_NO(85) | 7)
+
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(86) | 1)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_EINT51 (MTK_PIN_NO(86) | 2)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_SCL0 (MTK_PIN_NO(86) | 3)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_DISP_PWM (MTK_PIN_NO(86) | 4)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_TESTB_OUT0 (MTK_PIN_NO(86) | 7)
+
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(87) | 1)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_EINT55 (MTK_PIN_NO(87) | 2)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_SCL4 (MTK_PIN_NO(87) | 3)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_PWM4 (MTK_PIN_NO(87) | 4)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_CLKM3 (MTK_PIN_NO(87) | 5)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_TESTB_OUT4 (MTK_PIN_NO(87) | 7)
+
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(88) | 1)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_EINT56 (MTK_PIN_NO(88) | 2)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_SDA4 (MTK_PIN_NO(88) | 3)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_PWM5 (MTK_PIN_NO(88) | 4)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_CLKM4 (MTK_PIN_NO(88) | 5)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_TESTB_OUT5 (MTK_PIN_NO(88) | 7)
+
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_MSDC4_DAT0 (MTK_PIN_NO(89) | 1)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_EINT133 (MTK_PIN_NO(89) | 2)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(89) | 4)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_USB_DRVVBUS (MTK_PIN_NO(89) | 5)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_A_FUNC_DIN_9 (MTK_PIN_NO(89) | 6)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_LPTE (MTK_PIN_NO(89) | 7)
+
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_MSDC4_DAT1 (MTK_PIN_NO(90) | 1)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_EINT134 (MTK_PIN_NO(90) | 2)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_A_FUNC_DIN_10 (MTK_PIN_NO(90) | 6)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_LRSTB_1X (MTK_PIN_NO(90) | 7)
+
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_MSDC4_DAT5 (MTK_PIN_NO(91) | 1)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_EINT136 (MTK_PIN_NO(91) | 2)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_I2SIN_WS (MTK_PIN_NO(91) | 3)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_DAC_WS (MTK_PIN_NO(91) | 4)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_PCM1_WS (MTK_PIN_NO(91) | 5)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_A_FUNC_DIN_11 (MTK_PIN_NO(91) | 6)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_SPI1_CSN (MTK_PIN_NO(91) | 7)
+
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_MSDC4_DAT6 (MTK_PIN_NO(92) | 1)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_EINT137 (MTK_PIN_NO(92) | 2)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_I2SOUT_DAT (MTK_PIN_NO(92) | 3)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_DAC_DAT_OUT (MTK_PIN_NO(92) | 4)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_PCM1_DO (MTK_PIN_NO(92) | 5)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_A_FUNC_DIN_12 (MTK_PIN_NO(92) | 6)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_SPI1_MO (MTK_PIN_NO(92) | 7)
+
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_MSDC4_DAT7 (MTK_PIN_NO(93) | 1)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_EINT138 (MTK_PIN_NO(93) | 2)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_I2SIN_DAT (MTK_PIN_NO(93) | 3)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_PCM1_DI (MTK_PIN_NO(93) | 5)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_A_FUNC_DIN_13 (MTK_PIN_NO(93) | 6)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_SPI1_MI (MTK_PIN_NO(93) | 7)
+
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_MSDC4_DAT4 (MTK_PIN_NO(94) | 1)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_EINT135 (MTK_PIN_NO(94) | 2)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_I2SIN_CK (MTK_PIN_NO(94) | 3)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_DAC_CK (MTK_PIN_NO(94) | 4)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_PCM1_CK (MTK_PIN_NO(94) | 5)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_A_FUNC_DIN_14 (MTK_PIN_NO(94) | 6)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_SPI1_CLK (MTK_PIN_NO(94) | 7)
+
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_MSDC4_DAT2 (MTK_PIN_NO(95) | 1)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_EINT131 (MTK_PIN_NO(95) | 2)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_I2SIN_WS (MTK_PIN_NO(95) | 3)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_CM2PDN_2X (MTK_PIN_NO(95) | 4)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_DAC_WS (MTK_PIN_NO(95) | 5)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_PCM1_WS (MTK_PIN_NO(95) | 6)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_LSCE0B_1X (MTK_PIN_NO(95) | 7)
+
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_MSDC4_CLK (MTK_PIN_NO(96) | 1)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_EINT129 (MTK_PIN_NO(96) | 2)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_DPI1_CK_2X (MTK_PIN_NO(96) | 3)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_CM2PCLK_2X (MTK_PIN_NO(96) | 4)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_PWM4 (MTK_PIN_NO(96) | 5)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_PCM1_DI (MTK_PIN_NO(96) | 6)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_LSCK_1X (MTK_PIN_NO(96) | 7)
+
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_MSDC4_DAT3 (MTK_PIN_NO(97) | 1)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_EINT132 (MTK_PIN_NO(97) | 2)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_I2SOUT_DAT (MTK_PIN_NO(97) | 3)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_CM2RST_2X (MTK_PIN_NO(97) | 4)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_DAC_DAT_OUT (MTK_PIN_NO(97) | 5)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_PCM1_DO (MTK_PIN_NO(97) | 6)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_LSCE1B_1X (MTK_PIN_NO(97) | 7)
+
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_MSDC4_CMD (MTK_PIN_NO(98) | 1)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_EINT128 (MTK_PIN_NO(98) | 2)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_DPI1_DE_2X (MTK_PIN_NO(98) | 3)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_PWM3 (MTK_PIN_NO(98) | 5)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_LSDA_1X (MTK_PIN_NO(98) | 7)
+
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_MSDC4_RSTB (MTK_PIN_NO(99) | 1)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_EINT130 (MTK_PIN_NO(99) | 2)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_I2SIN_CK (MTK_PIN_NO(99) | 3)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_CM2MCLK_2X (MTK_PIN_NO(99) | 4)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_DAC_CK (MTK_PIN_NO(99) | 5)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_PCM1_CK (MTK_PIN_NO(99) | 6)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_LSA0_1X (MTK_PIN_NO(99) | 7)
+
+#define MT8135_PIN_100_SDA0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8135_PIN_100_SDA0__FUNC_SDA0 (MTK_PIN_NO(100) | 1)
+#define MT8135_PIN_100_SDA0__FUNC_EINT91 (MTK_PIN_NO(100) | 2)
+#define MT8135_PIN_100_SDA0__FUNC_CLKM1 (MTK_PIN_NO(100) | 3)
+#define MT8135_PIN_100_SDA0__FUNC_PWM1 (MTK_PIN_NO(100) | 4)
+#define MT8135_PIN_100_SDA0__FUNC_A_FUNC_DIN_15 (MTK_PIN_NO(100) | 7)
+
+#define MT8135_PIN_101_SCL0__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8135_PIN_101_SCL0__FUNC_SCL0 (MTK_PIN_NO(101) | 1)
+#define MT8135_PIN_101_SCL0__FUNC_EINT90 (MTK_PIN_NO(101) | 2)
+#define MT8135_PIN_101_SCL0__FUNC_CLKM0 (MTK_PIN_NO(101) | 3)
+#define MT8135_PIN_101_SCL0__FUNC_DISP_PWM (MTK_PIN_NO(101) | 4)
+#define MT8135_PIN_101_SCL0__FUNC_A_FUNC_DIN_16 (MTK_PIN_NO(101) | 7)
+
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_EINT10 (MTK_PIN_NO(102) | 1)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_USB_TEST_IO_16 (MTK_PIN_NO(102) | 5)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_TESTB_OUT16 (MTK_PIN_NO(102) | 6)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_A_FUNC_DIN_17 (MTK_PIN_NO(102) | 7)
+
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_EINT11 (MTK_PIN_NO(103) | 1)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_USB_TEST_IO_17 (MTK_PIN_NO(103) | 5)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_TESTB_OUT17 (MTK_PIN_NO(103) | 6)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_A_FUNC_DIN_18 (MTK_PIN_NO(103) | 7)
+
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_EINT16 (MTK_PIN_NO(104) | 1)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_USB_TEST_IO_18 (MTK_PIN_NO(104) | 5)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_TESTB_OUT18 (MTK_PIN_NO(104) | 6)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_A_FUNC_DIN_19 (MTK_PIN_NO(104) | 7)
+
+#define MT8135_PIN_105_I2S_CLK__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8135_PIN_105_I2S_CLK__FUNC_I2SIN_CK (MTK_PIN_NO(105) | 1)
+#define MT8135_PIN_105_I2S_CLK__FUNC_EINT10 (MTK_PIN_NO(105) | 2)
+#define MT8135_PIN_105_I2S_CLK__FUNC_DAC_CK (MTK_PIN_NO(105) | 3)
+#define MT8135_PIN_105_I2S_CLK__FUNC_PCM1_CK (MTK_PIN_NO(105) | 4)
+#define MT8135_PIN_105_I2S_CLK__FUNC_USB_TEST_IO_19 (MTK_PIN_NO(105) | 5)
+#define MT8135_PIN_105_I2S_CLK__FUNC_TESTB_OUT19 (MTK_PIN_NO(105) | 6)
+#define MT8135_PIN_105_I2S_CLK__FUNC_A_FUNC_DIN_20 (MTK_PIN_NO(105) | 7)
+
+#define MT8135_PIN_106_I2S_WS__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8135_PIN_106_I2S_WS__FUNC_I2SIN_WS (MTK_PIN_NO(106) | 1)
+#define MT8135_PIN_106_I2S_WS__FUNC_EINT13 (MTK_PIN_NO(106) | 2)
+#define MT8135_PIN_106_I2S_WS__FUNC_DAC_WS (MTK_PIN_NO(106) | 3)
+#define MT8135_PIN_106_I2S_WS__FUNC_PCM1_WS (MTK_PIN_NO(106) | 4)
+#define MT8135_PIN_106_I2S_WS__FUNC_USB_TEST_IO_20 (MTK_PIN_NO(106) | 5)
+#define MT8135_PIN_106_I2S_WS__FUNC_TESTB_OUT20 (MTK_PIN_NO(106) | 6)
+#define MT8135_PIN_106_I2S_WS__FUNC_A_FUNC_DIN_21 (MTK_PIN_NO(106) | 7)
+
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_I2SIN_DAT (MTK_PIN_NO(107) | 1)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_EINT11 (MTK_PIN_NO(107) | 2)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_PCM1_DI (MTK_PIN_NO(107) | 4)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_USB_TEST_IO_21 (MTK_PIN_NO(107) | 5)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_TESTB_OUT22 (MTK_PIN_NO(107) | 6)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_A_FUNC_DIN_22 (MTK_PIN_NO(107) | 7)
+
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_I2SOUT_DAT (MTK_PIN_NO(108) | 1)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_EINT12 (MTK_PIN_NO(108) | 2)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_DAC_DAT_OUT (MTK_PIN_NO(108) | 3)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_PCM1_DO (MTK_PIN_NO(108) | 4)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_USB_TEST_IO_22 (MTK_PIN_NO(108) | 5)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_TESTB_OUT23 (MTK_PIN_NO(108) | 6)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_A_FUNC_DIN_23 (MTK_PIN_NO(108) | 7)
+
+#define MT8135_PIN_109_EINT5__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8135_PIN_109_EINT5__FUNC_EINT5 (MTK_PIN_NO(109) | 1)
+#define MT8135_PIN_109_EINT5__FUNC_PWM5 (MTK_PIN_NO(109) | 2)
+#define MT8135_PIN_109_EINT5__FUNC_CLKM3 (MTK_PIN_NO(109) | 3)
+#define MT8135_PIN_109_EINT5__FUNC_GPU_JTRSTB (MTK_PIN_NO(109) | 4)
+#define MT8135_PIN_109_EINT5__FUNC_USB_TEST_IO_23 (MTK_PIN_NO(109) | 5)
+#define MT8135_PIN_109_EINT5__FUNC_TESTB_OUT26 (MTK_PIN_NO(109) | 6)
+#define MT8135_PIN_109_EINT5__FUNC_A_FUNC_DIN_24 (MTK_PIN_NO(109) | 7)
+
+#define MT8135_PIN_110_EINT6__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8135_PIN_110_EINT6__FUNC_EINT6 (MTK_PIN_NO(110) | 1)
+#define MT8135_PIN_110_EINT6__FUNC_PWM6 (MTK_PIN_NO(110) | 2)
+#define MT8135_PIN_110_EINT6__FUNC_CLKM4 (MTK_PIN_NO(110) | 3)
+#define MT8135_PIN_110_EINT6__FUNC_GPU_JTMS (MTK_PIN_NO(110) | 4)
+#define MT8135_PIN_110_EINT6__FUNC_USB_TEST_IO_24 (MTK_PIN_NO(110) | 5)
+#define MT8135_PIN_110_EINT6__FUNC_TESTB_OUT27 (MTK_PIN_NO(110) | 6)
+#define MT8135_PIN_110_EINT6__FUNC_A_FUNC_DIN_25 (MTK_PIN_NO(110) | 7)
+
+#define MT8135_PIN_111_EINT7__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8135_PIN_111_EINT7__FUNC_EINT7 (MTK_PIN_NO(111) | 1)
+#define MT8135_PIN_111_EINT7__FUNC_PWM7 (MTK_PIN_NO(111) | 2)
+#define MT8135_PIN_111_EINT7__FUNC_CLKM5 (MTK_PIN_NO(111) | 3)
+#define MT8135_PIN_111_EINT7__FUNC_GPU_JTDO (MTK_PIN_NO(111) | 4)
+#define MT8135_PIN_111_EINT7__FUNC_USB_TEST_IO_25 (MTK_PIN_NO(111) | 5)
+#define MT8135_PIN_111_EINT7__FUNC_TESTB_OUT28 (MTK_PIN_NO(111) | 6)
+#define MT8135_PIN_111_EINT7__FUNC_A_FUNC_DIN_26 (MTK_PIN_NO(111) | 7)
+
+#define MT8135_PIN_112_EINT8__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8135_PIN_112_EINT8__FUNC_EINT8 (MTK_PIN_NO(112) | 1)
+#define MT8135_PIN_112_EINT8__FUNC_DISP_PWM (MTK_PIN_NO(112) | 2)
+#define MT8135_PIN_112_EINT8__FUNC_CLKM6 (MTK_PIN_NO(112) | 3)
+#define MT8135_PIN_112_EINT8__FUNC_GPU_JTDI (MTK_PIN_NO(112) | 4)
+#define MT8135_PIN_112_EINT8__FUNC_USB_TEST_IO_26 (MTK_PIN_NO(112) | 5)
+#define MT8135_PIN_112_EINT8__FUNC_TESTB_OUT29 (MTK_PIN_NO(112) | 6)
+#define MT8135_PIN_112_EINT8__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(112) | 7)
+
+#define MT8135_PIN_113_EINT9__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8135_PIN_113_EINT9__FUNC_EINT9 (MTK_PIN_NO(113) | 1)
+#define MT8135_PIN_113_EINT9__FUNC_GPU_JTCK (MTK_PIN_NO(113) | 4)
+#define MT8135_PIN_113_EINT9__FUNC_USB_DRVVBUS (MTK_PIN_NO(113) | 5)
+#define MT8135_PIN_113_EINT9__FUNC_TESTB_OUT30 (MTK_PIN_NO(113) | 6)
+#define MT8135_PIN_113_EINT9__FUNC_A_FUNC_DIN_27 (MTK_PIN_NO(113) | 7)
+
+#define MT8135_PIN_114_LPCE1B__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8135_PIN_114_LPCE1B__FUNC_LPCE1B (MTK_PIN_NO(114) | 1)
+#define MT8135_PIN_114_LPCE1B__FUNC_EINT127 (MTK_PIN_NO(114) | 2)
+#define MT8135_PIN_114_LPCE1B__FUNC_PWM2 (MTK_PIN_NO(114) | 5)
+#define MT8135_PIN_114_LPCE1B__FUNC_TESTB_OUT14 (MTK_PIN_NO(114) | 6)
+#define MT8135_PIN_114_LPCE1B__FUNC_A_FUNC_DIN_28 (MTK_PIN_NO(114) | 7)
+
+#define MT8135_PIN_115_LPCE0B__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8135_PIN_115_LPCE0B__FUNC_LPCE0B (MTK_PIN_NO(115) | 1)
+#define MT8135_PIN_115_LPCE0B__FUNC_EINT126 (MTK_PIN_NO(115) | 2)
+#define MT8135_PIN_115_LPCE0B__FUNC_PWM1 (MTK_PIN_NO(115) | 5)
+#define MT8135_PIN_115_LPCE0B__FUNC_TESTB_OUT15 (MTK_PIN_NO(115) | 6)
+#define MT8135_PIN_115_LPCE0B__FUNC_A_FUNC_DIN_29 (MTK_PIN_NO(115) | 7)
+
+#define MT8135_PIN_116_DISP_PWM__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8135_PIN_116_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(116) | 1)
+#define MT8135_PIN_116_DISP_PWM__FUNC_EINT77 (MTK_PIN_NO(116) | 2)
+#define MT8135_PIN_116_DISP_PWM__FUNC_LSDI (MTK_PIN_NO(116) | 3)
+#define MT8135_PIN_116_DISP_PWM__FUNC_PWM1 (MTK_PIN_NO(116) | 4)
+#define MT8135_PIN_116_DISP_PWM__FUNC_PWM2 (MTK_PIN_NO(116) | 5)
+#define MT8135_PIN_116_DISP_PWM__FUNC_PWM3 (MTK_PIN_NO(116) | 7)
+
+#define MT8135_PIN_117_EINT1__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8135_PIN_117_EINT1__FUNC_EINT1 (MTK_PIN_NO(117) | 1)
+#define MT8135_PIN_117_EINT1__FUNC_PWM2 (MTK_PIN_NO(117) | 2)
+#define MT8135_PIN_117_EINT1__FUNC_CLKM1 (MTK_PIN_NO(117) | 3)
+#define MT8135_PIN_117_EINT1__FUNC_USB_TEST_IO_13 (MTK_PIN_NO(117) | 5)
+#define MT8135_PIN_117_EINT1__FUNC_USB_SDA (MTK_PIN_NO(117) | 7)
+
+#define MT8135_PIN_118_EINT2__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8135_PIN_118_EINT2__FUNC_EINT2 (MTK_PIN_NO(118) | 1)
+#define MT8135_PIN_118_EINT2__FUNC_PWM3 (MTK_PIN_NO(118) | 2)
+#define MT8135_PIN_118_EINT2__FUNC_CLKM2 (MTK_PIN_NO(118) | 3)
+#define MT8135_PIN_118_EINT2__FUNC_USB_TEST_IO_14 (MTK_PIN_NO(118) | 5)
+#define MT8135_PIN_118_EINT2__FUNC_SRCLKENAI2 (MTK_PIN_NO(118) | 6)
+#define MT8135_PIN_118_EINT2__FUNC_A_FUNC_DIN_30 (MTK_PIN_NO(118) | 7)
+
+#define MT8135_PIN_119_EINT3__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8135_PIN_119_EINT3__FUNC_EINT3 (MTK_PIN_NO(119) | 1)
+#define MT8135_PIN_119_EINT3__FUNC_USB_TEST_IO_15 (MTK_PIN_NO(119) | 5)
+#define MT8135_PIN_119_EINT3__FUNC_SRCLKENAI1 (MTK_PIN_NO(119) | 6)
+#define MT8135_PIN_119_EINT3__FUNC_EXT_26M_CK (MTK_PIN_NO(119) | 7)
+
+#define MT8135_PIN_120_EINT4__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8135_PIN_120_EINT4__FUNC_EINT4 (MTK_PIN_NO(120) | 1)
+#define MT8135_PIN_120_EINT4__FUNC_PWM4 (MTK_PIN_NO(120) | 2)
+#define MT8135_PIN_120_EINT4__FUNC_USB_DRVVBUS (MTK_PIN_NO(120) | 5)
+#define MT8135_PIN_120_EINT4__FUNC_A_FUNC_DIN_31 (MTK_PIN_NO(120) | 7)
+
+#define MT8135_PIN_121_DPIDE__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8135_PIN_121_DPIDE__FUNC_DPI0_DE (MTK_PIN_NO(121) | 1)
+#define MT8135_PIN_121_DPIDE__FUNC_EINT100 (MTK_PIN_NO(121) | 2)
+#define MT8135_PIN_121_DPIDE__FUNC_I2SOUT_DAT (MTK_PIN_NO(121) | 3)
+#define MT8135_PIN_121_DPIDE__FUNC_DAC_DAT_OUT (MTK_PIN_NO(121) | 4)
+#define MT8135_PIN_121_DPIDE__FUNC_PCM1_DO (MTK_PIN_NO(121) | 5)
+#define MT8135_PIN_121_DPIDE__FUNC_IRDA_TXD (MTK_PIN_NO(121) | 6)
+
+#define MT8135_PIN_122_DPICK__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8135_PIN_122_DPICK__FUNC_DPI0_CK (MTK_PIN_NO(122) | 1)
+#define MT8135_PIN_122_DPICK__FUNC_EINT101 (MTK_PIN_NO(122) | 2)
+#define MT8135_PIN_122_DPICK__FUNC_I2SIN_DAT (MTK_PIN_NO(122) | 3)
+#define MT8135_PIN_122_DPICK__FUNC_PCM1_DI (MTK_PIN_NO(122) | 5)
+#define MT8135_PIN_122_DPICK__FUNC_IRDA_PDN (MTK_PIN_NO(122) | 6)
+
+#define MT8135_PIN_123_DPIG4__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8135_PIN_123_DPIG4__FUNC_DPI0_G4 (MTK_PIN_NO(123) | 1)
+#define MT8135_PIN_123_DPIG4__FUNC_EINT114 (MTK_PIN_NO(123) | 2)
+#define MT8135_PIN_123_DPIG4__FUNC_CM2DAT_2X_0 (MTK_PIN_NO(123) | 4)
+#define MT8135_PIN_123_DPIG4__FUNC_DSP2_ID (MTK_PIN_NO(123) | 5)
+
+#define MT8135_PIN_124_DPIG5__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8135_PIN_124_DPIG5__FUNC_DPI0_G5 (MTK_PIN_NO(124) | 1)
+#define MT8135_PIN_124_DPIG5__FUNC_EINT115 (MTK_PIN_NO(124) | 2)
+#define MT8135_PIN_124_DPIG5__FUNC_CM2DAT_2X_1 (MTK_PIN_NO(124) | 4)
+#define MT8135_PIN_124_DPIG5__FUNC_DSP2_ICK (MTK_PIN_NO(124) | 5)
+
+#define MT8135_PIN_125_DPIR3__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT8135_PIN_125_DPIR3__FUNC_DPI0_R3 (MTK_PIN_NO(125) | 1)
+#define MT8135_PIN_125_DPIR3__FUNC_EINT121 (MTK_PIN_NO(125) | 2)
+#define MT8135_PIN_125_DPIR3__FUNC_CM2DAT_2X_7 (MTK_PIN_NO(125) | 4)
+
+#define MT8135_PIN_126_DPIG1__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT8135_PIN_126_DPIG1__FUNC_DPI0_G1 (MTK_PIN_NO(126) | 1)
+#define MT8135_PIN_126_DPIG1__FUNC_EINT111 (MTK_PIN_NO(126) | 2)
+#define MT8135_PIN_126_DPIG1__FUNC_DSP1_ICK (MTK_PIN_NO(126) | 5)
+
+#define MT8135_PIN_127_DPIVSYNC__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_DPI0_VSYNC (MTK_PIN_NO(127) | 1)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_EINT98 (MTK_PIN_NO(127) | 2)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_I2SIN_CK (MTK_PIN_NO(127) | 3)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_DAC_CK (MTK_PIN_NO(127) | 4)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_PCM1_CK (MTK_PIN_NO(127) | 5)
+
+#define MT8135_PIN_128_DPIHSYNC__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_DPI0_HSYNC (MTK_PIN_NO(128) | 1)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_EINT99 (MTK_PIN_NO(128) | 2)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_I2SIN_WS (MTK_PIN_NO(128) | 3)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_DAC_WS (MTK_PIN_NO(128) | 4)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_PCM1_WS (MTK_PIN_NO(128) | 5)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_IRDA_RXD (MTK_PIN_NO(128) | 6)
+
+#define MT8135_PIN_129_DPIB0__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT8135_PIN_129_DPIB0__FUNC_DPI0_B0 (MTK_PIN_NO(129) | 1)
+#define MT8135_PIN_129_DPIB0__FUNC_EINT102 (MTK_PIN_NO(129) | 2)
+#define MT8135_PIN_129_DPIB0__FUNC_SCL0 (MTK_PIN_NO(129) | 4)
+#define MT8135_PIN_129_DPIB0__FUNC_DISP_PWM (MTK_PIN_NO(129) | 5)
+
+#define MT8135_PIN_130_DPIB1__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT8135_PIN_130_DPIB1__FUNC_DPI0_B1 (MTK_PIN_NO(130) | 1)
+#define MT8135_PIN_130_DPIB1__FUNC_EINT103 (MTK_PIN_NO(130) | 2)
+#define MT8135_PIN_130_DPIB1__FUNC_CLKM0 (MTK_PIN_NO(130) | 3)
+#define MT8135_PIN_130_DPIB1__FUNC_SDA0 (MTK_PIN_NO(130) | 4)
+#define MT8135_PIN_130_DPIB1__FUNC_PWM1 (MTK_PIN_NO(130) | 5)
+
+#define MT8135_PIN_131_DPIB2__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT8135_PIN_131_DPIB2__FUNC_DPI0_B2 (MTK_PIN_NO(131) | 1)
+#define MT8135_PIN_131_DPIB2__FUNC_EINT104 (MTK_PIN_NO(131) | 2)
+#define MT8135_PIN_131_DPIB2__FUNC_CLKM1 (MTK_PIN_NO(131) | 3)
+#define MT8135_PIN_131_DPIB2__FUNC_SCL1 (MTK_PIN_NO(131) | 4)
+#define MT8135_PIN_131_DPIB2__FUNC_PWM2 (MTK_PIN_NO(131) | 5)
+
+#define MT8135_PIN_132_DPIB3__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT8135_PIN_132_DPIB3__FUNC_DPI0_B3 (MTK_PIN_NO(132) | 1)
+#define MT8135_PIN_132_DPIB3__FUNC_EINT105 (MTK_PIN_NO(132) | 2)
+#define MT8135_PIN_132_DPIB3__FUNC_CLKM2 (MTK_PIN_NO(132) | 3)
+#define MT8135_PIN_132_DPIB3__FUNC_SDA1 (MTK_PIN_NO(132) | 4)
+#define MT8135_PIN_132_DPIB3__FUNC_PWM3 (MTK_PIN_NO(132) | 5)
+
+#define MT8135_PIN_133_DPIB4__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT8135_PIN_133_DPIB4__FUNC_DPI0_B4 (MTK_PIN_NO(133) | 1)
+#define MT8135_PIN_133_DPIB4__FUNC_EINT106 (MTK_PIN_NO(133) | 2)
+#define MT8135_PIN_133_DPIB4__FUNC_CLKM3 (MTK_PIN_NO(133) | 3)
+#define MT8135_PIN_133_DPIB4__FUNC_SCL2 (MTK_PIN_NO(133) | 4)
+#define MT8135_PIN_133_DPIB4__FUNC_PWM4 (MTK_PIN_NO(133) | 5)
+
+#define MT8135_PIN_134_DPIB5__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT8135_PIN_134_DPIB5__FUNC_DPI0_B5 (MTK_PIN_NO(134) | 1)
+#define MT8135_PIN_134_DPIB5__FUNC_EINT107 (MTK_PIN_NO(134) | 2)
+#define MT8135_PIN_134_DPIB5__FUNC_CLKM4 (MTK_PIN_NO(134) | 3)
+#define MT8135_PIN_134_DPIB5__FUNC_SDA2 (MTK_PIN_NO(134) | 4)
+#define MT8135_PIN_134_DPIB5__FUNC_PWM5 (MTK_PIN_NO(134) | 5)
+
+#define MT8135_PIN_135_DPIB6__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define MT8135_PIN_135_DPIB6__FUNC_DPI0_B6 (MTK_PIN_NO(135) | 1)
+#define MT8135_PIN_135_DPIB6__FUNC_EINT108 (MTK_PIN_NO(135) | 2)
+#define MT8135_PIN_135_DPIB6__FUNC_CLKM5 (MTK_PIN_NO(135) | 3)
+#define MT8135_PIN_135_DPIB6__FUNC_SCL3 (MTK_PIN_NO(135) | 4)
+#define MT8135_PIN_135_DPIB6__FUNC_PWM6 (MTK_PIN_NO(135) | 5)
+
+#define MT8135_PIN_136_DPIB7__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define MT8135_PIN_136_DPIB7__FUNC_DPI0_B7 (MTK_PIN_NO(136) | 1)
+#define MT8135_PIN_136_DPIB7__FUNC_EINT109 (MTK_PIN_NO(136) | 2)
+#define MT8135_PIN_136_DPIB7__FUNC_CLKM6 (MTK_PIN_NO(136) | 3)
+#define MT8135_PIN_136_DPIB7__FUNC_SDA3 (MTK_PIN_NO(136) | 4)
+#define MT8135_PIN_136_DPIB7__FUNC_PWM7 (MTK_PIN_NO(136) | 5)
+
+#define MT8135_PIN_137_DPIG0__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define MT8135_PIN_137_DPIG0__FUNC_DPI0_G0 (MTK_PIN_NO(137) | 1)
+#define MT8135_PIN_137_DPIG0__FUNC_EINT110 (MTK_PIN_NO(137) | 2)
+#define MT8135_PIN_137_DPIG0__FUNC_DSP1_ID (MTK_PIN_NO(137) | 5)
+
+#define MT8135_PIN_138_DPIG2__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define MT8135_PIN_138_DPIG2__FUNC_DPI0_G2 (MTK_PIN_NO(138) | 1)
+#define MT8135_PIN_138_DPIG2__FUNC_EINT112 (MTK_PIN_NO(138) | 2)
+#define MT8135_PIN_138_DPIG2__FUNC_DSP1_IMS (MTK_PIN_NO(138) | 5)
+
+#define MT8135_PIN_139_DPIG3__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define MT8135_PIN_139_DPIG3__FUNC_DPI0_G3 (MTK_PIN_NO(139) | 1)
+#define MT8135_PIN_139_DPIG3__FUNC_EINT113 (MTK_PIN_NO(139) | 2)
+#define MT8135_PIN_139_DPIG3__FUNC_DSP2_IMS (MTK_PIN_NO(139) | 5)
+
+#define MT8135_PIN_140_DPIG6__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define MT8135_PIN_140_DPIG6__FUNC_DPI0_G6 (MTK_PIN_NO(140) | 1)
+#define MT8135_PIN_140_DPIG6__FUNC_EINT116 (MTK_PIN_NO(140) | 2)
+#define MT8135_PIN_140_DPIG6__FUNC_CM2DAT_2X_2 (MTK_PIN_NO(140) | 4)
+
+#define MT8135_PIN_141_DPIG7__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define MT8135_PIN_141_DPIG7__FUNC_DPI0_G7 (MTK_PIN_NO(141) | 1)
+#define MT8135_PIN_141_DPIG7__FUNC_EINT117 (MTK_PIN_NO(141) | 2)
+#define MT8135_PIN_141_DPIG7__FUNC_CM2DAT_2X_3 (MTK_PIN_NO(141) | 4)
+
+#define MT8135_PIN_142_DPIR0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define MT8135_PIN_142_DPIR0__FUNC_DPI0_R0 (MTK_PIN_NO(142) | 1)
+#define MT8135_PIN_142_DPIR0__FUNC_EINT118 (MTK_PIN_NO(142) | 2)
+#define MT8135_PIN_142_DPIR0__FUNC_CM2DAT_2X_4 (MTK_PIN_NO(142) | 4)
+
+#define MT8135_PIN_143_DPIR1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define MT8135_PIN_143_DPIR1__FUNC_DPI0_R1 (MTK_PIN_NO(143) | 1)
+#define MT8135_PIN_143_DPIR1__FUNC_EINT119 (MTK_PIN_NO(143) | 2)
+#define MT8135_PIN_143_DPIR1__FUNC_CM2DAT_2X_5 (MTK_PIN_NO(143) | 4)
+
+#define MT8135_PIN_144_DPIR2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define MT8135_PIN_144_DPIR2__FUNC_DPI0_R2 (MTK_PIN_NO(144) | 1)
+#define MT8135_PIN_144_DPIR2__FUNC_EINT120 (MTK_PIN_NO(144) | 2)
+#define MT8135_PIN_144_DPIR2__FUNC_CM2DAT_2X_6 (MTK_PIN_NO(144) | 4)
+
+#define MT8135_PIN_145_DPIR4__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define MT8135_PIN_145_DPIR4__FUNC_DPI0_R4 (MTK_PIN_NO(145) | 1)
+#define MT8135_PIN_145_DPIR4__FUNC_EINT122 (MTK_PIN_NO(145) | 2)
+#define MT8135_PIN_145_DPIR4__FUNC_CM2DAT_2X_8 (MTK_PIN_NO(145) | 4)
+
+#define MT8135_PIN_146_DPIR5__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define MT8135_PIN_146_DPIR5__FUNC_DPI0_R5 (MTK_PIN_NO(146) | 1)
+#define MT8135_PIN_146_DPIR5__FUNC_EINT123 (MTK_PIN_NO(146) | 2)
+#define MT8135_PIN_146_DPIR5__FUNC_CM2DAT_2X_9 (MTK_PIN_NO(146) | 4)
+
+#define MT8135_PIN_147_DPIR6__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define MT8135_PIN_147_DPIR6__FUNC_DPI0_R6 (MTK_PIN_NO(147) | 1)
+#define MT8135_PIN_147_DPIR6__FUNC_EINT124 (MTK_PIN_NO(147) | 2)
+#define MT8135_PIN_147_DPIR6__FUNC_CM2VSYNC_2X (MTK_PIN_NO(147) | 4)
+
+#define MT8135_PIN_148_DPIR7__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define MT8135_PIN_148_DPIR7__FUNC_DPI0_R7 (MTK_PIN_NO(148) | 1)
+#define MT8135_PIN_148_DPIR7__FUNC_EINT125 (MTK_PIN_NO(148) | 2)
+#define MT8135_PIN_148_DPIR7__FUNC_CM2HSYNC_2X (MTK_PIN_NO(148) | 4)
+
+#define MT8135_PIN_149_TDN3__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define MT8135_PIN_149_TDN3__FUNC_EINT36 (MTK_PIN_NO(149) | 2)
+
+#define MT8135_PIN_150_TDP3__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define MT8135_PIN_150_TDP3__FUNC_EINT35 (MTK_PIN_NO(150) | 2)
+
+#define MT8135_PIN_151_TDN2__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define MT8135_PIN_151_TDN2__FUNC_EINT169 (MTK_PIN_NO(151) | 2)
+
+#define MT8135_PIN_152_TDP2__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define MT8135_PIN_152_TDP2__FUNC_EINT168 (MTK_PIN_NO(152) | 2)
+
+#define MT8135_PIN_153_TCN__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define MT8135_PIN_153_TCN__FUNC_EINT163 (MTK_PIN_NO(153) | 2)
+
+#define MT8135_PIN_154_TCP__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define MT8135_PIN_154_TCP__FUNC_EINT162 (MTK_PIN_NO(154) | 2)
+
+#define MT8135_PIN_155_TDN1__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define MT8135_PIN_155_TDN1__FUNC_EINT167 (MTK_PIN_NO(155) | 2)
+
+#define MT8135_PIN_156_TDP1__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define MT8135_PIN_156_TDP1__FUNC_EINT166 (MTK_PIN_NO(156) | 2)
+
+#define MT8135_PIN_157_TDN0__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define MT8135_PIN_157_TDN0__FUNC_EINT165 (MTK_PIN_NO(157) | 2)
+
+#define MT8135_PIN_158_TDP0__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define MT8135_PIN_158_TDP0__FUNC_EINT164 (MTK_PIN_NO(158) | 2)
+
+#define MT8135_PIN_159_RDN3__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define MT8135_PIN_159_RDN3__FUNC_EINT18 (MTK_PIN_NO(159) | 2)
+
+#define MT8135_PIN_160_RDP3__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define MT8135_PIN_160_RDP3__FUNC_EINT30 (MTK_PIN_NO(160) | 2)
+
+#define MT8135_PIN_161_RDN2__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define MT8135_PIN_161_RDN2__FUNC_EINT31 (MTK_PIN_NO(161) | 2)
+
+#define MT8135_PIN_162_RDP2__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define MT8135_PIN_162_RDP2__FUNC_EINT32 (MTK_PIN_NO(162) | 2)
+
+#define MT8135_PIN_163_RCN__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define MT8135_PIN_163_RCN__FUNC_EINT33 (MTK_PIN_NO(163) | 2)
+
+#define MT8135_PIN_164_RCP__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define MT8135_PIN_164_RCP__FUNC_EINT39 (MTK_PIN_NO(164) | 2)
+
+#define MT8135_PIN_165_RDN1__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+
+#define MT8135_PIN_166_RDP1__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+
+#define MT8135_PIN_167_RDN0__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+
+#define MT8135_PIN_168_RDP0__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+
+#define MT8135_PIN_169_RDN1_A__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define MT8135_PIN_169_RDN1_A__FUNC_CMDAT6 (MTK_PIN_NO(169) | 1)
+#define MT8135_PIN_169_RDN1_A__FUNC_EINT175 (MTK_PIN_NO(169) | 2)
+
+#define MT8135_PIN_170_RDP1_A__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define MT8135_PIN_170_RDP1_A__FUNC_CMDAT7 (MTK_PIN_NO(170) | 1)
+#define MT8135_PIN_170_RDP1_A__FUNC_EINT174 (MTK_PIN_NO(170) | 2)
+
+#define MT8135_PIN_171_RCN_A__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define MT8135_PIN_171_RCN_A__FUNC_CMDAT8 (MTK_PIN_NO(171) | 1)
+#define MT8135_PIN_171_RCN_A__FUNC_EINT171 (MTK_PIN_NO(171) | 2)
+
+#define MT8135_PIN_172_RCP_A__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define MT8135_PIN_172_RCP_A__FUNC_CMDAT9 (MTK_PIN_NO(172) | 1)
+#define MT8135_PIN_172_RCP_A__FUNC_EINT170 (MTK_PIN_NO(172) | 2)
+
+#define MT8135_PIN_173_RDN0_A__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define MT8135_PIN_173_RDN0_A__FUNC_CMHSYNC (MTK_PIN_NO(173) | 1)
+#define MT8135_PIN_173_RDN0_A__FUNC_EINT173 (MTK_PIN_NO(173) | 2)
+
+#define MT8135_PIN_174_RDP0_A__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define MT8135_PIN_174_RDP0_A__FUNC_CMVSYNC (MTK_PIN_NO(174) | 1)
+#define MT8135_PIN_174_RDP0_A__FUNC_EINT172 (MTK_PIN_NO(174) | 2)
+
+#define MT8135_PIN_175_RDN1_B__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define MT8135_PIN_175_RDN1_B__FUNC_CMDAT2 (MTK_PIN_NO(175) | 1)
+#define MT8135_PIN_175_RDN1_B__FUNC_EINT181 (MTK_PIN_NO(175) | 2)
+#define MT8135_PIN_175_RDN1_B__FUNC_CMCSD2 (MTK_PIN_NO(175) | 3)
+
+#define MT8135_PIN_176_RDP1_B__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define MT8135_PIN_176_RDP1_B__FUNC_CMDAT3 (MTK_PIN_NO(176) | 1)
+#define MT8135_PIN_176_RDP1_B__FUNC_EINT180 (MTK_PIN_NO(176) | 2)
+#define MT8135_PIN_176_RDP1_B__FUNC_CMCSD3 (MTK_PIN_NO(176) | 3)
+
+#define MT8135_PIN_177_RCN_B__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define MT8135_PIN_177_RCN_B__FUNC_CMDAT4 (MTK_PIN_NO(177) | 1)
+#define MT8135_PIN_177_RCN_B__FUNC_EINT177 (MTK_PIN_NO(177) | 2)
+
+#define MT8135_PIN_178_RCP_B__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define MT8135_PIN_178_RCP_B__FUNC_CMDAT5 (MTK_PIN_NO(178) | 1)
+#define MT8135_PIN_178_RCP_B__FUNC_EINT176 (MTK_PIN_NO(178) | 2)
+
+#define MT8135_PIN_179_RDN0_B__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define MT8135_PIN_179_RDN0_B__FUNC_CMDAT0 (MTK_PIN_NO(179) | 1)
+#define MT8135_PIN_179_RDN0_B__FUNC_EINT179 (MTK_PIN_NO(179) | 2)
+#define MT8135_PIN_179_RDN0_B__FUNC_CMCSD0 (MTK_PIN_NO(179) | 3)
+
+#define MT8135_PIN_180_RDP0_B__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define MT8135_PIN_180_RDP0_B__FUNC_CMDAT1 (MTK_PIN_NO(180) | 1)
+#define MT8135_PIN_180_RDP0_B__FUNC_EINT178 (MTK_PIN_NO(180) | 2)
+#define MT8135_PIN_180_RDP0_B__FUNC_CMCSD1 (MTK_PIN_NO(180) | 3)
+
+#define MT8135_PIN_181_CMPCLK__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define MT8135_PIN_181_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(181) | 1)
+#define MT8135_PIN_181_CMPCLK__FUNC_EINT182 (MTK_PIN_NO(181) | 2)
+#define MT8135_PIN_181_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(181) | 3)
+#define MT8135_PIN_181_CMPCLK__FUNC_CM2MCLK_4X (MTK_PIN_NO(181) | 4)
+#define MT8135_PIN_181_CMPCLK__FUNC_TS_AUXADC_SEL_3 (MTK_PIN_NO(181) | 5)
+#define MT8135_PIN_181_CMPCLK__FUNC_VENC_TEST_CK (MTK_PIN_NO(181) | 6)
+#define MT8135_PIN_181_CMPCLK__FUNC_TESTA_OUT27 (MTK_PIN_NO(181) | 7)
+
+#define MT8135_PIN_182_CMMCLK__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define MT8135_PIN_182_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(182) | 1)
+#define MT8135_PIN_182_CMMCLK__FUNC_EINT183 (MTK_PIN_NO(182) | 2)
+#define MT8135_PIN_182_CMMCLK__FUNC_TS_AUXADC_SEL_2 (MTK_PIN_NO(182) | 5)
+#define MT8135_PIN_182_CMMCLK__FUNC_TESTA_OUT28 (MTK_PIN_NO(182) | 7)
+
+#define MT8135_PIN_183_CMRST__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define MT8135_PIN_183_CMRST__FUNC_CMRST (MTK_PIN_NO(183) | 1)
+#define MT8135_PIN_183_CMRST__FUNC_EINT185 (MTK_PIN_NO(183) | 2)
+#define MT8135_PIN_183_CMRST__FUNC_TS_AUXADC_SEL_1 (MTK_PIN_NO(183) | 5)
+#define MT8135_PIN_183_CMRST__FUNC_TESTA_OUT30 (MTK_PIN_NO(183) | 7)
+
+#define MT8135_PIN_184_CMPDN__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define MT8135_PIN_184_CMPDN__FUNC_CMPDN (MTK_PIN_NO(184) | 1)
+#define MT8135_PIN_184_CMPDN__FUNC_EINT184 (MTK_PIN_NO(184) | 2)
+#define MT8135_PIN_184_CMPDN__FUNC_TS_AUXADC_SEL_0 (MTK_PIN_NO(184) | 5)
+#define MT8135_PIN_184_CMPDN__FUNC_TESTA_OUT29 (MTK_PIN_NO(184) | 7)
+
+#define MT8135_PIN_185_CMFLASH__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define MT8135_PIN_185_CMFLASH__FUNC_CMFLASH (MTK_PIN_NO(185) | 1)
+#define MT8135_PIN_185_CMFLASH__FUNC_EINT186 (MTK_PIN_NO(185) | 2)
+#define MT8135_PIN_185_CMFLASH__FUNC_CM2MCLK_3X (MTK_PIN_NO(185) | 3)
+#define MT8135_PIN_185_CMFLASH__FUNC_MFG_TEST_CK_1 (MTK_PIN_NO(185) | 6)
+#define MT8135_PIN_185_CMFLASH__FUNC_TESTA_OUT31 (MTK_PIN_NO(185) | 7)
+
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_MRG_I2S_P_CLK (MTK_PIN_NO(186) | 1)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_EINT14 (MTK_PIN_NO(186) | 2)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_I2SIN_CK (MTK_PIN_NO(186) | 3)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_PCM0_CK (MTK_PIN_NO(186) | 4)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_DSP2_ICK (MTK_PIN_NO(186) | 5)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_IMG_TEST_CK (MTK_PIN_NO(186) | 6)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_USB_SCL (MTK_PIN_NO(186) | 7)
+
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_MRG_I2S_SYNC (MTK_PIN_NO(187) | 1)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_EINT16 (MTK_PIN_NO(187) | 2)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_I2SIN_WS (MTK_PIN_NO(187) | 3)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_PCM0_WS (MTK_PIN_NO(187) | 4)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_DISP_TEST_CK (MTK_PIN_NO(187) | 6)
+
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_MRG_I2S_PCM_RX (MTK_PIN_NO(188) | 1)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_EINT15 (MTK_PIN_NO(188) | 2)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_I2SIN_DAT (MTK_PIN_NO(188) | 3)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_PCM0_DI (MTK_PIN_NO(188) | 4)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_DSP2_ID (MTK_PIN_NO(188) | 5)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_MFG_TEST_CK (MTK_PIN_NO(188) | 6)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_USB_SDA (MTK_PIN_NO(188) | 7)
+
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_MRG_I2S_PCM_TX (MTK_PIN_NO(189) | 1)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_EINT17 (MTK_PIN_NO(189) | 2)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_I2SOUT_DAT (MTK_PIN_NO(189) | 3)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_PCM0_DO (MTK_PIN_NO(189) | 4)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_VDEC_TEST_CK (MTK_PIN_NO(189) | 6)
+
+#define MT8135_PIN_190_SRCLKENAI__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define MT8135_PIN_190_SRCLKENAI__FUNC_SRCLKENAI (MTK_PIN_NO(190) | 1)
+
+#define MT8135_PIN_191_URXD3__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define MT8135_PIN_191_URXD3__FUNC_URXD3 (MTK_PIN_NO(191) | 1)
+#define MT8135_PIN_191_URXD3__FUNC_EINT87 (MTK_PIN_NO(191) | 2)
+#define MT8135_PIN_191_URXD3__FUNC_UTXD3 (MTK_PIN_NO(191) | 3)
+#define MT8135_PIN_191_URXD3__FUNC_TS_AUX_ST (MTK_PIN_NO(191) | 5)
+#define MT8135_PIN_191_URXD3__FUNC_PWM4 (MTK_PIN_NO(191) | 6)
+
+#define MT8135_PIN_192_UTXD3__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define MT8135_PIN_192_UTXD3__FUNC_UTXD3 (MTK_PIN_NO(192) | 1)
+#define MT8135_PIN_192_UTXD3__FUNC_EINT86 (MTK_PIN_NO(192) | 2)
+#define MT8135_PIN_192_UTXD3__FUNC_URXD3 (MTK_PIN_NO(192) | 3)
+#define MT8135_PIN_192_UTXD3__FUNC_TS_AUX_CS_B (MTK_PIN_NO(192) | 5)
+#define MT8135_PIN_192_UTXD3__FUNC_PWM3 (MTK_PIN_NO(192) | 6)
+
+#define MT8135_PIN_193_SDA2__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define MT8135_PIN_193_SDA2__FUNC_SDA2 (MTK_PIN_NO(193) | 1)
+#define MT8135_PIN_193_SDA2__FUNC_EINT95 (MTK_PIN_NO(193) | 2)
+#define MT8135_PIN_193_SDA2__FUNC_CLKM5 (MTK_PIN_NO(193) | 3)
+#define MT8135_PIN_193_SDA2__FUNC_PWM5 (MTK_PIN_NO(193) | 4)
+#define MT8135_PIN_193_SDA2__FUNC_TS_AUX_PWDB (MTK_PIN_NO(193) | 5)
+
+#define MT8135_PIN_194_SCL2__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define MT8135_PIN_194_SCL2__FUNC_SCL2 (MTK_PIN_NO(194) | 1)
+#define MT8135_PIN_194_SCL2__FUNC_EINT94 (MTK_PIN_NO(194) | 2)
+#define MT8135_PIN_194_SCL2__FUNC_CLKM4 (MTK_PIN_NO(194) | 3)
+#define MT8135_PIN_194_SCL2__FUNC_PWM4 (MTK_PIN_NO(194) | 4)
+#define MT8135_PIN_194_SCL2__FUNC_TS_AUXADC_TEST_CK (MTK_PIN_NO(194) | 5)
+
+#define MT8135_PIN_195_SDA1__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define MT8135_PIN_195_SDA1__FUNC_SDA1 (MTK_PIN_NO(195) | 1)
+#define MT8135_PIN_195_SDA1__FUNC_EINT93 (MTK_PIN_NO(195) | 2)
+#define MT8135_PIN_195_SDA1__FUNC_CLKM3 (MTK_PIN_NO(195) | 3)
+#define MT8135_PIN_195_SDA1__FUNC_PWM3 (MTK_PIN_NO(195) | 4)
+#define MT8135_PIN_195_SDA1__FUNC_TS_AUX_SCLK_PWDB (MTK_PIN_NO(195) | 5)
+
+#define MT8135_PIN_196_SCL1__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define MT8135_PIN_196_SCL1__FUNC_SCL1 (MTK_PIN_NO(196) | 1)
+#define MT8135_PIN_196_SCL1__FUNC_EINT92 (MTK_PIN_NO(196) | 2)
+#define MT8135_PIN_196_SCL1__FUNC_CLKM2 (MTK_PIN_NO(196) | 3)
+#define MT8135_PIN_196_SCL1__FUNC_PWM2 (MTK_PIN_NO(196) | 4)
+#define MT8135_PIN_196_SCL1__FUNC_TS_AUX_DIN (MTK_PIN_NO(196) | 5)
+
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_MSDC3_DAT2 (MTK_PIN_NO(197) | 1)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_EINT71 (MTK_PIN_NO(197) | 2)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_SCL6 (MTK_PIN_NO(197) | 3)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_PWM5 (MTK_PIN_NO(197) | 4)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_CLKM4 (MTK_PIN_NO(197) | 5)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_MFG_TEST_CK_2 (MTK_PIN_NO(197) | 6)
+
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_MSDC3_DAT3 (MTK_PIN_NO(198) | 1)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_EINT72 (MTK_PIN_NO(198) | 2)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_SDA6 (MTK_PIN_NO(198) | 3)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_PWM6 (MTK_PIN_NO(198) | 4)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_CLKM5 (MTK_PIN_NO(198) | 5)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_MFG_TEST_CK_3 (MTK_PIN_NO(198) | 6)
+
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_MSDC3_CMD (MTK_PIN_NO(199) | 1)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_EINT68 (MTK_PIN_NO(199) | 2)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_SDA2 (MTK_PIN_NO(199) | 3)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_PWM2 (MTK_PIN_NO(199) | 4)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_CLKM1 (MTK_PIN_NO(199) | 5)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_MFG_TEST_CK_4 (MTK_PIN_NO(199) | 6)
+
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_MSDC3_CLK (MTK_PIN_NO(200) | 1)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_EINT67 (MTK_PIN_NO(200) | 2)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_SCL2 (MTK_PIN_NO(200) | 3)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_PWM1 (MTK_PIN_NO(200) | 4)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_CLKM0 (MTK_PIN_NO(200) | 5)
+
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_MSDC3_DAT1 (MTK_PIN_NO(201) | 1)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_EINT70 (MTK_PIN_NO(201) | 2)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_SDA3 (MTK_PIN_NO(201) | 3)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_PWM4 (MTK_PIN_NO(201) | 4)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_CLKM3 (MTK_PIN_NO(201) | 5)
+
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_MSDC3_DAT0 (MTK_PIN_NO(202) | 1)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_EINT69 (MTK_PIN_NO(202) | 2)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_SCL3 (MTK_PIN_NO(202) | 3)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_PWM3 (MTK_PIN_NO(202) | 4)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_CLKM2 (MTK_PIN_NO(202) | 5)
+
+#endif /* __DTS_MT8135_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt8183-pinfunc.h b/include/dt-bindings/pinctrl/mt8183-pinfunc.h
new file mode 100644
index 000000000000..6221cd712718
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8183-pinfunc.h
@@ -0,0 +1,1120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __MT8183_PINFUNC_H
+#define __MT8183_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
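+
+/*
+ * Each PINMUX_* macro below packs a pin number and a mux function index
+ * into one cell: MTK_PIN_NO(n) | f. MTK_PIN_NO() comes from mt65xx.h and
+ * shifts the pin number into the upper bits, while f selects one of the
+ * pin's mux modes (mode 0 is always the plain GPIO function). A board
+ * device tree then picks a function by listing the macro in a pinmux
+ * property, for example (illustrative sketch, assuming the standard
+ * MediaTek pinctrl binding):
+ *
+ *	pinmux = <PINMUX_GPIO0__FUNC_MRG_SYNC>;
+ */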
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_MRG_SYNC (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_PCM0_SYNC (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_SRCLKENAI0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI2_CS (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_I2S3_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_SPI2_CSB (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_MRG_CLK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_PCM0_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CLKM3 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI2_MO (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_I2S3_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_SPI2_MO (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_MRG_DO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_PCM0_DO (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_SCL6 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI2_CK (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_I2S3_LRCK (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_SPI2_CLK (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_MRG_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_PCM0_DI (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SDA6 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TDM_MCK (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_I2S3_DO (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_PWM_B (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_MD_URXD1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TDM_BCK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_DAP_MD32_SWD (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PWM_C (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SSPM_URXD_AO (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_MD_UTXD1 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TDM_LRCK (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PWM_A (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_IDDIG (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_MD_URXD0 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TDM_DATA0 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_CMFLASH (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI1_B_MI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_USB_DRVVBUS (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_MD_UTXD0 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TDM_DATA1 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SPI1_B_CSB (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_ANT_SEL3 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SCL7 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_TDM_DATA2 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_MD_INT0 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SPI1_B_MO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_ANT_SEL4 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_CMMCLK2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_B10 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_SPI1_B_CLK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_ANT_SEL5 (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_CMMCLK3 (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_TDM_DATA3 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_DBG_MON_B11 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_TP_URXD1_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_IDDIG (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_SCL6 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_UCTS1 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_UCTS0 (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_SRCLKENAI1 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_I2S5_MCK (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_TP_UTXD1_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_USB_DRVVBUS (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_SDA6 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_URTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_URTS0 (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I2S2_DI2 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I2S5_BCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_DBPI_D0 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI5_MI (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_PCM0_SYNC (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_MD_URXD0 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL3 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I2S0_MCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_DBG_MON_B15 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_DBPI_D1 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_SPI5_CSB (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_PCM0_CLK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_UTXD0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL4 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S0_BCK (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_DBG_MON_B16 (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DBPI_D2 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SPI5_MO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_PCM0_DO (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_URXD1 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL5 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S0_LRCK (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_DBG_MON_B17 (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_DBPI_D3 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SPI5_CLK (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_PCM0_DI (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_UTXD1 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_ANT_SEL6 (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I2S0_DI (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_B23 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_DBPI_D4 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SPI4_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_MD_INT0 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_ANT_SEL7 (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_I2S3_MCK (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DBPI_D5 (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_CSB (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_I2S3_BCK (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A2 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_DBPI_D6 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_CONN_MCU_TDO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I2S3_LRCK (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A3 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_DBPI_D7 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_UTXD1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A19 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_DBPI_D8 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SPI3_MI (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_TMS (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_DAP_MD32_SWD (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I2S2_MCK (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_DBPI_D9 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SPI3_CSB (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_TCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_I2S2_BCK (MTK_PIN_NO(22) | 6)
+#define PINMUX_GPIO22__FUNC_DBG_MON_B6 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_DBPI_D10 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_SPI3_MO (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CONN_MCU_TDI (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_UCTS1 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I2S2_LRCK (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_DBG_MON_B7 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_DBPI_D11 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI3_CLK (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI0 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_URTS1 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_IO_JTAG_TCK (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I2S2_DI (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_DBG_MON_B31 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_DBPI_HSYNC (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL6 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_KPCOL2 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_IO_JTAG_TMS (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_I2S1_MCK (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_B0 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_DBPI_VSYNC (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_ANT_SEL1 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA6 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_KPROW2 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_IO_JTAG_TDI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_I2S1_BCK (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_B1 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_DBPI_DE (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_ANT_SEL2 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_DMIC_CLK (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_IO_JTAG_TDO (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_I2S1_LRCK (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_B9 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_DBPI_CK (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_DMIC_DAT (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_I2S1_DO (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_DBG_MON_B32 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_MSDC1_CLK (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_IO_JTAG_TCK (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_UDI_TCK (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_CONN_DSP_JCK (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(29) | 5)
+#define PINMUX_GPIO29__FUNC_PCM1_CLK (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A6 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_MSDC1_DAT3 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_DAP_MD32_SWD (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_PCM1_DI (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A7 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_MSDC1_CMD (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_IO_JTAG_TMS (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_UDI_TMS (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_CONN_DSP_JMS (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_PCM1_SYNC (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A8 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_MSDC1_DAT0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_IO_JTAG_TDI (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_UDI_TDI (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_CONN_DSP_JDI (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_PCM1_DO0 (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_DBG_MON_A9 (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MSDC1_DAT2 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_UDI_NTRST (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_PCM1_DO2 (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_DBG_MON_A10 (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MSDC1_DAT1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_IO_JTAG_TDO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_UDI_TDO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_CONN_DSP_JDO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_PCM1_DO1 (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_DBG_MON_A11 (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_DSP_JMS (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_DBG_MON_A28 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A29 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_CONN_DSP_JDO (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A30 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A20 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_CONN_DSP_JCK (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A31 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_CONN_DSP_JDI (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A32 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_IDDIG (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_URXD1 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_UCTS0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DMIC_CLK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_USB_DRVVBUS (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_UTXD1 (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_URTS0 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_SSPM_URXD_AO (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(42) | 5)
+#define PINMUX_GPIO42__FUNC_DMIC_DAT (MTK_PIN_NO(42) | 6)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DISP_PWM (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_DSI_TE (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_LCM_RST (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_URXD1 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_UCTS1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_CCU_UTXD_AO (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_TP_UCTS1_AO (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_IDDIG (MTK_PIN_NO(46) | 6)
+#define PINMUX_GPIO46__FUNC_I2S5_LRCK (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_UTXD1 (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_URTS1 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_CCU_URXD_AO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_TP_URTS1_AO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_USB_DRVVBUS (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_I2S5_DO (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SCL5 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SDA5 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SCL3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SDA3 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_ANT2 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_ANT0 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_OLAT1 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCL_6306 (MTK_PIN_NO(56) | 2)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_SDA_6306 (MTK_PIN_NO(57) | 2)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_SPM_BSI_D2 (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_PWM_B (MTK_PIN_NO(58) | 3)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_SPM_BSI_D1 (MTK_PIN_NO(59) | 2)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_SPM_BSI_D0 (MTK_PIN_NO(60) | 2)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MIPI1_SDATA (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_MIPI1_SCLK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_MIPI0_SDATA (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_MIPI0_SCLK (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_MIPI3_SDATA (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_BPI_OLAT2 (MTK_PIN_NO(65) | 2)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_MIPI3_SCLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_BPI_OLAT3 (MTK_PIN_NO(66) | 2)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MIPI2_SDATA (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MIPI2_SCLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS7 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS6 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS5 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS4 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS3 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS2 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS1 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS0 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_ANT1 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_OLAT0 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_PA_VM1 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_MIPI4_SDATA (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_PA_VM0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_MIPI4_SCLK (MTK_PIN_NO(80) | 2)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_SDA1 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_SDA0 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_SCL0 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_SCL1 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_SPI0_MI (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_SCP_SPI0_MI (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CLKM3 (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_I2S1_BCK (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_DFD_TDO (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_JTDO_SEL1 (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_SPI0_CSB (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_SCP_SPI0_CS (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_CLKM0 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_I2S1_LRCK (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_DFD_TMS (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_JTMS_SEL1 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_SPI0_MO (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_SCP_SPI0_MO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_SDA1 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_I2S1_DO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_DFD_TDI (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_JTDI_SEL1 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_SPI0_CLK (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_SCP_SPI0_CK (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_SCL1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_I2S1_MCK (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_JTCK_SEL1 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SRCLKENAI0 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I2S5_BCK (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_ANT_SEL6 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_SDA8 (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_CMVREF0 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_DBG_MON_A21 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_PWM_A (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_CMMCLK2 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_I2S5_LRCK (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_SCL8 (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_PTA_RXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_DBG_MON_A22 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_KPROW1 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PWM_B (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I2S5_DO (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_ANT_SEL7 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_CMMCLK3 (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_PTA_TXD (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_KPROW0 (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_KPCOL0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_DBG_MON_B27 (MTK_PIN_NO(93) | 7)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_KPCOL1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S2_DI2 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_I2S5_MCK (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_CMMCLK2 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_SCP_SPI2_MI (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_SRCLKENAI1 (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_SPI2_MI (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_URXD0 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD_URXD0 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_MD_URXD1 (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_SSPM_URXD_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_CCU_URXD_AO (MTK_PIN_NO(95) | 6)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_UTXD0 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_URXD0 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_MD_UTXD0 (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_MD_UTXD1 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CCU_UTXD_AO (MTK_PIN_NO(96) | 6)
+#define PINMUX_GPIO96__FUNC_DBG_MON_B2 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UCTS0 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S2_MCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_IDDIG (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_CONN_MCU_TDO (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TDO (MTK_PIN_NO(97) | 6)
+#define PINMUX_GPIO97__FUNC_DBG_MON_B3 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS0 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S2_BCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_USB_DRVVBUS (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_CONN_MCU_TMS (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TMS (MTK_PIN_NO(98) | 6)
+#define PINMUX_GPIO98__FUNC_DBG_MON_B4 (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_CMMCLK0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B28 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_CMMCLK1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_PWM_C (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B29 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_CLKM2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_I2S2_LRCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_CMVREF1 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_CONN_MCU_TCK (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_IO_JTAG_TCK (MTK_PIN_NO(101) | 6)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_CLKM1 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_I2S2_DI (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_CONN_MCU_TDI (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_IO_JTAG_TDI (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B8 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SCL2 (MTK_PIN_NO(103) | 1)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SDA2 (MTK_PIN_NO(104) | 1)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SCL4 (MTK_PIN_NO(105) | 1)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SDA4 (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_ANT_SEL0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SDA7 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_PWM_A (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_DBG_MON_B12 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_CMMCLK2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_ANT_SEL1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM1 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_SCL8 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_DAP_MD32_SWD (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_PWM_B (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_DBG_MON_B13 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DMIC_DAT (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_ANT_SEL2 (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_CLKM2 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_SDA8 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_PWM_C (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DBG_MON_B14 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_ANT_SEL0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TP_URXD1_AO (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_USB_DRVVBUS (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_SRCLKENAI1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_KPCOL2 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_URXD1 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_CMMCLK3 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_ANT_SEL1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_SRCLKENAI0 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SDA7 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_ANT_SEL2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TP_UTXD1_AO (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_IDDIG (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AGPS_SYNC (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_KPROW2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_UTXD1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_CONN_TOP_CLK (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_SCL6 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_AUXIF_CLK0 (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TP_UCTS1_AO (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CONN_TOP_DATA (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SDA6 (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_AUXIF_ST0 (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TP_URTS1_AO (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CONN_BT_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UTXD1 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_PTA_TXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_AUXIF_CLK1 (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_DAP_MD32_SWD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_TP_UTXD1_AO (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CONN_BT_DATA (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_AUXIF_ST1 (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_TP_URXD2_AO (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_DBG_MON_A0 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CONN_WF_HB0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_TP_UTXD2_AO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_DBG_MON_A4 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CONN_WF_HB1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SSPM_URXD_AO (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_TP_UCTS2_AO (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_DBG_MON_A5 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CONN_WF_HB2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_TP_URTS2_AO (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CONN_WB_PTA (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_CCU_URXD_AO (MTK_PIN_NO(120) | 5)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CONN_HRST_B (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_URXD1 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_PTA_RXD (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_CCU_UTXD_AO (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_TP_URXD1_AO (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_SSPM_URXD2_AO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_ANT_SEL1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_DBG_MON_A12 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_MSDC0_DAT0 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_ANT_SEL0 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_DBG_MON_A13 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_MSDC0_CLK (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DBG_MON_A14 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC0_DAT2 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_MRG_CLK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_A15 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC0_DAT4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_ANT_SEL5 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_DBG_MON_A16 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC0_DAT6 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_ANT_SEL4 (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_DBG_MON_A17 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MSDC0_DAT1 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_ANT_SEL2 (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A18 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MSDC0_DAT5 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_ANT_SEL3 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A23 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MSDC0_DAT7 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MRG_DO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A24 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MSDC0_DSL (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MRG_SYNC (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MSDC0_DAT3 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MRG_DI (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A26 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MSDC0_RSTB (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_AGPS_SYNC (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A27 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_RTC32K_CK (MTK_PIN_NO(134) | 1)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_WATCHDOG (MTK_PIN_NO(135) | 1)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MISO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I2S1_MCK (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_I2S1_BCK (MTK_PIN_NO(137) | 3)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I2S1_LRCK (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_DBG_MON_B24 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I2S1_DO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(139) | 6)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MISO (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_I2S0_MCK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I2S0_BCK (MTK_PIN_NO(141) | 3)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I2S0_LRCK (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_VOW_DAT_MISO (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_DBG_MON_B25 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_I2S0_DI (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_VOW_CLK_MISO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B26 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(144) | 2)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(146) | 2)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(147) | 1)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_SRCLKENA0 (MTK_PIN_NO(148) | 1)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_SRCLKENA1 (MTK_PIN_NO(149) | 1)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_PWM_A (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CMFLASH (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_CLKM0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B30 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_PWM_B (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CMVREF0 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_CLKM1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B20 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_PWM_C (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_CMFLASH (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM2 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B21 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_PWM_A (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_CMVREF0 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_CLKM3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B22 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B18 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL0 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_ANT_SEL1 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SRCLKENAI0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_SCL6 (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_KPCOL2 (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_IDDIG (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_ANT_SEL2 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SRCLKENAI1 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_SDA6 (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_KPROW2 (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_USB_DRVVBUS (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_ANT_SEL3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_ANT_SEL4 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_ANT_SEL5 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SPI1_A_MI (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SPI1_MI (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_IDDIG (MTK_PIN_NO(161) | 3)
+#define PINMUX_GPIO161__FUNC_ANT_SEL6 (MTK_PIN_NO(161) | 4)
+#define PINMUX_GPIO161__FUNC_KPCOL2 (MTK_PIN_NO(161) | 5)
+#define PINMUX_GPIO161__FUNC_PTA_RXD (MTK_PIN_NO(161) | 6)
+#define PINMUX_GPIO161__FUNC_DBG_MON_B19 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SPI1_A_CSB (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_SCP_SPI1_CS (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_USB_DRVVBUS (MTK_PIN_NO(162) | 3)
+#define PINMUX_GPIO162__FUNC_ANT_SEL5 (MTK_PIN_NO(162) | 4)
+#define PINMUX_GPIO162__FUNC_KPROW2 (MTK_PIN_NO(162) | 5)
+#define PINMUX_GPIO162__FUNC_PTA_TXD (MTK_PIN_NO(162) | 6)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SPI1_A_MO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_SCP_SPI1_MO (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_SDA1 (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_ANT_SEL4 (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_CMMCLK2 (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_DMIC_CLK (MTK_PIN_NO(163) | 6)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_SPI1_A_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_SCL1 (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_ANT_SEL3 (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_CMMCLK3 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DMIC_DAT (MTK_PIN_NO(164) | 6)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_PWM_B (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_CMMCLK2 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_TDM_MCK_2ND (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL6 (MTK_PIN_NO(166) | 1)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SPM_BSI_EN (MTK_PIN_NO(167) | 2)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPM_BSI_CK (MTK_PIN_NO(168) | 2)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_PWM_C (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_CMMCLK3 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_CMVREF1 (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_AGPS_SYNC (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_TDM_BCK_2ND (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_I2S1_BCK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_I2S3_BCK (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_SCL7 (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I2S5_BCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_ANT_SEL3 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_I2S1_LRCK (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I2S3_LRCK (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_SDA7 (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I2S5_LRCK (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_URXD1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_ANT_SEL4 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_I2S1_DO (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I2S3_DO (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_SCL8 (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_I2S5_DO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_UTXD1 (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_ANT_SEL5 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_I2S1_MCK (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I2S3_MCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_SDA8 (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I2S5_MCK (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_UCTS0 (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(173) | 6)
+#define PINMUX_GPIO173__FUNC_ANT_SEL6 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_I2S2_DI (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I2S0_DI (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_I2S2_DI2 (MTK_PIN_NO(174) | 4)
+#define PINMUX_GPIO174__FUNC_URTS0 (MTK_PIN_NO(174) | 5)
+#define PINMUX_GPIO174__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(174) | 6)
+#define PINMUX_GPIO174__FUNC_ANT_SEL7 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_ANT_SEL7 (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+
+#endif /* __MT8183_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt8186-pinfunc.h b/include/dt-bindings/pinctrl/mt8186-pinfunc.h
new file mode 100644
index 000000000000..18d6683c6f65
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8186-pinfunc.h
@@ -0,0 +1,1174 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ */
+
+#ifndef __MT8186_PINFUNC_H
+#define __MT8186_PINFUNC_H
+
+#include "mt65xx.h"
+
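+/*
+ * Each PINMUX_GPIOn__FUNC_* constant below packs a pin number and a mux
+ * selector into one devicetree cell: MTK_PIN_NO(n) (from mt65xx.h,
+ * typically (n) << 8) holds the pin index in the upper bits, and the
+ * OR-ed value 0-7 selects one of the pin's eight functions.  A board
+ * device tree then picks a function with, e.g.:
+ *
+ *	pinmux = <PINMUX_GPIO0__FUNC_I2S0_MCK>;
+ */
+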
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_I2S0_MCK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_SPI0_CLK_B (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_I2S2_MCK (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_CMFLASH0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI0_CK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_I2S0_BCK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_SPI0_CSB_B (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I2S2_BCK (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CMFLASH1 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI0_CS (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_I2S0_LRCK (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_SPI0_MO_B (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_I2S2_LRCK (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_CMFLASH2 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI0_MO (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_I2S0_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_SPI0_MI_B (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SRCLKENAI1 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_SCP_SPI0_MI (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_I2S3_DO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_I2S3_MCK (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_SPI1_CLK_B (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_DPI_DATA22 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_I2S3_BCK (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_SPI1_CSB_B (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_I2S1_BCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_DPI_DATA23 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_I2S3_LRCK (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_SPI1_MO_B (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_I2S1_LRCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_UART0_RXD (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_SSPM_URXD_AO (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_ADSP_UART_RX (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_I2S3_DO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_SPI1_MI_B (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_I2S1_DO (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_UART0_TXD (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_ADSP_UART_TX (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_SPI4_CLK_A (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S2_MCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_TDI (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_SPI4_CSB_A (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S2_BCK (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_LRCK (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_SPI4_MO_A (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S2_LRCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_CONN_MCU_TCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DI (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI4_MI_A (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S2_DI (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_CONN_MCU_TDO (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_CLKM0 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_CONN_MCU_TMS (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SRCLKENAI1 (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_CLKM1 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM0 (MTK_PIN_NO(15) | 4)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_CLKM2 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM1 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_CLKM3 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_PWM2 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A32 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_CMVREF0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_SPI2_CLK_B (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A26 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_CMVREF1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_ANT_SEL3 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_SPI2_CSB_B (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A2 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_CMVREF2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_ANT_SEL4 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_SPI2_MO_B (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A3 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_I2S0_MCK (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_I2S1_MCK (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_I2S3_MCK (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_ANT_SEL5 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_SPI2_MI_B (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_A4 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_I2S0_BCK (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_I2S1_BCK (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_I2S3_BCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_TDM_RX_LRCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_ANT_SEL6 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_DBG_MON_A5 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_I2S0_LRCK (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_I2S1_LRCK (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_I2S3_LRCK (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_TDM_RX_BCK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_ANT_SEL7 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_DBG_MON_A6 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_I2S0_DI (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_I2S1_DO (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_I2S3_DO (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_TDM_RX_MCK (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_DBG_MON_A7 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_I2S2_MCK (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_PCM_CLK (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPI4_CLK_B (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_TDM_RX_DATA0 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_DBG_MON_A8 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I2S2_BCK (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_PCM_SYNC (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SPI4_CSB_B (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_TDM_RX_DATA1 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_DBG_MON_A9 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_I2S2_LRCK (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_PCM_DI (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SPI4_MO_B (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_TDM_RX_DATA2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_DBG_MON_A10 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I2S2_DI (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_PCM_DO (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SPI4_MI_B (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_TDM_RX_DATA3 (MTK_PIN_NO(28) | 4)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_ANT_SEL0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_ANT_SEL1 (MTK_PIN_NO(30) | 1)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_ANT_SEL2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_SRCLKENAI1 (MTK_PIN_NO(31) | 3)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_UTXD0 (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_ADSP_UART_RX (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_TP_URXD1_AO (MTK_PIN_NO(32) | 4)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_UTXD0 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_URXD0 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_ADSP_UART_TX (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_TP_URXD2_AO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_SSPM_URXD_AO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_ADSP_UART_RX (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_CONN_UART0_RXD (MTK_PIN_NO(34) | 5)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_UTXD1 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_TP_UTXD2_AO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_ADSP_UART_TX (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_CONN_UART0_TXD (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(35) | 6)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_SPI0_CLK_A (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CLKM0 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_SCP_SPI0_CK (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SPINOR_CK (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A11 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_SPI0_CSB_A (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CLKM1 (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_PWM0 (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_SPI0_CS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_SPINOR_CS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A12 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPI0_MO_A (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_CLKM2 (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_PWM1 (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_SCP_SPI0_MO (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_SPINOR_IO0 (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A13 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SPI0_MI_A (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CLKM3 (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_PWM2 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_SPI0_MI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_SPINOR_IO1 (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A14 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_SPI1_CLK_A (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SCP_SPI1_CK (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_UCTS0 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_SPINOR_IO2 (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_TP_UCTS1_AO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A15 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_SPI1_CSB_A (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SCP_SPI1_CS (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_PWM0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_URTS0 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_SPINOR_IO3 (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_TP_URTS1_AO (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A16 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_SPI1_MO_A (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_SCP_SPI1_MO (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_PWM1 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_UCTS1 (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_TP_UCTS2_AO (MTK_PIN_NO(42) | 6)
+#define PINMUX_GPIO42__FUNC_DBG_MON_A17 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SPI1_MI_A (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_SCP_SPI1_MI (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_PWM2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_URTS1 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_TP_URTS2_AO (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_A18 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SPI2_CLK_A (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_SCP_SPI0_CK (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_DBG_MON_A19 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SPI2_CSB_A (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_SCP_SPI0_CS (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_DBG_MON_A20 (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SPI2_MO_A (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_SCP_SPI0_MO (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_DBG_MON_A21 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SPI2_MI_A (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_SPI0_MI (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_DBG_MON_A22 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SPI3_CLK (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_TP_URXD1_AO (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_TP_URXD2_AO (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_URXD1 (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_SCP_SPI0_CK (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SPI3_CSB (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_TP_UTXD1_AO (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_TP_UTXD2_AO (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_UTXD1 (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_I2S2_BCK (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_SCP_SPI0_CS (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SPI3_MO (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I2S2_LRCK (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_SCP_SPI0_MO (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SPI3_MI (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I2S2_DI (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_SCP_SPI0_MI (MTK_PIN_NO(51) | 6)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SPI5_CLK (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I2S2_MCK (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_I2S1_MCK (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_SCP_SPI1_CK (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_LVTS_26M (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_DFD_TCK_XI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_DBG_MON_B30 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_SPI5_CSB (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I2S2_BCK (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_I2S1_BCK (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_SCP_SPI1_CS (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_LVTS_FOUT (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_DFD_TDI (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_DBG_MON_B31 (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_SPI5_MO (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_I2S2_LRCK (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_I2S1_LRCK (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_SCP_SPI1_MO (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_LVTS_SCK (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_DFD_TDO (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_DBG_MON_A1 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_SPI5_MI (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_I2S2_DI (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_I2S1_DO (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_SCP_SPI1_MI (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_LVTS_SDO (MTK_PIN_NO(55) | 5)
+#define PINMUX_GPIO55__FUNC_DFD_TMS (MTK_PIN_NO(55) | 6)
+#define PINMUX_GPIO55__FUNC_DBG_MON_B32 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_I2S1_DO (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_I2S3_DO (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_DBG_MON_A23 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_I2S1_BCK (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_I2S3_BCK (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_DBG_MON_A24 (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_I2S1_LRCK (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_I2S3_LRCK (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_DBG_MON_A25 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_I2S1_MCK (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_I2S3_MCK (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_DBG_MON_A27 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_TDM_RX_LRCK (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_ANT_SEL3 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(60) | 5)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_TDM_RX_BCK (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_ANT_SEL4 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SPINOR_CK (MTK_PIN_NO(61) | 4)
+#define PINMUX_GPIO61__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(61) | 5)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_TDM_RX_MCK (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_ANT_SEL5 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SPINOR_CS (MTK_PIN_NO(62) | 4)
+#define PINMUX_GPIO62__FUNC_CONN_MCU_TDI (MTK_PIN_NO(62) | 5)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_TDM_RX_DATA0 (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_ANT_SEL6 (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_SPINOR_IO0 (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(63) | 5)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_TDM_RX_DATA1 (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_ANT_SEL7 (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_PWM0 (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_SPINOR_IO1 (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_CONN_MCU_TCK (MTK_PIN_NO(64) | 5)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_TDM_RX_DATA2 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_UCTS0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_PWM1 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_SPINOR_IO2 (MTK_PIN_NO(65) | 4)
+#define PINMUX_GPIO65__FUNC_CONN_MCU_TDO (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_TP_UCTS1_AO (MTK_PIN_NO(65) | 6)
+#define PINMUX_GPIO65__FUNC_TP_UCTS2_AO (MTK_PIN_NO(65) | 7)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_TDM_RX_DATA3 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_URTS0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_PWM2 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_SPINOR_IO3 (MTK_PIN_NO(66) | 4)
+#define PINMUX_GPIO66__FUNC_CONN_MCU_TMS (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_TP_URTS1_AO (MTK_PIN_NO(66) | 6)
+#define PINMUX_GPIO66__FUNC_TP_URTS2_AO (MTK_PIN_NO(66) | 7)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MSDC0_DSL (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MSDC0_CLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_MSDC0_CMD (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_MSDC0_RSTB (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_MSDC0_DAT0 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_MSDC0_DAT1 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MSDC0_DAT2 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MSDC0_DAT3 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MSDC0_DAT4 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MSDC0_DAT5 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MSDC0_DAT6 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_MSDC0_DAT7 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_KPCOL0 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_KPCOL1 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_PWM0 (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_CLKM0 (MTK_PIN_NO(80) | 4)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_KPROW0 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_PWM1 (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_CLKM1 (MTK_PIN_NO(81) | 4)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_KPROW1 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_PWM2 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_CLKM2 (MTK_PIN_NO(82) | 4)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_AP_GOOD (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_GPS_PPS (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(83) | 4)
+#define PINMUX_GPIO83__FUNC_DBG_MON_A28 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_MSDC1_CLK (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_UDI_TCK (MTK_PIN_NO(84) | 4)
+#define PINMUX_GPIO84__FUNC_CONN_DSP_JCK (MTK_PIN_NO(84) | 5)
+#define PINMUX_GPIO84__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(84) | 6)
+#define PINMUX_GPIO84__FUNC_DFD_TCK_XI (MTK_PIN_NO(84) | 7)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_MSDC1_CMD (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_UDI_TMS (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_CONN_DSP_JMS (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_DFD_TMS (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_MSDC1_DAT0 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_UDI_TDI (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_CONN_DSP_JDI (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_DFD_TDI (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_MSDC1_DAT1 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_UDI_TDO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_CONN_DSP_JDO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_DFD_TDO (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_MSDC1_DAT2 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_UDI_NTRST (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(88) | 6)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_MSDC1_DAT3 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(89) | 5)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_IDDIG_P0 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_PGD_HV_HSC_PWR4 (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_GDU_SUM_TROOP2_2 (MTK_PIN_NO(90) | 5)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PGD_HV_HSC_PWR5 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_GDU_TROOPS_DET0 (MTK_PIN_NO(91) | 5)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_VBUS_VALID_P0 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_PGD_DA_EFUSE_RDY (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_GDU_TROOPS_DET1 (MTK_PIN_NO(92) | 5)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_IDDIG_P1 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_PWM0 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_CLKM0 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_GDU_TROOPS_DET2 (MTK_PIN_NO(93) | 5)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_PWM1 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_CLKM1 (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_PGD_DA_PWRGD_RESET (MTK_PIN_NO(94) | 4)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_VBUS_VALID_P1 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_PWM2 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_CLKM2 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_PGD_DA_PWRGD_ENB (MTK_PIN_NO(95) | 4)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_DSI_TE (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_DBG_MON_A29 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_DISP_PWM (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_DBG_MON_A30 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_LCM_RST (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_DPI_PCLK (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_ANT_SEL0 (MTK_PIN_NO(99) | 5)
+#define PINMUX_GPIO99__FUNC_TP_GPIO0_AO (MTK_PIN_NO(99) | 6)
+#define PINMUX_GPIO99__FUNC_PGD_LV_LSC_PWR0 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_DPI_VSYNC (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_KPCOL2 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_ANT_SEL1 (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_TP_GPIO1_AO (MTK_PIN_NO(100) | 6)
+#define PINMUX_GPIO100__FUNC_PGD_LV_LSC_PWR1 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_DPI_HSYNC (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_KPROW2 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_ANT_SEL2 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_TP_GPIO2_AO (MTK_PIN_NO(101) | 6)
+#define PINMUX_GPIO101__FUNC_PGD_LV_LSC_PWR2 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_DPI_DE (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_ANT_SEL3 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_TP_GPIO3_AO (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_PGD_LV_LSC_PWR3 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_DPI_DATA0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_CLKM0 (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_ANT_SEL4 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_TP_GPIO4_AO (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_PGD_LV_LSC_PWR4 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_DPI_DATA1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_GPS_PPS (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UCTS2 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_CLKM1 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_ANT_SEL5 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_TP_GPIO5_AO (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_PGD_LV_LSC_PWR5 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_DPI_DATA2 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URTS2 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_CLKM2 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_ANT_SEL6 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_TP_GPIO6_AO (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_PGD_LV_HSC_PWR0 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_DPI_DATA3 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_TP_UTXD1_AO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_UTXD2 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_PWM0 (MTK_PIN_NO(106) | 4)
+#define PINMUX_GPIO106__FUNC_ANT_SEL7 (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_TP_GPIO7_AO (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_PGD_LV_HSC_PWR1 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DPI_DATA4 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_TP_URXD1_AO (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_URXD2 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_PWM1 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_GDU_SUM_TROOP0_0 (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_PGD_LV_HSC_PWR2 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_DPI_DATA5 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_TP_UCTS1_AO (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_UCTS0 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_PWM2 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_GDU_SUM_TROOP0_1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_PGD_LV_HSC_PWR3 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DPI_DATA6 (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_TP_URTS1_AO (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_URTS0 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_I2S0_DI (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_I2S2_DI (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_GDU_SUM_TROOP0_2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_PGD_LV_HSC_PWR4 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_DPI_DATA7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_TP_UCTS2_AO (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_UCTS1 (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_I2S1_BCK (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_GDU_SUM_TROOP1_0 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_PGD_LV_HSC_PWR5 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_DPI_DATA8 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_TP_URTS2_AO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_URTS1 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_I2S3_MCK (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_I2S1_MCK (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_GDU_SUM_TROOP1_1 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_PGD_HV_HSC_PWR0 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_DPI_DATA9 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_TP_URXD2_AO (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_URXD1 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_I2S3_LRCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_I2S1_LRCK (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_GDU_SUM_TROOP1_2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_PGD_HV_HSC_PWR1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_DPI_DATA10 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_TP_UTXD2_AO (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_UTXD1 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_I2S3_DO (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_I2S1_DO (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_GDU_SUM_TROOP2_0 (MTK_PIN_NO(113) | 6)
+#define PINMUX_GPIO113__FUNC_PGD_HV_HSC_PWR2 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_DPI_DATA11 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_GDU_SUM_TROOP2_1 (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_PGD_HV_HSC_PWR3 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_PCM_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_I2S0_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I2S2_BCK (MTK_PIN_NO(115) | 3)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_PCM_SYNC (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_I2S0_LRCK (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I2S2_LRCK (MTK_PIN_NO(116) | 3)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_PCM_DI (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I2S0_DI (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_I2S2_DI (MTK_PIN_NO(117) | 3)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_PCM_DO (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I2S0_MCK (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I2S2_MCK (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I2S3_DO (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I2S1_DO (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_JTMS_SEL1 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_UDI_TMS (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DFD_TMS (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_JTCK_SEL1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_UDI_TCK (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_DFD_TCK_XI (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(120) | 6)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_JTDI_SEL1 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_UDI_TDI (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DFD_TDI (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_JTDO_SEL1 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_UDI_TDO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_DFD_TDO (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(122) | 6)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_UDI_NTRST (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_CMMCLK0 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_CLKM0 (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_PWM0 (MTK_PIN_NO(124) | 3)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_CMMCLK1 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_CLKM1 (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_PWM1 (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_B0 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMMCLK2 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_CLKM2 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_PWM2 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_DBG_MON_B1 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_SCL0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_SCP_SCL0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_SCP_SCL1 (MTK_PIN_NO(127) | 5)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_SDA0 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_SCP_SDA0 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCP_SDA1 (MTK_PIN_NO(128) | 5)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_SCL1 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_SCP_SCL0 (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_SCL1 (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_DBG_MON_B4 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_SDA1 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_SCP_SDA0 (MTK_PIN_NO(130) | 4)
+#define PINMUX_GPIO130__FUNC_SCP_SDA1 (MTK_PIN_NO(130) | 5)
+#define PINMUX_GPIO130__FUNC_DBG_MON_B5 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_SCL2 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_CONN_UART0_TXD (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_SCP_SCL0 (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_SCL1 (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_DBG_MON_B6 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_SDA2 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_SSPM_URXD_AO (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_CONN_UART0_RXD (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_SCP_SDA0 (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_SCP_SDA1 (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_DBG_MON_B7 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_SCL3 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_SCP_SCL0 (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SCP_SCL1 (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_DBG_MON_B8 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_SDA3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_GPS_PPS (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_SCP_SDA0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_SCP_SDA1 (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_DBG_MON_B9 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_SCL4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_TP_UTXD1_AO (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_UTXD1 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_SCP_SCL0 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_SCP_SCL1 (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_DBG_MON_B10 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA4 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_TP_URXD1_AO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_URXD1 (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_SCP_SDA0 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SCP_SDA1 (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_DBG_MON_B11 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL5 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_UTXD2 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_UCTS1 (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_SCP_SCL0 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_SCP_SCL1 (MTK_PIN_NO(137) | 5)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_SDA5 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_URXD2 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_URTS1 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_SCP_SDA0 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_SCP_SDA1 (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SCL6 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_UTXD1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_TP_UTXD1_AO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_SCP_SCL0 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_SCP_SCL1 (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_DBG_MON_B12 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SDA6 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_URXD1 (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_TP_URXD1_AO (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_SCP_SDA0 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_SCP_SDA1 (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_DBG_MON_B13 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SCL7 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_TP_URTS1_AO (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_SCP_SCL0 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_SCP_SCL1 (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_UDI_TCK (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_DBG_MON_B14 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SDA7 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_UCTS0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_TP_UCTS1_AO (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_SCP_SDA0 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_SCP_SDA1 (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_SCL8 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SCP_SCL0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_SCP_SCL1 (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B16 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_SDA8 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_SCP_SDA0 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_SCP_SDA1 (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_DBG_MON_B17 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_SCL9 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_CMVREF1 (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_GPS_PPS (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_SCP_SCL0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_SCP_SCL1 (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_DBG_MON_B18 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_SDA9 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_CMVREF0 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_SCP_SDA0 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_SCP_SDA1 (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_DBG_MON_B19 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_CMFLASH0 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_LVTS_SDI (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_DPI_DATA12 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TP_GPIO0_AO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_ANT_SEL3 (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_DFD_TCK_XI (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_DBG_MON_B20 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_CMFLASH1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_LVTS_SCF (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_DPI_DATA13 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TP_GPIO1_AO (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_ANT_SEL4 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_DFD_TMS (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_DBG_MON_B21 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_CMFLASH2 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_CLKM0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_DPI_DATA14 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_TP_GPIO2_AO (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_ANT_SEL5 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_DFD_TDI (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_DBG_MON_B22 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_CLKM1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_DPI_DATA15 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_TP_GPIO3_AO (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_ANT_SEL6 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_DFD_TDO (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B23 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CLKM2 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_DPI_DATA16 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_TP_GPIO4_AO (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_ANT_SEL7 (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_UDI_TMS (MTK_PIN_NO(151) | 6)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B24 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_DPI_DATA17 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_TP_GPIO5_AO (MTK_PIN_NO(152) | 4)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_DPI_DATA18 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_TP_GPIO6_AO (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_UDI_TDI (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B26 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_PWM0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_CMVREF2 (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DPI_DATA19 (MTK_PIN_NO(154) | 3)
+#define PINMUX_GPIO154__FUNC_TP_GPIO7_AO (MTK_PIN_NO(154) | 4)
+#define PINMUX_GPIO154__FUNC_UDI_TDO (MTK_PIN_NO(154) | 6)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B27 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_PWM1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_DPI_DATA20 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_UDI_NTRST (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_B28 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_PWM2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_CMVREF0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_DPI_DATA21 (MTK_PIN_NO(156) | 3)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SRCLKENA0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SRCLKENA1 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_DBG_MON_A31 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(163) | 2)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_RTC32K_CK (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_WATCHDOG (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_AUD_CLK_MISO (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_I2S1_MCK (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I2S1_BCK (MTK_PIN_NO(167) | 3)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_I2S1_LRCK (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I2S1_DO (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_AUD_CLK_MISO (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I2S2_MCK (MTK_PIN_NO(170) | 3)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I2S2_BCK (MTK_PIN_NO(171) | 3)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_I2S2_LRCK (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_VOW_DAT_MISO (MTK_PIN_NO(172) | 4)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I2S2_DI (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_VOW_CLK_MISO (MTK_PIN_NO(173) | 4)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CONN_TOP_CLK (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_AUXIF_CLK (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DFD_TCK_XI (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_DBG_MON_B3 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CONN_TOP_DATA (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_AUXIF_ST (MTK_PIN_NO(175) | 2)
+#define PINMUX_GPIO175__FUNC_DFD_TMS (MTK_PIN_NO(175) | 3)
+#define PINMUX_GPIO175__FUNC_DBG_MON_B15 (MTK_PIN_NO(175) | 7)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_CONN_BT_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_DFD_TDI (MTK_PIN_NO(176) | 3)
+#define PINMUX_GPIO176__FUNC_DBG_MON_B2 (MTK_PIN_NO(176) | 7)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_CONN_BT_DATA (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_DFD_TDO (MTK_PIN_NO(177) | 3)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_CONN_HRST_B (MTK_PIN_NO(178) | 1)
+#define PINMUX_GPIO178__FUNC_UDI_TMS (MTK_PIN_NO(178) | 3)
+#define PINMUX_GPIO178__FUNC_DBG_MON_B25 (MTK_PIN_NO(178) | 7)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_CONN_WB_PTA (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_UDI_TCK (MTK_PIN_NO(179) | 3)
+#define PINMUX_GPIO179__FUNC_DBG_MON_B29 (MTK_PIN_NO(179) | 7)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_UDI_TDI (MTK_PIN_NO(180) | 3)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_UDI_TDO (MTK_PIN_NO(181) | 3)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_UDI_NTRST (MTK_PIN_NO(182) | 3)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_SPMI_SCL (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_SPMI_SDA (MTK_PIN_NO(184) | 1)
+
+#endif /* __MT8186_PINFUNC_H */
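
All of the PINMUX_GPIO<n>__FUNC_<signal> macros in this header (and in the mt8192 header added next) share one encoding via the common mt65xx.h binding header: MTK_PIN_NO() shifts the pin number into the upper bits, and the low bits carry the mux selector that is OR'd in (0 = plain GPIO, 1-7 = alternate functions). A minimal sketch of how a board device tree consumes one of these macros — the &pio label matches the usual MediaTek pinctrl node, but the group and node names here are purely illustrative, not taken from this patch:

    #include <dt-bindings/pinctrl/mt8192-pinfunc.h>

    &pio {
            /* Illustrative group: route pin 0 to its SPI6 clock function (mux mode 1). */
            spi6_pins: spi6-pins {
                    pins-clk {
                            pinmux = <PINMUX_GPIO0__FUNC_SPI6_CLK>;
                    };
            };
    };

The pinctrl driver decodes the single cell back into (pin, mode), which is why every pin needs one macro per selectable function rather than separate pin and mode cells.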
diff --git a/include/dt-bindings/pinctrl/mt8192-pinfunc.h b/include/dt-bindings/pinctrl/mt8192-pinfunc.h
new file mode 100644
index 000000000000..71ffe3a52578
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8192-pinfunc.h
@@ -0,0 +1,1344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __MT8192_PINFUNC_H
+#define __MT8192_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_SPI6_CLK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S5_MCK (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_PWM_0 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_TDM_LRCK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_MD_INT0 (MTK_PIN_NO(0) | 6)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S5_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_PWM_1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDM_BCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_DBG_MON_A9 (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_SPI6_MI (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S5_LRCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_PWM_2 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDM_MCK (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_DBG_MON_A10 (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_SPI6_MO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S5_DO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_PWM_3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDM_DATA0 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_DBG_MON_A11 (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_SPI4_A_CLK (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S2_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDM_DATA1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_PCM1_DI (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_IDDIG (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_SPI4_A_CSB (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_TDM_DATA2 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_USB_DRVVBUS (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_SPI4_A_MI (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_TDM_DATA3 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 6)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI4_A_MO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_WIFI_TXD (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_PCM1_DO0 (MTK_PIN_NO(7) | 6)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SRCLKENAI1 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_KPCOL2 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_CLKM1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_PCM1_DO1 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_DBG_MON_A12 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SRCLKENAI0 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_KPROW2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CMMCLK4 (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_CLKM3 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_PCM1_DO2 (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_A13 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_MSDC2_CLK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_SPI4_B_CLK (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S8_MCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_MD_INT0 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_TP_GPIO8_AO (MTK_PIN_NO(10) | 6)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_MSDC2_CMD (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_SPI4_B_CSB (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S8_BCK (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_PCIE_CLKREQ_N (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_TP_GPIO9_AO (MTK_PIN_NO(11) | 6)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_MSDC2_DAT3 (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_SPI4_B_MI (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S8_LRCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_DMIC1_CLK (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_TP_GPIO10_AO (MTK_PIN_NO(12) | 6)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_MSDC2_DAT0 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI4_B_MO (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S8_DI (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_DMIC1_DAT (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_TP_GPIO11_AO (MTK_PIN_NO(13) | 6)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_MSDC2_DAT2 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_SCL_6306 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_PCIE_PERESET_N (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_TP_GPIO12_AO (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_MSDC2_DAT1 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_SDA_6306 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PCIE_WAKE_N (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_TP_GPIO13_AO (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI1 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_TP_GPIO14_AO (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_KPCOL2 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_SPI7_A_MI (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_A0 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SRCLKENAI0 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_USB_DRVVBUS (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_TP_GPIO15_AO (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_KPROW2 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_SPI7_A_MO (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SRCLKENAI0 (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_C_MI (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_SPI1_B_MI (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_ANT_SEL10 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_B2 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SRCLKENAI1 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_C_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_SPI1_B_MO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_ANT_SEL11 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_B3 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SRCLKENAI0 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_C_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_SPI1_B_CLK (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_PWM_3 (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_ANT_SEL12 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_B4 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_SPI4_C_CSB (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_SPI1_B_CSB (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_IDDIG (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_SPI0_C_CLK (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_SPI7_B_CLK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_I2S7_BCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_I2S9_BCK (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_SCL_6306 (MTK_PIN_NO(22) | 6)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_SPI0_C_CSB (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_SPI7_B_CSB (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_I2S7_LRCK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_I2S9_LRCK (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_SDA_6306 (MTK_PIN_NO(23) | 6)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI1 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI0_C_MI (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SPI7_B_MI (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_I2S6_DI (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I2S8_DI (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_SPINOR_CS (MTK_PIN_NO(24) | 6)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_SPI0_C_MO (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPI7_B_MO (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_I2S7_DO (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_I2S9_DO (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_SPINOR_CK (MTK_PIN_NO(25) | 6)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_PWM_2 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_USB_DRVVBUS (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_SPI5_C_MI (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_I2S9_BCK (MTK_PIN_NO(26) | 5)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_PWM_3 (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_CLKM1 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SPI5_C_MO (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_I2S9_LRCK (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_SPINOR_IO0 (MTK_PIN_NO(27) | 6)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_PWM_0 (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_CLKM2 (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SPI5_C_CSB (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_I2S9_MCK (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_SPINOR_IO1 (MTK_PIN_NO(28) | 6)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PWM_1 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_CLKM3 (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_SPI5_C_CLK (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_I2S9_DO (MTK_PIN_NO(29) | 5)
+#define PINMUX_GPIO29__FUNC_SPINOR_IO2 (MTK_PIN_NO(29) | 6)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PWM_2 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_CLKM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_I2S7_MCK (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_I2S9_MCK (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_SPINOR_IO3 (MTK_PIN_NO(30) | 6)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_I2S3_MCK (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_I2S1_MCK (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_I2S5_MCK (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_SRCLKENAI0 (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_I2S0_MCK (MTK_PIN_NO(31) | 5)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I2S3_BCK (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I2S1_BCK (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I2S5_BCK (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_PCM0_CLK (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I2S0_BCK (MTK_PIN_NO(32) | 5)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_I2S3_LRCK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_I2S1_LRCK (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_I2S5_LRCK (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_PCM0_SYNC (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_I2S0_LRCK (MTK_PIN_NO(33) | 5)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I2S0_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I2S2_DI (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_I2S0_DI_A (MTK_PIN_NO(34) | 5)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_I2S3_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_I2S1_DO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_I2S5_DO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 4)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_SPI5_A_CLK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_DMIC1_CLK (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_MD_URXD0 (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_UCTS0 (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_URXD1 (MTK_PIN_NO(36) | 6)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_SPI5_A_CSB (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_DMIC1_DAT (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_MD_UTXD0 (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_URTS0 (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_UTXD1 (MTK_PIN_NO(37) | 6)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPI5_A_MI (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_DMIC_CLK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_MD_URXD1 (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_URXD0 (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_UCTS1 (MTK_PIN_NO(38) | 6)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SPI5_A_MO (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_DMIC_DAT (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_MD_UTXD1 (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_UTXD0 (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_URTS1 (MTK_PIN_NO(39) | 6)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_DISP_PWM (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A6 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_DSI_TE (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A7 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_LCM_RST (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_DBG_MON_A8 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_SCL_6306 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_ADSP_URXD0 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_PTA_RXD (MTK_PIN_NO(43) | 5)
+#define PINMUX_GPIO43__FUNC_SSPM_URXD_AO (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_B0 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_SDA_6306 (MTK_PIN_NO(44) | 3)
+#define PINMUX_GPIO44__FUNC_ADSP_UTXD0 (MTK_PIN_NO(44) | 4)
+#define PINMUX_GPIO44__FUNC_PTA_TXD (MTK_PIN_NO(44) | 5)
+#define PINMUX_GPIO44__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(44) | 6)
+#define PINMUX_GPIO44__FUNC_DBG_MON_B1 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_APU_JTAG_TDI (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(45) | 5)
+#define PINMUX_GPIO45__FUNC_LVTS_SCK (MTK_PIN_NO(45) | 6)
+#define PINMUX_GPIO45__FUNC_CONN_DSP_JDI (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_APU_JTAG_TMS (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_LVTS_SDI (MTK_PIN_NO(46) | 6)
+#define PINMUX_GPIO46__FUNC_CONN_DSP_JMS (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_APU_JTAG_TDO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_LVTS_SCF (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_CONN_DSP_JDO (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_APU_JTAG_TRST (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_LVTS_FOUT (MTK_PIN_NO(48) | 6)
+#define PINMUX_GPIO48__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_APU_JTAG_TCK (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_LVTS_SDO (MTK_PIN_NO(49) | 6)
+#define PINMUX_GPIO49__FUNC_CONN_DSP_JCK (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_LVTS_26M (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_MSDC1_CLK (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_PCM1_CLK (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_CONN_DSP_JCK (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_UDI_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_JTCK_SEL3 (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_MSDC1_CMD (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_PCM1_SYNC (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_CONN_DSP_JMS (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_UDI_TMS (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_JTMS_SEL3 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_MSDC1_DAT3 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_PCM1_DI (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(53) | 4)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_MSDC1_DAT0 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_PCM1_DO0 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_CONN_DSP_JDI (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_UDI_TDI (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_JTDI_SEL3 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_MSDC1_DAT2 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_PCM1_DO2 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_UDI_NTRST (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(55) | 5)
+#define PINMUX_GPIO55__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(55) | 6)
+#define PINMUX_GPIO55__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_MSDC1_DAT1 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_PCM1_DO1 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_CONN_DSP_JDO (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_UDI_TDO (MTK_PIN_NO(56) | 4)
+#define PINMUX_GPIO56__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(56) | 5)
+#define PINMUX_GPIO56__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(56) | 6)
+#define PINMUX_GPIO56__FUNC_JTDO_SEL3 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_MIPI2_D_SCLK (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_MIPI2_D_SDATA (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_MIPI_M_SCLK (MTK_PIN_NO(59) | 1)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_MIPI_M_SDATA (MTK_PIN_NO(60) | 1)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MD_UCNT_A_TGL (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_DIGRF_IRQ (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_BPI_BUS0 (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_PCIE_WAKE_N (MTK_PIN_NO(63) | 3)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_BPI_BUS1 (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_PCIE_PERESET_N (MTK_PIN_NO(64) | 3)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_BPI_BUS2 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_PCIE_CLKREQ_N (MTK_PIN_NO(65) | 3)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_BPI_BUS3 (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_BPI_BUS4 (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_BPI_BUS5 (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS6 (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_CONN_BPI_BUS6 (MTK_PIN_NO(69) | 2)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS7 (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_CONN_BPI_BUS7 (MTK_PIN_NO(70) | 2)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS8 (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_CONN_BPI_BUS8 (MTK_PIN_NO(71) | 2)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS9 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS9 (MTK_PIN_NO(72) | 2)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS10 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_CONN_BPI_BUS10 (MTK_PIN_NO(73) | 2)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS11_OLAT0 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_CONN_BPI_BUS11_OLAT0 (MTK_PIN_NO(74) | 2)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS12_OLAT1 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS12_OLAT1 (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS13_OLAT2 (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_CONN_BPI_BUS13_OLAT2 (MTK_PIN_NO(76) | 2)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_BUS14_OLAT3 (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_CONN_BPI_BUS14_OLAT3 (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_BUS15_OLAT4 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_CONN_BPI_BUS15_OLAT4 (MTK_PIN_NO(78) | 2)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS16_OLAT5 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_CONN_BPI_BUS16_OLAT5 (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS17_ANT0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_CONN_BPI_BUS17_ANT0 (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_PCIE_WAKE_N (MTK_PIN_NO(80) | 3)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS18_ANT1 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_CONN_BPI_BUS18_ANT1 (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_PCIE_PERESET_N (MTK_PIN_NO(81) | 3)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS19_ANT2 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_CONN_BPI_BUS19_ANT2 (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_PCIE_CLKREQ_N (MTK_PIN_NO(82) | 3)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS20_ANT3 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_CONN_BPI_BUS20_ANT3 (MTK_PIN_NO(83) | 2)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS21_ANT4 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS21_ANT4 (MTK_PIN_NO(84) | 2)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_MIPI1_D_SCLK (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_CONN_MIPI1_SCLK (MTK_PIN_NO(85) | 2)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_MIPI1_D_SDATA (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_CONN_MIPI1_SDATA (MTK_PIN_NO(86) | 2)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_MIPI0_D_SCLK (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_CONN_MIPI0_SCLK (MTK_PIN_NO(87) | 2)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_MIPI0_D_SDATA (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_CONN_MIPI0_SDATA (MTK_PIN_NO(88) | 2)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SPMI_SCL (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_SCL10 (MTK_PIN_NO(89) | 2)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_SPMI_SDA (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_SDA10 (MTK_PIN_NO(90) | 2)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_AP_GOOD (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_URXD0 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_MD_URXD0 (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_MD_URXD1 (MTK_PIN_NO(92) | 3)
+#define PINMUX_GPIO92__FUNC_SSPM_URXD_AO (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_CONN_UART0_RXD (MTK_PIN_NO(92) | 5)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_UTXD0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_MD_UTXD0 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_MD_UTXD1 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_CONN_UART0_TXD (MTK_PIN_NO(93) | 5)
+#define PINMUX_GPIO93__FUNC_WIFI_TXD (MTK_PIN_NO(93) | 6)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_URXD1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_ADSP_URXD0 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_MD32_0_RXD (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_SSPM_URXD_AO (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_TP_URXD1_AO (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_TP_URXD2_AO (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_UTXD1 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_ADSP_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD32_0_TXD (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_TP_UTXD1_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_TP_UTXD2_AO (MTK_PIN_NO(95) | 6)
+#define PINMUX_GPIO95__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(95) | 7)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_TDM_LRCK (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_I2S7_LRCK (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_I2S9_LRCK (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_DPI_D0 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_IO_JTAG_TDI (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_TDM_BCK (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S7_BCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_I2S9_BCK (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_DPI_D1 (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_TDM_MCK (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S7_MCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_I2S9_MCK (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_DPI_D2 (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TCK (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_TDM_DATA0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_I2S6_DI (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_I2S8_DI (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_DPI_D3 (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(99) | 5)
+#define PINMUX_GPIO99__FUNC_IO_JTAG_TDO (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_TDM_DATA1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_I2S7_DO (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_I2S9_DO (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_DPI_D4 (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_IO_JTAG_TMS (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_TDM_DATA2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DMIC1_CLK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SRCLKENAI0 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_DPI_D5 (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_CLKM0 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_DAP_MD32_SWD (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_TDM_DATA3 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_DMIC1_DAT (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_SRCLKENAI1 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_DPI_D6 (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SPI0_A_MI (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_SCP_SPI0_MI (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_DPI_D7 (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_DFD_TDO (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_JTDO_SEL1 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SPI0_A_CSB (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_SCP_SPI0_CS (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_DPI_D8 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_DFD_TMS (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_JTMS_SEL1 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SPI0_A_MO (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_SCP_SPI0_MO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_SCP_SDA0 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_DPI_D9 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_DFD_TDI (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_JTDI_SEL1 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SPI0_A_CLK (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_SCP_SPI0_CK (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_SCP_SCL0 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_DPI_D10 (MTK_PIN_NO(106) | 4)
+#define PINMUX_GPIO106__FUNC_DFD_TCK_XI (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_JTCK_SEL1 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_PWM_0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM2 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_DMIC_DAT (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_PWM_1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM3 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_DAP_SONIC_SWD (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_I2S1_MCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I2S3_MCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I2S2_MCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_DPI_DE (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_I2S2_MCK_A (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_SRCLKENAI0 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DAP_SONIC_SWCK (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I2S1_BCK (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I2S2_BCK (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_DPI_D11 (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_I2S2_BCK_A (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_CONN_MCU_TDO (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I2S1_LRCK (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_I2S2_LRCK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_DPI_VSYNC (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_I2S2_LRCK_A (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_CONN_MCU_TDI (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I2S2_DI (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I2S2_DI2 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_DPI_CK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_I2S2_DI_A (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_CONN_MCU_TMS (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I2S1_DO (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I2S3_DO (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I2S5_DO (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_DPI_HSYNC (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_I2S2_DI2 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_CONN_MCU_TCK (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_SPI2_MI (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SCP_SPI2_MI (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_PCM0_DI (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_SPI2_CSB (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SCP_SPI2_CS (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_PCM0_SYNC (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_SPI2_MO (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_SCP_SPI2_MO (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_SCP_SDA1 (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_PCM0_DO (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_SPI2_CLK (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_SCP_SPI2_CK (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_SCP_SCL1 (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_PCM0_CLK (MTK_PIN_NO(117) | 4)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_SCL1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_SCP_SCL0 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SCP_SCL1 (MTK_PIN_NO(118) | 3)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_SDA1 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_SCP_SDA0 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SCP_SDA1 (MTK_PIN_NO(119) | 3)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_SCL9 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_SCP_SCL0 (MTK_PIN_NO(120) | 2)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_SDA9 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_SCP_SDA0 (MTK_PIN_NO(121) | 2)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_SCL8 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_SCP_SDA0 (MTK_PIN_NO(122) | 2)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_SDA8 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_SCP_SCL0 (MTK_PIN_NO(123) | 2)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_SCL7 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DMIC1_CLK (MTK_PIN_NO(124) | 2)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA7 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_DMIC1_DAT (MTK_PIN_NO(125) | 2)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_PWM_2 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_TP_UCTS1_AO (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UCTS0 (MTK_PIN_NO(126) | 4)
+#define PINMUX_GPIO126__FUNC_SCL11 (MTK_PIN_NO(126) | 5)
+#define PINMUX_GPIO126__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_DBG_MON_A14 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_PWM_3 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_TP_URTS1_AO (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_URTS0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_SDA11 (MTK_PIN_NO(127) | 5)
+#define PINMUX_GPIO127__FUNC_DBG_MON_A15 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_CMFLASH2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_PWM_0 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_TP_UCTS2_AO (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UCTS1 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCL_6306 (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A16 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_CMFLASH3 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_PWM_1 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_TP_URTS2_AO (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_URTS1 (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SDA_6306 (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A17 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_CMVREF0 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_ANT_SEL10 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_SCP_JTAG0_TDO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_MD32_0_JTAG_TDO (MTK_PIN_NO(130) | 4)
+#define PINMUX_GPIO130__FUNC_SCL11 (MTK_PIN_NO(130) | 5)
+#define PINMUX_GPIO130__FUNC_SPI5_B_CLK (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A22 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_CMVREF1 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_ANT_SEL11 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_MD32_0_JTAG_TDI (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SDA11 (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_SPI5_B_MO (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_CMVREF2 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_ANT_SEL12 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG0_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_MD32_0_JTAG_TMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A28 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_CMVREF3 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG0_TCK (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_MD32_0_JTAG_TCK (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SPI5_B_CSB (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A23 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_CMVREF4 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_SCP_JTAG0_TRSTN (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_MD32_0_JTAG_TRST (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_DBG_MON_A26 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_PWM_0 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_SRCLKENAI1 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_MD_URXD0 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_MD32_0_RXD (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_DBG_MON_A29 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_CMMCLK3 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_CLKM1 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_MD_UTXD0 (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_MD32_0_TXD (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SPI5_B_MI (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_DBG_MON_A24 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_CMMCLK4 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_CLKM2 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_MD_URXD1 (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_CONN_UART0_RXD (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_DBG_MON_A27 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_CMMCLK5 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_CLKM3 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_MD_UTXD1 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_CONN_UART0_TXD (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_DBG_MON_A30 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SCL4 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_DBG_MON_A21 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SDA4 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_DBG_MON_A20 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SCL2 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_DBG_MON_A18 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SDA2 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_DBG_MON_A19 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_CMVREF0 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SPI3_CLK (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_ADSP_JTAG1_TDO (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A31 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_CMVREF1 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_SPI3_CSB (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_ADSP_JTAG1_TDI (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_SCP_JTAG1_TDI (MTK_PIN_NO(144) | 4)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_CMVREF2 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_SPI3_MI (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_ADSP_JTAG1_TMS (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_SCP_JTAG1_TMS (MTK_PIN_NO(145) | 4)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_CMVREF3 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_SPI3_MO (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_ADSP_JTAG1_TCK (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_SCP_JTAG1_TCK (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_DBG_MON_A32 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_CMVREF4 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_ADSP_JTAG1_TRSTN (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_SCP_JTAG1_TRSTN (MTK_PIN_NO(147) | 4)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_PWM_1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_AGPS_SYNC (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_CMMCLK5 (MTK_PIN_NO(148) | 3)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_CMMCLK0 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_CLKM0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(149) | 3)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_CMMCLK1 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CLKM1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_MD32_0_GPIO1 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_CMMCLK2 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CLKM2 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_MD32_0_GPIO2 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_KPROW1 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_PWM_2 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_IDDIG (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(152) | 6)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B9 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_KPROW0 (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B8 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_KPCOL0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B6 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_KPCOL1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_PWM_3 (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_USB_DRVVBUS (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(155) | 4)
+#define PINMUX_GPIO155__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_B7 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_SPI1_A_CLK (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_MRG_CLK (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_AGPS_SYNC (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_MD_URXD0 (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_UDI_TMS (MTK_PIN_NO(156) | 6)
+#define PINMUX_GPIO156__FUNC_DBG_MON_B10 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_SPI1_A_CSB (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_MRG_SYNC (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_MD_UTXD0 (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_UDI_TCK (MTK_PIN_NO(157) | 6)
+#define PINMUX_GPIO157__FUNC_DBG_MON_B11 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_SPI1_A_MI (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(158) | 2)
+#define PINMUX_GPIO158__FUNC_MRG_DI (MTK_PIN_NO(158) | 3)
+#define PINMUX_GPIO158__FUNC_PTA_RXD (MTK_PIN_NO(158) | 4)
+#define PINMUX_GPIO158__FUNC_MD_URXD1 (MTK_PIN_NO(158) | 5)
+#define PINMUX_GPIO158__FUNC_UDI_TDO (MTK_PIN_NO(158) | 6)
+#define PINMUX_GPIO158__FUNC_DBG_MON_B12 (MTK_PIN_NO(158) | 7)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_SPI1_A_MO (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(159) | 2)
+#define PINMUX_GPIO159__FUNC_MRG_DO (MTK_PIN_NO(159) | 3)
+#define PINMUX_GPIO159__FUNC_PTA_TXD (MTK_PIN_NO(159) | 4)
+#define PINMUX_GPIO159__FUNC_MD_UTXD1 (MTK_PIN_NO(159) | 5)
+#define PINMUX_GPIO159__FUNC_UDI_NTRST (MTK_PIN_NO(159) | 6)
+#define PINMUX_GPIO159__FUNC_DBG_MON_B13 (MTK_PIN_NO(159) | 7)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_SCL3 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_SCP_SCL1 (MTK_PIN_NO(160) | 3)
+#define PINMUX_GPIO160__FUNC_DBG_MON_B14 (MTK_PIN_NO(160) | 7)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SDA3 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SDA1 (MTK_PIN_NO(161) | 3)
+#define PINMUX_GPIO161__FUNC_DBG_MON_B15 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_ANT_SEL0 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_UDI_TDI (MTK_PIN_NO(162) | 6)
+#define PINMUX_GPIO162__FUNC_DBG_MON_B16 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_ANT_SEL1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_DBG_MON_B17 (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_ANT_SEL2 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_TP_URXD1_AO (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_UCTS0 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DBG_MON_B18 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_ANT_SEL3 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_TP_UTXD1_AO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_URTS0 (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_DBG_MON_B19 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL4 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_TP_URXD2_AO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_SRCLKENAI1 (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_UCTS1 (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_DBG_MON_B20 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_ANT_SEL5 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_TP_UTXD2_AO (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SRCLKENAI0 (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_URTS1 (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_DBG_MON_B21 (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_ANT_SEL6 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPI0_B_CLK (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_TP_UCTS1_AO (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_KPCOL2 (MTK_PIN_NO(168) | 4)
+#define PINMUX_GPIO168__FUNC_MD_UCTS0 (MTK_PIN_NO(168) | 5)
+#define PINMUX_GPIO168__FUNC_SCL11 (MTK_PIN_NO(168) | 6)
+#define PINMUX_GPIO168__FUNC_DBG_MON_B22 (MTK_PIN_NO(168) | 7)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_SPI0_B_CSB (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_TP_URTS1_AO (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_KPROW2 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_MD_URTS0 (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_SDA11 (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_DBG_MON_B23 (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_ANT_SEL8 (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_SPI0_B_MI (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_TP_UCTS2_AO (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_SRCLKENAI1 (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_MD_UCTS1 (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_DBG_MON_B24 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_ANT_SEL9 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_SPI0_B_MO (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_TP_URTS2_AO (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_SRCLKENAI0 (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_MD_URTS1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_DBG_MON_B25 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_CONN_TOP_CLK (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_AUXIF_CLK0 (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_DBG_MON_B29 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_CONN_TOP_DATA (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_AUXIF_ST0 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_DBG_MON_B30 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CONN_HRST_B (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_DBG_MON_B28 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CONN_WB_PTA (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_DBG_MON_B31 (MTK_PIN_NO(175) | 7)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_CONN_BT_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_AUXIF_CLK1 (MTK_PIN_NO(176) | 2)
+#define PINMUX_GPIO176__FUNC_DBG_MON_B26 (MTK_PIN_NO(176) | 7)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_CONN_BT_DATA (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_AUXIF_ST1 (MTK_PIN_NO(177) | 2)
+#define PINMUX_GPIO177__FUNC_DBG_MON_B27 (MTK_PIN_NO(177) | 7)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(179) | 2)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(180) | 2)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(181) | 1)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(182) | 1)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_MSDC0_CMD (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_MSDC0_DAT0 (MTK_PIN_NO(184) | 1)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_MSDC0_DAT2 (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_MSDC0_DAT4 (MTK_PIN_NO(186) | 1)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_MSDC0_DAT6 (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_MSDC0_DAT1 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_MSDC0_DAT5 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_MSDC0_DAT7 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_MSDC0_DSL (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(191) | 2)
+#define PINMUX_GPIO191__FUNC_IDDIG (MTK_PIN_NO(191) | 3)
+#define PINMUX_GPIO191__FUNC_DMIC_CLK (MTK_PIN_NO(191) | 4)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_MSDC0_CLK (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_USB_DRVVBUS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_DMIC_DAT (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_MSDC0_DAT3 (MTK_PIN_NO(193) | 1)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_MSDC0_RSTB (MTK_PIN_NO(194) | 1)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(195) | 2)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(196) | 1)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(197) | 1)
+#define PINMUX_GPIO197__FUNC_AUD_CLK_MISO (MTK_PIN_NO(197) | 2)
+#define PINMUX_GPIO197__FUNC_I2S2_MCK (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_I2S6_MCK (MTK_PIN_NO(197) | 4)
+#define PINMUX_GPIO197__FUNC_I2S8_MCK (MTK_PIN_NO(197) | 5)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(198) | 1)
+#define PINMUX_GPIO198__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(198) | 2)
+#define PINMUX_GPIO198__FUNC_I2S2_BCK (MTK_PIN_NO(198) | 3)
+#define PINMUX_GPIO198__FUNC_I2S6_BCK (MTK_PIN_NO(198) | 4)
+#define PINMUX_GPIO198__FUNC_I2S8_BCK (MTK_PIN_NO(198) | 5)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(199) | 1)
+#define PINMUX_GPIO199__FUNC_I2S2_DI2 (MTK_PIN_NO(199) | 3)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_SCL6 (MTK_PIN_NO(200) | 1)
+#define PINMUX_GPIO200__FUNC_SCP_SCL1 (MTK_PIN_NO(200) | 3)
+#define PINMUX_GPIO200__FUNC_SCL_6306 (MTK_PIN_NO(200) | 4)
+#define PINMUX_GPIO200__FUNC_DBG_MON_A4 (MTK_PIN_NO(200) | 7)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_SDA6 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_SCP_SDA1 (MTK_PIN_NO(201) | 3)
+#define PINMUX_GPIO201__FUNC_SDA_6306 (MTK_PIN_NO(201) | 4)
+#define PINMUX_GPIO201__FUNC_DBG_MON_A5 (MTK_PIN_NO(201) | 7)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_SCL5 (MTK_PIN_NO(202) | 1)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define PINMUX_GPIO203__FUNC_SDA5 (MTK_PIN_NO(203) | 1)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define PINMUX_GPIO204__FUNC_SCL0 (MTK_PIN_NO(204) | 1)
+#define PINMUX_GPIO204__FUNC_SPI7_A_CLK (MTK_PIN_NO(204) | 6)
+#define PINMUX_GPIO204__FUNC_DBG_MON_A2 (MTK_PIN_NO(204) | 7)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define PINMUX_GPIO205__FUNC_SDA0 (MTK_PIN_NO(205) | 1)
+#define PINMUX_GPIO205__FUNC_SPI7_A_CSB (MTK_PIN_NO(205) | 6)
+#define PINMUX_GPIO205__FUNC_DBG_MON_A3 (MTK_PIN_NO(205) | 7)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define PINMUX_GPIO206__FUNC_SRCLKENA0 (MTK_PIN_NO(206) | 1)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define PINMUX_GPIO207__FUNC_SRCLKENA1 (MTK_PIN_NO(207) | 1)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define PINMUX_GPIO208__FUNC_WATCHDOG (MTK_PIN_NO(208) | 1)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define PINMUX_GPIO209__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(209) | 1)
+#define PINMUX_GPIO209__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(209) | 2)
+
+#define PINMUX_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0)
+#define PINMUX_GPIO210__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(210) | 1)
+
+#define PINMUX_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0)
+#define PINMUX_GPIO211__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(211) | 1)
+#define PINMUX_GPIO211__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(211) | 2)
+
+#define PINMUX_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0)
+#define PINMUX_GPIO212__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(212) | 1)
+
+#define PINMUX_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0)
+#define PINMUX_GPIO213__FUNC_RTC32K_CK (MTK_PIN_NO(213) | 1)
+
+#define PINMUX_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0)
+#define PINMUX_GPIO214__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(214) | 1)
+#define PINMUX_GPIO214__FUNC_I2S1_MCK (MTK_PIN_NO(214) | 3)
+#define PINMUX_GPIO214__FUNC_I2S7_MCK (MTK_PIN_NO(214) | 4)
+#define PINMUX_GPIO214__FUNC_I2S9_MCK (MTK_PIN_NO(214) | 5)
+
+#define PINMUX_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0)
+#define PINMUX_GPIO215__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(215) | 1)
+#define PINMUX_GPIO215__FUNC_I2S1_BCK (MTK_PIN_NO(215) | 3)
+#define PINMUX_GPIO215__FUNC_I2S7_BCK (MTK_PIN_NO(215) | 4)
+#define PINMUX_GPIO215__FUNC_I2S9_BCK (MTK_PIN_NO(215) | 5)
+
+#define PINMUX_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0)
+#define PINMUX_GPIO216__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(216) | 1)
+#define PINMUX_GPIO216__FUNC_I2S1_LRCK (MTK_PIN_NO(216) | 3)
+#define PINMUX_GPIO216__FUNC_I2S7_LRCK (MTK_PIN_NO(216) | 4)
+#define PINMUX_GPIO216__FUNC_I2S9_LRCK (MTK_PIN_NO(216) | 5)
+
+#define PINMUX_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0)
+#define PINMUX_GPIO217__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(217) | 1)
+#define PINMUX_GPIO217__FUNC_I2S1_DO (MTK_PIN_NO(217) | 3)
+#define PINMUX_GPIO217__FUNC_I2S7_DO (MTK_PIN_NO(217) | 4)
+#define PINMUX_GPIO217__FUNC_I2S9_DO (MTK_PIN_NO(217) | 5)
+
+#define PINMUX_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0)
+#define PINMUX_GPIO218__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(218) | 1)
+#define PINMUX_GPIO218__FUNC_VOW_DAT_MISO (MTK_PIN_NO(218) | 2)
+#define PINMUX_GPIO218__FUNC_I2S2_LRCK (MTK_PIN_NO(218) | 3)
+#define PINMUX_GPIO218__FUNC_I2S6_LRCK (MTK_PIN_NO(218) | 4)
+#define PINMUX_GPIO218__FUNC_I2S8_LRCK (MTK_PIN_NO(218) | 5)
+
+#define PINMUX_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0)
+#define PINMUX_GPIO219__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(219) | 1)
+#define PINMUX_GPIO219__FUNC_VOW_CLK_MISO (MTK_PIN_NO(219) | 2)
+#define PINMUX_GPIO219__FUNC_I2S2_DI (MTK_PIN_NO(219) | 3)
+#define PINMUX_GPIO219__FUNC_I2S6_DI (MTK_PIN_NO(219) | 4)
+#define PINMUX_GPIO219__FUNC_I2S8_DI (MTK_PIN_NO(219) | 5)
+
+#endif /* __MT8192_PINFUNC_H */
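
For reference between the two headers: every PINMUX_* constant in these files packs a pin number and a mux function index (0 through 7) into a single cell via the MTK_PIN_NO() helper from the shared mt65xx.h binding header. Below is a minimal decode sketch, assuming the conventional mt65xx.h layout (pin number shifted left by 8, function index in the low bits); the MTK_GET_PIN_* helper definitions shown here are illustrative assumptions, not quoted from that header:

/* Sketch: decode one pinmux cell, assuming mt65xx.h's shift-by-8 layout. */
#include <stdio.h>

#define MTK_PIN_NO(x)       ((x) << 8)   /* assumed packing of the pin number */
#define MTK_GET_PIN_NO(x)   ((x) >> 8)   /* assumed inverse helpers */
#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)

/* Taken verbatim from the mt8192 header above. */
#define PINMUX_GPIO156__FUNC_SPI1_A_CLK (MTK_PIN_NO(156) | 1)

int main(void)
{
	unsigned int cell = PINMUX_GPIO156__FUNC_SPI1_A_CLK;

	/* Prints: pin 156 func 1 */
	printf("pin %u func %u\n", MTK_GET_PIN_NO(cell), MTK_GET_PIN_FUNC(cell));
	return 0;
}

A board device tree references the same constant directly, e.g. pinmux = <PINMUX_GPIO156__FUNC_SPI1_A_CLK>;, and the pinctrl driver recovers the pin number and function selector with the corresponding helpers.
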
diff --git a/include/dt-bindings/pinctrl/mt8195-pinfunc.h b/include/dt-bindings/pinctrl/mt8195-pinfunc.h
new file mode 100644
index 000000000000..666331bb9b40
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8195-pinfunc.h
@@ -0,0 +1,962 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ */
+
+#ifndef __MT8195_PINFUNC_H
+#define __MT8195_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_MSDC2_CMD (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDMIN_MCK (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_CLKM0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_PERSTN_1 (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_IDDIG_1P (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_DMIC4_CLK (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_MSDC2_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TDMIN_DI (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CLKM1 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_CLKREQN_1 (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_DMIC4_DAT (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_MSDC2_DAT3 (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TDMIN_LRCK (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_CLKM2 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_WAKEN_1 (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_DMIC2_CLK (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_MSDC2_DAT0 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TDMIN_BCK (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_CLKM3 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_DMIC2_DAT (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_MSDC2_DAT2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SPDIF_IN1 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_UTXD3 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_SDA2 (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_IDDIG_2P (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_MSDC2_DAT1 (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPDIF_IN0 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_URXD3 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_SCL2 (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_DP_TX_HPD (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_I2SO1_D4 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_UTXD4 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_CMVREF3 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_DMIC3_CLK (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_EDP_TX_HPD (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_I2SO1_D5 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_URXD4 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_CMVREF4 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_DMIC3_DAT (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SDA0 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_PWM_0 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPDIF_OUT (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_LVTS_FOUT (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_DBG_MON_A0 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SCL0 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_PWM_1 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_IR_IN (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_LVTS_SDO (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_A1 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_SDA1 (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_PWM_2 (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_ADSP_URXD0 (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_SPDIF_IN1 (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_LVTS_SCF (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_DBG_MON_A2 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_SCL1 (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_PWM_3 (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_ADSP_UTXD0 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_SPDIF_IN0 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_LVTS_SCK (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_DBG_MON_A3 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_SDA2 (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_DMIC3_DAT_R (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2SO1_D6 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_LVTS_SDI (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_DBG_MON_A4 (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_SCL2 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_DMIC4_DAT_R (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2SO1_D7 (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_DBG_MON_A5 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_SDA3 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_DMIC3_DAT (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_TDMIN_MCK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_DBG_MON_A6 (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_SCL3 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_DMIC3_CLK (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_TDMIN_DI (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_DBG_MON_A7 (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SDA4 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_DMIC4_DAT (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_TDMIN_LRCK (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_DBG_MON_A8 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SCL4 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_DMIC4_CLK (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_TDMIN_BCK (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A9 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DP_TX_HPD (MTK_PIN_NO(18) | 1)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_WAKEN (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SCP_SDA1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_MD32_0_JTAG_TCK (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_SDA6 (MTK_PIN_NO(19) | 5)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_PERSTN (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SCP_SCL1 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_MD32_0_JTAG_TMS (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_SCL6 (MTK_PIN_NO(20) | 5)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_CLKREQN (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_MD32_0_JTAG_TDI (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_SCP_SDA1 (MTK_PIN_NO(21) | 5)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_CMMCLK0 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_PERSTN_1 (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_SCP_SCL1 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_CMMCLK1 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_CLKREQN_1 (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_SDA4 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_DMIC1_CLK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_SCP_SDA0 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_MD32_0_GPIO1 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_CMMCLK2 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_WAKEN_1 (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SCL4 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_DMIC1_DAT (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_SCP_SCL0 (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_LVTS_26M (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_CMMRST (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_CMMCLK3 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPDIF_OUT (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_SDA6 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_MD32_0_JTAG_TRST (MTK_PIN_NO(25) | 6)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_CMMPDN (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_CMMCLK4 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_IR_IN (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_SCL6 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_MD32_0_JTAG_TDO (MTK_PIN_NO(26) | 6)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_HDMIRX20_HTPLG (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_CMFLASH0 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_MD32_0_TXD (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_TP_UTXD2_AO (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_UCTS2 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_A18 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_HDMIRX20_PWR5V (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_CMFLASH1 (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_MD32_0_RXD (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_TP_URXD2_AO (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_URTS2 (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_DBG_MON_A19 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_HDMIRX20_SCL (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_CMFLASH2 (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_SCL5 (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_TP_URTS2_AO (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_UTXD2 (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A20 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_HDMIRX20_SDA (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_CMFLASH3 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_SDA5 (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_TP_UCTS2_AO (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_URXD2 (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A21 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_HDMITX20_PWR5V (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_DMIC1_DAT_R (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_PERSTN (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A22 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_HDMITX20_HTPLG (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_CLKREQN (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_DBG_MON_A23 (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_HDMITX20_CEC (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_CMVREF0 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_WAKEN (MTK_PIN_NO(33) | 3)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_HDMITX20_SCL (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_CMVREF1 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_SCL7 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_SCL6 (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_DBG_MON_A24 (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_HDMITX20_SDA (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_CMVREF2 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_SDA7 (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_SDA6 (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_DBG_MON_A25 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_RTC32K_CK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A27 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_WATCHDOG (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A28 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SRCLKENA0 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A29 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SRCLKENA1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_DMIC2_DAT_R (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A30 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SPIM3_CSB (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A31 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SPIM3_CLK (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A32 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_SPIM3_MO (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_DBG_MON_B0 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_SPIM3_MI (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_DBG_MON_B1 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SPMI_M_SCL (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_I2SI00_DATA1 (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_SCL5 (MTK_PIN_NO(44) | 3)
+#define PINMUX_GPIO44__FUNC_UTXD5 (MTK_PIN_NO(44) | 4)
+#define PINMUX_GPIO44__FUNC_DBG_MON_B2 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SPMI_M_SDA (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_I2SI00_DATA2 (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_SDA5 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_URXD5 (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_DBG_MON_B3 (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_I2SIN_MCK (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_I2SI00_DATA3 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_SPLIN_MCK (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_DBG_MON_B4 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_I2SIN_BCK (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_I2SIN0_BCK (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_SPLIN_LRCK (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_DBG_MON_B5 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_I2SIN_WS (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_I2SIN0_LRCK (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_SPLIN_BCK (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_DBG_MON_B6 (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_I2SIN_D0 (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_I2SI00_DATA0 (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_SPLIN_D0 (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_DBG_MON_B7 (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_I2SO1_MCK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I2SI5_D0 (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_I2SO4_MCK (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_DBG_MON_B8 (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_I2SO1_BCK (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I2SI5_BCK (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_DBG_MON_B9 (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_I2SO1_WS (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I2SI5_WS (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_DBG_MON_B10 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_I2SO1_D0 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I2SI5_MCK (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_DBG_MON_B11 (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_I2SO1_D1 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_I2SI01_DATA1 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_SPLIN_D1 (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_I2SO4_BCK (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_DBG_MON_B12 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_I2SO1_D2 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_I2SI01_DATA2 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_SPLIN_D2 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_I2SO4_WS (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_DBG_MON_B13 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_I2SO1_D3 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_I2SI01_DATA3 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_SPLIN_D3 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_I2SO4_D0 (MTK_PIN_NO(56) | 4)
+#define PINMUX_GPIO56__FUNC_DBG_MON_B14 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_I2SO2_MCK (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_I2SO1_D12 (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_LCM1_RST (MTK_PIN_NO(57) | 3)
+#define PINMUX_GPIO57__FUNC_DBG_MON_B15 (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_I2SO2_BCK (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_I2SO1_D13 (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_I2SIN1_BCK (MTK_PIN_NO(58) | 3)
+#define PINMUX_GPIO58__FUNC_DBG_MON_B16 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_I2SO2_WS (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_I2SO1_D14 (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_I2SIN1_LRCK (MTK_PIN_NO(59) | 3)
+#define PINMUX_GPIO59__FUNC_DBG_MON_B17 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_I2SO2_D0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_I2SO1_D15 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_I2SI01_DATA0 (MTK_PIN_NO(60) | 3)
+#define PINMUX_GPIO60__FUNC_DBG_MON_B18 (MTK_PIN_NO(60) | 7)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_DMIC1_CLK (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_I2SO2_BCK (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SCP_SPI2_CK (MTK_PIN_NO(61) | 3)
+#define PINMUX_GPIO61__FUNC_DBG_MON_B19 (MTK_PIN_NO(61) | 7)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_DMIC1_DAT (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_I2SO2_WS (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SCP_SPI2_MI (MTK_PIN_NO(62) | 3)
+#define PINMUX_GPIO62__FUNC_DBG_MON_B20 (MTK_PIN_NO(62) | 7)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_DMIC2_CLK (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_VBUSVALID (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_SCP_SPI2_MO (MTK_PIN_NO(63) | 3)
+#define PINMUX_GPIO63__FUNC_SCP_SCL2 (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(63) | 5)
+#define PINMUX_GPIO63__FUNC_JTDO_SEL1 (MTK_PIN_NO(63) | 6)
+#define PINMUX_GPIO63__FUNC_DBG_MON_B21 (MTK_PIN_NO(63) | 7)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_DMIC2_DAT (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_VBUSVALID_1P (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_SCP_SDA2 (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_DBG_MON_B22 (MTK_PIN_NO(64) | 7)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_PCM_DO (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_AUXIF_ST0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_UCTS2 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_SCP_JTAG1_TMS (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_JTMS_SEL1 (MTK_PIN_NO(65) | 6)
+#define PINMUX_GPIO65__FUNC_DBG_MON_B23 (MTK_PIN_NO(65) | 7)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_PCM_CLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_AUXIF_CLK0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_URTS2 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_SCP_JTAG1_TCK (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_JTCK_SEL1 (MTK_PIN_NO(66) | 6)
+#define PINMUX_GPIO66__FUNC_DBG_MON_B24 (MTK_PIN_NO(66) | 7)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_PCM_DI (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_AUXIF_ST1 (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_UTXD2 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_SCP_JTAG1_TRSTN (MTK_PIN_NO(67) | 5)
+#define PINMUX_GPIO67__FUNC_JTRSTn_SEL1 (MTK_PIN_NO(67) | 6)
+#define PINMUX_GPIO67__FUNC_DBG_MON_B25 (MTK_PIN_NO(67) | 7)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_PCM_SYNC (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_AUXIF_CLK1 (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_URXD2 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_SCP_JTAG1_TDI (MTK_PIN_NO(68) | 5)
+#define PINMUX_GPIO68__FUNC_JTDI_SEL1 (MTK_PIN_NO(68) | 6)
+#define PINMUX_GPIO68__FUNC_DBG_MON_B26 (MTK_PIN_NO(68) | 7)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_I2SIN2_BCK (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_PWM_0 (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_WAKEN (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_DBG_MON_B27 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_I2SIN2_LRCK (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_PWM_1 (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_PERSTN (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_DBG_MON_B28 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_IDDIG_2P (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_PWM_2 (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_CLKREQN (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_DBG_MON_B29 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(72) | 2)
+#define PINMUX_GPIO72__FUNC_PWM_3 (MTK_PIN_NO(72) | 3)
+#define PINMUX_GPIO72__FUNC_PERSTN_1 (MTK_PIN_NO(72) | 4)
+#define PINMUX_GPIO72__FUNC_DBG_MON_B30 (MTK_PIN_NO(72) | 7)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_I2SI02_DATA0 (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_CLKREQN_1 (MTK_PIN_NO(73) | 4)
+#define PINMUX_GPIO73__FUNC_VOW_DAT_MISO (MTK_PIN_NO(73) | 5)
+#define PINMUX_GPIO73__FUNC_DBG_MON_B31 (MTK_PIN_NO(73) | 7)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_I2SI02_DATA1 (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_WAKEN_1 (MTK_PIN_NO(74) | 4)
+#define PINMUX_GPIO74__FUNC_VOW_CLK_MISO (MTK_PIN_NO(74) | 5)
+#define PINMUX_GPIO74__FUNC_DBG_MON_B32 (MTK_PIN_NO(74) | 7)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_I2SI02_DATA2 (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_I2SI02_DATA3 (MTK_PIN_NO(76) | 2)
+#define PINMUX_GPIO76__FUNC_DBG_MON_A26 (MTK_PIN_NO(76) | 7)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_DGI_D0 (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_DPI_D0 (MTK_PIN_NO(77) | 2)
+#define PINMUX_GPIO77__FUNC_I2SI4_MCK (MTK_PIN_NO(77) | 3)
+#define PINMUX_GPIO77__FUNC_SPIM4_CLK (MTK_PIN_NO(77) | 4)
+#define PINMUX_GPIO77__FUNC_GBE_TXD3 (MTK_PIN_NO(77) | 5)
+#define PINMUX_GPIO77__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(77) | 6)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_DGI_D1 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_DPI_D1 (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_I2SI4_BCK (MTK_PIN_NO(78) | 3)
+#define PINMUX_GPIO78__FUNC_SPIM4_MO (MTK_PIN_NO(78) | 4)
+#define PINMUX_GPIO78__FUNC_GBE_TXD2 (MTK_PIN_NO(78) | 5)
+#define PINMUX_GPIO78__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(78) | 6)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_DGI_D2 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_DPI_D2 (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_I2SI4_WS (MTK_PIN_NO(79) | 3)
+#define PINMUX_GPIO79__FUNC_SPIM4_CSB (MTK_PIN_NO(79) | 4)
+#define PINMUX_GPIO79__FUNC_GBE_TXD1 (MTK_PIN_NO(79) | 5)
+#define PINMUX_GPIO79__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(79) | 6)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_DGI_D3 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_DPI_D3 (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_I2SI4_D0 (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_SPIM4_MI (MTK_PIN_NO(80) | 4)
+#define PINMUX_GPIO80__FUNC_GBE_TXD0 (MTK_PIN_NO(80) | 5)
+#define PINMUX_GPIO80__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(80) | 6)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_DGI_D4 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_DPI_D4 (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_I2SI5_MCK (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_SPIM5_CLK (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_GBE_RXD3 (MTK_PIN_NO(81) | 5)
+#define PINMUX_GPIO81__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(81) | 6)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_DGI_D5 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_DPI_D5 (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_I2SI5_BCK (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_SPIM5_MO (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_GBE_RXD2 (MTK_PIN_NO(82) | 5)
+#define PINMUX_GPIO82__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(82) | 6)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_DGI_D6 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_DPI_D6 (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_I2SI5_WS (MTK_PIN_NO(83) | 3)
+#define PINMUX_GPIO83__FUNC_SPIM5_CSB (MTK_PIN_NO(83) | 4)
+#define PINMUX_GPIO83__FUNC_GBE_RXD1 (MTK_PIN_NO(83) | 5)
+#define PINMUX_GPIO83__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(83) | 6)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_DGI_D7 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_DPI_D7 (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_I2SI5_D0 (MTK_PIN_NO(84) | 3)
+#define PINMUX_GPIO84__FUNC_SPIM5_MI (MTK_PIN_NO(84) | 4)
+#define PINMUX_GPIO84__FUNC_GBE_RXD0 (MTK_PIN_NO(84) | 5)
+#define PINMUX_GPIO84__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(84) | 6)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_DGI_D8 (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_DPI_D8 (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_I2SO4_MCK (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_GBE_TXC (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(85) | 6)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_DGI_D9 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_DPI_D9 (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_I2SO4_BCK (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_GBE_RXC (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(86) | 6)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_DGI_D10 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_DPI_D10 (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I2SO4_WS (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_GBE_RXDV (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(87) | 6)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_DGI_D11 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_DPI_D11 (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_I2SO4_D0 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_GBE_TXEN (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(88) | 6)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_DGI_D12 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_DPI_D12 (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_MSDC2_CMD_A (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_I2SO5_BCK (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_GBE_MDC (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(89) | 6)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_DGI_D13 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_DPI_D13 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_MSDC2_CLK_A (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_I2SO5_WS (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_GBE_MDIO (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(90) | 6)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_DGI_D14 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_DPI_D14 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_MSDC2_DAT3_A (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_I2SO5_D0 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_GBE_TXER (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_DGI_D15 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_DPI_D15 (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_MSDC2_DAT0_A (MTK_PIN_NO(92) | 3)
+#define PINMUX_GPIO92__FUNC_I2SO2_D1 (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_GBE_RXER (MTK_PIN_NO(92) | 5)
+#define PINMUX_GPIO92__FUNC_CCU0_JTAG_TDO (MTK_PIN_NO(92) | 6)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_DGI_HSYNC (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_DPI_HSYNC (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_MSDC2_DAT2_A (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_I2SO2_D2 (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_GBE_COL (MTK_PIN_NO(93) | 5)
+#define PINMUX_GPIO93__FUNC_CCU0_JTAG_TMS (MTK_PIN_NO(93) | 6)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_DGI_VSYNC (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_DPI_VSYNC (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_MSDC2_DAT1_A (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_I2SO2_D3 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_GBE_INTR (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_CCU0_JTAG_TDI (MTK_PIN_NO(94) | 6)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_DGI_DE (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_DPI_DE (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_UTXD2 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_I2SIN_D1 (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_CCU0_JTAG_TCK (MTK_PIN_NO(95) | 6)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_DGI_CK (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_DPI_CK (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_URXD2 (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_I2SO5_MCK (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_I2SIN_D2 (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CCU0_JTAG_TRST (MTK_PIN_NO(96) | 6)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_DISP_PWM0 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(97) | 2)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_UTXD0 (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_URXD0 (MTK_PIN_NO(99) | 1)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_URTS1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_DSI_TE (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_I2SO1_D8 (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_KPROW2 (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_PWM_0 (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_TP_URTS1_AO (MTK_PIN_NO(100) | 6)
+#define PINMUX_GPIO100__FUNC_I2SIN_D0 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_UCTS1 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DSI1_TE (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_I2SO1_D9 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_KPCOL2 (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_PWM_1 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_TP_UCTS1_AO (MTK_PIN_NO(101) | 6)
+#define PINMUX_GPIO101__FUNC_I2SIN_D1 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_UTXD1 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_VBUSVALID_2P (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_I2SO1_D10 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_TP_UTXD1_AO (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_MD32_1_TXD (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_I2SIN_D2 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_URXD1 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_VBUSVALID_3P (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I2SO1_D11 (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_SSPM_URXD_AO (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_TP_URXD1_AO (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_MD32_1_RXD (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_I2SIN_D3 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_KPROW0 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_DISP_PWM1 (MTK_PIN_NO(104) | 2)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_KPROW1 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_EDP_TX_HPD (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_PWM_2 (MTK_PIN_NO(105) | 3)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_KPCOL0 (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_KPCOL1 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_DSI1_TE (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_PWM_3 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SCP_SCL3 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_I2SIN_MCK (MTK_PIN_NO(107) | 5)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_LCM_RST (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_KPCOL1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_SCP_SDA3 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_I2SIN_BCK (MTK_PIN_NO(108) | 5)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DSI_TE (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I2SIN_D3 (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I2SIN_WS (MTK_PIN_NO(109) | 5)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_MSDC1_CMD (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_JTMS_SEL3 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_UDI_TMS (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_CCU1_JTAG_TMS (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_MSDC1_CLK (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_JTCK_SEL3 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_UDI_TCK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_CCU1_JTAG_TCK (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_MSDC1_DAT0 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_JTDI_SEL3 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_UDI_TDI (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_I2SO2_D0 (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_CCU1_JTAG_TDI (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_MSDC1_DAT1 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_JTDO_SEL3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_UDI_TDO (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_I2SO2_D1 (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_CCU1_JTAG_TDO (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_MSDC1_DAT2 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_JTRSTn_SEL3 (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_UDI_NTRST (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_I2SO2_D2 (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_CCU1_JTAG_TRST (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_MSDC1_DAT3 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_I2SO2_D3 (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_MD32_1_GPIO2 (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_MSDC0_DAT7 (MTK_PIN_NO(116) | 1)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_MSDC0_DAT6 (MTK_PIN_NO(117) | 1)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_MSDC0_DAT5 (MTK_PIN_NO(118) | 1)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_MSDC0_DAT4 (MTK_PIN_NO(119) | 1)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_MSDC0_RSTB (MTK_PIN_NO(120) | 1)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_MSDC0_CMD (MTK_PIN_NO(121) | 1)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_MSDC0_CLK (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_MSDC0_DAT3 (MTK_PIN_NO(123) | 1)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_MSDC0_DAT2 (MTK_PIN_NO(124) | 1)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC0_DAT1 (MTK_PIN_NO(125) | 1)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC0_DAT0 (MTK_PIN_NO(126) | 1)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC0_DSL (MTK_PIN_NO(127) | 1)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_IDDIG (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_UCTS2 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_UTXD5 (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_mbistreaden_trigger (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_MD32_1_GPIO0 (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_SCP_SCL2 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_USB_DRVVBUS (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_URTS2 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_URXD5 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_mbistwriteen_trigger (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_MD32_1_GPIO1 (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_SCP_SDA2 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_IDDIG_1P (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_SPINOR_IO2 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_SNFI_WP (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(130) | 4)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_SPINOR_IO3 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_SNFI_HOLD (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_MD32_1_JTAG_TRST (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TRSTN (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_APU_JTAG_TRST (MTK_PIN_NO(131) | 6)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_SPIM0_CSB (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_SCP_SPI0_CS (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_SPIS0_CSB (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_VPU_UDI_TMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_I2SO5_D0 (MTK_PIN_NO(132) | 6)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_SPIM0_CLK (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_SCP_SPI0_CK (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_SPIS0_CLK (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_VPU_UDI_TCK (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_I2SO5_BCK (MTK_PIN_NO(133) | 6)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_SPIM0_MO (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_SCP_SPI0_MO (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_SPIS0_SI (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_VPU_UDI_TDO (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_I2SO5_WS (MTK_PIN_NO(134) | 6)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_SPIM0_MI (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_SCP_SPI0_MI (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_SPIS0_SO (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_VPU_UDI_TDI (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_I2SO5_MCK (MTK_PIN_NO(135) | 6)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SPIM1_CSB (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_SPIS1_CSB (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_MD32_1_JTAG_TMS (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SCP_JTAG0_TMS (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_APU_JTAG_TMS (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_DBG_MON_A15 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SPIM1_CLK (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_SPIS1_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_MD32_1_JTAG_TCK (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_SCP_JTAG0_TCK (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_APU_JTAG_TCK (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_DBG_MON_A14 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_SPIM1_MO (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_SPIS1_SI (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_MD32_1_JTAG_TDO (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_SCP_JTAG0_TDO (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_APU_JTAG_TDO (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_DBG_MON_A16 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SPIM1_MI (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_SPIS1_SO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_MD32_1_JTAG_TDI (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_SCP_JTAG0_TDI (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_APU_JTAG_TDI (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_DBG_MON_A17 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SPIM2_CSB (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_SPINOR_CS (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_SNFI_CS (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_DMIC3_DAT (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_DBG_MON_A11 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SPIM2_CLK (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_SPINOR_CK (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_SNFI_CLK (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_DMIC3_CLK (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_DBG_MON_A10 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SPIM2_MO (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_SPINOR_IO0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_SNFI_MOSI (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_DMIC4_DAT (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_DBG_MON_A12 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_SPIM2_MI (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SPINOR_IO1 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_SNFI_MISO (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_DMIC4_CLK (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A13 (MTK_PIN_NO(143) | 7)
+
+#endif /* __MT8195_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt8365-pinfunc.h b/include/dt-bindings/pinctrl/mt8365-pinfunc.h
new file mode 100644
index 000000000000..e2ec8af57dcf
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8365-pinfunc.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ */
+#ifndef __MT8365_PINFUNC_H
+#define __MT8365_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
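+/*
+ * Each macro below packs the pin number (via MTK_PIN_NO()) together with
+ * the pin's function mode 0-7; mode 0 is always the plain GPIO function.
+ * For illustration, a board DTS would mux pin 35 to UART0 RX with
+ * something like:
+ *
+ *	pinmux = <MT8365_PIN_35_URXD0__FUNC_URXD0>;
+ */
+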
+#define MT8365_PIN_0_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8365_PIN_0_GPIO0__FUNC_DPI_D0 (MTK_PIN_NO(0) | 1)
+#define MT8365_PIN_0_GPIO0__FUNC_PWM_A (MTK_PIN_NO(0) | 2)
+#define MT8365_PIN_0_GPIO0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3)
+#define MT8365_PIN_0_GPIO0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4)
+#define MT8365_PIN_0_GPIO0__FUNC_CONN_MCU_TDO (MTK_PIN_NO(0) | 5)
+#define MT8365_PIN_0_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define MT8365_PIN_1_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8365_PIN_1_GPIO1__FUNC_DPI_D1 (MTK_PIN_NO(1) | 1)
+#define MT8365_PIN_1_GPIO1__FUNC_PWM_B (MTK_PIN_NO(1) | 2)
+#define MT8365_PIN_1_GPIO1__FUNC_I2S2_LRCK (MTK_PIN_NO(1) | 3)
+#define MT8365_PIN_1_GPIO1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4)
+#define MT8365_PIN_1_GPIO1__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(1) | 5)
+#define MT8365_PIN_1_GPIO1__FUNC_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define MT8365_PIN_2_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8365_PIN_2_GPIO2__FUNC_DPI_D2 (MTK_PIN_NO(2) | 1)
+#define MT8365_PIN_2_GPIO2__FUNC_PWM_C (MTK_PIN_NO(2) | 2)
+#define MT8365_PIN_2_GPIO2__FUNC_I2S2_MCK (MTK_PIN_NO(2) | 3)
+#define MT8365_PIN_2_GPIO2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4)
+#define MT8365_PIN_2_GPIO2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(2) | 5)
+#define MT8365_PIN_2_GPIO2__FUNC_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define MT8365_PIN_3_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8365_PIN_3_GPIO3__FUNC_DPI_D3 (MTK_PIN_NO(3) | 1)
+#define MT8365_PIN_3_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 2)
+#define MT8365_PIN_3_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3)
+#define MT8365_PIN_3_GPIO3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4)
+#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_TCK (MTK_PIN_NO(3) | 5)
+#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(3) | 6)
+#define MT8365_PIN_3_GPIO3__FUNC_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define MT8365_PIN_4_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8365_PIN_4_GPIO4__FUNC_DPI_D4 (MTK_PIN_NO(4) | 1)
+#define MT8365_PIN_4_GPIO4__FUNC_CLKM1 (MTK_PIN_NO(4) | 2)
+#define MT8365_PIN_4_GPIO4__FUNC_I2S1_BCK (MTK_PIN_NO(4) | 3)
+#define MT8365_PIN_4_GPIO4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4)
+#define MT8365_PIN_4_GPIO4__FUNC_CONN_MCU_TDI (MTK_PIN_NO(4) | 5)
+#define MT8365_PIN_4_GPIO4__FUNC_VDEC_TEST_CK (MTK_PIN_NO(4) | 6)
+#define MT8365_PIN_4_GPIO4__FUNC_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define MT8365_PIN_5_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8365_PIN_5_GPIO5__FUNC_DPI_D5 (MTK_PIN_NO(5) | 1)
+#define MT8365_PIN_5_GPIO5__FUNC_CLKM2 (MTK_PIN_NO(5) | 2)
+#define MT8365_PIN_5_GPIO5__FUNC_I2S1_LRCK (MTK_PIN_NO(5) | 3)
+#define MT8365_PIN_5_GPIO5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4)
+#define MT8365_PIN_5_GPIO5__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(5) | 5)
+#define MT8365_PIN_5_GPIO5__FUNC_MM_TEST_CK (MTK_PIN_NO(5) | 6)
+#define MT8365_PIN_5_GPIO5__FUNC_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define MT8365_PIN_6_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8365_PIN_6_GPIO6__FUNC_DPI_D6 (MTK_PIN_NO(6) | 1)
+#define MT8365_PIN_6_GPIO6__FUNC_CLKM3 (MTK_PIN_NO(6) | 2)
+#define MT8365_PIN_6_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3)
+#define MT8365_PIN_6_GPIO6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4)
+#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_TMS (MTK_PIN_NO(6) | 5)
+#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(6) | 6)
+#define MT8365_PIN_6_GPIO6__FUNC_DBG_MON_A6 (MTK_PIN_NO(6) | 7)
+
+#define MT8365_PIN_7_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8365_PIN_7_GPIO7__FUNC_DPI_D7 (MTK_PIN_NO(7) | 1)
+#define MT8365_PIN_7_GPIO7__FUNC_I2S1_DO (MTK_PIN_NO(7) | 3)
+#define MT8365_PIN_7_GPIO7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4)
+#define MT8365_PIN_7_GPIO7__FUNC_CONN_DSP_JCK (MTK_PIN_NO(7) | 5)
+#define MT8365_PIN_7_GPIO7__FUNC_DBG_MON_A7 (MTK_PIN_NO(7) | 7)
+
+#define MT8365_PIN_8_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8365_PIN_8_GPIO8__FUNC_DPI_D8 (MTK_PIN_NO(8) | 1)
+#define MT8365_PIN_8_GPIO8__FUNC_SPI_CLK (MTK_PIN_NO(8) | 2)
+#define MT8365_PIN_8_GPIO8__FUNC_I2S0_BCK (MTK_PIN_NO(8) | 3)
+#define MT8365_PIN_8_GPIO8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4)
+#define MT8365_PIN_8_GPIO8__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(8) | 5)
+#define MT8365_PIN_8_GPIO8__FUNC_DBG_MON_A8 (MTK_PIN_NO(8) | 7)
+
+#define MT8365_PIN_9_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8365_PIN_9_GPIO9__FUNC_DPI_D9 (MTK_PIN_NO(9) | 1)
+#define MT8365_PIN_9_GPIO9__FUNC_SPI_CSB (MTK_PIN_NO(9) | 2)
+#define MT8365_PIN_9_GPIO9__FUNC_I2S0_LRCK (MTK_PIN_NO(9) | 3)
+#define MT8365_PIN_9_GPIO9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4)
+#define MT8365_PIN_9_GPIO9__FUNC_CONN_DSP_JDI (MTK_PIN_NO(9) | 5)
+#define MT8365_PIN_9_GPIO9__FUNC_DBG_MON_A9 (MTK_PIN_NO(9) | 7)
+
+#define MT8365_PIN_10_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8365_PIN_10_GPIO10__FUNC_DPI_D10 (MTK_PIN_NO(10) | 1)
+#define MT8365_PIN_10_GPIO10__FUNC_SPI_MI (MTK_PIN_NO(10) | 2)
+#define MT8365_PIN_10_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 3)
+#define MT8365_PIN_10_GPIO10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4)
+#define MT8365_PIN_10_GPIO10__FUNC_CONN_DSP_JMS (MTK_PIN_NO(10) | 5)
+#define MT8365_PIN_10_GPIO10__FUNC_DBG_MON_A10 (MTK_PIN_NO(10) | 7)
+
+#define MT8365_PIN_11_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8365_PIN_11_GPIO11__FUNC_DPI_D11 (MTK_PIN_NO(11) | 1)
+#define MT8365_PIN_11_GPIO11__FUNC_SPI_MO (MTK_PIN_NO(11) | 2)
+#define MT8365_PIN_11_GPIO11__FUNC_I2S0_DI (MTK_PIN_NO(11) | 3)
+#define MT8365_PIN_11_GPIO11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 4)
+#define MT8365_PIN_11_GPIO11__FUNC_CONN_DSP_JDO (MTK_PIN_NO(11) | 5)
+#define MT8365_PIN_11_GPIO11__FUNC_DBG_MON_A11 (MTK_PIN_NO(11) | 7)
+
+#define MT8365_PIN_12_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8365_PIN_12_GPIO12__FUNC_DPI_DE (MTK_PIN_NO(12) | 1)
+#define MT8365_PIN_12_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 2)
+#define MT8365_PIN_12_GPIO12__FUNC_I2S3_BCK (MTK_PIN_NO(12) | 3)
+#define MT8365_PIN_12_GPIO12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 4)
+#define MT8365_PIN_12_GPIO12__FUNC_O_WIFI_TXD (MTK_PIN_NO(12) | 5)
+#define MT8365_PIN_12_GPIO12__FUNC_DBG_MON_A12 (MTK_PIN_NO(12) | 7)
+
+#define MT8365_PIN_13_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8365_PIN_13_GPIO13__FUNC_DPI_VSYNC (MTK_PIN_NO(13) | 1)
+#define MT8365_PIN_13_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 2)
+#define MT8365_PIN_13_GPIO13__FUNC_I2S3_LRCK (MTK_PIN_NO(13) | 3)
+#define MT8365_PIN_13_GPIO13__FUNC_EXT_COL (MTK_PIN_NO(13) | 4)
+#define MT8365_PIN_13_GPIO13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 5)
+#define MT8365_PIN_13_GPIO13__FUNC_DBG_MON_A13 (MTK_PIN_NO(13) | 7)
+
+#define MT8365_PIN_14_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8365_PIN_14_GPIO14__FUNC_DPI_CK (MTK_PIN_NO(14) | 1)
+#define MT8365_PIN_14_GPIO14__FUNC_UCTS2 (MTK_PIN_NO(14) | 2)
+#define MT8365_PIN_14_GPIO14__FUNC_I2S3_MCK (MTK_PIN_NO(14) | 3)
+#define MT8365_PIN_14_GPIO14__FUNC_EXT_MDIO (MTK_PIN_NO(14) | 4)
+#define MT8365_PIN_14_GPIO14__FUNC_SPDIF_OUT (MTK_PIN_NO(14) | 5)
+#define MT8365_PIN_14_GPIO14__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(14) | 6)
+#define MT8365_PIN_14_GPIO14__FUNC_DBG_MON_A14 (MTK_PIN_NO(14) | 7)
+
+#define MT8365_PIN_15_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8365_PIN_15_GPIO15__FUNC_DPI_HSYNC (MTK_PIN_NO(15) | 1)
+#define MT8365_PIN_15_GPIO15__FUNC_URTS2 (MTK_PIN_NO(15) | 2)
+#define MT8365_PIN_15_GPIO15__FUNC_I2S3_DO (MTK_PIN_NO(15) | 3)
+#define MT8365_PIN_15_GPIO15__FUNC_EXT_MDC (MTK_PIN_NO(15) | 4)
+#define MT8365_PIN_15_GPIO15__FUNC_IRRX (MTK_PIN_NO(15) | 5)
+#define MT8365_PIN_15_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 6)
+#define MT8365_PIN_15_GPIO15__FUNC_DBG_MON_A15 (MTK_PIN_NO(15) | 7)
+
+#define MT8365_PIN_16_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8365_PIN_16_GPIO16__FUNC_DPI_D12 (MTK_PIN_NO(16) | 1)
+#define MT8365_PIN_16_GPIO16__FUNC_USB_DRVVBUS (MTK_PIN_NO(16) | 2)
+#define MT8365_PIN_16_GPIO16__FUNC_PWM_A (MTK_PIN_NO(16) | 3)
+#define MT8365_PIN_16_GPIO16__FUNC_CLKM0 (MTK_PIN_NO(16) | 4)
+#define MT8365_PIN_16_GPIO16__FUNC_ANT_SEL0 (MTK_PIN_NO(16) | 5)
+#define MT8365_PIN_16_GPIO16__FUNC_TSF_IN (MTK_PIN_NO(16) | 6)
+#define MT8365_PIN_16_GPIO16__FUNC_DBG_MON_A16 (MTK_PIN_NO(16) | 7)
+
+#define MT8365_PIN_17_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8365_PIN_17_GPIO17__FUNC_DPI_D13 (MTK_PIN_NO(17) | 1)
+#define MT8365_PIN_17_GPIO17__FUNC_IDDIG (MTK_PIN_NO(17) | 2)
+#define MT8365_PIN_17_GPIO17__FUNC_PWM_B (MTK_PIN_NO(17) | 3)
+#define MT8365_PIN_17_GPIO17__FUNC_CLKM1 (MTK_PIN_NO(17) | 4)
+#define MT8365_PIN_17_GPIO17__FUNC_ANT_SEL1 (MTK_PIN_NO(17) | 5)
+#define MT8365_PIN_17_GPIO17__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 6)
+#define MT8365_PIN_17_GPIO17__FUNC_DBG_MON_A17 (MTK_PIN_NO(17) | 7)
+
+#define MT8365_PIN_18_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8365_PIN_18_GPIO18__FUNC_DPI_D14 (MTK_PIN_NO(18) | 1)
+#define MT8365_PIN_18_GPIO18__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(18) | 2)
+#define MT8365_PIN_18_GPIO18__FUNC_PWM_C (MTK_PIN_NO(18) | 3)
+#define MT8365_PIN_18_GPIO18__FUNC_CLKM2 (MTK_PIN_NO(18) | 4)
+#define MT8365_PIN_18_GPIO18__FUNC_ANT_SEL2 (MTK_PIN_NO(18) | 5)
+#define MT8365_PIN_18_GPIO18__FUNC_MFG_TEST_CK (MTK_PIN_NO(18) | 6)
+#define MT8365_PIN_18_GPIO18__FUNC_DBG_MON_A18 (MTK_PIN_NO(18) | 7)
+
+#define MT8365_PIN_19_DISP_PWM__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8365_PIN_19_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(19) | 1)
+#define MT8365_PIN_19_DISP_PWM__FUNC_PWM_A (MTK_PIN_NO(19) | 2)
+#define MT8365_PIN_19_DISP_PWM__FUNC_DBG_MON_A19 (MTK_PIN_NO(19) | 7)
+
+#define MT8365_PIN_20_LCM_RST__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8365_PIN_20_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(20) | 1)
+#define MT8365_PIN_20_LCM_RST__FUNC_PWM_B (MTK_PIN_NO(20) | 2)
+#define MT8365_PIN_20_LCM_RST__FUNC_DBG_MON_A20 (MTK_PIN_NO(20) | 7)
+
+#define MT8365_PIN_21_DSI_TE__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8365_PIN_21_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(21) | 1)
+#define MT8365_PIN_21_DSI_TE__FUNC_PWM_C (MTK_PIN_NO(21) | 2)
+#define MT8365_PIN_21_DSI_TE__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 3)
+#define MT8365_PIN_21_DSI_TE__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(21) | 4)
+#define MT8365_PIN_21_DSI_TE__FUNC_DBG_MON_A21 (MTK_PIN_NO(21) | 7)
+
+#define MT8365_PIN_22_KPROW0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8365_PIN_22_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(22) | 1)
+#define MT8365_PIN_22_KPROW0__FUNC_DBG_MON_A22 (MTK_PIN_NO(22) | 7)
+
+#define MT8365_PIN_23_KPROW1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8365_PIN_23_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(23) | 1)
+#define MT8365_PIN_23_KPROW1__FUNC_IDDIG (MTK_PIN_NO(23) | 2)
+#define MT8365_PIN_23_KPROW1__FUNC_WIFI_TXD (MTK_PIN_NO(23) | 3)
+#define MT8365_PIN_23_KPROW1__FUNC_CLKM3 (MTK_PIN_NO(23) | 4)
+#define MT8365_PIN_23_KPROW1__FUNC_ANT_SEL1 (MTK_PIN_NO(23) | 5)
+#define MT8365_PIN_23_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 6)
+#define MT8365_PIN_23_KPROW1__FUNC_DBG_MON_B0 (MTK_PIN_NO(23) | 7)
+
+#define MT8365_PIN_24_KPCOL0__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8365_PIN_24_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(24) | 1)
+#define MT8365_PIN_24_KPCOL0__FUNC_DBG_MON_A23 (MTK_PIN_NO(24) | 7)
+
+#define MT8365_PIN_25_KPCOL1__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8365_PIN_25_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(25) | 1)
+#define MT8365_PIN_25_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(25) | 2)
+#define MT8365_PIN_25_KPCOL1__FUNC_APU_JTAG_TRST (MTK_PIN_NO(25) | 3)
+#define MT8365_PIN_25_KPCOL1__FUNC_UDI_NTRST_XI (MTK_PIN_NO(25) | 4)
+#define MT8365_PIN_25_KPCOL1__FUNC_DFD_NTRST_XI (MTK_PIN_NO(25) | 5)
+#define MT8365_PIN_25_KPCOL1__FUNC_CONN_TEST_CK (MTK_PIN_NO(25) | 6)
+#define MT8365_PIN_25_KPCOL1__FUNC_DBG_MON_B1 (MTK_PIN_NO(25) | 7)
+
+#define MT8365_PIN_26_SPI_CS__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8365_PIN_26_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(26) | 1)
+#define MT8365_PIN_26_SPI_CS__FUNC_APU_JTAG_TMS (MTK_PIN_NO(26) | 3)
+#define MT8365_PIN_26_SPI_CS__FUNC_UDI_TMS_XI (MTK_PIN_NO(26) | 4)
+#define MT8365_PIN_26_SPI_CS__FUNC_DFD_TMS_XI (MTK_PIN_NO(26) | 5)
+#define MT8365_PIN_26_SPI_CS__FUNC_CONN_TEST_CK (MTK_PIN_NO(26) | 6)
+#define MT8365_PIN_26_SPI_CS__FUNC_DBG_MON_A24 (MTK_PIN_NO(26) | 7)
+
+#define MT8365_PIN_27_SPI_CK__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8365_PIN_27_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(27) | 1)
+#define MT8365_PIN_27_SPI_CK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(27) | 3)
+#define MT8365_PIN_27_SPI_CK__FUNC_UDI_TCK_XI (MTK_PIN_NO(27) | 4)
+#define MT8365_PIN_27_SPI_CK__FUNC_DFD_TCK_XI (MTK_PIN_NO(27) | 5)
+#define MT8365_PIN_27_SPI_CK__FUNC_APU_TEST_CK (MTK_PIN_NO(27) | 6)
+#define MT8365_PIN_27_SPI_CK__FUNC_DBG_MON_A25 (MTK_PIN_NO(27) | 7)
+
+#define MT8365_PIN_28_SPI_MI__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(28) | 1)
+#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(28) | 2)
+#define MT8365_PIN_28_SPI_MI__FUNC_APU_JTAG_TDI (MTK_PIN_NO(28) | 3)
+#define MT8365_PIN_28_SPI_MI__FUNC_UDI_TDI_XI (MTK_PIN_NO(28) | 4)
+#define MT8365_PIN_28_SPI_MI__FUNC_DFD_TDI_XI (MTK_PIN_NO(28) | 5)
+#define MT8365_PIN_28_SPI_MI__FUNC_DSP_TEST_CK (MTK_PIN_NO(28) | 6)
+#define MT8365_PIN_28_SPI_MI__FUNC_DBG_MON_A26 (MTK_PIN_NO(28) | 7)
+
+#define MT8365_PIN_29_SPI_MO__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(29) | 1)
+#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(29) | 2)
+#define MT8365_PIN_29_SPI_MO__FUNC_APU_JTAG_TDO (MTK_PIN_NO(29) | 3)
+#define MT8365_PIN_29_SPI_MO__FUNC_UDI_TDO (MTK_PIN_NO(29) | 4)
+#define MT8365_PIN_29_SPI_MO__FUNC_DFD_TDO (MTK_PIN_NO(29) | 5)
+#define MT8365_PIN_29_SPI_MO__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(29) | 6)
+#define MT8365_PIN_29_SPI_MO__FUNC_DBG_MON_A27 (MTK_PIN_NO(29) | 7)
+
+#define MT8365_PIN_30_JTMS__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8365_PIN_30_JTMS__FUNC_JTMS (MTK_PIN_NO(30) | 1)
+#define MT8365_PIN_30_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(30) | 2)
+#define MT8365_PIN_30_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(30) | 3)
+#define MT8365_PIN_30_JTMS__FUNC_MCU_SPM_TMS (MTK_PIN_NO(30) | 4)
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(30) | 5)
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 6)
+
+#define MT8365_PIN_31_JTCK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8365_PIN_31_JTCK__FUNC_JTCK (MTK_PIN_NO(31) | 1)
+#define MT8365_PIN_31_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(31) | 2)
+#define MT8365_PIN_31_JTCK__FUNC_UDI_TCK_XI (MTK_PIN_NO(31) | 3)
+#define MT8365_PIN_31_JTCK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(31) | 4)
+#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(31) | 5)
+#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(31) | 6)
+
+#define MT8365_PIN_32_JTDI__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8365_PIN_32_JTDI__FUNC_JTDI (MTK_PIN_NO(32) | 1)
+#define MT8365_PIN_32_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(32) | 2)
+#define MT8365_PIN_32_JTDI__FUNC_UDI_TDI_XI (MTK_PIN_NO(32) | 3)
+#define MT8365_PIN_32_JTDI__FUNC_MCU_SPM_TDI (MTK_PIN_NO(32) | 4)
+#define MT8365_PIN_32_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 5)
+
+#define MT8365_PIN_33_JTDO__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8365_PIN_33_JTDO__FUNC_JTDO (MTK_PIN_NO(33) | 1)
+#define MT8365_PIN_33_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(33) | 2)
+#define MT8365_PIN_33_JTDO__FUNC_UDI_TDO (MTK_PIN_NO(33) | 3)
+#define MT8365_PIN_33_JTDO__FUNC_MCU_SPM_TDO (MTK_PIN_NO(33) | 4)
+#define MT8365_PIN_33_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(33) | 5)
+
+#define MT8365_PIN_34_JTRST__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8365_PIN_34_JTRST__FUNC_JTRST (MTK_PIN_NO(34) | 1)
+#define MT8365_PIN_34_JTRST__FUNC_DFD_NTRST_XI (MTK_PIN_NO(34) | 2)
+#define MT8365_PIN_34_JTRST__FUNC_UDI_NTRST_XI (MTK_PIN_NO(34) | 3)
+#define MT8365_PIN_34_JTRST__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(34) | 4)
+#define MT8365_PIN_34_JTRST__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5)
+
+#define MT8365_PIN_35_URXD0__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8365_PIN_35_URXD0__FUNC_URXD0 (MTK_PIN_NO(35) | 1)
+#define MT8365_PIN_35_URXD0__FUNC_UTXD0 (MTK_PIN_NO(35) | 2)
+#define MT8365_PIN_35_URXD0__FUNC_DSP_URXD0 (MTK_PIN_NO(35) | 7)
+
+#define MT8365_PIN_36_UTXD0__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8365_PIN_36_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(36) | 1)
+#define MT8365_PIN_36_UTXD0__FUNC_URXD0 (MTK_PIN_NO(36) | 2)
+#define MT8365_PIN_36_UTXD0__FUNC_DSP_UTXD0 (MTK_PIN_NO(36) | 7)
+
+#define MT8365_PIN_37_URXD1__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8365_PIN_37_URXD1__FUNC_URXD1 (MTK_PIN_NO(37) | 1)
+#define MT8365_PIN_37_URXD1__FUNC_UTXD1 (MTK_PIN_NO(37) | 2)
+#define MT8365_PIN_37_URXD1__FUNC_UCTS2 (MTK_PIN_NO(37) | 3)
+#define MT8365_PIN_37_URXD1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(37) | 4)
+#define MT8365_PIN_37_URXD1__FUNC_CONN_UART0_RXD (MTK_PIN_NO(37) | 5)
+#define MT8365_PIN_37_URXD1__FUNC_I2S0_MCK (MTK_PIN_NO(37) | 6)
+#define MT8365_PIN_37_URXD1__FUNC_DSP_URXD0 (MTK_PIN_NO(37) | 7)
+
+#define MT8365_PIN_38_UTXD1__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8365_PIN_38_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(38) | 1)
+#define MT8365_PIN_38_UTXD1__FUNC_URXD1 (MTK_PIN_NO(38) | 2)
+#define MT8365_PIN_38_UTXD1__FUNC_URTS2 (MTK_PIN_NO(38) | 3)
+#define MT8365_PIN_38_UTXD1__FUNC_ANT_SEL2 (MTK_PIN_NO(38) | 4)
+#define MT8365_PIN_38_UTXD1__FUNC_CONN_UART0_TXD (MTK_PIN_NO(38) | 5)
+#define MT8365_PIN_38_UTXD1__FUNC_I2S1_MCK (MTK_PIN_NO(38) | 6)
+#define MT8365_PIN_38_UTXD1__FUNC_DSP_UTXD0 (MTK_PIN_NO(38) | 7)
+
+#define MT8365_PIN_39_URXD2__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8365_PIN_39_URXD2__FUNC_URXD2 (MTK_PIN_NO(39) | 1)
+#define MT8365_PIN_39_URXD2__FUNC_UTXD2 (MTK_PIN_NO(39) | 2)
+#define MT8365_PIN_39_URXD2__FUNC_UCTS1 (MTK_PIN_NO(39) | 3)
+#define MT8365_PIN_39_URXD2__FUNC_IDDIG (MTK_PIN_NO(39) | 4)
+#define MT8365_PIN_39_URXD2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(39) | 5)
+#define MT8365_PIN_39_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(39) | 6)
+#define MT8365_PIN_39_URXD2__FUNC_DSP_URXD0 (MTK_PIN_NO(39) | 7)
+
+#define MT8365_PIN_40_UTXD2__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8365_PIN_40_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(40) | 1)
+#define MT8365_PIN_40_UTXD2__FUNC_URXD2 (MTK_PIN_NO(40) | 2)
+#define MT8365_PIN_40_UTXD2__FUNC_URTS1 (MTK_PIN_NO(40) | 3)
+#define MT8365_PIN_40_UTXD2__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 4)
+#define MT8365_PIN_40_UTXD2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(40) | 5)
+#define MT8365_PIN_40_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(40) | 6)
+#define MT8365_PIN_40_UTXD2__FUNC_DSP_UTXD0 (MTK_PIN_NO(40) | 7)
+
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(41) | 1)
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(41) | 2)
+
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1)
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2)
+
+#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(43) | 1)
+
+#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(44) | 1)
+
+#define MT8365_PIN_45_RTC32K_CK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8365_PIN_45_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(45) | 1)
+
+#define MT8365_PIN_46_WATCHDOG__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8365_PIN_46_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(46) | 1)
+
+#define MT8365_PIN_47_SRCLKENA0__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA0 (MTK_PIN_NO(47) | 1)
+#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA1 (MTK_PIN_NO(47) | 2)
+
+#define MT8365_PIN_48_SRCLKENA1__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8365_PIN_48_SRCLKENA1__FUNC_SRCLKENA1 (MTK_PIN_NO(48) | 1)
+
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(49) | 1)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MISO (MTK_PIN_NO(49) | 2)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_I2S1_MCK (MTK_PIN_NO(49) | 3)
+
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(50) | 1)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(50) | 2)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_I2S1_BCK (MTK_PIN_NO(50) | 3)
+
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(51) | 1)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(51) | 2)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_I2S1_LRCK (MTK_PIN_NO(51) | 3)
+
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(52) | 1)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(52) | 2)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_I2S1_DO (MTK_PIN_NO(52) | 3)
+
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MISO (MTK_PIN_NO(53) | 1)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(53) | 2)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_I2S2_MCK (MTK_PIN_NO(53) | 3)
+
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(54) | 1)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(54) | 2)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_I2S2_BCK (MTK_PIN_NO(54) | 3)
+
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(55) | 1)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(55) | 2)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_I2S2_LRCK (MTK_PIN_NO(55) | 3)
+
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(56) | 1)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(56) | 2)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_I2S2_DI (MTK_PIN_NO(56) | 3)
+
+#define MT8365_PIN_57_SDA0__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8365_PIN_57_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(57) | 1)
+
+#define MT8365_PIN_58_SCL0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8365_PIN_58_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(58) | 1)
+
+#define MT8365_PIN_59_SDA1__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8365_PIN_59_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(59) | 1)
+#define MT8365_PIN_59_SDA1__FUNC_USB_SDA (MTK_PIN_NO(59) | 6)
+#define MT8365_PIN_59_SDA1__FUNC_DBG_SDA (MTK_PIN_NO(59) | 7)
+
+#define MT8365_PIN_60_SCL1__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8365_PIN_60_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(60) | 1)
+#define MT8365_PIN_60_SCL1__FUNC_USB_SCL (MTK_PIN_NO(60) | 6)
+#define MT8365_PIN_60_SCL1__FUNC_DBG_SCL (MTK_PIN_NO(60) | 7)
+
+#define MT8365_PIN_61_SDA2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8365_PIN_61_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(61) | 1)
+
+#define MT8365_PIN_62_SCL2__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8365_PIN_62_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(62) | 1)
+
+#define MT8365_PIN_63_SDA3__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8365_PIN_63_SDA3__FUNC_SDA3_0 (MTK_PIN_NO(63) | 1)
+
+#define MT8365_PIN_64_SCL3__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8365_PIN_64_SCL3__FUNC_SCL3_0 (MTK_PIN_NO(64) | 1)
+
+#define MT8365_PIN_65_CMMCLK0__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK0 (MTK_PIN_NO(65) | 1)
+#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK1 (MTK_PIN_NO(65) | 2)
+#define MT8365_PIN_65_CMMCLK0__FUNC_DBG_MON_A28 (MTK_PIN_NO(65) | 7)
+
+#define MT8365_PIN_66_CMMCLK1__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK1 (MTK_PIN_NO(66) | 1)
+#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK0 (MTK_PIN_NO(66) | 2)
+#define MT8365_PIN_66_CMMCLK1__FUNC_DBG_MON_B2 (MTK_PIN_NO(66) | 7)
+
+#define MT8365_PIN_67_CMPCLK__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8365_PIN_67_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(67) | 1)
+#define MT8365_PIN_67_CMPCLK__FUNC_ANT_SEL0 (MTK_PIN_NO(67) | 2)
+#define MT8365_PIN_67_CMPCLK__FUNC_TDM_RX_BCK (MTK_PIN_NO(67) | 4)
+#define MT8365_PIN_67_CMPCLK__FUNC_I2S0_BCK (MTK_PIN_NO(67) | 5)
+#define MT8365_PIN_67_CMPCLK__FUNC_DBG_MON_B3 (MTK_PIN_NO(67) | 7)
+
+#define MT8365_PIN_68_CMDAT0__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8365_PIN_68_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(68) | 1)
+#define MT8365_PIN_68_CMDAT0__FUNC_ANT_SEL1 (MTK_PIN_NO(68) | 2)
+#define MT8365_PIN_68_CMDAT0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(68) | 4)
+#define MT8365_PIN_68_CMDAT0__FUNC_I2S0_LRCK (MTK_PIN_NO(68) | 5)
+#define MT8365_PIN_68_CMDAT0__FUNC_DBG_MON_B4 (MTK_PIN_NO(68) | 7)
+
+#define MT8365_PIN_69_CMDAT1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8365_PIN_69_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(69) | 1)
+#define MT8365_PIN_69_CMDAT1__FUNC_ANT_SEL2 (MTK_PIN_NO(69) | 2)
+#define MT8365_PIN_69_CMDAT1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 3)
+#define MT8365_PIN_69_CMDAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(69) | 4)
+#define MT8365_PIN_69_CMDAT1__FUNC_I2S0_MCK (MTK_PIN_NO(69) | 5)
+#define MT8365_PIN_69_CMDAT1__FUNC_DBG_MON_B5 (MTK_PIN_NO(69) | 7)
+
+#define MT8365_PIN_70_CMDAT2__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8365_PIN_70_CMDAT2__FUNC_CMDAT2 (MTK_PIN_NO(70) | 1)
+#define MT8365_PIN_70_CMDAT2__FUNC_ANT_SEL3 (MTK_PIN_NO(70) | 2)
+#define MT8365_PIN_70_CMDAT2__FUNC_TDM_RX_DI (MTK_PIN_NO(70) | 4)
+#define MT8365_PIN_70_CMDAT2__FUNC_I2S0_DI (MTK_PIN_NO(70) | 5)
+#define MT8365_PIN_70_CMDAT2__FUNC_DBG_MON_B6 (MTK_PIN_NO(70) | 7)
+
+#define MT8365_PIN_71_CMDAT3__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8365_PIN_71_CMDAT3__FUNC_CMDAT3 (MTK_PIN_NO(71) | 1)
+#define MT8365_PIN_71_CMDAT3__FUNC_ANT_SEL4 (MTK_PIN_NO(71) | 2)
+#define MT8365_PIN_71_CMDAT3__FUNC_DBG_MON_B7 (MTK_PIN_NO(71) | 7)
+
+#define MT8365_PIN_72_CMDAT4__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8365_PIN_72_CMDAT4__FUNC_CMDAT4 (MTK_PIN_NO(72) | 1)
+#define MT8365_PIN_72_CMDAT4__FUNC_ANT_SEL5 (MTK_PIN_NO(72) | 2)
+#define MT8365_PIN_72_CMDAT4__FUNC_I2S3_BCK (MTK_PIN_NO(72) | 5)
+#define MT8365_PIN_72_CMDAT4__FUNC_DBG_MON_B8 (MTK_PIN_NO(72) | 7)
+
+#define MT8365_PIN_73_CMDAT5__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8365_PIN_73_CMDAT5__FUNC_CMDAT5 (MTK_PIN_NO(73) | 1)
+#define MT8365_PIN_73_CMDAT5__FUNC_ANT_SEL6 (MTK_PIN_NO(73) | 2)
+#define MT8365_PIN_73_CMDAT5__FUNC_I2S3_LRCK (MTK_PIN_NO(73) | 5)
+#define MT8365_PIN_73_CMDAT5__FUNC_DBG_MON_B9 (MTK_PIN_NO(73) | 7)
+
+#define MT8365_PIN_74_CMDAT6__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT8365_PIN_74_CMDAT6__FUNC_CMDAT6 (MTK_PIN_NO(74) | 1)
+#define MT8365_PIN_74_CMDAT6__FUNC_ANT_SEL7 (MTK_PIN_NO(74) | 2)
+#define MT8365_PIN_74_CMDAT6__FUNC_I2S3_MCK (MTK_PIN_NO(74) | 5)
+#define MT8365_PIN_74_CMDAT6__FUNC_DBG_MON_B10 (MTK_PIN_NO(74) | 7)
+
+#define MT8365_PIN_75_CMDAT7__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT8365_PIN_75_CMDAT7__FUNC_CMDAT7 (MTK_PIN_NO(75) | 1)
+#define MT8365_PIN_75_CMDAT7__FUNC_I2S3_DO (MTK_PIN_NO(75) | 5)
+#define MT8365_PIN_75_CMDAT7__FUNC_DBG_MON_B11 (MTK_PIN_NO(75) | 7)
+
+#define MT8365_PIN_76_CMDAT8__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT8365_PIN_76_CMDAT8__FUNC_CMDAT8 (MTK_PIN_NO(76) | 1)
+#define MT8365_PIN_76_CMDAT8__FUNC_PCM_CLK (MTK_PIN_NO(76) | 5)
+#define MT8365_PIN_76_CMDAT8__FUNC_DBG_MON_A29 (MTK_PIN_NO(76) | 7)
+
+#define MT8365_PIN_77_CMDAT9__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT8365_PIN_77_CMDAT9__FUNC_CMDAT9 (MTK_PIN_NO(77) | 1)
+#define MT8365_PIN_77_CMDAT9__FUNC_PCM_SYNC (MTK_PIN_NO(77) | 5)
+#define MT8365_PIN_77_CMDAT9__FUNC_DBG_MON_A30 (MTK_PIN_NO(77) | 7)
+
+#define MT8365_PIN_78_CMHSYNC__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT8365_PIN_78_CMHSYNC__FUNC_CMHSYNC (MTK_PIN_NO(78) | 1)
+#define MT8365_PIN_78_CMHSYNC__FUNC_PCM_RX (MTK_PIN_NO(78) | 5)
+#define MT8365_PIN_78_CMHSYNC__FUNC_DBG_MON_A31 (MTK_PIN_NO(78) | 7)
+
+#define MT8365_PIN_79_CMVSYNC__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT8365_PIN_79_CMVSYNC__FUNC_CMVSYNC (MTK_PIN_NO(79) | 1)
+#define MT8365_PIN_79_CMVSYNC__FUNC_PCM_TX (MTK_PIN_NO(79) | 5)
+#define MT8365_PIN_79_CMVSYNC__FUNC_DBG_MON_A32 (MTK_PIN_NO(79) | 7)
+
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(80) | 1)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_TDM_TX_LRCK (MTK_PIN_NO(80) | 2)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_UTXD1 (MTK_PIN_NO(80) | 3)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_DPI_D19 (MTK_PIN_NO(80) | 4)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_UDI_TMS_XI (MTK_PIN_NO(80) | 5)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(80) | 6)
+
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(81) | 1)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_TDM_TX_BCK (MTK_PIN_NO(81) | 2)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_URXD1 (MTK_PIN_NO(81) | 3)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_DPI_D20 (MTK_PIN_NO(81) | 4)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_UDI_TCK_XI (MTK_PIN_NO(81) | 5)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(81) | 6)
+
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(82) | 2)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UTXD2 (MTK_PIN_NO(82) | 3)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_DPI_D21 (MTK_PIN_NO(82) | 4)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UDI_TDI_XI (MTK_PIN_NO(82) | 5)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(82) | 6)
+
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(83) | 1)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(83) | 2)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_URXD2 (MTK_PIN_NO(83) | 3)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_DPI_D22 (MTK_PIN_NO(83) | 4)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_UDI_TDO (MTK_PIN_NO(83) | 5)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(83) | 6)
+
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(84) | 1)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(84) | 2)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_PWM_A (MTK_PIN_NO(84) | 3)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_DPI_D23 (MTK_PIN_NO(84) | 4)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_UDI_NTRST_XI (MTK_PIN_NO(84) | 5)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(84) | 6)
+
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(85) | 1)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(85) | 2)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_PWM_B (MTK_PIN_NO(85) | 3)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(85) | 5)
+
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_MSDC2_DSL (MTK_PIN_NO(86) | 1)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_TDM_TX_MCK (MTK_PIN_NO(86) | 2)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_PWM_C (MTK_PIN_NO(86) | 3)
+
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(87) | 1)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(87) | 2)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_DFD_TMS_XI (MTK_PIN_NO(87) | 3)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_APU_JTAG_TMS (MTK_PIN_NO(87) | 4)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_MCU_SPM_TMS (MTK_PIN_NO(87) | 5)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_DSP_JMS (MTK_PIN_NO(87) | 6)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(87) | 7)
+
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(88) | 1)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 2)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 3)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(88) | 4)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(88) | 5)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_DSP_JCK (MTK_PIN_NO(88) | 6)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(88) | 7)
+
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(89) | 1)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_DFD_TDI_XI (MTK_PIN_NO(89) | 3)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_APU_JTAG_TDI (MTK_PIN_NO(89) | 4)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MCU_SPM_TDI (MTK_PIN_NO(89) | 5)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_CONN_DSP_JDI (MTK_PIN_NO(89) | 6)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(89) | 7)
+
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(90) | 1)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_SPDIF_IN (MTK_PIN_NO(90) | 2)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_DFD_TDO (MTK_PIN_NO(90) | 3)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_APU_JTAG_TDO (MTK_PIN_NO(90) | 4)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MCU_SPM_TDO (MTK_PIN_NO(90) | 5)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_CONN_DSP_JDO (MTK_PIN_NO(90) | 6)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(90) | 7)
+
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(91) | 1)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_SPDIF_OUT (MTK_PIN_NO(91) | 2)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_DFD_NTRST_XI (MTK_PIN_NO(91) | 3)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_APU_JTAG_TRST (MTK_PIN_NO(91) | 4)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(91) | 5)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(91) | 6)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(91) | 7)
+
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(92) | 1)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_IRRX (MTK_PIN_NO(92) | 2)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_PWM_A (MTK_PIN_NO(92) | 3)
+
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(93) | 1)
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(93) | 2)
+
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(94) | 1)
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(94) | 2)
+
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(95) | 1)
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(95) | 2)
+
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(96) | 1)
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(96) | 2)
+
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(97) | 1)
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(97) | 2)
+
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(98) | 1)
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(98) | 2)
+
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(99) | 1)
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(99) | 2)
+
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(100) | 1)
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(100) | 2)
+
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(101) | 1)
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(101) | 2)
+
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(102) | 1)
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_NDQS (MTK_PIN_NO(102) | 2)
+
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(103) | 1)
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(103) | 2)
+
+#define MT8365_PIN_104_MSDC0_DSL__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8365_PIN_104_MSDC0_DSL__FUNC_MSDC0_DSL (MTK_PIN_NO(104) | 1)
+
+#define MT8365_PIN_105_NCLE__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8365_PIN_105_NCLE__FUNC_NCLE (MTK_PIN_NO(105) | 1)
+#define MT8365_PIN_105_NCLE__FUNC_TDM_RX_MCK (MTK_PIN_NO(105) | 2)
+#define MT8365_PIN_105_NCLE__FUNC_DBG_MON_B12 (MTK_PIN_NO(105) | 7)
+
+#define MT8365_PIN_106_NCEB1__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8365_PIN_106_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(106) | 1)
+#define MT8365_PIN_106_NCEB1__FUNC_TDM_RX_BCK (MTK_PIN_NO(106) | 2)
+#define MT8365_PIN_106_NCEB1__FUNC_DBG_MON_B13 (MTK_PIN_NO(106) | 7)
+
+#define MT8365_PIN_107_NCEB0__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8365_PIN_107_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(107) | 1)
+#define MT8365_PIN_107_NCEB0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(107) | 2)
+#define MT8365_PIN_107_NCEB0__FUNC_DBG_MON_B14 (MTK_PIN_NO(107) | 7)
+
+#define MT8365_PIN_108_NREB__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8365_PIN_108_NREB__FUNC_NREB (MTK_PIN_NO(108) | 1)
+#define MT8365_PIN_108_NREB__FUNC_TDM_RX_DI (MTK_PIN_NO(108) | 2)
+#define MT8365_PIN_108_NREB__FUNC_DBG_MON_B15 (MTK_PIN_NO(108) | 7)
+
+#define MT8365_PIN_109_NRNB__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8365_PIN_109_NRNB__FUNC_NRNB (MTK_PIN_NO(109) | 1)
+#define MT8365_PIN_109_NRNB__FUNC_TSF_IN (MTK_PIN_NO(109) | 2)
+#define MT8365_PIN_109_NRNB__FUNC_DBG_MON_B16 (MTK_PIN_NO(109) | 7)
+
+#define MT8365_PIN_110_PCM_CLK__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8365_PIN_110_PCM_CLK__FUNC_PCM_CLK (MTK_PIN_NO(110) | 1)
+#define MT8365_PIN_110_PCM_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(110) | 2)
+#define MT8365_PIN_110_PCM_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 3)
+#define MT8365_PIN_110_PCM_CLK__FUNC_SPDIF_IN (MTK_PIN_NO(110) | 4)
+#define MT8365_PIN_110_PCM_CLK__FUNC_DPI_D15 (MTK_PIN_NO(110) | 5)
+
+#define MT8365_PIN_111_PCM_SYNC__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_PCM_SYNC (MTK_PIN_NO(111) | 1)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(111) | 2)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 3)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_DPI_D16 (MTK_PIN_NO(111) | 5)
+
+#define MT8365_PIN_112_PCM_RX__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8365_PIN_112_PCM_RX__FUNC_PCM_RX (MTK_PIN_NO(112) | 1)
+#define MT8365_PIN_112_PCM_RX__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2)
+#define MT8365_PIN_112_PCM_RX__FUNC_I2S3_MCK (MTK_PIN_NO(112) | 3)
+#define MT8365_PIN_112_PCM_RX__FUNC_IRRX (MTK_PIN_NO(112) | 4)
+#define MT8365_PIN_112_PCM_RX__FUNC_DPI_D17 (MTK_PIN_NO(112) | 5)
+
+#define MT8365_PIN_113_PCM_TX__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8365_PIN_113_PCM_TX__FUNC_PCM_TX (MTK_PIN_NO(113) | 1)
+#define MT8365_PIN_113_PCM_TX__FUNC_I2S0_MCK (MTK_PIN_NO(113) | 2)
+#define MT8365_PIN_113_PCM_TX__FUNC_I2S3_DO (MTK_PIN_NO(113) | 3)
+#define MT8365_PIN_113_PCM_TX__FUNC_PWM_B (MTK_PIN_NO(113) | 4)
+#define MT8365_PIN_113_PCM_TX__FUNC_DPI_D18 (MTK_PIN_NO(113) | 5)
+
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(114) | 1)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S1_DO (MTK_PIN_NO(114) | 2)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S2_DI (MTK_PIN_NO(114) | 3)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(114) | 4)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(114) | 5)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_SPDIF_IN (MTK_PIN_NO(114) | 6)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_DBG_MON_B17 (MTK_PIN_NO(114) | 7)
+
+#define MT8365_PIN_115_I2S_LRCK__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(115) | 1)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S1_LRCK (MTK_PIN_NO(115) | 2)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S2_LRCK (MTK_PIN_NO(115) | 3)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(115) | 4)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(115) | 5)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_SPDIF_OUT (MTK_PIN_NO(115) | 6)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_DBG_MON_B18 (MTK_PIN_NO(115) | 7)
+
+#define MT8365_PIN_116_I2S_BCK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(116) | 1)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(116) | 2)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S2_BCK (MTK_PIN_NO(116) | 3)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(116) | 4)
+#define MT8365_PIN_116_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(116) | 5)
+#define MT8365_PIN_116_I2S_BCK__FUNC_IRRX (MTK_PIN_NO(116) | 6)
+#define MT8365_PIN_116_I2S_BCK__FUNC_DBG_MON_B19 (MTK_PIN_NO(116) | 7)
+
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_DMIC0_CLK (MTK_PIN_NO(117) | 1)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_I2S2_BCK (MTK_PIN_NO(117) | 2)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_DBG_MON_B20 (MTK_PIN_NO(117) | 7)
+
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DMIC0_DAT0 (MTK_PIN_NO(118) | 1)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_I2S2_DI (MTK_PIN_NO(118) | 2)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DBG_MON_B21 (MTK_PIN_NO(118) | 7)
+
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DMIC0_DAT1 (MTK_PIN_NO(119) | 1)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_I2S2_LRCK (MTK_PIN_NO(119) | 2)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DBG_MON_B22 (MTK_PIN_NO(119) | 7)
+
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_DMIC1_CLK (MTK_PIN_NO(120) | 1)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_I2S2_MCK (MTK_PIN_NO(120) | 2)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_DBG_MON_B23 (MTK_PIN_NO(120) | 7)
+
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DMIC1_DAT0 (MTK_PIN_NO(121) | 1)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_I2S1_BCK (MTK_PIN_NO(121) | 2)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DBG_MON_B24 (MTK_PIN_NO(121) | 7)
+
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DMIC1_DAT1 (MTK_PIN_NO(122) | 1)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_I2S1_LRCK (MTK_PIN_NO(122) | 2)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DBG_MON_B25 (MTK_PIN_NO(122) | 7)
+
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_DMIC2_CLK (MTK_PIN_NO(123) | 1)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_I2S1_MCK (MTK_PIN_NO(123) | 2)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_DBG_MON_B26 (MTK_PIN_NO(123) | 7)
+
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DMIC2_DAT0 (MTK_PIN_NO(124) | 1)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_I2S1_DO (MTK_PIN_NO(124) | 2)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DBG_MON_B27 (MTK_PIN_NO(124) | 7)
+
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DMIC2_DAT1 (MTK_PIN_NO(125) | 1)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(125) | 2)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DBG_MON_B28 (MTK_PIN_NO(125) | 7)
+
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_DMIC3_CLK (MTK_PIN_NO(126) | 1)
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(126) | 2)
+
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_DMIC3_DAT0 (MTK_PIN_NO(127) | 1)
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_TDM_RX_DI (MTK_PIN_NO(127) | 2)
+
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_DMIC3_DAT1 (MTK_PIN_NO(128) | 1)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(128) | 2)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_VAD_CLK (MTK_PIN_NO(128) | 3)
+
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_TDM_TX_BCK (MTK_PIN_NO(129) | 1)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(129) | 2)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_ckmon1_ck (MTK_PIN_NO(129) | 3)
+
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_TDM_TX_LRCK (MTK_PIN_NO(130) | 1)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(130) | 2)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_ckmon2_ck (MTK_PIN_NO(130) | 3)
+
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_TDM_TX_MCK (MTK_PIN_NO(131) | 1)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 2)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_ckmon3_ck (MTK_PIN_NO(131) | 3)
+
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(132) | 1)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_I2S3_DO (MTK_PIN_NO(132) | 2)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_ckmon4_ck (MTK_PIN_NO(132) | 3)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_DBG_MON_B29 (MTK_PIN_NO(132) | 7)
+
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(133) | 1)
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_DBG_MON_B30 (MTK_PIN_NO(133) | 7)
+
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(134) | 1)
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_DBG_MON_B31 (MTK_PIN_NO(134) | 7)
+
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(135) | 1)
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_DBG_MON_B32 (MTK_PIN_NO(135) | 7)
+
+#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_CONN_TOP_CLK (MTK_PIN_NO(136) | 1)
+
+#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_CONN_TOP_DATA (MTK_PIN_NO(137) | 1)
+
+#define MT8365_PIN_138_CONN_HRST_B__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define MT8365_PIN_138_CONN_HRST_B__FUNC_CONN_HRST_B (MTK_PIN_NO(138) | 1)
+
+#define MT8365_PIN_139_CONN_WB_PTA__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define MT8365_PIN_139_CONN_WB_PTA__FUNC_CONN_WB_PTA (MTK_PIN_NO(139) | 1)
+
+#define MT8365_PIN_140_CONN_BT_CLK__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define MT8365_PIN_140_CONN_BT_CLK__FUNC_CONN_BT_CLK (MTK_PIN_NO(140) | 1)
+
+#define MT8365_PIN_141_CONN_BT_DATA__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define MT8365_PIN_141_CONN_BT_DATA__FUNC_CONN_BT_DATA (MTK_PIN_NO(141) | 1)
+
+#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(142) | 1)
+
+#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(143) | 1)
+
+#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(144) | 1)
+
+#endif /* __MT8365_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index 2d2a8c737822..f48245ff87e5 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -64,7 +64,7 @@
#define OMAP3_WKUP_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2a00) (val)
#define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
-#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
+#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) (0)
#define AM33XX_PADCONF(pa, conf, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) (conf) (mux)
/*
diff --git a/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
new file mode 100644
index 000000000000..a200f546d078
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+
+#define PAD_GPIO_OFFSET 0
+#define PAD_FUNC_SHARE_OFFSET 64
+#define PAD_GPIO(x) (PAD_GPIO_OFFSET + (x))
+#define PAD_FUNC_SHARE(x) (PAD_FUNC_SHARE_OFFSET + (x))
+
+/*
+ * GPIOMUX bits:
+ * | 31 - 24 | 23 - 16 | 15 - 8 | 7 | 6 | 5 - 0 |
+ * | dout | doen | din | dout rev | doen rev | gpio nr |
+ *
+ * dout: output signal
+ * doen: output enable signal
+ * din: optional input signal, 0xff = none
+ * dout rev: output signal reverse bit
+ * doen rev: output enable signal reverse bit
+ * gpio nr: gpio number, 0 - 63
+ */
+#define GPIOMUX(n, dout, doen, din) ( \
+ (((dout) & 0x80000000) >> (31 - 7)) | (((dout) & 0xff) << 24) | \
+ (((doen) & 0x80000000) >> (31 - 6)) | (((doen) & 0xff) << 16) | \
+ (((din) & 0xff) << 8) | \
+ ((n) & 0x3f))
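+
+/*
+ * For example, GPIOMUX(62, 0, 1, 66) packs gpio 62 with dout 0, doen 1 and
+ * din 66 (0x42) into 0x0001423e: no reverse bits set, doen in bits 23-16,
+ * din in bits 15-8 and the gpio number in the low bits.
+ */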
+
+#define GPO_REVERSE 0x80000000
+
+#define GPO_LOW 0
+#define GPO_HIGH 1
+#define GPO_ENABLE 0
+#define GPO_DISABLE 1
+#define GPO_CLK_GMAC_PAPHYREF 2
+#define GPO_JTAG_TDO 3
+#define GPO_JTAG_TDO_OEN 4
+#define GPO_DMIC_CLK_OUT 5
+#define GPO_DSP_JTDOEN_PAD 6
+#define GPO_DSP_JTDO_PAD 7
+#define GPO_I2C0_PAD_SCK_OE 8
+#define GPO_I2C0_PAD_SCK_OEN (GPO_I2C0_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C0_PAD_SDA_OE 9
+#define GPO_I2C0_PAD_SDA_OEN (GPO_I2C0_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C1_PAD_SCK_OE 10
+#define GPO_I2C1_PAD_SCK_OEN (GPO_I2C1_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C1_PAD_SDA_OE 11
+#define GPO_I2C1_PAD_SDA_OEN (GPO_I2C1_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C2_PAD_SCK_OE 12
+#define GPO_I2C2_PAD_SCK_OEN (GPO_I2C2_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C2_PAD_SDA_OE 13
+#define GPO_I2C2_PAD_SDA_OEN (GPO_I2C2_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C3_PAD_SCK_OE 14
+#define GPO_I2C3_PAD_SCK_OEN (GPO_I2C3_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C3_PAD_SDA_OE 15
+#define GPO_I2C3_PAD_SDA_OEN (GPO_I2C3_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2SRX_BCLK_OUT 16
+#define GPO_I2SRX_BCLK_OUT_OEN 17
+#define GPO_I2SRX_LRCK_OUT 18
+#define GPO_I2SRX_LRCK_OUT_OEN 19
+#define GPO_I2SRX_MCLK_OUT 20
+#define GPO_I2STX_BCLK_OUT 21
+#define GPO_I2STX_BCLK_OUT_OEN 22
+#define GPO_I2STX_LRCK_OUT 23
+#define GPO_I2STX_LRCK_OUT_OEN 24
+#define GPO_I2STX_MCLK_OUT 25
+#define GPO_I2STX_SDOUT0 26
+#define GPO_I2STX_SDOUT1 27
+#define GPO_LCD_PAD_CSM_N 28
+#define GPO_PWM_PAD_OE_N_BIT0 29
+#define GPO_PWM_PAD_OE_N_BIT1 30
+#define GPO_PWM_PAD_OE_N_BIT2 31
+#define GPO_PWM_PAD_OE_N_BIT3 32
+#define GPO_PWM_PAD_OE_N_BIT4 33
+#define GPO_PWM_PAD_OE_N_BIT5 34
+#define GPO_PWM_PAD_OE_N_BIT6 35
+#define GPO_PWM_PAD_OE_N_BIT7 36
+#define GPO_PWM_PAD_OUT_BIT0 37
+#define GPO_PWM_PAD_OUT_BIT1 38
+#define GPO_PWM_PAD_OUT_BIT2 39
+#define GPO_PWM_PAD_OUT_BIT3 40
+#define GPO_PWM_PAD_OUT_BIT4 41
+#define GPO_PWM_PAD_OUT_BIT5 42
+#define GPO_PWM_PAD_OUT_BIT6 43
+#define GPO_PWM_PAD_OUT_BIT7 44
+#define GPO_PWMDAC_LEFT_OUT 45
+#define GPO_PWMDAC_RIGHT_OUT 46
+#define GPO_QSPI_CSN1_OUT 47
+#define GPO_QSPI_CSN2_OUT 48
+#define GPO_QSPI_CSN3_OUT 49
+#define GPO_REGISTER23_SCFG_CMSENSOR_RST0 50
+#define GPO_REGISTER23_SCFG_CMSENSOR_RST1 51
+#define GPO_REGISTER32_SCFG_GMAC_PHY_RSTN 52
+#define GPO_SDIO0_PAD_CARD_POWER_EN 53
+#define GPO_SDIO0_PAD_CCLK_OUT 54
+#define GPO_SDIO0_PAD_CCMD_OE 55
+#define GPO_SDIO0_PAD_CCMD_OEN (GPO_SDIO0_PAD_CCMD_OE | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CCMD_OUT 56
+#define GPO_SDIO0_PAD_CDATA_OE_BIT0 57
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT0 (GPO_SDIO0_PAD_CDATA_OE_BIT0 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT1 58
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT1 (GPO_SDIO0_PAD_CDATA_OE_BIT1 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT2 59
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT2 (GPO_SDIO0_PAD_CDATA_OE_BIT2 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT3 60
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT3 (GPO_SDIO0_PAD_CDATA_OE_BIT3 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT4 61
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT4 (GPO_SDIO0_PAD_CDATA_OE_BIT4 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT5 62
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT5 (GPO_SDIO0_PAD_CDATA_OE_BIT5 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT6 63
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT6 (GPO_SDIO0_PAD_CDATA_OE_BIT6 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT7 64
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT7 (GPO_SDIO0_PAD_CDATA_OE_BIT7 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT0 65
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT1 66
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT2 67
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT3 68
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT4 69
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT5 70
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT6 71
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT7 72
+#define GPO_SDIO0_PAD_RST_N 73
+#define GPO_SDIO1_PAD_CARD_POWER_EN 74
+#define GPO_SDIO1_PAD_CCLK_OUT 75
+#define GPO_SDIO1_PAD_CCMD_OE 76
+#define GPO_SDIO1_PAD_CCMD_OEN (GPO_SDIO1_PAD_CCMD_OE | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CCMD_OUT 77
+#define GPO_SDIO1_PAD_CDATA_OE_BIT0 78
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT0 (GPO_SDIO1_PAD_CDATA_OE_BIT0 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT1 79
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT1 (GPO_SDIO1_PAD_CDATA_OE_BIT1 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT2 80
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT2 (GPO_SDIO1_PAD_CDATA_OE_BIT2 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT3 81
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT3 (GPO_SDIO1_PAD_CDATA_OE_BIT3 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT4 82
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT4 (GPO_SDIO1_PAD_CDATA_OE_BIT4 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT5 83
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT5 (GPO_SDIO1_PAD_CDATA_OE_BIT5 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT6 84
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT6 (GPO_SDIO1_PAD_CDATA_OE_BIT6 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT7 85
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT7 (GPO_SDIO1_PAD_CDATA_OE_BIT7 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT0 86
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT1 87
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT2 88
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT3 89
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT4 90
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT5 91
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT6 92
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT7 93
+#define GPO_SDIO1_PAD_RST_N 94
+#define GPO_SPDIF_TX_SDOUT 95
+#define GPO_SPDIF_TX_SDOUT_OEN 96
+#define GPO_SPI0_PAD_OE_N 97
+#define GPO_SPI0_PAD_SCK_OUT 98
+#define GPO_SPI0_PAD_SS_0_N 99
+#define GPO_SPI0_PAD_SS_1_N 100
+#define GPO_SPI0_PAD_TXD 101
+#define GPO_SPI1_PAD_OE_N 102
+#define GPO_SPI1_PAD_SCK_OUT 103
+#define GPO_SPI1_PAD_SS_0_N 104
+#define GPO_SPI1_PAD_SS_1_N 105
+#define GPO_SPI1_PAD_TXD 106
+#define GPO_SPI2_PAD_OE_N 107
+#define GPO_SPI2_PAD_SCK_OUT 108
+#define GPO_SPI2_PAD_SS_0_N 109
+#define GPO_SPI2_PAD_SS_1_N 110
+#define GPO_SPI2_PAD_TXD 111
+#define GPO_SPI2AHB_PAD_OE_N_BIT0 112
+#define GPO_SPI2AHB_PAD_OE_N_BIT1 113
+#define GPO_SPI2AHB_PAD_OE_N_BIT2 114
+#define GPO_SPI2AHB_PAD_OE_N_BIT3 115
+#define GPO_SPI2AHB_PAD_TXD_BIT0 116
+#define GPO_SPI2AHB_PAD_TXD_BIT1 117
+#define GPO_SPI2AHB_PAD_TXD_BIT2 118
+#define GPO_SPI2AHB_PAD_TXD_BIT3 119
+#define GPO_SPI3_PAD_OE_N 120
+#define GPO_SPI3_PAD_SCK_OUT 121
+#define GPO_SPI3_PAD_SS_0_N 122
+#define GPO_SPI3_PAD_SS_1_N 123
+#define GPO_SPI3_PAD_TXD 124
+#define GPO_UART0_PAD_DTRN 125
+#define GPO_UART0_PAD_RTSN 126
+#define GPO_UART0_PAD_SOUT 127
+#define GPO_UART1_PAD_SOUT 128
+#define GPO_UART2_PAD_DTR_N 129
+#define GPO_UART2_PAD_RTS_N 130
+#define GPO_UART2_PAD_SOUT 131
+#define GPO_UART3_PAD_SOUT 132
+#define GPO_USB_DRV_BUS 133
+
+#define GPI_CPU_JTAG_TCK 0
+#define GPI_CPU_JTAG_TDI 1
+#define GPI_CPU_JTAG_TMS 2
+#define GPI_CPU_JTAG_TRST 3
+#define GPI_DMIC_SDIN_BIT0 4
+#define GPI_DMIC_SDIN_BIT1 5
+#define GPI_DSP_JTCK_PAD 6
+#define GPI_DSP_JTDI_PAD 7
+#define GPI_DSP_JTMS_PAD 8
+#define GPI_DSP_TRST_PAD 9
+#define GPI_I2C0_PAD_SCK_IN 10
+#define GPI_I2C0_PAD_SDA_IN 11
+#define GPI_I2C1_PAD_SCK_IN 12
+#define GPI_I2C1_PAD_SDA_IN 13
+#define GPI_I2C2_PAD_SCK_IN 14
+#define GPI_I2C2_PAD_SDA_IN 15
+#define GPI_I2C3_PAD_SCK_IN 16
+#define GPI_I2C3_PAD_SDA_IN 17
+#define GPI_I2SRX_BCLK_IN 18
+#define GPI_I2SRX_LRCK_IN 19
+#define GPI_I2SRX_SDIN_BIT0 20
+#define GPI_I2SRX_SDIN_BIT1 21
+#define GPI_I2SRX_SDIN_BIT2 22
+#define GPI_I2STX_BCLK_IN 23
+#define GPI_I2STX_LRCK_IN 24
+#define GPI_SDIO0_PAD_CARD_DETECT_N 25
+#define GPI_SDIO0_PAD_CARD_WRITE_PRT 26
+#define GPI_SDIO0_PAD_CCMD_IN 27
+#define GPI_SDIO0_PAD_CDATA_IN_BIT0 28
+#define GPI_SDIO0_PAD_CDATA_IN_BIT1 29
+#define GPI_SDIO0_PAD_CDATA_IN_BIT2 30
+#define GPI_SDIO0_PAD_CDATA_IN_BIT3 31
+#define GPI_SDIO0_PAD_CDATA_IN_BIT4 32
+#define GPI_SDIO0_PAD_CDATA_IN_BIT5 33
+#define GPI_SDIO0_PAD_CDATA_IN_BIT6 34
+#define GPI_SDIO0_PAD_CDATA_IN_BIT7 35
+#define GPI_SDIO1_PAD_CARD_DETECT_N 36
+#define GPI_SDIO1_PAD_CARD_WRITE_PRT 37
+#define GPI_SDIO1_PAD_CCMD_IN 38
+#define GPI_SDIO1_PAD_CDATA_IN_BIT0 39
+#define GPI_SDIO1_PAD_CDATA_IN_BIT1 40
+#define GPI_SDIO1_PAD_CDATA_IN_BIT2 41
+#define GPI_SDIO1_PAD_CDATA_IN_BIT3 42
+#define GPI_SDIO1_PAD_CDATA_IN_BIT4 43
+#define GPI_SDIO1_PAD_CDATA_IN_BIT5 44
+#define GPI_SDIO1_PAD_CDATA_IN_BIT6 45
+#define GPI_SDIO1_PAD_CDATA_IN_BIT7 46
+#define GPI_SPDIF_RX_SDIN 47
+#define GPI_SPI0_PAD_RXD 48
+#define GPI_SPI0_PAD_SS_IN_N 49
+#define GPI_SPI1_PAD_RXD 50
+#define GPI_SPI1_PAD_SS_IN_N 51
+#define GPI_SPI2_PAD_RXD 52
+#define GPI_SPI2_PAD_SS_IN_N 53
+#define GPI_SPI2AHB_PAD_RXD_BIT0 54
+#define GPI_SPI2AHB_PAD_RXD_BIT1 55
+#define GPI_SPI2AHB_PAD_RXD_BIT2 56
+#define GPI_SPI2AHB_PAD_RXD_BIT3 57
+#define GPI_SPI2AHB_PAD_SS_N 58
+#define GPI_SPI2AHB_SLV_SCLKIN 59
+#define GPI_SPI3_PAD_RXD 60
+#define GPI_SPI3_PAD_SS_IN_N 61
+#define GPI_UART0_PAD_CTSN 62
+#define GPI_UART0_PAD_DCDN 63
+#define GPI_UART0_PAD_DSRN 64
+#define GPI_UART0_PAD_RIN 65
+#define GPI_UART0_PAD_SIN 66
+#define GPI_UART1_PAD_SIN 67
+#define GPI_UART2_PAD_CTS_N 68
+#define GPI_UART2_PAD_DCD_N 69
+#define GPI_UART2_PAD_DSR_N 70
+#define GPI_UART2_PAD_RI_N 71
+#define GPI_UART2_PAD_SIN 72
+#define GPI_UART3_PAD_SIN 73
+#define GPI_USB_OVER_CURRENT 74
+
+#define GPI_NONE 0xff
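+
+/*
+ * A board DTS combines the signal indexes above with GPIOMUX(); for
+ * example (pin assignments illustrative only):
+ *
+ *   pinmux = <GPIOMUX(13, GPO_UART3_PAD_SOUT, GPO_ENABLE, GPI_NONE)>,
+ *            <GPIOMUX(14, GPO_LOW, GPO_DISABLE, GPI_UART3_PAD_SIN)>;
+ *
+ * routes UART3 TX out of GPIO 13 and feeds GPIO 14 back in as UART3 RX.
+ */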
+
+#endif /* __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynq.h b/include/dt-bindings/pinctrl/pinctrl-zynq.h
new file mode 100644
index 000000000000..bbfc345f017d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIO pin configuration defines for Xilinx Zynq
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQ_H
+#define _DT_BINDINGS_PINCTRL_ZYNQ_H
+
+/* Configuration options for different power supplies */
+#define IO_STANDARD_LVCMOS18 1
+#define IO_STANDARD_LVCMOS25 2
+#define IO_STANDARD_LVCMOS33 3
+#define IO_STANDARD_HSTL 4
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQ_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynqmp.h b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
new file mode 100644
index 000000000000..cdb215734bdf
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIO pin configuration defines for Xilinx ZynqMP
+ *
+ * Copyright (C) 2020 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQMP_H
+#define _DT_BINDINGS_PINCTRL_ZYNQMP_H
+
+/* Bit value for different voltage levels */
+#define IO_STANDARD_LVCMOS33 0
+#define IO_STANDARD_LVCMOS18 1
+
+/* Bit values for Slew Rates */
+#define SLEW_RATE_FAST 0
+#define SLEW_RATE_SLOW 1
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQMP_H */
diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
index 2d0c23e5d3a7..8736ce038eca 100644
--- a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
+++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
@@ -42,6 +42,6 @@
/*
* Convert a port and pin label to its global pin index
*/
- #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
+#define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */
diff --git a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
new file mode 100644
index 000000000000..c78ed5e5efb7
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family pinctrl bindings.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZG2L_PINCTRL_H
+#define __DT_BINDINGS_RZG2L_PINCTRL_H
+
+#define RZG2L_PINS_PER_PORT 8
+
+/*
+ * Create the pin index from its bank and position numbers, and store the
+ * alternate function identifier in the upper 16 bits.
+ */
+#define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin))
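+
+/*
+ * For example, RZG2L_PORT_PINMUX(43, 3, 2) selects port 43, pin 3,
+ * alternate function 2: (43 * 8 + 3) | (2 << 16) == 0x2015b, and
+ * RZG2L_GPIO(43, 3) gives the matching global pin index 347.
+ */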
+
+#endif /* __DT_BINDINGS_RZG2L_PINCTRL_H */
diff --git a/include/dt-bindings/pinctrl/rzv2m-pinctrl.h b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
new file mode 100644
index 000000000000..525532cd15da
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2M pinctrl bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZV2M_PINCTRL_H
+#define __DT_BINDINGS_RZV2M_PINCTRL_H
+
+#define RZV2M_PINS_PER_PORT 16
+
+/*
+ * Create the pin index from its bank and position numbers, and store the
+ * alternate function identifier in the upper 16 bits.
+ */
+#define RZV2M_PORT_PINMUX(b, p, f) ((b) * RZV2M_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZV2M_GPIO(port, pin) ((port) * RZV2M_PINS_PER_PORT + (pin))
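+
+/*
+ * For example, RZV2M_PORT_PINMUX(3, 5, 2) encodes port 3, pin 5, function 2;
+ * its low 16 bits equal RZV2M_GPIO(3, 5) = 53.
+ */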
+
+#endif /* __DT_BINDINGS_RZV2M_PINCTRL_H */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index b1832506b923..d1da5ff68d0c 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -10,6 +10,13 @@
#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+/*
+ * These bindings are deprecated: they do not match the actual concept of
+ * bindings, but rather contain pure register values. Include the header
+ * from the DTS source directory instead.
+ */
+#warning "These bindings are deprecated. Instead use the header in the DTS source directory."
+
#define EXYNOS_PIN_PULL_NONE 0
#define EXYNOS_PIN_PULL_DOWN 1
#define EXYNOS_PIN_PULL_UP 3
@@ -36,7 +43,10 @@
#define EXYNOS5260_PIN_DRV_LV4 2
#define EXYNOS5260_PIN_DRV_LV6 3
-/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */
+/*
+ * Drive strengths for Exynos5410, Exynos542x, Exynos5800 and Exynos850 (except
+ * GPIO_HSI block)
+ */
#define EXYNOS5420_PIN_DRV_LV1 0
#define EXYNOS5420_PIN_DRV_LV2 1
#define EXYNOS5420_PIN_DRV_LV3 2
@@ -56,6 +66,14 @@
#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc
#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf
+/* Drive strengths for Exynos850 GPIO_HSI block */
+#define EXYNOS850_HSI_PIN_DRV_LV1 0 /* 1x */
+#define EXYNOS850_HSI_PIN_DRV_LV1_5 1 /* 1.5x */
+#define EXYNOS850_HSI_PIN_DRV_LV2 2 /* 2x */
+#define EXYNOS850_HSI_PIN_DRV_LV2_5 3 /* 2.5x */
+#define EXYNOS850_HSI_PIN_DRV_LV3 4 /* 3x */
+#define EXYNOS850_HSI_PIN_DRV_LV4 5 /* 4x */
+
#define EXYNOS_PIN_FUNC_INPUT 0
#define EXYNOS_PIN_FUNC_OUTPUT 1
#define EXYNOS_PIN_FUNC_2 2
diff --git a/include/dt-bindings/pinctrl/sppctl-sp7021.h b/include/dt-bindings/pinctrl/sppctl-sp7021.h
new file mode 100644
index 000000000000..629aa9b5ffbc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sppctl-sp7021.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Sunplus SP7021 dt-bindings Pinctrl header file
+ * Copyright (C) Sunplus Tech/Tibbo Tech.
+ * Author: Dvorkin Dmitry <dvorkin@tibbo.com>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SPPCTL_SP7021_H__
+#define __DT_BINDINGS_PINCTRL_SPPCTL_SP7021_H__
+
+#include <dt-bindings/pinctrl/sppctl.h>
+
+/*
+ * Please don't change the order of the following defines.
+ * They follow the order of the hardware control registers
+ * defined in MOON2 ~ MOON3.
+ */
+#define MUXF_GPIO 0
+#define MUXF_IOP 1
+#define MUXF_L2SW_CLK_OUT 2
+#define MUXF_L2SW_MAC_SMI_MDC 3
+#define MUXF_L2SW_LED_FLASH0 4
+#define MUXF_L2SW_LED_FLASH1 5
+#define MUXF_L2SW_LED_ON0 6
+#define MUXF_L2SW_LED_ON1 7
+#define MUXF_L2SW_MAC_SMI_MDIO 8
+#define MUXF_L2SW_P0_MAC_RMII_TXEN 9
+#define MUXF_L2SW_P0_MAC_RMII_TXD0 10
+#define MUXF_L2SW_P0_MAC_RMII_TXD1 11
+#define MUXF_L2SW_P0_MAC_RMII_CRSDV 12
+#define MUXF_L2SW_P0_MAC_RMII_RXD0 13
+#define MUXF_L2SW_P0_MAC_RMII_RXD1 14
+#define MUXF_L2SW_P0_MAC_RMII_RXER 15
+#define MUXF_L2SW_P1_MAC_RMII_TXEN 16
+#define MUXF_L2SW_P1_MAC_RMII_TXD0 17
+#define MUXF_L2SW_P1_MAC_RMII_TXD1 18
+#define MUXF_L2SW_P1_MAC_RMII_CRSDV 19
+#define MUXF_L2SW_P1_MAC_RMII_RXD0 20
+#define MUXF_L2SW_P1_MAC_RMII_RXD1 21
+#define MUXF_L2SW_P1_MAC_RMII_RXER 22
+#define MUXF_DAISY_MODE 23
+#define MUXF_SDIO_CLK 24
+#define MUXF_SDIO_CMD 25
+#define MUXF_SDIO_D0 26
+#define MUXF_SDIO_D1 27
+#define MUXF_SDIO_D2 28
+#define MUXF_SDIO_D3 29
+#define MUXF_PWM0 30
+#define MUXF_PWM1 31
+#define MUXF_PWM2 32
+#define MUXF_PWM3 33
+#define MUXF_PWM4 34
+#define MUXF_PWM5 35
+#define MUXF_PWM6 36
+#define MUXF_PWM7 37
+#define MUXF_ICM0_D 38
+#define MUXF_ICM1_D 39
+#define MUXF_ICM2_D 40
+#define MUXF_ICM3_D 41
+#define MUXF_ICM0_CLK 42
+#define MUXF_ICM1_CLK 43
+#define MUXF_ICM2_CLK 44
+#define MUXF_ICM3_CLK 45
+#define MUXF_SPIM0_INT 46
+#define MUXF_SPIM0_CLK 47
+#define MUXF_SPIM0_EN 48
+#define MUXF_SPIM0_DO 49
+#define MUXF_SPIM0_DI 50
+#define MUXF_SPIM1_INT 51
+#define MUXF_SPIM1_CLK 52
+#define MUXF_SPIM1_EN 53
+#define MUXF_SPIM1_DO 54
+#define MUXF_SPIM1_DI 55
+#define MUXF_SPIM2_INT 56
+#define MUXF_SPIM2_CLK 57
+#define MUXF_SPIM2_EN 58
+#define MUXF_SPIM2_DO 59
+#define MUXF_SPIM2_DI 60
+#define MUXF_SPIM3_INT 61
+#define MUXF_SPIM3_CLK 62
+#define MUXF_SPIM3_EN 63
+#define MUXF_SPIM3_DO 64
+#define MUXF_SPIM3_DI 65
+#define MUXF_SPI0S_INT 66
+#define MUXF_SPI0S_CLK 67
+#define MUXF_SPI0S_EN 68
+#define MUXF_SPI0S_DO 69
+#define MUXF_SPI0S_DI 70
+#define MUXF_SPI1S_INT 71
+#define MUXF_SPI1S_CLK 72
+#define MUXF_SPI1S_EN 73
+#define MUXF_SPI1S_DO 74
+#define MUXF_SPI1S_DI 75
+#define MUXF_SPI2S_INT 76
+#define MUXF_SPI2S_CLK 77
+#define MUXF_SPI2S_EN 78
+#define MUXF_SPI2S_DO 79
+#define MUXF_SPI2S_DI 80
+#define MUXF_SPI3S_INT 81
+#define MUXF_SPI3S_CLK 82
+#define MUXF_SPI3S_EN 83
+#define MUXF_SPI3S_DO 84
+#define MUXF_SPI3S_DI 85
+#define MUXF_I2CM0_CLK 86
+#define MUXF_I2CM0_DAT 87
+#define MUXF_I2CM1_CLK 88
+#define MUXF_I2CM1_DAT 89
+#define MUXF_I2CM2_CLK 90
+#define MUXF_I2CM2_DAT 91
+#define MUXF_I2CM3_CLK 92
+#define MUXF_I2CM3_DAT 93
+#define MUXF_UA1_TX 94
+#define MUXF_UA1_RX 95
+#define MUXF_UA1_CTS 96
+#define MUXF_UA1_RTS 97
+#define MUXF_UA2_TX 98
+#define MUXF_UA2_RX 99
+#define MUXF_UA2_CTS 100
+#define MUXF_UA2_RTS 101
+#define MUXF_UA3_TX 102
+#define MUXF_UA3_RX 103
+#define MUXF_UA3_CTS 104
+#define MUXF_UA3_RTS 105
+#define MUXF_UA4_TX 106
+#define MUXF_UA4_RX 107
+#define MUXF_UA4_CTS 108
+#define MUXF_UA4_RTS 109
+#define MUXF_TIMER0_INT 110
+#define MUXF_TIMER1_INT 111
+#define MUXF_TIMER2_INT 112
+#define MUXF_TIMER3_INT 113
+#define MUXF_GPIO_INT0 114
+#define MUXF_GPIO_INT1 115
+#define MUXF_GPIO_INT2 116
+#define MUXF_GPIO_INT3 117
+#define MUXF_GPIO_INT4 118
+#define MUXF_GPIO_INT5 119
+#define MUXF_GPIO_INT6 120
+#define MUXF_GPIO_INT7 121
+
+/*
+ * Please don't change the order of the following defines.
+ * They follow the order of the items in the array 'sppctl_list_funcs'
+ * in the Sunplus pinctrl driver.
+ */
+#define GROP_SPI_FLASH 122
+#define GROP_SPI_FLASH_4BIT 123
+#define GROP_SPI_NAND 124
+#define GROP_CARD0_EMMC 125
+#define GROP_SD_CARD 126
+#define GROP_UA0 127
+#define GROP_ACHIP_DEBUG 128
+#define GROP_ACHIP_UA2AXI 129
+#define GROP_FPGA_IFX 130
+#define GROP_HDMI_TX 131
+#define GROP_AUD_EXT_ADC_IFX0 132
+#define GROP_AUD_EXT_DAC_IFX0 133
+#define GROP_SPDIF_RX 134
+#define GROP_SPDIF_TX 135
+#define GROP_TDMTX_IFX0 136
+#define GROP_TDMRX_IFX0 137
+#define GROP_PDMRX_IFX0 138
+#define GROP_PCM_IEC_TX 139
+#define GROP_LCDIF 140
+#define GROP_DVD_DSP_DEBUG 141
+#define GROP_I2C_DEBUG 142
+#define GROP_I2C_SLAVE 143
+#define GROP_WAKEUP 144
+#define GROP_UART2AXI 145
+#define GROP_USB0_I2C 146
+#define GROP_USB1_I2C 147
+#define GROP_USB0_OTG 148
+#define GROP_USB1_OTG 149
+#define GROP_UPHY0_DEBUG 150
+#define GROP_UPHY1_DEBUG 151
+#define GROP_UPHY0_EXT 152
+#define GROP_PROBE_PORT 153
+
+#endif
diff --git a/include/dt-bindings/pinctrl/sppctl.h b/include/dt-bindings/pinctrl/sppctl.h
new file mode 100644
index 000000000000..50557265dbfc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sppctl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Sunplus dt-bindings Pinctrl header file
+ * Copyright (C) Sunplus Tech / Tibbo Tech.
+ * Author: Dvorkin Dmitry <dvorkin@tibbo.com>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SPPCTL_H__
+#define __DT_BINDINGS_PINCTRL_SPPCTL_H__
+
+#define IOP_G_MASTE (0x01 << 0)
+#define IOP_G_FIRST (0x01 << 1)
+
+#define SPPCTL_PCTL_G_PMUX (0x00 | IOP_G_MASTE)
+#define SPPCTL_PCTL_G_GPIO (IOP_G_FIRST | IOP_G_MASTE)
+#define SPPCTL_PCTL_G_IOPP (IOP_G_FIRST | 0x00)
+
+#define SPPCTL_PCTL_L_OUT (0x01 << 0) /* Output LOW */
+#define SPPCTL_PCTL_L_OU1 (0x01 << 1) /* Output HIGH */
+#define SPPCTL_PCTL_L_INV (0x01 << 2) /* Input Invert */
+#define SPPCTL_PCTL_L_ONV (0x01 << 3) /* Output Invert */
+#define SPPCTL_PCTL_L_ODR (0x01 << 4) /* Output Open Drain */
+
+/*
+ * Pack into a 32-bit value:
+ * pin# (8 bits), typ (8 bits), function (8 bits), flag (8 bits)
+ */
+#define SPPCTL_IOPAD(pin, typ, fun, flg) (((pin) << 24) | ((typ) << 16) | \
+ ((fun) << 8) | (flg))
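+
+/*
+ * For example, SPPCTL_IOPAD(11, SPPCTL_PCTL_G_GPIO, 0, SPPCTL_PCTL_L_ODR)
+ * describes pin 11 as an open-drain GPIO and evaluates to 0x0b030010.
+ */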
+
+#endif
diff --git a/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h b/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h
new file mode 100644
index 000000000000..3865f0139639
--- /dev/null
+++ b/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7110_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7110_H__
+
+/* sys_iomux pins */
+#define PAD_GPIO0 0
+#define PAD_GPIO1 1
+#define PAD_GPIO2 2
+#define PAD_GPIO3 3
+#define PAD_GPIO4 4
+#define PAD_GPIO5 5
+#define PAD_GPIO6 6
+#define PAD_GPIO7 7
+#define PAD_GPIO8 8
+#define PAD_GPIO9 9
+#define PAD_GPIO10 10
+#define PAD_GPIO11 11
+#define PAD_GPIO12 12
+#define PAD_GPIO13 13
+#define PAD_GPIO14 14
+#define PAD_GPIO15 15
+#define PAD_GPIO16 16
+#define PAD_GPIO17 17
+#define PAD_GPIO18 18
+#define PAD_GPIO19 19
+#define PAD_GPIO20 20
+#define PAD_GPIO21 21
+#define PAD_GPIO22 22
+#define PAD_GPIO23 23
+#define PAD_GPIO24 24
+#define PAD_GPIO25 25
+#define PAD_GPIO26 26
+#define PAD_GPIO27 27
+#define PAD_GPIO28 28
+#define PAD_GPIO29 29
+#define PAD_GPIO30 30
+#define PAD_GPIO31 31
+#define PAD_GPIO32 32
+#define PAD_GPIO33 33
+#define PAD_GPIO34 34
+#define PAD_GPIO35 35
+#define PAD_GPIO36 36
+#define PAD_GPIO37 37
+#define PAD_GPIO38 38
+#define PAD_GPIO39 39
+#define PAD_GPIO40 40
+#define PAD_GPIO41 41
+#define PAD_GPIO42 42
+#define PAD_GPIO43 43
+#define PAD_GPIO44 44
+#define PAD_GPIO45 45
+#define PAD_GPIO46 46
+#define PAD_GPIO47 47
+#define PAD_GPIO48 48
+#define PAD_GPIO49 49
+#define PAD_GPIO50 50
+#define PAD_GPIO51 51
+#define PAD_GPIO52 52
+#define PAD_GPIO53 53
+#define PAD_GPIO54 54
+#define PAD_GPIO55 55
+#define PAD_GPIO56 56
+#define PAD_GPIO57 57
+#define PAD_GPIO58 58
+#define PAD_GPIO59 59
+#define PAD_GPIO60 60
+#define PAD_GPIO61 61
+#define PAD_GPIO62 62
+#define PAD_GPIO63 63
+#define PAD_SD0_CLK 64
+#define PAD_SD0_CMD 65
+#define PAD_SD0_DATA0 66
+#define PAD_SD0_DATA1 67
+#define PAD_SD0_DATA2 68
+#define PAD_SD0_DATA3 69
+#define PAD_SD0_DATA4 70
+#define PAD_SD0_DATA5 71
+#define PAD_SD0_DATA6 72
+#define PAD_SD0_DATA7 73
+#define PAD_SD0_STRB 74
+#define PAD_GMAC1_MDC 75
+#define PAD_GMAC1_MDIO 76
+#define PAD_GMAC1_RXD0 77
+#define PAD_GMAC1_RXD1 78
+#define PAD_GMAC1_RXD2 79
+#define PAD_GMAC1_RXD3 80
+#define PAD_GMAC1_RXDV 81
+#define PAD_GMAC1_RXC 82
+#define PAD_GMAC1_TXD0 83
+#define PAD_GMAC1_TXD1 84
+#define PAD_GMAC1_TXD2 85
+#define PAD_GMAC1_TXD3 86
+#define PAD_GMAC1_TXEN 87
+#define PAD_GMAC1_TXC 88
+#define PAD_QSPI_SCLK 89
+#define PAD_QSPI_CS0 90
+#define PAD_QSPI_DATA0 91
+#define PAD_QSPI_DATA1 92
+#define PAD_QSPI_DATA2 93
+#define PAD_QSPI_DATA3 94
+
+/* aon_iomux pins */
+#define PAD_TESTEN 0
+#define PAD_RGPIO0 1
+#define PAD_RGPIO1 2
+#define PAD_RGPIO2 3
+#define PAD_RGPIO3 4
+#define PAD_RSTN 5
+#define PAD_GMAC0_MDC 6
+#define PAD_GMAC0_MDIO 7
+#define PAD_GMAC0_RXD0 8
+#define PAD_GMAC0_RXD1 9
+#define PAD_GMAC0_RXD2 10
+#define PAD_GMAC0_RXD3 11
+#define PAD_GMAC0_RXDV 12
+#define PAD_GMAC0_RXC 13
+#define PAD_GMAC0_TXD0 14
+#define PAD_GMAC0_TXD1 15
+#define PAD_GMAC0_TXD2 16
+#define PAD_GMAC0_TXD3 17
+#define PAD_GMAC0_TXEN 18
+#define PAD_GMAC0_TXC 19
+
+#define GPOUT_LOW 0
+#define GPOUT_HIGH 1
+
+#define GPOEN_ENABLE 0
+#define GPOEN_DISABLE 1
+
+#define GPI_NONE 255
+
+#endif
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index e6fb8ada3f4d..28ad0235086a 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -37,6 +37,9 @@
#define STM32MP_PKG_AB 0x2
#define STM32MP_PKG_AC 0x4
#define STM32MP_PKG_AD 0x8
+#define STM32MP_PKG_AI 0x100
+#define STM32MP_PKG_AK 0x400
+#define STM32MP_PKG_AL 0x800
#endif /* _DT_BINDINGS_STM32_PINFUNC_H */
diff --git a/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
new file mode 100644
index 000000000000..23cfb57256d6
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+#define _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+
+#define PD_CPU 0
+#define PD_VE 1
+#define PD_DSP 2
+
+#endif /* _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_ */
diff --git a/include/dt-bindings/power/amlogic,c3-pwrc.h b/include/dt-bindings/power/amlogic,c3-pwrc.h
new file mode 100644
index 000000000000..61759df4b2e7
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,c3-pwrc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: hongyu chen1 <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_C3_POWER_H
+#define _DT_BINDINGS_AMLOGIC_C3_POWER_H
+
+#define PWRC_C3_NNA_ID 0
+#define PWRC_C3_AUDIO_ID 1
+#define PWRC_C3_RESV_SEC_ID 2
+#define PWRC_C3_SDIOA_ID 3
+#define PWRC_C3_EMMC_ID 4
+#define PWRC_C3_USB_COMB_ID 5
+#define PWRC_C3_SDCARD_ID 6
+#define PWRC_C3_ETH_ID 7
+#define PWRC_C3_RESV0_ID 8
+#define PWRC_C3_GE2D_ID 9
+#define PWRC_C3_CVE_ID 10
+#define PWRC_C3_GDC_WRAP_ID 11
+#define PWRC_C3_ISP_TOP_ID 12
+#define PWRC_C3_MIPI_ISP_WRAP_ID 13
+#define PWRC_C3_VCODEC_ID 14
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,t7-pwrc.h b/include/dt-bindings/power/amlogic,t7-pwrc.h
new file mode 100644
index 000000000000..1f1f2739cc26
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,t7-pwrc.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: Hongyu Chen <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_T7_POWER_H
+#define _DT_BINDINGS_AMLOGIC_T7_POWER_H
+
+#define PWRC_T7_DSPA_ID 0
+#define PWRC_T7_DSPB_ID 1
+#define PWRC_T7_DOS_HCODEC_ID 2
+#define PWRC_T7_DOS_HEVC_ID 3
+#define PWRC_T7_DOS_VDEC_ID 4
+#define PWRC_T7_DOS_WAVE_ID 5
+#define PWRC_T7_VPU_HDMI_ID 6
+#define PWRC_T7_USB_COMB_ID 7
+#define PWRC_T7_PCIE_ID 8
+#define PWRC_T7_GE2D_ID 9
+#define PWRC_T7_SRAMA_ID 10
+#define PWRC_T7_SRAMB_ID 11
+#define PWRC_T7_HDMIRX_ID 12
+#define PWRC_T7_VI_CLK1_ID 13
+#define PWRC_T7_VI_CLK2_ID 14
+#define PWRC_T7_ETH_ID 15
+#define PWRC_T7_ISP_ID 16
+#define PWRC_T7_MIPI_ISP_ID 17
+#define PWRC_T7_GDC_ID 18
+#define PWRC_T7_CVE_ID 18
+#define PWRC_T7_DEWARP_ID 19
+#define PWRC_T7_SDIO_A_ID 20
+#define PWRC_T7_SDIO_B_ID 21
+#define PWRC_T7_EMMC_ID 22
+#define PWRC_T7_MALI_SC0_ID 23
+#define PWRC_T7_MALI_SC1_ID 24
+#define PWRC_T7_MALI_SC2_ID 25
+#define PWRC_T7_MALI_SC3_ID 26
+#define PWRC_T7_MALI_TOP_ID 27
+#define PWRC_T7_NNA_CORE0_ID 28
+#define PWRC_T7_NNA_CORE1_ID 29
+#define PWRC_T7_NNA_CORE2_ID 30
+#define PWRC_T7_NNA_CORE3_ID 31
+#define PWRC_T7_NNA_TOP_ID 32
+#define PWRC_T7_DDR0_ID 33
+#define PWRC_T7_DDR1_ID 34
+#define PWRC_T7_DMC0_ID 35
+#define PWRC_T7_DMC1_ID 36
+#define PWRC_T7_NOC_ID 37
+#define PWRC_T7_NIC2_ID 38
+#define PWRC_T7_NIC3_ID 39
+#define PWRC_T7_CCI_ID 40
+#define PWRC_T7_MIPI_DSI0_ID 41
+#define PWRC_T7_SPICC0_ID 42
+#define PWRC_T7_SPICC1_ID 43
+#define PWRC_T7_SPICC2_ID 44
+#define PWRC_T7_SPICC3_ID 45
+#define PWRC_T7_SPICC4_ID 46
+#define PWRC_T7_SPICC5_ID 47
+#define PWRC_T7_EDP0_ID 48
+#define PWRC_T7_EDP1_ID 49
+#define PWRC_T7_MIPI_DSI1_ID 50
+#define PWRC_T7_AUDIO_ID 51
+
+#endif
diff --git a/include/dt-bindings/power/fsl,imx93-power.h b/include/dt-bindings/power/fsl,imx93-power.h
new file mode 100644
index 000000000000..17f9f015bf7d
--- /dev/null
+++ b/include/dt-bindings/power/fsl,imx93-power.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX93_POWER_H__
+#define __DT_BINDINGS_IMX93_POWER_H__
+
+#define IMX93_MEDIABLK_PD_MIPI_DSI 0
+#define IMX93_MEDIABLK_PD_MIPI_CSI 1
+#define IMX93_MEDIABLK_PD_PXP 2
+#define IMX93_MEDIABLK_PD_LCDIF 3
+#define IMX93_MEDIABLK_PD_ISI 4
+
+#endif
diff --git a/include/dt-bindings/power/imx8mm-power.h b/include/dt-bindings/power/imx8mm-power.h
new file mode 100644
index 000000000000..648938f24c8e
--- /dev/null
+++ b/include/dt-bindings/power/imx8mm-power.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MM_POWER_H__
+#define __DT_BINDINGS_IMX8MM_POWER_H__
+
+#define IMX8MM_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MM_POWER_DOMAIN_PCIE 1
+#define IMX8MM_POWER_DOMAIN_OTG1 2
+#define IMX8MM_POWER_DOMAIN_OTG2 3
+#define IMX8MM_POWER_DOMAIN_GPUMIX 4
+#define IMX8MM_POWER_DOMAIN_GPU 5
+#define IMX8MM_POWER_DOMAIN_VPUMIX 6
+#define IMX8MM_POWER_DOMAIN_VPUG1 7
+#define IMX8MM_POWER_DOMAIN_VPUG2 8
+#define IMX8MM_POWER_DOMAIN_VPUH1 9
+#define IMX8MM_POWER_DOMAIN_DISPMIX 10
+#define IMX8MM_POWER_DOMAIN_MIPI 11
+
+#define IMX8MM_VPUBLK_PD_G1 0
+#define IMX8MM_VPUBLK_PD_G2 1
+#define IMX8MM_VPUBLK_PD_H1 2
+
+#define IMX8MM_DISPBLK_PD_CSI_BRIDGE 0
+#define IMX8MM_DISPBLK_PD_LCDIF 1
+#define IMX8MM_DISPBLK_PD_MIPI_DSI 2
+#define IMX8MM_DISPBLK_PD_MIPI_CSI 3
+
+#endif
diff --git a/include/dt-bindings/power/imx8mn-power.h b/include/dt-bindings/power/imx8mn-power.h
new file mode 100644
index 000000000000..eedd0e581939
--- /dev/null
+++ b/include/dt-bindings/power/imx8mn-power.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Compass Electronics Group, LLC
+ */
+
+#ifndef __DT_BINDINGS_IMX8MN_POWER_H__
+#define __DT_BINDINGS_IMX8MN_POWER_H__
+
+#define IMX8MN_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MN_POWER_DOMAIN_OTG1 1
+#define IMX8MN_POWER_DOMAIN_GPUMIX 2
+#define IMX8MN_POWER_DOMAIN_DISPMIX 3
+#define IMX8MN_POWER_DOMAIN_MIPI 4
+
+#define IMX8MN_DISPBLK_PD_MIPI_DSI 0
+#define IMX8MN_DISPBLK_PD_MIPI_CSI 1
+#define IMX8MN_DISPBLK_PD_LCDIF 2
+#define IMX8MN_DISPBLK_PD_ISI 3
+
+#endif
diff --git a/include/dt-bindings/power/imx8mp-power.h b/include/dt-bindings/power/imx8mp-power.h
new file mode 100644
index 000000000000..2fe3c2abad13
--- /dev/null
+++ b/include/dt-bindings/power/imx8mp-power.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__
+#define __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__
+
+#define IMX8MP_POWER_DOMAIN_MIPI_PHY1 0
+#define IMX8MP_POWER_DOMAIN_PCIE_PHY 1
+#define IMX8MP_POWER_DOMAIN_USB1_PHY 2
+#define IMX8MP_POWER_DOMAIN_USB2_PHY 3
+#define IMX8MP_POWER_DOMAIN_MLMIX 4
+#define IMX8MP_POWER_DOMAIN_AUDIOMIX 5
+#define IMX8MP_POWER_DOMAIN_GPU2D 6
+#define IMX8MP_POWER_DOMAIN_GPUMIX 7
+#define IMX8MP_POWER_DOMAIN_VPUMIX 8
+#define IMX8MP_POWER_DOMAIN_GPU3D 9
+#define IMX8MP_POWER_DOMAIN_MEDIAMIX 10
+#define IMX8MP_POWER_DOMAIN_VPU_G1 11
+#define IMX8MP_POWER_DOMAIN_VPU_G2 12
+#define IMX8MP_POWER_DOMAIN_VPU_VC8000E 13
+#define IMX8MP_POWER_DOMAIN_HDMIMIX 14
+#define IMX8MP_POWER_DOMAIN_HDMI_PHY 15
+#define IMX8MP_POWER_DOMAIN_MIPI_PHY2 16
+#define IMX8MP_POWER_DOMAIN_HSIOMIX 17
+#define IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP 18
+
+#define IMX8MP_HSIOBLK_PD_USB 0
+#define IMX8MP_HSIOBLK_PD_USB_PHY1 1
+#define IMX8MP_HSIOBLK_PD_USB_PHY2 2
+#define IMX8MP_HSIOBLK_PD_PCIE 3
+#define IMX8MP_HSIOBLK_PD_PCIE_PHY 4
+
+#define IMX8MP_MEDIABLK_PD_MIPI_DSI_1 0
+#define IMX8MP_MEDIABLK_PD_MIPI_CSI2_1 1
+#define IMX8MP_MEDIABLK_PD_LCDIF_1 2
+#define IMX8MP_MEDIABLK_PD_ISI 3
+#define IMX8MP_MEDIABLK_PD_MIPI_CSI2_2 4
+#define IMX8MP_MEDIABLK_PD_LCDIF_2 5
+#define IMX8MP_MEDIABLK_PD_ISP 6
+#define IMX8MP_MEDIABLK_PD_DWE 7
+#define IMX8MP_MEDIABLK_PD_MIPI_DSI_2 8
+
+#define IMX8MP_HDMIBLK_PD_IRQSTEER 0
+#define IMX8MP_HDMIBLK_PD_LCDIF 1
+#define IMX8MP_HDMIBLK_PD_PAI 2
+#define IMX8MP_HDMIBLK_PD_PVI 3
+#define IMX8MP_HDMIBLK_PD_TRNG 4
+#define IMX8MP_HDMIBLK_PD_HDMI_TX 5
+#define IMX8MP_HDMIBLK_PD_HDMI_TX_PHY 6
+#define IMX8MP_HDMIBLK_PD_HDCP 7
+#define IMX8MP_HDMIBLK_PD_HRV 8
+
+#define IMX8MP_VPUBLK_PD_G1 0
+#define IMX8MP_VPUBLK_PD_G2 1
+#define IMX8MP_VPUBLK_PD_VC8000E 2
+
+#endif
diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h
index 8a513bd9166e..9f7d0f1e7c32 100644
--- a/include/dt-bindings/power/imx8mq-power.h
+++ b/include/dt-bindings/power/imx8mq-power.h
@@ -18,4 +18,7 @@
#define IMX8M_POWER_DOMAIN_MIPI_CSI2 9
#define IMX8M_POWER_DOMAIN_PCIE2 10
+#define IMX8MQ_VPUBLK_PD_G1 0
+#define IMX8MQ_VPUBLK_PD_G2 1
+
#endif
diff --git a/include/dt-bindings/power/imx8ulp-power.h b/include/dt-bindings/power/imx8ulp-power.h
new file mode 100644
index 000000000000..a556b2e96df1
--- /dev/null
+++ b/include/dt-bindings/power/imx8ulp-power.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX8ULP_POWER_H__
+#define __DT_BINDINGS_IMX8ULP_POWER_H__
+
+#define IMX8ULP_PD_DMA1 0
+#define IMX8ULP_PD_FLEXSPI2 1
+#define IMX8ULP_PD_USB0 2
+#define IMX8ULP_PD_USDHC0 3
+#define IMX8ULP_PD_USDHC1 4
+#define IMX8ULP_PD_USDHC2_USB1 5
+#define IMX8ULP_PD_DCNANO 6
+#define IMX8ULP_PD_EPDC 7
+#define IMX8ULP_PD_DMA2 8
+#define IMX8ULP_PD_GPU2D 9
+#define IMX8ULP_PD_GPU3D 10
+#define IMX8ULP_PD_HIFI4 11
+#define IMX8ULP_PD_ISI 12
+#define IMX8ULP_PD_MIPI_CSI 13
+#define IMX8ULP_PD_MIPI_DSI 14
+#define IMX8ULP_PD_PXP 15
+
+#endif
diff --git a/include/dt-bindings/power/mediatek,mt8188-power.h b/include/dt-bindings/power/mediatek,mt8188-power.h
new file mode 100644
index 000000000000..57e75cf3aa2c
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8188-power.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8188_POWER_H
+#define _DT_BINDINGS_POWER_MT8188_POWER_H
+
+#define MT8188_POWER_DOMAIN_MFG0 0
+#define MT8188_POWER_DOMAIN_MFG1 1
+#define MT8188_POWER_DOMAIN_MFG2 2
+#define MT8188_POWER_DOMAIN_MFG3 3
+#define MT8188_POWER_DOMAIN_MFG4 4
+#define MT8188_POWER_DOMAIN_PEXTP_MAC_P0 5
+#define MT8188_POWER_DOMAIN_PEXTP_PHY_TOP 6
+#define MT8188_POWER_DOMAIN_CSIRX_TOP 7
+#define MT8188_POWER_DOMAIN_ETHER 8
+#define MT8188_POWER_DOMAIN_HDMI_TX 9
+#define MT8188_POWER_DOMAIN_ADSP_AO 10
+#define MT8188_POWER_DOMAIN_ADSP_INFRA 11
+#define MT8188_POWER_DOMAIN_ADSP 12
+#define MT8188_POWER_DOMAIN_AUDIO 13
+#define MT8188_POWER_DOMAIN_AUDIO_ASRC 14
+#define MT8188_POWER_DOMAIN_VPPSYS0 15
+#define MT8188_POWER_DOMAIN_VDOSYS0 16
+#define MT8188_POWER_DOMAIN_VDOSYS1 17
+#define MT8188_POWER_DOMAIN_DP_TX 18
+#define MT8188_POWER_DOMAIN_EDP_TX 19
+#define MT8188_POWER_DOMAIN_VPPSYS1 20
+#define MT8188_POWER_DOMAIN_WPE 21
+#define MT8188_POWER_DOMAIN_VDEC0 22
+#define MT8188_POWER_DOMAIN_VDEC1 23
+#define MT8188_POWER_DOMAIN_VENC 24
+#define MT8188_POWER_DOMAIN_IMG_VCORE 25
+#define MT8188_POWER_DOMAIN_IMG_MAIN 26
+#define MT8188_POWER_DOMAIN_DIP 27
+#define MT8188_POWER_DOMAIN_IPE 28
+#define MT8188_POWER_DOMAIN_CAM_VCORE 29
+#define MT8188_POWER_DOMAIN_CAM_MAIN 30
+#define MT8188_POWER_DOMAIN_CAM_SUBA 31
+#define MT8188_POWER_DOMAIN_CAM_SUBB 32
+
+#endif /* _DT_BINDINGS_POWER_MT8188_POWER_H */
diff --git a/include/dt-bindings/power/mediatek,mt8365-power.h b/include/dt-bindings/power/mediatek,mt8365-power.h
new file mode 100644
index 000000000000..e6cfd0ec7871
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8365-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8365_POWER_H
+#define _DT_BINDINGS_POWER_MT8365_POWER_H
+
+#define MT8365_POWER_DOMAIN_MM 0
+#define MT8365_POWER_DOMAIN_CONN 1
+#define MT8365_POWER_DOMAIN_MFG 2
+#define MT8365_POWER_DOMAIN_AUDIO 3
+#define MT8365_POWER_DOMAIN_CAM 4
+#define MT8365_POWER_DOMAIN_DSP 5
+#define MT8365_POWER_DOMAIN_VDEC 6
+#define MT8365_POWER_DOMAIN_VENC 7
+#define MT8365_POWER_DOMAIN_APU 8
+
+#endif /* _DT_BINDINGS_POWER_MT8365_POWER_H */
diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h
index 6cf50bfb8ccf..724c370d6853 100644
--- a/include/dt-bindings/power/meson-a1-power.h
+++ b/include/dt-bindings/power/meson-a1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <jianxin.pan@amlogic.com>
diff --git a/include/dt-bindings/power/meson-axg-power.h b/include/dt-bindings/power/meson-axg-power.h
new file mode 100644
index 000000000000..ace0e468ce21
--- /dev/null
+++ b/include/dt-bindings/power/meson-axg-power.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_AXG_POWER_H
+#define _DT_BINDINGS_MESON_AXG_POWER_H
+
+#define PWRC_AXG_VPU_ID 0
+#define PWRC_AXG_ETHERNET_MEM_ID 1
+#define PWRC_AXG_AUDIO_ID 2
+
+#endif
diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h
index bb5e67a842de..01fd0ac4dd08 100644
--- a/include/dt-bindings/power/meson-g12a-power.h
+++ b/include/dt-bindings/power/meson-g12a-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
@@ -9,5 +9,7 @@
#define PWRC_G12A_VPU_ID 0
#define PWRC_G12A_ETH_ID 1
+#define PWRC_G12A_NNA_ID 2
+#define PWRC_G12A_ISP_ID 3
#endif
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
index 1262dac696c0..8d0b32b6c02c 100644
--- a/include/dt-bindings/power/meson-gxbb-power.h
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-s4-power.h b/include/dt-bindings/power/meson-s4-power.h
new file mode 100644
index 000000000000..f210a524a592
--- /dev/null
+++ b/include/dt-bindings/power/meson-s4-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc.
+ * Author: Shunzhou Jiang <shunzhou.jiang@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_S4_POWER_H
+#define _DT_BINDINGS_MESON_S4_POWER_H
+
+#define PWRC_S4_DOS_HEVC_ID 0
+#define PWRC_S4_DOS_VDEC_ID 1
+#define PWRC_S4_VPU_HDMI_ID 2
+#define PWRC_S4_USB_COMB_ID 3
+#define PWRC_S4_GE2D_ID 4
+#define PWRC_S4_ETH_ID 5
+#define PWRC_S4_DEMOD_ID 6
+#define PWRC_S4_AUDIO_ID 7
+
+#endif
diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h
index a020ab00c134..d78e710dbfff 100644
--- a/include/dt-bindings/power/meson-sm1-power.h
+++ b/include/dt-bindings/power/meson-sm1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
index dd8b2ddb82a7..7a55ba2cd22e 100644
--- a/include/dt-bindings/power/meson8-power.h
+++ b/include/dt-bindings/power/meson8-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h
new file mode 100644
index 000000000000..b0fc26cb1da4
--- /dev/null
+++ b/include/dt-bindings/power/mt6795-power.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H
+#define _DT_BINDINGS_POWER_MT6795_POWER_H
+
+#define MT6795_POWER_DOMAIN_MM 0
+#define MT6795_POWER_DOMAIN_VDEC 1
+#define MT6795_POWER_DOMAIN_VENC 2
+#define MT6795_POWER_DOMAIN_ISP 3
+#define MT6795_POWER_DOMAIN_MJC 4
+#define MT6795_POWER_DOMAIN_AUDIO 5
+#define MT6795_POWER_DOMAIN_MFG_ASYNC 6
+#define MT6795_POWER_DOMAIN_MFG_2D 7
+#define MT6795_POWER_DOMAIN_MFG 8
+#define MT6795_POWER_DOMAIN_MODEM 9
+
+#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */
diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h
index a60c1d81cf75..bd451d860e6a 100644
--- a/include/dt-bindings/power/mt6797-power.h
+++ b/include/dt-bindings/power/mt6797-power.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017 MediaTek Inc.
* Author: Mars.C <mars.cheng@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_POWER_MT6797_POWER_H
diff --git a/include/dt-bindings/power/mt8167-power.h b/include/dt-bindings/power/mt8167-power.h
new file mode 100644
index 000000000000..c8ec9983a4bc
--- /dev/null
+++ b/include/dt-bindings/power/mt8167-power.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8167_POWER_H
+#define _DT_BINDINGS_POWER_MT8167_POWER_H
+
+#define MT8167_POWER_DOMAIN_MM 0
+#define MT8167_POWER_DOMAIN_VDEC 1
+#define MT8167_POWER_DOMAIN_ISP 2
+#define MT8167_POWER_DOMAIN_CONN 3
+#define MT8167_POWER_DOMAIN_MFG_ASYNC 4
+#define MT8167_POWER_DOMAIN_MFG_2D 5
+#define MT8167_POWER_DOMAIN_MFG 6
+
+#endif /* _DT_BINDINGS_POWER_MT8167_POWER_H */
diff --git a/include/dt-bindings/power/mt8183-power.h b/include/dt-bindings/power/mt8183-power.h
new file mode 100644
index 000000000000..d1ab387ba8c7
--- /dev/null
+++ b/include/dt-bindings/power/mt8183-power.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
+#define _DT_BINDINGS_POWER_MT8183_POWER_H
+
+#define MT8183_POWER_DOMAIN_AUDIO 0
+#define MT8183_POWER_DOMAIN_CONN 1
+#define MT8183_POWER_DOMAIN_MFG_ASYNC 2
+#define MT8183_POWER_DOMAIN_MFG 3
+#define MT8183_POWER_DOMAIN_MFG_CORE0 4
+#define MT8183_POWER_DOMAIN_MFG_CORE1 5
+#define MT8183_POWER_DOMAIN_MFG_2D 6
+#define MT8183_POWER_DOMAIN_DISP 7
+#define MT8183_POWER_DOMAIN_CAM 8
+#define MT8183_POWER_DOMAIN_ISP 9
+#define MT8183_POWER_DOMAIN_VDEC 10
+#define MT8183_POWER_DOMAIN_VENC 11
+#define MT8183_POWER_DOMAIN_VPU_TOP 12
+#define MT8183_POWER_DOMAIN_VPU_CORE0 13
+#define MT8183_POWER_DOMAIN_VPU_CORE1 14
+
+#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
diff --git a/include/dt-bindings/power/mt8186-power.h b/include/dt-bindings/power/mt8186-power.h
new file mode 100644
index 000000000000..429f7197f6b6
--- /dev/null
+++ b/include/dt-bindings/power/mt8186-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8186_POWER_H
+#define _DT_BINDINGS_POWER_MT8186_POWER_H
+
+#define MT8186_POWER_DOMAIN_MFG0 0
+#define MT8186_POWER_DOMAIN_MFG1 1
+#define MT8186_POWER_DOMAIN_MFG2 2
+#define MT8186_POWER_DOMAIN_MFG3 3
+#define MT8186_POWER_DOMAIN_SSUSB 4
+#define MT8186_POWER_DOMAIN_SSUSB_P1 5
+#define MT8186_POWER_DOMAIN_DIS 6
+#define MT8186_POWER_DOMAIN_IMG 7
+#define MT8186_POWER_DOMAIN_IMG2 8
+#define MT8186_POWER_DOMAIN_IPE 9
+#define MT8186_POWER_DOMAIN_CAM 10
+#define MT8186_POWER_DOMAIN_CAM_RAWA 11
+#define MT8186_POWER_DOMAIN_CAM_RAWB 12
+#define MT8186_POWER_DOMAIN_VENC 13
+#define MT8186_POWER_DOMAIN_VDEC 14
+#define MT8186_POWER_DOMAIN_WPE 15
+#define MT8186_POWER_DOMAIN_CONN_ON 16
+#define MT8186_POWER_DOMAIN_CSIRX_TOP 17
+#define MT8186_POWER_DOMAIN_ADSP_AO 18
+#define MT8186_POWER_DOMAIN_ADSP_INFRA 19
+#define MT8186_POWER_DOMAIN_ADSP_TOP 20
+
+#endif /* _DT_BINDINGS_POWER_MT8186_POWER_H */
diff --git a/include/dt-bindings/power/mt8192-power.h b/include/dt-bindings/power/mt8192-power.h
new file mode 100644
index 000000000000..4eaa53d7270a
--- /dev/null
+++ b/include/dt-bindings/power/mt8192-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8192_POWER_H
+#define _DT_BINDINGS_POWER_MT8192_POWER_H
+
+#define MT8192_POWER_DOMAIN_AUDIO 0
+#define MT8192_POWER_DOMAIN_CONN 1
+#define MT8192_POWER_DOMAIN_MFG0 2
+#define MT8192_POWER_DOMAIN_MFG1 3
+#define MT8192_POWER_DOMAIN_MFG2 4
+#define MT8192_POWER_DOMAIN_MFG3 5
+#define MT8192_POWER_DOMAIN_MFG4 6
+#define MT8192_POWER_DOMAIN_MFG5 7
+#define MT8192_POWER_DOMAIN_MFG6 8
+#define MT8192_POWER_DOMAIN_DISP 9
+#define MT8192_POWER_DOMAIN_IPE 10
+#define MT8192_POWER_DOMAIN_ISP 11
+#define MT8192_POWER_DOMAIN_ISP2 12
+#define MT8192_POWER_DOMAIN_MDP 13
+#define MT8192_POWER_DOMAIN_VENC 14
+#define MT8192_POWER_DOMAIN_VDEC 15
+#define MT8192_POWER_DOMAIN_VDEC2 16
+#define MT8192_POWER_DOMAIN_CAM 17
+#define MT8192_POWER_DOMAIN_CAM_RAWA 18
+#define MT8192_POWER_DOMAIN_CAM_RAWB 19
+#define MT8192_POWER_DOMAIN_CAM_RAWC 20
+
+#endif /* _DT_BINDINGS_POWER_MT8192_POWER_H */
diff --git a/include/dt-bindings/power/mt8195-power.h b/include/dt-bindings/power/mt8195-power.h
new file mode 100644
index 000000000000..b20ca4b3e3a8
--- /dev/null
+++ b/include/dt-bindings/power/mt8195-power.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8195_POWER_H
+#define _DT_BINDINGS_POWER_MT8195_POWER_H
+
+#define MT8195_POWER_DOMAIN_PCIE_MAC_P0 0
+#define MT8195_POWER_DOMAIN_PCIE_MAC_P1 1
+#define MT8195_POWER_DOMAIN_PCIE_PHY 2
+#define MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY 3
+#define MT8195_POWER_DOMAIN_CSI_RX_TOP 4
+#define MT8195_POWER_DOMAIN_ETHER 5
+#define MT8195_POWER_DOMAIN_ADSP 6
+#define MT8195_POWER_DOMAIN_AUDIO 7
+#define MT8195_POWER_DOMAIN_MFG0 8
+#define MT8195_POWER_DOMAIN_MFG1 9
+#define MT8195_POWER_DOMAIN_MFG2 10
+#define MT8195_POWER_DOMAIN_MFG3 11
+#define MT8195_POWER_DOMAIN_MFG4 12
+#define MT8195_POWER_DOMAIN_MFG5 13
+#define MT8195_POWER_DOMAIN_MFG6 14
+#define MT8195_POWER_DOMAIN_VPPSYS0 15
+#define MT8195_POWER_DOMAIN_VDOSYS0 16
+#define MT8195_POWER_DOMAIN_VPPSYS1 17
+#define MT8195_POWER_DOMAIN_VDOSYS1 18
+#define MT8195_POWER_DOMAIN_DP_TX 19
+#define MT8195_POWER_DOMAIN_EPD_TX 20
+#define MT8195_POWER_DOMAIN_HDMI_TX 21
+#define MT8195_POWER_DOMAIN_WPESYS 22
+#define MT8195_POWER_DOMAIN_VDEC0 23
+#define MT8195_POWER_DOMAIN_VDEC1 24
+#define MT8195_POWER_DOMAIN_VDEC2 25
+#define MT8195_POWER_DOMAIN_VENC 26
+#define MT8195_POWER_DOMAIN_VENC_CORE1 27
+#define MT8195_POWER_DOMAIN_IMG 28
+#define MT8195_POWER_DOMAIN_DIP 29
+#define MT8195_POWER_DOMAIN_IPE 30
+#define MT8195_POWER_DOMAIN_CAM 31
+#define MT8195_POWER_DOMAIN_CAM_RAWA 32
+#define MT8195_POWER_DOMAIN_CAM_RAWB 33
+#define MT8195_POWER_DOMAIN_CAM_MRAW 34
+
+#endif /* _DT_BINDINGS_POWER_MT8195_POWER_H */
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
new file mode 100644
index 000000000000..e54ffa361451
--- /dev/null
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+
+/* Generic RPMH Power Domain Indexes */
+#define RPMHPD_CX 0
+#define RPMHPD_CX_AO 1
+#define RPMHPD_EBI 2
+#define RPMHPD_GFX 3
+#define RPMHPD_LCX 4
+#define RPMHPD_LMX 5
+#define RPMHPD_MMCX 6
+#define RPMHPD_MMCX_AO 7
+#define RPMHPD_MX 8
+#define RPMHPD_MX_AO 9
+#define RPMHPD_MXC 10
+#define RPMHPD_MXC_AO 11
+#define RPMHPD_MSS 12
+#define RPMHPD_NSP 13
+#define RPMHPD_NSP0 14
+#define RPMHPD_NSP1 15
+#define RPMHPD_QPHY 16
+#define RPMHPD_DDR 17
+#define RPMHPD_XO 18
+#define RPMHPD_NSP2 19
+#define RPMHPD_GMXC 20
+
+#endif
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
deleted file mode 100644
index ec336d31dee4..000000000000
--- a/include/dt-bindings/power/qcom-aoss-qmp.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Linaro Ltd. */
-
-#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-
-#define AOSS_QMP_LS_CDSP 0
-#define AOSS_QMP_LS_LPASS 1
-#define AOSS_QMP_LS_MODEM 2
-#define AOSS_QMP_LS_SLPI 3
-#define AOSS_QMP_LS_SPSS 4
-#define AOSS_QMP_LS_VENUS 5
-
-#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 5e61eaf73bdd..608087fb9a3d 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -4,6 +4,35 @@
#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+/* SA8775P Power Domain Indexes */
+#define SA8775P_CX 0
+#define SA8775P_CX_AO 1
+#define SA8775P_DDR 2
+#define SA8775P_EBI 3
+#define SA8775P_GFX 4
+#define SA8775P_LCX 5
+#define SA8775P_LMX 6
+#define SA8775P_MMCX 7
+#define SA8775P_MMCX_AO 8
+#define SA8775P_MSS 9
+#define SA8775P_MX 10
+#define SA8775P_MX_AO 11
+#define SA8775P_MXC 12
+#define SA8775P_MXC_AO 13
+#define SA8775P_NSP0 14
+#define SA8775P_NSP1 15
+#define SA8775P_XO 16
+
+/* SDM670 Power Domain Indexes */
+#define SDM670_MX 0
+#define SDM670_MX_AO 1
+#define SDM670_CX 2
+#define SDM670_CX_AO 3
+#define SDM670_LMX 4
+#define SDM670_LCX 5
+#define SDM670_GFX 6
+#define SDM670_MSS 7
+
/* SDM845 Power Domain Indexes */
#define SDM845_EBI 0
#define SDM845_MX 1
@@ -15,6 +44,39 @@
#define SDM845_GFX 7
#define SDM845_MSS 8
+/* SDX55 Power Domain Indexes */
+#define SDX55_MSS 0
+#define SDX55_MX 1
+#define SDX55_CX 2
+
+/* SDX65 Power Domain Indexes */
+#define SDX65_MSS 0
+#define SDX65_MX 1
+#define SDX65_MX_AO 2
+#define SDX65_CX 3
+#define SDX65_CX_AO 4
+#define SDX65_MXC 5
+
+/* SM6350 Power Domain Indexes */
+#define SM6350_CX 0
+#define SM6350_GFX 1
+#define SM6350_LCX 2
+#define SM6350_LMX 3
+#define SM6350_MSS 4
+#define SM6350_MX 5
+
+/* SM6375 Power Domain Indexes */
+#define SM6375_VDDCX 0
+#define SM6375_VDDCX_AO 1
+#define SM6375_VDDCX_VFL 2
+#define SM6375_VDDMX 3
+#define SM6375_VDDMX_AO 4
+#define SM6375_VDDMX_VFL 5
+#define SM6375_VDDGX 6
+#define SM6375_VDDGX_AO 7
+#define SM6375_VDD_LPI_CX 8
+#define SM6375_VDD_LPI_MX 9
+
/* SM8150 Power Domain Indexes */
#define SM8150_MSS 0
#define SM8150_EBI 1
@@ -28,6 +90,15 @@
#define SM8150_MMCX 9
#define SM8150_MMCX_AO 10
+/* SA8155P is a special case, kept for backwards compatibility */
+#define SA8155P_CX SM8150_CX
+#define SA8155P_CX_AO SM8150_CX_AO
+#define SA8155P_EBI SM8150_EBI
+#define SA8155P_GFX SM8150_GFX
+#define SA8155P_MSS SM8150_MSS
+#define SA8155P_MX SM8150_MX
+#define SA8155P_MX_AO SM8150_MX_AO
+
/* SM8250 Power Domain Indexes */
#define SM8250_CX 0
#define SM8250_CX_AO 1
@@ -40,6 +111,58 @@
#define SM8250_MX 8
#define SM8250_MX_AO 9
+/* SM8350 Power Domain Indexes */
+#define SM8350_CX 0
+#define SM8350_CX_AO 1
+#define SM8350_EBI 2
+#define SM8350_GFX 3
+#define SM8350_LCX 4
+#define SM8350_LMX 5
+#define SM8350_MMCX 6
+#define SM8350_MMCX_AO 7
+#define SM8350_MX 8
+#define SM8350_MX_AO 9
+#define SM8350_MXC 10
+#define SM8350_MXC_AO 11
+#define SM8350_MSS 12
+
+/* SM8450 Power Domain Indexes */
+#define SM8450_CX 0
+#define SM8450_CX_AO 1
+#define SM8450_EBI 2
+#define SM8450_GFX 3
+#define SM8450_LCX 4
+#define SM8450_LMX 5
+#define SM8450_MMCX 6
+#define SM8450_MMCX_AO 7
+#define SM8450_MX 8
+#define SM8450_MX_AO 9
+#define SM8450_MXC 10
+#define SM8450_MXC_AO 11
+#define SM8450_MSS 12
+
+/* SM8550 Power Domain Indexes */
+#define SM8550_CX 0
+#define SM8550_CX_AO 1
+#define SM8550_EBI 2
+#define SM8550_GFX 3
+#define SM8550_LCX 4
+#define SM8550_LMX 5
+#define SM8550_MMCX 6
+#define SM8550_MMCX_AO 7
+#define SM8550_MX 8
+#define SM8550_MX_AO 9
+#define SM8550_MXC 10
+#define SM8550_MXC_AO 11
+#define SM8550_MSS 12
+#define SM8550_NSP 13
+
+/* QDU1000/QRU1000 Power Domain Indexes */
+#define QDU1000_EBI 0
+#define QDU1000_MSS 1
+#define QDU1000_CX 2
+#define QDU1000_MX 3
+
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
@@ -50,19 +173,147 @@
#define SC7180_LCX 6
#define SC7180_MSS 7
+/* SC7280 Power Domain Indexes */
+#define SC7280_CX 0
+#define SC7280_CX_AO 1
+#define SC7280_EBI 2
+#define SC7280_GFX 3
+#define SC7280_MX 4
+#define SC7280_MX_AO 5
+#define SC7280_LMX 6
+#define SC7280_LCX 7
+#define SC7280_MSS 8
+
+/* SC8180X Power Domain Indexes */
+#define SC8180X_CX 0
+#define SC8180X_CX_AO 1
+#define SC8180X_EBI 2
+#define SC8180X_GFX 3
+#define SC8180X_LCX 4
+#define SC8180X_LMX 5
+#define SC8180X_MMCX 6
+#define SC8180X_MMCX_AO 7
+#define SC8180X_MSS 8
+#define SC8180X_MX 9
+#define SC8180X_MX_AO 10
+
+/* SC8280XP Power Domain Indexes */
+#define SC8280XP_CX 0
+#define SC8280XP_CX_AO 1
+#define SC8280XP_DDR 2
+#define SC8280XP_EBI 3
+#define SC8280XP_GFX 4
+#define SC8280XP_LCX 5
+#define SC8280XP_LMX 6
+#define SC8280XP_MMCX 7
+#define SC8280XP_MMCX_AO 8
+#define SC8280XP_MSS 9
+#define SC8280XP_MX 10
+#define SC8280XP_MXC 12
+#define SC8280XP_MX_AO 11
+#define SC8280XP_NSP 13
+#define SC8280XP_QPHY 14
+#define SC8280XP_XO 15
+
/* SDM845 Power Domain performance levels */
-#define RPMH_REGULATOR_LEVEL_RETENTION 16
-#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
-#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
-#define RPMH_REGULATOR_LEVEL_SVS 128
-#define RPMH_REGULATOR_LEVEL_SVS_L0 144
-#define RPMH_REGULATOR_LEVEL_SVS_L1 192
-#define RPMH_REGULATOR_LEVEL_SVS_L2 224
-#define RPMH_REGULATOR_LEVEL_NOM 256
-#define RPMH_REGULATOR_LEVEL_NOM_L1 320
-#define RPMH_REGULATOR_LEVEL_NOM_L2 336
-#define RPMH_REGULATOR_LEVEL_TURBO 384
-#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
+#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_P1 72
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L2 96
+#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L0 144
+#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_SVS_L2 224
+#define RPMH_REGULATOR_LEVEL_NOM 256
+#define RPMH_REGULATOR_LEVEL_NOM_L0 288
+#define RPMH_REGULATOR_LEVEL_NOM_L1 320
+#define RPMH_REGULATOR_LEVEL_NOM_L2 336
+#define RPMH_REGULATOR_LEVEL_TURBO 384
+#define RPMH_REGULATOR_LEVEL_TURBO_L0 400
+#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
+#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
+
+/* MDM9607 Power Domains */
+#define MDM9607_VDDCX 0
+#define MDM9607_VDDCX_AO 1
+#define MDM9607_VDDCX_VFL 2
+#define MDM9607_VDDMX 3
+#define MDM9607_VDDMX_AO 4
+#define MDM9607_VDDMX_VFL 5
+
+/* MSM8226 Power Domain Indexes */
+#define MSM8226_VDDCX 0
+#define MSM8226_VDDCX_AO 1
+#define MSM8226_VDDCX_VFC 2
+
+/* MSM8939 Power Domains */
+#define MSM8939_VDDMDCX 0
+#define MSM8939_VDDMDCX_AO 1
+#define MSM8939_VDDMDCX_VFC 2
+#define MSM8939_VDDCX 3
+#define MSM8939_VDDCX_AO 4
+#define MSM8939_VDDCX_VFC 5
+#define MSM8939_VDDMX 6
+#define MSM8939_VDDMX_AO 7
+
+/* MSM8916 Power Domain Indexes */
+#define MSM8916_VDDCX 0
+#define MSM8916_VDDCX_AO 1
+#define MSM8916_VDDCX_VFC 2
+#define MSM8916_VDDMX 3
+#define MSM8916_VDDMX_AO 4
+
+/* MSM8909 Power Domain Indexes */
+#define MSM8909_VDDCX MSM8916_VDDCX
+#define MSM8909_VDDCX_AO MSM8916_VDDCX_AO
+#define MSM8909_VDDCX_VFC MSM8916_VDDCX_VFC
+#define MSM8909_VDDMX MSM8916_VDDMX
+#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO
+
+/* MSM8917 Power Domain Indexes */
+#define MSM8917_VDDCX 0
+#define MSM8917_VDDCX_AO 1
+#define MSM8917_VDDCX_VFL 2
+#define MSM8917_VDDMX 3
+#define MSM8917_VDDMX_AO 4
+
+/* MSM8937 Power Domain Indexes */
+#define MSM8937_VDDCX MSM8917_VDDCX
+#define MSM8937_VDDCX_AO MSM8917_VDDCX_AO
+#define MSM8937_VDDCX_VFL MSM8917_VDDCX_VFL
+#define MSM8937_VDDMX MSM8917_VDDMX
+#define MSM8937_VDDMX_AO MSM8917_VDDMX_AO
+
+/* QM215 Power Domain Indexes */
+#define QM215_VDDCX MSM8917_VDDCX
+#define QM215_VDDCX_AO MSM8917_VDDCX_AO
+#define QM215_VDDCX_VFL MSM8917_VDDCX_VFL
+#define QM215_VDDMX MSM8917_VDDMX
+#define QM215_VDDMX_AO MSM8917_VDDMX_AO
+
+/* MSM8953 Power Domain Indexes */
+#define MSM8953_VDDMD 0
+#define MSM8953_VDDMD_AO 1
+#define MSM8953_VDDCX 2
+#define MSM8953_VDDCX_AO 3
+#define MSM8953_VDDCX_VFL 4
+#define MSM8953_VDDMX 5
+#define MSM8953_VDDMX_AO 6
+
+/* MSM8974 Power Domain Indexes */
+#define MSM8974_VDDCX 0
+#define MSM8974_VDDCX_AO 1
+#define MSM8974_VDDCX_VFC 2
+#define MSM8974_VDDGFX 3
+#define MSM8974_VDDGFX_VFC 4
/* MSM8976 Power Domain Indexes */
#define MSM8976_VDDCX 0
@@ -72,6 +323,15 @@
#define MSM8976_VDDMX_AO 4
#define MSM8976_VDDMX_VFL 5
+/* MSM8994 Power Domain Indexes */
+#define MSM8994_VDDCX 0
+#define MSM8994_VDDCX_AO 1
+#define MSM8994_VDDCX_VFC 2
+#define MSM8994_VDDMX 3
+#define MSM8994_VDDMX_AO 4
+#define MSM8994_VDDGFX 5
+#define MSM8994_VDDGFX_VFC 6
+
/* MSM8996 Power Domain Indexes */
#define MSM8996_VDDCX 0
#define MSM8996_VDDCX_AO 1
@@ -102,6 +362,46 @@
#define QCS404_LPIMX 5
#define QCS404_LPIMX_VFL 6
+/* SDM660 Power Domains */
+#define SDM660_VDDCX 0
+#define SDM660_VDDCX_AO 1
+#define SDM660_VDDCX_VFL 2
+#define SDM660_VDDMX 3
+#define SDM660_VDDMX_AO 4
+#define SDM660_VDDMX_VFL 5
+#define SDM660_SSCCX 6
+#define SDM660_SSCCX_VFL 7
+#define SDM660_SSCMX 8
+#define SDM660_SSCMX_VFL 9
+
+/* SM6115 Power Domains */
+#define SM6115_VDDCX 0
+#define SM6115_VDDCX_AO 1
+#define SM6115_VDDCX_VFL 2
+#define SM6115_VDDMX 3
+#define SM6115_VDDMX_AO 4
+#define SM6115_VDDMX_VFL 5
+#define SM6115_VDD_LPI_CX 6
+#define SM6115_VDD_LPI_MX 7
+
+/* SM6125 Power Domains */
+#define SM6125_VDDCX 0
+#define SM6125_VDDCX_AO 1
+#define SM6125_VDDCX_VFL 2
+#define SM6125_VDDMX 3
+#define SM6125_VDDMX_AO 4
+#define SM6125_VDDMX_VFL 5
+
+/* QCM2290 Power Domains */
+#define QCM2290_VDDCX 0
+#define QCM2290_VDDCX_AO 1
+#define QCM2290_VDDCX_VFL 2
+#define QCM2290_VDDMX 3
+#define QCM2290_VDDMX_AO 4
+#define QCM2290_VDDMX_VFL 5
+#define QCM2290_VDD_LPI_CX 6
+#define QCM2290_VDD_LPI_MX 7
+
/* RPM SMD Power Domain performance levels */
#define RPM_SMD_LEVEL_RETENTION 16
#define RPM_SMD_LEVEL_RETENTION_PLUS 32
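
The RPMH_REGULATOR_LEVEL_* values above are performance-state levels rather
than domain indexes: they are consumed through OPP tables whose entries carry
an opp-level, and device nodes then select a level via required-opps. A minimal
sketch of that pattern, with the table label and OPP node names illustrative
rather than taken from any particular dtsi:

	rpmhpd_opp_table: opp-table {
		compatible = "operating-points-v2";

		rpmhpd_opp_svs: opp3 {
			opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
		};

		rpmhpd_opp_nom: opp4 {
			opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
		};
	};
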
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
index eea6ad69f0b0..ff5323858572 100644
--- a/include/dt-bindings/power/r8a7795-sysc.h
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -30,7 +30,6 @@
#define R8A7795_PD_CA53_SCU 21
#define R8A7795_PD_3DG_E 22
#define R8A7795_PD_A3IR 24
-#define R8A7795_PD_A2VC0 25 /* ES1.x only */
#define R8A7795_PD_A2VC1 26
/* Always-on power area */
diff --git a/include/dt-bindings/power/r8a779a0-sysc.h b/include/dt-bindings/power/r8a779a0-sysc.h
new file mode 100644
index 000000000000..57929e459a67
--- /dev/null
+++ b/include/dt-bindings/power/r8a779a0-sysc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779A0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779A0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779A0_PD_A1E0D0C0 0
+#define R8A779A0_PD_A1E0D0C1 1
+#define R8A779A0_PD_A1E0D1C0 2
+#define R8A779A0_PD_A1E0D1C1 3
+#define R8A779A0_PD_A1E1D0C0 4
+#define R8A779A0_PD_A1E1D0C1 5
+#define R8A779A0_PD_A1E1D1C0 6
+#define R8A779A0_PD_A1E1D1C1 7
+#define R8A779A0_PD_A2E0D0 16
+#define R8A779A0_PD_A2E0D1 17
+#define R8A779A0_PD_A2E1D0 18
+#define R8A779A0_PD_A2E1D1 19
+#define R8A779A0_PD_A3E0 20
+#define R8A779A0_PD_A3E1 21
+#define R8A779A0_PD_3DG_A 24
+#define R8A779A0_PD_3DG_B 25
+#define R8A779A0_PD_A1CNN2 32
+#define R8A779A0_PD_A1DSP0 33
+#define R8A779A0_PD_A2IMP01 34
+#define R8A779A0_PD_A2DP0 35
+#define R8A779A0_PD_A2CV0 36
+#define R8A779A0_PD_A2CV1 37
+#define R8A779A0_PD_A2CV4 38
+#define R8A779A0_PD_A2CV6 39
+#define R8A779A0_PD_A2CN2 40
+#define R8A779A0_PD_A1CNN0 41
+#define R8A779A0_PD_A2CN0 42
+#define R8A779A0_PD_A3IR 43
+#define R8A779A0_PD_A1CNN1 44
+#define R8A779A0_PD_A1DSP1 45
+#define R8A779A0_PD_A2IMP23 46
+#define R8A779A0_PD_A2DP1 47
+#define R8A779A0_PD_A2CV2 48
+#define R8A779A0_PD_A2CV3 49
+#define R8A779A0_PD_A2CV5 50
+#define R8A779A0_PD_A2CV7 51
+#define R8A779A0_PD_A2CN1 52
+#define R8A779A0_PD_A3VIP0 56
+#define R8A779A0_PD_A3VIP1 57
+#define R8A779A0_PD_A3VIP2 58
+#define R8A779A0_PD_A3VIP3 59
+#define R8A779A0_PD_A3ISP01 60
+#define R8A779A0_PD_A3ISP23 61
+
+/* Always-on power area */
+#define R8A779A0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779A0_SYSC_H__ */
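
Since these indices match the PDR numbers one-to-one, a consumer passes them
directly in its power-domain specifier. A minimal sketch, assuming an
illustrative &sysc label for the SYSC node and a placeholder device node:

	device@0 {
		power-domains = <&sysc R8A779A0_PD_A1DSP0>;
	};
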
diff --git a/include/dt-bindings/power/r8a779f0-sysc.h b/include/dt-bindings/power/r8a779f0-sysc.h
new file mode 100644
index 000000000000..cde1536e9ed0
--- /dev/null
+++ b/include/dt-bindings/power/r8a779f0-sysc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779F0_PD_A1E0D0C0 0
+#define R8A779F0_PD_A1E0D0C1 1
+#define R8A779F0_PD_A1E0D1C0 2
+#define R8A779F0_PD_A1E0D1C1 3
+#define R8A779F0_PD_A1E1D0C0 4
+#define R8A779F0_PD_A1E1D0C1 5
+#define R8A779F0_PD_A1E1D1C0 6
+#define R8A779F0_PD_A1E1D1C1 7
+#define R8A779F0_PD_A2E0D0 16
+#define R8A779F0_PD_A2E0D1 17
+#define R8A779F0_PD_A2E1D0 18
+#define R8A779F0_PD_A2E1D1 19
+#define R8A779F0_PD_A3E0 20
+#define R8A779F0_PD_A3E1 21
+
+/* Always-on power area */
+#define R8A779F0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779F0_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a779g0-sysc.h b/include/dt-bindings/power/r8a779g0-sysc.h
new file mode 100644
index 000000000000..c7b139fb075f
--- /dev/null
+++ b/include/dt-bindings/power/r8a779g0-sysc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779G0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779G0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779G0_PD_A1E0D0C0 0
+#define R8A779G0_PD_A1E0D0C1 1
+#define R8A779G0_PD_A1E0D1C0 2
+#define R8A779G0_PD_A1E0D1C1 3
+#define R8A779G0_PD_A2E0D0 16
+#define R8A779G0_PD_A2E0D1 17
+#define R8A779G0_PD_A3E0 20
+#define R8A779G0_PD_A33DGA 24
+#define R8A779G0_PD_A23DGB 25
+#define R8A779G0_PD_A1DSP0 33
+#define R8A779G0_PD_A2IMP01 34
+#define R8A779G0_PD_A2PSC 35
+#define R8A779G0_PD_A2CV0 36
+#define R8A779G0_PD_A2CV1 37
+#define R8A779G0_PD_A1CNN0 41
+#define R8A779G0_PD_A2CN0 42
+#define R8A779G0_PD_A3IR 43
+#define R8A779G0_PD_A1DSP1 45
+#define R8A779G0_PD_A2IMP23 46
+#define R8A779G0_PD_A2DMA 47
+#define R8A779G0_PD_A2CV2 48
+#define R8A779G0_PD_A2CV3 49
+#define R8A779G0_PD_A1DSP2 53
+#define R8A779G0_PD_A1DSP3 54
+#define R8A779G0_PD_A3VIP0 56
+#define R8A779G0_PD_A3VIP1 57
+#define R8A779G0_PD_A3VIP2 58
+#define R8A779G0_PD_A3ISP0 60
+#define R8A779G0_PD_A3ISP1 61
+#define R8A779G0_PD_A3DUL 62
+
+/* Always-on power area */
+#define R8A779G0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779G0_SYSC_H__ */
diff --git a/include/dt-bindings/power/renesas,r8a779h0-sysc.h b/include/dt-bindings/power/renesas,r8a779h0-sysc.h
new file mode 100644
index 000000000000..f27976f523e8
--- /dev/null
+++ b/include/dt-bindings/power/renesas,r8a779h0-sysc.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_RENESAS_R8A779H0_SYSC_H__
+#define __DT_BINDINGS_POWER_RENESAS_R8A779H0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779H0_PD_A1E0D0C0 0
+#define R8A779H0_PD_A1E0D0C1 1
+#define R8A779H0_PD_A1E0D0C2 2
+#define R8A779H0_PD_A1E0D0C3 3
+#define R8A779H0_PD_A2E0D0 16
+#define R8A779H0_PD_A3CR0 21
+#define R8A779H0_PD_A3CR1 22
+#define R8A779H0_PD_A3CR2 23
+#define R8A779H0_PD_A33DGA 24
+#define R8A779H0_PD_A23DGB 25
+#define R8A779H0_PD_C4 31
+#define R8A779H0_PD_A1DSP0 33
+#define R8A779H0_PD_A2IMP01 34
+#define R8A779H0_PD_A2PSC 35
+#define R8A779H0_PD_A2CV0 36
+#define R8A779H0_PD_A2CV1 37
+#define R8A779H0_PD_A3IMR0 38
+#define R8A779H0_PD_A3IMR1 39
+#define R8A779H0_PD_A3VC 40
+#define R8A779H0_PD_A2CN0 42
+#define R8A779H0_PD_A1CN0 44
+#define R8A779H0_PD_A1DSP1 45
+#define R8A779H0_PD_A2DMA 47
+#define R8A779H0_PD_A2CV2 48
+#define R8A779H0_PD_A2CV3 49
+#define R8A779H0_PD_A3IMR2 50
+#define R8A779H0_PD_A3IMR3 51
+#define R8A779H0_PD_A3PCI 52
+#define R8A779H0_PD_A2PCIPHY 53
+#define R8A779H0_PD_A3VIP0 56
+#define R8A779H0_PD_A3VIP2 58
+#define R8A779H0_PD_A3ISP0 60
+#define R8A779H0_PD_A3DUL 62
+
+/* Always-on power area */
+#define R8A779H0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_RENESAS_R8A779H0_SYSC_H__ */
diff --git a/include/dt-bindings/power/rk3568-power.h b/include/dt-bindings/power/rk3568-power.h
new file mode 100644
index 000000000000..6cc1af1a9d26
--- /dev/null
+++ b/include/dt-bindings/power/rk3568-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3568_POWER_H__
+#define __DT_BINDINGS_POWER_RK3568_POWER_H__
+
+/* VD_CORE */
+#define RK3568_PD_CPU_0 0
+#define RK3568_PD_CPU_1 1
+#define RK3568_PD_CPU_2 2
+#define RK3568_PD_CPU_3 3
+#define RK3568_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3568_PD_PMU 5
+
+/* VD_NPU */
+#define RK3568_PD_NPU 6
+
+/* VD_GPU */
+#define RK3568_PD_GPU 7
+
+/* VD_LOGIC */
+#define RK3568_PD_VI 8
+#define RK3568_PD_VO 9
+#define RK3568_PD_RGA 10
+#define RK3568_PD_VPU 11
+#define RK3568_PD_CENTER 12
+#define RK3568_PD_RKVDEC 13
+#define RK3568_PD_RKVENC 14
+#define RK3568_PD_PIPE 15
+#define RK3568_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/power/rk3588-power.h b/include/dt-bindings/power/rk3588-power.h
new file mode 100644
index 000000000000..6b91a50cc6d6
--- /dev/null
+++ b/include/dt-bindings/power/rk3588-power.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3588_POWER_H__
+#define __DT_BINDINGS_POWER_RK3588_POWER_H__
+
+/* VD_LITDSU */
+#define RK3588_PD_CPU_0 0
+#define RK3588_PD_CPU_1 1
+#define RK3588_PD_CPU_2 2
+#define RK3588_PD_CPU_3 3
+
+/* VD_BIGCORE0 */
+#define RK3588_PD_CPU_4 4
+#define RK3588_PD_CPU_5 5
+
+/* VD_BIGCORE1 */
+#define RK3588_PD_CPU_6 6
+#define RK3588_PD_CPU_7 7
+
+/* VD_NPU */
+#define RK3588_PD_NPU 8
+#define RK3588_PD_NPUTOP 9
+#define RK3588_PD_NPU1 10
+#define RK3588_PD_NPU2 11
+
+/* VD_GPU */
+#define RK3588_PD_GPU 12
+
+/* VD_VCODEC */
+#define RK3588_PD_VCODEC 13
+#define RK3588_PD_RKVDEC0 14
+#define RK3588_PD_RKVDEC1 15
+#define RK3588_PD_VENC0 16
+#define RK3588_PD_VENC1 17
+
+/* VD_DDR01 */
+#define RK3588_PD_DDR01 18
+
+/* VD_DDR23 */
+#define RK3588_PD_DDR23 19
+
+/* VD_LOGIC */
+#define RK3588_PD_CENTER 20
+#define RK3588_PD_VDPU 21
+#define RK3588_PD_RGA30 22
+#define RK3588_PD_AV1 23
+#define RK3588_PD_VOP 24
+#define RK3588_PD_VO0 25
+#define RK3588_PD_VO1 26
+#define RK3588_PD_VI 27
+#define RK3588_PD_ISP1 28
+#define RK3588_PD_FEC 29
+#define RK3588_PD_RGA31 30
+#define RK3588_PD_USB 31
+#define RK3588_PD_PHP 32
+#define RK3588_PD_GMAC 33
+#define RK3588_PD_PCIE 34
+#define RK3588_PD_NVM 35
+#define RK3588_PD_NVM0 36
+#define RK3588_PD_SDIO 37
+#define RK3588_PD_AUDIO 38
+#define RK3588_PD_SECURE 39
+#define RK3588_PD_SDMMC 40
+#define RK3588_PD_CRYPTO 41
+#define RK3588_PD_BUS 42
+
+/* VD_PMU */
+#define RK3588_PD_PMU1 43
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rv1126-power.h b/include/dt-bindings/power/rockchip,rv1126-power.h
new file mode 100644
index 000000000000..38a68e000d38
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rv1126-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_POWER_RV1126_POWER_H__
+#define __DT_BINDINGS_POWER_RV1126_POWER_H__
+
+/* VD_CORE */
+#define RV1126_PD_CPU_0 0
+#define RV1126_PD_CPU_1 1
+#define RV1126_PD_CPU_2 2
+#define RV1126_PD_CPU_3 3
+#define RV1126_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RV1126_PD_PMU 5
+#define RV1126_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RV1126_PD_NPU 7
+
+/* VD_VEPU */
+#define RV1126_PD_VEPU 8
+
+/* VD_LOGIC */
+#define RV1126_PD_VI 9
+#define RV1126_PD_VO 10
+#define RV1126_PD_ISPP 11
+#define RV1126_PD_VDPU 12
+#define RV1126_PD_CRYPTO 13
+#define RV1126_PD_DDR 14
+#define RV1126_PD_NVM 15
+#define RV1126_PD_SDIO 16
+#define RV1126_PD_USB 17
+#define RV1126_PD_LOGIC_ALIVE 18
+
+#endif
diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h
new file mode 100644
index 000000000000..7b4f24927dee
--- /dev/null
+++ b/include/dt-bindings/power/starfive,jh7110-pmu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022-2023 StarFive Technology Co., Ltd.
+ * Author: Walker Chen <walker.chen@starfivetech.com>
+ */
+#ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__
+#define __DT_BINDINGS_POWER_JH7110_POWER_H__
+
+#define JH7110_PD_SYSTOP 0
+#define JH7110_PD_CPU 1
+#define JH7110_PD_GPUA 2
+#define JH7110_PD_VDEC 3
+#define JH7110_PD_VOUT 4
+#define JH7110_PD_ISP 5
+#define JH7110_PD_VENC 6
+
+/* AON Power Domain */
+#define JH7110_AON_PD_DPHY_TX 0
+#define JH7110_AON_PD_DPHY_RX 1
+
+#endif
diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h
new file mode 100644
index 000000000000..14f2f9cf2020
--- /dev/null
+++ b/include/dt-bindings/power/summit,smb347-charger.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */
+/*
+ * Author: David Heidelberg <david@ixit.cz>
+ */
+
+#ifndef _DT_BINDINGS_SMB347_CHARGER_H
+#define _DT_BINDINGS_SMB347_CHARGER_H
+
+/* Charging compensation method */
+#define SMB3XX_SOFT_TEMP_COMPENSATE_NONE 0
+#define SMB3XX_SOFT_TEMP_COMPENSATE_CURRENT 1
+#define SMB3XX_SOFT_TEMP_COMPENSATE_VOLTAGE 2
+
+/* Charging enable control */
+#define SMB3XX_CHG_ENABLE_SW 0
+#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW 1
+#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH 2
+
+/* Polarity of INOK signal */
+#define SMB3XX_SYSOK_INOK_ACTIVE_LOW 0
+#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH 1
+
+#endif
diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h
new file mode 100644
index 000000000000..b0fec2ddec84
--- /dev/null
+++ b/include/dt-bindings/power/tegra234-powergate.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T234_POWERGATE_T234_H_
+#define __ABI_MACH_T234_POWERGATE_T234_H_
+
+#define TEGRA234_POWER_DOMAIN_OFA 1U
+#define TEGRA234_POWER_DOMAIN_AUD 2U
+#define TEGRA234_POWER_DOMAIN_DISP 3U
+#define TEGRA234_POWER_DOMAIN_PCIEX8A 5U
+#define TEGRA234_POWER_DOMAIN_PCIEX4A 6U
+#define TEGRA234_POWER_DOMAIN_PCIEX4BA 7U
+#define TEGRA234_POWER_DOMAIN_PCIEX4BB 8U
+#define TEGRA234_POWER_DOMAIN_PCIEX1A 9U
+#define TEGRA234_POWER_DOMAIN_XUSBA 10U
+#define TEGRA234_POWER_DOMAIN_XUSBB 11U
+#define TEGRA234_POWER_DOMAIN_XUSBC 12U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CA 13U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CB 14U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CC 15U
+#define TEGRA234_POWER_DOMAIN_PCIEX8B 16U
+#define TEGRA234_POWER_DOMAIN_MGBEA 17U
+#define TEGRA234_POWER_DOMAIN_MGBEB 18U
+#define TEGRA234_POWER_DOMAIN_MGBEC 19U
+#define TEGRA234_POWER_DOMAIN_MGBED 20U
+#define TEGRA234_POWER_DOMAIN_ISPA 22U
+#define TEGRA234_POWER_DOMAIN_NVDEC 23U
+#define TEGRA234_POWER_DOMAIN_NVJPGA 24U
+#define TEGRA234_POWER_DOMAIN_NVENC 25U
+#define TEGRA234_POWER_DOMAIN_VI 28U
+#define TEGRA234_POWER_DOMAIN_VIC 29U
+#define TEGRA234_POWER_DOMAIN_PVA 30U
+#define TEGRA234_POWER_DOMAIN_DLAA 32U
+#define TEGRA234_POWER_DOMAIN_DLAB 33U
+#define TEGRA234_POWER_DOMAIN_CV 34U
+#define TEGRA234_POWER_DOMAIN_GPU 35U
+#define TEGRA234_POWER_DOMAIN_NVJPGB 36U
+
+#endif
diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h
index 0d9a412fd5e0..618024cbb20d 100644
--- a/include/dt-bindings/power/xlnx-zynqmp-power.h
+++ b/include/dt-bindings/power/xlnx-zynqmp-power.h
@@ -6,6 +6,12 @@
#ifndef _DT_BINDINGS_ZYNQMP_POWER_H
#define _DT_BINDINGS_ZYNQMP_POWER_H
+#define PD_RPU_0 7
+#define PD_RPU_1 8
+#define PD_R5_0_ATCM 15
+#define PD_R5_0_BTCM 16
+#define PD_R5_1_ATCM 17
+#define PD_R5_1_BTCM 18
#define PD_USB_0 22
#define PD_USB_1 23
#define PD_TTC_0 24
diff --git a/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h b/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h
new file mode 100644
index 000000000000..27c5ce68847b
--- /dev/null
+++ b/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Nicolas Saenz Julienne
+ * Author: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#ifndef _DT_BINDINGS_RASPBERRYPI_FIRMWARE_PWM_H
+#define _DT_BINDINGS_RASPBERRYPI_FIRMWARE_PWM_H
+
+#define RASPBERRYPI_FIRMWARE_PWM_POE 0
+#define RASPBERRYPI_FIRMWARE_PWM_NUM 1
+
+#endif
diff --git a/include/dt-bindings/regulator/dlg,da9121-regulator.h b/include/dt-bindings/regulator/dlg,da9121-regulator.h
new file mode 100644
index 000000000000..954edf633ce7
--- /dev/null
+++ b/include/dt-bindings/regulator/dlg,da9121-regulator.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9121_H
+#define _DT_BINDINGS_REGULATOR_DLG_DA9121_H
+
+/*
+ * These buck mode constants may be used to specify values in device tree
+ * properties (e.g. regulator-initial-mode).
+ * A description of the following modes is in the manufacturer's datasheet.
+ */
+
+#define DA9121_BUCK_MODE_FORCE_PFM 0
+#define DA9121_BUCK_MODE_FORCE_PWM 1
+#define DA9121_BUCK_MODE_FORCE_PWM_SHEDDING 2
+#define DA9121_BUCK_MODE_AUTO 3
+
+#define DA9121_BUCK_RIPPLE_CANCEL_NONE 0
+#define DA9121_BUCK_RIPPLE_CANCEL_SMALL 1
+#define DA9121_BUCK_RIPPLE_CANCEL_MID 2
+#define DA9121_BUCK_RIPPLE_CANCEL_LARGE 3
+
+#endif
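
As the comment above notes, these constants are intended for standard regulator
properties such as regulator-initial-mode. A minimal sketch of one buck
sub-node, with the node name and surrounding hierarchy assumed from the
dlg,da9121 binding rather than shown in this diff:

	buck1 {
		regulator-name = "vdd-core";
		regulator-initial-mode = <DA9121_BUCK_MODE_AUTO>;
	};
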
diff --git a/include/dt-bindings/regulator/mediatek,mt6360-regulator.h b/include/dt-bindings/regulator/mediatek,mt6360-regulator.h
new file mode 100644
index 000000000000..21c75de700c0
--- /dev/null
+++ b/include/dt-bindings/regulator/mediatek,mt6360-regulator.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_MEDIATEK_MT6360_REGULATOR_H__
+#define __DT_BINDINGS_MEDIATEK_MT6360_REGULATOR_H__
+
+/*
+ * BUCK/LDO mode constants which may be used in devicetree properties
+ * (e.g. regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define MT6360_OPMODE_LP 2
+#define MT6360_OPMODE_ULP 3
+#define MT6360_OPMODE_NORMAL 0
+
+#endif
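
regulator-allowed-modes takes a list of modes, so the corresponding sketch
looks like the following (regulator node name assumed); the RT5190A constants
in the next header are consumed the same way:

	buck1 {
		regulator-allowed-modes = <MT6360_OPMODE_NORMAL
					   MT6360_OPMODE_LP
					   MT6360_OPMODE_ULP>;
	};
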
diff --git a/include/dt-bindings/regulator/richtek,rt5190a-regulator.h b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h
new file mode 100644
index 000000000000..63f99d4c1cb3
--- /dev/null
+++ b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__
+#define __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__
+
+/*
+ * BUCK/LDO mode constants which may be used in devicetree properties
+ * (e.g. regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define RT5190A_OPMODE_AUTO 0
+#define RT5190A_OPMODE_FPWM 1
+
+#endif
diff --git a/include/dt-bindings/regulator/st,stm32mp13-regulator.h b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
new file mode 100644
index 000000000000..b3a974dfc585
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+
+/* SCMI voltage domains identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_REG11 0
+#define VOLTD_SCMI_REG18 1
+#define VOLTD_SCMI_USB33 2
+#define VOLTD_SCMI_SDMMC1_IO 3
+#define VOLTD_SCMI_SDMMC2_IO 4
+#define VOLTD_SCMI_VREFBUF 5
+
+/* STPMIC1 regulators */
+#define VOLTD_SCMI_STPMIC1_BUCK1 6
+#define VOLTD_SCMI_STPMIC1_BUCK2 7
+#define VOLTD_SCMI_STPMIC1_BUCK3 8
+#define VOLTD_SCMI_STPMIC1_BUCK4 9
+#define VOLTD_SCMI_STPMIC1_LDO1 10
+#define VOLTD_SCMI_STPMIC1_LDO2 11
+#define VOLTD_SCMI_STPMIC1_LDO3 12
+#define VOLTD_SCMI_STPMIC1_LDO4 13
+#define VOLTD_SCMI_STPMIC1_LDO5 14
+#define VOLTD_SCMI_STPMIC1_LDO6 15
+#define VOLTD_SCMI_STPMIC1_VREFDDR 16
+#define VOLTD_SCMI_STPMIC1_BOOST 17
+#define VOLTD_SCMI_STPMIC1_PWR_SW1 18
+#define VOLTD_SCMI_STPMIC1_PWR_SW2 19
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 20
+#define VOLTD_SCMI_REGU1 21
+#define VOLTD_SCMI_REGU2 22
+#define VOLTD_SCMI_REGU3 23
+#define VOLTD_SCMI_REGU4 24
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H */
diff --git a/include/dt-bindings/regulator/ti,tps62864.h b/include/dt-bindings/regulator/ti,tps62864.h
new file mode 100644
index 000000000000..8db31f23d956
--- /dev/null
+++ b/include/dt-bindings/regulator/ti,tps62864.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_REGULATOR_TI_TPS62864_H
+#define _DT_BINDINGS_REGULATOR_TI_TPS62864_H
+
+#define TPS62864_MODE_NORMAL 0
+#define TPS62864_MODE_FPWM 1
+
+#endif
diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h
index 70ea3a09dbe1..04c4d0c6fd34 100644
--- a/include/dt-bindings/reset/altr,rst-mgr-s10.h
+++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h
@@ -63,12 +63,15 @@
#define I2C2_RESET 74
#define I2C3_RESET 75
#define I2C4_RESET 76
-/* 77-79 is empty */
+#define I3C0_RESET 77
+#define I3C1_RESET 78
+/* 79 is empty */
#define UART0_RESET 80
#define UART1_RESET 81
/* 82-87 is empty */
#define GPIO0_RESET 88
#define GPIO1_RESET 89
+#define WATCHDOG4_RESET 90
/* BRGMODRST */
#define SOC2FPGA_RESET 96
diff --git a/include/dt-bindings/reset/amlogic,c3-reset.h b/include/dt-bindings/reset/amlogic,c3-reset.h
new file mode 100644
index 000000000000..d9127863f603
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,c3-reset.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_RESET_H
+#define _DT_BINDINGS_AMLOGIC_C3_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+/* 9 */
+#define RESET_USB2DRD 10
+#define RESET_MIPI_DSI_HOST 11
+#define RESET_MIPI_DSI_PHY 12
+/* 13-20 */
+#define RESET_GE2D 21
+#define RESET_DWAP 22
+/* 23-31 */
+
+/* RESET1 */
+#define RESET_AUDIO 32
+/* 33-34 */
+#define RESET_DDRAPB 35
+#define RESET_DDR 36
+#define RESET_DOS_CAPB3 37
+#define RESET_DOS 38
+/* 39-46 */
+#define RESET_NNA 47
+#define RESET_ETHERNET 48
+#define RESET_ISP 49
+#define RESET_VC9000E_APB 50
+#define RESET_VC9000E_A 51
+/* 52 */
+#define RESET_VC9000E_CORE 53
+/* 54-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IRCTRL 65
+/* 66 */
+#define RESET_TEMP_PII 67
+/* 68-72 */
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+#define RESET_RSA 75
+
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SAR_ADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+/* 89-90 */
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+#define RESET_ISP_NIC_GPV 96
+#define RESET_ISP_NIC_MAIN 97
+#define RESET_ISP_NIC_VCLK 98
+#define RESET_ISP_NIC_VOUT 99
+#define RESET_ISP_NIC_ALL 100
+#define RESET_VOUT 101
+#define RESET_VOUT_VENC 102
+/* 103 */
+#define RESET_CVE_NIC_GPV 104
+#define RESET_CVE_NIC_MAIN 105
+#define RESET_CVE_NIC_GE2D 106
+#define RESET_CVE_NIC_DW 107
+#define RESET_CVE_NIC_CVE 108
+#define RESET_CVE_NIC_ALL 109
+#define RESET_CVE 110
+/* 111-127 */
+
+/* RESET4 */
+#define RESET_RTC 128
+#define RESET_PWM_AB 129
+#define RESET_PWM_CD 130
+#define RESET_PWM_EF 131
+#define RESET_PWM_GH 132
+#define RESET_PWM_IJ 133
+#define RESET_PWM_KL 134
+#define RESET_PWM_MN 135
+/* 136-137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+#define RESET_UART_F 143
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+/* 149-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_SD_EMMC_C 154
+
+/* RESET5 */
+/* 160-172 */
+#define RESET_BRG_NIC_NNA 173
+#define RESET_BRG_MUX_NIC_MAIN 174
+#define RESET_BRG_AO_NIC_ALL 175
+/* 176-183 */
+#define RESET_BRG_NIC_VAPB 184
+#define RESET_BRG_NIC_SDIO_B 185
+#define RESET_BRG_NIC_SDIO_A 186
+#define RESET_BRG_NIC_EMMC 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
index 6d487c5eba2c..45f6b8a951d0 100644
--- a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
@@ -69,7 +69,9 @@
#define RESET_PARSER_FETCH 72
#define RESET_CTL 73
#define RESET_PARSER_TOP 74
-/* 75-77 */
+/* 75 */
+#define RESET_NNA 76
+/* 77 */
#define RESET_DVALIN 78
#define RESET_HDMITX 79
/* 80-95 */
diff --git a/include/dt-bindings/reset/amlogic,meson-s4-reset.h b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
new file mode 100644
index 000000000000..eab428eb8ad6
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Zelong Dong <zelong.dong@amlogic.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+
+/* RESET0 */
+#define RESET_USB_DDR0 0
+#define RESET_USB_DDR1 1
+#define RESET_USB_DDR2 2
+#define RESET_USB_DDR3 3
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+#define RESET_USBPHY21 9
+/* 10-15 */
+#define RESET_HDMITX_APB 16
+#define RESET_BRG_VCBUS_DEC 17
+#define RESET_VCBUS 18
+#define RESET_VID_PLL_DIV 19
+#define RESET_VDI6 20
+#define RESET_GE2D 21
+#define RESET_HDMITXPHY 22
+#define RESET_VID_LOCK 23
+#define RESET_VENCL 24
+#define RESET_VDAC 25
+#define RESET_VENCP 26
+#define RESET_VENCI 27
+#define RESET_RDMA 28
+#define RESET_HDMI_TX 29
+#define RESET_VIU 30
+#define RESET_VENC 31
+
+/* RESET1 */
+#define RESET_AUDIO 32
+#define RESET_MALI_APB 33
+#define RESET_MALI 34
+#define RESET_DDR_APB 35
+#define RESET_DDR 36
+#define RESET_DOS_APB 37
+#define RESET_DOS 38
+/* 39-47 */
+#define RESET_ETH 48
+/* 49-51 */
+#define RESET_DEMOD 52
+/* 53-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IR_CTRL 65
+#define RESET_TEMPSENSOR_DDR 66
+#define RESET_TEMPSENSOR_PLL 67
+/* 68-71 */
+#define RESET_SMART_CARD 72
+#define RESET_SPICC0 73
+/* 74 */
+#define RESET_RSA 75
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SARADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+#define RESET_CEC 89
+#define RESET_AFIFO 90
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+/* 96-127 */
+
+/* RESET4 */
+/* 128-131 */
+#define RESET_PWM_AB 132
+#define RESET_PWM_CD 133
+#define RESET_PWM_EF 134
+#define RESET_PWM_GH 135
+#define RESET_PWM_IJ 136
+/* 137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+/* 143 */
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+#define RESET_I2C_M_E 149
+/* 150-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_NAND_EMMC 154
+/* 155-159 */
+
+/* RESET5 */
+#define RESET_BRG_VDEC_PIPL0 160
+#define RESET_BRG_HEVCF_PIPL0 161
+/* 162 */
+#define RESET_BRG_HCODEC_PIPL0 163
+#define RESET_BRG_GE2D_PIPL0 164
+#define RESET_BRG_VPU_PIPL0 165
+#define RESET_BRG_CPU_PIPL0 166
+#define RESET_BRG_MALI_PIPL0 167
+/* 168 */
+#define RESET_BRG_MALI_PIPL1 169
+/* 170-171 */
+#define RESET_BRG_HEVCF_PIPL1 172
+#define RESET_BRG_HEVCB_PIPL1 173
+/* 174-183 */
+#define RESET_RAMA 184
+/* 185-186 */
+#define RESET_BRG_NIC_VAPB 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/include/dt-bindings/reset/bcm6318-reset.h b/include/dt-bindings/reset/bcm6318-reset.h
new file mode 100644
index 000000000000..f882662505ea
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6318-reset.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6318_H
+#define __DT_BINDINGS_RESET_BCM6318_H
+
+#define BCM6318_RST_SPI 0
+#define BCM6318_RST_EPHY 1
+#define BCM6318_RST_SAR 2
+#define BCM6318_RST_ENETSW 3
+#define BCM6318_RST_USBD 4
+#define BCM6318_RST_USBH 5
+#define BCM6318_RST_PCIE_CORE 6
+#define BCM6318_RST_PCIE 7
+#define BCM6318_RST_PCIE_EXT 8
+#define BCM6318_RST_PCIE_HARD 9
+#define BCM6318_RST_ADSL 10
+#define BCM6318_RST_PHYMIPS 11
+#define BCM6318_RST_HOSTMIPS 12
+
+#endif /* __DT_BINDINGS_RESET_BCM6318_H */
diff --git a/include/dt-bindings/reset/bcm63268-reset.h b/include/dt-bindings/reset/bcm63268-reset.h
new file mode 100644
index 000000000000..d87a7882782a
--- /dev/null
+++ b/include/dt-bindings/reset/bcm63268-reset.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM63268_H
+#define __DT_BINDINGS_RESET_BCM63268_H
+
+#define BCM63268_RST_SPI 0
+#define BCM63268_RST_IPSEC 1
+#define BCM63268_RST_EPHY 2
+#define BCM63268_RST_SAR 3
+#define BCM63268_RST_ENETSW 4
+#define BCM63268_RST_USBS 5
+#define BCM63268_RST_USBH 6
+#define BCM63268_RST_PCM 7
+#define BCM63268_RST_PCIE_CORE 8
+#define BCM63268_RST_PCIE 9
+#define BCM63268_RST_PCIE_EXT 10
+#define BCM63268_RST_WLAN_SHIM 11
+#define BCM63268_RST_DDR_PHY 12
+#define BCM63268_RST_FAP0 13
+#define BCM63268_RST_WLAN_UBUS 14
+#define BCM63268_RST_DECT 15
+#define BCM63268_RST_FAP1 16
+#define BCM63268_RST_PCIE_HARD 17
+#define BCM63268_RST_GPHY 18
+
+#define BCM63268_TRST_SW 29
+#define BCM63268_TRST_HW 30
+#define BCM63268_TRST_POR 31
+
+#endif /* __DT_BINDINGS_RESET_BCM63268_H */
diff --git a/include/dt-bindings/reset/bcm6328-reset.h b/include/dt-bindings/reset/bcm6328-reset.h
new file mode 100644
index 000000000000..0f3df87d47af
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6328-reset.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6328_H
+#define __DT_BINDINGS_RESET_BCM6328_H
+
+#define BCM6328_RST_SPI 0
+#define BCM6328_RST_EPHY 1
+#define BCM6328_RST_SAR 2
+#define BCM6328_RST_ENETSW 3
+#define BCM6328_RST_USBS 4
+#define BCM6328_RST_USBH 5
+#define BCM6328_RST_PCM 6
+#define BCM6328_RST_PCIE_CORE 7
+#define BCM6328_RST_PCIE 8
+#define BCM6328_RST_PCIE_EXT 9
+#define BCM6328_RST_PCIE_HARD 10
+
+#endif /* __DT_BINDINGS_RESET_BCM6328_H */
diff --git a/include/dt-bindings/reset/bcm6358-reset.h b/include/dt-bindings/reset/bcm6358-reset.h
new file mode 100644
index 000000000000..bda62ef84f5a
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6358-reset.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6358_H
+#define __DT_BINDINGS_RESET_BCM6358_H
+
+#define BCM6358_RST_SPI 0
+#define BCM6358_RST_ENET 2
+#define BCM6358_RST_MPI 3
+#define BCM6358_RST_EPHY 6
+#define BCM6358_RST_SAR 7
+#define BCM6358_RST_USBH 12
+#define BCM6358_RST_PCM 13
+#define BCM6358_RST_ADSL 14
+
+#endif /* __DT_BINDINGS_RESET_BCM6358_H */
diff --git a/include/dt-bindings/reset/bcm6362-reset.h b/include/dt-bindings/reset/bcm6362-reset.h
new file mode 100644
index 000000000000..7ebb0546e0ab
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6362-reset.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6362_H
+#define __DT_BINDINGS_RESET_BCM6362_H
+
+#define BCM6362_RST_SPI 0
+#define BCM6362_RST_IPSEC 1
+#define BCM6362_RST_EPHY 2
+#define BCM6362_RST_SAR 3
+#define BCM6362_RST_ENETSW 4
+#define BCM6362_RST_USBD 5
+#define BCM6362_RST_USBH 6
+#define BCM6362_RST_PCM 7
+#define BCM6362_RST_PCIE_CORE 8
+#define BCM6362_RST_PCIE 9
+#define BCM6362_RST_PCIE_EXT 10
+#define BCM6362_RST_WLAN_SHIM 11
+#define BCM6362_RST_DDR_PHY 12
+#define BCM6362_RST_FAP 13
+#define BCM6362_RST_WLAN_UBUS 14
+
+#endif /* __DT_BINDINGS_RESET_BCM6362_H */
diff --git a/include/dt-bindings/reset/bcm6368-reset.h b/include/dt-bindings/reset/bcm6368-reset.h
new file mode 100644
index 000000000000..c81d8eb6d173
--- /dev/null
+++ b/include/dt-bindings/reset/bcm6368-reset.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_RESET_BCM6368_H
+#define __DT_BINDINGS_RESET_BCM6368_H
+
+#define BCM6368_RST_SPI 0
+#define BCM6368_RST_MPI 3
+#define BCM6368_RST_IPSEC 4
+#define BCM6368_RST_EPHY 6
+#define BCM6368_RST_SAR 7
+#define BCM6368_RST_SWITCH 10
+#define BCM6368_RST_USBD 11
+#define BCM6368_RST_USBH 12
+#define BCM6368_RST_PCM 13
+
+#endif /* __DT_BINDINGS_RESET_BCM6368_H */
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
index 3578e83026bc..c691efaa678f 100644
--- a/include/dt-bindings/reset/bt1-ccu.h
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -21,5 +21,14 @@
#define CCU_SYS_SATA_REF_RST 0
#define CCU_SYS_APB_RST 1
+#define CCU_SYS_DDR_FULL_RST 2
+#define CCU_SYS_DDR_INIT_RST 3
+#define CCU_SYS_PCIE_PCS_PHY_RST 4
+#define CCU_SYS_PCIE_PIPE0_RST 5
+#define CCU_SYS_PCIE_CORE_RST 6
+#define CCU_SYS_PCIE_PWR_RST 7
+#define CCU_SYS_PCIE_STICKY_RST 8
+#define CCU_SYS_PCIE_NSTICKY_RST 9
+#define CCU_SYS_PCIE_HOT_RST 10
#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
diff --git a/include/dt-bindings/reset/delta,tn48m-reset.h b/include/dt-bindings/reset/delta,tn48m-reset.h
new file mode 100644
index 000000000000..d4e9ed12de3e
--- /dev/null
+++ b/include/dt-bindings/reset/delta,tn48m-reset.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Delta TN48M CPLD GPIO driver
+ *
+ * Copyright (C) 2021 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#ifndef _DT_BINDINGS_RESET_TN48M_H
+#define _DT_BINDINGS_RESET_TN48M_H
+
+#define CPU_88F7040_RESET 0
+#define CPU_88F6820_RESET 1
+#define MAC_98DX3265_RESET 2
+#define PHY_88E1680_RESET 3
+#define PHY_88E1512_RESET 4
+#define POE_RESET 5
+
+#endif /* _DT_BINDINGS_RESET_TN48M_H */
diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h
index a5b570737582..705870693ec2 100644
--- a/include/dt-bindings/reset/imx8mq-reset.h
+++ b/include/dt-bindings/reset/imx8mq-reset.h
@@ -58,7 +58,10 @@
#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM/i.MX8MN does NOT support */
#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM/i.MX8MN does NOT support */
#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_SW_M4C_RST 50
+#define IMX8MQ_RESET_SW_M4P_RST 51
+#define IMX8MQ_RESET_M4_ENABLE 52
-#define IMX8MQ_RESET_NUM 50
+#define IMX8MQ_RESET_NUM 53
#endif
diff --git a/include/dt-bindings/reset/imx8ulp-pcc-reset.h b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
new file mode 100644
index 000000000000..e99a4735c3c4
--- /dev/null
+++ b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef DT_BINDING_PCC_RESET_IMX8ULP_H
+#define DT_BINDING_PCC_RESET_IMX8ULP_H
+
+/* PCC3 */
+#define PCC3_WDOG3_SWRST 0
+#define PCC3_WDOG4_SWRST 1
+#define PCC3_LPIT1_SWRST 2
+#define PCC3_TPM4_SWRST 3
+#define PCC3_TPM5_SWRST 4
+#define PCC3_FLEXIO1_SWRST 5
+#define PCC3_I3C2_SWRST 6
+#define PCC3_LPI2C4_SWRST 7
+#define PCC3_LPI2C5_SWRST 8
+#define PCC3_LPUART4_SWRST 9
+#define PCC3_LPUART5_SWRST 10
+#define PCC3_LPSPI4_SWRST 11
+#define PCC3_LPSPI5_SWRST 12
+
+/* PCC4 */
+#define PCC4_FLEXSPI2_SWRST 0
+#define PCC4_TPM6_SWRST 1
+#define PCC4_TPM7_SWRST 2
+#define PCC4_LPI2C6_SWRST 3
+#define PCC4_LPI2C7_SWRST 4
+#define PCC4_LPUART6_SWRST 5
+#define PCC4_LPUART7_SWRST 6
+#define PCC4_SAI4_SWRST 7
+#define PCC4_SAI5_SWRST 8
+#define PCC4_USDHC0_SWRST 9
+#define PCC4_USDHC1_SWRST 10
+#define PCC4_USDHC2_SWRST 11
+#define PCC4_USB0_SWRST 12
+#define PCC4_USB0_PHY_SWRST 13
+#define PCC4_USB1_SWRST 14
+#define PCC4_USB1_PHY_SWRST 15
+#define PCC4_ENET_SWRST 16
+
+/* PCC5 */
+#define PCC5_TPM8_SWRST 0
+#define PCC5_SAI6_SWRST 1
+#define PCC5_SAI7_SWRST 2
+#define PCC5_SPDIF_SWRST 3
+#define PCC5_ISI_SWRST 4
+#define PCC5_CSI_REGS_SWRST 5
+#define PCC5_CSI_SWRST 6
+#define PCC5_DSI_SWRST 7
+#define PCC5_WDOG5_SWRST 8
+#define PCC5_EPDC_SWRST 9
+#define PCC5_PXP_SWRST 10
+#define PCC5_GPU2D_SWRST 11
+#define PCC5_GPU3D_SWRST 12
+#define PCC5_DC_NANO_SWRST 13
+
+#endif /* DT_BINDING_PCC_RESET_IMX8ULP_H */
diff --git a/include/dt-bindings/reset/k210-rst.h b/include/dt-bindings/reset/k210-rst.h
new file mode 100644
index 000000000000..883c1aed50e8
--- /dev/null
+++ b/include/dt-bindings/reset/k210-rst.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef RESET_K210_SYSCTL_H
+#define RESET_K210_SYSCTL_H
+
+/*
+ * Kendryte K210 SoC system controller K210_SYSCTL_SOFT_RESET register bits.
+ * Taken from Kendryte SDK (kendryte-standalone-sdk).
+ */
+#define K210_RST_ROM 0
+#define K210_RST_DMA 1
+#define K210_RST_AI 2
+#define K210_RST_DVP 3
+#define K210_RST_FFT 4
+#define K210_RST_GPIO 5
+#define K210_RST_SPI0 6
+#define K210_RST_SPI1 7
+#define K210_RST_SPI2 8
+#define K210_RST_SPI3 9
+#define K210_RST_I2S0 10
+#define K210_RST_I2S1 11
+#define K210_RST_I2S2 12
+#define K210_RST_I2C0 13
+#define K210_RST_I2C1 14
+#define K210_RST_I2C2 15
+#define K210_RST_UART1 16
+#define K210_RST_UART2 17
+#define K210_RST_UART3 18
+#define K210_RST_AES 19
+#define K210_RST_FPIOA 20
+#define K210_RST_TIMER0 21
+#define K210_RST_TIMER1 22
+#define K210_RST_TIMER2 23
+#define K210_RST_WDT0 24
+#define K210_RST_WDT1 25
+#define K210_RST_SHA 26
+#define K210_RST_RTC 29
+
+#endif /* RESET_K210_SYSCTL_H */
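
These soft-reset bit indexes are consumed through the standard resets property
of the reset consumer binding. A minimal sketch, with the controller label and
peripheral node purely illustrative:

	uart1 {
		resets = <&sysrst K210_RST_UART1>;
	};
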
diff --git a/include/dt-bindings/reset/mediatek,mt6735-wdt.h b/include/dt-bindings/reset/mediatek,mt6735-wdt.h
new file mode 100644
index 000000000000..c6056e676d46
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-wdt.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MEDIATEK_MT6735_WDT_H_
+#define _DT_BINDINGS_RESET_MEDIATEK_MT6735_WDT_H_
+
+#define MT6735_TOPRGU_MM_RST 1
+#define MT6735_TOPRGU_MFG_RST 2
+#define MT6735_TOPRGU_VENC_RST 3
+#define MT6735_TOPRGU_VDEC_RST 4
+#define MT6735_TOPRGU_IMG_RST 5
+#define MT6735_TOPRGU_MD_RST 7
+#define MT6735_TOPRGU_CONN_RST 9
+#define MT6735_TOPRGU_C2K_SW_RST 14
+#define MT6735_TOPRGU_C2K_RST 15
+#define MT6735_TOPRGU_RST_NUM 9
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt6795-resets.h b/include/dt-bindings/reset/mediatek,mt6795-resets.h
new file mode 100644
index 000000000000..5464a4a79a70
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6795-resets.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT6795
+#define _DT_BINDINGS_RESET_CONTROLLER_MT6795
+
+/* INFRACFG resets */
+#define MT6795_INFRA_RST0_SCPSYS_RST 0
+#define MT6795_INFRA_RST0_PMIC_WRAP_RST 1
+#define MT6795_INFRA_RST1_MIPI_DSI_RST 2
+#define MT6795_INFRA_RST1_MIPI_CSI_RST 3
+#define MT6795_INFRA_RST1_MM_IOMMU_RST 4
+
+/* MMSYS resets */
+#define MT6795_MMSYS_SW0_RST_B_SMI_COMMON 0
+#define MT6795_MMSYS_SW0_RST_B_SMI_LARB 1
+#define MT6795_MMSYS_SW0_RST_B_CAM_MDP 2
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA0 3
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA1 4
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ0 5
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ1 6
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ2 7
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP0 8
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP1 9
+#define MT6795_MMSYS_SW0_RST_B_MDP_WDMA 10
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT0 11
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT1 12
+#define MT6795_MMSYS_SW0_RST_B_MDP_CROP 13
+
+/* PERICFG resets */
+#define MT6795_PERI_NFI_SW_RST 0
+#define MT6795_PERI_THERM_SW_RST 1
+#define MT6795_PERI_MSDC1_SW_RST 2
+
+/* TOPRGU resets */
+#define MT6795_TOPRGU_INFRA_SW_RST 0
+#define MT6795_TOPRGU_MM_SW_RST 1
+#define MT6795_TOPRGU_MFG_SW_RST 2
+#define MT6795_TOPRGU_VENC_SW_RST 3
+#define MT6795_TOPRGU_VDEC_SW_RST 4
+#define MT6795_TOPRGU_IMG_SW_RST 5
+#define MT6795_TOPRGU_DDRPHY_SW_RST 6
+#define MT6795_TOPRGU_MD_SW_RST 7
+#define MT6795_TOPRGU_INFRA_AO_SW_RST 8
+#define MT6795_TOPRGU_MD_LITE_SW_RST 9
+#define MT6795_TOPRGU_APMIXED_SW_RST 10
+#define MT6795_TOPRGU_PWRAP_SPI_CTL_RST 11
+#define MT6795_TOPRGU_SW_RST_NUM 12
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT6795 */
diff --git a/include/dt-bindings/reset/mediatek,mt7988-resets.h b/include/dt-bindings/reset/mediatek,mt7988-resets.h
new file mode 100644
index 000000000000..0eb152889a89
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt7988-resets.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7988
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7988
+
+/* ETHWARP resets */
+#define MT7988_ETHWARP_RST_SWITCH 0
+
+/* INFRA resets */
+#define MT7988_INFRA_RST0_PEXTP_MAC_SWRST 0
+#define MT7988_INFRA_RST1_THERM_CTRL_SWRST 1
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7988 */
diff --git a/include/dt-bindings/reset-controller/mt2712-resets.h b/include/dt-bindings/reset/mt2712-resets.h
index 9e7ee762f076..9e7ee762f076 100644
--- a/include/dt-bindings/reset-controller/mt2712-resets.h
+++ b/include/dt-bindings/reset/mt2712-resets.h
diff --git a/include/dt-bindings/reset/mt7621-reset.h b/include/dt-bindings/reset/mt7621-reset.h
new file mode 100644
index 000000000000..7572c6b41453
--- /dev/null
+++ b/include/dt-bindings/reset/mt7621-reset.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Sergio Paracuellos
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef DT_BINDING_MT7621_RESET_H
+#define DT_BINDING_MT7621_RESET_H
+
+#define MT7621_RST_SYS 0
+#define MT7621_RST_MCM 2
+#define MT7621_RST_HSDMA 5
+#define MT7621_RST_FE 6
+#define MT7621_RST_SPDIFTX 7
+#define MT7621_RST_TIMER 8
+#define MT7621_RST_INT 9
+#define MT7621_RST_MC 10
+#define MT7621_RST_PCM 11
+#define MT7621_RST_PIO 13
+#define MT7621_RST_GDMA 14
+#define MT7621_RST_NFI 15
+#define MT7621_RST_I2C 16
+#define MT7621_RST_I2S 17
+#define MT7621_RST_SPI 18
+#define MT7621_RST_UART1 19
+#define MT7621_RST_UART2 20
+#define MT7621_RST_UART3 21
+#define MT7621_RST_ETH 23
+#define MT7621_RST_PCIE0 24
+#define MT7621_RST_PCIE1 25
+#define MT7621_RST_PCIE2 26
+#define MT7621_RST_AUX_STCK 28
+#define MT7621_RST_CRYPTO 29
+#define MT7621_RST_SDXC 30
+#define MT7621_RST_PPE 31
+
+#endif /* DT_BINDING_MT7621_RESET_H */
diff --git a/include/dt-bindings/reset/mt7986-resets.h b/include/dt-bindings/reset/mt7986-resets.h
new file mode 100644
index 000000000000..af3d16c81192
--- /dev/null
+++ b/include/dt-bindings/reset/mt7986-resets.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7986
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7986
+
+/* INFRACFG resets */
+#define MT7986_INFRACFG_PEXTP_MAC_SW_RST 6
+#define MT7986_INFRACFG_SSUSB_SW_RST 7
+#define MT7986_INFRACFG_EIP97_SW_RST 8
+#define MT7986_INFRACFG_AUDIO_SW_RST 13
+#define MT7986_INFRACFG_CQ_DMA_SW_RST 14
+
+#define MT7986_INFRACFG_TRNG_SW_RST 17
+#define MT7986_INFRACFG_AP_DMA_SW_RST 32
+#define MT7986_INFRACFG_I2C_SW_RST 33
+#define MT7986_INFRACFG_NFI_SW_RST 34
+#define MT7986_INFRACFG_SPI0_SW_RST 35
+#define MT7986_INFRACFG_SPI1_SW_RST 36
+#define MT7986_INFRACFG_UART0_SW_RST 37
+#define MT7986_INFRACFG_UART1_SW_RST 38
+#define MT7986_INFRACFG_UART2_SW_RST 39
+#define MT7986_INFRACFG_AUXADC_SW_RST 43
+
+#define MT7986_INFRACFG_APXGPT_SW_RST 66
+#define MT7986_INFRACFG_PWM_SW_RST 68
+
+#define MT7986_INFRACFG_SW_RST_NUM 69
+
+/* TOPRGU resets */
+#define MT7986_TOPRGU_APMIXEDSYS_SW_RST 0
+#define MT7986_TOPRGU_SGMII0_SW_RST 1
+#define MT7986_TOPRGU_SGMII1_SW_RST 2
+#define MT7986_TOPRGU_INFRA_SW_RST 3
+#define MT7986_TOPRGU_U2PHY_SW_RST 5
+#define MT7986_TOPRGU_PCIE_SW_RST 6
+#define MT7986_TOPRGU_SSUSB_SW_RST 7
+#define MT7986_TOPRGU_ETHDMA_SW_RST 20
+#define MT7986_TOPRGU_CONSYS_SW_RST 23
+
+#define MT7986_TOPRGU_SW_RST_NUM 24
+
+/* ETHSYS Subsystem resets */
+#define MT7986_ETHSYS_FE_SW_RST 6
+#define MT7986_ETHSYS_PMTR_SW_RST 8
+#define MT7986_ETHSYS_GMAC_SW_RST 23
+#define MT7986_ETHSYS_PPE0_SW_RST 30
+#define MT7986_ETHSYS_PPE1_SW_RST 31
+
+#define MT7986_ETHSYS_SW_RST_NUM 32
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7986 */
diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h
index ba8636eda5ae..6a60c7cecc4c 100644
--- a/include/dt-bindings/reset/mt8173-resets.h
+++ b/include/dt-bindings/reset/mt8173-resets.h
@@ -27,6 +27,8 @@
#define MT8173_INFRA_GCE_FAXI_RST 40
#define MT8173_INFRA_MMIOMMURST 47
+/* MMSYS resets */
+#define MT8173_MMSYS_SW0_RST_B_DISP_DSI0 25
/* PERICFG resets */
#define MT8173_PERI_UART0_SW_RST 0
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset/mt8183-resets.h
index a1bbd41e0d12..48c5d2de0a38 100644
--- a/include/dt-bindings/reset-controller/mt8183-resets.h
+++ b/include/dt-bindings/reset/mt8183-resets.h
@@ -80,6 +80,9 @@
#define MT8183_INFRACFG_SW_RST_NUM 128
+/* MMSYS resets */
+#define MT8183_MMSYS_SW0_RST_B_DISP_DSI0 25
+
#define MT8183_TOPRGU_MM_SW_RST 1
#define MT8183_TOPRGU_MFG_SW_RST 2
#define MT8183_TOPRGU_VENC_SW_RST 3
diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h
new file mode 100644
index 000000000000..2e9029c22f38
--- /dev/null
+++ b/include/dt-bindings/reset/mt8186-resets.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8186
+
+/* TOPRGU resets */
+#define MT8186_TOPRGU_INFRA_SW_RST 0
+#define MT8186_TOPRGU_MM_SW_RST 1
+#define MT8186_TOPRGU_MFG_SW_RST 2
+#define MT8186_TOPRGU_VENC_SW_RST 3
+#define MT8186_TOPRGU_VDEC_SW_RST 4
+#define MT8186_TOPRGU_IMG_SW_RST 5
+#define MT8186_TOPRGU_DDR_SW_RST 6
+#define MT8186_TOPRGU_INFRA_AO_SW_RST 8
+#define MT8186_TOPRGU_CONNSYS_SW_RST 9
+#define MT8186_TOPRGU_APMIXED_SW_RST 10
+#define MT8186_TOPRGU_PWRAP_SW_RST 11
+#define MT8186_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8186_TOPRGU_IPNNA_SW_RST 13
+#define MT8186_TOPRGU_WPE_SW_RST 14
+#define MT8186_TOPRGU_ADSP_SW_RST 15
+#define MT8186_TOPRGU_AUDIO_SW_RST 17
+#define MT8186_TOPRGU_CAM_MAIN_SW_RST 18
+#define MT8186_TOPRGU_CAM_RAWA_SW_RST 19
+#define MT8186_TOPRGU_CAM_RAWB_SW_RST 20
+#define MT8186_TOPRGU_IPE_SW_RST 21
+#define MT8186_TOPRGU_IMG2_SW_RST 22
+#define MT8186_TOPRGU_SW_RST_NUM 23
+
+/* MMSYS resets */
+#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19
+
+/* INFRA resets */
+#define MT8186_INFRA_THERMAL_CTRL_RST 0
+#define MT8186_INFRA_PTP_CTRL_RST 1
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */
diff --git a/include/dt-bindings/reset/mt8188-resets.h b/include/dt-bindings/reset/mt8188-resets.h
new file mode 100644
index 000000000000..5a58c54e7d20
--- /dev/null
+++ b/include/dt-bindings/reset/mt8188-resets.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8188
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8188
+
+#define MT8188_TOPRGU_CONN_MCU_SW_RST 0
+#define MT8188_TOPRGU_INFRA_GRST_SW_RST 1
+#define MT8188_TOPRGU_IPU0_SW_RST 2
+#define MT8188_TOPRGU_IPU1_SW_RST 3
+#define MT8188_TOPRGU_IPU2_SW_RST 4
+#define MT8188_TOPRGU_AUD_ASRC_SW_RST 5
+#define MT8188_TOPRGU_INFRA_SW_RST 6
+#define MT8188_TOPRGU_MMSYS_SW_RST 7
+#define MT8188_TOPRGU_MFG_SW_RST 8
+#define MT8188_TOPRGU_VENC_SW_RST 9
+#define MT8188_TOPRGU_VDEC_SW_RST 10
+#define MT8188_TOPRGU_CAM_VCORE_SW_RST 11
+#define MT8188_TOPRGU_SCP_SW_RST 12
+#define MT8188_TOPRGU_APMIXEDSYS_SW_RST 13
+#define MT8188_TOPRGU_AUDIO_SW_RST 14
+#define MT8188_TOPRGU_CAMSYS_SW_RST 15
+#define MT8188_TOPRGU_MJC_SW_RST 16
+#define MT8188_TOPRGU_PERI_SW_RST 17
+#define MT8188_TOPRGU_PERI_AO_SW_RST 18
+#define MT8188_TOPRGU_PCIE_SW_RST 19
+#define MT8188_TOPRGU_ADSPSYS_SW_RST 21
+#define MT8188_TOPRGU_DPTX_SW_RST 22
+#define MT8188_TOPRGU_SPMI_MST_SW_RST 23
+
+#define MT8188_TOPRGU_SW_RST_NUM 24
+
+/* INFRA resets */
+#define MT8188_INFRA_RST1_THERMAL_MCU_RST 0
+#define MT8188_INFRA_RST1_THERMAL_CTRL_RST 1
+#define MT8188_INFRA_RST3_PTP_CTRL_RST 2
+
+#define MT8188_VDO0_RST_DISP_OVL0 0
+#define MT8188_VDO0_RST_FAKE_ENG0 1
+#define MT8188_VDO0_RST_DISP_CCORR0 2
+#define MT8188_VDO0_RST_DISP_MUTEX0 3
+#define MT8188_VDO0_RST_DISP_GAMMA0 4
+#define MT8188_VDO0_RST_DISP_DITHER0 5
+#define MT8188_VDO0_RST_DISP_WDMA0 6
+#define MT8188_VDO0_RST_DISP_RDMA0 7
+#define MT8188_VDO0_RST_DSI0 8
+#define MT8188_VDO0_RST_DSI1 9
+#define MT8188_VDO0_RST_DSC_WRAP0 10
+#define MT8188_VDO0_RST_VPP_MERGE0 11
+#define MT8188_VDO0_RST_DP_INTF0 12
+#define MT8188_VDO0_RST_DISP_AAL0 13
+#define MT8188_VDO0_RST_INLINEROT0 14
+#define MT8188_VDO0_RST_APB_BUS 15
+#define MT8188_VDO0_RST_DISP_COLOR0 16
+#define MT8188_VDO0_RST_MDP_WROT0 17
+#define MT8188_VDO0_RST_DISP_RSZ0 18
+
+#define MT8188_VDO1_RST_SMI_LARB2 0
+#define MT8188_VDO1_RST_SMI_LARB3 1
+#define MT8188_VDO1_RST_GALS 2
+#define MT8188_VDO1_RST_FAKE_ENG0 3
+#define MT8188_VDO1_RST_FAKE_ENG1 4
+#define MT8188_VDO1_RST_MDP_RDMA0 5
+#define MT8188_VDO1_RST_MDP_RDMA1 6
+#define MT8188_VDO1_RST_MDP_RDMA2 7
+#define MT8188_VDO1_RST_MDP_RDMA3 8
+#define MT8188_VDO1_RST_VPP_MERGE0 9
+#define MT8188_VDO1_RST_VPP_MERGE1 10
+#define MT8188_VDO1_RST_VPP_MERGE2 11
+#define MT8188_VDO1_RST_VPP_MERGE3 12
+#define MT8188_VDO1_RST_VPP_MERGE4 13
+#define MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8188_VDO1_RST_DISP_MUTEX 16
+#define MT8188_VDO1_RST_MDP_RDMA4 17
+#define MT8188_VDO1_RST_MDP_RDMA5 18
+#define MT8188_VDO1_RST_MDP_RDMA6 19
+#define MT8188_VDO1_RST_MDP_RDMA7 20
+#define MT8188_VDO1_RST_DP_INTF1_MMCK 21
+#define MT8188_VDO1_RST_DPI0_MM_CK 22
+#define MT8188_VDO1_RST_DPI1_MM_CK 23
+#define MT8188_VDO1_RST_MERGE0_DL_ASYNC 24
+#define MT8188_VDO1_RST_MERGE1_DL_ASYNC 25
+#define MT8188_VDO1_RST_MERGE2_DL_ASYNC 26
+#define MT8188_VDO1_RST_MERGE3_DL_ASYNC 27
+#define MT8188_VDO1_RST_MERGE4_DL_ASYNC 28
+#define MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC 29
+#define MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC 30
+#define MT8188_VDO1_RST_PADDING0 31
+#define MT8188_VDO1_RST_PADDING1 32
+#define MT8188_VDO1_RST_PADDING2 33
+#define MT8188_VDO1_RST_PADDING3 34
+#define MT8188_VDO1_RST_PADDING4 35
+#define MT8188_VDO1_RST_PADDING5 36
+#define MT8188_VDO1_RST_PADDING6 37
+#define MT8188_VDO1_RST_PADDING7 38
+#define MT8188_VDO1_RST_DISP_RSZ0 39
+#define MT8188_VDO1_RST_DISP_RSZ1 40
+#define MT8188_VDO1_RST_DISP_RSZ2 41
+#define MT8188_VDO1_RST_DISP_RSZ3 42
+#define MT8188_VDO1_RST_HDR_VDO_FE0 43
+#define MT8188_VDO1_RST_HDR_GFX_FE0 44
+#define MT8188_VDO1_RST_HDR_VDO_BE 45
+#define MT8188_VDO1_RST_HDR_VDO_FE1 46
+#define MT8188_VDO1_RST_HDR_GFX_FE1 47
+#define MT8188_VDO1_RST_DISP_MIXER 48
+#define MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC 49
+#define MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC 50
+#define MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC 51
+#define MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC 52
+#define MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC 53
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8188 */
diff --git a/include/dt-bindings/reset/mt8192-resets.h b/include/dt-bindings/reset/mt8192-resets.h
new file mode 100644
index 000000000000..12e2087c90a3
--- /dev/null
+++ b/include/dt-bindings/reset/mt8192-resets.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yong Liang <yong.liang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8192
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8192
+
+/* TOPRGU resets */
+#define MT8192_TOPRGU_MM_SW_RST 1
+#define MT8192_TOPRGU_MFG_SW_RST 2
+#define MT8192_TOPRGU_VENC_SW_RST 3
+#define MT8192_TOPRGU_VDEC_SW_RST 4
+#define MT8192_TOPRGU_IMG_SW_RST 5
+#define MT8192_TOPRGU_MD_SW_RST 7
+#define MT8192_TOPRGU_CONN_SW_RST 9
+#define MT8192_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8192_TOPRGU_IPU0_SW_RST 14
+#define MT8192_TOPRGU_IPU1_SW_RST 15
+#define MT8192_TOPRGU_AUDIO_SW_RST 17
+#define MT8192_TOPRGU_CAMSYS_SW_RST 18
+#define MT8192_TOPRGU_MJC_SW_RST 19
+#define MT8192_TOPRGU_C2K_S2_SW_RST 20
+#define MT8192_TOPRGU_C2K_SW_RST 21
+#define MT8192_TOPRGU_PERI_SW_RST 22
+#define MT8192_TOPRGU_PERI_AO_SW_RST 23
+
+#define MT8192_TOPRGU_SW_RST_NUM 23
+
+/* MMSYS resets */
+#define MT8192_MMSYS_SW0_RST_B_DISP_DSI0 15
+
+/* INFRA resets */
+#define MT8192_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8192_INFRA_RST2_PEXTP_PHY_SWRST 1
+#define MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST 2
+#define MT8192_INFRA_RST4_PCIE_TOP_SWRST 3
+#define MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST 4
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8192 */
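
These per-SoC headers exist so that device trees and drivers agree on reset line indices by name rather than by magic number. A minimal DTS sketch of how a consumer would reference the MT8192 TOPRGU constants (the node names, addresses, and watchdog compatible are illustrative assumptions, not part of this diff):

	#include <dt-bindings/reset/mt8192-resets.h>

	/* The TOPRGU watchdog block doubles as the reset provider. */
	watchdog: watchdog@10007000 {
		compatible = "mediatek,mt8192-wdt";	/* assumed compatible */
		reg = <0 0x10007000 0 0x100>;
		#reset-cells = <1>;	/* one cell: an index from the table above */
	};

	venc@17020000 {
		/* ... */
		resets = <&watchdog MT8192_TOPRGU_VENC_SW_RST>;
		reset-names = "venc";
	};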
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
new file mode 100644
index 000000000000..e61660438d61
--- /dev/null
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Christine Zhu <christine.zhu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8195
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8195
+
+/* TOPRGU resets */
+#define MT8195_TOPRGU_CONN_MCU_SW_RST 0
+#define MT8195_TOPRGU_INFRA_GRST_SW_RST 1
+#define MT8195_TOPRGU_APU_SW_RST 2
+#define MT8195_TOPRGU_INFRA_AO_GRST_SW_RST 6
+#define MT8195_TOPRGU_MMSYS_SW_RST 7
+#define MT8195_TOPRGU_MFG_SW_RST 8
+#define MT8195_TOPRGU_VENC_SW_RST 9
+#define MT8195_TOPRGU_VDEC_SW_RST 10
+#define MT8195_TOPRGU_IMG_SW_RST 11
+#define MT8195_TOPRGU_APMIXEDSYS_SW_RST 13
+#define MT8195_TOPRGU_AUDIO_SW_RST 14
+#define MT8195_TOPRGU_CAMSYS_SW_RST 15
+#define MT8195_TOPRGU_EDPTX_SW_RST 16
+#define MT8195_TOPRGU_ADSPSYS_SW_RST 21
+#define MT8195_TOPRGU_DPTX_SW_RST 22
+#define MT8195_TOPRGU_SPMI_MST_SW_RST 23
+
+#define MT8195_TOPRGU_SW_RST_NUM 16
+
+/* INFRA resets */
+#define MT8195_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST 1
+#define MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST 2
+#define MT8195_INFRA_RST2_PCIE_P0_SWRST 3
+#define MT8195_INFRA_RST2_PCIE_P1_SWRST 4
+#define MT8195_INFRA_RST2_USBSIF_P1_SWRST 5
+
+/* VDOSYS1 */
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB2 0
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB3 1
+#define MT8195_VDOSYS1_SW0_RST_B_GALS 2
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG0 3
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG1 4
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA0 5
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA1 6
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA2 7
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA3 8
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE0 9
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE1 10
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE2 11
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE3 12
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE4 13
+#define MT8195_VDOSYS1_SW0_RST_B_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8195_VDOSYS1_SW0_RST_B_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MUTEX 16
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA4 17
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA5 18
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA6 19
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA7 20
+#define MT8195_VDOSYS1_SW0_RST_B_DP_INTF0 21
+#define MT8195_VDOSYS1_SW0_RST_B_DPI0 22
+#define MT8195_VDOSYS1_SW0_RST_B_DPI1 23
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MONITOR 24
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC 25
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC 26
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC 27
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC 28
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC 29
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0 32
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0 33
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE 34
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1 48
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1 49
+#define MT8195_VDOSYS1_SW1_RST_B_DISP_MIXER 50
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0_DL_ASYNC 51
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1_DL_ASYNC 52
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0_DL_ASYNC 53
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1_DL_ASYNC 54
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE_DL_ASYNC 55
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/dt-bindings/reset/nuvoton,ma35d1-reset.h b/include/dt-bindings/reset/nuvoton,ma35d1-reset.h
new file mode 100644
index 000000000000..2e99ee0d68c5
--- /dev/null
+++ b/include/dt-bindings/reset/nuvoton,ma35d1-reset.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Nuvoton Technologies.
+ * Author: Chi-Fen Li <cfli0@nuvoton.com>
+ *
+ * Device Tree binding constants for MA35D1 reset controller.
+ */
+
+#ifndef __DT_BINDINGS_RESET_MA35D1_H
+#define __DT_BINDINGS_RESET_MA35D1_H
+
+#define MA35D1_RESET_CHIP 0
+#define MA35D1_RESET_CA35CR0 1
+#define MA35D1_RESET_CA35CR1 2
+#define MA35D1_RESET_CM4 3
+#define MA35D1_RESET_PDMA0 4
+#define MA35D1_RESET_PDMA1 5
+#define MA35D1_RESET_PDMA2 6
+#define MA35D1_RESET_PDMA3 7
+#define MA35D1_RESET_DISP 8
+#define MA35D1_RESET_VCAP0 9
+#define MA35D1_RESET_VCAP1 10
+#define MA35D1_RESET_GFX 11
+#define MA35D1_RESET_VDEC 12
+#define MA35D1_RESET_WHC0 13
+#define MA35D1_RESET_WHC1 14
+#define MA35D1_RESET_GMAC0 15
+#define MA35D1_RESET_GMAC1 16
+#define MA35D1_RESET_HWSEM 17
+#define MA35D1_RESET_EBI 18
+#define MA35D1_RESET_HSUSBH0 19
+#define MA35D1_RESET_HSUSBH1 20
+#define MA35D1_RESET_HSUSBD 21
+#define MA35D1_RESET_USBHL 22
+#define MA35D1_RESET_SDH0 23
+#define MA35D1_RESET_SDH1 24
+#define MA35D1_RESET_NAND 25
+#define MA35D1_RESET_GPIO 26
+#define MA35D1_RESET_MCTLP 27
+#define MA35D1_RESET_MCTLC 28
+#define MA35D1_RESET_DDRPUB 29
+#define MA35D1_RESET_TMR0 30
+#define MA35D1_RESET_TMR1 31
+#define MA35D1_RESET_TMR2 32
+#define MA35D1_RESET_TMR3 33
+#define MA35D1_RESET_I2C0 34
+#define MA35D1_RESET_I2C1 35
+#define MA35D1_RESET_I2C2 36
+#define MA35D1_RESET_I2C3 37
+#define MA35D1_RESET_QSPI0 38
+#define MA35D1_RESET_SPI0 39
+#define MA35D1_RESET_SPI1 40
+#define MA35D1_RESET_SPI2 41
+#define MA35D1_RESET_UART0 42
+#define MA35D1_RESET_UART1 43
+#define MA35D1_RESET_UART2 44
+#define MA35D1_RESET_UART3 45
+#define MA35D1_RESET_UART4 46
+#define MA35D1_RESET_UART5 47
+#define MA35D1_RESET_UART6 48
+#define MA35D1_RESET_UART7 49
+#define MA35D1_RESET_CANFD0 50
+#define MA35D1_RESET_CANFD1 51
+#define MA35D1_RESET_EADC0 52
+#define MA35D1_RESET_I2S0 53
+#define MA35D1_RESET_SC0 54
+#define MA35D1_RESET_SC1 55
+#define MA35D1_RESET_QSPI1 56
+#define MA35D1_RESET_SPI3 57
+#define MA35D1_RESET_EPWM0 58
+#define MA35D1_RESET_EPWM1 59
+#define MA35D1_RESET_QEI0 60
+#define MA35D1_RESET_QEI1 61
+#define MA35D1_RESET_ECAP0 62
+#define MA35D1_RESET_ECAP1 63
+#define MA35D1_RESET_CANFD2 64
+#define MA35D1_RESET_ADC0 65
+#define MA35D1_RESET_TMR4 66
+#define MA35D1_RESET_TMR5 67
+#define MA35D1_RESET_TMR6 68
+#define MA35D1_RESET_TMR7 69
+#define MA35D1_RESET_TMR8 70
+#define MA35D1_RESET_TMR9 71
+#define MA35D1_RESET_TMR10 72
+#define MA35D1_RESET_TMR11 73
+#define MA35D1_RESET_UART8 74
+#define MA35D1_RESET_UART9 75
+#define MA35D1_RESET_UART10 76
+#define MA35D1_RESET_UART11 77
+#define MA35D1_RESET_UART12 78
+#define MA35D1_RESET_UART13 79
+#define MA35D1_RESET_UART14 80
+#define MA35D1_RESET_UART15 81
+#define MA35D1_RESET_UART16 82
+#define MA35D1_RESET_I2S1 83
+#define MA35D1_RESET_I2C4 84
+#define MA35D1_RESET_I2C5 85
+#define MA35D1_RESET_EPWM2 86
+#define MA35D1_RESET_ECAP2 87
+#define MA35D1_RESET_QEI2 88
+#define MA35D1_RESET_CANFD3 89
+#define MA35D1_RESET_KPI 90
+#define MA35D1_RESET_GIC 91
+#define MA35D1_RESET_SSMCC 92
+#define MA35D1_RESET_SSPCC 93
+#define MA35D1_RESET_COUNT 94
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq5018.h b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..8f03c92fc23b
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_UART1_BCR 5
+#define GCC_BLSP1_UART2_BCR 6
+#define GCC_BOOT_ROM_BCR 7
+#define GCC_BTSS_BCR 8
+#define GCC_CMN_BLK_BCR 9
+#define GCC_CMN_LDO_BCR 10
+#define GCC_CE_BCR 11
+#define GCC_CRYPTO_BCR 12
+#define GCC_DCC_BCR 13
+#define GCC_DCD_BCR 14
+#define GCC_DDRSS_BCR 15
+#define GCC_EDPD_BCR 16
+#define GCC_GEPHY_BCR 17
+#define GCC_GEPHY_MDC_SW_ARES 18
+#define GCC_GEPHY_DSP_HW_ARES 19
+#define GCC_GEPHY_RX_ARES 20
+#define GCC_GEPHY_TX_ARES 21
+#define GCC_GMAC0_BCR 22
+#define GCC_GMAC0_CFG_ARES 23
+#define GCC_GMAC0_SYS_ARES 24
+#define GCC_GMAC1_BCR 25
+#define GCC_GMAC1_CFG_ARES 26
+#define GCC_GMAC1_SYS_ARES 27
+#define GCC_IMEM_BCR 28
+#define GCC_LPASS_BCR 29
+#define GCC_MDIO0_BCR 30
+#define GCC_MDIO1_BCR 31
+#define GCC_MPM_BCR 32
+#define GCC_PCIE0_BCR 33
+#define GCC_PCIE0_LINK_DOWN_BCR 34
+#define GCC_PCIE0_PHY_BCR 35
+#define GCC_PCIE0PHY_PHY_BCR 36
+#define GCC_PCIE0_PIPE_ARES 37
+#define GCC_PCIE0_SLEEP_ARES 38
+#define GCC_PCIE0_CORE_STICKY_ARES 39
+#define GCC_PCIE0_AXI_MASTER_ARES 40
+#define GCC_PCIE0_AXI_SLAVE_ARES 41
+#define GCC_PCIE0_AHB_ARES 42
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 43
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 44
+#define GCC_PCIE1_BCR 45
+#define GCC_PCIE1_LINK_DOWN_BCR 46
+#define GCC_PCIE1_PHY_BCR 47
+#define GCC_PCIE1PHY_PHY_BCR 48
+#define GCC_PCIE1_PIPE_ARES 49
+#define GCC_PCIE1_SLEEP_ARES 50
+#define GCC_PCIE1_CORE_STICKY_ARES 51
+#define GCC_PCIE1_AXI_MASTER_ARES 52
+#define GCC_PCIE1_AXI_SLAVE_ARES 53
+#define GCC_PCIE1_AHB_ARES 54
+#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 55
+#define GCC_PCIE1_AXI_SLAVE_STICKY_ARES 56
+#define GCC_PCNOC_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67
+#define GCC_PCNOC_BUS_TIMEOUT10_BCR 68
+#define GCC_PCNOC_BUS_TIMEOUT11_BCR 69
+#define GCC_PRNG_BCR 70
+#define GCC_Q6SS_DBG_ARES 71
+#define GCC_Q6_AHB_S_ARES 72
+#define GCC_Q6_AHB_ARES 73
+#define GCC_Q6_AXIM2_ARES 74
+#define GCC_Q6_AXIM_ARES 75
+#define GCC_Q6_AXIS_ARES 76
+#define GCC_QDSS_BCR 77
+#define GCC_QPIC_BCR 78
+#define GCC_QUSB2_0_PHY_BCR 79
+#define GCC_SDCC1_BCR 80
+#define GCC_SEC_CTRL_BCR 81
+#define GCC_SPDM_BCR 82
+#define GCC_SYSTEM_NOC_BCR 83
+#define GCC_TCSR_BCR 84
+#define GCC_TLMM_BCR 85
+#define GCC_UBI0_AXI_ARES 86
+#define GCC_UBI0_AHB_ARES 87
+#define GCC_UBI0_NC_AXI_ARES 88
+#define GCC_UBI0_DBG_ARES 89
+#define GCC_UBI0_UTCM_ARES 90
+#define GCC_UBI0_CORE_ARES 91
+#define GCC_UBI32_BCR 92
+#define GCC_UNIPHY_BCR 93
+#define GCC_UNIPHY_AHB_ARES 94
+#define GCC_UNIPHY_SYS_ARES 95
+#define GCC_UNIPHY_RX_ARES 96
+#define GCC_UNIPHY_TX_ARES 97
+#define GCC_USB0_BCR 98
+#define GCC_USB0_PHY_BCR 99
+#define GCC_WCSS_BCR 100
+#define GCC_WCSS_DBG_ARES 101
+#define GCC_WCSS_ECAHB_ARES 102
+#define GCC_WCSS_ACMT_ARES 103
+#define GCC_WCSS_DBG_BDG_ARES 104
+#define GCC_WCSS_AHB_S_ARES 105
+#define GCC_WCSS_AXI_M_ARES 106
+#define GCC_WCSS_AXI_S_ARES 107
+#define GCC_WCSS_Q6_BCR 108
+#define GCC_WCSSAON_RESET 109
+#define GCC_UNIPHY_SOFT_RESET 110
+#define GCC_GEPHY_MISC_ARES 111
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 26b6f9200620..020c9cf18751 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -163,5 +163,10 @@
#define NSS_CAL_PRBS_RST_N_RESET 154
#define NSS_LCKDT_RST_N_RESET 155
#define NSS_SRDS_N_RESET 156
+#define CRYPTO_ENG1_RESET 157
+#define CRYPTO_ENG2_RESET 158
+#define CRYPTO_ENG3_RESET 159
+#define CRYPTO_ENG4_RESET 160
+#define CRYPTO_AHB_RESET 161
#endif
diff --git a/include/dt-bindings/reset/qcom,ipq9574-gcc.h b/include/dt-bindings/reset/qcom,ipq9574-gcc.h
new file mode 100644
index 000000000000..c709d103673d
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq9574-gcc.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_9574_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_9574_H
+
+#define GCC_ADSS_BCR 0
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 1
+#define GCC_BLSP1_BCR 2
+#define GCC_BLSP1_QUP1_BCR 3
+#define GCC_BLSP1_QUP2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_BLSP1_UART1_BCR 9
+#define GCC_BLSP1_UART2_BCR 10
+#define GCC_BLSP1_UART3_BCR 11
+#define GCC_BLSP1_UART4_BCR 12
+#define GCC_BLSP1_UART5_BCR 13
+#define GCC_BLSP1_UART6_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_MDIO_BCR 16
+#define GCC_NSS_BCR 17
+#define GCC_NSS_TBU_BCR 18
+#define GCC_PCIE0_BCR 19
+#define GCC_PCIE0_LINK_DOWN_BCR 20
+#define GCC_PCIE0_PHY_BCR 21
+#define GCC_PCIE0PHY_PHY_BCR 22
+#define GCC_PCIE1_BCR 23
+#define GCC_PCIE1_LINK_DOWN_BCR 24
+#define GCC_PCIE1_PHY_BCR 25
+#define GCC_PCIE1PHY_PHY_BCR 26
+#define GCC_PCIE2_BCR 27
+#define GCC_PCIE2_LINK_DOWN_BCR 28
+#define GCC_PCIE2_PHY_BCR 29
+#define GCC_PCIE2PHY_PHY_BCR 30
+#define GCC_PCIE3_BCR 31
+#define GCC_PCIE3_LINK_DOWN_BCR 32
+#define GCC_PCIE3_PHY_BCR 33
+#define GCC_PCIE3PHY_PHY_BCR 34
+#define GCC_PRNG_BCR 35
+#define GCC_QUSB2_0_PHY_BCR 36
+#define GCC_SDCC_BCR 37
+#define GCC_TLMM_BCR 38
+#define GCC_UNIPHY0_BCR 39
+#define GCC_UNIPHY1_BCR 40
+#define GCC_UNIPHY2_BCR 41
+#define GCC_USB0_PHY_BCR 42
+#define GCC_USB3PHY_0_PHY_BCR 43
+#define GCC_USB_BCR 44
+#define GCC_ANOC0_TBU_BCR 45
+#define GCC_ANOC1_TBU_BCR 46
+#define GCC_ANOC_BCR 47
+#define GCC_APSS_TCU_BCR 48
+#define GCC_CMN_BLK_BCR 49
+#define GCC_CMN_BLK_AHB_ARES 50
+#define GCC_CMN_BLK_SYS_ARES 51
+#define GCC_CMN_BLK_APU_ARES 52
+#define GCC_DCC_BCR 53
+#define GCC_DDRSS_BCR 54
+#define GCC_IMEM_BCR 55
+#define GCC_LPASS_BCR 56
+#define GCC_MPM_BCR 57
+#define GCC_MSG_RAM_BCR 58
+#define GCC_NSSNOC_MEMNOC_1_ARES 59
+#define GCC_NSSNOC_PCNOC_1_ARES 60
+#define GCC_NSSNOC_SNOC_1_ARES 61
+#define GCC_NSSNOC_XO_DCD_ARES 62
+#define GCC_NSSNOC_TS_ARES 63
+#define GCC_NSSCC_ARES 64
+#define GCC_NSSNOC_NSSCC_ARES 65
+#define GCC_NSSNOC_ATB_ARES 66
+#define GCC_NSSNOC_MEMNOC_ARES 67
+#define GCC_NSSNOC_QOSGEN_REF_ARES 68
+#define GCC_NSSNOC_SNOC_ARES 69
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 70
+#define GCC_NSS_CFG_ARES 71
+#define GCC_UBI0_DBG_ARES 72
+#define GCC_PCIE0_AHB_ARES 73
+#define GCC_PCIE0_AUX_ARES 74
+#define GCC_PCIE0_AXI_M_ARES 75
+#define GCC_PCIE0_AXI_M_STICKY_ARES 76
+#define GCC_PCIE0_AXI_S_ARES 77
+#define GCC_PCIE0_AXI_S_STICKY_ARES 78
+#define GCC_PCIE0_CORE_STICKY_ARES 79
+#define GCC_PCIE0_PIPE_ARES 80
+#define GCC_PCIE1_AHB_ARES 81
+#define GCC_PCIE1_AUX_ARES 82
+#define GCC_PCIE1_AXI_M_ARES 83
+#define GCC_PCIE1_AXI_M_STICKY_ARES 84
+#define GCC_PCIE1_AXI_S_ARES 85
+#define GCC_PCIE1_AXI_S_STICKY_ARES 86
+#define GCC_PCIE1_CORE_STICKY_ARES 87
+#define GCC_PCIE1_PIPE_ARES 88
+#define GCC_PCIE2_AHB_ARES 89
+#define GCC_PCIE2_AUX_ARES 90
+#define GCC_PCIE2_AXI_M_ARES 91
+#define GCC_PCIE2_AXI_M_STICKY_ARES 92
+#define GCC_PCIE2_AXI_S_ARES 93
+#define GCC_PCIE2_AXI_S_STICKY_ARES 94
+#define GCC_PCIE2_CORE_STICKY_ARES 95
+#define GCC_PCIE2_PIPE_ARES 96
+#define GCC_PCIE3_AHB_ARES 97
+#define GCC_PCIE3_AUX_ARES 98
+#define GCC_PCIE3_AXI_M_ARES 99
+#define GCC_PCIE3_AXI_M_STICKY_ARES 100
+#define GCC_PCIE3_AXI_S_ARES 101
+#define GCC_PCIE3_AXI_S_STICKY_ARES 102
+#define GCC_PCIE3_CORE_STICKY_ARES 103
+#define GCC_PCIE3_PIPE_ARES 104
+#define GCC_PCNOC_BCR 105
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 106
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 107
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 108
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 109
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 110
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 111
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 112
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 113
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 114
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 115
+#define GCC_PCNOC_TBU_BCR 116
+#define GCC_Q6SS_DBG_ARES 117
+#define GCC_Q6_AHB_ARES 118
+#define GCC_Q6_AHB_S_ARES 119
+#define GCC_Q6_AXIM2_ARES 120
+#define GCC_Q6_AXIM_ARES 121
+#define GCC_QDSS_BCR 122
+#define GCC_QPIC_BCR 123
+#define GCC_QPIC_AHB_ARES 124
+#define GCC_QPIC_ARES 125
+#define GCC_RBCPR_BCR 126
+#define GCC_RBCPR_MX_BCR 127
+#define GCC_SEC_CTRL_BCR 128
+#define GCC_SMMU_CFG_BCR 129
+#define GCC_SNOC_BCR 130
+#define GCC_SPDM_BCR 131
+#define GCC_TME_BCR 132
+#define GCC_UNIPHY0_SYS_RESET 133
+#define GCC_UNIPHY0_AHB_RESET 134
+#define GCC_UNIPHY0_XPCS_RESET 135
+#define GCC_UNIPHY1_SYS_RESET 136
+#define GCC_UNIPHY1_AHB_RESET 137
+#define GCC_UNIPHY1_XPCS_RESET 138
+#define GCC_UNIPHY2_SYS_RESET 139
+#define GCC_UNIPHY2_AHB_RESET 140
+#define GCC_UNIPHY2_XPCS_RESET 141
+#define GCC_USB_MISC_RESET 142
+#define GCC_WCSSAON_RESET 143
+#define GCC_WCSS_ACMT_ARES 144
+#define GCC_WCSS_AHB_S_ARES 145
+#define GCC_WCSS_AXI_M_ARES 146
+#define GCC_WCSS_BCR 147
+#define GCC_WCSS_DBG_ARES 148
+#define GCC_WCSS_DBG_BDG_ARES 149
+#define GCC_WCSS_ECAHB_ARES 150
+#define GCC_WCSS_Q6_BCR 151
+#define GCC_WCSS_Q6_TBU_BCR 152
+#define GCC_TCSR_BCR 153
+#define GCC_CRYPTO_BCR 154
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-pdc.h b/include/dt-bindings/reset/qcom,sdm845-pdc.h
index 53c37f9c319a..03a0c0eb8147 100644
--- a/include/dt-bindings/reset/qcom,sdm845-pdc.h
+++ b/include/dt-bindings/reset/qcom,sdm845-pdc.h
@@ -16,5 +16,7 @@
#define PDC_DISPLAY_SYNC_RESET 7
#define PDC_COMPUTE_SYNC_RESET 8
#define PDC_MODEM_SYNC_RESET 9
+#define PDC_WLAN_RF_SYNC_RESET 10
+#define PDC_WPSS_SYNC_RESET 11
#endif
diff --git a/include/dt-bindings/reset/qcom,sm8350-videocc.h b/include/dt-bindings/reset/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..cd356b207a4a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8350-videocc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+
+#define VIDEO_CC_CVP_INTERFACE_BCR 0
+#define VIDEO_CC_CVP_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_CVP_MVS0C_BCR 3
+#define VIDEO_CC_CVP_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_CVP_MVS1C_BCR 6
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sm8450-gpucc.h b/include/dt-bindings/reset/qcom,sm8450-gpucc.h
new file mode 100644
index 000000000000..58ba8f987107
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8450-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8450_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8450_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sm8650-gpucc.h b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
new file mode 100644
index 000000000000..f021a6cccc66
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,x1e80100-gpucc.h b/include/dt-bindings/reset/qcom,x1e80100-gpucc.h
new file mode 100644
index 000000000000..32b43e71a16f
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,x1e80100-gpucc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_X1E80100_GPU_CC_H
+#define _DT_BINDINGS_RESET_QCOM_X1E80100_GPU_CC_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_FF_BCR 4
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 5
+#define GPUCC_GPU_CC_GMU_BCR 6
+#define GPUCC_GPU_CC_GX_BCR 7
+#define GPUCC_GPU_CC_XO_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/raspberrypi,firmware-reset.h b/include/dt-bindings/reset/raspberrypi,firmware-reset.h
new file mode 100644
index 000000000000..1a4f4c792723
--- /dev/null
+++ b/include/dt-bindings/reset/raspberrypi,firmware-reset.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Nicolas Saenz Julienne
+ * Author: Nicolas Saenz Julienne <nsaenzjulienne@suse.com>
+ */
+
+#ifndef _DT_BINDINGS_RASPBERRYPI_FIRMWARE_RESET_H
+#define _DT_BINDINGS_RASPBERRYPI_FIRMWARE_RESET_H
+
+#define RASPBERRYPI_FIRMWARE_RESET_ID_USB 0
+#define RASPBERRYPI_FIRMWARE_RESET_NUM_IDS 1
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h
new file mode 100644
index 000000000000..d4264db2a07f
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h
@@ -0,0 +1,754 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2022 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3588_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3588_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_P_CSIPHY0 2
+#define SRST_CSIPHY0 3
+#define SRST_P_CSIPHY1 4
+#define SRST_CSIPHY1 5
+#define SRST_A_TOP_M500_BIU 6
+
+#define SRST_A_TOP_M400_BIU 7
+#define SRST_A_TOP_S200_BIU 8
+#define SRST_A_TOP_S400_BIU 9
+#define SRST_A_TOP_M300_BIU 10
+#define SRST_USBDP_COMBO_PHY0_INIT 11
+#define SRST_USBDP_COMBO_PHY0_CMN 12
+#define SRST_USBDP_COMBO_PHY0_LANE 13
+#define SRST_USBDP_COMBO_PHY0_PCS 14
+#define SRST_USBDP_COMBO_PHY1_INIT 15
+
+#define SRST_USBDP_COMBO_PHY1_CMN 16
+#define SRST_USBDP_COMBO_PHY1_LANE 17
+#define SRST_USBDP_COMBO_PHY1_PCS 18
+#define SRST_DCPHY0 19
+#define SRST_P_MIPI_DCPHY0 20
+#define SRST_P_MIPI_DCPHY0_GRF 21
+
+#define SRST_DCPHY1 22
+#define SRST_P_MIPI_DCPHY1 23
+#define SRST_P_MIPI_DCPHY1_GRF 24
+#define SRST_P_APB2ASB_SLV_CDPHY 25
+#define SRST_P_APB2ASB_SLV_CSIPHY 26
+#define SRST_P_APB2ASB_SLV_VCCIO3_5 27
+#define SRST_P_APB2ASB_SLV_VCCIO6 28
+#define SRST_P_APB2ASB_SLV_EMMCIO 29
+#define SRST_P_APB2ASB_SLV_IOC_TOP 30
+#define SRST_P_APB2ASB_SLV_IOC_RIGHT 31
+
+#define SRST_P_CRU 32
+#define SRST_A_CHANNEL_SECURE2VO1USB 33
+#define SRST_A_CHANNEL_SECURE2CENTER 34
+#define SRST_H_CHANNEL_SECURE2VO1USB 35
+#define SRST_H_CHANNEL_SECURE2CENTER 36
+
+#define SRST_P_CHANNEL_SECURE2VO1USB 37
+#define SRST_P_CHANNEL_SECURE2CENTER 38
+
+#define SRST_H_AUDIO_BIU 39
+#define SRST_P_AUDIO_BIU 40
+#define SRST_H_I2S0_8CH 41
+#define SRST_M_I2S0_8CH_TX 42
+#define SRST_M_I2S0_8CH_RX 43
+#define SRST_P_ACDCDIG 44
+#define SRST_H_I2S2_2CH 45
+#define SRST_H_I2S3_2CH 46
+
+#define SRST_M_I2S2_2CH 47
+#define SRST_M_I2S3_2CH 48
+#define SRST_DAC_ACDCDIG 49
+#define SRST_H_SPDIF0 50
+
+#define SRST_M_SPDIF0 51
+#define SRST_H_SPDIF1 52
+#define SRST_M_SPDIF1 53
+#define SRST_H_PDM1 54
+#define SRST_PDM1 55
+
+#define SRST_A_BUS_BIU 56
+#define SRST_P_BUS_BIU 57
+#define SRST_A_GIC 58
+#define SRST_A_GIC_DBG 59
+#define SRST_A_DMAC0 60
+#define SRST_A_DMAC1 61
+#define SRST_A_DMAC2 62
+#define SRST_P_I2C1 63
+#define SRST_P_I2C2 64
+#define SRST_P_I2C3 65
+#define SRST_P_I2C4 66
+#define SRST_P_I2C5 67
+#define SRST_P_I2C6 68
+#define SRST_P_I2C7 69
+#define SRST_P_I2C8 70
+
+#define SRST_I2C1 71
+#define SRST_I2C2 72
+#define SRST_I2C3 73
+#define SRST_I2C4 74
+#define SRST_I2C5 75
+#define SRST_I2C6 76
+#define SRST_I2C7 77
+#define SRST_I2C8 78
+#define SRST_P_CAN0 79
+#define SRST_CAN0 80
+#define SRST_P_CAN1 81
+#define SRST_CAN1 82
+#define SRST_P_CAN2 83
+#define SRST_CAN2 84
+#define SRST_P_SARADC 85
+
+#define SRST_P_TSADC 86
+#define SRST_TSADC 87
+#define SRST_P_UART1 88
+#define SRST_P_UART2 89
+#define SRST_P_UART3 90
+#define SRST_P_UART4 91
+#define SRST_P_UART5 92
+#define SRST_P_UART6 93
+#define SRST_P_UART7 94
+#define SRST_P_UART8 95
+#define SRST_P_UART9 96
+#define SRST_S_UART1 97
+
+#define SRST_S_UART2 98
+#define SRST_S_UART3 99
+#define SRST_S_UART4 100
+#define SRST_S_UART5 101
+#define SRST_S_UART6 102
+#define SRST_S_UART7 103
+
+#define SRST_S_UART8 104
+#define SRST_S_UART9 105
+#define SRST_P_SPI0 106
+#define SRST_P_SPI1 107
+#define SRST_P_SPI2 108
+#define SRST_P_SPI3 109
+#define SRST_P_SPI4 110
+#define SRST_SPI0 111
+#define SRST_SPI1 112
+#define SRST_SPI2 113
+#define SRST_SPI3 114
+#define SRST_SPI4 115
+
+#define SRST_P_WDT0 116
+#define SRST_T_WDT0 117
+#define SRST_P_SYS_GRF 118
+#define SRST_P_PWM1 119
+#define SRST_PWM1 120
+#define SRST_P_PWM2 121
+#define SRST_PWM2 122
+#define SRST_P_PWM3 123
+#define SRST_PWM3 124
+#define SRST_P_BUSTIMER0 125
+#define SRST_P_BUSTIMER1 126
+#define SRST_BUSTIMER0 127
+
+#define SRST_BUSTIMER1 128
+#define SRST_BUSTIMER2 129
+#define SRST_BUSTIMER3 130
+#define SRST_BUSTIMER4 131
+#define SRST_BUSTIMER5 132
+#define SRST_BUSTIMER6 133
+#define SRST_BUSTIMER7 134
+#define SRST_BUSTIMER8 135
+#define SRST_BUSTIMER9 136
+#define SRST_BUSTIMER10 137
+#define SRST_BUSTIMER11 138
+#define SRST_P_MAILBOX0 139
+#define SRST_P_MAILBOX1 140
+#define SRST_P_MAILBOX2 141
+#define SRST_P_GPIO1 142
+#define SRST_GPIO1 143
+
+#define SRST_P_GPIO2 144
+#define SRST_GPIO2 145
+#define SRST_P_GPIO3 146
+#define SRST_GPIO3 147
+#define SRST_P_GPIO4 148
+#define SRST_GPIO4 149
+#define SRST_A_DECOM 150
+#define SRST_P_DECOM 151
+#define SRST_D_DECOM 152
+#define SRST_P_TOP 153
+#define SRST_A_GICADB_GIC2CORE_BUS 154
+#define SRST_P_DFT2APB 155
+#define SRST_P_APB2ASB_MST_TOP 156
+#define SRST_P_APB2ASB_MST_CDPHY 157
+#define SRST_P_APB2ASB_MST_BOT_RIGHT 158
+
+#define SRST_P_APB2ASB_MST_IOC_TOP 159
+#define SRST_P_APB2ASB_MST_IOC_RIGHT 160
+#define SRST_P_APB2ASB_MST_CSIPHY 161
+#define SRST_P_APB2ASB_MST_VCCIO3_5 162
+#define SRST_P_APB2ASB_MST_VCCIO6 163
+#define SRST_P_APB2ASB_MST_EMMCIO 164
+#define SRST_A_SPINLOCK 165
+#define SRST_P_OTPC_NS 166
+#define SRST_OTPC_NS 167
+#define SRST_OTPC_ARB 168
+
+#define SRST_P_BUSIOC 169
+#define SRST_P_PMUCM0_INTMUX 170
+#define SRST_P_DDRCM0_INTMUX 171
+
+#define SRST_P_DDR_DFICTL_CH0 172
+#define SRST_P_DDR_MON_CH0 173
+#define SRST_P_DDR_STANDBY_CH0 174
+#define SRST_P_DDR_UPCTL_CH0 175
+#define SRST_TM_DDR_MON_CH0 176
+#define SRST_P_DDR_GRF_CH01 177
+#define SRST_DFI_CH0 178
+#define SRST_SBR_CH0 179
+#define SRST_DDR_UPCTL_CH0 180
+#define SRST_DDR_DFICTL_CH0 181
+#define SRST_DDR_MON_CH0 182
+#define SRST_DDR_STANDBY_CH0 183
+#define SRST_A_DDR_UPCTL_CH0 184
+#define SRST_P_DDR_DFICTL_CH1 185
+#define SRST_P_DDR_MON_CH1 186
+#define SRST_P_DDR_STANDBY_CH1 187
+
+#define SRST_P_DDR_UPCTL_CH1 188
+#define SRST_TM_DDR_MON_CH1 189
+#define SRST_DFI_CH1 190
+#define SRST_SBR_CH1 191
+#define SRST_DDR_UPCTL_CH1 192
+#define SRST_DDR_DFICTL_CH1 193
+#define SRST_DDR_MON_CH1 194
+#define SRST_DDR_STANDBY_CH1 195
+#define SRST_A_DDR_UPCTL_CH1 196
+#define SRST_A_DDR01_MSCH0 197
+#define SRST_A_DDR01_RS_MSCH0 198
+#define SRST_A_DDR01_FRS_MSCH0 199
+
+#define SRST_A_DDR01_SCRAMBLE0 200
+#define SRST_A_DDR01_FRS_SCRAMBLE0 201
+#define SRST_A_DDR01_MSCH1 202
+#define SRST_A_DDR01_RS_MSCH1 203
+#define SRST_A_DDR01_FRS_MSCH1 204
+#define SRST_A_DDR01_SCRAMBLE1 205
+#define SRST_A_DDR01_FRS_SCRAMBLE1 206
+#define SRST_P_DDR01_MSCH0 207
+#define SRST_P_DDR01_MSCH1 208
+
+#define SRST_P_DDR_DFICTL_CH2 209
+#define SRST_P_DDR_MON_CH2 210
+#define SRST_P_DDR_STANDBY_CH2 211
+#define SRST_P_DDR_UPCTL_CH2 212
+#define SRST_TM_DDR_MON_CH2 213
+#define SRST_P_DDR_GRF_CH23 214
+#define SRST_DFI_CH2 215
+#define SRST_SBR_CH2 216
+#define SRST_DDR_UPCTL_CH2 217
+#define SRST_DDR_DFICTL_CH2 218
+#define SRST_DDR_MON_CH2 219
+#define SRST_DDR_STANDBY_CH2 220
+#define SRST_A_DDR_UPCTL_CH2 221
+#define SRST_P_DDR_DFICTL_CH3 222
+#define SRST_P_DDR_MON_CH3 223
+#define SRST_P_DDR_STANDBY_CH3 224
+
+#define SRST_P_DDR_UPCTL_CH3 225
+#define SRST_TM_DDR_MON_CH3 226
+#define SRST_DFI_CH3 227
+#define SRST_SBR_CH3 228
+#define SRST_DDR_UPCTL_CH3 229
+#define SRST_DDR_DFICTL_CH3 230
+#define SRST_DDR_MON_CH3 231
+#define SRST_DDR_STANDBY_CH3 232
+#define SRST_A_DDR_UPCTL_CH3 233
+#define SRST_A_DDR23_MSCH2 234
+#define SRST_A_DDR23_RS_MSCH2 235
+#define SRST_A_DDR23_FRS_MSCH2 236
+
+#define SRST_A_DDR23_SCRAMBLE2 237
+#define SRST_A_DDR23_FRS_SCRAMBLE2 238
+#define SRST_A_DDR23_MSCH3 239
+#define SRST_A_DDR23_RS_MSCH3 240
+#define SRST_A_DDR23_FRS_MSCH3 241
+#define SRST_A_DDR23_SCRAMBLE3 242
+#define SRST_A_DDR23_FRS_SCRAMBLE3 243
+#define SRST_P_DDR23_MSCH2 244
+#define SRST_P_DDR23_MSCH3 245
+
+#define SRST_ISP1 246
+#define SRST_ISP1_VICAP 247
+#define SRST_A_ISP1_BIU 248
+#define SRST_H_ISP1_BIU 249
+
+#define SRST_A_RKNN1 250
+#define SRST_A_RKNN1_BIU 251
+#define SRST_H_RKNN1 252
+#define SRST_H_RKNN1_BIU 253
+
+#define SRST_A_RKNN2 254
+#define SRST_A_RKNN2_BIU 255
+#define SRST_H_RKNN2 256
+#define SRST_H_RKNN2_BIU 257
+
+#define SRST_A_RKNN_DSU0 258
+#define SRST_P_NPUTOP_BIU 259
+#define SRST_P_NPU_TIMER 260
+#define SRST_NPUTIMER0 261
+#define SRST_NPUTIMER1 262
+#define SRST_P_NPU_WDT 263
+#define SRST_T_NPU_WDT 264
+#define SRST_P_NPU_PVTM 265
+#define SRST_P_NPU_GRF 266
+#define SRST_NPU_PVTM 267
+
+#define SRST_NPU_PVTPLL 268
+#define SRST_H_NPU_CM0_BIU 269
+#define SRST_F_NPU_CM0_CORE 270
+#define SRST_T_NPU_CM0_JTAG 271
+#define SRST_A_RKNN0 272
+#define SRST_A_RKNN0_BIU 273
+#define SRST_H_RKNN0 274
+#define SRST_H_RKNN0_BIU 275
+
+#define SRST_H_NVM_BIU 276
+#define SRST_A_NVM_BIU 277
+#define SRST_H_EMMC 278
+#define SRST_A_EMMC 279
+#define SRST_C_EMMC 280
+#define SRST_B_EMMC 281
+#define SRST_T_EMMC 282
+#define SRST_S_SFC 283
+#define SRST_H_SFC 284
+#define SRST_H_SFC_XIP 285
+
+#define SRST_P_GRF 286
+#define SRST_P_DEC_BIU 287
+#define SRST_P_PHP_BIU 288
+#define SRST_A_PCIE_GRIDGE 289
+#define SRST_A_PHP_BIU 290
+#define SRST_A_GMAC0 291
+#define SRST_A_GMAC1 292
+#define SRST_A_PCIE_BIU 293
+#define SRST_PCIE0_POWER_UP 294
+#define SRST_PCIE1_POWER_UP 295
+#define SRST_PCIE2_POWER_UP 296
+
+#define SRST_PCIE3_POWER_UP 297
+#define SRST_PCIE4_POWER_UP 298
+#define SRST_P_PCIE0 299
+#define SRST_P_PCIE1 300
+#define SRST_P_PCIE2 301
+#define SRST_P_PCIE3 302
+
+#define SRST_P_PCIE4 303
+#define SRST_A_PHP_GIC_ITS 304
+#define SRST_A_MMU_PCIE 305
+#define SRST_A_MMU_PHP 306
+#define SRST_A_MMU_BIU 307
+
+#define SRST_A_USB3OTG2 308
+
+#define SRST_PMALIVE0 309
+#define SRST_PMALIVE1 310
+#define SRST_PMALIVE2 311
+#define SRST_A_SATA0 312
+#define SRST_A_SATA1 313
+#define SRST_A_SATA2 314
+#define SRST_RXOOB0 315
+#define SRST_RXOOB1 316
+#define SRST_RXOOB2 317
+#define SRST_ASIC0 318
+#define SRST_ASIC1 319
+#define SRST_ASIC2 320
+
+#define SRST_A_RKVDEC_CCU 321
+#define SRST_H_RKVDEC0 322
+#define SRST_A_RKVDEC0 323
+#define SRST_H_RKVDEC0_BIU 324
+#define SRST_A_RKVDEC0_BIU 325
+#define SRST_RKVDEC0_CA 326
+#define SRST_RKVDEC0_HEVC_CA 327
+#define SRST_RKVDEC0_CORE 328
+
+#define SRST_H_RKVDEC1 329
+#define SRST_A_RKVDEC1 330
+#define SRST_H_RKVDEC1_BIU 331
+#define SRST_A_RKVDEC1_BIU 332
+#define SRST_RKVDEC1_CA 333
+#define SRST_RKVDEC1_HEVC_CA 334
+#define SRST_RKVDEC1_CORE 335
+
+#define SRST_A_USB_BIU 336
+#define SRST_H_USB_BIU 337
+#define SRST_A_USB3OTG0 338
+#define SRST_A_USB3OTG1 339
+#define SRST_H_HOST0 340
+#define SRST_H_HOST_ARB0 341
+#define SRST_H_HOST1 342
+#define SRST_H_HOST_ARB1 343
+#define SRST_A_USB_GRF 344
+#define SRST_C_USB2P0_HOST0 345
+
+#define SRST_C_USB2P0_HOST1 346
+#define SRST_HOST_UTMI0 347
+#define SRST_HOST_UTMI1 348
+
+#define SRST_A_VDPU_BIU 349
+#define SRST_A_VDPU_LOW_BIU 350
+#define SRST_H_VDPU_BIU 351
+#define SRST_A_JPEG_DECODER_BIU 352
+#define SRST_A_VPU 353
+#define SRST_H_VPU 354
+#define SRST_A_JPEG_ENCODER0 355
+#define SRST_H_JPEG_ENCODER0 356
+#define SRST_A_JPEG_ENCODER1 357
+#define SRST_H_JPEG_ENCODER1 358
+#define SRST_A_JPEG_ENCODER2 359
+#define SRST_H_JPEG_ENCODER2 360
+
+#define SRST_A_JPEG_ENCODER3 361
+#define SRST_H_JPEG_ENCODER3 362
+#define SRST_A_JPEG_DECODER 363
+#define SRST_H_JPEG_DECODER 364
+#define SRST_H_IEP2P0 365
+#define SRST_A_IEP2P0 366
+#define SRST_IEP2P0_CORE 367
+#define SRST_H_RGA2 368
+#define SRST_A_RGA2 369
+#define SRST_RGA2_CORE 370
+#define SRST_H_RGA3_0 371
+#define SRST_A_RGA3_0 372
+#define SRST_RGA3_0_CORE 373
+
+#define SRST_H_RKVENC0_BIU 374
+#define SRST_A_RKVENC0_BIU 375
+#define SRST_H_RKVENC0 376
+#define SRST_A_RKVENC0 377
+#define SRST_RKVENC0_CORE 378
+
+#define SRST_H_RKVENC1_BIU 379
+#define SRST_A_RKVENC1_BIU 380
+#define SRST_H_RKVENC1 381
+#define SRST_A_RKVENC1 382
+#define SRST_RKVENC1_CORE 383
+
+#define SRST_A_VI_BIU 384
+#define SRST_H_VI_BIU 385
+#define SRST_P_VI_BIU 386
+#define SRST_D_VICAP 387
+#define SRST_A_VICAP 388
+#define SRST_H_VICAP 389
+#define SRST_ISP0 390
+#define SRST_ISP0_VICAP 391
+
+#define SRST_FISHEYE0 392
+#define SRST_FISHEYE1 393
+#define SRST_P_CSI_HOST_0 394
+#define SRST_P_CSI_HOST_1 395
+#define SRST_P_CSI_HOST_2 396
+#define SRST_P_CSI_HOST_3 397
+#define SRST_P_CSI_HOST_4 398
+#define SRST_P_CSI_HOST_5 399
+
+#define SRST_CSIHOST0_VICAP 400
+#define SRST_CSIHOST1_VICAP 401
+#define SRST_CSIHOST2_VICAP 402
+#define SRST_CSIHOST3_VICAP 403
+#define SRST_CSIHOST4_VICAP 404
+#define SRST_CSIHOST5_VICAP 405
+#define SRST_CIFIN 406
+
+#define SRST_A_VOP_BIU 407
+#define SRST_A_VOP_LOW_BIU 408
+#define SRST_H_VOP_BIU 409
+#define SRST_P_VOP_BIU 410
+#define SRST_H_VOP 411
+#define SRST_A_VOP 412
+#define SRST_D_VOP0 413
+#define SRST_D_VOP2HDMI_BRIDGE0 414
+#define SRST_D_VOP2HDMI_BRIDGE1 415
+
+#define SRST_D_VOP1 416
+#define SRST_D_VOP2 417
+#define SRST_D_VOP3 418
+#define SRST_P_VOPGRF 419
+#define SRST_P_DSIHOST0 420
+#define SRST_P_DSIHOST1 421
+#define SRST_DSIHOST0 422
+#define SRST_DSIHOST1 423
+#define SRST_VOP_PMU 424
+#define SRST_P_VOP_CHANNEL_BIU 425
+
+#define SRST_H_VO0_BIU 426
+#define SRST_H_VO0_S_BIU 427
+#define SRST_P_VO0_BIU 428
+#define SRST_P_VO0_S_BIU 429
+#define SRST_A_HDCP0_BIU 430
+#define SRST_P_VO0GRF 431
+#define SRST_H_HDCP_KEY0 432
+#define SRST_A_HDCP0 433
+#define SRST_H_HDCP0 434
+#define SRST_HDCP0 435
+
+#define SRST_P_TRNG0 436
+#define SRST_DP0 437
+#define SRST_DP1 438
+#define SRST_H_I2S4_8CH 439
+#define SRST_M_I2S4_8CH_TX 440
+#define SRST_H_I2S8_8CH 441
+
+#define SRST_M_I2S8_8CH_TX 442
+#define SRST_H_SPDIF2_DP0 443
+#define SRST_M_SPDIF2_DP0 444
+#define SRST_H_SPDIF5_DP1 445
+#define SRST_M_SPDIF5_DP1 446
+
+#define SRST_A_HDCP1_BIU 447
+#define SRST_A_VO1_BIU 448
+#define SRST_H_VOP1_BIU 449
+#define SRST_H_VOP1_S_BIU 450
+#define SRST_P_VOP1_BIU 451
+#define SRST_P_VO1GRF 452
+#define SRST_P_VO1_S_BIU 453
+
+#define SRST_H_I2S7_8CH 454
+#define SRST_M_I2S7_8CH_RX 455
+#define SRST_H_HDCP_KEY1 456
+#define SRST_A_HDCP1 457
+#define SRST_H_HDCP1 458
+#define SRST_HDCP1 459
+#define SRST_P_TRNG1 460
+#define SRST_P_HDMITX0 461
+
+#define SRST_HDMITX0_REF 462
+#define SRST_P_HDMITX1 463
+#define SRST_HDMITX1_REF 464
+#define SRST_A_HDMIRX 465
+#define SRST_P_HDMIRX 466
+#define SRST_HDMIRX_REF 467
+
+#define SRST_P_EDP0 468
+#define SRST_EDP0_24M 469
+#define SRST_P_EDP1 470
+#define SRST_EDP1_24M 471
+#define SRST_M_I2S5_8CH_TX 472
+#define SRST_H_I2S5_8CH 473
+#define SRST_M_I2S6_8CH_TX 474
+
+#define SRST_M_I2S6_8CH_RX 475
+#define SRST_H_I2S6_8CH 476
+#define SRST_H_SPDIF3 477
+#define SRST_M_SPDIF3 478
+#define SRST_H_SPDIF4 479
+#define SRST_M_SPDIF4 480
+#define SRST_H_SPDIFRX0 481
+#define SRST_M_SPDIFRX0 482
+#define SRST_H_SPDIFRX1 483
+#define SRST_M_SPDIFRX1 484
+
+#define SRST_H_SPDIFRX2 485
+#define SRST_M_SPDIFRX2 486
+#define SRST_LINKSYM_HDMITXPHY0 487
+#define SRST_LINKSYM_HDMITXPHY1 488
+#define SRST_VO1_BRIDGE0 489
+#define SRST_VO1_BRIDGE1 490
+
+#define SRST_H_I2S9_8CH 491
+#define SRST_M_I2S9_8CH_RX 492
+#define SRST_H_I2S10_8CH 493
+#define SRST_M_I2S10_8CH_RX 494
+#define SRST_P_S_HDMIRX 495
+
+#define SRST_GPU 496
+#define SRST_SYS_GPU 497
+#define SRST_A_S_GPU_BIU 498
+#define SRST_A_M0_GPU_BIU 499
+#define SRST_A_M1_GPU_BIU 500
+#define SRST_A_M2_GPU_BIU 501
+#define SRST_A_M3_GPU_BIU 502
+#define SRST_P_GPU_BIU 503
+#define SRST_P_GPU_PVTM 504
+
+#define SRST_GPU_PVTM 505
+#define SRST_P_GPU_GRF 506
+#define SRST_GPU_PVTPLL 507
+#define SRST_GPU_JTAG 508
+
+#define SRST_A_AV1_BIU 509
+#define SRST_A_AV1 510
+#define SRST_P_AV1_BIU 511
+#define SRST_P_AV1 512
+
+#define SRST_A_DDR_BIU 513
+#define SRST_A_DMA2DDR 514
+#define SRST_A_DDR_SHAREMEM 515
+#define SRST_A_DDR_SHAREMEM_BIU 516
+#define SRST_A_CENTER_S200_BIU 517
+#define SRST_A_CENTER_S400_BIU 518
+#define SRST_H_AHB2APB 519
+#define SRST_H_CENTER_BIU 520
+#define SRST_F_DDR_CM0_CORE 521
+
+#define SRST_DDR_TIMER0 522
+#define SRST_DDR_TIMER1 523
+#define SRST_T_WDT_DDR 524
+#define SRST_T_DDR_CM0_JTAG 525
+#define SRST_P_CENTER_GRF 526
+#define SRST_P_AHB2APB 527
+#define SRST_P_WDT 528
+#define SRST_P_TIMER 529
+#define SRST_P_DMA2DDR 530
+#define SRST_P_SHAREMEM 531
+#define SRST_P_CENTER_BIU 532
+#define SRST_P_CENTER_CHANNEL_BIU 533
+
+#define SRST_P_USBDPGRF0 534
+#define SRST_P_USBDPPHY0 535
+#define SRST_P_USBDPGRF1 536
+#define SRST_P_USBDPPHY1 537
+#define SRST_P_HDPTX0 538
+#define SRST_P_HDPTX1 539
+#define SRST_P_APB2ASB_SLV_BOT_RIGHT 540
+#define SRST_P_USB2PHY_U3_0_GRF0 541
+#define SRST_P_USB2PHY_U3_1_GRF0 542
+#define SRST_P_USB2PHY_U2_0_GRF0 543
+#define SRST_P_USB2PHY_U2_1_GRF0 544
+#define SRST_HDPTX0_ROPLL 545
+#define SRST_HDPTX0_LCPLL 546
+#define SRST_HDPTX0 547
+#define SRST_HDPTX1_ROPLL 548
+
+#define SRST_HDPTX1_LCPLL 549
+#define SRST_HDPTX1 550
+#define SRST_HDPTX0_HDMIRXPHY_SET 551
+#define SRST_USBDP_COMBO_PHY0 552
+#define SRST_USBDP_COMBO_PHY0_LCPLL 553
+#define SRST_USBDP_COMBO_PHY0_ROPLL 554
+#define SRST_USBDP_COMBO_PHY0_PCS_HS 555
+#define SRST_USBDP_COMBO_PHY1 556
+#define SRST_USBDP_COMBO_PHY1_LCPLL 557
+#define SRST_USBDP_COMBO_PHY1_ROPLL 558
+#define SRST_USBDP_COMBO_PHY1_PCS_HS 559
+#define SRST_HDMIHDP0 560
+#define SRST_HDMIHDP1 561
+
+#define SRST_A_VO1USB_TOP_BIU 562
+#define SRST_H_VO1USB_TOP_BIU 563
+
+#define SRST_H_SDIO_BIU 564
+#define SRST_H_SDIO 565
+#define SRST_SDIO 566
+
+#define SRST_H_RGA3_BIU 567
+#define SRST_A_RGA3_BIU 568
+#define SRST_H_RGA3_1 569
+#define SRST_A_RGA3_1 570
+#define SRST_RGA3_1_CORE 571
+
+#define SRST_REF_PIPE_PHY0 572
+#define SRST_REF_PIPE_PHY1 573
+#define SRST_REF_PIPE_PHY2 574
+
+#define SRST_P_PHPTOP_CRU 575
+#define SRST_P_PCIE2_GRF0 576
+#define SRST_P_PCIE2_GRF1 577
+#define SRST_P_PCIE2_GRF2 578
+#define SRST_P_PCIE2_PHY0 579
+#define SRST_P_PCIE2_PHY1 580
+#define SRST_P_PCIE2_PHY2 581
+#define SRST_P_PCIE3_PHY 582
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 583
+#define SRST_PCIE30_PHY 584
+
+#define SRST_H_PMU1_BIU 585
+#define SRST_P_PMU1_BIU 586
+#define SRST_H_PMU_CM0_BIU 587
+#define SRST_F_PMU_CM0_CORE 588
+#define SRST_T_PMU1_CM0_JTAG 589
+
+#define SRST_DDR_FAIL_SAFE 590
+#define SRST_P_CRU_PMU1 591
+#define SRST_P_PMU1_GRF 592
+#define SRST_P_PMU1_IOC 593
+#define SRST_P_PMU1WDT 594
+#define SRST_T_PMU1WDT 595
+#define SRST_P_PMU1TIMER 596
+#define SRST_PMU1TIMER0 597
+#define SRST_PMU1TIMER1 598
+#define SRST_P_PMU1PWM 599
+#define SRST_PMU1PWM 600
+
+#define SRST_P_I2C0 601
+#define SRST_I2C0 602
+#define SRST_S_UART0 603
+#define SRST_P_UART0 604
+#define SRST_H_I2S1_8CH 605
+#define SRST_M_I2S1_8CH_TX 606
+#define SRST_M_I2S1_8CH_RX 607
+#define SRST_H_PDM0 608
+#define SRST_PDM0 609
+
+#define SRST_H_VAD 610
+#define SRST_HDPTX0_INIT 611
+#define SRST_HDPTX0_CMN 612
+#define SRST_HDPTX0_LANE 613
+#define SRST_HDPTX1_INIT 614
+
+#define SRST_HDPTX1_CMN 615
+#define SRST_HDPTX1_LANE 616
+#define SRST_M_MIPI_DCPHY0 617
+#define SRST_S_MIPI_DCPHY0 618
+#define SRST_M_MIPI_DCPHY1 619
+#define SRST_S_MIPI_DCPHY1 620
+#define SRST_OTGPHY_U3_0 621
+#define SRST_OTGPHY_U3_1 622
+#define SRST_OTGPHY_U2_0 623
+#define SRST_OTGPHY_U2_1 624
+
+#define SRST_P_PMU0GRF 625
+#define SRST_P_PMU0IOC 626
+#define SRST_P_GPIO0 627
+#define SRST_GPIO0 628
+
+#define SRST_A_SECURE_NS_BIU 629
+#define SRST_H_SECURE_NS_BIU 630
+#define SRST_A_SECURE_S_BIU 631
+#define SRST_H_SECURE_S_BIU 632
+#define SRST_P_SECURE_S_BIU 633
+#define SRST_CRYPTO_CORE 634
+
+#define SRST_CRYPTO_PKA 635
+#define SRST_CRYPTO_RNG 636
+#define SRST_A_CRYPTO 637
+#define SRST_H_CRYPTO 638
+#define SRST_KEYLADDER_CORE 639
+#define SRST_KEYLADDER_RNG 640
+#define SRST_A_KEYLADDER 641
+#define SRST_H_KEYLADDER 642
+#define SRST_P_OTPC_S 643
+#define SRST_OTPC_S 644
+#define SRST_WDT_S 645
+
+#define SRST_T_WDT_S 646
+#define SRST_H_BOOTROM 647
+#define SRST_A_DCF 648
+#define SRST_P_DCF 649
+#define SRST_H_BOOTROM_NS 650
+#define SRST_P_KEYLADDER 651
+#define SRST_H_TRNG_S 652
+
+#define SRST_H_TRNG_NS 653
+#define SRST_D_SDMMC_BUFFER 654
+#define SRST_H_SDMMC 655
+#define SRST_H_SDMMC_BUFFER 656
+#define SRST_SDMMC 657
+#define SRST_P_TRNG_CHK 658
+#define SRST_TRNG_S 659
+
+#endif
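
The RK3588 table is long, but the consumption pattern is unchanged: the CRU is the single provider for every SRST_* index, taken one cell at a time. A hedged sketch (the CRU address and the consumer node are assumptions; only the constants come from the header above):

	#include <dt-bindings/reset/rockchip,rk3588-cru.h>

	cru: clock-controller@fd7c0000 {
		compatible = "rockchip,rk3588-cru";
		reg = <0x0 0xfd7c0000 0x0 0x5c000>;	/* assumed layout */
		#clock-cells = <1>;
		#reset-cells = <1>;
	};

	mmc@fe2c0000 {
		/* ... */
		resets = <&cru SRST_H_SDMMC>;
		reset-names = "reset";
	};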
diff --git a/include/dt-bindings/reset/sama7g5-reset.h b/include/dt-bindings/reset/sama7g5-reset.h
new file mode 100644
index 000000000000..2116f41d04e0
--- /dev/null
+++ b/include/dt-bindings/reset/sama7g5-reset.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_BINDINGS_RESET_SAMA7G5_H
+#define __DT_BINDINGS_RESET_SAMA7G5_H
+
+#define SAMA7G5_RESET_USB_PHY1 4
+#define SAMA7G5_RESET_USB_PHY2 5
+#define SAMA7G5_RESET_USB_PHY3 6
+
+#endif /* __DT_BINDINGS_RESET_SAMA7G5_H */
diff --git a/include/dt-bindings/reset/sophgo,sg2042-reset.h b/include/dt-bindings/reset/sophgo,sg2042-reset.h
new file mode 100644
index 000000000000..9ab0980625c1
--- /dev/null
+++ b/include/dt-bindings/reset/sophgo,sg2042-reset.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_RESET_SOPHGO_SG2042_H_
+#define __DT_BINDINGS_RESET_SOPHGO_SG2042_H_
+
+#define RST_MAIN_AP 0
+#define RST_RISCV_CPU 1
+#define RST_RISCV_LOW_SPEED_LOGIC 2
+#define RST_RISCV_CMN 3
+#define RST_HSDMA 4
+#define RST_SYSDMA 5
+#define RST_EFUSE0 6
+#define RST_EFUSE1 7
+#define RST_RTC 8
+#define RST_TIMER 9
+#define RST_WDT 10
+#define RST_AHB_ROM0 11
+#define RST_AHB_ROM1 12
+#define RST_I2C0 13
+#define RST_I2C1 14
+#define RST_I2C2 15
+#define RST_I2C3 16
+#define RST_GPIO0 17
+#define RST_GPIO1 18
+#define RST_GPIO2 19
+#define RST_PWM 20
+#define RST_AXI_SRAM0 21
+#define RST_AXI_SRAM1 22
+#define RST_SF0 23
+#define RST_SF1 24
+#define RST_LPC 25
+#define RST_ETH0 26
+#define RST_EMMC 27
+#define RST_SD 28
+#define RST_UART0 29
+#define RST_UART1 30
+#define RST_UART2 31
+#define RST_UART3 32
+#define RST_SPI0 33
+#define RST_SPI1 34
+#define RST_DBG_I2C 35
+#define RST_PCIE0 36
+#define RST_PCIE1 37
+#define RST_DDR0 38
+#define RST_DDR1 39
+#define RST_DDR2 40
+#define RST_DDR3 41
+#define RST_FAU0 42
+#define RST_FAU1 43
+#define RST_FAU2 44
+#define RST_RXU0 45
+#define RST_RXU1 46
+#define RST_RXU2 47
+#define RST_RXU3 48
+#define RST_RXU4 49
+#define RST_RXU5 50
+#define RST_RXU6 51
+#define RST_RXU7 52
+#define RST_RXU8 53
+#define RST_RXU9 54
+#define RST_RXU10 55
+#define RST_RXU11 56
+#define RST_RXU12 57
+#define RST_RXU13 58
+#define RST_RXU14 59
+#define RST_RXU15 60
+#define RST_RXU16 61
+#define RST_RXU17 62
+#define RST_RXU18 63
+#define RST_RXU19 64
+#define RST_RXU20 65
+#define RST_RXU21 66
+#define RST_RXU22 67
+#define RST_RXU23 68
+#define RST_RXU24 69
+#define RST_RXU25 70
+#define RST_RXU26 71
+#define RST_RXU27 72
+#define RST_RXU28 73
+#define RST_RXU29 74
+#define RST_RXU30 75
+#define RST_RXU31 76
+
+#endif /* __DT_BINDINGS_RESET_SOPHGO_SG2042_H_ */
diff --git a/include/dt-bindings/reset/st,stm32mp25-rcc.h b/include/dt-bindings/reset/st,stm32mp25-rcc.h
new file mode 100644
index 000000000000..d5615930bcc8
--- /dev/null
+++ b/include/dt-bindings/reset/st,stm32mp25-rcc.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author(s): Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_RESET_H_
+#define _DT_BINDINGS_STM32MP25_RESET_H_
+
+#define TIM1_R 0
+#define TIM2_R 1
+#define TIM3_R 2
+#define TIM4_R 3
+#define TIM5_R 4
+#define TIM6_R 5
+#define TIM7_R 6
+#define TIM8_R 7
+#define TIM10_R 8
+#define TIM11_R 9
+#define TIM12_R 10
+#define TIM13_R 11
+#define TIM14_R 12
+#define TIM15_R 13
+#define TIM16_R 14
+#define TIM17_R 15
+#define TIM20_R 16
+#define LPTIM1_R 17
+#define LPTIM2_R 18
+#define LPTIM3_R 19
+#define LPTIM4_R 20
+#define LPTIM5_R 21
+#define SPI1_R 22
+#define SPI2_R 23
+#define SPI3_R 24
+#define SPI4_R 25
+#define SPI5_R 26
+#define SPI6_R 27
+#define SPI7_R 28
+#define SPI8_R 29
+#define SPDIFRX_R 30
+#define USART1_R 31
+#define USART2_R 32
+#define USART3_R 33
+#define UART4_R 34
+#define UART5_R 35
+#define USART6_R 36
+#define UART7_R 37
+#define UART8_R 38
+#define UART9_R 39
+#define LPUART1_R 40
+#define IS2M_R 41
+#define I2C1_R 42
+#define I2C2_R 43
+#define I2C3_R 44
+#define I2C4_R 45
+#define I2C5_R 46
+#define I2C6_R 47
+#define I2C7_R 48
+#define I2C8_R 49
+#define SAI1_R 50
+#define SAI2_R 51
+#define SAI3_R 52
+#define SAI4_R 53
+#define MDF1_R 54
+#define MDF2_R 55
+#define FDCAN_R 56
+#define HDP_R 57
+#define ADC12_R 58
+#define ADC3_R 59
+#define ETH1_R 60
+#define ETH2_R 61
+#define USB2_R 62
+#define USB2PHY1_R 63
+#define USB2PHY2_R 64
+#define USB3DR_R 65
+#define USB3PCIEPHY_R 66
+#define USBTC_R 67
+#define ETHSW_R 68
+#define SDMMC1_R 69
+#define SDMMC1DLL_R 70
+#define SDMMC2_R 71
+#define SDMMC2DLL_R 72
+#define SDMMC3_R 73
+#define SDMMC3DLL_R 74
+#define GPU_R 75
+#define LTDC_R 76
+#define DSI_R 77
+#define LVDS_R 78
+#define CSI_R 79
+#define DCMIPP_R 80
+#define CCI_R 81
+#define VDEC_R 82
+#define VENC_R 83
+#define WWDG1_R 84
+#define WWDG2_R 85
+#define VREF_R 86
+#define DTS_R 87
+#define CRC_R 88
+#define SERC_R 89
+#define OSPIIOM_R 90
+#define I3C1_R 91
+#define I3C2_R 92
+#define I3C3_R 93
+#define I3C4_R 94
+#define IWDG2_KER_R 95
+#define IWDG4_KER_R 96
+#define RNG_R 97
+#define PKA_R 98
+#define SAES_R 99
+#define HASH_R 100
+#define CRYP1_R 101
+#define CRYP2_R 102
+#define PCIE_R 103
+#define OSPI1_R 104
+#define OSPI1DLL_R 105
+#define OSPI2_R 106
+#define OSPI2DLL_R 107
+#define FMC_R 108
+#define DBG_R 109
+#define GPIOA_R 110
+#define GPIOB_R 111
+#define GPIOC_R 112
+#define GPIOD_R 113
+#define GPIOE_R 114
+#define GPIOF_R 115
+#define GPIOG_R 116
+#define GPIOH_R 117
+#define GPIOI_R 118
+#define GPIOJ_R 119
+#define GPIOK_R 120
+#define GPIOZ_R 121
+#define HPDMA1_R 122
+#define HPDMA2_R 123
+#define HPDMA3_R 124
+#define LPDMA_R 125
+#define HSEM_R 126
+#define IPCC1_R 127
+#define IPCC2_R 128
+#define C2_HOLDBOOT_R 129
+#define C1_HOLDBOOT_R 130
+#define C1_R 131
+#define C1P1POR_R 132
+#define C1P1_R 133
+#define C2_R 134
+#define C3_R 135
+#define SYS_R 136
+#define VSW_R 137
+#define C1MS_R 138
+#define DDRCP_R 139
+#define DDRCAPB_R 140
+#define DDRPHYCAPB_R 141
+#define DDRCFG_R 142
+#define DDR_R 143
+
+#define STM32MP25_LAST_RESET 144
+
+#define RST_SCMI_C1_R 0
+#define RST_SCMI_C2_R 1
+#define RST_SCMI_C1_HOLDBOOT_R 2
+#define RST_SCMI_C2_HOLDBOOT_R 3
+#define RST_SCMI_FMC 4
+#define RST_SCMI_OSPI1 5
+#define RST_SCMI_OSPI1DLL 6
+#define RST_SCMI_OSPI2 7
+#define RST_SCMI_OSPI2DLL 8
+
+#endif /* _DT_BINDINGS_STM32MP25_RESET_H_ */
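
Note the two independent numbering spaces in this header: the native RCC indices running up to STM32MP25_LAST_RESET, and the RST_SCMI_* indices starting again at 0 for resets mediated by SCMI firmware. An index only has meaning relative to its provider, as this sketch shows (node contents and phandle labels assumed for illustration):

	/* Direct RCC consumer: index from the first table. */
	usart1: serial@40330000 {
		resets = <&rcc USART1_R>;
	};

	/* SCMI-mediated consumer: index from the RST_SCMI_* table. */
	ospi1: spi@40430000 {
		resets = <&scmi_reset RST_SCMI_OSPI1>;
	};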
diff --git a/include/dt-bindings/reset/starfive,jh7110-crg.h b/include/dt-bindings/reset/starfive,jh7110-crg.h
new file mode 100644
index 000000000000..eaf4a0d84f6a
--- /dev/null
+++ b/include/dt-bindings/reset/starfive,jh7110-crg.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__
+#define __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__
+
+/* SYSCRG resets */
+#define JH7110_SYSRST_JTAG_APB 0
+#define JH7110_SYSRST_SYSCON_APB 1
+#define JH7110_SYSRST_IOMUX_APB 2
+#define JH7110_SYSRST_BUS 3
+#define JH7110_SYSRST_DEBUG 4
+#define JH7110_SYSRST_CORE0 5
+#define JH7110_SYSRST_CORE1 6
+#define JH7110_SYSRST_CORE2 7
+#define JH7110_SYSRST_CORE3 8
+#define JH7110_SYSRST_CORE4 9
+#define JH7110_SYSRST_CORE0_ST 10
+#define JH7110_SYSRST_CORE1_ST 11
+#define JH7110_SYSRST_CORE2_ST 12
+#define JH7110_SYSRST_CORE3_ST 13
+#define JH7110_SYSRST_CORE4_ST 14
+#define JH7110_SYSRST_TRACE0 15
+#define JH7110_SYSRST_TRACE1 16
+#define JH7110_SYSRST_TRACE2 17
+#define JH7110_SYSRST_TRACE3 18
+#define JH7110_SYSRST_TRACE4 19
+#define JH7110_SYSRST_TRACE_COM 20
+#define JH7110_SYSRST_GPU_APB 21
+#define JH7110_SYSRST_GPU_DOMA 22
+#define JH7110_SYSRST_NOC_BUS_APB 23
+#define JH7110_SYSRST_NOC_BUS_AXICFG0_AXI 24
+#define JH7110_SYSRST_NOC_BUS_CPU_AXI 25
+#define JH7110_SYSRST_NOC_BUS_DISP_AXI 26
+#define JH7110_SYSRST_NOC_BUS_GPU_AXI 27
+#define JH7110_SYSRST_NOC_BUS_ISP_AXI 28
+#define JH7110_SYSRST_NOC_BUS_DDRC 29
+#define JH7110_SYSRST_NOC_BUS_STG_AXI 30
+#define JH7110_SYSRST_NOC_BUS_VDEC_AXI 31
+
+#define JH7110_SYSRST_NOC_BUS_VENC_AXI 32
+#define JH7110_SYSRST_AXI_CFG1_AHB 33
+#define JH7110_SYSRST_AXI_CFG1_MAIN 34
+#define JH7110_SYSRST_AXI_CFG0_MAIN 35
+#define JH7110_SYSRST_AXI_CFG0_MAIN_DIV 36
+#define JH7110_SYSRST_AXI_CFG0_HIFI4 37
+#define JH7110_SYSRST_DDR_AXI 38
+#define JH7110_SYSRST_DDR_OSC 39
+#define JH7110_SYSRST_DDR_APB 40
+#define JH7110_SYSRST_ISP_TOP 41
+#define JH7110_SYSRST_ISP_TOP_AXI 42
+#define JH7110_SYSRST_VOUT_TOP_SRC 43
+#define JH7110_SYSRST_CODAJ12_AXI 44
+#define JH7110_SYSRST_CODAJ12_CORE 45
+#define JH7110_SYSRST_CODAJ12_APB 46
+#define JH7110_SYSRST_WAVE511_AXI 47
+#define JH7110_SYSRST_WAVE511_BPU 48
+#define JH7110_SYSRST_WAVE511_VCE 49
+#define JH7110_SYSRST_WAVE511_APB 50
+#define JH7110_SYSRST_VDEC_JPG 51
+#define JH7110_SYSRST_VDEC_MAIN 52
+#define JH7110_SYSRST_AXIMEM0_AXI 53
+#define JH7110_SYSRST_WAVE420L_AXI 54
+#define JH7110_SYSRST_WAVE420L_BPU 55
+#define JH7110_SYSRST_WAVE420L_VCE 56
+#define JH7110_SYSRST_WAVE420L_APB 57
+#define JH7110_SYSRST_AXIMEM1_AXI 58
+#define JH7110_SYSRST_AXIMEM2_AXI 59
+#define JH7110_SYSRST_INTMEM 60
+#define JH7110_SYSRST_QSPI_AHB 61
+#define JH7110_SYSRST_QSPI_APB 62
+#define JH7110_SYSRST_QSPI_REF 63
+
+#define JH7110_SYSRST_SDIO0_AHB 64
+#define JH7110_SYSRST_SDIO1_AHB 65
+#define JH7110_SYSRST_GMAC1_AXI 66
+#define JH7110_SYSRST_GMAC1_AHB 67
+#define JH7110_SYSRST_MAILBOX_APB 68
+#define JH7110_SYSRST_SPI0_APB 69
+#define JH7110_SYSRST_SPI1_APB 70
+#define JH7110_SYSRST_SPI2_APB 71
+#define JH7110_SYSRST_SPI3_APB 72
+#define JH7110_SYSRST_SPI4_APB 73
+#define JH7110_SYSRST_SPI5_APB 74
+#define JH7110_SYSRST_SPI6_APB 75
+#define JH7110_SYSRST_I2C0_APB 76
+#define JH7110_SYSRST_I2C1_APB 77
+#define JH7110_SYSRST_I2C2_APB 78
+#define JH7110_SYSRST_I2C3_APB 79
+#define JH7110_SYSRST_I2C4_APB 80
+#define JH7110_SYSRST_I2C5_APB 81
+#define JH7110_SYSRST_I2C6_APB 82
+#define JH7110_SYSRST_UART0_APB 83
+#define JH7110_SYSRST_UART0_CORE 84
+#define JH7110_SYSRST_UART1_APB 85
+#define JH7110_SYSRST_UART1_CORE 86
+#define JH7110_SYSRST_UART2_APB 87
+#define JH7110_SYSRST_UART2_CORE 88
+#define JH7110_SYSRST_UART3_APB 89
+#define JH7110_SYSRST_UART3_CORE 90
+#define JH7110_SYSRST_UART4_APB 91
+#define JH7110_SYSRST_UART4_CORE 92
+#define JH7110_SYSRST_UART5_APB 93
+#define JH7110_SYSRST_UART5_CORE 94
+#define JH7110_SYSRST_SPDIF_APB 95
+
+#define JH7110_SYSRST_PWMDAC_APB 96
+#define JH7110_SYSRST_PDM_DMIC 97
+#define JH7110_SYSRST_PDM_APB 98
+#define JH7110_SYSRST_I2SRX_APB 99
+#define JH7110_SYSRST_I2SRX_BCLK 100
+#define JH7110_SYSRST_I2STX0_APB 101
+#define JH7110_SYSRST_I2STX0_BCLK 102
+#define JH7110_SYSRST_I2STX1_APB 103
+#define JH7110_SYSRST_I2STX1_BCLK 104
+#define JH7110_SYSRST_TDM_AHB 105
+#define JH7110_SYSRST_TDM_CORE 106
+#define JH7110_SYSRST_TDM_APB 107
+#define JH7110_SYSRST_PWM_APB 108
+#define JH7110_SYSRST_WDT_APB 109
+#define JH7110_SYSRST_WDT_CORE 110
+#define JH7110_SYSRST_CAN0_APB 111
+#define JH7110_SYSRST_CAN0_CORE 112
+#define JH7110_SYSRST_CAN0_TIMER 113
+#define JH7110_SYSRST_CAN1_APB 114
+#define JH7110_SYSRST_CAN1_CORE 115
+#define JH7110_SYSRST_CAN1_TIMER 116
+#define JH7110_SYSRST_TIMER_APB 117
+#define JH7110_SYSRST_TIMER0 118
+#define JH7110_SYSRST_TIMER1 119
+#define JH7110_SYSRST_TIMER2 120
+#define JH7110_SYSRST_TIMER3 121
+#define JH7110_SYSRST_INT_CTRL_APB 122
+#define JH7110_SYSRST_TEMP_APB 123
+#define JH7110_SYSRST_TEMP_CORE 124
+#define JH7110_SYSRST_JTAG_CERTIFICATION 125
+
+#define JH7110_SYSRST_END 126
+
+/* AONCRG resets */
+#define JH7110_AONRST_GMAC0_AXI 0
+#define JH7110_AONRST_GMAC0_AHB 1
+#define JH7110_AONRST_IOMUX 2
+#define JH7110_AONRST_PMU_APB 3
+#define JH7110_AONRST_PMU_WKUP 4
+#define JH7110_AONRST_RTC_APB 5
+#define JH7110_AONRST_RTC_CAL 6
+#define JH7110_AONRST_RTC_32K 7
+
+#define JH7110_AONRST_END 8
+
+/* STGCRG resets */
+#define JH7110_STGRST_SYSCON 0
+#define JH7110_STGRST_HIFI4_CORE 1
+#define JH7110_STGRST_HIFI4_AXI 2
+#define JH7110_STGRST_SEC_AHB 3
+#define JH7110_STGRST_E24_CORE 4
+#define JH7110_STGRST_DMA1P_AXI 5
+#define JH7110_STGRST_DMA1P_AHB 6
+#define JH7110_STGRST_USB0_AXI 7
+#define JH7110_STGRST_USB0_APB 8
+#define JH7110_STGRST_USB0_UTMI_APB 9
+#define JH7110_STGRST_USB0_PWRUP 10
+#define JH7110_STGRST_PCIE0_AXI_MST0 11
+#define JH7110_STGRST_PCIE0_AXI_SLV0 12
+#define JH7110_STGRST_PCIE0_AXI_SLV 13
+#define JH7110_STGRST_PCIE0_BRG 14
+#define JH7110_STGRST_PCIE0_CORE 15
+#define JH7110_STGRST_PCIE0_APB 16
+#define JH7110_STGRST_PCIE1_AXI_MST0 17
+#define JH7110_STGRST_PCIE1_AXI_SLV0 18
+#define JH7110_STGRST_PCIE1_AXI_SLV 19
+#define JH7110_STGRST_PCIE1_BRG 20
+#define JH7110_STGRST_PCIE1_CORE 21
+#define JH7110_STGRST_PCIE1_APB 22
+
+#define JH7110_STGRST_END 23
+
+/* ISPCRG resets */
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_P 0
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_C 1
+#define JH7110_ISPRST_M31DPHY_HW 2
+#define JH7110_ISPRST_M31DPHY_B09_AON 3
+#define JH7110_ISPRST_VIN_APB 4
+#define JH7110_ISPRST_VIN_PIXEL_IF0 5
+#define JH7110_ISPRST_VIN_PIXEL_IF1 6
+#define JH7110_ISPRST_VIN_PIXEL_IF2 7
+#define JH7110_ISPRST_VIN_PIXEL_IF3 8
+#define JH7110_ISPRST_VIN_SYS 9
+#define JH7110_ISPRST_VIN_P_AXI_RD 10
+#define JH7110_ISPRST_VIN_P_AXI_WR 11
+
+#define JH7110_ISPRST_END 12
+
+/* VOUTCRG resets */
+#define JH7110_VOUTRST_DC8200_AXI 0
+#define JH7110_VOUTRST_DC8200_AHB 1
+#define JH7110_VOUTRST_DC8200_CORE 2
+#define JH7110_VOUTRST_DSITX_DPI 3
+#define JH7110_VOUTRST_DSITX_APB 4
+#define JH7110_VOUTRST_DSITX_RXESC 5
+#define JH7110_VOUTRST_DSITX_SYS 6
+#define JH7110_VOUTRST_DSITX_TXBYTEHS 7
+#define JH7110_VOUTRST_DSITX_TXESC 8
+#define JH7110_VOUTRST_HDMI_TX_HDMI 9
+#define JH7110_VOUTRST_MIPITX_DPHY_SYS 10
+#define JH7110_VOUTRST_MIPITX_DPHY_TXBYTEHS 11
+
+#define JH7110_VOUTRST_END 12
+
+#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__ */
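
The JH7110 header spans five distinct controllers (SYSCRG, AONCRG, STGCRG, ISPCRG, VOUTCRG), each numbered from 0, so the same small index names different lines under different phandles. An illustrative pairing (phandle labels and unit addresses are assumptions):

	uart0: serial@10000000 {
		/* SYSCRG indices 83/84, JH7110_SYSRST_UART0_* above */
		resets = <&syscrg JH7110_SYSRST_UART0_APB>,
			 <&syscrg JH7110_SYSRST_UART0_CORE>;
	};

	gmac0: ethernet@16030000 {
		/* Overlapping numeric range, different provider: AONCRG */
		resets = <&aoncrg JH7110_AONRST_GMAC0_AXI>,
			 <&aoncrg JH7110_AONRST_GMAC0_AHB>;
	};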
diff --git a/include/dt-bindings/reset/starfive-jh7100.h b/include/dt-bindings/reset/starfive-jh7100.h
new file mode 100644
index 000000000000..540e19254f39
--- /dev/null
+++ b/include/dt-bindings/reset/starfive-jh7100.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_RESET_STARFIVE_JH7100_H__
+
+#define JH7100_RSTN_DOM3AHB_BUS 0
+#define JH7100_RSTN_DOM7AHB_BUS 1
+#define JH7100_RST_U74 2
+#define JH7100_RSTN_U74_AXI 3
+#define JH7100_RSTN_SGDMA2P_AHB 4
+#define JH7100_RSTN_SGDMA2P_AXI 5
+#define JH7100_RSTN_DMA2PNOC_AXI 6
+#define JH7100_RSTN_DLA_AXI 7
+#define JH7100_RSTN_DLANOC_AXI 8
+#define JH7100_RSTN_DLA_APB 9
+#define JH7100_RST_VP6_DRESET 10
+#define JH7100_RST_VP6_BRESET 11
+#define JH7100_RSTN_VP6_AXI 12
+#define JH7100_RSTN_VDECBRG_MAIN 13
+#define JH7100_RSTN_VDEC_AXI 14
+#define JH7100_RSTN_VDEC_BCLK 15
+#define JH7100_RSTN_VDEC_CCLK 16
+#define JH7100_RSTN_VDEC_APB 17
+#define JH7100_RSTN_JPEG_AXI 18
+#define JH7100_RSTN_JPEG_CCLK 19
+#define JH7100_RSTN_JPEG_APB 20
+#define JH7100_RSTN_JPCGC300_MAIN 21
+#define JH7100_RSTN_GC300_2X 22
+#define JH7100_RSTN_GC300_AXI 23
+#define JH7100_RSTN_GC300_AHB 24
+#define JH7100_RSTN_VENC_AXI 25
+#define JH7100_RSTN_VENCBRG_MAIN 26
+#define JH7100_RSTN_VENC_BCLK 27
+#define JH7100_RSTN_VENC_CCLK 28
+#define JH7100_RSTN_VENC_APB 29
+#define JH7100_RSTN_DDRPHY_APB 30
+#define JH7100_RSTN_NOC_ROB 31
+#define JH7100_RSTN_NOC_COG 32
+#define JH7100_RSTN_HIFI4_AXI 33
+#define JH7100_RSTN_HIFI4NOC_AXI 34
+#define JH7100_RST_HIFI4_DRESET 35
+#define JH7100_RST_HIFI4_BRESET 36
+#define JH7100_RSTN_USB_AXI 37
+#define JH7100_RSTN_USBNOC_AXI 38
+#define JH7100_RSTN_SGDMA1P_AXI 39
+#define JH7100_RSTN_DMA1P_AXI 40
+#define JH7100_RSTN_X2C_AXI 41
+#define JH7100_RSTN_NNE_AHB 42
+#define JH7100_RSTN_NNE_AXI 43
+#define JH7100_RSTN_NNENOC_AXI 44
+#define JH7100_RSTN_DLASLV_AXI 45
+#define JH7100_RSTN_DSPX2C_AXI 46
+#define JH7100_RSTN_VIN_SRC 47
+#define JH7100_RSTN_ISPSLV_AXI 48
+#define JH7100_RSTN_VIN_AXI 49
+#define JH7100_RSTN_VINNOC_AXI 50
+#define JH7100_RSTN_ISP0_AXI 51
+#define JH7100_RSTN_ISP0NOC_AXI 52
+#define JH7100_RSTN_ISP1_AXI 53
+#define JH7100_RSTN_ISP1NOC_AXI 54
+#define JH7100_RSTN_VOUT_SRC 55
+#define JH7100_RSTN_DISP_AXI 56
+#define JH7100_RSTN_DISPNOC_AXI 57
+#define JH7100_RSTN_SDIO0_AHB 58
+#define JH7100_RSTN_SDIO1_AHB 59
+#define JH7100_RSTN_GMAC_AHB 60
+#define JH7100_RSTN_SPI2AHB_AHB 61
+#define JH7100_RSTN_SPI2AHB_CORE 62
+#define JH7100_RSTN_EZMASTER_AHB 63
+#define JH7100_RST_E24 64
+#define JH7100_RSTN_QSPI_AHB 65
+#define JH7100_RSTN_QSPI_CORE 66
+#define JH7100_RSTN_QSPI_APB 67
+#define JH7100_RSTN_SEC_AHB 68
+#define JH7100_RSTN_AES 69
+#define JH7100_RSTN_PKA 70
+#define JH7100_RSTN_SHA 71
+#define JH7100_RSTN_TRNG_APB 72
+#define JH7100_RSTN_OTP_APB 73
+#define JH7100_RSTN_UART0_APB 74
+#define JH7100_RSTN_UART0_CORE 75
+#define JH7100_RSTN_UART1_APB 76
+#define JH7100_RSTN_UART1_CORE 77
+#define JH7100_RSTN_SPI0_APB 78
+#define JH7100_RSTN_SPI0_CORE 79
+#define JH7100_RSTN_SPI1_APB 80
+#define JH7100_RSTN_SPI1_CORE 81
+#define JH7100_RSTN_I2C0_APB 82
+#define JH7100_RSTN_I2C0_CORE 83
+#define JH7100_RSTN_I2C1_APB 84
+#define JH7100_RSTN_I2C1_CORE 85
+#define JH7100_RSTN_GPIO_APB 86
+#define JH7100_RSTN_UART2_APB 87
+#define JH7100_RSTN_UART2_CORE 88
+#define JH7100_RSTN_UART3_APB 89
+#define JH7100_RSTN_UART3_CORE 90
+#define JH7100_RSTN_SPI2_APB 91
+#define JH7100_RSTN_SPI2_CORE 92
+#define JH7100_RSTN_SPI3_APB 93
+#define JH7100_RSTN_SPI3_CORE 94
+#define JH7100_RSTN_I2C2_APB 95
+#define JH7100_RSTN_I2C2_CORE 96
+#define JH7100_RSTN_I2C3_APB 97
+#define JH7100_RSTN_I2C3_CORE 98
+#define JH7100_RSTN_WDTIMER_APB 99
+#define JH7100_RSTN_WDT 100
+#define JH7100_RSTN_TIMER0 101
+#define JH7100_RSTN_TIMER1 102
+#define JH7100_RSTN_TIMER2 103
+#define JH7100_RSTN_TIMER3 104
+#define JH7100_RSTN_TIMER4 105
+#define JH7100_RSTN_TIMER5 106
+#define JH7100_RSTN_TIMER6 107
+#define JH7100_RSTN_VP6INTC_APB 108
+#define JH7100_RSTN_PWM_APB 109
+#define JH7100_RSTN_MSI_APB 110
+#define JH7100_RSTN_TEMP_APB 111
+#define JH7100_RSTN_TEMP_SENSE 112
+#define JH7100_RSTN_SYSERR_APB 113
+
+#define JH7100_RSTN_END 114
+
+#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
new file mode 100644
index 000000000000..ea906896c70f
--- /dev/null
+++ b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_STE_PRCC_RESET
+#define _DT_BINDINGS_STE_PRCC_RESET
+
+#define DB8500_PRCC_1 1
+#define DB8500_PRCC_2 2
+#define DB8500_PRCC_3 3
+#define DB8500_PRCC_6 6
+
+/* Reset lines on PRCC 1 */
+#define DB8500_PRCC_1_RESET_UART0 0
+#define DB8500_PRCC_1_RESET_UART1 1
+#define DB8500_PRCC_1_RESET_I2C1 2
+#define DB8500_PRCC_1_RESET_MSP0 3
+#define DB8500_PRCC_1_RESET_MSP1 4
+#define DB8500_PRCC_1_RESET_SDI0 5
+#define DB8500_PRCC_1_RESET_I2C2 6
+#define DB8500_PRCC_1_RESET_SPI3 7
+#define DB8500_PRCC_1_RESET_SLIMBUS0 8
+#define DB8500_PRCC_1_RESET_I2C4 9
+#define DB8500_PRCC_1_RESET_MSP3 10
+#define DB8500_PRCC_1_RESET_PER_MSP3 11
+#define DB8500_PRCC_1_RESET_PER_MSP1 12
+#define DB8500_PRCC_1_RESET_PER_MSP0 13
+#define DB8500_PRCC_1_RESET_PER_SLIMBUS 14
+
+/* Reset lines on PRCC 2 */
+#define DB8500_PRCC_2_RESET_I2C3 0
+#define DB8500_PRCC_2_RESET_PWL 1
+#define DB8500_PRCC_2_RESET_SDI4 2
+#define DB8500_PRCC_2_RESET_MSP2 3
+#define DB8500_PRCC_2_RESET_SDI1 4
+#define DB8500_PRCC_2_RESET_SDI3 5
+#define DB8500_PRCC_2_RESET_HSIRX 6
+#define DB8500_PRCC_2_RESET_HSITX 7
+#define DB8500_PRCC_1_RESET_PER_MSP2 8
+
+/* Reset lines on PRCC 3 */
+#define DB8500_PRCC_3_RESET_SSP0 1
+#define DB8500_PRCC_3_RESET_SSP1 2
+#define DB8500_PRCC_3_RESET_I2C0 3
+#define DB8500_PRCC_3_RESET_SDI2 4
+#define DB8500_PRCC_3_RESET_SKE 5
+#define DB8500_PRCC_3_RESET_UART2 6
+#define DB8500_PRCC_3_RESET_SDI5 7
+
+/* Reset lines on PRCC 6 */
+#define DB8500_PRCC_3_RESET_RNG 0
+
+#endif
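The two-level layout above (a PRCC block number plus per-block reset lines) suggests a two-cell reset specifier. An illustrative consumer sketch, where the &prcc_reset provider label and the <block line> specifier format are assumptions, not part of this patch:

	uart0: serial@80120000 {
		/* assumed two-cell specifier: <PRCC block, reset line> */
		resets = <&prcc_reset DB8500_PRCC_1 DB8500_PRCC_1_RESET_UART0>;
	};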
diff --git a/include/dt-bindings/reset/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h
deleted file mode 100644
index 96f7831a1db0..000000000000
--- a/include/dt-bindings/reset/stih415-resets.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for the reset controller
- * based peripheral powerdown requests on the STMicroelectronics
- * STiH415 SoC.
- */
-#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH415
-#define _DT_BINDINGS_RESET_CONTROLLER_STIH415
-
-#define STIH415_EMISS_POWERDOWN 0
-#define STIH415_NAND_POWERDOWN 1
-#define STIH415_KEYSCAN_POWERDOWN 2
-#define STIH415_USB0_POWERDOWN 3
-#define STIH415_USB1_POWERDOWN 4
-#define STIH415_USB2_POWERDOWN 5
-#define STIH415_SATA0_POWERDOWN 6
-#define STIH415_SATA1_POWERDOWN 7
-#define STIH415_PCIE_POWERDOWN 8
-
-#define STIH415_ETH0_SOFTRESET 0
-#define STIH415_ETH1_SOFTRESET 1
-#define STIH415_IRB_SOFTRESET 2
-#define STIH415_USB0_SOFTRESET 3
-#define STIH415_USB1_SOFTRESET 4
-#define STIH415_USB2_SOFTRESET 5
-#define STIH415_KEYSCAN_SOFTRESET 6
-
-#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH415 */
diff --git a/include/dt-bindings/reset/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h
deleted file mode 100644
index f682c906ed5a..000000000000
--- a/include/dt-bindings/reset/stih416-resets.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for the reset controller
- * based peripheral powerdown requests on the STMicroelectronics
- * STiH416 SoC.
- */
-#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH416
-#define _DT_BINDINGS_RESET_CONTROLLER_STIH416
-
-#define STIH416_EMISS_POWERDOWN 0
-#define STIH416_NAND_POWERDOWN 1
-#define STIH416_KEYSCAN_POWERDOWN 2
-#define STIH416_USB0_POWERDOWN 3
-#define STIH416_USB1_POWERDOWN 4
-#define STIH416_USB2_POWERDOWN 5
-#define STIH416_USB3_POWERDOWN 6
-#define STIH416_SATA0_POWERDOWN 7
-#define STIH416_SATA1_POWERDOWN 8
-#define STIH416_PCIE0_POWERDOWN 9
-#define STIH416_PCIE1_POWERDOWN 10
-
-#define STIH416_ETH0_SOFTRESET 0
-#define STIH416_ETH1_SOFTRESET 1
-#define STIH416_IRB_SOFTRESET 2
-#define STIH416_USB0_SOFTRESET 3
-#define STIH416_USB1_SOFTRESET 4
-#define STIH416_USB2_SOFTRESET 5
-#define STIH416_USB3_SOFTRESET 6
-#define STIH416_SATA0_SOFTRESET 7
-#define STIH416_SATA1_SOFTRESET 8
-#define STIH416_PCIE0_SOFTRESET 9
-#define STIH416_PCIE1_SOFTRESET 10
-#define STIH416_AUD_DAC_SOFTRESET 11
-#define STIH416_HDTVOUT_SOFTRESET 12
-#define STIH416_VTAC_M_RX_SOFTRESET 13
-#define STIH416_VTAC_A_RX_SOFTRESET 14
-#define STIH416_SYNC_HD_SOFTRESET 15
-#define STIH416_SYNC_SD_SOFTRESET 16
-#define STIH416_BLITTER_SOFTRESET 17
-#define STIH416_GPU_SOFTRESET 18
-#define STIH416_VTAC_M_TX_SOFTRESET 19
-#define STIH416_VTAC_A_TX_SOFTRESET 20
-#define STIH416_VTG_AUX_SOFTRESET 21
-#define STIH416_JPEG_DEC_SOFTRESET 22
-#define STIH416_HVA_SOFTRESET 23
-#define STIH416_COMPO_M_SOFTRESET 24
-#define STIH416_COMPO_A_SOFTRESET 25
-#define STIH416_VP8_DEC_SOFTRESET 26
-#define STIH416_VTG_MAIN_SOFTRESET 27
-#define STIH416_KEYSCAN_SOFTRESET 28
-
-#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH416 */
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
index f0c3aaef67a0..9071f139649f 100644
--- a/include/dt-bindings/reset/stm32mp1-resets.h
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_STM32MP1_RESET_H_
#define _DT_BINDINGS_STM32MP1_RESET_H_
+#define MCU_HOLD_BOOT_R 2144
#define LTDC_R 3072
#define DSI_R 3076
#define DDRPERFM_R 3080
@@ -105,4 +106,18 @@
#define GPIOJ_R 19785
#define GPIOK_R 19786
+/* SCMI reset domain identifiers */
+#define RST_SCMI_SPI6 0
+#define RST_SCMI_I2C4 1
+#define RST_SCMI_I2C6 2
+#define RST_SCMI_USART1 3
+#define RST_SCMI_STGEN 4
+#define RST_SCMI_GPIOZ 5
+#define RST_SCMI_CRYP1 6
+#define RST_SCMI_HASH1 7
+#define RST_SCMI_RNG1 8
+#define RST_SCMI_MDMA 9
+#define RST_SCMI_MCU 10
+#define RST_SCMI_MCU_HOLD_BOOT 11
+
#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */
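For context, the SCMI identifiers added above are consumed through the SCMI reset protocol node rather than the RCC directly. A minimal sketch, assuming &scmi_reset and &m4_rproc labels in the board .dts:

	&m4_rproc {
		resets = <&scmi_reset RST_SCMI_MCU>,
			 <&scmi_reset RST_SCMI_MCU_HOLD_BOOT>;
		reset-names = "mcu_rst", "hold_boot";
	};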
diff --git a/include/dt-bindings/reset/stm32mp13-resets.h b/include/dt-bindings/reset/stm32mp13-resets.h
new file mode 100644
index 000000000000..ecb37c7ddde1
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp13-resets.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_RESET_H_
+#define _DT_BINDINGS_STM32MP13_RESET_H_
+
+#define TIM2_R 13568
+#define TIM3_R 13569
+#define TIM4_R 13570
+#define TIM5_R 13571
+#define TIM6_R 13572
+#define TIM7_R 13573
+#define LPTIM1_R 13577
+#define SPI2_R 13579
+#define SPI3_R 13580
+#define USART3_R 13583
+#define UART4_R 13584
+#define UART5_R 13585
+#define UART7_R 13586
+#define UART8_R 13587
+#define I2C1_R 13589
+#define I2C2_R 13590
+#define SPDIF_R 13594
+#define TIM1_R 13632
+#define TIM8_R 13633
+#define SPI1_R 13640
+#define USART6_R 13645
+#define SAI1_R 13648
+#define SAI2_R 13649
+#define DFSDM_R 13652
+#define FDCAN_R 13656
+#define LPTIM2_R 13696
+#define LPTIM3_R 13697
+#define LPTIM4_R 13698
+#define LPTIM5_R 13699
+#define SYSCFG_R 13707
+#define VREF_R 13709
+#define DTS_R 13712
+#define PMBCTRL_R 13713
+#define LTDC_R 13760
+#define DCMIPP_R 13761
+#define DDRPERFM_R 13768
+#define USBPHY_R 13776
+#define STGEN_R 13844
+#define USART1_R 13888
+#define USART2_R 13889
+#define SPI4_R 13890
+#define SPI5_R 13891
+#define I2C3_R 13892
+#define I2C4_R 13893
+#define I2C5_R 13894
+#define TIM12_R 13895
+#define TIM13_R 13896
+#define TIM14_R 13897
+#define TIM15_R 13898
+#define TIM16_R 13899
+#define TIM17_R 13900
+#define DMA1_R 13952
+#define DMA2_R 13953
+#define DMAMUX1_R 13954
+#define DMA3_R 13955
+#define DMAMUX2_R 13956
+#define ADC1_R 13957
+#define ADC2_R 13958
+#define USBO_R 13960
+#define GPIOA_R 14080
+#define GPIOB_R 14081
+#define GPIOC_R 14082
+#define GPIOD_R 14083
+#define GPIOE_R 14084
+#define GPIOF_R 14085
+#define GPIOG_R 14086
+#define GPIOH_R 14087
+#define GPIOI_R 14088
+#define TSC_R 14095
+#define PKA_R 14146
+#define SAES_R 14147
+#define CRYP1_R 14148
+#define HASH1_R 14149
+#define RNG1_R 14150
+#define AXIMC_R 14160
+#define MDMA_R 14208
+#define MCE_R 14209
+#define ETH1MAC_R 14218
+#define FMC_R 14220
+#define QSPI_R 14222
+#define SDMMC1_R 14224
+#define SDMMC2_R 14225
+#define CRC1_R 14228
+#define USBH_R 14232
+#define ETH2MAC_R 14238
+
+/* SCMI reset domain identifiers */
+#define RST_SCMI_LTDC 0
+#define RST_SCMI_MDMA 1
+
+#endif /* _DT_BINDINGS_STM32MP13_RESET_H_ */
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..79e52aca5912
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DI 2
+#define RST_BUS_G2D 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MSGBOX0 7
+#define RST_BUS_MSGBOX1 8
+#define RST_BUS_MSGBOX2 9
+#define RST_BUS_SPINLOCK 10
+#define RST_BUS_HSTIMER 11
+#define RST_BUS_DBG 12
+#define RST_BUS_PWM 13
+#define RST_BUS_DRAM 14
+#define RST_BUS_MMC0 15
+#define RST_BUS_MMC1 16
+#define RST_BUS_MMC2 17
+#define RST_BUS_UART0 18
+#define RST_BUS_UART1 19
+#define RST_BUS_UART2 20
+#define RST_BUS_UART3 21
+#define RST_BUS_UART4 22
+#define RST_BUS_UART5 23
+#define RST_BUS_I2C0 24
+#define RST_BUS_I2C1 25
+#define RST_BUS_I2C2 26
+#define RST_BUS_I2C3 27
+#define RST_BUS_SPI0 28
+#define RST_BUS_SPI1 29
+#define RST_BUS_EMAC 30
+#define RST_BUS_IR_TX 31
+#define RST_BUS_GPADC 32
+#define RST_BUS_THS 33
+#define RST_BUS_I2S0 34
+#define RST_BUS_I2S1 35
+#define RST_BUS_I2S2 36
+#define RST_BUS_SPDIF 37
+#define RST_BUS_DMIC 38
+#define RST_BUS_AUDIO 39
+#define RST_USB_PHY0 40
+#define RST_USB_PHY1 41
+#define RST_BUS_OHCI0 42
+#define RST_BUS_OHCI1 43
+#define RST_BUS_EHCI0 44
+#define RST_BUS_EHCI1 45
+#define RST_BUS_OTG 46
+#define RST_BUS_LRADC 47
+#define RST_BUS_DPSS_TOP 48
+#define RST_BUS_HDMI_SUB 49
+#define RST_BUS_HDMI_MAIN 50
+#define RST_BUS_MIPI_DSI 51
+#define RST_BUS_TCON_LCD0 52
+#define RST_BUS_TCON_TV 53
+#define RST_BUS_LVDS0 54
+#define RST_BUS_TVE 55
+#define RST_BUS_TVE_TOP 56
+#define RST_BUS_TVD 57
+#define RST_BUS_TVD_TOP 58
+#define RST_BUS_LEDC 59
+#define RST_BUS_CSI 60
+#define RST_BUS_TPADC 61
+#define RST_DSP 62
+#define RST_BUS_DSP_CFG 63
+#define RST_BUS_DSP_DBG 64
+#define RST_BUS_RISCV_CFG 65
+#define RST_BUS_CAN0 66
+#define RST_BUS_CAN1 67
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun20i-d1-r-ccu.h b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..e20babc990af
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+
+#define RST_BUS_R_TIMER 0
+#define RST_BUS_R_TWD 1
+#define RST_BUS_R_PPU 2
+#define RST_BUS_R_IR_RX 3
+#define RST_BUS_R_RTC 4
+#define RST_BUS_R_CPUCFG 5
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-a100-ccu.h b/include/dt-bindings/reset/sun50i-a100-ccu.h
new file mode 100644
index 000000000000..d13764bc1860
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-a100-ccu.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN50I_A100_H_
+#define _DT_BINDINGS_RESET_SUN50I_A100_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_G2D 2
+#define RST_BUS_GPU 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MSGBOX 7
+#define RST_BUS_SPINLOCK 8
+#define RST_BUS_HSTIMER 9
+#define RST_BUS_DBG 10
+#define RST_BUS_PSI 11
+#define RST_BUS_PWM 12
+#define RST_BUS_DRAM 13
+#define RST_BUS_NAND 14
+#define RST_BUS_MMC0 15
+#define RST_BUS_MMC1 16
+#define RST_BUS_MMC2 17
+#define RST_BUS_UART0 18
+#define RST_BUS_UART1 19
+#define RST_BUS_UART2 20
+#define RST_BUS_UART3 21
+#define RST_BUS_UART4 22
+#define RST_BUS_I2C0 23
+#define RST_BUS_I2C1 24
+#define RST_BUS_I2C2 25
+#define RST_BUS_I2C3 26
+#define RST_BUS_SPI0 27
+#define RST_BUS_SPI1 28
+#define RST_BUS_SPI2 29
+#define RST_BUS_EMAC 30
+#define RST_BUS_IR_RX 31
+#define RST_BUS_IR_TX 32
+#define RST_BUS_GPADC 33
+#define RST_BUS_THS 34
+#define RST_BUS_I2S0 35
+#define RST_BUS_I2S1 36
+#define RST_BUS_I2S2 37
+#define RST_BUS_I2S3 38
+#define RST_BUS_SPDIF 39
+#define RST_BUS_DMIC 40
+#define RST_BUS_AUDIO_CODEC 41
+#define RST_USB_PHY0 42
+#define RST_USB_PHY1 43
+#define RST_BUS_OHCI0 44
+#define RST_BUS_OHCI1 45
+#define RST_BUS_EHCI0 46
+#define RST_BUS_EHCI1 47
+#define RST_BUS_OTG 48
+#define RST_BUS_LRADC 49
+#define RST_BUS_DPSS_TOP0 50
+#define RST_BUS_DPSS_TOP1 51
+#define RST_BUS_MIPI_DSI 52
+#define RST_BUS_TCON_LCD 53
+#define RST_BUS_LVDS 54
+#define RST_BUS_LEDC 55
+#define RST_BUS_CSI 56
+#define RST_BUS_CSI_ISP 57
+
+#endif /* _DT_BINDINGS_RESET_SUN50I_A100_H_ */
diff --git a/include/dt-bindings/reset/sun50i-a100-r-ccu.h b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
new file mode 100644
index 000000000000..1e7c4431f03c
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_
+
+#define RST_R_APB1_TIMER 0
+#define RST_R_APB1_BUS_PWM 1
+#define RST_R_APB1_PPU 2
+#define RST_R_APB2_UART 3
+#define RST_R_APB2_I2C0 4
+#define RST_R_APB2_I2C1 5
+#define RST_R_APB1_BUS_IR 6
+#define RST_R_AHB_BUS_RTC 7
+
+#endif /* _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h
index 81106f455097..d038ddfa4818 100644
--- a/include/dt-bindings/reset/sun50i-h6-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
index 01c84dba49a4..d541ade884fc 100644
--- a/include/dt-bindings/reset/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
*/
@@ -13,5 +13,6 @@
#define RST_R_APB2_I2C 4
#define RST_R_APB1_IR 5
#define RST_R_APB1_W1 6
+#define RST_R_APB2_RSB 7
#endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
new file mode 100644
index 000000000000..1bd8bb0a11be
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2020 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN50I_H616_H_
+#define _DT_BINDINGS_RESET_SUN50I_H616_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DEINTERLACE 2
+#define RST_BUS_GPU 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_HSTIMER 7
+#define RST_BUS_DBG 8
+#define RST_BUS_PSI 9
+#define RST_BUS_PWM 10
+#define RST_BUS_IOMMU 11
+#define RST_BUS_DRAM 12
+#define RST_BUS_NAND 13
+#define RST_BUS_MMC0 14
+#define RST_BUS_MMC1 15
+#define RST_BUS_MMC2 16
+#define RST_BUS_UART0 17
+#define RST_BUS_UART1 18
+#define RST_BUS_UART2 19
+#define RST_BUS_UART3 20
+#define RST_BUS_UART4 21
+#define RST_BUS_UART5 22
+#define RST_BUS_I2C0 23
+#define RST_BUS_I2C1 24
+#define RST_BUS_I2C2 25
+#define RST_BUS_I2C3 26
+#define RST_BUS_I2C4 27
+#define RST_BUS_SPI0 28
+#define RST_BUS_SPI1 29
+#define RST_BUS_EMAC0 30
+#define RST_BUS_EMAC1 31
+#define RST_BUS_TS 32
+#define RST_BUS_THS 33
+#define RST_BUS_SPDIF 34
+#define RST_BUS_DMIC 35
+#define RST_BUS_AUDIO_CODEC 36
+#define RST_BUS_AUDIO_HUB 37
+#define RST_USB_PHY0 38
+#define RST_USB_PHY1 39
+#define RST_USB_PHY2 40
+#define RST_USB_PHY3 41
+#define RST_BUS_OHCI0 42
+#define RST_BUS_OHCI1 43
+#define RST_BUS_OHCI2 44
+#define RST_BUS_OHCI3 45
+#define RST_BUS_EHCI0 46
+#define RST_BUS_EHCI1 47
+#define RST_BUS_EHCI2 48
+#define RST_BUS_EHCI3 49
+#define RST_BUS_OTG 50
+#define RST_BUS_HDMI 51
+#define RST_BUS_HDMI_SUB 52
+#define RST_BUS_TCON_TOP 53
+#define RST_BUS_TCON_TV0 54
+#define RST_BUS_TCON_TV1 55
+#define RST_BUS_TVE_TOP 56
+#define RST_BUS_TVE0 57
+#define RST_BUS_HDCP 58
+#define RST_BUS_KEYADC 59
+
+#endif /* _DT_BINDINGS_RESET_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/reset/sunplus,sp7021-reset.h b/include/dt-bindings/reset/sunplus,sp7021-reset.h
new file mode 100644
index 000000000000..ab486707387f
--- /dev/null
+++ b/include/dt-bindings/reset/sunplus,sp7021-reset.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#ifndef _DT_BINDINGS_RST_SUNPLUS_SP7021_H
+#define _DT_BINDINGS_RST_SUNPLUS_SP7021_H
+
+#define RST_SYSTEM 0
+#define RST_RTC 1
+#define RST_IOCTL 2
+#define RST_IOP 3
+#define RST_OTPRX 4
+#define RST_NOC 5
+#define RST_BR 6
+#define RST_RBUS_L00 7
+#define RST_SPIFL 8
+#define RST_SDCTRL0 9
+#define RST_PERI0 10
+#define RST_A926 11
+#define RST_UMCTL2 12
+#define RST_PERI1 13
+#define RST_DDR_PHY0 14
+#define RST_ACHIP 15
+#define RST_STC0 16
+#define RST_STC_AV0 17
+#define RST_STC_AV1 18
+#define RST_STC_AV2 19
+#define RST_UA0 20
+#define RST_UA1 21
+#define RST_UA2 22
+#define RST_UA3 23
+#define RST_UA4 24
+#define RST_HWUA 25
+#define RST_DDC0 26
+#define RST_UADMA 27
+#define RST_CBDMA0 28
+#define RST_CBDMA1 29
+#define RST_SPI_COMBO_0 30
+#define RST_SPI_COMBO_1 31
+#define RST_SPI_COMBO_2 32
+#define RST_SPI_COMBO_3 33
+#define RST_AUD 34
+#define RST_USBC0 35
+#define RST_USBC1 36
+#define RST_UPHY0 37
+#define RST_UPHY1 38
+#define RST_I2CM0 39
+#define RST_I2CM1 40
+#define RST_I2CM2 41
+#define RST_I2CM3 42
+#define RST_PMC 43
+#define RST_CARD_CTL0 44
+#define RST_CARD_CTL1 45
+#define RST_CARD_CTL4 46
+#define RST_BCH 47
+#define RST_DDFCH 48
+#define RST_CSIIW0 49
+#define RST_CSIIW1 50
+#define RST_MIPICSI0 51
+#define RST_MIPICSI1 52
+#define RST_HDMI_TX 53
+#define RST_VPOST 54
+#define RST_TGEN 55
+#define RST_DMIX 56
+#define RST_TCON 57
+#define RST_INTERRUPT 58
+#define RST_RGST 59
+#define RST_GPIO 60
+#define RST_RBUS_TOP 61
+#define RST_MAILBOX 62
+#define RST_SPIND 63
+#define RST_I2C2CBUS 64
+#define RST_SEC 65
+#define RST_DVE 66
+#define RST_GPOST0 67
+#define RST_OSD0 68
+#define RST_DISP_PWM 69
+#define RST_UADBG 70
+#define RST_DUMMY_MASTER 71
+#define RST_FIO_CTL 72
+#define RST_FPGA 73
+#define RST_L2SW 74
+#define RST_ICM 75
+#define RST_AXI_GLOBAL 76
+
+#endif
diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h
new file mode 100644
index 000000000000..85cc423a7bdf
--- /dev/null
+++ b/include/dt-bindings/reset/tegra234-reset.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_RESET_TEGRA234_RESET_H
+#define DT_BINDINGS_RESET_TEGRA234_RESET_H
+
+/**
+ * @file
+ * @defgroup bpmp_reset_ids Reset IDs
+ * @brief Identifiers for resets controllable by firmware
+ * @{
+ */
+#define TEGRA234_RESET_ACTMON 1U
+#define TEGRA234_RESET_ADSP_ALL 2U
+#define TEGRA234_RESET_DSI_CORE 3U
+#define TEGRA234_RESET_CAN1 4U
+#define TEGRA234_RESET_CAN2 5U
+#define TEGRA234_RESET_DLA0 6U
+#define TEGRA234_RESET_DLA1 7U
+#define TEGRA234_RESET_DPAUX 8U
+#define TEGRA234_RESET_OFA 9U
+#define TEGRA234_RESET_NVJPG1 10U
+#define TEGRA234_RESET_PEX1_CORE_6 11U
+#define TEGRA234_RESET_PEX1_CORE_6_APB 12U
+#define TEGRA234_RESET_PEX1_COMMON_APB 13U
+#define TEGRA234_RESET_PEX2_CORE_7 14U
+#define TEGRA234_RESET_PEX2_CORE_7_APB 15U
+#define TEGRA234_RESET_NVDISPLAY 16U
+#define TEGRA234_RESET_EQOS 17U
+#define TEGRA234_RESET_GPCDMA 18U
+#define TEGRA234_RESET_GPU 19U
+#define TEGRA234_RESET_HDA 20U
+#define TEGRA234_RESET_HDACODEC 21U
+#define TEGRA234_RESET_EQOS_MACSEC 22U
+#define TEGRA234_RESET_EQOS_MACSEC_SECURE 23U
+#define TEGRA234_RESET_I2C1 24U
+#define TEGRA234_RESET_PEX2_CORE_8 25U
+#define TEGRA234_RESET_PEX2_CORE_8_APB 26U
+#define TEGRA234_RESET_PEX2_CORE_9 27U
+#define TEGRA234_RESET_PEX2_CORE_9_APB 28U
+#define TEGRA234_RESET_I2C2 29U
+#define TEGRA234_RESET_I2C3 30U
+#define TEGRA234_RESET_I2C4 31U
+#define TEGRA234_RESET_I2C6 32U
+#define TEGRA234_RESET_I2C7 33U
+#define TEGRA234_RESET_I2C8 34U
+#define TEGRA234_RESET_I2C9 35U
+#define TEGRA234_RESET_ISP 36U
+#define TEGRA234_RESET_MIPI_CAL 37U
+#define TEGRA234_RESET_MPHY_CLK_CTL 38U
+#define TEGRA234_RESET_MPHY_L0_RX 39U
+#define TEGRA234_RESET_MPHY_L0_TX 40U
+#define TEGRA234_RESET_MPHY_L1_RX 41U
+#define TEGRA234_RESET_MPHY_L1_TX 42U
+#define TEGRA234_RESET_NVCSI 43U
+#define TEGRA234_RESET_NVDEC 44U
+#define TEGRA234_RESET_MGBE0_PCS 45U
+#define TEGRA234_RESET_MGBE0_MAC 46U
+#define TEGRA234_RESET_MGBE0_MACSEC 47U
+#define TEGRA234_RESET_MGBE0_MACSEC_SECURE 48U
+#define TEGRA234_RESET_MGBE1_PCS 49U
+#define TEGRA234_RESET_MGBE1_MAC 50U
+#define TEGRA234_RESET_MGBE1_MACSEC 51U
+#define TEGRA234_RESET_MGBE1_MACSEC_SECURE 52U
+#define TEGRA234_RESET_MGBE2_PCS 53U
+#define TEGRA234_RESET_MGBE2_MAC 54U
+#define TEGRA234_RESET_MGBE2_MACSEC 55U
+#define TEGRA234_RESET_PEX2_CORE_10 56U
+#define TEGRA234_RESET_PEX2_CORE_10_APB 57U
+#define TEGRA234_RESET_PEX2_COMMON_APB 58U
+#define TEGRA234_RESET_NVENC 59U
+#define TEGRA234_RESET_MGBE2_MACSEC_SECURE 60U
+#define TEGRA234_RESET_NVJPG 61U
+#define TEGRA234_RESET_LA 64U
+#define TEGRA234_RESET_HWPM 65U
+#define TEGRA234_RESET_PVA0_ALL 66U
+#define TEGRA234_RESET_CEC 67U
+#define TEGRA234_RESET_PWM1 68U
+#define TEGRA234_RESET_PWM2 69U
+#define TEGRA234_RESET_PWM3 70U
+#define TEGRA234_RESET_PWM4 71U
+#define TEGRA234_RESET_PWM5 72U
+#define TEGRA234_RESET_PWM6 73U
+#define TEGRA234_RESET_PWM7 74U
+#define TEGRA234_RESET_PWM8 75U
+#define TEGRA234_RESET_QSPI0 76U
+#define TEGRA234_RESET_QSPI1 77U
+#define TEGRA234_RESET_I2S7 78U
+#define TEGRA234_RESET_I2S8 79U
+#define TEGRA234_RESET_SCE_ALL 80U
+#define TEGRA234_RESET_RCE_ALL 81U
+#define TEGRA234_RESET_SDMMC1 82U
+#define TEGRA234_RESET_RSVD_83 83U
+#define TEGRA234_RESET_RSVD_84 84U
+#define TEGRA234_RESET_SDMMC4 85U
+#define TEGRA234_RESET_MGBE3_PCS 87U
+#define TEGRA234_RESET_MGBE3_MAC 88U
+#define TEGRA234_RESET_MGBE3_MACSEC 89U
+#define TEGRA234_RESET_MGBE3_MACSEC_SECURE 90U
+#define TEGRA234_RESET_SPI1 91U
+#define TEGRA234_RESET_SPI2 92U
+#define TEGRA234_RESET_SPI3 93U
+#define TEGRA234_RESET_SPI4 94U
+#define TEGRA234_RESET_TACH0 95U
+#define TEGRA234_RESET_TACH1 96U
+#define TEGRA234_RESET_SPI5 97U
+#define TEGRA234_RESET_TSEC 98U
+#define TEGRA234_RESET_UARTI 99U
+#define TEGRA234_RESET_UARTA 100U
+#define TEGRA234_RESET_UARTB 101U
+#define TEGRA234_RESET_UARTC 102U
+#define TEGRA234_RESET_UARTD 103U
+#define TEGRA234_RESET_UARTE 104U
+#define TEGRA234_RESET_UARTF 105U
+#define TEGRA234_RESET_UARTJ 106U
+#define TEGRA234_RESET_UARTH 107U
+#define TEGRA234_RESET_UFSHC 108U
+#define TEGRA234_RESET_UFSHC_AXI_M 109U
+#define TEGRA234_RESET_UFSHC_LP_SEQ 110U
+#define TEGRA234_RESET_RSVD_111 111U
+#define TEGRA234_RESET_VI 112U
+#define TEGRA234_RESET_VIC 113U
+#define TEGRA234_RESET_XUSB_PADCTL 114U
+#define TEGRA234_RESET_VI2 115U
+#define TEGRA234_RESET_PEX0_CORE_0 116U
+#define TEGRA234_RESET_PEX0_CORE_1 117U
+#define TEGRA234_RESET_PEX0_CORE_2 118U
+#define TEGRA234_RESET_PEX0_CORE_3 119U
+#define TEGRA234_RESET_PEX0_CORE_4 120U
+#define TEGRA234_RESET_PEX0_CORE_0_APB 121U
+#define TEGRA234_RESET_PEX0_CORE_1_APB 122U
+#define TEGRA234_RESET_PEX0_CORE_2_APB 123U
+#define TEGRA234_RESET_PEX0_CORE_3_APB 124U
+#define TEGRA234_RESET_PEX0_CORE_4_APB 125U
+#define TEGRA234_RESET_PEX0_COMMON_APB 126U
+#define TEGRA234_RESET_RSVD_127 127U
+#define TEGRA234_RESET_NVHS_UPHY_PLL1 128U
+#define TEGRA234_RESET_PEX1_CORE_5 129U
+#define TEGRA234_RESET_PEX1_CORE_5_APB 130U
+#define TEGRA234_RESET_GBE_UPHY 131U
+#define TEGRA234_RESET_GBE_UPHY_PM 132U
+#define TEGRA234_RESET_NVHS_UPHY 133U
+#define TEGRA234_RESET_NVHS_UPHY_PLL0 134U
+#define TEGRA234_RESET_NVHS_UPHY_L0 135U
+#define TEGRA234_RESET_NVHS_UPHY_L1 136U
+#define TEGRA234_RESET_NVHS_UPHY_L2 137U
+#define TEGRA234_RESET_NVHS_UPHY_L3 138U
+#define TEGRA234_RESET_NVHS_UPHY_L4 139U
+#define TEGRA234_RESET_NVHS_UPHY_L5 140U
+#define TEGRA234_RESET_NVHS_UPHY_L6 141U
+#define TEGRA234_RESET_NVHS_UPHY_L7 142U
+#define TEGRA234_RESET_NVHS_UPHY_PM 143U
+#define TEGRA234_RESET_DMIC5 144U
+#define TEGRA234_RESET_APE 145U
+#define TEGRA234_RESET_PEX_USB_UPHY 146U
+#define TEGRA234_RESET_PEX_USB_UPHY_L0 147U
+#define TEGRA234_RESET_PEX_USB_UPHY_L1 148U
+#define TEGRA234_RESET_PEX_USB_UPHY_L2 149U
+#define TEGRA234_RESET_PEX_USB_UPHY_L3 150U
+#define TEGRA234_RESET_PEX_USB_UPHY_L4 151U
+#define TEGRA234_RESET_PEX_USB_UPHY_L5 152U
+#define TEGRA234_RESET_PEX_USB_UPHY_L6 153U
+#define TEGRA234_RESET_PEX_USB_UPHY_L7 154U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL0 159U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL1 160U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL2 161U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL3 162U
+#define TEGRA234_RESET_GBE_UPHY_L0 163U
+#define TEGRA234_RESET_GBE_UPHY_L1 164U
+#define TEGRA234_RESET_GBE_UPHY_L2 165U
+#define TEGRA234_RESET_GBE_UPHY_L3 166U
+#define TEGRA234_RESET_GBE_UPHY_L4 167U
+#define TEGRA234_RESET_GBE_UPHY_L5 168U
+#define TEGRA234_RESET_GBE_UPHY_L6 169U
+#define TEGRA234_RESET_GBE_UPHY_L7 170U
+#define TEGRA234_RESET_GBE_UPHY_PLL0 171U
+#define TEGRA234_RESET_GBE_UPHY_PLL1 172U
+#define TEGRA234_RESET_GBE_UPHY_PLL2 173U
+
+/** @} */
+
+#endif
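These IDs index resets exposed by the BPMP firmware rather than a memory-mapped reset controller. A minimal consumer sketch, assuming a &bpmp provider label:

	i2c@3160000 {
		resets = <&bpmp TEGRA234_RESET_I2C1>;
		reset-names = "i2c";
	};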
diff --git a/include/dt-bindings/reset/toshiba,tmpv770x.h b/include/dt-bindings/reset/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..c1007acb1941
--- /dev/null
+++ b/include/dt-bindings/reset/toshiba,tmpv770x.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_NR_RESET 32
+
+#endif /* _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/reset/xlnx-versal-resets.h b/include/dt-bindings/reset/xlnx-versal-resets.h
new file mode 100644
index 000000000000..895424e9b0e5
--- /dev/null
+++ b/include/dt-bindings/reset/xlnx-versal-resets.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_VERSAL_RESETS_H
+#define _DT_BINDINGS_VERSAL_RESETS_H
+
+#define VERSAL_RST_PMC_POR (0xc30c001U)
+#define VERSAL_RST_PMC (0xc410002U)
+#define VERSAL_RST_PS_POR (0xc30c003U)
+#define VERSAL_RST_PL_POR (0xc30c004U)
+#define VERSAL_RST_NOC_POR (0xc30c005U)
+#define VERSAL_RST_FPD_POR (0xc30c006U)
+#define VERSAL_RST_ACPU_0_POR (0xc30c007U)
+#define VERSAL_RST_ACPU_1_POR (0xc30c008U)
+#define VERSAL_RST_OCM2_POR (0xc30c009U)
+#define VERSAL_RST_PS_SRST (0xc41000aU)
+#define VERSAL_RST_PL_SRST (0xc41000bU)
+#define VERSAL_RST_NOC (0xc41000cU)
+#define VERSAL_RST_NPI (0xc41000dU)
+#define VERSAL_RST_SYS_RST_1 (0xc41000eU)
+#define VERSAL_RST_SYS_RST_2 (0xc41000fU)
+#define VERSAL_RST_SYS_RST_3 (0xc410010U)
+#define VERSAL_RST_FPD (0xc410011U)
+#define VERSAL_RST_PL0 (0xc410012U)
+#define VERSAL_RST_PL1 (0xc410013U)
+#define VERSAL_RST_PL2 (0xc410014U)
+#define VERSAL_RST_PL3 (0xc410015U)
+#define VERSAL_RST_APU (0xc410016U)
+#define VERSAL_RST_ACPU_0 (0xc410017U)
+#define VERSAL_RST_ACPU_1 (0xc410018U)
+#define VERSAL_RST_ACPU_L2 (0xc410019U)
+#define VERSAL_RST_ACPU_GIC (0xc41001aU)
+#define VERSAL_RST_RPU_ISLAND (0xc41001bU)
+#define VERSAL_RST_RPU_AMBA (0xc41001cU)
+#define VERSAL_RST_R5_0 (0xc41001dU)
+#define VERSAL_RST_R5_1 (0xc41001eU)
+#define VERSAL_RST_SYSMON_PMC_SEQ_RST (0xc41001fU)
+#define VERSAL_RST_SYSMON_PMC_CFG_RST (0xc410020U)
+#define VERSAL_RST_SYSMON_FPD_CFG_RST (0xc410021U)
+#define VERSAL_RST_SYSMON_FPD_SEQ_RST (0xc410022U)
+#define VERSAL_RST_SYSMON_LPD (0xc410023U)
+#define VERSAL_RST_PDMA_RST1 (0xc410024U)
+#define VERSAL_RST_PDMA_RST0 (0xc410025U)
+#define VERSAL_RST_ADMA (0xc410026U)
+#define VERSAL_RST_TIMESTAMP (0xc410027U)
+#define VERSAL_RST_OCM (0xc410028U)
+#define VERSAL_RST_OCM2_RST (0xc410029U)
+#define VERSAL_RST_IPI (0xc41002aU)
+#define VERSAL_RST_SBI (0xc41002bU)
+#define VERSAL_RST_LPD (0xc41002cU)
+#define VERSAL_RST_QSPI (0xc10402dU)
+#define VERSAL_RST_OSPI (0xc10402eU)
+#define VERSAL_RST_SDIO_0 (0xc10402fU)
+#define VERSAL_RST_SDIO_1 (0xc104030U)
+#define VERSAL_RST_I2C_PMC (0xc104031U)
+#define VERSAL_RST_GPIO_PMC (0xc104032U)
+#define VERSAL_RST_GEM_0 (0xc104033U)
+#define VERSAL_RST_GEM_1 (0xc104034U)
+#define VERSAL_RST_SPARE (0xc104035U)
+#define VERSAL_RST_USB_0 (0xc104036U)
+#define VERSAL_RST_UART_0 (0xc104037U)
+#define VERSAL_RST_UART_1 (0xc104038U)
+#define VERSAL_RST_SPI_0 (0xc104039U)
+#define VERSAL_RST_SPI_1 (0xc10403aU)
+#define VERSAL_RST_CAN_FD_0 (0xc10403bU)
+#define VERSAL_RST_CAN_FD_1 (0xc10403cU)
+#define VERSAL_RST_I2C_0 (0xc10403dU)
+#define VERSAL_RST_I2C_1 (0xc10403eU)
+#define VERSAL_RST_GPIO_LPD (0xc10403fU)
+#define VERSAL_RST_TTC_0 (0xc104040U)
+#define VERSAL_RST_TTC_1 (0xc104041U)
+#define VERSAL_RST_TTC_2 (0xc104042U)
+#define VERSAL_RST_TTC_3 (0xc104043U)
+#define VERSAL_RST_SWDT_FPD (0xc104044U)
+#define VERSAL_RST_SWDT_LPD (0xc104045U)
+#define VERSAL_RST_USB (0xc104046U)
+#define VERSAL_RST_DPC (0xc208047U)
+#define VERSAL_RST_PMCDBG (0xc208048U)
+#define VERSAL_RST_DBG_TRACE (0xc208049U)
+#define VERSAL_RST_DBG_FPD (0xc20804aU)
+#define VERSAL_RST_DBG_TSTMP (0xc20804bU)
+#define VERSAL_RST_RPU0_DBG (0xc20804cU)
+#define VERSAL_RST_RPU1_DBG (0xc20804dU)
+#define VERSAL_RST_HSDP (0xc20804eU)
+#define VERSAL_RST_DBG_LPD (0xc20804fU)
+#define VERSAL_RST_CPM_POR (0xc30c050U)
+#define VERSAL_RST_CPM (0xc410051U)
+#define VERSAL_RST_CPMDBG (0xc208052U)
+#define VERSAL_RST_PCIE_CFG (0xc410053U)
+#define VERSAL_RST_PCIE_CORE0 (0xc410054U)
+#define VERSAL_RST_PCIE_CORE1 (0xc410055U)
+#define VERSAL_RST_PCIE_DMA (0xc410056U)
+#define VERSAL_RST_CMN (0xc410057U)
+#define VERSAL_RST_L2_0 (0xc410058U)
+#define VERSAL_RST_L2_1 (0xc410059U)
+#define VERSAL_RST_ADDR_REMAP (0xc41005aU)
+#define VERSAL_RST_CPI0 (0xc41005bU)
+#define VERSAL_RST_CPI1 (0xc41005cU)
+#define VERSAL_RST_XRAM (0xc30c05dU)
+#define VERSAL_RST_AIE_ARRAY (0xc10405eU)
+#define VERSAL_RST_AIE_SHIM (0xc10405fU)
+
+#endif
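Unlike most reset bindings, these values encode complete firmware node IDs rather than small indices, so consumers pass them through to the platform firmware. A sketch, with the &versal_firmware label assumed:

	serial@ff000000 {
		resets = <&versal_firmware VERSAL_RST_UART_0>;
	};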
diff --git a/include/dt-bindings/soc/bcm-pmb.h b/include/dt-bindings/soc/bcm-pmb.h
new file mode 100644
index 000000000000..385884468007
--- /dev/null
+++ b/include/dt-bindings/soc/bcm-pmb.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */
+
+#ifndef __DT_BINDINGS_SOC_BCM_PMB_H
+#define __DT_BINDINGS_SOC_BCM_PMB_H
+
+#define BCM_PMB_PCIE0 0x01
+#define BCM_PMB_PCIE1 0x02
+#define BCM_PMB_PCIE2 0x03
+#define BCM_PMB_HOST_USB 0x04
+#define BCM_PMB_SATA 0x05
+
+#endif
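A consumer sketch for these PMB power-domain IDs, assuming a &pmb label for the power controller node:

	pcie@80040000 {
		power-domains = <&pmb BCM_PMB_PCIE0>;
	};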
diff --git a/include/dt-bindings/soc/bcm6318-pm.h b/include/dt-bindings/soc/bcm6318-pm.h
new file mode 100644
index 000000000000..05931dce8333
--- /dev/null
+++ b/include/dt-bindings/soc/bcm6318-pm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM6318_PM_H
+#define __DT_BINDINGS_BMIPS_BCM6318_PM_H
+
+#define BCM6318_POWER_DOMAIN_PCIE 0
+#define BCM6318_POWER_DOMAIN_USB 1
+#define BCM6318_POWER_DOMAIN_EPHY0 2
+#define BCM6318_POWER_DOMAIN_EPHY1 3
+#define BCM6318_POWER_DOMAIN_EPHY2 4
+#define BCM6318_POWER_DOMAIN_EPHY3 5
+#define BCM6318_POWER_DOMAIN_LDO2P5 6
+#define BCM6318_POWER_DOMAIN_LDO2P9 7
+#define BCM6318_POWER_DOMAIN_SW1P0 8
+#define BCM6318_POWER_DOMAIN_PAD 9
+
+#endif /* __DT_BINDINGS_BMIPS_BCM6318_PM_H */
diff --git a/include/dt-bindings/soc/bcm63268-pm.h b/include/dt-bindings/soc/bcm63268-pm.h
new file mode 100644
index 000000000000..84ded53a732f
--- /dev/null
+++ b/include/dt-bindings/soc/bcm63268-pm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM63268_PM_H
+#define __DT_BINDINGS_BMIPS_BCM63268_PM_H
+
+#define BCM63268_POWER_DOMAIN_SAR 0
+#define BCM63268_POWER_DOMAIN_IPSEC 1
+#define BCM63268_POWER_DOMAIN_MIPS 2
+#define BCM63268_POWER_DOMAIN_DECT 3
+#define BCM63268_POWER_DOMAIN_USBH 4
+#define BCM63268_POWER_DOMAIN_USBD 5
+#define BCM63268_POWER_DOMAIN_ROBOSW 6
+#define BCM63268_POWER_DOMAIN_PCM 7
+#define BCM63268_POWER_DOMAIN_PERIPH 8
+#define BCM63268_POWER_DOMAIN_VDSL_PHY 9
+#define BCM63268_POWER_DOMAIN_VDSL_MIPS 10
+#define BCM63268_POWER_DOMAIN_FAP 11
+#define BCM63268_POWER_DOMAIN_PCIE 12
+#define BCM63268_POWER_DOMAIN_WLAN_PADS 13
+
+#endif /* __DT_BINDINGS_BMIPS_BCM63268_PM_H */
diff --git a/include/dt-bindings/soc/bcm6328-pm.h b/include/dt-bindings/soc/bcm6328-pm.h
new file mode 100644
index 000000000000..557e1a69b7f7
--- /dev/null
+++ b/include/dt-bindings/soc/bcm6328-pm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM6328_PM_H
+#define __DT_BINDINGS_BMIPS_BCM6328_PM_H
+
+#define BCM6328_POWER_DOMAIN_ADSL2_MIPS 0
+#define BCM6328_POWER_DOMAIN_ADSL2_PHY 1
+#define BCM6328_POWER_DOMAIN_ADSL2_AFE 2
+#define BCM6328_POWER_DOMAIN_SAR 3
+#define BCM6328_POWER_DOMAIN_PCM 4
+#define BCM6328_POWER_DOMAIN_USBD 5
+#define BCM6328_POWER_DOMAIN_USBH 6
+#define BCM6328_POWER_DOMAIN_PCIE 7
+#define BCM6328_POWER_DOMAIN_ROBOSW 8
+#define BCM6328_POWER_DOMAIN_EPHY 9
+
+#endif /* __DT_BINDINGS_BMIPS_BCM6328_PM_H */
diff --git a/include/dt-bindings/soc/bcm6362-pm.h b/include/dt-bindings/soc/bcm6362-pm.h
new file mode 100644
index 000000000000..d087ba63c7a1
--- /dev/null
+++ b/include/dt-bindings/soc/bcm6362-pm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_BMIPS_BCM6362_PM_H
+#define __DT_BINDINGS_BMIPS_BCM6362_PM_H
+
+#define BCM6362_POWER_DOMAIN_SAR 0
+#define BCM6362_POWER_DOMAIN_IPSEC 1
+#define BCM6362_POWER_DOMAIN_MIPS 2
+#define BCM6362_POWER_DOMAIN_DECT 3
+#define BCM6362_POWER_DOMAIN_USBH 4
+#define BCM6362_POWER_DOMAIN_USBD 5
+#define BCM6362_POWER_DOMAIN_ROBOSW 6
+#define BCM6362_POWER_DOMAIN_PCM 7
+#define BCM6362_POWER_DOMAIN_PERIPH 8
+#define BCM6362_POWER_DOMAIN_ADSL_PHY 9
+#define BCM6362_POWER_DOMAIN_GMII_PADS 10
+#define BCM6362_POWER_DOMAIN_FAP 11
+#define BCM6362_POWER_DOMAIN_PCIE 12
+#define BCM6362_POWER_DOMAIN_WLAN_PADS 13
+
+#endif /* __DT_BINDINGS_BMIPS_BCM6362_PM_H */
diff --git a/include/dt-bindings/soc/cpm1-fsl,tsa.h b/include/dt-bindings/soc/cpm1-fsl,tsa.h
new file mode 100644
index 000000000000..2cc44e867dbe
--- /dev/null
+++ b/include/dt-bindings/soc/cpm1-fsl,tsa.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_SOC_FSL_TSA_H
+#define __DT_BINDINGS_SOC_FSL_TSA_H
+
+#define FSL_CPM_TSA_NU 0 /* Pseudo cell ID for unused items */
+#define FSL_CPM_TSA_SCC2 1
+#define FSL_CPM_TSA_SCC3 2
+#define FSL_CPM_TSA_SCC4 3
+#define FSL_CPM_TSA_SMC1 4
+#define FSL_CPM_TSA_SMC2 5
+
+#endif
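These cell IDs select which serial controller a group of timeslots is routed to. An illustrative sketch, where the fsl,tx-ts-routes property name and its <count, cell> pair format are assumptions drawn from the TSA binding:

	&tsa {
		/* assumed format: <timeslot count, cell ID> pairs */
		fsl,tx-ts-routes = <2 FSL_CPM_TSA_SCC2>, <2 FSL_CPM_TSA_NU>;
	};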
diff --git a/include/dt-bindings/soc/qcom,gpr.h b/include/dt-bindings/soc/qcom,gpr.h
new file mode 100644
index 000000000000..3107da59319c
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,gpr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_QCOM_GPR_H
+#define __DT_BINDINGS_QCOM_GPR_H
+
+/* DOMAINS */
+
+#define GPR_DOMAIN_ID_MODEM 1
+#define GPR_DOMAIN_ID_ADSP 2
+#define GPR_DOMAIN_ID_APPS 3
+
+/* Static Services */
+
+#define GPR_APM_MODULE_IID 1
+#define GPR_PRM_MODULE_IID 2
+#define GPR_AMDB_MODULE_IID 3
+#define GPR_VCPM_MODULE_IID 4
+
+#endif /* __DT_BINDINGS_QCOM_GPR_H */
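A sketch of how the domain and static-service IDs might appear in a GPR node; the property names and node layout here are assumptions, not part of this patch:

	gpr {
		qcom,domain = <GPR_DOMAIN_ID_ADSP>;	/* assumed property name */

		service@1 {
			reg = <GPR_APM_MODULE_IID>;
		};
	};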
diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h
new file mode 100644
index 000000000000..668f199df9f0
--- /dev/null
+++ b/include/dt-bindings/soc/rockchip,vop2.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_ROCKCHIP_VOP2_H
+#define __DT_BINDINGS_ROCKCHIP_VOP2_H
+
+#define ROCKCHIP_VOP2_EP_RGB0 1
+#define ROCKCHIP_VOP2_EP_HDMI0 2
+#define ROCKCHIP_VOP2_EP_EDP0 3
+#define ROCKCHIP_VOP2_EP_MIPI0 4
+#define ROCKCHIP_VOP2_EP_LVDS0 5
+#define ROCKCHIP_VOP2_EP_MIPI1 6
+#define ROCKCHIP_VOP2_EP_LVDS1 7
+#define ROCKCHIP_VOP2_EP_HDMI1 8
+#define ROCKCHIP_VOP2_EP_EDP1 9
+#define ROCKCHIP_VOP2_EP_DP0 10
+#define ROCKCHIP_VOP2_EP_DP1 11
+
+#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */
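These endpoint IDs serve as 'reg' values on the VOP2 video-port output endpoints, so the driver and the DT agree on which connector each endpoint feeds. A sketch, with the remote &hdmi0_in_vp0 label assumed:

	vp0: port@0 {
		vp0_out_hdmi0: endpoint@2 {
			reg = <ROCKCHIP_VOP2_EP_HDMI0>;
			remote-endpoint = <&hdmi0_in_vp0>;
		};
	};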
diff --git a/include/dt-bindings/soc/samsung,boot-mode.h b/include/dt-bindings/soc/samsung,boot-mode.h
new file mode 100644
index 000000000000..47ef1cdd3916
--- /dev/null
+++ b/include/dt-bindings/soc/samsung,boot-mode.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Device Tree bindings for Samsung Boot Mode.
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+#define __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+
+/* Boot mode definitions for Exynos Auto v9 SoC */
+
+#define EXYNOSAUTOV9_BOOT_FASTBOOT 0xfa
+#define EXYNOSAUTOV9_BOOT_BOOTLOADER 0xfc
+#define EXYNOSAUTOV9_BOOT_RECOVERY 0xff
+
+#endif /* __DT_BINDINGS_SAMSUNG_BOOT_MODE_H */
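A consumer sketch using the generic syscon-reboot-mode binding; the offset value below is a placeholder:

	reboot-mode {
		compatible = "syscon-reboot-mode";
		offset = <0x0>;		/* placeholder offset */
		mode-fastboot = <EXYNOSAUTOV9_BOOT_FASTBOOT>;
		mode-bootloader = <EXYNOSAUTOV9_BOOT_BOOTLOADER>;
		mode-recovery = <EXYNOSAUTOV9_BOOT_RECOVERY>;
	};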
diff --git a/include/dt-bindings/soc/samsung,exynos-usi.h b/include/dt-bindings/soc/samsung,exynos-usi.h
new file mode 100644
index 000000000000..a01af169d249
--- /dev/null
+++ b/include/dt-bindings/soc/samsung,exynos-usi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree bindings for Samsung Exynos USI (Universal Serial Interface).
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
+#define __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
+
+#define USI_V2_NONE 0
+#define USI_V2_UART 1
+#define USI_V2_SPI 2
+#define USI_V2_I2C 3
+
+#endif /* __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H */
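A sketch of selecting the USI protocol with these constants; the samsung,mode property name and the compatible string are assumptions taken from the USI binding, not from this patch:

	usi_uart: usi@10820000 {
		compatible = "samsung,exynos850-usi";	/* assumed compatible */
		samsung,mode = <USI_V2_UART>;		/* assumed property name */
	};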
diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h
deleted file mode 100644
index df044705a5ec..000000000000
--- a/include/dt-bindings/soc/zte,pm_domains.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Linaro Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H
-#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H
-
-#define DM_ZX296718_SAPPU 0
-#define DM_ZX296718_VDE 1 /* g1v6 */
-#define DM_ZX296718_VCE 2 /* h1v6 */
-#define DM_ZX296718_HDE 3 /* g2v2 */
-#define DM_ZX296718_VIU 4
-#define DM_ZX296718_USB20 5
-#define DM_ZX296718_USB21 6
-#define DM_ZX296718_USB30 7
-#define DM_ZX296718_HSIC 8
-#define DM_ZX296718_GMAC 9
-#define DM_ZX296718_TS 10
-#define DM_ZX296718_VOU 11
-
-#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */
diff --git a/include/dt-bindings/sound/adi,adau1977.h b/include/dt-bindings/sound/adi,adau1977.h
new file mode 100644
index 000000000000..8eebec6570f2
--- /dev/null
+++ b/include/dt-bindings/sound/adi,adau1977.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __DT_BINDINGS_ADI_ADAU1977_H__
+#define __DT_BINDINGS_ADI_ADAU1977_H__
+
+#define ADAU1977_MICBIAS_5V0 0x0
+#define ADAU1977_MICBIAS_5V5 0x1
+#define ADAU1977_MICBIAS_6V0 0x2
+#define ADAU1977_MICBIAS_6V5 0x3
+#define ADAU1977_MICBIAS_7V0 0x4
+#define ADAU1977_MICBIAS_7V5 0x5
+#define ADAU1977_MICBIAS_8V0 0x6
+#define ADAU1977_MICBIAS_8V5 0x7
+#define ADAU1977_MICBIAS_9V0 0x8
+
+#endif /* __DT_BINDINGS_ADI_ADAU1977_H__ */
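A codec-node sketch using one of the MICBIAS levels above; the adi,micbias property name is an assumption from the ADAU1977 binding:

	audio-codec@11 {
		compatible = "adi,adau1977";
		reg = <0x11>;
		adi,micbias = <ADAU1977_MICBIAS_8V5>;	/* assumed property name */
	};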
diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h
index 3c3e16c0aadb..dc605c4bc224 100644
--- a/include/dt-bindings/sound/apq8016-lpass.h
+++ b/include/dt-bindings/sound/apq8016-lpass.h
@@ -2,9 +2,8 @@
#ifndef __DT_APQ8016_LPASS_H
#define __DT_APQ8016_LPASS_H
-#define MI2S_PRIMARY 0
-#define MI2S_SECONDARY 1
-#define MI2S_TERTIARY 2
-#define MI2S_QUATERNARY 3
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF IDs for LPASS */
#endif /* __DT_APQ8016_LPASS_H */
diff --git a/include/dt-bindings/sound/cs35l45.h b/include/dt-bindings/sound/cs35l45.h
new file mode 100644
index 000000000000..25386af18445
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l45.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * cs35l45.h -- CS35L45 ALSA SoC audio driver DT bindings header
+ *
+ * Copyright 2022 Cirrus Logic, Inc.
+ */
+
+#ifndef DT_CS35L45_H
+#define DT_CS35L45_H
+
+/*
+ * cirrus,asp-sdout-hiz-ctrl
+ *
+ * TX_HIZ_UNUSED: TX pin high-impedance during unused slots.
+ * TX_HIZ_DISABLED: TX pin high-impedance when all channels disabled.
+ */
+#define CS35L45_ASP_TX_HIZ_UNUSED 0x1
+#define CS35L45_ASP_TX_HIZ_DISABLED 0x2
+
+/*
+ * Optional GPIOX Sub-nodes:
+ * The cs35l45 node can have up to three "cirrus,gpio-ctrlX" ('X' = [1,2,3])
+ * sub-nodes for configuring the GPIO pins.
+ *
+ * - gpio-dir : GPIO pin direction. Valid only when 'gpio-ctrl'
+ * is 1.
+ * 0 = Output
+ * 1 = Input (Default)
+ *
+ * - gpio-lvl : GPIO level. Valid only when 'gpio-ctrl' is 1 and 'gpio-dir' is 0.
+ *
+ * 0 = Low (Default)
+ * 1 = High
+ *
+ * - gpio-op-cfg : GPIO output configuration. Valid only when 'gpio-ctrl' is 1
+ * and 'gpio-dir' is 0.
+ *
+ * 0 = CMOS (Default)
+ * 1 = Open Drain
+ *
+ * - gpio-pol : GPIO output polarity select. Valid only when 'gpio-ctrl' is 1
+ * and 'gpio-dir' is 0.
+ *
+ * 0 = Non-inverted, Active High (Default)
+ * 1 = Inverted, Active Low
+ *
+ * - gpio-invert : Defines the polarity of the GPIO pin if configured
+ * as input.
+ *
+ * 0 = Not inverted (Default)
+ * 1 = Inverted
+ *
+ * - gpio-ctrl : Defines the function of the GPIO pin.
+ *
+ * GPIO1:
+ * 0 = High impedance input (Default)
+ * 1 = Pin acts as a GPIO, direction controlled by 'gpio-dir'
+ * 2 = Pin acts as MDSYNC, direction controlled by MDSYNC
+ * 3-7 = Reserved
+ *
+ * GPIO2:
+ * 0 = High impedance input (Default)
+ * 1 = Pin acts as a GPIO, direction controlled by 'gpio-dir'
+ * 2 = Pin acts as open drain INT
+ * 3 = Reserved
+ * 4 = Pin acts as push-pull output INT. Active low.
+ * 5 = Pin acts as push-pull output INT. Active high.
+ * 6,7 = Reserved
+ *
+ * GPIO3:
+ * 0 = High impedance input (Default)
+ * 1 = Pin acts as a GPIO, direction controlled by 'gpio-dir'
+ * 2-7 = Reserved
+ */
+#define CS35L45_NUM_GPIOS 0x3
+
+#endif /* DT_CS35L45_H */
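A sketch of one GPIO sub-node built only from the properties documented in the comment above, configuring GPIO2 as a CMOS, active-high output driven low:

	cirrus,gpio-ctrl2 {
		gpio-ctrl = <0x1>;	/* pin acts as a GPIO */
		gpio-dir = <0x0>;	/* output */
		gpio-lvl = <0x0>;	/* drive low */
		gpio-op-cfg = <0x0>;	/* CMOS */
		gpio-pol = <0x0>;	/* non-inverted, active high */
	};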
diff --git a/include/dt-bindings/sound/microchip,pdmc.h b/include/dt-bindings/sound/microchip,pdmc.h
new file mode 100644
index 000000000000..96cde94ce74f
--- /dev/null
+++ b/include/dt-bindings/sound/microchip,pdmc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_MICROCHIP_PDMC_H__
+#define __DT_BINDINGS_MICROCHIP_PDMC_H__
+
+/* PDM microphone's pin placement */
+#define MCHP_PDMC_DS0 0
+#define MCHP_PDMC_DS1 1
+
+/* PDM microphone clock edge sampling */
+#define MCHP_PDMC_CLK_POSITIVE 0
+#define MCHP_PDMC_CLK_NEGATIVE 1
+
+#endif /* __DT_BINDINGS_MICROCHIP_PDMC_H__ */
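A sketch pairing pin placement with sampling edge for two microphones; the microchip,mic-pos property name is an assumption from the PDMC binding:

	pdmc: sound@e1608000 {
		microchip,mic-pos = <MCHP_PDMC_DS0 MCHP_PDMC_CLK_POSITIVE>,
				    <MCHP_PDMC_DS1 MCHP_PDMC_CLK_NEGATIVE>;
	};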
diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h
new file mode 100644
index 000000000000..a9404c3b8884
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,lpass.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_QCOM_LPASS_H
+#define __DT_QCOM_LPASS_H
+
+#define MI2S_PRIMARY 0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY 2
+#define MI2S_QUATERNARY 3
+#define MI2S_QUINARY 4
+
+#define LPASS_DP_RX 5
+
+#define LPASS_CDC_DMA_RX0 6
+#define LPASS_CDC_DMA_RX1 7
+#define LPASS_CDC_DMA_RX2 8
+#define LPASS_CDC_DMA_RX3 9
+#define LPASS_CDC_DMA_RX4 10
+#define LPASS_CDC_DMA_RX5 11
+#define LPASS_CDC_DMA_RX6 12
+#define LPASS_CDC_DMA_RX7 13
+#define LPASS_CDC_DMA_RX8 14
+#define LPASS_CDC_DMA_RX9 15
+
+#define LPASS_CDC_DMA_TX0 16
+#define LPASS_CDC_DMA_TX1 17
+#define LPASS_CDC_DMA_TX2 18
+#define LPASS_CDC_DMA_TX3 19
+#define LPASS_CDC_DMA_TX4 20
+#define LPASS_CDC_DMA_TX5 21
+#define LPASS_CDC_DMA_TX6 22
+#define LPASS_CDC_DMA_TX7 23
+#define LPASS_CDC_DMA_TX8 24
+
+#define LPASS_CDC_DMA_VA_TX0 25
+#define LPASS_CDC_DMA_VA_TX1 26
+#define LPASS_CDC_DMA_VA_TX2 27
+#define LPASS_CDC_DMA_VA_TX3 28
+#define LPASS_CDC_DMA_VA_TX4 29
+#define LPASS_CDC_DMA_VA_TX5 30
+#define LPASS_CDC_DMA_VA_TX6 31
+#define LPASS_CDC_DMA_VA_TX7 32
+#define LPASS_CDC_DMA_VA_TX8 33
+
+#define LPASS_MCLK0 0
+
+#endif /* __DT_QCOM_LPASS_H */
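These AIF IDs are what a machine driver's dai-link references on the CPU side; a minimal sketch, assuming a &lpass_cpu label for the LPASS CPU DAI node:

	sound {
		dai-link {
			cpu {
				sound-dai = <&lpass_cpu MI2S_PRIMARY>;
			};
		};
	};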
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index 1df06f8ad5c3..9d5d89cfabcf 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -2,111 +2,8 @@
#ifndef __DT_BINDINGS_Q6_AFE_H__
#define __DT_BINDINGS_Q6_AFE_H__
-/* Audio Front End (AFE) virtual ports IDs */
-#define HDMI_RX 1
-#define SLIMBUS_0_RX 2
-#define SLIMBUS_0_TX 3
-#define SLIMBUS_1_RX 4
-#define SLIMBUS_1_TX 5
-#define SLIMBUS_2_RX 6
-#define SLIMBUS_2_TX 7
-#define SLIMBUS_3_RX 8
-#define SLIMBUS_3_TX 9
-#define SLIMBUS_4_RX 10
-#define SLIMBUS_4_TX 11
-#define SLIMBUS_5_RX 12
-#define SLIMBUS_5_TX 13
-#define SLIMBUS_6_RX 14
-#define SLIMBUS_6_TX 15
-#define PRIMARY_MI2S_RX 16
-#define PRIMARY_MI2S_TX 17
-#define SECONDARY_MI2S_RX 18
-#define SECONDARY_MI2S_TX 19
-#define TERTIARY_MI2S_RX 20
-#define TERTIARY_MI2S_TX 21
-#define QUATERNARY_MI2S_RX 22
-#define QUATERNARY_MI2S_TX 23
-#define PRIMARY_TDM_RX_0 24
-#define PRIMARY_TDM_TX_0 25
-#define PRIMARY_TDM_RX_1 26
-#define PRIMARY_TDM_TX_1 27
-#define PRIMARY_TDM_RX_2 28
-#define PRIMARY_TDM_TX_2 29
-#define PRIMARY_TDM_RX_3 30
-#define PRIMARY_TDM_TX_3 31
-#define PRIMARY_TDM_RX_4 32
-#define PRIMARY_TDM_TX_4 33
-#define PRIMARY_TDM_RX_5 34
-#define PRIMARY_TDM_TX_5 35
-#define PRIMARY_TDM_RX_6 36
-#define PRIMARY_TDM_TX_6 37
-#define PRIMARY_TDM_RX_7 38
-#define PRIMARY_TDM_TX_7 39
-#define SECONDARY_TDM_RX_0 40
-#define SECONDARY_TDM_TX_0 41
-#define SECONDARY_TDM_RX_1 42
-#define SECONDARY_TDM_TX_1 43
-#define SECONDARY_TDM_RX_2 44
-#define SECONDARY_TDM_TX_2 45
-#define SECONDARY_TDM_RX_3 46
-#define SECONDARY_TDM_TX_3 47
-#define SECONDARY_TDM_RX_4 48
-#define SECONDARY_TDM_TX_4 49
-#define SECONDARY_TDM_RX_5 50
-#define SECONDARY_TDM_TX_5 51
-#define SECONDARY_TDM_RX_6 52
-#define SECONDARY_TDM_TX_6 53
-#define SECONDARY_TDM_RX_7 54
-#define SECONDARY_TDM_TX_7 55
-#define TERTIARY_TDM_RX_0 56
-#define TERTIARY_TDM_TX_0 57
-#define TERTIARY_TDM_RX_1 58
-#define TERTIARY_TDM_TX_1 59
-#define TERTIARY_TDM_RX_2 60
-#define TERTIARY_TDM_TX_2 61
-#define TERTIARY_TDM_RX_3 62
-#define TERTIARY_TDM_TX_3 63
-#define TERTIARY_TDM_RX_4 64
-#define TERTIARY_TDM_TX_4 65
-#define TERTIARY_TDM_RX_5 66
-#define TERTIARY_TDM_TX_5 67
-#define TERTIARY_TDM_RX_6 68
-#define TERTIARY_TDM_TX_6 69
-#define TERTIARY_TDM_RX_7 70
-#define TERTIARY_TDM_TX_7 71
-#define QUATERNARY_TDM_RX_0 72
-#define QUATERNARY_TDM_TX_0 73
-#define QUATERNARY_TDM_RX_1 74
-#define QUATERNARY_TDM_TX_1 75
-#define QUATERNARY_TDM_RX_2 76
-#define QUATERNARY_TDM_TX_2 77
-#define QUATERNARY_TDM_RX_3 78
-#define QUATERNARY_TDM_TX_3 79
-#define QUATERNARY_TDM_RX_4 80
-#define QUATERNARY_TDM_TX_4 81
-#define QUATERNARY_TDM_RX_5 82
-#define QUATERNARY_TDM_TX_5 83
-#define QUATERNARY_TDM_RX_6 84
-#define QUATERNARY_TDM_TX_6 85
-#define QUATERNARY_TDM_RX_7 86
-#define QUATERNARY_TDM_TX_7 87
-#define QUINARY_TDM_RX_0 88
-#define QUINARY_TDM_TX_0 89
-#define QUINARY_TDM_RX_1 90
-#define QUINARY_TDM_TX_1 91
-#define QUINARY_TDM_RX_2 92
-#define QUINARY_TDM_TX_2 93
-#define QUINARY_TDM_RX_3 94
-#define QUINARY_TDM_TX_3 95
-#define QUINARY_TDM_RX_4 96
-#define QUINARY_TDM_TX_4 97
-#define QUINARY_TDM_RX_5 98
-#define QUINARY_TDM_TX_5 99
-#define QUINARY_TDM_RX_6 100
-#define QUINARY_TDM_TX_6 101
-#define QUINARY_TDM_RX_7 102
-#define QUINARY_TDM_TX_7 103
-#define DISPLAY_PORT_RX 104
+/* This file exists for backward-compatibility reasons; please do not delete it! */
-#endif /* __DT_BINDINGS_Q6_AFE_H__ */
+#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
+#endif /* __DT_BINDINGS_Q6_AFE_H__ */
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
new file mode 100644
index 000000000000..39f203256c4f
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AUDIO_PORTS_H__
+#define __DT_BINDINGS_Q6_AUDIO_PORTS_H__
+
+/* LPASS Audio virtual ports IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+#define DISPLAY_PORT_RX 104
+#define WSA_CODEC_DMA_RX_0 105
+#define WSA_CODEC_DMA_TX_0 106
+#define WSA_CODEC_DMA_RX_1 107
+#define WSA_CODEC_DMA_TX_1 108
+#define WSA_CODEC_DMA_TX_2 109
+#define VA_CODEC_DMA_TX_0 110
+#define VA_CODEC_DMA_TX_1 111
+#define VA_CODEC_DMA_TX_2 112
+#define RX_CODEC_DMA_RX_0 113
+#define TX_CODEC_DMA_TX_0 114
+#define RX_CODEC_DMA_RX_1 115
+#define TX_CODEC_DMA_TX_1 116
+#define RX_CODEC_DMA_RX_2 117
+#define TX_CODEC_DMA_TX_2 118
+#define RX_CODEC_DMA_RX_3 119
+#define TX_CODEC_DMA_TX_3 120
+#define RX_CODEC_DMA_RX_4 121
+#define TX_CODEC_DMA_TX_4 122
+#define RX_CODEC_DMA_RX_5 123
+#define TX_CODEC_DMA_TX_5 124
+#define RX_CODEC_DMA_RX_6 125
+#define RX_CODEC_DMA_RX_7 126
+#define QUINARY_MI2S_RX 127
+#define QUINARY_MI2S_TX 128
+#define DISPLAY_PORT_RX_0 DISPLAY_PORT_RX
+#define DISPLAY_PORT_RX_1 129
+#define DISPLAY_PORT_RX_2 130
+#define DISPLAY_PORT_RX_3 131
+#define DISPLAY_PORT_RX_4 132
+#define DISPLAY_PORT_RX_5 133
+#define DISPLAY_PORT_RX_6 134
+#define DISPLAY_PORT_RX_7 135
+
+#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
+#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
+#define LPASS_CLK_ID_SEC_MI2S_IBIT 3
+#define LPASS_CLK_ID_SEC_MI2S_EBIT 4
+#define LPASS_CLK_ID_TER_MI2S_IBIT 5
+#define LPASS_CLK_ID_TER_MI2S_EBIT 6
+#define LPASS_CLK_ID_QUAD_MI2S_IBIT 7
+#define LPASS_CLK_ID_QUAD_MI2S_EBIT 8
+#define LPASS_CLK_ID_SPEAKER_I2S_IBIT 9
+#define LPASS_CLK_ID_SPEAKER_I2S_EBIT 10
+#define LPASS_CLK_ID_SPEAKER_I2S_OSR 11
+#define LPASS_CLK_ID_QUI_MI2S_IBIT 12
+#define LPASS_CLK_ID_QUI_MI2S_EBIT 13
+#define LPASS_CLK_ID_SEN_MI2S_IBIT 14
+#define LPASS_CLK_ID_SEN_MI2S_EBIT 15
+#define LPASS_CLK_ID_INT0_MI2S_IBIT 16
+#define LPASS_CLK_ID_INT1_MI2S_IBIT 17
+#define LPASS_CLK_ID_INT2_MI2S_IBIT 18
+#define LPASS_CLK_ID_INT3_MI2S_IBIT 19
+#define LPASS_CLK_ID_INT4_MI2S_IBIT 20
+#define LPASS_CLK_ID_INT5_MI2S_IBIT 21
+#define LPASS_CLK_ID_INT6_MI2S_IBIT 22
+#define LPASS_CLK_ID_QUI_MI2S_OSR 23
+#define LPASS_CLK_ID_PRI_PCM_IBIT 24
+#define LPASS_CLK_ID_PRI_PCM_EBIT 25
+#define LPASS_CLK_ID_SEC_PCM_IBIT 26
+#define LPASS_CLK_ID_SEC_PCM_EBIT 27
+#define LPASS_CLK_ID_TER_PCM_IBIT 28
+#define LPASS_CLK_ID_TER_PCM_EBIT 29
+#define LPASS_CLK_ID_QUAD_PCM_IBIT 30
+#define LPASS_CLK_ID_QUAD_PCM_EBIT 31
+#define LPASS_CLK_ID_QUIN_PCM_IBIT 32
+#define LPASS_CLK_ID_QUIN_PCM_EBIT 33
+#define LPASS_CLK_ID_QUI_PCM_OSR 34
+#define LPASS_CLK_ID_PRI_TDM_IBIT 35
+#define LPASS_CLK_ID_PRI_TDM_EBIT 36
+#define LPASS_CLK_ID_SEC_TDM_IBIT 37
+#define LPASS_CLK_ID_SEC_TDM_EBIT 38
+#define LPASS_CLK_ID_TER_TDM_IBIT 39
+#define LPASS_CLK_ID_TER_TDM_EBIT 40
+#define LPASS_CLK_ID_QUAD_TDM_IBIT 41
+#define LPASS_CLK_ID_QUAD_TDM_EBIT 42
+#define LPASS_CLK_ID_QUIN_TDM_IBIT 43
+#define LPASS_CLK_ID_QUIN_TDM_EBIT 44
+#define LPASS_CLK_ID_QUIN_TDM_OSR 45
+#define LPASS_CLK_ID_MCLK_1 46
+#define LPASS_CLK_ID_MCLK_2 47
+#define LPASS_CLK_ID_MCLK_3 48
+#define LPASS_CLK_ID_MCLK_4 49
+#define LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 50
+#define LPASS_CLK_ID_INT_MCLK_0 51
+#define LPASS_CLK_ID_INT_MCLK_1 52
+#define LPASS_CLK_ID_MCLK_5 53
+#define LPASS_CLK_ID_WSA_CORE_MCLK 54
+#define LPASS_CLK_ID_WSA_CORE_NPL_MCLK 55
+#define LPASS_CLK_ID_VA_CORE_MCLK 56
+#define LPASS_CLK_ID_TX_CORE_MCLK 57
+#define LPASS_CLK_ID_TX_CORE_NPL_MCLK 58
+#define LPASS_CLK_ID_RX_CORE_MCLK 59
+#define LPASS_CLK_ID_RX_CORE_NPL_MCLK 60
+#define LPASS_CLK_ID_VA_CORE_2X_MCLK 61
+/* Clock ID for MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_MCLK 62
+/* Clock ID for NPL MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_2X_MCLK 63
+/* Clock ID for RX Core TX MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_MCLK 64
+/* Clock ID for RX CORE TX 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_2X_MCLK 65
+/* Clock ID for WSA core TX MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_MCLK 66
+/* Clock ID for WSA core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK 67
+/* Clock ID for WSA2 core TX MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_MCLK 68
+/* Clock ID for WSA2 core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK 69
+/* Clock ID for RX CORE MCLK2 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_MCLK2_2X_MCLK 70
+
+#define LPASS_HW_AVTIMER_VOTE 101
+#define LPASS_HW_MACRO_VOTE 102
+#define LPASS_HW_DCODEC_VOTE 103
+
+#define Q6AFE_MAX_CLK_ID 104
+
+#define LPASS_CLK_ATTRIBUTE_INVALID 0x0
+#define LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
+#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
+#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+
+#endif /* __DT_BINDINGS_Q6_AUDIO_PORTS_H__ */
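
The TDM port IDs above follow a strict interleave: within each interface, RX_n sits at base + 2n and TX_n at base + 2n + 1. A minimal compile-time sketch, assuming only the new header (the helper macro names are invented), that documents the pattern:

	#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>

	/* Hypothetical helpers: RX_n at base + 2n, TX_n at base + 2n + 1. */
	#define TDM_RX_PORT(base, n)	((base) + 2 * (n))
	#define TDM_TX_PORT(base, n)	((base) + 2 * (n) + 1)

	_Static_assert(TDM_RX_PORT(PRIMARY_TDM_RX_0, 3) == PRIMARY_TDM_RX_3,
		       "RX ports interleave by two");
	_Static_assert(TDM_TX_PORT(SECONDARY_TDM_RX_0, 7) == SECONDARY_TDM_TX_7,
		       "each TX port sits one above its RX twin");
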
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
new file mode 100644
index 000000000000..f5e9f1db091e
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD9335_H
+#define __DT_SOUND_QCOM_WCD9335_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+#define NUM_CODEC_DAIS 7
+
+#endif
diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h
index 154c9b4414f2..655f6946388a 100644
--- a/include/dt-bindings/sound/rt5640.h
+++ b/include/dt-bindings/sound/rt5640.h
@@ -16,6 +16,7 @@
#define RT5640_JD_SRC_GPIO2 4
#define RT5640_JD_SRC_GPIO3 5
#define RT5640_JD_SRC_GPIO4 6
+#define RT5640_JD_SRC_HDA_HEADER 7
#define RT5640_OVCD_SF_0P5 0
#define RT5640_OVCD_SF_0P75 1
diff --git a/include/dt-bindings/sound/sc7180-lpass.h b/include/dt-bindings/sound/sc7180-lpass.h
new file mode 100644
index 000000000000..5c1ee8b36b19
--- /dev/null
+++ b/include/dt-bindings/sound/sc7180-lpass.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_SC7180_LPASS_H
+#define __DT_SC7180_LPASS_H
+
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF IDs for LPASS */
+
+#endif /* __DT_SC7180_LPASS_H */
diff --git a/include/dt-bindings/sound/tlv320adc3xxx.h b/include/dt-bindings/sound/tlv320adc3xxx.h
new file mode 100644
index 000000000000..ec988439da20
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320adc3xxx.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Devicetree bindings definitions for tlv320adc3xxx driver.
+ *
+ * Copyright (C) 2021 Axis Communications AB
+ */
+#ifndef __DT_TLV320ADC3XXX_H
+#define __DT_TLV320ADC3XXX_H
+
+#define ADC3XXX_GPIO_DISABLED 0 /* I/O buffers powered down */
+#define ADC3XXX_GPIO_INPUT 1 /* Various non-GPIO inputs */
+#define ADC3XXX_GPIO_GPI 2 /* General purpose input */
+#define ADC3XXX_GPIO_GPO 3 /* General purpose output */
+#define ADC3XXX_GPIO_CLKOUT 4 /* Source set in reg. CLKOUT_MUX */
+#define ADC3XXX_GPIO_INT1 5 /* INT1 output */
+#define ADC3XXX_GPIO_INT2 6 /* INT2 output */
+/* value 7 is reserved */
+#define ADC3XXX_GPIO_SECONDARY_BCLK 8 /* Codec interface secondary BCLK */
+#define ADC3XXX_GPIO_SECONDARY_WCLK 9 /* Codec interface secondary WCLK */
+#define ADC3XXX_GPIO_ADC_MOD_CLK 10 /* Clock output for digital mics */
+/* values 11-15 reserved */
+
+#define ADC3XXX_MICBIAS_OFF 0 /* Micbias pin powered off */
+#define ADC3XXX_MICBIAS_2_0V 1 /* Micbias pin set to 2.0V */
+#define ADC3XXX_MICBIAS_2_5V 2 /* Micbias pin set to 2.5V */
+#define ADC3XXX_MICBIAS_AVDD 3 /* Use AVDD voltage for micbias pin */
+
+#endif /* __DT_TLV320ADC3XXX_H */
diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h
deleted file mode 100644
index c6895a18a455..000000000000
--- a/include/dt-bindings/sound/tlv320aic31xx-micbias.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_TLV320AIC31XX_MICBIAS_H
-#define __DT_TLV320AIC31XX_MICBIAS_H
-
-#define MICBIAS_2_0V 1
-#define MICBIAS_2_5V 2
-#define MICBIAS_AVDDV 3
-
-#endif /* __DT_TLV320AIC31XX_MICBIAS_H */
diff --git a/include/dt-bindings/sound/tlv320aic31xx.h b/include/dt-bindings/sound/tlv320aic31xx.h
new file mode 100644
index 000000000000..4a80238ab250
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320aic31xx.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_TLV320AIC31XX_H
+#define __DT_TLV320AIC31XX_H
+
+#define MICBIAS_2_0V 1
+#define MICBIAS_2_5V 2
+#define MICBIAS_AVDDV 3
+
+#define PLL_CLKIN_MCLK 0x00
+#define PLL_CLKIN_BCLK 0x01
+#define PLL_CLKIN_GPIO1 0x02
+#define PLL_CLKIN_DIN 0x03
+
+#endif /* __DT_TLV320AIC31XX_H */
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
new file mode 100644
index 000000000000..997e2f55128a
--- /dev/null
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Balsam CHIHI <bchihi@baylibre.com>
+ */
+
+#ifndef __MEDIATEK_LVTS_DT_H
+#define __MEDIATEK_LVTS_DT_H
+
+#define MT7988_CPU_0 0
+#define MT7988_CPU_1 1
+#define MT7988_ETH2P5G_0 2
+#define MT7988_ETH2P5G_1 3
+#define MT7988_TOPS_0 4
+#define MT7988_TOPS_1 5
+#define MT7988_ETHWARP_0 6
+#define MT7988_ETHWARP_1 7
+
+#define MT8195_MCU_BIG_CPU0 0
+#define MT8195_MCU_BIG_CPU1 1
+#define MT8195_MCU_BIG_CPU2 2
+#define MT8195_MCU_BIG_CPU3 3
+#define MT8195_MCU_LITTLE_CPU0 4
+#define MT8195_MCU_LITTLE_CPU1 5
+#define MT8195_MCU_LITTLE_CPU2 6
+#define MT8195_MCU_LITTLE_CPU3 7
+
+#define MT8195_AP_VPU0 8
+#define MT8195_AP_VPU1 9
+#define MT8195_AP_GPU0 10
+#define MT8195_AP_GPU1 11
+#define MT8195_AP_VDEC 12
+#define MT8195_AP_IMG 13
+#define MT8195_AP_INFRA 14
+#define MT8195_AP_CAM0 15
+#define MT8195_AP_CAM1 16
+
+#define MT8192_MCU_BIG_CPU0 0
+#define MT8192_MCU_BIG_CPU1 1
+#define MT8192_MCU_BIG_CPU2 2
+#define MT8192_MCU_BIG_CPU3 3
+#define MT8192_MCU_LITTLE_CPU0 4
+#define MT8192_MCU_LITTLE_CPU1 5
+#define MT8192_MCU_LITTLE_CPU2 6
+#define MT8192_MCU_LITTLE_CPU3 7
+
+#define MT8192_AP_VPU0 8
+#define MT8192_AP_VPU1 9
+#define MT8192_AP_GPU0 10
+#define MT8192_AP_GPU1 11
+#define MT8192_AP_INFRA 12
+#define MT8192_AP_CAM 13
+#define MT8192_AP_MD0 14
+#define MT8192_AP_MD1 15
+#define MT8192_AP_MD2 16
+
+#endif /* __MEDIATEK_LVTS_DT_H */
diff --git a/include/dt-bindings/thermal/tegra234-bpmp-thermal.h b/include/dt-bindings/thermal/tegra234-bpmp-thermal.h
new file mode 100644
index 000000000000..934787950932
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra234-bpmp-thermal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for binding nvidia,tegra234-bpmp-thermal.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA234_BPMP_THERMAL_H
+#define _DT_BINDINGS_THERMAL_TEGRA234_BPMP_THERMAL_H
+
+#define TEGRA234_BPMP_THERMAL_ZONE_CPU 0
+#define TEGRA234_BPMP_THERMAL_ZONE_GPU 1
+#define TEGRA234_BPMP_THERMAL_ZONE_CV0 2
+#define TEGRA234_BPMP_THERMAL_ZONE_CV1 3
+#define TEGRA234_BPMP_THERMAL_ZONE_CV2 4
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC0 5
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC1 6
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC2 7
+#define TEGRA234_BPMP_THERMAL_ZONE_TJ_MAX 8
+
+#endif
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index 985f2bbd4d24..e6526b138174 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -85,4 +85,384 @@
PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
PDO_PPS_APDO_MAX_CURR(max_ma))
-#endif /* __DT_POWER_DELIVERY_H */
+/*
+ * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
+ * Version 1.2"
+ * Initial current capability of the new source when vSafe5V is applied.
+ */
+#define FRS_DEFAULT_POWER 1
+#define FRS_5V_1P5A 2
+#define FRS_5V_3A 3
+
+/*
+ * SVDM Identity Header
+ * --------------------
+ * <31> :: data capable as a USB host
+ * <30> :: data capable as a USB device
+ * <29:27> :: product type (UFP / Cable / VPD)
+ * <26> :: modal operation supported (1b == yes)
+ * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <20:16> :: Reserved, Shall be set to zero
+ * <15:0> :: USB-IF assigned VID for this cable vendor
+ */
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF 0
+
+/* SOP Product Type (UFP) */
+#define IDH_PTYPE_NOT_UFP 0
+#define IDH_PTYPE_HUB 1
+#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PSD 3
+#define IDH_PTYPE_AMA 5
+
+/* SOP' Product Type (Cable Plug / VPD) */
+#define IDH_PTYPE_NOT_CABLE 0
+#define IDH_PTYPE_PCABLE 3
+#define IDH_PTYPE_ACABLE 4
+#define IDH_PTYPE_VPD 6
+
+/* SOP Product Type (DFP) */
+#define IDH_PTYPE_NOT_DFP 0
+#define IDH_PTYPE_DFP_HUB 1
+#define IDH_PTYPE_DFP_HOST 2
+#define IDH_PTYPE_DFP_PB 3
+
+#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \
+ | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \
+ | ((vid) & 0xffff))
+
+/*
+ * Cert Stat VDO
+ * -------------
+ * <31:0> : USB-IF assigned XID for this cable
+ */
+#define VDO_CERT(xid) ((xid) & 0xffffffff)
+
+/*
+ * Product VDO
+ * -----------
+ * <31:16> : USB Product ID
+ * <15:0> : USB bcdDevice
+ */
+#define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff))
+
+/*
+ * UFP VDO (PD Revision 3.0+ only)
+ * --------
+ * <31:29> :: UFP VDO version
+ * <28> :: Reserved
+ * <27:24> :: Device capability
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:11> :: Reserved
+ * <10:8> :: Vconn power (AMA only)
+ * <7> :: Vconn required (AMA only, 0b == no, 1b == yes)
+ * <6> :: Vbus required (AMA only, 0b == yes, 1b == no)
+ * <5:3> :: Alternate modes
+ * <2:0> :: USB highest speed
+ */
+/* UFP VDO Version */
+#define UFP_VDO_VER1_2 2
+
+/* Device Capability */
+#define DEV_USB2_CAPABLE (1 << 0)
+#define DEV_USB2_BILLBOARD (1 << 1)
+#define DEV_USB3_CAPABLE (1 << 2)
+#define DEV_USB4_CAPABLE (1 << 3)
+
+/* Connector Type */
+#define UFP_RECEPTACLE 2
+#define UFP_CAPTIVE 3
+
+/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+
+/* Vconn Required (AMA only) */
+#define AMA_VCONN_NOT_REQ 0
+#define AMA_VCONN_REQ 1
+
+/* Vbus Required (AMA only) */
+#define AMA_VBUS_REQ 0
+#define AMA_VBUS_NOT_REQ 1
+
+/* Alternate Modes */
+#define UFP_ALTMODE_NOT_SUPP 0
+#define UFP_ALTMODE_TBT3 (1 << 0)
+#define UFP_ALTMODE_RECFG (1 << 1)
+#define UFP_ALTMODE_NO_RECFG (1 << 2)
+
+/* USB Highest Speed */
+#define UFP_USB2_ONLY 0
+#define UFP_USB32_GEN1 1
+#define UFP_USB32_4_GEN2 2
+#define UFP_USB4_GEN3 3
+
+#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd) \
+ (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \
+ | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \
+ | ((spd) & 0x7))
+
+/*
+ * DFP VDO (PD Revision 3.0+ only)
+ * --------
+ * <31:29> :: DFP VDO version
+ * <28:27> :: Reserved
+ * <26:24> :: Host capability
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:5> :: Reserved
+ * <4:0> :: Port number
+ */
+#define DFP_VDO_VER1_1 1
+#define HOST_USB2_CAPABLE (1 << 0)
+#define HOST_USB3_CAPABLE (1 << 1)
+#define HOST_USB4_CAPABLE (1 << 2)
+#define DFP_RECEPTACLE 2
+#define DFP_CAPTIVE 3
+
+#define VDO_DFP(ver, cap, conn, pnum) \
+ (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \
+ | ((pnum) & 0x1f))
+
+/*
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C/Captive (00b == A, 01b == B, 10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns (~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns (~1m length))
+ * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8:7> :: Reserved, Shall be set to zero
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4:3> :: Reserved, Shall be set to zero
+ * <2:0> :: USB highest speed
+ *
+ * Active Cable VDO 1 (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Connector type (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns (~1m length))
+ * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8> :: SBU supported (0b == supported, 1b == not supported)
+ * <7> :: SBU type (0b == passive, 1b == active)
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB highest speed
+ */
+/* Cable VDO Version */
+#define CABLE_VDO_VER1_0 0
+#define CABLE_VDO_VER1_3 3
+
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
+#define CABLE_CTYPE 2
+#define CABLE_CAPTIVE 3
+
+/* Cable Latency */
+#define CABLE_LATENCY_1M 1
+#define CABLE_LATENCY_2M 2
+#define CABLE_LATENCY_3M 3
+#define CABLE_LATENCY_4M 4
+#define CABLE_LATENCY_5M 5
+#define CABLE_LATENCY_6M 6
+#define CABLE_LATENCY_7M 7
+#define CABLE_LATENCY_7M_PLUS 8
+
+/* Cable Termination Type */
+#define PCABLE_VCONN_NOT_REQ 0
+#define PCABLE_VCONN_REQ 1
+#define ACABLE_ONE_END 2
+#define ACABLE_BOTH_END 3
+
+/* Maximum Vbus Voltage */
+#define CABLE_MAX_VBUS_20V 0
+#define CABLE_MAX_VBUS_30V 1
+#define CABLE_MAX_VBUS_40V 2
+#define CABLE_MAX_VBUS_50V 3
+
+/* Active Cable SBU Supported/Type */
+#define ACABLE_SBU_SUPP 0
+#define ACABLE_SBU_NOT_SUPP 1
+#define ACABLE_SBU_PASSIVE 0
+#define ACABLE_SBU_ACTIVE 1
+
+/* Vbus Current Handling Capability */
+#define CABLE_CURR_DEF 0
+#define CABLE_CURR_3A 1
+#define CABLE_CURR_5A 2
+
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+
+/* USB Highest Speed */
+#define CABLE_USB2_ONLY 0
+#define CABLE_USB32_GEN1 1
+#define CABLE_USB32_4_GEN2 2
+#define CABLE_USB4_GEN3 3
+
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
+#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
+#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
+ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+
+/*
+ * Active Cable VDO 2
+ * ---------
+ * <31:24> :: Maximum operating temperature
+ * <23:16> :: Shutdown temperature
+ * <15> :: Reserved, Shall be set to zero
+ * <14:12> :: U3/CLd power
+ * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S)
+ * <10> :: Physical connection (0b == copper, 1b == optical)
+ * <9> :: Active element (0b == redriver, 1b == retimer)
+ * <8> :: USB4 supported (0b == yes, 1b == no)
+ * <7:6> :: USB2 hub hops consumed
+ * <5> :: USB2 supported (0b == yes, 1b == no)
+ * <4> :: USB3.2 supported (0b == yes, 1b == no)
+ * <3> :: USB lanes supported (0b == one lane, 1b == two lanes)
+ * <2> :: Optically isolated active cable (0b == no, 1b == yes)
+ * <1> :: Reserved, Shall be set to zero
+ * <0> :: USB gen (0b == gen1, 1b == gen2+)
+ */
+/* U3/CLd Power */
+#define ACAB2_U3_CLD_10MW_PLUS 0
+#define ACAB2_U3_CLD_10MW 1
+#define ACAB2_U3_CLD_5MW 2
+#define ACAB2_U3_CLD_1MW 3
+#define ACAB2_U3_CLD_500UW 4
+#define ACAB2_U3_CLD_200UW 5
+#define ACAB2_U3_CLD_50UW 6
+
+/* Other Active Cable VDO 2 Fields */
+#define ACAB2_U3U0_DIRECT 0
+#define ACAB2_U3U0_U3S 1
+#define ACAB2_PHY_COPPER 0
+#define ACAB2_PHY_OPTICAL 1
+#define ACAB2_REDRIVER 0
+#define ACAB2_RETIMER 1
+#define ACAB2_USB4_SUPP 0
+#define ACAB2_USB4_NOT_SUPP 1
+#define ACAB2_USB2_SUPP 0
+#define ACAB2_USB2_NOT_SUPP 1
+#define ACAB2_USB32_SUPP 0
+#define ACAB2_USB32_NOT_SUPP 1
+#define ACAB2_LANES_ONE 0
+#define ACAB2_LANES_TWO 1
+#define ACAB2_OPT_ISO_NO 0
+#define ACAB2_OPT_ISO_YES 1
+#define ACAB2_GEN_1 0
+#define ACAB2_GEN_2_PLUS 1
+
+#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \
+ (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \
+ | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8 \
+ | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
+ | (iso) << 2 | (gen))
+
+/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
+ * VPD VDO
+ * ---------
+ * <31:28> :: HW version
+ * <27:24> :: FW version
+ * <23:21> :: VDO version
+ * <20:17> :: Reserved, Shall be set to zero
+ * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <14> :: Charge through current support (0b == 3A, 1b == 5A)
+ * <13> :: Reserved, Shall be set to zero
+ * <12:7> :: Vbus impedance
+ * <6:1> :: Ground impedance
+ * <0> :: Charge through support (0b == no, 1b == yes)
+ */
+#define VPD_VDO_VER1_0 0
+#define VPD_MAX_VBUS_20V 0
+#define VPD_MAX_VBUS_30V 1
+#define VPD_MAX_VBUS_40V 2
+#define VPD_MAX_VBUS_50V 3
+#define VPDCT_CURR_3A 0
+#define VPDCT_CURR_5A 1
+#define VPDCT_NOT_SUPP 0
+#define VPDCT_SUPP 1
+
+#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \
+ | ((gi) & 0x3f) << 1 | (ct))
+
+#endif /* __DT_POWER_DELIVERY_H */
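
The bit-layout comments above map one-to-one onto the constructor macros. A hedged sketch (all field values invented) that composes a Passive Cable VDO and checks one field against the documented positions:

	#include <dt-bindings/usb/pd.h>

	/* Invented example: a 1 m, USB 3.2 Gen1, 5 A, Type-C passive cable. */
	#define EXAMPLE_PCABLE_VDO						\
		VDO_PCABLE(1 /* hw ver */, 1 /* fw ver */, CABLE_VDO_VER1_3,	\
			   CABLE_CTYPE, CABLE_LATENCY_1M,			\
			   PCABLE_VCONN_NOT_REQ, CABLE_MAX_VBUS_20V,		\
			   CABLE_CURR_5A, CABLE_USB32_GEN1)

	/* Per the layout above, bits <6:5> carry Vbus current capability. */
	_Static_assert(((EXAMPLE_PCABLE_VDO >> 5) & 0x3) == CABLE_CURR_5A,
		       "current capability lands in bits 6:5");
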
diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
new file mode 100644
index 000000000000..7ae6d84b2bd9
--- /dev/null
+++ b/include/dt-bindings/watchdog/aspeed-wdt.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef DT_BINDINGS_ASPEED_WDT_H
+#define DT_BINDINGS_ASPEED_WDT_H
+
+#define AST2500_WDT_RESET_CPU (1 << 0)
+#define AST2500_WDT_RESET_COPROC (1 << 1)
+#define AST2500_WDT_RESET_SDRAM (1 << 2)
+#define AST2500_WDT_RESET_AHB (1 << 3)
+#define AST2500_WDT_RESET_I2C (1 << 4)
+#define AST2500_WDT_RESET_MAC0 (1 << 5)
+#define AST2500_WDT_RESET_MAC1 (1 << 6)
+#define AST2500_WDT_RESET_GRAPHICS (1 << 7)
+#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8)
+#define AST2500_WDT_RESET_USB_HOST (1 << 9)
+#define AST2500_WDT_RESET_HID_EHCI (1 << 10)
+#define AST2500_WDT_RESET_VIDEO (1 << 11)
+#define AST2500_WDT_RESET_HAC (1 << 12)
+#define AST2500_WDT_RESET_LPC (1 << 13)
+#define AST2500_WDT_RESET_SDIO (1 << 14)
+#define AST2500_WDT_RESET_MIC (1 << 15)
+#define AST2500_WDT_RESET_CRT (1 << 16)
+#define AST2500_WDT_RESET_PWM (1 << 17)
+#define AST2500_WDT_RESET_PECI (1 << 18)
+#define AST2500_WDT_RESET_JTAG (1 << 19)
+#define AST2500_WDT_RESET_ADC (1 << 20)
+#define AST2500_WDT_RESET_GPIO (1 << 21)
+#define AST2500_WDT_RESET_MCTP (1 << 22)
+#define AST2500_WDT_RESET_XDMA (1 << 23)
+#define AST2500_WDT_RESET_SPI (1 << 24)
+#define AST2500_WDT_RESET_SOC_MISC (1 << 25)
+
+#define AST2500_WDT_RESET_DEFAULT 0x023ffff3
+
+#define AST2600_WDT_RESET1_CPU (1 << 0)
+#define AST2600_WDT_RESET1_SDRAM (1 << 1)
+#define AST2600_WDT_RESET1_AHB (1 << 2)
+#define AST2600_WDT_RESET1_SLI (1 << 3)
+#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4)
+#define AST2600_WDT_RESET1_COPROC (1 << 5)
+#define AST2600_WDT_RESET1_USB_A (1 << 6)
+#define AST2600_WDT_RESET1_USB_B (1 << 7)
+#define AST2600_WDT_RESET1_UHCI (1 << 8)
+#define AST2600_WDT_RESET1_GRAPHICS (1 << 9)
+#define AST2600_WDT_RESET1_CRT (1 << 10)
+#define AST2600_WDT_RESET1_VIDEO (1 << 11)
+#define AST2600_WDT_RESET1_HAC (1 << 12)
+#define AST2600_WDT_RESET1_DP (1 << 13)
+#define AST2600_WDT_RESET1_DP_MCU (1 << 14)
+#define AST2600_WDT_RESET1_GP_MCU (1 << 15)
+#define AST2600_WDT_RESET1_MAC0 (1 << 16)
+#define AST2600_WDT_RESET1_MAC1 (1 << 17)
+#define AST2600_WDT_RESET1_SDIO0 (1 << 18)
+#define AST2600_WDT_RESET1_JTAG0 (1 << 19)
+#define AST2600_WDT_RESET1_MCTP0 (1 << 20)
+#define AST2600_WDT_RESET1_MCTP1 (1 << 21)
+#define AST2600_WDT_RESET1_XDMA0 (1 << 22)
+#define AST2600_WDT_RESET1_XDMA1 (1 << 23)
+#define AST2600_WDT_RESET1_GPIO0 (1 << 24)
+#define AST2600_WDT_RESET1_RVAS (1 << 25)
+
+#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1
+
+#define AST2600_WDT_RESET2_CPU (1 << 0)
+#define AST2600_WDT_RESET2_SPI (1 << 1)
+#define AST2600_WDT_RESET2_AHB2 (1 << 2)
+#define AST2600_WDT_RESET2_SLI2 (1 << 3)
+#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4)
+#define AST2600_WDT_RESET2_MAC2 (1 << 5)
+#define AST2600_WDT_RESET2_MAC3 (1 << 6)
+#define AST2600_WDT_RESET2_SDIO1 (1 << 7)
+#define AST2600_WDT_RESET2_JTAG1 (1 << 8)
+#define AST2600_WDT_RESET2_GPIO1 (1 << 9)
+#define AST2600_WDT_RESET2_MDIO (1 << 10)
+#define AST2600_WDT_RESET2_LPC (1 << 11)
+#define AST2600_WDT_RESET2_PECI (1 << 12)
+#define AST2600_WDT_RESET2_PWM (1 << 13)
+#define AST2600_WDT_RESET2_ADC (1 << 14)
+#define AST2600_WDT_RESET2_FSI (1 << 15)
+#define AST2600_WDT_RESET2_I2C (1 << 16)
+#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17)
+#define AST2600_WDT_RESET2_I3C0 (1 << 18)
+#define AST2600_WDT_RESET2_I3C1 (1 << 19)
+#define AST2600_WDT_RESET2_I3C2 (1 << 20)
+#define AST2600_WDT_RESET2_I3C3 (1 << 21)
+#define AST2600_WDT_RESET2_I3C4 (1 << 22)
+#define AST2600_WDT_RESET2_I3C5 (1 << 23)
+#define AST2600_WDT_RESET2_ESPI (1 << 26)
+
+#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
+
+#endif
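
The reset selections are plain bit masks, so a board can derive its own mask from the provided defaults. A sketch assuming only this header; the board macro name is invented:

	#include <dt-bindings/watchdog/aspeed-wdt.h>

	/* Start from the AST2500 default selection but leave LPC untouched. */
	#define MY_BOARD_WDT_RESET_MASK \
		(AST2500_WDT_RESET_DEFAULT & ~AST2500_WDT_RESET_LPC)

	/* The default deliberately excludes the SDRAM, AHB and SPI resets. */
	_Static_assert(!(AST2500_WDT_RESET_DEFAULT &
			 (AST2500_WDT_RESET_SDRAM | AST2500_WDT_RESET_AHB |
			  AST2500_WDT_RESET_SPI)),
		       "default mask keeps memory and fabric out of reset");
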
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index c47dc5405f79..516a3f51179e 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -10,6 +10,8 @@
#ifndef _KEYS_ASYMMETRIC_PARSER_H
#define _KEYS_ASYMMETRIC_PARSER_H
+struct key_preparsed_payload;
+
/*
* Key data parser. Called during key instantiation.
*/
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index a29d3ff2e7e8..69a13e1e5b2e 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -53,7 +53,7 @@ struct asymmetric_key_id {
};
struct asymmetric_key_ids {
- void *id[2];
+ void *id[3];
};
extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
@@ -72,11 +72,21 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
return key->payload.data[asym_key_ids];
}
+static inline
+const struct public_key *asymmetric_key_public_key(const struct key *key)
+{
+ return key->payload.data[asym_crypto];
+}
+
extern struct key *find_asymmetric_key(struct key *keyring,
const struct asymmetric_key_id *id_0,
const struct asymmetric_key_id *id_1,
+ const struct asymmetric_key_id *id_2,
bool partial);
+int x509_load_certificate_list(const u8 cert_list[], const unsigned long list_size,
+ const struct key *keyring);
+
/*
* The payload is at the discretion of the subtype.
*/
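
With struct asymmetric_key_ids widened to three slots, find_asymmetric_key() now takes a third candidate ID; unused slots are passed as NULL. A hedged usage sketch (the wrapper name is illustrative):

	#include <keys/asymmetric-type.h>

	static struct key *lookup_by_skid(struct key *trusted_keyring,
					  const struct asymmetric_key_id *skid)
	{
		/* Match on the subjectKeyId slot only; no serial- or
		 * digest-based ID is supplied, and partial match is off. */
		return find_asymmetric_key(trusted_keyring, NULL, skid,
					   NULL, false);
	}
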
diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h
index 38afb341c3f2..abfcbe02001a 100644
--- a/include/keys/encrypted-type.h
+++ b/include/keys/encrypted-type.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2010 IBM Corporation
* Copyright (C) 2010 Politecnico di Torino, Italy
- * TORSEC group -- http://security.polito.it
+ * TORSEC group -- https://security.polito.it
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 2b0b15a71228..333c0f49a9cd 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -32,62 +32,14 @@ struct rxkad_key {
};
/*
- * Kerberos 5 principal
- * name/name/name@realm
- */
-struct krb5_principal {
- u8 n_name_parts; /* N of parts of the name part of the principal */
- char **name_parts; /* parts of the name part of the principal */
- char *realm; /* parts of the realm part of the principal */
-};
-
-/*
- * Kerberos 5 tagged data
- */
-struct krb5_tagged_data {
- /* for tag value, see /usr/include/krb5/krb5.h
- * - KRB5_AUTHDATA_* for auth data
- * -
- */
- s32 tag;
- u32 data_len;
- u8 *data;
-};
-
-/*
- * RxRPC key for Kerberos V (type-5 security)
- */
-struct rxk5_key {
- u64 authtime; /* time at which auth token generated */
- u64 starttime; /* time at which auth token starts */
- u64 endtime; /* time at which auth token expired */
- u64 renew_till; /* time to which auth token can be renewed */
- s32 is_skey; /* T if ticket is encrypted in another ticket's
- * skey */
- s32 flags; /* mask of TKT_FLG_* bits (krb5/krb5.h) */
- struct krb5_principal client; /* client principal name */
- struct krb5_principal server; /* server principal name */
- u16 ticket_len; /* length of ticket */
- u16 ticket2_len; /* length of second ticket */
- u8 n_authdata; /* number of authorisation data elements */
- u8 n_addresses; /* number of addresses */
- struct krb5_tagged_data session; /* session data; tag is enctype */
- struct krb5_tagged_data *addresses; /* addresses */
- u8 *ticket; /* krb5 ticket */
- u8 *ticket2; /* second krb5 ticket, if related to ticket (via
- * DUPLICATE-SKEY or ENC-TKT-IN-SKEY) */
- struct krb5_tagged_data *authdata; /* authorisation data */
-};
-
-/*
* list of tokens attached to an rxrpc key
*/
struct rxrpc_key_token {
u16 security_index; /* RxRPC header security index */
+ bool no_leak_key; /* Don't copy the key to userspace */
struct rxrpc_key_token *next; /* the next token in the list */
union {
struct rxkad_key *kad;
- struct rxk5_key *k5;
};
};
@@ -116,12 +68,6 @@ struct rxrpc_key_data_v1 {
#define AFSTOKEN_RK_TIX_MAX 12000 /* max RxKAD ticket size */
#define AFSTOKEN_GK_KEY_MAX 64 /* max GSSAPI key size */
#define AFSTOKEN_GK_TOKEN_MAX 16384 /* max GSSAPI token size */
-#define AFSTOKEN_K5_COMPONENTS_MAX 16 /* max K5 components */
-#define AFSTOKEN_K5_NAME_MAX 128 /* max K5 name length */
-#define AFSTOKEN_K5_REALM_MAX 64 /* max K5 realm name length */
-#define AFSTOKEN_K5_TIX_MAX 16384 /* max K5 ticket size */
-#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */
-#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */
/*
* Truncate a time64_t to the range from 1970 to 2106 as in the network
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index fb8b07daa9d1..8365adf842ef 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -10,15 +10,34 @@
#include <linux/key.h>
+enum blacklist_hash_type {
+ /* TBSCertificate hash */
+ BLACKLIST_HASH_X509_TBS = 1,
+ /* Raw data hash */
+ BLACKLIST_HASH_BINARY = 2,
+};
+
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
extern int restrict_link_by_builtin_trusted(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+extern __init int load_module_cert(struct key *keyring);
#else
#define restrict_link_by_builtin_trusted restrict_link_reject
+#define restrict_link_by_digsig_builtin restrict_link_reject
+
+static inline __init int load_module_cert(struct key *keyring)
+{
+ return 0;
+}
+
#endif
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
@@ -27,18 +46,43 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin_and_secondary(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+void __init add_to_secondary_keyring(const char *source, const void *data, size_t len);
#else
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
+#define restrict_link_by_digsig_builtin_and_secondary restrict_link_by_digsig_builtin
+static inline void __init add_to_secondary_keyring(const char *source, const void *data, size_t len)
+{
+}
#endif
+#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
+extern int restrict_link_by_builtin_secondary_and_machine(
+ struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restrict_key);
+extern void __init set_machine_trusted_keys(struct key *keyring);
+#else
+#define restrict_link_by_builtin_secondary_and_machine restrict_link_by_builtin_trusted
+static inline void __init set_machine_trusted_keys(struct key *keyring)
+{
+}
+#endif
+
+extern struct pkcs7_message *pkcs7;
#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
-extern int mark_hash_blacklisted(const char *hash);
+extern int mark_hash_blacklisted(const u8 *hash, size_t hash_len,
+ enum blacklist_hash_type hash_type);
extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
- const char *type);
+ enum blacklist_hash_type hash_type);
extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
#else
static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
- const char *type)
+ enum blacklist_hash_type hash_type)
{
return 0;
}
@@ -49,6 +93,20 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
}
#endif
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+extern int add_key_to_revocation_list(const char *data, size_t size);
+extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
+#else
+static inline int add_key_to_revocation_list(const char *data, size_t size)
+{
+ return 0;
+}
+static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
+{
+ return -ENOKEY;
+}
+#endif
+
#ifdef CONFIG_IMA_BLACKLIST_KEYRING
extern struct key *ima_blacklist_keyring;
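
A hedged sketch of the enum-typed blacklist check introduced above; the wrapper name is invented, and the -EKEYREJECTED return convention is an assumption drawn from the usual keyring error style, not stated in this diff:

	#include <linux/errno.h>
	#include <keys/system_keyring.h>

	static bool digest_is_revoked(const u8 *digest, size_t len)
	{
		/* Assumed convention: 0 when clean, -EKEYREJECTED when listed. */
		return is_hash_blacklisted(digest, len,
					   BLACKLIST_HASH_BINARY) == -EKEYREJECTED;
	}
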
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
index a94c03a61d8f..4eb64548a74f 100644
--- a/include/keys/trusted-type.h
+++ b/include/keys/trusted-type.h
@@ -11,6 +11,12 @@
#include <linux/rcupdate.h>
#include <linux/tpm.h>
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "trusted_key: " fmt
+
#define MIN_KEY_SIZE 32
#define MAX_KEY_SIZE 128
#define MAX_BLOB_SIZE 512
@@ -22,6 +28,7 @@ struct trusted_key_payload {
unsigned int key_len;
unsigned int blob_len;
unsigned char migratable;
+ unsigned char old_format;
unsigned char key[MAX_KEY_SIZE + 1];
unsigned char blob[MAX_BLOB_SIZE];
};
@@ -30,6 +37,7 @@ struct trusted_key_options {
uint16_t keytype;
uint32_t keyhandle;
unsigned char keyauth[TPM_DIGEST_SIZE];
+ uint32_t blobauth_len;
unsigned char blobauth[TPM_DIGEST_SIZE];
uint32_t pcrinfo_len;
unsigned char pcrinfo[MAX_PCRINFO_SIZE];
@@ -40,6 +48,53 @@ struct trusted_key_options {
uint32_t policyhandle;
};
+struct trusted_key_ops {
+ /*
+ * flag to indicate if trusted key implementation supports migration
+ * or not.
+ */
+ unsigned char migratable;
+
+ /* Initialize key interface. */
+ int (*init)(void);
+
+ /* Seal a key. */
+ int (*seal)(struct trusted_key_payload *p, char *datablob);
+
+ /* Unseal a key. */
+ int (*unseal)(struct trusted_key_payload *p, char *datablob);
+
+ /* Optional: Get a randomized key. */
+ int (*get_random)(unsigned char *key, size_t key_len);
+
+ /* Exit key interface. */
+ void (*exit)(void);
+};
+
+struct trusted_key_source {
+ char *name;
+ struct trusted_key_ops *ops;
+};
+
extern struct key_type key_type_trusted;
+#define TRUSTED_DEBUG 0
+
+#if TRUSTED_DEBUG
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+ pr_info("key_len %d\n", p->key_len);
+ print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+ 16, 1, p->key, p->key_len, 0);
+ pr_info("bloblen %d\n", p->blob_len);
+ print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+ 16, 1, p->blob, p->blob_len, 0);
+ pr_info("migratable %d\n", p->migratable);
+}
+#else
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+#endif
+
#endif /* _KEYS_TRUSTED_TYPE_H */
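
The new ops table makes trusted keys a pluggable interface (TPM, TEE and CAAM providers appear in the headers that follow). A minimal backend sketch against the struct exactly as declared; every name and the -EOPNOTSUPP stubs are hypothetical:

	#include <linux/errno.h>
	#include <keys/trusted-type.h>

	static int example_init(void)
	{
		return 0;
	}

	static int example_seal(struct trusted_key_payload *p, char *datablob)
	{
		/* A real backend would wrap p->key into p->blob here. */
		return -EOPNOTSUPP;
	}

	static int example_unseal(struct trusted_key_payload *p, char *datablob)
	{
		return -EOPNOTSUPP;
	}

	static void example_exit(void)
	{
	}

	static struct trusted_key_ops example_trusted_ops = {
		.migratable = 0,	/* sealed blobs must stay on this device */
		.init = example_init,
		.seal = example_seal,
		.unseal = example_unseal,
		.exit = example_exit,
	};

	static struct trusted_key_source example_source = {
		.name = "example",
		.ops = &example_trusted_ops,
	};
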
diff --git a/include/keys/trusted_caam.h b/include/keys/trusted_caam.h
new file mode 100644
index 000000000000..73fe2f32f65e
--- /dev/null
+++ b/include/keys/trusted_caam.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+#ifndef __CAAM_TRUSTED_KEY_H
+#define __CAAM_TRUSTED_KEY_H
+
+extern struct trusted_key_ops trusted_key_caam_ops;
+
+#endif
diff --git a/include/keys/trusted_tee.h b/include/keys/trusted_tee.h
new file mode 100644
index 000000000000..151be25a979e
--- /dev/null
+++ b/include/keys/trusted_tee.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ *
+ * Author:
+ * Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#ifndef __TEE_TRUSTED_KEY_H
+#define __TEE_TRUSTED_KEY_H
+
+#include <keys/trusted-type.h>
+
+extern struct trusted_key_ops trusted_key_tee_ops;
+
+#endif
diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h
index a56d8e1298f2..7769b726863a 100644
--- a/include/keys/trusted_tpm.h
+++ b/include/keys/trusted_tpm.h
@@ -16,6 +16,8 @@
#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
+extern struct trusted_key_ops trusted_key_tpm_ops;
+
struct osapsess {
uint32_t handle;
unsigned char secret[SHA1_DIGEST_SIZE];
@@ -52,30 +54,19 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
#if TPM_DEBUG
static inline void dump_options(struct trusted_key_options *o)
{
- pr_info("trusted_key: sealing key type %d\n", o->keytype);
- pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
- pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
- pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+ pr_info("sealing key type %d\n", o->keytype);
+ pr_info("sealing key handle %0X\n", o->keyhandle);
+ pr_info("pcrlock %d\n", o->pcrlock);
+ pr_info("pcrinfo %d\n", o->pcrinfo_len);
print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
16, 1, o->pcrinfo, o->pcrinfo_len, 0);
}
-static inline void dump_payload(struct trusted_key_payload *p)
-{
- pr_info("trusted_key: key_len %d\n", p->key_len);
- print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
- 16, 1, p->key, p->key_len, 0);
- pr_info("trusted_key: bloblen %d\n", p->blob_len);
- print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
- 16, 1, p->blob, p->blob_len, 0);
- pr_info("trusted_key: migratable %d\n", p->migratable);
-}
-
static inline void dump_sess(struct osapsess *s)
{
print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
16, 1, &s->handle, 4, 0);
- pr_info("trusted-key: secret:\n");
+ pr_info("secret:\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
pr_info("trusted-key: enonce:\n");
@@ -87,7 +78,7 @@ static inline void dump_tpm_buf(unsigned char *buf)
{
int len;
- pr_info("\ntrusted-key: tpm buffer\n");
+ pr_info("\ntpm buffer\n");
len = LOAD32(buf, TPM_SIZE_OFFSET);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
}
@@ -96,10 +87,6 @@ static inline void dump_options(struct trusted_key_options *o)
{
}
-static inline void dump_payload(struct trusted_key_payload *p)
-{
-}
-
static inline void dump_sess(struct osapsess *s)
{
}
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index ad889b539ab3..24c2b9fa61e8 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -10,7 +10,7 @@
#define _KUNIT_ASSERT_H
#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/printk.h>
struct kunit;
struct string_stream;
@@ -29,57 +29,32 @@ enum kunit_assert_type {
};
/**
- * struct kunit_assert - Data for printing a failed assertion or expectation.
- * @test: the test case this expectation/assertion is associated with.
- * @type: the type (either an expectation or an assertion) of this kunit_assert.
- * @line: the source code line number that the expectation/assertion is at.
- * @file: the file path of the source file that the expectation/assertion is in.
- * @message: an optional message to provide additional context.
- * @format: a function which formats the data in this kunit_assert to a string.
- *
- * Represents a failed expectation/assertion. Contains all the data necessary to
- * format a string to a user reporting the failure.
+ * struct kunit_loc - Identifies the source location of a line of code.
+ * @line: the line number in the file.
+ * @file: the file name.
*/
-struct kunit_assert {
- struct kunit *test;
- enum kunit_assert_type type;
+struct kunit_loc {
int line;
const char *file;
- struct va_format message;
- void (*format)(const struct kunit_assert *assert,
- struct string_stream *stream);
};
-/**
- * KUNIT_INIT_VA_FMT_NULL - Default initializer for struct va_format.
- *
- * Used inside a struct initialization block to initialize struct va_format to
- * default values where fmt and va are null.
- */
-#define KUNIT_INIT_VA_FMT_NULL { .fmt = NULL, .va = NULL }
+#define KUNIT_CURRENT_LOC { .file = __FILE__, .line = __LINE__ }
/**
- * KUNIT_INIT_ASSERT_STRUCT() - Initializer for a &struct kunit_assert.
- * @kunit: The test case that this expectation/assertion is associated with.
- * @assert_type: The type (assertion or expectation) of this kunit_assert.
- * @fmt: The formatting function which builds a string out of this kunit_assert.
+ * struct kunit_assert - Data for printing a failed assertion or expectation.
*
- * The base initializer for a &struct kunit_assert.
+ * Represents a failed expectation/assertion. Contains all the data necessary to
+ * format a string to a user reporting the failure.
*/
-#define KUNIT_INIT_ASSERT_STRUCT(kunit, assert_type, fmt) { \
- .test = kunit, \
- .type = assert_type, \
- .file = __FILE__, \
- .line = __LINE__, \
- .message = KUNIT_INIT_VA_FMT_NULL, \
- .format = fmt \
-}
+struct kunit_assert {};
-void kunit_base_assert_format(const struct kunit_assert *assert,
- struct string_stream *stream);
+typedef void (*assert_format_t)(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
-void kunit_assert_print_msg(const struct kunit_assert *assert,
- struct string_stream *stream);
+void kunit_assert_prologue(const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ struct string_stream *stream);
/**
* struct kunit_fail_assert - Represents a plain fail expectation/assertion.
@@ -92,23 +67,10 @@ struct kunit_fail_assert {
};
void kunit_fail_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_FAIL_ASSERT_STRUCT() - Initializer for &struct kunit_fail_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- *
- * Initializes a &struct kunit_fail_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_FAIL_ASSERT_STRUCT(test, type) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_fail_assert_format) \
-}
-
-/**
* struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
* @assert: The parent of this type.
* @condition: A string representation of a conditional expression.
@@ -125,27 +87,10 @@ struct kunit_unary_assert {
};
void kunit_unary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @cond: A string representation of the expression asserted true or false.
- * @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
- *
- * Initializes a &struct kunit_unary_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_UNARY_ASSERT_STRUCT(test, type, cond, expect_true) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_unary_assert_format), \
- .condition = cond, \
- .expected_true = expect_true \
-}
-
-/**
* struct kunit_ptr_not_err_assert - An expectation/assertion that a pointer is
* not NULL and not a -errno.
* @assert: The parent of this type.
@@ -162,35 +107,28 @@ struct kunit_ptr_not_err_assert {
};
void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_PTR_NOT_ERR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_ptr_not_err_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @txt: A string representation of the expression passed to the expectation.
- * @val: The actual evaluated pointer value of the expression.
- *
- * Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ * struct kunit_binary_assert_text - holds strings for &struct
+ * kunit_binary_assert and friends to try to make the structs smaller.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the left expression (e.g. "2+2").
+ * @right_text: A string representation of the right expression (e.g. "2+2").
*/
-#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, type, txt, val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_ptr_not_err_assert_format), \
- .text = txt, \
- .value = val \
-}
+struct kunit_binary_assert_text {
+ const char *operation;
+ const char *left_text;
+ const char *right_text;
+};
/**
* struct kunit_binary_assert - An expectation/assertion that compares two
* non-pointer values (for example, KUNIT_EXPECT_EQ(test, 1 + 1, 2)).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and op (e.g. "==").
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two non-pointer values. For
@@ -199,55 +137,21 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
long long left_value;
- const char *right_text;
long long right_value;
};
void kunit_binary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a &struct kunit_binary_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
-
-/**
* struct kunit_binary_ptr_assert - An expectation/assertion that compares two
* pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and op (e.g. "==").
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two pointer values. For
@@ -256,55 +160,21 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_ptr_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
const void *left_value;
- const char *right_text;
const void *right_value;
};
void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_ptr_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a &struct kunit_binary_ptr_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_ptr_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
-
-/**
* struct kunit_binary_str_assert - An expectation/assertion that compares two
* string values (for example, KUNIT_EXPECT_STREQ(test, foo, "bar")).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and comparator.
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two string values. For
@@ -313,45 +183,39 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_str_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
const char *left_value;
- const char *right_text;
const char *right_value;
};
void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_STR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_str_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
+ * struct kunit_mem_assert - An expectation/assertion that compares two
+ * memory blocks.
+ * @assert: The parent of this type.
+ * @text: Holds the textual representations of the operands and comparator.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ * @size: Size of the memory block analysed in bytes.
*
- * Initializes a &struct kunit_binary_str_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ * Represents an expectation/assertion that compares two memory blocks. For
+ * example, to expect that the first three bytes of foo is equal to the
+ * first three bytes of bar, you can use the expectation
+ * KUNIT_EXPECT_MEMEQ(test, foo, bar, 3);
*/
-#define KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_str_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
+struct kunit_mem_assert {
+ struct kunit_assert assert;
+ const struct kunit_binary_assert_text *text;
+ const void *left_value;
+ const void *right_value;
+ const size_t size;
+};
+
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
#endif /* _KUNIT_ASSERT_H */
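
The refactor above hoists the per-callsite strings into one shared kunit_binary_assert_text, so each assertion instance carries just a pointer plus the two evaluated values. An illustrative initialization, assuming the structs as declared here (the variable names are not from kunit):

	static const struct kunit_binary_assert_text example_text = {
		.operation = "==",
		.left_text = "1 + 1",
		.right_text = "2",
	};

	static const struct kunit_binary_assert example_assert = {
		.text = &example_text,	/* shared, not duplicated per site */
		.left_value = 1 + 1,
		.right_value = 2,
	};
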
diff --git a/include/kunit/attributes.h b/include/kunit/attributes.h
new file mode 100644
index 000000000000..bc76a0b786d2
--- /dev/null
+++ b/include/kunit/attributes.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_ATTRIBUTES_H
+#define _KUNIT_ATTRIBUTES_H
+
+/*
+ * struct kunit_attr_filter - representation of attributes filter with the
+ * attribute object and string input
+ */
+struct kunit_attr_filter {
+ struct kunit_attr *attr;
+ char *input;
+};
+
+/*
+ * Returns the name of the filter's attribute.
+ */
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter);
+
+/*
+ * Print all test attributes for a test case or suite.
+ * Output format for test cases: "# <test_name>.<attribute>: <value>"
+ * Output format for test suites: "# <attribute>: <value>"
+ */
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level);
+
+/*
+ * Returns the number of filters in the input.
+ */
+int kunit_get_filter_count(char *input);
+
+/*
+ * Parse the attribute filter input and return an object containing the
+ * attribute object and the string input of the next filter.
+ */
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err);
+
+/*
+ * Returns a copy of the suite containing only tests that pass the filter.
+ */
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err);
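+
+/*
+ * Example (an illustrative sketch; 'my_filter_string' is a hypothetical
+ * input): walking each filter in a comma-separated attribute filter
+ * string such as "speed>slow,speed<normal".
+ *
+ *	int i, err = 0, count;
+ *	char *filters = my_filter_string;
+ *	struct kunit_attr_filter filter;
+ *
+ *	count = kunit_get_filter_count(filters);
+ *	for (i = 0; i < count; i++) {
+ *		filter = kunit_next_attr_filter(&filters, &err);
+ *		if (err)
+ *			break;
+ *	}
+ */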
+
+#endif /* _KUNIT_ATTRIBUTES_H */
diff --git a/include/kunit/device.h b/include/kunit/device.h
new file mode 100644
index 000000000000..2450110ad64e
--- /dev/null
+++ b/include/kunit/device.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit basic device implementation
+ *
+ * Helpers for creating and managing fake devices for KUnit tests.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_H
+#define _KUNIT_DEVICE_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+
+#include <kunit/test.h>
+
+struct device;
+struct device_driver;
+
+/**
+ * kunit_driver_create() - Create a struct device_driver attached to the kunit_bus
+ * @test: The test context object.
+ * @name: The name to give the created driver.
+ *
+ * Creates a struct device_driver attached to the kunit_bus, with the name @name.
+ * This driver will automatically be cleaned up on test exit.
+ *
+ * Return: a stub struct device_driver, managed by KUnit, with the name @name.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given name,
+ * and a corresponding driver. The device and driver will be cleaned up on test
+ * exit, or when kunit_device_unregister is called. See also
+ * kunit_device_register_with_driver, if you wish to provide your own
+ * struct device_driver.
+ *
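+ * Example (a minimal sketch; my_test() is a hypothetical test function):
+ *
+ * .. code-block:: c
+ *
+ *	static void my_test(struct kunit *test)
+ *	{
+ *		struct device *dev = kunit_device_register(test, "my-fake-device");
+ *
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ *	}
+ *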
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register_with_driver() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ * @drv: The struct device_driver to associate with the device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given
+ * name, and driver. The device will be cleaned up on test exit, or when
+ * kunit_device_unregister is called. See also kunit_device_register, if you
+ * wish KUnit to create and manage a driver for you.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv);
+
+/**
+ * kunit_device_unregister() - Unregister a KUnit-managed device
+ * @test: The test context object which created the device
+ * @dev: The device.
+ *
+ * Unregisters and destroys a struct device which was created with
+ * kunit_device_register or kunit_device_register_with_driver. If KUnit created
+ * a driver, cleans it up as well.
+ */
+void kunit_device_unregister(struct kunit *test, struct device *dev);
+
+#endif /* IS_ENABLED(CONFIG_KUNIT) */
+
+#endif /* _KUNIT_DEVICE_H */
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
new file mode 100644
index 000000000000..4ad69a2642a5
--- /dev/null
+++ b/include/kunit/resource.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#ifndef _KUNIT_RESOURCE_H
+#define _KUNIT_RESOURCE_H
+
+#include <kunit/test.h>
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct kunit_resource;
+
+typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
+typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+
+/**
+ * struct kunit_resource - represents a *test managed resource*
+ * @data: for the user to store arbitrary data.
+ * @name: optional name
+ * @free: a user supplied function to free the resource.
+ *
+ * Represents a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. This cleanup is performed by the 'free'
+ * function. The struct kunit_resource itself is freed automatically with
+ * kfree() if it was allocated by KUnit (e.g., by kunit_alloc_resource()), but
+ * must be freed by the user otherwise.
+ *
+ * Resources are reference counted so if a resource is retrieved via
+ * kunit_alloc_and_get_resource() or kunit_find_resource(), we need
+ * to call kunit_put_resource() to reduce the resource reference count
+ * when finished with it. Note that kunit_alloc_resource() does not require a
+ * kunit_put_resource() because it does not retrieve the resource itself.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_kmalloc_params {
+ * size_t size;
+ * gfp_t gfp;
+ * };
+ *
+ * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+ * {
+ * struct kunit_kmalloc_params *params = context;
+ * res->data = kmalloc(params->size, params->gfp);
+ *
+ * if (!res->data)
+ * return -ENOMEM;
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_kmalloc_free(struct kunit_resource *res)
+ * {
+ * kfree(res->data);
+ * }
+ *
+ * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+ * {
+ * struct kunit_kmalloc_params params;
+ *
+ * params.size = size;
+ * params.gfp = gfp;
+ *
+ * return kunit_alloc_resource(test, kunit_kmalloc_init,
+ * kunit_kmalloc_free, gfp, &params);
+ * }
+ *
+ * Resources can also be named, with lookup/removal done on a name
+ * basis as well; see kunit_add_named_resource(), kunit_find_named_resource()
+ * and kunit_destroy_named_resource(). Resource names must be
+ * unique within the test instance.
+ */
+struct kunit_resource {
+ void *data;
+ const char *name;
+ kunit_resource_free_t free;
+
+ /* private: internal use only. */
+ struct kref refcount;
+ struct list_head node;
+ bool should_kfree;
+};
+
+/**
+ * kunit_get_resource() - Hold resource for use. Should not need to be used
+ * by most users as we automatically get resources
+ * retrieved by kunit_find_resource*().
+ * @res: resource
+ */
+static inline void kunit_get_resource(struct kunit_resource *res)
+{
+ kref_get(&res->refcount);
+}
+
+/*
+ * Called when refcount reaches zero via kunit_put_resource();
+ * should not be called directly.
+ */
+static inline void kunit_release_resource(struct kref *kref)
+{
+ struct kunit_resource *res = container_of(kref, struct kunit_resource,
+ refcount);
+
+ if (res->free)
+ res->free(res);
+
+	/* 'res' is still valid here: even when should_kfree is set, res->free
+	 * must only have freed res->data, not 'res' itself.
+	 */
+ if (res->should_kfree)
+ kfree(res);
+}
+
+/**
+ * kunit_put_resource() - When caller is done with retrieved resource,
+ * kunit_put_resource() should be called to drop
+ * reference count. The resource list maintains
+ * a reference count on resources, so if no users
+ * are utilizing a resource and it is removed from
+ * the resource list, it will be freed via the
+ * associated free function (if any). Only
+ * needs to be used if we alloc_and_get() or
+ * find() resource.
+ * @res: resource
+ */
+static inline void kunit_put_resource(struct kunit_resource *res)
+{
+ kref_put(&res->refcount, kunit_release_resource);
+}
+
+/**
+ * __kunit_add_resource() - Internal helper to add a resource.
+ *
+ * res->should_kfree is not initialised.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data);
+
+/**
+ * kunit_add_resource() - Add a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
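+ *
+ * Example (a minimal sketch; my_res, my_data and free_my_data() are
+ * hypothetical, with @res statically allocated so no kfree() is needed):
+ *
+ * .. code-block:: c
+ *
+ *	static struct kunit_resource my_res;
+ *
+ *	kunit_add_resource(test, NULL, free_my_data, &my_res, my_data);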
+ */
+static inline int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ res->should_kfree = false;
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test, const char *name);
+
+/**
+ * kunit_add_named_resource() - Add a named *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource data, if needed.
+ * @free: a user-supplied function to free the resource data, if needed.
+ * @res: The resource.
+ * @name: name to be set for resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+static inline int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data)
+{
+ struct kunit_resource *existing;
+
+ if (!name)
+ return -EINVAL;
+
+ existing = kunit_find_named_resource(test, name);
+ if (existing) {
+ kunit_put_resource(existing);
+ return -EEXIST;
+ }
+
+ res->name = name;
+ res->should_kfree = false;
+
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+/**
+ * kunit_alloc_and_get_resource() - Allocates and returns a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * This is effectively identical to kunit_alloc_resource(), but returns the
+ * struct kunit_resource pointer, not just the 'data' pointer. It therefore
+ * also increments the resource's refcount, so kunit_put_resource() should be
+ * called when you've finished with it.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline struct kunit_resource *
+kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+
+ ret = __kunit_add_resource(test, init, free, res, context);
+ if (!ret) {
+ /*
+		 * bump refcount for get; kunit_put_resource() should be called
+ * when done.
+ */
+ kunit_get_resource(res);
+ return res;
+ }
+ return NULL;
+}
+
+/**
+ * kunit_alloc_resource() - Allocates a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline void *kunit_alloc_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+ if (!__kunit_add_resource(test, init, free, res, context))
+ return res->data;
+
+ return NULL;
+}
+
+typedef bool (*kunit_resource_match_t)(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_data);
+
+/**
+ * kunit_resource_name_match() - Match a resource with the same name.
+ * @test: Test case to which the resource belongs.
+ * @res: The resource.
+ * @match_name: The name to match against.
+ */
+static inline bool kunit_resource_name_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_name)
+{
+ return res->name && strcmp(res->name, match_name) == 0;
+}
+
+/**
+ * kunit_find_resource() - Find a resource using match function/data.
+ * @test: Test case to which the resource belongs.
+ * @match: match function to be applied to resources/match data.
+ * @match_data: data to be used in matching.
+ */
+static inline struct kunit_resource *
+kunit_find_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res, *found = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&test->lock, flags);
+
+ list_for_each_entry_reverse(res, &test->resources, node) {
+ if (match(test, res, (void *)match_data)) {
+ found = res;
+ kunit_get_resource(found);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return found;
+}
+
+/**
+ * kunit_find_named_resource() - Find a resource using match name.
+ * @test: Test case to which the resource belongs.
+ * @name: match name.
+ */
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_find_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_destroy_resource() - Find a kunit_resource and destroy it.
+ * @test: Test case to which the resource belongs.
+ * @match: Match function. Returns whether a given resource matches @match_data.
+ * @match_data: Data passed into @match.
+ *
+ * RETURNS:
+ * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ */
+int kunit_destroy_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data);
+
+static inline int kunit_destroy_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_destroy_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_remove_resource() - remove resource from resource list associated with
+ * test.
+ * @test: The test context object.
+ * @res: The resource to be removed.
+ *
+ * Note that the resource will not be immediately freed since it is likely
+ * the caller has a reference to it via alloc_and_get() or find();
+ * in this case a final call to kunit_put_resource() is required.
+ */
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+
+/* A 'deferred action' function to be used with kunit_add_action. */
+typedef void (kunit_action_t)(void *);
+
+/**
+ * KUNIT_DEFINE_ACTION_WRAPPER() - Wrap a function for use as a deferred action.
+ *
+ * @wrapper: The name of the new wrapper function to define.
+ * @orig: The original function to wrap.
+ * @arg_type: The type of the argument accepted by @orig.
+ *
+ * Defines a wrapper for a function which accepts a single, pointer-sized
+ * argument. This wrapper can then be passed to kunit_add_action() and
+ * similar. This should be used in preference to casting a function
+ * directly to kunit_action_t, as casting function pointers will break
+ * control flow integrity (CFI), leading to crashes.
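+ *
+ * For example, to wrap kfree() for use as a deferred action (kfree_wrapper
+ * is an arbitrary name):
+ *
+ * .. code-block:: c
+ *
+ *	KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);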
+ */
+#define KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) \
+ static void wrapper(void *in) \
+ { \
+ arg_type arg = (arg_type)in; \
+ orig(arg); \
+ }
+
+
+/**
+ * kunit_add_action() - Call a function when the test ends.
+ * @test: Test case to associate the action with.
+ * @action: The function to run on test exit.
+ * @ctx: Data passed into @action.
+ *
+ * Defer the execution of a function until the test exits, either normally or
+ * due to a failure. @ctx is passed as additional context. All functions
+ * registered with kunit_add_action() will execute in the opposite order to that
+ * they were registered in.
+ *
+ * This is useful for cleaning up allocated memory and resources, as these
+ * functions are called even if the test aborts early due to, e.g., a failed
+ * assertion.
+ *
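+ * Example (a minimal sketch, assuming a kfree_wrapper() defined with
+ * KUNIT_DEFINE_ACTION_WRAPPER()):
+ *
+ * .. code-block:: c
+ *
+ *	void *buf = kmalloc(32, GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_NULL(test, buf);
+ *	kunit_add_action(test, kfree_wrapper, buf);
+ *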
+ * See also: devm_add_action() for the devres equivalent.
+ *
+ * Returns:
+ * 0 on success, an error if the action could not be deferred.
+ */
+int kunit_add_action(struct kunit *test, kunit_action_t *action, void *ctx);
+
+/**
+ * kunit_add_action_or_reset() - Call a function when the test ends.
+ * @test: Test case to associate the action with.
+ * @action: The function to run on test exit.
+ * @ctx: Data passed into @action.
+ *
+ * Defer the execution of a function until the test exits, either normally or
+ * due to a failure. @ctx is passed as additional context. All functions
+ * registered with kunit_add_action() will execute in the opposite order to that
+ * they were registered in.
+ *
+ * This is useful for cleaning up allocated memory and resources, as these
+ * functions are called even if the test aborts early due to, e.g., a failed
+ * assertion.
+ *
+ * If the action cannot be created (e.g., due to the system being out of memory),
+ * then action(ctx) will be called immediately, and an error will be returned.
+ *
+ * See also: devm_add_action_or_reset() for the devres equivalent.
+ *
+ * Returns:
+ * 0 on success, an error if the action could not be deferred.
+ */
+int kunit_add_action_or_reset(struct kunit *test, kunit_action_t *action,
+ void *ctx);
+
+/**
+ * kunit_remove_action() - Cancel a matching deferred action.
+ * @test: Test case the action is associated with.
+ * @action: The deferred function to cancel.
+ * @ctx: The context passed to the deferred function to cancel.
+ *
+ * Prevent an action deferred via kunit_add_action() from executing when the
+ * test terminates.
+ *
+ * If the function/context pair was deferred multiple times, only the most
+ * recent one will be cancelled.
+ *
+ * See also: devm_remove_action() for the devres equivalent.
+ */
+void kunit_remove_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx);
+
+/**
+ * kunit_release_action() - Run a matching deferred action immediately.
+ * @test: Test case the action is associated with.
+ * @action: The deferred function to trigger.
+ * @ctx: The context passed to the deferred function to trigger.
+ *
+ * Execute a function deferred via kunit_add_action() immediately, rather than
+ * when the test ends.
+ *
+ * If the function/context pair was deferred multiple times, it will only be
+ * executed once here. The most recent deferral will no longer execute when
+ * the test ends.
+ *
+ * kunit_release_action(test, func, ctx);
+ * is equivalent to
+ * func(ctx);
+ * kunit_remove_action(test, func, ctx);
+ *
+ * See also: devm_release_action() for the devres equivalent.
+ */
+void kunit_release_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx);
+#endif /* _KUNIT_RESOURCE_H */
diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h
new file mode 100644
index 000000000000..44d12370939a
--- /dev/null
+++ b/include/kunit/skbuff.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource management helpers for SKBs (skbuff).
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef _KUNIT_SKBUFF_H
+#define _KUNIT_SKBUFF_H
+
+#include <kunit/resource.h>
+#include <linux/skbuff.h>
+
+static void kunit_action_kfree_skb(void *p)
+{
+ kfree_skb((struct sk_buff *)p);
+}
+
+/**
+ * kunit_zalloc_skb() - Allocate and initialize a resource managed skb.
+ * @test: The test case to which the skb belongs
+ * @len: size to allocate
+ * @gfp: allocation flags
+ *
+ * Allocate a new struct sk_buff with @gfp, zero-fill the given length
+ * and add it as a resource to the kunit test for automatic cleanup.
+ *
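+ * Example (a minimal sketch inside a test function):
+ *
+ * .. code-block:: c
+ *
+ *	struct sk_buff *skb = kunit_zalloc_skb(test, 64, GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_NULL(test, skb);
+ *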
+ * Returns: newly allocated SKB, or %NULL on error
+ */
+static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len,
+ gfp_t gfp)
+{
+	struct sk_buff *res = alloc_skb(len, gfp);
+
+ if (!res || skb_pad(res, len))
+ return NULL;
+
+ if (kunit_add_action_or_reset(test, kunit_action_kfree_skb, res))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * kunit_kfree_skb() - Like kfree_skb except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @skb: The SKB to free.
+ */
+static inline void kunit_kfree_skb(struct kunit *test, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ kunit_release_action(test, kunit_action_kfree_skb, (void *)skb);
+}
+
+#endif /* _KUNIT_SKBUFF_H */
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
new file mode 100644
index 000000000000..bf940322dfc0
--- /dev/null
+++ b/include/kunit/static_stub.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit function redirection (static stubbing) API.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#ifndef _KUNIT_STATIC_STUB_H
+#define _KUNIT_STATIC_STUB_H
+
+#if !IS_ENABLED(CONFIG_KUNIT)
+
+/* If CONFIG_KUNIT is not enabled, these stubs quietly disappear. */
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) do {} while (0)
+
+#else
+
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include <linux/compiler.h> /* for {un,}likely() */
+#include <linux/sched.h> /* for task_struct */
+
+
+/**
+ * KUNIT_STATIC_STUB_REDIRECT() - call a replacement 'static stub' if one exists
+ * @real_fn_name: The name of this function (as an identifier, not a string)
+ * @args: All of the arguments passed to this function
+ *
+ * This is a function prologue which is used to allow calls to the current
+ * function to be redirected by a KUnit test. KUnit tests can call
+ * kunit_activate_static_stub() to pass a replacement function in. The
+ * replacement function will be called by KUNIT_STATIC_STUB_REDIRECT(), which
+ * will then return from the function. If the caller is not in a KUnit context,
+ * the function will continue execution as normal.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * int real_func(int n)
+ * {
+ * KUNIT_STATIC_STUB_REDIRECT(real_func, n);
+ * return 0;
+ * }
+ *
+ * int replacement_func(int n)
+ * {
+ * return 42;
+ * }
+ *
+ * void example_test(struct kunit *test)
+ * {
+ * kunit_activate_static_stub(test, real_func, replacement_func);
+ * KUNIT_EXPECT_EQ(test, real_func(1), 42);
+ * }
+ *
+ */
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) \
+do { \
+ typeof(&real_fn_name) replacement; \
+ struct kunit *current_test = kunit_get_current_test(); \
+ \
+ if (likely(!current_test)) \
+ break; \
+ \
+ replacement = kunit_hooks.get_static_stub_address(current_test, \
+ &real_fn_name); \
+ \
+ if (unlikely(replacement)) \
+ return replacement(args); \
+} while (0)
+
+/* Helper function for kunit_activate_static_stub(). The macro does
+ * typechecking, so use it instead.
+ */
+void __kunit_activate_static_stub(struct kunit *test,
+ void *real_fn_addr,
+ void *replacement_addr);
+
+/**
+ * kunit_activate_static_stub() - replace a function using static stubs.
+ * @test: A pointer to the 'struct kunit' test context for the current test.
+ * @real_fn_addr: The address of the function to replace.
+ * @replacement_addr: The address of the function to replace it with.
+ *
+ * When activated, calls to real_fn_addr from within this test (even if called
+ * indirectly) will instead call replacement_addr. The function pointed to by
+ * real_fn_addr must begin with the static stub prologue in
+ * KUNIT_STATIC_STUB_REDIRECT() for this to work. real_fn_addr and
+ * replacement_addr must have the same type.
+ *
+ * The redirection can be disabled again with kunit_deactivate_static_stub().
+ */
+#define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \
+ typecheck_fn(typeof(&replacement_addr), real_fn_addr); \
+ __kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \
+} while (0)
+
+
+/**
+ * kunit_deactivate_static_stub() - disable a function redirection
+ * @test: A pointer to the 'struct kunit' test context for the current test.
+ * @real_fn_addr: The address of the function to no-longer redirect
+ *
+ * Deactivates a redirection configured with kunit_activate_static_stub(). After
+ * this function returns, calls to real_fn_addr() will execute the original
+ * real_fn, not any previously-configured replacement.
+ */
+void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr);
+
+#endif /* !IS_ENABLED(CONFIG_KUNIT) */
+#endif /* _KUNIT_STATIC_STUB_H */
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
new file mode 100644
index 000000000000..47aa8f21ccce
--- /dev/null
+++ b/include/kunit/test-bug.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API providing hooks for non-test code to interact with tests.
+ *
+ * Copyright (C) 2020, Google LLC.
+ * Author: Uriel Guajardo <urielguajardo@google.com>
+ */
+
+#ifndef _KUNIT_TEST_BUG_H
+#define _KUNIT_TEST_BUG_H
+
+#include <linux/stddef.h> /* for NULL */
+
+#if IS_ENABLED(CONFIG_KUNIT)
+
+#include <linux/jump_label.h> /* For static branch */
+#include <linux/sched.h>
+
+/* Static key if KUnit is running any tests. */
+DECLARE_STATIC_KEY_FALSE(kunit_running);
+
+/* Hooks table: a table of function pointers filled in when kunit loads */
+extern struct kunit_hooks_table {
+ __printf(3, 4) void (*fail_current_test)(const char*, int, const char*, ...);
+ void *(*get_static_stub_address)(struct kunit *test, void *real_fn_addr);
+} kunit_hooks;
+
+/**
+ * kunit_get_current_test() - Return a pointer to the currently running
+ * KUnit test.
+ *
+ * If a KUnit test is running in the current task, returns a pointer to its
+ * associated struct kunit. This pointer can then be passed to any KUnit
+ * function or assertion. If no test is running (or a test is running in a
+ * different task), returns NULL.
+ *
+ * This function is safe to call even when KUnit is disabled. If CONFIG_KUNIT
+ * is not enabled, it will compile down to nothing and will quickly return
+ * NULL, since no test can be running.
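+ *
+ * Example (a minimal sketch; my_debug_hook() is hypothetical non-test code):
+ *
+ * .. code-block:: c
+ *
+ *	static void my_debug_hook(void)
+ *	{
+ *		struct kunit *test = kunit_get_current_test();
+ *
+ *		if (test)
+ *			kunit_info(test, "called from within a KUnit test");
+ *	}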
+ */
+static inline struct kunit *kunit_get_current_test(void)
+{
+ if (!static_branch_unlikely(&kunit_running))
+ return NULL;
+
+ return current->kunit_test;
+}
+
+
+/**
+ * kunit_fail_current_test() - If a KUnit test is running, fail it.
+ *
+ * If a KUnit test is running in the current task, mark that test as failed.
+ */
+#define kunit_fail_current_test(fmt, ...) do { \
+ if (static_branch_unlikely(&kunit_running)) { \
+		/* Guaranteed to be non-NULL when kunit_running is true. */ \
+ kunit_hooks.fail_current_test(__FILE__, __LINE__, \
+ fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#else
+
+static inline struct kunit *kunit_get_current_test(void) { return NULL; }
+
+#define kunit_fail_current_test(fmt, ...) do {} while (0)
+
+#endif
+
+#endif /* _KUNIT_TEST_BUG_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 59f3144f009a..61637ef32302 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -11,101 +11,86 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
-#include <linux/kernel.h>
+
+#include <linux/args.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/kconfig.h>
+#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/types.h>
-#include <linux/kref.h>
-
-struct kunit_resource;
-typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
-typedef void (*kunit_resource_free_t)(struct kunit_resource *);
-
-/**
- * struct kunit_resource - represents a *test managed resource*
- * @data: for the user to store arbitrary data.
- * @free: a user supplied function to free the resource. Populated by
- * kunit_resource_alloc().
- *
- * Represents a *test managed resource*, a resource which will automatically be
- * cleaned up at the end of a test case.
- *
- * Resources are reference counted so if a resource is retrieved via
- * kunit_alloc_and_get_resource() or kunit_find_resource(), we need
- * to call kunit_put_resource() to reduce the resource reference count
- * when finished with it. Note that kunit_alloc_resource() does not require a
- * kunit_resource_put() because it does not retrieve the resource itself.
- *
- * Example:
- *
- * .. code-block:: c
- *
- * struct kunit_kmalloc_params {
- * size_t size;
- * gfp_t gfp;
- * };
- *
- * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
- * {
- * struct kunit_kmalloc_params *params = context;
- * res->data = kmalloc(params->size, params->gfp);
- *
- * if (!res->data)
- * return -ENOMEM;
- *
- * return 0;
- * }
- *
- * static void kunit_kmalloc_free(struct kunit_resource *res)
- * {
- * kfree(res->data);
- * }
- *
- * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
- * {
- * struct kunit_kmalloc_params params;
- *
- * params.size = size;
- * params.gfp = gfp;
- *
- * return kunit_alloc_resource(test, kunit_kmalloc_init,
- * kunit_kmalloc_free, &params);
- * }
- *
- * Resources can also be named, with lookup/removal done on a name
- * basis also. kunit_add_named_resource(), kunit_find_named_resource()
- * and kunit_destroy_named_resource(). Resource names must be
- * unique within the test instance.
- */
-struct kunit_resource {
- void *data;
- const char *name; /* optional name */
+#include <asm/rwonce.h>
- /* private: internal use only. */
- kunit_resource_free_t free;
- struct kref refcount;
- struct list_head node;
-};
+/* Static key: true if any KUnit tests are currently running */
+DECLARE_STATIC_KEY_FALSE(kunit_running);
struct kunit;
+struct string_stream;
+
+/* Maximum size of parameter description string. */
+#define KUNIT_PARAM_DESC_SIZE 128
-/* Size of log associated with test. */
-#define KUNIT_LOG_SIZE 512
+/* Maximum size of a status comment. */
+#define KUNIT_STATUS_COMMENT_SIZE 256
/*
* TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a
* sub-subtest. See the "Subtests" section in
* https://node-tap.org/tap-protocol/
*/
+#define KUNIT_INDENT_LEN 4
#define KUNIT_SUBTEST_INDENT " "
#define KUNIT_SUBSUBTEST_INDENT " "
/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test suite has neither failed nor been skipped
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+ KUNIT_SUCCESS,
+ KUNIT_FAILURE,
+ KUNIT_SKIPPED,
+};
+
+/* Attribute struct/enum definitions */
+
+/*
+ * The speed attribute is stored as an enum and separated into categories of
+ * speed: very_slow, slow, and normal. These speeds are relative to
+ * other KUnit tests.
+ *
+ * Note: an unset speed attribute defaults to KUNIT_SPEED_NORMAL.
+ */
+enum kunit_speed {
+ KUNIT_SPEED_UNSET,
+ KUNIT_SPEED_VERY_SLOW,
+ KUNIT_SPEED_SLOW,
+ KUNIT_SPEED_NORMAL,
+ KUNIT_SPEED_MAX = KUNIT_SPEED_NORMAL,
+};
+
+/* Holds attributes for each test case and suite */
+struct kunit_attributes {
+ enum kunit_speed speed;
+};
+
+/**
* struct kunit_case - represents an individual test case.
*
* @run_case: the function representing the actual test case.
* @name: the name of the test case.
+ * @generate_params: the generator function for parameterized tests.
+ * @attr: the attributes associated with the test
*
* A test case is a function with the signature,
* ``void (*)(struct kunit *)``
@@ -140,15 +125,25 @@ struct kunit;
struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
+ const void* (*generate_params)(const void *prev, char *desc);
+ struct kunit_attributes attr;
/* private: internal use only. */
- bool success;
- char *log;
+ enum kunit_status status;
+ char *module_name;
+ struct string_stream *log;
};
-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
{
- return status ? "ok" : "not ok";
+ switch (status) {
+ case KUNIT_SKIPPED:
+ case KUNIT_SUCCESS:
+ return "ok";
+ case KUNIT_FAILURE:
+ return "not ok";
+ }
+ return "invalid";
}
/**
@@ -160,33 +155,111 @@ static inline char *kunit_status_to_string(bool status)
* &struct kunit_case object from it. See the documentation for
* &struct kunit_case for an example on how to use it.
*/
-#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+#define KUNIT_CASE(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_ATTR - A helper for creating a &struct kunit_case
+ * with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_ATTR(test_name, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_SLOW - A helper for creating a &struct kunit_case
+ * with the slow attribute
+ *
+ * @test_name: a reference to a test case function.
+ */
+#define KUNIT_CASE_SLOW(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr.speed = KUNIT_SPEED_SLOW, .module_name = KBUILD_MODNAME}
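+
+/*
+ * For example (an illustrative sketch; the example_* test functions are
+ * hypothetical), a test_cases array mixing normal and slow cases:
+ *
+ *	static struct kunit_case example_cases[] = {
+ *		KUNIT_CASE(example_fast_test),
+ *		KUNIT_CASE_SLOW(example_slow_test),
+ *		{}
+ *	};
+ */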
+
+/**
+ * KUNIT_CASE_PARAM - A helper for creation a parameterized &struct kunit_case
+ *
+ * @test_name: a reference to a test case function.
+ * @gen_params: a reference to a parameter generator function.
+ *
+ * The generator function::
+ *
+ * const void* gen_params(const void *prev, char *desc)
+ *
+ * is used to lazily generate a series of arbitrarily typed values that fit into
+ * a void*. The argument @prev is the previously returned value, which should be
+ * used to derive the next value; @prev is set to NULL on the initial generator
+ * call. When no more values are available, the generator must return NULL.
+ * Optionally write a string into @desc (size of KUNIT_PARAM_DESC_SIZE)
+ * describing the parameter.
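+ *
+ * Example generator (a minimal sketch; example_params and
+ * example_gen_params() are hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	static const int example_params[] = { 1, 2, 3 };
+ *
+ *	static const void *example_gen_params(const void *prev, char *desc)
+ *	{
+ *		const int *p = prev ? (const int *)prev + 1 : example_params;
+ *
+ *		if (p == example_params + ARRAY_SIZE(example_params))
+ *			return NULL;
+ *		snprintf(desc, KUNIT_PARAM_DESC_SIZE, "n=%d", *p);
+ *		return p;
+ *	}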
+ */
+#define KUNIT_CASE_PARAM(test_name, gen_params) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_PARAM_ATTR - A helper for creating a parameterized &struct
+ * kunit_case with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @gen_params: a reference to a parameter generator function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_PARAM_ATTR(test_name, gen_params, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
/**
* struct kunit_suite - describes a related collection of &struct kunit_case
*
* @name: the name of the test. Purely informational.
+ * @suite_init: called once per test suite before the test cases.
+ * @suite_exit: called once per test suite after all test cases.
* @init: called before every test case.
* @exit: called after every test case.
* @test_cases: a null terminated array of test cases.
+ * @attr: the attributes associated with the test suite
*
* A kunit_suite is a collection of related &struct kunit_case s, such that
* @init is called before every test case and @exit is called after every
* test case, similar to the notion of a *test fixture* or a *test class*
* in other unit testing frameworks like JUnit or Googletest.
*
+ * Note that @exit and @suite_exit will run even if @init or @suite_init
+ * fail: make sure they can handle any inconsistent state which may result.
+ *
* Every &struct kunit_case must be associated with a kunit_suite for KUnit
* to run it.
*/
struct kunit_suite {
const char name[256];
+ int (*suite_init)(struct kunit_suite *suite);
+ void (*suite_exit)(struct kunit_suite *suite);
int (*init)(struct kunit *test);
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
+ struct kunit_attributes attr;
/* private: internal use only */
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
struct dentry *debugfs;
- char *log;
+ struct string_stream *log;
+ int suite_init_err;
+ bool is_init;
+};
+
+/* Stores an array of suites; 'end' points one past the last element. */
+struct kunit_suite_set {
+ struct kunit_suite * const *start;
+ struct kunit_suite * const *end;
};
/**
@@ -205,8 +278,12 @@ struct kunit {
/* private: internal use only. */
const char *name; /* Read only after initialization! */
- char *log; /* Points at case log after initialization */
+ struct string_stream *log; /* Points at case log after initialization */
struct kunit_try_catch try_catch;
+ /* param_value is the current parameter value for a test case. */
+ const void *param_value;
+ /* param_index stores the index of the parameter in parameterized tests. */
+ int param_index;
/*
* success starts as true, and may only be set to false during a
* test case; thus, it is safe to update this across multiple
@@ -214,17 +291,30 @@ struct kunit {
* be read after the test case finishes once all threads associated
* with the test case have terminated.
*/
- bool success; /* Read only after test_case finishes! */
spinlock_t lock; /* Guards all mutable test state. */
+ enum kunit_status status; /* Read only after test_case finishes! */
/*
* Because resources is a list that may be updated multiple times (with
* new resources) from any thread associated with a test case, we must
* protect it with some type of lock.
*/
struct list_head resources; /* Protected by lock. */
+
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
};
-void kunit_init_test(struct kunit *test, const char *name, char *log);
+static inline void kunit_set_failure(struct kunit *test)
+{
+ WRITE_ONCE(test->status, KUNIT_FAILURE);
+}
+
+bool kunit_enabled(void);
+const char *kunit_action(void);
+const char *kunit_filter_glob(void);
+char *kunit_filter(void);
+char *kunit_filter_action(void);
+
+void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log);
int kunit_run_tests(struct kunit_suite *suite);
@@ -233,277 +323,110 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite);
unsigned int kunit_test_case_num(struct kunit_suite *suite,
struct kunit_case *test_case);
-int __kunit_test_suites_init(struct kunit_suite **suites);
-
-void __kunit_test_suites_exit(struct kunit_suite **suites);
-
-/**
- * kunit_test_suites() - used to register one or more &struct kunit_suite
- * with KUnit.
- *
- * @suites_list...: a statically allocated list of &struct kunit_suite.
- *
- * Registers @suites_list with the test framework. See &struct kunit_suite for
- * more information.
- *
- * When builtin, KUnit tests are all run as late_initcalls; this means
- * that they cannot test anything where tests must run at a different init
- * phase. One significant restriction resulting from this is that KUnit
- * cannot reliably test anything that is initialize in the late_init phase;
- * another is that KUnit is useless to test things that need to be run in
- * an earlier init phase.
- *
- * An alternative is to build the tests as a module. Because modules
- * do not support multiple late_initcall()s, we need to initialize an
- * array of suites for a module.
- *
- * TODO(brendanhiggins@google.com): Don't run all KUnit tests as
- * late_initcalls. I have some future work planned to dispatch all KUnit
- * tests from the same place, and at the very least to do so after
- * everything else is definitely initialized.
- */
-#define kunit_test_suites(suites_list...) \
- static struct kunit_suite *suites[] = {suites_list, NULL}; \
- static int kunit_test_suites_init(void) \
- { \
- return __kunit_test_suites_init(suites); \
- } \
- late_initcall(kunit_test_suites_init); \
- static void __exit kunit_test_suites_exit(void) \
- { \
- return __kunit_test_suites_exit(suites); \
- } \
- module_exit(kunit_test_suites_exit)
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err);
+void kunit_free_suite_set(struct kunit_suite_set suite_set);
-#define kunit_test_suite(suite) kunit_test_suites(&suite)
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites);
-#define kunit_suite_for_each_test_case(suite, test_case) \
- for (test_case = suite->test_cases; test_case->run_case; test_case++)
+void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
-bool kunit_suite_has_succeeded(struct kunit_suite *suite);
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin);
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr);
-/*
- * Like kunit_alloc_resource() below, but returns the struct kunit_resource
- * object that contains the allocation. This is mostly for testing purposes.
- */
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context);
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set);
-/**
- * kunit_get_resource() - Hold resource for use. Should not need to be used
- * by most users as we automatically get resources
- * retrieved by kunit_find_resource*().
- * @res: resource
- */
-static inline void kunit_get_resource(struct kunit_resource *res)
+#if IS_BUILTIN(CONFIG_KUNIT)
+int kunit_run_all_tests(void);
+#else
+static inline int kunit_run_all_tests(void)
{
- kref_get(&res->refcount);
+ return 0;
}
+#endif /* IS_BUILTIN(CONFIG_KUNIT) */
-/*
- * Called when refcount reaches zero via kunit_put_resources();
- * should not be called directly.
- */
-static inline void kunit_release_resource(struct kref *kref)
-{
- struct kunit_resource *res = container_of(kref, struct kunit_resource,
- refcount);
-
- /* If free function is defined, resource was dynamically allocated. */
- if (res->free) {
- res->free(res);
- kfree(res);
- }
-}
-
-/**
- * kunit_put_resource() - When caller is done with retrieved resource,
- * kunit_put_resource() should be called to drop
- * reference count. The resource list maintains
- * a reference count on resources, so if no users
- * are utilizing a resource and it is removed from
- * the resource list, it will be freed via the
- * associated free function (if any). Only
- * needs to be used if we alloc_and_get() or
- * find() resource.
- * @res: resource
- */
-static inline void kunit_put_resource(struct kunit_resource *res)
-{
- kref_put(&res->refcount, kunit_release_resource);
-}
-
-/**
- * kunit_add_resource() - Add a *test managed resource*.
- * @test: The test context object.
- * @init: a user-supplied function to initialize the result (if needed). If
- * none is supplied, the resource data value is simply set to @data.
- * If an init function is supplied, @data is passed to it instead.
- * @free: a user-supplied function to free the resource (if needed).
- * @data: value to pass to init function or set in resource data field.
- */
-int kunit_add_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- void *data);
+#define __kunit_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_test_suites") = { __VA_ARGS__ }
/**
- * kunit_add_named_resource() - Add a named *test managed resource*.
- * @test: The test context object.
- * @init: a user-supplied function to initialize the resource data, if needed.
- * @free: a user-supplied function to free the resource data, if needed.
- * @name_data: name and data to be set for resource.
- */
-int kunit_add_named_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- const char *name,
- void *data);
-
-/**
- * kunit_alloc_resource() - Allocates a *test managed resource*.
- * @test: The test context object.
- * @init: a user supplied function to initialize the resource.
- * @free: a user supplied function to free the resource.
- * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
- * @context: for the user to pass in arbitrary data to the init function.
- *
- * Allocates a *test managed resource*, a resource which will automatically be
- * cleaned up at the end of a test case. See &struct kunit_resource for an
- * example.
- *
- * Note: KUnit needs to allocate memory for a kunit_resource object. You must
- * specify an @internal_gfp that is compatible with the use context of your
- * resource.
+ * kunit_test_suites() - used to register one or more &struct kunit_suite
+ * with KUnit.
+ *
+ * @__suites: a statically allocated list of &struct kunit_suite.
+ *
+ * Registers @suites with the test framework.
+ * This is done by placing the array of struct kunit_suite * in the
+ * .kunit_test_suites ELF section.
+ *
+ * When builtin, KUnit tests are all run via the executor at boot, and when
+ * built as a module, they run on module load.
+ *
*/
-static inline void *kunit_alloc_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context)
-{
- struct kunit_resource *res;
-
- res = kzalloc(sizeof(*res), internal_gfp);
- if (!res)
- return NULL;
+#define kunit_test_suites(__suites...) \
+ __kunit_test_suites(__UNIQUE_ID(array), \
+ ##__suites)
- if (!kunit_add_resource(test, init, free, res, context))
- return res->data;
-
- return NULL;
-}
+#define kunit_test_suite(suite) kunit_test_suites(&suite)
-typedef bool (*kunit_resource_match_t)(struct kunit *test,
- struct kunit_resource *res,
- void *match_data);
+#define __kunit_init_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_init_test_suites") = { __VA_ARGS__ }
/**
- * kunit_resource_instance_match() - Match a resource with the same instance.
- * @test: Test case to which the resource belongs.
- * @res: The resource.
- * @match_data: The resource pointer to match against.
+ * kunit_test_init_section_suites() - used to register one or more &struct
+ * kunit_suite containing init functions or
+ * init data.
*
- * An instance of kunit_resource_match_t that matches a resource whose
- * allocation matches @match_data.
- */
-static inline bool kunit_resource_instance_match(struct kunit *test,
- struct kunit_resource *res,
- void *match_data)
-{
- return res->data == match_data;
-}
-
-/**
- * kunit_resource_name_match() - Match a resource with the same name.
- * @test: Test case to which the resource belongs.
- * @res: The resource.
- * @match_name: The name to match against.
- */
-static inline bool kunit_resource_name_match(struct kunit *test,
- struct kunit_resource *res,
- void *match_name)
-{
- return res->name && strcmp(res->name, match_name) == 0;
-}
-
-/**
- * kunit_find_resource() - Find a resource using match function/data.
- * @test: Test case to which the resource belongs.
- * @match: match function to be applied to resources/match data.
- * @match_data: data to be used in matching.
+ * @__suites: a statically allocated list of &struct kunit_suite.
+ *
+ * This functions similarly to kunit_test_suites(), except that it compiles
+ * the list of suites during the init phase.
+ *
+ * This macro also suffixes the array and suite declarations it makes with
+ * _probe, so that modpost suppresses warnings about referencing init data
+ * for symbols named in this manner.
+ *
+ * Note: these init tests cannot be run after boot, so there is no
+ * "run" debugfs file generated for these tests.
+ *
+ * Also, do not mark the suite or test case structs with __initdata because
+ * they will be used after the init phase with debugfs.
*/
-static inline struct kunit_resource *
-kunit_find_resource(struct kunit *test,
- kunit_resource_match_t match,
- void *match_data)
-{
- struct kunit_resource *res, *found = NULL;
-
- spin_lock(&test->lock);
-
- list_for_each_entry_reverse(res, &test->resources, node) {
- if (match(test, res, (void *)match_data)) {
- found = res;
- kunit_get_resource(found);
- break;
- }
- }
+#define kunit_test_init_section_suites(__suites...) \
+ __kunit_init_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
+ ##__suites)
- spin_unlock(&test->lock);
+#define kunit_test_init_section_suite(suite) \
+ kunit_test_init_section_suites(&suite)
- return found;
-}
-
-/**
- * kunit_find_named_resource() - Find a resource using match name.
- * @test: Test case to which the resource belongs.
- * @name: match name.
- */
-static inline struct kunit_resource *
-kunit_find_named_resource(struct kunit *test,
- const char *name)
-{
- return kunit_find_resource(test, kunit_resource_name_match,
- (void *)name);
-}
-
-/**
- * kunit_destroy_resource() - Find a kunit_resource and destroy it.
- * @test: Test case to which the resource belongs.
- * @match: Match function. Returns whether a given resource matches @match_data.
- * @match_data: Data passed into @match.
- *
- * RETURNS:
- * 0 if kunit_resource is found and freed, -ENOENT if not found.
- */
-int kunit_destroy_resource(struct kunit *test,
- kunit_resource_match_t match,
- void *match_data);
+#define kunit_suite_for_each_test_case(suite, test_case) \
+ for (test_case = suite->test_cases; test_case->run_case; test_case++)
-static inline int kunit_destroy_named_resource(struct kunit *test,
- const char *name)
-{
- return kunit_destroy_resource(test, kunit_resource_name_match,
- (void *)name);
-}
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
/**
- * kunit_remove_resource: remove resource from resource list associated with
- * test.
+ * kunit_kmalloc_array() - Like kmalloc_array() except the allocation is *test managed*.
* @test: The test context object.
- * @res: The resource to be removed.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
*
- * Note that the resource will not be immediately freed since it is likely
- * the caller has a reference to it via alloc_and_get() or find();
- * in this case a final call to kunit_put_resource() is required.
+ * Just like `kmalloc_array(...)`, except the allocation is managed by the test case
+ * and is automatically cleaned up after the test case concludes. See kunit_add_action()
+ * for more information.
+ *
+ * Note that some internal context data is also allocated with GFP_KERNEL,
+ * regardless of the gfp passed in.
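+ *
+ * Example (a minimal sketch):
+ *
+ * .. code-block:: c
+ *
+ *	int *arr = kunit_kmalloc_array(test, 8, sizeof(*arr), GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_NULL(test, arr);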
*/
-void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -511,11 +434,15 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
* @size: The size in bytes of the desired memory.
* @gfp: flags passed to underlying kmalloc().
*
- * Just like `kmalloc(...)`, except the allocation is managed by the test case
- * and is automatically cleaned up after the test case concludes. See &struct
- * kunit_resource for more information.
+ * See kmalloc() and kunit_kmalloc_array() for more information.
+ *
+ * Note that some internal context data is also allocated with GFP_KERNEL,
+ * regardless of the gfp passed in.
*/
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp);
+static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc_array(test, 1, size, gfp);
+}
/**
* kunit_kfree() - Like kfree except for allocations managed by KUnit.
@@ -530,16 +457,66 @@ void kunit_kfree(struct kunit *test, const void *ptr);
* @size: The size in bytes of the desired memory.
* @gfp: flags passed to underlying kmalloc().
*
- * See kzalloc() and kunit_kmalloc() for more information.
+ * See kzalloc() and kunit_kmalloc_array() for more information.
*/
static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
{
return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
}
+/**
+ * kunit_kcalloc() - Just like kunit_kmalloc_array(), but zeroes the allocation.
+ * @test: The test context object.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kcalloc() and kunit_kmalloc_array() for more information.
+ */
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
+}
+
void kunit_cleanup(struct kunit *test);
-void kunit_log_append(char *log, const char *fmt, ...);
+void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...);
+
+/**
+ * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Marks the test as skipped. @fmt is formatted into the test status
+ * comment, typically giving the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
+ */
+#define kunit_mark_skipped(test_or_suite, fmt, ...) \
+ do { \
+ WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
+ scnprintf((test_or_suite)->status_comment, \
+ KUNIT_STATUS_COMMENT_SIZE, \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * kunit_skip() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is formatted into the test status
+ * comment, typically giving the reason the test was skipped.
+ *
+ * Test execution is halted after kunit_skip() is called.
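+ *
+ * Example (a minimal sketch):
+ *
+ * .. code-block:: c
+ *
+ *	static void example_test(struct kunit *test)
+ *	{
+ *		if (!IS_ENABLED(CONFIG_OF))
+ *			kunit_skip(test, "requires CONFIG_OF");
+ *	}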
+ */
+#define kunit_skip(test_or_suite, fmt, ...) \
+ do { \
+ kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
+ kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
+ } while (0)
/*
* printk and log to per-test or per-suite log buffer. Logging only done
@@ -548,7 +525,7 @@ void kunit_log_append(char *log, const char *fmt, ...);
#define kunit_log(lvl, test_or_suite, fmt, ...) \
do { \
printk(lvl fmt, ##__VA_ARGS__); \
- kunit_log_append((test_or_suite)->log, fmt "\n", \
+ kunit_log_append((test_or_suite)->log, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -600,28 +577,38 @@ void kunit_log_append(char *log, const char *fmt, ...);
*/
#define KUNIT_SUCCEED(test) do {} while (0)
-void kunit_do_assertion(struct kunit *test,
- struct kunit_assert *assert,
- bool pass,
- const char *fmt, ...);
-
-#define KUNIT_ASSERTION(test, pass, assert_class, INITIALIZER, fmt, ...) do { \
- struct assert_class __assertion = INITIALIZER; \
- kunit_do_assertion(test, \
- &__assertion.assert, \
- pass, \
- fmt, \
- ##__VA_ARGS__); \
+void __noreturn __kunit_abort(struct kunit *test);
+
+void __printf(6, 7) __kunit_do_failed_assertion(struct kunit *test,
+ const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ const struct kunit_assert *assert,
+ assert_format_t assert_format,
+ const char *fmt, ...);
+
+#define _KUNIT_FAILED(test, assert_type, assert_class, assert_format, INITIALIZER, fmt, ...) do { \
+ static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \
+ const struct assert_class __assertion = INITIALIZER; \
+ __kunit_do_failed_assertion(test, \
+ &__loc, \
+ assert_type, \
+ &__assertion.assert, \
+ assert_format, \
+ fmt, \
+ ##__VA_ARGS__); \
+ if (assert_type == KUNIT_ASSERTION) \
+ __kunit_abort(test); \
} while (0)
#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \
- KUNIT_ASSERTION(test, \
- false, \
- kunit_fail_assert, \
- KUNIT_INIT_FAIL_ASSERT_STRUCT(test, assert_type), \
- fmt, \
- ##__VA_ARGS__)
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_fail_assert, \
+ kunit_fail_assert_format, \
+ {}, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_FAIL() - Always causes a test to fail when evaluated.
@@ -640,21 +627,28 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
+/* Helper to safely pass around an initializer list to other macros. */
+#define KUNIT_INIT_ASSERT(initializers...) { initializers }
+
#define KUNIT_UNARY_ASSERTION(test, \
assert_type, \
- condition, \
- expected_true, \
+ condition_, \
+ expected_true_, \
fmt, \
...) \
- KUNIT_ASSERTION(test, \
- !!(condition) == !!expected_true, \
- kunit_unary_assert, \
- KUNIT_INIT_UNARY_ASSERT_STRUCT(test, \
- assert_type, \
- #condition, \
- expected_true), \
- fmt, \
- ##__VA_ARGS__)
+do { \
+ if (likely(!!(condition_) == !!expected_true_)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_unary_assert, \
+ kunit_unary_assert_format, \
+ KUNIT_INIT_ASSERT(.condition = #condition_, \
+ .expected_true = expected_true_), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
@@ -664,9 +658,6 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
-#define KUNIT_TRUE_ASSERTION(test, assert_type, condition) \
- KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, NULL)
-
#define KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
assert_type, \
@@ -675,9 +666,6 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
-#define KUNIT_FALSE_ASSERTION(test, assert_type, condition) \
- KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, NULL)
-
/*
* A factory macro for defining the assertions and expectations for the basic
* comparisons defined for the built in types.
@@ -694,7 +682,7 @@ void kunit_do_assertion(struct kunit *test,
*/
#define KUNIT_BASE_BINARY_ASSERTION(test, \
assert_class, \
- ASSERT_CLASS_INIT, \
+ format_func, \
assert_type, \
left, \
op, \
@@ -702,354 +690,58 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
- ((void)__typecheck(__left, __right)); \
+ const typeof(left) __left = (left); \
+ const typeof(right) __right = (right); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ if (likely(__left op __right)) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- __left op __right, \
- assert_class, \
- ASSERT_CLASS_INIT(test, \
- assert_type, \
- #op, \
- #left, \
- __left, \
- #right, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ assert_class, \
+ format_func, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, ==, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_NE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, !=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_LT_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, <, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_LE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, <=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_GT_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
+#define KUNIT_BINARY_INT_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
...) \
KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
+ kunit_binary_assert, \
+ kunit_binary_assert_format, \
assert_type, \
- left, >, right, \
+ left, op, right, \
fmt, \
##__VA_ARGS__)
-#define KUNIT_BASE_GE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
+#define KUNIT_BINARY_PTR_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
...) \
KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, >=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_NE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_NE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_LT_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_LT_MSG_ASSERTION(test, \
kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ kunit_binary_ptr_assert_format, \
assert_type, \
- left, \
- right, \
+ left, op, right, \
fmt, \
##__VA_ARGS__)
-#define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_LE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_LE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_GT_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_GT_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_GE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_GE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
#define KUNIT_BINARY_STR_ASSERTION(test, \
assert_type, \
left, \
@@ -1058,60 +750,62 @@ do { \
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
+ const char *__left = (left); \
+ const char *__right = (right); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- strcmp(__left, __right) op 0, \
- kunit_binary_str_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
- assert_type, \
- #op, \
- #left, \
- __left, \
- #right, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_binary_str_assert, \
+ kunit_binary_str_assert_format, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BINARY_STR_ASSERTION(test, \
- assert_type, \
- left, ==, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_STR_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BINARY_STR_ASSERTION(test, \
- assert_type, \
- left, !=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_STR_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
+#define KUNIT_MEM_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ size_, \
+ fmt, \
+ ...) \
+do { \
+ const void *__left = (left); \
+ const void *__right = (right); \
+ const size_t __size = (size_); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ if (likely(__left && __right)) \
+ if (likely(memcmp(__left, __right, __size) op 0)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_mem_assert, \
+ kunit_mem_assert_format, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right, \
+ .size = __size), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
assert_type, \
@@ -1119,25 +813,20 @@ do { \
fmt, \
...) \
do { \
- typeof(ptr) __ptr = (ptr); \
+ const typeof(ptr) __ptr = (ptr); \
\
- KUNIT_ASSERTION(test, \
- !IS_ERR_OR_NULL(__ptr), \
- kunit_ptr_not_err_assert, \
- KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, \
- assert_type, \
- #ptr, \
- __ptr), \
- fmt, \
- ##__VA_ARGS__); \
+ if (!IS_ERR_OR_NULL(__ptr)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_ptr_not_err_assert, \
+ kunit_ptr_not_err_assert_format, \
+ KUNIT_INIT_ASSERT(.text = #ptr, .value = __ptr), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, assert_type, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
- assert_type, \
- ptr, \
- NULL)
-
/**
* KUNIT_EXPECT_TRUE() - Causes a test failure when the expression is not true.
* @test: The test context object.
@@ -1150,7 +839,7 @@ do { \
* *expectation failure*.
*/
#define KUNIT_EXPECT_TRUE(test, condition) \
- KUNIT_TRUE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+ KUNIT_EXPECT_TRUE_MSG(test, condition, NULL)
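
For instance (both the condition and the message are illustrative):

	KUNIT_EXPECT_TRUE(test, ret == 0);
	KUNIT_EXPECT_TRUE_MSG(test, ret == 0, "setup failed: %d", ret);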
#define KUNIT_EXPECT_TRUE_MSG(test, condition, fmt, ...) \
KUNIT_TRUE_MSG_ASSERTION(test, \
@@ -1169,7 +858,7 @@ do { \
* KUNIT_EXPECT_TRUE() for more information.
*/
#define KUNIT_EXPECT_FALSE(test, condition) \
- KUNIT_FALSE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+ KUNIT_EXPECT_FALSE_MSG(test, condition, NULL)
#define KUNIT_EXPECT_FALSE_MSG(test, condition, fmt, ...) \
KUNIT_FALSE_MSG_ASSERTION(test, \
@@ -1190,15 +879,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_EQ(test, left, right) \
- KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_EQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_PTR_EQ() - Expects that pointers @left and @right are equal.
@@ -1212,18 +900,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_PTR_EQ(test, left, right) \
- KUNIT_BINARY_PTR_EQ_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right)
+ KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_NE() - An expectation that @left and @right are not equal.
@@ -1237,15 +921,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_NE(test, left, right) \
- KUNIT_BINARY_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_NE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_PTR_NE() - Expects that pointers @left and @right are not equal.
@@ -1259,18 +942,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_PTR_NE(test, left, right) \
- KUNIT_BINARY_PTR_NE_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right)
+ KUNIT_EXPECT_PTR_NE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_PTR_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_LT() - An expectation that @left is less than @right.
@@ -1284,15 +963,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_LT(test, left, right) \
- KUNIT_BINARY_LT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_LT_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_LT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_LE() - Expects that @left is less than or equal to @right.
@@ -1306,15 +984,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_LE(test, left, right) \
- KUNIT_BINARY_LE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_LE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_GT() - An expectation that @left is greater than @right.
@@ -1328,15 +1005,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_GT(test, left, right) \
- KUNIT_BINARY_GT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_GT_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_GT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_GE() - Expects that @left is greater than or equal to @right.
@@ -1350,15 +1026,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_GE(test, left, right) \
- KUNIT_BINARY_GE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_GE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_GE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_STREQ() - Expects that strings @left and @right are equal.
@@ -1372,15 +1047,14 @@ do { \
* for more information.
*/
#define KUNIT_EXPECT_STREQ(test, left, right) \
- KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_STREQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_STREQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_STRNEQ() - Expects that strings @left and @right are not equal.
@@ -1394,15 +1068,110 @@ do { \
* for more information.
*/
#define KUNIT_EXPECT_STRNEQ(test, left, right) \
- KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_STRNEQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_STRNEQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_MEMEQ() - Expects that the first @size bytes of @left and @right are equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to a pointer to at least @size bytes of memory.
+ * @right: An arbitrary expression that evaluates to a pointer to at least @size bytes of memory.
+ * @size: Number of bytes compared.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !memcmp((@left), (@right), (@size))). See
+ * KUNIT_EXPECT_TRUE() for more information.
+ *
+ * Although this expectation works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This expectation is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_EXPECT_MEMEQ(test, left, right, size) \
+ KUNIT_EXPECT_MEMEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_EXPECT_MEMEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
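
A minimal sketch of the intended use (the data is illustrative):

	static void example_memeq_test(struct kunit *test)
	{
		const u8 expected[] = { 0x01, 0x02, 0x03 };
		u8 actual[] = { 0x01, 0x02, 0x03 };

		KUNIT_EXPECT_MEMEQ(test, actual, expected, sizeof(expected));
	}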
+
+/**
+ * KUNIT_EXPECT_MEMNEQ() - Expects that the first @size bytes of @left and @right are not equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to a pointer to at least @size bytes of memory.
+ * @right: An arbitrary expression that evaluates to a pointer to at least @size bytes of memory.
+ * @size: Number of bytes compared.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, memcmp((@left), (@right), (@size))). See
+ * KUNIT_EXPECT_TRUE() for more information.
+ *
+ * Although this expectation works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This expectation is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_EXPECT_MEMNEQ(test, left, right, size) \
+ KUNIT_EXPECT_MEMNEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_EXPECT_MEMNEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NULL() - Expects that @ptr is null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is null. This is
+ * semantically equivalent to KUNIT_EXPECT_PTR_EQ(@test, ptr, NULL).
+ * See KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_NULL(test, ptr) \
+ KUNIT_EXPECT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_EXPECT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, ==, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NOT_NULL() - Expects that @ptr is not null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is not null. This
+ * is semantically equivalent to KUNIT_EXPECT_PTR_NE(@test, ptr, NULL).
+ * See KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_NOT_NULL(test, ptr) \
+ KUNIT_EXPECT_NOT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_EXPECT_NOT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, !=, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
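
Illustrative usage, assuming a kunit-managed allocation:

	static void example_null_test(struct kunit *test)
	{
		void *p = kunit_kzalloc(test, 16, GFP_KERNEL);

		KUNIT_EXPECT_NOT_NULL(test, p);
	}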
/**
* KUNIT_EXPECT_NOT_ERR_OR_NULL() - Expects that @ptr is not null and not err.
@@ -1415,7 +1184,7 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_EXPECTATION, ptr)
+ KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL)
#define KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -1439,7 +1208,7 @@ do { \
* this is otherwise known as an *assertion failure*.
*/
#define KUNIT_ASSERT_TRUE(test, condition) \
- KUNIT_TRUE_ASSERTION(test, KUNIT_ASSERTION, condition)
+ KUNIT_ASSERT_TRUE_MSG(test, condition, NULL)
#define KUNIT_ASSERT_TRUE_MSG(test, condition, fmt, ...) \
KUNIT_TRUE_MSG_ASSERTION(test, \
@@ -1458,7 +1227,7 @@ do { \
* (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_FALSE(test, condition) \
- KUNIT_FALSE_ASSERTION(test, KUNIT_ASSERTION, condition)
+ KUNIT_ASSERT_FALSE_MSG(test, condition, NULL)
#define KUNIT_ASSERT_FALSE_MSG(test, condition, fmt, ...) \
KUNIT_FALSE_MSG_ASSERTION(test, \
@@ -1478,15 +1247,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_EQ(test, left, right) \
- KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_EQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_PTR_EQ() - Asserts that pointers @left and @right are equal.
@@ -1499,15 +1267,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_PTR_EQ(test, left, right) \
- KUNIT_BINARY_PTR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_NE() - An assertion that @left and @right are not equal.
@@ -1520,15 +1287,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_NE(test, left, right) \
- KUNIT_BINARY_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_NE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_PTR_NE() - Asserts that pointers @left and @right are not equal.
@@ -1542,15 +1308,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_PTR_NE(test, left, right) \
- KUNIT_BINARY_PTR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_PTR_NE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_PTR_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_LT() - An assertion that @left is less than @right.
* @test: The test context object.
@@ -1563,15 +1328,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_LT(test, left, right) \
- KUNIT_BINARY_LT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_LT_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_LE() - An assertion that @left is less than or equal to @right.
* @test: The test context object.
@@ -1584,15 +1348,14 @@ do { \
* KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_LE(test, left, right) \
- KUNIT_BINARY_LE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_LE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_LE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_GT() - An assertion that @left is greater than @right.
@@ -1606,15 +1369,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_GT(test, left, right) \
- KUNIT_BINARY_GT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_GT_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_GE() - Assertion that @left is greater than or equal to @right.
@@ -1628,15 +1390,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_GE(test, left, right) \
- KUNIT_BINARY_GE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_GE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_GE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_STREQ() - An assertion that strings @left and @right are equal.
@@ -1649,15 +1410,14 @@ do { \
* assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_STREQ(test, left, right) \
- KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_STREQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_STREQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_STRNEQ() - Expects that strings @left and @right are not equal.
@@ -1671,15 +1431,56 @@ do { \
* for more information.
*/
#define KUNIT_ASSERT_STRNEQ(test, left, right) \
- KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_STRNEQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_STRNEQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NULL() - Asserts that the pointer @ptr is null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is null. This is
+ * the same as KUNIT_EXPECT_NULL(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NULL(test, ptr) \
+ KUNIT_ASSERT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_ASSERT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, ==, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NOT_NULL() - Asserts that the pointer @ptr is not null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is not null. This
+ * is the same as KUNIT_EXPECT_NOT_NULL(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NOT_NULL(test, ptr) \
+ KUNIT_ASSERT_NOT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_ASSERT_NOT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, !=, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_NOT_ERR_OR_NULL() - Assertion that @ptr is not null and not err.
@@ -1692,7 +1493,7 @@ do { \
* KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_ASSERTION, ptr)
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL)
#define KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -1701,4 +1502,48 @@ do { \
fmt, \
##__VA_ARGS__)
+/**
+ * KUNIT_ARRAY_PARAM() - Define test parameter generator from an array.
+ * @name: prefix for the test parameter generator function.
+ * @array: array of test parameters.
+ * @get_desc: function to convert param to description; NULL to use default
+ *
+ * Define function @name_gen_params which uses @array to generate parameters.
+ */
+#define KUNIT_ARRAY_PARAM(name, array, get_desc) \
+ static const void *name##_gen_params(const void *prev, char *desc) \
+ { \
+ typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (__next - (array) < ARRAY_SIZE((array))) { \
+ void (*__get_desc)(typeof(__next), char *) = get_desc; \
+ if (__get_desc) \
+ __get_desc(__next, desc); \
+ return __next; \
+ } \
+ return NULL; \
+ }
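
A hedged sketch of wiring this generator up; the case struct, data, and
description callback are assumptions for illustration:

	struct example_case {
		int in;
		int expected;
	};

	static const struct example_case example_cases[] = {
		{ .in = 1, .expected = 2 },
		{ .in = 2, .expected = 4 },
	};

	static void example_case_desc(const struct example_case *c, char *desc)
	{
		snprintf(desc, KUNIT_PARAM_DESC_SIZE, "in=%d", c->in);
	}

	KUNIT_ARRAY_PARAM(example, example_cases, example_case_desc);
	/* Then: KUNIT_CASE_PARAM(example_test, example_gen_params) */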
+
+/**
+ * KUNIT_ARRAY_PARAM_DESC() - Define test parameter generator from an array.
+ * @name: prefix for the test parameter generator function.
+ * @array: array of test parameters.
+ * @desc_member: structure member from array element to use as description
+ *
+ * Define function @name_gen_params which uses @array to generate parameters.
+ */
+#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \
+ static const void *name##_gen_params(const void *prev, char *desc) \
+ { \
+ typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (__next - (array) < ARRAY_SIZE((array))) { \
+ strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
+ return __next; \
+ } \
+ return NULL; \
+ }
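
And the same with a description member, all names illustrative:

	struct named_case {
		int in;
		const char *desc;
	};

	static const struct named_case named_cases[] = {
		{ .in = 1, .desc = "one" },
		{ .in = 2, .desc = "two" },
	};

	KUNIT_ARRAY_PARAM_DESC(named, named_cases, desc);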
+
+// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
+// include resource.h themselves if they need it.
+#include <kunit/resource.h>
+
#endif /* _KUNIT_TEST_H */
diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
new file mode 100644
index 000000000000..0dfe35feeec6
--- /dev/null
+++ b/include/kunit/visibility.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to allow symbols to be conditionally visible during KUnit
+ * testing
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_VISIBILITY_H
+#define _KUNIT_VISIBILITY_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ /**
+ * VISIBLE_IF_KUNIT - A macro that expands to "static" if CONFIG_KUNIT
+ * is not enabled, and to nothing otherwise, leaving the symbol
+ * definition unchanged.
+ */
+ #define VISIBLE_IF_KUNIT
+ /**
+ * EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports @symbol into the
+ * EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
+ * enabled. Test files must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
+ * in order to access such symbols.
+ */
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
+ EXPORTED_FOR_KUNIT_TESTING)
+#else
+ #define VISIBLE_IF_KUNIT static
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol)
+#endif
+
+#endif /* _KUNIT_VISIBILITY_H */
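
A hedged usage sketch (file and symbol names are assumptions): the unit
under test drops "static" only when KUnit is enabled, and the test module
imports the namespace:

	/* foo.c */
	VISIBLE_IF_KUNIT int foo_internal_helper(int x)
	{
		return x * 2;
	}
	EXPORT_SYMBOL_IF_KUNIT(foo_internal_helper);

	/* foo_test.c */
	MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);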
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 51c19381108c..c819c5d16613 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -13,6 +13,9 @@
enum kvm_arch_timers {
TIMER_PTIMER,
TIMER_VTIMER,
+ NR_KVM_EL0_TIMERS,
+ TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
+ TIMER_HPTIMER,
NR_KVM_TIMERS
};
@@ -21,17 +24,41 @@ enum kvm_arch_timer_regs {
TIMER_REG_CVAL,
TIMER_REG_TVAL,
TIMER_REG_CTL,
+ TIMER_REG_VOFF,
+};
+
+struct arch_timer_offset {
+ /*
+ * If set, pointer to one of the offsets in the kvm's offset
+ * structure. If NULL, assume a zero offset.
+ */
+ u64 *vm_offset;
+ /*
+ * If set, pointer to one of the offsets in the vcpu's sysreg
+ * array. If NULL, assume a zero offset.
+ */
+ u64 *vcpu_offset;
+};
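
A sketch of how such an offset would presumably be resolved (the helper
name is an assumption; NULL pointers contribute zero):

	static u64 example_timer_offset(const struct arch_timer_offset *offs)
	{
		u64 off = 0;

		if (offs->vm_offset)
			off += *offs->vm_offset;
		if (offs->vcpu_offset)
			off += *offs->vcpu_offset;
		return off;
	}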
+
+struct arch_timer_vm_data {
+ /* Offset applied to the virtual timer/counter */
+ u64 voffset;
+ /* Offset applied to the physical timer/counter */
+ u64 poffset;
+
+ /* The PPI for each timer, global to the VM */
+ u8 ppi[NR_KVM_TIMERS];
};
struct arch_timer_context {
struct kvm_vcpu *vcpu;
- /* Timer IRQ */
- struct kvm_irq_level irq;
-
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
+ u64 ns_frac;
+ /* Offset for this counter/timer */
+ struct arch_timer_offset offset;
/*
* We have multiple paths which can save/restore the timer state onto
* the hardware, so we need some way of keeping track of where the
@@ -39,17 +66,24 @@ struct arch_timer_context {
*/
bool loaded;
+ /* Output level of the timer IRQ */
+ struct {
+ bool level;
+ } irq;
+
/* Duplicated state from arch_timer.c for convenience */
u32 host_timer_irq;
- u32 host_timer_irq_flags;
};
struct timer_map {
struct arch_timer_context *direct_vtimer;
struct arch_timer_context *direct_ptimer;
+ struct arch_timer_context *emul_vtimer;
struct arch_timer_context *emul_ptimer;
};
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
struct arch_timer_cpu {
struct arch_timer_context timers[NR_KVM_TIMERS];
@@ -60,15 +94,17 @@ struct arch_timer_cpu {
bool enabled;
};
-int kvm_timer_hyp_init(bool);
+int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+void kvm_timer_init_vm(struct kvm *kvm);
+
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
@@ -76,8 +112,6 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
-bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
-
u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@@ -85,15 +119,18 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
void kvm_timer_init_vhe(void);
-bool kvm_arch_timer_get_input_level(int vintid);
-
#define vcpu_timer(v) (&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
+#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
+#define vcpu_hptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
+#define timer_vm_data(ctx) (&(ctx)->vcpu->kvm->arch.timer_data)
+#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
+
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg);
@@ -106,4 +143,13 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
+/* CPU HP callbacks */
+void kvm_timer_cpu_up(void);
+void kvm_timer_cpu_down(void);
+
+static inline bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
#endif
diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h
index 0e2509d27910..2df152207ccd 100644
--- a/include/kvm/arm_hypercalls.h
+++ b/include/kvm/arm_hypercalls.h
@@ -6,7 +6,7 @@
#include <asm/kvm_emulate.h>
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu);
static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
@@ -40,4 +40,16 @@ static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
vcpu_set_reg(vcpu, 3, a3);
}
+struct kvm_one_reg;
+
+void kvm_arm_init_hypercalls(struct kvm *kvm);
+void kvm_arm_teardown_hypercalls(struct kvm *kvm);
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 6db030439e29..eb4c369a79eb 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -8,32 +8,47 @@
#define __ASM_ARM_KVM_PMU_H
#include <linux/perf_event.h>
-#include <asm/perf_event.h>
+#include <linux/perf/arm_pmuv3.h>
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
-#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
-
-#ifdef CONFIG_KVM_ARM_PMU
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
};
+struct kvm_pmu_events {
+ u32 events_host;
+ u32 events_guest;
+};
+
struct kvm_pmu {
- int irq_num;
+ struct irq_work overflow_work;
+ struct kvm_pmu_events events;
struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
- DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
- bool ready;
+ int irq_num;
bool created;
bool irq_level;
};
-#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)
+struct arm_pmu_entry {
+ struct list_head entry;
+ struct arm_pmu *arm_pmu;
+};
+
+DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
+static __always_inline bool kvm_arm_support_pmu_v3(void)
+{
+ return static_branch_likely(&kvm_arm_pmu_available);
+}
+
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -47,7 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
-bool kvm_arm_support_pmu_v3(void);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -55,11 +70,41 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
+
+#define kvm_vcpu_has_pmu(vcpu) \
+ (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called, with interrupts disabled, before every vcpu run to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu) \
+ do { \
+ if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
+ vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
+ } while (0)
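
An illustrative call-site shape (the real one lives in the vcpu run path):

	local_irq_disable();
	kvm_pmu_update_vcpu_events(vcpu);
	/* ... enter the guest ... */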
+
+u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
+
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
-#define kvm_arm_pmu_v3_ready(v) (false)
+static inline bool kvm_arm_support_pmu_v3(void)
+{
+ return false;
+}
+
#define kvm_arm_pmu_irq_initialized(v) (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx)
@@ -88,7 +133,6 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
u64 data, u64 select_idx) {}
-static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -108,6 +152,41 @@ static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
return 0;
}
+static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
+{
+ return 0;
+}
+
+#define kvm_vcpu_has_pmu(vcpu) ({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
+static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+ return 0;
+}
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+ return 0;
+}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+ return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
#endif
#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 5b58bd2fe088..e8fb624013d1 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -13,14 +13,11 @@
#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
+#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1)
-#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1
-/*
- * We need the KVM pointer independently from the vcpu as we can call
- * this from HYP, and need to apply kern_hyp_va on it...
- */
-static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
{
/*
* Our PSCI implementation stays the same across versions from
@@ -29,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
* revisions. It is thus safe to return the latest, unless
* userspace has instructed us otherwise.
*/
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+ if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
if (vcpu->kvm->arch.psci_version)
return vcpu->kvm->arch.psci_version;
@@ -42,11 +39,4 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
int kvm_psci_call(struct kvm_vcpu *vcpu);
-struct kvm_one_reg;
-
-int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index a8d8fdcd3723..47035946648e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -5,12 +5,15 @@
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
+#include <linux/xarray.h>
#include <kvm/iodev.h>
#include <linux/list.h>
#include <linux/jump_label.h>
@@ -72,6 +75,9 @@ struct vgic_global {
bool has_gicv4;
bool has_gicv4_1;
+ /* Pseudo GICv3 from outer space */
+ bool no_hw_deactivation;
+
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
@@ -89,9 +95,29 @@ enum vgic_irq_config {
VGIC_CONFIG_LEVEL
};
+/*
+ * Per-irq ops overriding some common behaviours.
+ *
+ * Always called in non-preemptible section and the functions can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
+ */
+struct irq_ops {
+ /* Per interrupt flags for special-cased interrupts */
+ unsigned long flags;
+
+#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */
+
+ /*
+	 * Callback function pointer for in-kernel devices that can tell us the
+	 * state of the input level of a mapped level-triggered IRQ faster than
+	 * peeking into the physical GIC.
+ */
+ bool (*get_input_level)(int vintid);
+};
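
A hypothetical provider of such ops (the level-query helper is assumed):

	static bool example_get_input_level(int vintid)
	{
		return example_device_line_asserted(vintid);	/* assumed helper */
	}

	static struct irq_ops example_irq_ops = {
		.flags = VGIC_IRQ_SW_RESAMPLE,
		.get_input_level = example_get_input_level,
	};

	/* Passed as the ops argument to kvm_vgic_map_phys_irq(). */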
+
struct vgic_irq {
raw_spinlock_t irq_lock; /* Protects the content of the struct */
- struct list_head lpi_list; /* Used to link all LPIs together */
+ struct rcu_head rcu;
struct list_head ap_list;
struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
@@ -126,21 +152,17 @@ struct vgic_irq {
u8 group; /* 0 == group 0, 1 == group 1 */
enum vgic_irq_config config; /* Level or edge */
- /*
- * Callback function pointer to in-kernel devices that can tell us the
- * state of the input level of mapped level-triggered IRQ faster than
- * peaking into the physical GIC.
- *
- * Always called in non-preemptible section and the functions can use
- * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
- * IRQs.
- */
- bool (*get_input_level)(int vintid);
+ struct irq_ops *ops;
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
};
+static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
+{
+ return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
+}
+
struct vgic_register_region;
struct vgic_its;
@@ -210,6 +232,9 @@ struct vgic_dist {
/* Implementation revision as reported in the GICD_IIDR */
u32 implementation_rev;
+#define KVM_VGIC_IMP_REV_2 2 /* GICv2 restorable groups */
+#define KVM_VGIC_IMP_REV_3 3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
+#define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_3
/* Userspace can write to GICv2 IGROUPR */
bool v2_groups_user_writable;
@@ -239,6 +264,7 @@ struct vgic_dist {
struct vgic_io_device dist_iodev;
bool has_its;
+ bool table_write_in_progress;
/*
* Contains the attributes and gpa of the LPI configuration table.
@@ -248,10 +274,10 @@ struct vgic_dist {
*/
u64 propbaser;
- /* Protects the lpi_list and the count value below. */
+ /* Protects the lpi_list. */
raw_spinlock_t lpi_list_lock;
- struct list_head lpi_list_head;
- int lpi_list_count;
+ struct xarray lpi_xa;
+ atomic_t lpi_count;
/* LPI translation cache */
struct list_head lpi_translation_cache;
@@ -322,11 +348,13 @@ struct vgic_cpu {
*/
struct vgic_io_device rd_iodev;
struct vgic_redist_region *rdreg;
+ u32 rdreg_index;
+ atomic_t syncr_busy;
/* Contains the attributes and gpa of the LPI pending tables. */
u64 pendbaser;
-
- bool lpis_enabled;
+ /* GICR_CTLR.{ENABLE_LPIS,RWP} */
+ atomic_t ctlr;
/* Cache guest priority bits */
u32 num_pri_bits;
@@ -338,7 +366,7 @@ struct vgic_cpu {
extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
@@ -348,11 +376,12 @@ int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner);
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int intid, bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid));
+ u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
@@ -402,6 +431,11 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
int vgic_v4_load(struct kvm_vcpu *vcpu);
-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
+void vgic_v4_commit(struct kvm_vcpu *vcpu);
+int vgic_v4_put(struct kvm_vcpu *vcpu);
+
+/* CPU HP callbacks */
+void kvm_vgic_cpu_up(void);
+void kvm_vgic_cpu_down(void);
#endif /* __KVM_ARM_VGIC_H */
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
index d75fc4365746..56619e33251e 100644
--- a/include/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
: -EOPNOTSUPP;
}
-static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
-{
- if (dev->ops->destructor)
- dev->ops->destructor(dev);
-}
-
#endif /* __KVM_IODEV_H__ */
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
deleted file mode 100644
index 600cf45645c6..000000000000
--- a/include/linux/a.out.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __A_OUT_GNU_H__
-#define __A_OUT_GNU_H__
-
-#include <uapi/linux/a.out.h>
-
-#ifndef __ASSEMBLY__
-#ifdef linux
-#include <asm/page.h>
-#if defined(__i386__) || defined(__mc68000__)
-#else
-#ifndef SEGMENT_SIZE
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-#endif
-#endif
-#endif /*__ASSEMBLY__ */
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
index bc70e81895c0..2718c4854815 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -21,7 +21,6 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pid_namespace;
-extern int acct_parm[]; /* for sysctl */
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1e4cdc6c7ae2..34829f2c517a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -10,11 +10,15 @@
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
-#include <linux/irqdomain.h>
#include <linux/resource_ext.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/node.h>
+
+struct irq_domain;
+struct irq_domain_ops;
#ifndef _LINUX
#define _LINUX
@@ -24,10 +28,10 @@
#ifdef CONFIG_ACPI
#include <linux/list.h>
-#include <linux/mod_devicetable.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/fw_table.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -35,6 +39,16 @@
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
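
Illustrative use of the conditional export (the helper is hypothetical):

	int __init_or_acpilib acpi_example_parse(union acpi_subtable_headers *h,
						 void *arg,
						 const unsigned long end)
	{
		return 0;
	}
	EXPORT_SYMBOL_ACPI_LIB(acpi_example_parse);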
+
static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return adev ? adev->handle : NULL;
@@ -55,7 +69,7 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->ops = &acpi_static_fwnode_ops;
+ fwnode_init(fwnode, &acpi_static_fwnode_ops);
return fwnode;
}
@@ -68,19 +82,6 @@ static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
kfree(fwnode);
}
-/**
- * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
- * the PCI-defined class-code information
- *
- * @_cls : the class, subclass, prog-if triple for this device
- * @_msk : the class mask for this device
- *
- * This macro is used to create a struct acpi_device_id that matches a
- * specific PCI class. The .id and .driver_data fields will be left
- * initialized with the default value.
- */
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
-
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_device_node(dev->fwnode);
@@ -105,6 +106,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -129,16 +131,8 @@ enum acpi_address_range_id {
/* Table Handlers */
-union acpi_subtable_headers {
- struct acpi_subtable_header common;
- struct acpi_hmat_structure hmat;
-};
-
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
- const unsigned long end);
-
/* Debugger support */
struct acpi_debugger_ops {
@@ -212,39 +206,46 @@ static inline int acpi_debugger_notify_command_complete(void)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-struct acpi_subtable_proc {
- int id;
- acpi_tbl_entry_handler handler;
- int count;
-};
-
void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
-int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries(char *id,
+ unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries_array(char *id,
+ unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg);
+
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
+static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc)
+{
+ return gicc->flags & ACPI_MADT_ENABLED;
+}
+
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -253,6 +254,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+#if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+void acpi_arch_dma_setup(struct device *dev);
+#else
+static inline void acpi_arch_dma_setup(struct device *dev) { }
+#endif
+
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
#else
@@ -328,7 +335,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
void acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*)(u32));
+void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
unsigned int size,
@@ -379,6 +387,8 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
typedef void (*wmi_notify_handler) (u32 value, void *context);
+int wmi_instance_count(const char *guid);
+
extern acpi_status wmi_evaluate_method(const char *guid, u8 instance,
u32 method_id,
const struct acpi_buffer *in,
@@ -415,33 +425,49 @@ extern int acpi_blacklisted(void);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
+#ifdef CONFIG_ACPI_HMAT
+int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
+#else
+static inline int acpi_get_genport_coordinates(u32 uid,
+ struct access_coordinate *coord)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
/**
- * acpi_map_pxm_to_online_node - Map proximity ID to online node
+ * pxm_to_online_node - Map proximity ID to online node
* @pxm: ACPI proximity ID
*
- * This is similar to acpi_map_pxm_to_node(), but always returns an online
+ * This is similar to pxm_to_node(), but always returns an online
* node. When the mapped node from a given proximity ID is offline, it
* looks up the node distance table and returns the nearest online node.
*
* ACPI device drivers, which are called after the NUMA initialization has
* completed in the kernel, can call this interface to obtain their device
* NUMA topology from ACPI tables. Such drivers do not have to deal with
- * offline nodes. A node may be offline when a device proximity ID is
- * unique, SRAT memory entry does not exist, or NUMA is disabled, ex.
- * "numa=off" on x86.
+ * offline nodes. A node may be offline when the SRAT memory entry does not
+ * exist, or when NUMA is disabled, e.g. "numa=off" on x86.
*/
-static inline int acpi_map_pxm_to_online_node(int pxm)
+static inline int pxm_to_online_node(int pxm)
{
- int node = acpi_map_pxm_to_node(pxm);
+ int node = pxm_to_node(pxm);
return numa_map_to_online_node(node);
}
#else
-static inline int acpi_map_pxm_to_online_node(int pxm)
+static inline int pxm_to_online_node(int pxm)
{
return 0;
}
@@ -454,8 +480,6 @@ static inline int acpi_get_node(acpi_handle handle)
return 0;
}
#endif
-extern int acpi_paddr_to_node(u64 start_addr, u64 size);
-
extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
@@ -466,7 +490,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
@@ -477,6 +501,7 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
void *preproc_data);
int acpi_dev_get_dma_resources(struct acpi_device *adev,
struct list_head *list);
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -493,13 +518,10 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
- u32 level);
-
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
-void __init acpi_no_s4_hw_signature(void);
+extern int acpi_check_s4_hw_signature;
#endif
#ifdef CONFIG_PM_SLEEP
@@ -523,10 +545,16 @@ struct acpi_osc_context {
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+/* Number of _OSC capability DWORDS depends on bridge type */
+#define OSC_PCI_CAPABILITY_DWORDS 3
+#define OSC_CXL_CAPABILITY_DWORDS 5
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */
#define OSC_QUERY_DWORD 0 /* DWORD 1 */
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+#define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */
+#define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
#define OSC_QUERY_ENABLE 0x00000001 /* input */
@@ -546,9 +574,25 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_PRM_SUPPORT 0x00200000
+#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
+extern bool osc_sb_native_usb4_support_confirmed;
+extern bool osc_sb_cppc2_support_acked;
+extern bool osc_cpc_flexible_adr_space_confirmed;
+
+/* USB4 Capabilities */
+#define OSC_USB_USB3_TUNNELING 0x00000001
+#define OSC_USB_DP_TUNNELING 0x00000002
+#define OSC_USB_PCIE_TUNNELING 0x00000004
+#define OSC_USB_XDOMAIN 0x00000008
+
+extern u32 osc_sb_native_usb4_control;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
@@ -558,7 +602,6 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_MSI_SUPPORT 0x00000010
#define OSC_PCI_EDR_SUPPORT 0x00000080
#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
-#define OSC_PCI_SUPPORT_MASKS 0x0000019f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -568,7 +611,29 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
-#define OSC_PCI_CONTROL_MASKS 0x000000bf
+
+/* CXL _OSC: Capabilities DWORD 4: Support Field */
+#define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001
+#define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002
+#define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004
+#define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008
+
+/* CXL _OSC: Capabilities DWORD 5: Control Field */
+#define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001
+
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_CONTROL_DWORD];
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_EXT_CONTROL_DWORD];
+}
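Rough caller sketch (not from the patch; assumes a valid `handle` and uses the standard PCI host bridge _OSC UUID) showing the buffer sized with the new DWORD count and the granted controls read back through the new helper:

	u32 granted, capbuf[OSC_PCI_CAPABILITY_DWORDS] = {
		[OSC_QUERY_DWORD]   = OSC_QUERY_ENABLE,
		[OSC_SUPPORT_DWORD] = OSC_PCI_EXT_CONFIG_SUPPORT,
	};
	struct acpi_osc_context context = {
		.uuid_str    = "33db4d5b-1ff7-401c-9657-7441c03dd766",
		.rev         = 1,
		.cap.length  = sizeof(capbuf),
		.cap.pointer = capbuf,
	};

	if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
		granted = acpi_osc_ctx_get_pci_control(&context);
		kfree(context.ret.pointer);	/* caller owns the returned buffer */
	}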
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
@@ -581,9 +646,6 @@ extern bool osc_pc_lpi_support_confirmed;
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
- u32 *mask, u32 req);
-
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
@@ -640,25 +702,26 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
-extern void arch_post_acpi_subsys_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
void *data);
+const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
+ const struct acpi_device *adev);
+
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
-int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
+int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
- struct property_entry *);
+ const struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -699,6 +762,9 @@ static inline u64 acpi_arch_get_root_pointer(void)
}
#endif
+int acpi_get_local_address(acpi_handle handle, u32 *addr);
+const char *acpi_get_subsystem_id(acpi_handle handle);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -707,7 +773,12 @@ static inline u64 acpi_arch_get_root_pointer(void)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
+
+/* Get rid of -Wunused-variable warnings for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
+
+#include <acpi/acpi_numa.h>
struct fwnode_handle;
@@ -723,10 +794,9 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
-static inline bool
-acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
+static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
- return false;
+ return -ENODEV;
}
static inline struct acpi_device *
@@ -735,34 +805,39 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
return NULL;
}
+static inline bool acpi_reduced_hardware(void)
+{
+ return false;
+}
+
static inline void acpi_dev_put(struct acpi_device *adev) {}
-static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return false;
@@ -805,9 +880,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
@@ -846,6 +924,12 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
struct acpi_device_id;
+static inline const struct acpi_device_id *acpi_match_acpi_device(
+ const struct acpi_device_id *ids, const struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline const struct acpi_device_id *acpi_match_device(
const struct acpi_device_id *ids, const struct device *dev)
{
@@ -863,15 +947,30 @@ static inline bool acpi_driver_match_device(struct device *dev,
return false;
}
+static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 funcs)
+{
+ return false;
+}
+
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
- int rev, int func,
+ u64 rev, u64 func,
union acpi_object *argv4)
{
return NULL;
}
-static inline int acpi_device_uevent_modalias(struct device *dev,
+static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle,
+ const guid_t *guid,
+ u64 rev, u64 func,
+ union acpi_object *argv4,
+ acpi_object_type type)
+{
+ return NULL;
+}
+
+static inline int acpi_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
@@ -883,7 +982,14 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
-static inline bool acpi_dma_supported(struct acpi_device *adev)
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+ const struct property_entry *properties)
+{
+ return NULL;
+}
+
+static inline bool acpi_dma_supported(const struct acpi_device *adev)
{
return false;
}
@@ -893,8 +999,7 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
- u64 *offset, u64 *size)
+static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
return -ENODEV;
}
@@ -937,8 +1042,45 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
return NULL;
}
+static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ return -ENODEV;
+}
+
+static inline const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_register_wakeup_handler(int wake_irq,
+ bool (*wakeup)(void *context), void *context)
+{
+ return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context) { }
+
+struct acpi_osc_context;
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ return false;
+}
+
#endif /* !CONFIG_ACPI */
+extern void arch_post_acpi_subsys_init(void);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_ioapic_add(acpi_handle root);
#else
@@ -957,15 +1099,23 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
-#ifdef CONFIG_X86
-void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
-#else
-static inline void arch_reserve_mem_area(acpi_physical_address addr,
- size_t size)
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
+struct acpi_s2idle_dev_ops {
+ struct list_head list_node;
+ void (*prepare)(void);
+ void (*check)(void);
+ void (*restore)(void);
+};
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+int acpi_get_lps0_constraint(struct acpi_device *adev);
+#else /* CONFIG_SUSPEND && CONFIG_X86 */
+static inline int acpi_get_lps0_constraint(struct acpi_device *adev)
{
+ return ACPI_STATE_UNKNOWN;
}
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_SUSPEND && CONFIG_X86 */
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
@@ -976,15 +1126,23 @@ int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
+bool acpi_dev_state_d0(struct device *dev);
#else
-static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
-static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return 0;
}
+static inline bool acpi_storage_d3(struct device *dev)
+{
+ return false;
+}
+static inline bool acpi_dev_state_d0(struct device *dev)
+{
+ return true;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -997,6 +1155,7 @@ int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_poweroff(struct device *dev);
void acpi_ec_mark_gpe_for_wake(void);
void acpi_ec_set_gpe_wake_mask(u8 action);
+int acpi_subsys_restore_early(struct device *dev);
#else
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
@@ -1005,17 +1164,24 @@ static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
static inline void acpi_ec_mark_gpe_for_wake(void) {}
static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
#ifdef CONFIG_ACPI
+char *acpi_handle_path(acpi_handle handle);
__printf(3, 4)
void acpi_handle_printk(const char *level, acpi_handle handle,
const char *fmt, ...);
+void acpi_evaluation_failure_warn(acpi_handle handle, const char *name,
+ acpi_status status);
#else /* !CONFIG_ACPI */
static inline __printf(3, 4) void
acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+static inline void acpi_evaluation_failure_warn(acpi_handle handle,
+ const char *name,
+ acpi_status status) {}
#endif /* !CONFIG_ACPI */
#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
@@ -1065,19 +1231,45 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable);
#else
static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio)
{
return false;
}
-static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name,
+ int index, bool *wake_capable)
{
return -ENXIO;
}
#endif
+static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index,
+ bool *wake_capable)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
+}
+
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name,
+ int index)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL);
+}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL);
+}
+
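A sketch of the new wake-capable lookup in a driver probe path (the example_ name is hypothetical):

static int example_get_wake_irq(struct acpi_device *adev)
{
	bool wake_capable = false;
	int irq = acpi_dev_gpio_irq_wake_get(adev, 0, &wake_capable);

	if (irq < 0)
		return irq;
	/* wake_capable is set when the GpioInt() resource is WakeCapable */
	return irq;
}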
/* Device properties */
#ifdef CONFIG_ACPI
@@ -1103,22 +1295,13 @@ static inline bool acpi_dev_has_props(const struct acpi_device *adev)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties);
+ union acpi_object *properties);
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev,
- const char *propname, enum dev_prop_type proptype,
- void *val);
-int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname, enum dev_prop_type proptype,
- void *val, size_t nval);
-int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1153,7 +1336,7 @@ struct acpi_probe_entry {
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
- __used __section(__##table##_acpi_probe_table) = { \
+ __used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
@@ -1164,7 +1347,7 @@ struct acpi_probe_entry {
#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
subtable, valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
- __used __section(__##table##_acpi_probe_table) = { \
+ __used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
@@ -1216,37 +1399,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
return -ENXIO;
}
-static inline int acpi_dev_prop_get(const struct acpi_device *adev,
- const char *propname,
- void **valptr)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read_single(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val)
-{
- return -ENXIO;
-}
-
-static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
static inline struct fwnode_handle *
acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
@@ -1255,12 +1407,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode,
}
static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
@@ -1329,12 +1475,21 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+#ifndef arch_get_idle_state_flags
+static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
+{
+ return 0;
+}
+#endif
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_cluster(unsigned int cpu);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
-int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
#else
static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
{
@@ -1344,27 +1499,73 @@ static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_topology_package(unsigned int cpu)
+static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+static inline int find_acpi_cpu_topology_package(unsigned int cpu)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
+static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
{
return -EINVAL;
}
#endif
+#ifdef CONFIG_ARM64
+void acpi_arm_init(void);
+#else
+static inline void acpi_arm_init(void) { }
+#endif
+
+#ifdef CONFIG_ACPI_PCC
+void acpi_init_pcc(void);
+#else
+static inline void acpi_init_pcc(void) { }
+#endif
+
+#ifdef CONFIG_ACPI_FFH
+void acpi_init_ffh(void);
+extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt,
+ void **region_ctxt);
+extern int acpi_ffh_address_space_arch_handler(acpi_integer *value,
+ void *region_context);
+#else
+static inline void acpi_init_ffh(void) { }
+#endif
+
#ifdef CONFIG_ACPI
-extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
#else
-static inline int
-acpi_platform_notify(struct device *dev, enum kobject_action action)
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
+#endif
+
+static inline void acpi_use_parent_companion(struct device *dev)
{
- return 0;
+ ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
+}
+
+#ifdef CONFIG_ACPI_HMAT
+int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access);
+#else
+static inline int hmat_update_target_coordinates(int nid,
+ struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+bool acpi_node_backed_by_real_pxm(int nid);
+#else
+static inline bool acpi_node_backed_by_real_pxm(int nid)
+{
+ return false;
}
#endif
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..898f31d536d4
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wi-Fi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wi-Fi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wi-Fi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: some frequency band updates occurred. Consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
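Consumer-side sketch (not from the patch; the example_ names are hypothetical and the device pointer plus notifier registration are assumed to be set up elsewhere):

static struct device *example_dev;	/* hypothetical consumer device */

static int example_wbrf_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct wbrf_ranges_in_out out = {};

	if (action != WBRF_CHANGED)
		return NOTIFY_DONE;
	if (!amd_wbrf_retrieve_freq_band(example_dev, &out))
		pr_info("wbrf: %llu active band ranges\n", out.num_of_ranges);
	return NOTIFY_OK;
}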
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 20a32120bb88..1cb65592c95d 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,40 +21,50 @@
*/
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+
#ifdef CONFIG_ACPI_IORT
-void acpi_iort_init(void);
u32 iort_msi_map_id(struct device *dev, u32 id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
/* IOMMU interface */
-void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
- const u32 *id_in);
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
+int iort_dma_get_ranges(struct device *dev, u64 *size);
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
+phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
-static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
static inline struct irq_domain *iort_get_device_domain(
struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
+static inline
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
+static inline
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
-static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
- u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure_id(
- struct device *dev, const u32 *id_in)
-{ return NULL; }
+static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
+{ return -ENODEV; }
+static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
+{ return -ENODEV; }
static inline
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
+
+static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
+{ return PHYS_ADDR_MAX; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
new file mode 100644
index 000000000000..8e2eefa9fbc0
--- /dev/null
+++ b/include/linux/acpi_mdio.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ACPI helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_ACPI_MDIO_H
+#define __LINUX_ACPI_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_ACPI_MDIO)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+ return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
+#else /* CONFIG_ACPI_MDIO */
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ /*
+	 * Fall back to plain mdiobus_register() to register the bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+#endif
+
+#endif /* __LINUX_ACPI_MDIO_H */
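Caller sketch (assumed driver context, hypothetical example_ name): an ACPI-described MAC driver can pass its fwnode straight through:

static int example_register_mdio(struct device *dev, struct mii_bus *mdio)
{
	/* ACPI path when CONFIG_ACPI_MDIO=y, plain mdiobus_register() otherwise */
	return acpi_mdiobus_register(mdio, dev_fwnode(dev));
}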
diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h
new file mode 100644
index 000000000000..a5a122431563
--- /dev/null
+++ b/include/linux/acpi_viot.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_VIOT_H__
+#define __ACPI_VIOT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_VIOT
+void __init acpi_viot_early_init(void);
+void __init acpi_viot_init(void);
+int viot_iommu_configure(struct device *dev);
+#else
+static inline void acpi_viot_early_init(void) {}
+static inline void acpi_viot_init(void) {}
+static inline int viot_iommu_configure(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ACPI_VIOT_H__ */
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
new file mode 100644
index 000000000000..c637e0997f6d
--- /dev/null
+++ b/include/linux/adreno-smmu-priv.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef __ADRENO_SMMU_PRIV_H
+#define __ADRENO_SMMU_PRIV_H
+
+#include <linux/io-pgtable.h>
+
+/**
+ * struct adreno_smmu_fault_info - container for key fault information
+ *
+ * @far: The faulting IOVA from ARM_SMMU_CB_FAR
+ * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0
+ * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR
+ * @fsr: The fault status from ARM_SMMU_CB_FSR
+ * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0
+ * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR1
+ * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx)
+ *
+ * This struct passes back key page fault information to the GPU driver
+ * through the get_fault_info function pointer.
+ * The GPU driver can use this information to print informative
+ * log messages and provide deeper GPU-specific insight into the fault.
+ */
+struct adreno_smmu_fault_info {
+ u64 far;
+ u64 ttbr0;
+ u32 contextidr;
+ u32 fsr;
+ u32 fsynr0;
+ u32 fsynr1;
+ u32 cbfrsynra;
+};
+
+/**
+ * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
+ *
+ * @cookie: An opaque token provided by adreno-smmu and passed
+ * back into the callbacks
+ * @get_ttbr1_cfg: Get the TTBR1 config for the GPUs context-bank
+ * @set_ttbr0_cfg: Set the TTBR0 config for the GPUs context bank. A
+ * NULL config disables TTBR0 translation, otherwise
+ * TTBR0 translation is enabled with the specified cfg
+ * @get_fault_info: Called by the GPU fault handler to get information about
+ * the fault
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call
+ * before set_ttbr0_cfg(). If stalling on fault is enabled,
+ * the GPU driver must call resume_translation()
+ * @resume_translation: Resume translation after a fault
+ *
+ *
+ * The GPU driver (drm/msm) and adreno-smmu work together to control
+ * the GPU's SMMU instance. This is necessary because the GPU directly
+ * updates the SMMU for context switches, while we do not want to
+ * duplicate all of the initial setup logic from arm-smmu.
+ *
+ * This private interface is used for the two drivers to coordinate. The
+ * cookie and callback functions are populated when the GPU driver attaches
+ * its domain.
+ */
+struct adreno_smmu_priv {
+ const void *cookie;
+ const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
+ int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+ void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
+ void (*set_stall)(const void *cookie, bool enabled);
+ void (*resume_translation)(const void *cookie, bool terminate);
+};
+
+#endif /* __ADRENO_SMMU_PRIV_H */
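Sketch of the consuming side (hypothetical example_ function; the drm/msm plumbing is assumed):

static void example_report_fault(struct adreno_smmu_priv *priv)
{
	struct adreno_smmu_fault_info info;

	priv->get_fault_info(priv->cookie, &info);
	pr_err("gpu fault: IOVA %#llx FSR %#x\n", info.far, info.fsr);
	/* terminate=false: retry the stalled transaction after handling */
	priv->resume_translation(priv->cookie, false);
}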
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 97f64ba1b34a..4b97f38f3fcf 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -18,11 +18,8 @@
struct pci_dev;
-struct aer_header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
+struct pcie_tlp_log {
+ u32 dw[4];
};
struct aer_capability_regs {
@@ -33,38 +30,27 @@ struct aer_capability_regs {
u32 cor_status;
u32 cor_mask;
u32 cap_control;
- struct aer_header_log_regs header_log;
+ struct pcie_tlp_log header_log;
u32 root_command;
u32 root_status;
u16 cor_err_source;
u16 uncor_err_source;
};
+int pcie_read_tlp_log(struct pci_dev *dev, int where, struct pcie_tlp_log *log);
+
#if defined(CONFIG_PCIEAER)
-/* PCIe port driver needs this function to enable AER */
-int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
-void pci_save_aer_state(struct pci_dev *dev);
-void pci_restore_aer_state(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline void pci_save_aer_state(struct pci_dev *dev) {}
-static inline void pci_restore_aer_state(struct pci_dev *dev) {}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
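Usage sketch for the reworked header log (a hypothetical helper; assumes `pdev` and the AER capability offset are already known):

static void example_dump_tlp(struct pci_dev *pdev, int aer_cap)
{
	struct pcie_tlp_log log;

	if (!pcie_read_tlp_log(pdev, aer_cap + PCI_ERR_HEADER_LOG, &log))
		pci_err(pdev, "TLP: %08x %08x %08x %08x\n",
			log.dw[0], log.dw[1], log.dw[2], log.dw[3]);
}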
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 49e5383d4222..fe0760ce34c8 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -13,6 +13,7 @@
#include <linux/compiler.h>
+struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
@@ -21,8 +22,12 @@ struct scsi_host_template;
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
+ const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
@@ -32,7 +37,7 @@ struct ahci_host_priv *ahci_platform_get_resources(
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
void ahci_platform_shutdown(struct platform_device *pdev);
@@ -41,6 +46,7 @@ int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
-#define AHCI_PLATFORM_GET_RESETS 0x01
+#define AHCI_PLATFORM_GET_RESETS BIT(0)
+#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
#endif /* _AHCI_PLATFORM_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b83e68dd006f..86892a4fe7c8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -20,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-/* for sysctl: */
-extern unsigned long aio_nr;
-extern unsigned long aio_max_nr;
-
#endif /* __LINUX__AIO_H */
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
index 8274ed525e9f..c4a0b23846d8 100644
--- a/include/linux/alcor_pci.h
+++ b/include/linux/alcor_pci.h
@@ -268,13 +268,6 @@ struct alcor_pci_priv {
unsigned long id; /* idr id */
struct alcor_dev_cfg *cfg;
-
- /* PCI ASPM related vars */
- int pdev_cap_off;
- u8 pdev_aspm_cap;
- int parent_cap_off;
- u8 parent_aspm_cap;
- u8 ext_config_dev_aspm;
};
void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr);
diff --git a/include/linux/align.h b/include/linux/align.h
new file mode 100644
index 000000000000..2b4acec7b95a
--- /dev/null
+++ b/include/linux/align.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ALIGN_H
+#define _LINUX_ALIGN_H
+
+#include <linux/const.h>
+
+/* @a must be a power-of-2 value */
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+#endif /* _LINUX_ALIGN_H */
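Quick sketch of the helpers in use (hypothetical function; the alignment argument must be a power of 2, as the comment notes):

static inline size_t example_pad_to_cacheline(size_t len)
{
	BUILD_BUG_ON(!IS_ALIGNED(4096, 64));	/* compile-time sanity check */
	return ALIGN(len, 64);			/* round up to a 64-byte line */
}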
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 0bbfd647f5c6..c60a6a14638c 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -67,18 +67,31 @@ struct amba_device {
struct clk *pclk;
struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
unsigned int cid;
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
- char *driver_override;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
};
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, const struct amba_id *);
- int (*remove)(struct amba_device *);
+ void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
+ /*
+	 * Most device drivers need not care about this flag as long as all
+	 * DMA is handled through the kernel DMA API. Some special drivers,
+	 * for example VFIO drivers, know how to manage DMA themselves and
+	 * set this flag so the IOMMU layer will allow them to set up and
+	 * manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
/*
@@ -90,70 +103,36 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
- AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
-/* This is used to generate pseudo-ID for AMBA device */
-#define AMBA_LINUX_ID(conf, rev, part) \
- (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
- AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
-
extern struct bus_type amba_bustype;
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
+#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+#ifdef CONFIG_ARM_AMBA
int amba_driver_register(struct amba_driver *);
void amba_driver_unregister(struct amba_driver *);
+#else
+static inline int amba_driver_register(struct amba_driver *drv)
+{
+ return -EINVAL;
+}
+static inline void amba_driver_unregister(struct amba_driver *drv)
+{
+}
+#endif
+
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
-struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
void amba_device_unregister(struct amba_device *);
-struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
-static inline int amba_pclk_enable(struct amba_device *dev)
-{
- return clk_enable(dev->pclk);
-}
-
-static inline void amba_pclk_disable(struct amba_device *dev)
-{
- clk_disable(dev->pclk);
-}
-
-static inline int amba_pclk_prepare(struct amba_device *dev)
-{
- return clk_prepare(dev->pclk);
-}
-
-static inline void amba_pclk_unprepare(struct amba_device *dev)
-{
- clk_unprepare(dev->pclk);
-}
-
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
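Driver-side sketch of the converted remove() prototype (example_probe and example_ids are hypothetical placeholders assumed to exist elsewhere):

static void example_remove(struct amba_device *adev)
{
	/* tear down; nothing to return since remove() became void */
}

static struct amba_driver example_driver = {
	.drv      = { .name = "example" },
	.probe    = example_probe,
	.remove   = example_remove,
	.id_table = example_ids,
};
module_amba_driver(example_driver);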
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index c92ebc39fc1f..6f96dc2209c0 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -13,17 +13,11 @@
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @ios_handler: a callback function to act on specfic ios changes,
- * used for example to control a levelshifter
- * mask into a value to be binary (or set some other custom bits
- * in MMCIPWR) or:ed and written into the MMCIPWR register of the
- * block. May also control external power based on the power_mode.
* @status: if no GPIO line was given to the block, this function will
* be called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
- int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
};
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 131b27c97209..d7b07d0311e1 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -16,6 +16,7 @@
#ifndef _SSP_PL022_H
#define _SSP_PL022_H
+#include <linux/dmaengine.h>
#include <linux/types.h>
/**
@@ -223,11 +224,8 @@ struct dma_chan;
/**
* struct pl022_ssp_master - device.platform_data for SPI controller devices.
* @bus_id: identifier for this bus
- * @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects - 1.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
* @enable_dma: if true enables DMA driven transfers.
+ * @dma_filter: callback filter for dma_request_channel.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
* @autosuspend_delay: delay in ms following transfer completion before the
@@ -235,18 +233,15 @@ struct dma_chan;
* indicates no delay and the device will be suspended immediately.
* @rt: indicates the controller should run the message pump with realtime
* priority to minimise the transfer latency on the bus.
- * @chipselects: list of <num_chipselects> chip select gpios
*/
struct pl022_ssp_controller {
u16 bus_id;
- u8 num_chipselect;
u8 enable_dma:1;
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
bool rt;
- int *chipselects;
};
/**
@@ -265,8 +260,6 @@ struct pl022_ssp_controller {
* @duplex: Microwire interface: Full/Half duplex
* @clkdelay: on the PL023 variant, the delay in feedback clock cycles
* before sampling the incoming line
- * @cs_control: function pointer to board-specific function to
- * assert/deassert I/O port to control HW generation of devices chip-select.
*/
struct pl022_config_chip {
enum ssp_interface iface;
@@ -280,7 +273,6 @@ struct pl022_config_chip {
enum ssp_microwire_wait_state wait_state;
enum ssp_duplex duplex;
enum ssp_clkdelay clkdelay;
- void (*cs_control) (u32 control);
};
#endif /* _SSP_PL022_H */
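Board-data sketch using the typed dma_filter_fn (pl08x_filter_id is one in-tree filter; the channel-name parameters are made up):

static struct pl022_ssp_controller example_ssp_plat = {
	.bus_id       = 0,
	.enable_dma   = 1,
	.dma_filter   = pl08x_filter_id,
	.dma_rx_param = "ssp0_rx",
	.dma_tx_param = "ssp0_tx",
};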
diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h
deleted file mode 100644
index b17166e3b49a..000000000000
--- a/include/linux/amba/pl093.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/amba/pl093.h
- *
- * Copyright (c) 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * AMBA PL093 SSMC (synchronous static memory controller)
- * See DDI0236.pdf (r0p4) for more details
-*/
-
-#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */
-
-/* Offsets for SMBxxxxRy registers */
-
-#define SMBIDCYR (0x00)
-#define SMBWSTRDR (0x04)
-#define SMBWSTWRR (0x08)
-#define SMBWSTOENR (0x0C)
-#define SMBWSTWENR (0x10)
-#define SMBCR (0x14)
-#define SMBSR (0x18)
-#define SMBWSTBRDR (0x1C)
-
-/* Masks for SMB registers */
-#define IDCY_MASK (0xf)
-#define WSTRD_MASK (0xf)
-#define WSTWR_MASK (0xf)
-#define WSTOEN_MASK (0xf)
-#define WSTWEN_MASK (0xf)
-
-/* Notes from datasheet:
- * WSTOEN <= WSTRD
- * WSTWEN <= WSTWR
- *
- * WSTOEN is not used with nWAIT
- */
-
-/* SMBCR bit definitions */
-#define SMBCR_BIWRITEEN (1 << 21)
-#define SMBCR_ADDRVALIDWRITEEN (1 << 20)
-#define SMBCR_SYNCWRITE (1 << 17)
-#define SMBCR_BMWRITE (1 << 16)
-#define SMBCR_WRAPREAD (1 << 14)
-#define SMBCR_BIREADEN (1 << 13)
-#define SMBCR_ADDRVALIDREADEN (1 << 12)
-#define SMBCR_SYNCREAD (1 << 9)
-#define SMBCR_BMREAD (1 << 8)
-#define SMBCR_SMBLSPOL (1 << 6)
-#define SMBCR_WP (1 << 3)
-#define SMBCR_WAITEN (1 << 2)
-#define SMBCR_WAITPOL (1 << 1)
-#define SMBCR_RBLE (1 << 0)
-
-#define SMBCR_BURSTLENWRITE_MASK (3 << 18)
-#define SMBCR_BURSTLENWRITE_4 (0 << 18)
-#define SMBCR_BURSTLENWRITE_8 (1 << 18)
-#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18)
-#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18)
-
-#define SMBCR_BURSTLENREAD_MASK (3 << 10)
-#define SMBCR_BURSTLENREAD_4 (0 << 10)
-#define SMBCR_BURSTLENREAD_8 (1 << 10)
-#define SMBCR_BURSTLENREAD_16 (2 << 10)
-#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10)
-
-#define SMBCR_MW_MASK (3 << 4)
-#define SMBCR_MW_8BIT (0 << 4)
-#define SMBCR_MW_16BIT (1 << 4)
-#define SMBCR_MW_M32BIT (2 << 4)
-
-/* SSMC status registers */
-#define SSMCCSR (0x200)
-#define SSMCCR (0x204)
-#define SSMCITCR (0x208)
-#define SSMCITIP (0x20C)
-#define SSMCITIOP (0x210)
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index a1307b58cc2c..9120de05ead0 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -10,6 +10,11 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#endif
+
#include <linux/types.h>
/* -------------------------------------------------------------------------------
@@ -70,141 +75,145 @@
#define ZX_UART011_ICR 0x4c
#define ZX_UART011_DMACR 0x50
-#define UART011_DR_OE (1 << 11)
-#define UART011_DR_BE (1 << 10)
-#define UART011_DR_PE (1 << 9)
-#define UART011_DR_FE (1 << 8)
-
-#define UART01x_RSR_OE 0x08
-#define UART01x_RSR_BE 0x04
-#define UART01x_RSR_PE 0x02
-#define UART01x_RSR_FE 0x01
-
-#define UART011_FR_RI 0x100
-#define UART011_FR_TXFE 0x080
-#define UART011_FR_RXFF 0x040
-#define UART01x_FR_TXFF 0x020
-#define UART01x_FR_RXFE 0x010
-#define UART01x_FR_BUSY 0x008
-#define UART01x_FR_DCD 0x004
-#define UART01x_FR_DSR 0x002
-#define UART01x_FR_CTS 0x001
+#define UART011_DR_OE BIT(11)
+#define UART011_DR_BE BIT(10)
+#define UART011_DR_PE BIT(9)
+#define UART011_DR_FE BIT(8)
+
+#define UART01x_RSR_OE BIT(3)
+#define UART01x_RSR_BE BIT(2)
+#define UART01x_RSR_PE BIT(1)
+#define UART01x_RSR_FE BIT(0)
+
+#define UART011_FR_RI BIT(8)
+#define UART011_FR_TXFE BIT(7)
+#define UART011_FR_RXFF BIT(6)
+#define UART01x_FR_TXFF (1 << 5) /* used in ASM */
+#define UART01x_FR_RXFE BIT(4)
+#define UART01x_FR_BUSY (1 << 3) /* used in ASM */
+#define UART01x_FR_DCD BIT(2)
+#define UART01x_FR_DSR BIT(1)
+#define UART01x_FR_CTS BIT(0)
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
/*
* Some bits of Flag Register on ZTE device have different position from
* standard ones.
*/
-#define ZX_UART01x_FR_BUSY 0x100
-#define ZX_UART01x_FR_DSR 0x008
-#define ZX_UART01x_FR_CTS 0x002
-#define ZX_UART011_FR_RI 0x001
-
-#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
-#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
-#define UART011_CR_OUT2 0x2000 /* OUT2 */
-#define UART011_CR_OUT1 0x1000 /* OUT1 */
-#define UART011_CR_RTS 0x0800 /* RTS */
-#define UART011_CR_DTR 0x0400 /* DTR */
-#define UART011_CR_RXE 0x0200 /* receive enable */
-#define UART011_CR_TXE 0x0100 /* transmit enable */
-#define UART011_CR_LBE 0x0080 /* loopback enable */
-#define UART010_CR_RTIE 0x0040
-#define UART010_CR_TIE 0x0020
-#define UART010_CR_RIE 0x0010
-#define UART010_CR_MSIE 0x0008
-#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
-#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
-#define UART01x_CR_SIREN 0x0002 /* SIR enable */
-#define UART01x_CR_UARTEN 0x0001 /* UART enable */
-
-#define UART011_LCRH_SPS 0x80
+#define ZX_UART01x_FR_BUSY BIT(8)
+#define ZX_UART01x_FR_DSR BIT(3)
+#define ZX_UART01x_FR_CTS BIT(1)
+#define ZX_UART011_FR_RI BIT(0)
+
+#define UART011_CR_CTSEN BIT(15) /* CTS hardware flow control */
+#define UART011_CR_RTSEN BIT(14) /* RTS hardware flow control */
+#define UART011_CR_OUT2 BIT(13) /* OUT2 */
+#define UART011_CR_OUT1 BIT(12) /* OUT1 */
+#define UART011_CR_RTS BIT(11) /* RTS */
+#define UART011_CR_DTR BIT(10) /* DTR */
+#define UART011_CR_RXE BIT(9) /* receive enable */
+#define UART011_CR_TXE BIT(8) /* transmit enable */
+#define UART011_CR_LBE BIT(7) /* loopback enable */
+#define UART010_CR_RTIE BIT(6)
+#define UART010_CR_TIE BIT(5)
+#define UART010_CR_RIE BIT(4)
+#define UART010_CR_MSIE BIT(3)
+#define ST_UART011_CR_OVSFACT BIT(3) /* Oversampling factor */
+#define UART01x_CR_IIRLP BIT(2) /* SIR low power mode */
+#define UART01x_CR_SIREN BIT(1) /* SIR enable */
+#define UART01x_CR_UARTEN BIT(0) /* UART enable */
+
+#define UART011_LCRH_SPS BIT(7)
#define UART01x_LCRH_WLEN_8 0x60
#define UART01x_LCRH_WLEN_7 0x40
#define UART01x_LCRH_WLEN_6 0x20
#define UART01x_LCRH_WLEN_5 0x00
-#define UART01x_LCRH_FEN 0x10
-#define UART01x_LCRH_STP2 0x08
-#define UART01x_LCRH_EPS 0x04
-#define UART01x_LCRH_PEN 0x02
-#define UART01x_LCRH_BRK 0x01
-
-#define ST_UART011_DMAWM_RX_1 (0 << 3)
-#define ST_UART011_DMAWM_RX_2 (1 << 3)
-#define ST_UART011_DMAWM_RX_4 (2 << 3)
-#define ST_UART011_DMAWM_RX_8 (3 << 3)
-#define ST_UART011_DMAWM_RX_16 (4 << 3)
-#define ST_UART011_DMAWM_RX_32 (5 << 3)
-#define ST_UART011_DMAWM_RX_48 (6 << 3)
-#define ST_UART011_DMAWM_TX_1 0
-#define ST_UART011_DMAWM_TX_2 1
-#define ST_UART011_DMAWM_TX_4 2
-#define ST_UART011_DMAWM_TX_8 3
-#define ST_UART011_DMAWM_TX_16 4
-#define ST_UART011_DMAWM_TX_32 5
-#define ST_UART011_DMAWM_TX_48 6
-
-#define UART010_IIR_RTIS 0x08
-#define UART010_IIR_TIS 0x04
-#define UART010_IIR_RIS 0x02
-#define UART010_IIR_MIS 0x01
-
-#define UART011_IFLS_RX1_8 (0 << 3)
-#define UART011_IFLS_RX2_8 (1 << 3)
-#define UART011_IFLS_RX4_8 (2 << 3)
-#define UART011_IFLS_RX6_8 (3 << 3)
-#define UART011_IFLS_RX7_8 (4 << 3)
-#define UART011_IFLS_TX1_8 (0 << 0)
-#define UART011_IFLS_TX2_8 (1 << 0)
-#define UART011_IFLS_TX4_8 (2 << 0)
-#define UART011_IFLS_TX6_8 (3 << 0)
-#define UART011_IFLS_TX7_8 (4 << 0)
+#define UART01x_LCRH_FEN BIT(4)
+#define UART01x_LCRH_STP2 BIT(3)
+#define UART01x_LCRH_EPS BIT(2)
+#define UART01x_LCRH_PEN BIT(1)
+#define UART01x_LCRH_BRK BIT(0)
+
+#define ST_UART011_DMAWM_RX GENMASK(5, 3)
+#define ST_UART011_DMAWM_RX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0)
+#define ST_UART011_DMAWM_RX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1)
+#define ST_UART011_DMAWM_RX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2)
+#define ST_UART011_DMAWM_RX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3)
+#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4)
+#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5)
+#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6)
+#define ST_UART011_DMAWM_TX GENMASK(2, 0)
+#define ST_UART011_DMAWM_TX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0)
+#define ST_UART011_DMAWM_TX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1)
+#define ST_UART011_DMAWM_TX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2)
+#define ST_UART011_DMAWM_TX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3)
+#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4)
+#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5)
+#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6)
+
+#define UART010_IIR_RTIS BIT(3)
+#define UART010_IIR_TIS BIT(2)
+#define UART010_IIR_RIS BIT(1)
+#define UART010_IIR_MIS BIT(0)
+
+#define UART011_IFLS_RXIFLSEL GENMASK(5, 3)
+#define UART011_IFLS_RX1_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0)
+#define UART011_IFLS_RX2_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1)
+#define UART011_IFLS_RX4_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2)
+#define UART011_IFLS_RX6_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3)
+#define UART011_IFLS_RX7_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4)
+#define UART011_IFLS_TXIFLSEL GENMASK(2, 0)
+#define UART011_IFLS_TX1_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0)
+#define UART011_IFLS_TX2_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1)
+#define UART011_IFLS_TX4_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2)
+#define UART011_IFLS_TX6_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3)
+#define UART011_IFLS_TX7_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4)
/* special values for ST vendor with deeper fifo */
-#define UART011_IFLS_RX_HALF (5 << 3)
-#define UART011_IFLS_TX_HALF (5 << 0)
-
-#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
-#define UART011_BEIM (1 << 9) /* break error interrupt mask */
-#define UART011_PEIM (1 << 8) /* parity error interrupt mask */
-#define UART011_FEIM (1 << 7) /* framing error interrupt mask */
-#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */
-#define UART011_TXIM (1 << 5) /* transmit interrupt mask */
-#define UART011_RXIM (1 << 4) /* receive interrupt mask */
-#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */
-#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */
-#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */
-#define UART011_RIMIM (1 << 0) /* RI interrupt mask */
-
-#define UART011_OEIS (1 << 10) /* overrun error interrupt status */
-#define UART011_BEIS (1 << 9) /* break error interrupt status */
-#define UART011_PEIS (1 << 8) /* parity error interrupt status */
-#define UART011_FEIS (1 << 7) /* framing error interrupt status */
-#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */
-#define UART011_TXIS (1 << 5) /* transmit interrupt status */
-#define UART011_RXIS (1 << 4) /* receive interrupt status */
-#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */
-#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */
-#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */
-#define UART011_RIMIS (1 << 0) /* RI interrupt status */
-
-#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */
-#define UART011_BEIC (1 << 9) /* break error interrupt clear */
-#define UART011_PEIC (1 << 8) /* parity error interrupt clear */
-#define UART011_FEIC (1 << 7) /* framing error interrupt clear */
-#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */
-#define UART011_TXIC (1 << 5) /* transmit interrupt clear */
-#define UART011_RXIC (1 << 4) /* receive interrupt clear */
-#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */
-#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */
-#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */
-#define UART011_RIMIC (1 << 0) /* RI interrupt clear */
-
-#define UART011_DMAONERR (1 << 2) /* disable dma on error */
-#define UART011_TXDMAE (1 << 1) /* enable transmit dma */
-#define UART011_RXDMAE (1 << 0) /* enable receive dma */
-
-#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
-#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+#define UART011_IFLS_RX_HALF FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5)
+#define UART011_IFLS_TX_HALF FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5)
+
+#define UART011_OEIM BIT(10) /* overrun error interrupt mask */
+#define UART011_BEIM BIT(9) /* break error interrupt mask */
+#define UART011_PEIM BIT(8) /* parity error interrupt mask */
+#define UART011_FEIM BIT(7) /* framing error interrupt mask */
+#define UART011_RTIM BIT(6) /* receive timeout interrupt mask */
+#define UART011_TXIM BIT(5) /* transmit interrupt mask */
+#define UART011_RXIM BIT(4) /* receive interrupt mask */
+#define UART011_DSRMIM BIT(3) /* DSR interrupt mask */
+#define UART011_DCDMIM BIT(2) /* DCD interrupt mask */
+#define UART011_CTSMIM BIT(1) /* CTS interrupt mask */
+#define UART011_RIMIM BIT(0) /* RI interrupt mask */
+
+#define UART011_OEIS BIT(10) /* overrun error interrupt status */
+#define UART011_BEIS BIT(9) /* break error interrupt status */
+#define UART011_PEIS BIT(8) /* parity error interrupt status */
+#define UART011_FEIS BIT(7) /* framing error interrupt status */
+#define UART011_RTIS BIT(6) /* receive timeout interrupt status */
+#define UART011_TXIS BIT(5) /* transmit interrupt status */
+#define UART011_RXIS BIT(4) /* receive interrupt status */
+#define UART011_DSRMIS BIT(3) /* DSR interrupt status */
+#define UART011_DCDMIS BIT(2) /* DCD interrupt status */
+#define UART011_CTSMIS BIT(1) /* CTS interrupt status */
+#define UART011_RIMIS BIT(0) /* RI interrupt status */
+
+#define UART011_OEIC BIT(10) /* overrun error interrupt clear */
+#define UART011_BEIC BIT(9) /* break error interrupt clear */
+#define UART011_PEIC BIT(8) /* parity error interrupt clear */
+#define UART011_FEIC BIT(7) /* framing error interrupt clear */
+#define UART011_RTIC BIT(6) /* receive timeout interrupt clear */
+#define UART011_TXIC BIT(5) /* transmit interrupt clear */
+#define UART011_RXIC BIT(4) /* receive interrupt clear */
+#define UART011_DSRMIC BIT(3) /* DSR interrupt clear */
+#define UART011_DCDMIC BIT(2) /* DCD interrupt clear */
+#define UART011_CTSMIC BIT(1) /* CTS interrupt clear */
+#define UART011_RIMIC BIT(0) /* RI interrupt clear */
+
+#define UART011_DMAONERR BIT(2) /* disable dma on error */
+#define UART011_TXDMAE BIT(1) /* enable transmit dma */
+#define UART011_RXDMAE BIT(0) /* enable receive dma */
+
+#define UART01x_RSR_ANY (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS)
#ifndef __ASSEMBLY__
struct amba_device; /* in uncompress this is included but amba/bus.h is not */
@@ -220,8 +229,8 @@ struct amba_pl011_data {
bool dma_rx_poll_enable;
unsigned int dma_rx_poll_rate;
unsigned int dma_rx_poll_timeout;
- void (*init) (void);
- void (*exit) (void);
+ void (*init)(void);
+ void (*exit)(void);
};
#endif
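
The conversion above trades open-coded hex constants for BIT(), GENMASK() and FIELD_PREP_CONST() from <linux/bits.h> and <linux/bitfield.h>. A minimal sketch (not part of the patch) showing that the new forms expand to the same values as the old ones; the static_asserts are illustrative only:

/* sanity sketch, assuming <linux/bits.h>, <linux/bitfield.h>, <linux/build_bug.h> */
static_assert(UART011_CR_CTSEN == 0x8000);      /* BIT(15) == old 0x8000 */
static_assert(UART011_IFLS_RX4_8 == (2 << 3));  /* FIELD_PREP_CONST(GENMASK(5, 3), 2) */
static_assert(UART01x_FR_TXFF == (1 << 5));     /* kept open-coded: BIT() is unusable from asm */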
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 21e950e4ab62..2b90c48a6a87 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
+struct amd_iommu;
+
/*
* This is mainly used to communicate information back-and-forth
* between SVM and IOMMU for setting up and tearing down posted
@@ -30,146 +32,7 @@ struct task_struct;
struct pci_dev;
extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
-
-/**
- * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
- * in the IOMMUv2 driver
- * @pdev: The PCI device the workaround is necessary for
- * @erratum: The erratum workaround to enable
- *
- * The function needs to be called before amd_iommu_init_device().
- * Possible values for the erratum number are for now:
- * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
- * is enabled
- * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
- * requests to one
- */
-#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0
-#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1
-
-extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
-
-/**
- * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
- * @pdev: The PCI device to initialize
- * @pasids: Number of PASIDs to support for this device
- *
- * This function does all setup for the device pdev so that it can be
- * used with IOMMUv2.
- * Returns 0 on success or negative value on error.
- */
-extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
-/**
- * amd_iommu_free_device() - Free all IOMMUv2 related device resources
- * and disable IOMMUv2 usage for this device
- * @pdev: The PCI device to disable IOMMUv2 usage for
- */
-extern void amd_iommu_free_device(struct pci_dev *pdev);
-
-/**
- * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
- * @pdev: The PCI device to bind the task to
- * @pasid: The PASID on the device the task should be bound to
- * @task: the task to bind
- *
- * The function returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
- struct task_struct *task);
-
-/**
- * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
- * a device
- * @pdev: The device of the PASID
- * @pasid: The PASID to unbind
- *
- * When this function returns the device is no longer using the PASID
- * and the PASID is no longer bound to its task.
- */
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
-
-/**
- * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
- * PRI requests
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- *
- * The IOMMUv2 driver invokes this call-back when it is unable to
- * successfully handle a PRI request. The device driver can then decide
- * which PRI response the device should see. Possible return values for
- * the call-back are:
- *
- * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
- * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
- * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
- * the device is required to disable
- * PRI when it receives this response
- *
- * The function returns 0 on success or negative value on error.
- */
-#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
-#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
-#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
-
-typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- int pasid,
- unsigned long address,
- u16);
-
-extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
- amd_iommu_invalid_ppr_cb cb);
-
-#define PPR_FAULT_EXEC (1 << 1)
-#define PPR_FAULT_READ (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER (1 << 6)
-#define PPR_FAULT_RSVD (1 << 7)
-#define PPR_FAULT_GN (1 << 8)
-
-/**
- * amd_iommu_device_info() - Get information about IOMMUv2 support of a
- * PCI device
- * @pdev: PCI device to query information from
- * @info: A pointer to an amd_iommu_device_info structure which will contain
- * the information about the PCI device
- *
- * Returns 0 on success, negative value on error
- */
-
-#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
-#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
- on memory pages */
-#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
- super-user privileges */
-
-struct amd_iommu_device_info {
- int max_pasids;
- u32 flags;
-};
-
-extern int amd_iommu_device_info(struct pci_dev *pdev,
- struct amd_iommu_device_info *info);
-
-/**
- * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
- * a pasid context. This call-back is
- * invoked when the IOMMUv2 driver needs to
- * invalidate a PASID context, for example
- * because the task that is bound to that
- * context is about to exit.
- *
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- */
-
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
-
-extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
- amd_iommu_invalidate_ctx cb);
#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
@@ -212,4 +75,20 @@ static inline int amd_iommu_deactivate_guest_mode(void *data)
}
#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+int amd_iommu_get_num_iommus(void);
+bool amd_iommu_pc_supported(void);
+u8 amd_iommu_pc_get_max_banks(unsigned int idx);
+u8 amd_iommu_pc_get_max_counters(unsigned int idx);
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+struct amd_iommu *get_amd_iommu(unsigned int idx);
+
+#ifdef CONFIG_KVM_AMD_SEV
+int amd_iommu_snp_disable(void);
+#else
+static inline int amd_iommu_snp_disable(void) { return 0; }
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_H */
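
The declarations that survive this cleanup form the perf-facing counter interface. A hedged sketch of how a caller might read a counter register, assuming only the prototypes above; the bank/counter/function indices are illustrative placeholders:

static int example_read_iommu_counter(u64 *value)
{
	struct amd_iommu *iommu;

	if (!amd_iommu_pc_supported())
		return -ENODEV;

	iommu = get_amd_iommu(0);	/* first IOMMU instance */
	if (!iommu)
		return -ENODEV;

	/* bank 0, counter 0, function 0 are placeholder indices */
	return amd_iommu_pc_get_reg(iommu, 0, 0, 0, value);
}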
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
new file mode 100644
index 000000000000..b4f818205216
--- /dev/null
+++ b/include/linux/amd-pmf-io.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_PMF_IO_H
+#define AMD_PMF_IO_H
+
+#include <linux/types.h>
+
+/**
+ * enum sfh_message_type - Query the SFH message type
+ * @MT_HPD: Message ID used to query human presence info from the MP2 FW
+ * @MT_ALS: Message ID used to query ambient light info from the MP2 FW
+ */
+enum sfh_message_type {
+ MT_HPD,
+ MT_ALS,
+};
+
+/**
+ * enum sfh_hpd_info - Query the Human presence information
+ * @SFH_NOT_DETECTED: no HPD sensor connection reported by the MP2 FW
+ * @SFH_USER_PRESENT: the HPD sensor reports the user as present
+ * @SFH_USER_AWAY: the HPD sensor reports the user as away
+ */
+enum sfh_hpd_info {
+ SFH_NOT_DETECTED,
+ SFH_USER_PRESENT,
+ SFH_USER_AWAY,
+};
+
+/**
+ * struct amd_sfh_info - HPD sensor info reported by the MP2 FW
+ * @ambient_light: the ambient light information
+ * @user_present: the user presence information
+ */
+struct amd_sfh_info {
+ u32 ambient_light;
+ u8 user_present;
+};
+
+int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
+#endif
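
A minimal sketch of a consumer of this interface, assuming only the declarations above; treating user_present as an enum sfh_hpd_info value is an assumption:

static bool example_user_is_present(void)
{
	struct amd_sfh_info info = {};

	if (amd_get_sfh_info(&info, MT_HPD))
		return false;

	/* assumption: user_present carries an enum sfh_hpd_info value */
	return info.user_present == SFH_USER_PRESENT;
}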
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
new file mode 100644
index 000000000000..d21838835abd
--- /dev/null
+++ b/include/linux/amd-pstate.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/linux/amd-pstate.h
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ */
+
+#ifndef _LINUX_AMD_PSTATE_H
+#define _LINUX_AMD_PSTATE_H
+
+#include <linux/pm_qos.h>
+
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
+/*********************************************************************
+ * AMD P-state INTERFACE *
+ *********************************************************************/
+/**
+ * struct amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * For platforms that do not support the preferred core feature,
+ * highest_perf may be configured as 166 or 255 to avoid the max
+ * frequency being calculated wrongly; we take that fixed value as highest_perf.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @prefcore_ranking: the preferred core ranking; a higher value indicates a
+ *		higher priority.
+ * @max_freq: the frequency that maps to highest_perf
+ * @min_freq: the frequency that maps to lowest_perf
+ * @nominal_freq: the frequency that maps to nominal_perf
+ * @lowest_nonlinear_freq: the frequency that maps to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value
+ * @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @hw_prefcore: check whether HW supports the preferred core feature.
+ *		Only when both hw_prefcore and the early prefcore param are
+ *		true does the AMD P-State driver support the preferred core feature.
+ * @epp_policy: Last saved policy used to set energy-performance preference
+ * @epp_cached: Cached CPPC energy-performance preference value
+ * @policy: Cpufreq policy value
+ * @cppc_cap1_cached: Cached MSR_AMD_CPPC_CAP1 register value
+ *
+ * The amd_cpudata is the key private data for each CPU thread in AMD P-State,
+ * and represents all the attributes and goals that AMD P-State requests at runtime.
+ */
+struct amd_cpudata {
+ int cpu;
+
+ struct freq_qos_request req[2];
+ u64 cppc_req_cached;
+
+ u32 highest_perf;
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
+ u32 prefcore_ranking;
+ u32 min_limit_perf;
+ u32 max_limit_perf;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
+
+ u32 max_freq;
+ u32 min_freq;
+ u32 nominal_freq;
+ u32 lowest_nonlinear_freq;
+
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
+ bool boost_supported;
+ bool hw_prefcore;
+
+	/* EPP feature related attributes */
+ s16 epp_policy;
+ s16 epp_cached;
+ u32 policy;
+ u64 cppc_cap1_cached;
+ bool suspended;
+};
+
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+ NULL,
+};
+#endif /* _LINUX_AMD_PSTATE_H */
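
The mode table is indexed by enum amd_pstate_mode, so a driver can translate between the two; a sketch under that assumption (the example_* name is hypothetical):

static const char *example_mode_to_string(enum amd_pstate_mode mode)
{
	if (mode >= AMD_PSTATE_MAX)
		return NULL;
	return amd_pstate_mode_string[mode];
}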
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index d0d7d96261ad..93a5f16d03f3 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -10,12 +10,21 @@
#define _LINUX_ANON_INODES_H
struct file_operations;
+struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_create_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
+int anon_inode_create_getfd(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
#endif /* _LINUX_ANON_INODES_H */
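
A sketch of typical anon-inode usage, assuming a driver-defined example_fops; the created file stores priv in file->private_data for the fops callbacks to retrieve:

/* example_fops is a hypothetical driver-defined file_operations */
int example_create_fd(void *priv)
{
	return anon_inode_getfd("[example]", &example_fops, priv,
				O_RDWR | O_CLOEXEC);
}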
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
new file mode 100644
index 000000000000..1a9a88b11584
--- /dev/null
+++ b/include/linux/aperture.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name);
+
+int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, name);
+}
+
+#endif
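
A sketch of the intended call site, assuming a PCI graphics driver's probe function; the helper kicks out firmware framebuffers that overlap the device's apertures before the driver takes over (names are illustrative):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = aperture_remove_conflicting_pci_devices(pdev, "exampledrmfb");
	if (ret)
		return ret;

	/* ... continue with normal device initialization ... */
	return 0;
}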
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index ddb10aa67b14..206d97ffda79 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -8,18 +8,154 @@
#define LINUX_APPLE_GMUX_H
#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/pnp.h>
#define GMUX_ACPI_HID "APP000B"
+/*
+ * gmux port offsets. Many of these are not yet used, but may be in the
+ * future, and it's useful to have them documented here anyhow.
+ */
+#define GMUX_PORT_VERSION_MAJOR 0x04
+#define GMUX_PORT_VERSION_MINOR 0x05
+#define GMUX_PORT_VERSION_RELEASE 0x06
+#define GMUX_PORT_SWITCH_DISPLAY 0x10
+#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
+#define GMUX_PORT_INTERRUPT_ENABLE 0x14
+#define GMUX_PORT_INTERRUPT_STATUS 0x16
+#define GMUX_PORT_SWITCH_DDC 0x28
+#define GMUX_PORT_SWITCH_EXTERNAL 0x40
+#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
+#define GMUX_PORT_DISCRETE_POWER 0x50
+#define GMUX_PORT_MAX_BRIGHTNESS 0x70
+#define GMUX_PORT_BRIGHTNESS 0x74
+#define GMUX_PORT_VALUE 0xc2
+#define GMUX_PORT_READ 0xd0
+#define GMUX_PORT_WRITE 0xd4
+
+#define GMUX_MMIO_PORT_SELECT 0x0e
+#define GMUX_MMIO_COMMAND_SEND 0x0f
+
+#define GMUX_MMIO_READ 0x00
+#define GMUX_MMIO_WRITE 0x40
+
+#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
+
+enum apple_gmux_type {
+ APPLE_GMUX_TYPE_PIO,
+ APPLE_GMUX_TYPE_INDEXED,
+ APPLE_GMUX_TYPE_MMIO,
+};
+
#if IS_ENABLED(CONFIG_APPLE_GMUX)
+static inline bool apple_gmux_is_indexed(unsigned long iostart)
+{
+ u16 val;
+
+ outb(0xaa, iostart + 0xcc);
+ outb(0x55, iostart + 0xcd);
+ outb(0x00, iostart + 0xce);
+
+ val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
+ if (val == 0x55aa)
+ return true;
+
+ return false;
+}
+
+static inline bool apple_gmux_is_mmio(unsigned long iostart)
+{
+ u8 __iomem *iomem_base = ioremap(iostart, 16);
+ u8 val;
+
+ if (!iomem_base)
+ return false;
+
+ /*
+ * If this is 0xff, then gmux must not be present, as the gmux would
+ * reset it to 0x00, or it would be one of 0x1, 0x4, 0x41, 0x44 if a
+ * command is currently being processed.
+ */
+ val = ioread8(iomem_base + GMUX_MMIO_COMMAND_SEND);
+ iounmap(iomem_base);
+ return (val != 0xff);
+}
/**
- * apple_gmux_present() - detect if gmux is built into the machine
+ * apple_gmux_detect() - detect if gmux is built into the machine
+ *
+ * @pnp_dev: Device to probe or NULL to use the first matching device
+ * @type_ret: Returns (by reference) the apple_gmux_type of the device
+ *
+ * Detect if a supported gmux device is present by actually probing it.
+ * This avoids the false positives returned on some models by
+ * apple_gmux_present().
+ *
+ * Return: %true if a supported gmux ACPI device is detected and the kernel
+ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ u8 ver_major, ver_minor, ver_release;
+ struct device *dev = NULL;
+ struct acpi_device *adev;
+ struct resource *res;
+ enum apple_gmux_type type = APPLE_GMUX_TYPE_PIO;
+ bool ret = false;
+
+ if (!pnp_dev) {
+ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+ if (!adev)
+ return false;
+
+ dev = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!dev)
+ return false;
+
+ pnp_dev = to_pnp_dev(dev);
+ }
+
+ res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
+ if (res && resource_size(res) >= GMUX_MIN_IO_LEN) {
+ /*
+ * Invalid version information may indicate either that the gmux
+ * device isn't present or that it's a new one that uses indexed io.
+ */
+ ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
+ ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
+ ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
+ if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+ if (apple_gmux_is_indexed(res->start))
+ type = APPLE_GMUX_TYPE_INDEXED;
+ else
+ goto out;
+ }
+ } else {
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (res && apple_gmux_is_mmio(res->start))
+ type = APPLE_GMUX_TYPE_MMIO;
+ else
+ goto out;
+ }
+
+ if (type_ret)
+ *type_ret = type;
+
+ ret = true;
+out:
+ put_device(dev);
+ return ret;
+}
+
+/**
+ * apple_gmux_present() - check if gmux ACPI device is present
*
* Drivers may use this to activate quirks specific to dual GPU MacBook Pros
* and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
*
- * Return: %true if gmux is present and the kernel was configured
+ * Return: %true if gmux ACPI device is present and the kernel was configured
* with CONFIG_APPLE_GMUX, %false otherwise.
*/
static inline bool apple_gmux_present(void)
@@ -34,6 +170,11 @@ static inline bool apple_gmux_present(void)
return false;
}
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ return false;
+}
+
#endif /* !CONFIG_APPLE_GMUX */
#endif /* LINUX_APPLE_GMUX_H */
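
A hedged sketch of probe-time detection using the helper above; passing NULL lets the helper locate the first matching ACPI device itself:

static int example_probe_gmux(void)
{
	enum apple_gmux_type type;

	if (!apple_gmux_detect(NULL, &type))
		return -ENODEV;

	/* type now distinguishes PIO, indexed and MMIO register access */
	return 0;
}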
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
deleted file mode 100644
index 445af2e3cc21..000000000000
--- a/include/linux/apple_bl.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * apple_bl exported symbols
- */
-
-#ifndef _LINUX_APPLE_BL_H
-#define _LINUX_APPLE_BL_H
-
-#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
-
-extern int apple_bl_register(void);
-extern void apple_bl_unregister(void);
-
-#else /* !CONFIG_BACKLIGHT_APPLE */
-
-static inline int apple_bl_register(void)
-{
- return 0;
-}
-
-static inline void apple_bl_unregister(void)
-{
-}
-
-#endif /* !CONFIG_BACKLIGHT_APPLE */
-
-#endif /* _LINUX_APPLE_BL_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 69b1dabe39dc..a63d61ca55af 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -11,6 +11,10 @@
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);
+#ifdef CONFIG_ACPI_CPPC_LIB
+void topology_init_cpu_capacity_cppc(void);
+#endif
+
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
@@ -23,14 +27,38 @@ static inline unsigned long topology_get_cpu_scale(int cpu)
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
-DECLARE_PER_CPU(unsigned long, freq_scale);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
+{
+ return per_cpu(capacity_freq_ref, cpu);
+}
+
+DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu)
{
- return per_cpu(freq_scale, cpu);
+ return per_cpu(arch_freq_scale, cpu);
}
-bool arch_freq_counters_available(struct cpumask *cpus);
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq);
+bool topology_scale_freq_invariant(void);
+
+enum scale_freq_source {
+ SCALE_FREQ_SOURCE_CPUFREQ = 0,
+ SCALE_FREQ_SOURCE_ARCH,
+ SCALE_FREQ_SOURCE_CPPC,
+};
+
+struct scale_freq_data {
+ enum scale_freq_source source;
+ void (*set_freq_scale)(void);
+};
+
+void topology_scale_freq_tick(void);
+void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
+void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
DECLARE_PER_CPU(unsigned long, thermal_pressure);
@@ -39,16 +67,17 @@ static inline unsigned long topology_get_thermal_pressure(int cpu)
return per_cpu(thermal_pressure, cpu);
}
-void topology_set_thermal_pressure(const struct cpumask *cpus,
- unsigned long th_pressure);
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq);
struct cpu_topology {
int thread_id;
int core_id;
+ int cluster_id;
int package_id;
- int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
+ cpumask_t cluster_sibling;
cpumask_t llc_sibling;
};
@@ -56,17 +85,21 @@ struct cpu_topology {
extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
+#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_clustergroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
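
A sketch of registering a frequency-scale source through the hooks added above; example_set_freq_scale() and the registration mask are illustrative:

static void example_set_freq_scale(void)
{
	/* compute and publish this CPU's arch_freq_scale here */
}

static struct scale_freq_data example_sfd = {
	.source = SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale = example_set_freq_scale,
};

static void example_register(void)
{
	topology_set_scale_freq_source(&example_sfd, cpu_possible_mask);
}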
diff --git a/include/linux/args.h b/include/linux/args.h
new file mode 100644
index 000000000000..8ff60a54eb7d
--- /dev/null
+++ b/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 12 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts to 12. Any more than that, and it will return the 13th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
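
Illustrative expansions of the two macros above, which is all the SMCCC changes below rely on:

/*
 *   COUNT_ARGS()        expands to 0
 *   COUNT_ARGS(a)       expands to 1
 *   COUNT_ARGS(a, b, c) expands to 3
 *
 *   CONCATENATE(__declare_arg_, COUNT_ARGS(x, y)) expands to
 *   __declare_arg_2, which is how the SMCCC invocation macros
 *   dispatch on argument count.
 */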
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index d0e44201d855..7f7a576267bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -43,6 +43,8 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
}
#endif
+void cci_enable_port_for_self(void);
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 15c706fb0a37..083f85653716 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,6 +5,7 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/args.h>
#include <linux/init.h>
#include <uapi/linux/const.h>
@@ -49,17 +50,25 @@
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
#define ARM_SMCCC_OWNER_STANDARD_HYP 5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01
+
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
#define ARM_SMCCC_VERSION_1_0 0x10000
#define ARM_SMCCC_VERSION_1_1 0x10001
#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -86,6 +95,52 @@
ARM_SMCCC_SMC_32, \
0, 0x7fff)
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_FUNC_QUERY_CALL_UID)
+
+/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+
+/* KVM "vendor specific" services */
+#define ARM_SMCCC_KVM_FUNC_FEATURES 0
+#define ARM_SMCCC_KVM_FUNC_PTP 1
+#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
+#define ARM_SMCCC_KVM_NUM_FUNCS 128
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_FEATURES)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
+/*
+ * ptp_kvm is a feature used for time sync between vm and host.
+ * ptp_kvm module in guest kernel will get service from host using
+ * this hypercall ID.
+ */
+#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_PTP)
+
+/* ptp_kvm counter type ID */
+#define KVM_PTP_VIRT_COUNTER 0
+#define KVM_PTP_PHYS_COUNTER 1
+
/* Paravirtualised time calls (defined by ARM DEN0057A) */
#define ARM_SMCCC_HV_PV_TIME_FEATURES \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -99,6 +154,37 @@
ARM_SMCCC_OWNER_STANDARD_HYP, \
0x21)
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
/*
* Return codes defined in ARM DEN 0070A
* ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
@@ -141,6 +227,26 @@ u32 arm_smccc_get_version(void);
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+extern u64 smccc_has_sve_hint;
+
+/**
+ * arm_smccc_get_soc_id_version()
+ *
+ * Returns the SOC ID version.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_version(void);
+
+/**
+ * arm_smccc_get_soc_id_revision()
+ *
+ * Returns the SOC ID revision.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_revision(void);
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -152,6 +258,61 @@ struct arm_smccc_res {
unsigned long a3;
};
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long a8;
+ unsigned long a9;
+ unsigned long a10;
+ unsigned long a11;
+ unsigned long a12;
+ unsigned long a13;
+ unsigned long a14;
+ unsigned long a15;
+ unsigned long a16;
+ unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied param structure are copied
+ * to registers prior to the HVC instruction. The return values
+ * are updated with the content from registers on return from the HVC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied param structure are copied
+ * to registers prior to the SMC instruction. The return values
+ * are updated with the content from registers on return from the SMC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+#endif
+
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
@@ -166,6 +327,15 @@ struct arm_smccc_quirk {
};
/**
+ * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
+ *
+ * Sets the SMCCC hint bit to indicate if there is live state in the SVE
+ * registers; this modifies x0 in place and should never be called from C
+ * code.
+ */
+asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
+
+/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
@@ -177,10 +347,20 @@ struct arm_smccc_quirk {
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
@@ -222,95 +402,76 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
-#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
-
-#define __count_args(...) \
- ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __constraint_write_0 \
- "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
-#define __constraint_write_1 \
- "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
-#define __constraint_write_2 \
- "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
-#define __constraint_write_3 \
- "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
-#define __constraint_write_4 __constraint_write_3
-#define __constraint_write_5 __constraint_write_4
-#define __constraint_write_6 __constraint_write_5
-#define __constraint_write_7 __constraint_write_6
-
-#define __constraint_read_0
-#define __constraint_read_1
-#define __constraint_read_2
-#define __constraint_read_3
-#define __constraint_read_4 "r" (r4)
-#define __constraint_read_5 __constraint_read_4, "r" (r5)
-#define __constraint_read_6 __constraint_read_5, "r" (r6)
-#define __constraint_read_7 __constraint_read_6, "r" (r7)
-
-#define __declare_arg_0(a0, res) \
+/* nVHE hypervisor doesn't have a current thread so needs separate checks */
+#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
+
+#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \
+ ARM64_SVE)
+#define smccc_sve_clobbers "x16", "x30", "cc",
+
+#else
+
+#define SMCCC_SVE_CHECK
+#define smccc_sve_clobbers
+
+#endif
+
+#define __constraint_read_2 "r" (arg0)
+#define __constraint_read_3 __constraint_read_2, "r" (arg1)
+#define __constraint_read_4 __constraint_read_3, "r" (arg2)
+#define __constraint_read_5 __constraint_read_4, "r" (arg3)
+#define __constraint_read_6 __constraint_read_5, "r" (arg4)
+#define __constraint_read_7 __constraint_read_6, "r" (arg5)
+#define __constraint_read_8 __constraint_read_7, "r" (arg6)
+#define __constraint_read_9 __constraint_read_8, "r" (arg7)
+
+#define __declare_arg_2(a0, res) \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1"); \
- register unsigned long r2 asm("r2"); \
- register unsigned long r3 asm("r3")
+ register unsigned long arg0 asm("r0") = (u32)a0
-#define __declare_arg_1(a0, a1, res) \
+#define __declare_arg_3(a0, a1, res) \
typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1") = __a1; \
- register unsigned long r2 asm("r2"); \
- register unsigned long r3 asm("r3")
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1
-#define __declare_arg_2(a0, a1, a2, res) \
+#define __declare_arg_4(a0, a1, a2, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1") = __a1; \
- register unsigned long r2 asm("r2") = __a2; \
- register unsigned long r3 asm("r3")
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2
-#define __declare_arg_3(a0, a1, a2, a3, res) \
+#define __declare_arg_5(a0, a1, a2, a3, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
typeof(a3) __a3 = a3; \
struct arm_smccc_res *___res = res; \
- register unsigned long r0 asm("r0") = (u32)a0; \
- register unsigned long r1 asm("r1") = __a1; \
- register unsigned long r2 asm("r2") = __a2; \
- register unsigned long r3 asm("r3") = __a3
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2; \
+ register typeof(a3) arg3 asm("r3") = __a3
-#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+#define __declare_arg_6(a0, a1, a2, a3, a4, res) \
typeof(a4) __a4 = a4; \
- __declare_arg_3(a0, a1, a2, a3, res); \
- register unsigned long r4 asm("r4") = __a4
+ __declare_arg_5(a0, a1, a2, a3, res); \
+ register typeof(a4) arg4 asm("r4") = __a4
-#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, res) \
typeof(a5) __a5 = a5; \
- __declare_arg_4(a0, a1, a2, a3, a4, res); \
- register unsigned long r5 asm("r5") = __a5
+ __declare_arg_6(a0, a1, a2, a3, a4, res); \
+ register typeof(a5) arg5 asm("r5") = __a5
-#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+#define __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res) \
typeof(a6) __a6 = a6; \
- __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
- register unsigned long r6 asm("r6") = __a6
+ __declare_arg_7(a0, a1, a2, a3, a4, a5, res); \
+ register typeof(a6) arg6 asm("r6") = __a6
-#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+#define __declare_arg_9(a0, a1, a2, a3, a4, a5, a6, a7, res) \
typeof(a7) __a7 = a7; \
- __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
- register unsigned long r7 asm("r7") = __a7
-
-#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
-#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
-
-#define ___constraints(count) \
- : __constraint_write_ ## count \
- : __constraint_read_ ## count \
- : "memory"
-#define __constraints(count) ___constraints(count)
+ __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res); \
+ register typeof(a7) arg7 asm("r7") = __a7
/*
* We have an output list that is not necessarily used, and GCC feels
@@ -319,9 +480,18 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define __arm_smccc_1_1(inst, ...) \
do { \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm volatile(inst "\n" \
- __constraints(__count_args(__VA_ARGS__))); \
+ register unsigned long r0 asm("r0"); \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3"); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm volatile(SMCCC_SVE_CHECK \
+ inst "\n" : \
+ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : smccc_sve_clobbers "memory"); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
} while (0)
@@ -365,8 +535,12 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define __fail_smccc_1_1(...) \
do { \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm ("" __constraints(__count_args(__VA_ARGS__))); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm ("" : \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : smccc_sve_clobbers "memory"); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
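
For completeness, a sketch of an SMCCC v1.2 call through the extended-register interface declared earlier; the TRNG function ID comes from this header, while the requested entropy size is illustrative:

#ifdef CONFIG_ARM64
static void example_trng_rnd64(void)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = ARM_SMCCC_TRNG_RND64,
		.a1 = 64,	/* bits of entropy requested (illustrative) */
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0 holds the TRNG status code; res.a1..a3 the entropy */
}
#endif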
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
new file mode 100644
index 000000000000..c906f666ff5d
--- /dev/null
+++ b/include/linux/arm_ffa.h
@@ -0,0 +1,433 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_ARM_FFA_H
+#define _LINUX_ARM_FFA_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+#define FFA_SMC(calling_convention, func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \
+ ARM_SMCCC_OWNER_STANDARD, (func_num))
+
+#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
+#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
+
+#define FFA_ERROR FFA_SMC_32(0x60)
+#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_FN64_SUCCESS FFA_SMC_64(0x61)
+#define FFA_INTERRUPT FFA_SMC_32(0x62)
+#define FFA_VERSION FFA_SMC_32(0x63)
+#define FFA_FEATURES FFA_SMC_32(0x64)
+#define FFA_RX_RELEASE FFA_SMC_32(0x65)
+#define FFA_RXTX_MAP FFA_SMC_32(0x66)
+#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66)
+#define FFA_RXTX_UNMAP FFA_SMC_32(0x67)
+#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68)
+#define FFA_ID_GET FFA_SMC_32(0x69)
+#define FFA_MSG_POLL FFA_SMC_32(0x6A)
+#define FFA_MSG_WAIT FFA_SMC_32(0x6B)
+#define FFA_YIELD FFA_SMC_32(0x6C)
+#define FFA_RUN FFA_SMC_32(0x6D)
+#define FFA_MSG_SEND FFA_SMC_32(0x6E)
+#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F)
+#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F)
+#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70)
+#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70)
+#define FFA_MEM_DONATE FFA_SMC_32(0x71)
+#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71)
+#define FFA_MEM_LEND FFA_SMC_32(0x72)
+#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72)
+#define FFA_MEM_SHARE FFA_SMC_32(0x73)
+#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73)
+#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74)
+#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74)
+#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75)
+#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76)
+#define FFA_MEM_RECLAIM FFA_SMC_32(0x77)
+#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78)
+#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79)
+#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
+#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
+#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D)
+#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E)
+#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F)
+#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80)
+#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81)
+#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82)
+#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83)
+#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83)
+#define FFA_RX_ACQUIRE FFA_SMC_32(0x84)
+#define FFA_SPM_ID_GET FFA_SMC_32(0x85)
+#define FFA_MSG_SEND2 FFA_SMC_32(0x86)
+#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87)
+#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87)
+#define FFA_MEM_PERM_GET FFA_SMC_32(0x88)
+#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
+#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
+#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
+
+/*
+ * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls FFA_FN_NATIVE(name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define FFA_FN_NATIVE(name) FFA_FN64_##name
+#else
+#define FFA_FN_NATIVE(name) FFA_##name
+#endif
+
+/* FFA error codes. */
+#define FFA_RET_SUCCESS (0)
+#define FFA_RET_NOT_SUPPORTED (-1)
+#define FFA_RET_INVALID_PARAMETERS (-2)
+#define FFA_RET_NO_MEMORY (-3)
+#define FFA_RET_BUSY (-4)
+#define FFA_RET_INTERRUPTED (-5)
+#define FFA_RET_DENIED (-6)
+#define FFA_RET_RETRY (-7)
+#define FFA_RET_ABORTED (-8)
+#define FFA_RET_NO_DATA (-9)
+
+/* FFA version encoding */
+#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
+#define FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
+#define FFA_PACK_VERSION_INFO(major, minor) \
+ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
+ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
+#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
+
+/*
+ * The FF-A specification explicitly refers to '4K pages'. This should
+ * not be confused with the kernel PAGE_SIZE, which is the translation
+ * granule the kernel is configured with and may be one of 4K, 16K or 64K.
+ */
+#define FFA_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
+ * query for FFA_RXTX_MAP.
+ */
+#define FFA_FEAT_RXTX_MIN_SZ_4K 0
+#define FFA_FEAT_RXTX_MIN_SZ_64K 1
+#define FFA_FEAT_RXTX_MIN_SZ_16K 2
+
+/* FFA Bus/Device/Driver related */
+struct ffa_device {
+ u32 id;
+ int vm_id;
+ bool mode_32bit;
+ uuid_t uuid;
+ struct device dev;
+ const struct ffa_ops *ops;
+};
+
+#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
+
+struct ffa_device_id {
+ uuid_t uuid;
+};
+
+struct ffa_driver {
+ const char *name;
+ int (*probe)(struct ffa_device *sdev);
+ void (*remove)(struct ffa_device *sdev);
+ const struct ffa_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
+
+static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
+{
+ dev_set_drvdata(&fdev->dev, data);
+}
+
+static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+{
+ return dev_get_drvdata(&fdev->dev);
+}
+
+#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops);
+void ffa_device_unregister(struct ffa_device *ffa_dev);
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name);
+void ffa_driver_unregister(struct ffa_driver *driver);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+
+#else
+static inline
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
+{
+ return NULL;
+}
+
+static inline void ffa_device_unregister(struct ffa_device *dev) {}
+
+static inline int
+ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
+
+static inline
+bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
+
+#endif /* CONFIG_ARM_FFA_TRANSPORT */
+
+#define ffa_register(driver) \
+ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define ffa_unregister(driver) \
+ ffa_driver_unregister(driver)
+
+/**
+ * module_ffa_driver() - Helper macro for registering a psa_ffa driver
+ * @__ffa_driver: ffa_driver structure
+ *
+ * Helper macro for psa_ffa drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_ffa_driver(__ffa_driver) \
+ module_driver(__ffa_driver, ffa_register, ffa_unregister)
+
+extern const struct bus_type ffa_bus_type;
+
+/* FFA transport related */
+struct ffa_partition_info {
+ u16 id;
+ u16 exec_ctxt;
+/* partition supports receipt of direct requests */
+#define FFA_PARTITION_DIRECT_RECV BIT(0)
+/* partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_SEND BIT(1)
+/* partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC BIT(8)
+ u32 properties;
+ u32 uuid[4];
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
+struct ffa_send_direct_data {
+ unsigned long data0; /* w3/x3 */
+ unsigned long data1; /* w4/x4 */
+ unsigned long data2; /* w5/x5 */
+ unsigned long data3; /* w6/x6 */
+ unsigned long data4; /* w7/x7 */
+};
+
+struct ffa_mem_region_addr_range {
+ /* The base IPA of the constituent memory region, aligned to 4 kiB */
+ u64 address;
+ /* The number of 4 kiB pages in the constituent memory region. */
+ u32 pg_cnt;
+ u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+ /*
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `struct ffa_mem_region_addr_range`.
+ */
+ u32 total_pg_cnt;
+ /* The number of constituents included in this memory region range */
+ u32 addr_range_cnt;
+ u64 reserved;
+	/* An array of `addr_range_cnt` memory region constituents. */
+ struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+ /* The ID of the VM to which the memory is being given or shared. */
+ u16 receiver;
+ /*
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+#define FFA_MEM_EXEC BIT(3)
+#define FFA_MEM_NO_EXEC BIT(2)
+#define FFA_MEM_RW BIT(1)
+#define FFA_MEM_RO BIT(0)
+ u8 attrs;
+ /*
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+ u8 flag;
+ /*
+	 * Offset in bytes from the start of the outer `ffa_memory_region` to
+	 * a `struct ffa_composite_mem_region`.
+ */
+ u32 composite_off;
+ u64 reserved;
+};
+
+struct ffa_mem_region {
+ /* The ID of the VM/owner which originally sent the memory region */
+ u16 sender_id;
+#define FFA_MEM_NORMAL BIT(5)
+#define FFA_MEM_DEVICE BIT(4)
+
+#define FFA_MEM_WRITE_BACK (3 << 2)
+#define FFA_MEM_NON_CACHEABLE (1 << 2)
+
+#define FFA_DEV_nGnRnE (0 << 2)
+#define FFA_DEV_nGnRE (1 << 2)
+#define FFA_DEV_nGRE (2 << 2)
+#define FFA_DEV_GRE (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE (0)
+#define FFA_MEM_OUTER_SHAREABLE (2)
+#define FFA_MEM_INNER_SHAREABLE (3)
+ /* Memory region attributes, upper byte MBZ pre v1.1 */
+ u16 attributes;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
+ /* Flags to control behaviour of the transaction. */
+ u32 flags;
+#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h) \
+ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+ /*
+ * A globally-unique ID assigned by the hypervisor for a region
+ * of memory being sent between VMs.
+ */
+ u64 handle;
+ /*
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ u64 tag;
+ /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
+ u32 ep_mem_size;
+ /*
+ * The number of `ffa_mem_region_attributes` entries included in this
+ * transaction.
+ */
+ u32 ep_count;
+ /*
+	 * 16-byte aligned offset from the base address of this descriptor
+	 * to the first element of the endpoint memory access descriptor
+	 * array. Valid only from v1.1.
+ */
+ u32 ep_mem_offset;
+ /* MBZ, valid only from v1.1 */
+ u32 reserved[3];
+};
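
The HANDLE_LOW()/HANDLE_HIGH()/PACK_HANDLE() helpers exist because the ABI carries the 64-bit handle split across two 32-bit register values. A round-trip, purely illustrative:

    u64 handle = 0x1122334455667788ULL;
    u32 lo = HANDLE_LOW(handle);	/* 0x55667788 */
    u32 hi = HANDLE_HIGH(handle);	/* 0x11223344 */

    /* reassembling on the other side recovers the original value */
    WARN_ON(PACK_HANDLE(lo, hi) != handle);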
+
+#define CONSTITUENTS_OFFSET(x) \
+ (offsetof(struct ffa_composite_mem_region, constituents[x]))
+
+static inline u32
+ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
+{
+ u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+	/*
+	 * Prior to v1.1, the endpoint memory descriptor array started at
+	 * offset 32 (i.e. the offset of ep_mem_offset in the current
+	 * structure).
+	 */
+ if (ffa_version <= FFA_VERSION_1_0)
+ offset += offsetof(struct ffa_mem_region, ep_mem_offset);
+ else
+ offset += sizeof(struct ffa_mem_region);
+
+ return offset;
+}
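
Worked example: sizeof(struct ffa_mem_region_attributes) is 16 bytes and struct ffa_mem_region is 48, so for a single receiver the helper returns (assuming the FFA_VERSION_1_0/FFA_VERSION_1_1 constants defined earlier in this header; buf is a hypothetical descriptor pointer):

    u32 off_v10 = ffa_mem_desc_offset(buf, 1, FFA_VERSION_1_0); /* 32 + 16 = 48 */
    u32 off_v11 = ffa_mem_desc_offset(buf, 1, FFA_VERSION_1_1); /* 48 + 16 = 64 */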
+
+struct ffa_mem_ops_args {
+ bool use_txbuf;
+ u32 nattrs;
+ u32 flags;
+ u64 tag;
+ u64 g_handle;
+ struct scatterlist *sg;
+ struct ffa_mem_region_attributes *attrs;
+};
+
+struct ffa_info_ops {
+ u32 (*api_version_get)(void);
+ int (*partition_info_get)(const char *uuid_str,
+ struct ffa_partition_info *buffer);
+};
+
+struct ffa_msg_ops {
+ void (*mode_32bit_set)(struct ffa_device *dev);
+ int (*sync_send_receive)(struct ffa_device *dev,
+ struct ffa_send_direct_data *data);
+};
+
+struct ffa_mem_ops {
+ int (*memory_reclaim)(u64 g_handle, u32 flags);
+ int (*memory_share)(struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_mem_ops_args *args);
+};
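
A lender might drive these ops roughly as below. Hedged sketch: error handling is trimmed, the scatterlist is assumed already populated, and dev->ops is taken to be the ffa_ops pointer the bus stores in each device.

    static int example_share(struct ffa_device *dev, struct scatterlist *sg,
    			 struct ffa_mem_region_attributes *attr)
    {
    	const struct ffa_mem_ops *ops = dev->ops->mem_ops;
    	struct ffa_mem_ops_args args = {
    		.use_txbuf	= true,
    		.nattrs		= 1,
    		.attrs		= attr,
    		.sg		= sg,
    	};
    	int ret;

    	ret = ops->memory_share(&args);
    	if (ret)
    		return ret;

    	/* args.g_handle now holds the hypervisor-assigned handle */
    	return ops->memory_reclaim(args.g_handle, 0);
    }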
+
+struct ffa_cpu_ops {
+ int (*run)(struct ffa_device *dev, u16 vcpu);
+};
+
+typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
+typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+
+struct ffa_notifier_ops {
+ int (*sched_recv_cb_register)(struct ffa_device *dev,
+ ffa_sched_recv_cb cb, void *cb_data);
+ int (*sched_recv_cb_unregister)(struct ffa_device *dev);
+ int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id);
+ int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
+ u16 vcpu);
+};
+
+struct ffa_ops {
+ const struct ffa_info_ops *info_ops;
+ const struct ffa_msg_ops *msg_ops;
+ const struct ffa_mem_ops *mem_ops;
+ const struct ffa_cpu_ops *cpu_ops;
+ const struct ffa_notifier_ops *notifier_ops;
+};
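
Tying the message ops to the register layout of struct ffa_send_direct_data, a synchronous direct request could be issued as follows (illustrative sketch; the payload value and the success check are arbitrary):

    static int example_direct_ping(struct ffa_device *dev)
    {
    	struct ffa_send_direct_data data = {
    		.data0 = 0xf00d,	/* transmitted in w3/x3 */
    	};
    	int ret;

    	ret = dev->ops->msg_ops->sync_send_receive(dev, &data);
    	if (ret)
    		return ret;

    	/* on return, data0..data4 carry the response registers w3-w7 */
    	return data.data0 ? 0 : -EINVAL;
    }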
+
+#endif /* _LINUX_ARM_FFA_H */
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 0a241c5c911d..255701e1251b 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -46,9 +46,13 @@ int sdei_unregister_ghes(struct ghes *ghes);
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
+void __init sdei_init(void);
+void sdei_handler_abort(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
+static inline void sdei_init(void) { }
+static inline void sdei_handler_abort(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h
index 57bb54f6767a..ef4bd705eb65 100644
--- a/include/linux/armada-37xx-rwtm-mailbox.h
+++ b/include/linux/armada-37xx-rwtm-mailbox.h
@@ -2,7 +2,7 @@
/*
* rWTM BIU Mailbox driver for Armada 37xx
*
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
*/
#ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
new file mode 100644
index 000000000000..06d7d83196ca
--- /dev/null
+++ b/include/linux/array_size.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ARRAY_SIZE_H
+#define _LINUX_ARRAY_SIZE_H
+
+#include <linux/compiler.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#endif /* _LINUX_ARRAY_SIZE_H */
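
The __must_be_array() term evaluates to 0 for a true array but breaks the build when arr is really a pointer, which is the whole point of the macro; a quick illustration:

    static const int primes[] = { 2, 3, 5, 7 };
    static const size_t n = ARRAY_SIZE(primes);	/* 4 */

    /*
     * const int *p = primes;
     * ARRAY_SIZE(p);	-- compile error rather than sizeof(p)/sizeof(int)
     */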
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
index 4cc40201273e..83ad775ad0aa 100644
--- a/include/linux/ascii85.h
+++ b/include/linux/ascii85.h
@@ -8,7 +8,8 @@
#ifndef _ASCII85_H_
#define _ASCII85_H_
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/types.h>
#define ASCII85_BUFSZ 6
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
new file mode 100644
index 000000000000..08cd0c2ad34f
--- /dev/null
+++ b/include/linux/asn1_encoder.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ASN1_ENCODER_H
+#define _LINUX_ASN1_ENCODER_H
+
+#include <linux/types.h>
+#include <linux/asn1.h>
+#include <linux/asn1_ber_bytecode.h>
+#include <linux/bug.h>
+
+#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer);
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len);
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len);
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len);
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len);
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val);
+
+#endif
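
Each encoder returns the advanced write pointer on success or an ERR_PTR() on failure, so calls chain naturally. A hedged sketch (buffer size and encoded values are arbitrary):

    static ssize_t example_encode(unsigned char *buf, size_t len)
    {
    	unsigned char *p = buf, *end = buf + len;

    	p = asn1_encode_integer(p, end, 42);
    	if (IS_ERR(p))
    		return PTR_ERR(p);

    	p = asn1_encode_boolean(p, end, true);
    	if (IS_ERR(p))
    		return PTR_ERR(p);

    	return p - buf;	/* number of DER bytes emitted */
    }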
diff --git a/include/linux/async.h b/include/linux/async.h
index 0a17cd27f348..19b778d08600 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously
@@ -112,11 +114,11 @@ async_schedule_dev_domain(async_func_t func, struct device *dev,
return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
}
-void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
extern void async_synchronize_cookie_domain(async_cookie_t cookie,
struct async_domain *domain);
extern bool current_is_async(void);
+extern void async_init(void);
#endif
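
For context, the usual pattern pairs a scheduling call with a synchronization point before results are consumed (hypothetical sketch; the callback body is a placeholder):

    static void example_async_init(void *data, async_cookie_t cookie)
    {
    	struct device *dev = data;

    	/* slow per-device initialization runs here, possibly in parallel */
    	dev_info(dev, "async init done\n");
    }

    /* ...in the caller... */
    async_schedule_dev(example_async_init, dev);
    async_synchronize_full();	/* wait for all outstanding async work */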
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 4c328fef403c..5cc73d7e5b52 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -163,11 +163,22 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
+async_xor_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
+async_xor_val_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len,
struct async_submit_ctl *submit);
@@ -175,21 +186,23 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, enum sum_check_flags *pqres, struct page *spare,
- struct async_submit_ctl *submit);
+ unsigned int s_off, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */
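
The signature changes above replace the single shared byte offset with a per-source offsets array, so each page in blocks[] can contribute from a different offset. A caller sketch under that assumption (NDISKS and blocks[] are hypothetical; blocks[] holds the data pages followed by P and Q):

    struct dma_async_tx_descriptor *tx;
    struct async_submit_ctl submit;
    unsigned int offsets[NDISKS] = { 0 };	/* per-page byte offsets */

    init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
    tx = async_gen_syndrome(blocks, offsets, NDISKS, PAGE_SIZE, &submit);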
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6e67aded28f8..792e10a09787 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -13,10 +13,9 @@
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <asm/byteorder.h>
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
@@ -323,14 +322,21 @@ enum {
ATA_LOG_SATA_NCQ = 0x10,
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
+ ATA_LOG_CDL = 0x18,
+ ATA_LOG_CDL_SIZE = ATA_SECT_SIZE,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_SENSE_NCQ = 0x0F,
+ ATA_LOG_SENSE_NCQ_SIZE = ATA_SECT_SIZE * 2,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
+ ATA_LOG_SUPPORTED_CAPABILITIES = 0x03,
+ ATA_LOG_CURRENT_SETTINGS = 0x04,
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- /* Identify device SATA settings log:*/
+ /* Identify device SATA settings log: */
ATA_LOG_DEVSLP_OFFSET = 0x30,
ATA_LOG_DEVSLP_SIZE = 0x08,
ATA_LOG_DEVSLP_MDAT = 0x00,
@@ -415,6 +421,8 @@ enum {
SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
+ SETFEATURES_CDL = 0x0d, /* Enable/disable cmd duration limits */
+
/* SETFEATURE Sector counts for SATA features */
SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */
SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */
@@ -425,6 +433,7 @@ enum {
SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+ SETFEATURE_SENSE_DATA_SUCC_NCQ = 0xC4, /* Sense Data for successful NCQ commands */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
@@ -565,6 +574,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -577,9 +598,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -591,17 +609,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -616,15 +623,6 @@ static inline bool ata_id_has_flush(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
-static inline bool ata_id_flush_enabled(const u16 *id)
-{
- if (ata_id_has_flush(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
-}
-
static inline bool ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -632,19 +630,6 @@ static inline bool ata_id_has_flush_ext(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
-static inline bool ata_id_flush_ext_enabled(const u16 *id)
-{
- if (ata_id_has_flush_ext(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- /*
- * some Maxtor disks have bit 13 defined incorrectly
- * so check bit 10 too
- */
- return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
-}
-
static inline u32 ata_id_logical_sector_size(const u16 *id)
{
/* T13/1699-D Revision 6a, Sep 6, 2008. Page 128.
@@ -699,15 +684,6 @@ static inline bool ata_id_has_lba48(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
-static inline bool ata_id_lba48_enabled(const u16 *id)
-{
- if (ata_id_has_lba48(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
-}
-
static inline bool ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
@@ -770,16 +746,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
+ return false;
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
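
The repeated bits-15:14 test implements the ATA convention that a feature word is meaningful only when its validity marker reads 01b; the same guard appears in ata_id_has_fua() and friends above. As a sketch, the pattern reduces to a hypothetical helper:

    static inline bool example_ata_word_valid(u16 word)
    {
    	/* bit 15 clear, bit 14 set: contents of the word are valid */
    	return (word & 0xC000) == 0x4000;
    }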
/**
@@ -1044,76 +1025,6 @@ static inline bool atapi_id_dmadir(const u16 *dev_id)
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
-/*
- * ata_id_is_lba_capacity_ok() performs a sanity check on
- * the claimed LBA capacity value for the device.
- *
- * Returns 1 if LBA capacity looks sensible, 0 otherwise.
- *
- * It is called only once for each device.
- */
-static inline bool ata_id_is_lba_capacity_ok(u16 *id)
-{
- unsigned long lba_sects, chs_sects, head, tail;
-
- /* No non-LBA info .. so valid! */
- if (id[ATA_ID_CYLS] == 0)
- return true;
-
- lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
-
- /*
- * The ATA spec tells large drives to return
- * C/H/S = 16383/16/63 independent of their size.
- * Some drives can be jumpered to use 15 heads instead of 16.
- * Some drives can be jumpered to use 4092 cyls instead of 16383.
- */
- if ((id[ATA_ID_CYLS] == 16383 ||
- (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
- id[ATA_ID_SECTORS] == 63 &&
- (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
- (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
- return true;
-
- chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
-
- /* perform a rough sanity check on lba_sects: within 10% is OK */
- if (lba_sects - chs_sects < chs_sects/10)
- return true;
-
- /* some drives have the word order reversed */
- head = (lba_sects >> 16) & 0xffff;
- tail = lba_sects & 0xffff;
- lba_sects = head | (tail << 16);
-
- if (lba_sects - chs_sects < chs_sects/10) {
- *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
- return true; /* LBA capacity is (now) good */
- }
-
- return false; /* LBA capacity value may be bad */
-}
-
-static inline void ata_id_to_hd_driveid(u16 *id)
-{
-#ifdef __BIG_ENDIAN
- /* accessed in struct hd_driveid as 8-bit values */
- id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
- id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
- id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
- id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
- id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
-
- /* as 32-bit values */
- *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
-
- /* as 64-bit value */
- *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
- ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
-#endif
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 9cafec92282d..b9745cc08e38 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -19,7 +19,7 @@ extern int __pata_platform_probe(struct device *dev,
struct resource *irq_res,
unsigned int ioport_shift,
int __pio_mask,
- struct scsi_host_template *sht,
+ const struct scsi_host_template *sht,
bool use16bit);
/*
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index f6034ba774be..a55bfc6567d0 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -113,7 +113,7 @@ extern int aarp_proto_init(void);
/* Inter module exports */
/* Give a device find its atif control structure */
-#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+#if IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h
deleted file mode 100644
index 84f3aab54468..000000000000
--- a/include/linux/atm_suni.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by
- driver-specific utilities) */
-
-/* Written 1998,2000 by Werner Almesberger, EPFL ICA */
-
-
-#ifndef LINUX_ATM_SUNI_H
-#define LINUX_ATM_SUNI_H
-
-/* everything obsoleted */
-
-#endif
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
index c8ecf6f68fb5..2558439d849b 100644
--- a/include/linux/atm_tcp.h
+++ b/include/linux/atm_tcp.h
@@ -9,6 +9,8 @@
#include <uapi/linux/atm_tcp.h>
+struct atm_vcc;
+struct module;
struct atm_tcp_ops {
int (*attach)(struct atm_vcc *vcc,int itf);
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 5d5ff2203fa2..9b02961d65ee 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -151,7 +151,7 @@ struct atm_dev {
const char *type; /* device type name */
int number; /* device index */
void *dev_data; /* per-device data */
- void *phy_data; /* private PHY date */
+ void *phy_data; /* private PHY data */
unsigned long flags; /* device flags (ATM_DF_*) */
struct list_head local; /* local ATM addresses */
struct list_head lecs; /* LECS ATM addresses learned via ILMI */
@@ -186,6 +186,7 @@ struct atmdev_ops { /* only send is required */
void __user *arg);
#endif
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
void (*phy_put)(struct atm_dev *dev,unsigned char value,
unsigned long addr);
@@ -206,7 +207,7 @@ struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
unsigned int acct_truesize; /* truesize accounted to vcc */
-};
+} __packed;
#define VCC_HTABLE_SIZE 32
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
deleted file mode 100644
index 1491af38cc6e..000000000000
--- a/include/linux/atmel-mci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_ATMEL_MCI_H
-#define __LINUX_ATMEL_MCI_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-#define ATMCI_MAX_NR_SLOTS 2
-
-/**
- * struct mci_slot_pdata - board-specific per-slot configuration
- * @bus_width: Number of data lines wired up the slot
- * @detect_pin: GPIO pin wired to the card detect switch
- * @wp_pin: GPIO pin wired to the write protect sensor
- * @detect_is_active_high: The state of the detect pin when it is active
- * @non_removable: The slot is not removable, only detect once
- *
- * If a given slot is not present on the board, @bus_width should be
- * set to 0. The other fields are ignored in this case.
- *
- * Any pins that aren't available should be set to a negative value.
- *
- * Note that support for multiple slots is experimental -- some cards
- * might get upset if we don't get the clock management exactly right.
- * But in most cases, it should work just fine.
- */
-struct mci_slot_pdata {
- unsigned int bus_width;
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
- bool non_removable;
-};
-
-/**
- * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
- void *dma_slave;
- dma_filter_fn dma_filter;
- struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
-};
-
-#endif /* __LINUX_ATMEL_MCI_H */
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
deleted file mode 100644
index bcb6aa27cfa6..000000000000
--- a/include/linux/atomic-arch-fallback.h
+++ /dev/null
@@ -1,2291 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef arch_xchg_relaxed
-#define arch_xchg_relaxed arch_xchg
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
- __atomic_op_acquire(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
- __atomic_op_release(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg
-#define arch_xchg(...) \
- __atomic_op_fence(arch_xchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
- __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
- __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
- __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
- __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
- __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
- __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_atomic_read_acquire
-static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
-#endif
-
-#ifndef arch_atomic_set_release
-static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic_set_release arch_atomic_set_release
-#endif
-
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_add_return_acquire
-static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
-#endif
-
-#ifndef arch_atomic_add_return_release
-static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_add_return_relaxed(i, v);
-}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
-#endif
-
-#ifndef arch_atomic_add_return
-static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_add_return arch_atomic_add_return
-#endif
-
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
-#else /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_fetch_add_acquire
-static __always_inline int
-arch_atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic_fetch_add_release
-static __always_inline int
-arch_atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
-#endif
-
-#ifndef arch_atomic_fetch_add
-static __always_inline int
-arch_atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#endif
-
-#endif /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_sub_return_relaxed
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
-#else /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_sub_return_acquire
-static __always_inline int
-arch_atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
-#endif
-
-#ifndef arch_atomic_sub_return_release
-static __always_inline int
-arch_atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_sub_return_relaxed(i, v);
-}
-#define arch_atomic_sub_return_release arch_atomic_sub_return_release
-#endif
-
-#ifndef arch_atomic_sub_return
-static __always_inline int
-arch_atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
-#endif
-
-#endif /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_fetch_sub_relaxed
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
-#else /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_fetch_sub_acquire
-static __always_inline int
-arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic_fetch_sub_release
-static __always_inline int
-arch_atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
-#endif
-
-#ifndef arch_atomic_fetch_sub
-static __always_inline int
-arch_atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#endif
-
-#endif /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_inc
-static __always_inline void
-arch_atomic_inc(atomic_t *v)
-{
- arch_atomic_add(1, v);
-}
-#define arch_atomic_inc arch_atomic_inc
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-#ifdef arch_atomic_inc_return
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return
-#define arch_atomic_inc_return_release arch_atomic_inc_return
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
-#endif /* arch_atomic_inc_return */
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
- return arch_atomic_add_return(1, v);
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
- return arch_atomic_add_return_acquire(1, v);
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
- return arch_atomic_add_return_release(1, v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-static __always_inline int
-arch_atomic_inc_return_relaxed(atomic_t *v)
-{
- return arch_atomic_add_return_relaxed(1, v);
-}
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#endif
-
-#else /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = arch_atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_inc_return_relaxed(v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#endif /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_fetch_inc_relaxed
-#ifdef arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
-#endif /* arch_atomic_fetch_inc */
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
- return arch_atomic_fetch_add(1, v);
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
- return arch_atomic_fetch_add_acquire(1, v);
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
- return arch_atomic_fetch_add_release(1, v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc_relaxed
-static __always_inline int
-arch_atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return arch_atomic_fetch_add_relaxed(1, v);
-}
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = arch_atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_inc_relaxed(v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#endif /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_dec
-static __always_inline void
-arch_atomic_dec(atomic_t *v)
-{
- arch_atomic_sub(1, v);
-}
-#define arch_atomic_dec arch_atomic_dec
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-#ifdef arch_atomic_dec_return
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return
-#define arch_atomic_dec_return_release arch_atomic_dec_return
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
-#endif /* arch_atomic_dec_return */
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
- return arch_atomic_sub_return(1, v);
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
- return arch_atomic_sub_return_acquire(1, v);
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
- return arch_atomic_sub_return_release(1, v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-static __always_inline int
-arch_atomic_dec_return_relaxed(atomic_t *v)
-{
- return arch_atomic_sub_return_relaxed(1, v);
-}
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#endif
-
-#else /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = arch_atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_dec_return_relaxed(v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#endif /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_fetch_dec_relaxed
-#ifdef arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
-#endif /* arch_atomic_fetch_dec */
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
- return arch_atomic_fetch_sub(1, v);
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
- return arch_atomic_fetch_sub_acquire(1, v);
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
- return arch_atomic_fetch_sub_release(1, v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec_relaxed
-static __always_inline int
-arch_atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return arch_atomic_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = arch_atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_dec_relaxed(v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#endif /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
-#else /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_fetch_and_acquire
-static __always_inline int
-arch_atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic_fetch_and_release
-static __always_inline int
-arch_atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
-#endif
-
-#ifndef arch_atomic_fetch_and
-static __always_inline int
-arch_atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#endif
-
-#endif /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_andnot
-static __always_inline void
-arch_atomic_andnot(int i, atomic_t *v)
-{
- arch_atomic_and(~i, v);
-}
-#define arch_atomic_andnot arch_atomic_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-#ifdef arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
-#endif /* arch_atomic_fetch_andnot */
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and(~i, v);
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_acquire(~i, v);
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_release(~i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-static __always_inline int
-arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#endif /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
-#else /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_or_acquire
-static __always_inline int
-arch_atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic_fetch_or_release
-static __always_inline int
-arch_atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
-#endif
-
-#ifndef arch_atomic_fetch_or
-static __always_inline int
-arch_atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#endif
-
-#endif /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_xor_relaxed
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
-#else /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_fetch_xor_acquire
-static __always_inline int
-arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic_fetch_xor_release
-static __always_inline int
-arch_atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
-#endif
-
-#ifndef arch_atomic_fetch_xor
-static __always_inline int
-arch_atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#endif
-
-#endif /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg
-#define arch_atomic_xchg_release arch_atomic_xchg
-#define arch_atomic_xchg_relaxed arch_atomic_xchg
-#else /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_xchg_acquire
-static __always_inline int
-arch_atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = arch_atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
-#endif
-
-#ifndef arch_atomic_xchg_release
-static __always_inline int
-arch_atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return arch_atomic_xchg_relaxed(v, i);
-}
-#define arch_atomic_xchg_release arch_atomic_xchg_release
-#endif
-
-#ifndef arch_atomic_xchg
-static __always_inline int
-arch_atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_xchg arch_atomic_xchg
-#endif
-
-#endif /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
-#else /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_acquire
-static __always_inline int
-arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_cmpxchg_release
-static __always_inline int
-arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_cmpxchg
-static __always_inline int
-arch_atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#endif
-
-#endif /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-#ifdef arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
-#endif /* arch_atomic_try_cmpxchg */
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#endif /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_sub_and_test
-/**
- * arch_atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_sub_and_test(int i, atomic_t *v)
-{
- return arch_atomic_sub_return(i, v) == 0;
-}
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-#endif
-
-#ifndef arch_atomic_dec_and_test
-/**
- * arch_atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic_dec_and_test(atomic_t *v)
-{
- return arch_atomic_dec_return(v) == 0;
-}
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-#endif
-
-#ifndef arch_atomic_inc_and_test
-/**
- * arch_atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_inc_and_test(atomic_t *v)
-{
- return arch_atomic_inc_return(v) == 0;
-}
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
-#endif
-
-#ifndef arch_atomic_add_negative
-/**
- * arch_atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
-{
- return arch_atomic_add_return(i, v) < 0;
-}
-#define arch_atomic_add_negative arch_atomic_add_negative
-#endif
-
-#ifndef arch_atomic_fetch_add_unless
-/**
- * arch_atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#endif
-
-#ifndef arch_atomic_add_unless
-/**
- * arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
- return arch_atomic_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic_add_unless arch_atomic_add_unless
-#endif
-
-#ifndef arch_atomic_inc_not_zero
-/**
- * arch_atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic_inc_not_zero(atomic_t *v)
-{
- return arch_atomic_add_unless(v, 1, 0);
-}
-#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
-#endif
-
-#ifndef arch_atomic_inc_unless_negative
-static __always_inline bool
-arch_atomic_inc_unless_negative(atomic_t *v)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
-#endif
-
-#ifndef arch_atomic_dec_unless_positive
-static __always_inline bool
-arch_atomic_dec_unless_positive(atomic_t *v)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
-#endif
-
-#ifndef arch_atomic_dec_if_positive
-static __always_inline int
-arch_atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = arch_atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef arch_atomic64_read_acquire
-static __always_inline s64
-arch_atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic64_read_acquire arch_atomic64_read_acquire
-#endif
-
-#ifndef arch_atomic64_set_release
-static __always_inline void
-arch_atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic64_set_release arch_atomic64_set_release
-#endif
-
-#ifndef arch_atomic64_add_return_relaxed
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
-#else /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_add_return_acquire
-static __always_inline s64
-arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
-#endif
-
-#ifndef arch_atomic64_add_return_release
-static __always_inline s64
-arch_atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_add_return_relaxed(i, v);
-}
-#define arch_atomic64_add_return_release arch_atomic64_add_return_release
-#endif
-
-#ifndef arch_atomic64_add_return
-static __always_inline s64
-arch_atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_add_return arch_atomic64_add_return
-#endif
-
-#endif /* arch_atomic64_add_return_relaxed */
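The same three-way derivation repeats for every operation below: the
acquire variant is the relaxed op followed by __atomic_acquire_fence(),
the release variant is __atomic_release_fence() followed by the relaxed
op, and the fully ordered variant brackets the relaxed op with the
pre/post full fences. A sketch of what the acquire/release pairing
buys, using hypothetical ready/payload variables (real callers would
use the atomic64_* wrappers, not the arch_ forms):

	static atomic64_t ready = ATOMIC64_INIT(0);
	static long payload;

	static void writer(void)
	{
		WRITE_ONCE(payload, 42);
		/* release: the payload store cannot sink below this */
		arch_atomic64_add_return_release(1, &ready);
	}

	static void reader(void)
	{
		/* acquire: the payload load cannot hoist above this */
		if (arch_atomic64_read_acquire(&ready))
			BUG_ON(READ_ONCE(payload) != 42);
	}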
-
-#ifndef arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
-#else /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_fetch_add_acquire
-static __always_inline s64
-arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_add_release
-static __always_inline s64
-arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
-#endif
-
-#ifndef arch_atomic64_fetch_add
-static __always_inline s64
-arch_atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#endif
-
-#endif /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_sub_return_relaxed
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
-#else /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_sub_return_acquire
-static __always_inline s64
-arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
-#endif
-
-#ifndef arch_atomic64_sub_return_release
-static __always_inline s64
-arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
-#endif
-
-#ifndef arch_atomic64_sub_return
-static __always_inline s64
-arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#endif
-
-#endif /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_relaxed
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
-#else /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_acquire
-static __always_inline s64
-arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_sub_release
-static __always_inline s64
-arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
-#endif
-
-#ifndef arch_atomic64_fetch_sub
-static __always_inline s64
-arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#endif
-
-#endif /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_inc
-static __always_inline void
-arch_atomic64_inc(atomic64_t *v)
-{
- arch_atomic64_add(1, v);
-}
-#define arch_atomic64_inc arch_atomic64_inc
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-#ifdef arch_atomic64_inc_return
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
-#endif /* arch_atomic64_inc_return */
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
- return arch_atomic64_add_return(1, v);
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
- return arch_atomic64_add_return_acquire(1, v);
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
- return arch_atomic64_add_return_release(1, v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-static __always_inline s64
-arch_atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return arch_atomic64_add_return_relaxed(1, v);
-}
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
-#endif
-
-#else /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#endif /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-#ifdef arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
-#endif /* arch_atomic64_fetch_inc */
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
- return arch_atomic64_fetch_add(1, v);
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return arch_atomic64_fetch_add_acquire(1, v);
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
- return arch_atomic64_fetch_add_release(1, v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-static __always_inline s64
-arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return arch_atomic64_fetch_add_relaxed(1, v);
-}
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#endif /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_dec
-static __always_inline void
-arch_atomic64_dec(atomic64_t *v)
-{
- arch_atomic64_sub(1, v);
-}
-#define arch_atomic64_dec arch_atomic64_dec
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-#ifdef arch_atomic64_dec_return
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
-#endif /* arch_atomic64_dec_return */
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
- return arch_atomic64_sub_return(1, v);
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
- return arch_atomic64_sub_return_acquire(1, v);
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
- return arch_atomic64_sub_return_release(1, v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-static __always_inline s64
-arch_atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return arch_atomic64_sub_return_relaxed(1, v);
-}
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
-#endif
-
-#else /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_dec_return_relaxed(v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#endif /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-#ifdef arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
-#endif /* arch_atomic64_fetch_dec */
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub(1, v);
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_acquire(1, v);
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_release(1, v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-static __always_inline s64
-arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#endif /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
-#else /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_fetch_and_acquire
-static __always_inline s64
-arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_and_release
-static __always_inline s64
-arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
-#endif
-
-#ifndef arch_atomic64_fetch_and
-static __always_inline s64
-arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#endif
-
-#endif /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_andnot
-static __always_inline void
-arch_atomic64_andnot(s64 i, atomic64_t *v)
-{
- arch_atomic64_and(~i, v);
-}
-#define arch_atomic64_andnot arch_atomic64_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-#ifdef arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
-#endif /* arch_atomic64_fetch_andnot */
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and(~i, v);
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_acquire(~i, v);
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_release(~i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-static __always_inline s64
-arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#endif /* arch_atomic64_fetch_andnot_relaxed */
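andnot exists as a first-class primitive (rather than only as the ~i
wrapper above) because some architectures can provide it natively, for
instance arm64's LSE LDCLR instruction. A typical use is clearing a
flag while learning its previous state; FLAG_BUSY and state here are
hypothetical:

	#define FLAG_BUSY	(1LL << 0)
	static atomic64_t state = ATOMIC64_INIT(FLAG_BUSY);

	static void finish(void)
	{
		/* fetch_andnot() returns the old value... */
		if (arch_atomic64_fetch_andnot(FLAG_BUSY, &state) & FLAG_BUSY) {
			/* ...so only the thread that cleared the bit gets here */
		}
	}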
-
-#ifndef arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
-#else /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_or_acquire
-static __always_inline s64
-arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_or_release
-static __always_inline s64
-arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
-#endif
-
-#ifndef arch_atomic64_fetch_or
-static __always_inline s64
-arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#endif
-
-#endif /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_relaxed
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
-#else /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_acquire
-static __always_inline s64
-arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_xor_release
-static __always_inline s64
-arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
-#endif
-
-#ifndef arch_atomic64_fetch_xor
-static __always_inline s64
-arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#endif
-
-#endif /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg
-#define arch_atomic64_xchg_release arch_atomic64_xchg
-#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
-#else /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_xchg_acquire
-static __always_inline s64
-arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = arch_atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
-#endif
-
-#ifndef arch_atomic64_xchg_release
-static __always_inline s64
-arch_atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return arch_atomic64_xchg_relaxed(v, i);
-}
-#define arch_atomic64_xchg_release arch_atomic64_xchg_release
-#endif
-
-#ifndef arch_atomic64_xchg
-static __always_inline s64
-arch_atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_xchg arch_atomic64_xchg
-#endif
-
-#endif /* arch_atomic64_xchg_relaxed */
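The fully ordered xchg above is the classic one-shot claim: swap in a
nonzero token, and only the caller that sees the old zero wins. A
minimal sketch (names hypothetical):

	static atomic64_t claimed = ATOMIC64_INIT(0);

	static bool try_claim(void)
	{
		return arch_atomic64_xchg(&claimed, 1) == 0;
	}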
-
-#ifndef arch_atomic64_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
-#else /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_acquire
-static __always_inline s64
-arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_cmpxchg_release
-static __always_inline s64
-arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_cmpxchg
-static __always_inline s64
-arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#endif
-
-#endif /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-#ifdef arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
-#endif /* arch_atomic64_try_cmpxchg */
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#endif /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_sub_and_test
-/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return arch_atomic64_sub_return(i, v) == 0;
-}
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
-#endif
-
-#ifndef arch_atomic64_dec_and_test
-/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic64_dec_and_test(atomic64_t *v)
-{
- return arch_atomic64_dec_return(v) == 0;
-}
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
-#endif
-
-#ifndef arch_atomic64_inc_and_test
-/**
- * arch_atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_inc_and_test(atomic64_t *v)
-{
- return arch_atomic64_inc_return(v) == 0;
-}
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
-#endif
-
-#ifndef arch_atomic64_add_negative
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when the
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return arch_atomic64_add_return(i, v) < 0;
-}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
-#endif
-
-#ifndef arch_atomic64_fetch_add_unless
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the original value of @v.
- */
-static __always_inline s64
-arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = arch_atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
-#endif
-
-#ifndef arch_atomic64_add_unless
-/**
- * arch_atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return arch_atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic64_add_unless arch_atomic64_add_unless
-#endif
-
-#ifndef arch_atomic64_inc_not_zero
-/**
- * arch_atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic64_inc_not_zero(atomic64_t *v)
-{
- return arch_atomic64_add_unless(v, 1, 0);
-}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-#endif
-
-#ifndef arch_atomic64_inc_unless_negative
-static __always_inline bool
-arch_atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = arch_atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
-#endif
-
-#ifndef arch_atomic64_dec_unless_positive
-static __always_inline bool
-arch_atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = arch_atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
-#endif
-
-#ifndef arch_atomic64_dec_if_positive
-static __always_inline s64
-arch_atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = arch_atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 90cd26cfd69d2250303d654955a0cc12620fb91b
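The trailing comment above is the checksum the atomics scripts carry
for each generated header so that hand edits can be detected; it covers
the file body, not this diff. The second header removed below,
atomic-fallback.h, is the pre-arch_ twin produced by the same
generator: it derives the plain atomic_* API with the same
relaxed/acquire/release/fence scheme and aliases the arch_ names onto
it, as the #define blocks in its body show.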
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
deleted file mode 100644
index fd525c71d676..000000000000
--- a/include/linux/atomic-fallback.h
+++ /dev/null
@@ -1,2525 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef xchg_relaxed
-#define xchg_relaxed xchg
-#define xchg_acquire xchg
-#define xchg_release xchg
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) \
- __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) \
- __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) \
- __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-
-#endif /* xchg_relaxed */
-
-#ifndef cmpxchg_relaxed
-#define cmpxchg_relaxed cmpxchg
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg_relaxed */
-
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_relaxed cmpxchg64
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg64_relaxed */
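The __atomic_op_{acquire,release,fence}() helpers used by these macro
blocks are defined elsewhere (in include/linux/atomic.h at the time);
they expanded roughly as below, a paraphrase of the scheme rather than
a verbatim quote:

	#define __atomic_op_acquire(op, args...)			\
	({								\
		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
		__atomic_acquire_fence();				\
		__ret;							\
	})

	#define __atomic_op_release(op, args...)			\
	({								\
		__atomic_release_fence();				\
		op##_relaxed(args);					\
	})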
-
-#define arch_atomic_read atomic_read
-#define arch_atomic_read_acquire atomic_read_acquire
-
-#ifndef atomic_read_acquire
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-#define arch_atomic_set atomic_set
-#define arch_atomic_set_release atomic_set_release
-
-#ifndef atomic_set_release
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-#define arch_atomic_add atomic_add
-
-#define arch_atomic_add_return atomic_add_return
-#define arch_atomic_add_return_acquire atomic_add_return_acquire
-#define arch_atomic_add_return_release atomic_add_return_release
-#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
-
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-#define atomic_add_return_relaxed atomic_add_return
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#ifndef atomic_add_return_release
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#ifndef atomic_add_return
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#endif /* atomic_add_return_relaxed */
-
-#define arch_atomic_fetch_add atomic_fetch_add
-#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
-#define arch_atomic_fetch_add_release atomic_fetch_add_release
-#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#ifndef atomic_fetch_add_release
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#ifndef atomic_fetch_add
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#endif /* atomic_fetch_add_relaxed */
-
-#define arch_atomic_sub atomic_sub
-
-#define arch_atomic_sub_return atomic_sub_return
-#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
-#define arch_atomic_sub_return_release atomic_sub_return_release
-#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
-
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-#define atomic_sub_return_relaxed atomic_sub_return
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#ifndef atomic_sub_return_release
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#ifndef atomic_sub_return
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#endif /* atomic_sub_return_relaxed */
-
-#define arch_atomic_fetch_sub atomic_fetch_sub
-#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
-#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#ifndef atomic_fetch_sub_release
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#ifndef atomic_fetch_sub
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#endif /* atomic_fetch_sub_relaxed */
-
-#define arch_atomic_inc atomic_inc
-
-#ifndef atomic_inc
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#define arch_atomic_inc_return atomic_inc_return
-#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
-#define arch_atomic_inc_return_release atomic_inc_return_release
-#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
-
-#ifndef atomic_inc_return_relaxed
-#ifdef atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-#define atomic_inc_return_relaxed atomic_inc_return
-#endif /* atomic_inc_return */
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- return atomic_add_return(1, v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- return atomic_add_return_acquire(1, v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- return atomic_add_return_release(1, v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return_relaxed
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- return atomic_add_return_relaxed(1, v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#endif /* atomic_inc_return_relaxed */
-
-#define arch_atomic_fetch_inc atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
-#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc_relaxed
-#ifdef atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- return atomic_fetch_add(1, v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- return atomic_fetch_add_acquire(1, v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- return atomic_fetch_add_release(1, v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc_relaxed
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return atomic_fetch_add_relaxed(1, v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#endif /* atomic_fetch_inc_relaxed */
-
-#define arch_atomic_dec atomic_dec
-
-#ifndef atomic_dec
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#define arch_atomic_dec_return atomic_dec_return
-#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
-#define arch_atomic_dec_return_release atomic_dec_return_release
-#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
-
-#ifndef atomic_dec_return_relaxed
-#ifdef atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-#define atomic_dec_return_relaxed atomic_dec_return
-#endif /* atomic_dec_return */
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- return atomic_sub_return(1, v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- return atomic_sub_return_acquire(1, v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- return atomic_sub_return_release(1, v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return_relaxed
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- return atomic_sub_return_relaxed(1, v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#endif /* atomic_dec_return_relaxed */
-
-#define arch_atomic_fetch_dec atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
-#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec_relaxed
-#ifdef atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- return atomic_fetch_sub(1, v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- return atomic_fetch_sub_acquire(1, v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- return atomic_fetch_sub_release(1, v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec_relaxed
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return atomic_fetch_sub_relaxed(1, v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#endif /* atomic_fetch_dec_relaxed */
-
-#define arch_atomic_and atomic_and
-
-#define arch_atomic_fetch_and atomic_fetch_and
-#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
-#define arch_atomic_fetch_and_release atomic_fetch_and_release
-#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#ifndef atomic_fetch_and_release
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#ifndef atomic_fetch_and
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#endif /* atomic_fetch_and_relaxed */
-
-#define arch_atomic_andnot atomic_andnot
-
-#ifndef atomic_andnot
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#define arch_atomic_fetch_andnot atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
-#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-
-#ifndef atomic_fetch_andnot_relaxed
-#ifdef atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#endif /* atomic_fetch_andnot_relaxed */
-
-#define arch_atomic_or atomic_or
-
-#define arch_atomic_fetch_or atomic_fetch_or
-#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
-#define arch_atomic_fetch_or_release atomic_fetch_or_release
-#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#ifndef atomic_fetch_or_release
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#ifndef atomic_fetch_or
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#endif /* atomic_fetch_or_relaxed */
-
-#define arch_atomic_xor atomic_xor
-
-#define arch_atomic_fetch_xor atomic_fetch_xor
-#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
-#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#ifndef atomic_fetch_xor_release
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#ifndef atomic_fetch_xor
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#endif /* atomic_fetch_xor_relaxed */
-
-#define arch_atomic_xchg atomic_xchg
-#define arch_atomic_xchg_acquire atomic_xchg_acquire
-#define arch_atomic_xchg_release atomic_xchg_release
-#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
-
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-#define atomic_xchg_relaxed atomic_xchg
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#ifndef atomic_xchg_release
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#ifndef atomic_xchg
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#endif /* atomic_xchg_relaxed */
-
-#define arch_atomic_cmpxchg atomic_cmpxchg
-#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
-#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#ifndef atomic_cmpxchg_release
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#ifndef atomic_cmpxchg
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#endif /* atomic_cmpxchg_relaxed */
-
-#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-
-#ifndef atomic_try_cmpxchg_relaxed
-#ifdef atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg_relaxed
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic_try_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#endif /* atomic_try_cmpxchg_relaxed */
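(The try_cmpxchg() family above has two jobs: return a bool instead of the old value, and, on failure, write the value actually observed back through *old so that CAS loops need no extra load per iteration. A sketch of the same construction in C11, where a value-returning CAS stands in for the kernel's atomic_cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>

/* Value-returning CAS, standing in for the kernel's atomic_cmpxchg(). */
static inline int cmpxchg_val(atomic_int *v, int old, int new)
{
        int expected = old;

        atomic_compare_exchange_strong(v, &expected, new);
        return expected;        /* == old on success, observed value on failure */
}

static inline bool try_cmpxchg_sketch(atomic_int *v, int *old, int new)
{
        int o = *old;
        int r = cmpxchg_val(v, o, new);

        if (r != o)
                *old = r;       /* report the value actually seen */
        return r == o;
}

A typical caller then loops as do { new = f(old); } while (!try_cmpxchg(v, &old, new)); — exactly the shape of atomic_fetch_add_unless() further down.)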
-
-#define arch_atomic_sub_and_test atomic_sub_and_test
-
-#ifndef atomic_sub_and_test
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns true
- * if the result is zero, or false otherwise.
- */
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- return atomic_sub_return(i, v) == 0;
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#define arch_atomic_dec_and_test atomic_dec_and_test
-
-#ifndef atomic_dec_and_test
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and returns true
- * if the result is 0, or false otherwise.
- */
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- return atomic_dec_return(v) == 0;
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#define arch_atomic_inc_and_test atomic_inc_and_test
-
-#ifndef atomic_inc_and_test
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1 and returns true
- * if the result is zero, or false otherwise.
- */
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v) == 0;
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#define arch_atomic_add_negative atomic_add_negative
-
-#ifndef atomic_add_negative
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true if the
- * result is negative, or false when the result is
- * greater than or equal to zero.
- */
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
-
-#ifndef atomic_fetch_add_unless
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the original value of @v.
- */
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#define arch_atomic_add_unless atomic_add_unless
-
-#ifndef atomic_add_unless
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- return atomic_fetch_add_unless(v, a, u) != u;
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#define arch_atomic_inc_not_zero atomic_inc_not_zero
-
-#ifndef atomic_inc_not_zero
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- return atomic_add_unless(v, 1, 0);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
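(atomic_inc_not_zero() is the canonical "take a reference only if the object is still live" primitive: a count of zero means teardown has begun and must not be resurrected. A sketch of the usual lookup-side pattern in C11, with a hypothetical struct obj:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_int refs;        /* 0 => teardown in progress */
};

/* Equivalent of atomic_add_unless(&o->refs, 1, 0). */
static bool obj_tryget(struct obj *o)
{
        int c = atomic_load_explicit(&o->refs, memory_order_relaxed);

        do {
                if (c == 0)     /* dying: must not resurrect */
                        return false;
        } while (!atomic_compare_exchange_weak(&o->refs, &c, c + 1));

        return true;
}
)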
-
-#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
-
-#ifndef atomic_inc_unless_negative
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
-
-#ifndef atomic_dec_unless_positive
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#define arch_atomic_dec_if_positive atomic_dec_if_positive
-
-#ifndef atomic_dec_if_positive
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
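(atomic_dec_if_positive() returns the decremented value, so a negative result doubles as "the counter was already zero and was left untouched". That makes it a natural fit for token or quota counters; a C11 sketch:

#include <stdatomic.h>

/* Consume one token if any remain; a negative return means none were left. */
static int take_token(atomic_int *avail)
{
        int c = atomic_load_explicit(avail, memory_order_relaxed);
        int dec;

        do {
                dec = c - 1;
                if (dec < 0)
                        break;          /* counter stays at zero */
        } while (!atomic_compare_exchange_weak(avail, &c, dec));

        return dec;
}
)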
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#define arch_atomic64_read atomic64_read
-#define arch_atomic64_read_acquire atomic64_read_acquire
-
-#ifndef atomic64_read_acquire
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-#define arch_atomic64_set atomic64_set
-#define arch_atomic64_set_release atomic64_set_release
-
-#ifndef atomic64_set_release
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-#define arch_atomic64_add atomic64_add
-
-#define arch_atomic64_add_return atomic64_add_return
-#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
-#define arch_atomic64_add_return_release atomic64_add_return_release
-#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
-
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-#define atomic64_add_return_relaxed atomic64_add_return
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#ifndef atomic64_add_return_release
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#ifndef atomic64_add_return
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#endif /* atomic64_add_return_relaxed */
-
-#define arch_atomic64_fetch_add atomic64_fetch_add
-#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
-#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#ifndef atomic64_fetch_add_release
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#ifndef atomic64_fetch_add
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#endif /* atomic64_fetch_add_relaxed */
-
-#define arch_atomic64_sub atomic64_sub
-
-#define arch_atomic64_sub_return atomic64_sub_return
-#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
-#define arch_atomic64_sub_return_release atomic64_sub_return_release
-#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#ifndef atomic64_sub_return_release
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#ifndef atomic64_sub_return
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#endif /* atomic64_sub_return_relaxed */
-
-#define arch_atomic64_fetch_sub atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
-#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#ifndef atomic64_fetch_sub_release
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#ifndef atomic64_fetch_sub
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#endif /* atomic64_fetch_sub_relaxed */
-
-#define arch_atomic64_inc atomic64_inc
-
-#ifndef atomic64_inc
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#define arch_atomic64_inc_return atomic64_inc_return
-#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
-#define arch_atomic64_inc_return_release atomic64_inc_return_release
-#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-
-#ifndef atomic64_inc_return_relaxed
-#ifdef atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- return atomic64_add_return(1, v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- return atomic64_add_return_acquire(1, v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- return atomic64_add_return_release(1, v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return_relaxed
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return atomic64_add_return_relaxed(1, v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#endif /* atomic64_inc_return_relaxed */
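(Note that this block, like the other inc/dec ones, handles fallback in both directions: if the architecture defined no relaxed form, the whole ordering family is synthesized from atomic64_add_return() and friends, or aliased to a fully ordered atomic64_inc_return() if one was given; if it defined only the relaxed form, the stronger orderings are built by adding fences, as elsewhere. The shape of the decision, compressed for a generic op:

#ifndef op_relaxed
# ifdef op                      /* arch gave only the fully ordered form */
#  define op_acquire op         /* stronger ordering is always acceptable */
#  define op_release op
#  define op_relaxed op
# endif
  /* ...any still-missing variant is built from the add/sub equivalent... */
#else                           /* arch gave (at least) the relaxed form */
  /* ...acquire/release/full are built from op_relaxed plus fences... */
#endif
)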
-
-#define arch_atomic64_fetch_inc atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
-#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc_relaxed
-#ifdef atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- return atomic64_fetch_add(1, v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return atomic64_fetch_add_acquire(1, v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- return atomic64_fetch_add_release(1, v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc_relaxed
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_add_relaxed(1, v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#endif /* atomic64_fetch_inc_relaxed */
-
-#define arch_atomic64_dec atomic64_dec
-
-#ifndef atomic64_dec
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#define arch_atomic64_dec_return atomic64_dec_return
-#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
-#define arch_atomic64_dec_return_release atomic64_dec_return_release
-#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-
-#ifndef atomic64_dec_return_relaxed
-#ifdef atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- return atomic64_sub_return(1, v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- return atomic64_sub_return_acquire(1, v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- return atomic64_sub_return_release(1, v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return_relaxed
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return atomic64_sub_return_relaxed(1, v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#endif /* atomic64_dec_return_relaxed */
-
-#define arch_atomic64_fetch_dec atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
-#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec_relaxed
-#ifdef atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- return atomic64_fetch_sub(1, v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return atomic64_fetch_sub_acquire(1, v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- return atomic64_fetch_sub_release(1, v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec_relaxed
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_sub_relaxed(1, v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#endif /* atomic64_fetch_dec_relaxed */
-
-#define arch_atomic64_and atomic64_and
-
-#define arch_atomic64_fetch_and atomic64_fetch_and
-#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
-#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#ifndef atomic64_fetch_and_release
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#ifndef atomic64_fetch_and
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#endif /* atomic64_fetch_and_relaxed */
-
-#define arch_atomic64_andnot atomic64_andnot
-
-#ifndef atomic64_andnot
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-
-#ifndef atomic64_fetch_andnot_relaxed
-#ifdef atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#endif /* atomic64_fetch_andnot_relaxed */
-
-#define arch_atomic64_or atomic64_or
-
-#define arch_atomic64_fetch_or atomic64_fetch_or
-#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
-#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#ifndef atomic64_fetch_or_release
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#ifndef atomic64_fetch_or
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#endif /* atomic64_fetch_or_relaxed */
-
-#define arch_atomic64_xor atomic64_xor
-
-#define arch_atomic64_fetch_xor atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
-#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#ifndef atomic64_fetch_xor_release
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#ifndef atomic64_fetch_xor
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#endif /* atomic64_fetch_xor_relaxed */
-
-#define arch_atomic64_xchg atomic64_xchg
-#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
-#define arch_atomic64_xchg_release atomic64_xchg_release
-#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
-
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-#define atomic64_xchg_relaxed atomic64_xchg
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#ifndef atomic64_xchg_release
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#ifndef atomic64_xchg
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#endif /* atomic64_xchg_relaxed */
-
-#define arch_atomic64_cmpxchg atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
-#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_cmpxchg_release
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#ifndef atomic64_cmpxchg
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#endif /* atomic64_cmpxchg_relaxed */
-
-#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-
-#ifndef atomic64_try_cmpxchg_relaxed
-#ifdef atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic64_try_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#endif /* atomic64_try_cmpxchg_relaxed */
-
-#define arch_atomic64_sub_and_test atomic64_sub_and_test
-
-#ifndef atomic64_sub_and_test
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns true
- * if the result is zero, or false otherwise.
- */
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return atomic64_sub_return(i, v) == 0;
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#define arch_atomic64_dec_and_test atomic64_dec_and_test
-
-#ifndef atomic64_dec_and_test
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and returns true
- * if the result is 0, or false otherwise.
- */
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- return atomic64_dec_return(v) == 0;
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#define arch_atomic64_inc_and_test atomic64_inc_and_test
-
-#ifndef atomic64_inc_and_test
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1 and returns true
- * if the result is zero, or false otherwise.
- */
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- return atomic64_inc_return(v) == 0;
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#define arch_atomic64_add_negative atomic64_add_negative
-
-#ifndef atomic64_add_negative
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true if the
- * result is negative, or false when the result is
- * greater than or equal to zero.
- */
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
-
-#ifndef atomic64_fetch_add_unless
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the original value of @v.
- */
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#define arch_atomic64_add_unless atomic64_add_unless
-
-#ifndef atomic64_add_unless
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
-
-#ifndef atomic64_inc_not_zero
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
-
-#ifndef atomic64_inc_unless_negative
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
-
-#ifndef atomic64_dec_unless_positive
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
-
-#ifndef atomic64_dec_if_positive
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 571a11008ab5..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,13 +77,8 @@
__ret; \
})
-#ifdef ARCH_ATOMIC
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-#else
-#include <linux/atomic-fallback.h>
-#endif
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
#endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 000000000000..956bcba5dbf2
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,4693 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#if defined(arch_xchg)
+#define raw_xchg arch_xchg
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#else
+extern void raw_xchg_not_implemented(void);
+#define raw_xchg(...) raw_xchg_not_implemented()
+#endif
+
+#if defined(arch_xchg_acquire)
+#define raw_xchg_acquire arch_xchg_acquire
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_acquire arch_xchg
+#else
+extern void raw_xchg_acquire_not_implemented(void);
+#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
+#endif
+
+#if defined(arch_xchg_release)
+#define raw_xchg_release arch_xchg_release
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_release arch_xchg
+#else
+extern void raw_xchg_release_not_implemented(void);
+#define raw_xchg_release(...) raw_xchg_release_not_implemented()
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define raw_xchg_relaxed arch_xchg_relaxed
+#elif defined(arch_xchg)
+#define raw_xchg_relaxed arch_xchg
+#else
+extern void raw_xchg_relaxed_not_implemented(void);
+#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
+#endif
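(The rewritten header replaces the old #ifndef-and-alias dance with one explicit #if/#elif ladder per ordering: prefer an arch op of exactly the requested ordering, else build it from the relaxed arch op via the __atomic_op_*() wrappers, else accept a stronger arch op, and as a last resort expand to a call to a deliberately undefined extern function, so using an unimplementable operation fails at link time instead of compiling silently. The __atomic_op_*() wrappers live in <linux/atomic.h>; they paste the _relaxed suffix onto the op name and add the fences, roughly:

#define __atomic_op_acquire(op, args...)                                \
({                                                                      \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args);          \
        __atomic_acquire_fence();                                       \
        __ret;                                                          \
})

#define __atomic_op_fence(op, args...)                                  \
({                                                                      \
        typeof(op##_relaxed(args)) __ret;                               \
        __atomic_pre_full_fence();                                      \
        __ret = op##_relaxed(args);                                     \
        __atomic_post_full_fence();                                     \
        __ret;                                                          \
})
)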
+
+#if defined(arch_cmpxchg)
+#define raw_cmpxchg arch_cmpxchg
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#else
+extern void raw_cmpxchg_not_implemented(void);
+#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_acquire)
+#define raw_cmpxchg_acquire arch_cmpxchg_acquire
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_acquire arch_cmpxchg
+#else
+extern void raw_cmpxchg_acquire_not_implemented(void);
+#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_release)
+#define raw_cmpxchg_release arch_cmpxchg_release
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_release arch_cmpxchg
+#else
+extern void raw_cmpxchg_release_not_implemented(void);
+#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_relaxed arch_cmpxchg
+#else
+extern void raw_cmpxchg_relaxed_not_implemented(void);
+#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64)
+#define raw_cmpxchg64 arch_cmpxchg64
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#else
+extern void raw_cmpxchg64_not_implemented(void);
+#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_acquire)
+#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_acquire arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_acquire_not_implemented(void);
+#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_release)
+#define raw_cmpxchg64_release arch_cmpxchg64_release
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_release arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_release_not_implemented(void);
+#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_relaxed_not_implemented(void);
+#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128)
+#define raw_cmpxchg128 arch_cmpxchg128
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128(...) \
+ __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
+#else
+extern void raw_cmpxchg128_not_implemented(void);
+#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_acquire)
+#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_acquire arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_acquire_not_implemented(void);
+#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_release)
+#define raw_cmpxchg128_release arch_cmpxchg128_release
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_release(...) \
+ __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_release arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_release_not_implemented(void);
+#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_relaxed_not_implemented(void);
+#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
+#endif
+
+#if defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg arch_try_cmpxchg
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_acquire)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_release)
+#define raw_try_cmpxchg_release arch_try_cmpxchg_release
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_release arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
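(The statement-expression fallbacks above are type-generic via typeof and evaluate to the result of the final likely(); the triple-underscored locals keep the macro from capturing the caller's identifiers. A typical caller, sketched in kernel C as a lock-free push with a hypothetical node type — ordinary code would use try_cmpxchg() rather than the raw_ form, which is reserved for noinstr paths:

struct node {
        struct node *next;
};

static struct node *top;        /* stack head, updated only via cmpxchg */

static void push(struct node *n)
{
        struct node *old = READ_ONCE(top);

        do {
                n->next = old;  /* 'old' is refreshed on each failed attempt */
        } while (!raw_try_cmpxchg_release(&top, &old, n));
}
)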
+
+#if defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64 arch_try_cmpxchg64
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_acquire)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_release)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128 arch_try_cmpxchg128
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128(...) \
+ __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_acquire)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_release)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_release(...) \
+ __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg_local arch_cmpxchg_local
+
+#ifdef arch_try_cmpxchg_local
+#define raw_try_cmpxchg_local arch_try_cmpxchg_local
+#else
+#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg64_local arch_cmpxchg64_local
+
+#ifdef arch_try_cmpxchg64_local
+#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
+#else
+#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg128_local arch_cmpxchg128_local
+
+#ifdef arch_try_cmpxchg128_local
+#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
+#else
+#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_sync_cmpxchg arch_sync_cmpxchg
+
+#ifdef arch_sync_try_cmpxchg
+#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
+#else
+#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+/**
+ * raw_atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read(const atomic_t *v)
+{
+ return arch_atomic_read(v);
+}
+
+/**
+ * raw_atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read_acquire(const atomic_t *v)
+{
+#if defined(arch_atomic_read_acquire)
+ return arch_atomic_read_acquire(v);
+#else
+ int ret;
+
+ if (__native_word(atomic_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = raw_atomic_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+#endif
+}
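(When no arch op exists, the generic path distinguishes two cases: if atomic_t's counter is a native machine word, smp_load_acquire() on it is both correct and cheap; otherwise it falls back to a relaxed read followed by an explicit acquire fence. __native_word() comes from <linux/compiler_types.h> and is, roughly, a size check:

/* True when loads/stores of t are single, naturally-atomic accesses. */
#define __native_word(t) \
        (sizeof(t) == sizeof(char)  || sizeof(t) == sizeof(short) || \
         sizeof(t) == sizeof(int)   || sizeof(t) == sizeof(long))
)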
+
+/**
+ * raw_atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set(atomic_t *v, int i)
+{
+ arch_atomic_set(v, i);
+}
+
+/**
+ * raw_atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set_release(atomic_t *v, int i)
+{
+#if defined(arch_atomic_set_release)
+ arch_atomic_set_release(v, i);
+#else
+ if (__native_word(atomic_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ raw_atomic_set(v, i);
+ }
+#endif
+}
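+
+/*
+ * Acquire loads pair with release stores. A hypothetical message-passing
+ * sketch (data and flag assumed; not part of this patch): a consumer that
+ * observes flag == 1 is guaranteed to also observe data == 42.
+ *
+ * Producer:
+ *
+ *	data = 42;
+ *	raw_atomic_set_release(&flag, 1);
+ *
+ * Consumer:
+ *
+ *	if (raw_atomic_read_acquire(&flag))
+ *		BUG_ON(data != 42);
+ */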
+
+/**
+ * raw_atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_add(int i, atomic_t *v)
+{
+ arch_atomic_add(i, v);
+}
+
+/**
+ * raw_atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_add_return"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_acquire)
+ return arch_atomic_add_return_acquire(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_release)
+ return arch_atomic_add_return_release(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_release"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_relaxed)
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_relaxed"
+#endif
+}
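+
+/*
+ * The four add_return variants above illustrate the fallback scheme used
+ * for every ordered operation in this file: an architecture providing only
+ * arch_atomic_add_return_relaxed() still gets the remaining orderings,
+ * composed from fences (sketch of the expansions generated above):
+ *
+ *	full:    __atomic_pre_full_fence(); relaxed op; __atomic_post_full_fence();
+ *	acquire: relaxed op; __atomic_acquire_fence();
+ *	release: __atomic_release_fence(); relaxed op;
+ */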
+
+/**
+ * raw_atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_add"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_acquire)
+ return arch_atomic_fetch_add_acquire(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_release)
+ return arch_atomic_fetch_add_release(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_relaxed)
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_sub(int i, atomic_t *v)
+{
+ arch_atomic_sub(i, v);
+}
+
+/**
+ * raw_atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_sub_return"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_acquire)
+ return arch_atomic_sub_return_acquire(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_release)
+ return arch_atomic_sub_return_release(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_release"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_relaxed)
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_sub"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_acquire)
+ return arch_atomic_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_release)
+ return arch_atomic_fetch_sub_release(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_relaxed)
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_inc(atomic_t *v)
+{
+#if defined(arch_atomic_inc)
+ arch_atomic_inc(v);
+#else
+ raw_atomic_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_acquire)
+ return arch_atomic_inc_return_acquire(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_release(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_release)
+ return arch_atomic_inc_return_release(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_relaxed)
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_acquire)
+ return arch_atomic_fetch_inc_acquire(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_release(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_release)
+ return arch_atomic_fetch_inc_release(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_relaxed)
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_dec(atomic_t *v)
+{
+#if defined(arch_atomic_dec)
+ arch_atomic_dec(v);
+#else
+ raw_atomic_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_sub_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_acquire)
+ return arch_atomic_dec_return_acquire(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_release(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_release)
+ return arch_atomic_dec_return_release(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_relaxed)
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_acquire)
+ return arch_atomic_fetch_dec_acquire(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_release(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_release)
+ return arch_atomic_fetch_dec_release(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_relaxed)
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_and(int i, atomic_t *v)
+{
+ arch_atomic_and(i, v);
+}
+
+/**
+ * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_and"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_acquire)
+ return arch_atomic_fetch_and_acquire(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_release)
+ return arch_atomic_fetch_and_release(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_relaxed)
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_andnot(int i, atomic_t *v)
+{
+#if defined(arch_atomic_andnot)
+ arch_atomic_andnot(i, v);
+#else
+ raw_atomic_and(~i, v);
+#endif
+}
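+
+/*
+ * andnot clears exactly the bits set in @i; with a single-bit mask it acts
+ * as an atomic clear-bit on an atomic_t. Hypothetical sketch (FLAG_BUSY
+ * assumed to be a mask such as BIT(0); not part of this patch), equivalent
+ * to state &= ~FLAG_BUSY:
+ *
+ *	raw_atomic_andnot(FLAG_BUSY, &state);
+ */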
+
+/**
+ * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_acquire)
+ return arch_atomic_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_release)
+ return arch_atomic_fetch_andnot_release(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_release(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_relaxed)
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_or(int i, atomic_t *v)
+{
+ arch_atomic_or(i, v);
+}
+
+/**
+ * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_or"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_acquire)
+ return arch_atomic_fetch_or_acquire(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_release)
+ return arch_atomic_fetch_or_release(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_relaxed)
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_xor(int i, atomic_t *v)
+{
+ arch_atomic_xor(i, v);
+}
+
+/**
+ * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_xor"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_acquire)
+ return arch_atomic_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_release)
+ return arch_atomic_fetch_xor_release(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_relaxed)
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_xchg(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_acquire(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_acquire)
+ return arch_atomic_xchg_acquire(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret = arch_atomic_xchg_relaxed(v, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_release(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_release)
+ return arch_atomic_xchg_release(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_relaxed(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_relaxed)
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
+#endif
+}
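+
+/*
+ * xchg atomically installs a new value and hands back the old one, which
+ * makes "take and reset" operations race-free. Hypothetical sketch with
+ * @pending an atomic_t accumulating deferred work (not part of this patch):
+ *
+ *	int work = raw_atomic_xchg(&pending, 0);
+ */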
+
+/**
+ * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_cmpxchg(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_acquire)
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_release)
+ return arch_atomic_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_relaxed)
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
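+
+/*
+ * Unlike a cmpxchg()-based loop, a try_cmpxchg() loop needs no explicit
+ * re-read of @v on failure: the observed value is written back through
+ * @old. A hypothetical bounded-increment sketch (not part of this patch):
+ *
+ *	static bool inc_below(atomic_t *v, int max)
+ *	{
+ *		int old = raw_atomic_read(v);
+ *
+ *		do {
+ *			if (old >= max)
+ *				return false;
+ *		} while (!raw_atomic_try_cmpxchg(v, &old, old + 1));
+ *
+ *		return true;
+ *	}
+ */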
+
+/**
+ * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_acquire)
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_release)
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_sub_and_test(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_and_test)
+ return arch_atomic_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_return(i, v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_dec_and_test(atomic_t *v)
+{
+#if defined(arch_atomic_dec_and_test)
+ return arch_atomic_dec_and_test(v);
+#else
+ return raw_atomic_dec_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_and_test(atomic_t *v)
+{
+#if defined(arch_atomic_inc_and_test)
+ return arch_atomic_inc_and_test(v);
+#else
+ return raw_atomic_inc_return(v) == 0;
+#endif
+}
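+
+/*
+ * dec_and_test underpins the classic "free on last put" pattern; its full
+ * ordering ensures all prior accesses to the object complete before the
+ * object is freed. Hypothetical sketch (struct obj and obj_free() assumed;
+ * not part of this patch):
+ *
+ *	static void obj_put(struct obj *o)
+ *	{
+ *		if (raw_atomic_dec_and_test(&o->refs))
+ *			obj_free(o);
+ *	}
+ */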
+
+/**
+ * raw_atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_acquire)
+ return arch_atomic_add_negative_acquire(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_release)
+ return arch_atomic_add_negative_release(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_release(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_relaxed)
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v) < 0;
+#endif
+}
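+
+/*
+ * add_negative reports the sign of the result, typically for a decrement
+ * that must detect crossing below zero. Hypothetical sketch (mapcount and
+ * last_mapping_gone() assumed; not part of this patch):
+ *
+ *	if (raw_atomic_add_negative(-1, &mapcount))
+ *		last_mapping_gone();
+ */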
+
+/**
+ * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+#if defined(arch_atomic_fetch_add_unless)
+ return arch_atomic_fetch_add_unless(v, a, u);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+#endif
+}
+
+/**
+ * raw_atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_unless(atomic_t *v, int a, int u)
+{
+#if defined(arch_atomic_add_unless)
+ return arch_atomic_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u) != u;
+#endif
+}
+
+/**
+ * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_not_zero(atomic_t *v)
+{
+#if defined(arch_atomic_inc_not_zero)
+ return arch_atomic_inc_not_zero(v);
+#else
+ return raw_atomic_add_unless(v, 1, 0);
+#endif
+}
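+
+/*
+ * inc_not_zero implements "take a reference only if the object is still
+ * live", e.g. during a lockless lookup that can race with the final put.
+ * Hypothetical sketch (struct obj assumed; not part of this patch):
+ *
+ *	static bool obj_tryget(struct obj *o)
+ *	{
+ *		return raw_atomic_inc_not_zero(&o->refs);
+ *	}
+ */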
+
+/**
+ * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_unless_negative(atomic_t *v)
+{
+#if defined(arch_atomic_inc_unless_negative)
+ return arch_atomic_inc_unless_negative(v);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_dec_unless_positive(atomic_t *v)
+{
+#if defined(arch_atomic_dec_unless_positive)
+ return arch_atomic_dec_unless_positive(v);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
+ *
+ * Return: (@v - 1) computed from the original value of @v, regardless of
+ * whether @v was updated; a negative result means no update occurred.
+ */
+static __always_inline int
+raw_atomic_dec_if_positive(atomic_t *v)
+{
+#if defined(arch_atomic_dec_if_positive)
+ return arch_atomic_dec_if_positive(v);
+#else
+ int dec, c = raw_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!raw_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+#endif
+}
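+
+/*
+ * Usage sketch (illustrative only; the helper name is hypothetical):
+ * raw_atomic_dec_if_positive() implements "take one token if any are
+ * left", e.g. for a simple credit counter.
+ *
+ *	static __always_inline bool credit_take(atomic_t *credits)
+ *	{
+ *		/# A negative result means no token was available
+ *		 # and *credits was left untouched. #/
+ *		return raw_atomic_dec_if_positive(credits) >= 0;
+ *	}
+ */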
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+/**
+ * raw_atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read(const atomic64_t *v)
+{
+ return arch_atomic64_read(v);
+}
+
+/**
+ * raw_atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read_acquire(const atomic64_t *v)
+{
+#if defined(arch_atomic64_read_acquire)
+ return arch_atomic64_read_acquire(v);
+#else
+ s64 ret;
+
+ if (__native_word(atomic64_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = raw_atomic64_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+#endif
+}
+
+/**
+ * raw_atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set(atomic64_t *v, s64 i)
+{
+ arch_atomic64_set(v, i);
+}
+
+/**
+ * raw_atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set_release(atomic64_t *v, s64 i)
+{
+#if defined(arch_atomic64_set_release)
+ arch_atomic64_set_release(v, i);
+#else
+ if (__native_word(atomic64_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ raw_atomic64_set(v, i);
+ }
+#endif
+}
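+
+/*
+ * The acquire/release pair above is what makes simple message passing
+ * work. A hedged sketch (hypothetical names; producer and consumer run
+ * on different CPUs):
+ *
+ *	/# producer #/
+ *	data = 42;				/# plain store #/
+ *	raw_atomic64_set_release(&flag, 1);	/# publishes data #/
+ *
+ *	/# consumer #/
+ *	if (raw_atomic64_read_acquire(&flag))	/# observes flag == 1 #/
+ *		use(data);			/# guaranteed to see data == 42 #/
+ */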
+
+/**
+ * raw_atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_add(s64 i, atomic64_t *v)
+{
+ arch_atomic64_add(i, v);
+}
+
+/**
+ * raw_atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_add_return"
+#endif
+}
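+
+/*
+ * Usage sketch (hypothetical ring buffer, illustration only): because the
+ * return value is fully ordered, raw_atomic64_add_return() can hand out
+ * unique, monotonically increasing slot numbers to concurrent callers.
+ *
+ *	static __always_inline s64 ring_reserve(atomic64_t *head)
+ *	{
+ *		/# Each caller gets a distinct index; no two callers
+ *		 # can observe the same return value. #/
+ *		return raw_atomic64_add_return(1, head) - 1;
+ *	}
+ */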
+
+/**
+ * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_acquire)
+ return arch_atomic64_add_return_acquire(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_release)
+ return arch_atomic64_add_return_release(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_release"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_relaxed)
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_add"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_acquire)
+ return arch_atomic64_fetch_add_acquire(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_release)
+ return arch_atomic64_fetch_add_release(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_relaxed)
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_sub(s64 i, atomic64_t *v)
+{
+ arch_atomic64_sub(i, v);
+}
+
+/**
+ * raw_atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_sub_return"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_acquire)
+ return arch_atomic64_sub_return_acquire(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_release)
+ return arch_atomic64_sub_return_release(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_release"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_relaxed)
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_sub"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_acquire)
+ return arch_atomic64_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_release)
+ return arch_atomic64_fetch_sub_release(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_relaxed)
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_inc(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc)
+ arch_atomic64_inc(v);
+#else
+ raw_atomic64_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_acquire)
+ return arch_atomic64_inc_return_acquire(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_release)
+ return arch_atomic64_inc_return_release(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_relaxed)
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_acquire)
+ return arch_atomic64_fetch_inc_acquire(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_release)
+ return arch_atomic64_fetch_inc_release(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_relaxed)
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_dec(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec)
+ arch_atomic64_dec(v);
+#else
+ raw_atomic64_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_sub_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_acquire)
+ return arch_atomic64_dec_return_acquire(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_release)
+ return arch_atomic64_dec_return_release(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_relaxed)
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_acquire)
+ return arch_atomic64_fetch_dec_acquire(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_release)
+ return arch_atomic64_fetch_dec_release(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_relaxed)
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_and(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_and"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_acquire)
+ return arch_atomic64_fetch_and_acquire(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_release)
+ return arch_atomic64_fetch_and_release(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_relaxed)
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_andnot(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_andnot)
+ arch_atomic64_andnot(i, v);
+#else
+ raw_atomic64_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_and(~i, v);
+#endif
+}
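+
+/*
+ * Usage sketch (hypothetical flag word, illustration only):
+ * raw_atomic64_fetch_andnot() atomically clears bits and reports which
+ * of them were previously set, e.g. when draining a pending-work mask.
+ *
+ *	static __always_inline s64 work_claim(atomic64_t *pending, s64 mask)
+ *	{
+ *		/# Returns the subset of @mask this caller now owns;
+ *		 # concurrent callers never double-claim a bit. #/
+ *		return raw_atomic64_fetch_andnot(mask, pending) & mask;
+ *	}
+ */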
+
+/**
+ * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_acquire)
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_acquire(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_release)
+ return arch_atomic64_fetch_andnot_release(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_release(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_relaxed(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_or(s64 i, atomic64_t *v)
+{
+ arch_atomic64_or(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_or"
+#endif
+}
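+
+/*
+ * Usage sketch (hypothetical, illustration only): fetch_or doubles as a
+ * test-and-set; the returned old value tells the caller whether it was
+ * the one to set the bit.
+ *
+ *	static __always_inline bool init_claim(atomic64_t *state)
+ *	{
+ *		/# Bit 0 acts as an "initialized" latch; exactly one
+ *		 # caller observes it clear and wins. #/
+ *		return !(raw_atomic64_fetch_or(1, state) & 1);
+ *	}
+ */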
+
+/**
+ * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_acquire)
+ return arch_atomic64_fetch_or_acquire(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_release)
+ return arch_atomic64_fetch_or_release(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_relaxed)
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_xor(s64 i, atomic64_t *v)
+{
+ arch_atomic64_xor(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_xor"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_acquire)
+ return arch_atomic64_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_release)
+ return arch_atomic64_fetch_xor_release(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_relaxed)
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_xchg(&v->counter, new);
+#endif
+}
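+
+/*
+ * Usage sketch (hypothetical stats accumulator, illustration only): xchg
+ * lets a reader atomically drain a counter without losing updates that
+ * race with the drain.
+ *
+ *	static __always_inline s64 stat_drain(atomic64_t *acc)
+ *	{
+ *		/# Concurrent raw_atomic64_add() callers land either
+ *		 # before the swap (returned here) or after it (seen
+ *		 # by the next drain); nothing is dropped. #/
+ *		return raw_atomic64_xchg(acc, 0);
+ *	}
+ */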
+
+/**
+ * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_acquire)
+ return arch_atomic64_xchg_acquire(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret = arch_atomic64_xchg_relaxed(v, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_release)
+ return arch_atomic64_xchg_release(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_relaxed)
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_cmpxchg(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_acquire)
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_release)
+ return arch_atomic64_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_relaxed)
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
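+
+/*
+ * The try_cmpxchg() form is designed for update loops: on failure @old is
+ * refreshed automatically, so no explicit re-read is needed. A hedged
+ * sketch of a saturating increment (hypothetical helper):
+ *
+ *	static __always_inline bool inc_below(atomic64_t *v, s64 limit)
+ *	{
+ *		s64 old = raw_atomic64_read(v);
+ *
+ *		do {
+ *			if (old >= limit)
+ *				return false;	/# would exceed the cap #/
+ *		} while (!raw_atomic64_try_cmpxchg(v, &old, old + 1));
+ *
+ *		return true;
+ *	}
+ */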
+
+/**
+ * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_release)
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_and_test)
+ return arch_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic64_sub_return(i, v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_dec_and_test(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_and_test)
+ return arch_atomic64_dec_and_test(v);
+#else
+ return raw_atomic64_dec_return(v) == 0;
+#endif
+}
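+
+/*
+ * Usage sketch (hypothetical object, illustration only): dec_and_test is
+ * the classic "put" half of refcounting; the full ordering ensures the
+ * freeing CPU sees all prior stores to the object.
+ *
+ *	static __always_inline void obj_put(struct obj *o)
+ *	{
+ *		if (raw_atomic64_dec_and_test(&o->refs))
+ *			obj_free(o);	/# last reference dropped #/
+ *	}
+ */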
+
+/**
+ * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_and_test(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_and_test)
+ return arch_atomic64_inc_and_test(v);
+#else
+ return raw_atomic64_inc_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(i, v) < 0;
+#endif
+}
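+
+/*
+ * Usage sketch (hypothetical budget counter, illustration only): when a
+ * signed counter tracks remaining headroom, add_negative() charges an
+ * amount and reports whether the budget went into deficit.
+ *
+ *	static __always_inline bool charge_overflows(atomic64_t *budget, s64 cost)
+ *	{
+ *		return raw_atomic64_add_negative(-cost, budget);
+ *	}
+ */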
+
+/**
+ * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_acquire)
+ return arch_atomic64_add_negative_acquire(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_acquire(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_release)
+ return arch_atomic64_add_negative_release(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_release(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_relaxed)
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_relaxed(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+#if defined(arch_atomic64_fetch_add_unless)
+ return arch_atomic64_fetch_add_unless(v, a, u);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+#endif
+}
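+
+/*
+ * Example (illustrative only): fetch_add_unless() is the building block
+ * for conditional-increment idioms. With a hypothetical LIMIT constant,
+ * a counter that refuses to grow past LIMIT can be written as:
+ *
+ *	if (raw_atomic64_fetch_add_unless(&cnt, 1, LIMIT) == LIMIT)
+ *		return -EBUSY;	// already at LIMIT; cnt unchanged
+ */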
+
+/**
+ * raw_atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+#if defined(arch_atomic64_add_unless)
+ return arch_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic64_fetch_add_unless(v, a, u) != u;
+#endif
+}
+
+/**
+ * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_not_zero(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_not_zero)
+ return arch_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic64_add_unless(v, 1, 0);
+#endif
+}
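+
+/*
+ * Example (illustrative only): inc_not_zero() is the lookup-side refcount
+ * idiom — take a reference only while the object is still live. With a
+ * hypothetical object holding its refcount in @refs:
+ *
+ *	if (!raw_atomic64_inc_not_zero(&obj->refs))
+ *		return NULL;	// already dying; do not touch obj
+ *	return obj;
+ */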
+
+/**
+ * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_unless_negative(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_unless_negative)
+ return arch_atomic64_inc_unless_negative(v);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_dec_unless_positive(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_unless_positive)
+ return arch_atomic64_dec_unless_positive(v);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline s64
+raw_atomic64_dec_if_positive(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_if_positive)
+ return arch_atomic64_dec_if_positive(v);
+#else
+ s64 dec, c = raw_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+#endif
+}
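+
+/*
+ * Example (illustrative only): dec_if_positive() behaves like a
+ * non-blocking semaphore "down". With a hypothetical token pool:
+ *
+ *	if (raw_atomic64_dec_if_positive(&pool->tokens) < 0)
+ *		return -EAGAIN;	// no token taken; pool unchanged
+ */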
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..debd487fe971
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,5053 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides atomic operations with explicit instrumentation (e.g.
+ * KASAN, KCSAN), which should be used unless it is necessary to avoid
+ * instrumentation. Where it is necessary to avoid instrumentation, the
+ * raw_atomic*() operations should be used.
+ */
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+/**
+ * atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_read(v);
+}
+
+/**
+ * atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_read_acquire(v);
+}
+
+/**
+ * atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_set(v, i);
+}
+
+/**
+ * atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_set_release(v, i);
+}
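+
+/*
+ * Example (illustrative only): set_release() pairs with read_acquire()
+ * to publish data. @ready and @data are hypothetical:
+ *
+ *	// producer
+ *	data = 42;
+ *	atomic_set_release(&ready, 1);
+ *
+ *	// consumer
+ *	if (atomic_read_acquire(&ready))
+ *		use(data);	// observing 1 guarantees data == 42 is visible
+ */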
+
+/**
+ * atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_add(i, v);
+}
+
+/**
+ * atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return(i, v);
+}
+
+/**
+ * atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_acquire(i, v);
+}
+
+/**
+ * atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_release(i, v);
+}
+
+/**
+ * atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add(i, v);
+}
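+
+/*
+ * Example (illustrative only): add_return() yields the new value while
+ * fetch_add() yields the old one. Starting from v == 3:
+ *
+ *	atomic_add_return(2, &v);	// returns 5, v is now 5
+ *	atomic_fetch_add(2, &v);	// returns 5, v is now 7
+ */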
+
+/**
+ * atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_release(i, v);
+}
+
+/**
+ * atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_sub(i, v);
+}
+
+/**
+ * atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return(i, v);
+}
+
+/**
+ * atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_release(i, v);
+}
+
+/**
+ * atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub(i, v);
+}
+
+/**
+ * atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_inc(v);
+}
+
+/**
+ * atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return(v);
+}
+
+/**
+ * atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_acquire(v);
+}
+
+/**
+ * atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_release(v);
+}
+
+/**
+ * atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_relaxed(v);
+}
+
+/**
+ * atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc(v);
+}
+
+/**
+ * atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_release(v);
+}
+
+/**
+ * atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_dec(v);
+}
+
+/**
+ * atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return(v);
+}
+
+/**
+ * atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_acquire(v);
+}
+
+/**
+ * atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_release(v);
+}
+
+/**
+ * atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_relaxed(v);
+}
+
+/**
+ * atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec(v);
+}
+
+/**
+ * atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_release(v);
+}
+
+/**
+ * atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_and(i, v);
+}
+
+/**
+ * atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and(i, v);
+}
+
+/**
+ * atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_release(i, v);
+}
+
+/**
+ * atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_andnot(i, v);
+}
+
+/**
+ * atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_or(i, v);
+}
+
+/**
+ * atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or(i, v);
+}
+
+/**
+ * atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_release(i, v);
+}
+
+/**
+ * atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_xor(i, v);
+}
+
+/**
+ * atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor(i, v);
+}
+
+/**
+ * atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg(atomic_t *v, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg(v, new);
+}
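+
+/*
+ * Example (illustrative only): xchg() atomically claims a value while
+ * leaving a sentinel behind. @work_pending and process_work() are
+ * hypothetical:
+ *
+ *	if (atomic_xchg(&work_pending, 0))	// grab and clear in one step
+ *		process_work();
+ */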
+
+/**
+ * atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_acquire(v, new);
+}
+
+/**
+ * atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_release(v, new);
+}
+
+/**
+ * atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_relaxed(v, new);
+}
+
+/**
+ * atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg(v, old, new);
+}
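+
+/*
+ * Example (illustrative only): try_cmpxchg() updates @old on failure, so
+ * the usual RMW retry loop needs no explicit re-read:
+ *
+ *	int old = atomic_read(&v);
+ *	do {
+ *		// old holds the latest observed value on each retry
+ *	} while (!atomic_try_cmpxchg(&v, &old, old * 2));
+ */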
+
+/**
+ * atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_and_test(i, v);
+}
+
+/**
+ * atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_and_test(v);
+}
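+
+/*
+ * Example (illustrative only): dec_and_test() is the put-side refcount
+ * idiom; exactly one caller observes the transition to zero and may
+ * free the (hypothetical) object:
+ *
+ *	if (atomic_dec_and_test(&obj->refs))
+ *		kfree(obj);	// last reference dropped
+ */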
+
+/**
+ * atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_and_test(v);
+}
+
+/**
+ * atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative(i, v);
+}
+
+/**
+ * atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_release(i, v);
+}
+
+/**
+ * atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_unless(v, a, u);
+}
+
+/**
+ * atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_not_zero(v);
+}
+
+/**
+ * atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_unless_negative(v);
+}
+
+/**
+ * atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_unless_positive(v);
+}
+
+/**
+ * atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_if_positive(v);
+}
+
+/**
+ * atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic64_read(v);
+}
+
+/**
+ * atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic64_read_acquire(v);
+}
+
+/**
+ * atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic64_set(v, i);
+}
+
+/**
+ * atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic64_set_release(v, i);
+}
+
+/**
+ * atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_add(i, v);
+}
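+
+/*
+ * Example (illustrative only): the atomic64_*() ops mirror the atomic_*()
+ * API for counters that would overflow 32 bits, e.g. a hypothetical byte
+ * counter in a stats structure:
+ *
+ *	atomic64_add(len, &stats->rx_bytes);
+ */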
+
+/**
+ * atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return(i, v);
+}
+
+/**
+ * atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_acquire(i, v);
+}
+
+/**
+ * atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_release(i, v);
+}
+
+/**
+ * atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add(i, v);
+}
+
+/**
+ * atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_release(i, v);
+}
+
+/**
+ * atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_sub(i, v);
+}
+
+/**
+ * atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return(i, v);
+}
+
+/**
+ * atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_release(i, v);
+}
+
+/**
+ * atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_inc(v);
+}
+
+/**
+ * atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return(v);
+}
+
+/**
+ * atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_acquire(v);
+}
+
+/**
+ * atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_release(v);
+}
+
+/**
+ * atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_relaxed(v);
+}
+
+/**
+ * atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc(v);
+}
+
+/**
+ * atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_release(v);
+}
+
+/**
+ * atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_dec(v);
+}
+
+/**
+ * atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return(v);
+}
+
+/**
+ * atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_acquire(v);
+}
+
+/**
+ * atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_release(v);
+}
+
+/**
+ * atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_relaxed(v);
+}
+
+/**
+ * atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec(v);
+}
+
+/**
+ * atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_release(v);
+}
+
+/**
+ * atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_relaxed(v);
+}
+
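+/*
+ * Usage sketch (hypothetical names; complete() is from
+ * <linux/completion.h>): because the fetch_* forms return the original
+ * value, atomic64_fetch_dec() can detect the transition to zero, e.g.
+ * to wake a waiter once the last in-flight operation completes. The
+ * full ordering orders the preceding work before the wakeup decision.
+ */
+static inline void example_op_done(atomic64_t *inflight,
+     struct completion *all_done)
+{
+ if (atomic64_fetch_dec(inflight) == 1)
+  complete(all_done);
+}
+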
+/**
+ * atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_and(i, v);
+}
+
+/**
+ * atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and(i, v);
+}
+
+/**
+ * atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_release(i, v);
+}
+
+/**
+ * atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_andnot(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+}
+
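+/*
+ * Usage sketch (the flag name is hypothetical; BIT() is from
+ * <linux/bits.h>): the AND NOT forms clear the bits set in @i. Using
+ * the fetch_ variant reports whether a flag was set before it was
+ * cleared, so "consume a pending flag" becomes a single atomic step.
+ */
+#define EXAMPLE_FLAG_PENDING BIT(0)
+
+static inline bool example_consume_pending(atomic64_t *flags)
+{
+ return atomic64_fetch_andnot(EXAMPLE_FLAG_PENDING, flags) &
+        EXAMPLE_FLAG_PENDING;
+}
+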
+/**
+ * atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_or(i, v);
+}
+
+/**
+ * atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or(i, v);
+}
+
+/**
+ * atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_release(i, v);
+}
+
+/**
+ * atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_xor(i, v);
+}
+
+/**
+ * atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg(v, new);
+}
+
+/**
+ * atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_acquire(v, new);
+}
+
+/**
+ * atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_release(v, new);
+}
+
+/**
+ * atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_relaxed(v, new);
+}
+
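+/*
+ * Usage sketch (hypothetical names): atomic64_xchg() hands the old
+ * value to exactly one caller, which makes it a simple way to claim a
+ * stashed value such that concurrent callers cannot both observe it.
+ */
+static inline s64 example_claim(atomic64_t *stash)
+{
+ /* Only one concurrent caller sees any given non-zero value. */
+ return atomic64_xchg(stash, 0);
+}
+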
+/**
+ * atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
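+/*
+ * Usage sketch: the canonical try_cmpxchg() retry loop. Unlike
+ * cmpxchg(), the try_ form updates @old on failure, so the loop does
+ * not need to re-read @v itself. The saturation bound (EXAMPLE_MAX)
+ * is an arbitrary illustrative value.
+ */
+#define EXAMPLE_MAX 1024
+
+static inline bool example_inc_below_max(atomic64_t *v)
+{
+ s64 old = atomic64_read(v);
+
+ do {
+  if (old >= EXAMPLE_MAX)
+   return false;
+ } while (!atomic64_try_cmpxchg(v, &old, old + 1));
+
+ return true;
+}
+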
+/**
+ * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_and_test(i, v);
+}
+
+/**
+ * atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_and_test(v);
+}
+
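+/*
+ * Usage sketch (the struct name is hypothetical; kfree() is from
+ * <linux/slab.h>): the classic reference-put pattern. The full
+ * ordering of dec_and_test() ensures all prior accesses to the object
+ * happen before it is freed.
+ */
+struct example_obj {
+ atomic64_t refcnt;
+ /* ... payload ... */
+};
+
+static inline void example_obj_put(struct example_obj *obj)
+{
+ if (atomic64_dec_and_test(&obj->refcnt))
+  kfree(obj);
+}
+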
+/**
+ * atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_and_test(v);
+}
+
+/**
+ * atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative(i, v);
+}
+
+/**
+ * atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_release(i, v);
+}
+
+/**
+ * atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_unless(v, a, u);
+}
+
+/**
+ * atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_not_zero(v);
+}
+
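+/*
+ * Usage sketch (hypothetical names, continuing the example_obj
+ * refcounting sketch above): inc_not_zero() is the lookup-side "try to
+ * get a reference" primitive. It fails once the count has dropped to
+ * zero, so a dying object is never resurrected.
+ */
+static inline bool example_obj_tryget(struct example_obj *obj)
+{
+ return atomic64_inc_not_zero(&obj->refcnt);
+}
+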
+/**
+ * atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_unless_negative(v);
+}
+
+/**
+ * atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_unless_positive(v);
+}
+
+/**
+ * atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus 1, regardless of whether @v was updated.
+ */
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_if_positive(v);
+}
+
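+/*
+ * Usage sketch (hypothetical names): dec_if_positive() never takes @v
+ * below zero, so it can implement a "trylock" on a counted resource.
+ * A non-negative return value means the caller took one unit.
+ */
+static inline bool example_try_take(atomic64_t *available)
+{
+ return atomic64_dec_if_positive(available) >= 0;
+}
+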
+/**
+ * atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_long_read(v);
+}
+
+/**
+ * atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_long_read_acquire(v);
+}
+
+/**
+ * atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_long_set(v, i);
+}
+
+/**
+ * atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_long_set_release(v, i);
+}
+
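+/*
+ * Usage sketch (hypothetical names): the release/acquire pair is the
+ * idiomatic way to publish data through an atomic_long_t. The writer's
+ * set_release() orders the payload store before the flag store; the
+ * reader's read_acquire() orders the flag load before the payload load.
+ */
+static long example_payload;
+static atomic_long_t example_ready = ATOMIC_LONG_INIT(0);
+
+static inline void example_publish(long data)
+{
+ example_payload = data;
+ atomic_long_set_release(&example_ready, 1);
+}
+
+static inline bool example_try_consume(long *out)
+{
+ if (!atomic_long_read_acquire(&example_ready))
+  return false;
+ *out = example_payload;
+ return true;
+}
+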
+/**
+ * atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_add(i, v);
+}
+
+/**
+ * atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return(i, v);
+}
+
+/**
+ * atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_acquire(i, v);
+}
+
+/**
+ * atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_release(i, v);
+}
+
+/**
+ * atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_sub(i, v);
+}
+
+/**
+ * atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return(i, v);
+}
+
+/**
+ * atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_release(i, v);
+}
+
+/**
+ * atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_inc(v);
+}
+
+/**
+ * atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return(v);
+}
+
+/**
+ * atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_acquire(v);
+}
+
+/**
+ * atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_release(v);
+}
+
+/**
+ * atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_relaxed(v);
+}
+
+/**
+ * atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc(v);
+}
+
+/**
+ * atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_release(v);
+}
+
+/**
+ * atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_dec(v);
+}
+
+/**
+ * atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return(v);
+}
+
+/**
+ * atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_acquire(v);
+}
+
+/**
+ * atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_release(v);
+}
+
+/**
+ * atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_relaxed(v);
+}
+
+/**
+ * atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec(v);
+}
+
+/**
+ * atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_release(v);
+}
+
+/**
+ * atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_and(i, v);
+}
+
+/**
+ * atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_andnot(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_or(i, v);
+}
+
+/**
+ * atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_xor(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_relaxed(i, v);
+}
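+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API):
+ * the fetch_*() bitwise ops return the value of @v *before* the update,
+ * so "set a flag and learn whether it was already set" is a one-liner.
+ * PENDING_FLAG and mark_pending() are hypothetical names.
+ *
+ *   #define PENDING_FLAG 0x1L
+ *
+ *   static bool mark_pending(atomic_long_t *flags)
+ *   {
+ *           return !(atomic_long_fetch_or(PENDING_FLAG, flags) & PENDING_FLAG);
+ *   }
+ *
+ * The return value is true only for the caller that actually flipped the bit.
+ */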
+
+/**
+ * atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg(v, new);
+}
+
+/**
+ * atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_acquire(v, new);
+}
+
+/**
+ * atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_release(v, new);
+}
+
+/**
+ * atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_relaxed(v, new);
+}
+
+/**
+ * atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg(v, old, new);
+}
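+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API): a
+ * classic compare-and-swap loop built on atomic_long_cmpxchg(). On
+ * failure the caller must feed the returned value back in by hand;
+ * compare with the try_cmpxchg() variants below, which refresh @old for
+ * you. clamp_add(), delta and cap are hypothetical names.
+ *
+ *   static long clamp_add(atomic_long_t *v, long delta, long cap)
+ *   {
+ *           long old = atomic_long_read(v);
+ *           long new, seen;
+ *
+ *           for (;;) {
+ *                   new = min(old + delta, cap);
+ *                   seen = atomic_long_cmpxchg(v, old, new);
+ *                   if (seen == old)
+ *                           return new;
+ *                   old = seen;
+ *           }
+ *   }
+ */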
+
+/**
+ * atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
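+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API):
+ * the same clamped add written with atomic_long_try_cmpxchg(). Because
+ * @old is updated in place on failure, no explicit re-read is needed and
+ * the retry loop collapses to a do/while.
+ *
+ *   static long clamp_add(atomic_long_t *v, long delta, long cap)
+ *   {
+ *           long old = atomic_long_read(v);
+ *           long new;
+ *
+ *           do {
+ *                   new = min(old + delta, cap);
+ *           } while (!atomic_long_try_cmpxchg(v, &old, new));
+ *
+ *           return new;
+ *   }
+ */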
+
+/**
+ * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_and_test(i, v);
+}
+
+/**
+ * atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_and_test(v);
+}
+
+/**
+ * atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_and_test(v);
+}
+
+/**
+ * atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative(i, v);
+}
+
+/**
+ * atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_release(i, v);
+}
+
+/**
+ * atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_unless(v, a, u);
+}
+
+/**
+ * atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_not_zero(v);
+}
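+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API):
+ * inc_not_zero() is the primitive behind "take a reference unless the
+ * object is already dead" schemes; for real reference counting prefer
+ * the dedicated refcount_t API. struct obj and obj_tryget() are
+ * hypothetical.
+ *
+ *   static bool obj_tryget(struct obj *o)
+ *   {
+ *           return atomic_long_inc_not_zero(&o->refs);
+ *   }
+ */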
+
+/**
+ * atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_unless_negative(v);
+}
+
+/**
+ * atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_unless_positive(v);
+}
+
+/**
+ * atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_if_positive(v);
+}
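+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API):
+ * dec_if_positive() consumes one unit from a counting pool without ever
+ * driving it negative. Per the return convention above, the decremented
+ * value comes back even when nothing was stored, so a negative result
+ * means the pool was empty. take_token() and pool are hypothetical.
+ *
+ *   static bool take_token(atomic_long_t *pool)
+ *   {
+ *           return atomic_long_dec_if_positive(pool) >= 0;
+ *   }
+ */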
+
+#define xchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
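+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API):
+ * unlike the atomic_long_*() functions above, these macros operate on
+ * ordinary scalar lvalues of a size the architecture supports, wrapping
+ * the raw_*() forms with the same KCSAN/KASAN instrumentation. state,
+ * OLD and NEW are hypothetical names.
+ *
+ *   unsigned long state = OLD;
+ *
+ *   if (cmpxchg(&state, OLD, NEW) == OLD)
+ *           pr_debug("won the OLD -> NEW transition\n");
+ */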
+
+#define cmpxchg64(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define sync_try_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// ce5b65e0f1f8a276268b667194581d24bed219d4
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644
index 000000000000..3ef844b3ab8a
--- /dev/null
+++ b/include/linux/atomic/atomic-long.h
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
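+
+/*
+ * Illustration (editor's note, not part of the generated API): given the
+ * mapping above, the declaration below is an atomic64_t on 64-bit
+ * kernels and a plain atomic_t on 32-bit ones; either way it is exactly
+ * long-sized. nr_objects is a hypothetical name.
+ *
+ *   static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);
+ */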
+
+/**
+ * raw_atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read(const atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read(v);
+#else
+ return raw_atomic_read(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read_acquire(const atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read_acquire(v);
+#else
+ return raw_atomic_read_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_set(atomic_long_t *v, long i)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_set(v, i);
+#else
+ raw_atomic_set(v, i);
+#endif
+}
+
+/**
+ * raw_atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_set_release(atomic_long_t *v, long i)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_set_release(v, i);
+#else
+ raw_atomic_set_release(v, i);
+#endif
+}
+
+/**
+ * raw_atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_add(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_add(i, v);
+#else
+ raw_atomic_add(i, v);
+#endif
+}
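+
+/*
+ * Usage sketch (editor's illustration, not part of the generated API):
+ * the raw_*() forms carry no sanitizer instrumentation, which is what
+ * makes them legal in noinstr sections such as early entry code.
+ * note_entry() and entry_events are hypothetical.
+ *
+ *   static atomic_long_t entry_events;
+ *
+ *   static noinstr void note_entry(void)
+ *   {
+ *           raw_atomic_long_add(1, &entry_events);
+ *   }
+ */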
+
+/**
+ * raw_atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return(i, v);
+#else
+ return raw_atomic_add_return(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_acquire(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_release(i, v);
+#else
+ return raw_atomic_add_return_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_relaxed(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add(i, v);
+#else
+ return raw_atomic_fetch_add(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_acquire(i, v);
+#else
+ return raw_atomic_fetch_add_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_release(i, v);
+#else
+ return raw_atomic_fetch_add_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_relaxed(i, v);
+#else
+ return raw_atomic_fetch_add_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_sub(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_sub(i, v);
+#else
+ raw_atomic_sub(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return(i, v);
+#else
+ return raw_atomic_sub_return(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_acquire(i, v);
+#else
+ return raw_atomic_sub_return_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_release(i, v);
+#else
+ return raw_atomic_sub_return_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_relaxed(i, v);
+#else
+ return raw_atomic_sub_return_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub(i, v);
+#else
+ return raw_atomic_fetch_sub(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_acquire(i, v);
+#else
+ return raw_atomic_fetch_sub_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_release(i, v);
+#else
+ return raw_atomic_fetch_sub_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+#else
+ return raw_atomic_fetch_sub_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_inc(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_inc(v);
+#else
+ raw_atomic_inc(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return(v);
+#else
+ return raw_atomic_inc_return(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_acquire(v);
+#else
+ return raw_atomic_inc_return_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_release(v);
+#else
+ return raw_atomic_inc_return_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_relaxed(v);
+#else
+ return raw_atomic_inc_return_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc(v);
+#else
+ return raw_atomic_fetch_inc(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_acquire(v);
+#else
+ return raw_atomic_fetch_inc_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_release(v);
+#else
+ return raw_atomic_fetch_inc_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_relaxed(v);
+#else
+ return raw_atomic_fetch_inc_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_dec(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_dec(v);
+#else
+ raw_atomic_dec(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return(v);
+#else
+ return raw_atomic_dec_return(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_acquire(v);
+#else
+ return raw_atomic_dec_return_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_release(v);
+#else
+ return raw_atomic_dec_return_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_relaxed(v);
+#else
+ return raw_atomic_dec_return_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec(v);
+#else
+ return raw_atomic_fetch_dec(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_acquire(v);
+#else
+ return raw_atomic_fetch_dec_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_release(v);
+#else
+ return raw_atomic_fetch_dec_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_relaxed(v);
+#else
+ return raw_atomic_fetch_dec_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_and(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_and(i, v);
+#else
+ raw_atomic_and(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and(i, v);
+#else
+ return raw_atomic_fetch_and(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_acquire(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_release(i, v);
+#else
+ return raw_atomic_fetch_and_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_relaxed(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_andnot(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_andnot(i, v);
+#else
+ raw_atomic_andnot(i, v);
+#endif
+}
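+
+/*
+ * Usage sketch (editorial): andnot clears the bits of @i in @v, saving an
+ * explicit negation at the call site; FLAG_BUSY and FLAG_DIRTY are
+ * illustrative masks, not kernel symbols.
+ *
+ * .. code-block:: c
+ *
+ *	#define FLAG_BUSY	0x1L
+ *	#define FLAG_DIRTY	0x2L
+ *
+ *	atomic_long_t flags = ATOMIC_LONG_INIT(FLAG_BUSY | FLAG_DIRTY);
+ *
+ *	raw_atomic_long_andnot(FLAG_BUSY, &flags);	// only FLAG_DIRTY remains
+ */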
+
+/**
+ * raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_andnot(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+#else
+ return raw_atomic_fetch_andnot_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_release(i, v);
+#else
+ return raw_atomic_fetch_andnot_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+#else
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_or(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_or(i, v);
+#else
+ raw_atomic_or(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or(i, v);
+#else
+ return raw_atomic_fetch_or(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_acquire(i, v);
+#else
+ return raw_atomic_fetch_or_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_release(i, v);
+#else
+ return raw_atomic_fetch_or_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_relaxed(i, v);
+#else
+ return raw_atomic_fetch_or_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_xor(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_xor(i, v);
+#else
+ raw_atomic_xor(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor(i, v);
+#else
+ return raw_atomic_fetch_xor(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_acquire(i, v);
+#else
+ return raw_atomic_fetch_xor_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_release(i, v);
+#else
+ return raw_atomic_fetch_xor_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+#else
+ return raw_atomic_fetch_xor_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg(v, new);
+#else
+ return raw_atomic_xchg(v, new);
+#endif
+}
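+
+/*
+ * Usage sketch (editorial): xchg() unconditionally installs @new and returns
+ * whatever was stored before, which lets a single caller claim a pending
+ * value exactly once; consume() is a hypothetical helper.
+ *
+ * .. code-block:: c
+ *
+ *	atomic_long_t pending = ATOMIC_LONG_INIT(42);
+ *
+ *	long taken = raw_atomic_long_xchg(&pending, 0);
+ *
+ *	if (taken)
+ *		consume(taken);
+ */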
+
+/**
+ * raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_acquire(v, new);
+#else
+ return raw_atomic_xchg_acquire(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_release(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_release(v, new);
+#else
+ return raw_atomic_xchg_release(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_relaxed(v, new);
+#else
+ return raw_atomic_xchg_relaxed(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_atomic_cmpxchg(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+#else
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_release(v, old, new);
+#else
+ return raw_atomic_cmpxchg_release(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+#else
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg(v, (int *)old, new);
+#endif
+}
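+
+/*
+ * Usage sketch (editorial): because @old is updated on failure, a
+ * compare-and-swap retry loop needs no extra read of @v between attempts;
+ * v is an illustrative atomic_long_t and FLAG_BUSY an illustrative bit.
+ *
+ * .. code-block:: c
+ *
+ *	long old = raw_atomic_long_read(&v);
+ *	long new;
+ *
+ *	do {
+ *		new = old | FLAG_BUSY;
+ *	} while (!raw_atomic_long_try_cmpxchg(&v, &old, new));
+ */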
+
+/**
+ * raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_release(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_and_test(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_dec_and_test(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_and_test(v);
+#else
+ return raw_atomic_dec_and_test(v);
+#endif
+}
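+
+/*
+ * Usage sketch (editorial): dec_and_test() is the classic "last put"
+ * pattern, where only the caller that drops the final reference performs
+ * teardown; obj and release() are hypothetical.
+ *
+ * .. code-block:: c
+ *
+ *	if (raw_atomic_long_dec_and_test(&obj->refs))
+ *		release(obj);
+ */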
+
+/**
+ * raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_and_test(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_and_test(v);
+#else
+ return raw_atomic_inc_and_test(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative(i, v);
+#else
+ return raw_atomic_add_negative(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_acquire(i, v);
+#else
+ return raw_atomic_add_negative_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_release(i, v);
+#else
+ return raw_atomic_add_negative_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_relaxed(i, v);
+#else
+ return raw_atomic_add_negative_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic_add_unless(v, a, u);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic_inc_not_zero(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_unless_negative(v);
+#else
+ return raw_atomic_inc_unless_negative(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_unless_positive(v);
+#else
+ return raw_atomic_dec_unless_positive(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline long
+raw_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_if_positive(v);
+#else
+ return raw_atomic_dec_if_positive(v);
+#endif
+}
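+
+/*
+ * Usage sketch (editorial): the return value is always one less than the
+ * value observed, so a negative result signals that no decrement happened;
+ * here the counter acts as a crude semaphore.
+ *
+ * .. code-block:: c
+ *
+ *	atomic_long_t sem = ATOMIC_LONG_INIT(1);
+ *
+ *	if (raw_atomic_long_dec_if_positive(&sem) < 0)
+ *		return -EAGAIN;		// no resource was available
+ */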
+
+#endif /* _LINUX_ATOMIC_LONG_H */
+// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b3d859831a31..0050ef288ab3 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -11,8 +11,10 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
+#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
#include <uapi/linux/netfilter/nf_tables.h>
+#include <uapi/linux/fanotify.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -34,6 +36,7 @@ struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;
+struct kern_ipc_perm;
struct audit_krule {
u32 pflags;
@@ -115,11 +118,11 @@ enum audit_nfcfgop {
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
AUDIT_NFT_OP_INVALID,
};
-extern int is_audit_feature_set(int which);
-
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
@@ -130,8 +133,6 @@ extern unsigned compat_dir_class[];
extern unsigned compat_chattr_class[];
extern unsigned compat_signal_class[];
-extern int audit_classify_compat_syscall(int abi, unsigned syscall);
-
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
#define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */
@@ -287,12 +288,13 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
+extern void __audit_uring_entry(u8 op);
+extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
-extern void __audit_getcwd(void);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
extern void __audit_file(const struct file *);
@@ -324,6 +326,21 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
+static inline void audit_uring_entry(u8 op)
+{
+ /*
+ * We intentionally check audit_context() before audit_enabled as most
+ * Linux systems (as of ~2021) rely on systemd which forces audit to
+ * be enabled regardless of the user's audit configuration.
+ */
+ if (unlikely(audit_context() && audit_enabled))
+ __audit_uring_entry(op);
+}
+static inline void audit_uring_exit(int success, long code)
+{
+ if (unlikely(audit_context()))
+ __audit_uring_exit(success, code);
+}
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -351,11 +368,6 @@ static inline void audit_getname(struct filename *name)
if (unlikely(!audit_dummy_context()))
__audit_getname(name);
}
-static inline void audit_getcwd(void)
-{
- if (unlikely(audit_context()))
- __audit_getcwd();
-}
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int aflags) {
@@ -404,8 +416,9 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
+extern void __audit_openat2_how(struct open_how *how);
extern void __audit_log_kern_module(char *name);
-extern void __audit_fanotify(unsigned int response);
+extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
extern void __audit_tk_injoffset(struct timespec64 offset);
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
@@ -500,16 +513,22 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
+static inline void audit_openat2_how(struct open_how *how)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_openat2_how(how);
+}
+
static inline void audit_log_kern_module(char *name)
{
if (!audit_dummy_context())
__audit_log_kern_module(name);
}
-static inline void audit_fanotify(unsigned int response)
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{
if (!audit_dummy_context())
- __audit_fanotify(response);
+ __audit_fanotify(response, friar);
}
static inline void audit_tk_injoffset(struct timespec64 offset)
@@ -562,6 +581,10 @@ static inline int audit_alloc(struct task_struct *task)
}
static inline void audit_free(struct task_struct *task)
{ }
+static inline void audit_uring_entry(u8 op)
+{ }
+static inline void audit_uring_exit(int success, long code)
+{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -584,8 +607,6 @@ static inline struct filename *audit_reusename(const __user char *name)
}
static inline void audit_getname(struct filename *name)
{ }
-static inline void audit_getcwd(void)
-{ }
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int aflags)
@@ -653,11 +674,14 @@ static inline void audit_log_capset(const struct cred *new,
static inline void audit_mmap_fd(int fd, int flags)
{ }
+static inline void audit_openat2_how(struct open_how *how)
+{ }
+
static inline void audit_log_kern_module(char *name)
{
}
-static inline void audit_fanotify(unsigned int response)
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{ }
static inline void audit_tk_injoffset(struct timespec64 offset)
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
new file mode 100644
index 000000000000..0e34d673ef17
--- /dev/null
+++ b/include/linux/audit_arch.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit_arch.h -- Arch layer specific support for audit
+ *
+ * Copyright 2021 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Author: Richard Guy Briggs <rgb@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_ARCH_H_
+#define _LINUX_AUDIT_ARCH_H_
+
+enum auditsc_class_t {
+ AUDITSC_NATIVE = 0,
+ AUDITSC_COMPAT,
+ AUDITSC_OPEN,
+ AUDITSC_OPENAT,
+ AUDITSC_SOCKETCALL,
+ AUDITSC_EXECVE,
+ AUDITSC_OPENAT2,
+
+ AUDITSC_NVALS /* count */
+};
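+
+/*
+ * Usage sketch (editorial): an architecture's audit_classify_syscall()
+ * typically maps a few syscall numbers onto these classes and falls back to
+ * AUDITSC_NATIVE; the __NR_* cases below are illustrative, not a specific
+ * ABI.
+ *
+ * .. code-block:: c
+ *
+ *	int audit_classify_syscall(int abi, unsigned syscall)
+ *	{
+ *		switch (syscall) {
+ *		case __NR_open:
+ *			return AUDITSC_OPEN;
+ *		case __NR_openat:
+ *			return AUDITSC_OPENAT;
+ *		case __NR_openat2:
+ *			return AUDITSC_OPENAT2;
+ *		case __NR_execve:
+ *			return AUDITSC_EXECVE;
+ *		default:
+ *			return AUDITSC_NATIVE;
+ *		}
+ *	}
+ */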
+
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+
+#endif
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
new file mode 100644
index 000000000000..de21d9d24a95
--- /dev/null
+++ b/include/linux/auxiliary_bus.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#ifndef _AUXILIARY_BUS_H_
+#define _AUXILIARY_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * DOC: DEVICE_LIFESPAN
+ *
+ * The registering driver is the entity that allocates memory for the
+ * auxiliary_device and registers it on the auxiliary bus. It is important to
+ * note that, as opposed to the platform bus, the registering driver is wholly
+ * responsible for the management of the memory used for the device object.
+ *
+ * To be clear, the memory for the auxiliary_device is freed in the release()
+ * callback defined by the registering driver. The registering driver should
+ * only call auxiliary_device_delete() and then auxiliary_device_uninit() when
+ * it is done with the device. The release() function is then automatically
+ * called if and when other code releases its reference to the device.
+ *
+ * A parent object, defined in the shared header file, contains the
+ * auxiliary_device. It also contains a pointer to the shared object(s), which
+ * are also defined in the shared header. Both the parent object and the shared
+ * object(s) are allocated by the registering driver. This layout allows the
+ * auxiliary_driver's registering module to perform a container_of() call to go
+ * from the pointer to the auxiliary_device, that is passed during the call to
+ * the auxiliary_driver's probe function, up to the parent object, and then
+ * have access to the shared object(s).
+ *
+ * The memory for the shared object(s) must have a lifespan equal to, or
+ * greater than, the lifespan of the memory for the auxiliary_device. The
+ * auxiliary_driver should only consider that the shared object is valid as
+ * long as the auxiliary_device is still registered on the auxiliary bus. It
+ * is up to the registering driver to manage (e.g. free or keep available) the
+ * memory for the shared object beyond the life of the auxiliary_device.
+ *
+ * The registering driver must unregister all auxiliary devices before its own
+ * driver.remove() is completed. An easy way to ensure this is to use the
+ * devm_add_action_or_reset() call to register a function against the parent
+ * device which unregisters the auxiliary device object(s), as sketched after
+ * this comment block.
+ *
+ * Finally, any operations which operate on the auxiliary devices must continue
+ * to function (if only to return an error) after the registering driver
+ * unregisters the auxiliary device.
+ */
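+
+/*
+ * Usage sketch (editorial): one way to satisfy the unregister-before-remove
+ * rule above is to tie teardown to the parent device with
+ * devm_add_action_or_reset(); my_aux_dev_unregister(), parent and auxdev
+ * are illustrative names.
+ *
+ * .. code-block:: c
+ *
+ *	static void my_aux_dev_unregister(void *data)
+ *	{
+ *		struct auxiliary_device *auxdev = data;
+ *
+ *		auxiliary_device_delete(auxdev);
+ *		auxiliary_device_uninit(auxdev);
+ *	}
+ *
+ *	// in the registering driver's probe():
+ *	ret = devm_add_action_or_reset(parent, my_aux_dev_unregister, auxdev);
+ */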
+
+/**
+ * struct auxiliary_device - auxiliary device object.
+ * @dev: Device.
+ *       The release and parent fields of the device structure must be
+ *       filled in.
+ * @name: Match name found by the auxiliary device driver.
+ * @id: Unique identifier if multiple devices of the same name are exported.
+ *
+ * An auxiliary_device represents a part of its parent device's functionality.
+ * It is given a name that, combined with the registering driver's
+ * KBUILD_MODNAME, creates a match_name that is used for driver binding, and an
+ * id that combined with the match_name provide a unique name to register with
+ * the bus subsystem. For example, a driver registering an auxiliary device is
+ * named 'foo_mod.ko' and the subdevice is named 'foo_dev'. The match name is
+ * therefore 'foo_mod.foo_dev'.
+ *
+ * Registering an auxiliary_device is a three-step process.
+ *
+ * First, a 'struct auxiliary_device' needs to be defined or allocated for each
+ * sub-device desired. The name, id, dev.release, and dev.parent fields of
+ * this structure must be filled in as follows.
+ *
+ * The 'name' field is to be given a name that is recognized by the auxiliary
+ * driver. If two auxiliary_devices with the same match_name, e.g.
+ * "foo_mod.foo_dev", are registered onto the bus, they must have unique id
+ * values (e.g. "x" and "y") so that the registered devices names are
+ * "foo_mod.foo_dev.x" and "foo_mod.foo_dev.y". If match_name + id are not
+ * unique, then the device_add fails and generates an error message.
+ *
+ * The auxiliary_device.dev.type.release or auxiliary_device.dev.release must
+ * be populated with a non-NULL pointer to successfully register the
+ * auxiliary_device. This release call is where resources associated with the
+ * auxiliary device must be freed, because once the device is placed on the
+ * bus, the parent driver cannot tell what other code may have a reference to
+ * this data.
+ *
+ * The auxiliary_device.dev.parent should also be set, typically to the
+ * registering driver's device.
+ *
+ * Second, call auxiliary_device_init(), which checks several aspects of the
+ * auxiliary_device struct and performs a device_initialize(). After this step
+ * completes, any error state must have a call to auxiliary_device_uninit() in
+ * its resolution path.
+ *
+ * The third and final step in registering an auxiliary_device is to perform a
+ * call to auxiliary_device_add(), which sets the name of the device and adds
+ * the device to the bus.
+ *
+ * .. code-block:: c
+ *
+ * #define MY_DEVICE_NAME "foo_dev"
+ *
+ * ...
+ *
+ * struct auxiliary_device *my_aux_dev = my_aux_dev_alloc(xxx);
+ *
+ * // Step 1:
+ * my_aux_dev->name = MY_DEVICE_NAME;
+ * my_aux_dev->id = my_unique_id_alloc(xxx);
+ * my_aux_dev->dev.release = my_aux_dev_release;
+ * my_aux_dev->dev.parent = my_dev;
+ *
+ * // Step 2:
+ * if (auxiliary_device_init(my_aux_dev))
+ * goto fail;
+ *
+ * // Step 3:
+ * if (auxiliary_device_add(my_aux_dev)) {
+ * auxiliary_device_uninit(my_aux_dev);
+ * goto fail;
+ * }
+ *
+ * ...
+ *
+ *
+ * Unregistering an auxiliary_device is a two-step process to mirror the
+ * register process. First call auxiliary_device_delete(), then call
+ * auxiliary_device_uninit().
+ *
+ * .. code-block:: c
+ *
+ * auxiliary_device_delete(my_dev->my_aux_dev);
+ * auxiliary_device_uninit(my_dev->my_aux_dev);
+ */
+struct auxiliary_device {
+ struct device dev;
+ const char *name;
+ u32 id;
+};
+
+/**
+ * struct auxiliary_driver - Definition of an auxiliary bus driver
+ * @probe: Called when a matching device is added to the bus.
+ * @remove: Called when device is removed from the bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode, usually to a low power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @name: Driver name.
+ * @driver: Core driver structure.
+ * @id_table: Table of devices this driver should match on the bus.
+ *
+ * Auxiliary drivers follow the standard driver model convention, where
+ * discovery/enumeration is handled by the core, and drivers provide probe()
+ * and remove() methods. They support power management and shutdown
+ * notifications using the standard conventions.
+ *
+ * Auxiliary drivers register themselves with the bus by calling
+ * auxiliary_driver_register(). The id_table contains the match_names of
+ * auxiliary devices that a driver can bind with.
+ *
+ * .. code-block:: c
+ *
+ * static const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * {},
+ * };
+ *
+ * MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table);
+ *
+ * struct auxiliary_driver my_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_drv_probe,
+ * .remove = my_drv_remove
+ * };
+ */
+struct auxiliary_driver {
+ int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+ void (*remove)(struct auxiliary_device *auxdev);
+ void (*shutdown)(struct auxiliary_device *auxdev);
+ int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
+ int (*resume)(struct auxiliary_device *auxdev);
+ const char *name;
+ struct device_driver driver;
+ const struct auxiliary_device_id *id_table;
+};
+
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
+
+static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
+{
+ return container_of(dev, struct auxiliary_device, dev);
+}
+
+static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct auxiliary_driver, driver);
+}
+
+int auxiliary_device_init(struct auxiliary_device *auxdev);
+int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
+#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+
+static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+ put_device(&auxdev->dev);
+}
+
+static inline void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+ device_del(&auxdev->dev);
+}
+
+int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner,
+ const char *modname);
+#define auxiliary_driver_register(auxdrv) \
+ __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME)
+
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+/**
+ * module_auxiliary_driver() - Helper macro for registering an auxiliary driver
+ * @__auxiliary_driver: auxiliary driver struct
+ *
+ * Helper macro for auxiliary drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ *
+ * .. code-block:: c
+ *
+ * module_auxiliary_driver(my_drv);
+ */
+#define module_auxiliary_driver(__auxiliary_driver) \
+ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
+
+struct auxiliary_device *auxiliary_find_device(struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
+
+#endif /* _AUXILIARY_BUS_H_ */
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index f68d0ec2d740..407f7005e6d6 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -4,6 +4,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 40bad71865ea..8e177b67e82f 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -1,21 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright (c) 2013-2022, Intel Corporation. */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/overflow.h>
+#include <uapi/linux/if_ether.h>
+
/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@@ -29,8 +26,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@@ -122,9 +119,14 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -136,6 +138,20 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcode 34 - 43 are reserved */
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
+ VIRTCHNL_OP_ADD_RSS_CFG = 45,
+ VIRTCHNL_OP_DEL_RSS_CFG = 46,
+ VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+ VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+ VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+ VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+ VIRTCHNL_OP_MAX,
};
/* These macros are used to generate compilation errors if a structure/union
@@ -148,19 +164,6 @@ enum virtchnl_ops {
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum virtchnl_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
@@ -221,7 +224,9 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
- enum virtchnl_vsi_type vsi_type;
+
+ /* see enum virtchnl_vsi_type */
+ s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
@@ -232,24 +237,30 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
-#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
-#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
-
-/* Define below the capability flags that are not offloads */
-#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
+/* used to negotiate communicating link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -264,10 +275,11 @@ struct virtchnl_vf_resource {
u32 rss_key_size;
u32 rss_lut_size;
- struct virtchnl_vsi_resource vsi_res[1];
+ struct virtchnl_vsi_resource vsi_res[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
+#define virtchnl_vf_resource_LEGACY_SIZEOF 36
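+
+/* Usage sketch (editorial): with the flexible array member, callers should
+ * size buffers with struct_size() from <linux/overflow.h> (included above)
+ * rather than sizeof(); vf_res and num_vsis are illustrative names.
+ *
+ * .. code-block:: c
+ *
+ *	size_t len = struct_size(vf_res, vsi_res, num_vsis);
+ */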
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
@@ -290,7 +302,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
*/
/* Rx queue config info */
@@ -302,9 +320,13 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u32 pad1;
+ u8 crc_disable;
+ u8 rxdid;
+ u8 pad1[2];
u64 dma_ring_addr;
- enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+ /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+ s32 rx_split_pos;
u32 pad2;
};
@@ -316,6 +338,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -329,10 +354,11 @@ struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
- struct virtchnl_queue_pair_info qpair[1];
+ struct virtchnl_queue_pair_info qpair[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
+#define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
@@ -353,8 +379,13 @@ struct virtchnl_vf_res_request {
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -369,10 +400,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
- struct virtchnl_vector_map vecmap[1];
+ struct virtchnl_vector_map vecmap[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
+#define virtchnl_irq_map_info_LEGACY_SIZEOF 14
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
@@ -381,6 +413,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -403,9 +438,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward all VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[ETH_ALEN];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
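+
+/* Usage sketch (editorial): a VF driver adding its primary MAC would tag the
+ * entry as such; ether_addr_copy() is the usual helper and netdev is an
+ * illustrative net_device pointer.
+ *
+ * .. code-block:: c
+ *
+ *	struct virtchnl_ether_addr ve = {};
+ *
+ *	ether_addr_copy(ve.addr, netdev->dev_addr);
+ *	ve.type = VIRTCHNL_ETHER_ADDR_PRIMARY;
+ */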
@@ -413,10 +475,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
- struct virtchnl_ether_addr list[1];
+ struct virtchnl_ether_addr list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
+#define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
@@ -435,10 +498,357 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
- u16 vlan_id[1];
+ u16 vlan_id[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
+#define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
+
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example, if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion, it
+ * should set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+ VIRTCHNL_VLAN_UNSUPPORTED = 0,
+ VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
+ VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
+ VIRTCHNL_VLAN_PRIO = BIT(24),
+ VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
+ VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
+ VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
+ VIRTCHNL_VLAN_TOGGLE = BIT(31),
+};
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+ u32 outer;
+ u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+ struct virtchnl_vlan_supported_caps filtering_support;
+ u32 ethertype_init;
+ u16 max_filters;
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+ VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+ struct virtchnl_vlan_supported_caps stripping_support;
+ struct virtchnl_vlan_supported_caps insertion_support;
+ u32 ethertype_init;
+ u8 ethertype_match;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross reference its capabilities with the PF's
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+ struct virtchnl_vlan_filtering_caps filtering;
+ struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
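[Illustrative sketch, not part of the patch: interpreting the AND/XOR
semantics described above, where caps is the struct virtchnl_vlan_caps
returned by the PF.]

	u32 outer = caps.offloads.stripping_support.outer;
	bool both = (outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
		    (outer & VIRTCHNL_VLAN_ETHERTYPE_88A8);

	if (both && (outer & VIRTCHNL_VLAN_ETHERTYPE_AND)) {
		/* 0x8100 and 0x88a8 stripping may be enabled concurrently */
	} else if (both && (outer & VIRTCHNL_VLAN_ETHERTYPE_XOR)) {
		/* only one ethertype at a time; the last request wins */
	}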
+struct virtchnl_vlan {
+ u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
+ u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+ * filtering caps
+ */
+ u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
+ * filtering caps. Note that tpid here does not refer to
+ * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+ * actual 2-byte VLAN TPID
+ */
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+ struct virtchnl_vlan inner;
+ struct virtchnl_vlan outer;
+ u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+ u16 vport_id;
+ u16 num_elements;
+ u8 pad[4];
+ struct virtchnl_vlan_filter filters[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
+#define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
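[Illustrative sketch, not part of the patch: an add request with one
outer 0x8100 filter, assuming the PF reported outer filtering support.
vport_id, vid, and send_msg() are placeholders; ETH_P_8021Q is the
0x8100 TPID from <linux/if_ether.h>.]

	struct virtchnl_vlan_filter_list_v2 *vfl = NULL;
	u32 len = virtchnl_struct_size(vfl, filters, 1);

	vfl = kzalloc(len, GFP_KERNEL);
	if (!vfl)
		return -ENOMEM;
	vfl->vport_id = vport_id;
	vfl->num_elements = 1;
	vfl->filters[0].outer.tci = vid;	/* PCP bits left at zero */
	vfl->filters[0].outer.tpid = ETH_P_8021Q;
	send_msg(VIRTCHNL_OP_ADD_VLAN_V2, (u8 *)vfl, len);
	kfree(vfl);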
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ */
+struct virtchnl_vlan_setting {
+ u32 outer_ethertype_setting;
+ u32 inner_ethertype_setting;
+ u16 vport_id;
+ u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
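[Illustrative sketch, not part of the patch: the 0x88a8 stripping
scenario from the comment above. vport_id and send_msg() are
placeholders.]

	struct virtchnl_vlan_setting vs = {};

	vs.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
	vs.vport_id = vport_id;
	send_msg(VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, (u8 *)&vs, sizeof(vs));
	/* with VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION, keep
	 * insertion in sync by sending the same setting for insertion
	 */
	send_msg(VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, (u8 *)&vs, sizeof(vs));
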
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
@@ -475,20 +885,20 @@ VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
- u8 pad[1];
+ u8 key[]; /* RSS hash key, packed bytes */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
+#define virtchnl_rss_key_LEGACY_SIZEOF 6
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table */
- u8 pad[1];
+ u8 lut[]; /* RSS lookup table */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
+#define virtchnl_rss_lut_LEGACY_SIZEOF 6
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
@@ -503,6 +913,29 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
+ * VF sends this message to configure the RSS hash function. Only supported
+ * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation.
+ * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
+ * by the PF.
+ */
+struct virtchnl_rss_hfunc {
+ u16 vsi_id;
+ u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
+ u32 reserved;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
+
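[Illustrative sketch, not part of the patch: requesting the symmetric
Toeplitz hash. vsi_id and send_msg() are placeholders.]

	struct virtchnl_rss_hfunc hf = {};

	hf.vsi_id = vsi_id;
	hf.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
	send_msg(VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)&hf, sizeof(hf));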
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
* VF sends these messages to enable or disable channels based on
@@ -522,10 +955,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
struct virtchnl_tc_info {
u32 num_tc;
u32 pad;
- struct virtchnl_channel_info list[1];
+ struct virtchnl_channel_info list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
+#define virtchnl_tc_info_LEGACY_SIZEOF 24
/* VIRTCHNL_ADD_CLOUD_FILTER
* VIRTCHNL_DEL_CLOUD_FILTER
@@ -559,6 +993,11 @@ enum virtchnl_action {
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
};
enum virtchnl_flow_type {
@@ -570,8 +1009,12 @@ enum virtchnl_flow_type {
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
- enum virtchnl_flow_type flow_type;
- enum virtchnl_action action;
+
+ /* see enum virtchnl_flow_type */
+ s32 flow_type;
+
+ /* see enum virtchnl_action */
+ s32 action;
u32 action_meta;
u8 field_flags;
u8 pad[3];
@@ -579,6 +1022,10 @@ struct virtchnl_filter {
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+struct virtchnl_supported_rxdids {
+ u64 supported_rxdids;
+};
+
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@@ -595,7 +1042,8 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
- enum virtchnl_event_codes event;
+ /* see enum virtchnl_event_codes */
+ s32 event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
@@ -608,6 +1056,7 @@ struct virtchnl_pf_event {
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
+ u8 pad[3];
} link_event;
struct {
/* link_speed provided in Mbps */
@@ -617,39 +1066,42 @@ struct virtchnl_pf_event {
} link_event_adv;
} event_data;
- int severity;
+ s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
-/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
+/* used to specify if a ceq_idx or aeq_idx is invalid */
+#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
+/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
+ * VF uses this message to request PF to map RDMA vectors to RDMA queues.
+ * The request for this originates from the VF RDMA driver through
+ * a client interface between VF LAN and VF RDMA driver.
* A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
+ * there is a single AEQ per VF RDMA instance in which case
+ * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the aeq and a
+ * valid idx for the ceq. There will never be a case where multiple CEQs are
+ * attached to a single vector.
* PF configures interrupt mapping and returns status.
*/
-struct virtchnl_iwarp_qv_info {
+struct virtchnl_rdma_qv_info {
u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
+ u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
+ u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
u8 itr_idx;
u8 pad[3];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
-struct virtchnl_iwarp_qvlist_info {
+struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_iwarp_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
+#define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
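[Illustrative sketch, not part of the patch: a single-vector mapping
with a CEQ but no AEQ, per the comment above. msix_vector and
send_msg() are placeholders.]

	struct virtchnl_rdma_qvlist_info *qvl = NULL;
	u32 len = virtchnl_struct_size(qvl, qv_info, 1);

	qvl = kzalloc(len, GFP_KERNEL);
	if (!qvl)
		return -ENOMEM;
	qvl->num_vectors = 1;
	qvl->qv_info[0].v_idx = msix_vector;
	qvl->qv_info[0].ceq_idx = 0;
	qvl->qv_info[0].aeq_idx = VIRTCHNL_RDMA_INVALID_QUEUE_IDX;
	qvl->qv_info[0].itr_idx = 0;
	send_msg(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, (u8 *)qvl, len);
	kfree(qvl);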
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -668,6 +1120,316 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* The VF uses these macros to configure each protocol header.
+ * They specify which protocol headers and protocol header fields to use,
+ * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a struct of virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc
+ * @param field: SRC/DST/TEID/SPI, etc
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
+
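[Illustrative sketch, not part of the patch: typical macro usage, where
hdr points into the virtchnl_proto_hdrs array defined below.]

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);	/* type = VIRTCHNL_PROTO_HDR_IPV4 */
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);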
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+};
+
+struct virtchnl_proto_hdr {
+ /* see enum virtchnl_proto_hdr_type */
+ s32 type;
+ u32 field_selector; /* a bit mask to select field for header type */
+ u8 buffer[64];
+ /**
+ * binary buffer in network order for the specific header type.
+ * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+ * header is expected to be copied into the buffer.
+ */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
+
+struct virtchnl_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ /**
+ * specify where the protocol headers start from.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * ....
+ **/
+ int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+
+ /* see enum virtchnl_rss_algorithm; rss algorithm type */
+ s32 rss_algorithm;
+ u8 reserved[128]; /* reserve for future */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
+
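[Illustrative sketch, not part of the patch: an RSS config hashing on
the IPv4/TCP 4-tuple with a symmetric Toeplitz hash. send_msg() is a
placeholder.]

	struct virtchnl_rss_cfg *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	struct virtchnl_proto_hdr *h;

	if (!cfg)
		return -ENOMEM;
	cfg->proto_hdrs.tunnel_level = 0;	/* match from the outer layer */
	cfg->proto_hdrs.count = 2;
	h = &cfg->proto_hdrs.proto_hdr[0];
	VIRTCHNL_SET_PROTO_HDR_TYPE(h, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(h, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(h, IPV4, DST);
	h = &cfg->proto_hdrs.proto_hdr[1];
	VIRTCHNL_SET_PROTO_HDR_TYPE(h, TCP);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(h, TCP, SRC_PORT);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(h, TCP, DST_PORT);
	cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
	send_msg(VIRTCHNL_OP_ADD_RSS_CFG, (u8 *)cfg, sizeof(*cfg));
	kfree(cfg);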
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ /* see enum virtchnl_action type */
+ s32 type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+ /* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request is successfully done by PF
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed due to parameter validation or
+ * missing hardware support.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because programming
+ * timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation,
+ * for example, the VF queried the counter of a rule that has no counter
+ * action.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request is successfully done and return add_status to VF.
+ */
+struct virtchnl_fdir_add {
+ u16 vsi_id; /* INPUT */
+ /*
+ * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
+ * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
+ */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return del_status to VF.
+ */
+struct virtchnl_fdir_del {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+
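[Illustrative sketch, not part of the patch: the validate-then-create
flow. rule is a pre-built struct virtchnl_fdir_rule; vsi_id and
send_msg() are placeholders.]

	struct virtchnl_fdir_add *add = kzalloc(sizeof(*add), GFP_KERNEL);

	if (!add)
		return -ENOMEM;
	add->vsi_id = vsi_id;
	add->validate_only = 1;		/* dry run first */
	add->rule_cfg = rule;
	send_msg(VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)add, sizeof(*add));
	/* on VIRTCHNL_FDIR_SUCCESS, resend with validate_only = 0; the PF
	 * then returns flow_id for use with VIRTCHNL_OP_DEL_FDIR_FILTER
	 */
	kfree(add);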
+#define __vss_byone(p, member, count, old) \
+ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
+
+#define __vss_byelem(p, member, count, old) \
+ (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
+
+#define __vss_full(p, member, count, old) \
+ (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
+
+#define __vss(type, func, p, member, count) \
+ struct type: func(p, member, count, type##_LEGACY_SIZEOF)
+
+#define virtchnl_struct_size(p, m, c) \
+ _Generic(*p, \
+ __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
+ __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
+ __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
+ __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
+ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
+ __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
+
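[Editorial note: the three helpers reproduce the legacy wire sizes:
__vss_full for structs whose old [1] array element was fully counted,
__vss_byone for byte arrays where one byte plus one pad byte was
counted, and __vss_byelem where one whole trailing element was counted.
A worked example:]

	/* virtchnl_rss_key with key_len == 40 (sizes per the checks above):
	 *   virtchnl_struct_size(vrk, key, 40)
	 *     = struct_size(vrk, key, 40) + (6 - 1 - struct_size(vrk, key, 0))
	 *     = 44 + (6 - 1 - 4) = 45
	 * which matches the legacy "sizeof(struct) + key_len - 1" == 6 + 39.
	 */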
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
@@ -682,7 +1444,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
@@ -702,24 +1464,23 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- virtchnl_queue_pair_info));
+ valid_len = virtchnl_struct_size(vqc, qpair,
+ vqc->num_queue_pairs);
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_irq_map_info);
+ valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct virtchnl_vector_map));
+ valid_len = virtchnl_struct_size(vimi, vecmap,
+ vimi->num_vectors);
if (vimi->num_vectors == 0)
err_msg_format = true;
}
@@ -730,23 +1491,24 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
- valid_len = sizeof(struct virtchnl_ether_addr_list);
+ valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct virtchnl_ether_addr);
+ valid_len = virtchnl_struct_size(veal, list,
+ veal->num_elements);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
+ valid_len = virtchnl_struct_size(vfl, vlan_id,
+ vfl->num_elements);
if (vfl->num_elements == 0)
err_msg_format = true;
}
@@ -757,7 +1519,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
@@ -767,37 +1529,39 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
else
err_msg_format = true;
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
- struct virtchnl_iwarp_qvlist_info *qv =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
- valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_iwarp_qv_info));
+ struct virtchnl_rdma_qvlist_info *qv =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+
+ valid_len = virtchnl_struct_size(qv, qv_info,
+ qv->num_vectors);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- valid_len = sizeof(struct virtchnl_rss_key);
+ valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
- valid_len += vrk->key_len - 1;
+ valid_len = virtchnl_struct_size(vrk, key,
+ vrk->key_len);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- valid_len = sizeof(struct virtchnl_rss_lut);
+ valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
- valid_len += vrl->lut_entries - 1;
+ valid_len = virtchnl_struct_size(vrl, lut,
+ vrl->lut_entries);
}
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ valid_len = sizeof(struct virtchnl_rss_hfunc);
+ break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
@@ -810,12 +1574,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
- valid_len = sizeof(struct virtchnl_tc_info);
+ valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
- valid_len += (vti->num_tc - 1) *
- sizeof(struct virtchnl_channel_info);
+ valid_len = virtchnl_struct_size(vti, list,
+ vti->num_tc);
if (vti->num_tc == 0)
err_msg_format = true;
}
@@ -823,11 +1587,45 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_CHANNELS:
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
- valid_len = sizeof(struct virtchnl_filter);
- break;
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ valid_len = sizeof(struct virtchnl_rss_cfg);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_add);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_del);
+ break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+ valid_len = virtchnl_struct_size(vfl, filters,
+ vfl->num_elements);
+
+ if (vfl->num_elements == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ valid_len = sizeof(struct virtchnl_vlan_setting);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index fff9367a6348..2ad261082bba 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -28,11 +28,6 @@ enum wb_state {
WB_start_all, /* nr_pages == 0 (all) work pending */
};
-enum wb_congested_state {
- WB_async_congested, /* The async (write) queue is getting full */
- WB_sync_congested, /* The sync queue is getting full */
-};
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -103,6 +98,9 @@ struct wb_completion {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
@@ -116,10 +114,9 @@ struct bdi_writeback {
struct list_head b_dirty_time; /* time stamps are dirty */
spinlock_t list_lock; /* protects the b_* lists */
+ atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- unsigned long congested; /* WB_[a]sync_congested flags */
-
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
unsigned long written_stamp; /* pages written at bw_time_stamp */
@@ -142,8 +139,7 @@ struct bdi_writeback {
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
-
- unsigned long dirty_sleep; /* last wait */
+ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
struct list_head bdi_node; /* anchored at bdi->wb_list */
@@ -154,6 +150,8 @@ struct bdi_writeback {
struct cgroup_subsys_state *blkcg_css; /* and blkcg */
struct list_head memcg_node; /* anchored at memcg->cgwb_list */
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+ struct list_head b_attached; /* attached inodes, protected by list_lock */
+ struct list_head offline_node; /* anchored at offline_cgwbs */
union {
struct work_struct release_work;
@@ -179,6 +177,11 @@ struct backing_dev_info {
* any dirty wbs, which is depended upon by bdi_has_dirty().
*/
atomic_long_t tot_write_bandwidth;
+ /*
+ * Jiffies when last process was dirty throttled on this bdi. Used by
+ * blk-wbt.
+ */
+ unsigned long last_bdp_sleep;
struct bdi_writeback wb; /* the root writeback info for this bdi */
struct list_head wb_list; /* list of all wbs */
@@ -200,14 +203,6 @@ struct backing_dev_info {
#endif
};
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
-
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
-void set_bdi_congested(struct backing_dev_info *bdi, int sync);
-
struct wb_lock_cookie {
bool locked;
unsigned long flags;
@@ -239,8 +234,9 @@ static inline void wb_get(struct bdi_writeback *wb)
/**
* wb_put_many - decrement a wb's refcount
* @wb: bdi_writeback to put
+ * @nr: number of references to put
*/
-static inline void wb_put(struct bdi_writeback *wb)
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
if (WARN_ON_ONCE(!wb->bdi)) {
/*
@@ -251,7 +247,16 @@ static inline void wb_put(struct bdi_writeback *wb)
}
if (wb != &wb->bdi->wb)
- percpu_ref_put(&wb->refcnt);
+ percpu_ref_put_many(&wb->refcnt, nr);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ wb_put_many(wb, 1);
}
/**
@@ -280,6 +285,10 @@ static inline void wb_put(struct bdi_writeback *wb)
{
}
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
+{
+}
+
static inline bool wb_dying(struct bdi_writeback *wb)
{
return false;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0b06b2d26c9a..8e7af9a03b41 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -12,10 +12,8 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
-#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -40,7 +38,6 @@ struct backing_dev_info *bdi_alloc(int node_id);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
-void wb_wakeup_delayed(struct bdi_writeback *wb);
void wb_wait_for_completion(struct wb_completion *done);
@@ -48,7 +45,6 @@ extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
@@ -64,7 +60,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_wb_stat(struct bdi_writeback *wb,
+static inline void wb_stat_mod(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
@@ -72,12 +68,12 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, 1);
+ wb_stat_mod(wb, item, 1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, -1);
+ wb_stat_mod(wb, item, -1);
}
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
@@ -104,42 +100,35 @@ static inline unsigned long wb_stat_error(void)
#endif
}
+/* BDI ratio is expressed as parts per 1000000 for finer granularity. */
+#define BDI_RATIO_SCALE 10000
+
+u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
+u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
+int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
+int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
+int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);
/*
* Flags in backing_dev_info::capability
*
- * The first three flags control whether dirty pages will contribute to the
- * VM's accounting and whether writepages() should be called for dirty pages
- * (something that would not, for example, be appropriate for ramfs)
- *
- * WARNING: these flags are closely related and should not normally be
- * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
- * three flags into a single convenience macro.
- *
- * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
- * BDI_CAP_NO_WRITEBACK: Don't write pages back
- * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
- * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
- *
- * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
- * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
- * inefficient.
+ * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
+ * should contribute to accounting
+ * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
-#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
-#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_NO_ACCT_WB 0x00000004
-#define BDI_CAP_STABLE_WRITES 0x00000008
-#define BDI_CAP_STRICTLIMIT 0x00000010
-#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
-#define BDI_CAP_SYNCHRONOUS_IO 0x00000040
-
-#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
- (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
+#define BDI_CAP_WRITEBACK (1 << 0)
+#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
+#define BDI_CAP_STRICTLIMIT (1 << 2)
extern struct backing_dev_info noop_backing_dev_info;
+int bdi_init(struct backing_dev_info *bdi);
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @wb: bdi_writeback of interest
@@ -152,70 +141,11 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
return test_bit(WB_writeback_running, &wb->state);
}
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
-{
- struct super_block *sb;
-
- if (!inode)
- return &noop_backing_dev_info;
-
- sb = inode->i_sb;
-#ifdef CONFIG_BLOCK
- if (sb_is_blkdev_sb(sb))
- return I_BDEV(inode)->bd_bdi;
-#endif
- return sb->s_bdi;
-}
-
-static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
-{
- return wb->congested & cong_bits;
-}
-
-long congestion_wait(int sync, long timeout);
-long wait_iff_congested(int sync, long timeout);
-
-static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
-}
-
-static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_STABLE_WRITES;
-}
-
-static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
-}
-
-static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
-}
-
-static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
-{
- /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
- return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
- BDI_CAP_NO_WRITEBACK));
-}
-
-static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
-{
- return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline bool mapping_cap_account_dirty(struct address_space *mapping)
-{
- return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
-}
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
-static inline int bdi_sched_wait(void *word)
+static inline bool mapping_can_writeback(struct address_space *mapping)
{
- schedule();
- return 0;
+ return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -226,16 +156,15 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css,
gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
-void wb_blkcg_offline(struct blkcg *blkcg);
-int inode_congested(struct inode *inode, int cong_bits);
+void wb_blkcg_offline(struct cgroup_subsys_state *css);
/**
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
* @inode: inode of interest
*
- * cgroup writeback requires support from both the bdi and filesystem.
- * Also, both memcg and iocg have to be on the default hierarchy. Test
- * whether all conditions are met.
+ * Cgroup writeback requires support from the filesystem. Also, both memcg and
+ * iocg have to be on the default hierarchy. Test whether all conditions are
+ * met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
@@ -246,8 +175,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
cgroup_subsys_on_dfl(io_cgrp_subsys) &&
- bdi_cap_account_dirty(bdi) &&
- (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (bdi->capabilities & BDI_CAP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
@@ -310,18 +238,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
}
/**
- * inode_to_wb_is_valid - test whether an inode has a wb associated
- * @inode: inode of interest
- *
- * Returns %true if @inode has a wb associated. May be called without any
- * locking.
- */
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return inode->i_wb;
-}
-
-/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
@@ -340,6 +256,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
return inode->i_wb;
}
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
+{
+ /*
+ * If wbc does not have inode attached, it means cgroup writeback was
+ * disabled when wbc started. Just use the default wb in that case.
+ */
+ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
+}
+
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
@@ -408,16 +335,19 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
return &bdi->wb;
}
-static inline bool inode_to_wb_is_valid(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
- return true;
+ return &inode_to_bdi(inode)->wb;
}
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
{
- return &inode_to_bdi(inode)->wb;
+ return inode_to_wb(inode);
}
+
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
@@ -433,54 +363,12 @@ static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}
-static inline void wb_blkcg_offline(struct blkcg *blkcg)
+static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}
-static inline int inode_congested(struct inode *inode, int cong_bits)
-{
- return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
-}
-
#endif /* CONFIG_CGROUP_WRITEBACK */
-static inline int inode_read_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_sync_congested);
-}
-
-static inline int inode_write_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_async_congested);
-}
-
-static inline int inode_rw_congested(struct inode *inode)
-{
- return inode_congested(inode, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
-{
- return wb_congested(&bdi->wb, cong_bits);
-}
-
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_sync_congested);
-}
-
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_async_congested);
-}
-
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
-
const char *bdi_dev_name(struct backing_dev_info *bdi);
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 000000000000..3f1fe1774f1b
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ struct file *user_file;
+ void (*accessed)(struct file *);
+ void (*end_write)(struct file *);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index 2426276b9bd3..670f2dae692f 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -15,6 +15,7 @@
#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
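[Editorial note, worked example of the bit packing above:]

/* e.g. BB_MAKE(0x1000, 8, 1) packs offset 0x1000, length 8, acked:
 *   (0x1000 << 9) | (8 - 1) | (1ULL << 63) = 0x8000000000200007
 * and BB_OFFSET() recovers 0x1000, BB_LEN() 8, BB_END() 0x1008.
 */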
/* Bad block numbers are stored sorted in a single page.
@@ -41,6 +42,12 @@ struct badblocks {
sector_t size; /* in sectors */
};
+struct badblocks_context {
+ sector_t start;
+ sector_t len;
+ int ack;
+};
+
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors);
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
@@ -63,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb)
}
badblocks_exit(bb);
}
+
+static inline int badblocks_full(struct badblocks *bb)
+{
+ return (bb->count >= MAX_BADBLOCKS);
+}
+
+static inline int badblocks_empty(struct badblocks *bb)
+{
+ return (bb->count == 0);
+}
+
+static inline void set_changed(struct badblocks *bb)
+{
+ if (bb->changed != 1)
+ bb->changed = 1;
+}
+
+static inline void clear_changed(struct badblocks *bb)
+{
+ if (bb->changed != 0)
+ bb->changed = 0;
+}
+
#endif
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 338aa27e4773..5ca2d5699620 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -57,7 +57,6 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
- struct inode *inode;
};
extern struct page *balloon_page_alloc(void);
@@ -75,17 +74,10 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
- balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
- isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
- struct page *newpage,
- struct page *page, enum migrate_mode mode);
+extern const struct movable_operations balloon_mops;
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -100,7 +92,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageOffline(page);
- __SetPageMovable(page, balloon->inode->i_mapping);
+ __SetPageMovable(page, &balloon_mops);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
}
@@ -155,22 +147,6 @@ static inline void balloon_page_delete(struct page *page)
list_del(&page->lru);
}
-static inline bool balloon_page_isolate(struct page *page)
-{
- return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
- return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- return 0;
-}
-
static inline gfp_t balloon_mapping_gfp_mask(void)
{
return GFP_HIGHUSER;
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 000000000000..660d4cb1ef31
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst);
+int base64_decode(const char *src, int len, u8 *dst);
+
+#endif /* _LINUX_BASE64_H */
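
A usage sketch for the new helpers, assuming (as in lib/base64.c) that base64_encode() returns the number of characters written; BASE64_CHARS() bounds the encoded size, so the output buffer can be sized up front:

	#include <linux/base64.h>
	#include <linux/slab.h>

	/* Encode @len raw bytes into a freshly allocated NUL-terminated string. */
	static char *encode_blob(const u8 *src, int len)
	{
		char *dst = kmalloc(BASE64_CHARS(len) + 1, GFP_KERNEL);
		int n;

		if (!dst)
			return NULL;
		n = base64_encode(src, len, dst);
		dst[n] = '\0';
		return dst;
	}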
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 118bea36d7d4..abbc8149178e 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -14,8 +14,12 @@
const_bin2bcd(x) : \
_bin2bcd(x))
+#define bcd_is_valid(x) \
+ const_bcd_is_valid(x)
+
#define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10)
#define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10)
+#define const_bcd_is_valid(x) (((x) & 0x0f) < 10 && ((x) >> 4) < 10)
unsigned _bcd2bin(unsigned char val) __attribute_const__;
unsigned char _bin2bcd(unsigned val) __attribute_const__;
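
Both the converters and the new validity check are constant expressions, so they can be exercised at build time. Worked values (illustrative):

	#include <linux/bcd.h>
	#include <linux/build_bug.h>

	static inline void bcd_demo(void)
	{
		/* 0x59 encodes decimal 59: both nibbles are below 10 */
		BUILD_BUG_ON(!const_bcd_is_valid(0x59));
		BUILD_BUG_ON(const_bcd2bin(0x59) != 59);
		/* 0x5a is invalid: the low nibble is 0xa */
		BUILD_BUG_ON(const_bcd_is_valid(0x5a));
	}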
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 53b31f69b74a..7615f8d7b1ed 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size);
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
@@ -20,6 +21,11 @@ static inline void bcm47xx_nvram_release_contents(char *nvram)
vfree(nvram);
};
#else
+static inline int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start,
+ size_t res_size)
+{
+ return -ENOTSUPP;
+}
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index b0f4424f34fc..f8254fd53e15 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -9,9 +9,19 @@
#include <linux/kernel.h>
#include <linux/vmalloc.h>
+struct ssb_sprom;
+
#ifdef CONFIG_BCM47XX_SPROM
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback);
int bcm47xx_sprom_register_fallbacks(void);
#else
+static inline void bcm47xx_fill_sprom(struct ssb_sprom *sprom,
+ const char *prefix,
+ bool fallback)
+{
+}
+
static inline int bcm47xx_sprom_register_fallbacks(void)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h
index b87945cb6946..7edb809a2586 100644
--- a/include/linux/bcm963xx_tag.h
+++ b/include/linux/bcm963xx_tag.h
@@ -84,7 +84,7 @@ struct bcm_tag {
char flash_layout_ver[FLASHLAYOUTVER_LEN];
/* 196-199: kernel+rootfs CRC32 */
__u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
+ /* 200-215: Unused except on Alice Gate where it is information */
char information2[TAGINFO2_LEN];
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
__u32 image_crc;
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index d35b9206096d..0cb6638b55e5 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -3,7 +3,8 @@
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/platform_data/brcmnand.h>
+#include <linux/gpio/driver.h>
/** ChipCommon core registers. **/
#define BCMA_CC_ID 0x0000
@@ -270,6 +271,7 @@
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_OTP_PRESENT 0x00000020
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
@@ -599,6 +601,10 @@ struct bcma_sflash {
#ifdef CONFIG_BCMA_NFLASH
struct bcma_nflash {
+ /* Must be the first member for the brcmnand driver to
+ * dereference this structure.
+ */
+ struct brcmnand_platform_data brcmnand_info;
bool present;
bool boot; /* This is the flash the SoC boots from */
};
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 0571701ab1c5..70f97f685bff 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -8,6 +8,7 @@
#include <uapi/linux/binfmts.h>
struct filename;
+struct coredump_params;
#define CORENAME_MAX_SIZE 128
@@ -42,9 +43,6 @@ struct linux_binprm {
* original userspace.
*/
point_of_no_return:1;
-#ifdef __alpha__
- unsigned int taso:1;
-#endif
struct file *executable; /* Executable to pass to the interpreter */
struct file *interpreter;
struct file *file;
@@ -73,16 +71,9 @@ struct linux_binprm {
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
-/* Function parameter for binfmt->coredump */
-struct coredump_params {
- const kernel_siginfo_t *siginfo;
- struct pt_regs *regs;
- struct file *file;
- unsigned long limit;
- unsigned long mm_flags;
- loff_t written;
- loff_t pos;
-};
+/* preserve argv0 for the interpreter */
+#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
+#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
/*
* This structure defines the functions that are used to load the binary formats that
@@ -93,10 +84,22 @@ struct linux_binfmt {
struct module *module;
int (*load_binary)(struct linux_binprm *);
int (*load_shlib)(struct file *);
+#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
+#endif
} __randomize_layout;
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc {
+ struct list_head entries;
+ rwlock_t entries_lock;
+ bool enabled;
+} __randomize_layout;
+
+extern struct binfmt_misc init_binfmt_misc;
+#endif
+
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
/* Registration of default binfmt handlers */
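
For context, a minimal registration sketch against the reshaped struct; demo_load_binary() is hypothetical, and note that with this patch .core_dump and .min_coredump exist only under CONFIG_COREDUMP:

	#include <linux/binfmts.h>
	#include <linux/errno.h>
	#include <linux/module.h>

	static int demo_load_binary(struct linux_binprm *bprm)
	{
		return -ENOEXEC;	/* decline, let the next handler try */
	}

	static struct linux_binfmt demo_format = {
		.module      = THIS_MODULE,
		.load_binary = demo_load_binary,
	};

	static int __init demo_init(void)
	{
		register_binfmt(&demo_format);	/* wraps __register_binfmt(fmt, 0) */
		return 0;
	}
	module_init(demo_init);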
diff --git a/include/linux/bio.h b/include/linux/bio.h
index c6d765382926..875d792bffff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -5,21 +5,19 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
-#include <linux/highmem.h>
#include <linux/mempool.h>
-#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
+#include <linux/uio.h>
-#define BIO_DEBUG
+#define BIO_MAX_VECS 256U
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
+struct queue_limits;
-#define BIO_MAX_PAGES 256
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+ return min(nr_segs, BIO_MAX_VECS);
+}
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
@@ -38,9 +36,6 @@
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
-#define bio_multiple_segments(bio) \
- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-
#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
@@ -72,26 +67,9 @@ static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_opf & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline unsigned int bio_cur_bytes(struct bio *bio)
-{
- if (bio_has_data(bio))
- return bio_iovec(bio).bv_len;
- else /* dataless requests such as discard */
- return bio->bi_iter.bi_size;
-}
-
static inline void *bio_data(struct bio *bio)
{
if (bio_has_data(bio))
@@ -100,25 +78,6 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-/**
- * bio_full - check if the bio is full
- * @bio: bio to check
- * @len: length of one segment to be added
- *
- * Return true if @bio is full and one segment with @len bytes can't be
- * added to the bio, otherwise return false
- */
-static inline bool bio_full(struct bio *bio, unsigned len)
-{
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return true;
-
- if (bio->bi_iter.bi_size > UINT_MAX - len)
- return true;
-
- return false;
-}
-
static inline bool bio_next_segment(const struct bio *bio,
struct bvec_iter_all *iter)
{
@@ -148,11 +107,46 @@ static inline void bio_advance_iter(const struct bio *bio,
/* TODO: It is reasonable to complete bio with error here. */
}
+/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
+static inline void bio_advance_iter_single(const struct bio *bio,
+ struct bvec_iter *iter,
+ unsigned int bytes)
+{
+ iter->bi_sector += bytes >> 9;
+
+ if (bio_no_advance_iter(bio))
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
+}
+
+void __bio_advance(struct bio *, unsigned bytes);
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @nbytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+ if (nbytes == bio->bi_iter.bi_size) {
+ bio->bi_iter.bi_size = 0;
+ return;
+ }
+ __bio_advance(bio, nbytes);
+}
+
#define __bio_for_each_segment(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bio_iter_iovec((bio), (iter))), 1); \
- bio_advance_iter((bio), &(iter), (bvl).bv_len))
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
#define bio_for_each_segment(bvl, bio, iter) \
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
@@ -161,7 +155,7 @@ static inline void bio_advance_iter(const struct bio *bio,
for (iter = (start); \
(iter).bi_size && \
((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
- bio_advance_iter((bio), &(iter), (bvl).bv_len))
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter) \
@@ -173,7 +167,7 @@ static inline void bio_advance_iter(const struct bio *bio,
*/
#define bio_for_each_bvec_all(bvl, bio, i) \
for (i = 0, bvl = bio_first_bvec_all(bio); \
- i < (bio)->bi_vcnt; i++, bvl++) \
+ i < (bio)->bi_vcnt; i++, bvl++)
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
@@ -193,8 +187,6 @@ static inline unsigned bio_segments(struct bio *bio)
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
default:
break;
}
@@ -237,7 +229,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
- return (bio->bi_flags & (1U << bit)) != 0;
+ return bio->bi_flags & (1U << bit);
}
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
@@ -250,38 +242,6 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
-static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
-{
- *bv = bio_iovec(bio);
-}
-
-static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
-{
- struct bvec_iter iter = bio->bi_iter;
- int idx;
-
- if (unlikely(!bio_multiple_segments(bio))) {
- *bv = bio_iovec(bio);
- return;
- }
-
- bio_advance_iter(bio, &iter, iter.bi_size);
-
- if (!iter.bi_bvec_done)
- idx = iter.bi_idx - 1;
- else /* in the middle of bvec */
- idx = iter.bi_idx;
-
- *bv = bio->bi_io_vec[idx];
-
- /*
- * iter.bi_bvec_done records actual length of the last bvec
- * if this bio ends in the middle of one io vector
- */
- if (iter.bi_bvec_done)
- bv->bv_len = iter.bi_bvec_done;
-}
-
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
@@ -293,18 +253,82 @@ static inline struct page *bio_first_page_all(struct bio *bio)
return bio_first_bvec_all(bio)->bv_page;
}
+static inline struct folio *bio_first_folio_all(struct bio *bio)
+{
+ return page_folio(bio_first_page_all(bio));
+}
+
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
+ */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ struct folio *_next;
+ size_t _seg_count;
+ int _i;
+};
+
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
+{
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+
+ if (unlikely(i >= bio->bi_vcnt)) {
+ fi->folio = NULL;
+ return;
+ }
+
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ fi->_i = i;
+}
+
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = fi->_next;
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ } else {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ }
+}
+
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
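
A sketch of the new iterator in use: a hypothetical read-completion helper, assuming folio_mark_uptodate() from <linux/pagemap.h>:

	#include <linux/bio.h>
	#include <linux/pagemap.h>

	static void demo_read_endio(struct bio *bio)
	{
		struct folio_iter fi;

		/* visit each folio the bio touched, marking it uptodate */
		bio_for_each_folio_all(fi, bio)
			folio_mark_uptodate(fi.folio);
		bio_put(bio);
	}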
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
+ BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */
+ BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */
};
/*
@@ -315,7 +339,6 @@ struct bio_integrity_payload {
struct bvec_iter bip_iter;
- unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
@@ -361,9 +384,11 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-extern void bio_trim(struct bio *bio, int offset, int size);
+void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
+struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, struct bio_set *bs, unsigned max_bytes);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
@@ -372,7 +397,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
* @gfp: gfp mask
* @bs: bio set to allocate from
*
- * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * Return: a bio representing the next @sectors of @bio - if the bio is smaller
* than @sectors, returns the original bio unchanged.
*/
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
@@ -387,31 +412,32 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
+ BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ blk_opf_t opf, gfp_t gfp_mask,
+ struct bio_set *bs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+ struct bio *bio_src, gfp_t gfp);
extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
-}
-
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+ return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
-extern blk_qc_t submit_bio(struct bio *);
+void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);
@@ -428,72 +454,72 @@ static inline void bio_wouldblock_error(struct bio *bio)
bio_endio(bio);
}
+/*
+ * Calculate the number of bvec segments that should be allocated to fit the
+ * data pointed to by @iter. If @iter is backed by a bvec, it is reused
+ * instead of allocating a new one.
+ */
+static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
+{
+ if (iov_iter_is_bvec(iter))
+ return 0;
+ return iov_iter_npages(iter, max_segs);
+}
+
struct request_queue;
extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
-
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
+ unsigned off);
+bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
+ size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
-bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off, bool *same_page);
+int bio_add_zone_append_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
+void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
+ size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-void bio_release_pages(struct bio *bio, bool mark_dirty);
+void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
+void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
-void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
-void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);
+void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
static inline void zero_fill_bio(struct bio *bio)
{
zero_fill_bio_iter(bio, bio->bi_iter);
}
-extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
-extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
-extern unsigned int bvec_nr_vecs(unsigned short idx);
-extern const char *bio_devname(struct bio *bio, char *buffer);
-
-#define bio_set_dev(bio, bdev) \
-do { \
- if ((bio)->bi_disk != (bdev)->bd_disk) \
- bio_clear_flag(bio, BIO_THROTTLED);\
- (bio)->bi_disk = (bdev)->bd_disk; \
- (bio)->bi_partno = (bdev)->bd_partno; \
- bio_associate_blkg(bio); \
-} while (0)
-
-#define bio_copy_dev(dst, src) \
-do { \
- (dst)->bi_disk = (src)->bi_disk; \
- (dst)->bi_partno = (src)->bi_partno; \
- bio_clone_blkg_association(dst, src); \
-} while (0)
+static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
+{
+ if (bio_flagged(bio, BIO_PAGE_PINNED))
+ __bio_release_pages(bio, mark_dirty);
+}
#define bio_dev(bio) \
- disk_devt((bio)->bi_disk)
+ disk_devt((bio)->bi_bdev->bd_disk)
#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
+void blkcg_punt_bio_submit(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
@@ -501,48 +527,20 @@ static inline void bio_associate_blkg_from_css(struct bio *bio,
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
struct bio *src) { }
-#endif /* CONFIG_BLK_CGROUP */
-
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
- unsigned long addr;
-
- /*
- * might not be a highmem page, but the preempt/irq count
- * balancing is a lot nicer this way
- */
- local_irq_save(*flags);
- addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
- BUG_ON(addr & ~PAGE_MASK);
-
- return (char *) addr + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr);
- local_irq_restore(*flags);
-}
-
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline void blkcg_punt_bio_submit(struct bio *bio)
{
- return page_address(bvec->bv_page) + bvec->bv_offset;
+ submit_bio(bio);
}
+#endif /* CONFIG_BLK_CGROUP */
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
- *flags = 0;
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+ bio_clear_flag(bio, BIO_BPS_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
}
-#endif
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
@@ -683,6 +681,11 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
+ /*
+ * per-cpu bio alloc cache
+ */
+ struct bio_alloc_cache __percpu *cache;
+
mempool_t bio_pool;
mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -690,6 +693,7 @@ struct bio_set {
mempool_t bvec_integrity_pool;
#endif
+ unsigned int back_pad;
/*
* Deadlock avoidance for stacking block drivers: see comments in
* bio_alloc_bioset() for details
@@ -698,12 +702,11 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
-};
-struct biovec_slab {
- int nr_vecs;
- char *name;
- struct kmem_cache *slab;
+ /*
+ * Hot-unplug notifier for the per-cpu cache, if used
+ */
+ struct hlist_node cpuhp_dead;
};
static inline bool bioset_initialized(struct bio_set *bs)
@@ -711,12 +714,6 @@ static inline bool bioset_initialized(struct bio_set *bs)
return bs->bio_slab != NULL;
}
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
-
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_for_each_vec(bvl, bip, iter) \
@@ -726,6 +723,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
for_each_bio(_bio) \
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
@@ -797,6 +795,12 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
return 0;
}
+static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
+ ssize_t len, u32 seed)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
@@ -808,9 +812,17 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
*/
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
- bio->bi_opf |= REQ_HIPRI;
- if (!is_sync_kiocb(kiocb))
+ bio->bi_opf |= REQ_POLLED;
+ if (kiocb->ki_flags & IOCB_NOWAIT)
bio->bi_opf |= REQ_NOWAIT;
}
+static inline void bio_clear_polled(struct bio *bio)
+{
+ bio->bi_opf &= ~REQ_POLLED;
+}
+
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+
#endif /* __LINUX_BIO_H */
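
The allocation interface now takes the target device and operation up front. A sketch of the updated call (illustrative, assuming a valid bdev):

	#include <linux/bio.h>

	static struct bio *alloc_read_bio(struct block_device *bdev, sector_t sector)
	{
		/* bdev and opf are supplied at allocation time, so no
		 * separate bio_set_dev() or bi_opf assignment is needed;
		 * allocation from fs_bio_set with __GFP_DIRECT_RECLAIM
		 * set does not fail.
		 */
		struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

		bio->bi_iter.bi_sector = sector;
		return bio;
	}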
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 4e035aca6f7e..63928f173223 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -19,6 +19,9 @@
*
* Example:
*
+ * #include <linux/bitfield.h>
+ * #include <linux/bits.h>
+ *
* #define REG_FIELD_A GENMASK(6, 0)
* #define REG_FIELD_B BIT(7)
* #define REG_FIELD_C GENMASK(15, 8)
@@ -41,15 +44,33 @@
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
- ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ ~((_mask) >> __bf_shf(_mask)) & \
+ (0 + (_val)) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
+ __bf_cast_unsigned(_reg, ~0ull), \
_pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
@@ -95,6 +116,32 @@
((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
})
+#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
+
+/**
+ * FIELD_PREP_CONST() - prepare a constant bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_CONST() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ *
+ * Unlike FIELD_PREP() this is a constant expression and can therefore
+ * be used in initializers. Error checking is more limited in this
+ * version, and non-constant masks cannot be used.
+ */
+#define FIELD_PREP_CONST(_mask, _val) \
+ ( \
+ /* mask must be non-zero */ \
+ BUILD_BUG_ON_ZERO((_mask) == 0) + \
+ /* check if value fits */ \
+ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
+ /* check if mask is contiguous */ \
+ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \
+ /* and create the value */ \
+ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \
+ )
+
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
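
Because FIELD_PREP() expands to a statement expression for its build-time checks, it cannot appear in a static initializer; the FIELD_PREP_CONST() added above can. A sketch reusing the header's own REG_FIELD_C example:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define REG_FIELD_C	GENMASK(15, 8)

	/* 0x23 placed into bits 15:8 at compile time: reg_default == 0x2300 */
	static const u32 reg_default = FIELD_PREP_CONST(REG_FIELD_C, 0x23);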
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
new file mode 100644
index 000000000000..17caeca94cab
--- /dev/null
+++ b/include/linux/bitmap-str.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITMAP_STR_H
+#define __LINUX_BITMAP_STR_H
+
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+
+#endif /* __LINUX_BITMAP_STR_H */
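
These parsing and printing prototypes move out of bitmap.h, which now includes this header. A usage sketch (illustrative):

	#include <linux/bitmap.h>

	/* Parse a cpulist-style string: "0-3,8" sets bits 0..3 and 8. */
	static int demo_parse(unsigned long *mask, unsigned int nbits)
	{
		return bitmap_parselist("0-3,8", mask, nbits);
	}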
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 99058eb81042..aa4096126553 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -4,10 +4,17 @@
#ifndef __ASSEMBLY__
-#include <linux/types.h>
+#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/find.h>
+#include <linux/limits.h>
#include <linux/string.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitmap-str.h>
+
+struct device;
/*
* bitmaps provide bit arrays that consume one or more unsigned
@@ -47,20 +54,18 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
+ * bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
- * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region
- * bitmap_next_set_region(map, &start, &end, nbits) Find next set region
- * bitmap_for_each_clear_region(map, rs, re, start, end)
- * Iterate over all clear regions
- * bitmap_for_each_set_region(map, rs, re, start, end)
- * Iterate over all set regions
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
* bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
+ * bitmap_scatter(dst, src, mask, nbits) *dst = map(dense, sparse)(src)
+ * bitmap_gather(dst, src, mask, nbits) *dst = map(sparse, dense)(src)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
@@ -73,7 +78,9 @@
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
*
@@ -118,56 +125,67 @@
* Allocation and deallocation of bitmap.
* Provided in lib/bitmap.c to avoid circular dependency.
*/
-extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
-extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
-extern void bitmap_free(const unsigned long *bitmap);
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
+void bitmap_free(const unsigned long *bitmap);
+
+DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))
+
+/* Managed variants of the above. */
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
/*
* lib/bitmap.c provides these functions:
*/
-extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern bool __pure __bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits);
-extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits);
-extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern void bitmap_cut(unsigned long *dst, const unsigned long *src,
- unsigned int first, unsigned int cut,
- unsigned int nbits);
-extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_replace(unsigned long *dst,
- const unsigned long *old, const unsigned long *new,
- const unsigned long *mask, unsigned int nbits);
-extern int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
-extern void __bitmap_set(unsigned long *map, unsigned int start, int len);
-extern void __bitmap_clear(unsigned long *map, unsigned int start, int len);
-
-extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask,
- unsigned long align_offset);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __pure __bitmap_or_equal(const unsigned long *src1,
+ const unsigned long *src2,
+ const unsigned long *src3,
+ unsigned int nbits);
+void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits);
+void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut, unsigned int nbits);
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_set(unsigned long *map, unsigned int start, int len);
+void __bitmap_clear(unsigned long *map, unsigned int start, int len);
+
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
@@ -192,63 +210,47 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-extern int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *dst, int nbits);
-extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-extern int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
-extern int bitmap_bitremap(int oldbit,
+int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
-extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
-extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-
-#ifdef __BIG_ENDIAN
-extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
-#else
-#define bitmap_copy_le bitmap_copy
-#endif
-extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
-extern int bitmap_print_to_pagebuf(bool list, char *buf,
- const unsigned long *maskp, int nmaskbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-/*
- * The static inlines below do not handle constant nbits==0 correctly,
- * so make such users (should any ever turn up) call the out-of-line
- * versions.
- */
-#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
-
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
+
+ if (small_const_nbits(nbits))
+ *dst = 0;
+ else
+ memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
+
+ if (small_const_nbits(nbits))
+ *dst = *src;
+ else
+ memcpy(dst, src, len);
}
/*
@@ -263,13 +265,17 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
}
/*
- * On 32-bit systems bitmaps are represented as u32 arrays internally, and
- * therefore conversion is not needed when copying data from/to arrays of u32.
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of the hi and lo parts of numbers matches the bitmap
+ * structure. In both cases conversion is not needed when copying data from/to
+ * arrays of u32. But in the LE64 case, the typecast in
+ * bitmap_copy_clear_tail() may lead to an out-of-bounds access. To avoid that,
+ * neither the LE nor the BE variants of 64-bit architectures use
+ * bitmap_copy_clear_tail().
*/
#if BITS_PER_LONG == 64
-extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
unsigned int nbits);
-extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
unsigned int nbits);
#else
#define bitmap_from_arr32(bitmap, buf, nbits) \
@@ -280,7 +286,21 @@ extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
(const unsigned long *) (bitmap), (nbits))
#endif
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
+ */
+#if BITS_PER_LONG == 32
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
+
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -306,7 +326,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -330,8 +350,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -361,8 +381,9 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -370,8 +391,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -379,7 +400,7 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -387,7 +408,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -395,18 +416,39 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
+static __always_inline
+unsigned long bitmap_weight_and(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_and(src1, src2, nbits);
+}
+
+static __always_inline
+unsigned long bitmap_weight_andnot(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_andnot(src1, src2, nbits);
+}
+
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -421,6 +463,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -460,12 +504,105 @@ static inline void bitmap_replace(unsigned long *dst,
__bitmap_replace(dst, old, new, mask, nbits);
}
-static inline void bitmap_next_clear_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+/**
+ * bitmap_scatter - Scatter a bitmap according to the given mask
+ * @dst: scattered bitmap
+ * @src: gathered bitmap
+ * @mask: mask representing bits to assign to in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Scatters bitmap with sequential bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
+ *
+ * Or in binary form
+ * @src @mask @dst
+ * 0000000001011010 0001001100010011 0000001100000010
+ *
+ * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
+ *
+ * A more 'visual' description of the operation::
+ *
+ * src: 0000000001011010
+ * ||||||
+ * +------+|||||
+ * | +----+||||
+ * | |+----+|||
+ * | || +-+||
+ * | || | ||
+ * mask: ...v..vv...v..vv
+ * ...0..11...0..10
+ * dst: 0000001100000010
+ *
+ * A relationship exists between bitmap_scatter() and bitmap_gather().
+ * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
+ * See bitmap_scatter() for details related to this relationship.
+ */
+static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(bit, dst, test_bit(n++, src));
+}
+
+/**
+ * bitmap_gather - Gather a bitmap according to given mask
+ * @dst: gathered bitmap
+ * @src: scattered bitmap
+ * @mask: mask representing bits to extract from in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Gathers bitmap with sparse bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
+ *
+ * Or in binary form
+ * @src @mask @dst
+ * 0000001100000010 0001001100010011 0000000000011010
+ *
+ * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
+ *
+ * A more 'visual' description of the operation::
+ *
+ * mask: ...v..vv...v..vv
+ * src: 0000001100000010
+ * ^ ^^ ^ 0
+ * | || | 10
+ * | || > 010
+ * | |+--> 1010
+ * | +--> 11010
+ * +----> 011010
+ * dst: 0000000000011010
+ *
+ * A relationship exists between bitmap_gather() and bitmap_scatter(). See
+ * bitmap_scatter() for details of the scatter operation.
+ * Suppose scattered is computed using bitmap_scatter(scattered, src, mask, n).
+ * The operation bitmap_gather(result, scattered, mask, n) leads to a result
+ * equal or equivalent to src.
+ *
+ * The result can be 'equivalent' because bitmap_scatter() and bitmap_gather()
+ * are not bijective.
+ * The result and src values are equivalent in the sense that a call to
+ * bitmap_scatter(res, src, mask, n) and a call to
+ * bitmap_scatter(res, result, mask, n) will lead to the same res value.
+ */
+static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
- *rs = find_next_zero_bit(bitmap, end, *rs);
- *re = find_next_bit(bitmap, end, *rs + 1);
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(n++, dst, test_bit(bit, src));
}
static inline void bitmap_next_set_region(unsigned long *bitmap,
@@ -476,24 +613,65 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
-/*
- * Bitmap region iterators. Iterates over the bitmap between [@start, @end).
- * @rs and @re should be integer variables and will be set to start and end
- * index of the current clear or set region.
+/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
*/
-#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
-
-#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
+static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ bitmap_clear(bitmap, pos, BIT(order));
+}
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Returns: 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ unsigned int len = BIT(order);
+
+ if (find_next_bit(bitmap, pos + len, pos) < pos + len)
+ return -EBUSY;
+ bitmap_set(bitmap, pos, len);
+ return 0;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Returns: the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
+ if (!bitmap_allocate_region(bitmap, pos, order))
+ return pos;
+ }
+ return -ENOMEM;
+}
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
@@ -540,10 +718,7 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
- dst[0] = mask & ULONG_MAX;
-
- if (sizeof(mask) > sizeof(unsigned long))
- dst[1] = mask >> 32;
+ bitmap_from_arr64(dst, &mask, 64);
}
/**
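
A round-trip sketch using the values from the kernel-doc above; note that gathering a scattered bitmap yields an equivalent rather than equal value when src has set bits beyond the mask weight (illustrative):

	#include <linux/bitmap.h>

	static void demo_scatter_gather(void)
	{
		DECLARE_BITMAP(src, 64);
		DECLARE_BITMAP(mask, 64);
		DECLARE_BITMAP(dst, 64);
		DECLARE_BITMAP(back, 64);

		bitmap_from_u64(src, 0x005a);
		bitmap_from_u64(mask, 0x1313);

		bitmap_scatter(dst, src, mask, 16);	/* dst  == 0x0302 */
		bitmap_gather(back, dst, mask, 16);	/* back == 0x001a */
	}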
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 99f2ac30b1d9..2ba557e067fe 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,8 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
+
#include <asm/types.h>
#include <linux/bits.h>
+#include <linux/typecheck.h>
+
+#include <uapi/linux/kernel.h>
/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
@@ -12,10 +16,10 @@
#endif
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
-#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
-#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
-#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
+#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -23,44 +27,61 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and due
+ * to that the compiler can't optimize them to compile-time expressions or
+ * constants. In contrast, generic_*() helpers are defined in pure C and
+ * compilers optimize them well.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and doesn't change behaviour in the remaining cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr) \
+ ((__builtin_constant_p(nr) && \
+ __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
+ (uintptr_t)(addr) != (uintptr_t)NULL && \
+ __builtin_constant_p(*(const unsigned long *)(addr))) ? \
+ const##op(nr, addr) : op(nr, addr))
+
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
+
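A sketch of the intended effect, mirroring the comment's own example (exact codegen is up to the optimizer):

	static unsigned long bitop_fold_example(unsigned long *dst, unsigned int nr)
	{
		unsigned long foo = 0;

		/* All arguments compile-time known: picks const___set_bit()
		 * and folds to foo = BIT(5). */
		__set_bit(5, &foo);

		/* Runtime arguments: falls back to the arch ___set_bit(). */
		__set_bit(nr, dst);

		return foo;
	}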
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name) \
+ static_assert(__same_type(arch_##name, generic_##name) && \
+ __same_type(const_##name, generic_##name) && \
+ __same_type(_##name, generic_##name))
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-
-/**
- * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
- * @start: bit offset to start search and to store the current iteration offset
- * @clump: location to store copy of current 8-bit clump
- * @bits: bitmap address to base the search on
- * @size: bitmap size in number of bits
- */
-#define for_each_set_clump8(start, clump, bits, size) \
- for ((start) = find_first_clump8(&(clump), (bits), (size)); \
- (start) < (size); \
- (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
@@ -188,12 +209,10 @@ static inline unsigned fls_long(unsigned long l)
static inline int get_count_order(unsigned int count)
{
- int order;
+ if (count == 0)
+ return -1;
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
+ return fls(--count);
}
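Worked values for the simplified helper (semantics unchanged: the order of the next power of two at or above @count):

	get_count_order(0)  == -1	/* explicit special case */
	get_count_order(1)  ==  0	/* fls(0)  == 0 */
	get_count_order(16) ==  4	/* fls(15) == 4 */
	get_count_order(17) ==  5	/* fls(16) == 5 */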
/**
@@ -206,17 +225,14 @@ static inline int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
- else if (l & (l - 1UL))
- return (int)fls_long(l);
- else
- return (int)fls_long(l) - 1;
+ return (int)fls_long(--l);
}
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
- * On 64 bit arches this is a synomyn for __ffs
+ * On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
@@ -232,6 +248,25 @@ static inline unsigned long __ffs64(u64 word)
}
/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: Bit to find
+ */
+static inline unsigned long fns(unsigned long word, unsigned int n)
+{
+ unsigned int bit;
+
+ while (word) {
+ bit = __ffs(word);
+ if (n-- == 0)
+ return bit;
+ __clear_bit(bit, &word);
+ }
+
+ return BITS_PER_LONG;
+}
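For example, with @n counted from zero:

	fns(0x16, 0) == 1		/* 0x16 has bits 1, 2 and 4 set */
	fns(0x16, 1) == 2
	fns(0x16, 2) == 4
	fns(0x16, 3) == BITS_PER_LONG	/* no fourth set bit */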
+
+/**
* assign_bit - Assign value to a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
@@ -255,6 +290,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
__clear_bit(nr, addr);
}
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __set_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __clear_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * if (__ptr_test_bit(bit, &p)) {
+ * ...
+ * } else {
+ * ...
+ * }
+ */
+#define __ptr_test_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ test_bit(nr, (unsigned long *)(addr)); \
+ })
+
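Together these enable the classic low-bit pointer tagging trick; a sketch assuming the pointee is at least 2-byte aligned so that bit 0 is free:

	#include <linux/slab.h>

	static void *ptr_tag_example(void)
	{
		void *p = kmalloc(64, GFP_KERNEL);	/* kmalloc is word aligned */

		if (!p)
			return NULL;

		__ptr_set_bit(0, &p);			/* stash a flag in bit 0 */
		if (__ptr_test_bit(0, &p))
			__ptr_clear_bit(0, &p);		/* clear before dereference/free */

		return p;
	}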
#ifdef __KERNEL__
#ifndef set_mask_bits
@@ -263,10 +347,10 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
new__ = (old__ & ~mask__) | bits__; \
- } while (cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
old__; \
})
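A usage sketch for the lock-free read-modify-write (field layout hypothetical): atomically replace a 2-bit state field, retrying via try_cmpxchg() only if the word changed underneath:

	static unsigned long set_state(unsigned long *flags, unsigned long state)
	{
		/* Returns the old value of *flags. */
		return set_mask_bits(flags, GENMASK(1, 0), state & GENMASK(1, 0));
	}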
@@ -278,27 +362,16 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
+ if (old__ & test__) \
+ break; \
new__ = old__ & ~clear__; \
- } while (!(old__ & test__) && \
- cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
!(old__ & test__); \
})
#endif
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-extern unsigned long find_last_bit(const unsigned long *addr,
- unsigned long size);
-#endif
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 7f475d59a097..0eb24d21aac2 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -4,9 +4,9 @@
#include <linux/const.h>
#include <vdso/bits.h>
+#include <uapi/linux/bits.h>
#include <asm/bitsperlong.h>
-#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
@@ -22,7 +22,7 @@
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
#else
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
@@ -31,15 +31,8 @@
#define GENMASK_INPUT_CHECK(h, l) 0
#endif
-#define __GENMASK(h, l) \
- (((~UL(0)) - (UL(1) << (l)) + 1) & \
- (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-
-#define __GENMASK_ULL(h, l) \
- (((~ULL(0)) - (ULL(1) << (l)) + 1) & \
- (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
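The visible semantics are unchanged by moving __GENMASK()/__GENMASK_ULL() to uapi; worked values for reference:

	GENMASK(7, 4)       == 0xf0
	GENMASK_ULL(39, 21) == 0x000000ffffe00000
	GENMASK(3, 7)	/* build error: l > h, now caught whenever both
			   arguments are constant expressions */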
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index c8fc9792ac77..dd5841a42c33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,650 +14,38 @@
* Nauman Rafique <nauman@google.com>
*/
-#include <linux/cgroup.h>
-#include <linux/percpu.h>
-#include <linux/percpu_counter.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/blkdev.h>
-#include <linux/atomic.h>
-#include <linux/kthread.h>
-#include <linux/fs.h>
+#include <linux/types.h>
-/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
-#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+struct bio;
+struct cgroup_subsys_state;
+struct gendisk;
-/* Max limits for throttle policy */
-#define THROTL_IOPS_MAX UINT_MAX
+#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
-
-enum blkg_iostat_type {
- BLKG_IOSTAT_READ,
- BLKG_IOSTAT_WRITE,
- BLKG_IOSTAT_DISCARD,
-
- BLKG_IOSTAT_NR,
-};
-
-struct blkcg_gq;
-
-struct blkcg {
- struct cgroup_subsys_state css;
- spinlock_t lock;
- refcount_t online_pin;
-
- struct radix_tree_root blkg_tree;
- struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
-
- struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
-
- struct list_head all_blkcgs_node;
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
-#endif
-};
-
-struct blkg_iostat {
- u64 bytes[BLKG_IOSTAT_NR];
- u64 ios[BLKG_IOSTAT_NR];
-};
-
-struct blkg_iostat_set {
- struct u64_stats_sync sync;
- struct blkg_iostat cur;
- struct blkg_iostat last;
-};
-
-/*
- * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
- * request_queue (q). This is used by blkcg policies which need to track
- * information per blkcg - q pair.
- *
- * There can be multiple active blkcg policies and each blkg:policy pair is
- * represented by a blkg_policy_data which is allocated and freed by each
- * policy's pd_alloc/free_fn() methods. A policy can allocate private data
- * area by allocating larger data structure which embeds blkg_policy_data
- * at the beginning.
- */
-struct blkg_policy_data {
- /* the blkg and policy id this per-policy data belongs to */
- struct blkcg_gq *blkg;
- int plid;
-};
-
-/*
- * Policies that need to keep per-blkcg data which is independent from any
- * request_queue associated to it should implement cpd_alloc/free_fn()
- * methods. A policy can allocate private data area by allocating larger
- * data structure which embeds blkcg_policy_data at the beginning.
- * cpd_init() is invoked to let each policy handle per-blkcg data.
- */
-struct blkcg_policy_data {
- /* the blkcg and policy id this per-policy data belongs to */
- struct blkcg *blkcg;
- int plid;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
- struct list_head q_node;
- struct hlist_node blkcg_node;
- struct blkcg *blkcg;
-
- /* all non-root blkcg_gq's are guaranteed to have access to parent */
- struct blkcg_gq *parent;
-
- /* reference count */
- struct percpu_ref refcnt;
-
- /* is this blkg online? protected by both blkcg and q locks */
- bool online;
-
- struct blkg_iostat_set __percpu *iostat_cpu;
- struct blkg_iostat_set iostat;
-
- struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-
- spinlock_t async_bio_lock;
- struct bio_list async_bios;
- struct work_struct async_bio_work;
-
- atomic_t use_delay;
- atomic64_t delay_nsec;
- atomic64_t delay_start;
- u64 last_delay;
- int last_use;
-
- struct rcu_head rcu_head;
-};
-
-typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
-typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
- struct request_queue *q, struct blkcg *blkcg);
-typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
- size_t size);
-
-struct blkcg_policy {
- int plid;
- /* cgroup files for the policy */
- struct cftype *dfl_cftypes;
- struct cftype *legacy_cftypes;
-
- /* operations */
- blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
- blkcg_pol_init_cpd_fn *cpd_init_fn;
- blkcg_pol_free_cpd_fn *cpd_free_fn;
- blkcg_pol_bind_cpd_fn *cpd_bind_fn;
-
- blkcg_pol_alloc_pd_fn *pd_alloc_fn;
- blkcg_pol_init_pd_fn *pd_init_fn;
- blkcg_pol_online_pd_fn *pd_online_fn;
- blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_free_pd_fn *pd_free_fn;
- blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
- blkcg_pol_stat_pd_fn *pd_stat_fn;
-};
-
-extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-extern bool blkcg_debug_stats;
-struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint);
-int blkcg_init_queue(struct request_queue *q);
-void blkcg_exit_queue(struct request_queue *q);
-
-/* Blkio controller policy registration */
-int blkcg_policy_register(struct blkcg_policy *pol);
-void blkcg_policy_unregister(struct blkcg_policy *pol);
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-
-const char *blkg_dev_name(struct blkcg_gq *blkg);
-void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
- u64 (*prfill)(struct seq_file *,
- struct blkg_policy_data *, int),
- const struct blkcg_policy *pol, int data,
- bool show_total);
-u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-
-struct blkg_conf_ctx {
- struct gendisk *disk;
- struct blkcg_gq *blkg;
- char *body;
-};
-
-struct gendisk *blkcg_conf_get_disk(char **inputp);
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- char *input, struct blkg_conf_ctx *ctx);
-void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-
-/**
- * blkcg_css - find the current css
- *
- * Find the css associated with either the kthread or the current task.
- * This may return a dying css, so it is up to the caller to use tryget logic
- * to confirm it is alive and well.
- */
-static inline struct cgroup_subsys_state *blkcg_css(void)
-{
- struct cgroup_subsys_state *css;
-
- css = kthread_blkcg();
- if (css)
- return css;
- return task_css(current, io_cgrp_id);
-}
-
-static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct blkcg, css) : NULL;
-}
-
-/**
- * __bio_blkcg - internal, inconsistent version to get blkcg
- *
- * DO NOT USE.
- * This function is inconsistent and consequently is dangerous to use. The
- * first part of the function returns a blkcg where a reference is owned by the
- * bio. This means it does not need to be rcu protected as it cannot go away
- * with the bio owning a reference to it. However, the latter potentially gets
- * it from task_css(). This can race against task migration and the cgroup
- * dying. It is also semantically different as it must be called rcu protected
- * and is susceptible to failure when trying to get a reference to it.
- * Therefore, it is not ok to assume that *_get() will always succeed on the
- * blkcg returned here.
- */
-static inline struct blkcg *__bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return css_to_blkcg(blkcg_css());
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, %NULL if not associated.
- * Callers are expected to either handle %NULL or know association has been
- * done prior to calling this.
- */
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return NULL;
-}
-
-static inline bool blk_cgroup_congested(void)
-{
- struct cgroup_subsys_state *css;
- bool ret = false;
-
- rcu_read_lock();
- css = kthread_blkcg();
- if (!css)
- css = task_css(current, io_cgrp_id);
- while (css) {
- if (atomic_read(&css->cgroup->congestion_count)) {
- ret = true;
- break;
- }
- css = css->parent;
- }
- rcu_read_unlock();
- return ret;
-}
-
-/**
- * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
- * @return: true if this bio needs to be submitted with the root blkg context.
- *
- * In order to avoid priority inversions we sometimes need to issue a bio as if
- * it were attached to the root blkg, and then backcharge to the actual owning
- * blkg. The idea is we do bio_blkcg() to look up the actual context for the
- * bio and attach the appropriate blkg to the bio. Then we call this helper and
- * if it is true run with the root blkg for that queue and then do any
- * backcharging to the originating cgroup once the io is complete.
- */
-static inline bool bio_issue_as_root_blkg(struct bio *bio)
-{
- return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
-}
-
-/**
- * blkcg_parent - get the parent of a blkcg
- * @blkcg: blkcg of interest
- *
- * Return the parent blkcg of @blkcg. Can be called anytime.
- */
-static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
-{
- return css_to_blkcg(blkcg->css.parent);
-}
-
-/**
- * __blkg_lookup - internal version of blkg_lookup()
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- * @update_hint: whether to update lookup hint with the result or not
- *
- * This is internal version and shouldn't be used by policy
- * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
- * @q's bypass state. If @update_hint is %true, the caller should be
- * holding @q->queue_lock and lookup hint is updated on success.
- */
-static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q,
- bool update_hint)
-{
- struct blkcg_gq *blkg;
-
- if (blkcg == &blkcg_root)
- return q->root_blkg;
-
- blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
- return blkg;
-
- return blkg_lookup_slowpath(blkcg, q, update_hint);
-}
-
-/**
- * blkg_lookup - lookup blkg for the specified blkcg - q pair
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- *
- * Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock.
- */
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return __blkg_lookup(blkcg, q, false);
-}
-
-/**
- * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
- * @q: request_queue of interest
- *
- * Lookup blkg for @q at the root level. See also blkg_lookup().
- */
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{
- return q->root_blkg;
-}
-
-/**
- * blkg_to_pdata - get policy private data
- * @blkg: blkg of interest
- * @pol: policy of interest
- *
- * Return pointer to private data associated with the @blkg-@pol pair.
- */
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol)
-{
- return blkg ? blkg->pd[pol->plid] : NULL;
-}
-
-static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
- struct blkcg_policy *pol)
-{
- return blkcg ? blkcg->cpd[pol->plid] : NULL;
-}
-
-/**
- * pdata_to_blkg - get blkg associated with policy private data
- * @pd: policy private data of interest
- *
- * @pd is policy private data. Determine the blkg it's associated with.
- */
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
-{
- return pd ? pd->blkg : NULL;
-}
-
-static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
-{
- return cpd ? cpd->blkcg : NULL;
-}
-
-extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
-
-/**
- * blkcg_pin_online - pin online state
- * @blkcg: blkcg of interest
- *
- * While pinned, a blkcg is kept online. This is primarily used to
- * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
- * while an associated cgwb is still active.
- */
-static inline void blkcg_pin_online(struct blkcg *blkcg)
-{
- refcount_inc(&blkcg->online_pin);
-}
-
-/**
- * blkcg_unpin_online - unpin online state
- * @blkcg: blkcg of interest
- *
- * This is primarily used to impedance-match blkg and cgwb lifetimes so
- * that blkg doesn't go offline while an associated cgwb is still active.
- * When this count goes to zero, all active cgwbs have finished so the
- * blkcg can continue destruction by calling blkcg_destroy_blkgs().
- */
-static inline void blkcg_unpin_online(struct blkcg *blkcg)
-{
- do {
- if (!refcount_dec_and_test(&blkcg->online_pin))
- break;
- blkcg_destroy_blkgs(blkcg);
- blkcg = blkcg_parent(blkcg);
- } while (blkcg);
-}
-
-/**
- * blkg_path - format cgroup path of blkg
- * @blkg: blkg of interest
- * @buf: target buffer
- * @buflen: target buffer length
- *
- * Format the path of the cgroup of @blkg into @buf.
- */
-static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
-{
- return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-}
-
-/**
- * blkg_get - get a blkg reference
- * @blkg: blkg to get
- *
- * The caller should be holding an existing reference.
- */
-static inline void blkg_get(struct blkcg_gq *blkg)
-{
- percpu_ref_get(&blkg->refcnt);
-}
-
-/**
- * blkg_tryget - try and get a blkg reference
- * @blkg: blkg to get
- *
- * This is for use when doing an RCU lookup of the blkg. We may be in the midst
- * of freeing this blkg, so we can only use it if the refcnt is not zero.
- */
-static inline bool blkg_tryget(struct blkcg_gq *blkg)
-{
- return blkg && percpu_ref_tryget(&blkg->refcnt);
-}
-
-/**
- * blkg_put - put a blkg reference
- * @blkg: blkg to put
- */
-static inline void blkg_put(struct blkcg_gq *blkg)
-{
- percpu_ref_put(&blkg->refcnt);
-}
-
-/**
- * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
- * read locked. If called under either blkcg or queue lock, the iteration
- * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_css by calling css_rightmost_descendant() to skip subtree.
- * @p_blkg is included in the iteration and the first node to be visited.
- */
-#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same. @p_blkg is
- * included in the iteration and the last node to be visited.
- */
-#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-bool __blkcg_punt_bio_submit(struct bio *bio);
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio)
-{
- if (bio->bi_opf & REQ_CGROUP_PUNT)
- return __blkcg_punt_bio_submit(bio);
- else
- return false;
-}
-
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
-static inline void blkcg_use_delay(struct blkcg_gq *blkg)
-{
- if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
- return;
- if (atomic_add_return(1, &blkg->use_delay) == 1)
- atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
-}
-
-static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
-{
- int old = atomic_read(&blkg->use_delay);
-
- if (WARN_ON_ONCE(old < 0))
- return 0;
- if (old == 0)
- return 0;
-
- /*
- * We do this song and dance because we can race with somebody else
- * adding or removing delay. If we just did an atomic_dec we'd end up
- * negative and we'd already be in trouble. We need to subtract 1 and
- * then check to see if we were the last delay so we can drop the
- * congestion count on the cgroup.
- */
- while (old) {
- int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
- if (cur == old)
- break;
- old = cur;
- }
-
- if (old == 0)
- return 0;
- if (old == 1)
- atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
- return 1;
-}
-
-/**
- * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
- * @blkg: target blkg
- * @delay: delay duration in nsecs
- *
- * When enabled with this function, the delay is not decayed and must be
- * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
- * blkcg_[un]use_delay() and blkcg_add_delay() usages.
- */
-static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
-{
- int old = atomic_read(&blkg->use_delay);
-
- /* We only want 1 person setting the congestion count for this blkg. */
- if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
- atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
-
- atomic64_set(&blkg->delay_nsec, delay);
-}
-
-/**
- * blkcg_clear_delay - Disable allocator delay mechanism
- * @blkg: target blkg
- *
- * Disable use_delay mechanism. See blkcg_set_delay().
- */
-static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
-{
- int old = atomic_read(&blkg->use_delay);
-
- /* We only want 1 person clearing the congestion count for this blkg. */
- if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
- atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
-}
-
-void blk_cgroup_bio_start(struct bio *bio);
-void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
-#else /* CONFIG_BLK_CGROUP */
-
-struct blkcg {
-};
-
-struct blkg_policy_data {
-};
+bool blk_cgroup_congested(void);
+void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
+void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
+struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
-struct blkcg_policy_data {
-};
-
-struct blkcg_gq {
-};
-
-struct blkcg_policy {
-};
+#else /* CONFIG_BLK_CGROUP */
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_CGROUP */
-#ifdef CONFIG_BLOCK
-
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
-
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{ return NULL; }
-static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
-static inline int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { }
-
-static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
-
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol) { return NULL; }
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
-static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
-static inline void blkg_get(struct blkcg_gq *blkg) { }
-static inline void blkg_put(struct blkcg_gq *blkg) { }
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
-static inline void blk_cgroup_bio_start(struct bio *bio) { }
-
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len);
+char *blkcg_get_fc_appid(struct bio *bio);
-#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
new file mode 100644
index 000000000000..90ab33cb5d0e
--- /dev/null
+++ b/include/linux/blk-crypto-profile.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_PROFILE_H
+#define __LINUX_BLK_CRYPTO_PROFILE_H
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+
+struct blk_crypto_profile;
+
+/**
+ * struct blk_crypto_ll_ops - functions to control inline encryption hardware
+ *
+ * Low-level operations for controlling inline encryption hardware. This
+ * interface must be implemented by storage drivers that support inline
+ * encryption. All functions may sleep, are serialized by profile->lock, and
+ * are never called while profile->dev (if set) is runtime-suspended.
+ */
+struct blk_crypto_ll_ops {
+
+ /**
+ * @keyslot_program: Program a key into the inline encryption hardware.
+ *
+ * Program @key into the specified @slot in the inline encryption
+ * hardware, overwriting any key that the keyslot may already contain.
+	 * The keyslot is guaranteed not to be in use by any I/O.
+ *
+ * This is required if the device has keyslots. Otherwise (i.e. if the
+ * device is a layered device, or if the device is real hardware that
+ * simply doesn't have the concept of keyslots) it is never called.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_program)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @keyslot_evict: Evict a key from the inline encryption hardware.
+ *
+ * If the device has keyslots, this function must evict the key from the
+	 * specified @slot. The slot will contain @key, although the @key
+	 * argument should not normally be needed, since @slot alone is
+	 * sufficient. The keyslot is guaranteed not to be in use by any I/O.
+ *
+ * If the device doesn't have keyslots itself, this function must evict
+ * @key from any underlying devices. @slot won't be valid in this case.
+ *
+ * If there are no keyslots and no underlying devices, this function
+ * isn't required.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_evict)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+};
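A hedged sketch of a driver-side implementation (the mydev_* names, register helpers, and embedding struct are invented for illustration):

	static int mydev_keyslot_program(struct blk_crypto_profile *profile,
					 const struct blk_crypto_key *key,
					 unsigned int slot)
	{
		struct mydev *dev = container_of(profile, struct mydev,
						 crypto_profile);

		/* Write the raw key material into the hardware keyslot. */
		return mydev_write_keyslot(dev, slot, key->raw, key->size);
	}

	static int mydev_keyslot_evict(struct blk_crypto_profile *profile,
				       const struct blk_crypto_key *key,
				       unsigned int slot)
	{
		struct mydev *dev = container_of(profile, struct mydev,
						 crypto_profile);

		return mydev_clear_keyslot(dev, slot);
	}

	static const struct blk_crypto_ll_ops mydev_crypto_ops = {
		.keyslot_program	= mydev_keyslot_program,
		.keyslot_evict		= mydev_keyslot_evict,
	};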
+
+/**
+ * struct blk_crypto_profile - inline encryption profile for a device
+ *
+ * This struct contains a storage device's inline encryption capabilities (e.g.
+ * the supported crypto algorithms), driver-provided functions to control the
+ * inline encryption hardware (e.g. programming and evicting keys), and optional
+ * device-independent keyslot management data.
+ */
+struct blk_crypto_profile {
+
+ /* public: Drivers must initialize the following fields. */
+
+ /**
+ * @ll_ops: Driver-provided functions to control the inline encryption
+ * hardware, e.g. program and evict keys.
+ */
+ struct blk_crypto_ll_ops ll_ops;
+
+ /**
+ * @max_dun_bytes_supported: The maximum number of bytes supported for
+ * specifying the data unit number (DUN). Specifically, the range of
+ * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1.
+ */
+ unsigned int max_dun_bytes_supported;
+
+ /**
+ * @modes_supported: Array of bitmasks that specifies whether each
+ * combination of crypto mode and data unit size is supported.
+ * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
+ * crypto_mode can be used with a data unit size of (1 << i). Note that
+ * only data unit sizes that are powers of 2 can be supported.
+ */
+ unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX];
+
+ /**
+ * @dev: An optional device for runtime power management. If the driver
+ * provides this device, it will be runtime-resumed before any function
+ * in @ll_ops is called and will remain resumed during the call.
+ */
+ struct device *dev;
+
+ /* private: The following fields shouldn't be accessed by drivers. */
+
+ /* Number of keyslots, or 0 if not applicable */
+ unsigned int num_slots;
+
+ /*
+ * Serializes all calls to functions in @ll_ops as well as all changes
+ * to @slot_hashtable. This can also be taken in read mode to look up
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
+ struct lock_class_key lockdep_key;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+ struct list_head idle_slots;
+ spinlock_t idle_slots_lock;
+
+ /*
+	 * Hash table which maps struct blk_crypto_key to keyslots, so that we
+ * can find a key's keyslot in O(1) time rather than O(num_slots).
+ * Protected by 'lock'.
+ */
+ struct hlist_head *slot_hashtable;
+ unsigned int log_slot_ht_size;
+
+ /* Per-keyslot data */
+ struct blk_crypto_keyslot *slots;
+};
+
+int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+int devm_blk_crypto_profile_init(struct device *dev,
+ struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+
+void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+
+void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+
+void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
+ const struct blk_crypto_profile *child);
+
+bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
+ const struct blk_crypto_profile *reference);
+
+void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
+ const struct blk_crypto_profile *src);
+
+#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index e82342907f2b..5e5822c18ee4 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -13,6 +13,7 @@ enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_AES_256_XTS,
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
BLK_ENCRYPTION_MODE_ADIANTUM,
+ BLK_ENCRYPTION_MODE_SM4_XTS,
BLK_ENCRYPTION_MODE_MAX,
};
@@ -71,9 +72,6 @@ struct bio_crypt_ctx {
#include <linux/blk_types.h>
#include <linux/blkdev.h>
-struct request;
-struct request_queue;
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static inline bool bio_has_crypt_ctx(struct bio *bio)
@@ -94,13 +92,15 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
unsigned int dun_bytes,
unsigned int data_unit_size);
-int blk_crypto_start_using_key(const struct blk_crypto_key *key,
- struct request_queue *q);
+int blk_crypto_start_using_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
-int blk_crypto_evict_key(struct request_queue *q,
- const struct blk_crypto_key *key);
+void blk_crypto_evict_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
-bool blk_crypto_config_supported(struct request_queue *q,
+bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+bool blk_crypto_config_supported(struct block_device *bdev,
const struct blk_crypto_config *cfg);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
@@ -112,12 +112,24 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
-void __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
-static inline void bio_crypt_clone(struct bio *dst, struct bio *src,
- gfp_t gfp_mask)
+int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
+/**
+ * bio_crypt_clone - clone bio encryption context
+ * @dst: destination bio
+ * @src: source bio
+ * @gfp_mask: memory allocation flags
+ *
+ * If @src has an encryption context, clone it to @dst.
+ *
+ * Return: 0 on success, -ENOMEM if out of memory. -ENOMEM is only possible if
+ * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
+ gfp_t gfp_mask)
{
if (bio_has_crypt_ctx(src))
- __bio_crypt_clone(dst, src, gfp_mask);
+ return __bio_crypt_clone(dst, src, gfp_mask);
+ return 0;
}
#endif /* __LINUX_BLK_CRYPTO_H */
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
new file mode 100644
index 000000000000..e253e7bd0d17
--- /dev/null
+++ b/include/linux/blk-integrity.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_INTEGRITY_H
+#define _LINUX_BLK_INTEGRITY_H
+
+#include <linux/blk-mq.h>
+
+struct request;
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_VERIFY = 1 << 0,
+ BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+};
+
+struct blk_integrity_iter {
+ void *prot_buf;
+ void *data_buf;
+ sector_t seed;
+ unsigned int data_size;
+ unsigned short interval;
+ unsigned char tuple_size;
+ unsigned char pi_offset;
+ const char *disk_name;
+};
+
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
+
+struct blk_integrity_profile {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+ integrity_prepare_fn *prepare_fn;
+ integrity_complete_fn *complete_fn;
+ const char *name;
+};
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_integrity_register(struct gendisk *, struct blk_integrity *);
+void blk_integrity_unregister(struct gendisk *);
+int blk_integrity_compare(struct gendisk *, struct gendisk *);
+int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+ struct scatterlist *);
+int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return NULL;
+
+ return bi;
+}
+
+static inline struct blk_integrity *
+bdev_get_integrity(struct block_device *bdev)
+{
+ return blk_get_integrity(bdev->bd_disk);
+}
+
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->integrity.profile;
+}
+
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+ q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
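For example, with 4096-byte integrity intervals (bi->interval_exp == 12) and 8-byte tuples, an 8-sector (4 KiB) bio carries one tuple:

	bio_integrity_intervals(bi, 8) == 8 >> (12 - 9) == 1
	bio_integrity_bytes(bi, 8)     == 1 * 8         == 8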
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+/*
+ * Return the first bvec that contains integrity data. Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+ return NULL;
+ return rq->bio->bi_integrity->bip_vec;
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+ struct bio *b,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+ return 0;
+}
+static inline void blk_integrity_register(struct gendisk *d,
+ struct blk_integrity *b)
+{
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+}
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#endif /* _LINUX_BLK_INTEGRITY_H */
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 0b1f45c62623..ca544e1d3508 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -5,7 +5,7 @@
struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset);
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+ int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
deleted file mode 100644
index 5cc5f0f36218..000000000000
--- a/include/linux/blk-mq-rdma.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_RDMA_H
-#define _LINUX_BLK_MQ_RDMA_H
-
-struct blk_mq_tag_set;
-struct ib_device;
-
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
- struct ib_device *dev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 687ae287e1dc..13226e9b22dd 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -5,7 +5,7 @@
struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9d2d5ad367a4..d3d8fd8e229b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,11 +4,283 @@
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
+#include <linux/lockdep.h>
+#include <linux/scatterlist.h>
+#include <linux/prefetch.h>
#include <linux/srcu.h>
+#include <linux/rw_hint.h>
struct blk_mq_tags;
struct blk_flush_queue;
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_DEFAULT_RQ 128
+
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* drive already may have started this one */
+#define RQF_STARTED ((__force req_flags_t)(1 << 1))
+/* request for flush sequence */
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
+/* merge of different types, fail separately */
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
+/* don't call prep for this one */
+#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+/* use hctx->sched_tags */
+#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << 8))
+/* use an I/O scheduler for this request */
+#define RQF_USE_SCHED ((__force req_flags_t)(1 << 9))
+/* vaguely specified driver internal error. Ignored by the block layer */
+#define RQF_FAILED ((__force req_flags_t)(1 << 10))
+/* don't warn about errors */
+#define RQF_QUIET ((__force req_flags_t)(1 << 11))
+/* account into disk and partition IO statistics */
+#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
+/* runtime pm request */
+#define RQF_PM ((__force req_flags_t)(1 << 15))
+/* on IO scheduler merge hash */
+#define RQF_HASHED ((__force req_flags_t)(1 << 16))
+/* track IO completion time */
+#define RQF_STATS ((__force req_flags_t)(1 << 17))
+/*
+ * Look at ->special_vec for the actual data payload instead of the
+ * bio chain.
+ */
+#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
+#define RQF_RESV ((__force req_flags_t)(1 << 23))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ blk_opf_t cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int tag;
+ int internal_tag;
+
+ unsigned int timeout;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ union {
+ struct list_head queuelist;
+ struct request *rq_next;
+ };
+
+ struct block_device *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+ /*
+	 * rq sectors used for blk stats. It has the same value
+	 * as blk_rq_sectors(rq), except that it is never zeroed
+	 * by completion.
+ */
+ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ unsigned short nr_integrity_segments;
+#endif
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_crypto_keyslot *crypt_keyslot;
+#endif
+
+ enum rw_hint write_hint;
+ unsigned short ioprio;
+
+ enum mq_rq_state state;
+ atomic_t ref;
+
+ unsigned long deadline;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct llist_node ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. special_vec must
+	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those requests
+	 * cannot be inserted into an IO scheduler.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it.
+ */
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+
+ u64 fifo_time;
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+static inline enum req_op req_op(const struct request *req)
+{
+ return req->cmd_flags & REQ_OP_MASK;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_op_is_passthrough(rq->cmd_flags);
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ return req->ioprio;
+}
+
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define rq_list_add(listptr, rq) do { \
+ (rq)->rq_next = *(listptr); \
+ *(listptr) = rq; \
+} while (0)
+
+#define rq_list_add_tail(lastpptr, rq) do { \
+ (rq)->rq_next = NULL; \
+ **(lastpptr) = rq; \
+ *(lastpptr) = &rq->rq_next; \
+} while (0)
+
+#define rq_list_pop(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) { \
+ __req = *(listptr); \
+ *(listptr) = __req->rq_next; \
+ } \
+ __req; \
+})
+
+#define rq_list_peek(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) \
+ __req = *(listptr); \
+ __req; \
+})
+
+#define rq_list_for_each(listptr, pos) \
+ for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+
+#define rq_list_for_each_safe(listptr, pos, nxt) \
+ for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
+ pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
+
+#define rq_list_next(rq) (rq)->rq_next
+#define rq_list_empty(list) ((list) == (struct request *) NULL)
+
+/**
+ * rq_list_move() - move a struct request from one list to another
+ * @src: The source list @rq is currently in
+ * @dst: The destination list that @rq will be appended to
+ * @rq: The request to move
+ * @prev: The request preceding @rq in @src (NULL if @rq is the head)
+ */
+static inline void rq_list_move(struct request **src, struct request **dst,
+ struct request *rq, struct request *prev)
+{
+ if (prev)
+ prev->rq_next = rq->rq_next;
+ else
+ *src = rq->rq_next;
+ rq_list_add(dst, rq);
+}
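A sketch of how a driver's ->queue_rqs() hook might drain such a list (mydev_submit() is hypothetical):

	static void mydev_queue_rqs(struct request **rqlist)
	{
		struct request *rq;

		while ((rq = rq_list_pop(rqlist)))
			mydev_submit(rq);	/* rq->rq_next is ours again */
	}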
+
+/**
+ * enum blk_eh_timer_return - How the timeout handler should proceed
+ * @BLK_EH_DONE: The block driver completed the command or will complete it at
+ * a later time.
+ * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
+ * request to complete.
+ */
+enum blk_eh_timer_return {
+ BLK_EH_DONE,
+ BLK_EH_RESET_TIMER,
+};
+
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+
/**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
@@ -121,14 +393,6 @@ struct blk_mq_hw_ctx {
*/
struct blk_mq_tags *sched_tags;
- /** @queued: Number of queued requests. */
- unsigned long queued;
- /** @run: Number of dispatched requests. */
- unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 7
- /** @dispatched: Number of dispatch requests by queue. */
- unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
-
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
/** @queue_num: Index of this hardware queue. */
@@ -147,13 +411,6 @@ struct blk_mq_hw_ctx {
/** @kobj: Kernel object for sysfs. */
struct kobject kobj;
- /** @poll_considered: Count times blk_poll() was called. */
- unsigned long poll_considered;
- /** @poll_invoked: Count how many requests blk_poll() polled. */
- unsigned long poll_invoked;
- /** @poll_success: Count how many polled requests were completed. */
- unsigned long poll_success;
-
#ifdef CONFIG_BLK_DEBUG_FS
/**
* @debugfs_dir: debugfs directory for this hardware queue. Named
@@ -169,13 +426,6 @@ struct blk_mq_hw_ctx {
* q->unused_hctx_list.
*/
struct list_head hctx_list;
-
- /**
- * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
- * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
- * blk_mq_hw_ctx_size().
- */
- struct srcu_struct srcu[];
};
/**
@@ -211,6 +461,7 @@ enum hctx_type {
/**
* struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @ops: Pointers to functions that implement block driver behavior.
* @map: One or more ctx -> hctx mappings. One map exists for each
* hardware queue type (enum hctx_type) that the driver wishes
* to support. There are no restrictions on maps being of the
@@ -218,7 +469,6 @@ enum hctx_type {
* types.
* @nr_maps: Number of elements in the @map array. A number in the range
* [1, HCTX_MAX_TYPES].
- * @ops: Pointers to functions that implement block driver behavior.
* @nr_hw_queues: Number of hardware queues supported by the block driver that
* owns this data structure.
* @queue_depth: Number of tags per hardware queue, reserved tags included.
@@ -233,14 +483,19 @@ enum hctx_type {
* tag set.
* @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
* elements.
+ * @shared_tags:
+ * Shared set of tags. Has @nr_hw_queues elements. If set,
+ * shared by all @tags.
* @tag_list_lock: Serializes tag_list accesses.
* @tag_list: List of the request queues that use this tag set. See also
* request_queue.tag_set_list.
+ * @srcu: Use as lock when type of the request queue is blocking
+ * (BLK_MQ_F_BLOCKING).
*/
struct blk_mq_tag_set {
+ const struct blk_mq_ops *ops;
struct blk_mq_queue_map map[HCTX_MAX_TYPES];
unsigned int nr_maps;
- const struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth;
unsigned int reserved_tags;
@@ -252,8 +507,11 @@ struct blk_mq_tag_set {
struct blk_mq_tags **tags;
+ struct blk_mq_tags *shared_tags;
+
struct mutex tag_list_lock;
struct list_head tag_list;
+ struct srcu_struct *srcu;
};
/**
@@ -267,9 +525,7 @@ struct blk_mq_queue_data {
bool last;
};
-typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
- bool);
-typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef bool (busy_tag_iter_fn)(struct request *, void *);
/**
* struct blk_mq_ops - Callback functions that implement block driver
@@ -292,27 +548,44 @@ struct blk_mq_ops {
void (*commit_rqs)(struct blk_mq_hw_ctx *);
/**
+ * @queue_rqs: Queue a list of new requests. Driver is guaranteed
+ * that each request belongs to the same queue. If the driver doesn't
+ * empty the @rqlist completely, then the rest will be queued
+ * individually by the block layer upon return.
+ */
+ void (*queue_rqs)(struct request **rqlist);
+
+ /**
* @get_budget: Reserve budget before queueing a request; once .queue_rq is
* run, it is the driver's responsibility to release the
* reserved budget. The failure case of .get_budget must also
* be handled to avoid I/O deadlock.
*/
- bool (*get_budget)(struct request_queue *);
+ int (*get_budget)(struct request_queue *);
/**
* @put_budget: Release the reserved budget.
*/
- void (*put_budget)(struct request_queue *);
+ void (*put_budget)(struct request_queue *, int);
+
+ /**
+ * @set_rq_budget_token: store rq's budget token
+ */
+ void (*set_rq_budget_token)(struct request *, int);
+ /**
+ * @get_rq_budget_token: retrieve rq's budget token
+ */
+ int (*get_rq_budget_token)(struct request *);
/**
* @timeout: Called on request timeout.
*/
- enum blk_eh_timer_return (*timeout)(struct request *, bool);
+ enum blk_eh_timer_return (*timeout)(struct request *);
/**
* @poll: Called to poll for completion of a specific tag.
*/
- int (*poll)(struct blk_mq_hw_ctx *);
+ int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
/**
* @complete: Mark the request as complete.
@@ -346,11 +619,6 @@ struct blk_mq_ops {
unsigned int);
/**
- * @initialize_rq_fn: Called from inside blk_get_request().
- */
- void (*initialize_rq_fn)(struct request *rq);
-
- /**
* @cleanup_rq: Called before freeing one request which isn't completed
* yet, and usually for freeing the driver private data.
*/
@@ -365,7 +633,7 @@ struct blk_mq_ops {
* @map_queues: This allows drivers specify their own queue mapping by
* overriding the setup-time function that builds the mq_map.
*/
- int (*map_queues)(struct blk_mq_tag_set *set);
+ void (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
/**
@@ -378,14 +646,21 @@ struct blk_mq_ops {
enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
- BLK_MQ_F_TAG_SHARED = 1 << 1,
+ BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
/*
* Set when this device requires underlying blk-mq device for
* completing IO:
*/
BLK_MQ_F_STACKING = 1 << 2,
+ BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
BLK_MQ_F_BLOCKING = 1 << 5,
+ /* Do not allow an I/O scheduler to be configured. */
BLK_MQ_F_NO_SCHED = 1 << 6,
+ /*
+ * Select 'none' during queue registration in case of a single hwq
+ * or shared hwqs instead of 'mq-deadline'.
+ */
+ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -407,24 +682,34 @@ enum {
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
- void *queuedata);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q,
- bool elevator_init);
-struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops,
- unsigned int queue_depth,
- unsigned int set_flags);
-void blk_mq_unregister_dev(struct device *, struct request_queue *);
+#define BLK_MQ_NO_HCTX_IDX (-1U)
+
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
+ struct queue_limits *lim, void *queuedata,
+ struct lock_class_key *lkclass);
+#define blk_mq_alloc_disk(set, lim, queuedata) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_mq_alloc_disk(set, lim, queuedata, &__key); \
+})
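
To illustrate how these allocation helpers fit together, here is a minimal bring-up sketch. The my_dev structure, my_mq_ops, and the chosen queue depth are illustrative assumptions, not part of this header.

#include <linux/blk-mq.h>

/* Hypothetical driver state; only the fields used below. */
struct my_dev {
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*disk;
};

static const struct blk_mq_ops my_mq_ops;	/* assumed to provide ->queue_rq */

static int my_probe(struct my_dev *dev)
{
	int ret;

	dev->tag_set.ops = &my_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 128;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	/* NULL limits: accept the defaults; dev doubles as queuedata. */
	dev->disk = blk_mq_alloc_disk(&dev->tag_set, NULL, dev);
	if (IS_ERR(dev->disk)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(dev->disk);
	}
	return 0;
}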
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+ struct lock_class_key *lkclass);
+struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ struct queue_limits *lim, void *queuedata);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-
void blk_mq_free_request(struct request *rq);
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+ unsigned int poll_flags);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -433,16 +718,48 @@ enum {
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
/* allocate from reserved pool */
BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
- /* set RQF_PREEMPT */
- BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
+ /* set RQF_PM */
+ BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags,
+ blk_opf_t opf, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+ unsigned int active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+
+ /*
+ * used to clear request reference in rqs[] before freeing one
+ * request pool
+ */
+ spinlock_t lock;
+};
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
+ return tags->rqs[tag];
+ }
+
+ return NULL;
+}
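
As a usage sketch, a completion path can translate a hardware tag back into its request with this helper before completing it. my_handle_completion() below is a hypothetical driver function, not an interface defined here.

/* Sketch only: the tag is assumed to come from the device's completion queue. */
static void my_handle_completion(struct blk_mq_tags *tags, unsigned int tag)
{
	struct request *rq = blk_mq_tag_to_rq(tags, tag);

	if (rq)
		blk_mq_complete_request(rq);
}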
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -480,18 +797,84 @@ static inline int blk_mq_request_completed(struct request *rq)
return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
+/*
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure that special completion
+ * actions which need access to the request are called on failure, e.g.
+ * by nvme for multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
+/*
+ * Complete the request directly instead of deferring it to softirq or
+ * completing it on another CPU. Useful in a preemptible context instead
+ * of an interrupt context.
+ */
+static inline void blk_mq_complete_request_direct(struct request *rq,
+ void (*complete)(struct request *rq))
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ complete(rq);
+}
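
A plausible caller, sketched under the assumption of a driver-private my_end() completion callback:

/* Hypothetical: complete from task context without an IPI/softirq bounce. */
static void my_end(struct request *rq)
{
	blk_mq_end_request(rq, BLK_STS_OK);
}

static void my_task_context_complete(struct request *rq)
{
	blk_mq_complete_request_direct(rq, my_end);
}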
+
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
+void blk_mq_end_request_batch(struct io_comp_batch *ib);
+
+/*
+ * Start/end time stamping is only needed if iostat or blk stats are
+ * enabled, or if an I/O scheduler is in use.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ /*
+	 * Passthrough I/O doesn't use iostat accounting, cgroup stats,
+	 * or I/O scheduler functionality.
+ */
+ if (blk_rq_is_passthrough(rq))
+ return false;
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
+}
+
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+ return rq->rq_flags & RQF_RESV;
+}
+
+/*
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+ struct io_comp_batch *iob, int ioerror,
+ void (*complete)(struct io_comp_batch *))
+{
+ /*
+	 * blk_mq_end_request_batch() can't end requests allocated from
+	 * sched tags.
+ */
+ if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
+ (req->end_io && !blk_rq_is_passthrough(req)))
+ return false;
+
+ if (!iob->complete)
+ iob->complete = complete;
+ else if (iob->complete != complete)
+ return false;
+ iob->need_ts |= blk_mq_need_time_stamp(req);
+ rq_list_add(&iob->req_list, req);
+ return true;
+}
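
A sketch of the intended pattern in a driver's ->poll handler; the my_*() helpers below are assumptions standing in for driver specifics, not interfaces defined here.

static struct request *my_pop_completed_rq(struct blk_mq_hw_ctx *hctx);
static void my_complete_batch(struct io_comp_batch *iob);

static int my_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct request *rq;
	int found = 0;

	while ((rq = my_pop_completed_rq(hctx))) {
		found++;
		/* Fall back to one-by-one completion if batching refuses. */
		if (!blk_mq_add_to_batch(rq, iob, 0, my_complete_batch))
			blk_mq_complete_request(rq);
	}
	return found;
}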
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
-bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
- struct bio *bio, unsigned int nr_segs);
-bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
@@ -499,6 +882,9 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
@@ -514,7 +900,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -559,29 +945,290 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
}
#define queue_for_each_hw_ctx(q, hctx, i) \
- for ((i) = 0; (i) < (q)->nr_hw_queues && \
- ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+ xa_for_each(&(q)->hctx_table, (i), (hctx))
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+static inline void blk_mq_cleanup_rq(struct request *rq)
{
- if (rq->tag != -1)
- return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+ if (rq->q->mq_ops->cleanup_rq)
+ rq->q->mq_ops->cleanup_rq(rq);
+}
- return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
- BLK_QC_T_INTERNAL;
+static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
+ unsigned int nr_segs)
+{
+ rq->nr_phys_segments = nr_segs;
+ rq->__data_len = bio->bi_iter.bi_size;
+ rq->bio = rq->biotail = bio;
+ rq->ioprio = bio_prio(bio);
}
-static inline void blk_mq_cleanup_rq(struct request *rq)
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+ struct lock_class_key *key);
+
+static inline bool rq_is_sync(struct request *rq)
{
- if (rq->q->mq_ops->cleanup_rq)
- rq->q->mq_ops->cleanup_rq(rq);
+ return op_is_sync(rq->cmd_flags);
}
-blk_qc_t blk_mq_submit_bio(struct bio *bio);
+void blk_rq_init(struct request_queue *q, struct request *rq);
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
+void blk_rq_unprep_clone(struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
+
+struct rq_map_data {
+ struct page **pages;
+ unsigned long offset;
+ unsigned short page_order;
+ unsigned short nr_entries;
+ bool null_mapped;
+ bool from_user;
+};
-#endif
+int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
+int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *, gfp_t);
+int blk_rq_unmap_user(struct bio *);
+int blk_rq_map_kern(struct request_queue *, struct request *, void *,
+ unsigned int, gfp_t);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
+blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
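
To show how the allocation and execution helpers combine for passthrough commands, a minimal synchronous sketch follows; the driver-private PDU setup is elided and my_send_passthrough() is illustrative only.

static blk_status_t my_send_passthrough(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return BLK_STS_RESOURCE;
	/* ... fill the driver-private PDU via blk_mq_rq_to_pdu(rq) ... */
	status = blk_execute_rq(rq, false);
	blk_mq_free_request(rq);
	return status;
}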
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_for_each_bvec(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ if (!rq->bio)
+ return 0;
+ if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
+ return rq->bio->bi_iter.bi_size;
+ return bio_iovec(rq->bio).bv_len;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
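
A tiny worked example of how these accessors compose; my_rq_end_sector() is illustrative only.

/* First sector past the end of the request's remaining data. */
static inline sector_t my_rq_end_sector(const struct request *rq)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}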
+
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
+/*
+ * Return the first full biovec in the request. The caller needs to ensure
+ * that the request has at least one bvec before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ */
+bool blk_update_request(struct request *rq, blk_status_t error,
+ unsigned int nr_bytes);
+void blk_abort_request(struct request *);
+
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist, struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+}
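
A sketch of the usual prep path: map a request into a driver-provided scatterlist, then hand it to the DMA API. The caller is assumed to have sized sgl for at least blk_rq_nr_phys_segments(rq) entries; my_map_for_dma() is not a real interface.

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int my_map_for_dma(struct device *dma_dev, struct request *rq,
			  struct scatterlist *sgl)
{
	int nents = blk_rq_map_sg(rq->q, rq, sgl);

	if (!nents)
		return -EIO;
	if (!dma_map_sg(dma_dev, sgl, nents,
			rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE :
						   DMA_FROM_DEVICE))
		return -EIO;
	return 0;
}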
+void blk_dump_rq_flags(struct request *, char *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
+}
+
+/**
+ * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
+ * @rq: Request to examine.
+ *
+ * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
+ */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+ return op_needs_zoned_write_locking(req_op(rq)) &&
+ blk_rq_zone_is_seq(rq);
+}
+
+bool blk_req_needs_zone_write_lock(struct request *rq);
+bool blk_req_zone_write_trylock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->disk->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
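
Because the stubs above compile away for non-zoned builds, a dispatch path can use the helpers unconditionally. A minimal sketch, assuming a hypothetical my_issue() submission routine:

static blk_status_t my_issue(struct request *rq);

static blk_status_t my_dispatch(struct request *rq)
{
	if (!blk_req_can_dispatch_to_zone(rq))
		return BLK_STS_ZONE_RESOURCE;	/* retry later */
	blk_req_zone_write_lock(rq);
	return my_issue(rq);	/* unlock again on completion */
}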
+
+#endif /* BLK_MQ_H */
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index b80c65aba249..004b38a538ff 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -14,8 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4ecf4fed171f..cb1526ec44b5 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -8,7 +8,9 @@
#include <linux/types.h>
#include <linux/bvec.h>
+#include <linux/device.h>
#include <linux/ktime.h>
+#include <linux/rw_hint.h>
struct bio_set;
struct bio;
@@ -19,42 +21,81 @@ struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
+/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
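
The usual byte/sector conversions implied by these constants, as a sketch; the my_* helpers are illustrative, not part of the header.

static inline sector_t my_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;		/* truncates partial sectors */
}

static inline u64 my_sectors_to_bytes(sector_t sects)
{
	return (u64)sects << SECTOR_SHIFT;
}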
+
struct block_device {
- dev_t bd_dev; /* not a kdev_t - it's a search key */
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
+ sector_t bd_start_sect;
+ sector_t bd_nr_sectors;
+ struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
+ struct disk_stats __percpu *bd_stats;
+ unsigned long bd_stamp;
+ bool bd_read_only; /* read-only policy */
+ u8 bd_partno;
+ bool bd_write_holder;
+ bool bd_has_submit_bio;
+ dev_t bd_dev;
+ struct inode *bd_inode; /* will die */
+
+ atomic_t bd_openers;
+ spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
void * bd_claiming;
void * bd_holder;
+ const struct blk_holder_ops *bd_holder_ops;
+ struct mutex bd_holder_lock;
int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
- struct block_device * bd_contains;
- u8 bd_partno;
- struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- int bd_invalidated;
- struct gendisk * bd_disk;
- struct backing_dev_info *bd_bdi;
+ struct kobject *bd_holder_dir;
+
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
+ struct partition_meta_info *bd_meta_info;
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ bool bd_make_it_fail;
+#endif
+ bool bd_ro_warned;
+ int bd_writers;
+ /*
+ * keep this out-of-line as it's both big and not needed in the fast
+ * path
+ */
+ struct device bd_device;
} __randomize_layout;
+#define bdev_whole(_bdev) \
+ ((_bdev)->bd_disk->part0)
+
+#define dev_to_bdev(device) \
+ container_of((device), struct block_device, bd_device)
+
+#define bdev_kobj(_bdev) \
+ (&((_bdev)->bd_device.kobj))
+
/*
* Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
*/
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
+typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
@@ -62,7 +103,7 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_NOSPC ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT ((__force blk_status_t)4)
#define BLK_STS_TARGET ((__force blk_status_t)5)
-#define BLK_STS_NEXUS ((__force blk_status_t)6)
+#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6)
#define BLK_STS_MEDIUM ((__force blk_status_t)7)
#define BLK_STS_PROTECTION ((__force blk_status_t)8)
#define BLK_STS_RESOURCE ((__force blk_status_t)9)
@@ -71,6 +112,10 @@ typedef u8 __bitwise blk_status_t;
/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
+/*
+ * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
+ * and the bio would block (cf bio_wouldblock_error())
+ */
#define BLK_STS_AGAIN ((__force blk_status_t)12)
/*
@@ -103,6 +148,37 @@ typedef u8 __bitwise blk_status_t;
*/
#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14)
+/*
+ * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently open. The same command should be successful if resubmitted
+ * after the number of open zones decreases below the device's limits, which is
+ * reported in the request_queue's max_open_zones.
+ */
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15)
+
+/*
+ * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently active. The same command should be successful if resubmitted
+ * after the number of active zones decreases below the device's limits, which
+ * is reported in the request_queue's max_active_zones.
+ */
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16)
+
+/*
+ * BLK_STS_OFFLINE is returned from the driver when the target device is offline
+ * or is being taken offline. This could help differentiate the case where a
+ * device is intentionally being shut down from a real I/O error.
+ */
+#define BLK_STS_OFFLINE ((__force blk_status_t)17)
+
+/*
+ * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
+ * aborted the command because it exceeded one of its Command Duration Limits.
+ */
+#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18)
+
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
@@ -121,7 +197,7 @@ static inline bool blk_path_error(blk_status_t error)
case BLK_STS_NOTSUPP:
case BLK_STS_NOSPC:
case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
+ case BLK_STS_RESV_CONFLICT:
case BLK_STS_MEDIUM:
case BLK_STS_PROTECTION:
return false;
@@ -131,51 +207,14 @@ static inline bool blk_path_error(blk_status_t error)
return true;
}
-/*
- * From most significant bit:
- * 1 bit: reserved for other usage, see below
- * 12 bits: original size of bio
- * 51 bits: issue time of bio
- */
-#define BIO_ISSUE_RES_BITS 1
-#define BIO_ISSUE_SIZE_BITS 12
-#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS)
-#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
-#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
-#define BIO_ISSUE_SIZE_MASK \
- (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
-#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
-
-/* Reserved bit for blk-throtl */
-#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
-
struct bio_issue {
u64 value;
};
-static inline u64 __bio_issue_time(u64 time)
-{
- return time & BIO_ISSUE_TIME_MASK;
-}
-
-static inline u64 bio_issue_time(struct bio_issue *issue)
-{
- return __bio_issue_time(issue->value);
-}
-
-static inline sector_t bio_issue_size(struct bio_issue *issue)
-{
- return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
-}
+typedef __u32 __bitwise blk_opf_t;
-static inline void bio_issue_init(struct bio_issue *issue,
- sector_t size)
-{
- size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
- issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
- (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
- ((u64)size << BIO_ISSUE_SIZE_SHIFT));
-}
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
@@ -183,22 +222,20 @@ static inline void bio_issue_init(struct bio_issue *issue,
*/
struct bio {
struct bio *bi_next; /* request queue link */
- struct gendisk *bi_disk;
- unsigned int bi_opf; /* bottom bits req flags,
- * top bits REQ_OP. Use
- * accessors.
+ struct block_device *bi_bdev;
+ blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
+ * req_flags.
*/
- unsigned short bi_flags; /* status, etc and bvec pool number */
+ unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
- unsigned short bi_write_hint;
+ enum rw_hint bi_write_hint;
blk_status_t bi_status;
- u8 bi_partno;
atomic_t __bi_remaining;
struct bvec_iter bi_iter;
+ blk_qc_t bi_cookie;
bio_end_io_t *bi_end_io;
-
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
@@ -247,60 +284,38 @@ struct bio {
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
/*
* bio flags
*/
enum {
- BIO_NO_PAGE_REF, /* don't put release vec pages */
+ BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
BIO_CLONED, /* doesn't own data */
BIO_BOUNCED, /* bio is a bounce bio */
- BIO_USER_MAPPED, /* contains user pages */
- BIO_NULL_MAPPED, /* contains invalid user pages */
- BIO_WORKINGSET, /* contains userspace workingset pages */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
- BIO_THROTTLED, /* This bio has already been subjected to
+ BIO_BPS_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
- BIO_TRACKED, /* set if bio goes through the rq_qos path */
+ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ BIO_QOS_MERGED, /* but went through rq_qos merge path */
+ BIO_REMAPPED,
+ BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
BIO_FLAG_LAST
};
-/* See BVEC_POOL_OFFSET below before adding new flags */
-
-/*
- * We support 6 different bvec pools, the last one is magic in that it
- * is backed by a mempool.
- */
-#define BVEC_POOL_NR 6
-#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
-
-/*
- * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
- * 1 to the actual index so that 0 indicates that there are no bvecs to be
- * freed.
- */
-#define BVEC_POOL_BITS (3)
-#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
-#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
-#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
-# error "BVEC_POOL_BITS is too small"
-#endif
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * only BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS BVEC_POOL_OFFSET
-
typedef __u32 __bitwise blk_mq_req_flags_t;
-/*
- * Operations and flags common to the bio and request structures.
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
+
+/**
+ * enum req_op - Operations common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
*
* The least significant bit of the operation number indicates the data
@@ -312,46 +327,37 @@ typedef __u32 __bitwise blk_mq_req_flags_t;
 * If an operation does not transfer data, the least significant bit has no
* meaning.
*/
-#define REQ_OP_BITS 8
-#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define REQ_FLAG_BITS 24
-
-enum req_opf {
+enum req_op {
/* read sectors from the device */
- REQ_OP_READ = 0,
+ REQ_OP_READ = (__force blk_opf_t)0,
/* write sectors to the device */
- REQ_OP_WRITE = 1,
+ REQ_OP_WRITE = (__force blk_opf_t)1,
/* flush the volatile write cache */
- REQ_OP_FLUSH = 2,
+ REQ_OP_FLUSH = (__force blk_opf_t)2,
/* discard sectors */
- REQ_OP_DISCARD = 3,
+ REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
- REQ_OP_SECURE_ERASE = 5,
- /* write the same sector many times */
- REQ_OP_WRITE_SAME = 7,
+ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)7,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 9,
+ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
- REQ_OP_ZONE_OPEN = 10,
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
/* Close a zone */
- REQ_OP_ZONE_CLOSE = 11,
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
/* Transition a zone to full */
- REQ_OP_ZONE_FINISH = 12,
- /* write data at the current zone write pointer */
- REQ_OP_ZONE_APPEND = 13,
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
/* reset a zone write pointer */
- REQ_OP_ZONE_RESET = 15,
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)13,
/* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = 17,
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)15,
- /* SCSI passthrough using struct scsi_request */
- REQ_OP_SCSI_IN = 32,
- REQ_OP_SCSI_OUT = 33,
/* Driver private requests */
- REQ_OP_DRV_IN = 34,
- REQ_OP_DRV_OUT = 35,
+ REQ_OP_DRV_IN = (__force blk_opf_t)34,
+ REQ_OP_DRV_OUT = (__force blk_opf_t)35,
- REQ_OP_LAST,
+ REQ_OP_LAST = (__force blk_opf_t)36,
};
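
To make the encoding concrete: an opf value is an opcode in the low REQ_OP_BITS plus modifier flags above them. The my_* helpers below are illustrative assumptions, mirroring how the header's own accessors mask with REQ_OP_MASK.

static inline blk_opf_t my_sync_fua_write(void)
{
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}

static inline enum req_op my_opf_to_op(blk_opf_t opf)
{
	return opf & REQ_OP_MASK;
}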
enum req_flag_bits {
@@ -370,47 +376,45 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
+ __REQ_SWAP, /* swap I/O */
+ __REQ_DRV, /* for driver use */
+ __REQ_FS_PRIVATE, /* for file system (submitter) use */
+
/*
- * When a shared kthread needs to issue a bio for a cgroup, doing
- * so synchronously can lead to priority inversions as the kthread
- * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
- * submit_bio() punt the actual issuing to a dedicated per-blkcg
- * work item to avoid such priority inversions.
+ * Command specific flags, keep last:
*/
- __REQ_CGROUP_PUNT,
-
- /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ /* for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_HIPRI,
-
- /* for driver use */
- __REQ_DRV,
- __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1ULL << __REQ_SYNC)
-#define REQ_META (1ULL << __REQ_META)
-#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_IDLE (1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
-
-#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI (1ULL << __REQ_HIPRI)
-
-#define REQ_DRV (1ULL << __REQ_DRV)
-#define REQ_SWAP (1ULL << __REQ_SWAP)
+#define REQ_FAILFAST_DEV \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
+#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
+#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+
+#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -427,28 +431,21 @@ enum stat_group {
NR_STAT_GROUPS
};
-#define bio_op(bio) \
- ((bio)->bi_opf & REQ_OP_MASK)
-#define req_op(req) \
- ((req)->cmd_flags & REQ_OP_MASK)
-
-/* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
- unsigned op_flags)
+static inline enum req_op bio_op(const struct bio *bio)
{
- bio->bi_opf = op | op_flags;
+ return bio->bi_opf & REQ_OP_MASK;
}
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
{
- return (op & 1);
+ return !!(op & (__force blk_opf_t)1);
}
/*
* Check if the bio or request is one that needs special treatment in the
* flush state machine.
*/
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
{
return op & (REQ_FUA | REQ_PREFLUSH);
}
@@ -458,13 +455,13 @@ static inline bool op_is_flush(unsigned int op)
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
*/
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_READ ||
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
-static inline bool op_is_discard(unsigned int op)
+static inline bool op_is_discard(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
@@ -475,7 +472,7 @@ static inline bool op_is_discard(unsigned int op)
* due to its different handling in the block layer and device response in
* case of command failure.
*/
-static inline bool op_is_zone_mgmt(enum req_opf op)
+static inline bool op_is_zone_mgmt(enum req_op op)
{
switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET:
@@ -488,39 +485,13 @@ static inline bool op_is_zone_mgmt(enum req_opf op)
}
}
-static inline int op_stat_group(unsigned int op)
+static inline int op_stat_group(enum req_op op)
{
if (op_is_discard(op))
return STAT_DISCARD;
return op_is_write(op);
}
-typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_EAGAIN -2U
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
-static inline bool blk_qc_t_valid(blk_qc_t cookie)
-{
- return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
-}
-
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
-{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
-}
-
-static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
-{
- return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
-}
-
-static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
-{
- return (cookie & BLK_QC_T_INTERNAL) != 0;
-}
-
struct blk_rq_stat {
u64 mean;
u64 min;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bb5636cc17b9..c3e8f7cf96be 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,323 +1,281 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions Copyright (C) 1992 Drew Eckhardt
+ */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/major.h>
-#include <linux/genhd.h>
+#include <linux/types.h>
+#include <linux/blk_types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/pagemap.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
-#include <linux/pfn.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/sched.h>
+#include <linux/sbitmap.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+#include <linux/file.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
-struct blk_keyslot_manager;
-
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
+struct blk_crypto_profile;
-/* Must be consistent with blk_mq_poll_stats_bkt() */
-#define BLK_MQ_POLL_STATS_BKTS 16
-
-/* Doing classic polling */
-#define BLK_MQ_POLL_CLASSIC -1
+extern const struct device_type disk_type;
+extern const struct device_type part_type;
+extern const struct class block_class;
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 5
-
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+#define BLKCG_MAX_POLS 6
-/*
- * request flags */
-typedef __u32 __bitwise req_flags_t;
-
-/* elevator knows about this request */
-#define RQF_SORTED ((__force req_flags_t)(1 << 0))
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
- "quiesce" state must be ignored. */
-#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account into disk and partition IO statistics */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* track IO completion time */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The per-zone write lock is held for this request */
-#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
-/* already slept for hybrid poll */
-#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
-/* ->timeout has been called, don't expire again */
-#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
-
-/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
+#define PARTITION_META_INFO_VOLNAMELTH 64
/*
- * Request state for blk-mq.
+ * Enough for the string representation of any kind of UUID plus NULL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
*/
-enum mq_rq_state {
- MQ_RQ_IDLE = 0,
- MQ_RQ_IN_FLIGHT = 1,
- MQ_RQ_COMPLETE = 2,
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
+
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
-/*
- * Try to put the fields that are referenced together in the same cacheline.
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
+ *
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
*
- * If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
-struct request {
- struct request_queue *q;
- struct blk_mq_ctx *mq_ctx;
- struct blk_mq_hw_ctx *mq_hctx;
-
- unsigned int cmd_flags; /* op and common flags */
- req_flags_t rq_flags;
-
- int tag;
- int internal_tag;
-
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- sector_t __sector; /* sector cursor */
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
- struct bio *bio;
- struct bio *biotail;
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
- struct list_head queuelist;
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
+};
- /*
- * The hash is used inside the scheduler, and killed once the
- * request reaches the dispatch list. The ipi_list is only used
- * to queue the request for softirq completion, which is long
- * after the request has been unhashed (and even removed from
- * the dispatch list).
- */
- union {
- struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
- };
+struct disk_events;
+struct badblocks;
- /*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
- */
- union {
- struct rb_node rb_node; /* sort/lookup */
- struct bio_vec special_vec;
- void *completion_data;
- int error_count; /* for legacy drivers, don't use */
- };
+struct blk_integrity {
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char pi_offset;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+};
+typedef unsigned int __bitwise blk_mode_t;
+
+/* open for reading */
+#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0))
+/* open for writing */
+#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1))
+/* open exclusively (vs other exclusive openers) */
+#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2))
+/* opened with O_NDELAY */
+#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
+/* open for "writes" only for ioctls (specialy hack for floppy.c) */
+#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+
+struct gendisk {
/*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
+	 * major/first_minor/minors should not be set by any new driver; the
+ * block core will take care of allocating them automatically.
*/
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct hd_struct *part;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
- /* Time that the first bio started allocating this request. */
- u64 alloc_time_ns;
+ int major;
+ int first_minor;
+ int minors;
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
+
+ struct xarray part_tbl;
+ struct block_device *part0;
+
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+
+ struct bio_set bio_split;
+
+ int flags;
+ unsigned long state;
+#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
+#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
+#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+
+ struct backing_dev_info *bdi;
+ struct kobject queue_kobj; /* the queue/ directory */
+ struct kobject *slave_dir;
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+ struct list_head slave_bdevs;
#endif
- /* Time that this request was allocated for this IO. */
- u64 start_time_ns;
- /* Time that I/O was submitted to the device. */
- u64 io_start_time_ns;
-
-#ifdef CONFIG_BLK_WBT
- unsigned short wbt_flags;
-#endif
- /*
- * rq sectors used for blk stats. It has the same value
- * with blk_rq_sectors(rq), except that it never be zeroed
- * by completion.
- */
- unsigned short stats_sectors;
+ struct timer_rand_state *random;
+ atomic_t sync_io; /* RAID */
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
- * Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit set) or
+ * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched.
+ *
+ * Reads of this information must be protected with blk_queue_enter() /
+ * blk_queue_exit(). Modifying this information is only allowed while
+ * no requests are being processed. See also blk_mq_freeze_queue() and
+ * blk_mq_unfreeze_queue().
*/
- unsigned short nr_phys_segments;
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
-#endif
+ unsigned int nr_zones;
+ unsigned long *conv_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
-#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- struct bio_crypt_ctx *crypt_ctx;
- struct blk_ksm_keyslot *crypt_keyslot;
+#if IS_ENABLED(CONFIG_CDROM)
+ struct cdrom_device_info *cdi;
#endif
-
- unsigned short write_hint;
- unsigned short ioprio;
-
- enum mq_rq_state state;
- refcount_t ref;
-
- unsigned int timeout;
- unsigned long deadline;
-
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ u64 diskseq;
+ blk_mode_t open_mode;
/*
- * completion callback.
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
*/
- rq_end_io_fn *end_io;
- void *end_io_data;
+ struct blk_independent_access_ranges *ia_ranges;
};
-static inline bool blk_op_is_scsi(unsigned int op)
+static inline bool disk_live(struct gendisk *disk)
{
- return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+ return !inode_unhashed(disk->part0->bd_inode);
}
-static inline bool blk_op_is_private(unsigned int op)
+/**
+ * disk_openers - returns how many openers there are for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk. Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
{
- return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+ return atomic_read(&disk->part0->bd_openers);
}
-static inline bool blk_rq_is_scsi(struct request *rq)
-{
- return blk_op_is_scsi(req_op(rq));
-}
+/*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+ */
+#define dev_to_disk(device) \
+ (dev_to_bdev(device)->bd_disk)
+#define disk_to_dev(disk) \
+ (&((disk)->part0->bd_device))
-static inline bool blk_rq_is_private(struct request *rq)
-{
- return blk_op_is_private(req_op(rq));
-}
+#if IS_REACHABLE(CONFIG_CDROM)
+#define disk_to_cdi(disk) ((disk)->cdi)
+#else
+#define disk_to_cdi(disk) NULL
+#endif
-static inline bool blk_rq_is_passthrough(struct request *rq)
+static inline dev_t disk_devt(struct gendisk *disk)
{
- return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
+ return MKDEV(disk->major, disk->first_minor);
}
-static inline bool bio_is_passthrough(struct bio *bio)
+static inline int blk_validate_block_size(unsigned long bsize)
{
- unsigned op = bio_op(bio);
+ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
- return blk_op_is_scsi(op) || blk_op_is_private(op);
+ return 0;
}
-static inline unsigned short req_get_ioprio(struct request *req)
+static inline bool blk_op_is_passthrough(blk_opf_t op)
{
- return req->ioprio;
+ op &= REQ_OP_MASK;
+ return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-#include <linux/elevator.h>
-
-struct blk_queue_ctx;
-
-struct bio_vec;
-
-enum blk_eh_timer_return {
- BLK_EH_DONE, /* drivers has completed the command */
- BLK_EH_RESET_TIMER, /* reset timer and try again */
-};
-
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
-
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
-
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
/*
- * Zoned block device models (zoned limit).
- *
- * Note: This needs to be ordered from the least to the most severe
- * restrictions for the inheritance in blk_stack_limits() to work.
+ * BLK_BOUNCE_NONE: never bounce (default)
+ * BLK_BOUNCE_HIGH: bounce all highmem pages
*/
-enum blk_zoned_model {
- BLK_ZONED_NONE = 0, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
+enum blk_bounce {
+ BLK_BOUNCE_NONE,
+ BLK_BOUNCE_HIGH,
};
struct queue_limits {
- unsigned long bounce_pfn;
+ enum blk_bounce bounce;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -325,6 +283,7 @@ struct queue_limits {
unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
+ unsigned int max_user_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
unsigned int logical_block_size;
@@ -333,11 +292,13 @@ struct queue_limits {
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
- unsigned int max_write_same_sectors;
+ unsigned int max_user_discard_sectors;
+ unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int zone_write_granularity;
unsigned short max_segments;
unsigned short max_integrity_segments;
@@ -346,187 +307,164 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
- enum blk_zoned_model zoned;
+ bool zoned;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-#ifdef CONFIG_BLK_DEV_ZONED
+void disk_set_zoned(struct gendisk *disk);
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
-unsigned int blkdev_nr_zones(struct gendisk *disk);
-extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
- sector_t sectors, sector_t nr_sectors,
- gfp_t gfp_mask);
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk));
-
-extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-
-#else /* CONFIG_BLK_DEV_ZONED */
-
-static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
+ void (*update_driver_data)(struct gendisk *disk));
-static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ sector_t sector;
+ sector_t nr_sectors;
+};
-#endif /* CONFIG_BLK_DEV_ZONED */
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
struct request_queue {
- struct request *last_merge;
- struct elevator_queue *elevator;
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
- struct blk_queue_stats *stats;
- struct rq_qos *rq_qos;
+ struct elevator_queue *elevator;
const struct blk_mq_ops *mq_ops;
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
- unsigned int queue_depth;
-
- /* hw dispatch queues */
- struct blk_mq_hw_ctx **queue_hw_ctx;
- unsigned int nr_hw_queues;
-
- struct backing_dev_info *backing_dev_info;
-
- /*
- * The queue owner gets to use this for whatever they like.
- * ll_rw_blk doesn't touch it.
- */
- void *queuedata;
-
/*
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
- /*
- * Number of contexts that have called blk_set_pm_only(). If this
- * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
- * processed.
- */
- atomic_t pm_only;
- /*
- * ida allocated id for this queue. Used to index queues from
- * ioctx.
- */
- int id;
+ unsigned int rq_timeout;
- /*
- * queue needs bounce pages for pages above this limit
- */
- gfp_t bounce_gfp;
+ unsigned int queue_depth;
+
+ refcount_t refs;
+
+ /* hw dispatch queues */
+ unsigned int nr_hw_queues;
+ struct xarray hctx_table;
+
+ struct percpu_ref q_usage_counter;
+
+ struct request *last_merge;
spinlock_t queue_lock;
- /*
- * queue kobject
- */
- struct kobject kobj;
+ int quiesce_depth;
+
+ struct gendisk *disk;
/*
* mq queue kobject
*/
struct kobject *mq_kobj;
+ struct queue_limits limits;
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#ifdef CONFIG_PM
struct device *dev;
- int rpm_status;
- unsigned int nr_pending;
+ enum rpm_status rpm_status;
#endif
/*
- * queue settings
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
*/
- unsigned long nr_requests; /* Max # of requests */
+ atomic_t pm_only;
+
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ struct mutex rq_qos_mutex;
+
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
unsigned int dma_pad_mask;
- unsigned int dma_alignment;
+
+ /*
+ * queue settings
+ */
+ unsigned long nr_requests; /* Max # of requests */
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- /* Inline crypto capabilities */
- struct blk_keyslot_manager *ksm;
+ struct blk_crypto_profile *crypto_profile;
+ struct kobject *crypto_kobject;
#endif
- unsigned int rq_timeout;
- int poll_nsec;
-
- struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
-
struct timer_list timeout;
struct work_struct timeout_work;
+ atomic_t nr_active_requests_shared_tags;
+
+ unsigned int required_elevator_features;
+
+ struct blk_mq_tags *sched_shared_tags;
+
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
+ struct mutex blkcg_mutex;
#endif
- struct queue_limits limits;
-
- unsigned int required_elevator_features;
+ int node;
-#ifdef CONFIG_BLK_DEV_ZONED
- /*
- * Zoned block device information for request dispatch control.
- * nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit set) or
- * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
- * bits which indicates if a zone is write locked, that is, if a write
- * request targeting the zone was dispatched. All three fields are
- * initialized by the low level device driver (e.g. scsi/sd.c).
- * Stacking drivers (device mappers) may or may not initialize
- * these fields.
- *
- * Reads of this information must be protected with blk_queue_enter() /
- * blk_queue_exit(). Modifying this information is only allowed while
- * no requests are being processed. See also blk_mq_freeze_queue() and
- * blk_mq_unfreeze_queue().
- */
- unsigned int nr_zones;
- unsigned long *conv_zones_bitmap;
- unsigned long *seq_zones_wlock;
- unsigned int max_open_zones;
- unsigned int max_active_zones;
-#endif /* CONFIG_BLK_DEV_ZONED */
+ spinlock_t requeue_lock;
+ struct list_head requeue_list;
+ struct delayed_work requeue_work;
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
- int node;
- struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
#endif
@@ -534,13 +472,11 @@ struct request_queue {
* for flush operations
*/
struct blk_flush_queue *fq;
-
- struct list_head requeue_list;
- spinlock_t requeue_lock;
- struct delayed_work requeue_work;
+ struct list_head flush_list;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
+ struct mutex limits_lock;
/*
* for reusing dead hctx instance in case of updating
@@ -551,10 +487,6 @@ struct request_queue {
int mq_freeze_depth;
-#if defined(CONFIG_BLK_DEV_BSG)
- struct bsg_class_device bsg_dev;
-#endif
-
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
@@ -566,25 +498,19 @@ struct request_queue {
* percpu_ref_kill() and percpu_ref_reinit().
*/
struct mutex mq_freeze_lock;
- struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set bio_split;
struct dentry *debugfs_dir;
-
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
-#endif
+ /*
+ * Serializes all debugfs metadata operations using the above dentries.
+ */
+ struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
-
- size_t cmd_size;
-
-#define BLK_MAX_WRITE_HINTS 5
- u64 write_hints[BLK_MAX_WRITE_HINTS];
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
@@ -596,28 +522,31 @@ struct request_queue {
#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
+#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
+#define QUEUE_FLAG_HW_WC	13	/* Hardware write back caching supported */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
+#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
+#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
+#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skips the queue */
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
+ (1UL << QUEUE_FLAG_SAME_COMP) | \
+ (1UL << QUEUE_FLAG_NOWAIT))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
@@ -625,22 +554,18 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+ test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
- test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
@@ -655,24 +580,16 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
+#define blk_queue_skip_tagset_quiesce(q) \
+ test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
-static inline bool blk_account_rq(struct request *rq)
-{
- return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
-}
-
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-
-#define rq_dma_dir(rq) \
- (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
-
#define dma_map_bvec(dev, bv, dir, attrs) \
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
@@ -682,133 +599,97 @@ static inline bool queue_is_mq(struct request_queue *q)
return q->mq_ops;
}
-static inline enum blk_zoned_model
-blk_queue_zoned_model(struct request_queue *q)
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- return q->limits.zoned;
+ return q->rpm_status;
}
-
-static inline bool blk_queue_is_zoned(struct request_queue *q)
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- case BLK_ZONED_HM:
- return true;
- default:
- return false;
- }
+ return RPM_ACTIVE;
}
+#endif
-static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
+static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
}
#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+unsigned int bdev_nr_zones(struct block_device *bdev);
+
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
- return blk_queue_is_zoned(q) ? q->nr_zones : 0;
+ return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
- sector_t sector)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
- if (!blk_queue_is_zoned(q))
+ if (!blk_queue_is_zoned(disk->queue))
return 0;
- return sector >> ilog2(q->limits.chunk_sectors);
+ return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
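
/*
 * Example (illustrative): with 524288-sector (256 MiB) zones,
 * disk_zone_no(disk, 1048576) = 1048576 >> ilog2(524288) = 1048576 >> 19 = 2.
 */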
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
- sector_t sector)
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
- if (!blk_queue_is_zoned(q))
+ if (!blk_queue_is_zoned(disk->queue))
return false;
- if (!q->conv_zones_bitmap)
+ if (!disk->conv_zones_bitmap)
return true;
- return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
+ return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
-static inline void blk_queue_max_open_zones(struct request_queue *q,
+static inline void disk_set_max_open_zones(struct gendisk *disk,
unsigned int max_open_zones)
{
- q->max_open_zones = max_open_zones;
+ disk->queue->limits.max_open_zones = max_open_zones;
}
-static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+static inline void disk_set_max_active_zones(struct gendisk *disk,
+ unsigned int max_active_zones)
{
- return q->max_open_zones;
+ disk->queue->limits.max_active_zones = max_active_zones;
}
-static inline void blk_queue_max_active_zones(struct request_queue *q,
- unsigned int max_active_zones)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
- q->max_active_zones = max_active_zones;
+ return bdev->bd_disk->queue->limits.max_open_zones;
}
-static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- return q->max_active_zones;
+ return bdev->bd_disk->queue->limits.max_active_zones;
}
+
#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
return 0;
}
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
- sector_t sector)
-{
- return false;
-}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
- sector_t sector)
+
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
}
-static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
- return 0;
+ return false;
}
-static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
return 0;
}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
-static inline bool rq_is_sync(struct request *rq)
-{
- return op_is_sync(rq->cmd_flags);
-}
-
-static inline bool rq_mergeable(struct request *rq)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
- if (blk_rq_is_passthrough(rq))
- return false;
-
- if (req_op(rq) == REQ_OP_FLUSH)
- return false;
-
- if (req_op(rq) == REQ_OP_WRITE_ZEROES)
- return false;
-
- if (req_op(rq) == REQ_OP_ZONE_APPEND)
- return false;
-
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
- return false;
- if (rq->rq_flags & RQF_NOMERGE_FLAGS)
- return false;
-
- return true;
+ return 0;
}
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- if (bio_page(a) == bio_page(b) &&
- bio_offset(a) == bio_offset(b))
- return true;
-
- return false;
+ return 0;
}
+#endif /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
@@ -818,329 +699,234 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH : bounce all highmem pages
- * BLK_BOUNCE_ANY : don't bounce anything
- * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
- */
-
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH -1ULL
-#endif
-#define BLK_BOUNCE_ANY (-1ULL)
-#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
-
/*
* default timeout for SG_IO if none specified
*/
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
-
-struct req_iterator {
- struct bvec_iter iter;
- struct bio *bio;
-};
-
/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
-
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
-
-#define rq_for_each_bvec(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
-#define rq_iter_last(bvec, _iter) \
- (_iter.bio->bi_next == NULL && \
- bio_iter_last(bvec, _iter.iter))
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
-#else
-static inline void rq_flush_dcache_pages(struct request *rq)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
+ return device_add_disk(NULL, disk, NULL);
}
-#endif
-
-extern int blk_register_queue(struct gendisk *disk);
-extern void blk_unregister_queue(struct gendisk *disk);
-blk_qc_t submit_bio_noacct(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_put_request(struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- blk_mq_req_flags_t flags);
-extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_queue_split(struct bio **);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
-extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
+void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+void set_disk_ro(struct gendisk *disk, bool read_only);
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
-extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
-extern void blk_queue_exit(struct request_queue *q);
-extern void blk_sync_queue(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct iov_iter *,
- gfp_t);
-extern void blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
-extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
- struct request *, int, rq_end_io_fn *);
-
-/* Helper to convert REQ_OP_XXX to its string format XXX */
-extern const char *blk_op_str(unsigned int op);
-
-int blk_status_to_errno(blk_status_t status);
-blk_status_t errno_to_blk_status(int errno);
-
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
-
-static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
+static inline int get_disk_ro(struct gendisk *disk)
{
- return bdev->bd_disk->queue; /* this is never NULL */
+ return disk->part0->bd_read_only ||
+ test_bit(GD_READ_ONLY, &disk->state);
}
-/*
- * The basic unit of block I/O is a sector. It is used in a number of contexts
- * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
- * bytes. Variables of type sector_t represent an offset or size that is a
- * multiple of 512 bytes. Hence these two constants.
- */
-#ifndef SECTOR_SHIFT
-#define SECTOR_SHIFT 9
-#endif
-#ifndef SECTOR_SIZE
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
-#endif
-
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- * blk_rq_stats_sectors() : sectors of the entire request used for stats
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
-{
- return rq->__sector;
-}
-
-static inline unsigned int blk_rq_bytes(const struct request *rq)
+static inline int bdev_read_only(struct block_device *bdev)
{
- return rq->__data_len;
+ return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
-static inline int blk_rq_cur_bytes(const struct request *rq)
-{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
-}
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+void disk_force_media_change(struct gendisk *disk);
+void bdev_mark_dead(struct block_device *bdev, bool surprise);
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
+void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+void rand_initialize_disk(struct gendisk *disk);
-static inline unsigned int blk_rq_sectors(const struct request *rq)
+static inline sector_t get_start_sect(struct block_device *bdev)
{
- return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+ return bdev->bd_start_sect;
}
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
- return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+ return bdev->bd_nr_sectors;
}
-static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
- return rq->stats_sectors;
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}
-#ifdef CONFIG_BLK_DEV_ZONED
-
-/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
-const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
-
-static inline unsigned int blk_rq_zone_no(struct request *rq)
+static inline sector_t get_capacity(struct gendisk *disk)
{
- return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+ return bdev_nr_sectors(disk->part0);
}
-static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
- return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
}
-#endif /* CONFIG_BLK_DEV_ZONED */
-/*
- * Some commands like WRITE SAME have a payload or data transfer size which
- * is different from the size of the request. Any driver that supports such
- * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
- * calculate the data transfer size.
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
+
+void put_disk(struct gendisk *disk);
+struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
+ struct lock_class_key *lkclass);
+
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @lim: queue limits to be used for this disk.
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO based
+ * drivers.
+ *
+ * Returns an ERR_PTR on error, else the allocated disk.
+ *
+ * Context: can sleep
*/
-static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+#define blk_alloc_disk(lim, node_id) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_alloc_disk(lim, node_id, &__key); \
+})
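
/*
 * Example (illustrative only, not part of this patch): typical registration
 * flow for a bio based driver using the interfaces declared in this header.
 * An empty queue_limits accepts the defaults; setting disk->fops and the
 * capacity is elided. NUMA_NO_NODE, IS_ERR() and PTR_ERR() come from the
 * usual core headers.
 */
static int example_register_disk(void)
{
	struct queue_limits lim = { };
	struct gendisk *disk;
	int err;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	err = add_disk(disk);
	if (err)
		put_disk(disk);
	return err;
}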
+
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt));
+#define register_blkdev(major, name) \
+ __register_blkdev(major, name, NULL)
+void unregister_blkdev(unsigned int major, const char *name);
+
+bool disk_check_media_change(struct gendisk *disk);
+void set_capacity(struct gendisk *disk, sector_t size);
+
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
+#else
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec.bv_len;
- return blk_rq_bytes(rq);
+ return 0;
}
-
-/*
- * Return the first full biovec in the request. The caller needs to check that
- * there are any bvecs before calling this helper.
- */
-static inline struct bio_vec req_bvec(struct request *rq)
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec;
- return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- int op)
-{
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors,
- UINT_MAX >> SECTOR_SHIFT);
+dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
+void blk_request_module(dev_t devt);
- if (unlikely(op == REQ_OP_WRITE_SAME))
- return q->limits.max_write_same_sectors;
+extern int blk_register_queue(struct gendisk *disk);
+extern void blk_unregister_queue(struct gendisk *disk);
+void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
- if (unlikely(op == REQ_OP_WRITE_ZEROES))
- return q->limits.max_write_zeroes_sectors;
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
+extern void blk_queue_exit(struct request_queue *q);
+extern void blk_sync_queue(struct request_queue *q);
- return q->limits.max_sectors;
-}
+/* Helper to convert REQ_OP_XXX to its string format XXX */
+extern const char *blk_op_str(enum req_op op);
-/*
- * Return maximum size of a request at given offset. Only valid for
- * file system requests.
- */
-static inline unsigned int blk_max_size_offset(struct request_queue *q,
- sector_t offset)
-{
- if (!q->limits.chunk_sectors)
- return q->limits.max_sectors;
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
+const char *blk_status_to_str(blk_status_t status);
- return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1))));
-}
+/* only poll the hardware once; do not keep polling until a completion is found */
+#define BLK_POLL_ONESHOT (1 << 0)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
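
/*
 * Example (illustrative only, not part of this patch): busy-waiting for a
 * polled bio to complete. The done() callback is a hypothetical stand-in for
 * however the caller tracks completion (e.g. a flag set from the bio's
 * end_io handler).
 */
static void example_poll_until_done(struct bio *bio, bool (*done)(void *),
				    void *arg)
{
	while (!done(arg))
		bio_poll(bio, NULL, 0);
}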
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
- sector_t offset)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- struct request_queue *q = rq->q;
-
- if (blk_rq_is_passthrough(rq))
- return q->limits.max_hw_sectors;
+ return bdev->bd_queue; /* this is never NULL */
+}
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
- return blk_queue_get_max_sectors(q, req_op(rq));
+/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
+const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
- return min(blk_max_size_offset(q, offset),
- blk_queue_get_max_sectors(q, req_op(rq)));
+static inline unsigned int bio_zone_no(struct bio *bio)
+{
+ return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-static inline unsigned int blk_rq_count_bios(struct request *rq)
+static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
- unsigned int nr_bios = 0;
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- nr_bios++;
-
- return nr_bios;
+ return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-void blk_steal_bios(struct bio_list *list, struct request *rq);
-
/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
+ * Return how much of the chunk is left to be used for I/O at a given offset.
*/
-extern bool blk_update_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
+static inline unsigned int blk_chunk_sectors_left(sector_t offset,
+ unsigned int chunk_sectors)
+{
+ if (unlikely(!is_power_of_2(chunk_sectors)))
+ return chunk_sectors - sector_div(offset, chunk_sectors);
+ return chunk_sectors - (offset & (chunk_sectors - 1));
+}
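
/*
 * Example (illustrative): with chunk_sectors = 128 and offset = 200,
 * blk_chunk_sectors_left() returns 128 - (200 & 127) = 128 - 72 = 56, the
 * number of sectors an I/O at that offset may use before crossing into the
 * next chunk.
 */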
-extern void blk_abort_request(struct request *);
+/**
+ * queue_limits_start_update - start an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function starts an atomic update of the queue limits. It takes a lock
+ * to prevent other updates and returns a snapshot of the current limits that
+ * the caller can modify. The caller must call queue_limits_commit_update()
+ * to finish the update.
+ *
+ * Context: process context. The caller must have frozen the queue or ensured
+ * that there is no outstanding I/O by other means.
+ */
+static inline struct queue_limits
+queue_limits_start_update(struct request_queue *q)
+ __acquires(q->limits_lock)
+{
+ mutex_lock(&q->limits_lock);
+ return q->limits;
+}
+int queue_limits_commit_update(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
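
/*
 * Example (illustrative only, not part of this patch): the intended
 * snapshot-modify-commit pattern. The lock taken by
 * queue_limits_start_update() is dropped by the commit; per the kernel-doc
 * above, the caller must have frozen the queue or otherwise excluded
 * outstanding I/O across the update.
 */
static int example_update_max_sectors(struct request_queue *q,
				      unsigned int new_max)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(q);
	lim.max_sectors = new_max;
	return queue_limits_commit_update(q, &lim);
}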
/*
* Access functions for manipulating queue properties
*/
-extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
unsigned short);
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+ unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
 		unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+void blk_queue_zone_write_granularity(struct request_queue *q,
+ unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
- sector_t offset);
+void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset, const char *pfx);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
@@ -1148,51 +934,27 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-extern void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev);
-/*
- * Number of physical segments as sent to the device.
- *
- * Normally this is the number of discontiguous data segments sent by the
- * submitter. But for data-less command like discard we might have no
- * actual data segments submitted, but the driver might have to add it's
- * own special payload. In that case we still return 1 here so that this
- * special payload will be mapped.
- */
-static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return 1;
- return rq->nr_phys_segments;
-}
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
/*
- * Number of discard segments (or ranges) the driver needs to fill in.
- * Each discard bio merged into a request is counted as one segment.
+ * Elevator features for blk_queue_required_elevator_features:
*/
-static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-{
- return max_t(unsigned short, rq->nr_phys_segments, 1);
-}
+/* Supports zoned block devices sequential write constraint */
+#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist, struct scatterlist **last_sg);
-static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
-{
- struct scatterlist *last_sg = NULL;
-
- return __blk_rq_map_sg(q, rq, sglist, &last_sg);
-}
-extern void blk_dump_rq_flags(struct request *, char *);
+extern void blk_queue_required_elevator_features(struct request_queue *q,
+ unsigned int features);
+extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ struct device *dev);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+
+void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK
/*
@@ -1203,19 +965,24 @@ extern void blk_set_queue_dying(struct request_queue *);
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- struct list_head mq_list; /* blk-mq requests */
- struct list_head cb_list; /* md requires an unplug callback */
+ struct request *mq_list; /* blk-mq requests */
+
+ /* if ios_left is > 1, we can batch tag/rq allocations */
+ struct request *cached_rq;
+ u64 cur_ktime;
+ unsigned short nr_ios;
+
unsigned short rq_count;
+
bool multiple_queues;
- bool nowait;
+ bool has_elevator;
+
+ struct list_head cb_list; /* md requires an unplug callback */
};
-#define BLK_MAX_REQUEST_COUNT 16
-#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1227,63 +994,56 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-static inline void blk_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
- struct blk_plug *plug = tsk->plug;
-
if (plug)
- blk_flush_plug_list(plug, false);
+ __blk_flush_plug(plug, async);
}
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+/*
+ * tsk == current here
+ */
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
if (plug)
- blk_flush_plug_list(plug, true);
-}
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
-{
- struct blk_plug *plug = tsk->plug;
-
- return plug &&
- (!list_empty(&plug->mq_list) ||
- !list_empty(&plug->cb_list));
+ plug->cur_ktime = 0;
+ current->flags &= ~PF_BLOCK_TS;
}
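
/*
 * Example (illustrative only, not part of this patch): plugging a batch of
 * bio submissions so the block layer can merge and issue them together.
 * submit_bio_noacct() is the entry point declared in this header; ordinary
 * callers would typically go through submit_bio().
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio_noacct(bios[i]);
	blk_finish_plug(&plug);
}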
-int blkdev_issue_flush(struct block_device *, gfp_t);
+int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};
-static inline void blk_start_plug(struct blk_plug *plug)
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
{
}
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline void blk_start_plug(struct blk_plug *plug)
{
}
-static inline void blk_flush_plug(struct task_struct *task)
+static inline void blk_finish_plug(struct blk_plug *plug)
{
}
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
- return false;
}
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+static inline int blkdev_issue_flush(struct block_device *bdev)
{
return 0;
}
@@ -1296,16 +1056,12 @@ static inline long nr_blockdev_pages(void)
extern void blk_io_schedule(void);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
-
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags,
- struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
@@ -1324,7 +1080,7 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
SECTOR_SHIFT),
nr_blocks << (sb->s_blocksize_bits -
SECTOR_SHIFT),
- gfp_mask, flags);
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
@@ -1337,16 +1093,27 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+ return bdev->bd_partno;
+}
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
+/*
+ * Default upper limit for the software max_sectors limit used for
+ * regular file system I/O. This can be increased through sysfs.
+ *
+ * Not to be confused with the max_hw_sector limit that is entirely
+ * controlled by the driver, usually based on hardware limits.
+ */
+#define BLK_DEF_MAX_SECTORS_CAP 2560u
+
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
@@ -1362,6 +1129,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
return q->limits.max_sectors;
}
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
@@ -1384,7 +1156,21 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
- return q->limits.max_zone_append_sectors;
+ const struct queue_limits *l = &q->limits;
+
+ return min(l->max_zone_append_sectors, l->max_sectors);
+}
+
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+ return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+}
+
+static inline unsigned int bdev_max_segments(struct block_device *bdev)
+{
+ return queue_max_segments(bdev_get_queue(bdev));
}
static inline unsigned queue_logical_block_size(const struct request_queue *q)
@@ -1432,85 +1218,35 @@ static inline int bdev_io_opt(struct block_device *bdev)
return queue_io_opt(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(const struct request_queue *q)
+static inline unsigned int
+queue_zone_write_granularity(const struct request_queue *q)
{
- if (q->limits.misaligned)
- return -1;
-
- return q->limits.alignment_offset;
+ return q->limits.zone_write_granularity;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_zone_write_granularity(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
- << SECTOR_SHIFT;
-
- return (granularity + lim->alignment_offset - alignment) % granularity;
+ return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int bdev_alignment_offset(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q->limits.misaligned)
- return -1;
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->alignment_offset;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
- return q->limits.alignment_offset;
-}
-
-static inline int queue_discard_alignment(const struct request_queue *q)
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
- if (q->limits.discard_misaligned)
- return -1;
-
- return q->limits.discard_alignment;
+ return bdev_get_queue(bdev)->limits.max_discard_sectors;
}
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
-
- if (!lim->max_discard_sectors)
- return 0;
-
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> SECTOR_SHIFT;
- granularity = lim->discard_granularity >> SECTOR_SHIFT;
- if (!granularity)
- return 0;
-
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
-
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
-
- /* Turn it back into bytes, gaah */
- return offset << SECTOR_SHIFT;
+ return bdev_get_queue(bdev)->limits.discard_granularity;
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->discard_alignment;
-
- return q->limits.discard_alignment;
-}
-
-static inline unsigned int bdev_write_same(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_same_sectors;
-
- return 0;
+ return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
@@ -1523,395 +1259,188 @@ static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
return 0;
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
+static inline bool bdev_nonrot(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zoned_model(q);
-
- return BLK_ZONED_NONE;
+ return blk_queue_nonrot(bdev_get_queue(bdev));
}
-static inline bool bdev_is_zoned(struct block_device *bdev)
+static inline bool bdev_synchronous(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_is_zoned(q);
-
- return false;
+ return test_bit(QUEUE_FLAG_SYNCHRONOUS,
+ &bdev_get_queue(bdev)->queue_flags);
}
-static inline sector_t bdev_zone_sectors(struct block_device *bdev)
+static inline bool bdev_stable_writes(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zone_sectors(q);
- return 0;
+ return test_bit(QUEUE_FLAG_STABLE_WRITES,
+ &bdev_get_queue(bdev)->queue_flags);
}
-static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+static inline bool bdev_write_cache(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return queue_max_open_zones(q);
- return 0;
-}
-
-static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return queue_max_active_zones(q);
- return 0;
+ return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}
-static inline int queue_dma_alignment(const struct request_queue *q)
+static inline bool bdev_fua(struct block_device *bdev)
{
- return q ? q->dma_alignment : 511;
+ return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
- unsigned int len)
+static inline bool bdev_nowait(struct block_device *bdev)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- return !(addr & alignment) && !(len & alignment);
+ return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
-/* assumes size > 256 */
-static inline unsigned int blksize_bits(unsigned int size)
+static inline bool bdev_is_zoned(struct block_device *bdev)
{
- unsigned int bits = 8;
- do {
- bits++;
- size >>= 1;
- } while (size > 256);
- return bits;
+ return blk_queue_is_zoned(bdev_get_queue(bdev));
}
-static inline unsigned int block_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
- return 1 << bdev->bd_inode->i_blkbits;
+ return disk_zone_no(bdev->bd_disk, sec);
}
-int kblockd_schedule_work(struct work_struct *work);
-int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-
-#define MODULE_ALIAS_BLOCKDEV(major,minor) \
- MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
-#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
- MODULE_ALIAS("block-major-" __stringify(major) "-*")
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
- BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
-};
-
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-typedef void (integrity_prepare_fn) (struct request *);
-typedef void (integrity_complete_fn) (struct request *, unsigned int);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- integrity_prepare_fn *prepare_fn;
- integrity_complete_fn *complete_fn;
- const char *name;
-};
-
-extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
-
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+/* Whether write serialization is required for @op on zoned devices. */
+static inline bool op_needs_zoned_write_locking(enum req_op op)
{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!bi->profile)
- return NULL;
-
- return bi;
+ return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+ enum req_op op)
{
- return blk_get_integrity(bdev->bd_disk);
+ return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
+static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
- return q->integrity.profile;
-}
+ struct request_queue *q = bdev_get_queue(bdev);
-static inline bool blk_integrity_rq(struct request *rq)
-{
- return rq->cmd_flags & REQ_INTEGRITY;
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return q->limits.chunk_sectors;
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
+static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- q->limits.max_integrity_segments = segs;
+ return sector & (bdev_zone_sectors(bdev) - 1);
}
-static inline unsigned short
-queue_max_integrity_segments(const struct request_queue *q)
+static inline bool bdev_is_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- return q->limits.max_integrity_segments;
+ return bdev_offset_from_zone_start(bdev, sector) == 0;
}
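
/*
 * Example (illustrative): with 524288-sector zones,
 * bdev_offset_from_zone_start(bdev, 524290) = 524290 & 524287 = 2, so
 * bdev_is_zone_start() is false for that sector.
 */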
-/**
- * bio_integrity_intervals - Return number of integrity intervals for a bio
- * @bi: blk_integrity profile for device
- * @sectors: Size of the bio in 512-byte sectors
- *
- * Description: The block layer calculates everything in 512 byte
- * sectors but integrity metadata is done in terms of the data integrity
- * interval size of the storage device. Convert the block layer sectors
- * to the appropriate number of integrity intervals.
- */
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
+static inline int queue_dma_alignment(const struct request_queue *q)
{
- return sectors >> (bi->interval_exp - 9);
+ return q ? q->limits.dma_alignment : 511;
}
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+ return queue_dma_alignment(bdev_get_queue(bdev));
}
-/*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
- */
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline bool bdev_iter_is_aligned(struct block_device *bdev,
+ struct iov_iter *iter)
{
- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
- return NULL;
- return rq->bio->bi_integrity->bip_vec;
+ return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
+ bdev_logical_block_size(bdev) - 1);
}
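
/*
 * Example (illustrative): for a device advertising a dma_alignment mask of
 * 3 (4-byte aligned buffers) and 512-byte logical blocks,
 * bdev_iter_is_aligned() requires every segment address to be 4-byte
 * aligned and every segment length to be a multiple of 512 bytes.
 */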
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
-
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- return NULL;
-}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
-{
- return false;
-}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
-{
- return 0;
-}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
-{
-}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
-static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
-{
- return 0;
-}
-static inline bool blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
-{
- return true;
-}
-static inline bool blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
+ unsigned int len)
{
- return true;
+ unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ return !(addr & alignment) && !(len & alignment);
}
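
/*
 * Example (illustrative): with the default dma_alignment mask of 511 and a
 * zero dma_pad_mask, blk_rq_aligned() accepts a buffer only if both its
 * address and its length are multiples of 512.
 */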
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
+/* assumes size > 256 */
+static inline unsigned int blksize_bits(unsigned int size)
{
- return 0;
+ return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
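
/*
 * Example (illustrative): blksize_bits(4096) = order_base_2(4096 >> 9) + 9
 * = order_base_2(8) + 9 = 3 + 9 = 12, and 1 << 12 == 4096.
 */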
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
+static inline unsigned int block_size(struct block_device *bdev)
{
- return 0;
+ return 1 << bdev->bd_inode->i_blkbits;
}
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
-{
- return NULL;
-}
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#define MODULE_ALIAS_BLOCKDEV(major,minor) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-*")
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
-
-void blk_ksm_unregister(struct request_queue *q);
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
- struct request_queue *q)
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
{
return true;
}
-static inline void blk_ksm_unregister(struct request_queue *q) { }
-
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
+};
struct block_device_operations {
- blk_qc_t (*submit_bio) (struct bio *bio);
- int (*open) (struct block_device *, fmode_t);
- void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ void (*submit_bio)(struct bio *bio);
+ int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags);
+ int (*open)(struct gendisk *disk, blk_mode_t mode);
+ void (*release)(struct gendisk *disk);
+ int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
void (*unlock_native_capacity) (struct gendisk *);
- int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
+ int (*set_read_only)(struct block_device *bdev, bool ro);
+ void (*free_disk)(struct gendisk *disk);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
struct module *owner;
const struct pr_ops *pr_ops;
+
+ /*
+	 * Special callback for probing the GPT entry at a given sector.
+ * Needed by Android devices, used by GPT scanner and MMC blk
+ * driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
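
A minimal sketch of a bio-based driver filling in the reworked callbacks
(names are hypothetical; note that ->submit_bio() now returns void and
->open()/->release() take a gendisk instead of a block_device):

static void sketch_submit_bio(struct bio *bio)
{
	/* a real driver would service the bio here */
	bio_endio(bio);
}

static const struct block_device_operations sketch_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= sketch_submit_bio,
};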
#ifdef CONFIG_COMPAT
-extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif
-extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
-extern int bdev_read_page(struct block_device *, sector_t, struct page *);
-extern int bdev_write_page(struct block_device *, sector_t, struct page *,
- struct writeback_control *);
-
-#ifdef CONFIG_BLK_DEV_ZONED
-bool blk_req_needs_zone_write_lock(struct request *rq);
-bool blk_req_zone_write_trylock(struct request *rq);
-void __blk_req_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_unlock(struct request *rq);
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
- if (blk_req_needs_zone_write_lock(rq))
- __blk_req_zone_write_lock(rq);
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
- if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
- __blk_req_zone_write_unlock(rq);
-}
-
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return rq->q->seq_zones_wlock &&
- test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- if (!blk_req_needs_zone_write_lock(rq))
- return true;
- return !blk_req_zone_is_write_locked(rq);
-}
-#else
-static inline bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- return false;
-}
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
-}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- return true;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline void blk_wake_io_task(struct task_struct *waiter)
{
/*
@@ -1925,37 +1454,29 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
wake_up_process(waiter);
}
-unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
- unsigned int op);
-void disk_end_io_acct(struct gendisk *disk, unsigned int op,
- unsigned long start_time);
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned int sectors, unsigned long start_time);
-/**
- * bio_start_io_acct - start I/O accounting for bio based drivers
- * @bio: bio to start account for
- *
- * Returns the start time that should be passed back to bio_end_io_acct().
- */
-static inline unsigned long bio_start_io_acct(struct bio *bio)
-{
- return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
-}
+unsigned long bio_start_io_acct(struct bio *bio);
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev);
/**
* bio_end_io_acct - end I/O accounting for bio based drivers
* @bio: bio to end account for
- * @start: start time returned by bio_start_io_acct()
+ * @start_time: start time returned by bio_start_io_acct()
*/
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
- return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
+ return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);
-const char *bdevname(struct block_device *bdev, char *buffer);
-struct block_device *lookup_bdev(const char *);
+int lookup_bdev(const char *pathname, dev_t *dev);
void blkdev_show(struct seq_file *seqf, off_t offset);
@@ -1967,24 +1488,74 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
#define BLKDEV_MAJOR_MAX 0
#endif
-int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
-struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
-int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
- void *holder);
-void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
- void *holder);
-void blkdev_put(struct block_device *bdev, fmode_t mode);
+struct blk_holder_ops {
+ void (*mark_dead)(struct block_device *bdev, bool surprise);
+
+ /*
+ * Sync the file system mounted on the block device.
+ */
+ void (*sync)(struct block_device *bdev);
+
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
+
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
+
+ /*
+ * If needed, get a reference to the holder.
+ */
+ void (*get_holder)(void *holder);
+
+ /*
+ * Release the holder.
+ */
+ void (*put_holder)(void *holder);
+};
+
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
+extern const struct blk_holder_ops fs_holder_ops;
+
+/*
+ * Return the correct open flags for blkdev_get_by_* for super block flags
+ * as stored in sb->s_flags.
+ */
+#define sb_open_mode(flags) \
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+
+struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+ const struct blk_holder_ops *hops);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
+
+/* just for blk-cgroup, don't use elsewhere */
+struct block_device *blkdev_get_no_open(dev_t dev);
+void blkdev_put_no_open(struct block_device *bdev);
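
A sketch of how a filesystem might open its backing device with the new
file-based API, using its super_block as the holder as fs_holder_ops requires
(the wrapper function is hypothetical):

static struct file *sketch_open_backing_dev(struct super_block *sb,
					    const char *path)
{
	return bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
				      sb, &fs_holder_ops);
}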
struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdget(dev_t);
-struct block_device *bdgrab(struct block_device *bdev);
-void bdput(struct block_device *);
+struct block_device *file_bdev(struct file *bdev_file);
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
+void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
+void printk_all_partitions(void);
+int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
@@ -1993,10 +1564,34 @@ static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
}
-#endif
-int fsync_bdev(struct block_device *bdev);
+static inline int sync_blockdev_nowait(struct block_device *bdev)
+{
+ return 0;
+}
+static inline void sync_bdevs(bool wait)
+{
+}
+static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
+{
+}
+static inline void printk_all_partitions(void)
+{
+}
+static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLOCK */
+
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
+
+struct io_comp_batch {
+ struct request *req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
+};
-struct super_block *freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
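
A sketch of a driver-side ->poll_bio() implementation wired to completion
batching (purely illustrative; the reaping logic is elided):

static int sketch_poll_bio(struct bio *bio, struct io_comp_batch *iob,
			   unsigned int flags)
{
	/*
	 * A real driver reaps its completion queue here and, when @iob is
	 * non-NULL, links finished requests into iob->req_list so the block
	 * layer can complete them in one batch via iob->complete().
	 */
	return 0;	/* number of completions found */
}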
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3b6ff5902edc..122c62e561fc 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -2,11 +2,12 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
+#include <linux/blk_types.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -23,18 +24,14 @@ struct blk_trace {
u32 pid;
u32 dev;
struct dentry *dir;
- struct dentry *dropped_file;
- struct dentry *msg_file;
struct list_head running_list;
atomic_t dropped;
};
-struct blkcg;
-
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(3, 4)
-void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
+__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -49,14 +46,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
 * NOTE: Cannot use 'static inline' due to the presence of varargs...
*
**/
-#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
do { \
struct blk_trace *bt; \
\
rcu_read_lock(); \
bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
- __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ __blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
@@ -75,34 +72,27 @@ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
return ret;
}
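
A sketch of emitting a free-form note into an active blktrace stream with the
blk_add_trace_msg() wrapper above (function name and message are
hypothetical; the message is dropped when tracing is disabled):

static void sketch_note_requeue(struct request_queue *q, sector_t sector)
{
	blk_add_trace_msg(q, "requeue sector %llu",
			  (unsigned long long)sector);
}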
-extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
- void *data, size_t len);
+extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct block_device *bdev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
-extern void blk_trace_remove_sysfs(struct device *dev);
-extern int blk_trace_init_sysfs(struct device *dev);
-
-extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
-# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
-# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
-# define blk_trace_remove_sysfs(dev) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
-static inline int blk_trace_init_sysfs(struct device *dev)
+
+static inline int blk_trace_remove(struct request_queue *q)
{
- return 0;
+ return -ENOTTY;
}
-
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
@@ -120,7 +110,7 @@ struct compat_blk_user_trace_setup {
#endif
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 31c9e323a391..4d4a62d49341 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -33,8 +33,8 @@ struct bma150_cfg {
 unsigned char lg_hyst; /* Low-G hysteresis */
unsigned char lg_dur; /* Low-G duration */
unsigned char lg_thres; /* Low-G threshold */
- unsigned char range; /* one of BMA0150_RANGE_xxx */
- unsigned char bandwidth; /* one of BMA0150_BW_xxx */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
};
struct bma150_platform_data {
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 9903088891fa..ca73940e26df 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -7,18 +7,51 @@
* Author: Masami Hiramatsu <mhiramat@kernel.org>
*/
+#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig runs
+ * the parser sanity test.
+ * This does NOT mean linux/bootconfig.h is available in user space.
+ * However, if you change this file, please make sure that tools/bootconfig
+ * still builds and runs correctly.
+ */
+#endif
#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
#define BOOTCONFIG_MAGIC_LEN 12
+#define BOOTCONFIG_ALIGN_SHIFT 2
+#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
+#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
+
+/**
+ * xbc_calc_checksum() - Calculate checksum of bootconfig
+ * @data: Bootconfig data.
+ * @size: The size of the bootconfig data.
+ *
+ * Calculate the checksum value of the bootconfig data.
+ * The checksum will be used with the BOOTCONFIG_MAGIC and the size for
+ * embedding the bootconfig in the initrd image.
+ */
+static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
+{
+ unsigned char *p = data;
+ uint32_t ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
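
A sketch of the footer appended after the initrd image, tying the checksum to
the magic and size mentioned above (little-endian layout assumed; see
tools/bootconfig for the authoritative code):

static void sketch_fill_footer(unsigned char *footer, uint32_t size,
			       uint32_t csum)
{
	/* [bootconfig data][size (le32)][checksum (le32)][#BOOTCONFIG\n] */
	memcpy(footer, &size, sizeof(size));
	memcpy(footer + 4, &csum, sizeof(csum));
	memcpy(footer + 8, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
}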
/* XBC tree node */
struct xbc_node {
- u16 next;
- u16 child;
- u16 parent;
- u16 data;
+ uint16_t next;
+ uint16_t child;
+ uint16_t parent;
+ uint16_t data;
} __attribute__ ((__packed__));
#define XBC_KEY 0
@@ -26,7 +59,7 @@ struct xbc_node {
/* Maximum size of boot config is 32KB - 1 */
#define XBC_DATA_MAX (XBC_VALUE - 1)
-#define XBC_NODE_MAX 1024
+#define XBC_NODE_MAX 8192
#define XBC_KEYLEN_MAX 256
#define XBC_DEPTH_MAX 16
@@ -68,7 +101,7 @@ static inline __init bool xbc_node_is_key(struct xbc_node *node)
*/
static inline __init bool xbc_node_is_array(struct xbc_node *node)
{
- return xbc_node_is_value(node) && node->next != 0;
+ return xbc_node_is_value(node) && node->child != 0;
}
/**
@@ -77,6 +110,8 @@ static inline __init bool xbc_node_is_array(struct xbc_node *node)
*
 * Test whether @node is a leaf key node, i.e. a key node that has a value node
 * or no child. Returns true if it is a leaf node, or false if not.
+ * Note that the leaf node can have subkey nodes in addition to the
+ * value node.
*/
static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
{
@@ -85,7 +120,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
}
/* Tree-based key-value access APIs */
-struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
const char *key);
const char * __init xbc_node_find_value(struct xbc_node *parent,
@@ -123,7 +158,24 @@ xbc_find_value(const char *key, struct xbc_node **vnode)
*/
static inline struct xbc_node * __init xbc_find_node(const char *key)
{
- return xbc_node_find_child(NULL, key);
+ return xbc_node_find_subkey(NULL, key);
+}
+
+/**
+ * xbc_node_get_subkey() - Return the first subkey node if exists
+ * @node: Parent node
+ *
+ * Return the first subkey node of the @node. If the @node has no child
+ * or only a value node, this will return NULL.
+ */
+static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
+{
+ struct xbc_node *child = xbc_node_get_child(node);
+
+ if (child && xbc_node_is_value(child))
+ return xbc_node_get_next(child);
+ else
+ return child;
}
/**
@@ -137,7 +189,7 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
*/
#define xbc_array_for_each_value(anode, value) \
for (value = xbc_node_get_data(anode); anode != NULL ; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
@@ -146,12 +198,25 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @child: Iterated XBC node.
*
 * Iterate child nodes of @parent. Each child node is stored to @child.
+ * The @child can be a mixture of a value node and subkey nodes.
*/
#define xbc_node_for_each_child(parent, child) \
for (child = xbc_node_get_child(parent); child != NULL ; \
child = xbc_node_get_next(child))
/**
+ * xbc_node_for_each_subkey() - Iterate child subkey nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate subkey nodes of @parent. Each subkey node is stored to @child;
+ * unlike xbc_node_for_each_child(), the value node is skipped.
+ */
+#define xbc_node_for_each_subkey(parent, child) \
+ for (child = xbc_node_get_subkey(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
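A sketch of walking only the subkeys of a node with the macro above (kernel
side, since it uses pr_info(); the data of a key node is its key word, and
the function name is hypothetical):

static void __init sketch_walk_subkeys(struct xbc_node *parent)
{
	struct xbc_node *child;

	xbc_node_for_each_subkey(parent, child)
		pr_info("subkey: %s\n", xbc_node_get_data(child));
}
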
+/**
 * xbc_node_for_each_array_value() - Iterate array entries of given key
* @node: An XBC node.
* @key: A key string searched under @node
@@ -159,16 +224,16 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @value: Iterated value of array entry.
*
* Iterate array entries of given @key under @node. Each array entry node
- * is stroed to @anode and @value. If the @node doesn't have @key node,
+ * is stored to @anode and @value. If the @node doesn't have @key node,
* it does nothing.
* Note that even if the found key node has only one value (not array)
- * this executes block once. Hoever, if the found key node has no value
+ * this executes block once. However, if the found key node has no value
* (key-only node), this does nothing. So don't use this for testing the
* key-value pair existence.
*/
#define xbc_node_for_each_array_value(node, key, anode, value) \
for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
@@ -216,13 +281,22 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node,
}
/* XBC node initializer */
-int __init xbc_init(char *buf, const char **emsg, int *epos);
+int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+/* XBC node and size information */
+int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_destroy_all(void);
+void __init xbc_exit(void);
-/* Debug dump functions */
-void __init xbc_debug_dump(void);
+/* XBC embedded bootconfig data in kernel */
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+const char * __init xbc_get_embedded_bootconfig(size_t *size);
+#else
+static inline const char *xbc_get_embedded_bootconfig(size_t *size)
+{
+ return NULL;
+}
+#endif
#endif
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
new file mode 100644
index 000000000000..cffa38a73618
--- /dev/null
+++ b/include/linux/bootmem_info.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BOOTMEM_INFO_H
+#define __LINUX_BOOTMEM_INFO_H
+
+#include <linux/mm.h>
+#include <linux/kmemleak.h>
+
+/*
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
+ */
+enum {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+
+void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type);
+void put_page_bootmem(struct page *page);
+
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy allocator will already be marked reserved in the memmap. For
+ * those pages, we can call this function to free them to the buddy
+ * allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ unsigned long magic = page->index;
+
+ /*
+ * The reserve_bootmem_region sets the reserved flag on bootmem
+ * pages.
+ */
+ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
+
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ VM_BUG_ON_PAGE(1, page);
+}
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline void put_page_bootmem(struct page *page)
+{
+}
+
+static inline void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type)
+{
+}
+
+static inline void free_bootmem_page(struct page *page)
+{
+ kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
+ free_reserved_page(page);
+}
+#endif
+
+#endif /* __LINUX_BOOTMEM_INFO_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index a19519f4241d..fc53e0ad56d9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,9 +2,10 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
+#include <linux/instruction_pointer.h>
#include <linux/preempt.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
@@ -32,4 +33,10 @@ static inline void local_bh_enable(void)
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#ifdef CONFIG_PREEMPT_RT
+extern bool local_bh_blocked(void);
+#else
+static inline bool local_bh_blocked(void) { return false; }
+#endif
+
#endif /* _LINUX_BH_H */
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
new file mode 100644
index 000000000000..0985221d5478
--- /dev/null
+++ b/include/linux/bpf-cgroup-defs.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_CGROUP_DEFS_H
+#define _BPF_CGROUP_DEFS_H
+
+#ifdef CONFIG_CGROUP_BPF
+
+#include <linux/list.h>
+#include <linux/percpu-refcount.h>
+#include <linux/workqueue.h>
+
+struct bpf_prog_array;
+
+#ifdef CONFIG_BPF_LSM
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
+#else
+#define CGROUP_LSM_NUM 0
+#endif
+
+enum cgroup_bpf_attach_type {
+ CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+ CGROUP_INET_INGRESS = 0,
+ CGROUP_INET_EGRESS,
+ CGROUP_INET_SOCK_CREATE,
+ CGROUP_SOCK_OPS,
+ CGROUP_DEVICE,
+ CGROUP_INET4_BIND,
+ CGROUP_INET6_BIND,
+ CGROUP_INET4_CONNECT,
+ CGROUP_INET6_CONNECT,
+ CGROUP_UNIX_CONNECT,
+ CGROUP_INET4_POST_BIND,
+ CGROUP_INET6_POST_BIND,
+ CGROUP_UDP4_SENDMSG,
+ CGROUP_UDP6_SENDMSG,
+ CGROUP_UNIX_SENDMSG,
+ CGROUP_SYSCTL,
+ CGROUP_UDP4_RECVMSG,
+ CGROUP_UDP6_RECVMSG,
+ CGROUP_UNIX_RECVMSG,
+ CGROUP_GETSOCKOPT,
+ CGROUP_SETSOCKOPT,
+ CGROUP_INET4_GETPEERNAME,
+ CGROUP_INET6_GETPEERNAME,
+ CGROUP_UNIX_GETPEERNAME,
+ CGROUP_INET4_GETSOCKNAME,
+ CGROUP_INET6_GETSOCKNAME,
+ CGROUP_UNIX_GETSOCKNAME,
+ CGROUP_INET_SOCK_RELEASE,
+ CGROUP_LSM_START,
+ CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
+ MAX_CGROUP_BPF_ATTACH_TYPE
+};
+
+struct cgroup_bpf {
+ /* array of effective progs in this cgroup */
+ struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* attached progs to this cgroup and attach flags;
+ * when flags == 0 or BPF_F_ALLOW_OVERRIDE, the progs list will
+ * have either zero or one element;
+ * when BPF_F_ALLOW_MULTI, the list can have up to BPF_CGROUP_MAX_PROGS
+ */
+ struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
+ /* temp storage for effective prog array used by prog_attach/detach */
+ struct bpf_prog_array *inactive;
+
+ /* reference counter used to detach bpf programs after cgroup removal */
+ struct percpu_ref refcnt;
+
+ /* cgroup_bpf is released using a work queue */
+ struct work_struct release_work;
+};
+
+#else /* CONFIG_CGROUP_BPF */
+struct cgroup_bpf {};
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 64f367044e25..fb3c3e7181e6 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -3,11 +3,12 @@
#define _BPF_CGROUP_H
#include <linux/bpf.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
-#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
+#include <net/sock.h>
#include <uapi/linux/bpf.h>
struct sock;
@@ -20,14 +21,61 @@ struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
+struct task_struct;
+
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
+
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
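
A sketch of how callers are expected to translate a uapi attach type before
indexing per-cgroup state, bailing out on non-cgroup types (the wrapper
function is hypothetical):

static int sketch_attach_index(enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype = to_cgroup_bpf_attach_type(type);

	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;
	return atype;
}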
-DECLARE_PER_CPU(struct bpf_cgroup_storage*,
- bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+#undef CGROUP_ATYPE
+
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -59,94 +107,54 @@ struct bpf_cgroup_link {
};
struct bpf_prog_list {
- struct list_head node;
+ struct hlist_node node;
struct bpf_prog *prog;
struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
-struct bpf_prog_array;
-
-struct cgroup_bpf {
- /* array of effective progs in this cgroup */
- struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
-
- /* attached progs to this cgroup and attach flags
- * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
- * have either zero or one element
- * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
- */
- struct list_head progs[MAX_BPF_ATTACH_TYPE];
- u32 flags[MAX_BPF_ATTACH_TYPE];
-
- /* list of cgroup shared storages */
- struct list_head storages;
-
- /* temp storage for effective prog array used by prog_attach/detach */
- struct bpf_prog_array *inactive;
-
- /* reference counter used to detach bpf programs after cgroup removal */
- struct percpu_ref refcnt;
-
- /* cgroup_bpf is released using a work queue */
- struct work_struct release_work;
-};
-
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
-int __cgroup_bpf_attach(struct cgroup *cgrp,
- struct bpf_prog *prog, struct bpf_prog *replace_prog,
- struct bpf_cgroup_link *link,
- enum bpf_attach_type type, u32 flags);
-int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_cgroup_link *link,
- enum bpf_attach_type type);
-int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
-/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
-int cgroup_bpf_attach(struct cgroup *cgrp,
- struct bpf_prog *prog, struct bpf_prog *replace_prog,
- struct bpf_cgroup_link *link, enum bpf_attach_type type,
- u32 flags);
-int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type);
-int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
- enum bpf_attach_type type,
- void *t_ctx);
+ int *uaddrlen,
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
- short access, enum bpf_attach_type type);
+ short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
- void **buf, size_t *pcount, loff_t *ppos,
- enum bpf_attach_type type);
+ char **buf, size_t *pcount, loff_t *ppos,
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
- int *optname, char __user *optval,
+ int *optname, sockptr_t optval,
int *optlen, char **kernel_optval);
+
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen, int max_optlen,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
int retval);
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -156,15 +164,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
- *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- enum bpf_cgroup_storage_type stype;
-
- for_each_cgroup_storage_type(stype)
- this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
-}
-
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
void *key, bool locked);
@@ -181,13 +180,26 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
+/* Opportunistic check to see whether we have any BPF program attached */
+static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ enum cgroup_bpf_attach_type type)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_prog_array *array;
+
+ array = rcu_access_pointer(cgrp->bpf.effective[type]);
+ return array != &bpf_empty_prog_array.hdr;
+}
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk && \
+ sk_fullsock(sk)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
@@ -195,110 +207,161 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
+ if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \
+ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL); \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, NULL, NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- t_ctx); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
-
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
-
-#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
- sk->sk_prot->pre_connect)
-
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
-
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
-
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
-
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
-
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
-
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
-
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via the upper bits of the return code. The only flag that is supported
+ * (at bit position 0) indicates that the CAP_NET_BIND_SERVICE capability
+ * check should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
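
A sketch of an inet_bind()-style caller consuming the extra flag described
above (fragment only; names are hypothetical):

static int sketch_run_bind_hook(struct sock *sk, struct sockaddr *uaddr,
				int addr_len)
{
	u32 bind_flags = 0;
	int err;

	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
						 CGROUP_INET4_BIND,
						 &bind_flags);
	/* bind_flags may now carry BIND_NO_CAP_NET_BIND_SERVICE */
	return err;
}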
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)
+
+/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
+ * fullsock and its parent fullsock cannot be traced by
+ * sk_to_full_sk().
+ *
+ * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
+ * Its listener-sk is not attached to the rsk_listener.
+ * In this case, the caller holds the listener-sk (unlocked),
+ * sets its sock_ops->sk to req_sk, and calls this SOCK_OPS"_SK" with
+ * the listener-sk so that the cgroup-bpf-progs of the
+ * listener-sk will be run.
+ *
+ * Regardless of syncookie mode,
+ * calling bpf_setsockopt on the listener-sk does not make sense anyway,
+ * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
+ */
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
+ sock_ops, \
+ CGROUP_SOCK_OPS); \
+ __ret; \
+})
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
access, \
- BPF_CGROUP_DEVICE); \
+ CGROUP_DEVICE); \
\
__ret; \
})
@@ -307,10 +370,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
- BPF_CGROUP_SYSCTL); \
+ CGROUP_SYSCTL); \
__ret; \
})
@@ -318,7 +381,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -329,8 +393,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
- get_user(__ret, optlen); \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ copy_from_sockptr(&__ret, optlen, sizeof(int)); \
__ret; \
})
@@ -338,11 +402,25 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
- optname, optval, \
- optlen, max_optlen, \
- retval); \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
__ret; \
})
@@ -353,10 +431,13 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr,
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-struct bpf_prog;
-struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
@@ -385,8 +466,18 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
-static inline void bpf_cgroup_storage_set(
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@@ -402,31 +493,36 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h
index 722f799c1a2e..413cfa5e4b07 100644
--- a/include/linux/bpf-netns.h
+++ b/include/linux/bpf-netns.h
@@ -3,15 +3,9 @@
#define _BPF_NETNS_H
#include <linux/mutex.h>
+#include <net/netns/bpf.h>
#include <uapi/linux/bpf.h>
-enum netns_bpf_attach_type {
- NETNS_BPF_INVALID = -1,
- NETNS_BPF_FLOW_DISSECTOR = 0,
- NETNS_BPF_SK_LOOKUP,
- MAX_NETNS_BPF_ATTACH_TYPE
-};
-
static inline enum netns_bpf_attach_type
to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 55f694b63164..4f20f62f9d63 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -5,6 +5,7 @@
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+#include <uapi/linux/filter.h>
#include <linux/workqueue.h>
#include <linux/file.h>
@@ -14,12 +15,21 @@
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
-#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
+#include <linux/stddef.h>
+#include <linux/bpfptr.h>
+#include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
+#include <linux/static_call.h>
+#include <linux/memcontrol.h>
+#include <linux/cfi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -27,6 +37,7 @@ struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
+struct bpf_arena;
struct sock;
struct seq_file;
struct btf;
@@ -34,13 +45,31 @@ struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
+struct bpf_local_storage;
+struct bpf_local_storage_map;
+struct kobject;
+struct mem_cgroup;
+struct module;
+struct bpf_func_state;
+struct ftrace_ops;
+struct cgroup;
+struct bpf_token;
+struct user_namespace;
+struct super_block;
+struct inode;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
+extern struct kobject *btf_kobj;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+extern bool bpf_global_ma_set;
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+typedef unsigned int (*bpf_func_t)(const void *,
+ const struct bpf_insn *);
struct bpf_iter_seq_info {
const struct seq_operations *seq_ops;
bpf_iter_init_seq_priv_t init_seq_private;
@@ -48,7 +77,7 @@ struct bpf_iter_seq_info {
u32 seq_priv_size;
};
-/* map is generic key/value storage optionally accesible by eBPF programs */
+/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
int (*map_alloc_check)(union bpf_attr *attr);
@@ -60,27 +89,35 @@ struct bpf_map_ops {
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
+ void *value, u64 flags);
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
- int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
union bpf_attr __user *uattr);
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
- int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
- int (*map_delete_elem)(struct bpf_map *map, void *key);
- int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
- int (*map_pop_elem)(struct bpf_map *map, void *value);
- int (*map_peek_elem)(struct bpf_map *map, void *value);
+ long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+ long (*map_delete_elem)(struct bpf_map *map, void *key);
+ long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+ long (*map_pop_elem)(struct bpf_map *map, void *value);
+ long (*map_peek_elem)(struct bpf_map *map, void *value);
+ void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
- void (*map_fd_put_ptr)(void *ptr);
- u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ /* If need_defer is true, the implementation should guarantee that
+ * the to-be-put element is still alive before the bpf program, which
+	 * may manipulate it, exits.
+ */
+ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
+ int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
@@ -103,25 +140,122 @@ struct bpf_map_ops {
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
struct poll_table_struct *pts);
+ unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+ /* Functions called by bpf_local_storage maps */
+ int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
+
+	/* Misc helpers. */
+ long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
+
+ /* map_meta_equal must be implemented for maps that can be
+ * used as an inner map. It is a runtime check to ensure
+ * an inner map can be inserted to an outer map.
+ *
+	 * Some properties of the inner map have been used during
+	 * verification. When inserting an inner map at runtime,
+	 * map_meta_equal has to ensure the inserted map has the same
+	 * properties that the verifier used earlier.
+ */
+ bool (*map_meta_equal)(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
+
+ int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+ long (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
+ void *callback_ctx, u64 flags);
+
+ u64 (*map_mem_usage)(const struct bpf_map *map);
- /* BTF name and id of struct allocated by map_alloc */
- const char * const map_btf_name;
+ /* BTF id of struct allocated by map_alloc */
int *map_btf_id;
/* bpf_iter info used to open a seq_file */
const struct bpf_iter_seq_info *iter_seq_info;
};
-struct bpf_map_memory {
- u32 pages;
- struct user_struct *user;
+enum {
+ /* Support at most 10 fields in a BTF type */
+ BTF_FIELDS_MAX = 10,
};
-struct bpf_map {
- /* The first two cachelines with read-mostly members of which some
- * are also accessed in fast-path (e.g. ops, max_entries).
+enum btf_field_type {
+ BPF_SPIN_LOCK = (1 << 0),
+ BPF_TIMER = (1 << 1),
+ BPF_KPTR_UNREF = (1 << 2),
+ BPF_KPTR_REF = (1 << 3),
+ BPF_KPTR_PERCPU = (1 << 4),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
+ BPF_LIST_HEAD = (1 << 5),
+ BPF_LIST_NODE = (1 << 6),
+ BPF_RB_ROOT = (1 << 7),
+ BPF_RB_NODE = (1 << 8),
+ BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
+ BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
+ BPF_REFCOUNT = (1 << 9),
+};
+
+typedef void (*btf_dtor_kfunc_t)(void *);
+
+struct btf_field_kptr {
+ struct btf *btf;
+ struct module *module;
+ /* dtor used if btf_is_kernel(btf), otherwise the type is
+ * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
*/
- const struct bpf_map_ops *ops ____cacheline_aligned;
+ btf_dtor_kfunc_t dtor;
+ u32 btf_id;
+};
+
+struct btf_field_graph_root {
+ struct btf *btf;
+ u32 value_btf_id;
+ u32 node_offset;
+ struct btf_record *value_rec;
+};
+
+struct btf_field {
+ u32 offset;
+ u32 size;
+ enum btf_field_type type;
+ union {
+ struct btf_field_kptr kptr;
+ struct btf_field_graph_root graph_root;
+ };
+};
+
+struct btf_record {
+ u32 cnt;
+ u32 field_mask;
+ int spin_lock_off;
+ int timer_off;
+ int refcount_off;
+ struct btf_field fields[];
+};
+
+/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
+struct bpf_rb_node_kern {
+ struct rb_node rb_node;
+ void *owner;
+} __attribute__((aligned(8)));
+
+/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
+struct bpf_list_node_kern {
+ struct list_head list_head;
+ void *owner;
+} __attribute__((aligned(8)));
+
+struct bpf_map {
+ const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
void *security;
@@ -130,59 +264,273 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
- int spin_lock_off; /* >=0 valid offset, <0 error */
u32 id;
+ struct btf_record *record;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
+ u32 btf_vmlinux_value_type_id;
struct btf *btf;
- struct bpf_map_memory memory;
+#ifdef CONFIG_MEMCG_KMEM
+ struct obj_cgroup *objcg;
+#endif
char name[BPF_OBJ_NAME_LEN];
- u32 btf_vmlinux_value_type_id;
+ struct mutex freeze_mutex;
+ atomic64_t refcnt;
+ atomic64_t usercnt;
+ /* rcu is used before freeing and work is only used during freeing */
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+ atomic64_t writecnt;
+ /* 'Ownership' of program-containing map is claimed by the first program
+ * that is going to use this map or by the first program which FD is
+ * stored in the map to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
+ } owner;
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
- /* 22 bytes hole */
-
- /* The 3rd and 4th cacheline with misc members to avoid false sharing
- * particularly with refcounting.
- */
- atomic64_t refcnt ____cacheline_aligned;
- atomic64_t usercnt;
- struct work_struct work;
- struct mutex freeze_mutex;
- u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
};
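
/* A simplified sketch of the first-user-wins claiming described by the
 * 'owner' comment above; the real check is bpf_prog_map_compatible() in
 * kernel/bpf/core.c, and example_owner_claim() is hypothetical.
 */
static inline bool example_owner_claim(struct bpf_map *map,
				       const struct bpf_prog *fp)
{
	bool ret;

	spin_lock(&map->owner.lock);
	if (!map->owner.type) {
		/* first program to use the map claims ownership */
		map->owner.type = fp->type;
		map->owner.jited = fp->jited;
		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
		ret = true;
	} else {
		ret = map->owner.type == fp->type &&
		      map->owner.jited == fp->jited &&
		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
	}
	spin_unlock(&map->owner.lock);
	return ret;
}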
-static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+static inline const char *btf_field_type_name(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return "bpf_spin_lock";
+ case BPF_TIMER:
+ return "bpf_timer";
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ return "kptr";
+ case BPF_KPTR_PERCPU:
+ return "percpu_kptr";
+ case BPF_LIST_HEAD:
+ return "bpf_list_head";
+ case BPF_LIST_NODE:
+ return "bpf_list_node";
+ case BPF_RB_ROOT:
+ return "bpf_rb_root";
+ case BPF_RB_NODE:
+ return "bpf_rb_node";
+ case BPF_REFCOUNT:
+ return "bpf_refcount";
+ default:
+ WARN_ON_ONCE(1);
+ return "unknown";
+ }
+}
+
+static inline u32 btf_field_type_size(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return sizeof(struct bpf_spin_lock);
+ case BPF_TIMER:
+ return sizeof(struct bpf_timer);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ return sizeof(u64);
+ case BPF_LIST_HEAD:
+ return sizeof(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return sizeof(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return sizeof(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return sizeof(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return sizeof(struct bpf_refcount);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline u32 btf_field_type_align(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return __alignof__(struct bpf_spin_lock);
+ case BPF_TIMER:
+ return __alignof__(struct bpf_timer);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ return __alignof__(u64);
+ case BPF_LIST_HEAD:
+ return __alignof__(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return __alignof__(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return __alignof__(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return __alignof__(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return __alignof__(struct bpf_refcount);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
+{
+ memset(addr, 0, field->size);
+
+ switch (field->type) {
+ case BPF_REFCOUNT:
+ refcount_set((refcount_t *)addr, 1);
+ break;
+ case BPF_RB_NODE:
+ RB_CLEAR_NODE((struct rb_node *)addr);
+ break;
+ case BPF_LIST_HEAD:
+ case BPF_LIST_NODE:
+ INIT_LIST_HEAD((struct list_head *)addr);
+ break;
+ case BPF_RB_ROOT:
+ /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
+ case BPF_SPIN_LOCK:
+ case BPF_TIMER:
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+
+static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
+{
+ if (IS_ERR_OR_NULL(rec))
+ return false;
+ return rec->field_mask & type;
+}
+
+static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rec))
+ return;
+ for (i = 0; i < rec->cnt; i++)
+ bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
+}
+
+/* 'dst' must be a temporary buffer and should not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall, otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to weird problems. Even if 'dst' is newly allocated from the bpf
+ * memory allocator, it is still possible for 'dst' to be used in parallel by a
+ * bpf program or bpf syscall.
+ */
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
- return map->spin_lock_off >= 0;
+ bpf_obj_init(map->record, dst);
+}
+
+/* memcpy that is used with 8-byte aligned pointers and a size that is a
+ * multiple of 8, forced to use 'long' read/writes to try to atomically copy
+ * long counters.
+ * Best-effort only. No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ data_race(*ldst++ = *lsrc++);
}
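
/* A minimal usage sketch; the struct and helper are hypothetical. Two u64
 * counters occupy 16 bytes, so the copy becomes two data_race'd long
 * accesses on 64-bit, keeping each counter individually untorn.
 */
struct example_counters {
	u64 pkts;
	u64 bytes;
};

static inline void example_snapshot_counters(struct example_counters *dst,
					     const struct example_counters *src)
{
	bpf_long_memcpy(dst, src, sizeof(*src));
}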
-static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
+static inline void bpf_obj_memcpy(struct btf_record *rec,
+ void *dst, void *src, u32 size,
+ bool long_memcpy)
{
- if (likely(!map_value_has_spin_lock(map)))
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ if (long_memcpy)
+ bpf_long_memcpy(dst, src, round_up(size, 8));
+ else
+ memcpy(dst, src, size);
return;
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
+
+ memcpy(dst + curr_off, src + curr_off, sz);
+ curr_off += rec->fields[i].size + sz;
+ }
+ memcpy(dst + curr_off, src + curr_off, size - curr_off);
}
-/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
- if (unlikely(map_value_has_spin_lock(map))) {
- u32 off = map->spin_lock_off;
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
+}
+
+static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
+{
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
+}
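
/* A sketch of the typical caller pattern for the long variant, in the style
 * of the per-cpu map code; 'pptr', 'buf' and the helper are assumptions.
 * Each per-cpu slot is copied with the tear-resistant variant because BPF
 * programs may update values concurrently.
 */
static inline void example_copy_percpu_value(struct bpf_map *map, void *buf,
					     void __percpu *pptr)
{
	u32 size = round_up(map->value_size, 8);
	int cpu, off = 0;

	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, buf + off, per_cpu_ptr(pptr, cpu));
		off += size;
	}
}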
+
+static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ memset(dst, 0, size);
+ return;
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
- memcpy(dst, src, off);
- memcpy(dst + off + sizeof(struct bpf_spin_lock),
- src + off + sizeof(struct bpf_spin_lock),
- map->value_size - off - sizeof(struct bpf_spin_lock));
- } else {
- memcpy(dst, src, map->value_size);
+ memset(dst + curr_off, 0, sz);
+ curr_off += rec->fields[i].size + sz;
}
+ memset(dst + curr_off, 0, size - curr_off);
}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+ bpf_obj_memzero(map->record, dst, map->value_size);
+}
+
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
+void bpf_list_head_free(const struct btf_field *field, void *list_head,
+ struct bpf_spin_lock *spin_lock);
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock);
+u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
+u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
@@ -227,8 +575,128 @@ int map_check_no_btf(const struct bpf_map *map,
const struct btf_type *key_type,
const struct btf_type *value_type);
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
+/* bpf_type_flag contains a set of flags that are applicable to the values of
+ * arg_type, ret_type and reg_type. For example, a pointer value may be null,
+ * or a memory region may be read-only. We classify types into two categories:
+ * base types and extended types. Extended types are base types combined with
+ * a type flag.
+ *
+ * Currently there are no more than 32 base types in arg_type, ret_type and
+ * reg_types.
+ */
+#define BPF_BASE_TYPE_BITS 8
+
+enum bpf_type_flag {
+ /* PTR may be NULL. */
+ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
+ * compatible with both mutable and immutable memory.
+ */
+ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
+
+ /* MEM points to BPF ring buffer reservation. */
+ MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is in user address space. */
+ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
+ * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
+ * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
+ * or bpf_this_cpu_ptr(), which will return the pointer corresponding
+ * to the specified cpu.
+ */
+ MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
+
+ /* Indicates that the argument will be released. */
+ OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
+
+ /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+ * unreferenced and referenced kptrs loaded from a map value using a load
+ * instruction, so that they can only be dereferenced but not escape the
+ * BPF program into the kernel (i.e. cannot be passed as arguments to
+ * kfuncs or bpf helpers).
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
+ /* MEM does not need to be initialized: the helper fills all bytes, or
+ * clears them in the error case.
+ */
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to a kernel-produced ringbuf record. */
+ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
+
+ /* Size is known at compile time. */
+ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is of an allocated object of type in program BTF. This is used to
+ * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
+ */
+ MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
+
+ /* PTR was passed from the kernel in a trusted context, and may be
+ * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
+ * PTR_UNTRUSTED refers to a kptr that was read directly from a map
+ * without invoking bpf_kptr_xchg(). What we really need to know is
+ * whether a pointer is safe to pass to a kfunc or BPF helper function.
+ * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
+ * helpers, they do not cover all possible instances of unsafe
+ * pointers. For example, a pointer that was obtained from walking a
+ * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
+ * fact that it may be NULL, invalid, etc. This is due to backwards
+ * compatibility requirements, as this was the behavior that was first
+ * introduced when kptrs were added. The behavior is now considered
+ * deprecated, and PTR_UNTRUSTED will eventually be removed.
+ *
+ * PTR_TRUSTED, on the other hand, is a pointer that the kernel
+ * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
+ * For example, pointers passed to tracepoint arguments are considered
+ * PTR_TRUSTED, as are pointers that are passed to struct_ops
+ * callbacks. As alluded to above, pointers that are obtained from
+ * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
+ * struct task_struct *task is PTR_TRUSTED, then accessing
+ * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
+ * in a BPF register. Similarly, pointers passed to certain program
+ * types such as kretprobes are not guaranteed to be valid, as they may
+ * for example contain an object that was recently freed.
+ */
+ PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
+ MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
+
+ /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+ * Currently only valid for linked-list and rbtree nodes. If the nodes
+ * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
+ */
+ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to sk_buff */
+ DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to xdp_buff */
+ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+};
+
+#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
+ | DYNPTR_TYPE_XDP)
+
+/* Max number of base types. */
+#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
+
+/* Max number of all types. */
+#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -239,49 +707,86 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
- ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
- ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */
- /* the following constraints used to prototype bpf_memcmp() and other
- * functions that access data on eBPF program stack
+ /* Used to prototype bpf_memcmp() and other functions that access data
+ * on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
- ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
- ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
- * helper function must fill all bytes or clear
- * them in error case.
- */
+ ARG_PTR_TO_ARENA,
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
- ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
ARG_PTR_TO_INT, /* pointer to int */
ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
- ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
- ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
- ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
+ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
+ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
+ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
+ ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
+ ARG_PTR_TO_STACK, /* pointer to stack */
+ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
+ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
+ ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
+ __BPF_ARG_TYPE_MAX,
+
+ /* Extended arg_types. */
+ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
+ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
+ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
+ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
+ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+ /* Pointer to memory that does not need to be initialized; the helper
+ * function must fill all bytes or clear them in the error case.
+ */
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
+ /* Pointer to valid memory of size known at compile time. */
+ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
- RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
- RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
- RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
- RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
- RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
+ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
+ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
+ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
+ RET_PTR_TO_MEM, /* returns a pointer to memory */
+ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
+ RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
+ __BPF_RET_TYPE_MAX,
+
+ /* Extended ret_types. */
+ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
+ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
+ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
+ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
+ RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
+ RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM,
+ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
+ RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
* to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
@@ -291,6 +796,7 @@ struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
bool pkt_access;
+ bool might_sleep;
enum bpf_return_type ret_type;
union {
struct {
@@ -302,13 +808,26 @@ struct bpf_func_proto {
};
enum bpf_arg_type arg_type[5];
};
- int *btf_id; /* BTF ids of arguments */
- bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is
- * valid. Often used if more
- * than one btf id is permitted
- * for this argument.
- */
+ union {
+ struct {
+ u32 *arg1_btf_id;
+ u32 *arg2_btf_id;
+ u32 *arg3_btf_id;
+ u32 *arg4_btf_id;
+ u32 *arg5_btf_id;
+ };
+ u32 *arg_btf_id[5];
+ struct {
+ size_t arg1_size;
+ size_t arg2_size;
+ size_t arg3_size;
+ size_t arg4_size;
+ size_t arg5_size;
+ };
+ size_t arg_size[5];
+ };
int *ret_btf_id; /* return value btf_id */
+ bool (*allowed)(const struct bpf_prog *prog);
};
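
/* A sketch of how a helper describes itself through bpf_func_proto, in the
 * style of kernel/bpf/helpers.c; the helper below is hypothetical and its
 * body is elided.
 */
static u64 example_helper(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;	/* would return a map value pointer or NULL */
}

static const struct bpf_func_proto example_helper_proto = {
	.func		= example_helper,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};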
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -338,29 +857,52 @@ enum bpf_reg_type {
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+ PTR_TO_MAP_KEY, /* reg points to a map element key */
PTR_TO_STACK, /* reg == frame_pointer + offset */
PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
PTR_TO_SOCKET, /* reg points to struct bpf_sock */
- PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
PTR_TO_SOCK_COMMON, /* reg points to sock_common */
- PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
- PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
- PTR_TO_BTF_ID, /* reg points to kernel struct */
- PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */
+ /* PTR_TO_BTF_ID points to a kernel struct that does not need
+ * to be null checked by the BPF program. This does not imply the
+ * pointer is _not_ null and in practice this can easily be a null
+ * pointer when reading pointer chains. The assumption is that the
+ * program context will handle null pointer dereference, typically via
+ * fault handling. The verifier must keep this in mind and can make no
+ * assumptions about null or non-null when doing branch analysis.
+ * Further, when passed into helpers, the helpers cannot, without
+ * additional context, assume the value is non-null.
+ */
+ PTR_TO_BTF_ID,
+ /* PTR_TO_BTF_ID_OR_NULL (now an extended reg_type, see below) points to
+ * a kernel struct that has not been checked for null. Used primarily to
+ * inform the verifier an explicit null check is required for this struct.
+ */
PTR_TO_MEM, /* reg points to valid memory region */
- PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
- PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
- PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
- PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
- PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
+ PTR_TO_ARENA,
+ PTR_TO_BUF, /* reg points to a read/write buffer */
+ PTR_TO_FUNC, /* reg points to a bpf program function */
+ CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
+ __BPF_REG_TYPE_MAX,
+
+ /* Extended reg_types. */
+ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
+ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
+ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
+ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
@@ -369,7 +911,10 @@ struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
union {
int ctx_field_size;
- u32 btf_id;
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
};
struct bpf_verifier_log *log; /* for verbose logs */
};
@@ -380,11 +925,22 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+}
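
/* A minimal scan sketch (hypothetical helper): ld_imm64 occupies two
 * instruction slots, so a walk over insnsi[] must skip the second slot
 * after matching.
 */
static inline int example_count_pseudo_funcs(const struct bpf_insn *insnsi,
					     u32 len)
{
	int n = 0;
	u32 i;

	for (i = 0; i < len; i++) {
		if (bpf_pseudo_func(&insnsi[i]))
			n++;
		if (bpf_is_ldimm64(&insnsi[i]))
			i++;	/* second slot carries the upper 32 bits of imm64 */
	}
	return n;
}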
+
struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
};
+struct bpf_reg_state;
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *
@@ -406,9 +962,8 @@ struct bpf_verifier_ops {
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
- const struct btf_type *t, int off, int size,
- enum bpf_access_type atype,
- u32 *next_btf_id);
+ const struct bpf_reg_state *reg,
+ int off, int size);
};
struct bpf_prog_offload_ops {
@@ -451,16 +1006,23 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_ARGS 12
-struct bpf_prog_stats {
- u64 cnt;
- u64 nsecs;
- struct u64_stats_sync syncp;
-} __aligned(2 * sizeof(u64));
+/* The maximum number of arguments passed through registers
+ * a single function may have.
+ */
+#define MAX_BPF_FUNC_REG_ARGS 5
+
+/* The argument is a structure. */
+#define BTF_FMODEL_STRUCT_ARG BIT(0)
+
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
struct btf_func_model {
u8 ret_size;
+ u8 ret_flags;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
+ u8 arg_flags[MAX_BPF_FUNC_ARGS];
};
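
/* A minimal sketch (hypothetical helper) of consulting the per-argument
 * flags, e.g. when deciding how argument 'i' is passed.
 */
static inline bool example_arg_is_struct(const struct btf_func_model *m, int i)
{
	return i < m->nr_args && (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG);
}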
/* Restore arguments before returning from trampoline to let original function
@@ -476,17 +1038,57 @@ struct btf_func_model {
* programs only. Should not be used with normal calls and indirect calls.
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
+
+/* Get original function from stack instead of from provided direct address.
+ * Makes sense for trampolines with fexit or fmod_ret programs.
+ */
+#define BPF_TRAMP_F_ORIG_STACK BIT(5)
+
+/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
+ * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
+ */
+#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
+/* Indicate that current trampoline is in a tail call context. Then, it has to
+ * cache and restore tail_call_cnt to avoid infinite tail call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+
+/*
+ * Indicate the trampoline should be suitable to receive indirect calls;
+ * without this indirectly calling the generated code can result in #UD/#CP,
+ * depending on the CFI options.
+ *
+ * Used by bpf_struct_ops.
+ *
+ * Incompatible with FENTRY usage, overloads @func_addr argument.
+ */
+#define BPF_TRAMP_F_INDIRECT BIT(8)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
- * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+ * bytes on x86.
*/
-#define BPF_MAX_TRAMP_PROGS 40
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
-struct bpf_tramp_progs {
- struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
- int nr_progs;
+struct bpf_tramp_links {
+ struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+ int nr_links;
};
+struct bpf_tramp_run_ctx;
+
/* Different use cases for BPF trampoline:
* 1. replace nop at the function entry (kprobe equivalent)
* flags = BPF_TRAMP_F_RESTORE_REGS
@@ -507,13 +1109,30 @@ struct bpf_tramp_progs {
* fentry = a set of program to run before calling original function
* fexit = a set of program to run after original function
*/
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_tramp_progs *tprogs,
- void *orig_call);
-/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(void);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+ struct bpf_tramp_links *tlinks,
+ void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+void arch_protect_bpf_trampoline(void *image, unsigned int size);
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr);
+
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
struct bpf_ksym {
unsigned long start;
@@ -532,12 +1151,27 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_REPLACE, /* more than MAX */
};
+struct bpf_tramp_image {
+ void *image;
+ int size;
+ struct bpf_ksym ksym;
+ struct percpu_ref pcref;
+ void *ip_after_call;
+ void *ip_epilogue;
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+};
+
struct bpf_trampoline {
/* hlist for trampoline_table */
struct hlist_node hlist;
+ struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
+ u32 flags;
u64 key;
struct {
struct btf_func_model model;
@@ -554,9 +1188,15 @@ struct bpf_trampoline {
/* Number of attached programs. A counter per kind. */
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
- void *image;
- u64 selector;
- struct bpf_ksym ksym;
+ struct bpf_tramp_image *cur_image;
+};
+
+struct bpf_attach_target_info {
+ struct btf_func_model fmodel;
+ long tgt_addr;
+ struct module *tgt_mod;
+ const char *tgt_name;
+ const struct btf_type *tgt_type;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
@@ -573,23 +1213,95 @@ struct bpf_dispatcher {
struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
int num_progs;
void *image;
+ void *rw_image;
u32 image_off;
struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
};
-static __always_inline unsigned int bpf_dispatcher_nop_func(
+#ifndef __bpfcall
+#define __bpfcall __nocfi
+#endif
+
+static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
- unsigned int (*bpf_func)(const void *,
- const struct bpf_insn *))
+ bpf_func_t bpf_func)
{
return bpf_func(ctx, insnsi);
}
+
+/* the implementation of the opaque uapi struct bpf_dynptr */
+struct bpf_dynptr_kern {
+ void *data;
+ /* Size represents the number of usable bytes of dynptr data.
+ * If for example the offset is at 4 for a local dynptr whose data is
+ * of type u64, the number of usable bytes is 4.
+ *
+ * The upper 8 bits are reserved. The layout is as follows:
+ * Bits 0 - 23 = size
+ * Bits 24 - 30 = dynptr type
+ * Bit 31 = whether dynptr is read-only
+ */
+ u32 size;
+ u32 offset;
+} __aligned(8);
+
+enum bpf_dynptr_type {
+ BPF_DYNPTR_TYPE_INVALID,
+ /* Points to memory that is local to the bpf program */
+ BPF_DYNPTR_TYPE_LOCAL,
+ /* Underlying data is a ringbuf record */
+ BPF_DYNPTR_TYPE_RINGBUF,
+ /* Underlying data is a sk_buff */
+ BPF_DYNPTR_TYPE_SKB,
+ /* Underlying data is a xdp_buff */
+ BPF_DYNPTR_TYPE_XDP,
+};
+
+int bpf_dynptr_check_size(u32 size);
+u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
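
/* A sketch of accessors matching the bit layout documented on
 * bpf_dynptr_kern::size; the EXAMPLE_* names are hypothetical and the real
 * accessors live with the dynptr helpers.
 */
#define EXAMPLE_DYNPTR_SIZE_MASK	0x00FFFFFF
#define EXAMPLE_DYNPTR_TYPE_SHIFT	24
#define EXAMPLE_DYNPTR_RDONLY_BIT	BIT(31)

static inline u32 example_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & EXAMPLE_DYNPTR_SIZE_MASK;
}

static inline enum bpf_dynptr_type
example_dynptr_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size >> EXAMPLE_DYNPTR_TYPE_SHIFT) & 0x7f;
}

static inline bool example_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & EXAMPLE_DYNPTR_RDONLY_BIT;
}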
+
#ifdef CONFIG_BPF_JIT
-struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
-int bpf_trampoline_link_prog(struct bpf_prog *prog);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+
+/*
+ * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid the double indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
+
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -601,49 +1313,56 @@ void bpf_trampoline_put(struct bpf_trampoline *tr);
.name = #_name, \
.lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
}, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
}
#define DEFINE_BPF_DISPATCHER(name) \
- noinline unsigned int bpf_dispatcher_##name##_func( \
+ __BPF_DISPATCHER_SC(name); \
+ noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
- unsigned int (*bpf_func)(const void *, \
- const struct bpf_insn *)) \
+ bpf_func_t bpf_func) \
{ \
- return bpf_func(ctx, insnsi); \
+ return __BPF_DISPATCHER_CALL(name); \
} \
EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
struct bpf_dispatcher bpf_dispatcher_##name = \
BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
+
#define DECLARE_BPF_DISPATCHER(name) \
unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
- unsigned int (*bpf_func)(const void *, \
- const struct bpf_insn *)); \
+ bpf_func_t bpf_func); \
extern struct bpf_dispatcher bpf_dispatcher_##name;
+
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void *bpf_jit_alloc_exec_page(void);
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 size);
+void bpf_jit_uncharge_modmem(u32 size);
+bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
-static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr)
{
- return NULL;
+ return -ENOTSUPP;
}
-static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr)
{
return -ENOTSUPP;
}
-static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info)
{
- return -ENOTSUPP;
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
@@ -657,11 +1376,17 @@ static inline bool is_bpf_image_address(unsigned long address)
{
return false;
}
+static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif
struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
+ bool called : 1;
+ bool verified : 1;
};
enum bpf_jit_poke_reason {
@@ -670,48 +1395,71 @@ enum bpf_jit_poke_reason {
/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
- void *ip;
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
+ void *aux;
union {
struct {
struct bpf_map *map;
u32 key;
} tail_call;
};
- bool ip_stable;
+ bool tailcall_target_stable;
u8 adj_off;
u16 reason;
+ u32 insn_idx;
};
/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
u32 offset;
enum bpf_reg_type reg_type;
+ struct btf *btf;
u32 btf_id;
};
+struct btf_mod_pair {
+ struct btf *btf;
+ struct module *module;
+};
+
+struct bpf_kfunc_desc_tab;
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
+ u32 used_btf_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
u32 max_tp_access;
u32 stack_depth;
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
u32 ctx_arg_info_size;
u32 max_rdonly_access;
u32 max_rdwr_access;
+ struct btf *attach_btf;
const struct bpf_ctx_arg_aux *ctx_arg_info;
- struct bpf_prog *linked_prog;
+ struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
+ struct bpf_prog *dst_prog;
+ struct bpf_trampoline *dst_trampoline;
+ enum bpf_prog_type saved_dst_prog_type;
+ enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
- bool offload_requested;
+ bool dev_bound; /* Program is bound to the netdev. */
+ bool offload_requested; /* Program is bound and offloaded to the netdev. */
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool attach_tracing_prog; /* true if tracing another tracing program */
bool func_proto_unreliable;
- enum bpf_tramp_prog_type trampoline_prog_type;
- struct bpf_trampoline *trampoline;
- struct hlist_node tramp_hlist;
+ bool tail_call_reachable;
+ bool xdp_has_frags;
+ bool exception_cb;
+ bool exception_boundary;
+ struct bpf_arena *arena;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -719,18 +1467,29 @@ struct bpf_prog_aux {
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
+ struct bpf_kfunc_desc_tab *kfunc_tab;
+ struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
+#ifdef CONFIG_FINEIBT
+ struct bpf_ksym ksym_prefix;
+#endif
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
+ struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
+ struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ u32 verified_insns;
+ int cgroup_atype; /* enum cgroup_bpf_attach_type */
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
+ u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
#ifdef CONFIG_SECURITY
void *security;
#endif
+ struct bpf_token *token;
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
@@ -757,23 +1516,51 @@ struct bpf_prog_aux {
* main prog always has linfo_idx == 0
*/
u32 linfo_idx;
+ struct module *mod;
u32 num_exentries;
struct exception_table_entry *extable;
- struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
+struct bpf_prog {
+ u16 pages; /* Number of allocated pages */
+ u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinding_requested:1, /* needs constant blinding */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1, /* callchain buffer allocated? */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1, /* Do we call get_func_ip() */
+ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
+ sleepable:1; /* BPF program is sleepable */
+ enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
+ u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
+ u8 tag[BPF_TAG_SIZE];
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ /* Instructions for interpreter */
+ union {
+ DECLARE_FLEX_ARRAY(struct sock_filter, insns);
+ DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
+ };
+};
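
/* A simplified invocation sketch (hypothetical helper): execution always
 * funnels through prog->bpf_func, which points at either the interpreter
 * or the JITed image; the real entry points additionally handle stats,
 * migration and run contexts.
 */
static inline u32 example_invoke_prog(const struct bpf_prog *prog,
				      const void *ctx)
{
	return prog->bpf_func(ctx, prog->insnsi);
}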
+
struct bpf_array_aux {
- /* 'Ownership' of prog array is claimed by the first program that
- * is going to use this map or by the first program which FD is
- * stored in the map to make sure that all callers and callees have
- * the same prog type and JITed flag.
- */
- enum bpf_prog_type type;
- bool jited;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@ -799,6 +1586,26 @@ struct bpf_link_ops {
void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
int (*fill_link_info)(const struct bpf_link *link,
struct bpf_link_info *info);
+ int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
+ struct bpf_map *old_map);
+};
+
+struct bpf_tramp_link {
+ struct bpf_link link;
+ struct hlist_node tramp_hlist;
+ u64 cookie;
+};
+
+struct bpf_shim_tramp_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+};
+
+struct bpf_tracing_link {
+ struct bpf_tramp_link link;
+ enum bpf_attach_type attach_type;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
};
struct bpf_link_primer {
@@ -808,37 +1615,164 @@ struct bpf_link_primer {
u32 id;
};
+struct bpf_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+
+ /* BPF token-related delegation options */
+ u64 delegate_cmds;
+ u64 delegate_maps;
+ u64 delegate_progs;
+ u64 delegate_attachs;
+};
+
+struct bpf_token {
+ struct work_struct work;
+ atomic64_t refcnt;
+ struct user_namespace *userns;
+ u64 allowed_cmds;
+ u64 allowed_maps;
+ u64 allowed_progs;
+ u64 allowed_attachs;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+};
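
/* A minimal sketch of consulting a delegation bitmask, assuming the
 * convention that bit N corresponds to uapi enum value N; the helper is
 * hypothetical.
 */
static inline bool example_token_allows_cmd(const struct bpf_token *token,
					    enum bpf_cmd cmd)
{
	return token && (token->allowed_cmds & BIT_ULL(cmd));
}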
+
struct bpf_struct_ops_value;
-struct btf_type;
struct btf_member;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+/**
+ * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
+ * define a BPF_MAP_TYPE_STRUCT_OPS map type composed
+ * of BPF_PROG_TYPE_STRUCT_OPS progs.
+ * @verifier_ops: A structure of callbacks that are invoked by the verifier
+ * when determining whether the struct_ops progs in the
+ * struct_ops map are valid.
+ * @init: A callback that is invoked a single time, and before any other
+ * callback, to initialize the structure. A nonzero return value means
+ * the subsystem could not be initialized.
+ * @check_member: When defined, a callback invoked by the verifier to allow
+ * the subsystem to determine if an entry in the struct_ops map
+ * is valid. A nonzero return value means that the map is
+ * invalid and should be rejected by the verifier.
+ * @init_member: A callback that is invoked for each member of the struct_ops
+ * map to allow the subsystem to initialize the member. A nonzero
+ * value means the member could not be initialized. This callback
+ * is exclusive with the @type, @type_id, @value_type, and
+ * @value_id fields.
+ * @reg: A callback that is invoked when the struct_ops map has been
+ * initialized and is being attached to. Zero means the struct_ops map
+ * has been successfully registered and is live. A nonzero return value
+ * means the struct_ops map could not be registered.
+ * @unreg: A callback that is invoked when the struct_ops map should be
+ * unregistered.
+ * @update: A callback that is invoked when the live struct_ops map is being
+ * updated to contain new values. This callback is only invoked when
+ * the struct_ops map is loaded with BPF_F_LINK. If not defined, it
+ * is assumed that the struct_ops map cannot be updated.
+ * @validate: A callback that is invoked after all of the members have been
+ * initialized. This callback should perform static checks on the
+ * map, meaning that it should either fail or succeed
+ * deterministically. A struct_ops map that has been validated may
+ * not necessarily succeed in being registered if the call to @reg
+ * fails. For example, a valid struct_ops map may be loaded, but
+ * then fail to be registered because another struct_ops map for the
+ * same subsystem is already active on the system. For this
+ * reason, if this callback is not defined, the check is skipped as
+ * the struct_ops map will have final verification performed in
+ * @reg.
+ * @type: BTF type.
+ * @value_type: Value type.
+ * @name: The name of the struct bpf_struct_ops object.
+ * @func_models: Func models
+ * @type_id: BTF type id.
+ * @value_id: BTF value id.
+ */
struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
int (*check_member)(const struct btf_type *t,
- const struct btf_member *member);
+ const struct btf_member *member,
+ const struct bpf_prog *prog);
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
int (*reg)(void *kdata);
void (*unreg)(void *kdata);
- const struct btf_type *type;
- const struct btf_type *value_type;
+ int (*update)(void *kdata, void *old_kdata);
+ int (*validate)(void *kdata);
+ void *cfi_stubs;
+ struct module *owner;
const char *name;
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+};
+
+/* Every member of a struct_ops type has an instance even if the member is
+ * not an operator (function pointer). The "info" field will be assigned to
+ * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
+ * argument information required by the verifier to verify the program.
+ *
+ * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
+ * corresponding entry for a given argument.
+ */
+struct bpf_struct_ops_arg_info {
+ struct bpf_ctx_arg_aux *info;
+ u32 cnt;
+};
+
+struct bpf_struct_ops_desc {
+ struct bpf_struct_ops *st_ops;
+
+ const struct btf_type *type;
+ const struct btf_type *value_type;
u32 type_id;
u32 value_id;
+
+ /* Collection of argument information for each member */
+ struct bpf_struct_ops_arg_info *arg_info;
+};
+
+enum bpf_struct_ops_state {
+ BPF_STRUCT_OPS_STATE_INIT,
+ BPF_STRUCT_OPS_STATE_INUSE,
+ BPF_STRUCT_OPS_STATE_TOBEFREE,
+ BPF_STRUCT_OPS_STATE_READY,
+};
+
+struct bpf_struct_ops_common_value {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+/* This macro helps developers register a struct_ops type and generate
+ * type information correctly. Developers should use this macro to register
+ * a struct_ops type instead of calling __register_bpf_struct_ops() directly.
+ */
+#define register_bpf_struct_ops(st_ops, type) \
+ ({ \
+ struct bpf_struct_ops_##type { \
+ struct bpf_struct_ops_common_value common; \
+ struct type data ____cacheline_aligned_in_smp; \
+ }; \
+ BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \
+ __register_bpf_struct_ops(st_ops); \
+ })
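
/* A usage sketch with a hypothetical subsystem: passing the bare type name
 * lets the macro synthesize and BTF-emit the bpf_struct_ops_<type> wrapper
 * that struct_ops map values are laid out as.
 */
struct example_ops {
	int (*handle)(int val);
};

static struct bpf_struct_ops bpf_example_ops = {
	.name	= "example_ops",
	/* .verifier_ops, .init, .reg, .unreg, .owner, ... elided */
};

static inline int example_register_struct_ops(void)
{
	return register_bpf_struct_ops(&bpf_example_ops, example_ops);
}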
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
-const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
-void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ struct bpf_tramp_link *link,
+ const struct btf_func_model *model,
+ void *stub_func,
+ void **image, u32 *image_off,
+ bool allow_alloc);
+void bpf_struct_ops_image_free(void *image);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
@@ -853,15 +1787,31 @@ static inline void bpf_module_put(const void *data, struct module *owner)
else
module_put(owner);
}
+int bpf_struct_ops_link_create(union bpf_attr *attr);
+
+#ifdef CONFIG_NET
+/* Define it here to avoid the use of a forward declaration */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+struct bpf_dummy_ops {
+ int (*test_1)(struct bpf_dummy_ops_state *cb);
+ int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+ char a3, unsigned long a4);
+ int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
+};
+
+int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+#endif
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log);
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
#else
-static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
-{
- return NULL;
-}
-static inline void bpf_struct_ops_init(struct btf *btf,
- struct bpf_verifier_log *log)
-{
-}
+#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
return try_module_get(owner);
@@ -876,6 +1826,33 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
{
return -EINVAL;
}
+static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+}
+
+static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+}
+
+#endif
+
+#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
+int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype);
+void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
+{
+}
#endif
struct bpf_array {
@@ -884,14 +1861,21 @@ struct bpf_array {
u32 index_mask;
struct bpf_array_aux *aux;
union {
- char value[0] __aligned(8);
- void *ptrs[0] __aligned(8);
- void __percpu *pptrs[0] __aligned(8);
+ DECLARE_FLEX_ARRAY(char, value) __aligned(8);
+ DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
+ DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
};
};
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
-#define MAX_TAIL_CALL_CNT 32
+#define MAX_TAIL_CALL_CNT 33
+
+/* Maximum number of loops for bpf_loop and bpf_iter_num.
+ * It's an enum to expose it (and thus make it discoverable) through BTF.
+ */
+enum {
+ BPF_MAX_LOOPS = 8 * 1024 * 1024,
+};
#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
BPF_F_RDONLY_PROG | \
@@ -901,6 +1885,11 @@ struct bpf_array {
#define BPF_MAP_CAN_READ BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)
+/* Maximum number of user-producer ring buffer samples that can be drained in
+ * a call to bpf_user_ringbuf_drain().
+ */
+#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
+
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
@@ -929,11 +1918,18 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_DEVMAP ||
+ map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
+
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
-const char *kernel_type_name(u32 btf_type_id);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@ -949,7 +1945,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ * ret = bpf_prog_run_array(rcu_dereference(bpf_prog_array), ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@@ -960,7 +1956,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
- struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ union {
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u64 bpf_cookie;
+ };
};
struct bpf_prog_array {
@@ -968,8 +1967,23 @@ struct bpf_prog_array {
struct bpf_prog_array_item items[];
};
+struct bpf_empty_prog_array {
+ struct bpf_prog_array hdr;
+ struct bpf_prog *null_prog;
+};
+
+/* To avoid allocating an empty bpf_prog_array for cgroups that don't have a
+ * bpf program attached, use the single global 'bpf_empty_prog_array'.
+ * It will not be modified by the caller of bpf_prog_array_alloc()
+ * (since the caller requested prog_cnt == 0); that pointer should still be
+ * 'freed' by bpf_prog_array_free().
+ */
+extern struct bpf_empty_prog_array bpf_empty_prog_array;
+
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
+/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
@@ -986,86 +2000,134 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
+ u64 bpf_cookie,
struct bpf_prog_array **new_array);
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 _ret = 1; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- if (unlikely(check_non_null && !_array))\
- goto _out; \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
- _ret &= func(_prog, ctx); \
- _item++; \
- } \
-_out: \
- rcu_read_unlock(); \
- migrate_enable(); \
- _ret; \
- })
-
-/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
- * so BPF programs can request cwr for TCP packets.
- *
- * Current cgroup skb programs can only return 0 or 1 (0 to drop the
- * packet. This macro changes the behavior so the low order bit
- * indicates whether the packet should be dropped (0) or not (1)
- * and the next bit is a congestion notification bit. This could be
- * used by TCP to call tcp_enter_cwr()
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ const struct bpf_prog_array_item *prog_item;
+ int retval;
+};
+
+struct bpf_trace_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ bool is_uprobe;
+};
+
+struct bpf_tramp_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ struct bpf_run_ctx *saved_run_ctx;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+ struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+ old_ctx = current->bpf_ctx;
+ current->bpf_ctx = new_ctx;
+#endif
+ return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ current->bpf_ctx = old_ctx;
+#endif
+}
+
+/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
+#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
+/* BPF program asks to set CN on the packet. */
+#define BPF_RET_SET_CN (1 << 0)
+
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+bpf_prog_run_array(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ run_ctx.is_uprobe = false;
+
+ migrate_disable();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
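
/* A usage sketch matching the "Typical usage" comment above: the caller
 * holds the RCU read lock and dereferences the __rcu pointer itself
 * ('progs' is a hypothetical array pointer; bpf_prog_run() is declared in
 * include/linux/filter.h).
 */
static inline u32 example_run_progs(struct bpf_prog_array __rcu **progs,
				    const void *ctx)
{
	u32 ret;

	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(*progs), ctx, bpf_prog_run);
	rcu_read_unlock();
	return ret;
}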
+
+/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
*
- * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
- * 0: drop packet
- * 1: keep packet
- * 2: drop packet and cn
- * 3: keep packet and cn
+ * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
+ * overall. As a result, we must use bpf_prog_array_free_sleepable() so that
+ * frees wait for a tasks_trace rcu grace period.
*
- * This macro then converts it to one of the NET_XMIT or an error
- * code that is then interpreted as drop packet (and no cn):
- * 0: NET_XMIT_SUCCESS skb should be transmitted
- * 1: NET_XMIT_DROP skb should be dropped and cn
- * 2: NET_XMIT_CN skb should be transmitted and cn
- * 3: -EPERM skb should be dropped
+ * When a non-sleepable program is inside the array, we take the rcu read
+ * section and disable preemption for that program alone, so it can access
+ * rcu-protected dynamically sized maps.
*/
-#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 ret; \
- u32 _ret = 1; \
- u32 _cn = 0; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
- ret = func(_prog, ctx); \
- _ret &= (ret & 1); \
- _cn |= (ret & 2); \
- _item++; \
- } \
- rcu_read_unlock(); \
- migrate_enable(); \
- if (_ret) \
- _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
- else \
- _ret = (_cn ? NET_XMIT_DROP : -EPERM); \
- _ret; \
- })
-
-#define BPF_PROG_RUN_ARRAY(array, ctx, func)	\
-	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)
-
-#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
-	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+static __always_inline u32
+bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ might_fault();
+
+ rcu_read_lock_trace();
+ migrate_disable();
+
+	run_ctx.is_uprobe = true;
+
+	array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
+	if (unlikely(!array))
+		goto out;
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	item = &array->items[0];
+	while ((prog = READ_ONCE(item->prog))) {
+		if (!prog->sleepable)
+			rcu_read_lock();
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+
+ if (!prog->sleepable)
+ rcu_read_unlock();
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+out:
+ migrate_enable();
+ rcu_read_unlock_trace();
+ return ret;
+}
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
@@ -1076,31 +2138,20 @@ extern struct mutex bpf_stats_enabled_mutex;
* kprobes, tracepoints) to prevent deadlocks on map operations as any of
* these events can happen inside a region which holds a map bucket lock
* and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
*/
static inline void bpf_disable_instrumentation(void)
{
migrate_disable();
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_inc(bpf_prog_active);
- else
- __this_cpu_inc(bpf_prog_active);
+ this_cpu_inc(bpf_prog_active);
}
static inline void bpf_enable_instrumentation(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_dec(bpf_prog_active);
- else
- __this_cpu_dec(bpf_prog_active);
+ this_cpu_dec(bpf_prog_active);
migrate_enable();
}
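
A sketch of how the syscall path might pair these around a map operation that tracing programs can also reach (simplified from the bpf_map_update_value() pattern; error handling elided):

long err;

bpf_disable_instrumentation();	/* fend off recursing tracing progs */
rcu_read_lock();
err = map->ops->map_update_elem(map, key, value, flags);
rcu_read_unlock();
bpf_enable_instrumentation();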
+extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
@@ -1128,36 +2179,38 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-int __bpf_prog_charge(struct user_struct *user, u32 pages);
-void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
-void __bpf_free_used_maps(struct bpf_prog_aux *aux,
- struct bpf_map **used_maps, u32 len);
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_prog_free_id(struct bpf_prog *prog);
+void bpf_map_free_id(struct bpf_map *map);
+
+struct btf_field *btf_record_find(const struct btf_record *rec,
+ u32 offset, u32 field_mask);
+void btf_record_free(struct btf_record *rec);
+void bpf_map_free_record(struct bpf_map *map);
+struct btf_record *btf_record_dup(const struct btf_record *rec);
+bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
+void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
-int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
-void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
-void bpf_map_charge_finish(struct bpf_map_memory *mem);
-void bpf_map_charge_move(struct bpf_map_memory *dst,
- struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
-int generic_map_update_batch(struct bpf_map *map,
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
@@ -1166,26 +2219,95 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+ unsigned long nr_pages, struct page **page_array);
+#ifdef CONFIG_MEMCG_KMEM
+void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
+void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags);
+void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ size_t align, gfp_t flags);
+#else
+static inline void *
+bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node)
+{
+ return kmalloc_node(size, flags, node);
+}
+
+static inline void *
+bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+{
+ return kzalloc(size, flags);
+}
+
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+ return kvcalloc(n, size, flags);
+}
+
+static inline void __percpu *
+bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
+ gfp_t flags)
+{
+ return __alloc_percpu_gfp(size, align, flags);
+}
+#endif
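
Either way, callers get memcg-accounted allocations when available and plain allocations otherwise. A sketch for a hypothetical map element (smap and my_elem are illustrative):

struct my_elem *e;

/* Charged to the map's memcg when CONFIG_MEMCG_KMEM is enabled. */
e = bpf_map_kmalloc_node(&smap->map, sizeof(*e),
			 GFP_ATOMIC | __GFP_NOWARN,
			 smap->map.numa_node);
if (!e)
	return -ENOMEM;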
+
+static inline int
+bpf_map_init_elem_count(struct bpf_map *map)
+{
+ size_t size = sizeof(*map->elem_count), align = size;
+ gfp_t flags = GFP_USER | __GFP_NOWARN;
+
+ map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
+ if (!map->elem_count)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+bpf_map_free_elem_count(struct bpf_map *map)
+{
+ free_percpu(map->elem_count);
+}
+
+static inline void bpf_map_inc_elem_count(struct bpf_map *map)
+{
+ this_cpu_inc(*map->elem_count);
+}
+
+static inline void bpf_map_dec_elem_count(struct bpf_map *map)
+{
+ this_cpu_dec(*map->elem_count);
+}
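
A lifecycle sketch for the per-cpu element counter above, for a hypothetical map implementation (the callback names are made up):

static int my_map_alloc(struct bpf_map *map)
{
	return bpf_map_init_elem_count(map);	/* -ENOMEM on failure */
}

static void my_map_update(struct bpf_map *map)
{
	bpf_map_inc_elem_count(map);	/* per-cpu, no atomics needed */
}

static void my_map_free(struct bpf_map *map)
{
	bpf_map_free_elem_count(map);
}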
+
extern int sysctl_unprivileged_bpf_disabled;
-static inline bool bpf_allow_ptr_leaks(void)
+bool bpf_token_capable(const struct bpf_token *token, int cap);
+
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_allow_ptr_to_map_access(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
- return perfmon_capable();
+ return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return perfmon_capable();
+ return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
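
With tokens, these checks are no longer purely capability based. A sketch of how verifier setup might consume them (the field names follow bpf_verifier_env, but treat this as illustrative):

env->allow_ptr_leaks    = bpf_allow_ptr_leaks(env->prog->aux->token);
env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
env->bypass_spec_v1     = bpf_bypass_spec_v1(env->prog->aux->token);
env->bypass_spec_v4     = bpf_bypass_spec_v4(env->prog->aux->token);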
int bpf_map_new_fd(struct bpf_map *map, int flags);
@@ -1199,32 +2321,92 @@ void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
-struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+
+void bpf_token_inc(struct bpf_token *token);
+void bpf_token_put(struct bpf_token *token);
+int bpf_token_create(union bpf_attr *attr);
+struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type);
-int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname, int flags);
+int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
+int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
+struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
+ umode_t mode);
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...) \
extern int bpf_iter_ ## target(args); \
int __init bpf_iter_ ## target(args) { return 0; }
+/*
+ * The task type of iterators.
+ *
+ * BPF task iterators can be parameterized to visit only a subset of
+ * tasks.
+ *
+ * BPF_TASK_ITER_ALL (default)
+ * Iterate over resources of every task.
+ *
+ * BPF_TASK_ITER_TID
+ * Iterate over resources of a task/tid.
+ *
+ * BPF_TASK_ITER_TGID
+ * Iterate over resources of every task of a process / task group.
+ */
+enum bpf_iter_task_type {
+ BPF_TASK_ITER_ALL = 0,
+ BPF_TASK_ITER_TID,
+ BPF_TASK_ITER_TGID,
+};
+
struct bpf_iter_aux_info {
+ /* for map_elem iter */
struct bpf_map *map;
+
+ /* for cgroup iter */
+ struct {
+ struct cgroup *start; /* starting cgroup */
+ enum bpf_cgroup_iter_order order;
+ } cgroup;
+ struct {
+ enum bpf_iter_task_type type;
+ u32 pid;
+ } task;
};
typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+typedef const struct bpf_func_proto *
+(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
+
+enum bpf_iter_feature {
+ BPF_ITER_RESCHED = BIT(0),
+};
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
const char *target;
bpf_iter_attach_target_t attach_target;
bpf_iter_detach_target_t detach_target;
+ bpf_iter_show_fdinfo_t show_fdinfo;
+ bpf_iter_fill_link_info_t fill_link_info;
+ bpf_iter_get_func_proto_t get_func_proto;
u32 ctx_arg_info_size;
+ u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
const struct bpf_iter_seq_info *seq_info;
};
@@ -1245,11 +2427,21 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
-int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+const struct bpf_func_proto *
+bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
+void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
@@ -1268,50 +2460,42 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
-int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
-/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
- * forced to use 'long' read/writes to try to atomically copy long counters.
- * Best-effort only. No barriers here, since it _will_ race with concurrent
- * updates from BPF programs. Called from bpf syscall and mostly used with
- * size 8 or 16 bytes, so ask compiler to inline it.
- */
-static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
-{
- const long *lsrc = src;
- long *ldst = dst;
-
- size /= sizeof(long);
- while (size--)
- *ldst++ = *lsrc++;
-}
-
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
- union bpf_attr __user *uattr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+#endif
+
+struct btf *bpf_get_btf_vmlinux(void);
/* Map specifics */
-struct xdp_buff;
+struct xdp_frame;
struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx);
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
-bool dev_map_can_have_prog(struct bpf_map *map);
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress);
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
-bool cpu_map_prog_allowed(struct bpf_map *map);
+int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb);
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1333,15 +2517,49 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_nf(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
+
+static inline bool bpf_tracing_ctx_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+static inline bool bpf_tracing_btf_ctx_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_tracing_ctx_access(off, size, type))
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
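
A tracing-style program type would typically wire these into its is_valid_access callback; a minimal sketch (the callback name and prog type are hypothetical):

static bool my_tracing_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	/* Bounds/alignment checks first, then BTF-based type checking. */
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}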
+
int btf_struct_access(struct bpf_verifier_log *log,
- const struct btf_type *t, int off, int size,
- enum bpf_access_type atype,
- u32 *next_btf_id);
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
- const struct bpf_func_proto *fn, int);
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
+bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ const struct btf *btf, u32 id, int off,
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
@@ -1350,16 +2568,68 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf_func_model *m);
struct bpf_reg_state;
-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
-int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg);
-int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
+int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key);
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id);
struct bpf_prog *bpf_prog_by_id(u32 id);
+struct bpf_link *bpf_link_by_id(u32 id);
+
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
+void bpf_task_storage_free(struct task_struct *task);
+void bpf_cgrp_storage_free(struct cgroup *cgroup);
+bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
+const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn);
+int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr);
+
+struct bpf_core_ctx {
+ struct bpf_verifier_log *log;
+ const struct btf *btf;
+};
+
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ const char *field_name, u32 btf_id, const char *suffix);
+
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id);
+
+int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ int relo_idx, void *insn);
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return !sysctl_unprivileged_bpf_disabled;
+}
+
+/* Not all bpf prog types have the bpf_ctx.
+ * For the prog types that have initialized the bpf_ctx,
+ * this function can be used to decide if a kernel function
+ * is called by a bpf program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+ return !!current->bpf_ctx;
+}
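
A sketch of the intended use, modeled on the sockopt helpers that are shared between the syscall and BPF paths (treat the lock call as illustrative):

/* BPF callers already hold the socket lock; only the
 * syscall path needs to take it here.
 */
if (!has_current_bpf_ctx())
	lock_sock(sk);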
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
+
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
-const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+bool dev_check_flush(void);
+bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -1395,15 +2665,6 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
-{
- return 0;
-}
-
-static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
-{
-}
-
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops,
struct bpf_prog *prog)
@@ -1438,43 +2699,53 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
return -EOPNOTSUPP;
}
-static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
- u32 key)
+static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
{
- return NULL;
+ return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
}
-static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
- u32 key)
+static inline void bpf_token_inc(struct bpf_token *token)
{
- return NULL;
}
-static inline bool dev_map_can_have_prog(struct bpf_map *map)
+
+static inline void bpf_token_put(struct bpf_token *token)
{
- return false;
+}
+
+static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void __dev_flush(void)
{
}
-struct xdp_buff;
+struct xdp_frame;
struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
static inline
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
static inline
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
+static inline
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
@@ -1485,9 +2756,11 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
}
static inline
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress)
{
- return NULL;
+ return 0;
}
static inline void __cpu_map_flush(void)
@@ -1495,15 +2768,16 @@ static inline void __cpu_map_flush(void)
}
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
-static inline bool cpu_map_prog_allowed(struct bpf_map *map)
+static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb)
{
- return false;
+ return -EOPNOTSUPP;
}
static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
@@ -1540,6 +2814,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
return -ENOTSUPP;
}
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
static inline void bpf_map_put(struct bpf_map *map)
{
}
@@ -1549,23 +2830,104 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
return ERR_PTR(-ENOTSUPP);
}
+static inline int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag,
+ const char **field_name)
+{
+ return -EACCES;
+}
+
static inline const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
return NULL;
}
+
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
+
+static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn)
+{
+ return NULL;
+}
+
+static inline int
+bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr)
+{
+ return -ENOTSUPP;
+}
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return false;
+}
+
+static inline bool has_current_bpf_ctx(void)
+{
+ return false;
+}
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
+
+static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
+{
+}
+
+static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size)
+{
+}
+
+static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
+{
+}
+
+static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+{
+ int ret = -EFAULT;
+
+ if (IS_ENABLED(CONFIG_BPF_EVENTS))
+ ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+ return ret;
+}
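
This is the common body shared by the probe-read helpers; a sketch of a helper built on it (the name is hypothetical, mirroring bpf_probe_read_kernel):

BPF_CALL_3(my_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	/* On fault, dst has already been zeroed, so the program
	 * never observes uninitialized memory.
	 */
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}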
+
+void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ struct btf_mod_pair *used_btfs, u32 len);
+
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return bpf_prog_get_type_dev(ufd, type, false);
}
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
+
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
-void bpf_prog_offload_destroy(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);
@@ -1590,34 +2952,94 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
+void unpriv_ebpf_notify(int new_state);
+
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
+ return aux->dev_bound;
+}
+
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
+{
return aux->offload_requested;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return unlikely(map->ops == &bpf_map_offload_ops);
}
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
+int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
#else
-static inline int bpf_prog_offload_init(struct bpf_prog *prog,
- union bpf_attr *attr)
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
{
return -EOPNOTSUPP;
}
-static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+ u32 func_id)
+{
+ return NULL;
+}
+
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
return false;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
+{
+ return false;
+}
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return false;
}
@@ -1630,23 +3052,20 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
-#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
- struct bpf_prog *old, u32 which);
-int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
-int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
-void sock_map_unhash(struct sock *sk);
-void sock_map_close(struct sock *sk, long timeout);
-#else
-static inline int sock_map_prog_update(struct bpf_map *map,
- struct bpf_prog *prog,
- struct bpf_prog *old, u32 which)
+static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
- return -EOPNOTSUPP;
+ return 0;
}
+static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
struct bpf_prog *prog)
{
@@ -1658,7 +3077,36 @@ static inline int sock_map_prog_detach(const union bpf_attr *attr,
{
return -EOPNOTSUPP;
}
-#endif /* CONFIG_BPF_STREAM_PARSER */
+
+static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+ const struct bpf_prog_array_item *item;
+ struct bpf_prog *prog;
+
+ if (unlikely(!array))
+ return;
+
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ bpf_prog_inc_misses_counter(prog);
+ item++;
+ }
+}
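
A sketch of when this is called, following the trace_call_bpf() recursion-protection pattern (call->prog_array stands in for the real attach point):

if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
	/* Recursion detected: count a miss for each program
	 * instead of running the array.
	 */
	bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
	goto out;
}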
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
@@ -1694,6 +3142,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -1701,6 +3150,7 @@ extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
@@ -1713,6 +3163,7 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
@@ -1731,14 +3182,42 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
-
-const struct bpf_func_proto *bpf_tracing_func_proto(
- enum bpf_func_id func_id, const struct bpf_prog *prog);
+extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_proto;
+extern const struct bpf_func_proto bpf_snprintf_btf_proto;
+extern const struct bpf_func_proto bpf_snprintf_proto;
+extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
+extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
+extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
+extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
+extern const struct bpf_func_proto bpf_loop_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_set_retval_proto;
+extern const struct bpf_func_proto bpf_get_retval_proto;
+extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -1759,6 +3238,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
+int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
+ struct bpf_dynptr_kern *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -1780,6 +3261,11 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
+static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
+ struct bpf_dynptr_kern *ptr)
+{
+ return -EOPNOTSUPP;
+}
#endif
#ifdef CONFIG_INET
@@ -1787,6 +3273,7 @@ struct sk_reuseport_kern {
struct sk_buff *skb;
struct sock *sk;
struct sock *selected_sk;
+ struct sock *migrating_sk;
void *data_end;
u32 hash;
u32 reuseport_id;
@@ -1850,4 +3337,61 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old);
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+int bpf_arch_text_invalidate(void *dst, size_t len);
+
+struct btf_id_set;
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+
+#define MAX_BPRINTF_VARARGS 12
+#define MAX_BPRINTF_BUF 1024
+
+struct bpf_bprintf_data {
+ u32 *bin_args;
+ char *buf;
+ bool get_bin_args;
+ bool get_buf;
+};
+
+int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ u32 num_args, struct bpf_bprintf_data *data);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
+struct key;
+
+#ifdef CONFIG_KEYS
+struct bpf_key {
+ struct key *key;
+ bool has_ref;
+};
+#endif /* CONFIG_KEYS */
+
+static inline bool type_is_alloc(u32 type)
+{
+ return type & MEM_ALLOC;
+}
+
+static inline gfp_t bpf_memcg_flags(gfp_t flags)
+{
+ if (memcg_bpf_enabled())
+ return flags | __GFP_ACCOUNT;
+ return flags;
+}
+
+static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+{
+ return prog->aux->func_idx != 0;
+}
+
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
new file mode 100644
index 000000000000..dcddb0aef7d8
--- /dev/null
+++ b/include/linux/bpf_local_storage.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#ifndef _BPF_LOCAL_STORAGE_H
+#define _BPF_LOCAL_STORAGE_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/bpf_mem_alloc.h>
+#include <uapi/linux/btf.h>
+
+#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
+
+#define bpf_rcu_lock_held() \
+ (rcu_read_lock_held() || rcu_read_lock_trace_held() || \
+ rcu_read_lock_bh_held())
+struct bpf_local_storage_map_bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/* The map is not the primary owner of a bpf_local_storage_elem.
+ * Instead, the container object (e.g. sk->sk_bpf_storage) is.
+ *
+ * The map (bpf_local_storage_map) is for two purposes
+ * 1. Define the size of the "local storage". It is
+ * the map's value_size.
+ *
+ * 2. Maintain a list to keep track of all elems such
+ * that they can be cleaned up during the map destruction.
+ *
+ * When a bpf local storage is being looked up for a
+ * particular object, the "bpf_map" pointer is actually used
+ * as the "key" to search in the list of elem in
+ * the respective bpf_local_storage owned by the object.
+ *
+ * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
+ * as the searching key.
+ */
+struct bpf_local_storage_map {
+ struct bpf_map map;
+ /* Lookup elem does not require accessing the map.
+ *
+ * Updating/Deleting requires a bucket lock to
+ * link/unlink the elem from the map. Multiple
+ * buckets are used to reduce lock contention.
+ */
+ struct bpf_local_storage_map_bucket *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+ struct bpf_mem_alloc selem_ma;
+ struct bpf_mem_alloc storage_ma;
+ bool bpf_ma;
+};
+
+struct bpf_local_storage_data {
+ /* smap is used as the searching key when looking up
+ * from the object's bpf_local_storage.
+ *
+ * Put it in the same cacheline as the data to minimize
+ * the number of cachelines accessed during the cache hit case.
+ */
+ struct bpf_local_storage_map __rcu *smap;
+ u8 data[] __aligned(8);
+};
+
+/* Linked to bpf_local_storage and bpf_local_storage_map */
+struct bpf_local_storage_elem {
+ struct hlist_node map_node; /* Linked to bpf_local_storage_map */
+ struct hlist_node snode; /* Linked to bpf_local_storage */
+ struct bpf_local_storage __rcu *local_storage;
+ struct rcu_head rcu;
+ /* 8 bytes hole */
+ /* The data is stored in another cacheline to minimize
+ * the number of cachelines accessed during a cache hit.
+ */
+ struct bpf_local_storage_data sdata ____cacheline_aligned;
+};
+
+struct bpf_local_storage {
+ struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
+ struct bpf_local_storage_map __rcu *smap;
+ struct hlist_head list; /* List of bpf_local_storage_elem */
+ void *owner; /* The object that owns the above "list" of
+ * bpf_local_storage_elem.
+ */
+ struct rcu_head rcu;
+ raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+};
+
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - \
+ sizeof(struct bpf_local_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_local_storage_elem)))
+
+#define SELEM(_SDATA) \
+ container_of((_SDATA), struct bpf_local_storage_elem, sdata)
+#define SDATA(_SELEM) (&(_SELEM)->sdata)
+
+struct bpf_local_storage_cache {
+ spinlock_t idx_lock;
+ u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
+};
+
+#define DEFINE_BPF_STORAGE_CACHE(name) \
+static struct bpf_local_storage_cache name = { \
+ .idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \
+}
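
Each local-storage flavor declares one cache and passes it to the map allocator; a sketch following the sk-storage usage (the wrapper name is illustrative):

DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_map *my_storage_map_alloc(union bpf_attr *attr)
{
	/* true: use the bpf_mem_alloc backends embedded in the map */
	return bpf_local_storage_map_alloc(attr, &sk_cache, true);
}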
+
+/* Helper functions for bpf_local_storage */
+int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
+
+struct bpf_map *
+bpf_local_storage_map_alloc(union bpf_attr *attr,
+ struct bpf_local_storage_cache *cache,
+ bool bpf_ma);
+
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+/* If cacheit_lockit is false, this lookup function is lockless */
+static inline struct bpf_local_storage_data *
+bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage_data *sdata;
+ struct bpf_local_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
+ bpf_rcu_lock_held());
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
+ rcu_read_lock_trace_held())
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+ if (cacheit_lockit)
+ __bpf_local_storage_insert_cache(local_storage, smap, selem);
+ return SDATA(selem);
+}
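
A lookup sketch under the relaxed locking rules asserted by bpf_rcu_lock_held() (the sk->sk_bpf_storage naming follows the sk-storage flavor; the cast is illustrative):

struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
struct bpf_local_storage *local_storage;
struct bpf_local_storage_data *sdata;

local_storage = rcu_dereference_check(sk->sk_bpf_storage,
				      bpf_rcu_lock_held());
if (local_storage)
	/* true: promote a slow-path hit into the cache (takes the lock) */
	sdata = bpf_local_storage_lookup(local_storage, smap, true);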
+
+void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+
+void bpf_local_storage_map_free(struct bpf_map *map,
+ struct bpf_local_storage_cache *cache,
+ int __percpu *busy_counter);
+
+int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
+
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
+
+void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+
+struct bpf_local_storage_elem *
+bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
+ bool charge_mem, gfp_t gfp_flags);
+
+void bpf_selem_free(struct bpf_local_storage_elem *selem,
+ struct bpf_local_storage_map *smap,
+ bool reuse_now);
+
+int
+bpf_local_storage_alloc(void *owner,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *first_selem,
+ gfp_t gfp_flags);
+
+struct bpf_local_storage_data *
+bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ void *value, u64 map_flags, gfp_t gfp_flags);
+
+u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
+
+#endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index af74712af585..1de7ece5d36d 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_BPF_LSM_H
#define _LINUX_BPF_LSM_H
+#include <linux/sched.h>
#include <linux/bpf.h>
#include <linux/lsm_hooks.h>
@@ -17,17 +18,66 @@
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
+struct bpf_storage_blob {
+ struct bpf_local_storage __rcu *storage;
+};
+
+extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
+
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog);
+bool bpf_lsm_is_sleepable_hook(u32 btf_id);
+bool bpf_lsm_is_trusted(const struct bpf_prog *prog);
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+
+ return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
+}
+
+extern const struct bpf_func_proto bpf_inode_storage_get_proto;
+extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
+void bpf_inode_storage_free(struct inode *inode);
+
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+
#else /* !CONFIG_BPF_LSM */
+static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
+{
+ return false;
+}
+
+static inline bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
+{
+ return false;
+}
+
static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
return -EOPNOTSUPP;
}
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline void bpf_inode_storage_free(struct inode *inode)
+{
+}
+
+static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+ bpf_func_t *bpf_func)
+{
+}
+
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
new file mode 100644
index 000000000000..aaf004d94322
--- /dev/null
+++ b/include/linux/bpf_mem_alloc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_MEM_ALLOC_H
+#define _BPF_MEM_ALLOC_H
+#include <linux/compiler_types.h>
+#include <linux/workqueue.h>
+
+struct bpf_mem_cache;
+struct bpf_mem_caches;
+
+struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
+ struct obj_cgroup *objcg;
+ bool percpu;
+ struct work_struct work;
+};
+
+/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
+ * Alloc and free are done with bpf_mem_cache_{alloc,free}().
+ *
+ * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
+ * Alloc and free are done with bpf_mem_{alloc,free}() and the size of
+ * the returned object is given by the size argument of bpf_mem_alloc().
+ * If percpu is true, an error is returned, to avoid large memory
+ * consumption; the below bpf_mem_alloc_percpu_unit_init() should be
+ * used instead to do on-demand per-cpu allocation for each size.
+ */
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+/* Initialize a non-fix-size percpu memory allocator */
+int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg);
+/* The percpu allocation with a specific unit size. */
+int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
+void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+
+/* kmalloc/kfree equivalent: */
+void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
+void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
+
+/* kmem_cache_alloc/free equivalent: */
+void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
+void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_raw_free(void *ptr);
+void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
+
+#endif /* _BPF_MEM_ALLOC_H */
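
A usage sketch for the fixed-size flavor of the allocator (the unit size and calling context are illustrative; alloc/free are designed to be usable from restrictive contexts once the cache is initialized):

struct bpf_mem_alloc ma;
void *obj;

if (bpf_mem_alloc_init(&ma, 64, false /* !percpu */))
	return -ENOMEM;

obj = bpf_mem_cache_alloc(&ma);		/* kmem_cache_alloc-like */
if (obj)
	bpf_mem_cache_free(&ma, obj);	/* may defer reuse via RCU */

bpf_mem_alloc_destroy(&ma);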
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
new file mode 100644
index 000000000000..929225f7b095
--- /dev/null
+++ b/include/linux/bpf_mprog.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __BPF_MPROG_H
+#define __BPF_MPROG_H
+
+#include <linux/bpf.h>
+
+/* bpf_mprog framework:
+ *
+ * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
+ * of bpf_mprog don't need to care about the dependency resolution
+ * internals; they can consume it with a few API calls. Currently available
+ * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of
+ * a BPF program or BPF link relative to an existing BPF program or BPF link
+ * inside the multi-program array as well as prepend and append behavior if
+ * no relative object was specified, see corresponding selftests for concrete
+ * examples (e.g. tc_links and tc_opts test cases of test_progs).
+ *
+ * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code:
+ *
+ * Attach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_attach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * if (entry != entry_new) {
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * }
+ * bpf_mprog_commit(entry);
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Detach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_detach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * // all (*) marked is optional and depends on the use-case
+ * // whether bpf_mprog_bundle should be freed or not
+ * if (!bpf_mprog_total(entry_new)) (*)
+ * entry_new = NULL (*)
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * bpf_mprog_commit(entry);
+ * if (!entry_new) (*)
+ * // free bpf_mprog_bundle (*)
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Query case:
+ *
+ * struct bpf_mprog_entry *entry;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_query(attr, uattr, entry);
+ * // bpf_mprog user-side unlock
+ *
+ * Data/fast path:
+ *
+ * struct bpf_mprog_entry *entry;
+ * struct bpf_mprog_fp *fp;
+ * struct bpf_prog *prog;
+ * int ret = [...];
+ *
+ * rcu_read_lock();
+ * // fetch active @entry from attach location
+ * [...]
+ * bpf_mprog_foreach_prog(entry, fp, prog) {
+ * ret = bpf_prog_run(prog, [...]);
+ * // process @ret from program
+ * }
+ * [...]
+ * rcu_read_unlock();
+ *
+ * bpf_mprog locking considerations:
+ *
+ * bpf_mprog_{attach,detach,query}() must be protected by an external lock
+ * (like RTNL in case of tcx).
+ *
+ * bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx
+ * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets
+ * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of
+ * the bpf_mprog_bundle.
+ *
+ * Fast path accesses the active bpf_mprog_entry within RCU critical section
+ * (in case of tcx it runs in NAPI which provides RCU protection there,
+ * other users might need explicit rcu_read_lock()). The bpf_mprog_commit()
+ * assumes that for the old bpf_mprog_entry there are no inflight users
+ * anymore.
+ *
+ * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for
+ * the replacement case where we don't swap the bpf_mprog_entry.
+ */
+
+#define bpf_mprog_foreach_tuple(entry, fp, cp, t) \
+ for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\
+ ({ \
+ t.prog = READ_ONCE(fp->prog); \
+ t.link = cp->link; \
+ t.prog; \
+ }); \
+ fp++, cp++)
+
+#define bpf_mprog_foreach_prog(entry, fp, p) \
+ for (fp = &entry->fp_items[0]; \
+ (p = READ_ONCE(fp->prog)); \
+ fp++)
+
+#define BPF_MPROG_MAX 64
+
+struct bpf_mprog_fp {
+ struct bpf_prog *prog;
+};
+
+struct bpf_mprog_cp {
+ struct bpf_link *link;
+};
+
+struct bpf_mprog_entry {
+ struct bpf_mprog_fp fp_items[BPF_MPROG_MAX];
+ struct bpf_mprog_bundle *parent;
+};
+
+struct bpf_mprog_bundle {
+ struct bpf_mprog_entry a;
+ struct bpf_mprog_entry b;
+ struct bpf_mprog_cp cp_items[BPF_MPROG_MAX];
+ struct bpf_prog *ref;
+ atomic64_t revision;
+ u32 count;
+};
+
+struct bpf_tuple {
+ struct bpf_prog *prog;
+ struct bpf_link *link;
+};
+
+static inline struct bpf_mprog_entry *
+bpf_mprog_peer(const struct bpf_mprog_entry *entry)
+{
+ if (entry == &entry->parent->a)
+ return &entry->parent->b;
+ else
+ return &entry->parent->a;
+}
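
The a/b entries implement double buffering: updates are staged in the peer entry and then published. A sketch of the attach-side flow (index selection and validation live in bpf_mprog_attach()):

struct bpf_mprog_entry *peer = bpf_mprog_peer(entry);

bpf_mprog_entry_copy(peer, entry);	/* start from the live state */
/* ... insert/replace/delete items in peer ... */
*entry_new = peer;	/* caller swaps the RCU pointer, then commits */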
+
+static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle)
+{
+ BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64));
+ BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) !=
+ ARRAY_SIZE(bundle->cp_items));
+
+ memset(bundle, 0, sizeof(*bundle));
+ atomic64_set(&bundle->revision, 1);
+ bundle->a.parent = bundle;
+ bundle->b.parent = bundle;
+}
+
+static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count++;
+}
+
+static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count--;
+}
+
+static inline int bpf_mprog_max(void)
+{
+ return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1;
+}
+
+static inline int bpf_mprog_total(struct bpf_mprog_entry *entry)
+{
+ int total = entry->parent->count;
+
+ WARN_ON_ONCE(total > bpf_mprog_max());
+ return total;
+}
+
+static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry,
+ struct bpf_prog *prog)
+{
+ const struct bpf_mprog_fp *fp;
+ const struct bpf_prog *tmp;
+
+ bpf_mprog_foreach_prog(entry, fp, tmp) {
+ if (tmp == prog)
+ return true;
+ }
+ return false;
+}
+
+static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry,
+ struct bpf_tuple *tuple)
+{
+ WARN_ON_ONCE(entry->parent->ref);
+ if (!tuple->link)
+ entry->parent->ref = tuple->prog;
+}
+
+static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry)
+{
+ /* In the non-link case prog deletions can only drop the reference
+ * to the prog after the bpf_mprog_entry got swapped and the
+ * bpf_mprog ensured that there are no inflight users anymore.
+ *
+ * Paired with bpf_mprog_mark_for_release().
+ */
+ if (entry->parent->ref) {
+ bpf_prog_put(entry->parent->ref);
+ entry->parent->ref = NULL;
+ }
+}
+
+static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry)
+{
+ atomic64_inc(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry)
+{
+ bpf_mprog_complete_release(entry);
+ bpf_mprog_revision_new(entry);
+}
+
+static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry)
+{
+ return atomic64_read(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst,
+ struct bpf_mprog_entry *src)
+{
+ memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items));
+}
+
+static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
+{
+ memset(dst->fp_items, 0, sizeof(dst->fp_items));
+}
+
+static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new)
+{
+ struct bpf_mprog_entry *peer;
+
+ peer = bpf_mprog_peer(entry);
+ bpf_mprog_entry_clear(peer);
+ peer->parent->count = 0;
+ *entry_new = peer;
+}
+
+static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
+{
+ int total = bpf_mprog_total(entry);
+
+ memmove(entry->fp_items + idx + 1,
+ entry->fp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx + 1,
+ entry->parent->cp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx)
+{
+ /* Total array size is needed in this case to ensure the NULL
+ * entry is copied at the end.
+ */
+ int total = ARRAY_SIZE(entry->fp_items);
+
+ memmove(entry->fp_items + idx,
+ entry->fp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx,
+ entry->parent->cp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx,
+ struct bpf_mprog_fp **fp,
+ struct bpf_mprog_cp **cp)
+{
+ *fp = &entry->fp_items[idx];
+ *cp = &entry->parent->cp_items[idx];
+}
+
+static inline void bpf_mprog_write(struct bpf_mprog_fp *fp,
+ struct bpf_mprog_cp *cp,
+ struct bpf_tuple *tuple)
+{
+ WRITE_ONCE(fp->prog, tuple->prog);
+ cp->link = tuple->link;
+}
+
+int bpf_mprog_attach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog_new, struct bpf_link *link,
+ struct bpf_prog *prog_old,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_detach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog, struct bpf_link *link,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ struct bpf_mprog_entry *entry);
+
+static inline bool bpf_mprog_supported(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /* __BPF_MPROG_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index a52a5688418e..9f2a6b83b49e 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -77,6 +77,12 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
void *, void *)
#endif /* CONFIG_BPF_LSM */
#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
+ void *, void *)
+#ifdef CONFIG_NETFILTER_BPF_LINK
+BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter,
+ struct bpf_nf_ctx, struct bpf_nf_ctx)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -84,6 +90,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGRP_STORAGE, cgrp_storage_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
@@ -99,19 +106,21 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_BPF_LSM
+BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
-#if defined(CONFIG_BPF_STREAM_PARSER)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
-#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif
@@ -121,6 +130,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
@@ -130,4 +142,14 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
+#endif
+#ifdef CONFIG_PERF_EVENTS
+BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
+BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53c7bd568c5d..7cb1b75eee38 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -5,6 +5,7 @@
#define _LINUX_BPF_VERIFIER_H 1
#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
@@ -17,6 +18,11 @@
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ (1 << 29)
+/* size of tmp_str_buf in bpf_verifier_env.
+ * We need at least 306 bytes to fit the full stack mask representation
+ * (in the "-8,-16,...,-512" form): 64 offsets take 243 sign/digit
+ * characters plus 63 separating commas.
+ */
+#define TMP_STR_BUF_LEN 320
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -40,27 +46,114 @@ enum bpf_reg_liveness {
REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
+/* For every reg representing a map value or allocated object pointer,
+ * we consider the tuple of (ptr, id) to be unique within the verifier
+ * context and consider such registers not to alias each other for the
+ * purposes of
+ * tracking lock state.
+ */
+struct bpf_active_lock {
+ /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
+ * there's no active lock held, and other fields have no
+ * meaning. If non-NULL, it indicates that a lock is held and
+ * id member has the reg->id of the register which can be >= 0.
+ */
+ void *ptr;
+ /* This will be reg->id */
+ u32 id;
+};
+
+#define ITER_PREFIX "bpf_iter_"
+
+enum bpf_iter_state {
+ BPF_ITER_STATE_INVALID, /* for non-first slot */
+ BPF_ITER_STATE_ACTIVE,
+ BPF_ITER_STATE_DRAINED,
+};
+
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
+ /* Fixed part of pointer offset, pointer types only */
+ s32 off;
union {
/* valid when type == PTR_TO_PACKET */
- u16 range;
+ int range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
- struct bpf_map *map_ptr;
+ struct {
+ struct bpf_map *map_ptr;
+ /* To distinguish map lookups from outer map
+ * the map_uid is non-zero for registers
+ * pointing to inner maps.
+ */
+ u32 map_uid;
+ };
- u32 btf_id; /* for PTR_TO_BTF_ID */
+ /* for PTR_TO_BTF_ID */
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
+
+ struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ u32 mem_size;
+ u32 dynptr_id; /* for dynptr slices */
+ };
- u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ /* For dynptr stack slots */
+ struct {
+ enum bpf_dynptr_type type;
+ /* A dynptr is 16 bytes so it takes up 2 stack slots.
+ * We need to track which slot is the first slot
+ * to protect against cases where the user may try to
+ * pass in an address starting at the second slot of the
+ * dynptr.
+ */
+ bool first_slot;
+ } dynptr;
+
+ /* For bpf_iter stack slots */
+ struct {
+ /* BTF container and BTF type ID describing
+ * struct bpf_iter_<type> of an iterator state
+ */
+ struct btf *btf;
+ u32 btf_id;
+ /* packing the following two fields to fit the iter state into 16 bytes */
+ enum bpf_iter_state state:2;
+ int depth:30;
+ } iter;
/* Max size from any of the above. */
- unsigned long raw;
+ struct {
+ unsigned long raw1;
+ unsigned long raw2;
+ } raw;
+
+ u32 subprogno; /* for PTR_TO_FUNC */
};
- /* Fixed part of pointer offset, pointer types only */
- s32 off;
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
+ /* Used to determine if any memory access using this register will
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
+ */
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
@@ -69,6 +162,10 @@ struct bpf_reg_state {
* for the purpose of tracking that it's freed.
* For PTR_TO_SOCKET this is used to share which pointers retain the
* same reference to the socket, to determine proper reference freeing.
+ * For stack slots that are dynptrs, this is used to track references to
+ * the dynptr to determine proper reference freeing.
+ * Similarly to dynptrs, we use ID to track "belonging" of a reference
+ * to a specific instance of bpf_iter.
*/
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
@@ -111,26 +208,6 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
- /* For scalar types (SCALAR_VALUE), this represents our knowledge of
- * the actual value.
- * For pointer types, this represents the variable part of the offset
- * from the pointed-to object, and is shared with all bpf_reg_states
- * with the same id as us.
- */
- struct tnum var_off;
- /* Used to determine if any memory access using this register will
- * result in a bad access.
- * These refer to the same value as var_off, not necessarily the actual
- * contents of the register.
- */
- s64 smin_value; /* minimum possible (s64)value */
- s64 smax_value; /* maximum possible (s64)value */
- u64 umin_value; /* minimum possible (u64)value */
- u64 umax_value; /* maximum possible (u64)value */
- s32 s32_min_value; /* minimum possible (s32)value */
- s32 s32_max_value; /* maximum possible (s32)value */
- u32 u32_min_value; /* minimum possible (u32)value */
- u32 u32_max_value; /* maximum possible (u32)value */
/* parentage chain for liveness checking */
struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
@@ -155,10 +232,22 @@ enum bpf_stack_slot_type {
STACK_SPILL, /* register spilled into stack */
STACK_MISC, /* BPF program wrote some data into this slot */
STACK_ZERO, /* BPF program wrote constant zero */
+ /* A dynptr is stored in this stack slot. The type of dynptr
+ * is stored in bpf_stack_state->spilled_ptr.dynptr.type
+ */
+ STACK_DYNPTR,
+ STACK_ITER,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
+ (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
+ (1 << BPF_REG_5))
+
+#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
+#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
+
struct bpf_stack_state {
struct bpf_reg_state spilled_ptr;
u8 slot_type[BPF_REG_SIZE];
@@ -173,6 +262,22 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
+ /* There can be a case like:
+ * main (frame 0)
+ * cb (frame 1)
+ * func (frame 2)
+ * cb (frame 3)
+ * Hence for frame 3, if callback_ref just stored a boolean, it would be
+ * impossible to distinguish nested callback refs. Hence store the
+ * frameno and compare that to callback_ref in check_reference_leak when
+ * exiting a callback function.
+ */
+ int callback_ref;
+};
+
+struct bpf_retval_range {
+ s32 minval;
+ s32 maxval;
};
/* state of the program:
@@ -187,24 +292,79 @@ struct bpf_func_state {
* 0 = main function, 1 = first callee.
*/
u32 frameno;
- /* subprog number == index within subprog_stack_depth
+ /* subprog number == index within subprog_info
* zero == main subprog
*/
u32 subprogno;
+ /* Every bpf_timer_start will increment async_entry_cnt.
+ * It's used to distinguish:
+ * void foo(void) { for(;;); }
+ * void foo(void) { bpf_timer_set_callback(,foo); }
+ */
+ u32 async_entry_cnt;
+ struct bpf_retval_range callback_ret_range;
+ bool in_callback_fn;
+ bool in_async_callback_fn;
+ bool in_exception_callback_fn;
+ /* For callback calling functions that limit number of possible
+ * callback executions (e.g. bpf_loop) keeps track of current
+ * simulated iteration number.
+ * Value in frame N refers to number of times callback with frame
+ * N+1 was simulated, e.g. for the following call:
+ *
+ * bpf_loop(..., fn, ...); | suppose current frame is N
+ * | fn would be simulated in frame N+1
+ * | number of simulations is tracked in frame N
+ */
+ u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
- int allocated_stack;
+ /* The state of the stack. Each element of the array describes BPF_REG_SIZE
+ * (i.e. 8) bytes worth of stack memory.
+ * stack[0] represents bytes [*(r10-8)..*(r10-1)]
+ * stack[1] represents bytes [*(r10-16)..*(r10-9)]
+ * ...
+ * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
+ */
struct bpf_stack_state *stack;
+ /* Size of the current stack, in bytes. The stack state is tracked below, in
+ * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
+ */
+ int allocated_stack;
+};
+
+#define MAX_CALL_FRAMES 8
+
+/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+enum {
+ /* instruction references a stack slot through a PTR_TO_STACK register;
+ * we also store the stack frame number in the lower 3 bits (MAX_CALL_FRAMES is 8)
+ * and the accessed stack slot's index in the next 6 bits (MAX_BPF_STACK is 512,
+ * 8 bytes per slot, so slot index (spi) is [0, 63])
+ */
+ INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
+
+ INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+
+ INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
};
-struct bpf_idx_pair {
- u32 prev_idx;
+static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+
+struct bpf_jmp_history_entry {
u32 idx;
+ /* insn idx can't be bigger than 1 million */
+ u32 prev_idx : 22;
+ /* special flags, e.g., whether insn is doing register stack spill/load */
+ u32 flags : 10;
};
-#define MAX_CALL_FRAMES 8
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
@@ -245,7 +405,7 @@ struct bpf_verifier_state {
* If is_state_visited() sees a state with branches > 0 it means
* there is a loop. If such state is exactly equal to the current state
* it's an infinite loop. Note states_equal() checks for states
- * equvalency, so two states being 'states_equal' does not mean
+ * equivalency, so two states being 'states_equal' does not mean
* infinite loop. The exact comparison is provided by
* states_maybe_looping() function. It's a stronger pre-check and
* much faster than states_equal().
@@ -257,31 +417,75 @@ struct bpf_verifier_state {
u32 branches;
u32 insn_idx;
u32 curframe;
- u32 active_spin_lock;
+
+ struct bpf_active_lock active_lock;
bool speculative;
+ bool active_rcu_lock;
+ /* If this state was ever pointed-to by other state's loop_entry field
+ * this flag would be set to true. Used to avoid freeing such states
+ * while they are still in use.
+ */
+ bool used_as_loop_entry;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
u32 last_insn_idx;
+ /* If this state is a part of states loop this field points to some
+ * parent of this state such that:
+ * - it is also a member of the same states loop;
+ * - DFS states traversal starting from initial state visits loop_entry
+ * state before this state.
+ * Used to compute topmost loop entry for state loops.
+ * State loops might appear because of open coded iterators logic.
+ * See get_loop_entry() for more information.
+ */
+ struct bpf_verifier_state *loop_entry;
/* jmp history recorded from first to last.
* backtracking is using it to go from last to first.
* For most states jmp_history_cnt is [0-3].
* For loops can go up to ~40.
*/
- struct bpf_idx_pair *jmp_history;
+ struct bpf_jmp_history_entry *jmp_history;
u32 jmp_history_cnt;
+ u32 dfs_depth;
+ u32 callback_unroll_depth;
+ u32 may_goto_depth;
};
-#define bpf_get_spilled_reg(slot, frame) \
+#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
- (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
-#define bpf_for_each_spilled_reg(iter, frame, reg) \
- for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
iter < frame->allocated_stack / BPF_REG_SIZE; \
- iter++, reg = bpf_get_spilled_reg(iter, frame))
+ iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
+
+#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
+ ({ \
+ struct bpf_verifier_state *___vstate = __vst; \
+ int ___i, ___j; \
+ for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
+ struct bpf_reg_state *___regs; \
+ __state = ___vstate->frame[___i]; \
+ ___regs = __state->regs; \
+ for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
+ __reg = &___regs[___j]; \
+ (void)(__expr); \
+ } \
+ bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
+ if (!__reg) \
+ continue; \
+ (void)(__expr); \
+ } \
+ } \
+ })
+
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
@@ -290,11 +494,20 @@ struct bpf_verifier_state_list {
int miss_cnt, hit_cnt;
};
+struct bpf_loop_inline_state {
+ unsigned int initialized:1; /* set to true upon first entry */
+ unsigned int fit_for_inline:1; /* true if callback function is the same
+ * at each call and flags are always zero
+ */
+ u32 callback_subprogno; /* valid when fit_for_inline is true */
+};
+
/* Possible states for alu_state member. */
-#define BPF_ALU_SANITIZE_SRC 1U
-#define BPF_ALU_SANITIZE_DST 2U
+#define BPF_ALU_SANITIZE_SRC (1U << 0)
+#define BPF_ALU_SANITIZE_DST (1U << 1)
#define BPF_ALU_NEG_VALUE (1U << 2)
#define BPF_ALU_NON_POINTER (1U << 3)
+#define BPF_ALU_IMMEDIATE (1U << 4)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
@@ -308,56 +521,141 @@ struct bpf_insn_aux_data {
u32 map_index; /* index into used_maps[] */
u32 map_off; /* offset from value base address */
};
+ struct {
+ enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
+ union {
+ struct {
+ struct btf *btf;
+ u32 btf_id; /* btf_id for struct typed var */
+ };
+ u32 mem_size; /* mem_size for non-struct typed var */
+ };
+ } btf_var;
+ /* if instruction is a call to bpf_loop this field tracks
+ * the state of the relevant registers to make decision about inlining
+ */
+ struct bpf_loop_inline_state loop_inline_state;
+ };
+ union {
+ /* remember the size of type passed to bpf_obj_new to rewrite R1 */
+ u64 obj_new_size;
+ /* remember the offset of node field within type to rewrite */
+ u64 insert_off;
};
+ struct btf_struct_meta *kptr_struct_meta;
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int sanitize_stack_off; /* stack slot to be cleared */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool zext_dst; /* this insn zero extends dst reg */
+ bool needs_zext; /* alu op needs to clear upper bits */
+ bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
+ bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
+ bool jmp_point;
bool prune_point;
+ /* ensure we check state equivalence and save a state checkpoint at
+ * this instruction, regardless of any heuristics
+ */
+ bool force_checkpoint;
+ /* true if instruction is a call to a helper function that
+ * accepts callback function as a parameter.
+ */
+ bool calls_callback;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
struct bpf_verifier_log {
- u32 level;
- char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
+ /* Logical start and end positions of a "log window" of the verifier log.
+ * start_pos == 0 means we haven't truncated anything.
+ * Once truncation starts to happen, start_pos + len_total == end_pos,
+ * except during log reset situations, in which (end_pos - start_pos)
+ * might get smaller than len_total (see bpf_vlog_reset()).
+ * Generally, (end_pos - start_pos) gives number of useful data in
+ * user log buffer.
+ */
+ u64 start_pos;
+ u64 end_pos;
char __user *ubuf;
- u32 len_used;
+ u32 level;
u32 len_total;
+ u32 len_max;
+ char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};
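+
+/* Example (illustrative): with len_total == 1024 and 1500 bytes logged in
+ * the default rotating mode, start_pos == 476 and end_pos == 1500, i.e.
+ * ubuf retains only the most recent 1024 bytes of output.
+ */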
-static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
-{
- return log->len_used >= log->len_total - 1;
-}
-
#define BPF_LOG_LEVEL1 1
#define BPF_LOG_LEVEL2 2
#define BPF_LOG_STATS 4
+#define BPF_LOG_FIXED 8
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
-#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
+#define BPF_LOG_MIN_ALIGNMENT 8U
+#define BPF_LOG_ALIGNMENT 40U
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
- return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
- log->level == BPF_LOG_KERNEL;
+ return log && log->level;
}
#define BPF_MAX_SUBPROGS 256
+struct bpf_subprog_arg_info {
+ enum bpf_arg_type arg_type;
+ union {
+ u32 mem_size;
+ u32 btf_id;
+ };
+};
+
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
+ u16 stack_extra;
+ bool has_tail_call: 1;
+ bool tail_call_reachable: 1;
+ bool has_ld_abs: 1;
+ bool is_cb: 1;
+ bool is_async_cb: 1;
+ bool is_exception_cb: 1;
+ bool args_cached: 1;
+
+ u8 arg_cnt;
+ struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
+};
+
+struct bpf_verifier_env;
+
+struct backtrack_state {
+ struct bpf_verifier_env *env;
+ u32 frame;
+ u32 reg_masks[MAX_CALL_FRAMES];
+ u64 stack_masks[MAX_CALL_FRAMES];
+};
+
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
+};
+
+struct bpf_idmap {
+ u32 tmp_id_gen;
+ struct bpf_id_pair map[BPF_ID_MAP_SIZE];
+};
+
+struct bpf_idset {
+ u32 count;
+ u32 ids[BPF_ID_MAP_SIZE];
};
/* single container for all structs
@@ -368,31 +666,49 @@ struct bpf_verifier_env {
u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
+ struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
bool test_state_freq; /* test verifier with different pruning frequency */
+ bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
+ u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ u32 hidden_subprog_cnt; /* number of hidden subprogs */
+ int exception_callback_subprog;
+ bool explore_alu_limits;
bool allow_ptr_leaks;
- bool allow_ptr_to_map_access;
+ /* Allow access to uninitialized stack memory. Writes with fixed offset are
+ * always allowed, so this refers to reads (with fixed or variable offset),
+ * to writes with variable offset and to indirect (helper) accesses.
+ */
+ bool allow_uninit_stack;
bool bpf_capable;
bool bypass_spec_v1;
bool bypass_spec_v4;
bool seen_direct_write;
+ bool seen_exception;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
- struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+ union {
+ struct bpf_idmap idmap_scratch;
+ struct bpf_idset idset_scratch;
+ };
struct {
int *insn_state;
int *insn_stack;
int cur_stack;
} cfg;
+ struct backtrack_state bt;
+ struct bpf_jmp_history_entry *cur_hist_ent;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -412,14 +728,45 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ bpfptr_t fd_array;
+
+ /* bit mask to keep track of whether a register has been accessed
+ * since the last time the function state was printed
+ */
+ u32 scratched_regs;
+ /* Same as scratched_regs but for stack slots */
+ u64 scratched_stack_slots;
+ u64 prev_log_pos, prev_insn_print_pos;
+ /* buffer used to generate temporary string representations,
+ * e.g., in reg_type_str() to generate reg_type string
+ */
+ char tmp_str_buf[TMP_STR_BUF_LEN];
};
+static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->prog->aux->func_info_aux[subprog];
+}
+
+static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->subprog_info[subprog];
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
const char *fmt, ...);
+int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
+ char __user *log_buf, u32 log_size);
+void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
+int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
+
+__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...);
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
@@ -443,7 +790,154 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
-int check_ctx_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno);
+/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
+static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
+ struct btf *btf, u32 btf_id)
+{
+ if (tgt_prog)
+ return ((u64)tgt_prog->aux->id << 32) | btf_id;
+ else
+ return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
+}
+
+/* unpack the IDs from the key as constructed above */
+static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
+{
+ if (obj_id)
+ *obj_id = key >> 32;
+ if (btf_id)
+ *btf_id = key & 0x7FFFFFFF;
+}
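+
+/* Round-trip sketch (illustrative, with a made-up btf_id; btf_id stays
+ * below bit 31 in practice):
+ *
+ *	u64 key = bpf_trampoline_compute_key(tgt_prog, NULL, 42);
+ *	u32 obj_id, fn_id;
+ *
+ *	bpf_trampoline_unpack_key(key, &obj_id, &fn_id);
+ *	obj_id is now tgt_prog->aux->id and fn_id is 42
+ */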
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+ const struct bpf_prog *prog,
+ const struct bpf_prog *tgt_prog,
+ u32 btf_id,
+ struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
+int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+
+#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
+
+/* extract base type from bpf_{arg, return, reg}_type. */
+static inline u32 base_type(u32 type)
+{
+ return type & BPF_BASE_TYPE_MASK;
+}
+
+/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
+static inline u32 type_flag(u32 type)
+{
+ return type & ~BPF_BASE_TYPE_MASK;
+}
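+
+/* Decomposition sketch (illustrative; PTR_MAYBE_NULL is one of the
+ * bpf_type_flag modifiers in bpf.h):
+ *
+ *	u32 t = PTR_TO_MAP_VALUE | PTR_MAYBE_NULL;
+ *
+ *	base_type(t) == PTR_TO_MAP_VALUE
+ *	type_flag(t) == PTR_MAYBE_NULL
+ */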
+
+/* only use after check_attach_btf_id() */
+static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+{
+ return prog->type == BPF_PROG_TYPE_EXT ?
+ prog->aux->dst_prog->type : prog->type;
+}
+
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+ switch (resolve_prog_type(prog)) {
+ case BPF_PROG_TYPE_TRACING:
+ return prog->expected_attach_type != BPF_TRACE_ITER;
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ case BPF_PROG_TYPE_LSM:
+ return false;
+ default:
+ return true;
+ }
+}
+
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
+
+static inline bool bpf_type_has_unsafe_modifiers(u32 type)
+{
+ return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
+}
+
+static inline bool type_is_ptr_alloc_obj(u32 type)
+{
+ return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
+}
+
+static inline bool type_is_non_owning_ref(u32 type)
+{
+ return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+}
+
+static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
+{
+ type = base_type(type);
+ return type == PTR_TO_PACKET ||
+ type == PTR_TO_PACKET_META;
+}
+
+static inline bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_XDP_SOCK;
+}
+
+static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
+{
+ env->scratched_regs |= 1U << regno;
+}
+
+static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
+{
+ env->scratched_stack_slots |= 1ULL << spi;
+}
+
+static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
+{
+ return (env->scratched_regs >> regno) & 1;
+}
+
+static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
+{
+ return (env->scratched_stack_slots >> regno) & 1;
+}
+
+static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
+{
+ return env->scratched_regs || env->scratched_stack_slots;
+}
+
+static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = 0U;
+ env->scratched_stack_slots = 0ULL;
+}
+
+/* Used for printing the entire verifier state. */
+static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = ~0U;
+ env->scratched_stack_slots = ~0ULL;
+}
+
+static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
+{
+#ifdef __BIG_ENDIAN
+ off -= spill_size - fill_size;
+#endif
+
+ return !(off % BPF_REG_SIZE);
+}
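+
+/* Example (illustrative): for an 8-byte spill based at off -8, a 4-byte
+ * fill of the low half sits at off -8 on little-endian but at off -4 on
+ * big-endian, where the adjustment above maps -4 back to the slot base.
+ */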
+
+const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
+const char *dynptr_type_str(enum bpf_dynptr_type type);
+const char *iter_type_str(const struct btf *btf, u32 btf_id);
+const char *iter_state_str(enum bpf_iter_state state);
+
+void print_verifier_state(struct bpf_verifier_env *env,
+ const struct bpf_func_state *state, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
deleted file mode 100644
index 2ae3c8e1d83c..000000000000
--- a/include/linux/bpfilter.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BPFILTER_H
-#define _LINUX_BPFILTER_H
-
-#include <uapi/linux/bpfilter.h>
-#include <linux/usermode_driver.h>
-#include <linux/sockptr.h>
-
-struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
- int __user *optlen);
-void bpfilter_umh_cleanup(struct umd_info *info);
-
-struct bpfilter_umh_ops {
- struct umd_info info;
- /* since ip_getsockopt() can run in parallel, serialize access to umh */
- struct mutex lock;
- int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen, bool is_set);
- int (*start)(void);
-};
-extern struct bpfilter_umh_ops bpfilter_ops;
-#endif
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
new file mode 100644
index 000000000000..79b2f78eec1a
--- /dev/null
+++ b/include/linux/bpfptr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* A pointer that can point to either kernel or userspace memory. */
+#ifndef _LINUX_BPFPTR_H
+#define _LINUX_BPFPTR_H
+
+#include <linux/mm.h>
+#include <linux/sockptr.h>
+
+typedef sockptr_t bpfptr_t;
+
+static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
+{
+ return bpfptr.is_kernel;
+}
+
+static inline bpfptr_t KERNEL_BPFPTR(void *p)
+{
+ return (bpfptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline bpfptr_t USER_BPFPTR(void __user *p)
+{
+ return (bpfptr_t) { .user = p };
+}
+
+static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
+{
+ if (is_kernel)
+ return KERNEL_BPFPTR((void*) (uintptr_t) addr);
+ else
+ return USER_BPFPTR(u64_to_user_ptr(addr));
+}
+
+static inline bool bpfptr_is_null(bpfptr_t bpfptr)
+{
+ if (bpfptr_is_kernel(bpfptr))
+ return !bpfptr.kernel;
+ return !bpfptr.user;
+}
+
+static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
+{
+ if (bpfptr_is_kernel(*bpfptr))
+ bpfptr->kernel += val;
+ else
+ bpfptr->user += val;
+}
+
+static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
+ size_t offset, size_t size)
+{
+ if (!bpfptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
+}
+
+static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
+{
+ return copy_from_bpfptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
+}
+
+static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
+{
+ void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_bpfptr(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+
+static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
+{
+ if (bpfptr_is_kernel(src))
+ return strncpy_from_kernel_nofault(dst, src.kernel, count);
+ return strncpy_from_user(dst, src.user, count);
+}
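+
+/* Usage sketch (illustrative; attr_addr and uattr_is_kernel are placeholder
+ * names): wrap the incoming pointer once, then copy uniformly from either
+ * address space:
+ *
+ *	bpfptr_t uattr = make_bpfptr(attr_addr, uattr_is_kernel);
+ *	union bpf_attr attr;
+ *
+ *	if (copy_from_bpfptr(&attr, uattr, sizeof(attr)))
+ *		return -EFAULT;
+ */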
+
+#endif /* _LINUX_BPFPTR_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 6ad4c000661a..1394ba302367 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -11,11 +11,13 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5221 0x004061e0
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
#define PHY_ID_BCM5395 0x0143bcf0
#define PHY_ID_BCM53125 0x03625f20
+#define PHY_ID_BCM53128 0x03625e10
#define PHY_ID_BCM54810 0x03625d00
#define PHY_ID_BCM54811 0x03625cc0
#define PHY_ID_BCM5482 0x0143bcb0
@@ -30,6 +32,9 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM89610 0x03625cd0
+#define PHY_ID_BCM72113 0x35905310
+#define PHY_ID_BCM72116 0x35905350
+#define PHY_ID_BCM72165 0x35905340
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
@@ -40,6 +45,7 @@
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
#define PHY_ID_BCM7362 0x600d84b0
+#define PHY_ID_BCM74165 0x359052c0
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
@@ -47,6 +53,7 @@
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7712 0x35905330
#define PHY_ID_BCM_CYGNUS 0xae025200
#define PHY_ID_BCM_OMEGA 0xae025100
@@ -59,19 +66,12 @@
#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_OUI_6 0xae025000
-#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
-#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
-#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
-#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
-#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
-#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
-#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
-#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
-#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
-#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
-#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
-#define PHY_BRCM_EN_MASTER_MODE 0x00010000
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
+#define PHY_BRCM_EN_MASTER_MODE 0x00000010
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -89,7 +89,9 @@
#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_WOL 0x0e00 /* Wake-on-LAN expansion select register */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
@@ -135,6 +137,7 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
@@ -160,6 +163,7 @@
#define BCM_LED_SRC_OPENSHORT 0xb
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+#define BCM_LED_SRC_MASK GENMASK(3, 0)
/*
* Broadcom Multicolor LED configurations (expansion register 4)
@@ -196,6 +200,7 @@
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
@@ -204,11 +209,13 @@
#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */
#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */
-#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+#define BCM54XX_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
/* LED3 / ~LINKSPD[2] selector */
-#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+#define BCM54XX_SHD_LEDS_SHIFT(led) (4 * (led))
+#define BCM54XX_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
/* LED1 / ~LINKSPD[1] selector */
-#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS2 0x0e /* 01110: LED Selector 2 */
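+
+/* Example (illustrative): forcing LED1 on and LED3 off via LED Selector 1:
+ *
+ *	val = BCM54XX_SHD_LEDS1_LED1(BCM_LED_SRC_ON) |
+ *	      BCM54XX_SHD_LEDS1_LED3(BCM_LED_SRC_OFF);
+ */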
#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
@@ -221,6 +228,9 @@
/* 11111: Mode Control Register */
#define BCM54XX_SHD_MODE 0x1f
#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
+#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
+#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
@@ -234,6 +244,7 @@
#define MII_BCM54XX_EXP_EXP08 0x0F08
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100
#define MII_BCM54XX_EXP_EXP75 0x0f75
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
@@ -242,6 +253,15 @@
#define MII_BCM54XX_EXP_EXP97 0x0f97
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+/* Top-MISC expansion registers */
+#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06)
+#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0)
+#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
+#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+
+#define BCM54XX_TOP_MISC_LED_CTL (MII_BCM54XX_EXP_SEL_TOP + 0x0C)
+#define BCM54XX_LED4_SEL_INTR BIT(1)
+
/*
* BCM5482: Secondary SerDes registers
*/
@@ -256,12 +276,71 @@
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
-#define BCM54810_SHD_SCR3_TRDDAPD 0x0100
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
+
+/* Wake-on-LAN registers */
+#define BCM54XX_WOL_MAIN_CTL (MII_BCM54XX_EXP_SEL_WOL + 0x80)
+#define BCM54XX_WOL_EN BIT(0)
+#define BCM54XX_WOL_MODE_SINGLE_MPD 0
+#define BCM54XX_WOL_MODE_SINGLE_MPDSEC 1
+#define BCM54XX_WOL_MODE_DUAL 2
+#define BCM54XX_WOL_MODE_SHIFT 1
+#define BCM54XX_WOL_MODE_MASK 0x3
+#define BCM54XX_WOL_MP_MSB_FF_EN BIT(3)
+#define BCM54XX_WOL_SECKEY_OPT_4B 0
+#define BCM54XX_WOL_SECKEY_OPT_6B 1
+#define BCM54XX_WOL_SECKEY_OPT_8B 2
+#define BCM54XX_WOL_SECKEY_OPT_SHIFT 4
+#define BCM54XX_WOL_SECKEY_OPT_MASK 0x3
+#define BCM54XX_WOL_L2_TYPE_CHK BIT(6)
+#define BCM54XX_WOL_L4IPV4UDP_CHK BIT(7)
+#define BCM54XX_WOL_L4IPV6UDP_CHK BIT(8)
+#define BCM54XX_WOL_UDPPORT_CHK BIT(9)
+#define BCM54XX_WOL_CRC_CHK BIT(10)
+#define BCM54XX_WOL_SECKEY_MODE BIT(11)
+#define BCM54XX_WOL_RST BIT(12)
+#define BCM54XX_WOL_DIR_PKT_EN BIT(13)
+#define BCM54XX_WOL_MASK_MODE_DA_FF 0
+#define BCM54XX_WOL_MASK_MODE_DA_MPD 1
+#define BCM54XX_WOL_MASK_MODE_DA_ONLY 2
+#define BCM54XX_WOL_MASK_MODE_MPD 3
+#define BCM54XX_WOL_MASK_MODE_SHIFT 14
+#define BCM54XX_WOL_MASK_MODE_MASK 0x3
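+
+/* Example (illustrative): enabling WoL for a single magic packet matched on
+ * the destination address only:
+ *
+ *	ctl = BCM54XX_WOL_EN |
+ *	      (BCM54XX_WOL_MODE_SINGLE_MPD << BCM54XX_WOL_MODE_SHIFT) |
+ *	      (BCM54XX_WOL_MASK_MODE_DA_ONLY << BCM54XX_WOL_MASK_MODE_SHIFT);
+ */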
+
+#define BCM54XX_WOL_INNER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x81)
+#define BCM54XX_WOL_OUTER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x82)
+#define BCM54XX_WOL_OUTER_PROTO2 (MII_BCM54XX_EXP_SEL_WOL + 0x83)
+
+#define BCM54XX_WOL_MPD_DATA1(x) (MII_BCM54XX_EXP_SEL_WOL + 0x84 + (x))
+#define BCM54XX_WOL_MPD_DATA2(x) (MII_BCM54XX_EXP_SEL_WOL + 0x87 + (x))
+#define BCM54XX_WOL_SEC_KEY_8B (MII_BCM54XX_EXP_SEL_WOL + 0x8A)
+#define BCM54XX_WOL_MASK(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8B + (x))
+#define BCM54XX_SEC_KEY_STORE(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8E)
+#define BCM54XX_WOL_SHARED_CNT (MII_BCM54XX_EXP_SEL_WOL + 0x92)
+
+#define BCM54XX_WOL_INT_MASK (MII_BCM54XX_EXP_SEL_WOL + 0x93)
+#define BCM54XX_WOL_PKT1 BIT(0)
+#define BCM54XX_WOL_PKT2 BIT(1)
+#define BCM54XX_WOL_DIR BIT(2)
+#define BCM54XX_WOL_ALL_INTRS (BCM54XX_WOL_PKT1 | \
+ BCM54XX_WOL_PKT2 | \
+ BCM54XX_WOL_DIR)
+
+#define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94)
+
+/* BCM5221 Registers */
+#define BCM5221_AEGSR 0x1C
+#define BCM5221_AEGSR_MDIX_STATUS BIT(13)
+#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12)
+#define BCM5221_AEGSR_MDIX_DIS BIT(11)
+
+#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2)
+#define BCM5221_SHDW_AM4_FORCE_LPM BIT(1)
+
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
/*****************************************************************************/
@@ -283,6 +362,7 @@
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_STANDBY 0x0008 /* Standby enable */
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
@@ -293,6 +373,8 @@
#define LPI_FEATURE_EN 0x8000
#define LPI_FEATURE_EN_DIG1000X 0x4000
+#define BRCM_CL45VEN_EEE_LPI_CNT 0x803f
+
/* Core register definitions*/
#define MII_BRCM_CORE_BASE12 0x12
#define MII_BRCM_CORE_BASE13 0x13
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 960988d42f77..9e97ced2896d 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -10,8 +10,8 @@
#define _BLK_BSG_
#include <linux/blkdev.h>
-#include <scsi/scsi_request.h>
+struct bsg_job;
struct request;
struct device;
struct scatterlist;
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index dac37b6e00ec..ee2df73edf83 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -4,36 +4,16 @@
#include <uapi/linux/bsg.h>
-struct request;
+struct bsg_device;
+struct device;
+struct request_queue;
-#ifdef CONFIG_BLK_DEV_BSG
-struct bsg_ops {
- int (*check_proto)(struct sg_io_v4 *hdr);
- int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
- fmode_t mode);
- int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
- void (*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+ bool open_for_write, unsigned int timeout);
-struct bsg_class_device {
- struct device *class_dev;
- int minor;
- struct request_queue *queue;
- const struct bsg_ops *ops;
-};
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
-int bsg_register_queue(struct request_queue *q, struct device *parent,
- const char *name, const struct bsg_ops *ops);
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
-void bsg_unregister_queue(struct request_queue *q);
-#else
-static inline int bsg_scsi_register_queue(struct request_queue *q,
- struct device *parent)
-{
- return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif /* CONFIG_BLK_DEV_BSG */
#endif /* _LINUX_BSG_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 8b81fbb4497c..f9e56fd12a9f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,19 +5,142 @@
#define _LINUX_BTF_H 1
#include <linux/types.h>
+#include <linux/bpfptr.h>
+#include <linux/bsearch.h>
+#include <linux/btf_ids.h>
#include <uapi/linux/btf.h>
+#include <uapi/linux/bpf.h>
#define BTF_TYPE_EMIT(type) ((void)(type *)0)
+#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)
+
+/* These need to be macros, as the expressions are used in assembler input */
+#define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */
+#define KF_RELEASE (1 << 1) /* kfunc is a release function */
+#define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */
+/* Trusted arguments are those which are guaranteed to be valid when passed to
+ * the kfunc. It is used to enforce that pointers obtained from either acquire
+ * kfuncs, or from the main kernel on a tracepoint or struct_ops callback
+ * invocation, remain unmodified when being passed to helpers taking trusted
+ * args.
+ *
+ * Consider, for example, the following new task tracepoint:
+ *
+ * SEC("tp_btf/task_newtask")
+ * int BPF_PROG(new_task_tp, struct task_struct *task, u64 clone_flags)
+ * {
+ * ...
+ * }
+ *
+ * And the following kfunc:
+ *
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ *
+ * All invocations to the kfunc must pass the unmodified, unwalked task:
+ *
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(task->last_wakee); // Rejected, walked task
+ *
+ * Programs may also pass referenced tasks directly to the kfunc:
+ *
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(task); // Allowed, same as above
+ * bpf_task_acquire(acquired); // Allowed
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(acquired->last_wakee); // Rejected, walked task
+ *
+ * Programs may _not_, however, pass a task from an arbitrary fentry/fexit, or
+ * kprobe/kretprobe to the kfunc, as BPF cannot guarantee that all of these
+ * pointers are safe. For example, the following BPF program
+ * would be rejected:
+ *
+ * SEC("kretprobe/free_task")
+ * int BPF_PROG(free_task_probe, struct task_struct *tsk)
+ * {
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(tsk); // Rejected, not a trusted pointer
+ * bpf_task_release(acquired);
+ *
+ * return 0;
+ * }
+ */
+#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
+#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
+#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
+#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
+/* only one of KF_ITER_{NEW,NEXT,DESTROY} could be specified per kfunc */
+#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
+#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
+#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
+#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */
+
+/*
+ * Tag marking a kernel function as a kfunc. This is meant to minimize the
+ * amount of copy-paste that kfunc authors have to include for correctness so
+ * as to avoid issues such as the compiler inlining or eliding either a static
+ * kfunc, or a global kfunc in an LTO build.
+ */
+#define __bpf_kfunc __used noinline
+
+#define __bpf_kfunc_start_defs() \
+ __diag_push(); \
+ __diag_ignore_all("-Wmissing-declarations", \
+ "Global kfuncs as their definitions will be in BTF");\
+ __diag_ignore_all("-Wmissing-prototypes", \
+ "Global kfuncs as their definitions will be in BTF")
+
+#define __bpf_kfunc_end_defs() __diag_pop()
+#define __bpf_hook_start() __bpf_kfunc_start_defs()
+#define __bpf_hook_end() __bpf_kfunc_end_defs()
+
+/*
+ * Return the name of the passed struct, if it exists, or halt the build if,
+ * for example, the structure gets renamed. In this way, developers have to revisit
+ * the code using that structure name, and update it accordingly.
+ */
+#define stringify_struct(x) \
+ ({ BUILD_BUG_ON(sizeof(struct x) < 0); \
+ __stringify(x); })
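+
+/* e.g. stringify_struct(task_struct) evaluates to "task_struct" and breaks
+ * the build if struct task_struct ever stops existing under that name.
+ */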
struct btf;
struct btf_member;
struct btf_type;
union bpf_attr;
+struct btf_show;
+struct btf_id_set;
+struct bpf_prog;
+
+typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *prog, u32 kfunc_id);
+
+struct btf_kfunc_id_set {
+ struct module *owner;
+ struct btf_id_set8 *set;
+ btf_kfunc_filter_t filter;
+};
+
+struct btf_id_dtor_kfunc {
+ u32 btf_id;
+ u32 kfunc_btf_id;
+};
+
+struct btf_struct_meta {
+ u32 btf_id;
+ struct btf_record *record;
+};
+
+struct btf_struct_metas {
+ u32 cnt;
+ struct btf_struct_meta types[];
+};
extern const struct file_operations btf_fops;
+const char *btf_get_name(const struct btf *btf);
+void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
-int btf_new_fd(const union bpf_attr *attr);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
const union bpf_attr *attr,
@@ -46,16 +169,60 @@ int btf_get_info_by_fd(const struct btf *btf,
const struct btf_type *btf_type_id_size(const struct btf *btf,
u32 *type_id,
u32 *ret_size);
+
+/*
+ * Options to control show behaviour.
+ * - BTF_SHOW_COMPACT: no formatting around type information
+ * - BTF_SHOW_NONAME: no struct/union member names/types
+ * - BTF_SHOW_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_SHOW_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ * - BTF_SHOW_UNSAFE: skip use of bpf_probe_read() to safely read
+ * data before displaying it.
+ */
+#define BTF_SHOW_COMPACT BTF_F_COMPACT
+#define BTF_SHOW_NONAME BTF_F_NONAME
+#define BTF_SHOW_PTR_RAW BTF_F_PTR_RAW
+#define BTF_SHOW_ZERO BTF_F_ZERO
+#define BTF_SHOW_UNSAFE (1ULL << 4)
+
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m, u64 flags);
+
+/*
+ * Copy len bytes of string representation of obj of BTF type_id into buf.
+ *
+ * @btf: struct btf object
+ * @type_id: type id of type obj points to
+ * @obj: pointer to typed data
+ * @buf: buffer to write to
+ * @len: maximum length to write to buf
+ * @flags: show options (see above)
+ *
+ * Return: length that would have been/was copied as per snprintf, or
+ * negative error.
+ */
+int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
+ char *buf, int len, u64 flags);
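+
+/* Usage sketch (illustrative; assumes btf/type_id describe the object at
+ * obj):
+ *
+ *	char buf[256];
+ *	int len = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf),
+ *					 BTF_SHOW_COMPACT);
+ *	if (len < 0)
+ *		return len;
+ *	if (len >= sizeof(buf))
+ *		...output was truncated, as with snprintf()...
+ */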
+
int btf_get_fd_by_id(u32 id);
-u32 btf_id(const struct btf *btf);
+u32 btf_obj_id(const struct btf *btf);
+bool btf_is_kernel(const struct btf *btf);
+bool btf_is_module(const struct btf *btf);
+struct module *btf_try_get_module(const struct btf *btf);
+u32 btf_nr_types(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
-int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
+ u32 field_mask, u32 value_size);
+int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
@@ -64,14 +231,19 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
- u32 *type_size, const struct btf_type **elem_type,
- u32 *total_nelems);
+ u32 *type_size);
+const char *btf_type_str(const struct btf_type *t);
#define for_each_member(i, struct_type, member) \
for (i = 0, member = btf_type_member(struct_type); \
i < btf_type_vlen(struct_type); \
i++, member++)
+#define for_each_vsi(i, datasec_type, member) \
+ for (i = 0, member = btf_type_var_secinfo(datasec_type); \
+ i < btf_type_vlen(datasec_type); \
+ i++, member++)
+
static inline bool btf_type_is_ptr(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
@@ -87,16 +259,101 @@ static inline bool btf_type_is_small_int(const struct btf_type *t)
return btf_type_is_int(t) && t->size <= sizeof(u64);
}
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
}
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM ||
+ BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64;
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
+static inline bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+static inline u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
+static inline u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ return ((u64)e->val_hi32 << 32) | e->val_lo32;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_scalar(const struct btf_type *t)
+{
+ return btf_type_is_int(t) || btf_type_is_enum(t);
+}
+
static inline bool btf_type_is_typedef(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
}
+static inline bool btf_type_is_volatile(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VOLATILE;
+}
+
static inline bool btf_type_is_func(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
@@ -107,11 +364,46 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}
+static inline bool btf_type_is_var(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
+}
+
+static inline bool btf_type_is_type_tag(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
+}
+
+/* a union is just a special case of a struct:
+ * every member has offsetof() == 0
+ */
+static inline bool btf_type_is_struct(const struct btf_type *t)
+{
+ u8 kind = BTF_INFO_KIND(t->info);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool __btf_type_is_struct(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
+}
+
+static inline bool btf_type_is_array(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
+}
+
static inline u16 btf_type_vlen(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
}
+static inline u16 btf_vlen(const struct btf_type *t)
+{
+ return btf_type_vlen(t);
+}
+
static inline u16 btf_func_linkage(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
@@ -122,30 +414,129 @@ static inline bool btf_type_kflag(const struct btf_type *t)
return BTF_INFO_KFLAG(t->info);
}
-static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
- const struct btf_member *member)
+static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
: member->offset;
}
-static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
- const struct btf_member *member)
+static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
: 0;
}
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bit_offset(t, m);
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bitfield_size(t, m);
+}
+
static inline const struct btf_member *btf_type_member(const struct btf_type *t)
{
return (const struct btf_member *)(t + 1);
}
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
+static inline const struct btf_var_secinfo *btf_type_var_secinfo(
+ const struct btf_type *t)
+{
+ return (const struct btf_var_secinfo *)(t + 1);
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
+static inline int btf_id_cmp_func(const void *a, const void *b)
+{
+ const int *pa = a, *pb = b;
+
+ return *pa - *pb;
+}
+
+static inline bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
+{
+ return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
+}
+
+static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
+{
+ return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
+}
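Both lookup helpers rely on the set being sorted by id (the build-time resolve_btfids step keeps it that way). Note that btf_id_set8_contains() returns a pointer to the matching { id, flags } pair, so the flags sit one u32 past it; a sketch:

	/* Sketch: check membership and fetch the per-id flags. */
	u32 *pair = btf_id_set8_contains(set8, kfunc_id);

	if (pair) {
		u32 flags = pair[1];	/* flags stored next to the id */
		/* ... */
	}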
+
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix);
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no);
+
+struct bpf_verifier_log;
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_struct_ops;
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id);
+#else
+static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s);
+int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner);
+struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg);
+int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
+bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -157,6 +548,57 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
{
return NULL;
}
+static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ u32 kfunc_btf_id,
+ const struct bpf_prog *prog)
+{
+ return NULL;
+}
+static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s)
+{
+ return 0;
+}
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ return -ENOENT;
+}
+static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors,
+ u32 add_cnt, struct module *owner)
+{
+ return 0;
+}
+static inline struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
+{
+ return NULL;
+}
+static inline bool
+btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
+{
+ return false;
+}
+static inline int get_kern_ctx_btf_id(struct bpf_verifier_log *log,
+ enum bpf_prog_type prog_type)
+{
+ return -EINVAL;
+}
+static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2)
+{
+ return false;
+}
#endif
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
+
#endif
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 4867d549e3c1..e24aabfe8ecc 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -3,9 +3,28 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
+/* This flag implies BTF_SET8 holds kfunc(s) */
+#define BTF_SET8_KFUNCS (1 << 0)
+
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
+#include <linux/compiler_attributes.h> /* for __maybe_unused */
+#include <linux/stringify.h>
/*
* Following macros help to define lists of BTF IDs placed
@@ -19,7 +38,7 @@
#define BTF_IDS_SECTION ".BTF_ids"
-#define ____BTF_ID(symbol) \
+#define ____BTF_ID(symbol, word) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
".local " #symbol " ; \n" \
@@ -27,20 +46,28 @@ asm( \
".size " #symbol ", 4; \n" \
#symbol ": \n" \
".zero 4 \n" \
+word \
".popsection; \n");
-#define __BTF_ID(symbol) \
- ____BTF_ID(symbol)
+#define __BTF_ID(symbol, word) \
+ ____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
 * The BTF_ID macro defines a unique symbol for each ID, pointing
 * to 4 zero bytes.
*/
#define BTF_ID(prefix, name) \
- __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "")
+
+#define ____BTF_ID_FLAGS(prefix, name, flags) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n")
+#define __BTF_ID_FLAGS(prefix, name, flags, ...) \
+ ____BTF_ID_FLAGS(prefix, name, flags)
+#define BTF_ID_FLAGS(prefix, name, ...) \
+ __BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0)
/*
* The BTF_ID_LIST macro defines pure (unsorted) list
@@ -62,15 +89,25 @@ asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " " #name "; \n" \
#name ":; \n" \
-".popsection; \n"); \
+".popsection; \n");
#define BTF_ID_LIST(name) \
__BTF_ID_LIST(name, local) \
extern u32 name[];
-#define BTF_ID_LIST_GLOBAL(name) \
+#define BTF_ID_LIST_GLOBAL(name, n) \
__BTF_ID_LIST(name, globl)
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
+
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry
@@ -88,12 +125,110 @@ asm( \
".zero 4 \n" \
".popsection; \n");
+/*
+ * The BTF_SET_START/END macro pair defines a sorted list of
+ * BTF IDs plus its member count, with the following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
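A hypothetical definition site, for illustration (the function names are made up; resolve_btfids patches in the real BTF ids and the count at link time):

	BTF_SET_START(example_allowlist)
	BTF_ID(func, bpf_example_helper_a)
	BTF_ID(func, bpf_example_helper_b)
	BTF_SET_END(example_allowlist)

	/* later: if (btf_id_set_contains(&example_allowlist, id)) ... */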
+
+/*
+ * The BTF_SET8_START/END macro pair defines a sorted list of
+ * BTF IDs and their flags plus its member count, with the
+ * following layout:
+ *
+ * BTF_SET8_START(list)
+ * BTF_ID_FLAGS(type1, name1, flags)
+ * BTF_ID_FLAGS(type2, name2, flags)
+ * BTF_SET8_END(list)
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__type2__name2__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
+ *
+ */
+#define __BTF_SET8_START(name, scope, flags) \
+__BTF_ID_LIST(name, local) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set8__" #name "; \n" \
+"__BTF_ID__set8__" #name ":; \n" \
+".zero 4 \n" \
+".long " __stringify(flags) "\n" \
+".popsection; \n");
+
+#define BTF_SET8_START(name) \
+__BTF_SET8_START(name, local, 0)
+
+#define BTF_SET8_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set8__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set8 name;
+
+#define BTF_KFUNCS_START(name) \
+__BTF_SET8_START(name, local, BTF_SET8_KFUNCS)
+
+#define BTF_KFUNCS_END(name) \
+BTF_SET8_END(name)
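Putting the set8 pieces together, a sketch of the usual kfunc registration pattern; the kfunc names and flags here are illustrative only:

	BTF_KFUNCS_START(example_kfunc_ids)
	BTF_ID_FLAGS(func, bpf_example_acquire, KF_ACQUIRE | KF_RET_NULL)
	BTF_ID_FLAGS(func, bpf_example_release, KF_RELEASE)
	BTF_KFUNCS_END(example_kfunc_ids)

	static const struct btf_kfunc_id_set example_kfunc_set = {
		.owner = THIS_MODULE,
		.set   = &example_kfunc_ids,
	};

	/* in subsystem init: */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &example_kfunc_set);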
+
#else
-#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
#define BTF_ID(prefix, name)
+#define BTF_ID_FLAGS(prefix, name, ...)
#define BTF_ID_UNUSED
-#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_END(name)
+#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
+#define BTF_SET8_END(name)
+#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS };
+#define BTF_KFUNCS_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
@@ -115,7 +250,10 @@ asm( \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
- BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
enum {
#define BTF_SOCK_TYPE(name, str) name,
@@ -127,4 +265,21 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+extern u32 bpf_cgroup_btf_id[];
+extern u32 bpf_local_storage_map_btf_id[];
+extern u32 btf_bpf_map_id[];
+
#endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6b47f94378c5..d78454a4dd1f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -9,14 +9,13 @@
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>
-#ifdef CONFIG_BLOCK
-
enum bh_state_bits {
BH_Uptodate, /* Contains valid data */
BH_Dirty, /* Is dirty */
@@ -60,7 +59,10 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
struct buffer_head {
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
- struct page *b_page; /* the page this bh is mapped to */
+ union {
+ struct page *b_page; /* the page this bh is mapped to */
+ struct folio *b_folio; /* the folio this bh is mapped to */
+ };
sector_t b_blocknr; /* start block number */
size_t b_size; /* size of mapping */
@@ -117,7 +119,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -135,7 +136,45 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with I/O errors or whatever else
+ * might clear the bit) has to come from other state (e.g. BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
+ * Make it consistent with folio_mark_uptodate();
+ * pairs with the smp_load_acquire() in buffer_uptodate().
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * Make it consistent with folio_test_uptodate();
+ * pairs with the smp_mb__before_atomic() in set_buffer_uptodate().
+ */
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
+}
+
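The acquire/release pairing is what makes the usual completion pattern safe without extra barriers; a sketch:

	/* I/O completion (writer) fills the data, then publishes it: */
	memcpy(bh->b_data, src, bh->b_size);
	set_buffer_uptodate(bh);

	/* Reader: once buffer_uptodate() returns true, the acquire load
	 * guarantees the data written above is visible too.
	 */
	if (buffer_uptodate(bh))
		consume(bh->b_data);	/* 'consume' is illustrative */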
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -144,8 +183,9 @@ BUFFER_FNS(Defer_Completion, defer_completion)
((struct buffer_head *)page_private(page)); \
})
#define page_has_buffers(page) PagePrivate(page)
+#define folio_buffers(folio) folio_get_private(folio)
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback);
/*
@@ -155,23 +195,23 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
+void folio_set_bh(struct buffer_head *bh, struct folio *folio,
+ unsigned long offset);
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+ gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
-void create_empty_buffers(struct page *, unsigned long,
- unsigned long b_state);
+struct buffer_head *create_empty_buffers(struct folio *folio,
+ unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int inode_has_buffers(struct inode *);
-void invalidate_inode_buffers(struct inode *);
-int remove_inode_buffers(struct inode *inode);
-int sync_mapping_buffers(struct address_space *mapping);
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync);
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
@@ -184,48 +224,41 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
-struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp);
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
-void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
- gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
-void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
-void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int submit_bh(int, int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
-
-extern int buffer_heads_over_limit;
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
/*
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
-int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
+int __block_write_full_folio(struct inode *inode, struct folio *folio,
+ get_block_t *get_block, struct writeback_control *wbc);
+int block_read_full_folio(struct folio *, get_block_t *);
+bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block);
+ struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
@@ -234,39 +267,26 @@ int block_write_end(struct file *, struct address_space *,
int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-void clean_page_buffers(struct page *page);
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **,
+ unsigned, struct page **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-int block_commit_write(struct page *page, unsigned from, unsigned to);
+void block_commit_write(struct page *page, unsigned int from, unsigned int to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
-/* Convert errno to return value from ->page_mkwrite() call */
-static inline vm_fault_t block_page_mkwrite_return(int err)
-{
- if (err == 0)
- return VM_FAULT_LOCKED;
- if (err == -EFAULT || err == -EAGAIN)
- return VM_FAULT_NOPAGE;
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- /* -ENOSPC, -EDQUOT, -EIO ... */
- return VM_FAULT_SIGBUS;
-}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
- struct page **, void **, get_block_t*);
-int nobh_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-void buffer_init(void);
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_folio(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+extern int buffer_migrate_folio_norefs(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+#else
+#define buffer_migrate_folio NULL
+#define buffer_migrate_folio_norefs NULL
+#endif
/*
* inline definitions
@@ -313,23 +333,38 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline void
-sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
-static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
+static inline struct buffer_head *sb_getblk(struct super_block *sb,
+ sector_t block)
+{
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
+}
-static inline struct buffer_head *
-sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
+ sector_t block, gfp_t gfp)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+ return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
@@ -366,18 +401,39 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}
-static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
- sector_t block,
- unsigned size)
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
- return __getblk_gfp(bdev, block, size, 0);
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
}
-static inline struct buffer_head *__getblk(struct block_device *bdev,
- sector_t block,
- unsigned size)
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on successful read, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
+{
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
{
- return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
+ __bh_read_batch(nr, bhs, op_flags, false);
}
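A sketch of the intended replacement for the removed ll_rw_block()/bh_submit_read() style, using the new synchronous helper; error handling is abbreviated:

	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh_read(bh, 0) < 0) {	/* 0: no extra REQ_* flags */
		brelse(bh);
		return -EIO;
	}
	/* uptodate here: bh_read() returned 1 (cached) or 0 (just read) */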
/**
@@ -396,17 +452,55 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
-extern int __set_page_dirty_buffers(struct page *page);
+/**
+ * get_nth_bh - Get a reference on the n'th buffer after this one.
+ * @bh: The buffer to start counting from.
+ * @count: How many buffers to skip.
+ *
+ * This is primarily useful for finding the nth buffer in a folio; in
+ * that case you pass the head buffer and the byte offset in the folio
+ * divided by the block size. It can be used for other purposes, but
+ * it will wrap at the end of the folio rather than returning NULL or
+ * proceeding to the next folio for you.
+ *
+ * Return: The requested buffer with an elevated refcount.
+ */
+static inline __must_check
+struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
+{
+ while (count--)
+ bh = bh->b_this_page;
+ get_bh(bh);
+ return bh;
+}
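For example, to reach the buffer covering a byte offset within a folio (a sketch; blocksize handling is simplified):

	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = get_nth_bh(head, offset / blocksize);

	/* ... use bh ... */
	put_bh(bh);	/* drop the reference get_nth_bh() took */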
+
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_BUFFER_HEAD
+
+void buffer_init(void);
+bool try_to_free_buffers(struct folio *folio);
+int inode_has_buffers(struct inode *inode);
+void invalidate_inode_buffers(struct inode *inode);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
+extern int buffer_heads_over_limit;
-#else /* CONFIG_BLOCK */
+#else /* CONFIG_BUFFER_HEAD */
static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus(void) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
-#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index f639bd0122f3..348acf2558f3 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -36,6 +36,9 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line);
+
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
@@ -58,6 +61,13 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
return BUG_TRAP_TYPE_BUG;
}
+struct bug_entry;
+static inline void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+ *file = NULL;
+ *line = 0;
+}
static inline void generic_bug_clear_once(void) {}
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index e3a0be2c90ad..3aa3640f8c18 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -77,4 +77,13 @@
#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+
+/*
+ * Compile-time check that a field is at the expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
+ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
+ "Offset of " #field " in " #type " has changed.")
+
#endif /* _LINUX_BUILD_BUG_H */
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
new file mode 100644
index 000000000000..20aa3c2d89f7
--- /dev/null
+++ b/include/linux/buildid.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILDID_H
+#define _LINUX_BUILDID_H
+
+#include <linux/types.h>
+
+#define BUILD_ID_SIZE_MAX 20
+
+struct vm_area_struct;
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
+
+#endif
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index dd74503f7e5e..bd1e361b351c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -4,13 +4,17 @@
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
*/
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
-#include <linux/kernel.h>
+#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
-#include <linux/mm.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+
+struct page;
/**
* struct bio_vec - a contiguous range of physical memory addresses
@@ -30,6 +34,46 @@ struct bio_vec {
unsigned int bv_offset;
};
+/**
+ * bvec_set_page - initialize a bvec based off a struct page
+ * @bv: bvec to initialize
+ * @page: page the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the page
+ */
+static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ bv->bv_page = page;
+ bv->bv_len = len;
+ bv->bv_offset = offset;
+}
+
+/**
+ * bvec_set_folio - initialize a bvec based off a struct folio
+ * @bv: bvec to initialize
+ * @folio: folio the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the folio
+ */
+static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
+ unsigned int len, unsigned int offset)
+{
+ bvec_set_page(bv, &folio->page, len, offset);
+}
+
+/**
+ * bvec_set_virt - initialize a bvec based on a virtual address
+ * @bv: bvec to initialize
+ * @vaddr: virtual address to set the bvec to
+ * @len: length of the bvec
+ */
+static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
+ unsigned int len)
+{
+ bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
+}
+
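A sketch of feeding a kernel buffer to an iov_iter via these helpers; 'buf' and 'len' are illustrative, ITER_DEST is the recent-kernel spelling (older trees use READ), and the buffer must not live in highmem for bvec_set_virt():

	struct bio_vec bv;
	struct iov_iter iter;

	bvec_set_virt(&bv, buf, len);
	iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);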
struct bvec_iter {
sector_t bi_sector; /* device address in 512 byte
sectors */
@@ -39,7 +83,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
-};
+} __packed __aligned(4);
struct bvec_iter_all {
struct bio_vec bv;
@@ -117,18 +161,28 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
return true;
}
-static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
+/*
+ * A simpler version of bvec_iter_advance(): @bytes must not span
+ * multiple bvec entries, i.e. bytes <= bv[iter->bi_idx].bv_len
+ */
+static inline void bvec_iter_advance_single(const struct bio_vec *bv,
+ struct bvec_iter *iter, unsigned int bytes)
{
- iter->bi_bvec_done = 0;
- iter->bi_idx++;
+ unsigned int done = iter->bi_bvec_done + bytes;
+
+ if (done == bv[iter->bi_idx].bv_len) {
+ done = 0;
+ iter->bi_idx++;
+ }
+ iter->bi_bvec_done = done;
+ iter->bi_size -= bytes;
}
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
- (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
@@ -169,4 +223,61 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}
-#endif /* __LINUX_BVEC_ITER_H */
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only. Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+ return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @to: destination buffer (must hold at least @bvec->bv_len bytes)
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+ memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ * @from: source buffer (@bvec->bv_len bytes are copied)
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+ memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+ memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+ WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+ return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
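Typical single-page-bvec usage of the helpers above, as a sketch:

	/* Bounce a small payload out of the bvec ... */
	char tmp[64];

	if (bvec->bv_len <= sizeof(tmp))
		memcpy_from_bvec(tmp, bvec);

	/* ... or parse it in place under a local kmap: */
	void *p = bvec_kmap_local(bvec);
	/* ... read p[0 .. bvec->bv_len) ... */
	kunmap_local(p);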
+#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 4b13e0a3e15b..c9a4c96c9943 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -190,7 +190,7 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = cpu_to_be32(src[i]);
@@ -198,7 +198,7 @@ static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = be32_to_cpu(src[i]);
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 1aa8009f6d06..0ecb17bb6883 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -34,7 +34,7 @@
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
-#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#define __ro_after_init __section(".data..ro_after_init")
#endif
#ifndef ____cacheline_aligned
@@ -85,4 +85,48 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+#ifndef __cacheline_group_begin
+#define __cacheline_group_begin(GROUP) \
+ __u8 __cacheline_group_begin__##GROUP[0]
+#endif
+
+#ifndef __cacheline_group_end
+#define __cacheline_group_end(GROUP) \
+ __u8 __cacheline_group_end__##GROUP[0]
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_MEMBER
+#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
+ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
+ offsetofend(TYPE, MEMBER) <= \
+ offsetof(TYPE, __cacheline_group_end__##GROUP)))
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_SIZE
+#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
+ BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
+ SIZE)
+#endif
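A sketch of how the group markers and assertions combine; the struct and its members are hypothetical:

	struct example_stats {
		__cacheline_group_begin(hot);
		u64	rx_packets;
		u64	rx_bytes;
		__cacheline_group_end(hot);
		u64	rarely_used;
	};

	static void example_layout_checks(void)
	{
		CACHELINE_ASSERT_GROUP_MEMBER(struct example_stats, hot, rx_bytes);
		CACHELINE_ASSERT_GROUP_SIZE(struct example_stats, hot, 64);
	}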
+
+/*
+ * Helper to add padding within a struct to ensure data falls into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_HAS_DMA_MINALIGN
+#else
+#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
+#endif
+
#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..55f297b2c23f
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+struct folio;
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef flush_dcache_folio
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define flush_dcache_folio flush_dcache_folio
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+ struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+
+#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 46b92cd61d0c..2cb15fe4fe12 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -73,50 +73,75 @@ struct cacheinfo {
struct cpu_cacheinfo {
struct cacheinfo *info_list;
+ unsigned int per_cpu_data_slice_size;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
+ bool early_ci_levels;
};
-/*
- * Helpers to make sure "func" is executed on the cpu whose cache
- * attributes are being detected
- */
-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
-static inline void _##func(void *ret) \
-{ \
- int cpu = smp_processor_id(); \
- *(int *)ret = __##func(cpu); \
-} \
- \
-int func(unsigned int cpu) \
-{ \
- int ret; \
- smp_call_function_single(cpu, _##func, &ret, true); \
- return ret; \
-}
-
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int early_cache_level(unsigned int cpu);
int init_cache_level(unsigned int cpu);
+int init_of_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
+bool last_level_cache_is_valid(unsigned int cpu);
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int fetch_cache_info(unsigned int cpu);
+int detect_cache_attributes(unsigned int cpu);
#ifndef CONFIG_ACPI_PPTT
/*
- * acpi_find_last_cache_level is only called on ACPI enabled
+ * acpi_get_cache_info() is only called on ACPI enabled
* platforms using the PPTT for topology. This means that if
* the platform supports other firmware configuration methods
* we need to stub out the call when ACPI is disabled.
* ACPI enabled platforms not using PPTT won't be making calls
* to this function so we need not worry about them.
*/
-static inline int acpi_find_last_cache_level(unsigned int cpu)
+static inline
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels)
{
- return 0;
+ return -ENOENT;
}
#else
-int acpi_find_last_cache_level(unsigned int cpu);
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels);
#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ int i;
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == level) {
+ if (ci->info_list[i].attributes & CACHE_ID)
+ return ci->info_list[i].id;
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+#ifdef CONFIG_ARM64
+#define use_arch_cache_info() (true)
+#else
+#define use_arch_cache_info() (false)
+#endif
+
+#ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
+#define cpu_dcache_is_aliasing() false
+#else
+#include <asm/cachetype.h>
+#endif
+
#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
new file mode 100644
index 000000000000..9b8a9c39614b
--- /dev/null
+++ b/include/linux/can/bittiming.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef _CAN_BITTIMING_H
+#define _CAN_BITTIMING_H
+
+#include <linux/netdevice.h>
+#include <linux/can/netlink.h>
+
+#define CAN_SYNC_SEG 1
+
+#define CAN_BITRATE_UNSET 0
+#define CAN_BITRATE_UNKNOWN (-1U)
+
+#define CAN_CTRLMODE_TDC_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+
+/*
+ * struct can_tdc - CAN FD Transmission Delay Compensation parameters
+ *
+ * At high bit rates, the propagation delay from the TX pin to the RX
+ * pin of the transceiver causes measurement errors: the sample point
+ * on the RX pin might occur on the previous bit.
+ *
+ * To solve this issue, ISO 11898-1 introduces in section 11.3.3
+ * "Transmitter delay compensation" a SSP (Secondary Sample Point)
+ * equal to the distance from the start of the bit time on the TX pin
+ * to the actual measurement on the RX pin.
+ *
+ * This structure contains the parameters to calculate that SSP.
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------- TDCO ------->|
+ * |<----------- Secondary Sample Point ---------->|
+ *
+ * To increase precision, contrary to the other bittiming parameters
+ * which are measured in time quanta, the TDC parameters are measured
+ * in clock periods (also referred to as "minimum time quantum" in ISO
+ * 11898-1).
+ *
+ * @tdcv: Transmitter Delay Compensation Value. The time needed for
+ * the signal to propagate, i.e. the distance, in clock periods,
+ * from the start of the bit on the TX pin to when it is received
+ * on the RX pin. @tdcv depends on the controller modes:
+ *
+ * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
+ * measures @tdcv for each transmitted CAN FD frame and the
+ * value provided here should be ignored.
+ *
+ * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
+ * value.
+ *
+ * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
+ * mutually exclusive. Only one can be set at a time. If both
+ * CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are unset,
+ * TDC is disabled and all the values of this structure should be
+ * ignored.
+ *
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in
+ * clock periods, defining the distance between the start of the
+ * bit reception on the RX pin of the transceiver and the SSP
+ * position such that SSP = @tdcv + @tdco.
+ *
+ * @tdcf: Transmitter Delay Compensation Filter window. Defines the
+ * minimum value for the SSP position in clock periods. If the
+ * SSP position is less than @tdcf, then no delay compensation
+ * occurs and the normal sampling point is used instead. The
+ * feature is enabled if and only if @tdcv is set to zero
+ * (automatic mode) and @tdcf is configured to a value greater
+ * than @tdco.
+ */
+struct can_tdc {
+ u32 tdcv;
+ u32 tdco;
+ u32 tdcf;
+};
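A worked example with made-up numbers may help: if the measured loop delay is tdcv = 10 clock periods and the desired secondary sample point sits 15 clock periods later, then SSP = tdcv + tdco = 10 + 15 = 25 clock periods after the start of the transmitted bit:

	struct can_tdc tdc = {
		.tdcv = 10,	/* ignored when CAN_CTRLMODE_TDC_AUTO is set */
		.tdco = 15,
		.tdcf = 0,	/* filter window unused in this sketch */
	};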
+
+/*
+ * struct can_tdc_const - CAN hardware-dependent constant for
+ * Transmission Delay Compensation
+ *
+ * @tdcv_min: Transmitter Delay Compensation Value minimum value. If
+ * the controller does not support manual mode for tdcv
+ * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ * @tdcv_max: Transmitter Delay Compensation Value maximum value. If
+ * the controller does not support manual mode for tdcv
+ * (cf. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ *
+ * @tdco_min: Transmitter Delay Compensation Offset minimum value.
+ * @tdco_max: Transmitter Delay Compensation Offset maximum value.
+ * Should not be zero. If the controller does not support TDC,
+ * then the pointer to this structure should be NULL.
+ *
+ * @tdcf_min: Transmitter Delay Compensation Filter window minimum
+ * value. If @tdcf_max is zero, this value is ignored.
+ * @tdcf_max: Transmitter Delay Compensation Filter window maximum
+ * value. Should be set to zero if the controller does not
+ * support this feature.
+ */
+struct can_tdc_const {
+ u32 tdcv_min;
+ u32 tdcv_max;
+ u32 tdco_min;
+ u32 tdco_max;
+ u32 tdcf_min;
+ u32 tdcf_max;
+};
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported);
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static inline int
+can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ netdev_err(dev, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+
+static inline void
+can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+{
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+void can_sjw_set_default(struct can_bittiming *bt);
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack);
+
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
+
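Since one bit lasts brp * can_bit_time(bt) clock periods, the nominal bitrate falls out directly; a sketch (names are illustrative):

	static u32 example_bitrate(const struct can_bittiming *bt, u32 clock_freq)
	{
		return clock_freq / (bt->brp * can_bit_time(bt));
	}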
+#endif /* !_CAN_BITTIMING_H */
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 2f5d731ae251..8afa92d15a66 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -44,6 +44,7 @@
#include <linux/can.h>
#include <linux/list.h>
+#include <linux/netdevice.h>
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
@@ -65,4 +66,15 @@ struct can_ml_priv {
#endif
};
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+ struct can_ml_priv *ml_priv)
+{
+ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
#endif /* CAN_ML_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index e20a0cd09ba5..5fb8d0e3f9c1 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -2,7 +2,7 @@
/*
* linux/can/core.h
*
- * Protoypes and definitions for CAN protocol modules using the PF_CAN core
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
@@ -18,13 +18,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20170425"
-
-/* increment this number each time you change some user-space interface */
-#define CAN_ABI_VERSION "9"
-
-#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
-
#define DNAME(dev) ((dev) ? (dev)->name : "any")
/**
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5e3d45525bd3..1b92aed49363 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -15,10 +15,12 @@
#define _CAN_DEV_H
#include <linux/can.h>
+#include <linux/can/bittiming.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@@ -30,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
+enum can_termination_gpio {
+ CAN_TERMINATION_GPIO_DISABLED = 0,
+ CAN_TERMINATION_GPIO_ENABLED,
+ CAN_TERMINATION_GPIO_MAX,
+};
+
/*
* CAN common private data
*/
@@ -37,25 +45,33 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- struct can_bittiming bittiming, data_bittiming;
const struct can_bittiming_const *bittiming_const,
*data_bittiming_const;
- const u16 *termination_const;
- unsigned int termination_const_cnt;
- u16 termination;
- const u32 *bitrate_const;
+ struct can_bittiming bittiming, data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc tdc;
+
unsigned int bitrate_const_cnt;
+ const u32 *bitrate_const;
const u32 *data_bitrate_const;
unsigned int data_bitrate_const_cnt;
u32 bitrate_max;
struct can_clock clock;
+ unsigned int termination_const_cnt;
+ const u16 *termination_const;
+ u16 termination;
+ struct gpio_desc *termination_gpio;
+ u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
+
enum can_state state;
/* CAN controller features - see include/uapi/linux/can/netlink.h */
u32 ctrlmode; /* current options setting */
u32 ctrlmode_supported; /* options that can be modified by netlink */
- u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct delayed_work restart_work;
@@ -68,114 +84,91 @@ struct can_priv {
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
-
- unsigned int echo_skb_max;
- struct sk_buff **echo_skb;
-
-#ifdef CONFIG_CAN_LEDS
- struct led_trigger *tx_led_trig;
- char tx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rx_led_trig;
- char rx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rxtx_led_trig;
- char rxtx_led_trig_name[CAN_LED_NAME_SZ];
-#endif
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};
+static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+{
+ return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+}
+
/*
- * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
- * to __u8 and ensure the dlc value to be max. 8 bytes.
+ * can_get_relative_tdco() - TDCO relative to the sample point
*
- * To be used in the CAN netdriver receive path to ensure conformance with
- * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers instead use an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ * = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------------------------>| absolute TDCO
+ * |<--- Sample Point --->|
+ * | |<->| relative TDCO
+ * |<------------- Secondary Sample Point ------------>|
*/
-#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
-#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+static inline s32 can_get_relative_tdco(const struct can_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)priv->tdc.tdco - sample_point_in_tc;
+}
-/* Check for outgoing skbs that have not been created by the CAN subsystem */
-static inline bool can_skb_headroom_valid(struct net_device *dev,
- struct sk_buff *skb)
+/* helper to define static CAN controller features at device creation time */
+static inline int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
{
- /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
- if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
- return false;
-
- /* af_packet does not apply CAN skb specific settings */
- if (skb->ip_summed == CHECKSUM_NONE) {
- /* init headroom */
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* preform proper loopback on capable devices */
- if (dev->flags & IFF_ECHO)
- skb->pkt_type = PACKET_LOOPBACK;
- else
- skb->pkt_type = PACKET_HOST;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
}
+ priv->ctrlmode = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ if (static_mode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
- return true;
+ return 0;
}
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
+static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- if (!can_skb_headroom_valid(dev, skb))
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
+ return priv->ctrlmode & ~priv->ctrlmode_supported;
}
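
A minimal sketch of probe-time use of the new __must_check variant (the loopback mode and cleanup label are illustrative assumptions, not from this patch):

    err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_LOOPBACK);
    if (err)
            goto out_free_candev; /* hypothetical cleanup label */

Note that can_get_static_ctrlmode() recovers the static bits afterwards: they are the enabled modes that are deliberately left out of ctrlmode_supported.
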
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
+ return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
-/* helper to define static CAN controller features at device creation time */
-static inline void can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
{
struct can_priv *priv = netdev_priv(dev);
- /* alloc_candev() succeeded => netdev_priv() is valid at this point */
- priv->ctrlmode = static_mode;
- priv->ctrlmode_static = static_mode;
+ if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+ }
- /* override MTU which was set by default in can_setup()? */
- if (static_mode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
+ return can_dropped_invalid_skb(dev, skb);
}
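
The intended call site is the top of a driver's ndo_start_xmit; a hedged sketch (the foo_* name is an assumption):

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (can_dev_dropped_skb(dev, skb))
                    return NETDEV_TX_OK; /* skb already freed and counted as dropped */

            /* ... queue the frame to the hardware ... */
            return NETDEV_TX_OK;
    }
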
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc);
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len);
+void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs);
@@ -191,6 +184,9 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
@@ -198,26 +194,22 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+const char *can_get_state_str(const enum can_state state);
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
-struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
- u8 *len_ptr);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
-void can_free_echo_skb(struct net_device *dev, unsigned int idx);
-
#ifdef CONFIG_OF
void of_can_transceiver(struct net_device *dev);
#else
static inline void of_can_transceiver(struct net_device *dev) { }
#endif
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd);
-struct sk_buff *alloc_can_err_skb(struct net_device *dev,
- struct can_frame **cf);
+extern struct rtnl_link_ops can_link_ops;
+int can_netlink_register(void);
+void can_netlink_unregister(void);
#endif /* !_CAN_DEV_H */
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index 5fd627e9da19..f38772fd0c07 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -282,7 +282,7 @@ static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
}
/* return the dlc value from any received message channel_dlc field */
-static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+static inline u8 pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
{
return msg->channel_dlc >> 4;
}
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
deleted file mode 100644
index 7c3cfd798c56..000000000000
--- a/include/linux/can/led.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- */
-
-#ifndef _CAN_LED_H
-#define _CAN_LED_H
-
-#include <linux/if.h>
-#include <linux/leds.h>
-#include <linux/netdevice.h>
-
-enum can_led_event {
- CAN_LED_EVENT_OPEN,
- CAN_LED_EVENT_STOP,
- CAN_LED_EVENT_TX,
- CAN_LED_EVENT_RX,
-};
-
-#ifdef CONFIG_CAN_LEDS
-
-/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
- * suffix and null terminator
- */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
-
-void can_led_event(struct net_device *netdev, enum can_led_event event);
-void devm_can_led_init(struct net_device *netdev);
-int __init can_led_notifier_init(void);
-void __exit can_led_notifier_exit(void);
-
-#else
-
-static inline void can_led_event(struct net_device *netdev,
- enum can_led_event event)
-{
-}
-static inline void devm_can_led_init(struct net_device *netdev)
-{
-}
-static inline int can_led_notifier_init(void)
-{
- return 0;
-}
-static inline void can_led_notifier_exit(void)
-{
-}
-
-#endif
-
-#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
new file mode 100644
index 000000000000..abc978b38f79
--- /dev/null
+++ b/include/linux/can/length.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (C) 2020, 2023 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef _CAN_LENGTH_H
+#define _CAN_LENGTH_H
+
+#include <linux/bits.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/math.h>
+
+/*
+ * Size of a Classical CAN Standard Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Data Length Code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_SFF_BITS 19
+
+/*
+ * Size of a Classical CAN Extended Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_EFF_BITS 39
+
+/*
+ * Size of a CAN-FD Standard Frame in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CANFD_FRAME_HEADER_SFF_BITS 22
+
+/*
+ * Size of a CAN-FD Extended Frame in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CANFD_FRAME_HEADER_EFF_BITS 41
+
+/*
+ * Size of a CAN CRC Field in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * CRC sequence (CRC15) 15
+ * CRC Delimiter 1
+ *
+ * ignoring bitstuffing
+ */
+#define CAN_FRAME_CRC_FIELD_BITS 16
+
+/*
+ * Size of a CAN-FD CRC17 Field in bits (length: 0..16)
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Stuff Count 4
+ * CRC Sequence (CRC17) 17
+ * CRC Delimiter 1
+ * Fixed stuff bits 6
+ */
+#define CANFD_FRAME_CRC17_FIELD_BITS 28
+
+/*
+ * Size of a CAN-FD CRC21 Field in bits (length: 20..64)
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Stuff Count 4
+ * CRC sequence (CRC21) 21
+ * CRC Delimiter 1
+ * Fixed stuff bits 7
+ */
+#define CANFD_FRAME_CRC21_FIELD_BITS 33
+
+/*
+ * Size of a CAN(-FD) Frame footer in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * ACK slot 1
+ * ACK delimiter 1
+ * End Of Frame (EOF) 7
+ *
+ * including all fields following the CRC field
+ */
+#define CAN_FRAME_FOOTER_BITS 9
+
+/*
+ * First part of the Inter Frame Space
+ * (a.k.a. IMF - intermission field)
+ */
+#define CAN_INTERMISSION_BITS 3
+
+/**
+ * can_bitstuffing_len() - Calculate the maximum length with bitstuffing
+ * @destuffed_len: length of a destuffed bit stream
+ *
+ * The worst bit stuffing case is a sequence in which dominant and
+ * recessive bits alternate every four bits:
+ *
+ * Destuffed: 1 1111 0000 1111 0000 1111
+ * Stuffed: 1 1111o 0000i 1111o 0000i 1111o
+ *
+ * Nomenclature
+ *
+ * - "0": dominant bit
+ * - "o": dominant stuff bit
+ * - "1": recessive bit
+ * - "i": recessive stuff bit
+ *
+ * Aside from the first bit, one stuff bit is added every four bits.
+ *
+ * Return: length of the stuffed bit stream in the worst case scenario.
+ */
+#define can_bitstuffing_len(destuffed_len) \
+ (destuffed_len + (destuffed_len - 1) / 4)
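
As a quick check of the formula (arithmetic added here for illustration): a destuffed stream of 119 bits can gain at most (119 - 1) / 4 = 29 stuff bits, so can_bitstuffing_len(119) evaluates to 148.
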
+
+#define __can_bitstuffing_len(bitstuffing, destuffed_len) \
+ (bitstuffing ? can_bitstuffing_len(destuffed_len) : \
+ destuffed_len)
+
+#define __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CAN_FRAME_HEADER_EFF_BITS : \
+ CAN_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE + \
+ CAN_FRAME_CRC_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+#define __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CANFD_FRAME_HEADER_EFF_BITS : \
+ CANFD_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE) + \
+ ((data_len) <= 16 ? \
+ CANFD_FRAME_CRC17_FIELD_BITS : \
+ CANFD_FRAME_CRC21_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+/**
+ * can_frame_bits() - Calculate the number of bits on the wire in a
+ * CAN frame
+ * @is_fd: true: CAN-FD frame; false: Classical CAN frame.
+ * @is_eff: true: Extended frame; false: Standard frame.
+ * @bitstuffing: true: calculate the bitstuffing worst case; false:
+ * calculate the bitstuffing best case (no dynamic
+ * bitstuffing). CAN-FD's fixed stuff bits are always included.
+ * @intermission: if and only if true, include the inter frame space
+ * assuming no bus idle (i.e. only the intermission). Strictly
+ * speaking, the inter frame space is not part of the
+ * frame. However, it is needed when calculating the delay
+ * between the Start Of Frame of two consecutive frames.
+ * @data_len: length of the data field in bytes. Corresponds to
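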
+ * can(fd)_frame->len. Should be zero for remote frames. No
+ * sanitization is done on @data_len and it shall have no side
+ * effects.
+ *
+ * Return: the number of bits on the wire of a CAN frame.
+ */
+#define can_frame_bits(is_fd, is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ is_fd ? __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) : \
+ __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+)
+
+/*
+ * Number of bytes in a CAN frame
+ * (rounded up, including intermission)
+ */
+#define can_frame_bytes(is_fd, is_eff, bitstuffing, data_len) \
+ DIV_ROUND_UP(can_frame_bits(is_fd, is_eff, bitstuffing, \
+ true, data_len), \
+ BITS_PER_BYTE)
+
+/*
+ * Maximum size of a Classical CAN frame
+ * (rounded up, ignoring bitstuffing but including intermission)
+ */
+#define CAN_FRAME_LEN_MAX can_frame_bytes(false, true, false, CAN_MAX_DLEN)
+
+/*
+ * Maximum size of a CAN-FD frame
+ * (rounded up, ignoring dynamic bitstuffing but including intermission)
+ */
+#define CANFD_FRAME_LEN_MAX can_frame_bytes(true, true, false, CANFD_MAX_DLEN)
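
Working the two maxima through by hand (the arithmetic is added here for illustration and follows directly from the constants above):

    Classical CAN, EFF, 8 data bytes, no dynamic stuffing:
        39 + 8 * 8 + 16 = 119 bits, + 9 footer + 3 intermission = 131 bits
        DIV_ROUND_UP(131, 8) = 17, so CAN_FRAME_LEN_MAX is 17 bytes
    CAN FD, EFF, 64 data bytes, fixed stuff bits only:
        41 + 64 * 8 = 553 bits, + 33 CRC21 field + 9 + 3 = 598 bits
        DIV_ROUND_UP(598, 8) = 75, so CANFD_FRAME_LEN_MAX is 75 bytes
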
+
+/*
+ * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
+
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
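
A short sketch of the raw-DLC round trip enabled by CAN_CTRLMODE_CC_LEN8_DLC (the values are illustrative):

    struct can_frame cf = { 0 };

    can_frame_set_cc_len(&cf, 0xf, CAN_CTRLMODE_CC_LEN8_DLC);
    /* cf.len == 8 (payload clamped), cf.len8_dlc == 15 (raw DLC preserved) */
    can_get_cc_dlc(&cf, CAN_CTRLMODE_CC_LEN8_DLC); /* returns 15 */
    can_get_cc_dlc(&cf, 0);                        /* returns cf.len, i.e. 8 */
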
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len);
+
+/* calculate the CAN Frame length in bytes of a given skb */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb);
+
+/* map the data length to an appropriate data link layer length */
+static inline u8 canfd_sanitize_len(u8 len)
+{
+ return can_fd_dlc2len(can_fd_len2dlc(len));
+}
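
For example, a requested length of 13 has no exact DLC in the CAN-FD length table, so canfd_sanitize_len(13) rounds up through DLC 0xa to 16 bytes (a worked value, assuming the standard 12/16/20/24/32/48/64 steps).
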
+
+#endif /* !_CAN_LENGTH_H */
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 000000000000..1b536fb999de
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_PLATFORM_FLEXCAN_H
+#define _CAN_PLATFORM_FLEXCAN_H
+
+struct flexcan_platform_data {
+ u32 clock_frequency;
+ u8 clk_src;
+};
+
+#endif /* _CAN_PLATFORM_FLEXCAN_H */
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 5755ae5a4712..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -14,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 1b78a0cfb615..d29bb4521947 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -3,7 +3,7 @@
* linux/can/rx-offload.h
*
* Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014-2017, 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
*/
#ifndef _CAN_RX_OFFLOAD_H
@@ -20,6 +20,7 @@ struct can_rx_offload {
bool drop);
struct sk_buff_head skb_queue;
+ struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@@ -35,23 +36,27 @@ int can_rx_offload_add_timestamp(struct net_device *dev,
int can_rx_offload_add_fifo(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
- struct sk_buff *skb, u32 timestamp);
-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
+unsigned int can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
-static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
-{
- napi_schedule(&offload->napi);
-}
-
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 900b9f4e0605..1abc25a8d144 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -16,6 +16,27 @@
#include <linux/can.h>
#include <net/sock.h>
+void can_flush_echo_skb(struct net_device *dev);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr,
+ unsigned int *frame_len_ptr);
+unsigned int __must_check can_get_echo_skb(struct net_device *dev,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
+
/*
* The struct can_skb_priv is used to transport additional information along
* with the stored struct can(fd)_frame that can not be contained in existing
@@ -29,11 +50,13 @@
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
* @skbcnt: atomic counter to have an unique id together with skb pointer
+ * @frame_len: length of CAN frame in data link layer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
int skbcnt;
+ unsigned int frame_len;
struct can_frame cf[];
};
@@ -49,8 +72,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
- if (sk) {
- sock_hold(sk);
+ /* If the socket has already been closed by user space, the
+ * refcount may already be 0 (and the socket will be freed
+ * after the last TX skb has been freed). So only increase
+ * socket refcount if the refcount is > 0.
+ */
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb->destructor = sock_efree;
skb->sk = sk;
}
@@ -61,21 +88,72 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
- if (skb_shared(skb)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- can_skb_set_owner(nskb, skb->sk);
- consume_skb(skb);
- return nskb;
- } else {
- kfree_skb(skb);
- return NULL;
- }
+ struct sk_buff *nskb;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ kfree_skb(skb);
+ return NULL;
}
- /* we can assume to have an unshared skb with proper owner */
- return skb;
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+}
+
+static inline bool can_is_can_skb(const struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CAN_MTU && cf->len <= CAN_MAX_DLEN);
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CANFD_MTU && cfd->len <= CANFD_MAX_DLEN);
+}
+
+static inline bool can_is_canxl_skb(const struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ if (skb->len < CANXL_HDR_SIZE + CANXL_MIN_DLEN || skb->len > CANXL_MTU)
+ return false;
+
+ /* this also checks valid CAN XL data length boundaries */
+ if (skb->len != CANXL_HDR_SIZE + cxl->len)
+ return false;
+
+ return cxl->flags & CANXL_XLF;
+}
+
+/* get length element value from can[|fd|xl]_frame structure */
+static inline unsigned int can_skb_get_len_val(struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (can_is_canxl_skb(skb))
+ return cxl->len;
+
+ return cfd->len;
+}
+
+/* get needed data length inside CAN frame for all frame types (RTR aware) */
+static inline unsigned int can_skb_get_data_len(struct sk_buff *skb)
+{
+ unsigned int len = can_skb_get_len_val(skb);
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* RTR frames have an actual length of zero */
+ if (can_is_can_skb(skb) && cf->can_id & CAN_RTR_FLAG)
+ return 0;
+
+ return len;
}
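
Taken together, the three predicates allow a clean dispatch on the frame type of an incoming skb; a minimal sketch (the handle_* names are assumptions):

    if (can_is_canxl_skb(skb))
            handle_xl(skb);   /* CAN XL: variable length, CANXL_XLF set */
    else if (can_is_canfd_skb(skb))
            handle_fd(skb);
    else if (can_is_can_skb(skb))
            handle_cc(skb);
    else
            kfree_skb(skb);   /* not a valid CAN frame */
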
#endif /* !_CAN_SKB_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 1e7fe311cabe..0c356a517991 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -15,43 +15,31 @@
#include <uapi/linux/capability.h>
#include <linux/uidgid.h>
+#include <linux/bits.h>
#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
-#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
extern int file_caps_enabled;
-typedef struct kernel_cap_struct {
- __u32 cap[_KERNEL_CAPABILITY_U32S];
-} kernel_cap_t;
+typedef struct { u64 val; } kernel_cap_t;
/* same as vfs_ns_cap_data but in cpu endian and always filled completely */
struct cpu_vfs_cap_data {
__u32 magic_etc;
+ kuid_t rootid;
kernel_cap_t permitted;
kernel_cap_t inheritable;
- kuid_t rootid;
};
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-
struct file;
struct inode;
struct dentry;
struct task_struct;
struct user_namespace;
-
-extern const kernel_cap_t __cap_empty_set;
-extern const kernel_cap_t __cap_init_eff_set;
-
-/*
- * Internal kernel functions only
- */
-
-#define CAP_FOR_EACH_U32(__capi) \
- for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+struct mnt_idmap;
/*
* CAP_FS_MASK and CAP_NFSD_MASKS:
@@ -66,94 +54,52 @@ extern const kernel_cap_t __cap_init_eff_set;
* 2. The security.* and trusted.* xattrs are fs-related MAC permissions
*/
-# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
- | CAP_TO_MASK(CAP_MKNOD) \
- | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
- | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
- | CAP_TO_MASK(CAP_FOWNER) \
- | CAP_TO_MASK(CAP_FSETID))
-
-# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
+# define CAP_FS_MASK (BIT_ULL(CAP_CHOWN) \
+ | BIT_ULL(CAP_MKNOD) \
+ | BIT_ULL(CAP_DAC_OVERRIDE) \
+ | BIT_ULL(CAP_DAC_READ_SEARCH) \
+ | BIT_ULL(CAP_FOWNER) \
+ | BIT_ULL(CAP_FSETID) \
+ | BIT_ULL(CAP_MAC_OVERRIDE))
+#define CAP_VALID_MASK (BIT_ULL(CAP_LAST_CAP+1)-1)
-#if _KERNEL_CAPABILITY_U32S != 2
-# error Fix up hand-coded capability macro initializers
-#else /* HAND-CODED capability initializers */
+# define CAP_EMPTY_SET ((kernel_cap_t) { 0 })
+# define CAP_FULL_SET ((kernel_cap_t) { CAP_VALID_MASK })
+# define CAP_FS_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_LINUX_IMMUTABLE) })
+# define CAP_NFSD_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_SYS_RESOURCE) })
-#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
-#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
+# define cap_clear(c) do { (c).val = 0; } while (0)
-# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
- CAP_FS_MASK_B1 } })
-# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_SYS_RESOURCE), \
- CAP_FS_MASK_B1 } })
-
-#endif /* _KERNEL_CAPABILITY_U32S != 2 */
-
-# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
-
-#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
-
-#define CAP_BOP_ALL(c, a, b, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
- } \
-} while (0)
-
-#define CAP_UOP_ALL(c, a, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = OP a.cap[__capi]; \
- } \
-} while (0)
+#define cap_raise(c, flag) ((c).val |= BIT_ULL(flag))
+#define cap_lower(c, flag) ((c).val &= ~BIT_ULL(flag))
+#define cap_raised(c, flag) (((c).val & BIT_ULL(flag)) != 0)
static inline kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, |);
- return dest;
+ return (kernel_cap_t) { a.val | b.val };
}
static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, &);
- return dest;
+ return (kernel_cap_t) { a.val & b.val };
}
static inline kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, drop, &~);
- return dest;
+ return (kernel_cap_t) { a.val &~ drop.val };
}
-static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+static inline bool cap_isclear(const kernel_cap_t a)
{
- kernel_cap_t dest;
- CAP_UOP_ALL(dest, c, ~);
- return dest;
+ return !a.val;
}
-static inline bool cap_isclear(const kernel_cap_t a)
+static inline bool cap_isidentical(const kernel_cap_t a, const kernel_cap_t b)
{
- unsigned __capi;
- CAP_FOR_EACH_U32(__capi) {
- if (a.cap[__capi] != 0)
- return false;
- }
- return true;
+ return a.val == b.val;
}
/*
@@ -165,39 +111,31 @@ static inline bool cap_isclear(const kernel_cap_t a)
*/
static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
- kernel_cap_t dest;
- dest = cap_drop(a, set);
- return cap_isclear(dest);
+ return !(a.val & ~set.val);
}
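
With the single-u64 representation, every set operation above reduces to one machine word of bit arithmetic; a small illustration (the capability choices are arbitrary):

    kernel_cap_t a = { BIT_ULL(CAP_CHOWN) | BIT_ULL(CAP_MKNOD) };
    kernel_cap_t b = { BIT_ULL(CAP_CHOWN) };

    cap_issubset(b, a);          /* true: b.val & ~a.val == 0 */
    cap_isclear(cap_drop(a, a)); /* true: a.val & ~a.val == 0 */
    cap_raised(a, CAP_MKNOD);    /* true */
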
/* Used to decide between falling back on the old suser() or fsuser(). */
static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_FS_SET);
}
static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_fs_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_FS_SET));
}
static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_NFSD_SET);
}
static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_nfsd_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_NFSD_SET));
}
#ifdef CONFIG_MULTIUSER
@@ -247,8 +185,11 @@ static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
return true;
}
#endif /* CONFIG_MULTIUSER */
-extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
-extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
+ struct mnt_idmap *idmap,
+ const struct inode *inode);
+bool capable_wrt_inode_uidgid(struct mnt_idmap *idmap,
+ const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
static inline bool perfmon_capable(void)
@@ -268,8 +209,11 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
}
/* audit system wants to get cap info from files as well */
-extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
+ const struct dentry *dentry,
+ struct cpu_vfs_cap_data *cpu_caps);
-extern int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size);
+int cap_convert_nscap(struct mnt_idmap *idmap, struct dentry *dentry,
+ const void **ivalue, size_t size);
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
new file mode 100644
index 000000000000..cb0d6cd1c12f
--- /dev/null
+++ b/include/linux/cc_platform.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+ /**
+ * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+ *
+ * The platform/OS is running with active memory encryption. This
+ * includes running either as a bare-metal system or a hypervisor
+ * and actively using memory encryption or as a guest/virtual machine
+ * and actively using memory encryption.
+ *
+ * Examples include SME, SEV and SEV-ES.
+ */
+ CC_ATTR_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+ *
+ * The platform/OS is running as a bare-metal system or a hypervisor
+ * and actively using memory encryption.
+ *
+ * Examples include SME.
+ */
+ CC_ATTR_HOST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption.
+ *
+ * Examples include SEV and SEV-ES.
+ */
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption and register state encryption.
+ *
+ * Examples include SEV-ES.
+ */
+ CC_ATTR_GUEST_STATE_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_UNROLL_STRING_IO: String I/O is implemented with
+ * IN/OUT instructions
+ *
+ * The platform/OS is running as a guest/virtual machine and uses
+ * IN/OUT instructions in place of string I/O.
+ *
+ * Examples include TDX guest & SEV.
+ */
+ CC_ATTR_GUEST_UNROLL_STRING_IO,
+
+ /**
+ * @CC_ATTR_GUEST_SEV_SNP: Guest SNP is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP features.
+ */
+ CC_ATTR_GUEST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled.
+ *
+ * The platform/OS is running as a guest/virtual machine that does not
+ * support the CPU hotplug feature.
+ *
+ * Examples include TDX Guest.
+ */
+ CC_ATTR_HOTPLUG_DISABLED,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function will return an indicator as to whether the
+ * specified Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
+
+#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+
+#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif /* _LINUX_CC_PLATFORM_H */
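
Typical use is a runtime guard around encryption-dependent code paths; a minimal sketch (the bounce-buffer example is an assumption, not taken from this patch):

    /* e.g. force bounce buffering when guest memory is encrypted */
    if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
            use_bounce_buffers = true; /* illustrative flag */
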
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a5dfbaf2470d..868924dec5a1 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -15,7 +15,8 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
struct ccp_device;
struct ccp_cmd;
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index f48d0a31deae..98c6fd0b39b6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <linux/blkdev.h>
#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
@@ -61,9 +62,10 @@ struct cdrom_device_info {
__u8 last_sense;
__u8 media_written; /* dirty flag, DVD+RW bookkeeping */
unsigned short mmc3_profile; /* current MMC3 profile */
- int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ bool opened_for_data;
+ __s64 last_media_change_ms;
};
struct cdrom_device_ops {
@@ -76,7 +78,6 @@ struct cdrom_device_ops {
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
int (*select_speed) (struct cdrom_device_info *, int);
- int (*select_disc) (struct cdrom_device_info *, int);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
@@ -86,11 +87,13 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-/* driver specifications */
- const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
+ int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+ const int capability; /* capability flags */
};
int cdrom_multisession(struct cdrom_device_info *cdi,
@@ -99,11 +102,10 @@ int cdrom_read_tocentry(struct cdrom_device_info *cdi,
struct cdrom_tocentry *entry);
/* the general block_device operations structure: */
-extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode);
-extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
-extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg);
+int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode);
+void cdrom_release(struct cdrom_device_info *cdi);
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
new file mode 100644
index 000000000000..b57118aaa679
--- /dev/null
+++ b/include/linux/cdx/cdx_bus.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CDX bus public interface
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef _CDX_BUS_H_
+#define _CDX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/msi.h>
+
+#define MAX_CDX_DEV_RESOURCES 4
+#define CDX_CONTROLLER_ID_SHIFT 4
+#define CDX_BUS_NUM_MASK 0xF
+
+/* Forward declaration for CDX controller */
+struct cdx_controller;
+
+enum {
+ CDX_DEV_MSI_CONF,
+ CDX_DEV_BUS_MASTER_CONF,
+ CDX_DEV_RESET_CONF,
+ CDX_DEV_MSI_ENABLE,
+};
+
+struct cdx_msi_config {
+ u64 addr;
+ u32 data;
+ u16 msi_index;
+};
+
+struct cdx_device_config {
+ u8 type;
+ union {
+ struct cdx_msi_config msi;
+ bool bus_master_enable;
+ bool msi_enable;
+ };
+};
+
+typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
+
+typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_device_config *dev_config);
+
+/**
+ * CDX_DEVICE - macro used to describe a specific CDX device
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ *
+ * This macro is used to create a struct cdx_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE(vend, dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID
+
+/**
+ * CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
+ * override_only flags.
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ * @driver_override: the 32 bit CDX Device override_only
+ *
+ * This macro is used to create a struct cdx_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\
+ .subdevice = CDX_ANY_ID, .override_only = (driver_override)
+
+/**
+ * struct cdx_ops - Callbacks supported by CDX controller.
+ * @bus_enable: enable bus on the controller
+ * @bus_disable: disable bus on the controller
+ * @scan: scan the devices on the controller
+ * @dev_configure: configuration like reset, master_enable,
+ * msi_config etc for a CDX device
+ */
+struct cdx_ops {
+ cdx_bus_enable_cb bus_enable;
+ cdx_bus_disable_cb bus_disable;
+ cdx_scan_cb scan;
+ cdx_dev_configure_cb dev_configure;
+};
+
+/**
+ * struct cdx_controller: CDX controller object
+ * @dev: Linux device associated with the CDX controller.
+ * @priv: private data
+ * @msi_domain: MSI domain
+ * @id: Controller ID
+ * @controller_registered: controller registered with bus
+ * @ops: CDX controller ops
+ */
+struct cdx_controller {
+ struct device *dev;
+ void *priv;
+ struct irq_domain *msi_domain;
+ u32 id;
+ bool controller_registered;
+ struct cdx_ops *ops;
+};
+
+/**
+ * struct cdx_device - CDX device object
+ * @dev: Linux driver model device object
+ * @cdx: CDX controller associated with the device
+ * @vendor: Vendor ID for CDX device
+ * @device: Device ID for CDX device
+ * @subsystem_vendor: Subsystem Vendor ID for CDX device
+ * @subsystem_device: Subsystem Device ID for CDX device
+ * @class: Class for the CDX device
+ * @revision: Revision of the CDX device
+ * @bus_num: Bus number for this CDX device
+ * @dev_num: Device number for this device
+ * @res: array of MMIO region entries
+ * @res_attr: resource binary attribute
+ * @debugfs_dir: debugfs directory for this device
+ * @res_count: number of valid MMIO regions
+ * @dma_mask: Default DMA mask
+ * @flags: CDX device flags
+ * @req_id: Requestor ID associated with CDX device
+ * @is_bus: Is this a bus device
+ * @enabled: Is this bus enabled
+ * @msi_dev_id: MSI Device ID associated with CDX device
+ * @num_msi: Number of MSI's supported by the device
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @irqchip_lock: lock to synchronize irq/msi configuration
+ * @msi_write_pending: MSI write pending for this device
+ */
+struct cdx_device {
+ struct device dev;
+ struct cdx_controller *cdx;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+ u32 class;
+ u8 revision;
+ u8 bus_num;
+ u8 dev_num;
+ struct resource res[MAX_CDX_DEV_RESOURCES];
+ struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES];
+ struct dentry *debugfs_dir;
+ u8 res_count;
+ u64 dma_mask;
+ u16 flags;
+ u32 req_id;
+ bool is_bus;
+ bool enabled;
+ u32 msi_dev_id;
+ u32 num_msi;
+ const char *driver_override;
+ struct mutex irqchip_lock;
+ bool msi_write_pending;
+};
+
+#define to_cdx_device(_dev) \
+ container_of(_dev, struct cdx_device, dev)
+
+#define cdx_resource_start(dev, num) ((dev)->res[(num)].start)
+#define cdx_resource_end(dev, num) ((dev)->res[(num)].end)
+#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags)
+#define cdx_resource_len(dev, num) \
+ ((cdx_resource_start((dev), (num)) == 0 && \
+ cdx_resource_end((dev), (num)) == \
+ cdx_resource_start((dev), (num))) ? 0 : \
+ (cdx_resource_end((dev), (num)) - \
+ cdx_resource_start((dev), (num)) + 1))
+/**
+ * struct cdx_driver - CDX device driver
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching Ids
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @reset_prepare: Function called before device reset to notify the driver
+ * @reset_done: Function called after reset is complete to notify driver
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
+ */
+struct cdx_driver {
+ struct device_driver driver;
+ const struct cdx_device_id *match_id_table;
+ int (*probe)(struct cdx_device *dev);
+ int (*remove)(struct cdx_device *dev);
+ void (*shutdown)(struct cdx_device *dev);
+ void (*reset_prepare)(struct cdx_device *dev);
+ void (*reset_done)(struct cdx_device *dev);
+ bool driver_managed_dma;
+};
+
+#define to_cdx_driver(_drv) \
+ container_of(_drv, struct cdx_driver, driver)
+
+/* Macro to avoid include chaining to get THIS_MODULE */
+#define cdx_driver_register(drv) \
+ __cdx_driver_register(drv, THIS_MODULE)
+
+/**
+ * __cdx_driver_register - registers a CDX device driver
+ * @cdx_driver: CDX driver to register
+ * @owner: module owner
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int __must_check __cdx_driver_register(struct cdx_driver *cdx_driver,
+ struct module *owner);
+
+/**
+ * cdx_driver_unregister - unregisters a device driver from the
+ * CDX bus.
+ * @cdx_driver: CDX driver to register
+ */
+void cdx_driver_unregister(struct cdx_driver *cdx_driver);
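
A hedged sketch of a minimal driver against this interface (the IDs and every foo_* name are made up):

    static const struct cdx_device_id foo_ids[] = {
            { CDX_DEVICE(0x10ee, 0x0001) }, /* hypothetical vendor/device */
            { },
    };

    static int foo_probe(struct cdx_device *cdx_dev)
    {
            return cdx_set_master(cdx_dev); /* enable bus mastering */
    }

    static struct cdx_driver foo_driver = {
            .driver = { .name = "foo-cdx" },
            .match_id_table = foo_ids,
            .probe = foo_probe,
    };

    /* registered via cdx_driver_register(&foo_driver); */
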
+
+extern struct bus_type cdx_bus_type;
+
+/**
+ * cdx_dev_reset - Reset CDX device
+ * @dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_dev_reset(struct device *dev);
+
+/**
+ * cdx_set_master - enables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to enable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_set_master(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_clear_master - disables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to disable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_clear_master(struct cdx_device *cdx_dev);
+
+#ifdef CONFIG_GENERIC_MSI_IRQ
+/**
+ * cdx_enable_msi - Enable MSI for the CDX device.
+ * @cdx_dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_enable_msi(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_disable_msi - Disable MSI for the CDX device.
+ * @cdx_dev: device pointer
+ */
+void cdx_disable_msi(struct cdx_device *cdx_dev);
+
+#else /* CONFIG_GENERIC_MSI_IRQ */
+
+static inline int cdx_enable_msi(struct cdx_device *cdx_dev)
+{
+ return -ENODEV;
+}
+
+static inline void cdx_disable_msi(struct cdx_device *cdx_dev)
+{
+}
+
+#endif /* CONFIG_GENERIC_MSI_IRQ */
+
+#endif /* _CDX_BUS_H_ */
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 6728c2ee0205..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -32,8 +32,6 @@ struct ceph_auth_handshake {
};
struct ceph_auth_client_ops {
- const char *name;
-
/*
* true if we are authenticated and can connect to
* services.
@@ -52,8 +50,10 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
- void *buf, void *end);
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
+ void *buf, void *end, u8 *session_key,
+ int *session_key_len, u8 *con_secret,
+ int *con_secret_len);
/*
* Create authorizer for connecting to a service, and verify
@@ -69,7 +69,10 @@ struct ceph_auth_client_ops {
void *challenge_buf,
int challenge_buf_len);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -95,11 +98,17 @@ struct ceph_auth_client {
const struct ceph_crypto_key *key; /* our secret key */
unsigned want_keys; /* which services we want */
+ int preferred_mode; /* CEPH_CON_MODE_* */
+ int fallback_mode; /* ditto */
+
struct mutex mutex;
};
-extern struct ceph_auth_client *ceph_auth_init(const char *name,
- const struct ceph_crypto_key *key);
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
+struct ceph_auth_client *ceph_auth_init(const char *name,
+ const struct ceph_crypto_key *key,
+ const int *con_modes);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);
extern void ceph_auth_reset(struct ceph_auth_client *ac);
@@ -113,21 +122,22 @@ int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
extern int ceph_build_auth(struct ceph_auth_client *ac,
void *msg_buf, size_t msg_len);
-
extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
-extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *auth);
+
+int __ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, bool force_new,
+ int *proto, int *pref_mode, int *fallb_mode);
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
-extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *a);
int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
struct ceph_authorizer *a,
void *challenge_buf,
int challenge_buf_len);
-extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
@@ -147,4 +157,34 @@ int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
return auth->check_message_signature(auth, msg);
return 0;
}
+
+int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len);
+int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply,
+ int reply_len, void *buf, int buf_len);
+int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
+int ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac,
+ int peer_type, int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
#endif
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index d5a5da838caf..11a92a946016 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -19,12 +19,25 @@
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
+ kbasename(__FILE__), __LINE__, \
+ &client->fsid, client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do { \
if (0) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
+# define doutc(client, fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG "[%pU %llu] " fmt, \
+ &client->fsid, \
+ client->monc.auth->global_id, \
+ ##__VA_ARGS__); \
+ } while (0)
# endif
#else
@@ -33,7 +46,32 @@
* or, just wrap pr_debug
*/
# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \
+ client->monc.auth->global_id, __func__, ##__VA_ARGS__)
#endif
+#define pr_notice_client(client, fmt, ...) \
+ pr_notice("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_info_client(client, fmt, ...) \
+ pr_info("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_client(client, fmt, ...) \
+ pr_warn("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_once_client(client, fmt, ...) \
+ pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_client(client, fmt, ...) \
+ pr_err("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_ratelimited_client(client, fmt, ...) \
+ pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_ratelimited_client(client, fmt, ...) \
+ pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+
#endif
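
The client-aware variants prepend the cluster fsid and the client's global_id automatically, so logs from multiple mounted clusters stay attributable; a usage sketch (the client variable is assumed to be a struct ceph_client in scope):

    doutc(client, "mds%d session opened\n", mds); /* illustrative */
    pr_warn_client(client, "mds session reconnect denied\n");
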
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 999636d53cf2..3a47acd9cc14 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -8,7 +8,8 @@
* feature. Base case is 1 (first use).
*/
#define CEPH_FEATURE_INCARNATION_1 (0ull)
-#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \
@@ -75,7 +76,7 @@
DEFINE_CEPH_FEATURE( 0, 1, UID)
DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR)
DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS)
-
+DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS)
DEFINE_CEPH_FEATURE( 3, 1, FLOCK)
DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2)
DEFINE_CEPH_FEATURE( 5, 1, MONNAMES)
@@ -114,7 +115,7 @@ DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2)
DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID)
DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE)
DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL)
-DEFINE_CEPH_FEATURE(28, 2, SERVER_M)
+DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC)
DEFINE_CEPH_FEATURE(29, 1, MDSENC)
DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL)
DEFINE_CEPH_FEATURE(31, 1, MON_SINGLE_PAXOS) // deprecate me
@@ -177,13 +178,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_SERVER_NAUTILUS | \
CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
+ CEPH_FEATURE_MONNAMES | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
+ CEPH_FEATURE_MONENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_SERVER_LUMINOUS | \
CEPH_FEATURE_RESEND_ON_SPLIT | \
@@ -193,6 +197,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_SERVER_MIMIC | \
CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 455e9b9e2adf..ee1d0e5f9789 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -28,8 +28,8 @@
#define CEPH_INO_ROOT 1
-#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
-#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@@ -93,8 +93,19 @@ struct ceph_dir_layout {
#define CEPH_AUTH_NONE 0x1
#define CEPH_AUTH_CEPHX 0x2
+#define CEPH_AUTH_MODE_NONE 0
+#define CEPH_AUTH_MODE_AUTHORIZER 1
+#define CEPH_AUTH_MODE_MON 10
+
+/* msgr2 protocol modes */
+#define CEPH_CON_MODE_UNKNOWN 0x0
+#define CEPH_CON_MODE_CRC 0x1
+#define CEPH_CON_MODE_SECURE 0x2
+
#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
+const char *ceph_auth_proto_name(int proto);
+const char *ceph_con_mode_name(int mode);
/*********************************************
* message layer
@@ -288,8 +299,11 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
+ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
@@ -314,6 +328,7 @@ enum {
CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
CEPH_MDS_OP_LOOKUPINO = 0x00104,
CEPH_MDS_OP_LOOKUPNAME = 0x00105,
+ CEPH_MDS_OP_GETVXATTR = 0x00106,
CEPH_MDS_OP_SETXATTR = 0x01105,
CEPH_MDS_OP_RMXATTR = 0x01106,
@@ -342,16 +357,26 @@ enum {
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
-extern const char *ceph_mds_op_name(int op);
+#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \
+ op == CEPH_MDS_OP_MKNOD || \
+ op == CEPH_MDS_OP_MKDIR || \
+ op == CEPH_MDS_OP_SYMLINK)
+extern const char *ceph_mds_op_name(int op);
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -418,12 +443,13 @@ union ceph_mds_request_args {
__le32 stripe_unit; /* layout for newly created file */
__le32 stripe_count; /* ... */
__le32 object_size;
- __le32 file_replication;
- __le32 mask; /* CEPH_CAP_* */
- __le32 old_size;
+ __le32 pool;
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
+ __le32 osdmap_epoch; /* used for setting file/dir layouts */
} __attribute__ ((packed)) setxattr;
struct {
struct ceph_file_layout_legacy layout;
@@ -445,11 +471,25 @@ union ceph_mds_request_args {
} __attribute__ ((packed)) lookupino;
} __attribute__ ((packed));
+union ceph_mds_request_args_ext {
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
+};
+
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
-struct ceph_mds_request_head {
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -462,6 +502,42 @@ struct ceph_mds_request_head {
union ceph_mds_request_args args;
} __attribute__ ((packed));
+#define CEPH_MDS_REQUEST_HEAD_VERSION 3
+
+struct ceph_mds_request_head_old {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* count retry, fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+} __attribute__ ((packed));
+
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+
+ __le32 struct_len; /* to store size of struct ceph_mds_request_head */
+ __le32 owner_uid, owner_gid; /* used for OPs which create inodes */
+} __attribute__ ((packed));
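As a hedged aside on how these layouts relate (the helper below is hypothetical, not the kernel's code): struct ceph_mds_request_head_legacy is the versioned head minus the leading version field and the trailing extension fields, so an encoder can size the head per-peer:

	/* hypothetical: pick the on-wire head size for a given peer */
	static inline size_t mds_request_head_len(bool peer_is_legacy)
	{
		return peer_is_legacy ?
			sizeof(struct ceph_mds_request_head_legacy) :
			sizeof(struct ceph_mds_request_head);
	}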
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
@@ -722,7 +798,7 @@ struct ceph_mds_caps {
__le32 xattr_len;
__le64 xattr_version;
- /* filelock */
+ /* a union of non-export and export bodies. */
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 450384fe487c..04f3ace5787b 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -220,6 +220,8 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
*/
#define CEPH_ENTITY_ADDR_TYPE_NONE 0
#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
+#define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2)
+#define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3)
static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
@@ -239,6 +241,12 @@ static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
extern int ceph_decode_entity_addr(void **p, void *end,
struct ceph_entity_addr *addr);
+int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ struct ceph_entity_addr *addr);
+
+int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr);
+void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr);
+
/*
* encoders
*/
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index c8645f0b797d..4497d0a6772c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -31,11 +31,11 @@
#define CEPH_OPT_FSID (1<<0)
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
-#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
-#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
-#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
-#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */
+#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
+#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -53,6 +53,7 @@ struct ceph_options {
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */
+ int con_modes[2]; /* CEPH_CON_MODE_* */
/*
* any type that can't be simply compared or doesn't need
@@ -83,6 +84,7 @@ struct ceph_options {
#define CEPH_MONC_HUNT_BACKOFF 2
#define CEPH_MONC_HUNT_MAX_MULT 10
+#define CEPH_MSG_MAX_CONTROL_LEN (16*1024*1024)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
@@ -97,15 +99,6 @@ struct ceph_options {
#define CEPH_AUTH_NAME_DEFAULT "guest"
-/* mount state */
-enum {
- CEPH_MOUNT_MOUNTING,
- CEPH_MOUNT_MOUNTED,
- CEPH_MOUNT_UNMOUNTING,
- CEPH_MOUNT_UNMOUNTED,
- CEPH_MOUNT_SHUTDOWN,
-};
-
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
return timeout ?: MAX_SCHEDULE_TIMEOUT;
@@ -150,6 +143,10 @@ struct ceph_client {
#define from_msgr(ms) container_of(ms, struct ceph_client, msgr)
+static inline bool ceph_msgr2(struct ceph_client *client)
+{
+ return client->options->con_modes[0] != CEPH_CON_MODE_UNKNOWN;
+}
/*
* snapshots
@@ -277,6 +274,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
@@ -289,13 +287,13 @@ extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-extern void *ceph_kvmalloc(size_t size, gfp_t flags);
+extern int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid);
struct fs_parameter;
struct fc_log;
struct ceph_options *ceph_alloc_options(void);
int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
- struct fc_log *l);
+ struct fc_log *l, char delim);
int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
struct fc_log *l);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
deleted file mode 100644
index 35d385296fbb..000000000000
--- a/include/linux/ceph/mdsmap.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FS_CEPH_MDSMAP_H
-#define _FS_CEPH_MDSMAP_H
-
-#include <linux/bug.h>
-#include <linux/ceph/types.h>
-
-/*
- * mds map - describe servers in the mds cluster.
- *
- * we limit fields to those the client actually cares about
- */
-struct ceph_mds_info {
- u64 global_id;
- struct ceph_entity_addr addr;
- s32 state;
- int num_export_targets;
- bool laggy;
- u32 *export_targets;
-};
-
-struct ceph_mdsmap {
- u32 m_epoch, m_client_epoch, m_last_failure;
- u32 m_root;
- u32 m_session_timeout; /* seconds */
- u32 m_session_autoclose; /* seconds */
- u64 m_max_file_size;
- u32 m_max_mds; /* expected up:active mds number */
- u32 m_num_active_mds; /* actual up:active mds number */
- u32 possible_max_rank; /* possible max rank index */
- struct ceph_mds_info *m_info;
-
- /* which object pools file data can be stored in */
- int m_num_data_pg_pools;
- u64 *m_data_pg_pools;
- u64 m_cas_pg_pool;
-
- bool m_enabled;
- bool m_damaged;
- int m_num_laggy;
-};
-
-static inline struct ceph_entity_addr *
-ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
-{
- if (w >= m->possible_max_rank)
- return NULL;
- return &m->m_info[w].addr;
-}
-
-static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
-{
- BUG_ON(w < 0);
- if (w >= m->possible_max_rank)
- return CEPH_MDS_STATE_DNE;
- return m->m_info[w].state;
-}
-
-static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
-{
- if (w >= 0 && w < m->possible_max_rank)
- return m->m_info[w].laggy;
- return false;
-}
-
-extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
-extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
-extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
-
-#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 76371aaae2d1..1717cc57cdac 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -3,6 +3,7 @@
#define __FS_CEPH_MESSENGER_H
#include <linux/bvec.h>
+#include <linux/crypto.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>
@@ -16,6 +17,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -52,9 +54,50 @@ struct ceph_connection_operations {
int (*sign_message) (struct ceph_msg *msg);
int (*check_message_signature) (struct ceph_msg *msg);
+
+ /* msgr2 authentication exchange */
+ int (*get_auth_request)(struct ceph_connection *con,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_reply_more)(struct ceph_connection *con,
+ void *reply, int reply_len,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_done)(struct ceph_connection *con,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+ int (*handle_auth_bad_method)(struct ceph_connection *con,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
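A hedged caller-side sketch of the sparse_read contract documented above (the receive helpers are hypothetical placeholders):

	for (;;) {
		char *buf = NULL;
		int len = con->ops->sparse_read(con, cursor, &buf);

		if (len <= 0)
			break;				/* 0: done, <0: -errno */
		if (buf)
			receive_into_buf(buf, len);	/* hypothetical */
		else
			receive_into_cursor(cursor, len); /* hypothetical */
	}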
-/* use format string %s%d */
+/* use format string %s%lld */
#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
struct ceph_messenger {
@@ -80,6 +123,7 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
#ifdef CONFIG_BLOCK
@@ -181,6 +225,7 @@ struct ceph_msg_data {
bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
@@ -189,7 +234,7 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
- bool last_piece; /* current is last piece */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -205,6 +250,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -234,15 +283,182 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ u64 sparse_read_total;
int front_alloc_len;
- unsigned long ack_stamp; /* tx: when we were acked */
struct ceph_msgpool *pool;
};
+/*
+ * connection states
+ */
+#define CEPH_CON_S_CLOSED 1
+#define CEPH_CON_S_PREOPEN 2
+#define CEPH_CON_S_V1_BANNER 3
+#define CEPH_CON_S_V1_CONNECT_MSG 4
+#define CEPH_CON_S_V2_BANNER_PREFIX 5
+#define CEPH_CON_S_V2_BANNER_PAYLOAD 6
+#define CEPH_CON_S_V2_HELLO 7
+#define CEPH_CON_S_V2_AUTH 8
+#define CEPH_CON_S_V2_AUTH_SIGNATURE 9
+#define CEPH_CON_S_V2_SESSION_CONNECT 10
+#define CEPH_CON_S_V2_SESSION_RECONNECT 11
+#define CEPH_CON_S_OPEN 12
+#define CEPH_CON_S_STANDBY 13
+
+/*
+ * ceph_connection flag bits
+ */
+#define CEPH_CON_F_LOSSYTX 0 /* we can close channel or drop
+ messages on errors */
+#define CEPH_CON_F_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
+#define CEPH_CON_F_WRITE_PENDING 2 /* we have data ready to send */
+#define CEPH_CON_F_SOCK_CLOSED 3 /* socket state changed to closed */
+#define CEPH_CON_F_BACKOFF 4 /* need to retry queuing delayed
+ work */
+
/* ceph connection fault delay defaults, for exponential backoff */
-#define BASE_DELAY_INTERVAL (HZ/2)
-#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
+#define BASE_DELAY_INTERVAL (HZ / 4)
+#define MAX_DELAY_INTERVAL (15 * HZ)
+
+struct ceph_connection_v1_info {
+ struct kvec out_kvec[8], /* sending header/footer data */
+ *out_kvec_cur;
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+ bool out_more; /* there is more data after the kvecs */
+ bool out_msg_done;
+
+ struct ceph_auth_handshake *auth;
+ int auth_retry; /* true if we need a newer authorizer */
+
+ /* connection negotiation temps */
+ u8 in_banner[CEPH_BANNER_MAX_LEN];
+ struct ceph_entity_addr actual_peer_addr;
+ struct ceph_entity_addr peer_addr_for_me;
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
+
+ int in_base_pos; /* bytes read */
+
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
+ /* message in temps */
+ u8 in_tag; /* protocol control byte */
+ struct ceph_msg_header in_hdr;
+ __le64 in_temp_ack; /* for reading an ack */
+
+ /* message out temps */
+ struct ceph_msg_header out_hdr;
+ __le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
+
+ u32 connect_seq; /* identify the most recent connection
+ attempt for this session */
+ u32 peer_global_seq; /* peer's global seq for this connection */
+};
+
+#define CEPH_CRC_LEN 4
+#define CEPH_GCM_KEY_LEN 16
+#define CEPH_GCM_IV_LEN sizeof(struct ceph_gcm_nonce)
+#define CEPH_GCM_BLOCK_LEN 16
+#define CEPH_GCM_TAG_LEN 16
+
+#define CEPH_PREAMBLE_LEN 32
+#define CEPH_PREAMBLE_INLINE_LEN 48
+#define CEPH_PREAMBLE_PLAIN_LEN CEPH_PREAMBLE_LEN
+#define CEPH_PREAMBLE_SECURE_LEN (CEPH_PREAMBLE_LEN + \
+ CEPH_PREAMBLE_INLINE_LEN + \
+ CEPH_GCM_TAG_LEN)
+#define CEPH_EPILOGUE_PLAIN_LEN (1 + 3 * CEPH_CRC_LEN)
+#define CEPH_EPILOGUE_SECURE_LEN (CEPH_GCM_BLOCK_LEN + CEPH_GCM_TAG_LEN)
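Plugging in the constants above: a plain preamble is 32 bytes, a secure preamble is 32 + 48 + 16 = 96 bytes (preamble, inline data, GCM tag), a plain epilogue is 1 + 3 * 4 = 13 bytes (late-status byte plus three CRCs), and a secure epilogue is 16 + 16 = 32 bytes (one GCM block plus its tag).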
+
+#define CEPH_FRAME_MAX_SEGMENT_COUNT 4
+
+struct ceph_frame_desc {
+ int fd_tag; /* FRAME_TAG_* */
+ int fd_seg_cnt;
+ int fd_lens[CEPH_FRAME_MAX_SEGMENT_COUNT]; /* logical */
+ int fd_aligns[CEPH_FRAME_MAX_SEGMENT_COUNT];
+};
+
+struct ceph_gcm_nonce {
+ __le32 fixed;
+ __le64 counter __packed;
+};
+
+struct ceph_connection_v2_info {
+ struct iov_iter in_iter;
+ struct kvec in_kvecs[5]; /* recvmsg */
+ struct bio_vec in_bvec; /* recvmsg (in_cursor) */
+ int in_kvec_cnt;
+ int in_state; /* IN_S_* */
+
+ struct iov_iter out_iter;
+ struct kvec out_kvecs[8]; /* sendmsg */
+ struct bio_vec out_bvec; /* sendpage (out_cursor, out_zero),
+ sendmsg (out_enc_pages) */
+ int out_kvec_cnt;
+ int out_state; /* OUT_S_* */
+
+ int out_zero; /* # of zero bytes to send */
+ bool out_iter_sendpage; /* use sendpage if possible */
+
+ struct ceph_frame_desc in_desc;
+ struct ceph_msg_data_cursor in_cursor;
+ struct ceph_msg_data_cursor out_cursor;
+
+ struct crypto_shash *hmac_tfm; /* post-auth signature */
+ struct crypto_aead *gcm_tfm; /* on-wire encryption */
+ struct aead_request *gcm_req;
+ struct crypto_wait gcm_wait;
+ struct ceph_gcm_nonce in_gcm_nonce;
+ struct ceph_gcm_nonce out_gcm_nonce;
+
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
+ struct page **out_enc_pages;
+ int out_enc_page_cnt;
+ int out_enc_resid;
+ int out_enc_i;
+
+ int con_mode; /* CEPH_CON_MODE_* */
+
+ void *conn_bufs[16];
+ int conn_buf_cnt;
+ int data_len_remain;
+
+ struct kvec in_sign_kvecs[8];
+ struct kvec out_sign_kvecs[8];
+ int in_sign_kvec_cnt;
+ int out_sign_kvec_cnt;
+
+ u64 client_cookie;
+ u64 server_cookie;
+ u64 global_seq;
+ u64 connect_seq;
+ u64 peer_global_seq;
+
+ u8 in_buf[CEPH_PREAMBLE_SECURE_LEN];
+ u8 out_buf[CEPH_PREAMBLE_SECURE_LEN];
+ struct {
+ u8 late_status; /* FRAME_LATE_STATUS_* */
+ union {
+ struct {
+ u32 front_crc;
+ u32 middle_crc;
+ u32 data_crc;
+ } __packed;
+ u8 pad[CEPH_GCM_BLOCK_LEN - 1];
+ };
+ } out_epil;
+};
/*
* A single connection with another host.
@@ -258,24 +474,16 @@ struct ceph_connection {
struct ceph_messenger *msgr;
+ int state; /* CEPH_CON_S_* */
atomic_t sock_state;
struct socket *sock;
- struct ceph_entity_addr peer_addr; /* peer address */
- struct ceph_entity_addr peer_addr_for_me;
- unsigned long flags;
- unsigned long state;
+ unsigned long flags; /* CEPH_CON_F_* */
const char *error_msg; /* error message, if any */
struct ceph_entity_name peer_name; /* peer name */
-
+ struct ceph_entity_addr peer_addr; /* peer address */
u64 peer_features;
- u32 connect_seq; /* identify the most recent connection
- attempt for this connection, client */
- u32 peer_global_seq; /* peer's global seq for this connection */
-
- struct ceph_auth_handshake *auth;
- int auth_retry; /* true if we need a newer authorizer */
struct mutex mutex;
@@ -286,50 +494,86 @@ struct ceph_connection {
u64 in_seq, in_seq_acked; /* last message received, acked */
- /* connection negotiation temps */
- char in_banner[CEPH_BANNER_MAX_LEN];
- struct ceph_msg_connect out_connect;
- struct ceph_msg_connect_reply in_reply;
- struct ceph_entity_addr actual_peer_addr;
-
- /* message out temps */
- struct ceph_msg_header out_hdr;
+ struct ceph_msg *in_msg;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
- bool out_msg_done;
-
- struct kvec out_kvec[8], /* sending header/footer data */
- *out_kvec_cur;
- int out_kvec_left; /* kvec's left in out_kvec */
- int out_skip; /* skip this many bytes */
- int out_kvec_bytes; /* total bytes left */
- int out_more; /* there is more data after the kvecs */
- __le64 out_temp_ack; /* for writing an ack */
- struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
- stamp */
- /* message in temps */
- struct ceph_msg_header in_hdr;
- struct ceph_msg *in_msg;
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
- char in_tag; /* protocol control byte */
- int in_base_pos; /* bytes read */
- __le64 in_temp_ack; /* for reading an ack */
-
struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
+
+ union {
+ struct ceph_connection_v1_info v1;
+ struct ceph_connection_v2_info v2;
+ };
};
+extern struct page *ceph_zero_page;
+
+void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag);
+void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
+ unsigned long con_flag);
+bool ceph_con_flag_test_and_set(struct ceph_connection *con,
+ unsigned long con_flag);
+
+void ceph_encode_my_addr(struct ceph_messenger *msgr);
+
+int ceph_tcp_connect(struct ceph_connection *con);
+int ceph_con_close_socket(struct ceph_connection *con);
+void ceph_con_reset_session(struct ceph_connection *con);
+
+u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt);
+void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq);
+void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq);
+
+void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
+ struct ceph_msg *msg, size_t length);
+struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
+ size_t *page_offset, size_t *length);
+void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
+
+u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
+ unsigned int length);
+
+bool ceph_addr_is_blank(const struct ceph_entity_addr *addr);
+int ceph_addr_port(const struct ceph_entity_addr *addr);
+void ceph_addr_set_port(struct ceph_entity_addr *addr, int p);
+
+void ceph_con_process_message(struct ceph_connection *con);
+int ceph_con_in_msg_alloc(struct ceph_connection *con,
+ struct ceph_msg_header *hdr, int *skip);
+void ceph_con_get_out_msg(struct ceph_connection *con);
+
+/* messenger_v1.c */
+int ceph_con_v1_try_read(struct ceph_connection *con);
+int ceph_con_v1_try_write(struct ceph_connection *con);
+void ceph_con_v1_revoke(struct ceph_connection *con);
+void ceph_con_v1_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v1_opened(struct ceph_connection *con);
+void ceph_con_v1_reset_session(struct ceph_connection *con);
+void ceph_con_v1_reset_protocol(struct ceph_connection *con);
+
+/* messenger_v2.c */
+int ceph_con_v2_try_read(struct ceph_connection *con);
+int ceph_con_v2_try_write(struct ceph_connection *con);
+void ceph_con_v2_revoke(struct ceph_connection *con);
+void ceph_con_v2_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v2_opened(struct ceph_connection *con);
+void ceph_con_v2_reset_session(struct ceph_connection *con);
+void ceph_con_v2_reset_protocol(struct ceph_connection *con);
+
extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
- int max_count, int *count);
-
+ int max_count, int *count, char delim);
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
@@ -367,6 +611,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index ce4ffeb384d7..7a9a40163c0f 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[];
+ struct ceph_entity_inst mon_inst[] __counted_by(num_mon);
};
struct ceph_mon_client;
@@ -142,7 +142,7 @@ int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data);
-int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+int ceph_monc_blocklist_add(struct ceph_mon_client *monc,
struct ceph_entity_addr *client_addr);
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 9e50aede46c8..3989dcb94d3d 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -9,24 +9,45 @@
#define CEPH_MON_PORT 6789 /* default monitor port */
/*
- * client-side processes will try to bind to ports in this
- * range, simply for the benefit of tools like nmap or wireshark
- * that would like to identify the protocol.
- */
-#define CEPH_PORT_FIRST 6789
-#define CEPH_PORT_START 6800 /* non-monitors start here */
-#define CEPH_PORT_LAST 6900
-
-/*
* tcp connection banner. include a protocol version. and adjust
* whenever the wire protocol changes. try to keep this string length
* constant.
*/
#define CEPH_BANNER "ceph v027"
+#define CEPH_BANNER_LEN 9
#define CEPH_BANNER_MAX_LEN 30
/*
+ * messenger V2 connection banner prefix.
+ * The full banner string should have the form: "ceph v2\n<le16>"
+ * the 2 bytes are the length of the remaining banner.
+ */
+#define CEPH_BANNER_V2 "ceph v2\n"
+#define CEPH_BANNER_V2_LEN 8
+#define CEPH_BANNER_V2_PREFIX_LEN (CEPH_BANNER_V2_LEN + sizeof(__le16))
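Concretely, the first bytes on a msgr2 wire are the eight ASCII bytes of "ceph v2\n" followed by a little-endian __le16 holding the length of the banner payload that follows, for a CEPH_BANNER_V2_PREFIX_LEN of 8 + 2 = 10 bytes.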
+
+/*
+ * messenger V2 features
+ */
+#define CEPH_MSGR2_INCARNATION_1 (0ull)
+
+#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \
+ (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation);
+
+#define HAVE_MSGR2_FEATURE(x, name) \
+ (((x) & (CEPH_MSGR2_FEATUREMASK_##name)) == (CEPH_MSGR2_FEATUREMASK_##name))
+
+DEFINE_MSGR2_FEATURE( 0, 1, REVISION_1) // msgr2.1
+
+#define CEPH_MSGR2_SUPPORTED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+#define CEPH_MSGR2_REQUIRED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
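For example, a peer's advertised feature word would be checked along these lines (a sketch; the variable name is illustrative):

	if (!HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1))
		return -EPROTO;	/* msgr2.1 framing is in the required set */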
+
+
+/*
* Rollover-safe type and comparator for 32-bit sequence numbers.
* Comparator returns -1, 0, or 1.
*/
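The type and comparator themselves are unchanged and therefore not part of this hunk; recalled here for context:

	typedef __u32 ceph_seq_t;

	static inline __s32 ceph_seq_cmp(__u32 a, __u32 b)
	{
		return (__s32) a - (__s32) b;
	}

The cast-and-subtract is what makes the comparison rollover-safe; strictly speaking it returns a negative, zero, or positive value rather than exactly -1/0/1.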
@@ -61,11 +82,18 @@ extern const char *ceph_entity_type_name(int type);
* entity_addr -- network address
*/
struct ceph_entity_addr {
- __le32 type;
+ __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */
__le32 nonce; /* unique id for process (e.g. pid) */
struct sockaddr_storage in_addr;
} __attribute__ ((packed));
+static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs,
+ const struct ceph_entity_addr *rhs)
+{
+ return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) &&
+ lhs->nonce == rhs->nonce;
+}
+
struct ceph_entity_inst {
struct ceph_entity_name name;
struct ceph_entity_addr addr;
@@ -160,6 +188,24 @@ struct ceph_msg_header {
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
+struct ceph_msg_header2 {
+ __le64 seq; /* message seq# for this session */
+ __le64 tid; /* transaction id */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 data_pre_padding_len;
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ __le64 ack_seq;
+ __u8 flags;
+ /* oldest code we think can decode this. unknown if zero. */
+ __le16 compat_version;
+ __le16 reserved;
+} __attribute__ ((packed));
+
#define CEPH_MSG_PRIO_LOW 64
#define CEPH_MSG_PRIO_DEFAULT 127
#define CEPH_MSG_PRIO_HIGH 196
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 83fa08a06507..f66f6aac74f6 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -29,14 +29,63 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA_PRE,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ u32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
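A worked example of the reply layout described above, assuming two extents come back:

	__le32 count = 2;				/* sr_count */
	struct ceph_sparse_extent ext[2] = {
		{ .off = 0,    .len = 4096 },
		{ .off = 8192, .len = 4096 },		/* hole 4096..8191 omitted */
	};
	/* then 8192 bytes of data: ext[0]'s bytes followed by ext[1]'s */

so after decoding, sr_count is 2 and sr_datalen is 8192 even though the extents span 12288 bytes of the file.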
+
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -46,6 +95,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
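A hedged sketch of the locking rule stated above (the rb-tree helpers are assumed to exist in osd_client.c):

	/* lookup: either lock is enough */
	spin_lock(&osd->o_requests_lock);
	req = lookup_request(&osd->o_requests, tid);	/* assumed helper */
	spin_unlock(&osd->o_requests_lock);

	/* insert or erase: both locks must be held */
	mutex_lock(&osd->lock);
	spin_lock(&osd->o_requests_lock);
	insert_request(&osd->o_requests, req);		/* assumed helper */
	spin_unlock(&osd->o_requests_lock);
	mutex_unlock(&osd->lock);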
#define CEPH_OSD_SLAB_OPS 2
@@ -59,6 +109,7 @@ enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -82,6 +133,7 @@ struct ceph_osd_data {
struct ceph_bvec_iter bvec_pos;
u32 num_bvecs;
};
+ struct iov_iter iter;
};
};
@@ -98,6 +150,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -145,6 +199,9 @@ struct ceph_osd_req_op {
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -199,6 +256,7 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
@@ -211,9 +269,9 @@ struct ceph_osd_request {
struct ceph_snap_context *r_snapc; /* for writes */
struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
ktime_t r_start_latency; /* ktime_t */
@@ -221,7 +279,7 @@ struct ceph_osd_request {
int r_attempts;
u32 r_map_dne_bound;
- struct ceph_osd_req_op r_ops[];
+ struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops);
};
struct ceph_request_redirect {
@@ -287,6 +345,9 @@ struct ceph_osd_linger_request {
rados_watcherrcb_t errcb;
void *data;
+ struct ceph_pagelist *request_pl;
+ struct page **notify_id_pages;
+
struct page ***preply_pages;
size_t *preply_len;
};
@@ -447,6 +508,8 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -475,6 +538,14 @@ extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
u64 expected_object_size,
u64 expected_write_size,
u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -493,12 +564,28 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
+{
+ if (!cnt)
+ cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+ return __ceph_alloc_sparse_ext_map(op, cnt);
+}
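A hedged usage sketch (the surrounding request setup is assumed):

	/* Preallocate the extent map for a sparse-read op; passing 0
	 * falls back to CEPH_SPARSE_EXT_ARRAY_INITIAL (16) entries. */
	ret = ceph_alloc_sparse_ext_map(&req->r_ops[0], 0);
	if (ret)
		return ret;	/* presumably -ENOMEM */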
+
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
-extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail);
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@@ -515,17 +602,6 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
struct page *req_page, size_t req_len,
struct page **resp_pages, size_t *resp_len);
-int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- struct ceph_object_id *dst_oid,
- struct ceph_object_locator *dst_oloc,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags);
-
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
@@ -559,5 +635,19 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
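For instance, a request at extent.offset 4096 whose reply carried the single extent { .off = 8192, .len = 4096 } yields 8192 + 4096 - 4096 = 8192: the mapped data ends 8192 bytes into the destination buffer, the leading 4096-byte hole included.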
+
+#endif
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 3f4498fef6ad..5553019c3f07 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -137,6 +137,17 @@ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, ...);
void ceph_oid_destroy(struct ceph_object_id *oid);
+struct workspace_manager {
+ struct list_head idle_ws;
+ spinlock_t ws_lock;
+ /* Number of free workspaces */
+ int free_ws;
+ /* Total number of allocated workspaces */
+ atomic_t total_ws;
+ /* Waiters for a free workspace */
+ wait_queue_head_t ws_wait;
+};
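A hedged sketch of the return path such a pool implies (the helper name is illustrative, not the kernel's):

	static void crush_wsm_put(struct workspace_manager *wsm,
				  struct list_head *ws)
	{
		spin_lock(&wsm->ws_lock);
		list_add(ws, &wsm->idle_ws);
		wsm->free_ws++;
		spin_unlock(&wsm->ws_lock);

		/* wake one waiter blocked on an empty idle list, if any */
		if (wq_has_sleeper(&wsm->ws_wait))
			wake_up(&wsm->ws_wait);
	}

Unlike the old single crush_workspace behind a mutex, this lets several CRUSH computations proceed concurrently, each with a private workspace.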
+
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@@ -184,8 +195,7 @@ struct ceph_osdmap {
* the list of osds that store+replicate them. */
struct crush_map *crush;
- struct mutex crush_workspace_mutex;
- void *crush_workspace;
+ struct workspace_manager crush_wsm;
};
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
@@ -241,8 +251,8 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
}
struct ceph_osdmap *ceph_osdmap_alloc(void);
-extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 3a518fd0eaad..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -424,7 +424,7 @@ enum {
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
-#define EBLACKLISTED ESHUTDOWN /* blacklisted */
+#define EBLOCKLISTED ESHUTDOWN /* blocklisted */
/* xattr comparison */
enum {
@@ -524,6 +524,10 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index 4060004968c8..6617d9c68d86 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: cfag12864b LCD driver header
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-12
*/
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
new file mode 100644
index 000000000000..f0df518e11dd
--- /dev/null
+++ b/include/linux/cfi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef _LINUX_CFI_H
+#define _LINUX_CFI_H
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <asm/cfi.h>
+
+#ifndef cfi_get_offset
+static inline int cfi_get_offset(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
+ unsigned long *target, u32 type);
+
+static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return report_cfi_failure(regs, addr, NULL, 0);
+}
+#endif /* CONFIG_CFI_CLANG */
+
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+bool is_cfi_trap(unsigned long addr);
+#else
+static inline bool is_cfi_trap(unsigned long addr) { return false; }
+#endif
+
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+void module_cfi_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod);
+#else
+static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod) {}
+#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
+#endif /* CONFIG_MODULES */
+
+#ifndef CFI_NOSEAL
+#define CFI_NOSEAL(x)
+#endif
+
+#endif /* _LINUX_CFI_H */
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
new file mode 100644
index 000000000000..6b8713675765
--- /dev/null
+++ b/include/linux/cfi_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI_CLANG
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI_CLANG */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI_CLANG */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index fee0b5547cd0..ea48c861cd36 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -19,7 +19,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
-#include <linux/bpf-cgroup.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -71,6 +71,9 @@ enum {
/* Cgroup is frozen. */
CGRP_FROZEN,
+
+ /* Control group has to be killed. */
+ CGRP_KILL,
};
/* cgroup_root->flags */
@@ -86,19 +89,37 @@ enum {
CGRP_ROOT_NS_DELEGATE = (1 << 3),
/*
+ * Reduce latencies on dynamic cgroup modifications such as task
+ * migrations and controller on/offs by disabling percpu operation on
+ * cgroup_threadgroup_rwsem. This makes hot path operations such as
+ * forks and exits into the slow path and more expensive.
+ *
+ * The static usage pattern of creating a cgroup, enabling controllers,
+ * and then seeding it with CLONE_INTO_CGROUP doesn't require write
+ * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
+ * favordynmod.
+ */
+ CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),
+
+ /*
* Enable cpuset controller in v1 cgroup to use v2 behavior.
*/
- CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
+ CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),
/*
* Enable legacy local memory.events.
*/
- CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),
/*
* Enable recursive subtree protection
*/
- CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6),
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
+
+ /*
+ * Enable hugetlb accounting for the memory controller.
+ */
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
};
/* cftype->flags */
@@ -114,6 +135,7 @@ enum {
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+ __CFTYPE_ADDED = (1 << 18),
};
/*
@@ -221,7 +243,7 @@ struct css_set {
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
+ * css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
@@ -232,7 +254,7 @@ struct css_set {
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -260,7 +282,8 @@ struct css_set {
* List of csets participating in the on-going migration either as
* source or destination. Protected by cgroup_mutex.
*/
- struct list_head mg_preload_node;
+ struct list_head mg_src_preload_node;
+ struct list_head mg_dst_preload_node;
struct list_head mg_node;
/*
@@ -283,6 +306,10 @@ struct css_set {
struct cgroup_base_stat {
struct task_cputime cputime;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_sum;
+#endif
};
/*
@@ -320,6 +347,20 @@ struct cgroup_rstat_cpu {
struct cgroup_base_stat last_bstat;
/*
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc, and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
+ */
+ struct cgroup_base_stat last_subtree_bstat;
+
+ /*
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next.
@@ -362,7 +403,7 @@ struct cgroup {
/*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
- * ancestor_ids[] can determine whether a given cgroup is a
+ * ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
@@ -406,10 +447,13 @@ struct cgroup {
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
@@ -452,6 +496,20 @@ struct cgroup {
struct cgroup_rstat_cpu __percpu *rstat_cpu;
struct list_head rstat_css_list;
+ /*
+ * Add padding to separate the read mostly rstat_cpu and
+ * rstat_css_list into a different cacheline from the following
+ * rstat_flush_next and *bstat fields which can have frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
+
+ /*
+ * A singly-linked list of cgroup structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ */
+ struct cgroup *rstat_flush_next;
+
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
@@ -471,7 +529,7 @@ struct cgroup {
struct work_struct release_agent_work;
/* used to track pressure stalls */
- struct psi_group psi;
+ struct psi_group *psi;
/* used to store eBPF programs */
struct cgroup_bpf bpf;
@@ -482,8 +540,12 @@ struct cgroup {
/* Used to store internal freezer state */
struct cgroup_freezer_state freezer;
- /* ids of the ancestors at each level including self */
- u64 ancestor_ids[];
+#ifdef CONFIG_BPF_SYSCALL
+ struct bpf_local_storage __rcu *bpf_cgrp_storage;
+#endif
+
+ /* All ancestors including self */
+ struct cgroup *ancestors[];
};
/*
@@ -500,18 +562,23 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
- /* The root cgroup. Root is destroyed on its release. */
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. cgrp->ancestors[0] will be used overflowing into the
+ * following field. cgrp_ancestor_storage must immediately follow.
+ */
struct cgroup cgrp;
- /* for cgrp->ancestor_ids[0] */
- u64 cgrp_ancestor_id_storage;
+ /* must follow cgrp for cgrp->ancestors[0], see above */
+ struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
@@ -628,6 +695,8 @@ struct cgroup_subsys {
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
+ int (*css_local_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
@@ -668,22 +737,7 @@ struct cgroup_subsys {
*/
bool threaded:1;
- /*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
- *
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
- */
- bool broken_hierarchy:1;
- bool warned_broken_hierarchy:1;
-
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -763,107 +817,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overridden to carry prioidx and/or
- * classid. The two modes are distinguished by whether the lowest bit is
- * set. Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works. There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled. On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked. While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
*/
struct sock_cgroup_data {
- union {
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 is_data : 1;
- u8 no_refcnt : 1;
- u8 unused : 6;
- u8 padding;
- u16 prioidx;
- u32 classid;
- } __packed;
-#else
- struct {
- u32 classid;
- u16 prioidx;
- u8 padding;
- u8 unused : 6;
- u8 no_refcnt : 1;
- u8 is_data : 1;
- } __packed;
+ struct cgroup *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ u32 classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+ u16 prioidx; /* v1 */
#endif
- u64 val;
- };
};
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid. Such races are short-lived and the result isn't critical.
- */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
- /* fallback to 1 which is always the ID of the root cgroup */
- return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+ return READ_ONCE(skcd->prioidx);
+#else
+ return 1;
+#endif
}
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
- /* fallback to 0 which is the unconfigured default classid */
- return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ return READ_ONCE(skcd->classid);
+#else
+ return 0;
+#endif
}
-/*
- * If invoked concurrently, the updaters may clobber each other. The
- * caller is responsible for synchronization.
- */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.prioidx = prioidx;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+ WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_classid(&skcd_buf) == classid)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.classid = classid;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ WRITE_ONCE(skcd->classid, classid);
+#endif
}
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 618838c48313..34aaf0e87def 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -32,7 +32,7 @@ struct kernel_clone_args;
#ifdef CONFIG_CGROUPS
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
@@ -40,13 +40,11 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
-/* walk only threadgroup leaders */
-#define CSS_TASK_ITER_PROCS (1U << 0)
-/* walk all threaded css_sets in the domain */
-#define CSS_TASK_ITER_THREADED (1U << 1)
-
-/* internal flags */
-#define CSS_TASK_ITER_SKIPPED (1U << 16)
+enum {
+ CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
+ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
+ CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
+};
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
@@ -68,8 +66,10 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -106,6 +106,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -114,8 +115,8 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
@@ -307,69 +308,22 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
-static inline u64 cgroup_id(struct cgroup *cgrp)
-{
- return cgrp->kn->id;
-}
-
-/**
- * css_get - obtain a reference on the specified css
- * @css: target css
- *
- * The caller must already have a reference.
- */
-static inline void css_get(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get(&css->refcnt);
-}
-
-/**
- * css_get_many - obtain references on the specified css
- * @css: target css
- * @n: number of references to get
- *
- * The caller must already have a reference.
- */
-static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get_many(&css->refcnt, n);
-}
-
-/**
- * css_tryget - try to obtain a reference on the specified css
- * @css: target css
- *
- * Obtain a reference on @css unless it already has reached zero and is
- * being released. This function doesn't care whether @css is on or
- * offline. The caller naturally needs to ensure that @css is accessible
- * but doesn't have to be holding a reference on it - IOW, RCU protected
- * access is good enough for this function. Returns %true if a reference
- * count was successfully obtained; %false otherwise.
- */
-static inline bool css_tryget(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget(&css->refcnt);
- return true;
-}
+#ifdef CONFIG_DEBUG_CGROUP_REF
+void css_get(struct cgroup_subsys_state *css);
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
+bool css_tryget(struct cgroup_subsys_state *css);
+bool css_tryget_online(struct cgroup_subsys_state *css);
+void css_put(struct cgroup_subsys_state *css);
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
+#else
+#define CGROUP_REF_FN_ATTRS static inline
+#define CGROUP_REF_EXPORT(fn)
+#include <linux/cgroup_refcnt.h>
+#endif
-/**
- * css_tryget_online - try to obtain a reference on the specified css if online
- * @css: target css
- *
- * Obtain a reference on @css if it's online. The caller naturally needs
- * to ensure that @css is accessible but doesn't have to be holding a
- * reference on it - IOW, RCU protected access is good enough for this
- * function. Returns %true if a reference count was successfully obtained;
- * %false otherwise.
- */
-static inline bool css_tryget_online(struct cgroup_subsys_state *css)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget_live(&css->refcnt);
- return true;
+ return cgrp->kn->id;
}
/**
@@ -392,31 +346,6 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
-/**
- * css_put - put a css reference
- * @css: target css
- *
- * Put a reference obtained via css_get() and css_tryget_online().
- */
-static inline void css_put(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put(&css->refcnt);
-}
-
-/**
- * css_put_many - put css references
- * @css: target css
- * @n: number of references to put
- *
- * Put references obtained via css_get() and css_tryget_online().
- */
-static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put_many(&css->refcnt, n);
-}
-
static inline void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
@@ -432,6 +361,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
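
These wrappers let code outside kernel/cgroup/ serialize against hierarchy changes without naming cgroup_mutex directly; a minimal sketch:

	static void inspect_hierarchy(void)	/* hypothetical caller */
	{
		cgroup_lock();
		/* the cgroup topology is stable in this section */
		cgroup_unlock();
	}
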
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -446,10 +387,9 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
+ rcu_read_lock_sched_held() || \
lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
@@ -573,7 +513,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
+ return cgrp->ancestors[ancestor->level] == ancestor;
}
/**
@@ -590,11 +530,9 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- if (cgrp->level < ancestor_level)
+ if (ancestor_level < 0 || ancestor_level > cgrp->level)
return NULL;
- while (cgrp && cgrp->level > ancestor_level)
- cgrp = cgroup_parent(cgrp);
- return cgrp;
+ return cgrp->ancestors[ancestor_level];
}
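
Both cgroup_is_descendant() and cgroup_ancestor() now index the new ->ancestors[] array instead of walking parent pointers, making the lookup O(1). As a sketch, assuming @cgrp sits at level 3:

	struct cgroup *anc = cgroup_ancestor(cgrp, 1);	/* one indexed load */
	/* equivalent to cgrp->ancestors[1]; NULL for out-of-range levels */
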
/**
@@ -671,10 +609,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return &cgrp->psi;
-}
+bool cgroup_psi_enabled(void);
static inline void cgroup_init_kthreadd(void)
{
@@ -696,14 +631,17 @@ static inline void cgroup_kthread_ready(void)
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
-static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
@@ -730,9 +668,9 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
return NULL;
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+static inline bool cgroup_psi_enabled(void)
{
- return NULL;
+ return false;
}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
@@ -751,7 +689,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
@@ -778,11 +715,9 @@ static inline void cgroup_account_cputime(struct task_struct *task,
cpuacct_charge(task, delta_exec);
- rcu_read_lock();
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime(cgrp, delta_exec);
- rcu_read_unlock();
}
static inline void cgroup_account_cputime_field(struct task_struct *task,
@@ -793,11 +728,9 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
cpuacct_account_field(task, index, delta_exec);
- rcu_read_lock();
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime_field(cgrp, index, delta_exec);
- rcu_read_unlock();
}
#else /* CONFIG_CGROUPS */
@@ -816,33 +749,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
- unsigned long v;
-
- /*
- * @skcd->val is 64bit but the following is safe on 32bit too as we
- * just need the lower ulong to be written and read atomically.
- */
- v = READ_ONCE(skcd->val);
-
- if (v & 3)
- return &cgrp_dfl_root.cgrp;
-
- return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
- return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+ return skcd->cgroup;
}
#else /* !CONFIG_SOCK_CGROUP_DATA */
@@ -854,7 +767,6 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_SOCK_CGROUP_DATA */
struct cgroup_namespace {
- refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -889,12 +801,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && refcount_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
free_cgroup_ns(ns);
}
@@ -907,20 +819,6 @@ void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
struct cgroup *dst);
-static inline bool cgroup_task_freeze(struct task_struct *task)
-{
- bool ret;
-
- if (task->flags & PF_KTHREAD)
- return false;
-
- rcu_read_lock();
- ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
- rcu_read_unlock();
-
- return ret;
-}
-
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return task->frozen;
@@ -930,10 +828,6 @@ static inline bool cgroup_task_frozen(struct task_struct *task)
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
-static inline bool cgroup_task_freeze(struct task_struct *task)
-{
- return false;
-}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return false;
@@ -959,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h
new file mode 100644
index 000000000000..d0cfe8025111
--- /dev/null
+++ b/include/linux/cgroup_api.h
@@ -0,0 +1 @@
+#include <linux/cgroup.h>
diff --git a/include/linux/cgroup_refcnt.h b/include/linux/cgroup_refcnt.h
new file mode 100644
index 000000000000..2eea0a69ecfc
--- /dev/null
+++ b/include/linux/cgroup_refcnt.h
@@ -0,0 +1,96 @@
+/**
+ * css_get - obtain a reference on the specified css
+ * @css: target css
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_get)
+
+/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_get_many)
+
+/**
+ * css_tryget - try to obtain a reference on the specified css
+ * @css: target css
+ *
+ * Obtain a reference on @css unless it already has reached zero and is
+ * being released. This function doesn't care whether @css is on or
+ * offline. The caller naturally needs to ensure that @css is accessible
+ * but doesn't have to be holding a reference on it - IOW, RCU protected
+ * access is good enough for this function. Returns %true if a reference
+ * count was successfully obtained; %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget)
+
+/**
+ * css_tryget_online - try to obtain a reference on the specified css if online
+ * @css: target css
+ *
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
+ * reference on it - IOW, RCU protected access is good enough for this
+ * function. Returns %true if a reference count was successfully obtained;
+ * %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget_online(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget_live(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget_online)
+
+/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+ * Put a reference obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_put)
+
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_put_many)
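
The header is designed to be textually included in two modes: with CONFIG_DEBUG_CGROUP_REF unset, cgroup.h defines CGROUP_REF_FN_ATTRS as static inline and CGROUP_REF_EXPORT() as empty, keeping the helpers inline as before; with it set, kernel/cgroup/cgroup.c is expected to emit out-of-line, exported symbols along these lines (a sketch, not the verbatim kernel code):

	#define CGROUP_REF_FN_ATTRS	noinline
	#define CGROUP_REF_EXPORT(fn)	EXPORT_SYMBOL_GPL(fn);
	#include <linux/cgroup_refcnt.h>

which makes css_get() and friends real symbols that tracers and debuggers can hook.
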
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index acb77dcff3b4..445235487230 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -61,6 +61,10 @@ SUBSYS(pids)
SUBSYS(rdma)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_MISC)
+SUBSYS(misc)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
deleted file mode 100644
index 5f5730c1d324..000000000000
--- a/include/linux/cleancache.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CLEANCACHE_H
-#define _LINUX_CLEANCACHE_H
-
-#include <linux/fs.h>
-#include <linux/exportfs.h>
-#include <linux/mm.h>
-
-#define CLEANCACHE_NO_POOL -1
-#define CLEANCACHE_NO_BACKEND -2
-#define CLEANCACHE_NO_BACKEND_SHARED -3
-
-#define CLEANCACHE_KEY_MAX 6
-
-/*
- * cleancache requires every file with a page in cleancache to have a
- * unique key unless/until the file is removed/truncated. For some
- * filesystems, the inode number is unique, but for "modern" filesystems
- * an exportable filehandle is required (see exportfs.h)
- */
-struct cleancache_filekey {
- union {
- ino_t ino;
- __u32 fh[CLEANCACHE_KEY_MAX];
- u32 key[CLEANCACHE_KEY_MAX];
- } u;
-};
-
-struct cleancache_ops {
- int (*init_fs)(size_t);
- int (*init_shared_fs)(uuid_t *uuid, size_t);
- int (*get_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*put_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
- void (*invalidate_inode)(int, struct cleancache_filekey);
- void (*invalidate_fs)(int);
-};
-
-extern int cleancache_register_ops(const struct cleancache_ops *ops);
-extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(struct super_block *);
-extern int __cleancache_get_page(struct page *);
-extern void __cleancache_put_page(struct page *);
-extern void __cleancache_invalidate_page(struct address_space *, struct page *);
-extern void __cleancache_invalidate_inode(struct address_space *);
-extern void __cleancache_invalidate_fs(struct super_block *);
-
-#ifdef CONFIG_CLEANCACHE
-#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
-{
- return mapping->host->i_sb->cleancache_poolid >= 0;
-}
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return cleancache_fs_enabled_mapping(page->mapping);
-}
-#else
-#define cleancache_enabled (0)
-#define cleancache_fs_enabled(_page) (0)
-#define cleancache_fs_enabled_mapping(_page) (0)
-#endif
-
-/*
- * The shim layer provided by these inline functions allows the compiler
- * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
- * is disabled, to a single global variable check if CONFIG_CLEANCACHE
- * is enabled but no cleancache "backend" has dynamically enabled it,
- * and, for the most frequent cleancache ops, to a single global variable
- * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
- * and a cleancache backend has dynamically enabled cleancache, but the
- * filesystem referenced by that cleancache op has not enabled cleancache.
- * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
- * no measurable performance impact.
- */
-
-static inline void cleancache_init_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_fs(sb);
-}
-
-static inline void cleancache_init_shared_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_shared_fs(sb);
-}
-
-static inline int cleancache_get_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- return __cleancache_get_page(page);
- return -1;
-}
-
-static inline void cleancache_put_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- __cleancache_put_page(page);
-}
-
-static inline void cleancache_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- /* careful... page->mapping is NULL sometimes when this is called */
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_page(mapping, page);
-}
-
-static inline void cleancache_invalidate_inode(struct address_space *mapping)
-{
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_inode(mapping);
-}
-
-static inline void cleancache_invalidate_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_invalidate_fs(sb);
-}
-
-#endif /* _LINUX_CLEANCACHE_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
new file mode 100644
index 000000000000..c2d09bc4f976
--- /dev/null
+++ b/include/linux/cleanup.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GUARDS_H
+#define __LINUX_GUARDS_H
+
+#include <linux/compiler.h>
+
+/*
+ * DEFINE_FREE(name, type, free):
+ * simple helper macro that defines the required wrapper for a __free()
+ * based cleanup function. @free is an expression using '_T' to access the
+ * variable. @free should typically include a NULL test before calling a
+ * function, see the example below.
+ *
+ * __free(name):
+ * variable attribute to add a scope-based cleanup to the variable.
+ *
+ * no_free_ptr(var):
+ * like a non-atomic xchg(var, NULL), such that the cleanup function will
+ * be inhibited -- provided it sanely deals with a NULL value.
+ *
+ * NOTE: this has __must_check semantics so that it is harder to accidentally
+ * leak the resource.
+ *
+ * return_ptr(p):
+ * returns p while inhibiting the __free().
+ *
+ * Ex.
+ *
+ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ *
+ * void *alloc_obj(...)
+ * {
+ * struct obj *p __free(kfree) = kmalloc(...);
+ * if (!p)
+ * return NULL;
+ *
+ * if (!init_obj(p))
+ * return NULL;
+ *
+ * return_ptr(p);
+ * }
+ *
+ * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
+ * kfree() is fine to be called with a NULL value. This is on purpose. This way
+ * the compiler sees the end of our alloc_obj() function as:
+ *
+ * tmp = p;
+ * p = NULL;
+ * if (p)
+ * kfree(p);
+ * return tmp;
+ *
+ * And through the magic of value-propagation and dead-code-elimination, it
+ * eliminates the actual cleanup call and compiles into:
+ *
+ * return p;
+ *
+ * Without the NULL test it turns into a mess and the compiler can't help us.
+ */
+
+#define DEFINE_FREE(_name, _type, _free) \
+ static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+
+#define __free(_name) __cleanup(__free_##_name)
+
+#define __get_and_null_ptr(p) \
+ ({ __auto_type __ptr = &(p); \
+ __auto_type __val = *__ptr; \
+ *__ptr = NULL; __val; })
+
+static inline __must_check
+const volatile void * __must_check_fn(const volatile void *val)
+{ return val; }
+
+#define no_free_ptr(p) \
+ ((typeof(p)) __must_check_fn(__get_and_null_ptr(p)))
+
+#define return_ptr(p) return no_free_ptr(p)
+
+
+/*
+ * DEFINE_CLASS(name, type, exit, init, init_args...):
+ * helper to define the destructor and constructor for a type.
+ * @exit is an expression using '_T' -- similar to FREE above.
+ * @init is an expression in @init_args resulting in @type
+ *
+ * EXTEND_CLASS(name, ext, init, init_args...):
+ * extends class @name to @name@ext with the new constructor
+ *
+ * CLASS(name, var)(args...):
+ * declare the variable @var as an instance of the named class
+ *
+ * Ex.
+ *
+ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
+ *
+ * CLASS(fdget, f)(fd);
+ * if (!f.file)
+ * return -EBADF;
+ *
+ * // use 'f' without concern
+ */
+
+#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
+typedef _type class_##_name##_t; \
+static inline void class_##_name##_destructor(_type *p) \
+{ _type _T = *p; _exit; } \
+static inline _type class_##_name##_constructor(_init_args) \
+{ _type t = _init; return t; }
+
+#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef class_##_name##_t class_##_name##ext##_t; \
+static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
+{ class_##_name##_destructor(p); } \
+static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+{ class_##_name##_t t = _init; return t; }
+
+#define CLASS(_name, var) \
+ class_##_name##_t var __cleanup(class_##_name##_destructor) = \
+ class_##_name##_constructor
+
+
+/*
+ * DEFINE_GUARD(name, type, lock, unlock):
+ * trivial wrapper around DEFINE_CLASS() above specifically
+ * for locks.
+ *
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, eg. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
+ * guard(name):
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
+ *
+ * scoped_guard (name, args...) { }:
+ * similar to CLASS(name, scope)(args), except the variable (with the
+ * explicit name 'scope') is declared in a for-loop such that its scope is
+ * bound to the next (compound) statement.
+ *
+ * For conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ * similar to scoped_guard(), except that when the lock acquisition
+ * fails, the @fail statement is executed instead of the body.
+ *
+ */
+
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ class_##_name##_t _T) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+#define guard(_name) \
+ CLASS(_name, __UNIQUE_ID(guard))
+
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
+#define scoped_guard(_name, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; !done; done = (void *)1) \
+ if (!__guard_ptr(_name)(&scope)) _fail; \
+ else
+
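
As a usage sketch, assuming the mutex guards that DEFINE_GUARD()/DEFINE_GUARD_COND() are expected to generate elsewhere (e.g. DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))):

	guard(mutex)(&my_mutex);	/* held until end of enclosing scope */

	scoped_guard(mutex, &my_mutex) {
		/* held only for this block */
	}

	scoped_cond_guard(mutex_intr, return -EINTR, &my_mutex) {
		/* runs only if the interruptible acquisition succeeded */
	}
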
+/*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+ * 'fat' pointer (eg. spin_lock_irqsave).
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
+ *
+ * will result in the following type:
+ *
+ * typedef struct {
+ * type *lock; // 'type := void' for the _0 variant
+ * __VA_ARGS__;
+ * } class_##name##_t;
+ *
+ * As above, both _lock and _unlock are statements, except this time '_T' will
+ * be a pointer to the above struct.
+ */
+
+#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef struct { \
+ _type *lock; \
+ __VA_ARGS__; \
+} class_##_name##_t; \
+ \
+static inline void class_##_name##_destructor(class_##_name##_t *_T) \
+{ \
+ if (_T->lock) { _unlock; } \
+} \
+ \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+{ \
+ return _T->lock; \
+}
+
+
+#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+static inline class_##_name##_t class_##_name##_constructor(_type *l) \
+{ \
+ class_##_name##_t _t = { .lock = l }, *_T = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+static inline class_##_name##_t class_##_name##_constructor(void) \
+{ \
+ class_##_name##_t _t = { .lock = (void*)1 }, \
+ *_T __maybe_unused = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_0(_name, _lock)
+
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ if (_T->lock && !(_condlock)) _T->lock = NULL; \
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+
+#endif /* __LINUX_GUARDS_H */
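
For illustration, a lock with a native type would instantiate these roughly as follows (spinlock shown; the names mirror what include/linux/spinlock.h is expected to provide):

	DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
			    spin_lock(_T->lock),
			    spin_unlock(_T->lock))
	DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

	/* in a user: */
	guard(spinlock)(&my_lock);
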
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 03a5de5f99f4..4a537260f655 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -42,6 +42,9 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
+ * Should be initialized by calling clk_hw_init_rate_request().
+ *
+ * @core: Pointer to the struct clk_core affected by this request
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
@@ -53,6 +56,7 @@ struct dentry;
*
*/
struct clk_rate_request {
+ struct clk_core *core;
unsigned long rate;
unsigned long min_rate;
unsigned long max_rate;
@@ -60,8 +64,17 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate);
+void clk_hw_forward_rate_request(const struct clk_hw *core,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate);
+
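
A sketch of the intended call pattern inside a provider's rate negotiation (parent and parent_req are hypothetical locals):

	struct clk_rate_request req, parent_req;

	clk_hw_init_rate_request(hw, &req, 148500000);
	/* req now carries hw's min/max constraints; forward it upstream */
	clk_hw_forward_rate_request(hw, &req, parent, &parent_req, req.rate);
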
/**
- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
* @num: Numerator of the duty cycle ratio
* @den: Denominator of the duty cycle ratio
@@ -116,10 +129,11 @@ struct clk_duty {
* @restore_context: Restore the context of the clock after a restoration
* of power.
*
- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call.
- * Returns the calculated rate. Optional, but recommended - if
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+ * 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
@@ -342,7 +356,7 @@ struct clk_fixed_rate {
unsigned long flags;
};
-#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
+#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
@@ -350,7 +364,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
- unsigned long clk_fixed_flags);
+ unsigned long clk_fixed_flags, bool devm);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
@@ -365,7 +379,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
+
+/**
+ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, true)
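
For illustration, a probe-time registration with the devres variant (driver and clock names hypothetical):

	static int foo_clk_probe(struct platform_device *pdev)
	{
		struct clk_hw *hw;

		/* unregistered automatically when the device is unbound */
		hw = devm_clk_hw_register_fixed_rate(&pdev->dev, "foo-osc",
						     NULL, 0, 24000000);
		return PTR_ERR_OR_ZERO(hw);
	}
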
/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
@@ -378,7 +405,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
/**
* clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
* the clock framework
@@ -388,11 +415,11 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
*/
-#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_hw, flags, \
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), (flags), (fixed_rate), 0, \
- 0)
+ 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
* the clock framework
@@ -408,7 +435,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
* clock with the clock framework
@@ -421,15 +448,15 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
parent_hw, flags, fixed_rate, fixed_accuracy) \
- __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
- NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
* clock with the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
- * @parent_name: name of clock's parent
+ * @parent_data: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
* @fixed_accuracy: non-adjustable clock accuracy
@@ -438,7 +465,21 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
parent_data, flags, fixed_rate, fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), NULL, (flags), \
- (fixed_rate), (fixed_accuracy), 0)
+ (fixed_rate), (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
+ flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ CLK_FIXED_RATE_PARENT_ACCURACY, false)
void clk_unregister_fixed_rate(struct clk *clk);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
@@ -490,6 +531,13 @@ struct clk_hw *__clk_hw_register_gate(struct device *dev,
unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -544,6 +592,41 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
__clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
(flags), (reg), (bit_idx), \
(clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\
+ clk_gate_flags, lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate_parent_data - register a gate clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_data(dev, name, parent_data, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
int clk_gate_is_enabled(struct clk_hw *hw);
@@ -566,7 +649,7 @@ struct clk_div_table {
* Clock with an adjustable divider affecting its output frequency. Implements
* .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
@@ -629,6 +712,12 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val);
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
@@ -639,6 +728,12 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
const struct clk_parent_data *parent_data, unsigned long flags,
void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -779,6 +874,63 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
(parent_data), (flags), (reg), (shift), \
(width), (clk_divider_flags), (table), \
(lock))
+/**
+ * devm_clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \
+ (parent_hw), NULL, (flags), (reg), \
+ (shift), (width), (clk_divider_flags), \
+ NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_table - register a table based divider clock
+ * with the clock framework (devres variant)
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_table(dev, name, parent_name, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
void clk_unregister_divider(struct clk *clk);
void clk_hw_unregister_divider(struct clk_hw *hw);
@@ -815,7 +967,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ const u32 *table;
u32 mask;
u8 shift;
u8 flags;
@@ -840,11 +992,18 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
@@ -858,6 +1017,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
(parent_names), NULL, NULL, (flags), (reg), \
(shift), (mask), (clk_mux_flags), (table), \
(lock))
+#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
+ num_parents, flags, reg, shift, mask, \
+ clk_mux_flags, table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ NULL, NULL, (parent_data), (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
__clk_hw_register_mux((dev), NULL, (name), (num_parents), \
@@ -875,10 +1041,37 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
__clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
(parent_data), (flags), (reg), (shift), \
BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
-
-int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+#define clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ NULL, (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
unsigned int val);
-unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);
@@ -891,18 +1084,28 @@ void of_fixed_factor_clk_setup(struct device_node *node);
* @hw: handle between common and hardware-specific interfaces
* @mult: multiplier
* @div: divider
+ * @acc: fixed accuracy in ppb
+ * @flags: behavior modifying flags
*
* Clock with a fixed multiplier and divider. The output frequency is the
* parent clock rate divided by div and multiplied by mult.
- * Implements .recalc_rate, .set_rate and .round_rate
+ * Implements .recalc_rate, .set_rate, .round_rate and .recalc_accuracy
+ *
+ * Flags:
+ * * CLK_FIXED_FACTOR_FIXED_ACCURACY - Use the value in @acc instead of the
+ * parent clk accuracy.
*/
struct clk_fixed_factor {
struct clk_hw hw;
unsigned int mult;
unsigned int div;
+ unsigned long acc;
+ unsigned int flags;
};
+#define CLK_FIXED_FACTOR_FIXED_ACCURACY BIT(0)
+
#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
extern const struct clk_ops clk_fixed_factor_ops;
@@ -913,8 +1116,35 @@ void clk_unregister_fixed_factor(struct clk *clk);
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc);
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
@@ -924,11 +1154,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* @mwidth: width of the numerator bit field
* @nshift: shift to the denominator bit field
* @nwidth: width of the denominator bit field
+ * @approximation: clk driver's callback for calculating the divider clock
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
*
- * Flags:
+ * @flags:
* CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
* is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
* is set then the numerator and denominator are both the value read
@@ -936,16 +1167,20 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
* used for the divider register. Setting this flag makes the register
* accesses big endian.
+ * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
+ * be saturated and the caller will get an approximation quite far from
+ * the requested rate. By setting this flag the caller instead requests
+ * that the rate be shifted left by a few bits when the requested rate is
+ * too small to satisfy the desired range of the denominator. It assumes
+ * that a power-of-two-capable prescaler exists on the caller's side.
*/
struct clk_fractional_divider {
struct clk_hw hw;
void __iomem *reg;
u8 mshift;
u8 mwidth;
- u32 mmask;
u8 nshift;
u8 nwidth;
- u32 nmask;
u8 flags;
void (*approximation)(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate,
@@ -957,8 +1192,8 @@ struct clk_fractional_divider {
#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
+#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)
-extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
@@ -981,7 +1216,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* Clock with an adjustable multiplier affecting its output frequency.
* Implements .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
* from the register, with 0 being a valid value effectively
* zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
@@ -1004,9 +1239,9 @@ struct clk_multiplier {
#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
-#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;
@@ -1062,6 +1297,13 @@ struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
+ const char *name, const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
void clk_hw_unregister_composite(struct clk_hw *hw);
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
@@ -1072,10 +1314,8 @@ int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
-void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
@@ -1088,6 +1328,11 @@ static inline struct clk_hw *__clk_get_hw(struct clk *clk)
return (struct clk_hw *)clk;
}
#endif
+
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id);
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id);
+
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
@@ -1113,7 +1358,11 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags);
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
@@ -1155,10 +1404,16 @@ struct clk_onecell_data {
struct clk_hw_onecell_data {
unsigned int num;
- struct clk_hw *hws[];
+ struct clk_hw *hws[] __counted_by(num);
};
-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+#define CLK_OF_DECLARE(name, compat, fn) \
+ static void __init __##name##_of_clk_init_declare(struct device_node *np) \
+ { \
+ fn(np); \
+ fwnode_dev_initialized(of_fwnode_handle(np), true); \
+ } \
+ OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
/*
* Use this macro when you have a driver that requires two initialization
@@ -1313,7 +1568,7 @@ int devm_of_clk_add_hw_provider(struct device *dev,
void *data),
void *data);
void of_clk_del_provider(struct device_node *np);
-void devm_of_clk_del_provider(struct device *dev);
+
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -1350,7 +1605,7 @@ static inline int devm_of_clk_add_hw_provider(struct device *dev,
return 0;
}
static inline void of_clk_del_provider(struct device_node *np) {}
-static inline void devm_of_clk_del_provider(struct device *dev) {}
+
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 7fd6a1febcf4..00623f4de5e1 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -92,7 +92,7 @@ struct clk_bulk_data {
#ifdef CONFIG_COMMON_CLK
/**
- * clk_notifier_register: register a clock rate-change notifier callback
+ * clk_notifier_register - register a clock rate-change notifier callback
* @clk: clock whose rate we are interested in
* @nb: notifier block with callback function pointer
*
@@ -103,13 +103,24 @@ struct clk_bulk_data {
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
/**
- * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * clk_notifier_unregister - unregister a clock rate-change notifier callback
* @clk: clock whose rate we are no longer interested in
* @nb: notifier block which will be unregistered
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
/**
+ * devm_clk_notifier_register - register a managed rate-change notifier callback
+ * @dev: device for clock "consumer"
+ * @clk: clock whose rate we are interested in
+ * @nb: notifier block with callback function pointer
+ *
+ * Returns 0 on success, or a negative errno otherwise
+ */
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb);
+
+/**
* clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
* for a clock source.
* @clk: clock source
@@ -150,7 +161,7 @@ int clk_get_phase(struct clk *clk);
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
/**
- * clk_get_duty_cycle - return the duty cycle ratio of a clock signal
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
* @clk: clock signal source
* @scale: scaling factor to be applied to represent the ratio as an integer
*
@@ -172,6 +183,51 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same
+ * driver, the rate effectively gets locked, as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get
+ * @dev: device the exclusivity is bound to
+ * @clk: clock source
+ *
+ * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler
+ * on @dev to call clk_rate_exclusive_put().
+ *
+ * Must not be called from within atomic context.
+ */
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows a driver to release the exclusivity it previously got
+ * from clk_rate_exclusive_get()
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
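
A sketch of the intended pairing around a rate-sensitive region (error paths elided; the exclusive holder may still set the rate itself):

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	clk_set_rate(clk, target_rate);
	/* ... no other consumer can change the rate here ... */

	clk_rate_exclusive_put(clk);
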
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -186,6 +242,13 @@ static inline int clk_notifier_unregister(struct clk *clk,
return -ENOTSUPP;
}
+static inline int devm_clk_notifier_register(struct device *dev,
+ struct clk *clk,
+ struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
static inline long clk_get_accuracy(struct clk *clk)
{
return -ENOTSUPP;
@@ -218,8 +281,16 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
#endif
+#ifdef CONFIG_HAVE_CLK_PREPARE
/**
* clk_prepare - prepare a clock source
* @clk: clock source
@@ -228,10 +299,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
*
* Must not be called from within atomic context.
*/
-#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks);
+
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to the power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and their counterparts, so that the
+ * usage counts stay balanced.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
@@ -245,6 +332,11 @@ clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
might_sleep();
return 0;
}
+
+static inline bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return false;
+}
#endif
/**
@@ -399,19 +491,36 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
/**
+ * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns success (0) or negative errno.
+ *
+ * This helper function allows drivers to get all clocks of the
+ * consumer and enable them in one managed operation.
+ * The clks will automatically be disabled and freed when the device
+ * is unbound.
+ */
+int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+ struct clk_bulk_data **clks);
+
+/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Returns a struct clk corresponding to the clock producer, or
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
*
- * Drivers must assume that the clock source is not enabled.
- *
- * devm_clk_get should not be called from within interrupt context.
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
@@ -419,64 +528,129 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
- * devm_clk_get_optional - lookup and obtain a managed reference to an optional
- * clock producer.
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Behaves the same as devm_clk_get() except where there is no clock producer.
- * In this case, instead of returning -ENOENT, the function returns NULL.
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
*/
-struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
/**
- * devm_get_clk_from_child - lookup and obtain a managed reference to a
- * clock producer from child node.
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
* @dev: device for clock "consumer"
- * @np: pointer to clock consumer node
- * @con_id: clock consumer ID
+ * @id: clock consumer ID
*
- * This function parses the clocks, and uses them to look up the
- * struct clk from the registered list of clock providers by using
- * @np and @con_id
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ * clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get().
+ *
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
*/
-struct clk *devm_get_clk_from_child(struct device *dev,
- struct device_node *np, const char *con_id);
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
/**
- * clk_rate_exclusive_get - get exclusivity over the rate control of a
- * producer
- * @clk: clock source
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
*
- * This function allows drivers to get exclusive control over the rate of a
- * provider. It prevents any other consumer to execute, even indirectly,
- * opereation which could alter the rate of the provider or cause glitches
+ * Context: May sleep.
*
- * If exlusivity is claimed more than once on clock, even by the same driver,
- * the rate effectively gets locked as exclusivity can't be preempted.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
*
- * Must not be called from within atomic context.
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
*
- * Returns success (0) or negative errno.
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
*/
-int clk_rate_exclusive_get(struct clk *clk);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
/**
- * clk_rate_exclusive_put - release exclusivity over the rate control of a
- * producer
- * @clk: clock source
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
*
- * This function allows drivers to release the exclusivity it previously got
- * from clk_rate_exclusive_get()
+ * Context: May sleep.
*
- * The caller must balance the number of clk_rate_exclusive_get() and
- * clk_rate_exclusive_put() calls.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
*
- * Must not be called from within atomic context.
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
*/
-void clk_rate_exclusive_put(struct clk *clk);
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+
+/**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ * clock producer from child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
+ *
+ * This function parses the clocks, and uses them to look up the
+ * struct clk from the registered list of clock providers by using
+ * @np and @con_id
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id);
/**
* clk_enable - inform the system when the clock source should be running.
@@ -661,7 +835,7 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent);
+bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
@@ -773,12 +947,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline struct clk *devm_clk_get_optional(struct device *dev,
const char *id)
{
return NULL;
}
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
@@ -798,6 +996,12 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
return 0;
}
+static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -812,14 +1016,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
-
-static inline int clk_rate_exclusive_get(struct clk *clk)
-{
- return 0;
-}
-
-static inline void clk_rate_exclusive_put(struct clk *clk) {}
-
static inline int clk_enable(struct clk *clk)
{
return 0;
@@ -947,6 +1143,17 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
}
/**
+ * clk_drop_range - Reset any range set on that clock
+ * @clk: clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+static inline int clk_drop_range(struct clk *clk)
+{
+ return clk_set_rate_range(clk, 0, ULONG_MAX);
+}
+
+/**
* clk_get_optional - lookup and obtain a reference to an optional clock
* producer.
* @dev: device for clock "consumer"
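
Taken together, the devm_clk_get_enabled() and clk_rate_exclusive_*() additions above change the usual consumer boilerplate. Below is a minimal, hedged sketch of a probe path using them; the "core" clock id, the 100 MHz target and the driver itself are illustrative, not from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: get a prepared+enabled clock under devres,
 * then pin its rate while programming it. */
static int foo_probe(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_enabled(dev, "core");	/* "core" is made up */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_rate_exclusive_get(clk);		/* lock out other consumers */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 100000000);		/* 100 MHz, illustrative */

	clk_rate_exclusive_put(clk);			/* must balance the _get() */
	return ret;
}

The devres variant, devm_clk_rate_exclusive_get(), would drop the explicit _put() by tying the release to device unbind.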
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index a4f82e836a7c..7af499bdbecb 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -12,6 +12,8 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#include <linux/bits.h>
+
#define AT91_PMC_V1 (1) /* PMC version 1 */
#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
@@ -45,8 +47,8 @@
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
-#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */
-#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */
+#define AT91_PMC_PLL_ACR_DEFAULT_UPLL UL(0x12020010) /* Default PLL ACR value for UPLL */
+#define AT91_PMC_PLL_ACR_DEFAULT_PLLA UL(0x00020010) /* Default PLL ACR value for PLLA */
#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
@@ -78,6 +80,10 @@
#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+
+#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */
+#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. */
+
#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
#define AT91_PMC_DIV (0xff << 0) /* Divider */
#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
@@ -137,6 +143,32 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
+#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
+#define AT91_PMC_MCR_V2_ID_MSK (0xF)
+#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
+#define AT91_PMC_MCR_V2_CMD (1 << 7)
+#define AT91_PMC_MCR_V2_DIV (7 << 8)
+#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
+#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
+#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
+#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
+#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
+#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
+#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
+#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
+#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
+#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
+#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
+#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
+#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
+#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
+#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
+#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
+#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
+#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
+#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
+#define AT91_PMC_MCR_V2_EN (1 << 28)
+
#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
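
The new MCR_V2 field macros compose bitwise in the usual way. As a hedged sketch (the regmap handle, master-clock id and source choice are placeholders), programming one SAMA7G5 master clock might look like:

#include <linux/clk/at91_pmc.h>
#include <linux/regmap.h>

/* Illustrative only: select SYSPLL / 2 for master clock id 3.
 * "regmap" is a hypothetical handle on the PMC register block. */
static void foo_mck3_enable(struct regmap *regmap)
{
	regmap_write(regmap, AT91_PMC_MCR_V2,
		     AT91_PMC_MCR_V2_ID(3) |
		     AT91_PMC_MCR_V2_CMD |		/* issue write command */
		     AT91_PMC_MCR_V2_DIV2 |
		     AT91_PMC_MCR_V2_CSS_SYSPLL |
		     AT91_PMC_MCR_V2_EN);
}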
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
index 8a7b5cd7eac0..e1d37451e03f 100644
--- a/include/linux/clk/davinci.h
+++ b/include/linux/clk/davinci.h
@@ -19,22 +19,5 @@ int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchi
#ifdef CONFIG_ARCH_DAVINCI_DA850
int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
-int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm355_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
-int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm365_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm644x_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm646x_psc_init(struct device *dev, void __iomem *base);
-#endif
#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/imx.h b/include/linux/clk/imx.h
new file mode 100644
index 000000000000..75a0d9696552
--- /dev/null
+++ b/include/linux/clk/imx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Freescale Semiconductor, Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_IMX_H
+#define __LINUX_CLK_IMX_H
+
+#include <linux/types.h>
+
+void imx6sl_set_wait_clk(bool enter);
+
+#endif
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
deleted file mode 100644
index 445130460380..000000000000
--- a/include/linux/clk/mmp.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLK_MMP_H
-#define __CLK_MMP_H
-
-#include <linux/types.h>
-
-extern void pxa168_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-extern void pxa910_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys,
- phys_addr_t apbcp_phys);
-extern void mmp2_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-
-#endif
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 000000000000..736b8bb91bd7
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/clk/samsung.h b/include/linux/clk/samsung.h
new file mode 100644
index 000000000000..0cf7aac83439
--- /dev/null
+++ b/include/linux/clk/samsung.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#ifndef __LINUX_CLK_SAMSUNG_H_
+#define __LINUX_CLK_SAMSUNG_H_
+
+#include <linux/compiler_types.h>
+
+struct device_node;
+
+#ifdef CONFIG_S3C64XX_COMMON_CLK
+void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool s3c6400,
+ void __iomem *base);
+#else
+static inline void s3c64xx_clk_init(struct device_node *np,
+ unsigned long xtal_f,
+ unsigned long xusbxti_f,
+ bool s3c6400, void __iomem *base) { }
+#endif /* CONFIG_S3C64XX_COMMON_CLK */
+
+#endif /* __LINUX_CLK_SAMSUNG_H_ */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
new file mode 100644
index 000000000000..eaf95ca656f8
--- /dev/null
+++ b/include/linux/clk/spear.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_SPEAR_H
+#define __LINUX_CLK_SPEAR_H
+
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
+#else
+static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void __iomem *misc_base);
+#else
+static inline void spear1340_clk_init(void __iomem *misc_base) {}
+#endif
+
+#endif
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
index 3cd14acde0a1..57c8ec44ab4e 100644
--- a/include/linux/clk/sunxi-ng.h
+++ b/include/linux/clk/sunxi-ng.h
@@ -6,22 +6,9 @@
#ifndef _LINUX_CLK_SUNXI_NG_H_
#define _LINUX_CLK_SUNXI_NG_H_
-#include <linux/errno.h>
-
-#ifdef CONFIG_SUNXI_CCU
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
-#else
-static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk,
- bool new_mode)
-{
- return -ENOTSUPP;
-}
-static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
-{
- return -ENOTSUPP;
-}
-#endif
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 3f01d43f0598..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __LINUX_CLK_TEGRA_H_
@@ -42,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -83,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
@@ -123,28 +145,17 @@ static inline void tegra_cpu_clock_resume(void)
}
#endif
-extern void tegra210_xusb_pll_hw_control_enable(void);
-extern void tegra210_xusb_pll_hw_sequence_start(void);
-extern void tegra210_sata_pll_hw_control_enable(void);
-extern void tegra210_sata_pll_hw_sequence_start(void);
-extern void tegra210_set_sata_pll_seq_sw(bool state);
-extern void tegra210_put_utmipll_in_iddq(void);
-extern void tegra210_put_utmipll_out_iddq(void);
-extern int tegra210_clk_handle_mbist_war(unsigned int id);
-extern void tegra210_clk_emc_dll_enable(bool flag);
-extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
-extern void tegra210_clk_emc_update_setting(u32 emc_src_value);
-
struct clk;
+struct tegra_emc;
typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
void *arg);
-
-void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
- void *cb_arg);
-int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
struct tegra210_clk_emc_config {
unsigned long rate;
@@ -166,8 +177,87 @@ struct tegra210_clk_emc_provider {
const struct tegra210_clk_emc_config *config);
};
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg);
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+#else
+static inline void
+tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+}
+
+static inline int
+tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_TEGRA124_CLK_EMC
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb);
+#else
+static inline void
+tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+int tegra210_plle_hw_sequence_start(void);
+bool tegra210_plle_hw_sequence_is_enabled(void);
+void tegra210_xusb_pll_hw_control_enable(void);
+void tegra210_xusb_pll_hw_sequence_start(void);
+void tegra210_sata_pll_hw_control_enable(void);
+void tegra210_sata_pll_hw_sequence_start(void);
+void tegra210_set_sata_pll_seq_sw(bool state);
+void tegra210_put_utmipll_in_iddq(void);
+void tegra210_put_utmipll_out_iddq(void);
+int tegra210_clk_handle_mbist_war(unsigned int id);
+void tegra210_clk_emc_dll_enable(bool flag);
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+void tegra210_clk_emc_update_setting(u32 emc_src_value);
+
int tegra210_clk_emc_attach(struct clk *clk,
struct tegra210_clk_emc_provider *provider);
void tegra210_clk_emc_detach(struct clk *clk);
+#else
+static inline int tegra210_plle_hw_sequence_start(void)
+{
+ return 0;
+}
+
+static inline bool tegra210_plle_hw_sequence_is_enabled(void)
+{
+ return false;
+}
+
+static inline int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ return 0;
+}
+
+static inline int
+tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ return 0;
+}
+
+static inline void tegra210_xusb_pll_hw_control_enable(void) {}
+static inline void tegra210_xusb_pll_hw_sequence_start(void) {}
+static inline void tegra210_sata_pll_hw_control_enable(void) {}
+static inline void tegra210_sata_pll_hw_sequence_start(void) {}
+static inline void tegra210_set_sata_pll_seq_sw(bool state) {}
+static inline void tegra210_put_utmipll_in_iddq(void) {}
+static inline void tegra210_put_utmipll_out_iddq(void) {}
+static inline void tegra210_clk_emc_dll_enable(bool flag) {}
+static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {}
+static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {}
+static inline void tegra210_clk_emc_detach(struct clk *clk) {}
+#endif
#endif /* __LINUX_CLK_TEGRA_H_ */
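
The new tegra124 typedefs split an EMC rate change into a prepare and a complete step. A hedged sketch of how an EMC driver might register them — both callbacks are hypothetical stand-ins for real timing code:

#include <linux/clk/tegra.h>

static int foo_emc_prepare(struct tegra_emc *emc, unsigned long rate)
{
	/* program the timing set for @rate; return 0 on success */
	return 0;
}

static void foo_emc_complete(struct tegra_emc *emc, unsigned long rate)
{
	/* finalize the switch to @rate */
}

static void foo_emc_register(void)
{
	/* a stub no-op when CONFIG_TEGRA124_CLK_EMC is disabled */
	tegra124_clk_set_emc_callbacks(foo_emc_prepare, foo_emc_complete);
}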
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index c62f6fa6763d..e656f63efdce 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI clock drivers support
*
* Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__
@@ -21,11 +13,14 @@
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
+ * @bit: register bit offset
* @index: index of the master IP module
+ * @flags: flags
*/
struct clk_omap_reg {
void __iomem *ptr;
u16 offset;
+ u8 bit;
u8 index;
u8 flags;
};
@@ -63,6 +58,17 @@ struct clk_omap_reg {
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading
+ * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency
+ * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg
+ * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg
+ * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg
+ * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in
+ * @control_reg
+ * @ssc_modfreq: the DPLL SSC frequency modulation in kHz
+ * @ssc_deltam: the DPLL SSC frequency spreading in permille (10th of percent)
+ * @ssc_downspread: request only low-frequency (downward) spread of the DPLL
+ * in SSC mode
* @flags: DPLL type/features (see below)
*
* Possible values for @flags:
@@ -110,6 +116,17 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
+ struct clk_omap_reg ssc_deltam_reg;
+ struct clk_omap_reg ssc_modfreq_reg;
+ u32 ssc_deltam_int_mask;
+ u32 ssc_deltam_frac_mask;
+ u32 ssc_modfreq_mant_mask;
+ u32 ssc_modfreq_exp_mask;
+ u32 ssc_enable_mask;
+ u32 ssc_downspread_mask;
+ u32 ssc_modfreq;
+ u32 ssc_deltam;
+ bool ssc_downspread;
u8 flags;
};
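
To make the new SSC fields concrete, here is a hedged, partial initializer; the values are placeholders and the many other required dpll_data fields (control_reg, mult_mask, ...) are omitted for brevity:

#include <linux/clk/ti.h>

static struct dpll_data foo_dpll_dd = {
	/* ... register and mask fields elided ... */
	.ssc_modfreq	= 33,	/* 33 kHz modulation frequency */
	.ssc_deltam	= 5,	/* 0.5% spreading (permille) */
	.ssc_downspread	= true,	/* spread below the nominal rate only */
};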
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index fd06b2780a22..45570bc21a43 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -30,11 +30,6 @@ struct clk_lookup {
.clk = c, \
}
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
@@ -51,6 +46,4 @@ int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id);
-void devm_clk_release_clkdev(struct device *dev, const char *con_id,
- const char *dev_id);
#endif
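
With the alloc-style helpers removed, the managed registration path is the remaining idiom. A hedged sketch — priv->hw and the "baud" con_id are illustrative:

#include <linux/clkdev.h>

/* Register a lookup so consumers can clk_get(dev, "baud") this hw clock;
 * the lookup is dropped automatically when @dev is unbound. */
static int foo_register_lookup(struct device *dev, struct clk_hw *hw)
{
	return devm_clk_hw_register_clkdev(dev, hw, "baud", dev_name(dev));
}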
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8ae9a95ebf5b..9aac31d856f3 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -211,7 +211,7 @@ extern int tick_receive_broadcast(void);
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
# else
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
@@ -219,7 +219,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 86d143db6523..0ad8b550bb4b 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -17,6 +17,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/clocksource_ids.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -42,6 +43,8 @@ struct module;
* @shift: Cycle to nanosecond divisor (power of two)
* @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
* @maxadj: Maximum adjustment value to mult (~11%)
+ * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
+ * A value of zero selects the default WATCHDOG_THRESHOLD.
* @archdata: Optional arch-specific data
* @max_cycles: Maximum safe cycle value which won't overflow on
* multiplication
@@ -62,6 +65,10 @@ struct module;
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
+ * @id: Defaults to CSID_GENERIC. The id value is captured
+ * in certain snapshot functions to allow callers to
+ * validate the clocksource from which the snapshot was
+ * taken.
* @flags: Flags describing special properties
* @enable: Optional function to enable the clocksource
* @disable: Optional function to disable the clocksource
@@ -70,7 +77,7 @@ struct module;
* @mark_unstable: Optional function to inform the clocksource driver that
* the watchdog marked the clocksource unstable
* @tick_stable: Optional function called periodically from the watchdog
- * code to provide stable syncrhonization points
+ * code to provide stable synchronization points
* @wd_list: List head to enqueue into the watchdog list (internal)
* @cs_last: Last clocksource value for clocksource watchdog
* @wd_last: Last watchdog value corresponding to @cs_last
@@ -93,6 +100,7 @@ struct clocksource {
u32 shift;
u64 max_idle_ns;
u32 maxadj;
+ u32 uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
@@ -100,6 +108,7 @@ struct clocksource {
const char *name;
struct list_head list;
int rating;
+ enum clocksource_ids id;
enum vdso_clock_mode vdso_clock_mode;
unsigned long flags;
@@ -131,7 +140,7 @@ struct clocksource {
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-
+#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -282,4 +291,19 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+static inline unsigned int clocksource_get_max_watchdog_retry(void)
+{
+ /*
+ * When system is in the boot phase or under heavy workload, there
+ * can be random big latencies during the clocksource/watchdog
+ * read, so allow retries to filter the noise latency. As the
+ * latency's frequency and maximum value goes up with the number of
+ * CPUs, scale the number of retries with the number of online
+ * CPUs.
+ */
+ return (ilog2(num_online_cpus()) / 2) + 1;
+}
+
+void clocksource_verify_percpu(struct clocksource *cs);
+
#endif /* _LINUX_CLOCKSOURCE_H */
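
For a sense of scale, the retry heuristic grows only logarithmically with machine size (worked out here, not part of the patch): ilog2(1)/2 + 1 = 1 retry on a single CPU, ilog2(4)/2 + 1 = 2 on 4 CPUs, ilog2(64)/2 + 1 = 4 on 64 CPUs, and ilog2(1024)/2 + 1 = 6 on 1024 CPUs, so even very large systems perform at most a handful of re-reads.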
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
new file mode 100644
index 000000000000..a4fa3436940c
--- /dev/null
+++ b/include/linux/clocksource_ids.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOCKSOURCE_IDS_H
+#define _LINUX_CLOCKSOURCE_IDS_H
+
+/* Enum to give clocksources a unique identifier */
+enum clocksource_ids {
+ CSID_GENERIC = 0,
+ CSID_ARM_ARCH_COUNTER,
+ CSID_X86_TSC_EARLY,
+ CSID_X86_TSC,
+ CSID_X86_KVM_CLK,
+ CSID_MAX,
+};
+
+#endif
diff --git a/include/linux/closure.h b/include/linux/closure.h
new file mode 100644
index 000000000000..c554c6a08768
--- /dev/null
+++ b/include/linux/closure.h
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To use closures asynchronously safely, they must always have a refcount
+ * while they are running, owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If the closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e., in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+typedef void (closure_fn) (struct work_struct *);
+extern struct dentry *bcache_debug;
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+ * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ */
+
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct closure_syncer *s;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+ bool closure_get_happened;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+
+ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+ if (cl->closure_get_happened)
+ __closure_sync(cl);
+}
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->waiting_on = f;
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+}
+
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+	/*
+	 * Changes made to struct closure, struct work_struct, or a couple of
+	 * other structs may cause work.func to no longer point to the right
+	 * function.
+	 */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
+
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(&cl->work);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+ cl->closure_get_happened = true;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+ cl->fn = NULL;
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+ cl->closure_get_happened = false;
+
+ closure_debug_create(cl);
+ closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member) \
+ struct closure *cl = container_of(ws, struct closure, work); \
+ type *name = container_of(cl, type, member)
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+} while (0)
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_queue(_cl); \
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#define __closure_wait_event(waitlist, _cond) \
+do { \
+ struct closure cl; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) \
+ break; \
+ closure_sync(&cl); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond) \
+do { \
+ if (!(_cond)) \
+ __closure_wait_event(waitlist, _cond); \
+} while (0)
+
+#endif /* _LINUX_CLOSURE_H */
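
The long comment in the header sketches the idiom in pseudo-code; here is a hedged, concrete rendering of the same pattern using the exported macros. struct foo_req and submit_async_op() are hypothetical:

#include <linux/closure.h>
#include <linux/workqueue.h>

struct foo_req {
	struct closure	cl;
	/* ... request state ... */
};

/* hypothetical async submission primitive; calls @done when finished */
void submit_async_op(struct foo_req *req, void (*done)(void *cookie));

static void foo_op_done(void *cookie)
{
	struct foo_req *req = cookie;

	closure_put(&req->cl);		/* drop the ref taken at submit */
}

static CLOSURE_CALLBACK(foo_req_finish)
{
	closure_type(req, struct foo_req, cl);

	/* all async ops completed; this thread owns the last ref */
	closure_return(&req->cl);	/* drops the ref on the parent, if any */
}

static void foo_submit(struct foo_req *req, struct closure *parent)
{
	closure_init(&req->cl, parent);	/* refcount starts at 1 */

	closure_get(&req->cl);		/* ref owned by the in-flight op */
	submit_async_op(req, foo_op_done);

	continue_at(&req->cl, foo_req_finish, system_wq);
	/* nothing after continue_at(): we no longer hold a ref on req->cl */
}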
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
deleted file mode 100644
index ea4958e07a14..000000000000
--- a/include/linux/cm4000_cs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CM4000_H_
-#define _CM4000_H_
-
-#include <uapi/linux/cm4000_cs.h>
-
-
-#define DEVICE_NAME "cmm"
-#define MODULE_NAME "cm4000_cs"
-
-#endif /* _CM4000_H_ */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 6ff79fefd01f..9db877506ea8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -6,17 +6,19 @@
#include <linux/types.h>
#include <linux/numa.h>
-/*
- * There is always at least global CMA area and a few optional
- * areas configured in kernel .config.
- */
#ifdef CONFIG_CMA_AREAS
-#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+#define MAX_CMA_AREAS CONFIG_CMA_AREAS
+#endif
-#else
-#define MAX_CMA_AREAS (0)
+#define CMA_MAX_NAME 64
-#endif
+/*
+ * Although the buddy allocator -- especially pageblock merging and
+ * alloc_contig_range() -- can deal with only some pageblocks of a
+ * higher-order page being MIGRATE_CMA, we can use pageblock_nr_pages
+ * as the minimum CMA alignment.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
struct cma;
@@ -42,9 +44,12 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
-extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+
+extern void cma_reserve_pages_on_error(struct cma *cma);
#endif
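
A minimal sketch of the widened cma_alloc()/cma_release() signatures (counts are now unsigned long). "my_cma" stands in for an area reserved earlier, e.g. via cma_init_reserved_mem():

#include <linux/cma.h>
#include <linux/mm.h>
#include <linux/printk.h>

static struct page *foo_alloc_buffer(struct cma *my_cma, unsigned long nr_pages)
{
	/* align order 0: nothing beyond the CMA_MIN_ALIGNMENT_PAGES floor */
	return cma_alloc(my_cma, nr_pages, 0, false);
}

static void foo_free_buffer(struct cma *my_cma, struct page *pages,
			    unsigned long nr_pages)
{
	if (!cma_release(my_cma, pages, nr_pages))
		pr_warn("pages did not belong to this CMA area\n");
}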
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644
index 68a541807bdf..000000000000
--- a/include/linux/cmdline-parser.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY 0x01 /* Device is read only */
-#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
- char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
- sector_t from;
- sector_t size;
- int flags;
- struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
- char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
- unsigned int nr_subparts;
- struct cmdline_subpart *subpart;
- struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
- const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
- int slot,
- int (*add_part)(int, struct cmdline_subpart *, void *),
- void *param);
-
-#endif /* CMDLINEPARSEH */
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
new file mode 100644
index 000000000000..d527f04400df
--- /dev/null
+++ b/include/linux/comedi/comedi_8254.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8254.h
+ * Generic 8254 timer/counter support
+ * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8254_H
+#define _COMEDI_8254_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+struct comedi_device;
+struct comedi_insn;
+struct comedi_subdevice;
+
+/*
+ * Common oscillator base values in nanoseconds
+ */
+#define I8254_OSC_BASE_10MHZ 100
+#define I8254_OSC_BASE_5MHZ 200
+#define I8254_OSC_BASE_4MHZ 250
+#define I8254_OSC_BASE_2MHZ 500
+#define I8254_OSC_BASE_1MHZ 1000
+#define I8254_OSC_BASE_100KHZ 10000
+#define I8254_OSC_BASE_10KHZ 100000
+#define I8254_OSC_BASE_1KHZ 1000000
+
+/*
+ * I/O access size used to read/write registers
+ */
+#define I8254_IO8 1
+#define I8254_IO16 2
+#define I8254_IO32 4
+
+/*
+ * Register map for generic 8254 timer (I8254_IO8 with 0 regshift)
+ */
+#define I8254_COUNTER0_REG 0x00
+#define I8254_COUNTER1_REG 0x01
+#define I8254_COUNTER2_REG 0x02
+#define I8254_CTRL_REG 0x03
+#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
+#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
+
+/* counter maps zero to 0x10000 */
+#define I8254_MAX_COUNT 0x10000
+
+struct comedi_8254;
+
+/**
+ * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access
+ * @i8254: pointer to struct comedi_8254
+ * @dir: direction (0 = read, 1 = write)
+ * @reg: register number
+ * @val: value to write
+ *
+ * Return: Register value when reading, 0 when writing.
+ */
+typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val);
+
+/**
+ * struct comedi_8254 - private data used by this module
+ * @iocb: I/O call-back function for register access
+ * @context: context for register access (e.g. a base address)
+ * @iosize: I/O size used to access the registers (b/w/l)
+ * @regshift: register gap shift
+ * @osc_base: cascaded oscillator speed in ns
+ * @divisor: divisor for single counter
+ * @divisor1: divisor loaded into first cascaded counter
+ * @divisor2: divisor loaded into second cascaded counter
+ * @next_div: next divisor for single counter
+ * @next_div1: next divisor to use for first cascaded counter
+ * @next_div2: next divisor to use for second cascaded counter
+ * @clock_src: current clock source for each counter (driver specific)
+ * @gate_src: current gate source for each counter (driver specific)
+ * @busy: flags used to indicate that a counter is "busy"
+ * @insn_config: driver specific (*insn_config) callback
+ */
+struct comedi_8254 {
+ comedi_8254_iocb_fn *iocb;
+ unsigned long context;
+ unsigned int iosize;
+ unsigned int regshift;
+ unsigned int osc_base;
+ unsigned int divisor;
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int next_div;
+ unsigned int next_div1;
+ unsigned int next_div2;
+ unsigned int clock_src[3];
+ unsigned int gate_src[3];
+ bool busy[3];
+
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+};
+
+unsigned int comedi_8254_status(struct comedi_8254 *i8254,
+ unsigned int counter);
+unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter);
+void comedi_8254_write(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val);
+
+int comedi_8254_set_mode(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int mode);
+int comedi_8254_load(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val, unsigned int mode);
+
+void comedi_8254_pacer_enable(struct comedi_8254 *i8254,
+ unsigned int counter1, unsigned int counter2,
+ bool enable);
+void comedi_8254_update_divisors(struct comedi_8254 *i8254);
+void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+void comedi_8254_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+
+void comedi_8254_set_busy(struct comedi_8254 *i8254,
+ unsigned int counter, bool busy);
+
+void comedi_8254_subdevice_init(struct comedi_subdevice *s,
+ struct comedi_8254 *i8254);
+
+#ifdef CONFIG_HAS_IOPORT
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+#else
+static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+
+#endif /* _COMEDI_8254_H */
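
A hedged sketch of the new CONFIG_HAS_IOPORT-aware allocator: on kernels without port I/O the call returns an ERR_PTR instead of a usable timer. The 0x08 register offset is a placeholder:

#include <linux/comedi/comedi_8254.h>

static int foo_init_pacer(unsigned long iobase)
{
	struct comedi_8254 *pacer;

	/* timer block at a hypothetical 0x08 offset from the board base */
	pacer = comedi_8254_io_alloc(iobase + 0x08,
				     I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
	if (IS_ERR(pacer))
		return PTR_ERR(pacer);	/* -ENXIO when !CONFIG_HAS_IOPORT */

	/* store pacer (e.g. in the comedi_device private data), then use
	 * comedi_8254_cascade_ns_to_timer() / comedi_8254_pacer_enable() */
	return 0;
}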
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
new file mode 100644
index 000000000000..d24a69da389b
--- /dev/null
+++ b/include/linux/comedi/comedi_8255.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8255.h
+ * Generic 8255 digital I/O subdevice support
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8255_H
+#define _COMEDI_8255_H
+
+#include <linux/errno.h>
+
+#define I8255_SIZE 0x04
+
+#define I8255_DATA_A_REG 0x00
+#define I8255_DATA_B_REG 0x01
+#define I8255_DATA_C_REG 0x02
+#define I8255_CTRL_REG 0x03
+#define I8255_CTRL_C_LO_IO BIT(0)
+#define I8255_CTRL_B_IO BIT(1)
+#define I8255_CTRL_B_MODE BIT(2)
+#define I8255_CTRL_C_HI_IO BIT(3)
+#define I8255_CTRL_A_IO BIT(4)
+#define I8255_CTRL_A_MODE(x) ((x) << 5)
+#define I8255_CTRL_CW BIT(7)
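+
+/*
+ * Illustrative note (not part of the original header): a control word is
+ * built by OR-ing the bits above with I8255_CTRL_CW. For example, mode 0
+ * with all four port groups configured as inputs:
+ *
+ * val = I8255_CTRL_CW | I8255_CTRL_A_IO | I8255_CTRL_B_IO |
+ *       I8255_CTRL_C_HI_IO | I8255_CTRL_C_LO_IO;  // 0x9b
+ */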
+
+struct comedi_device;
+struct comedi_subdevice;
+
+#ifdef CONFIG_HAS_IOPORT
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+#else
+static inline int subdev_8255_io_init(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return -ENXIO;
+}
+#endif
+
+int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long context),
+ unsigned long context);
+
+unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
+
+#endif /* _COMEDI_8255_H */
diff --git a/include/linux/comedi/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h
new file mode 100644
index 000000000000..9d2b12db7e6e
--- /dev/null
+++ b/include/linux/comedi/comedi_isadma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * COMEDI ISA DMA support functions
+ * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ */
+
+#ifndef _COMEDI_ISADMA_H
+#define _COMEDI_ISADMA_H
+
+#include <linux/types.h>
+
+struct comedi_device;
+struct device;
+
+/*
+ * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
+ * defines are not available.
+ */
+#define COMEDI_ISADMA_READ 0
+#define COMEDI_ISADMA_WRITE 1
+
+/**
+ * struct comedi_isadma_desc - cookie for ISA DMA
+ * @virt_addr: virtual address of buffer
+ * @hw_addr: hardware (bus) address of buffer
+ * @chan: DMA channel
+ * @maxsize: allocated size of buffer (in bytes)
+ * @size: transfer size (in bytes)
+ * @mode: DMA_MODE_READ or DMA_MODE_WRITE
+ */
+struct comedi_isadma_desc {
+ void *virt_addr;
+ dma_addr_t hw_addr;
+ unsigned int chan;
+ unsigned int maxsize;
+ unsigned int size;
+ char mode;
+};
+
+/**
+ * struct comedi_isadma - ISA DMA data
+ * @dev: device to allocate non-coherent memory for
+ * @desc: cookie for each DMA buffer
+ * @n_desc: the number of cookies
+ * @cur_dma: the current cookie in use
+ * @chan: the first DMA channel requested
+ * @chan2: the second DMA channel requested
+ */
+struct comedi_isadma {
+ struct device *dev;
+ struct comedi_isadma_desc *desc;
+ int n_desc;
+ int cur_dma;
+ unsigned int chan;
+ unsigned int chan2;
+};
+
+#if IS_ENABLED(CONFIG_ISA_DMA_API)
+
+void comedi_isadma_program(struct comedi_isadma_desc *desc);
+unsigned int comedi_isadma_disable(unsigned int dma_chan);
+unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
+ unsigned int size);
+unsigned int comedi_isadma_poll(struct comedi_isadma *dma);
+void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir);
+
+struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
+ int n_desc, unsigned int dma_chan1,
+ unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir);
+void comedi_isadma_free(struct comedi_isadma *dma);
+
+#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
+{
+}
+
+static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
+{
+ return 0;
+}
+
+static inline unsigned int
+comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
+{
+ return 0;
+}
+
+static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
+{
+ return 0;
+}
+
+static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
+ char dma_dir)
+{
+}
+
+static inline struct comedi_isadma *
+comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
+ unsigned int dma_chan1, unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir)
+{
+ return NULL;
+}
+
+static inline void comedi_isadma_free(struct comedi_isadma *dma)
+{
+}
+
+#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
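+
+/*
+ * Illustrative sketch (not part of the original header): a legacy ISA
+ * driver's "attach" might set up two read buffers on one DMA channel and
+ * release them in "detach". The devpriv pointer and buffer size are
+ * hypothetical.
+ *
+ * devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
+ *  PAGE_SIZE, COMEDI_ISADMA_READ);
+ * if (!devpriv->dma)
+ *  ;  // fall back to non-DMA operation
+ * ...
+ * comedi_isadma_free(devpriv->dma);
+ */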
+
+#endif /* _COMEDI_ISADMA_H */
diff --git a/include/linux/comedi/comedi_pci.h b/include/linux/comedi/comedi_pci.h
new file mode 100644
index 000000000000..2fb50663e3ed
--- /dev/null
+++ b/include/linux/comedi/comedi_pci.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pci.h
+ * header file for Comedi PCI drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCI_H
+#define _COMEDI_PCI_H
+
+#include <linux/pci.h>
+#include <linux/comedi/comedidev.h>
+
+/*
+ * PCI Vendor IDs not in <linux/pci_ids.h>
+ */
+#define PCI_VENDOR_ID_KOLTER 0x1001
+#define PCI_VENDOR_ID_ICP 0x104c
+#define PCI_VENDOR_ID_DT 0x1116
+#define PCI_VENDOR_ID_IOTECH 0x1616
+#define PCI_VENDOR_ID_CONTEC 0x1221
+#define PCI_VENDOR_ID_RTD 0x1435
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
+
+struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev);
+
+int comedi_pci_enable(struct comedi_device *dev);
+void comedi_pci_disable(struct comedi_device *dev);
+void comedi_pci_detach(struct comedi_device *dev);
+
+int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver,
+ unsigned long context);
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
+
+int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+
+/**
+ * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for comedi PCI drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
+ module_driver(__comedi_driver, comedi_pci_driver_register, \
+ comedi_pci_driver_unregister, &(__pci_driver))
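+
+/*
+ * Illustrative sketch (not part of the original header): the usual pattern
+ * pairs an auto-attach comedi_driver with a pci_driver whose probe simply
+ * calls comedi_pci_auto_config(). All "foo" names are hypothetical and the
+ * id_table definition is elided.
+ *
+ * static struct comedi_driver foo_driver = {
+ *  .driver_name = "foo",
+ *  .module = THIS_MODULE,
+ *  .auto_attach = foo_auto_attach,
+ *  .detach = comedi_pci_detach,
+ * };
+ *
+ * static int foo_pci_probe(struct pci_dev *pcidev,
+ *  const struct pci_device_id *id)
+ * {
+ *  return comedi_pci_auto_config(pcidev, &foo_driver, id->driver_data);
+ * }
+ *
+ * static struct pci_driver foo_pci_driver = {
+ *  .name = "foo",
+ *  .id_table = foo_pci_table,
+ *  .probe = foo_pci_probe,
+ *  .remove = comedi_pci_auto_unconfig,
+ * };
+ * module_comedi_pci_driver(foo_driver, foo_pci_driver);
+ */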
+
+#endif /* _COMEDI_PCI_H */
diff --git a/include/linux/comedi/comedi_pcmcia.h b/include/linux/comedi/comedi_pcmcia.h
new file mode 100644
index 000000000000..a33dfb65b869
--- /dev/null
+++ b/include/linux/comedi/comedi_pcmcia.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pcmcia.h
+ * header file for Comedi PCMCIA drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCMCIA_H
+#define _COMEDI_PCMCIA_H
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/comedi/comedidev.h>
+
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
+
+int comedi_pcmcia_enable(struct comedi_device *dev,
+ int (*conf_check)(struct pcmcia_device *p_dev,
+ void *priv_data));
+void comedi_pcmcia_disable(struct comedi_device *dev);
+
+int comedi_pcmcia_auto_config(struct pcmcia_device *link,
+ struct comedi_driver *driver);
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link);
+
+int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+
+/**
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
+ * PCMCIA driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pcmcia_driver: pcmcia_driver struct
+ *
+ * Helper macro for comedi PCMCIA drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
+ module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
+ comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
+
+#endif /* _COMEDI_PCMCIA_H */
diff --git a/include/linux/comedi/comedi_usb.h b/include/linux/comedi/comedi_usb.h
new file mode 100644
index 000000000000..5d17dd425bd2
--- /dev/null
+++ b/include/linux/comedi/comedi_usb.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_usb.h
+ * header file for USB Comedi drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_USB_H
+#define _COMEDI_USB_H
+
+#include <linux/usb.h>
+#include <linux/comedi/comedidev.h>
+
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
+struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
+
+int comedi_usb_auto_config(struct usb_interface *intf,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_usb_auto_unconfig(struct usb_interface *intf);
+
+int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+
+/**
+ * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
+ * @__comedi_driver: comedi_driver struct
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for comedi USB drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
+ module_driver(__comedi_driver, comedi_usb_driver_register, \
+ comedi_usb_driver_unregister, &(__usb_driver))
+
+#endif /* _COMEDI_USB_H */
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
new file mode 100644
index 000000000000..c08416a7364b
--- /dev/null
+++ b/include/linux/comedi/comedidev.h
@@ -0,0 +1,1053 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedidev.h
+ * header file for kernel-only structures, variables, and constants
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDIDEV_H
+#define _COMEDIDEV_H
+
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
+#include <linux/comedi.h>
+
+#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
+ COMEDI_MINORVERSION, COMEDI_MICROVERSION)
+#define COMEDI_RELEASE VERSION
+
+#define COMEDI_NUM_BOARD_MINORS 0x30
+
+/**
+ * struct comedi_subdevice - Working data for a COMEDI subdevice
+ * @device: COMEDI device to which this subdevice belongs. (Initialized by
+ * comedi_alloc_subdevices().)
+ * @index: Index of this subdevice within device's array of subdevices.
+ * (Initialized by comedi_alloc_subdevices().)
+ * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by
+ * the low-level driver.)
+ * @n_chan: Number of channels the subdevice supports. (Initialized by the
+ * low-level driver.)
+ * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to
+ * the COMEDI core and user application. (Initialized by the low-level
+ * driver.)
+ * @len_chanlist: Maximum length of a channel list if the subdevice supports
+ * asynchronous acquisition commands. (Optionally initialized by the
+ * low-level driver, or changed from 0 to 1 during post-configuration.)
+ * @private: Private data pointer which is either set by the low-level driver
+ * itself, or by a call to comedi_alloc_spriv() which allocates storage.
+ * In the latter case, the storage is automatically freed after the
+ * low-level driver's "detach" handler is called for the device.
+ * (Initialized by the low-level driver.)
+ * @async: Pointer to &struct comedi_async if the subdevice supports
+ * asynchronous acquisition commands. (Allocated and initialized during
+ * post-configuration if needed.)
+ * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the
+ * subdevice. (Initially NULL.)
+ * @busy: Pointer to a file object that is performing an asynchronous
+ * acquisition command on the subdevice. (Initially NULL.)
+ * @runflags: Internal flags for use by COMEDI core, mostly indicating whether
+ * an asynchronous acquisition command is running.
+ * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level
+ * driver. (Initialized by comedi_alloc_subdevices().)
+ * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice
+ * with no more than 32 channels. A '1' at a bit position indicates the
+ * corresponding channel is configured as an output. (Initialized by the
+ * low-level driver for a DIO subdevice. Forced to all-outputs during
+ * post-configuration for a digital output subdevice.)
+ * @maxdata: If non-zero, this is the maximum raw data value of each channel.
+ * If zero, the maximum data value is channel-specific. (Initialized by
+ * the low-level driver.)
+ * @maxdata_list: If the maximum data value is channel-specific, this points
+ * to an array of maximum data values indexed by channel index.
+ * (Initialized by the low-level driver.)
+ * @range_table: If non-NULL, this points to a COMEDI range table for the
+ * subdevice. If NULL, the range table is channel-specific. (Initialized
+ * by the low-level driver, will be set to an "invalid" range table during
+ * post-configuration if @range_table and @range_table_list are both
+ * NULL.)
+ * @range_table_list: If the COMEDI range table is channel-specific, this
+ * points to an array of pointers to COMEDI range tables indexed by
+ * channel number. (Initialized by the low-level driver.)
+ * @chanlist: Not used.
+ * @insn_read: Optional pointer to a handler for the %INSN_READ instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction
+ * for a digital input, digital output or digital input/output subdevice.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_config: Optional pointer to a handler for the %INSN_CONFIG
+ * instruction. (Initialized by the low-level driver, or set to a default
+ * handler during post-configuration.)
+ * @do_cmd: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler to set it up in hardware. (Initialized by the
+ * low-level driver.)
+ * @do_cmdtest: If the subdevice supports asynchronous acquisition commands,
+ * this points to a handler used to check and possibly tweak a prospective
+ * acquisition command without setting it up in hardware. (Initialized by
+ * the low-level driver.)
+ * @poll: If the subdevice supports asynchronous acquisition commands, this
+ * is an optional pointer to a handler for the %COMEDI_POLL ioctl which
+ * instructs the low-level driver to synchronize buffers. (Initialized by
+ * the low-level driver if needed.)
+ * @cancel: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler used to terminate a running command. (Initialized
+ * by the low-level driver.)
+ * @buf_change: If the subdevice supports asynchronous acquisition commands,
+ * this is an optional pointer to a handler that is called when the data
+ * buffer for handling asynchronous commands is allocated or reallocated.
+ * (Initialized by the low-level driver if needed.)
+ * @munge: If the subdevice supports asynchronous acquisition commands and
+ * uses DMA to transfer data from the hardware to the acquisition buffer,
+ * this points to a function used to "munge" the data values from the
+ * hardware into the format expected by COMEDI. (Initialized by the
+ * low-level driver if needed.)
+ * @async_dma_dir: If the subdevice supports asynchronous acquisition commands
+ * and uses DMA to transfer data from the hardware to the acquisition
+ * buffer, this sets the DMA direction for the buffer. (Initialized to
+ * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level
+ * driver if necessary.)
+ * @state: Handy bit-mask indicating the output states for a DIO or digital
+ * output subdevice with no more than 32 channels. (Initialized by the
+ * low-level driver.)
+ * @class_dev: If the subdevice supports asynchronous acquisition commands,
+ * this points to a sysfs comediX_subdY device where X is the minor device
+ * number of the COMEDI device and Y is the subdevice number. The minor
+ * device number for the sysfs device is allocated dynamically in the
+ * range 48 to 255. This is used to allow the COMEDI device to be opened
+ * with a different default read or write subdevice. (Allocated during
+ * post-configuration if needed.)
+ * @minor: If @class_dev is set, this is its dynamically allocated minor
+ * device number. (Set during post-configuration if necessary.)
+ * @readback: Optional pointer to memory allocated by
+ * comedi_alloc_subdev_readback() used to hold the values written to
+ * analog output channels so they can be read back. The storage is
+ * automatically freed after the low-level driver's "detach" handler is
+ * called for the device. (Initialized by the low-level driver.)
+ *
+ * This is the main control structure for a COMEDI subdevice. If the subdevice
+ * supports asynchronous acquisition commands, additional information is stored
+ * in the &struct comedi_async pointed to by @async.
+ *
+ * Most of the subdevice is initialized by the low-level driver's "attach" or
+ * "auto_attach" handlers but parts of it are initialized by
+ * comedi_alloc_subdevices(), and other parts are initialized during
+ * post-configuration on return from that handler.
+ *
+ * A low-level driver that sets @insn_bits for a digital input, digital output,
+ * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in
+ * which case they will be set to a default handler during post-configuration
+ * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions.
+ */
+struct comedi_subdevice {
+ struct comedi_device *device;
+ int index;
+ int type;
+ int n_chan;
+ int subdev_flags;
+ int len_chanlist; /* maximum length of channel/gain list */
+
+ void *private;
+
+ struct comedi_async *async;
+
+ void *lock;
+ void *busy;
+ unsigned int runflags;
+ spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
+
+ unsigned int io_bits;
+
+ unsigned int maxdata; /* if maxdata==0, use list */
+ const unsigned int *maxdata_list; /* list is channel specific */
+
+ const struct comedi_lrange *range_table;
+ const struct comedi_lrange *const *range_table_list;
+
+ unsigned int *chanlist; /* driver-owned chanlist (not used) */
+
+ int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+ int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*do_cmdtest)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+ int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s);
+
+ /* called when the buffer changes */
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s);
+
+ void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index);
+ enum dma_data_direction async_dma_dir;
+
+ unsigned int state;
+
+ struct device *class_dev;
+ int minor;
+
+ unsigned int *readback;
+};
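+
+/*
+ * Illustrative sketch (not part of the original header): a minimal
+ * (*insn_bits) handler for a 16-channel DIO subdevice, assuming hypothetical
+ * FOO_DO_REG/FOO_DI_REG registers. comedi_dio_update_state() (declared
+ * further below) applies the mask in data[0] and bits in data[1] to
+ * s->state and returns non-zero if any output bits changed; the handler
+ * then returns the current input state in data[1].
+ *
+ * static int foo_dio_insn_bits(struct comedi_device *dev,
+ *  struct comedi_subdevice *s,
+ *  struct comedi_insn *insn, unsigned int *data)
+ * {
+ *  if (comedi_dio_update_state(s, data))
+ *   outw(s->state, dev->iobase + FOO_DO_REG);
+ *  data[1] = inw(dev->iobase + FOO_DI_REG);
+ *  return insn->n;
+ * }
+ */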
+
+/**
+ * struct comedi_buf_page - Describe a page of a COMEDI buffer
+ * @virt_addr: Kernel address of page.
+ * @dma_addr: DMA address of page if in DMA coherent memory.
+ */
+struct comedi_buf_page {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct comedi_buf_map - Describe pages in a COMEDI buffer
+ * @dma_hw_dev: Low-level hardware &struct device pointer copied from the
+ * COMEDI device's hw_dev member.
+ * @page_list: Pointer to array of &struct comedi_buf_page, one for each
+ * page in the buffer.
+ * @n_pages: Number of pages in the buffer.
+ * @dma_dir: DMA direction used to allocate pages of DMA coherent memory,
+ * or %DMA_NONE if pages allocated from regular memory.
+ * @refcount: &struct kref reference counter used to free the buffer.
+ *
+ * A COMEDI data buffer is allocated as individual pages, either in
+ * conventional memory or DMA coherent memory, depending on the attached
+ * low-level hardware device. (The buffer pages also get mapped into the
+ * kernel's contiguous virtual address space pointed to by the 'prealloc_buf'
+ * member of &struct comedi_async.)
+ *
+ * The buffer is normally freed when the COMEDI device is detached from the
+ * low-level driver (which may happen due to device removal), but if it happens
+ * to be mmapped at the time, the pages cannot be freed until the buffer has
+ * been munmapped. That is what the reference counter is for. (The virtual
+ * address space pointed to by 'prealloc_buf' is freed when the COMEDI device is
+ * detached.)
+ */
+struct comedi_buf_map {
+ struct device *dma_hw_dev;
+ struct comedi_buf_page *page_list;
+ unsigned int n_pages;
+ enum dma_data_direction dma_dir;
+ struct kref refcount;
+};
+
+/**
+ * struct comedi_async - Control data for asynchronous COMEDI commands
+ * @prealloc_buf: Kernel virtual address of allocated acquisition buffer.
+ * @prealloc_bufsz: Buffer size (in bytes).
+ * @buf_map: Map of buffer pages.
+ * @max_bufsize: Maximum allowed buffer size (in bytes).
+ * @buf_write_count: "Write completed" count (in bytes, modulo 2**32).
+ * @buf_write_alloc_count: "Allocated for writing" count (in bytes,
+ * modulo 2**32).
+ * @buf_read_count: "Read completed" count (in bytes, modulo 2**32).
+ * @buf_read_alloc_count: "Allocated for reading" count (in bytes,
+ * modulo 2**32).
+ * @buf_write_ptr: Buffer position for writer.
+ * @buf_read_ptr: Buffer position for reader.
+ * @cur_chan: Current position in chanlist for scan (for those drivers that
+ * use it).
+ * @scans_done: The number of scans completed.
+ * @scan_progress: Amount received or sent for current scan (in bytes).
+ * @munge_chan: Current position in chanlist for "munging".
+ * @munge_count: "Munge" count (in bytes, modulo 2**32).
+ * @munge_ptr: Buffer position for "munging".
+ * @events: Bit-vector of events that have occurred.
+ * @cmd: Details of comedi command in progress.
+ * @wait_head: Task wait queue for file reader or writer.
+ * @cb_mask: Bit-vector of events that should wake waiting tasks.
+ * @inttrig: Software trigger function for command, or NULL.
+ *
+ * Note about the ..._count and ..._ptr members:
+ *
+ * Think of the _Count values being integers of unlimited size, indexing
+ * into a buffer of infinite length (though only an advancing portion
+ * of the buffer of fixed length prealloc_bufsz is accessible at any
+ * time). Then:
+ *
+ * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <=
+ * Buf_Write_Count <= Buf_Write_Alloc_Count <=
+ * (Buf_Read_Count + prealloc_bufsz)
+ *
+ * (Those aren't the actual members, apart from prealloc_bufsz.) When the
+ * buffer is reset, those _Count values start at 0 and only increase in value,
+ * maintaining the above inequalities until the next time the buffer is
+ * reset. The buffer is divided into the following regions by the inequalities:
+ *
+ * [0, Buf_Read_Count):
+ * old region no longer accessible
+ *
+ * [Buf_Read_Count, Buf_Read_Alloc_Count):
+ * filled and munged region allocated for reading but not yet read
+ *
+ * [Buf_Read_Alloc_Count, Munge_Count):
+ * filled and munged region not yet allocated for reading
+ *
+ * [Munge_Count, Buf_Write_Count):
+ * filled region not yet munged
+ *
+ * [Buf_Write_Count, Buf_Write_Alloc_Count):
+ * unfilled region allocated for writing but not yet written
+ *
+ * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz):
+ * unfilled region not yet allocated for writing
+ *
+ * [Buf_Read_Count + prealloc_bufsz, infinity):
+ * unfilled region not yet accessible
+ *
+ * Data needs to be written into the buffer before it can be read out,
+ * and may need to be converted (or "munged") between the two
+ * operations. Extra unfilled buffer space may need to be allocated for
+ * writing (advancing Buf_Write_Alloc_Count) before new data is written.
+ * After writing new data, the newly filled space needs to be released
+ * (advancing Buf_Write_Count). This also results in the new data being
+ * "munged" (advancing Munge_Count). Before data is read out of the
+ * buffer, extra space may need to be allocated for reading (advancing
+ * Buf_Read_Alloc_Count). After the data has been read out, the space
+ * needs to be released (advancing Buf_Read_Count).
+ *
+ * The actual members, buf_read_count, buf_read_alloc_count,
+ * munge_count, buf_write_count, and buf_write_alloc_count take the
+ * value of the corresponding capitalized _Count values modulo 2^32
+ * (UINT_MAX+1). Subtracting a "higher" _count value from a "lower"
+ * _count value gives the same answer as subtracting a "higher" _Count
+ * value from a "lower" _Count value because prealloc_bufsz < UINT_MAX+1.
+ * The modulo operation is done implicitly.
+ *
+ * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value
+ * of the corresponding capitalized _Count values modulo prealloc_bufsz.
+ * These correspond to byte indices in the physical buffer. The modulo
+ * operation is done by subtracting prealloc_bufsz when the value
+ * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is
+ * less than or equal to UINT_MAX).
+ */
+struct comedi_async {
+ void *prealloc_buf;
+ unsigned int prealloc_bufsz;
+ struct comedi_buf_map *buf_map;
+ unsigned int max_bufsize;
+ unsigned int buf_write_count;
+ unsigned int buf_write_alloc_count;
+ unsigned int buf_read_count;
+ unsigned int buf_read_alloc_count;
+ unsigned int buf_write_ptr;
+ unsigned int buf_read_ptr;
+ unsigned int cur_chan;
+ unsigned int scans_done;
+ unsigned int scan_progress;
+ unsigned int munge_chan;
+ unsigned int munge_count;
+ unsigned int munge_ptr;
+ unsigned int events;
+ struct comedi_cmd cmd;
+ wait_queue_head_t wait_head;
+ unsigned int cb_mask;
+ int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned int x);
+};
+
+/**
+ * enum comedi_cb - &struct comedi_async callback "events"
+ * @COMEDI_CB_EOS: end-of-scan
+ * @COMEDI_CB_EOA: end-of-acquisition/output
+ * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
+ * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
+ * @COMEDI_CB_ERROR: card error during acquisition
+ * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
+ * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
+ * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
+ */
+enum comedi_cb {
+ COMEDI_CB_EOS = BIT(0),
+ COMEDI_CB_EOA = BIT(1),
+ COMEDI_CB_BLOCK = BIT(2),
+ COMEDI_CB_EOBUF = BIT(3),
+ COMEDI_CB_ERROR = BIT(4),
+ COMEDI_CB_OVERFLOW = BIT(5),
+ /* masks */
+ COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW),
+ COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
+};
+
+/**
+ * struct comedi_driver - COMEDI driver registration
+ * @driver_name: Name of driver.
+ * @module: Owning module.
+ * @attach: The optional "attach" handler for manually configured COMEDI
+ * devices.
+ * @detach: The "detach" handler for deconfiguring COMEDI devices.
+ * @auto_attach: The optional "auto_attach" handler for automatically
+ * configured COMEDI devices.
+ * @num_names: Optional number of "board names" supported.
+ * @board_name: Optional pointer to a pointer to a board name. The pointer
+ * to a board name is embedded in an element of a driver-defined array
+ * of static, read-only board type information.
+ * @offset: Optional size of each element of the driver-defined array of
+ * static, read-only board type information, i.e. the offset between each
+ * pointer to a board name.
+ *
+ * This is used with comedi_driver_register() and comedi_driver_unregister() to
+ * register and unregister a low-level COMEDI driver with the COMEDI core.
+ *
+ * If @num_names is non-zero, @board_name should be non-NULL, and @offset
+ * should be at least sizeof(*board_name). These are used by the handler for
+ * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by
+ * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a
+ * hardware device and its driver by driver name. This is only useful if the
+ * @attach handler is set. If @num_names is non-zero, the driver's @attach
+ * handler will be called with the COMEDI device structure's board_ptr member
+ * pointing to the matched pointer to a board name within the driver's private
+ * array of static, read-only board type information.
+ *
+ * The @detach handler has two roles. If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal). It is also called when
+ * the @attach or @auto_attach handler returns an error. Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called. If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources. The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
+ */
+struct comedi_driver {
+ /* private: */
+ struct comedi_driver *next; /* Next in list of COMEDI drivers. */
+ /* public: */
+ const char *driver_name;
+ struct module *module;
+ int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
+ void (*detach)(struct comedi_device *dev);
+ int (*auto_attach)(struct comedi_device *dev, unsigned long context);
+ unsigned int num_names;
+ const char *const *board_name;
+ int offset;
+};
+
+/**
+ * struct comedi_device - Working data for a COMEDI device
+ * @use_count: Number of open file objects.
+ * @driver: Low-level COMEDI driver attached to this COMEDI device.
+ * @pacer: Optional pointer to a dynamically allocated acquisition pacer
+ * control. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @private: Optional pointer to private data allocated by the low-level
+ * driver. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @class_dev: Sysfs comediX device.
+ * @minor: Minor device number of COMEDI char device (0-47).
+ * @detach_count: Counter incremented every time the COMEDI device is detached.
+ * Used for checking a previous attachment is still valid.
+ * @hw_dev: Optional pointer to the low-level hardware &struct device. It is
+ * required for automatically configured COMEDI devices and optional for
+ * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although
+ * the bus-specific COMEDI functions only work if it is set correctly.
+ * It is also passed to dma_alloc_coherent() for COMEDI subdevices that
+ * have their 'async_dma_dir' member set to something other than
+ * %DMA_NONE.
+ * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. When
+ * the low-level driver's "attach" handler is called by the handler for
+ * the %COMEDI_DEVCONFIG ioctl, it either points to a matched board name
+ * string if the 'num_names' member of the &struct comedi_driver is
+ * non-zero, otherwise it points to the low-level driver name string.
+ * When the low-level driver's "auto_attach" handler is called for an
+ * automatically configured COMEDI device, it points to the low-level
+ * driver name string. The low-level driver is free to change it in its
+ * "attach" or "auto_attach" handler if it wishes.
+ * @board_ptr: Optional pointer to private, read-only board type information in
+ * the low-level driver. If the 'num_names' member of the &struct
+ * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl
+ * will point it to a pointer to a matched board name string within the
+ * driver's private array of static, read-only board type information when
+ * calling the driver's "attach" handler. The low-level driver is free to
+ * change it.
+ * @attached: Flag indicating that the COMEDI device is attached to a low-level
+ * driver.
+ * @ioenabled: Flag used to indicate that a PCI device has been enabled and
+ * its regions requested.
+ * @spinlock: Generic spin-lock for use by the low-level driver.
+ * @mutex: Generic mutex for use by the COMEDI core module.
+ * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device
+ * being detached while an operation is in progress. The down_write()
+ * operation is only allowed while @mutex is held and is used when
+ * changing @attached and @detach_count and calling the low-level driver's
+ * "detach" handler. The down_read() operation is generally used without
+ * holding @mutex.
+ * @refcount: &struct kref reference counter for freeing COMEDI device.
+ * @n_subdevices: Number of COMEDI subdevices allocated by the low-level
+ * driver for this device.
+ * @subdevices: Dynamically allocated array of COMEDI subdevices.
+ * @mmio: Optional pointer to a remapped MMIO region set by the low-level
+ * driver.
+ * @iobase: Optional base of an I/O port region requested by the low-level
+ * driver.
+ * @iolen: Length of I/O port region requested at @iobase.
+ * @irq: Optional IRQ number requested by the low-level driver.
+ * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the read() file operation. Set by the low-level driver.
+ * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the write() file operation. Set by the low-level driver.
+ * @async_queue: Storage for fasync_helper().
+ * @open: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 0 to 1.
+ * @close: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 1 to 0.
+ * @insn_device_config: Optional pointer to a handler for all sub-instructions
+ * except %INSN_DEVICE_CONFIG_GET_ROUTES of the %INSN_DEVICE_CONFIG
+ * instruction. If this is not initialized by the low-level driver, a
+ * default handler will be set during post-configuration.
+ * @get_valid_routes: Optional pointer to a handler for the
+ * %INSN_DEVICE_CONFIG_GET_ROUTES sub-instruction of the
+ * %INSN_DEVICE_CONFIG instruction set. If this is not initialized by the
+ * low-level driver, a default handler that copies zero routes back to the
+ * user will be used.
+ *
+ * This is the main control data structure for a COMEDI device (as far as the
+ * COMEDI core is concerned). There are two groups of COMEDI devices -
+ * "legacy" devices that are configured by the handler for the
+ * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting
+ * from a call to comedi_auto_config() as a result of a bus driver probe in
+ * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated
+ * during module initialization if the "comedi_num_legacy_minors" module
+ * parameter is non-zero and use minor device numbers from 0 to
+ * comedi_num_legacy_minors minus one. The automatically configured COMEDI
+ * devices are allocated on demand and use minor device numbers from
+ * comedi_num_legacy_minors to 47.
+ */
+struct comedi_device {
+ int use_count;
+ struct comedi_driver *driver;
+ struct comedi_8254 *pacer;
+ void *private;
+
+ struct device *class_dev;
+ int minor;
+ unsigned int detach_count;
+ struct device *hw_dev;
+
+ const char *board_name;
+ const void *board_ptr;
+ unsigned int attached:1;
+ unsigned int ioenabled:1;
+ spinlock_t spinlock; /* generic spin-lock for low-level driver */
+ struct mutex mutex; /* generic mutex for COMEDI core */
+ struct rw_semaphore attach_lock;
+ struct kref refcount;
+
+ int n_subdevices;
+ struct comedi_subdevice *subdevices;
+
+ /* dumb */
+ void __iomem *mmio;
+ unsigned long iobase;
+ unsigned long iolen;
+ unsigned int irq;
+
+ struct comedi_subdevice *read_subdev;
+ struct comedi_subdevice *write_subdev;
+
+ struct fasync_struct *async_queue;
+
+ int (*open)(struct comedi_device *dev);
+ void (*close)(struct comedi_device *dev);
+ int (*insn_device_config)(struct comedi_device *dev,
+ struct comedi_insn *insn, unsigned int *data);
+ unsigned int (*get_valid_routes)(struct comedi_device *dev,
+ unsigned int n_pairs,
+ unsigned int *pair_data);
+};
+
+/*
+ * function prototypes
+ */
+
+void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
+
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
+int comedi_dev_put(struct comedi_device *dev);
+
+bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+
+void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
+void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
+
+int comedi_check_chanlist(struct comedi_subdevice *s,
+ int n,
+ unsigned int *chanlist);
+
+/* range stuff */
+
+#define RANGE(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define RANGE_ext(a, b) {(a) * 1e6, (b) * 1e6, RF_EXTERNAL}
+#define RANGE_mA(a, b) {(a) * 1e6, (b) * 1e6, UNIT_mA}
+#define RANGE_unitless(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define BIP_RANGE(a) {-(a) * 1e6, (a) * 1e6, 0}
+#define UNI_RANGE(a) {0, (a) * 1e6, 0}
+
+extern const struct comedi_lrange range_bipolar10;
+extern const struct comedi_lrange range_bipolar5;
+extern const struct comedi_lrange range_bipolar2_5;
+extern const struct comedi_lrange range_unipolar10;
+extern const struct comedi_lrange range_unipolar5;
+extern const struct comedi_lrange range_unipolar2_5;
+extern const struct comedi_lrange range_0_20mA;
+extern const struct comedi_lrange range_4_20mA;
+extern const struct comedi_lrange range_0_32mA;
+extern const struct comedi_lrange range_unknown;
+
+#define range_digital range_unipolar5
+
+/**
+ * struct comedi_lrange - Describes a COMEDI range table
+ * @length: Number of entries in the range table.
+ * @range: Array of &struct comedi_krange, one for each range.
+ *
+ * Each element of @range[] describes the minimum and maximum physical range
+ * and the type of units. Typically, the type of unit is %UNIT_volt
+ * (i.e. volts) and the minimum and maximum are in millionths of a volt.
+ * There may also be a flag that indicates the minimum and maximum are merely
+ * scale factors for an unknown, external reference.
+ */
+struct comedi_lrange {
+ int length;
+ struct comedi_krange range[] __counted_by(length);
+};
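+
+/*
+ * Illustrative sketch (not part of the original header): a driver-defined
+ * range table built with the helper macros above, e.g. for an AI subdevice
+ * offering +/-10V, +/-5V, 0-10V and 0-5V ranges:
+ *
+ * static const struct comedi_lrange foo_ai_range = {
+ *  4, {
+ *   BIP_RANGE(10),
+ *   BIP_RANGE(5),
+ *   UNI_RANGE(10),
+ *   UNI_RANGE(5)
+ *  }
+ * };
+ */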
+
+/**
+ * comedi_range_is_bipolar() - Test if subdevice range is bipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min < 0;
+}
+
+/**
+ * comedi_range_is_unipolar() - Test if subdevice range is unipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min >= 0;
+}
+
+/**
+ * comedi_range_is_external() - Test if subdevice range is external
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_range_is_external(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return !!(s->range_table->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min < 0;
+}
+
+/**
+ * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min >= 0;
+}
+
+/**
+ * comedi_chan_range_is_external() - Test if channel-specific range is external
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_offset_munge() - Convert between offset binary and 2's complement
+ * @s: COMEDI subdevice.
+ * @val: Value to be converted.
+ *
+ * Toggles the highest bit of a sample value to toggle between offset binary
+ * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1.
+ *
+ * Return: The converted value.
+ */
+static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
+ unsigned int val)
+{
+ return val ^ s->maxdata ^ (s->maxdata >> 1);
+}
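+
+/*
+ * Worked example (not part of the original header): for a 12-bit subdevice
+ * (s->maxdata == 0xfff), the expression reduces to val ^ 0x800, i.e. it
+ * toggles bit 11: 0x000 <-> 0x800 and 0xfff <-> 0x7ff.
+ */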
+
+/**
+ * comedi_bytes_per_sample() - Determine subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not.
+ *
+ * Return: The subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
+}
+
+/**
+ * comedi_sample_shift() - Determine log2 of subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the
+ * sample size will be 2 or 1 and can be used as the right operand of a
+ * bit-shift operator to multiply or divide something by the sample size.
+ *
+ * Return: log2 of the subdevice sample size.
+ */
+static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
+}
+
+/**
+ * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples
+ * @s: COMEDI subdevice.
+ * @nbytes: Number of bytes
+ *
+ * Return: The number of bytes divided by the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ return nbytes >> comedi_sample_shift(s);
+}
+
+/**
+ * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes
+ * @s: COMEDI subdevice.
+ * @nsamples: Number of samples.
+ *
+ * Return: The number of samples multiplied by the subdevice sample size.
+ * (Does not check for arithmetic overflow.)
+ */
+static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ return nsamples << comedi_sample_shift(s);
+}
+
+/**
+ * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source
+ * @src: Pointer to the trigger source to validate.
+ * @flags: Bitmask of valid %TRIG_* for the trigger.
+ *
+ * This is used in "step 1" of the do_cmdtest functions of comedi drivers
+ * to validate the comedi_cmd triggers. The mask of the @src against the
+ * @flags allows the userspace comedilib to pass all the comedi_cmd
+ * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources.
+ *
+ * Return:
+ * 0 if trigger sources in *@src are all supported.
+ * -EINVAL if any trigger source in *@src is unsupported.
+ */
+static inline int comedi_check_trigger_src(unsigned int *src,
+ unsigned int flags)
+{
+ unsigned int orig_src = *src;
+
+ *src = orig_src & flags;
+ if (*src == TRIG_INVALID || *src != orig_src)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_is_unique() - Make sure a trigger source is unique
+ * @src: The trigger source to check.
+ *
+ * Return:
+ * 0 if no more than one trigger source is set.
+ * -EINVAL if more than one trigger source is set.
+ */
+static inline int comedi_check_trigger_is_unique(unsigned int src)
+{
+ /* this test is true if more than one _src bit is set */
+ if ((src & (src - 1)) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_is() - Trivially validate a trigger argument
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The value the argument should be.
+ *
+ * Forces *@arg to be @val.
+ *
+ * Return:
+ * 0 if *@arg was already @val.
+ * -EINVAL if *@arg differed from @val.
+ */
+static inline int comedi_check_trigger_arg_is(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg != val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The minimum value the argument should be.
+ *
+ * Forces *@arg to be at least @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already at least @val.
+ * -EINVAL if *@arg was less than @val.
+ */
+static inline int comedi_check_trigger_arg_min(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg < val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The maximum value the argument should be.
+ *
+ * Forces *@arg to be no more than @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already no more than @val.
+ * -EINVAL if *@arg was greater than @val.
+ */
+static inline int comedi_check_trigger_arg_max(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg > val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
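+
+/*
+ * Illustrative sketch (not part of the original header): the helpers above
+ * are typically OR-ed together in the numbered steps of a driver's
+ * (*do_cmdtest) handler. The supported trigger sources and the 10 us
+ * minimum scan period are hypothetical, and steps 4 and 5 are elided.
+ *
+ * static int foo_ai_cmdtest(struct comedi_device *dev,
+ *  struct comedi_subdevice *s, struct comedi_cmd *cmd)
+ * {
+ *  int err = 0;
+ *
+ *  // Step 1 : check if triggers are trivially valid
+ *  err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ *  err |= comedi_check_trigger_src(&cmd->scan_begin_src,
+ *   TRIG_TIMER | TRIG_EXT);
+ *  if (err)
+ *   return 1;
+ *
+ *  // Step 2a : make sure trigger sources are unique
+ *  err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
+ *  if (err)
+ *   return 2;
+ *
+ *  // Step 3 : check if arguments are trivially valid
+ *  err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ *  if (cmd->scan_begin_src == TRIG_TIMER)
+ *   err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
+ *    10000);
+ *  if (err)
+ *   return 3;
+ *
+ *  return 0;
+ * }
+ */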
+
+/*
+ * Must set dev->hw_dev if you wish to DMA directly into comedi's buffer.
+ * Also useful for retrieving a previously configured hardware device of
+ * known bus type. Set automatically for auto-configured devices.
+ * Automatically set to NULL when detaching hardware device.
+ */
+int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
+
+/**
+ * comedi_buf_n_bytes_ready() - Determine amount of unread data in buffer
+ * @s: COMEDI subdevice.
+ *
+ * Determines the number of bytes of unread data in the asynchronous
+ * acquisition data buffer for a subdevice. The data in question might not
+ * have been fully "munged" yet.
+ *
+ * Return: The amount of unread data in bytes.
+ */
+static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s)
+{
+ return s->async->buf_write_count - s->async->buf_read_count;
+}
+
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
+unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples);
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples);
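+
+/*
+ * Illustrative sketch (not part of the original header): an interrupt
+ * handler for an AI command typically pushes samples into the async buffer
+ * and lets comedi_handle_events() (declared below) wake readers and run the
+ * (*cancel) handler on terminal events. FOO_AI_FIFO_REG is hypothetical.
+ *
+ * static irqreturn_t foo_interrupt(int irq, void *d)
+ * {
+ *  struct comedi_device *dev = d;
+ *  struct comedi_subdevice *s = dev->read_subdev;
+ *  struct comedi_cmd *cmd = &s->async->cmd;
+ *  unsigned short sample = inw(dev->iobase + FOO_AI_FIFO_REG);
+ *
+ *  comedi_buf_write_samples(s, &sample, 1);
+ *  if (cmd->stop_src == TRIG_COUNT &&
+ *      s->async->scans_done >= cmd->stop_arg)
+ *   s->async->events |= COMEDI_CB_EOA;
+ *  comedi_handle_events(dev, s);
+ *  return IRQ_HANDLED;
+ * }
+ */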
+
+/* drivers.c - general comedi driver functions */
+
+#define COMEDI_TIMEOUT_MS 1000
+
+int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ int (*cb)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned long context),
+ unsigned long context);
+
+unsigned int comedi_handle_events(struct comedi_device *dev,
+ struct comedi_subdevice *s);
+
+int comedi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data,
+ unsigned int mask);
+unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans);
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples);
+void comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes);
+
+void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size);
+int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices);
+int comedi_alloc_subdev_readback(struct comedi_subdevice *s);
+
+int comedi_readback_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+
+int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev,
+ const char *name,
+ int (*cb)(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context),
+ unsigned long context);
+
+int __comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+int comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+void comedi_legacy_detach(struct comedi_device *dev);
+
+int comedi_auto_config(struct device *hardware_device,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_auto_unconfig(struct device *hardware_device);
+
+int comedi_driver_register(struct comedi_driver *driver);
+void comedi_driver_unregister(struct comedi_driver *driver);
+
+/**
+ * module_comedi_driver() - Helper macro for registering a comedi driver
+ * @__comedi_driver: comedi_driver struct
+ *
+ * Helper macro for comedi drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only use
+ * this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_comedi_driver(__comedi_driver) \
+ module_driver(__comedi_driver, comedi_driver_register, \
+ comedi_driver_unregister)
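+
+/*
+ * Illustrative sketch (not part of the original header): a minimal legacy
+ * driver registration. The "foo" names are hypothetical; comedi_legacy_detach
+ * (declared above) suffices as the detach handler for drivers that only
+ * request an I/O region via comedi_request_region().
+ *
+ * static struct comedi_driver foo_driver = {
+ *  .driver_name = "foo",
+ *  .module = THIS_MODULE,
+ *  .attach = foo_attach,
+ *  .detach = comedi_legacy_detach,
+ * };
+ * module_comedi_driver(foo_driver);
+ */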
+
+#endif /* _COMEDIDEV_H */
diff --git a/include/linux/comedi/comedilib.h b/include/linux/comedi/comedilib.h
new file mode 100644
index 000000000000..0223c9cd9215
--- /dev/null
+++ b/include/linux/comedi/comedilib.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedilib.h
+ * Header file for kcomedilib
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _LINUX_COMEDILIB_H
+#define _LINUX_COMEDILIB_H
+
+struct comedi_device *comedi_open(const char *path);
+int comedi_close(struct comedi_device *dev);
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io);
+int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int io);
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel);
+int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
+ unsigned int subd);
+int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
+
+#endif /* _LINUX_COMEDILIB_H */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 25a521d299c1..e94776496049 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -29,21 +29,18 @@ enum compact_result {
/* compaction didn't start as it was deferred due to past failures */
COMPACT_DEFERRED,
- /* compaction not active last round */
- COMPACT_INACTIVE = COMPACT_DEFERRED,
-
/* For more detailed tracepoint output - internal to compaction */
COMPACT_NO_SUITABLE_PAGE,
/* compaction should continue to another pageblock */
COMPACT_CONTINUE,
/*
- * The full zone was compacted scanned but wasn't successfull to compact
+ * The full zone was scanned by compaction, but it wasn't able to compact
* suitable pages.
*/
COMPACT_COMPLETE,
/*
- * direct compaction has scanned part of the zone but wasn't successfull
+ * direct compaction has scanned part of the zone but wasn't able
* to compact suitable pages.
*/
COMPACT_PARTIAL_SKIPPED,
@@ -84,12 +81,6 @@ static inline unsigned long compact_gap(unsigned int order)
}
#ifdef CONFIG_COMPACTION
-extern int sysctl_compact_memory;
-extern unsigned int sysctl_compaction_proactiveness;
-extern int sysctl_compaction_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_extfrag_threshold;
-extern int sysctl_compact_unevictable_allowed;
extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
@@ -98,92 +89,17 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
const struct alloc_context *ac, enum compact_priority prio,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int highest_zoneidx);
+extern bool compaction_suitable(struct zone *zone, int order,
+ int highest_zoneidx);
-extern void defer_compaction(struct zone *zone, int order);
-extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-extern bool compaction_restarting(struct zone *zone, int order);
-
-/* Compaction has made some progress and retrying makes sense */
-static inline bool compaction_made_progress(enum compact_result result)
-{
- /*
- * Even though this might sound confusing this in fact tells us
- * that the compaction successfully isolated and migrated some
- * pageblocks.
- */
- if (result == COMPACT_SUCCESS)
- return true;
-
- return false;
-}
-
-/* Compaction has failed and it doesn't make much sense to keep retrying. */
-static inline bool compaction_failed(enum compact_result result)
-{
- /* All zones were scanned completely and still not result. */
- if (result == COMPACT_COMPLETE)
- return true;
-
- return false;
-}
-
-/* Compaction needs reclaim to be performed first, so it can continue. */
-static inline bool compaction_needs_reclaim(enum compact_result result)
-{
- /*
- * Compaction backed off due to watermark checks for order-0
- * so the regular reclaim has to try harder and reclaim something.
- */
- if (result == COMPACT_SKIPPED)
- return true;
-
- return false;
-}
-
-/*
- * Compaction has backed off for some reason after doing some work or none
- * at all. It might be throttling or lock contention. Retrying might be still
- * worthwhile, but with a higher priority if allowed.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
-{
- /*
- * If compaction is deferred for high-order allocations, it is
- * because sync compaction recently failed. If this is the case
- * and the caller requested a THP allocation, we do not want
- * to heavily disrupt the system, so we fail the allocation
- * instead of entering direct reclaim.
- */
- if (result == COMPACT_DEFERRED)
- return true;
-
- /*
- * If compaction in async mode encounters contention or blocks higher
- * priority task we back off early rather than cause stalls.
- */
- if (result == COMPACT_CONTENDED)
- return true;
-
- /*
- * Page scanners have met but we haven't scanned full zones so this
- * is a back off in fact.
- */
- if (result == COMPACT_PARTIAL_SKIPPED)
- return true;
-
- return false;
-}
-
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
-extern int kcompactd_run(int nid);
-extern void kcompactd_stop(int nid);
+extern void __meminit kcompactd_run(int nid);
+extern void __meminit kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
@@ -191,44 +107,14 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int highest_zoneidx)
-{
- return COMPACT_SKIPPED;
-}
-
-static inline void defer_compaction(struct zone *zone, int order)
-{
-}
-
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
- return true;
-}
-
-static inline bool compaction_made_progress(enum compact_result result)
-{
- return false;
-}
-
-static inline bool compaction_failed(enum compact_result result)
-{
- return false;
-}
-
-static inline bool compaction_needs_reclaim(enum compact_result result)
+static inline bool compaction_suitable(struct zone *zone, int order,
+ int highest_zoneidx)
{
return false;
}
-static inline bool compaction_withdrawn(enum compact_result result)
+static inline void kcompactd_run(int nid)
{
- return true;
-}
-
-static inline int kcompactd_run(int nid)
-{
- return 0;
}
static inline void kcompactd_stop(int nid)
{
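
The compaction_suitable() change above is a semantic API change, not just a cleanup: the function now answers a yes/no question and drops the alloc_flags argument. A sketch of how a call site adapts (try_compaction() is a hypothetical helper, not from this patch):

	/* before: callers compared against the enum */
	if (compaction_suitable(zone, order, alloc_flags, highest_zoneidx)
						== COMPACT_CONTINUE)
		try_compaction(zone);

	/* after: plain boolean, no alloc_flags */
	if (compaction_suitable(zone, order, highest_zoneidx))
		try_compaction(zone);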
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b354ce58966e..233f61ec8afc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -20,11 +20,8 @@
#include <linux/unistd.h>
#include <asm/compat.h>
-
-#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
-#endif
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -75,7 +72,6 @@
__diag_push(); \
__diag_ignore(GCC, 8, "-Wattribute-alias", \
"Type aliasing is used to sanitize syscall arguments");\
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_compat_sys##name)))); \
ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
@@ -91,7 +87,10 @@
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */
-#ifdef CONFIG_COMPAT
+struct compat_iovec {
+ compat_uptr_t iov_base;
+ compat_size_t iov_len;
+};
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
@@ -209,12 +208,11 @@ typedef struct compat_siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
sizeof(short) : __alignof__(compat_uptr_t))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -231,6 +229,12 @@ typedef struct compat_siginfo {
char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
u32 _pkey;
} _addr_pkey;
+ /* used when si_code=TRAP_PERF */
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ u32 _flags;
+ } _perf;
};
} _sigfault;
@@ -248,16 +252,42 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
-struct compat_iovec {
- compat_uptr_t iov_base;
- compat_size_t iov_len;
-};
-
struct compat_rlimit {
compat_ulong_t rlim_cur;
compat_ulong_t rlim_max;
};
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+ __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+ __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+ __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
struct compat_rusage {
struct old_timeval32 ru_utime;
struct old_timeval32 ru_stime;
@@ -381,6 +411,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
+struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -394,14 +425,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-
-#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
-
-long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- unsigned long bitmap_size);
-long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- unsigned long bitmap_size);
void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to,
@@ -425,7 +448,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
unsigned int size)
{
/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
+#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
@@ -442,6 +465,73 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
#endif
}
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_put_user(__s->sig[3] >> 32, &__c->sig[7], label); \
+ unsafe_put_user(__s->sig[3], &__c->sig[6], label); \
+ fallthrough; \
+ case 3: \
+ unsafe_put_user(__s->sig[2] >> 32, &__c->sig[5], label); \
+ unsafe_put_user(__s->sig[2], &__c->sig[4], label); \
+ fallthrough; \
+ case 2: \
+ unsafe_put_user(__s->sig[1] >> 32, &__c->sig[3], label); \
+ unsafe_put_user(__s->sig[1], &__c->sig[2], label); \
+ fallthrough; \
+ case 1: \
+ unsafe_put_user(__s->sig[0] >> 32, &__c->sig[1], label); \
+ unsafe_put_user(__s->sig[0], &__c->sig[0], label); \
+ } \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ compat_sigset_word hi, lo; \
+ sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_get_user(lo, &__c->sig[7], label); \
+ unsafe_get_user(hi, &__c->sig[6], label); \
+ __s->sig[3] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 3: \
+ unsafe_get_user(lo, &__c->sig[5], label); \
+ unsafe_get_user(hi, &__c->sig[4], label); \
+ __s->sig[2] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 2: \
+ unsafe_get_user(lo, &__c->sig[3], label); \
+ unsafe_get_user(hi, &__c->sig[2], label); \
+ __s->sig[1] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 1: \
+ unsafe_get_user(lo, &__c->sig[1], label); \
+ unsafe_get_user(hi, &__c->sig[0], label); \
+ __s->sig[0] = hi | (((long)lo) << 32); \
+ } \
+} while (0)
+#else
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ sigset_t *__s = set; \
+ \
+ unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \
+} while (0)
+#endif
+
extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
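
The unsafe_{get,put}_compat_sigset() macros above follow the usual unsafe_*_user() contract: they may only run between user_access_begin() and user_access_end(), with a local label catching faults. A hedged sketch of a caller (the function name is illustrative):

	static int example_read_sigset(sigset_t *set,
				       const compat_sigset_t __user *compat)
	{
		if (!user_access_begin(compat, sizeof(*compat)))
			return -EFAULT;
		unsafe_get_compat_sigset(set, compat, Efault);
		user_access_end();
		return 0;
	Efault:
		user_access_end();
		return -EFAULT;
	}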
@@ -451,14 +541,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
struct epoll_event; /* fortunately, this one is fixed-layout */
-extern ssize_t compat_rw_copy_check_uvector(int type,
- const struct compat_iovec __user *uvector,
- unsigned long nr_segs,
- unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer);
-
-extern void __user *compat_alloc_user_space(unsigned long len);
-
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define unsafe_compat_save_altstack(uss, sp, label) do { \
@@ -468,8 +550,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
&__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
} while (0);
/*
@@ -501,34 +581,23 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);
-
-/* fs/cookies.c */
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
-
-/* fs/eventpoll.c */
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/fcntl.c */
+asmlinkage long compat_sys_epoll_pwait2(int epfd,
+ struct epoll_event __user *events,
+ int maxevents,
+ const struct __kernel_timespec __user *timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/ioctl.c */
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/namespace.c */
-asmlinkage long compat_sys_mount(const char __user *dev_name,
- const char __user *dir_name,
- const char __user *type, compat_ulong_t flags,
- const void __user *data);
-
-/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
@@ -543,44 +612,32 @@ asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
-
-/* fs/readdir.c */
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
-asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
/* No generic prototype for pread64 and pwrite64 */
asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
asmlinkage long compat_sys_pwritev64(unsigned long fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
-
-/* fs/sendfile.c */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset, compat_size_t count);
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t __user *offset, compat_size_t count);
-
-/* fs/select.c */
asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
@@ -601,72 +658,45 @@ asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-
-/* fs/splice.c */
-asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
- unsigned int nr_segs, unsigned int flags);
-
-/* fs/stat.c */
asmlinkage long compat_sys_newfstatat(unsigned int dfd,
const char __user *filename,
struct compat_stat __user *statbuf,
int flag);
asmlinkage long compat_sys_newfstat(unsigned int fd,
struct compat_stat __user *statbuf);
-
-/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
-
-/* kernel/exit.c */
+/* No generic prototype for sync_file_range and sync_file_range2 */
asmlinkage long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo __user *, int,
struct compat_rusage __user *);
-
-
-
-/* kernel/futex.c */
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len);
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
-
-/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
struct old_itimerval32 __user *in,
struct old_itimerval32 __user *out);
-
-/* kernel/kexec.c */
asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
compat_ulong_t nr_segments,
struct compat_kexec_segment __user *,
compat_ulong_t flags);
-
-/* kernel/posix-timers.c */
asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
-
-/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);
-
-/* kernel/sched/core.c */
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-
-/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
@@ -691,25 +721,17 @@ asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
-
-/* kernel/sys.c */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
asmlinkage long compat_sys_getrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_setrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
-
-/* kernel/time.c */
asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-
-/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
@@ -718,22 +740,14 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
const struct compat_mq_attr __user *u_mqstat,
struct compat_mq_attr __user *u_omqstat);
-
-/* ipc/msg.c */
asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-
-/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-
-/* net/socket.c */
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
@@ -741,40 +755,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
unsigned int flags);
-
-/* mm/filemap.c: No generic prototype for readahead */
-
-/* security/keys/keyctl.c */
+/* No generic prototype for readahead */
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-
-/* arch/example/kernel/sys_example.c */
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
-
-/* mm/fadvise.c: No generic prototype for fadvise64_64 */
-
-/* mm/, CONFIG_MMU only */
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-
+/* No generic prototype for fadvise64_64 */
+/* CONFIG_MMU only */
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -794,32 +781,24 @@ asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
int flags);
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags);
-asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
-asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp, int flags);
asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
-asmlinkage long compat_sys_readv64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
+asmlinkage long compat_sys_preadv64v2(unsigned long fd,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos, rwf_t flags);
#endif
#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
+ const struct iovec __user *vec,
unsigned long vlen, loff_t pos, rwf_t flags);
#endif
@@ -852,18 +831,18 @@ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
struct compat_old_linux_dirent __user *,
unsigned int count);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-/* obsolete: kernel/signal.c */
+/* obsolete */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -878,22 +857,48 @@ asmlinkage long compat_sys_sigaction(int sig,
struct compat_old_sigaction __user *oact);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
-/*
- * For most but not all architectures, "am I in a compat syscall?" and
- * "am I a compat task?" are the same question. For architectures on which
- * they aren't the same question, arch code can override in_compat_syscall.
- */
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+ compat_arg_u64(len));
+#endif
-#ifndef in_compat_syscall
-static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+ compat_arg_u64(nbytes), unsigned int flags);
#endif
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+ compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
+#endif
+
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/**
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
@@ -923,6 +928,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user * buf);
+#ifdef CONFIG_COMPAT
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
@@ -932,6 +948,24 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+
+/*
+ * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
+ * types, and will need special compat treatment for that. Most architectures
+ * don't need that special handling even for compat syscalls.
+ */
+#ifndef compat_need_64bit_alignment_fixup
+#define compat_need_64bit_alignment_fixup() false
+#endif
+
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
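
To illustrate the bitmap helpers that are now declared unconditionally: the classic consumer is a sched_setaffinity()-style path that converts a user bitmap of 32-bit compat words into the kernel's unsigned long representation. A sketch under that assumption (the wrapper itself is made up):

	/* Pull a compat bitmap of 'len' bytes from userspace into 'mask';
	 * BITS_TO_COMPAT_LONGS() would size a buffer in 32-bit compat words.
	 */
	static long example_get_user_mask(unsigned long *mask,
					  const compat_ulong_t __user *umask,
					  unsigned int len)
	{
		unsigned long bitmap_size = len * BITS_PER_BYTE;

		return compat_get_bitmap(mask, umask, bitmap_size); /* 0 or -EFAULT */
	}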
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index cee0c728d39a..49feac0162a5 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -5,14 +5,24 @@
/* Compiler specific definitions for Clang compiler */
-/* same as gcc, this was present in clang-2.6 so we can assume it works
- * with any version that can compile the kernel
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://github.com/llvm/llvm-project/commit/877210faa447f4cc7db87812f8ed80e398fedd61
*/
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
+/*
+ * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+ * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+ * to avoid adding redundant attributes in other configurations.
+ */
+
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
@@ -31,6 +41,12 @@
#define __no_sanitize_thread
#endif
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
#if __has_feature(undefined_behavior_sanitizer)
/* GCC does not have __SANITIZE_UNDEFINED__ */
#define __no_sanitize_undefined \
@@ -39,25 +55,66 @@
#define __no_sanitize_undefined
#endif
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
/*
- * Not all versions of clang implement the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides the behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
*/
-#if __has_builtin(__builtin_mul_overflow) && \
- __has_builtin(__builtin_add_overflow) && \
- __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
+/*
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
+ */
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
#endif
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like ICC.
+/*
+ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
+ * with no_sanitize("coverage"). Prior versions of Clang support coverage
+ * instrumentation, but cannot be queried for support by the preprocessor.
*/
-#define barrier() __asm__ __volatile__("" : : : "memory")
+#if __has_feature(coverage_sanitizer)
+#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
+#else
+#define __no_sanitize_coverage
+#endif
#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
+
+#if __has_feature(kcfi)
+/* Disable CFI checking inside a function. */
+#define __nocfi __attribute__((__no_sanitize__("kcfi")))
+#endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_clang(version, severity, s) \
+ __diag_clang_ ## version(__diag_clang_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_clang_ignore ignored
+#define __diag_clang_warn warning
+#define __diag_clang_error error
+
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(clang diagnostic s))
+
+#define __diag_clang_13(s) __diag(s)
+
+#define __diag_ignore_all(option, comment) \
+ __diag_clang(13, ignore, option)
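
A sketch of how the __diag machinery is consumed; __diag_push() and __diag_pop() are assumed to be supplied by compiler_types.h, and the warning option and comment are illustrative:

	__diag_push();
	__diag_ignore_all("-Wunused-function",
			  "example: generated helpers may be unused in some configs");
	static int example_generated_helper(void) { return 0; }
	__diag_pop();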
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 7a3769040d7d..aff92b1d284f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,30 +10,6 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
-#if GCC_VERSION < 40900
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
@@ -59,17 +35,10 @@
(typeof(ptr)) (__ptr + (off)); \
})
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __compiletime_warning(message) __attribute__((__warning__(message)))
-#define __compiletime_error(message) __attribute__((__error__(message)))
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -95,66 +64,76 @@
__builtin_unreachable(); \
} while (0)
-#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
-#define __randomize_layout __attribute__((randomize_layout))
-#define __no_randomize_layout __attribute__((no_randomize_layout))
-/* This anon struct can add padding, so only enable it under randstruct. */
-#define randomized_struct_fields_start struct {
-#define randomized_struct_fields_end } __randomize_layout;
-#endif
-
/*
- * GCC 'asm goto' miscompiles certain code sequences:
+ * GCC 'asm goto' with outputs miscompiles certain code sequences:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
*
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ * Work around it via the same compiler barrier quirk that we used
+ * to use for the old 'asm goto' workaround.
*
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Also, always mark such 'asm goto' statements as volatile: all
+ * asm goto statements are supposed to be volatile as per the
+ * documentation, but some versions of gcc didn't actually do
+ * that for asms with outputs:
*
- * (asm goto is automatically volatile - the naming reflects this.)
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
*/
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND
+#define asm_goto_output(x...) \
+ do { asm volatile goto(x); asm (""); } while (0)
+#endif
-/*
- * sparse (__CHECKER__) pretends to be gcc, but can't do constant
- * folding in __builtin_bswap*() (yet), so don't set these for it.
- */
-#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
#define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
#endif
-#if __has_attribute(__no_sanitize_address__)
-#define __no_sanitize_address __attribute__((no_sanitize_address))
-#else
-#define __no_sanitize_address
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
-#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#define __no_sanitize_address __attribute__((__no_sanitize_address__))
+
+#if defined(__SANITIZE_THREAD__)
+#define __no_sanitize_thread __attribute__((__no_sanitize_thread__))
#else
#define __no_sanitize_thread
#endif
-#if __has_attribute(__no_sanitize_undefined__)
-#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#define __no_sanitize_undefined __attribute__((__no_sanitize_undefined__))
+
+/*
+ * Only supported since gcc >= 12
+ */
+#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
+#define __no_sanitize_coverage __attribute__((__no_sanitize_coverage__))
#else
-#define __no_sanitize_undefined
+#define __no_sanitize_coverage
#endif
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel,
+ * matching the defines used by Clang.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
#endif
/*
+ * GCC does not support KMSAN.
+ */
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+
+/*
* Turn individual warnings and errors on and off locally, depending
* on version.
*/
@@ -176,4 +155,13 @@
#define __diag_GCC_8(s)
#endif
-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
+#define __diag_ignore_all(option, comment) \
+ __diag(__diag_GCC_ignore option)
+
+/*
+ * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
+ * attribute) do not work, and must be disabled.
+ */
+#if GCC_VERSION < 90100
+#undef __alloc_size__
+#endif
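
To make the asm_goto_output() workaround concrete, a hedged, x86-flavoured sketch of an 'asm goto' statement with an output operand; the instruction, the constraints, and the fault label (which a real user would reach via an exception-table fixup) are illustrative only:

	static inline bool example_load(unsigned long *dst, unsigned long *src)
	{
		asm_goto_output("mov %1, %0"
				: "=r" (*dst)	/* output valid on fallthrough */
				: "m" (*src)
				: /* no clobbers */
				: efault);
		return true;
	efault:
		return false;
	}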
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
deleted file mode 100644
index b17f3cd18334..000000000000
--- a/include/linux/compiler-intel.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_TYPES_H
-#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#ifdef __ECC
-
-/* Compiler specific definitions for Intel ECC compiler */
-
-#include <asm/intrinsics.h>
-
-/* Intel ECC compiler doesn't support gcc specific asm stmts.
- * It uses intrinsics to do the equivalent things.
- */
-
-#define barrier() __memory_barrier()
-#define barrier_data(ptr) barrier()
-
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __ptr = (unsigned long) (ptr); \
- (typeof(ptr)) (__ptr + (off)); })
-
-/* This should act as an optimization barrier on var.
- * Given that this compiler does not have inline assembly, a compiler barrier
- * is the best we can do.
- */
-#define OPTIMIZER_HIDE_VAR(var) barrier()
-
-#endif
-
-/* icc has this, but it's called _bswap16 */
-#define __HAVE_BUILTIN_BSWAP16__
-#define __builtin_bswap16 _bswap16
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
new file mode 100644
index 000000000000..573fa85b6c0c
--- /dev/null
+++ b/include/linux/compiler-version.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef __LINUX_COMPILER_VERSION_H
+#error "Please do not include <linux/compiler-version.h>. This is done by the build system."
+#endif
+#define __LINUX_COMPILER_VERSION_H
+
+/*
+ * This header exists to force full rebuild when the compiler is upgraded.
+ *
+ * When fixdep scans this, it will find this string "CONFIG_CC_VERSION_TEXT"
+ * and add dependency on include/config/CC_VERSION_TEXT, which is touched
+ * by Kconfig when the version string from the compiler changes.
+ */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6810d80acb0b..52730e423681 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -12,11 +12,10 @@
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
- && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
int expect, int is_constant);
-
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
@@ -24,7 +23,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
long ______r; \
static struct ftrace_likely_data \
__aligned(4) \
- __section(_ftrace_annotated_branch) \
+ __section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -60,7 +59,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __trace_if_value(cond) ({ \
static struct ftrace_branch_data \
__aligned(4) \
- __section(_ftrace_branch) \
+ __section("_ftrace_branch") \
__if_trace = { \
.func = __func__, \
.file = __FILE__, \
@@ -76,15 +75,31 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# define likely_notrace(x) likely(x)
+# define unlikely_notrace(x) unlikely(x)
#endif
/* Optimization barrier */
#ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
#endif
#ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
/* workaround for GCC PR82365 if needed */
@@ -93,42 +108,30 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
/* Unreachable code */
-#ifdef CONFIG_STACK_VALIDATION
+#ifdef CONFIG_OBJTOOL
/*
* These macros help objtool understand GCC code flow for unreachable code.
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
-#define annotate_reachable() ({ \
- asm volatile("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
-})
-#define annotate_unreachable() ({ \
- asm volatile("%c0:\n\t" \
+#define __stringify_label(n) #n
+
+#define __annotate_unreachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
})
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
+#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(.rodata..c_jump_table)
+#define __annotate_jump_table __section(".rodata..c_jump_table")
-#else
-#define annotate_reachable()
+#else /* !CONFIG_OBJTOOL */
#define annotate_unreachable()
#define __annotate_jump_table
-#endif
+#endif /* CONFIG_OBJTOOL */
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
#ifndef unreachable
# define unreachable() do { \
annotate_unreachable(); \
@@ -155,7 +158,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
- __section("___kentry" "+" #sym ) \
+ __attribute__((__section__("___kentry+" #sym))) \
= (unsigned long)&sym;
#endif
@@ -166,16 +169,15 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif
+#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
+
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
#endif
-/* Not-quite-unique ID. */
-#ifndef __UNIQUE_ID
-# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
-#endif
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
/**
* data_race - mark an expression as containing intentional data races
@@ -205,9 +207,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* otherwise, or eliminated entirely due to lack of references that are
* visible to the compiler.
*/
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
+ __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;
#define __ADDRESSABLE(sym) \
- static void * __section(.discard.addressable) __used \
- __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
@@ -224,6 +228,60 @@ static inline void *offset_to_ptr(const int *off)
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ *
+ * Details:
+ * - sizeof() return an integer constant expression, and does not evaluate
+ * the value of its operand; it only examines the type of its operand.
+ * - The results of comparing two integer constant expressions is also
+ * an integer constant expression.
+ * - The first literal "8" isn't important. It could be any literal value.
+ * - The second literal "8" is to avoid warnings about unaligned pointers;
+ * this could otherwise just be "1".
+ * - (long)(x) is used to avoid warnings about 64-bit types on 32-bit
+ * architectures.
+ * - The C Standard defines "null pointer constant", "(void *)0", as
+ * distinct from other void pointers.
+ * - If (x) is an integer constant expression, then the "* 0l" resolves
+ * it into an integer constant expression of value 0. Since it is cast to
+ * "void *", this makes the second operand a null pointer constant.
+ * - If (x) is not an integer constant expression, then the second operand
+ * resolves to a void pointer (but not a null pointer constant: the value
+ * is not an integer constant 0).
+ * - The conditional operator's third operand, "(int *)8", is an object
+ * pointer (to type "int").
+ * - The behavior (including the return type) of the conditional operator
+ * ("operand1 ? operand2 : operand3") depends on the kind of expressions
+ * given for the second and third operands. This is the central mechanism
+ * of the macro:
+ * - When one operand is a null pointer constant (i.e. when x is an integer
+ * constant expression) and the other is an object pointer (i.e. our
+ * third operand), the conditional operator returns the type of the
+ *     object pointer operand (i.e. "int *"). Here, within the sizeof(), we
+ * would then get:
+ *       sizeof(*((int *)(...))) == sizeof(int)  == 4
+ * - When one operand is a void pointer (i.e. when x is not an integer
+ * constant expression) and the other is an object pointer (i.e. our
+ * third operand), the conditional operator returns a "void *" type.
+ * Here, within the sizeof(), we would then get:
+ *       sizeof(*((void *)(...))) == sizeof(void) == 1
+ * - The equality comparison to "sizeof(int)" therefore depends on (x):
+ * sizeof(int) == sizeof(int) (x) was a constant expression
+ * sizeof(int) != sizeof(void) (x) was not a constant expression
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
+ */
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+#define is_unsigned_type(type) (!is_signed_type(type))
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
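
A small self-check illustrating the properties described above (illustrative; sizeof(void) == 1 relies on the GNU C dialect the kernel is built in):

	static int example_var;

	/* A literal is an integer constant expression... */
	_Static_assert(__is_constexpr(42), "");
	/* ...a variable is not, yet __is_constexpr() itself still folds to a
	 * constant (0 here), so it stays usable in constant contexts: */
	_Static_assert(!__is_constexpr(example_var), "");
	/* The signedness helpers are compile-time as well: */
	_Static_assert(is_signed_type(int) && is_unsigned_type(unsigned int), "");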
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6122efdad6ad..8bdf6e0918c1 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -21,31 +21,6 @@
*/
/*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
- * by hand.
- *
- * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
- * depending on the compiler used to build it; however, these attributes have
- * no semantic effects for sparse, so it does not matter. Also note that,
- * in order to avoid sparse's warnings, even the unsupported ones must be
- * defined to 0.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
-# define __GCC4_has_attribute___copy__ 0
-# define __GCC4_has_attribute___designated_init__ 0
-# define __GCC4_has_attribute___externally_visible__ 1
-# define __GCC4_has_attribute___no_caller_saved_registers__ 0
-# define __GCC4_has_attribute___noclone__ 1
-# define __GCC4_has_attribute___nonstring__ 0
-# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
-# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
-# define __GCC4_has_attribute___fallthrough__ 0
-#endif
-
-/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
*/
#define __alias(symbol) __attribute__((__alias__(#symbol)))
@@ -59,6 +34,16 @@
#define __aligned_largest __attribute__((__aligned__))
/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
* Note: users of __always_inline currently do not write "inline" themselves,
* which seems to be required by gcc to apply the attribute according
* to its docs (and also "warning: always_inline function might not be
@@ -79,23 +64,16 @@
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*
- * Optional: only supported since gcc >= 4.9
- * Optional: not supported by icc
- *
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
*/
-#if __has_attribute(__assume_aligned__)
-# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
-#else
-# define __assume_aligned(a, ...)
-#endif
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
/*
- * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
- * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
*/
-#define __cold __attribute__((__cold__))
+#define __cleanup(func) __attribute__((__cleanup__(func)))
/*
* Note the long name.
@@ -107,7 +85,6 @@
/*
* Optional: only supported since gcc >= 9
* Optional: not supported by clang
- * Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
*/
@@ -118,6 +95,31 @@
#endif
/*
+ * Optional: only supported since gcc >= 15
+ * Optional: only supported since clang >= 18
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://github.com/llvm/llvm-project/pull/76348
+ */
+#if __has_attribute(__counted_by__)
+# define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
+
+/*
+ * Optional: not supported by gcc
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin
+ */
+#if __has_attribute(__diagnose_as_builtin__)
+# define __diagnose_as(builtin...) __attribute__((__diagnose_as_builtin__(builtin)))
+#else
+# define __diagnose_as(builtin...)
+#endif
+
+/*
* Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
* attribute warnings entirely and for good") for more information.
*
@@ -130,9 +132,7 @@
#define __deprecated
/*
- * Optional: only supported since gcc >= 5.1
* Optional: not supported by clang
- * Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
*/
@@ -143,6 +143,17 @@
#endif
/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
+ */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
@@ -168,6 +179,7 @@
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
*/
#define __malloc __attribute__((__malloc__))
@@ -205,6 +217,7 @@
* must end with any of these keywords:
* break;
* fallthrough;
+ * continue;
* goto <label>;
* return [expression];
*
@@ -217,6 +230,12 @@
#endif
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#flatten
+ */
+# define __flatten __attribute__((flatten))
+
+/*
* Note the missing underscores.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
@@ -227,7 +246,6 @@
/*
* Optional: only supported since gcc >= 8
* Optional: not supported by clang
- * Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
*/
@@ -238,6 +256,18 @@
#endif
/*
+ * Optional: only supported since GCC >= 7.1, clang >= 13.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function
+ */
+#if __has_attribute(__no_profile_instrument_function__)
+# define __no_profile __attribute__((__no_profile_instrument_function__))
+#else
+# define __no_profile
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
* clang: https://clang.llvm.org/docs/AttributeReference.html#id1
@@ -245,12 +275,53 @@
#define __noreturn __attribute__((__noreturn__))
/*
+ * Optional: only supported since GCC >= 11.1, clang >= 7.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fstack_005fprotector-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-stack-protector-safebuffers
+ */
+#if __has_attribute(__no_stack_protector__)
+# define __no_stack_protector __attribute__((__no_stack_protector__))
+#else
+# define __no_stack_protector
+#endif
+
+/*
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
+ */
+#if __has_attribute(__overloadable__)
+# define __overloadable __attribute__((__overloadable__))
+#else
+# define __overloadable
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
* clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
*/
#define __packed __attribute__((__packed__))
/*
+ * Note: the "type" argument should match any __builtin_object_size(p, type) usage.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size
+ */
+#if __has_attribute(__pass_dynamic_object_size__)
+# define __pass_dynamic_object_size(type) __attribute__((__pass_dynamic_object_size__(type)))
+#else
+# define __pass_dynamic_object_size(type)
+#endif
+#if __has_attribute(__pass_object_size__)
+# define __pass_object_size(type) __attribute__((__pass_object_size__(type)))
+#else
+# define __pass_object_size(type)
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
*/
#define __pure __attribute__((__pure__))
@@ -260,7 +331,19 @@
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
*/
-#define __section(S) __attribute__((__section__(#S)))
+#define __section(section) __attribute__((__section__(section)))
+
+/*
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+ */
+#if __has_attribute(__uninitialized__)
+# define __uninitialized __attribute__((__uninitialized__))
+#else
+# define __uninitialized
+#endif
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
@@ -279,9 +362,51 @@
#define __used __attribute__((__used__))
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check __attribute__((__warn_unused_result__))
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
+ */
+#if __has_attribute(__warning__)
+# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
+#else
+# define __compiletime_warning(msg)
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#disable-sanitizer-instrumentation
+ *
+ * disable_sanitizer_instrumentation is not equivalent to
+ * no_sanitize((<sanitizer-name>)): the latter may still let specific sanitizers
+ * insert code into functions to prevent false positives. Unlike that,
+ * disable_sanitizer_instrumentation prevents all kinds of instrumentation from
+ * being applied to functions carrying the attribute.
+ */
+#if __has_attribute(disable_sanitizer_instrumentation)
+# define __disable_sanitizer_instrumentation \
+ __attribute__((disable_sanitizer_instrumentation))
+#else
+# define __disable_sanitizer_instrumentation
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
*/
#define __weak __attribute__((__weak__))
+/*
+ * Used by functions that use '__builtin_return_address'. These functions
+ * must not be split or inlined, since either can cause
+ * '__builtin_return_address' to return an unexpected address.
+ */
+#define __fix_address noinline __noclone
+
#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4b33cb385f96..2abaa3a825a9 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -2,8 +2,29 @@
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
#ifndef __ASSEMBLY__
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
+#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
+# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
+#else
+# define BTF_TYPE_TAG(value) /* nothing */
+#endif
+
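A hedged expansion sketch: when BTF tagging is active, BTF_TYPE_TAG(user) turns the otherwise-empty __user marker below into a type tag preserved in BTF (declaration illustrative):

	void __attribute__((btf_type_tag("user"))) *uptr;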
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
# define __kernel __attribute__((address_space(0)))
@@ -11,11 +32,12 @@
# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __percpu __attribute__((noderef, address_space(__percpu)))
# define __rcu __attribute__((noderef, address_space(__rcu)))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
+# define __cond_acquires(x) __attribute__((context(x,0,-1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
@@ -32,16 +54,18 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# ifdef STRUCTLEAK_PLUGIN
# define __user __attribute__((user))
# else
-# define __user
+# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
-# define __percpu
-# define __rcu
+# define __percpu BTF_TYPE_TAG(percpu)
+# define __rcu BTF_TYPE_TAG(rcu)
+
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
/* context/locking */
# define __must_hold(x)
# define __acquires(x)
+# define __cond_acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
@@ -64,11 +88,64 @@ extern void __chk_io_ptr(const volatile void __iomem *);
/* Attributes */
#include <linux/compiler_attributes.h>
+#if CONFIG_FUNCTION_ALIGNMENT > 0
+#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT)
+#else
+#define __function_aligned
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ *
+ * When -falign-functions=N is in use, we must avoid the cold attribute as
+ * GCC drops the alignment for cold functions. Worse, GCC can implicitly mark
+ * callees of cold functions as cold themselves, so it's not sufficient to add
+ * __function_aligned here as that will not ensure that callees are correctly
+ * aligned.
+ *
+ * See:
+ *
+ * https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
+ */
+#if defined(CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT) || (CONFIG_FUNCTION_ALIGNMENT == 0)
+#define __cold __attribute__((__cold__))
+#else
+#define __cold
+#endif
+
+/*
+ * On x86-64 and arm64 targets, __preserve_most changes the calling convention
+ * of a function to make the code in the caller as unintrusive as possible. This
+ * convention behaves identically to the C calling convention on how arguments
+ * and return values are passed, but uses a different set of caller- and callee-
+ * saved registers.
+ *
+ * The purpose is to alleviate the burden of saving and restoring a large
+ * register set before and after the call in the caller. This is beneficial for
+ * rarely taken slow paths, such as error-reporting functions that may be called
+ * from hot paths.
+ *
+ * Note: This may conflict with instrumentation inserted on function entry which
+ * does not use __preserve_most or equivalent convention (if in assembly). Since
+ * function tracing assumes the normal C calling convention, where the attribute
+ * is supported, __preserve_most implies notrace. It is recommended to restrict
+ * use of the attribute to functions that should or already disable tracing.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
+ */
+#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
+# define __preserve_most notrace __attribute__((__preserve_most__))
+#else
+# define __preserve_most
+#endif
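A usage sketch under the constraints described above (function name hypothetical): a rarely taken error-reporting path is a natural candidate, since hot-path callers then avoid spilling the usual caller-saved register set.

	static __preserve_most __cold void report_rare_error(int code)	/* hypothetical */
	{
		/* Implies notrace where supported; do not rely on tracing here. */
		pr_err("rare error: %d\n", code);
	}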
+
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
-#elif defined(__INTEL_COMPILER)
-#include <linux/compiler-intel.h>
#elif defined(__GNUC__)
/* The above compilers also define __GNUC__, so order is important here. */
#include <linux/compiler-gcc.h>
@@ -110,12 +187,6 @@ struct ftrace_likely_data {
unsigned long constant;
};
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
@@ -132,8 +203,6 @@ struct ftrace_likely_data {
*/
#define __naked __attribute__((__naked__)) notrace
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
/*
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
@@ -193,19 +262,52 @@ struct ftrace_likely_data {
# define __no_kasan_or_inline __always_inline
#endif
-#define __no_kcsan __no_sanitize_thread
#ifdef __SANITIZE_THREAD__
+/*
+ * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
+ * atomics even with __no_sanitize_thread (to avoid false positives in userspace
+ * ThreadSanitizer). The kernel's requirements are stricter and we really do not
+ * want any instrumentation with __no_kcsan.
+ *
+ * Therefore we add __disable_sanitizer_instrumentation where available to
+ * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
+ */
+# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
+#else
+# define __no_kcsan
#endif
#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
+/* Do not trap wrapping arithmetic within an annotated function. */
+#ifdef CONFIG_UBSAN_SIGNED_WRAP
+# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
+#else
+# define __signed_wrap
+#endif
+
/* Section for code which can't be instrumented at all */
-#define noinstr \
- noinline notrace __attribute((__section__(".noinstr.text"))) \
- __no_kcsan __no_sanitize_address
+#define __noinstr_section(section) \
+ noinline notrace __attribute((__section__(section))) \
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory __signed_wrap
+
+#define noinstr __noinstr_section(".noinstr.text")
+
+/*
+ * The __cpuidle section is used twofold:
+ *
+ * 1) the original use -- identifying if a CPU is 'stuck' in idle state based
+ * on its instruction pointer. See cpu_in_idle().
+ *
+ * 2) suppressing instrumentation around where cpuidle disables RCU; where the
+ * function isn't strictly required for #1, this is interchangeable with
+ * noinstr.
+ */
+#define __cpuidle __noinstr_section(".cpuidle.text")
#endif /* __KERNEL__ */
@@ -220,15 +322,15 @@ struct ftrace_likely_data {
# define __latent_entropy
#endif
-#ifndef __randomize_layout
+#if defined(RANDSTRUCT) && !defined(__CHECKER__)
+# define __randomize_layout __designated_init __attribute__((randomize_layout))
+# define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+# define randomized_struct_fields_start struct {
+# define randomized_struct_fields_end } __randomize_layout;
+#else
# define __randomize_layout __designated_init
-#endif
-
-#ifndef __no_randomize_layout
# define __no_randomize_layout
-#endif
-
-#ifndef randomized_struct_fields_start
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif
@@ -237,8 +339,45 @@ struct ftrace_likely_data {
# define __noscs
#endif
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+#ifndef __nocfi
+# define __nocfi
+#endif
+
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
+#else
+# define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
+#endif
+
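A declaration sketch for the rule above (function names hypothetical): plain allocators take __alloc_size(), which also applies __malloc, while reallocators take __realloc_size() because the returned pointer may alias the one passed in.

	void *buf_alloc(size_t size) __alloc_size(1);
	void *buf_resize(void *p, size_t size) __realloc_size(2);	/* no __malloc: may alias p */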
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
+
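A sketch of the type-0 vs. type-1 distinction above (struct and helper hypothetical): __struct_size() covers the whole enclosing object, while __member_size() stops at the closest surrounding member, which is what a flexible-array bounds check wants, where the compiler can see the allocation size at all.

	struct blob {
		size_t len;
		char data[];		/* flexible array member */
	};

	static inline bool blob_fits(struct blob *b, size_t n)
	{
		return n <= __member_size(b->data);	/* sizes the member, not all of *b */
	}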
+/*
+ * Some versions of gcc do not mark 'asm goto' volatile:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
+ *
+ * We do it here by hand, because it doesn't hurt.
+ */
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm volatile goto(x)
#endif
#ifdef CONFIG_CC_HAS_ASM_INLINE
@@ -247,10 +386,6 @@ struct ftrace_likely_data {
#define asm_inline asm
#endif
-#ifndef __no_fgcse
-# define __no_fgcse
-#endif
-
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -281,21 +416,16 @@ struct ftrace_likely_data {
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
-
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
+ /* \
+ * __noreturn is needed to give the compiler enough \
+ * information to avoid certain possibly-uninitialized \
+ * warnings (regardless of the build failing). \
+ */ \
+ __noreturn extern void prefix ## suffix(void) \
+ __compiletime_error(msg); \
if (!(condition)) \
prefix ## suffix(); \
} while (0)
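For illustration, a hedged sketch of how this macro is typically wrapped (names illustrative; the kernel's real wrappers live in <linux/build_bug.h>): the extra indirection lets __COUNTER__ expand before token pasting, and the call to the undefined, __compiletime_error()-marked function only survives into the object file when the condition can be false.

	#define _my_assert(cond, msg, prefix, suffix) \
		__compiletime_assert(cond, msg, prefix, suffix)
	#define my_assert(cond, msg) \
		_my_assert(cond, msg, __my_assert_, __COUNTER__)

	/* In a function body: fails the build (when optimizing) if false. */
	my_assert(sizeof(long) >= 4, "long is too small");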
@@ -341,4 +471,8 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
+#ifndef __diag_ignore_all
+#define __diag_ignore_all(option, comment)
+#endif
+
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index bf8e77001f18..fb2915676574 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -28,8 +28,7 @@ struct completion {
struct swait_queue_head wait;
};
-#define init_completion_map(x, m) __init_completion(x)
-#define init_completion(x) __init_completion(x)
+#define init_completion_map(x, m) init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
@@ -82,7 +81,7 @@ static inline void complete_release(struct completion *x) {}
* This inline function will initialize a dynamically created completion
* structure.
*/
-static inline void __init_completion(struct completion *x)
+static inline void init_completion(struct completion *x)
{
x->done = 0;
init_swait_queue_head(&x->wait);
@@ -104,6 +103,7 @@ extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
@@ -116,6 +116,7 @@ extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
+extern void complete_on_current_cpu(struct completion *x);
extern void complete_all(struct completion *);
#endif
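A hedged usage sketch of the API above, reflecting the rename of __init_completion() to init_completion() (the work-submission call is hypothetical):

	static struct completion done;

	static int consumer(void *unused)
	{
		init_completion(&done);		/* formerly __init_completion() */
		kick_off_work();		/* hypothetical producer start */
		wait_for_completion(&done);	/* sleep until complete() below */
		return 0;
	}

	static void producer_finished(void)
	{
		complete(&done);		/* wake one waiter */
	}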
diff --git a/include/linux/component.h b/include/linux/component.h
index 16de18f473d7..df4aa75c9e7c 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -38,10 +38,10 @@ int component_add_typed(struct device *dev, const struct component_ops *ops,
int subcomponent);
void component_del(struct device *, const struct component_ops *);
-int component_bind_all(struct device *master, void *master_data);
-void component_unbind_all(struct device *master, void *master_data);
+int component_bind_all(struct device *parent, void *data);
+void component_unbind_all(struct device *parent, void *data);
-struct master;
+struct aggregate_device;
/**
* struct component_master_ops - callback for the aggregate driver
@@ -82,6 +82,12 @@ struct component_master_ops {
void (*unbind)(struct device *master);
};
+/* A set of helper functions for component compare/release */
+int component_compare_of(struct device *dev, void *data);
+void component_release_of(struct device *dev, void *data);
+int component_compare_dev(struct device *dev, void *data);
+int component_compare_dev_name(struct device *dev, void *data);
+
void component_master_del(struct device *,
const struct component_master_ops *);
@@ -89,22 +95,22 @@ struct component_match;
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
-void component_match_add_release(struct device *master,
+void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
-void component_match_add_typed(struct device *master,
+void component_match_add_typed(struct device *parent,
struct component_match **matchptr,
int (*compare_typed)(struct device *, int, void *), void *compare_data);
/**
* component_match_add - add a component match entry
- * @master: device with the aggregate driver
+ * @parent: device with the aggregate driver
* @matchptr: pointer to the list of component matches
* @compare: compare function to match against all components
* @compare_data: opaque pointer passed to the @compare function
*
- * Adds a new component match to the list stored in @matchptr, which the @master
+ * Adds a new component match to the list stored in @matchptr, which the @parent
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add().
@@ -114,11 +120,11 @@ void component_match_add_typed(struct device *master,
*
* See also component_match_add_release() and component_match_add_typed().
*/
-static inline void component_match_add(struct device *master,
+static inline void component_match_add(struct device *parent,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
{
- component_match_add_release(master, matchptr, NULL, compare,
+ component_match_add_release(parent, matchptr, NULL, compare,
compare_data);
}
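A hedged sketch of an aggregate driver using the renamed @parent argument together with the new compare helpers (device and OF-node variables hypothetical):

	struct component_match *match = NULL;

	component_match_add(parent, &match, component_compare_of, child0_np);
	component_match_add(parent, &match, component_compare_of, child1_np);

	/* Binds once every matched component has called component_add(). */
	component_master_add_with_match(parent, &my_aggregate_ops, match);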
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2e8c69b43c64..2606711adb18 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
+/*
* configfs.h - definitions for the device driver filesystem
*
* Based on sysfs:
@@ -206,8 +204,6 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
 * group children. default_groups may coexist alongside make_group() or
* make_item(), but if the group wishes to have only default_groups
* children (disallowing mkdir(2)), it need not provide either function.
- * If the group has commit(), it supports pending and committed (active)
- * items.
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
@@ -218,7 +214,6 @@ struct configfs_item_operations {
struct configfs_group_operations {
struct config_item *(*make_item)(struct config_group *group, const char *name);
struct config_group *(*make_group)(struct config_group *group, const char *name);
- int (*commit_item)(struct config_item *item);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
};
diff --git a/include/linux/connector.h b/include/linux/connector.h
index cb732643471b..70bc1160f3d8 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -64,14 +64,14 @@ struct cn_dev {
* @callback: connector's callback.
* parameters are %cn_msg and the sender's credentials
*/
-int cn_add_callback(struct cb_id *id, const char *name,
+int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
/**
* cn_del_callback() - Unregisters new callback with connector core.
*
* @id: unique connector's user identifier.
*/
-void cn_del_callback(struct cb_id *id);
+void cn_del_callback(const struct cb_id *id);
/**
@@ -90,16 +90,21 @@ void cn_del_callback(struct cb_id *id);
* If @group is not zero, then message will be delivered
* to the specified group.
* @gfp_mask: GFP mask.
+ * @filter: Filter function to be used at netlink layer.
+ * @filter_data: Filter data to be supplied to the filter function
*
* It can be safely called from softirq context, but may silently
* fail under strong memory pressure.
*
 * If there are no listeners for the given group, %-ESRCH can be returned.
*/
-int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
+ u32 group, gfp_t gfp_mask,
+ netlink_filter_fn filter,
+ void *filter_data);
/**
- * cn_netlink_send_mult - Sends message to the specified groups.
+ * cn_netlink_send - Sends message to the specified groups.
*
* @msg: message header(with attached data).
* @portid: destination port.
@@ -122,14 +127,14 @@ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id);
void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
-int cn_cb_equal(struct cb_id *, struct cb_id *);
+int cn_cb_equal(const struct cb_id *, const struct cb_id *);
#endif /* __CONNECTOR_H */
diff --git a/include/linux/console.h b/include/linux/console.h
index 0670d3491e0e..31a8f5b85f5d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -15,7 +15,10 @@
#define _LINUX_CONSOLE_H_ 1
#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/rculist.h>
#include <linux/types.h>
+#include <linux/vesa.h>
struct vc_data;
struct console_font_op;
@@ -34,63 +37,91 @@ enum vc_intensity;
/**
* struct consw - callbacks for consoles
*
+ * @owner: the module to get references of when this console is used
+ * @con_startup: set up the console and return its name (like VGA, EGA, ...)
+ * @con_init: initialize the console on @vc. @init is true for the very first
+ * call on this @vc.
+ * @con_deinit: deinitialize the console from @vc.
+ * @con_clear: erase @count characters at [@x, @y] on @vc. @count >= 1.
+ * @con_putc: emit one character with attributes @ca to [@x, @y] on @vc.
+ * (optional -- @con_putcs would be called instead)
+ * @con_putcs: emit @count characters with attributes @s to [@x, @y] on @vc.
+ * @con_cursor: enable/disable cursor depending on @enable
* @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
* Return true if no generic handling should be done.
* Invoked by csi_M and printing to the console.
- * @con_set_palette: sets the palette of the console to @table (optional)
+ * @con_switch: notifier about the console switch; it is supposed to return
+ * true if a redraw is needed.
+ * @con_blank: blank/unblank the console. The target mode is passed in @blank.
+ * @mode_switch is set if changing from/to text/graphics. The hook
+ * is supposed to return true if a redraw is needed.
+ * @con_font_set: set console @vc font to @font with height @vpitch. @flags can
+ * be %KD_FONT_FLAG_DONT_RECALC. (optional)
+ * @con_font_get: fetch the current font on @vc of height @vpitch into @font.
+ * (optional)
+ * @con_font_default: set default font on @vc. @name can be %NULL or font name
+ * to search for. @font can be filled back. (optional)
+ * @con_resize: resize the @vc console to @width x @height. @from_user is true
+ * when this change comes from the user space.
+ * @con_set_palette: sets the palette of the console @vc to @table (optional)
* @con_scrolldelta: the contents of the console should be scrolled by @lines.
* Invoked by user. (optional)
+ * @con_set_origin: set origin (see &vc_data::vc_origin) of the @vc. If not
+ * provided or returns false, the origin is set to
+ * @vc->vc_screenbuf. (optional)
+ * @con_save_screen: save screen content into @vc->vc_screenbuf. Called e.g.
+ * upon entering graphics. (optional)
+ * @con_build_attr: build attributes based on @color, @intensity and other
+ * parameters. The result is used for both normal and erase
+ * characters. (optional)
+ * @con_invert_region: invert a region of length @count on @vc starting at @p.
+ * (optional)
+ * @con_debug_enter: prepare the console for the debugger. This includes, but
+ * is not limited to, unblanking the console, loading an
+ * appropriate palette, and allowing debugger generated output.
+ * (optional)
+ * @con_debug_leave: restore the console to its pre-debug state as closely as
+ * possible. (optional)
*/
struct consw {
struct module *owner;
const char *(*con_startup)(void);
- void (*con_init)(struct vc_data *vc, int init);
+ void (*con_init)(struct vc_data *vc, bool init);
void (*con_deinit)(struct vc_data *vc);
- void (*con_clear)(struct vc_data *vc, int sy, int sx, int height,
- int width);
- void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos);
- void (*con_putcs)(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos);
- void (*con_cursor)(struct vc_data *vc, int mode);
+ void (*con_clear)(struct vc_data *vc, unsigned int y,
+ unsigned int x, unsigned int count);
+ void (*con_putc)(struct vc_data *vc, u16 ca, unsigned int y,
+ unsigned int x);
+ void (*con_putcs)(struct vc_data *vc, const u16 *s,
+ unsigned int count, unsigned int ypos,
+ unsigned int xpos);
+ void (*con_cursor)(struct vc_data *vc, bool enable);
bool (*con_scroll)(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int lines);
- int (*con_switch)(struct vc_data *vc);
- int (*con_blank)(struct vc_data *vc, int blank, int mode_switch);
- int (*con_font_set)(struct vc_data *vc, struct console_font *font,
- unsigned int flags);
- int (*con_font_get)(struct vc_data *vc, struct console_font *font);
+ bool (*con_switch)(struct vc_data *vc);
+ bool (*con_blank)(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch);
+ int (*con_font_set)(struct vc_data *vc,
+ const struct console_font *font,
+ unsigned int vpitch, unsigned int flags);
+ int (*con_font_get)(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch);
int (*con_font_default)(struct vc_data *vc,
- struct console_font *font, char *name);
- int (*con_font_copy)(struct vc_data *vc, int con);
+ struct console_font *font, const char *name);
int (*con_resize)(struct vc_data *vc, unsigned int width,
- unsigned int height, unsigned int user);
+ unsigned int height, bool from_user);
void (*con_set_palette)(struct vc_data *vc,
const unsigned char *table);
void (*con_scrolldelta)(struct vc_data *vc, int lines);
- int (*con_set_origin)(struct vc_data *vc);
+ bool (*con_set_origin)(struct vc_data *vc);
void (*con_save_screen)(struct vc_data *vc);
u8 (*con_build_attr)(struct vc_data *vc, u8 color,
enum vc_intensity intensity,
bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
- u16 *(*con_screen_pos)(struct vc_data *vc, int offset);
- unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position,
- int *px, int *py);
- /*
- * Flush the video console driver's scrollback buffer
- */
- void (*con_flush_scrollback)(struct vc_data *vc);
- /*
- * Prepare the console for the debugger. This includes, but is not
- * limited to, unblanking the console, loading an appropriate
- * palette, and allowing debugger generated output.
- */
- int (*con_debug_enter)(struct vc_data *vc);
- /*
- * Restore the console to its pre-debug state as closely as possible.
- */
- int (*con_debug_leave)(struct vc_data *vc);
+ void (*con_debug_enter)(struct vc_data *vc);
+ void (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
@@ -99,66 +130,361 @@ extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
+struct screen_info;
+#ifdef CONFIG_VGA_CONSOLE
+void vgacon_register_screen(struct screen_info *si);
+#else
+static inline void vgacon_register_screen(struct screen_info *si) { }
+#endif
+
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
-#ifdef CONFIG_HW_CONSOLE
-int con_debug_enter(struct vc_data *vc);
-int con_debug_leave(void);
+#ifdef CONFIG_VT
+void con_debug_enter(struct vc_data *vc);
+void con_debug_leave(void);
#else
-static inline int con_debug_enter(struct vc_data *vc)
-{
- return 0;
-}
-static inline int con_debug_leave(void)
-{
- return 0;
-}
+static inline void con_debug_enter(struct vc_data *vc) { }
+static inline void con_debug_leave(void) { }
#endif
-/* cursor */
-#define CM_DRAW (1)
-#define CM_ERASE (2)
-#define CM_MOVE (3)
-
/*
* The interface for a console, or any other device that wants to capture
* console messages (printer driver?)
+ */
+
+/**
+ * enum cons_flags - General console flags
+ * @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
+ * output of messages that were already shown by boot
+ * consoles or read by userspace via syslog() syscall.
+ * @CON_CONSDEV: Indicates that the console driver is backing
+ * /dev/console.
+ * @CON_ENABLED: Indicates if a console is allowed to print records. If
+ * false, the console also will not advance to later
+ * records.
+ * @CON_BOOT: Marks the console driver as early console driver which
+ * is used during boot before the real driver becomes
+ * available. It will be automatically unregistered
+ * when the real console driver is registered unless
+ * "keep_bootcon" parameter is used.
+ * @CON_ANYTIME: A misnomed historical flag which tells the core code
+ * that the legacy @console::write callback can be invoked
+ * on a CPU which is marked OFFLINE. That is misleading as
+ * it suggests that there is no contextual limit for
+ * invoking the callback. The original motivation was
+ * readiness of the per-CPU areas.
+ * @CON_BRL: Indicates a braille device which is exempt from
+ * receiving the printk spam for obvious reasons.
+ * @CON_EXTENDED: The console supports the extended output format of
+ * /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
+ */
+enum cons_flags {
+ CON_PRINTBUFFER = BIT(0),
+ CON_CONSDEV = BIT(1),
+ CON_ENABLED = BIT(2),
+ CON_BOOT = BIT(3),
+ CON_ANYTIME = BIT(4),
+ CON_BRL = BIT(5),
+ CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * To be used for reading and preparing of the value stored in the nbcon
+ * state variable @console::nbcon_state.
*
- * If a console driver is marked CON_BOOT then it will be auto-unregistered
- * when the first real console is registered. This is for early-printk drivers.
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to time out and give up without the risk of a waiter being
+ * assigned the lock after giving up.
*/
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
-#define CON_PRINTBUFFER (1)
-#define CON_CONSDEV (2) /* Preferred console, /dev/console */
-#define CON_ENABLED (4)
-#define CON_BOOT (8)
-#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
-#define CON_BRL (32) /* Used for a braille device */
-#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
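A hedged sketch of the read-modify-write pattern the union supports (helper name hypothetical): bitfields are edited on a local copy and the whole word is then exchanged atomically through @atom.

	static bool try_mark_unsafe(struct console *con)	/* hypothetical */
	{
		struct nbcon_state cur, new;

		cur.atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
		new.atom = cur.atom;
		new.unsafe = 1;
		/* Fails if another context changed the state in between. */
		return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state),
					  &cur.atom, new.atom);
	}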
+/**
+ * enum nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can takeover the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
+};
+
+/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Write callback to output messages (Optional)
+ * @read: Read callback for console input (Optional)
+ * @device: The underlying TTY device driver (Optional)
+ * @unblank: Callback to unblank the console (Optional)
+ * @setup: Callback for initializing the console (Optional)
+ * @exit: Callback for teardown of the console (Optional)
+ * @match: Callback for matching a console (Optional)
+ * @flags: Console flags. See enum cons_flags
+ * @index: Console index, e.g. port number
+ * @cflag: TTY control mode flags
+ * @ispeed: TTY input speed
+ * @ospeed: TTY output speed
+ * @seq: Sequence number of the next ringbuffer record to print
+ * @dropped: Number of unreported dropped ringbuffer records
+ * @data: Driver private data
+ * @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
+ */
struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
- int (*setup)(struct console *, char *);
- int (*exit)(struct console *);
- int (*match)(struct console *, char *name, int idx, char *options);
- short flags;
- short index;
- int cflag;
- void *data;
- struct console *next;
+ char name[16];
+ void (*write)(struct console *co, const char *s, unsigned int count);
+ int (*read)(struct console *co, char *s, unsigned int count);
+ struct tty_driver *(*device)(struct console *co, int *index);
+ void (*unblank)(void);
+ int (*setup)(struct console *co, char *options);
+ int (*exit)(struct console *co);
+ int (*match)(struct console *co, char *name, int idx, char *options);
+ short flags;
+ short index;
+ int cflag;
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
+ void *data;
+ struct hlist_node node;
+
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
};
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_console_list_lock_held(void);
+#else
+static inline void lockdep_assert_console_list_lock_held(void)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern bool console_srcu_read_lock_is_held(void);
+#else
+static inline bool console_srcu_read_lock_is_held(void)
+{
+ return 1;
+}
+#endif
+
+extern int console_srcu_read_lock(void);
+extern void console_srcu_read_unlock(int cookie);
+
+extern void console_list_lock(void) __acquires(console_mutex);
+extern void console_list_unlock(void) __releases(console_mutex);
+
+extern struct hlist_head console_list;
+
+/**
+ * console_srcu_read_flags - Locklessly read the console flags
+ * @con: struct console pointer of console to read flags from
+ *
+ * This function provides the necessary READ_ONCE() and data_race()
+ * notation for locklessly reading the console flags. The READ_ONCE()
+ * in this function matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
+ *
+ * Only use this function to read console flags when locklessly
+ * iterating the console list via srcu.
+ *
+ * Context: Any context.
+ */
+static inline short console_srcu_read_flags(const struct console *con)
+{
+ WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+ /*
+ * Locklessly reading console->flags provides a consistent
+ * read value because there is at most one CPU modifying
+ * console->flags and that CPU is using only read-modify-write
+ * operations to do so.
+ */
+ return data_race(READ_ONCE(con->flags));
+}
+
+/**
+ * console_srcu_write_flags - Write flags for a registered console
+ * @con: struct console pointer of console to write flags to
+ * @flags: new flags value to write
+ *
+ * Only use this function to write flags for registered consoles. It
+ * requires holding the console_list_lock.
+ *
+ * Context: Any context.
+ */
+static inline void console_srcu_write_flags(struct console *con, short flags)
+{
+ lockdep_assert_console_list_lock_held();
+
+ /* This matches the READ_ONCE() in console_srcu_read_flags(). */
+ WRITE_ONCE(con->flags, flags);
+}
+
+/* Variant of console_is_registered() when the console_list_lock is held. */
+static inline bool console_is_registered_locked(const struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+ return !hlist_unhashed(&con->node);
+}
+
/*
- * for_each_console() allows you to iterate on each console
+ * console_is_registered - Check if the console is registered
+ * @con: struct console pointer of console to check
+ *
+ * Context: Process context. May sleep while acquiring console list lock.
+ * Return: true if the console is in the console list, otherwise false.
+ *
+ * If false is returned for a console that was previously registered, it
+ * can be assumed that the console's unregistration is fully completed,
+ * including the exit() callback after console list removal.
+ */
+static inline bool console_is_registered(const struct console *con)
+{
+ bool ret;
+
+ console_list_lock();
+ ret = console_is_registered_locked(con);
+ console_list_unlock();
+ return ret;
+}
+
+/**
+ * for_each_console_srcu() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * Although SRCU guarantees the console list will be consistent, the
+ * struct console fields may be updated by other CPUs while iterating.
+ *
+ * Requires console_srcu_read_lock to be held. Can be invoked from
+ * any context.
*/
-#define for_each_console(con) \
- for (con = console_drivers; con != NULL; con = con->next)
+#define for_each_console_srcu(con) \
+ hlist_for_each_entry_srcu(con, &console_list, node, \
+ console_srcu_read_lock_is_held())
+
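A hedged iteration sketch for the SRCU variant (the per-console action is hypothetical):

	struct console *con;
	int cookie;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		/* Flags must go through the lockless accessor here. */
		if (console_srcu_read_flags(con) & CON_ENABLED)
			inspect_console(con);	/* hypothetical */
	}
	console_srcu_read_unlock(cookie);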
+/**
+ * for_each_console() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * The console list and the &console.flags are immutable while iterating.
+ *
+ * Requires console_list_lock to be held.
+ */
+#define for_each_console(con) \
+ lockdep_assert_console_list_lock_held(); \
+ hlist_for_each_entry(con, &console_list, node)
+
+#ifdef CONFIG_PRINTK
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+#else
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+#endif
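A hedged sketch of how a driver's @write_atomic callback is expected to bracket hardware access with the helpers above (the device-output call is hypothetical):

	static bool my_write_atomic(struct console *con,
				    struct nbcon_write_context *wctxt)
	{
		if (!nbcon_enter_unsafe(wctxt))
			return false;		/* ownership was handed over */
		hw_emit(con, wctxt->outbuf, wctxt->len);	/* hypothetical */
		return nbcon_exit_unsafe(wctxt);
	}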
extern int console_set_on_cmdline;
extern struct console *early_console;
@@ -168,10 +494,10 @@ enum con_flush_mode {
CONSOLE_REPLAY_ALL,
};
-extern int add_preferred_console(char *name, int idx, char *options);
+extern int add_preferred_console(const char *name, const short idx, char *options);
+extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
-extern struct console *console_drivers;
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
@@ -212,18 +538,6 @@ void vcs_remove_sysfs(int index);
*/
extern atomic_t ignore_console_lock_warning;
-/* VESA Blanking Levels */
-#define VESA_NO_BLANKING 0
-#define VESA_VSYNC_SUSPEND 1
-#define VESA_HSYNC_SUSPEND 2
-#define VESA_POWERDOWN 3
-
-#ifdef CONFIG_VGA_CONSOLE
-extern bool vgacon_text_force(void);
-#else
-static inline bool vgacon_text_force(void) { return false; }
-#endif
-
extern void console_init(void);
/* For deferred console takeover */
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 153734816b49..20f564e98552 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -17,8 +17,7 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct uni_pagedir;
-struct uni_screen;
+struct uni_pagedict;
#define NPAR 16
#define VC_TABSTOPS_COUNT 256U
@@ -101,6 +100,7 @@ struct vc_data {
unsigned int vc_rows;
unsigned int vc_size_row; /* Bytes per row */
unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned int vc_cell_height; /* CRTC character cell height */
unsigned long vc_origin; /* [!] Start of real screen */
unsigned long vc_scr_end; /* [!] End of real screen */
unsigned long vc_visible_origin; /* [!] Top of visible window */
@@ -151,14 +151,13 @@ struct vc_data {
DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */
unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
unsigned short * vc_translate;
- unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
- struct uni_pagedir *vc_uni_pagedir;
- struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
- struct uni_screen *vc_uni_screen; /* unicode screen content */
+ struct uni_pagedict *uni_pagedict;
+ struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
+ u32 **vc_uni_lines; /* unicode screen content */
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index 254246673390..c35db4896c37 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -7,29 +7,56 @@
#ifndef __LINUX_CONSOLEMAP_H__
#define __LINUX_CONSOLEMAP_H__
-#define LAT1_MAP 0
-#define GRAF_MAP 1
-#define IBMPC_MAP 2
-#define USER_MAP 3
+enum translation_map {
+ LAT1_MAP,
+ GRAF_MAP,
+ IBMPC_MAP,
+ USER_MAP,
+
+ FIRST_MAP = LAT1_MAP,
+ LAST_MAP = USER_MAP,
+};
#include <linux/types.h>
-#ifdef CONFIG_CONSOLE_TRANSLATIONS
struct vc_data;
-extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode);
-extern unsigned short *set_translate(int m, struct vc_data *vc);
-extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
-extern u32 conv_8bit_to_uni(unsigned char c);
-extern int conv_uni_to_8bit(u32 uni);
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode);
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc);
+int conv_uni_to_pc(struct vc_data *conp, long ucs);
+u32 conv_8bit_to_uni(unsigned char c);
+int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
#else
-#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
-#define set_translate(m, vc) ((unsigned short *)NULL)
-#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
-#define conv_8bit_to_uni(c) ((uint32_t)(c))
-#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
-#define console_map_init(c) do { ; } while (0)
+static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
+ bool use_unicode)
+{
+ return glyph;
+}
+
+static inline unsigned short *set_translate(enum translation_map m,
+ struct vc_data *vc)
+{
+ return NULL;
+}
+
+static inline int conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ return ucs > 0xff ? -1 : ucs;
+}
+
+static inline u32 conv_8bit_to_uni(unsigned char c)
+{
+ return c;
+}
+
+static inline int conv_uni_to_8bit(u32 uni)
+{
+ return uni & 0xff;
+}
+
+static inline void console_map_init(void) { }
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
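As a concrete reading of the stubs above: without CONFIG_CONSOLE_TRANSLATIONS, conv_uni_to_pc() passes Latin-1 code points through unchanged and rejects anything above 0xff (given some struct vc_data *vc):

	int a = conv_uni_to_pc(vc, 0x00e9);	/* U+00E9: returns 0xe9 */
	int b = conv_uni_to_pc(vc, 0x20ac);	/* U+20AC > 0xff: returns -1 */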
diff --git a/include/linux/container.h b/include/linux/container.h
index 2566a1baa736..dd00cc918a92 100644
--- a/include/linux/container.h
+++ b/include/linux/container.h
@@ -12,7 +12,7 @@
#include <linux/device.h>
/* drivers/base/power/container.c */
-extern struct bus_type container_subsys;
+extern const struct bus_type container_subsys;
struct container_dev {
struct device dev;
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
new file mode 100644
index 000000000000..713890c867be
--- /dev/null
+++ b/include/linux/container_of.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTAINER_OF_H
+#define _LINUX_CONTAINER_OF_H
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * WARNING: any const qualifier of @ptr is lost.
+ */
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+/**
+ * container_of_const - cast a member of a structure out to the containing
+ * structure and preserve the const-ness of the pointer
+ * @ptr: the pointer to the member
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ */
+#define container_of_const(ptr, type, member) \
+ _Generic(ptr, \
+ const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)),\
+ default: ((type *)container_of(ptr, type, member)) \
+ )
+
+#endif /* _LINUX_CONTAINER_OF_H */
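A minimal usage sketch for the new header (the struct is hypothetical): container_of() walks back from an embedded member to its enclosing object, with the static_assert rejecting a mismatched member type at build time, and container_of_const() preserving const through the cast.

	struct sensor {
		int id;
		struct list_head node;
	};

	static struct sensor *sensor_of(struct list_head *n)
	{
		return container_of(n, struct sensor, node);
	}

	static const struct sensor *sensor_of_const(const struct list_head *n)
	{
		return container_of_const(n, struct sensor, node);
	}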
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d53cd331c4dd..6e76b9dba00e 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,170 +10,157 @@
#include <asm/ptrace.h>
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void context_tracking_cpu_set(int cpu);
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern void ct_cpu_track_user(int cpu);
/* Called with interrupts disabled. */
-extern void __context_tracking_enter(enum ctx_state state);
-extern void __context_tracking_exit(enum ctx_state state);
+extern void __ct_user_enter(enum ctx_state state);
+extern void __ct_user_exit(enum ctx_state state);
-extern void context_tracking_enter(enum ctx_state state);
-extern void context_tracking_exit(enum ctx_state state);
-extern void context_tracking_user_enter(void);
-extern void context_tracking_user_exit(void);
+extern void ct_user_enter(enum ctx_state state);
+extern void ct_user_exit(enum ctx_state state);
+
+extern void user_enter_callable(void);
+extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- context_tracking_enter(CONTEXT_USER);
+ ct_user_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- context_tracking_exit(CONTEXT_USER);
+ ct_user_exit(CONTEXT_USER);
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __context_tracking_enter(CONTEXT_USER);
+ __ct_user_enter(CONTEXT_USER);
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __context_tracking_exit(CONTEXT_USER);
+ __ct_user_exit(CONTEXT_USER);
}
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_enabled())
+ if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
+ !context_tracking_enabled())
return 0;
- prev_ctx = this_cpu_read(context_tracking.state);
+ prev_ctx = __ct_state();
if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_exit(prev_ctx);
+ ct_user_exit(prev_ctx);
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_enabled()) {
+ if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
+ context_tracking_enabled()) {
if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_enter(prev_ctx);
+ ct_user_enter(prev_ctx);
}
}
+static __always_inline bool context_tracking_guest_enter(void)
+{
+ if (context_tracking_enabled())
+ __ct_user_enter(CONTEXT_GUEST);
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static __always_inline enum ctx_state ct_state(void)
+ return context_tracking_enabled_this_cpu();
+}
+
+static __always_inline void context_tracking_guest_exit(void)
{
- return context_tracking_enabled() ?
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+ if (context_tracking_enabled())
+ __ct_user_exit(CONTEXT_GUEST);
}
+
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
-
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+static inline int ct_state(void) { return -1; }
+static inline int __ct_state(void) { return -1; }
+static __always_inline bool context_tracking_guest_enter(void) { return false; }
+static __always_inline void context_tracking_guest_exit(void) { }
+#define CT_WARN_ON(cond) do { } while (0)
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-/* must be called with irqs disabled */
-static __always_inline void guest_enter_irqoff(void)
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
- instrumentation_begin();
- if (vtime_accounting_enabled_this_cpu())
- vtime_guest_enter(current);
- else
- current->flags |= PF_VCPU;
- instrumentation_end();
-
- if (context_tracking_enabled())
- __context_tracking_enter(CONTEXT_GUEST);
-
- /* KVM does not hold any references to rcu protected data when it
- * switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspace from rcu point of view. In
- * addition CPU may stay in a guest mode for quite a long time (up to
- * one time slice). Lets treat guest mode as quiescent state, just like
- * we do with user-mode execution.
- */
- if (!context_tracking_enabled_this_cpu()) {
- instrumentation_begin();
- rcu_virt_note_context_switch(smp_processor_id());
- instrumentation_end();
- }
+ return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}
-static __always_inline void guest_exit_irqoff(void)
+/*
+ * Increment the current CPU's context_tracking structure's ->state field
+ * with ordering. Return the new value.
+ */
+static __always_inline unsigned long ct_state_inc(int incby)
{
- if (context_tracking_enabled())
- __context_tracking_exit(CONTEXT_GUEST);
-
- instrumentation_begin();
- if (vtime_accounting_enabled_this_cpu())
- vtime_guest_exit(current);
- else
- current->flags &= ~PF_VCPU;
- instrumentation_end();
+ return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
-#else
-static __always_inline void guest_enter_irqoff(void)
+static __always_inline bool warn_rcu_enter(void)
{
+ bool ret = false;
+
/*
- * This is running in ioctl context so its safe
- * to assume that it's the stime pending cputime
- * to flush.
+	 * Horrible hack to shut up recursive "RCU isn't watching" failures,
+	 * since lots of the actual reporting also relies on RCU.
*/
- instrumentation_begin();
- vtime_account_kernel(current);
- current->flags |= PF_VCPU;
- rcu_virt_note_context_switch(smp_processor_id());
- instrumentation_end();
+ preempt_disable_notrace();
+ if (rcu_dynticks_curr_cpu_in_eqs()) {
+ ret = true;
+ ct_state_inc(RCU_DYNTICKS_IDX);
+ }
+
+ return ret;
}
-static __always_inline void guest_exit_irqoff(void)
+static __always_inline void warn_rcu_exit(bool rcu)
{
- instrumentation_begin();
- /* Flush the guest cputime we spent on the guest */
- vtime_account_kernel(current);
- current->flags &= ~PF_VCPU;
- instrumentation_end();
+ if (rcu)
+ ct_state_inc(RCU_DYNTICKS_IDX);
+ preempt_enable_notrace();
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void guest_exit(void)
-{
- unsigned long flags;
+#else
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
- local_irq_save(flags);
- guest_exit_irqoff();
- local_irq_restore(flags);
-}
+static __always_inline bool warn_rcu_enter(void) { return false; }
+static __always_inline void warn_rcu_exit(bool rcu) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
#endif
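A hedged usage sketch for the pair above: a caller that may warn from an RCU extended quiescent state brackets the report so RCU is watching for its duration (the condition is illustrative):

	bool rcu_on = warn_rcu_enter();		/* leaves EQS if we were in one */
	WARN_ON_ONCE(state_is_corrupt);
	warn_rcu_exit(rcu_on);			/* restores the previous EQS state */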
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 000000000000..c50b5670c4a5
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static inline void ct_irq_enter(void) { }
+static inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static inline void ct_nmi_enter(void) { }
+static inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
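A hedged sketch of how an architecture's interrupt path might bracket a handler with these hooks (demo_irq_entry and handle_pending_irq are hypothetical):

	#include <linux/context_tracking_irq.h>

	static void handle_pending_irq(void) { }	/* hypothetical stub */

	static void demo_irq_entry(void)
	{
		ct_irq_enter();		/* leave RCU-idle if the CPU was idle */
		handle_pending_irq();	/* hypothetical handler dispatch */
		ct_irq_exit();		/* possibly re-enter RCU-idle */
	}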
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 65a60d3313b0..bbff5f7f8803 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -4,8 +4,28 @@
#include <linux/percpu.h>
#include <linux/static_key.h>
+#include <linux/context_tracking_irq.h>
+
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+
+enum ctx_state {
+ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
+ CONTEXT_KERNEL = 0,
+ CONTEXT_IDLE = 1,
+ CONTEXT_USER = 2,
+ CONTEXT_GUEST = 3,
+ CONTEXT_MAX = 4,
+};
+
+/* Even value for idle, else odd. */
+#define RCU_DYNTICKS_IDX CONTEXT_MAX
+
+#define CT_STATE_MASK (CONTEXT_MAX - 1)
+#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
@@ -14,17 +34,74 @@ struct context_tracking {
*/
bool active;
int recursion;
- enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_USER,
- CONTEXT_GUEST,
- } state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+ atomic_t state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+ long dynticks_nesting; /* Track process nesting level. */
+ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+#endif
};
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+static __always_inline int __ct_state(void)
+{
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+}
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_dynticks(void)
+{
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline long ct_dynticks_nesting(void)
+{
+ return __this_cpu_read(context_tracking.dynticks_nesting);
+}
+
+static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->dynticks_nesting;
+}
+
+static __always_inline long ct_dynticks_nmi_nesting(void)
+{
+ return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+}
+
+static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->dynticks_nmi_nesting;
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern struct static_key_false context_tracking_key;
static __always_inline bool context_tracking_enabled(void)
{
@@ -41,15 +118,31 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
-static __always_inline bool context_tracking_in_user(void)
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
{
- return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+ int ret;
+
+ if (!context_tracking_enabled())
+ return CONTEXT_DISABLED;
+
+ preempt_disable();
+ ret = __ct_state();
+ preempt_enable();
+
+ return ret;
}
+
#else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_enabled(void) { return false; }
-static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
-static inline bool context_tracking_enabled_this_cpu(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING_USER */
#endif
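Given the definitions above, the packed atomic ->state word splits as in this illustrative helper (decode_ct_state is hypothetical): the low CT_STATE_MASK bits hold the enum ctx_state value, and the bits above form the dynticks counter, whose RCU_DYNTICKS_IDX bit is clear while the CPU is in an extended quiescent state:

	static void decode_ct_state(int state, int *ctx, int *dynticks)
	{
		*ctx = state & CT_STATE_MASK;		/* CONTEXT_KERNEL..CONTEXT_GUEST */
		*dynticks = state & CT_DYNTICKS_MASK;	/* RCU_DYNTICKS_IDX clear => EQS */
	}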
diff --git a/include/linux/cookie.h b/include/linux/cookie.h
new file mode 100644
index 000000000000..0c159f585109
--- /dev/null
+++ b/include/linux/cookie.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COOKIE_H
+#define __LINUX_COOKIE_H
+
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <asm/local.h>
+
+struct pcpu_gen_cookie {
+ local_t nesting;
+ u64 last;
+} __aligned(16);
+
+struct gen_cookie {
+ struct pcpu_gen_cookie __percpu *local;
+ atomic64_t forward_last ____cacheline_aligned_in_smp;
+ atomic64_t reverse_last;
+};
+
+#define COOKIE_LOCAL_BATCH 4096
+
+#define DEFINE_COOKIE(name) \
+ static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name); \
+ static struct gen_cookie name = { \
+ .local = &__##name, \
+ .forward_last = ATOMIC64_INIT(0), \
+ .reverse_last = ATOMIC64_INIT(0), \
+ }
+
+static __always_inline u64 gen_cookie_next(struct gen_cookie *gc)
+{
+ struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local);
+ u64 val;
+
+ if (likely(local_inc_return(&local->nesting) == 1)) {
+ val = local->last;
+ if (__is_defined(CONFIG_SMP) &&
+ unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) {
+ s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH,
+ &gc->forward_last);
+ val = next - COOKIE_LOCAL_BATCH;
+ }
+ local->last = ++val;
+ } else {
+ val = atomic64_dec_return(&gc->reverse_last);
+ }
+ local_dec(&local->nesting);
+ return val;
+}
+
+#endif /* __LINUX_COOKIE_H */
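A usage sketch, assuming the caller pins the CPU (names hypothetical): forward IDs are handed out in per-CPU batches of COOKIE_LOCAL_BATCH, while re-entrant callers (e.g. from irq context) fall back to the shared reverse counter:

	DEFINE_COOKIE(demo_cookie);

	static u64 demo_next_id(void)
	{
		u64 id;

		preempt_disable();	/* keep this_cpu_ptr() stable */
		id = gen_cookie_next(&demo_cookie);
		preempt_enable();
		return id;
	}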
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 7a899e83835d..d3eba4360150 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -7,23 +7,48 @@
#include <linux/fs.h>
#include <asm/siginfo.h>
+#ifdef CONFIG_COREDUMP
+struct core_vma_metadata {
+ unsigned long start, end;
+ unsigned long flags;
+ unsigned long dump_size;
+ unsigned long pgoff;
+ struct file *file;
+};
+
+struct coredump_params {
+ const kernel_siginfo_t *siginfo;
+ struct file *file;
+ unsigned long limit;
+ unsigned long mm_flags;
+ int cpu;
+ loff_t written;
+ loff_t pos;
+ loff_t to_skip;
+ int vma_count;
+ size_t vma_data_size;
+ struct core_vma_metadata *vma_meta;
+};
+
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-struct coredump_params;
-extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern void dump_skip_to(struct coredump_params *cprm, unsigned long to);
+extern void dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
-extern void dump_truncate(struct coredump_params *cprm);
-#ifdef CONFIG_COREDUMP
+int dump_user_range(struct coredump_params *cprm, unsigned long start,
+ unsigned long len);
extern void do_coredump(const kernel_siginfo_t *siginfo);
#else
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif
-extern int core_uses_pid;
-extern char core_pattern[];
-extern unsigned int core_pipe_limit;
+#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
+extern void validate_coredump_safety(void);
+#else
+static inline void validate_coredump_safety(void) {}
+#endif
#endif /* _LINUX_COREDUMP_H */
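A hedged sketch of a core-file writer built only on the helpers above (demo_hdr and write_core are hypothetical); dump_emit() and dump_user_range() return 0 on failure, so the writer simply propagates that:

	struct demo_hdr { u32 magic; u32 nvma; };	/* hypothetical header */

	static int write_core(struct coredump_params *cprm)
	{
		struct demo_hdr hdr = { 0xc0de, cprm->vma_count };
		int i;

		if (!dump_emit(cprm, &hdr, sizeof(hdr)))
			return 0;
		for (i = 0; i < cprm->vma_count; i++) {
			struct core_vma_metadata *m = cprm->vma_meta + i;

			if (!dump_user_range(cprm, m->start, m->dump_size))
				return 0;
		}
		return 1;
	}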
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index b0e35eec6499..51ac441a37c3 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -7,30 +7,54 @@
#ifndef _LINUX_CORESIGHT_PMU_H
#define _LINUX_CORESIGHT_PMU_H
+#include <linux/bits.h>
+
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * The legacy Trace ID system based on fixed calculation from the cpu
+ * number. This has been replaced by drivers using a dynamic allocation
+ * system - but the legacy algorithm must be retained for backward
+ * compatibility in certain situations:
+ * a) new perf running on older systems that generate the legacy mapping
+ * b) older tools that may not update at the same time as the kernel.
+ */
+#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
+
+/*
+ * Below are the definitions of the bit offsets for perf options; they work
+ * as arbitrary values for all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config; therefore,
+ * ETMv3.5/PTM does not define ETMCR config bits with an "ETM3_" prefix and
+ * directly uses the macros below as config bits.
+ */
+#define ETM_OPT_BRANCH_BROADCAST 8
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_BB 3
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
+
+/*
+ * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
+ * Used to associate a CPU with the CoreSight Trace ID.
+ * [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
+ * [59:08] - Unused (SBZ)
+ * [63:60] - Version
+ */
+#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
+#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
-static inline int coresight_get_trace_id(int cpu)
-{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and go from there. Since
- * the common convention is to have data trace IDs be I(N) + 1,
- * set instruction trace IDs as a function of the CPU number.
- */
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
-}
+#define CS_AUX_HW_ID_CURR_VERSION 0
#endif
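A decoding sketch (demo_cs_trace_id is hypothetical) combining the two schemes above: prefer the Trace ID carried in a PERF_RECORD_AUX_OUTPUT_HW_ID payload, and fall back to the legacy per-CPU calculation when no such record was seen:

	#include <linux/bitfield.h>

	static u8 demo_cs_trace_id(u64 payload, int cpu, bool have_hw_id)
	{
		if (have_hw_id &&
		    FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, payload) ==
		    CS_AUX_HW_ID_CURR_VERSION)
			return FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, payload);

		return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu);
	}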
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 58fffdecdbfd..5f288d475490 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -6,7 +6,10 @@
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
@@ -32,48 +35,42 @@
#define CORESIGHT_UNLOCK 0xc5acce55
-extern struct bus_type coresight_bustype;
+extern const struct bus_type coresight_bustype;
enum coresight_dev_type {
- CORESIGHT_DEV_TYPE_NONE,
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
CORESIGHT_DEV_TYPE_HELPER,
- CORESIGHT_DEV_TYPE_ECT,
+ CORESIGHT_DEV_TYPE_MAX
};
enum coresight_dev_subtype_sink {
- CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_DUMMY,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
+ CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM,
};
enum coresight_dev_subtype_link {
- CORESIGHT_DEV_SUBTYPE_LINK_NONE,
CORESIGHT_DEV_SUBTYPE_LINK_MERG,
CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};
enum coresight_dev_subtype_source {
- CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
};
enum coresight_dev_subtype_helper {
- CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
-};
-
-/* Embedded Cross Trigger (ECT) sub-types */
-enum coresight_dev_subtype_ect {
- CORESIGHT_DEV_SUBTYPE_ECT_NONE,
- CORESIGHT_DEV_SUBTYPE_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
};
/**
@@ -86,8 +83,6 @@ enum coresight_dev_subtype_ect {
* by @coresight_dev_subtype_source.
* @helper_subtype: type of helper this component is, as defined
* by @coresight_dev_subtype_helper.
- * @ect_subtype: type of cross trigger this component is, as
- * defined by @coresight_dev_subtype_ect
*/
union coresight_dev_subtype {
/* We have some devices which acts as LINK and SINK */
@@ -97,23 +92,53 @@ union coresight_dev_subtype {
};
enum coresight_dev_subtype_source source_subtype;
enum coresight_dev_subtype_helper helper_subtype;
- enum coresight_dev_subtype_ect ect_subtype;
};
/**
* struct coresight_platform_data - data harvested from the firmware
* specification.
*
- * @nr_inport: Number of elements for the input connections.
- * @nr_outport: Number of elements for the output connections.
- * @conns: Sparse array of nr_outport connections from this component.
+ * @nr_inconns: Number of elements for the input connections.
+ * @nr_outconns: Number of elements for the output connections.
+ * @out_conns: Array of nr_outconns pointers to connections from this
+ * component.
+ * @in_conns: Sparse array of pointers to input connections. Sparse
+ *		because the source device owns the connection, so when it's
+ * unloaded the connection leaves an empty slot.
*/
struct coresight_platform_data {
- int nr_inport;
- int nr_outport;
- struct coresight_connection *conns;
+ int nr_inconns;
+ int nr_outconns;
+ struct coresight_connection **out_conns;
+ struct coresight_connection **in_conns;
+};
+
+/**
+ * struct csdev_access - Abstraction of a CoreSight device access.
+ *
+ * @io_mem : True if the device has memory mapped I/O
+ * @base : When io_mem == true, base address of the component
+ * @read : Read from the given "offset" of the given instance.
+ * @write : Write "val" to the given "offset".
+ */
+struct csdev_access {
+ bool io_mem;
+ union {
+ void __iomem *base;
+ struct {
+ u64 (*read)(u32 offset, bool relaxed, bool _64bit);
+ void (*write)(u64 val, u32 offset, bool relaxed,
+ bool _64bit);
+ };
+ };
};
+#define CSDEV_ACCESS_IOMEM(_addr) \
+ ((struct csdev_access) { \
+ .io_mem = true, \
+ .base = (_addr), \
+ })
+
/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
@@ -125,6 +150,7 @@ struct coresight_platform_data {
* @groups: operations specific to this component. These will end up
* in the component's sysfs sub-directory.
* @name: name for the coresight device, also shown under sysfs.
+ * @access: Describe access to the device
*/
struct coresight_desc {
enum coresight_dev_type type;
@@ -134,23 +160,47 @@ struct coresight_desc {
struct device *dev;
const struct attribute_group **groups;
const char *name;
+ struct csdev_access access;
};
/**
* struct coresight_connection - representation of a single connection
- * @outport: a connection's output port number.
- * @child_port: remote component's port number @output is connected to.
- * @chid_fwnode: remote component's fwnode handle.
- * @child_dev: a @coresight_device representation of the component
- connected to @outport.
+ * @src_port: a connection's output port number.
+ * @dest_port: destination's input port number @src_port is connected to.
+ * @dest_fwnode: destination component's fwnode handle.
+ * @dest_dev: a @coresight_device representation of the component
+ *		connected to @src_port. NULL until the device is created.
* @link: Representation of the connection as a sysfs link.
+ *
+ * The full connection structure looks like this, where in_conns store
+ * The full connection structure looks like this, where in_conns stores
+ * references to the same connection as the source device's out_conns.
+ * +-----------------------------+ +-----------------------------+
+ * |coresight_device | |coresight_connection |
+ * |-----------------------------| |-----------------------------|
+ * | | | |
+ * | | | dest_dev*|<--
+ * |pdata->out_conns[nr_outconns]|<->|src_dev* | |
+ * | | | | |
+ * +-----------------------------+ +-----------------------------+ |
+ * |
+ * +-----------------------------+ |
+ * |coresight_device | |
+ *   |-----------------------------|              |
+ * | | |
+ * | pdata->in_conns[nr_inconns]|<--
+ * | |
+ * +-----------------------------+
*/
struct coresight_connection {
- int outport;
- int child_port;
- struct fwnode_handle *child_fwnode;
- struct coresight_device *child_dev;
+ int src_port;
+ int dest_port;
+ struct fwnode_handle *dest_fwnode;
+ struct coresight_device *dest_dev;
struct coresight_sysfs_link *link;
+ struct coresight_device *src_dev;
+ atomic_t src_refcnt;
+ atomic_t dest_refcnt;
};
/**
@@ -173,41 +223,61 @@ struct coresight_sysfs_link {
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
+ * @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
- * @refcnt: keep track of what is in use.
+ * @mode:	This tracer's mode, i.e. sysFS, Perf or disabled. This is
+ * actually an 'enum cs_mode', but is stored in an atomic type.
+ * This is always accessed through local_read() and local_set(),
+ * but wherever it's done from within the Coresight device's lock,
+ * a non-atomic read would also work. This is the main point of
+ * synchronisation between code happening inside the sysfs mode's
+ * coresight_mutex and outside when running in Perf mode. A compare
+ * and exchange swap is done to atomically claim one mode or the
+ * other.
+ * @refcnt: keep track of what is in use. Only access this outside of the
+ *		device's spinlock when the coresight_mutex is held and mode ==
+ * CS_MODE_SYSFS. Otherwise it must be accessed from inside the
+ * spinlock.
* @orphan: true if the component has connections that haven't been linked.
- * @enable: 'true' if component is currently part of an active path.
- * @activated: 'true' only if a _sink_ has been activated. A sink can be
- * activated but not yet enabled. Enabling for a _sink_
- * happens when a source has been selected and a path is enabled
- * from source to that sink.
+ * @sysfs_sink_activated: 'true' when a sink has been selected for use via sysfs
+ * by writing a 1 to the 'enable_sink' file. A sink can be
+ * activated but not yet enabled. Enabling for a _sink_ happens
+ * when a source has been selected and a path is enabled from
+ * source to that sink. A sink can also become enabled but not
+ * activated if it's used via Perf.
* @ea: Device attribute for sink representation under PMU directory.
* @def_sink: cached reference to default sink found for this device.
- * @ect_dev: Associated cross trigger device. Not part of the trace data
- * path or connections.
* @nr_links: number of sysfs links created to other components from this
* device. These will appear in the "connections" group.
* @has_conns_grp: Have added a "connections" group for sysfs links.
+ * @feature_csdev_list: List of complex feature programming added to the device.
+ * @config_csdev_list: List of system configurations added to the device.
+ * @cscfg_csdev_lock: Protect the lists of configurations and features.
+ * @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
enum coresight_dev_type type;
union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
+ struct csdev_access access;
struct device dev;
- atomic_t *refcnt;
+ local_t mode;
+ int refcnt;
bool orphan;
- bool enable; /* true only if configured as part of a path */
/* sink specific fields */
- bool activated; /* true only if a sink is part of a path */
+ bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
- /* cross trigger handling */
- struct coresight_device *ect_dev;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
+ /* system configuration and feature lists */
+ struct list_head feature_csdev_list;
+ struct list_head config_csdev_list;
+ spinlock_t cscfg_csdev_lock;
+ void *active_cscfg_ctxt;
};
/*
@@ -234,6 +304,12 @@ static struct coresight_dev_list (var) = { \
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
@@ -250,7 +326,8 @@ static struct coresight_dev_list (var) = { \
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
int (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev,
struct perf_event *event, void **pages,
@@ -268,8 +345,12 @@ struct coresight_ops_sink {
* @disable: disables flow between iport and oport.
*/
struct coresight_ops_link {
- int (*enable)(struct coresight_device *csdev, int iport, int oport);
- void (*disable)(struct coresight_device *csdev, int iport, int oport);
+ int (*enable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
+ void (*disable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
};
/**
@@ -277,16 +358,13 @@ struct coresight_ops_link {
* Operations available for sources.
* @cpu_id: returns the value of the CPU number this component
* is associated to.
- * @trace_id: returns the value of the component's trace ID as known
- * to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*trace_id)(struct coresight_device *csdev);
- int (*enable)(struct coresight_device *csdev,
- struct perf_event *event, u32 mode);
+ int (*enable)(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
@@ -301,77 +379,283 @@ struct coresight_ops_source {
* @disable : Disable the device
*/
struct coresight_ops_helper {
- int (*enable)(struct coresight_device *csdev, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
int (*disable)(struct coresight_device *csdev, void *data);
};
-/**
- * struct coresight_ops_ect - Ops for an embedded cross trigger device
- *
- * @enable : Enable the device
- * @disable : Disable the device
- */
-struct coresight_ops_ect {
- int (*enable)(struct coresight_device *csdev);
- int (*disable)(struct coresight_device *csdev);
-};
-
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
- const struct coresight_ops_ect *ect_ops;
};
-#ifdef CONFIG_CORESIGHT
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable(struct coresight_device *csdev);
-extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value);
+static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, false);
+}
-extern int coresight_claim_device(void __iomem *base);
-extern int coresight_claim_device_unlocked(void __iomem *base);
+#define CORESIGHT_CIDRn(i) (0xFF0 + ((i) * 4))
-extern void coresight_disclaim_device(void __iomem *base);
-extern void coresight_disclaim_device_unlocked(void __iomem *base);
-extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
- struct device *dev);
+static inline u32 coresight_get_cid(void __iomem *base)
+{
+ u32 i, cid = 0;
-extern bool coresight_loses_context_with_cpu(struct device *dev);
-#else
-static inline struct coresight_device *
-coresight_register(struct coresight_desc *desc) { return NULL; }
-static inline void coresight_unregister(struct coresight_device *csdev) {}
-static inline int
-coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
-static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value) { return 1; }
-static inline int coresight_claim_device_unlocked(void __iomem *base)
+ for (i = 0; i < 4; i++)
+ cid |= readl(base + CORESIGHT_CIDRn(i)) << (i * 8);
+
+ return cid;
+}
+
+static inline bool is_coresight_device(void __iomem *base)
{
- return -EINVAL;
+ u32 cid = coresight_get_cid(base);
+
+ return cid == CORESIGHT_CID;
}
-static inline int coresight_claim_device(void __iomem *base)
+/*
+ * Attempt to find and enable "APB clock" for the given device
+ *
+ * Returns:
+ *
+ * clk - Clock is found and enabled
+ * NULL - clock is not found
+ * ERROR - Clock is found but failed to enable
+ */
+static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
{
- return -EINVAL;
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get(dev, "apb_pclk");
+ if (IS_ERR(pclk))
+ return NULL;
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ clk_put(pclk);
+ return ERR_PTR(ret);
+ }
+ return pclk;
}
-static inline void coresight_disclaim_device(void __iomem *base) {}
-static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
-static inline bool coresight_loses_context_with_cpu(struct device *dev)
+static inline u32 coresight_get_pid(struct csdev_access *csa)
{
- return false;
+ u32 i, pid = 0;
+
+ for (i = 0; i < 4; i++)
+ pid |= csdev_access_relaxed_read32(csa, CORESIGHT_PIDRn(i)) << (i * 8);
+
+ return pid;
}
-#endif
+
+static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ return readl_relaxed(csa->base + lo_offset) |
+ ((u64)readl_relaxed(csa->base + hi_offset) << 32);
+ }
+
+ return csa->read(lo_offset, true, false) | (csa->read(hi_offset, true, false) << 32);
+}
+
+static inline void csdev_access_relaxed_write_pair(struct csdev_access *csa, u64 val,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ writel_relaxed((u32)val, csa->base + lo_offset);
+ writel_relaxed((u32)(val >> 32), csa->base + hi_offset);
+ } else {
+ csa->write((u32)val, lo_offset, true, false);
+ csa->write((u32)(val >> 32), hi_offset, true, false);
+ }
+}
+
+static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl(csa->base + offset);
+
+ return csa->read(offset, false, false);
+}
+
+static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
+ u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, false);
+}
+
+static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, false);
+}
+
+#ifdef CONFIG_64BIT
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, true);
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq(csa->base + offset);
+
+ return csa->read(offset, false, true);
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, true);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, true);
+}
+
+#else /* !CONFIG_64BIT */
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+#endif /* CONFIG_64BIT */
+
+static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
+}
+
+static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) &&
+ (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM);
+}
+
+/*
+ * Atomically try to take the device and set a new mode. Returns true on
+ * success, false if the device is already taken by someone else.
+ */
+static inline bool coresight_take_mode(struct coresight_device *csdev,
+ enum cs_mode new_mode)
+{
+ return local_cmpxchg(&csdev->mode, CS_MODE_DISABLED, new_mode) ==
+ CS_MODE_DISABLED;
+}
+
+static inline enum cs_mode coresight_get_mode(struct coresight_device *csdev)
+{
+ return local_read(&csdev->mode);
+}
+
+static inline void coresight_set_mode(struct coresight_device *csdev,
+ enum cs_mode new_mode)
+{
+ enum cs_mode current_mode = coresight_get_mode(csdev);
+
+ /*
+ * Changing to a new mode must be done from an already disabled state
+ * unless it's synchronized with coresight_take_mode(). Otherwise the
+ * device is already in use and signifies a locking issue.
+ */
+ WARN(new_mode != CS_MODE_DISABLED && current_mode != CS_MODE_DISABLED &&
+ current_mode != new_mode, "Device already in use\n");
+
+ local_set(&csdev->mode, new_mode);
+}
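A sketch of the claim/release discipline these helpers imply (the demo_* names are hypothetical): a path atomically takes the device for one mode and resets it to CS_MODE_DISABLED when torn down:

	static int demo_enable(struct coresight_device *csdev)
	{
		if (!coresight_take_mode(csdev, CS_MODE_SYSFS))
			return -EBUSY;	/* already claimed by Perf or sysfs */
		/* program the hardware here */
		return 0;
	}

	static void demo_disable(struct coresight_device *csdev)
	{
		coresight_set_mode(csdev, CS_MODE_DISABLED);
	}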
+
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable_sysfs(struct coresight_device *csdev);
+extern void coresight_disable_sysfs(struct coresight_device *csdev);
+extern int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value);
+
+extern int coresight_claim_device(struct coresight_device *csdev);
+extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
+
+extern void coresight_disclaim_device(struct coresight_device *csdev);
+extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
+extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
+ struct device *dev);
+
+extern bool coresight_loses_context_with_cpu(struct device *dev);
+
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
+u32 coresight_read32(struct coresight_device *csdev, u32 offset);
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset);
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
+u64 coresight_read64(struct coresight_device *csdev, u32 offset);
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset);
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
-
-#endif
+struct coresight_connection *
+coresight_add_out_conn(struct device *dev,
+ struct coresight_platform_data *pdata,
+ const struct coresight_connection *new_conn);
+int coresight_add_in_conn(struct coresight_connection *conn);
+struct coresight_device *
+coresight_find_input_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+struct coresight_device *
+coresight_find_output_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+
+#endif /* _LINUX_CORESIGHT_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 9dbd5df4cd34..702e9108bbb4 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -6,417 +6,352 @@
#ifndef _COUNTER_H_
#define _COUNTER_H_
-#include <linux/counter_enum.h>
+#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
#include <linux/types.h>
-
-enum counter_count_direction {
- COUNTER_COUNT_DIRECTION_FORWARD = 0,
- COUNTER_COUNT_DIRECTION_BACKWARD
-};
-extern const char *const counter_count_direction_str[2];
-
-enum counter_count_mode {
- COUNTER_COUNT_MODE_NORMAL = 0,
- COUNTER_COUNT_MODE_RANGE_LIMIT,
- COUNTER_COUNT_MODE_NON_RECYCLE,
- COUNTER_COUNT_MODE_MODULO_N
-};
-extern const char *const counter_count_mode_str[4];
+#include <linux/wait.h>
+#include <uapi/linux/counter.h>
struct counter_device;
+struct counter_count;
+struct counter_synapse;
struct counter_signal;
+enum counter_comp_type {
+ COUNTER_COMP_U8,
+ COUNTER_COMP_U64,
+ COUNTER_COMP_BOOL,
+ COUNTER_COMP_SIGNAL_LEVEL,
+ COUNTER_COMP_FUNCTION,
+ COUNTER_COMP_SYNAPSE_ACTION,
+ COUNTER_COMP_ENUM,
+ COUNTER_COMP_COUNT_DIRECTION,
+ COUNTER_COMP_COUNT_MODE,
+ COUNTER_COMP_SIGNAL_POLARITY,
+ COUNTER_COMP_ARRAY,
+};
+
/**
- * struct counter_signal_ext - Counter Signal extensions
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
+ * struct counter_comp - Counter component node
+ * @type: Counter component data type
+ * @name: device-specific component name
+ * @priv: component-relevant data
+ * @action_read: Synapse action mode read callback. The read value of the
+ * respective Synapse action mode should be passed back via
+ * the action parameter.
+ * @device_u8_read: Device u8 component read callback. The read value of the
+ * respective Device u8 component should be passed back via
+ * the val parameter.
+ * @count_u8_read: Count u8 component read callback. The read value of the
+ * respective Count u8 component should be passed back via
+ * the val parameter.
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * respective Signal u8 component should be passed back via
+ * the val parameter.
+ * @device_u32_read: Device u32 component read callback. The read value of
+ * the respective Device u32 component should be passed
+ * back via the val parameter.
+ * @count_u32_read: Count u32 component read callback. The read value of the
+ * respective Count u32 component should be passed back via
+ * the val parameter.
+ * @signal_u32_read: Signal u32 component read callback. The read value of
+ * the respective Signal u32 component should be passed
+ * back via the val parameter.
+ * @device_u64_read: Device u64 component read callback. The read value of
+ * the respective Device u64 component should be passed
+ * back via the val parameter.
+ * @count_u64_read: Count u64 component read callback. The read value of the
+ * respective Count u64 component should be passed back via
+ * the val parameter.
+ * @signal_u64_read: Signal u64 component read callback. The read value of
+ * the respective Signal u64 component should be passed
+ * back via the val parameter.
+ * @signal_array_u32_read: Signal u32 array component read callback. The
+ * index of the respective Count u32 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u32 array component element should be
+ * passed back via the val parameter.
+ * @device_array_u64_read: Device u64 array component read callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Device u64 array component element should be
+ * passed back via the val parameter.
+ * @count_array_u64_read: Count u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @signal_array_u64_read: Signal u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
+ * the respective Synapse action mode is passed via the
+ * action parameter.
+ * @device_u8_write: Device u8 component write callback. The write value of
+ * the respective Device u8 component is passed via the val
+ * parameter.
+ * @count_u8_write: Count u8 component write callback. The write value of
+ * the respective Count u8 component is passed via the val
+ * parameter.
+ * @signal_u8_write: Signal u8 component write callback. The write value of
+ * the respective Signal u8 component is passed via the val
+ * parameter.
+ * @device_u32_write: Device u32 component write callback. The write value of
+ * the respective Device u32 component is passed via the
+ * val parameter.
+ * @count_u32_write: Count u32 component write callback. The write value of
+ * the respective Count u32 component is passed via the val
+ * parameter.
+ * @signal_u32_write: Signal u32 component write callback. The write value of
+ * the respective Signal u32 component is passed via the
+ * val parameter.
+ * @device_u64_write: Device u64 component write callback. The write value of
+ * the respective Device u64 component is passed via the
+ * val parameter.
+ * @count_u64_write: Count u64 component write callback. The write value of
+ * the respective Count u64 component is passed via the val
+ * parameter.
+ * @signal_u64_write: Signal u64 component write callback. The write value of
+ * the respective Signal u64 component is passed via the
+ * val parameter.
+ * @signal_array_u32_write: Signal u32 array component write callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u32 array component element is passed via
+ * the val parameter.
+ * @device_array_u64_write: Device u64 array component write callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Device u64 array component element is passed via
+ * the val parameter.
+ * @count_array_u64_write: Count u64 array component write callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Count u64 array component element is passed via
+ * the val parameter.
+ * @signal_array_u64_write: Signal u64 array component write callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u64 array component element is passed via
+ * the val parameter.
*/
-struct counter_signal_ext {
+struct counter_comp {
+ enum counter_comp_type type;
const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_signal *signal, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
void *priv;
+ union {
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*device_u8_read)(struct counter_device *counter, u8 *val);
+ int (*count_u8_read)(struct counter_device *counter,
+ struct counter_count *count, u8 *val);
+ int (*signal_u8_read)(struct counter_device *counter,
+ struct counter_signal *signal, u8 *val);
+ int (*device_u32_read)(struct counter_device *counter,
+ u32 *val);
+ int (*count_u32_read)(struct counter_device *counter,
+ struct counter_count *count, u32 *val);
+ int (*signal_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal, u32 *val);
+ int (*device_u64_read)(struct counter_device *counter,
+ u64 *val);
+ int (*count_u64_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *val);
+ int (*signal_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal, u64 *val);
+ int (*signal_array_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *val);
+ int (*device_array_u64_read)(struct counter_device *counter,
+ size_t idx, u64 *val);
+ int (*count_array_u64_read)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *val);
+ int (*signal_array_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 *val);
+ };
+ union {
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*device_u8_write)(struct counter_device *counter, u8 val);
+ int (*count_u8_write)(struct counter_device *counter,
+ struct counter_count *count, u8 val);
+ int (*signal_u8_write)(struct counter_device *counter,
+ struct counter_signal *signal, u8 val);
+ int (*device_u32_write)(struct counter_device *counter,
+ u32 val);
+ int (*count_u32_write)(struct counter_device *counter,
+ struct counter_count *count, u32 val);
+ int (*signal_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal, u32 val);
+ int (*device_u64_write)(struct counter_device *counter,
+ u64 val);
+ int (*count_u64_write)(struct counter_device *counter,
+ struct counter_count *count, u64 val);
+ int (*signal_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal, u64 val);
+ int (*signal_array_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 val);
+ int (*device_array_u64_write)(struct counter_device *counter,
+ size_t idx, u64 val);
+ int (*count_array_u64_write)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 val);
+ int (*signal_array_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 val);
+ };
};
/**
* struct counter_signal - Counter Signal node
- * @id: unique ID used to identify signal
- * @name: device-specific Signal name; ideally, this should match the name
- * as it appears in the datasheet documentation
- * @ext: optional array of Counter Signal extensions
- * @num_ext: number of Counter Signal extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @id: unique ID used to identify the Signal
+ * @name: device-specific Signal name
+ * @ext: optional array of Signal extensions
+ * @num_ext: number of Signal extensions specified in @ext
*/
struct counter_signal {
int id;
const char *name;
- const struct counter_signal_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_signal_enum_ext - Signal enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_signal_enum_ext structure can be used to implement enum style
- * Signal extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Signal
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_signal_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter,
- struct counter_signal *signal, size_t *item);
- int (*set)(struct counter_device *counter,
- struct counter_signal *signal, size_t item);
-};
-
-/**
- * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE()
- */
-#define COUNTER_SIGNAL_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_signal_enum_read, \
- .write = counter_signal_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_SIGNAL_ENUM()
- */
-#define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_signal_enum_available_read, \
- .priv = (_e) \
-}
-
-enum counter_synapse_action {
- COUNTER_SYNAPSE_ACTION_NONE = 0,
- COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
/**
* struct counter_synapse - Counter Synapse node
- * @action: index of current action mode
* @actions_list: array of available action modes
* @num_actions: number of action modes specified in @actions_list
- * @signal: pointer to associated signal
+ * @signal: pointer to the associated Signal
*/
struct counter_synapse {
- size_t action;
const enum counter_synapse_action *actions_list;
size_t num_actions;
struct counter_signal *signal;
};
-struct counter_count;
-
-/**
- * struct counter_count_ext - Counter Count extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_count_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_count *count, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-enum counter_count_function {
- COUNTER_COUNT_FUNCTION_INCREASE = 0,
- COUNTER_COUNT_FUNCTION_DECREASE,
- COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X4
-};
-
/**
* struct counter_count - Counter Count node
- * @id: unique ID used to identify Count
- * @name: device-specific Count name; ideally, this should match
- * the name as it appears in the datasheet documentation
- * @function: index of current function mode
- * @functions_list: array available function modes
+ * @id: unique ID used to identify the Count
+ * @name: device-specific Count name
+ * @functions_list: array of available function modes
* @num_functions: number of function modes specified in @functions_list
- * @synapses: array of synapses for initialization
- * @num_synapses: number of synapses specified in @synapses
- * @ext: optional array of Counter Count extensions
- * @num_ext: number of Counter Count extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @synapses: array of Synapses for initialization
+ * @num_synapses: number of Synapses specified in @synapses
+ * @ext: optional array of Count extensions
+ * @num_ext: number of Count extensions specified in @ext
*/
struct counter_count {
int id;
const char *name;
- size_t function;
- const enum counter_count_function *functions_list;
+ const enum counter_function *functions_list;
size_t num_functions;
struct counter_synapse *synapses;
size_t num_synapses;
- const struct counter_count_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_count_enum_ext - Count enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_count_enum_ext structure can be used to implement enum style
- * Count extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Count
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_count_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, struct counter_count *count,
- size_t *item);
- int (*set)(struct counter_device *counter, struct counter_count *count,
- size_t item);
-};
-
-/**
- * COUNTER_COUNT_ENUM() - Initialize Count enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE()
- */
-#define COUNTER_COUNT_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_count_enum_read, \
- .write = counter_count_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_COUNT_ENUM()
- */
-#define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_count_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
- * struct counter_device_attr_group - internal container for attribute group
- * @attr_group: Counter sysfs attributes group
- * @attr_list: list to keep track of created Counter sysfs attributes
- * @num_attr: number of Counter sysfs attributes
- */
-struct counter_device_attr_group {
- struct attribute_group attr_group;
- struct list_head attr_list;
- size_t num_attr;
};
/**
- * struct counter_device_state - internal state container for a Counter device
- * @id: unique ID used to identify the Counter
- * @dev: internal device structure
- * @groups_list: attribute groups list (for Signals, Counts, and ext)
- * @num_groups: number of attribute groups containers
- * @groups: Counter sysfs attribute groups (to populate @dev.groups)
+ * struct counter_event_node - Counter Event node
+ * @l: list of current watching Counter events
+ * @event: event that triggers
+ * @channel: event channel
+ * @comp_list: list of components to watch when event triggers
*/
-struct counter_device_state {
- int id;
- struct device dev;
- struct counter_device_attr_group *groups_list;
- size_t num_groups;
- const struct attribute_group **groups;
-};
-
-enum counter_signal_value {
- COUNTER_SIGNAL_LOW = 0,
- COUNTER_SIGNAL_HIGH
+struct counter_event_node {
+ struct list_head l;
+ u8 event;
+ u8 channel;
+ struct list_head comp_list;
};
/**
* struct counter_ops - Callbacks from driver
- * @signal_read: optional read callback for Signal attribute. The read
- * value of the respective Signal should be passed back via
- * the val parameter.
- * @count_read: optional read callback for Count attribute. The read
- * value of the respective Count should be passed back via
- * the val parameter.
- * @count_write: optional write callback for Count attribute. The write
- * value for the respective Count is passed in via the val
+ * @signal_read: optional read callback for Signals. The read level of
+ * the respective Signal should be passed back via the
+ * level parameter.
+ * @count_read: read callback for Counts. The read value of the
+ * respective Count should be passed back via the value
+ * parameter.
+ * @count_write: optional write callback for Counts. The write value for
+ * the respective Count is passed in via the value
* parameter.
- * @function_get: function to get the current count function mode. Returns
- * 0 on success and negative error code on error. The index
- * of the respective Count's returned function mode should
- * be passed back via the function parameter.
- * @function_set: function to set the count function mode. function is the
- * index of the requested function mode from the respective
- * Count's functions_list array.
- * @action_get: function to get the current action mode. Returns 0 on
- * success and negative error code on error. The index of
- * the respective Synapse's returned action mode should be
+ * @function_read:	read callback for the Count function modes. The read
+ * function mode of the respective Count should be passed
+ * back via the function parameter.
+ * @function_write: optional write callback for Count function modes. The
+ * function mode to write for the respective Count is
+ * passed in via the function parameter.
+ * @action_read:	optional read callback for the Synapse action modes. The
+ * read action mode of the respective Synapse should be
* passed back via the action parameter.
- * @action_set: function to set the action mode. action is the index of
- * the requested action mode from the respective Synapse's
- * actions_list array.
+ * @action_write: optional write callback for Synapse action modes. The
+ * action mode to write for the respective Synapse is
+ * passed in via the action parameter.
+ * @events_configure: optional write callback to configure events. The list of
+ * struct counter_event_node may be accessed via the
+ * events_list member of the counter parameter.
+ * @watch_validate: optional callback to validate a watch. The Counter
+ * component watch configuration is passed in via the watch
+ * parameter. A return value of 0 indicates a valid Counter
+ * component watch configuration.
*/
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
- enum counter_signal_value *val);
+ enum counter_signal_level *level);
int (*count_read)(struct counter_device *counter,
- struct counter_count *count, unsigned long *val);
+ struct counter_count *count, u64 *value);
int (*count_write)(struct counter_device *counter,
- struct counter_count *count, unsigned long val);
- int (*function_get)(struct counter_device *counter,
- struct counter_count *count, size_t *function);
- int (*function_set)(struct counter_device *counter,
- struct counter_count *count, size_t function);
- int (*action_get)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t *action);
- int (*action_set)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t action);
+ struct counter_count *count, u64 value);
+ int (*function_read)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function);
+ int (*function_write)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function);
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*events_configure)(struct counter_device *counter);
+ int (*watch_validate)(struct counter_device *counter,
+ const struct counter_watch *watch);
};
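For illustration, a minimal sketch of a driver-side count_read callback under the new signatures; struct dummy_priv, its base field, and DUMMY_COUNT_REG are hypothetical and not part of this patch:

	static int dummy_count_read(struct counter_device *counter,
				    struct counter_count *count, u64 *value)
	{
		struct dummy_priv *const priv = counter_priv(counter);

		/* pass the hardware count back via the value parameter */
		*value = readl(priv->base + DUMMY_COUNT_REG);
		return 0;
	}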
/**
- * struct counter_device_ext - Counter device extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_device_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-/**
- * struct counter_device_enum_ext - Counter enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_device_enum_ext structure can be used to implement enum style
- * Counter extension attributes. Enum style attributes are those which have a
- * set of strings that map to unsigned integer values. The Generic Counter enum
- * extension helper code takes care of mapping between value and string, as well
- * as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_device_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, size_t *item);
- int (*set)(struct counter_device *counter, size_t item);
-};
-
-/**
- * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE()
- */
-#define COUNTER_DEVICE_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_device_enum_read, \
- .write = counter_device_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_DEVICE_ENUM()
- */
-#define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_device_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
* struct counter_device - Counter data structure
- * @name: name of the device as it appears in the datasheet
+ * @name: name of the device
* @parent: optional parent device providing the counters
- * @device_state: internal device state container
* @ops: callbacks from driver
* @signals: array of Signals
* @num_signals: number of Signals specified in @signals
@@ -425,11 +360,21 @@ struct counter_device_enum_ext {
* @ext: optional array of Counter device extensions
* @num_ext: number of Counter device extensions specified in @ext
* @priv: optional private data supplied by driver
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of current watching Counter events
+ * @events_list_lock: lock to protect Counter events list operations
+ * @next_events_list: list of next watching Counter events
+ * @n_events_list_lock: lock to protect Counter next events list operations
+ * @events: queue of detected Counter events
+ * @events_wait: wait queue to allow blocking reads of Counter events
+ * @events_in_lock: lock to protect Counter events queue in operations
+ * @events_out_lock: lock to protect Counter events queue out operations
+ * @ops_exist_lock: lock to prevent use during removal
*/
struct counter_device {
const char *name;
struct device *parent;
- struct counter_device_state *device_state;
const struct counter_ops *ops;
@@ -438,17 +383,250 @@ struct counter_device {
struct counter_count *counts;
size_t num_counts;
- const struct counter_device_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
- void *priv;
+ struct device dev;
+ struct cdev chrdev;
+ struct list_head events_list;
+ spinlock_t events_list_lock;
+ struct list_head next_events_list;
+ struct mutex n_events_list_lock;
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ wait_queue_head_t events_wait;
+ spinlock_t events_in_lock;
+ struct mutex events_out_lock;
+ struct mutex ops_exist_lock;
};
-int counter_register(struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter) __attribute_const__;
+
+struct counter_device *counter_alloc(size_t sizeof_priv);
+void counter_put(struct counter_device *const counter);
+int counter_add(struct counter_device *const counter);
+
void counter_unregister(struct counter_device *const counter);
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter);
-void devm_counter_unregister(struct device *dev,
- struct counter_device *const counter);
+struct counter_device *devm_counter_alloc(struct device *dev,
+ size_t sizeof_priv);
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter);
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel);
+
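A hedged sketch of the registration flow this API implies, including event reporting via counter_push_event(); all dummy_* names are illustrative, and dummy_ops, dummy_signals, and dummy_counts are assumed to be defined elsewhere by the driver:

	static irqreturn_t dummy_irq_handler(int irq, void *private)
	{
		struct counter_device *const counter = private;

		/* report an overflow event on channel 0 to watching users */
		counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);
		return IRQ_HANDLED;
	}

	static int dummy_probe(struct platform_device *pdev)
	{
		struct counter_device *counter;
		struct dummy_priv *priv;

		/* allocate the counter together with the driver private data */
		counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
		if (!counter)
			return -ENOMEM;
		priv = counter_priv(counter);

		counter->name = dev_name(&pdev->dev);
		counter->parent = &pdev->dev;
		counter->ops = &dummy_ops;
		counter->signals = dummy_signals;
		counter->num_signals = ARRAY_SIZE(dummy_signals);
		counter->counts = dummy_counts;
		counter->num_counts = ARRAY_SIZE(dummy_counts);

		return devm_counter_add(&pdev->dev, counter);
	}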
+#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .device_u64_read = (_read), \
+ .device_u64_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .count_u64_read = (_read), \
+ .count_u64_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .signal_u64_read = (_read), \
+ .signal_u64_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+struct counter_available {
+ union {
+ const u32 *enums;
+ const char *const *strs;
+ };
+ size_t num_items;
+};
+
+#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \
+ struct counter_available _name = { \
+ .enums = (_enums), \
+ .num_items = ARRAY_SIZE(_enums), \
+ }
+
+#define DEFINE_COUNTER_ENUM(_name, _strs) \
+ struct counter_available _name = { \
+ .strs = (_strs), \
+ .num_items = ARRAY_SIZE(_strs), \
+ }
+
+#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .device_u32_read = (_get), \
+ .device_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .count_u32_read = (_get), \
+ .count_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .signal_u32_read = (_get), \
+ .signal_u32_write = (_set), \
+ .priv = &(_available), \
+}
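As an example of how these definitions combine, a hypothetical enum extension; dummy_noise_read and dummy_noise_write stand in for driver signal_u32 callbacks:

	static const char *const dummy_noise_modes[] = { "off", "low", "high" };
	static DEFINE_COUNTER_ENUM(dummy_noise_available, dummy_noise_modes);

	static struct counter_comp dummy_signal_ext[] = {
		COUNTER_COMP_SIGNAL_ENUM("noise_mode", dummy_noise_read,
					 dummy_noise_write,
					 dummy_noise_available),
	};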
+
+struct counter_array {
+ enum counter_comp_type type;
+ const struct counter_available *avail;
+ union {
+ size_t length;
+ size_t idx;
+ };
+};
+
+#define DEFINE_COUNTER_ARRAY_U64(_name, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_U64, \
+ .length = (_length), \
+ }
+
+#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
+ DEFINE_COUNTER_ARRAY_U64(_name, _length)
+
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .avail = &(_available), \
+ .length = (_length), \
+ }
+
+#define COUNTER_COMP_DEVICE_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .device_array_u64_read = (_read), \
+ .device_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_COUNT_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .count_array_u64_read = (_read), \
+ .count_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_SIGNAL_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .signal_array_u64_read = (_read), \
+ .signal_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+
+#define COUNTER_COMP_CAPTURE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("capture", _read, _write)
+
+#define COUNTER_COMP_CEILING(_read, _write) \
+ COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+
+#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_COUNT_MODE, \
+ .name = "count_mode", \
+ .count_u32_read = (_read), \
+ .count_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_DIRECTION(_read) \
+{ \
+ .type = COUNTER_COMP_COUNT_DIRECTION, \
+ .name = "direction", \
+ .count_u32_read = (_read), \
+}
+
+#define COUNTER_COMP_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("enable", _read, _write)
+
+#define COUNTER_COMP_FLOOR(_read, _write) \
+ COUNTER_COMP_COUNT_U64("floor", _read, _write)
+
+#define COUNTER_COMP_POLARITY(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .name = "polarity", \
+ .signal_u32_read = (_read), \
+ .signal_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_PRESET(_read, _write) \
+ COUNTER_COMP_COUNT_U64("preset", _read, _write)
+
+#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
+
+#define COUNTER_COMP_ARRAY_CAPTURE(_read, _write, _array) \
+ COUNTER_COMP_COUNT_ARRAY_U64("capture", _read, _write, _array)
+
+#define COUNTER_COMP_ARRAY_POLARITY(_read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = "polarity", \
+ .signal_array_u32_read = (_read), \
+ .signal_array_u32_write = (_write), \
+ .priv = &(_array), \
+}
#endif /* _COUNTER_H_ */
diff --git a/include/linux/counter_enum.h b/include/linux/counter_enum.h
deleted file mode 100644
index 9f917298a88f..000000000000
--- a/include/linux/counter_enum.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Counter interface enum functions
- * Copyright (C) 2018 William Breathitt Gray
- */
-#ifndef _COUNTER_ENUM_H_
-#define _COUNTER_ENUM_H_
-
-#include <linux/types.h>
-
-struct counter_device;
-struct counter_signal;
-struct counter_count;
-
-ssize_t counter_signal_enum_read(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- char *buf);
-ssize_t counter_signal_enum_write(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_signal_enum_available_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *priv, char *buf);
-
-ssize_t counter_count_enum_read(struct counter_device *counter,
- struct counter_count *count, void *priv,
- char *buf);
-ssize_t counter_count_enum_write(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_count_enum_available_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf);
-
-ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
- char *buf);
-ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_device_enum_available_read(struct counter_device *counter,
- void *priv, char *buf);
-
-#endif /* _COUNTER_ENUM_H_ */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 8537e9282a65..265b0f8fc0b3 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -90,6 +90,29 @@ enum {
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Event record UUIDs are formatted as GUIDs and reported in the section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID \
+ GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID \
+ GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID \
+ GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
+
/*
* Flags bits definitions for flags in struct cper_record_header
* If set, the error has been recovered
@@ -230,6 +253,18 @@ enum {
#define CPER_MEM_VALID_RANK_NUMBER 0x8000
#define CPER_MEM_VALID_CARD_HANDLE 0x10000
#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
+#define CPER_MEM_VALID_ROW_EXT 0x40000
+#define CPER_MEM_VALID_BANK_GROUP 0x80000
+#define CPER_MEM_VALID_BANK_ADDRESS 0x100000
+#define CPER_MEM_VALID_CHIP_ID 0x200000
+
+#define CPER_MEM_EXT_ROW_MASK 0x3
+#define CPER_MEM_EXT_ROW_SHIFT 16
+
+#define CPER_MEM_BANK_ADDRESS_MASK 0xff
+#define CPER_MEM_BANK_GROUP_SHIFT 8
+
+#define CPER_MEM_CHIP_ID_SHIFT 5
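A sketch of how the new bank decode macros are intended to compose, assuming the 16-bit bank field of the memory error section; the helper names are illustrative:

	/* meaningful only when CPER_MEM_VALID_BANK_ADDRESS is set */
	static inline u8 dummy_cper_bank_address(u16 bank)
	{
		return bank & CPER_MEM_BANK_ADDRESS_MASK;
	}

	/* meaningful only when CPER_MEM_VALID_BANK_GROUP is set */
	static inline u8 dummy_cper_bank_group(u16 bank)
	{
		return bank >> CPER_MEM_BANK_GROUP_SHIFT;
	}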
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
@@ -443,7 +478,7 @@ struct cper_sec_mem_err_old {
u8 error_type;
};
-/* Memory Error Section (UEFI >= v2.3), UEFI v2.7 sec N.2.5 */
+/* Memory Error Section (UEFI >= v2.3), UEFI v2.8 sec N.2.5 */
struct cper_sec_mem_err {
u64 validation_bits;
u64 error_status;
@@ -461,7 +496,7 @@ struct cper_sec_mem_err {
u64 responder_id;
u64 target_id;
u8 error_type;
- u8 reserved;
+ u8 extended;
u16 rank;
u16 mem_array_handle; /* "card handle" in UEFI 2.4 */
u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */
@@ -483,8 +518,16 @@ struct cper_mem_err_compact {
u16 rank;
u16 mem_array_handle;
u16 mem_dev_handle;
+ u8 extended;
};
+static inline u32 cper_get_mem_extension(u64 mem_valid, u8 mem_extended)
+{
+ if (!(mem_valid & CPER_MEM_VALID_ROW_EXT))
+ return 0;
+ return (mem_extended & CPER_MEM_EXT_ROW_MASK) << CPER_MEM_EXT_ROW_SHIFT;
+}
+
/* PCI Express Error Section, UEFI v2.7 sec N.2.7 */
struct cper_sec_pcie {
u64 validation_bits;
@@ -538,6 +581,7 @@ extern const char *const cper_proc_error_type_strs[4];
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
+const char *cper_mem_err_status_str(u64 status);
void cper_print_bits(const char *prefix, unsigned int bits,
const char * const strs[], unsigned int strs_size);
void cper_mem_err_pack(const struct cper_sec_mem_err *,
@@ -548,5 +592,13 @@ void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg);
+
+struct acpi_hest_generic_status;
+void cper_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 8aa84c052fdf..272e4e79e15c 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpu_smt.h>
struct device;
struct device_node;
@@ -65,17 +66,35 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern bool arch_cpu_is_hotpluggable(int cpu);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+DECLARE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
/*
* These states are not related to the core CPU hotplug mechanism. They are
* used by various (sub)architectures to track internal state
@@ -95,7 +114,7 @@ void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
-void bringup_nonboot_cpus(unsigned int setup_max_cpus);
+void bringup_nonboot_cpus(unsigned int max_cpus);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -108,8 +127,12 @@ static inline void cpu_maps_update_done(void)
{
}
+static inline int add_cpu(unsigned int cpu) { return 0; }
+
#endif /* CONFIG_SMP */
-extern struct bus_type cpu_subsys;
+extern const struct bus_type cpu_subsys;
+
+extern int lockdep_is_cpus_held(void);
#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
@@ -135,14 +158,11 @@ static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
-/* Wrappers which go away once all code is converted */
-static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
-static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
-static inline void get_online_cpus(void) { cpus_read_lock(); }
-static inline void put_online_cpus(void) { cpus_read_unlock(); }
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
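With the guard defined above, callers can hold the CPU read lock for a full scope without an explicit unlock; a minimal sketch:

	static void dummy_walk_online_cpus(void)
	{
		unsigned int cpu;

		guard(cpus_read_lock)();	/* dropped automatically at scope exit */

		for_each_online_cpu(cpu)
			pr_info("cpu%u is online\n", cpu);
	}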
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
@@ -168,24 +188,26 @@ static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
-/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __attribute__((__section__(".cpuidle.text")))
-
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
-void arch_cpu_idle_dead(void);
+void arch_tick_broadcast_enter(void);
+void arch_tick_broadcast_exit(void);
+void __noreturn arch_cpu_idle_dead(void);
+
+#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+void arch_cpu_finalize_init(void);
+#else
+static inline void arch_cpu_finalize_init(void) { }
+#endif
-int cpu_report_state(int cpu);
-int cpu_check_up_prepare(int cpu);
-void cpu_set_state_online(int cpu);
void play_idle_precise(u64 duration_ns, u64 latency_ns);
static inline void play_idle(unsigned long duration_us)
@@ -194,37 +216,11 @@ static inline void play_idle(unsigned long duration_us)
}
#ifdef CONFIG_HOTPLUG_CPU
-bool cpu_wait_death(unsigned int cpu, int seconds);
-bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-enum cpuhp_smt_control {
- CPU_SMT_ENABLED,
- CPU_SMT_DISABLED,
- CPU_SMT_FORCE_DISABLED,
- CPU_SMT_NOT_SUPPORTED,
- CPU_SMT_NOT_IMPLEMENTED,
-};
-
-#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
-extern enum cpuhp_smt_control cpu_smt_control;
-extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology(void);
-extern bool cpu_smt_possible(void);
-extern int cpuhp_smt_enable(void);
-extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
-#else
-# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
-static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology(void) { }
-static inline bool cpu_smt_possible(void) { return false; }
-static inline int cpuhp_smt_enable(void) { return 0; }
-static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
-#endif
-
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index be8aea04d023..cae324d10965 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -16,14 +16,13 @@
* struct cpu_rmap - CPU affinity reverse-map
* @refcount: kref for object
* @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
* @obj: Pointer to array of object pointers
* @near: For each CPU, the index and distance to the nearest object,
* based on affinity masks
*/
struct cpu_rmap {
struct kref refcount;
- u16 size, used;
+ u16 size;
void **obj;
struct {
u16 index;
@@ -61,6 +60,7 @@ static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
}
extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
#endif /* __LINUX_CPU_RMAP_H */
diff --git a/include/linux/cpu_smt.h b/include/linux/cpu_smt.h
new file mode 100644
index 000000000000..0c1664294b57
--- /dev/null
+++ b/include/linux/cpu_smt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPU_SMT_H_
+#define _LINUX_CPU_SMT_H_
+
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern unsigned int cpu_smt_num_threads;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads);
+extern bool cpu_smt_possible(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+#else
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
+# define cpu_smt_num_threads 1
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads) { }
+static inline bool cpu_smt_possible(void) { return false; }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+#endif
+
+#endif /* _LINUX_CPU_SMT_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a911e5d06845..9956afb9acc2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -9,13 +9,17 @@
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
+#include <linux/minmax.h>
/*********************************************************************
* CPUFREQ INTERFACE *
@@ -65,7 +69,6 @@ struct cpufreq_policy {
unsigned int max; /* in kHz */
unsigned int cur; /* in kHz, only needed if cpufreq
* governors are used */
- unsigned int restore_freq; /* = policy->cur before transition */
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
@@ -110,6 +113,19 @@ struct cpufreq_policy {
bool fast_switch_enabled;
/*
+ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+ * governor.
+ */
+ bool strict_target;
+
+ /*
+ * Set if inefficient frequencies were found in the frequency table.
+ * This indicates if the relation flag CPUFREQ_RELATION_E can be
+ * honored.
+ */
+ bool efficiencies_available;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
@@ -125,6 +141,9 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
@@ -217,9 +236,11 @@ void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
+bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
+bool has_target_index(void);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -237,7 +258,12 @@ static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
return 0;
}
+static inline bool cpufreq_supports_freq_invariance(void)
+{
+ return false;
+}
static inline void disable_cpufreq(void) { }
+static inline void cpufreq_update_limits(unsigned int cpu) { }
#endif
#ifdef CONFIG_CPU_FREQ_STAT
@@ -259,6 +285,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+/* relation flags */
+#define CPUFREQ_RELATION_E BIT(2) /* Get an efficient frequency if possible */
+
+#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
struct freq_attr {
struct attribute attr;
@@ -293,7 +325,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
- u8 flags;
+ u16 flags;
void *driver_data;
/* needed by all drivers */
@@ -303,10 +335,6 @@ struct cpufreq_driver {
/* define one out of two */
int (*setpolicy)(struct cpufreq_policy *policy);
- /*
- * On failure, should always restore frequency to policy->restore_freq
- * (i.e. old freq).
- */
int (*target)(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation); /* Deprecated */
@@ -314,15 +342,18 @@ struct cpufreq_driver {
unsigned int index);
unsigned int (*fast_switch)(struct cpufreq_policy *policy,
unsigned int target_freq);
-
/*
- * Caches and returns the lowest driver-supported frequency greater than
- * or equal to the target frequency, subject to any driver limitations.
- * Does not set the frequency. Only to be implemented for drivers with
- * target().
+ * ->fast_switch() replacement for drivers that use an internal
+ * representation of performance levels and can pass hints other than
+ * the target performance level to the hardware. This can only be set
+ * if ->fast_switch is set too, because in those cases (under specific
+ * conditions) scale invariance can be disabled, which causes the
+ * schedutil governor to fall back to the latter.
*/
- unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
- unsigned int target_freq);
+ void (*adjust_perf)(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
@@ -344,7 +375,7 @@ struct cpufreq_driver {
int (*target_intermediate)(struct cpufreq_policy *policy,
unsigned int index);
- /* should be defined, if possible */
+ /* should be defined, if possible; return 0 on error */
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
@@ -356,7 +387,6 @@ struct cpufreq_driver {
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
int (*exit)(struct cpufreq_policy *policy);
- void (*stop_cpu)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -368,18 +398,32 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
int (*set_boost)(struct cpufreq_policy *policy, int state);
+
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
+ */
+ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
-/* driver isn't removed even if all ->init() calls failed */
-#define CPUFREQ_STICKY BIT(0)
+/*
+ * Set by drivers that need to update internal upper and lower boundaries along
+ * with the target frequency, so the core and governors should also invoke
+ * the driver if the target frequency does not change, but the policy min or max
+ * may have changed.
+ */
+#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS BIT(1)
-/* don't warn on suspend/resume speed mismatches */
-#define CPUFREQ_PM_NO_WARN BIT(2)
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV BIT(2)
/*
* This should be set by platforms having multiple clock-domains, i.e.
@@ -411,15 +455,10 @@ struct cpufreq_driver {
*/
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
-/*
- * Set by drivers that want the core to automatically register the cpufreq
- * driver as a thermal cooling device.
- */
-#define CPUFREQ_IS_COOLING_DEV BIT(7)
-
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
-int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);
@@ -433,17 +472,8 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *poli
unsigned int min,
unsigned int max)
{
- if (policy->min < min)
- policy->min = min;
- if (policy->max < min)
- policy->max = min;
- if (policy->min > max)
- policy->min = max;
- if (policy->max > max)
- policy->max = max;
- if (policy->min > policy->max)
- policy->min = policy->max;
- return;
+ policy->max = clamp(policy->max, min, max);
+ policy->min = clamp(policy->min, min, policy->max);
}
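For illustration, with hypothetical limits min = 1000000 and max = 2000000 (kHz) and an incoming policy of min = 2500000, max = 3000000:

	policy->max = clamp(3000000, 1000000, 2000000) = 2000000
	policy->min = clamp(2500000, 1000000, 2000000) = 2000000

Both fields clamp to the upper limit, matching the removed open-coded sequence including its final policy->min > policy->max correction.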
static inline void
@@ -539,9 +569,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
/*
* The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * ondemand governor will work on any processor with transition latency <= 10ms,
- * using appropriate sampling rate.
+ * polling frequency is 1000 times the transition latency of the processor.
*/
#define LATENCY_MULTIPLIER (1000)
@@ -556,15 +584,28 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- /* For governors which change frequency dynamically by themselves */
- bool dynamic_switching;
struct list_head governor_list;
struct module *owner;
+ u8 flags;
};
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
+
+
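A sketch of how a governor now advertises dynamic switching via the flags field instead of the removed dynamic_switching member; the governor shown is hypothetical and abbreviated:

	static struct cpufreq_governor dummy_governor = {
		.name	= "dummy",
		.flags	= CPUFREQ_GOV_DYNAMIC_SWITCHING,
		.owner	= THIS_MODULE,
	};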
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq);
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
+bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -599,9 +640,11 @@ struct cpufreq_governor *cpufreq_fallback_governor(void);
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_HE);
else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_LE);
}
/* Governor attribute set */
@@ -615,6 +658,11 @@ struct gov_attr_set {
/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
@@ -632,10 +680,11 @@ struct governor_attr {
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0u
-#define CPUFREQ_TABLE_END ~1u
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
-#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
struct cpufreq_frequency_table {
unsigned int flags;
@@ -644,26 +693,6 @@ struct cpufreq_frequency_table {
* order */
};
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-#else
-static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table
- **table)
-{
- return -EINVAL;
-}
-
-static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table
- **table)
-{
-}
-#endif
-
/*
* cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -712,6 +741,22 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/**
+ * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID and
+ * CPUFREQ_INEFFICIENT_FREQ frequencies.
+ * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
+ * @table: the &struct cpufreq_frequency_table to iterate over.
+ * @idx: the table entry currently being processed.
+ * @efficiencies: set to true to only iterate over efficient frequencies.
+ */
+#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
+ continue; \
+ else
+
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
@@ -736,14 +781,15 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq >= target_freq)
@@ -757,14 +803,15 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -787,26 +834,30 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_al(policy, target_freq);
+ return cpufreq_table_find_index_al(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dl(policy, target_freq);
+ return cpufreq_table_find_index_dl(policy, target_freq,
+ efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -829,14 +880,15 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq <= target_freq)
@@ -850,26 +902,30 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ah(policy, target_freq);
+ return cpufreq_table_find_index_ah(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dh(policy, target_freq);
+ return cpufreq_table_find_index_dh(policy, target_freq,
+ efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -896,14 +952,15 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -930,35 +987,71 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ac(policy, target_freq);
+ return cpufreq_table_find_index_ac(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dc(policy, target_freq);
+ return cpufreq_table_find_index_dc(policy, target_freq,
+ efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+{
+ unsigned int freq;
+
+ if (idx < 0)
+ return false;
+
+ freq = policy->freq_table[idx].frequency;
+
+ return freq == clamp_val(freq, policy->min, policy->max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
+ bool efficiencies = policy->efficiencies_available &&
+ (relation & CPUFREQ_RELATION_E);
+ int idx;
+
+ /* cpufreq_table_index_unsorted() has no use for this flag anyway */
+ relation &= ~CPUFREQ_RELATION_E;
+
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
return cpufreq_table_index_unsorted(policy, target_freq,
relation);
-
+retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- return cpufreq_table_find_index_l(policy, target_freq);
+ idx = cpufreq_table_find_index_l(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_H:
- return cpufreq_table_find_index_h(policy, target_freq);
+ idx = cpufreq_table_find_index_h(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_C:
- return cpufreq_table_find_index_c(policy, target_freq);
+ idx = cpufreq_table_find_index_c(policy, target_freq,
+ efficiencies);
+ break;
default:
WARN_ON_ONCE(1);
return 0;
}
+
+ /* Limit frequency index to honor policy->min/max */
+ if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
+
+ return idx;
}
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
@@ -974,6 +1067,88 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
+
+/**
+ * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
+ * @policy: the &struct cpufreq_policy containing the inefficient frequency
+ * @frequency: the inefficient frequency
+ *
+ * The &struct cpufreq_policy must use a sorted frequency table
+ *
+ * Return: %0 on success or a negative errno code
+ */
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ struct cpufreq_frequency_table *pos;
+
+ /* Not supported */
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+ if (pos->frequency == frequency) {
+ pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
+ policy->efficiencies_available = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
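For illustration, a hedged fragment of how a driver might use this helper after building a sorted frequency table; policy and the 1400000 kHz entry are assumed to exist:

	/* mark 1.4 GHz as inefficient so RELATION_E lookups skip it */
	ret = cpufreq_table_set_inefficient(policy, 1400000);
	if (ret)
		pr_warn("frequency not in table or table unsorted\n");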
+
+static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name,
+ struct of_phandle_args *args)
+{
+ struct device_node *cpu_np;
+ int ret;
+
+ cpu_np = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+	ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
+					 args);
+	of_node_put(cpu_np);
+
+	return ret;
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
+{
+ int cpu, ret;
+ struct of_phandle_args args;
+
+ ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
+ if (ret < 0)
+ return ret;
+
+ cpumask_set_cpu(pcpu, cpumask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == pcpu)
+ continue;
+
+ ret = parse_perf_domain(cpu, list_name, cell_name, &args);
+ if (ret < 0)
+ continue;
+
+ if (of_phandle_args_equal(pargs, &args))
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(args.np);
+ }
+
+ return 0;
+}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -993,21 +1168,32 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}
-#endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov) { }
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ return -EINVAL;
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
+{
+ return -EOPNOTSUPP;
+}
#endif
-extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);
-extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
- unsigned long max_freq);
+#ifndef arch_set_freq_scale
+static __always_inline
+void arch_set_freq_scale(const struct cpumask *cpus,
+ unsigned long cur_freq,
+ unsigned long max_freq)
+{
+}
+#endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
@@ -1019,4 +1205,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
+ policy->related_cpus);
+}
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 3215023d4852..35e78ddb2b37 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -22,8 +22,42 @@
* AP_ACTIVE AP_ACTIVE
*/
+/*
+ * CPU hotplug states. The state machine invokes the installed state
+ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
+ * during a CPU online operation. During a CPU offline operation the
+ * installed teardown callbacks are invoked in the reverse order from
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
+ *
+ * The state space has three sections: PREPARE, STARTING and ONLINE.
+ *
+ * PREPARE: The callbacks are invoked on a control CPU before the
+ * hotplugged CPU is started up or after the hotplugged CPU has died.
+ *
+ * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
+ * hotplug startup/teardown code with interrupts disabled.
+ *
+ * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
+ * hotplug thread with interrupts and preemption enabled.
+ *
+ * Adding explicit states to this enum is only necessary when:
+ *
+ * 1) The state is within the STARTING section
+ *
+ * 2) The state has ordering constraints vs. other states in the
+ * same section.
+ *
+ * If neither #1 nor #2 apply, please use the dynamic state space when
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
+ * for the @state argument of the setup function.
+ *
+ * See Documentation/core-api/cpu_hotplug.rst for further information and
+ * examples.
+ */
enum cpuhp_state {
CPUHP_INVALID = -1,
+
+ /* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
@@ -32,10 +66,11 @@ enum cpuhp_state {
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
+ CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
+ CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
@@ -45,6 +80,7 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
@@ -53,13 +89,14 @@ enum cpuhp_state {
CPUHP_MM_MEMCQ_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
- CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
- CPUHP_IOMMU_INTEL_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
+ CPUHP_IOMMU_IOVA_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
+ CPUHP_AP_DTPM_CPU_DEAD,
+ CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -67,7 +104,6 @@ enum cpuhp_state {
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -77,13 +113,11 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
@@ -91,69 +125,77 @@ enum cpuhp_state {
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
+ CPUHP_BP_KICK_AP,
CPUHP_BRINGUP_CPU,
+
+ /*
+ * STARTING section invoked on the hotplugged CPU in low level
+ * bringup and teardown code.
+ */
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
+ CPUHP_AP_CACHECTRL_STARTING,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
+ CPUHP_AP_IRQ_APPLE_AIC_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_RISCV_STARTING,
+ CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
- CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_PERF_RISCV_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_TI_GP_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
- CPUHP_AP_KVM_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_STARTING,
- CPUHP_AP_KVM_ARM_TIMER_STARTING,
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
- CPUHP_AP_ARM_KVMPV_STARTING,
+ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
+ CPUHP_AP_HRTIMERS_DYING,
+ CPUHP_AP_TICK_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
+
+ /* Online section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_HYPERV_ONLINE,
+ CPUHP_AP_KVM_ONLINE,
+ CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
- CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_BLK_MQ_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
@@ -164,31 +206,40 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+ CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+ CPUHP_AP_PERF_CSKY_ONLINE,
+ CPUHP_AP_TMIGR_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
@@ -205,14 +256,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
int (*teardown)(unsigned int cpu),
bool multi_instance);
/**
- * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
+ * callback
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Installs the callback functions and invokes the startup callback on
- * the present cpus which have already reached the @state.
+ * Installs the callback functions and invokes the @startup callback on
+ * the online cpus which have already reached the @state.
*/
static inline int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
@@ -222,6 +274,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
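
For illustration, a minimal sketch of a driver using this interface with a dynamically allocated state (the mydrv_* names are hypothetical; with CPUHP_AP_ONLINE_DYN the call returns the allocated state number on success):

static int mydrv_cpu_online(unsigned int cpu)
{
	/* Per-CPU bring-up, run for each online CPU and on later hotplug. */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	/* Undo mydrv_cpu_online() for @cpu. */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				    mydrv_cpu_online, mydrv_cpu_offline);

	return ret < 0 ? ret : 0;
}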
+/**
+ * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks and invoke the
+ * @startup callback from a cpus_read_lock()
+ * held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state() except that it must be invoked from within a
+ * cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -233,14 +297,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
- * callbacks
+ * @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Same as @cpuhp_setup_state except that no calls are executed are invoked
- * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ * Same as cpuhp_setup_state() except that the @startup callback is not
+ * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -251,6 +315,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}
+/**
+ * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
+ * invoking the @startup callback from
+ * a cpus_read_lock() held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
+ * within a cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -264,13 +341,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state are registered via
- * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls().
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -295,9 +372,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state and invokes the startup callback on
- * the present cpus which have already reached the @state. The @state must have
- * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state and invokes the registered startup
+ * callback on the online cpus which have already reached the @state. The
+ * @state must have been earlier marked as multi-instance by
+ * cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -311,8 +389,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state The @state must have been earlier
- * marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or
+ * HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -320,6 +399,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
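
A sketch of the multi-instance pattern described above (struct mydev and the callback are hypothetical): the state is prepared once, then each device links its hlist_node in.

static enum cpuhp_state mydev_hp_state;

struct mydev {
	struct hlist_node node;		/* linked into the cpuhp instance list */
};

static int mydev_online(unsigned int cpu, struct hlist_node *node)
{
	struct mydev *dev = hlist_entry(node, struct mydev, node);

	/* bring this instance up for @cpu */
	(void)dev;
	return 0;
}

static int __init mydev_init(void)
{
	/* Dynamic states return the allocated state number. */
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydev:online",
					  mydev_online, NULL);
	if (ret < 0)
		return ret;
	mydev_hp_state = ret;
	return 0;
}

/* Later, per device: cpuhp_state_add_instance(mydev_hp_state, &dev->node); */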
+/**
+ * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
+ * without invoking the startup
+ * callback from a cpus_read_lock()
+ * held region.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Same as cpuhp_state_add_instance_nocalls() except that it must be
+ * invoked from within a cpus_read_lock() held region.
+ */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -335,7 +425,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
- * the present cpus which have already reached the @state.
+ * the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -344,7 +434,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)
/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
- * teardown
+ * the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -352,6 +442,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
+ * teardown from a cpus_read_lock() held region.
+ * @state: The state for which the calls are removed
+ *
+ * Same as cpuhp_remove_state_nocalls() except that it must be invoked
+ * from within a cpus_read_lock() held region.
+ */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -379,8 +477,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
- * Removes the instance and invokes the teardown callback on the present cpus
- * which have already reached the @state.
+ * Removes the instance and invokes the teardown callback on the online cpus
+ * which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -390,7 +488,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
/**
* cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- * without invoking the reatdown callback
+ * without invoking the teardown callback
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
@@ -408,4 +506,20 @@ void cpuhp_online_idle(enum cpuhp_state state);
static inline void cpuhp_online_idle(enum cpuhp_state state) { }
#endif
+struct task_struct;
+
+void cpuhp_ap_sync_alive(void);
+void arch_cpuhp_sync_state_poll(void);
+void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu);
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
+bool arch_cpuhp_init_parallel_bringup(void);
+
+#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
+void cpuhp_ap_report_dead(void);
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu);
+#else
+static inline void cpuhp_ap_report_dead(void) { }
+static inline void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
+#endif
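
These are architecture hooks for the bringup/teardown synchronization; generic weak defaults live in kernel/cpu.c. A sketch of what such a default plausibly looks like (simple polling; not necessarily the exact in-tree body):

void __weak arch_cpuhp_sync_state_poll(void)
{
	cpu_relax();	/* busy-wait politely until the AP posts its sync state */
}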
+
#endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 75895e6363b8..3183aeb7f5b4 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/context_tracking.h>
#define CPUIDLE_STATE_MAX 10
#define CPUIDLE_NAME_LEN 16
@@ -38,6 +39,7 @@ struct cpuidle_state_usage {
u64 time_ns;
unsigned long long above; /* Number of times it's been too deep */
unsigned long long below; /* Number of times it's been too shallow */
+ unsigned long long rejected; /* Number of times idle entry was rejected */
#ifdef CONFIG_SUSPEND
unsigned long long s2idle_usage;
unsigned long long s2idle_time; /* in US */
@@ -48,8 +50,8 @@ struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
- u64 exit_latency_ns;
- u64 target_residency_ns;
+ s64 exit_latency_ns;
+ s64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
@@ -82,6 +84,7 @@ struct cpuidle_state {
#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
@@ -113,6 +116,35 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
+static __always_inline void ct_cpuidle_enter(void)
+{
+ lockdep_assert_irqs_disabled();
+ /*
+ * Idle is allowed to (temporarily) enable IRQs. It
+ * will return with IRQs disabled.
+ *
+ * Trace IRQs enable here, then switch off RCU, and have
+ * arch_cpu_idle() use raw_local_irq_enable(). Note that
+ * ct_idle_enter() relies on lockdep IRQ state, so switch that
+ * last -- this is very similar to the entry code.
+ */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+ ct_idle_enter();
+ lockdep_hardirqs_on(_RET_IP_);
+}
+
+static __always_inline void ct_cpuidle_exit(void)
+{
+ /*
+ * Carefully undo the above.
+ */
+ lockdep_hardirqs_off(_RET_IP_);
+ ct_idle_exit();
+ instrumentation_begin();
+}
+
/****************************
* CPUIDLE DRIVER INTERFACE *
****************************/
@@ -269,18 +301,13 @@ struct cpuidle_governor {
void (*reflect) (struct cpuidle_device *dev, int index);
};
-#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern s64 cpuidle_governor_latency_req(unsigned int cpu);
-#else
-static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
-{return 0;}
-#endif
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
idx, \
state, \
- is_retention) \
+ is_retention, is_rcu) \
({ \
int __ret = 0; \
\
@@ -292,7 +319,11 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
if (!is_retention) \
__ret = cpu_pm_enter(); \
if (!__ret) { \
+ if (!is_rcu) \
+ ct_cpuidle_enter(); \
__ret = low_level_idle_enter(state); \
+ if (!is_rcu) \
+ ct_cpuidle_exit(); \
if (!is_retention) \
cpu_pm_exit(); \
} \
@@ -301,15 +332,21 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
})
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)
#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)
#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)
#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
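
As an example of the new is_rcu parameter, a hypothetical platform idle callback using the non-_RCU variant, where the macro itself does the ct_cpuidle_enter()/ct_cpuidle_exit() bracketing (the myplat_* names are invented):

static int myplat_suspend(unsigned long state)
{
	return 0;	/* firmware/PSCI-style suspend call would go here */
}

static int myplat_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int idx)
{
	/*
	 * is_retention == 0 and is_rcu == 0: the macro wraps the low-level
	 * call in cpu_pm_enter()/cpu_pm_exit() and in ct_cpuidle_enter()/
	 * ct_cpuidle_exit(). States flagged CPUIDLE_FLAG_RCU_IDLE would use
	 * the _RCU variant and handle RCU at the right spot themselves.
	 */
	return CPU_PM_CPU_IDLE_ENTER_PARAM(myplat_suspend, idx, idx);
}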
#endif /* _LINUX_CPUIDLE_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f0d895d6ac39..1c29947db848 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -4,14 +4,17 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number. In general,
+ * set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
@@ -33,19 +36,56 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1U
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+static inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
+#else
+ nr_cpu_ids = nr;
+#endif
+}
+
+/*
+ * We have several different "preferred sizes" for the cpumask
+ * operations, depending on operation.
+ *
+ * For example, the bitmap scanning and operating operations have
+ * optimized routines that work for the single-word case, but only when
+ * the size is constant. So if NR_CPUS fits in one single word, we are
+ * better off using that small constant, in order to trigger the
+ * optimized bit finding. That is 'small_cpumask_size'.
+ *
+ * The clearing and copying operations will similarly perform better
+ * with a constant size, but we limit that size arbitrarily to four
+ * words. We call this 'large_cpumask_size'.
+ *
+ * Finally, some operations just want the exact limit, either because
+ * they set bits or just don't have any faster fixed-sized versions. We
+ * call this just 'nr_cpumask_bits'.
+ *
+ * Note that these optional constants are always guaranteed to be at
+ * least as big as 'nr_cpu_ids' itself is, and all our cpumask
+ * allocations are at least that size (see cpumask_size()). The
+ * optimization comes from being able to potentially use a compile-time
+ * constant instead of a run-time generated exact number of CPUs.
+ */
+#if NR_CPUS <= BITS_PER_LONG
+ #define small_cpumask_bits ((unsigned int)NR_CPUS)
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
+#elif NR_CPUS <= 4*BITS_PER_LONG
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits nr_cpu_ids
#endif
+#define nr_cpumask_bits nr_cpu_ids
/*
* The following particular system cpumasks and operations manage
@@ -58,17 +98,13 @@ extern unsigned int nr_cpu_ids;
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
- * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
* that it is possible might ever be plugged in at anytime during the
* life of that system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -77,7 +113,7 @@ extern unsigned int nr_cpu_ids;
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
* will have no useful affect on the following num_*_cpus()
@@ -91,47 +127,18 @@ extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
+extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
+#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
-#if NR_CPUS > 1
-/**
- * num_online_cpus() - Read the number of online CPUs
- *
- * Despite the fact that __num_online_cpus is of type atomic_t, this
- * interface gives only a momentary snapshot and is not protected against
- * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
- * region.
- */
-static inline unsigned int num_online_cpus(void)
-{
- return atomic_read(&__num_online_cpus);
-}
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
-#define num_present_cpus() cpumask_weight(cpu_present_mask)
-#define num_active_cpus() cpumask_weight(cpu_active_mask)
-#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
-#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
-#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
-#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
-#else
-#define num_online_cpus() 1U
-#define num_possible_cpus() 1U
-#define num_present_cpus() 1U
-#define num_active_cpus() 1U
-#define cpu_online(cpu) ((cpu) == 0)
-#define cpu_possible(cpu) ((cpu) == 0)
-#define cpu_present(cpu) ((cpu) == 0)
-#define cpu_active(cpu) ((cpu) == 0)
-#endif
-
extern cpumask_t cpus_booted_once_mask;
-static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
+static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
WARN_ON_ONCE(cpu >= bits);
@@ -139,160 +146,175 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
}
/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
- cpu_max_bits_warn(cpu, nr_cpumask_bits);
+ cpu_max_bits_warn(cpu, small_cpumask_bits);
return cpu;
}
-#if NR_CPUS == 1
-/* Uniprocessor. Assume all masks are "1". */
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
- return 0;
-}
-
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
-{
- return 0;
-}
-
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
- int start, bool wrap)
-{
- /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
- return (wrap && n == 0);
-}
-
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
- unsigned int cpu)
-{
- return 1;
+ return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if all cpus are set.
+ */
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
- return 0;
-}
-
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p) {
- return cpumask_next_and(-1, src1p, src2p);
+ return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
-#else
/**
- * cpumask_first - get the first cpu in a cpumask
- * @srcp: the cpumask pointer
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static inline
+unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
* cpumask_last - get the last CPU in a cpumask
* @srcp: - the cpumask pointer
*
- * Returns >= nr_cpumask_bits if no CPUs set.
+ * Return: >= nr_cpumask_bits if no CPUs set.
*/
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
- return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-unsigned int cpumask_next(int n, const struct cpumask *srcp);
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus set.
+ */
+static inline
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
+}
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus unset.
+ * Return: >= nr_cpu_ids if no further cpus unset.
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
- return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ return 0;
+}
+
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return cpumask_first_and(src1p, src2p);
}
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ return cpumask_first(srcp);
+}
+#else
unsigned int cpumask_local_spread(unsigned int i, int node);
-int cpumask_any_and_distribute(const struct cpumask *src1p,
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);
+unsigned int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
/**
- * for_each_cpu - iterate over every cpu in a mask
- * @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask pointer
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
*
- * After the loop, cpu is >= nr_cpu_ids.
+ * Return: >= nr_cpu_ids if no further cpus set in both.
*/
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+static inline
+unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
/**
- * for_each_cpu_not - iterate over every cpu in a complemented mask
+ * for_each_cpu - iterate over every cpu in a mask
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+#define for_each_cpu(cpu, mask) \
+ for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
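
Usage is unchanged by the new implementation; for example, a small sketch counting the online CPUs within an arbitrary mask:

static unsigned int count_online_in(const struct cpumask *mask)
{
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, mask)
		if (cpu_online(cpu))
			n++;
	return n;
}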
+
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ cpumask_check(start);
+ if (n != -1)
+ cpumask_check(n);
+
+ /*
+ * Return the first available CPU when wrapping, or when starting before cpu0,
+ * since there is only one valid option.
+ */
+ if (wrap && n >= 0)
+ return nr_cpumask_bits;
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+ return cpumask_first(mask);
+}
+#else
+unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
+ * @mask: the cpumask pointer
* @start: the start location
*
* The implementation does not assume any bit in @mask is set (including @start).
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
/**
* for_each_cpu_and - iterate over every cpu in both masks
@@ -309,10 +331,126 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
- (cpu) < nr_cpu_ids;)
-#endif /* SMP */
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
+ * for_each_cpu_or - iterate over every cpu present in either mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_or(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_or(cpu, mask1, mask2) \
+ for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
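
A sketch of what the andnot iterator buys (@req/@done and the IPI choice are illustrative): walking @req minus @done without materializing a temporary mask:

static void kick_remaining(const struct cpumask *req,
			   const struct cpumask *done)
{
	unsigned int cpu;

	for_each_cpu_andnot(cpu, req, done)
		smp_send_reschedule(cpu);
}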
+
+/**
+ * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static inline
+unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+ unsigned int i;
+
+ cpumask_check(cpu);
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
+
+/**
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_andnot - get the Nth cpu set in the 1st cpumask and cleared in the 2nd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static inline
+unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and_andnot - get the Nth cpu set in the 1st and 2nd cpumasks and cleared in the 3rd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @srcp3: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if no such cpu exists.
+ */
+static __always_inline
+unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_nth_and_andnot_bit(cpumask_bits(srcp1),
+ cpumask_bits(srcp2),
+ cpumask_bits(srcp3),
+ small_cpumask_bits, cpumask_check(cpu));
+}
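
One plausible use of the nth helpers is position-based spreading, e.g. mapping a queue index onto the online CPUs (a sketch; the modulo and the recheck guard against the mask changing concurrently):

static unsigned int queue_to_cpu(unsigned int qidx)
{
	unsigned int cpu = cpumask_nth(qidx % num_online_cpus(),
				       cpu_online_mask);

	return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
}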
#define CPU_BITS_NONE \
{ \
@@ -329,12 +467,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -345,12 +483,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -360,9 +498,9 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in @cpumask, else returns 0
+ * Return: true if @cpu is set in @cpumask, else false
*/
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -372,11 +510,11 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
- *
* test_and_set_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu was set in the old bitmap of @cpumask, else false
*/
-static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -386,11 +524,11 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
- *
* test_and_clear_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu was set in the old bitmap of @cpumask, else false
*/
-static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -401,6 +539,10 @@ static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
*/
static inline void cpumask_setall(struct cpumask *dstp)
{
+ if (small_const_nbits(small_cpumask_bits)) {
+ cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
+ return;
+ }
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -410,7 +552,7 @@ static inline void cpumask_setall(struct cpumask *dstp)
*/
static inline void cpumask_clear(struct cpumask *dstp)
{
- bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+ bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
/**
@@ -419,14 +561,14 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * Return: false if *@dstp is empty, else true
*/
-static inline int cpumask_and(struct cpumask *dstp,
+static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -439,7 +581,7 @@ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -453,7 +595,7 @@ static inline void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -462,38 +604,28 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * Return: false if *@dstp is empty, else true
*/
-static inline int cpumask_andnot(struct cpumask *dstp,
+static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
-}
-
-/**
- * cpumask_complement - *dstp = ~*srcp
- * @dstp: the cpumask result
- * @srcp: the input to invert
- */
-static inline void cpumask_complement(struct cpumask *dstp,
- const struct cpumask *srcp)
-{
- bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
- nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
* cpumask_equal - *src1p == *src2p
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if the cpumasks are equal, false if not
*/
static inline bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -501,25 +633,31 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
* @src3p: the third input
+ *
+ * Return: true if first cpumask ORed with second cpumask == third cpumask,
+ * otherwise false
*/
static inline bool cpumask_or_equal(const struct cpumask *src1p,
const struct cpumask *src2p,
const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
- cpumask_bits(src3p), nr_cpumask_bits);
+ cpumask_bits(src3p), small_cpumask_bits);
}
/**
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if first cpumask ANDed with second cpumask is non-empty,
+ * otherwise false
*/
static inline bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -527,27 +665,31 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ * Return: true if *@src1p is a subset of *@src2p, else false
*/
-static inline int cpumask_subset(const struct cpumask *src1p,
+static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
* cpumask_empty - *srcp == 0
* @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
+ *
+ * Return: true if srcp is empty (has no bits set), else false
*/
static inline bool cpumask_empty(const struct cpumask *srcp)
{
- return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
* @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
+ *
+ * Return: true if srcp is full (has all bits set), else false
*/
static inline bool cpumask_full(const struct cpumask *srcp)
{
@@ -557,10 +699,38 @@ static inline bool cpumask_full(const struct cpumask *srcp)
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp
*/
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
- return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in both *srcp1 and *srcp2
+ */
+static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp1 and cleared in *srcp2
+ */
+static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
@@ -573,7 +743,7 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -597,32 +767,23 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
static inline void cpumask_copy(struct cpumask *dstp,
const struct cpumask *srcp)
{
- bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+ bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
/**
* cpumask_any - pick a "random" cpu from *srcp
* @srcp: the input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
- *
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
- */
-#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
-
-/**
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
@@ -638,7 +799,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -652,7 +813,7 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -666,7 +827,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
@@ -678,7 +839,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
@@ -686,11 +847,13 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
}
/**
- * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
+ *
+ * Return: size to allocate for a &struct cpumask in bytes
*/
static inline unsigned int cpumask_size(void)
{
- return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
+ return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long);
}
/*
@@ -700,7 +863,7 @@ static inline unsigned int cpumask_size(void)
* little more difficult, we typedef cpumask_var_t to an array or a
* pointer: doing &mask on an array is a noop, so it still works.
*
- * ie.
+ * i.e.
* cpumask_var_t tmpmask;
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
* return -ENOMEM;
@@ -740,9 +903,37 @@ typedef struct cpumask *cpumask_var_t;
#define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+
+static inline
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node().
+ *
+ * Return: %true if allocation succeeded, %false if not
+ */
+static inline
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
+}
+
+static inline
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
@@ -800,6 +991,8 @@ static inline bool cpumask_available(cpumask_var_t mask)
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
+DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
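
With <linux/cleanup.h> now included above, this enables scope-based freeing. A sketch (only meaningful for CONFIG_CPUMASK_OFFSTACK=y, where cpumask_var_t is a struct cpumask pointer; some_mask is a placeholder):

static int frob_online(const struct cpumask *some_mask)
{
	struct cpumask *tmp __free(free_cpumask_var) = NULL;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(tmp, cpu_online_mask, some_mask);
	/* ... use tmp; it is freed automatically on every return path ... */
	return 0;
}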
+
/* It's common to want to use cpu_all_mask in struct member initializers,
* so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -808,9 +1001,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#endif
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
@@ -851,9 +1051,17 @@ set_cpu_active(unsigned int cpu, bool active)
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
+static inline void
+set_cpu_dying(unsigned int cpu, bool dying)
+{
+ if (dying)
+ cpumask_set_cpu(cpu, &__cpu_dying_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_dying_mask);
+}
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
@@ -888,6 +1096,84 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
return to_cpumask(p);
}
+#if NR_CPUS > 1
+/**
+ * num_online_cpus() - Read the number of online CPUs
+ *
+ * Despite the fact that __num_online_cpus is of type atomic_t, this
+ * interface gives only a momentary snapshot and is not protected against
+ * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
+ * region.
+ *
+ * Return: momentary snapshot of the number of online CPUs
+ */
+static __always_inline unsigned int num_online_cpus(void)
+{
+ return raw_atomic_read(&__num_online_cpus);
+}
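
The comment's caveat in practice: take cpus_read_lock() when the count must stay stable across a sequence of operations (a sketch):

static unsigned int stable_online_count(void)
{
	unsigned int n;

	cpus_read_lock();
	n = num_online_cpus();	/* cannot change while the hotplug lock is held */
	cpus_read_unlock();
	return n;
}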
+#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
+
+static inline bool cpu_online(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_online_mask);
+}
+
+static inline bool cpu_possible(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_possible_mask);
+}
+
+static inline bool cpu_present(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_present_mask);
+}
+
+static inline bool cpu_active(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_active_mask);
+}
+
+static inline bool cpu_dying(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_dying_mask);
+}
+
+#else
+
+#define num_online_cpus() 1U
+#define num_possible_cpus() 1U
+#define num_present_cpus() 1U
+#define num_active_cpus() 1U
+
+static inline bool cpu_online(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_possible(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_present(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_active(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_dying(unsigned int cpu)
+{
+ return false;
+}
+
+#endif /* NR_CPUS > 1 */
+
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#if NR_CPUS <= BITS_PER_LONG
@@ -912,7 +1198,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
- * Returns the length of the (null-terminated) @buf string, zero if
+ * Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
static inline ssize_t
@@ -922,6 +1208,52 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
+/**
+ * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
+ * hex values of cpumask
+ *
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: in the string from which we are copying, we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The function prints the cpumask into the buffer as hex values of
+ * cpumask; Typically used by bin_attribute to export cpumask bitmask
+ * ABI.
+ *
+ * Return: the number of bytes copied, excluding the terminating '\0'.
+ */
+static inline ssize_t
+cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
+/**
+ * cpumap_print_list_to_buf - copies the cpumask into the buffer as
+ * comma-separated list of cpus
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset in the printed string from which to start copying into @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is the same as the above cpumap_print_bitmask_to_buf()
+ * except the print format.
+ *
+ * Return: the number of bytes copied, excluding the terminating '\0'.
+ */
+static inline ssize_t
+cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
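
A sketch of the intended caller, a sysfs bin_attribute ->read() handler exporting a cpumask as a CPU list (mydev_mask is a hypothetical mask owned by the driver):

static ssize_t mydev_cpus_read(struct file *file, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count)
{
	return cpumap_print_list_to_buf(buf, mydev_mask, off, count);
}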
+
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
@@ -945,4 +1277,23 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
+/*
+ * Provide a valid theoretical max size for cpumap and cpulist sysfs files
+ * to avoid breaking userspace, which may allocate a buffer based on the size
+ * reported by e.g. fstat.
+ *
+ * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
+ *
+ * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1), allowing for NR_CPUS to be up
+ * to 2 orders of magnitude larger than 8192. We then divide by 2 to
+ * cover a worst case of every other cpu being on one of two nodes for a
+ * very large NR_CPUS.
+ *
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
+ */
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
+ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
+#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
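
Worked example of the cpumap bound: the mask prints as comma-separated 32-bit groups of 8 hex digits, i.e. 9 characters per 32 CPUs, minus the absent trailing comma. With NR_CPUS = 16384 that gives (16384 * 9)/32 - 1 = 4607 bytes, which exceeds a 4096-byte PAGE_SIZE and so becomes the file size; smaller configurations fall back to PAGE_SIZE.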
+
#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h
new file mode 100644
index 000000000000..83bd3ebe82b0
--- /dev/null
+++ b/include/linux/cpumask_api.h
@@ -0,0 +1 @@
+#include <linux/cpumask.h>
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 04c20de66afc..0ce6ff0d9c9a 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -15,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
@@ -33,6 +34,8 @@
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
@@ -50,32 +53,41 @@ static inline void cpuset_dec(void)
static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general. E.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
+{
+ return static_branch_unlikely(&cpusets_insane_config_key);
+}
+
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
-extern void cpuset_read_lock(void);
-extern void cpuset_read_unlock(void);
+extern void inc_dl_tasks_cs(struct task_struct *task);
+extern void dec_dl_tasks_cs(struct task_struct *task);
+extern void cpuset_lock(void);
+extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
-
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- if (cpusets_enabled())
- return __cpuset_node_allowed(node, gfp_mask);
- return true;
-}
+extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -109,11 +121,6 @@ static inline int cpuset_do_page_mem_spread(void)
return task_spread_page(current);
}
-static inline int cpuset_do_slab_mem_spread(void)
-{
- return task_spread_slab(current);
-}
-
extern bool current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
@@ -166,6 +173,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
@@ -178,17 +187,25 @@ static inline void cpuset_update_active_cpus(void)
static inline void cpuset_wait_for_hotplug(void) { }
-static inline void cpuset_read_lock(void) { }
-static inline void cpuset_read_unlock(void) { }
+static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+static inline void dec_dl_tasks_cs(struct task_struct *task) { }
+static inline void cpuset_lock(void) { }
+static inline void cpuset_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(mask, task_cpu_possible_mask(p));
}
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
+ return false;
+}
+
+static inline bool cpuset_cpu_is_isolated(int cpu)
+{
+ return false;
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -204,11 +221,6 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- return true;
-}
-
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
@@ -247,11 +259,6 @@ static inline int cpuset_do_page_mem_spread(void)
return 0;
}
-static inline int cpuset_do_slab_mem_spread(void)
-{
- return 0;
-}
-
static inline bool current_cpuset_is_being_rebound(void)
{
return false;
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 6594dbc34a37..23270b16e1db 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -6,79 +6,90 @@
#include <linux/elfcore.h>
#include <linux/elf.h>
-#define CRASH_CORE_NOTE_NAME "CORE"
-#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
-#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+struct kimage;
+#ifdef CONFIG_CRASH_DUMP
+
+int crash_shrink_memory(unsigned long new_size);
+ssize_t crash_get_memory_size(void);
+
+#ifndef arch_kexec_protect_crashkres
/*
- * The per-cpu notes area is a list of notes terminated by a "NULL"
- * note header. For kdump, the code in vmcore.c runs in the context
- * of the second kernel to combine them into one note.
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this.
*/
-#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
- CRASH_CORE_NOTE_NAME_BYTES + \
- CRASH_CORE_NOTE_DESC_BYTES)
-
-#define VMCOREINFO_BYTES PAGE_SIZE
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
-#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
-#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
- VMCOREINFO_NOTE_NAME_BYTES + \
- VMCOREINFO_BYTES)
-
-typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
-
-void crash_update_vmcoreinfo_safecopy(void *ptr);
-void crash_save_vmcoreinfo(void);
-void arch_crash_save_vmcoreinfo(void);
-__printf(1, 2)
-void vmcoreinfo_append_str(const char *fmt, ...);
-phys_addr_t paddr_vmcoreinfo_note(void);
-
-#define VMCOREINFO_OSRELEASE(value) \
- vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_BUILD_ID(value) \
- vmcoreinfo_append_str("BUILD-ID=%s\n", value)
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SYMBOL_ARRAY(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
-#define VMCOREINFO_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(name))
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_LENGTH(name, value) \
- vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
-#define VMCOREINFO_NUMBER(name) \
- vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
-#define VMCOREINFO_CONFIG(name) \
- vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-
-extern unsigned char *vmcoreinfo_data;
-extern size_t vmcoreinfo_size;
-extern u32 *vmcoreinfo_note;
-
-/* raw contents of kernel .notes section */
-extern const void __start_notes __weak;
-extern const void __stop_notes __weak;
-
-Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
- void *data, size_t data_len);
-void final_note(Elf_Word *buf);
-
-int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
+static inline void arch_kexec_protect_crashkres(void) { }
+#endif
+
+#ifndef arch_kexec_unprotect_crashkres
+static inline void arch_kexec_unprotect_crashkres(void) { }
+#endif
+
+
+#ifndef arch_crash_handle_hotplug_event
+static inline void arch_crash_handle_hotplug_event(struct kimage *image) { }
+#endif
+
+int crash_check_update_elfcorehdr(void);
+
+#ifndef crash_hotplug_cpu_support
+static inline int crash_hotplug_cpu_support(void) { return 0; }
+#endif
+
+#ifndef crash_hotplug_memory_support
+static inline int crash_hotplug_memory_support(void) { return 0; }
+#endif
+
+#ifndef crash_get_elfcorehdr_size
+static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
+#endif
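The #ifndef stubs above follow the kernel's weak-default convention: an architecture that implements crash hotplug supplies its own function and shadows the stub with a macro of the same name. A sketch of what a hypothetical arch header would add:

/* arch/foo/include/asm/kexec.h -- hypothetical override */
int arch_crash_hotplug_cpu_support(void);
#define crash_hotplug_cpu_support arch_crash_hotplug_cpu_support

unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size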
+
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[] __counted_by(max_nr_ranges);
+};
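Since ranges[] is a flexible array annotated with __counted_by(max_nr_ranges), allocations should be sized with struct_size() and max_nr_ranges must be set before the array is indexed. A minimal sketch:

static struct crash_mem *crash_mem_alloc_sketch(unsigned int nr)
{
	struct crash_mem *mem;

	mem = kzalloc(struct_size(mem, ranges, nr), GFP_KERNEL);
	if (!mem)
		return NULL;
	/* set the counter first so __counted_by bounds checks see it */
	mem->max_nr_ranges = nr;
	return mem;
}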
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+ void **addr, unsigned long *sz);
+
+struct kexec_segment;
+
+#define KEXEC_CRASH_HP_NONE 0
+#define KEXEC_CRASH_HP_ADD_CPU 1
+#define KEXEC_CRASH_HP_REMOVE_CPU 2
+#define KEXEC_CRASH_HP_ADD_MEMORY 3
+#define KEXEC_CRASH_HP_REMOVE_MEMORY 4
+#define KEXEC_CRASH_HP_INVALID_CPU -1U
+
+extern void __crash_kexec(struct pt_regs *regs);
+extern void crash_kexec(struct pt_regs *regs);
+int kexec_should_crash(struct task_struct *p);
+int kexec_crash_loaded(void);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
+extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
+
+#else /* !CONFIG_CRASH_DUMP */
+struct pt_regs;
+struct task_struct;
+struct kimage;
+static inline void __crash_kexec(struct pt_regs *regs) { }
+static inline void crash_kexec(struct pt_regs *regs) { }
+static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+static inline int kexec_crash_loaded(void) { return 0; }
+static inline void crash_save_cpu(struct pt_regs *regs, int cpu) { }
+static inline int kimage_crash_copy_vmcoreinfo(struct kimage *image) { return 0; }
+#endif /* CONFIG_CRASH_DUMP */
#endif /* LINUX_CRASH_CORE_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index a5192b718dbe..acc55626afdc 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,15 +8,14 @@
#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <linux/pgtable.h> /* for pgprot_t */
-
-#ifdef CONFIG_CRASH_DUMP
+/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
@@ -25,11 +24,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
-extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf);
+ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
+ unsigned long offset);
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset);
void vmcore_cleanup(void);
@@ -52,6 +50,7 @@ void vmcore_cleanup(void);
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
+#ifndef is_kdump_kernel
/*
* is_kdump_kernel() checks whether this kernel is booting after a panic of
* previous kernel or not. This is determined by checking if previous kernel
@@ -66,6 +65,7 @@ static inline bool is_kdump_kernel(void)
{
return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
+#endif
/* is_vmcore_usable() checks if the kernel is booting after a panic and
* the vmcore region is usable.
@@ -77,7 +77,8 @@ static inline bool is_kdump_kernel(void)
static inline int is_vmcore_usable(void)
{
- return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
+ elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
}
/* vmcore_unusable() marks the vmcore as unusable,
@@ -86,16 +87,35 @@ static inline int is_vmcore_usable(void)
static inline void vmcore_unusable(void)
{
- if (is_kdump_kernel())
- elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
-#define HAVE_OLDMEM_PFN_IS_RAM 1
-extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
-extern void unregister_oldmem_pfn_is_ram(void);
+/**
+ * struct vmcore_cb - driver callbacks for /proc/vmcore handling
+ * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
+ * reading the vmcore. Will return "true" if it is RAM or if the
+ * callback cannot tell. If any callback returns "false", it's not
+ * RAM and the page must not be accessed; zeroes should be
+ * indicated in the vmcore instead. For example, a ballooned page
+ * contains no data and reading from such a page will cause high
+ * load in the hypervisor.
+ * @next: List head to manage registered callbacks internally; initialized by
+ * register_vmcore_cb().
+ *
+ * vmcore callbacks allow drivers managing physical memory ranges to
+ * coordinate with vmcore handling code, for example, to prevent accessing
+ * physical memory ranges that should not be accessed when reading the vmcore,
+ * although included in the vmcore header as memory ranges to dump.
+ */
+struct vmcore_cb {
+ bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ struct list_head next;
+};
+extern void register_vmcore_cb(struct vmcore_cb *cb);
+extern void unregister_vmcore_cb(struct vmcore_cb *cb);
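A driver whose pages may be absent from the dump (a memory balloon, say) registers a callback along these lines; the inflated-page test is a hypothetical helper:

static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* "false" => vmcore substitutes zeroes instead of reading the page */
	return !my_page_is_inflated(pfn);	/* hypothetical */
}

static struct vmcore_cb my_vmcore_cb = {
	.pfn_is_ram = my_pfn_is_ram,
};

/* probe: */  register_vmcore_cb(&my_vmcore_cb);
/* remove: */ unregister_vmcore_cb(&my_vmcore_cb);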
#else /* !CONFIG_CRASH_DUMP */
-static inline bool is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
/* Device Dump information to be filled by drivers */
@@ -116,13 +136,11 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#ifdef CONFIG_PROC_VMCORE
-ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted);
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted);
#else
-static inline ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted)
+static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h
new file mode 100644
index 000000000000..5a9df944fb80
--- /dev/null
+++ b/include/linux/crash_reserve.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_CRASH_RESERVE_H
+#define LINUX_CRASH_RESERVE_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#include <asm/crash_reserve.h>
+#endif
+
+/* Location of a reserved region to hold the crash kernel. */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base,
+ unsigned long long *low_size, bool *high);
+
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+#endif
+#ifndef CRASH_ALIGN
+#define CRASH_ALIGN SZ_2M
+#endif
+#ifndef CRASH_ADDR_LOW_MAX
+#define CRASH_ADDR_LOW_MAX SZ_4G
+#endif
+#ifndef CRASH_ADDR_HIGH_MAX
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+#endif
+
+void __init reserve_crashkernel_generic(char *cmdline,
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
+#else
+static inline void __init reserve_crashkernel_generic(char *cmdline,
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high)
+{}
+#endif
+#endif /* LINUX_CRASH_RESERVE_H */
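An architecture selecting ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION chains the two entry points from its early memblock setup; a sketch of the usual sequence (error handling trimmed, modeled loosely on the arm64 flow):

static void __init arch_reserve_crashkernel_sketch(void)
{
	unsigned long long crash_size = 0, crash_base = 0, low_size = 0;
	bool high = false;

	if (parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			      &crash_size, &crash_base, &low_size, &high))
		return;

	reserve_crashkernel_generic(boot_command_line, crash_size,
				    crash_base, low_size, high);
}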
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index 72c92c396bb8..cd4f420231ba 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,19 +5,12 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
-extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
-extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
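crc_ccitt_byte() folds a single byte into the running CRC; the exported crc_ccitt() is simply that step in a loop, equivalent to:

static u16 crc_ccitt_sketch(u16 crc, const u8 *buffer, size_t len)
{
	while (len--)
		crc = crc_ccitt_byte(crc, *buffer++);
	return crc;
}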
-static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
-{
- return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
-}
-
#endif /* _LINUX_CRC_CCITT_H */
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index a4367051e192..2f991a427ade 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -4,7 +4,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
*/
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index bd21af828ff6..357ae4611a45 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
extern u32 crc32c(u32 crc, const void *address, unsigned int length);
-extern const char *crc32c_impl(void);
/* This macro exists for backwards-compatibility. */
#define crc32c_le crc32c
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
index c756e65a1b58..e044c60d1e61 100644
--- a/include/linux/crc64.h
+++ b/include/linux/crc64.h
@@ -7,5 +7,12 @@
#include <linux/types.h>
+#define CRC64_ROCKSOFT_STRING "crc64-rocksoft"
+
u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len);
+
+u64 crc64_rocksoft(const unsigned char *buffer, size_t len);
+u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len);
+
#endif /* _LINUX_CRC64_H */
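For the Rocksoft-model CRC the update form lets callers feed data in chunks; a sketch, assuming (as the generic implementation suggests) that chained updates start from 0 and handle the pre/post inversion internally:

static u64 crc64_two_chunks_sketch(const u8 *p1, size_t l1,
				   const u8 *p2, size_t l2)
{
	u64 crc = 0;	/* assumed starting value for the update API */

	crc = crc64_rocksoft_update(crc, p1, l1);
	crc = crc64_rocksoft_update(crc, p2, l2);
	return crc;	/* should equal crc64_rocksoft() over p1||p2 */
}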
diff --git a/include/linux/crc8.h b/include/linux/crc8.h
index 13c8dabb0441..674045c59a04 100644
--- a/include/linux/crc8.h
+++ b/include/linux/crc8.h
@@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
* Williams, Ross N., ross<at>ross.net
* (see URL http://www.ross.net/crc/download/crc_v3.txt).
*/
-u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc);
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc);
#endif /* __CRC8_H_ */
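The const qualifier lets callers checksum read-only buffers. Typical use pairs a DECLARE_CRC8_TABLE()/crc8_populate_msb() setup with crc8(); the polynomial here is an arbitrary example:

DECLARE_CRC8_TABLE(example_crc8_table);

static void example_crc8_init(void)
{
	crc8_populate_msb(example_crc8_table, 0x07);	/* x^8 + x^2 + x + 1 */
}

static u8 example_crc8(const u8 *data, size_t len)
{
	return crc8(example_crc8_table, data, len, CRC8_INIT_VALUE);
}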
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 18639c069263..2976f534a7a3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/key.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
@@ -23,9 +24,9 @@ struct inode;
* COW Supplementary groups list
*/
struct group_info {
- atomic_t usage;
+ refcount_t usage;
int ngroups;
- kgid_t gid[0];
+ kgid_t gid[];
} __randomize_layout;
/**
@@ -39,7 +40,7 @@ struct group_info {
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
- atomic_inc(&gi->usage);
+ refcount_inc(&gi->usage);
return gi;
}
@@ -49,11 +50,10 @@ static inline struct group_info *get_group_info(struct group_info *gi)
*/
#define put_group_info(group_info) \
do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
+ if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
-extern struct group_info init_groups;
#ifdef CONFIG_MULTIUSER
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
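The refcount_t conversion leaves the caller-side pattern untouched; a sketch of building a supplementary group list and handing it to a prepared cred (error handling trimmed):

static int example_install_groups(struct cred *new)
{
	struct group_info *gi = groups_alloc(2);

	if (!gi)
		return -ENOMEM;
	gi->gid[0] = KGIDT_INIT(100);
	gi->gid[1] = KGIDT_INIT(200);
	groups_sort(gi);
	set_groups(new, gi);	/* set_groups takes its own reference */
	put_group_info(gi);	/* drop the allocation reference */
	return 0;
}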
@@ -109,14 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_t subscribers; /* number of processes subscribed */
- void *put_addr;
- unsigned magic;
-#define CRED_MAGIC 0x43736564
-#define CRED_MAGIC_DEAD 0x44656144
-#endif
+ atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
@@ -140,10 +133,11 @@ struct cred {
struct key *request_key_auth; /* assumed request_key authority */
#endif
#ifdef CONFIG_SECURITY
- void *security; /* subjective LSM security */
+ void *security; /* LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
/* RCU deletion */
union {
@@ -164,52 +158,12 @@ extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
-extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
-
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
- const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
- const char *file, unsigned line)
-{
- if (unlikely(creds_are_invalid(cred)))
- __invalid_creds(cred, file, line);
-}
-
-#define validate_creds(cred) \
-do { \
- __validate_creds((cred), __FILE__, __LINE__); \
-} while(0)
-
-#define validate_process_creds() \
-do { \
- __validate_process_creds(current, __FILE__, __LINE__); \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-}
-static inline void validate_process_creds(void)
-{
-}
-#endif
+extern int set_cred_ucounts(struct cred *);
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
@@ -219,6 +173,20 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
}
/**
+ * get_new_cred_many - Get references on a new set of credentials
+ * @cred: The new credentials to reference
+ * @nr: Number of references to acquire
+ *
+ * Get references on the specified set of new credentials. The caller must
+ * release all acquired references.
+ */
+static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
+{
+ atomic_long_add(nr, &cred->usage);
+ return cred;
+}
+
+/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
*
@@ -227,16 +195,16 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred(struct cred *cred)
{
- atomic_inc(&cred->usage);
- return cred;
+ return get_new_cred_many(cred, 1);
}
/**
- * get_cred - Get a reference on a set of credentials
+ * get_cred_many - Get references on a set of credentials
* @cred: The credentials to reference
+ * @nr: Number of references to acquire
*
- * Get a reference on the specified set of credentials. The caller must
- * release the reference. If %NULL is passed, it is returned with no action.
+ * Get references on the specified set of credentials. The caller must release
+ * all acquired references. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -244,14 +212,27 @@ static inline struct cred *get_new_cred(struct cred *cred)
* accidental alteration of a set of credentials that should be considered
* immutable.
*/
-static inline const struct cred *get_cred(const struct cred *cred)
+static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
- return get_new_cred(nonconst_cred);
+ return get_new_cred_many(nonconst_cred, nr);
+}
+
+/**
+ * get_cred - Get a reference on a set of credentials
+ * @cred: The credentials to reference
+ *
+ * Get a reference on the specified set of credentials. The caller must
+ * release the reference. If %NULL is passed, it is returned with no action.
+ *
+ * This is used to deal with a committed set of credentials.
+ */
+static inline const struct cred *get_cred(const struct cred *cred)
+{
+ return get_cred_many(cred, 1);
}
static inline const struct cred *get_cred_rcu(const struct cred *cred)
@@ -259,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
- if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
@@ -269,6 +249,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
/**
- * put_cred - Release a reference to a set of credentials
+ * put_cred_many - Release references to a set of credentials
* @cred: The credentials to release
+ * @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released. If %NULL is passed, nothing is done.
@@ -277,17 +258,28 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
-static inline void put_cred(const struct cred *_cred)
+static inline void put_cred_many(const struct cred *_cred, int nr)
{
struct cred *cred = (struct cred *) _cred;
if (cred) {
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
+ if (atomic_long_sub_and_test(nr, &cred->usage))
__put_cred(cred);
}
}
+/**
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released. If %NULL is passed, nothing is done.
+ */
+static inline void put_cred(const struct cred *cred)
+{
+ put_cred_many(cred, 1);
+}
+
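The _many variants exist so batched consumers adjust the refcount once rather than once per object; a sketch with a hypothetical item type sharing one credential across n entries:

struct example_item {
	const struct cred *cred;
};

static void example_charge(struct example_item *items, int n,
			   const struct cred *cred)
{
	int i;

	get_cred_many(cred, n);		/* one atomic add, not n increments */
	for (i = 0; i < n; i++)
		items[i].cred = cred;
}

static void example_release(struct example_item *items, int n)
{
	put_cred_many(items[0].cred, n);	/* one atomic sub on teardown */
}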
/**
* current_cred - Access the current task's subjective credentials
*
@@ -370,6 +362,7 @@ static inline void put_cred(const struct cred *_cred)
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
+#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
@@ -386,6 +379,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
+#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 2f811baf78d2..30dba392b730 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -346,6 +346,9 @@ struct crush_work_bucket {
struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
+#ifdef __KERNEL__
+ struct list_head item;
+#endif
};
#ifdef __KERNEL__
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ef90e07c9635..b164da5e129e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -12,26 +12,10 @@
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/bug.h>
+#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>
-#include <linux/completion.h>
-
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+#include <linux/types.h>
/*
* Algorithm masks and types.
@@ -40,18 +24,18 @@
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
+#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_SIG 0x00000007
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
-#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
-#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -126,13 +110,21 @@
* crypto_aead_walksize() (with the remainder going at the end), no chunk
* can cross a page boundary or a scatterlist element boundary.
* ahash:
- * - The result buffer must be aligned to the algorithm's alignmask.
* - crypto_ahash_finup() must not be used unless the algorithm implements
* ->finup() natively.
*/
#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
/*
+ * Mark an algorithm as a service implementation only usable by a
+ * template and never by a normal user of the kernel crypto API.
+ * This is intended to be used by algorithms that are themselves
+ * not FIPS-approved but may instead be used to implement parts of
+ * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
+ */
+#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
@@ -151,20 +143,22 @@
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
+ * faults for C data types. On architectures that support non-cache coherent
+ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
+ * that is required to ensure that the context struct member does not share any
+ * cachelines with the rest of the struct. This is needed to ensure that cache
+ * maintenance for non-coherent DMA (cache invalidation in particular) does not
+ * affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
-struct scatterlist;
-struct crypto_async_request;
struct crypto_tfm;
struct crypto_type;
+struct module;
-typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+typedef void (*crypto_completion_t)(void *req, int err);
/**
* DOC: Block Cipher Context Data Structures
@@ -263,116 +257,6 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-#ifdef CONFIG_CRYPTO_STATS
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for AEAD requests
- */
-struct crypto_istat_aead {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @verify_cnt: number of verify operation
- * @sign_cnt: number of sign requests
- * @err_cnt: number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t verify_cnt;
- atomic64_t sign_cnt;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for cipher requests
- */
-struct crypto_istat_cipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt: number of compress requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_cnt: number of decompress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @err_cnt: number of error for compress requests
- */
-struct crypto_istat_compress {
- atomic64_t compress_cnt;
- atomic64_t compress_tlen;
- atomic64_t decompress_cnt;
- atomic64_t decompress_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt: number of hash requests
- * @hash_tlen: total data size hashed
- * @err_cnt: number of error for hash requests
- */
-struct crypto_istat_hash {
- atomic64_t hash_cnt;
- atomic64_t hash_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @err_cnt: number of error for KPP requests
- */
-struct crypto_istat_kpp {
- atomic64_t setsecret_cnt;
- atomic64_t generate_public_key_cnt;
- atomic64_t compute_shared_secret_cnt;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt: number of RNG generate requests
- * @generate_tlen: total data size of generated data by the RNG
- * @seed_cnt: number of times the RNG was seeded
- * @err_cnt: number of error for RNG requests
- */
-struct crypto_istat_rng {
- atomic64_t generate_cnt;
- atomic64_t generate_tlen;
- atomic64_t seed_cnt;
- atomic64_t err_cnt;
-};
-#endif /* CONFIG_CRYPTO_STATS */
-
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -393,18 +277,20 @@ struct crypto_istat_rng {
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- * buffer containing the input data for the algorithm must be
- * aligned to this alignment mask. The data buffer for the
- * output data must be aligned to this alignment mask. Note that
- * the Crypto API will do the re-alignment in software, but
- * only under special conditions and there is a performance hit.
- * The re-alignment happens at these occasions for different
- * @cra_u types: cipher -- For both input data and output data
- * buffer; ahash -- For output hash destination buf; shash --
- * For output hash destination buf.
- * This is needed on hardware which is flawed by design and
- * cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ * 1 less than the alignment, in bytes, that the algorithm
+ * implementation requires for input and output buffers. When
+ * the crypto API is invoked with buffers that are not aligned
+ * to this alignment, the crypto API automatically utilizes
+ * appropriately aligned temporary buffers to comply with what
+ * the algorithm needs. (For scatterlists this happens only if
+ * the algorithm uses the skcipher_walk helper functions.) This
+ * misalignment handling carries a performance penalty, so it is
+ * preferred that algorithms do not set a nonzero alignmask.
+ * Also, crypto API users may wish to allocate buffers aligned
+ * to the alignmask of the algorithm being used, in order to
+ * avoid the API having to realign them. Note: the alignmask is
+ * not supported for hash algorithms and is always 0 for them.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
@@ -450,15 +336,6 @@ struct crypto_istat_rng {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
- * @stats: union of all possible crypto_istat_xxx structures
- * @stats.aead: statistics for AEAD algorithm
- * @stats.akcipher: statistics for akcipher algorithm
- * @stats.cipher: statistics for cipher algorithm
- * @stats.compress: statistics for compress algorithm
- * @stats.hash: statistics for hash algorithm
- * @stats.rng: statistics for rng algorithm
- * @stats.kpp: statistics for KPP algorithm
- *
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -490,81 +367,8 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-
-#ifdef CONFIG_CRYPTO_STATS
- union {
- struct crypto_istat_aead aead;
- struct crypto_istat_akcipher akcipher;
- struct crypto_istat_cipher cipher;
- struct crypto_istat_compress compress;
- struct crypto_istat_hash hash;
- struct crypto_istat_rng rng;
- struct crypto_istat_kpp kpp;
- } stats;
-#endif /* CONFIG_CRYPTO_STATS */
-
} CRYPTO_MINALIGN_ATTR;
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg);
-void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-#else
-static inline void crypto_stats_init(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_get(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
-{}
-static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-#endif
/*
* A helper struct for waiting for completion of async crypto ops
*/
@@ -583,7 +387,7 @@ struct crypto_wait {
/*
 * Async ops completion helper functions
*/
-void crypto_req_done(struct crypto_async_request *req, int err);
+void crypto_req_done(void *req, int err);
static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
@@ -605,14 +409,6 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
}
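With crypto_completion_t now taking a bare void *, crypto_req_done plugs straight into a request callback. The canonical synchronous-wait pattern, sketched for an skcipher (tfm/request/buffer setup elided):

DECLARE_CRYPTO_WAIT(wait);
int err;

skcipher_request_set_callback(req,
			      CRYPTO_TFM_REQ_MAY_BACKLOG |
			      CRYPTO_TFM_REQ_MAY_SLEEP,
			      crypto_req_done, &wait);
skcipher_request_set_crypt(req, src, dst, len, iv);

err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
/* 0 on success, even when the cipher completed asynchronously */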
/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-void crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-void crypto_unregister_algs(struct crypto_alg *algs, int count);
-
-/*
* Algorithm query interface.
*/
int crypto_has_alg(const char *name, u32 type, u32 mask);
@@ -624,6 +420,7 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
*/
struct crypto_tfm {
+ refcount_t refcnt;
u32 crt_flags;
@@ -636,40 +433,10 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_cipher {
- struct crypto_tfm base;
-};
-
struct crypto_comp {
struct crypto_tfm base;
};
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
-
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
-
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
-
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
-
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
-
-struct crypto_attr_u32 {
- u32 num;
-};
-
/*
* Transform user interface.
*/
@@ -682,8 +449,6 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
return crypto_destroy_tfm(tfm, tfm);
}
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
/*
* Transform helpers which query the underlying algorithm.
*/
@@ -697,16 +462,6 @@ static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_driver_name;
}
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_priority;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
@@ -732,176 +487,12 @@ static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
tfm->crt_flags &= ~flags;
}
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
- return tfm->__crt_ctx;
-}
-
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
struct crypto_tfm *tfm;
return __alignof__(tfm->__crt_ctx);
}
-/**
- * DOC: Single Block Cipher API
- *
- * The single block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
- *
- * Using the single block cipher API calls, operations with the basic cipher
- * primitive can be implemented. These cipher primitives exclude any block
- * chaining operations including IV handling.
- *
- * The purpose of this single block cipher API is to support the implementation
- * of templates or other concepts that only need to perform the cipher operation
- * on one block at a time. Templates invoke the underlying cipher primitive
- * block-wise and process either the input or the output data of these cipher
- * operations.
- */
-
-static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_cipher *)tfm;
-}
-
-/**
- * crypto_alloc_cipher() - allocate single block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a single block cipher. The returned struct
- * crypto_cipher is the cipher handle that is required for any subsequent API
- * invocation for that single block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_cipher() - zeroize and free the single block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_cipher(struct crypto_cipher *tfm)
-{
- crypto_free_tfm(crypto_cipher_tfm(tfm));
-}
-
-/**
- * crypto_has_cipher() - Search for the availability of a single block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the single block cipher is known to the kernel crypto API;
- * false otherwise
- */
-static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_cipher_blocksize() - obtain block size for cipher
- * @tfm: cipher handle
- *
- * The block size for the single block cipher referenced with the cipher handle
- * tfm is returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
-}
-
-static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
-}
-
-static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_cipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the single block cipher referenced by the
- * cipher handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen);
-
-/**
- * crypto_cipher_encrypt_one() - encrypt one block of plaintext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the ciphertext
- * @src: buffer holding the plaintext to be encrypted
- *
- * Invoke the encryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src);
-
-/**
- * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the plaintext
- * @src: buffer holding the ciphertext to be decrypted
- *
- * Invoke the decryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src);
-
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index 363b004426db..bc95aef2219c 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_CTYPE_H
#define _LINUX_CTYPE_H
+#include <linux/compiler.h>
+
/*
* NOTE! This ctype does not handle EOF like the standard C
* library is required to.
@@ -23,10 +25,6 @@ extern const unsigned char _ctype[];
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-static inline int isdigit(int c)
-{
- return '0' <= c && c <= '9';
-}
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
@@ -39,6 +37,15 @@ static inline int isdigit(int c)
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
+#if __has_builtin(__builtin_isdigit)
+#define isdigit(c) __builtin_isdigit(c)
+#else
+static inline int isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
+#endif
+
static inline unsigned char __tolower(unsigned char c)
{
if (isupper(c))
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 45bfe9d61271..daf3e6f98444 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -12,7 +12,7 @@
#include <uapi/linux/cuda.h>
-extern int find_via_cuda(void);
+extern int __init find_via_cuda(void);
extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
new file mode 100644
index 000000000000..03fa6d50d46f
--- /dev/null
+++ b/include/linux/cxl-event.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+#ifndef _LINUX_CXL_EVENT_H
+#define _LINUX_CXL_EVENT_H
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+ u8 length;
+ u8 flags[3];
+ __le16 handle;
+ __le16 related_handle;
+ __le64 timestamp;
+ u8 maint_op_class;
+ u8 reserved[15];
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_generic {
+ struct cxl_event_record_hdr hdr;
+ u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 device[3];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 reserved[46];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
+struct cxl_event_dram {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ u8 column[2];
+ u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+ u8 reserved[0x17];
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ */
+struct cxl_get_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 add_status;
+ u8 life_used;
+ u8 device_temp[2];
+ u8 dirty_shutdown_cnt[4];
+ u8 cor_vol_err_cnt[4];
+ u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+struct cxl_event_mem_module {
+ struct cxl_event_record_hdr hdr;
+ u8 event_type;
+ struct cxl_get_health_info info;
+ u8 reserved[0x3d];
+} __packed;
+
+union cxl_event {
+ struct cxl_event_generic generic;
+ struct cxl_event_gen_media gen_media;
+ struct cxl_event_dram dram;
+ struct cxl_event_mem_module mem_module;
+} __packed;
+
+/*
+ * Common Event Record Format; in event logs
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_raw {
+ uuid_t id;
+ union cxl_event event;
+} __packed;
+
+enum cxl_event_type {
+ CXL_CPER_EVENT_GENERIC,
+ CXL_CPER_EVENT_GEN_MEDIA,
+ CXL_CPER_EVENT_DRAM,
+ CXL_CPER_EVENT_MEM_MODULE,
+};
+
+#define CPER_CXL_DEVICE_ID_VALID BIT(0)
+#define CPER_CXL_DEVICE_SN_VALID BIT(1)
+#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
+struct cxl_cper_event_rec {
+ struct {
+ u32 length;
+ u64 validation_bits;
+ struct cper_cxl_event_devid {
+ u16 vendor_id;
+ u16 device_id;
+ u8 func_num;
+ u8 device_num;
+ u8 bus_num;
+ u16 segment_num;
+ u16 slot_num; /* bits 2:0 reserved */
+ u8 reserved;
+ } __packed device_id;
+ struct cper_cxl_event_sn {
+ u32 lower_dw;
+ u32 upper_dw;
+ } __packed dev_serial_num;
+ } __packed hdr;
+
+ union cxl_event event;
+} __packed;
+
+#endif /* _LINUX_CXL_EVENT_H */
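Consumers pick a union member based on the record type derived from the CPER section UUID; a dispatch sketch (the handlers are hypothetical stand-ins):

static void example_handle_event(enum cxl_event_type type,
				 struct cxl_cper_event_rec *rec)
{
	union cxl_event *evt = &rec->event;

	switch (type) {
	case CXL_CPER_EVENT_GEN_MEDIA:
		handle_gen_media(&evt->gen_media);	/* hypothetical */
		break;
	case CXL_CPER_EVENT_DRAM:
		handle_dram(&evt->dram);		/* hypothetical */
		break;
	case CXL_CPER_EVENT_MEM_MODULE:
		handle_mem_module(&evt->mem_module);	/* hypothetical */
		break;
	default:
		handle_generic(&evt->generic);		/* hypothetical */
	}
}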
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
deleted file mode 100644
index 05ee0f19448a..000000000000
--- a/include/linux/cyclades.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
- * linux/include/linux/cyclades.h
- *
- * This file was initially written by
- * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
- * Ivan Passos <ivan@cyclades.com>.
- *
- * This file contains the general definitions for the cyclades.c driver
- *$Log: cyclades.h,v $
- *Revision 3.1 2002/01/29 11:36:16 henrique
- *added throttle field on struct cyclades_port to indicate whether the
- *port is throttled or not
- *
- *Revision 3.1 2000/04/19 18:52:52 ivan
- *converted address fields to unsigned long and added fields for physical
- *addresses on cyclades_card structure;
- *
- *Revision 3.0 1998/11/02 14:20:59 ivan
- *added nports field on cyclades_card structure;
- *
- *Revision 2.5 1998/08/03 16:57:01 ivan
- *added cyclades_idle_stats structure;
- *
- *Revision 2.4 1998/06/01 12:09:53 ivan
- *removed closing_wait2 from cyclades_port structure;
- *
- *Revision 2.3 1998/03/16 18:01:12 ivan
- *changes in the cyclades_port structure to get it closer to the
- *standard serial port structure;
- *added constants for new ioctls;
- *
- *Revision 2.2 1998/02/17 16:50:00 ivan
- *changes in the cyclades_port structure (addition of shutdown_wait and
- *chip_rev variables);
- *added constants for new ioctls and for CD1400 rev. numbers.
- *
- *Revision 2.1 1997/10/24 16:03:00 ivan
- *added rflow (which allows enabling the CD1400 special flow control
- *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
- *cyclades_port structure;
- *added Alpha support
- *
- *Revision 2.0 1997/06/30 10:30:00 ivan
- *added some new doorbell command constants related to IOCTLW and
- *UART error signaling
- *
- *Revision 1.8 1997/06/03 15:30:00 ivan
- *added constant ZFIRM_HLT
- *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
- *
- *Revision 1.7 1997/03/26 10:30:00 daniel
- *new entries at the end of cyclades_port struct to reallocate
- *variables illegally allocated within card memory.
- *
- *Revision 1.6 1996/09/09 18:35:30 bentson
- *fold in changes for Cyclom-Z -- including structures for
- *communicating with board as well modest changes to original
- *structures to support new features.
- *
- *Revision 1.5 1995/11/13 21:13:31 bentson
- *changes suggested by Michael Chastain <mec@duracef.shout.net>
- *to support use of this file in non-kernel applications
- *
- *
- */
-#ifndef _LINUX_CYCLADES_H
-#define _LINUX_CYCLADES_H
-
-#include <uapi/linux/cyclades.h>
-
-
-/* Per card data structure */
-struct cyclades_card {
- void __iomem *base_addr;
- union {
- void __iomem *p9050;
- struct RUNTIME_9060 __iomem *p9060;
- } ctl_addr;
- struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */
- int irq;
- unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- unsigned int first_line; /* minor number of first channel on card */
- unsigned int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- u32 hw_ver;
- spinlock_t card_lock;
- struct cyclades_port *ports;
-};
-
-/***************************************
- * Memory access functions/macros *
- * (required to support Alpha systems) *
- ***************************************/
-
-#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
-#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
-#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
-
-/*
- * Statistics counters
- */
-struct cyclades_icount {
- __u32 cts, dsr, rng, dcd, tx, rx;
- __u32 frame, parity, overrun, brk;
- __u32 buf_overrun;
-};
-
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-struct cyclades_port {
- int magic;
- struct tty_port port;
- struct cyclades_card *card;
- union {
- struct {
- void __iomem *base_addr;
- } cyy;
- struct {
- struct CH_CTRL __iomem *ch_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
- } cyz;
- } u;
- int line;
- int flags; /* defined in tty.h */
- int type; /* UART type */
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int xmit_fifo_size;
- int cor1,cor2,cor3,cor4,cor5;
- int tbpr,tco,rbpr,rco;
- int baud;
- int rflow;
- int rtsdtr_inv;
- int chip_rev;
- int custom_divisor;
- u8 x_char; /* to be pushed out ASAP */
- int breakon;
- int breakoff;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- int default_threshold;
- int default_timeout;
- unsigned long rflush_count;
- struct cyclades_monitor mon;
- struct cyclades_idle_stats idle_stats;
- struct cyclades_icount icount;
- struct completion shutdown_wait;
- int throttle;
-#ifdef CONFIG_CYZ_INTR
- struct timer_list rx_full_timer;
-#endif
-};
-
-#define CLOSING_WAIT_DELAY 30*HZ
-#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
-#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
-
-
-#define CyMAX_CHIPS_PER_CARD 8
-#define CyMAX_CHAR_FIFO 12
-#define CyPORTS_PER_CHIP 4
-#define CD1400_MAX_SPEED 115200
-
-#define CyISA_Ywin 0x2000
-
-#define CyPCI_Ywin 0x4000
-#define CyPCI_Yctl 0x80
-#define CyPCI_Zctl CTRL_WINDOW_SIZE
-#define CyPCI_Zwin 0x80000
-#define CyPCI_Ze_win (2 * CyPCI_Zwin)
-
-#define PCI_DEVICE_ID_MASK 0x06
-
-/**** CD1400 registers ****/
-
-#define CD1400_REV_G 0x46
-#define CD1400_REV_J 0x48
-
-#define CyRegSize 0x0400
-#define Cy_HwReset 0x1400
-#define Cy_ClrIntr 0x1800
-#define Cy_EpldRev 0x1e00
-
-/* Global Registers */
-
-#define CyGFRCR (0x40*2)
-#define CyRevE (44)
-#define CyCAR (0x68*2)
-#define CyCHAN_0 (0x00)
-#define CyCHAN_1 (0x01)
-#define CyCHAN_2 (0x02)
-#define CyCHAN_3 (0x03)
-#define CyGCR (0x4B*2)
-#define CyCH0_SERIAL (0x00)
-#define CyCH0_PARALLEL (0x80)
-#define CySVRR (0x67*2)
-#define CySRModem (0x04)
-#define CySRTransmit (0x02)
-#define CySRReceive (0x01)
-#define CyRICR (0x44*2)
-#define CyTICR (0x45*2)
-#define CyMICR (0x46*2)
-#define CyICR0 (0x00)
-#define CyICR1 (0x01)
-#define CyICR2 (0x02)
-#define CyICR3 (0x03)
-#define CyRIR (0x6B*2)
-#define CyTIR (0x6A*2)
-#define CyMIR (0x69*2)
-#define CyIRDirEq (0x80)
-#define CyIRBusy (0x40)
-#define CyIRUnfair (0x20)
-#define CyIRContext (0x1C)
-#define CyIRChannel (0x03)
-#define CyPPR (0x7E*2)
-#define CyCLOCK_20_1MS (0x27)
-#define CyCLOCK_25_1MS (0x31)
-#define CyCLOCK_25_5MS (0xf4)
-#define CyCLOCK_60_1MS (0x75)
-#define CyCLOCK_60_2MS (0xea)
-
-/* Virtual Registers */
-
-#define CyRIVR (0x43*2)
-#define CyTIVR (0x42*2)
-#define CyMIVR (0x41*2)
-#define CyIVRMask (0x07)
-#define CyIVRRxEx (0x07)
-#define CyIVRRxOK (0x03)
-#define CyIVRTxOK (0x02)
-#define CyIVRMdmOK (0x01)
-#define CyTDR (0x63*2)
-#define CyRDSR (0x62*2)
-#define CyTIMEOUT (0x80)
-#define CySPECHAR (0x70)
-#define CyBREAK (0x08)
-#define CyPARITY (0x04)
-#define CyFRAME (0x02)
-#define CyOVERRUN (0x01)
-#define CyMISR (0x4C*2)
-/* see CyMCOR_ and CyMSVR_ for bits*/
-#define CyEOSRR (0x60*2)
-
-/* Channel Registers */
-
-#define CyLIVR (0x18*2)
-#define CyMscsr (0x01)
-#define CyTdsr (0x02)
-#define CyRgdsr (0x03)
-#define CyRedsr (0x07)
-#define CyCCR (0x05*2)
-/* Format 1 */
-#define CyCHAN_RESET (0x80)
-#define CyCHIP_RESET (0x81)
-#define CyFlushTransFIFO (0x82)
-/* Format 2 */
-#define CyCOR_CHANGE (0x40)
-#define CyCOR1ch (0x02)
-#define CyCOR2ch (0x04)
-#define CyCOR3ch (0x08)
-/* Format 3 */
-#define CySEND_SPEC_1 (0x21)
-#define CySEND_SPEC_2 (0x22)
-#define CySEND_SPEC_3 (0x23)
-#define CySEND_SPEC_4 (0x24)
-/* Format 4 */
-#define CyCHAN_CTL (0x10)
-#define CyDIS_RCVR (0x01)
-#define CyENB_RCVR (0x02)
-#define CyDIS_XMTR (0x04)
-#define CyENB_XMTR (0x08)
-#define CySRER (0x06*2)
-#define CyMdmCh (0x80)
-#define CyRxData (0x10)
-#define CyTxRdy (0x04)
-#define CyTxMpty (0x02)
-#define CyNNDT (0x01)
-#define CyCOR1 (0x08*2)
-#define CyPARITY_NONE (0x00)
-#define CyPARITY_0 (0x20)
-#define CyPARITY_1 (0xA0)
-#define CyPARITY_E (0x40)
-#define CyPARITY_O (0xC0)
-#define Cy_1_STOP (0x00)
-#define Cy_1_5_STOP (0x04)
-#define Cy_2_STOP (0x08)
-#define Cy_5_BITS (0x00)
-#define Cy_6_BITS (0x01)
-#define Cy_7_BITS (0x02)
-#define Cy_8_BITS (0x03)
-#define CyCOR2 (0x09*2)
-#define CyIXM (0x80)
-#define CyTxIBE (0x40)
-#define CyETC (0x20)
-#define CyAUTO_TXFL (0x60)
-#define CyLLM (0x10)
-#define CyRLM (0x08)
-#define CyRtsAO (0x04)
-#define CyCtsAE (0x02)
-#define CyDsrAE (0x01)
-#define CyCOR3 (0x0A*2)
-#define CySPL_CH_DRANGE (0x80) /* special character detect range */
-#define CySPL_CH_DET1 (0x40) /* enable special character detection
- on SCHR4-SCHR3 */
-#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */
-#define CySPL_CH_DET2 (0x10) /* Enable special character detection
- on SCHR2-SCHR1 */
-#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */
-#define CyCOR4 (0x1E*2)
-#define CyCOR5 (0x1F*2)
-#define CyCCSR (0x0B*2)
-#define CyRxEN (0x80)
-#define CyRxFloff (0x40)
-#define CyRxFlon (0x20)
-#define CyTxEN (0x08)
-#define CyTxFloff (0x04)
-#define CyTxFlon (0x02)
-#define CyRDCR (0x0E*2)
-#define CySCHR1 (0x1A*2)
-#define CySCHR2 (0x1B*2)
-#define CySCHR3 (0x1C*2)
-#define CySCHR4 (0x1D*2)
-#define CySCRL (0x22*2)
-#define CySCRH (0x23*2)
-#define CyLNC (0x24*2)
-#define CyMCOR1 (0x15*2)
-#define CyMCOR2 (0x16*2)
-#define CyRTPR (0x21*2)
-#define CyMSVR1 (0x6C*2)
-#define CyMSVR2 (0x6D*2)
-#define CyANY_DELTA (0xF0)
-#define CyDSR (0x80)
-#define CyCTS (0x40)
-#define CyRI (0x20)
-#define CyDCD (0x10)
-#define CyDTR (0x02)
-#define CyRTS (0x01)
-#define CyPVSR (0x6F*2)
-#define CyRBPR (0x78*2)
-#define CyRCOR (0x7C*2)
-#define CyTBPR (0x72*2)
-#define CyTCOR (0x76*2)
-
-/* Custom Registers */
-
-#define CyPLX_VER (0x3400)
-#define PLX_9050 0x0b
-#define PLX_9060 0x0c
-#define PLX_9080 0x0d
-
-/***************************************************************************/
-
-#endif /* _LINUX_CYCLADES_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
new file mode 100644
index 000000000000..886d07294f4e
--- /dev/null
+++ b/include/linux/damon.h
@@ -0,0 +1,769 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/memcontrol.h>
+#include <linux/mutex.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/random.h>
+
+/* Minimal region size. Every damon_region is aligned by this. */
+#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
+
+/* Get a random number in [l, r) */
+static inline unsigned long damon_rand(unsigned long l, unsigned long r)
+{
+ return l + get_random_u32_below(r - l);
+}
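+
+/*
+ * Illustrative sketch only: this is roughly how a sampling address could be
+ * chosen inside an address range [start, end) using damon_rand().  The
+ * helper name is hypothetical, not part of the API.
+ */
+static inline unsigned long damon_example_sampling_addr(unsigned long start,
+							unsigned long end)
+{
+	return damon_rand(start, end);
+}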
+
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @nr_accesses_bp:	@nr_accesses in basis point (0.01%), updated for
+ *			each sampling interval.
+ * @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and is
+ * increased for every &damon_attrs->sample_interval if an access to the region
+ * during the last sampling interval is found.  This field should not be
+ * updated by direct access but via the helper function,
+ * damon_update_region_access_rate().
+ *
+ * @nr_accesses_bp is another representation of @nr_accesses in basis points
+ * (1 in 10,000), updated for every &damon_attrs->sample_interval in a manner
+ * similar to a moving sum.  By the algorithm, this value becomes
+ * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval.  It can
+ * be used when the aggregation interval is too long to wait for before
+ * reading the access monitoring results.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set as region size-weighted average of those of the two regions.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ unsigned int nr_accesses_bp;
+ struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
+};
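+
+/*
+ * Hypothetical helper (not part of the API): read the moving-sum access
+ * rate of a region as a percentage without waiting for the aggregation
+ * interval.  10,000 basis points mean an access was observed at every
+ * sampling interval.
+ */
+static inline unsigned int damon_example_access_percent(struct damon_region *r)
+{
+	return r->nr_accesses_bp / 100;
+}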
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @pid: The PID of the virtual address space to monitor.
+ * @nr_regions: Number of monitoring target regions of this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @pid should be set for appropriate &struct damon_operations including the
+ * virtual address spaces monitoring operations.
+ */
+struct damon_target {
+ struct pid *pid;
+ unsigned int nr_regions;
+ struct list_head regions_list;
+ struct list_head list;
+};
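+
+/*
+ * Illustrative sketch, assuming a virtual address space use case and using
+ * the constructors declared later in this header (declaration order is
+ * ignored for readability): create a target for the process with pid number
+ * @pid_nr and link it to @ctx.  find_get_pid() takes a pid reference that
+ * must eventually be dropped.
+ */
+static inline int damon_example_add_pid_target(struct damon_ctx *ctx,
+					       pid_t pid_nr)
+{
+	struct damon_target *t = damon_new_target();
+
+	if (!t)
+		return -ENOMEM;
+	t->pid = find_get_pid(pid_nr);
+	if (!t->pid) {
+		damon_free_target(t);
+		return -ESRCH;
+	}
+	damon_add_target(ctx, t);
+	return 0;
+}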
+
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
+ * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ * @NR_DAMOS_ACTIONS: Total number of DAMOS actions
+ *
+ * The support of each action is up to the running &struct damon_operations.
+ * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR support all actions except
+ * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO.  &enum DAMON_OPS_PADDR
+ * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum
+ * DAMOS_LRU_DEPRIO, and &DAMOS_STAT.
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_LRU_PRIO,
+ DAMOS_LRU_DEPRIO,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+ NR_DAMOS_ACTIONS,
+};
+
+/**
+ * enum damos_quota_goal_metric - Represents the metric to be used as the goal
+ *
+ * @DAMOS_QUOTA_USER_INPUT: User-input value.
+ * @DAMOS_QUOTA_SOME_MEM_PSI_US: System-level 'some' memory pressure stall information (PSI) in microseconds.
+ * @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
+ *
+ * Metrics equal to or larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
+ */
+enum damos_quota_goal_metric {
+ DAMOS_QUOTA_USER_INPUT,
+ DAMOS_QUOTA_SOME_MEM_PSI_US,
+ NR_DAMOS_QUOTA_GOAL_METRICS,
+};
+
+/**
+ * struct damos_quota_goal - DAMOS scheme quota auto-tuning goal.
+ * @metric: Metric to be used for representing the goal.
+ * @target_value: Target value of @metric to achieve with the tuning.
+ * @current_value: Current value of @metric.
+ * @last_psi_total: Last measured total PSI
+ * @list: List head for siblings.
+ *
+ * Data structure for getting the current score of the quota tuning goal. The
+ * score is calculated by how close @current_value and @target_value are. Then
+ * the score is fed into DAMON's internal feedback loop mechanism to get the
+ * auto-tuned quota.
+ *
+ * If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually
+ * entered by the user, probably inside the kdamond callbacks. Otherwise,
+ * DAMON sets @current_value with self-measured value of @metric.
+ */
+struct damos_quota_goal {
+ enum damos_quota_goal_metric metric;
+ unsigned long target_value;
+ unsigned long current_value;
+ /* metric-dependent fields */
+ union {
+ u64 last_psi_total;
+ };
+ struct list_head list;
+};
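+
+/*
+ * Minimal sketch using the constructors declared later in this header:
+ * ask the quota auto-tuner to keep the system's 'some' memory PSI around
+ * 10000 microseconds of stall.  The target value is an illustrative
+ * example, not a recommendation.
+ */
+static inline int damon_example_set_psi_goal(struct damos_quota *quota)
+{
+	struct damos_quota_goal *goal = damos_new_quota_goal(
+			DAMOS_QUOTA_SOME_MEM_PSI_US, 10000);
+
+	if (!goal)
+		return -ENOMEM;
+	damos_add_quota_goal(quota, goal);
+	return 0;
+}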
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @reset_interval: Charge reset interval in milliseconds.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz:			Maximum bytes of memory that the action can be applied to.
+ * @goals: Head of quota tuning goals (&damos_quota_goal) list.
+ * @esz: Effective size quota in bytes.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * Internally, the time quota is transformed to a size quota using estimated
+ * throughput of the scheme's action. DAMON then compares it against &sz and
+ * uses the smaller one as the effective quota.
+ *
+ * If @goals is not empty, DAMON calculates yet another size quota based on the
+ * goals using its internal feedback loop algorithm, for every @reset_interval.
+ * Then, if the new size quota is smaller than the effective quota, it uses the
+ * new size quota as the effective quota.
+ *
+ * The resulting effective size quota in bytes is set to @esz.
+ *
+ * For selecting regions within the quota, DAMON prioritizes the current scheme's
+ * target memory regions using the &struct damon_operations->get_scheme_score.
+ * You could customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring operations are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long reset_interval;
+ unsigned long ms;
+ unsigned long sz;
+ struct list_head goals;
+ unsigned long esz;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned long histogram[DAMOS_MAX_SCORE + 1];
+ unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
+};
+
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+ NR_DAMOS_WMARK_METRICS,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active.  Being active
+ * means DAMON does the monitoring and applies the action of the scheme to
+ * appropriate memory regions.  Else, DAMON checks &metric of the system at
+ * least once every &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is inactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is inactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
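+
+/*
+ * Illustrative values only: activate a scheme based on the system's free
+ * memory rate (in [0, 1000], i.e. per mille), checking roughly once per
+ * second.  The thresholds are arbitrary examples.
+ */
+static const struct damos_watermarks damon_example_wmarks = {
+	.metric = DAMOS_WMARK_FREE_MEM_RATE,
+	.interval = 1000000,	/* check every 1000000 us (1 second) */
+	.high = 500,		/* inactivate above this */
+	.mid = 200,		/* activate between .mid and .low */
+	.low = 50,		/* inactivate below this */
+};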
+
+/**
+ * struct damos_stat - Statistics on a given scheme.
+ * @nr_tried:	Total number of regions that the scheme was tried to be applied to.
+ * @sz_tried:	Total size of regions that the scheme was tried to be applied to.
+ * @nr_applied:	Total number of regions that the scheme was applied to.
+ * @sz_applied:	Total size of regions that the scheme was applied to.
+ * @qt_exceeds: Total number of times the scheme's quota has been exceeded.
+ */
+struct damos_stat {
+ unsigned long nr_tried;
+ unsigned long sz_tried;
+ unsigned long nr_applied;
+ unsigned long sz_applied;
+ unsigned long qt_exceeds;
+};
+
+/**
+ * enum damos_filter_type - Type of memory for &struct damos_filter
+ * @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @DAMOS_FILTER_TYPE_ADDR: Address range.
+ * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
+ * @NR_DAMOS_FILTER_TYPES: Number of filter types.
+ *
+ * The anon pages type and memcg type filters are handled by the underlying
+ * &struct damon_operations as a part of trying the scheme action, and are
+ * therefore accounted as 'tried'.  In contrast, other types are handled by
+ * the core layer before trying the action and are therefore not accounted
+ * as 'tried'.
+ *
+ * Support of the filters that are handled by &struct damon_operations depends
+ * on the running &struct damon_operations.
+ * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
+ * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of
+ * the two types.
+ */
+enum damos_filter_type {
+ DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_MEMCG,
+ DAMOS_FILTER_TYPE_ADDR,
+ DAMOS_FILTER_TYPE_TARGET,
+ NR_DAMOS_FILTER_TYPES,
+};
+
+/**
+ * struct damos_filter - DAMOS action target memory filter.
+ * @type: Type of the page.
+ * @matching:	If the matching page should be filtered out or in.
+ * @memcg_id:	Memcg id of interest if @type is DAMOS_FILTER_TYPE_MEMCG.
+ * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
+ * @target_idx: Index of the &struct damon_target of
+ * &damon_ctx->adaptive_targets if @type is
+ * DAMOS_FILTER_TYPE_TARGET.
+ * @list: List head for siblings.
+ *
+ * Before applying the &damos->action to a memory region, DAMOS checks if each
+ * page of the region matches this filter and avoids applying the action if so.
+ * Support of each filter type depends on the running &struct damon_operations
+ * and the type.  Refer to &enum damos_filter_type for more detail.
+ */
+struct damos_filter {
+ enum damos_filter_type type;
+ bool matching;
+ union {
+ unsigned short memcg_id;
+ struct damon_addr_range addr_range;
+ int target_idx;
+ };
+ struct list_head list;
+};
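+
+/*
+ * Minimal sketch using the constructors declared later in this header,
+ * assuming @matching == true means matching pages are filtered out: install
+ * a filter so that anonymous pages are skipped by @scheme's action.
+ * Whether the filter is honored depends on the running operations set, as
+ * described above.
+ */
+static inline int damon_example_skip_anon(struct damos *scheme)
+{
+	struct damos_filter *filter =
+			damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+
+	if (!filter)
+		return -ENOMEM;
+	damos_add_filter(scheme, filter);
+	return 0;
+}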
+
+/**
+ * struct damos_access_pattern - Target access pattern of the given scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
+ * @action:		&damos_action to be applied to the target regions.
+ * @apply_interval_us: The time between applying the @action.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @filters: Additional set of &struct damos_filter for &action.
+ * @stat: Statistics of this scheme.
+ * @list: List head for siblings.
+ *
+ * For each @apply_interval_us, DAMON finds regions which fit in the
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
+ *
+ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks.  If all schemes that are registered to
+ * the monitoring context are inactive, DAMON stops monitoring as well, and
+ * just repeatedly checks the watermarks.
+ *
+ * Before applying the &action to a memory region, the &struct damon_operations
+ * implementation could check pages of the region and skip the &action to
+ * respect &filters.
+ *
+ * After applying the &action to each region, &stat is updated to reflect the
+ * number of regions and the total size of the regions that the &action has
+ * been applied to.
+ */
+struct damos {
+ struct damos_access_pattern pattern;
+ enum damos_action action;
+ unsigned long apply_interval_us;
+/* private: internal use only */
+ /*
+ * number of sample intervals that should be passed before applying
+ * @action
+ */
+ unsigned long next_apply_sis;
+/* public: */
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ struct list_head filters;
+ struct damos_stat stat;
+ struct list_head list;
+};
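+
+/*
+ * Illustrative sketch: build a scheme that pages out regions which were not
+ * accessed for at least ten aggregation intervals.  The pattern boundaries
+ * are arbitrary example values, and damon_new_scheme() (declared later in
+ * this header) is assumed to copy and initialize @quota and @wmarks.
+ */
+static inline struct damos *damon_example_pageout_scheme(void)
+{
+	struct damos_access_pattern pattern = {
+		.min_sz_region = DAMON_MIN_REGION,
+		.max_sz_region = ULONG_MAX,
+		.min_nr_accesses = 0,
+		.max_nr_accesses = 0,	/* not accessed at all */
+		.min_age_region = 10,	/* in aggregation intervals */
+		.max_age_region = UINT_MAX,
+	};
+	struct damos_quota quota = {};	/* unlimited */
+	struct damos_watermarks wmarks = {
+		.metric = DAMOS_WMARK_NONE,	/* always active */
+	};
+
+	/* zero @apply_interval_us: use the aggregation interval instead */
+	return damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota, &wmarks);
+}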
+
+/**
+ * enum damon_ops_id - Identifier for each monitoring operations implementation
+ *
+ * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
+ * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual
+ * address spaces
+ * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
+ * @NR_DAMON_OPS: Number of monitoring operations implementations
+ */
+enum damon_ops_id {
+ DAMON_OPS_VADDR,
+ DAMON_OPS_FVADDR,
+ DAMON_OPS_PADDR,
+ NR_DAMON_OPS,
+};
+
+struct damon_ctx;
+
+/**
+ * struct damon_operations - Monitoring operations for given use cases.
+ *
+ * @id: Identifier of this operations set.
+ * @init: Initialize operations-related data structures.
+ * @update: Update operations-related data structures.
+ * @prepare_access_checks: Prepare next access check of target regions.
+ * @check_accesses: Check the accesses to target regions.
+ * @reset_aggregated: Reset aggregated accesses monitoring results.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
+ * @target_valid: Determine if the target is valid.
+ * @cleanup: Clean up the context.
+ *
+ * DAMON can be extended for various address spaces and usages. For this,
+ * users should register the low-level operations for their target address
+ * space and use case via the &damon_ctx.ops.  Then, the monitoring thread
+ * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
+ * the monitoring, @update after each &damon_attrs.ops_update_interval, and
+ * @check_accesses, @target_valid and @prepare_access_checks after each
+ * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after
+ * each &damon_attrs.aggr_interval.
+ *
+ * Each &struct damon_operations instance having valid @id can be registered
+ * via damon_register_ops() and selected by damon_select_ops() later.
+ * @init should initialize operations-related data structures. For example,
+ * this could be used to construct proper monitoring target regions and link
+ * those to @damon_ctx.adaptive_targets.
+ * @update should update the operations-related data structures. For example,
+ * this could be used to update monitoring target regions for current status.
+ * @prepare_access_checks should manipulate the monitoring regions to be
+ * prepared for the next access check.
+ * @check_accesses should check the accesses to each region that were made
+ * after the last preparation and update the number of observed accesses of
+ * each region.  It should also return the max number of observed accesses
+ * that resulted from its update.  The value will be used as a threshold for
+ * the regions adjustment.
+ * @reset_aggregated should reset the access monitoring results that are
+ * aggregated by @check_accesses.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region for a user-provided
+ * DAMON-based operation scheme is found.  It should apply the scheme's action
+ * to the region and return the number of bytes of the region that the action
+ * was successfully applied to.
+ * @target_valid should check whether the target is still valid for the
+ * monitoring.
+ * @cleanup is called from @kdamond just before its termination.
+ */
+struct damon_operations {
+ enum damon_ops_id id;
+ void (*init)(struct damon_ctx *context);
+ void (*update)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
+ void (*reset_aggregated)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ unsigned long (*apply_scheme)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ bool (*target_valid)(struct damon_target *t);
+ void (*cleanup)(struct damon_ctx *context);
+};
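+
+/*
+ * Registration skeleton, with all hooks except @target_valid omitted for
+ * brevity; a real operations set would implement the mandatory callbacks
+ * described above.  Registration fails if the @id slot is already taken.
+ */
+static bool damon_example_target_valid(struct damon_target *t)
+{
+	return true;	/* e.g. the physical address space never goes away */
+}
+
+static struct damon_operations damon_example_ops = {
+	.id = DAMON_OPS_PADDR,
+	.target_valid = damon_example_target_valid,
+	/* .init, .prepare_access_checks, .check_accesses, ... */
+};
+
+/* somewhere in an init path: damon_register_ops(&damon_example_ops); */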
+
+/**
+ * struct damon_callback - Monitoring events notification callbacks.
+ *
+ * @before_start: Called before starting the monitoring.
+ * @after_wmarks_check: Called after each schemes' watermarks check.
+ * @after_sampling: Called after each sampling.
+ * @after_aggregation: Called after each aggregation.
+ * @before_damos_apply: Called before applying DAMOS action.
+ * @before_terminate: Called before terminating the monitoring.
+ * @private: User private data.
+ *
+ * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
+ * @before_terminate just before starting and finishing the monitoring,
+ * respectively. Therefore, those are good places for installing and cleaning
+ * @private.
+ *
+ * The monitoring thread calls @after_wmarks_check after each DAMON-based
+ * operation schemes' watermarks check. If users need to make changes to the
+ * attributes of the monitoring context while it's deactivated due to the
+ * watermarks, this is a good place to do so.
+ *
+ * The monitoring thread calls @after_sampling and @after_aggregation for each
+ * of the sampling intervals and aggregation intervals, respectively.
+ * Therefore, users can safely access the monitoring results without additional
+ * protection.  For that reason, users are recommended to use these callbacks
+ * for accessing the results.
+ *
+ * If any callback returns non-zero, monitoring stops.
+ */
+struct damon_callback {
+ void *private;
+
+ int (*before_start)(struct damon_ctx *context);
+ int (*after_wmarks_check)(struct damon_ctx *context);
+ int (*after_sampling)(struct damon_ctx *context);
+ int (*after_aggregation)(struct damon_ctx *context);
+ int (*before_damos_apply)(struct damon_ctx *context,
+ struct damon_target *target,
+ struct damon_region *region,
+ struct damos *scheme);
+ void (*before_terminate)(struct damon_ctx *context);
+};
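+
+/*
+ * Minimal sketch: log every region's aggregated access frequency after each
+ * aggregation interval, using the iteration macros defined later in this
+ * header.  It runs in kdamond context, so no extra locking is needed, as
+ * described above.
+ */
+static int damon_example_after_aggregation(struct damon_ctx *ctx)
+{
+	struct damon_target *t;
+	struct damon_region *r;
+
+	damon_for_each_target(t, ctx) {
+		damon_for_each_region(r, t)
+			pr_debug("region [%lx, %lx): %u accesses\n",
+				 r->ar.start, r->ar.end, r->nr_accesses);
+	}
+	return 0;	/* non-zero would stop the monitoring */
+}
+
+/* installed via: ctx->callback.after_aggregation = damon_example_after_aggregation; */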
+
+/**
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
+ *
+ * @sample_interval: The time between access samplings.
+ * @aggr_interval: The time between monitor results aggregations.
+ * @ops_update_interval: The time between monitoring operations updates.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
+ *
+ * For each @sample_interval, DAMON checks whether each region was accessed
+ * during the last @sample_interval.  If such an access is found, DAMON
+ * aggregates the information by increasing &damon_region->nr_accesses for
+ * @aggr_interval time.  For each @aggr_interval, the count is reset.  DAMON
+ * also checks whether the target memory regions need to be updated (e.g., by
+ * ``mmap()`` calls from the application, in case of virtual memory monitoring)
+ * and applies the changes for each @ops_update_interval.  All time intervals
+ * are in microseconds.  Please refer to &struct damon_operations and &struct
+ * damon_callback for more detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+};
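+
+/*
+ * Illustrative attribute set: sample every 5 ms, aggregate every 100 ms,
+ * refresh target regions every second, and keep between 10 and 1000
+ * regions.  These values mirror commonly used defaults but are not mandated
+ * by the API.
+ */
+static const struct damon_attrs damon_example_attrs = {
+	.sample_interval = 5000,	/* 5 ms */
+	.aggr_interval = 100000,	/* 100 ms */
+	.ops_update_interval = 1000000,	/* 1 s */
+	.min_nr_regions = 10,
+	.max_nr_regions = 1000,
+};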
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
+ *
+ * @attrs: Monitoring attributes for accuracy/overhead control.
+ * @kdamond: Kernel thread who does the monitoring.
+ * @kdamond_lock: Mutex for the synchronizations with @kdamond.
+ *
+ * For each monitoring context, one kernel thread for the monitoring is
+ * created. The pointer to the thread is stored in @kdamond.
+ *
+ * Once started, the monitoring thread runs until explicitly required to be
+ * terminated or every monitoring target is invalid. The validity of the
+ * targets is checked via the &damon_operations.target_valid of @ops. The
+ * termination can also be explicitly requested by calling damon_stop().
+ * The thread sets @kdamond to NULL when it terminates. Therefore, users can
+ * know whether the monitoring is ongoing or terminated by reading @kdamond.
+ * Reads and writes to @kdamond from outside of the monitoring thread must
+ * be protected by @kdamond_lock.
+ *
+ * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
+ * Accesses to other fields must be protected by themselves.
+ *
+ * @ops: Set of monitoring operations for given use cases.
+ * @callback: Set of callbacks for monitoring events notifications.
+ *
+ * @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
+ */
+struct damon_ctx {
+ struct damon_attrs attrs;
+
+/* private: internal use only */
+ /* number of sample intervals that passed since this context started */
+ unsigned long passed_sample_intervals;
+ /*
+ * number of sample intervals that should be passed before next
+ * aggregation
+ */
+ unsigned long next_aggregation_sis;
+ /*
+ * number of sample intervals that should be passed before next ops
+ * update
+ */
+ unsigned long next_ops_update_sis;
+ /* for waiting until the execution of the kdamond_fn is started */
+ struct completion kdamond_started;
+
+/* public: */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+
+ struct damon_operations ops;
+ struct damon_callback callback;
+
+ struct list_head adaptive_targets;
+ struct list_head schemes;
+};
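+
+/*
+ * End-to-end usage sketch built only from the declarations below: create a
+ * context, select the virtual address space operations, apply @attrs, add
+ * one process target, and start an exclusive kdamond.  Error handling
+ * (including a failed pid lookup) is simplified for brevity.
+ */
+static inline struct damon_ctx *damon_example_monitor(pid_t pid_nr,
+						      struct damon_attrs *attrs)
+{
+	struct damon_ctx *ctx = damon_new_ctx();
+	struct damon_target *t;
+
+	if (!ctx)
+		return NULL;
+	if (damon_select_ops(ctx, DAMON_OPS_VADDR) ||
+	    damon_set_attrs(ctx, attrs))
+		goto fail;
+	t = damon_new_target();
+	if (!t)
+		goto fail;
+	t->pid = find_get_pid(pid_nr);
+	damon_add_target(ctx, t);
+	if (damon_start(&ctx, 1, true))	/* exclusive kdamond */
+		goto fail;
+	return ctx;	/* stop later with damon_stop(&ctx, 1) */
+fail:
+	damon_destroy_ctx(ctx);
+	return NULL;
+}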
+
+static inline struct damon_region *damon_next_region(struct damon_region *r)
+{
+ return container_of(r->list.next, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_prev_region(struct damon_region *r)
+{
+ return container_of(r->list.prev, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_last_region(struct damon_target *t)
+{
+ return list_last_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
+#define damos_for_each_quota_goal(goal, quota) \
+ list_for_each_entry(goal, &quota->goals, list)
+
+#define damos_for_each_quota_goal_safe(goal, next, quota) \
+ list_for_each_entry_safe(goal, next, &(quota)->goals, list)
+
+#define damos_for_each_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->filters, list)
+
+#define damos_for_each_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->filters, list)
+
+#ifdef CONFIG_DAMON
+
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
+ unsigned int nr_ranges);
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+ struct damon_attrs *attrs);
+
+struct damos_filter *damos_new_filter(enum damos_filter_type type,
+ bool matching);
+void damos_add_filter(struct damos *s, struct damos_filter *f);
+void damos_destroy_filter(struct damos_filter *f);
+
+struct damos_quota_goal *damos_new_quota_goal(
+ enum damos_quota_goal_metric metric,
+ unsigned long target_value);
+void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g);
+void damos_destroy_quota_goal(struct damos_quota_goal *goal);
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action,
+ unsigned long apply_interval_us,
+ struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+
+struct damon_target *damon_new_target(void);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t);
+unsigned int damon_nr_regions(struct damon_target *t);
+
+struct damon_ctx *damon_new_ctx(void);
+void damon_destroy_ctx(struct damon_ctx *ctx);
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
+int damon_nr_running_ctxs(void);
+bool damon_is_registered_ops(enum damon_ops_id id);
+int damon_register_ops(struct damon_operations *ops);
+int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
+
+static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+{
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+}
+
+static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
+{
+ /* {aggr,sample}_interval are unsigned long, hence could overflow */
+ return min(attrs->aggr_interval / attrs->sample_interval,
+ (unsigned long)UINT_MAX);
+}
+
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end);
+
+#endif /* CONFIG_DAMON */
+
+#endif /* _DAMON_H_ */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 6904d4e0b2e0..9d3e3327af4c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -6,14 +6,19 @@
#include <linux/mm.h>
#include <linux/radix-tree.h>
-/* Flag for synchronous flush */
-#define DAXDEV_F_SYNC (1UL << 0)
-
typedef unsigned long dax_entry_t;
+struct dax_device;
+struct gendisk;
struct iomap_ops;
+struct iomap_iter;
struct iomap;
-struct dax_device;
+
+enum dax_access_mode {
+ DAX_ACCESS,
+ DAX_RECOVERY_WRITE,
+};
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -21,43 +26,48 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- void **, pfn_t *);
+ enum dax_access_mode, void **, pfn_t *);
/*
* Validate whether this device is usable as an fsdax backing
* device.
*/
bool (*dax_supported)(struct dax_device *, struct block_device *, int,
sector_t, sector_t);
- /* copy_from_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
- /* copy_to_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+ /*
+ * recovery_write: recover a poisoned range by DAX device driver
+ * capable of clearing poison.
+ */
+ size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter);
};
-extern struct attribute_group dax_attribute_group;
+struct dax_holder_operations {
+ /*
+ * notify_failure - notify memory failure into inner holder device
+ * @dax_dev: the dax device which contains the holder
+ * @offset: offset on this dax device where memory failure occurs
+ * @len: length of this memory failure event
+ * @flags: action flags for memory failure handler
+ */
+ int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+ u64 len, int mf_flags);
+};
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *dax_get_by_host(const char *host);
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags);
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
+void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
-bool __dax_synchronous(struct dax_device *dax_dev);
-static inline bool dax_synchronous(struct dax_device *dax_dev)
-{
- return __dax_synchronous(dax_dev);
-}
-void __set_dax_synchronous(struct dax_device *dax_dev);
-static inline void set_dax_synchronous(struct dax_device *dax_dev)
-{
- __set_dax_synchronous(dax_dev);
-}
+bool dax_synchronous(struct dax_device *dax_dev);
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -71,18 +81,14 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
-static inline struct dax_device *dax_get_by_host(const char *host)
+static inline void *dax_holder(struct dax_device *dax_dev)
{
return NULL;
}
-static inline struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags)
+static inline struct dax_device *alloc_dax(void *private,
+ const struct dax_operations *ops)
{
- /*
- * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
- * NULL is an error or expected.
- */
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void put_dax(struct dax_device *dax_dev)
{
@@ -101,6 +107,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
{
return true;
}
+static inline void set_dax_nocache(struct dax_device *dax_dev)
+{
+}
+static inline void set_dax_nomc(struct dax_device *dax_dev)
+{
+}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
@@ -109,64 +121,58 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
{
return !(vma->vm_flags & VM_SYNC);
}
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return 0;
+}
#endif
struct writeback_control;
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
-#if IS_ENABLED(CONFIG_FS_DAX)
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
-static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
+void dax_remove_host(struct gendisk *disk);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops);
+void fs_put_dax(struct dax_device *dax_dev, void *holder);
+#else
+static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- return __bdev_dax_supported(bdev, blocksize);
+ return 0;
}
-
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors);
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
+static inline void dax_remove_host(struct gendisk *disk)
{
- return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
- sectors);
}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off, void *holder,
+ const struct dax_holder_operations *ops)
+{
+ return NULL;
+}
+static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
- put_dax(dax_dev);
}
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
struct dax_device *dax_dev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
+struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie);
#else
-static inline bool bdev_dax_supported(struct block_device *bdev,
- int blocksize)
-{
- return false;
-}
-
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- return false;
-}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
-{
-}
-
-static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
return NULL;
}
-static inline struct page *dax_layout_busy_page(struct address_space *mapping)
+static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages)
{
return NULL;
}
@@ -177,48 +183,107 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline dax_entry_t dax_lock_page(struct page *page)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
- if (IS_DAX(page->mapping->host))
+ if (IS_DAX(folio->mapping->host))
return ~0UL;
return 0;
}
-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
+{
+}
+
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page)
+{
+ return 0;
+}
+
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie)
{
}
#endif
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops);
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops);
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops);
+
+#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
+#else
+static inline int dax_read_lock(void)
+{
+ return 0;
+}
+
+static inline void dax_read_unlock(int id)
+{
+}
+#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len);
+ enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
+ int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, pfn_t pfn);
+ unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
- struct iomap *iomap);
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same,
+ const struct iomap_ops *ops);
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops);
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
}
+/*
+ * Due to dax's memory and block duo personalities, hwpoison reporting
+ * takes into consideration which personality is presently visible.
+ * When dax acts like a block device, such as in block IO, an encounter of
+ * dax hwpoison is reported as -EIO.
+ * When dax acts like memory, such as in page fault, a detection of hwpoison
+ * is reported as -EHWPOISON which leads to VM_FAULT_HWPOISON.
+ */
+static inline int dax_mem2blk_err(int err)
+{
+ return (err == -EHWPOISON) ? -EIO : err;
+}
+
+#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
+void hmem_register_resource(int target_nid, struct resource *r);
+#else
+static inline void hmem_register_resource(int target_nid, struct resource *r)
+{
+}
+#endif
+
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+ const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 65d975bf9390..bf53e3894aae 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/list.h>
+#include <linux/math.h>
#include <linux/rculist.h>
#include <linux/rculist_bl.h>
#include <linux/spinlock.h>
@@ -15,6 +16,7 @@
#include <linux/wait.h>
struct path;
+struct file;
struct vfsmount;
/*
@@ -58,16 +60,7 @@ struct qstr {
extern const struct qstr empty_name;
extern const struct qstr slash_name;
-
-struct dentry_stat_t {
- long nr_dentry;
- long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long nr_negative; /* # of unused negative dentries */
- long dummy; /* Reserved for future use */
-};
-extern struct dentry_stat_t dentry_stat;
+extern const struct qstr dotdot_name;
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
@@ -75,12 +68,12 @@ extern struct dentry_stat_t dentry_stat;
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 32 /* 192 bytes */
+# define DNAME_INLINE_LEN 40 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 36 /* 128 bytes */
-# else
# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# else
+# define DNAME_INLINE_LEN 44 /* 128 bytes */
# endif
#endif
@@ -108,8 +101,8 @@ struct dentry {
struct list_head d_lru; /* LRU list */
wait_queue_head_t *d_wait; /* in-lookup ones only */
};
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
+ struct hlist_node d_sib; /* child of parent list */
+ struct hlist_head d_children; /* our children */
/*
* d_alias and d_rcu can share memory
*/
@@ -118,7 +111,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-} __randomize_layout;
+};
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -132,6 +125,11 @@ enum dentry_d_lock_class
DENTRY_D_LOCK_NESTED
};
+enum d_real_type {
+ D_REAL_DATA,
+ D_REAL_METADATA,
+};
+
struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
@@ -146,7 +144,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *);
+ struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
} ____cacheline_aligned;
/*
@@ -158,13 +156,13 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH 0x00000001
-#define DCACHE_OP_COMPARE 0x00000002
-#define DCACHE_OP_REVALIDATE 0x00000004
-#define DCACHE_OP_DELETE 0x00000008
-#define DCACHE_OP_PRUNE 0x00000010
+#define DCACHE_OP_HASH BIT(0)
+#define DCACHE_OP_COMPARE BIT(1)
+#define DCACHE_OP_REVALIDATE BIT(2)
+#define DCACHE_OP_DELETE BIT(3)
+#define DCACHE_OP_PRUNE BIT(4)
-#define DCACHE_DISCONNECTED 0x00000020
+#define DCACHE_DISCONNECTED BIT(5)
/* This dentry is possibly not currently connected to the dcache tree, in
* which case its parent will either be itself, or will have this flag as
* well. nfsd will not use a dentry with this bit set, but will first
@@ -175,50 +173,47 @@ struct dentry_operations {
* dentry into place and return that dentry rather than the passed one,
* typically using d_splice_alias. */
-#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+#define DCACHE_REFERENCED BIT(6) /* Recently used, don't discard. */
-#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */
+#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */
-#define DCACHE_CANT_MOUNT 0x00000100
-#define DCACHE_GENOCIDE 0x00000200
-#define DCACHE_SHRINK_LIST 0x00000400
+#define DCACHE_CANT_MOUNT BIT(8)
+#define DCACHE_GENOCIDE BIT(9)
+#define DCACHE_SHRINK_LIST BIT(10)
-#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
+#define DCACHE_OP_WEAK_REVALIDATE BIT(11)
-#define DCACHE_NFSFS_RENAMED 0x00001000
+#define DCACHE_NFSFS_RENAMED BIT(12)
/* this dentry has been "silly renamed" and has to be deleted on the last
* dput() */
-#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
+#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(14)
/* Parent inode is watched by some fsnotify listener */
-#define DCACHE_DENTRY_KILLED 0x00008000
+#define DCACHE_DENTRY_KILLED BIT(15)
-#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
+#define DCACHE_MOUNTED BIT(16) /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT BIT(17) /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT BIT(18) /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST 0x00080000
+#define DCACHE_LRU_LIST BIT(19)
-#define DCACHE_ENTRY_TYPE 0x00700000
-#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
-#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
-#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
-#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
+#define DCACHE_ENTRY_TYPE (7 << 20) /* bits 20..22 are for storing type: */
+#define DCACHE_MISS_TYPE (0 << 20) /* Negative dentry */
+#define DCACHE_WHITEOUT_TYPE (1 << 20) /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE (2 << 20) /* Normal directory */
+#define DCACHE_AUTODIR_TYPE (3 << 20) /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE (4 << 20) /* Regular file type */
+#define DCACHE_SPECIAL_TYPE (5 << 20) /* Other file type */
+#define DCACHE_SYMLINK_TYPE (6 << 20) /* Symlink */
-#define DCACHE_MAY_FREE 0x00800000
-#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_ENCRYPTED_NAME 0x02000000 /* Encrypted name (dir key was unavailable) */
-#define DCACHE_OP_REAL 0x04000000
+#define DCACHE_NOKEY_NAME BIT(25) /* Encrypted name encoded without key */
+#define DCACHE_OP_REAL BIT(26)
-#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR 0x20000000
-#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
+#define DCACHE_PAR_LOOKUP BIT(28) /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR BIT(29)
+#define DCACHE_NORCU BIT(30) /* No RCU delay for freeing */
extern seqlock_t rename_lock;
@@ -227,8 +222,6 @@ extern seqlock_t rename_lock;
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -241,26 +234,27 @@ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
-/* <clickety>-<click> the ramfs-type tree */
-extern void d_genocide(struct dentry *);
-
-extern void d_tmpfile(struct dentry *, struct inode *);
+extern void d_mark_tmpfile(struct file *, struct inode *);
+extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern struct dentry *d_find_alias_rcu(struct inode *);
+
/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);
@@ -276,12 +270,8 @@ extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
-/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
- const struct qstr *name, unsigned *seq);
static inline unsigned d_count(const struct dentry *dentry)
{
@@ -291,32 +281,52 @@ static inline unsigned d_count(const struct dentry *dentry)
/*
* helper function for dentry_operations.d_dname() members
*/
-extern __printf(4, 5)
-char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(3, 4)
+char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
-extern char *dentry_path_raw(struct dentry *, char *, int);
-extern char *dentry_path(struct dentry *, char *, int);
+extern char *dentry_path_raw(const struct dentry *, char *, int);
+extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
/**
- * dget, dget_dlock - get a reference to a dentry
- * @dentry: dentry to get a reference to
+ * dget_dlock - get a reference to a dentry
+ * @dentry: dentry to get a reference to
*
- * Given a dentry or %NULL pointer increment the reference count
- * if appropriate and return the dentry. A dentry will not be
- * destroyed when it has references.
+ * Given a live dentry, increment the reference count and return the dentry.
+ * Caller must hold @dentry->d_lock. Making sure that the dentry is alive is
+ * the caller's responsibility. There are many conditions sufficient to guarantee
+ * that; e.g. anything with non-negative refcount is alive, so's anything
+ * hashed, anything positive, anyone's parent, etc.
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
- if (dentry)
- dentry->d_lockref.count++;
+ dentry->d_lockref.count++;
return dentry;
}
+
+/**
+ * dget - get a reference to a dentry
+ * @dentry: dentry to get a reference to
+ *
+ * Given a dentry or %NULL pointer increment the reference count
+ * if appropriate and return the dentry. A dentry will not be
+ * destroyed when it has references. Conversely, a dentry with
+ * no references can disappear for any number of reasons, starting
+ * with memory pressure. In other words, that primitive is
+ * used to clone an existing reference; using it on something with
+ * zero refcount is a bug.
+ *
+ * NOTE: it will spin if @dentry->d_lock is held. From the deadlock
+ * avoidance point of view it is equivalent to spin_lock()/increment
+ * refcount/spin_unlock(), so calling it under @dentry->d_lock is
+ * always a bug; so's calling it under ->d_lock on any of its descendants.
+ *
+ */
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry)
@@ -327,12 +337,11 @@ static inline struct dentry *dget(struct dentry *dentry)
extern struct dentry *dget_parent(struct dentry *dentry);
/**
- * d_unhashed - is dentry hashed
- * @dentry: entry to check
+ * d_unhashed - is dentry hashed
+ * @dentry: entry to check
*
- * Returns true if the dentry passed is not currently hashed.
+ * Returns true if the dentry passed is not currently hashed.
*/
-
static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
@@ -355,7 +364,7 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-extern void __d_lookup_done(struct dentry *);
+extern void __d_lookup_unhash_wake(struct dentry *dentry);
static inline int d_in_lookup(const struct dentry *dentry)
{
@@ -364,11 +373,8 @@ static inline int d_in_lookup(const struct dentry *dentry)
static inline void d_lookup_done(struct dentry *dentry)
{
- if (unlikely(d_in_lookup(dentry))) {
- spin_lock(&dentry->d_lock);
- __d_lookup_done(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (unlikely(d_in_lookup(dentry)))
+ __d_lookup_unhash_wake(dentry);
}
extern void dput(struct dentry *);
@@ -495,14 +501,6 @@ static inline int simple_positive(const struct dentry *dentry)
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
-extern void d_set_fallthru(struct dentry *dentry);
-
-static inline bool d_is_fallthru(const struct dentry *dentry)
-{
- return dentry->d_flags & DCACHE_FALLTHRU;
-}
-
-
extern int sysctl_vfs_cache_pressure;
static inline unsigned long vfs_pressure_ratio(unsigned long val)
@@ -552,41 +550,25 @@ static inline struct inode *d_backing_inode(const struct dentry *upper)
}
/**
- * d_backing_dentry - Get upper or lower dentry we should be using
- * @upper: The upper layer
- *
- * This is the helper that should be used to get the dentry of the inode that
- * will be used if this dentry were opened as a file. It may be the upper
- * dentry or it may be a lower dentry pinned by the upper.
- *
- * Normal filesystems should not use this to access their own dentries.
- */
-static inline struct dentry *d_backing_dentry(struct dentry *upper)
-{
- return upper;
-}
-
-/**
* d_real - Return the real dentry
* @dentry: the dentry to query
- * @inode: inode to select the dentry from multiple layers (can be NULL)
+ * @type: the type of real dentry (data or metadata)
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
*
* See also: Documentation/filesystems/vfs.rst
*/
-static inline struct dentry *d_real(struct dentry *dentry,
- const struct inode *inode)
+static inline struct dentry *d_real(struct dentry *dentry, enum d_real_type type)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode);
+ return dentry->d_op->d_real(dentry, type);
else
return dentry;
}
/**
- * d_real_inode - Return the real inode
+ * d_real_inode - Return the real inode hosting the data
* @dentry: The dentry to query
*
* If dentry is on a union/overlay, then return the underlying, real inode.
@@ -595,7 +577,7 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL));
+ return d_inode(d_real((struct dentry *) dentry, D_REAL_DATA));
}
struct name_snapshot {
@@ -605,4 +587,14 @@ struct name_snapshot {
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
+static inline struct dentry *d_first_child(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
+}
+
+static inline struct dentry *d_next_sibling(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
+}
+
#endif /* __LINUX_DCACHE_H */
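The new d_first_child()/d_next_sibling() helpers return NULL at the end of the sibling hlist, so a child walk becomes a plain for loop. A minimal sketch, with locking and RCU deliberately elided (a real walker would hold parent->d_lock):

/* Hypothetical helper: visit every child of @parent via the new
 * hlist-based sibling links. Assumes the caller holds the locks the
 * dcache requires; %pd prints a dentry's name.
 */
static void walk_children(struct dentry *parent)
{
        struct dentry *child;

        for (child = d_first_child(parent); child;
             child = d_next_sibling(child))
                pr_info("child: %pd\n", child);
}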
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 07e547c02fd8..325af611909f 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -305,10 +305,8 @@ struct dccp_sock {
struct timer_list dccps_xmit_timer;
};
-static inline struct dccp_sock *dccp_sk(const struct sock *sk)
-{
- return (struct dccp_sock *)sk;
-}
+#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \
+ dccps_inet_connection.icsk_inet.sk)
static inline const char *dccp_role(const struct sock *sk)
{
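dccp_sk() now uses container_of_const(), which propagates the constness of the argument to the result, so the same macro serves const and non-const struct sock pointers. A small sketch of the idiom with hypothetical types:

/* Sketch: container_of_const() picks the const or non-const variant of
 * container_of() via _Generic, so a const inner pointer yields a const
 * outer pointer.
 */
struct demo_inner { int x; };
struct demo_outer { struct demo_inner in; };

#define demo_outer_of(ptr) container_of_const(ptr, struct demo_outer, in)

static int demo_read_x(const struct demo_inner *in)
{
        const struct demo_outer *o = demo_outer_of(in); /* const preserved */

        return o->in.x;
}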
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
deleted file mode 100644
index ddfdac20cad0..000000000000
--- a/include/linux/dcookies.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * dcookies.h
- *
- * Persistent cookie-path mappings
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- */
-
-#ifndef DCOOKIES_H
-#define DCOOKIES_H
-
-
-#ifdef CONFIG_PROFILING
-
-#include <linux/dcache.h>
-#include <linux/types.h>
-
-struct dcookie_user;
-struct path;
-
-/**
- * dcookie_register - register a user of dcookies
- *
- * Register as a dcookie user. Returns %NULL on failure.
- */
-struct dcookie_user * dcookie_register(void);
-
-/**
- * dcookie_unregister - unregister a user of dcookies
- *
- * Unregister as a dcookie user. This may invalidate
- * any dcookie values returned from get_dcookie().
- */
-void dcookie_unregister(struct dcookie_user * user);
-
-/**
- * get_dcookie - acquire a dcookie
- *
- * Convert the given dentry/vfsmount pair into
- * a cookie value.
- *
- * Returns -EINVAL if no living task has registered as a
- * dcookie user.
- *
- * Returns 0 on success, with *cookie filled in
- */
-int get_dcookie(const struct path *path, unsigned long *cookie);
-
-#else
-
-static inline struct dcookie_user * dcookie_register(void)
-{
- return NULL;
-}
-
-static inline void dcookie_unregister(struct dcookie_user * user)
-{
- return;
-}
-
-static inline int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_PROFILING */
-
-#endif /* DCOOKIES_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index e7e45f0cc7da..dbb409d77d4f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -2,9 +2,8 @@
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H
-#include <linux/kernel.h>
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/cache.h>
struct task_struct;
@@ -27,8 +26,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
@@ -46,8 +47,6 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
-struct task_struct;
-
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
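The instrumentation_begin()/instrumentation_end() pair added above marks the warning path as instrumentable, which lets DEBUG_LOCKS_WARN_ON() be used from noinstr code. The macro evaluates to 1 when the check fires (and lock debugging is turned off), which supports the usual bail-out pattern; a sketch:

/* Sketch: a lockdep-style assertion. On the first failure debug_locks
 * is switched off and a single WARN is emitted; afterwards the check
 * is effectively silent.
 */
static void demo_requires_irqs_off(void)
{
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return; /* state is suspect, skip further checking */

        /* ... code that genuinely needs IRQs disabled ... */
}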
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 851dd1f9a8a5..c9c65b132c0f 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,7 @@ struct debugfs_u32_array {
extern struct dentry *arch_debugfs_dir;
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -56,10 +56,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
+ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
.llseek = no_llseek, \
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
#if defined(CONFIG_DEBUG_FS)
@@ -91,6 +97,8 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
#define debugfs_remove_recursive debugfs_remove
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
int debugfs_file_get(struct dentry *dentry);
@@ -100,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -112,8 +122,8 @@ void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
u32 *value);
void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
u64 *value);
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
@@ -126,8 +136,10 @@ void debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value);
void debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value);
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
+void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent, char **value);
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -144,10 +156,9 @@ void debugfs_create_u32_array(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_u32_array *array);
-struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data));
+void debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s, void *data));
bool debugfs_initialized(void);
@@ -157,6 +168,28 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
+ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+/**
+ * struct debugfs_cancellation - cancellation data
+ * @list: internal, for keeping track
+ * @cancel: callback to call
+ * @cancel_data: extra data for the callback to call
+ */
+struct debugfs_cancellation {
+ struct list_head list;
+ void (*cancel)(struct dentry *, void *);
+ void *cancel_data;
+};
+
+void __acquires(cancellation)
+debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+void __releases(cancellation)
+debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+
#else
#include <linux/err.h>
@@ -221,6 +254,10 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
static inline int debugfs_file_get(struct dentry *dentry)
@@ -244,6 +281,13 @@ static inline ssize_t debugfs_attr_write(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_attr_write_signed(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
{
@@ -262,13 +306,9 @@ static inline void debugfs_create_u32(const char *name, umode_t mode,
static inline void debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_ulong(const char *name,
- umode_t mode,
- struct dentry *parent,
- unsigned long *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value) { }
static inline void debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value) { }
@@ -291,12 +331,13 @@ static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
atomic_t *value)
{ }
-static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent,
- bool *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value) { }
+
+static inline void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent,
+ char **value)
+{ }
static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -327,13 +368,12 @@ static inline void debugfs_create_u32_array(const char *name, umode_t mode,
{
}
-static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
- const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data))
+static inline void debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
{
- return ERR_PTR(-ENODEV);
}
static inline ssize_t debugfs_read_file_bool(struct file *file,
@@ -350,6 +390,13 @@ static inline ssize_t debugfs_write_file_bool(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_read_file_str(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
/**
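DEFINE_DEBUGFS_ATTRIBUTE_SIGNED() differs from the unsigned variant only in routing writes through debugfs_attr_write_signed(), so values like "-1" parse correctly. A hedged sketch with a hypothetical attribute (the get/set signatures are the usual simple-attribute ones):

/* Sketch: a signed debugfs attribute; all names here are made up. */
static s64 demo_offset;

static int demo_offset_get(void *data, u64 *val)
{
        *val = (u64)demo_offset;
        return 0;
}

static int demo_offset_set(void *data, u64 val)
{
        demo_offset = (s64)val;
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(demo_offset_fops, demo_offset_get,
                                demo_offset_set, "%lld\n");

/* typically registered with:
 * debugfs_create_file_unsafe("offset", 0644, parent, NULL,
 *                            &demo_offset_fops);
 */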
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index afc416e5dcab..32444686b6ff 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -18,7 +18,7 @@ enum debug_obj_state {
struct debug_obj_descr;
/**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @astate: current active state
@@ -30,7 +30,7 @@ struct debug_obj {
enum debug_obj_state state;
unsigned int astate;
void *object;
- struct debug_obj_descr *descr;
+ const struct debug_obj_descr *descr;
};
/**
@@ -64,14 +64,14 @@ struct debug_obj_descr {
};
#ifdef CONFIG_DEBUG_OBJECTS
-extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_init (void *addr, const struct debug_obj_descr *descr);
extern void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
-extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
-extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr);
/*
* Active state:
@@ -79,26 +79,26 @@ extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
* - Must return to 0 before deactivation.
*/
extern void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next);
extern void debug_objects_early_init(void);
extern void debug_objects_mem_init(void);
#else
static inline void
-debug_object_init (void *addr, struct debug_obj_descr *descr) { }
+debug_object_init (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { }
static inline int
-debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
+debug_object_activate (void *addr, const struct debug_obj_descr *descr) { return 0; }
static inline void
-debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
+debug_object_destroy (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+debug_object_free (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
+debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
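With every debug_object_* entry point now taking a const descriptor, the descriptor itself can be placed in rodata. A minimal sketch with hypothetical names:

/* Sketch: a read-only descriptor plus init/activate calls. */
static const struct debug_obj_descr demo_debug_descr = {
        .name = "demo_object",
};

static void demo_obj_setup(void *obj)
{
        debug_object_init(obj, &demo_debug_descr);
        debug_object_activate(obj, &demo_debug_descr);
}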
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 868e9eacd69e..ac862422df15 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,13 +25,21 @@
#define STATIC_RW_DATA static
#endif
+/*
+ * When an architecture needs to share the malloc()/free() implementation
+ * between compilation units, the functions need non-local visibility.
+ */
+#ifndef MALLOC_VISIBLE
+#define MALLOC_VISIBLE static
+#endif
+
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
STATIC_RW_DATA unsigned long malloc_ptr;
STATIC_RW_DATA int malloc_count;
-static void *malloc(int size)
+MALLOC_VISIBLE void *malloc(int size)
{
void *p;
@@ -40,7 +48,7 @@ static void *malloc(int size)
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
- malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+ malloc_ptr = (malloc_ptr + 7) & ~7; /* Align */
p = (void *)malloc_ptr;
malloc_ptr += size;
@@ -52,7 +60,7 @@ static void *malloc(int size)
return p;
}
-static void free(void *where)
+MALLOC_VISIBLE void free(void *where)
{
malloc_count--;
if (!malloc_count)
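The alignment bump from (ptr + 3) & ~3 to (ptr + 7) & ~7 rounds allocations up to 8-byte instead of 4-byte boundaries, which matters for 64-bit loads in the decompressors. The idiom rounds x up to the next multiple of a power of two; for example (0x1005 + 7) & ~7 == 0x1008, while 0x1008 itself is left unchanged. As a generic sketch:

/* Sketch: round up to an 8-byte boundary (8 must be a power of two). */
#define DEMO_ALIGN8(x) (((x) + 7UL) & ~7UL)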
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 1d0e2ce6b6d9..ff9cda975e30 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -19,7 +19,8 @@
* https://lists.openwall.net/linux-kernel/2011/01/09/56
*/
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/sched.h>
extern unsigned long loops_per_jiffy;
@@ -55,10 +56,22 @@ static inline void ndelay(unsigned long x)
extern unsigned long lpj_fine;
void calibrate_delay(void);
+unsigned long calibrate_delay_is_known(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
static inline void ssleep(unsigned int seconds)
{
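usleep_range_state() generalizes usleep_range() over the task state, and usleep_idle_range() uses TASK_IDLE, an uninterruptible sleep that does not count toward load average, which suits housekeeping-style polling. A sketch:

/* Sketch: poll delay that stays out of the load average. The function
 * name and the 100-200us window are illustrative.
 */
static void demo_wait_for_hw(void)
{
        usleep_idle_range(100, 200);
}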
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 2d3bdcccf5eb..6639f48dac36 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -9,18 +9,9 @@
#include <uapi/linux/taskstats.h>
-/*
- * Per-task flags relevant to delay accounting
- * maintained privately to avoid exhausting similar flags in sched.h:PF_*
- * Used to set current->delays->flags
- */
-#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
-#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
raw_spinlock_t lock;
- unsigned int flags; /* Private per-task flags */
/* For each stat XXX, add following, aligned appropriately
*
@@ -37,13 +28,13 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_start;
u64 blkio_delay; /* wait for sync block io completion */
- u64 swapin_delay; /* wait for swapin block io completion */
+ u64 swapin_start;
+ u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
- u32 swapin_count; /* total count of the number of swapin block */
- /* io operations performed */
+ u32 swapin_count; /* total count of swapin */
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
@@ -51,48 +42,49 @@ struct task_delay_info {
u64 thrashing_start;
u64 thrashing_delay; /* wait for thrashing page */
+ u64 compact_start;
+ u64 compact_delay; /* wait for memory compact */
+
+ u64 wpcopy_start;
+ u64 wpcopy_delay; /* wait for write-protect copy */
+
+ u64 irq_delay; /* wait for IRQ/SOFTIRQ */
+
u32 freepages_count; /* total count of memory reclaim */
u32 thrashing_count; /* total count of thrash waits */
+ u32 compact_count; /* total count of memory compact */
+ u32 wpcopy_count; /* total count of write-protect copy */
+ u32 irq_count; /* total count of IRQ/SOFTIRQ */
};
#endif
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_TASK_DELAY_ACCT
+DECLARE_STATIC_KEY_FALSE(delayacct_key);
extern int delayacct_on; /* Delay accounting turned on/off */
extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
+
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
extern void __delayacct_blkio_end(struct task_struct *);
-extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-extern void __delayacct_thrashing_start(void);
-extern void __delayacct_thrashing_end(void);
-
-static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
-{
- if (p->delays)
- return (p->delays->flags & DELAYACCT_PF_BLKIO);
- else
- return 0;
-}
-
-static inline void delayacct_set_flag(int flag)
-{
- if (current->delays)
- current->delays->flags |= flag;
-}
-
-static inline void delayacct_clear_flag(int flag)
-{
- if (current->delays)
- current->delays->flags &= ~flag;
-}
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
+extern void __delayacct_swapin_start(void);
+extern void __delayacct_swapin_end(void);
+extern void __delayacct_compact_start(void);
+extern void __delayacct_compact_end(void);
+extern void __delayacct_wpcopy_start(void);
+extern void __delayacct_wpcopy_end(void);
+extern void __delayacct_irq(struct task_struct *task, u32 delta);
static inline void delayacct_tsk_init(struct task_struct *tsk)
{
@@ -114,24 +106,20 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
static inline void delayacct_blkio_start(void)
{
- delayacct_set_flag(DELAYACCT_PF_BLKIO);
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_blkio_start();
}
static inline void delayacct_blkio_end(struct task_struct *p)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (p->delays)
__delayacct_blkio_end(p);
- delayacct_clear_flag(DELAYACCT_PF_BLKIO);
-}
-
-static inline int delayacct_add_tsk(struct taskstats *d,
- struct task_struct *tsk)
-{
- if (!delayacct_on || !tsk->delays)
- return 0;
- return __delayacct_add_tsk(d, tsk);
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
@@ -143,33 +131,104 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
static inline void delayacct_freepages_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_end();
}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_start(in_thrashing);
+}
+
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_end(in_thrashing);
+}
+
+static inline void delayacct_swapin_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_start();
+}
+
+static inline void delayacct_swapin_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
- __delayacct_thrashing_start();
+ __delayacct_swapin_end();
}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_compact_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_start();
+}
+
+static inline void delayacct_compact_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_end();
+}
+
+static inline void delayacct_wpcopy_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
- __delayacct_thrashing_end();
+ __delayacct_wpcopy_start();
+}
+
+static inline void delayacct_wpcopy_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_end();
+}
+
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (task->delays)
+ __delayacct_irq(task, delta);
}
#else
-static inline void delayacct_set_flag(int flag)
-{}
-static inline void delayacct_clear_flag(int flag)
-{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
@@ -191,9 +250,23 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{}
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{}
+static inline void delayacct_swapin_start(void)
+{}
+static inline void delayacct_swapin_end(void)
+{}
+static inline void delayacct_compact_start(void)
+{}
+static inline void delayacct_compact_end(void)
+{}
+static inline void delayacct_wpcopy_start(void)
+{}
+static inline void delayacct_wpcopy_end(void)
{}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
{}
#endif /* CONFIG_TASK_DELAY_ACCT */
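Each wrapper above now bails out through static_branch_unlikely(&delayacct_key), so with delay accounting disabled the call sites cost a single patched-out branch. The pattern in isolation, with hypothetical names:

/* Sketch: static-key gating as used by the delayacct wrappers. */
DEFINE_STATIC_KEY_FALSE(demo_acct_key);

static inline void demo_acct_event(void)
{
        if (!static_branch_unlikely(&demo_acct_key))
                return; /* patched to a plain fall-through while disabled */

        if (current->delays)
                pr_debug("accounting event\n"); /* stand-in for __delayacct_*() */
}

/* enabled at runtime with static_branch_enable(&demo_acct_key); */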
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 3028b644b4fb..6bfe70decc9f 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -21,6 +21,14 @@
struct device;
+#define PRINTK_INFO_SUBSYSTEM_LEN 16
+#define PRINTK_INFO_DEVICE_LEN 48
+
+struct dev_printk_info {
+ char subsystem[PRINTK_INFO_SUBSYSTEM_LEN];
+ char device[PRINTK_INFO_DEVICE_LEN];
+};
+
#ifdef CONFIG_PRINTK
__printf(3, 0) __cold
@@ -30,8 +38,8 @@ __printf(3, 4) __cold
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
__printf(3, 4) __cold
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
__printf(2, 3) __cold
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__printf(2, 3) __cold
@@ -61,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{}
static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
+void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{}
@@ -90,24 +98,56 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#endif
/*
+ * Need to take variadic arguments even though we don't use them, as dev_fmt()
+ * may only just have been expanded and may result in multiple arguments.
+ */
+#define dev_printk_index_emit(level, fmt, ...) \
+ printk_index_subsys_emit("%s %s: ", level, fmt)
+
+#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _p_func(dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Some callsites directly call dev_printk rather than going through the
+ * dev_<level> infrastructure, so we need to emit here as well as inside those
+ * level-specific macros. Only one index entry will be produced, either way,
+ * since dev_printk's `fmt` isn't known at compile time if going through the
+ * dev_<level> macros.
+ *
+ * dev_fmt() isn't called for dev_printk when used directly, as it's used by
+ * the dev_<level> macros internally which already have dev_fmt() processed.
+ *
+ * We also can't use dev_printk_index_wrap directly, because we have a separate
+ * level to process.
+ */
+#define dev_printk(level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
-#define dev_emerg(dev, fmt, ...) \
- _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_crit(dev, fmt, ...) \
- _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_alert(dev, fmt, ...) \
- _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_err(dev, fmt, ...) \
- _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_warn(dev, fmt, ...) \
- _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_notice(dev, fmt, ...) \
- _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_info(dev, fmt, ...) \
- _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_emerg(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
@@ -228,10 +268,12 @@ do { \
* using WARN/WARN_ONCE to include file/line information and a backtrace.
*/
#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
#define dev_WARN_ONCE(dev, condition, format, arg...) \
WARN_ONCE(condition, "%s %s: " format, \
dev_driver_string(dev), dev_name(dev), ## arg)
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
#endif /* _DEVICE_PRINTK_H_ */
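dev_err_probe() both logs and returns the error code (deferring quietly on -EPROBE_DEFER, where the message is recorded as the deferred-probe reason instead), which collapses probe error paths to one line. A sketch; the clock name is illustrative:

/* Sketch: typical probe() error path using dev_err_probe(). */
static int demo_probe(struct platform_device *pdev)
{
        struct clk *clk = devm_clk_get(&pdev->dev, "core");

        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "failed to get core clock\n");
        return 0;
}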
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index f14f17f8cb7f..4a50a5c71a5f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -106,8 +106,11 @@ extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata);
extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index);
-extern int devfreq_event_get_edev_count(struct device *dev);
+ struct device *dev,
+ const char *phandle_name,
+ int index);
+extern int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name);
extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc);
extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
@@ -152,12 +155,15 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
}
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index)
+ struct device *dev,
+ const char *phandle_name,
+ int index)
{
return ERR_PTR(-EINVAL);
}
-static inline int devfreq_event_get_edev_count(struct device *dev)
+static inline int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name)
{
return -EINVAL;
}
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 12782fbb4c25..d312ffbac4dd 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -15,8 +15,6 @@
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
-#define DEVFREQ_NAME_LEN 16
-
/* DEVFREQ governor name */
#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand"
#define DEVFREQ_GOV_PERFORMANCE "performance"
@@ -40,6 +38,8 @@ enum devfreq_timer {
struct devfreq;
struct devfreq_governor;
+struct devfreq_cpu_data;
+struct thermal_cooling_device;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -100,6 +100,9 @@ struct devfreq_dev_status {
* @freq_table: Optional list of frequencies to support statistics
* and freq_table must be generated in ascending order.
* @max_state: The size of freq_table.
+ *
+ * @is_cooling_device: Flag indicating whether the device can also be
+ * registered as a thermal cooling device.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
@@ -114,6 +117,8 @@ struct devfreq_dev_profile {
unsigned long *freq_table;
unsigned int max_state;
+
+ bool is_cooling_device;
};
/**
@@ -139,15 +144,17 @@ struct devfreq_stats {
* using devfreq.
* @profile: device-specific devfreq profile
* @governor: method how to choose frequency based on the usage.
- * @governor_name: devfreq governor name for use with this devfreq
+ * @opp_table: Reference to OPP table of dev.parent, if one exists.
* @nb: notifier block used to notify devfreq object that it should
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
+ * @freq_table: current frequency table used by the devfreq driver.
+ * @max_state: number of entries in the frequency table.
* @previous_freq: previously configured frequency value.
* @last_status: devfreq user device info, performance statistics
- * @data: Private data of the governor. The devfreq framework does not
- * touch this.
+ * @data: data the devfreq driver passes to governors; governors must not change it.
+ * @governor_data: private data for governors; the devfreq core doesn't touch it.
* @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
* @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
@@ -158,6 +165,7 @@ struct devfreq_stats {
* @suspend_count: suspend requests counter for a device.
* @stats: Statistics of devfreq device behavior
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @cdev: Cooling device pointer if the devfreq has cooling property
* @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
* @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
@@ -176,14 +184,18 @@ struct devfreq {
struct device dev;
struct devfreq_dev_profile *profile;
const struct devfreq_governor *governor;
- char governor_name[DEVFREQ_NAME_LEN];
+ struct opp_table *opp_table;
struct notifier_block nb;
struct delayed_work work;
+ unsigned long *freq_table;
+ unsigned int max_state;
+
unsigned long previous_freq;
struct devfreq_dev_status last_status;
- void *data; /* private data for governors */
+ void *data;
+ void *governor_data;
struct dev_pm_qos_request user_min_freq_req;
struct dev_pm_qos_request user_max_freq_req;
@@ -200,6 +212,9 @@ struct devfreq {
struct srcu_notifier_head transition_notifier_list;
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
+
struct notifier_block nb_min;
struct notifier_block nb_max;
};
@@ -228,12 +243,7 @@ int devfreq_resume_device(struct devfreq *devfreq);
void devfreq_suspend(void);
void devfreq_resume(void);
-/**
- * update_devfreq() - Reevaluate the device and configure frequency
- * @devfreq: the devfreq device
- *
- * Note: devfreq->lock must be held
- */
+/* update_devfreq() - Reevaluate the device and configure frequency */
int update_devfreq(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
@@ -261,9 +271,11 @@ void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index);
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ const char *phandle_name, int index);
+#endif /* CONFIG_PM_DEVFREQ */
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
@@ -281,9 +293,12 @@ struct devfreq_simple_ondemand_data {
unsigned int upthreshold;
unsigned int downdifferential;
};
-#endif
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+enum devfreq_parent_dev_type {
+ DEVFREQ_PARENT_DEV,
+ CPUFREQ_PARENT_DEV,
+};
+
/**
* struct devfreq_passive_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
@@ -295,8 +310,11 @@ struct devfreq_simple_ondemand_data {
* using governors except for passive governor.
* If the devfreq device has the specific method to decide
* the next frequency, should use this callback.
- * @this: the devfreq instance of own device.
- * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ * @parent_type: the parent type of the device.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER or
+ * CPUFREQ_TRANSITION_NOTIFIER list.
+ * @cpu_data_list: the list of cpu frequency data for all cpufreq_policy.
*
* The devfreq_passive_data have to set the devfreq instance of parent
* device with governors except for the passive governor. But, don't need to
@@ -310,13 +328,16 @@ struct devfreq_passive_data {
/* Optional callback to decide the next frequency of passive device */
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ /* Should set the type of parent device */
+ enum devfreq_parent_dev_type parent_type;
+
/* For passive governor's internal use. Don't need to set them */
struct devfreq *this;
struct notifier_block nb;
+ struct list_head cpu_data_list;
};
-#endif
-#else /* !CONFIG_PM_DEVFREQ */
+#if !defined(CONFIG_PM_DEVFREQ)
static inline struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
const char *governor_name,
@@ -414,8 +435,13 @@ static inline void devm_devfreq_unregister_notifier(struct device *dev,
{
}
+static inline struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index)
+ const char *phandle_name, int index)
{
return ERR_PTR(-ENODEV);
}
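With parent_type in struct devfreq_passive_data, a passive device can now follow a cpufreq policy instead of another devfreq device. A hedged sketch of the data a driver might feed to devfreq_add_device() (field usage per the comments above; everything else is hypothetical):

/* Sketch: passive governor data tracking cpufreq. For
 * CPUFREQ_PARENT_DEV the devfreq parent pointer is not used.
 */
static struct devfreq_passive_data demo_passive_data = {
        .parent_type = CPUFREQ_PARENT_DEV,
};

/* devfreq = devfreq_add_device(dev, &demo_profile, DEVFREQ_GOV_PASSIVE,
 *                              &demo_passive_data);
 */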
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 9df2dfca68dd..14baa73fc2de 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -16,17 +16,6 @@
/**
* struct devfreq_cooling_power - Devfreq cooling power ops
- * @get_static_power: Take voltage, in mV, and return the static power
- * in mW. If NULL, the static power is assumed
- * to be 0.
- * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and
- * return the dynamic power draw in mW. If NULL,
- * a simple power model is used.
- * @dyn_power_coeff: Coefficient for the simple dynamic power model in
- * mW/(MHz mV mV).
- * If get_dynamic_power() is NULL, then the
- * dynamic power is calculated as
- * @dyn_power_coeff * frequency * voltage^2
* @get_real_power: When this is set, the framework uses it to ask the
* device driver for the actual power.
* Some devices have more sophisticated methods
@@ -46,14 +35,8 @@
* max total (static + dynamic) power value for each OPP.
*/
struct devfreq_cooling_power {
- unsigned long (*get_static_power)(struct devfreq *devfreq,
- unsigned long voltage);
- unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
- unsigned long freq,
- unsigned long voltage);
int (*get_real_power)(struct devfreq *df, u32 *power,
unsigned long freq, unsigned long voltage);
- unsigned long dyn_power_coeff;
};
#ifdef CONFIG_DEVFREQ_THERMAL
@@ -65,6 +48,9 @@ struct thermal_cooling_device *
of_devfreq_cooling_register(struct device_node *np, struct devfreq *df);
struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df);
void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
+struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power);
#else /* !CONFIG_DEVFREQ_THERMAL */
@@ -87,6 +73,13 @@ devfreq_cooling_register(struct devfreq *df)
return ERR_PTR(-EINVAL);
}
+static inline struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void
devfreq_cooling_unregister(struct thermal_cooling_device *dfc)
{
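devfreq_cooling_em_register() registers a cooling device whose power numbers come from the energy model unless the driver supplies get_real_power(). A sketch under that assumption, with hypothetical names:

/* Sketch: EM-backed devfreq cooling registration. */
static int demo_register_cooling(struct devfreq *df)
{
        static struct devfreq_cooling_power demo_power = {
                /* .get_real_power left NULL: power is estimated from
                 * the energy model */
        };
        struct thermal_cooling_device *cdev;

        cdev = devfreq_cooling_em_register(df, &demo_power);
        if (IS_ERR(cdev))
                return PTR_ERR(cdev);
        return 0;
}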
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 93096e524e43..82b2195efaca 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -20,6 +21,7 @@ struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
+enum dax_access_mode;
/*
* Type of table, mapped_device's mempool and request_queue
@@ -29,10 +31,9 @@ enum dm_queue_mode {
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_DAX_BIO_BASED = 3,
- DM_TYPE_NVME_BIO_BASED = 4,
};
-typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
union map_info {
void *ptr;
@@ -87,16 +88,25 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+ unsigned int status_flags, char *result, unsigned int maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
struct dm_report_zones_args *args,
unsigned int nr_zones);
+#else
+/*
+ * Define dm_report_zones_fn so that targets can assign to NULL if
+ * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
+ * awkward #ifdefs in their target_type, etc.
+ */
+typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
+#endif
/*
* These iteration functions are typically used to check (and combine)
@@ -138,29 +148,34 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* >= 0 : the number of bytes accessible at the address
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i);
+ long nr_pages, enum dax_access_mode node, void **kaddr,
+ pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
+
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0 : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct file *bdev_file;
struct dax_device *dax_dev;
- fmode_t mode;
+ blk_mode_t mode;
char name[16];
};
-dev_t dm_get_dev_t(const char *path);
-
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
@@ -172,7 +187,7 @@ struct target_type {
uint64_t features;
const char *name;
struct module *module;
- unsigned version[3];
+ unsigned int version[3];
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
@@ -188,16 +203,13 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
-#ifdef CONFIG_BLK_DEV_ZONED
dm_report_zones_fn report_zones;
-#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_iter_fn dax_copy_from_iter;
- dm_dax_copy_iter_fn dax_copy_to_iter;
dm_dax_zero_page_range_fn dax_zero_page_range;
+ dm_dax_recovery_write_fn dax_recovery_write;
/* For internal device-mapper use. */
struct list_head list;
@@ -247,10 +259,40 @@ struct target_type {
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
/*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ * block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ * devices with different zoned models.
*/
+#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+#else
+#define DM_TARGET_ZONED_HM 0x00000000
+#define dm_target_supports_zoned_hm(type) (false)
+#endif
+
+/*
+ * A target handles REQ_NOWAIT
+ */
+#define DM_TARGET_NOWAIT 0x00000080
+#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
+
+/*
+ * A target supports passing through inline crypto support.
+ */
+#define DM_TARGET_PASSES_CRYPTO 0x00000100
+#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
struct dm_target {
struct dm_table *table;
@@ -271,37 +313,31 @@ struct dm_target {
* It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_bios;
+ unsigned int num_flush_bios;
/*
* The number of discard bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_bios;
+ unsigned int num_discard_bios;
/*
* The number of secure erase bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_secure_erase_bios;
-
- /*
- * The number of WRITE SAME bios that will be submitted to the target.
- * The bio number can be accessed with dm_bio_get_target_bio_nr.
- */
- unsigned num_write_same_bios;
+ unsigned int num_secure_erase_bios;
/*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_zeroes_bios;
+ unsigned int num_write_zeroes_bios;
/*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
- unsigned per_io_data_size;
+ unsigned int per_io_data_size;
/* target specific data */
void *private;
@@ -320,11 +356,52 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
bool discards_supported:1;
+
+ /*
+ * Set if this target requires that discards be split on
+ * 'max_discard_sectors' boundaries.
+ */
+ bool max_discard_granularity:1;
+
+ /*
+ * Set if this target requires that secure_erases be split on
+ * 'max_secure_erase_sectors' boundaries.
+ */
+ bool max_secure_erase_granularity:1;
+
+ /*
+ * Set if this target requires that write_zeroes be split on
+ * 'max_write_zeroes_sectors' boundaries.
+ */
+ bool max_write_zeroes_granularity:1;
+
+ /*
+ * Set if we need to limit the number of in-flight bios when swapping.
+ */
+ bool limit_swap_bios:1;
+
+ /*
+ * Set if this target implements a zoned device and needs emulation of
+ * zone append operations using regular writes.
+ */
+ bool emulate_zone_append:1;
+
+ /*
+ * Set if the target will submit IO using dm_submit_bio_remap()
+ * after returning DM_MAPIO_SUBMITTED from its map function.
+ */
+ bool accounts_remapped_io:1;
+
+ /*
+ * Set if the target will submit the DM bio without first calling
+ * bio_set_dev(). NOTE: ideally a target should _not_ need this.
+ */
+ bool needs_bio_set_dev:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
-unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
u64 dm_start_time_ns_from_clone(struct bio *bio);
@@ -335,7 +412,7 @@ void dm_unregister_target(struct target_type *t);
* Target argument parsing.
*/
struct dm_arg_set {
- unsigned argc;
+ unsigned int argc;
char **argv;
};
@@ -344,8 +421,8 @@ struct dm_arg_set {
* the error message to use if the number is found to be outside that range.
*/
struct dm_arg {
- unsigned min;
- unsigned max;
+ unsigned int min;
+ unsigned int max;
char *error;
};
@@ -354,7 +431,7 @@ struct dm_arg {
* returning -EINVAL and setting *error.
*/
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error);
+ unsigned int *value, char **error);
/*
* Process the next argument as the start of a group containing between
@@ -362,7 +439,7 @@ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
* *num_args or, if invalid, return -EINVAL and set *error.
*/
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *num_args, char **error);
+ unsigned int *num_args, char **error);
/*
* Return the current argument and shift to the next.
@@ -372,12 +449,14 @@ const char *dm_shift_arg(struct dm_arg_set *as);
/*
* Move through num_args arguments.
*/
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
/*
* DM_ANY_MINOR chooses the next available minor number.
@@ -402,7 +481,7 @@ void *dm_get_mdptr(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);
/*
@@ -422,7 +501,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
@@ -437,7 +517,8 @@ struct dm_report_zones_args {
/* must be filled by ->report_zones before calling dm_report_zones_cb */
sector_t start;
};
-int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
+ struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */
/*
@@ -448,23 +529,23 @@ int __init dm_early_create(struct dm_ioctl *dmi,
struct dm_target_spec **spec_array,
char **target_params_array);
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
-
/*
* Geometry functions.
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Functions for manipulating device-mapper tables.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* First create an empty table.
*/
-int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md);
+int dm_table_create(struct dm_table **result, blk_mode_t mode,
+ unsigned int num_targets, struct mapped_device *md);
/*
* Then call this once for each target.
@@ -506,8 +587,7 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
-fmode_t dm_table_get_mode(struct dm_table *t);
+blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
@@ -529,13 +609,15 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
- * A wrapper around vmalloc.
+ * Table blk_crypto_profile functions
*/
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Macros.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
#define DM_NAME "device-mapper"
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
@@ -552,8 +634,31 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
- 0 : scnprintf(result + sz, maxlen - sz, x))
+#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))
+
+#define DMEMIT_TARGET_NAME_VERSION(y) \
+ DMEMIT("target_name=%s,target_version=%u.%u.%u", \
+ (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
+
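For orientation, a minimal sketch of how a target's ->status handler feeds DMEMIT(); the function and target names are illustrative, and the macro expects `sz`, `result` and `maxlen` to be in scope:

    static void zero_status(struct dm_target *ti, status_type_t type,
                            unsigned int status_flags, char *result,
                            unsigned int maxlen)
    {
            unsigned int sz = 0;    /* output cursor consumed by DMEMIT() */

            DMEMIT("no arguments");
    }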
+/**
+ * module_dm() - Helper macro for DM targets that don't do anything
+ * special in their module_init and module_exit.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @name: DM target's name
+ */
+#define module_dm(name) \
+static int __init dm_##name##_init(void) \
+{ \
+ return dm_register_target(&(name##_target)); \
+} \
+module_init(dm_##name##_init) \
+static void __exit dm_##name##_exit(void) \
+{ \
+ dm_unregister_target(&(name##_target)); \
+} \
+module_exit(dm_##name##_exit)
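As a usage sketch (the target name and callbacks are illustrative, not part of this patch), a target whose target_type is named `zero_target` now registers with a single line:

    static struct target_type zero_target = {
            .name    = "zero",
            .version = {1, 0, 0},
            .module  = THIS_MODULE,
            .ctr     = zero_ctr,    /* hypothetical constructor */
            .map     = zero_map,    /* hypothetical map callback */
    };
    module_dm(zero);        /* emits dm_zero_init()/dm_zero_exit() */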
/*
* Definitions of return values from target end_io function.
diff --git a/include/linux/device.h b/include/linux/device.h
index ca18da4768e3..97c4b046c09d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -30,6 +30,7 @@
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
+#include <linux/cleanup.h>
#include <asm/device.h>
struct device;
@@ -41,15 +42,15 @@ struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
-struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
+struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
- * @subsys: subsytem of the devices to attach to
+ * @subsys: subsystem of the devices to attach to
* @node: the list of functions registered at the subsystem
* @add_dev: device hookup to device function handler
* @remove_dev: device hookup to device function handler
@@ -61,7 +62,7 @@ struct dev_iommu;
*/
struct subsys_interface {
const char *name;
- struct bus_type *subsys;
+ const struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
@@ -70,9 +71,9 @@ struct subsys_interface {
int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups);
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups);
/*
@@ -87,15 +88,20 @@ int subsys_virtual_register(struct bus_type *subsys,
struct device_type {
const char *name;
const struct attribute_group **groups;
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode,
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);
const struct dev_pm_ops *pm;
};
-/* interface for exporting device attributes */
+/**
+ * struct device_attribute - Interface for exporting device attributes.
+ * @attr: sysfs attribute definition.
+ * @show: Show handler.
+ * @store: Store handler.
+ */
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
@@ -104,6 +110,11 @@ struct device_attribute {
const char *buf, size_t count);
};
+/**
+ * struct dev_ext_attribute - Exported device attribute with extra context.
+ * @attr: Exported device attribute.
+ * @var: Pointer to context.
+ */
struct dev_ext_attribute {
struct device_attribute attr;
void *var;
@@ -122,30 +133,124 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
+/**
+ * DEVICE_ATTR - Define a device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Convenience macro for defining a struct device_attribute.
+ *
+ * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
+ *
+ * .. code-block:: c
+ *
+ * struct device_attribute dev_attr_foo = {
+ * .attr = { .name = "foo", .mode = 0644 },
+ * .show = foo_show,
+ * .store = foo_store,
+ * };
+ */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
+ */
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_PREALLOC(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_RW - Define a read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
+ * and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RW(), but @_mode is 0600.
+ */
#define DEVICE_ATTR_ADMIN_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
+
+/**
+ * DEVICE_ATTR_RO - Define a readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
+ */
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RO(), but @_mode is 0400.
+ */
#define DEVICE_ATTR_ADMIN_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
+
+/**
+ * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of unsigned long.
+ *
+ * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
+ * such that reads and writes to the attribute from userspace affect @_var.
+ */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+
+/**
+ * DEVICE_INT_ATTR - Define a device attribute backed by an int.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of int.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is an int.
+ */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+/**
+ * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of bool.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
+ */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
+
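A short sketch of the *_ATTR helpers in use; the variable and attribute names are illustrative. Note that DEVICE_INT_ATTR() defines a struct dev_ext_attribute, so registration takes its embedded `.attr` member:

    static int retry_count = 3;
    static DEVICE_INT_ATTR(retry_count, 0644, retry_count);

    /* typically from probe: */
    ret = device_create_file(dev, &dev_attr_retry_count.attr);
    if (ret)
            return ret;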
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
@@ -165,21 +270,12 @@ void device_remove_bin_file(struct device *dev,
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
-#else
-void *devres_alloc_node(dr_release_t release, size_t size,
- gfp_t gfp, int nid) __malloc;
-static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
-}
-#endif
void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
@@ -205,7 +301,9 @@ void devres_remove_group(struct device *dev, void *id);
int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
-void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2);
+void *devm_krealloc(struct device *dev, void *ptr, size_t size,
+ gfp_t gfp) __must_check __realloc_size(3);
__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
const char *fmt, va_list ap) __malloc;
__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
@@ -229,15 +327,28 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
+static inline __realloc_size(3, 4) void * __must_check
+devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return devm_krealloc(dev, p, bytes, flags);
+}
+
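A minimal usage sketch, assuming `items` points at a devm-managed array allocated earlier against `dev` and `new_count` is the desired element count:

    items = devm_krealloc_array(dev, items, new_count, sizeof(*items),
                                GFP_KERNEL);
    if (!items)
            return -ENOMEM; /* on failure the old buffer stays owned by dev */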
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
-void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+ __realloc_size(3);
unsigned long devm_get_free_pages(struct device *dev,
gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);
+#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
void __iomem *devm_ioremap_resource_wc(struct device *dev,
@@ -246,23 +357,53 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
void __iomem *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
resource_size_t *size);
+#else
+
+static inline
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_of_iomap(struct device *dev,
+ struct device_node *node, int index,
+ resource_size_t *size)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif
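For reference, the typical call pattern in a platform driver probe; `pdev` is the usual struct platform_device argument:

    struct resource *res;
    void __iomem *base;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
            return PTR_ERR(base);

With !CONFIG_HAS_IOMEM the new stubs make such callers fail gracefully with -EINVAL instead of breaking the build.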
/* allows to add/remove a custom action to devres stack */
-int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);
-static inline int devm_add_action_or_reset(struct device *dev,
- void (*action)(void *), void *data)
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
{
int ret;
- ret = devm_add_action(dev, action, data);
+ ret = __devm_add_action(dev, action, data, name);
if (ret)
action(data);
return ret;
}
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
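Callers are unaffected by the wrapper; a sketch with an illustrative teardown callback (the #action stringification only feeds devres debug naming):

    static void my_clk_disable(void *data)
    {
            clk_disable_unprepare(data);
    }

    ret = devm_add_action_or_reset(dev, my_clk_disable, clk);
    if (ret)
            return ret;     /* the callback has already run on failure */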
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
@@ -289,66 +430,11 @@ struct device_dma_parameters {
* sg limitations.
*/
unsigned int max_segment_size;
+ unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
/**
- * struct device_connection - Device Connection Descriptor
- * @fwnode: The device node of the connected device
- * @endpoint: The names of the two devices connected together
- * @id: Unique identifier for the connection
- * @list: List head, private, for internal use only
- *
- * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when
- * platform firmware defines the connection. When the connection is registered
- * with device_connection_add() @endpoint is used instead.
- */
-struct device_connection {
- struct fwnode_handle *fwnode;
- const char *endpoint[2];
- const char *id;
- struct list_head list;
-};
-
-typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep,
- void *data);
-
-void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
- const char *con_id, void *data,
- devcon_match_fn_t match);
-void *device_connection_find_match(struct device *dev, const char *con_id,
- void *data, devcon_match_fn_t match);
-
-struct device *device_connection_find(struct device *dev, const char *con_id);
-
-void device_connection_add(struct device_connection *con);
-void device_connection_remove(struct device_connection *con);
-
-/**
- * device_connections_add - Add multiple device connections at once
- * @cons: Zero terminated array of device connection descriptors
- */
-static inline void device_connections_add(struct device_connection *cons)
-{
- struct device_connection *c;
-
- for (c = cons; c->endpoint[0]; c++)
- device_connection_add(c);
-}
-
-/**
- * device_connections_remove - Remove multiple device connections at once
- * @cons: Zero terminated array of device connection descriptors
- */
-static inline void device_connections_remove(struct device_connection *cons)
-{
- struct device_connection *c;
-
- for (c = cons; c->endpoint[0]; c++)
- device_connection_remove(c);
-}
-
-/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
@@ -377,6 +463,7 @@ enum device_link_state {
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
* SYNC_STATE_ONLY: Link only affects sync_state() behavior.
+ * INFERRED: Inferred from data (e.g. firmware) and not from driver actions.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@@ -386,6 +473,8 @@ enum device_link_state {
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
+#define DL_FLAG_INFERRED BIT(8)
+#define DL_FLAG_CYCLE BIT(9)
/**
* enum dl_dev_state - Device driver presence tracking information.
@@ -402,26 +491,117 @@ enum dl_dev_state {
};
/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable are determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ * device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+ DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+ DEVICE_REMOVABLE_UNKNOWN,
+ DEVICE_FIXED,
+ DEVICE_REMOVABLE,
+};
+
+/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
- * @needs_suppliers: Hook to global list of devices waiting for suppliers.
- * @defer_hook: Hook to global list of devices that have deferred sync_state or
- * deferred fw_devlink.
- * @need_for_probe: If needs_suppliers is on a list, this indicates if the
- * suppliers are needed for probe or not.
+ * @defer_sync: Hook to global list of devices that have deferred sync_state.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
- struct list_head needs_suppliers;
- struct list_head defer_hook;
- bool need_for_probe;
+ struct list_head defer_sync;
enum dl_dev_state status;
};
/**
+ * struct dev_msi_info - Device data related to MSI
+ * @domain: The MSI interrupt domain associated to the device
+ * @data: Pointer to MSI device data
+ */
+struct dev_msi_info {
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct irq_domain *domain;
+ struct msi_device_data *data;
+#endif
+};
+
+/**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
+ */
+enum device_physical_location_panel {
+ DEVICE_PANEL_TOP,
+ DEVICE_PANEL_BOTTOM,
+ DEVICE_PANEL_LEFT,
+ DEVICE_PANEL_RIGHT,
+ DEVICE_PANEL_FRONT,
+ DEVICE_PANEL_BACK,
+ DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+ DEVICE_VERT_POS_UPPER,
+ DEVICE_VERT_POS_CENTER,
+ DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+ DEVICE_HORI_POS_LEFT,
+ DEVICE_HORI_POS_CENTER,
+ DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ * point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ * the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ * within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ * port replicator.
+ * @lid: Set if this device connection point resides on the lid of a laptop
+ * system.
+ */
+struct device_physical_location {
+ enum device_physical_location_panel panel;
+ enum device_physical_location_vertical_position vertical_position;
+ enum device_physical_location_horizontal_position horizontal_position;
+ bool dock;
+ bool lid;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -435,8 +615,6 @@ struct dev_links_info {
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
- * @lockdep_mutex: An optional debug lock that a subsystem can use as a
- * peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
@@ -454,10 +632,10 @@ struct dev_links_info {
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
+ * @em_pd: device's energy model performance domain
* @pins: For device pin management.
- * See Documentation/driver-api/pinctl.rst for details.
- * @msi_list: Hosts MSI descriptors
- * @msi_domain: The generic MSI domain this device is using.
+ * See Documentation/driver-api/pin-control.rst for details.
+ * @msi: MSI related data
* @numa_node: NUMA node this device is close to.
* @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
@@ -466,12 +644,16 @@ struct dev_links_info {
* such descriptors.
* @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
* DMA limit than the device itself supports.
- * @dma_pfn_offset: offset of DMA memory range relatively of RAM
+ * @dma_range_map: map for DMA memory ranges relative to that of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -479,7 +661,6 @@ struct dev_links_info {
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
- * @knode_class: The node used to add the device to the class list.
* @class: The class of the device.
* @groups: Optional attribute groups.
* @release: Callback to free the device after all references have
@@ -487,6 +668,11 @@ struct dev_links_info {
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
* @iommu: Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ * point in the system housing.
+ * @removable: Whether the device can be removed from the system. This
+ * should be set by the subsystem / bus driver that discovered
+ * the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
@@ -495,6 +681,9 @@ struct dev_links_info {
* @state_synced: The hardware state of this device has been synced to match
* the software state of this device by calling the driver/bus
* sync_state() callback.
+ * @can_match: The device has matched with a driver at least once or it is in
+ * a bus (like AMBA) which can't check for matching drivers until
+ * other devices probe successfully.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
* @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
@@ -520,16 +709,13 @@ struct device {
const char *init_name; /* initial name of the device */
const struct device_type *type;
- struct bus_type *bus; /* type of bus device is on */
+ const struct bus_type *bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
-#ifdef CONFIG_PROVE_LOCKING
- struct mutex lockdep_mutex;
-#endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -542,15 +728,10 @@ struct device {
struct em_perf_domain *em_pd;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *msi_domain;
-#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ
- struct list_head msi_list;
-#endif
+ struct dev_msi_info msi;
#ifdef CONFIG_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
@@ -561,7 +742,7 @@ struct device {
64 bit addresses for consistent
allocations such descriptors. */
u64 bus_dma_limit; /* upstream dma constraint */
- unsigned long dma_pfn_offset;
+ const struct bus_dma_region *dma_range_map;
struct device_dma_parameters *dma_parms;
@@ -575,6 +756,14 @@ struct device {
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
+#ifdef CONFIG_SWIOTLB
+ struct io_tlb_mem *dma_io_tlb_mem;
+#endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head dma_io_tlb_pools;
+ spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
@@ -590,17 +779,22 @@ struct device {
spinlock_t devres_lock;
struct list_head devres_head;
- struct class *class;
+ const struct class *class;
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct dev_iommu *iommu;
+ struct device_physical_location *physical_location;
+
+ enum device_removable removable;
+
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
bool state_synced:1;
+ bool can_match:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
@@ -622,7 +816,7 @@ struct device {
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @rm_work: Work structure used for removing the link.
* @supplier_preactivated: Supplier has been made active before consumer probe.
*/
struct device_link {
@@ -635,16 +829,11 @@ struct device_link {
u32 flags;
refcount_t rpm_active;
struct kref kref;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
+ struct work_struct rm_work;
bool supplier_preactivated; /* Owned by consumer probe. */
};
-static inline struct device *kobj_to_dev(struct kobject *kobj)
-{
- return container_of(kobj, struct device, kobj);
-}
+#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj)
/**
* device_iommu_mapped - Returns true when the device DMA is translated
@@ -659,6 +848,11 @@ static inline bool device_iommu_mapped(struct device *dev)
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
+/**
+ * dev_name - Return a device's name.
+ * @dev: Device with name to get.
+ * Return: The kobject name of the device, or its initial name if unavailable.
+ */
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
@@ -668,6 +862,18 @@ static inline const char *dev_name(const struct device *dev)
return kobject_name(&dev->kobj);
}
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to. If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
#ifdef CONFIG_NUMA
@@ -691,8 +897,8 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- return dev->msi_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ return dev->msi.domain;
#else
return NULL;
#endif
@@ -700,8 +906,8 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- dev->msi_domain = d;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ dev->msi.domain = d;
#endif
}
@@ -799,6 +1005,8 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+
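With this guard defined, scope-based locking from <linux/cleanup.h> applies to struct device; a sketch (the function name is illustrative):

    static int frob_locked(struct device *dev)
    {
            guard(device)(dev);     /* device_lock(dev) held from here */

            /* ... device_unlock(dev) runs automatically on return */
            return 0;
    }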
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
@@ -822,6 +1030,22 @@ static inline bool dev_has_sync_state(struct device *dev)
return false;
}
+static inline void dev_set_removable(struct device *dev,
+ enum device_removable removable)
+{
+ dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+ return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+ return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
+
/*
* High level routines for use by the bus drivers
*/
@@ -830,6 +1054,9 @@ void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
+
+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
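This pairs with __free() to undo device_add() on error paths; a sketch, assuming the usual <linux/cleanup.h> idioms and a hypothetical setup_attrs() step:

    ret = device_add(dev);
    if (ret)
            return ret;

    struct device *d __free(device_del) = dev;

    if (setup_attrs(dev))           /* hypothetical follow-up step */
            return -EINVAL;         /* device_del(dev) runs automatically */

    dev = no_free_ptr(d);           /* success: disarm the cleanup */
    return 0;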
int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
@@ -838,19 +1065,61 @@ struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
struct device *device_find_child_by_name(struct device *parent,
const char *name);
+struct device *device_find_any_child(struct device *parent);
+
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
- kgid_t *gid, const char **tmp);
-int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
+#define __device_lock_set_class(dev, name, key) \
+do { \
+ struct device *__d2 __maybe_unused = dev; \
+ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
+} while (0)
+
+/**
+ * device_lock_set_class - Specify a temporary lock class while a device
+ * is attached to a driver
+ * @dev: device to modify
+ * @key: lock class key data
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->probe(). Take care to only override the default
+ * lockdep_no_validate class.
+ */
+#ifdef CONFIG_LOCKDEP
+#define device_lock_set_class(dev, key) \
+do { \
+ struct device *__d = dev; \
+ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
+ &__lockdep_no_validate__), \
+ "overriding existing custom lock class\n"); \
+ __device_lock_set_class(__d, #key, key); \
+} while (0)
+#else
+#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
+#endif
+
+/**
+ * device_lock_reset_class - Return a device to the default lockdep novalidate state
+ * @dev: device to modify
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->remove().
+ */
+#define device_lock_reset_class(dev) \
+do { \
+ struct device *__d __maybe_unused = dev; \
+ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
+ _THIS_IP_); \
+} while (0)
+
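A sketch of the intended pairing across bind and unbind; the callbacks and key are illustrative:

    static struct lock_class_key my_dev_key;

    static int my_probe(struct device *dev)
    {
            /* the core holds device_lock() around ->probe */
            device_lock_set_class(dev, &my_dev_key);
            return 0;
    }

    static void my_remove(struct device *dev)
    {
            /* the core holds device_lock() around ->remove */
            device_lock_reset_class(dev);
    }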
void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
@@ -859,6 +1128,7 @@ int device_online(struct device *dev);
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
static inline int dev_num_vf(struct device *dev)
{
@@ -887,6 +1157,8 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
+int __must_check device_driver_attach(struct device_driver *drv,
+ struct device *dev);
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
@@ -900,13 +1172,13 @@ bool device_is_bound(struct device *dev);
* Easy functions for dynamically creating devices on the fly
*/
__printf(5, 6) struct device *
-device_create(struct class *cls, struct device *parent, dev_t devt,
+device_create(const struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const char *fmt, ...);
__printf(6, 7) struct device *
-device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
+device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const struct attribute_group **groups,
const char *fmt, ...);
-void device_destroy(struct class *cls, dev_t devt);
+void device_destroy(const struct class *cls, dev_t devt);
int __must_check device_add_groups(struct device *dev,
const struct attribute_group **groups);
@@ -931,12 +1203,8 @@ static inline void device_remove_group(struct device *dev,
int __must_check devm_device_add_groups(struct device *dev,
const struct attribute_group **groups);
-void devm_device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
-void devm_device_remove_group(struct device *dev,
- const struct attribute_group *grp);
/*
* Platform "fixup" functions - allow the platform to have their say
@@ -955,6 +1223,9 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
+
+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
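This enables automatic reference dropping via __free(); a sketch (bus and name are illustrative):

    struct device *dev __free(put_device) =
                    bus_find_device_by_name(bus, NULL, name);

    if (!dev)
            return -ENODEV;
    /* use dev; put_device(dev) runs on every return path */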
bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
@@ -977,19 +1248,10 @@ void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
-extern __printf(3, 4)
-int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
-
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
MODULE_ALIAS("char-major-" __stringify(major) "-*")
-#ifdef CONFIG_SYSFS_DEPRECATED
-extern long sysfs_deprecated;
-#else
-#define sysfs_deprecated 0
-#endif
-
#endif /* _DEVICE_H_ */
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 1ea5e1d1545b..5ef4ec1c36c3 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -26,7 +26,6 @@ struct fwnode_handle;
*
* @name: The name of the bus.
* @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
- * @dev_root: Default device to use as the parent.
* @bus_groups: Default attributes of the bus.
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
@@ -59,14 +58,10 @@ struct fwnode_handle;
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
* this bus.
+ * @dma_cleanup: Called to clean up DMA configuration on a device on
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
- * @p: The private data of the driver core, only the driver core can
- * touch this.
- * @lock_key: Lock class key for use by the lock validator
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
@@ -82,16 +77,15 @@ struct fwnode_handle;
struct bus_type {
const char *name;
const char *dev_name;
- struct device *dev_root;
const struct attribute_group **bus_groups;
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
int (*match)(struct device *dev, struct device_driver *drv);
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
int (*online)(struct device *dev);
@@ -103,27 +97,23 @@ struct bus_type {
int (*num_vf)(struct device *dev);
int (*dma_configure)(struct device *dev);
+ void (*dma_cleanup)(struct device *dev);
const struct dev_pm_ops *pm;
- const struct iommu_ops *iommu_ops;
-
- struct subsys_private *p;
- struct lock_class_key lock_key;
-
bool need_parent_lock;
};
-extern int __must_check bus_register(struct bus_type *bus);
+int __must_check bus_register(const struct bus_type *bus);
-extern void bus_unregister(struct bus_type *bus);
+void bus_unregister(const struct bus_type *bus);
-extern int __must_check bus_rescan_devices(struct bus_type *bus);
+int __must_check bus_rescan_devices(const struct bus_type *bus);
struct bus_attribute {
struct attribute attr;
- ssize_t (*show)(struct bus_type *bus, char *buf);
- ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
+ ssize_t (*show)(const struct bus_type *bus, char *buf);
+ ssize_t (*store)(const struct bus_type *bus, const char *buf, size_t count);
};
#define BUS_ATTR_RW(_name) \
@@ -133,9 +123,8 @@ struct bus_attribute {
#define BUS_ATTR_WO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
@@ -143,23 +132,13 @@ int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
/* iterator helpers for buses */
-struct subsys_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
- struct bus_type *subsys,
- struct device *start,
- const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
+int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
+struct device *bus_find_device(const struct bus_type *bus, struct device *start,
const void *data,
int (*match)(struct device *dev, const void *data));
/**
@@ -169,7 +148,7 @@ struct device *bus_find_device(struct bus_type *bus, struct device *start,
* @start: Device to begin with
* @name: name of the device to match
*/
-static inline struct device *bus_find_device_by_name(struct bus_type *bus,
+static inline struct device *bus_find_device_by_name(const struct bus_type *bus,
struct device *start,
const char *name)
{
@@ -183,7 +162,7 @@ static inline struct device *bus_find_device_by_name(struct bus_type *bus,
* @np: of_node of the device to match.
*/
static inline struct device *
-bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
+bus_find_device_by_of_node(const struct bus_type *bus, const struct device_node *np)
{
return bus_find_device(bus, NULL, np, device_match_of_node);
}
@@ -195,7 +174,7 @@ bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
* @fwnode: fwnode of the device to match.
*/
static inline struct device *
-bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
+bus_find_device_by_fwnode(const struct bus_type *bus, const struct fwnode_handle *fwnode)
{
return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
}
@@ -206,7 +185,7 @@ bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwno
* @bus: bus type
* @devt: device type of the device to match.
*/
-static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
+static inline struct device *bus_find_device_by_devt(const struct bus_type *bus,
dev_t devt)
{
return bus_find_device(bus, NULL, &devt, device_match_devt);
@@ -219,7 +198,7 @@ static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
* @cur: device to begin the search with.
*/
static inline struct device *
-bus_find_next_device(struct bus_type *bus,struct device *cur)
+bus_find_next_device(const struct bus_type *bus, struct device *cur)
{
return bus_find_device(bus, cur, NULL, device_match_any);
}
@@ -234,23 +213,21 @@ struct acpi_device;
* @adev: ACPI COMPANION device to match.
*/
static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
{
return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
}
#else
static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
{
return NULL;
}
#endif
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
- struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));
/*
@@ -261,28 +238,40 @@ void bus_sort_breadthfirst(struct bus_type *bus,
*/
struct notifier_block;
-extern int bus_register_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
- struct notifier_block *nb);
+int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb);
+int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb);
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
+/**
+ * enum bus_notifier_event - Bus Notifier events that have happened
+ * @BUS_NOTIFY_ADD_DEVICE: device is added to this bus
+ * @BUS_NOTIFY_DEL_DEVICE: device is about to be removed from this bus
+ * @BUS_NOTIFY_REMOVED_DEVICE: device is successfully removed from this bus
+ * @BUS_NOTIFY_BIND_DRIVER: a driver is about to be bound to this device on this bus
+ * @BUS_NOTIFY_BOUND_DRIVER: a driver is successfully bound to this device on this bus
+ * @BUS_NOTIFY_UNBIND_DRIVER: a driver is about to be unbound from this device on this bus
+ * @BUS_NOTIFY_UNBOUND_DRIVER: a driver is successfully unbound from this device on this bus
+ * @BUS_NOTIFY_DRIVER_NOT_BOUND: a driver failed to be bound to this device on this bus
+ *
+ * These are the value passed to a bus notifier when a specific event happens.
+ *
+ * Note that bus notifiers are likely to be called with the device lock already
+ * held by the driver core, so be careful in any notifier callback as to what
+ * you do with the device structure.
+ *
+ * All bus notifiers are called with the target struct device * as an argument.
*/
-#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
- bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
- unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
- from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
+enum bus_notifier_event {
+ BUS_NOTIFY_ADD_DEVICE,
+ BUS_NOTIFY_DEL_DEVICE,
+ BUS_NOTIFY_REMOVED_DEVICE,
+ BUS_NOTIFY_BIND_DRIVER,
+ BUS_NOTIFY_BOUND_DRIVER,
+ BUS_NOTIFY_UNBIND_DRIVER,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ BUS_NOTIFY_DRIVER_NOT_BOUND,
+};
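A sketch of a notifier consuming the new enum; the callback and block names are illustrative:

    static int my_bus_cb(struct notifier_block *nb, unsigned long action,
                         void *data)
    {
            struct device *dev = data;

            if (action == BUS_NOTIFY_BOUND_DRIVER)
                    dev_info(dev, "driver bound\n");
            return NOTIFY_OK;
    }

    static struct notifier_block my_bus_nb = {
            .notifier_call = my_bus_cb,
    };
    /* registered with bus_register_notifier(&some_bus_type, &my_bus_nb); */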
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
+struct kset *bus_get_kset(const struct bus_type *bus);
+struct device *bus_get_dev_root(const struct bus_type *bus);
#endif
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index e8d470c457d1..c576b49c55c2 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -25,10 +25,8 @@ struct fwnode_handle;
/**
* struct class - device classes
* @name: Name of the class.
- * @owner: The module owner.
* @class_groups: Default attributes of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
* few other things that generate uevents to add the environment
* variables.
@@ -42,8 +40,6 @@ struct fwnode_handle;
* for the devices belonging to the class. Usually tied to
* device's namespace.
* @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
*
* A class is a higher-level view of a device that abstracts out low-level
* implementation details. Drivers may see a SCSI disk or an ATA disk, but,
@@ -53,48 +49,35 @@ struct fwnode_handle;
*/
struct class {
const char *name;
- struct module *owner;
const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
- struct kobject *dev_kobj;
- int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode);
+ int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode);
- void (*class_release)(struct class *class);
+ void (*class_release)(const struct class *class);
void (*dev_release)(struct device *dev);
int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
- const void *(*namespace)(struct device *dev);
+ const void *(*namespace)(const struct device *dev);
- void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+ void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid);
const struct dev_pm_ops *pm;
-
- struct subsys_private *p;
};
struct class_dev_iter {
struct klist_iter ki;
const struct device_type *type;
+ struct subsys_private *sp;
};
-extern struct kobject *sysfs_dev_block_kobj;
-extern struct kobject *sysfs_dev_char_kobj;
-extern int __must_check __class_register(struct class *class,
- struct lock_class_key *key);
-extern void class_unregister(struct class *class);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_register(class) \
-({ \
- static struct lock_class_key __key; \
- __class_register(class, &__key); \
-})
+int __must_check class_register(const struct class *class);
+void class_unregister(const struct class *class);
+bool class_is_registered(const struct class *class);
struct class_compat;
struct class_compat *class_compat_register(const char *name);
@@ -104,19 +87,15 @@ int class_compat_create_link(struct class_compat *cls, struct device *dev,
void class_compat_remove_link(struct class_compat *cls, struct device *dev,
struct device *device_link);
-extern void class_dev_iter_init(struct class_dev_iter *iter,
- struct class *class,
- struct device *start,
- const struct device_type *type);
-extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
-extern void class_dev_iter_exit(struct class_dev_iter *iter);
+void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ const struct device *start, const struct device_type *type);
+struct device *class_dev_iter_next(struct class_dev_iter *iter);
+void class_dev_iter_exit(struct class_dev_iter *iter);
-extern int class_for_each_device(struct class *class, struct device *start,
- void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *class_find_device(struct class *class,
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
+int class_for_each_device(const struct class *class, const struct device *start, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *class_find_device(const struct class *class, const struct device *start,
+ const void *data, int (*match)(struct device *, const void *));
/**
* class_find_device_by_name - device iterator for locating a particular device
@@ -124,7 +103,7 @@ extern struct device *class_find_device(struct class *class,
* @class: class type
* @name: name of the device to match
*/
-static inline struct device *class_find_device_by_name(struct class *class,
+static inline struct device *class_find_device_by_name(const struct class *class,
const char *name)
{
return class_find_device(class, NULL, name, device_match_name);
@@ -136,8 +115,8 @@ static inline struct device *class_find_device_by_name(struct class *class,
* @class: class type
* @np: of_node of the device to match.
*/
-static inline struct device *
-class_find_device_by_of_node(struct class *class, const struct device_node *np)
+static inline struct device *class_find_device_by_of_node(const struct class *class,
+ const struct device_node *np)
{
return class_find_device(class, NULL, np, device_match_of_node);
}
@@ -148,9 +127,8 @@ class_find_device_by_of_node(struct class *class, const struct device_node *np)
* @class: class type
* @fwnode: fwnode of the device to match.
*/
-static inline struct device *
-class_find_device_by_fwnode(struct class *class,
- const struct fwnode_handle *fwnode)
+static inline struct device *class_find_device_by_fwnode(const struct class *class,
+ const struct fwnode_handle *fwnode)
{
return class_find_device(class, NULL, fwnode, device_match_fwnode);
}
@@ -161,7 +139,7 @@ class_find_device_by_fwnode(struct class *class,
* @class: class type
* @devt: device type of the device to match.
*/
-static inline struct device *class_find_device_by_devt(struct class *class,
+static inline struct device *class_find_device_by_devt(const struct class *class,
dev_t devt)
{
return class_find_device(class, NULL, &devt, device_match_devt);
@@ -175,14 +153,14 @@ struct acpi_device;
* @class: class type
* @adev: ACPI_COMPANION device to match.
*/
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev)
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const struct acpi_device *adev)
{
return class_find_device(class, NULL, adev, device_match_acpi_dev);
}
#else
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const void *adev)
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const void *adev)
{
return NULL;
}
@@ -190,10 +168,10 @@ class_find_device_by_acpi_dev(struct class *class, const void *adev)
struct class_attribute {
struct attribute attr;
- ssize_t (*show)(struct class *class, struct class_attribute *attr,
+ ssize_t (*show)(const struct class *class, const struct class_attribute *attr,
char *buf);
- ssize_t (*store)(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count);
+ ssize_t (*store)(const struct class *class, const struct class_attribute *attr,
+ const char *buf, size_t count);
};
#define CLASS_ATTR_RW(_name) \
@@ -203,20 +181,18 @@ struct class_attribute {
#define CLASS_ATTR_WO(_name) \
struct class_attribute class_attr_##_name = __ATTR_WO(_name)
-extern int __must_check class_create_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-extern void class_remove_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
+int __must_check class_create_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
+void class_remove_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
-static inline int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr)
+static inline int __must_check class_create_file(const struct class *class,
+ const struct class_attribute *attr)
{
return class_create_file_ns(class, attr, NULL);
}
-static inline void class_remove_file(struct class *class,
+static inline void class_remove_file(const struct class *class,
const struct class_attribute *attr)
{
return class_remove_file_ns(class, attr, NULL);
@@ -235,32 +211,21 @@ struct class_attribute_string {
struct class_attribute_string class_attr_##_name = \
_CLASS_ATTR_STRING(_name, _mode, _str)
-extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
- char *buf);
+ssize_t show_class_attr_string(const struct class *class, const struct class_attribute *attr,
+ char *buf);
struct class_interface {
struct list_head node;
- struct class *class;
+ const struct class *class;
- int (*add_dev) (struct device *, struct class_interface *);
- void (*remove_dev) (struct device *, struct class_interface *);
+ int (*add_dev) (struct device *dev);
+ void (*remove_dev) (struct device *dev);
};
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class * __must_check __class_create(struct module *owner,
- const char *name,
- struct lock_class_key *key);
-extern void class_destroy(struct class *cls);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_create(owner, name) \
-({ \
- static struct lock_class_key __key; \
- __class_create(owner, name, &__key); \
-})
+int __must_check class_interface_register(struct class_interface *);
+void class_interface_unregister(struct class_interface *);
+struct class * __must_check class_create(const char *name);
+void class_destroy(const struct class *cls);
#endif /* _DEVICE_CLASS_H_ */
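
As a quick illustration of the constified class API above, here is a minimal
sketch; the "demo" class and its version attribute are invented for the
example and are not part of the patch:

	#include <linux/device/class.h>
	#include <linux/err.h>
	#include <linux/sysfs.h>

	/* show() now takes const class/attribute pointers */
	static ssize_t version_show(const struct class *class,
				    const struct class_attribute *attr,
				    char *buf)
	{
		return sysfs_emit(buf, "1\n");
	}
	static CLASS_ATTR_RO(version);

	static int __init demo_init(void)
	{
		struct class *demo_class;
		int ret;

		/* class_create() no longer takes a module owner argument */
		demo_class = class_create("demo");
		if (IS_ERR(demo_class))
			return PTR_ERR(demo_class);

		ret = class_create_file(demo_class, &class_attr_version);
		if (ret)
			class_destroy(demo_class);
		return ret;
	}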
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index ee7ba5b5417e..7738f458995f 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -18,6 +18,7 @@
#include <linux/klist.h>
#include <linux/pm.h>
#include <linux/device/bus.h>
+#include <linux/module.h>
/**
* enum probe_type - device driver probe type to try
@@ -75,7 +76,7 @@ enum probe_type {
* @resume: Called to bring a device from sleep mode.
* @groups: Default attributes that get created by the driver core
* automatically.
- * @dev_groups: Additional attributes attached to device instance once the
+ * @dev_groups: Additional attributes attached to device instance once
* it is bound to the driver.
* @pm: Power management operations of the device which matched
* this driver.
@@ -94,7 +95,7 @@ enum probe_type {
*/
struct device_driver {
const char *name;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct module *owner;
const char *mod_name; /* used for built-in modules */
@@ -121,13 +122,13 @@ struct device_driver {
};
-extern int __must_check driver_register(struct device_driver *drv);
-extern void driver_unregister(struct device_driver *drv);
+int __must_check driver_register(struct device_driver *drv);
+void driver_unregister(struct device_driver *drv);
-extern struct device_driver *driver_find(const char *name,
- struct bus_type *bus);
-extern int driver_probe_done(void);
-extern void wait_for_device_probe(void);
+struct device_driver *driver_find(const char *name, const struct bus_type *bus);
+bool __init driver_probe_done(void);
+void wait_for_device_probe(void);
+void __init wait_for_init_devices_probe(void);
/* sysfs interface for exporting driver attributes */
@@ -145,16 +146,15 @@ struct driver_attribute {
#define DRIVER_ATTR_WO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-extern int __must_check driver_create_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-extern void driver_remove_file(struct device_driver *driver,
- const struct driver_attribute *attr);
+int __must_check driver_create_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+void driver_remove_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
-extern int __must_check driver_for_each_device(struct device_driver *drv,
- struct device *start,
- void *data,
- int (*fn)(struct device *dev,
- void *));
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
+int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
+ void *data, int (*fn)(struct device *dev, void *));
struct device *driver_find_device(struct device_driver *drv,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data));
@@ -236,7 +236,6 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
}
#endif
-extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
void driver_init(void);
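
driver_set_override() above is intended for bus cores that expose a sysfs
driver_override attribute; a hedged sketch of such a store callback follows,
where foo_device, its driver_override member and to_foo_device() are
assumptions for the example:

	static ssize_t driver_override_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
	{
		struct foo_device *fdev = to_foo_device(dev);
		int ret;

		/* validates and dups the string, freeing any old override */
		ret = driver_set_override(dev, &fdev->driver_override,
					  buf, count);
		if (ret)
			return ret;

		return count;
	}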
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
new file mode 100644
index 000000000000..74891802200d
--- /dev/null
+++ b/include/linux/devm-helpers.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_DEVM_HELPERS_H
+#define __LINUX_DEVM_HELPERS_H
+
+/*
+ * Functions which automatically cancel operations or release resources upon
+ * driver detach.
+ *
+ * These should be helpful to avoid mixing the manual and devm-based resource
+ * management, which can be a source of annoying, rarely occurring,
+ * hard-to-reproduce bugs.
+ *
+ * Please take into account that devm-based cancellation may be performed some
+ * time after remove() has run.
+ *
+ * Thus mixing devm and manual resource management can easily cause problems
+ * when unwinding operations with dependencies. An IRQ scheduling a work item
+ * is a typical example: IRQs are often devm-managed while workqueues are
+ * manually cleaned up at remove(). If the IRQs are not manually freed at
+ * remove() (and this is often the case when we use devm for IRQs) we have a
+ * period of time after remove() - and before the devm-managed IRQs are freed -
+ * where a new IRQ may fire and schedule a work item which won't be cancelled
+ * because remove() has already run.
+ */
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+static inline void devm_delayed_work_drop(void *res)
+{
+ cancel_delayed_work_sync(res);
+}
+
+/**
+ * devm_delayed_work_autocancel - Resource-managed delayed work allocation
+ * @dev: Device whose lifetime the work is bound to
+ * @w: Work item to be queued
+ * @worker: Worker function
+ *
+ * Initialize delayed work which is automatically cancelled when the driver is
+ * detached. A few drivers need delayed work which must be cancelled before the
+ * driver is detached to avoid accessing removed resources.
+ * devm_delayed_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_delayed_work_autocancel(struct device *dev,
+ struct delayed_work *w,
+ work_func_t worker)
+{
+ INIT_DELAYED_WORK(w, worker);
+ return devm_add_action(dev, devm_delayed_work_drop, w);
+}
+
+static inline void devm_work_drop(void *res)
+{
+ cancel_work_sync(res);
+}
+
+/**
+ * devm_work_autocancel - Resource-managed work allocation
+ * @dev: Device whose lifetime the work is bound to
+ * @w: Work to be added (and automatically cancelled)
+ * @worker: Worker function
+ *
+ * Initialize work which is automatically cancelled when the driver is
+ * detached. A few drivers need to queue work which must be cancelled before
+ * the driver is detached to avoid accessing removed resources.
+ * devm_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_work_autocancel(struct device *dev,
+ struct work_struct *w,
+ work_func_t worker)
+{
+ INIT_WORK(w, worker);
+ return devm_add_action(dev, devm_work_drop, w);
+}
+
+#endif
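
A minimal sketch of how a driver would use the helper above; the foo
platform driver and its private data are assumptions for the example:

	#include <linux/devm-helpers.h>
	#include <linux/platform_device.h>

	struct foo_priv {
		struct work_struct work;
	};

	static void foo_work_fn(struct work_struct *work)
	{
		/* guaranteed not to run after driver detach */
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* no explicit cancel_work_sync() needed at remove() */
		return devm_work_autocancel(&pdev->dev, &priv->work,
					    foo_work_fn);
	}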
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
new file mode 100644
index 000000000000..0a7a00a0ee7f
--- /dev/null
+++ b/include/linux/dfl.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for DFL driver and device API
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ */
+
+#ifndef __LINUX_DFL_H
+#define __LINUX_DFL_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ */
+enum dfl_id_type {
+ FME_ID = 0,
+ PORT_ID = 1,
+ DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device - represent a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @revision: revision of this dfl device feature.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ * @dfh_version: version of DFH for the device
+ * @param_size: size of the block parameters in bytes
+ * @params: pointer to block of parameters copied memory
+ */
+struct dfl_device {
+ struct device dev;
+ int id;
+ u16 type;
+ u16 feature_id;
+ u8 revision;
+ struct resource mmio_res;
+ int *irqs;
+ unsigned int num_irqs;
+ struct dfl_fpga_cdev *cdev;
+ const struct dfl_device_id *id_entry;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
+};
+
+/**
+ * struct dfl_driver - represent a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ * { } member terminated.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+ struct device_driver drv;
+ const struct dfl_device_id *id_table;
+
+ int (*probe)(struct dfl_device *dfl_dev);
+ void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+ __dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+ module_driver(__dfl_driver, dfl_driver_register, \
+ dfl_driver_unregister)
+
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *pcount);
+#endif /* __LINUX_DFL_H */
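
A skeleton of a driver sitting on this bus, as a hedged sketch; the driver
name and the 0x23 feature ID are made up for the example:

	#include <linux/dfl.h>
	#include <linux/module.h>

	static const struct dfl_device_id foo_ids[] = {
		{ FME_ID, 0x23 },
		{ }
	};
	MODULE_DEVICE_TABLE(dfl, foo_ids);

	static int foo_probe(struct dfl_device *dfl_dev)
	{
		dev_info(&dfl_dev->dev, "feature 0x%x rev %u\n",
			 dfl_dev->feature_id, dfl_dev->revision);
		return 0;
	}

	static struct dfl_driver foo_driver = {
		.drv		= { .name = "dfl-foo" },
		.id_table	= foo_ids,
		.probe		= foo_probe,
	};
	module_dfl_driver(foo_driver);

	MODULE_LICENSE("GPL");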
diff --git a/include/linux/dim.h b/include/linux/dim.h
index b698266d0035..f343bc9aa2ec 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -21,7 +21,7 @@
* We consider 10% difference as significant.
*/
#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100UL * abs((val) - (ref))) / (ref)) > 10)
+ ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
/*
* Calculate the gap between two values.
@@ -236,8 +236,9 @@ void dim_park_tired(struct dim *dim);
*
* Calculate the delta between two samples (in data rates).
* Takes into consideration counter wrap-around.
+ * The returned boolean indicates whether @curr_stats is reliable.
*/
-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats);
/**
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 5abd07361eb5..2b5923909f96 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -68,7 +68,7 @@ struct dio_bus {
};
extern struct dio_bus dio_bus; /* Single DIO bus */
-extern struct bus_type dio_bus_type;
+extern const struct bus_type dio_bus_type;
/*
* DIO device IDs
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index ff951e9f6f20..c58c4f790c04 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -53,12 +53,6 @@ struct dlm_lockspace_ops {
* The dlm should not use a resource directory, but statically assign
* resource mastery to nodes based on the name hash that is otherwise
* used to select the directory node. Must be the same on all nodes.
- * DLM_LSFL_TIMEWARN
- * The dlm should emit netlink messages if locks have been waiting
- * for a configurable amount of time. (Unused.)
- * DLM_LSFL_FS
- * The lockspace user is in the kernel (i.e. filesystem). Enables
- * direct bast/cast callbacks.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
*
@@ -134,7 +128,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*lockast) (void *astarg),
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index e6d76e8715a6..15fc856d198c 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -11,6 +11,8 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl);
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
#endif
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 29d255fdd5d6..d1503b815a78 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009-2011 Red Hat, Inc.
*
@@ -18,19 +19,27 @@ struct dm_bufio_client;
struct dm_buffer;
/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
+
+/*
* Create a buffered IO cache on a given device
*/
struct dm_bufio_client *
-dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
- unsigned reserved_buffers, unsigned aux_size,
+dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *));
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags);
/*
* Release a buffered IO cache.
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c);
+void dm_bufio_client_reset(struct dm_bufio_client *c);
+
/*
* Set the sector range.
* When this function is called, there must be no I/O in progress on the bufio
@@ -55,6 +64,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp);
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio);
+
/*
* Like dm_bufio_read, but return buffer from cache, don't read
* it. If the buffer is not in the cache, return NULL.
@@ -75,7 +87,11 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
* I/O to finish.
*/
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned n_blocks);
+ sector_t block, unsigned int n_blocks);
+
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio);
/*
* Release a reference obtained with dm_bufio_{read,get,new}. The data
@@ -100,7 +116,7 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
* write the specified part of the buffer or it may write a larger superset.
*/
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
- unsigned start, unsigned end);
+ unsigned int start, unsigned int end);
/*
* Initiate writing of dirty buffers, without waiting for completion.
@@ -124,12 +140,6 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c);
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
/*
- * Like dm_bufio_release but also move the buffer to the new
- * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
- */
-void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
-
-/*
* Free the given buffer.
* This is just a hint, if the buffer is in use or dirty, this function
* does nothing.
@@ -146,10 +156,11 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
/*
* Set the minimum number of buffers before cleanup happens.
*/
-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
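
The extra flags argument is the visible API change here; a minimal sketch of
a caller, where bdev and the 4 KiB block size are assumptions for the
example:

	#include <linux/dm-bufio.h>

	static int foo_open_cache(struct block_device *bdev,
				  struct dm_bufio_client **out)
	{
		struct dm_bufio_client *c;

		/*
		 * The trailing 0 is the new flags argument; passing
		 * DM_BUFIO_CLIENT_NO_SLEEP instead selects non-sleeping mode.
		 */
		c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
		if (IS_ERR(c))
			return PTR_ERR(c);

		*out = c;
		return 0;
	}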
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 7084503c3405..0b10faedb26a 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -33,7 +34,7 @@ struct dm_dirty_log_type {
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
/*
@@ -96,7 +97,7 @@ struct dm_dirty_log_type {
* Do not confuse this function with 'in_sync()', one
* tells you if an area is synchronised, the other
* assigns recovery work.
- */
+ */
int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
/*
@@ -116,7 +117,7 @@ struct dm_dirty_log_type {
* Support function for mirror status requests.
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen);
+ char *result, unsigned int maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
@@ -139,7 +140,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
struct dm_target *ti,
int (*flush_callback_fn)(struct dm_target *ti),
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index a52c6580cc9a..7b2968612b7e 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -13,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
struct dm_io_region {
struct block_device *bdev;
@@ -25,7 +27,7 @@ struct page_list {
struct page *page;
};
-typedef void (*io_notify_fn)(unsigned long error, void *context);
+typedef void (*io_notify_fn)(unsigned long error, void *context);
enum dm_io_mem_type {
DM_IO_PAGE_LIST,/* Page list */
@@ -37,7 +39,7 @@ enum dm_io_mem_type {
struct dm_io_memory {
enum dm_io_mem_type type;
- unsigned offset;
+ unsigned int offset;
union {
struct page_list *pl;
@@ -57,8 +59,7 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
- int bi_op; /* REQ_OP */
- int bi_op_flags; /* req_flag_bits */
+ blk_opf_t bi_opf; /* Request type and flags */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
@@ -78,8 +79,9 @@ void dm_io_client_destroy(struct dm_io_client *client);
* Each bit in the optional 'sync_error_bits' bitset indicates whether an
* error occurred doing io to the corresponding region.
*/
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
- struct dm_io_region *region, unsigned long *sync_error_bits);
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ struct dm_io_region *region, unsigned long *sync_error_bits,
+ unsigned short ioprio);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_IO_H */
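
A sketch of a synchronous read with the reworked dm_io() prototype; the
caller-supplied client, bdev and buf are assumed to exist, and
IOPRIO_DEFAULT stands in for whatever priority the caller wants:

	#include <linux/dm-io.h>
	#include <linux/ioprio.h>

	static int foo_read_sectors(struct dm_io_client *client,
				    struct block_device *bdev, void *buf)
	{
		struct dm_io_region region = {
			.bdev	= bdev,
			.sector	= 0,
			.count	= 8,
		};
		struct dm_io_request req = {
			.bi_opf		= REQ_OP_READ, /* replaces bi_op/bi_op_flags */
			.mem.type	= DM_IO_KMEM,
			.mem.ptr.addr	= buf,
			.notify.fn	= NULL,        /* NULL means synchronous */
			.client		= client,
		};
		unsigned long error_bits;

		/* the trailing ioprio argument is the new parameter */
		return dm_io(&req, 1, &region, &error_bits, IOPRIO_DEFAULT);
	}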
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index e42de7750c88..51fb1af0b63e 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 - 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -23,11 +24,11 @@
#define DM_KCOPYD_WRITE_SEQ 2
struct dm_kcopyd_throttle {
- unsigned throttle;
- unsigned num_io_jobs;
- unsigned io_period;
- unsigned total_period;
- unsigned last_jiffies;
+ unsigned int throttle;
+ unsigned int num_io_jobs;
+ unsigned int io_period;
+ unsigned int total_period;
+ unsigned int last_jiffies;
};
/*
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(name, description)
struct dm_kcopyd_client;
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
/*
* Submit a copy job to kcopyd. This is built on top of the
@@ -59,12 +61,12 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
* read_err is a boolean,
* write_err is a bitset, with 1 bit for each destination region
*/
-typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
void *context);
void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
/*
* Prepare a callback and submit it via the kcopyd thread.
@@ -79,11 +81,11 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
*/
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context);
-void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
+void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
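
A hedged sketch of a single-destination copy using the callback signature
above; the client and the two regions are assumptions for the example:

	#include <linux/completion.h>
	#include <linux/dm-kcopyd.h>

	static void foo_copy_done(int read_err, unsigned long write_err,
				  void *context)
	{
		/* read_err is a boolean, write_err one bit per dest region */
		complete(context);
	}

	static void foo_copy(struct dm_kcopyd_client *kc,
			     struct dm_io_region *from,
			     struct dm_io_region *to)
	{
		DECLARE_COMPLETION_ONSTACK(done);

		dm_kcopyd_copy(kc, from, 1, to, 0, foo_copy_done, &done);
		wait_for_completion(&done);
	}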
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index 9e2a7a401df5..3079ed93dd2d 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -12,9 +13,11 @@
#include <linux/dm-dirty-log.h>
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Region hash
- *----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
struct dm_region_hash;
struct dm_region;
@@ -37,7 +40,7 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions);
void dm_region_hash_destroy(struct dm_region_hash *rh);
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
new file mode 100644
index 000000000000..3ac6dbaeaa37
--- /dev/null
+++ b/include/linux/dm-verity-loadpin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_DM_VERITY_LOADPIN_H
+#define __LINUX_DM_VERITY_LOADPIN_H
+
+#include <linux/list.h>
+
+struct block_device;
+
+extern struct list_head dm_verity_loadpin_trusted_root_digests;
+
+struct dm_verity_loadpin_trusted_root_digest {
+ struct list_head node;
+ unsigned int len;
+ u8 data[] __counted_by(len);
+};
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev);
+#else
+static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ return false;
+}
+#endif
+
+#endif /* __LINUX_DM_VERITY_LOADPIN_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index a2ca294eaebe..8ff4add71f88 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -13,6 +13,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -53,7 +54,7 @@ struct dma_buf_ops {
* device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
- * allocation fullfills the DMA constraints of the new device. If this
+ * allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@@ -84,14 +85,22 @@ struct dma_buf_ops {
/**
* @pin:
*
- * This is called by dma_buf_pin and lets the exporter know that the
- * DMA-buf can't be moved any more.
+ * This is called by dma_buf_pin() and lets the exporter know that the
+ * DMA-buf can't be moved any more. Ideally, the exporter should
+ * pin the buffer so that it is generally accessible by all
+ * devices.
*
- * This is called with the dmabuf->resv object locked and is mutual
+ * This is called with the &dmabuf.resv object locked and is mutual
* exclusive with @cache_sgt_mapping.
*
- * This callback is optional and should only be used in limited use
- * cases like scanout and not for temporary pin operations.
+ * This is called automatically for non-dynamic importers from
+ * dma_buf_attach().
+ *
+ * Note that similar to non-dynamic exporters in their @map_dma_buf
+ * callback the driver must guarantee that the memory is available for
+ * use and cleared of any old data by the time this function returns.
+ * Drivers which pipeline their buffer moves internally must wait for
+ * all moves and clears to complete.
*
* Returns:
*
@@ -102,7 +111,7 @@ struct dma_buf_ops {
/**
* @unpin:
*
- * This is called by dma_buf_unpin and lets the exporter know that the
+ * This is called by dma_buf_unpin() and lets the exporter know that the
* DMA-buf can be moved again.
*
* This is called with the dmabuf->resv object locked and is mutual
@@ -141,15 +150,31 @@ struct dma_buf_ops {
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
+ * Note that for non-dynamic exporters the driver must guarantee
+ * that the memory is available for use and cleared of any old data by
+ * the time this function returns. Drivers which pipeline their buffer
+ * moves internally must wait for all moves and clears to complete.
+ * Dynamic exporters do not need to follow this rule: for non-dynamic
+ * importers the buffer is already pinned through @pin, which has the
+ * same requirements. Dynamic importers, on the other hand, are required
+ * to obey the dma_resv fences.
+ *
* Returns:
*
- * A &sg_table scatter list of or the backing storage of the DMA buffer,
+ * A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
+ *
+ * Note that exporters should not try to cache the scatter list, or
+ * return the same one for multiple calls. Caching is done either by the
+ * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+ * of the scatter list is transferred to the caller, and returned by
+ * @unmap_dma_buf.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
@@ -158,7 +183,7 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * For static dma_buf handling this might also unpins the backing
+ * For static dma_buf handling this might also unpin the backing
* storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
@@ -181,24 +206,19 @@ struct dma_buf_ops {
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
- * exporter to ensure that the memory is actually available for cpu
- * access - the exporter might need to allocate or swap-in and pin the
- * backing storage. The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
+ * exporter to ensure that the memory is actually coherent for cpu
+ * access. The exporter also needs to ensure that cpu access is coherent
+ * for the access direction. The direction can be used by the exporter
+ * to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
- * This callback is optional.
+ * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
+ * command for userspace mappings established through @mmap, and also
+ * for kernel mappings established with @vmap.
*
- * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
- * from userspace (where storage shouldn't be pinned to avoid handing
- * de-factor mlock rights to userspace) and for the kernel-internal
- * users of the various kmap interfaces, where the backing storage must
- * be pinned to guarantee that the atomic kmap calls can succeed. Since
- * there's no in-kernel users of the kmap interfaces yet this isn't a
- * real problem.
+ * This callback is optional.
*
* Returns:
*
@@ -214,9 +234,7 @@ struct dma_buf_ops {
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the CPU. The exporter can use this to flush caches and
- * unpin any resources pinned in @begin_cpu_access.
- * The result of any dma_buf kmap calls after end_cpu_access is
- * undefined.
+ * undo anything else done in @begin_cpu_access.
*
* This callback is optional.
*
@@ -234,7 +252,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
- * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@@ -265,32 +283,12 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer
- * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached,
- * protected by dma_resv lock.
- * @ops: dma_buf_ops associated with this buffer object.
- * @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap
- * @vmapping_counter: used internally to refcnt the vmaps
- * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
- * @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging,
- * protected by @resv.
- * @owner: pointer to exporter module; used for refcounting when exporter is a
- * kernel module.
- * @list_node: node for dma_buf accounting and debugging.
- * @priv: exporter specific private data for this buffer object.
- * @resv: reservation object linked to this dma-buf
- * @poll: for userspace poll support
- * @cb_excl: for userspace poll support
- * @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -302,30 +300,155 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
+ /**
+ * @size:
+ *
+ * Size of the buffer; invariant over the lifetime of the buffer.
+ */
size_t size;
+
+ /**
+ * @file:
+ *
+ * File pointer used for sharing buffers across processes, and for refcounting.
+ * See dma_buf_get() and dma_buf_put().
+ */
struct file *file;
+
+ /**
+ * @attachments:
+ *
+ * List of dma_buf_attachment that denotes all devices attached,
+ * protected by &dma_resv lock @resv.
+ */
struct list_head attachments;
+
+ /** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
- struct mutex lock;
+
+ /**
+ * @vmapping_counter:
+ *
+ * Used internally to refcnt the vmaps returned by dma_buf_vmap().
+ * Protected by @lock.
+ */
unsigned vmapping_counter;
- void *vmap_ptr;
+
+ /**
+ * @vmap_ptr:
+ * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
+ */
+ struct iosys_map vmap_ptr;
+
+ /**
+ * @exp_name:
+ *
+ * Name of the exporter; useful for debugging. Must not be NULL.
+ */
const char *exp_name;
+
+ /**
+ * @name:
+ *
+ * Userspace-provided name. Default value is NULL. If not NULL,
+ * length cannot be longer than DMA_BUF_NAME_LEN, including the NUL
+ * char. Useful for accounting and debugging. Read/Write accesses
+ * are protected by @name_lock
+ *
+ * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
+ */
const char *name;
- spinlock_t name_lock; /* spinlock to protect name access */
+
+ /** @name_lock: Spinlock to protect name access for read access. */
+ spinlock_t name_lock;
+
+ /**
+ * @owner:
+ *
+ * Pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
+ */
struct module *owner;
+
+ /** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+
+ /** @priv: exporter specific private data for this buffer object. */
void *priv;
+
+ /**
+ * @resv:
+ *
+ * Reservation object linked to this dma-buf.
+ *
+ * IMPLICIT SYNCHRONIZATION RULES:
+ *
+ * Drivers which support implicit synchronization of buffer access as
+ * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
+ * below rules.
+ *
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
+ *
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
+ *
+ * - Drivers may just always add a write fence, since that only
+ * causes unnecessary synchronization, but no correctness issues.
+ *
+ * - Some drivers only expose a synchronous userspace API with no
+ * pipelining across drivers. These do not set any fences for their
+ * access. An example here is v4l.
+ *
+ * - Drivers should use dma_resv_usage_rw() when retrieving fences as
+ * dependencies for implicit synchronization.
+ *
+ * DYNAMIC IMPORTER RULES:
+ *
+ * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
+ * additional constraints on how they set up fences:
+ *
+ * - Dynamic importers must obey the write fences and wait for them to
+ * signal before allowing access to the buffer's underlying storage
+ * through the device.
+ *
+ * - Dynamic importers should set fences for any access that they can't
+ * disable immediately from their &dma_buf_attach_ops.move_notify
+ * callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+ * obeying fences. See enum dma_resv_usage for further descriptions.
+ */
struct dma_resv *resv;
- /* poll support */
+ /** @poll: for userspace poll support */
wait_queue_head_t poll;
+ /** @cb_in: for userspace poll support */
+ /** @cb_out: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
__poll_t active;
- } cb_excl, cb_shared;
+ } cb_in, cb_out;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /**
+ * @sysfs_entry:
+ *
+ * For exposing information about this buffer in sysfs. See also
+ * `DMA-BUF statistics`_ for the uapi this enables.
+ */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+ } *sysfs_entry;
+#endif
};
/**
@@ -402,7 +525,7 @@ struct dma_buf_attachment {
* @exp_name: name of the exporter - useful for debugging.
* @owner: pointer to exporter module - used for refcounting kernel module
* @ops: Attach allocator-defined dma buf ops to the new buffer
- * @size: Size of the buffer
+ * @size: Size of the buffer - invariant over the lifetime of the buffer
* @flags: mode flags for the file
* @resv: reservation-object, NULL to allocate default one
* @priv: Attach private data of allocator to this buffer
@@ -460,7 +583,7 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
/**
* dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappinsg
+ * mappings
* @attach: the DMA-buf attachment to check
*
* Returns true if a DMA-buf importer wants to call the map/unmap functions with
@@ -498,9 +621,17 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction);
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
#endif /* __DMA_BUF_H__ */
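
The iosys_map-based vmap interface above replaces the old raw void *
variant; a minimal sketch of a kernel-space read, assuming a hypothetical
foo_read_buf() caller that holds a reference on the buffer:

	#include <linux/dma-buf.h>
	#include <linux/io.h>
	#include <linux/iosys-map.h>

	static int foo_read_buf(struct dma_buf *dmabuf, void *dst, size_t len)
	{
		struct iosys_map map;
		int ret;

		ret = dma_buf_vmap_unlocked(dmabuf, &map);
		if (ret)
			return ret;

		/* the mapping may be I/O memory rather than system RAM */
		if (map.is_iomem)
			memcpy_fromio(dst, map.vaddr_iomem, len);
		else
			memcpy(dst, map.vaddr, len);

		dma_buf_vunmap_unlocked(dmabuf, &map);
		return 0;
	}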
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
deleted file mode 100644
index 03f8e98e3bcc..000000000000
--- a/include/linux/dma-contiguous.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef __LINUX_CMA_H
-#define __LINUX_CMA_H
-
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- * Marek Szyprowski <m.szyprowski@samsung.com>
- * Michal Nazarewicz <mina86@mina86.com>
- */
-
-/*
- * Contiguous Memory Allocator
- *
- * The Contiguous Memory Allocator (CMA) makes it possible to
- * allocate big contiguous chunks of memory after the system has
- * booted.
- *
- * Why is it needed?
- *
- * Various devices on embedded systems have no scatter-getter and/or
- * IO map support and require contiguous blocks of memory to
- * operate. They include devices such as cameras, hardware video
- * coders, etc.
- *
- * Such devices often require big memory buffers (a full HD frame
- * is, for instance, more then 2 mega pixels large, i.e. more than 6
- * MB of memory), which makes mechanisms such as kmalloc() or
- * alloc_page() ineffective.
- *
- * At the same time, a solution where a big memory region is
- * reserved for a device is suboptimal since often more memory is
- * reserved then strictly required and, moreover, the memory is
- * inaccessible to page system even if device drivers don't use it.
- *
- * CMA tries to solve this issue by operating on memory regions
- * where only movable pages can be allocated from. This way, kernel
- * can use the memory for pagecache and when device driver requests
- * it, allocated pages can be migrated.
- *
- * Driver usage
- *
- * CMA should not be used by the device drivers directly. It is
- * only a helper framework for dma-mapping subsystem.
- *
- * For more information, see kernel-docs in kernel/dma/contiguous.c
- */
-
-#ifdef __KERNEL__
-
-#include <linux/device.h>
-#include <linux/mm.h>
-
-struct cma;
-struct page;
-
-#ifdef CONFIG_DMA_CMA
-
-extern struct cma *dma_contiguous_default_area;
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- if (dev && dev->cma_area)
- return dev->cma_area;
- return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
- if (dev)
- dev->cma_area = cma;
-}
-
-static inline void dma_contiguous_set_default(struct cma *cma)
-{
- dma_contiguous_default_area = cma;
-}
-
-void dma_contiguous_reserve(phys_addr_t addr_limit);
-
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed);
-
-/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
- */
-
-static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- struct cma *cma;
- int ret;
- ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
- if (ret == 0)
- dev_set_cma_area(dev, cma);
-
- return ret;
-}
-
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, bool no_warn);
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count);
-struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
-void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
-
-#else
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- return NULL;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
-
-static inline void dma_contiguous_set_default(struct cma *cma) { }
-
-static inline void dma_contiguous_reserve(phys_addr_t limit) { }
-
-static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed)
-{
- return -ENOSYS;
-}
-
-static inline
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- return -ENOSYS;
-}
-
-static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, bool no_warn)
-{
- return NULL;
-}
-
-static inline
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count)
-{
- return false;
-}
-
-/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
-static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
- gfp_t gfp)
-{
- return NULL;
-}
-
-static inline void dma_free_contiguous(struct device *dev, struct page *page,
- size_t size)
-{
- __free_pages(page, get_order(size));
-}
-
-#endif
-
-#endif
-
-#endif
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
deleted file mode 100644
index 7b3b04ba60f3..000000000000
--- a/include/linux/dma-debug.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- */
-
-#ifndef __DMA_DEBUG_H
-#define __DMA_DEBUG_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct bus_type;
-
-#ifdef CONFIG_DMA_API_DEBUG
-
-extern void dma_debug_add_bus(struct bus_type *bus);
-
-extern void debug_dma_map_single(struct device *dev, const void *addr,
- unsigned long len);
-
-extern void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr);
-
-extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
-
-extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction);
-
-extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir);
-
-extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt);
-
-extern void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr);
-
-extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction);
-
-extern void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- int direction);
-
-extern void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction);
-
-extern void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_dump_mappings(struct device *dev);
-
-#else /* CONFIG_DMA_API_DEBUG */
-
-static inline void dma_debug_add_bus(struct bus_type *bus)
-{
-}
-
-static inline void debug_dma_map_single(struct device *dev, const void *addr,
- unsigned long len)
-{
-}
-
-static inline void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
-{
-}
-
-static inline void debug_dma_unmap_sg(struct device *dev,
- struct scatterlist *sglist,
- int nelems, int dir)
-{
-}
-
-static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
-{
-}
-
-static inline void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
-{
-}
-
-static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_resource(struct device *dev,
- dma_addr_t dma_addr, size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_dump_mappings(struct device *dev)
-{
-}
-
-#endif /* CONFIG_DMA_API_DEBUG */
-
-#endif /* __DMA_DEBUG_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 6e87225600ae..3eb3589ff43e 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -7,64 +7,107 @@
#define _LINUX_DMA_DIRECT_H 1
#include <linux/dma-mapping.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
extern unsigned int zone_dma_bits;
-#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
-#include <asm/dma-direct.h>
-#else
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+/*
+ * Record the mapping of CPU physical to DMA addresses for a given region.
+ */
+struct bus_dma_region {
+ phys_addr_t cpu_start;
+ dma_addr_t dma_start;
+ u64 size;
+};
+
+static inline dma_addr_t translate_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
{
- dma_addr_t dev_addr = (dma_addr_t)paddr;
+ const struct bus_dma_region *m;
- return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = paddr - m->cpu_start;
+
+ if (paddr >= m->cpu_start && offset < m->size)
+ return m->dma_start + offset;
+ }
+
+ /* make sure dma_capable fails when no translation is available */
+ return DMA_MAPPING_ERROR;
}
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t translate_dma_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
{
- phys_addr_t paddr = (phys_addr_t)dev_addr;
+ const struct bus_dma_region *m;
+
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = dma_addr - m->dma_start;
- return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+ if (dma_addr >= m->dma_start && offset < m->size)
+ return m->cpu_start + offset;
+ }
+
+ return (phys_addr_t)-1;
}
-#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
-#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
-bool force_dma_unencrypted(struct device *dev);
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#ifndef phys_to_dma_unencrypted
+#define phys_to_dma_unencrypted phys_to_dma
+#endif
#else
-static inline bool force_dma_unencrypted(struct device *dev)
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
{
- return false;
+ if (dev->dma_range_map)
+ return translate_phys_to_dma(dev, paddr);
+ return paddr;
}
-#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
- * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
- * and __dma_to_phys versions should only be used on non-encrypted memory for
- * special occasions like DMA coherent buffers.
+ * bit in the DMA address, and dma_to_phys will clear it.
+ * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
+ * buffers.
*/
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return __sme_set(__phys_to_dma(dev, paddr));
+ return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
- return __sme_clr(__dma_to_phys(dev, daddr));
+ phys_addr_t paddr;
+
+ if (dev->dma_range_map)
+ paddr = translate_dma_to_phys(dev, dma_addr);
+ else
+ paddr = dma_addr;
+
+ return __sme_clr(paddr);
}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
bool is_ram)
{
dma_addr_t end = addr + size - 1;
- if (!dev->dma_mask)
+ if (addr == DMA_MAPPING_ERROR)
return false;
-
if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
return false;
@@ -77,115 +120,13 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-void *dma_direct_alloc_pages(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_addr, unsigned long attrs);
-int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-bool dma_direct_can_mmap(struct device *dev);
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
+struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_direct_free_pages(struct device *dev, size_t size,
+ struct page *page, dma_addr_t dma_addr,
+ enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
-bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
-size_t dma_direct_max_mapping_size(struct device *dev);
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
- defined(CONFIG_SWIOTLB)
-void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
- defined(CONFIG_SWIOTLB)
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-static inline void dma_direct_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
- phys_addr_t paddr = dma_to_phys(dev, addr);
-
- if (unlikely(is_swiotlb_buffer(paddr)))
- swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
-
- if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_device(paddr, size, dir);
-}
-
-static inline void dma_direct_sync_single_for_cpu(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
- phys_addr_t paddr = dma_to_phys(dev, addr);
-
- if (!dev_is_dma_coherent(dev)) {
- arch_sync_dma_for_cpu(paddr, size, dir);
- arch_sync_dma_for_cpu_all();
- }
-
- if (unlikely(is_swiotlb_buffer(paddr)))
- swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
-}
-
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
- struct page *page, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- dma_addr_t dma_addr = phys_to_dma(dev, phys);
-
- if (unlikely(swiotlb_force == SWIOTLB_FORCE))
- return swiotlb_map(dev, phys, size, dir, attrs);
-
- if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
- if (swiotlb_force != SWIOTLB_NO_FORCE)
- return swiotlb_map(dev, phys, size, dir, attrs);
-
- dev_WARN_ONCE(dev, 1,
- "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- return DMA_MAPPING_ERROR;
- }
-
- if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(phys, size, dir);
- return dma_addr;
-}
-
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- phys_addr_t phys = dma_to_phys(dev, addr);
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-
- if (unlikely(is_swiotlb_buffer(phys)))
- swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
-}
#endif /* _LINUX_DMA_DIRECT_H */
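
The bus_dma_region table above replaces the old single dma_pfn_offset; a
hedged sketch of what such a table looks like for a hypothetical device
whose 256 MiB window at CPU address 0x80000000 appears at bus address 0:

	#include <linux/dma-direct.h>
	#include <linux/sizes.h>

	static const struct bus_dma_region foo_map[] = {
		{
			.cpu_start = 0x80000000,
			.dma_start = 0x00000000,
			.size	   = SZ_256M,
		},
		{ }	/* a zero-sized entry terminates the table */
	};

	/*
	 * With dev->dma_range_map = foo_map:
	 *   translate_phys_to_dma(dev, 0x80001000) == 0x1000
	 *   translate_phys_to_dma(dev, 0x10000000) == DMA_MAPPING_ERROR
	 */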
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
index 9c96e30e6a0b..a2fe4571bc92 100644
--- a/include/linux/dma-direction.h
+++ b/include/linux/dma-direction.h
@@ -9,4 +9,10 @@ enum dma_data_direction {
DMA_NONE = 3,
};
-#endif
+static inline int valid_dma_direction(enum dma_data_direction dir)
+{
+ return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE ||
+ dir == DMA_FROM_DEVICE;
+}
+
+#endif /* _LINUX_DMA_DIRECTION_H */
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 303dd712220f..ec7f25def392 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -45,19 +45,6 @@ struct dma_fence_array {
struct irq_work work;
};
-extern const struct dma_fence_ops dma_fence_array_ops;
-
-/**
- * dma_fence_is_array - check if a fence is from the array subsclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
-
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
* @fence: fence to cast to a dma_fence_array
@@ -68,12 +55,27 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
}
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if yes iterate over all fences
+ * in the array. If not, just iterate over the single fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head) \
+ for (index = 0, fence = dma_fence_array_first(head); fence; \
+ ++(index), fence = dma_fence_array_next(head, index))
+
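
A minimal usage sketch of the iterator above (illustrative only, not part of this patch; "report_fences" is a hypothetical helper):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Walk every fence behind @head, which may or may not be a
 * dma_fence_array; a plain fence is visited exactly once. */
static void report_fences(struct dma_fence *head)
{
	struct dma_fence *fence;
	unsigned int index;

	dma_fence_array_for_each(fence, index, head)
		pr_info("fence %u: context %llu seqno %llu\n",
			index, fence->context, fence->seqno);
}
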
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
@@ -81,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 10462a029da2..4bdf0b96da28 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -12,28 +12,43 @@
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
+#include <linux/slab.h>
/**
 * struct dma_fence_chain - fence to represent a node of a fence chain
* @base: fence base class
- * @lock: spinlock for fence handling
* @prev: previous fence of the chain
* @prev_seqno: original previous seqno before garbage collection
* @fence: encapsulated fence
- * @cb: callback structure for signaling
- * @work: irq work item for signaling
+ * @lock: spinlock for fence handling
*/
struct dma_fence_chain {
struct dma_fence base;
- spinlock_t lock;
struct dma_fence __rcu *prev;
u64 prev_seqno;
struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct irq_work work;
+ union {
+ /**
+ * @cb: callback for signaling
+ *
+ * This is used to add the callback for signaling the
+ * completion of the fence chain. Never used at the same time
+ * as the irq work.
+ */
+ struct dma_fence_cb cb;
+
+ /**
+ * @work: irq work item for signaling
+ *
+ * Irq work structure to allow us to add the callback without
+ * running into lock inversion. Never used at the same time as
+ * the callback.
+ */
+ struct irq_work work;
+ };
+ spinlock_t lock;
};
-extern const struct dma_fence_ops dma_fence_chain_ops;
/**
* to_dma_fence_chain - cast a fence to a dma_fence_chain
@@ -45,19 +60,60 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
- if (!fence || fence->ops != &dma_fence_chain_ops)
+ if (!fence || !dma_fence_is_chain(fence))
return NULL;
return container_of(fence, struct dma_fence_chain, base);
}
/**
+ * dma_fence_chain_contained - return the contained fence
+ * @fence: the fence to test
+ *
+ * If the fence is a dma_fence_chain the function returns the fence contained
+ * inside the chain object, otherwise it returns the fence itself.
+ */
+static inline struct dma_fence *
+dma_fence_chain_contained(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ return chain ? chain->fence : fence;
+}
+
+/**
+ * dma_fence_chain_alloc - allocate a new fence chain node
+ *
+ * Returns a new struct dma_fence_chain object or NULL on failure.
+ */
+static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
+{
+ return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+}
+
+/**
+ * dma_fence_chain_free - free an unused fence chain node
+ * @chain: chain node to free
+ *
+ * Frees up an allocated but unused struct dma_fence_chain object. This
+ * doesn't need an RCU grace period since the fence was never initialized nor
+ * published. After dma_fence_chain_init() has been called the fence must be
+ * released by calling dma_fence_put(), and not through this function.
+ */
+static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ kfree(chain);
+}
+
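
A sketch of the lifetime rule described above (assumptions: error handling shortened; dma_fence_chain_init() used with its usual prev/fence/seqno signature; "append_fence" is a hypothetical helper):

#include <linux/dma-fence-chain.h>

/* Append @fence to the chain at *@head, or free the node again if it
 * was never initialized nor published. */
static int append_fence(struct dma_fence **head, struct dma_fence *fence,
			u64 seqno)
{
	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return -ENOMEM;

	if (!fence) {
		/* Never initialized: plain dma_fence_chain_free() is fine. */
		dma_fence_chain_free(chain);
		return 0;
	}

	/* From here on the node must be released with dma_fence_put(). */
	dma_fence_chain_init(chain, *head, fence, seqno);
	*head = &chain->base;
	return 0;
}
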
+/**
* dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence
* @head: starting point
*
 * Iterate over all fences in the chain. We hold a reference to the current
 * fence while inside the loop; it must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
*/
#define dma_fence_chain_for_each(iter, head) \
for (iter = dma_fence_get(head); iter; \
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644
index 000000000000..66b1e56fbb81
--- /dev/null
+++ b/include/linux/dma-fence-unwrap.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+struct dma_fence;
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+ /**
+ * @chain: potential dma_fence_chain, but can be any other fence as well
+ */
+ struct dma_fence *chain;
+ /**
+ * @array: potential dma_fence_array, but can be any other fence as well
+ */
+ struct dma_fence *array;
+ /**
+ * @index: last returned index if @array is really a dma_fence_array
+ */
+ unsigned int index;
+};
+
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor);
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence, only that one
+ * is returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head) \
+ for (fence = dma_fence_unwrap_first(head, cursor); fence; \
+ fence = dma_fence_unwrap_next(cursor))
+
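
An illustrative sketch of the deep-dive iterator (hypothetical helper, not part of this patch):

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

/* Count the leaf fences behind @head, unwrapping any chain and array
 * containers on the way; the cursor manages all references. */
static unsigned int count_leaf_fences(struct dma_fence *head)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_fence_unwrap_for_each(fence, &cursor, head)
		count++;

	return count;
}
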
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *cursors);
+
+/**
+ * dma_fence_unwrap_merge - unwrap and merge fences
+ *
+ * All fences given as parameters are unwrapped and merged back together as a
+ * flat dma_fence_array. Useful if multiple containers need to be merged.
+ *
+ * Implemented as a macro to allocate the necessary arrays on the stack and
+ * account the stack frame size to the caller.
+ *
+ * Returns NULL on memory allocation failure, a dma_fence object representing
+ * all the given fences otherwise.
+ */
+#define dma_fence_unwrap_merge(...) \
+ ({ \
+ struct dma_fence *__f[] = { __VA_ARGS__ }; \
+ struct dma_fence_unwrap __c[ARRAY_SIZE(__f)]; \
+ \
+ __dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c); \
+ })
+
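
A usage sketch, assuming "a" and "b" are fences owned by the caller:

/* Merge two (possibly container) fences and wait for the result. */
static int sync_both(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence *merged = dma_fence_unwrap_merge(a, b);
	signed long ret;

	if (!merged)
		return -ENOMEM;

	ret = dma_fence_wait(merged, true);
	dma_fence_put(merged);
	return ret < 0 ? ret : 0;
}
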
+#endif
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 09e23adb351d..e06bad467f55 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
+#include <linux/timekeeping.h>
struct dma_fence;
struct dma_fence_ops;
@@ -214,19 +215,15 @@ struct dma_fence_ops {
* Custom wait implementation, defaults to dma_fence_default_wait() if
* not set.
*
- * The dma_fence_default_wait implementation should work for any fence, as long
- * as @enable_signaling works correctly. This hook allows drivers to
- * have an optimized version for the case where a process context is
- * already available, e.g. if @enable_signaling for the general case
- * needs to set up a worker thread.
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
- *
- * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -261,6 +258,26 @@ struct dma_fence_ops {
*/
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
+
+ /**
+ * @set_deadline:
+ *
+ * Callback to allow a fence waiter to inform the fence signaler of
+ * an upcoming deadline, such as vblank, by which point the waiter
+ * would prefer the fence to be signaled. This is intended to
+ * give feedback to the fence signaler to aid in power management
+ * decisions, such as boosting GPU frequency.
+ *
+ * This is called without &dma_fence.lock held, it can be called
+ * multiple times and from any context. Locking is up to the callee
+ * if it has some state to manage. If multiple deadlines are set,
+ * the expectation is to track the soonest one. If the deadline is
+ * before the current time, it should be interpreted as an immediate
+ * deadline.
+ *
+ * This callback is optional.
+ */
+ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
@@ -268,6 +285,7 @@ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
* dma_fence_put - decreases refcount of the fence
@@ -372,6 +390,9 @@ static inline void __dma_fence_might_wait(void) {}
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -479,6 +500,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
}
/**
+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
+ * fences must be from the same context, since a seqno is not re-used across
+ * contexts.
+ */
+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ return f1 == f2 || dma_fence_is_later(f1, f2);
+}
+
+/**
* dma_fence_later - return the chronologically later fence
* @f1: the first fence from the same context
* @f2: the second fence from the same context
@@ -548,6 +584,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
fence->error = error;
}
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy-waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return ktime_get();
+
+ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+
+ return fence->timestamp;
+}
+
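
A short sketch of the intended usage ("fence_completion_time" is a hypothetical helper):

/* Only meaningful once the fence has signaled; the wait ensures that.
 * With intr=false the wait cannot be interrupted, so the return value
 * can be ignored in this sketch. */
static ktime_t fence_completion_time(struct dma_fence *fence)
{
	dma_fence_wait(fence, false);
	return dma_fence_timestamp(fence);
}
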
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
@@ -583,29 +638,48 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
+
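
A hedged sketch of how a waiter might use this; the 500us margin is an arbitrary illustrative value:

/* Ask the signaler to complete the fence a little before vblank so the
 * frame can still make it on screen. */
static void boost_for_flip(struct dma_fence *fence, ktime_t vblank_time)
{
	dma_fence_set_deadline(fence, ktime_sub_us(vblank_time, 500));
}
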
struct dma_fence *dma_fence_get_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%llu: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large nested fence structures;
+ * otherwise we run into recursion during operations on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 454e354d1ffb..0c05561cad6e 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -16,15 +16,15 @@ struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return fd
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
- * allocate returns dmabuf fd on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
- int (*allocate)(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags);
+ struct dma_buf *(*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
};
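
A sketch of the new contract ("my_heap_do_alloc" is a hypothetical helper that builds and exports the dma_buf):

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>

static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
					unsigned long len,
					unsigned long fd_flags,
					unsigned long heap_flags)
{
	/* my_heap_do_alloc() is assumed to return NULL on failure. */
	struct dma_buf *dmabuf = my_heap_do_alloc(heap, len, fd_flags);

	return dmabuf ?: ERR_PTR(-ENOMEM);
}

static const struct dma_heap_ops my_heap_ops = {
	.allocate = my_heap_allocate,
};
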
/**
@@ -51,6 +51,15 @@ struct dma_heap_export_info {
void *dma_heap_get_drvdata(struct dma_heap *heap);
/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve the name of
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
+const char *dma_heap_get_name(struct dma_heap *heap);
+
+/**
* dma_heap_add - adds a heap to dmabuf heaps
* @exp_info: information needed to register this heap
*/
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
deleted file mode 100644
index 2112f21f73d8..000000000000
--- a/include/linux/dma-iommu.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2014-2015 ARM Ltd.
- */
-#ifndef __DMA_IOMMU_H
-#define __DMA_IOMMU_H
-
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/msi.h>
-
-/* Domain management interface for IOMMU drivers */
-int iommu_get_dma_cookie(struct iommu_domain *domain);
-int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-void iommu_put_dma_cookie(struct iommu_domain *domain);
-
-/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
-
-/* The DMA API isn't _quite_ the whole story, though... */
-/*
- * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
- *
- * The MSI page will be stored in @desc.
- *
- * Return: 0 on success otherwise an error describing the failure.
- */
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
-
-/* Update the MSI message if required. */
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg);
-
-void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-
-#else /* CONFIG_IOMMU_DMA */
-
-struct iommu_domain;
-struct msi_desc;
-struct msi_msg;
-struct device;
-
-static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size)
-{
-}
-
-static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-{
- return -ENODEV;
-}
-
-static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-{
- return -ENODEV;
-}
-
-static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-}
-
-static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
- phys_addr_t msi_addr)
-{
- return 0;
-}
-
-static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
-{
-}
-
-static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
-{
-}
-
-#endif /* CONFIG_IOMMU_DMA */
-#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
new file mode 100644
index 000000000000..4abc60f04209
--- /dev/null
+++ b/include/linux/dma-map-ops.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header is for implementations of dma_map_ops and related code.
+ * It should not be included in drivers just using the DMA API.
+ */
+#ifndef _LINUX_DMA_MAP_OPS_H
+#define _LINUX_DMA_MAP_OPS_H
+
+#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
+#include <linux/slab.h>
+
+struct cma;
+struct iommu_ops;
+
+/*
+ * Values for struct dma_map_ops.flags:
+ *
+ * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
+ * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ */
+#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
+
+struct dma_map_ops {
+ unsigned int flags;
+
+ void *(*alloc)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs);
+ void (*free)(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs);
+ struct page *(*alloc_pages)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir,
+ gfp_t gfp);
+ void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+ struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs);
+ void (*free_noncontiguous)(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+ int (*mmap)(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t, unsigned long attrs);
+
+ int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ /*
+ * map_sg should return a negative error code on error. See
+ * dma_map_sgtable() for a list of appropriate error codes
+ * and their meanings.
+ */
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
+ size_t (*opt_mapping_size)(void);
+ unsigned long (*get_merge_boundary)(struct device *dev);
+};
+
+#ifdef CONFIG_DMA_OPS
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops();
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
+#else /* CONFIG_DMA_OPS */
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return NULL;
+}
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+}
+#endif /* CONFIG_DMA_OPS */
+
+#ifdef CONFIG_DMA_CMA
+extern struct cma *dma_contiguous_default_area;
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma, bool fixed);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+ unsigned int order, bool no_warn);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+#else /* CONFIG_DMA_CMA */
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+static inline void dma_contiguous_reserve(phys_addr_t limit)
+{
+}
+static inline int dma_contiguous_reserve_area(phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
+ bool fixed)
+{
+ return -ENOSYS;
+}
+static inline struct page *dma_alloc_from_contiguous(struct device *dev,
+ size_t count, unsigned int order, bool no_warn)
+{
+ return NULL;
+}
+static inline bool dma_release_from_contiguous(struct device *dev,
+ struct page *pages, int count)
+{
+ return false;
+}
+/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
+static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
+ gfp_t gfp)
+{
+ return NULL;
+}
+static inline void dma_free_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __free_pages(page, get_order(size));
+}
+#endif /* CONFIG_DMA_CMA*/
+
+#ifdef CONFIG_DMA_DECLARE_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size);
+void dma_release_coherent_memory(struct device *dev);
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
+#else
+static inline int dma_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
+{
+ return -ENOSYS;
+}
+
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+static inline void dma_release_coherent_memory(struct device *dev) { }
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
+
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
+static inline void *dma_alloc_from_global_coherent(struct device *dev,
+ ssize_t size, dma_addr_t *dma_handle)
+{
+ return NULL;
+}
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ return 0;
+}
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret)
+{
+ return 0;
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
+
+/*
+ * This is the actual return value from the ->alloc_noncontiguous method.
+ * The users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API internal vmapping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap-variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+ struct sg_table sgt;
+ struct page **pages;
+};
+#define sgt_handle(sgt) \
+ container_of((sgt), struct dma_sgt_handle, sgt)
+
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+struct page *dma_common_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+
+struct page **dma_common_find_pages(void *cpu_addr);
+void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
+ const void *caller);
+void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
+
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t flags,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
+
+int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
+ dma_addr_t dma_start, u64 size);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+extern bool dma_default_coherent;
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return dev->dma_coherent;
+}
+#else
+#define dma_default_coherent true
+
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return true;
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */
+
+/*
+ * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_safe(struct device *dev,
+ enum dma_data_direction dir)
+{
+ /*
+ * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
+ * caches have already been aligned to a DMA-safe size.
+ */
+ if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ return true;
+
+ /*
+ * kmalloc() buffers are DMA-safe irrespective of size if the device
+ * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+ * cache maintenance and benign cache line evictions).
+ */
+ if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
+ * sufficiently aligned for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_size_aligned(size_t size)
+{
+ /*
+ * Larger kmalloc() sizes are guaranteed to be aligned to
+ * ARCH_DMA_MINALIGN.
+ */
+ if (size >= 2 * ARCH_DMA_MINALIGN ||
+ IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given object size may have originated from a kmalloc()
+ * buffer with a slab alignment below the DMA-safe alignment and needs
+ * bouncing for non-coherent DMA. The pointer alignment is not considered and
+ * in-structure DMA-safe offsets are the responsibility of the caller. Such
+ * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
+ *
+ * The heuristics can have false positives, bouncing unnecessarily, though the
+ * buffers would be small. False negatives are theoretically possible if, for
+ * example, multiple small kmalloc() buffers are coalesced into a larger
+ * buffer that passes the alignment check. There are no such known constructs
+ * in the kernel.
+ */
+static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
+ enum dma_data_direction dir)
+{
+ return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask) do { } while (0)
+#endif
+
+#ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently. We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
+#endif
+
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
+#else
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ return prot; /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(void);
+#else
+static inline void arch_sync_dma_for_cpu_all(void)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
+#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
+void arch_dma_prep_coherent(struct page *page, size_t size);
+#else
+static inline void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
+void *arch_dma_set_uncached(void *addr, size_t size);
+void arch_dma_clear_uncached(void *addr, size_t size);
+
+#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
+bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+#else
+#define arch_dma_map_page_direct(d, a) (false)
+#define arch_dma_unmap_page_direct(d, a) (false)
+#define arch_dma_map_sg_direct(d, s, n) (false)
+#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ bool coherent);
+#else
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, bool coherent)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
+
+#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
+void arch_teardown_dma_ops(struct device *dev);
+#else
+static inline void arch_teardown_dma_ops(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
+
+#ifdef CONFIG_DMA_API_DEBUG
+void dma_debug_add_bus(const struct bus_type *bus);
+void debug_dma_dump_mappings(struct device *dev);
+#else
+static inline void dma_debug_add_bus(const struct bus_type *bus)
+{
+}
+static inline void debug_dma_dump_mappings(struct device *dev)
+{
+}
+#endif /* CONFIG_DMA_API_DEBUG */
+
+extern const struct dma_map_ops dma_dummy_ops;
+
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally to indicate that the mapping
+ * type hasn't been calculated yet. Functions that return this enum
+ * never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
+struct pci_p2pdma_map_state {
+ struct dev_pagemap *pgmap;
+ int map;
+ u64 bus_off;
+};
+
+#ifdef CONFIG_PCI_P2PDMA
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg);
+#else /* CONFIG_PCI_P2PDMA */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+#endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 52635e91143b..4a658de44ee9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -2,11 +2,11 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
+#include <linux/cache.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
@@ -28,11 +28,6 @@
*/
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
- * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
- * consistent or non-consistent memory as it sees fit.
- */
-#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
-/*
* DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
* virtual mapping for the allocated buffer.
*/
@@ -68,158 +63,40 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
- * A dma_addr_t can hold any valid DMA or bus address for the platform.
- * It can be given to a device to use as a DMA source or target. A CPU cannot
- * reference a dma_addr_t directly because there may be translation between
- * its physical address space and the bus address space.
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
+ * be used directly in drivers, but checked for using dma_mapping_error()
+ * instead.
*/
-struct dma_map_ops {
- void* (*alloc)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
- void (*free)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
- int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t,
- unsigned long attrs);
-
- int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, unsigned long attrs);
-
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- /*
- * map_sg returns 0 on error and a value > 0 on success.
- * It should never return a value < 0.
- */
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_sg)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs);
- dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_single_for_device)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_sg_for_cpu)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- void (*sync_sg_for_device)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
- int (*dma_supported)(struct device *dev, u64 mask);
- u64 (*get_required_mask)(struct device *dev);
- size_t (*max_mapping_size)(struct device *dev);
- unsigned long (*get_merge_boundary)(struct device *dev);
-};
-
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
-extern const struct dma_map_ops dma_virt_ops;
-extern const struct dma_map_ops dma_dummy_ops;
-
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
-#define DMA_MASK_NONE 0x0ULL
-
-static inline int valid_dma_direction(int dma_direction)
-{
- return ((dma_direction == DMA_BIDIRECTIONAL) ||
- (dma_direction == DMA_TO_DEVICE) ||
- (dma_direction == DMA_FROM_DEVICE));
-}
-
-#ifdef CONFIG_DMA_DECLARE_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret);
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
-
+#ifdef CONFIG_DMA_API_DEBUG
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
#else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-
-static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle)
-{
- return NULL;
-}
-
-static inline int dma_release_from_global_coherent(int order, void *vaddr)
+static inline void debug_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
{
- return 0;
}
-
-static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
- void *cpu_addr, size_t size,
- int *ret)
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
{
- return 0;
}
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#endif /* CONFIG_DMA_API_DEBUG */
#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-
-#ifdef CONFIG_DMA_OPS
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (dev->dma_ops)
- return dev->dma_ops;
- return get_arch_dma_ops(dev->bus);
-}
-
-static inline void set_dma_ops(struct device *dev,
- const struct dma_map_ops *dma_ops)
-{
- dev->dma_ops = dma_ops;
-}
-#else /* CONFIG_DMA_OPS */
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- return NULL;
-}
-static inline void set_dma_ops(struct device *dev,
- const struct dma_map_ops *dma_ops)
-{
-}
-#endif /* CONFIG_DMA_OPS */
-
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);
- if (dma_addr == DMA_MAPPING_ERROR)
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
return -ENOMEM;
return 0;
}
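
The intended pattern, as a brief sketch: always check the returned handle with dma_mapping_error() rather than comparing against DMA_MAPPING_ERROR directly ("map_tx_buffer" is a hypothetical helper):

static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	return dma_mapping_error(dev, *handle);
}
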
@@ -229,11 +106,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
@@ -254,8 +133,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
@@ -263,13 +140,24 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
+bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
+struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
+int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
@@ -281,8 +169,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
return 0;
}
@@ -291,6 +180,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
unsigned long attrs)
{
}
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -339,10 +233,6 @@ static inline void dmam_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
}
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
-}
static inline int dma_get_sgtable_attrs(struct device *dev,
struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs)
@@ -359,9 +249,9 @@ static inline bool dma_can_mmap(struct device *dev)
{
return false;
}
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
- return 0;
+ return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
@@ -375,10 +265,18 @@ static inline u64 dma_get_required_mask(struct device *dev)
{
return 0;
}
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return false;
+}
static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
+static inline size_t dma_opt_mapping_size(struct device *dev)
+{
+ return 0;
+}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return false;
@@ -387,8 +285,51 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
}
+static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
+ size_t size, enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+}
+static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ return NULL;
+}
+static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
+{
+}
+static inline int dma_mmap_noncontiguous(struct device *dev,
+ struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_HAS_DMA */
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct page *page);
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+ return page ? page_address(page) : NULL;
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+ dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
+}
+
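
A minimal allocation/free pair built on these helpers (illustrative sketch; real users must also bracket device accesses with the dma_sync_single_* functions since the memory is noncoherent):

static void *rx_buf_get(struct device *dev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_noncoherent(dev, size, handle, DMA_FROM_DEVICE,
				     GFP_KERNEL);
}

static void rx_buf_put(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t handle)
{
	dma_free_noncoherent(dev, size, vaddr, handle, DMA_FROM_DEVICE);
}
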
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
@@ -422,34 +363,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
}
/**
- * dma_map_sgtable - Map the given buffer for DMA
- * @dev: The device for which to perform the DMA operation
- * @sgt: The sg_table object describing the buffer
- * @dir: DMA direction
- * @attrs: Optional DMA attributes for the map operation
- *
- * Maps a buffer described by a scatterlist stored in the given sg_table
- * object for the @dir DMA operation by the @dev device. After success the
- * ownership for the buffer is transferred to the DMA domain. One has to
- * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
- * ownership of the buffer back to the CPU domain before touching the
- * buffer by the CPU.
- *
- * Returns 0 on success or -EINVAL on error during mapping the buffer.
- */
-static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int nents;
-
- nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
- if (nents <= 0)
- return -EINVAL;
- sgt->nents = nents;
- return 0;
-}
-
-/**
* dma_unmap_sgtable - Unmap the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
@@ -510,31 +423,11 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-struct page **dma_common_find_pages(void *cpu_addr);
-void *dma_common_contiguous_remap(struct page *page, size_t size,
- pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
- pgprot_t prot, const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size);
-
-struct page *dma_alloc_from_pool(struct device *dev, size_t size,
- void **cpu_addr, gfp_t flags,
- bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
-bool dma_free_from_pool(struct device *dev, void *start, size_t size);
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs);
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
-
return dma_alloc_attrs(dev, size, dma_handle, gfp,
(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
@@ -577,38 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
- return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
- dma_get_required_mask(dev);
-}
-
-#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-#else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu, bool coherent)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
-
-#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
-void arch_teardown_dma_ops(struct device *dev);
-#else
-static inline void arch_teardown_dma_ops(struct device *dev)
-{
-}
-#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
-
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
@@ -629,7 +490,26 @@ static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
return dev->dma_parms->segment_boundary_mask;
- return DMA_BIT_MASK(32);
+ return ULONG_MAX;
+}
+
+/**
+ * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
+ * @dev: device to query the boundary for
+ * @page_shift: ilog2() of the IOMMU page size
+ *
+ * Return the segment boundary in IOMMU page units (which may be different from
+ * the CPU page size) for the passed in device.
+ *
+ * If @dev is NULL, a boundary of U32_MAX is assumed; this case exists only
+ * for non-DMA API callers.
+ */
+static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
+ unsigned int page_shift)
+{
+ if (!dev)
+ return (U32_MAX >> page_shift) + 1;
+ return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -641,25 +521,31 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+static inline unsigned int dma_get_min_align_mask(struct device *dev)
+{
+ if (dev->dma_parms)
+ return dev->dma_parms->min_align_mask;
+ return 0;
+}
+
+static inline int dma_set_min_align_mask(struct device *dev,
+ unsigned int min_align_mask)
+{
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return -EIO;
+ dev->dma_parms->min_align_mask = min_align_mask;
+ return 0;
+}
+
+#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
-#ifdef ARCH_DMA_MINALIGN
+#ifdef ARCH_HAS_DMA_MINALIGN
return ARCH_DMA_MINALIGN;
#endif
return 1;
}
-
-#ifdef CONFIG_DMA_DECLARE_COHERENT
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size);
-#else
-static inline int
-dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#endif
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
@@ -711,4 +597,4 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
-#endif
+#endif /* _LINUX_DMA_MAPPING_H */
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
deleted file mode 100644
index ca09a4e07d2d..000000000000
--- a/include/linux/dma-noncoherent.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_DMA_NONCOHERENT_H
-#define _LINUX_DMA_NONCOHERENT_H 1
-
-#include <linux/dma-mapping.h>
-#include <linux/pgtable.h>
-
-#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
-#include <asm/dma-coherence.h>
-#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static inline bool dev_is_dma_coherent(struct device *dev)
-{
- return dev->dma_coherent;
-}
-#else
-static inline bool dev_is_dma_coherent(struct device *dev)
-{
- return true;
-}
-#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
-
-/*
- * Check if an allocation needs to be marked uncached to be coherent.
- */
-static __always_inline bool dma_alloc_need_uncached(struct device *dev,
- unsigned long attrs)
-{
- if (dev_is_dma_coherent(dev))
- return false;
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
- return false;
- if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
- (attrs & DMA_ATTR_NON_CONSISTENT))
- return false;
- return true;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs);
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_addr, unsigned long attrs);
-
-#ifdef CONFIG_MMU
-/*
- * Page protection so that devices that can't snoop CPU caches can use the
- * memory coherently. We default to pgprot_noncached which is usually used
- * for ioremap as a safe bet, but architectures can override this with less
- * strict semantics if possible.
- */
-#ifndef pgprot_dmacoherent
-#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
-#endif
-
-pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
-#else
-static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
- unsigned long attrs)
-{
- return prot; /* no protection bits supported without page tables */
-}
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-#else
-static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
-}
-#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
-#else
-static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
-}
-#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
-#else
-static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
-}
-#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(void);
-#else
-static inline void arch_sync_dma_for_cpu_all(void)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
-
-#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
-void arch_dma_prep_coherent(struct page *page, size_t size);
-#else
-static inline void arch_dma_prep_coherent(struct page *page, size_t size)
-{
-}
-#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
-
-void *arch_dma_set_uncached(void *addr, size_t size);
-void arch_dma_clear_uncached(void *addr, size_t size);
-
-#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index d44a77e8a7e3..8d0e34dad446 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -47,52 +47,277 @@
extern struct ww_class reservation_ww_class;
+struct dma_resv_list;
+
/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
+ * when the dma_resv object is asked for fences for one use case, the fences
+ * for the lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences, the KERNEL fences are returned
+ * as well. Similarly, when asked for READ fences, both the WRITE and KERNEL
+ * fences are returned.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
*/
-struct dma_resv_list {
- struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers must *always* wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception is
+ * when the resource is known to be locked down in place because it
+ * was pinned beforehand.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * any implicit synchronization.
+ *
+ * The most common cases are preemption fences, page table updates, TLB
+ * flushes and explicitly synced user submissions.
+ *
+ * Explicitly synced user submissions can be promoted to
+ * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+ * dma_buf_import_sync_file() when implicit synchronization becomes
+ * necessary after the fence was initially added.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
};
/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+ * The rationale is that a new write operation needs to wait for all
+ * existing read and write operations to finish, while a new read
+ * operation only needs to wait for the existing write operations to
+ * finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
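A small sketch of the helper in a submission path (function name hypothetical): a writer waits for READ fences and everything below, a reader only for WRITE and below, exactly the rationale spelled out above:

static long my_wait_implicit(struct dma_resv *resv, bool write)
{
	return dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				     true, MAX_SCHEDULE_TIMEOUT);
}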
+/**
* struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
*/
struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state such as placement and
+ * allocation policies, and hold it throughout command submission.
+ */
struct ww_mutex lock;
- seqcount_ww_mutex_t seq;
- struct dma_fence __rcu *fence_excl;
- struct dma_resv_list __rcu *fence;
+ /**
+ * @fences:
+ *
+ * Array of fences which were added to the dma_resv object
+ *
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_fences().
+ */
+ struct dma_resv_list __rcu *fences;
};
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences; private, *MUST* not be dereferenced */
+ struct dma_resv_list *fences;
+
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ enum dma_resv_usage usage)
+{
+ cursor->obj = obj;
+ cursor->usage = usage;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+ return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
*
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
+ * Return true if this is the first fence in an iteration after a restart.
*/
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
- return rcu_dereference_protected(obj->fence,
- dma_resv_held(obj));
+ return cursor->is_restarted;
}
/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * locked iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
+
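A hedged usage sketch of the unlocked iterator, including the restart handling the comment above requires; the statistic gathered is purely illustrative:

static unsigned int my_count_signaled(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the RCU-based walk may restart: drop accumulated state */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		if (dma_fence_is_signaled(fence))
			count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}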
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @usage controls which fences are returned, see enum
+ * dma_resv_usage. The cursor initialisation is part of the iterator and the
+ * fence stays valid as long as the lock is held, so no extra reference to the
+ * fence is taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
+
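The locked counterpart is simpler, assuming the caller already holds the reservation lock: no restart check and no extra fence references are needed (the debug-print body is illustrative):

static void my_dump_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_assert_held(resv);
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence)
		pr_info("fence ctx %llu seqno %llu usage %d\n",
			fence->context, fence->seqno,
			dma_resv_iter_usage(&cursor));
}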
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
+
+/**
* dma_resv_lock - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
@@ -106,6 +331,13 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
*/
static inline int dma_resv_lock(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
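A minimal sketch of the documented -EDEADLK dance for two reservation objects under one acquire context; it assumes the caller has already done ww_acquire_init(ctx, &reservation_ww_class), and the helper name is hypothetical:

static int my_lock_pair(struct dma_resv *a, struct dma_resv *b,
			struct ww_acquire_ctx *ctx)
{
	struct dma_resv *contended = NULL;
	int ret;

retry:
	if (contended) {
		/* take the lock we backed off from via the slow path */
		dma_resv_lock_slow(contended, ctx);
		swap(a, b);	/* the contended object is now locked first */
	} else {
		ret = dma_resv_lock(a, ctx);
		if (ret)
			return ret;
	}
	ret = dma_resv_lock(b, ctx);
	if (ret == -EDEADLK) {
		dma_resv_unlock(a);
		contended = b;
		goto retry;
	}
	if (ret)
		dma_resv_unlock(a);
	return ret;
}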
@@ -127,6 +359,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
*/
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -142,6 +380,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
* Acquires the reservation object after a die case. This function
* will sleep until the lock becomes available. See dma_resv_lock() as
* well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
*/
static inline void dma_resv_lock_slow(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -175,13 +415,13 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
* if they overlap with a writer.
*
* Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
*
* Returns true if the lock was acquired, false otherwise.
*/
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
- return ww_mutex_trylock(&obj->lock);
+ return ww_mutex_trylock(&obj->lock, NULL);
}
/**
@@ -201,6 +441,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
*
* Returns the context used to lock a reservation object or NULL if no context
* was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
*/
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
@@ -215,79 +460,28 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Test shared fence slot reservation */
- if (rcu_access_pointer(obj->fence)) {
- struct dma_resv_list *fence = dma_resv_get_list(obj);
-
- fence->shared_max = fence->shared_count;
- }
-#endif
+ dma_resv_reset_max_fences(obj);
ww_mutex_unlock(&obj->lock);
}
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
- struct dma_fence *fence;
-
- if (!rcu_access_pointer(obj->fence_excl))
- return NULL;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&obj->fence_excl);
- rcu_read_unlock();
-
- return fence;
-}
-
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+ ktime_t deadline);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/dma/amd_xdma.h b/include/linux/dma/amd_xdma.h
new file mode 100644
index 000000000000..ceba69ed7cb4
--- /dev/null
+++ b/include/linux/dma/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index cab6e18773da..3080747689f6 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -12,24 +12,97 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
+
struct dw_edma;
+struct dw_edma_region {
+ u64 paddr;
+ union {
+ void *mem;
+ void __iomem *io;
+ } vaddr;
+ size_t sz;
+};
+
+/**
+ * struct dw_edma_core_ops - platform-specific eDMA methods
+ * @irq_vector: Get IRQ number of the passed eDMA channel. Note the
+ * method accepts the channel id in the end-to-end
+ * numbering with the eDMA write channels being placed
+ * first in the row.
+ * @pci_address: Get the PCIe bus address corresponding to the passed CPU
+ * address. Note there is no need to specify this
+ * callback if the address translation is performed by
+ * the DW PCIe RP/EP controller containing the DW eDMA
+ * device and DMA_BYPASS isn't set for all the outbound
+ * iATU windows; in that case the controller performs
+ * the translation automatically.
+ */
+struct dw_edma_plat_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+ u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
+};
+
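A hedged sketch of what a glue/platform driver might supply here; the driver name and IRQ layout are assumptions, but the vector numbering follows the @irq_vector note above (write channels first):

static int my_edma_irq_vector(struct device *dev, unsigned int nr)
{
	/* vectors 0..ll_wr_cnt-1 serve write channels, read channels follow */
	return platform_get_irq(to_platform_device(dev), nr);
}

static const struct dw_edma_plat_ops my_edma_ops = {
	.irq_vector	= my_edma_irq_vector,
	/* .pci_address left NULL: the controller's iATU translates */
};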
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5,
+ EDMA_MF_HDMA_NATIVE = 0x7,
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+ DW_EDMA_CHIP_LOCAL = BIT(0),
+};
+
/**
* struct dw_edma_chip - representation of DesignWare eDMA controller hardware
* @dev: struct device of the eDMA controller
* @id: instance ID
- * @irq: irq line
- * @dw: struct dw_edma that is filed by dw_edma_probe()
+ * @nr_irqs: total number of DMA IRQs
+ * @ops: platform-specific eDMA methods (DMA channel to IRQ number mapping)
+ * @flags: dw_edma_chip_flags
+ * @reg_base: DMA register base address
+ * @ll_wr_cnt: DMA write link list count
+ * @ll_rd_cnt: DMA read link list count
+ * @ll_region_wr: DMA descriptor link list memory for write channels
+ * @ll_region_rd: DMA descriptor link list memory for read channels
+ * @dt_region_wr: DMA data memory for write channels
+ * @dt_region_rd: DMA data memory for read channels
+ * @mf: DMA register map format
+ * @dw: struct dw_edma that is filled by dw_edma_probe()
*/
struct dw_edma_chip {
struct device *dev;
- int id;
- int irq;
+ int nr_irqs;
+ const struct dw_edma_plat_ops *ops;
+ u32 flags;
+
+ void __iomem *reg_base;
+
+ u16 ll_wr_cnt;
+ u16 ll_rd_cnt;
+ /* link list address */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+
+ /* data region */
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+
+ enum dw_edma_map_format mf;
+
struct dw_edma *dw;
};
/* Export to the platform drivers */
-#if IS_ENABLED(CONFIG_DW_EDMA)
+#if IS_REACHABLE(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index a6b7bc707356..77ea602c287c 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -8,11 +8,13 @@
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
-#include <linux/device.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
+struct device;
struct hsu_dma;
/**
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/dma/imx-dma.h
index 281adbb26e6b..cfec5f946e23 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/dma/imx-dma.h
@@ -3,8 +3,8 @@
* Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*/
-#ifndef __ASM_ARCH_MXC_DMA_H__
-#define __ASM_ARCH_MXC_DMA_H__
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
#include <linux/scatterlist.h>
#include <linux/device.h>
@@ -39,6 +39,8 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+ IMX_DMATYPE_HDMI, /* HDMI Audio */
};
enum imx_dma_prio {
@@ -65,4 +67,36 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
!strcmp(chan->device->dev->driver->name, "imx-dma");
}
-#endif
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording; 0 means the FIFOs are
+ * contiguous, 1 means one word of stride between FIFOs. The
+ * stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words fetched from/filled into each FIFO at a
+ * time; 1 means one channel per FIFO, 2 means two channels
+ * per FIFO. If 'n_fifos_src = 4' and 'words_per_fifo = 2',
+ * the first two words (channels) are fetched from FIFO0,
+ * the next two from FIFO1, and so on; after the last FIFO3
+ * is fetched, the engine wraps back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+ int n_fifos_src;
+ int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
+ bool sw_done;
+};
+
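A hedged example of how this reaches the SDMA driver: the struct travels through the generic peripheral_config/peripheral_size members of struct dma_slave_config (see the dmaengine.h change below); FIFO count, stride and address are illustrative:

static int my_sai_setup_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct sdma_peripheral_config sdmacfg = {
		.n_fifos_dst = 4,	/* four successive TX FIFOs */
		.stride_fifos_dst = 0,	/* contiguous FIFO registers */
		.words_per_fifo = 1,	/* one channel per FIFO */
	};
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
		.peripheral_config = &sdmacfg,
		.peripheral_size = sizeof(sdmacfg),
	};

	return dmaengine_slave_config(chan, &cfg);
}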
+#endif /* __LINUX_DMA_IMX_H */
diff --git a/include/linux/dma/k3-event-router.h b/include/linux/dma/k3-event-router.h
new file mode 100644
index 000000000000..e3f88b2f87be
--- /dev/null
+++ b/include/linux/dma/k3-event-router.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_EVENT_ROUTER_
+#define K3_EVENT_ROUTER_
+
+#include <linux/types.h>
+
+struct k3_event_route_data {
+ void *priv;
+ int (*set_event)(void *priv, u32 event);
+};
+
+#endif /* K3_EVENT_ROUTER_ */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
index 1962f75fa2d3..5f106d852f1c 100644
--- a/include/linux/dma/k3-psil.h
+++ b/include/linux/dma/k3-psil.h
@@ -42,27 +42,42 @@ enum psil_endpoint_type {
/**
* struct psil_endpoint_config - PSI-L Endpoint configuration
* @ep_type: PSI-L endpoint type
+ * @channel_tpl: Desired throughput level for the channel
* @pkt_mode: If set, the channel must be in Packet mode, otherwise in
* TR mode
* @notdpkt: TDCM must be suppressed on the TX channel
* @needs_epib: Endpoint needs EPIB
- * @psd_size: If set, PSdata is used by the endpoint
- * @channel_tpl: Desired throughput level for the channel
* @pdma_acc32: ACC32 must be enabled on the PDMA side
* @pdma_burst: BURST must be enabled on the PDMA side
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
+ * The thread must be serviced by the specified channel
+ * when mapped_channel_id is >= 0
+ * @flow_start: PKTDMA flow range start of mapped channel. Unmapped
+ * channels use flow_id == chan_id
+ * @flow_num: PKTDMA flow count of mapped channel. Unmapped channels
+ * use flow_id == chan_id
+ * @default_flow_id: PKTDMA default (r)flow index of mapped channel.
+ * Must be within the flow range of the mapped channel.
*/
struct psil_endpoint_config {
enum psil_endpoint_type ep_type;
+ enum udma_tp_level channel_tpl;
unsigned pkt_mode:1;
unsigned notdpkt:1;
unsigned needs_epib:1;
- u32 psd_size;
- enum udma_tp_level channel_tpl;
-
/* PDMA properties, valid for PSIL_EP_PDMA_* */
unsigned pdma_acc32:1;
unsigned pdma_burst:1;
+
+ u32 psd_size;
+ /* PKTDMA mapped channel */
+ s16 mapped_channel_id;
+ /* PKTDMA tflow and rflow ranges for mapped channel */
+ u16 flow_start;
+ u16 flow_num;
+ s16 default_flow_id;
};
int psil_set_new_ep_config(struct device *dev, const char *name,
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 5eb34ad973a7..1e491c5dcac2 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -26,6 +26,11 @@ struct k3_udma_glue_tx_channel;
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id);
+
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
struct cppi5_host_desc_t *desc_tx,
@@ -41,6 +46,12 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
enum {
K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
@@ -103,6 +114,11 @@ struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id);
+
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
@@ -130,5 +146,11 @@ int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
#endif /* K3_UDMA_GLUE_H_ */
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
deleted file mode 100644
index 25cab62a28c4..000000000000
--- a/include/linux/dma/mmp-pdma.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MMP_PDMA_H_
-#define _MMP_PDMA_H_
-
-struct dma_chan;
-
-#ifdef CONFIG_MMP_PDMA
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif /* _MMP_PDMA_H_ */
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
new file mode 100644
index 000000000000..6680dd1a43c6
--- /dev/null
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#ifndef QCOM_GPI_DMA_H
+#define QCOM_GPI_DMA_H
+
+/**
+ * enum spi_transfer_cmd - spi transfer commands
+ */
+enum spi_transfer_cmd {
+ SPI_TX = 1,
+ SPI_RX,
+ SPI_DUPLEX,
+};
+
+/**
+ * struct gpi_spi_config - spi config for peripheral
+ *
+ * @loopback_en: spi loopback enable when set
+ * @clock_pol_high: clock polarity
+ * @data_pol_high: data polarity
+ * @pack_en: process tx/rx buffers as packed
+ * @word_len: spi word length
+ * @clk_div: source clock divider
+ * @clk_src: serial clock
+ * @cmd: spi cmd
+ * @fragmentation: keep CS asserted at end of sequence
+ * @cs: chip select toggle
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ */
+struct gpi_spi_config {
+ u8 set_config;
+ u8 loopback_en;
+ u8 clock_pol_high;
+ u8 data_pol_high;
+ u8 pack_en;
+ u8 word_len;
+ u8 fragmentation;
+ u8 cs;
+ u32 clk_div;
+ u32 clk_src;
+ enum spi_transfer_cmd cmd;
+ u32 rx_len;
+};
+
+enum i2c_op {
+ I2C_WRITE = 1,
+ I2C_READ,
+};
+
+/**
+ * struct gpi_i2c_config - i2c config for peripheral
+ *
+ * @pack_enable: process tx/rx buffers as packed
+ * @cycle_count: clock cycles to be sent
+ * @high_count: high period of clock
+ * @low_count: low period of clock
+ * @clk_div: source clock divider
+ * @addr: i2c bus address
+ * @stretch: stretch the clock at eot
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ * @op: i2c cmd
+ * @multi_msg: true if this message is part of a multi-message i2c transfer
+ */
+struct gpi_i2c_config {
+ u8 set_config;
+ u8 pack_enable;
+ u8 cycle_count;
+ u8 high_count;
+ u8 low_count;
+ u8 addr;
+ u8 stretch;
+ u16 clk_div;
+ u32 rx_len;
+ enum i2c_op op;
+ bool multi_msg;
+};
+
+#endif /* QCOM_GPI_DMA_H */
diff --git a/include/linux/dma/qcom_adm.h b/include/linux/dma/qcom_adm.h
new file mode 100644
index 000000000000..af20df674f0c
--- /dev/null
+++ b/include/linux/dma/qcom_adm.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_DMA_QCOM_ADM_H
+#define __LINUX_DMA_QCOM_ADM_H
+
+#include <linux/types.h>
+
+struct qcom_adm_peripheral_config {
+ u32 crci;
+ u32 mux;
+};
+
+#endif /* __LINUX_DMA_QCOM_ADM_H */
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
index 5896441ee604..c53c0f6e3b1a 100644
--- a/include/linux/dma/ti-cppi5.h
+++ b/include/linux/dma/ti-cppi5.h
@@ -47,7 +47,7 @@ struct cppi5_host_desc_t {
u32 buf_info1;
u32 org_buf_len;
u64 org_buf_ptr;
- u32 epib[0];
+ u32 epib[];
} __packed;
#define CPPI5_DESC_MIN_ALIGN (16U)
@@ -139,7 +139,7 @@ struct cppi5_desc_epib_t {
*/
struct cppi5_monolithic_desc_t {
struct cppi5_desc_hdr_t hdr;
- u32 epib[0];
+ u32 epib[];
};
#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
@@ -616,6 +616,7 @@ static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOL_ICNT0 BIT(4)
#define CPPI5_TR_CSF_EOP BIT(7)
/**
diff --git a/include/linux/dma/xilinx_dpdma.h b/include/linux/dma/xilinx_dpdma.h
new file mode 100644
index 000000000000..02a4adf8921b
--- /dev/null
+++ b/include/linux/dma/xilinx_dpdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_DMA_XILINX_DPDMA_H
+#define __LINUX_DMA_XILINX_DPDMA_H
+
+#include <linux/types.h>
+
+struct xilinx_dpdma_peripheral_config {
+ bool video_group;
+};
+
+#endif /* __LINUX_DMA_XILINX_DPDMA_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6fbd5c99e30c..752dbde4cec1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -230,12 +230,6 @@ enum sum_check_flags {
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
- * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @memcpy_count: transaction counter
- * @bytes_transferred: byte counter
- */
-
-/**
* enum dma_desc_metadata_mode - per descriptor metadata mode types supported
* @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
* client driver and it is attached (via the dmaengine_desc_attach_metadata()
@@ -291,6 +285,11 @@ enum dma_desc_metadata_mode {
DESC_METADATA_ENGINE = BIT(1),
};
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -357,11 +356,14 @@ struct dma_chan {
* @chan: driver channel device
* @device: sysfs device
* @dev_id: parent dma_device dev_id
+ * @chan_dma_dev: true if the channel uses a custom/different device for
+ * dma-mapping compared to the parent dma_device
*/
struct dma_chan_dev {
struct dma_chan *chan;
struct device device;
int dev_id;
+ bool chan_dma_dev;
};
/**
@@ -378,6 +380,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@@ -391,12 +394,12 @@ enum dma_slave_buswidth {
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
- * should be written (TX), if the source is memory this argument
+ * should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
@@ -415,9 +418,9 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
+ * @peripheral_config: peripheral configuration for programming the
+ * peripheral for a dmaengine transfer
+ * @peripheral_size: size of the peripheral configuration buffer
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -442,7 +445,8 @@ struct dma_slave_config {
u32 src_port_window_size;
u32 dst_port_window_size;
bool device_fc;
- unsigned int slave_id;
+ void *peripheral_config;
+ size_t peripheral_size;
};
/**
@@ -513,8 +517,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan)
return dev_name(&chan->dev->device);
}
-void dma_chan_cleanup(struct kref *kref);
-
/**
* typedef dma_filter_fn - callback filter for dma_request_channel
* @chan: channel to be reviewed
@@ -737,6 +739,8 @@ enum dmaengine_alignment {
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
};
/**
@@ -767,6 +771,7 @@ struct dma_filter {
/**
* struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
@@ -783,6 +788,7 @@ struct dma_filter {
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
@@ -796,10 +802,12 @@ struct dma_filter {
* @max_sg_burst: max number of SG list entries executed in a single burst
 * DMA transaction with no software intervention for reinitialization.
* Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
* number of allocated descriptors
+ * @device_router_config: optional callback for DMA router configuration
* @device_free_chan_resources: release DMA channel's resources
* @device_prep_dma_memcpy: prepares a memcpy operation
* @device_prep_dma_xor: prepares a xor operation
@@ -832,7 +840,6 @@ struct dma_filter {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
- * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @device_release: called sometime after dma_async_device_unregister() is
* called and there are no further references to this structure. This
* must be implemented to free resources however many existing drivers
@@ -840,6 +847,7 @@ struct dma_filter {
* @dbg_summary_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra,
* controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
*/
struct dma_device {
struct kref ref;
@@ -848,7 +856,7 @@ struct dma_device {
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
- dma_cap_mask_t cap_mask;
+ dma_cap_mask_t cap_mask;
enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
@@ -862,7 +870,6 @@ struct dma_device {
struct device *dev;
struct module *owner;
struct ida chan_ida;
- struct mutex chan_mutex; /* to protect chan_ida */
u32 src_addr_widths;
u32 dst_addr_widths;
@@ -874,6 +881,7 @@ struct dma_device {
enum dma_residue_granularity residue_granularity;
int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ int (*device_router_config)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -917,10 +925,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
- void (*device_caps)(struct dma_chan *chan,
- struct dma_slave_caps *caps);
- int (*device_config)(struct dma_chan *chan,
- struct dma_slave_config *config);
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
@@ -932,10 +938,8 @@ struct dma_device {
void (*device_issue_pending)(struct dma_chan *chan);
void (*device_release)(struct dma_device *dev);
/* debugfs support */
-#ifdef CONFIG_DEBUG_FS
void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
struct dentry *dbg_dev_root;
-#endif
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -949,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+ (direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
@@ -1019,6 +1024,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
@@ -1472,7 +1485,6 @@ void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
struct device_node *np);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
@@ -1502,11 +1514,6 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
{
return NULL;
}
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- const char *name)
-{
- return NULL;
-}
static inline struct dma_chan *dma_request_chan(struct device *dev,
const char *name)
{
@@ -1527,8 +1534,6 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
@@ -1577,6 +1582,15 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ return IS_ERR(ch) ? NULL : ch;
+}
+
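For callers migrating off the wrapper, the behavioural difference is error reporting: dma_request_chan() returns ERR_PTR() codes, including -EPROBE_DEFER, which the wrapper above flattens to NULL. A minimal sketch (helper name hypothetical):

static int my_request_rx(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "rx");
	if (IS_ERR(*chan))
		return PTR_ERR(*chan);	/* may be -EPROBE_DEFER; propagate it */
	return 0;
}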
static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
@@ -1610,4 +1624,13 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir)
return "invalid";
}
}
+
+static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
+{
+ if (chan->dev->chan_dma_dev)
+ return &chan->dev->device;
+
+ return chan->device->dev;
+}
+
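A hedged sketch of why this accessor matters: buffers must be mapped with the device that actually performs the DMA, which for channels with @chan_dma_dev set differs from the provider's parent device (helper name illustrative):

static dma_addr_t my_map_for_chan(struct dma_chan *chan, void *buf,
				  size_t len)
{
	struct device *dma_dev = dmaengine_get_dma_device(chan);

	return dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
}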
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 65565820328a..e34b601b71fd 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -18,11 +18,7 @@
struct acpi_dmar_header;
-#ifdef CONFIG_X86
-# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define DMAR_UNITS_SUPPORTED 64
-#endif
+#define DMAR_UNITS_SUPPORTED 1024
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
@@ -43,6 +39,7 @@ struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
+ unsigned long reg_size; /* size of register set */
struct dmar_dev_scope *devices;/* target device array */
int devices_cnt; /* target device count */
u16 segment; /* PCI domain */
@@ -109,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
@@ -121,7 +116,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
u16 segment, struct dmar_dev_scope *devices,
int count);
/* Intel IOMMU detection */
-extern int detect_intel_iommu(void);
+void detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
@@ -131,6 +126,14 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+#ifdef CONFIG_DMAR_DEBUG
+void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid);
+#else
+static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid) {}
+#endif
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
@@ -138,6 +141,7 @@ extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
@@ -149,6 +153,7 @@ static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
+#define dmar_parse_one_satc dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
@@ -187,71 +192,82 @@ static inline bool dmar_platform_optin(void)
return false;
}
+static inline void detect_intel_iommu(void)
+{
+}
+
#endif /* CONFIG_DMAR_TABLE */
struct irte {
union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 present : 1, /* 0 */
- fpd : 1, /* 1 */
- __res0 : 6, /* 2 - 6 */
- avail : 4, /* 8 - 11 */
- __res1 : 3, /* 12 - 14 */
- pst : 1, /* 15 */
- vector : 8, /* 16 - 23 */
- __res2 : 40; /* 24 - 63 */
- };
-
- /* Remapped mode */
- struct {
- __u64 r_present : 1, /* 0 */
- r_fpd : 1, /* 1 */
- dst_mode : 1, /* 2 */
- redir_hint : 1, /* 3 */
- trigger_mode : 1, /* 4 */
- dlvry_mode : 3, /* 5 - 7 */
- r_avail : 4, /* 8 - 11 */
- r_res0 : 4, /* 12 - 15 */
- r_vector : 8, /* 16 - 23 */
- r_res1 : 8, /* 24 - 31 */
- dest_id : 32; /* 32 - 63 */
- };
-
- /* Posted mode */
- struct {
- __u64 p_present : 1, /* 0 */
- p_fpd : 1, /* 1 */
- p_res0 : 6, /* 2 - 7 */
- p_avail : 4, /* 8 - 11 */
- p_res1 : 2, /* 12 - 13 */
- p_urgent : 1, /* 14 */
- p_pst : 1, /* 15 */
- p_vector : 8, /* 16 - 23 */
- p_res2 : 14, /* 24 - 37 */
- pda_l : 26; /* 38 - 63 */
- };
- __u64 low;
- };
-
- union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 sid : 16, /* 64 - 79 */
- sq : 2, /* 80 - 81 */
- svt : 2, /* 82 - 83 */
- __res3 : 44; /* 84 - 127 */
- };
-
- /* Posted mode*/
struct {
- __u64 p_sid : 16, /* 64 - 79 */
- p_sq : 2, /* 80 - 81 */
- p_svt : 2, /* 82 - 83 */
- p_res3 : 12, /* 84 - 95 */
- pda_h : 32; /* 96 - 127 */
+ union {
+ /* Shared between remapped and posted mode */
+ struct {
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 6 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
+ };
+ __u64 low;
+ };
+
+ union {
+ /* Shared between remapped and posted mode */
+ struct {
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
+ };
+ __u64 high;
+ };
};
- __u64 high;
+#ifdef CONFIG_IRQ_REMAP
+ __u128 irte;
+#endif
};
};
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 0aad774beaec..9f183a679277 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -26,12 +26,11 @@ struct dnotify_struct {
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
- FS_CREATE | FS_DN_RENAME |\
+ FS_CREATE | FS_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
-extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
-extern int fcntl_dirnotify(int, struct file *, unsigned long);
+extern int fcntl_dirnotify(int, struct file *, unsigned int);
#else
@@ -39,7 +38,7 @@ static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
-static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
{
return -EINVAL;
}
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
new file mode 100644
index 000000000000..d275736230b3
--- /dev/null
+++ b/include/linux/dpll.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_H__
+#define __DPLL_H__
+
+#include <uapi/linux/dpll.h>
+#include <linux/device.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+struct dpll_device;
+struct dpll_pin;
+
+struct dpll_device_ops {
+ int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack);
+ int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
+ s32 *temp, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_ops {
+ int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u64 frequency,
+ struct netlink_ext_ack *extack);
+ int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack);
+ int (*direction_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack);
+ int (*direction_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*prio_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack);
+ int (*prio_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 prio, struct netlink_ext_ack *extack);
+ int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const s32 phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_frequency {
+ u64 min;
+ u64 max;
+};
+
+#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ }
+
+#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val)
+#define DPLL_PIN_FREQUENCY_1PPS \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ)
+#define DPLL_PIN_FREQUENCY_10MHZ \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ)
+#define DPLL_PIN_FREQUENCY_IRIG_B \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ)
+#define DPLL_PIN_FREQUENCY_DCF77 \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ)
+
+struct dpll_pin_phase_adjust_range {
+ s32 min;
+ s32 max;
+};
+
+struct dpll_pin_properties {
+ const char *board_label;
+ const char *panel_label;
+ const char *package_label;
+ enum dpll_pin_type type;
+ unsigned long capabilities;
+ u32 freq_supported_num;
+ struct dpll_pin_frequency *freq_supported;
+ struct dpll_pin_phase_adjust_range phase_range;
+};
+
+#if IS_ENABLED(CONFIG_DPLL)
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev);
+#else
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
+{
+ return 0;
+}
+#endif
+
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+
+void dpll_device_put(struct dpll_device *dpll);
+
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv);
+
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv);
+
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ const struct dpll_pin_properties *prop);
+
+int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_put(struct dpll_pin *pin);
+
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+int dpll_device_change_ntf(struct dpll_device *dpll);
+
+int dpll_pin_change_ntf(struct dpll_pin *pin);
+
+#endif
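The get/register split above fixes the driver bring-up order: obtain the reference-counted handles first, then attach ops and private data. A hedged bring-up sketch, reusing the invented my_pin_ops from the earlier fragment and assuming an invented my_dpll_ops; DPLL_TYPE_PPS and DPLL_PIN_TYPE_EXT are enum values from the dpll uapi header, which is not part of this diff:

static struct dpll_pin_frequency my_freqs[] = {
	DPLL_PIN_FREQUENCY_1PPS,
	DPLL_PIN_FREQUENCY_10MHZ,
};

static const struct dpll_pin_properties my_pin_prop = {
	.board_label = "SMA1",			/* invented */
	.type = DPLL_PIN_TYPE_EXT,
	.freq_supported_num = ARRAY_SIZE(my_freqs),
	.freq_supported = my_freqs,
	.phase_range = { .min = -500, .max = 500 },
};

struct my_priv {				/* invented driver state */
	struct dpll_device *dpll;
	struct dpll_pin *pin;
};

static int my_dpll_init(struct my_priv *priv, u64 clock_id)
{
	int err;

	priv->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
	if (IS_ERR(priv->dpll))
		return PTR_ERR(priv->dpll);

	err = dpll_device_register(priv->dpll, DPLL_TYPE_PPS,
				   &my_dpll_ops, priv);
	if (err)
		goto err_put_dev;

	priv->pin = dpll_pin_get(clock_id, 0, THIS_MODULE, &my_pin_prop);
	if (IS_ERR(priv->pin)) {
		err = PTR_ERR(priv->pin);
		goto err_unreg_dev;
	}

	err = dpll_pin_register(priv->dpll, priv->pin, &my_pin_ops, priv);
	if (err)
		goto err_put_pin;

	return 0;

err_put_pin:
	dpll_pin_put(priv->pin);
err_unreg_dev:
	dpll_device_unregister(priv->dpll, &my_dpll_ops, priv);
err_put_dev:
	dpll_device_put(priv->dpll);
	return err;
}

Teardown mirrors this in reverse: dpll_pin_unregister(), then dpll_pin_put(), dpll_device_unregister() and dpll_device_put().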
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 5755537b51b1..5468a2399d48 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -38,13 +38,6 @@
#endif
-extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.11"
-#define API_VERSION 1
-#define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 101
-
-
enum drbd_io_error_p {
EP_PASS_ON, /* FIXME should the better be named "Ignore"? */
EP_CALL_HELPER,
diff --git a/include/linux/drbd_config.h b/include/linux/drbd_config.h
new file mode 100644
index 000000000000..d215365c6bb1
--- /dev/null
+++ b/include/linux/drbd_config.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * drbd_config.h
+ * DRBD's compile time configuration.
+ */
+
+#ifndef DRBD_CONFIG_H
+#define DRBD_CONFIG_H
+
+extern const char *drbd_buildtag(void);
+
+#define REL_VERSION "8.4.11"
+#define PRO_VERSION_MIN 86
+#define PRO_VERSION_MAX 101
+
+#endif
diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h
index bd62efc29002..70682c058027 100644
--- a/include/linux/drbd_genl_api.h
+++ b/include/linux/drbd_genl_api.h
@@ -47,7 +47,7 @@ enum drbd_state_info_bcast_reason {
#undef linux
#include <linux/drbd.h>
-#define GENL_MAGIC_VERSION API_VERSION
+#define GENL_MAGIC_VERSION 1
#define GENL_MAGIC_FAMILY drbd
#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr)
#define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h>
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 9e33f7038bea..5b042fb427e9 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
drbd_limits.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
@@ -16,123 +16,123 @@
#define DEBUG_RANGE_CHECK 0
-#define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 255
-#define DRBD_MINOR_COUNT_DEF 32
+#define DRBD_MINOR_COUNT_MIN 1U
+#define DRBD_MINOR_COUNT_MAX 255U
+#define DRBD_MINOR_COUNT_DEF 32U
#define DRBD_MINOR_COUNT_SCALE '1'
-#define DRBD_VOLUME_MAX 65535
+#define DRBD_VOLUME_MAX 65534U
-#define DRBD_DIALOG_REFRESH_MIN 0
-#define DRBD_DIALOG_REFRESH_MAX 600
+#define DRBD_DIALOG_REFRESH_MIN 0U
+#define DRBD_DIALOG_REFRESH_MAX 600U
#define DRBD_DIALOG_REFRESH_SCALE '1'
/* valid port number */
-#define DRBD_PORT_MIN 1
-#define DRBD_PORT_MAX 0xffff
+#define DRBD_PORT_MIN 1U
+#define DRBD_PORT_MAX 0xffffU
#define DRBD_PORT_SCALE '1'
/* startup { */
/* if you want more than 3.4 days, disable */
-#define DRBD_WFC_TIMEOUT_MIN 0
-#define DRBD_WFC_TIMEOUT_MAX 300000
-#define DRBD_WFC_TIMEOUT_DEF 0
+#define DRBD_WFC_TIMEOUT_MIN 0U
+#define DRBD_WFC_TIMEOUT_MAX 300000U
+#define DRBD_WFC_TIMEOUT_DEF 0U
#define DRBD_WFC_TIMEOUT_SCALE '1'
-#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
-#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
-#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+#define DRBD_DEGR_WFC_TIMEOUT_MIN 0U
+#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000U
+#define DRBD_DEGR_WFC_TIMEOUT_DEF 0U
#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1'
-#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
-#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
-#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0U
+#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000U
+#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0U
#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1'
/* }*/
/* net { */
/* timeout, unit centi seconds
* more than one minute timeout is not useful */
-#define DRBD_TIMEOUT_MIN 1
-#define DRBD_TIMEOUT_MAX 600
-#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+#define DRBD_TIMEOUT_MIN 1U
+#define DRBD_TIMEOUT_MAX 600U
+#define DRBD_TIMEOUT_DEF 60U /* 6 seconds */
#define DRBD_TIMEOUT_SCALE '1'
/* If backing disk takes longer than disk_timeout, mark the disk as failed */
-#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
-#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
-#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+#define DRBD_DISK_TIMEOUT_MIN 0U /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000U /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0U /* disabled */
#define DRBD_DISK_TIMEOUT_SCALE '1'
/* active connection retries when C_WF_CONNECTION */
-#define DRBD_CONNECT_INT_MIN 1
-#define DRBD_CONNECT_INT_MAX 120
-#define DRBD_CONNECT_INT_DEF 10 /* seconds */
+#define DRBD_CONNECT_INT_MIN 1U
+#define DRBD_CONNECT_INT_MAX 120U
+#define DRBD_CONNECT_INT_DEF 10U /* seconds */
#define DRBD_CONNECT_INT_SCALE '1'
/* keep-alive probes when idle */
-#define DRBD_PING_INT_MIN 1
-#define DRBD_PING_INT_MAX 120
-#define DRBD_PING_INT_DEF 10
+#define DRBD_PING_INT_MIN 1U
+#define DRBD_PING_INT_MAX 120U
+#define DRBD_PING_INT_DEF 10U
#define DRBD_PING_INT_SCALE '1'
/* timeout for the ping packets.*/
-#define DRBD_PING_TIMEO_MIN 1
-#define DRBD_PING_TIMEO_MAX 300
-#define DRBD_PING_TIMEO_DEF 5
+#define DRBD_PING_TIMEO_MIN 1U
+#define DRBD_PING_TIMEO_MAX 300U
+#define DRBD_PING_TIMEO_DEF 5U
#define DRBD_PING_TIMEO_SCALE '1'
/* max number of write requests between write barriers */
-#define DRBD_MAX_EPOCH_SIZE_MIN 1
-#define DRBD_MAX_EPOCH_SIZE_MAX 20000
-#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+#define DRBD_MAX_EPOCH_SIZE_MIN 1U
+#define DRBD_MAX_EPOCH_SIZE_MAX 20000U
+#define DRBD_MAX_EPOCH_SIZE_DEF 2048U
#define DRBD_MAX_EPOCH_SIZE_SCALE '1'
/* I don't think that a tcp send buffer of more than 10M is useful */
-#define DRBD_SNDBUF_SIZE_MIN 0
-#define DRBD_SNDBUF_SIZE_MAX (10<<20)
-#define DRBD_SNDBUF_SIZE_DEF 0
+#define DRBD_SNDBUF_SIZE_MIN 0U
+#define DRBD_SNDBUF_SIZE_MAX (10U<<20)
+#define DRBD_SNDBUF_SIZE_DEF 0U
#define DRBD_SNDBUF_SIZE_SCALE '1'
-#define DRBD_RCVBUF_SIZE_MIN 0
-#define DRBD_RCVBUF_SIZE_MAX (10<<20)
-#define DRBD_RCVBUF_SIZE_DEF 0
+#define DRBD_RCVBUF_SIZE_MIN 0U
+#define DRBD_RCVBUF_SIZE_MAX (10U<<20)
+#define DRBD_RCVBUF_SIZE_DEF 0U
#define DRBD_RCVBUF_SIZE_SCALE '1'
/* @4k PageSize -> 128kB - 512MB */
-#define DRBD_MAX_BUFFERS_MIN 32
-#define DRBD_MAX_BUFFERS_MAX 131072
-#define DRBD_MAX_BUFFERS_DEF 2048
+#define DRBD_MAX_BUFFERS_MIN 32U
+#define DRBD_MAX_BUFFERS_MAX 131072U
+#define DRBD_MAX_BUFFERS_DEF 2048U
#define DRBD_MAX_BUFFERS_SCALE '1'
/* @4k PageSize -> 4kB - 512MB */
-#define DRBD_UNPLUG_WATERMARK_MIN 1
-#define DRBD_UNPLUG_WATERMARK_MAX 131072
+#define DRBD_UNPLUG_WATERMARK_MIN 1U
+#define DRBD_UNPLUG_WATERMARK_MAX 131072U
#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
#define DRBD_UNPLUG_WATERMARK_SCALE '1'
/* 0 is disabled.
* 200 should be more than enough even for very short timeouts */
-#define DRBD_KO_COUNT_MIN 0
-#define DRBD_KO_COUNT_MAX 200
-#define DRBD_KO_COUNT_DEF 7
+#define DRBD_KO_COUNT_MIN 0U
+#define DRBD_KO_COUNT_MAX 200U
+#define DRBD_KO_COUNT_DEF 7U
#define DRBD_KO_COUNT_SCALE '1'
/* } */
/* syncer { */
/* FIXME allow rate to be zero? */
-#define DRBD_RESYNC_RATE_MIN 1
+#define DRBD_RESYNC_RATE_MIN 1U
/* channel bonding 10 GbE, or other hardware */
#define DRBD_RESYNC_RATE_MAX (4 << 20)
-#define DRBD_RESYNC_RATE_DEF 250
+#define DRBD_RESYNC_RATE_DEF 250U
#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_AL_EXTENTS_MIN 67
+#define DRBD_AL_EXTENTS_MIN 67U
/* we use u16 as "slot number", (u16)~0 is "FREE".
* If you use >= 292 kB on-disk ring buffer,
* this is the maximum you can use: */
-#define DRBD_AL_EXTENTS_MAX 0xfffe
-#define DRBD_AL_EXTENTS_DEF 1237
+#define DRBD_AL_EXTENTS_MAX 0xfffeU
+#define DRBD_AL_EXTENTS_DEF 1237U
#define DRBD_AL_EXTENTS_SCALE '1'
#define DRBD_MINOR_NUMBER_MIN -1
@@ -147,9 +147,9 @@
* the upper limit with 64bit kernel, enough ram and flexible meta data
* is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
-#define DRBD_DISK_SIZE_MIN 0
-#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40))
-#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_MIN 0LLU
+#define DRBD_DISK_SIZE_MAX (1LLU * (2LLU << 40))
+#define DRBD_DISK_SIZE_DEF 0LLU /* = disabled = no user size... */
#define DRBD_DISK_SIZE_SCALE 's' /* sectors */
#define DRBD_ON_IO_ERROR_DEF EP_DETACH
@@ -162,39 +162,39 @@
#define DRBD_ON_CONGESTION_DEF OC_BLOCK
#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL
-#define DRBD_MAX_BIO_BVECS_MIN 0
-#define DRBD_MAX_BIO_BVECS_MAX 128
-#define DRBD_MAX_BIO_BVECS_DEF 0
+#define DRBD_MAX_BIO_BVECS_MIN 0U
+#define DRBD_MAX_BIO_BVECS_MAX 128U
+#define DRBD_MAX_BIO_BVECS_DEF 0U
#define DRBD_MAX_BIO_BVECS_SCALE '1'
-#define DRBD_C_PLAN_AHEAD_MIN 0
-#define DRBD_C_PLAN_AHEAD_MAX 300
-#define DRBD_C_PLAN_AHEAD_DEF 20
+#define DRBD_C_PLAN_AHEAD_MIN 0U
+#define DRBD_C_PLAN_AHEAD_MAX 300U
+#define DRBD_C_PLAN_AHEAD_DEF 20U
#define DRBD_C_PLAN_AHEAD_SCALE '1'
-#define DRBD_C_DELAY_TARGET_MIN 1
-#define DRBD_C_DELAY_TARGET_MAX 100
-#define DRBD_C_DELAY_TARGET_DEF 10
+#define DRBD_C_DELAY_TARGET_MIN 1U
+#define DRBD_C_DELAY_TARGET_MAX 100U
+#define DRBD_C_DELAY_TARGET_DEF 10U
#define DRBD_C_DELAY_TARGET_SCALE '1'
-#define DRBD_C_FILL_TARGET_MIN 0
-#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */
-#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */
+#define DRBD_C_FILL_TARGET_MIN 0U
+#define DRBD_C_FILL_TARGET_MAX (1U<<20) /* 500MByte in sec */
+#define DRBD_C_FILL_TARGET_DEF 100U /* Try to place 50KiB in socket send buffer during resync */
#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */
-#define DRBD_C_MAX_RATE_MIN 250
-#define DRBD_C_MAX_RATE_MAX (4 << 20)
-#define DRBD_C_MAX_RATE_DEF 102400
+#define DRBD_C_MAX_RATE_MIN 250U
+#define DRBD_C_MAX_RATE_MAX (4U << 20)
+#define DRBD_C_MAX_RATE_DEF 102400U
#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_C_MIN_RATE_MIN 0
-#define DRBD_C_MIN_RATE_MAX (4 << 20)
-#define DRBD_C_MIN_RATE_DEF 250
+#define DRBD_C_MIN_RATE_MIN 0U
+#define DRBD_C_MIN_RATE_MAX (4U << 20)
+#define DRBD_C_MIN_RATE_DEF 250U
#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_CONG_FILL_MIN 0
-#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
-#define DRBD_CONG_FILL_DEF 0
+#define DRBD_CONG_FILL_MIN 0U
+#define DRBD_CONG_FILL_MAX (10U<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF 0U
#define DRBD_CONG_FILL_SCALE 's' /* sectors */
#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
@@ -204,48 +204,48 @@
#define DRBD_PROTOCOL_DEF DRBD_PROT_C
-#define DRBD_DISK_BARRIER_DEF 0
-#define DRBD_DISK_FLUSHES_DEF 1
-#define DRBD_DISK_DRAIN_DEF 1
-#define DRBD_MD_FLUSHES_DEF 1
-#define DRBD_TCP_CORK_DEF 1
-#define DRBD_AL_UPDATES_DEF 1
+#define DRBD_DISK_BARRIER_DEF 0U
+#define DRBD_DISK_FLUSHES_DEF 1U
+#define DRBD_DISK_DRAIN_DEF 1U
+#define DRBD_MD_FLUSHES_DEF 1U
+#define DRBD_TCP_CORK_DEF 1U
+#define DRBD_AL_UPDATES_DEF 1U
/* We used to ignore the discard_zeroes_data setting.
* To not change established (and expected) behaviour,
* by default assume that, for discard_zeroes_data=0,
* we can make that an effective discard_zeroes_data=1,
* if we only explicitly zero-out unaligned partial chunks. */
-#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1U
/* Some backends pretend to support WRITE SAME,
* but fail such requests when they are actually submitted.
* This is to tell DRBD to not even try. */
-#define DRBD_DISABLE_WRITE_SAME_DEF 0
+#define DRBD_DISABLE_WRITE_SAME_DEF 0U
-#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
-#define DRBD_ALWAYS_ASBP_DEF 0
-#define DRBD_USE_RLE_DEF 1
-#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
+#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0U
+#define DRBD_ALWAYS_ASBP_DEF 0U
+#define DRBD_USE_RLE_DEF 1U
+#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0U
-#define DRBD_AL_STRIPES_MIN 1
-#define DRBD_AL_STRIPES_MAX 1024
-#define DRBD_AL_STRIPES_DEF 1
+#define DRBD_AL_STRIPES_MIN 1U
+#define DRBD_AL_STRIPES_MAX 1024U
+#define DRBD_AL_STRIPES_DEF 1U
#define DRBD_AL_STRIPES_SCALE '1'
-#define DRBD_AL_STRIPE_SIZE_MIN 4
-#define DRBD_AL_STRIPE_SIZE_MAX 16777216
-#define DRBD_AL_STRIPE_SIZE_DEF 32
+#define DRBD_AL_STRIPE_SIZE_MIN 4U
+#define DRBD_AL_STRIPE_SIZE_MAX 16777216U
+#define DRBD_AL_STRIPE_SIZE_DEF 32U
#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
-#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
+#define DRBD_SOCKET_CHECK_TIMEO_MIN 0U
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
-#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
+#define DRBD_SOCKET_CHECK_TIMEO_DEF 0U
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
-#define DRBD_RS_DISCARD_GRANULARITY_MIN 0
-#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */
-#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */
+#define DRBD_RS_DISCARD_GRANULARITY_MIN 0U
+#define DRBD_RS_DISCARD_GRANULARITY_MAX (1U<<20) /* 1MiByte */
+#define DRBD_RS_DISCARD_GRANULARITY_DEF 0U /* disabled by default */
#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */
#endif
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 311aa04e7520..f3664ee12170 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -5,120 +5,27 @@
#ifndef _NET_DSA_8021Q_H
#define _NET_DSA_8021Q_H
+#include <net/dsa.h>
#include <linux/types.h>
-struct dsa_switch;
-struct sk_buff;
-struct net_device;
-struct packet_type;
+int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
-struct dsa_8021q_crosschip_link {
- struct list_head list;
- int port;
- struct dsa_switch *other_ds;
- int other_port;
- refcount_t refcount;
-};
+void dsa_tag_8021q_unregister(struct dsa_switch *ds);
-#define DSA_8021Q_N_SUBVLAN 8
+int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
-#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
+void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
- bool enabled);
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);
-int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links);
-
-int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links);
-
-struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
- u16 tpid, u16 tci);
-
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
-
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
-
-u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan);
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);
int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
-u16 dsa_8021q_rx_subvlan(u16 vid);
-
bool vid_is_dsa_8021q(u16 vid);
-#else
-
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
- bool enabled)
-{
- return 0;
-}
-
-int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
-{
- return 0;
-}
-
-int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
-{
- return 0;
-}
-
-struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
- u16 tpid, u16 tci)
-{
- return NULL;
-}
-
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
-{
- return 0;
-}
-
-int dsa_8021q_rx_switch_id(u16 vid)
-{
- return 0;
-}
-
-int dsa_8021q_rx_source_port(u16 vid)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_subvlan(u16 vid)
-{
- return 0;
-}
-
-bool vid_is_dsa_8021q(u16 vid)
-{
- return false;
-}
-
-#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
-
#endif /* _NET_DSA_8021Q_H */
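With the fallback stubs gone, callers of this API are expected to be built only alongside the tag driver. A sketch of a switch driver opting in (hook names invented; the TPID passed to dsa_tag_8021q_register() is driver-specific, ETH_P_8021Q is just an example):

static int my_connect_tag_protocol(struct dsa_switch *ds)
{
	return dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
}

static void my_disconnect_tag_protocol(struct dsa_switch *ds)
{
	dsa_tag_8021q_unregister(ds);
}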
diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h
new file mode 100644
index 000000000000..47545a948784
--- /dev/null
+++ b/include/linux/dsa/brcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and
+ * net/dsa/tag_brcm.c
+ */
+#ifndef _NET_DSA_BRCM_H
+#define _NET_DSA_BRCM_H
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | (q))
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+#endif
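The helpers pack the egress queue into bits 7:0 and the switch port into bits 15:8 of a single 16-bit value, so a (port, queue) pair round-trips losslessly. For example:

u16 v = BRCM_TAG_SET_PORT_QUEUE(5, 2);	/* (5 << 8) | 2 == 0x0502 */
int port = BRCM_TAG_GET_PORT(v);	/* 0x0502 >> 8 == 5 */
int queue = BRCM_TAG_GET_QUEUE(v);	/* 0x0502 & 0xff == 2 */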
diff --git a/include/linux/dsa/ksz_common.h b/include/linux/dsa/ksz_common.h
new file mode 100644
index 000000000000..576a99ca698d
--- /dev/null
+++ b/include/linux/dsa/ksz_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch tag common header
+ *
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_KSZ_COMMON_H_
+#define _NET_DSA_KSZ_COMMON_H_
+
+#include <net/dsa.h>
+
+/* All time stamps from the KSZ consist of 2 bits for seconds and 30 bits for
+ * nanoseconds. This is NOT the same as 32 bits for nanoseconds.
+ */
+#define KSZ_TSTAMP_SEC_MASK GENMASK(31, 30)
+#define KSZ_TSTAMP_NSEC_MASK GENMASK(29, 0)
+
+static inline ktime_t ksz_decode_tstamp(u32 tstamp)
+{
+ u64 ns = FIELD_GET(KSZ_TSTAMP_SEC_MASK, tstamp) * NSEC_PER_SEC +
+ FIELD_GET(KSZ_TSTAMP_NSEC_MASK, tstamp);
+
+ return ns_to_ktime(ns);
+}
+
+struct ksz_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ksz_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*hwtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct ksz_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_type;
+ bool update_correction;
+ u32 tstamp;
+};
+
+#define KSZ_SKB_CB(skb) \
+ ((struct ksz_skb_cb *)((skb)->cb))
+
+static inline struct ksz_tagger_data *
+ksz_tagger_data(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_KSZ_COMMON_H_ */
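Since the seconds field is only 2 bits wide, ksz_decode_tstamp() yields a time within a roughly 4-second window, presumably reconciled against the full PTP clock elsewhere. A worked decoding with an illustrative input value:

/* tstamp == 0x400001f4: the seconds field decodes to 1, the nanoseconds
 * field to 500, so the result is ns_to_ktime(1 * NSEC_PER_SEC + 500),
 * i.e. 1.000000500 s into the 4-second window.
 */
ktime_t t = ksz_decode_tstamp(0x400001f4);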
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
index 5a3470bcc8a7..b8fef35591aa 100644
--- a/include/linux/dsa/loop.h
+++ b/include/linux/dsa/loop.h
@@ -2,6 +2,7 @@
#ifndef DSA_LOOP_H
#define DSA_LOOP_H
+#include <linux/if_vlan.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <net/dsa.h>
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
new file mode 100644
index 000000000000..dca2969015d8
--- /dev/null
+++ b/include/linux/dsa/ocelot.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2019-2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_OCELOT_H
+#define _NET_DSA_TAG_OCELOT_H
+
+#include <linux/kthread.h>
+#include <linux/packing.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ u32 tstamp_lo;
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ocelot_8021q_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+};
+
+static inline struct ocelot_8021q_tagger_data *
+ocelot_8021q_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_OCELOT_8021Q);
+
+ return ds->tagger_data;
+}
+
+static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
+{
+ packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_len(void *extraction, u64 *len)
+{
+ u64 llen, wlen;
+
+ packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0);
+
+ *len = 60 * wlen + llen - 80;
+}
+
+static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port)
+{
+ packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class)
+{
+ packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type)
+{
+ packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci)
+{
+ packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass)
+{
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op)
+{
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class)
+{
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void seville_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_src(void *injection, u64 src)
+{
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
+{
+ packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
+{
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+
+#endif
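The set/get helpers above are thin wrappers around packing() at the bit offsets drawn in the layout diagrams. A sketch of preparing an injection header for sending a frame out one front port (function name and parameter values are illustrative; the real callers are the ocelot taggers):

static void my_build_injection_header(void *ifh, int port)
{
	memset(ifh, 0, OCELOT_TAG_LEN);
	ocelot_ifh_set_bypass(ifh, 1);		/* bypass the analyzer */
	ocelot_ifh_set_dest(ifh, BIT(port));	/* DEST is a port mask */
	ocelot_ifh_set_qos_class(ifh, 7);	/* highest priority class */
	ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
}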
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index dd93735ae228..b9dd35d4b8f5 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -14,6 +14,9 @@
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
#define ETH_P_SJA1105_META 0x0008
+#define ETH_P_SJA1110 0xdadc
+
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
@@ -25,43 +28,48 @@
/* Source and Destination MAC of follow-up meta frames.
* Whereas the choice of SMAC only affects the unique identification of the
* switch as sender of meta frames, the DMAC must be an address that is present
- * in the DSA master port's multicast MAC filter.
+ * in the DSA conduit port's multicast MAC filter.
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
* over L2 use this address for some purpose already.
*/
#define SJA1105_META_SMAC 0x222222222222ull
#define SJA1105_META_DMAC 0x0180C200000Eull
-#define SJA1105_HWTS_RX_EN 0
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
-/* Global tagger data: each struct sja1105_port has a reference to
- * the structure defined in struct sja1105_private.
- */
+struct sja1105_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+/* Global tagger data */
struct sja1105_tagger_data {
- struct sk_buff *stampable_skb;
- /* Protects concurrent access to the meta state machine
- * from taggers running on multiple ports on SMP systems
- */
- spinlock_t meta_lock;
- unsigned long state;
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
};
struct sja1105_skb_cb {
- u32 meta_tstamp;
+ struct sk_buff *clone;
+ u64 tstamp;
+ /* Only valid for packets cloned for 2-step TX timestamping */
+ u8 ts_id;
};
#define SJA1105_SKB_CB(skb) \
- ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
+ ((struct sja1105_skb_cb *)((skb)->cb))
-struct sja1105_port {
- u16 subvlan_map[DSA_8021Q_N_SUBVLAN];
- struct kthread_worker *xmit_worker;
- struct kthread_work xmit_work;
- struct sk_buff_head xmit_queue;
- struct sja1105_tagger_data *data;
- struct dsa_port *dp;
- bool hwts_tx_en;
- u16 xmit_tpid;
-};
+static inline struct sja1105_tagger_data *
+sja1105_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1105 &&
+ ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1110);
+
+ return ds->tagger_data;
+}
#endif /* _NET_DSA_SJA1105_H */
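The accessor replaces the old per-port struct: the switch driver publishes its callbacks through the shared tagger data, typically from its connect_tag_protocol hook so the BUG_ON above cannot trigger. A sketch with invented handler names:

static void my_deferred_xmit(struct kthread_work *work);	/* invented */
static void my_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
			   enum sja1110_meta_tstamp dir, u64 tstamp);

static int my_connect_tag_protocol(struct dsa_switch *ds,
				   enum dsa_tag_protocol proto)
{
	struct sja1105_tagger_data *td;

	if (proto != DSA_TAG_PROTO_SJA1105 &&
	    proto != DSA_TAG_PROTO_SJA1110)
		return -EPROTONOSUPPORT;

	td = sja1105_tagger_data(ds);
	td->xmit_work_fn = my_deferred_xmit;
	td->meta_tstamp_handler = my_meta_tstamp;

	return 0;
}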
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
new file mode 100644
index 000000000000..ee657452f122
--- /dev/null
+++ b/include/linux/dsa/tag_qca.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TAG_QCA_H
+#define __TAG_QCA_H
+
+#include <linux/types.h>
+
+struct dsa_switch;
+struct sk_buff;
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
+
+/* Packet type for recv */
+#define QCA_HDR_RECV_TYPE_NORMAL 0x0
+#define QCA_HDR_RECV_TYPE_MIB 0x1
+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
+
+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
+
+/* Packet type for xmit */
+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
+
+/* Check code for a valid mgmt packet. The switch will ignore the packet
+ * if this is wrong.
+ */
+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
+
+/* Specific define for in-band MDIO read/write with Ethernet packet */
+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 bytes for the seq */
+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 bytes for the command */
+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 bytes of the mdio data */
+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
+ QCA_HDR_MGMT_COMMAND_LEN + \
+ QCA_HDR_MGMT_DATA1_LEN)
+
+#define QCA_HDR_MGMT_DATA2_LEN 28 /* Remaining 28 bytes of the mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN 18 /* Padding to reach the minimum Ethernet frame size */
+
+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
+ QCA_HDR_LEN + \
+ QCA_HDR_MGMT_DATA2_LEN + \
+ QCA_HDR_MGMT_PADDING_LEN)
+
+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
+
+/* Special struct emulating an Ethernet header */
+struct qca_mgmt_ethhdr {
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4byte mdio */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+enum mdio_cmd {
+ MDIO_WRITE = 0x0,
+ MDIO_READ
+};
+
+struct mib_ethhdr {
+ __le32 data[3]; /* first 3 mib counter */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+struct qca_tagger_data {
+ void (*rw_reg_ack_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+ void (*mib_autocast_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+};
+
+#endif /* __TAG_QCA_H */
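The XMIT masks are laid out for <linux/bitfield.h>'s FIELD_PREP(); note that QCA_HDR_XMIT_DP_BIT carries a destination-port bitmap, not a port number. A sketch of assembling the 2-byte tag for a CPU-originated frame (function name invented; the value is stored big-endian on the wire, matching the __be16 hdr fields above):

static u16 my_qca_xmit_hdr(int port)
{
	return FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION) |
	       FIELD_PREP(QCA_HDR_XMIT_FROM_CPU, 1) |
	       FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(port));
}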
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
new file mode 100644
index 000000000000..a4a13514b730
--- /dev/null
+++ b/include/linux/dtpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_H__
+#define ___DTPM_H__
+
+#include <linux/powercap.h>
+
+#define MAX_DTPM_DESCR 8
+#define MAX_DTPM_CONSTRAINTS 1
+
+struct dtpm {
+ struct powercap_zone zone;
+ struct dtpm *parent;
+ struct list_head sibling;
+ struct list_head children;
+ struct dtpm_ops *ops;
+ unsigned long flags;
+ u64 power_limit;
+ u64 power_max;
+ u64 power_min;
+ int weight;
+};
+
+struct dtpm_ops {
+ u64 (*set_power_uw)(struct dtpm *, u64);
+ u64 (*get_power_uw)(struct dtpm *);
+ int (*update_power_uw)(struct dtpm *);
+ void (*release)(struct dtpm *);
+};
+
+struct device_node;
+
+struct dtpm_subsys_ops {
+ const char *name;
+ int (*init)(void);
+ void (*exit)(void);
+ int (*setup)(struct dtpm *, struct device_node *);
+};
+
+enum DTPM_NODE_TYPE {
+ DTPM_NODE_VIRTUAL = 0,
+ DTPM_NODE_DT,
+};
+
+struct dtpm_node {
+ enum DTPM_NODE_TYPE type;
+ const char *name;
+ struct dtpm_node *parent;
+};
+
+static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
+{
+ return container_of(zone, struct dtpm, zone);
+}
+
+int dtpm_update_power(struct dtpm *dtpm);
+
+int dtpm_release_zone(struct powercap_zone *pcz);
+
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops);
+
+void dtpm_unregister(struct dtpm *dtpm);
+
+int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);
+
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table);
+
+void dtpm_destroy_hierarchy(void);
+#endif
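dtpm_create_hierarchy() takes an of_device_id table whose .data points at an array of struct dtpm_node, terminated by an entry with a NULL name, describing the tree to instantiate. A hedged sketch in that style (compatible string and all names invented):

static struct dtpm_node my_hierarchy[] = {
	[0] = { .name = "package", .type = DTPM_NODE_VIRTUAL },
	[1] = { .name = "cpus", .type = DTPM_NODE_VIRTUAL,
		.parent = &my_hierarchy[0] },
	{ },
};

static struct of_device_id my_dtpm_match_table[] = {
	{ .compatible = "vendor,my-soc", .data = my_hierarchy },
	{},
};

static int __init my_dtpm_init(void)
{
	return dtpm_create_hierarchy(my_dtpm_match_table);
}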
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index aa9ff9e1c0b3..4fcbf4d4fd0a 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -6,6 +6,8 @@
#include <linux/jump_label.h>
#endif
+#include <linux/build_bug.h>
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -21,6 +23,9 @@ struct _ddebug {
const char *filename;
const char *format;
unsigned int lineno:18;
+#define CLS_BITS 6
+ unsigned int class_id:CLS_BITS;
+#define _DPRINTK_CLASS_DFLT ((1 << CLS_BITS) - 1)
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
@@ -32,6 +37,13 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
+ _DPRINTK_FLAGS_INCL_SOURCENAME)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
@@ -46,18 +58,88 @@ struct _ddebug {
#endif
} __attribute__((aligned(8)));
+enum class_map_type {
+ DD_CLASS_TYPE_DISJOINT_BITS,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_BITS: classes are independent, one per bit.
+ * expecting hex input. Built for drm.debug, basis for other types.
+ */
+ DD_CLASS_TYPE_LEVEL_NUM,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NUM: input is numeric level, 0-N.
+ * N turns on just bits N-1 .. 0, so N=0 turns all bits off.
+ */
+ DD_CLASS_TYPE_DISJOINT_NAMES,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * classes are independent, like _DISJOINT_BITS.
+ */
+ DD_CLASS_TYPE_LEVEL_NAMES,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * intended for names like: INFO,DEBUG,TRACE, with a module prefix
+ * avoid EMERG,ALERT,CRIT,ERR,WARNING: they're not debug
+ */
+};
+
+struct ddebug_class_map {
+ struct list_head link;
+ struct module *mod;
+ const char *mod_name; /* needed for builtins */
+ const char **class_names;
+ const int length;
+ const int base; /* index of 1st .class_id, allows split/shared space */
+ enum class_map_type map_type;
+};
+
+/**
+ * DECLARE_DYNDBG_CLASSMAP - declare classnames known by a module
+ * @_var: a struct ddebug_class_map, passed to module_param_cb
+ * @_maptype: enum class_map_type, chooses bits/verbose, numeric/symbolic
+ * @_base: offset of 1st class-name, splits .class_id space
+ * @...: class-names used to control class'd prdbgs
+ */
+#define DECLARE_DYNDBG_CLASSMAP(_var, _maptype, _base, ...) \
+ static const char *_var##_classnames[] = { __VA_ARGS__ }; \
+ static struct ddebug_class_map __aligned(8) __used \
+ __section("__dyndbg_classes") _var = { \
+ .mod = THIS_MODULE, \
+ .mod_name = KBUILD_MODNAME, \
+ .base = _base, \
+ .map_type = _maptype, \
+ .length = NUM_TYPE_ARGS(char*, __VA_ARGS__), \
+ .class_names = _var##_classnames, \
+ }
+#define NUM_TYPE_ARGS(eltype, ...) \
+ (sizeof((eltype[]){__VA_ARGS__}) / sizeof(eltype))
+
+/* encapsulate linker provided built-in (or module) dyndbg data */
+struct _ddebug_info {
+ struct _ddebug *descs;
+ struct ddebug_class_map *classes;
+ unsigned int num_descs;
+ unsigned int num_classes;
+};
+
+struct ddebug_class_param {
+ union {
+ unsigned long *bits;
+ unsigned int *lvl;
+ };
+ char flags[8];
+ const struct ddebug_class_map *map;
+};
+/*
+ * pr_debug() and friends are enabled either globally (CONFIG_DYNAMIC_DEBUG)
+ * or selectively per module (CONFIG_DYNAMIC_DEBUG_CORE plus
+ * DYNAMIC_DEBUG_MODULE).
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname);
-extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
-extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
- const char *modname);
-
struct device;
extern __printf(3, 4)
@@ -78,17 +160,23 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
const struct ib_device *ibdev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
- __section(__dyndbg) name = { \
+ __section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
+ .class_id = cls, \
_DPRINTK_KEY_INIT \
- }
+ }; \
+ BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
+ "classid value overflow")
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, _DPRINTK_CLASS_DFLT, fmt)
#ifdef CONFIG_JUMP_LABEL
@@ -105,7 +193,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
static_branch_unlikely(&descriptor.key.dd_key_false)
#endif
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#define _DPRINTK_KEY_INIT
@@ -117,19 +205,36 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
#endif
-#endif
-
-#define __dynamic_func_call(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(&id, ##__VA_ARGS__); \
-} while (0)
+#endif /* CONFIG_JUMP_LABEL */
-#define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
+/*
+ * Factory macros: ($prefix)dynamic_func_call($suffix)
+ *
+ * Lower layer (with __ prefix) gets the callsite metadata, and wraps
+ * the func inside a debug-branch/static-key construct. Upper layer
+ * (with _ prefix) does the UNIQUE_ID once, so that lower can ref the
+ * name/label multiple times, and tie the elements together.
+ * Multiple flavors:
+ * (|_cls): adds in _DPRINTK_CLASS_DFLT as needed
+ * (|_no_desc): the former passes the callsite descriptor as 1st arg (for prdbgs)
+ */
+#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(__VA_ARGS__); \
+ func(&id, ##__VA_ARGS__); \
+} while (0)
+#define __dynamic_func_call(id, fmt, func, ...) \
+ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) \
+ func(__VA_ARGS__); \
} while (0)
+#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
+ fmt, func, ##__VA_ARGS__)
/*
* "Factory macro" for generating a call to func, guarded by a
@@ -139,22 +244,33 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
* the varargs. Note that fmt is repeated in invocations of this
* macro.
*/
+#define _dynamic_func_call_cls(cls, fmt, func, ...) \
+ __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
#define _dynamic_func_call(fmt, func, ...) \
- __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+ _dynamic_func_call_cls(_DPRINTK_CLASS_DFLT, fmt, func, ##__VA_ARGS__)
+
/*
* A variant that does the same, except that the descriptor is not
* passed as the first argument to the function; it is only called
* with precisely the macro's varargs.
*/
-#define _dynamic_func_call_no_desc(fmt, func, ...) \
- __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+#define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(__UNIQUE_ID(ddebug), cls, fmt, \
+ func, ##__VA_ARGS__)
+#define _dynamic_func_call_no_desc(fmt, func, ...) \
+ _dynamic_func_call_cls_no_desc(_DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define dynamic_pr_debug_cls(cls, fmt, ...) \
+ _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_pr_debug(fmt, ...) \
- _dynamic_func_call(fmt, __dynamic_pr_debug, \
+ _dynamic_func_call(fmt, __dynamic_pr_debug, \
pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_dev_dbg(dev, fmt, ...) \
- _dynamic_func_call(fmt,__dynamic_dev_dbg, \
+ _dynamic_func_call(fmt, __dynamic_dev_dbg, \
dev, fmt, ##__VA_ARGS__)
#define dynamic_netdev_dbg(dev, fmt, ...) \
@@ -172,26 +288,50 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
-#else
+/* for test only; in general, expect drm.debug-style macro wrappers */
+#define __pr_debug_cls(cls, fmt, ...) do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
+ "expecting constant class int/enum"); \
+ dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#else /* !(CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE)) */
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/printk.h>
-static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname)
-{
- return 0;
-}
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
-static inline int ddebug_remove_module(const char *mod)
-{
- return 0;
-}
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
+
+#endif /* CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE) */
+
+
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+
+extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname);
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+#else
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
@@ -200,16 +340,15 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
return -EINVAL;
}
-#define dynamic_pr_debug(fmt, ...) \
- do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
-#define dynamic_dev_dbg(dev, fmt, ...) \
- do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
-#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
- do { if (0) \
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii); \
- } while (0)
-#endif
+struct kernel_param;
+static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{ return 0; }
+static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{ return 0; }
#endif
+
+
+extern const struct kernel_param_ops param_ops_dyndbg_classes;
+
+#endif /* _DYNAMIC_DEBUG_H */
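The classmap pieces tie together the way the kernel's own test module uses them: a module declares its class names, binds them to a module parameter via param_ops_dyndbg_classes, then issues class'd debug calls. A sketch with invented module state:

enum { MY_CORE, MY_IO };	/* class ids, starting at _base 0 */

DECLARE_DYNDBG_CLASSMAP(my_classmap, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"MY_CORE", "MY_IO");

static unsigned long my_debug_bits;
static struct ddebug_class_param my_debug_param = {
	.bits = &my_debug_bits,
	.flags = "p",
	.map = &my_classmap,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &my_debug_param, 0600);

static void my_fn(void)
{
	/* emitted only while class MY_IO is enabled via the param */
	__pr_debug_cls(MY_IO, "io event %d\n", 42);
}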
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 407c2f281b64..5693a4be0d9a 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,14 +38,22 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
#include <asm/bug.h>
+#define DQL_HIST_LEN 4
+#define DQL_HIST_ENT(dql, idx) ((dql)->history[(idx) % DQL_HIST_LEN])
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
unsigned int adj_limit; /* limit + num_completed */
unsigned int last_obj_cnt; /* Count at last queuing */
+ unsigned long history_head; /* top 58 bits of jiffies */
+ /* stall entries, a bit per entry */
+ unsigned long history[DQL_HIST_LEN];
+
/* Fields accessed only by completion path (dql_completed) */
unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
@@ -62,6 +70,13 @@ struct dql {
unsigned int max_limit; /* Max limit */
unsigned int min_limit; /* Minimum limit */
unsigned int slack_hold_time; /* Time to measure slack */
+
+ /* Stall threshold (in jiffies), defined by user */
+ unsigned short stall_thrs;
+ /* Longest stall detected, reported to user */
+ unsigned short stall_max;
+ unsigned long last_reap; /* Last reap (in jiffies) */
+ unsigned long stall_cnt; /* Number of stalls */
};
/* Set some static maximums */
@@ -74,6 +89,8 @@ struct dql {
*/
static inline void dql_queued(struct dql *dql, unsigned int count)
{
+ unsigned long map, now, now_hi, i;
+
BUG_ON(count > DQL_MAX_OBJECT);
dql->last_obj_cnt = count;
@@ -86,6 +103,34 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
barrier();
dql->num_queued += count;
+
+ now = jiffies;
+ now_hi = now / BITS_PER_LONG;
+
+ /* The following code sets a bit in the ring buffer, where each
+ * bit tracks the time at which a packet was queued. The dql->history
+ * buffer covers DQL_HIST_LEN * BITS_PER_LONG time (jiffies) slots.
+ */
+ if (unlikely(now_hi != dql->history_head)) {
+ /* About to reuse slots, clear them */
+ for (i = 0; i < DQL_HIST_LEN; i++) {
+ /* Multiplication masks high bits */
+ if (now_hi * BITS_PER_LONG ==
+ (dql->history_head + i) * BITS_PER_LONG)
+ break;
+ DQL_HIST_ENT(dql, dql->history_head + i + 1) = 0;
+ }
+ /* pairs with smp_rmb() in dql_check_stall() */
+ smp_wmb();
+ WRITE_ONCE(dql->history_head, now_hi);
+ }
+
+ /* __set_bit() does not guarantee WRITE_ONCE() semantics */
+ map = DQL_HIST_ENT(dql, now_hi);
+
+ /* Populate the history with an entry (bit) per queued packet */
+ if (!(map & BIT_MASK(now)))
+ WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
}
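/* Concretely, each jiffy owns one bit: now / BITS_PER_LONG selects the
 * ring slot (mod DQL_HIST_LEN via DQL_HIST_ENT()) and BIT_MASK(now)
 * selects the bit within it. Worked example with invented numbers, on
 * a 64-bit system (BITS_PER_LONG == 64) at jiffies == 1000:
 *   now_hi = 1000 / 64 = 15, ring slot 15 % 4 = 3,
 *   BIT_MASK(1000) = 1UL << (1000 % 64) = 1UL << 40,
 * so dql_queued() sets bit 40 of dql->history[3]; the whole ring spans
 * 4 * 64 = 256 jiffies before slots are recycled.
 */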
/* Returns how many objects can be queued, < 0 indicates over limit. */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 15e8f3d8a895..b4ee8961e623 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -30,7 +30,7 @@ struct device;
extern int edac_op_state;
-struct bus_type *edac_get_sysfs_subsys(void);
+const struct bus_type *edac_get_sysfs_subsys(void);
static inline void opstate_init(void)
{
@@ -175,11 +175,19 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
* @MEM_LRDDR3: Load-Reduced DDR3 memory.
+ * @MEM_LPDDR3: Low-Power DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_LPDDR4: Low-Power DDR4 memory.
+ * @MEM_DDR5: Unbuffered DDR5 RAM
+ * @MEM_RDDR5: Registered DDR5 RAM
+ * @MEM_LRDDR5: Load-Reduced DDR5 memory.
* @MEM_NVDIMM: Non-volatile RAM
+ * @MEM_WIO2: Wide I/O 2.
+ * @MEM_HBM2: High Bandwidth Memory Gen 2.
+ * @MEM_HBM3: High Bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -200,10 +208,18 @@ enum mem_type {
MEM_DDR3,
MEM_RDDR3,
MEM_LRDDR3,
+ MEM_LPDDR3,
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_LPDDR4,
+ MEM_DDR5,
+ MEM_RDDR5,
+ MEM_LRDDR5,
MEM_NVDIMM,
+ MEM_WIO2,
+ MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -217,19 +233,27 @@ enum mem_type {
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
-#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
-#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
-#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
+#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
+#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
- * enum edac-type - Error Detection and Correction capabilities and mode
+ * enum edac_type - Error Detection and Correction capabilities and mode
* @EDAC_UNKNOWN: Unknown if ECC is available
* @EDAC_NONE: Doesn't support ECC
* @EDAC_RESERVED: Reserved ECC type
@@ -309,7 +333,7 @@ enum scrub_type {
#define OP_OFFLINE 0x300
/**
- * enum edac_mc_layer - memory controller hierarchy layer
+ * enum edac_mc_layer_type - memory controller hierarchy layer
*
* @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
* @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
@@ -471,7 +495,7 @@ struct edac_raw_error_desc {
*/
struct mem_ctl_info {
struct device dev;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct list_head link; /* for global list of mem_ctl_info structs */
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index eec7928ff8fe..34c2175e6a1e 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -10,12 +10,17 @@ struct eeprom_93xx46_platform_data {
#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
#define EE_READONLY 0x08 /* forbid writing */
+#define EE_SIZE1K 0x10 /* 1 Kbit of data, that is a 93xx46 */
+#define EE_SIZE2K 0x20 /* 2 Kbit of data, that is a 93xx56 */
+#define EE_SIZE4K 0x40 /* 4 Kbit of data, that is a 93xx66 */
unsigned int quirks;
/* Single word read transfers only; no sequential read. */
#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
/* Instructions such as EWEN are (addrlen + 2) in length. */
#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
/*
* optional hooks to control additional logic
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 73db1ae04cef..d59b0947fba0 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -24,21 +24,23 @@
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
-#include <linux/screen_info.h>
#include <asm/page.h>
+struct screen_info;
+
#define EFI_SUCCESS 0
-#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1)))
#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -72,8 +74,10 @@ typedef void *efi_handle_t;
*/
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
- GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, d } }
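/* A worked expansion of the initializer above (illustrative value only):
 * EFI_GUID(0x12345678, 0x9abc, 0xdef0,
 *          0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08)
 * stores the bytes 78 56 34 12 bc 9a f0 de 01 02 03 04 05 06 07 08,
 * i.e. the three leading fields little-endian and the trailing eight
 * bytes verbatim, matching the UEFI in-memory GUID layout.
 */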
/*
* Generic EFI table header
@@ -106,7 +110,8 @@ typedef struct {
#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
#define EFI_PAL_CODE 13
#define EFI_PERSISTENT_MEMORY 14
-#define EFI_MAX_MEMORY_TYPE 15
+#define EFI_UNACCEPTED_MEMORY 15
+#define EFI_MAX_MEMORY_TYPE 16
/* Attribute values: */
#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
@@ -122,6 +127,7 @@ typedef struct {
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
+#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -145,6 +151,52 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER */
+struct efi_manage_capsule_header {
+ u32 ver;
+ u16 emb_drv_cnt;
+ u16 payload_cnt;
+ /*
+ * Variable-size array of the size given by the sum of
+ * emb_drv_cnt and payload_cnt.
+ */
+ u64 offset_list[];
+} __packed;
+
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER */
+struct efi_manage_capsule_image_header {
+ u32 ver;
+ efi_guid_t image_type_id;
+ u8 image_index;
+ u8 reserved_bytes[3];
+ u32 image_size;
+ u32 vendor_code_size;
+ /* hw_ins was introduced in version 2 */
+ u64 hw_ins;
+ /* capsule_support was introduced in version 3 */
+ u64 capsule_support;
+} __packed;
+
+/* WIN_CERTIFICATE */
+struct win_cert {
+ u32 len;
+ u16 rev;
+ u16 cert_type;
+};
+
+/* WIN_CERTIFICATE_UEFI_GUID */
+struct win_cert_uefi_guid {
+ struct win_cert hdr;
+ efi_guid_t cert_type;
+ u8 cert_data[];
+};
+
+/* EFI_FIRMWARE_IMAGE_AUTHENTICATION */
+struct efi_image_auth {
+ u64 mon_count;
+ struct win_cert_uefi_guid auth_info;
+};
+
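A hedged sketch of consuming the firmware-management capsule layout above; real code also needs bounds checking against the capsule size, which is omitted here:

/*
 * Sketch: offset_list[] holds emb_drv_cnt driver-image offsets followed
 * by payload_cnt payload-image offsets; each locates an image header
 * within the capsule.
 */
static void walk_fmp_capsule(const void *capsule,
			     const struct efi_manage_capsule_header *hdr)
{
	u32 i, n = hdr->emb_drv_cnt + hdr->payload_cnt;

	for (i = 0; i < n; i++) {
		const struct efi_manage_capsule_image_header *img =
			capsule + hdr->offset_list[i];
		/* inspect img->image_type_id, img->image_size, ... */
	}
}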
/*
* EFI capsule flags
*/
@@ -164,10 +216,10 @@ struct capsule_info {
size_t page_bytes_remain;
};
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes);
int __efi_capsule_setup_info(struct capsule_info *cap_info);
-typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
-
/*
* Types and defines for Time Services
*/
@@ -307,18 +359,19 @@ void efi_native_runtime_setup(void);
* where the UEFI SPEC breaks the line.
*/
#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf)
+#define EFI_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0x09576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
+#define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
@@ -333,9 +386,13 @@ void efi_native_runtime_setup(void);
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
+#define EFI_TCG2_FINAL_EVENTS_TABLE_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
+#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a, 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -343,23 +400,46 @@ void efi_native_runtime_setup(void);
#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed)
+#define EFI_CC_BLOB_GUID EFI_GUID(0x067b1f5f, 0xcf26, 0x44c5, 0x85, 0x54, 0x93, 0xd7, 0x77, 0x91, 0x2d, 0x42)
+#define EFI_CC_MEASUREMENT_PROTOCOL_GUID EFI_GUID(0x96751a3d, 0x72f4, 0x41a6, 0xa7, 0x94, 0xed, 0x5d, 0x0e, 0x67, 0xae, 0x6b)
+#define EFI_CC_FINAL_EVENTS_TABLE_GUID EFI_GUID(0xdd4a4648, 0x2de7, 0x4665, 0x96, 0x4d, 0x21, 0xd9, 0xef, 0x5f, 0xb4, 0x46)
/*
 * This GUID is used to hand the kernel proper the struct screen_info that
 * the stub populated from the GOP protocol instance associated with
 * ConOut.
*/
-#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
-#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
+#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
+#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
+#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
+#define LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID EFI_GUID(0xd5d1de3c, 0x105c, 0x44f9, 0x9e, 0xa9, 0xbc, 0xef, 0x98, 0x12, 0x00, 0x31)
+
+#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
+
+/*
+ * This GUID may be installed onto the kernel image's handle as a NULL protocol
+ * to signal to the stub that the placement of the image should be respected,
+ * and moving the image in physical memory is undesirable. To ensure
+ * compatibility with 64k-page kernels with virtually mapped stacks, and to
+ * avoid defeating physical randomization, this protocol should only be
+ * installed if the image was placed at a randomized 128k aligned address in
+ * memory.
+ */
+#define LINUX_EFI_LOADED_IMAGE_FIXED_GUID EFI_GUID(0xf5a37b6d, 0x3344, 0x42a5, 0xb6, 0xbb, 0x97, 0x86, 0x48, 0xc1, 0x89, 0x0a)
/* OEM GUIDs */
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
+
+/* OVMF protocol GUIDs */
+#define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49)
typedef struct {
efi_guid_t guid;
@@ -386,6 +466,7 @@ typedef struct {
} efi_config_table_type_t;
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+#define EFI_DXE_SERVICES_TABLE_SIGNATURE ((u64)0x565245535f455844ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
@@ -450,6 +531,23 @@ typedef union {
efi_system_table_32_t mixed_mode;
} efi_system_table_t;
+struct efi_boot_memmap {
+ unsigned long map_size;
+ unsigned long desc_size;
+ u32 desc_ver;
+ unsigned long map_key;
+ unsigned long buff_size;
+ efi_memory_desc_t map[];
+};
+
+struct efi_unaccepted_memory {
+ u32 version;
+ u32 unit_size;
+ u64 phys_base;
+ u64 size;
+ unsigned long bitmap[];
+};
+
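For intuition, bit i of bitmap[] in struct efi_unaccepted_memory covers the unit_size-sized range starting at phys_base + i * unit_size, with set bits marking still-unaccepted ranges; a sketch of the address-to-bit mapping:

/* Sketch: which bitmap bit covers a given physical address. */
static inline unsigned long
unaccepted_bitmap_bit(const struct efi_unaccepted_memory *um, u64 phys)
{
	return (phys - um->phys_base) / um->unit_size;
}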
/*
* Architecture independent structure for describing a memory map for the
* benefit of efi_memmap_init_early(), and for passing context between
@@ -500,11 +598,15 @@ typedef struct {
#define EFI_INVALID_TABLE_ADDR (~0UL)
+// BIT0 implies that Runtime code includes the forward control flow guard
+// instruction, such as X86 CET-IBT or ARM BTI.
+#define EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD 0x1
+
typedef struct {
u32 version;
u32 num_entries;
u32 desc_size;
- u32 reserved;
+ u32 flags;
efi_memory_desc_t entry[0];
} efi_memory_attributes_table_t;
@@ -546,6 +648,9 @@ extern struct efi {
unsigned long esrt; /* ESRT table */
unsigned long tpm_log; /* TPM2 Event Log table */
unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+ unsigned long mokvar_table; /* MOK variable config table */
+ unsigned long coco_secret; /* Confidential computing secret table */
+ unsigned long unaccepted; /* Unaccepted memory table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
@@ -566,8 +671,8 @@ extern struct efi {
unsigned long flags;
} efi;
-#define EFI_RT_SUPPORTED_GET_TIME 0x0001
-#define EFI_RT_SUPPORTED_SET_TIME 0x0002
+#define EFI_RT_SUPPORTED_GET_TIME 0x0001
+#define EFI_RT_SUPPORTED_SET_TIME 0x0002
#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004
#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008
#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010
@@ -583,11 +688,17 @@ extern struct efi {
#define EFI_RT_SUPPORTED_ALL 0x3fff
-#define EFI_RT_SUPPORTED_TIME_SERVICES 0x000f
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x0003
+#define EFI_RT_SUPPORTED_WAKEUP_SERVICES 0x000c
#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
extern struct mm_struct efi_mm;
+static inline bool mm_is_efi(struct mm_struct *mm)
+{
+ return IS_ENABLED(CONFIG_EFI) && mm == &efi_mm;
+}
+
static inline int
efi_guidcmp (efi_guid_t left, efi_guid_t right)
{
@@ -602,10 +713,7 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec64 *ts);
+extern void efi_earlycon_reprobe(void);
#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#else
@@ -624,20 +732,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
return EFI_SUCCESS;
}
#endif
-extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
-extern int __init efi_memmap_alloc(unsigned int num_entries,
- struct efi_memory_map_data *data);
-extern void __efi_memmap_free(u64 phys, unsigned long size,
- unsigned long flags);
+extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
-extern int __init efi_memmap_install(struct efi_memory_map_data *data);
-extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
- struct range *range);
-extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
- void *buf, struct efi_mem_range *mem);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
@@ -647,8 +746,7 @@ static inline void efi_esrt_init(void) { }
extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
int count,
const efi_config_table_type_t *arch_tables);
-extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
- int min_major_version);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr);
extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
@@ -658,6 +756,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
@@ -668,12 +767,6 @@ extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
-#ifdef CONFIG_EFI_FAKE_MEMMAP
-extern void __init efi_fake_memmap(void);
-#else
-static inline void efi_fake_memmap(void) { }
-#endif
-
extern unsigned long efi_mem_attr_table;
/*
@@ -683,7 +776,7 @@ extern unsigned long efi_mem_attr_table;
* argument in the page tables referred to by the
* first argument.
*/
-typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool);
extern int efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
@@ -763,10 +856,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
return 1;
}
-#ifdef CONFIG_EFI_PCDP
-extern int __init efi_setup_pcdp_console(char *);
-#endif
-
/*
* We play games with efi_enabled so that the compiler will, if
* possible, remove EFI-related code altogether.
@@ -806,6 +895,7 @@ static inline bool efi_rt_services_supported(unsigned int mask)
{
return (efi.runtime_supported_mask & mask) == mask;
}
+extern void efi_find_mirror(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -814,12 +904,6 @@ static inline bool efi_enabled(int feature)
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
-static inline bool
-efi_capsule_pending(int *reset_type)
-{
- return false;
-}
-
static inline bool efi_soft_reserve_enabled(void)
{
return false;
@@ -829,6 +913,8 @@ static inline bool efi_rt_services_supported(unsigned int mask)
{
return false;
}
+
+static inline void efi_find_mirror(void) {}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -844,7 +930,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
-#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS | \
EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
@@ -889,6 +975,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_MEDIA_REL_OFFSET 8
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
@@ -919,12 +1006,32 @@ struct efi_vendor_dev_path {
u8 vendordata[];
} __packed;
+struct efi_rel_offset_dev_path {
+ struct efi_generic_dev_path header;
+ u32 reserved;
+ u64 starting_offset;
+ u64 ending_offset;
+} __packed;
+
+struct efi_mem_mapped_dev_path {
+ struct efi_generic_dev_path header;
+ u32 memory_type;
+ u64 starting_addr;
+ u64 ending_addr;
+} __packed;
+
+struct efi_file_path_dev_path {
+ struct efi_generic_dev_path header;
+ efi_char16_t filename[];
+} __packed;
+
struct efi_dev_path {
union {
struct efi_generic_dev_path header;
struct efi_acpi_dev_path acpi;
struct efi_pci_dev_path pci;
struct efi_vendor_dev_path vendor;
+ struct efi_rel_offset_dev_path rel_offset;
};
} __packed;
@@ -950,14 +1057,20 @@ struct efivar_operations {
efi_set_variable_t *set_variable;
efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
+ efi_query_variable_info_t *query_variable_info;
};
struct efivars {
struct kset *kset;
- struct kobject *kobject;
const struct efivar_operations *ops;
};
+#ifdef CONFIG_X86
+u64 __attribute_const__ efivar_reserved_space(void);
+#else
+static inline u64 efivar_reserved_space(void) { return 0; }
+#endif
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -967,85 +1080,40 @@ struct efivars {
#define EFI_VAR_NAME_LEN 1024
-struct efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
- __u32 Attributes;
-} __attribute__((packed));
-
-struct efivar_entry {
- struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
- bool scanning;
- bool deleting;
-};
-
-extern struct list_head efivar_sysfs_list;
-
-static inline void
-efivar_unregister(struct efivar_entry *var)
-{
- kobject_put(&var->kobj);
-}
-
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject);
+ const struct efivar_operations *ops);
int efivars_unregister(struct efivars *efivars);
-struct kobject *efivars_kobject(void);
-
-int efivar_supports_writes(void);
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head);
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-int efivar_entry_remove(struct efivar_entry *entry);
-
-int __efivar_entry_delete(struct efivar_entry *entry);
-int efivar_entry_delete(struct efivar_entry *entry);
-
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head);
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set);
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data);
+#ifdef CONFIG_EFI
+bool efivar_is_available(void);
+#else
+static inline bool efivar_is_available(void) { return false; }
+#endif
-int efivar_entry_iter_begin(void);
-void efivar_entry_iter_end(void);
+bool efivar_supports_writes(void);
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
+int efivar_lock(void);
+int efivar_trylock(void);
+void efivar_unlock(void);
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove);
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data);
-bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size);
-bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
- size_t len);
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor);
-extern struct work_struct efivar_work;
-void efivar_run_worker(void);
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking);
-#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
-int efivars_sysfs_init(void);
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
-#define EFIVARS_DATA_SIZE_MAX 1024
+efi_status_t efivar_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size);
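With the efivar_entry machinery removed, variable access becomes a lock/get/unlock sequence; a hedged sketch of a blocking read (the EFI_ABORTED mapping on lock failure is illustrative, not mandated by this API):

/* Sketch: read one EFI variable through the new efivar interface. */
static efi_status_t read_efi_var(efi_char16_t *name, efi_guid_t *vendor,
				 void *buf, unsigned long *size)
{
	efi_status_t status;

	if (efivar_lock())		/* interruptible, may fail */
		return EFI_ABORTED;
	status = efivar_get_variable(name, vendor, NULL, size, buf);
	efivar_unlock();
	return status;
}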
-#endif /* CONFIG_EFI_VARS */
+#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
@@ -1053,33 +1121,8 @@ extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
extern int efi_capsule_update(efi_capsule_header_t *capsule,
phys_addr_t *pages);
-
-#ifdef CONFIG_EFI_RUNTIME_MAP
-int efi_runtime_map_init(struct kobject *);
-int efi_get_runtime_map_size(void);
-int efi_get_runtime_map_desc_size(void);
-int efi_runtime_map_copy(void *buf, size_t bufsz);
#else
-static inline int efi_runtime_map_init(struct kobject *kobj)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_size(void)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_desc_size(void)
-{
- return 0;
-}
-
-static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
-{
- return 0;
-}
-
+static inline bool efi_capsule_pending(int *reset_type) { return false; }
#endif
#ifdef CONFIG_EFI
@@ -1088,7 +1131,7 @@ extern bool efi_runtime_disabled(void);
static inline bool efi_runtime_disabled(void) { return true; }
#endif
-extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+extern void efi_call_virt_check_flags(unsigned long flags, const void *caller);
extern unsigned long efi_call_virt_save_flags(void);
enum efi_secureboot_mode {
@@ -1097,14 +1140,28 @@ enum efi_secureboot_mode {
efi_secureboot_mode_disabled,
efi_secureboot_mode_enabled,
};
-enum efi_secureboot_mode efi_get_secureboot(void);
-#ifdef CONFIG_RESET_ATTACK_MITIGATION
-void efi_enable_reset_attack_mitigation(void);
-#else
-static inline void
-efi_enable_reset_attack_mitigation(void) { }
-#endif
+static inline
+enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
+{
+ u8 secboot, setupmode = 0;
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(secboot);
+ status = get_var(L"SecureBoot", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size,
+ &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
+ if (status != EFI_SUCCESS)
+ return efi_secureboot_mode_unknown;
+
+ size = sizeof(setupmode);
+ get_var(L"SetupMode", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, &setupmode);
+ if (secboot == 0 || setupmode == 1)
+ return efi_secureboot_mode_disabled;
+ return efi_secureboot_mode_enabled;
+}
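Usage sketch: callers pass whichever GetVariable implementation they have at hand; with the runtime wrapper it might look like this (the call site is illustrative):

/* Sketch: query the secure boot state via the runtime GetVariable. */
static bool secure_boot_on(void)
{
	return efi_get_secureboot_mode(efi.get_variable) ==
	       efi_secureboot_mode_enabled;
}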
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
void efi_check_for_embedded_firmwares(void);
@@ -1112,13 +1169,10 @@ void efi_check_for_embedded_firmwares(void);
static inline void efi_check_for_embedded_firmwares(void) { }
#endif
-efi_status_t efi_random_get_seed(void);
-
-void efi_retrieve_tpm2_eventlog(void);
+#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt():
+ * Arch code must implement the following three routines:
*
* * arch_efi_call_virt_setup()
*
@@ -1127,9 +1181,8 @@ void efi_retrieve_tpm2_eventlog(void);
*
* * arch_efi_call_virt()
*
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
+ * Performs the call. This routine takes a variable number of arguments so
+ * it must be implemented as a variadic preprocessor macro.
*
* * arch_efi_call_virt_teardown()
*
@@ -1138,34 +1191,21 @@ void efi_retrieve_tpm2_eventlog(void);
#define efi_call_virt_pointer(p, f, args...) \
({ \
- efi_status_t __s; \
+ typeof((p)->f(args)) __s; \
unsigned long __flags; \
\
arch_efi_call_virt_setup(); \
\
__flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
+ efi_call_virt_check_flags(__flags, NULL); \
\
arch_efi_call_virt_teardown(); \
\
__s; \
})
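A hedged usage sketch: wrapper code typically layers a convenience macro on top, along these lines (assuming efi.runtime points at the runtime services table):

/* Sketch: dispatch a runtime service through the checked wrapper. */
#define efi_call_virt(f, args...) \
	efi_call_virt_pointer(efi.runtime, f, args)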
-#define __efi_call_virt_pointer(p, f, args...) \
-({ \
- unsigned long __flags; \
- \
- arch_efi_call_virt_setup(); \
- \
- __flags = efi_call_virt_save_flags(); \
- arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
- \
- arch_efi_call_virt_teardown(); \
-})
-
-#define EFI_RANDOM_SEED_SIZE 64U
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
@@ -1190,6 +1230,10 @@ extern int efi_tpm_final_log_size;
extern unsigned long rci2_table_phys;
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context);
+
/*
* efi_runtime_service() function identifiers.
* "NONE" is used by efi_recover_from_page_fault() to check if the page
@@ -1209,25 +1253,26 @@ enum efi_rts_ids {
EFI_RESET_SYSTEM,
EFI_UPDATE_CAPSULE,
EFI_QUERY_CAPSULE_CAPS,
+ EFI_ACPI_PRM_HANDLER,
};
+union efi_rts_args;
+
/*
* efi_runtime_work: Details of EFI Runtime Service work
- * @arg<1-5>: EFI Runtime Service function arguments
+ * @args: Pointer to union describing the arguments
* @status: Status of executing EFI Runtime Service
* @efi_rts_id: EFI Runtime Service function identifier
* @efi_rts_comp: Struct used for handling completions
+ * @caller: The caller of the runtime service
*/
struct efi_runtime_work {
- void *arg1;
- void *arg2;
- void *arg3;
- void *arg4;
- void *arg5;
- efi_status_t status;
- struct work_struct work;
- enum efi_rts_ids efi_rts_id;
- struct completion efi_rts_comp;
+ union efi_rts_args *args;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+ const void *caller;
};
extern struct efi_runtime_work efi_rts_work;
@@ -1252,4 +1297,74 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
char *efi_systab_show_arch(char *str);
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
+ * to the kernel by an EFI boot loader. The table contains a packed
+ * sequence of these entries, one for each named MOK variable.
+ * The sequence is terminated by an entry with a completely NULL
+ * name and 0 data size.
+ */
+struct efi_mokvar_table_entry {
+ char name[256];
+ u64 data_size;
+ u8 data[];
+} __attribute((packed));
+
+#ifdef CONFIG_LOAD_UEFI_KEYS
+extern void __init efi_mokvar_table_init(void);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name);
+#else
+static inline void efi_mokvar_table_init(void) { }
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ return NULL;
+}
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
+ const char *name)
+{
+ return NULL;
+}
+#endif
+
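Iteration sketch over the MOK config table using the helpers above; the cursor starts out NULL and efi_mokvar_entry_next() advances it until the terminating entry:

/* Sketch: walk every named MOK variable in the table. */
static void walk_mok_table(void)
{
	struct efi_mokvar_table_entry *ent = NULL;

	while ((ent = efi_mokvar_entry_next(&ent)))
		pr_debug("MOK var %s: %llu bytes\n", ent->name,
			 (unsigned long long)ent->data_size);
}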
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+struct linux_efi_coco_secret_area {
+ u64 base_pa;
+ u64 size;
+};
+
+struct linux_efi_initrd {
+ unsigned long base;
+ unsigned long size;
+};
+
+/* Header of a populated EFI secret area */
+#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
+
+bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
+
+static inline
+bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
+{
+ if (!IS_ENABLED(CONFIG_XEN_EFI))
+ return true;
+ return xen_efi_config_table_is_usable(guid, table);
+}
+
+umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+
+/*
+ * efivar ops event type
+ */
+#define EFIVAR_OPS_RDONLY 0
+#define EFIVAR_OPS_RDWR 1
+
+extern struct blocking_notifier_head efivar_ops_nh;
+
+void efivars_generic_ops_register(void);
+void efivars_generic_ops_unregister(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
index 57eac5241303..a97a12bb2c9e 100644
--- a/include/linux/efi_embedded_fw.h
+++ b/include/linux/efi_embedded_fw.h
@@ -8,8 +8,8 @@
#define EFI_EMBEDDED_FW_PREFIX_LEN 8
/*
- * This struct and efi_embedded_fw_list are private to the efi-embedded fw
- * implementation they are in this header for use by lib/test_firmware.c only!
+ * This struct is private to the efi-embedded fw implementation.
+ * It is in this header for use by lib/test_firmware.c only!
*/
struct efi_embedded_fw {
struct list_head list;
@@ -18,8 +18,6 @@ struct efi_embedded_fw {
size_t length;
};
-extern struct list_head efi_embedded_fw_list;
-
/**
* struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw
* code to search for embedded firmwares.
diff --git a/include/linux/einj-cxl.h b/include/linux/einj-cxl.h
new file mode 100644
index 000000000000..624ff6ff41f9
--- /dev/null
+++ b/include/linux/einj-cxl.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CXL protocol Error INJection support.
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Ben Cheatham <benjamin.cheatham@amd.com>
+ */
+#ifndef EINJ_CXL_H
+#define EINJ_CXL_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct pci_dev;
+struct seq_file;
+
+#if IS_ENABLED(CONFIG_ACPI_APEI_EINJ_CXL)
+int einj_cxl_available_error_type_show(struct seq_file *m, void *v);
+int einj_cxl_inject_error(struct pci_dev *dport_dev, u64 type);
+int einj_cxl_inject_rch_error(u64 rcrb, u64 type);
+bool einj_cxl_is_initialized(void);
+#else /* !IS_ENABLED(CONFIG_ACPI_APEI_EINJ_CXL) */
+static inline int einj_cxl_available_error_type_show(struct seq_file *m,
+ void *v)
+{
+ return -ENXIO;
+}
+
+static inline int einj_cxl_inject_error(struct pci_dev *dport_dev, u64 type)
+{
+ return -ENXIO;
+}
+
+static inline int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
+{
+ return -ENXIO;
+}
+
+static inline bool einj_cxl_is_initialized(void) { return false; }
+#endif /* CONFIG_ACPI_APEI_EINJ_CXL */
+
+#endif /* EINJ_CXL_H */
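A short sketch of how a CXL port driver might drive these hooks (the dport device and error type are assumed to come from the caller):

/* Sketch: inject a CXL protocol error once the EINJ core is ready. */
static int cxl_try_inject(struct pci_dev *dport_dev, u64 type)
{
	if (!einj_cxl_is_initialized())
		return -ENXIO;
	return einj_cxl_inject_error(dport_dev, type);
}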
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
deleted file mode 100644
index bacc40a0bdf3..000000000000
--- a/include/linux/elevator.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ELEVATOR_H
-#define _LINUX_ELEVATOR_H
-
-#include <linux/percpu.h>
-#include <linux/hashtable.h>
-
-#ifdef CONFIG_BLOCK
-
-struct io_cq;
-struct elevator_type;
-#ifdef CONFIG_BLK_DEBUG_FS
-struct blk_mq_debugfs_attr;
-#endif
-
-/*
- * Return values from elevator merger
- */
-enum elv_merge {
- ELEVATOR_NO_MERGE = 0,
- ELEVATOR_FRONT_MERGE = 1,
- ELEVATOR_BACK_MERGE = 2,
- ELEVATOR_DISCARD_MERGE = 3,
-};
-
-struct blk_mq_alloc_data;
-struct blk_mq_hw_ctx;
-
-struct elevator_mq_ops {
- int (*init_sched)(struct request_queue *, struct elevator_type *);
- void (*exit_sched)(struct elevator_queue *);
- int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*depth_updated)(struct blk_mq_hw_ctx *);
-
- bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
- int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
- void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
- void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
- void (*prepare_request)(struct request *);
- void (*finish_request)(struct request *);
- void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
- struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
- bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *, u64);
- void (*requeue_request)(struct request *);
- struct request *(*former_request)(struct request_queue *, struct request *);
- struct request *(*next_request)(struct request_queue *, struct request *);
- void (*init_icq)(struct io_cq *);
- void (*exit_icq)(struct io_cq *);
-};
-
-#define ELV_NAME_MAX (16)
-
-struct elv_fs_entry {
- struct attribute attr;
- ssize_t (*show)(struct elevator_queue *, char *);
- ssize_t (*store)(struct elevator_queue *, const char *, size_t);
-};
-
-/*
- * identifies an elevator type, such as AS or deadline
- */
-struct elevator_type
-{
- /* managed by elevator core */
- struct kmem_cache *icq_cache;
-
- /* fields provided by elevator implementation */
- struct elevator_mq_ops ops;
-
- size_t icq_size; /* see iocontext.h */
- size_t icq_align; /* ditto */
- struct elv_fs_entry *elevator_attrs;
- const char *elevator_name;
- const char *elevator_alias;
- const unsigned int elevator_features;
- struct module *elevator_owner;
-#ifdef CONFIG_BLK_DEBUG_FS
- const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
- const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
-#endif
-
- /* managed by elevator core */
- char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
- struct list_head list;
-};
-
-#define ELV_HASH_BITS 6
-
-void elv_rqhash_del(struct request_queue *q, struct request *rq);
-void elv_rqhash_add(struct request_queue *q, struct request *rq);
-void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
-struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
-
-/*
- * each queue has an elevator_queue associated with it
- */
-struct elevator_queue
-{
- struct elevator_type *type;
- void *elevator_data;
- struct kobject kobj;
- struct mutex sysfs_lock;
- unsigned int registered:1;
- DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
-};
-
-/*
- * block elevator interface
- */
-extern enum elv_merge elv_merge(struct request_queue *, struct request **,
- struct bio *);
-extern void elv_merge_requests(struct request_queue *, struct request *,
- struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *,
- enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern struct request *elv_former_request(struct request_queue *, struct request *);
-extern struct request *elv_latter_request(struct request_queue *, struct request *);
-
-/*
- * io scheduler registration
- */
-extern int elv_register(struct elevator_type *);
-extern void elv_unregister(struct elevator_type *);
-
-/*
- * io scheduler sysfs switching
- */
-extern ssize_t elv_iosched_show(struct request_queue *, char *);
-extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-
-extern bool elv_bio_merge_ok(struct request *, struct bio *);
-extern struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *);
-
-/*
- * Helper functions.
- */
-extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
-extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
-
-/*
- * rb support functions.
- */
-extern void elv_rb_add(struct rb_root *, struct request *);
-extern void elv_rb_del(struct rb_root *, struct request *);
-extern struct request *elv_rb_find(struct rb_root *, sector_t);
-
-/*
- * Insertion selection
- */
-#define ELEVATOR_INSERT_FRONT 1
-#define ELEVATOR_INSERT_BACK 2
-#define ELEVATOR_INSERT_SORT 3
-#define ELEVATOR_INSERT_REQUEUE 4
-#define ELEVATOR_INSERT_FLUSH 5
-#define ELEVATOR_INSERT_SORT_MERGE 6
-
-#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
-#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
-
-#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-
-/*
- * Elevator features.
- */
-
-/* Supports zoned block devices sequential write constraint */
-#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-
-#endif /* CONFIG_BLOCK */
-#endif
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 3bea95a1af53..e533f4513194 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -10,13 +10,25 @@
#include <uapi/linux/elf-fdpic.h>
+#if ELF_CLASS == ELFCLASS32
+#define Elf_Sword Elf32_Sword
+#define elf_fdpic_loadseg elf32_fdpic_loadseg
+#define elf_fdpic_loadmap elf32_fdpic_loadmap
+#define ELF_FDPIC_LOADMAP_VERSION ELF32_FDPIC_LOADMAP_VERSION
+#else
+#define Elf_Sword Elf64_Sxword
+#define elf_fdpic_loadmap elf64_fdpic_loadmap
+#define elf_fdpic_loadseg elf64_fdpic_loadseg
+#define ELF_FDPIC_LOADMAP_VERSION ELF64_FDPIC_LOADMAP_VERSION
+#endif
+
/*
* binfmt binary parameters structure
*/
struct elf_fdpic_params {
struct elfhdr hdr; /* ref copy of ELF header */
struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
- struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ struct elf_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
unsigned long elfhdr_addr; /* mapped ELF header user address */
unsigned long ph_addr; /* mapped PT_PHDR user address */
unsigned long map_addr; /* mapped loadmap user address */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 5d5b0321da0b..c9a46c4e183b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -22,6 +22,16 @@
SET_PERSONALITY(ex)
#endif
+#ifndef START_THREAD
+#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \
+ start_thread(regs, elf_entry, start_stack)
+#endif
+
+#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES)
+#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+ arch_setup_additional_pages(bprm, interpreter)
+#endif
+
#define ELF32_GNU_PROPERTY_ALIGN 4
#define ELF64_GNU_PROPERTY_ALIGN 8
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index 10485f0c9740..54feb64e9b5d 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -17,7 +17,7 @@ struct compat_elf_siginfo
compat_int_t si_errno;
};
-struct compat_elf_prstatus
+struct compat_elf_prstatus_common
{
struct compat_elf_siginfo pr_info;
short pr_cursig;
@@ -31,8 +31,6 @@ struct compat_elf_prstatus
struct old_timeval32 pr_stime;
struct old_timeval32 pr_cutime;
struct old_timeval32 pr_cstime;
- compat_elf_gregset_t pr_reg;
- compat_int_t pr_fpvalid;
};
struct compat_elf_prpsinfo
@@ -45,8 +43,24 @@ struct compat_elf_prpsinfo
__compat_uid_t pr_uid;
__compat_gid_t pr_gid;
compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /*
+	 * The hard-coded 16 is derived from TASK_COMM_LEN, but it can't be
+	 * changed as it is exposed to userspace, so it stays hard-coded here.
+ */
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
+#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT
+#include <asm/elfcore-compat.h>
+#endif
+
+struct compat_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ compat_elf_gregset_t pr_reg;
+ compat_int_t pr_fpvalid;
+};
+
#endif /* _LINUX_ELFCORE_COMPAT_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 46c3d691f677..bd5560542c79 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -29,7 +29,7 @@ struct elf_siginfo
* the SVR4 structure, but more Linuxy, with things that Linux does
* not support and which gdb doesn't really use excluded.
*/
-struct elf_prstatus
+struct elf_prstatus_common
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
@@ -43,6 +43,11 @@ struct elf_prstatus
struct __kernel_old_timeval pr_stime; /* System time */
struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+};
+
+struct elf_prstatus
+{
+ struct elf_prstatus_common common;
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
@@ -60,6 +65,11 @@ struct elf_prpsinfo
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
+ /*
+	 * The hard-coded 16 is derived from TASK_COMM_LEN, but it can't be
+	 * changed as it is exposed to userspace, so it stays hard-coded here.
+ */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
@@ -74,36 +84,19 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
-static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
-{
-#ifdef ELF_CORE_COPY_KERNEL_REGS
- ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
-#else
- elf_core_copy_regs(elfregs, regs);
-#endif
-}
-
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
-#elif defined (task_pt_regs)
+#else
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
}
-extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
-
-static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef ELF_CORE_COPY_FPREGS
- return ELF_CORE_COPY_FPREGS(t, fpu);
-#else
- return dump_fpu(regs, fpu);
-#endif
-}
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
+#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -112,11 +105,32 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the gate DSO was being used.
*/
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
extern int
elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
+#else
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote-lto.h b/include/linux/elfnote-lto.h
new file mode 100644
index 000000000000..d4635a3ecc4f
--- /dev/null
+++ b/include/linux/elfnote-lto.h
@@ -0,0 +1,14 @@
+#ifndef __ELFNOTE_LTO_H
+#define __ELFNOTE_LTO_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_LTO_INFO 0x101
+
+#ifdef CONFIG_LTO
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 1)
+#else
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 0)
+#endif
+
+#endif /* __ELFNOTE_LTO_H */
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index b67a51c574b9..770755df852f 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -5,30 +5,57 @@
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
+#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>
/**
- * em_perf_state - Performance state of a performance domain
+ * struct em_perf_state - Performance state of a performance domain
+ * @performance: CPU performance (capacity) at a given frequency
* @frequency: The frequency in KHz, for consistency with CPUFreq
- * @power: The power consumed at this level, in milli-watts (by 1 CPU or
- by a registered device). It can be a total power: static and
- dynamic.
+ * @power: The power consumed at this level (by 1 CPU or by a registered
+ * device). It can be a total power: static and dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
+ * @flags: see "em_perf_state flags" description below.
*/
struct em_perf_state {
+ unsigned long performance;
unsigned long frequency;
unsigned long power;
unsigned long cost;
+ unsigned long flags;
};
+/*
+ * em_perf_state flags:
+ *
+ * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient: another
+ * performance state in this em_perf_domain has a higher frequency but a
+ * lower or equal power cost. Such inefficient states are ignored when
+ * using em_pd_get_efficient_*() functions.
+ */
+#define EM_PERF_STATE_INEFFICIENT BIT(0)
+
/**
- * em_perf_domain - Performance domain
- * @table: List of performance states, in ascending order
+ * struct em_perf_table - Performance states table
+ * @rcu: RCU used for safe access and destruction
+ * @kref: Reference counter to track the users
+ * @state: List of performance states, in ascending order
+ */
+struct em_perf_table {
+ struct rcu_head rcu;
+ struct kref kref;
+ struct em_perf_state state[];
+};
+
+/**
+ * struct em_perf_domain - Performance domain
+ * @em_table: Pointer to the runtime modifiable em_perf_table
* @nr_perf_states: Number of performance states
+ * @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
* misses during energy calculations in the scheduler
@@ -41,53 +68,152 @@ struct em_perf_state {
* field is unused.
*/
struct em_perf_domain {
- struct em_perf_state *table;
+ struct em_perf_table __rcu *em_table;
int nr_perf_states;
+ unsigned long flags;
unsigned long cpus[];
};
+/*
+ * em_perf_domain flags:
+ *
+ * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some
+ * other scale.
+ *
+ * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
+ * energy consumption.
+ *
+ * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
+ * created by a platform that lacks real power information.
+ */
+#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
+#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
+#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)
+
#define em_span_cpus(em) (to_cpumask((em)->cpus))
+#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
#ifdef CONFIG_ENERGY_MODEL
-#define EM_MAX_POWER 0xFFFF
+/*
+ * The max power value in micro-Watts. The limit of 64 Watts is set as
+ * a safety net against multiplication overflow on 32-bit platforms. The
+ * 32-bit limit on total performance-domain power caps the number of CPUs
+ * in such a domain at 64.
+ */
+#define EM_MAX_POWER (64000000) /* 64 Watts */
+
+/*
+ * To avoid possible energy-estimation overflow on 32-bit machines, limit
+ * the number of CPUs in the performance domain. 64-bit machines are safe,
+ * so a large number is used there.
+ */
+#ifdef CONFIG_64BIT
+#define EM_MAX_NUM_CPUS 4096
+#else
+#define EM_MAX_NUM_CPUS 16
+#endif
struct em_data_callback {
/**
* active_power() - Provide power at the next performance state of
* a device
- * @power : Active power at the performance state in mW
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @power : Active power at the performance state
* (modified)
* @freq : Frequency at the performance state in kHz
* (modified)
- * @dev : Device for which we do this operation (can be a CPU)
*
* active_power() must find the lowest performance state of 'dev' above
* 'freq' and update 'power' and 'freq' to the matching active power
* and frequency.
*
* In case of CPUs, the power is the one of a single CPU in the domain,
- * expressed in milli-watts. It is expected to fit in the
- * [0, EM_MAX_POWER] range.
+ * expressed in micro-Watts or an abstract scale. It is expected to
+ * fit in the [0, EM_MAX_POWER] range.
*
* Return 0 on success.
*/
- int (*active_power)(unsigned long *power, unsigned long *freq,
- struct device *dev);
+ int (*active_power)(struct device *dev, unsigned long *power,
+ unsigned long *freq);
+
+ /**
+ * get_cost() - Provide the cost at the given performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @freq : Frequency at the performance state in kHz
+ * @cost : The cost value for the performance state
+ * (modified)
+ *
+ * In case of CPUs, the cost is the one of a single CPU in the domain.
+ * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
+ * usage in EAS calculation.
+ *
+ * Return 0 on success, or appropriate error value in case of failure.
+ */
+ int (*get_cost)(struct device *dev, unsigned long freq,
+ unsigned long *cost);
};
-#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) \
+ { .active_power = _active_power_cb, \
+ .get_cost = _cost_cb }
+#define EM_DATA_CB(_active_power_cb) \
+ EM_ADV_DATA_CB(_active_power_cb, NULL)
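Callback sketch under the reordered, device-first signature; the values are placeholders, and whether power is in uW or an abstract scale is declared via the new 'microwatts' argument at registration time:

/* Sketch: a driver's active_power() callback and its registration. */
static int demo_active_power(struct device *dev, unsigned long *power,
			     unsigned long *freq)
{
	/*
	 * A real driver would find the lowest OPP at or above *freq and
	 * report its frequency and power; constants stand in here.
	 */
	*freq = 1000000;	/* kHz */
	*power = 600000;	/* uW, matching microwatts=true below */
	return 0;
}

static struct em_data_callback demo_em_cb = EM_DATA_CB(demo_active_power);
/* ... em_dev_register_perf_domain(dev, nr_states, &demo_em_cb, span, true); */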
struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table __rcu *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *span);
+ struct em_data_callback *cb, cpumask_t *span,
+ bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
+struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
+void em_table_free(struct em_perf_table __rcu *table);
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states);
+
+/**
+ * em_pd_get_efficient_state() - Get an efficient performance state from the EM
+ * @table: List of performance states, in ascending order
+ * @nr_perf_states: Number of performance states
+ * @max_util: Max utilization to map with the EM
+ * @pd_flags: Performance Domain flags
+ *
+ * It is called from the scheduler code quite frequently and as a consequence
+ * doesn't implement any check.
+ *
+ * Return: An efficient performance state id, high enough to meet @max_util
+ * requirement.
+ */
+static inline int
+em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
+ unsigned long max_util, unsigned long pd_flags)
+{
+ struct em_perf_state *ps;
+ int i;
+
+ for (i = 0; i < nr_perf_states; i++) {
+ ps = &table[i];
+ if (ps->performance >= max_util) {
+ if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
+ ps->flags & EM_PERF_STATE_INEFFICIENT)
+ continue;
+ return i;
+ }
+ }
+
+ return nr_perf_states - 1;
+}
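Standalone usage sketch (outside the scheduler fast path), pairing this lookup with the RCU-protected table accessor declared further down in this header:

/* Sketch: index of the efficient perf state serving a utilization. */
static int lookup_efficient_state(struct em_perf_domain *pd,
				  unsigned long util)
{
	struct em_perf_state *table;
	int i;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	i = em_pd_get_efficient_state(table, pd->nr_perf_states, util,
				      pd->flags);
	rcu_read_unlock();

	return i;
}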
/**
* em_cpu_energy() - Estimates the energy consumed by the CPUs of a
- performance domain
+ * performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
+ * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
+ * might reflect reduced frequency (due to thermal)
*
* This function must be used only for CPU devices. There is no validation,
* i.e. if the EM is a CPU type and has cpumask allocated. It is called from
@@ -97,39 +223,47 @@ void em_dev_unregister_perf_domain(struct device *dev);
* a capacity state satisfying the max utilization of the domain.
*/
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
- unsigned long freq, scale_cpu;
+ struct em_perf_table *em_table;
struct em_perf_state *ps;
- int i, cpu;
+ int i;
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
+#endif
+
+ if (!sum_util)
+ return 0;
/*
* In order to predict the performance state, map the utilization of
* the most utilized CPU of the performance domain to a requested
- * frequency, like schedutil.
+ * performance, like schedutil. Take also into account that the real
+ * performance might be set lower (due to thermal capping). Thus, clamp
+ * max utilization to the allowed CPU capacity before calculating
+ * effective performance.
*/
- cpu = cpumask_first(to_cpumask(pd->cpus));
- scale_cpu = arch_scale_cpu_capacity(cpu);
- ps = &pd->table[pd->nr_perf_states - 1];
- freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+ max_util = map_util_perf(max_util);
+ max_util = min(max_util, allowed_cpu_cap);
/*
* Find the lowest performance state of the Energy Model above the
- * requested frequency.
+ * requested performance.
*/
- for (i = 0; i < pd->nr_perf_states; i++) {
- ps = &pd->table[i];
- if (ps->frequency >= freq)
- break;
- }
+ em_table = rcu_dereference(pd->em_table);
+ i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states,
+ max_util, pd->flags);
+ ps = &em_table->state[i];
/*
- * The capacity of a CPU in the domain at the performance state (ps)
- * can be computed as:
+ * The performance (capacity) of a CPU in the domain at the performance
+ * state (ps) can be computed as:
*
- * ps->freq * scale_cpu
- * ps->cap = -------------------- (1)
- * cpu_max_freq
+ * ps->freq * scale_cpu
+ * ps->performance = -------------------- (1)
+ * cpu_max_freq
*
* So, ignoring the costs of idle states (which are not available in
* the EM), the energy consumed by this CPU at that performance state
@@ -137,9 +271,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
*
* ps->power * cpu_util
* cpu_nrg = -------------------- (2)
- * ps->cap
+ * ps->performance
*
- * since 'cpu_util / ps->cap' represents its percentage of busy time.
+ * since 'cpu_util / ps->performance' represents its percentage of busy
+ * time.
*
* NOTE: Although the result of this computation actually is in
* units of power, it can be manipulated as an energy value
@@ -149,9 +284,9 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
* of two terms:
*
- * ps->power * cpu_max_freq cpu_util
- * cpu_nrg = ------------------------ * --------- (3)
- * ps->freq scale_cpu
+ * ps->power * cpu_max_freq
+ * cpu_nrg = ------------------------ * cpu_util (3)
+ * ps->freq * scale_cpu
*
* The first term is static, and is stored in the em_perf_state struct
* as 'ps->cost'.
@@ -161,11 +296,9 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* total energy of the domain (which is the simple sum of the energy of
* all of its CPUs) can be factorized as:
*
- * ps->cost * \Sum cpu_util
- * pd_nrg = ------------------------ (4)
- * scale_cpu
+ * pd_nrg = ps->cost * \Sum cpu_util (4)
*/
- return ps->cost * sum_util / scale_cpu;
+ return ps->cost * sum_util;
}
/**
@@ -180,13 +313,33 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
return pd->nr_perf_states;
}
+/**
+ * em_perf_state_from_pd() - Get the performance states table of perf.
+ * domain
+ * @pd : performance domain for which this must be done
+ *
+ * To use this function, rcu_read_lock() must be held. After use of the
+ * performance states table is finished, rcu_read_unlock() must be called.
+ *
+ * Return: the pointer to performance states table of the performance domain
+ */
+static inline
+struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
+{
+ return rcu_dereference(pd->em_table)->state;
+}
+
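A matching usage sketch of the accessor, holding the RCU read lock across all accesses to the returned table:

/* Sketch: dump a performance domain's state table under RCU. */
static void dump_perf_states(struct device *dev, struct em_perf_domain *pd)
{
	struct em_perf_state *table;
	int i;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	for (i = 0; i < pd->nr_perf_states; i++)
		dev_dbg(dev, "%lu kHz: power %lu, cost %lu\n",
			table[i].frequency, table[i].power, table[i].cost);
	rcu_read_unlock();
}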
#else
struct em_data_callback {};
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *span)
+ struct em_data_callback *cb, cpumask_t *span,
+ bool microwatts)
{
return -EINVAL;
}
@@ -202,7 +355,8 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev)
return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
return 0;
}
@@ -210,6 +364,29 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
+static inline
+struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
+{
+ return NULL;
+}
+static inline void em_table_free(struct em_perf_table __rcu *table) {}
+static inline
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table __rcu *new_table)
+{
+ return -EINVAL;
+}
+static inline
+struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
+{
+ return NULL;
+}
+static inline
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index efebbffcd5cc..b0fb775a600d 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,10 +2,16 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
-#include <linux/tracehook.h>
+#include <linux/static_call_types.h>
+#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
+#include <linux/context_tracking.h>
+#include <linux/livepatch.h>
+#include <linux/resume_user_mode.h>
+#include <linux/tick.h>
+#include <linux/kmsan.h>
#include <asm/entry-common.h>
@@ -13,22 +19,6 @@
* Define dummy _TIF work flags if not defined by the architecture or for
* disabled functionality.
*/
-#ifndef _TIF_SYSCALL_EMU
-# define _TIF_SYSCALL_EMU (0)
-#endif
-
-#ifndef _TIF_SYSCALL_TRACEPOINT
-# define _TIF_SYSCALL_TRACEPOINT (0)
-#endif
-
-#ifndef _TIF_SECCOMP
-# define _TIF_SECCOMP (0)
-#endif
-
-#ifndef _TIF_SYSCALL_AUDIT
-# define _TIF_SYSCALL_AUDIT (0)
-#endif
-
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING (0)
#endif
@@ -38,27 +28,32 @@
#endif
/*
- * TIF flags handled in syscall_enter_from_usermode()
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
*/
-#ifndef ARCH_SYSCALL_ENTER_WORK
-# define ARCH_SYSCALL_ENTER_WORK (0)
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER (0)
#endif
-#define SYSCALL_ENTER_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
- _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \
- ARCH_SYSCALL_ENTER_WORK)
-
/*
- * TIF flags handled in syscall_exit_to_user_mode()
+ * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
*/
-#ifndef ARCH_SYSCALL_EXIT_WORK
-# define ARCH_SYSCALL_EXIT_WORK (0)
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT (0)
#endif
-#define SYSCALL_EXIT_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
+#define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \
+ SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_EMU | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ ARCH_SYSCALL_WORK_ENTER)
+#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
+ ARCH_SYSCALL_WORK_EXIT)
/*
* TIF flags handled in exit_to_user_mode_loop()
@@ -69,11 +64,11 @@
#define EXIT_TO_USER_MODE_WORK \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
ARCH_EXIT_TO_USER_MODE_WORK)
/**
- * arch_check_user_regs - Architecture specific sanity check for user mode regs
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
* @regs: Pointer to current's pt_regs
*
* Defaults to an empty implementation. Can be replaced by architecture
@@ -83,42 +78,74 @@
* section. Use __always_inline so the compiler cannot push it out of line
* and make it instrumentable.
*/
-static __always_inline void arch_check_user_regs(struct pt_regs *regs);
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
-#ifndef arch_check_user_regs
-static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
/**
- * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry()
- * @regs: Pointer to currents pt_regs
+ * enter_from_user_mode - Establish state when coming from user mode
*
- * Returns: 0 on success or an error code to skip the syscall.
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also, with NO_HZ_FULL, RCU might be idle.
*
- * Defaults to tracehook_report_syscall_entry(). Can be replaced by
- * architecture specific code.
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
*
- * Invoked from syscall_enter_from_user_mode()
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
*/
-static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs);
-
-#ifndef arch_syscall_enter_tracehook
-static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs)
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
- return tracehook_report_syscall_entry(regs);
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
}
-#endif
/**
- * syscall_enter_from_user_mode - Check and handle work before invoking
- * a syscall
+ * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
* @regs: Pointer to current's pt_regs
- * @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
* disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct and the subsequent functions can be
- * instrumented.
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This handles lockdep, RCU (context tracking) and tracing state, i.e.
+ * the functionality provided by enter_from_user_mode().
+ *
+ * This is invoked when there is extra architecture specific functionality
+ * to be done between establishing state and handling user mode entry work.
+ */
+void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ unsigned long work);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
+ * architecture specific work.
*
* Returns: The original or a modified syscall number
*
@@ -127,14 +154,52 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
* syscall_set_return_value() first. If neither of those is called and -1
* is returned, then the syscall will fail with ENOSYS.
*
- * The following functionality is handled here:
+ * It handles the following work items:
+ *
+ * 1) syscall_work flag dependent invocations of
+ * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
+ * 2) Invocation of audit_syscall_entry()
+ */
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is a combination of syscall_enter_from_user_mode_prepare() and
+ * syscall_enter_from_user_mode_work().
*
- * 1) Establish state (lockdep, RCU (context tracking), tracing)
- * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
- * __secure_computing(), trace_sys_enter()
- * 3) Invocation of audit_syscall_entry()
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
*/
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
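For orientation, a hypothetical architecture's C-level syscall entry could combine the pieces above like this (a sketch only: syscall_get_nr_from_regs(), arch_set_return_value() and sys_call_table_example[] are invented names, not kernel API):

__visible noinstr void arch_do_syscall(struct pt_regs *regs)
{
	long nr = syscall_get_nr_from_regs(regs);	/* invented helper */

	/* Establishes state, enables IRQs, runs the entry work. */
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	if (nr >= 0 && nr < NR_syscalls)
		arch_set_return_value(regs, sys_call_table_example[nr](regs));
	instrumentation_end();

	syscall_exit_to_user_mode(regs);
}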
/**
* local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
@@ -226,31 +291,94 @@ static __always_inline void arch_exit_to_user_mode(void) { }
#endif
/**
- * arch_do_signal - Architecture specific signal delivery function
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
* @regs: Pointer to current's pt_regs
*
* Invoked from exit_to_user_mode_loop().
*/
-void arch_do_signal(struct pt_regs *regs);
+void arch_do_signal_or_restart(struct pt_regs *regs);
/**
- * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
- * @regs: Pointer to currents pt_regs
- * @step: Indicator for single step
- *
- * Defaults to tracehook_report_syscall_exit(). Can be replaced by
- * architecture specific code.
- *
- * Invoked from syscall_exit_to_user_mode()
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
*/
-static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work);
-#ifndef arch_syscall_exit_tracehook
-static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
+/**
+ * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ */
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
- tracehook_report_syscall_exit(regs, step);
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
}
-#endif
+
+/**
+ * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Same as steps 1 and 2 of syscall_exit_to_user_mode() but without calling
+ * exit_to_user_mode() to perform the final transition to user mode.
+ *
+ * Calling convention is the same as for syscall_exit_to_user_mode() and it
+ * returns with all work handled and interrupts disabled. The caller must
+ * invoke exit_to_user_mode() before actually switching to user mode to
+ * make the final state transitions. Interrupts must stay disabled between
+ * return from this function and the invocation of exit_to_user_mode().
+ */
+void syscall_exit_to_user_mode_work(struct pt_regs *regs);
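A sketch of the split calling convention just described (hypothetical arch code; arch_flush_lazy_state() is an invented stand-in for the extra noinstr step):

static __always_inline void arch_syscall_exit(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_work(regs);	/* returns with IRQs disabled */
	arch_flush_lazy_state();		/* invented arch-specific step */
	exit_to_user_mode();			/* final transition */
}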
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
@@ -266,7 +394,7 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
* - rseq syscall exit
* - audit
* - syscall tracing
- * - tracehook (single stepping)
+ * - ptrace (single stepping)
*
* 2) Preparatory work
* - Exit to user mode loop (common TIF handling). Invokes
@@ -274,8 +402,12 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
* - Architecture specific one time work arch_exit_to_user_mode_prepare()
* - Address limit and lockdep checks
*
- * 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes
- * arch_exit_to_user_mode() to handle e.g. speculation mitigations
+ * 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
+ * functionality in exit_to_user_mode().
+ *
+ * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
+ * exit_to_user_mode(). This function is preferred unless there is a
+ * compelling architectural reason to use the separate functions.
*/
void syscall_exit_to_user_mode(struct pt_regs *regs);
@@ -296,7 +428,7 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs);
* irqentry_exit_to_user_mode - Interrupt exit work
* @regs: Pointer to current's pt_regs
*
- * Invoked with interrupts disbled and fully valid regs. Returns with all
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
* work handled, interrupts disabled such that the caller can immediately
* switch to user mode. Called from architecture specific interrupt
* handling code.
@@ -308,8 +440,26 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs);
void irqentry_exit_to_user_mode(struct pt_regs *regs);
#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke ct_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
typedef struct irqentry_state {
- bool exit_rcu;
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
} irqentry_state_t;
#endif
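The state object above is meant to be threaded through a C-level handler like this (a sketch; handle_example_fault() is invented):

static void do_example_fault(struct pt_regs *regs, unsigned long addr)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	handle_example_fault(regs, addr);	/* invented handler */
	instrumentation_end();

	irqentry_exit(regs, state);
}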
@@ -331,12 +481,12 @@ typedef struct irqentry_state {
*
* For kernel mode entries RCU handling is done conditional. If RCU is
* watching then the only RCU requirement is to check whether the tick has
- * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
- * invoked on entry and rcu_irq_exit() on exit.
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
*
- * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
* solves the problem of kernel mode pagefaults which can schedule, which
- * is not possible after invoking rcu_irq_enter() without undoing it.
+ * is not possible after invoking ct_irq_enter() without undoing it.
*
* For user mode entries irqentry_enter_from_user_mode() is invoked to
* establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
@@ -351,7 +501,21 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
*
* Conditional reschedule with additional sanity checks.
*/
-void irqentry_exit_cond_resched(void);
+void raw_irqentry_exit_cond_resched(void);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
/**
* irqentry_exit - Handle return from exception that used irqentry_enter()
@@ -359,7 +523,7 @@ void irqentry_exit_cond_resched(void);
* @state: Return value from matching call to irqentry_enter()
*
* Depending on the return target (kernel/user) this runs the necessary
- * preemption and work checks if possible and reguired and returns to
+ * preemption and work checks if possible and required and returns to
* the caller with interrupts disabled and no further work pending.
*
* This is the last action before returning to the low level ASM code which
@@ -369,4 +533,23 @@ void irqentry_exit_cond_resched(void);
*/
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs: Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
#endif
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 0cef17afb41a..6813171afccb 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -2,7 +2,12 @@
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H
-#include <linux/entry-common.h>
+#include <linux/static_call_types.h>
+#include <linux/resume_user_mode.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -11,8 +16,8 @@
# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
#endif
-#define XFER_TO_GUEST_MODE_WORK \
- (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+#define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
_TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
struct kvm_vcpu;
@@ -47,6 +52,20 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
/**
+ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
+ * needs to be handled while IRQs are disabled
+ * upon entering the guest.
+ *
+ * Has to be invoked with interrupts disabled before the last call
+ * to xfer_to_guest_mode_work_pending().
+ */
+static inline void xfer_to_guest_mode_prepare(void)
+{
+ lockdep_assert_irqs_disabled();
+ tick_nohz_user_enter_prepare();
+}
+
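The ordering requirement reads roughly like this in a hypothetical vcpu run loop (a sketch; the guest-entry step itself is elided):

static int example_enter_guest_once(struct kvm_vcpu *vcpu)
{
	local_irq_disable();
	xfer_to_guest_mode_prepare();
	if (__xfer_to_guest_mode_work_pending()) {
		local_irq_enable();
		return xfer_to_guest_mode_handle_work(vcpu);
	}
	/* ... actually enter the guest here ... */
	local_irq_enable();
	return 0;
}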
+/**
* __xfer_to_guest_mode_work_pending - Check if work is pending
*
* Returns: True if work pending, False otherwise.
@@ -56,7 +75,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
*/
static inline bool __xfer_to_guest_mode_work_pending(void)
{
- unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+ unsigned long ti_work = read_thread_flags();
return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}
diff --git a/include/linux/err.h b/include/linux/err.h
index a139c64aef2a..b5d9bb2a2349 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,23 +19,54 @@
#ifndef __ASSEMBLY__
+/**
+ * IS_ERR_VALUE - Detect an error pointer.
+ * @x: The pointer to check.
+ *
+ * Like IS_ERR(), but does not generate a compiler warning if the result is unused.
+ */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+/**
+ * ERR_PTR - Create an error pointer.
+ * @error: A negative error code.
+ *
+ * Encodes @error into a pointer value. Users should consider the result
+ * opaque and not assume anything about how the error is encoded.
+ *
+ * Return: A pointer with @error encoded within its value.
+ */
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
+/**
+ * PTR_ERR - Extract the error code from an error pointer.
+ * @ptr: An error pointer.
+ * Return: The error code within @ptr.
+ */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
return (long) ptr;
}
+/**
+ * IS_ERR - Detect an error pointer.
+ * @ptr: The pointer to check.
+ * Return: true if @ptr is an error pointer, false otherwise.
+ */
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
+/**
+ * IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
+ * @ptr: The pointer to check.
+ *
+ * Like IS_ERR(), but also returns true for a null pointer.
+ */
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
@@ -54,6 +85,23 @@ static inline void * __must_check ERR_CAST(__force const void *ptr)
return (void *) ptr;
}
+/**
+ * PTR_ERR_OR_ZERO - Extract the error code from a pointer if it has one.
+ * @ptr: A potential error pointer.
+ *
+ * Convenience function that can be used inside a function that returns
+ * an error code to propagate errors received as error pointers.
+ * For example, ``return PTR_ERR_OR_ZERO(ptr);`` replaces:
+ *
+ * .. code-block:: c
+ *
+ * if (IS_ERR(ptr))
+ * return PTR_ERR(ptr);
+ * else
+ * return 0;
+ *
+ * Return: The error code within @ptr if it is an error pointer; 0 otherwise.
+ */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
diff --git a/include/linux/errno.h b/include/linux/errno.h
index d73f597a2484..8b0c754bab02 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -31,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
+#define ENOGRACE 531 /* NFS file lock reclaim refused */
#endif
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
index 635a95caf29f..20e738f4eae8 100644
--- a/include/linux/error-injection.h
+++ b/include/linux/error-injection.h
@@ -3,6 +3,7 @@
#define _LINUX_ERROR_INJECTION_H
#include <linux/compiler.h>
+#include <linux/errno.h>
#include <asm-generic/error-injection.h>
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
@@ -19,7 +20,7 @@ static inline bool within_error_injection_list(unsigned long addr)
static inline int get_injectable_error_type(unsigned long addr)
{
- return EI_ETYPE_NONE;
+ return -EOPNOTSUPP;
}
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2e5debc0373c..224645f17c33 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -11,7 +11,7 @@
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
- * Relocated to include/linux where it belongs by Alan Cox
+ * Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
*/
#ifndef _LINUX_ETHERDEVICE_H
@@ -26,10 +26,17 @@
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
-u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
+u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -127,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
#endif
}
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
@@ -227,8 +234,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -262,8 +267,11 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
}
/**
@@ -300,6 +308,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
}
/**
+ * eth_hw_addr_set - Assign Ethernet address to a net_device
+ * @dev: pointer to net_device structure
+ * @addr: address to assign
+ *
+ * Assign given address to the net_device, addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, ETH_ALEN);
+}
+
+/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
@@ -311,7 +331,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -352,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
* Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
*/
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
- const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
@@ -409,6 +428,28 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
+static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ip_mcast(const u8 *addr)
+{
+ return ether_addr_is_ipv4_mcast(addr) ||
+ ether_addr_is_ipv6_mcast(addr);
+}
+
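A short classification sketch using the new helpers (addr is any six-byte destination address):

static const char *example_classify(const u8 *addr)
{
	if (ether_addr_is_ipv4_mcast(addr))
		return "IPv4 multicast";
	if (ether_addr_is_ipv6_mcast(addr))
		return "IPv6 multicast";
	return "unicast/other";
}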
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -467,6 +508,20 @@ static inline void eth_addr_inc(u8 *addr)
}
/**
+ * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
+ *
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ * @offset: Offset to add (may be negative to subtract).
+ */
+static inline void eth_addr_add(u8 *addr, long offset)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u += offset;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -532,6 +587,27 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
+
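A sketch of the switch-driver pattern the comment describes (struct example_switch and its fields are invented):

static void example_assign_port_addrs(struct example_switch *sw)
{
	int i;

	/* Port i gets base + (i + 1); reserving the base address itself
	 * is an arbitrary choice for this sketch.
	 */
	for (i = 0; i < sw->n_ports; i++)
		eth_hw_addr_gen(sw->port_netdev[i], sw->base_mac, i + 1);
}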
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 969a80211df6..9901e563f706 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -15,10 +15,10 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
-#ifdef CONFIG_COMPAT
-
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@@ -38,8 +38,6 @@ struct compat_ethtool_rxnfc {
u32 rule_locs[];
};
-#endif /* CONFIG_COMPAT */
-
#include <linux/rculist.h>
/**
@@ -70,6 +68,44 @@ enum {
ETH_RSS_HASH_FUNCS_COUNT
};
+/**
+ * struct kernel_ethtool_ringparam - RX/TX ring configuration
+ * @rx_buf_len: Current length of buffers on the rx ring.
+ * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
+ * @cqe_size: Size of TX/RX completion queue event
+ * @tx_push_buf_len: Size of TX push buffer
+ * @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ */
+struct kernel_ethtool_ringparam {
+ u32 rx_buf_len;
+ u8 tcp_data_split;
+ u8 tx_push;
+ u8 rx_push;
+ u32 cqe_size;
+ u32 tx_push_buf_len;
+ u32 tx_push_buf_max_len;
+};
+
+/**
+ * enum ethtool_supported_ring_param - indicator caps for setting ring params
+ * @ETHTOOL_RING_USE_RX_BUF_LEN: capability bit for setting rx_buf_len
+ * @ETHTOOL_RING_USE_CQE_SIZE: capability bit for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capability bit for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capability bit for setting rx_push
+ * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capability bit for setting tx_push_buf_len
+ * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capability bit for setting tcp_data_split
+ */
+enum ethtool_supported_ring_param {
+ ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+ ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
+ ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
+ ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+ ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
+};
+
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
@@ -81,15 +117,9 @@ enum {
#define ETH_RSS_HASH_NO_CHANGE 0
struct net_device;
+struct netlink_ext_ack;
-/* Some generic methods drivers may use in their ethtool_ops */
-u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
-
-
-/**
- * struct ethtool_link_ext_state_info - link extended state and substate.
- */
+/* Link extended state and substate. */
struct ethtool_link_ext_state_info {
enum ethtool_link_ext_state link_ext_state;
union {
@@ -98,10 +128,25 @@ struct ethtool_link_ext_state_info {
enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
enum ethtool_link_ext_substate_cable_issue cable_issue;
- u8 __link_ext_substate;
+ enum ethtool_link_ext_substate_module module;
+ u32 __link_ext_substate;
};
};
+struct ethtool_link_ext_stats {
+ /* Custom Linux statistic for PHY level link down events.
+ * In a simpler world it should be equal to netdev->carrier_down_count
+ * unfortunately netdev also counts local reconfigurations which don't
+ * actually take the physical link down, not to mention NC-SI which,
+ * if present, keeps the link up regardless of host state.
+ * This statistic counts when PHY _actually_ went down, or lost link.
+ *
+ * Note that we need u64 for ethtool_stats_init() and comparisons
+ * to ETHTOOL_STAT_NOT_SET, but only u32 is exposed to the user.
+ */
+ u64 link_down_events;
+};
+
/**
* ethtool_rxfh_indir_default - get default value for RX flow hash indirection
* @index: Index in RX flow hash indirection table
@@ -128,6 +173,7 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
+ u32 lanes;
};
/**
@@ -176,6 +222,24 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct ethtool_keee {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_active;
+ bool eee_enabled;
+};
+
+struct kernel_ethtool_coalesce {
+ u8 use_cqe_mode_tx;
+ u8 use_cqe_mode_rx;
+ u32 tx_aggr_max_bytes;
+ u32 tx_aggr_max_frames;
+ u32 tx_aggr_time_usecs;
+};
+
/**
* ethtool_intersect_link_masks - Given two link masks, AND them together
* @dst: first mask and where result is stored
@@ -215,6 +279,12 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
+#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
+#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -240,14 +310,351 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_COALESCE_USE_CQE \
+ (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+#define ETHTOOL_COALESCE_TX_AGGR \
+ (ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES | \
+ ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES | \
+ ETHTOOL_COALESCE_TX_AGGR_TIME_USECS)
+
+#define ETHTOOL_STAT_NOT_SET (~0ULL)
+
+static inline void ethtool_stats_init(u64 *stats, unsigned int n)
+{
+ while (n--)
+ stats[n] = ETHTOOL_STAT_NOT_SET;
+}
+
+/* Basic IEEE 802.3 MAC statistics (30.3.1.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_mac_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+ );
+};
+
+/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_phy_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 SymbolErrorDuringCarrier;
+ );
+};
+
+/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_ctrl_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+ );
+};
+
+/**
+ * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
+ * @tx_pause_frames: transmitted pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
+ *
+ * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted`
+ * from the standard.
+ *
+ * @rx_pause_frames: received pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES.
+ *
+ * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
+ * from the standard.
+ */
+struct ethtool_pause_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+ );
+};
+
+#define ETHTOOL_MAX_LANES 8
+
+/**
+ * struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
+ * @corrected_blocks: number of received blocks corrected by FEC
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORRECTED.
+ *
+ * Equivalent to `30.5.1.1.17 aFECCorrectedBlocks` from the standard.
+ *
+ * @uncorrectable_blocks: number of received blocks FEC was not able to correct
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_UNCORR.
+ *
+ * Equivalent to `30.5.1.1.18 aFECUncorrectableBlocks` from the standard.
+ *
+ * @corrected_bits: number of bits corrected by FEC
+ * Similar to @corrected_blocks but counts individual bit changes,
+ * not entire FEC data blocks. This is a non-standard statistic.
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
+ *
+ * For each of the above fields, the two substructure members are:
+ *
+ * - @lanes: per-lane/PCS-instance counts as defined by the standard
+ * - @total: error counts for the entire port, for drivers incapable of reporting
+ * per-lane stats
+ *
+ * Drivers should fill in either only total or only per-lane statistics; the
+ * core will take care of adding lane values up to produce the total.
+ */
+struct ethtool_fec_stats {
+ struct ethtool_fec_stat {
+ u64 total;
+ u64 lanes[ETHTOOL_MAX_LANES];
+ } corrected_blocks, uncorrectable_blocks, corrected_bits;
+};
+
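A driver-side sketch of the per-lane option described above (example_num_lanes(), example_read_corr() and example_read_uncorr() are invented register accessors):

static void example_get_fec_stats(struct net_device *dev,
				  struct ethtool_fec_stats *stats)
{
	int lane;

	for (lane = 0; lane < example_num_lanes(dev); lane++) {
		stats->corrected_blocks.lanes[lane] =
			example_read_corr(dev, lane);
		stats->uncorrectable_blocks.lanes[lane] =
			example_read_uncorr(dev, lane);
	}
	/* Counters this device lacks stay at ETHTOOL_STAT_NOT_SET. */
}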
+/**
+ * struct ethtool_rmon_hist_range - byte range for histogram statistics
+ * @low: low bound of the bucket (inclusive)
+ * @high: high bound of the bucket (inclusive)
+ */
+struct ethtool_rmon_hist_range {
+ u16 low;
+ u16 high;
+};
+
+#define ETHTOOL_RMON_HIST_MAX 10
+
+/**
+ * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
+ * @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
+ * @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
+ * @fragments: Equivalent to `etherStatsFragments` from the RFC.
+ * @jabbers: Equivalent to `etherStatsJabbers` from the RFC.
+ * @hist: Packet counter for packet length buckets (e.g.
+ * `etherStatsPkts128to255Octets` from the RFC).
+ * @hist_tx: Tx counters in similar form to @hist, not defined in the RFC.
+ *
+ * Selection of RMON (RFC 2819) statistics which are not exposed via different
+ * APIs, primarily the packet-length-based counters.
+ * Unfortunately different designs choose different buckets beyond
+ * the 1024B mark (jumbo frame territory), so the definition of the bucket
+ * ranges is left to the driver.
+ */
+struct ethtool_rmon_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
+
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ );
+};
+
+#define ETH_MODULE_EEPROM_PAGE_LEN 128
+#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
+
+/**
+ * struct ethtool_module_eeprom - EEPROM dump from specified page
+ * @offset: Offset within the specified EEPROM page to begin read, in bytes.
+ * @length: Number of bytes to read.
+ * @page: Page number to read from.
+ * @bank: Page bank number to read from, if applicable by EEPROM spec.
+ * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
+ * EEPROMs use 0x50 or 0x51.
+ * @data: Pointer to buffer with EEPROM data of @length size.
+ *
+ * This can be used to manage pages during EEPROM dump in ethtool and pass
+ * required information to the driver.
+ */
+struct ethtool_module_eeprom {
+ u32 offset;
+ u32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 *data;
+};
+
+/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
+/**
+ * struct ethtool_mm_state - 802.3 MAC merge layer state
+ * @verify_time:
+ * wait time between verification attempts in ms (according to clause
+ * 30.14.1.6 aMACMergeVerifyTime)
+ * @max_verify_time:
+ * maximum accepted value for the @verify_time variable in set requests
+ * @verify_status:
+ * state of the verification state machine of the MM layer (according to
+ * clause 30.14.1.2 aMACMergeStatusVerify)
+ * @tx_enabled:
+ * set if the MM layer is administratively enabled in the TX direction
+ * (according to clause 30.14.1.3 aMACMergeEnableTx)
+ * @tx_active:
+ * set if the MM layer is enabled in the TX direction, which makes FP
+ * possible (according to 30.14.1.5 aMACMergeStatusTx). This should be
+ * true if MM is enabled, and the verification status is either verified,
+ * or disabled.
+ * @pmac_enabled:
+ * set if the preemptible MAC is powered on and is able to receive
+ * preemptible packets and respond to verification frames.
+ * @verify_enabled:
+ * set if the Verify function of the MM layer (which sends SMD-V
+ * verification requests) is administratively enabled (regardless of
+ * whether it is currently in the ETHTOOL_MM_VERIFY_STATUS_DISABLED state
+ * or not), according to clause 30.14.1.4 aMACMergeVerifyDisableTx (but
+ * using positive rather than negative logic). The device should always
+ * respond to received SMD-V requests as long as @pmac_enabled is set.
+ * @tx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that the link partner
+ * supports receiving, expressed in octets. Compared to the definition
+ * from clause 30.14.1.7 aMACMergeAddFragSize which is expressed in the
+ * range 0 to 3 (requiring a translation to the size in octets according
+ * to the formula 64 * (1 + addFragSize) - 4), a value in a continuous and
+ * unbounded range can be specified here.
+ * @rx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that this device
+ * supports receiving, expressed in octets.
+ */
+struct ethtool_mm_state {
+ u32 verify_time;
+ u32 max_verify_time;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ bool pmac_enabled;
+ bool verify_enabled;
+ u32 tx_min_frag_size;
+ u32 rx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_cfg - 802.3 MAC merge layer configuration
+ * @verify_time: see struct ethtool_mm_state
+ * @verify_enabled: see struct ethtool_mm_state
+ * @tx_enabled: see struct ethtool_mm_state
+ * @pmac_enabled: see struct ethtool_mm_state
+ * @tx_min_frag_size: see struct ethtool_mm_state
+ */
+struct ethtool_mm_cfg {
+ u32 verify_time;
+ bool verify_enabled;
+ bool tx_enabled;
+ bool pmac_enabled;
+ u32 tx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_stats - 802.3 MAC merge layer statistics
+ * @MACMergeFrameAssErrorCount:
+ * received MAC frames with reassembly errors
+ * @MACMergeFrameSmdErrorCount:
+ * received MAC frames/fragments rejected due to unknown or incorrect SMD
+ * @MACMergeFrameAssOkCount:
+ * received MAC frames that were successfully reassembled and passed up
+ * @MACMergeFragCountRx:
+ * number of additional correct SMD-C mPackets received due to preemption
+ * @MACMergeFragCountTx:
+ * number of additional mPackets sent due to preemption
+ * @MACMergeHoldCount:
+ * number of times the MM layer entered the HOLD state, which blocks
+ * transmission of preemptible traffic
+ */
+struct ethtool_mm_stats {
+ u64 MACMergeFrameAssErrorCount;
+ u64 MACMergeFrameSmdErrorCount;
+ u64 MACMergeFrameAssOkCount;
+ u64 MACMergeFragCountRx;
+ u64 MACMergeFragCountTx;
+ u64 MACMergeHoldCount;
+};
+
+/**
+ * struct ethtool_rxfh_param - RXFH (RSS) parameters
+ * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
+ * Valid values are one of the %ETH_RSS_HASH_*.
+ * @indir_size: On SET, the array size of the user buffer for the
+ * indirection table, which may be zero, or
+ * %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver),
+ * the array size of the hardware indirection table.
+ * @indir: The indirection table of size @indir_size entries.
+ * @key_size: On SET, the array size of the user buffer for the hash key,
+ * which may be zero. On GET (read from the driver), the size of the
+ * hardware hash key.
+ * @key: The hash key of size @key_size bytes.
+ * @rss_context: RSS context identifier. Context 0 is the default for normal
+ * traffic; other contexts can be referenced as the destination for RX flow
+ * classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used
+ * to allocate a new RSS context; on return this field will
+ * contain the ID of the newly allocated context.
+ * @rss_delete: Set to non-ZERO to remove the @rss_context context.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ */
+struct ethtool_rxfh_param {
+ u8 hfunc;
+ u32 indir_size;
+ u32 *indir;
+ u32 key_size;
+ u8 *key;
+ u32 rss_context;
+ u8 rss_delete;
+ u8 input_xfrm;
+};
/**
* struct ethtool_ops - optional netdev operations
+ * @cap_link_lanes_supported: indicates if the driver supports lanes
+ * parameter.
+ * @cap_rss_ctx_supported: indicates if the driver supports RSS
+ * contexts.
+ * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
+ * RSS.
* @supported_coalesce_params: supported types of interrupt coalescing.
- * @get_drvinfo: Report driver/device information. Should only set the
- * @driver, @version, @fw_version and @bus_info fields. If not
- * implemented, the @driver and @bus_info fields will be filled in
- * according to the netdev's parent device.
+ * @supported_ring_params: supported ring params.
+ * @get_drvinfo: Report driver/device information. Modern drivers no
+ * longer have to implement this callback. Most fields are
+ * correctly filled in by the core using system information, or
+ * populated using other driver operations.
* @get_regs_len: Get buffer length required for @get_regs
* @get_regs: Get device registers
* @get_wol: Report whether Wake-on-Lan is enabled
@@ -266,6 +673,10 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* do not attach ext_substate attribute to netlink message). If link_ext_state
* and link_ext_substate are unknown, return -ENODATA. If not implemented,
* link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_link_ext_stats: Read extra link-related counters.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ * @get_eeprom and @set_eeprom requests.
+ * Returns 0 if device does not support EEPROM access.
* @get_eeprom: Read data from the device EEPROM.
* Should fill in the magic field. Don't need to check len for zero
* or wraparound. Fill in the data argument with the eeprom values
@@ -282,6 +693,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pause_stats: Report pause frame statistics. Drivers must not zero
+ * statistics which they don't report. The stats structure is initialized
+ * to ETHTOOL_STAT_NOT_SET, indicating the driver does not report statistics.
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
@@ -331,15 +745,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
- * @get_rxfh_context: Get the contents of the RX flow hash indirection table,
- * hash key, and/or hash function assiciated to the given rss context.
- * Returns a negative error code or zero.
- * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting
- * the contents of the RX flow hash indirection table, hash key, and/or
- * hash function associated to the given context. Arguments which are set
- * to %NULL or zero will remain unchanged.
- * Returns a negative error code or zero. An error code must be returned
- * if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -348,6 +753,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_dump_data: Get dump data.
* @set_dump: Set dump specific flags to the device.
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ * It may be called under RCU, under the rtnl lock, or with a reference
+ * held on the device.
* Drivers supporting transmit time stamps in software should set this to
* ethtool_op_get_ts_info().
* @get_module_info: Get the size and type of the eeprom contained within
@@ -355,6 +761,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
* @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -376,11 +784,34 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
* any change to them will be overwritten by kernel. Returns a negative
* error code or zero.
+ * @get_fec_stats: Report FEC statistics.
+ * Core will sum up per-lane stats to get the total.
+ * Drivers must not zero statistics which they don't report. The stats
+ * structure is initialized to ETHTOOL_STAT_NOT_SET, indicating the driver does
+ * not report statistics.
* @get_fecparam: Get the network device Forward Error Correction parameters.
* @set_fecparam: Set the network device Forward Error Correction parameters.
* @get_ethtool_phy_stats: Return extended statistics about the PHY device.
* This is only useful if the device maintains PHY statistics and
* cannot use the standard PHY library helpers.
+ * @get_phy_tunable: Read the value of a PHY tunable.
+ * @set_phy_tunable: Set the value of a PHY tunable.
+ * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
+ * specified page. Returns a negative error code or the number of bytes
+ * read.
+ * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
+ * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
+ * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
+ * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
+ * Set %ranges to a pointer to zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
+ * @get_mm: Query the 802.3 MAC Merge layer state.
+ * @set_mm: Set the 802.3 MAC Merge layer parameters.
+ * @get_mm_stats: Query the 802.3 MAC Merge layer statistics.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -395,7 +826,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 cap_link_lanes_supported:1;
+ u32 cap_rss_ctx_supported:1;
+ u32 cap_rss_sym_xor_supported:1;
u32 supported_coalesce_params;
+ u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -407,17 +842,31 @@ struct ethtool_ops {
u32 (*get_link)(struct net_device *);
int (*get_link_ext_state)(struct net_device *,
struct ethtool_link_ext_state_info *);
+ void (*get_link_ext_stats)(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*get_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
+ int (*set_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
int (*set_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
+ void (*get_pause_stats)(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
@@ -439,15 +888,9 @@ struct ethtool_ops {
int (*reset)(struct net_device *, u32 *);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc);
- int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc);
- int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc, u32 rss_context);
- int (*set_rxfh_context)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete);
+ int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
+ int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -459,8 +902,8 @@ struct ethtool_ops {
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_eee)(struct net_device *, struct ethtool_eee *);
- int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_eee)(struct net_device *dev, struct ethtool_keee *eee);
+ int (*set_eee)(struct net_device *dev, struct ethtool_keee *eee);
int (*get_tunable)(struct net_device *,
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
@@ -473,12 +916,40 @@ struct ethtool_ops {
struct ethtool_link_ksettings *);
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
+ void (*get_fec_stats)(struct net_device *dev,
+ struct ethtool_fec_stats *fec_stats);
int (*get_fecparam)(struct net_device *,
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
struct ethtool_fecparam *);
void (*get_ethtool_phy_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
+ int (*get_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
+ int (*get_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+ void (*get_eth_phy_stats)(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*get_mm)(struct net_device *dev, struct ethtool_mm_state *state);
+ int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
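To make the new capability bits concrete, here is a minimal sketch (not part of the patch; the driver name and callback body are hypothetical) of an ethtool_ops instance using the reworked layout together with the generic helpers declared later in this header:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "foo", sizeof(info->driver));
}

static const struct ethtool_ops foo_ethtool_ops = {
	.cap_link_lanes_supported	= 1,
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
	.get_drvinfo			= foo_get_drvinfo,
	.get_link			= ethtool_op_get_link,
};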
@@ -502,17 +973,21 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd,
u32 *dev_speed, u8 *dev_duplex);
-struct netlink_ext_ack;
struct phy_device;
struct phy_tdr_config;
+struct phy_plca_cfg;
+struct phy_plca_status;
/**
* struct ethtool_phy_ops - Optional PHY device options
* @get_sset_count: Get number of strings that @get_strings will write.
* @get_strings: Return a set of strings that describe the requested objects
* @get_stats: Return extended statistics about the PHY device.
- * @start_cable_test - Start a cable test
- * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test
+ * @get_plca_cfg: Return PLCA configuration.
+ * @set_plca_cfg: Set PLCA configuration.
+ * @get_plca_status: Get PLCA status.
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
*
* All operations are optional (i.e. the function pointer may be set to %NULL)
* and callers must take this into account. Callers must hold the RTNL lock.
@@ -522,6 +997,13 @@ struct ethtool_phy_ops {
int (*get_strings)(struct phy_device *dev, u8 *data);
int (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
int (*start_cable_test)(struct phy_device *phydev,
struct netlink_ext_ack *extack);
int (*start_cable_test_tdr)(struct phy_device *phydev,
@@ -535,4 +1017,117 @@ struct ethtool_phy_ops {
*/
void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
+/**
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+ enum ethtool_link_mode_bit_indices link_mode);
+
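A usage sketch, assuming a hypothetical fixed-link driver: the helper fills in the base link parameters (speed, duplex and related fields) that correspond to a single link mode bit.

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *ks)
{
	/* Report a fixed 1000baseT/Full link. */
	ethtool_params_from_link_mode(ks, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
	return 0;
}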
+/**
+ * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller
+ * is responsible for freeing the memory of @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to pointer of vclock index
+ *
+ * Returns the number of phc vclocks.
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
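A sketch of the ownership contract spelled out above (function name hypothetical): the helper allocates the index array, so the caller frees it.

#include <linux/slab.h>

static void foo_show_vclocks(struct net_device *dev)
{
	int *vclock_index;
	int i, num;

	num = ethtool_get_phc_vclocks(dev, &vclock_index);
	for (i = 0; i < num; i++)
		pr_info("phc vclock index: %d\n", vclock_index[i]);
	if (num > 0)
		kfree(vclock_index);	/* caller owns the array */
}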
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/**
+ * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
+ * size expressed as a multiplier into (absolute) minimum fragment size
+ * value expressed in octets
+ * @val_add: Value of addFragSize multiplier
+ */
+static inline u32 ethtool_mm_frag_size_add_to_min(u32 val_add)
+{
+ return (ETH_ZLEN + ETH_FCS_LEN) * (1 + val_add) - ETH_FCS_LEN;
+}
+
+/**
+ * ethtool_mm_frag_size_min_to_add - Translate (absolute) minimum fragment size
+ * expressed in octets into (standard) additional fragment size expressed
+ * as a multiplier
+ * @val_min: Minimum fragment size value in octets
+ * @val_add: Pointer where the standard addFragSize value is to be returned
+ * @extack: Netlink extended ack
+ *
+ * Translate a value in octets to one of 0, 1, 2, 3 according to the reverse
+ * application of the 802.3 formula 64 * (1 + addFragSize) - 4. To be called
+ * by drivers which do not support programming the minimum fragment size to a
+ * continuous range. Returns an error for any other fragment length value.
+ */
+static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ struct netlink_ext_ack *extack)
+{
+ u32 add_frag_size;
+
+ for (add_frag_size = 0; add_frag_size < 4; add_frag_size++) {
+ if (ethtool_mm_frag_size_add_to_min(add_frag_size) == val_min) {
+ *val_add = add_frag_size;
+ return 0;
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "minFragSize required to be one of 60, 124, 188 or 252");
+ return -EINVAL;
+}
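Worked values for the two helpers above, using ETH_ZLEN = 60 and ETH_FCS_LEN = 4, so that minFragSize = 64 * (1 + addFragSize) - 4:

/*
 *   addFragSize 0 -> minFragSize  60 octets
 *   addFragSize 1 -> minFragSize 124 octets
 *   addFragSize 2 -> minFragSize 188 octets
 *   addFragSize 3 -> minFragSize 252 octets
 *
 * which is exactly the set accepted by ethtool_mm_frag_size_min_to_add().
 */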
+
+/**
+ * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
+ * @dev: pointer to net_device structure
+ * @info: buffer to hold the result
+ * Returns zero on success, non-zero otherwise.
+ */
+int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info);
+
+/**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @fmt: Format of string to write
+ *
+ * Write formatted string to *data. Update *data to point at start of
+ * next string.
+ */
+extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+
+/**
+ * ethtool_puts - Write string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @str: String to write
+ *
+ * Write string to *data without a trailing newline. Update *data
+ * to point at start of next string.
+ *
+ * Prefer this function to ethtool_sprintf() when given only
+ * two arguments or if @fmt is just "%s".
+ */
+extern void ethtool_puts(u8 **data, const char *str);
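A sketch of both helpers in a hypothetical get_strings() callback: ethtool_puts() for fixed names, ethtool_sprintf() for generated ones.

static const char foo_stats[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

static void foo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(foo_stats); i++)
		ethtool_puts(&data, foo_stats[i]);

	for (i = 0; i < 4; i++)
		ethtool_sprintf(&data, "q%d_rx_bytes", i);
}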
+
+/* Link mode to forced speed capabilities maps */
+struct ethtool_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = prefix##_##value, \
+ .arr_size = ARRAY_SIZE(prefix##_##value), \
+}
+
+void
+ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
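A sketch of how the macro and the init helper are meant to be combined; the foo_modes_10000 array follows the prefix##_##value naming the macro expects, and all identifiers are hypothetical.

static const u32 foo_modes_10000[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
};

static struct ethtool_forced_speed_map foo_speed_maps[] = {
	ETHTOOL_FORCED_SPEED_MAP(foo_modes, 10000),
};

static void foo_init_speed_maps(void)
{
	/* Converts each cap_arr into the caps link-mode bitmap once. */
	ethtool_forced_speed_maps_init(foo_speed_maps,
				       ARRAY_SIZE(foo_speed_maps));
}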
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index 1e7bf78cb382..fae0dfb9a9c8 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -10,6 +10,9 @@
#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
enum ethtool_multicast_groups {
ETHNL_MCGRP_MONITOR,
};
@@ -26,6 +29,18 @@ int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
u32 step);
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats);
+bool ethtool_dev_mm_supported(struct net_device *dev);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -67,5 +82,41 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
+
+static inline void
+ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+}
+
+static inline bool ethtool_dev_mm_supported(struct net_device *dev)
+{
+ return false;
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
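The aggregation helpers above sum standard stats over a device's lower devices; a minimal sketch (driver name hypothetical) of wiring one of them into an ethtool callback:

static void foo_get_eth_mac_stats(struct net_device *dev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	ethtool_aggregate_mac_stats(dev, mac_stats);
}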
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index dc4fd8a6644d..e32bee4345fb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -9,11 +9,12 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <uapi/linux/eventfd.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -22,10 +23,6 @@
* from eventfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define EFD_SEMAPHORE (1 << 0)
-#define EFD_CLOEXEC O_CLOEXEC
-#define EFD_NONBLOCK O_NONBLOCK
-
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
@@ -38,15 +35,14 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
{
- return this_cpu_read(eventfd_wake_count);
+ return !current->in_eventfd;
}
#else /* CONFIG_EVENTFD */
@@ -61,9 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -77,12 +72,22 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
return -ENOSYS;
}
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
+{
+ return true;
+}
+
+static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
- return false;
+
}
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
+
#endif /* _LINUX_EVENTFD_H */
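With the count argument gone, every signal now adds exactly one; a usage sketch under hypothetical names:

#include <linux/eventfd.h>

struct foo_ctx {
	struct eventfd_ctx *trigger;	/* obtained via eventfd_ctx_fdget() */
};

static void foo_complete(struct foo_ctx *ctx)
{
	if (ctx->trigger)
		eventfd_signal(ctx->trigger);
}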
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 8f000fada5a4..3337745d81bd 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -18,18 +18,10 @@ struct file;
#ifdef CONFIG_EPOLL
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_KCMP
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
-/* Used to initialize the epoll bits inside the "struct file" */
-static inline void eventpoll_init_file(struct file *file)
-{
- INIT_LIST_HEAD(&file->f_ep_links);
- INIT_LIST_HEAD(&file->f_tfile_llink);
-}
-
-
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
@@ -50,7 +42,7 @@ static inline void eventpoll_release(struct file *file)
* because the file is on the way to be removed and nobody (but
* eventpoll) still has a reference to this file.
*/
- if (likely(list_empty(&file->f_ep_links)))
+ if (likely(!file->f_ep))
return;
/*
@@ -72,9 +64,26 @@ static inline int ep_op_has_event(int op)
#else
-static inline void eventpoll_init_file(struct file *file) {}
static inline void eventpoll_release(struct file *file) {}
#endif
+#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
+/* ARM OABI has an incompatible struct layout and needs a special handler */
+extern struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent);
+#else
+static inline struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
+{
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
+}
+#endif
+
#endif /* #ifndef _LINUX_EVENTPOLL_H */
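A sketch of the intended call pattern for epoll_put_uevent(): copy events out one at a time, advance the user pointer it returns, and stop on the first fault. Names are hypothetical and the __poll_t/__u32 conversion is glossed over.

static int foo_send_events(struct epoll_event __user *uevent,
			   const struct epoll_event *kev, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		uevent = epoll_put_uevent(kev[i].events, kev[i].data, uevent);
		if (!uevent)		/* fault while copying out */
			return i ?: -EFAULT;
	}
	return n;
}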
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 8302bc29bb35..d48d6da32315 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -12,29 +12,20 @@
#include <linux/integrity.h>
#include <linux/xattr.h>
-struct integrity_iint_cache;
-
#ifdef CONFIG_EVM
extern int evm_set_key(void *key, size_t keylen);
extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
- size_t xattr_value_len,
- struct integrity_iint_cache *iint);
-extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
-extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
-extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size);
-extern void evm_inode_post_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len);
-extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
-extern void evm_inode_post_removexattr(struct dentry *dentry,
- const char *xattr_name);
-extern int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm);
+ size_t xattr_value_len);
+int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count);
+extern bool evm_revalidate_status(const char *xattr_name);
+extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
+extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -54,54 +45,35 @@ static inline int evm_set_key(void *key, size_t keylen)
static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
- size_t xattr_value_len,
- struct integrity_iint_cache *iint)
+ size_t xattr_value_len)
{
return INTEGRITY_UNKNOWN;
}
#endif
-static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
-{
- return 0;
-}
-
-static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
-{
- return;
-}
-
-static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size)
+static inline int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ struct xattr *xattrs,
+ int *xattr_count)
{
return 0;
}
-static inline void evm_inode_post_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len)
+static inline bool evm_revalidate_status(const char *xattr_name)
{
- return;
+ return false;
}
-static inline int evm_inode_removexattr(struct dentry *dentry,
- const char *xattr_name)
+static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name)
{
- return 0;
+ return false;
}
-static inline void evm_inode_post_removexattr(struct dentry *dentry,
- const char *xattr_name)
+static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt)
{
- return;
-}
-
-static inline int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm)
-{
- return 0;
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_EVM */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
new file mode 100644
index 000000000000..d445705ac13c
--- /dev/null
+++ b/include/linux/export-internal.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Please do not include this explicitly.
+ * This is used by C files generated by modpost.
+ */
+
+#ifndef __LINUX_EXPORT_INTERNAL_H__
+#define __LINUX_EXPORT_INTERNAL_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)
+/*
+ * relative reference: this reduces the size by half on 64-bit architectures,
+ * and eliminates the need for absolute relocations that require runtime
+ * processing on relocatable kernels.
+ */
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym "- ."
+#elif defined(CONFIG_64BIT)
+#define __KSYM_ALIGN ".balign 8"
+#define __KSYM_REF(sym) ".quad " #sym
+#else
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym
+#endif
+
+/*
+ * For every exported symbol, do the following:
+ *
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * Note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define __KSYMTAB(name, sym, sec, ns) \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1" "\n" \
+ "__kstrtab_" #name ":" "\n" \
+ " .asciz \"" #name "\"" "\n" \
+ "__kstrtabns_" #name ":" "\n" \
+ " .asciz \"" ns "\"" "\n" \
+ " .previous" "\n" \
+ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+ __KSYM_ALIGN "\n" \
+ "__ksymtab_" #name ":" "\n" \
+ __KSYM_REF(sym) "\n" \
+ __KSYM_REF(__kstrtab_ ##name) "\n" \
+ __KSYM_REF(__kstrtabns_ ##name) "\n" \
+ " .previous" "\n" \
+ )
+
+#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
+#else
+#define KSYM_FUNC(name) name
+#endif
+
+#define KSYMTAB_FUNC(name, sec, ns) __KSYMTAB(name, KSYM_FUNC(name), sec, ns)
+#define KSYMTAB_DATA(name, sec, ns) __KSYMTAB(name, name, sec, ns)
+
+#define SYMBOL_CRC(sym, crc, sec) \
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ ".balign 4" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
+
+#endif /* __LINUX_EXPORT_INTERNAL_H__ */
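Strictly illustrative, since this header is meant only for modpost-generated C files: a generated file might contain entries of roughly this shape (symbol name and CRC value made up).

#include <linux/export-internal.h>

KSYMTAB_FUNC(foo_do_work, "_gpl", "FOO");
SYMBOL_CRC(foo_do_work, 0x12345678, "_gpl");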
diff --git a/include/linux/export.h b/include/linux/export.h
index fceb5e855717..0bbd02fd351d 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -2,173 +2,72 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
-/*
- * Export symbols from the kernel to modules. Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
- *
- * Try not to add #includes here. It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
- */
-
-#ifndef __ASSEMBLY__
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE ((struct module *)0)
-#endif
-
-#ifdef CONFIG_MODVERSIONS
-/* Mark the CRC weak since genksyms apparently decides not to
- * generate a checksums for some symbols */
-#if defined(CONFIG_MODULE_REL_CRCS)
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak __crc_" #sym " \n" \
- " .long __crc_" #sym " - . \n" \
- " .previous \n")
-#else
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak __crc_" #sym " \n" \
- " .long __crc_" #sym " \n" \
- " .previous \n")
-#endif
-#else
-#define __CRC_SYMBOL(sym, sec)
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
#include <linux/compiler.h>
-/*
- * Emit the ksymtab entry as a pair of relative references: this reduces
- * the size by half on 64-bit architectures, and eliminates the need for
- * absolute relocations that require runtime processing on relocatable
- * kernels.
- */
-#define __KSYMTAB_ENTRY(sym, sec) \
- __ADDRESSABLE(sym) \
- asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
- " .balign 4 \n" \
- "__ksymtab_" #sym ": \n" \
- " .long " #sym "- . \n" \
- " .long __kstrtab_" #sym "- . \n" \
- " .long __kstrtabns_" #sym "- . \n" \
- " .previous \n")
-
-struct kernel_symbol {
- int value_offset;
- int name_offset;
- int namespace_offset;
-};
-#else
-#define __KSYMTAB_ENTRY(sym, sec) \
- static const struct kernel_symbol __ksymtab_##sym \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- __aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
-
-struct kernel_symbol {
- unsigned long value;
- const char *name;
- const char *namespace;
-};
-#endif
-
-#ifdef __GENKSYMS__
-
-#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-
-#else
+#include <linux/linkage.h>
+#include <linux/stringify.h>
/*
- * For every exported symbol, do the following:
- *
- * - If applicable, place a CRC entry in the __kcrctab section.
- * - Put the name of the symbol and namespace (empty string "" for none) in
- * __ksymtab_strings.
- * - Place a struct kernel_symbol entry in the __ksymtab section.
+ * This comment block is used by fixdep. Please do not remove.
*
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
*/
-#define ___EXPORT_SYMBOL(sym, sec, ns) \
- extern typeof(sym) sym; \
- extern const char __kstrtab_##sym[]; \
- extern const char __kstrtabns_##sym[]; \
- __CRC_SYMBOL(sym, sec); \
- asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
- "__kstrtab_" #sym ": \n" \
- " .asciz \"" #sym "\" \n" \
- "__kstrtabns_" #sym ": \n" \
- " .asciz \"" ns "\" \n" \
- " .previous \n"); \
- __KSYMTAB_ENTRY(sym, sec)
+#ifdef CONFIG_64BIT
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 8 ASM_NL \
+ .quad sym
+#else
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 4 ASM_NL \
+ .long sym
#endif
-#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
+#define ___EXPORT_SYMBOL(sym, license, ns) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+ .asciz ns ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
+
+#if defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
* be reused in other execution contexts such as the UEFI stub or the
* decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns)
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#elif defined(__GENKSYMS__)
-#include <generated/autoksyms.h>
+#define __EXPORT_SYMBOL(sym, license, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a symbol pattern __ksym_marker_<symbol> that the build system filters
- * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are
- * discarded in the final link stage.
- */
-#define __ksym_marker(sym) \
- static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
-
-#define __EXPORT_SYMBOL(sym, sec, ns) \
- __ksym_marker(sym); \
- __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, ns, conf) \
- ___cond_export_sym(sym, sec, ns, conf)
-#define ___cond_export_sym(sym, sec, ns, enabled) \
- __cond_export_sym_##enabled(sym, sec, ns)
-#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
-#define __cond_export_sym_0(sym, sec, ns) /* nothing */
+#elif defined(__ASSEMBLY__)
+
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ ___EXPORT_SYMBOL(sym, license, ns)
#else
-#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ extern typeof(sym) sym; \
+ __ADDRESSABLE(sym) \
+ asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#endif /* CONFIG_MODULES */
+#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#include <linux/stringify.h>
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
#else
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
-#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) _EXPORT_SYMBOL(sym, "_gpl_future")
-#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns)
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns)
-
-#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) _EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_unused_gpl")
-#else
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-#endif
-
-#endif /* !__ASSEMBLY__ */
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL")
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
#endif /* _LINUX_EXPORT_H */
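Driver-facing usage is unchanged by the rework; a minimal sketch of the namespaced GPL variant (function and namespace names hypothetical):

#include <linux/export.h>

int foo_compute(int x)
{
	return x * 2;
}
EXPORT_SYMBOL_NS_GPL(foo_compute, FOO_NS);

/* A consumer module must then declare: MODULE_IMPORT_NS(FOO_NS); */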
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 3ceb72b67a7a..bb37ad5cc954 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -99,12 +99,29 @@ enum fid_type {
FILEID_FAT_WITH_PARENT = 0x72,
/*
+ * 64 bit inode number, 32 bit generation number.
+ */
+ FILEID_INO64_GEN = 0x81,
+
+ /*
+ * 64 bit inode number, 32 bit generation number,
+ * 64 bit parent inode number, 32 bit parent generation.
+ */
+ FILEID_INO64_GEN_PARENT = 0x82,
+
+ /*
* 128 bit child FID (struct lu_fid)
* 128 bit parent FID (struct lu_fid)
*/
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit inode number, 32 bit subvolume, 32 bit generation number:
+ */
+ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1,
+ FILEID_BCACHEFS_WITH_PARENT = 0xb2,
+
+ /*
* 64 bit unique kernfs id
*/
FILEID_KERNFS = 0xfe,
@@ -123,7 +140,11 @@ struct fid {
u32 parent_ino;
u32 parent_gen;
} i32;
- struct {
+ struct {
+ u64 ino;
+ u32 gen;
+ } __packed i64;
+ struct {
u32 block;
u16 partref;
u16 parent_partref;
@@ -131,10 +152,13 @@ struct fid {
u32 parent_block;
u32 parent_generation;
} udf;
- __u32 raw[0];
+ DECLARE_FLEX_ARRAY(__u32, raw);
};
};
+#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
+#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
+
/**
* struct export_operations - for nfsd to communicate with file systems
* @encode_fh: encode a file handle fragment from a dentry
@@ -150,7 +174,7 @@ struct fid {
* encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
- * file referred to by the &struct dentry @de. If the @connectable flag is
+ * file referred to by the &struct dentry @de. If @flag has the CONNECTABLE bit
* set, the encode_fh() should store sufficient information so that a good
* attempt can be made to find not only the file but also its place in the
* filesystem. This typically means storing a reference to de->d_parent in
@@ -213,12 +237,76 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
+#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
+#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
+#define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */
+#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
+ atomic attribute updates
+ */
+#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */
+ unsigned long flags;
};
+/**
+ * exportfs_lock_op_is_async() - export op supports async lock operation
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the nfs export_operations structure has
+ * EXPORT_OP_ASYNC_LOCK in its flags set
+ */
+static inline bool
+exportfs_lock_op_is_async(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent);
+ int *max_len, struct inode *parent,
+ int flags);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
- int *max_len, int connectable);
+ int *max_len, int flags);
+
+static inline bool exportfs_can_encode_fid(const struct export_operations *nop)
+{
+ return !nop || nop->encode_fh;
+}
+
+static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
+{
+ return nop && nop->fh_to_dentry;
+}
+
+static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
+ int fh_flags)
+{
+ /*
+ * If a non-decodeable file handle was requested, we only need to make
+ * sure that the filesystem did not opt out of encoding a fid.
+ */
+ if (fh_flags & EXPORT_FH_FID)
+ return exportfs_can_encode_fid(nop);
+
+ /*
+ * If a decodeable file handle was requested, we need to make sure that
+ * the filesystem can also decode file handles.
+ */
+ return exportfs_can_decode_fh(nop);
+}
+
+static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
+ int *max_len)
+{
+ return exportfs_encode_inode_fh(inode, fid, max_len, NULL,
+ EXPORT_FH_FID);
+}
+
+extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
+ struct fid *fid, int fh_len,
+ int fileid_type,
+ int (*acceptable)(void *, struct dentry *),
+ void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
void *context);
@@ -226,10 +314,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
/*
* Generic helpers for filesystems.
*/
-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent);
+struct dentry *generic_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+struct dentry *generic_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
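A sketch tying the new helpers together (function name hypothetical): encode a non-decodeable file id only when the filesystem has not opted out.

#include <linux/exportfs.h>
#include <linux/fs.h>

static int foo_encode_fid(struct inode *inode, struct fid *fid, int *dwords)
{
	if (!exportfs_can_encode_fid(inode->i_sb->s_export_op))
		return -EOPNOTSUPP;

	return exportfs_encode_fid(inode, fid, dwords);
}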
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index fd183fb9c20f..e596a0abcb27 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -76,6 +76,8 @@
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
#define EXTCON_DISP_DP 44 /* Display Port */
#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
+#define EXTCON_DISP_CVBS 46 /* Composite Video Broadcast Signal */
+#define EXTCON_DISP_EDP 47 /* Embedded Display Port */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -271,9 +273,32 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
- return ERR_PTR(-ENODEV);
+ return NULL;
}
static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
@@ -303,16 +328,4 @@ struct extcon_specific_cable_nb {
struct extcon_dev *edev;
unsigned long previous_value;
};
-
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- return -EINVAL;
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 3c383ddd92dd..a357287eac1e 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -13,10 +13,10 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
-#define F2FS_BLKSIZE 4096 /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
@@ -27,6 +27,7 @@
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -34,20 +35,11 @@
#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
#define F2FS_MAX_QUOTAS 3
#define F2FS_ENC_UTF8_12_1 1
-#define F2FS_ENC_STRICT_MODE_FL (1 << 0)
-#define f2fs_has_strict_mode(sbi) \
- (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL)
-
-#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
-#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
-#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1)
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
@@ -75,6 +67,45 @@ struct f2fs_device {
__le32 total_segments;
} __packed;
+/* reason of stop_checkpoint */
+enum stop_cp_reason {
+ STOP_CP_REASON_SHUTDOWN,
+ STOP_CP_REASON_FAULT_INJECT,
+ STOP_CP_REASON_META_PAGE,
+ STOP_CP_REASON_WRITE_FAIL,
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
+ STOP_CP_REASON_NO_SEGMENT,
+ STOP_CP_REASON_MAX,
+};
+
+#define MAX_STOP_REASON 32
+
+/* detail reason for EFSCORRUPTED */
+enum f2fs_error {
+ ERROR_CORRUPTED_CLUSTER,
+ ERROR_FAIL_DECOMPRESSION,
+ ERROR_INVALID_BLKADDR,
+ ERROR_CORRUPTED_DIRENT,
+ ERROR_CORRUPTED_INODE,
+ ERROR_INCONSISTENT_SUMMARY,
+ ERROR_INCONSISTENT_FOOTER,
+ ERROR_INCONSISTENT_SUM_TYPE,
+ ERROR_CORRUPTED_JOURNAL,
+ ERROR_INCONSISTENT_NODE_COUNT,
+ ERROR_INCONSISTENT_BLOCK_COUNT,
+ ERROR_INVALID_CURSEG,
+ ERROR_INCONSISTENT_SIT,
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
+ ERROR_INCONSISTENT_NAT,
+ ERROR_MAX,
+};
+
+#define MAX_F2FS_ERRORS 16
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -118,7 +149,9 @@ struct f2fs_super_block {
__u8 hot_ext_count; /* # of hot file extension */
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
- __u8 reserved[306]; /* valid reserved region */
+ __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */
+ __u8 s_errors[MAX_F2FS_ERRORS]; /* reasons for image corruption */
+ __u8 reserved[258]; /* valid reserved region */
__le32 crc; /* checksum of superblock */
} __packed;
@@ -171,17 +204,17 @@ struct f2fs_checkpoint {
unsigned char alloc_type[MAX_ACTIVE_LOGS];
/* SIT and NAT version bitmap */
- unsigned char sit_nat_version_bitmap[1];
+ unsigned char sit_nat_version_bitmap[];
} __packed;
-#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
#define CP_MIN_CHKSUM_OFFSET \
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
/*
* For orphan inode management
*/
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -207,14 +240,31 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS 50
-#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+
+#define OFFSET_OF_END_OF_I_EXT 360
+#define SIZE_OF_I_NID 20
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+ - SIZE_OF_I_NID \
+ - sizeof(struct node_footer)) / sizeof(__le32))
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_PAGE(page, inode) \
(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
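Since the constants above are now derived from F2FS_BLKSIZE, it is worth verifying that the formulas reproduce the old hard-coded values at the common 4 KiB block size:

/*
 *   sizeof(struct node_footer) = 4 + 4 + 4 + 8 + 4 = 24 bytes
 *   DEF_ADDRS_PER_INODE = (4096 - 360 - 20 - 24) / 4 = 923
 *   DEF_ADDRS_PER_BLOCK = NIDS_PER_BLOCK = (4096 - 24) / 4 = 1018
 *
 * matching the former constants 923 and 1018.
 */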
@@ -232,6 +282,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
+#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -276,7 +327,10 @@ struct f2fs_inode {
__le64 i_compr_blocks; /* # of compressed blocks */
__u8 i_compress_algorithm; /* compress algorithm */
__u8 i_log_cluster_size; /* log of cluster size */
- __le16 i_padding; /* padding */
+ __le16 i_compress_flag; /* compress flag */
+ /* bit 0: chksum flag
+ * bits [8,15]: compress level
+ */
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
@@ -300,15 +354,7 @@ enum {
OFFSET_BIT_SHIFT
};
-#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
-
-struct node_footer {
- __le32 nid; /* node id */
- __le32 ino; /* inode number */
- __le32 flag; /* include cold/fsync/dentry marks and offset */
- __le64 cp_ver; /* checkpoint version */
- __le32 next_blkaddr; /* next node page block address */
-} __packed;
+#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
@@ -323,7 +369,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -338,12 +384,13 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks. For a 4K page size, this
+ * results in a segment size of 2MB. For 16K pages, the default segment size
+ * is 8MB.
* Changing this is not allowed.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -378,7 +425,7 @@ struct f2fs_sit_block {
* For segment summary
*
* One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * exactly one segment by default. Changing the basic units is not allowed.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -389,12 +436,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM 512
-#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
+#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -478,7 +525,7 @@ struct f2fs_journal {
};
} __packed;
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -505,7 +552,7 @@ typedef __le32 f2fs_hash_t;
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
-#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+#define MAX_DIR_BUCKETS BIT((MAX_DIR_HASH_DEPTH / 2) - 1)
/*
* space utilization of regular dentry and inline dentry (w/o extra reservation)
@@ -519,11 +566,14 @@ typedef __le32 f2fs_hash_t;
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
-#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
-#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
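The same cross-check for the dentry formula at a 4 KiB block size:

/*
 *   NR_DENTRY_IN_BLOCK = (8 * 4096) / ((11 + 8) * 8 + 1)
 *                      = 32768 / 153 = 214
 *
 * i.e. 214 dentries per directory block, as the old constant said.
 */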
@@ -536,7 +586,7 @@ struct f2fs_dir_entry {
__u8 file_type; /* file type */
} __packed;
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
@@ -545,21 +595,6 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
-/* file types used in inode_info->flags */
-enum {
- F2FS_FT_UNKNOWN,
- F2FS_FT_REG_FILE,
- F2FS_FT_DIR,
- F2FS_FT_CHRDEV,
- F2FS_FT_BLKDEV,
- F2FS_FT_FIFO,
- F2FS_FT_SOCK,
- F2FS_FT_SYMLINK,
- F2FS_FT_MAX
-};
-
-#define S_SHIFT 12
-
#define F2FS_DEF_PROJID 0 /* default project ID */
#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 3e9c56ee651f..4f1c4f603118 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_FANOTIFY_H
#define _LINUX_FANOTIFY_H
+#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>
#define FAN_GROUP_FLAG(group, flag) \
@@ -15,27 +16,62 @@
* these constants, the programs may break if re-compiled with new uapi headers
* and then run on an old kernel.
*/
-#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
+
+/* Group classes where permission events are allowed */
+#define FANOTIFY_PERM_CLASSES (FAN_CLASS_CONTENT | \
FAN_CLASS_PRE_CONTENT)
-#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
+#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES)
+
+#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
+
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+
+/*
+ * fanotify_init() flags that require CAP_SYS_ADMIN.
+ * We do not allow unprivileged groups to request permission events.
+ * We do not allow unprivileged groups to get another process's pid in events.
+ * We do not allow unprivileged groups to use unlimited resources.
+ */
+#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
+ FAN_REPORT_TID | \
+ FAN_REPORT_PIDFD | \
+ FAN_UNLIMITED_QUEUE | \
+ FAN_UNLIMITED_MARKS)
+
+/*
+ * fanotify_init() flags that are allowed for user without CAP_SYS_ADMIN.
+ * FAN_CLASS_NOTIF is the only class we allow for unprivileged group.
+ * We do not allow unprivileged groups to get file descriptors in events,
+ * so one of the flags for reporting file handles is required.
+ */
+#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
+ FANOTIFY_FID_BITS | \
+ FAN_CLOEXEC | FAN_NONBLOCK)
+
+#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
+ FANOTIFY_USER_INIT_FLAGS)
-#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \
- FAN_REPORT_TID | \
- FAN_CLOEXEC | FAN_NONBLOCK | \
- FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
FAN_MARK_FILESYSTEM)
+#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
+ FAN_MARK_FLUSH)
+
+#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \
+ FAN_MARK_IGNORE)
+
#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
- FAN_MARK_ADD | \
- FAN_MARK_REMOVE | \
+ FANOTIFY_MARK_CMD_BITS | \
+ FANOTIFY_MARK_IGNORE_BITS | \
FAN_MARK_DONT_FOLLOW | \
FAN_MARK_ONLYDIR | \
- FAN_MARK_IGNORED_MASK | \
FAN_MARK_IGNORED_SURV_MODIFY | \
- FAN_MARK_FLUSH)
+ FAN_MARK_EVICTABLE)
/*
* Events that can be reported with data type FSNOTIFY_EVENT_PATH.
@@ -49,15 +85,23 @@
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
*/
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_RENAME)
+
+/* Events that can be reported with event->fd */
+#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
+#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
- FANOTIFY_INODE_EVENTS)
+ FANOTIFY_INODE_EVENTS | \
+ FANOTIFY_ERROR_EVENTS)
/* Events that require a permission response from user */
#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
@@ -71,9 +115,18 @@
FANOTIFY_PERM_EVENTS | \
FAN_Q_OVERFLOW | FAN_ONDIR)
+/* Events and flags relevant only for directories */
+#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
FANOTIFY_EVENT_FLAGS)
+/* These masks check for invalid bits in permission responses. */
+#define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY)
+#define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO)
+#define FANOTIFY_RESPONSE_VALID_MASK (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS)
+
/* Do not use these old uapi constants internally */
#undef FAN_ALL_CLASS_BITS
#undef FAN_ALL_INIT_FLAGS
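From userspace, an unprivileged listener has to stay inside FANOTIFY_USER_INIT_FLAGS; a minimal sketch of a conforming fanotify_init() call (notification class, file handles instead of open fds):

#include <sys/fanotify.h>
#include <fcntl.h>

int foo_open_group(void)
{
	return fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID | FAN_CLOEXEC,
			     O_RDONLY);
}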
diff --git a/include/linux/fault-inject-usercopy.h b/include/linux/fault-inject-usercopy.h
new file mode 100644
index 000000000000..56c3a693fdd9
--- /dev/null
+++ b/include/linux/fault-inject-usercopy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FAULT_INJECT_USERCOPY_H__
+#define __LINUX_FAULT_INJECT_USERCOPY_H__
+
+/*
+ * This header provides a wrapper for injecting failures to user space memory
+ * access functions.
+ */
+
+#include <linux/types.h>
+
+#ifdef CONFIG_FAULT_INJECTION_USERCOPY
+
+bool should_fail_usercopy(void);
+
+#else
+
+static inline bool should_fail_usercopy(void) { return false; }
+
+#endif /* CONFIG_FAULT_INJECTION_USERCOPY */
+
+#endif /* __LINUX_FAULT_INJECT_USERCOPY_H__ */
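Note that the stock copy_to_user()/copy_from_user() helpers already call this hook internally; a sketch of gating a hand-rolled user access path on it (names hypothetical):

#include <linux/fault-inject-usercopy.h>
#include <linux/uaccess.h>

static int foo_read_out(void __user *dst, const void *src, size_t n)
{
	if (should_fail_usercopy())
		return -EFAULT;
	if (copy_to_user(dst, src, n))
		return -EFAULT;
	return 0;
}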
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index e525f6957c49..6d5edef09d45 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
+#include <linux/configfs.h>
#include <linux/ratelimit.h>
#include <linux/atomic.h>
@@ -31,6 +32,10 @@ struct fault_attr {
struct dentry *dname;
};
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
@@ -43,6 +48,7 @@ struct fault_attr {
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -60,10 +66,42 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
+
+struct fault_config {
+ struct fault_attr attr;
+ struct config_group group;
+};
+
+void fault_config_init(struct fault_config *config, const char *name);
+
+#else /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
+struct fault_config {
+};
+
+static inline void fault_config_init(struct fault_config *config,
+ const char *name)
+{
+}
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
#endif /* CONFIG_FAULT_INJECTION */
struct kmem_cache;
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+#else
+static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+ return false;
+}
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
+
int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
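A sketch of a subsystem-private injection point using the extended API added above (attribute and function names hypothetical):

#include <linux/errno.h>
#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(fail_foo_io);

static int foo_submit_io(size_t len)
{
	/* FAULT_NOWARN suppresses the stack dump on injected failures. */
	if (should_fail_ex(&fail_foo_io, len, FAULT_NOWARN))
		return -EIO;

	/* ...real submission path... */
	return 0;
}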
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 850f79e9a7cb..708e6a177b1b 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -2,26 +2,30 @@
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
-#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
-#include <linux/fs.h>
-#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/list.h>
-#include <linux/backlight.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-struct vm_area_struct;
-struct fb_info;
+#include <asm/fb.h>
+
+struct backlight_device;
struct device;
+struct device_node;
+struct fb_info;
struct file;
+struct i2c_adapter;
+struct inode;
+struct module;
+struct notifier_block;
+struct page;
struct videomode;
-struct device_node;
+struct vm_area_struct;
/* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1
@@ -124,7 +128,7 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
-/* The resolution of the passed in fb_info about to change */
+/* The resolution of the passed in fb_info is about to change */
#define FB_EVENT_MODE_CHANGE 0x01
#ifdef CONFIG_GUMSTIX_AM200EPD
@@ -200,13 +204,21 @@ struct fb_pixmap {
};
#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io_pageref {
+ struct page *page;
+ unsigned long offset;
+ /* private */
+ struct list_head list;
+};
+
struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
- struct mutex lock; /* mutex that protects the page list */
- struct list_head pagelist; /* list of touched pages */
+ bool sort_pagereflist; /* sort pagereflist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
+ struct mutex lock; /* mutex that protects the pageref list */
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
/* callback */
- void (*first_io)(struct fb_info *info);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
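A sketch of a driver opting into deferred I/O with the reworked pageref list; the flush callback and timing are hypothetical, and the core still owns the lock and list initialization.

#include <linux/fb.h>

static void foo_dirty_flush(struct fb_info *info, struct list_head *pagereflist)
{
	/* Write out the pages referenced by the pagereflist entries. */
}

static struct fb_deferred_io foo_defio = {
	.delay		  = HZ / 20,
	.sort_pagereflist = true,
	.deferred_io	  = foo_dirty_flush,
};

/* At probe time: info->fbdefio = &foo_defio; */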
@@ -373,7 +385,6 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -414,8 +425,6 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
-/* where the fb is a firmware driver, and can be replaced with a proper one */
-#define FBINFO_MISC_FIRMWARE 0x80000
/*
* Host and GPU endianness differ.
*/
@@ -435,7 +444,7 @@ struct fb_tile_ops {
struct fb_info {
- atomic_t count;
+ refcount_t count;
int node;
int flags;
/*
@@ -448,7 +457,6 @@ struct fb_info {
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
@@ -457,22 +465,26 @@ struct fb_info {
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
- /* set before framebuffer registration,
+ /* set before framebuffer registration,
remove after unregister */
struct backlight_device *bl_dev;
/* Backlight level curve */
- struct mutex bl_curve_mutex;
+ struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
#endif
const struct fb_ops *fbops;
struct device *device; /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
+#endif
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
@@ -481,40 +493,18 @@ struct fb_info {
char __iomem *screen_base; /* Virtual address */
char *screen_buffer;
};
- unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
- void *pseudo_palette; /* Fake palette of 16 colors */
+ unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
+ void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
 u32 state; /* Hardware state, i.e. suspend */
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
- /* we need the PCI or similar aperture base/size not
- smem_start/size as smem_start may just be an object
- allocated inside the aperture so may not actually overlap */
- struct apertures_struct {
- unsigned int count;
- struct aperture {
- resource_size_t base;
- resource_size_t size;
- } ranges[0];
- } *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
};
-static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a;
-
- a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL);
- if (!a)
- return NULL;
- a->count = max_num;
- return a;
-}
-
-#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
-
/* This will go away
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
* when it wants to turn the acceleration engine on. This is
@@ -523,58 +513,6 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
*/
#define STUPID_ACCELF_TEXT_SHIT
-// This will go away
-#if defined(__sparc__)
-
-/* We map all of our framebuffers such that big-endian accesses
- * are what we want, so the following is sufficient.
- */
-
-// This will go away
-#define fb_readb sbus_readb
-#define fb_readw sbus_readw
-#define fb_readl sbus_readl
-#define fb_readq sbus_readq
-#define fb_writeb sbus_writeb
-#define fb_writew sbus_writew
-#define fb_writel sbus_writel
-#define fb_writeq sbus_writeq
-#define fb_memset sbus_memset_io
-#define fb_memcpy_fromfb sbus_memcpy_fromio
-#define fb_memcpy_tofb sbus_memcpy_toio
-
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
- defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
- defined(__arm__) || defined(__aarch64__)
-
-#define fb_readb __raw_readb
-#define fb_readw __raw_readw
-#define fb_readl __raw_readl
-#define fb_readq __raw_readq
-#define fb_writeb __raw_writeb
-#define fb_writew __raw_writew
-#define fb_writel __raw_writel
-#define fb_writeq __raw_writeq
-#define fb_memset memset_io
-#define fb_memcpy_fromfb memcpy_fromio
-#define fb_memcpy_tofb memcpy_toio
-
-#else
-
-#define fb_readb(addr) (*(volatile u8 *) (addr))
-#define fb_readw(addr) (*(volatile u16 *) (addr))
-#define fb_readl(addr) (*(volatile u32 *) (addr))
-#define fb_readq(addr) (*(volatile u64 *) (addr))
-#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b))
-#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b))
-#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
-#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
-#define fb_memset memset
-#define fb_memcpy_fromfb memcpy
-#define fb_memcpy_tofb memcpy
-
-#endif
-
#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
(val) << (bits))
@@ -585,15 +523,44 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
* `Generic' versions of the frame buffer device operations
*/
-extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
-extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
-extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+
+/*
+ * Helpers for framebuffers in I/O memory
+ */
+
+extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
+extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
+
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
+ .fb_read = fb_io_read, \
+ .fb_write = fb_io_write
+
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
+ .fb_fillrect = cfb_fillrect, \
+ .fb_copyarea = cfb_copyarea, \
+ .fb_imageblit = cfb_imageblit
+
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
+ .fb_mmap = fb_io_mmap
+
+#define FB_DEFAULT_IOMEM_OPS \
+ __FB_DEFAULT_IOMEM_OPS_RDWR, \
+ __FB_DEFAULT_IOMEM_OPS_DRAW, \
+ __FB_DEFAULT_IOMEM_OPS_MMAP
+
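
A driver whose framebuffer lives in I/O memory can now fill in all default
file and drawing operations in one line; only the variable name below is
hypothetical:

static const struct fb_ops my_fb_ops = {
	.owner = THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS,	/* fb_io_read/write/mmap + cfb_* blitting */
};
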
/*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
*/
+
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
@@ -602,15 +569,31 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
-/* drivers/video/fbmem.c */
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/*
+ * Helpers for framebuffers in DMA-able memory
+ */
+
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name);
-extern int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
-extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -621,16 +604,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern struct fb_info *registered_fb[FB_MAX];
-extern int num_registered_fb;
-extern bool fb_center_logo;
-extern int fb_logo_count;
-extern struct class *fb_class;
-
-#define for_each_registered_fb(i) \
- for (i = 0; i < FB_MAX; i++) \
- if (!registered_fb[i]) {} else
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
@@ -656,16 +629,86 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
-/* drivers/video/fb_defio.c */
+/* fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
-extern void fb_deferred_io_init(struct fb_info *info);
+extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
+extern void fb_deferred_io_release(struct fb_info *info);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
+/*
+ * Generate callbacks for deferred I/O
+ */
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, __mode) \
+ static ssize_t __prefix ## _defio_read(struct fb_info *info, char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ return fb_ ## __mode ## _read(info, buf, count, ppos); \
+ } \
+ static ssize_t __prefix ## _defio_write(struct fb_info *info, const char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ unsigned long offset = *ppos; \
+ ssize_t ret = fb_ ## __mode ## _write(info, buf, count, ppos); \
+ if (ret > 0) \
+ __damage_range(info, offset, ret); \
+ return ret; \
+ }
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, __mode) \
+ static void __prefix ## _defio_fillrect(struct fb_info *info, \
+ const struct fb_fillrect *rect) \
+ { \
+ __mode ## _fillrect(info, rect); \
+ __damage_area(info, rect->dx, rect->dy, rect->width, rect->height); \
+ } \
+ static void __prefix ## _defio_copyarea(struct fb_info *info, \
+ const struct fb_copyarea *area) \
+ { \
+ __mode ## _copyarea(info, area); \
+ __damage_area(info, area->dx, area->dy, area->width, area->height); \
+ } \
+ static void __prefix ## _defio_imageblit(struct fb_info *info, \
+ const struct fb_image *image) \
+ { \
+ __mode ## _imageblit(info, image); \
+ __damage_area(info, image->dx, image->dy, image->width, image->height); \
+ }
+
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
+
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
+/*
+ * Initializes struct fb_ops for deferred I/O.
+ */
+
+#define __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix) \
+ .fb_read = __prefix ## _defio_read, \
+ .fb_write = __prefix ## _defio_write
+
+#define __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix) \
+ .fb_fillrect = __prefix ## _defio_fillrect, \
+ .fb_copyarea = __prefix ## _defio_copyarea, \
+ .fb_imageblit = __prefix ## _defio_imageblit
+
+#define __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix) \
+ .fb_mmap = fb_deferred_io_mmap
+
+#define FB_DEFAULT_DEFERRED_OPS(__prefix) \
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix)
+
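
Taken together, a deferred-I/O driver generates its wrappers once and plugs
them into fb_ops. In this sketch, my_damage_range() and my_damage_area() are
hypothetical helpers that record dirty spans for the deferred worker:

static void my_damage_range(struct fb_info *info, off_t off, size_t len);
static void my_damage_area(struct fb_info *info, u32 x, u32 y,
			   u32 width, u32 height);

FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(my, my_damage_range, my_damage_area)

static const struct fb_ops my_fb_defio_ops = {
	.owner = THIS_MODULE,
	FB_DEFAULT_DEFERRED_OPS(my),	/* my_defio_read/write/fillrect/... */
};
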
static inline bool fb_be_math(struct fb_info *info)
{
#ifdef CONFIG_FB_FOREIGN_ENDIAN
@@ -685,14 +728,11 @@ static inline bool fb_be_math(struct fb_info *info)
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
-/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
-/* drivers/video/fbmon.c */
+/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
#define FB_HSYNCTIMINGS 2
@@ -726,7 +766,7 @@ extern int of_get_fb_videomode(struct device_node *np,
extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
-/* drivers/video/modedb.c */
+/* modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
@@ -752,7 +792,7 @@ extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head);
-/* drivers/video/fbcmap.c */
+/* fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
@@ -787,7 +827,6 @@ struct dmt_videomode {
const struct fb_videomode *mode;
};
-extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct dmt_videomode dmt_modes[];
@@ -803,7 +842,12 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp);
-/* Convenience logging macros */
+bool fb_modesetting_disabled(const char *drvname);
+
+/*
+ * Convenience logging macros
+ */
+
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
@@ -815,4 +859,12 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_warn_once(fb_info, fmt, ...) \
+ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+ WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+ fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
#endif /* _LINUX_FB_H */
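
The logging helpers prefix each message with the fbdev node number, keeping
call sites terse. An illustrative use of the new once-only variants (cmd
stands in for a local variable):

fb_warn_once(info, "deprecated ioctl 0x%x\n", cmd);
fb_WARN_ON_ONCE(info, info->fbdefio == NULL);
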
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
index ff5596dd30f8..2382dec6d6ab 100644
--- a/include/linux/fbcon.h
+++ b/include/linux/fbcon.h
@@ -15,6 +15,8 @@ void fbcon_new_modelist(struct fb_info *info);
void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps);
void fbcon_fb_blanked(struct fb_info *info, int blank);
+int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var);
void fbcon_update_vcs(struct fb_info *info, bool all);
void fbcon_remap_all(struct fb_info *info);
int fbcon_set_con2fb_map_ioctl(void __user *argp);
@@ -33,6 +35,8 @@ static inline void fbcon_new_modelist(struct fb_info *info) {}
static inline void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps) {}
static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {}
+static inline int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var) { return 0; }
static inline void fbcon_update_vcs(struct fb_info *info, bool all) {}
static inline void fbcon_remap_all(struct fb_info *info) {}
static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; }
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 7bcdcf4f6ab2..a332e79b3207 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -8,18 +8,14 @@
/* List of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
- O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
-/* List of all valid flags for the how->upgrade_mask argument: */
-#define VALID_UPGRADE_FLAGS \
- (UPGRADE_NOWRITE | UPGRADE_NOREAD)
-
/* List of all valid flags for the how->resolve argument: */
#define VALID_RESOLVE_FLAGS \
(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
- RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED)
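
Masks like VALID_RESOLVE_FLAGS let input validation stay a single test; the
openat2() path rejects unknown bits along these lines:

if (how->resolve & ~VALID_RESOLVE_FLAGS)
	return -EINVAL;	/* unknown resolve flag requested */
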
/* List of all open_how "versions". */
#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index a32bf47c593e..78c8326d74ae 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -80,50 +80,46 @@ struct dentry;
/*
 * The caller must ensure that the fd table isn't shared, or hold the RCU read lock or file lock
*/
-static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds) {
- fd = array_index_nospec(fd, fdt->max_fds);
- return rcu_dereference_raw(fdt->fd[fd]);
- }
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
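
The branchless rewrite trades the old bounds check for an unconditional but
speculation-safe load. Worked through with concrete values:

/*
 * array_index_mask_nospec(fd, max_fds) evaluates without a branch:
 *
 *   fd <  max_fds: mask == ~0UL -> fd & mask == fd, pointer kept
 *   fd >= max_fds: mask ==  0UL -> fd & mask == 0,  result forced to NULL
 *
 * e.g. with max_fds == 256: fd == 3 loads fd[3]; fd == 999 loads the
 * always-valid fd[0], and the mask zeroes the returned pointer, so no
 * attacker-controlled index is dereferenced even under misspeculation.
 */
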
-static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
- !lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
- return __fcheck_files(files, fd);
+ return files_lookup_fd_raw(files, fd);
}
-/*
- * Check whether the specified fd has an open file.
- */
-#define fcheck(fd) fcheck_files(current->files, fd)
+struct file *lookup_fdget_rcu(unsigned int fd);
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
-struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
-void reset_files_struct(struct files_struct *);
-int unshare_files(struct files_struct **);
+int unshare_files(void);
struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
const void *);
-extern int __alloc_fd(struct files_struct *files,
- unsigned start, unsigned end, unsigned flags);
-extern void __fd_install(struct files_struct *files,
- unsigned int fd, struct file *file);
-extern int __close_fd(struct files_struct *files,
- unsigned int fd);
+extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern int __close_fd_get_file(unsigned int fd, struct file **res);
+extern struct file *file_close_fd(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
struct files_struct **new_fdp);
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index 4e624c466583..c50882f19235 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
u64 phys, u64 len, u32 flags);
-int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start, u64 len,
- get_block_t *get_block);
-
#endif /* _LINUX_FIEMAP_H 1 */
diff --git a/include/linux/file.h b/include/linux/file.h
index 225982792fa2..169692cb1906 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,11 +10,11 @@
#include <linux/types.h>
#include <linux/posix_types.h>
#include <linux/errno.h>
+#include <linux/cleanup.h>
struct file;
extern void fput(struct file *);
-extern void fput_many(struct file *, unsigned int);
struct file_operations;
struct task_struct;
@@ -24,6 +24,8 @@ struct inode;
struct path;
extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
extern struct file *alloc_file_clone(struct file *, int flags,
const struct file_operations *);
@@ -47,7 +49,6 @@ static inline void fdput(struct fd fd)
}
extern struct file *fget(unsigned int fd);
-extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
@@ -82,6 +83,8 @@ static inline void fdput_pos(struct fd f)
fdput(f);
}
+DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
@@ -90,25 +93,14 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
+DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
+ get_unused_fd_flags(flags), unsigned flags)
+
extern void fd_install(unsigned int fd, struct file *file);
-extern int __receive_fd(int fd, struct file *file, int __user *ufd,
- unsigned int o_flags);
-static inline int receive_fd_user(struct file *file, int __user *ufd,
- unsigned int o_flags)
-{
- if (ufd == NULL)
- return -EFAULT;
- return __receive_fd(-1, file, ufd, o_flags);
-}
-static inline int receive_fd(struct file *file, unsigned int o_flags)
-{
- return __receive_fd(-1, file, NULL, o_flags);
-}
-static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags)
-{
- return __receive_fd(fd, file, NULL, o_flags);
-}
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
+
+int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
extern void __fput_sync(struct file *);
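
The DEFINE_CLASS(fd, ...) declaration above pairs fdget() with an automatic
fdput() via linux/cleanup.h. A sketch of the intended call-site shape (the
function itself is hypothetical):

static int my_fsync_fd(int fd)
{
	CLASS(fd, f)(fd);	/* fdget(fd) now, fdput(f) on every return */

	if (!f.file)
		return -EBADF;
	return vfs_fsync(f.file, 0);
}
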
diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h
new file mode 100644
index 000000000000..47c05a9851d0
--- /dev/null
+++ b/include/linux/fileattr.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_FILEATTR_H
+#define _LINUX_FILEATTR_H
+
+/* Flags shared between flags/xflags */
+#define FS_COMMON_FL \
+ (FS_SYNC_FL | FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NODUMP_FL | FS_NOATIME_FL | FS_DAX_FL | \
+ FS_PROJINHERIT_FL)
+
+#define FS_XFLAG_COMMON \
+ (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND | \
+ FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
+ FS_XFLAG_PROJINHERIT)
+
+/*
+ * Merged interface for miscellaneous file attributes. 'flags' originates from
+ * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
+ * is handled by the VFS helpers, so filesystems are free to implement just one
+ * or both of these sub-interfaces.
+ */
+struct fileattr {
+ u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
+ /* struct fsxattr: */
+ u32 fsx_xflags; /* xflags field value (get/set) */
+ u32 fsx_extsize; /* extsize field value (get/set) */
+ u32 fsx_nextents; /* nextents field value (get) */
+ u32 fsx_projid; /* project identifier (get/set) */
+ u32 fsx_cowextsize; /* CoW extsize field value (get/set) */
+ /* selectors: */
+ bool flags_valid:1;
+ bool fsx_valid:1;
+};
+
+int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa);
+
+void fileattr_fill_xflags(struct fileattr *fa, u32 xflags);
+void fileattr_fill_flags(struct fileattr *fa, u32 flags);
+
+/**
+ * fileattr_has_fsx - check for extended flags/attributes
+ * @fa: fileattr pointer
+ *
+ * Return: true if any attributes are present that are not represented in
+ * ->flags.
+ */
+static inline bool fileattr_has_fsx(const struct fileattr *fa)
+{
+ return fa->fsx_valid &&
+ ((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
+ fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
+}
+
+int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct fileattr *fa);
+
+#endif /* _LINUX_FILEATTR_H */
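
A filesystem that supports only the ext*-style flag word can implement just
that half of the merged interface. Everything prefixed my*/myfs* below is
hypothetical:

static int myfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	fileattr_fill_flags(fa, my_inode_flags(d_inode(dentry)));
	return 0;
}

static int myfs_fileattr_set(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct fileattr *fa)
{
	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;	/* no xfs-style attributes here */
	return my_apply_flags(d_inode(dentry), fa->flags);
}
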
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
new file mode 100644
index 000000000000..daee999d05f3
--- /dev/null
+++ b/include/linux/filelock.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FILELOCK_H
+#define _LINUX_FILELOCK_H
+
+#include <linux/fs.h>
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
+#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
+#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
+#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
+#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_RECLAIM 4096 /* reclaiming from a reboot server */
+
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
+/*
+ * Special return value from posix_lock_file() and vfs_lock_file() for
+ * asynchronous locking.
+ */
+#define FILE_LOCK_DEFERRED 1
+
+struct file_lock;
+struct file_lease;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ void *lm_mod_owner;
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *); /* unblock callback */
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_lock_expirable)(struct file_lock *cfl);
+ void (*lm_expire_lock)(void);
+};
+
+struct lease_manager_operations {
+ bool (*lm_break)(struct file_lease *);
+ int (*lm_change)(struct file_lease *, int, struct list_head *);
+ void (*lm_setup)(struct file_lease *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lease *);
+};
+
+struct lock_manager {
+ struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
+};
+
+struct net;
+void locks_start_grace(struct net *, struct lock_manager *);
+void locks_end_grace(struct lock_manager *);
+bool locks_in_grace(struct net *);
+bool opens_in_grace(struct net *);
+
+/*
+ * struct file_lock has a union that some filesystems use to track
+ * their own private info. The NFS side of things is defined here:
+ */
+#include <linux/nfs_fs_i.h>
+
+/*
+ * struct file_lock represents a generic "file lock". It's used to represent
+ * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
+ * note that the same struct is used to represent both a request for a lock and
+ * the lock itself, but the same object is never used for both.
+ *
+ * FIXME: should we create a separate "struct lock_request" to help distinguish
+ * these two uses?
+ *
+ * The various i_flctx lists are ordered by:
+ *
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
+ *
+ * Obviously, the last two criteria only matter for POSIX locks.
+ */
+
+struct file_lock_core {
+ struct file_lock_core *flc_blocker; /* The lock that is blocking us */
+ struct list_head flc_list; /* link into file_lock_context */
+ struct hlist_node flc_link; /* node in global lists */
+ struct list_head flc_blocked_requests; /* list of requests with
+ * ->fl_blocker pointing here
+ */
+ struct list_head flc_blocked_member; /* node in
+ * ->fl_blocker->fl_blocked_requests
+ */
+ fl_owner_t flc_owner;
+ unsigned int flc_flags;
+ unsigned char flc_type;
+ pid_t flc_pid;
+ int flc_link_cpu; /* what cpu's list is this on? */
+ wait_queue_head_t flc_wait;
+ struct file *flc_file;
+};
+
+struct file_lock {
+ struct file_lock_core c;
+ loff_t fl_start;
+ loff_t fl_end;
+
+ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link; /* link in AFS vnode's pending_locks list */
+ int state; /* state of grant or error if -ve */
+ unsigned int debug_id;
+ } afs;
+ struct {
+ struct inode *inode;
+ } ceph;
+ } fl_u;
+} __randomize_layout;
+
+struct file_lease {
+ struct file_lock_core c;
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
+ const struct lease_manager_operations *fl_lmops; /* Callbacks for lease managers */
+} __randomize_layout;
+
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
+#ifdef CONFIG_FILE_LOCKING
+int fcntl_getlk(struct file *, unsigned int, struct flock *);
+int fcntl_setlk(unsigned int, struct file *, unsigned int,
+ struct flock *);
+
+#if BITS_PER_LONG == 32
+int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
+int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+ struct flock64 *);
+#endif
+
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
+int fcntl_getlease(struct file *filp);
+
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_UNLCK;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_RDLCK;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_WRLCK;
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+ wake_up(&fl->c.flc_wait);
+}
+
+/* fs/locks.c */
+void locks_free_lock_context(struct inode *inode);
+void locks_free_lock(struct file_lock *fl);
+void locks_init_lock(struct file_lock *);
+struct file_lock *locks_alloc_lock(void);
+void locks_copy_lock(struct file_lock *, struct file_lock *);
+void locks_copy_conflock(struct file_lock *, struct file_lock *);
+void locks_remove_posix(struct file *, fl_owner_t);
+void locks_remove_file(struct file *);
+void locks_release_private(struct file_lock *);
+void posix_test_lock(struct file *, struct file_lock *);
+int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+int locks_delete_block(struct file_lock *);
+int vfs_test_lock(struct file *, struct file_lock *);
+int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+bool vfs_inode_has_locks(struct inode *inode);
+int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+
+void locks_init_lease(struct file_lease *);
+void locks_free_lease(struct file_lease *fl);
+struct file_lease *locks_alloc_lease(void);
+int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+void lease_get_mtime(struct inode *, struct timespec64 *time);
+int generic_setlease(struct file *, int, struct file_lease **, void **priv);
+int kernel_setlease(struct file *, int, struct file_lease **, void **);
+int vfs_setlease(struct file *, int, struct file_lease **, void **);
+int lease_modify(struct file_lease *, int, struct list_head *);
+
+struct notifier_block;
+int lease_register_notifier(struct notifier_block *);
+void lease_unregister_notifier(struct notifier_block *);
+
+struct files_struct;
+void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
+bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner);
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return smp_load_acquire(&inode->i_flctx);
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int fcntl_getlk(struct file *file, unsigned int cmd,
+ struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
+#if BITS_PER_LONG == 32
+static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
+ struct flock64 *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 *user)
+{
+ return -EACCES;
+}
+#endif
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return F_UNLCK;
+}
+
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+}
+
+static inline void
+locks_free_lock_context(struct inode *inode)
+{
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_init_lease(struct file_lease *fl)
+{
+ return;
+}
+
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_file(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int locks_delete_block(struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline bool vfs_inode_has_locks(struct inode *inode)
+{
+ return false;
+}
+
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode,
+ struct timespec64 *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, int arg,
+ struct file_lease **flp, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int kernel_setlease(struct file *filp, int arg,
+ struct file_lease **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, int arg,
+ struct file_lease **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lease *fl, int arg,
+ struct list_head *dispose)
+{
+ return -EINVAL;
+}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
+static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner)
+{
+ return false;
+}
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_FILE_LOCKING */
+
+/* for walking lists of file_locks linked by fl_list */
+#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, c.flc_list)
+
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return locks_lock_inode_wait(file_inode(filp), fl);
+}
+
+#ifdef CONFIG_FILE_LOCKING
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode, mode, FL_LEASE);
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode, mode, FL_DELEG);
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
+ if (ret == -EWOULDBLOCK && delegated_inode) {
+ *delegated_inode = inode;
+ ihold(inode);
+ }
+ return ret;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(*delegated_inode, O_WRONLY);
+ iput(*delegated_inode);
+ *delegated_inode = NULL;
+ return ret;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode,
+ wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
+ FL_LAYOUT);
+ return 0;
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ return 0;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ BUG();
+ return 0;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FILE_LOCKING */
+
+#endif /* _LINUX_FILELOCK_H */
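
A consumer-side sketch using the helpers above to walk an inode's POSIX
locks under flc_lock:

struct file_lock_context *ctx = locks_inode_context(inode);
struct file_lock *fl;

if (ctx) {
	spin_lock(&ctx->flc_lock);
	for_each_file_lock(fl, &ctx->flc_posix) {
		if (lock_is_write(fl))
			pr_info("write lock %lld-%lld\n",
				(long long)fl->fl_start,
				(long long)fl->fl_end);
	}
	spin_unlock(&ctx->flc_lock);
}
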
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ebfb7cfb65f1..c99bc3df2d28 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -5,9 +5,8 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
-#include <stdarg.h>
-
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -15,19 +14,20 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
@@ -69,9 +69,20 @@ struct ctl_table_header;
/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM 0x20
+/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
+#define BPF_PROBE_MEMSX 0x40
+
+/* unused opcode to mark special load instruction. Same as BPF_MSH */
+#define BPF_PROBE_MEM32 0xa0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -85,39 +96,49 @@ struct ctl_table_header;
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
-#define BPF_ALU64_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
-#define BPF_ALU32_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
+
+#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
+
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
@@ -129,6 +150,16 @@ struct ctl_table_header;
.off = 0, \
.imm = LEN })
+/* Byte Swap, bswap16/32/64 */
+
+#define BPF_BSWAP(DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
@@ -165,6 +196,24 @@ struct ctl_table_header;
.off = 0, \
.imm = IMM })
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
((struct bpf_insn) { \
@@ -249,6 +298,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
+
+#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
@@ -259,15 +318,32 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
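
Illustrative encodings following the table above; the legacy alias shows
that plain XADD is simply BPF_ADD with no fetch:

/* *(u64 *)(r1 + 0) += r2 */
struct bpf_insn add   = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0);
/* r2 = atomic_fetch_add(r1 + 0, r2) */
struct bpf_insn fetch = BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH,
				      BPF_REG_1, BPF_REG_2, 0);
/* r2 = atomic_xchg(r1 + 0, r2) */
struct bpf_insn xchg  = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_2, 0);
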
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -339,10 +415,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = TGT })
-/* Function call */
+/* Convert function address to BPF immediate */
-#define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -350,7 +425,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
@@ -372,6 +447,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
@@ -465,24 +550,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
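
The wrapper macros give helper bodies typed parameters while the emitted
symbol keeps the five-u64 eBPF calling convention. A sketch with a
hypothetical helper name:

BPF_CALL_2(bpf_my_lookup, struct bpf_map *, map, void *, key)
{
	/* typed args here; the exported bpf_my_lookup() takes u64s */
	return (unsigned long)map->ops->map_lookup_elem(map, key);
}
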
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
@@ -518,36 +606,16 @@ struct sock_fprog_kern {
#define BPF_IMAGE_ALIGNMENT 8
struct bpf_binary_header {
- u32 pages;
+ u32 size;
u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
-struct bpf_prog {
- u16 pages; /* Number of allocated pages */
- u16 jited:1, /* Is our filter JIT'ed? */
- jit_requested:1,/* archs need to JIT the prog */
- gpl_compatible:1, /* Is filter GPL compatible? */
- cb_access:1, /* Is control block accessed? */
- dst_needed:1, /* Do we need dst entry? */
- blinded:1, /* Was blinded */
- is_func:1, /* program is a bpf function */
- kprobe_override:1, /* Do we override a kprobe? */
- has_callchain_buf:1, /* callchain buffer allocated? */
- enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
- call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
- enum bpf_prog_type type; /* Type of BPF program */
- enum bpf_attach_type expected_attach_type; /* For some prog types */
- u32 len; /* Number of filter blocks */
- u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
- unsigned int (*bpf_func)(const void *ctx,
- const struct bpf_insn *insn);
- /* Instructions for interpreter */
- struct sock_filter insns[0];
- struct bpf_insn insnsi[];
-};
+struct bpf_prog_stats {
+ u64_stats_t cnt;
+ u64_stats_t nsecs;
+ u64_stats_t misses;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
struct sk_filter {
refcount_t refcnt;
@@ -557,25 +625,44 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
- u32 ret; \
- cant_migrate(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *stats; \
- u64 start = sched_clock(); \
- ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- stats = this_cpu_ptr(prog->aux->stats); \
- u64_stats_update_begin(&stats->syncp); \
- stats->cnt++; \
- stats->nsecs += sched_clock() - start; \
- u64_stats_update_end(&stats->syncp); \
- } else { \
- ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- } \
- ret; })
-
-#define BPF_PROG_RUN(prog, ctx) \
- __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
+extern struct mutex nf_conn_btf_access_lock;
+extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size);
+
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+ const void *ctx,
+ bpf_dispatcher_fn dfunc)
+{
+ u32 ret;
+
+ cant_migrate();
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ struct bpf_prog_stats *stats;
+ u64 start = sched_clock();
+ unsigned long flags;
+
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, sched_clock() - start);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ } else {
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ }
+ return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+ return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
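
Callers of bpf_prog_run() must keep the program alive for the duration of
the run; outside contexts that already guarantee this (such as the XDP fast
path), that means wrapping the call in an RCU read-side section:

rcu_read_lock();
act = bpf_prog_run(prog, ctx);
rcu_read_unlock();
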
/*
* Use in preemptible and therefore migratable context to make sure that
@@ -584,9 +671,6 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
* This uses migrate_disable/enable() explicitly to document that the
* invocation of a BPF program does not require reentrancy protection
* against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
*/
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)
@@ -594,7 +678,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
u32 ret;
migrate_disable();
- ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
+ ret = bpf_prog_run(prog, ctx);
migrate_enable();
return ret;
}
@@ -607,12 +691,23 @@ struct bpf_skb_data_end {
void *data_end;
};
+struct bpf_nh_params {
+ u32 nh_family;
+ union {
+ u32 ipv4_nh;
+ struct in6_addr ipv6_nh;
+ };
+};
+
struct bpf_redirect_info {
- u32 flags;
- u32 tgt_index;
+ u64 tgt_index;
void *tgt_value;
struct bpf_map *map;
+ u32 flags;
u32 kern_flags;
+ u32 map_id;
+ enum bpf_map_type map_type;
+ struct bpf_nh_params nh;
};
DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
@@ -647,7 +742,7 @@ static inline void bpf_compute_and_save_data_end(
cb->data_end = skb->data + skb_headlen(skb);
}
-/* Restore data saved by bpf_compute_data_pointers(). */
+/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
struct sk_buff *skb, void *saved_data_end)
{
@@ -656,7 +751,7 @@ static inline void bpf_restore_data_end(
cb->data_end = saved_data_end;
}
-static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@@ -677,8 +772,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+ const void *ctx)
{
+ const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -688,7 +784,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
- res = BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -722,17 +818,9 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
DECLARE_BPF_DISPATCHER(xdp)
-static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
- struct xdp_buff *xdp)
-{
- /* Caller needs to hold rcu_read_lock() (!), otherwise program
- * can be released while still running, or map elements could be
- * freed early while still having concurrent users. XDP fastpath
- * already takes rcu_read_lock() when fetching the program, so
- * it's not necessary here anymore.
- */
- return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
-}
+DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+
+u32 xdp_master_redirect(struct xdp_buff *xdp);
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
@@ -812,17 +900,7 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_vm_flush_reset_perms(hdr);
- set_memory_ro((unsigned long)hdr, hdr->pages);
- set_memory_x((unsigned long)hdr, hdr->pages);
-}
-
-static inline struct bpf_binary_header *
-bpf_jit_binary_hdr(const struct bpf_prog *fp)
-{
- unsigned long real_start = (unsigned long)fp->bpf_func;
- unsigned long addr = real_start & PAGE_MASK;
-
- return (void *)addr;
+ set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
@@ -836,12 +914,10 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
-void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
-void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
-void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
@@ -868,8 +944,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- unsigned int len);
+int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
@@ -877,11 +952,18 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
- __bpf_call_base)
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
+bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_kfunc_call(void);
+bool bpf_jit_supports_far_kfunc_call(void);
+bool bpf_jit_supports_exceptions(void);
+bool bpf_jit_supports_ptr_xchg(void);
+bool bpf_jit_supports_arena(void);
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
@@ -945,24 +1027,24 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
+ struct bpf_prog *prog);
void xdp_do_flush(void);
-/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
- * it is no longer only flushing maps. Keep this define for compatibility
- * until all drivers are updated - do not use xdp_do_flush_map() in new code!
- */
-#define xdp_do_flush_map xdp_do_flush
-
-void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash)
{
return NULL;
@@ -974,9 +1056,12 @@ extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
@@ -986,6 +1071,29 @@ u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_binary_header *
+bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
+
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(void *ptr, u32 size);
+
+static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+ return list_empty(&fp->aux->ksym.lnode) ||
+ fp->aux->ksym.lnode.prev == LIST_POISON2;
+}
+
+struct bpf_binary_header *
+bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
+ unsigned int alignment,
+ struct bpf_binary_header **rw_hdr,
+ u8 **rw_image,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
+ struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke);
@@ -1039,7 +1147,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
return true;
@@ -1065,6 +1173,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
@@ -1132,6 +1241,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
+static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ return NULL;
+}
+
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
@@ -1209,15 +1323,6 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
int k, unsigned int size);
-static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
- unsigned int size, void *buffer)
-{
- if (k >= 0)
- return skb_header_pointer(skb, k, size, buffer);
-
- return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
@@ -1232,17 +1337,22 @@ struct bpf_sock_addr_kern {
*/
u64 tmp_reg;
void *t_ctx; /* Attach type specific context. */
+ u32 uaddrlen;
};
struct bpf_sock_ops_kern {
struct sock *sk;
- u32 op;
union {
u32 args[4];
u32 reply;
u32 replylong[4];
};
- u32 is_fullsock;
+ struct sk_buff *syn_skb;
+ struct sk_buff *skb;
+ void *skb_data_end;
+ u8 op;
+ u8 is_fullsock;
+ u8 remaining_opt_len;
u64 temp; /* temp and everything after is not
* initialized to 0 before calling
* the BPF program. New fields that
@@ -1268,6 +1378,11 @@ struct bpf_sysctl_kern {
u64 tmp_reg;
};
+#define BPF_SOCKOPT_KERN_BUF_SIZE 32
+struct bpf_sockopt_buf {
+ u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
+
struct bpf_sockopt_kern {
struct sock *sk;
u8 *optval;
@@ -1275,7 +1390,10 @@ struct bpf_sockopt_kern {
s32 level;
s32 optname;
s32 optlen;
- s32 retval;
+ /* for retval in struct bpf_cg_run_ctx */
+ struct task_struct *current_task;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
};
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
@@ -1283,6 +1401,8 @@ int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
struct bpf_sk_lookup_kern {
u16 family;
u16 protocol;
+ __be16 sport;
+ u16 dport;
struct {
__be32 saddr;
__be32 daddr;
@@ -1291,9 +1411,8 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *saddr;
const struct in6_addr *daddr;
} v6;
- __be16 sport;
- u16 dport;
struct sock *selected_sk;
+ u32 ingress_ifindex;
bool no_reuseport;
};
@@ -1356,7 +1475,7 @@ extern struct static_key_false bpf_sk_lookup_enabled;
static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
- struct sock **psk)
+ const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@@ -1372,10 +1491,11 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
.v4.daddr = daddr,
.sport = sport,
.dport = dport,
+ .ingress_ifindex = ifindex,
};
u32 act;
- act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@@ -1394,7 +1514,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
const __be16 sport,
const struct in6_addr *daddr,
const u16 dport,
- struct sock **psk)
+ const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@@ -1410,10 +1530,11 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
.v6.daddr = daddr,
.sport = sport,
.dport = dport,
+ .ingress_ifindex = ifindex,
};
u32 act;
- act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@@ -1427,4 +1548,87 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
+static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+
+ /* Lower bits of the flags are used as return code on lookup failure */
+ if (unlikely(flags & ~(action_mask | flag_mask)))
+ return XDP_ABORTED;
+
+ ri->tgt_value = lookup_elem(map, index);
+ if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
+ /* If the lookup fails we want to clear out the state in the
+ * redirect_info struct completely, so that if an eBPF program
+ * performs multiple lookups, the last one always takes
+ * precedence.
+ */
+ ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ return flags & action_mask;
+ }
+
+ ri->tgt_index = index;
+ ri->map_id = map->id;
+ ri->map_type = map->map_type;
+
+ if (flags & BPF_F_BROADCAST) {
+ WRITE_ONCE(ri->map, map);
+ ri->flags = flags;
+ } else {
+ WRITE_ONCE(ri->map, NULL);
+ ri->flags = 0;
+ }
+
+ return XDP_REDIRECT;
+}
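
As a usage illustration (not part of the header), a map-type-specific redirect helper is expected to wrap this routine with its own lookup callback. This is a minimal sketch assuming a hypothetical __example_map_lookup_elem(); the flag mask mirrors what a broadcast-capable map type would pass.

/* Hedged sketch only: __example_map_lookup_elem() and
 * example_xdp_redirect_map() are hypothetical names, not kernel symbols.
 */
static void *__example_map_lookup_elem(struct bpf_map *map, u32 key);

static long example_xdp_redirect_map(struct bpf_map *map, u64 index, u64 flags)
{
	/* A broadcast-capable map type would accept these extra flags. */
	return __bpf_xdp_redirect_map(map, index, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __example_map_lookup_elem);
}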
+
+#ifdef CONFIG_NET
+int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
+int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
+ u32 len, u64 flags);
+int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
+void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
+ void *buf, unsigned long len, bool flush);
+#else /* CONFIG_NET */
+static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
+ void *to, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
+{
+ return NULL;
+}
+
+static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
+ unsigned long len, bool flush)
+{
+}
+#endif /* CONFIG_NET */
+
#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/find.h b/include/linux/find.h
new file mode 100644
index 000000000000..c69598e383c1
--- /dev/null
+++ b/include/linux/find.h
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FIND_H_
+#define __LINUX_FIND_H_
+
+#ifndef __LINUX_BITMAP_H
+#error only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size,
+ unsigned long n);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+
+#ifdef __BIG_ENDIAN
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_next_bit_le(const unsigned long *addr,
+				unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, size, offset);
+}
+#endif
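
As a quick usage sketch (example_walk_bits() is an illustrative caller, not part of this header), the function composes naturally into a scan loop over a fixed-size bitmap:

/* Hedged usage sketch; prints 3 and then 42. */
static void example_walk_bits(void)
{
	DECLARE_BITMAP(mask, 64);
	unsigned long bit;

	bitmap_zero(mask, 64);
	__set_bit(3, mask);
	__set_bit(42, mask);

	for (bit = find_next_bit(mask, 64, 0); bit < 64;
	     bit = find_next_bit(mask, 64, bit + 1))
		pr_info("bit %lu is set\n", bit);
}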
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
+ * in *addr2
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_andnot_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory region
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+/**
+ * find_nth_bit - find N'th set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * The following two calls are semantically equivalent:
+ *	 idx = find_nth_bit(addr, size, 0);
+ *	 idx = find_first_bit(addr, size);
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_bit(addr, size, n);
+}
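
A concrete sketch of the semantics, using an illustrative word value (0x16 has bits 1, 2 and 4 set):

/* Hedged sketch of find_nth_bit() semantics. */
static void example_nth_bit(void)
{
	unsigned long word = 0x16;	/* bits 1, 2 and 4 set */

	WARN_ON(find_nth_bit(&word, 5, 0) != 1);
	WARN_ON(find_nth_bit(&word, 5, 2) != 4);
	WARN_ON(find_nth_bit(&word, 5, 3) != 5);	/* only 3 bits set */
}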
+
+/**
+ * find_nth_and_bit - find N'th set bit in 2 memory regions
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
+ * flipping bits in 2nd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_andnot_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
+ * excluding those set in 3rd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @addr3: The 3rd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit whose position is needed, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
+}
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the first set bit common to both regions.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or @size if no bits are set.
+ */
+static inline
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __fls(val) : size;
+ }
+
+ return _find_last_bit(addr, size);
+}
+#endif
+
+/**
+ * find_next_and_bit_wrap - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
+
+ if (bit < size || offset == 0)
+ return bit;
+
+ bit = find_first_and_bit(addr1, addr2, offset);
+ return bit < offset ? bit : size;
+}
+
+/**
+ * find_next_bit_wrap - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit_wrap(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_bit(addr, size, offset);
+
+ if (bit < size || offset == 0)
+ return bit;
+
+ bit = find_first_bit(addr, offset);
+ return bit < offset ? bit : size;
+}
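
A small sketch of the wrap-around behaviour, with an illustrative value:

/* Hedged sketch: no set bit at or above the offset, so the search wraps. */
static void example_wrap(void)
{
	unsigned long word = 0x06;	/* bits 1 and 2 set */

	WARN_ON(find_next_bit_wrap(&word, 8, 3) != 1);
}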
+
+/*
+ * Helper for for_each_set_bit_wrap(). Make sure you know what you are doing
+ * before using it on its own.
+ */
+static inline
+unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
+ unsigned long start, unsigned long n)
+{
+ unsigned long bit;
+
+ /* If not wrapped around */
+ if (n > start) {
+ /* and have a bit, just return it. */
+ bit = find_next_bit(bitmap, size, n);
+ if (bit < size)
+ return bit;
+
+ /* Otherwise, wrap around and ... */
+ n = 0;
+ }
+
+ /* Search the other part. */
+ bit = find_next_bit(bitmap, start, n);
+ return bit < start ? bit : size;
+}
+
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
+#if defined(__LITTLE_ENDIAN)
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#ifndef find_next_zero_bit_le
+static inline
+unsigned long find_next_zero_bit_le(const void *addr,
+				    unsigned long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit_le(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+static inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit_le(addr, size);
+}
+#endif
+
+#ifndef find_next_bit_le
+static inline
+unsigned long find_next_bit_le(const void *addr,
+			       unsigned long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit_le(addr, size, offset);
+}
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
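For illustration, the macro replaces the open-coded find_next_bit() loop shown earlier; example_handle_mask() is a hypothetical caller:

/* Hedged usage sketch of the canonical iteration idiom. */
static void example_handle_mask(const unsigned long *mask, unsigned int nbits)
{
	unsigned int bit;

	for_each_set_bit(bit, mask, nbits)
		pr_info("handling bit %u\n", bit);
}
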
+#define for_each_and_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_andnot_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_or_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
+ (bit)++)
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+/**
+ * for_each_set_bitrange - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit)
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_bit((addr), (size), b), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
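A short sketch (hypothetical example_print_ranges()) showing how the range form collapses runs of set bits; for 0x38 it reports the single range 3-5:

/* Hedged usage sketch for the bitrange iterator. */
static void example_print_ranges(const unsigned long *map, unsigned int nbits)
{
	unsigned int rs, re;	/* run start, run end (exclusive) */

	for_each_set_bitrange(rs, re, map, nbits)
		pr_info("bits %u-%u are set\n", rs, re - 1);
}
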
+/**
+ * for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit)
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit); must be initialized
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bit_wrap - iterate over all set bits starting from @start, and
+ * wrapping around the end of bitmap.
+ * @bit: offset for current iteration
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ * @start: Starting bit for bitmap traversing, wrapping around the bitmap end
+ */
+#define for_each_set_bit_wrap(bit, addr, size, start) \
+ for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
+ (bit) < (size); \
+ (bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
+
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+
+#endif /*__LINUX_FIND_H_ */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index aec8f30ab200..dd9f2d765e68 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -75,7 +75,7 @@ void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
-extern struct bus_type fw_bus_type;
+extern const struct bus_type fw_bus_type;
struct fw_card_driver;
struct fw_node;
@@ -150,6 +150,8 @@ static inline void fw_card_put(struct fw_card *card)
kref_put(&card->kref, fw_card_release);
}
+int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time);
+
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
@@ -206,10 +208,7 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_device *fw_device(struct device *dev)
-{
- return container_of(dev, struct fw_device, device);
-}
+#define fw_device(dev) container_of_const(dev, struct fw_device, device)
static inline int fw_device_is_shutdown(struct fw_device *device)
{
@@ -227,10 +226,7 @@ struct fw_unit {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
- return container_of(dev, struct fw_unit, device);
-}
+#define fw_unit(dev) container_of_const(dev, struct fw_unit, device)
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
@@ -244,10 +240,7 @@ static inline void fw_unit_put(struct fw_unit *unit)
put_device(&unit->device);
}
-static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
-{
- return fw_device(unit->device.parent);
-}
+#define fw_parent_device(unit) fw_device(unit->device.parent)
struct ieee1394_device_id;
@@ -268,6 +261,15 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
+typedef void (*fw_transaction_callback_with_tstamp_t)(struct fw_card *card, int rcode,
+ u32 request_tstamp, u32 response_tstamp, void *data,
+ size_t length, void *callback_data);
+
+union fw_transaction_callback {
+ fw_transaction_callback_t without_tstamp;
+ fw_transaction_callback_with_tstamp_t with_tstamp;
+};
+
/*
* This callback handles an inbound request subaction. It is called in
* RCU read-side context, therefore must not sleep.
@@ -276,9 +278,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
* Otherwise there is a danger of recursion of inbound and outbound
* transactions from and to the local node.
*
- * The callback is responsible that either fw_send_response() or kfree()
- * is called on the @request, except for FCP registers for which the core
- * takes care of that.
+ * The callback is responsible for ensuring that fw_send_response() is called on the @request,
+ * except for FCP registers, for which the core takes care of that.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
@@ -320,6 +321,7 @@ struct fw_transaction {
struct fw_card *card;
bool is_split_transaction;
struct timer_list split_timeout_timer;
+ u32 split_timeout_cycle;
struct fw_packet packet;
@@ -327,7 +329,8 @@ struct fw_transaction {
* The data passed to the callback is valid only during the
* callback.
*/
- fw_transaction_callback_t callback;
+ union fw_transaction_callback callback;
+ bool with_tstamp;
void *callback_data;
};
@@ -352,10 +355,72 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data);
+u32 fw_request_get_timestamp(const struct fw_request *request);
+
+void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, union fw_transaction_callback callback,
+ bool with_tstamp, void *callback_data);
+
+/**
+ * fw_send_request() - submit a request packet for transmission, generating a callback for the
+ *		       response subaction without a time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates the callback for the response subaction
+ * without a time stamp.
+ */
+static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .without_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, false, callback_data);
+}
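
A minimal usage sketch, assuming hypothetical example_* helpers: an asynchronous quadlet read of the CYCLE_TIME register. The transaction object must outlive the callback; a static one is used here purely for brevity.

/* Hedged sketch only; not taken from an in-tree driver. */
static struct fw_transaction example_t;

static void example_complete(struct fw_card *card, int rcode,
			     void *data, size_t length, void *callback_data)
{
	if (rcode == RCODE_COMPLETE)
		pr_info("cycle time: %08x\n", be32_to_cpu(*(__be32 *)data));
}

static void example_read_cycle_time(struct fw_card *card, int node_id,
				    int generation)
{
	fw_send_request(card, &example_t, TCODE_READ_QUADLET_REQUEST,
			node_id, generation, SCODE_100,
			CSR_REGISTER_BASE + CSR_CYCLE_TIME,
			NULL, 4, example_complete, NULL);
}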
+
+/**
+ * fw_send_request_with_tstamp() - submit a request packet for transmission, generating a callback
+ *				   for the response subaction with a time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates the callback for the response subaction with
+ * a time stamp.
+ */
+static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, fw_transaction_callback_with_tstamp_t callback,
+ void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .with_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, true, callback_data);
+}
+
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
@@ -398,7 +463,7 @@ struct fw_iso_packet {
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
u32 header_length:8; /* Length of immediate header */
- u32 header[0]; /* tx: Top of 1394 isoch. data_block */
+ u32 header[]; /* tx: Top of 1394 isoch. data_block */
};
#define FW_ISO_CONTEXT_TRANSMIT 0
@@ -436,6 +501,12 @@ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
+
+union fw_iso_callback {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+};
+
struct fw_iso_context {
struct fw_card *card;
int type;
@@ -443,10 +514,7 @@ struct fw_iso_context {
int speed;
bool drop_overflow_headers;
size_t header_size;
- union {
- fw_iso_callback_t sc;
- fw_iso_mc_callback_t mc;
- } callback;
+ union fw_iso_callback callback;
void *callback_data;
};
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index cb3e2c06ed8a..f026f8926d79 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -4,10 +4,11 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/gfp.h>
-#define FW_ACTION_NOHOTPLUG 0
-#define FW_ACTION_HOTPLUG 1
+#define FW_ACTION_NOUEVENT 0
+#define FW_ACTION_UEVENT 1
struct firmware {
size_t size;
@@ -17,28 +18,84 @@ struct firmware {
void *priv;
};
-struct module;
-struct device;
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was canceled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_FW_INVALID: invalid firmware file
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+ FW_UPLOAD_ERR_NONE,
+ FW_UPLOAD_ERR_HW_ERROR,
+ FW_UPLOAD_ERR_TIMEOUT,
+ FW_UPLOAD_ERR_CANCELED,
+ FW_UPLOAD_ERR_BUSY,
+ FW_UPLOAD_ERR_INVALID_SIZE,
+ FW_UPLOAD_ERR_RW_ERROR,
+ FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_FW_INVALID,
+ FW_UPLOAD_ERR_MAX
+};
-struct builtin_fw {
- char *name;
- void *data;
- unsigned long size;
+struct fw_upload {
+ void *dd_handle; /* reference to parent driver */
+ void *priv; /* firmware loader private fields */
};
-/* We have to play tricks here much like stringify() to get the
- __COUNTER__ macro to be expanded as we want it */
-#define __fw_concat1(x, y) x##y
-#define __fw_concat(x, y) __fw_concat1(x, y)
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare: Required: Prepare secure update
+ * @write: Required: The write() op receives the remaining
+ * size to be written and must return the actual
+ * size written or a negative error code. The write()
+ * op will be called repeatedly until all data is
+ * written.
+ * @poll_complete: Required: Check for the completion of the
+ * HW authentication/programming process.
+ * @cancel: Required: Request cancellation of update. This op
+ * is called from the context of a different kernel
+ * thread, so race conditions need to be considered.
+ * @cleanup: Optional: Complements the prepare()
+ * function and is called at the completion
+ * of the update, on success or failure, if the
+ * prepare function succeeded.
+ */
+struct fw_upload_ops {
+ enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+ const u8 *data, u32 size);
+ enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written);
+ enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+ void (*cancel)(struct fw_upload *fw_upload);
+ void (*cleanup)(struct fw_upload *fw_upload);
+};
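
For orientation, a minimal sketch of an ops table for a hypothetical device (all example_* names are illustrative); it would be handed to firmware_upload_register(), declared further down in this header.

/* Hedged sketch: trivial implementations of the required ops. */
static enum fw_upload_err example_prepare(struct fw_upload *fwl,
					  const u8 *data, u32 size)
{
	return size ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_INVALID_SIZE;
}

static enum fw_upload_err example_write(struct fw_upload *fwl, const u8 *data,
					u32 offset, u32 size, u32 *written)
{
	*written = size;	/* pretend the whole chunk was programmed */
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err example_poll_complete(struct fw_upload *fwl)
{
	return FW_UPLOAD_ERR_NONE;
}

static void example_cancel(struct fw_upload *fwl)
{
}

static const struct fw_upload_ops example_ops = {
	.prepare	= example_prepare,
	.write		= example_write,
	.poll_complete	= example_poll_complete,
	.cancel		= example_cancel,
};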
-#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
- DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+struct module;
+struct device;
-#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
- static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
- __used __section(.builtin_fw) = { name, blob, size }
+/*
+ * Built-in firmware functionality is only available if FW_LOADER=y, but not
+ * FW_LOADER=m
+ */
+#ifdef CONFIG_FW_LOADER
+bool firmware_request_builtin(struct firmware *fw, const char *name);
+#else
+static inline bool firmware_request_builtin(struct firmware *fw,
+ const char *name)
+{
+ return false;
+}
+#endif
-#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_nowarn(const struct firmware **fw, const char *name,
@@ -53,6 +110,9 @@ int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_into_buf(const struct firmware **firmware_p,
const char *name, struct device *device, void *buf, size_t size);
+int request_partial_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ void *buf, size_t size, size_t offset);
void release_firmware(const struct firmware *fw);
#else
@@ -102,8 +162,43 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p,
return -EINVAL;
}
+static inline int
+request_partial_firmware_into_buf(const struct firmware **firmware_p,
+				  const char *name, struct device *device,
+				  void *buf, size_t size, size_t offset)
+{
+ return -EINVAL;
+}
+
+#endif
+
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
#endif
int firmware_request_cache(struct device *device, const char *name);
+DEFINE_FREE(firmware, struct firmware *, release_firmware(_T))
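
A usage sketch of the scope-based cleanup declared above (example_load() is hypothetical): the firmware is released automatically when fw goes out of scope.

/* Hedged sketch using the __free(firmware) cleanup attribute. */
static int example_load(struct device *dev)
{
	const struct firmware *fw __free(firmware) = NULL;
	int err;

	err = request_firmware(&fw, "example.bin", dev);
	if (err)
		return err;

	/* use fw->data / fw->size; release_firmware() runs at scope exit */
	return 0;
}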
+
#endif
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
new file mode 100644
index 000000000000..23384a54d575
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * cs_dsp.h -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.h
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+#ifndef __CS_DSP_H
+#define __CS_DSP_H
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+
+#define CS_ADSP2_REGION_0 BIT(0)
+#define CS_ADSP2_REGION_1 BIT(1)
+#define CS_ADSP2_REGION_2 BIT(2)
+#define CS_ADSP2_REGION_3 BIT(3)
+#define CS_ADSP2_REGION_4 BIT(4)
+#define CS_ADSP2_REGION_5 BIT(5)
+#define CS_ADSP2_REGION_6 BIT(6)
+#define CS_ADSP2_REGION_7 BIT(7)
+#define CS_ADSP2_REGION_8 BIT(8)
+#define CS_ADSP2_REGION_9 BIT(9)
+#define CS_ADSP2_REGION_1_9 (CS_ADSP2_REGION_1 | \
+ CS_ADSP2_REGION_2 | CS_ADSP2_REGION_3 | \
+ CS_ADSP2_REGION_4 | CS_ADSP2_REGION_5 | \
+ CS_ADSP2_REGION_6 | CS_ADSP2_REGION_7 | \
+ CS_ADSP2_REGION_8 | CS_ADSP2_REGION_9)
+#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9)
+
+#define CS_DSP_DATA_WORD_SIZE 3
+#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE)
+
+#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100
+#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10
+#define CS_DSP_ACKED_CTL_MIN_VALUE 0
+#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+
+/**
+ * struct cs_dsp_region - Describes a logical memory region in DSP address space
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_region {
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
+ * @list: List node for internal use
+ * @alg: Algorithm id
+ * @ver: Expected algorithm version
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_alg_region {
+ struct list_head list;
+ unsigned int alg;
+ unsigned int ver;
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_coeff_ctl - Describes a coefficient control
+ * @list: List node for internal use
+ * @dsp: DSP instance associated with this control
+ * @cache: Cached value of the control
+ * @fw_name: Name of the firmware
+ * @subname: Name of the control parsed from the WMFW
+ * @subname_len: Length of subname
+ * @offset: Offset of control within alg_region in words
+ * @len: Length of the cached value in bytes
+ * @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h
+ * @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h
+ * @set: Flag indicating the value has been written by the user
+ * @enabled: Flag indicating whether control is enabled
+ * @alg_region: Logical region associated with this control
+ * @priv: For use by the client
+ */
+struct cs_dsp_coeff_ctl {
+ struct list_head list;
+ struct cs_dsp *dsp;
+ void *cache;
+ const char *fw_name;
+ /* Subname is needed to match with firmware */
+ const char *subname;
+ unsigned int subname_len;
+ unsigned int offset;
+ size_t len;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int set:1;
+ unsigned int enabled:1;
+ struct cs_dsp_alg_region alg_region;
+
+ void *priv;
+};
+
+struct cs_dsp_ops;
+struct cs_dsp_client_ops;
+
+/**
+ * struct cs_dsp - Configuration and state of a Cirrus Logic DSP
+ * @name: The name of the DSP instance
+ * @rev: Revision of the DSP
+ * @num: DSP instance number
+ * @type: Type of DSP
+ * @dev: Driver model representation of the device
+ * @regmap: Register map of the device
+ * @ops: Function pointers for internal callbacks
+ * @client_ops: Function pointers for client callbacks
+ * @base: Address of the DSP registers
+ * @base_sysinfo: Address of the sysinfo register (Halo only)
+ * @sysclk_reg: Address of the sysclk register (ADSP1 only)
+ * @sysclk_mask: Mask of frequency bits within sysclk register (ADSP1 only)
+ * @sysclk_shift:	Shift of frequency bits within sysclk register (ADSP1 only)
+ * @no_core_startstop: Flag indicating the core does not support start/stop control
+ * @alg_regions: List of currently loaded algorithm regions
+ * @fw_name: Name of the current firmware
+ * @fw_id: ID of the current firmware, obtained from the wmfw
+ * @fw_id_version: Version of the firmware, obtained from the wmfw
+ * @fw_vendor_id: Vendor of the firmware, obtained from the wmfw
+ * @mem: DSP memory region descriptions
+ * @num_mems: Number of memory regions in this DSP
+ * @fw_ver: Version of the wmfw file format
+ * @booted: Flag indicating DSP has been configured
+ * @running: Flag indicating DSP is executing firmware
+ * @ctl_list: Controls defined within the loaded DSP firmware
+ * @lock_regions: Enable MPU traps on specified memory regions
+ * @pwr_lock: Lock used to serialize accesses
+ * @debugfs_root: Debugfs directory for this DSP instance
+ * @wmfw_file_name: Filename of the currently loaded firmware
+ * @bin_file_name: Filename of the currently loaded coefficients
+ */
+struct cs_dsp {
+ const char *name;
+ int rev;
+ int num;
+ int type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ const struct cs_dsp_ops *ops;
+ const struct cs_dsp_client_ops *client_ops;
+
+ unsigned int base;
+ unsigned int base_sysinfo;
+ unsigned int sysclk_reg;
+ unsigned int sysclk_mask;
+ unsigned int sysclk_shift;
+ bool no_core_startstop;
+
+ struct list_head alg_regions;
+
+ const char *fw_name;
+ unsigned int fw_id;
+ unsigned int fw_id_version;
+ unsigned int fw_vendor_id;
+
+ const struct cs_dsp_region *mem;
+ int num_mems;
+
+ int fw_ver;
+
+ bool booted;
+ bool running;
+
+ struct list_head ctl_list;
+
+ struct mutex pwr_lock;
+
+ unsigned int lock_regions;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ char *wmfw_file_name;
+ char *bin_file_name;
+#endif
+};
+
+/**
+ * struct cs_dsp_client_ops - client callbacks
+ * @control_add: Called under the pwr_lock when a control is created
+ * @control_remove: Called under the pwr_lock when a control is destroyed
+ * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
+ * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
+ * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped
+ * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped
+ * @watchdog_expired: Called when a watchdog expiry is detected
+ *
+ * These callbacks give the cs_dsp client an opportunity to respond to events
+ * or to perform actions atomically.
+ */
+struct cs_dsp_client_ops {
+ int (*control_add)(struct cs_dsp_coeff_ctl *ctl);
+ void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
+ int (*pre_run)(struct cs_dsp *dsp);
+ int (*post_run)(struct cs_dsp *dsp);
+ void (*pre_stop)(struct cs_dsp *dsp);
+ void (*post_stop)(struct cs_dsp *dsp);
+ void (*watchdog_expired)(struct cs_dsp *dsp);
+};
+
+int cs_dsp_adsp1_init(struct cs_dsp *dsp);
+int cs_dsp_adsp2_init(struct cs_dsp *dsp);
+int cs_dsp_halo_init(struct cs_dsp *dsp);
+
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp);
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_power_down(struct cs_dsp *dsp);
+int cs_dsp_run(struct cs_dsp *dsp);
+void cs_dsp_stop(struct cs_dsp *dsp);
+
+void cs_dsp_remove(struct cs_dsp *dsp);
+
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq);
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp);
+
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root);
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
+
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg);
+
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data);
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data);
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data);
+void cs_dsp_remove_padding(u32 *buf, int nwords);
+
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id);
+
+const char *cs_dsp_mem_region_name(unsigned int type);
+
+/**
+ * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
+ * @data: Pointer to underlying buffer memory
+ * @max: Pointer to end of the buffer memory
+ * @bytes: Number of bytes read/written into the memory chunk
+ * @cache: Temporary holding data as it is formatted
+ * @cachebits: Number of bits of data currently in cache
+ */
+struct cs_dsp_chunk {
+ u8 *data;
+ u8 *max;
+ int bytes;
+
+ u32 cache;
+ int cachebits;
+};
+
+/**
+ * cs_dsp_chunk() - Create a DSP memory chunk
+ * @data: Pointer to the buffer that will be used to store data
+ * @size: Size of the buffer in bytes
+ *
+ * Return: A cs_dsp_chunk structure
+ */
+static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size)
+{
+ struct cs_dsp_chunk ch = {
+ .data = data,
+ .max = data + size,
+ };
+
+ return ch;
+}
+
+/**
+ * cs_dsp_chunk_end() - Check if a DSP memory chunk is full
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the whole buffer has been read/written
+ */
+static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch)
+{
+ return ch->data == ch->max;
+}
+
+/**
+ * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: Number of bytes read/written to the buffer
+ */
+static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch)
+{
+ return ch->bytes;
+}
+
+/**
+ * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @addr: Address to check against the chunk bounds
+ *
+ * Return: True if the given address is within the buffer
+ */
+static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr)
+{
+ return (u8 *)addr >= ch->data && (u8 *)addr < ch->max;
+}
+
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
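
A short sketch, assuming a hypothetical example_pack() caller, of how the chunk helpers cooperate to emit DSP-formatted words:

/* Hedged sketch: packing two 24-bit words into a DSP-formatted buffer. */
static int example_pack(u8 *raw, int size)
{
	struct cs_dsp_chunk ch = cs_dsp_chunk(raw, size);
	int ret;

	ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, 0x123456);
	if (ret)
		return ret;
	ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, 0xabcdef);
	if (ret)
		return ret;

	ret = cs_dsp_chunk_flush(&ch);	/* pad out any partially cached bits */
	if (ret)
		return ret;

	return cs_dsp_chunk_bytes(&ch);	/* bytes actually written */
}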
+
+#endif
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
new file mode 100644
index 000000000000..74e5a4f6c13a
--- /dev/null
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * wmfw.h - Wolfson firmware format information
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#ifndef __WMFW_H
+#define __WMFW_H
+
+#include <linux/types.h>
+
+#define WMFW_MAX_ALG_NAME 256
+#define WMFW_MAX_ALG_DESCR_NAME 256
+
+#define WMFW_MAX_COEFF_NAME 256
+#define WMFW_MAX_COEFF_DESCR_NAME 256
+
+#define WMFW_CTL_FLAG_SYS 0x8000
+#define WMFW_CTL_FLAG_VOLATILE 0x0004
+#define WMFW_CTL_FLAG_WRITEABLE 0x0002
+#define WMFW_CTL_FLAG_READABLE 0x0001
+
+#define WMFW_CTL_TYPE_BYTES 0x0004 /* byte control */
+
+/* Non-ALSA coefficient types start at 0x1000 */
+#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
+#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
+#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
+#define WMFW_CTL_TYPE_FWEVENT 0x1004 /* firmware event control */
+
+struct wmfw_header {
+ char magic[4];
+ __le32 len;
+ __le16 rev;
+ u8 core;
+ u8 ver;
+} __packed;
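
As an illustration (example_wmfw_valid() is hypothetical; the "WMFW" magic matches what existing parsers check), a basic sanity test of a candidate image:

/* Hedged sketch: minimal header validation for a wmfw blob. */
static bool example_wmfw_valid(const u8 *data, size_t size)
{
	const struct wmfw_header *h = (const struct wmfw_header *)data;

	if (size < sizeof(*h))
		return false;
	if (memcmp(h->magic, "WMFW", 4) != 0)
		return false;

	return le32_to_cpu(h->len) <= size;
}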
+
+struct wmfw_footer {
+ __le64 timestamp;
+ __le32 checksum;
+} __packed;
+
+struct wmfw_adsp1_sizes {
+ __le32 dm;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_adsp2_sizes {
+ __le32 xm;
+ __le32 ym;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_region {
+ union {
+ __be32 type;
+ __le32 offset;
+ };
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_id_hdr {
+ __be32 core_id;
+ __be32 core_rev;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_v3_id_hdr {
+ __be32 core_id;
+ __be32 block_rev;
+ __be32 vendor_id;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 dm;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_adsp2_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_halo_id_hdr {
+ struct wmfw_v3_id_hdr fw;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_alg_hdr {
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 dm;
+} __packed;
+
+struct wmfw_adsp2_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+} __packed;
+
+struct wmfw_halo_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+} __packed;
+
+struct wmfw_adsp_alg_data {
+ __le32 id;
+ u8 name[WMFW_MAX_ALG_NAME];
+ u8 descr[WMFW_MAX_ALG_DESCR_NAME];
+ __le32 ncoeff;
+ u8 data[];
+} __packed;
+
+struct wmfw_adsp_coeff_data {
+ struct {
+ __le16 offset;
+ __le16 type;
+ __le32 size;
+ } hdr;
+ u8 name[WMFW_MAX_COEFF_NAME];
+ u8 descr[WMFW_MAX_COEFF_DESCR_NAME];
+ __le16 ctl_type;
+ __le16 flags;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_hdr {
+ u8 magic[4];
+ __le32 len;
+ union {
+ __be32 rev;
+ __le32 ver;
+ };
+ union {
+ __be32 core;
+ __le32 core_ver;
+ };
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_item {
+ __le16 offset;
+ __le16 type;
+ __le32 id;
+ __le32 ver;
+ __le32 sr;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+#define WMFW_ADSP1 1
+#define WMFW_ADSP2 2
+#define WMFW_HALO 4
+
+#define WMFW_ABSOLUTE 0xf0
+#define WMFW_ALGORITHM_DATA 0xf2
+#define WMFW_METADATA 0xfc
+#define WMFW_NAME_TEXT 0xfe
+#define WMFW_INFO_TEXT 0xff
+
+#define WMFW_ADSP1_PM 2
+#define WMFW_ADSP1_DM 3
+#define WMFW_ADSP1_ZM 4
+
+#define WMFW_ADSP2_PM 2
+#define WMFW_ADSP2_ZM 4
+#define WMFW_ADSP2_XM 5
+#define WMFW_ADSP2_YM 6
+
+#define WMFW_HALO_PM_PACKED 0x10
+#define WMFW_HALO_XM_PACKED 0x11
+#define WMFW_HALO_YM_PACKED 0x12
+
+#endif
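
For orientation, a hedged sketch of validating the fixed header of an
in-memory wmfw image; the "WMFW" magic check mirrors how existing
consumers of this format begin parsing, and the error codes are
illustrative:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/firmware/cirrus/wmfw.h>

static int example_check_wmfw(const u8 *data, size_t len)
{
	const struct wmfw_header *hdr = (const struct wmfw_header *)data;

	if (len < sizeof(*hdr) || memcmp(hdr->magic, "WMFW", sizeof(hdr->magic)))
		return -EINVAL;

	/* multi-byte header fields are little-endian on the wire */
	if (le32_to_cpu(hdr->len) > len)
		return -EOVERFLOW;

	return hdr->ver;	/* file format version, plain u8 */
}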
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
index 7562099c9e46..1f176a2683fe 100644
--- a/include/linux/firmware/imx/dsp.h
+++ b/include/linux/firmware/imx/dsp.h
@@ -37,17 +37,11 @@ struct imx_dsp_ipc {
static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data)
{
- if (!ipc)
- return;
-
ipc->private_data = data;
}
static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
{
- if (!ipc)
- return NULL;
-
return ipc->private_data;
}
@@ -55,6 +49,9 @@ static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx);
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx);
+void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx);
+
#else
static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
@@ -63,5 +60,12 @@ static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
return -ENOTSUPP;
}
+static inline struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx) { }
+
#endif
#endif /* _IMX_DSP_IPC_H */
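
A hedged usage sketch for the two new channel helpers; the channel
index and the surrounding flow are illustrative only:

#include <linux/err.h>
#include <linux/firmware/imx/dsp.h>

static int example_use_channel(struct imx_dsp_ipc *ipc)
{
	struct mbox_chan *chan;

	chan = imx_dsp_request_channel(ipc, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... exchange messages with the DSP ... */

	imx_dsp_free_channel(ipc, 0);
	return 0;
}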
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
index 891057434858..0b4643571625 100644
--- a/include/linux/firmware/imx/ipc.h
+++ b/include/linux/firmware/imx/ipc.h
@@ -34,6 +34,7 @@ struct imx_sc_rpc_msg {
uint8_t func;
};
+#ifdef CONFIG_IMX_SCU
/*
* This is a function to send an RPC message over an IPC channel.
* It is called by client-side SCFW API function shims.
@@ -55,4 +56,16 @@ int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
* @return Returns an error code (0 = success, failed if < 0)
*/
int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#else
+static inline int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg,
+ bool have_resp)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _SC_IPC_H */
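
A sketch of the call sequence the new stubs mirror when CONFIG_IMX_SCU
is enabled; the service and function IDs below are placeholders (real
callers use the constants from the svc/ headers):

#include <linux/firmware/imx/ipc.h>

static int example_scu_rpc(void)
{
	struct imx_sc_ipc *ipc;
	struct imx_sc_rpc_msg msg;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	msg.ver = 1;	/* RPC protocol version (assumed) */
	msg.svc = 9;	/* placeholder service ID */
	msg.func = 1;	/* placeholder function ID */
	msg.size = 1;	/* message size in words, header included */

	return imx_scu_call_rpc(ipc, &msg, true);	/* wait for response */
}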
diff --git a/include/linux/firmware/imx/s4.h b/include/linux/firmware/imx/s4.h
new file mode 100644
index 000000000000..9e34923ae1d6
--- /dev/null
+++ b/include/linux/firmware/imx/s4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _S4_IPC_H
+#define _S4_IPC_H
+
+struct imx_s4_ipc;
+
+struct imx_s4_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t cmd;
+ uint8_t tag;
+} __packed;
+
+#endif /* _S4_IPC_H */
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 22c76571a294..df17196df5ff 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -16,9 +16,42 @@
#include <linux/firmware/imx/svc/pm.h>
#include <linux/firmware/imx/svc/rm.h>
+#if IS_ENABLED(CONFIG_IMX_SCU)
int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_irq_get_status(u8 group, u32 *irq_status);
int imx_scu_soc_init(struct device *dev);
+#else
+static inline int imx_scu_soc_init(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_get_status(u8 group, u32 *irq_status)
+{
+ return -EOPNOTSUPP;
+}
+#endif
#endif /* _SC_SCI_H */
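
A hedged sketch pairing the notifier and group-enable calls declared
above; the group number and mask are illustrative:

#include <linux/bits.h>
#include <linux/notifier.h>
#include <linux/firmware/imx/sci.h>

static int example_scu_irq_cb(struct notifier_block *nb,
			      unsigned long group, void *data)
{
	/* inspect the IRQ status delivered for this group */
	return NOTIFY_OK;
}

static struct notifier_block example_scu_nb = {
	.notifier_call = example_scu_irq_cb,
};

static int example_enable_scu_irq(struct device *dev)
{
	int ret;

	ret = imx_scu_enable_general_irq_channel(dev);
	if (ret)
		return ret;

	ret = imx_scu_irq_register_notifier(&example_scu_nb);
	if (ret)
		return ret;

	return imx_scu_irq_group_enable(1, BIT(0), true);	/* placeholder group/mask */
}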
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
index 031dd4d3c766..760db08a67fc 100644
--- a/include/linux/firmware/imx/svc/misc.h
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -46,6 +46,7 @@ enum imx_misc_func {
* Control Functions
*/
+#ifdef CONFIG_IMX_SCU
int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 val);
@@ -54,5 +55,23 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
bool enable, u64 phys_addr);
+#else
+static inline int imx_sc_misc_set_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 val)
+{
+ return -ENOTSUPP;
+}
+static inline int imx_sc_misc_get_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 *val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _SC_MISC_API_H */
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
index 456b6a59d29b..31456f897aa9 100644
--- a/include/linux/firmware/imx/svc/rm.h
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -59,11 +59,16 @@ enum imx_sc_rm_func {
#if IS_ENABLED(CONFIG_IMX_SCU)
bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
#else
static inline bool
imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
{
return true;
}
+static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index c3e5ab014caf..ee80ca4bb0d0 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -321,8 +321,6 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_ECC_DBE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
-#endif
-
/**
* Request INTEL_SIP_SMC_RSU_NOTIFY
*
@@ -404,3 +402,222 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
#define INTEL_SIP_SMC_RSU_MAX_RETRY \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_STATUS
+ *
+ * Sync call used by service driver at EL1 to query DCMF status from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf3 | dcmf2 | dcmf1 | dcmf0
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS 20
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_SERVICE_COMPLETED
+ * Sync call to check whether the secure world has completed a service
+ * request.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_SERVICE_COMPLETED
+ * a1: this register is optional. If used, it is the physical address for
+ * secure firmware to put output data
+ * a2: this register is optional. If used, it is the size of output data
+ * a3-a7: not used
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR,
+ * INTEL_SIP_SMC_STATUS_REJECTED or INTEL_SIP_SMC_STATUS_BUSY
+ * a1: mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR
+ * a2: physical address of the process info
+ * for an FCS certificate -- the data contains the certificate status
+ * for FCS cryption -- the data contains the actual size of the data the
+ * firmware processed
+ * a3: output data size
+ */
+#define INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED 30
+#define INTEL_SIP_SMC_SERVICE_COMPLETED \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED)
+
+/**
+ * Request INTEL_SIP_SMC_FIRMWARE_VERSION
+ *
+ * Sync call used to query the version of running firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
+ * a1 running firmware version
+ */
+#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
+#define INTEL_SIP_SMC_FIRMWARE_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
+
+/**
+ * SMC call protocol for Mailbox, starting FUNCID from 60
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_MBOX_SEND_CMD
+ * a1 mailbox command code
+ * a2 physical address containing the mailbox command data (header excluded)
+ * a3 mailbox command data size in words
+ * a4 set to 0 for CASUAL, set to 1 for URGENT
+ * a5 physical address for secure firmware to put response data
+ * (header excluded)
+ * a6 maximum size in words of the response data buffer
+ * a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED or
+ * INTEL_SIP_SMC_STATUS_ERROR
+ * a1 mailbox error code
+ * a2 response data length in words
+ * a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD 60
+#define INTEL_SIP_SMC_MBOX_SEND_CMD \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD)
+
+/**
+ * Request INTEL_SIP_SMC_SVC_VERSION
+ *
+ * Sync call used to query the SIP SMC API Version
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_SVC_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Major
+ * a2 Minor
+ */
+#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512
+#define INTEL_SIP_SMC_SVC_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION)
+
+/**
+ * SMC call protocol for FPGA Crypto Service (FCS)
+ * FUNCID starts from 90
+ */
+
+/**
+ * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ *
+ * Sync call used to query the random number generated by the firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ * a1 the physical address for firmware to write generated random data
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 the physical address of generated random number
+ * a3 size
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER 90
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_CRYPTION
+ * Async call for data encryption and HMAC signature generation, or for
+ * data decryption and HMAC verification.
+ *
+ * Call INTEL_SIP_SMC_SERVICE_COMPLETED to retrieve the encrypted or
+ * decrypted output data
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_CRYPTION
+ * a1 cryption mode (1 for encryption and 0 for decryption)
+ * a2 physical address of the data to be encrypted or decrypted
+ * a3 input data size
+ * a4 physical address which will hold the encrypted or decrypted output data
+ * a5 output data size
+ * a6-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION 91
+#define INTEL_SIP_SMC_FCS_CRYPTION \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * Async call for authentication service of HPS software
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * a1 the physical address of data block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST 92
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST)
+
+/**
+ * Request INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE
+ * Sync call to send a signed certificate
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE
+ * a1 the physical address of CERTIFICATE block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * Sync call to dump all the fuses and key hashes
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * a1 the physical address for firmware to write structure of fuse and
+ * key hashes
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 physical address for the structure of fuse and key hashes
+ * a3 the size of the structure
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA)
+
+#endif
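
To make the register conventions above concrete, a hedged sketch of
issuing the simplest of these calls from EL1 through the generic SMCCC
helper (the service layer driver normally does this on clients'
behalf; shown only to illustrate the a0/a1 usage):

#include <linux/arm-smccc.h>
#include <linux/errno.h>

static long example_query_fw_version(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_FIRMWARE_VERSION,
		      0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return -EIO;

	return res.a1;	/* running firmware version */
}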
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index a93d85932eb9..60ed82112680 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -6,7 +6,7 @@
#ifndef __STRATIX10_SVC_CLIENT_H
#define __STRATIX10_SVC_CLIENT_H
-/**
+/*
* Client names supported by the service layer driver
*
* fpga: for FPGA configuration
@@ -14,8 +14,9 @@
*/
#define SVC_CLIENT_FPGA "fpga"
#define SVC_CLIENT_RSU "rsu"
+#define SVC_CLIENT_FCS "fcs"
-/**
+/*
* Status of the sent command, in bit number
*
* SVC_STATUS_OK:
@@ -49,16 +50,17 @@
#define SVC_STATUS_BUSY 4
#define SVC_STATUS_ERROR 5
#define SVC_STATUS_NO_SUPPORT 6
+#define SVC_STATUS_INVALID_PARAM 7
-/**
+/*
* Flag bit for COMMAND_RECONFIG
*
* COMMAND_RECONFIG_FLAG_PARTIAL:
* Set to FPGA configuration type (full or partial).
*/
-#define COMMAND_RECONFIG_FLAG_PARTIAL 1
+#define COMMAND_RECONFIG_FLAG_PARTIAL 0
-/**
+/*
* Timeout settings for service clients:
* timeout value used in Stratix10 FPGA manager driver.
* timeout value used in RSU driver
@@ -66,6 +68,8 @@
#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
+#define SVC_COMPLETED_TIMEOUT_MS 30000
struct stratix10_svc_chan;
@@ -104,31 +108,85 @@ struct stratix10_svc_chan;
*
* @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
* is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete,
+ * return status is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY
+ *
+ * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
+ * return status is SVC_STATUS_OK
+ *
+ * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is
+ * SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware,
+ * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM
+ *
+ * @COMMAND_FCS_SEND_CERTIFICATE: send a certificate, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_GET_PROVISION_DATA: read the provisioning data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_ENCRYPTION: encrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_DECRYPTION: decrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status
+ * is SVC_STATUS_OK, SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
+ /* for FPGA */
COMMAND_NOOP = 0,
COMMAND_RECONFIG,
COMMAND_RECONFIG_DATA_SUBMIT,
COMMAND_RECONFIG_DATA_CLAIM,
COMMAND_RECONFIG_STATUS,
- COMMAND_RSU_STATUS,
+ /* for RSU */
+ COMMAND_RSU_STATUS = 10,
COMMAND_RSU_UPDATE,
COMMAND_RSU_NOTIFY,
COMMAND_RSU_RETRY,
COMMAND_RSU_MAX_RETRY,
COMMAND_RSU_DCMF_VERSION,
+ COMMAND_RSU_DCMF_STATUS,
+ COMMAND_FIRMWARE_VERSION,
+ /* for FCS */
+ COMMAND_FCS_REQUEST_SERVICE = 20,
+ COMMAND_FCS_SEND_CERTIFICATE,
+ COMMAND_FCS_GET_PROVISION_DATA,
+ COMMAND_FCS_DATA_ENCRYPTION,
+ COMMAND_FCS_DATA_DECRYPTION,
+ COMMAND_FCS_RANDOM_NUMBER_GEN,
+ /* for general status poll */
+ COMMAND_POLL_SERVICE_STATUS = 40,
+ /* for generic mailbox send command */
+ COMMAND_MBOX_SEND_CMD = 100,
+ /* Non-mailbox SMC Call */
+ COMMAND_SMC_SVC_VERSION = 200,
};
/**
* struct stratix10_svc_client_msg - message sent by client to service
* @payload: starting address of the data to be processed
- * @payload_length: data size in bytes
+ * @payload_length: size in bytes of the data to be processed
+ * @payload_output: starting address of processed data
+ * @payload_length_output: processed data size in bytes
* @command: service command
* @arg: args to be passed via registers and not physically mapped buffers
*/
struct stratix10_svc_client_msg {
void *payload;
size_t payload_length;
+ void *payload_output;
+ size_t payload_length_output;
enum stratix10_svc_command_code command;
u64 arg[3];
};
@@ -218,7 +276,7 @@ void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
/**
- * intel_svc_done() - complete service request
+ * stratix10_svc_done() - complete service request
* @chan: service channel assigned to the client
*
* This function is used by service client to inform service layer that
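
A hedged sketch of filling the extended message structure for one of
the new FCS commands; buffer allocation through the service layer's
memory helpers is elided, and field usage follows the kerneldoc above:

static int example_fcs_encrypt(struct stratix10_svc_chan *chan,
			       void *in, size_t in_len,
			       void *out, size_t out_len)
{
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_FCS_DATA_ENCRYPTION,
		.payload = in,
		.payload_length = in_len,
		.payload_output = out,
		.payload_length_output = out_len,
	};

	return stratix10_svc_send(chan, &msg);
}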
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
new file mode 100644
index 000000000000..5b1d16fa3f56
--- /dev/null
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef MTK_ADSP_IPC_H
+#define MTK_ADSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MTK_ADSP_IPC_REQ 0
+#define MTK_ADSP_IPC_RSP 1
+#define MTK_ADSP_IPC_OP_REQ 0x1
+#define MTK_ADSP_IPC_OP_RSP 0x2
+
+enum {
+ MTK_ADSP_MBOX_REPLY,
+ MTK_ADSP_MBOX_REQUEST,
+ MTK_ADSP_MBOX_NUM,
+};
+
+struct mtk_adsp_ipc;
+
+struct mtk_adsp_ipc_ops {
+ void (*handle_reply)(struct mtk_adsp_ipc *ipc);
+ void (*handle_request)(struct mtk_adsp_ipc *ipc);
+};
+
+struct mtk_adsp_chan {
+ struct mtk_adsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct mtk_adsp_ipc {
+ struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
+ struct device *dev;
+ struct mtk_adsp_ipc_ops *ops;
+ void *private_data;
+};
+
+static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
+{
+ ipc->private_data = data;
+}
+
+static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
+{
+ return ipc->private_data;
+}
+
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t op);
+
+#endif /* MTK_ADSP_IPC_H */
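
A hedged sketch of wiring up the ops table and sending a request; the
handler bodies are illustrative:

#include <linux/firmware/mediatek/mtk-adsp-ipc.h>

static void example_handle_reply(struct mtk_adsp_ipc *ipc)
{
	/* e.g. complete a wait stored via mtk_adsp_ipc_get_data(ipc) */
}

static void example_handle_request(struct mtk_adsp_ipc *ipc)
{
	/* process the request, then answer on the reply channel */
	mtk_adsp_ipc_send(ipc, MTK_ADSP_IPC_RSP, MTK_ADSP_IPC_OP_RSP);
}

static struct mtk_adsp_ipc_ops example_ops = {
	.handle_reply = example_handle_reply,
	.handle_request = example_handle_request,
};

static int example_kick_dsp(struct mtk_adsp_ipc *ipc)
{
	ipc->ops = &example_ops;
	return mtk_adsp_ipc_send(ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
}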
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 95b0da2326a9..8eaf8922ab02 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -19,7 +19,7 @@ enum {
struct meson_sm_firmware;
int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
- u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+ s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
unsigned int b_size, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
new file mode 100644
index 000000000000..5c28298a98be
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM).
+ * Responsible for setting up and managing QSEECOM client devices.
+ *
+ * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef __QCOM_QSEECOM_H
+#define __QCOM_QSEECOM_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/types.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+/**
+ * struct qseecom_client - QSEECOM client device.
+ * @aux_dev: Underlying auxiliary device.
+ * @app_id: ID of the loaded application.
+ */
+struct qseecom_client {
+ struct auxiliary_device aux_dev;
+ u32 app_id;
+};
+
+/**
+ * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
+ * @client: The QSEECOM client associated with the target app.
+ * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req_size: Size of the request buffer.
+ * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp_size: Size of the response buffer.
+ *
+ * Sends a request to the QSEE app associated with the given client and
+ * reads back its response. The caller must provide two DMA memory regions,
+ * one for
+ * the request and one for the response, and fill out the @req region with the
+ * respective (app-specific) request data. The QSEE app reads this and returns
+ * its response in the @rsp region.
+ *
+ * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send().
+ * Clients should prefer to use this wrapper.
+ *
+ * Return: Zero on success, nonzero on failure.
+ */
+static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
+}
+
+#endif /* __QCOM_QSEECOM_H */
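
A short usage sketch; the request/response layouts are hypothetical
placeholders for an app-specific ABI, and the buffers are allocated
with kzalloc() so they are DMA-mappable as the kerneldoc requires:

#include <linux/slab.h>
#include <linux/firmware/qcom/qcom_qseecom.h>

struct example_req {
	u32 cmd;	/* hypothetical command ID */
} __packed;

struct example_rsp {
	u32 status;
} __packed;

static int example_query_app(struct qseecom_client *client)
{
	struct example_req *req;
	struct example_rsp *rsp;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!req || !rsp) {
		ret = -ENOMEM;
		goto out;
	}

	req->cmd = 1;
	ret = qcom_qseecom_app_send(client, req, sizeof(*req),
				    rsp, sizeof(*rsp));
	if (!ret)
		ret = rsp->status;
out:
	kfree(rsp);
	kfree(req);
	return ret;
}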
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
new file mode 100644
index 000000000000..ccaf28846054
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#include <dt-bindings/firmware/qcom,scm.h>
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
+
+struct qcom_scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+struct qcom_scm_vmperm {
+ int vmid;
+ int perm;
+};
+
+enum qcom_scm_ocmem_client {
+ QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
+ QCOM_SCM_OCMEM_GRAPHICS_ID,
+ QCOM_SCM_OCMEM_VIDEO_ID,
+ QCOM_SCM_OCMEM_LP_AUDIO_ID,
+ QCOM_SCM_OCMEM_SENSORS_ID,
+ QCOM_SCM_OCMEM_OTHER_OS_ID,
+ QCOM_SCM_OCMEM_DEBUG_ID,
+};
+
+enum qcom_scm_sec_dev_id {
+ QCOM_SCM_MDSS_DEV_ID = 1,
+ QCOM_SCM_OCMEM_DEV_ID = 5,
+ QCOM_SCM_PCIE0_DEV_ID = 11,
+ QCOM_SCM_PCIE1_DEV_ID = 12,
+ QCOM_SCM_GFX_DEV_ID = 18,
+ QCOM_SCM_UFS_DEV_ID = 19,
+ QCOM_SCM_ICE_DEV_ID = 20,
+};
+
+enum qcom_scm_ice_cipher {
+ QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
+ QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
+ QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
+};
+
+#define QCOM_SCM_PERM_READ 0x4
+#define QCOM_SCM_PERM_WRITE 0x2
+#define QCOM_SCM_PERM_EXEC 0x1
+#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
+#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
+
+bool qcom_scm_is_available(void);
+
+int qcom_scm_set_cold_boot_addr(void *entry);
+int qcom_scm_set_warm_boot_addr(void *entry);
+void qcom_scm_cpu_power_down(u32 flags);
+int qcom_scm_set_remote_state(u32 state, u32 id);
+
+struct qcom_scm_pas_metadata {
+ void *ptr;
+ dma_addr_t phys;
+ ssize_t size;
+};
+
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 peripheral);
+int qcom_scm_pas_shutdown(u32 peripheral);
+bool qcom_scm_pas_supported(u32 peripheral);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+bool qcom_scm_restore_sec_cfg_available(void);
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start, u32 cp_nonpixel_size);
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt);
+
+bool qcom_scm_ocmem_lock_available(void);
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode);
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size);
+
+bool qcom_scm_ice_available(void);
+int qcom_scm_ice_invalidate_key(u32 index);
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+
+bool qcom_scm_hdcp_available(void);
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+int qcom_scm_lmh_profile_change(u32 profile_id);
+bool qcom_scm_lmh_dcvsh_available(void);
+
+#ifdef CONFIG_QCOM_QSEECOM
+
+int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
+ size_t rsp_size);
+
+#else /* CONFIG_QCOM_QSEECOM */
+
+static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
+ size_t req_size, void *rsp,
+ size_t rsp_size)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_QCOM_QSEECOM */
+
+#endif
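
As one concrete example of the declarations above, a hedged sketch of
programming an inline-crypto (ICE) key; the slot index is a
placeholder, and the data-unit size is whatever the caller's crypto
configuration dictates:

#include <linux/firmware/qcom/qcom_scm.h>

static int example_program_ice_key(const u8 *key, u32 key_size,
				   u32 data_unit_size)
{
	if (!qcom_scm_ice_available())
		return -ENODEV;

	return qcom_scm_ice_set_key(0 /* placeholder slot */, key, key_size,
				    QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				    data_unit_size);
}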
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
index be5984bda592..931b6c5c72df 100644
--- a/include/linux/firmware/trusted_foundations.h
+++ b/include/linux/firmware/trusted_foundations.h
@@ -71,12 +71,16 @@ static inline void register_trusted_foundations(
static inline void of_register_trusted_foundations(void)
{
+ struct device_node *np = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+
+ if (!np)
+ return;
+ of_node_put(np);
/*
* If we find the target should enable TF but does not support it,
* fail as the system won't be able to do much anyway
*/
- if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"))
- register_trusted_foundations(NULL);
+ register_trusted_foundations(NULL);
}
static inline bool trusted_foundations_registered(void)
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
new file mode 100644
index 000000000000..82e8254b0f80
--- /dev/null
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
+#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define CB_MAX_PAYLOAD_SIZE (4U) /* payload is at most 4 32-bit words */
+
+/************************** Exported Function *****************************/
+
+typedef void (*event_cb_func_t)(const u32 *payload, void *data);
+
+#if IS_REACHABLE(CONFIG_XLNX_EVENT_MANAGER)
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data);
+
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data);
+#else
+static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _FIRMWARE_XLNX_EVENT_MANAGER_H_ */
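
A hedged sketch of registering for one of the Versal error events
defined in xlnx-zynqmp.h; the node and mask choices are illustrative:

#include <linux/firmware/xlnx-event-manager.h>

static void example_err_cb(const u32 *payload, void *data)
{
	/* payload[] carries up to CB_MAX_PAYLOAD_SIZE words of event data */
}

static int example_register_err_event(void)
{
	return xlnx_register_event(PM_NOTIFY_CB,
				   VERSAL_EVENT_ERROR_PMC_ERR1,
				   XPM_EVENT_ERROR_MASK_DDRMC_CR,
				   false, example_err_cb, NULL);
}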
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 5968df82b991..1a069a56c961 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -2,9 +2,10 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2019 Xilinx
+ * Copyright (C) 2014-2021 Xilinx
+ * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
@@ -12,6 +13,9 @@
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
+#include <linux/types.h>
+
+#include <linux/err.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -27,6 +31,29 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* PM API versions */
+#define PM_API_VERSION_1 1
+#define PM_API_VERSION_2 2
+
+#define PM_PINCTRL_PARAM_SET_VERSION 2
+
+#define ZYNQMP_FAMILY_CODE 0x23
+#define VERSAL_FAMILY_CODE 0x26
+
+/* When all subfamily of platform need to support */
+#define ALL_SUB_FAMILY_CODE 0x00
+#define VERSAL_SUB_FAMILY_CODE 0x01
+#define VERSALNET_SUB_FAMILY_CODE 0x03
+
+#define FAMILY_CODE_MASK GENMASK(27, 21)
+#define SUB_FAMILY_CODE_MASK GENMASK(20, 19)
+
+#define API_ID_MASK GENMASK(7, 0)
+#define MODULE_ID_MASK GENMASK(11, 8)
+
+/* ATF only commands */
+#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
@@ -50,9 +77,9 @@
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
-/* Feature check status */
-#define PM_FEATURE_INVALID -1
-#define PM_FEATURE_UNCHECKED 0
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+#define PDI_SRC_DDR 0xF
/*
* Firmware FPGA Manager flags
@@ -62,242 +89,430 @@
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+/* FPGA Status Reg */
+#define XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET 7U
+#define XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG 0U
+
+/*
+ * Node IDs for the Error Events.
+ */
+#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U)
+
+/* ZynqMP SD tap delay tuning */
+#define SD_ITAPDLY 0xFF180314
+#define SD_OTAPDLYSEL 0xFF180318
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18)
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19)
+#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
+#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+
+enum pm_module_id {
+ PM_MODULE_ID = 0x0,
+ XSEM_MODULE_ID = 0x3,
+ TF_A_MODULE_ID = 0xa,
+};
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB = 31,
+ PM_NOTIFY_CB = 32,
+};
+
enum pm_api_id {
+ PM_API_FEATURES = 0,
PM_GET_API_VERSION = 1,
+ PM_REGISTER_NOTIFIER = 5,
+ PM_FORCE_POWERDOWN = 8,
+ PM_REQUEST_WAKEUP = 10,
PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
- PM_RELEASE_NODE,
- PM_SET_REQUIREMENT,
+ PM_RELEASE_NODE = 14,
+ PM_SET_REQUIREMENT = 15,
PM_RESET_ASSERT = 17,
- PM_RESET_GET_STATUS,
+ PM_RESET_GET_STATUS = 18,
+ PM_MMIO_WRITE = 19,
+ PM_MMIO_READ = 20,
PM_PM_INIT_FINALIZE = 21,
- PM_FPGA_LOAD,
- PM_FPGA_GET_STATUS,
+ PM_FPGA_LOAD = 22,
+ PM_FPGA_GET_STATUS = 23,
PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
+ PM_PINCTRL_REQUEST = 28,
+ PM_PINCTRL_RELEASE = 29,
+ PM_PINCTRL_SET_FUNCTION = 31,
+ PM_PINCTRL_CONFIG_PARAM_GET = 32,
+ PM_PINCTRL_CONFIG_PARAM_SET = 33,
PM_IOCTL = 34,
- PM_QUERY_DATA,
- PM_CLOCK_ENABLE,
- PM_CLOCK_DISABLE,
- PM_CLOCK_GETSTATE,
- PM_CLOCK_SETDIVIDER,
- PM_CLOCK_GETDIVIDER,
- PM_CLOCK_SETRATE,
- PM_CLOCK_GETRATE,
- PM_CLOCK_SETPARENT,
- PM_CLOCK_GETPARENT,
+ PM_QUERY_DATA = 35,
+ PM_CLOCK_ENABLE = 36,
+ PM_CLOCK_DISABLE = 37,
+ PM_CLOCK_GETSTATE = 38,
+ PM_CLOCK_SETDIVIDER = 39,
+ PM_CLOCK_GETDIVIDER = 40,
+ PM_CLOCK_SETPARENT = 43,
+ PM_CLOCK_GETPARENT = 44,
+ PM_FPGA_READ = 46,
PM_SECURE_AES = 47,
+ PM_EFUSE_ACCESS = 53,
PM_FEATURE_CHECK = 63,
- PM_API_MAX,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_INVALID_VERSION = 4,
XST_PM_NO_FEATURE = 19,
+ XST_PM_INVALID_CRC = 301,
XST_PM_INTERNAL = 2000,
- XST_PM_CONFLICT,
- XST_PM_NO_ACCESS,
- XST_PM_INVALID_NODE,
- XST_PM_DOUBLE_REQ,
- XST_PM_ABORT_SUSPEND,
+ XST_PM_CONFLICT = 2001,
+ XST_PM_NO_ACCESS = 2002,
+ XST_PM_INVALID_NODE = 2003,
+ XST_PM_DOUBLE_REQ = 2004,
+ XST_PM_ABORT_SUSPEND = 2005,
XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
+ IOCTL_GET_RPU_OPER_MODE = 0,
+ IOCTL_SET_RPU_OPER_MODE = 1,
+ IOCTL_RPU_BOOT_ADDR_CONFIG = 2,
+ IOCTL_TCM_COMB_CONFIG = 3,
+ IOCTL_SET_TAPDELAY_BYPASS = 4,
IOCTL_SD_DLL_RESET = 6,
- IOCTL_SET_SD_TAPDELAY,
- IOCTL_SET_PLL_FRAC_MODE,
- IOCTL_GET_PLL_FRAC_MODE,
- IOCTL_SET_PLL_FRAC_DATA,
- IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE = 9,
+ IOCTL_SET_PLL_FRAC_DATA = 10,
+ IOCTL_GET_PLL_FRAC_DATA = 11,
IOCTL_WRITE_GGS = 12,
IOCTL_READ_GGS = 13,
IOCTL_WRITE_PGGS = 14,
IOCTL_READ_PGGS = 15,
/* Set healthy bit value */
IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_OSPI_MUX_SELECT = 21,
+ /* Register SGI to ATF */
+ IOCTL_REGISTER_SGI = 25,
+ /* Runtime feature configuration */
+ IOCTL_SET_FEATURE_CONFIG = 26,
+ IOCTL_GET_FEATURE_CONFIG = 27,
+ /* Dynamic SD/GEM configuration */
+ IOCTL_SET_SD_CONFIG = 30,
+ IOCTL_SET_GEM_CONFIG = 31,
};
enum pm_query_id {
- PM_QID_INVALID,
- PM_QID_CLOCK_GET_NAME,
- PM_QID_CLOCK_GET_TOPOLOGY,
- PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
- PM_QID_CLOCK_GET_PARENTS,
- PM_QID_CLOCK_GET_ATTRIBUTES,
+ PM_QID_INVALID = 0,
+ PM_QID_CLOCK_GET_NAME = 1,
+ PM_QID_CLOCK_GET_TOPOLOGY = 2,
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3,
+ PM_QID_CLOCK_GET_PARENTS = 4,
+ PM_QID_CLOCK_GET_ATTRIBUTES = 5,
+ PM_QID_PINCTRL_GET_NUM_PINS = 6,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME = 9,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10,
+ PM_QID_PINCTRL_GET_PIN_GROUPS = 11,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
- PM_QID_CLOCK_GET_MAX_DIVISOR,
+ PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP = 0,
+ PM_RPU_MODE_SPLIT = 1,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC = 0,
+ PM_RPU_BOOTMEM_HIVEC = 1,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT = 0,
+ PM_RPU_TCM_COMB = 1,
};
enum zynqmp_pm_reset_action {
- PM_RESET_ACTION_RELEASE,
- PM_RESET_ACTION_ASSERT,
- PM_RESET_ACTION_PULSE,
+ PM_RESET_ACTION_RELEASE = 0,
+ PM_RESET_ACTION_ASSERT = 1,
+ PM_RESET_ACTION_PULSE = 2,
};
enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_START = 1000,
ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
- ZYNQMP_PM_RESET_PCIE_BRIDGE,
- ZYNQMP_PM_RESET_PCIE_CTRL,
- ZYNQMP_PM_RESET_DP,
- ZYNQMP_PM_RESET_SWDT_CRF,
- ZYNQMP_PM_RESET_AFI_FM5,
- ZYNQMP_PM_RESET_AFI_FM4,
- ZYNQMP_PM_RESET_AFI_FM3,
- ZYNQMP_PM_RESET_AFI_FM2,
- ZYNQMP_PM_RESET_AFI_FM1,
- ZYNQMP_PM_RESET_AFI_FM0,
- ZYNQMP_PM_RESET_GDMA,
- ZYNQMP_PM_RESET_GPU_PP1,
- ZYNQMP_PM_RESET_GPU_PP0,
- ZYNQMP_PM_RESET_GPU,
- ZYNQMP_PM_RESET_GT,
- ZYNQMP_PM_RESET_SATA,
- ZYNQMP_PM_RESET_ACPU3_PWRON,
- ZYNQMP_PM_RESET_ACPU2_PWRON,
- ZYNQMP_PM_RESET_ACPU1_PWRON,
- ZYNQMP_PM_RESET_ACPU0_PWRON,
- ZYNQMP_PM_RESET_APU_L2,
- ZYNQMP_PM_RESET_ACPU3,
- ZYNQMP_PM_RESET_ACPU2,
- ZYNQMP_PM_RESET_ACPU1,
- ZYNQMP_PM_RESET_ACPU0,
- ZYNQMP_PM_RESET_DDR,
- ZYNQMP_PM_RESET_APM_FPD,
- ZYNQMP_PM_RESET_SOFT,
- ZYNQMP_PM_RESET_GEM0,
- ZYNQMP_PM_RESET_GEM1,
- ZYNQMP_PM_RESET_GEM2,
- ZYNQMP_PM_RESET_GEM3,
- ZYNQMP_PM_RESET_QSPI,
- ZYNQMP_PM_RESET_UART0,
- ZYNQMP_PM_RESET_UART1,
- ZYNQMP_PM_RESET_SPI0,
- ZYNQMP_PM_RESET_SPI1,
- ZYNQMP_PM_RESET_SDIO0,
- ZYNQMP_PM_RESET_SDIO1,
- ZYNQMP_PM_RESET_CAN0,
- ZYNQMP_PM_RESET_CAN1,
- ZYNQMP_PM_RESET_I2C0,
- ZYNQMP_PM_RESET_I2C1,
- ZYNQMP_PM_RESET_TTC0,
- ZYNQMP_PM_RESET_TTC1,
- ZYNQMP_PM_RESET_TTC2,
- ZYNQMP_PM_RESET_TTC3,
- ZYNQMP_PM_RESET_SWDT_CRL,
- ZYNQMP_PM_RESET_NAND,
- ZYNQMP_PM_RESET_ADMA,
- ZYNQMP_PM_RESET_GPIO,
- ZYNQMP_PM_RESET_IOU_CC,
- ZYNQMP_PM_RESET_TIMESTAMP,
- ZYNQMP_PM_RESET_RPU_R50,
- ZYNQMP_PM_RESET_RPU_R51,
- ZYNQMP_PM_RESET_RPU_AMBA,
- ZYNQMP_PM_RESET_OCM,
- ZYNQMP_PM_RESET_RPU_PGE,
- ZYNQMP_PM_RESET_USB0_CORERESET,
- ZYNQMP_PM_RESET_USB1_CORERESET,
- ZYNQMP_PM_RESET_USB0_HIBERRESET,
- ZYNQMP_PM_RESET_USB1_HIBERRESET,
- ZYNQMP_PM_RESET_USB0_APB,
- ZYNQMP_PM_RESET_USB1_APB,
- ZYNQMP_PM_RESET_IPI,
- ZYNQMP_PM_RESET_APM_LPD,
- ZYNQMP_PM_RESET_RTC,
- ZYNQMP_PM_RESET_SYSMON,
- ZYNQMP_PM_RESET_AFI_FM6,
- ZYNQMP_PM_RESET_LPD_SWDT,
- ZYNQMP_PM_RESET_FPD,
- ZYNQMP_PM_RESET_RPU_DBG1,
- ZYNQMP_PM_RESET_RPU_DBG0,
- ZYNQMP_PM_RESET_DBG_LPD,
- ZYNQMP_PM_RESET_DBG_FPD,
- ZYNQMP_PM_RESET_APLL,
- ZYNQMP_PM_RESET_DPLL,
- ZYNQMP_PM_RESET_VPLL,
- ZYNQMP_PM_RESET_IOPLL,
- ZYNQMP_PM_RESET_RPLL,
- ZYNQMP_PM_RESET_GPO3_PL_0,
- ZYNQMP_PM_RESET_GPO3_PL_1,
- ZYNQMP_PM_RESET_GPO3_PL_2,
- ZYNQMP_PM_RESET_GPO3_PL_3,
- ZYNQMP_PM_RESET_GPO3_PL_4,
- ZYNQMP_PM_RESET_GPO3_PL_5,
- ZYNQMP_PM_RESET_GPO3_PL_6,
- ZYNQMP_PM_RESET_GPO3_PL_7,
- ZYNQMP_PM_RESET_GPO3_PL_8,
- ZYNQMP_PM_RESET_GPO3_PL_9,
- ZYNQMP_PM_RESET_GPO3_PL_10,
- ZYNQMP_PM_RESET_GPO3_PL_11,
- ZYNQMP_PM_RESET_GPO3_PL_12,
- ZYNQMP_PM_RESET_GPO3_PL_13,
- ZYNQMP_PM_RESET_GPO3_PL_14,
- ZYNQMP_PM_RESET_GPO3_PL_15,
- ZYNQMP_PM_RESET_GPO3_PL_16,
- ZYNQMP_PM_RESET_GPO3_PL_17,
- ZYNQMP_PM_RESET_GPO3_PL_18,
- ZYNQMP_PM_RESET_GPO3_PL_19,
- ZYNQMP_PM_RESET_GPO3_PL_20,
- ZYNQMP_PM_RESET_GPO3_PL_21,
- ZYNQMP_PM_RESET_GPO3_PL_22,
- ZYNQMP_PM_RESET_GPO3_PL_23,
- ZYNQMP_PM_RESET_GPO3_PL_24,
- ZYNQMP_PM_RESET_GPO3_PL_25,
- ZYNQMP_PM_RESET_GPO3_PL_26,
- ZYNQMP_PM_RESET_GPO3_PL_27,
- ZYNQMP_PM_RESET_GPO3_PL_28,
- ZYNQMP_PM_RESET_GPO3_PL_29,
- ZYNQMP_PM_RESET_GPO3_PL_30,
- ZYNQMP_PM_RESET_GPO3_PL_31,
- ZYNQMP_PM_RESET_RPU_LS,
- ZYNQMP_PM_RESET_PS_ONLY,
- ZYNQMP_PM_RESET_PL,
- ZYNQMP_PM_RESET_PS_PL0,
- ZYNQMP_PM_RESET_PS_PL1,
- ZYNQMP_PM_RESET_PS_PL2,
- ZYNQMP_PM_RESET_PS_PL3,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
+ ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
+ ZYNQMP_PM_RESET_DP = 1003,
+ ZYNQMP_PM_RESET_SWDT_CRF = 1004,
+ ZYNQMP_PM_RESET_AFI_FM5 = 1005,
+ ZYNQMP_PM_RESET_AFI_FM4 = 1006,
+ ZYNQMP_PM_RESET_AFI_FM3 = 1007,
+ ZYNQMP_PM_RESET_AFI_FM2 = 1008,
+ ZYNQMP_PM_RESET_AFI_FM1 = 1009,
+ ZYNQMP_PM_RESET_AFI_FM0 = 1010,
+ ZYNQMP_PM_RESET_GDMA = 1011,
+ ZYNQMP_PM_RESET_GPU_PP1 = 1012,
+ ZYNQMP_PM_RESET_GPU_PP0 = 1013,
+ ZYNQMP_PM_RESET_GPU = 1014,
+ ZYNQMP_PM_RESET_GT = 1015,
+ ZYNQMP_PM_RESET_SATA = 1016,
+ ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
+ ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
+ ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
+ ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
+ ZYNQMP_PM_RESET_APU_L2 = 1021,
+ ZYNQMP_PM_RESET_ACPU3 = 1022,
+ ZYNQMP_PM_RESET_ACPU2 = 1023,
+ ZYNQMP_PM_RESET_ACPU1 = 1024,
+ ZYNQMP_PM_RESET_ACPU0 = 1025,
+ ZYNQMP_PM_RESET_DDR = 1026,
+ ZYNQMP_PM_RESET_APM_FPD = 1027,
+ ZYNQMP_PM_RESET_SOFT = 1028,
+ ZYNQMP_PM_RESET_GEM0 = 1029,
+ ZYNQMP_PM_RESET_GEM1 = 1030,
+ ZYNQMP_PM_RESET_GEM2 = 1031,
+ ZYNQMP_PM_RESET_GEM3 = 1032,
+ ZYNQMP_PM_RESET_QSPI = 1033,
+ ZYNQMP_PM_RESET_UART0 = 1034,
+ ZYNQMP_PM_RESET_UART1 = 1035,
+ ZYNQMP_PM_RESET_SPI0 = 1036,
+ ZYNQMP_PM_RESET_SPI1 = 1037,
+ ZYNQMP_PM_RESET_SDIO0 = 1038,
+ ZYNQMP_PM_RESET_SDIO1 = 1039,
+ ZYNQMP_PM_RESET_CAN0 = 1040,
+ ZYNQMP_PM_RESET_CAN1 = 1041,
+ ZYNQMP_PM_RESET_I2C0 = 1042,
+ ZYNQMP_PM_RESET_I2C1 = 1043,
+ ZYNQMP_PM_RESET_TTC0 = 1044,
+ ZYNQMP_PM_RESET_TTC1 = 1045,
+ ZYNQMP_PM_RESET_TTC2 = 1046,
+ ZYNQMP_PM_RESET_TTC3 = 1047,
+ ZYNQMP_PM_RESET_SWDT_CRL = 1048,
+ ZYNQMP_PM_RESET_NAND = 1049,
+ ZYNQMP_PM_RESET_ADMA = 1050,
+ ZYNQMP_PM_RESET_GPIO = 1051,
+ ZYNQMP_PM_RESET_IOU_CC = 1052,
+ ZYNQMP_PM_RESET_TIMESTAMP = 1053,
+ ZYNQMP_PM_RESET_RPU_R50 = 1054,
+ ZYNQMP_PM_RESET_RPU_R51 = 1055,
+ ZYNQMP_PM_RESET_RPU_AMBA = 1056,
+ ZYNQMP_PM_RESET_OCM = 1057,
+ ZYNQMP_PM_RESET_RPU_PGE = 1058,
+ ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
+ ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
+ ZYNQMP_PM_RESET_USB0_APB = 1063,
+ ZYNQMP_PM_RESET_USB1_APB = 1064,
+ ZYNQMP_PM_RESET_IPI = 1065,
+ ZYNQMP_PM_RESET_APM_LPD = 1066,
+ ZYNQMP_PM_RESET_RTC = 1067,
+ ZYNQMP_PM_RESET_SYSMON = 1068,
+ ZYNQMP_PM_RESET_AFI_FM6 = 1069,
+ ZYNQMP_PM_RESET_LPD_SWDT = 1070,
+ ZYNQMP_PM_RESET_FPD = 1071,
+ ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
+ ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
+ ZYNQMP_PM_RESET_DBG_LPD = 1074,
+ ZYNQMP_PM_RESET_DBG_FPD = 1075,
+ ZYNQMP_PM_RESET_APLL = 1076,
+ ZYNQMP_PM_RESET_DPLL = 1077,
+ ZYNQMP_PM_RESET_VPLL = 1078,
+ ZYNQMP_PM_RESET_IOPLL = 1079,
+ ZYNQMP_PM_RESET_RPLL = 1080,
+ ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
+ ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
+ ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
+ ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
+ ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
+ ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
+ ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
+ ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
+ ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
+ ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
+ ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
+ ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
+ ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
+ ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
+ ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
+ ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
+ ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
+ ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
+ ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
+ ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
+ ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
+ ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
+ ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
+ ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
+ ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
+ ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
+ ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
+ ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
+ ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
+ ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
+ ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
+ ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
+ ZYNQMP_PM_RESET_RPU_LS = 1113,
+ ZYNQMP_PM_RESET_PS_ONLY = 1114,
+ ZYNQMP_PM_RESET_PL = 1115,
+ ZYNQMP_PM_RESET_PS_PL0 = 1116,
+ ZYNQMP_PM_RESET_PS_PL1 = 1117,
+ ZYNQMP_PM_RESET_PS_PL2 = 1118,
+ ZYNQMP_PM_RESET_PS_PL3 = 1119,
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
- SUSPEND_ALERT,
- SUSPEND_SYSTEM_SHUTDOWN,
+ SUSPEND_ALERT = 202,
+ SUSPEND_SYSTEM_SHUTDOWN = 203,
};
enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NO = 1,
- ZYNQMP_PM_REQUEST_ACK_BLOCKING,
- ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2,
+ ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
};
enum pm_node_id {
NODE_SD_0 = 39,
- NODE_SD_1,
+ NODE_SD_1 = 40,
};
enum tap_delay_type {
PM_TAPDELAY_INPUT = 0,
- PM_TAPDELAY_OUTPUT,
+ PM_TAPDELAY_OUTPUT = 1,
};
enum dll_reset_type {
- PM_DLL_RESET_ASSERT,
- PM_DLL_RESET_RELEASE,
- PM_DLL_RESET_PULSE,
+ PM_DLL_RESET_ASSERT = 0,
+ PM_DLL_RESET_RELEASE = 1,
+ PM_DLL_RESET_PULSE = 2,
+};
+
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE = 0,
+ PM_PINCTRL_CONFIG_BIAS_STATUS = 1,
+ PM_PINCTRL_CONFIG_PULL_CTRL = 2,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5,
+ PM_PINCTRL_CONFIG_TRI_STATE = 6,
+ PM_PINCTRL_CONFIG_MAX = 7,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST = 0,
+ PM_PINCTRL_SLEW_RATE_SLOW = 1,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE = 0,
+ PM_PINCTRL_BIAS_ENABLE = 1,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN = 0,
+ PM_PINCTRL_BIAS_PULL_UP = 1,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS = 0,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT = 1,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA = 0,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA = 1,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA = 2,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA = 3,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE = 1,
};
enum zynqmp_pm_shutdown_type {
- ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
- ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
- ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2,
};
enum zynqmp_pm_shutdown_subtype {
- ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
- ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
- ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
+};
+
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN = 0,
+ PM_TAPDELAY_NAND_DQS_OUT = 1,
+ PM_TAPDELAY_QSPI = 2,
+ PM_TAPDELAY_MAX = 3,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE = 0,
+ PM_TAPDELAY_BYPASS_ENABLE = 1,
+};
+
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA = 0,
+ PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
+enum pm_feature_config_id {
+ PM_FEATURE_INVALID = 0,
+ PM_FEATURE_OVERTEMP_STATUS = 1,
+ PM_FEATURE_OVERTEMP_VALUE = 2,
+ PM_FEATURE_EXTWDT_STATUS = 3,
+ PM_FEATURE_EXTWDT_VALUE = 4,
+};
+
+/**
+ * enum pm_sd_config_type - PM SD configuration.
+ * @SD_CONFIG_EMMC_SEL: To set SD_EMMC_SEL in CTRL_REG_SD and SD_SLOTTYPE
+ * @SD_CONFIG_BASECLK: To set SD_BASECLK in SD_CONFIG_REG1
+ * @SD_CONFIG_8BIT: To set SD_8BIT in SD_CONFIG_REG2
+ * @SD_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_sd_config_type {
+ SD_CONFIG_EMMC_SEL = 1,
+ SD_CONFIG_BASECLK = 2,
+ SD_CONFIG_8BIT = 3,
+ SD_CONFIG_FIXED = 4,
+};
+
+/**
+ * enum pm_gem_config_type - PM GEM configuration.
+ * @GEM_CONFIG_SGMII_MODE: To set GEM_SGMII_MODE in GEM_CLK_CTRL register
+ * @GEM_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_gem_config_type {
+ GEM_CONFIG_SGMII_MODE = 1,
+ GEM_CONFIG_FIXED = 2,
};
/**
@@ -314,21 +529,18 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_get_api_version(u32 *version);
int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily);
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
int zynqmp_pm_clock_enable(u32 clock_id);
int zynqmp_pm_clock_disable(u32 clock_id);
int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
@@ -337,9 +549,12 @@ int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
const enum zynqmp_pm_reset_action assert_flag);
int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
+int zynqmp_pm_bootmode_write(u32 ps_mode);
int zynqmp_pm_init_finalize(void);
int zynqmp_pm_set_suspend_mode(u32 mode);
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
@@ -349,164 +564,364 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_efuse_access(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_fpga_get_config_status(u32 *value);
int zynqmp_pm_write_ggs(u32 index, u32 value);
int zynqmp_pm_read_ggs(u32 index, u32 *value);
int zynqmp_pm_write_pggs(u32 index, u32 value);
int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value);
int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
int zynqmp_pm_set_boot_health_status(u32 value);
+int zynqmp_pm_pinctrl_request(const u32 pin);
+int zynqmp_pm_pinctrl_release(const u32 pin);
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value);
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value);
+int zynqmp_pm_load_pdi(const u32 src, const u64 address);
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable);
+int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
+int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value);
#else
-static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
-{
- return ERR_PTR(-ENODEV);
-}
static inline int zynqmp_pm_get_api_version(u32 *version)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
u32 *out)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_clock_enable(u32 clock_id)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_clock_disable(u32 clock_id)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
{
return -ENODEV;
}
-static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
- return -ENODEV;
-}
-static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
- return -ENODEV;
-}
+
static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag)
+ const enum zynqmp_pm_reset_action assert_flag)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
u32 *status)
{
return -ENODEV;
}
+
+static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_init_finalize(void)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_set_suspend_mode(u32 mode)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_release_node(const u32 node)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_set_requirement(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_fpga_get_status(u32 *value)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_fpga_get_config_status(u32 *value)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
{
return -ENODEV;
}
+
static inline int zynqmp_pm_set_boot_health_status(u32 value)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_feature(const u32 api_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_config(u32 node,
+ enum pm_sd_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_gem_config(u32 node,
+ enum pm_gem_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
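Because every stub above returns -ENODEV, consumers can call the EEMI helpers unconditionally and treat a missing firmware interface as a runtime condition. A minimal consumer sketch, with a hypothetical helper name and GGS index:

#include <linux/device.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_read_ggs0(struct device *dev)
{
	u32 val;
	int ret;

	ret = zynqmp_pm_read_ggs(0, &val);
	if (ret == -ENODEV)
		return 0;	/* firmware interface not available: skip */
	if (ret)
		return ret;
	dev_info(dev, "GGS0 = %#x\n", val);
	return 0;
}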
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 8396013785ef..e485fb0c1201 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -2,6 +2,7 @@
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/bug.h>
#include <linux/math64.h>
/*
@@ -141,4 +142,23 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
#define fixp_cos32_rad(rad, twopi) \
fixp_sin32_rad(rad + twopi / 4, twopi)
+/**
+ * fixp_linear_interpolate() - interpolates a value from two known points
+ *
+ * @x0: x value of point 0
+ * @y0: y value of point 0
+ * @x1: x value of point 1
+ * @y1: y value of point 1
+ * @x: the x value at which to interpolate between the two points
+ */
+static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x)
+{
+ if (y0 == y1 || x == x0)
+ return y0;
+ if (x1 == x0 || x == x1)
+ return y1;
+
+ return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
+
#endif
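For illustration, a minimal sketch of the new helper using made-up calibration points; values outside [x0, x1] extrapolate linearly on the same slope:

#include <linux/fixp-arith.h>

/* Map a sensor reading in millivolts onto a 0..255 scale using two
 * hypothetical calibration points: 1000 mV -> 0 and 3000 mV -> 255. */
static int example_mv_to_level(int mv)
{
	/* e.g. 2000 mV -> 0 + (255 - 0) * (2000 - 1000) / (3000 - 1000) = 127 */
	return fixp_linear_interpolate(1000, 0, 3000, 255, mv);
}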
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index c12df59d3f5f..e9a72fd0bfe7 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -39,38 +39,6 @@ void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
/*
- * ---- SINGLE ----
- */
-struct fprop_local_single {
- /* the local events counter */
- unsigned long events;
- /* Period in which we last updated events */
- unsigned int period;
- raw_spinlock_t lock; /* Protect period and numerator */
-};
-
-#define INIT_FPROP_LOCAL_SINGLE(name) \
-{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-}
-
-int fprop_local_init_single(struct fprop_local_single *pl);
-void fprop_local_destroy_single(struct fprop_local_single *pl);
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl, unsigned long *numerator,
- unsigned long *denominator);
-
-static inline
-void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __fprop_inc_single(p, pl);
- local_irq_restore(flags);
-}
-
-/*
* ---- PERCPU ----
*/
struct fprop_local_percpu {
@@ -83,9 +51,10 @@ struct fprop_local_percpu {
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
- int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);
@@ -96,7 +65,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
unsigned long flags;
local_irq_save(flags);
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, 1);
local_irq_restore(flags);
}
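With __fprop_inc_percpu() generalized into __fprop_add_percpu(), a caller that batches events can reuse the same IRQ-save pattern as fprop_inc_percpu() above. A hypothetical batched wrapper:

#include <linux/flex_proportions.h>

/* Account @nr events at once, with the same IRQ protection the
 * single-event inc variant uses. */
static inline void example_fprop_add_percpu(struct fprop_global *p,
					    struct fprop_local_percpu *pl,
					    long nr)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_add_percpu(p, pl, nr);
	local_irq_restore(flags);
}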
diff --git a/include/linux/font.h b/include/linux/font.h
index 51b91c8b69d5..abf1442ce719 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -16,7 +16,8 @@
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
+ unsigned int charcount;
const void *data;
int pref;
};
@@ -33,6 +34,7 @@ struct font_desc {
#define MINI4x6_IDX 9
#define FONT6x10_IDX 10
#define TER16x32_IDX 11
+#define FONT6x8_IDX 12
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -45,7 +47,8 @@ extern const struct font_desc font_vga_8x8,
font_acorn_8x8,
font_mini_4x6,
font_6x10,
- font_ter_16x32;
+ font_ter_16x32,
+ font_6x8;
/* Find a font with a specific name */
@@ -59,4 +62,17 @@ extern const struct font_desc *get_default_font(int xres, int yres,
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
+/* Extra word getters */
+#define REFCOUNT(fd) (((int *)(fd))[-1])
+#define FNTSIZE(fd) (((int *)(fd))[-2])
+#define FNTCHARCNT(fd) (((int *)(fd))[-3])
+#define FNTSUM(fd) (((int *)(fd))[-4])
+
+#define FONT_EXTRA_WORDS 4
+
+struct font_data {
+ unsigned int extra[FONT_EXTRA_WORDS];
+ const unsigned char data[];
+} __packed;
+
#endif /* _VIDEO_FONT_H */
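For context, a sketch of how a font blob is laid out so the negative-index getters resolve: struct font_data places the FONT_EXTRA_WORDS ints directly before the glyph bytes, so REFCOUNT(data) reads extra[3], FNTSIZE(data) reads extra[2], and so on. The font below is illustrative, not part of the patch:

#include <linux/font.h>

#define EXAMPLE_FONTDATAMAX (256 * 8)

static struct font_data example_fontdata = {
	/* extra[] slots: { FNTSUM, FNTCHARCNT, FNTSIZE, REFCOUNT } */
	{ 0, 0, EXAMPLE_FONTDATAMAX, 0 },
	{ 0x00, /* ...256 glyphs x 8 bytes of 8x8 bitmap data... */ }
};

static const struct font_desc example_font = {
	.name      = "EXAMPLE8x8",
	.width     = 8,
	.height    = 8,
	.charcount = 256,
	.data      = example_fontdata.data,
};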
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
new file mode 100644
index 000000000000..6aeebe0a6777
--- /dev/null
+++ b/include/linux/fortify-string.h
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FORTIFY_STRING_H_
+#define _LINUX_FORTIFY_STRING_H_
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/const.h>
+#include <linux/limits.h>
+
+#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
+#define __RENAME(x) __asm__(#x)
+
+#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
+#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
+#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
+ FIELD_PREP(GENMASK(7, 1), func))
+
+#ifndef fortify_panic
+# define fortify_panic(func, write, avail, size, retfail) \
+ __fortify_panic(FORTIFY_REASON(func, write), avail, size)
+#endif
+
+#define FORTIFY_READ 0
+#define FORTIFY_WRITE 1
+
+#define EACH_FORTIFY_FUNC(macro) \
+ macro(strncpy), \
+ macro(strnlen), \
+ macro(strlen), \
+ macro(strscpy), \
+ macro(strlcat), \
+ macro(strcat), \
+ macro(strncat), \
+ macro(memset), \
+ macro(memcpy), \
+ macro(memmove), \
+ macro(memscan), \
+ macro(memcmp), \
+ macro(memchr), \
+ macro(memchr_inv), \
+ macro(kmemdup), \
+ macro(strcpy), \
+ macro(UNKNOWN),
+
+#define MAKE_FORTIFY_FUNC(func) FORTIFY_FUNC_##func
+
+enum fortify_func {
+ EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC)
+};
+
+void __fortify_report(const u8 reason, const size_t avail, const size_t size);
+void __fortify_panic(const u8 reason, const size_t avail, const size_t size) __cold __noreturn;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
+void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
+void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");
+
+#define __compiletime_strlen(p) \
+({ \
+ char *__p = (char *)(p); \
+ size_t __ret = SIZE_MAX; \
+ const size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
+ size_t __p_len = __p_size - 1; \
+ if (__builtin_constant_p(__p[__p_len]) && \
+ __p[__p_len] == '\0') \
+ __ret = __builtin_strlen(__p); \
+ } \
+ __ret; \
+})
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy __msan_memcpy
+#define __underlying_memmove __msan_memmove
+#define __underlying_memset __msan_memset
+#else
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#endif
+
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+#endif
+
+/**
+ * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking
+ *
+ * @dst: Destination memory address to write to
+ * @src: Source memory address to read from
+ * @bytes: How many bytes to write to @dst from @src
+ * @justification: Free-form text or comment describing why the use is needed
+ *
+ * This should be used for corner cases where the compiler cannot do the
+ * right thing, or during transitions between APIs, etc. It should be used
+ * very rarely, and includes a place for justification detailing where bounds
+ * checking has happened, and why existing solutions cannot be employed.
+ */
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ __underlying_memcpy(dst, src, bytes)
+
+/*
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
+ * size, rather than struct size), but there remain some stragglers using
+ * type 0 that will be converted in the future.
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define POS __pass_dynamic_object_size(1)
+#define POS0 __pass_dynamic_object_size(0)
+#else
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
+#endif
+
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated.
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+--------------------+------------+
+ * | **p** needs to be: | padded to **size** | not padded |
+ * +====================+====================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+--------------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+--------------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
+char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
+{
+ const size_t p_size = __member_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_strncpy, FORTIFY_WRITE, p_size, size, p);
+ return __underlying_strncpy(p, q, size);
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+/**
+ * strnlen - Return bounded count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ * @maxlen: maximum number of characters to count.
+ *
+ * Returns number of characters in @p (NOT including the final NUL), or
+ * @maxlen, if no NUL has been found up to there.
+ *
+ */
+__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
+{
+ const size_t p_size = __member_size(p);
+ const size_t p_len = __compiletime_strlen(p);
+ size_t ret;
+
+ /* We can take compile-time actions when maxlen is const. */
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
+ /* If p is const, we can use its compile-time-known len. */
+ if (maxlen >= p_size)
+ return p_len;
+ }
+
+ /* Do not check characters beyond the end of p. */
+ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(FORTIFY_FUNC_strnlen, FORTIFY_READ, p_size, ret + 1, ret);
+ return ret;
+}
+
+/*
+ * Defined after fortified strnlen to reuse it. However, it must still be
+ * possible for strlen() to be used on compile-time strings for use in
+ * static initializers (i.e. as a constant expression).
+ */
+/**
+ * strlen - Return count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ *
+ * Do not use this function unless the string length is known at
+ * compile-time. When @p is unterminated, this function may crash
+ * or return unexpected counts that could lead to memory content
+ * exposures. Prefer strnlen().
+ *
+ * Returns number of characters in @p (NOT including the final NUL).
+ *
+ */
+#define strlen(p) \
+ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \
+ __builtin_strlen(p), __fortify_strlen(p))
+__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
+__kernel_size_t __fortify_strlen(const char * const POS p)
+{
+ const size_t p_size = __member_size(p);
+ __kernel_size_t ret;
+
+ /* Give up if we don't know how large p is. */
+ if (p_size == SIZE_MAX)
+ return __underlying_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(FORTIFY_FUNC_strlen, FORTIFY_READ, p_size, ret + 1, ret);
+ return ret;
+}
+
+/* Defined after fortified strnlen() to reuse it. */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(sized_strscpy);
+__FORTIFY_INLINE ssize_t sized_strscpy(char * const POS p, const char * const POS q, size_t size)
+{
+ /* Use string size rather than possible enclosing struct size. */
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t len;
+
+ /* If we cannot get size of p and q default to call strscpy. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strscpy(p, q, size);
+
+ /*
+ * If size can be known at compile time and is greater than
+ * p_size, generate a compile time write overflow error.
+ */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Short-circuit for compile-time known-safe lengths. */
+ if (__compiletime_lessthan(p_size, SIZE_MAX)) {
+ len = __compiletime_strlen(q);
+
+ if (len < SIZE_MAX && __compiletime_lessthan(len, size)) {
+ __underlying_memcpy(p, q, len + 1);
+ return len;
+ }
+ }
+
+ /*
+ * This call protects from read overflow, because len will default to q's
+ * length if it is smaller than size.
+ */
+ len = strnlen(q, size);
+ /*
+ * If len equals size, we will copy only size bytes, which leads to
+ * -E2BIG being returned.
+ * Otherwise we will copy len + 1 because of the final '\0'.
+ */
+ len = len == size ? size : len + 1;
+
+ /*
+ * Generate a runtime write overflow error if len is greater than
+ * p_size.
+ */
+ if (p_size < len)
+ fortify_panic(FORTIFY_FUNC_strscpy, FORTIFY_WRITE, p_size, len, -E2BIG);
+
+ /*
+ * We can now safely call vanilla strscpy because we are protected from:
+ * 1. Read overflow thanks to call to strnlen().
+ * 2. Write overflow thanks to above ifs.
+ */
+ return __real_strscpy(p, q, len);
+}
+
+/* Defined after fortified strlen() to reuse it. */
+extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat);
+/**
+ * strlcat - Append a string to an existing string
+ *
+ * @p: pointer to %NUL-terminated string to append to
+ * @q: pointer to %NUL-terminated string to append from
+ * @avail: Maximum bytes available in @p
+ *
+ * Appends %NUL-terminated string @q after the %NUL-terminated
+ * string at @p, but will not write beyond @avail bytes total,
+ * potentially truncating the copy from @q. @p will stay
+ * %NUL-terminated only if a %NUL already existed within
+ * the @avail bytes of @p. If so, the resulting number of
+ * bytes copied from @q will be at most "@avail - strlen(@p) - 1".
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf(), seq_buf, or similar.
+ *
+ * Returns total bytes that _would_ have been contained by @p
+ * regardless of truncation, similar to snprintf(). If return
+ * value is >= @avail, the string has been truncated.
+ *
+ */
+__FORTIFY_INLINE
+size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t p_len, copy_len;
+ size_t actual, wanted;
+
+ /* Give up immediately if both buffer sizes are unknown. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strlcat(p, q, avail);
+
+ p_len = strnlen(p, avail);
+ copy_len = strlen(q);
+ wanted = actual = p_len + copy_len;
+
+ /* Cannot append any more: report truncation. */
+ if (avail <= p_len)
+ return wanted;
+
+ /* Give up if string is already overflowed. */
+ if (p_size <= p_len)
+ fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_READ, p_size, p_len + 1, wanted);
+
+ if (actual >= avail) {
+ copy_len = avail - p_len - 1;
+ actual = p_len + copy_len;
+ }
+
+ /* Give up if copy will overflow. */
+ if (p_size <= actual)
+ fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_WRITE, p_size, actual + 1, wanted);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[actual] = '\0';
+
+ return wanted;
+}
+
+/* Defined after fortified strlcat() to reuse it. */
+/**
+ * strcat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to NUL-terminated source string to append from
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the
+ * destination buffer size is known to the compiler. Prefer
+ * building the string with formatting, via scnprintf() or similar.
+ * At the very least, use strncat().
+ *
+ * Returns @p.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
+char *strcat(char * const POS p, const char *q)
+{
+ const size_t p_size = __member_size(p);
+ const size_t wanted = strlcat(p, q, p_size);
+
+ if (p_size <= wanted)
+ fortify_panic(FORTIFY_FUNC_strcat, FORTIFY_WRITE, p_size, wanted + 1, p);
+ return p;
+}
+
+/**
+ * strncat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to source string to append from
+ * @count: Maximum bytes to read from @q
+ *
+ * Appends at most @count bytes from @q (stopping at the first
+ * NUL byte) after the NUL-terminated string at @p. @p will be
+ * NUL-terminated.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf() or similar.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen() and strnlen() to reuse them. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
+char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t p_len, copy_len, total;
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ total = p_len + copy_len + 1;
+ if (p_size < total)
+ fortify_panic(FORTIFY_FUNC_strncat, FORTIFY_WRITE, p_size, total, p);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE bool fortify_memset_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t p_size_field)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Warn when write size is larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic(FORTIFY_FUNC_memset, FORTIFY_WRITE, p_size, size, true);
+ return false;
+}
+
+#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \
+ size_t __fortify_size = (size_t)(size); \
+ fortify_memset_chk(__fortify_size, p_size, p_size_field), \
+ __underlying_memset(p, c, __fortify_size); \
+})
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#ifndef CONFIG_KMSAN
+#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
+ __struct_size(p), __member_size(p))
+#endif
+
+/*
+ * To make sure the compiler can enforce protection against buffer overflows,
+ * memcpy(), memmove(), and memset() must not be used beyond individual
+ * struct members. If you need to copy across multiple members, please use
+ * struct_group() to create a named mirror of an anonymous struct union.
+ * (e.g. see struct sk_buff.) Read overflow checking is currently only
+ * done when a write overflow is also present, or when building with W=1.
+ *
+ * Mitigation coverage matrix
+ * Bounds checking at:
+ * +-------+-------+-------+-------+
+ * | Compile time | Run time |
+ * memcpy() argument sizes: | write | read | write | read |
+ * dest source length +-------+-------+-------+-------+
+ * memcpy(known, known, constant) | y | y | n/a | n/a |
+ * memcpy(known, unknown, constant) | y | n | n/a | V |
+ * memcpy(known, known, dynamic) | n | n | B | B |
+ * memcpy(known, unknown, dynamic) | n | n | B | V |
+ * memcpy(unknown, known, constant) | n | y | V | n/a |
+ * memcpy(unknown, unknown, constant) | n | n | V | V |
+ * memcpy(unknown, known, dynamic) | n | n | V | B |
+ * memcpy(unknown, unknown, dynamic) | n | n | V | V |
+ * +-------+-------+-------+-------+
+ *
+ * y = perform deterministic compile-time bounds checking
+ * n = cannot perform deterministic compile-time bounds checking
+ * n/a = no run-time bounds checking needed since compile-time deterministic
+ * B = can perform run-time bounds checking (currently unimplemented)
+ * V = vulnerable to run-time overflow (will need refactoring to solve)
+ *
+ */
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t q_size,
+ const size_t p_size_field,
+ const size_t q_size_field,
+ const u8 func)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
+ __read_overflow2();
+
+ /* Warn when write size argument larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ /*
+ * Warn for source field over-read when building with W=1
+ * or when an over-write happened, so both can be fixed at
+ * the same time.
+ */
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
+ __read_overflow2_field(q_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
+ else if (q_size != SIZE_MAX && q_size < size)
+ fortify_panic(func, FORTIFY_READ, p_size, size, true);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * We must ignore p_size_field == 0 for existing 0-element
+ * fake flexible arrays, until they are all converted to
+ * proper flexible arrays.
+ *
+ * The implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
+}
+
+#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
+ p_size_field, q_size_field, op) ({ \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, FORTIFY_FUNC_ ##op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " FILE_LINE, \
+ __p_size_field); \
+ __underlying_##op(p, q, __fortify_size); \
+})
+
+/*
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memcpy)
+#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memmove)
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memscan, FORTIFY_READ, p_size, size, NULL);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
+int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
+{
+ const size_t p_size = __struct_size(p);
+ const size_t q_size = __struct_size(q);
+
+ if (__builtin_constant_p(size)) {
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (__compiletime_lessthan(q_size, size))
+ __read_overflow2();
+ }
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, p_size, size, INT_MIN);
+ else if (q_size < size)
+ fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, q_size, size, INT_MIN);
+ return __underlying_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
+void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memchr, FORTIFY_READ, p_size, size, NULL);
+ return __underlying_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memchr_inv, FORTIFY_READ, p_size, size, NULL);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup)
+ __realloc_size(2);
+__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
+{
+ const size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL);
+ return __real_kmemdup(p, size, gfp);
+}
+
+/**
+ * strcpy - Copy a string into another string buffer
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * overflows, this is only possible when the sizes of @q and @p are
+ * known to the compiler. Prefer strscpy(), though note its different
+ * return values for detecting truncation.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen to reuse it. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
+char *strcpy(char * const POS p, const char * const POS q)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t size;
+
+ /* If neither buffer size is known, immediately give up. */
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strcpy(p, q);
+ size = strlen(q) + 1;
+ /* Compile-time check for const size overflow. */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ /* Run-time check for dynamic size overflow. */
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_strcpy, FORTIFY_WRITE, p_size, size, p);
+ __underlying_memcpy(p, q, size);
+ return p;
+}
+
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
+
+#undef POS
+#undef POS0
+
+#endif /* _LINUX_FORTIFY_STRING_H_ */
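To make the struct_group() suggestion above concrete: copying across two adjacent members through the first member's address is exactly the field-spanning write that the run-time check reports, while a named group gives memcpy() an honestly sized destination. A sketch with a hypothetical struct:

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_frame {
	u8 type;
	struct_group(payload,
		u8 hdr[4];
		u8 body[4];
	);
};

static void example_fill(struct example_frame *f, const u8 *src)
{
	/*
	 * memcpy(f->hdr, src, 8) would span from 'hdr' into 'body' and
	 * trigger the field-spanning WARN_ONCE() above; the named group
	 * makes the 8-byte destination explicit instead.
	 */
	memcpy(&f->payload, src, sizeof(f->payload));
}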
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 0b08ac20ab16..a6b4c07858cc 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -13,6 +13,5 @@
#include <linux/io.h>
int alt_pr_register(struct device *dev, void __iomem *reg_base);
-void alt_pr_unregister(struct device *dev);
#endif /* _ALT_PR_IP_CORE_H */
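With alt_pr_unregister() gone from the API, unregistration is presumably device-managed, so a probe reduces to a single registration call. A sketch with hypothetical names:

#include <linux/fpga/altera-pr-ip-core.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);
	return alt_pr_register(&pdev->dev, base);
}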
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 817600a32c93..223da48a6d18 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -11,7 +11,7 @@ struct fpga_bridge;
/**
* struct fpga_bridge_ops - ops for low level FPGA bridge drivers
* @enable_show: returns the FPGA bridge's status
- * @enable_set: set a FPGA bridge as enabled or disabled
+ * @enable_set: set an FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*/
@@ -23,6 +23,23 @@ struct fpga_bridge_ops {
};
/**
+ * struct fpga_bridge_info - collection of parameters for an FPGA Bridge
+ * @name: fpga bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: fpga bridge private data
+ *
+ * fpga_bridge_info contains parameters for the register function. These
+ * are separated into an info structure because some of them are optional
+ * and others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_bridge_info {
+ const char *name;
+ const struct fpga_bridge_ops *br_ops;
+ void *priv;
+};
+
+/**
* struct fpga_bridge - FPGA bridge structure
* @name: name of low level FPGA bridge
* @dev: FPGA bridge device
@@ -62,15 +79,10 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);
-struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv);
-void fpga_bridge_free(struct fpga_bridge *br);
-int fpga_bridge_register(struct fpga_bridge *br);
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv);
void fpga_bridge_unregister(struct fpga_bridge *br);
-struct fpga_bridge
-*devm_fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-
#endif /* _LINUX_FPGA_BRIDGE_H */
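The old create/register pair collapses into one fpga_bridge_register() call. A registration sketch with hypothetical names:

#include <linux/fpga/fpga-bridge.h>
#include <linux/platform_device.h>

static int example_enable_set(struct fpga_bridge *bridge, bool enable)
{
	/* gate or ungate traffic in hardware here */
	return 0;
}

static const struct fpga_bridge_ops example_br_ops = {
	.enable_set = example_enable_set,
};

static int example_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	br = fpga_bridge_register(&pdev->dev, "example bridge",
				  &example_br_ops, NULL);
	if (IS_ERR(br))
		return PTR_ERR(br);
	platform_set_drvdata(pdev, br);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	fpga_bridge_unregister(platform_get_drvdata(pdev));
	return 0;
}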
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5b..54f63459efd6 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -22,6 +22,8 @@ struct sg_table;
* @FPGA_MGR_STATE_RESET: FPGA in reset state
* @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
* @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
+ * @FPGA_MGR_STATE_PARSE_HEADER: parse FPGA image header
+ * @FPGA_MGR_STATE_PARSE_HEADER_ERR: Error during PARSE_HEADER stage
* @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
* @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
* @FPGA_MGR_STATE_WRITE: writing image to FPGA
@@ -41,7 +43,9 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_FIRMWARE_REQ,
FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
- /* write sequence: init, write, complete */
+ /* write sequence: parse header, init, write, complete */
+ FPGA_MGR_STATE_PARSE_HEADER,
+ FPGA_MGR_STATE_PARSE_HEADER_ERR,
FPGA_MGR_STATE_WRITE_INIT,
FPGA_MGR_STATE_WRITE_INIT_ERR,
FPGA_MGR_STATE_WRITE,
@@ -75,7 +79,7 @@ enum fpga_mgr_states {
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
- * struct fpga_image_info - information specific to a FPGA image
+ * struct fpga_image_info - information specific to an FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
@@ -85,6 +89,9 @@ enum fpga_mgr_states {
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
+ * @header_size: size of image header.
+ * @data_size: size of image data to be sent to the device. If not specified,
+ * the whole image will be used. The header may be skipped in either case.
* @region_id: id of target region
* @dev: device that owns this
* @overlay: Device Tree overlay
@@ -98,6 +105,8 @@ struct fpga_image_info {
struct sg_table *sgt;
const char *buf;
size_t count;
+ size_t header_size;
+ size_t data_size;
int region_id;
struct device *dev;
#ifdef CONFIG_OF
@@ -106,11 +115,48 @@ struct fpga_image_info {
};
/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
+/**
+ * struct fpga_manager_info - collection of parameters for an FPGA Manager
+ * @name: fpga manager name
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * fpga_manager_info contains parameters for the register_full function.
+ * These are separated into an info structure because some of them are optional
+ * and others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_manager_info {
+ const char *name;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
+ void *priv;
+};
+
+/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
- * @initial_header_size: Maximum number of bytes that should be passed into write_init
+ * @initial_header_size: minimum number of bytes that should be passed into
+ * parse_header and write_init.
+ * @skip_header: bool flag to tell the fpga-mgr core whether it should skip
+ * the info->header_size bytes at the beginning of the image when invoking
+ * the write callback.
* @state: returns an enum value of the FPGA's state
* @status: returns status of the FPGA, including reconfiguration error code
- * @write_init: prepare the FPGA to receive confuration data
+ * @parse_header: parse FPGA image header to set info->header_size and
+ * info->data_size. In case the input buffer is not large enough, set
+ * required size to info->header_size and return -EAGAIN.
+ * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
@@ -123,8 +169,12 @@ struct fpga_image_info {
*/
struct fpga_manager_ops {
size_t initial_header_size;
+ bool skip_header;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
u64 (*status)(struct fpga_manager *mgr);
+ int (*parse_header)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count);
int (*write_init)(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count);
@@ -144,17 +194,6 @@ struct fpga_manager_ops {
#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
/**
- * struct fpga_compat_id - id for compatibility check
- *
- * @id_h: high 64bit of the compat_id
- * @id_l: low 64bit of the compat_id
- */
-struct fpga_compat_id {
- u64 id_h;
- u64 id_l;
-};
-
-/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
@@ -191,15 +230,18 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
-void fpga_mgr_free(struct fpga_manager *mgr);
-int fpga_mgr_register(struct fpga_manager *mgr);
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
void fpga_mgr_unregister(struct fpga_manager *mgr);
-struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
#endif /*_LINUX_FPGA_MGR_H */
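A sketch of a manager using the new parse_header/skip_header support; the 16-byte header format and all names are hypothetical:

#include <asm/unaligned.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

/* Hypothetical 16-byte image header: a magic word followed by a
 * little-endian payload length at offset 4. */
static int example_parse_header(struct fpga_manager *mgr,
				struct fpga_image_info *info,
				const char *buf, size_t count)
{
	info->header_size = 16;
	if (count < 16)
		return -EAGAIN;		/* ask the core to buffer more bytes */
	info->data_size = get_unaligned_le32(buf + 4);
	return 0;
}

static const struct fpga_manager_ops example_mops = {
	.initial_header_size = 16,
	.skip_header = true,		/* core strips the header before write() */
	.parse_header = example_parse_header,
	/* .write_init, .write, .write_complete as in any manager (elided) */
};

static int example_probe(struct platform_device *pdev)
{
	struct fpga_manager_info info = {
		.name = "example manager",
		.mops = &example_mops,
	};

	return PTR_ERR_OR_ZERO(devm_fpga_mgr_register_full(&pdev->dev, &info));
}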
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 27cb706275db..9d4d32909340 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -7,6 +7,27 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
+struct fpga_region;
+
+/**
+ * struct fpga_region_info - collection of parameters for an FPGA Region
+ * @mgr: fpga region manager
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: fpga region private data
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * fpga_region_info contains parameters for the register_full function.
+ * These are separated into an info structure because some of them are optional
+ * and others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_region_info {
+ struct fpga_manager *mgr;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
/**
* struct fpga_region - FPGA Region structure
* @dev: FPGA Region device
@@ -31,21 +52,18 @@ struct fpga_region {
#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
-struct fpga_region *fpga_region_class_find(
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
int fpga_region_program_fpga(struct fpga_region *region);
-struct fpga_region
-*fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
-void fpga_region_free(struct fpga_region *region);
-int fpga_region_register(struct fpga_region *region);
-void fpga_region_unregister(struct fpga_region *region);
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
-struct fpga_region
-*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+void fpga_region_unregister(struct fpga_region *region);
#endif /* _FPGA_REGION_H */
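The register_full variant bundles the manager, compat_id, and bridge hook into one info struct. A sketch with hypothetical names:

#include <linux/fpga/fpga-region.h>
#include <linux/platform_device.h>

static struct fpga_manager *example_mgr;	/* obtained earlier via fpga_mgr_get() */

static int example_get_bridges(struct fpga_region *region)
{
	return of_fpga_bridge_get_to_list(region->dev.parent->of_node,
					  region->info, &region->bridge_list);
}

static int example_probe(struct platform_device *pdev)
{
	struct fpga_region_info info = {
		.mgr = example_mgr,
		.get_bridges = example_get_bridges,
	};
	struct fpga_region *region;

	region = fpga_region_register_full(&pdev->dev, &info);
	return PTR_ERR_OR_ZERO(region);
}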
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..3e03758151f4
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+#include <linux/rethook.h>
+
+/**
+ * struct fprobe - ftrace based probe.
+ * @ops: The ftrace_ops.
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @rethook: The rethook data structure. (internal data)
+ * @entry_data_size: The private data storage size.
+ * @nr_maxactive: The max number of active functions.
+ * @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
+ */
+struct fprobe {
+#ifdef CONFIG_FUNCTION_TRACER
+ /*
+ * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
+ * But users of fprobe may still embed struct fprobe in their own
+ * code. To avoid build errors, keep the fprobe data structure
+ * defined here, but drop the ftrace_ops data structure.
+ */
+ struct ftrace_ops ops;
+#endif
+ unsigned long nmissed;
+ unsigned int flags;
+ struct rethook *rethook;
+ size_t entry_data_size;
+ int nr_maxactive;
+
+ int (*entry_handler)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data);
+ void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data);
+};
+
+/* This fprobe is soft-disabled. */
+#define FPROBE_FL_DISABLED 1
+
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
+ */
+#define FPROBE_FL_KPROBE_SHARED 2
+
+static inline bool fprobe_disabled(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
+}
+
+static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
+}
+
+#ifdef CONFIG_FPROBE
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
+int unregister_fprobe(struct fprobe *fp);
+bool fprobe_is_registered(struct fprobe *fp);
+#else
+static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int unregister_fprobe(struct fprobe *fp)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool fprobe_is_registered(struct fprobe *fp)
+{
+ return false;
+}
+#endif
+
+/**
+ * disable_fprobe() - Disable fprobe
+ * @fp: The fprobe to be disabled.
+ *
+ * This will soft-disable @fp. Note that this doesn't remove the ftrace
+ * hooks from the function entry.
+ */
+static inline void disable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags |= FPROBE_FL_DISABLED;
+}
+
+/**
+ * enable_fprobe() - Enable fprobe
+ * @fp: The fprobe to be enabled.
+ *
+ * This will soft-enable @fp.
+ */
+static inline void enable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags &= ~FPROBE_FL_DISABLED;
+}
+
+#endif
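A registration sketch matching the handler signatures above; the probed symbols and names are illustrative:

#include <linux/fprobe.h>
#include <linux/module.h>

static int example_entry(struct fprobe *fp, unsigned long entry_ip,
			 unsigned long ret_ip, struct pt_regs *regs,
			 void *entry_data)
{
	pr_debug("hit %pS\n", (void *)entry_ip);
	return 0;
}

static void example_exit(struct fprobe *fp, unsigned long entry_ip,
			 unsigned long ret_ip, struct pt_regs *regs,
			 void *entry_data)
{
	pr_debug("returning to %pS\n", (void *)ret_ip);
}

static struct fprobe example_fprobe = {
	.entry_handler = example_entry,
	.exit_handler = example_exit,
};

static int __init example_init(void)
{
	/* Attach to all "vfs_read*" symbols except "vfs_readv". */
	return register_fprobe(&example_fprobe, "vfs_read*", "vfs_readv");
}
module_init(example_init);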
diff --git a/include/linux/frame.h b/include/linux/frame.h
deleted file mode 100644
index 303cda600e56..000000000000
--- a/include/linux/frame.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FRAME_H
-#define _LINUX_FRAME_H
-
-#ifdef CONFIG_STACK_VALIDATION
-/*
- * This macro marks the given function's stack frame as "non-standard", which
- * tells objtool to ignore the function when doing stack metadata validation.
- * It should only be used in special cases where you're 100% sure it won't
- * affect the reliability of frame pointers and kernel stack traces.
- *
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
- */
-#define STACK_FRAME_NON_STANDARD(func) \
- static void __used __section(.discard.func_stack_frame_non_standard) \
- *__func_stack_frame_non_standard_##func = func
-
-/*
- * This macro indicates that the following intra-function call is valid.
- * Any non-annotated intra-function call will cause objtool to issue a warning.
- */
-#define ANNOTATE_INTRA_FUNCTION_CALL \
- 999: \
- .pushsection .discard.intra_function_calls; \
- .long 999b; \
- .popsection;
-
-#else /* !CONFIG_STACK_VALIDATION */
-
-#define STACK_FRAME_NON_STANDARD(func)
-#define ANNOTATE_INTRA_FUNCTION_CALL
-
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* _LINUX_FRAME_H */
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
new file mode 100644
index 000000000000..9724d4b44b9c
--- /dev/null
+++ b/include/linux/framer/framer-provider.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer provider header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_PROVIDER_FRAMER_H
+#define __DRIVERS_PROVIDER_FRAMER_H
+
+#include <linux/export.h>
+#include <linux/framer/framer.h>
+#include <linux/types.h>
+
+#define FRAMER_FLAG_POLL_STATUS BIT(0)
+
+/**
+ * struct framer_ops - set of function pointers for performing framer operations
+ * @init: operation to be performed for initializing the framer
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the framer
+ * @power_off: powering off the framer
+ * @flags: OR-ed flags (FRAMER_FLAG_*) to ask for core functionality
+ * - @FRAMER_FLAG_POLL_STATUS:
+ * Ask the core to poll for the framer status and notify consumers
+ * on change.
+ * The framer should call @framer_notify_status_change() when it
+ * detects a status change. This is usually done using interrupts.
+ * If the framer cannot detect this change, it can ask the core for
+ * status polling. The core will call @get_status() periodically and,
+ * on a detected change, notify the consumer.
+ * @owner: the module owner containing the ops
+ */
+struct framer_ops {
+ int (*init)(struct framer *framer);
+ void (*exit)(struct framer *framer);
+ int (*power_on)(struct framer *framer);
+ int (*power_off)(struct framer *framer);
+
+ /**
+ * @get_status:
+ *
+ * Optional.
+ *
+ * Used to get the framer status. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_status)(struct framer *framer, struct framer_status *status);
+
+ /**
+ * @set_config:
+ *
+ * Optional.
+ *
+ * Used to set the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*set_config)(struct framer *framer, const struct framer_config *config);
+
+ /**
+ * @get_config:
+ *
+ * Optional.
+ *
+ * Used to get the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_config)(struct framer *framer, struct framer_config *config);
+
+ u32 flags;
+ struct module *owner;
+};
+
+/**
+ * struct framer_provider - represents the framer provider
+ * @dev: framer provider device
+ * @owner: the module owner having of_xlate
+ * @list: to maintain a linked list of framer providers
+ * @of_xlate: function pointer to obtain framer instance from framer pointer
+ */
+struct framer_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct framer * (*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args);
+};
+
+static inline void framer_set_drvdata(struct framer *framer, void *data)
+{
+ dev_set_drvdata(&framer->dev, data);
+}
+
+static inline void *framer_get_drvdata(struct framer *framer)
+{
+ return dev_get_drvdata(&framer->dev);
+}
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+
+/* Create and destroy a framer */
+struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+void framer_destroy(struct framer *framer);
+
+/* devm version */
+struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args);
+
+struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args));
+
+void framer_provider_of_unregister(struct framer_provider *framer_provider);
+
+struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args));
+
+void framer_notify_status_change(struct framer *framer);
+
+#else /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+static inline struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_destroy(struct framer *framer)
+{
+}
+
+/* devm version */
+static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_provider_of_unregister(struct framer_provider *framer_provider)
+{
+}
+
+static inline struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_notify_status_change(struct framer *framer)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+#define framer_provider_of_register(dev, xlate) \
+ __framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#define devm_framer_provider_of_register(dev, xlate) \
+ __devm_framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#endif /* __DRIVERS_PROVIDER_FRAMER_H */
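A provider-side sketch tying the pieces together: create a framer with devm_framer_create(), then expose it through the simple of_xlate helper. Names and the polled status source are hypothetical:

#include <linux/framer/framer-provider.h>
#include <linux/platform_device.h>

static int example_get_status(struct framer *framer,
			      struct framer_status *status)
{
	status->link_is_on = true;	/* read from hardware in a real driver */
	return 0;
}

static const struct framer_ops example_framer_ops = {
	.owner = THIS_MODULE,
	.flags = FRAMER_FLAG_POLL_STATUS,	/* core polls get_status() */
	.get_status = example_get_status,
};

static int example_probe(struct platform_device *pdev)
{
	struct framer_provider *provider;
	struct framer *framer;

	framer = devm_framer_create(&pdev->dev, NULL, &example_framer_ops);
	if (IS_ERR(framer))
		return PTR_ERR(framer);

	provider = devm_framer_provider_of_register(&pdev->dev,
						framer_provider_simple_of_xlate);
	return PTR_ERR_OR_ZERO(provider);
}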
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
new file mode 100644
index 000000000000..9a9b88962c29
--- /dev/null
+++ b/include/linux/framer/framer.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_FRAMER_H
+#define __DRIVERS_FRAMER_H
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+/**
+ * enum framer_iface - Framer interface
+ * @FRAMER_IFACE_E1: E1 interface
+ * @FRAMER_IFACE_T1: T1 interface
+ */
+enum framer_iface {
+ FRAMER_IFACE_E1,
+ FRAMER_IFACE_T1,
+};
+
+/**
+ * enum framer_clock_type - Framer clock type
+ * @FRAMER_CLOCK_EXT: External clock
+ * @FRAMER_CLOCK_INT: Internal clock
+ */
+enum framer_clock_type {
+ FRAMER_CLOCK_EXT,
+ FRAMER_CLOCK_INT,
+};
+
+/**
+ * struct framer_config - Framer configuration
+ * @iface: Framer line interface
+ * @clock_type: Framer clock type
+ * @line_clock_rate: Framer line clock rate
+ */
+struct framer_config {
+ enum framer_iface iface;
+ enum framer_clock_type clock_type;
+ unsigned long line_clock_rate;
+};
+
+/**
+ * struct framer_status - Framer status
+ * @link_is_on: Framer link state: true if the link is on, false if it is off.
+ */
+struct framer_status {
+ bool link_is_on;
+};
+
+/**
+ * enum framer_event - Event available for notification
+ * @FRAMER_EVENT_STATUS: Event notified on framer_status changes
+ */
+enum framer_event {
+ FRAMER_EVENT_STATUS,
+};
+
+/**
+ * struct framer - represents the framer device
+ * @dev: framer device
+ * @id: id of the framer device
+ * @ops: function pointers for performing framer operations
+ * @mutex: mutex to protect framer_ops
+ * @init_count: init reference count; guards shared init/exit by multiple consumers
+ * @power_count: power reference count; guards shared power on/off by multiple consumers
+ * @pwr: power regulator associated with the framer
+ * @notify_status_work: work structure used for status notifications
+ * @notifier_list: notifier list used for notifications
+ * @polling_work: delayed work structure used for the polling task
+ * @prev_status: previous read status used by the polling task to detect changes
+ */
+struct framer {
+ struct device dev;
+ int id;
+ const struct framer_ops *ops;
+ struct mutex mutex; /* Protect framer */
+ int init_count;
+ int power_count;
+ struct regulator *pwr;
+ struct work_struct notify_status_work;
+ struct blocking_notifier_head notifier_list;
+ struct delayed_work polling_work;
+ struct framer_status prev_status;
+};
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+int framer_pm_runtime_get(struct framer *framer);
+int framer_pm_runtime_get_sync(struct framer *framer);
+int framer_pm_runtime_put(struct framer *framer);
+int framer_pm_runtime_put_sync(struct framer *framer);
+int framer_init(struct framer *framer);
+int framer_exit(struct framer *framer);
+int framer_power_on(struct framer *framer);
+int framer_power_off(struct framer *framer);
+int framer_get_status(struct framer *framer, struct framer_status *status);
+int framer_get_config(struct framer *framer, struct framer_config *config);
+int framer_set_config(struct framer *framer, const struct framer_config *config);
+int framer_notifier_register(struct framer *framer, struct notifier_block *nb);
+int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb);
+
+struct framer *framer_get(struct device *dev, const char *con_id);
+void framer_put(struct device *dev, struct framer *framer);
+
+struct framer *devm_framer_get(struct device *dev, const char *con_id);
+struct framer *devm_framer_optional_get(struct device *dev, const char *con_id);
+#else
+static inline int framer_pm_runtime_get(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_get_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_init(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_exit(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_on(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_off(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_register(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_unregister(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_put(struct device *dev, struct framer *framer)
+{
+}
+
+static inline struct framer *devm_framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *devm_framer_optional_get(struct device *dev, const char *con_id)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __DRIVERS_FRAMER_H */
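On the consumer side, a hypothetical user of this API could look like the sketch below; the names are illustrative, and whether the notifier callback receives a status payload is an assumption here, so the callback re-reads the status explicitly.

/*
 * Hypothetical consumer sketch (illustrative, not part of the patch):
 * acquire the framer wired to this device, bring it up, and watch the
 * link state through the notifier chain.
 */
#include <linux/container_of.h>
#include <linux/framer/framer.h>

struct example_consumer {
	struct framer *framer;
	struct notifier_block nb;
};

static int example_framer_event(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct example_consumer *ec = container_of(nb, struct example_consumer, nb);
	struct framer_status status;

	/* Payload contents are not assumed; re-read the status instead. */
	if (action == FRAMER_EVENT_STATUS &&
	    !framer_get_status(ec->framer, &status))
		dev_info(&ec->framer->dev, "link is %s\n",
			 status.link_is_on ? "on" : "off");
	return NOTIFY_OK;
}

static int example_consumer_setup(struct device *dev, struct example_consumer *ec)
{
	int ret;

	ec->framer = devm_framer_get(dev, NULL);	/* NULL con_id: first framer */
	if (IS_ERR(ec->framer))
		return PTR_ERR(ec->framer);

	ret = framer_init(ec->framer);
	if (ret)
		return ret;

	ret = framer_power_on(ec->framer);
	if (ret) {
		framer_exit(ec->framer);
		return ret;
	}

	ec->nb.notifier_call = example_framer_event;
	return framer_notifier_register(ec->framer, &ec->nb);
}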
diff --git a/include/linux/framer/pef2256.h b/include/linux/framer/pef2256.h
new file mode 100644
index 000000000000..71d80af58c40
--- /dev/null
+++ b/include/linux/framer/pef2256.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PEF2256 consumer API
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __PEF2256_H__
+#define __PEF2256_H__
+
+#include <linux/types.h>
+
+struct pef2256;
+struct regmap;
+
+/* Retrieve the PEF2256 regmap */
+struct regmap *pef2256_get_regmap(struct pef2256 *pef2256);
+
+/* PEF2256 hardware versions */
+enum pef2256_version {
+ PEF2256_VERSION_UNKNOWN,
+ PEF2256_VERSION_1_2,
+ PEF2256_VERSION_2_1,
+ PEF2256_VERSION_2_2,
+};
+
+/* Get the PEF2256 hardware version */
+enum pef2256_version pef2256_get_version(struct pef2256 *pef2256);
+
+#endif /* __PEF2256_H__ */
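A short hypothetical sketch of a consumer of this API follows; obtaining the pef2256 handle from the parent's drvdata is an assumption about how the MFD parent driver wires things up, and the register offset is purely illustrative.

/*
 * Hypothetical consumer sketch: a sub-device of the PEF2256 MFD reaching
 * back into the chip through the shared regmap.
 */
#include <linux/device.h>
#include <linux/framer/pef2256.h>
#include <linux/regmap.h>

static int example_child_probe(struct device *dev)
{
	struct pef2256 *pef2256 = dev_get_drvdata(dev->parent);	/* assumed */
	struct regmap *regmap = pef2256_get_regmap(pef2256);

	if (pef2256_get_version(pef2256) == PEF2256_VERSION_UNKNOWN)
		return -ENODEV;

	/* Device-specific register accesses go through the chip regmap. */
	return regmap_update_bits(regmap, 0x02, 0x01, 0x01);	/* illustrative */
}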
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 27828145ca09..b303472255be 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -8,9 +8,11 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_FREEZER
-extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+DECLARE_STATIC_KEY_FALSE(freezer_active);
+
extern bool pm_freezing; /* PM freezing in effect */
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
@@ -22,10 +24,7 @@ extern unsigned int freeze_timeout_msecs;
/*
* Check if a process has been frozen
*/
-static inline bool frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
+extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
@@ -34,9 +33,10 @@ extern bool freezing_slow_path(struct task_struct *p);
*/
static inline bool freezing(struct task_struct *p)
{
- if (likely(!atomic_read(&system_freezing_cnt)))
- return false;
- return freezing_slow_path(p);
+ if (static_branch_unlikely(&freezer_active))
+ return freezing_slow_path(p);
+
+ return false;
}
/* Takes and releases task alloc lock using task_lock() */
@@ -48,23 +48,14 @@ extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
-/*
- * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
- * If try_to_freeze causes a lockdep warning it means the caller may deadlock
- */
-static inline bool try_to_freeze_unsafe(void)
+static inline bool try_to_freeze(void)
{
might_sleep();
if (likely(!freezing(current)))
return false;
- return __refrigerator(false);
-}
-
-static inline bool try_to_freeze(void)
-{
if (!(current->flags & PF_NOFREEZE))
debug_check_no_locks_held();
- return try_to_freeze_unsafe();
+ return __refrigerator(false);
}
extern bool freeze_task(struct task_struct *p);
@@ -79,195 +70,6 @@ static inline bool cgroup_freezing(struct task_struct *task)
}
#endif /* !CONFIG_CGROUP_FREEZER */
-/*
- * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
- * calls wait_for_completion(&vfork) and reset right after it returns from this
- * function. Next, the parent should call try_to_freeze() to freeze itself
- * appropriately in case the child has exited before the freezing of tasks is
- * complete. However, we don't want kernel threads to be frozen in unexpected
- * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
- * parent won't really block freeze_processes(), since ____call_usermodehelper()
- * (the child) does a little before exec/exit and it can't be frozen before
- * waking up the parent.
- */
-
-
-/**
- * freezer_do_not_count - tell freezer to ignore %current
- *
- * Tell freezers to ignore the current task when determining whether the
- * target frozen state is reached. IOW, the current task will be
- * considered frozen enough by freezers.
- *
- * The caller shouldn't do anything which isn't allowed for a frozen task
- * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
- * wrap a scheduling operation and nothing much else.
- */
-static inline void freezer_do_not_count(void)
-{
- current->flags |= PF_FREEZER_SKIP;
-}
-
-/**
- * freezer_count - tell freezer to stop ignoring %current
- *
- * Undo freezer_do_not_count(). It tells freezers that %current should be
- * considered again and tries to freeze if freezing condition is already in
- * effect.
- */
-static inline void freezer_count(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- /*
- * If freezing is in progress, the following paired with smp_mb()
- * in freezer_should_skip() ensures that either we see %true
- * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
- */
- smp_mb();
- try_to_freeze();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezer_count_unsafe(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- smp_mb();
- try_to_freeze_unsafe();
-}
-
-/**
- * freezer_should_skip - whether to skip a task when determining frozen
- * state is reached
- * @p: task in quesion
- *
- * This function is used by freezers after establishing %true freezing() to
- * test whether a task should be skipped when determining the target frozen
- * state is reached. IOW, if this function returns %true, @p is considered
- * frozen enough.
- */
-static inline bool freezer_should_skip(struct task_struct *p)
-{
- /*
- * The following smp_mb() paired with the one in freezer_count()
- * ensures that either freezer_count() sees %true freezing() or we
- * see cleared %PF_FREEZER_SKIP and return %false. This makes it
- * impossible for a task to slip frozen state testing after
- * clearing %PF_FREEZER_SKIP.
- */
- smp_mb();
- return p->flags & PF_FREEZER_SKIP;
-}
-
-/*
- * These functions are intended to be used whenever you want allow a sleeping
- * task to be frozen. Note that neither return any clear indication of
- * whether a freeze event happened while in this function.
- */
-
-/* Like schedule(), but should not block the freezer. */
-static inline void freezable_schedule(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezable_schedule_unsafe(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count_unsafe();
-}
-
-/*
- * Like schedule_timeout(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout(timeout);
- freezer_count();
- return __retval;
-}
-
-/*
- * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout_interruptible(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
-{
- long __retval;
-
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/* Like schedule_timeout_killable(), but should not block the freezer. */
-static inline long freezable_schedule_timeout_killable(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/*
- * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- u64 delta, const enum hrtimer_mode mode)
-{
- int __retval;
- freezer_do_not_count();
- __retval = schedule_hrtimeout_range(expires, delta, mode);
- freezer_count();
- return __retval;
-}
-
-/*
- * Freezer-friendly wrappers around wait_event_interruptible(),
- * wait_event_killable() and wait_event_interruptible_timeout(), originally
- * defined in <linux/wait.h>
- */
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-#define wait_event_freezekillable_unsafe(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count_unsafe(); \
- __retval; \
-})
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -279,38 +81,10 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
-static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }
-static inline void freezer_do_not_count(void) {}
-static inline void freezer_count(void) {}
-static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#define freezable_schedule() schedule()
-
-#define freezable_schedule_unsafe() schedule()
-
-#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
-
-#define freezable_schedule_timeout_interruptible(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_interruptible_unsafe(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_killable(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_timeout_killable_unsafe(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
- schedule_hrtimeout_range(expires, delta, mode)
-
-#define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
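With the freezable_schedule*() wrappers removed, the pattern that remains for a freezable kernel thread is sketched below (names are illustrative): sleep normally, then call try_to_freeze() on wakeup. Since freezing() now sits behind the freezer_active static key, the common no-freeze path costs a patched-out jump instead of an atomic_read().

/*
 * Hypothetical freezable kthread sketch, assuming CONFIG_FREEZER.
 */
#include <linux/freezer.h>
#include <linux/kthread.h>

static int example_worker(void *unused)
{
	set_freezable();		/* opt this kthread in to freezing */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* enter __refrigerator() if freezing */
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}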
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
deleted file mode 100644
index b07d88c92bb2..000000000000
--- a/include/linux/frontswap.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FRONTSWAP_H
-#define _LINUX_FRONTSWAP_H
-
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/jump_label.h>
-
-/*
- * Return code to denote that requested number of
- * frontswap pages are unused(moved to page cache).
- * Used in shmem_unuse and try_to_unuse.
- */
-#define FRONTSWAP_PAGES_UNUSED 2
-
-struct frontswap_ops {
- void (*init)(unsigned); /* this swap type was just swapon'ed */
- int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
- int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
- void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
- void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
- struct frontswap_ops *next; /* private pointer to next ops */
-};
-
-extern void frontswap_register_ops(struct frontswap_ops *ops);
-extern void frontswap_shrink(unsigned long);
-extern unsigned long frontswap_curr_pages(void);
-extern void frontswap_writethrough(bool);
-#define FRONTSWAP_HAS_EXCLUSIVE_GETS
-extern void frontswap_tmem_exclusive_gets(bool);
-
-extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
-extern void __frontswap_init(unsigned type, unsigned long *map);
-extern int __frontswap_store(struct page *page);
-extern int __frontswap_load(struct page *page);
-extern void __frontswap_invalidate_page(unsigned, pgoff_t);
-extern void __frontswap_invalidate_area(unsigned);
-
-#ifdef CONFIG_FRONTSWAP
-extern struct static_key_false frontswap_enabled_key;
-
-static inline bool frontswap_enabled(void)
-{
- return static_branch_unlikely(&frontswap_enabled_key);
-}
-
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return __frontswap_test(sis, offset);
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
- p->frontswap_map = map;
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return p->frontswap_map;
-}
-#else
-/* all inline routines become no-ops and all externs are ignored */
-
-static inline bool frontswap_enabled(void)
-{
- return false;
-}
-
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return false;
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return NULL;
-}
-#endif
-
-static inline int frontswap_store(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_store(page);
-
- return -1;
-}
-
-static inline int frontswap_load(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_load(page);
-
- return -1;
-}
-
-static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_page(type, offset);
-}
-
-static inline void frontswap_invalidate_area(unsigned type)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_area(type);
-}
-
-static inline void frontswap_init(unsigned type, unsigned long *map)
-{
-#ifdef CONFIG_FRONTSWAP
- __frontswap_init(type, map);
-#endif
-}
-
-#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7519ae003a08..00fc429b0af0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -39,6 +39,12 @@
#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
+#include <linux/mount.h>
+#include <linux/cred.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/slab.h>
+#include <linux/maple_tree.h>
+#include <linux/rw_hint.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -46,6 +52,7 @@
struct backing_dev_info;
struct bdi_writeback;
struct bio;
+struct io_comp_batch;
struct export_operations;
struct fiemap_extent_info;
struct hd_geometry;
@@ -62,27 +69,22 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
struct fs_context;
struct fs_parameter_spec;
+struct fileattr;
+struct iomap_ops;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
-extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
-extern struct inodes_stat_t inodes_stat;
-extern int leases_enable, lease_break_time;
-extern int sysctl_protected_symlinks;
-extern int sysctl_protected_hardlinks;
-extern int sysctl_protected_fifos;
-extern int sysctl_protected_regular;
typedef __kernel_rwf_t rwf_t;
@@ -119,13 +121,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_PWRITE ((__force fmode_t)0x10)
/* File is opened for execution with sys_execve / sys_uselib */
#define FMODE_EXEC ((__force fmode_t)0x20)
-/* File is opened with O_NDELAY (only set for block devices) */
-#define FMODE_NDELAY ((__force fmode_t)0x40)
-/* File is opened with O_EXCL (only set for block devices) */
-#define FMODE_EXCL ((__force fmode_t)0x80)
-/* File is opened using open(.., 3, ..) and is writeable only for ioctls
- (specialy hack for floppy.c) */
-#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)0x200)
/* 64bit hashes as llseek() offset (for directories) */
@@ -142,7 +137,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)0x1000)
-/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
+/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
/* File is opened with O_PATH; almost nothing can be done with it */
@@ -163,6 +158,17 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File is stream-like */
#define FMODE_STREAM ((__force fmode_t)0x200000)
+/* File supports DIRECT IO */
+#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+
+#define FMODE_NOREUSE ((__force fmode_t)0x800000)
+
+/* File supports non-exclusive O_DIRECT writes from multiple threads */
+#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000)
+
+/* File is embedded in backing_file object */
+#define FMODE_BACKING ((__force fmode_t)0x2000000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -178,13 +184,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File supports async buffered reads */
#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
-/*
- * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
- * that indicates that they should check the contents of the iovec are
- * valid, but not check the memory that the iovec elements
- * points too.
- */
-#define CHECK_IOVEC_ONLY -1
+/* File supports async nowait buffered writes */
+#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000)
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -227,8 +228,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
- kuid_t ia_uid;
- kgid_t ia_gid;
+ /*
+ * The two anonymous unions wrap structures with the same member.
+ *
+ * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which
+ * are a dedicated type requiring the filesystem to use the dedicated
+ * helpers. Other filesystems can continue to use ia_{g,u}id until they
+ * have been ported.
+ *
+ * They always contain the same value. In other words, filesystems
+ * raising FS_ALLOW_IDMAP pass down the same value on idmapped mounts
+ * as they would on regular mounts.
+ */
+ union {
+ kuid_t ia_uid;
+ vfsuid_t ia_vfsuid;
+ };
+ union {
+ kgid_t ia_gid;
+ vfsgid_t ia_vfsgid;
+ };
loff_t ia_size;
struct timespec64 ia_atime;
struct timespec64 ia_mtime;
@@ -271,7 +290,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -284,11 +303,6 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
- * helper code (eg buffer layer)
- * to clear GFP_FS from alloc */
-
/*
* oh the beauties of C type declarations.
*/
@@ -297,49 +311,78 @@ struct address_space;
struct writeback_control;
struct readahead_control;
-/*
- * Write life time hint values.
- * Stored in struct inode as u8.
- */
-enum rw_hint {
- WRITE_LIFE_NOT_SET = 0,
- WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
- WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
- WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
- WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
- WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
-};
-
-#define IOCB_EVENTFD (1 << 0)
-#define IOCB_APPEND (1 << 1)
-#define IOCB_DIRECT (1 << 2)
-#define IOCB_HIPRI (1 << 3)
-#define IOCB_DSYNC (1 << 4)
-#define IOCB_SYNC (1 << 5)
-#define IOCB_WRITE (1 << 6)
-#define IOCB_NOWAIT (1 << 7)
+/* Match RWF_* bits to IOCB bits */
+#define IOCB_HIPRI (__force int) RWF_HIPRI
+#define IOCB_DSYNC (__force int) RWF_DSYNC
+#define IOCB_SYNC (__force int) RWF_SYNC
+#define IOCB_NOWAIT (__force int) RWF_NOWAIT
+#define IOCB_APPEND (__force int) RWF_APPEND
+
+/* non-RWF related bits - start at 16 */
+#define IOCB_EVENTFD (1 << 16)
+#define IOCB_DIRECT (1 << 17)
+#define IOCB_WRITE (1 << 18)
/* iocb->ki_waitq is valid */
-#define IOCB_WAITQ (1 << 8)
-#define IOCB_NOIO (1 << 9)
+#define IOCB_WAITQ (1 << 19)
+#define IOCB_NOIO (1 << 20)
+/* can use bio alloc cache */
+#define IOCB_ALLOC_CACHE (1 << 21)
+/*
+ * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
+ * iocb completion can be passed back to the owner for execution from a safe
+ * context rather than needing to be punted through a workqueue. If this
+ * flag is set, the bio completion handling may set iocb->dio_complete to a
+ * handler function and iocb->private to context information for that handler.
+ * The issuer should call the handler with that context information from task
+ * context to complete the processing of the iocb. Note that while this
+ * provides a task context for the dio_complete() callback, it should only be
+ * used on the completion side for non-IO generating completions. It's fine to
+ * call blocking functions from this callback, but they should not wait for
+ * unrelated IO (like cache flushing, new IO generation, etc).
+ */
+#define IOCB_DIO_CALLER_COMP (1 << 22)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 23)
+
+/* for use in trace events */
+#define TRACE_IOCB_STRINGS \
+ { IOCB_HIPRI, "HIPRI" }, \
+ { IOCB_DSYNC, "DSYNC" }, \
+ { IOCB_SYNC, "SYNC" }, \
+ { IOCB_NOWAIT, "NOWAIT" }, \
+ { IOCB_APPEND, "APPEND" }, \
+ { IOCB_EVENTFD, "EVENTFD"}, \
+ { IOCB_DIRECT, "DIRECT" }, \
+ { IOCB_WRITE, "WRITE" }, \
+ { IOCB_WAITQ, "WAITQ" }, \
+ { IOCB_NOIO, "NOIO" }, \
+ { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
+ { IOCB_DIO_CALLER_COMP, "CALLER_COMP" }
struct kiocb {
struct file *ki_filp;
-
- /* The 'ki_filp' pointer is shared in a union for aio */
- randomized_struct_fields_start
-
loff_t ki_pos;
- void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
- u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
union {
- unsigned int ki_cookie; /* for ->iopoll */
- struct wait_page_queue *ki_waitq; /* for async buffered IO */
+ /*
+ * Only used for async buffered reads, where it denotes the
+ * page waitqueue associated with completing the read. Valid
+ * IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
+ /*
+ * Can be used for O_DIRECT IO, where the completion handling
+ * is punted back to the issuer of the IO. May only be set
+ * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
+ * must then check for presence of this handler when ki_complete
+ * is invoked. The data passed in to this handler must be
+ * assigned to ->private when dio_complete is assigned.
+ */
+ ssize_t (*dio_complete)(void *data);
};
-
- randomized_struct_fields_end
};
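/*
 * Hedged sketch of the IOCB_DIO_CALLER_COMP handshake described above;
 * the function names are hypothetical and not lifted from any driver.
 */

/* Issuer side, at submission time: opt in to caller completion. */
static void example_dio_submit(struct kiocb *iocb)
{
	iocb->ki_flags |= IOCB_DIO_CALLER_COMP;
	/* ... issue the O_DIRECT write ... */
}

/*
 * Issuer side, from task context, after ->ki_complete() has run: if the
 * completion path stashed a handler, finish the iocb here instead of
 * punting the completion through a workqueue.
 */
static ssize_t example_dio_finish(struct kiocb *iocb, ssize_t ret)
{
	if (iocb->dio_complete)
		return iocb->dio_complete(iocb->private);
	return ret;
}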
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -347,48 +390,20 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_complete == NULL;
}
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user *buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
- unsigned long, unsigned long);
-
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
- /* Set a page dirty. Return true if this dirtied it */
- int (*set_page_dirty)(struct page *page);
+ /* Mark a folio dirty. Return true if this dirtied it */
+ bool (*dirty_folio)(struct address_space *, struct folio *);
- /*
- * Reads in the requested pages. Unlike ->readpage(), this is
- * PURELY used for read-ahead!.
- */
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -396,66 +411,57 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
- int (*releasepage) (struct page *, gfp_t);
- void (*freepage)(struct page *);
+ void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
- * migrate the contents of a page to the specified target. If
+ * migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
- int (*migratepage) (struct address_space *,
- struct page *, struct page *, enum migrate_mode);
- bool (*isolate_page)(struct page *, isolate_mode_t);
- void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, unsigned long,
- unsigned long);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
+ int (*launder_folio)(struct folio *);
+ bool (*is_partially_uptodate) (struct folio *, size_t from,
+ size_t count);
+ void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
+ int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
extern const struct address_space_operations empty_aops;
-/*
- * pagecache_write_begin/pagecache_write_end must be used by general code
- * to write into the pagecache.
- */
-int pagecache_write_begin(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-
-int pagecache_write_end(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
* @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ * file offset->disk block mappings in the filesystem during invalidates.
+ * It is also used to block modification of page cache contents through
+ * memory mappings.
* @gfp_mask: Memory allocation flags to use for allocating pages.
- * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings.
* @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
- * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
* @writeback_index: Writeback starts here.
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
- * @private_lock: For use by the owner of the address_space.
- * @private_list: For use by the owner of the address_space.
- * @private_data: For use by the owner of the address_space.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
struct xarray i_pages;
+ struct rw_semaphore invalidate_lock;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -463,16 +469,15 @@ struct address_space {
atomic_t nr_thps;
#endif
struct rb_root_cached i_mmap;
- struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
- unsigned long nrexceptional;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
errseq_t wb_err;
- spinlock_t private_lock;
- struct list_head private_list;
- void *private_data;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ struct rw_semaphore i_mmap_rwsem;
+ void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -508,6 +513,11 @@ static inline void i_mmap_unlock_write(struct address_space *mapping)
up_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_read(struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
@@ -538,7 +548,7 @@ static inline int mapping_mapped(struct address_space *mapping)
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
+ * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
@@ -585,6 +595,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+/*
+ * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
+ * cache the ACL. This also means that ->get_inode_acl() can be called in RCU
+ * mode with the LOOKUP_RCU flag.
+ */
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
@@ -647,13 +662,13 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 i_atime;
- struct timespec64 i_mtime;
- struct timespec64 i_ctime;
+ struct timespec64 __i_atime;
+ struct timespec64 __i_mtime;
+ struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
- u8 i_write_hint;
+ enum rw_hint i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -701,7 +716,6 @@ struct inode {
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
- struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
unsigned i_dir_seq;
@@ -715,7 +729,7 @@ struct inode {
#endif
#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_info *i_crypt_info;
+ struct fscrypt_inode_info *i_crypt_info;
#endif
#ifdef CONFIG_FS_VERITY
@@ -819,9 +833,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
down_read_nested(&inode->i_rwsem, subclass);
}
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+ down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+ up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+ down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+ struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+ struct address_space *mapping)
+{
+ up_read(&mapping->invalidate_lock);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+
+
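/*
 * Hypothetical filesystem sketch of the helpers above (illustrative, not
 * part of the patch): take invalidate_lock exclusively around a range
 * removal so page faults, which take it shared, cannot re-instantiate
 * page cache over the range. The block-freeing step is elided.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int example_punch_hole(struct inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;

	filemap_invalidate_lock(mapping);
	truncate_inode_pages_range(mapping, start, end);
	/* ... free the underlying blocks while faults are held off ... */
	filemap_invalidate_unlock(mapping);
	return 0;
}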
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -851,7 +898,8 @@ static inline loff_t i_size_read(const struct inode *inode)
preempt_enable();
return i_size;
#else
- return inode->i_size;
+ /* Pairs with smp_store_release() in i_size_write() */
+ return smp_load_acquire(&inode->i_size);
#endif
}
@@ -873,7 +921,12 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
inode->i_size = i_size;
preempt_enable();
#else
- inode->i_size = i_size;
+ /*
+ * Pairs with smp_load_acquire() in i_size_read() to ensure
+ * changes related to inode size (such as page contents) are
+ * visible before we see the changed inode size.
+ */
+ smp_store_release(&inode->i_size, i_size);
#endif
}
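/*
 * Illustrative pairing (a sketch, not part of the patch): the release in
 * i_size_write() publishes every store made before it, so a reader whose
 * i_size_read() observes the new size also observes those earlier stores.
 *
 *	writer (extending the file)	reader
 *	---------------------------	------------------------------
 *	write new page contents		sz = i_size_read(inode);
 *	i_size_write(inode, new_size);	if (pos < sz)
 *						contents at pos are valid
 */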
@@ -895,18 +948,27 @@ struct fown_struct {
int signum; /* posix.1b rt signal to be delivered on IO */
};
-/*
- * Track a single file's readahead state
+/**
+ * struct file_ra_state - Track a file's readahead state.
+ * @start: Where the most recent readahead started.
+ * @size: Number of pages read in the most recent readahead.
+ * @async_size: Number of pages that were/are not needed immediately
+ * and so were/are genuinely "ahead". Start next readahead when
+ * the first of these pages is accessed.
+ * @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @mmap_miss: How many mmap accesses missed in the page cache.
+ * @prev_pos: The last byte in the most recent read request.
+ *
+ * When this structure is passed to ->readahead(), the "most recent"
+ * readahead means the current readahead.
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned int size; /* # of readahead pages */
- unsigned int async_size; /* do asynchronous readahead when
- there are only # of pages ahead */
-
- unsigned int ra_pages; /* Maximum readahead window */
- unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
- loff_t prev_pos; /* Cache last read() position */
+ pgoff_t start;
+ unsigned int size;
+ unsigned int async_size;
+ unsigned int ra_pages;
+ unsigned int mmap_miss;
+ loff_t prev_pos;
};
/*
@@ -918,29 +980,37 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
+/*
+ * f_{lock,count,pos_lock} members can be highly contended and share
+ * the same cacheline. f_{lock,mode} are very frequently used together
+ * and so share the same cacheline as well. The read-mostly
+ * f_{path,inode,op} are kept on a separate cacheline.
+ */
struct file {
union {
- struct llist_node fu_llist;
- struct rcu_head fu_rcuhead;
- } f_u;
- struct path f_path;
- struct inode *f_inode; /* cached value */
- const struct file_operations *f_op;
+ /* fput() uses task work when closing and freeing file (default). */
+ struct callback_head f_task_work;
+ /* fput() must use workqueue (most kernel threads). */
+ struct llist_node f_llist;
+ unsigned int f_iocb_flags;
+ };
/*
- * Protects f_ep_links, f_flags.
+ * Protects f_ep, f_flags.
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
- enum rw_hint f_write_hint;
- atomic_long_t f_count;
- unsigned int f_flags;
fmode_t f_mode;
+ atomic_long_t f_count;
struct mutex f_pos_lock;
loff_t f_pos;
+ unsigned int f_flags;
struct fown_struct f_owner;
const struct cred *f_cred;
struct file_ra_state f_ra;
+ struct path f_path;
+ struct inode *f_inode; /* cached value */
+ const struct file_operations *f_op;
u64 f_version;
#ifdef CONFIG_SECURITY
@@ -951,8 +1021,7 @@ struct file {
#ifdef CONFIG_EPOLL
/* Used by fs/eventpoll.c to link all the hooks to this file */
- struct list_head f_ep_links;
- struct list_head f_tfile_llink;
+ struct hlist_head *f_ep;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
errseq_t f_wb_err;
@@ -972,9 +1041,10 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu_many(x, cnt) \
- atomic_long_add_unless(&(x)->f_count, (cnt), 0)
-#define get_file_rcu(x) get_file_rcu_many((x), 1)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -987,333 +1057,39 @@ static inline struct file *get_file(struct file *f)
#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
-#define FL_POSIX 1
-#define FL_FLOCK 2
-#define FL_DELEG 4 /* NFSv4 delegation */
-#define FL_ACCESS 8 /* not trying to lock, just looking */
-#define FL_EXISTS 16 /* when unlocking, test for existence */
-#define FL_LEASE 32 /* lease held on this file */
-#define FL_CLOSE 64 /* unlock on close */
-#define FL_SLEEP 128 /* A blocking lock */
-#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
-#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
-#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
-#define FL_LAYOUT 2048 /* outstanding pNFS layout */
-
-#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
-
-/*
- * Special return value from posix_lock_file() and vfs_lock_file() for
- * asynchronous locking.
- */
-#define FILE_LOCK_DEFERRED 1
-
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock;
-
-struct file_lock_operations {
- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
- void (*fl_release_private)(struct file_lock *);
-};
-
-struct lock_manager_operations {
- fl_owner_t (*lm_get_owner)(fl_owner_t);
- void (*lm_put_owner)(fl_owner_t);
- void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, int);
- bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock *, int, struct list_head *);
- void (*lm_setup)(struct file_lock *, void **);
- bool (*lm_breaker_owns_lease)(struct file_lock *);
-};
-
-struct lock_manager {
- struct list_head list;
- /*
- * NFSv4 and up also want opens blocked during the grace period;
- * NLM doesn't care:
- */
- bool block_opens;
-};
-
-struct net;
-void locks_start_grace(struct net *, struct lock_manager *);
-void locks_end_grace(struct lock_manager *);
-bool locks_in_grace(struct net *);
-bool opens_in_grace(struct net *);
-
-/* that will die - we need it for nfs_lock_info */
-#include <linux/nfs_fs_i.h>
-
-/*
- * struct file_lock represents a generic "file lock". It's used to represent
- * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
- * note that the same struct is used to represent both a request for a lock and
- * the lock itself, but the same object is never used for both.
- *
- * FIXME: should we create a separate "struct lock_request" to help distinguish
- * these two uses?
- *
- * The varous i_flctx lists are ordered by:
- *
- * 1) lock owner
- * 2) lock range start
- * 3) lock range end
- *
- * Obviously, the last two criteria only matter for POSIX locks.
- */
-struct file_lock {
- struct file_lock *fl_blocker; /* The lock, that is blocking us */
- struct list_head fl_list; /* link into file_lock_context */
- struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_blocked_requests; /* list of requests with
- * ->fl_blocker pointing here
- */
- struct list_head fl_blocked_member; /* node in
- * ->fl_blocker->fl_blocked_requests
- */
- fl_owner_t fl_owner;
- unsigned int fl_flags;
- unsigned char fl_type;
- unsigned int fl_pid;
- int fl_link_cpu; /* what cpu's list is this on? */
- wait_queue_head_t fl_wait;
- struct file *fl_file;
- loff_t fl_start;
- loff_t fl_end;
-
- struct fasync_struct * fl_fasync; /* for lease break notifications */
- /* for lease breaks: */
- unsigned long fl_break_time;
- unsigned long fl_downgrade_time;
-
- const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
- const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
- union {
- struct nfs_lock_info nfs_fl;
- struct nfs4_lock_info nfs4_fl;
- struct {
- struct list_head link; /* link in AFS vnode's pending_locks list */
- int state; /* state of grant or error if -ve */
- unsigned int debug_id;
- } afs;
- } fl_u;
-} __randomize_layout;
-
-struct file_lock_context {
- spinlock_t flc_lock;
- struct list_head flc_flock;
- struct list_head flc_posix;
- struct list_head flc_lease;
-};
+struct file_lease;
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
-#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
+#define OFFSET_MAX type_max(loff_t)
+#define OFFT_OFFSET_MAX type_max(off_t)
#endif
extern void send_sigio(struct fown_struct *fown, int fd, int band);
-#define locks_inode(f) file_inode(f)
-
-#ifdef CONFIG_FILE_LOCKING
-extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
- struct flock *);
-
-#if BITS_PER_LONG == 32
-extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
- struct flock64 *);
-#endif
-
-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
-extern int fcntl_getlease(struct file *filp);
-
-/* fs/locks.c */
-void locks_free_lock_context(struct inode *inode);
-void locks_free_lock(struct file_lock *fl);
-extern void locks_init_lock(struct file_lock *);
-extern struct file_lock * locks_alloc_lock(void);
-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
-extern void locks_remove_posix(struct file *, fl_owner_t);
-extern void locks_remove_file(struct file *);
-extern void locks_release_private(struct file_lock *);
-extern void posix_test_lock(struct file *, struct file_lock *);
-extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int locks_delete_block(struct file_lock *);
-extern int vfs_test_lock(struct file *, struct file_lock *);
-extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
-extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
-extern void lease_get_mtime(struct inode *, struct timespec64 *time);
-extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
-extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock *, int, struct list_head *);
-
-struct notifier_block;
-extern int lease_register_notifier(struct notifier_block *);
-extern void lease_unregister_notifier(struct notifier_block *);
-
-struct files_struct;
-extern void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files);
-#else /* !CONFIG_FILE_LOCKING */
-static inline int fcntl_getlk(struct file *file, unsigned int cmd,
- struct flock __user *user)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_setlk(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock __user *user)
-{
- return -EACCES;
-}
-
-#if BITS_PER_LONG == 32
-static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
- struct flock64 __user *user)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_setlk64(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock64 __user *user)
-{
- return -EACCES;
-}
-#endif
-static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_getlease(struct file *filp)
-{
- return F_UNLCK;
-}
-
-static inline void
-locks_free_lock_context(struct inode *inode)
-{
-}
-
-static inline void locks_init_lock(struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
-{
- return;
-}
-
-static inline void locks_remove_file(struct file *filp)
-{
- return;
-}
-
-static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
-{
- return;
-}
-
-static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
- struct file_lock *conflock)
-{
- return -ENOLCK;
-}
-
-static inline int locks_delete_block(struct file_lock *waiter)
-{
- return -ENOENT;
-}
-
-static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
-}
-
-static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
- struct file_lock *fl, struct file_lock *conf)
-{
- return -ENOLCK;
-}
-
-static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
-}
-
-static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
-{
- return -ENOLCK;
-}
-
-static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
-{
- return 0;
-}
-
-static inline void lease_get_mtime(struct inode *inode,
- struct timespec64 *time)
-{
- return;
-}
-
-static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp, void **priv)
-{
- return -EINVAL;
-}
-
-static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease, void **priv)
-{
- return -EINVAL;
-}
-
-static inline int lease_modify(struct file_lock *fl, int arg,
- struct list_head *dispose)
-{
- return -EINVAL;
-}
-
-struct files_struct;
-static inline void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files) {}
-#endif /* !CONFIG_FILE_LOCKING */
-
static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}
+/*
+ * file_dentry() is a relic from the days that overlayfs was using files with a
+ * "fake" path, meaning, f_path on overlayfs and f_inode on underlying fs.
+ * In those days, file_dentry() was needed to get the underlying fs dentry that
+ * matches f_inode.
+ * Files with "fake" path should not exist nowadays, so use an assertion to make
+ * sure that file_dentry() was not papering over filesystem bugs.
+ */
static inline struct dentry *file_dentry(const struct file *file)
{
- return d_real(file->f_path.dentry, file_inode(file));
-}
+ struct dentry *dentry = file->f_path.dentry;
-static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
-{
- return locks_lock_inode_wait(locks_inode(filp), fl);
+ WARN_ON_ONCE(d_inode(dentry) != file_inode(file));
+ return dentry;
}
struct fasync_struct {
@@ -1338,7 +1114,7 @@ extern void fasync_free(struct fasync_struct *);
extern void kill_fasync(struct fasync_struct **, int, int);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1347,29 +1123,37 @@ extern int send_sigurg(struct fown_struct *fown);
* sb->s_flags. Note that these mirror the equivalent MS_* flags where
* represented in both.
*/
-#define SB_RDONLY 1 /* Mount read-only */
-#define SB_NOSUID 2 /* Ignore suid and sgid bits */
-#define SB_NODEV 4 /* Disallow access to device special files */
-#define SB_NOEXEC 8 /* Disallow program execution */
-#define SB_SYNCHRONOUS 16 /* Writes are synced at once */
-#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */
-#define SB_DIRSYNC 128 /* Directory modifications are synchronous */
-#define SB_NOATIME 1024 /* Do not update access times. */
-#define SB_NODIRATIME 2048 /* Do not update directory access times */
-#define SB_SILENT 32768
-#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
-#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
-#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
-#define SB_I_VERSION (1<<23) /* Update inode I_version field */
-#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+#define SB_RDONLY BIT(0) /* Mount read-only */
+#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
+#define SB_NODEV BIT(2) /* Disallow access to device special files */
+#define SB_NOEXEC BIT(3) /* Disallow program execution */
+#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
+#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
+#define SB_NOATIME BIT(10) /* Do not update access times. */
+#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
+#define SB_SILENT BIT(15)
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
+#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
+#define SB_I_VERSION BIT(23) /* Update inode I_version field */
+#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
-#define SB_SUBMOUNT (1<<26)
-#define SB_FORCE (1<<27)
-#define SB_NOSEC (1<<28)
-#define SB_BORN (1<<29)
-#define SB_ACTIVE (1<<30)
-#define SB_NOUSER (1<<31)
+#define SB_DEAD BIT(21)
+#define SB_DYING BIT(24)
+#define SB_SUBMOUNT BIT(26)
+#define SB_FORCE BIT(27)
+#define SB_NOSEC BIT(28)
+#define SB_BORN BIT(29)
+#define SB_ACTIVE BIT(30)
+#define SB_NOUSER BIT(31)
+
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
/*
* Umount options
@@ -1385,14 +1169,19 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
-#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
/* Possible states of 'frozen' field */
enum {
@@ -1407,8 +1196,9 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
+ unsigned short frozen; /* Is sb frozen? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1433,17 +1223,22 @@ struct super_block {
#ifdef CONFIG_SECURITY
void *s_security;
#endif
- const struct xattr_handler **s_xattr;
+ const struct xattr_handler * const *s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
- struct key *s_master_keys; /* master crypto keys in use */
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
#endif
#ifdef CONFIG_FS_VERITY
const struct fsverity_operations *s_vop;
#endif
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
- struct block_device *s_bdev;
+ struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
+ struct file *s_bdev_file;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
@@ -1469,11 +1264,24 @@ struct super_block {
struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
#endif
+ /*
+ * q: why are s_id and s_sysfs_name not the same? both are human
+ * readable strings that identify the filesystem
+ * a: s_id is allowed to change at runtime; it's used in log messages,
+ * and we want to be able to switch it when a device starts out as a
+ * single device (s_id is the dev name) but another device is then hot
+ * added and we have to switch to identifying the filesystem by UUID;
+ * but s_sysfs_name is a handle for programmatic access, and can't
+ * change at runtime
+ */
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
+ u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */
+
+ /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */
+ char s_sysfs_name[UUID_STRING_LEN + 1];
unsigned int s_max_links;
- fmode_t s_mode;
/*
* The next field is for VFS *only*. No filesystems have any business
@@ -1489,20 +1297,18 @@ struct super_block {
const struct dentry_operations *s_d_op; /* default d_op for dentries */
- /*
- * Saved pool identifier for cleancache (-1 means none)
- */
- int cleancache_poolid;
-
- struct shrinker s_shrink; /* per-sb shrinker handle */
+ struct shrinker *s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
- /* Pending fsnotify inode refs */
- atomic_long_t s_fsnotify_inode_refs;
+ /*
+ * Number of inode/mount/sb objects that are being watched; note that
+ * inode objects are currently double-accounted.
+ */
+ atomic_long_t s_fsnotify_connectors;
- /* Being remounted read-only */
+ /* Read-only state of the superblock is being changed */
int s_readonly_remount;
/* per-sb errseq_t for reporting writeback errors via syncfs */
@@ -1544,6 +1350,11 @@ struct super_block {
struct list_head s_inodes_wb; /* writeback inodes */
} __randomize_layout;
+static inline struct user_namespace *i_user_ns(const struct inode *inode)
+{
+ return inode->i_sb->s_user_ns;
+}
+
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
* instead deal with the raw numeric values that are stored
@@ -1551,32 +1362,310 @@ struct super_block {
*/
static inline uid_t i_uid_read(const struct inode *inode)
{
- return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
+ return from_kuid(i_user_ns(inode), inode->i_uid);
}
static inline gid_t i_gid_read(const struct inode *inode)
{
- return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+ return from_kgid(i_user_ns(inode), inode->i_gid);
}
static inline void i_uid_write(struct inode *inode, uid_t uid)
{
- inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
+ inode->i_uid = make_kuid(i_user_ns(inode), uid);
}
static inline void i_gid_write(struct inode *inode, gid_t gid)
{
- inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
+ inode->i_gid = make_kgid(i_user_ns(inode), gid);
+}
+
+/**
+ * i_uid_into_vfsuid - map an inode's i_uid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_uid mapped down according to @idmap.
+ * If the inode's i_uid has no mapping INVALID_VFSUID is returned.
+ */
+static inline vfsuid_t i_uid_into_vfsuid(struct mnt_idmap *idmap,
+ const struct inode *inode)
+{
+ return make_vfsuid(idmap, i_user_ns(inode), inode->i_uid);
+}
+
+/**
+ * i_uid_needs_update - check whether inode's i_uid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_uid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_uid field needs to be updated, false if not.
+ */
+static inline bool i_uid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
+{
+ return ((attr->ia_valid & ATTR_UID) &&
+ !vfsuid_eq(attr->ia_vfsuid,
+ i_uid_into_vfsuid(idmap, inode)));
}
-extern struct timespec64 current_time(struct inode *inode);
+/**
+ * i_uid_update - update @inode's i_uid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_uid field translating the vfsuid of any idmapped
+ * mount into the filesystem kuid.
+ */
+static inline void i_uid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
+{
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = from_vfsuid(idmap, i_user_ns(inode),
+ attr->ia_vfsuid);
+}
+
+/**
+ * i_gid_into_vfsgid - map an inode's i_gid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_gid mapped down according to @idmap.
+ * If the inode's i_gid has no mapping INVALID_VFSGID is returned.
+ */
+static inline vfsgid_t i_gid_into_vfsgid(struct mnt_idmap *idmap,
+ const struct inode *inode)
+{
+ return make_vfsgid(idmap, i_user_ns(inode), inode->i_gid);
+}
+
+/**
+ * i_gid_needs_update - check whether inode's i_gid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_gid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_gid field needs to be updated, false if not.
+ */
+static inline bool i_gid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
+{
+ return ((attr->ia_valid & ATTR_GID) &&
+ !vfsgid_eq(attr->ia_vfsgid,
+ i_gid_into_vfsgid(idmap, inode)));
+}
+
+/**
+ * i_gid_update - update @inode's i_gid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_gid field translating the vfsgid of any idmapped
+ * mount into the filesystem kgid.
+ */
+static inline void i_gid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
+{
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = from_vfsgid(idmap, i_user_ns(inode),
+ attr->ia_vfsgid);
+}
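/*
 * Illustrative sketch (not part of this patch): the intended use of the
 * pairs above in a ->setattr() path.  Most filesystems get this behaviour
 * via setattr_copy(); myfs_setattr_owner() is hypothetical.
 */
static void myfs_setattr_owner(struct mnt_idmap *idmap, struct inode *inode,
			       const struct iattr *attr)
{
	/* each call is a no-op unless ATTR_UID/ATTR_GID is set in ia_valid */
	i_uid_update(idmap, attr, inode);
	i_gid_update(idmap, attr, inode);
}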
+
+/**
+ * inode_fsuid_set - initialize inode's i_uid field with caller's fsuid
+ * @inode: inode to initialize
+ * @idmap: idmap of the mount the inode was found from
+ *
+ * Initialize the i_uid field of @inode. If the inode was found/created via
+ * an idmapped mount, map the caller's fsuid according to @idmap.
+ */
+static inline void inode_fsuid_set(struct inode *inode,
+ struct mnt_idmap *idmap)
+{
+ inode->i_uid = mapped_fsuid(idmap, i_user_ns(inode));
+}
+
+/**
+ * inode_fsgid_set - initialize inode's i_gid field with caller's fsgid
+ * @inode: inode to initialize
+ * @idmap: idmap of the mount the inode was found from
+ *
+ * Initialize the i_gid field of @inode. If the inode was found/created via
+ * an idmapped mount, map the caller's fsgid according to @idmap.
+ */
+static inline void inode_fsgid_set(struct inode *inode,
+ struct mnt_idmap *idmap)
+{
+ inode->i_gid = mapped_fsgid(idmap, i_user_ns(inode));
+}
+
+/**
+ * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped
+ * @sb: the superblock we want a mapping in
+ * @idmap: idmap of the relevant mount
+ *
+ * Check whether the caller's fsuid and fsgid have a valid mapping in the
+ * s_user_ns of the superblock @sb. If the caller is on an idmapped mount, map
+ * the caller's fsuid and fsgid according to the @idmap first.
+ *
+ * Return: true if fsuid and fsgid are mapped, false if not.
+ */
+static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ struct mnt_idmap *idmap)
+{
+ struct user_namespace *fs_userns = sb->s_user_ns;
+ kuid_t kuid;
+ kgid_t kgid;
+
+ kuid = mapped_fsuid(idmap, fs_userns);
+ if (!uid_valid(kuid))
+ return false;
+ kgid = mapped_fsgid(idmap, fs_userns);
+ if (!gid_valid(kgid))
+ return false;
+ return kuid_has_mapping(fs_userns, kuid) &&
+ kgid_has_mapping(fs_userns, kgid);
+}
+
+struct timespec64 current_time(struct inode *inode);
+struct timespec64 inode_set_ctime_current(struct inode *inode);
+
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_sec;
+}
+
+static inline long inode_get_atime_nsec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
+{
+ return inode->__i_atime;
+}
+
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_atime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_atime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_sec;
+}
+
+static inline long inode_get_mtime_nsec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
+{
+ return inode->__i_mtime;
+}
+
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_mtime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_sec;
+}
+
+static inline long inode_get_ctime_nsec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+{
+ return inode->__i_ctime;
+}
+
+static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_ctime = ts;
+ return ts;
+}
+
+/**
+ * inode_set_ctime - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @sec: tv_sec value to set
+ * @nsec: tv_nsec value to set
+ *
+ * Set the ctime in @inode to { @sec, @nsec }
+ */
+static inline struct timespec64 inode_set_ctime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_ctime_to_ts(inode, ts);
+}
+
+struct timespec64 simple_inode_init_ts(struct inode *inode);
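/*
 * Illustrative sketch (not part of this patch): with the accessors above,
 * the old "inode->i_mtime = inode->i_ctime = current_time(inode)" pattern
 * becomes a single expression; myfs_dirty_times() is hypothetical.
 */
static void myfs_dirty_times(struct inode *inode)
{
	/* stamp ctime with "now" and mirror the same value into mtime */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}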
/*
* Snapshotting support.
*/
-void __sb_end_write(struct super_block *sb, int level);
-int __sb_start_write(struct super_block *sb, int level, bool wait);
+/*
+ * These are internal functions; please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_up_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
@@ -1584,6 +1673,72 @@ int __sb_start_write(struct super_block *sb, int level, bool wait);
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
+}
+
+/**
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_not_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
+}
+
+/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
*
@@ -1640,12 +1795,12 @@ static inline void sb_end_intwrite(struct super_block *sb)
*/
static inline void sb_start_write(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_WRITE, true);
+ __sb_start_write(sb, SB_FREEZE_WRITE);
}
-static inline int sb_start_write_trylock(struct super_block *sb)
+static inline bool sb_start_write_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}
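/*
 * Illustrative sketch (not part of this patch): a modification path takes
 * freeze protection around its work; the trylock variant lets callers back
 * off instead of blocking on a frozen fs.  myfs_modify() is hypothetical.
 */
static int myfs_modify(struct super_block *sb)
{
	if (!sb_start_write_trylock(sb))
		return -EBUSY;		/* fs is frozen, don't block */
	/* ... perform the update under SB_FREEZE_WRITE protection ... */
	sb_end_write(sb);
	return 0;
}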
/**
@@ -1669,10 +1824,10 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*/
static inline void sb_start_pagefault(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
-/*
+/**
* sb_start_intwrite - get write access to a superblock for internal fs purposes
* @sb: the super we write to
*
@@ -1687,36 +1842,71 @@ static inline void sb_start_pagefault(struct super_block *sb)
*/
static inline void sb_start_intwrite(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_FS, true);
+ __sb_start_write(sb, SB_FREEZE_FS);
}
-static inline int sb_start_intwrite_trylock(struct super_block *sb)
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_FS, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}
-
-extern bool inode_owner_or_capable(const struct inode *inode);
+bool inode_owner_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode);
/*
* VFS helper functions..
*/
-extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
-extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
-extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
+int vfs_create(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t, bool);
+int vfs_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
+int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t, dev_t);
+int vfs_symlink(struct mnt_idmap *, struct inode *,
+ struct dentry *, const char *);
+int vfs_link(struct dentry *, struct mnt_idmap *, struct inode *,
+ struct dentry *, struct inode **);
+int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *);
+int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct inode **);
-static inline int vfs_whiteout(struct inode *dir, struct dentry *dentry)
+/**
+ * struct renamedata - contains all information required for renaming
+ * @old_mnt_idmap: idmap of the old mount the inode was found from
+ * @old_dir: parent of source
+ * @old_dentry: source
+ * @new_mnt_idmap: idmap of the new mount the inode was found from
+ * @new_dir: parent of destination
+ * @new_dentry: destination
+ * @delegated_inode: returns an inode needing a delegation break
+ * @flags: rename flags
+ */
+struct renamedata {
+ struct mnt_idmap *old_mnt_idmap;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct mnt_idmap *new_mnt_idmap;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct inode **delegated_inode;
+ unsigned int flags;
+} __randomize_layout;
+
+int vfs_rename(struct renamedata *);
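/*
 * Illustrative sketch (not part of this patch): callers now bundle the
 * arguments in struct renamedata instead of passing them positionally,
 * roughly as stacking filesystems do; locking (lock_rename() etc.) is
 * omitted and myfs_rename_lower() is hypothetical.
 */
static int myfs_rename_lower(struct mnt_idmap *idmap,
			     struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	struct renamedata rd = {
		.old_mnt_idmap	= idmap,
		.old_dir	= old_dir,
		.old_dentry	= old_dentry,
		.new_mnt_idmap	= idmap,
		.new_dir	= new_dir,
		.new_dentry	= new_dentry,
	};

	return vfs_rename(&rd);
}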
+
+static inline int vfs_whiteout(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry)
{
- return vfs_mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
+ return vfs_mknod(idmap, dir, dentry, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV);
}
-extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
- int open_flag);
+struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ umode_t mode, int open_flag,
+ const struct cred *cred);
+struct file *kernel_file_open(const struct path *path, int flags,
+ struct inode *inode, const struct cred *cred);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
@@ -1738,18 +1928,21 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
/*
* VFS file helper functions.
*/
-extern void inode_init_owner(struct inode *inode, const struct inode *dir,
- umode_t mode);
+void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
+ const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
+umode_t mode_strip_sgid(struct mnt_idmap *idmap,
+ const struct inode *dir, umode_t mode);
/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
+ * Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
-typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
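/*
 * Illustrative sketch (not part of this patch): a minimal filldir_t under
 * the new bool convention.  The caller embeds struct dir_context (defined
 * below) and iteration stops as soon as the actor returns false.
 */
struct count_ctx {
	struct dir_context ctx;
	int remaining;
};

static bool count_one(struct dir_context *ctx, const char *name, int namelen,
		      loff_t offset, u64 ino, unsigned int d_type)
{
	struct count_ctx *cc = container_of(ctx, struct count_ctx, ctx);

	return --cc->remaining > 0;	/* false: no more entries wanted */
}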
struct dir_context {
@@ -1796,7 +1989,17 @@ struct dir_context {
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
+
struct iov_iter;
+struct io_uring_cmd;
+struct offset_ctx;
struct file_operations {
struct module *owner;
@@ -1805,8 +2008,8 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
- int (*iopoll)(struct kiocb *kiocb, bool spin);
- int (*iterate) (struct file *, struct dir_context *);
+ int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
+ unsigned int flags);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1819,13 +2022,13 @@ struct file_operations {
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **, void **);
+ void (*splice_eof)(struct file *file);
+ int (*setlease)(struct file *, int, struct file_lease **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1838,36 +2041,59 @@ struct file_operations {
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
+ int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+ unsigned int poll_flags);
} __randomize_layout;
+/* Wrap a directory iterator that needs exclusive inode access */
+int wrap_directory_iterator(struct file *, struct dir_context *,
+ int (*) (struct file *, struct dir_context *));
+#define WRAP_DIR_ITER(x) \
+ static int shared_##x(struct file *file, struct dir_context *ctx) \
+ { return wrap_directory_iterator(file, ctx, x); }
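/*
 * Illustrative sketch (not part of this patch): a filesystem whose readdir
 * still needs exclusive i_rwsem keeps its old iterator and publishes the
 * generated wrapper, e.g.:
 *
 *	WRAP_DIR_ITER(myfs_readdir)	// defines shared_myfs_readdir()
 *
 *	static const struct file_operations myfs_dir_fops = {
 *		.iterate_shared	= shared_myfs_readdir,
 *	};
 */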
+
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
- int (*permission) (struct inode *, int);
- struct posix_acl * (*get_acl)(struct inode *, int);
+ int (*permission) (struct mnt_idmap *, struct inode *, int);
+ struct posix_acl * (*get_inode_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
- int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*create) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
+ const char *);
+ int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
- int (*rename) (struct inode *, struct dentry *,
+ int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t,dev_t);
+ int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
- int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
+ int (*setattr) (struct mnt_idmap *, struct dentry *, struct iattr *);
+ int (*getattr) (struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec64 *, int);
+ int (*update_time)(struct inode *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
- int (*tmpfile) (struct inode *, struct dentry *, umode_t);
- int (*set_acl)(struct inode *, struct posix_acl *, int);
+ int (*tmpfile) (struct mnt_idmap *, struct inode *,
+ struct file *, umode_t);
+ struct posix_acl *(*get_acl)(struct mnt_idmap *, struct dentry *,
+ int);
+ int (*set_acl)(struct mnt_idmap *, struct dentry *,
+ struct posix_acl *, int);
+ int (*fileattr_set)(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct fileattr *fa);
+ int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -1887,27 +2113,17 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
return file->f_op->mmap(file, vma);
}
-ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
- unsigned long nr_segs, unsigned long fast_segs,
- struct iovec *fast_pointer,
- struct iovec **ret_pointer);
-
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags);
-extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *count,
- unsigned int remap_flags);
-extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops);
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags);
+ loff_t *count, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -1917,6 +2133,25 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
+enum freeze_holder {
+ FREEZE_HOLDER_KERNEL = (1U << 0),
+ FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
+};
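/*
 * Illustrative sketch (not part of this patch): a kernel-internal user
 * (say, an online repair job) quiescing a filesystem while still allowing
 * a concurrent userspace FIFREEZE; myfs_repair() is hypothetical.
 */
static int myfs_repair(struct super_block *sb)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
	if (err)
		return err;
	/* ... the filesystem is quiesced here ... */
	return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
}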
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
@@ -1929,9 +2164,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *);
+ int (*freeze_super) (struct super_block *, enum freeze_holder who);
int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *);
+ int (*thaw_super) (struct super_block *, enum freeze_holder who);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -1944,13 +2179,13 @@ struct super_operations {
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
- struct dquot **(*get_dquots)(struct inode *);
+ struct dquot __rcu **(*get_dquots)(struct inode *);
#endif
- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
struct shrink_control *);
+ void (*shutdown)(struct super_block *sb);
};
/*
@@ -1977,6 +2212,7 @@ struct super_operations {
#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
+#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2006,7 +2242,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -2023,36 +2264,18 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
-static inline bool HAS_UNMAPPED_ID(struct inode *inode)
-{
- return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
-}
-
-static inline enum rw_hint file_write_hint(struct file *file)
+static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
+ struct inode *inode)
{
- if (file->f_write_hint != WRITE_LIFE_NOT_SET)
- return file->f_write_hint;
-
- return file_inode(file)->i_write_hint;
-}
-
-static inline int iocb_flags(struct file *file);
-
-static inline u16 ki_hint_validate(enum rw_hint hint)
-{
- typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
-
- if (hint <= max_hint)
- return hint;
- return 0;
+ return !vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode));
}
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
- .ki_flags = iocb_flags(filp),
- .ki_hint = ki_hint_validate(file_write_hint(filp)),
+ .ki_flags = filp->f_iocb_flags,
.ki_ioprio = get_current_ioprio(),
};
}
@@ -2063,7 +2286,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = kiocb_src->ki_flags,
- .ki_hint = kiocb_src->ki_hint,
.ki_ioprio = kiocb_src->ki_ioprio,
.ki_pos = kiocb_src->ki_pos,
};
@@ -2072,8 +2294,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
/*
* Inode state bits. Protected by inode->i_lock
*
- * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
*
* Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
@@ -2082,12 +2304,21 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync(). i_atime is the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
* these changes separately from I_DIRTY_SYNC so that we
* don't have to write inode on fdatasync() when only
- * mtime has changed in it.
+ * e.g. the timestamps have changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ * and we don't want to lose the time update
* I_NEW Serves as both a mutex and completion notification.
* New inodes set I_NEW. If two processes both create
* the same inode, one of them will release its inode and
@@ -2136,6 +2367,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* Used to detect that mark_inode_dirty() should not move
* inode between dirty lists.
*
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2158,6 +2391,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_CREATING (1 << 15)
#define I_DONTCACHE (1 << 16)
#define I_SYNC_QUEUED (1 << 17)
+#define I_PINNING_NETFS_WB (1 << 18)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2174,6 +2408,21 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+/*
+ * Returns true if the given inode itself only has dirty timestamps (its pages
+ * may still be dirty) and isn't currently being allocated or freed.
+ * When lazytime is enabled, filesystems should call this while writing an
+ * inode if they want to opportunistically write the timestamps of other inodes
+ * located very nearby on-disk, e.g. in the same inode block. This returns true
+ * if the given inode is in need of such an opportunistic update. Requires
+ * i_lock, or at least later re-checking under i_lock.
+ */
+static inline bool inode_is_dirtytime_only(struct inode *inode)
+{
+ return (inode->i_state & (I_DIRTY_TIME | I_NEW |
+ I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+}
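/*
 * Illustrative sketch (not part of this patch): checking a neighbouring
 * inode under i_lock before piggy-backing its timestamp update onto a
 * write of the same on-disk block; myfs_can_piggyback() is hypothetical.
 */
static bool myfs_can_piggyback(struct inode *other)
{
	bool ret;

	spin_lock(&other->i_lock);
	ret = inode_is_dirtytime_only(other);
	spin_unlock(&other->i_lock);
	return ret;
}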
+
extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
@@ -2200,6 +2449,8 @@ enum file_time_flags {
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
+int inode_update_time(struct inode *inode, int flags);
+
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
@@ -2207,8 +2458,8 @@ static inline void file_accessed(struct file *file)
}
extern int file_modified(struct file *file);
+int kiocb_modified(struct kiocb *iocb);
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
@@ -2219,6 +2470,7 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
+#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2236,6 +2488,7 @@ struct file_system_type {
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
+ struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};
@@ -2251,6 +2504,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
@@ -2268,6 +2522,7 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
+struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -2288,236 +2543,141 @@ struct super_block *sget(struct file_system_type *type,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
- const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int freeze_super(struct super_block *super);
-extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
+int freeze_super(struct super_block *super, enum freeze_holder who);
+int thaw_super(struct super_block *super, enum freeze_holder who);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
-extern int current_umask(void);
-
-extern void ihold(struct inode * inode);
-extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec64 *, int);
-
-/* /sys/fs */
-extern struct kobject *fs_kobj;
-
-#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
-
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
+static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsigned len)
{
- return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
+ if (WARN_ON(len > sizeof(sb->s_uuid)))
+ len = sizeof(sb->s_uuid);
+ sb->s_uuid_len = len;
+ memcpy(&sb->s_uuid, uuid, len);
}
-/*
- * ... and these candidates should be on SB_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
+/* set sb sysfs name based on sb->s_bdev */
+static inline void super_set_sysfs_name_bdev(struct super_block *sb)
{
- return IS_MANDLOCK(ino) && __mandatory_lock(ino);
+ snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pg", sb->s_bdev);
}
-static inline int locks_verify_locked(struct file *file)
+/* set sb sysfs name based on sb->s_uuid */
+static inline void super_set_sysfs_name_uuid(struct super_block *sb)
{
- if (mandatory_lock(locks_inode(file)))
- return locks_mandatory_locked(file);
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
- struct file *f,
- loff_t size)
-{
- if (!inode->i_flctx || !mandatory_lock(inode))
- return 0;
-
- if (size < inode->i_size) {
- return locks_mandatory_area(inode, f, size, inode->i_size - 1,
- F_WRLCK);
- } else {
- return locks_mandatory_area(inode, f, inode->i_size, size - 1,
- F_WRLCK);
- }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
- return 0;
+ WARN_ON(sb->s_uuid_len != sizeof(sb->s_uuid));
+ snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pU", sb->s_uuid.b);
}
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
- loff_t start, loff_t end, unsigned char type)
+/* set sb sysfs name based on sb->s_id */
+static inline void super_set_sysfs_name_id(struct super_block *sb)
{
- return 0;
+ strscpy(sb->s_sysfs_name, sb->s_id, sizeof(sb->s_sysfs_name));
}
-static inline int __mandatory_lock(struct inode *inode)
+/* try to use something standard before you use this */
+__printf(2, 3)
+static inline void super_set_sysfs_name_generic(struct super_block *sb, const char *fmt, ...)
{
- return 0;
-}
+ va_list args;
-static inline int mandatory_lock(struct inode *inode)
-{
- return 0;
+ va_start(args, fmt);
+ vsnprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), fmt, args);
+ va_end(args);
}
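/*
 * Illustrative sketch (not part of this patch): the tail of a fill_super()
 * wiring up the identifiers above.  The UUID would come from the on-disk
 * superblock; myfs_set_names() is hypothetical.
 */
static void myfs_set_names(struct super_block *sb, const u8 *disk_uuid)
{
	super_set_uuid(sb, disk_uuid, 16);	/* full 16-byte UUID */
	if (sb->s_bdev)
		super_set_sysfs_name_bdev(sb);	/* e.g. "sda1" */
	else
		super_set_sysfs_name_uuid(sb);	/* stable across renames */
}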
-static inline int locks_verify_locked(struct file *file)
-{
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
- size_t size)
-{
- return 0;
-}
-
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
-#ifdef CONFIG_FILE_LOCKING
-static inline int break_lease(struct inode *inode, unsigned int mode)
-{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_LEASE);
- return 0;
-}
-
-static inline int break_deleg(struct inode *inode, unsigned int mode)
-{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_DELEG);
- return 0;
-}
-
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
-{
- int ret;
-
- ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
- if (ret == -EWOULDBLOCK && delegated_inode) {
- *delegated_inode = inode;
- ihold(inode);
- }
- return ret;
-}
-
-static inline int break_deleg_wait(struct inode **delegated_inode)
-{
- int ret;
-
- ret = break_deleg(*delegated_inode, O_WRONLY);
- iput(*delegated_inode);
- *delegated_inode = NULL;
- return ret;
-}
-
-static inline int break_layout(struct inode *inode, bool wait)
-{
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode,
- wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
- FL_LAYOUT);
- return 0;
-}
-
-#else /* !CONFIG_FILE_LOCKING */
-static inline int break_lease(struct inode *inode, unsigned int mode)
-{
- return 0;
-}
-
-static inline int break_deleg(struct inode *inode, unsigned int mode)
-{
- return 0;
-}
-
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
-{
- return 0;
-}
+extern int current_umask(void);
-static inline int break_deleg_wait(struct inode **delegated_inode)
-{
- BUG();
- return 0;
-}
+extern void ihold(struct inode * inode);
+extern void iput(struct inode *);
+int inode_update_timestamps(struct inode *inode, int flags);
+int generic_update_time(struct inode *, int);
-static inline int break_layout(struct inode *inode, bool wait)
-{
- return 0;
-}
+/* /sys/fs */
+extern struct kobject *fs_kobj;
-#endif /* CONFIG_FILE_LOCKING */
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
/* fs/open.c */
struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- int refcnt;
+ atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
+{
+ return mnt_idmap(file->f_path.mnt);
+}
+
+/**
+ * is_idmapped_mnt - check whether a mount is mapped
+ * @mnt: the mount to check
+ *
+ * If @mnt has a non-@nop_mnt_idmap attached to it, then @mnt is mapped.
+ *
+ * Return: true if mount is mapped, false if not.
+ */
+static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
+{
+ return mnt_idmap(mnt) != &nop_mnt_idmap;
+}
+
extern long vfs_truncate(const struct path *, loff_t);
-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
- struct file *filp);
+int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
+ unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
-extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
-extern struct file * dentry_open(const struct path *, int, const struct cred *);
-extern struct file * open_with_fake_path(const struct path *, int,
- struct inode*, const struct cred *);
+static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
+ const char *name, int flags, umode_t mode)
+{
+ return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
+ name, flags, mode);
+}
+struct file *dentry_open(const struct path *path, int flags,
+ const struct cred *creds);
+struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+ const struct cred *cred);
+struct path *backing_file_user_path(struct file *f);
+
+/*
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path and inode number are displayed to the
+ * user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
+ * path and inode number to display to the user, which is the path of the fd
+ * that user has requested to map and the inode number that would be returned
+ * by fstat() on that same fd.
+ */
+/* Get the path to display in /proc/<pid>/maps */
+static inline const struct path *file_user_path(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return backing_file_user_path(f);
+ return &f->f_path;
+}
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
+}
+
static inline struct file *file_clone_open(struct file *file)
{
return dentry_open(&file->f_path, file->f_flags, file->f_cred);
@@ -2525,6 +2685,7 @@ static inline struct file *file_clone_open(struct file *file)
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
@@ -2533,6 +2694,15 @@ extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
+/* Helper for the simple case when original dentry is used */
+static inline int finish_open_simple(struct file *file, int error)
+{
+ if (error)
+ return error;
+
+ return finish_open(file, file->f_path.dentry, NULL);
+}
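/*
 * Illustrative sketch (not part of this patch): the tail of a hypothetical
 * ->atomic_open() once the file has been created on the original dentry;
 * myfs_do_create() is hypothetical.
 */
static int myfs_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned int open_flag,
			    umode_t mode)
{
	int error = myfs_do_create(dir, dentry, mode);

	return finish_open_simple(file, error);
}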
+
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
@@ -2588,48 +2758,6 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-
-static inline void invalidate_remote_inode(struct inode *inode)
-{
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- invalidate_mapping_pages(inode->i_mapping, 0, -1);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
-extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
- loff_t start_byte, loff_t end_byte);
-
-static inline int filemap_fdatawait(struct address_space *mapping)
-{
- return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
-}
-
-extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
- loff_t lstart, loff_t lend);
-extern int __filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end, int sync_mode);
-extern int filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end);
-extern int filemap_check_errors(struct address_space *mapping);
-extern void __filemap_set_wb_err(struct address_space *mapping, int err);
-
-static inline int filemap_write_and_wait(struct address_space *mapping)
-{
- return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
-}
-
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
@@ -2641,94 +2769,6 @@ static inline int file_write_and_wait(struct file *file)
return file_write_and_wait_range(file, 0, LLONG_MAX);
}
-/**
- * filemap_set_wb_err - set a writeback error on an address_space
- * @mapping: mapping in which to set writeback error
- * @err: error to be set in mapping
- *
- * When writeback fails in some way, we must record that error so that
- * userspace can be informed when fsync and the like are called. We endeavor
- * to report errors on any file that was open at the time of the error. Some
- * internal callers also need to know when writeback errors have occurred.
- *
- * When a writeback error occurs, most filesystems will want to call
- * filemap_set_wb_err to record the error in the mapping so that it will be
- * automatically reported whenever fsync is called on the file.
- */
-static inline void filemap_set_wb_err(struct address_space *mapping, int err)
-{
- /* Fastpath for common case of no error */
- if (unlikely(err))
- __filemap_set_wb_err(mapping, err);
-}
-
-/**
- * filemap_check_wb_err - has an error occurred since the mark was sampled?
- * @mapping: mapping to check for writeback errors
- * @since: previously-sampled errseq_t
- *
- * Grab the errseq_t value from the mapping, and see if it has changed "since"
- * the given value was sampled.
- *
- * If it has then report the latest error set, otherwise return 0.
- */
-static inline int filemap_check_wb_err(struct address_space *mapping,
- errseq_t since)
-{
- return errseq_check(&mapping->wb_err, since);
-}
-
-/**
- * filemap_sample_wb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
- *
- * Writeback errors are always reported relative to a particular sample point
- * in the past. This function provides those sample points.
- */
-static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
-{
- return errseq_sample(&mapping->wb_err);
-}
-
-/**
- * file_sample_sb_err - sample the current errseq_t to test for later errors
- * @file: file pointer to be sampled
- *
- * Grab the most current superblock-level errseq_t value for the given
- * struct file.
- */
-static inline errseq_t file_sample_sb_err(struct file *file)
-{
- return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
-}
-
-static inline int filemap_nr_thps(struct address_space *mapping)
-{
-#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- return atomic_read(&mapping->nr_thps);
-#else
- return 0;
-#endif
-}
-
-static inline void filemap_nr_thps_inc(struct address_space *mapping)
-{
-#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- atomic_inc(&mapping->nr_thps);
-#else
- WARN_ON_ONCE(1);
-#endif
-}
-
-static inline void filemap_nr_thps_dec(struct address_space *mapping)
-{
-#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- atomic_dec(&mapping->nr_thps);
-#else
- WARN_ON_ONCE(1);
-#endif
-}
-
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
@@ -2736,6 +2776,12 @@ extern int vfs_fsync(struct file *file, int datasync);
extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
unsigned int flags);
+static inline bool iocb_is_dsync(const struct kiocb *iocb)
+{
+ return (iocb->ki_flags & IOCB_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host);
+}
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -2743,7 +2789,7 @@ extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
- if (iocb->ki_flags & IOCB_DSYNC) {
+ if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
@@ -2766,47 +2812,118 @@ static inline int bmap(struct inode *inode, sector_t *block)
}
#endif
-extern int notify_change(struct dentry *, struct iattr *, struct inode **);
-extern int inode_permission(struct inode *, int);
-extern int generic_permission(struct inode *, int);
-extern int __check_sticky(struct inode *dir, struct inode *inode);
+int notify_change(struct mnt_idmap *, struct dentry *,
+ struct iattr *, struct inode **);
+int inode_permission(struct mnt_idmap *, struct inode *, int);
+int generic_permission(struct mnt_idmap *, struct inode *, int);
+static inline int file_permission(struct file *file, int mask)
+{
+ return inode_permission(file_mnt_idmap(file),
+ file_inode(file), mask);
+}
+static inline int path_permission(const struct path *path, int mask)
+{
+ return inode_permission(mnt_idmap(path->mnt),
+ d_inode(path->dentry), mask);
+}
+int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
+ struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
+static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
+{
+ return (inode->i_mode ^ mode) & S_IFMT;
+}
+
+/**
+ * file_start_write - get write access to a superblock for regular file io
+ * @file: the file we want to write to
+ *
+ * This is a variant of sb_start_write() which is a no-op on non-regular files.
+ * Should be matched with a call to file_end_write().
+ */
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+ sb_start_write(file_inode(file)->i_sb);
}
static inline bool file_start_write_trylock(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
- return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+ return sb_start_write_trylock(file_inode(file)->i_sb);
}
+/**
+ * file_end_write - drop write access to a superblock of a regular file
+ * @file: the file we wrote to
+ *
+ * Should be matched with a call to file_start_write().
+ */
static inline void file_end_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(file_inode(file)->i_sb);
+}
+
+/**
+ * kiocb_start_write - get write access to a superblock for async file io
+ * @iocb: the io context we want to submit the write with
+ *
+ * This is a variant of sb_start_write() for async io submission.
+ * Should be matched with a call to kiocb_end_write().
+ */
+static inline void kiocb_start_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ sb_start_write(inode->i_sb);
+ /*
+ * Fool lockdep by telling it the lock got released so that it
+ * doesn't complain about the held lock when we return to userspace.
+ */
+ __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * kiocb_end_write - drop write access to a superblock after async file io
+ * @iocb: the io context we submitted the write with
+ *
+ * Should be matched with a call to kiocb_start_write().
+ */
+static inline void kiocb_end_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Tell lockdep we inherited freeze protection from submission thread.
+ */
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(inode->i_sb);
}
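A sketch of the intended pairing for async submission (the my_* functions are hypothetical); the submit side takes freeze protection and the completion side, possibly on another thread, drops it:

	static void my_submit_async_write(struct kiocb *iocb)
	{
		kiocb_start_write(iocb);	/* freeze protection + lockdep hand-off */
		/* ... queue the I/O; do not block on completion here ... */
	}

	static void my_write_done(struct kiocb *iocb, ssize_t ret)
	{
		kiocb_end_write(iocb);		/* lockdep re-acquire, then release */
		iocb->ki_complete(iocb, ret);
	}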
/*
+ * This is used for regular files where some users -- especially the
+ * currently executed binary in a process, previously handled via
+ * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
+ * read-write shared) accesses.
+ *
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
- * This is used for regular files.
- * We cannot support write (and maybe mmap read-write shared) accesses and
- * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
- * can have the following values:
- * 0: no writers, no VM_DENYWRITE mappings
- * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
- * > 0: (i_writecount) users are writing to the file.
+ * deny_write_access() denies write access to a file.
+ * allow_write_access() re-enables write access to a file.
+ *
+ * The i_writecount field of an inode can have the following values:
+ * 0: no write access, no denied write access
+ * < 0: (-i_writecount) users that denied write access to the file.
+ * > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
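For the negative-count case the existing helpers apply directly; a hedged sketch (my_inspect is hypothetical) of excluding writers the way the ETXTBSY machinery does:

	static int my_inspect(struct file *file)
	{
		int ret = deny_write_access(file);	/* -ETXTBSY if i_writecount > 0 */

		if (ret)
			return ret;
		/* i_writecount is now negative; open(O_WRONLY) fails with ETXTBSY */
		allow_write_access(file);
		return 0;
	}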
@@ -2839,8 +2956,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode)
#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
- BUG_ON(!atomic_read(&inode->i_readcount));
- atomic_dec(&inode->i_readcount);
+ BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
}
static inline void i_readcount_inc(struct inode *inode)
{
@@ -2858,45 +2974,6 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
-#define __kernel_read_file_id(id) \
- id(UNKNOWN, unknown) \
- id(FIRMWARE, firmware) \
- id(FIRMWARE_PREALLOC_BUFFER, firmware) \
- id(FIRMWARE_EFI_EMBEDDED, firmware) \
- id(MODULE, kernel-module) \
- id(KEXEC_IMAGE, kexec-image) \
- id(KEXEC_INITRAMFS, kexec-initramfs) \
- id(POLICY, security-policy) \
- id(X509_CERTIFICATE, x509-certificate) \
- id(MAX_ID, )
-
-#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
-#define __fid_stringify(dummy, str) #str,
-
-enum kernel_read_file_id {
- __kernel_read_file_id(__fid_enumify)
-};
-
-static const char * const kernel_read_file_str[] = {
- __kernel_read_file_id(__fid_stringify)
-};
-
-static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
-{
- if ((unsigned)id >= READING_MAX_ID)
- return kernel_read_file_str[READING_UNKNOWN];
-
- return kernel_read_file_str[id];
-}
-
-extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
@@ -2909,6 +2986,17 @@ extern bool path_is_under(const struct path *, const struct path *);
extern char *file_path(struct file *, char *, int);
+/**
+ * is_dot_dotdot - returns true only if @name is "." or ".."
+ * @name: file name to check
+ * @len: length of file name, in bytes
+ */
+static inline bool is_dot_dotdot(const char *name, size_t len)
+{
+ return len && unlikely(name[0] == '.') &&
+ (len == 1 || (len == 2 && name[1] == '.'));
+}
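A short hypothetical caller, rejecting the reserved names before creating an entry:

	static int myfs_check_name(const struct qstr *name)
	{
		if (is_dot_dotdot((const char *)name->name, name->len))
			return -EINVAL;	/* "." and ".." are never valid new entries */
		return 0;
	}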
+
#include <linux/err.h>
/* needed for stackable file system support */
@@ -2925,8 +3013,7 @@ extern int inode_needs_sync(struct inode *inode);
extern int generic_delete_inode(struct inode *inode);
static inline int generic_drop_inode(struct inode *inode)
{
- return !inode->i_nlink || inode_unhashed(inode) ||
- (inode->i_state & I_DONTCACHE);
+ return !inode->i_nlink || inode_unhashed(inode);
}
extern void d_mark_dontcache(struct inode *inode);
@@ -2962,9 +3049,10 @@ extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
+void dump_mapping(const struct address_space *);
/*
- * Userspace may rely on the the inode number being non-zero. For example, glibc
+ * Userspace may rely on the inode number being non-zero. For example, glibc
* simply ignores files with zero i_ino in unlink() and other places.
*
* As an additional complication, if userspace was compiled with
@@ -2985,8 +3073,21 @@ extern void __destroy_inode(struct inode *);
extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
-extern int should_remove_suid(struct dentry *);
+extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
+extern int file_remove_privs_flags(struct file *file, unsigned int flags);
extern int file_remove_privs(struct file *);
+int setattr_should_drop_sgid(struct mnt_idmap *idmap,
+ const struct inode *inode);
+
+/*
+ * This must be used for allocating filesystems specific inodes to set
+ * up the inode reclaim context correctly.
+ */
+static inline void *
+alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp)
+{
+ return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp);
+}
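A sketch of the documented usage, assuming a hypothetical myfs_inode_cachep kmem cache whose objects embed a vfs_inode:

	static struct inode *myfs_alloc_inode(struct super_block *sb)
	{
		/* charged to sb->s_inode_lru so memcg-aware shrinkers work */
		struct myfs_inode *mi = alloc_inode_sb(sb, myfs_inode_cachep, GFP_KERNEL);

		return mi ? &mi->vfs_inode : NULL;
	}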
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -3002,6 +3103,7 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
+extern void inode_add_lru(struct inode *inode);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
@@ -3009,20 +3111,19 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
-extern int generic_remap_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *count, unsigned int remap_flags);
+int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
+extern int generic_write_check_limits(struct file *file, loff_t pos,
+ loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
-extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t *count, unsigned int flags);
-extern ssize_t generic_file_buffered_read(struct kiocb *iocb,
- struct iov_iter *to, ssize_t already_read);
+ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
+ ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
+ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ ssize_t direct_written, ssize_t buffered_written);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
@@ -3033,28 +3134,21 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
-/* fs/block_dev.c */
-extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync);
-extern void block_sync_page(struct page *page);
-
/* fs/splice.c */
-extern ssize_t generic_file_splice_read(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
+ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+ssize_t copy_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
- struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
+#define no_llseek NULL
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
@@ -3063,6 +3157,7 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
+int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);
@@ -3079,12 +3174,10 @@ enum {
DIO_SKIP_HOLES = 0x02,
};
-void dio_end_io(struct bio *bio);
-
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
- dio_iodone_t end_io, dio_submit_t submit_io,
+ dio_iodone_t end_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
@@ -3093,13 +3186,13 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
+ get_block, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
void inode_dio_wait(struct inode *inode);
-/*
+/**
 * inode_dio_begin - signal start of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -3111,7 +3204,7 @@ static inline void inode_dio_begin(struct inode *inode)
atomic_inc(&inode->i_dio_count);
}
-/*
+/**
 * inode_dio_end - signal finish of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -3124,11 +3217,6 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
-/*
- * Warn about a page cache invalidation failure during a direct I/O write.
- */
-void dio_warn_stale_pagecache(struct file *filp);
-
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -3141,12 +3229,11 @@ extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
- int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-extern void generic_fillattr(struct inode *, struct kstat *);
+void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
+void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3165,30 +3252,18 @@ extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
-extern int vfs_statx(int, const char __user *, int, struct kstat *, u32);
-extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
+int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
+ int flags);
+int vfs_fstat(int fd, struct kstat *stat);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, filename, stat, 0);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
-static inline int vfs_fstatat(int dfd, const char __user *filename,
- struct kstat *stat, int flags)
-{
- return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
-}
-static inline int vfs_fstat(int fd, struct kstat *stat)
-{
- return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
-}
-
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
@@ -3196,10 +3271,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
-extern struct super_block *get_super_thawed(struct block_device *);
-extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
@@ -3210,33 +3281,34 @@ extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
-extern int simple_setattr(struct dentry *, struct iattr *);
-extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int simple_setattr(struct mnt_idmap *, struct dentry *,
+ struct iattr *);
+extern int simple_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *, unsigned int);
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename(struct mnt_idmap *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
+ unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
-extern int noop_set_page_dirty(struct page *page);
-extern void noop_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
-extern int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
-extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
+extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
@@ -3257,26 +3329,45 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count);
+struct offset_ctx {
+ struct maple_tree mt;
+ unsigned long next_offset;
+};
+
+void simple_offset_init(struct offset_ctx *octx);
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+int simple_offset_empty(struct dentry *dentry);
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+void simple_offset_destroy(struct offset_ctx *octx);
+
+extern const struct file_operations simple_offset_dir_operations;
+
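A hedged sketch of wiring a directory to the new offset map (myfs names hypothetical); the ctx would normally be reached through a superblock get_offset_ctx operation:

	struct myfs_dir_info {
		struct offset_ctx octx;
	};

	static void myfs_setup_dir(struct inode *dir, struct myfs_dir_info *di)
	{
		simple_offset_init(&di->octx);
		dir->i_fop = &simple_offset_dir_operations;	/* offset-based readdir/llseek */
	}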
extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
-extern int buffer_migrate_page_norefs(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
+extern void generic_set_sb_d_ops(struct super_block *sb);
+
+static inline bool sb_has_encoding(const struct super_block *sb)
+{
+#if IS_ENABLED(CONFIG_UNICODE)
+ return !!sb->s_encoding;
#else
-#define buffer_migrate_page NULL
-#define buffer_migrate_page_norefs NULL
+ return false;
#endif
+}
-extern int setattr_prepare(struct dentry *, struct iattr *);
+int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ unsigned int ia_valid);
+int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
-extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+void setattr_copy(struct mnt_idmap *, struct inode *inode,
+ const struct iattr *attr);
extern int file_update_time(struct file *file);
@@ -3289,7 +3380,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
struct inode *inode;
- if (!vma->vm_file)
+ if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
return false;
if (!vma_is_dax(vma))
return false;
@@ -3306,7 +3397,7 @@ static inline int iocb_flags(struct file *file)
res |= IOCB_APPEND;
if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
- if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
@@ -3317,24 +3408,30 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
int kiocb_flags = 0;
+ /* make sure there's no overlap between RWF and private IOCB flags */
+ BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
+
if (!flags)
return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
+ if (unlikely((flags & RWF_APPEND) && (flags & RWF_NOAPPEND)))
+ return -EINVAL;
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO;
+ kiocb_flags |= IOCB_NOIO;
}
- if (flags & RWF_HIPRI)
- kiocb_flags |= IOCB_HIPRI;
- if (flags & RWF_DSYNC)
- kiocb_flags |= IOCB_DSYNC;
+ kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
- kiocb_flags |= (IOCB_DSYNC | IOCB_SYNC);
- if (flags & RWF_APPEND)
- kiocb_flags |= IOCB_APPEND;
+ kiocb_flags |= IOCB_DSYNC;
+
+ if ((flags & RWF_NOAPPEND) && (ki->ki_flags & IOCB_APPEND)) {
+ if (IS_APPEND(file_inode(ki->ki_filp)))
+ return -EPERM;
+ ki->ki_flags &= ~IOCB_APPEND;
+ }
ki->ki_flags |= kiocb_flags;
return 0;
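From the submission side, the effect of the new RWF_NOAPPEND handling can be seen with pwritev2(); a hedged userspace sketch (write_at is hypothetical, compile with _GNU_SOURCE for pwritev2() and the RWF_* flags):

	#include <sys/uio.h>

	static ssize_t write_at(int fd, const void *buf, size_t len, off_t off)
	{
		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

		/* honours off even if fd was opened O_APPEND; fails with EPERM
		 * on append-only inodes, EINVAL when combined with RWF_APPEND */
		return pwritev2(fd, &iov, 1, off, RWF_NOAPPEND);
	}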
@@ -3362,7 +3459,7 @@ static inline ino_t parent_ino(struct dentry *dentry)
*/
struct simple_transaction_argresp {
ssize_t size;
- char data[0];
+ char data[];
};
#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
@@ -3391,7 +3488,7 @@ void simple_transaction_set(struct file *file, size_t n);
* All attributes contain a text representation of a numeric value
* that are accessed with the get() and set() functions.
*/
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -3402,10 +3499,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
- .write = simple_attr_write, \
+ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
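A minimal hypothetical attribute showing why the signed variant exists: simple_attr_write() rejects "-1" from userspace, simple_attr_write_signed() accepts it:

	static s64 my_threshold = -1;

	static int my_get(void *data, u64 *val) { *val = my_threshold; return 0; }
	static int my_set(void *data, u64 val) { my_threshold = val; return 0; }
	DEFINE_SIMPLE_ATTRIBUTE_SIGNED(my_threshold_fops, my_get, my_set, "%lld\n");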
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
@@ -3420,15 +3523,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_dentry(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_inodes(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int __init get_filesystem_list(char *buf);
+int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
@@ -3439,15 +3538,16 @@ int __init get_filesystem_list(char *buf);
static inline bool is_sxid(umode_t mode)
{
- return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+ return mode & (S_ISUID | S_ISGID);
}
-static inline int check_sticky(struct inode *dir, struct inode *inode)
+static inline int check_sticky(struct mnt_idmap *idmap,
+ struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
- return __check_sticky(dir, inode);
+ return __check_sticky(idmap, dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
@@ -3465,17 +3565,17 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+ file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
+ parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
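With the boolean convention, a hypothetical ->iterate_shared implementation simply stops emitting once an emitter returns false (user buffer full):

	static int myfs_readdir(struct file *file, struct dir_context *ctx)
	{
		if (!dir_emit_dots(file, ctx))
			return 0;
		/* for each remaining entry:
		 *	if (!dir_emit(ctx, name, len, ino, type))
		 *		return 0;
		 *	ctx->pos++;
		 */
		return 0;
	}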
@@ -3514,36 +3614,4 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
-#if defined(CONFIG_IO_URING)
-extern struct sock *io_uring_get_socket(struct file *file);
-#else
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
- return NULL;
-}
-#endif
-
-int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
- unsigned int flags);
-
-int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
- struct fsxattr *fa);
-
-static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags)
-{
- memset(fa, 0, sizeof(*fa));
- fa->fsx_xflags = xflags;
-}
-
-/*
- * Flush file data before changing attributes. Caller must hold any locks
- * required to prevent further writes to this file until we're done setting
- * flags.
- */
-static inline int inode_drain_writes(struct inode *inode)
-{
- inode_dio_wait(inode);
- return filemap_write_and_wait(inode->i_mapping);
-}
-
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h
new file mode 100644
index 000000000000..83be38d6d413
--- /dev/null
+++ b/include/linux/fs_api.h
@@ -0,0 +1 @@
+#include <linux/fs.h>
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 37e1e8f7f08d..c13e99cbbf81 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -99,17 +99,17 @@ struct fs_context {
const struct cred *cred; /* The mounter's credentials */
struct p_log log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
- void *security; /* Linux S&M options */
+ void *security; /* LSM options */
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
unsigned int sb_flags_mask; /* Superblock flags that were changed */
unsigned int s_iflags; /* OR'd with sb->s_iflags */
- unsigned int lsm_flags; /* Information flags from the fs to the LSM */
enum fs_context_purpose purpose:8;
enum fs_context_phase phase:8; /* The phase the context is in */
bool need_free:1; /* Need to call ops->free() */
bool global:1; /* Goes into &init_user_ns */
bool oldapi:1; /* Coming from mount(2) */
+ bool exclusive:1; /* create new superblock, reject existing one */
};
struct fs_context_operations {
@@ -136,23 +136,16 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
const char *value, size_t v_size);
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **));
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
-
-/*
- * sget() wrappers to be called from the ->get_tree() op.
- */
-enum vfs_get_super_keying {
- vfs_get_single_super, /* Only one such superblock may exist */
- vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */
- vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */
- vfs_get_independent_super, /* Multiple independent superblocks may exist */
-};
-extern int vfs_get_super(struct fs_context *fc,
- enum vfs_get_super_keying keying,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
+int reconfigure_single(struct super_block *s,
+ int flags, void *data);
extern int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
@@ -160,14 +153,13 @@ extern int get_tree_nodev(struct fs_context *fc,
extern int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
-extern int get_tree_single_reconf(struct fs_context *fc,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
extern int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
void *key);
+int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc);
extern int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
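A hedged sketch of the new vfs_parse_monolithic_sep() hook, for filesystems whose legacy mount data is not comma separated (myfs names hypothetical):

	static char *myfs_sep(char **s)
	{
		return strsep(s, ";");	/* options split on ';' instead of ',' */
	}

	static int myfs_parse_monolithic(struct fs_context *fc, void *data)
	{
		return vfs_parse_monolithic_sep(fc, data, myfs_sep);
	}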
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
deleted file mode 100644
index 77d783f71527..000000000000
--- a/include/linux/fs_enet_pd.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Platform information definitions for the
- * universal Freescale Ethernet driver.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_ENET_PD_H
-#define FS_ENET_PD_H
-
-#include <linux/clk.h>
-#include <linux/string.h>
-#include <linux/of_mdio.h>
-#include <linux/if_ether.h>
-#include <asm/types.h>
-
-#define FS_ENET_NAME "fs_enet"
-
-enum fs_id {
- fsid_fec1,
- fsid_fec2,
- fsid_fcc1,
- fsid_fcc2,
- fsid_fcc3,
- fsid_scc1,
- fsid_scc2,
- fsid_scc3,
- fsid_scc4,
-};
-
-#define FS_MAX_INDEX 9
-
-static inline int fs_get_fec_index(enum fs_id id)
-{
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id - fsid_fec1;
- return -1;
-}
-
-static inline int fs_get_fcc_index(enum fs_id id)
-{
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id - fsid_fcc1;
- return -1;
-}
-
-static inline int fs_get_scc_index(enum fs_id id)
-{
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id - fsid_scc1;
- return -1;
-}
-
-static inline int fs_fec_index2id(int index)
-{
- int id = fsid_fec1 + index - 1;
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id;
- return FS_MAX_INDEX;
- }
-
-static inline int fs_fcc_index2id(int index)
-{
- int id = fsid_fcc1 + index - 1;
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id;
- return FS_MAX_INDEX;
-}
-
-static inline int fs_scc_index2id(int index)
-{
- int id = fsid_scc1 + index - 1;
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id;
- return FS_MAX_INDEX;
-}
-
-enum fs_mii_method {
- fsmii_fixed,
- fsmii_fec,
- fsmii_bitbang,
-};
-
-enum fs_ioport {
- fsiop_porta,
- fsiop_portb,
- fsiop_portc,
- fsiop_portd,
- fsiop_porte,
-};
-
-struct fs_mii_bit {
- u32 offset;
- u8 bit;
- u8 polarity;
-};
-struct fs_mii_bb_platform_info {
- struct fs_mii_bit mdio_dir;
- struct fs_mii_bit mdio_dat;
- struct fs_mii_bit mdc_dat;
- int delay; /* delay in us */
- int irq[32]; /* irqs per phy's */
-};
-
-struct fs_platform_info {
-
- void(*init_ioports)(struct fs_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
-
- u32 cp_page; /* CPM page */
- u32 cp_block; /* CPM sblock */
- u32 cp_command; /* CPM page/sblock/mcn */
-
- u32 clk_trx; /* some stuff for pins & mux configuration*/
- u32 clk_rx;
- u32 clk_tx;
- u32 clk_route;
- u32 clk_mask;
-
- u32 mem_offset;
- u32 dpram_offset;
- u32 fcc_regs_c;
-
- u32 device_flags;
-
- struct device_node *phy_node;
- const struct fs_mii_bus_info *bus_info;
-
- int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[ETH_ALEN]; /* mac address */
- int rx_copybreak; /* limit we copy small frames */
- int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
- int has_phy; /* if the network is phy container as well...*/
-
- struct clk *clk_per; /* 'per' clock for register access */
-};
-struct fs_mii_fec_platform_info {
- u32 irq[32];
- u32 mii_speed;
-};
-
-static inline int fs_get_id(struct fs_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SCC"))
- return fs_scc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FCC"))
- return fs_fcc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FEC"))
- return fs_fec_index2id(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 2eab6d5f6736..01542c4b87a2 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -42,7 +42,7 @@ struct fs_parameter_spec {
u8 opt; /* Option number (returned by fs_parse()) */
unsigned short flags;
#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
-#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */
+#define fs_param_can_be_empty 0x0004 /* "xxx=" is allowed */
#define fs_param_deprecated 0x0008 /* The param is deprecated */
const void *data;
};
@@ -76,6 +76,7 @@ static inline int fs_parse(struct fs_context *fc,
extern int fs_lookup_param(struct fs_context *fc,
struct fs_parameter *param,
bool want_bdev,
+ unsigned int flags,
struct path *_path);
extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
@@ -120,7 +121,7 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *16))
+ __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
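A small hypothetical spec table combining the corrected fsparam_u32hex() with the renamed fs_param_can_be_empty flag:

	enum { Opt_mask, Opt_label };

	static const struct fs_parameter_spec myfs_param_specs[] = {
		fsparam_u32hex("mask", Opt_mask),		/* e.g. mask=0xff */
		__fsparam(fs_param_is_string, "label", Opt_label,
			  fs_param_can_be_empty, NULL),		/* "label=" allowed */
		{}
	};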
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 54210a42c30d..2b1f74b24070 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -16,15 +16,15 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
- dest->i_ctime = src->i_ctime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
+ inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
#endif /* _LINUX_FS_STACK_H */
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
deleted file mode 100644
index 36b61ff39277..000000000000
--- a/include/linux/fs_uart_pd.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Platform information definitions for the CPM Uart driver.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_UART_PD_H
-#define FS_UART_PD_H
-
-#include <asm/types.h>
-
-enum fs_uart_id {
- fsid_smc1_uart,
- fsid_smc2_uart,
- fsid_scc1_uart,
- fsid_scc2_uart,
- fsid_scc3_uart,
- fsid_scc4_uart,
- fs_uart_nr,
-};
-
-static inline int fs_uart_id_scc2fsid(int id)
-{
- return fsid_scc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2scc(int id)
-{
- return id - fsid_scc1_uart + 1;
-}
-
-static inline int fs_uart_id_smc2fsid(int id)
-{
- return fsid_smc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2smc(int id)
-{
- return id - fsid_smc1_uart + 1;
-}
-
-struct fs_uart_platform_info {
- void(*init_ioports)(struct fs_uart_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
- u32 uart_clk;
- u8 tx_num_fifo;
- u8 tx_buf_size;
- u8 rx_num_fifo;
- u8 rx_buf_size;
- u8 brg;
- u8 clk_rx;
- u8 clk_tx;
-};
-
-static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SMC"))
- return fs_uart_id_smc2fsid(fpi->fs_no);
- if(strstr(fpi->fs_type, "SCC"))
- return fs_uart_id_scc2fsid(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 3f0b19dcfae7..bdf7f3eddf0a 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching backing cache interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
@@ -15,208 +15,34 @@
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#define NR_MAXCACHES BITS_PER_LONG
-
-struct fscache_cache;
-struct fscache_cache_ops;
-struct fscache_object;
-struct fscache_operation;
-
-enum fscache_obj_ref_trace {
- fscache_obj_get_add_to_deps,
- fscache_obj_get_queue,
- fscache_obj_put_alloc_fail,
- fscache_obj_put_attach_fail,
- fscache_obj_put_drop_obj,
- fscache_obj_put_enq_dep,
- fscache_obj_put_queue,
- fscache_obj_put_work,
- fscache_obj_ref__nr_traces
+enum fscache_cache_trace;
+enum fscache_cookie_trace;
+enum fscache_access_trace;
+
+enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+ FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
+ FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
+ FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
+ FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
+#define NR__FSCACHE_CACHE_STATE (FSCACHE_CACHE_IS_WITHDRAWN + 1)
};
/*
- * cache tag definition
- */
-struct fscache_cache_tag {
- struct list_head link;
- struct fscache_cache *cache; /* cache referred to by this tag */
- unsigned long flags;
-#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
- atomic_t usage;
- char name[]; /* tag name */
-};
-
-/*
- * cache definition
+ * Cache cookie.
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
- struct fscache_cache_tag *tag; /* tag representing this cache */
- struct kobject *kobj; /* system representation of this cache */
- struct list_head link; /* link in list of caches */
- size_t max_index_size; /* maximum size of index data */
- char identifier[36]; /* cache label */
-
- /* node management */
- struct work_struct op_gc; /* operation garbage collector */
- struct list_head object_list; /* list of data/index objects */
- struct list_head op_gc_list; /* list of ops to be deleted */
- spinlock_t object_list_lock;
- spinlock_t op_gc_list_lock;
+ struct list_head cache_link; /* Link in cache list */
+ void *cache_priv; /* Private cache data (or NULL) */
+ refcount_t ref;
+ atomic_t n_volumes; /* Number of active volumes */
+ atomic_t n_accesses; /* Number of in-progress accesses on the cache */
atomic_t object_count; /* no. of live objects in this cache */
- struct fscache_object *fsdef; /* object for the fsdef index */
- unsigned long flags;
-#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
-#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
-};
-
-extern wait_queue_head_t fscache_cache_cleared_wq;
-
-/*
- * operation to be applied to a cache object
- * - retrieval initiation operations are done in the context of the process
- * that issued them, and not in an async thread pool
- */
-typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
-
-enum fscache_operation_state {
- FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
- FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
- FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
- FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
- FSCACHE_OP_ST_COMPLETE, /* Op is complete */
- FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
- FSCACHE_OP_ST_DEAD /* Op is now dead */
-};
-
-struct fscache_operation {
- struct work_struct work; /* record for async ops */
- struct list_head pend_link; /* link in object->pending_ops */
- struct fscache_object *object; /* object to be operated upon */
-
- unsigned long flags;
-#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */
-#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
-#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
-#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
-
- enum fscache_operation_state state;
- atomic_t usage;
- unsigned debug_id; /* debugging ID */
-
- /* operation processor callback
- * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
- * the op in a non-pool thread */
- fscache_operation_processor_t processor;
-
- /* Operation cancellation cleanup (optional) */
- fscache_operation_cancel_t cancel;
-
- /* operation releaser */
- fscache_operation_release_t release;
-};
-
-extern atomic_t fscache_op_debug_id;
-extern void fscache_op_work_func(struct work_struct *work);
-
-extern void fscache_enqueue_operation(struct fscache_operation *);
-extern void fscache_op_complete(struct fscache_operation *, bool);
-extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_cookie *,
- struct fscache_operation *,
- fscache_operation_processor_t,
- fscache_operation_cancel_t,
- fscache_operation_release_t);
-
-/*
- * data read operation
- */
-struct fscache_retrieval {
- struct fscache_operation op;
- struct fscache_cookie *cookie; /* The netfs cookie */
- struct address_space *mapping; /* netfs pages */
- fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
- void *context; /* netfs read context (pinned) */
- struct list_head to_do; /* list of things to be done by the backend */
- unsigned long start_time; /* time at which retrieval started */
- atomic_t n_pages; /* number of pages to be retrieved */
-};
-
-typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp);
-
-typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp);
-
-/**
- * fscache_get_retrieval - Get an extra reference on a retrieval operation
- * @op: The retrieval operation to get a reference on
- *
- * Get an extra reference on a retrieval operation.
- */
-static inline
-struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
-{
- atomic_inc(&op->op.usage);
- return op;
-}
-
-/**
- * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
- * @op: The retrieval operation affected
- *
- * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
- */
-static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
-{
- fscache_enqueue_operation(&op->op);
-}
-
-/**
- * fscache_retrieval_complete - Record (partial) completion of a retrieval
- * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
- */
-static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages)
-{
- if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
- fscache_op_complete(&op->op, false);
-}
-
-/**
- * fscache_put_retrieval - Drop a reference to a retrieval operation
- * @op: The retrieval operation affected
- *
- * Drop a reference to a retrieval operation.
- */
-static inline void fscache_put_retrieval(struct fscache_retrieval *op)
-{
- fscache_put_operation(&op->op);
-}
-
-/*
- * cached page storage work item
- * - used to do three things:
- * - batch writes to the cache
- * - do cache writes asynchronously
- * - defer writes until cache object lookup completion
- */
-struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit; /* don't write more than this */
+ unsigned int debug_id;
+ enum fscache_cache_state state;
+ char *name;
};
/*
@@ -226,340 +52,157 @@ struct fscache_cache_ops {
/* name of cache provider */
const char *name;
- /* allocate an object record for a cookie */
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-
- /* look up the object for a cookie
- * - return -ETIMEDOUT to be requeued
- */
- int (*lookup_object)(struct fscache_object *object);
-
- /* finished looking up */
- void (*lookup_complete)(struct fscache_object *object);
-
- /* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
+ /* Acquire a volume */
+ void (*acquire_volume)(struct fscache_volume *volume);
- /* pin an object in the cache */
- int (*pin_object)(struct fscache_object *object);
+ /* Free the cache's data attached to a volume */
+ void (*free_volume)(struct fscache_volume *volume);
- /* unpin an object in the cache */
- void (*unpin_object)(struct fscache_object *object);
+ /* Look up a cookie in the cache */
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- /* check the consistency between the backing cache and the FS-Cache
- * cookie */
- int (*check_consistency)(struct fscache_operation *op);
+ /* Withdraw an object without any cookie access counts held */
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- /* store the updated auxiliary data on an object */
- void (*update_object)(struct fscache_object *object);
+ /* Change the size of a data object */
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
/* Invalidate an object */
- void (*invalidate_object)(struct fscache_operation *op);
-
- /* discard the resources pinned by an object and effect retirement if
- * necessary */
- void (*drop_object)(struct fscache_object *object);
-
- /* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
-
- /* sync a cache */
- void (*sync_cache)(struct fscache_cache *cache);
-
- /* notification that the attributes of a non-index object (such as
- * i_size) have changed */
- int (*attr_changed)(struct fscache_object *object);
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
- /* reserve space for an object's data and associated metadata */
- int (*reserve_space)(struct fscache_object *object, loff_t i_size);
+ /* Begin an operation for the netfs lib */
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
- /* request a backing block for a page be read or allocated in the
- * cache */
- fscache_page_retrieval_func_t read_or_alloc_page;
-
- /* request backing blocks for a list of pages be read or allocated in
- * the cache */
- fscache_pages_retrieval_func_t read_or_alloc_pages;
-
- /* request a backing block for a page be allocated in the cache so that
- * it can be written directly */
- fscache_page_retrieval_func_t allocate_page;
-
- /* request backing blocks for pages be allocated in the cache so that
- * they can be written directly */
- fscache_pages_retrieval_func_t allocate_pages;
-
- /* write a page to its backing block in the cache */
- int (*write_page)(struct fscache_storage *op, struct page *page);
-
- /* detach backing block from a page (optional)
- * - must release the cookie lock before returning
- * - may sleep
- */
- void (*uncache_page)(struct fscache_object *object,
- struct page *page);
-
- /* dissociate a cache from all the pages it was backing */
- void (*dissociate_pages)(struct fscache_cache *cache);
+ /* Prepare to write to a live cache object */
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
};
-extern struct fscache_cookie fscache_fsdef_index;
+extern struct workqueue_struct *fscache_wq;
+extern wait_queue_head_t fscache_clearance_waiters;
/*
- * Event list for fscache_object::{event_mask,events}
- */
-enum {
- FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
- FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
- FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
- FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
- FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
- FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
- FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
- NR_FSCACHE_OBJECT_EVENTS
-};
-
-#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
-
-/*
- * States for object state machine.
- */
-struct fscache_transition {
- unsigned long events;
- const struct fscache_state *transit_to;
-};
-
-struct fscache_state {
- char name[24];
- char short_name[8];
- const struct fscache_state *(*work)(struct fscache_object *object,
- int event);
- const struct fscache_transition transitions[];
-};
-
-/*
- * on-disk cache file or index handle
+ * out-of-line cache backend functions
*/
-struct fscache_object {
- const struct fscache_state *state; /* Object state machine state */
- const struct fscache_transition *oob_table; /* OOB state transition table */
- int debug_id; /* debugging ID */
- int n_children; /* number of child objects */
- int n_ops; /* number of extant ops on object */
- int n_obj_ops; /* number of object ops outstanding on object */
- int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued or in progress */
- atomic_t n_reads; /* number of read ops in progress */
- spinlock_t lock; /* state and operations lock */
-
- unsigned long lookup_jif; /* time at which lookup started */
- unsigned long oob_event_mask; /* OOB events this object is interested in */
- unsigned long event_mask; /* events this object is interested in */
- unsigned long events; /* events to be processed by this object
- * (order is important - using fls) */
-
- unsigned long flags;
-#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
-#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
-#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
-#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
-#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
-#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
-#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
-#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
-#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
-
- struct list_head cache_link; /* link in cache->object_list */
- struct hlist_node cookie_link; /* link in cookie->backing_objects */
- struct fscache_cache *cache; /* cache that supplied this object */
- struct fscache_cookie *cookie; /* netfs's file/index object */
- struct fscache_object *parent; /* parent object */
- struct work_struct work; /* attention scheduling record */
- struct list_head dependents; /* FIFO of dependent objects */
- struct list_head dep_link; /* link in parent's dependents list */
- struct list_head pending_ops; /* unstarted operations on this object */
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- struct rb_node objlist_link; /* link in global object list */
-#endif
- pgoff_t store_limit; /* current storage limit */
- loff_t store_limit_l; /* current storage limit */
-};
-
-extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
- struct fscache_cache *);
-extern void fscache_object_destroy(struct fscache_object *);
-
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
-
-static inline bool fscache_object_is_live(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
-}
-
-static inline bool fscache_object_is_dying(struct fscache_object *object)
-{
- return !fscache_object_is_live(object);
-}
-
-static inline bool fscache_object_is_available(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
-}
+extern struct rw_semaphore fscache_addremove_sem;
+extern struct fscache_cache *fscache_acquire_cache(const char *name);
+extern void fscache_relinquish_cache(struct fscache_cache *cache);
+extern int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+extern void fscache_withdraw_volume(struct fscache_volume *volume);
+extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
-static inline bool fscache_cache_is_broken(struct fscache_object *object)
-{
- return test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
+extern void fscache_io_error(struct fscache_cache *cache);
-static inline bool fscache_object_is_active(struct fscache_object *object)
-{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !fscache_cache_is_broken(object);
-}
+extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+extern struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+extern void fscache_cookie_lookup_negative(struct fscache_cookie *cookie);
+extern void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+extern void fscache_caching_failed(struct fscache_cookie *cookie);
+extern bool fscache_wait_for_operation(struct netfs_cache_resources *cred,
+ enum fscache_want_state state);
/**
- * fscache_object_destroyed - Note destruction of an object in a cache
- * @cache: The cache from which the object came
+ * fscache_cookie_state - Read the state of a cookie
+ * @cookie: The cookie to query
*
- * Note the destruction and deallocation of an object record in a cache.
+ * Get the state of a cookie, imposing an ordering between the cookie contents
+ * and the state value. Paired with fscache_set_cookie_state().
*/
-static inline void fscache_object_destroyed(struct fscache_cache *cache)
+static inline
+enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
{
- if (atomic_dec_and_test(&cache->object_count))
- wake_up_all(&fscache_cache_cleared_wq);
+ return smp_load_acquire(&cookie->state);
}
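The setter half of that pairing is private to fs/fscache/, but its shape is implied by the comment above; a hedged sketch:

	static inline void set_cookie_state(struct fscache_cookie *cookie,
					    enum fscache_cookie_state state)
	{
		/* release: cookie contents written before this are visible to
		 * any reader that observes 'state' via smp_load_acquire() */
		smp_store_release(&cookie->state, state);
	}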
/**
- * fscache_object_lookup_error - Note an object encountered an error
- * @object: The object on which the error was encountered
+ * fscache_get_key - Get a pointer to the cookie key
+ * @cookie: The cookie to query
*
- * Note that an object encountered a fatal error (usually an I/O error) and
- * that it should be withdrawn as soon as possible.
+ * Return a pointer to where a cookie's key is stored.
*/
-static inline void fscache_object_lookup_error(struct fscache_object *object)
+static inline void *fscache_get_key(struct fscache_cookie *cookie)
{
- set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+ if (cookie->key_len <= sizeof(cookie->inline_key))
+ return cookie->inline_key;
+ else
+ return cookie->key;
}
-/**
- * fscache_set_store_limit - Set the maximum size to be stored in an object
- * @object: The object to set the maximum on
- * @i_size: The limit to set in bytes
- *
- * Set the maximum size an object is permitted to reach, implying the highest
- * byte that may be written. Intended to be called by the attr_changed() op.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-static inline
-void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+static inline struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres)
{
- object->store_limit_l = i_size;
- object->store_limit = i_size >> PAGE_SHIFT;
- if (i_size & ~PAGE_MASK)
- object->store_limit++;
+ return cres->cache_priv;
}
/**
- * fscache_end_io - End a retrieval operation on a page
- * @op: The FS-Cache operation covering the retrieval
- * @page: The page that was to be fetched
- * @error: The error code (0 if successful)
+ * fscache_count_object - Tell fscache that an object has been added
+ * @cache: The cache to account to
*
- * Note the end of an operation to retrieve a page, as covered by a particular
- * operation record.
+ * Tell fscache that an object has been added to the cache. This prevents the
+ * cache from tearing down the cache structure until the object is uncounted.
*/
-static inline void fscache_end_io(struct fscache_retrieval *op,
- struct page *page, int error)
+static inline void fscache_count_object(struct fscache_cache *cache)
{
- op->end_io_func(page, op->context, error);
-}
-
-static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
-{
- atomic_inc(&cookie->n_active);
+ atomic_inc(&cache->object_count);
}
/**
- * fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
- *
- * Request usage of the cookie attached to an object. NULL is returned if the
- * relinquishment had reduced the cookie usage count to 0.
+ * fscache_uncount_object - Tell fscache that an object has been removed
+ * @cache: The cache to account to
+ *
+ * Tell fscache that an object has been removed from the cache and will no
+ * longer be accessed. After this point, the cache cookie may be destroyed.
*/
-static inline bool fscache_use_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- return atomic_inc_not_zero(&cookie->n_active) != 0;
-}
-
-static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+static inline void fscache_uncount_object(struct fscache_cache *cache)
{
- return atomic_dec_and_test(&cookie->n_active);
-}
-
-static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
-{
- wake_up_var(&cookie->n_active);
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_clearance_waiters);
}
/**
- * fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
- *
- * Cease usage of the cookie attached to an object. When the users count
- * reaches zero then the cookie relinquishment will be permitted to proceed.
- */
-static inline void fscache_unuse_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- if (__fscache_unuse_cookie(cookie))
- __fscache_wake_unused_cookie(cookie);
-}
-
-/*
- * out-of-line cache backend functions
- */
-extern __printf(3, 4)
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt, ...);
-
-extern int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-extern void fscache_withdraw_cache(struct fscache_cache *cache);
-
-extern void fscache_io_error(struct fscache_cache *cache);
-
-extern void fscache_mark_page_cached(struct fscache_retrieval *op,
- struct page *page);
-
-extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
-extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
-
-extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
-extern void fscache_object_retrying_stale(struct fscache_object *object);
-
-enum fscache_why_object_killed {
- FSCACHE_OBJECT_IS_STALE,
- FSCACHE_OBJECT_NO_SPACE,
- FSCACHE_OBJECT_WAS_RETIRED,
- FSCACHE_OBJECT_WAS_CULLED,
-};
-extern void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
+ * fscache_wait_for_objects - Wait for all objects to be withdrawn
+ * @cache: The cache to query
+ *
+ * Wait for all extant objects in a cache to finish being withdrawn
+ * and go away.
+ */
+static inline void fscache_wait_for_objects(struct fscache_cache *cache)
+{
+ wait_event(fscache_clearance_waiters,
+ atomic_read(&cache->object_count) == 0);
+}
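
Taken together with fscache_withdraw_cache() and fscache_relinquish_cache()
above, this suggests the shutdown sequence a backend might follow. A sketch,
under the assumption that each withdrawn object ends with a call to
fscache_uncount_object():

	static void my_cache_shutdown(struct fscache_cache *cache)
	{
		/* Stop fscache starting new operations against us and get
		 * our extant objects withdrawn.
		 */
		fscache_withdraw_cache(cache);

		/* Sleep until every withdrawn object has called
		 * fscache_uncount_object() and object_count hits zero.
		 */
		fscache_wait_for_objects(cache);

		/* Finally drop our reference on the cache cookie. */
		fscache_relinquish_cache(cache);
	}
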
+
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_read;
+extern atomic_t fscache_n_write;
+extern atomic_t fscache_n_no_write_space;
+extern atomic_t fscache_n_no_create_space;
+extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
+#define fscache_count_read() atomic_inc(&fscache_n_read)
+#define fscache_count_write() atomic_inc(&fscache_n_write)
+#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
+#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
+#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
+#else
+#define fscache_count_read() do {} while (0)
+#define fscache_count_write() do {} while (0)
+#define fscache_count_no_write_space() do {} while (0)
+#define fscache_count_no_create_space() do {} while (0)
+#define fscache_count_culled() do {} while (0)
+#define fscache_count_dio_misfit() do {} while (0)
+#endif
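
The wrappers compile to no-ops when CONFIG_FSCACHE_STATS=n, so a backend can
call them unconditionally. For instance (a hypothetical backend hook, shown
only to illustrate the pattern):

	static int my_cache_begin_read(struct netfs_cache_resources *cres)
	{
		fscache_count_read();	/* no-op unless CONFIG_FSCACHE_STATS */
		/* ... set up and issue the actual read ... */
		return 0;
	}
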
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index a1c928fe98e7..6e8562cbcc43 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
@@ -15,152 +15,128 @@
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/list_bl.h>
+#include <linux/netfs.h>
+#include <linux/writeback.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define __fscache_available (1)
#define fscache_available() (1)
+#define fscache_volume_valid(volume) (volume)
#define fscache_cookie_valid(cookie) (cookie)
+#define fscache_resources_valid(cres) ((cres)->cache_priv)
+#define fscache_cookie_enabled(cookie) (cookie && !test_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
#else
+#define __fscache_available (0)
#define fscache_available() (0)
+#define fscache_volume_valid(volume) (0)
#define fscache_cookie_valid(cookie) (0)
+#define fscache_resources_valid(cres) (false)
+#define fscache_cookie_enabled(cookie) (0)
#endif
-
-/*
- * overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
-
-/* pattern used to fill dead space in an index entry */
-#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
-
-struct pagevec;
-struct fscache_cache_tag;
struct fscache_cookie;
-struct fscache_netfs;
-typedef void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
+#define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */
+#define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */
+#define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */
+#define FSCACHE_ADV_WANT_CACHE_SIZE 0x04 /* Retrieve cache size at runtime */
+
+#define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */
-/* result of index entry consultation */
-enum fscache_checkaux {
- FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
- FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
- FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
+enum fscache_want_state {
+ FSCACHE_WANT_PARAMS,
+ FSCACHE_WANT_WRITE,
+ FSCACHE_WANT_READ,
};
/*
- * fscache cookie definition
- */
-struct fscache_cookie_def {
- /* name of cookie type */
- char name[16];
-
- /* cookie type */
- uint8_t type;
-#define FSCACHE_COOKIE_TYPE_INDEX 0
-#define FSCACHE_COOKIE_TYPE_DATAFILE 1
-
- /* select the cache into which to insert an entry in this index
- * - optional
- * - should return a cache identifier or NULL to cause the cache to be
- * inherited from the parent if possible or the first cache picked
- * for a non-index file if not
- */
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- /* consult the netfs about the state of an object
- * - this function can be absent if the index carries no state data
- * - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data and the object size
- */
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
- /* get an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*get_context)(void *cookie_netfs_data, void *context);
-
- /* release an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*put_context)(void *cookie_netfs_data, void *context);
-
- /* indicate page that now have cache metadata retained
- * - this function should mark the specified page as now being cached
- * - the page will have been marked with PG_fscache before this is
- * called, so this is optional
- */
- void (*mark_page_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct page *page);
-};
+ * Data object state.
+ */
+enum fscache_cookie_state {
+ FSCACHE_COOKIE_STATE_QUIESCENT, /* The cookie is uncached */
+ FSCACHE_COOKIE_STATE_LOOKING_UP, /* The cache object is being looked up */
+ FSCACHE_COOKIE_STATE_CREATING, /* The cache object is being created */
+ FSCACHE_COOKIE_STATE_ACTIVE, /* The cache is active, readable and writable */
+ FSCACHE_COOKIE_STATE_INVALIDATING, /* The cache is being invalidated */
+ FSCACHE_COOKIE_STATE_FAILED, /* The cache failed, withdraw to clear */
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING, /* The cookie is being discarded by the LRU */
+ FSCACHE_COOKIE_STATE_WITHDRAWING, /* The cookie is being withdrawn */
+ FSCACHE_COOKIE_STATE_RELINQUISHING, /* The cookie is being relinquished */
+ FSCACHE_COOKIE_STATE_DROPPED, /* The cookie has been dropped */
+#define FSCACHE_COOKIE_STATE__NR (FSCACHE_COOKIE_STATE_DROPPED + 1)
+} __attribute__((mode(byte)));
/*
- * fscache cached network filesystem type
- * - name, version and ops must be filled in before registration
- * - all other fields will be set during registration
- */
-struct fscache_netfs {
- uint32_t version; /* indexing version */
- const char *name; /* filesystem name */
- struct fscache_cookie *primary_index;
+ * Volume representation cookie.
+ */
+struct fscache_volume {
+ refcount_t ref;
+ atomic_t n_cookies; /* Number of data cookies in volume */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int key_hash; /* Hash of key string */
+ u8 *key; /* Volume ID, e.g. "afs@example.com@1234" */
+ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct work_struct work;
+ struct fscache_cache *cache; /* The cache in which this resides */
+ void *cache_priv; /* Cache private data */
+ spinlock_t lock;
+ unsigned long flags;
+#define FSCACHE_VOLUME_RELINQUISHED 0 /* Volume is being cleaned up */
+#define FSCACHE_VOLUME_INVALIDATE 1 /* Volume was invalidated */
+#define FSCACHE_VOLUME_COLLIDED_WITH 2 /* Volume was collided with */
+#define FSCACHE_VOLUME_ACQUIRE_PENDING 3 /* Volume is waiting to complete acquisition */
+#define FSCACHE_VOLUME_CREATING 4 /* Volume is being created on disk */
+ u8 coherency_len; /* Length of the coherency data */
+ u8 coherency[]; /* Coherency data */
};
/*
- * data file or index object cookie
+ * Data file representation cookie.
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
+ refcount_t ref;
+ atomic_t n_active; /* number of active users of cookie */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int inval_counter; /* Number of invalidations made */
spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
+ struct fscache_volume *volume; /* Parent volume of this file. */
+ void *cache_priv; /* Cache-side representation */
struct hlist_bl_node hash_link; /* Link in hash table */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
+ struct list_head proc_link; /* Link in proc list */
+ struct list_head commit_link; /* Link in commit queue */
+ struct work_struct work; /* Commit/relinq/withdraw work */
+ loff_t object_size; /* Size of the netfs object */
+ unsigned long unused_at; /* Time at which unused (jiffies) */
unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
-#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
-#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */
-#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */
-#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */
-
- u8 type; /* Type of object */
+#define FSCACHE_COOKIE_RELINQUISHED 0 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED 1 /* T if this cookie has retired on relinq */
+#define FSCACHE_COOKIE_IS_CACHING 2 /* T if this cookie is cached */
+#define FSCACHE_COOKIE_NO_DATA_TO_READ 3 /* T if this cookie has nothing to read */
+#define FSCACHE_COOKIE_NEEDS_UPDATE 4 /* T if attrs have been updated */
+#define FSCACHE_COOKIE_HAS_BEEN_CACHED 5 /* T if cookie needs withdraw-on-relinq */
+#define FSCACHE_COOKIE_DISABLED 6 /* T if cookie has been disabled */
+#define FSCACHE_COOKIE_LOCAL_WRITE 7 /* T if cookie has been modified locally */
+#define FSCACHE_COOKIE_NO_ACCESS_WAKE 8 /* T if no wake when n_accesses goes 0 */
+#define FSCACHE_COOKIE_DO_RELINQUISH 9 /* T if this cookie needs relinquishment */
+#define FSCACHE_COOKIE_DO_WITHDRAW 10 /* T if this cookie needs withdrawing */
+#define FSCACHE_COOKIE_DO_LRU_DISCARD 11 /* T if this cookie needs LRU discard */
+#define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */
+#define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */
+#define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */
+#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */
+
+ enum fscache_cookie_state state;
+ u8 advice; /* FSCACHE_ADV_* */
u8 key_len; /* Length of index key */
u8 aux_len; /* Length of auxiliary data */
- u32 key_hash; /* Hash of parent, type, key, len */
+ u32 key_hash; /* Hash of volume, key, len */
union {
void *key; /* Index key */
u8 inline_key[16]; /* - If the key is short enough */
@@ -171,11 +147,6 @@ struct fscache_cookie {
};
};
-static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
-{
- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
-}
-
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
@@ -183,661 +154,497 @@ static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
-extern int __fscache_register_netfs(struct fscache_netfs *);
-extern void __fscache_unregister_netfs(struct fscache_netfs *);
-extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
-extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+extern struct fscache_volume *__fscache_acquire_volume(const char *, const char *,
+ const void *, size_t);
+extern void __fscache_relinquish_volume(struct fscache_volume *, const void *, bool);
extern struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *,
- const struct fscache_cookie_def *,
+ struct fscache_volume *,
+ u8,
const void *, size_t,
const void *, size_t,
- void *, loff_t, bool);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *, const void *);
-extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
-extern int __fscache_attr_changed(struct fscache_cookie *);
-extern void __fscache_invalidate(struct fscache_cookie *);
-extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
-extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
- struct page *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
- struct address_space *,
- struct list_head *,
- unsigned *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
-extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
-extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
-extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
-extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
- gfp_t);
-extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
- struct inode *);
-extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
- bool (*)(void *), void *);
+ loff_t);
+extern void __fscache_use_cookie(struct fscache_cookie *, bool);
+extern void __fscache_unuse_cookie(struct fscache_cookie *, const void *, const loff_t *);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
+extern void __fscache_resize_cookie(struct fscache_cookie *, loff_t);
+extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, unsigned int);
+extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+
+extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
+ loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
+ bool);
+extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
- * fscache_register_netfs - Register a filesystem as desiring caching services
- * @netfs: The description of the filesystem
+ * fscache_acquire_volume - Register a volume as desiring caching services
+ * @volume_key: An identification string for the volume
+ * @cache_name: The name of the cache to use (or NULL for the default)
+ * @coherency_data: Piece of arbitrary coherency data to check (or NULL)
+ * @coherency_len: The size of the coherency data
*
- * Register a filesystem as desiring caching services if they're available.
+ * Register a volume as desiring caching services if they're available. The
+ * caller must provide an identifier for the volume and may also indicate which
+ * cache it should be in. If a preexisting volume entry is found in the cache,
+ * the coherency data must match; otherwise the entry will be invalidated.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns a cookie pointer on success, -ENOMEM if out of memory or -EBUSY if a
+ * cache volume of that name is already acquired. Note that "NULL" is a valid
+ * cookie pointer and can be returned if caching is refused.
*/
static inline
-int fscache_register_netfs(struct fscache_netfs *netfs)
+struct fscache_volume *fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
{
- if (fscache_available())
- return __fscache_register_netfs(netfs);
- else
- return 0;
+ if (!fscache_available())
+ return NULL;
+ return __fscache_acquire_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
}
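
For example, a network filesystem might do this once per superblock at mount
time; the my_netfs_* names here are illustrative only:

	static int my_netfs_begin_cache(struct my_netfs_sb *sb)
	{
		struct fscache_volume *volume;

		volume = fscache_acquire_volume("mynetfs,server1,share1",
						NULL,	/* default cache */
						NULL, 0); /* no coherency data */
		if (IS_ERR(volume))
			return PTR_ERR(volume);	/* e.g. -EBUSY on collision */

		sb->volume = volume;	/* may be NULL: run uncached */
		return 0;
	}
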
/**
- * fscache_unregister_netfs - Indicate that a filesystem no longer desires
- * caching services
- * @netfs: The description of the filesystem
+ * fscache_relinquish_volume - Cease caching a volume
+ * @volume: The volume cookie
+ * @coherency_data: Piece of arbitrary coherency data to set (or NULL)
+ * @invalidate: True if the volume should be invalidated
*
- * Indicate that a filesystem no longer desires caching services for the
- * moment.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Indicate that a filesystem no longer desires caching services for a volume.
+ * The caller must have relinquished all file cookies prior to calling this.
+ * The stored coherency data is updated.
*/
static inline
-void fscache_unregister_netfs(struct fscache_netfs *netfs)
+void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
{
- if (fscache_available())
- __fscache_unregister_netfs(netfs);
+ if (fscache_volume_valid(volume))
+ __fscache_relinquish_volume(volume, coherency_data, invalidate);
}
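
The unmount-time counterpart of the sketch above might look like this (again
with illustrative names):

	static void my_netfs_end_cache(struct my_netfs_sb *sb)
	{
		/* No updated coherency data to store here (NULL), and no
		 * reason to invalidate the volume's contents (false).
		 */
		fscache_relinquish_volume(sb->volume, NULL, false);
		sb->volume = NULL;
	}
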
/**
- * fscache_lookup_cache_tag - Look up a cache tag
- * @name: The name of the tag to search for
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @volume: The volume in which to locate/create this cookie
+ * @advice: Advice flags (FSCACHE_ADV_*)
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
+ * @object_size: The initial size of object
*
- * Acquire a specific cache referral tag that can be used to select a specific
- * cache in which to cache an index.
+ * Acquire a cookie to represent a data file within the given cache volume.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
+ loff_t object_size)
{
- if (fscache_available())
- return __fscache_lookup_cache_tag(name);
- else
+ if (!fscache_volume_valid(volume))
return NULL;
+ return __fscache_acquire_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
}
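
A netfs would typically acquire one cookie per inode, keyed on something that
uniquely identifies the file on the server. A sketch with hypothetical
my_netfs_inode fields:

	static void my_netfs_init_cookie(struct my_netfs_inode *ni)
	{
		ni->cookie = fscache_acquire_cookie(ni->sb->volume,
						    0,	/* no advice flags */
						    &ni->fid, sizeof(ni->fid),
						    &ni->change_attr,
						    sizeof(ni->change_attr),
						    i_size_read(&ni->vfs_inode));
	}
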
/**
- * fscache_release_cache_tag - Release a cache tag
- * @tag: The tag to release
- *
- * Release a reference to a cache referral tag previously looked up.
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @will_modify: If cache is expected to be modified locally
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Request usage of the cookie attached to an object. The caller should tell
+ * the cache if the object's contents are about to be modified locally and then
+ * the cache can apply the policy that has been set to handle this case.
*/
-static inline
-void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static inline void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify)
{
- if (fscache_available())
- __fscache_release_cache_tag(tag);
+ if (fscache_cookie_valid(cookie))
+ __fscache_use_cookie(cookie, will_modify);
}
/**
- * fscache_acquire_cookie - Acquire a cookie to represent a cache object
- * @parent: The cookie that's to be the parent of this one
- * @def: A description of the cache object, including callback operations
- * @index_key: The index key for this cookie
- * @index_key_len: Size of the index key
- * @aux_data: The auxiliary data for the cookie (may be NULL)
- * @aux_data_len: Size of the auxiliary data buffer
- * @netfs_data: An arbitrary piece of data to be kept in the cookie to
- * represent the cache object to the netfs
- * @object_size: The initial size of object
- * @enable: Whether or not to enable a data cookie immediately
- *
- * This function is used to inform FS-Cache about part of an index hierarchy
- * that can be used to locate files. This is done by requesting a cookie for
- * each index in the path to the file.
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @aux_data: Updated auxiliary data (or NULL)
+ * @object_size: Revised size of the object (or NULL)
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Cease usage of the cookie attached to an object. When the users count
+ * reaches zero then the cookie relinquishment will be permitted to proceed.
*/
-static inline
-struct fscache_cookie *fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- const void *index_key,
- size_t index_key_len,
- const void *aux_data,
- size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable)
+static inline void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size)
{
- if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def,
- index_key, index_key_len,
- aux_data, aux_data_len,
- netfs_data, object_size, enable);
- else
- return NULL;
+ if (fscache_cookie_valid(cookie))
+ __fscache_unuse_cookie(cookie, aux_data, object_size);
}
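
Use and unuse calls are expected to bracket periods of activity, such as a
file being open. A sketch pairing them across open and release (the
my_netfs_i() accessor is hypothetical):

	static int my_netfs_open(struct inode *inode, struct file *file)
	{
		/* Warn the cache up front if we may write locally. */
		fscache_use_cookie(my_netfs_i(inode)->cookie,
				   file->f_mode & FMODE_WRITE);
		return 0;
	}

	static int my_netfs_release(struct inode *inode, struct file *file)
	{
		loff_t size = i_size_read(inode);

		/* Hand back the authoritative size on last use. */
		fscache_unuse_cookie(my_netfs_i(inode)->cookie, NULL, &size);
		return 0;
	}
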
/**
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
- * associated cache object if retire is set to true. The opportunity is
- * provided to update the auxiliary data in the cache before the object is
- * disconnected.
+ * associated cache object if retire is set to true.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
if (fscache_cookie_valid(cookie))
- __fscache_relinquish_cookie(cookie, aux_data, retire);
+ __fscache_relinquish_cookie(cookie, retire);
}
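
This is typically called from the netfs's inode-eviction path. A sketch:

	static void my_netfs_evict_inode(struct inode *inode)
	{
		/* Retire (discard) the cache object if the file was unlinked. */
		fscache_relinquish_cookie(my_netfs_i(inode)->cookie,
					  inode->i_nlink == 0);
	}
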
-/**
- * fscache_check_consistency - Request validation of a cache's auxiliary data
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an consistency check from fscache, which passes the request to the
- * backing cache. The auxiliary data on the cookie will be updated first if
- * @aux_data is set.
- *
- * Returns 0 if consistent and -ESTALE if inconsistent. May also
- * return -ENOMEM and -ERESTARTSYS.
+/*
+ * Find the auxiliary data on a cookie.
*/
-static inline
-int fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data)
+static inline void *fscache_get_aux(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie, aux_data);
+ if (cookie->aux_len <= sizeof(cookie->inline_aux))
+ return cookie->inline_aux;
else
- return 0;
+ return cookie->aux;
}
-/**
- * fscache_update_cookie - Request that a cache object be updated
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an update of the index data for the cache object associated with the
- * cookie. The auxiliary data on the cookie will be updated first if @aux_data
- * is set.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+/*
+ * Update the auxiliary data on a cookie.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
+void fscache_update_aux(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie, aux_data);
+ void *p = fscache_get_aux(cookie);
+
+ if (aux_data && p)
+ memcpy(p, aux_data, cookie->aux_len);
+ if (object_size)
+ cookie->object_size = *object_size;
}
-/**
- * fscache_pin_cookie - Pin a data-storage cache object in its cache
- * @cookie: The cookie representing the cache object
- *
- * Permit data-storage cache objects to be pinned in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_updates;
+#endif
+
static inline
-int fscache_pin_cookie(struct fscache_cookie *cookie)
+void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
- return -ENOBUFS;
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_updates);
+#endif
+ fscache_update_aux(cookie, aux_data, object_size);
+ smp_wmb();
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
}
/**
- * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: The current size of the object (may be NULL)
*
- * Permit data-storage cache objects to be unpinned from the cache.
+ * Request an update of the index data for the cache object associated with the
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set, and the object size will be updated and the object possibly trimmed
+ * if @object_size is set.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_unpin_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
+ if (fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie, aux_data, object_size);
}
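
For example, after revalidating an inode with the server, a netfs might push
the fresh coherency data and size into the cache (hypothetical fields again):

	static void my_netfs_revalidated(struct my_netfs_inode *ni)
	{
		loff_t size = i_size_read(&ni->vfs_inode);

		fscache_update_cookie(ni->cookie, &ni->change_attr, &size);
	}
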
/**
- * fscache_attr_changed - Notify cache that an object's attributes changed
+ * fscache_resize_cookie - Request that a cache object be resized
* @cookie: The cookie representing the cache object
+ * @new_size: The new size of the object
*
- * Send a notification to the cache indicating that an object's attributes have
- * changed. This includes the data size. These attributes will be obtained
- * through the get_attr() cookie definition op.
+ * Request that the size of an object be changed.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-int fscache_attr_changed(struct fscache_cookie *cookie)
+void fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_attr_changed(cookie);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ __fscache_resize_cookie(cookie, new_size);
}
/**
* fscache_invalidate - Notify cache that an object needs invalidation
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @size: The revised size of the object.
+ * @flags: Invalidation flags (FSCACHE_INVAL_*)
*
 * Notify the cache that an object needs to be invalidated and that it
- * should abort any retrievals or stores it is doing on the cache. The object
- * is then marked non-caching until such time as the invalidation is complete.
+ * should abort any retrievals or stores it is doing on the cache. This
+ * increments inval_counter on the cookie which can be used by the caller to
+ * reconsider I/O requests as they complete.
*
- * This can be called with spinlocks held.
+ * If @flags has FSCACHE_INVAL_DIO_WRITE set, this indicates that this is due
+ * to a direct I/O write and will cause caching to be disabled on this cookie
+ * until it is completely unused.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_invalidate(struct fscache_cookie *cookie)
+void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t size, unsigned int flags)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_invalidate(cookie);
+ if (fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie, aux_data, size, flags);
}
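
As one example, a direct-I/O write bypasses both the pagecache and the cache,
so the cached copy must be thrown away and caching suppressed; a sketch:

	static void my_netfs_begin_dio_write(struct my_netfs_inode *ni)
	{
		/* FSCACHE_INVAL_DIO_WRITE also disables caching on this
		 * cookie until it becomes completely unused.
		 */
		fscache_invalidate(ni->cookie, &ni->change_attr,
				   i_size_read(&ni->vfs_inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
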
/**
- * fscache_wait_on_invalidate - Wait for invalidation to complete
- * @cookie: The cookie representing the cache object
+ * fscache_operation_valid - Return the operations table if resources are usable
+ * @cres: The resources to check.
*
- * Wait for the invalidation of an object to complete.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns a pointer to the operations table if usable or NULL if not.
*/
static inline
-void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_invalidate(cookie);
+ return fscache_resources_valid(cres) ? cres->ops : NULL;
}
/**
- * fscache_reserve_space - Reserve data space for a cached object
+ * fscache_begin_read_operation - Begin a read operation for the netfs lib
+ * @cres: The cache resources for the read being performed
* @cookie: The cookie representing the cache object
- * @i_size: The amount of space to be reserved
*
- * Reserve an amount of space in the cache for the cache object attached to a
- * cookie so that a write to that object within the space can always be
- * honoured.
+ * Begin a read operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
+ *
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+int fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(cres, cookie);
return -ENOBUFS;
}
/**
- * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
- * in which to store it
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to fill if possible
- * @end_io_func: The callback to invoke when and if the page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a page from the cache, or if that's not possible make a potential
- * one-block reservation in the cache into which the page may be stored once
- * fetched from the server.
- *
- * If the page is not backed by the cache object, or if it there's some reason
- * it can't be, -ENOBUFS will be returned and nothing more will be done for
- * that page.
- *
- * Else, if that page is backed by the cache, a read will be initiated directly
- * to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if the page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Clean up the resources at the end of the read request.
*/
-static inline
-int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_page(cookie, page, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-/**
- * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
- * blocks in which to store them
- * @cookie: The cookie representing the cache object
- * @mapping: The netfs inode mapping to which the pages will be attached
- * @pages: A list of potential netfs pages to be filled
- * @nr_pages: Number of pages to be read and/or allocated
- * @end_io_func: The callback to invoke when and if each page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a set of pages from the cache, or if that's not possible, attempt to
- * make a potential one-block reservation for each page in the cache into which
- * that page may be stored once fetched from the server.
- *
- * If some pages are not backed by the cache object, or if it there's some
- * reason they can't be, -ENOBUFS will be returned and nothing more will be
- * done for that pages.
- *
- * Else, if some of the pages are backed by the cache, a read will be initiated
- * directly to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if a page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
- * regard to different pages, the return values are prioritised in that order.
- * Any pages submitted for reading are removed from the pages list.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_pages(cookie, mapping, pages,
- nr_pages, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
+ if (ops)
+ ops->end_operation(cres);
}
/**
- * fscache_alloc_page - Allocate a block in which to store a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to allocate a page for
- * @gfp: The conditions under which memory allocation should be made
+ * fscache_read - Start a read from the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The buffer to fill - and also the length
+ * @read_hole: How to handle a hole in the data.
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Request Allocation a block in the cache in which to store a netfs page
- * without retrieving any contents from the cache.
+ * Start a read from the cache. @cres indicates the cache object to read from
+ * and must be obtained by a call to fscache_begin_read_operation() beforehand.
*
- * If the page is not backed by a file then -ENOBUFS will be returned and
- * nothing more will be done, and no reservation will be made.
+ * The data is read into the iterator, @iter, and that also indicates the size
+ * of the operation. @start_pos is the start position in the file, though if
+ * @read_hole is set appropriately, the cache can use SEEK_DATA to find the
+ * next piece of data, writing zeros for the hole into the iterator.
*
- * Else, a block will be allocated if one wasn't already, and 0 will be
- * returned
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data read, if successful, or the
+ * error code otherwise.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_alloc_page(cookie, page, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_readpages_cancel - Cancel read/alloc on pages
- * @cookie: The cookie representing the inode's cache object.
- * @pages: The netfs pages that we canceled write on in readpages()
+ * @read_hole indicates how a partially populated region in the cache should be
+ * handled. It can be one of a number of settings:
*
- * Uncache/unreserve the pages reserved earlier in readpages() via
- * fscache_readpages_or_alloc() and similar. In most successful caches in
- * readpages() this doesn't do anything. In cases when the underlying netfs's
- * readahead failed we need to clean up the pagelist (unmark and uncache).
+ * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * This function may sleep as it may have to clean up disk state.
- */
-static inline
-void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
-{
- if (fscache_cookie_valid(cookie))
- __fscache_readpages_cancel(cookie, pages);
-}
-
-/**
- * fscache_write_page - Request storage of a page in the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to store
- * @object_size: Updated size of object
- * @gfp: The conditions under which memory allocation should be made
- *
- * Request the contents of the netfs page be written into the cache. This
- * request may be ignored if no cache block is currently allocated, in which
- * case it will return -ENOBUFS.
+ * NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
+ * skipped over, then do as for IGNORE.
*
- * If a cache block was already allocated, a write will be initiated and 0 will
- * be returned. The PG_fscache_write page bit is set immediately and will then
- * be cleared at the completion of the write to indicate the success or failure
- * of the operation. Note that the completion may happen before the return.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
-int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp)
+int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, object_size, gfp);
- else
- return -ENOBUFS;
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->read(cres, start_pos, iter, read_hole,
+ term_func, term_func_priv);
}
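
Putting the pieces together, a synchronous read helper might look like the
sketch below; with a NULL termination function the call completes
synchronously, which is how some fallback netfs helpers use it:

	static int my_netfs_read_from_cache(struct fscache_cookie *cookie,
					    struct iov_iter *iter, loff_t pos)
	{
		struct netfs_cache_resources cres = {};
		int ret;

		ret = fscache_begin_read_operation(&cres, cookie);
		if (ret < 0)
			return ret;	/* e.g. -ENOBUFS: nothing cached */

		/* NULL term_func: complete synchronously; fail on holes. */
		ret = fscache_read(&cres, pos, iter, NETFS_READ_HOLE_FAIL,
				   NULL, NULL);
		fscache_end_operation(&cres);
		return ret;
	}
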
/**
- * fscache_uncache_page - Indicate that caching is no longer required on a page
+ * fscache_begin_write_operation - Begin a write operation for the netfs lib
+ * @cres: The cache resources for the write being performed
* @cookie: The cookie representing the cache object
- * @page: The netfs page that was being cached.
*
- * Tell the cache that we no longer want a page to be cached and that it should
- * remove any knowledge of the netfs page it may have.
+ * Begin a write operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * Note that this cannot cancel any outstanding I/O operations between this
- * page and the cache.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_page(cookie, page);
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_write_operation(cres, cookie);
+ return -ENOBUFS;
}
/**
- * fscache_check_page_write - Ask if a page is being writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache if a page is being written to the cache.
+ * fscache_write - Start a write to the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The data to write - and also the length
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page)
-{
- if (fscache_cookie_valid(cookie))
- return __fscache_check_page_write(cookie, page);
- return false;
-}
-
-/**
- * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
+ * Start a write to the cache. @cres indicates the cache object to write to and
+ * must be obtained by a call to fscache_begin_write_operation() beforehand.
*
- * Ask the cache to wake us up when a page is no longer being written to the
- * cache.
+ * The data to be written is obtained from the iterator, @iter, and that also
+ * indicates the size of the operation. @start_pos is the start position in
+ * the file.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*/
static inline
-void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_page_write(cookie, page);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->write(cres, start_pos, iter, term_func, term_func_priv);
}
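
The write side mirrors the read sketch above:

	static int my_netfs_write_to_cache_sync(struct fscache_cookie *cookie,
						struct iov_iter *iter,
						loff_t pos)
	{
		struct netfs_cache_resources cres = {};
		int ret;

		ret = fscache_begin_write_operation(&cres, cookie);
		if (ret < 0)
			return ret;

		ret = fscache_write(&cres, pos, iter, NULL, NULL);
		fscache_end_operation(&cres);
		return ret;
	}
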
/**
- * fscache_maybe_release_page - Consider releasing a page, cancelling a store
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- * @gfp: The gfp flags passed to releasepage()
- *
- * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
- * releasepage() call. A storage request on the page may cancelled if it is
- * not currently being processed.
- *
- * The function returns true if the page no longer has a storage request on it,
- * and false if a storage request is left in place. If true is returned, the
- * page will have been passed to fscache_uncache_page(). If false is returned
- * the page cannot be freed yet.
- */
-static inline
-bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && PageFsCache(page))
- return __fscache_maybe_release_page(cookie, page, gfp);
- return true;
-}
-
-/**
- * fscache_uncache_all_inode_pages - Uncache all an inode's pages
- * @cookie: The cookie representing the inode's cache object.
- * @inode: The inode to uncache pages from.
- *
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
- *
- * This function may sleep. It will wait for pages that are being written out
- * and will wait whilst the PG_fscache mark is removed by the cache.
- */
-static inline
-void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
+ * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to unlock
+ * @caching: If PG_fscache has been set
+ *
+ * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
+ * waiting.
+ */
+static inline void fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_all_inode_pages(cookie, inode);
+ if (caching)
+ __fscache_clear_page_bits(mapping, start, len);
}
/**
- * fscache_disable_cookie - Disable a cookie
+ * fscache_write_to_cache - Save a write to the cache and clear PG_fscache
* @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @invalidate: Invalidate the backing object
- *
- * Disable a cookie from accepting further alloc, read, write, invalidate,
- * update or acquire operations. Outstanding operations can still be waited
- * upon and pages can still be uncached and the cookie relinquished.
- *
- * This will not return until all outstanding operations have completed.
- *
- * If @invalidate is set, then the backing object will be invalidated and
- * detached, otherwise it will just be detached.
- *
- * If @aux_data is set, then auxiliary data will be updated from that.
- */
-static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate)
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to write back
+ * @i_size: The new size of the inode
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
+ * @caching: If PG_fscache has been set
+ *
+ * Helper function for a netfs to write dirty data from an inode into the cache
+ * object that's backing it.
+ *
+ * @start and @len describe the range of the data. This does not need to be
+ * page-aligned, but to satisfy DIO requirements, the cache may expand it up to
+ * the page boundaries on either end. All the pages covering the range must be
+ * marked with PG_fscache.
+ *
+ * If given, @term_func will be called upon completion and supplied with
+ * @term_func_priv. Note that the PG_fscache flags will have been cleared by
+ * this point, so the netfs must retain its own pin on the mapping.
+ */
+static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool caching)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, aux_data, invalidate);
+ if (caching)
+ __fscache_write_to_cache(cookie, mapping, start, len, i_size,
+ term_func, term_func_priv, caching);
+ else if (term_func)
+ term_func(term_func_priv, -ENOBUFS, false);
}
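
A netfs writeback path might use it roughly as follows; the termination
handler and all my_netfs_* names are illustrative:

	static void my_netfs_write_done(void *priv,
					ssize_t transferred_or_error,
					bool was_async)
	{
		if (IS_ERR_VALUE(transferred_or_error))
			pr_warn("fscache write failed: %zd\n",
				transferred_or_error);
	}

	static void my_netfs_write_range(struct inode *inode, loff_t start,
					 size_t len, bool caching)
	{
		fscache_write_to_cache(my_netfs_i(inode)->cookie,
				       inode->i_mapping, start, len,
				       i_size_read(inode),
				       my_netfs_write_done, NULL, caching);
	}
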
/**
- * fscache_enable_cookie - Reenable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @object_size: Current size of object
- * @can_enable: A function to permit enablement once lock is held
- * @data: Data for can_enable()
+ * fscache_note_page_release - Note that a netfs page got released
+ * @cookie: The cookie corresponding to the file
*
- * Reenable a previously disabled cookie, allowing it to accept further alloc,
- * read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object. If @aux_data
- * is set, the auxiliary data attached to the cookie will be updated.
- *
- * The can_enable() function is called (if not NULL) once the enablement lock
- * is held to rule on whether enablement is still permitted to go ahead.
+ * Note that a page that has been copied to the cache has been released. This
+ * means that future reads will need to look in the cache to see if it's there.
*/
static inline
-void fscache_enable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
+void fscache_note_page_release(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, aux_data, object_size,
- can_enable, data);
+ /* If we've written data to the cache (HAVE_DATA) and there wasn't any
+ * data in the cache when we started (NO_DATA_TO_READ), it may no
+ * longer be true that we can skip reading from the cache - so clear
+ * the flag that causes reads to be skipped.
+ */
+ if (cookie &&
+ test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
+ test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
}
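
A plausible call site is the netfs's page-release hook, once a page that may
have been copied to the cache leaves the pagecache; a sketch:

	static int my_netfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct inode *inode = page->mapping->host;

		fscache_note_page_release(my_netfs_i(inode)->cookie);
		return 1;	/* page can be released */
	}
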
#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 991ff8575d0e..772f822dc6b8 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -15,14 +15,24 @@
#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/parser.h>
#include <linux/slab.h>
#include <uapi/linux/fscrypt.h>
-#define FS_CRYPTO_BLOCK_SIZE 16
+/*
+ * The lengths of all file contents blocks must be divisible by this value.
+ * This is needed to ensure that all contents encryption modes will work, as
+ * some of the supported modes don't support arbitrarily byte-aligned messages.
+ *
+ * Since the needed alignment is 16 bytes, most filesystems will meet this
+ * requirement naturally, as typical block sizes are powers of 2. However, if a
+ * filesystem can generate arbitrarily byte-aligned block lengths (e.g., via
+ * compression), then it will need to pad to this alignment before encryption.
+ */
+#define FSCRYPT_CONTENTS_ALIGNMENT 16
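
For a filesystem that produces variable-length (e.g. compressed) blocks, the
padding described above might be computed like this (illustrative only):

	/* Round a compressed block length up to the contents-encryption
	 * alignment; the pad bytes must be included in the encrypted unit.
	 */
	static inline size_t my_fs_encrypted_len(size_t compressed_len)
	{
		return round_up(compressed_len, FSCRYPT_CONTENTS_ALIGNMENT);
	}
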
-union fscrypt_context;
-struct fscrypt_info;
+union fscrypt_policy;
+struct fscrypt_inode_info;
+struct fs_parameter;
struct seq_file;
struct fscrypt_str {
@@ -36,7 +46,7 @@ struct fscrypt_name {
u32 hash;
u32 minor_hash;
struct fscrypt_str crypto_buf;
- bool is_ciphertext_name;
+ bool is_nokey_name;
};
#define FSTR_INIT(n, l) { .name = n, .len = l }
@@ -48,36 +58,147 @@ struct fscrypt_name {
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40
#ifdef CONFIG_FS_ENCRYPTION
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-/*
- * crypto operations for filesystems
- */
+/* Crypto operations for filesystems */
struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
+
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
+ */
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+	 * (i.e. blocks that aren't all equal to the filesystem's block size),
+	 * for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
+
+ /*
+ * Get the fscrypt context of the given inode.
+ *
+ * @inode: the inode whose context to get
+ * @ctx: the buffer into which to get the context
+ * @len: length of the @ctx buffer in bytes
+ *
+ * Return: On success, returns the length of the context in bytes; this
+ * may be less than @len. On failure, returns -ENODATA if the
+ * inode doesn't have a context, -ERANGE if the context is
+ * longer than @len, or another -errno code.
+ */
int (*get_context)(struct inode *inode, void *ctx, size_t len);
+
+ /*
+ * Set an fscrypt context on the given inode.
+ *
+ * @inode: the inode whose context to set. The inode won't already have
+ * an fscrypt context.
+ * @ctx: the context to set
+ * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ * @fs_data: If called from fscrypt_set_context(), this will be the
+ * value the filesystem passed to fscrypt_set_context().
+ * Otherwise (i.e. when called from
+ * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
+ *
+ * i_rwsem will be held for write.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int (*set_context)(struct inode *inode, const void *ctx, size_t len,
void *fs_data);
- const union fscrypt_context *(*get_dummy_context)(
- struct super_block *sb);
+
+ /*
+ * Get the dummy fscrypt policy in use on the filesystem (if any).
+ *
+ * Filesystems only need to implement this function if they support the
+ * test_dummy_encryption mount option.
+ *
+ * Return: A pointer to the dummy fscrypt policy, if the filesystem is
+ * mounted with test_dummy_encryption; otherwise NULL.
+ */
+ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
+
+ /*
+ * Check whether a directory is empty. i_rwsem will be held for write.
+ */
bool (*empty_dir)(struct inode *inode);
- unsigned int max_namelen;
+
+ /*
+ * Check whether the filesystem's inode numbers and UUID are stable,
+ * meaning that they will never be changed even by offline operations
+ * such as filesystem shrinking and therefore can be used in the
+ * encryption without the possibility of files becoming unreadable.
+ *
+ * Filesystems only need to implement this function if they want to
+ * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
+ * flags are designed to work around the limitations of UFS and eMMC
+ * inline crypto hardware, and they shouldn't be used in scenarios where
+ * such hardware isn't being used.
+ *
+ * Leaving this NULL is equivalent to always returning false.
+ */
bool (*has_stable_inodes)(struct super_block *sb);
- void (*get_ino_and_lblk_bits)(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret);
- int (*get_num_devices)(struct super_block *sb);
- void (*get_devices)(struct super_block *sb,
- struct request_queue **devs);
+
+ /*
+ * Return an array of pointers to the block devices to which the
+ * filesystem may write encrypted file contents, NULL if the filesystem
+ * only has a single such block device, or an ERR_PTR() on error.
+ *
+ * On successful non-NULL return, *num_devs is set to the number of
+ * devices in the returned array. The caller must free the returned
+ * array using kfree().
+ *
+	 * If the filesystem can use multiple block devices for encrypted file
+	 * contents (not counting devices that never store such contents, e.g.
+	 * external journal devices), and it wants to support inline
+	 * encryption, then it must implement this function.  Otherwise it's
+	 * not needed.
+ */
+ struct block_device **(*get_devices)(struct super_block *sb,
+ unsigned int *num_devs);
};
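/*
 * Illustrative sketch, not part of the patch: a minimal fscrypt_operations
 * instance for a hypothetical filesystem "myfs" that stores the context in
 * an xattr; all myfs_* callbacks are assumptions, not real APIs.
 */
static const struct fscrypt_operations myfs_cryptops = {
	.needs_bounce_pages	= 1,	/* uses fscrypt_encrypt_pagecache_blocks() */
	.get_context		= myfs_get_context,
	.set_context		= myfs_set_context,
	.empty_dir		= myfs_empty_dir,
};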
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
+
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
/*
- * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info().
+ * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
* I.e., another task may publish ->i_crypt_info concurrently, executing
* a RELEASE barrier. We need to use smp_load_acquire() here to safely
* ACQUIRE the memory the other task published.
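/*
 * Illustrative sketch, not part of the patch: the publishing side in
 * fs/crypto/ conceptually does
 *
 *	if (cmpxchg_release(&inode->i_crypt_info, NULL, ci) == NULL)
 *		// 'ci' is now visible to other tasks
 *
 * which is why this reader must use smp_load_acquire() on ->i_crypt_info
 * to observe a fully initialised structure.
 */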
@@ -101,24 +222,88 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
-static inline const union fscrypt_context *
-fscrypt_get_dummy_context(struct super_block *sb)
+/*
+ * When d_splice_alias() moves a directory's no-key alias to its
+ * plaintext alias as a result of the encryption key being added,
+ * DCACHE_NOKEY_NAME must be cleared and there might be an opportunity
+ * to disable d_revalidate. Note that we don't have to support the
+ * inverse operation because fscrypt doesn't allow no-key names to be
+ * the source or target of a rename().
+ */
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
- if (!sb->s_cop->get_dummy_context)
- return NULL;
- return sb->s_cop->get_dummy_context(sb);
+ /*
+ * VFS calls fscrypt_handle_d_move even for non-fscrypt
+ * filesystems.
+ */
+ if (dentry->d_flags & DCACHE_NOKEY_NAME) {
+ dentry->d_flags &= ~DCACHE_NOKEY_NAME;
+
+ /*
+ * Other filesystem features might be handling dentry
+ * revalidation, in which case it cannot be disabled.
+ */
+ if (dentry->d_op->d_revalidate == fscrypt_d_revalidate)
+ dentry->d_flags &= ~DCACHE_OP_REVALIDATE;
+ }
}
-/*
- * When d_splice_alias() moves a directory's encrypted alias to its decrypted
- * alias as a result of the encryption key being added, DCACHE_ENCRYPTED_NAME
- * must be cleared. Note that we don't have to support arbitrary moves of this
- * flag because fscrypt doesn't allow encrypted aliases to be the source or
- * target of a rename().
+/**
+ * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
+ * @dentry: the dentry to check
+ *
+ * This returns true if the dentry is a no-key dentry. A no-key dentry is a
+ * dentry that was created in an encrypted directory that hasn't had its
+ * encryption key added yet. Such dentries may be either positive or negative.
+ *
+ * When a filesystem is asked to create a new filename in an encrypted directory
+ * and the new filename's dentry is a no-key dentry, it must fail the operation
+ * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
+ * ->rename(), and ->link(). (However, ->rename() and ->link() are already
+ * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
+ *
+ * This is necessary because creating a filename requires the directory's
+ * encryption key, but just checking for the key on the directory inode during
+ * the final filesystem operation doesn't guarantee that the key was available
+ * during the preceding dentry lookup.  The key must already have been
+ * available during the lookup, both so that the filesystem could check
+ * whether the filename already exists in the directory and so that the new
+ * file's dentry is not invalidated for incorrectly carrying the no-key flag.
+ *
+ * Return: %true if the dentry is a no-key name
*/
-static inline void fscrypt_handle_d_move(struct dentry *dentry)
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_NOKEY_NAME;
+}
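/*
 * Illustrative sketch, not part of the patch: a filesystem's ->create()
 * would typically begin with this check (myfs_create() and its body are
 * hypothetical, and the signature matches older kernels without idmapped
 * mount arguments):
 */
static int myfs_create(struct inode *dir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	/* ... proceed with normal file creation ... */
	return 0;
}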
+
+static inline void fscrypt_prepare_dentry(struct dentry *dentry,
+ bool is_nokey_name)
{
- dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
+ /*
+ * This code tries to only take ->d_lock when necessary to write
+	 * to ->d_flags. We shouldn't be peeking at d_flags for
+	 * DCACHE_OP_REVALIDATE unlocked, but in the unlikely case
+	 * there is a race, the worst that can happen is that we fail to
+ * unset DCACHE_OP_REVALIDATE and pay the cost of an extra
+ * d_revalidate.
+ */
+ if (is_nokey_name) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dentry->d_lock);
+ } else if (dentry->d_flags & DCACHE_OP_REVALIDATE &&
+ dentry->d_op->d_revalidate == fscrypt_d_revalidate) {
+ /*
+ * Unencrypted dentries and encrypted dentries where the
+ * key is available are always valid from fscrypt
+ * perspective. Avoid the cost of calling
+ * fscrypt_d_revalidate unnecessarily.
+ */
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_OP_REVALIDATE;
+ spin_unlock(&dentry->d_lock);
+ }
}
/* crypto.c */
@@ -132,8 +317,8 @@ int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num, gfp_t gfp_flags);
-int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
- unsigned int offs);
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+ size_t offs);
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num);
@@ -148,6 +333,16 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return (struct page *)page_private(bounce_page);
}
+static inline bool fscrypt_is_bounce_folio(struct folio *folio)
+{
+ return folio->mapping == NULL;
+}
+
+static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
+{
+ return bounce_folio->private;
+}
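/*
 * Illustrative sketch, not part of the patch: a write-completion path can
 * map a bounce folio back to the pagecache folio it encrypts:
 *
 *	if (fscrypt_is_bounce_folio(folio))
 *		folio = fscrypt_pagecache_folio(folio);
 */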
+
void fscrypt_free_bounce_page(struct page *bounce_page);
/* policy.c */
@@ -156,39 +351,50 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
-int fscrypt_inherit_context(struct inode *parent, struct inode *child,
- void *fs_data, bool preload);
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
+int fscrypt_set_context(struct inode *inode, void *fs_data);
-struct fscrypt_dummy_context {
- const union fscrypt_context *ctx;
+struct fscrypt_dummy_policy {
+ const union fscrypt_policy *policy;
};
-int fscrypt_set_test_dummy_encryption(struct super_block *sb,
- const substring_t *arg,
- struct fscrypt_dummy_context *dummy_ctx);
+int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy);
+bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2);
void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
struct super_block *sb);
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return dummy_policy->policy != NULL;
+}
static inline void
-fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx)
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
{
- kfree(dummy_ctx->ctx);
- dummy_ctx->ctx = NULL;
+ kfree(dummy_policy->policy);
+ dummy_policy->policy = NULL;
}
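/*
 * Illustrative sketch, not part of the patch: parsing the
 * test_dummy_encryption mount option with the fs_parameter-based API.
 * myfs_fs_context, myfs_param_specs and Opt_test_dummy_encryption are
 * assumptions, not real identifiers.
 */
static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct myfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, myfs_param_specs, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_test_dummy_encryption:
		return fscrypt_parse_test_dummy_encryption(param,
							   &ctx->dummy_policy);
	}
	return 0;
}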
/* keyring.c */
-void fscrypt_sb_free(struct super_block *sb);
+void fscrypt_destroy_keyring(struct super_block *sb);
int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg);
/* keysetup.c */
-int fscrypt_get_encryption_info(struct inode *inode);
+int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
+ bool *encrypt_ret);
void fscrypt_put_encryption_info(struct inode *inode);
void fscrypt_free_inode(struct inode *inode);
int fscrypt_drop_inode(struct inode *inode);
/* fname.c */
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret);
int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
int lookup, struct fscrypt_name *fname);
@@ -197,7 +403,7 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
kfree(fname->crypto_buf.name);
}
-int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 max_encrypted_len,
+int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
struct fscrypt_str *crypto_str);
void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str);
int fscrypt_fname_disk_to_usr(const struct inode *inode,
@@ -209,7 +415,7 @@ bool fscrypt_match_name(const struct fscrypt_name *fname,
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
/* bio.c */
-void fscrypt_decrypt_bio(struct bio *bio);
+bool fscrypt_decrypt_bio(struct bio *bio);
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len);
@@ -222,16 +428,20 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned int flags);
int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname);
+int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry);
+int __fscrypt_prepare_readdir(struct inode *dir);
+int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags);
-int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link);
+int fscrypt_prepare_symlink(struct inode *dir, const char *target,
+ unsigned int len, unsigned int max_len,
+ struct fscrypt_str *disk_link);
int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
unsigned int len, struct fscrypt_str *disk_link);
const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done);
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@@ -239,7 +449,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
return NULL;
}
@@ -249,13 +460,17 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
return false;
}
-static inline const union fscrypt_context *
-fscrypt_get_dummy_context(struct super_block *sb)
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
- return NULL;
}
-static inline void fscrypt_handle_d_move(struct dentry *dentry)
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return false;
+}
+
+static inline void fscrypt_prepare_dentry(struct dentry *dentry,
+ bool is_nokey_name)
{
}
@@ -281,9 +496,8 @@ static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
return -EOPNOTSUPP;
}
-static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs)
+static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs)
{
return -EOPNOTSUPP;
}
@@ -307,6 +521,17 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return ERR_PTR(-EINVAL);
}
+static inline bool fscrypt_is_bounce_folio(struct folio *folio)
+{
+ return false;
+}
+
+static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
static inline void fscrypt_free_bounce_page(struct page *bounce_page)
{
}
@@ -340,29 +565,47 @@ static inline int fscrypt_has_permitted_context(struct inode *parent,
return 0;
}
-static inline int fscrypt_inherit_context(struct inode *parent,
- struct inode *child,
- void *fs_data, bool preload)
+static inline int fscrypt_set_context(struct inode *inode, void *fs_data)
{
return -EOPNOTSUPP;
}
-struct fscrypt_dummy_context {
+struct fscrypt_dummy_policy {
};
+static inline int
+fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy)
+{
+ return -EINVAL;
+}
+
+static inline bool
+fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2)
+{
+ return true;
+}
+
static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq,
char sep,
struct super_block *sb)
{
}
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return false;
+}
+
static inline void
-fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx)
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
{
}
/* keyring.c */
-static inline void fscrypt_sb_free(struct super_block *sb)
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
{
}
@@ -389,9 +632,14 @@ static inline int fscrypt_ioctl_get_key_status(struct file *filp,
}
/* keysetup.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
+
+static inline int fscrypt_prepare_new_inode(struct inode *dir,
+ struct inode *inode,
+ bool *encrypt_ret)
{
- return -EOPNOTSUPP;
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ return 0;
}
static inline void fscrypt_put_encryption_info(struct inode *inode)
@@ -428,8 +676,7 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
return;
}
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 max_encrypted_len,
+static inline int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
return -EOPNOTSUPP;
@@ -464,9 +711,16 @@ static inline u64 fscrypt_fname_siphash(const struct inode *dir,
return 0;
}
+static inline int fscrypt_d_revalidate(struct dentry *dentry,
+ unsigned int flags)
+{
+ return 1;
+}
+
/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
+static inline bool fscrypt_decrypt_bio(struct bio *bio)
{
+ return true;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
@@ -506,6 +760,23 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int fscrypt_prepare_lookup_partial(struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_readdir(struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags,
unsigned int flags)
@@ -513,15 +784,21 @@ static inline int fscrypt_prepare_setflags(struct inode *inode,
return 0;
}
-static inline int __fscrypt_prepare_symlink(struct inode *dir,
- unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link)
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+ const char *target,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
{
- return -EOPNOTSUPP;
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ disk_link->name = (unsigned char *)target;
+ disk_link->len = len + 1;
+ if (disk_link->len > max_len)
+ return -ENAMETOOLONG;
+ return 0;
}
-
static inline int __fscrypt_encrypt_symlink(struct inode *inode,
const char *target,
unsigned int len,
@@ -538,6 +815,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline int fscrypt_symlink_getattr(const struct path *path,
+ struct kstat *stat)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@@ -564,6 +847,10 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh);
+bool fscrypt_dio_supported(struct inode *inode);
+
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
+
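/*
 * Illustrative sketch, not part of the patch: a direct I/O path would use
 * these helpers roughly as follows (everything but the two fscrypt calls
 * is hypothetical):
 *
 *	if (!fscrypt_dio_supported(inode))
 *		return -EOPNOTSUPP;	// caller falls back to buffered I/O
 *	nr_blocks = fscrypt_limit_io_blocks(inode, lblk, nr_blocks);
 */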
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
@@ -592,6 +879,17 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
{
return true;
}
+
+static inline bool fscrypt_dio_supported(struct inode *inode)
+{
+ return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
+ u64 nr_blocks)
+{
+ return nr_blocks;
+}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
/**
@@ -635,33 +933,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
*/
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return fscrypt_get_info(inode) != NULL;
-}
-
-/**
- * fscrypt_require_key() - require an inode's encryption key
- * @inode: the inode we need the key for
- *
- * If the inode is encrypted, set up its encryption key if not already done.
- * Then require that the key be present and return -ENOKEY otherwise.
- *
- * No locks are needed, and the key will live as long as the struct inode --- so
- * it won't go away from under you.
- *
- * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
- * if a problem occurred while setting up the encryption key.
- */
-static inline int fscrypt_require_key(struct inode *inode)
-{
- if (IS_ENCRYPTED(inode)) {
- int err = fscrypt_get_encryption_info(inode);
-
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
- return 0;
+ return fscrypt_get_inode_info(inode) != NULL;
}
/**
@@ -673,8 +945,7 @@ static inline int fscrypt_require_key(struct inode *inode)
*
* A new link can only be added to an encrypted directory if the directory's
* encryption key is available --- since otherwise we'd have no way to encrypt
- * the filename. Therefore, we first set up the directory's encryption key (if
- * not already done) and return an error if it's unavailable.
+ * the filename.
*
* We also verify that the link will not violate the constraint that all files
* in an encrypted directory tree use the same encryption policy.
@@ -734,17 +1005,20 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir,
* @fname: (output) the name to use to search the on-disk directory
*
* Prepare for ->lookup() in a directory which may be encrypted by determining
- * the name that will actually be used to search the directory on-disk. Lookups
- * can be done with or without the directory's encryption key; without the key,
- * filenames are presented in encrypted form. Therefore, we'll try to set up
- * the directory's encryption key, but even without it the lookup can continue.
+ * the name that will actually be used to search the directory on-disk. If the
+ * directory's encryption policy is supported by this kernel and its encryption
+ * key is available, then the lookup is assumed to be by plaintext name;
+ * otherwise, it is assumed to be by no-key name.
*
- * This also installs a custom ->d_revalidate() method which will invalidate the
- * dentry if it was created without the key and the key is later added.
+ * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key
+ * name. In this case the filesystem must assign the dentry a dentry_operations
+ * which contains fscrypt_d_revalidate (or contains a d_revalidate method that
+ * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the
+ * directory's encryption key is later added.
*
- * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a
- * correctly formed encoded ciphertext name, so a negative dentry should be
- * created; or another -errno code.
+ * Return: 0 on success; -ENOENT if the directory's key is unavailable but the
+ * filename isn't a valid no-key name, so a negative dentry should be created;
+ * or another -errno code.
*/
static inline int fscrypt_prepare_lookup(struct inode *dir,
struct dentry *dentry,
@@ -757,6 +1031,29 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
fname->usr_fname = &dentry->d_name;
fname->disk_name.name = (unsigned char *)dentry->d_name.name;
fname->disk_name.len = dentry->d_name.len;
+
+ fscrypt_prepare_dentry(dentry, false);
+
+ return 0;
+}
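/*
 * Illustrative sketch, not part of the patch: the dentry_operations that a
 * filesystem supporting encryption would typically register so that no-key
 * dentries are invalidated once the key is added:
 *
 *	static const struct dentry_operations myfs_dentry_ops = {
 *		.d_revalidate = fscrypt_d_revalidate,
 *	};
 */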
+
+/**
+ * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory
+ * @dir: the directory inode
+ *
+ * If the directory is encrypted and it doesn't already have its encryption key
+ * set up, try to set it up so that the filenames will be listed in plaintext
+ * form rather than in no-key form.
+ *
+ * Return: 0 on success; -errno on error. Note that the encryption key being
+ * unavailable is not considered an error. It is also not an error if
+ * the encryption policy is unsupported by this kernel; that is treated
+ * like the key being unavailable, so that files can still be deleted.
+ */
+static inline int fscrypt_prepare_readdir(struct inode *dir)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_readdir(dir);
return 0;
}
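/*
 * Illustrative sketch, not part of the patch: an ->iterate_shared()
 * implementation calls this first (myfs_readdir() is hypothetical):
 *
 *	static int myfs_readdir(struct file *file, struct dir_context *ctx)
 *	{
 *		int err = fscrypt_prepare_readdir(file_inode(file));
 *
 *		if (err)
 *			return err;
 *		// ... emit entries, using no-key names if the key is absent
 *	}
 */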
@@ -781,47 +1078,8 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
static inline int fscrypt_prepare_setattr(struct dentry *dentry,
struct iattr *attr)
{
- if (attr->ia_valid & ATTR_SIZE)
- return fscrypt_require_key(d_inode(dentry));
- return 0;
-}
-
-/**
- * fscrypt_prepare_symlink() - prepare to create a possibly-encrypted symlink
- * @dir: directory in which the symlink is being created
- * @target: plaintext symlink target
- * @len: length of @target excluding null terminator
- * @max_len: space the filesystem has available to store the symlink target
- * @disk_link: (out) the on-disk symlink target being prepared
- *
- * This function computes the size the symlink target will require on-disk,
- * stores it in @disk_link->len, and validates it against @max_len. An
- * encrypted symlink may be longer than the original.
- *
- * Additionally, @disk_link->name is set to @target if the symlink will be
- * unencrypted, but left NULL if the symlink will be encrypted. For encrypted
- * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
- * on-disk target later. (The reason for the two-step process is that some
- * filesystems need to know the size of the symlink target before creating the
- * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
- *
- * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
- * -ENOKEY if the encryption key is missing, or another -errno code if a problem
- * occurred while setting up the encryption key.
- */
-static inline int fscrypt_prepare_symlink(struct inode *dir,
- const char *target,
- unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link)
-{
- if (IS_ENCRYPTED(dir) || fscrypt_get_dummy_context(dir->i_sb) != NULL)
- return __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
-
- disk_link->name = (unsigned char *)target;
- disk_link->len = len + 1;
- if (disk_link->len > max_len)
- return -ENAMETOOLONG;
+ if (IS_ENCRYPTED(d_inode(dentry)))
+ return __fscrypt_prepare_setattr(dentry, attr);
return 0;
}
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
index d4cdc2aa6e33..7ee3dbd7f4b3 100644
--- a/include/linux/fsi-occ.h
+++ b/include/linux/fsi-occ.h
@@ -19,6 +19,8 @@ struct device;
#define OCC_RESP_CRIT_OCB 0xE3
#define OCC_RESP_CRIT_HW 0xE4
+#define OCC_MAX_RESP_WORDS 2048
+
int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
void *response, size_t *resp_len);
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index 2d9203314865..df25fffdc0ae 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -37,16 +37,27 @@ struct enetc_mdio_priv {
#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value);
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum);
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum,
+ u16 value);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
#else
-static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static inline int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id,
+ int regnum)
{ return -EINVAL; }
-static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+static inline int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id,
+ int regnum, u16 value)
+{ return -EINVAL; }
+static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
{ return -EINVAL; }
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 0ac27b233f12..fdb55ca47a4f 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
 * Freescale 85xx and 86xx Global Utilities register set
*
* Authors: Jeff Brown
@@ -14,7 +14,7 @@
#include <linux/types.h>
#include <linux/io.h>
-/**
+/*
* Global Utility Registers.
*
* Not all registers defined in this structure are available on all chips, so
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index a428c61ead6e..a1b3de87a3d1 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -3,6 +3,7 @@
* Freescale Management Complex (MC) bus public interface
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
* Author: German Rivera <German.Rivera@freescale.com>
*
*/
@@ -12,6 +13,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <uapi/linux/fsl_mc.h>
#define FSL_MC_VENDOR_FREESCALE 0x1957
@@ -30,6 +32,13 @@ struct fsl_mc_io;
* @shutdown: Function called at shutdown time to quiesce the device
* @suspend: Function called when a device is stopped
* @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use the kernel DMA API for DMA.
+ *		For most device drivers this flag can be ignored, as long as
+ *		all DMA is handled through the kernel DMA API. Some special
+ *		drivers, for example VFIO drivers, manage DMA themselves and
+ *		set this flag so that the IOMMU layer will allow them to set
+ *		up and manage their own I/O address space.
*
* Generic DPAA device driver object for device drivers that are registered
* with a DPRC bus. This structure is to be embedded in each device-specific
@@ -39,10 +48,11 @@ struct fsl_mc_driver {
struct device_driver driver;
const struct fsl_mc_device_id *match_id_table;
int (*probe)(struct fsl_mc_device *dev);
- int (*remove)(struct fsl_mc_device *dev);
+ void (*remove)(struct fsl_mc_device *dev);
void (*shutdown)(struct fsl_mc_device *dev);
int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
int (*resume)(struct fsl_mc_device *dev);
+ bool driver_managed_dma;
};
#define to_fsl_mc_driver(_drv) \
@@ -89,13 +99,13 @@ struct fsl_mc_resource {
/**
* struct fsl_mc_device_irq - MC object device message-based interrupt
- * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @virq: Linux virtual interrupt number
* @mc_dev: MC object device that owns this interrupt
* @dev_irq_index: device-relative IRQ index
* @resource: MC generic resource associated with the interrupt
*/
struct fsl_mc_device_irq {
- struct msi_desc *msi_desc;
+ unsigned int virq;
struct fsl_mc_device *mc_dev;
u8 dev_irq_index;
struct fsl_mc_resource resource;
@@ -148,6 +158,13 @@ struct fsl_mc_obj_desc {
*/
#define FSL_MC_IS_DPRC 0x0001
+/* Region flags */
+/* Indicates that region can be mapped as cacheable */
+#define FSL_MC_REGION_CACHEABLE 0x00000001
+
+/* Indicates that region can be mapped as shareable */
+#define FSL_MC_REGION_SHAREABLE 0x00000002
+
/**
* struct fsl_mc_device - MC object device object
* @dev: Linux driver model device object
@@ -161,6 +178,9 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
*
* Generic device object for MC object devices that are "attached" to a
* MC bus.
@@ -186,7 +206,7 @@ struct fsl_mc_device {
struct device dev;
u64 dma_mask;
u16 flags;
- u16 icid;
+ u32 icid;
u16 mc_handle;
struct fsl_mc_io *mc_io;
struct fsl_mc_obj_desc obj_desc;
@@ -194,13 +214,12 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
+ const char *driver_override;
};
#define to_fsl_mc_device(_dev) \
container_of(_dev, struct fsl_mc_device, dev)
-#define MC_CMD_NUM_OF_PARAMS 7
-
struct mc_cmd_header {
u8 src_id;
u8 flags_hw;
@@ -210,11 +229,6 @@ struct mc_cmd_header {
__le16 cmd_id;
};
-struct fsl_mc_command {
- __le64 header;
- __le64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -419,7 +433,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
-struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id);
extern struct bus_type fsl_mc_bus_type;
@@ -514,6 +529,35 @@ static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
}
+#define DPRC_RESET_OPTION_NON_RECURSIVE 0x00000001
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options);
+
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc);
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev);
+
+int dprc_setup(struct fsl_mc_device *mc_dev);
+
+/*
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev);
+
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
@@ -586,6 +630,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
/**
* struct dpcon_attr - Structure representing DPCON attributes
* @id: DPCON object ID
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 884b8f8ca06d..b301bf7199d3 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -136,6 +136,7 @@ struct ptp_qoriq_registers {
#define DEFAULT_TMR_PRSC 2
#define DEFAULT_FIPER1_PERIOD 1000000000
#define DEFAULT_FIPER2_PERIOD 1000000000
+#define DEFAULT_FIPER3_PERIOD 1000000000
struct ptp_qoriq {
void __iomem *base;
@@ -147,6 +148,8 @@ struct ptp_qoriq {
struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
+ bool fiper3_support;
+ bool etsec;
int irq;
int phc_index;
u32 tclk_period; /* nanoseconds */
@@ -155,6 +158,7 @@ struct ptp_qoriq {
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
+ u32 tmr_fiper3;
u32 (*read)(unsigned __iomem *addr);
void (*write)(unsigned __iomem *addr, u32 val);
};
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index f8acddcf54fb..1a9de119a0f7 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -17,6 +17,12 @@
#include <linux/slab.h>
#include <linux/bug.h>
+/* Are there any inode/mount/sb objects that are being watched at all? */
+static inline bool fsnotify_sb_has_watchers(struct super_block *sb)
+{
+ return atomic_long_read(&sb->s_fsnotify_connectors);
+}
+
/*
* Notify this @dir inode about a change in a child directory entry.
* The directory entry may have turned positive or negative or its inode may
@@ -26,21 +32,27 @@
* FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
* the child is interested and not the parent.
*/
-static inline void fsnotify_name(struct inode *dir, __u32 mask,
- struct inode *child,
- const struct qstr *name, u32 cookie)
+static inline int fsnotify_name(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ u32 cookie)
{
- fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
+ if (!fsnotify_sb_has_watchers(dir->i_sb))
+ return 0;
+
+ return fsnotify(mask, data, data_type, dir, name, NULL, cookie);
}
static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
__u32 mask)
{
- fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0);
+ fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0);
}
static inline void fsnotify_inode(struct inode *inode, __u32 mask)
{
+ if (!fsnotify_sb_has_watchers(inode->i_sb))
+ return;
+
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
@@ -53,6 +65,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
{
struct inode *inode = d_inode(dentry);
+ if (!fsnotify_sb_has_watchers(inode->i_sb))
+ return 0;
+
if (S_ISDIR(inode->i_mode)) {
mask |= FS_ISDIR;
@@ -77,43 +92,83 @@ notify_child:
*/
static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
{
- fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE);
+ fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
}
static inline int fsnotify_file(struct file *file, __u32 mask)
{
- const struct path *path = &file->f_path;
+ const struct path *path;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
+ path = &file->f_path;
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
-/* Simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
{
- int ret;
- __u32 fsnotify_mask = 0;
+ __u32 fsnotify_mask = FS_ACCESS_PERM;
+
+ /*
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
+ */
+ lockdep_assert_once(file_write_not_started(file));
- if (!(mask & (MAY_READ | MAY_OPEN)))
+ if (!(perm_mask & MAY_READ))
return 0;
- if (mask & MAY_OPEN) {
- fsnotify_mask = FS_OPEN_PERM;
+ return fsnotify_file(file, fsnotify_mask);
+}
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+/*
+ * fsnotify_file_perm - permission hook before file access
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
+
+/*
+ * fsnotify_open_perm - permission hook before file open
+ */
+static inline int fsnotify_open_perm(struct file *file)
+{
+ int ret;
- if (ret)
- return ret;
- }
- } else if (mask & MAY_READ) {
- fsnotify_mask = FS_ACCESS_PERM;
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+ if (ret)
+ return ret;
}
- return fsnotify_file(file, fsnotify_mask);
+ return fsnotify_file(file, FS_OPEN_PERM);
+}
+
+#else
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return 0;
+}
+
+static inline int fsnotify_open_perm(struct file *file)
+{
+ return 0;
}
+#endif
/*
* fsnotify_link_count - inode's link count changed
@@ -135,18 +190,23 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = FS_MOVED_FROM;
__u32 new_dir_mask = FS_MOVED_TO;
+ __u32 rename_mask = FS_RENAME;
const struct qstr *new_name = &moved->d_name;
- if (old_dir == new_dir)
- old_dir_mask |= FS_DN_RENAME;
-
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
+ rename_mask |= FS_ISDIR;
}
- fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie);
- fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie);
+ /* Event with information about both old and new parent+name */
+ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY,
+ old_dir, old_name, 0);
+
+ fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ old_dir, old_name, fs_cookie);
+ fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ new_dir, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
@@ -181,16 +241,22 @@ static inline void fsnotify_inoderemove(struct inode *inode)
/*
* fsnotify_create - 'name' was linked in
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_create(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE);
+ fsnotify_dirent(dir, dentry, FS_CREATE);
}
/*
* fsnotify_link - new hardlink in 'inode' directory
+ *
+ * Caller must make sure that new_dentry->d_name is stable.
* Note: We have to pass also the linked inode ptr as some filesystems leave
* new_dentry->d_inode NULL and instantiate inode pointer later
*/
@@ -200,7 +266,45 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
+ fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE,
+ dir, &new_dentry->d_name, 0);
+}
+
+/*
+ * fsnotify_delete - @dentry was unlinked and unhashed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ *
+ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
+ * as this may be called after d_delete() and old_dentry may be negative.
+ */
+static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
+ struct dentry *dentry)
+{
+ __u32 mask = FS_DELETE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
+ 0);
+}
+
+/**
+ * d_delete_notify - delete a dentry and call fsnotify_delete()
+ * @dir: the directory that contained @dentry
+ * @dentry: The dentry to delete
+ *
+ * This helper is used to guarantee that the unlinked inode cannot be found
+ * by a lookup of this name after the fsnotify_delete() event has been
+ * delivered.
+ */
+static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ ihold(inode);
+ d_delete(dentry);
+ fsnotify_delete(dir, inode, dentry);
+ iput(inode);
}
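/*
 * Illustrative sketch, not part of the patch: a filesystem that removes
 * the name on a server before unhashing the dentry itself could use this
 * helper (myfs_remove_on_server() is hypothetical):
 *
 *	err = myfs_remove_on_server(dir, dentry);
 *	if (!err)
 *		d_delete_notify(dir, dentry);
 */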
/*
@@ -210,20 +314,24 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
*/
static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
* fsnotify_mkdir - directory 'name' was created
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR);
+ fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR);
}
/*
@@ -233,10 +341,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -317,4 +425,17 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
fsnotify_dentry(dentry, mask);
}
+static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
+ int error)
+{
+ struct fs_error_report report = {
+ .error = error,
+ .inode = inode,
+ .sb = sb,
+ };
+
+ return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR,
+ NULL, NULL, NULL, 0);
+}
+
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index f8529a3a2923..8f40c349b228 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -19,6 +19,8 @@
#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/mempool.h>
+#include <linux/sched/mm.h>
/*
 * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
@@ -29,8 +31,8 @@
#define FS_ACCESS 0x00000001 /* File was accessed */
#define FS_MODIFY 0x00000002 /* File was modified */
#define FS_ATTRIB 0x00000004 /* Metadata changed */
-#define FS_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
-#define FS_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
+#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FS_OPEN 0x00000020 /* File was opened */
#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
@@ -42,13 +44,18 @@
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
+
+/*
+ * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
+ * which does not support FS_ERROR.
+ */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM		0x00010000	/* open event in a permission hook */
#define FS_ACCESS_PERM		0x00020000	/* access event in a permission hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
-#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
/*
* Set on inode mark that cares about things that happen to its children.
* Always set for dnotify and inotify.
@@ -56,10 +63,9 @@
*/
#define FS_EVENT_ON_CHILD 0x08000000
-#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_RENAME 0x10000000 /* File was renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
-#define FS_IN_ONESHOT 0x80000000 /* only send event once */
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
@@ -69,7 +75,7 @@
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
-#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
FS_OPEN_EXEC_PERM)
@@ -94,12 +100,12 @@
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
- FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \
- FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED)
+ FS_DELETE_SELF | FS_MOVE_SELF | \
+ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
+ FS_ERROR)
/* Extra flags that may be reported with event or control handling of events */
-#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
- FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+#define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT)
#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
@@ -136,7 +142,9 @@ struct mem_cgroup;
* @dir: optional directory associated with event -
* if @file_name is not NULL, this is the directory that
* @file_name is relative to.
+ * Either @inode or @dir must be non-NULL.
* @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
*
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
@@ -151,10 +159,10 @@ struct fsnotify_ops {
struct fsnotify_iter_info *iter_info);
int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask,
struct inode *inode, struct inode *dir,
- const struct qstr *file_name);
+ const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
- void (*free_event)(struct fsnotify_event *event);
+ void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
@@ -166,7 +174,6 @@ struct fsnotify_ops {
*/
struct fsnotify_event {
struct list_head list;
- unsigned long objectid; /* identifier for queue merges */
};
/*
@@ -204,11 +211,14 @@ struct fsnotify_group {
unsigned int priority;
bool shutdown; /* group is being shut down, don't queue more events */
+#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
+#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
+#define FSNOTIFY_GROUP_NOFS 0x04 /* group lock is not direct reclaim safe */
+ int flags;
+ unsigned int owner_flags; /* stored flags of mark_mutex owner */
+
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
- atomic_t num_marks; /* 1 for each mark and 1 for not being
- * past the point of no return when freeing
- * a group */
atomic_t user_waits; /* Number of tasks waiting for user
* response */
struct list_head marks_list; /* all inode marks for this group */
@@ -233,23 +243,58 @@ struct fsnotify_group {
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
+ /* Hash table of events for merge */
+ struct hlist_head *merge_hash;
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
- unsigned int max_marks;
- struct user_struct *user;
+ struct ucounts *ucounts;
+ mempool_t error_events_pool;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
+/*
+ * These helpers are used to prevent deadlock when reclaiming inodes with
+ * evictable marks of the same group that is allocating a new mark.
+ */
+static inline void fsnotify_group_lock(struct fsnotify_group *group)
+{
+ mutex_lock(&group->mark_mutex);
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ group->owner_flags = memalloc_nofs_save();
+}
+
+static inline void fsnotify_group_unlock(struct fsnotify_group *group)
+{
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ memalloc_nofs_restore(group->owner_flags);
+ mutex_unlock(&group->mark_mutex);
+}
+
+static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
+{
+ WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+}
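/*
 * Illustrative sketch, not part of the patch: mark add/remove paths wrap
 * the mutex with these helpers, so a FSNOTIFY_GROUP_NOFS group gets
 * GFP_NOFS allocation semantics while the lock is held:
 *
 *	fsnotify_group_lock(group);
 *	// allocate/attach marks; direct reclaim into the filesystem is
 *	// now disabled for NOFS groups, avoiding the deadlock above
 *	fsnotify_group_unlock(group);
 */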
+
/* When calling fsnotify tell it if the data is a path or inode */
enum fsnotify_data_type {
FSNOTIFY_EVENT_NONE,
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
+ FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_ERROR,
+};
+
+struct fs_error_report {
+ int error;
+ struct inode *inode;
+ struct super_block *sb;
};
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
@@ -257,8 +302,25 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
switch (data_type) {
case FSNOTIFY_EVENT_INODE:
return (struct inode *)data;
+ case FSNOTIFY_EVENT_DENTRY:
+ return d_inode(data);
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *)data)->inode;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_DENTRY:
+ /* Non const is needed for dget() */
+ return (struct dentry *)data;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry;
default:
return NULL;
}
@@ -275,67 +337,124 @@ static inline const struct path *fsnotify_data_path(const void *data,
}
}
+static inline struct super_block *fsnotify_data_sb(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return ((struct inode *)data)->i_sb;
+ case FSNOTIFY_EVENT_DENTRY:
+ return ((struct dentry *)data)->d_sb;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *) data)->sb;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct fs_error_report *fsnotify_data_error_report(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_ERROR:
+ return (struct fs_error_report *) data;
+ default:
+ return NULL;
+ }
+}
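/*
 * Illustrative sketch, not part of the patch: an event handler receiving
 * (data, data_type) can extract the FS_ERROR payload like this:
 *
 *	const struct fs_error_report *report =
 *		fsnotify_data_error_report(data, data_type);
 *
 *	if (report)
 *		pr_debug("fs error %d on %s\n", report->error,
 *			 report->sb->s_id);
 */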
+
+/*
+ * Index to merged marks iterator array that correlates to a type of watch.
+ * The type of watched object can be deduced from the iterator type, but not
+ * the other way around, because an event can match different watched objects
+ * of the same object type.
+ * For example, both parent and child are watching an object of type inode.
+ */
+enum fsnotify_iter_type {
+ FSNOTIFY_ITER_TYPE_INODE,
+ FSNOTIFY_ITER_TYPE_VFSMOUNT,
+ FSNOTIFY_ITER_TYPE_SB,
+ FSNOTIFY_ITER_TYPE_PARENT,
+ FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_COUNT
+};
+
+/* The type of object that a mark is attached to */
enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_ANY = -1,
FSNOTIFY_OBJ_TYPE_INODE,
- FSNOTIFY_OBJ_TYPE_CHILD,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
-#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
-#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD)
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
-#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
-#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
-
-static inline bool fsnotify_valid_obj_type(unsigned int type)
+static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
{
- return (type < FSNOTIFY_OBJ_TYPE_COUNT);
+ return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
}
struct fsnotify_iter_info {
- struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
+ struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
+ struct fsnotify_group *current_group;
unsigned int report_mask;
int srcu_idx;
};
static inline bool fsnotify_iter_should_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- return (iter_info->report_mask & (1U << type));
+ return (iter_info->report_mask & (1U << iter_type));
}
static inline void fsnotify_iter_set_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ iter_info->report_mask |= (1U << iter_type);
+}
+
+static inline struct fsnotify_mark *fsnotify_iter_mark(
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- iter_info->report_mask |= (1U << type);
+ if (fsnotify_iter_should_report_type(iter_info, iter_type))
+ return iter_info->marks[iter_type];
+ return NULL;
}
-static inline void fsnotify_iter_set_report_type_mark(
- struct fsnotify_iter_info *iter_info, int type,
- struct fsnotify_mark *mark)
+static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
+ struct fsnotify_mark **markp)
{
- iter_info->marks[type] = mark;
- iter_info->report_mask |= (1U << type);
+ while (type < FSNOTIFY_ITER_TYPE_COUNT) {
+ *markp = fsnotify_iter_mark(iter, type);
+ if (*markp)
+ break;
+ type++;
+ }
+ return type;
}
#define FSNOTIFY_ITER_FUNCS(name, NAME) \
static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
struct fsnotify_iter_info *iter_info) \
{ \
- return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
- iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
+ return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \
}
FSNOTIFY_ITER_FUNCS(inode, INODE)
-FSNOTIFY_ITER_FUNCS(child, CHILD)
+FSNOTIFY_ITER_FUNCS(parent, PARENT)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
-#define fsnotify_foreach_obj_type(type) \
- for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_type(type) \
+ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
+ for (type = 0; \
+ type = fsnotify_iter_step(iter, type, &mark), \
+ type < FSNOTIFY_ITER_TYPE_COUNT; \
+ type++)
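
An iteration sketch: the loop below visits only the iterator slots whose report bit is set, because fsnotify_iter_step() skips empty slots (iter_info is assumed to have been populated by the core):

static void example_walk_marks(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	int type;

	fsnotify_foreach_iter_mark_type(iter_info, mark, type)
		pr_debug("iter type %d: mark mask %x\n", type, mark->mask);
}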
/*
* fsnotify_connp_t is what we embed in objects which connector can be attached
@@ -353,9 +472,8 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned short type; /* Type of object [lock] */
-#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
+#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
- __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
/* Object pointer [lock] */
fsnotify_connp_t *obj;
@@ -398,11 +516,20 @@ struct fsnotify_mark {
struct hlist_node obj_list;
/* Head of list of marks for an object [mark ref] */
struct fsnotify_mark_connector *connector;
- /* Events types to ignore [mark->lock, group->mark_mutex] */
- __u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
+ /* Events types and flags to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignore_mask;
+ /* General fsnotify mark flags */
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x0001
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002
+ /* inotify mark flags */
+#define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010
+#define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020
+ /* fanotify mark flags */
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
+#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
+#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
+#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
+#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
@@ -468,7 +595,9 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
-extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(
+ const struct fsnotify_ops *ops,
+ int flags);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
@@ -483,17 +612,39 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_event(struct fsnotify_group *group,
- struct fsnotify_event *event,
- int (*merge)(struct list_head *,
- struct fsnotify_event *));
+extern int fsnotify_insert_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *),
+ void (*insert)(struct fsnotify_group *,
+ struct fsnotify_event *));
+
+static inline int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *))
+{
+ return fsnotify_insert_event(group, event, merge, NULL);
+}
+
/* Queue overflow event to a notification group */
static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
{
fsnotify_add_event(group, group->overflow_event, NULL);
}
-/* true if the group notification queue is empty */
+static inline bool fsnotify_is_overflow_event(u32 mask)
+{
+ return mask & FS_Q_OVERFLOW;
+}
+
+static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
+{
+ assert_spin_locked(&group->notification_lock);
+
+ return list_empty(&group->notification_list);
+}
+
-extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
@@ -505,6 +656,101 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
/* functions used to manipulate the marks attached to inodes */
+/*
+ * Canonical "ignore mask" including event flags.
+ *
+ * Note the subtle semantic difference from the legacy ->ignored_mask.
+ * ->ignored_mask traditionally only meant which events should be ignored,
+ * while ->ignore_mask also includes flags regarding the type of objects on
+ * which events should be ignored.
+ */
+static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
+{
+ __u32 ignore_mask = mark->ignore_mask;
+
+ /* The event flags in ignore mask take effect */
+ if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+ return ignore_mask;
+
+ /*
+ * Legacy behavior:
+ * - Always ignore events on dir
+ * - Ignore events on child if parent is watching children
+ */
+ ignore_mask |= FS_ISDIR;
+ ignore_mask &= ~FS_EVENT_ON_CHILD;
+ ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
+
+ return ignore_mask;
+}
+
+/* Legacy ignored_mask - only event types to ignore */
+static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
+{
+ return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/*
+ * Check if mask (or ignore mask) should be applied depending if victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
+ int iter_type)
+{
+ /* Should mask be applied to a directory? */
+ if (is_dir && !(mask & FS_ISDIR))
+ return false;
+
+ /* Should mask be applied to a child? */
+ if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
+ !(mask & FS_EVENT_ON_CHILD))
+ return false;
+
+ return true;
+}
+
+/*
+ * Effective ignore mask taking into account if event victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
+ bool is_dir, int iter_type)
+{
+ __u32 ignore_mask = fsnotify_ignored_events(mark);
+
+ if (!ignore_mask)
+ return 0;
+
+ /* For non-dir and non-child, no need to consult the event flags */
+ if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
+ return ignore_mask;
+
+ ignore_mask = fsnotify_ignore_mask(mark);
+ if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
+ return 0;
+
+ return ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/* Get mask for calculating object interest taking ignore mask into account */
+static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
+{
+ __u32 mask = mark->mask;
+
+ if (!fsnotify_ignored_events(mark))
+ return mask;
+
+ /* Interest in FS_MODIFY may be needed for clearing ignore mask */
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mask |= FS_MODIFY;
+
+ /*
+ * If mark is interested in ignoring events on children, the object must
+ * show interest in those events for fsnotify_parent() to notice it.
+ */
+ return mask | mark->ignore_mask;
+}
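
A worked sketch of how the helpers above combine (the mask values are illustrative): a mark that ignores FS_OPEN on children, without IGNORED_SURV_MODIFY, yields an object interest that adds FS_MODIFY and merges in the ignore mask:

static __u32 example_object_interest(struct fsnotify_mark *mark)
{
	/* Illustrative setup: ignore FS_OPEN events, also on children. */
	mark->mask = FS_CLOSE_WRITE | FS_EVENT_ON_CHILD;
	mark->ignore_mask = FS_OPEN | FS_EVENT_ON_CHILD;
	mark->flags |= FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS;

	/*
	 * Returns FS_CLOSE_WRITE | FS_EVENT_ON_CHILD | FS_MODIFY | FS_OPEN:
	 * FS_MODIFY is added so modify events can clear the ignore mask,
	 * and the ignore mask is merged so fsnotify_parent() sees interest.
	 */
	return fsnotify_calc_mask(mark);
}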
+
/* Get mask of events for a list of marks */
extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
@@ -514,33 +760,28 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark,
/* Find mark belonging to given group in the list of marks */
extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
struct fsnotify_group *group);
-/* Get cached fsid of filesystem containing object */
-extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
- __kernel_fsid_t *fsid);
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
- int allow_dups, __kernel_fsid_t *fsid);
+ fsnotify_connp_t *connp, unsigned int obj_type,
+ int add_flags);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp,
- unsigned int type, int allow_dups,
- __kernel_fsid_t *fsid);
+ unsigned int obj_type, int add_flags);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
- int allow_dups)
+ int add_flags)
{
return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
- int allow_dups)
+ int add_flags)
{
return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups,
- NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
/* given a group and a mark, flag mark to be freed when all references are dropped */
@@ -552,33 +793,32 @@ extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* Wait until all marks queued for destruction are destroyed */
extern void fsnotify_wait_marks_destroyed(void);
-/* run all the marks in a group, and clear all of the marks attached to given object type */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
+/* Clear all of the marks of a group attached to a given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
+ unsigned int obj_type);
/* run all the marks in a group, and clear all of the vfsmount marks */
static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
}
/* run all the marks in a group, and clear all of the inode marks */
static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
}
/* run all the marks in a group, and clear all of the sb marks */
static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
-static inline void fsnotify_init_event(struct fsnotify_event *event,
- unsigned long objectid)
+static inline void fsnotify_init_event(struct fsnotify_event *event)
{
INIT_LIST_HEAD(&event->list);
- event->objectid = objectid;
}
#else
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index c1144a450392..1eb7eae580be 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -12,8 +12,20 @@
#define _LINUX_FSVERITY_H
#include <linux/fs.h>
+#include <linux/mm.h>
+#include <crypto/hash_info.h>
+#include <crypto/sha2.h>
#include <uapi/linux/fsverity.h>
+/*
+ * Largest digest size among all hash algorithms supported by fs-verity.
+ * Currently assumed to be <= size of fsverity_descriptor::root_hash.
+ */
+#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
/* Verity operations for filesystems */
struct fsverity_operations {
@@ -82,8 +94,7 @@ struct fsverity_operations {
* isn't already cached. Implementations may ignore this
* argument; it's only a performance optimization.
*
- * This can be called at any time on an open verity file, as well as
- * between ->begin_enable_verity() and ->end_enable_verity(). It may be
+ * This can be called at any time on an open verity file. It may be
* called by multiple processes concurrently, even with the same page.
*
* Note that this must retrieve a *page*, not necessarily a *block*.
@@ -98,9 +109,9 @@ struct fsverity_operations {
* Write a Merkle tree block to the given inode.
*
* @inode: the inode for which the Merkle tree is being built
- * @buf: block to write
- * @index: 0-based index of the block within the Merkle tree
- * @log_blocksize: log base 2 of the Merkle tree block size
+ * @buf: the Merkle tree block to write
+ * @pos: the position of the block in the Merkle tree (in bytes)
+ * @size: the Merkle tree block size (in bytes)
*
* This is only called between ->begin_enable_verity() and
* ->end_enable_verity().
@@ -108,7 +119,7 @@ struct fsverity_operations {
* Return: 0 on success, -errno on failure
*/
int (*write_merkle_tree_block)(struct inode *inode, const void *buf,
- u64 index, int log_blocksize);
+ u64 pos, unsigned int size);
};
#ifdef CONFIG_FS_VERITY
@@ -131,16 +142,35 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
/* measure.c */
int fsverity_ioctl_measure(struct file *filp, void __user *arg);
+int fsverity_get_digest(struct inode *inode,
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg);
/* open.c */
-int fsverity_file_open(struct inode *inode, struct file *filp);
-int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
-void fsverity_cleanup_inode(struct inode *inode);
+int __fsverity_file_open(struct inode *inode, struct file *filp);
+int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+void __fsverity_cleanup_inode(struct inode *inode);
+
+/**
+ * fsverity_cleanup_inode() - free the inode's verity info, if present
+ * @inode: an inode being evicted
+ *
+ * Filesystems must call this on inode eviction to free ->i_verity_info.
+ */
+static inline void fsverity_cleanup_inode(struct inode *inode)
+{
+ if (inode->i_verity_info)
+ __fsverity_cleanup_inode(inode);
+}
+
+/* read_metadata.c */
+
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-bool fsverity_verify_page(struct page *page);
+bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
void fsverity_verify_bio(struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
@@ -166,50 +196,80 @@ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
return -EOPNOTSUPP;
}
+static inline int fsverity_get_digest(struct inode *inode,
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg)
+{
+ /*
+ * fsverity is not enabled in the kernel configuration, so always report
+ * that the file doesn't have fsverity enabled (digest size 0).
+ */
+ return 0;
+}
+
/* open.c */
-static inline int fsverity_file_open(struct inode *inode, struct file *filp)
+static inline int __fsverity_file_open(struct inode *inode, struct file *filp)
{
- return IS_VERITY(inode) ? -EOPNOTSUPP : 0;
+ return -EOPNOTSUPP;
}
-static inline int fsverity_prepare_setattr(struct dentry *dentry,
- struct iattr *attr)
+static inline int __fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
{
- return IS_VERITY(d_inode(dentry)) ? -EOPNOTSUPP : 0;
+ return -EOPNOTSUPP;
}
static inline void fsverity_cleanup_inode(struct inode *inode)
{
}
+/* read_metadata.c */
+
+static inline int fsverity_ioctl_read_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
/* verify.c */
-static inline bool fsverity_verify_page(struct page *page)
+static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+ size_t offset)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return false;
}
static inline void fsverity_verify_bio(struct bio *bio)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
static inline void fsverity_enqueue_verify_work(struct work_struct *work)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
#endif /* !CONFIG_FS_VERITY */
+static inline bool fsverity_verify_folio(struct folio *folio)
+{
+ return fsverity_verify_blocks(folio, folio_size(folio), 0);
+}
+
+static inline bool fsverity_verify_page(struct page *page)
+{
+ return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+}
+
/**
* fsverity_active() - do reads from the inode need to go through fs-verity?
* @inode: inode to check
*
* This checks whether ->i_verity_info has been set.
*
- * Filesystems call this from ->readpages() to check whether the pages need to
+ * Filesystems call this from ->readahead() to check whether the pages need to
* be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
* a race condition where the file is being read concurrently with
* FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
@@ -221,4 +281,42 @@ static inline bool fsverity_active(const struct inode *inode)
return fsverity_get_info(inode) != NULL;
}
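
A hedged sketch of the read-path pattern this supports; the completion hook is hypothetical, but the calls are the ones declared above:

static void example_read_done(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;

	/* S_VERITY alone is racy here; fsverity_active() is not. */
	if (fsverity_active(inode) && !fsverity_verify_folio(folio))
		return;		/* leave !uptodate, reported as an I/O error */

	folio_mark_uptodate(folio);
}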
+/**
+ * fsverity_file_open() - prepare to open a verity file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, deny the open if it is for writing. Otherwise,
+ * set up the inode's ->i_verity_info if not already done.
+ *
+ * When combined with fscrypt, this must be called after fscrypt_file_open().
+ * Otherwise, we won't have the key set up to decrypt the verity metadata.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_VERITY(inode))
+ return __fsverity_file_open(inode, filp);
+ return 0;
+}
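
For example, a filesystem ->open could chain the checks in the required order (a sketch; fscrypt_file_open() is the usual companion call, not part of this header):

static int example_file_open(struct inode *inode, struct file *filp)
{
	int err;

	/* Key setup must happen before the verity metadata can be read. */
	err = fscrypt_file_open(inode, filp);
	if (err)
		return err;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	return generic_file_open(inode, filp);
}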
+
+/**
+ * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Verity files are immutable, so deny truncates. This isn't covered by the
+ * open-time check because sys_truncate() takes a path, not a file descriptor.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (IS_VERITY(d_inode(dentry)))
+ return __fsverity_prepare_setattr(dentry, attr);
+ return 0;
+}
+
#endif /* _LINUX_FSVERITY_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ce2c06f72e86..54d53f345d14 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -7,7 +7,9 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
+#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
+#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -29,16 +31,46 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
+#ifdef CONFIG_TRACING
+extern void ftrace_boot_snapshot(void);
+#else
+static inline void ftrace_boot_snapshot(void) { }
+#endif
+
+struct ftrace_ops;
+struct ftrace_regs;
+struct dyn_ftrace;
+
+char *arch_ftrace_match_adjust(char *str, const char *search);
+
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+struct fgraph_ret_regs;
+unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
- * does. Or at least does enough to prevent any unwelcomed side effects.
+ * does. Or at least does enough to prevent any unwelcome side effects.
+ *
+ * Also define the function prototype that these architectures use
+ * to call the ftrace_ops_list_func().
*/
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
+extern const struct ftrace_ops ftrace_nop_ops;
+extern const struct ftrace_ops ftrace_list_ops;
+struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
+#endif /* CONFIG_FUNCTION_TRACER */
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
@@ -83,22 +115,68 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
-extern int
-ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-struct ftrace_ops;
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
+
+/*
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * if it allows setting the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
+ */
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ if (!fregs)
+ return NULL;
+
+ return arch_ftrace_get_regs(fregs);
+}
+
+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+ if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+ return true;
+
+ return ftrace_get_regs(fregs) != NULL;
+}
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(ftrace_get_regs(fregs), n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(ftrace_get_regs(fregs))
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(ftrace_get_regs(fregs), ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(ftrace_get_regs(fregs))
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#endif
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
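
A minimal registration sketch against the new signature (the names are illustrative; module boilerplate omitted):

static void example_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	/* regs is NULL unless full registers were saved for this call. */
	if (regs)
		pr_debug("traced %ps from %ps\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops example_ops = {
	.func	= example_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};
/* register_ftrace_function(&example_ops); from module init */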
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
- * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
* Changing those attribute flags after registering ftrace_ops will
@@ -121,10 +199,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* passing regs to the handler.
* Note, if this flag is set, the SAVE_REGS flag will automatically
* get set upon registering the ftrace_ops, if the arch supports it.
- * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
- * that the call back has its own recursion protection. If it does
- * not set this, then the ftrace infrastructure will add recursion
- * protection for the caller.
+ * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
+ * that the call back needs recursion protection. If it does
+ * not set this, then the ftrace infrastructure will assume
+ * that the callback can handle recursion on its own.
* STUB - The ftrace_ops is just a place holder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *               register_ftrace_function() is called, it will initialize the ops)
@@ -156,7 +234,7 @@ enum {
FTRACE_OPS_FL_DYNAMIC = BIT(1),
FTRACE_OPS_FL_SAVE_REGS = BIT(2),
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
- FTRACE_OPS_FL_RECURSION_SAFE = BIT(4),
+ FTRACE_OPS_FL_RECURSION = BIT(4),
FTRACE_OPS_FL_STUB = BIT(5),
FTRACE_OPS_FL_INITIALIZED = BIT(6),
FTRACE_OPS_FL_DELETED = BIT(7),
@@ -172,6 +250,49 @@ enum {
FTRACE_OPS_FL_DIRECT = BIT(17),
};
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
+#else
+#define FTRACE_OPS_FL_SAVE_ARGS 0
+#endif
+
+/*
+ * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
+ * to a ftrace_ops. Note, the requests may fail.
+ *
+ * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the DIRECT ops is being registered.
+ * This is called with both direct_mutex and
+ * ftrace_lock held.
+ *
+ * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being registered.
+ * This is called with direct_mutex locked.
+ *
+ * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being unregistered.
+ * This is called with direct_mutex locked.
+ */
+enum ftrace_ops_cmd {
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
+ FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
+};
+
+/*
+ * For most ftrace_ops_cmd values,
+ * Returns:
+ *        0 - Success.
+ *        Negative on failure. The exact return value depends on the
+ *        callback.
+ */
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
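
A hedged sketch of an ops_func for a DIRECT ops that can tolerate sharing functions with an IPMODIFY user; example_retarget() is a hypothetical helper:

/* Hypothetical helper that retargets this ops' trampoline entries. */
static int example_retarget(struct ftrace_ops *op, bool share_ipmodify);

static int example_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		return example_retarget(op, true);
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		return example_retarget(op, false);
	}
	return -EINVAL;
}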
+
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
@@ -183,7 +304,10 @@ struct ftrace_ops_hash {
void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
-static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_init_mem(void)
+{
+ ftrace_boot_snapshot();
+}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
@@ -211,6 +335,10 @@ struct ftrace_ops {
unsigned long trampoline;
unsigned long trampoline_size;
struct list_head list;
+ ftrace_ops_func_t ops_func;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_call;
+#endif
#endif
};
@@ -218,11 +346,11 @@ extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;
/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
+ * Traverse the ftrace_ops_list, invoking all entries. The reason that we
* can use rcu_dereference_raw_check() is that elements removed from this list
* are simply leaked, so there is no need to interact with a grace-period
* mechanism. The rcu_dereference_raw_check() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
+ * concurrent insertions into the ftrace_ops_list.
*
* Silly Alpha and silly pointer-speculation compiler optimizations!
*/
@@ -259,8 +387,10 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
* (un)register_ftrace_function must be a macro since the ops parameter
@@ -271,6 +401,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
+static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_FUNCTION_TRACER */
struct ftrace_func_entry {
@@ -279,52 +413,42 @@ struct ftrace_func_entry {
unsigned long direct; /* for direct lookup only */
};
-struct dyn_ftrace;
-
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
-int register_ftrace_direct(unsigned long ip, unsigned long addr);
-int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
-int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
-struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
-int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
- struct dyn_ftrace *rec,
- unsigned long old_addr,
- unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters);
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+
+void ftrace_stub_direct_tramp(void);
+
#else
+struct ftrace_ops;
# define ftrace_direct_func_count 0
-static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
-{
- return -ENOTSUPP;
-}
-static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
- return -ENOTSUPP;
+ return 0;
}
-static inline int modify_ftrace_direct(unsigned long ip,
- unsigned long old_addr, unsigned long new_addr)
+static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
- return -ENOTSUPP;
+ return -ENODEV;
}
-static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters)
{
- return NULL;
+ return -ENODEV;
}
-static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
- struct dyn_ftrace *rec,
- unsigned long old_addr,
- unsigned long new_addr)
+static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
return -ENODEV;
}
-static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
- return 0;
+ return -ENODEV;
}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
@@ -338,9 +462,9 @@ static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
* the return from the trampoline jump to the direct caller
* instead of going back to the function it just traced.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
unsigned long addr) { }
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#ifdef CONFIG_STACK_TRACER
@@ -365,7 +489,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
*/
static inline void stack_tracer_disable(void)
{
- /* Preemption or interupts must be disabled */
+ /* Preemption or interrupts must be disabled */
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_inc(disable_stack_tracer);
@@ -390,8 +514,8 @@ static inline void stack_tracer_enable(void) { }
#ifdef CONFIG_DYNAMIC_FTRACE
-int ftrace_arch_code_modify_prepare(void);
-int ftrace_arch_code_modify_post_process(void);
+void ftrace_arch_code_modify_prepare(void);
+void ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
@@ -431,9 +555,13 @@ bool is_ftrace_trampoline(unsigned long addr);
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
* DIRECT - there is a direct function to call
+ * CALL_OPS - the record can use callsite-specific ops
+ * CALL_OPS_EN - the function is set up to use callsite-specific ops
+ * TOUCHED - A callback was added since boot up
+ * MODIFIED - The function had IPMODIFY or DIRECT attached to it
*
* When a new ftrace_ops is registered and wants a function to save
- * pt_regs, the rec->flag REGS is set. When the function has been
+ * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
* starts saving regs it will do so until all ftrace_ops are removed
* from tracing that function.
@@ -448,15 +576,16 @@ enum {
FTRACE_FL_DISABLED = (1UL << 25),
FTRACE_FL_DIRECT = (1UL << 24),
FTRACE_FL_DIRECT_EN = (1UL << 23),
+ FTRACE_FL_CALL_OPS = (1UL << 22),
+ FTRACE_FL_CALL_OPS_EN = (1UL << 21),
+ FTRACE_FL_TOUCHED = (1UL << 20),
+ FTRACE_FL_MODIFIED = (1UL << 19),
};
-#define FTRACE_REF_MAX_SHIFT 23
-#define FTRACE_FL_BITS 9
-#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
-#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX_SHIFT 19
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
-#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
+#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
struct dyn_ftrace {
unsigned long ip; /* address of mcount call-site */
@@ -464,9 +593,10 @@ struct dyn_ftrace {
struct dyn_arch_ftrace arch;
};
-int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -511,6 +641,8 @@ enum {
FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_MOD = (1 << 5),
FTRACE_ITER_ENABLED = (1 << 6),
+ FTRACE_ITER_TOUCHED = (1 << 7),
+ FTRACE_ITER_ADDRS = (1 << 8),
};
void arch_ftrace_update_code(int command);
@@ -552,7 +684,6 @@ void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
-extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
@@ -623,6 +754,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
+/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ * Nonzero if ftrace_init_nop() should be called
+ * 0 if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
/**
* ftrace_init_nop - initialize a nop call site
@@ -674,7 +821,8 @@ static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
+ defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)
/**
* ftrace_modify_call - convert from one addr to another (no nop)
* @rec: the call site record (e.g. mcount/fentry)
@@ -687,6 +835,9 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
+ * When using call ops, this is called when the associated ops change, even
+ * when (addr == old_addr).
+ *
* The code segment at @rec->ip should be a caller to @old_addr
*
* Return must be:
@@ -707,21 +858,12 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
}
#endif
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
-
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
@@ -742,6 +884,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
@@ -760,6 +903,15 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef ftrace_graph_func
+#define ftrace_graph_func ftrace_stub
+#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+#else
+#define FTRACE_OPS_GRAPH_STUB 0
+#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
@@ -815,7 +967,7 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;
@@ -867,11 +1019,14 @@ struct ftrace_graph_ent {
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ unsigned long retval;
+#endif
+ int depth;
/* Number of functions that overran the depth limit for current task */
- unsigned long overrun;
+ unsigned int overrun;
unsigned long long calltime;
unsigned long long rettime;
- int depth;
} __packed;
/* Type of the callback handlers for tracing function graph */
@@ -937,7 +1092,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
-extern bool ftrace_graph_is_dead(void);
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * function graph tracing. ftrace_graph_is_dead() is checked on the
+ * critical paths of the function graph tracer to keep those paths from
+ * doing any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+ return static_branch_unlikely(&kill_ftrace_graph);
+}
+
extern void ftrace_graph_stop(void);
/* The current handlers in use */
@@ -981,50 +1149,11 @@ static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
-
-/* flags for current->trace */
-enum {
- TSK_TRACE_FL_TRACE_BIT = 0,
- TSK_TRACE_FL_GRAPH_BIT = 1,
-};
-enum {
- TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
- TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
enum ftrace_dump_mode;
-extern enum ftrace_dump_mode ftrace_dump_on_oops;
+#define MAX_TRACER_SIZE 100
+extern char ftrace_dump_on_oops[];
+extern int ftrace_dump_on_oops_enabled(void);
extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 0abd9a1d2852..f6faa31289ba 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -7,12 +7,21 @@ extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+extern bool trace_osnoise_callback_enabled;
+extern void trace_osnoise_callback(bool enter);
+#endif
+
static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(true);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(true);
+#endif
}
static inline void ftrace_nmi_exit(void)
@@ -21,6 +30,10 @@ static inline void ftrace_nmi_exit(void)
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(false);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(false);
+#endif
}
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644
index 000000000000..3ff4c277296f
--- /dev/null
+++ b/include/linux/fw_table.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fw_table.h - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
+ int count;
+};
+
+union fw_table_header {
+ struct acpi_table_header acpi;
+ struct acpi_table_cdat cdat;
+};
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+ struct acpi_cdat_header cdat;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+ union fw_table_header *table_header,
+ unsigned long max_length,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
+
+int cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg,
+ struct acpi_table_cdat *table_header,
+ unsigned long length);
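
A hedged consumer sketch: the handler signature is the acpi_tbl_entry_handler_arg above, while the table pointer, its length, and the ACPI_CDAT_TYPE_DSMAS subtable type are assumptions for illustration:

static int example_cdat_handler(union acpi_subtable_headers *header,
				void *arg, const unsigned long end)
{
	int *count = arg;

	(*count)++;
	pr_debug("CDAT subtable type %u\n", header->cdat.type);
	return 0;
}

/*
 * int count = 0;
 * cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, example_cdat_handler, &count,
 *		    cdat_table, cdat_length);
 */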
+
+/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
+#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_fwtbl_lib __init_or_acpilib
+#else
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL)
+#define __init_or_fwtbl_lib
+#endif
+
+#endif
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 9506f8ec0974..0d79070c5a70 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -9,15 +9,67 @@
#ifndef _LINUX_FWNODE_H_
#define _LINUX_FWNODE_H_
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/list.h>
#include <linux/types.h>
+enum dev_dma_attr {
+ DEV_DMA_NOT_SUPPORTED,
+ DEV_DMA_NON_COHERENT,
+ DEV_DMA_COHERENT,
+};
+
struct fwnode_operations;
struct device;
+/*
+ * fwnode flags
+ *
+ * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
+ * NOT_DEVICE: The fwnode will never be populated as a struct device.
+ * INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
+ * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some
+ * suppliers. Only enforce ordering with suppliers that have
+ * drivers.
+ */
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
+#define FWNODE_FLAG_BEST_EFFORT BIT(4)
+#define FWNODE_FLAG_VISITED BIT(5)
+
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+
+ /* The below is used solely by device links, don't use otherwise */
struct device *dev;
+ struct list_head suppliers;
+ struct list_head consumers;
+ u8 flags;
+};
+
+/*
+ * fwnode link flags
+ *
+ * CYCLE: The fwnode link is part of a cycle. Don't defer probe.
+ * IGNORE: Completely ignore this link, even during cycle detection.
+ */
+#define FWLINK_FLAG_CYCLE BIT(0)
+#define FWLINK_FLAG_IGNORE BIT(1)
+
+struct fwnode_link {
+ struct fwnode_handle *supplier;
+ struct list_head s_hook;
+ struct fwnode_handle *consumer;
+ struct list_head c_hook;
+ u8 flags;
};
/**
@@ -32,6 +84,13 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
+/*
+ * ports and endpoints defined as software_nodes should all follow a common
+ * naming scheme; use these macros to ensure commonality.
+ */
+#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
+#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
+
#define NR_FWNODE_REFERENCE_ARGS 8
/**
@@ -68,44 +127,8 @@ struct fwnode_reference_args {
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
- * @add_links: Called after the device corresponding to the fwnode is added
- * using device_add(). The function is expected to create device
- * links to all the suppliers of the device that are available at
- * the time this function is called. The function must NOT stop
- * at the first failed device link if other unlinked supplier
- * devices are present in the system. This is necessary for the
- * driver/bus sync_state() callbacks to work correctly.
- *
- * For example, say Device-C depends on suppliers Device-S1 and
- * Device-S2 and the dependency is listed in that order in the
- * firmware. Say, S1 gets populated from the firmware after
- * late_initcall_sync(). Say S2 is populated and probed way
- * before that in device_initcall(). When C is populated, if this
- * add_links() function doesn't continue past a "failed linking to
- * S1" and continue linking C to S2, then S2 will get a
- * sync_state() callback before C is probed. This is because from
- * the perspective of S2, C was never a consumer when its
- * sync_state() evaluation is done. To avoid this, the add_links()
- * function has to go through all available suppliers of the
- * device (that corresponds to this fwnode) and link to them
- * before returning.
- *
- * If some suppliers are not yet available (indicated by an error
- * return value), this function will be called again when other
- * devices are added to allow creating device links to any newly
- * available suppliers.
- *
- * Return 0 if device links have been successfully created to all
- * the known suppliers of this device or if the supplier
- * information is not known.
- *
- * Return -ENODEV if the suppliers needed for probing this device
- * have not been registered yet (because device links can only be
- * created to devices registered with the driver core).
- *
- * Return -EAGAIN if some of the suppliers of this device have not
- * been registered yet, but none of those suppliers are necessary
- * for probing the device.
+ * @add_links: Create fwnode links to all the suppliers of the fwnode. Return
+ * zero on success, a negative error code otherwise.
*/
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
@@ -113,6 +136,9 @@ struct fwnode_operations {
bool (*device_is_available)(const struct fwnode_handle *fwnode);
const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
const struct device *dev);
+ bool (*device_dma_supported)(const struct fwnode_handle *fwnode);
+ enum dev_dma_attr
+ (*device_get_dma_attr)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
@@ -145,16 +171,17 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
- int (*add_links)(const struct fwnode_handle *fwnode,
- struct device *dev);
+ void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index);
+ int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index);
+ int (*add_links)(struct fwnode_handle *fwnode);
};
-#define fwnode_has_op(fwnode, op) \
- ((fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+#define fwnode_has_op(fwnode, op) \
+ (!IS_ERR_OR_NULL(fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+
#define fwnode_call_int_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
- -EINVAL)
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : (IS_ERR_OR_NULL(fwnode) ? -EINVAL : -ENXIO))
#define fwnode_call_bool_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
@@ -168,10 +195,31 @@ struct fwnode_operations {
if (fwnode_has_op(fwnode, op)) \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__); \
} while (false)
-#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
-extern u32 fw_devlink_get_flags(void);
-void fw_devlink_pause(void);
-void fw_devlink_resume(void);
+static inline void fwnode_init(struct fwnode_handle *fwnode,
+ const struct fwnode_operations *ops)
+{
+ fwnode->ops = ops;
+ INIT_LIST_HEAD(&fwnode->consumers);
+ INIT_LIST_HEAD(&fwnode->suppliers);
+}
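
A typical embedding sketch (the containing structure and the empty ops table are hypothetical):

struct example_node {
	struct fwnode_handle fwnode;
	const char *name;
};

static const struct fwnode_operations example_fwnode_ops = {};

static void example_node_setup(struct example_node *node)
{
	/* Sets ->ops and initializes the supplier/consumer link lists. */
	fwnode_init(&node->fwnode, &example_fwnode_ops);
}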
+
+static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ bool initialized)
+{
+ if (IS_ERR_OR_NULL(fwnode))
+ return;
+
+ if (initialized)
+ fwnode->flags |= FWNODE_FLAG_INITIALIZED;
+ else
+ fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+}
+
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags);
+void fwnode_links_purge(struct fwnode_handle *fwnode);
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
+bool fw_devlink_is_strict(void);
#endif
diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h
new file mode 100644
index 000000000000..faf603c48c86
--- /dev/null
+++ b/include/linux/fwnode_mdio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FWNODE helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_FWNODE_MDIO_H
+#define __LINUX_FWNODE_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_FWNODE_MDIO)
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr);
+
+#else /* CONFIG_FWNODE_MDIO */
+static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr)
+{
+ return -EINVAL;
+}
+
+static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child,
+ u32 addr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_FWNODE_MDIO_H */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 69081d899492..07e370113b2b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -5,7 +5,6 @@
#ifndef _GAMEPORT_H
#define _GAMEPORT_H
-#include <asm/io.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -64,7 +63,7 @@ struct gameport_driver {
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
@@ -110,7 +109,7 @@ static inline void gameport_free_port(struct gameport *gameport)
static inline void gameport_set_name(struct gameport *gameport, const char *name)
{
- strlcpy(gameport->name, name, sizeof(gameport->name));
+ strscpy(gameport->name, name, sizeof(gameport->name));
}
/*
@@ -165,18 +164,12 @@ void gameport_unregister_driver(struct gameport_driver *drv);
static inline void gameport_trigger(struct gameport *gameport)
{
- if (gameport->trigger)
- gameport->trigger(gameport);
- else
- outb(0xff, gameport->io);
+ gameport->trigger(gameport);
}
static inline unsigned char gameport_read(struct gameport *gameport)
{
- if (gameport->read)
- return gameport->read(gameport);
- else
- return inb(gameport->io);
+ return gameport->read(gameport);
}
static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index bfd00320c7f3..f3512fddf3d7 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -5,7 +5,7 @@
* DOC: Generic radix trees/sparse arrays
*
* Very simple and minimalistic, supporting arbitrary size entries up to
- * PAGE_SIZE.
+ * GENRADIX_NODE_SIZE.
*
* A genradix is defined with the type it will store, like so:
*
@@ -38,17 +38,22 @@
#include <asm/page.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
struct genradix_root;
+#define GENRADIX_NODE_SHIFT 9
+#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+
struct __genradix {
struct genradix_root *root;
};
/*
- * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE:
+ * NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
*/
#define __GENRADIX_INITIALIZER \
@@ -99,14 +104,14 @@ void __genradix_free(struct __genradix *);
static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
{
if (__builtin_constant_p(obj_size))
- BUILD_BUG_ON(obj_size > PAGE_SIZE);
+ BUILD_BUG_ON(obj_size > GENRADIX_NODE_SIZE);
else
- BUG_ON(obj_size > PAGE_SIZE);
+ BUG_ON(obj_size > GENRADIX_NODE_SIZE);
if (!is_power_of_2(obj_size)) {
- size_t objs_per_page = PAGE_SIZE / obj_size;
+ size_t objs_per_page = GENRADIX_NODE_SIZE / obj_size;
- return (idx / objs_per_page) * PAGE_SIZE +
+ return (idx / objs_per_page) * GENRADIX_NODE_SIZE +
(idx % objs_per_page) * obj_size;
} else {
return idx * obj_size;
@@ -115,6 +120,11 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_objs_per_page(_radix) \
+ (GENRADIX_NODE_SIZE / sizeof((_radix)->type[0]))
+#define __genradix_page_remainder(_radix) \
+ (GENRADIX_NODE_SIZE % sizeof((_radix)->type[0]))
+
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
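
A worked example of the offset arithmetic (illustrative numbers, not from the patch):

/*
 * With GENRADIX_NODE_SIZE == 512 and obj_size == 24 (not a power of two),
 * each node holds 512 / 24 == 21 objects plus 512 % 24 == 8 bytes of tail
 * padding, so objects never straddle a node boundary:
 *
 *	__idx_to_offset(25, 24) == (25 / 21) * 512 + (25 % 21) * 24
 *	                        == 512 + 96
 *	                        == 608
 *
 * Power-of-two sizes pack densely instead: __idx_to_offset(25, 16) == 400.
 */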
@@ -178,16 +188,40 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
#define genradix_iter_peek(_iter, _radix) \
(__genradix_cast(_radix) \
__genradix_iter_peek(_iter, &(_radix)->tree, \
- PAGE_SIZE / __genradix_obj_size(_radix)))
+ __genradix_objs_per_page(_radix)))
+
+void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *,
+ size_t, size_t);
+
+/**
+ * genradix_iter_peek_prev - get first entry at or below iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or below @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek_prev(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix), \
+ __genradix_obj_size(_radix) + \
+ __genradix_page_remainder(_radix)))
static inline void __genradix_iter_advance(struct genradix_iter *iter,
size_t obj_size)
{
+ if (iter->offset + obj_size < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return;
+ }
+
iter->offset += obj_size;
if (!is_power_of_2(obj_size) &&
- (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE)
- iter->offset = round_up(iter->offset, PAGE_SIZE);
+ (iter->offset & (GENRADIX_NODE_SIZE - 1)) + obj_size > GENRADIX_NODE_SIZE)
+ iter->offset = round_up(iter->offset, GENRADIX_NODE_SIZE);
iter->pos++;
}
@@ -195,6 +229,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_iter_advance(_iter, _radix) \
__genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+static inline void __genradix_iter_rewind(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset == 0 ||
+ iter->offset == SIZE_MAX) {
+ iter->offset = SIZE_MAX;
+ return;
+ }
+
+ if ((iter->offset & (GENRADIX_NODE_SIZE - 1)) == 0)
+ iter->offset -= GENRADIX_NODE_SIZE % obj_size;
+
+ iter->offset -= obj_size;
+ iter->pos--;
+}
+
+#define genradix_iter_rewind(_iter, _radix) \
+ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix))
+
#define genradix_for_each_from(_radix, _iter, _p, _start) \
for (_iter = genradix_iter_init(_radix, _start); \
(_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
@@ -212,6 +265,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_for_each(_radix, _iter, _p) \
genradix_for_each_from(_radix, _iter, _p, 0)
+#define genradix_last_pos(_radix) \
+ (SIZE_MAX / GENRADIX_NODE_SIZE * __genradix_objs_per_page(_radix) - 1)
+
+/**
+ * genradix_for_each_reverse - iterate over entries in a genradix, in reverse order
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each_reverse(_radix, _iter, _p) \
+ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\
+ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\
+ genradix_iter_rewind(&_iter, _radix))
+
int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
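
A hypothetical usage sketch of the new reverse iteration (struct foo and foo_dump() are made up for illustration):

#include <linux/generic-radix-tree.h>
#include <linux/printk.h>

struct foo { u32 x; };

static GENRADIX(struct foo) foo_genradix;

static void foo_dump(void)
{
	struct genradix_iter iter;
	struct foo *p;

	/* Forward walk, as before. */
	genradix_for_each(&foo_genradix, iter, p)
		pr_info("pos %zu x %u\n", iter.pos, p->x);

	/* New: start at genradix_last_pos() and rewind towards index 0. */
	genradix_for_each_reverse(&foo_genradix, iter, p)
		pr_info("pos %zu x %u\n", iter.pos, p->x);
}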
/**
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index bc738504ab4a..c285968e437a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -8,34 +8,11 @@
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
-#ifdef CONFIG_LOCKDEP
-extern bool lockdep_genl_is_held(void);
-#endif
/* for synchronisation between af_netlink and genetlink */
extern atomic_t genl_sk_destructing_cnt;
extern wait_queue_head_t genl_sk_destructing_waitq;
-/**
- * rcu_dereference_genl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_genl(p) \
- rcu_dereference_check(p, lockdep_genl_is_held())
-
-/**
- * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds genl mutex.
- */
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-
#define MODULE_ALIAS_GENL_FAMILY(family)\
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
deleted file mode 100644
index 4ab853461dff..000000000000
--- a/include/linux/genhd.h
+++ /dev/null
@@ -1,413 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GENHD_H
-#define _LINUX_GENHD_H
-
-/*
- * genhd.h Copyright (C) 1992 Drew Eckhardt
- * Generic hard disk header file by
- * Drew Eckhardt
- *
- * <drew@colorado.edu>
- */
-
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/percpu-refcount.h>
-#include <linux/uuid.h>
-#include <linux/blk_types.h>
-#include <asm/local.h>
-
-#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
-#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
-#define disk_to_dev(disk) (&(disk)->part0.__dev)
-#define part_to_dev(part) (&((part)->__dev))
-
-extern const struct device_type disk_type;
-extern struct device_type part_type;
-extern struct class block_class;
-
-#define DISK_MAX_PARTS 256
-#define DISK_NAME_LEN 32
-
-#include <linux/major.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/workqueue.h>
-
-#define PARTITION_META_INFO_VOLNAMELTH 64
-/*
- * Enough for the string representation of any kind of UUID plus NULL.
- * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
- */
-#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-
-struct partition_meta_info {
- char uuid[PARTITION_META_INFO_UUIDLTH];
- u8 volname[PARTITION_META_INFO_VOLNAMELTH];
-};
-
-struct hd_struct {
- sector_t start_sect;
- /*
- * nr_sects is protected by sequence counter. One might extend a
- * partition while IO is happening to it and update of nr_sects
- * can be non-atomic on 32bit machines with 64bit sector_t.
- */
- sector_t nr_sects;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- seqcount_t nr_sects_seq;
-#endif
- unsigned long stamp;
- struct disk_stats __percpu *dkstats;
- struct percpu_ref ref;
-
- sector_t alignment_offset;
- unsigned int discard_alignment;
- struct device __dev;
- struct kobject *holder_dir;
- int policy, partno;
- struct partition_meta_info *info;
-#ifdef CONFIG_FAIL_MAKE_REQUEST
- int make_it_fail;
-#endif
- struct rcu_work rcu_work;
-};
-
-/**
- * DOC: genhd capability flags
- *
- * ``GENHD_FL_REMOVABLE`` (0x0001): indicates that the block device
- * gives access to removable media.
- * When set, the device remains present even when media is not
- * inserted.
- * Must not be set for devices which are removed entirely when the
- * media is removed.
- *
- * ``GENHD_FL_CD`` (0x0008): the block device is a CD-ROM-style
- * device.
- * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
- *
- * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
- * with a similar meaning to network interfaces.
- *
- * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
- * partition information in ``/proc/partitions`` or in the output of
- * printk_all_partitions().
- * Used for the null block device and some MMC devices.
- *
- * ``GENHD_FL_EXT_DEVT`` (0x0040): the driver supports extended
- * dynamic ``dev_t``, i.e. it wants extended device numbers
- * (``BLOCK_EXT_MAJOR``).
- * This affects the maximum number of partitions.
- *
- * ``GENHD_FL_NATIVE_CAPACITY`` (0x0080): based on information in the
- * partition table, the device's capacity has been extended to its
- * native capacity; i.e. the device has hidden capacity used by one
- * of the partitions (this is a flag used so that native capacity is
- * only ever unlocked once).
- *
- * ``GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE`` (0x0100): event polling is
- * blocked whenever a writer holds an exclusive lock.
- *
- * ``GENHD_FL_NO_PART_SCAN`` (0x0200): partition scanning is disabled.
- * Used for loop devices in their default settings and some MMC
- * devices.
- *
- * ``GENHD_FL_HIDDEN`` (0x0400): the block device is hidden; it
- * doesn't produce events, doesn't appear in sysfs, and doesn't have
- * an associated ``bdev``.
- * Implies ``GENHD_FL_SUPPRESS_PARTITION_INFO`` and
- * ``GENHD_FL_NO_PART_SCAN``.
- * Used for multipath devices.
- */
-#define GENHD_FL_REMOVABLE 0x0001
-/* 2 is unused (used to be GENHD_FL_DRIVERFS) */
-/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
-#define GENHD_FL_CD 0x0008
-#define GENHD_FL_UP 0x0010
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020
-#define GENHD_FL_EXT_DEVT 0x0040
-#define GENHD_FL_NATIVE_CAPACITY 0x0080
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 0x0100
-#define GENHD_FL_NO_PART_SCAN 0x0200
-#define GENHD_FL_HIDDEN 0x0400
-
-enum {
- DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
- DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
-};
-
-enum {
- /* Poll even if events_poll_msecs is unset */
- DISK_EVENT_FLAG_POLL = 1 << 0,
- /* Forward events to udev */
- DISK_EVENT_FLAG_UEVENT = 1 << 1,
-};
-
-struct disk_part_tbl {
- struct rcu_head rcu_head;
- int len;
- struct hd_struct __rcu *last_lookup;
- struct hd_struct __rcu *part[];
-};
-
-struct disk_events;
-struct badblocks;
-
-struct blk_integrity {
- const struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
-};
-
-struct gendisk {
- /* major, first_minor and minors are input parameters only,
- * don't use directly. Use disk_devt() and disk_max_parts().
- */
- int major; /* major number of driver */
- int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
-
- char disk_name[DISK_NAME_LEN]; /* name of major driver */
-
- unsigned short events; /* supported events */
- unsigned short event_flags; /* flags related to event processing */
-
- /* Array of pointers to partitions indexed by partno.
- * Protected with matching bdev lock but stat and other
- * non-critical accesses use RCU. Always access through
- * helpers.
- */
- struct disk_part_tbl __rcu *part_tbl;
- struct hd_struct part0;
-
- const struct block_device_operations *fops;
- struct request_queue *queue;
- void *private_data;
-
- int flags;
- struct rw_semaphore lookup_sem;
- struct kobject *slave_dir;
-
- struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
- struct disk_events *ev;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct kobject integrity_kobj;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-#if IS_ENABLED(CONFIG_CDROM)
- struct cdrom_device_info *cdi;
-#endif
- int node_id;
- struct badblocks *bb;
- struct lockdep_map lockdep_map;
-};
-
-#if IS_REACHABLE(CONFIG_CDROM)
-#define disk_to_cdi(disk) ((disk)->cdi)
-#else
-#define disk_to_cdi(disk) NULL
-#endif
-
-static inline struct gendisk *part_to_disk(struct hd_struct *part)
-{
- if (likely(part)) {
- if (part->partno)
- return dev_to_disk(part_to_dev(part)->parent);
- else
- return dev_to_disk(part_to_dev(part));
- }
- return NULL;
-}
-
-static inline int disk_max_parts(struct gendisk *disk)
-{
- if (disk->flags & GENHD_FL_EXT_DEVT)
- return DISK_MAX_PARTS;
- return disk->minors;
-}
-
-static inline bool disk_part_scan_enabled(struct gendisk *disk)
-{
- return disk_max_parts(disk) > 1 &&
- !(disk->flags & GENHD_FL_NO_PART_SCAN);
-}
-
-static inline dev_t disk_devt(struct gendisk *disk)
-{
- return MKDEV(disk->major, disk->first_minor);
-}
-
-static inline dev_t part_devt(struct hd_struct *part)
-{
- return part_to_dev(part)->devt;
-}
-
-extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
-extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
-
-static inline void disk_put_part(struct hd_struct *part)
-{
- if (likely(part))
- put_device(part_to_dev(part));
-}
-
-static inline void hd_sects_seq_init(struct hd_struct *p)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- seqcount_init(&p->nr_sects_seq);
-#endif
-}
-
-/*
- * Smarter partition iterator without context limits.
- */
-#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
-#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
-#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
-#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
-
-struct disk_part_iter {
- struct gendisk *disk;
- struct hd_struct *part;
- int idx;
- unsigned int flags;
-};
-
-extern void disk_part_iter_init(struct disk_part_iter *piter,
- struct gendisk *disk, unsigned int flags);
-extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
-extern void disk_part_iter_exit(struct disk_part_iter *piter);
-extern bool disk_has_partitions(struct gendisk *disk);
-
-/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups);
-static inline void add_disk(struct gendisk *disk)
-{
- device_add_disk(NULL, disk, NULL);
-}
-extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
-static inline void add_disk_no_queue_reg(struct gendisk *disk)
-{
- device_add_disk_no_queue_reg(NULL, disk);
-}
-
-extern void del_gendisk(struct gendisk *gp);
-extern struct gendisk *get_gendisk(dev_t dev, int *partno);
-extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
-
-extern void set_device_ro(struct block_device *bdev, int flag);
-extern void set_disk_ro(struct gendisk *disk, int flag);
-
-static inline int get_disk_ro(struct gendisk *disk)
-{
- return disk->part0.policy;
-}
-
-extern void disk_block_events(struct gendisk *disk);
-extern void disk_unblock_events(struct gendisk *disk);
-extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-extern void set_capacity_revalidate_and_notify(struct gendisk *disk,
- sector_t size, bool revalidate);
-extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
-
-/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
-extern void rand_initialize_disk(struct gendisk *disk);
-
-static inline sector_t get_start_sect(struct block_device *bdev)
-{
- return bdev->bd_part->start_sect;
-}
-static inline sector_t get_capacity(struct gendisk *disk)
-{
- return disk->part0.nr_sects;
-}
-static inline void set_capacity(struct gendisk *disk, sector_t size)
-{
- disk->part0.nr_sects = size;
-}
-
-int bdev_disk_changed(struct block_device *bdev, bool invalidate);
-int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
-int blk_drop_partitions(struct block_device *bdev);
-
-extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk_and_module(struct gendisk *disk);
-extern void put_disk(struct gendisk *disk);
-extern void put_disk_and_module(struct gendisk *disk);
-extern void blk_register_region(dev_t devt, unsigned long range,
- struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *),
- void *data);
-extern void blk_unregister_region(dev_t devt, unsigned long range);
-
-#define alloc_disk_node(minors, node_id) \
-({ \
- static struct lock_class_key __key; \
- const char *__name; \
- struct gendisk *__disk; \
- \
- __name = "(gendisk_completion)"#minors"("#node_id")"; \
- \
- __disk = __alloc_disk_node(minors, node_id); \
- \
- if (__disk) \
- lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
- \
- __disk; \
-})
-
-#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
-
-int register_blkdev(unsigned int major, const char *name);
-void unregister_blkdev(unsigned int major, const char *name);
-
-int revalidate_disk(struct gendisk *disk);
-int check_disk_change(struct block_device *bdev);
-int __invalidate_device(struct block_device *bdev, bool kill_dirty);
-void bd_set_size(struct block_device *bdev, loff_t size);
-
-/* for drivers/char/raw.c: */
-int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-
-#ifdef CONFIG_SYSFS
-int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif /* CONFIG_SYSFS */
-
-#ifdef CONFIG_BLOCK
-void printk_all_partitions(void);
-dev_t blk_lookup_devt(const char *name, int partno);
-#else /* CONFIG_BLOCK */
-static inline void printk_all_partitions(void)
-{
-}
-static inline dev_t blk_lookup_devt(const char *name, int partno)
-{
- dev_t devt = MKDEV(0, 0);
- return devt;
-}
-#endif /* CONFIG_BLOCK */
-
-#endif /* _LINUX_GENHD_H */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 6cb82301d8e9..d4da060b7532 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -2,6 +2,7 @@
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
@@ -23,7 +24,7 @@
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
[tag_name] = { .type = NLA_NESTED },
-static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+static struct nla_policy CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -209,7 +210,7 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
* Magic: define op number to op name mapping {{{1
* {{{2
*/
-const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+static const char *CONCATENATE(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{
switch (cmd) {
#undef GENL_op
@@ -235,7 +236,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
.cmd = op_name, \
},
-#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+#define ZZZ_genl_ops CONCATENATE(GENL_MAGIC_FAMILY, _genl_ops)
static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -248,32 +249,32 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* and provide register/unregister functions.
* {{{2
*/
-#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+#define ZZZ_genl_family CONCATENATE(GENL_MAGIC_FAMILY, _genl_family)
static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
*/
-#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+#define ZZZ_genl_mcgrps CONCATENATE(GENL_MAGIC_FAMILY, _genl_mcgrps)
static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
#undef GENL_mc_group
#define GENL_mc_group(group) { .name = #group, },
#include GENL_MAGIC_INCLUDE_FILE
};
-enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+enum CONCATENATE(GENL_MAGIC_FAMILY, group_ids) {
#undef GENL_mc_group
-#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#define GENL_mc_group(group) CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group),
#include GENL_MAGIC_INCLUDE_FILE
};
#undef GENL_mc_group
#define GENL_mc_group(group) \
-static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+static int CONCATENATE(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
struct sk_buff *skb, gfp_t flags) \
{ \
unsigned int group_id = \
- CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group); \
return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
group_id, flags); \
}
@@ -289,21 +290,22 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
+ .maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
+ .resv_start_op = 42, /* drbd is currently the only user */
.n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
.module = THIS_MODULE,
};
-int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void)
{
return genl_register_family(&ZZZ_genl_family);
}
-void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void)
{
genl_unregister_family(&ZZZ_genl_family);
}
@@ -404,4 +406,3 @@ s_fields \
/* }}}1 */
#endif /* GENL_MAGIC_FUNC_H */
-/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */
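
The expansion is unchanged, only the helper moved: CONCATENATE() from <linux/args.h> expands its arguments before pasting, exactly as the removed CONCAT_()/CONCAT__() pair did. A quick illustration:

#include <linux/args.h>

#define GENL_MAGIC_FAMILY drbd	/* drbd is the only current user */

/* Expands, after argument substitution, to: int drbd_genl_register(void); */
int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);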
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eeae59d3ceb7..a419d93789ff 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -14,14 +14,12 @@
# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
#endif
+#include <linux/args.h>
#include <linux/genetlink.h>
#include <linux/types.h>
-#define CONCAT__(a,b) a ## b
-#define CONCAT_(a,b) CONCAT__(a,b)
-
-extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
-extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/*
* Extension of genl attribute validation policies {{{2
@@ -89,7 +87,7 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
- nla_strlcpy, nla_put, false)
+ nla_strscpy, nla_put, false)
#define __bin_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
nla_memcpy, nla_put, false)
@@ -283,4 +281,3 @@ enum { \
/* }}}1 */
#endif /* GENL_MAGIC_STRUCT_H */
-/* vim: set foldmethod=marker nofoldenable : */
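
Unlike nla_strlcpy(), nla_strscpy() reports overflow instead of silently truncating; a caller sketch (copy_name() is hypothetical):

#include <net/netlink.h>

static int copy_name(char *dst, size_t dstsize, const struct nlattr *nla)
{
	ssize_t ret = nla_strscpy(dst, nla, dstsize);

	if (ret == -E2BIG)
		return -EINVAL;	/* attribute would not fit; reject it */
	return 0;		/* ret is the copied length, NUL excluded */
}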
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 67a0774e080b..c775ea3c6015 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -2,311 +2,13 @@
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
-#include <linux/mmdebug.h>
+#include <linux/gfp_types.h>
+
#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
#include <linux/topology.h>
struct vm_area_struct;
-
-/*
- * In case of changes, please don't forget to update
- * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
- */
-
-/* Plain integer GFP bitmasks. Do not use this directly. */
-#define ___GFP_DMA 0x01u
-#define ___GFP_HIGHMEM 0x02u
-#define ___GFP_DMA32 0x04u
-#define ___GFP_MOVABLE 0x08u
-#define ___GFP_RECLAIMABLE 0x10u
-#define ___GFP_HIGH 0x20u
-#define ___GFP_IO 0x40u
-#define ___GFP_FS 0x80u
-#define ___GFP_ZERO 0x100u
-#define ___GFP_ATOMIC 0x200u
-#define ___GFP_DIRECT_RECLAIM 0x400u
-#define ___GFP_KSWAPD_RECLAIM 0x800u
-#define ___GFP_WRITE 0x1000u
-#define ___GFP_NOWARN 0x2000u
-#define ___GFP_RETRY_MAYFAIL 0x4000u
-#define ___GFP_NOFAIL 0x8000u
-#define ___GFP_NORETRY 0x10000u
-#define ___GFP_MEMALLOC 0x20000u
-#define ___GFP_COMP 0x40000u
-#define ___GFP_NOMEMALLOC 0x80000u
-#define ___GFP_HARDWALL 0x100000u
-#define ___GFP_THISNODE 0x200000u
-#define ___GFP_ACCOUNT 0x400000u
-#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x800000u
-#else
-#define ___GFP_NOLOCKDEP 0
-#endif
-/* If the above are modified, __GFP_BITS_SHIFT may need updating */
-
-/*
- * Physical address zone modifiers (see linux/mmzone.h - low four bits)
- *
- * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use them consistently. The definitions here may
- * be used in bit comparisons.
- */
-#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
-#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
-#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
-/**
- * DOC: Page mobility and placement hints
- *
- * Page mobility and placement hints
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * These flags provide hints about how mobile the page is. Pages with similar
- * mobility are placed within the same pageblocks to minimise problems due
- * to external fragmentation.
- *
- * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
- *
- * %__GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
- *
- * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
- *
- * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
- *
- * %__GFP_THISNODE forces the allocation to be satisfied from the requested
- * node with no fallbacks or placement policy enforcements.
- *
- * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
- */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
-#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-
-/**
- * DOC: Watermark modifiers
- *
- * Watermark modifiers -- controls access to emergency reserves
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * %__GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with %__GFP_HIGH
- *
- * %__GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
- * Users of this flag have to be extremely careful to not deplete the reserve
- * completely and implement a throttling mechanism which controls the
- * consumption of the reserve based on the amount of freed memory.
- * Usage of a pre-allocated pool (e.g. mempool) should be always considered
- * before using this flag.
- *
- * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
- */
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-
-/**
- * DOC: Reclaim modifiers
- *
- * Reclaim modifiers
- * ~~~~~~~~~~~~~~~~~
- * Please note that all the following flags are only applicable to sleepable
- * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
- *
- * %__GFP_IO can start physical IO.
- *
- * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
- *
- * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
- *
- * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
- *
- * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
- *
- * The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
- * !costly allocations are too essential to fail so they are implicitly
- * non-failing by default (with some exceptions like OOM victims might fail so
- * the caller still has to check for failures) while costly requests try to be
- * not disruptive and back off even without invoking the OOM killer.
- * The following three modifiers might be used to override some of these
- * implicit rules
- *
- * %__GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with %__GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
- */
-#define __GFP_IO ((__force gfp_t)___GFP_IO)
-#define __GFP_FS ((__force gfp_t)___GFP_FS)
-#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
-#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
-#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-
-/**
- * DOC: Action modifiers
- *
- * Action modifiers
- * ~~~~~~~~~~~~~~~~
- *
- * %__GFP_NOWARN suppresses allocation failure reports.
- *
- * %__GFP_COMP address compound page metadata.
- *
- * %__GFP_ZERO returns a zeroed page on success.
- */
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-
-/* Disable lockdep for GFP context tracking */
-#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
-
-/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
-#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-/**
- * DOC: Useful GFP flag combinations
- *
- * Useful GFP flag combinations
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Useful GFP flag combinations that are commonly used. It is recommended
- * that subsystems start with one of these combinations and then set/clear
- * %__GFP_FOO flags as necessary.
- *
- * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves"
- *
- * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * %GFP_USER is for userspace allocations that also need to be directly
- * accessibly by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * %GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flags indicates that the caller requires that the lowest zone be
- * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
- *
- * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
- * are compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
- */
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO (__GFP_RECLAIM)
-#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_DMA __GFP_DMA
-#define GFP_DMA32 __GFP_DMA32
-#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
-#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+struct mempolicy;
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -317,12 +19,15 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
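
Spelled out, the mapping those BUILD_BUG_ON()s pin down (bit values from the old literal masks, migrate types from mmzone.h, shift of 3 implied by the first assertion):

/*
 *	___GFP_MOVABLE                      == 0x08 >> 3 == 1 == MIGRATE_MOVABLE
 *	___GFP_RECLAIMABLE                  == 0x10 >> 3 == 2 == MIGRATE_RECLAIMABLE
 *	___GFP_MOVABLE | ___GFP_RECLAIMABLE == 0x18 >> 3 == 3 == MIGRATE_HIGHATOMIC
 *
 * so e.g. gfp_migratetype(GFP_HIGHUSER_MOVABLE) == MIGRATE_MOVABLE.
 */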
@@ -332,29 +37,6 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
-/**
- * gfpflags_normal_context - is gfp_flags a normal sleepable context?
- * @gfp_flags: gfp_flags to test
- *
- * Test whether @gfp_flags indicates that the allocation is from the
- * %current context and allowed to sleep.
- *
- * An allocation being allowed to block doesn't mean it owns the %current
- * context. When direct reclaim path tries to allocate memory, the
- * allocation context is nested inside whatever %current was doing at the
- * time of the original allocation. The nested allocation may be allowed
- * to block but modifying anything %current owns can corrupt the outer
- * context's expectations.
- *
- * %true result from this function indicates that the allocation context
- * can sleep and use anything that's associated with %current.
- */
-static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
-{
- return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
- __GFP_DIRECT_RECLAIM;
-}
-
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
@@ -474,12 +156,12 @@ static inline int gfp_zonelist(gfp_t flags)
/*
* We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
@@ -492,21 +174,55 @@ static inline void arch_free_page(struct page *page, int order) { }
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
+
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+
+unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ nodemask_t *nodemask, int nr_pages,
+ struct list_head *page_list,
+ struct page **page_array);
+
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+
+/* Bulk allocate order-0 pages */
+static inline unsigned long
+alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
{
- return 0;
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
}
-#endif
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
- nodemask_t *nodemask);
+static inline unsigned long
+alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
+{
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
+}
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
+static inline unsigned long
+alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
+{
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
+}
+
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
- return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
}
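
Caller sketch for the bulk API (my_fill_pages() is hypothetical; the array variant only fills NULL slots, so the array should start out zeroed):

#include <linux/gfp.h>

static int my_fill_pages(struct page **pages, unsigned long want)
{
	/* Returns how many entries of pages[] are populated afterwards. */
	unsigned long got = alloc_pages_bulk_array(GFP_KERNEL, want, pages);

	return got == want ? 0 : -ENOMEM;	/* partial success is possible */
}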
/*
@@ -517,9 +233,18 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
+
+ return __alloc_pages(gfp_mask, order, nid, NULL);
+}
+
+static inline
+struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ warn_if_node_offline(nid, gfp);
- return __alloc_pages(gfp_mask, order, nid);
+ return __folio_alloc(gfp, order, nid, NULL);
}
/*
@@ -537,38 +262,44 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
}
#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
+struct page *alloc_pages(gfp_t gfp, unsigned int order);
+struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid);
+struct folio *folio_alloc(gfp_t gfp, unsigned int order);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, bool hugepage);
+#else
+static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
- return alloc_pages_current(gfp_mask, order);
+ return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
-#else
-#define alloc_pages(gfp_mask, order) \
- alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages(gfp_mask, order)
+static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid)
+{
+ return alloc_pages(gfp, order);
+}
+static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+{
+ return __folio_alloc_node(gfp, order, numa_node_id());
+}
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
+ folio_alloc(gfp, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
-#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+static inline struct page *alloc_page_vma(gfp_t gfp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+
+ return &folio->page;
+}
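
The folio entry points shadow the page ones; a small sketch (my_get_zeroed_buf() is hypothetical):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *my_get_zeroed_buf(void)
{
	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	return folio ? folio_address(folio) : NULL;	/* free with folio_put() */
}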
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
@@ -578,24 +309,40 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_unref_page(struct page *page);
-extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
+void page_frag_cache_drain(struct page_frag_cache *nc);
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+ gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
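
Why passing -align works (align must be a power of two, as the WARN_ON_ONCE() above enforces):

/*
 * In two's complement, -align == ~(align - 1). For align == 64:
 *
 *	-64 == 0xffffffc0 == ~63
 *
 * so "offset & align_mask" rounds an offset down to a 64-byte boundary,
 * while page_frag_alloc() passes ~0u, which masks nothing (byte aligned).
 */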
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
-void page_alloc_init(void);
+void page_alloc_init_cpuhp(void);
+int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
+void setup_pcp_cacheinfo(unsigned int cpu);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
@@ -609,17 +356,21 @@ extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
-extern void pm_restrict_gfp_mask(void);
-extern void pm_restore_gfp_mask(void);
+static inline bool gfp_has_io_fs(gfp_t gfp)
+{
+ return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+}
-#ifdef CONFIG_PM_SLEEP
-extern bool pm_suspended_storage(void);
-#else
-static inline bool pm_suspended_storage(void)
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
- return false;
+ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}
-#endif /* CONFIG_PM_SLEEP */
+
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
@@ -628,11 +379,6 @@ extern int alloc_contig_range(unsigned long start, unsigned long end,
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#endif
-void free_contig_range(unsigned long pfn, unsigned int nr_pages);
-
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
-#endif
+void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#endif /* __LINUX_GFP_H */
diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h
new file mode 100644
index 000000000000..5a05a2764a86
--- /dev/null
+++ b/include/linux/gfp_api.h
@@ -0,0 +1 @@
+#include <linux/gfp.h>
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 000000000000..868c8fb1bbc1
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+enum {
+ ___GFP_DMA_BIT,
+ ___GFP_HIGHMEM_BIT,
+ ___GFP_DMA32_BIT,
+ ___GFP_MOVABLE_BIT,
+ ___GFP_RECLAIMABLE_BIT,
+ ___GFP_HIGH_BIT,
+ ___GFP_IO_BIT,
+ ___GFP_FS_BIT,
+ ___GFP_ZERO_BIT,
+ ___GFP_UNUSED_BIT, /* 0x200u unused */
+ ___GFP_DIRECT_RECLAIM_BIT,
+ ___GFP_KSWAPD_RECLAIM_BIT,
+ ___GFP_WRITE_BIT,
+ ___GFP_NOWARN_BIT,
+ ___GFP_RETRY_MAYFAIL_BIT,
+ ___GFP_NOFAIL_BIT,
+ ___GFP_NORETRY_BIT,
+ ___GFP_MEMALLOC_BIT,
+ ___GFP_COMP_BIT,
+ ___GFP_NOMEMALLOC_BIT,
+ ___GFP_HARDWALL_BIT,
+ ___GFP_THISNODE_BIT,
+ ___GFP_ACCOUNT_BIT,
+ ___GFP_ZEROTAGS_BIT,
+#ifdef CONFIG_KASAN_HW_TAGS
+ ___GFP_SKIP_ZERO_BIT,
+ ___GFP_SKIP_KASAN_BIT,
+#endif
+#ifdef CONFIG_LOCKDEP
+ ___GFP_NOLOCKDEP_BIT,
+#endif
+ ___GFP_LAST_BIT
+};
+
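
The generated masks are identical to the old literal constants, since BIT(n) is 1 << n taken in enum declaration order. For example:

/*
 *	___GFP_DMA            == BIT(0)  == 0x01u
 *	___GFP_MOVABLE        == BIT(3)  == 0x08u
 *	___GFP_KSWAPD_RECLAIM == BIT(11) == 0x800u
 *
 * and ___GFP_LAST_BIT tracks the total, so adding a flag to the enum
 * grows the flag mask without further bookkeeping.
 */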
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA BIT(___GFP_DMA_BIT)
+#define ___GFP_HIGHMEM BIT(___GFP_HIGHMEM_BIT)
+#define ___GFP_DMA32 BIT(___GFP_DMA32_BIT)
+#define ___GFP_MOVABLE BIT(___GFP_MOVABLE_BIT)
+#define ___GFP_RECLAIMABLE BIT(___GFP_RECLAIMABLE_BIT)
+#define ___GFP_HIGH BIT(___GFP_HIGH_BIT)
+#define ___GFP_IO BIT(___GFP_IO_BIT)
+#define ___GFP_FS BIT(___GFP_FS_BIT)
+#define ___GFP_ZERO BIT(___GFP_ZERO_BIT)
+/* 0x200u unused */
+#define ___GFP_DIRECT_RECLAIM BIT(___GFP_DIRECT_RECLAIM_BIT)
+#define ___GFP_KSWAPD_RECLAIM BIT(___GFP_KSWAPD_RECLAIM_BIT)
+#define ___GFP_WRITE BIT(___GFP_WRITE_BIT)
+#define ___GFP_NOWARN BIT(___GFP_NOWARN_BIT)
+#define ___GFP_RETRY_MAYFAIL BIT(___GFP_RETRY_MAYFAIL_BIT)
+#define ___GFP_NOFAIL BIT(___GFP_NOFAIL_BIT)
+#define ___GFP_NORETRY BIT(___GFP_NORETRY_BIT)
+#define ___GFP_MEMALLOC BIT(___GFP_MEMALLOC_BIT)
+#define ___GFP_COMP BIT(___GFP_COMP_BIT)
+#define ___GFP_NOMEMALLOC BIT(___GFP_NOMEMALLOC_BIT)
+#define ___GFP_HARDWALL BIT(___GFP_HARDWALL_BIT)
+#define ___GFP_THISNODE BIT(___GFP_THISNODE_BIT)
+#define ___GFP_ACCOUNT BIT(___GFP_ACCOUNT_BIT)
+#define ___GFP_ZEROTAGS BIT(___GFP_ZEROTAGS_BIT)
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO BIT(___GFP_SKIP_ZERO_BIT)
+#define ___GFP_SKIP_KASAN BIT(___GFP_SKIP_KASAN_BIT)
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP BIT(___GFP_NOLOCKDEP_BIT)
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages, and requests
+ * from atomic context.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users either should
+ * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should be always considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * Non-costly allocations are considered too essential to fail, so they are
+ * implicitly non-failing by default (with some exceptions, e.g. OOM victims
+ * may still fail, so the caller still has to check for failures), while
+ * costly requests try not to be disruptive and back off even without
+ * invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules.
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like the OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than open-code an endless
+ * loop around the allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
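+
+/*
+ * A common pattern with the modifiers above is an optimistic attempt with
+ * a cheap fallback, roughly what kvmalloc() does internally (a sketch, not
+ * the exact implementation):
+ *
+ *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ *	if (!buf)
+ *		buf = vmalloc(size);
+ */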
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP allocates a compound page and sets up compound page metadata.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
+ * Used for userspace and vmalloc pages; the latter are unpoisoned by
+ * kasan_unpoison_vmalloc instead. For userspace pages, results in
+ * poisoning being skipped as well, see should_skip_kasan_poison for
+ * details. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT ___GFP_LAST_BIT
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * These GFP flag combinations are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope that cannot
+ * perform any IO, with a short explanation of why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope that cannot/shouldn't
+ * recurse into the FS layer, with a short explanation of why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used for buffers
+ * that are mapped to userspace (e.g. graphics) and that hardware still
+ * must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented (there are no
+ * such users in the kernel).
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is used by default
+ * in the page fault path, while the non-light version is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
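+
+/*
+ * Per the %GFP_NOIO/%GFP_NOFS guidance above, prefer the scope APIs over
+ * passing these flags directly; a sketch of the NOFS scope pattern:
+ *
+ *	unsigned int nofs_flags = memalloc_nofs_save();
+ *
+ *	ptr = kmalloc(size, GFP_KERNEL);
+ *	memalloc_nofs_restore(nofs_flags);
+ *
+ * where the kmalloc() implicitly behaves as if GFP_NOFS was passed.
+ */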
+
+#endif /* __LINUX_GFP_TYPES_H */
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 265a099cd3b8..bcc17f95b906 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -8,14 +8,21 @@
/* Helpers for Goldfish virtual platform */
+#ifndef gf_ioread32
+#define gf_ioread32 ioread32
+#endif
+#ifndef gf_iowrite32
+#define gf_iowrite32 iowrite32
+#endif
+
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
void __iomem *porth)
{
const unsigned long addr = (unsigned long)ptr;
- writel(lower_32_bits(addr), portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- writel(upper_32_bits(addr), porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
@@ -23,9 +30,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- writel(lower_32_bits(addr), portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(upper_32_bits(addr), porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
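
/*
 * Usage sketch for the helpers above; the driver structure and register
 * offsets are assumptions, not a real interface:
 *
 *	void my_dev_kick(struct my_dev *dev, struct my_cmd *cmd)
 *	{
 *		gf_write_ptr(cmd, dev->base + MY_CMD_ADDR_LOW,
 *			     dev->base + MY_CMD_ADDR_HIGH);
 *	}
 */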
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 008ad3ee56b7..7ecc25c543ce 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -12,7 +12,9 @@
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
-#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
/* see Documentation/driver-api/gpio/legacy.rst */
@@ -30,17 +32,6 @@
/* Gpio pin is active-low */
#define GPIOF_ACTIVE_LOW (1 << 2)
-/* Gpio pin is open drain */
-#define GPIOF_OPEN_DRAIN (1 << 3)
-
-/* Gpio pin is open source */
-#define GPIOF_OPEN_SOURCE (1 << 4)
-
-#define GPIOF_EXPORT (1 << 5)
-#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
-#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
-#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
-
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
@@ -55,56 +46,89 @@ struct gpio {
#ifdef CONFIG_GPIOLIB
-#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
-#include <asm/gpio.h>
-#else
-
-#include <asm-generic/gpio.h>
+#include <linux/gpio/consumer.h>
-static inline int gpio_get_value(unsigned int gpio)
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+static inline bool gpio_is_valid(int number)
{
- return __gpio_get_value(gpio);
+ /* only non-negative numbers are valid */
+ return number >= 0;
}
-static inline void gpio_set_value(unsigned int gpio, int value)
+/*
+ * Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ */
+
+/*
+ * Ultimately we want all GPIOs to be dynamically allocated from 0.
+ * However, some legacy drivers still perform fixed allocation.
+ * Until they are all fixed, leave the 0-511 range for them.
+ */
+#define GPIO_DYNAMIC_BASE 512
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+int gpio_request(unsigned gpio, const char *label);
+void gpio_free(unsigned gpio);
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
+static inline int gpio_direction_output(unsigned gpio, int value)
{
- __gpio_set_value(gpio, value);
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
}
-static inline int gpio_cansleep(unsigned int gpio)
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
- return __gpio_cansleep(gpio);
+ return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
}
-static inline int gpio_to_irq(unsigned int gpio)
+static inline int gpio_get_value(unsigned gpio)
{
- return __gpio_to_irq(gpio);
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
-static inline int irq_to_gpio(unsigned int irq)
+static inline int gpio_to_irq(unsigned gpio)
{
- return -EINVAL;
+ return gpiod_to_irq(gpio_to_desc(gpio));
}
-#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
+int gpio_request_array(const struct gpio *array, size_t num);
+void gpio_free_array(const struct gpio *array, size_t num);
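+
+/*
+ * Usage sketch for the array helpers above; the GPIO numbers and labels
+ * are made up for illustration:
+ *
+ *	static const struct gpio leds[] = {
+ *		{ 32, GPIOF_OUT_INIT_LOW,  "led-status" },
+ *		{ 33, GPIOF_OUT_INIT_HIGH, "led-power" },
+ *	};
+ *
+ *	err = gpio_request_array(leds, ARRAY_SIZE(leds));
+ *	...
+ *	gpio_free_array(leds, ARRAY_SIZE(leds));
+ */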
/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
-struct device;
-
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/bug.h>
-struct device;
-struct gpio_chip;
+#include <asm/bug.h>
+#include <asm/errno.h>
static inline bool gpio_is_valid(int number)
{
@@ -153,11 +177,6 @@ static inline int gpio_direction_output(unsigned gpio, int value)
return -ENOSYS;
}
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
- return -ENOSYS;
-}
-
static inline int gpio_get_value(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -171,13 +190,6 @@ static inline void gpio_set_value(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_cansleep(unsigned gpio)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return 0;
-}
-
static inline int gpio_get_value_cansleep(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -191,27 +203,6 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
-}
-
static inline int gpio_to_irq(unsigned gpio)
{
/* GPIO can never have been requested or set as input */
@@ -219,13 +210,6 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int irq_to_gpio(unsigned irq)
-{
- /* irq can never have been returned from gpio_to_irq() */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline int devm_gpio_request(struct device *dev, unsigned gpio,
const char *label)
{
@@ -240,11 +224,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
return -EINVAL;
}
-static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
- WARN_ON(1);
-}
-
#endif /* ! CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_H */
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
index 1bfb3cdc86d0..9a547e66c8c4 100644
--- a/include/linux/gpio/aspeed.h
+++ b/include/linux/gpio/aspeed.h
@@ -1,6 +1,10 @@
#ifndef __GPIO_ASPEED_H
#define __GPIO_ASPEED_H
+#include <linux/types.h>
+
+struct gpio_desc;
+
struct aspeed_gpio_copro_ops {
int (*request_access)(void *data);
int (*release_access)(void *data);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 901aab89d025..db2dfbae8edb 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -3,32 +3,22 @@
#define __LINUX_GPIO_CONSUMER_H
#include <linux/bits.h>
-#include <linux/bug.h>
-#include <linux/compiler_types.h>
-#include <linux/err.h>
+#include <linux/types.h>
+struct acpi_device;
struct device;
+struct fwnode_handle;
-/**
- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
- * preferable to the old integer-based handles.
- *
- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
- * until the GPIO is released.
- */
-struct gpio_desc;
-
-/**
- * Opaque descriptor for a structure of GPIO array attributes. This structure
- * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be
- * passed back to get/set array functions in order to activate fast processing
- * path if applicable.
- */
struct gpio_array;
+struct gpio_desc;
/**
- * Struct containing an array of descriptors that can be obtained using
- * gpiod_get_array().
+ * struct gpio_descs - Struct containing an array of descriptors that can be
+ * obtained using gpiod_get_array()
+ *
+ * @info: Pointer to the opaque gpio_array structure
+ * @ndescs: Number of held descriptors
+ * @desc: Array of pointers to GPIO descriptors
*/
struct gpio_descs {
struct gpio_array *info;
@@ -43,8 +33,16 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
- * Optional flags that can be passed to one of gpiod_* to configure direction
- * and output value. These values cannot be OR'd.
+ * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to
+ * configure direction and output value. These values
+ * cannot be OR'd.
+ *
+ * @GPIOD_ASIS: Don't change anything
+ * @GPIOD_IN: Set lines to input mode
+ * @GPIOD_OUT_LOW: Set lines to output and drive them low
+ * @GPIOD_OUT_HIGH: Set lines to output and drive them high
+ * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low
+ * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
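
/*
 * Usage sketch for these flags (the "reset" con_id is assumed for
 * illustration):
 *
 *	struct gpio_desc *reset;
 *
 *	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
 *	if (IS_ERR(reset))
 *		return PTR_ERR(reset);
 *	gpiod_set_value(reset, 1);
 */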
@@ -112,6 +110,8 @@ int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
@@ -158,8 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap);
int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
-int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
@@ -172,13 +171,6 @@ int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
-/* Child properties interface */
-struct fwnode_handle;
-
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
@@ -191,8 +183,11 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
#else /* CONFIG_GPIOLIB */
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <asm/bug.h>
+
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
@@ -353,8 +348,18 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
WARN_ON(desc);
return -ENOSYS;
}
-
-
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -481,14 +486,7 @@ static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
return -ENOSYS;
}
-static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
-{
- /* GPIO can never have been requested */
- WARN_ON(desc);
- return -ENOSYS;
-}
-
-static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce)
{
/* GPIO can never have been requested */
WARN_ON(desc);
@@ -541,18 +539,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
return -EINVAL;
}
-/* Child properties interface */
-struct fwnode_handle;
-
-static inline
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
static inline
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
@@ -585,75 +571,6 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
flags, label);
}
-static inline
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
-{
- return devm_fwnode_gpiod_get_index(dev, child, con_id, index,
- flags, label);
-}
-
-static inline
-struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
- const char *con_id,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
-{
- return devm_fwnode_gpiod_get_index(dev, child, con_id, 0, flags, label);
-}
-
-#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
-struct device_node;
-
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
-
-#else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
-
-struct device_node;
-
-static inline
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
-
-#ifdef CONFIG_GPIOLIB
-struct device_node;
-
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
-
-#else /* CONFIG_GPIOLIB */
-
-struct device_node;
-
-static inline
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-#endif /* CONFIG_GPIOLIB */
-
struct acpi_gpio_params {
unsigned int crs_entry_index;
unsigned int line_index;
@@ -674,25 +591,24 @@ struct acpi_gpio_mapping {
* get GpioIo type explicitly, this quirk may be used.
*/
#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1)
+/* Use given pin as an absolute GPIO number in the system */
+#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER BIT(2)
unsigned int quirks;
};
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
-struct acpi_device;
-
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios);
void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev);
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
-struct acpi_device;
+#include <linux/err.h>
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
@@ -706,7 +622,6 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
{
return -ENXIO;
}
-static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
@@ -720,6 +635,8 @@ void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+#include <asm/errno.h>
+
static inline int gpiod_export(struct gpio_desc *desc,
bool direction_may_change)
{
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index d1cef5c2715c..dc75f802e284 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -2,25 +2,44 @@
#ifndef __LINUX_GPIO_DRIVER_H
#define __LINUX_GPIO_DRIVER_H
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/irq.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqhandler.h>
#include <linux/lockdep.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
-struct gpio_desc;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+#include <asm/msi.h>
+#endif
+
+struct device;
+struct irq_chip;
+struct irq_data;
+struct module;
struct of_phandle_args;
-struct device_node;
+struct pinctrl_dev;
struct seq_file;
+
+struct gpio_chip;
+struct gpio_desc;
struct gpio_device;
-struct module;
-enum gpiod_flags;
+
enum gpio_lookup_flags;
+enum gpiod_flags;
-struct gpio_chip;
+union gpio_irq_fwspec {
+ struct irq_fwspec fwspec;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ msi_alloc_info_t msiinfo;
+#endif
+};
#define GPIO_LINE_DIRECTION_IN 1
#define GPIO_LINE_DIRECTION_OUT 0
@@ -44,13 +63,6 @@ struct gpio_irq_chip {
*/
struct irq_domain *domain;
- /**
- * @domain_ops:
- *
- * Table of interrupt domain operations for this IRQ chip.
- */
- const struct irq_domain_ops *domain_ops;
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* @fwnode:
@@ -102,9 +114,10 @@ struct gpio_irq_chip {
* variant named &gpiochip_populate_parent_fwspec_fourcell is also
* available.
*/
- void *(*populate_parent_alloc_arg)(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type);
+ int (*populate_parent_alloc_arg)(struct gpio_chip *gc,
+ union gpio_irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
/**
* @child_offset_to_irq:
@@ -166,13 +179,26 @@ struct gpio_irq_chip {
*/
irq_flow_handler_t parent_handler;
- /**
- * @parent_handler_data:
- *
- * Data associated, and passed to, the handler for the parent
- * interrupt.
- */
- void *parent_handler_data;
+ union {
+ /**
+ * @parent_handler_data:
+ *
+ * If @per_parent_data is false, @parent_handler_data is a
+ * single pointer used as the data associated with every
+ * parent interrupt.
+ */
+ void *parent_handler_data;
+
+ /**
+ * @parent_handler_data_array:
+ *
+ * If @per_parent_data is true, @parent_handler_data_array is
+ * an array of @num_parents pointers, and is used to associate
+ * different data for each parent. This cannot be NULL if
+ * @per_parent_data is true.
+ */
+ void **parent_handler_data_array;
+ };
/**
* @num_parents:
@@ -204,6 +230,31 @@ struct gpio_irq_chip {
bool threaded;
/**
+ * @per_parent_data:
+ *
+ * True if parent_handler_data_array describes a @num_parents
+ * sized array to be used as parent data.
+ */
+ bool per_parent_data;
+
+ /**
+ * @initialized:
+ *
+ * Flag tracking the initialization of the GPIO chip's irq members;
+ * it ensures those members are not used before they are initialized.
+ */
+ bool initialized;
+
+ /**
+ * @domain_is_allocated_externally:
+ *
+ * True if the irq_domain was allocated outside of gpiolib, in which
+ * case gpiolib won't free the irq_domain itself.
+ */
+ bool domain_is_allocated_externally;
+
+ /**
* @init_hw: optional routine to initialize hardware before
* an IRQ chip will be added. This is quite useful when
* a particular driver wants to clear IRQ related registers
@@ -227,7 +278,7 @@ struct gpio_irq_chip {
/**
* @valid_mask:
*
- * If not %NULL holds bitmask of GPIOs which are valid to be included
+ * If not %NULL, holds bitmask of GPIOs which are valid to be included
* in IRQ domain of the chip.
*/
unsigned long *valid_mask;
@@ -274,6 +325,7 @@ struct gpio_irq_chip {
* number or the name of the SoC IP-block implementing it.
* @gpiodev: the internal state holder, opaque struct
* @parent: optional parent device providing the GPIOs
+ * @fwnode: optional fwnode providing this controller's properties
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
* enabling module power and clock; may sleep
@@ -283,10 +335,12 @@ struct gpio_irq_chip {
* (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
* or negative error. It is recommended to always implement this
* function, even on input-only or output-only gpio chips.
- * @direction_input: configures signal "offset" as input, or returns error
- * This can be omitted on input-only or output-only gpio chips.
- * @direction_output: configures signal "offset" as output, or returns error
- * This can be omitted on input-only or output-only gpio chips.
+ * @direction_input: configures signal "offset" as input, returns 0 on success
+ * or a negative error number. This can be omitted on input-only or
+ * output-only gpio chips.
+ * @direction_output: configures signal "offset" as output, returns 0 on
+ * success or a negative error number. This can be omitted on input-only
+ * or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
@@ -294,7 +348,7 @@ struct gpio_irq_chip {
* @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_config: optional hook for all kinds of settings. Uses the same
* packed config format as generic pinconf.
- * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * @to_irq: optional hook supporting non-static gpiod_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
@@ -304,6 +358,10 @@ struct gpio_irq_chip {
 * @add_pin_ranges: optional routine to initialize pin ranges, to be used when
 * the platform requires special mapping of the pins that provide GPIO
 * functionality.
* It is called after adding GPIO chip and before adding IRQ chip.
+ * @en_hw_timestamp: optional routine, if supported by the GPIO chip, to
+ * enable hardware timestamping.
+ * @dis_hw_timestamp: optional routine, if supported by the GPIO chip, to
+ * disable hardware timestamping.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -312,6 +370,9 @@ struct gpio_irq_chip {
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
+ * @offset: when multiple gpio chips belong to the same device, this can be
+ * used as an offset within the device so that friendly names can be
+ * properly assigned.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
@@ -346,7 +407,7 @@ struct gpio_irq_chip {
* output.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
- * they can all be accessed through a common programing interface.
+ * they can all be accessed through a common programming interface.
* Example sources would be SOC controllers, FPGAs, multifunction
* chips, dedicated GPIO expanders, and so on.
*
@@ -359,6 +420,7 @@ struct gpio_chip {
const char *label;
struct gpio_device *gpiodev;
struct device *parent;
+ struct fwnode_handle *fwnode;
struct module *owner;
int (*request)(struct gpio_chip *gc,
@@ -396,8 +458,15 @@ struct gpio_chip {
int (*add_pin_ranges)(struct gpio_chip *gc);
+ int (*en_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
+ int (*dis_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
int base;
u16 ngpio;
+ u16 offset;
const char *const *names;
bool can_sleep;
@@ -412,7 +481,7 @@ struct gpio_chip {
void __iomem *reg_dir_in;
bool bgpio_dir_unreadable;
int bgpio_bits;
- spinlock_t bgpio_lock;
+ raw_spinlock_t bgpio_lock;
unsigned long bgpio_data;
unsigned long bgpio_dir;
#endif /* CONFIG_GPIO_GENERIC */
@@ -435,25 +504,18 @@ struct gpio_chip {
/**
* @valid_mask:
*
- * If not %NULL holds bitmask of GPIOs which are valid to be used
+ * If not %NULL, holds bitmask of GPIOs which are valid to be used
* from the chip.
*/
unsigned long *valid_mask;
#if defined(CONFIG_OF_GPIO)
/*
- * If CONFIG_OF is enabled, then all GPIO controllers described in the
- * device tree automatically may have an OF translation
+ * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in
+ * the device tree may automatically have an OF translation
*/
/**
- * @of_node:
- *
- * Pointer to a device tree node representing this GPIO controller.
- */
- struct device_node *of_node;
-
- /**
* @of_gpio_n_cells:
*
* Number of cells used to form the GPIO specifier.
@@ -471,29 +533,64 @@ struct gpio_chip {
#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *gc,
- unsigned int offset);
+char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset);
+
+
+struct _gpiochip_for_each_data {
+ const char **label;
+ unsigned int *i;
+};
+
+DEFINE_CLASS(_gpiochip_for_each_data,
+ struct _gpiochip_for_each_data,
+ if (*_T.label) kfree(*_T.label),
+ ({
+ struct _gpiochip_for_each_data _data = { label, i };
+ *_data.i = 0;
+ _data;
+ }),
+ const char **label, int *i)
+
+/**
+ * for_each_hwgpio - Iterates over all GPIOs of a given chip.
+ * @_chip: Chip to iterate over.
+ * @_i: Loop counter.
+ * @_label: Place to store the address of the label if the GPIO is requested.
+ * Set to NULL for unused GPIOs.
+ */
+#define for_each_hwgpio(_chip, _i, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ *_data.i < _chip->ngpio; \
+ (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \
+ if (IS_ERR(*_data.label = \
+ gpiochip_dup_line_label(_chip, *_data.i))) {} \
+ else
/**
* for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
- * @chip: the chip to query
- * @i: loop variable
- * @base: first GPIO in the range
- * @size: amount of GPIOs to check starting from @base
- * @label: label of current GPIO
+ * @_chip: the chip to query
+ * @_i: loop variable
+ * @_base: first GPIO in the range
+ * @_size: number of GPIOs to check starting from @_base
+ * @_label: label of current GPIO
*/
-#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
- for (i = 0; i < size; i++) \
- if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ *_data.i < _size; \
+ (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \
+ if ((*_data.label = \
+ gpiochip_dup_line_label(_chip, _base + *_data.i)) == NULL) {} \
+ else if (IS_ERR(*_data.label)) {} \
+ else
/* Iterates over all requested GPIOs of the given @chip */
#define for_each_requested_gpio(chip, i, label) \
for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
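
/*
 * Usage sketch for the iterators above, e.g. in a debugfs dump; the
 * printing is illustrative:
 *
 *	unsigned int i;
 *	const char *label;
 *
 *	for_each_requested_gpio(chip, i, label)
 *		seq_printf(s, "gpio-%u: %s\n", i, label);
 */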
/* add/remove chips */
-extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
/**
* gpiochip_add_data() - register a gpio_chip
@@ -508,7 +605,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
* for GPIOs will fail rudely.
*
* gpiochip_add_data() must only be called after gpiolib initialization,
- * ie after core_initcall().
+ * i.e. after core_initcall().
*
* If gc->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
@@ -541,13 +638,24 @@ static inline int gpiochip_add(struct gpio_chip *gc)
{
return gpiochip_add_data(gc, NULL);
}
-extern void gpiochip_remove(struct gpio_chip *gc);
-extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+void gpiochip_remove(struct gpio_chip *gc);
+int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
+ void *data, struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
+
+struct gpio_device *gpio_device_find(const void *data,
+ int (*match)(struct gpio_chip *gc,
+ const void *data));
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
+
+struct gpio_device *gpio_device_get(struct gpio_device *gdev);
+void gpio_device_put(struct gpio_device *gdev);
-extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *gc, void *data));
+DEFINE_FREE(gpio_device_put, struct gpio_device *,
+ if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T))
+
+struct device *gpio_device_to_device(struct gpio_device *gdev);
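+
+/*
+ * Lookup sketch combining the finders above with the DEFINE_FREE()
+ * cleanup helper; the label is an assumption:
+ *
+ *	struct gpio_device *gdev __free(gpio_device_put) =
+ *		gpio_device_find_by_label("my-gpio-bank");
+ *	if (!gdev)
+ *		return -ENODEV;
+ */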
bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
@@ -555,6 +663,22 @@ void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset);
+/* irq_data versions of the above */
+int gpiochip_irq_reqres(struct irq_data *data);
+void gpiochip_irq_relres(struct irq_data *data);
+
+/* Paste this in your irq_chip structure */
+#define GPIOCHIP_IRQ_RESOURCE_HELPERS \
+ .irq_request_resources = gpiochip_irq_reqres, \
+ .irq_release_resources = gpiochip_irq_relres
+
+static inline void gpio_irq_chip_set_chip(struct gpio_irq_chip *girq,
+ const struct irq_chip *chip)
+{
+ /* Yes, dropping const is ugly, but it isn't like we have a choice */
+ girq->chip = (struct irq_chip *)chip;
+}
+
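+/*
+ * Usage sketch for the two helpers above; the callback names are
+ * assumptions:
+ *
+ *	static const struct irq_chip my_gpio_irq_chip = {
+ *		.name		= "my-gpio",
+ *		.irq_mask	= my_gpio_irq_mask,
+ *		.irq_unmask	= my_gpio_irq_unmask,
+ *		.flags		= IRQCHIP_IMMUTABLE,
+ *		GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ *	};
+ *
+ *	gpio_irq_chip_set_chip(&gc->irq, &my_gpio_irq_chip);
+ */
+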
/* Line status inquiry for drivers */
bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset);
bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
@@ -574,28 +698,14 @@ struct bgpio_pdata {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
unsigned int parent_hwirq,
unsigned int parent_type);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type);
-
-#else
-
-static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
-{
- return NULL;
-}
-
-static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
-{
- return NULL;
-}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
@@ -612,91 +722,20 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
#define BGPIOF_NO_SET_ON_INPUT BIT(6)
-int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq);
-void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq);
-
-int gpiochip_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *data, bool reserve);
-void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
- struct irq_data *data);
-
-void gpiochip_set_nested_irqchip(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int parent_irq);
-
-int gpiochip_irqchip_add_key(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool threaded,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
-
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
- unsigned int offset);
-
+#ifdef CONFIG_GPIOLIB_IRQCHIP
int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain);
+#else
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Lockdep requires that each irqchip instance be created with a
- * unique key so as to avoid unnecessary warnings. This upfront
- * boilerplate static inlines provides such a key for each
- * unique instance.
- */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- static struct lock_class_key lock_key;
- static struct lock_class_key request_key;
-
- return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
- handler, type, false,
- &lock_key, &request_key);
-}
-
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
-
- static struct lock_class_key lock_key;
- static struct lock_class_key request_key;
+#include <asm/bug.h>
- return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
- handler, type, true,
- &lock_key, &request_key);
-}
-#else /* ! CONFIG_LOCKDEP */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
+static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
{
- return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
- handler, type, false, NULL, NULL);
-}
-
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
- handler, type, true, NULL, NULL);
+ WARN_ON(1);
+ return -EINVAL;
}
-#endif /* CONFIG_LOCKDEP */
+#endif
int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
@@ -756,8 +795,11 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
-void devprop_gpiochip_set_names(struct gpio_chip *gc,
- const struct fwnode_handle *fwnode);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
+struct gpio_desc *
+gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
+
+struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev);
#ifdef CONFIG_GPIOLIB
@@ -765,11 +807,17 @@ void devprop_gpiochip_set_names(struct gpio_chip *gc,
int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
-
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
+
+/* struct gpio_device getters */
+int gpio_device_get_base(struct gpio_device *gdev);
+const char *gpio_device_get_label(struct gpio_device *gdev);
#else /* CONFIG_GPIOLIB */
+#include <asm/bug.h>
+
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -777,6 +825,24 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int gpio_device_get_base(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return -ENODEV;
+}
+
+static inline const char *gpio_device_get_label(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
@@ -791,4 +857,29 @@ static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
}
#endif /* CONFIG_GPIOLIB */
+#define for_each_gpiochip_node(dev, child) \
+ device_for_each_child_node(dev, child) \
+ if (!fwnode_property_present(child, "gpio-controller")) {} else
+
+static inline unsigned int gpiochip_node_count(struct device *dev)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ for_each_gpiochip_node(dev, child)
+ count++;
+
+ return count;
+}
+
+static inline struct fwnode_handle *gpiochip_node_get_first(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ for_each_gpiochip_node(dev, fwnode)
+ return fwnode;
+
+ return NULL;
+}
+
#endif /* __LINUX_GPIO_DRIVER_H */
diff --git a/include/linux/gpio/gpio-nomadik.h b/include/linux/gpio/gpio-nomadik.h
new file mode 100644
index 000000000000..b5a84864650d
--- /dev/null
+++ b/include/linux/gpio/gpio-nomadik.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GPIO_NOMADIK_H
+#define __LINUX_GPIO_NOMADIK_H
+
+struct fwnode_handle;
+
+/* Package definitions */
+#define PINCTRL_NMK_STN8815 0
+#define PINCTRL_NMK_DB8500 1
+
+#define GPIO_BLOCK_SHIFT 5
+#define NMK_GPIO_PER_CHIP BIT(GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(512, NMK_GPIO_PER_CHIP)
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+/* These appear in DB8540 and later ASICs */
+#define NMK_GPIO_EDGELEVEL 0x5C
+#define NMK_GPIO_LEVEL 0x60
+
+/* Pull up/down values */
+enum nmk_gpio_pull {
+ NMK_GPIO_PULL_NONE,
+ NMK_GPIO_PULL_UP,
+ NMK_GPIO_PULL_DOWN,
+};
+
+/* Sleep mode */
+enum nmk_gpio_slpm {
+ NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_NOCHANGE,
+ NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
+};
+
+struct nmk_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *addr;
+ struct clk *clk;
+ unsigned int bank;
+ void (*set_ioforce)(bool enable);
+ spinlock_t lock;
+ bool sleepmode;
+ bool is_mobileye_soc;
+ /* Keep track of configured edges */
+ u32 edge_rising;
+ u32 edge_falling;
+ u32 real_wake;
+ u32 rwimsc;
+ u32 fwimsc;
+ u32 rimsc;
+ u32 fimsc;
+ u32 pull_up;
+ u32 lowemi;
+};
+
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+#define NMK_GPIO_ALT_CX_SHIFT 2
+#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+
+#define PRCM_GPIOCR_ALTCX(pin_num,\
+ altc1_used, altc1_ri, altc1_cb,\
+ altc2_used, altc2_ri, altc2_cb,\
+ altc3_used, altc3_ri, altc3_cb,\
+ altc4_used, altc4_ri, altc4_cb)\
+{\
+ .pin = pin_num,\
+ .altcx[PRCM_IDX_GPIOCR_ALTC1] = {\
+ .used = altc1_used,\
+ .reg_index = altc1_ri,\
+ .control_bit = altc1_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC2] = {\
+ .used = altc2_used,\
+ .reg_index = altc2_ri,\
+ .control_bit = altc2_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC3] = {\
+ .used = altc3_used,\
+ .reg_index = altc3_ri,\
+ .control_bit = altc3_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC4] = {\
+ .used = altc4_used,\
+ .reg_index = altc4_ri,\
+ .control_bit = altc4_cb\
+ },\
+}
+
+/**
+ * enum prcm_gpiocr_reg_index
+ * Used to reference a PRCM GPIOCR register address.
+ */
+enum prcm_gpiocr_reg_index {
+ PRCM_IDX_GPIOCR1,
+ PRCM_IDX_GPIOCR2,
+ PRCM_IDX_GPIOCR3
+};
+/**
+ * enum prcm_gpiocr_altcx_index
+ * Used to reference an Other alternate-C function.
+ */
+enum prcm_gpiocr_altcx_index {
+ PRCM_IDX_GPIOCR_ALTC1,
+ PRCM_IDX_GPIOCR_ALTC2,
+ PRCM_IDX_GPIOCR_ALTC3,
+ PRCM_IDX_GPIOCR_ALTC4,
+ PRCM_IDX_GPIOCR_ALTC_MAX,
+};
+
+/**
+ * struct prcm_gpiocr_altcx - Other alternate-C function
+ * @used: other alternate-C function availability
+ * @reg_index: PRCM GPIOCR register index used to control the function
+ * @control_bit: PRCM GPIOCR bit used to control the function
+ */
+struct prcm_gpiocr_altcx {
+ bool used:1;
+ u8 reg_index:2;
+ u8 control_bit:5;
+} __packed;
+
+/**
+ * struct prcm_gpiocr_altcx_pin_desc - Other alternate-C pin
+ * @pin: The pin number
+ * @altcx: array of other alternate-C[1-4] functions
+ */
+struct prcm_gpiocr_altcx_pin_desc {
+ unsigned short pin;
+ struct prcm_gpiocr_altcx altcx[PRCM_IDX_GPIOCR_ALTC_MAX];
+};
+
+/**
+ * struct nmk_function - Nomadik pinctrl mux function
+ * @name: The name of the function, exported to pinctrl core.
+ * @groups: An array of pin groups that may select this function.
+ * @ngroups: The number of entries in @groups.
+ */
+struct nmk_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct nmk_pingroup - describes a Nomadik pin group
+ * @grp: Generic data of the pin group (name and pins)
+ * @altsetting: the altsetting to apply to all pins in this group to
+ * configure them to be used by a function
+ */
+struct nmk_pingroup {
+ struct pingroup grp;
+ int altsetting;
+};
+
+#define NMK_PIN_GROUP(a, b) \
+ { \
+ .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
+ .altsetting = b, \
+ }
+
+/**
+ * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
+ * @pins: An array describing all pins the pin controller affects.
+ * All pins which are also GPIOs must be listed first within the
+ * array, and be numbered identically to the GPIO controller's
+ * numbering.
+ * @npins: The number of entries in @pins.
+ * @functions: The functions supported on this SoC.
+ * @nfunctions: The number of entries in @functions.
+ * @groups: An array describing all pin groups this SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @altcx_pins: The pins that support Other alternate-C function on this SoC
+ * @npins_altcx: The number of Other alternate-C pins
+ * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
+ */
+struct nmk_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct nmk_function *functions;
+ unsigned int nfunctions;
+ const struct nmk_pingroup *groups;
+ unsigned int ngroups;
+ const struct prcm_gpiocr_altcx_pin_desc *altcx_pins;
+ unsigned int npins_altcx;
+ const u16 *prcm_gpiocr_registers;
+};
+
+#ifdef CONFIG_PINCTRL_STN8815
+
+void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_DB8500
+
+void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_DB8540
+
+void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+struct platform_device;
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Symbols declared in gpio-nomadik used by pinctrl-nomadik. If pinctrl-nomadik
+ * is enabled, then gpio-nomadik is enabled as well; the reverse if not always
+ * true.
+ */
+void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip, unsigned int offset,
+ unsigned int gpio);
+
+#else
+
+static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned int gpio)
+{
+}
+
+#endif
+
+void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val);
+void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned int offset,
+ enum nmk_gpio_slpm mode);
+struct nmk_gpio_chip *nmk_gpio_populate_chip(struct fwnode_handle *fwnode,
+ struct platform_device *pdev);
+
+/* Symbols declared in pinctrl-nomadik used by gpio-nomadik. */
+#ifdef CONFIG_PINCTRL_NOMADIK
+extern struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
+extern spinlock_t nmk_gpio_slpm_lock;
+int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
+ int gpio);
+#endif
+
+#endif /* __LINUX_GPIO_NOMADIK_H */
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
index 39b888c40b39..3913b6660ed1 100644
--- a/include/linux/gpio/gpio-reg.h
+++ b/include/linux/gpio/gpio-reg.h
@@ -2,9 +2,13 @@
#ifndef GPIO_REG_H
#define GPIO_REG_H
+#include <linux/types.h>
+
struct device;
struct irq_domain;
+struct gpio_chip;
+
struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
int base, int num, const char *label, u32 direction, u32 def_out,
const char *const *names, struct irq_domain *irqdom, const int *irqs);
diff --git a/include/linux/gpio/legacy-of-mm-gpiochip.h b/include/linux/gpio/legacy-of-mm-gpiochip.h
new file mode 100644
index 000000000000..2e2bd3b19cc3
--- /dev/null
+++ b/include/linux/gpio/legacy-of-mm-gpiochip.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OF helpers for the old of_mm_gpio_chip, used on ppc32 and nios2,
+ * do not use in new code.
+ *
+ * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#ifndef __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H
+#define __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H
+
+#include <linux/gpio/driver.h>
+#include <linux/of.h>
+
+/*
+ * OF GPIO chip for memory mapped banks
+ */
+struct of_mm_gpio_chip {
+ struct gpio_chip gc;
+ void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
+ void __iomem *regs;
+};
+
+static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct of_mm_gpio_chip, gc);
+}
+
+extern int of_mm_gpiochip_add_data(struct device_node *np,
+ struct of_mm_gpio_chip *mm_gc,
+ void *data);
+extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
+
+#endif /* __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H */
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 781a053abbb9..44e5f162973e 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -3,7 +3,6 @@
#define __LINUX_GPIO_MACHINE_H
#include <linux/types.h>
-#include <linux/list.h>
enum gpio_lookup_flags {
GPIO_ACTIVE_HIGH = (0 << 0),
@@ -14,6 +13,7 @@ enum gpio_lookup_flags {
GPIO_TRANSITORY = (1 << 3),
GPIO_PULL_UP = (1 << 4),
GPIO_PULL_DOWN = (1 << 5),
+ GPIO_PULL_DISABLE = (1 << 6),
GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
@@ -64,6 +64,18 @@ struct gpiod_hog {
};
/*
+ * Helper for lookup tables with just a single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = { \
+ .dev_id = _dev_id, \
+ .table = { \
+ GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags), \
+ {}, \
+ }, \
+}
+
+/*
* Simple definition of a single GPIO under a con_id
*/
#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
@@ -75,7 +87,7 @@ struct gpiod_hog {
* gpiod_get_index()
*/
#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
-{ \
+(struct gpiod_lookup) { \
.key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
@@ -87,7 +99,7 @@ struct gpiod_hog {
* Simple definition of a single GPIO hog in an array.
*/
#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
-{ \
+(struct gpiod_hog) { \
.chip_label = _chip_label, \
.chip_hwnum = _chip_hwnum, \
.line_name = _line_name, \
@@ -100,6 +112,7 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_hogs(struct gpiod_hog *hogs);
+void gpiod_remove_hogs(struct gpiod_hog *hogs);
#else /* ! CONFIG_GPIOLIB */
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -108,6 +121,7 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
+static inline void gpiod_remove_hogs(struct gpiod_hog *hogs) {}
#endif /* CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_MACHINE_H */
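
For illustration, a board-code sketch of the new GPIO_LOOKUP_SINGLE() helper
(the device name, chip label, hwnum and con_id are hypothetical):

    /* One lookup table with a single entry for device "leds-gpio". */
    GPIO_LOOKUP_SINGLE(led_lookup, "leds-gpio", "gpio-bank0", 4,
               "led", GPIO_ACTIVE_HIGH);

    static void board_sketch_init(void)
    {
        gpiod_add_lookup_table(&led_lookup);
    }
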
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
new file mode 100644
index 000000000000..6c75c8bd44a0
--- /dev/null
+++ b/include/linux/gpio/property.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __LINUX_GPIO_PROPERTY_H
+#define __LINUX_GPIO_PROPERTY_H
+
+#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
+#include <linux/property.h>
+
+#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
+ PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_)
+
+#endif /* __LINUX_GPIO_PROPERTY_H */
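
For illustration, a sketch of how PROPERTY_ENTRY_GPIO() might be used with
software nodes (the node and property names are hypothetical):

    static const struct software_node gpio_bank_node = { .name = "gpio-bank0" };

    static const struct property_entry consumer_props[] = {
        /* GPIO 2 of gpio_bank_node, active high */
        PROPERTY_ENTRY_GPIO("enable-gpios", &gpio_bank_node, 2,
                    GPIO_ACTIVE_HIGH),
        { }
    };
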
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index ad76f3d0a6ba..a9f7b7faf57b 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -4,6 +4,7 @@
#define _LINUX_GPIO_REGMAP_H
struct device;
+struct fwnode_handle;
struct gpio_regmap;
struct irq_domain;
struct regmap;
@@ -16,6 +17,8 @@ struct regmap;
* @parent: The parent device
* @regmap: The regmap used to access the registers
* given, the name of the device is used
+ * @fwnode: (Optional) The firmware node.
+ * If not given, the fwnode of the parent is used.
* @label: (Optional) Descriptive name for GPIO controller.
* If not given, the name of the device is used.
* @ngpio: Number of GPIOs
@@ -34,6 +37,9 @@ struct regmap;
* offset to a register/bitmask pair. If not
* given the default gpio_regmap_simple_xlate()
* is used.
+ * @drvdata: (Optional) Pointer to driver specific data which is
+ * not used by gpio-regmap but is provided "as is" to the
+ * driver callback(s).
*
* The ->reg_mask_xlate translates a given base address and GPIO offset to
* register and mask pair. The base address is one of the given register
@@ -57,6 +63,7 @@ struct regmap;
struct gpio_regmap_config {
struct device *parent;
struct regmap *regmap;
+ struct fwnode_handle *fwnode;
const char *label;
int ngpio;
@@ -74,13 +81,14 @@ struct gpio_regmap_config {
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask);
+
+ void *drvdata;
};
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
void gpio_regmap_unregister(struct gpio_regmap *gpio);
struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
const struct gpio_regmap_config *config);
-void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data);
void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
#endif /* _LINUX_GPIO_REGMAP_H */
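
With gpio_regmap_set_drvdata() removed, driver data is now attached through
the config at registration time. A minimal sketch (field values are
illustrative; real users also set the register bases not shown in this hunk):

    static struct gpio_regmap *sketch_register(struct device *dev,
                           struct regmap *map, void *priv)
    {
        struct gpio_regmap_config cfg = {
            .parent  = dev,
            .regmap  = map,
            .ngpio   = 16,
            .drvdata = priv, /* read back via gpio_regmap_get_drvdata() */
        };

        return devm_gpio_regmap_register(dev, &cfg);
    }
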
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 3f84aeb81e48..80fa930b04c6 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -21,6 +21,7 @@ struct device;
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
+ * @wakeirq: Optional dedicated wake-up interrupt
*/
struct gpio_keys_button {
unsigned int code;
@@ -34,6 +35,7 @@ struct gpio_keys_button {
bool can_disable;
int value;
unsigned int irq;
+ unsigned int wakeirq;
};
/**
diff --git a/include/linux/greybus.h b/include/linux/greybus.h
index 18c0fb958b74..634c9511cf78 100644
--- a/include/linux/greybus.h
+++ b/include/linux/greybus.h
@@ -104,44 +104,14 @@ void gb_debugfs_init(void);
void gb_debugfs_cleanup(void);
struct dentry *gb_debugfs_get(void);
-extern struct bus_type greybus_bus_type;
-
-extern struct device_type greybus_hd_type;
-extern struct device_type greybus_module_type;
-extern struct device_type greybus_interface_type;
-extern struct device_type greybus_control_type;
-extern struct device_type greybus_bundle_type;
-extern struct device_type greybus_svc_type;
-
-static inline int is_gb_host_device(const struct device *dev)
-{
- return dev->type == &greybus_hd_type;
-}
-
-static inline int is_gb_module(const struct device *dev)
-{
- return dev->type == &greybus_module_type;
-}
-
-static inline int is_gb_interface(const struct device *dev)
-{
- return dev->type == &greybus_interface_type;
-}
-
-static inline int is_gb_control(const struct device *dev)
-{
- return dev->type == &greybus_control_type;
-}
-
-static inline int is_gb_bundle(const struct device *dev)
-{
- return dev->type == &greybus_bundle_type;
-}
-
-static inline int is_gb_svc(const struct device *dev)
-{
- return dev->type == &greybus_svc_type;
-}
+extern const struct bus_type greybus_bus_type;
+
+extern const struct device_type greybus_hd_type;
+extern const struct device_type greybus_module_type;
+extern const struct device_type greybus_interface_type;
+extern const struct device_type greybus_control_type;
+extern const struct device_type greybus_bundle_type;
+extern const struct device_type greybus_svc_type;
static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
{
diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h
index 6e62fe478712..bef9eb2093e9 100644
--- a/include/linux/greybus/greybus_manifest.h
+++ b/include/linux/greybus/greybus_manifest.h
@@ -100,7 +100,7 @@ enum {
struct greybus_descriptor_string {
__u8 length;
__u8 id;
- __u8 string[0];
+ __u8 string[];
} __packed;
/*
@@ -175,7 +175,7 @@ struct greybus_manifest_header {
struct greybus_manifest {
struct greybus_manifest_header header;
- struct greybus_descriptor descriptors[0];
+ struct greybus_descriptor descriptors[];
} __packed;
#endif /* __GREYBUS_MANIFEST_H */
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
index aeb8f9243545..820134b0105c 100644
--- a/include/linux/greybus/greybus_protocols.h
+++ b/include/linux/greybus/greybus_protocols.h
@@ -232,9 +232,7 @@ struct gb_fw_download_fetch_firmware_request {
__le32 size;
} __packed;
-struct gb_fw_download_fetch_firmware_response {
- __u8 data[0];
-} __packed;
+/* gb_fw_download_fetch_firmware_response contains no other data */
/* firmware download release firmware request */
struct gb_fw_download_release_firmware_request {
@@ -414,9 +412,7 @@ struct gb_bootrom_get_firmware_request {
__le32 size;
} __packed;
-struct gb_bootrom_get_firmware_response {
- __u8 data[0];
-} __packed;
+/* gb_bootrom_get_firmware_response contains no other data */
/* Bootrom protocol Ready to boot request */
struct gb_bootrom_ready_to_boot_request {
diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h
index d3faf0c1a569..718e2857054e 100644
--- a/include/linux/greybus/hd.h
+++ b/include/linux/greybus/hd.h
@@ -58,7 +58,7 @@ struct gb_host_device {
struct gb_svc *svc;
/* Private data for the host driver */
- unsigned long hd_priv[0] __aligned(sizeof(s64));
+ unsigned long hd_priv[] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h
index 47b839af145d..3efe2133acfd 100644
--- a/include/linux/greybus/module.h
+++ b/include/linux/greybus/module.h
@@ -23,7 +23,7 @@ struct gb_module {
bool disconnected;
- struct gb_interface *interfaces[0];
+ struct gb_interface *interfaces[];
};
#define to_gb_module(d) container_of(d, struct gb_module, dev)
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
index 5afaf5f06856..da547fb9071b 100644
--- a/include/linux/greybus/svc.h
+++ b/include/linux/greybus/svc.h
@@ -100,7 +100,4 @@ bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
#endif /* __SVC_H */
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
new file mode 100644
index 000000000000..e42807ec61f6
--- /dev/null
+++ b/include/linux/group_cpus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+
+#ifndef __LINUX_GROUP_CPUS_H
+#define __LINUX_GROUP_CPUS_H
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+struct cpumask *group_cpus_evenly(unsigned int numgrps);
+
+#endif
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
new file mode 100644
index 000000000000..f316c8d0f3fc
--- /dev/null
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -0,0 +1,1423 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "hl_boot_if.h"
+
+#define NUM_HBM_PSEUDO_CH 2
+#define NUM_HBM_CH_PER_DEV 8
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0
+
+#define PLL_MAP_MAX_BITS 128
+#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8)
+
+enum eq_event_id {
+ EQ_EVENT_NIC_STS_REQUEST = 0,
+ EQ_EVENT_PWR_MODE_0,
+ EQ_EVENT_PWR_MODE_1,
+ EQ_EVENT_PWR_MODE_2,
+ EQ_EVENT_PWR_MODE_3,
+ EQ_EVENT_PWR_BRK_ENTRY,
+ EQ_EVENT_PWR_BRK_EXIT,
+ EQ_EVENT_HEARTBEAT,
+};
+
+/*
+ * Info on the packet queue pointers in the first async occurrence.
+ */
+struct cpucp_pkt_sync_err {
+ __le32 pi;
+ __le32 ci;
+};
+
+struct hl_eq_hbm_ecc_data {
+ /* SERR counter */
+ __le32 sec_cnt;
+ /* DERR counter */
+ __le32 dec_cnt;
+ /* Supplemental Information according to the mask bits */
+ __le32 hbm_ecc_info;
+ /* Address in HBM where the ECC error happened */
+ __le32 first_addr;
+ /* SERR continuous address counter */
+ __le32 sec_cont_cnt;
+ __le32 pad;
+};
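
For illustration, a sketch of decoding hbm_ecc_info with the shift/mask pairs
above (the helper name is hypothetical):

    static u32 hbm_ecc_channel(const struct hl_eq_hbm_ecc_data *d)
    {
        u32 info = le32_to_cpu(d->hbm_ecc_info);

        return (info & CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK) >>
            CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT;
    }
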
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 is_critical;
+ __le16 block_id;
+ __u8 pad[4];
+};
+
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
+enum hl_fw_alive_severity {
+ FW_ALIVE_SEVERITY_MINOR,
+ FW_ALIVE_SEVERITY_CRITICAL
+};
+
+struct hl_eq_fw_alive {
+ __le64 uptime_seconds;
+ __le32 process_id;
+ __le32 thread_id;
+ /* enum hl_fw_alive_severity */
+ __u8 severity;
+ __u8 pad[7];
+};
+
+struct hl_eq_intr_cause {
+ __le64 intr_cause_data;
+};
+
+struct hl_eq_pcie_drain_ind_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 drain_wr_addr_lbw;
+ __le64 drain_rd_addr_lbw;
+ __le64 drain_wr_addr_hbw;
+ __le64 drain_rd_addr_hbw;
+};
+
+struct hl_eq_razwi_lbw_info_regs {
+ __le32 rr_aw_razwi_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+struct hl_eq_razwi_hbw_info_regs {
+ __le32 rr_aw_razwi_hi_reg;
+ __le32 rr_aw_razwi_lo_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_hi_reg;
+ __le32 rr_ar_razwi_lo_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+/* razwi_happened masks */
+#define RAZWI_HAPPENED_HBW 0x1
+#define RAZWI_HAPPENED_LBW 0x2
+#define RAZWI_HAPPENED_AW 0x4
+#define RAZWI_HAPPENED_AR 0x8
+
+struct hl_eq_razwi_info {
+ __le32 razwi_happened_mask;
+ union {
+ struct hl_eq_razwi_lbw_info_regs lbw;
+ struct hl_eq_razwi_hbw_info_regs hbw;
+ };
+ __le32 pad;
+};
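
For illustration, razwi_happened_mask selects which union member is valid; a
hedged consumer sketch (the helper name is hypothetical):

    static void razwi_sketch(const struct hl_eq_razwi_info *info)
    {
        u32 mask = le32_to_cpu(info->razwi_happened_mask);

        if (mask & RAZWI_HAPPENED_HBW)
            /* decode info->hbw registers */;
        else if (mask & RAZWI_HAPPENED_LBW)
            /* decode info->lbw registers */;
    }
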
+
+struct hl_eq_razwi_with_intr_cause {
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_intr_cause intr_cause;
+};
+
+#define HBM_CA_ERR_CMD_LIFO_LEN 8
+#define HBM_RD_ERR_DATA_LIFO_LEN 8
+#define HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+ * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_headr fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
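
Since the bit fields are FW-only, host code would decode rd_addr_val with the
shift/mask pairs above; a minimal sketch (the helper name is hypothetical):

    static u32 hbm_rd_row(const struct hbm_rd_addr *a)
    {
        u32 v = le32_to_cpu(a->rd_addr_val);

        return (v & HBM_RD_ADDR_ROW_MASK) >> HBM_RD_ADDR_ROW_SHIFT;
    }
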
+
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * This struct represents the following SEI causes:
+ * command parity, ECC double error, ECC single error, DFI error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * Some causes use only the header, while others carry extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+ /* ARC engine id, e.g. DCORE0_TPC0_QM_ARC, DCORE0_TPC1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+ /* More info related to the interrupt e.g. queue index
+ * in case of a DCCM_QUEUE_FULL interrupt.
+ */
+ __le64 payload;
+ __le64 pad[5];
+};
+
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ union {
+ __le64 data_placeholder;
+ struct hl_eq_ecc_data ecc_data;
+ struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */
+ struct hl_eq_sm_sei_data sm_sei_data;
+ struct cpucp_pkt_sync_err pkt_sync_err;
+ struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_intr_cause intr_cause;
+ struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data;
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
+ struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
+ struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
+ __le64 data[7];
+ };
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
+
+#define EQ_CTL_INDEX_SHIFT 0
+#define EQ_CTL_INDEX_MASK 0x0000FFFF
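
For illustration, a sketch of splitting an event-queue ctl word into its
fields (the helper name is hypothetical):

    static bool eq_entry_ready(const struct hl_eq_entry *e, u16 *type, u16 *index)
    {
        u32 ctl = le32_to_cpu(e->hdr.ctl);

        *type  = (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;
        *index = (ctl & EQ_CTL_INDEX_MASK) >> EQ_CTL_INDEX_SHIFT;
        return ctl & EQ_CTL_READY_MASK;
    }
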
+
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI,
+ PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR,
+ PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR
+};
+
+/*
+ * CpuCP Primary Queue Packets
+ *
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to CpuCP, usually either to SET some value into a H/W peripheral or
+ * to GET the current value of some H/W peripheral. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to CpuCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message is acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent in a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During the device initialization phase, the host passes that address to CpuCP.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to CPU CP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to ARC Management CPU.
+ *
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8,
+ * so that the value written by the host's driver matches the value read by
+ * CpuCP.
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1).
+ *
+ * Detailed description:
+ *
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ * transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ * This packet is usually sent after the device is moved to D0 state.
+ *
+ * CPUCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * CPUCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * CPUCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * CPUCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * CPUCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * CPUCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * CPUCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * CPUCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. The host's driver passes the max size it allows the CpuCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQ numbers are specified in an array right
+ * after the cpucp_packet structure, where its first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * CPUCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * CPUCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, the host's driver passes the max size it allows the
+ * CpuCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ * Fetch information from the device regarding the NIC. The host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to
+ * prevent data corruption in case of mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
+ * Get throughput of PCIe.
+ * The packet's arguments specify the transaction direction (TX/RX).
+ * The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET
+ * Replay count measures the number of "replay" events, which is basically
+ * the number of retries done by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
+ * Total Energy is a measurement of energy from the time the FW Linux
+ * is loaded. It is calculated by multiplying the average power
+ * by the elapsed time (since armcp start). The units are millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET -
+ * Fetch the frequencies of a PLL from the required PLL IP.
+ * The packet's arguments specify the device PLL type;
+ * the PLL type is taken from the device's pll_index enum.
+ * The result is composed of 4 outputs, each a 16-bit
+ * frequency in MHz.
+ *
+ * CPUCP_PACKET_POWER_GET -
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ * Set the index number for each supported MSI type going from
+ * host to device.
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE with regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch data on all HBM replaced rows and rows pending replacement.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and need a
+ * reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Resets power history of device to 0
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with a unique
+ * number (nonce) provided by the host to prevent replay attacks.
+ * A public key and certificate are also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ * Get the device information signed by the Trusted Platform device.
+ * The device info data is also hashed with a unique number (nonce) provided
+ * by the host to prevent replay attacks. A public key and certificate are
+ * also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get a dump of the monitor registers from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Obsolete.
+ *
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends FW indication whether device is free or in use, this indication is reported
+ * also to the BMC.
+ *
+ * CPUCP_PACKET_SOFT_RESET -
+ * Packet to perform soft-reset.
+ *
+ * CPUCP_PACKET_INTS_REGISTER -
+ * Packet to inform FW that queues have been established and LKD is ready to receive
+ * EQ events.
+ */
+
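
For illustration, a hedged host-side sketch of the synchronous flow described
above (the helper name and the DDR/interrupt plumbing are hypothetical; the
CPUCP_PKT_CTL_* and fence macros are defined further down in this header):

    static int cpucp_send_sketch(struct cpucp_packet *pkt, u32 opcode)
    {
        pkt->ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt->fence = 0;
        /* copy pkt to device DDR and write PSOC_ARC1_AUX_SW_INTR */
        /* poll until fence == CPUCP_PACKET_FENCE_VAL */
        /* extract the return code via CPUCP_PKT_CTL_RC_MASK/_SHIFT */
        return 0;
    }
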
+enum cpucp_packet_id {
+ CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_GET, /* sysfs */
+ CPUCP_PACKET_CURRENT_GET, /* sysfs */
+ CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ CPUCP_PACKET_PWM_GET, /* sysfs */
+ CPUCP_PACKET_PWM_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_GET, /* sysfs */
+ CPUCP_PACKET_LED_SET, /* debugfs */
+ CPUCP_PACKET_I2C_WR, /* debugfs */
+ CPUCP_PACKET_I2C_RD, /* debugfs */
+ CPUCP_PACKET_INFO_GET, /* IOCTL */
+ CPUCP_PACKET_FLASH_PROGRAM_REMOVED,
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ CPUCP_PACKET_TEST, /* internal */
+ CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */
+ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ CPUCP_PACKET_NIC_INFO_GET, /* internal */
+ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */
+ CPUCP_PACKET_CURRENT_SET, /* sysfs */
+ CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */
+ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
+ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
+ CPUCP_PACKET_PLL_INFO_GET, /* internal */
+ CPUCP_PACKET_NIC_STATUS, /* internal */
+ CPUCP_PACKET_POWER_GET, /* internal */
+ CPUCP_PACKET_NIC_PFC_SET, /* internal */
+ CPUCP_PACKET_NIC_FAULT_GET, /* internal */
+ CPUCP_PACKET_NIC_LPBK_SET, /* internal */
+ CPUCP_PACKET_NIC_MAC_CFG, /* internal */
+ CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
+ CPUCP_PACKET_INFO_SIGNED_GET, /* internal */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_RESERVED13, /* internal */
+ CPUCP_PACKET_SOFT_RESET, /* internal */
+ CPUCP_PACKET_INTS_REGISTER, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
+};
+
+#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define CPUCP_PKT_CTL_RC_SHIFT 12
+#define CPUCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define CPUCP_PKT_CTL_OPCODE_SHIFT 16
+#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
+
+#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull
+#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32
+#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull
+#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
+#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
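
For illustration, a PLL_INFO_GET result packs four 16-bit MHz values; a
minimal unpacking sketch (the helper name is hypothetical):

    static void pll_freqs_mhz(u64 result, u16 out[4])
    {
        out[0] = (result & CPUCP_PKT_RES_PLL_OUT0_MASK) >>
             CPUCP_PKT_RES_PLL_OUT0_SHIFT;
        out[1] = (result & CPUCP_PKT_RES_PLL_OUT1_MASK) >>
             CPUCP_PKT_RES_PLL_OUT1_SHIFT;
        out[2] = (result & CPUCP_PKT_RES_PLL_OUT2_MASK) >>
             CPUCP_PKT_RES_PLL_OUT2_SHIFT;
        out[3] = (result & CPUCP_PKT_RES_PLL_OUT3_MASK) >>
             CPUCP_PKT_RES_PLL_OUT3_SHIFT;
    }
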
+
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
+struct cpucp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to host that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ /*
+ * In legacy implementations, i2c_len was not present;
+ * it was unused and just added as padding.
+ * So if i2c_len is 0, the access is treated as legacy
+ * and reads/writes 1 byte; if i2c_len is specified,
+ * it is treated as the new multi-byte r/w support.
+ */
+ __u8 i2c_len;
+ };
+
+ struct {/* For PLL info fetch */
+ __le16 pll_type;
+ /* TODO: pll_reg is kept temporarily before removal */
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get CpuCP info/EEPROM data/NIC info */
+ __le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
+
+ /* random number used once (nonce), for security packets */
+ __le32 nonce;
+ };
+
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+ };
+};
+
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 irqs[];
+};
+
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX,
+ CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
+};
+
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> in case of success.
+ * @cpucp_packet_invalid -> this is to support first generation platforms.
+ * @cpucp_packet_fault -> in case of a processing error, e.g. failing to
+ * get the device binding or a semaphore.
+ * @cpucp_packet_invalid_pkt -> when the cpucp packet is unsupported.
+ * @cpucp_packet_invalid_params -> when a parameter check fails, e.g. buffer
+ * length or attribute value.
+ * @cpucp_packet_rc_max -> indicates the size of the enum; must be last.
+ */
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
+};
+
+/*
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
+};
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_lowest = 6,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_lowest = 6,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
+};
+
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
+};
+
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
+};
+
+/* TODO: temporarily kept before removal */
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
+};
+
+/* TODO: temporarily kept before removal */
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
+};
+
+/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+ CPU_PLL = 0,
+ PCI_PLL = 1,
+ NIC_PLL = 2,
+ DMA_PLL = 3,
+ MESH_PLL = 4,
+ MME_PLL = 5,
+ TPC_PLL = 6,
+ IF_PLL = 7,
+ SRAM_PLL = 8,
+ NS_PLL = 9,
+ HBM_PLL = 10,
+ MSS_PLL = 11,
+ DDR_PLL = 12,
+ VID_PLL = 13,
+ BANK_PLL = 14,
+ MMU_PLL = 15,
+ IC_PLL = 16,
+ MC_PLL = 17,
+ EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
+ PLL_MAX
+};
+
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+ EDMA_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * CpuCP info
+ */
+
+#define CARD_NAME_MAX_LEN 16
+#define CPUCP_MAX_SENSORS 128
+#define CPUCP_MAX_NICS 128
+#define CPUCP_LANES_PER_NIC 4
+#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024
+#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
+#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
+#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
+
+struct cpucp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+/**
+ * struct cpucp_card_types - ASIC card type.
+ * @cpucp_card_type_pci: PCI card.
+ * @cpucp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum cpucp_card_types {
+ cpucp_card_type_pci,
+ cpucp_card_type_pmc
+};
+
+#define CPUCP_SEC_CONF_ENABLED_SHIFT 0
+#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001
+
+#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1
+#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002
+
+#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2
+#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004
+
+/**
+ * struct cpucp_security_info - Security information.
+ * @config: configuration bit field
+ * @keys_num: number of stored keys
+ * @revoked_keys: revoked keys bit field
+ * @min_svn: minimal security version
+ */
+struct cpucp_security_info {
+ __u8 config;
+ __u8 keys_num;
+ __u8 revoked_keys;
+ __u8 min_svn;
+};
+
+/**
+ * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: CpuCP linux kernel version.
+ * @reserved: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has a different connection topology
+ * depending on its location (relevant for PMC card type)
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance
+ * (0 = functional, 1 = binned)
+ * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance
+ * (0 = functional, 1 = binned), maximum 1 per dcore
+ * @sram_binning: Categorize SRAM functionality
+ * (0 = fully functional, 1 = lower-half is not functional,
+ * 2 = upper-half is not functional)
+ * @sec_info: security information
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
+ * @mme_binning_mask: MME binning mask,
+ * bits [0:6] <==> dcore0 mme fma
+ * bits [7:13] <==> dcore1 mme fma
+ * bits [14:20] <==> dcore0 mme ima
+ * bits [21:27] <==> dcore1 mme ima
+ * For each group, if the 6th bit is set then first 5 bits
+ * represent the col's idx [0-31], otherwise these bits are
+ * ignored, and col idx 32 is binned. 7th bit is don't care.
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ * (0 = functional 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
+ * @interposer_version: Interposer version programmed in eFuse
+ * @substrate_version: Substrate version programmed in eFuse
+ * @eq_health_check_supported: eq health check feature supported in FW.
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
+ * @fw_os_version: Firmware OS Version
+ */
+struct cpucp_info {
+ struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved;
+ __le32 card_type;
+ __le32 card_location;
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 cpucp_version[VERSION_MAX_LEN];
+ __le32 infineon_second_stage_version;
+ __le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
+ __le64 tpc_binning_mask;
+ __le64 decoder_binning_mask;
+ __u8 sram_binning;
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 interposer_version;
+ __u8 substrate_version;
+ __u8 eq_health_check_supported;
+ struct cpucp_security_info sec_info;
+ __le32 fw_hbm_region_size;
+ __u8 pll_map[PLL_MAP_LEN];
+ __le64 mme_binning_mask;
+ __u8 fw_os_version[VERSION_MAX_LEN];
+};
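
For illustration, pll_map is a bitmap of supported PLLs; a hedged sketch of a
bit test (the LSB-first bit ordering is an assumption, not stated by the
header):

    static bool pll_supported(const struct cpucp_info *info, enum pll_index pll)
    {
        /* assumes bit N of the map corresponds to pll_index N, LSB first */
        return info->pll_map[pll / 8] & (1 << (pll % 8));
    }
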
+
+struct cpucp_mac_addr {
+ __u8 mac_addr[ETH_ALEN];
+};
+
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
+};
+
+struct cpucp_nic_info {
+ struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
+ __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
+ __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
+ __u8 reserved[6];
+};
+
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct frac_val - fractional value represented by "integer.frac".
+ * @integer: the integer part of the value;
+ * @frac: the fractional part of the value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
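
For illustration, a sketch of formatting a frac_val as "integer.frac" (the
helper name is hypothetical):

    static void frac_val_show(const struct frac_val *f)
    {
        pr_info("%u.%u\n", le16_to_cpu(f->integer), le16_to_cpu(f->frac));
    }
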
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
+
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g NAK.
+ * @high_ber_reinit: link reinit count due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @bandwidth: measured bandwidth.
+ * @lat: measured latency.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
+};
+
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
+
+enum cpu_reset_status {
+ CPU_RST_STATUS_NA = 0,
+ CPU_RST_STATUS_SOFT_RST_DONE = 1,
+};
+
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. This
+ * is also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. This
+ * number is hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+ struct cpucp_info info; /* assumed to be 64bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+#define DCORE_MON_REGS_SZ 512
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * the structure follows sync manager block layout. Obsolete.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
+};
+
+/* contains SM data for each SYNC_MNGR (Obsolete) */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
+/*
+ * The type of the generic request (and other input arguments) is fetched from
+ * the user by reading the "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+};
+
+#endif /* CPUCP_IF_H */
diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h
new file mode 100644
index 000000000000..93366d5621fd
--- /dev/null
+++ b/include/linux/habanalabs/hl_boot_if.h
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
+
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+#define VERSION_MAX_LEN 128
+
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
+ CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
+ CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and a dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
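
For illustration, a sketch of classifying the first boot-error register
against the fatal mask (the helper name is hypothetical):

    static bool boot_err_is_fatal(u32 err0)
    {
        /* bits are only meaningful once FW sets the ENABLED bit */
        if (!(err0 & (1U << CPU_BOOT_ERR_ENABLED)))
            return false;

        return err0 & CPU_BOOT_ERR_FATAL_MASK;
    }
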
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
+ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed.
+ * The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot primary
+ * image has failed to match expected
+ * checksum. Trying to program image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot secondary
+ * image has failed to match expected
+ * checksum. Trying to program image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ * the execution of ppboot or preboot.
+ * For example: stack overflow.
+ *
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
+ *
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
+ * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for temperature
+ * sensor.
+ *
+ * CPU_BOOT_ERR0_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
+ * CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
+ * memories. Boot disabled until reset.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
+#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
+#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
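A minimal host-side check built on these definitions; reading the boot error register itself is driver specific, so the helper below only shows the intended bit logic: trust the register contents only once CPU_BOOT_ERR0_ENABLED is set, then test them against the fatal mask.

static bool hl_boot_err0_is_fatal(u32 err0)
{
	/* error bits are garbage until FW sets the ENABLED bit */
	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return false;

	return !!(err0 & CPU_BOOT_ERR_FATAL_MASK);
}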
+
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
+ *
+ * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled.
+ * This is an indication for security
+ * enabled in FW, which means that
+ * all conditions for security are met:
+ * device is indicated as security enabled,
+ * registers are protected, and device
+ * uses keys for image verification.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled.
+ * Enabled when JTAG or DEBUG is enabled
+ * in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled.
+ * Watchdog is enabled in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled.
+ * DRAM initialization has been done in FW.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled.
+ * If set, it means that during boot,
+ * FW waited for BMC data.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized.
+ * FW initialized E2E credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized.
+ * FW initialized HBM credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized.
+ * FW initialized rate limiter.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled.
+ * FW initialized SRAM scrambler.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled.
+ * FW initialized DRAM scrambler.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled.
+ * FW has the hard reset procedure
+ * implemented. This means that FW will
+ * perform hard reset procedure on
+ * receiving the halt-machine event.
+ * Initialized in: preboot, u-boot, linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication
+ * protocol is enabled.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled.
+ * If this bit is set, the iATU has been
+ * configured and is ready for use.
+ * Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
+ * FW sends to host a bitmap of supported
+ * PLLs.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from
+ * privileged entity. FW sets this status
+ * bit for the host. If this bit is set,
+ * the GIC cannot be accessed from the host.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running
+ * index for each new event sent to the host.
+ * The host uses it to identify that the
+ * event waiting in the queue is a new
+ * event that has not been served before.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * prevent IRQs overriding each other.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ * If set, the f/w supports proprietary
+ * HWMON enum mapping to cpucp enums.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
+ * This is a main indication that the
+ * running FW populates the device status
+ * register. Meaning the device status
+ * bits are not garbage, but actual
+ * statuses.
+ * Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
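The same gating pattern applies to the capability bits; a sketch (the register read itself is driver specific):

static bool hl_fw_has_cap(u32 sts0, u32 cap_bit)
{
	/* status bits are only meaningful once STS0_ENABLED is set */
	if (!(sts0 & CPU_BOOT_DEV_STS0_ENABLED))
		return false;

	return !!(sts0 & cap_bit);
}

For example, the flexible FW loading protocol would only be used when hl_fw_has_cap(sts0, CPU_BOOT_DEV_STS0_FW_LD_COM_EN) returns true.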
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
+ /* FW component is preparing to shutdown and communication with host is not available */
+ CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18,
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
+ KMD_MSG_LAST
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
+};
+
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+ __le32 cpu_pq_base_addr_low;
+ __le32 cpu_pq_base_addr_high;
+ __le32 cpu_pq_length;
+ __le32 cpu_pq_init_status;
+ __le32 cpu_eq_base_addr_low;
+ __le32 cpu_eq_base_addr_high;
+ __le32 cpu_eq_length;
+ __le32 cpu_eq_ci;
+ __le32 cpu_cq_base_addr_low;
+ __le32 cpu_cq_base_addr_high;
+ __le32 cpu_cq_length;
+ __le32 cpu_pf_pq_pi;
+ __le32 cpu_boot_dev_sts0;
+ __le32 cpu_boot_dev_sts1;
+ __le32 cpu_boot_err0;
+ __le32 cpu_boot_err1;
+ __le32 cpu_boot_status;
+ __le32 fw_upd_sts;
+ __le32 fw_upd_cmd;
+ __le32 fw_upd_pending_sts;
+ __le32 fuse_ver_offset;
+ __le32 preboot_ver_offset;
+ __le32 uboot_ver_offset;
+ __le32 hw_state;
+ __le32 kmd_msg_to_cpu;
+ __le32 cpu_cmd_status_to_host;
+ __le32 gic_host_pi_upd_irq;
+ __le32 gic_tpc_qm_irq_ctrl;
+ __le32 gic_mme_qm_irq_ctrl;
+ __le32 gic_dma_qm_irq_ctrl;
+ __le32 gic_nic_qm_irq_ctrl;
+ __le32 gic_dma_core_irq_ctrl;
+ __le32 gic_host_halt_irq;
+ __le32 gic_host_ints_irq;
+ __le32 gic_host_soft_rst_irq;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 cpu_rst_status;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved1[20]; /* reserve for future use */
+};
+
+/* TODO: remove the desc magic after the code is updated to use message */
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC 0x4843444D
+#define HL_COMMS_DESC_VER 3
+
+/* HCMv - Habana Communications Message + header version */
+#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
+#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00
+#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF
+
+#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
+#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
+
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
+ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
+ HL_COMMS_MSG_MAGIC_VALUE)
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \
+ (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \
+ (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \
+ HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver)))
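To illustrate the macros above: a received magic of 0x48434D02 ("HCM" plus version 2) passes validation against HL_COMMS_MSG_MAGIC_V2, since the upper 24 bits match HL_COMMS_MSG_MAGIC_VALUE and the low byte (2) is at least the requested version, but the same magic fails against HL_COMMS_MSG_MAGIC_V3. A hypothetical receive path, with msg assumed to point at a struct lkd_fw_comms_msg:

u32 magic = le32_to_cpu(msg->header.magic);

if (!HL_COMMS_MSG_MAGIC_VALIDATE(magic, HL_COMMS_MSG_MAGIC_V2))
	return -EINVAL;	/* unknown layout or message version too old */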
+
+enum comms_msg_type {
+ HL_COMMS_DESC_TYPE = 0,
+ HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
+};
+
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask_l - TPC binning information lower 64 bit
+ * @dec_mask - Decoder binning information
+ * @dram_mask - DRAM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information lower 32
+ * @mme_mask_h - MME binning information upper 32
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask_l;
+ __le32 dec_mask;
+ __le32 dram_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
+};
+
+/* TODO: remove this struct after the code is updated to use message */
+/* this is the comms descriptor header - meta data */
+struct comms_desc_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the descriptor w/o header */
+ __le16 size; /* size of the descriptor w/o header */
+ __u8 version; /* descriptor version */
+ __u8 reserved[5]; /* pad to 64 bit */
+};
+
+/* this is the comms message header - meta data */
+struct comms_msg_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the message w/o header */
+ __le16 size; /* size of the message w/o header */
+ __u8 version; /* message payload version */
+ __u8 type; /* message type */
+ __u8 reserved[4]; /* pad to 64 bit */
+};
+
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+ struct comms_desc_header header;
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ __le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ __le32 rsvd_mem_size_mb; /* reserved memory size [MB] for FW/SVE */
+ char reserved1[4];
+};
+
+enum comms_reset_cause {
+ HL_RESET_CAUSE_UNKNOWN = 0,
+ HL_RESET_CAUSE_HEARTBEAT = 1,
+ HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+ struct comms_msg_header header;
+	/* union for future expansion with new messages */
+ union {
+ struct {
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ /* address for next FW component load */
+ __le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ /* reserved memory size [MB] for FW/SVE */
+ __le32 rsvd_mem_size_mb;
+ char reserved1[4];
+ };
+ struct {
+ __u8 reset_cause;
+ };
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
+ struct lkd_fw_binning_info binning_conf;
+ };
+};
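Which union member is valid is keyed by the type field of the header (enum comms_msg_type above); a hypothetical dispatch sketch, with the handler names invented for illustration:

switch (msg->header.type) {
case HL_COMMS_RESET_CAUSE_TYPE:
	handle_reset_cause(hdev, msg->reset_cause);
	break;
case HL_COMMS_FW_CFG_SKIP_TYPE:
	handle_fw_cfg_skip(hdev, msg->fw_cfg_skip);
	break;
case HL_COMMS_BINNING_CONF_TYPE:
	handle_binning_conf(hdev, &msg->binning_conf);
	break;
default:	/* HL_COMMS_DESC_TYPE carries the full descriptor */
	break;
}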
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP Used to clear the command register and no actual
+ * command is sent.
+ *
+ * COMMS_CLR_STS Clear status command - FW should clear the
+ * status register. Used for synchronization
+ * between the commands as part of the race free
+ * protocol.
+ *
+ * COMMS_RST_STATE Reset the current communication state which is
+ * kept by FW for proper responses.
+ * Should be used in the beginning of the
+ * communication cycle to clean any leftovers from
+ * previous communication attempts.
+ *
+ * COMMS_PREP_DESC Prepare descriptor for setting up the
+ * communication and other dynamic data:
+ * struct lkd_fw_comms_desc.
+ * This command has a parameter stating the next FW
+ * component size, so the FW can actually prepare a
+ * space for it and in the status response provide
+ * the descriptor offset. The Offset of the next FW
+ * data component is a part of the descriptor
+ * structure.
+ *
+ * COMMS_DATA_RDY The FW data has been uploaded and is ready for
+ * validation.
+ *
+ * COMMS_EXEC Execute the next FW component.
+ *
+ * COMMS_RST_DEV Reset the device.
+ *
+ * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure
+ * devices.
+ *
+ * COMMS_SKIP_BMC Perform actions required for BMC-less servers.
+ * Do not wait for BMC response.
+ *
+ * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC only that the memory
+ * space is allocated in an ELBI-access-only
+ * address range.
+ *
+ */
+enum comms_cmd {
+ COMMS_NOOP = 0,
+ COMMS_CLR_STS = 1,
+ COMMS_RST_STATE = 2,
+ COMMS_PREP_DESC = 3,
+ COMMS_DATA_RDY = 4,
+ COMMS_EXEC = 5,
+ COMMS_RST_DEV = 6,
+ COMMS_GOTO_WFE = 7,
+ COMMS_SKIP_BMC = 8,
+ COMMS_PREP_DESC_ELBI = 10,
+ COMMS_INVLD_LAST
+};
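Put together, a typical load cycle for one FW component looks roughly like the sequence below. comms_send() is a hypothetical helper that writes a command to the register and polls the matching status; COMMS_CLR_STS would be issued between steps, as the race-free protocol requires:

comms_send(COMMS_RST_STATE, 0);		/* clean leftovers from earlier cycles */
comms_send(COMMS_PREP_DESC, fw_size);	/* FW reserves space, returns desc offset */
/* ... copy the image to the offset taken from the descriptor ... */
comms_send(COMMS_DATA_RDY, 0);		/* FW validates the uploaded image */
comms_send(COMMS_EXEC, 0);		/* FW executes the next component */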
+
+#define COMMS_COMMAND_SIZE_SHIFT 0
+#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT 27
+#define COMMS_COMMAND_CMD_MASK 0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 size :25; /* 32MB max. */
+ u32 reserved :2;
+ enum comms_cmd cmd :5; /* 32 commands */
+ };
+ __le32 val;
+ };
+};
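On the host side, which cannot rely on the FW-only bit fields, the register value would be packed with the shift/mask definitions above; a sketch:

static u32 comms_command_pack(enum comms_cmd cmd, u32 size)
{
	/* size occupies bits 0-24, the command code bits 27-31 */
	return ((size << COMMS_COMMAND_SIZE_SHIFT) & COMMS_COMMAND_SIZE_MASK) |
	       (((u32)cmd << COMMS_COMMAND_CMD_SHIFT) & COMMS_COMMAND_CMD_MASK);
}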
+
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP Used to clear the status register and no actual
+ * status is provided.
+ *
+ * COMMS_STS_ACK Command has been received and recognized.
+ *
+ * COMMS_STS_OK Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR Command execution was unsuccessful and resulted
+ * in error.
+ *
+ * COMMS_STS_VALID_ERR FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
+ */
+enum comms_sts {
+ COMMS_STS_NOOP = 0,
+ COMMS_STS_ACK = 1,
+ COMMS_STS_OK = 2,
+ COMMS_STS_ERR = 3,
+ COMMS_STS_VALID_ERR = 4,
+ COMMS_STS_TIMEOUT_ERR = 5,
+ COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+ COMMS_SRAM = 0,
+ COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT 0
+#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2
+#define COMMS_STATUS_RAM_TYPE_SHIFT 26
+#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT 28
+#define COMMS_STATUS_STATUS_MASK 0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type shifted right by
+ * 2 bits (always aligned to 32 bits).
+ * Allows a maximum addressable offset of 256MB from RAM base.
+ * Example: for real offset in RAM of 0x800000 (8MB), the value
+ * in offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for offset from
+ * enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 offset :26;
+ enum comms_ram_types ram_type :2;
+ enum comms_sts status :4; /* 16 statuses */
+ };
+ __le32 val;
+ };
+};
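Decoding mirrors the packing: the host extracts the field with the mask and undoes the 2-bit alignment shift. For the 8MB example above, a raw offset field of 0x200000 yields 0x800000 again:

static u32 comms_status_real_offset(u32 val)
{
	u32 off = (val & COMMS_STATUS_OFFSET_MASK) >> COMMS_STATUS_OFFSET_SHIFT;

	/* the stored value is the RAM offset shifted right by 2 */
	return off << COMMS_STATUS_OFFSET_ALIGN_SHIFT;
}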
+
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of the whole struct (including the dynamic size of the modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. Elaborated explanation in
+ * struct cpucp_versions.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
+ __u8 modules_counter;
+ __u8 reserved[3];
+ struct hl_module_data modules[];
+};
+
+/* Maximum size of the FW versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
+#endif /* HL_BOOT_IF_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 754f67ac4326..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -6,6 +6,7 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
+#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>
@@ -32,9 +33,9 @@ static __always_inline void rcu_irq_enter_check_tick(void)
*/
#define __irq_enter() \
do { \
- account_irq_enter_time(current); \
preempt_count_add(HARDIRQ_OFFSET); \
lockdep_hardirq_enter(); \
+ account_hardirq_enter(current); \
} while (0)
/*
@@ -62,8 +63,8 @@ void irq_enter_rcu(void);
*/
#define __irq_exit() \
do { \
+ account_hardirq_exit(current); \
lockdep_hardirq_exit(); \
- account_irq_exit_time(current); \
preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
@@ -91,14 +92,6 @@ void irq_exit_rcu(void);
#define arch_nmi_exit() do { } while (0)
#endif
-#ifdef CONFIG_TINY_RCU
-static inline void rcu_nmi_enter(void) { }
-static inline void rcu_nmi_exit(void) { }
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
/*
* NMI vs Tracing
* --------------
@@ -115,7 +108,6 @@ extern void rcu_nmi_exit(void);
do { \
lockdep_off(); \
arch_nmi_enter(); \
- printk_nmi_enter(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
} while (0)
@@ -124,7 +116,7 @@ extern void rcu_nmi_exit(void);
do { \
__nmi_enter(); \
lockdep_hardirq_enter(); \
- rcu_nmi_enter(); \
+ ct_nmi_enter(); \
instrumentation_begin(); \
ftrace_nmi_enter(); \
instrumentation_end(); \
@@ -134,7 +126,6 @@ extern void rcu_nmi_exit(void);
do { \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- printk_nmi_exit(); \
arch_nmi_exit(); \
lockdep_on(); \
} while (0)
@@ -144,7 +135,7 @@ extern void rcu_nmi_exit(void);
instrumentation_begin(); \
ftrace_nmi_exit(); \
instrumentation_end(); \
- rcu_nmi_exit(); \
+ ct_nmi_exit(); \
lockdep_hardirq_exit(); \
__nmi_exit(); \
} while (0)
diff --git a/include/linux/hash.h b/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
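For instance, to pick one of 256 hash buckets one would keep the top eight bits of the 32-bit hash (key here is an arbitrary u32 value):

u32 bucket = hash_32(key, 8);	/* 0 <= bucket < 256 */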
diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h
new file mode 100644
index 000000000000..c268ac2c5c0e
--- /dev/null
+++ b/include/linux/hashtable_api.h
@@ -0,0 +1 @@
+#include <linux/hashtable.h>
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index cacc4dd27794..630a388035f1 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -22,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
- int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -54,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index d4d633a49d36..5d70c3f98f5b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -79,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
- int (*ioctl)(struct net_device *, struct ifreq *,
+ int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 9850d59d6f1c..3bb87bf6bc65 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -156,7 +156,7 @@ enum hdmi_content_type {
};
enum hdmi_metadata_type {
- HDMI_STATIC_METADATA_TYPE1 = 1,
+ HDMI_STATIC_METADATA_TYPE1 = 0,
};
enum hdmi_eotf {
@@ -170,19 +170,19 @@ struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
+ bool itc;
+ unsigned char pixel_repeat;
enum hdmi_colorspace colorspace;
enum hdmi_scan_mode scan_mode;
enum hdmi_colorimetry colorimetry;
enum hdmi_picture_aspect picture_aspect;
enum hdmi_active_aspect active_aspect;
- bool itc;
enum hdmi_extended_colorimetry extended_colorimetry;
enum hdmi_quantization_range quantization_range;
enum hdmi_nups nups;
unsigned char video_code;
enum hdmi_ycc_quantization_range ycc_quantization_range;
enum hdmi_content_type content_type;
- unsigned char pixel_repeat;
unsigned short top_bar;
unsigned short bottom_bar;
unsigned short left_bar;
@@ -336,7 +336,12 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
-int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
diff --git a/include/linux/hex.h b/include/linux/hex.h
new file mode 100644
index 000000000000..2618382e5b0c
--- /dev/null
+++ b/include/linux/hex.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HEX_H
+#define _LINUX_HEX_H
+
+#include <linux/types.h>
+
+extern const char hex_asc[];
+#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
+#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_hi(byte);
+ *buf++ = hex_asc_lo(byte);
+ return buf;
+}
+
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_upper_hi(byte);
+ *buf++ = hex_asc_upper_lo(byte);
+ return buf;
+}
+
+extern int hex_to_bin(unsigned char ch);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
+
+bool mac_pton(const char *s, u8 *mac);
+
+#endif
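A minimal illustration of these helpers (kernel context assumed; the byte values are arbitrary):

static int hex_demo(void)
{
	char buf[5];
	u8 out[2];
	char *p;

	p = hex_byte_pack(buf, 0xa5);		/* buf now holds "a5"   */
	p = hex_byte_pack_upper(p, 0x0f);	/* buf now holds "a50F" */
	*p = '\0';

	/* hex2bin accepts mixed case; two output bytes need four chars */
	if (hex2bin(out, buf, 2))
		return -EINVAL;

	/* out[0] == 0xa5, out[1] == 0x0f */
	return 0;
}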
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 3214fb0815fc..753654fff07f 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -16,7 +16,7 @@
#ifdef __KERNEL__
-int roccat_connect(struct class *klass, struct hid_device *hid,
+int roccat_connect(const struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 46bcef380446..c27329e2a5ad 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -150,7 +150,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
* @info: return information about attribute after parsing report
*
* Parses report and returns the attribute information such as report id,
-* field index, units and exponet etc.
+* field index, units and exponent etc.
*/
int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
u8 type,
@@ -167,7 +167,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
-* Returns data upto 32 bits.
+* Return: data up to 32 bits.
*/
enum sensor_hub_read_flags {
@@ -205,8 +205,9 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state. On success it returns
-* number of bytes copied to buffer. On failure, it returns value < 0.
+* interval, sensitivity, activate/deactivate state.
+* Return: On success, it returns the number of bytes copied to buffer.
+* On failure, it returns value < 0.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, int buffer_size, void *buffer);
@@ -230,6 +231,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
struct hid_sensor_hub_attribute_info sensitivity;
+ struct hid_sensor_hub_attribute_info sensitivity_rel;
struct hid_sensor_hub_attribute_info report_latency;
struct work_struct work;
};
@@ -247,11 +249,17 @@ static inline int hid_sensor_convert_exponent(int unit_expo)
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
- struct hid_sensor_common *st);
+ struct hid_sensor_common *st,
+ const u32 *sensitivity_addresses,
+ u32 sensitivity_addresses_len);
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
int val1, int val2);
+int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st, int val1,
+ int val2);
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2);
+int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2);
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index d82a97e311d3..6730ee900ee1 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,6 +21,10 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
@@ -128,6 +132,11 @@
#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_DESC 0x200300
+#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
+#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
+#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
+#define HID_USAGE_SENSOR_PROP_MODEL 0x200306
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
@@ -145,6 +154,7 @@
/* Per data field properties */
#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000
/* Power state enumerations */
#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850
@@ -158,4 +168,14 @@
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+/* Custom Sensor (2000e1) */
+#define HID_USAGE_SENSOR_HINGE 0x20020B
+#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400
+#define HID_USAGE_SENSOR_DATA_FIELE_TIME_SINCE_SYS_BOOT 0x20052B
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543
+/* Custom Sensor data, 0 <= x <= 28 */
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \
+ (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x))
+
#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index c7044a14200e..b12cb1c8e682 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <uapi/linux/hid.h>
+#include <linux/hid_bpf.h>
/*
* We parse each description item into this structure. Short items data
@@ -102,6 +103,7 @@ struct hid_item {
#define HID_COLLECTION_PHYSICAL 0
#define HID_COLLECTION_APPLICATION 1
#define HID_COLLECTION_LOGICAL 2
+#define HID_COLLECTION_NAMED_ARRAY 4
/*
* HID report descriptor global item tags
@@ -153,6 +155,8 @@ struct hid_item {
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
#define HID_UP_PID 0x000f0000
+#define HID_UP_BATTERY 0x00850000
+#define HID_UP_CAMERA 0x00900000
#define HID_UP_HPVENDOR 0xff7f0000
#define HID_UP_HPVENDOR2 0xff010000
#define HID_UP_MSVENDOR 0xff000000
@@ -163,6 +167,7 @@ struct hid_item {
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
#define HID_UP_ASUSVENDOR 0xff310000
+#define HID_UP_GOOGLEVENDOR 0xffd10000
#define HID_USAGE 0x0000ffff
@@ -238,6 +243,7 @@ struct hid_item {
#define HID_DG_TOUCH 0x000d0033
#define HID_DG_UNTOUCH 0x000d0034
#define HID_DG_TAP 0x000d0035
+#define HID_DG_TRANSDUCER_INDEX 0x000d0038
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
#define HID_DG_BATTERYSTRENGTH 0x000d003b
@@ -250,6 +256,15 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+#define HID_DG_PEN_COLOR 0x000d005c
+#define HID_DG_PEN_LINE_WIDTH 0x000d005e
+#define HID_DG_PEN_LINE_STYLE 0x000d0070
+#define HID_DG_PEN_LINE_STYLE_INK 0x000d0072
+#define HID_DG_PEN_LINE_STYLE_PENCIL 0x000d0073
+#define HID_DG_PEN_LINE_STYLE_HIGHLIGHTER 0x000d0074
+#define HID_DG_PEN_LINE_STYLE_CHISEL_MARKER 0x000d0075
+#define HID_DG_PEN_LINE_STYLE_BRUSH 0x000d0076
+#define HID_DG_PEN_LINE_STYLE_NO_PREFERENCE 0x000d0077
#define HID_CP_CONSUMERCONTROL 0x000c0001
#define HID_CP_NUMERICKEYPAD 0x000c0002
@@ -261,6 +276,8 @@ struct hid_item {
#define HID_CP_SELECTION 0x000c0080
#define HID_CP_MEDIASELECTION 0x000c0087
#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_VOLUMEUP 0x000c00e9
+#define HID_CP_VOLUMEDOWN 0x000c00ea
#define HID_CP_PLAYBACKSPEED 0x000c00f1
#define HID_CP_PROXIMITY 0x000c0109
#define HID_CP_SPEAKERSYSTEM 0x000c0160
@@ -296,16 +313,10 @@ struct hid_item {
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
#define HID_DG_LATENCYMODE 0x000d0060
-#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
-/*
- * HID report types --- Ouch! HID spec says 1 2 3!
- */
+#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
+#define HID_BAT_CHARGING 0x00850044
-#define HID_INPUT_REPORT 0
-#define HID_OUTPUT_REPORT 1
-#define HID_FEATURE_REPORT 2
-
-#define HID_REPORT_TYPES 3
+#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
* HID connect requests
@@ -325,12 +336,35 @@ struct hid_item {
* HID device quirks.
*/
-/*
+/*
* Increase this if you need to configure more HID quirks at module load time
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT BIT(0)
+/**
+ * DOC: HID quirks
+ * | @HID_QUIRK_NOTOUCH:
+ * | @HID_QUIRK_IGNORE: ignore this device
+ * | @HID_QUIRK_NOGET:
+ * | @HID_QUIRK_HIDDEV_FORCE:
+ * | @HID_QUIRK_BADPAD:
+ * | @HID_QUIRK_MULTI_INPUT:
+ * | @HID_QUIRK_HIDINPUT_FORCE:
+ * | @HID_QUIRK_ALWAYS_POLL:
+ * | @HID_QUIRK_INPUT_PER_APP:
+ * | @HID_QUIRK_X_INVERT:
+ * | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
+ * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+ * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_FULLSPEED_INTERVAL:
+ * | @HID_QUIRK_NO_INIT_REPORTS:
+ * | @HID_QUIRK_NO_IGNORE:
+ * | @HID_QUIRK_NO_INPUT_SYNC:
+ */
+/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
#define HID_QUIRK_NOTOUCH BIT(1)
#define HID_QUIRK_IGNORE BIT(2)
#define HID_QUIRK_NOGET BIT(3)
@@ -342,11 +376,14 @@ struct hid_item {
/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL BIT(10)
#define HID_QUIRK_INPUT_PER_APP BIT(11)
+#define HID_QUIRK_X_INVERT BIT(12)
+#define HID_QUIRK_Y_INVERT BIT(13)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_NOINVERT BIT(21)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -371,6 +408,7 @@ struct hid_item {
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
#define HID_GROUP_STEAM 0x0103
#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
+#define HID_GROUP_VIVALDI 0x0105
/*
* HID protocol status
@@ -456,31 +494,50 @@ struct hid_field {
unsigned report_count; /* number of this field in the report */
unsigned report_type; /* (input,output,feature) */
__s32 *value; /* last known value(s) */
+ __s32 *new_value; /* newly read value(s) */
+ __s32 *usages_priorities; /* priority of each usage when reading the report
+ * bits 8-16 are reserved for hid-input usage
+ */
__s32 logical_minimum;
__s32 logical_maximum;
__s32 physical_minimum;
__s32 physical_maximum;
__s32 unit_exponent;
unsigned unit;
+ bool ignored; /* this field is ignored in this event */
struct hid_report *report; /* associated report */
unsigned index; /* index into report->field[] */
/* hidinput data */
struct hid_input *hidinput; /* associated input structure */
__u16 dpad; /* dpad input code */
+ unsigned int slot_idx; /* slot index in a report */
};
#define HID_MAX_FIELDS 256
+struct hid_field_entry {
+ struct list_head list;
+ struct hid_field *field;
+ unsigned int index;
+ __s32 priority;
+};
+
struct hid_report {
struct list_head list;
struct list_head hidinput_list;
+ struct list_head field_entry_list; /* ordered list of input fields */
unsigned int id; /* id of this report */
- unsigned int type; /* report type */
+ enum hid_report_type type; /* report type */
unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
struct hid_device *device; /* associated device */
+
+ /* tool related state */
+ bool tool_active; /* whether the current tool is active */
+ unsigned int tool; /* BTN_TOOL_* */
};
#define HID_MAX_IDS 256
@@ -492,7 +549,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
+#define HID_MAX_BUFFER_SIZE 16384 /* 16kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
@@ -522,9 +579,9 @@ struct hid_input {
struct hid_report *report;
struct input_dev *input;
const char *name;
- bool registered;
struct list_head reports; /* the list of reports */
unsigned int application; /* application usage for this input */
+ bool registered;
};
enum hid_type {
@@ -564,8 +621,9 @@ struct hid_device { /* device report descriptor */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
+ void *devres_group_id; /* ID of probe devres group */
- struct hid_ll_driver *ll_driver;
+ const struct hid_ll_driver *ll_driver;
struct mutex ll_open_lock;
unsigned int ll_open_count;
@@ -581,13 +639,16 @@ struct hid_device { /* device report descriptor */
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
+ __s32 battery_charge_status;
enum hid_battery_status battery_status;
bool battery_avoid_query;
+ ktime_t battery_ratelimit_time;
#endif
unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
+ unsigned initial_quirks; /* Initial set of quirks supplied when creating device */
bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
@@ -618,8 +679,17 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+ struct kref ref;
+
+ unsigned int id; /* system unique id */
+
+#ifdef CONFIG_HID_BPF
+ struct hid_bpf bpf; /* hid-bpf data */
+#endif /* CONFIG_HID_BPF */
};
+void hiddev_free(struct kref *ref);
+
#define to_hid_device(pdev) \
container_of(pdev, struct hid_device, dev)
@@ -766,11 +836,11 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
-#ifdef CONFIG_PM
+
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
-#endif
+
/* private: */
struct device_driver driver;
};
@@ -779,7 +849,7 @@ struct hid_driver {
container_of(pdrv, struct hid_driver, driver)
/**
- * hid_ll_driver - low level driver callbacks
+ * struct hid_ll_driver - low level driver callbacks
* @start: called on probe to start the device
* @stop: called on remove
* @open: called by input layer on open
@@ -792,6 +862,8 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @may_wakeup: return whether the device may act as a wakeup source during system-suspend
+ * @max_buffer_size: override the maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -816,18 +888,12 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+ bool (*may_wakeup)(struct hid_device *hdev);
+
+ unsigned int max_buffer_size;
};
-extern struct hid_ll_driver i2c_hid_ll_driver;
-extern struct hid_ll_driver hidp_hid_driver;
-extern struct hid_ll_driver uhid_hid_driver;
-extern struct hid_ll_driver usb_hid_driver;
-
-static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
- struct hid_ll_driver *driver)
-{
- return hdev->ll_driver == driver;
-}
+extern bool hid_is_usb(const struct hid_device *hdev);
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
@@ -836,19 +902,17 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
/* We ignore a few input applications that are not widely used */
#define IS_INPUT_APPLICATION(a) \
(((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \
- || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \
+ || ((a >= HID_DG_DIGITIZER) && (a <= HID_DG_WHITEBOARD)) \
|| (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \
|| (a == HID_GD_WIRELESS_RADIO_CTLS))
/* HID core API */
-extern int hid_debug;
-
extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
-extern struct bus_type hid_bus_type;
+extern const struct bus_type hid_bus_type;
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -877,21 +941,21 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
-int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
@@ -912,10 +976,20 @@ s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
+#ifdef CONFIG_PM
+int hid_driver_suspend(struct hid_device *hdev, pm_message_t state);
+int hid_driver_reset_resume(struct hid_device *hdev);
+int hid_driver_resume(struct hid_device *hdev);
+#else
+static inline int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) { return 0; }
+static inline int hid_driver_reset_resume(struct hid_device *hdev) { return 0; }
+static inline int hid_driver_resume(struct hid_device *hdev) { return 0; }
+#endif
+
/**
* hid_device_io_start - enable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* This should only be called during probe or remove and only be
* called by the thread calling probe or remove. It will allow
@@ -933,7 +1007,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
/**
* hid_device_io_stop - disable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* Should only be called after hid_device_io_start. It will prevent
* incoming packets from going to the driver for the duration of
@@ -989,6 +1063,10 @@ static inline void hid_map_usage(struct hid_input *hidinput,
bmap = input->ledbit;
limit = LED_MAX;
break;
+ case EV_MSC:
+ bmap = input->mscbit;
+ limit = MSC_MAX;
+ break;
}
if (unlikely(c > limit || !bmap)) {
@@ -1007,6 +1085,13 @@ static inline void hid_map_usage(struct hid_input *hidinput,
/**
* hid_map_usage_clear - map usage input bits and clear the input bit
*
+ * @hidinput: hidinput which we are interested in
+ * @usage: usage to fill in
+ * @bit: pointer to input->{}bit (out parameter)
+ * @max: maximal valid usage->code to consider later (out parameter)
+ * @type: input event type (EV_KEY, EV_REL, ...)
+ * @c: code which corresponds to this usage and type
+ *
* The same as hid_map_usage, except the @c bit is also cleared in supported
* bits (@bit).
*/
@@ -1038,6 +1123,13 @@ int __must_check hid_hw_start(struct hid_device *hdev,
void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
+void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, enum hid_class_request reqtype);
+int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len);
/**
* hid_hw_power - requests underlying HW to go into given power mode
@@ -1056,82 +1148,36 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
/**
- * hid_hw_request - send report request to device
+ * hid_hw_idle - send idle request to device
*
* @hdev: hid device
- * @report: report to send
+ * @report: report to control
+ * @idle: idle state
* @reqtype: hid request type
*/
-static inline void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype)
-{
- if (hdev->ll_driver->request)
- return hdev->ll_driver->request(hdev, report, reqtype);
-
- __hid_request(hdev, report, reqtype);
-}
-
-/**
- * hid_hw_raw_request - send report request to device
- *
- * @hdev: hid device
- * @reportnum: report ID
- * @buf: in/out data to transfer
- * @len: length of buf
- * @rtype: HID report type
- * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
- *
- * @return: count of data transfered, negative if error
- *
- * Same behavior as hid_hw_request, but with raw buffers instead.
- */
-static inline int hid_hw_raw_request(struct hid_device *hdev,
- unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype)
+static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
+ enum hid_class_request reqtype)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
+ if (hdev->ll_driver->idle)
+ return hdev->ll_driver->idle(hdev, report, idle, reqtype);
- return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
- rtype, reqtype);
+ return 0;
}
/**
- * hid_hw_output_report - send output report to device
+ * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system-suspend
*
* @hdev: hid device
- * @buf: raw data to transfer
- * @len: length of buf
- *
- * @return: count of data transfered, negative if error
*/
-static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
- size_t len)
+static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
-
- if (hdev->ll_driver->output_report)
- return hdev->ll_driver->output_report(hdev, buf, len);
-
- return -ENOSYS;
-}
+ if (hdev->ll_driver->may_wakeup)
+ return hdev->ll_driver->may_wakeup(hdev);
-/**
- * hid_hw_idle - send idle request to device
- *
- * @hdev: hid device
- * @report: report to control
- * @idle: idle state
- * @reqtype: hid request type
- */
-static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
- int reqtype)
-{
- if (hdev->ll_driver->idle)
- return hdev->ll_driver->idle(hdev, report, idle, reqtype);
+ if (hdev->dev.parent)
+ return device_may_wakeup(hdev->dev.parent);
- return 0;
+ return false;
}
/**
@@ -1152,12 +1198,11 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*/
static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
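As a worked example of the new form: a 12-bit report with a non-zero report ID occupies DIV_ROUND_UP(12, 8) + 1 = 2 + 1 = 3 bytes on the wire, matching the old open-coded expression ((12 - 1) >> 3) + 1 + 1.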
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
- int interrupt);
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
@@ -1170,11 +1215,7 @@ int hid_pidff_init(struct hid_device *hid);
#define hid_pidff_init NULL
#endif
-#define dbg_hid(fmt, ...) \
-do { \
- if (hid_debug) \
- printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \
-} while (0)
+#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
#define hid_err(hid, fmt, ...) \
dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
new file mode 100644
index 000000000000..7118ac28d468
--- /dev/null
+++ b/include/linux/hid_bpf.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __HID_BPF_H
+#define __HID_BPF_H
+
+#include <linux/bpf.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/hid.h>
+
+struct hid_device;
+
+/*
+ * The following is the user-facing HID-BPF API.
+ *
+ * Extra care should be taken when editing this part, as
+ * it might break existing out-of-tree BPF programs.
+ */
+
+/**
+ * struct hid_bpf_ctx - User accessible data for all HID programs
+ *
+ * ``data`` is not directly accessible from the context. We need to issue
+ * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field.
+ *
+ * All of these fields are currently read-only.
+ *
+ * @index: program index in the jump table. No special meaning (a smaller index
+ * doesn't mean the program will be executed before another program with
+ * a bigger index).
+ * @hid: the ``struct hid_device`` representing the device itself
+ * @report_type: used for ``hid_bpf_device_event()``
+ * @allocated_size: Allocated size of data.
+ *
+ * This is how much memory is available and can be requested
+ * by the HID program.
+ * Note that for ``HID_BPF_RDESC_FIXUP``, that memory is set to
+ * ``4096`` (4 KB).
+ * @size: Valid data in the data field.
+ *
+ * Programs can get the available valid size in data by fetching this field.
+ * Programs can also change this value by returning a positive number in the
+ * program.
+ * To discard the event, return a negative error code.
+ *
+ * ``size`` must always be less than or equal to ``allocated_size`` (this is enforced
+ * once all BPF programs have been run).
+ * @retval: Return value of the previous program.
+ */
+struct hid_bpf_ctx {
+ __u32 index;
+ const struct hid_device *hid;
+ __u32 allocated_size;
+ enum hid_report_type report_type;
+ union {
+ __s32 retval;
+ __s32 size;
+ };
+};
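As a sketch of how a program consumes this context (modeled on the in-tree samples; the 4-byte window and the report ID being filtered are hypothetical), note how the documented return convention applies, with a negative value discarding the event:

	SEC("fmod_ret/hid_bpf_device_event")
	int BPF_PROG(hid_filter, struct hid_bpf_ctx *hctx)
	{
		/* map a window into ctx->data; NULL if the request is out of bounds */
		__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4 /* size */);

		if (!data)
			return 0;	/* cannot inspect, let the event through */

		if (data[0] == 0x02)	/* hypothetical report ID to drop */
			return -1;	/* negative return: discard the event */

		return 0;
	}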
+
+/**
+ * enum hid_bpf_attach_flags - flags used when attaching a HID-BPF program
+ *
+ * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel chooses where to
+ * insert the program
+ * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
+ * currently attached to the device. This doesn't
+ * guarantee that this program will always be first
+ * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
+ */
+enum hid_bpf_attach_flags {
+ HID_BPF_FLAG_NONE = 0,
+ HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
+ HID_BPF_FLAG_MAX,
+};
+
+/* Following functions are tracepoints that BPF programs can attach to */
+int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
+int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
+
+/*
+ * Below is HID internal
+ */
+
+/* internal function to call eBPF programs, not to be used by anybody */
+int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
+
+#define HID_BPF_MAX_PROGS_PER_DEV 64
+#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
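With the enum above, the mask works out as a small worked example:

	/*
	 * HID_BPF_FLAG_MAX == 2, so
	 *   ((HID_BPF_FLAG_MAX - 1) << 1) - 1 == (1 << 1) - 1 == 0x1
	 * which is exactly HID_BPF_FLAG_INSERT_HEAD: every defined flag bit
	 * passes the mask check and any higher bit is rejected.
	 */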
+
+/* types of HID programs to attach to */
+enum hid_bpf_prog_type {
+ HID_BPF_PROG_TYPE_UNDEF = -1,
+ HID_BPF_PROG_TYPE_DEVICE_EVENT, /* an event is emitted from the device */
+ HID_BPF_PROG_TYPE_RDESC_FIXUP,
+ HID_BPF_PROG_TYPE_MAX,
+};
+
+struct hid_report_enum;
+
+struct hid_bpf_ops {
+ struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
+ int (*hid_hw_raw_request)(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+ struct module *owner;
+ const struct bus_type *bus_type;
+};
+
+extern struct hid_bpf_ops *hid_bpf_ops;
+
+struct hid_bpf_prog_list {
+ u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
+ u8 prog_cnt;
+};
+
+/* stored in each device */
+struct hid_bpf {
+ u8 *device_data; /* allocated when a bpf program of type
+ * SEC(f.../hid_bpf_device_event) has been attached
+ * to this HID device
+ */
+ u32 allocated_data;
+
+ struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX]; /* attached BPF progs */
+ bool destroyed; /* prevents the assignment of any progs */
+
+ spinlock_t progs_lock; /* protects RCU update of progs */
+};
+
+/* specific HID-BPF link when a program is attached to a device */
+struct hid_bpf_link {
+ struct bpf_link link;
+ int hid_table_index;
+};
+
+#ifdef CONFIG_HID_BPF
+u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
+ u32 *size, int interrupt);
+int hid_bpf_connect_device(struct hid_device *hdev);
+void hid_bpf_disconnect_device(struct hid_device *hdev);
+void hid_bpf_destroy_device(struct hid_device *hid);
+void hid_bpf_device_init(struct hid_device *hid);
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
+#else /* CONFIG_HID_BPF */
+static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 *size, int interrupt) { return data; }
+static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
+static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
+static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
+static inline void hid_bpf_device_init(struct hid_device *hid) {}
+static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
+{
+ return kmemdup(rdesc, *size, GFP_KERNEL);
+}
+
+#endif /* CONFIG_HID_BPF */
+
+#endif /* __HID_BPF_H */
diff --git a/include/linux/hidden.h b/include/linux/hidden.h
new file mode 100644
index 000000000000..49a17b6b5962
--- /dev/null
+++ b/include/linux/hidden.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * When building position independent code with GCC using the -fPIC option,
+ * (or even the -fPIE one on older versions), it will assume that we are
+ * building a dynamic object (either a shared library or an executable) that
+ * may have symbol references that can only be resolved at load time. For a
+ * variety of reasons (ELF symbol preemption, the CoW footprint of the section
+ * that is modified by the loader), this results in all references to symbols
+ * with external linkage to go via entries in the Global Offset Table (GOT),
+ * which carries absolute addresses which need to be fixed up when the
+ * executable image is loaded at an offset which is different from its link
+ * time offset.
+ *
+ * Fortunately, there is a way to inform the compiler that such symbol
+ * references will be satisfied at link time rather than at load time, by
+ * giving them 'hidden' visibility.
+ */
+
+#pragma GCC visibility push(hidden)
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
new file mode 100644
index 000000000000..a3028e400a9c
--- /dev/null
+++ b/include/linux/highmem-internal.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_INTERNAL_H
+#define _LINUX_HIGHMEM_INTERNAL_H
+
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(const void *vaddr);
+void kmap_local_fork(struct task_struct *tsk);
+void __kmap_local_sched_out(void);
+void __kmap_local_sched_in(void);
+static inline void kmap_assert_nomap(void)
+{
+ DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
+}
+#else
+static inline void kmap_local_fork(struct task_struct *tsk) { }
+static inline void kmap_assert_nomap(void) { }
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+void __kmap_flush_unused(void);
+struct page *__kmap_to_page(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return __kmap_to_page(addr);
+}
+
+static inline void kmap_flush_unused(void)
+{
+ __kmap_flush_unused();
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(const void *vaddr)
+{
+ kunmap_local_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+unsigned int __nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+
+static inline unsigned int nr_free_highpages(void)
+{
+ return __nr_free_highpages();
+}
+
+static inline unsigned long totalhigh_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+ atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline bool is_kmap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
+ (addr >= __fix_to_virt(FIX_KMAP_END) &&
+ addr < __fix_to_virt(FIX_KMAP_BEGIN));
+}
+#else /* CONFIG_HIGHMEM */
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page) { }
+static inline void kmap_flush_unused(void) { }
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ return page_address(&folio->page) + offset;
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+#endif
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+static inline bool is_kmap_addr(const void *x)
+{
+ return false;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr: Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
+ */
+#define kunmap_atomic(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_atomic(__addr); \
+} while (0)
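The BUILD_BUG_ON() is what produces the compile-time error mentioned in the comment; a minimal sketch of the right and wrong call:

	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);	/* correct: pass the mapped address */
	/* kunmap_atomic(page); would not build: __same_type(page, struct page *) */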
+
+/**
+ * kunmap_local - Unmap a page mapped via kmap_local_page().
+ * @__addr: An address within the page mapped
+ *
+ * @__addr can be any address within the mapped page. Commonly it is the
+ * address returned from kmap_local_page(), but it can also include offsets.
+ *
+ * Unmapping should be done in the reverse order of the mapping. See
+ * kmap_local_page() for details.
+ */
+#define kunmap_local(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_local(__addr); \
+} while (0)
+
+#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 14e6202ce47f..00341b56d291 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,11 +5,182 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
+#include "highmem-internal.h"
+
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
+ */
+static inline void *kmap(struct page *page);
+
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
+
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
+
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ * remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
+
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context, including interrupts.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While kmap_local_page() is significantly faster than kmap() for the highmem
+ * case it comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
+ */
+static inline void *kmap_local_folio(struct folio *folio, size_t offset);
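A sketch of copying out of a possibly-highmem folio with this helper; since the mapping only covers one page, the copy is clamped the same way memcpy_from_file_folio() below does it (pos, buf and len are hypothetical):

	size_t offset = offset_in_folio(folio, pos);
	char *addr = kmap_local_folio(folio, offset);
	size_t n = len;

	if (folio_test_highmem(folio))
		n = min_t(size_t, n, PAGE_SIZE - offset_in_page(offset));
	memcpy(buf, addr, n);
	kunmap_local(addr);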
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * In fact a wrapper around kmap_local_page() which also disables pagefaults
+ * and, depending on PREEMPT_RT configuration, also CPU migration and
+ * preemption. Therefore users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ *
+ * It is used in atomic context when code wants to access the contents of a
+ * page that might be allocated from high memory (see __GFP_HIGHMEM), for
+ * example a page in the pagecache. The API has two functions, and they
+ * can be used in a manner similar to the following::
+ *
+ * // Find the page of interest.
+ * struct page *page = find_get_page(mapping, offset);
+ *
+ * // Gain access to the contents of that page.
+ * void *vaddr = kmap_atomic(page);
+ *
+ * // Do something to the contents of that page.
+ * memset(vaddr, 0, PAGE_SIZE);
+ *
+ * // Unmap that page.
+ * kunmap_atomic(vaddr);
+ *
+ * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
+ * call, not the argument.
+ *
+ * If you need to map two pages because you want to copy from one page to
+ * another you need to keep the kmap_atomic calls strictly nested, like::
+ *
+ * vaddr1 = kmap_atomic(page1);
+ * vaddr2 = kmap_atomic(page2);
+ *
+ * memcpy(vaddr1, vaddr2, PAGE_SIZE);
+ *
+ * kunmap_atomic(vaddr2);
+ * kunmap_atomic(vaddr1);
+ */
+static inline void *kmap_atomic(struct page *page);
+
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -17,10 +188,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
}
#endif
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
@@ -29,320 +197,468 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
}
#endif
-#include <asm/kmap_types.h>
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+#ifndef clear_user_highpage
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_local_page(page);
+ clear_user_page(addr, vaddr, page);
+ kunmap_local(addr);
+}
+#endif
-#ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-extern void kunmap_atomic_high(void *kvaddr);
-#include <asm/highmem.h>
+#ifndef vma_alloc_zeroed_movable_folio
+/**
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
+ *
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address. It may be allocated from highmem or
+ * the movable zone. An architecture may provide its own implementation.
+ *
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
+ */
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ struct folio *folio;
-#ifndef ARCH_HAS_KMAP_FLUSH_TLB
-static inline void kmap_flush_tlb(unsigned long addr) { }
-#endif
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+ if (folio)
+ clear_user_highpage(&folio->page, vaddr);
-#ifndef kmap_prot
-#define kmap_prot PAGE_KERNEL
+ return folio;
+}
#endif
-void *kmap_high(struct page *page);
-static inline void *kmap(struct page *page)
+static inline void clear_highpage(struct page *page)
+{
+ void *kaddr = kmap_local_page(page);
+ clear_page(kaddr);
+ kunmap_local(kaddr);
+}
+
+static inline void clear_highpage_kasan_tagged(struct page *page)
{
- void *addr;
+ void *kaddr = kmap_local_page(page);
- might_sleep();
- if (!PageHighMem(page))
- addr = page_address(page);
- else
- addr = kmap_high(page);
- kmap_flush_tlb((unsigned long)addr);
- return addr;
+ clear_page(kasan_reset_tag(kaddr));
+ kunmap_local(kaddr);
}
-void kunmap_high(struct page *page);
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
-static inline void kunmap(struct page *page)
+static inline void tag_clear_highpage(struct page *page)
{
- might_sleep();
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
}
+#endif
+
/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- *
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
*/
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+#ifdef CONFIG_HIGHMEM
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2);
+#else
+static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
{
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_atomic_high_prot(page, prot);
-}
-#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
+ void *kaddr = kmap_local_page(page);
+ unsigned int i;
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern atomic_long_t _totalhigh_pages;
-static inline unsigned long totalhigh_pages(void)
-{
- return (unsigned long)atomic_long_read(&_totalhigh_pages);
-}
+ BUG_ON(end1 > page_size(page) || end2 > page_size(page));
-static inline void totalhigh_pages_inc(void)
-{
- atomic_long_inc(&_totalhigh_pages);
-}
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
-static inline void totalhigh_pages_dec(void)
-{
- atomic_long_dec(&_totalhigh_pages);
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
+
+ kunmap_local(kaddr);
+ for (i = 0; i < compound_nr(page); i++)
+ flush_dcache_page(page + i);
}
+#endif
-static inline void totalhigh_pages_add(long count)
+static inline void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
{
- atomic_long_add(count, &_totalhigh_pages);
+ zero_user_segments(page, start, end, 0, 0);
}
-static inline void totalhigh_pages_set(long val)
+static inline void zero_user(struct page *page,
+ unsigned start, unsigned size)
{
- atomic_long_set(&_totalhigh_pages, val);
+ zero_user_segments(page, start, start + size, 0, 0);
}
-void kmap_flush_unused(void);
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
-struct page *kmap_to_page(void *addr);
+static inline void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
-#else /* CONFIG_HIGHMEM */
+#endif
-static inline unsigned int nr_free_highpages(void) { return 0; }
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
-static inline struct page *kmap_to_page(void *addr)
+static inline void copy_highpage(struct page *to, struct page *from)
{
- return virt_to_page(addr);
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
-static inline unsigned long totalhigh_pages(void) { return 0UL; }
+#endif
-static inline void *kmap(struct page *page)
+#ifdef copy_mc_to_kernel
+/*
+ * If architecture supports machine check exception handling, define the
+ * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
+ * page with #MC in source page (@from) handled, and return the number
+ * of bytes not copied if there was a #MC, otherwise 0 for success.
+ */
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
- might_sleep();
- return page_address(page);
-}
+ unsigned long ret;
+ char *vfrom, *vto;
-static inline void kunmap_high(struct page *page)
-{
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+
+ return ret;
}
-static inline void kunmap(struct page *page)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(page_address(page));
-#endif
-}
+ unsigned long ret;
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
-static inline void *kmap_atomic(struct page *page)
+ return ret;
+}
+#else
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
- preempt_disable();
- pagefault_disable();
- return page_address(page);
+ copy_user_highpage(to, from, vaddr, vma);
+ return 0;
}
-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void kunmap_atomic_high(void *addr)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
- /*
- * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
- * handles re-enabling faults + preemption
- */
-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
-#endif
+ copy_highpage(to, from);
+ return 0;
}
+#endif
-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-
-#define kmap_flush_unused() do {} while(0)
+static inline void memcpy_page(struct page *dst_page, size_t dst_off,
+ struct page *src_page, size_t src_off,
+ size_t len)
+{
+ char *dst = kmap_local_page(dst_page);
+ char *src = kmap_local_page(src_page);
-#endif /* CONFIG_HIGHMEM */
+ VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
+ memcpy(dst + dst_off, src + src_off, len);
+ kunmap_local(src);
+ kunmap_local(dst);
+}
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+static inline void memset_page(struct page *page, size_t offset, int val,
+ size_t len)
+{
+ char *addr = kmap_local_page(page);
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, val, len);
+ kunmap_local(addr);
+}
-static inline int kmap_atomic_idx_push(void)
+static inline void memcpy_from_page(char *to, struct page *page,
+ size_t offset, size_t len)
{
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+ char *from = kmap_local_page(page);
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
-#endif
- return idx;
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to, from + offset, len);
+ kunmap_local(from);
}
-static inline int kmap_atomic_idx(void)
+static inline void memcpy_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
{
- return __this_cpu_read(__kmap_atomic_idx) - 1;
+ char *to = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to + offset, from, len);
+ flush_dcache_page(page);
+ kunmap_local(to);
}
-static inline void kmap_atomic_idx_pop(void)
+static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+ char *addr = kmap_local_page(page);
- BUG_ON(idx < 0);
-#else
- __this_cpu_dec(__kmap_atomic_idx);
-#endif
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, 0, len);
+ flush_dcache_page(page);
+ kunmap_local(addr);
}
-#endif
-
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+/**
+ * memcpy_from_folio - Copy a range of bytes from a folio.
+ * @to: The memory to copy to.
+ * @folio: The folio to read from.
+ * @offset: The first byte in the folio to read.
+ * @len: The number of bytes to copy.
*/
-#define kunmap_atomic(addr) \
-do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- kunmap_atomic_high(addr); \
- pagefault_enable(); \
- preempt_enable(); \
-} while (0)
-
-
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
-#ifndef clear_user_highpage
-static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+static inline void memcpy_from_folio(char *to, struct folio *folio,
+ size_t offset, size_t len)
{
- void *addr = kmap_atomic(page);
- clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ const char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+ to += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
}
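For example, pulling a small on-disk header that may straddle a page boundary out of a highmem folio (the 16-byte size is hypothetical):

	char hdr[16];

	memcpy_from_folio(hdr, folio, offset, sizeof(hdr));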
-#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
- * implementation.
+ * memcpy_to_folio - Copy a range of bytes to a folio.
+ * @folio: The folio to write to.
+ * @offset: The first byte in the folio to store to.
+ * @from: The memory to copy from.
+ * @len: The number of bytes to copy.
*/
-static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
- struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ const char *from, size_t len)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
- vma, vaddr);
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
- if (page)
- clear_user_highpage(page, vaddr);
+ if (folio_test_highmem(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(to);
- return page;
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+
+ flush_dcache_folio(folio);
}
-#endif
/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (e.g. the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
*
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
+ * Return: An address which can be passed to kunmap_local().
*/
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
{
- return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+ size_t len = folio_size(folio) - offset;
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
+
+ return kaddr;
}
-static inline void clear_highpage(struct page *page)
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
{
- void *kaddr = kmap_atomic(page);
- clear_page(kaddr);
- kunmap_atomic(kaddr);
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
}
-static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
+/**
+ * memcpy_from_file_folio - Copy some bytes from a file folio.
+ * @to: The destination buffer.
+ * @folio: The folio to copy from.
+ * @pos: The position in the file.
+ * @len: The maximum number of bytes to copy.
+ *
+ * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
+ * if the folio comes from HIGHMEM, and by the size of the folio.
+ *
+ * Return: The number of bytes copied from the folio.
+ */
+static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
+ loff_t pos, size_t len)
{
- void *kaddr = kmap_atomic(page);
+ size_t offset = offset_in_folio(folio, pos);
+ char *from = kmap_local_folio(folio, offset);
- BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+ if (folio_test_highmem(folio)) {
+ offset = offset_in_page(offset);
+ len = min_t(size_t, len, PAGE_SIZE - offset);
+ } else
+ len = min(len, folio_size(folio) - offset);
- if (end1 > start1)
- memset(kaddr + start1, 0, end1 - start1);
+ memcpy(to, from, len);
+ kunmap_local(from);
- if (end2 > start2)
- memset(kaddr + start2, 0, end2 - start2);
-
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ return len;
}
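Because the return value may be short of @len (clamped to one page on HIGHMEM, or to the folio size), callers typically loop until the requested range is drained, roughly like this sketch (assuming @len does not run past the folio):

	size_t copied = 0;

	while (copied < len)
		copied += memcpy_from_file_folio(buf + copied, folio,
						 pos + copied, len - copied);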
-static inline void zero_user_segment(struct page *page,
- unsigned start, unsigned end)
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
{
- zero_user_segments(page, start, end, 0, 0);
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
{
- zero_user_segments(page, start, start + size, 0, 0);
+ zero_user_segments(&folio->page, start, xend, 0, 0);
}
-#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
-
-static inline void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
{
- char *vfrom, *vto;
-
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
- copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
}
-#endif
-
-#ifndef __HAVE_ARCH_COPY_HIGHPAGE
-
-static inline void copy_highpage(struct page *to, struct page *from)
+/**
+ * folio_release_kmap - Unmap a folio and drop a refcount.
+ * @folio: The folio to release.
+ * @addr: The address previously returned by a call to kmap_local_folio().
+ *
+ * It is common, e.g. in directory handling, to kmap a folio. This function
+ * unmaps the folio and drops the refcount that was being held to keep the
+ * folio alive while we accessed it.
+ */
+static inline void folio_release_kmap(struct folio *folio, void *addr)
{
- char *vfrom, *vto;
-
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
- copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(addr);
+ folio_put(folio);
}
-#endif
+static inline void unmap_and_put_page(struct page *page, void *addr)
+{
+ folio_release_kmap(page_folio(page), addr);
+}
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 774f7d3b8f6a..369221fd5518 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -103,7 +103,7 @@ struct hilse_node {
/* Methods for back-end drivers, e.g. hp_sdc_mlc */
typedef int (hil_mlc_cts) (hil_mlc *mlc);
-typedef void (hil_mlc_out) (hil_mlc *mlc);
+typedef int (hil_mlc_out) (hil_mlc *mlc);
typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout);
struct hil_mlc_devinfo {
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 9dc01f7ab5b4..07414c241e65 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -23,6 +23,10 @@
#ifdef __KERNEL__
+struct neigh_parms;
+struct net_device;
+struct sk_buff;
+
struct hippi_cb {
__u32 ifield;
};
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
new file mode 100644
index 000000000000..9d7754ad5e9b
--- /dev/null
+++ b/include/linux/hisi_acc_qm.h
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef HISI_ACC_QM_H
+#define HISI_ACC_QM_H
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
+/* qm user domain */
+#define QM_ARUSER_M_CFG_1 0x100088
+#define AXUSER_SNOOP_ENABLE BIT(30)
+#define AXUSER_CMD_TYPE GENMASK(14, 12)
+#define AXUSER_CMD_SMMU_NORMAL 1
+#define AXUSER_NS BIT(6)
+#define AXUSER_NO BIT(5)
+#define AXUSER_FP BIT(4)
+#define AXUSER_SSV BIT(0)
+#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
+ FIELD_PREP(AXUSER_CMD_TYPE, \
+ AXUSER_CMD_SMMU_NORMAL) | \
+ AXUSER_NS | AXUSER_NO | AXUSER_FP)
+#define QM_ARUSER_M_CFG_ENABLE 0x100090
+#define ARUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_AWUSER_M_CFG_1 0x100098
+#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
+#define AWUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_WUSER_M_CFG_ENABLE 0x1000a8
+#define WUSER_M_CFG_ENABLE 0xffffffff
+
+/* mailbox */
+#define QM_MB_CMD_SQC 0x0
+#define QM_MB_CMD_CQC 0x1
+#define QM_MB_CMD_EQC 0x2
+#define QM_MB_CMD_AEQC 0x3
+#define QM_MB_CMD_SQC_BT 0x4
+#define QM_MB_CMD_CQC_BT 0x5
+#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_FLUSH_QM 0x9
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
+
+#define QM_MB_CMD_SEND_BASE 0x300
+#define QM_MB_EVENT_SHIFT 8
+#define QM_MB_BUSY_SHIFT 13
+#define QM_MB_OP_SHIFT 14
+#define QM_MB_CMD_DATA_ADDR_L 0x304
+#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_MAX_WAIT_CNT 6000
+
+/* doorbell */
+#define QM_DOORBELL_CMD_SQ 0
+#define QM_DOORBELL_CMD_CQ 1
+#define QM_DOORBELL_CMD_EQ 2
+#define QM_DOORBELL_CMD_AEQ 3
+
+#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
+#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QP_MAX_NUM_SHIFT 11
+#define QM_DB_CMD_SHIFT_V2 12
+#define QM_DB_RAND_SHIFT_V2 16
+#define QM_DB_INDEX_SHIFT_V2 32
+#define QM_DB_PRIORITY_SHIFT_V2 48
+#define QM_VF_STATE 0x60
+
+/* qm cache */
+#define QM_CACHE_CTL 0x100050
+#define SQC_CACHE_ENABLE BIT(0)
+#define CQC_CACHE_ENABLE BIT(1)
+#define SQC_CACHE_WB_ENABLE BIT(4)
+#define SQC_CACHE_WB_THRD GENMASK(10, 5)
+#define CQC_CACHE_WB_ENABLE BIT(11)
+#define CQC_CACHE_WB_THRD GENMASK(17, 12)
+#define QM_AXI_M_CFG 0x1000ac
+#define AXI_M_CFG 0xffff
+#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
+#define AXI_M_CFG_ENABLE 0xffffffff
+#define QM_PEH_AXUSER_CFG 0x1000cc
+#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
+#define PEH_AXUSER_CFG 0x401001
+#define PEH_AXUSER_CFG_ENABLE 0xffffffff
+
+#define QM_MIN_QNUM 2
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
+
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
+
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_DOWN,
+};
+
+enum qm_state {
+ QM_WORK = 0,
+ QM_STOP,
+};
+
+enum qp_state {
+ QP_START = 1,
+ QP_STOP,
+};
+
+enum qm_hw_ver {
+ QM_HW_V1 = 0x20,
+ QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
+};
+
+enum qm_fun_type {
+ QM_HW_PF,
+ QM_HW_VF,
+};
+
+enum qm_debug_file {
+ CURRENT_QM,
+ CURRENT_Q,
+ CLEAR_ENABLE,
+ DEBUG_FILE_NUM,
+};
+
+enum qm_vf_state {
+ QM_READY = 0,
+ QM_NOT_READY,
+};
+
+enum qm_misc_ctl_bits {
+ QM_DRIVER_REMOVING = 0x0,
+ QM_RST_SCHED,
+ QM_RESETTING,
+ QM_MODULE_PARAM,
+};
+
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_STOP_FUNC,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+};
+
+struct qm_dev_alg {
+ u64 alg_msk;
+ const char *alg;
+};
+
+struct qm_dev_dfx {
+ u32 dev_state;
+ u32 dev_timeout;
+};
+
+struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+ u32 reg_len;
+};
+
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
+struct debugfs_file {
+ enum qm_debug_file index;
+ struct mutex lock;
+ struct qm_debug *debug;
+};
+
+struct qm_debug {
+ u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
+ struct dentry *debug_root;
+ struct dentry *qm_d;
+ struct debugfs_file files[DEBUG_FILE_NUM];
+ struct qm_dev_dfx dev_dfx;
+ unsigned int *qm_last_words;
+	/* ACC engines recording last regs */
+ unsigned int *last_words;
+ struct dfx_diff_registers *qm_diff_regs;
+ struct dfx_diff_registers *acc_diff_regs;
+};
+
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
+struct qm_dma {
+ void *va;
+ dma_addr_t dma;
+ size_t size;
+};
+
+struct hisi_qm_status {
+ u32 eq_head;
+ bool eqc_phase;
+ u32 aeq_head;
+ bool aeqc_phase;
+ atomic_t flags;
+ int stop_reason;
+};
+
+struct hisi_qm;
+
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
+ u32 qm_shutdown_mask;
+ u32 dev_shutdown_mask;
+ u32 qm_reset_mask;
+ u32 dev_reset_mask;
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
+};
+
+struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
+ void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ void (*show_last_dfx_regs)(struct hisi_qm *qm);
+ void (*err_info_init)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_cap_record {
+ u32 type;
+ u32 cap_val;
+};
+
+struct hisi_qm_cap_tables {
+ struct hisi_qm_cap_record *qm_cap_table;
+ struct hisi_qm_cap_record *dev_cap_table;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_poll_data {
+ struct hisi_qm *qm;
+ struct work_struct work;
+ u16 *qp_finish_id;
+ u16 eqe_num;
+};
+
+/**
+ * struct qm_err_isolate
+ * @isolate_lock: protects device error log
+ * @err_threshold: user config error threshold which triggers isolation
+ * @is_isolate: device isolation state
+ * @qm_hw_errs: head of the device hardware error list
+ */
+struct qm_err_isolate {
+ struct mutex isolate_lock;
+ u32 err_threshold;
+ bool is_isolate;
+ struct list_head qm_hw_errs;
+};
+
+struct qm_rsv_buf {
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqc *eqc;
+ struct qm_aeqc *aeqc;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqc_dma;
+ dma_addr_t aeqc_dma;
+ struct qm_dma qcdma;
+};
+
+struct hisi_qm {
+ enum qm_hw_ver ver;
+ enum qm_fun_type fun_type;
+ const char *dev_name;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+ void __iomem *db_io_base;
+
+	/* Capability version, 0: not supported */
+ u32 cap_ver;
+ u32 sqe_size;
+ u32 qp_base;
+ u32 qp_num;
+ u32 qp_in_used;
+ u32 ctrl_qp_num;
+ u32 max_qp_num;
+ u32 vfs_num;
+ u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
+
+ struct qm_dma qdma;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqe *eqe;
+ struct qm_aeqe *aeqe;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqe_dma;
+ dma_addr_t aeqe_dma;
+ struct qm_rsv_buf xqc_buf;
+
+ struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
+ struct hisi_qm_err_status err_status;
+ /* driver removing and reset sched */
+ unsigned long misc_ctl;
+ /* Device capability bit */
+ unsigned long caps;
+
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
+ struct hisi_qm_poll_data *poll_data;
+
+ struct mutex mailbox_lock;
+
+ const struct hisi_qm_hw_ops *ops;
+
+ struct qm_debug debug;
+
+ u32 error_mask;
+
+ struct workqueue_struct *wq;
+ struct work_struct rst_work;
+ struct work_struct cmd_process;
+
+ bool use_sva;
+
+ resource_size_t phys_base;
+ resource_size_t db_phys_base;
+ struct uacce_device *uacce;
+ int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
+ struct qm_err_isolate isolate_data;
+
+ struct hisi_qm_cap_tables cap_tables;
+};
+
+struct hisi_qp_status {
+ atomic_t used;
+ u16 sq_tail;
+ u16 cq_head;
+ bool cqc_phase;
+ atomic_t flags;
+};
+
+struct hisi_qp_ops {
+ int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
+};
+
+struct hisi_qp {
+ u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
+ u8 alg_type;
+ u8 req_type;
+
+ struct qm_dma qdma;
+ void *sqe;
+ struct qm_cqe *cqe;
+ dma_addr_t sqe_dma;
+ dma_addr_t cqe_dma;
+
+ struct hisi_qp_status qp_status;
+ struct hisi_qp_ops *hw_ops;
+ void *qp_ctx;
+ void (*req_cb)(struct hisi_qp *qp, void *data);
+ void (*event_cb)(struct hisi_qp *qp);
+
+ struct hisi_qm *qm;
+ bool is_resetting;
+ bool is_in_kernel;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
+};
+
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+
+ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
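A driver typically binds this validator to a module parameter through a kernel_param_ops table, roughly as below (the PCI device ID and parameter name are hypothetical):

	static u32 pf_q_num = QM_QNUM_V2;

	static int pf_q_num_set(const char *val, const struct kernel_param *kp)
	{
		return q_num_set(val, kp, 0xa255 /* hypothetical PF device ID */);
	}

	static const struct kernel_param_ops pf_q_num_ops = {
		.set = pf_q_num_set,
		.get = param_get_int,
	};

	module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
	MODULE_PARM_DESC(pf_q_num, "Number of queues in PF");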
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
+
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+int hisi_qm_init(struct hisi_qm *qm);
+void hisi_qm_uninit(struct hisi_qm *qm);
+int hisi_qm_start(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
+int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
+void hisi_qm_stop_qp(struct hisi_qp *qp);
+int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+void hisi_qm_debug_init(struct hisi_qm *qm);
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len);
+void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, u32 regs_len);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
+
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
+int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size);
+
+/* Used by VFIO ACC live migration driver */
+struct pci_driver *hisi_sec_get_pf_driver(void);
+struct pci_driver *hisi_hpre_get_pf_driver(void);
+struct pci_driver *hisi_zip_get_pf_driver(void);
+#endif
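Taken together, a PF driver threads these entry points through its probe path roughly as follows (a condensed sketch: `my_devices` is a hypothetical static hisi_qm_list, and most setup and error handling is elided):

	static int my_acc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct hisi_qm *qm;
		int ret;

		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
		if (!qm)
			return -ENOMEM;

		qm->pdev = pdev;
		qm->fun_type = QM_HW_PF;

		ret = hisi_qm_init(qm);
		if (ret)
			return ret;

		ret = hisi_qm_start(qm);
		if (ret)
			goto err_uninit;

		hisi_qm_add_list(qm, &my_devices);
		ret = hisi_qm_alg_register(qm, &my_devices, 1);
		if (ret)
			goto err_stop;

		return 0;

	err_stop:
		hisi_qm_del_list(qm, &my_devices);
		hisi_qm_stop(qm, QM_NORMAL);
	err_uninit:
		hisi_qm_uninit(qm);
		return ret;
	}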
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 866a0fa104c4..126a36571667 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -4,19 +4,14 @@
*
* Authors: Jérôme Glisse <jglisse@redhat.com>
*
- * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
*/
#ifndef LINUX_HMM_H
#define LINUX_HMM_H
-#include <linux/kconfig.h>
-#include <linux/pgtable.h>
+#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/migrate.h>
-#include <linux/memremap.h>
-#include <linux/completion.h>
-#include <linux/mmu_notifier.h>
+struct mmu_interval_notifier;
/*
* On output:
@@ -105,7 +100,7 @@ struct hmm_range {
};
/*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
*/
int hmm_range_fault(struct hmm_range *range);
@@ -113,7 +108,7 @@ int hmm_range_fault(struct hmm_range *range);
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
*
* When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to
* wait already.
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
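
hmm_range_fault() is only safe inside the mmu_interval_notifier retry pattern described in Documentation/mm/hmm.rst. A hedged sketch of that loop; the interval notifier, the hmm_pfns buffer in the range, and the driver lock are assumed to be set up elsewhere.

static int demo_fault(struct mm_struct *mm, struct hmm_range *range,
		      struct mutex *driver_lock)
{
	int ret;

again:
	range->notifier_seq = mmu_interval_read_begin(range->notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)
			goto again;
		return ret;
	}

	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
		mutex_unlock(driver_lock);
		goto again;
	}
	/* consume range->hmm_pfns while holding the driver lock */
	mutex_unlock(driver_lock);
	return 0;
}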
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 20c885d0bddc..9c8119ed13a4 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -7,6 +7,9 @@
#define __LINUX_HOST1X_H
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
@@ -15,6 +18,8 @@ enum host1x_class {
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVDEC = 0xF0,
+ HOST1X_CLASS_NVDEC1 = 0xF5,
};
struct host1x;
@@ -24,15 +29,46 @@ struct iommu_group;
u64 host1x_get_dma_mask(struct host1x *host1x);
/**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ *
+ * Note that entries are not evicted from this cache periodically; they need to be
+ * released explicitly. This is used primarily by DRM/KMS, where the cache's reference
+ * is released once the last reference to a buffer object represented by a mapping in
+ * this cache is dropped.
+ */
+struct host1x_bo_cache {
+ struct list_head mappings;
+ struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->mappings);
+ mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+ /* XXX warn if not empty? */
+ mutex_destroy(&cache->lock);
+}
+
+/**
* struct host1x_client_ops - host1x client operations
+ * @early_init: host1x client early initialization code
* @init: host1x client initialization code
* @exit: host1x client tear down code
+ * @late_exit: host1x client late tear down code
* @suspend: host1x client suspend code
* @resume: host1x client resume code
*/
struct host1x_client_ops {
+ int (*early_init)(struct host1x_client *client);
int (*init)(struct host1x_client *client);
int (*exit)(struct host1x_client *client);
+ int (*late_exit)(struct host1x_client *client);
int (*suspend)(struct host1x_client *client);
int (*resume)(struct host1x_client *client);
};
@@ -51,6 +87,7 @@ struct host1x_client_ops {
* @parent: pointer to parent structure
* @usecount: reference count for this structure
* @lock: mutex for mutually exclusive concurrency
+ * @cache: host1x buffer object cache
*/
struct host1x_client {
struct list_head list;
@@ -69,6 +106,8 @@ struct host1x_client {
struct host1x_client *parent;
unsigned int usecount;
struct mutex lock;
+
+ struct host1x_bo_cache cache;
};
/*
@@ -78,23 +117,48 @@ struct host1x_client {
struct host1x_bo;
struct sg_table;
+struct host1x_bo_mapping {
+ struct kref ref;
+ struct dma_buf_attachment *attach;
+ enum dma_data_direction direction;
+ struct list_head list;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ unsigned int chunks;
+ struct device *dev;
+ dma_addr_t phys;
+ size_t size;
+
+ struct host1x_bo_cache *cache;
+ struct list_head entry;
+};
+
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+ return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys);
- void (*unpin)(struct device *dev, struct sg_table *sgt);
+ struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir);
+ void (*unpin)(struct host1x_bo_mapping *map);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
};
struct host1x_bo {
const struct host1x_bo_ops *ops;
+ struct list_head mappings;
+ spinlock_t lock;
};
static inline void host1x_bo_init(struct host1x_bo *bo,
const struct host1x_bo_ops *ops)
{
+ INIT_LIST_HEAD(&bo->mappings);
+ spin_lock_init(&bo->lock);
bo->ops = ops;
}
@@ -108,18 +172,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline struct sg_table *host1x_bo_pin(struct device *dev,
- struct host1x_bo *bo,
- dma_addr_t *phys)
-{
- return bo->ops->pin(dev, bo, phys);
-}
-
-static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
- struct sg_table *sgt)
-{
- bo->ops->unpin(dev, sgt);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
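
The reworked pin API returns a refcounted mapping object instead of a bare sg_table. A usage sketch, not from the patch; passing a NULL cache is assumed to skip caching.

	struct host1x_bo_mapping *map;

	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, &client->cache);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* program the engine using map->phys or map->sgt */

	host1x_bo_unpin(map);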
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
@@ -142,7 +198,9 @@ struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
@@ -153,11 +211,21 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags);
-void host1x_syncpt_free(struct host1x_syncpt *sp);
+void host1x_syncpt_put(struct host1x_syncpt *sp);
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ unsigned long flags,
+ const char *name);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+ u32 syncpt_id);
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout);
+void host1x_fence_cancel(struct dma_fence *fence);
+
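host1x_fence_create() exposes a syncpoint threshold as a DMA fence, so the generic dma_fence API can be used to wait for engine progress. A sketch under the assumption that sp is a held syncpoint reference:

	struct dma_fence *fence;
	long remaining;

	fence = host1x_fence_create(sp, host1x_syncpt_read_max(sp), true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* interruptible wait, bounded at 100 ms */
	remaining = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	dma_fence_put(fence);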
/*
* host1x channel
*/
@@ -167,6 +235,7 @@ struct host1x_job;
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -204,8 +273,8 @@ struct host1x_job {
struct host1x_client *client;
/* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
+ struct host1x_job_cmd *cmds;
+ unsigned int num_cmds;
/* Array of handles to be pinned & unpinned */
struct host1x_reloc *relocs;
@@ -218,13 +287,20 @@ struct host1x_job {
dma_addr_t *reloc_addr_phys;
/* Sync point id, number of increments and end related to the submit */
- u32 syncpt_id;
+ struct host1x_syncpt *syncpt;
u32 syncpt_incrs;
u32 syncpt_end;
+ /* Completion fence for job tracking */
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+
/* Maximum time to wait for this job */
unsigned int timeout;
+ /* Job has timed out and should be released */
+ bool cancelled;
+
/* Index and number of slots used in the push buffer */
unsigned int first_get;
unsigned int num_slots;
@@ -245,12 +321,33 @@ struct host1x_job {
/* Add a channel wait for previous ops to complete */
bool serialize;
+
+ /* Fast-forward syncpoint increments on job timeout */
+ bool syncpt_recovery;
+
+ /* Callback called when job is freed */
+ void (*release)(struct host1x_job *job);
+ void *user_data;
+
+ /* Whether host1x-side firewall should be ran for this job or not */
+ bool enable_firewall;
+
+ /* Options for configuring engine data stream ID */
+ /* Context device to use for job */
+ struct host1x_memory_context *memory_context;
+ /* Stream ID to use if context isolation is disabled (!memory_context) */
+ u32 engine_fallback_streamid;
+ /* Engine offset to program stream ID to */
+ u32 engine_streamid_offset;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs);
+ u32 num_cmdbufs, u32 num_relocs,
+ bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+ bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
@@ -320,8 +417,33 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int host1x_client_register(struct host1x_client *client);
-int host1x_client_unregister(struct host1x_client *client);
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
+ })
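Following the note below the wrapper, a caller that needs to do work between initialization and registration would use the two-step form; a sketch (the embedding structure is hypothetical):

	struct host1x_client *client = &my_dev->client;	/* hypothetical embedding */
	int err;

	host1x_client_init(client);
	/* ... setup that must happen after init but before registration ... */
	err = __host1x_client_register(client);
	if (err < 0)
		host1x_client_exit(client);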
+
+void host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);
@@ -333,7 +455,43 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device,
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
-int tegra_mipi_calibrate(struct tegra_mipi_device *device);
-int tegra_mipi_wait(struct tegra_mipi_device *device);
+int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
+int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
+
+/* host1x memory contexts */
+
+struct host1x_memory_context {
+ struct host1x *host;
+
+ refcount_t ref;
+ struct pid *owner;
+
+ struct device dev;
+ u64 dma_mask;
+ u32 stream_id;
+};
+
+#ifdef CONFIG_IOMMU_API
+struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid);
+void host1x_memory_context_get(struct host1x_memory_context *cd);
+void host1x_memory_context_put(struct host1x_memory_context *cd);
+#else
+static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid)
+{
+ return NULL;
+}
+
+static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
+{
+}
+
+static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
+{
+}
+#endif
#endif
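
A hedged sketch of the memory-context lifetime implied by the API above. Note that the !CONFIG_IOMMU_API stub returns NULL, so both error and NULL results must be handled; pid reference handling is elided.

	struct host1x_memory_context *ctx;

	ctx = host1x_memory_context_alloc(host1x, client->dev, task_pid(current));
	if (IS_ERR_OR_NULL(ctx))
		return ctx ? PTR_ERR(ctx) : -EOPNOTSUPP;

	job->memory_context = ctx;
	host1x_memory_context_get(ctx);	/* the job holds its own reference */

	/* ... submit the job; drop the references when done ... */

	host1x_memory_context_put(ctx);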
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644
index 000000000000..72462737a6db
--- /dev/null
+++ b/include/linux/host1x_context_bus.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index 6f1dee7e67e0..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 8604564b985d..21e69eaf7a36 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -30,7 +30,7 @@ struct hpet {
unsigned long _hpet_compare;
} _u1;
u64 hpet_fsb[2]; /* FSB route */
- } hpet_timers[1];
+ } hpet_timers[];
};
#define hpet_mc _u0._hpet_mc
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 107cedd7019a..aa1e65ccb615 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -13,16 +13,12 @@
#define _LINUX_HRTIMER_H
#include <linux/hrtimer_defs.h>
-#include <linux/rbtree.h>
+#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/percpu.h>
-#include <linux/seqlock.h>
+#include <linux/percpu-defs.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
-#include <linux/timerqueue.h>
-
-struct hrtimer_clock_base;
-struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
@@ -60,14 +56,6 @@ enum hrtimer_mode {
};
/*
- * Return values for the callback function
- */
-enum hrtimer_restart {
- HRTIMER_NORESTART, /* Timer is not restarted */
- HRTIMER_RESTART, /* Timer must be restarted */
-};
-
-/*
* Values to track state of the timer
*
* Possible states:
@@ -95,38 +83,6 @@ enum hrtimer_restart {
#define HRTIMER_STATE_ENQUEUED 0x01
/**
- * struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
- * the absolute expiry time in the hrtimers internal
- * representation. The time is related to the clock on
- * which the timer is based. Is setup by adding
- * slack to the _softexpires value. For non range timers
- * identical to _softexpires.
- * @_softexpires: the absolute earliest expiry time of the hrtimer.
- * The time which was given as expiry time when the timer
- * was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- * @is_rel: Set if the timer was armed relative
- * @is_soft: Set if hrtimer will be expired in soft interrupt context.
- * @is_hard: Set if hrtimer will be expired in hard interrupt context
- * even on RT.
- *
- * The hrtimer structure must be initialized by hrtimer_init()
- */
-struct hrtimer {
- struct timerqueue_node node;
- ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- u8 is_rel;
- u8 is_soft;
- u8 is_hard;
-};
-
-/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
* @task: task to wake up
@@ -138,105 +94,6 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
-#ifdef CONFIG_64BIT
-# define __hrtimer_clock_base_align ____cacheline_aligned
-#else
-# define __hrtimer_clock_base_align
-#endif
-
-/**
- * struct hrtimer_clock_base - the timer base for a specific clock
- * @cpu_base: per cpu clock base
- * @index: clock type index for per_cpu support when moving a
- * timer to a base on another cpu.
- * @clockid: clock id for per_cpu support
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
- * @active: red black tree root node for the active timers
- * @get_time: function to retrieve the current time of the clock
- * @offset: offset of this clock to the monotonic base
- */
-struct hrtimer_clock_base {
- struct hrtimer_cpu_base *cpu_base;
- unsigned int index;
- clockid_t clockid;
- seqcount_raw_spinlock_t seq;
- struct hrtimer *running;
- struct timerqueue_head active;
- ktime_t (*get_time)(void);
- ktime_t offset;
-} __hrtimer_clock_base_align;
-
-enum hrtimer_base_type {
- HRTIMER_BASE_MONOTONIC,
- HRTIMER_BASE_REALTIME,
- HRTIMER_BASE_BOOTTIME,
- HRTIMER_BASE_TAI,
- HRTIMER_BASE_MONOTONIC_SOFT,
- HRTIMER_BASE_REALTIME_SOFT,
- HRTIMER_BASE_BOOTTIME_SOFT,
- HRTIMER_BASE_TAI_SOFT,
- HRTIMER_MAX_CLOCK_BASES,
-};
-
-/**
- * struct hrtimer_cpu_base - the per cpu clock bases
- * @lock: lock protecting the base and associated clock bases
- * and timers
- * @cpu: cpu number
- * @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set_seq: Sequence counter of clock was set events
- * @hres_active: State of high resolution mode
- * @in_hrtirq: hrtimer_interrupt() is currently executing
- * @hang_detected: The last hrtimer interrupt detected a hang
- * @softirq_activated: displays, if the softirq is raised - update of softirq
- * related settings is not required then.
- * @nr_events: Total number of hrtimer interrupt events
- * @nr_retries: Total number of hrtimer interrupt retries
- * @nr_hangs: Total number of hrtimer interrupt hangs
- * @max_hang_time: Maximum time spent in hrtimer_interrupt
- * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
- * expired
- * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
- * callback to finish.
- * @expires_next: absolute time of the next event, is required for remote
- * hrtimer enqueue; it is the total first expiry time (hard
- * and soft hrtimer are taken into account)
- * @next_timer: Pointer to the first expiring timer
- * @softirq_expires_next: Time to check, if soft queues needs also to be expired
- * @softirq_next_timer: Pointer to the first expiring softirq based timer
- * @clock_base: array of clock bases for this cpu
- *
- * Note: next_timer is just an optimization for __remove_hrtimer().
- * Do not dereference the pointer because it is not reliable on
- * cross cpu removals.
- */
-struct hrtimer_cpu_base {
- raw_spinlock_t lock;
- unsigned int cpu;
- unsigned int active_bases;
- unsigned int clock_was_set_seq;
- unsigned int hres_active : 1,
- in_hrtirq : 1,
- hang_detected : 1,
- softirq_activated : 1;
-#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int nr_events;
- unsigned short nr_retries;
- unsigned short nr_hangs;
- unsigned int max_hang_time;
-#endif
-#ifdef CONFIG_PREEMPT_RT
- spinlock_t softirq_expiry_lock;
- atomic_t timer_waiters;
-#endif
- ktime_t expires_next;
- struct hrtimer *next_timer;
- ktime_t softirq_expires_next;
- struct hrtimer *softirq_next_timer;
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-} ____cacheline_aligned;
-
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = time;
@@ -318,16 +175,12 @@ struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-extern void clock_was_set_delayed(void);
-
extern unsigned int hrtimer_resolution;
#else
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline void clock_was_set_delayed(void) { }
-
#endif
static inline ktime_t
@@ -351,13 +204,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
timer->base->get_time());
}
-extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
#endif
-extern void hrtimers_resume(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@@ -447,6 +300,10 @@ static inline void hrtimer_restart(struct hrtimer *timer)
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
+ */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
@@ -458,7 +315,7 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
/**
- * hrtimer_is_queued = check, whether the timer is on one of the queues
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
* @timer: Timer to check
*
* Returns: True if the timer is queued, false otherwise
@@ -485,20 +342,12 @@ extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
/**
- * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * hrtimer_forward_now() - forward the timer expiry so it expires after now
* @timer: hrtimer to forward
* @interval: the interval to forward
*
- * Forward the timer expiry so it will expire after the current time
- * of the hrtimer clock base. Returns the number of overruns.
- *
- * Can be safely called from the callback function of @timer. If
- * called from other contexts @timer must neither be enqueued nor
- * running the callback and the caller needs to take care of
- * serialization.
- *
- * Note: This only updates the timer expiry value and does not requeue
- * the timer.
+ * It is a variant of hrtimer_forward(). The timer will expire after the current
+ * time of the hrtimer clock base. See hrtimer_forward() for details.
*/
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
@@ -531,9 +380,9 @@ extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
-int hrtimers_dead_cpu(unsigned int cpu);
+int hrtimers_cpu_dying(unsigned int cpu);
#else
-#define hrtimers_dead_cpu NULL
+#define hrtimers_cpu_dying NULL
#endif
#endif
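
The enum hrtimer_restart definition moved out of this header (see hrtimer_types.h below), but the basic usage pattern is unchanged. A minimal periodic-timer sketch built on hrtimer_forward_now():

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* push the expiry forward by one period and rearm */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}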
diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h
new file mode 100644
index 000000000000..8d9700894468
--- /dev/null
+++ b/include/linux/hrtimer_api.h
@@ -0,0 +1 @@
+#include <linux/hrtimer.h>
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index 2d3e3c5fb946..c3b4b7ed7c16 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -3,6 +3,8 @@
#define _LINUX_HRTIMER_DEFS_H
#include <linux/ktime.h>
+#include <linux/timerqueue.h>
+#include <linux/seqlock.h>
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -24,4 +26,106 @@
#endif
+#ifdef CONFIG_64BIT
+# define __hrtimer_clock_base_align ____cacheline_aligned
+#else
+# define __hrtimer_clock_base_align
+#endif
+
+/**
+ * struct hrtimer_clock_base - the timer base for a specific clock
+ * @cpu_base: per cpu clock base
+ * @index: clock type index for per_cpu support when moving a
+ * timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
+ * @active: red black tree root node for the active timers
+ * @get_time: function to retrieve the current time of the clock
+ * @offset: offset of this clock to the monotonic base
+ */
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+ unsigned int index;
+ clockid_t clockid;
+ seqcount_raw_spinlock_t seq;
+ struct hrtimer *running;
+ struct timerqueue_head active;
+ ktime_t (*get_time)(void);
+ ktime_t offset;
+} __hrtimer_clock_base_align;
+
+enum hrtimer_base_type {
+ HRTIMER_BASE_MONOTONIC,
+ HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
+ HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
+ HRTIMER_MAX_CLOCK_BASES,
+};
+
+/**
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated:	set when the softirq is raised; updating softirq-
+ *			related settings is then not required.
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq-based hrtimers
+ *			are expired
+ * @online: CPU is online from an hrtimers point of view
+ * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
+ * callback to finish.
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue; it is the total first expiry time (hard
+ * and soft hrtimer are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time to check whether the soft queues also need to be expired
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
+ */
+struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1,
+ online : 1;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
+ unsigned int max_hang_time;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t softirq_expiry_lock;
+ atomic_t timer_waiters;
+#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+} ____cacheline_aligned;
+
+
#endif
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
new file mode 100644
index 000000000000..ad66a3081735
--- /dev/null
+++ b/include/linux/hrtimer_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_TYPES_H
+#define _LINUX_HRTIMER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/timerqueue_types.h>
+
+struct hrtimer_clock_base;
+
+/*
+ * Return values for the callback function
+ */
+enum hrtimer_restart {
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
+};
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ * @node: timerqueue node, which also manages node.expires,
+ * the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based. It is set up by adding
+ * slack to the _softexpires value. For non-range timers
+ * it is identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
+ * @is_hard: Set if hrtimer will be expired in hard interrupt context
+ * even on RT.
+ *
+ * The hrtimer structure must be initialized by hrtimer_init()
+ */
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
+};
+
+#endif /* _LINUX_HRTIMER_TYPES_H */
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
index 2d6f3cfa7dea..972434daa000 100644
--- a/include/linux/hsi/ssi_protocol.h
+++ b/include/linux/hsi/ssi_protocol.h
@@ -24,6 +24,7 @@ int ssip_slave_stop_tx(struct hsi_client *master);
void ssip_reset_event(struct hsi_client *master);
int ssip_slave_running(struct hsi_client *master);
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#endif /* __LINUX_SSIP_SLAVE_H__ */
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
deleted file mode 100644
index 842fce69ac06..000000000000
--- a/include/linux/htcpld.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_HTCPLD_H
-#define __LINUX_HTCPLD_H
-
-struct htcpld_chip_platform_data {
- unsigned int addr;
- unsigned int reset;
- unsigned int num_gpios;
- unsigned int gpio_out_base;
- unsigned int gpio_in_base;
- unsigned int irq_base;
- unsigned int num_irqs;
-};
-
-struct htcpld_core_platform_data {
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
- unsigned int i2c_adapter_id;
-
- struct htcpld_chip_platform_data *chip;
- unsigned int num_chip;
-};
-
-#endif /* __LINUX_HTCPLD_H */
-
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644
index 000000000000..8289055061ab
--- /dev/null
+++ b/include/linux/hte.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up
+ * edges, for example during the request irq call.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+ HTE_EDGE_NO_SETUP = 1U << 0,
+ HTE_RISING_EDGE_TS = 1U << 1,
+ HTE_FALLING_EDGE_TS = 1U << 2,
+};
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing; in that case
+ * the HTE subsystem calls the secondary callback provided by the consumer,
+ * where it is allowed to sleep.
+ */
+enum hte_return {
+ HTE_CB_HANDLED,
+ HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+ u64 tsc;
+ u64 seq;
+ int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in HZ, for example 1KHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+ u64 hz;
+ clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for
+ * hardware timestamping. If NULL, the HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+ u32 line_id;
+ void *line_data;
+ unsigned long edge_flags;
+ const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between the consumers and the
+ * subsystem, and between the subsystem and the providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+ struct hte_line_attr attr;
+ void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @release: Hook for releasing a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @disable: Hook to disable specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 on success and a negative error code on failure.
+ * On success the HTE subsystem fills in the provided struct hte_clk_info.
+ *
+ * xlated_id parameter is used to communicate between HTE subsystem and the
+ * providers and is translated by the provider.
+ */
+struct hte_ops {
+ int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*enable)(struct hte_chip *chip, u32 xlated_id);
+ int (*disable)(struct hte_chip *chip, u32 xlated_id);
+ int (*get_clk_src_info)(struct hte_chip *chip,
+ struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer-supplied logical ids to
+ * physical ids; returns 0 on success and a negative value on failure.
+ * On success it stores a value between 0 and @nlines in the xlated_id
+ * parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+ const char *name;
+ struct device *dev;
+ const struct hte_ops *ops;
+ u32 nlines;
+ int (*xlate_of)(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id);
+ int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+ u32 *xlated_id);
+ bool (*match_from_linedata)(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc);
+ u8 of_hte_n_cells;
+
+ struct hte_device *gdev;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name,
+ void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci);
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+ u32 xlated_id,
+ const struct hte_ts_data *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags,
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+ int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+ struct hte_ts_desc *desc,
+ hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
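
A hedged consumer sketch for the API above: obtain a line descriptor from the device tree, then request timestamps with a non-sleeping primary callback. Names and the DT index are illustrative; non-DT consumers would go through hte_init_line_attr() first.

static enum hte_return demo_ts_cb(struct hte_ts_data *ts, void *data)
{
	/* primary callback; must not sleep */
	pr_debug("hte: tsc=%llu seq=%llu level=%d\n",
		 ts->tsc, ts->seq, ts->raw_level);
	return HTE_CB_HANDLED;
}

static int demo_hte_setup(struct device *dev)
{
	static struct hte_ts_desc desc;	/* must outlive the request */
	int ret;

	ret = hte_ts_get(dev, &desc, 0);	/* index 0 in the DT list */
	if (ret)
		return ret;

	ret = devm_hte_request_ts_ns(dev, &desc, demo_ts_cb, NULL, NULL);
	if (ret)
		hte_ts_put(&desc);

	return ret;
}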
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8a8bc46a2432..de0c89105076 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -7,86 +7,41 @@
#include <linux/fs.h> /* only for vma_is_dax() */
-extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
- struct vm_area_struct *vma);
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+void huge_pmd_set_accessed(struct vm_fault *vmf);
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif
-extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags);
-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr, unsigned long next);
-extern int zap_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr);
-extern int zap_huge_pud(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pud_t *pud, unsigned long addr);
-extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr,
- pmd_t *old_pmd, pmd_t *new_pmd);
-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot,
- unsigned long cp_flags);
-vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write);
-
-/**
- * vmf_insert_pfn_pmd - insert a pmd size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @pgprot: page protection to use
- * @write: whether it's a write fault
- *
- * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
- bool write)
-{
- return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
-}
-vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write);
-
-/**
- * vmf_insert_pfn_pud - insert a pud size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @pgprot: page protection to use
- * @write: whether it's a write fault
- *
- * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
- bool write)
-{
- return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
-}
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, unsigned long next);
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr);
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr);
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
+ TRANSPARENT_HUGEPAGE_UNSUPPORTED,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -95,26 +50,43 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
-#ifdef CONFIG_DEBUG_VM
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
-#endif
};
struct kobject;
struct kobj_attribute;
-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count,
- enum transparent_hugepage_flag flag);
-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count,
+ enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+
+/*
+ * Mask of all large folio orders supported for file THP.
+ */
+#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+
+#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -125,81 +97,184 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
-/*
- * to be used on vmas which are known to support THP.
- * Use transparent_hugepage_enabled otherwise
- */
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool hugepage_global_enabled(void)
{
- if (vma->vm_flags & VM_NOHUGEPAGE)
- return false;
-
- if (vma_is_temporary_stack(vma))
- return false;
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- return false;
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
+static inline bool hugepage_flags_enabled(void)
+{
/*
- * For dax vmas, try to always use hugepage mappings. If the kernel does
- * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
- * mappings, and device-dax namespaces, that try to guarantee a given
- * mapping size, will fail to enable
+ * We cover both the anon and the file-backed case here; we must return
+ * true if globally enabled, even when all anon sizes are set to never.
+ * So we don't need to look at huge_anon_orders_inherit.
*/
- if (vma_is_dax(vma))
- return true;
-
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
-
- return false;
+ return hugepage_global_enabled() ||
+ huge_anon_orders_always ||
+ huge_anon_orders_madvise;
}
-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+/*
+ * Perform the following checks:
+ * - For file vma, check if the linear page offset of vma is
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
+ * area.
+ */
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
+ unsigned long hpage_size = PAGE_SIZE << order;
+ unsigned long haddr;
+
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
- if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
- (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ hpage_size >> PAGE_SHIFT))
return false;
}
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ haddr = ALIGN_DOWN(addr, hpage_size);
+
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
return true;
}
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
+
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
+
+ order = highest_order(orders);
+
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
+static inline bool file_thp_enabled(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ if (!vma->vm_file)
+ return false;
+
+ inode = vma->vm_file->f_inode;
+
+ return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
+ !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
+}
+
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @smaps: whether answer will be used for smaps file
+ * @in_pf: whether answer will be used by page fault handler
+ * @enforce_sysfs: whether sysfs config should be taken into account
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Optimization to check if required orders are enabled early. */
+ if (enforce_sysfs && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
+ enforce_sysfs, orders);
+}
+
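Putting the helpers together, a page-fault-path caller would first ask which orders policy permits, then filter by VMA alignment, and finally walk the surviving orders from largest to smallest. A sketch under those assumptions, with vma and vmf taken from a fault context:

	unsigned long orders;
	int order;

	/* smaps=false, in_pf=true, enforce_sysfs=true */
	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true,
					  true, THP_ORDERS_ALL_ANON);
	orders = thp_vma_suitable_orders(vma, vmf->address, orders);

	/* try the largest remaining order first */
	order = highest_order(orders);
	while (orders) {
		/* ... attempt an order-sized allocation; on failure: ... */
		order = next_order(&orders, order);
	}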
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-extern unsigned long thp_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags);
-
-extern void prep_transhuge_page(struct page *page);
-extern void free_transhuge_page(struct page *page);
-bool is_transparent_hugepage(struct page *page);
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
-bool can_split_huge_page(struct page *page, int *pextra_pins);
-int split_huge_page_to_list(struct page *page, struct list_head *list);
+void folio_prep_large_rmappable(struct folio *folio);
+bool can_split_folio(struct folio *folio, int *pextra_pins);
+int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order);
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list(page, NULL);
+ return split_huge_page_to_list_to_order(page, NULL, 0);
}
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page);
+ unsigned long address, bool freeze, struct folio *folio);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
@@ -212,7 +287,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page);
+ bool freeze, struct folio *folio);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
@@ -225,16 +300,15 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-extern int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice);
-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next);
-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma);
-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
- struct vm_area_struct *vma);
+int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+ int advice);
+int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end);
+void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, long adjust_next);
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
static inline int is_swap_pmd(pmd_t pmd)
{
@@ -260,36 +334,12 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
}
/**
- * thp_head - Head page of a transparent huge page.
- * @page: Any page (tail, head or regular) found in the page cache.
- */
-static inline struct page *thp_head(struct page *page)
-{
- return compound_head(page);
-}
-
-/**
- * thp_order - Order of a transparent huge page.
- * @page: Head page of a transparent huge page.
- */
-static inline unsigned int thp_order(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- if (PageHead(page))
- return HPAGE_PMD_ORDER;
- return 0;
-}
-
-/**
- * thp_nr_pages - The number of regular pages in this huge page.
- * @page: The head page of a huge page.
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
*/
-static inline int thp_nr_pages(struct page *page)
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- if (PageHead(page))
- return HPAGE_PMD_NR;
- return 1;
+ return folio_order(folio) >= HPAGE_PMD_ORDER;
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -297,9 +347,10 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags, struct dev_pagemap **pgmap);
-extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_page(struct page *page)
{
@@ -308,7 +359,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -326,15 +377,6 @@ static inline bool thp_migration_supported(void)
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
-static inline struct list_head *page_deferred_list(struct page *page)
-{
- /*
- * Global or memcg deferred list in the second tail pages is
- * occupied by compound_head.
- */
- return &page[2].deferred_list;
-}
-
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -344,59 +386,45 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-static inline struct page *thp_head(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return page;
-}
-
-static inline unsigned int thp_order(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return 0;
-}
-
-static inline int thp_nr_pages(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return 1;
-}
-
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
return false;
}
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
{
- return false;
+ return 0;
}
-static inline void prep_transhuge_page(struct page *page) {}
-
-static inline bool is_transparent_hugepage(struct page *page)
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
{
- return false;
+ return 0;
}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
+
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
static inline bool
-can_split_huge_page(struct page *page, int *pextra_pins)
+can_split_folio(struct folio *folio, int *pextra_pins)
{
- BUILD_BUG();
return false;
}
static inline int
-split_huge_page_to_list(struct page *page, struct list_head *list)
+split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
{
return 0;
}
@@ -404,14 +432,14 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_huge_page(struct page *page) {}
+static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
@@ -419,9 +447,16 @@ static inline void split_huge_pmd_address(struct vm_area_struct *vma,
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
+}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
+{
+ return -EINVAL;
}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
@@ -443,8 +478,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
- pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
return 0;
}
@@ -454,6 +488,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
@@ -482,15 +521,30 @@ static inline bool thp_migration_supported(void)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-/**
- * thp_size - Size of a transparent huge page.
- * @page: Head page of a transparent huge page.
- *
- * Return: Number of bytes in this page.
+static inline int split_folio_to_list_to_order(struct folio *folio,
+ struct list_head *list, int new_order)
+{
+ return split_huge_page_to_list_to_order(&folio->page, list, new_order);
+}
+
+static inline int split_folio_to_order(struct folio *folio, int new_order)
+{
+ return split_folio_to_list_to_order(folio, NULL, new_order);
+}
+
+#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
+#define split_folio(f) split_folio_to_order(f, 0)
+
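/*
 * Hedged usage sketch (not part of the patch): tearing a large folio back
 * down to base pages with the wrappers above. Callers are expected to hold
 * a reference and the folio lock; the function name, locking and error
 * handling here are illustrative only.
 */
static int example_shrink_large_folio(struct folio *folio)
{
	int ret;

	if (!folio_test_large(folio))
		return 0;

	folio_get(folio);
	if (!folio_trylock(folio)) {
		folio_put(folio);
		return -EAGAIN;
	}
	ret = split_folio(folio);	/* new_order 0: all the way down */
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}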
+/*
+ * Architectures that select ARCH_WANTS_THP_SWAP but don't support THP_SWP
+ * due to implementation limitations (such as arm64 MTE) can override this
+ * to false.
*/
-static inline unsigned long thp_size(struct page *page)
+#ifndef arch_thp_swp_supported
+static inline bool arch_thp_swp_supported(void)
{
- return PAGE_SIZE << thp_order(page);
+ return true;
}
+#endif
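/*
 * Hedged sketch of an architecture override: a port that cannot preserve
 * some per-page state across THP swapout would define the macro in its
 * asm/pgtable.h before this header is seen. The MTE condition below is an
 * assumption modeled on arm64, not text from this patch.
 */
#define arch_thp_swp_supported arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	/* assumption: tag storage cannot follow a THP into swap */
	return !system_supports_mte();
}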
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d5cc5f802dd4..77b30a8c6076 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -2,42 +2,55 @@
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
+#include <linux/userfaultfd_k.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
-#ifndef is_hugepd
+#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
+void free_huge_folio(struct folio *folio);
+
#ifdef CONFIG_HUGETLB_PAGE
-#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * A HugeTLB page carries more metadata than the head struct page can hold,
+ * so we have to reuse other tail struct pages to store the metadata.
+ */
+#define __NR_USED_SUBPAGE 3
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
- /* sasitfy minimum size. */
+ /* satisfy minimum size. */
};
struct resv_map {
@@ -47,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -67,7 +81,7 @@ struct resv_map {
* by a resv_map's lock. The set of regions within the resv_map represent
* reservations for huge pages, or huge pages that have already been
* instantiated within the map. The from and to elements are huge page
- * indicies into the associated mapping. from indicates the starting index
+ * indices into the associated mapping. from indicates the starting index
* of the region. to represents the first index past the end of the region.
*
* For example, a file region structure with from == 0 and to == 4 represents
@@ -93,6 +107,12 @@ struct file_region {
#endif
};
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
+};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -105,92 +125,171 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
- struct page **, struct vm_area_struct **,
- unsigned long *, unsigned long *, long, unsigned int,
- int *);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+ struct vm_area_struct *, struct vm_area_struct *);
+struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ unsigned long, unsigned long, struct page *,
+ zap_flags_t);
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
+ struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
-int hugetlb_report_node_meminfo(int, char *);
-void hugetlb_show_meminfo(void);
+int hugetlb_report_node_meminfo(char *buf, int len, int nid);
+void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
-int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- struct page **pagep);
-int hugetlb_reserve_pages(struct inode *inode, long from, long to,
+#ifdef CONFIG_USERFAULTFD
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop);
+#endif /* CONFIG_USERFAULTFD */
+bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
-void putback_active_hugepage(struct page *page);
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
-void free_huge_page(struct page *page);
+bool isolate_hugetlb(struct folio *folio, struct list_head *list);
+int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void folio_putback_active_hugetlb(struct folio *folio);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud);
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
extern int sysctl_hugetlb_shm_group;
-extern struct list_head huge_boot_pages;
+extern struct list_head huge_boot_pages[MAX_NUMNODES];
/* arch callbacks */
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+#ifndef CONFIG_HIGHPTE
+/*
+ * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
+ * which may go down to the lowest PTE level in their huge_pte_offset() and
+ * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
+ */
+static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+}
+#endif
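/*
 * Hedged sketch of a consumer: an architecture whose smallest huge page is
 * a contiguous run of PTEs would descend to the PTE level through
 * pte_alloc_huge() rather than pte_offset_map() (which would require a
 * matching pte_unmap()). The PMD_SIZE check and the function itself are
 * illustrative, not part of this patch.
 */
static pte_t *example_arch_huge_pte_alloc(struct mm_struct *mm, pmd_t *pmd,
					  unsigned long addr, unsigned long sz)
{
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	/* assumption: sz is a contiguous-PTE huge page size here */
	return pte_alloc_huge(mm, pmd, addr);
}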
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
+/*
+ * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
+ * Returns the pte_t* if found, or NULL if the address is not mapped.
+ *
+ * IMPORTANT: we should normally not call this function directly; it is
+ * only a common interface for implementing arch-specific walkers. Please
+ * use hugetlb_walk() instead, because that will attempt to verify the
+ * locking for you.
+ *
+ * Since this function will walk all the pgtable pages (including not only
+ * high-level pgtable pages, but also PUD entries that can be unshared
+ * concurrently for VM_SHARED), the caller of this function is responsible
+ * for its thread safety. One can follow this rule:
+ *
+ * (1) For private mappings: pmd unsharing is not possible, so holding the
+ * mmap_lock for either read or write is sufficient. Most callers
+ * already hold the mmap_lock, so normally, no special action is
+ * required.
+ *
+ * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
+ * pgtable page can go away from under us! It can be done by a pmd
+ * unshare with a follow up munmap() on the other process), then we
+ * need either:
+ *
+ * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
+ * won't happen upon the range (it also makes sure the pte_t we
+ * read is the right and stable one), or,
+ *
+ * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
+ * sure even if unshare happened the racy unmap() will wait until
+ * i_mmap_rwsem is released.
+ *
+ * Option (2.1) is the safest: it guarantees pte stability from the pmd
+ * sharing point of view until the vma lock is released. Option (2.2)
+ * doesn't prevent a concurrent pmd unshare, but it makes sure the pgtable
+ * page is safe to access.
+ */
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
+unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write);
-struct page *follow_huge_pd(struct vm_area_struct *vma,
- unsigned long address, hugepd_t hpd,
- int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int flags);
-struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
- pgd_t *pgd, int flags);
+
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
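/*
 * Hedged caller sketch: how the two hooks above are meant to bracket a
 * range zap. hugetlb_zap_begin() may widen [start, end) for pmd sharing
 * and takes the hugetlb vma lock; hugetlb_zap_end() releases it.
 * unmap_page_range() stands in for whatever the caller actually does in
 * between; the function as a whole is illustrative.
 */
static void example_zap_one_vma(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct zap_details *details)
{
	hugetlb_zap_begin(vma, &start, &end);
	unmap_page_range(tlb, vma, start, end, details);
	hugetlb_zap_end(vma, details);
}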
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot);
+long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
+bool is_hugetlb_entry_hwpoisoned(pte_t pte);
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+{
+}
+
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -207,7 +306,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
static inline int huge_pmd_unshare(struct mm_struct *mm,
struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
@@ -218,24 +317,39 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
-static inline long follow_hugetlb_page(struct mm_struct *mm,
- struct vm_area_struct *vma, struct page **pages,
- struct vm_area_struct **vmas, unsigned long *position,
- unsigned long *nr_pages, long i, unsigned int flags,
- int *nonblocking)
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
- BUG();
- return 0;
}
-static inline struct page *follow_huge_addr(struct mm_struct *mm,
- unsigned long address, int write)
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
{
- return ERR_PTR(-EINVAL);
+}
+
+static inline struct page *hugetlb_follow_page_mask(
+ struct vm_area_struct *vma, unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
+{
+ BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
- struct mm_struct *src, struct vm_area_struct *vma)
+ struct mm_struct *src,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+ BUG();
+ return 0;
+}
+
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
{
BUG();
return 0;
@@ -245,44 +359,44 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
-static inline int hugetlb_report_node_meminfo(int nid, char *buf)
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
return 0;
}
-static inline void hugetlb_show_meminfo(void)
+static inline void hugetlb_show_meminfo_node(int nid)
{
}
-static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
- unsigned long address, hugepd_t hpd, int flags,
- int pdshift)
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
{
- return NULL;
+ return -EINVAL;
}
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
- unsigned long address, pmd_t *pmd, int flags)
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
- return NULL;
}
-static inline struct page *follow_huge_pud(struct mm_struct *mm,
- unsigned long address, pud_t *pud, int flags)
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
- return NULL;
}
-static inline struct page *follow_huge_pgd(struct mm_struct *mm,
- unsigned long address, pgd_t *pgd, int flags)
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
- return NULL;
}
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
- return -EINVAL;
}
static inline int pmd_huge(pmd_t pmd)
@@ -308,16 +422,18 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
BUG();
}
-static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
- pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- struct page **pagep)
+#ifdef CONFIG_USERFAULTFD
+static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop)
{
BUG();
return 0;
}
+#endif /* CONFIG_USERFAULTFD */
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
@@ -325,37 +441,43 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return false;
}
-static inline void putback_active_hugepage(struct page *page)
+static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
+ return 0;
}
-static inline void move_hugetlb_state(struct page *oldpage,
- struct page *newpage, int reason)
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
{
+ return 0;
}
-static inline unsigned long hugetlb_change_protection(
- struct vm_area_struct *vma, unsigned long address,
- unsigned long end, pgprot_t newprot)
+static inline void folio_putback_active_hugetlb(struct folio *folio)
{
- return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+static inline void move_hugetlb_state(struct folio *old_folio,
+ struct folio *new_folio, int reason)
{
- BUG();
+}
+
+static inline long hugetlb_change_protection(
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
}
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+ unsigned long end, struct page *ref_page,
+ zap_flags_t zap_flags)
{
BUG();
}
@@ -368,6 +490,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
return 0;
}
+static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+
#endif /* !CONFIG_HUGETLB_PAGE */
/*
* hugepages at page global directory. If arch support
@@ -421,7 +545,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
}
struct hugetlbfs_inode_info {
- struct shared_policy policy;
struct inode vfs_inode;
unsigned int seals;
};
@@ -434,8 +557,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
- int page_size_log);
+ int creat_flags, int page_size_log);
static inline bool is_file_hugepages(struct file *file)
{
@@ -454,8 +576,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
- int page_size_log)
+ int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
@@ -472,14 +593,130 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+/*
+ * hugetlb page specific state flags. These flags are located in page.private
+ * of the hugetlb head page. Functions created via the below macros should be
+ * used to manipulate these flags.
+ *
+ * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
+ * allocation time. Cleared when page is fully instantiated. Free
+ * routine checks flag to restore a reservation on error paths.
+ * Synchronization: Examined or modified by code that knows it has
+ * the only reference to page. i.e. After allocation but before use
+ * or when the page is being freed.
+ * HPG_migratable - Set after a newly allocated page is added to the page
+ * cache and/or page tables. Indicates the page is a candidate for
+ * migration.
+ * Synchronization: Initially set after new page allocation with no
+ * locking. When examined and modified during migration processing
+ * (isolate, migrate, putback) the hugetlb_lock is held.
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
+ * allocator. Typically used for migration target pages when no pages
+ * are available in the pool. The hugetlb free page path will
+ * immediately free pages with this flag set to the buddy allocator.
+ * Synchronization: Can be set after huge page allocation from buddy when
+ * the code knows it has the only reference. All other examinations and
+ * modifications require hugetlb_lock.
+ * HPG_freed - Set when page is on the free lists.
+ * Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ * that is not tracked by raw_hwp_page list.
+ */
+enum hugetlb_page_flags {
+ HPG_restore_reserve = 0,
+ HPG_migratable,
+ HPG_temporary,
+ HPG_freed,
+ HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
+ __NR_HPAGEFLAGS,
+};
+
+/*
+ * Macros to create test, set and clear function definitions for
+ * hugetlb specific page flags.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define TESTHPAGEFLAG(uname, flname) \
+static __always_inline \
+bool folio_test_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ return test_bit(HPG_##flname, private); \
+ } \
+static inline int HPage##uname(struct page *page) \
+ { return test_bit(HPG_##flname, &(page->private)); }
+
+#define SETHPAGEFLAG(uname, flname) \
+static __always_inline \
+void folio_set_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ set_bit(HPG_##flname, private); \
+ } \
+static inline void SetHPage##uname(struct page *page) \
+ { set_bit(HPG_##flname, &(page->private)); }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static __always_inline \
+void folio_clear_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ clear_bit(HPG_##flname, private); \
+ } \
+static inline void ClearHPage##uname(struct page *page) \
+ { clear_bit(HPG_##flname, &(page->private)); }
+#else
+#define TESTHPAGEFLAG(uname, flname) \
+static inline bool \
+folio_test_hugetlb_##flname(struct folio *folio) \
+ { return 0; } \
+static inline int HPage##uname(struct page *page) \
+ { return 0; }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void \
+folio_set_hugetlb_##flname(struct folio *folio) \
+ { } \
+static inline void SetHPage##uname(struct page *page) \
+ { }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void \
+folio_clear_hugetlb_##flname(struct folio *folio) \
+ { } \
+static inline void ClearHPage##uname(struct page *page) \
+ { }
+#endif
+
+#define HPAGEFLAG(uname, flname) \
+ TESTHPAGEFLAG(uname, flname) \
+ SETHPAGEFLAG(uname, flname) \
+ CLEARHPAGEFLAG(uname, flname) \
+
+/*
+ * Create functions associated with hugetlb page flags
+ */
+HPAGEFLAG(RestoreReserve, restore_reserve)
+HPAGEFLAG(Migratable, migratable)
+HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+
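/*
 * Hedged sketch of what the macro family expands to: HPAGEFLAG(Migratable,
 * migratable) above generates folio_test_hugetlb_migratable(),
 * folio_set_hugetlb_migratable() and folio_clear_hugetlb_migratable()
 * (plus the legacy HPageMigratable() page accessors). Illustrative use,
 * assuming hugetlb_lock is held as the flag documentation requires:
 */
static bool example_isolate_for_migration(struct folio *folio,
					  struct list_head *list)
{
	if (!folio_test_hugetlb_migratable(folio))
		return false;
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
	return true;
}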
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
+ struct mutex resize_lock;
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -489,13 +726,14 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
- struct cftype cgroup_files_dfl[7];
- struct cftype cgroup_files_legacy[9];
+ struct cftype cgroup_files_dfl[8];
+ struct cftype cgroup_files_legacy[10];
#endif
char name[HSTATE_NAME_LEN];
};
@@ -505,18 +743,20 @@ struct huge_bootmem_page {
struct hstate *hstate;
};
-struct page *alloc_huge_page(struct vm_area_struct *vma,
+int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
-struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
- unsigned long address);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct folio *folio);
/* arch callback */
-int __init __alloc_bootmem_huge_page(struct hstate *h);
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
@@ -531,6 +771,17 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+ return folio->_hugetlb_subpool;
+}
+
+static inline void hugetlb_set_folio_subpool(struct folio *folio,
+ struct hugepage_subpool *subpool)
+{
+ folio->_hugetlb_subpool = subpool;
+}
+
static inline struct hstate *hstate_file(struct file *f)
{
return hstate_inode(file_inode(f));
@@ -541,7 +792,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log)
return &default_hstate;
- return size_to_hstate(1UL << page_size_log);
+ if (page_size_log < BITS_PER_LONG)
+ return size_to_hstate(1UL << page_size_log);
+
+ return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
@@ -549,7 +803,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return hstate_file(vma->vm_file);
}
-static inline unsigned long huge_page_size(struct hstate *h)
+static inline unsigned long huge_page_size(const struct hstate *h)
{
return (unsigned long)PAGE_SIZE << h->order;
}
@@ -575,10 +829,10 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) >= MAX_ORDER;
+ return huge_page_order(h) > MAX_PAGE_ORDER;
}
-static inline unsigned int pages_per_huge_page(struct hstate *h)
+static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
return 1 << h->order;
}
@@ -588,6 +842,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
return huge_page_size(h) / 512;
}
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return filemap_lock_folio(mapping, idx << huge_page_order(h));
+}
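/*
 * Worked example (illustrative) of the geometry above: for a 2 MB hstate
 * on x86-64, h->order is 9, so huge_page_size() is 4096 << 9 = 2 MB,
 * pages_per_huge_page() is 1 << 9 = 512, and blocks_per_huge_page() is
 * 2 MB / 512 = 4096 sectors. filemap_lock_hugetlb_folio() scales the
 * huge-page index by the same order, so hugetlb index 3 becomes base-page
 * cache index 3 << 9 = 1536.
 */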
+
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
@@ -605,17 +865,17 @@ static inline void arch_clear_hugepage_flags(struct page *page) { }
#endif
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
{
- return entry;
+ return pte_mkhuge(entry);
}
#endif
-static inline struct hstate *page_hstate(struct page *page)
+static inline struct hstate *folio_hstate(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return size_to_hstate(page_size(page));
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ return size_to_hstate(folio_size(folio));
}
static inline unsigned hstate_index_to_shift(unsigned index)
@@ -628,21 +888,18 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
-}
-
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
+#ifdef CONFIG_MEMORY_FAILURE
+extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
+#else
+static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
@@ -672,7 +929,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
* It determines whether or not a huge page should be placed on
* movable zone or not. Movability of any huge page should be
* required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
* it is not migratable to start with. Also the size of the huge
* page should be large enough to be placed under a movable zone
* and still feasible enough to be migratable. Just the presence
@@ -733,6 +990,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@@ -743,14 +1005,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
atomic_long_sub(l, &mm->hugetlb_usage);
}
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- set_huge_pte_at(mm, addr, ptep, pte);
-}
-#endif
-
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
@@ -766,30 +1020,52 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
+/*
+ * Check if a given raw @page in a hugepage is HWPOISON.
+ */
+bool is_raw_hwpoison_page_in_hugepage(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
- unsigned long addr,
- int avoid_reserve)
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
return NULL;
}
-static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask, gfp_t gfp_mask)
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
{
return NULL;
}
-static inline struct page *alloc_huge_page_vma(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address)
+static inline int isolate_or_dissolve_huge_page(struct page *page,
+ struct list_head *list)
+{
+ return -ENOMEM;
+}
+
+static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr,
+ int avoid_reserve)
+{
+ return NULL;
+}
+
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
}
@@ -814,7 +1090,12 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return NULL;
}
-static inline struct hstate *page_hstate(struct page *page)
+static inline struct hstate *folio_hstate(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct hstate *size_to_hstate(unsigned long size)
{
return NULL;
}
@@ -869,11 +1150,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
@@ -911,6 +1187,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
@@ -919,8 +1199,26 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_MMU
+ return ptep_get(ptep);
+#else
+ return *ptep;
+#endif
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+}
+
+static inline void hugetlb_register_node(struct node *node)
+{
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */
@@ -937,14 +1235,65 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
-extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
-static inline __init void hugetlb_cma_check(void)
+#endif
+
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
{
+ return false;
}
#endif
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
+static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+}
+
+bool __vma_private_lock(struct vm_area_struct *vma);
+
+/*
+ * Safe version of huge_pte_offset() to check the locks. See comments
+ * above huge_pte_offset().
+ */
+static inline pte_t *
+hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
+{
+#if defined(CONFIG_HUGETLB_PAGE) && \
+ defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ /*
+ * If pmd sharing is possible, locking is needed to safely walk the
+ * hugetlb pgtables. More information can be found in the comment
+ * above huge_pte_offset() in the same file.
+ *
+ * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
+ */
+ if (__vma_shareable_lock(vma))
+ WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
+ !lockdep_is_held(
+ &vma->vm_file->f_mapping->i_mmap_rwsem));
+#endif
+ return huge_pte_offset(vma->vm_mm, addr, sz);
+}
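/*
 * Hedged usage sketch for hugetlb_walk() under rule (2.1) of the
 * huge_pte_offset() comment: hold the hugetlb vma lock across the walk so
 * a concurrent pmd unshare cannot free the pgtable page from under us.
 * The function and its use of huge_ptep_get() are illustrative.
 */
static pte_t example_read_hugetlb_pte(struct vm_area_struct *vma,
				      unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	pte_t *ptep, pte = __pte(0);

	hugetlb_vma_lock_read(vma);
	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
	if (ptep)
		pte = huge_ptep_get(ptep);
	hugetlb_vma_unlock_read(vma);
	return pte;
}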
+
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 2ad6e92f124a..e5d64b8b59c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -21,20 +21,17 @@ struct hugetlb_cgroup;
struct resv_map;
struct file_region;
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 4 pages are necessary for all the tracking information.
- * The second tail page (hpage[2]) is the fault usage cgroup.
- * The third tail page (hpage[3]) is the reservation usage cgroup.
- */
-#define HUGETLB_CGROUP_MIN_ORDER 2
-
#ifdef CONFIG_CGROUP_HUGETLB
enum hugetlb_memory_event {
HUGETLB_MAX,
HUGETLB_NR_MEMORY_EVENTS,
};
+struct hugetlb_cgroup_per_node {
+ /* hugetlb usage in pages over all hstates. */
+ unsigned long usage[HUGE_MAX_HSTATE];
+};
+
struct hugetlb_cgroup {
struct cgroup_subsys_state css;
@@ -56,56 +53,51 @@ struct hugetlb_cgroup {
/* Handle for "hugetlb.events.local" */
struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+
+ struct hugetlb_cgroup_per_node *nodeinfo[];
};
static inline struct hugetlb_cgroup *
-__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
+__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
-
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return NULL;
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
if (rsvd)
- return (struct hugetlb_cgroup *)page[3].private;
+ return folio->_hugetlb_cgroup_rsvd;
else
- return (struct hugetlb_cgroup *)page[2].private;
+ return folio->_hugetlb_cgroup;
}
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
- return __hugetlb_cgroup_from_page(page, false);
+ return __hugetlb_cgroup_from_folio(folio, false);
}
static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_rsvd(struct page *page)
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
- return __hugetlb_cgroup_from_page(page, true);
+ return __hugetlb_cgroup_from_folio(folio, true);
}
-static inline int __set_hugetlb_cgroup(struct page *page,
+static inline void __set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg, bool rsvd)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
-
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
if (rsvd)
- page[3].private = (unsigned long)h_cg;
+ folio->_hugetlb_cgroup_rsvd = h_cg;
else
- page[2].private = (unsigned long)h_cg;
- return 0;
+ folio->_hugetlb_cgroup = h_cg;
}
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, false);
+ __set_hugetlb_cgroup(folio, h_cg, false);
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, true);
+ __set_hugetlb_cgroup(folio, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -113,20 +105,39 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+ css_put(&h_cg->css);
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_get(resv_map->css);
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_put(resv_map->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
+ struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
- struct page *page);
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+ struct folio *folio);
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
@@ -138,46 +149,40 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg,
- unsigned long nr_pages);
+ unsigned long nr_pages,
+ bool region_del);
extern void hugetlb_cgroup_file_init(void) __init;
-extern void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage);
+extern void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio);
#else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg,
- unsigned long nr_pages)
+ unsigned long nr_pages,
+ bool region_del)
{
}
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_resv(struct page *page)
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
return NULL;
}
-static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_rsvd(struct page *page)
-{
- return NULL;
-}
-
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -185,6 +190,20 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
@@ -200,25 +219,25 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}
static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio)
{
}
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
unsigned long nr_pages,
- struct page *page)
+ struct folio *folio)
{
}
static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
@@ -243,8 +262,8 @@ static inline void hugetlb_cgroup_file_init(void)
{
}
-static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage)
+static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio)
{
}
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 78dd7035d1e5..db199d653dd1 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -7,6 +7,16 @@
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+enum bp_type_idx {
+ TYPE_INST = 0,
+#if defined(CONFIG_HAVE_MIXED_BREAKPOINTS_REGS)
+ TYPE_DATA = 0,
+#else
+ TYPE_DATA = 1,
+#endif
+ TYPE_MAX
+};
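/*
 * Hedged sketch of the registration API, modeled on
 * samples/hw_breakpoint/data_breakpoint.c; the watch target, handler and
 * function names are illustrative.
 */
#include <linux/perf_event.h>
#include <linux/jiffies.h>

static struct perf_event * __percpu *example_hbp;

static void example_hbp_handler(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	pr_info("watched location was accessed\n");
}

static int __init example_hbp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&jiffies;	/* illustrative target */
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

	example_hbp = register_wide_hw_breakpoint(&attr, example_hbp_handler,
						  NULL);
	if (IS_ERR((void __force *)example_hbp))
		return PTR_ERR((void __force *)example_hbp);
	return 0;
}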
+
extern int __init init_hw_breakpoint(void);
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
@@ -74,15 +84,12 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+extern bool hw_breakpoint_is_used(void);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
-int hw_breakpoint_weight(struct perf_event *bp);
-int arch_reserve_bp_slot(struct perf_event *bp);
-void arch_release_bp_slot(struct perf_event *bp);
-void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
@@ -121,6 +128,8 @@ register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline bool hw_breakpoint_is_used(void) { return false; }
+
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 8e6dd908da21..136e9842120e 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -34,7 +34,7 @@
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
* (in bits of entropy per 1024 bits of input;
- * valid values: 1 to 1024, or 0 for unknown).
+ * valid values: 1 to 1024, or 0 for maximum).
*/
struct hwrng {
const char *name;
@@ -50,6 +50,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -60,7 +61,8 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
-/** Feed random bits into the pool. */
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+extern long hwrng_yield(struct hwrng *rng);
#endif /* LINUX_HWRANDOM_H_ */
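/*
 * Hedged sketch of a minimal backend using this interface; the device name
 * and the constant "sample" are illustrative, and a real driver would read
 * its hardware FIFO instead. quality = 1024 claims full entropy per the
 * field documentation above.
 */
#include <linux/hw_random.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
			    bool wait)
{
	u32 sample = 0x12345678;	/* illustrative, NOT random */
	size_t n = min_t(size_t, max, sizeof(sample));

	memcpy(buf, &sample, n);
	return n;			/* bytes placed in buf */
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
	.quality = 1024,
};

/* in probe(): return devm_hwrng_register(dev, &example_rng); */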
diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
index cb26d02f52f3..d896713359cd 100644
--- a/include/linux/hwmon-sysfs.h
+++ b/include/linux/hwmon-sysfs.h
@@ -8,6 +8,7 @@
#define _LINUX_HWMON_SYSFS_H
#include <linux/device.h>
+#include <linux/kstrtox.h>
struct sensor_device_attribute{
struct device_attribute dev_attr;
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 363d4a814aa1..edf96f249eb5 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -44,6 +44,7 @@ enum hwmon_chip_attributes {
hwmon_chip_in_samples,
hwmon_chip_power_samples,
hwmon_chip_temp_samples,
+ hwmon_chip_beep_enable,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -58,6 +59,7 @@ enum hwmon_chip_attributes {
#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
+#define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable)
enum hwmon_temp_attributes {
hwmon_temp_enable,
@@ -85,6 +87,9 @@ enum hwmon_temp_attributes {
hwmon_temp_lowest,
hwmon_temp_highest,
hwmon_temp_reset_history,
+ hwmon_temp_rated_min,
+ hwmon_temp_rated_max,
+ hwmon_temp_beep,
};
#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
@@ -112,6 +117,9 @@ enum hwmon_temp_attributes {
#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
+#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
+#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
+#define HWMON_T_BEEP BIT(hwmon_temp_beep)
enum hwmon_in_attributes {
hwmon_in_enable,
@@ -130,6 +138,10 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
+ hwmon_in_rated_min,
+ hwmon_in_rated_max,
+ hwmon_in_beep,
+ hwmon_in_fault,
};
#define HWMON_I_ENABLE BIT(hwmon_in_enable)
@@ -148,6 +160,10 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
+#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
+#define HWMON_I_BEEP BIT(hwmon_in_beep)
+#define HWMON_I_FAULT BIT(hwmon_in_fault)
enum hwmon_curr_attributes {
hwmon_curr_enable,
@@ -166,6 +182,9 @@ enum hwmon_curr_attributes {
hwmon_curr_max_alarm,
hwmon_curr_lcrit_alarm,
hwmon_curr_crit_alarm,
+ hwmon_curr_rated_min,
+ hwmon_curr_rated_max,
+ hwmon_curr_beep,
};
#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
@@ -184,6 +203,9 @@ enum hwmon_curr_attributes {
#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
+#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
+#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
+#define HWMON_C_BEEP BIT(hwmon_curr_beep)
enum hwmon_power_attributes {
hwmon_power_enable,
@@ -215,6 +237,8 @@ enum hwmon_power_attributes {
hwmon_power_max_alarm,
hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
+ hwmon_power_rated_min,
+ hwmon_power_rated_max,
};
#define HWMON_P_ENABLE BIT(hwmon_power_enable)
@@ -246,6 +270,8 @@ enum hwmon_power_attributes {
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
+#define HWMON_P_RATED_MIN BIT(hwmon_power_rated_min)
+#define HWMON_P_RATED_MAX BIT(hwmon_power_rated_max)
enum hwmon_energy_attributes {
hwmon_energy_enable,
@@ -267,6 +293,10 @@ enum hwmon_humidity_attributes {
hwmon_humidity_max_hyst,
hwmon_humidity_alarm,
hwmon_humidity_fault,
+ hwmon_humidity_rated_min,
+ hwmon_humidity_rated_max,
+ hwmon_humidity_min_alarm,
+ hwmon_humidity_max_alarm,
};
#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
@@ -278,6 +308,10 @@ enum hwmon_humidity_attributes {
#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
+#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min)
+#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max)
+#define HWMON_H_MIN_ALARM BIT(hwmon_humidity_min_alarm)
+#define HWMON_H_MAX_ALARM BIT(hwmon_humidity_max_alarm)
enum hwmon_fan_attributes {
hwmon_fan_enable,
@@ -292,6 +326,7 @@ enum hwmon_fan_attributes {
hwmon_fan_min_alarm,
hwmon_fan_max_alarm,
hwmon_fan_fault,
+ hwmon_fan_beep,
};
#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
@@ -306,18 +341,21 @@ enum hwmon_fan_attributes {
#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
#define HWMON_F_FAULT BIT(hwmon_fan_fault)
+#define HWMON_F_BEEP BIT(hwmon_fan_beep)
enum hwmon_pwm_attributes {
hwmon_pwm_input,
hwmon_pwm_enable,
hwmon_pwm_mode,
hwmon_pwm_freq,
+ hwmon_pwm_auto_channels_temp,
};
#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp)
enum hwmon_intrusion_attributes {
hwmon_intrusion_alarm,
@@ -383,7 +421,7 @@ struct hwmon_ops {
};
/**
- * Channel information
+ * struct hwmon_channel_info - Channel information
* @type: Channel type.
* @config: Pointer to NULL-terminated list of channel parameters.
* Use for per-channel attributes.
@@ -393,27 +431,31 @@ struct hwmon_channel_info {
const u32 *config;
};
-#define HWMON_CHANNEL_INFO(stype, ...) \
- (&(struct hwmon_channel_info) { \
- .type = hwmon_##stype, \
- .config = (u32 []) { \
- __VA_ARGS__, 0 \
- } \
+#define HWMON_CHANNEL_INFO(stype, ...) \
+ (&(const struct hwmon_channel_info) { \
+ .type = hwmon_##stype, \
+ .config = (const u32 []) { \
+ __VA_ARGS__, 0 \
+ } \
})
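/*
 * Hedged usage sketch of the const-ified macro: the table entries and the
 * table itself are now fully const, matching the hwmon_chip_info change
 * below. Names are illustrative.
 */
static const struct hwmon_channel_info * const example_hwmon_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
	NULL
};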
/**
- * Chip configuration
+ * struct hwmon_chip_info - Chip configuration
* @ops: Pointer to hwmon operations.
* @info: Null-terminated list of channel information.
*/
struct hwmon_chip_info {
const struct hwmon_ops *ops;
- const struct hwmon_channel_info **info;
+ const struct hwmon_channel_info * const *info;
};
/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+/*
+ * hwmon_device_register_with_groups() and
+ * devm_hwmon_device_register_with_groups() are deprecated.
+ */
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -428,6 +470,9 @@ hwmon_device_register_with_info(struct device *dev,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
struct device *
+hwmon_device_register_for_thermal(struct device *dev, const char *name,
+ void *drvdata);
+struct device *
devm_hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
@@ -439,6 +484,9 @@ void devm_hwmon_device_unregister(struct device *dev);
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel);
+char *hwmon_sanitize_name(const char *name);
+char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
+
/**
* hwmon_is_bad_char - Is the char invalid in a hwmon name
* @ch: the char to be considered
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 38100e80360a..6ef0557b4bff 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -14,6 +14,7 @@
#include <uapi/linux/hyperv.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
@@ -23,12 +24,55 @@
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
+#include <asm/hyperv-tlfs.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
+/*
+ * Types of GPADL, which decide how the GPADL header is created.
+ *
+ * There is little difference between BUFFER and RING when PAGE_SIZE is the
+ * same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the ring buffer headers
+ * occupy PAGE_SIZE each; however, only the first HV_HYP_PAGE of a header is
+ * put into the gpadl. Therefore the number of HV_HYP_PAGEs and the index of
+ * each HV_HYP_PAGE differ between the two GPADL types. For example, if
+ * PAGE_SIZE is 64K:
+ *
+ * BUFFER:
+ *
+ * gva: |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index: 0 1 2 15 16 17 18 .. 31 32 ...
+ * | | ... | | | ... | ...
+ * v V V V V V
+ * gpadl: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index: 0 1 2 ... 15 16 17 18 .. 31 32 ...
+ *
+ * RING:
+ *
+ * | header | data | header | data |
+ * gva: |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index: 0 1 16 17 18 31 ... n n+1 n+16 ... 2n
+ * | / / / | / /
+ * | / / / | / /
+ * | / / ... / ... | / ... /
+ * | / / / | / /
+ * | / / / | / /
+ * V V V V V V v
+ * gpadl: | 4k | 4k | ... | ... | 4k | 4k | ... |
+ * index: 0 1 2 ... 16 ... n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+ HV_GPADL_BUFFER,
+ HV_GPADL_RING
+};
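
To make the RING diagram concrete, a worked example follows; the arithmetic is an editorial illustration under the stated page sizes, not part of the header:

/*
 * Assuming PAGE_SIZE = 64K and HV_HYP_PAGE_SIZE = 4K (16 hypervisor
 * pages per guest page): a BUFFER-type gpadl over n guest pages always
 * describes 16 * n entries, whereas a RING-type gpadl over one 64K
 * header page plus m 64K data pages describes only 1 + 16 * m entries,
 * because just the first 4K of the header page is included.
 */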
+
/* Single-page buffer */
struct hv_page_buffer {
u32 len;
@@ -111,7 +155,7 @@ struct hv_ring_buffer {
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
- u8 reserved2[4028];
+ u8 reserved2[PAGE_SIZE - 68];
/*
* Ring data starts here + RingDataStartOffset
@@ -120,6 +164,30 @@ struct hv_ring_buffer {
u8 buffer[];
} __packed;
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take up too much of the ring
+ * buffer space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
+ * large allocation that will be almost half wasted. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
+ 0 : sizeof(struct hv_ring_buffer))
+
+/* Calculate the proper size of a ring buffer; it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
+ (payload_sz))
+
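
Hedged worked examples for the two macros above; the figures assume sizeof(struct hv_ring_buffer) equals PAGE_SIZE, which the reserved2 padding guarantees:

/*
 * x86, PAGE_SIZE = 4K:
 *   VMBUS_RING_SIZE(128 * 1024): 128K >= 8 * 4K, so nothing is added;
 *     total is 128K, with 4K stolen for the header and 124K of data.
 *   VMBUS_RING_SIZE(16 * 1024):  16K < 32K, so 4K is added;
 *     total is PAGE_ALIGN(20K) = 20K.
 * arm64 with 64K pages:
 *   VMBUS_RING_SIZE(128 * 1024): 128K < 8 * 64K, so 64K is added;
 *     total is PAGE_ALIGN(192K) = 192K.
 */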
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
@@ -133,6 +201,10 @@ struct hv_ring_buffer_info {
* being freed while the ring buffer is being accessed.
*/
struct mutex ring_buffer_mutex;
+
+ /* Buffer that holds a copy of an incoming host packet */
+ void *pkt_buffer;
+ u32 pkt_buffer_size;
};
@@ -178,14 +250,19 @@ static inline u32 hv_get_avail_to_write_percent(
* two 16 bit quantities: major_number. minor_number.
*
* 0 . 13 (Windows Server 2008)
- * 1 . 1 (Windows 7)
- * 2 . 4 (Windows 8)
- * 3 . 0 (Windows 8 R2)
+ * 1 . 1 (Windows 7, WS2008 R2)
+ * 2 . 4 (Windows 8, WS2012)
+ * 3 . 0 (Windows 8.1, WS2012 R2)
* 4 . 0 (Windows 10)
* 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
* 5 . 1 (Windows 10 RS4)
* 5 . 2 (Windows Server 2019, RS5)
+ * 5 . 3 (Windows Server 2022)
+ *
+ * The WS2008 and WIN7 versions are listed here for
+ * completeness but are no longer supported in the
+ * Linux kernel.
*/
#define VERSION_WS2008 ((0 << 16) | (13))
@@ -197,6 +274,7 @@ static inline u32 hv_get_avail_to_write_percent(
#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
+#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -236,7 +314,7 @@ struct vmbus_channel_offer {
/*
* Pipes:
- * The following sructure is an integrated pipe protocol, which
+ * The following structure is an integrated pipe protocol, which
* is implemented on top of standard user-defined data. Pipe
* clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
* use.
@@ -290,7 +368,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
+ struct vmtransfer_page_range ranges[];
} __packed;
struct vmgpadl_packet_header {
@@ -427,6 +505,7 @@ enum vmbus_channel_message_type {
CHANNELMSG_TL_CONNECT_REQUEST = 21,
CHANNELMSG_MODIFYCHANNEL = 22,
CHANNELMSG_TL_CONNECT_RESULT = 23,
+ CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24,
CHANNELMSG_COUNT
};
@@ -483,12 +562,6 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
-static inline u32
-hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->pending_send_sz;
-}
-
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -540,6 +613,13 @@ struct vmbus_channel_open_result {
u32 status;
} __packed;
+/* Modify Channel Result parameters */
+struct vmbus_channel_modifychannel_response {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 status;
+} __packed;
+
/* Close channel parameters; */
struct vmbus_channel_close_channel {
struct vmbus_channel_message_header header;
@@ -605,8 +685,8 @@ struct vmbus_channel_initiate_contact {
u64 interrupt_page;
struct {
u8 msg_sint;
- u8 padding1[3];
- u32 padding2;
+ u8 msg_vtl;
+ u8 reserved[6];
};
};
u64 monitor_page1;
@@ -672,6 +752,7 @@ struct vmbus_channel_msginfo {
struct vmbus_channel_gpadl_torndown gpadl_torndown;
struct vmbus_channel_gpadl_created gpadl_created;
struct vmbus_channel_version_response version_response;
+ struct vmbus_channel_modifychannel_response modify_response;
} response;
u32 msgsize;
@@ -716,10 +797,41 @@ enum vmbus_device_type {
HV_UNKNOWN,
};
+/*
+ * Provides request ids for VMBus. Encapsulates guest memory
+ * addresses and stores the next available slot in req_arr
+ * to generate new ids in constant time.
+ */
+struct vmbus_requestor {
+ u64 *req_arr;
+ unsigned long *req_bitmap; /* is a given slot available? */
+ u32 size;
+ u64 next_request_id;
+ spinlock_t req_lock; /* provides atomicity */
+};
+
+#define VMBUS_NO_RQSTOR U64_MAX
+#define VMBUS_RQST_ERROR (U64_MAX - 1)
+#define VMBUS_RQST_ADDR_ANY U64_MAX
+/* NetVSC-specific */
+#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
+/* StorVSC-specific */
+#define VMBUS_RQST_INIT (U64_MAX - 2)
+#define VMBUS_RQST_RESET (U64_MAX - 3)
+
struct vmbus_device {
u16 dev_type;
guid_t guid;
bool perf_device;
+ bool allowed_in_isolated;
+};
+
+#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+
+struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
};
struct vmbus_channel {
@@ -738,9 +850,10 @@ struct vmbus_channel {
u8 monitor_bit;
bool rescind; /* got rescind msg */
+ bool rescind_ref; /* got rescind msg, got channel reference */
struct completion rescind_event;
- u32 ringbuffer_gpadlhandle;
+ struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
struct page *ringbuffer_page;
@@ -816,11 +929,11 @@ struct vmbus_channel {
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
- * The support for sub-channels is implemented as an extention
+ * The support for sub-channels is implemented as an extension
* to the current infrastructure.
* The initial offer is considered the primary channel and this
* offer message will indicate if the host supports sub-channels.
- * The guest is free to ask for sub-channels to be offerred and can
+ * The guest is free to ask for sub-channels to be offered and can
* open these sub-channels as a normal "primary" channel. However,
* all sub-channels will have the same type and instance guids as the
* primary channel. Requests sent on a given channel will result in a
@@ -876,7 +989,7 @@ struct vmbus_channel {
* mechanism improves throughput by:
*
* A) Making the host more efficient - each time it wakes up,
- * potentially it will process morev number of packets. The
+ * potentially it will process a larger number of packets. The
* monitor latency allows a batch to build up.
* B) By deferring the hypercall to signal, we will also minimize
* the interrupts.
@@ -884,7 +997,7 @@ struct vmbus_channel {
* Clearly, these optimizations improve throughput at the expense of
* latency. Furthermore, since the channel is shared for both
* control and data messages, control messages currently suffer
- * unnecessary latency adversley impacting performance and boot
+ * unnecessary latency adversely impacting performance and boot
* time. To fix this issue, permit tagging the channel as being
* in "low latency" mode. In this mode, we will bypass the monitor
* mechanism.
@@ -940,12 +1053,49 @@ struct vmbus_channel {
u32 fuzz_testing_interrupt_delay;
u32 fuzz_testing_message_delay;
+ /* callback to generate a request ID from a request address */
+ u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
+ /* callback to retrieve a request address from a request ID */
+ u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
+
+ /* request/transaction ids for VMBus */
+ struct vmbus_requestor requestor;
+ u32 rqstor_size;
+
+ /* The max size of a packet on this channel */
+ u32 max_pkt_size;
};
+#define lock_requestor(channel, flags) \
+do { \
+ struct vmbus_requestor *rqstor = &(channel)->requestor; \
+ \
+ spin_lock_irqsave(&rqstor->req_lock, flags); \
+} while (0)
+
+static __always_inline void unlock_requestor(struct vmbus_channel *channel,
+ unsigned long flags)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+}
+
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+
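A hedged sketch of the id/address round trip these helpers implement; the real callers live in the VMBus core send and completion paths, and rqst/desc here are illustrative:

/* send path: trade the guest address of a request for a transaction id */
u64 trans_id = vmbus_next_request_id(channel, (u64)(unsigned long)rqst);

if (trans_id == VMBUS_RQST_ERROR)
	return -EAGAIN;	/* no free slot in the requestor */
/* ... place trans_id in the packet descriptor, write to the ring ... */

/* completion path: translate the host-supplied id back to the address */
u64 rqst_addr = vmbus_request_addr(channel, desc->trans_id);
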
+static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
- return !!(c->offermsg.offer.chn_flags &
- VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+ return is_hvsock_offer(&c->offermsg);
}
static inline bool is_sub_channel(const struct vmbus_channel *c)
@@ -990,16 +1140,6 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
c->outbound.ring_buffer->pending_send_sz = size;
}
-static inline void set_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = true;
-}
-
-static inline void clear_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = false;
-}
-
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);
@@ -1014,19 +1154,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-/*
- * Check if sub-channels have already been offerred. This API will be useful
- * when the driver is unloaded after establishing sub-channels. In this case,
- * when the driver is re-loaded, the driver would have to check if the
- * subchannels have already been established before attempting to request
- * the creation of sub-channels.
- * This function returns TRUE to indicate that subchannels have already been
- * created.
- * This function should be invoked after setting the callback function for
- * sub-channel creation.
- */
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
-
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@@ -1082,6 +1209,13 @@ extern int vmbus_open(struct vmbus_channel *channel,
extern void vmbus_close(struct vmbus_channel *channel);
+extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ u64 *trans_id,
+ enum vmbus_packet_type type,
+ u32 flags);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
@@ -1106,10 +1240,10 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
- u32 *gpadl_handle);
+ struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
- u32 gpadl_handle);
+ struct vmbus_gpadl *gpadl);
void vmbus_reset_channel_cb(struct vmbus_channel *channel);
@@ -1125,9 +1259,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
u32 *buffer_actual_len,
u64 *requestid);
-
-extern void vmbus_ontimer(unsigned long data);
-
/* Base driver object */
struct hv_driver {
const char *name;
@@ -1159,7 +1290,7 @@ struct hv_driver {
} dynids;
int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
- int (*remove)(struct hv_device *);
+ void (*remove)(struct hv_device *dev);
void (*shutdown)(struct hv_device *);
int (*suspend)(struct hv_device *);
@@ -1178,10 +1309,16 @@ struct hv_device {
u16 device_id;
struct device device;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
struct vmbus_channel *channel;
struct kset *channels_kset;
+ struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
@@ -1189,10 +1326,7 @@ struct hv_device {
};
-static inline struct hv_device *device_to_hv_device(struct device *d)
-{
- return container_of(d, struct hv_device, device);
-}
+#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
@@ -1221,6 +1355,8 @@ struct hv_ring_buffer_debug_info {
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
+
/* Vmbus interface */
#define vmbus_driver_register(driver) \
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1370,12 +1506,14 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
- * Linux doesn't support the 3 devices: the first two are for
- * Automatic Virtual Machine Activation, and the third is for
- * Remote Desktop Virtualization.
+ * Linux doesn't support these 4 devices: the first two are for
+ * Automatic Virtual Machine Activation, the third is for
+ * Remote Desktop Virtualization, and the fourth is for Initial
+ * Machine Configuration (IMC), used only by Windows guests.
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
* {3375baf4-9e15-4b30-b765-67acb10d607b}
* {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ * {c376c1c3-d276-48d2-90a9-c04748072c60}
*/
#define HV_AVMA1_GUID \
@@ -1390,6 +1528,10 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+#define HV_IMC_GUID \
+ .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
+ 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
+
/*
* Common header for Hyper-V ICs
*/
@@ -1400,6 +1542,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
#define ICMSGTYPE_SHUTDOWN 3
#define ICMSGTYPE_TIMESYNC 4
#define ICMSGTYPE_VSS 5
+#define ICMSGTYPE_FCOPY 7
#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
@@ -1443,11 +1586,17 @@ struct icmsg_hdr {
u8 reserved[2];
} __packed;
+#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
+#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
+#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
+ (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
+ (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
+
struct icmsg_negotiate {
u16 icframe_vercnt;
u16 icmsg_vercnt;
u32 reserved;
- struct ic_version icversion_data[1]; /* any size array */
+ struct ic_version icversion_data[]; /* any size array */
} __packed;
struct shutdown_msg_data {
@@ -1497,8 +1646,13 @@ struct hyperv_service_callback {
void (*callback)(void *context);
};
+struct hv_dma_range {
+ dma_addr_t dma;
+ u32 mapping_size;
+};
+
#define MAX_SRV_VER 0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
@@ -1514,7 +1668,7 @@ extern __u32 vmbus_proto_version;
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id);
-int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
@@ -1570,6 +1724,11 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
return (desc->len8 << 3) - (desc->offset8 << 3);
}
+/* Get packet length associated with descriptor */
+static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
+{
+ return desc->len8 << 3;
+}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
@@ -1580,10 +1739,6 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
void hv_pkt_iter_close(struct vmbus_channel *channel);
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
const struct vmpacket_descriptor *pkt)
@@ -1630,4 +1785,23 @@ struct hyperv_pci_block_ops {
extern struct hyperv_pci_block_ops hvpci_block_ops;
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+ phys_addr_t paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
+
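Illustrative values for the conversion helpers above, assuming a 64K-page arm64 guest with the usual 4K hypervisor page:

/*
 * NR_HV_HYP_PAGES_IN_PAGE = 64K / 4K = 16
 * HVPFN_UP(4097)          = 2	(byte count rounded up to 4K pages)
 * HVPFN_DOWN(65536)       = 16
 * offset_in_hvpage(p)     = (unsigned long)p & 4095
 * page_to_hvpfn(page)     = page_to_pfn(page) * 16
 */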
#endif /* _HYPERV_H */
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index fc08b433c856..9efbc54e35e5 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -32,4 +32,12 @@ static inline bool jailhouse_paravirt(void)
#endif /* !CONFIG_X86 */
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ return jailhouse_paravirt();
+}
+
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index d03071732db4..7c522fdd9ea7 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -53,6 +53,20 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
@@ -64,6 +78,7 @@ struct i2c_algo_pca_data {
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
new file mode 100644
index 000000000000..4d5da161c225
--- /dev/null
+++ b/include/linux/i2c-atr.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Based on i2c-mux.h
+ */
+
+#ifndef _LINUX_I2C_ATR_H
+#define _LINUX_I2C_ATR_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_atr;
+
+/**
+ * struct i2c_atr_ops - Callbacks from ATR to the device driver.
+ * @attach_client: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_client: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
+ *
+ * attach_client returns 0 on success, a negative error code otherwise.
+ */
+struct i2c_atr_ops {
+ int (*attach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client, u16 alias);
+ void (*detach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client);
+};
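
A minimal, hypothetical implementation sketch of these callbacks; my_atr and the my_hw_*() mapping helpers are assumptions, not part of this API:

static int my_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
				const struct i2c_client *client, u16 alias)
{
	struct my_atr *priv = i2c_atr_get_driver_data(atr);

	/* program: alias seen upstream -> client->addr on child bus chan_id */
	return my_hw_map_alias(priv, chan_id, alias, client->addr);
}

static void my_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
				 const struct i2c_client *client)
{
	struct my_atr *priv = i2c_atr_get_driver_data(atr);

	my_hw_unmap_alias(priv, chan_id, client->addr);
}

static const struct i2c_atr_ops my_atr_ops = {
	.attach_client = my_atr_attach_client,
	.detach_client = my_atr_detach_client,
};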
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or ERR_PTR
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr);
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @adapter_parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ *
+ * After calling this function a new i2c bus will appear. Adding and removing
+ * devices on the downstream bus will result in calls to the
+ * &i2c_atr_ops->attach_client and &i2c_atr_ops->detach_client callbacks for the
+ * driver to assign an alias to the device.
+ *
+ * The adapter's fwnode is set to @bus_handle, or if @bus_handle is NULL the
+ * function looks for a child node whose 'reg' property matches the chan_id
+ * under the i2c-atr device's 'i2c-atr' node.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
+ struct device *adapter_parent,
+ struct fwnode_handle *bus_handle);
+
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter(). If no I2C bus has been added
+ * this function is a no-op.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id);
+
+/**
+ * i2c_atr_set_driver_data - Set private driver data to the i2c-atr instance.
+ * @atr: The I2C ATR
+ * @data: Pointer to the data to store
+ */
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data);
+
+/**
+ * i2c_atr_get_driver_data - Get the stored driver data.
+ * @atr: The I2C ATR
+ *
+ * Return: Pointer to the stored data
+ */
+void *i2c_atr_get_driver_data(struct i2c_atr *atr);
+
+#endif /* _LINUX_I2C_ATR_H */
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 1e4e0de4ef8b..ced1c6ead52a 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -30,12 +30,16 @@ struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
-#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
-int of_i2c_setup_smbus_alert(struct i2c_adapter *adap);
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE)
+struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter);
+void i2c_free_slave_host_notify_device(struct i2c_client *client);
#else
-static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
+static inline struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void i2c_free_slave_host_notify_device(struct i2c_client *client)
{
- return 0;
}
#endif
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index fc55ea41d323..5e6cd43a6dbd 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -11,19 +11,21 @@
#define _LINUX_I2C_H
#include <linux/acpi.h> /* for acpi_handle */
+#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/rtmutex.h>
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
#include <linux/swab.h> /* for swab16 */
#include <uapi/linux/i2c.h>
-extern struct bus_type i2c_bus_type;
-extern struct device_type i2c_adapter_type;
-extern struct device_type i2c_client_type;
+extern const struct bus_type i2c_bus_type;
+extern const struct device_type i2c_adapter_type;
+extern const struct device_type i2c_client_type;
/* --- General options ------------------------------------------------ */
@@ -51,6 +53,9 @@ struct module;
struct property_entry;
#if IS_ENABLED(CONFIG_I2C)
+/* Return the frequency mode string based on the bus frequency */
+const char *i2c_freq_mode_string(u32 bus_freq_hz);
+
/*
* The master routines are the ones normally used to transmit data to devices
* on a bus (or read from them). Apart from two basic transfer functions to
@@ -144,6 +149,7 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count);
s32 i2c_smbus_read_byte(const struct i2c_client *client);
s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
@@ -183,6 +189,7 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 *values);
int i2c_get_device_id(const struct i2c_client *client,
struct i2c_device_identity *id);
+const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client);
#endif /* I2C */
/**
@@ -218,10 +225,18 @@ enum i2c_alert_protocol {
};
/**
+ * enum i2c_driver_flags - Flags for an I2C device driver
+ *
+ * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe
+ */
+enum i2c_driver_flags {
+ I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0),
+};
+
+/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @probe: Callback for device binding - soon to be deprecated
- * @probe_new: New callback for device binding
+ * @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -231,6 +246,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
+ * @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -256,13 +272,9 @@ struct i2c_driver {
unsigned int class;
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
- int (*remove)(struct i2c_client *client);
+ int (*probe)(struct i2c_client *client);
+ void (*remove)(struct i2c_client *client);
- /* New driver model interface to aid the seamless removal of the
- * current probe()'s, more commonly unused than used second parameter.
- */
- int (*probe_new)(struct i2c_client *client);
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *client);
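
A hedged sketch of a driver written against the updated callbacks (the my_* names are illustrative); probe() now takes only the client, and remove() returns void:

static int my_probe(struct i2c_client *client)
{
	/* the id formerly passed to probe() is still reachable if needed */
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	dev_info(&client->dev, "bound as %s\n", id ? id->name : "unknown");
	return 0;
}

static void my_remove(struct i2c_client *client)
{
	/* nothing to undo in this sketch */
}

static struct i2c_driver my_driver = {
	.driver	= { .name = "my-driver" },
	.probe	= my_probe,
	.remove	= my_remove,
};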
@@ -289,6 +301,8 @@ struct i2c_driver {
int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
+
+ u32 flags;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -306,6 +320,8 @@ struct i2c_driver {
* userspace_devices list
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
* calls it to pass on slave events to the slave driver.
+ * @devres_group_id: id of the devres group that will be created for resources
+ * acquired when probing this device.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -334,17 +350,19 @@ struct i2c_client {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
+ void *devres_group_id; /* ID of probe devres group */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-struct i2c_client *i2c_verify_client(struct device *dev);
struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
+const void *i2c_get_match_data(const struct i2c_client *client);
+
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
- struct device * const dev = container_of(kobj, struct device, kobj);
+ struct device * const dev = kobj_to_dev(kobj);
return to_i2c_client(dev);
}
@@ -360,7 +378,6 @@ static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
/* I2C slave support */
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
@@ -371,13 +388,10 @@ enum i2c_slave_event {
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
int i2c_slave_unregister(struct i2c_client *client);
+int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
bool i2c_detect_slave_mode(struct device *dev);
-
-static inline int i2c_slave_event(struct i2c_client *client,
- enum i2c_slave_event event, u8 *val)
-{
- return client->slave_cb(client, event, val);
-}
#else
static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
#endif
@@ -391,7 +405,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @platform_data: stored in i2c_client.dev.platform_data
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
- * @properties: additional device properties for the device
+ * @swnode: software node for the device
* @resources: resources associated with the device
* @num_resources: number of resources in the @resources array
* @irq: stored in i2c_client.irq
@@ -415,7 +429,7 @@ struct i2c_board_info {
void *platform_data;
struct device_node *of_node;
struct fwnode_handle *fwnode;
- const struct property_entry *properties;
+ const struct software_node *swnode;
const struct resource *resources;
unsigned int num_resources;
int irq;
@@ -471,6 +485,13 @@ i2c_new_ancillary_device(struct i2c_client *client,
u16 default_addr);
void i2c_unregister_device(struct i2c_client *client);
+
+struct i2c_client *i2c_verify_client(struct device *dev);
+#else
+static inline struct i2c_client *i2c_verify_client(struct device *dev)
+{
+ return NULL;
+}
#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -514,7 +535,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
*
* The return codes from the ``master_xfer{_atomic}`` fields should indicate the
* type of error code that occurred during the transfer, as documented in the
- * Kernel Documentation file Documentation/i2c/fault-codes.rst.
+ * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the
+ * number of messages executed should be returned.
*/
struct i2c_algorithm {
/*
@@ -687,6 +709,8 @@ struct i2c_adapter_quirks {
#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
+/* adapter cannot do repeated START */
+#define I2C_AQ_NO_REP_START BIT(7)
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
@@ -721,6 +745,9 @@ struct i2c_adapter {
const struct i2c_adapter_quirks *quirks;
struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
+
+ struct dentry *debugfs;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -825,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
@@ -844,6 +870,7 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
*/
#if IS_ENABLED(CONFIG_I2C)
int i2c_add_adapter(struct i2c_adapter *adap);
+int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter);
void i2c_del_adapter(struct i2c_adapter *adap);
int i2c_add_numbered_adapter(struct i2c_adapter *adap);
@@ -904,7 +931,7 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
{
- return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+ return (msg->addr << 1) | (msg->flags & I2C_M_RD);
}
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
@@ -936,15 +963,33 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#endif /* I2C */
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return i2c_find_device_by_fwnode(of_fwnode_handle(node));
+}
/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
+}
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
+}
const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
@@ -992,22 +1037,30 @@ struct acpi_resource_i2c_serialbus;
#if IS_ENABLED(CONFIG_ACPI)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
+int i2c_acpi_client_count(struct acpi_device *adev);
u32 i2c_acpi_find_bus_speed(struct device *dev);
-struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
- struct i2c_board_info *info);
+struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
+ int index,
+ struct i2c_board_info *info);
struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+bool i2c_acpi_waive_d0_probe(struct device *dev);
#else
static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c)
{
return false;
}
+static inline int i2c_acpi_client_count(struct acpi_device *adev)
+{
+ return 0;
+}
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
-static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
- int index, struct i2c_board_info *info)
+static inline struct i2c_client *i2c_acpi_new_device_by_fwnode(
+ struct fwnode_handle *fwnode, int index,
+ struct i2c_board_info *info)
{
return ERR_PTR(-ENODEV);
}
@@ -1015,6 +1068,17 @@ static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle ha
{
return NULL;
}
+static inline bool i2c_acpi_waive_d0_probe(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_ACPI */
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index,
+ struct i2c_board_info *info)
+{
+ return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), index, info);
+}
+
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
index 73b0982cc519..ad59a4ae60d1 100644
--- a/include/linux/i3c/ccc.h
+++ b/include/linux/i3c/ccc.h
@@ -132,7 +132,7 @@ struct i3c_ccc_dev_desc {
struct i3c_ccc_defslvs {
u8 count;
struct i3c_ccc_dev_desc master;
- struct i3c_ccc_dev_desc slaves[0];
+ struct i3c_ccc_dev_desc slaves[];
} __packed;
/**
@@ -240,7 +240,7 @@ struct i3c_ccc_bridged_slave_desc {
*/
struct i3c_ccc_setbrgtgt {
u8 count;
- struct i3c_ccc_bridged_slave_desc bslaves[0];
+ struct i3c_ccc_bridged_slave_desc bslaves[];
} __packed;
/**
@@ -318,7 +318,7 @@ enum i3c_ccc_setxtime_subcmd {
*/
struct i3c_ccc_setxtime {
u8 subcmd;
- u8 data[0];
+ u8 data[];
} __packed;
#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index de102e4418ab..e119f11948ef 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -18,17 +18,18 @@
/**
* enum i3c_error_code - I3C error codes
*
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ *
* These are the standard error codes as defined by the I3C specification.
* When -EIO is returned by the i3c_device_do_priv_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
* &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
*
- * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
- * related
- * @I3C_ERROR_M0: M0 error
- * @I3C_ERROR_M1: M1 error
- * @I3C_ERROR_M2: M2 error
*/
enum i3c_error_code {
I3C_ERROR_UNKNOWN = 0,
@@ -53,6 +54,7 @@ enum i3c_hdr_mode {
* struct i3c_priv_xfer - I3C SDR private transfer
* @rnw: encodes the transfer direction. true for a read, false for a write
* @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
* @data: input/output buffer
* @data.in: input buffer. Must point to a DMA-able buffer
* @data.out: output buffer. Must point to a DMA-able buffer
@@ -61,6 +63,7 @@ enum i3c_hdr_mode {
struct i3c_priv_xfer {
u8 rnw;
u16 len;
+ u16 actual_len;
union {
void *in;
const void *out;
@@ -95,7 +98,7 @@ enum i3c_dcr {
/**
* struct i3c_device_info - I3C device information
- * @pid: Provisional ID
+ * @pid: Provisioned ID
* @bcr: Bus Characteristic Register
* @dcr: Device Characteristic Register
* @static_addr: static/I2C address
@@ -176,7 +179,7 @@ struct i3c_device;
struct i3c_driver {
struct device_driver driver;
int (*probe)(struct i3c_device *dev);
- int (*remove)(struct i3c_device *dev);
+ void (*remove)(struct i3c_device *dev);
const struct i3c_device_id *id_table;
};
@@ -186,7 +189,14 @@ static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
}
struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
-struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @__dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+#define dev_to_i3cdev(__dev) container_of_const(__dev, struct i3c_device, dev)
const struct i3c_device_id *
i3c_device_match_id(struct i3c_device *i3cdev,
@@ -287,13 +297,16 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
module_driver(__i3cdrv, \
i3c_i2c_driver_register, \
- i3c_i2c_driver_unregister)
+ i3c_i2c_driver_unregister, \
+ __i2cdrv)
int i3c_device_do_priv_xfers(struct i3c_device *dev,
struct i3c_priv_xfer *xfers,
int nxfers);
-void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+int i3c_device_do_setdasa(struct i3c_device *dev);
+
+void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info);
struct i3c_ibi_payload {
unsigned int len;
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 9cb39d901cd5..0ca27dd86956 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -22,9 +22,16 @@
#define I3C_BROADCAST_ADDR 0x7e
#define I3C_MAX_ADDR GENMASK(6, 0)
+struct i2c_client;
+
+/* Notifier actions. The notifier call data is the struct i3c_bus. */
+enum {
+ I3C_NOTIFY_BUS_ADD,
+ I3C_NOTIFY_BUS_REMOVE,
+};
+
struct i3c_master_controller;
struct i3c_bus;
-struct i2c_device;
struct i3c_device;
/**
@@ -69,7 +76,6 @@ struct i2c_dev_boardinfo {
/**
* struct i2c_dev_desc - I2C device descriptor
* @common: common part of the I2C device descriptor
- * @boardinfo: pointer to the boardinfo attached to this I2C device
* @dev: I2C device object registered to the I2C framework
* @addr: I2C device address
* @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
@@ -85,7 +91,6 @@ struct i2c_dev_boardinfo {
*/
struct i2c_dev_desc {
struct i3c_i2c_dev_desc common;
- const struct i2c_dev_boardinfo *boardinfo;
struct i2c_client *dev;
u16 addr;
u8 lvr;
@@ -129,6 +134,7 @@ struct i3c_ibi_slot {
* rejected by the master
* @num_slots: number of IBI slots reserved for this device
* @enabled: reflect the IBI status
+ * @wq: workqueue used to execute IBI handlers.
* @handler: IBI handler specified at i3c_device_request_ibi() call time. This
* handler will be called from the controller workqueue, and as such
* is allowed to sleep (though it is recommended to process the IBI
@@ -151,6 +157,7 @@ struct i3c_device_ibi_info {
unsigned int max_payload_len;
unsigned int num_slots;
unsigned int enabled;
+ struct workqueue_struct *wq;
void (*handler)(struct i3c_device *dev,
const struct i3c_ibi_payload *payload);
};
@@ -166,7 +173,7 @@ struct i3c_device_ibi_info {
* assigned a dynamic address by the master. Will be used during
* bus initialization to assign it a specific dynamic address
* before starting DAA (Dynamic Address Assignment)
- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
* that may be used to attach boardinfo to i3c_dev_desc when the device
* does not have a static address
* @of_node: optional DT node in case the device has been described in the DT
@@ -426,6 +433,8 @@ struct i3c_bus {
* for a future IBI
* This method is mandatory only if ->request_ibi is not
* NULL.
+ * @enable_hotjoin: enable hot join event detect.
+ * @disable_hotjoin: disable hot join event detect.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -452,6 +461,8 @@ struct i3c_master_controller_ops {
int (*disable_ibi)(struct i3c_dev_desc *dev);
void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot);
+ int (*enable_hotjoin)(struct i3c_master_controller *master);
+ int (*disable_hotjoin)(struct i3c_master_controller *master);
};
/**
@@ -465,11 +476,12 @@ struct i3c_master_controller_ops {
* @ops: master operations. See &struct i3c_master_controller_ops
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hotjoin
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
* @bus: I3C bus exposed by this master
- * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * @wq: workqueue which can be used by master
* drivers if they need to postpone operations that need to take place
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
@@ -487,6 +499,7 @@ struct i3c_master_controller {
const struct i3c_master_controller_ops *ops;
unsigned int secondary : 1;
unsigned int init_done : 1;
+ unsigned int hotjoin: 1;
struct {
struct list_head i3c;
struct list_head i2c;
@@ -542,7 +555,9 @@ int i3c_master_register(struct i3c_master_controller *master,
struct device *parent,
const struct i3c_master_controller_ops *ops,
bool secondary);
-int i3c_master_unregister(struct i3c_master_controller *master);
+void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
/**
* i3c_dev_get_master_data() - get master private data attached to an I3C
@@ -652,4 +667,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+ void *data);
+int i3c_register_notifier(struct notifier_block *nb);
+int i3c_unregister_notifier(struct notifier_block *nb);
+
#endif /* I3C_MASTER_H */
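
A hedged listener sketch for the new notifier hooks; per the comment above the action enum, the call data is the struct i3c_bus, and the my_* names are illustrative:

static int my_i3c_bus_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct i3c_bus *bus = data;	/* call data is the bus itself */

	pr_debug("i3c bus event %lu on bus %p\n", action, bus);

	switch (action) {
	case I3C_NOTIFY_BUS_ADD:
		/* start tracking the new bus */
		break;
	case I3C_NOTIFY_BUS_REMOVE:
		/* drop any state tied to the bus */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_i3c_nb = {
	.notifier_call = my_i3c_bus_notify,
};

/* registered/unregistered at init/exit time: */
i3c_register_notifier(&my_i3c_nb);
i3c_unregister_notifier(&my_i3c_nb);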
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0261e2fb3636..95b07f8b77fe 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -3,6 +3,7 @@
#define _LINUX_I8042_H
+#include <linux/errno.h>
#include <linux/types.h>
/*
diff --git a/include/linux/i8254.h b/include/linux/i8254.h
new file mode 100644
index 000000000000..a675c309232b
--- /dev/null
+++ b/include/linux/i8254.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) William Breathitt Gray */
+#ifndef _I8254_H_
+#define _I8254_H_
+
+struct device;
+struct regmap;
+
+/**
+ * struct i8254_regmap_config - Configuration for the register map of an i8254
+ * @parent: parent device
+ * @map: regmap for the i8254
+ */
+struct i8254_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+};
+
+int devm_i8254_regmap_register(struct device *dev, const struct i8254_regmap_config *config);
+
+#endif /* _I8254_H_ */
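
A hedged probe-time usage sketch; the regmap would be prepared by the driver itself (for example via a devm_regmap_init_mmio() call), which is an assumption rather than something this header provides:

static int my_counter_probe(struct platform_device *pdev)
{
	struct i8254_regmap_config config = {
		.parent	= &pdev->dev,
		.map	= my_regmap,	/* hypothetical, built by the driver */
	};

	return devm_i8254_regmap_register(&pdev->dev, &config);
}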
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 1b3371ae8193..e3b3b0fa2a8f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -3,6 +3,7 @@
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
+#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
-#if IS_BUILTIN(CONFIG_IPV6)
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
-static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+#if IS_BUILTIN(CONFIG_IPV6)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
{
- icmp6_send(skb, type, code, info, NULL);
+ icmp6_send(skb, type, code, info, NULL, parm);
}
static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
{
@@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
return 0;
}
#else
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
#endif
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ __icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
-#define icmpv6_ndo_send icmpv6_send
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ __icmpv6_send(skb_in, type, code, info, &parm);
+}
#endif
#else
@@ -65,17 +79,22 @@ extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
-extern void icmpv6_param_prob(struct sk_buff *skb,
- u8 code, int pos);
+extern void icmpv6_param_prob_reason(struct sk_buff *skb,
+ u8 code, int pos,
+ enum skb_drop_reason reason);
struct flowi6;
struct in6_addr;
-extern void icmpv6_flow_init(struct sock *sk,
- struct flowi6 *fl6,
- u8 type,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- int oif);
+
+void icmpv6_flow_init(const struct sock *sk, struct flowi6 *fl6, u8 type,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int oif);
+
+static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+{
+ icmpv6_param_prob_reason(skb, code, pos,
+ SKB_DROP_REASON_NOT_SPECIFIED);
+}
static inline bool icmpv6_is_err(int type)
{
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index a254841bd315..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1628 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blk-mq.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-/*
- * Probably not wise to fiddle with these
- */
-#define SUPPORT_VLB_SYNC 1
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
- void *special;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
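
These constants are jiffies values meant to bound polling loops. A hedged
sketch of the usual pattern they feed, assuming kernel context (the ready
predicate is hypothetical; time_after(), msleep() and jiffies_to_msecs()
are standard kernel helpers):

/* Illustrative only: poll until ready or WAIT_WORSTCASE expires. */
static int example_wait_ready(ide_drive_t *drive)
{
	unsigned long timeout = jiffies + WAIT_WORSTCASE;

	while (!example_drive_ready(drive)) {	/* hypothetical predicate */
		if (time_after(jiffies, timeout))
			return -EBUSY;
		msleep(jiffies_to_msecs(WAIT_MIN_SLEEP));
	}
	return 0;
}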
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
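
Callers pass the base of the eight taskfile ports plus a separate control
address; for the classic primary ISA channel those are 0x1f0 and 0x3f6. A
hedged usage sketch:

struct ide_hw hw = { 0 };

ide_std_init_ports(&hw, 0x1f0, 0x3f6);
/* hw.io_ports.data_addr is now 0x1f0, status/command 0x1f7, ctl 0x3f6 */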
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = BIT(0),
- IDE_SFLAG_RECALIBRATE = BIT(1),
- IDE_SFLAG_SET_MULTMODE = BIT(2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = BIT(1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = BIT(2),
- IDE_VALID_LBAL = BIT(3),
- IDE_VALID_LBAM = BIT(4),
- IDE_VALID_LBAH = BIT(5),
- IDE_VALID_DEVICE = BIT(6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = BIT(0),
- IDE_TFLAG_WRITE = BIT(1),
- IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
- IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = BIT(4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = BIT(5),
- IDE_TFLAG_FS = BIT(6),
- IDE_TFLAG_MULTI_PIO = BIT(7),
- IDE_TFLAG_SET_XFER = BIT(8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = BIT(0),
- IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
- IDE_FTFLAG_OUT_DATA = BIT(2),
- IDE_FTFLAG_IN_DATA = BIT(3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = BIT(0),
- PC_FLAG_SUPPRESS_ERROR = BIT(1),
- PC_FLAG_WAIT_FOR_DSC = BIT(2),
- PC_FLAG_DMA_OK = BIT(3),
- PC_FLAG_DMA_IN_PROGRESS = BIT(4),
- PC_FLAG_DMA_ERROR = BIT(5),
- PC_FLAG_WRITING = BIT(6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
- * these are more or less driver-specific, and some of them are subject
- * to change or removal later.
- */
- unsigned long timeout;
-};
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
- int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = BIT(0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = BIT(1),
- /* Drive is a pre-ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = BIT(2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = BIT(6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = BIT(7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
- IDE_AFLAG_VERTOS_300_SSD = BIT(9),
- IDE_AFLAG_VERTOS_600_ESD = BIT(10),
- IDE_AFLAG_SANYO_3CD = BIT(11),
- IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
- IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
- IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = BIT(15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = BIT(16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = BIT(17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = BIT(18),
- /* 0 when the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = BIT(19),
- /* Device already opened */
- IDE_AFLAG_BUSY = BIT(20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = BIT(21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = BIT(22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = BIT(23),
-
- IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = BIT(0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = BIT(1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = BIT(2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = BIT(3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = BIT(4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = BIT(5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = BIT(6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = BIT(7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = BIT(8),
- IDE_DFLAG_NOPROBE = BIT(9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = BIT(10),
- /* needed for removable devices */
- IDE_DFLAG_ATTACH = BIT(11),
- IDE_DFLAG_FORCED_GEOM = BIT(12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = BIT(13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = BIT(14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = BIT(15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = BIT(16),
- /* power management told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = BIT(17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = BIT(18),
- IDE_DFLAG_POST_RESET = BIT(19),
- IDE_DFLAG_UDMA33_WARNED = BIT(20),
- IDE_DFLAG_LBA48 = BIT(21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = BIT(22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = BIT(23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
- IDE_DFLAG_LBA = BIT(25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = BIT(26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = BIT(27),
- IDE_DFLAG_MEDIA_CHANGED = BIT(28),
- /* write protect */
- IDE_DFLAG_WP = BIT(29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
- IDE_DFLAG_NIEN_QUIRK = BIT(31),
-};
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- bool (*prep_rq)(struct ide_drive_s *, struct request *);
-
- struct blk_mq_tag_set tag_set;
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
- u8 dn; /* now in widespread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- bool sense_rq_active;
- struct request *sense_rq;
- struct request_sense sense_data;
-
- /* async sense insertion */
- struct work_struct rq_work;
- struct list_head rq_list;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on hba specifics
- * @pre_reset: chipset specific changes to default for device-hba resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
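
A host driver fills in only the hooks its hardware needs and leaves the
rest NULL. A hedged skeleton using the ide_hwif_t typedef declared further
down (the myhost_* names are illustrative, not from any real driver):

static void myhost_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	/* program the chipset timing registers for drive->pio_mode */
}

static const struct ide_port_ops myhost_port_ops = {
	.set_pio_mode	= myhost_set_pio_mode,
	/* unset hooks are simply skipped by the core */
};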
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only required to be
- * implemented for the SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = BIT(0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
- char name[6]; /* name of interface, eg. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC BIT(0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
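
Concretely, ide_devset_get(io_32bit, io_32bit) and its setter counterpart
expand to plain accessors; this is what the preprocessor produces for the
io_32bit pair, modulo whitespace:

static int get_io_32bit(ide_drive_t *drive)
{
	return drive->io_32bit;
}

static int set_io_32bit(ide_drive_t *drive, int arg)
{
	drive->io_32bit = arg;
	return 0;
}

IDE_DEVSET(io_32bit, DS_SYNC, get_io_32bit, set_io_32bit) then emits a
static const struct ide_devset ide_devset_io_32bit wrapping the pair.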
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_ro_field(_name, _field) \
-ide_devset_get(_name, _field); \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- int (*show)(struct seq_file *, void *);
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-int ide_capacity_proc_show(struct seq_file *m, void *v);
-int ide_geometry_proc_show(struct seq_file *m, void *v);
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = BIT(0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = BIT(1),
- /* packet commands handling */
- IDE_DBG_PC = BIT(2),
- /* request handling */
- IDE_DBG_RQ = BIT(3),
- /* driver probing/setup */
- IDE_DBG_PROBE = BIT(4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
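
The macro reads a variable named drive from the caller's scope and keys off
drive->debug_mask. A hedged usage sketch (DRV_NAME and the surrounding
function are illustrative):

#define DRV_NAME "ide-example"

static void example_queue_rq(ide_drive_t *drive, struct request *rq)
{
	__ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x", rq->cmd_flags);
}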
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- * - ide_stopped : In this case, the core calls us back again unless
- * the step has been set to IDE_PM_COMPLETED.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless step have been
- * set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
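
So a suspend walks IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY and a resume walks
IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA, each ending at
IDE_PM_COMPLETED. A hedged sketch of the flow the comment above describes,
where pm points at the request's ide_pm_state (simplified; the real core is
interrupt-driven rather than a literal loop):

while (pm->pm_step != IDE_PM_COMPLETED) {
	ide_startstop_t ret = ide_start_power_step(drive, rq);

	if (ret == ide_started)
		break;	/* channel busy; the IRQ path resumes the walk */
	ide_complete_power_step(drive, rq);	/* advances pm->pm_step */
}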
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
-
-/*
- * Subdrivers support.
- *
- * The gen_driver.owner field should be set to the module owner of this driver.
- * The gen_driver.name field should be set to the name of this driver.
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-void ide_insert_request_head(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset(ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */
- REQ_IDETAPE_READ = BIT(2),
- REQ_IDETAPE_WRITE = BIT(3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three(ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(struct timer_list *t);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
- /* Uses ISA control ports, not PCI ones. */
- IDE_HFLAG_ISA_PORTS = BIT(0),
- /* single port device */
- IDE_HFLAG_SINGLE = BIT(1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = BIT(3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
- /*
- * keep DMA setting when programming PIO mode, may be used only
- * for hosts which have separate PIO and DMA timings (i.e. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = BIT(8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = BIT(9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = BIT(11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = BIT(13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = BIT(14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = BIT(15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = BIT(16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = BIT(17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = BIT(18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = BIT(20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = BIT(21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = BIT(22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = BIT(23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = BIT(24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = BIT(25),
- IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = BIT(27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = BIT(29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = BIT(30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
- u16 max_sectors; /* if less than the default */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for ATA_PRIV_PM_SUSPEND and ATA_PRIV_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_register_region(struct gendisk *);
-void ide_unregister_region(struct gendisk *);
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata(ide_hwif_t *hwif)
-{
- return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata(ide_hwif_t *hwif, void *data)
-{
- hwif->hwif_data = data;
-}
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = BIT(0),
- IDE_TIMING_ACT8B = BIT(1),
- IDE_TIMING_REC8B = BIT(2),
- IDE_TIMING_CYC8B = BIT(3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = BIT(4),
- IDE_TIMING_RECOVER = BIT(5),
- IDE_TIMING_CYCLE = BIT(6),
- IDE_TIMING_UDMA = BIT(7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
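
A hedged usage sketch of the iterators, assuming hwif points at a live
port:

ide_drive_t *drive;
int i;

ide_port_for_each_present_dev(i, drive, hwif) {
	pr_info("%s: present on %s\n", drive->name, hwif->name);
}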
-
-
-#endif /* _IDE_H */
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
index 91a8612b8bf9..a85d5dd40f72 100644
--- a/include/linux/idle_inject.h
+++ b/include/linux/idle_inject.h
@@ -13,6 +13,9 @@ struct idle_inject_device;
struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
+ bool (*update)(void));
+
void idle_inject_unregister(struct idle_inject_device *ii_dev);
int idle_inject_start(struct idle_inject_device *ii_dev);
@@ -28,6 +31,6 @@ void idle_inject_get_duration(struct idle_inject_device *ii_dev,
unsigned int *idle_duration_us);
void idle_inject_set_latency(struct idle_inject_device *ii_dev,
- unsigned int latency_ns);
+ unsigned int latency_us);
#endif /* __IDLE_INJECT_H__ */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3ade03e5c7af..da5f5fa4a3a6 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
*/
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/**
@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
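
The rewritten condition folds the wrap check into the assignment, so entry
is reliably left NULL after normal termination, as the updated kerneldoc
below notes. Usage is unchanged; a hedged kernel-context sketch (my_idr and
struct item are illustrative):

static DEFINE_IDR(my_idr);

struct item *entry;
unsigned long tmp, id;

idr_for_each_entry_ul(&my_idr, entry, tmp, id)
	pr_info("id %lu -> %p\n", id, entry);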
/*
@@ -263,7 +265,8 @@ void ida_destroy(struct ida *ida);
*
* Allocate an ID between 0 and %INT_MAX, inclusive.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -280,7 +283,8 @@ static inline int ida_alloc(struct ida *ida, gfp_t gfp)
*
* Allocate an ID between @min and %INT_MAX, inclusive.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -297,7 +301,8 @@ static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
*
* Allocate an ID between 0 and @max, inclusive.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -311,6 +316,10 @@ static inline void ida_init(struct ida *ida)
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
+/*
+ * ida_simple_get() and ida_simple_remove() are deprecated. Use
+ * ida_alloc() and ida_free() instead respectively.
+ */
#define ida_simple_get(ida, start, end, gfp) \
ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id) ida_free(ida, id)
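
Migration is mechanical; note that ida_alloc_range() takes an inclusive
max, which is why the compatibility wrappers above subtract one from end. A
hedged sketch:

static DEFINE_IDA(example_ida);

int id;

/* deprecated spelling, ids 0..127: */
id = ida_simple_get(&example_ida, 0, 128, GFP_KERNEL);
ida_simple_remove(&example_ida, id);

/* preferred spelling of the same thing: */
id = ida_alloc_range(&example_ida, 0, 127, GFP_KERNEL);
ida_free(&example_ida, id);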
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c47f43e65a2f..3385a2cc5b09 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2020 Intel Corporation
+ * Copyright (c) 2018 - 2024 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -75,6 +76,7 @@
#define IEEE80211_STYPE_ACTION 0x00D0
/* control */
+#define IEEE80211_STYPE_TRIGGER 0x0020
#define IEEE80211_STYPE_CTL_EXT 0x0060
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
@@ -151,6 +153,9 @@
#define IEEE80211_ANO_NETTYPE_WILD 15
+/* bits unique to S1G beacon */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
#define IEEE80211_CTL_EXT_SPR 0x3000
@@ -167,11 +172,11 @@
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
-/* PV1 Layout 11ah 9.8.3.1 */
+/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */
#define IEEE80211_PV1_FCTL_VERS 0x0003
#define IEEE80211_PV1_FCTL_FTYPE 0x001c
#define IEEE80211_PV1_FCTL_STYPE 0x00e0
-#define IEEE80211_PV1_FCTL_TODS 0x0100
+#define IEEE80211_PV1_FCTL_FROMDS 0x0100
#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
#define IEEE80211_PV1_FCTL_PM 0x0400
#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
@@ -186,6 +191,11 @@ static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
}
+static inline bool ieee80211_sn_less_eq(u16 sn1, u16 sn2)
+{
+ return ((sn2 - sn1) & IEEE80211_SN_MASK) <= (IEEE80211_SN_MODULO >> 1);
+}
+
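
Since IEEE80211_SN_MASK is 0xfff, the comparison is modulo 4096 and only
meaningful within half the sequence space. Worked values for the new
helper:

/* (sn2 - sn1) & 0xfff must be <= 2048 for "less or equal" */
ieee80211_sn_less_eq(10, 10);	/* true:  difference 0 */
ieee80211_sn_less_eq(4095, 5);	/* true:  difference wraps to 6 */
ieee80211_sn_less_eq(5, 4095);	/* false: difference 4090 > 2048 */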
static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
{
return (sn1 + sn2) & IEEE80211_SN_MASK;
@@ -291,12 +301,32 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_HT_CTL_LEN 4
+/* trigger type within common_info of trigger frame */
+#define IEEE80211_TRIGGER_TYPE_MASK 0xf
+#define IEEE80211_TRIGGER_TYPE_BASIC 0x0
+#define IEEE80211_TRIGGER_TYPE_BFRP 0x1
+#define IEEE80211_TRIGGER_TYPE_MU_BAR 0x2
+#define IEEE80211_TRIGGER_TYPE_MU_RTS 0x3
+#define IEEE80211_TRIGGER_TYPE_BSRP 0x4
+#define IEEE80211_TRIGGER_TYPE_GCR_MU_BAR 0x5
+#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
+#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+
+/* UL-bandwidth within common_info of trigger frame */
+#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3
+
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
@@ -320,6 +350,26 @@ struct ieee80211_qos_hdr {
__le16 qos_ctrl;
} __packed __aligned(2);
+struct ieee80211_qos_hdr_4addr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed __aligned(2);
+
+struct ieee80211_trigger {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ __le64 common_info;
+ u8 variable[];
+} __packed __aligned(2);
+
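common_info is little-endian with the trigger type in its low nibble (per
IEEE80211_TRIGGER_TYPE_MASK above), which is why the patch adds
linux/bitfield.h. A hedged extraction sketch, assuming trig points at a
length-validated frame:

if (ieee80211_is_trigger(trig->frame_control)) {
	u8 type = le64_get_bits(trig->common_info,
				IEEE80211_TRIGGER_TYPE_MASK);

	if (type == IEEE80211_TRIGGER_TYPE_BASIC)
		/* handle a basic trigger */;
}
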
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
@@ -554,6 +604,28 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc)
}
/**
+ * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_next_tbtt_present(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
+ fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
+}
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
+ * true for S1G beacons when they're short.
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
+}
+
+/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
*/
@@ -704,26 +776,22 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
- * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
- * @fc: frame control bytes in little-endian byteorder
+ * ieee80211_is_trigger - check if frame is trigger frame
+ * @fc: frame control field in little-endian byteorder
*/
-static inline bool ieee80211_is_any_nullfunc(__le16 fc)
+static inline bool ieee80211_is_trigger(__le16 fc)
{
- return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_TRIGGER);
}
/**
- * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
- * @fc: frame control field in little-endian byteorder
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
*/
-static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
{
- /* IEEE 802.11-2012, definition of "bufferable management frame";
- * note that this ignores the IBSS special case. */
- return ieee80211_is_mgmt(fc) &&
- (ieee80211_is_action(fc) ||
- ieee80211_is_disassoc(fc) ||
- ieee80211_is_deauth(fc));
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
}
/**
@@ -745,6 +813,11 @@ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
}
+static inline u16 ieee80211_get_sn(struct ieee80211_hdr *hdr)
+{
+ return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_SEQ);
+}
+
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -780,9 +853,14 @@ enum ieee80211_preq_target_flags {
};
/**
- * struct ieee80211_quiet_ie
+ * struct ieee80211_quiet_ie - Quiet element
+ * @count: Quiet Count
+ * @period: Quiet Period
+ * @duration: Quiet Duration
+ * @offset: Quiet Offset
*
- * This structure refers to "Quiet information element"
+ * This structure represents the payload of the "Quiet element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.22.
*/
struct ieee80211_quiet_ie {
u8 count;
@@ -792,9 +870,15 @@ struct ieee80211_quiet_ie {
} __packed;
/**
- * struct ieee80211_msrment_ie
+ * struct ieee80211_msrment_ie - Measurement element
+ * @token: Measurement Token
+ * @mode: Measurement Report Mode
+ * @type: Measurement Type
+ * @request: Measurement Request or Measurement Report
*
- * This structure refers to "Measurement Request/Report information element"
+ * This structure represents the payload of both the "Measurement
+ * Request element" and the "Measurement Report element" as described
+ * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21.
*/
struct ieee80211_msrment_ie {
u8 token;
@@ -804,9 +888,14 @@ struct ieee80211_msrment_ie {
} __packed;
/**
- * struct ieee80211_channel_sw_ie
+ * struct ieee80211_channel_sw_ie - Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure refers to "Channel Switch Announcement information element"
+ * This structure represents the payload of the "Channel Switch
+ * Announcement element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.18.
*/
struct ieee80211_channel_sw_ie {
u8 mode;
@@ -815,9 +904,14 @@ struct ieee80211_channel_sw_ie {
} __packed;
/**
- * struct ieee80211_ext_chansw_ie
+ * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_operating_class: New Operating Class
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure represents the "Extended Channel Switch Announcement element"
+ * This structure represents the "Extended Channel Switch Announcement
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.52.
*/
struct ieee80211_ext_chansw_ie {
u8 mode;
@@ -838,8 +932,14 @@ struct ieee80211_sec_chan_offs_ie {
/**
* struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ * @mesh_ttl: Time To Live
+ * @mesh_flags: Flags
+ * @mesh_reason: Reason Code
+ * @mesh_pre_value: Precedence Value
*
- * This structure represents the "Mesh Channel Switch Paramters element"
+ * This structure represents the payload of the "Mesh Channel Switch
+ * Parameters element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.102.
*/
struct ieee80211_mesh_chansw_params_ie {
u8 mesh_ttl;
@@ -850,6 +950,13 @@ struct ieee80211_mesh_chansw_params_ie {
/**
* struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
+ * @new_channel_width: New Channel Width
+ * @new_center_freq_seg0: New Channel Center Frequency Segment 0
+ * @new_center_freq_seg1: New Channel Center Frequency Segment 1
+ *
+ * This structure represents the payload of the "Wide Bandwidth
+ * Channel Switch element" as described in IEEE Std 802.11-2020
+ * section 9.4.2.160.
*/
struct ieee80211_wide_bw_chansw_ie {
u8 new_channel_width;
@@ -857,22 +964,42 @@ struct ieee80211_wide_bw_chansw_ie {
} __packed;
/**
- * struct ieee80211_tim
+ * struct ieee80211_tim_ie - Traffic Indication Map information element
+ * @dtim_count: DTIM Count
+ * @dtim_period: DTIM Period
+ * @bitmap_ctrl: Bitmap Control
+ * @required_octet: "Syntactic sugar" to force the struct size to the
+ * minimum valid size when carried in a non-S1G PPDU
+ * @virtual_map: Partial Virtual Bitmap
*
- * This structure refers to "Traffic Indication Map information element"
+ * This structure represents the payload of the "TIM element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
+ * definition is only applicable when the element is carried in a
+ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
+ * Control and Partial Virtual Bitmap may not be present.
*/
struct ieee80211_tim_ie {
u8 dtim_count;
u8 dtim_period;
u8 bitmap_ctrl;
- /* variable size: 1 - 251 bytes */
- u8 virtual_map[1];
+ union {
+ u8 required_octet;
+ DECLARE_FLEX_ARRAY(u8, virtual_map);
+ };
} __packed;
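
The union above is what keeps sizeof(struct ieee80211_tim_ie) at the 4-octet minimum required in a non-S1G PPDU while still exposing the Partial Virtual Bitmap as a flexible array. A minimal parsing sketch, assuming data/len cover the element body after the EID/length header; example_tim_parse() is illustrative, not kernel API:

#include <linux/ieee80211.h>

/* Illustrative sketch: read the fixed TIM fields once the body is
 * known to be at least the 4-octet minimum enforced by the union.
 */
static bool example_tim_parse(const u8 *data, u8 len,
			      u8 *dtim_count, u8 *dtim_period)
{
	const struct ieee80211_tim_ie *tim = (const void *)data;

	if (len < sizeof(*tim))	/* sizeof() is 4 here */
		return false;

	*dtim_count = tim->dtim_count;
	*dtim_period = tim->dtim_period;
	return true;
}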
/**
- * struct ieee80211_meshconf_ie
+ * struct ieee80211_meshconf_ie - Mesh Configuration element
+ * @meshconf_psel: Active Path Selection Protocol Identifier
+ * @meshconf_pmetric: Active Path Selection Metric Identifier
+ * @meshconf_congest: Congestion Control Mode Identifier
+ * @meshconf_synch: Synchronization Method Identifier
+ * @meshconf_auth: Authentication Protocol Identifier
+ * @meshconf_form: Mesh Formation Info
+ * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
*
- * This structure refers to "Mesh Configuration information element"
+ * This structure represents the payload of the "Mesh Configuration
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
*/
struct ieee80211_meshconf_ie {
u8 meshconf_psel;
@@ -894,6 +1021,9 @@ struct ieee80211_meshconf_ie {
* is ongoing
* @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
* neighbors in deep sleep mode
+ *
+ * Enumerates the "Mesh Capability" as described in IEEE Std
+ * 802.11-2020 section 9.4.2.97.7.
*/
enum mesh_config_capab_flags {
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
@@ -904,7 +1034,7 @@ enum mesh_config_capab_flags {
#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
-/**
+/*
* mesh channel switch parameters element's flag indicator
*
*/
@@ -913,9 +1043,17 @@ enum mesh_config_capab_flags {
#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
/**
- * struct ieee80211_rann_ie
+ * struct ieee80211_rann_ie - RANN (root announcement) element
+ * @rann_flags: Flags
+ * @rann_hopcount: Hop Count
+ * @rann_ttl: Element TTL
+ * @rann_addr: Root Mesh STA Address
+ * @rann_seq: HWMP Sequence Number
+ * @rann_interval: Interval
+ * @rann_metric: Metric
*
- * This structure refers to "Root Announcement information element"
+ * This structure represents the payload of the "RANN element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.111.
*/
struct ieee80211_rann_ie {
u8 rann_flags;
@@ -937,7 +1075,7 @@ enum ieee80211_ht_chanwidth_values {
};
/**
- * enum ieee80211_opmode_bits - VHT operating mode field bits
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
@@ -962,14 +1100,36 @@ enum ieee80211_vht_opmode_bits {
IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
};
+/**
+ * enum ieee80211_s1g_chanwidth - S1G BSS channel width
+ *
+ * These values encode the BSS Channel Width as defined in
+ * IEEE Std 802.11ah-2016, Table 10-20.
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
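
Note that the encoding is not contiguous: each wider width sets one more bit than the previous one, hence the values 0, 1, 3, 7, 15. A hedged sketch mapping the encoding to MHz; example_s1g_chanwidth_mhz() is illustrative, not kernel API:

/* Illustrative sketch: map the BSS Channel Width encoding to MHz. */
static int example_s1g_chanwidth_mhz(enum ieee80211_s1g_chanwidth w)
{
	switch (w) {
	case IEEE80211_S1G_CHANWIDTH_1MHZ:	return 1;
	case IEEE80211_S1G_CHANWIDTH_2MHZ:	return 2;
	case IEEE80211_S1G_CHANWIDTH_4MHZ:	return 4;
	case IEEE80211_S1G_CHANWIDTH_8MHZ:	return 8;
	case IEEE80211_S1G_CHANWIDTH_16MHZ:	return 16;
	}
	return -1;	/* reserved encoding */
}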
+
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
/**
- * struct ieee80211_tpc_report_ie
+ * struct ieee80211_tpc_report_ie - TPC Report element
+ * @tx_power: Transmit Power
+ * @link_margin: Link Margin
*
- * This structure refers to "TPC Report element"
+ * This structure represents the payload of the "TPC Report element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.16.
*/
struct ieee80211_tpc_report_ie {
u8 tx_power;
@@ -979,15 +1139,22 @@ struct ieee80211_tpc_report_ie {
#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1)
#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1
#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10
struct ieee80211_addba_ext_ie {
u8 data;
} __packed;
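
The new extension masks carry bits 10..12 of the negotiated buffer size: the three bits are read with the GENMASK(7, 5) element mask and then placed at bit 10 of the combined value. A hedged sketch, assuming base_buf_size was taken from the ADDBA frame's Block Ack Parameter Set; example_addba_buf_size() is illustrative:

#include <linux/bitfield.h>
#include <linux/ieee80211.h>

/* Illustrative sketch: combine the ADDBA Extension element's
 * buffer-size extension bits with the base buffer size.
 */
static u16 example_addba_buf_size(u16 base_buf_size,
				  const struct ieee80211_addba_ext_ie *ext)
{
	u16 hi = u8_get_bits(ext->data, IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);

	return base_buf_size | (hi << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT);
}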
/**
- * struct ieee80211_s1g_bcn_compat_ie
+ * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
+ * @compat_info: Compatibility Information
+ * @beacon_int: Beacon Interval
+ * @tsf_completion: TSF Completion
*
- * S1G Beacon Compatibility element
+ * This structure represents the payload of the "S1G Beacon
+ * Compatibility element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.196.
*/
struct ieee80211_s1g_bcn_compat_ie {
__le16 compat_info;
@@ -996,9 +1163,15 @@ struct ieee80211_s1g_bcn_compat_ie {
} __packed;
/**
- * struct ieee80211_s1g_oper_ie
+ * struct ieee80211_s1g_oper_ie - S1G Operation element
+ * @ch_width: S1G Operation Information Channel Width
+ * @oper_class: S1G Operation Information Operating Class
+ * @primary_ch: S1G Operation Information Primary Channel Number
+ * @oper_ch: S1G Operation Information Channel Center Frequency
+ * @basic_mcs_nss: Basic S1G-MCS and NSS Set
*
- * S1G Operation element
+ * This structure represents the payload of the "S1G Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
*/
struct ieee80211_s1g_oper_ie {
u8 ch_width;
@@ -1009,9 +1182,13 @@ struct ieee80211_s1g_oper_ie {
} __packed;
/**
- * struct ieee80211_aid_response_ie
+ * struct ieee80211_aid_response_ie - AID Response element
+ * @aid: AID/Group AID
+ * @switch_count: AID Switch Count
+ * @response_int: AID Response Interval
*
- * AID Response element
+ * This structure represents the payload of the "AID Response element"
+ * as described in IEEE Std 802.11-2020 section 9.4.2.194.
*/
struct ieee80211_aid_response_ie {
__le16 aid;
@@ -1034,9 +1211,82 @@ struct ieee80211_ext {
u8 change_seq;
u8 variable[0];
} __packed s1g_beacon;
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 next_tbtt[3];
+ u8 variable[0];
+ } __packed s1g_short_beacon;
} u;
} __packed __aligned(2);
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
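+
The Request Type field packs several subfields into one little-endian word; the IEEE80211_TWT_REQTYPE_* masks above extract them. A hedged sketch pulling out the Setup Command; example_twt_setup_cmd() is illustrative, not kernel API:

#include <linux/bitfield.h>

/* Illustrative sketch: decode the TWT Setup Command subfield. */
static enum ieee80211_twt_setup_cmd
example_twt_setup_cmd(const struct ieee80211_twt_params *twt)
{
	return le16_get_bits(twt->req_type, IEEE80211_TWT_REQTYPE_SETUP_CMD);
}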
+
+#define IEEE80211_TTLM_MAX_CNT 2
+#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN 0
+#define IEEE80211_TTLM_DIRECTION_UP 1
+#define IEEE80211_TTLM_DIRECTION_BOTH 2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+ u8 control;
+ u8 optional[];
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -1050,7 +1300,7 @@ struct ieee80211_mgmt {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed auth;
struct {
__le16 reason_code;
@@ -1059,21 +1309,26 @@ struct ieee80211_mgmt {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
+ __le16 status_code;
+ u8 variable[];
+ } __packed s1g_assoc_resp, s1g_reassoc_resp;
+ struct {
+ __le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed reassoc_req;
struct {
__le16 reason_code;
@@ -1084,11 +1339,11 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
+ u8 variable[];
} __packed beacon;
struct {
/* only variable items: SSID, Supported rates */
- u8 variable[0];
+ DECLARE_FLEX_ARRAY(u8, variable);
} __packed probe_req;
struct {
__le64 timestamp;
@@ -1096,7 +1351,7 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
+ u8 variable[];
} __packed probe_resp;
struct {
u8 category;
@@ -1105,16 +1360,16 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
u8 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed wme_action;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch;
struct{
u8 action_code;
struct ieee80211_ext_chansw_ie data;
- u8 variable[0];
+ u8 variable[];
} __packed ext_chan_switch;
struct{
u8 action_code;
@@ -1130,7 +1385,7 @@ struct ieee80211_mgmt {
__le16 timeout;
__le16 start_seq_num;
/* followed by BA Extension */
- u8 variable[0];
+ u8 variable[];
} __packed addba_req;
struct{
u8 action_code;
@@ -1146,11 +1401,11 @@ struct ieee80211_mgmt {
} __packed delba;
struct {
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed self_prot;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed mesh_action;
struct {
u8 action;
@@ -1194,17 +1449,49 @@ struct ieee80211_mgmt {
u8 toa[6];
__le16 tod_error;
__le16 toa_error;
- u8 variable[0];
+ u8 variable[];
} __packed ftm;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed s1g;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u32 tod;
+ u32 toa;
+ u8 max_tod_error;
+ u8 max_toa_error;
+ } __packed wnm_timing_msr;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ttlm_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[];
+ } __packed ttlm_res;
+ struct {
+ u8 action_code;
+ } __packed ttlm_tear_down;
} u;
} __packed action;
+ DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
} __packed __aligned(2);
/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
+#define BSS_MEMBERSHIP_SELECTOR_GLK 125
+#define BSS_MEMBERSHIP_SELECTOR_EPS 124
+#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1340,7 +1627,7 @@ struct ieee80211_tdls_data {
/*
* Peer-to-Peer IE attribute related definitions.
*/
-/**
+/*
* enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
*/
enum ieee80211_p2p_attr_id {
@@ -1390,11 +1677,17 @@ struct ieee80211_p2p_noa_attr {
#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
/**
- * struct ieee80211_bar - HT Block Ack Request
+ * struct ieee80211_bar - Block Ack Request frame format
+ * @frame_control: Frame Control
+ * @duration: Duration
+ * @ra: RA
+ * @ta: TA
+ * @control: BAR Control
+ * @start_seq_num: Starting Sequence Number (see Figure 9-37)
*
- * This structure refers to "HT BlockAckReq" as
- * described in 802.11n draft section 7.2.1.7.1
- */
+ * This structure represents the "BlockAckReq frame format"
+ * as described in IEEE Std 802.11-2020 section 9.3.1.7.
+ */
struct ieee80211_bar {
__le16 frame_control;
__le16 duration;
@@ -1414,13 +1707,17 @@ struct ieee80211_bar {
#define IEEE80211_HT_MCS_MASK_LEN 10
/**
- * struct ieee80211_mcs_info - MCS information
+ * struct ieee80211_mcs_info - Supported MCS Set field
* @rx_mask: RX mask
* @rx_highest: highest supported RX rate. If set represents
* the highest supported RX data rate in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest RX data rate supported.
* @tx_params: TX parameters
+ * @reserved: Reserved bits
+ *
+ * This structure represents the "Supported MCS Set field" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
*/
struct ieee80211_mcs_info {
u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
@@ -1439,6 +1736,8 @@ struct ieee80211_mcs_info {
#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
+
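IEEE80211_HT_MCS_CHAINS() maps an HT MCS index to the number of spatial streams it uses, with MCS 32 (the 40 MHz duplicate mode) special-cased to one. As a hedged sketch:

/* Illustrative: HT MCS 0-7 use one chain, 8-15 two, and so on. */
static unsigned int example_ht_chains(void)
{
	return IEEE80211_HT_MCS_CHAINS(15);	/* evaluates to 2 */
}
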
/*
* 802.11n D5.0 20.3.5 / 20.6 says:
* - indices 0 to 7 and 32 are single spatial stream
@@ -1451,10 +1750,16 @@ struct ieee80211_mcs_info {
(IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
/**
- * struct ieee80211_ht_cap - HT capabilities
+ * struct ieee80211_ht_cap - HT capabilities element
+ * @cap_info: HT Capability Information
+ * @ampdu_params_info: A-MPDU Parameters
+ * @mcs: Supported MCS Set
+ * @extended_ht_cap_info: HT Extended Capabilities
+ * @tx_BF_cap_info: Transmit Beamforming Capabilities
+ * @antenna_selection_info: ASEL Capability
*
- * This structure is the "HT capabilities element" as
- * described in 802.11n D5.0 7.3.2.57
+ * This structure represents the payload of the "HT Capabilities
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
*/
struct ieee80211_ht_cap {
__le16 cap_info;
@@ -1542,9 +1847,14 @@ enum ieee80211_min_mpdu_spacing {
/**
* struct ieee80211_ht_operation - HT operation IE
+ * @primary_chan: Primary Channel
+ * @ht_param: HT Operation Information parameters
+ * @operation_mode: HT Operation Information operation mode
+ * @stbc_param: HT Operation Information STBC params
+ * @basic_set: Basic HT-MCS Set
*
- * This structure is the "HT operation element" as
- * described in 802.11n-2009 7.3.2.57
+ * This structure represents the payload of the "HT Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
*/
struct ieee80211_ht_operation {
u8 primary_chan;
@@ -1594,10 +1904,12 @@ struct ieee80211_ht_operation {
* A-MPDU buffer sizes
 * According to HT, the buffer size varies from 8 to 64 frames
* HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1K frames.
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
-#define IEEE80211_MAX_AMPDU_BUF 0x100
+#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1711,9 +2023,12 @@ struct ieee80211_vht_operation {
/**
* struct ieee80211_he_cap_elem - HE capabilities element
+ * @mac_cap_info: HE MAC Capabilities Information
+ * @phy_cap_info: HE PHY Capabilities Information
*
- * This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3
+ * This structure represents the fixed fields of the payload of the
+ * "HE capabilities element" as described in IEEE Std 802.11ax-2021
+ * sections 9.4.2.248.2 and 9.4.2.248.3.
*/
struct ieee80211_he_cap_elem {
u8 mac_cap_info[6];
@@ -1772,35 +2087,45 @@ struct ieee80211_he_mcs_nss_supp {
} __packed;
/**
- * struct ieee80211_he_operation - HE capabilities element
+ * struct ieee80211_he_operation - HE Operation element
+ * @he_oper_params: HE Operation Parameters + BSS Color Information
+ * @he_mcs_nss_set: Basic HE-MCS And NSS Set
+ * @optional: Optional fields VHT Operation Information, Max Co-Hosted
+ * BSSID Indicator, and 6 GHz Operation Information
*
- * This structure is the "HE operation element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.243
+ * This structure represents the payload of the "HE Operation
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
*/
struct ieee80211_he_operation {
__le32 he_oper_params;
__le16 he_mcs_nss_set;
- /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
u8 optional[];
} __packed;
/**
- * struct ieee80211_he_spr - HE spatial reuse element
+ * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
+ * @he_sr_control: SR Control
+ * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
+ * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
+ * Bitmap, and SRG Partial BSSID Bitmap
*
- * This structure is the "HE spatial reuse element" element as
- * described in P802.11ax_D4.0 section 9.4.2.241
+ * This structure represents the payload of the "Spatial Reuse
+ * Parameter Set element" as described in IEEE Std 802.11ax-2021
+ * section 9.4.2.252.
*/
struct ieee80211_he_spr {
u8 he_sr_control;
- /* Optional 0 to 19 bytes: depends on @he_sr_control */
u8 optional[];
} __packed;
/**
* struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ * @aifsn: ACI/AIFSN
+ * @ecw_min_max: ECWmin/ECWmax
+ * @mu_edca_timer: MU EDCA Timer
*
- * This structure is the "MU AC Parameter Record" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
+ * This structure represents the "MU AC Parameter Record" as described
+ * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
*/
struct ieee80211_he_mu_edca_param_ac_rec {
u8 aifsn;
@@ -1810,9 +2135,14 @@ struct ieee80211_he_mu_edca_param_ac_rec {
/**
* struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ * @mu_qos_info: QoS Info
+ * @ac_be: MU AC_BE Parameter Record
+ * @ac_bk: MU AC_BK Parameter Record
+ * @ac_vi: MU AC_VI Parameter Record
+ * @ac_vo: MU AC_VO Parameter Record
*
- * This structure is the "MU EDCA Parameter Set element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
+ * This structure represents the payload of the "MU EDCA Parameter Set
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
*/
struct ieee80211_mu_edca_param_set {
u8 mu_qos_info;
@@ -1822,6 +2152,143 @@ struct ieee80211_mu_edca_param_set {
struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
} __packed;
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20 MHz-only station
+ * maximum supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ union {
+ struct {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[4];
+ };
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+ union {
+ struct {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[3];
+ };
+};
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D2.0 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+ u8 mac_cap_info[2];
+ u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+ struct ieee80211_eht_cap_elem_fixed fixed;
+
+ /*
+ * Followed by:
+ * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+ * EHT PPE Thresholds field: variable length.
+ */
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
+#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+
+/**
+ * struct ieee80211_eht_operation - EHT Operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D2.0 section 9.4.2.311
+ *
+ * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
+ * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
+ * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
+ * receive.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation {
+ u8 params;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation_info - EHT Operation Information field
+ *
+ * @control: EHT operation information control.
+ * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
+ * EHT BSS.
+ * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation_info {
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 optional[];
+} __packed;
+
/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
@@ -1885,6 +2352,44 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
int mcs, bool ext_nss_bw_capable,
unsigned int max_vht_nss);
+/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_AP_POWER_MAX: maximum value
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+ IEEE80211_REG_AP_POWER_AFTER_LAST,
+ IEEE80211_REG_AP_POWER_MAX =
+ IEEE80211_REG_AP_POWER_AFTER_LAST - 1,
+};
+
+/**
+ * enum ieee80211_client_reg_power - regulatory power for a client
+ *
+ * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode
+ * @IEEE80211_REG_DEFAULT_CLIENT: Default Client
+ * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client
+ * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value
+ */
+enum ieee80211_client_reg_power {
+ IEEE80211_REG_UNSET_CLIENT,
+ IEEE80211_REG_DEFAULT_CLIENT,
+ IEEE80211_REG_SUBORDINATE_CLIENT,
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST,
+ IEEE80211_REG_CLIENT_POWER_MAX =
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1,
+};
+
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
@@ -1949,24 +2454,22 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
 * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
* same field in the HE capabilities.
*/
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT 3
-
#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
#define IEEE80211_HE_MAC_CAP4_QTP 0x02
#define IEEE80211_HE_MAC_CAP4_BQR 0x04
-#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
#define IEEE80211_HE_MAC_CAP4_OPS 0x20
-#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
/* Multi TID agg TX is split between byte #4 and #5
* The value is a combination of B39,B40,B41
*/
@@ -1974,7 +2477,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
-#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
@@ -1983,12 +2486,15 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
+
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
@@ -2032,7 +2538,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
-#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
+#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
@@ -2079,15 +2585,15 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
-#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
-#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
-#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01
-#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02
+#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
@@ -2118,11 +2624,14 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -2166,6 +2675,7 @@ ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
/*
* Calculate 802.11ax HE capabilities IE PPE field size
@@ -2195,6 +2705,29 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
return n;
}
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+ u8 needed = sizeof(*he_cap_ie_elem);
+
+ if (len < needed)
+ return false;
+
+ needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ if (len < needed)
+ return false;
+
+ if (he_cap_ie_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ if (len < needed + 1)
+ return false;
+ needed += ieee80211_he_ppe_size(data[needed],
+ he_cap_ie_elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
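A hedged usage sketch for the validator above, assuming data/len start right after the Element ID Extension octet; example_he_cap() is illustrative, not kernel API:

/* Illustrative sketch: validate before casting the element body. */
static const struct ieee80211_he_cap_elem *
example_he_cap(const u8 *data, u8 len)
{
	if (!ieee80211_he_capa_size_ok(data, len))
		return NULL;

	return (const struct ieee80211_he_cap_elem *)data;
}
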
/* HE Operation defines */
#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
@@ -2209,8 +2742,12 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+
/**
- * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
* @primary: primary channel
* @control: control flags
* @ccfs0: channel center frequency segment 0
@@ -2225,6 +2762,7 @@ struct ieee80211_he_6ghz_oper {
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
u8 control;
u8 ccfs0;
u8 ccfs1;
@@ -2232,6 +2770,48 @@ struct ieee80211_he_6ghz_oper {
} __packed;
/*
+ * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021",
+ * "Table 9-275a-Maximum Transmit Power Interpretation subfield encoding"
+ * defines four types, and "Table E-12-Regulatory Info subfield encoding
+ * in the United States" defines two categories for each type.
+ * So there can be at most 8 Transmit Power Envelope elements in total.
+ */
+#define IEEE80211_TPE_MAX_IE_COUNT 8
+/*
+ * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
+ * of "IEEE Std 802.11ax™‐2021", the max power level is 8.
+ */
+#define IEEE80211_MAX_NUM_PWR_LEVEL 8
+
+#define IEEE80211_TPE_MAX_POWER_COUNT 8
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/**
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope
+ * @tx_power_info: Transmit Power Information field
+ * @tx_power: Maximum Transmit Power field
+ *
+ * This structure represents the payload of the "Transmit Power
+ * Envelope element" as described in IEEE Std 802.11ax-2021 section
+ * 9.4.2.161
+ */
+struct ieee80211_tx_pwr_env {
+ u8 tx_power_info;
+ s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
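A hedged sketch decoding the Transmit Power Information field with the masks above; example_tpe_info() is illustrative, not kernel API:

#include <linux/bitfield.h>

/* Illustrative sketch: split tx_power_info into its subfields. */
static void example_tpe_info(const struct ieee80211_tx_pwr_env *env,
			     u8 *count, u8 *interpretation, u8 *category)
{
	*count = u8_get_bits(env->tx_power_info,
			     IEEE80211_TX_PWR_ENV_INFO_COUNT);
	*interpretation = u8_get_bits(env->tx_power_info,
				      IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
	*category = u8_get_bits(env->tx_power_info,
				IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
}
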
+/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
 * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
* after the ext ID byte. It is assumed that he_oper_ie has at least
@@ -2242,7 +2822,7 @@ struct ieee80211_he_6ghz_oper {
static inline u8
ieee80211_he_oper_size(const u8 *he_oper_ie)
{
- struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
u8 oper_len = sizeof(struct ieee80211_he_operation);
u32 he_oper_params;
@@ -2275,12 +2855,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
static inline const struct ieee80211_he_6ghz_oper *
ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
{
- const u8 *ret = (void *)&he_oper->optional;
+ const u8 *ret;
u32 he_oper_params;
if (!he_oper)
return NULL;
+ ret = (const void *)&he_oper->optional;
+
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
@@ -2290,12 +2872,15 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
ret++;
- return (void *)ret;
+ return (const void *)ret;
}
/* HE Spatial Reuse defines */
-#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4
-#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
/*
* ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
@@ -2308,7 +2893,7 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
static inline u8
ieee80211_he_spr_size(const u8 *he_spr_ie)
{
- struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
u8 spr_len = sizeof(struct ieee80211_he_spr);
u8 he_spr_params;
@@ -2330,84 +2915,336 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
}
/* S1G Capabilities Information field */
-#define S1G_CAPAB_B0_S1G_LONG BIT(0)
-#define S1G_CAPAB_B0_SGI_1MHZ BIT(1)
-#define S1G_CAPAB_B0_SGI_2MHZ BIT(2)
-#define S1G_CAPAB_B0_SGI_4MHZ BIT(3)
-#define S1G_CAPAB_B0_SGI_8MHZ BIT(4)
-#define S1G_CAPAB_B0_SGI_16MHZ BIT(5)
-#define S1G_CAPAB_B0_SUPP_CH_WIDTH_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B0_SUPP_CH_WIDTH_SHIFT 6
-
-#define S1G_CAPAB_B1_RX_LDPC BIT(0)
-#define S1G_CAPAB_B1_TX_STBC BIT(1)
-#define S1G_CAPAB_B1_RX_STBC BIT(2)
-#define S1G_CAPAB_B1_SU_BFER BIT(3)
-#define S1G_CAPAB_B1_SU_BFEE BIT(4)
-#define S1G_CAPAB_B1_BFEE_STS_MASK (BIT(5) | BIT(6) | BIT(7))
-#define S1G_CAPAB_B1_BFEE_STS_SHIFT 5
-
-#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_MASK (BIT(0) | BIT(1) | BIT(2))
-#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_SHIFT 0
-#define S1G_CAPAB_B2_MU_BFER BIT(3)
-#define S1G_CAPAB_B2_MU_BFEE BIT(4)
-#define S1G_CAPAB_B2_PLUS_HTC_VHT BIT(5)
-#define S1G_CAPAB_B2_TRAVELING_PILOT_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B2_TRAVELING_PILOT_SHIFT 6
-
-#define S1G_CAPAB_B3_RD_RESPONDER BIT(0)
-#define S1G_CAPAB_B3_HT_DELAYED_BA BIT(1)
-#define S1G_CAPAB_B3_MAX_MPDU_LEN BIT(2)
-#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_MASK (BIT(3) | BIT(4))
-#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_SHIFT 3
-#define S1G_CAPAB_B3_MIN_MPDU_START_MASK (BIT(5) | BIT(6) | BIT(7))
-#define S1G_CAPAB_B3_MIN_MPDU_START_SHIFT 5
-
-#define S1G_CAPAB_B4_UPLINK_SYNC BIT(0)
-#define S1G_CAPAB_B4_DYNAMIC_AID BIT(1)
-#define S1G_CAPAB_B4_BAT BIT(2)
-#define S1G_CAPAB_B4_TIME_ADE BIT(3)
-#define S1G_CAPAB_B4_NON_TIM BIT(4)
-#define S1G_CAPAB_B4_GROUP_AID BIT(5)
-#define S1G_CAPAB_B4_STA_TYPE_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B4_STA_TYPE_SHIFT 6
-
-#define S1G_CAPAB_B5_CENT_AUTH_CONTROL BIT(0)
-#define S1G_CAPAB_B5_DIST_AUTH_CONTROL BIT(1)
-#define S1G_CAPAB_B5_AMSDU BIT(2)
-#define S1G_CAPAB_B5_AMPDU BIT(3)
-#define S1G_CAPAB_B5_ASYMMETRIC_BA BIT(4)
-#define S1G_CAPAB_B5_FLOW_CONTROL BIT(5)
-#define S1G_CAPAB_B5_SECTORIZED_BEAM_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B5_SECTORIZED_BEAM_SHIFT 6
-
-#define S1G_CAPAB_B6_OBSS_MITIGATION BIT(0)
-#define S1G_CAPAB_B6_FRAGMENT_BA BIT(1)
-#define S1G_CAPAB_B6_NDP_PS_POLL BIT(2)
-#define S1G_CAPAB_B6_RAW_OPERATION BIT(3)
-#define S1G_CAPAB_B6_PAGE_SLICING BIT(4)
-#define S1G_CAPAB_B6_TXOP_SHARING_IMP_ACK BIT(5)
-#define S1G_CAPAB_B6_VHT_LINK_ADAPT_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B6_VHT_LINK_ADAPT_SHIFT 6
-
-#define S1G_CAPAB_B7_TACK_AS_PS_POLL BIT(0)
-#define S1G_CAPAB_B7_DUP_1MHZ BIT(1)
-#define S1G_CAPAB_B7_MCS_NEGOTIATION BIT(2)
-#define S1G_CAPAB_B7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
-#define S1G_CAPAB_B7_NDP_BFING_REPORT_POLL BIT(4)
-#define S1G_CAPAB_B7_UNSOLICITED_DYN_AID BIT(5)
-#define S1G_CAPAB_B7_SECTOR_TRAINING_OPERATION BIT(6)
-#define S1G_CAPAB_B7_TEMP_PS_MODE_SWITCH BIT(7)
-
-#define S1G_CAPAB_B8_TWT_GROUPING BIT(0)
-#define S1G_CAPAB_B8_BDT BIT(1)
-#define S1G_CAPAB_B8_COLOR_MASK (BIT(2) | BIT(3) | BIT(4))
-#define S1G_CAPAB_B8_COLOR_SHIFT 2
-#define S1G_CAPAB_B8_TWT_REQUEST BIT(5)
-#define S1G_CAPAB_B8_TWT_RESPOND BIT(6)
-#define S1G_CAPAB_B8_PV1_FRAME BIT(7)
-
-#define S1G_CAPAB_B9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
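S1G_SUPP_CH_WIDTH_MAX() turns the two-bit Supported Channel Width encoding (0/1/2/3) into the widest supported channel in MHz (2/4/8/16). A hedged sketch, assuming capab points at the S1G Capabilities Information field; example_s1g_max_width() is illustrative:

#include <linux/bitfield.h>

/* Illustrative sketch: widest supported S1G channel in MHz. */
static unsigned int example_s1g_max_width(const u8 *capab)
{
	return S1G_SUPP_CH_WIDTH_MAX(capab);
}
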
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+
+/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+
+/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
+
+/* EHT beamformee number of spatial streams for <= 80MHz is split across two octets */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
+{
+ u8 count = 0;
+
+ /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return 3;
+
+ /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ count += 3;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 3;
+
+ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ count += 3;
+
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
+}
+
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
+
+/*
+ * Calculate 802.11be EHT capabilities IE PPE Thresholds field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u32 n;
+
+ if (!(phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+ return 0;
+
+ n = hweight16(ppe_thres_hdr &
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+ /*
+ * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+ * total size.
+ */
+ n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+ IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+ return DIV_ROUND_UP(n, 8);
+}
+
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+ u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+ if (len < needed || !he_capa)
+ return false;
+
+ needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+ (const void *)data,
+ from_ap);
+ if (len < needed)
+ return false;
+
+ if (elem->phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u16 ppe_thres_hdr;
+
+ if (len < needed + sizeof(ppe_thres_hdr))
+ return false;
+
+ ppe_thres_hdr = get_unaligned_le16(data + needed);
+ needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+ elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
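A hedged usage sketch for the EHT validator, assuming he_capa points at an already-validated HE Capabilities element body; example_eht_cap() is illustrative, not kernel API:

/* Illustrative sketch: validate before casting the EHT element. */
static const struct ieee80211_eht_cap_elem_fixed *
example_eht_cap(const u8 *he_capa, const u8 *data, u8 len, bool from_ap)
{
	if (!ieee80211_eht_capa_size_ok(he_capa, data, len, from_ap))
		return NULL;

	return (const struct ieee80211_eht_cap_elem_fixed *)data;
}
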
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_eht_operation *elem = (const void *)data;
+ u8 needed = sizeof(*elem);
+
+ if (len < needed)
+ return false;
+
+ if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
+ needed += 3;
+
+ if (elem->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ needed += 2;
+ }
+
+ return len >= needed;
+}
+
+/* must validate ieee80211_eht_oper_size_ok() first */
+static inline u16
+ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
+{
+ const struct ieee80211_eht_operation_info *info =
+ (const void *)eht_oper->optional;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
+ return 0;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ return get_unaligned_le16(info->optional);
+}
+
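A hedged sketch tying the two helpers together, honoring the size-check requirement noted above; example_eht_dis_subchan() is illustrative:

/* Illustrative sketch: size-check, then read the bitmap (or 0). */
static u16 example_eht_dis_subchan(const u8 *data, u8 len)
{
	const struct ieee80211_eht_operation *oper = (const void *)data;

	if (!ieee80211_eht_oper_size_ok(data, len))
		return 0;

	return ieee80211_eht_oper_dis_subchan_bitmap(oper);
}
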
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
+
+struct ieee80211_bandwidth_indication {
+ u8 params;
+ struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+ if (len < sizeof(*bwi))
+ return false;
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+ len < sizeof(*bwi) + 2)
+ return false;
+
+ return true;
+}
+
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
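
The listen interval field splits into a 2-bit Unified Scaling Factor and a 14-bit Unscaled Interval. A hedged decoding sketch; example_listen_int() is illustrative, not kernel API:

#include <linux/bitfield.h>

/* Illustrative sketch: split a listen interval into USF and UI. */
static void example_listen_int(u16 listen_interval, u8 *usf, u16 *ui)
{
	*usf = FIELD_GET(LISTEN_INT_USF, listen_interval);
	*ui = FIELD_GET(LISTEN_INT_UI, listen_interval);
}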
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
@@ -2563,6 +3400,8 @@ enum ieee80211_statuscode {
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
WLAN_STATUS_SAE_PK = 127,
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134,
};
@@ -2800,7 +3639,7 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
@@ -2808,8 +3647,11 @@ enum ieee80211_eid {
WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201,
+ WLAN_EID_AID_REQUEST = 210,
+ WLAN_EID_AID_RESPONSE = 211,
WLAN_EID_S1G_BCN_COMPAT = 213,
WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
+ WLAN_EID_S1G_TWT = 216,
WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
@@ -2854,6 +3696,11 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_SHORT_SSID_LIST = 58,
WLAN_EID_EXT_HE_6GHZ_CAPA = 59,
WLAN_EID_EXT_UL_MU_POWER_CAPA = 60,
+ WLAN_EID_EXT_EHT_OPERATION = 106,
+ WLAN_EID_EXT_EHT_MULTI_LINK = 107,
+ WLAN_EID_EXT_EHT_CAPABILITY = 108,
+ WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
+ WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
};
/* Action category code */
@@ -2864,6 +3711,7 @@ enum ieee80211_category {
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -2878,6 +3726,8 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_S1G = 22,
+ WLAN_CATEGORY_PROTECTED_EHT = 37,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -2935,6 +3785,19 @@ enum ieee80211_mesh_actioncode {
WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
};
+/* Unprotected WNM action codes */
+enum ieee80211_unprotected_wnm_actioncode {
+ WLAN_UNPROTECTED_WNM_ACTION_TIM = 0,
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
+};
+
+/* Protected EHT action codes */
+enum ieee80211_protected_eht_actioncode {
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -2951,6 +3814,20 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
@@ -3012,7 +3889,7 @@ enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30,
WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31,
WLAN_PUB_ACTION_FTM_REQUEST = 32,
- WLAN_PUB_ACTION_FTM = 33,
+ WLAN_PUB_ACTION_FTM_RESPONSE = 33,
WLAN_PUB_ACTION_FILS_DISCOVERY = 34,
};
@@ -3041,6 +3918,11 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
+/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7)
+
/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
@@ -3347,6 +4229,8 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19)
#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20)
+#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2)
+
#define WLAN_MAX_KEY_LEN 32
#define WLAN_PMK_NAME_LEN 16
@@ -3357,6 +4241,7 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_TYPE_WFA_DPP 0x1A
#define WLAN_OUI_MICROSOFT 0x0050f2
#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
@@ -3423,16 +4308,21 @@ struct ieee80211_he_6ghz_capa {
* @hdr: the frame
*
* The qos ctrl bytes come after the frame_control, duration, seq_num
- * and 3 or 4 addresses of length ETH_ALEN.
- * 3 addr: 2 + 2 + 2 + 3*6 = 24
- * 4 addr: 2 + 2 + 2 + 4*6 = 30
+ * and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose
+ * between struct ieee80211_qos_hdr_4addr and struct ieee80211_qos_hdr.
*/
static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
{
- if (ieee80211_has_a4(hdr->frame_control))
- return (u8 *)hdr + 30;
+ union {
+ struct ieee80211_qos_hdr addr3;
+ struct ieee80211_qos_hdr_4addr addr4;
+ } *qos;
+
+ qos = (void *)hdr;
+ if (ieee80211_has_a4(qos->addr3.frame_control))
+ return (u8 *)&qos->addr4.qos_ctrl;
else
- return (u8 *)hdr + 24;
+ return (u8 *)&qos->addr3.qos_ctrl;
}
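
A hedged usage sketch; the caller is assumed to have checked ieee80211_is_data_qos() first, and example_get_tid() is illustrative, not kernel API:

/* Illustrative sketch: read the TID from the QoS Control field. */
static u8 example_get_tid(struct ieee80211_hdr *hdr)
{
	u8 *qc = ieee80211_get_qos_ctl(hdr);

	return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}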
/**
@@ -3484,6 +4374,44 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @skb: the skb to check, starting with the 802.11 header
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ __le16 fc = mgmt->frame_control;
+
+ /*
+ * IEEE 802.11 REVme D2.0 definition of bufferable MMPDU;
+ * note that this ignores the IBSS special case.
+ */
+ if (!ieee80211_is_mgmt(fc))
+ return false;
+
+ if (ieee80211_is_disassoc(fc) || ieee80211_is_deauth(fc))
+ return true;
+
+ if (!ieee80211_is_action(fc))
+ return false;
+
+ if (skb->len < offsetofend(typeof(*mgmt), u.action.u.ftm.action_code))
+ return true;
+
+ /* action frame - additionally check for non-bufferable FTM */
+
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
+ mgmt->u.action.category != WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+ return true;
+
+ if (mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_REQUEST ||
+ mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_RESPONSE)
+ return false;
+
+ return true;
+}
+
+/**
* _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
*/
@@ -3512,6 +4440,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*category != WLAN_CATEGORY_SELF_PROTECTED &&
*category != WLAN_CATEGORY_UNPROT_DMG &&
*category != WLAN_CATEGORY_VHT &&
+ *category != WLAN_CATEGORY_S1G &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
@@ -3547,6 +4476,36 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame
* @hdr: the frame
@@ -3617,12 +4576,11 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
/**
* ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
* @skb: the skb containing the frame, length will not be checked
- * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
*/
-static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
{
if (!skb_is_nonlinear(skb) &&
skb->len > (skb_network_offset(skb) + 2)) {
@@ -3701,6 +4659,40 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED &&
+ mgmt->u.action.u.wnm_timing_msr.action_code ==
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr))
+ return true;
+
+ return false;
+}
+
+static inline bool ieee80211_is_ftm(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ return false;
+
+ if (mgmt->u.action.u.ftm.action_code ==
+ WLAN_PUB_ACTION_FTM_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm))
+ return true;
+
+ return false;
+}
+
struct element {
u8 id;
u8 datalen;
@@ -3754,7 +4746,7 @@ static inline bool for_each_element_completed(const struct element *element,
return (const u8 *)element == (const u8 *)data + datalen;
}
-/**
+/*
* RSNX Capabilities:
* bits 0-3: Field length (n-1)
*/
@@ -3762,15 +4754,15 @@ static inline bool for_each_element_completed(const struct element *element,
#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
/*
- * reduced neighbor report, based on Draft P802.11ax_D5.0,
- * section 9.4.2.170
+ * reduced neighbor report, based on Draft P802.11ax_D6.1,
+ * section 9.4.2.170 and accepted contributions.
*/
#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03
#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 8
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 12
+#define IEEE80211_TBTT_INFO_TYPE_TBTT 0
+#define IEEE80211_TBTT_INFO_TYPE_MLD 1
#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
@@ -3780,11 +4772,649 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
+
struct ieee80211_neighbor_ap_info {
- u8 tbtt_info_hdr;
- u8 tbtt_info_len;
- u8 op_class;
- u8 channel;
+ u8 tbtt_info_hdr;
+ u8 tbtt_info_len;
+ u8 op_class;
+ u8 channel;
+} __packed;
+
+enum ieee80211_range_params_max_total_ltf {
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_4 = 0,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_8,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_16,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
+};
+
+/*
+ * reduced neighbor report, based on Draft P802.11be_D3.0,
+ * section 9.4.2.170.2.
+ */
+struct ieee80211_rnr_mld_params {
+ u8 mld_id;
+ __le16 params;
+} __packed;
+
+#define IEEE80211_RNR_MLD_PARAMS_LINK_ID 0x000F
+#define IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT 0x0FF0
+#define IEEE80211_RNR_MLD_PARAMS_UPDATES_INCLUDED 0x1000
+#define IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK 0x2000
+
+/* Format of the TBTT information element if it has 7, 8 or 9 bytes */
+struct ieee80211_tbtt_info_7_8_9 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+
+ /* The following element is optional, structure may not grow */
+ u8 bss_params;
+ s8 psd_20;
+} __packed;
+
+/* Format of the TBTT information element if it has >= 11 bytes */
+struct ieee80211_tbtt_info_ge_11 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+ __le32 short_ssid;
+
+ /* The following elements are optional, structure may grow */
+ u8 bss_params;
+ s8 psd_20;
+ struct ieee80211_rnr_mld_params mld_params;
+} __packed;
+
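
For orientation, a standalone sketch (not kernel code) of the entry sizes the two layouts above imply; a parser would, hypothetically, pick a layout from the per-entry length carried in tbtt_info_len.

#include <assert.h>
#include <stdint.h>

struct tbtt_7_8_9 {             /* stand-in for ieee80211_tbtt_info_7_8_9 */
        uint8_t tbtt_offset;
        uint8_t bssid[6];
        uint8_t bss_params;     /* optional */
        int8_t  psd_20;         /* optional */
} __attribute__((packed));

int main(void)
{
        /* the full layout is 9 bytes; 7- and 8-byte entries omit the tail */
        assert(sizeof(struct tbtt_7_8_9) == 9);
        /* ge_11: same 7-byte prefix plus a 4-byte short SSID, then options */
        assert(1 + 6 + 4 == 11);
        return 0;
}
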
+/* multi-link device */
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+
+#define IEEE80211_ML_CONTROL_TYPE 0x0007
+#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
+#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
+#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
+#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
+#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
+#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
+
+struct ieee80211_multi_link_elem {
+ __le16 control;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
+#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
+#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
+#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
+#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
+#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+
+#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
+#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
+#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+
+/*
+ * Described in P802.11be_D3.0
+ * dot11MSDTimerDuration should default to 5484 (i.e. 171.375)
+ * dot11MSDOFDMEDthreshold defaults to -72 (i.e. 0)
+ * dot11MSDTXOPMAX defaults to 1
+ */
+#define IEEE80211_MED_SYNC_DELAY_DEFAULT 0x10ac
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
+#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
+#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
+#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
+
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
+#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
+#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
+#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+
+struct ieee80211_mle_basic_common_info {
+ u8 len;
+ u8 mld_mac_addr[ETH_ALEN];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
+
+struct ieee80211_mle_preq_common_info {
+ u8 len;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in RECONF */
+
+struct ieee80211_mle_tdls_common_info {
+ u8 len;
+ u8 ap_mld_mac_addr[ETH_ALEN];
+} __packed;
+
+#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in PRIO_ACCESS */
+
+/**
+ * ieee80211_mle_common_size - check multi-link element common size
+ * @data: multi-link element, must already be checked for size using
+ * ieee80211_mle_size_ok()
+ */
+static inline u8 ieee80211_mle_common_size(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ u8 common = 0;
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ /*
+ * The length is the first octet pointed to by mle->variable, so no
+ * need to add anything
+ */
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ return common;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return sizeof(*mle) + common + mle->variable[0];
+}
+
+/**
+ * ieee80211_mle_get_link_id - returns the link ID
+ * @data: the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the BSS link ID can't be found, -1 will be returned
+ */
+static inline int ieee80211_mle_get_link_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
+ return -1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
+ * @data: pointer to the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the BSS parameter change count value can't be found (the presence bit
+ * for it is clear), -1 will be returned.
+ */
+static inline int
+ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
+ return -1;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the medium synchronization is not present, then the default value is
+ * returned.
+ */
+static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+ return IEEE80211_MED_SYNC_DELAY_DEFAULT;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_eml_cap - returns the EML capability
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the EML capability is not present, 0 will be returned.
+ */
+static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the MLD capabilities and operations field is not present, 0 will be
+ * returned.
+ */
+static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_id - returns the MLD ID
+ * @data: pointer to the multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the MLD ID is not present, 0 will be returned.
+ */
+static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+
+ return *common;
+}
+
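
The getters above all walk the same presence-bit layout: an optional field occupies space only when its bit in the control word is set, so a field's offset is the sum of the sizes of the present fields before it. A standalone sketch of that walk (not kernel code), with stand-in defines mirroring the IEEE80211_MLC_BASIC_PRES_* values:

#include <stdint.h>
#include <stdio.h>

#define PRES_LINK_ID          0x0010    /* 1 byte  */
#define PRES_BSS_PARAM_CH_CNT 0x0020    /* 1 byte  */
#define PRES_MED_SYNC_DELAY   0x0040    /* 2 bytes */
#define PRES_EML_CAPA         0x0080    /* 2 bytes */
#define PRES_MLD_CAPA_OP      0x0100    /* 2 bytes */

/* offset of 'field' past the fixed common info, given the control word */
static unsigned int offset_of_field(uint16_t control, uint16_t field)
{
        static const struct { uint16_t bit; unsigned int len; } fields[] = {
                { PRES_LINK_ID, 1 }, { PRES_BSS_PARAM_CH_CNT, 1 },
                { PRES_MED_SYNC_DELAY, 2 }, { PRES_EML_CAPA, 2 },
                { PRES_MLD_CAPA_OP, 2 },
        };
        unsigned int off = 0, i;

        for (i = 0; fields[i].bit != field; i++)
                if (control & fields[i].bit)
                        off += fields[i].len;
        return off;
}

int main(void)
{
        /* link ID and medium sync delay present: EML capa starts 3 bytes in */
        uint16_t control = PRES_LINK_ID | PRES_MED_SYNC_DELAY | PRES_EML_CAPA;

        printf("EML capability offset: %u\n",
               offset_of_field(control, PRES_EML_CAPA));        /* prints 3 */
        return 0;
}
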
+/**
+ * ieee80211_mle_size_ok - validate multi-link element size
+ * @data: pointer to the element data
+ * @len: length of the containing element
+ */
+static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u8 fixed = sizeof(*mle);
+ u8 common = 0;
+ bool check_common_len = false;
+ u16 control;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ check_common_len = true;
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
+ common += 1;
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ break;
+ default:
+ /* we don't know this type */
+ return true;
+ }
+
+ if (len < fixed + common)
+ return false;
+
+ if (!check_common_len)
+ return true;
+
+ /* if present, common length is the first octet there */
+ return mle->variable[0] >= common;
+}
+
+/**
+ * ieee80211_mle_type_ok - validate multi-link element type and size
+ * @data: pointer to the element data
+ * @type: expected type of the element
+ * @len: length of the containing element
+ */
+static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control;
+
+ if (!ieee80211_mle_size_ok(data, len))
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
+ return true;
+
+ return false;
+}
+
+enum ieee80211_mle_subelems {
+ IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
+};
+
+#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
+#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
+#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
+#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
+#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
+#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
+
+struct ieee80211_mle_per_sta_profile {
+ __le16 control;
+ u8 sta_info_len;
+ u8 variable[];
} __packed;
+/**
+ * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
+ * profile size
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ */
+static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ info_len += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ info_len += 2;
+ else
+ info_len += 1;
+ }
+ if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+ info_len += 1;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len <= len;
+}
+
+/**
+ * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
+ * parameter change count
+ * @prof: the per-STA profile, having been checked with
+ * ieee80211_mle_basic_sta_prof_size_ok() for the correct length
+ *
+ * Return: The BSS parameter change count value if present, 0 otherwise.
+ */
+static inline u8
+ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
+{
+ u16 control = le16_to_cpu(prof->control);
+ const u8 *pos = prof->variable;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
+ return 0;
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ pos += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ pos += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ pos += 2;
+ else
+ pos += 1;
+ }
+
+ return *pos;
+}
+
+#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
+
+/**
+ * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
+ * element sta profile size.
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ */
+static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += ETH_ALEN;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
+ info_len += 2;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+ u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+ if (len < fixed)
+ return false;
+
+ control = t2l->control;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+ elem_len += 2;
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+ elem_len += 3;
+
+ if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+ u8 bm_size;
+
+ elem_len += 1;
+ if (len < fixed + elem_len)
+ return false;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ bm_size = 1;
+ else
+ bm_size = 2;
+
+ elem_len += hweight8(t2l->optional[0]) * bm_size;
+ }
+
+ return len >= fixed + elem_len;
+}
+
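
A standalone sketch (not kernel code) of the length rule above: outside the default-mapping case, each TID flagged in the first optional octet contributes one link-mapping field of bm_size bytes, which is what the hweight8() multiplication counts.

#include <assert.h>
#include <stdint.h>

static unsigned int popcount8(uint8_t v)        /* same result as hweight8() */
{
        unsigned int n = 0;

        for (; v; v &= v - 1)
                n++;
        return n;
}

int main(void)
{
        uint8_t presence = 0x0f;        /* TIDs 0-3 have a link mapping */

        /* with 2-byte link maps, four mapped TIDs add 8 bytes */
        assert(popcount8(presence) * 2 == 8);
        return 0;
}
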
+#define for_each_mle_subelement(_elem, _data, _len) \
+ if (ieee80211_mle_size_ok(_data, _len)) \
+ for_each_element(_elem, \
+ _data + ieee80211_mle_common_size(_data),\
+ _len - ieee80211_mle_common_size(_data))
+
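
A hedged usage sketch for the iterator (kernel-style fragment, illustrative only); it assumes elem and len describe a multi-link element that already passed ieee80211_mle_type_ok() for the BASIC type.

        const struct element *sub;

        for_each_mle_subelement(sub, elem, len) {
                if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
                        continue;
                if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
                                                          sub->datalen))
                        continue;
                /* a per-STA profile of sub->datalen bytes starts at sub->data */
        }
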
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 95c831162212..140f61ec0f5f 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -44,6 +44,13 @@
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
+/* Duration in superframe order */
+#define IEEE802154_MAX_SCAN_DURATION 14
+#define IEEE802154_ACTIVE_SCAN_DURATION 15
+/* Superframe duration in slots */
+#define IEEE802154_SUPERFRAME_PERIOD 16
+/* Various periods expressed in symbols */
+#define IEEE802154_SLOT_PERIOD 60
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
@@ -134,18 +141,46 @@ enum {
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
-
+ /* The requested operation failed. */
+ IEEE802154_MAC_ERROR = 0x1,
+ /* The requested operation has been cancelled. */
+ IEEE802154_CANCELLED = 0x2,
+ /*
+ * Device is ready to poll the coordinator for data in a non beacon
+ * enabled PAN.
+ */
+ IEEE802154_READY_FOR_POLL = 0x3,
+ /* Wrong frame counter. */
+ IEEE802154_COUNTER_ERROR = 0xdb,
+ /*
+ * The frame does not conform to the incoming key usage policy checking
+ * procedure.
+ */
+ IEEE802154_IMPROPER_KEY_TYPE = 0xdc,
+ /*
+ * The frame does not conform to the incoming security level usage
+ * policy checking procedure.
+ */
+ IEEE802154_IMPROPER_SECURITY_LEVEL = 0xdd,
+ /* Secured frame received with an empty Frame Version field. */
+ IEEE802154_UNSUPPORTED_LEGACY = 0xde,
+ /*
+ * A secured frame is received or must be sent, but security is not
+ * enabled in the device, or the Auxiliary Security Header has a
+ * security level of zero in it.
+ */
+ IEEE802154_UNSUPPORTED_SECURITY = 0xdf,
/* The beacon was lost following a synchronization request. */
- IEEE802154_BEACON_LOSS = 0xe0,
+ IEEE802154_BEACON_LOST = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
- IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ IEEE802154_CHANNEL_ACCESS_FAILURE = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
- IEEE802154_DENINED = 0xe2,
+ IEEE802154_DENIED = 0xe2,
/* The attempt to disable the transceiver has failed. */
- IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ IEEE802154_DISABLE_TRX_FAILURE = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
@@ -185,9 +220,9 @@ enum {
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
- IEEE802154_PANID_CONFLICT = 0xee,
+ IEEE802154_PAN_ID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
- IEEE802154_REALIGMENT = 0xef,
+ IEEE802154_REALIGNMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
@@ -203,12 +238,73 @@ enum {
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
- IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ IEEE802154_UNSUPPORTED_ATTRIBUTE = 0xf4,
+ /* Missing source or destination address or address mode. */
+ IEEE802154_INVALID_ADDRESS = 0xf5,
+ /*
+ * MLME asked to turn the receiver on, but the on time duration is too
+ * big compared to the macBeaconOrder.
+ */
+ IEEE802154_ON_TIME_TOO_LONG = 0xf6,
+ /*
+ * MLME asked to turn the receiver on, but the request was delayed for
+ * too long before getting processed.
+ */
+ IEEE802154_PAST_TIME = 0xf7,
+ /*
+ * The StartTime parameter is nonzero, and the MLME is not currently
+ * tracking the beacon of the coordinator through which it is
+ * associated.
+ */
+ IEEE802154_TRACKING_OFF = 0xf8,
+ /*
+ * The index inside the hierarchical values in PIBAttribute is out of
+ * range.
+ */
+ IEEE802154_INVALID_INDEX = 0xf9,
+ /*
+ * The number of PAN descriptors discovered during a scan has been
+ * reached.
+ */
+ IEEE802154_LIMIT_REACHED = 0xfa,
+ /*
+ * The PIBAttribute parameter specifies an attribute that is a read-only
+ * attribute.
+ */
+ IEEE802154_READ_ONLY = 0xfb,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+ /* The outgoing superframe overlaps the incoming superframe. */
+ IEEE802154_SUPERFRAME_OVERLAP = 0xfd,
+ /* Any other error situation. */
+ IEEE802154_SYSTEM_ERROR = 0xff,
+};
+
+/**
+ * enum ieee802154_filtering_level - Filtering levels applicable to a PHY
+ *
+ * @IEEE802154_FILTERING_NONE: No filtering at all, what is received is
+ * forwarded to the softMAC
+ * @IEEE802154_FILTERING_1_FCS: First filtering level, frames with an invalid
+ * FCS should be dropped
+ * @IEEE802154_FILTERING_2_PROMISCUOUS: Second filtering level, promiscuous
+ * mode as described in the spec, identical in terms of filtering to
+ * level one on the PHY side, but at the MAC level the frame should be
+ * forwarded to the upper layer directly
+ * @IEEE802154_FILTERING_3_SCAN: Third filtering level, scan related, where
+ * only beacons must be processed, all remaining traffic gets dropped
+ * @IEEE802154_FILTERING_4_FRAME_FIELDS: Fourth filtering level actually
+ * enforcing the validity of the content of the frame with various checks
+ */
+enum ieee802154_filtering_level {
+ IEEE802154_FILTERING_NONE,
+ IEEE802154_FILTERING_1_FCS,
+ IEEE802154_FILTERING_2_PROMISCUOUS,
+ IEEE802154_FILTERING_3_SCAN,
+ IEEE802154_FILTERING_4_FRAME_FIELDS,
};
/* frame control handling */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index bf5c5f32c65e..10a1e81434cb 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -48,9 +48,15 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
+ case ARPHRD_IP6GRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
case ARPHRD_RAWIP:
+ case ARPHRD_PIMREG:
+ /* PPP adds its l2 header automatically in ppp_start_xmit().
+ * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
+ */
+ case ARPHRD_PPP:
return false;
default:
return true;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 6479a38e52fa..3ff96ae31bf6 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -19,7 +19,14 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
- } u;
+ } src;
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ unsigned char mac_addr[ETH_ALEN];
+ } dst;
__be16 proto;
__u16 vid;
};
@@ -50,16 +57,26 @@ struct br_ip_list {
#define BR_MRP_AWARE BIT(17)
#define BR_MRP_LOST_CONT BIT(18)
#define BR_MRP_LOST_IN_CONT BIT(19)
+#define BR_TX_FWD_OFFLOAD BIT(20)
+#define BR_PORT_LOCKED BIT(21)
+#define BR_PORT_MAB BIT(22)
+#define BR_NEIGH_VLAN_SUPPRESS BIT(23)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
-extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+struct net_bridge;
+void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
+ unsigned int cmd, struct ifreq *ifr,
+ void __user *uarg));
+int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
+ struct ifreq *ifr, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
#else
@@ -78,6 +95,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+
+static inline bool br_multicast_has_router_adjacent(struct net_device *dev,
+ int proto)
+{
+ return true;
+}
+
static inline bool br_multicast_enabled(const struct net_device *dev)
{
return false;
@@ -95,6 +119,11 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
+int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+bool br_mst_enabled(const struct net_device *dev);
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids);
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
@@ -121,6 +150,28 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
{
return -EINVAL;
}
+
+static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -EINVAL;
+}
+
+static inline bool br_mst_enabled(const struct net_device *dev)
+{
+ return false;
+}
+
+static inline int br_mst_get_info(const struct net_device *dev, u16 msti,
+ unsigned long *vids)
+{
+ return -EINVAL;
+}
+static inline int br_mst_get_state(const struct net_device *dev, u16 msti,
+ u8 *state)
+{
+ return -EINVAL;
+}
#endif
#if IS_ENABLED(CONFIG_BRIDGE)
@@ -129,6 +180,8 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
__u16 vid);
void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
+u8 br_port_get_stp_state(const struct net_device *dev);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -147,6 +200,16 @@ br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
return false;
}
+
+static inline u8 br_port_get_stp_state(const struct net_device *dev)
+{
+ return BR_STATE_DISABLED;
+}
+
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index d984694c384d..07f9b660b741 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -21,11 +21,13 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
+#include <net/net_trackers.h>
#include <uapi/linux/if_eql.h>
typedef struct slave {
struct list_head list;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
long priority;
long priority_bps;
long priority_Bps;
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
deleted file mode 100644
index 52224de798aa..000000000000
--- a/include/linux/if_frad.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- */
-#ifndef _FRAD_H_
-#define _FRAD_H_
-
-#include <uapi/linux/if_frad.h>
-
-
-#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE)
-
-/* these are the fields of an RFC 1490 header */
-struct frhdr
-{
- unsigned char control;
-
- /* for IP packets, this can be the NLPID */
- unsigned char pad;
-
- unsigned char NLPID;
- unsigned char OUI[3];
- __be16 PID;
-
-#define IP_NLPID pad
-} __packed;
-
-/* see RFC 1490 for the definition of the following */
-#define FRAD_I_UI 0x03
-
-#define FRAD_P_PADDING 0x00
-#define FRAD_P_Q933 0x08
-#define FRAD_P_SNAP 0x80
-#define FRAD_P_CLNP 0x81
-#define FRAD_P_IP 0xCC
-
-struct dlci_local
-{
- struct net_device *master;
- struct net_device *slave;
- struct dlci_conf config;
- int configured;
- struct list_head list;
-
- /* callback function */
- void (*receive)(struct sk_buff *skb, struct net_device *);
-};
-
-struct frad_local
-{
- /* devices which this FRAD is slaved to */
- struct net_device *master[CONFIG_DLCI_MAX];
- short dlci[CONFIG_DLCI_MAX];
-
- struct frad_conf config;
- int configured; /* has this device been configured */
- int initialized; /* mem_start, port, irq set ? */
-
- /* callback functions */
- int (*activate)(struct net_device *, struct net_device *);
- int (*deactivate)(struct net_device *, struct net_device *);
- int (*assoc)(struct net_device *, struct net_device *);
- int (*deassoc)(struct net_device *, struct net_device *);
- int (*dlci_conf)(struct net_device *, struct net_device *, int get);
-
- /* fields that are used by the Sangoma SDLA cards */
- struct timer_list timer;
- struct net_device *dev;
- int type; /* adapter type */
- int state; /* state of the S502/8 control latch */
- int buffer; /* current buffer for S508 firmware */
-};
-
-#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
-
-extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
-
-#endif
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
new file mode 100644
index 000000000000..0404f5bf4f30
--- /dev/null
+++ b/include/linux/if_hsr.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_HSR_H_
+#define _LINUX_IF_HSR_H_
+
+#include <linux/types.h>
+
+struct net_device;
+
+/* used to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+#define HSR_HLEN 6
+
+#if IS_ENABLED(CONFIG_HSR)
+extern bool is_hsr_master(struct net_device *dev);
+extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+#else
+static inline bool is_hsr_master(struct net_device *dev)
+{
+ return false;
+}
+static inline int hsr_get_version(struct net_device *dev,
+ enum hsr_version *ver)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HSR */
+
+#endif /*_LINUX_IF_HSR_H_*/
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a367ead4bf4b..523025106a64 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -21,6 +21,7 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
+ netdevice_tracker dev_tracker;
void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
@@ -30,6 +31,7 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
unsigned int macaddr_count;
+ u32 bc_queue_len_req;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
@@ -42,13 +44,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
if (likely(success)) {
struct vlan_pcpu_stats *pcpu_stats;
- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, len);
if (multicast)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
+ put_cpu_ptr(vlan->pcpu_stats);
} else {
this_cpu_inc(vlan->pcpu_stats->rx_errors);
}
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 96d40942e5a3..c87efd333faa 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -4,8 +4,6 @@
*
* This file supplies definitions required by the PPP over L2TP driver
* (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOL2TP_H
#define __LINUX_IF_PPPOL2TP_H
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 69e813bcb947..ff3beda1312c 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -5,8 +5,6 @@
*
* This file supplies definitions required by the PPP over Ethernet driver
* (pppox.c). All version information wrt this file is located in pppox.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOX_H
#define __LINUX_IF_PPPOX_H
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 9661416a9bb4..839d1e48b85e 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -1,55 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_IF_RMNET_H_
#define _LINUX_IF_RMNET_H_
+#include <linux/types.h>
+
struct rmnet_map_header {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u8 pad_len:6;
- u8 reserved_bit:1;
- u8 cd_bit:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u8 cd_bit:1;
- u8 reserved_bit:1;
- u8 pad_len:6;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 mux_id;
- __be16 pkt_len;
+ u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */
+ u8 mux_id;
+ __be16 pkt_len; /* Length of packet, including pad */
} __aligned(1);
+/* rmnet_map_header flags field:
+ * PAD_LEN: number of pad bytes following packet data
+ * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * NEXT_HEADER: 1 = packet contains V5 CSUM header; 0 = no V5 CSUM header
+ */
+#define MAP_PAD_LEN_MASK GENMASK(5, 0)
+#define MAP_NEXT_HEADER_FLAG BIT(6)
+#define MAP_CMD_FLAG BIT(7)
+
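
The rewrite above trades endian-dependent bitfields for a flags byte plus masks. A standalone sketch (not kernel code) of recovering the old field values; the mask values are spelled out since GENMASK() and BIT() are kernel macros.

#include <assert.h>
#include <stdint.h>

#define PAD_LEN_MASK     0x3f   /* GENMASK(5, 0) */
#define NEXT_HEADER_FLAG 0x40   /* BIT(6) */
#define CMD_FLAG         0x80   /* BIT(7) */

int main(void)
{
        uint8_t flags = 0x83;   /* a MAP command packet with 3 pad bytes */

        assert((flags & PAD_LEN_MASK) == 3);
        assert(flags & CMD_FLAG);
        assert(!(flags & NEXT_HEADER_FLAG));
        return 0;
}
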
struct rmnet_map_dl_csum_trailer {
- u8 reserved1;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u8 valid:1;
- u8 reserved2:7;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u8 reserved2:7;
- u8 valid:1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u16 csum_start_offset;
- u16 csum_length;
- __be16 csum_value;
+ u8 reserved1;
+ u8 flags; /* MAP_CSUM_DL_VALID_FLAG */
+ __be16 csum_start_offset;
+ __be16 csum_length;
+ __sum16 csum_value;
} __aligned(1);
+/* rmnet_map_dl_csum_trailer flags field:
+ * VALID: 1 = checksum and length valid; 0 = ignore them
+ */
+#define MAP_CSUM_DL_VALID_FLAG BIT(0)
+
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u16 csum_insert_offset:14;
- u16 udp_ind:1;
- u16 csum_enabled:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u16 csum_enabled:1;
- u16 udp_ind:1;
- u16 csum_insert_offset:14;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
+ __be16 csum_info; /* MAP_CSUM_UL_* */
} __aligned(1);
+/* csum_info field:
+ * OFFSET: where (offset in bytes) to insert computed checksum
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
+ * ENABLED: 1 = checksum computation requested
+ */
+#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
+#define MAP_CSUM_UL_UDP_FLAG BIT(14)
+#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+ u8 header_info;
+ u8 csum_info;
+ __be16 reserved;
+} __aligned(1);
+
+/* v5 header_info field
+ * NEXT_HEADER: represents whether there is any next header
+ * HEADER_TYPE: represents the type of this header
+ *
+ * csum_info field
+ * CSUM_VALID_OR_REQ:
+ * 1 = for UL, checksum computation is requested.
+ * 1 = for DL, validated the checksum and has found it valid
+ */
+
+#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0)
+#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1)
+#define MAPV5_CSUMINFO_VALID_FLAG BIT(7)
+
+#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2
#endif /* !(_LINUX_IF_RMNET_H_) */
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 915a187cfabd..553552fa635c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -2,14 +2,18 @@
#ifndef _LINUX_IF_TAP_H_
#define _LINUX_IF_TAP_H_
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+struct file;
+struct socket;
+
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
-struct file;
-struct socket;
static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
@@ -20,9 +24,6 @@ static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
}
#endif /* CONFIG_TAP */
-#include <net/sock.h>
-#include <linux/skb_array.h>
-
/*
* Maximum times a tap device can be opened. This can be used to
* configure the number of receive queue, e.g. for multiqueue virtio.
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index add607943c95..cdc684e04a2f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -12,11 +12,11 @@
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -162,8 +162,8 @@ struct team_option {
bool per_port;
unsigned int array_size; /* != 0 means the option is array */
enum team_option_type type;
- int (*init)(struct team *team, struct team_option_inst_info *info);
- int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+ void (*init)(struct team *team, struct team_option_inst_info *info);
+ void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
@@ -189,6 +189,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
@@ -208,6 +210,7 @@ struct team {
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
bool port_mtu_change_allowed;
+ bool notifier_ctx;
struct {
unsigned int count;
unsigned int interval; /* in ms */
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 5bda8cf457b6..043d442994b0 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -27,35 +27,54 @@ struct tun_xdp_hdr {
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
-bool tun_is_xdp_frame(void *ptr);
-void *tun_xdp_to_ptr(void *ptr);
-void *tun_ptr_to_xdp(void *ptr);
+
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+}
+
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+
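
Inlining these helpers does not change the trick they implement: ptr_ring entries are aligned pointers, so bit 0 is free to mark XDP frames without extra storage. A standalone sketch (not kernel code), with 0x1 mirroring the uapi TUN_XDP_FLAG value:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define XDP_FLAG 0x1UL  /* mirrors TUN_XDP_FLAG */

static void *tag(void *frame)  { return (void *)((uintptr_t)frame | XDP_FLAG); }
static int  is_tagged(void *p) { return (uintptr_t)p & XDP_FLAG; }
static void *untag(void *p)    { return (void *)((uintptr_t)p & ~XDP_FLAG); }

int main(void)
{
        long *frame = malloc(sizeof(*frame));   /* malloc memory is aligned */
        void *entry = tag(frame);

        assert(is_tagged(entry));
        assert(untag(entry) == (void *)frame);
        free(frame);
        return 0;
}
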
void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
struct file;
struct socket;
+
static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
-static inline void *tun_xdp_to_ptr(void *ptr)
+
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
return NULL;
}
-static inline void *tun_ptr_to_xdp(void *ptr)
+
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
+
static inline void tun_ptr_free(void *ptr)
{
}
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 41a518336673..c1645c86eed9 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -46,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
@@ -60,6 +62,14 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + vlan_eth_hdr()
+ */
+static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->data;
+}
+
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
@@ -74,7 +84,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present)
+#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
@@ -116,11 +126,11 @@ static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
* @tx_dropped: number of tx drops
*/
struct vlan_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
@@ -162,6 +172,7 @@ struct netpoll;
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
@@ -177,6 +188,8 @@ struct vlan_dev_priv {
u16 flags;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
+
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
@@ -346,7 +359,8 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
/* Move the mac header sans proto to the beginning of the new header. */
if (likely(mac_len > ETH_TLEN))
memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
- skb->mac_header -= VLAN_HLEN;
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header -= VLAN_HLEN;
veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
@@ -394,7 +408,7 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
@@ -423,7 +437,7 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
@@ -443,7 +457,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
@@ -466,7 +480,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
*/
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
- skb->vlan_present = 0;
+ skb->vlan_all = 0;
}
/**
@@ -478,9 +492,7 @@ static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
*/
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
- dst->vlan_present = src->vlan_present;
- dst->vlan_proto = src->vlan_proto;
- dst->vlan_tci = src->vlan_tci;
+ dst->vlan_all = src->vlan_all;
}
/*
@@ -514,7 +526,6 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
{
skb->vlan_proto = vlan_proto;
skb->vlan_tci = vlan_tci;
- skb->vlan_present = 1;
}
/**
@@ -526,10 +537,10 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
- return -EINVAL;
+ return -ENODATA;
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
@@ -550,7 +561,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
return 0;
} else {
*vlan_tci = 0;
- return -EINVAL;
+ return -ENODATA;
}
}
@@ -626,6 +637,23 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
+static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
+ __be16 type, int *depth)
+{
+ int maclen;
+
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ if (type) {
+ if (!pskb_may_pull(skb, maclen))
+ type = 0;
+ else if (depth)
+ *depth = maclen;
+ }
+ return type;
+}
+
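
A hedged usage sketch (kernel-style fragment, illustrative only) for the helper above: a hypothetical TX-path caller that needs both the real ethertype and the full MAC header length of a possibly VLAN-tagged skb.

        int depth;
        __be16 proto = vlan_get_protocol_and_depth(skb, skb->protocol, &depth);

        if (!proto)
                return -EINVAL; /* the MAC header could not be pulled */
        /* the complete MAC header, depth bytes, is now linear at skb->data */
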
/* A getter for the SKB protocol field which will handle VLAN tags consistently
* whether VLAN acceleration is enabled or not.
*/
@@ -675,6 +703,27 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
}
/**
+ * vlan_remove_tag - remove outer VLAN tag from payload
+ * @skb: skbuff to remove tag from
+ * @vlan_tci: buffer to store value
+ *
+ * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
+ * pointing at the MAC header.
+ *
+ * Returns a new pointer to skb->data, or NULL on failure to pull.
+ */
+static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+
+ *vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ vlan_set_encap_proto(skb, vhdr);
+ return __skb_pull(skb, VLAN_HLEN);
+}
+
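
A hedged usage sketch (kernel-style fragment, illustrative only): a hypothetical receive path strips the outer tag after confirming the header is linear and actually VLAN-tagged, reusing skb_vlan_eth_hdr() from earlier in this file.

        u16 tci;

        if (pskb_may_pull(skb, VLAN_ETH_HLEN) &&
            eth_type_vlan(skb_vlan_eth_hdr(skb)->h_vlan_proto))
                vlan_remove_tag(skb, &tci);
        /* skb->data now points at the untagged Ethernet header */
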
+/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
@@ -710,7 +759,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return false;
- veh = (struct vlan_ethhdr *)skb->data;
+ veh = skb_vlan_eth_hdr(skb);
protocol = veh->h_vlan_encapsulated_proto;
}
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 64ce8cd1cfaf..5171231f70a8 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
@@ -38,12 +39,9 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[];
+ __be32 sl_addr[] __counted_by(sl_max);
};
-#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
- (count) * sizeof(__be32))
-
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
@@ -121,10 +119,10 @@ extern int ip_mc_source(int add, int omode, struct sock *sk,
struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
- struct ip_msfilter __user *optval, int __user *optlen);
+ sockptr_t optval, sockptr_t optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct sockaddr_storage __user *p);
-extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
+ sockptr_t optval, size_t offset);
+extern int ip_mc_sf_allow(const struct sock *sk, __be32 local, __be32 rmt,
int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
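With IP_SFLSIZE() removed and sl_addr annotated __counted_by(sl_max), allocations are expected to size the flexible array with struct_size() and to set sl_max before the array is indexed. A hypothetical allocation sketch (function name invented):

static struct ip_sf_socklist *example_alloc_socklist(unsigned int max)
{
	struct ip_sf_socklist *sl;

	sl = kzalloc(struct_size(sl, sl_addr, max), GFP_KERNEL);
	if (sl)
		sl->sl_max = max;	/* the __counted_by(sl_max) bound */
	return sl;
}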
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index a3a838dcf8e4..719cf9cc6e1a 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -8,6 +8,8 @@
#ifndef __AD_SIGMA_DELTA_H__
#define __AD_SIGMA_DELTA_H__
+#include <linux/iio/iio.h>
+
enum ad_sigma_delta_mode {
AD_SD_MODE_CONTINUOUS = 0,
AD_SD_MODE_SINGLE = 1,
@@ -26,31 +28,40 @@ struct ad_sd_calib_data {
};
struct ad_sigma_delta;
+struct device;
struct iio_dev;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
* @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
* @postprocess_sample: Is called for each sampled data word, can be used to
* modify or drop the sample data; may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
* @data_reg: Address of the data register, if 0 the default address of 0x3 will
* be used.
* @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*disable_all)(struct ad_sigma_delta *);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int status_ch_mask;
unsigned int data_reg;
unsigned long irq_flags;
+ unsigned int num_slots;
};
/**
@@ -75,12 +86,23 @@ struct ad_sigma_delta {
uint8_t comm;
const struct ad_sigma_delta_info *info;
+ unsigned int active_slots;
+ unsigned int current_slot;
+ unsigned int num_slots;
+ bool status_appended;
+ /* map slots to channels in order to know what to expect from devices */
+ unsigned int *slots;
+ uint8_t *samples_buf;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 'tx_buf' is up to 32 bits.
+ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ * rounded to 16 bytes to take into account padding.
*/
- uint8_t data[4] ____cacheline_aligned;
+ uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN);
+ uint8_t rx_buf[16] __aligned(8);
};
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
@@ -92,6 +114,29 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
return 0;
}
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ int ret;
+
+ if (sd->info->append_status) {
+ ret = sd->info->append_status(sd, append);
+ if (ret < 0)
+ return ret;
+
+ sd->status_appended = append;
+ }
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+ if (sd->info->disable_all)
+ return sd->info->disable_all(sd);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
@@ -128,8 +173,7 @@ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
struct spi_device *spi, const struct ad_sigma_delta_info *info);
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
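A hypothetical hook-up of the new callbacks and sequencer fields, assuming a device whose status register stores the channel number in its low four bits (all example_* names are invented):

static int example_disable_all(struct ad_sigma_delta *sd)
{
	/* a real driver would clear the channel-enable bits here */
	return 0;
}

static const struct ad_sigma_delta_info example_info = {
	.disable_all	= example_disable_all,
	.has_registers	= true,
	.status_ch_mask	= GENMASK(3, 0),
	.num_slots	= 4,
};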
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
deleted file mode 100644
index c5d48e1c2d36..000000000000
--- a/include/linux/iio/adc/adi-axi-adc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Analog Devices Generic AXI ADC IP core driver/library
- * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
- *
- * Copyright 2012-2020 Analog Devices Inc.
- */
-#ifndef __ADI_AXI_ADC_H__
-#define __ADI_AXI_ADC_H__
-
-struct device;
-struct iio_chan_spec;
-
-/**
- * struct adi_axi_adc_chip_info - Chip specific information
- * @name Chip name
- * @id Chip ID (usually product ID)
- * @channels Channel specifications of type @struct axi_adc_chan_spec
- * @num_channels Number of @channels
- * @scale_table Supported scales by the chip; tuples of 2 ints
- * @num_scales Number of scales in the table
- * @max_rate Maximum sampling rate supported by the device
- */
-struct adi_axi_adc_chip_info {
- const char *name;
- unsigned int id;
-
- const struct iio_chan_spec *channels;
- unsigned int num_channels;
-
- const unsigned int (*scale_table)[2];
- int num_scales;
-
- unsigned long max_rate;
-};
-
-/**
- * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
- * @chip_info chip info details for the client ADC
- * @preenable_setup op to run in the client before enabling the AXI ADC
- * @reg_access IIO debugfs_reg_access hook for the client ADC
- * @read_raw IIO read_raw hook for the client ADC
- * @write_raw IIO write_raw hook for the client ADC
- */
-struct adi_axi_adc_conv {
- const struct adi_axi_adc_chip_info *chip_info;
-
- int (*preenable_setup)(struct adi_axi_adc_conv *conv);
- int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
- unsigned int writeval, unsigned int *readval);
- int (*read_raw)(struct adi_axi_adc_conv *conv,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask);
- int (*write_raw)(struct adi_axi_adc_conv *conv,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask);
-};
-
-struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
- size_t sizeof_priv);
-
-void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
-
-#endif
diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
new file mode 100644
index 000000000000..aa21b032e861
--- /dev/null
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code shared between the different Qualcomm PMIC voltage ADCs
+ */
+
+#ifndef QCOM_VADC_COMMON_H
+#define QCOM_VADC_COMMON_H
+
+#include <linux/math.h>
+#include <linux/types.h>
+
+#define VADC_CONV_TIME_MIN_US 2000
+#define VADC_CONV_TIME_MAX_US 2100
+
+/* Min ADC code represents 0V */
+#define VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define VADC_MAX_ADC_CODE 0xa800
+
+#define VADC_ABSOLUTE_RANGE_UV 625000
+#define VADC_RATIOMETRIC_RANGE 1800
+
+#define VADC_DEF_PRESCALING 0 /* 1:1 */
+#define VADC_DEF_DECIMATION 0 /* 512 */
+#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
+
+#define VADC_DECIMATION_MIN 512
+#define VADC_DECIMATION_MAX 4096
+#define ADC5_DEF_VBAT_PRESCALING 1 /* 1:3 */
+#define ADC5_DECIMATION_SHORT 250
+#define ADC5_DECIMATION_MEDIUM 420
+#define ADC5_DECIMATION_LONG 840
+/* Default decimation - 1024 for rev2, 840 for pmic5 */
+#define ADC5_DECIMATION_DEFAULT 2
+#define ADC5_DECIMATION_SAMPLES_MAX 3
+
+#define VADC_HW_SETTLE_DELAY_MAX 10000
+#define VADC_HW_SETTLE_SAMPLES_MAX 16
+#define VADC_AVG_SAMPLES_MAX 512
+#define ADC5_AVG_SAMPLES_MAX 16
+
+#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
+#define PMIC5_SMB_TEMP_CONSTANT 419400
+#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
+
+#define PMI_CHG_SCALE_1 -138890
+#define PMI_CHG_SCALE_2 391750000000LL
+
+#define VADC5_MAX_CODE 0x7fff
+#define ADC5_FULL_SCALE_CODE 0x70e4
+#define ADC5_USR_DATA_CHECK 0x8000
+
+#define R_PU_100K 100000
+#define RATIO_MAX_ADC7 BIT(14)
+
+/*
+ * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
+ * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
+ * calibration.
+ */
+enum vadc_calibration {
+ VADC_CALIB_ABSOLUTE = 0,
+ VADC_CALIB_RATIOMETRIC
+};
+
+/**
+ * struct vadc_linear_graph - Represent ADC characteristics.
+ * @dy: numerator slope to calculate the gain.
+ * @dx: denominator slope to calculate the gain.
+ * @gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are
+ * computed to calibrate the device.
+ */
+struct vadc_linear_graph {
+ s32 dy;
+ s32 dx;
+ s32 gnd;
+};
+
+/**
+ * enum vadc_scale_fn_type - Scaling function to convert ADC code to
+ * physical scaled units for the channel.
+ * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * Uses a mapping table with 100K pullup.
+ * SCALE_PMIC_THERM: Returns result in millidegrees Celsius.
+ * SCALE_XOTHERM: Returns XO thermistor temperature in millidegC.
+ * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
+ * voltage (uV) with hardware-applied offset/slope values to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
+ * lookup table. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor temperature in millidegC
+ * using 100k pullup. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * lookup table for PMIC7. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Celsius.
+ * The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Celsius.
+ * The hardware applies offset/slope to adc code. This is for PMIC7.
+ * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
+ * charger temperature.
+ * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
+ * SMB1390 temperature.
+ */
+enum vadc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_THERM_100K_PULLUP,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_PMI_CHG_TEMP,
+ SCALE_HW_CALIB_DEFAULT,
+ SCALE_HW_CALIB_THERM_100K_PULLUP,
+ SCALE_HW_CALIB_XOTHERM,
+ SCALE_HW_CALIB_THERM_100K_PU_PM7,
+ SCALE_HW_CALIB_PMIC_THERM,
+ SCALE_HW_CALIB_PMIC_THERM_PM7,
+ SCALE_HW_CALIB_PM5_CHG_TEMP,
+ SCALE_HW_CALIB_PM5_SMB_TEMP,
+ SCALE_HW_CALIB_INVALID,
+};
+
+struct adc5_data {
+ const u32 full_scale_code_volt;
+ const u32 full_scale_code_cur;
+ const struct adc5_channels *adc_chans;
+ const struct iio_info *info;
+ unsigned int *decimation;
+ unsigned int *hw_settle_1;
+ unsigned int *hw_settle_2;
+};
+
+int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
+ const struct vadc_linear_graph *calib_graph,
+ const struct u32_fract *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec);
+
+struct qcom_adc5_scale_type {
+ int (*scale_fn)(const struct u32_fract *prescale,
+ const struct adc5_data *data, u16 adc_code, int *result);
+};
+
+int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
+ unsigned int prescale_ratio,
+ const struct adc5_data *data,
+ u16 adc_code, int *result_mdec);
+
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp);
+
+u16 qcom_adc_tm5_gen2_temp_res_scale(int temp);
+
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den);
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle);
+
+int qcom_adc5_avg_samples_from_dt(u32 value);
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation);
+
+int qcom_vadc_decimation_from_dt(u32 value);
+
+#endif /* QCOM_VADC_COMMON_H */
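A hypothetical consumer of qcom_vadc_scale(), built only on the declarations above; the 1:1 prescale and absolute calibration are assumptions of this sketch:

static int example_code_to_mdec(u16 adc_code,
				const struct vadc_linear_graph *graph)
{
	static const struct u32_fract prescale = {
		.numerator = 1,
		.denominator = 1,
	};
	int result_mdec, ret;

	ret = qcom_vadc_scale(SCALE_DEFAULT, graph, &prescale,
			      true, adc_code, &result_mdec);
	return ret ?: result_mdec;
}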
diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h
new file mode 100644
index 000000000000..6eecb435488f
--- /dev/null
+++ b/include/linux/iio/afe/rescale.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Axentia Technologies AB
+ */
+
+#ifndef __IIO_RESCALE_H__
+#define __IIO_RESCALE_H__
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct device;
+struct rescale;
+
+struct rescale_cfg {
+ enum iio_chan_type type;
+ int (*props)(struct device *dev, struct rescale *rescale);
+};
+
+struct rescale {
+ const struct rescale_cfg *cfg;
+ struct iio_channel *source;
+ struct iio_chan_spec chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ bool chan_processed;
+ s32 numerator;
+ s32 denominator;
+ s32 offset;
+};
+
+int rescale_process_scale(struct rescale *rescale, int scale_type,
+ int *val, int *val2);
+int rescale_process_offset(struct rescale *rescale, int scale_type,
+ int scale, int scale2, int schan_off,
+ int *val, int *val2);
+#endif /* __IIO_RESCALE_H__ */
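A hypothetical rescale_cfg hook-up; the fixed 3:1 ratio is a placeholder, a real props() callback would derive it from device properties:

static int example_props(struct device *dev, struct rescale *rescale)
{
	rescale->numerator = 3;
	rescale->denominator = 1;
	return 0;
}

static const struct rescale_cfg example_cfg = {
	.type	= IIO_VOLTAGE,
	.props	= example_props,
};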
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
new file mode 100644
index 000000000000..a6d79381866e
--- /dev/null
+++ b/include/linux/iio/backend.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _IIO_BACKEND_H_
+#define _IIO_BACKEND_H_
+
+#include <linux/types.h>
+
+struct fwnode_handle;
+struct iio_backend;
+struct device;
+struct iio_dev;
+
+enum iio_backend_data_type {
+ IIO_BACKEND_TWOS_COMPLEMENT,
+ IIO_BACKEND_OFFSET_BINARY,
+ IIO_BACKEND_DATA_TYPE_MAX
+};
+
+/**
+ * struct iio_backend_data_fmt - Backend data format
+ * @type: Data type.
+ * @sign_extend: Bool to tell if the data is sign extended.
+ * @enable: Enable/Disable the data format module. If disabled,
+ * no formatting will happen.
+ */
+struct iio_backend_data_fmt {
+ enum iio_backend_data_type type;
+ bool sign_extend;
+ bool enable;
+};
+
+/**
+ * struct iio_backend_ops - operations structure for an iio_backend
+ * @enable: Enable backend.
+ * @disable: Disable backend.
+ * @chan_enable: Enable one channel.
+ * @chan_disable: Disable one channel.
+ * @data_format_set: Configure the data format for a specific channel.
+ * @request_buffer: Request an IIO buffer.
+ * @free_buffer: Free an IIO buffer.
+ **/
+struct iio_backend_ops {
+ int (*enable)(struct iio_backend *back);
+ void (*disable)(struct iio_backend *back);
+ int (*chan_enable)(struct iio_backend *back, unsigned int chan);
+ int (*chan_disable)(struct iio_backend *back, unsigned int chan);
+ int (*data_format_set)(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data);
+ struct iio_buffer *(*request_buffer)(struct iio_backend *back,
+ struct iio_dev *indio_dev);
+ void (*free_buffer)(struct iio_backend *back,
+ struct iio_buffer *buffer);
+};
+
+int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
+int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
+int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data);
+int devm_iio_backend_request_buffer(struct device *dev,
+ struct iio_backend *back,
+ struct iio_dev *indio_dev);
+
+void *iio_backend_get_priv(const struct iio_backend *conv);
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *
+__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
+ struct fwnode_handle *fwnode);
+
+int devm_iio_backend_register(struct device *dev,
+ const struct iio_backend_ops *ops, void *priv);
+
+#endif
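A hypothetical backend registration sketch using only the ops table and registration call declared above (all example_* names are invented):

static int example_chan_enable(struct iio_backend *back, unsigned int chan)
{
	/* a real backend would set the channel-enable bit in hardware */
	return 0;
}

static const struct iio_backend_ops example_ops = {
	.chan_enable = example_chan_enable,
};

static int example_probe(struct device *dev)
{
	return devm_iio_backend_register(dev, &example_ops, NULL);
}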
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index ff15c61bf319..18d3702fa95d 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -17,21 +17,14 @@ struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
-
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
- * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
* @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
* @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
* @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
* @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
*/
enum iio_block_state {
- IIO_BLOCK_STATE_DEQUEUED,
IIO_BLOCK_STATE_QUEUED,
IIO_BLOCK_STATE_ACTIVE,
IIO_BLOCK_STATE_DONE,
@@ -78,12 +71,15 @@ struct iio_dma_buffer_block {
* @active_block: Block being used in read()
* @pos: Read offset in the active block
* @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
struct iio_dma_buffer_block *active_block;
size_t pos;
size_t block_size;
+
+ unsigned int next_dequeue;
};
/**
@@ -98,7 +94,6 @@ struct iio_dma_buffer_queue_fileio {
* list and typically also a list of active blocks in the part that handles
* the DMA controller
* @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
* @active: Whether the buffer is currently active
* @fileio: FileIO state
*/
@@ -110,7 +105,6 @@ struct iio_dma_buffer_queue {
struct mutex lock;
spinlock_t list_lock;
struct list_head incoming;
- struct list_head outgoing;
bool active;
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 0e503db71289..cbb8ba957fad 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -7,14 +7,14 @@
#ifndef __IIO_DMAENGINE_H__
#define __IIO_DMAENGINE_H__
-struct iio_buffer;
+struct iio_dev;
struct device;
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel);
+ const char *channel);
void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
-
-struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel);
+int devm_iio_dmaengine_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel);
#endif
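With the devm variant, the probe path collapses to a single call; a sketch assuming a DMA channel named "rx":

static int example_setup_dma_buffer(struct device *dev,
				    struct iio_dev *indio_dev)
{
	return devm_iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
}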
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index fbba4093f06c..418b1307d3f2 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,11 +11,15 @@
struct iio_buffer;
-void iio_buffer_set_attrs(struct iio_buffer *buffer,
- const struct attribute **attrs);
+enum iio_buffer_direction {
+ IIO_BUFFER_DIRECTION_IN,
+ IIO_BUFFER_DIRECTION_OUT,
+};
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
+
/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
@@ -41,10 +45,14 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data, size_t data_sz,
+ int64_t timestamp);
+
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
-void iio_device_attach_buffer(struct iio_dev *indio_dev,
- struct iio_buffer *buffer);
+int iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer);
#endif /* _IIO_BUFFER_GENERIC_H_ */
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index a63dc07b7350..89c3fd7c29ca 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -6,6 +6,9 @@
#ifdef CONFIG_IIO_BUFFER
+#include <uapi/linux/iio/buffer.h>
+#include <linux/iio/buffer.h>
+
struct iio_dev;
struct iio_buffer;
@@ -21,6 +24,10 @@ struct iio_buffer;
* @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
+ * @remove_from: remove scan from buffer. Drivers should call this to
+ * remove a scan from a buffer.
+ * @write: try to write a number of bytes
+ * @space_available: returns the number of bytes available in a buffer
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @set_bytes_per_datum: set number of bytes per datum
@@ -47,6 +54,9 @@ struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
+ int (*remove_from)(struct iio_buffer *buffer, void *data);
+ int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf);
+ size_t (*space_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
@@ -72,9 +82,15 @@ struct iio_buffer {
/** @length: Number of datums in buffer. */
unsigned int length;
+ /** @flags: File ops flags including busy flag. */
+ unsigned long flags;
+
/** @bytes_per_datum: Size of individual datum including timestamp. */
size_t bytes_per_datum;
+ /* @direction: Direction of the data stream (in/out). */
+ enum iio_buffer_direction direction;
+
/**
* @access: Buffer access functions associated with the
* implementation.
@@ -97,24 +113,24 @@ struct iio_buffer {
/* @scan_timestamp: Does the scan mode include a timestamp. */
bool scan_timestamp;
- /* @scan_el_dev_attr_list: List of scan element related attributes. */
- struct list_head scan_el_dev_attr_list;
-
- /* @buffer_group: Attributes of the buffer group. */
- struct attribute_group buffer_group;
+ /* @buffer_attr_list: List of buffer attributes. */
+ struct list_head buffer_attr_list;
/*
- * @scan_el_group: Attribute group for those attributes not
- * created from the iio_chan_info array.
+ * @buffer_group: Attributes of the new buffer group.
+ * Includes scan elements attributes.
*/
- struct attribute_group scan_el_group;
+ struct attribute_group buffer_group;
/* @attrs: Standard attributes of the buffer. */
- const struct attribute **attrs;
+ const struct iio_dev_attr **attrs;
/* @demux_bounce: Buffer for doing gather from incoming scan. */
void *demux_bounce;
+ /* @attached_entry: Entry in the devices list of buffers attached by the driver. */
+ struct list_head attached_entry;
+
/* @buffer_list: Entry in the devices list of current buffers. */
struct list_head buffer_list;
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index caa8bb279a34..e72167b96d27 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -41,7 +41,6 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p);
* @param: motion sensor parameters structure
* @resp: motion sensor response structure
* @type: type of motion sensor
- * @loc: location where the motion sensor is placed
* @range_updated: True if the range of the sensor has been
* updated.
* @curr_range: If updated, the current range value.
@@ -67,7 +66,6 @@ struct cros_ec_sensors_core_state {
struct ec_response_motion_sense *resp;
enum motionsensor_type type;
- enum motionsensor_location loc;
bool range_updated;
int curr_range;
@@ -77,7 +75,7 @@ struct cros_ec_sensors_core_state {
u16 scale;
} calib[CROS_EC_SENSOR_MAX_AXIS];
s8 sign[CROS_EC_SENSOR_MAX_AXIS];
- u8 samples[CROS_EC_SAMPLE_SIZE];
+ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data);
@@ -95,8 +93,11 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
struct platform_device;
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev, bool physical_device,
- cros_ec_sensors_capture_t trigger_capture,
- cros_ec_sensorhub_push_data_cb_t push_data);
+ cros_ec_sensors_capture_t trigger_capture);
+
+int cros_ec_sensors_core_register(struct device *dev,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t push_data);
irqreturn_t cros_ec_sensors_capture(int irq, void *p);
int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
@@ -125,6 +126,5 @@ extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
/* List of extended channel specification for all sensors. */
extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
-extern const struct attribute *cros_ec_sensor_fifo_attributes[];
#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h
new file mode 100644
index 000000000000..a47d304d1ba7
--- /dev/null
+++ b/include/linux/iio/common/inv_sensors_timestamp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Invensense, Inc.
+ */
+
+#ifndef INV_SENSORS_TIMESTAMP_H_
+#define INV_SENSORS_TIMESTAMP_H_
+
+/**
+ * struct inv_sensors_timestamp_chip - chip internal properties
+ * @clock_period: internal clock period in ns
+ * @jitter: acceptable jitter in per-mille
+ * @init_period: chip initial period at reset in ns
+ */
+struct inv_sensors_timestamp_chip {
+ uint32_t clock_period;
+ uint32_t jitter;
+ uint32_t init_period;
+};
+
+/**
+ * struct inv_sensors_timestamp_interval - timestamps interval
+ * @lo: interval lower bound
+ * @up: interval upper bound
+ */
+struct inv_sensors_timestamp_interval {
+ int64_t lo;
+ int64_t up;
+};
+
+/**
+ * struct inv_sensors_timestamp_acc - accumulator for computing an estimation
+ * @val: current estimation of the value, the mean of all values
+ * @idx: current index of the next free place in values table
+ * @values: table of all measured values, use for computing the mean
+ */
+struct inv_sensors_timestamp_acc {
+ uint32_t val;
+ size_t idx;
+ uint32_t values[32];
+};
+
+/**
+ * struct inv_sensors_timestamp - timestamp management states
+ * @chip: chip internal characteristics
+ * @min_period: minimal acceptable clock period
+ * @max_period: maximal acceptable clock period
+ * @it: interrupts interval timestamps
+ * @timestamp: store last timestamp for computing next data timestamp
+ * @mult: current internal period multiplier
+ * @new_mult: new set internal period multiplier (not yet effective)
+ * @period: measured current period of the sensor
+ * @chip_period: accumulator for computing internal chip period
+ */
+struct inv_sensors_timestamp {
+ struct inv_sensors_timestamp_chip chip;
+ uint32_t min_period;
+ uint32_t max_period;
+ struct inv_sensors_timestamp_interval it;
+ int64_t timestamp;
+ uint32_t mult;
+ uint32_t new_mult;
+ uint32_t period;
+ struct inv_sensors_timestamp_acc chip_period;
+};
+
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip);
+
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo);
+
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ size_t sensor_nb, int64_t timestamp);
+
+static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts)
+{
+ ts->timestamp += ts->period;
+ return ts->timestamp;
+}
+
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no);
+
+static inline void inv_sensors_timestamp_reset(struct inv_sensors_timestamp *ts)
+{
+ const struct inv_sensors_timestamp_interval interval_init = {0LL, 0LL};
+
+ ts->it = interval_init;
+ ts->timestamp = 0;
+}
+
+#endif
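A hypothetical FIFO interrupt path built on the helpers above; one sensor per FIFO is an assumption of this sketch:

static void example_process_fifo(struct inv_sensors_timestamp *ts,
				 uint32_t fifo_period, size_t nb,
				 int64_t irq_timestamp)
{
	size_t i;

	inv_sensors_timestamp_interrupt(ts, fifo_period, nb, 1, irq_timestamp);
	for (i = 0; i < nb; i++) {
		int64_t sample_ts = inv_sensors_timestamp_pop(ts);

		(void)sample_ts;	/* attach to the i-th FIFO sample */
	}
}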
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 33e939977444..f9ae5cdd884f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/irqreturn.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
@@ -20,6 +21,9 @@
#include <linux/platform_data/st_sensors_pdata.h>
+#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+#define LSM303D_IMU_DEV_NAME "lsm303d"
+
/*
* Buffer size max case: 2bytes per channel, 3 channels in total +
* 8bytes timestamp channel (s64)
@@ -46,8 +50,8 @@
#define ST_SENSORS_MAX_NAME 17
#define ST_SENSORS_MAX_4WAI 8
-#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
- ch2, s, endian, rbits, sbits, addr) \
+#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, ext) \
{ \
.type = device_type, \
.modified = mod, \
@@ -63,8 +67,14 @@
.storagebits = sbits, \
.endianness = endian, \
}, \
+ .ext_info = ext, \
}
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+ ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, NULL)
+
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
st_sensors_sysfs_sampling_frequency_avail)
@@ -211,12 +221,10 @@ struct st_sensor_settings {
/**
* struct st_sensor_data - ST sensor device status
- * @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
+ * @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
- * @vdd: Pointer to sensor's Vdd power supply
- * @vdd_io: Pointer to sensor's Vdd-IO power supply
* @regmap: Pointer to specific sensor regmap configuration.
* @enabled: Status of the sensor (false->off, true->on).
* @odr: Output data rate of the sensor [Hz].
@@ -228,15 +236,13 @@ struct st_sensor_settings {
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
* @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
*/
struct st_sensor_data {
- struct device *dev;
struct iio_trigger *trig;
- struct iio_mount_matrix *mount_matrix;
+ struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
- struct regulator *vdd;
- struct regulator *vdd_io;
struct regmap *regmap;
bool enabled;
@@ -252,7 +258,9 @@ struct st_sensor_data {
bool hw_irq_trigger;
s64 hw_timestamp;
- char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+ struct mutex odr_lock;
+
+ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
};
#ifdef CONFIG_IIO_BUFFER
@@ -263,7 +271,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p);
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
#else
@@ -272,10 +279,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
{
return 0;
}
-static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- return;
-}
#define st_sensors_validate_device NULL
#endif
@@ -288,8 +291,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
int st_sensors_power_enable(struct iio_dev *indio_dev);
-void st_sensors_power_disable(struct iio_dev *indio_dev);
-
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
@@ -317,4 +318,20 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
+/* Accelerometer */
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
+int st_accel_common_probe(struct iio_dev *indio_dev);
+
+/* Gyroscope */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+
+/* Magnetometer */
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
+int st_magn_common_probe(struct iio_dev *indio_dev);
+
+/* Pressure */
+const struct st_sensor_settings *st_press_get_settings(const char *name);
+int st_press_common_probe(struct iio_dev *indio_dev);
+
#endif /* ST_SENSORS_H */
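A hypothetical accelerometer probe fragment wired to the newly exported getters (example_accel_probe() is an invented name):

static int example_accel_probe(struct iio_dev *indio_dev, const char *name)
{
	struct st_sensor_data *sdata = iio_priv(indio_dev);
	const struct st_sensor_settings *settings;

	settings = st_accel_get_settings(name);
	if (!settings)
		return -ENODEV;

	sdata->sensor_settings = (struct st_sensor_settings *)settings;
	return st_accel_common_probe(indio_dev);
}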
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index c4118dcb8e05..e9910b41d48e 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -13,6 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
+struct fwnode_handle;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -97,6 +98,35 @@ void iio_channel_release_all(struct iio_channel *chan);
*/
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
+/**
+ * fwnode_iio_channel_get_by_name() - get description of all that is needed to access channel.
+ * @fwnode: Pointer to consumer Firmware node
+ * @name: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ */
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name);
+
+/**
+ * devm_fwnode_iio_channel_get_by_name() - Resource managed version of
+ * fwnode_iio_channel_get_by_name().
+ * @dev: Pointer to consumer device.
+ * @fwnode: Pointer to consumer Firmware node
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ *
+ * Returns a valid pointer to the IIO channel, or a pointer-encoded negative
+ * errno if the channel cannot be obtained.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *consumer_channel);
+
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
@@ -171,8 +201,9 @@ struct iio_dev
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_channel_raw(struct iio_channel *chan,
int *val);
@@ -182,8 +213,9 @@ int iio_read_channel_raw(struct iio_channel *chan,
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*
* In contrast to the normal iio_read_channel_raw(), this function
* returns the average of multiple reads.
@@ -206,6 +238,21 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_read_channel_processed_scale() - read and scale a processed value
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ * @scale: Scale factor to apply during the conversion
+ *
+ * Returns an error code or 0.
+ *
+ * This function will read a processed value from a channel. This will work
+ * like @iio_read_channel_processed() but also scales by an additional
+ * scale factor while attempting to minimize any precision loss.
+ */
+int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
+ unsigned int scale);
+
+/**
* iio_write_channel_attribute() - Write values to the device attribute.
* @chan: The channel being queried.
* @val: Value being written.
@@ -236,8 +283,9 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val,
* @chan: The channel being queried.
* @val: Value being written.
*
- * Note raw writes to iio channels are in dac counts and hence
- * scale will need to be applied if standard units required.
+ * Note that for raw writes to iio channels, if the value provided is
+ * in standard units, the effect of the scale and offset must be removed
+ * as (value / scale) - offset.
*/
int iio_write_channel_raw(struct iio_channel *chan, int val);
@@ -247,12 +295,25 @@ int iio_write_channel_raw(struct iio_channel *chan, int val);
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
/**
+ * iio_read_min_channel_raw() - read minimum available raw value from a given
+ * channel, i.e. the minimum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
+ */
+int iio_read_min_channel_raw(struct iio_channel *chan, int *val);
+
+/**
* iio_read_avail_channel_raw() - read available raw values from a given channel
* @chan: The channel being queried.
* @vals: Available values read back.
@@ -263,8 +324,9 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
* For ranges, three vals are always returned; min, step and max.
* For lists, all the possible values are enumerated.
*
- * Note raw available values from iio channels are in adc counts and
- * hence scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw available values from iio
+ * channels need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
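A hypothetical consumer applying the (raw + offset) * scale rule spelled out above; it assumes iio_convert_raw_to_processed() from this same header, with an extra multiplier of 1 to stay in the channel's native unit:

static int example_read_processed(struct iio_channel *chan, int *val)
{
	int raw, ret;

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	/* applies (raw + offset) * scale for us */
	return iio_convert_raw_to_processed(chan, raw, val, 1);
}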
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index e9801c8d49c0..1f7e53c506b6 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -15,7 +15,7 @@
* @vref_buffered: Controls buffering of the external reference voltage.
*
* Vref related settings are available only on MCP4756. See
- * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information.
*/
struct mcp4725_platform_data {
bool use_vref;
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 36de60a5da7a..7a157ed218f6 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -8,6 +8,7 @@
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct device;
struct iio_dev;
struct iio_map;
@@ -26,4 +27,17 @@ int iio_map_array_register(struct iio_dev *indio_dev,
*/
int iio_map_array_unregister(struct iio_dev *indio_dev);
+/**
+ * devm_iio_map_array_register - device-managed version of iio_map_array_register
+ * @dev: Device object to which to bind the unwinding of this registration
+ * @indio_dev: Pointer to the iio_dev structure
+ * @maps: Pointer to an IIO map object which is to be registered to this IIO device
+ *
+ * This function will call iio_map_array_register() to register an IIO map object
+ * and will also hook a callback to the iio_map_array_unregister() function to
+ * handle de-registration of the IIO map object when the device's refcount goes to
+ * zero.
+ */
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps);
+
#endif
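A hypothetical probe fragment for the devm variant documented above; no explicit unregistration is needed on the driver's remove path:

static int example_register_maps(struct device *dev,
				 struct iio_dev *indio_dev,
				 struct iio_map *maps)
{
	return devm_iio_map_array_register(dev, indio_dev, maps);
}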
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
index a602fe7b84fa..74b6d1cadc86 100644
--- a/include/linux/iio/gyro/itg3200.h
+++ b/include/linux/iio/gyro/itg3200.h
@@ -102,6 +102,8 @@ struct itg3200 {
struct i2c_client *i2c;
struct iio_trigger *trig;
struct iio_mount_matrix orientation;
+ /* lock to protect against multiple access to the device */
+ struct mutex lock;
};
enum ITG3200_SCAN_INDEX {
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
new file mode 100644
index 000000000000..9cb6c80dea71
--- /dev/null
+++ b/include/linux/iio/iio-gts-helper.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* gain-time-scale conversion helpers for IIO light sensors
+ *
+ * Copyright (c) 2023 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef __IIO_GTS_HELPER__
+#define __IIO_GTS_HELPER__
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * struct iio_gain_sel_pair - gain - selector values
+ *
+ * In many cases devices like light sensors allow setting signal amplification
+ * (gain) using a register interface. This structure describes amplification
+ * and corresponding selector (register value)
+ *
+ * @gain: Gain (multiplication) value. Gain must be positive, negative
+ * values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this gain.
+ * NOTE: Only selectors >= 0 supported.
+ */
+struct iio_gain_sel_pair {
+ int gain;
+ int sel;
+};
+
+/**
+ * struct iio_itime_sel_mul - integration time description
+ *
+ * In many cases devices like light sensors allow setting the duration of
+ * collecting data. Typically this duration has also an impact to the magnitude
+ * of measured values (gain). This structure describes the relation of
+ * integration time and amplification as well as corresponding selector
+ * (register value).
+ *
+ * An example could be a sensor allowing 50, 100, 200 and 400 ms integration
+ * times. The respective multiplication values could be 50 ms => 1,
+ * 100 ms => 2, 200 ms => 4 and 400 ms => 8, assuming the impact of
+ * integration time is linear: if collecting data for 50 ms yields value X,
+ * doubling the collection time yields value 2X, etc.
+ *
+ * @time_us: Integration time in microseconds. Time values must be positive,
+ * negative values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this time
+ * NOTE: Only selectors >= 0 supported.
+ * @mul: Multiplication to the values caused by this time.
+ * NOTE: Only multipliers > 0 supported.
+ */
+struct iio_itime_sel_mul {
+ int time_us;
+ int sel;
+ int mul;
+};
+
+struct iio_gts {
+ u64 max_scale;
+ const struct iio_gain_sel_pair *hwgain_table;
+ int num_hwgain;
+ const struct iio_itime_sel_mul *itime_table;
+ int num_itime;
+ int **per_time_avail_scale_tables;
+ int *avail_all_scales_table;
+ int num_avail_all_scales;
+ int *avail_time_tables;
+ int num_avail_time_tables;
+};
+
+#define GAIN_SCALE_GAIN(_gain, _sel) \
+{ \
+ .gain = (_gain), \
+ .sel = (_sel), \
+}
+
+#define GAIN_SCALE_ITIME_US(_itime, _sel, _mul) \
+{ \
+ .time_us = (_itime), \
+ .sel = (_sel), \
+ .mul = (_mul), \
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_time(struct iio_gts *gts, int time)
+{
+ int i;
+
+ if (!gts->num_itime)
+ return NULL;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].time_us == time)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_sel(struct iio_gts *gts, int sel)
+{
+ int i;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].sel == sel)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+int devm_iio_init_iio_gts(struct device *dev, int max_scale_int, int max_scale_nano,
+ const struct iio_gain_sel_pair *gain_tbl, int num_gain,
+ const struct iio_itime_sel_mul *tim_tbl, int num_times,
+ struct iio_gts *gts);
+/**
+ * iio_gts_find_int_time_by_sel - find integration time matching a selector
+ * @gts: Gain time scale descriptor
+ * @sel: selector for which matching integration time is searched for
+ *
+ * Return: integration time matching given selector or -EINVAL if
+ * integration time was not found.
+ */
+static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_sel(gts, sel);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->time_us;
+}
+
+/**
+ * iio_gts_find_sel_by_int_time - find selector matching integration time
+ * @gts: Gain time scale descriptor
+ * @time: Integration time for which matching selector is searched for
+ *
+ * Return: a selector matching given integration time or -EINVAL if
+ * selector was not found.
+ */
+static inline int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_time(gts, time);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->sel;
+}
+
+/**
+ * iio_gts_valid_time - check if given integration time is valid
+ * @gts: Gain time scale descriptor
+ * @time_us: Integration time to check
+ *
+ * Return: True if given time is supported by device. False if not.
+ */
+static inline bool iio_gts_valid_time(struct iio_gts *gts, int time_us)
+{
+ return iio_gts_find_itime_by_time(gts, time_us) != NULL;
+}
+
+int iio_gts_find_sel_by_gain(struct iio_gts *gts, int gain);
+
+/**
+ * iio_gts_valid_gain - check if given HW-gain is valid
+ * @gts: Gain time scale descriptor
+ * @gain: HW-gain to check
+ *
+ * Return: True if the given gain is supported by the device. False if not.
+ */
+static inline bool iio_gts_valid_gain(struct iio_gts *gts, int gain)
+{
+ return iio_gts_find_sel_by_gain(gts, gain) >= 0;
+}
+
+int iio_find_closest_gain_low(struct iio_gts *gts, int gain, bool *in_range);
+int iio_gts_find_gain_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_get_min_gain(struct iio_gts *gts);
+int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time);
+
+int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
+ int *scale_int, int *scale_nano);
+int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
+ int scale_int, int scale_nano,
+ int *gain_sel);
+int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
+ int *scale_nano);
+int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
+ int old_gain, int old_time_sel,
+ int new_time_sel, int *new_gain);
+int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain);
+int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
+ const int **vals, int *type, int *length);
+
+#endif
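Hypothetical gain and integration-time tables wired into the init helper above; the gains, selectors and the 1.0 maximum scale are invented for this sketch:

static const struct iio_gain_sel_pair example_gains[] = {
	GAIN_SCALE_GAIN(1, 0x0),
	GAIN_SCALE_GAIN(4, 0x1),
};

static const struct iio_itime_sel_mul example_itimes[] = {
	GAIN_SCALE_ITIME_US(400000, 0x0, 8),
	GAIN_SCALE_ITIME_US(50000, 0x3, 1),
};

static int example_init_gts(struct device *dev, struct iio_gts *gts)
{
	return devm_iio_init_iio_gts(dev, 1, 0,
				     example_gains, ARRAY_SIZE(example_gains),
				     example_itimes, ARRAY_SIZE(example_itimes),
				     gts);
}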
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index f2e94196d31f..5aec3945555b 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -6,11 +6,34 @@
/**
* struct iio_dev_opaque - industrial I/O device opaque information
* @indio_dev: public industrial I/O device information
+ * @id: used to identify device internally
+ * @currentmode: operating mode currently in use, may be eventually
+ * checked by device drivers but should be considered
+ * read-only as this is a core internal bit
+ * @driver_module: used to make it harder to undercut users
+ * @mlock: lock used to prevent simultaneous device state changes
+ * @mlock_key: lockdep class for iio_dev lock
+ * @info_exist_lock: lock to prevent use during removal
+ * @trig_readonly: mark the current trigger immutable
* @event_interface: event chrdevs associated with interrupt lines
+ * @attached_buffers: array of buffers statically attached by the driver
+ * @attached_buffers_cnt: number of buffers in the array of statically attached buffers
+ * @buffer_ioctl_handler: ioctl() handler for this IIO device's buffer interface
* @buffer_list: list of all buffers currently attached
* @channel_attr_list: keep track of automatically created channel
* attributes
* @chan_attr_group: group for all attrs in base directory
+ * @ioctl_handlers: ioctl handlers registered with the core handler
+ * @groups: attribute groups
+ * @groupcounter: index of next attribute group
+ * @legacy_scan_el_group: attribute group for legacy scan elements attribute group
+ * @legacy_buffer_group: attribute group for legacy buffer attributes group
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
+ * @bounce_buffer_size: size of currently allocated bounce buffer
+ * @scan_index_timestamp: cache of the index to the timestamp
+ * @clock_id: timestamping clock posix identifier
+ * @chrdev: associated character device
+ * @flags: file ops related flags including busy flag.
* @debugfs_dentry: device specific debugfs dentry
* @cached_reg_addr: cached register address for debugfs reads
* @read_buf: read buffer to be used for the initial reg read
@@ -18,10 +41,33 @@
*/
struct iio_dev_opaque {
struct iio_dev indio_dev;
+ int currentmode;
+ int id;
+ struct module *driver_module;
+ struct mutex mlock;
+ struct lock_class_key mlock_key;
+ struct mutex info_exist_lock;
+ bool trig_readonly;
struct iio_event_interface *event_interface;
+ struct iio_buffer **attached_buffers;
+ unsigned int attached_buffers_cnt;
+ struct iio_ioctl_handler *buffer_ioctl_handler;
struct list_head buffer_list;
struct list_head channel_attr_list;
struct attribute_group chan_attr_group;
+ struct list_head ioctl_handlers;
+ const struct attribute_group **groups;
+ int groupcounter;
+ struct attribute_group legacy_scan_el_group;
+ struct attribute_group legacy_buffer_group;
+ void *bounce_buffer;
+ size_t bounce_buffer_size;
+
+ unsigned int scan_index_timestamp;
+ clockid_t clock_id;
+ struct cdev chrdev;
+ unsigned long flags;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_dentry;
unsigned cached_reg_addr;
@@ -30,7 +76,7 @@ struct iio_dev_opaque {
#endif
};
-#define to_iio_dev_opaque(indio_dev) \
- container_of(indio_dev, struct iio_dev_opaque, indio_dev)
+#define to_iio_dev_opaque(_indio_dev) \
+ container_of((_indio_dev), struct iio_dev_opaque, indio_dev)
#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index e2df67a3b9ab..e370a7bb3300 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -9,14 +9,17 @@
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/cleanup.h>
+#include <linux/slab.h>
#include <linux/iio/types.h>
-#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
* Currently assumes nano seconds.
*/
+struct fwnode_reference_args;
+
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
@@ -103,15 +106,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
/**
* IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
* @_name: Attribute name ("_available" will be appended to the name)
+ * @_shared: Whether the attribute is shared between all channels
* @_e: Pointer to an iio_enum struct
*
* Creates a read only attribute which lists all the available enum items in a
* space separated list. This should usually be used together with IIO_ENUM()
*/
-#define IIO_ENUM_AVAILABLE(_name, _e) \
+#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
.name = (_name "_available"), \
- .shared = IIO_SHARED_BY_TYPE, \
+ .shared = _shared, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
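With the new _shared parameter, users now spell out the sharing explicitly; a hypothetical ext_info table (example_enum is assumed to be a populated struct iio_enum):

static const struct iio_chan_spec_ext_info example_ext_info[] = {
	IIO_ENUM("mode", IIO_SEPARATE, &example_enum),
	IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &example_enum),
	{ }
};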
@@ -127,8 +131,7 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int iio_read_mount_matrix(struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
@@ -219,6 +222,9 @@ struct iio_event_spec {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect codes etc,
* unlike modifiers.
+ * This field is deprecated in favour of providing
+ * iio_info->read_label() to override the label, which
+ * unlike @extend_name does not affect sysfs filenames.
* @datasheet_name: A name used in in-kernel mapping of channels. It should
* correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
@@ -313,9 +319,55 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
}
s64 iio_get_time_ns(const struct iio_dev *indio_dev);
-unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ * on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a single time
+ * and push the available data to a kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non software visible channels (or DMA when there is no demux possible in
+ * software).
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is kind of the opposite: a particular state of the data produces
+ * an event rather than data capture. We don't necessarily have access to the
+ * data itself, only to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which make them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
@@ -333,6 +385,11 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS 4
+struct iio_val_int_plus_micro {
+ int integer;
+ int micro;
+};
+
struct iio_trigger; /* forward declaration */
/**
@@ -362,6 +419,8 @@ struct iio_trigger; /* forward declaration */
* and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
+ * @read_label: function to request the label name of a specified channel,
+ * for better channel identification.
* @write_raw_get_fmt: callback function to query the expected
* format/precision. If not set by the driver, write_raw
* returns IIO_VAL_INT_PLUS_MICRO.
@@ -369,16 +428,14 @@ struct iio_trigger; /* forward declaration */
* @write_event_config: set if the event is enabled.
* @read_event_value: read a configuration value associated with the event.
* @write_event_value: write a configuration value for the event.
+ * @read_event_label: function to request the label name of a specified event,
+ * for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
- * @of_xlate: function pointer to obtain channel specifier index.
- * When #iio-cells is greater than '0', the driver could
- * provide a custom of_xlate function that reads the
- * *args* and returns the appropriate index in registered
- * IIO channels array.
+ * @fwnode_xlate: fwnode-based function pointer to obtain the channel specifier index.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -420,6 +477,10 @@ struct iio_info {
int val2,
long mask);
+ int (*read_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label);
+
int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask);
@@ -447,6 +508,12 @@ struct iio_info {
enum iio_event_direction dir,
enum iio_event_info info, int val, int val2);
+ int (*read_event_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
@@ -454,8 +521,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
- int (*of_xlate)(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec);
+ int (*fwnode_xlate)(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec);
int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
unsigned count);
@@ -482,24 +549,24 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @id: [INTERN] used to identify device internally
- * @driver_module: [INTERN] used to make it harder to undercut users
- * @modes: [DRIVER] operating modes supported by device
- * @currentmode: [DRIVER] current operating mode
+ * @modes: [DRIVER] bitmask listing all the operating modes
+ * supported by the IIO device. This mask should be
+ * initialized before registering the IIO device. It can
+ * also be filled in by the IIO core, as a result of
+ * enabling particular features in the driver
+ * (see iio_triggered_event_setup()).
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
* @buffer: [DRIVER] any buffer present
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [INTERN] lock used to prevent simultaneous device state
- * changes
- * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
+ * array in order of preference, with the most preferred
+ * masks first.
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
- * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -507,36 +574,23 @@ struct iio_buffer_setup_ops {
* @name: [DRIVER] name of the device.
* @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
- * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
- * @chrdev: [INTERN] associated character device
- * @groups: [INTERN] attribute groups
- * @groupcounter: [INTERN] index of next attribute group
- * @flags: [INTERN] file ops related flags including busy flag.
* @priv: [DRIVER] reference to driver's private information
* **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
- int id;
- struct module *driver_module;
-
int modes;
- int currentmode;
struct device dev;
struct iio_buffer *buffer;
int scan_bytes;
- struct mutex mlock;
const unsigned long *available_scan_masks;
unsigned masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
- unsigned scan_index_timestamp;
struct iio_trigger *trig;
- bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
@@ -546,18 +600,15 @@ struct iio_dev {
const char *name;
const char *label;
const struct iio_info *info;
- clockid_t clock_id;
- struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
- struct cdev chrdev;
-#define IIO_MAX_GROUPS 6
- const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
- int groupcounter;
- unsigned long flags;
void *priv;
};
+int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
+
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
@@ -589,7 +640,36 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
-extern struct bus_type iio_bus_type;
+/*
+ * This autocleanup logic is normally used via
+ * iio_device_claim_direct_scoped().
+ */
+DEFINE_GUARD(iio_claim_direct, struct iio_dev *, iio_device_claim_direct_mode(_T),
+ iio_device_release_direct_mode(_T))
+
+DEFINE_GUARD_COND(iio_claim_direct, _try, ({
+ struct iio_dev *dev;
+ int d = iio_device_claim_direct_mode(_T);
+
+ if (d < 0)
+ dev = NULL;
+ else
+ dev = _T;
+ dev;
+ }))
+
+/**
+ * iio_device_claim_direct_scoped() - Scoped call to iio_device_claim_direct_mode().
+ * @fail: What to do on failure to claim device.
+ * @iio_dev: Pointer to the IIO device structure
+ */
+#define iio_device_claim_direct_scoped(fail, iio_dev) \
+ scoped_cond_guard(iio_claim_direct_try, fail, iio_dev)
+
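A usage sketch for the scoped form, assuming a hypothetical foo_single_conversion() helper; the trailing unreachable() reflects that the scoped block always returns from within it:

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
		/* Direct mode is held here and released on every exit path. */
		int ret = foo_single_conversion(indio_dev, chan, val);

		if (ret)
			return ret;
		return IIO_VAL_INT;
	}
	unreachable();
}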
+int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
+void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
+
+extern const struct bus_type iio_bus_type;
/**
* iio_device_put() - reference counted deallocation of struct device
@@ -601,15 +681,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
put_device(&indio_dev->dev);
}
-/**
- * iio_device_get_clock() - Retrieve current timestamping clock for the device
- * @indio_dev: IIO device structure containing the device
- */
-static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
-{
- return indio_dev->clock_id;
-}
-
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
/**
@@ -641,7 +713,7 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
*
* This utility must be called between IIO device allocation
* (via devm_iio_device_alloc()) & IIO device registration
- * (via {devm_}iio_device_register()).
+ * (via iio_device_register() or devm_iio_device_register()).
* By default, the device allocation will also assign a parent device to
* the IIO device object. In cases where devm_iio_device_alloc() is used,
* sometimes the parent device must be different than the device used to
@@ -679,8 +751,13 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
return dev_get_drvdata(&indio_dev->dev);
}
-/* Can we make this smaller? */
-#define IIO_ALIGN L1_CACHE_BYTES
+/*
+ * Used to ensure the iio_priv() structure is aligned so that it can in turn
+ * include IIO_DMA_MINALIGN'd elements, such as buffers, which must not share
+ * cachelines with the rest of the structure, thus making them safe for use
+ * with non-coherent DMA.
+ */
+#define IIO_DMA_MINALIGN ARCH_DMA_MINALIGN
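The adis.h change further down in this diff is a real adopter; as a minimal sketch, a hypothetical driver's iio_priv() state would place its DMA-able buffers like so:

struct foo_state {
	struct spi_device *spi;
	unsigned int cached_config;
	/*
	 * Everything from rx onward sits in its own cacheline region and is
	 * therefore safe for non-coherent DMA.
	 */
	u8 rx[4] __aligned(IIO_DMA_MINALIGN);
	u8 tx[4];
};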
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
/* The information at the returned address is guaranteed to be cacheline aligned */
@@ -691,19 +768,13 @@ static inline void *iio_priv(const struct iio_dev *indio_dev)
void iio_device_free(struct iio_dev *indio_dev);
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
-struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
- const char *fmt, ...);
-/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev: IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
-{
- return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
-}
+#define devm_iio_trigger_alloc(parent, fmt, ...) \
+ __devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+__printf(3, 4)
+struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
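Call sites keep the old form; the wrapper records THIS_MODULE behind the scenes. A sketch, using the iio_device_id() accessor introduced above (foo names are illustrative):

static int foo_setup_trigger(struct device *parent, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;

	trig = devm_iio_trigger_alloc(parent, "%s-dev%d", indio_dev->name,
				      iio_device_id(indio_dev));
	if (!trig)
		return -ENOMEM;
	return devm_iio_trigger_register(parent, trig);
}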
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
* @indio_dev: IIO device structure for device
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 2df67448f0d1..8898966bc0f0 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -11,6 +11,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
@@ -20,7 +21,6 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
-struct adis_burst;
/**
* struct adis_timeouts - ADIS chip variant timeouts
@@ -33,6 +33,7 @@ struct adis_timeout {
u16 sw_reset_ms;
u16 self_test_ms;
};
+
/**
* struct adis_data - ADIS chip variant specific data
* @read_delay: SPI delay for read operations in us
@@ -46,11 +47,18 @@ struct adis_timeout {
* @self_test_mask: Bitmask of supported self-test operations
* @self_test_reg: Register address to request self test command
* @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg
- * @status_error_msgs: Array of error messgaes
+ * @status_error_msgs: Array of error messages
* @status_error_mask: Bitmask of errors supported by the device
* @timeouts: Chip specific delays
* @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
* @has_paging: True if ADIS device has paged registers
+ * @burst_reg_cmd: Register command that triggers burst
+ * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
+ * this should be the minimum size supported by the device.
+ * @burst_max_len: Holds the maximum burst size when the device supports
+ * more than one burst mode with different sizes
+ * @burst_max_speed_hz: Maximum SPI speed that can be used in burst mode
*/
struct adis_data {
unsigned int read_delay;
@@ -73,8 +81,14 @@ struct adis_data {
unsigned int status_error_mask;
int (*enable_irq)(struct adis *adis, bool enable);
+ bool unmasked_drdy;
bool has_paging;
+
+ unsigned int burst_reg_cmd;
+ unsigned int burst_len;
+ unsigned int burst_max_len;
+ unsigned int burst_max_speed_hz;
};
/**
@@ -99,7 +113,6 @@ struct adis {
struct iio_trigger *trig;
const struct adis_data *data;
- struct adis_burst *burst;
unsigned int burst_extra_len;
/**
* The state_lock is meant to be used during operations that require
@@ -119,12 +132,12 @@ struct adis {
unsigned long irq_flag;
void *buffer;
- uint8_t tx[10] ____cacheline_aligned;
- uint8_t rx[4];
+ u8 tx[10] __aligned(IIO_DMA_MINALIGN);
+ u8 rx[4];
};
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
- struct spi_device *spi, const struct adis_data *data);
+ struct spi_device *spi, const struct adis_data *data);
int __adis_reset(struct adis *adis);
/**
@@ -145,9 +158,9 @@ static inline int adis_reset(struct adis *adis)
}
int __adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size);
+ unsigned int val, unsigned int size);
int __adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size);
+ unsigned int *val, unsigned int size);
/**
* __adis_write_reg_8() - Write single byte to a register (unlocked)
@@ -156,7 +169,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
* @value: The value to write
*/
static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return __adis_write_reg(adis, reg, val, 1);
}
@@ -168,7 +181,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return __adis_write_reg(adis, reg, val, 2);
}
@@ -180,7 +193,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return __adis_write_reg(adis, reg, val, 4);
}
@@ -192,7 +205,7 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
@@ -211,7 +224,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
@@ -231,7 +244,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
* @size: The size of the @value (in bytes)
*/
static inline int adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size)
+ unsigned int val, unsigned int size)
{
int ret;
@@ -250,7 +263,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg,
* @size: The size of the @val buffer
*/
static int adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size)
+ unsigned int *val, unsigned int size)
{
int ret;
@@ -268,7 +281,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg,
* @value: The value to write
*/
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return adis_write_reg(adis, reg, val, 1);
}
@@ -280,7 +293,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return adis_write_reg(adis, reg, val, 2);
}
@@ -292,7 +305,7 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return adis_write_reg(adis, reg, val, 4);
}
@@ -304,7 +317,7 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
@@ -323,7 +336,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
@@ -370,10 +383,8 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
* @val can lead to undesired behavior if the register to update is 16bit.
*/
#define adis_update_bits(adis, reg, mask, val) ({ \
- BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
- __builtin_choose_expr(sizeof(val) == 4, \
- adis_update_bits_base(adis, reg, mask, val, 4), \
- adis_update_bits_base(adis, reg, mask, val, 2)); \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
})
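With the reworked macro, the register access width follows sizeof(val) directly, so the value's C type must match the register width; a sketch with hypothetical register and mask names:

static int foo_set_rate(struct adis *adis, unsigned int hz)
{
	u16 mode = FIELD_PREP(FOO_MODE_MASK, 1);	/* 2-byte access */
	u32 rate = FIELD_PREP(FOO_RATE_MASK, hz);	/* 4-byte access */
	int ret;

	ret = adis_update_bits(adis, FOO_MODE_REG, FOO_MODE_MASK, mode);
	if (ret)
		return ret;
	/* A u8 or u64 value would now fail the BUILD_BUG_ON at compile time. */
	return adis_update_bits(adis, FOO_RATE_REG, FOO_RATE_MASK, rate);
}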
/**
@@ -388,42 +399,49 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
* @val can lead to undesired behavior if the register to update is 16bit.
*/
#define __adis_update_bits(adis, reg, mask, val) ({ \
- BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
- __builtin_choose_expr(sizeof(val) == 4, \
- __adis_update_bits_base(adis, reg, mask, val, 4), \
- __adis_update_bits_base(adis, reg, mask, val, 2)); \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
})
-int adis_enable_irq(struct adis *adis, bool enable);
int __adis_check_status(struct adis *adis);
int __adis_initial_startup(struct adis *adis);
+int __adis_enable_irq(struct adis *adis, bool enable);
-static inline int adis_check_status(struct adis *adis)
+static inline int adis_enable_irq(struct adis *adis, bool enable)
{
int ret;
mutex_lock(&adis->state_lock);
- ret = __adis_check_status(adis);
+ ret = __adis_enable_irq(adis, enable);
mutex_unlock(&adis->state_lock);
return ret;
}
-/* locked version of __adis_initial_startup() */
-static inline int adis_initial_startup(struct adis *adis)
+static inline int adis_check_status(struct adis *adis)
{
int ret;
mutex_lock(&adis->state_lock);
- ret = __adis_initial_startup(adis);
+ ret = __adis_check_status(adis);
mutex_unlock(&adis->state_lock);
return ret;
}
+static inline void adis_dev_lock(struct adis *adis)
+{
+ mutex_lock(&adis->state_lock);
+}
+
+static inline void adis_dev_unlock(struct adis *adis)
+{
+ mutex_unlock(&adis->state_lock);
+}
+
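The new helpers let a driver group several unlocked accessors into one atomic section; a sketch with a hypothetical control register and enable bit:

static int foo_enable(struct adis *adis)
{
	u16 ctrl;
	int ret;

	adis_dev_lock(adis);
	ret = __adis_read_reg_16(adis, FOO_CTRL_REG, &ctrl);
	if (!ret)
		ret = __adis_write_reg_16(adis, FOO_CTRL_REG,
					  ctrl | FOO_CTRL_ENABLE);
	adis_dev_unlock(adis);

	return ret;
}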
int adis_single_conversion(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int error_mask,
- int *val);
+ const struct iio_chan_spec *chan,
+ unsigned int error_mask, int *val);
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
.type = IIO_VOLTAGE, \
@@ -472,7 +490,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
.modified = 1, \
.channel2 = IIO_MOD_ ## mod, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- info_sep, \
+ (info_sep), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = info_all, \
.address = (addr), \
@@ -499,35 +517,14 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
-/**
- * struct adis_burst - ADIS data for burst transfers
- * @en burst mode enabled
- * @reg_cmd register command that triggers burst
- * @extra_len extra length to account in the SPI RX buffer
- * @burst_max_len holds the maximum burst size when the device supports
- * more than one burst mode with different sizes
- */
-struct adis_burst {
- bool en;
- unsigned int reg_cmd;
- const u32 extra_len;
- const u32 burst_max_len;
-};
-
int
devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
irq_handler_t trigger_handler);
-int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
-void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev);
int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
-int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
-void adis_remove_trigger(struct adis *adis);
int adis_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask);
+ const unsigned long *scan_mask);
#else /* CONFIG_IIO_BUFFER */
@@ -538,33 +535,12 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
return 0;
}
-static inline int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
-{
- return 0;
-}
-
-static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
-{
-}
-
static inline int devm_adis_probe_trigger(struct adis *adis,
struct iio_dev *indio_dev)
{
return 0;
}
-static inline int adis_probe_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void adis_remove_trigger(struct adis *adis)
-{
-}
-
#define adis_update_scan_mode NULL
#endif /* CONFIG_IIO_BUFFER */
@@ -572,7 +548,8 @@ static inline void adis_remove_trigger(struct adis *adis)
#ifdef CONFIG_DEBUG_FS
int adis_debugfs_reg_access(struct iio_dev *indio_dev,
- unsigned int reg, unsigned int writeval, unsigned int *readval);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
#else
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 1fc1efa7799d..22874da0c8be 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -3,11 +3,20 @@
#define __LINUX_IIO_KFIFO_BUF_H__
struct iio_buffer;
+struct iio_buffer_setup_ops;
+struct iio_dev;
+struct iio_dev_attr;
struct device;
struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
-struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
+int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct iio_dev_attr **buffer_attrs);
+
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
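A probe-time sketch with hypothetical setup ops; this is the path that ends up with INDIO_BUFFER_SOFTWARE set by the core, per the mode documentation earlier in this diff:

static int foo_probe_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	return devm_iio_kfifo_buffer_setup(dev, indio_dev,
					   &foo_buffer_setup_ops);
}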
#endif
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index eff1e6b2595c..0f7fe7b522e3 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
struct iio_sw_device *iio_sw_device_create(const char *, const char *);
void iio_sw_device_destroy(struct iio_sw_device *);
-int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
-void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
-
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 47de2443e984..bc77f88df303 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt);
struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *);
void iio_sw_trigger_destroy(struct iio_sw_trigger *);
-int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt);
-void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
-
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index b532c875bc24..de5bb125815c 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -9,6 +9,7 @@
#ifndef _INDUSTRIAL_IO_SYSFS_H_
#define _INDUSTRIAL_IO_SYSFS_H_
+struct iio_buffer;
struct iio_chan_spec;
/**
@@ -17,12 +18,14 @@ struct iio_chan_spec;
* @address: associated register address
* @l: list head for maintaining list of dynamically created attrs
* @c: specification for the underlying channel
+ * @buffer: the IIO buffer this attribute belongs to (if any)
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
u64 address;
struct list_head l;
struct iio_chan_spec const *c;
+ struct iio_buffer *buffer;
};
#define to_iio_dev_attr(_dev_attr) \
@@ -94,6 +97,17 @@ struct iio_const_attr {
= { .string = _string, \
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+#define IIO_STATIC_CONST_DEVICE_ATTR(_name, _string) \
+ static ssize_t iio_const_dev_attr_show_##_name( \
+ struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return sysfs_emit(buf, "%s\n", _string); \
+ } \
+ static IIO_DEVICE_ATTR(_name, 0444, \
+ iio_const_dev_attr_show_##_name, NULL, 0)
+
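Each invocation emits a static show function plus a static struct iio_dev_attr named iio_dev_attr_<name>; a sketch of constant hardware-FIFO attributes (the names and values are illustrative):

IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "1");
IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max, "64");

static const struct iio_dev_attr *foo_fifo_attributes[] = {
	&iio_dev_attr_hwfifo_watermark_min,
	&iio_dev_attr_hwfifo_watermark_max,
	NULL,
};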
/* Generic attributes of onetype or another */
/**
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index cad8325903f9..bce3b1788199 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -21,7 +21,7 @@ struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @set_trigger_state: switch on/off the trigger on demand
- * @try_reenable: function to reenable the trigger when the
+ * @reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
* @validate_device: function to validate the device when the
* current trigger gets changed.
@@ -31,7 +31,7 @@ struct iio_trigger;
**/
struct iio_trigger_ops {
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
- int (*try_reenable)(struct iio_trigger *trig);
+ void (*reenable)(struct iio_trigger *trig);
int (*validate_device)(struct iio_trigger *trig,
struct iio_dev *indio_dev);
};
@@ -55,6 +55,7 @@ struct iio_trigger_ops {
* @attached_own_device:[INTERN] if we are using our own device as trigger,
* i.e. if we registered a poll function to the same
* device as the one providing the trigger.
+ * @reenable_work: [INTERN] work item used to ensure reenable can sleep.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
@@ -74,6 +75,7 @@ struct iio_trigger {
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
bool attached_own_device;
+ struct work_struct reenable_work;
};
@@ -91,13 +93,18 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
+
+ WARN_ONCE(list_empty(&trig->list),
+ "Getting non-registered iio trigger %s is prohibited\n",
+ trig->name);
+
__module_get(trig->owner);
return trig;
}
/**
- * iio_device_set_drvdata() - Set trigger driver data
+ * iio_trigger_set_drvdata() - Set trigger driver data
* @trig: IIO trigger structure
* @data: Driver specific data
*
@@ -124,16 +131,10 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
* iio_trigger_register() - register a trigger with the IIO core
* @trig_info: trigger to be registered
**/
-#define iio_trigger_register(trig_info) \
- __iio_trigger_register((trig_info), THIS_MODULE)
-int __iio_trigger_register(struct iio_trigger *trig_info,
- struct module *this_mod);
+int iio_trigger_register(struct iio_trigger *trig_info);
-#define devm_iio_trigger_register(dev, trig_info) \
- __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE)
-int __devm_iio_trigger_register(struct device *dev,
- struct iio_trigger *trig_info,
- struct module *this_mod);
+int devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info);
/**
* iio_trigger_unregister() - unregister a trigger from the core
@@ -150,18 +151,18 @@ void iio_trigger_unregister(struct iio_trigger *trig_info);
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
-/**
- * iio_trigger_poll() - called on a trigger occurring
- * @trig: trigger which occurred
- *
- * Typically called in relevant hardware interrupt handler.
- **/
void iio_trigger_poll(struct iio_trigger *trig);
-void iio_trigger_poll_chained(struct iio_trigger *trig);
+void iio_trigger_poll_nested(struct iio_trigger *trig);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
+#define iio_trigger_alloc(parent, fmt, ...) \
+ __iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+
+__printf(3, 4)
+struct iio_trigger *__iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
/**
@@ -170,6 +171,7 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig);
int iio_trigger_validate_own_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
index 3aa2f132dd67..2c05dfad88d7 100644
--- a/include/linux/iio/trigger_consumer.h
+++ b/include/linux/iio/trigger_consumer.h
@@ -38,7 +38,7 @@ struct iio_poll_func {
};
-struct iio_poll_func
+__printf(5, 6) struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
int type,
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index e99c91799359..29e1fe146879 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -2,21 +2,37 @@
#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+#include <linux/iio/buffer.h>
#include <linux/interrupt.h>
struct iio_dev;
+struct iio_dev_attr;
struct iio_buffer_setup_ops;
-int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *setup_ops);
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct iio_dev_attr **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
-int devm_iio_triggered_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- irqreturn_t (*h)(int irq, void *p),
- irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *ops);
+#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, (setup_ops), \
+ NULL)
+
+int devm_iio_triggered_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
+
+#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, \
+ (setup_ops), NULL)
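Existing callers are unchanged by the compatibility macro; a probe-time sketch with a hypothetical trigger handler, using the stock timestamp-capturing top half:

static int foo_probe_trigbuf(struct device *dev, struct iio_dev *indio_dev)
{
	return devm_iio_triggered_buffer_setup(dev, indio_dev,
					       iio_pollfunc_store_time,
					       foo_trigger_handler, NULL);
}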
#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index e6fd3645963c..d89982c98368 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -16,6 +16,11 @@ enum iio_event_info {
IIO_EV_INFO_PERIOD,
IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
IIO_EV_INFO_LOW_PASS_FILTER_3DB,
+ IIO_EV_INFO_TIMEOUT,
+ IIO_EV_INFO_RESET_TIMEOUT,
+ IIO_EV_INFO_TAP2_MIN_DELAY,
+ IIO_EV_INFO_RUNNING_PERIOD,
+ IIO_EV_INFO_RUNNING_COUNT,
};
#define IIO_VAL_INT 1
@@ -23,6 +28,7 @@ enum iio_event_info {
#define IIO_VAL_INT_PLUS_NANO 3
#define IIO_VAL_INT_PLUS_MICRO_DB 4
#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_INT_64 6 /* 64-bit data, val is lower 32 bits */
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
#define IIO_VAL_CHAR 12
@@ -50,6 +56,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_PHASE,
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_HYSTERESIS_RELATIVE,
IIO_CHAN_INFO_INT_TIME,
IIO_CHAN_INFO_ENABLE,
IIO_CHAN_INFO_CALIBHEIGHT,
@@ -59,6 +66,9 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_CALIBEMISSIVITY,
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
+ IIO_CHAN_INFO_CALIBAMBIENT,
+ IIO_CHAN_INFO_ZEROPOINT,
+ IIO_CHAN_INFO_TROUGH,
};
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index d15100de6cdd..0bae61a15b60 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -7,104 +7,81 @@
#ifndef _LINUX_IMA_H
#define _LINUX_IMA_H
+#include <linux/kernel_read_file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/kexec.h>
+#include <crypto/hash_info.h>
struct linux_binprm;
#ifdef CONFIG_IMA
-extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask);
-extern void ima_post_create_tmpfile(struct inode *inode);
-extern void ima_file_free(struct file *file);
-extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
-extern int ima_load_data(enum kernel_load_data_id id);
-extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
-extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
- enum kernel_read_file_id id);
-extern void ima_post_path_mknod(struct dentry *dentry);
+extern enum hash_algo ima_get_current_hash_algo(void);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
+extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
+extern int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len);
+
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+extern void ima_appraise_parse_cmdline(void);
+#else
+static inline void ima_appraise_parse_cmdline(void) {}
+#endif
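A call sketch for the new measurement hook; the event label, event name, and buffer are illustrative, not from this diff:

static int foo_measure_state(const void *state_buf, size_t state_len)
{
	/* No hash-out requested: hash = false, digest = NULL. */
	return ima_measure_critical_data("foo_lsm", "foo-policy-state",
					 state_buf, state_len,
					 false, NULL, 0);
}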
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
-extern bool arch_ima_get_secureboot(void);
-extern const char * const *arch_get_ima_policy(void);
#else
-static inline bool arch_ima_get_secureboot(void)
-{
- return false;
-}
-
-static inline const char * const *arch_get_ima_policy(void)
+static inline enum hash_algo ima_get_current_hash_algo(void)
{
- return NULL;
+ return HASH_ALGO__LAST;
}
-#endif
-#else
-static inline int ima_bprm_check(struct linux_binprm *bprm)
-{
- return 0;
-}
-
-static inline int ima_file_check(struct file *file, int mask)
-{
- return 0;
-}
-
-static inline void ima_post_create_tmpfile(struct inode *inode)
-{
-}
-
-static inline void ima_file_free(struct file *file)
+static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
{
- return;
+ return -EOPNOTSUPP;
}
-static inline int ima_file_mmap(struct file *file, unsigned long prot)
+static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
{
- return 0;
+ return -EOPNOTSUPP;
}
-static inline int ima_file_mprotect(struct vm_area_struct *vma,
- unsigned long prot)
-{
- return 0;
-}
+static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
-static inline int ima_load_data(enum kernel_load_data_id id)
+static inline int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest,
+ size_t digest_len)
{
- return 0;
+ return -ENOENT;
}
-static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
-{
- return 0;
-}
+#endif /* CONFIG_IMA */
-static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
- enum kernel_read_file_id id)
-{
- return 0;
-}
+#ifdef CONFIG_HAVE_IMA_KEXEC
+int __init ima_free_kexec_buffer(void);
+int __init ima_get_kexec_buffer(void **addr, size_t *size);
+#endif
-static inline void ima_post_path_mknod(struct dentry *dentry)
+#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
+extern bool arch_ima_get_secureboot(void);
+extern const char * const *arch_get_ima_policy(void);
+#else
+static inline bool arch_ima_get_secureboot(void)
{
- return;
+ return false;
}
-static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+static inline const char * const *arch_get_ima_policy(void)
{
- return -EOPNOTSUPP;
+ return NULL;
}
-
-static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
-#endif /* CONFIG_IMA */
+#endif
#ifndef CONFIG_IMA_KEXEC
struct kimage;
@@ -113,50 +90,13 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
{}
#endif
-#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
-extern void ima_post_key_create_or_update(struct key *keyring,
- struct key *key,
- const void *payload, size_t plen,
- unsigned long flags, bool create);
-#else
-static inline void ima_post_key_create_or_update(struct key *keyring,
- struct key *key,
- const void *payload,
- size_t plen,
- unsigned long flags,
- bool create) {}
-#endif /* CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS */
-
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
-extern void ima_inode_post_setattr(struct dentry *dentry);
-extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
- const void *xattr_value, size_t xattr_value_len);
-extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
#else
static inline bool is_ima_appraise_enabled(void)
{
return 0;
}
-
-static inline void ima_inode_post_setattr(struct dentry *dentry)
-{
- return;
-}
-
-static inline int ima_inode_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len)
-{
- return 0;
-}
-
-static inline int ima_inode_removexattr(struct dentry *dentry,
- const char *xattr_name)
-{
- return 0;
-}
#endif /* CONFIG_IMA_APPRAISE */
#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index 54c02c84906a..35227d47cfc9 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
#define _LINUX_INDIRECT_CALL_WRAPPER_H
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
/*
* INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
@@ -11,7 +11,7 @@
* @__VA_ARGS__: arguments for @f
*
* Avoid retpoline overhead for known builtin, checking @f vs each of them and
- * eventually invoking directly the builtin function. The functions are check
+ * eventually invoking the builtin function directly. The functions are checked
* in the given order. Fallback to the indirect call.
*/
#define INDIRECT_CALL_1(f, f1, ...) \
@@ -36,6 +36,7 @@
#define INDIRECT_CALLABLE_DECLARE(f) f
#define INDIRECT_CALLABLE_SCOPE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
#else
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
@@ -44,6 +45,7 @@
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE static
+#define EXPORT_INDIRECT_CALLABLE(f)
#endif
/*
@@ -60,4 +62,10 @@
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif
+#if IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
+#endif
+
#endif
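To illustrate the expansion, a sketch with hypothetical handlers: under CONFIG_MITIGATION_RETPOLINE the wrapper compares the function pointer against each known builtin and calls it directly on a match, falling back to the indirect call.

static int foo_process(struct foo *f, struct sk_buff *skb)
{
	/*
	 * Expands roughly to:
	 *   f->handler == foo_handler_v6 ? foo_handler_v6(skb) :
	 *   f->handler == foo_handler_v4 ? foo_handler_v4(skb) :
	 *   f->handler(skb)
	 */
	return INDIRECT_CALL_2(f->handler, foo_handler_v6, foo_handler_v4,
			       skb);
}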
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 0ef2d800fda7..a9033696b0aa 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -8,6 +8,7 @@
struct inet_hashinfo;
struct inet_diag_handler {
+ struct module *owner;
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r);
@@ -75,6 +76,8 @@ static inline size_t inet_diag_msg_attrs_size(void)
#ifdef CONFIG_SOCK_CGROUP_DATA
+ nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */
#endif
+ + nla_total_size(sizeof(struct inet_diag_sockopt))
+ /* INET_DIAG_SOCKOPT */
;
}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 3515ca64e638..cb5280e6cc21 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -24,6 +24,8 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
refcount_t refcnt;
int dead;
struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */
@@ -41,7 +43,7 @@ struct in_device {
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
@@ -51,13 +53,15 @@ struct in_device {
};
#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
+#define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr))
#define IPV4_DEVCONF_ALL(net, attr) \
IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
+#define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr))
-static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
+static inline int ipv4_devconf_get(const struct in_device *in_dev, int index)
{
index--;
- return in_dev->cnf.data[index];
+ return READ_ONCE(in_dev->cnf.data[index]);
}
static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
@@ -65,7 +69,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
{
index--;
set_bit(index, in_dev->cnf.state);
- in_dev->cnf.data[index] = val;
+ WRITE_ONCE(in_dev->cnf.data[index], val);
}
static inline void ipv4_devconf_setall(struct in_device *in_dev)
@@ -79,18 +83,18 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
- (IPV4_DEVCONF_ALL(net, attr) || \
+ (IPV4_DEVCONF_ALL_RO(net, attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
#define IN_DEV_MAXCONF(in_dev, attr) \
- (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
+ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
@@ -105,7 +109,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
-#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
@@ -126,13 +130,15 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
- IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
-#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
+#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ARP_EVICT_NOCARRIER)
struct in_ifaddr {
struct hlist_node hash;
@@ -146,6 +152,7 @@ struct in_ifaddr {
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_prefixlen;
+ unsigned char ifa_proto;
__u32 ifa_flags;
char ifa_label[IFNAMSIZ];
@@ -178,6 +185,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+ int len, int size)
+{
+ return 0;
+}
+#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/init.h b/include/linux/init.h
index 212fc9e2f691..58cef4c2e59a 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/stringify.h>
#include <linux/types.h>
/* Built-in __init functions needn't be compiled with retpoline */
@@ -47,11 +49,11 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
-#define __initdata __section(.init.data)
-#define __initconst __section(.init.rodata)
-#define __exitdata __section(.exit.data)
-#define __exit_call __used __section(.exitcall.exit)
+#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
+#define __initdata __section(".init.data")
+#define __initconst __section(".init.rodata")
+#define __exitdata __section(".exit.data")
+#define __exit_call __used __section(".exitcall.exit")
/*
* modpost check for section mismatches during the kernel build.
@@ -70,9 +72,9 @@
*
* The markers follow same syntax rules as __init / __initdata.
*/
-#define __ref __section(.ref.text) noinline
-#define __refdata __section(.ref.data)
-#define __refconst __section(.ref.rodata)
+#define __ref __section(".ref.text") noinline
+#define __refdata __section(".ref.data")
+#define __refconst __section(".ref.rodata")
#ifdef MODULE
#define __exitused
@@ -80,16 +82,13 @@
#define __exitused __used
#endif
-#define __exit __section(.exit.text) __exitused __cold notrace
+#define __exit __section(".exit.text") __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace \
+#define __meminit __section(".meminit.text") __cold notrace \
__latent_entropy
-#define __meminitdata __section(.meminit.data)
-#define __meminitconst __section(.meminit.rodata)
-#define __memexit __section(.memexit.text) __exitused __cold notrace
-#define __memexitdata __section(.memexit.data)
-#define __memexitconst __section(.memexit.rodata)
+#define __meminitdata __section(".meminit.data")
+#define __meminitconst __section(".meminit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -134,7 +133,7 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
-/* Used for contructor calls. */
+/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
struct file_system_type;
@@ -143,25 +142,46 @@ struct file_system_type;
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
+extern unsigned int saved_command_line_len;
extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
void __init init_rootfs(void);
+
+void init_IRQ(void);
+void time_init(void);
+void poking_init(void);
+void pgtable_cache_init(void);
+
+extern initcall_entry_t __initcall_start[];
+extern initcall_entry_t __initcall0_start[];
+extern initcall_entry_t __initcall1_start[];
+extern initcall_entry_t __initcall2_start[];
+extern initcall_entry_t __initcall3_start[];
+extern initcall_entry_t __initcall4_start[];
+extern initcall_entry_t __initcall5_start[];
+extern initcall_entry_t __initcall6_start[];
+extern initcall_entry_t __initcall7_start[];
+extern initcall_entry_t __initcall_end[];
+
extern struct file_system_type rootfs_fs_type;
-#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
-#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void);
-#endif
extern void (*late_time_init)(void);
extern bool initcall_debug;
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
#endif
#ifndef MODULE
@@ -184,19 +204,81 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
+/* Format: <modname>__<counter>_<line>_<fn> */
+#define __initcall_id(fn) \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(__COUNTER__, \
+ __PASTE(_, \
+ __PASTE(__LINE__, \
+ __PASTE(_, fn))))))
+
+/* Format: __<prefix>__<iid><id> */
+#define __initcall_name(prefix, __iid, id) \
+ __PASTE(__, \
+ __PASTE(prefix, \
+ __PASTE(__, \
+ __PASTE(__iid, id))))
+
+#ifdef CONFIG_LTO_CLANG
+/*
+ * With LTO, the compiler doesn't necessarily obey link order for
+ * initcalls. In order to preserve the correct order, we add each
+ * variable into its own section and generate a linker script (in
+ * scripts/link-vmlinux.sh) to specify the order of the sections.
+ */
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init.." #__iid
+
+/*
+ * With LTO, the compiler can rename static functions to avoid
+ * global naming collisions. We use a global stub function for
+ * initcalls to create a stable symbol name whose address can be
+ * taken in inline assembly when PREL32 relocations are used.
+ */
+#define __initcall_stub(fn, __iid, id) \
+ __initcall_name(initstub, __iid, id)
+
+#define __define_initcall_stub(__stub, fn) \
+ int __init __stub(void); \
+ int __init __stub(void) \
+ { \
+ return fn(); \
+ } \
+ __ADDRESSABLE(__stub)
+#else
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init"
+
+#define __initcall_stub(fn, __iid, id) fn
+
+#define __define_initcall_stub(__stub, fn) \
+ __ADDRESSABLE(fn)
+#endif
+
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define ___define_initcall(fn, id, __sec) \
- __ADDRESSABLE(fn) \
- asm(".section \"" #__sec ".init\", \"a\" \n" \
- "__initcall_" #fn #id ": \n" \
- ".long " #fn " - . \n" \
- ".previous \n");
+#define ____define_initcall(fn, __stub, __name, __sec) \
+ __define_initcall_stub(__stub, fn) \
+ asm(".section \"" __sec "\", \"a\" \n" \
+ __stringify(__name) ": \n" \
+ ".long " __stringify(__stub) " - . \n" \
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
#else
-#define ___define_initcall(fn, id, __sec) \
- static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(#__sec ".init"))) = fn;
+#define ____define_initcall(fn, __unused, __name, __sec) \
+ static initcall_t __name __used \
+ __attribute__((__section__(__sec))) = fn;
#endif
+#define __unique_initcall(fn, id, __sec, __iid) \
+ ____define_initcall(fn, \
+ __initcall_stub(fn, __iid, id), \
+ __initcall_name(initcall, __iid, id), \
+ __initcall_section(__sec, __iid))
+
+#define ___define_initcall(fn, id, __sec) \
+ __unique_initcall(fn, id, __sec, __initcall_id(fn))
+
#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
/*
@@ -236,7 +318,7 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
+#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall)
struct obs_kernel_param {
const char *str;
@@ -244,6 +326,8 @@ struct obs_kernel_param {
int early;
};
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
+
/*
* Only for really core code. See moduleparam.h for the normal way.
*
@@ -254,16 +338,23 @@ struct obs_kernel_param {
static const char __setup_str_##unique_id[] __initconst \
__aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
- __used __section(.init.setup) \
- __attribute__((aligned((sizeof(long))))) \
+ __used __section(".init.setup") \
+ __aligned(__alignof__(struct obs_kernel_param)) \
= { __setup_str_##unique_id, fn, early }
+/*
+ * NOTE: __setup functions return values:
+ * @fn returns 1 (or non-zero) if the option argument is "handled"
+ * and returns 0 if the option argument is "not handled".
+ */
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
/*
- * NOTE: fn is as per module_param, not __setup!
- * Emits warning if fn returns non-zero.
+ * NOTE: @fn is as per module_param, not __setup!
+ * I.e., @fn returns 0 for no error or non-zero for error
+ * (possibly @fn returns a -errno value, but it does not matter).
+ * Emits warning if @fn returns non-zero.
*/
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
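Sketches following the two return conventions spelled out in the notes above (the option names are hypothetical):

static bool foo_enabled __initdata;

static int __init parse_foo(char *str)
{
	foo_enabled = true;
	return 1;	/* __setup: non-zero means "handled" */
}
__setup("foo=", parse_foo);

static unsigned int bar_level __initdata;

static int __init parse_bar(char *str)
{
	return kstrtouint(str, 0, &bar_level);	/* early_param: 0 on success */
}
early_param("bar_level", parse_bar);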
@@ -277,14 +368,14 @@ struct obs_kernel_param {
var = 1; \
return 0; \
} \
- __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
- __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+ early_param(str_off, parse_##var##_off)
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
@@ -298,7 +389,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data..nosave)
+#define __nosavedata __section(".data..nosave")
#ifdef MODULE
#define __exit_p(x) x
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2c620d7ac432..bccb3f1f6262 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,7 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
extern struct nsproxy init_nsproxy;
-extern struct group_info init_groups;
extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
@@ -38,14 +37,7 @@ extern struct cred init_cred;
#define INIT_TASK_COMM "swapper"
-/* Attach to the init_task data structure for proper alignment */
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __attribute__((__section__(".data..init_task")))
-#else
-#define __init_task_data /**/
-#endif
-
/* Attach to the thread_info data structure for proper alignment */
-#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
+#define __init_thread_info __section(".data..init_thread_info")
#endif
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index 8db6f8c8030b..f1a1f4c92ded 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_INITRD_H
+#define __LINUX_INITRD_H
+
#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
/* starting block # of image */
@@ -15,12 +18,20 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void __init reserve_initrd_mem(void);
+extern void wait_for_initramfs(void);
+#else
+static inline void __init reserve_initrd_mem(void) {}
+static inline void wait_for_initramfs(void) {}
+#endif
+
extern phys_addr_t phys_initrd_start;
extern unsigned long phys_initrd_size;
-extern unsigned int real_root_dev;
-
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
void console_on_rootfs(void);
+
+#endif /* __LINUX_INITRD_H */
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 6a24905f6e1e..8d20caa1b268 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -7,11 +7,8 @@
#ifndef _LINUX_INOTIFY_H
#define _LINUX_INOTIFY_H
-#include <linux/sysctl.h>
#include <uapi/linux/inotify.h>
-extern struct ctl_table inotify_table[]; /* for sysctl */
-
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h
deleted file mode 100644
index 14821fd231c0..000000000000
--- a/include/linux/input-polldev.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _INPUT_POLLDEV_H
-#define _INPUT_POLLDEV_H
-
-/*
- * Copyright (c) 2007 Dmitry Torokhov
- */
-
-#include <linux/input.h>
-#include <linux/workqueue.h>
-
-/**
- * struct input_polled_dev - simple polled input device
- * @private: private driver data.
- * @open: driver-supplied method that prepares device for polling
- * (enables the device and maybe flushes device state).
- * @close: driver-supplied method that is called when device is no
- * longer being polled. Used to put device into low power mode.
- * @poll: driver-supplied method that polls the device and posts
- * input events (mandatory).
- * @poll_interval: specifies how often the poll() method should be called.
- * Defaults to 500 msec unless overridden when registering the device.
- * @poll_interval_max: specifies upper bound for the poll interval.
- * Defaults to the initial value of @poll_interval.
- * @poll_interval_min: specifies lower bound for the poll interval.
- * Defaults to 0.
- * @input: input device structure associated with the polled device.
- * Must be properly initialized by the driver (id, name, phys, bits).
- *
- * Polled input device provides a skeleton for supporting simple input
- * devices that do not raise interrupts but have to be periodically
- * scanned or polled to detect changes in their state.
- */
-struct input_polled_dev {
- void *private;
-
- void (*open)(struct input_polled_dev *dev);
- void (*close)(struct input_polled_dev *dev);
- void (*poll)(struct input_polled_dev *dev);
- unsigned int poll_interval; /* msec */
- unsigned int poll_interval_max; /* msec */
- unsigned int poll_interval_min; /* msec */
-
- struct input_dev *input;
-
-/* private: */
- struct delayed_work work;
-
- bool devres_managed;
-};
-
-struct input_polled_dev *input_allocate_polled_device(void);
-struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev);
-void input_free_polled_device(struct input_polled_dev *dev);
-int input_register_polled_device(struct input_polled_dev *dev);
-void input_unregister_polled_device(struct input_polled_dev *dev);
-
-#endif
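input-polldev.h goes away because polling support was folded into the input core proper. A minimal conversion sketch for a former input_polled_dev user; the driver name and the state-reading helper are assumed:

static void mydev_poll(struct input_dev *input)
{
	/* scan the hardware and report the current state */
	input_report_key(input, KEY_POWER, mydev_read_state());	/* assumed helper */
	input_sync(input);
}

	/* in probe, on an ordinary input_dev: */
	error = input_setup_polling(input, mydev_poll);
	if (error)
		return error;
	input_set_poll_interval(input, 500);	/* msec, matches the old default */

The core now owns the delayed work that struct input_polled_dev used to carry, and open()/close() keep their usual meaning.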
diff --git a/include/linux/input.h b/include/linux/input.h
index 56f2fd32e609..c22ac465254b 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -90,9 +90,11 @@ enum input_clock_type {
* @open: this method is called when the very first user calls
* input_open_device(). The driver must prepare the device
* to start generating events (start polling thread,
- * request an IRQ, submit URB, etc.)
+ * request an IRQ, submit URB, etc.). The meaning of open() is
+ * to start providing events to the input core.
* @close: this method is called when the very last user calls
- * input_close_device().
+ * input_close_device(). The meaning of close() is to stop
+ * providing events to the input core.
* @flush: purges the device. Most commonly used to get rid of force
* feedback effects loaded into the device when disconnecting
* from it
@@ -127,6 +129,10 @@ enum input_clock_type {
* and needs not be explicitly unregistered or freed.
* @timestamp: storage for a timestamp set by input_set_timestamp called
* by a driver
+ * @inhibited: indicates that the input device is inhibited. If that is
+ * the case, the input core ignores any events generated by the device.
+ * Device's close() is called when it is being inhibited and its open()
+ * is called when it is being uninhibited.
*/
struct input_dev {
const char *name;
@@ -201,6 +207,8 @@ struct input_dev {
bool devres_managed;
ktime_t timestamp[INPUT_CLK_MAX];
+
+ bool inhibited;
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -467,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
@@ -502,7 +512,9 @@ bool input_match_device_id(const struct input_dev *dev,
void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
-extern struct class input_class;
+bool input_device_enabled(struct input_dev *dev);
+
+extern const struct class input_class;
/**
* struct ff_device - force-feedback part of an input device
@@ -550,7 +562,7 @@ struct ff_device {
int max_effects;
struct ff_effect *effects;
- struct file *effect_owners[];
+ struct file *effect_owners[] __counted_by(max_effects);
};
int input_ff_create(struct input_dev *dev, unsigned int max_effects);
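The new inhibited flag and input_device_enabled() give drivers a single predicate for "somebody is listening": a device is enabled when it has been opened and is not inhibited. A sketch of how an event source might use it (the priv layout is assumed):

	/* in a driver's event-producing path */
	if (!input_device_enabled(priv->input))
		return;		/* inhibited or no users: skip the work */

	input_report_abs(priv->input, ABS_X, x);
	input_sync(priv->input);

Inhibit/uninhibit is driven by the core through the existing close()/open() callbacks, so most drivers get sane behaviour without extra code.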
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
index c0523af96893..0e4742c8c81e 100644
--- a/include/linux/input/adp5589.h
+++ b/include/linux/input/adp5589.h
@@ -175,13 +175,6 @@ struct i2c_client; /* forward declaration */
struct adp5589_gpio_platform_data {
int gpio_start; /* GPIO Chip base # */
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
};
#endif
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
index 5fba52a56cd6..5705d5de3aea 100644
--- a/include/linux/input/as5011.h
+++ b/include/linux/input/as5011.h
@@ -7,7 +7,6 @@
*/
struct as5011_platform_data {
- unsigned int button_gpio;
unsigned int axis_irq; /* irq number */
unsigned long axis_irqflags;
char xp, xn; /* threshold for x axis */
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
deleted file mode 100644
index ed0776997a7a..000000000000
--- a/include/linux/input/auo-pixcir-ts.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for AUO in-cell touchscreens
- *
- * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on auo_touch.h from Dell Streak kernel
- *
- * Copyright (c) 2008 QUALCOMM Incorporated.
- * Copyright (c) 2008 QUALCOMM USA, INC.
- */
-
-#ifndef __AUO_PIXCIR_TS_H__
-#define __AUO_PIXCIR_TS_H__
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodically
- * compare coordinates: interrupt is asserted when coordinates change
- * indicate touch: interrupt is asserted during touch
- */
-#define AUO_PIXCIR_INT_PERIODICAL 0x00
-#define AUO_PIXCIR_INT_COMP_COORD 0x01
-#define AUO_PIXCIR_INT_TOUCH_IND 0x02
-
-/*
- * @gpio_int interrupt gpio
- * @int_setting one of AUO_PIXCIR_INT_*
- * @init_hw hardware-specific init
- * @exit_hw hardware-specific shutdown
- * @x_max x-resolution
- * @y_max y-resolution
- */
-struct auo_pixcir_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
- int int_setting;
-
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index 77582ae1745a..000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
- int irq_pin; /* IRQ pin is wired to this GPIO */
-};
-
-#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
deleted file mode 100644
index 118b9af6e01a..000000000000
--- a/include/linux/input/cyttsp.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP_H_
-#define _CYTTSP_H_
-
-#define CY_SPI_NAME "cyttsp-spi"
-#define CY_I2C_NAME "cyttsp-i2c"
-/* Active Power state scanning/processing refresh interval */
-#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
-/* touch timeout for the Active power */
-#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
-/* Low Power state scanning/processing refresh interval */
-#define CY_LP_INTRVL_DFLT 0x0A /* ms */
-/* Active distance in pixels for a gesture to be reported */
-#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-
-#endif /* _CYTTSP_H_ */
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
index 520858d12680..51cca17ee94c 100644
--- a/include/linux/input/elan-i2c-ids.h
+++ b/include/linux/input/elan-i2c-ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Elan I2C/SMBus Touchpad device whitelist
*
@@ -11,10 +12,6 @@
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
* copyright (c) 2011-2012 Google, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Trademarks are the property of their respective owners.
*/
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 9476768c3b90..b8d8d69eba29 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -3,8 +3,9 @@
#define _MATRIX_KEYPAD_H
#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/of.h>
+
+struct device;
+struct input_dev;
#define MATRIX_MAX_ROWS 32
#define MATRIX_MAX_COLS 32
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 3b8580bd33c1..2cf89a538b18 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -47,7 +47,7 @@ struct input_mt {
unsigned int flags;
unsigned int frame;
int *red;
- struct input_mt_slot slots[];
+ struct input_mt_slot slots[] __counted_by(num_slots);
};
static inline void input_mt_set_value(struct input_mt_slot *slot,
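__counted_by(num_slots) tells the compiler (and fortified/KASAN builds) how long the slots[] flexible array is, so num_slots must be valid before the array is indexed. An allocation sketch in the style of input_mt_init_slots():

	struct input_mt *mt;

	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	mt->num_slots = num_slots;	/* set before any slots[] access */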
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
deleted file mode 100644
index d464ffb4db52..000000000000
--- a/include/linux/input/navpoint.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
- */
-
-struct navpoint_platform_data {
- int port; /* PXA SSP port for pxa_ssp_request() */
- int gpio; /* GPIO for power on/off */
-};
diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h
index d25d1452dc6e..d0dddc14ebc8 100644
--- a/include/linux/input/sparse-keymap.h
+++ b/include/linux/input/sparse-keymap.h
@@ -20,6 +20,7 @@
* private definitions.
* @code: Device-specific data identifying the button/switch
* @keycode: KEY_* code assigned to a key/button
+ * @sw: struct with code/value used by KE_SW and KE_VSW
* @sw.code: SW_* code assigned to a switch
* @sw.value: Value that should be sent in an input event when KE_SW
* switch is toggled. KE_VSW switches ignore this field and
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644
index 000000000000..7e4b7023bf04
--- /dev/null
+++ b/include/linux/input/vivaldi-fmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ * codes, encoded rows/columns, etc) for the top
+ * row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+ u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+ unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
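vivaldi_function_row_physmap_show() is a helper for the sysfs attribute that ChromeOS userspace reads; the wrapper below is a sketch of the expected wiring, with the driver-private struct and drvdata use assumed:

static ssize_t function_row_physmap_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	const struct mydrv_data *priv = dev_get_drvdata(dev);	/* assumed */

	return vivaldi_function_row_physmap_show(&priv->vdata, buf);
}
static DEVICE_ATTR_RO(function_row_physmap);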
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
new file mode 100644
index 000000000000..aa0b3ffea935
--- /dev/null
+++ b/include/linux/instruction_pointer.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INSTRUCTION_POINTER_H
+#define _LINUX_INSTRUCTION_POINTER_H
+
+#include <asm/linkage.h>
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
+
+#endif /* _LINUX_INSTRUCTION_POINTER_H */
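_RET_IP_ and _THIS_IP_ get their own tiny header so users no longer need to pull in all of kernel.h. Typical uses, as a sketch:

	/* who called this function? */
	pr_debug("%s called from %pS\n", __func__, (void *)_RET_IP_);

	/* the address of this very spot, e.g. for lock tracking */
	unsigned long ip = _THIS_IP_;

_RET_IP_ resolves to the caller's return address, _THIS_IP_ to the address of the local label it declares.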
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index 93e2ad67fc10..bc7babe91b2e 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -2,15 +2,18 @@
#ifndef __LINUX_INSTRUMENTATION_H
#define __LINUX_INSTRUMENTATION_H
-#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)
+#ifdef CONFIG_NOINSTR_VALIDATION
+
+#include <linux/stringify.h>
/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({ \
- asm volatile("%c0: nop\n\t" \
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
/*
* Because instrumentation_{begin,end}() can nest, objtool validation considers
@@ -43,15 +46,16 @@
* To avoid this, have _end() be a NOP instruction, this ensures it will be
* part of the condition block and does not escape.
*/
-#define instrumentation_end() ({ \
- asm volatile("%c0: nop\n\t" \
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
})
-#else
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
+#else /* !CONFIG_NOINSTR_VALIDATION */
# define instrumentation_begin() do { } while(0)
# define instrumentation_end() do { } while(0)
-#endif
+#endif /* CONFIG_NOINSTR_VALIDATION */
#endif /* __LINUX_INSTRUMENTATION_H */
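The begin/end markers exist so objtool can check that noinstr code only calls instrumentable code inside an explicitly marked region. The usage pattern being validated, as a sketch:

noinstr void my_entry_path(void)	/* assumed entry function */
{
	/* ... code that must stay uninstrumented ... */

	instrumentation_begin();
	trace_hardirqs_off();	/* an instrumentable call is legal here */
	instrumentation_end();

	/* ... back to uninstrumented code ... */
}

With CONFIG_NOINSTR_VALIDATION disabled, both markers compile to nothing, so they cost nothing in production builds.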
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 43e6ea591975..1b608e00290a 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -2,7 +2,7 @@
/*
* This header provides generic wrappers for memory access instrumentation that
- * the compiler cannot emit for: KASAN, KCSAN.
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
*/
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
@@ -10,16 +10,16 @@
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
#include <linux/types.h>
/**
* instrument_read - instrument regular read access
+ * @v: address of access
+ * @size: size of access
*
* Instrument a regular read access. The instrumentation should be inserted
* before the actual read happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_read(const volatile void *v, size_t size)
{
@@ -29,12 +29,11 @@ static __always_inline void instrument_read(const volatile void *v, size_t size)
/**
* instrument_write - instrument regular write access
+ * @v: address of access
+ * @size: size of access
*
* Instrument a regular write access. The instrumentation should be inserted
* before the actual write happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_write(const volatile void *v, size_t size)
{
@@ -43,13 +42,26 @@ static __always_inline void instrument_write(const volatile void *v, size_t size
}
/**
+ * instrument_read_write - instrument regular read-write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular read-write access. The instrumentation should be inserted
+ * before the actual write happens.
+ */
+static __always_inline void instrument_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_read_write(v, size);
+}
+
+/**
* instrument_atomic_read - instrument atomic read access
+ * @v: address of access
+ * @size: size of access
*
* Instrument an atomic read access. The instrumentation should be inserted
* before the actual read happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
@@ -59,12 +71,11 @@ static __always_inline void instrument_atomic_read(const volatile void *v, size_
/**
* instrument_atomic_write - instrument atomic write access
+ * @v: address of access
+ * @size: size of access
*
* Instrument an atomic write access. The instrumentation should be inserted
* before the actual write happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
@@ -73,37 +84,98 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
}
/**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_read_write(v, size);
+}
+
+/**
* instrument_copy_to_user - instrument reads of copy_to_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
*
* Instrument reads from kernel memory that are due to copy_to_user (and
* variants). The instrumentation must be inserted before the accesses.
- *
- * @to destination address
- * @from source address
- * @n number of bytes to copy
*/
static __always_inline void
instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
}
/**
- * instrument_copy_from_user - instrument writes of copy_from_user
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
*
* Instrument writes to kernel memory that are due to copy_from_user (and
* variants). The instrumentation should be inserted before the accesses.
- *
- * @to destination address
- * @from source address
- * @n number of bytes to copy
*/
static __always_inline void
-instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
kcsan_check_write(to, n);
}
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (as returned by copy_from_user)
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted after the accesses.
+ */
+static __always_inline void
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ * @to: destination variable, may not be address-taken
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * userspace.
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ * @from: source address
+ * @ptr: userspace pointer to copy to
+ * @size: number of bytes to copy
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied to
+ * userspace.
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
+
#endif /* _LINUX_INSTRUMENTED_H */
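Splitting the copy_from_user instrumentation into a before/after pair lets KASAN/KCSAN run before the copy while KMSAN unpoisons only the bytes that were actually copied. How the pair is consumed, modelled on the generic _copy_from_user() in lib/usercopy.c:

unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}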
diff --git a/include/media/dvb_math.h b/include/linux/int_log.h
index 8690ec42954d..0a6f58c38b61 100644
--- a/include/media/dvb_math.h
+++ b/include/linux/int_log.h
@@ -1,22 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * dvb-math provides some complex fixed-point math
- * operations shared between the dvb related stuff
+ * Provides fixed-point logarithm operations.
*
* Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
*/
-#ifndef __DVB_MATH_H
-#define __DVB_MATH_H
+#ifndef __LINUX_INT_LOG_H
+#define __LINUX_INT_LOG_H
#include <linux/types.h>
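The renamed header keeps the same two helpers, intlog2() and intlog10(), which return the logarithm as an unsigned 8.24 fixed-point value. A usage sketch, with the snr variable assumed:

	/* 10 * log10(snr), truncated to whole dB */
	u32 db = (10 * intlog10(snr)) >> 24;

	/* for reference: intlog2(8) == 3 << 24 */

Moving it out of include/media lets non-DVB code share the fixed-point log helpers without a media dependency.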
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 2271939c5c31..459b79683783 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -13,45 +13,19 @@ enum integrity_status {
INTEGRITY_PASS = 0,
INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
+ INTEGRITY_FAIL_IMMUTABLE,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
INTEGRITY_UNKNOWN,
};
-/* List of EVM protected security xattrs */
#ifdef CONFIG_INTEGRITY
-extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
-extern void integrity_inode_free(struct inode *inode);
extern void __init integrity_load_keys(void);
#else
-static inline struct integrity_iint_cache *
- integrity_inode_get(struct inode *inode)
-{
- return NULL;
-}
-
-static inline void integrity_inode_free(struct inode *inode)
-{
- return;
-}
-
static inline void integrity_load_keys(void)
{
}
#endif /* CONFIG_INTEGRITY */
-#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
-
-extern int integrity_kernel_module_request(char *kmod_name);
-
-#else
-
-static inline int integrity_kernel_module_request(char *kmod_name)
-{
- return 0;
-}
-
-#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
-
#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
deleted file mode 100644
index b1ed2f25f7c0..000000000000
--- a/include/linux/intel-iommu.h
+++ /dev/null
@@ -1,808 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2006-2015, Intel Corporation.
- *
- * Authors: Ashok Raj <ashok.raj@intel.com>
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * David Woodhouse <David.Woodhouse@intel.com>
- */
-
-#ifndef _INTEL_IOMMU_H_
-#define _INTEL_IOMMU_H_
-
-#include <linux/types.h>
-#include <linux/iova.h>
-#include <linux/io.h>
-#include <linux/idr.h>
-#include <linux/mmu_notifier.h>
-#include <linux/list.h>
-#include <linux/iommu.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/dmar.h>
-#include <linux/ioasid.h>
-
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT (12)
-#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT (9)
-#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ BIT_ULL(0)
-#define DMA_PTE_WRITE BIT_ULL(1)
-#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
-#define DMA_PTE_SNP BIT_ULL(11)
-
-#define DMA_FL_PTE_PRESENT BIT_ULL(0)
-#define DMA_FL_PTE_US BIT_ULL(2)
-#define DMA_FL_PTE_XD BIT_ULL(63)
-
-#define ADDR_WIDTH_5LEVEL (57)
-#define ADDR_WIDTH_4LEVEL (48)
-
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define CONTEXT_TT_DEV_IOTLB 1
-#define CONTEXT_TT_PASS_THROUGH 2
-#define CONTEXT_PASIDE BIT_ULL(3)
-
-/*
- * Intel IOMMU register specification per version 1.0 public spec.
- */
-#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
-#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
-#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
-#define DMAR_GCMD_REG 0x18 /* Global command register */
-#define DMAR_GSTS_REG 0x1c /* Global status register */
-#define DMAR_RTADDR_REG 0x20 /* Root entry table */
-#define DMAR_CCMD_REG 0x28 /* Context command reg */
-#define DMAR_FSTS_REG 0x34 /* Fault Status register */
-#define DMAR_FECTL_REG 0x38 /* Fault control register */
-#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
-#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
-#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
-#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
-#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
-#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
-#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
-#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
-#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
-#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
-#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
-#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
-#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
-#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
-#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
-#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
-#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
-#define DMAR_PRS_REG 0xdc /* Page request status register */
-#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
-#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
-#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
-#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
-#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
-#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
-#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
-#define DMAR_MTRR_FIX16K_80000_REG 0x128
-#define DMAR_MTRR_FIX16K_A0000_REG 0x130
-#define DMAR_MTRR_FIX4K_C0000_REG 0x138
-#define DMAR_MTRR_FIX4K_C8000_REG 0x140
-#define DMAR_MTRR_FIX4K_D0000_REG 0x148
-#define DMAR_MTRR_FIX4K_D8000_REG 0x150
-#define DMAR_MTRR_FIX4K_E0000_REG 0x158
-#define DMAR_MTRR_FIX4K_E8000_REG 0x160
-#define DMAR_MTRR_FIX4K_F0000_REG 0x168
-#define DMAR_MTRR_FIX4K_F8000_REG 0x170
-#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
-#define DMAR_MTRR_PHYSMASK0_REG 0x188
-#define DMAR_MTRR_PHYSBASE1_REG 0x190
-#define DMAR_MTRR_PHYSMASK1_REG 0x198
-#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
-#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
-#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
-#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
-#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
-#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
-#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
-#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
-#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
-#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
-#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
-#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
-#define DMAR_MTRR_PHYSBASE8_REG 0x200
-#define DMAR_MTRR_PHYSMASK8_REG 0x208
-#define DMAR_MTRR_PHYSBASE9_REG 0x210
-#define DMAR_MTRR_PHYSMASK9_REG 0x218
-#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
-#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
-#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
-
-#define OFFSET_STRIDE (9)
-
-#define dmar_readq(a) readq(a)
-#define dmar_writeq(a,v) writeq(v,a)
-#define dmar_readl(a) readl(a)
-#define dmar_writel(a, v) writel(v, a)
-
-#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
-#define DMAR_VER_MINOR(v) ((v) & 0x0f)
-
-/*
- * Decoding Capability Register
- */
-#define cap_5lp_support(c) (((c) >> 60) & 1)
-#define cap_pi_support(c) (((c) >> 59) & 1)
-#define cap_fl1gp_support(c) (((c) >> 56) & 1)
-#define cap_read_drain(c) (((c) >> 55) & 1)
-#define cap_write_drain(c) (((c) >> 54) & 1)
-#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
-#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
-#define cap_pgsel_inv(c) (((c) >> 39) & 1)
-
-#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
-
-#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
-#define cap_max_fault_reg_offset(c) \
- (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
-
-#define cap_zlr(c) (((c) >> 22) & 1)
-#define cap_isoch(c) (((c) >> 23) & 1)
-#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
-#define cap_sagaw(c) (((c) >> 8) & 0x1f)
-#define cap_caching_mode(c) (((c) >> 7) & 1)
-#define cap_phmr(c) (((c) >> 6) & 1)
-#define cap_plmr(c) (((c) >> 5) & 1)
-#define cap_rwbf(c) (((c) >> 4) & 1)
-#define cap_afl(c) (((c) >> 3) & 1)
-#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
-/*
- * Extended Capability Register
- */
-
-#define ecap_smpwc(e) (((e) >> 48) & 0x1)
-#define ecap_flts(e) (((e) >> 47) & 0x1)
-#define ecap_slts(e) (((e) >> 46) & 0x1)
-#define ecap_vcs(e) (((e) >> 44) & 0x1)
-#define ecap_smts(e) (((e) >> 43) & 0x1)
-#define ecap_dit(e) ((e >> 41) & 0x1)
-#define ecap_pasid(e) ((e >> 40) & 0x1)
-#define ecap_pss(e) ((e >> 35) & 0x1f)
-#define ecap_eafs(e) ((e >> 34) & 0x1)
-#define ecap_nwfs(e) ((e >> 33) & 0x1)
-#define ecap_srs(e) ((e >> 31) & 0x1)
-#define ecap_ers(e) ((e >> 30) & 0x1)
-#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
-#define ecap_dis(e) ((e >> 27) & 0x1)
-#define ecap_nest(e) ((e >> 26) & 0x1)
-#define ecap_mts(e) ((e >> 25) & 0x1)
-#define ecap_ecs(e) ((e >> 24) & 0x1)
-#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
-#define ecap_coherent(e) ((e) & 0x1)
-#define ecap_qis(e) ((e) & 0x2)
-#define ecap_pass_through(e) ((e >> 6) & 0x1)
-#define ecap_eim_support(e) ((e >> 4) & 0x1)
-#define ecap_ir_support(e) ((e >> 3) & 0x1)
-#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
-#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
-
-/* Virtual command interface capability */
-#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
-
-/* IOTLB_REG */
-#define DMA_TLB_FLUSH_GRANU_OFFSET 60
-#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
-#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
-#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
-#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
-#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
-#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
-#define DMA_TLB_IVT (((u64)1) << 63)
-#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_TLB_MAX_SIZE (0x3f)
-
-/* INVALID_DESC */
-#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
-#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
-#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
-#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
-#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_ID_TLB_ADDR(addr) (addr)
-#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
-
-/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1)<<31)
-#define DMA_PMEN_PRS (((u32)1)<<0)
-
-/* GCMD_REG */
-#define DMA_GCMD_TE (((u32)1) << 31)
-#define DMA_GCMD_SRTP (((u32)1) << 30)
-#define DMA_GCMD_SFL (((u32)1) << 29)
-#define DMA_GCMD_EAFL (((u32)1) << 28)
-#define DMA_GCMD_WBF (((u32)1) << 27)
-#define DMA_GCMD_QIE (((u32)1) << 26)
-#define DMA_GCMD_SIRTP (((u32)1) << 24)
-#define DMA_GCMD_IRE (((u32) 1) << 25)
-#define DMA_GCMD_CFI (((u32) 1) << 23)
-
-/* GSTS_REG */
-#define DMA_GSTS_TES (((u32)1) << 31)
-#define DMA_GSTS_RTPS (((u32)1) << 30)
-#define DMA_GSTS_FLS (((u32)1) << 29)
-#define DMA_GSTS_AFLS (((u32)1) << 28)
-#define DMA_GSTS_WBFS (((u32)1) << 27)
-#define DMA_GSTS_QIES (((u32)1) << 26)
-#define DMA_GSTS_IRTPS (((u32)1) << 24)
-#define DMA_GSTS_IRES (((u32)1) << 25)
-#define DMA_GSTS_CFIS (((u32)1) << 23)
-
-/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
-#define DMA_RTADDR_SMT (((u64)1) << 10)
-
-/* CCMD_REG */
-#define DMA_CCMD_ICC (((u64)1) << 63)
-#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
-#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
-#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
-#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
-#define DMA_CCMD_MASK_NOBIT 0
-#define DMA_CCMD_MASK_1BIT 1
-#define DMA_CCMD_MASK_2BIT 2
-#define DMA_CCMD_MASK_3BIT 3
-#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
-#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
-
-/* FECTL_REG */
-#define DMA_FECTL_IM (((u32)1) << 31)
-
-/* FSTS_REG */
-#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
-#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
-#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
-#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
-#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
-#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
-#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
-
-/* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u32)1) << 31)
-#define dma_frcd_type(d) ((d >> 30) & 1)
-#define dma_frcd_fault_reason(c) (c & 0xff)
-#define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
-#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
-/* low 64 bit */
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
-
-/* PRS_REG */
-#define DMA_PRS_PPR ((u32)1)
-#define DMA_PRS_PRO ((u32)2)
-
-#define DMA_VCS_PAS ((u64)1)
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-do { \
- cycles_t start_time = get_cycles(); \
- while (1) { \
- sts = op(iommu->reg + offset); \
- if (cond) \
- break; \
- if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n"); \
- cpu_relax(); \
- } \
-} while (0)
-
-#define QI_LENGTH 256 /* queue length */
-
-enum {
- QI_FREE,
- QI_IN_USE,
- QI_DONE,
- QI_ABORT
-};
-
-#define QI_CC_TYPE 0x1
-#define QI_IOTLB_TYPE 0x2
-#define QI_DIOTLB_TYPE 0x3
-#define QI_IEC_TYPE 0x4
-#define QI_IWD_TYPE 0x5
-#define QI_EIOTLB_TYPE 0x6
-#define QI_PC_TYPE 0x7
-#define QI_DEIOTLB_TYPE 0x8
-#define QI_PGRP_RESP_TYPE 0x9
-#define QI_PSTRM_RESP_TYPE 0xa
-
-#define QI_IEC_SELECTIVE (((u64)1) << 4)
-#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
-#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
-
-#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
-#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
-#define QI_IWD_FENCE (((u64)1) << 6)
-#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
-
-#define QI_IOTLB_DID(did) (((u64)did) << 16)
-#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
-#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
-#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
-#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
-
-#define QI_CC_FM(fm) (((u64)fm) << 48)
-#define QI_CC_SID(sid) (((u64)sid) << 32)
-#define QI_CC_DID(did) (((u64)did) << 16)
-#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
-
-#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
-#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
- ((u64)((pfsid >> 4) & 0xfff) << 52))
-#define QI_DEV_IOTLB_SIZE 1
-#define QI_DEV_IOTLB_MAX_INVS 32
-
-#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
-#define QI_PC_DID(did) (((u64)did) << 16)
-#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-
-/* PASID cache invalidation granu */
-#define QI_PC_ALL_PASIDS 0
-#define QI_PC_PASID_SEL 1
-
-#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
-#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
-#define QI_EIOTLB_DID(did) (((u64)did) << 16)
-#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
-
-/* QI Dev-IOTLB inv granu */
-#define QI_DEV_IOTLB_GRAN_ALL 1
-#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
-
-#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
-#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
-#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
- ((u64)((pfsid >> 4) & 0xfff) << 52))
-#define QI_DEV_EIOTLB_MAX_INVS 32
-
-/* Page group response descriptor QW0 */
-#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
-#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
-#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
-#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-
-/* Page group response descriptor QW1 */
-#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
-
-
-#define QI_RESP_SUCCESS 0x0
-#define QI_RESP_INVALID 0x1
-#define QI_RESP_FAILURE 0xf
-
-#define QI_GRAN_NONG_PASID 2
-#define QI_GRAN_PSI_PASID 3
-
-#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
-
-struct qi_desc {
- u64 qw0;
- u64 qw1;
- u64 qw2;
- u64 qw3;
-};
-
-struct q_inval {
- raw_spinlock_t q_lock;
- void *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
- int free_tail; /* last free entry */
- int free_cnt;
-};
-
-#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
-#define INTR_REMAP_TABLE_REG_SIZE 0xf
-#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
-
-#define INTR_REMAP_TABLE_ENTRIES 65536
-
-struct irq_domain;
-
-struct ir_table {
- struct irte *base;
- unsigned long *bitmap;
-};
-#endif
-
-struct iommu_flush {
- void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
- void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-};
-
-enum {
- SR_DMAR_FECTL_REG,
- SR_DMAR_FEDATA_REG,
- SR_DMAR_FEADDR_REG,
- SR_DMAR_FEUADDR_REG,
- MAX_SR_DMAR_REGS
-};
-
-#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
-#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
-#define VTD_FLAG_SVM_CAPABLE (1 << 2)
-
-extern int intel_iommu_sm;
-extern spinlock_t device_domain_lock;
-
-#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) && \
- ecap_pasid((iommu)->ecap))
-
-struct pasid_entry;
-struct pasid_state_entry;
-struct page_req_dsc;
-
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 lo;
- u64 hi;
-};
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
-
-/* si_domain contains multiple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
-
-/*
- * When VT-d works in the scalable mode, it allows DMA translation to
- * happen through either first level or second level page table. This
- * bit marks that the DMA translation for the domain goes through the
- * first level page table, otherwise, it goes through the second level.
- */
-#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
-
-/*
- * Domain represents a virtual machine which demands iommu nested
- * translation mode support.
- */
-#define DOMAIN_FLAG_NESTING_MODE BIT(2)
-
-struct dmar_domain {
- int nid; /* node id */
-
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
-
-
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
- unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */
-
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct list_head auxd; /* link to device's auxiliary list */
- struct iova_domain iovad; /* iova's that belong to this domain */
-
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
-
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
-
- int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
-
- int default_pasid; /*
- * The default pasid used for non-SVM
- * traffic on mediated devices.
- */
-
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
-};
-
-struct intel_iommu {
- void __iomem *reg; /* Pointer to hardware regs, virtual addr */
- u64 reg_phys; /* physical address of hw register set */
- u64 reg_size; /* size of hw register set */
- u64 cap;
- u64 ecap;
- u64 vccap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
- unsigned int irq, pr_irq;
- u16 segment; /* PCI segment# */
- unsigned char name[13]; /* Device Name */
-
-#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain ***domains; /* ptr to domains */
- spinlock_t lock; /* protect context, domain ids */
- struct root_entry *root_entry; /* virtual address */
-
- struct iommu_flush flush;
-#endif
-#ifdef CONFIG_INTEL_IOMMU_SVM
- struct page_req_dsc *prq;
- unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct completion prq_complete;
- struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
-#endif
- struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-
-#ifdef CONFIG_IRQ_REMAP
- struct ir_table *ir_table; /* Interrupt remapping info */
- struct irq_domain *ir_domain;
- struct irq_domain *ir_msi_domain;
-#endif
- struct iommu_device iommu; /* IOMMU core code handle */
- int node;
- u32 flags; /* Software defined flags */
-
- struct dmar_drhd_unit *drhd;
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- struct list_head table; /* link to pasid table */
- struct list_head auxiliary_domains; /* auxiliary domains
- * attached to this device
- */
- u32 segment; /* PCI segment number */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u16 pfsid; /* SRIOV physical function source ID */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 auxd_enabled:1; /* Multiple domains per device */
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
- struct pasid_table *pasid_table; /* pasid table */
-};
-
-static inline void __iommu_flush_cache(
- struct intel_iommu *iommu, void *addr, int size)
-{
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(addr, size);
-}
-
-/* Convert generic struct iommu_domain to private struct dmar_domain */
-static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct dmar_domain, domain);
-}
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
- * 12-63: Host physical address
- */
-struct dma_pte {
- u64 val;
-};
-
-static inline void dma_clear_pte(struct dma_pte *pte)
-{
- pte->val = 0;
-}
-
-static inline u64 dma_pte_addr(struct dma_pte *pte)
-{
-#ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
-#else
- /* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
- VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
-#endif
-}
-
-static inline bool dma_pte_present(struct dma_pte *pte)
-{
- return (pte->val & 3) != 0;
-}
-
-static inline bool dma_pte_superpage(struct dma_pte *pte)
-{
- return (pte->val & DMA_PTE_LARGE_PAGE);
-}
-
-static inline int first_pte_in_page(struct dma_pte *pte)
-{
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
-}
-
-extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
-extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
-
-extern int dmar_enable_qi(struct intel_iommu *iommu);
-extern void dmar_disable_qi(struct intel_iommu *iommu);
-extern int dmar_reenable_qi(struct intel_iommu *iommu);
-extern void qi_global_iec(struct intel_iommu *iommu);
-
-extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
-extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u16 qdep, u64 addr, unsigned mask);
-
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
- unsigned long npages, bool ih);
-
-void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u32 pasid, u16 qdep, u64 addr,
- unsigned int size_order);
-void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
- int pasid);
-
-int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
- unsigned int count, unsigned long options);
-/*
- * Options used in qi_submit_sync:
- * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
- */
-#define QI_OPT_WAIT_DRAIN BIT(0)
-
-extern int dmar_ir_support(void);
-
-void *alloc_pgtable_page(int node);
-void free_pgtable_page(void *vaddr);
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
-int for_each_device_domain(int (*fn)(struct device_domain_info *info,
- void *data), void *data);
-void iommu_flush_write_buffer(struct intel_iommu *iommu);
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
-struct dmar_domain *find_domain(struct device *dev);
-struct device_domain_info *get_domain_info(struct device *dev);
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-extern void intel_svm_check(struct intel_iommu *iommu);
-extern int intel_svm_enable_prq(struct intel_iommu *iommu);
-extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
- struct iommu_gpasid_bind_data *data);
-int intel_svm_unbind_gpasid(struct device *dev, int pasid);
-struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
- void *drvdata);
-void intel_svm_unbind(struct iommu_sva *handle);
-int intel_svm_get_pasid(struct iommu_sva *handle);
-int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
-
-struct svm_dev_ops;
-
-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct svm_dev_ops *ops;
- struct iommu_sva sva;
- int pasid;
- int users;
- u16 did;
- u16 dev_iotlb:1;
- u16 sid, qdep;
-};
-
-struct intel_svm {
- struct mmu_notifier notifier;
- struct mm_struct *mm;
-
- struct intel_iommu *iommu;
- int flags;
- int pasid;
- int gpasid; /* In case that guest PASID is different from host PASID */
- struct list_head devs;
- struct list_head list;
-};
-#else
-static inline void intel_svm_check(struct intel_iommu *iommu) {}
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
-void intel_iommu_debugfs_init(void);
-#else
-static inline void intel_iommu_debugfs_init(void) {}
-#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
-
-extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
-struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
- u8 devfn, int alloc);
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-#define dmar_disabled (1)
-#define intel_iommu_enabled (0)
-#endif
-
-#endif
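This header was driver-private in all but location; its contents move under drivers/iommu/intel/ rather than being deleted outright. For flavour, the capability-decoding style it defined (values from the VT-d spec; the iommu pointer is assumed):

	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	int mgaw = cap_mgaw(cap);		/* ((cap >> 16) & 0x3f) + 1 */
	bool cm = cap_caching_mode(cap);	/* (cap >> 7) & 1 */

Out-of-tree users of these definitions now have to carry their own copies, which is the point of the move.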
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 0d6b4bc191c5..771622650247 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -8,11 +8,17 @@
#ifndef _INTEL_ISH_CLIENT_IF_H_
#define _INTEL_ISH_CLIENT_IF_H_
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
struct ishtp_cl_device;
struct ishtp_device;
struct ishtp_cl;
struct ishtp_fw_client;
+typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
/* Client state */
enum cl_state {
ISHTP_CL_INITIALIZING = 0,
@@ -34,9 +40,9 @@ enum cl_state {
struct ishtp_cl_driver {
struct device_driver driver;
const char *name;
- const guid_t *guid;
+ const struct ishtp_device_id *id;
int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
+ void (*remove)(struct ishtp_cl_device *dev);
int (*reset)(struct ishtp_cl_device *dev);
const struct dev_pm_ops *pm;
};
@@ -75,8 +81,10 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
/* Get the device * from ishtp device instance */
struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
/* Trace interface for clients */
-void *ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
/* Get device pointer of PCI device for DMA access */
struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
@@ -86,6 +94,9 @@ int ishtp_cl_link(struct ishtp_cl *cl);
void ishtp_cl_unlink(struct ishtp_cl *cl);
int ishtp_cl_disconnect(struct ishtp_cl *cl);
int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
+ int tx_size, int rx_size, bool reset);
+void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset);
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
int ishtp_cl_flush_queues(struct ishtp_cl *cl);
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
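Two API changes for client drivers fall out of this hunk: matching is now done against an ishtp_device_id table instead of a bare GUID, and remove() no longer returns a value. A registration sketch; the driver name, GUID macro and callbacks are assumed:

static const struct ishtp_device_id mydrv_id_table[] = {
	{ .guid = MYDRV_UUID, },	/* assumed GUID_INIT(...) macro */
	{ }
};
MODULE_DEVICE_TABLE(ishtp, mydrv_id_table);

static struct ishtp_cl_driver mydrv_driver = {
	.name	= "mydrv",
	.id	= mydrv_id_table,
	.probe	= mydrv_probe,
	.remove	= mydrv_remove,	/* void, since remove cannot usefully fail */
};

The id table also feeds module autoloading via MODULE_DEVICE_TABLE(), which the old bare guid pointer could not.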
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
deleted file mode 100644
index fcd841a90f2f..000000000000
--- a/include/linux/intel-pti.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Intel 2011
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file will allow other parts of the OS to use the
- * interface to write out its contents for debugging a mobile system.
- */
-
-#ifndef LINUX_INTEL_PTI_H_
-#define LINUX_INTEL_PTI_H_
-
-/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
-#define PTI_LASTDWORD_DTS 0x30
-
-/* basic structure used as a write address to the PTI HW */
-struct pti_masterchannel {
- u8 master;
- u8 channel;
-};
-
-/* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
- const char *thread_name);
-void pti_release_masterchannel(struct pti_masterchannel *mc);
-
-#endif /* LINUX_INTEL_PTI_H_ */
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
deleted file mode 100644
index c9e7e601950d..000000000000
--- a/include/linux/intel-svm.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2015 Intel Corporation.
- *
- * Authors: David Woodhouse <David.Woodhouse@intel.com>
- */
-
-#ifndef __INTEL_SVM_H__
-#define __INTEL_SVM_H__
-
-struct device;
-
-struct svm_dev_ops {
- void (*fault_cb)(struct device *dev, int pasid, u64 address,
- void *private, int rwxp, int response);
-};
-
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
-/*
- * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
- * PASID for the current process. Even if a PASID already exists, a new one
- * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID
- * will not be given to subsequent callers. This facility allows a driver to
- * disambiguate between multiple device contexts which access the same MM,
- * if there is no other way to do so. It should be used sparingly, if at all.
- */
-#define SVM_FLAG_PRIVATE_PASID (1<<0)
-
-/*
- * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
- * for access to kernel addresses. No IOTLB flushes are automatically done
- * for kernel mappings; it is valid only for access to the kernel's static
- * 1:1 mapping of physical memory — not to vmalloc or even module mappings.
- * A future API addition may permit the use of such ranges, by means of an
- * explicit IOTLB flush call (akin to the DMA API's unmap method).
- *
- * It is unlikely that we will ever hook into flush_tlb_kernel_range() to
- * do such IOTLB flushes automatically.
- */
-#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
-/*
- * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
- * processes. Compared to the host bind, the primary differences are:
- * 1. mm life cycle management
- * 2. fault reporting
- */
-#define SVM_FLAG_GUEST_MODE (1<<2)
-/*
- * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
- * which requires guest and host PASID translation at both directions.
- */
-#define SVM_FLAG_GUEST_PASID (1<<3)
-
-#endif /* __INTEL_SVM_H__ */
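With these Intel-specific flags gone, shared virtual addressing goes through the generic SVA API instead. A rough sketch; the exact signature varies across kernel versions:

	struct iommu_sva *handle;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... issue DMA tagged with iommu_sva_get_pasid(handle) ... */
	iommu_sva_unbind_device(handle);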
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index 3582176a1eca..f3196f82fd8a 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -14,6 +14,12 @@
#include <linux/powercap.h>
#include <linux/cpuhotplug.h>
+enum rapl_if_type {
+ RAPL_IF_MSR, /* RAPL I/F using MSR registers */
+ RAPL_IF_MMIO, /* RAPL I/F using MMIO registers */
+ RAPL_IF_TPMI, /* RAPL I/F using TPMI registers */
+};
+
enum rapl_domain_type {
RAPL_DOMAIN_PACKAGE, /* entire package/socket */
RAPL_DOMAIN_PP0, /* core power plane */
@@ -30,17 +36,23 @@ enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
RAPL_DOMAIN_REG_PL4,
+ RAPL_DOMAIN_REG_UNIT,
+ RAPL_DOMAIN_REG_PL2,
RAPL_DOMAIN_REG_MAX,
};
-struct rapl_package;
+struct rapl_domain;
enum rapl_primitives {
- ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
POWER_LIMIT4,
+ ENERGY_COUNTER,
FW_LOCK,
+ FW_HIGH_LOCK,
+ PL1_LOCK,
+ PL2_LOCK,
+ PL4_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
@@ -58,6 +70,12 @@ enum rapl_primitives {
THROTTLED_TIME,
PRIORITY_LEVEL,
+ PSYS_POWER_LIMIT1,
+ PSYS_POWER_LIMIT2,
+ PSYS_PL1_ENABLE,
+ PSYS_PL2_ENABLE,
+ PSYS_TIME_WINDOW1,
+ PSYS_TIME_WINDOW2,
/* below are not raw primitive data */
AVERAGE_POWER,
NR_RAPL_PRIMITIVES,
@@ -68,32 +86,43 @@ struct rapl_domain_data {
unsigned long timestamp;
};
-#define NR_POWER_LIMITS (3)
+#define NR_POWER_LIMITS (POWER_LIMIT4 + 1)
+
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
- int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ bool locked;
u64 last_power_limit;
};
struct rapl_package;
+#define RAPL_DOMAIN_NAME_LENGTH 16
+
+union rapl_reg {
+ void __iomem *mmio;
+ u32 msr;
+ u64 val;
+};
+
struct rapl_domain {
- const char *name;
+ char name[RAPL_DOMAIN_NAME_LENGTH];
enum rapl_domain_type id;
- u64 regs[RAPL_DOMAIN_REG_MAX];
+ union rapl_reg regs[RAPL_DOMAIN_REG_MAX];
struct powercap_zone power_zone;
struct rapl_domain_data rdd;
struct rapl_power_limit rpl[NR_POWER_LIMITS];
u64 attr_map; /* track capabilities */
unsigned int state;
- unsigned int domain_energy_unit;
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
struct rapl_package *rp;
};
struct reg_action {
- u64 reg;
+ union rapl_reg reg;
u64 mask;
u64 value;
int err;
@@ -113,16 +142,20 @@ struct reg_action {
* registers.
* @write_raw: Callback for writing RAPL interface specific
* registers.
+ * @defaults: internal pointer to interface default settings
+ * @rpi: internal pointer to interface primitive info
*/
struct rapl_if_priv {
+ enum rapl_if_type type;
struct powercap_control_type *control_type;
- struct rapl_domain *platform_rapl_domain;
enum cpuhp_state pcap_rapl_online;
- u64 reg_unit;
- u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ union rapl_reg reg_unit;
+ union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
int limits[RAPL_DOMAIN_MAX];
- int (*read_raw)(int cpu, struct reg_action *ra);
- int (*write_raw)(int cpu, struct reg_action *ra);
+ int (*read_raw)(int id, struct reg_action *ra);
+ int (*write_raw)(int id, struct reg_action *ra);
+ void *defaults;
+ void *rpi;
};
/* maximum rapl package domain name: package-%d-die-%d */
@@ -132,9 +165,6 @@ struct rapl_package {
 unsigned int id; /* logical die id, equals the physical die id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
- unsigned int power_unit;
- unsigned int energy_unit;
- unsigned int time_unit;
struct rapl_domain *domains; /* array of domains, sized at runtime */
struct powercap_zone *power_zone; /* keep track of parent zone */
unsigned long power_limit_irq; /* keep track of package power limit
@@ -148,11 +178,14 @@ struct rapl_package {
struct rapl_if_priv *priv;
};
-struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
-struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
-void rapl_remove_package(struct rapl_package *rp);
+struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+void rapl_remove_package_cpuslocked(struct rapl_package *rp);
-int rapl_add_platform_domain(struct rapl_if_priv *priv);
-void rapl_remove_platform_domain(struct rapl_if_priv *priv);
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+void rapl_remove_package(struct rapl_package *rp);
#endif /* __INTEL_RAPL_H__ */
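
The reworked rapl_if_priv identifies the interface flavour explicitly and routes register access through union rapl_reg, with read_raw()/write_raw() taking a generic id (a CPU for MSR interfaces, an instance id for TPMI). A minimal sketch of wiring up an MSR-style interface follows; my_rdmsrl_on_cpu()/my_wrmsrl_on_cpu() are hypothetical stand-ins for the platform's MSR accessors, not real kernel APIs, and error handling is trimmed.

/* Sketch only: the MSR accessors below are stand-ins, not kernel APIs. */
static int my_read_raw(int id, struct reg_action *ra)
{
	/* for RAPL_IF_MSR, @id is a CPU and ra->reg holds an MSR address */
	if (my_rdmsrl_on_cpu(id, ra->reg.msr, &ra->value)) {
		ra->err = -EIO;
		return -EIO;
	}
	ra->value &= ra->mask;
	return 0;
}

static int my_write_raw(int id, struct reg_action *ra)
{
	u64 val;

	/* read-modify-write under ra->mask */
	if (my_rdmsrl_on_cpu(id, ra->reg.msr, &val))
		return -EIO;
	val = (val & ~ra->mask) | (ra->value & ra->mask);
	return my_wrmsrl_on_cpu(id, ra->reg.msr, val) ? -EIO : 0;
}

static struct rapl_if_priv my_rapl_priv = {
	.type		= RAPL_IF_MSR,
	.read_raw	= my_read_raw,
	.write_raw	= my_write_raw,
};

static int my_rapl_cpu_online(unsigned int cpu)
{
	struct rapl_package *rp;

	/* id_is_cpu == true: look the package up by CPU number */
	rp = rapl_find_package_domain(cpu, &my_rapl_priv, true);
	if (!rp) {
		rp = rapl_add_package(cpu, &my_rapl_priv, true);
		if (IS_ERR(rp))
			return PTR_ERR(rp);
	}
	return 0;
}
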
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
new file mode 100644
index 000000000000..8ff8eabb4a98
--- /dev/null
+++ b/include/linux/intel_tcc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * header for Intel TCC (thermal control circuitry) library
+ *
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#ifndef __INTEL_TCC_H__
+#define __INTEL_TCC_H__
+
+#include <linux/types.h>
+
+int intel_tcc_get_tjmax(int cpu);
+int intel_tcc_get_offset(int cpu);
+int intel_tcc_set_offset(int cpu, int offset);
+int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
+
+#endif /* __INTEL_TCC_H__ */
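
The new library gathers the MSR poking for Tjmax, the TCC offset and the current temperature in one place. A hedged sketch of a consumer deriving a throttle temperature, assuming the helpers return degrees Celsius and negative values are errnos:

/* Sketch: compute the TCC activation point for @cpu in millidegrees C. */
static int my_tcc_trip_temp_mc(int cpu)
{
	int tjmax, offset;

	tjmax = intel_tcc_get_tjmax(cpu);
	if (tjmax < 0)
		return tjmax;

	offset = intel_tcc_get_offset(cpu);
	if (offset < 0)
		return offset;

	/* throttling starts @offset degrees below Tjmax */
	return (tjmax - offset) * 1000;
}
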
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
new file mode 100644
index 000000000000..a3529b962be6
--- /dev/null
+++ b/include/linux/intel_tpmi.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel_tpmi.h: Intel TPMI core external interface
+ */
+
+#ifndef _INTEL_TPMI_H_
+#define _INTEL_TPMI_H_
+
+#include <linux/bitfield.h>
+
+#define TPMI_VERSION_INVALID 0xff
+#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
+#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
+/**
+ * struct intel_tpmi_plat_info - Platform information for a TPMI device instance
+ * @package_id: CPU Package id
+ * @bus_number: PCI bus number
+ * @device_number: PCI device number
+ * @function_number: PCI function number
+ *
+ * Structure to store platform data for a TPMI device instance. This
+ * struct is used to return data via tpmi_get_platform_data().
+ */
+struct intel_tpmi_plat_info {
+ u8 package_id;
+ u8 bus_number;
+ u8 device_number;
+ u8 function_number;
+};
+
+struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
+struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
+int tpmi_get_resource_count(struct auxiliary_device *auxdev);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
+ bool *write_blocked);
+#endif
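
TPMI features are exposed as auxiliary devices, so a feature driver's probe can pull platform data and resources straight from the auxdev. A sketch under that assumption (the usual auxiliary-bus probe signature, error handling trimmed):

/* Sketch of a TPMI feature driver probe on the auxiliary bus. */
static int my_tpmi_probe(struct auxiliary_device *auxdev,
			 const struct auxiliary_device_id *id)
{
	struct intel_tpmi_plat_info *info;
	struct resource *res;
	bool rd_blocked, wr_blocked;
	int ret;

	ret = tpmi_get_feature_status(auxdev, TPMI_ID_RAPL,
				      &rd_blocked, &wr_blocked);
	if (ret || rd_blocked)
		return ret ? ret : -ENODEV;

	info = tpmi_get_platform_data(auxdev);
	if (!info)
		return -ENODEV;

	dev_info(&auxdev->dev, "package %u at %02x:%02x.%x, %d resources\n",
		 info->package_id, info->bus_number, info->device_number,
		 info->function_number, tpmi_get_resource_count(auxdev));

	res = tpmi_get_resource_at_index(auxdev, 0);
	if (!res)
		return -ENODEV;
	/* ... ioremap the resource and parse the feature registers ... */
	return 0;
}
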
diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
new file mode 100644
index 000000000000..0cd80112bea5
--- /dev/null
+++ b/include/linux/interconnect-clk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __LINUX_INTERCONNECT_CLK_H
+#define __LINUX_INTERCONNECT_CLK_H
+
+struct device;
+
+struct icc_clk_data {
+ struct clk *clk;
+ const char *name;
+};
+
+struct icc_provider *icc_clk_register(struct device *dev,
+ unsigned int first_id,
+ unsigned int num_clocks,
+ const struct icc_clk_data *data);
+void icc_clk_unregister(struct icc_provider *provider);
+
+#endif
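
This helper turns plain clocks into interconnect nodes, so simple platforms get bandwidth voting without a dedicated provider driver. A sketch of a probe using it; the clock names and the first_id of 0 are illustrative, not from any real binding:

/* Sketch: expose two bus clocks as interconnect nodes. */
static int my_probe(struct platform_device *pdev)
{
	struct icc_clk_data clks[] = {
		{ .name = "cpu_axi" },	/* illustrative clock names */
		{ .name = "gpu_axi" },
	};
	struct icc_provider *provider;
	int i;

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		clks[i].clk = devm_clk_get(&pdev->dev, clks[i].name);
		if (IS_ERR(clks[i].clk))
			return PTR_ERR(clks[i].clk);
	}

	/* first_id: base node id for the generated node pairs */
	provider = icc_clk_register(&pdev->dev, 0, ARRAY_SIZE(clks), clks);
	return PTR_ERR_OR_ZERO(provider);
}
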
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 4735518de515..f5aef8784692 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -15,6 +15,17 @@ struct icc_node;
struct of_phandle_args;
/**
+ * struct icc_node_data - icc node data
+ *
+ * @node: icc node
+ * @tag: tag parsed from the phandle arguments for this node
+ */
+struct icc_node_data {
+ struct icc_node *node;
+ u32 tag;
+};
+
+/**
* struct icc_onecell_data - driver data for onecell interconnect providers
*
* @num_nodes: number of nodes in this device
@@ -22,10 +33,10 @@ struct of_phandle_args;
*/
struct icc_onecell_data {
unsigned int num_nodes;
- struct icc_node *nodes[];
+ struct icc_node *nodes[] __counted_by(num_nodes);
};
-struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
void *data);
/**
@@ -38,7 +49,9 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
* @aggregate: pointer to device specific aggregate operation function
* @pre_aggregate: pointer to device specific function that is called
* before the aggregation begins (optional)
+ * @get_bw: pointer to device specific function to get current bandwidth
* @xlate: provider-specific callback for mapping nodes from phandle arguments
+ * @xlate_extended: vendor-specific callback for mapping node data from phandle arguments
* @dev: the device this interconnect provider belongs to
* @users: count of active users
* @inter_set: whether inter-provider pairs will be configured with @set
@@ -51,7 +64,10 @@ struct icc_provider {
int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
void (*pre_aggregate)(struct icc_node *node);
- struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
+ int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
+ struct icc_node* (*xlate)(const struct of_phandle_args *spec, void *data);
+ struct icc_node_data* (*xlate_extended)(const struct of_phandle_args *spec,
+ void *data);
struct device *dev;
int users;
bool inter_set;
@@ -73,6 +89,8 @@ struct icc_provider {
* @req_list: a list of QoS constraint requests associated with this node
* @avg_bw: aggregated value of average bandwidth requests from all consumers
* @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @init_avg: average bandwidth value that is read from the hardware during init
+ * @init_peak: peak bandwidth value that is read from the hardware during init
* @data: pointer to private data
*/
struct icc_node {
@@ -89,6 +107,8 @@ struct icc_node {
struct hlist_head req_list;
u32 avg_bw;
u32 peak_bw;
+ u32 init_avg;
+ u32 init_peak;
void *data;
};
@@ -99,13 +119,14 @@ int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
struct icc_node *icc_node_create(int id);
void icc_node_destroy(int id);
int icc_link_create(struct icc_node *node, const int dst_id);
-int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
int icc_nodes_remove(struct icc_provider *provider);
-int icc_provider_add(struct icc_provider *provider);
-int icc_provider_del(struct icc_provider *provider);
-struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec);
+void icc_provider_init(struct icc_provider *provider);
+int icc_provider_register(struct icc_provider *provider);
+void icc_provider_deregister(struct icc_provider *provider);
+struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec);
+void icc_sync_state(struct device *dev);
#else
@@ -129,11 +150,6 @@ static inline int icc_link_create(struct icc_node *node, const int dst_id)
return -ENOTSUPP;
}
-static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
-{
- return -ENOTSUPP;
-}
-
static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
}
@@ -147,17 +163,16 @@ static inline int icc_nodes_remove(struct icc_provider *provider)
return -ENOTSUPP;
}
-static inline int icc_provider_add(struct icc_provider *provider)
-{
- return -ENOTSUPP;
-}
+static inline void icc_provider_init(struct icc_provider *provider) { }
-static inline int icc_provider_del(struct icc_provider *provider)
+static inline int icc_provider_register(struct icc_provider *provider)
{
return -ENOTSUPP;
}
-static inline struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+static inline void icc_provider_deregister(struct icc_provider *provider) { }
+
+static inline struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
{
return ERR_PTR(-ENOTSUPP);
}
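
The icc_provider_add()/icc_provider_del() pair gives way to a three-step flow: initialize the provider, populate its nodes, then register it, so consumers only ever see a complete topology. A sketch under those assumptions; MY_MASTER_ID and my_set() are illustrative:

#define MY_MASTER_ID	1	/* illustrative node id */

static int my_set(struct icc_node *src, struct icc_node *dst)
{
	/* program the hardware from src->avg_bw / src->peak_bw; elided */
	return 0;
}

static int my_icc_probe(struct platform_device *pdev)
{
	struct icc_provider *provider;
	struct icc_node *node;
	int ret;

	provider = devm_kzalloc(&pdev->dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->dev = &pdev->dev;
	provider->set = my_set;
	provider->aggregate = icc_std_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	icc_provider_init(provider);		/* before any icc_node_add() */

	node = icc_node_create(MY_MASTER_ID);
	if (IS_ERR(node))
		return PTR_ERR(node);
	icc_node_add(node, provider);

	ret = icc_provider_register(provider);	/* publish last */
	if (ret)
		icc_nodes_remove(provider);
	return ret;
}
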
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index 3a63d98613fc..97ac253df62c 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -23,12 +23,26 @@
struct icc_path;
struct device;
+/**
+ * struct icc_bulk_data - Data used for bulk icc operations.
+ *
+ * @path: reference to the interconnect path (internal use)
+ * @name: the name from the "interconnect-names" DT property
+ * @avg_bw: average bandwidth in icc units
+ * @peak_bw: peak bandwidth in icc units
+ */
+struct icc_bulk_data {
+ struct icc_path *path;
+ const char *name;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
#if IS_ENABLED(CONFIG_INTERCONNECT)
-struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
void icc_put(struct icc_path *path);
int icc_enable(struct icc_path *path);
@@ -36,15 +50,15 @@ int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
const char *icc_get_name(struct icc_path *path);
+int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths);
+void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
+int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
+int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
+void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
#else
-static inline struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id)
-{
- return NULL;
-}
-
static inline struct icc_path *of_icc_get(struct device *dev,
const char *name)
{
@@ -90,6 +104,35 @@ static inline const char *icc_get_name(struct icc_path *path)
return NULL;
}
+static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int devm_of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
+{
+}
+
+static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
+{
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_H */
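
The bulk API lets a consumer fetch, vote on and enable several paths in one go. A short sketch; the path names stand for "interconnect-names" DT entries and the bandwidth values (in icc units) are illustrative:

/* Sketch: vote on several interconnect paths with the bulk helpers. */
static struct icc_bulk_data my_paths[] = {
	{ .name = "cpu-mem", .avg_bw = 100000, .peak_bw = 200000 },
	{ .name = "cpu-cfg", .avg_bw = 1000,   .peak_bw = 1000 },
};

static int my_enable_buses(struct device *dev)
{
	int ret;

	ret = devm_of_icc_bulk_get(dev, ARRAY_SIZE(my_paths), my_paths);
	if (ret)
		return ret;

	ret = icc_bulk_set_bw(ARRAY_SIZE(my_paths), my_paths);
	if (ret)
		return ret;

	return icc_bulk_enable(ARRAY_SIZE(my_paths), my_paths);
}
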
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f9aee3538461..76121c2bb4f8 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -13,6 +13,7 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
@@ -61,6 +62,11 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly by enable_irq() or enable_nmi()
+ * later.
+ * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
+ * depends on IRQF_PERCPU.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +80,8 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_AUTOEN 0x00080000
+#define IRQF_NO_DEBUG 0x00100000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -214,24 +222,7 @@ devm_request_any_context_irq(struct device *dev, unsigned int irq,
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
-/*
- * On lockdep we dont want to enable hardirqs in hardirq
- * context. Use local_irq_enable_in_hardirq() to annotate
- * kernel code that has to do this nevertheless (pretty much
- * the only valid case is for old/broken hardware that is
- * insanely slow).
- *
- * NOTE: in theory this might break fragile code that relies
- * on hardirq delivery - in practice we dont seem to have such
- * places left. So the only effect should be slightly increased
- * irqs-off latencies.
- */
-#ifdef CONFIG_LOCKDEP
-# define local_irq_enable_in_hardirq() do { } while (0)
-#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
-#endif
-
+bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
@@ -314,44 +305,54 @@ struct irq_affinity_desc {
extern cpumask_var_t irq_default_affinity;
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
- bool force);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity);
/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Fails if cpumask does not contain an online CPU
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
*/
static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, false);
+ return __irq_apply_affinity_hint(irq, m, false);
}
/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ * cpumask to the interrupt
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
+ * Updates the affinity hint and if @m is not NULL it applies it as the
+ * affinity of that interrupt.
*/
static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, true);
+ return __irq_apply_affinity_hint(irq, m, true);
}
-extern int irq_can_set_affinity(unsigned int irq);
-extern int irq_select_affinity(unsigned int irq);
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return irq_set_affinity_and_hint(irq, m);
+}
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
@@ -381,12 +382,30 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
return -EINVAL;
}
+static inline int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity)
+{
+ return -EINVAL;
+}
+
static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
@@ -489,12 +508,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
-# define force_irqthreads (true)
+# define force_irqthreads() (true)
# else
-extern bool force_irqthreads;
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads() (false)
#endif
#ifndef local_softirq_pending
@@ -541,7 +561,20 @@ enum
NR_SOFTIRQS
};
-#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/*
+ * The following vectors can be safely ignored after ksoftirqd is parked:
+ *
+ * _ RCU:
+ * 1) rcutree_migrate_callbacks() migrates the queue.
+ * 2) rcutree_report_cpu_dead() reports the final quiescent states.
+ *
+ * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
+ */
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -560,12 +593,12 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
+#ifdef CONFIG_PREEMPT_RT
+extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
-static inline void do_softirq_own_stack(void)
+static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
- __do_softirq();
+ do_softirq();
}
#endif
@@ -654,26 +687,21 @@ enum
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -698,6 +726,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
smp_mb__after_atomic();
}
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_spin_wait(t);
+ smp_mb();
+}
+
static inline void tasklet_disable(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
@@ -712,7 +751,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
}
extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
@@ -792,9 +830,9 @@ extern int arch_early_irq_init(void);
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
#ifndef __irq_entry
-# define __irq_entry __attribute__((__section__(".irqentry.text")))
+# define __irq_entry __section(".irqentry.text")
#endif
-#define __softirq_entry __attribute__((__section__(".softirqentry.text")))
+#define __softirq_entry __section(".softirqentry.text")
#endif
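
Among the additions here, IRQF_NO_AUTOEN closes the race in the old request_irq()-then-disable_irq() idiom: the line stays masked until the driver explicitly enables it. A minimal sketch (my_handler and the "mydev" name are illustrative):

static irqreturn_t my_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(struct device *dev, int irq)
{
	int ret;

	/* the IRQ is requested but left disabled */
	ret = request_irq(irq, my_handler, IRQF_NO_AUTOEN, "mydev", dev);
	if (ret)
		return ret;

	/* ... finish device initialization; the IRQ cannot fire yet ... */

	enable_irq(irq);	/* first, explicit enable */
	return 0;
}
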
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
index 288c26f50732..2b8026a39906 100644
--- a/include/linux/interval_tree.h
+++ b/include/linux/interval_tree.h
@@ -27,4 +27,62 @@ extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
+/**
+ * struct interval_tree_span_iter - Find used and unused spans.
+ * @start_hole: Start of an interval for a hole when is_hole == 1
+ * @last_hole: Inclusive end of an interval for a hole when is_hole == 1
+ * @start_used: Start of a used interval when is_hole == 0
+ * @last_used: Inclusive end of a used interval when is_hole == 0
+ * @is_hole: 0 == used, 1 == hole, -1 == iteration done
+ *
+ * This iterator travels over spans in an interval tree. It does not return
+ * nodes but classifies each span as either a hole, where no nodes intersect, or
+ * a used span, which is fully covered by nodes. Each iteration step toggles between
+ * hole and used until the entire range is covered. The returned spans always
+ * fully cover the requested range.
+ *
+ * The iterator is greedy: it always returns the largest hole or used span possible,
+ * consolidating all consecutive nodes.
+ *
+ * Use interval_tree_span_iter_done() to detect end of iteration.
+ */
+struct interval_tree_span_iter {
+ /* private: not for use by the caller */
+ struct interval_tree_node *nodes[2];
+ unsigned long first_index;
+ unsigned long last_index;
+
+ /* public: */
+ union {
+ unsigned long start_hole;
+ unsigned long start_used;
+ };
+ union {
+ unsigned long last_hole;
+ unsigned long last_used;
+ };
+ int is_hole;
+};
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *state,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index);
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index);
+void interval_tree_span_iter_next(struct interval_tree_span_iter *state);
+
+static inline bool
+interval_tree_span_iter_done(struct interval_tree_span_iter *state)
+{
+ return state->is_hole == -1;
+}
+
+#define interval_tree_for_each_span(span, itree, first_index, last_index) \
+ for (interval_tree_span_iter_first(span, itree, \
+ first_index, last_index); \
+ !interval_tree_span_iter_done(span); \
+ interval_tree_span_iter_next(span))
+
#endif /* _LINUX_INTERVAL_TREE_H */
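
To illustrate the span iterator, a hedged sketch that prints every hole and used span over a range; the tree is assumed to be an rb_root_cached populated with interval_tree_insert():

/* Sketch: walk an interval tree and report holes in [0, last]. */
static void my_dump_spans(struct rb_root_cached *itree, unsigned long last)
{
	struct interval_tree_span_iter span;

	interval_tree_for_each_span(&span, itree, 0, last) {
		if (span.is_hole)
			pr_info("hole: %lu-%lu\n",
				span.start_hole, span.last_hole);
		else
			pr_info("used: %lu-%lu\n",
				span.start_used, span.last_used);
	}
}
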
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c75e4d3d8833..7376c1df9c90 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,13 +69,38 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr);
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
+}
+
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ resource_size_t phys_addr;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ kunmap_local_indexed((void __force *)vaddr);
}
static inline void __iomem *
@@ -97,7 +122,7 @@ io_mapping_unmap(void __iomem *vaddr)
iounmap(vaddr);
}
-#else
+#else /* HAVE_ATOMIC_IOMAP */
#include <linux/uaccess.h>
@@ -113,13 +138,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->base = base;
iomap->size = size;
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
@@ -149,7 +168,10 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
- preempt_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
pagefault_disable();
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
@@ -159,10 +181,24 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
{
io_mapping_unmap(vaddr);
pagefault_enable();
- preempt_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
}
-#endif /* HAVE_ATOMIC_IOMAP */
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ io_mapping_unmap(vaddr);
+}
+
+#endif /* !HAVE_ATOMIC_IOMAP */
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
@@ -189,4 +225,7 @@ io_mapping_free(struct io_mapping *iomap)
kfree(iomap);
}
+int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size);
+
#endif /* _LINUX_IO_MAPPING_H */
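
The new *_local variants mirror kmap_local(): the mapping is CPU-local, but unlike the atomic variants the section neither disables pagefaults nor, on PREEMPT_RT, preemption. A sketch of a bounded read through such a mapping, assuming @offset lies within the mapping:

/* Sketch: read one dword through a short-lived local WC mapping. */
static u32 my_read_dword(struct io_mapping *iomap, unsigned long offset)
{
	void __iomem *vaddr;
	u32 val;

	/* map the page containing @offset, read, tear down immediately */
	vaddr = io_mapping_map_local_wc(iomap, offset & PAGE_MASK);
	val = readl(vaddr + offset_in_page(offset));
	io_mapping_unmap_local(vaddr);

	return val;
}
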
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 23285ba645db..86cf1f7ae389 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -15,6 +15,10 @@ enum io_pgtable_fmt {
ARM_64_LPAE_S2,
ARM_V7S,
ARM_MALI_LPAE,
+ AMD_IOMMU_V1,
+ AMD_IOMMU_V2,
+ APPLE_DART,
+ APPLE_DART2,
IO_PGTABLE_NUM_FMTS,
};
@@ -25,13 +29,11 @@ enum io_pgtable_fmt {
* @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
* (sometimes referred to as the "walk cache") for a virtual
* address range.
- * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
- * address range.
* @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
* single page. IOMMUs that cannot batch TLB invalidation
* operations efficiently will typically issue them here, but
* others may decide to update the iommu_iotlb_gather structure
- * and defer the invalidation until iommu_tlb_sync() instead.
+ * and defer the invalidation until iommu_iotlb_sync() instead.
*
* Note that these can all be called in atomic context and must therefore
* not block.
@@ -40,8 +42,6 @@ struct iommu_flush_ops {
void (*tlb_flush_all)(void *cookie);
void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
void *cookie);
- void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
- void *cookie);
void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie);
};
@@ -72,27 +72,26 @@ struct io_pgtable_cfg {
* hardware which does not implement the permissions of a given
* format, and/or requires some format-specific default value.
*
- * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
- * (unmapped) entries but the hardware might do so anyway, perform
- * TLB maintenance when mapping as well as when unmapping.
- *
* IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
- * to support up to 34 bits PA where the bit32 and bit33 are
- * encoded in the bit9 and bit4 of the PTE respectively.
+ * to support up to 35 bits PA, where bits 32, 33 and 34 are
+ * encoded in bits 9, 4 and 5 of the PTE respectively.
*
- * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
- * on unmap, for DMA domains using the flush queue mechanism for
- * delayed invalidation.
+ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
+ * extend the translation table base support up to 35 bits PA; the
+ * encoding format is the same as IO_PGTABLE_QUIRK_ARM_MTK_EXT.
*
* IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
* for use in the upper half of a split address space.
+ *
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+ * attributes set in the TCR for a non-coherent page-table walker.
*/
- #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
- #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
- #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
- #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
- #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
- #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
@@ -101,6 +100,30 @@ struct io_pgtable_cfg {
const struct iommu_flush_ops *tlb;
struct device *iommu_dev;
+ /**
+ * @alloc: Custom page allocator.
+ *
+ * Optional hook used to allocate page tables. If this function is NULL,
+ * @free must be NULL too.
+ *
+ * Memory returned should be zeroed and suitable for dma_map_single() and
+ * virt_to_phys().
+ *
+ * Not all formats support custom page allocators. Before considering
+ * passing a non-NULL value, make sure the chosen page format supports
+ * this feature.
+ */
+ void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
+
+ /**
+ * @free: Custom page de-allocator.
+ *
+ * Optional hook used to free page tables allocated with the @alloc
+ * hook. Must be non-NULL if @alloc is not NULL, must be NULL
+ * otherwise.
+ */
+ void (*free)(void *cookie, void *pages, size_t size);
+
/* Low-level data specific to the table format */
union {
struct {
@@ -140,26 +163,37 @@ struct io_pgtable_cfg {
u64 transtab;
u64 memattr;
} arm_mali_lpae_cfg;
+
+ struct {
+ u64 ttbr[4];
+ u32 n_ttbrs;
+ } apple_dart_cfg;
};
};
/**
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
- * @map: Map a physically contiguous memory region.
- * @unmap: Unmap a physically contiguous memory region.
+ * @map_pages: Map a physically contiguous range of pages of the same size.
+ * @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
*
* These functions map directly onto the iommu_ops member functions with
* the same names.
*/
struct io_pgtable_ops {
- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
- size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather);
+ int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
};
/**
@@ -210,21 +244,16 @@ struct io_pgtable {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
- iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
}
static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
size_t size, size_t granule)
{
- iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
-}
-
-static inline void
-io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
- size_t size, size_t granule)
-{
- iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
+ iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}
static inline void
@@ -232,20 +261,30 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
struct iommu_iotlb_gather * gather, unsigned long iova,
size_t granule)
{
- if (iop->cfg.tlb->tlb_add_page)
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
/**
+ * enum io_pgtable_caps - IO page table backend capabilities.
+ */
+enum io_pgtable_caps {
+ /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
+ IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
+};
+
+/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
*
* @alloc: Allocate a set of page tables described by cfg.
* @free: Free the page tables associated with iop.
+ * @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities.
*/
struct io_pgtable_init_fns {
struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
void (*free)(struct io_pgtable *iop);
+ u32 caps;
};
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
@@ -254,5 +293,8 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
#endif /* __IO_PGTABLE_H */
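
A sketch of plugging the new allocator hooks into a config before the usual alloc_io_pgtable_ops() call. Only formats whose init_fns advertise IO_PGTABLE_CAP_CUSTOM_ALLOCATOR accept them, and the memory must be zeroed and usable with virt_to_phys(); the format choice below is illustrative:

static void *my_pgtable_alloc(void *cookie, size_t size, gfp_t gfp)
{
	/* zeroed, physically addressable memory as the hook requires */
	return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}

static void my_pgtable_free(void *cookie, void *pages, size_t size)
{
	free_pages((unsigned long)pages, get_order(size));
}

static struct io_pgtable_ops *my_alloc_pgtable(struct io_pgtable_cfg *cfg,
					       void *cookie)
{
	cfg->alloc = my_pgtable_alloc;
	cfg->free  = my_pgtable_free;	/* must be paired with ->alloc */

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, cfg, cookie);
}
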
diff --git a/include/linux/io.h b/include/linux/io.h
index 8394c56babc2..235ba7d80a8f 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -23,21 +23,19 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
return 0;
}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_p4d_supported(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ return 0;
+}
#endif
/*
@@ -77,23 +75,28 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
+/* architectures can override this */
+pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
+ unsigned long size, pgprot_t prot);
+
+
#ifdef CONFIG_PCI
/*
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
- * Posting") mandate non-posted configuration transactions. There is
- * no ioremap API in the kernel that can guarantee non-posted write
- * semantics across arches so provide a default implementation for
- * mapping PCI config space that defaults to ioremap(); arches
- * should override it if they have memory mapping implementations that
- * guarantee non-posted writes semantics to make the memory mapping
- * compliant with the PCI specification.
+ * Posting") mandate non-posted configuration transactions. This default
+ * implementation attempts to use the ioremap_np() API to provide this
+ * on arches that support it, and falls back to ioremap() on those that
+ * don't. Overriding this function is deprecated; arches that properly
+ * support non-posted accesses should implement ioremap_np() instead, which
+ * this default implementation can then use to return mappings compliant with
+ * the PCI specification.
*/
#ifndef pci_remap_cfgspace
#define pci_remap_cfgspace pci_remap_cfgspace
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
size_t size)
{
- return ioremap(offset, size);
+ return ioremap_np(offset, size) ?: ioremap(offset, size);
}
#endif
#endif
@@ -139,6 +142,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -173,4 +178,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
#endif /* _LINUX_IO_H */
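
The two new devm helpers let framebuffer-style drivers drop their manual memtype/MTRR teardown. A sketch for a prefetchable PCI BAR; both registrations are undone automatically on driver detach, and the phys_wc return value is deliberately ignored since failure there only costs performance:

/* Sketch: device-managed write-combining setup for BAR 0. */
static int my_fb_setup_wc(struct pci_dev *pdev)
{
	resource_size_t base = pci_resource_start(pdev, 0);
	resource_size_t size = pci_resource_len(pdev, 0);
	int ret;

	ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, base, size);
	if (ret)
		return ret;

	/* MTRR-style WC hint where PAT is unavailable; non-fatal */
	devm_arch_phys_wc_add(&pdev->dev, base, size);
	return 0;
}
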
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
new file mode 100644
index 000000000000..68ed6697fece
--- /dev/null
+++ b/include/linux/io_uring.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_H
+#define _LINUX_IO_URING_H
+
+#include <linux/sched.h>
+#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
+
+#if defined(CONFIG_IO_URING)
+void __io_uring_cancel(bool cancel_all);
+void __io_uring_free(struct task_struct *tsk);
+void io_uring_unreg_ringfd(void);
+const char *io_uring_get_opcode(u8 opcode);
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+bool io_is_uring_fops(struct file *file);
+
+static inline void io_uring_files_cancel(void)
+{
+ if (current->io_uring) {
+ io_uring_unreg_ringfd();
+ __io_uring_cancel(false);
+ }
+}
+static inline void io_uring_task_cancel(void)
+{
+ if (current->io_uring)
+ __io_uring_cancel(true);
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+ if (tsk->io_uring)
+ __io_uring_free(tsk);
+}
+#else
+static inline void io_uring_task_cancel(void)
+{
+}
+static inline void io_uring_files_cancel(void)
+{
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+}
+static inline const char *io_uring_get_opcode(u8 opcode)
+{
+ return "";
+}
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool io_is_uring_fops(struct file *file)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
new file mode 100644
index 000000000000..e453a997c060
--- /dev/null
+++ b/include/linux/io_uring/cmd.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_CMD_H
+#define _LINUX_IO_URING_CMD_H
+
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE (1U << 30)
+
+struct io_uring_cmd {
+ struct file *file;
+ const struct io_uring_sqe *sqe;
+ /* callback to defer completions to task context */
+ void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+};
+
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags);
+void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ unsigned flags);
+
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
+static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+ ssize_t ret2, unsigned issue_flags)
+{
+}
+static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ unsigned flags)
+{
+}
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+}
+#endif
+
+/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
+static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
+}
+
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
+}
+
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->task;
+}
+
+#endif /* _LINUX_IO_URING_CMD_H */
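
A hedged sketch of the intended driver flow: ->uring_cmd() queues hardware work and returns -EIOCBQUEUED, the completion path stashes the result in the 32-byte pdu area and defers CQE posting to task context. my_start_hw_op() is a hypothetical driver helper, and struct my_cmd_pdu merely has to fit in pdu[]:

struct my_cmd_pdu {
	u32 status;
};

static void my_cmd_tw_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags)
{
	struct my_cmd_pdu *pdu = (struct my_cmd_pdu *)ioucmd->pdu;

	io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
}

static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	/* io_uring_sqe_cmd() returns the command payload inside the SQE */
	const void *arg = io_uring_sqe_cmd(ioucmd->sqe);

	if (my_start_hw_op(ioucmd, arg))	/* hypothetical helper */
		return -EINVAL;
	return -EIOCBQUEUED;	/* CQE is posted later from task work */
}

/* called from the completion interrupt once the hardware is done */
static void my_hw_op_done(struct io_uring_cmd *ioucmd, u32 status)
{
	struct my_cmd_pdu *pdu = (struct my_cmd_pdu *)ioucmd->pdu;

	pdu->status = status;
	io_uring_cmd_do_in_task_lazy(ioucmd, my_cmd_tw_cb);
}
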
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
new file mode 100644
index 000000000000..e24893625085
--- /dev/null
+++ b/include/linux/io_uring_types.h
@@ -0,0 +1,682 @@
+#ifndef IO_URING_TYPES_H
+#define IO_URING_TYPES_H
+
+#include <linux/blkdev.h>
+#include <linux/hashtable.h>
+#include <linux/task_work.h>
+#include <linux/bitmap.h>
+#include <linux/llist.h>
+#include <uapi/linux/io_uring.h>
+
+enum {
+ /*
+ * A hint to not wake right away but delay until there are enough of
+ * tw's queued to match the number of CQEs the task is waiting for.
+ *
+ * Must not be used with requests generating more than one CQE.
+ * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
+ */
+ IOU_F_TWQ_LAZY_WAKE = 1,
+};
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+ /* the request is executed from poll, it should not be freed */
+ IO_URING_F_MULTISHOT = 4,
+ /* executed by io-wq */
+ IO_URING_F_IOWQ = 8,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = (1 << 8),
+ IO_URING_F_CQE32 = (1 << 9),
+ IO_URING_F_IOPOLL = (1 << 10),
+
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
+};
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+struct io_wq_work {
+ struct io_wq_work_node list;
+ unsigned flags;
+ /* place it here instead of io_kiocb as it fills padding and saves 4B */
+ int cancel_seq;
+};
+
+struct io_fixed_file {
+ /* file * with additional FFS_* flags */
+ unsigned long file_ptr;
+};
+
+struct io_file_table {
+ struct io_fixed_file *files;
+ unsigned long *bitmap;
+ unsigned int alloc_hint;
+};
+
+struct io_hash_bucket {
+ spinlock_t lock;
+ struct hlist_head list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+ struct io_hash_bucket *hbs;
+ unsigned hash_bits;
+};
+
+/*
+ * Arbitrary limit, can be raised if need be
+ */
+#define IO_RINGFD_REG_MAX 16
+
+struct io_uring_task {
+ /* submission side */
+ int cached_refs;
+ const struct io_ring_ctx *last;
+ struct io_wq *io_wq;
+ struct file *registered_rings[IO_RINGFD_REG_MAX];
+
+ struct xarray xa;
+ struct wait_queue_head wait;
+ atomic_t in_cancel;
+ atomic_t inflight_tracked;
+ struct percpu_counter inflight;
+
+ struct { /* task_work */
+ struct llist_head task_list;
+ struct callback_head task_work;
+ } ____cacheline_aligned_in_smp;
+};
+
+struct io_uring {
+ u32 head;
+ u32 tail;
+};
+
+/*
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
+struct io_rings {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+ * The kernel controls head of the sq ring and the tail of the cq ring,
+ * and the application controls tail of the sq ring and the head of the
+ * cq ring.
+ */
+ struct io_uring sq, cq;
+ /*
+ * Bitmasks to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
+ /*
+ * Number of invalid entries dropped by the kernel due to
+ * invalid index stored in array
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * After a new SQ head value was read by the application this
+ * counter includes all submissions that were dropped reaching
+ * the new SQ head (and possibly more).
+ */
+ u32 sq_dropped;
+ /*
+ * Runtime SQ flags
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application.
+ *
+ * The application needs a full memory barrier before checking
+ * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+ */
+ atomic_t sq_flags;
+ /*
+ * Runtime CQ flags
+ *
+ * Written by the application, shouldn't be modified by the
+ * kernel.
+ */
+ u32 cq_flags;
+ /*
+ * Number of completion events lost because the queue was full;
+ * this should be avoided by the application by making sure
+ * there are not more requests pending than there is space in
+ * the completion queue.
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * As completion events come in out of order this counter is not
+ * ordered with any other data.
+ */
+ u32 cq_overflow;
+ /*
+ * Ring buffer of completion events.
+ *
+ * The kernel writes completion events fresh every time they are
+ * produced, so the application is allowed to modify pending
+ * entries.
+ */
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
+};
+
+struct io_restriction {
+ DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+ DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ u8 sqe_flags_allowed;
+ u8 sqe_flags_required;
+ bool registered;
+};
+
+struct io_submit_link {
+ struct io_kiocb *head;
+ struct io_kiocb *last;
+};
+
+struct io_submit_state {
+ /* inline/task_work completion list, under ->uring_lock */
+ struct io_wq_work_node free_list;
+ /* batch completion logic */
+ struct io_wq_work_list compl_reqs;
+ struct io_submit_link link;
+
+ bool plug_started;
+ bool need_plug;
+ unsigned short submit_nr;
+ unsigned int cqes_count;
+ struct blk_plug plug;
+};
+
+struct io_ev_fd {
+ struct eventfd_ctx *cq_ev_fd;
+ unsigned int eventfd_async: 1;
+ struct rcu_head rcu;
+ atomic_t refs;
+ atomic_t ops;
+};
+
+struct io_alloc_cache {
+ struct io_wq_work_node list;
+ unsigned int nr_cached;
+ unsigned int max_cached;
+ size_t elem_size;
+};
+
+struct io_ring_ctx {
+ /* const or read-mostly hot data */
+ struct {
+ unsigned int flags;
+ unsigned int drain_next: 1;
+ unsigned int restricted: 1;
+ unsigned int off_timeout_used: 1;
+ unsigned int drain_active: 1;
+ unsigned int has_evfd: 1;
+ /* all CQEs should be posted only by the submitter task */
+ unsigned int task_complete: 1;
+ unsigned int lockless_cq: 1;
+ unsigned int syscall_iopoll: 1;
+ unsigned int poll_activated: 1;
+ unsigned int drain_disabled: 1;
+ unsigned int compat: 1;
+ unsigned int iowq_limits_set : 1;
+
+ struct task_struct *submitter_task;
+ struct io_rings *rings;
+ struct percpu_ref refs;
+
+ enum task_work_notify_mode notify_method;
+ unsigned sq_thread_idle;
+ } ____cacheline_aligned_in_smp;
+
+ /* submission data */
+ struct {
+ struct mutex uring_lock;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
+ struct io_uring_sqe *sq_sqes;
+ unsigned cached_sq_head;
+ unsigned sq_entries;
+
+ /*
+ * Fixed resources fast path, should be accessed only under
+ * uring_lock, and updated through io_uring_register(2)
+ */
+ struct io_rsrc_node *rsrc_node;
+ atomic_t cancel_seq;
+
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ bool poll_multi_queue;
+ struct io_wq_work_list iopoll_list;
+
+ struct io_file_table file_table;
+ struct io_mapped_ubuf **user_bufs;
+ unsigned nr_user_files;
+ unsigned nr_user_bufs;
+
+ struct io_submit_state submit_state;
+
+ struct io_buffer_list *io_bl;
+ struct xarray io_bl_xa;
+
+ struct io_hash_table cancel_table_locked;
+ struct io_alloc_cache apoll_cache;
+ struct io_alloc_cache netmsg_cache;
+
+ /*
+ * Any cancelable uring_cmd is added to this list in
+ * ->uring_cmd() by io_uring_cmd_insert_cancelable()
+ */
+ struct hlist_head cancelable_uring_cmd;
+ } ____cacheline_aligned_in_smp;
+
+ struct {
+ /*
+ * We cache a range of free CQEs we can use, once exhausted it
+ * should go through a slower range setup, see __io_get_cqe()
+ */
+ struct io_uring_cqe *cqe_cached;
+ struct io_uring_cqe *cqe_sentinel;
+
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ struct io_ev_fd __rcu *io_ev_fd;
+ unsigned cq_extra;
+ } ____cacheline_aligned_in_smp;
+
+ /*
+ * task_work and async notification delivery cacheline. Expected to
+ * regularly bounce b/w CPUs.
+ */
+ struct {
+ struct llist_head work_llist;
+ unsigned long check_cq;
+ atomic_t cq_wait_nr;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
+ } ____cacheline_aligned_in_smp;
+
+ /* timeouts */
+ struct {
+ spinlock_t timeout_lock;
+ struct list_head timeout_list;
+ struct list_head ltimeout_list;
+ unsigned cq_last_tm_flush;
+ } ____cacheline_aligned_in_smp;
+
+ struct io_uring_cqe completion_cqes[16];
+
+ spinlock_t completion_lock;
+
+ /* IRQ completion list, under ->completion_lock */
+ unsigned int locked_free_nr;
+ struct io_wq_work_list locked_free_list;
+
+ struct list_head io_buffers_comp;
+ struct list_head cq_overflow_list;
+ struct io_hash_table cancel_table;
+
+ struct hlist_head waitid_list;
+
+#ifdef CONFIG_FUTEX
+ struct hlist_head futex_list;
+ struct io_alloc_cache futex_cache;
+#endif
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ struct list_head io_buffers_cache;
+
+ /* deferred free list, protected by ->uring_lock */
+ struct hlist_head io_buf_list;
+
+ /* Keep this last, we don't need it for the fast path */
+ struct wait_queue_head poll_wq;
+ struct io_restriction restrictions;
+
+	/* slow path rsrc auxiliary data, used by update/register */
+ struct io_mapped_ubuf *dummy_ubuf;
+ struct io_rsrc_data *file_data;
+ struct io_rsrc_data *buf_data;
+
+ /* protected by ->uring_lock */
+ struct list_head rsrc_ref_list;
+ struct io_alloc_cache rsrc_node_cache;
+ struct wait_queue_head rsrc_quiesce_wq;
+ unsigned rsrc_quiesce;
+
+ u32 pers_next;
+ struct xarray personalities;
+
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
+ /* Only used for accounting purposes */
+ struct user_struct *user;
+ struct mm_struct *mm_account;
+
+ /* ctx exit and cancelation */
+ struct llist_head fallback_llist;
+ struct delayed_work fallback_work;
+ struct work_struct exit_work;
+ struct list_head tctx_list;
+ struct completion ref_comp;
+
+ /* io-wq management, e.g. thread count */
+ u32 iowq_limits[2];
+
+ struct callback_head poll_wq_task_work;
+ struct list_head defer_list;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ struct list_head napi_list; /* track busy poll napi_id */
+ spinlock_t napi_lock; /* napi_list lock */
+
+ /* napi busy poll default timeout */
+ unsigned int napi_busy_poll_to;
+ bool napi_prefer_busy_poll;
+ bool napi_enabled;
+
+ DECLARE_HASHTABLE(napi_ht, 4);
+#endif
+
+ /* protected by ->completion_lock */
+ unsigned evfd_last_cq_tail;
+
+ /*
+ * If IORING_SETUP_NO_MMAP is used, then the below holds
+ * the gup'ed pages for the two rings, and the sqes.
+ */
+ unsigned short n_ring_pages;
+ unsigned short n_sqe_pages;
+ struct page **ring_pages;
+ struct page **sqe_pages;
+};
+
+struct io_tw_state {
+ /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
+ bool locked;
+};
+
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+ REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
+
+ /* first byte is taken by user flags, shift it to not overlap */
+ REQ_F_FAIL_BIT = 8,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_POLLED_BIT,
+ REQ_F_BUFFER_SELECTED_BIT,
+ REQ_F_BUFFER_RING_BIT,
+ REQ_F_REISSUE_BIT,
+ REQ_F_CREDS_BIT,
+ REQ_F_REFCOUNT_BIT,
+ REQ_F_ARM_LTIMEOUT_BIT,
+ REQ_F_ASYNC_DATA_BIT,
+ REQ_F_SKIP_LINK_CQES_BIT,
+ REQ_F_SINGLE_POLL_BIT,
+ REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_APOLL_MULTISHOT_BIT,
+ REQ_F_CLEAR_POLLIN_BIT,
+ REQ_F_HASH_LOCKED_BIT,
+ /* keep async read/write and isreg together and in order */
+ REQ_F_SUPPORT_NOWAIT_BIT,
+ REQ_F_ISREG_BIT,
+ REQ_F_POLL_NO_LAZY_BIT,
+ REQ_F_CANCEL_SEQ_BIT,
+ REQ_F_CAN_POLL_BIT,
+ REQ_F_BL_EMPTY_BIT,
+ REQ_F_BL_NO_RECYCLE_BIT,
+
+ /* not a real bit, just to check we're not overflowing the space */
+ __REQ_F_LAST_BIT,
+};
+
+typedef u64 __bitwise io_req_flags_t;
+#define IO_REQ_FLAG(bitno) ((__force io_req_flags_t) BIT_ULL((bitno)))
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
+ /* IOSQE_BUFFER_SELECT */
+ REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
+ /* IOSQE_CQE_SKIP_SUCCESS */
+ REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),
+
+ /* fail rest of links */
+ REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
+ /* on inflight list, should be cancelled and waited on exit reliably */
+ REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
+ /* has or had linked timeout */
+ REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
+ /* already went through poll handler */
+ REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
+ /* buffer already selected */
+ REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
+ /* buffer selected from ring, needs commit */
+ REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
+ /* supports async reads/writes */
+ REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
+ /* regular file */
+ REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
+ /* has creds assigned */
+ REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
+ /* skip refcounting if not set */
+ REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
+ /* there is a linked timeout that has to be armed */
+ REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
+ /* ->async_data allocated */
+ REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
+ /* don't post CQEs while failing linked requests */
+ REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
+ /* single poll may be active */
+ REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
+	/* double poll may be active */
+ REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
+ /* fast poll multishot mode */
+ REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
+ /* recvmsg special flag, clear EPOLLIN */
+ REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
+ /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
+ REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
+ /* don't use lazy poll wake for this request */
+ REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+ /* cancel sequence is set and valid */
+ REQ_F_CANCEL_SEQ = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
+ /* file is pollable */
+ REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
+ /* buffer list was empty after selection of buffer */
+ REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
+ /* don't recycle provided buffers for this request */
+ REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
+};
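
Every REQ_F_* value above is produced by IO_REQ_FLAG(), which casts BIT_ULL(bit) to the __bitwise io_req_flags_t type: sparse can then catch accidental mixing with untyped masks, and bits past 31 (REQ_F_CAN_POLL and later) stay well-defined because the storage is a u64. A minimal usage sketch, with the conditional body elided:

	io_req_flags_t flags = REQ_F_FIXED_FILE | REQ_F_FORCE_ASYNC;

	if (flags & REQ_F_FORCE_ASYNC)
		; /* punt the request to io-wq instead of issuing it inline */
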
+
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
+
+struct io_task_work {
+ struct llist_node node;
+ io_req_tw_func_t func;
+};
+
+struct io_cqe {
+ __u64 user_data;
+ __s32 res;
+ /* fd initially, then cflags for completion */
+ union {
+ __u32 flags;
+ int fd;
+ };
+};
+
+/*
+ * Each request type overlays its private data structure on top of this one.
+ * They must not exceed this one in size.
+ */
+struct io_cmd_data {
+ struct file *file;
+ /* each command gets 56 bytes of data */
+ __u8 data[56];
+};
+
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+ io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+ ((cmd_type *)&(req)->cmd) \
+)
+#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
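
The io_kiocb_to_cmd() pair above lets each opcode reinterpret the 64-byte io_cmd_data area as its own request structure, with BUILD_BUG_ON() rejecting any structure that would overflow it. A sketch under that contract; struct io_demo and io_demo_prep() are hypothetical names, not part of the header:

	struct io_demo {
		struct file *file;	/* must be first, mirroring io_kiocb */
		u64 user_arg;
		u32 len;
	};

	static int io_demo_prep(struct io_kiocb *req)
	{
		/* the size of struct io_demo is checked at compile time here */
		struct io_demo *demo = io_kiocb_to_cmd(req, struct io_demo);

		demo->user_arg = 0;
		demo->len = 0;
		return 0;
	}
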
+
+struct io_kiocb {
+ union {
+ /*
+ * NOTE! Each of the io_kiocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'file' in this struct.
+ */
+ struct file *file;
+ struct io_cmd_data cmd;
+ };
+
+ u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
+ /*
+ * Can be either a fixed buffer index, or used with provided buffers.
+ * For the latter, before issue it points to the buffer group ID,
+ * and after selection it points to the buffer ID itself.
+ */
+ u16 buf_index;
+
+ unsigned nr_tw;
+
+ /* REQ_F_* flags */
+ io_req_flags_t flags;
+
+ struct io_cqe cqe;
+
+ struct io_ring_ctx *ctx;
+ struct task_struct *task;
+
+ union {
+ /* store used ubuf, so we can prevent reloading */
+ struct io_mapped_ubuf *imu;
+
+ /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
+ struct io_buffer *kbuf;
+
+ /*
+ * stores buffer ID for ring provided buffers, valid IFF
+ * REQ_F_BUFFER_RING is set.
+ */
+ struct io_buffer_list *buf_list;
+ };
+
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ __poll_t apoll_events;
+ };
+
+ struct io_rsrc_node *rsrc_node;
+
+ atomic_t refs;
+ atomic_t poll_refs;
+ struct io_task_work io_task_work;
+ /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+ struct hlist_node hash_node;
+ /* internal polling, see IORING_FEAT_FAST_POLL */
+ struct async_poll *apoll;
+ /* opcode allocated if it needs to store data for async defer */
+ void *async_data;
+ /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ struct io_kiocb *link;
+ /* custom credentials, valid IFF REQ_F_CREDS is set */
+ const struct cred *creds;
+ struct io_wq_work work;
+
+ struct {
+ u64 extra1;
+ u64 extra2;
+ } big_cqe;
+};
+
+struct io_overflow_cqe {
+ struct list_head list;
+ struct io_uring_cqe cqe;
+};
+
+#endif
diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h
new file mode 100644
index 000000000000..94a24b36998f
--- /dev/null
+++ b/include/linux/ioam6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_H
+#define _LINUX_IOAM6_H
+
+#include <uapi/linux/ioam6.h>
+
+#endif /* _LINUX_IOAM6_H */
diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h
new file mode 100644
index 000000000000..176e67919de3
--- /dev/null
+++ b/include/linux/ioam6_genl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_GENL_H
+#define _LINUX_IOAM6_GENL_H
+
+#include <uapi/linux/ioam6_genl.h>
+
+#endif /* _LINUX_IOAM6_GENL_H */
diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..07d9dfedd29d
--- /dev/null
+++ b/include/linux/ioam6_iptunnel.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_IPTUNNEL_H
+#define _LINUX_IOAM6_IPTUNNEL_H
+
+#include <uapi/linux/ioam6_iptunnel.h>
+
+#endif /* _LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
deleted file mode 100644
index 6f000d7a0ddc..000000000000
--- a/include/linux/ioasid.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_IOASID_H
-#define __LINUX_IOASID_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-
-#define INVALID_IOASID ((ioasid_t)-1)
-typedef unsigned int ioasid_t;
-typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
-typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
-
-struct ioasid_set {
- int dummy;
-};
-
-/**
- * struct ioasid_allocator_ops - IOASID allocator helper functions and data
- *
- * @alloc: helper function to allocate IOASID
- * @free: helper function to free IOASID
- * @list: for tracking ops that share helper functions but not data
- * @pdata: data belong to the allocator, provided when calling alloc()
- */
-struct ioasid_allocator_ops {
- ioasid_alloc_fn_t alloc;
- ioasid_free_fn_t free;
- struct list_head list;
- void *pdata;
-};
-
-#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
-
-#if IS_ENABLED(CONFIG_IOASID)
-ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
- void *private);
-void ioasid_free(ioasid_t ioasid);
-void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *));
-int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
-void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
-int ioasid_set_data(ioasid_t ioasid, void *data);
-
-#else /* !CONFIG_IOASID */
-static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
- ioasid_t max, void *private)
-{
- return INVALID_IOASID;
-}
-
-static inline void ioasid_free(ioasid_t ioasid)
-{
-}
-
-static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *))
-{
- return NULL;
-}
-
-static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
-{
- return -ENOTSUPP;
-}
-
-static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
-{
-}
-
-static inline int ioasid_set_data(ioasid_t ioasid, void *data)
-{
- return -ENOTSUPP;
-}
-
-#endif /* CONFIG_IOASID */
-#endif /* __LINUX_IOASID_H */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1dcd9198beb7..14f7eaf1b443 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -99,61 +99,40 @@ struct io_cq {
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
- atomic_t nr_tasks;
-
- /* all the fields below are protected by this lock */
- spinlock_t lock;
unsigned short ioprio;
- /*
- * For request batching
- */
- int nr_batch_requests; /* Number of requests left in the batch */
- unsigned long last_waited; /* Time last woken after wait for request */
+#ifdef CONFIG_BLK_ICQ
+ /* all the fields below are protected by this lock */
+ spinlock_t lock;
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
};
-/**
- * get_io_context_active - get active reference on ioc
- * @ioc: ioc of interest
- *
- * Only iocs with active reference can issue new IOs. This function
- * acquires an active reference on @ioc. The caller must already have an
- * active reference on @ioc.
- */
-static inline void get_io_context_active(struct io_context *ioc)
-{
- WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
- WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
- atomic_long_inc(&ioc->refcount);
- atomic_inc(&ioc->active_ref);
-}
-
-static inline void ioc_task_link(struct io_context *ioc)
-{
- get_io_context_active(ioc);
-
- WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
- atomic_inc(&ioc->nr_tasks);
-}
-
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
-void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node);
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ if (!current->io_context)
+ return 0;
+ return __copy_io(clone_flags, tsk);
+}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#endif
+#endif /* IOCONTEXT_H */
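
The iocontext.h change replaces get_task_io_context() on the fork path with copy_io(): the inline wrapper returns immediately when the parent has no io_context, so only tasks that actually share or inherit I/O state reach the out-of-line __copy_io(). A sketch of the caller side, loosely modelled on copy_process(); the error label is illustrative:

	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup;	/* unwind earlier fork state */
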
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 4d1d3c3469e9..6fc1c858013d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -13,6 +13,8 @@
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_iter;
+struct iomap_dio;
struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
@@ -48,25 +50,38 @@ struct vm_fault;
*
* IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
* buffer heads for this mapping.
+ *
+ * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
+ * rather than a file data extent.
*/
-#define IOMAP_F_NEW 0x01
-#define IOMAP_F_DIRTY 0x02
-#define IOMAP_F_SHARED 0x04
-#define IOMAP_F_MERGED 0x08
-#define IOMAP_F_BUFFER_HEAD 0x10
+#define IOMAP_F_NEW (1U << 0)
+#define IOMAP_F_DIRTY (1U << 1)
+#define IOMAP_F_SHARED (1U << 2)
+#define IOMAP_F_MERGED (1U << 3)
+#ifdef CONFIG_BUFFER_HEAD
+#define IOMAP_F_BUFFER_HEAD (1U << 4)
+#else
+#define IOMAP_F_BUFFER_HEAD 0
+#endif /* CONFIG_BUFFER_HEAD */
+#define IOMAP_F_XATTR (1U << 5)
/*
* Flags set by the core iomap code during operations:
*
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
* has changed as the result of this write operation.
+ *
+ * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
+ * range it covers needs to be remapped by the high level before the operation
+ * can proceed.
*/
-#define IOMAP_F_SIZE_CHANGED 0x100
+#define IOMAP_F_SIZE_CHANGED (1U << 8)
+#define IOMAP_F_STALE (1U << 9)
/*
* Flags from 0x1000 up are for file system specific usage:
*/
-#define IOMAP_F_PRIVATE 0x1000
+#define IOMAP_F_PRIVATE (1U << 12)
/*
@@ -74,7 +89,7 @@ struct vm_fault;
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
-struct iomap_page_ops;
+struct iomap_folio_ops;
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
@@ -86,30 +101,65 @@ struct iomap {
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
- const struct iomap_page_ops *page_ops;
+ const struct iomap_folio_ops *folio_ops;
+ u64 validity_cookie; /* used with .iomap_valid() */
};
-static inline sector_t
-iomap_sector(struct iomap *iomap, loff_t pos)
+static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
/*
- * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to. This only applies to
- * buffered writes as unbuffered writes will not typically have pages
+ * Returns the inline data pointer for logical offset @pos.
+ */
+static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
+{
+ return iomap->inline_data + pos - iomap->offset;
+}
+
+/*
+ * Check if the mapping's length is within the valid range for inline data.
+ * This is used to guard against accessing data beyond the page inline_data
+ * points at.
+ */
+static inline bool iomap_inline_data_valid(const struct iomap *iomap)
+{
+ return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
+}
+
+/*
+ * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
+ * and put_folio will be called for each folio written to. This only applies
+ * to buffered writes as unbuffered writes will not typically have folios
* associated with them.
*
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary. In that page_done call, @page will be NULL if the
- * associated page could not be obtained.
+ * When get_folio succeeds, put_folio will always be called to do any
+ * cleanup work necessary. put_folio is responsible for unlocking and putting
+ * @folio.
*/
-struct iomap_page_ops {
- int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
- struct iomap *iomap);
- void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
- struct page *page, struct iomap *iomap);
+struct iomap_folio_ops {
+ struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+ unsigned len);
+ void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+ struct folio *folio);
+
+ /*
+ * Check that the cached iomap still maps correctly to the filesystem's
+ * internal extent map. FS internal extent maps can change while iomap
+ * is iterating a cached iomap, so this hook allows iomap to detect that
+ * the iomap needs to be refreshed during a long running write
+ * operation.
+ *
+ * The filesystem can store internal state (e.g. a sequence number) in
+ * iomap->validity_cookie when the iomap is first mapped to be able to
+ * detect changes between mapping time and whenever .iomap_valid() is
+ * called.
+ *
+ * This is called with the folio over the specified file position held
+ * locked by the iomap code.
+ */
+ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
};
/*
@@ -121,6 +171,13 @@ struct iomap_page_ops {
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT (1 << 5) /* do not block */
+#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
+#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
+#ifdef CONFIG_FS_DAX
+#define IOMAP_DAX (1 << 8) /* DAX mapping */
+#else
+#define IOMAP_DAX 0
+#endif /* CONFIG_FS_DAX */
struct iomap_ops {
/*
@@ -142,32 +199,76 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
-/*
- * Main iomap iterator function.
+/**
+ * struct iomap_iter - Iterate through a range of a file
+ * @inode: Set at the start of the iteration and should not change.
+ * @pos: The current file position we are operating on. It is updated by
+ * calls to iomap_iter(). Treat as read-only in the body.
+ * @len: The remaining length of the file segment we're operating on.
+ * It is updated at the same time as @pos.
+ * @processed: The number of bytes processed by the body in the most recent
+ * iteration, or a negative errno. 0 causes the iteration to stop.
+ * @flags: Zero or more of the iomap_begin flags above.
+ * @iomap: Map describing the I/O iteration
+ * @srcmap: Source map for COW operations
*/
-typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap, struct iomap *srcmap);
+struct iomap_iter {
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ s64 processed;
+ unsigned flags;
+ struct iomap iomap;
+ struct iomap srcmap;
+ void *private;
+};
+
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ u64 end = iter->iomap.offset + iter->iomap.length;
-loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, const struct iomap_ops *ops, void *data,
- iomap_actor_t actor);
+ if (iter->srcmap.type != IOMAP_HOLE)
+ end = min(end, iter->srcmap.offset + iter->srcmap.length);
+ return min(iter->len, end - iter->pos);
+}
+
+/**
+ * iomap_iter_srcmap - return the source map for the current iomap iteration
+ * @i: iteration structure
+ *
+ * Write operations on file systems with reflink support might require a
+ * source and a destination map. This function retourns the source map
+ * for a given operation, which may or may no be identical to the destination
+ * map in &i->iomap.
+ */
+static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+{
+ if (i->srcmap.type != IOMAP_HOLE)
+ return &i->srcmap;
+ return &i->iomap;
+}
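
iomap_iter(), iomap_length() and iomap_iter_srcmap() above replace the old iomap_apply()/actor callback with a loop the caller open-codes. A minimal sketch of the new pattern, where do_something() stands in for the per-extent body and returns bytes processed or a negative errno:

	struct iomap_iter iter = {
		.inode = inode,
		.pos = pos,
		.len = len,
		.flags = IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = do_something(&iter);
	return ret;
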
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
-int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
+ struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
+ int (*punch)(struct inode *inode, loff_t pos, loff_t length));
+
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
-int iomap_set_page_dirty(struct page *page);
-int iomap_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
-int iomap_releasepage(struct page *page, gfp_t gfp_mask);
-void iomap_invalidatepage(struct page *page, unsigned int offset,
- unsigned int len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
+bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
@@ -195,18 +296,29 @@ struct iomap_ioend {
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
loff_t io_offset; /* offset in the file */
- void *io_private; /* file system private data */
- struct bio *io_bio; /* bio being built */
- struct bio io_inline_bio; /* MUST BE LAST! */
+ sector_t io_sector; /* start sector of ioend */
+ struct bio io_bio; /* MUST BE LAST! */
};
+static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
+{
+ return container_of(bio, struct iomap_ioend, io_bio);
+}
+
struct iomap_writeback_ops {
/*
* Required, maps the blocks so that writeback can be performed on
* the range starting at offset.
+ *
+ * Can return arbitrarily large regions, but we need to call into it at
+ * least once per folio to allow the file systems to synchronize with
+ * the write path that could be invalidating mappings.
+ *
+ * An existing mapping from a previous call to this method can be reused
+ * by the file system if it is still valid.
*/
int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
- loff_t offset);
+ loff_t offset, unsigned len);
/*
* Optional, allows the file systems to perform actions just before
@@ -220,24 +332,20 @@ struct iomap_writeback_ops {
* Optional, allows the file system to discard state on a page where
* we failed to submit any I/O.
*/
- void (*discard_page)(struct page *page);
+ void (*discard_folio)(struct folio *folio, loff_t pos);
};
struct iomap_writepage_ctx {
struct iomap iomap;
struct iomap_ioend *ioend;
const struct iomap_writeback_ops *ops;
+ u32 nr_folios; /* folios added to the ioend */
};
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
- struct list_head *more_ioends,
- void (*merge_private)(struct iomap_ioend *ioend,
- struct iomap_ioend *next));
+ struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
-int iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops);
int iomap_writepages(struct address_space *mapping,
struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
const struct iomap_writeback_ops *ops);
@@ -251,14 +359,48 @@ int iomap_writepages(struct address_space *mapping,
struct iomap_dio_ops {
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
unsigned flags);
- blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap,
- struct bio *bio, loff_t file_offset);
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
+
+ /*
+ * Filesystems wishing to attach private information to a direct io bio
+ * must provide a ->submit_io method that attaches the additional
+ * information to the bio and changes the ->bi_end_io callback to a
+ * custom function. This function should, at a minimum, perform any
+ * relevant post-processing of the bio and end with a call to
+ * iomap_dio_bio_end_io.
+ */
+ struct bio_set *bio_set;
};
+/*
+ * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
+ * synchronous.
+ */
+#define IOMAP_DIO_FORCE_WAIT (1 << 0)
+
+/*
+ * Do not allocate blocks or zero partial blocks, but instead fall back to
+ * the caller by returning -EAGAIN. Used to optimize direct I/O writes that
+ * are not aligned to the file system block size.
+ */
+#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+
+/*
+ * When a page fault occurs, return a partial synchronous result and allow
+ * the caller to retry the rest of the operation after dealing with the page
+ * fault.
+ */
+#define IOMAP_DIO_PARTIAL (1 << 2)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- bool wait_for_completion);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+ unsigned int dio_flags, void *private, size_t done_before);
+struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private, size_t done_before);
+ssize_t iomap_dio_complete(struct iomap_dio *dio);
+void iomap_dio_bio_end_io(struct bio *bio);
#ifdef CONFIG_SWAP
struct file;
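
The reworked direct I/O entry points take a dio_flags word plus a private cookie and a done_before byte count for resumed transfers. As a sketch, a filesystem write path might attempt a pure overwrite and fall back to buffered I/O when iomap reports -EAGAIN; fs_iomap_ops, fs_dio_ops and the fallback helper are hypothetical:

	ret = iomap_dio_rw(iocb, from, &fs_iomap_ops, &fs_dio_ops,
			   IOMAP_DIO_OVERWRITE_ONLY, NULL, 0);
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT))
		ret = fs_buffered_write_fallback(iocb, from);
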
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 70d01edcbf8b..74be34f3a20a 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -3,7 +3,9 @@
#define _LINUX_IOMMU_HELPER_H
#include <linux/bug.h>
-#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
static inline unsigned long iommu_device_max_index(unsigned long size,
unsigned long offset,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index fee209efb756..2e925b5eba53 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,8 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/ioasid.h>
-#include <uapi/linux/iommu.h>
+#include <linux/iova_bitmap.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -37,9 +36,114 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct iommu_domain_ops;
+struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
-struct iommu_fault_event;
+struct iommu_dma_cookie;
+struct iommu_fault_param;
+
+#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
+#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
+#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
+#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
+
+/* Generic fault types; can be expanded, e.g. for IRQ remapping faults */
+enum iommu_fault_type {
+ IOMMU_FAULT_PAGE_REQ = 1, /* page request fault */
+};
+
+/**
+ * struct iommu_fault_page_request - Page Request data
+ * @flags: encodes whether the corresponding fields are valid and whether this
+ * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ * must have the same PASID value as the page request. When it is clear,
+ * the page response should not have a PASID.
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
+ * @addr: page address
+ * @private_data: device-specific private information
+ */
+struct iommu_fault_page_request {
+#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
+#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
+#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
+ u32 flags;
+ u32 pasid;
+ u32 grpid;
+ u32 perm;
+ u64 addr;
+ u64 private_data[2];
+};
+
+/**
+ * struct iommu_fault - Generic fault data
+ * @type: fault type from &enum iommu_fault_type
+ * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
+ */
+struct iommu_fault {
+ u32 type;
+ struct iommu_fault_page_request prm;
+};
+
+/**
+ * enum iommu_page_response_code - Return status of fault handlers
+ * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ * populated, retry the access. This is "Success" in PCI PRI.
+ * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
+ * this device if possible. This is "Response Failure" in PCI PRI.
+ * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ * access. This is "Invalid Request" in PCI PRI.
+ */
+enum iommu_page_response_code {
+ IOMMU_PAGE_RESP_SUCCESS = 0,
+ IOMMU_PAGE_RESP_INVALID,
+ IOMMU_PAGE_RESP_FAILURE,
+};
+
+/**
+ * struct iommu_page_response - Generic page response information
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @code: response code from &enum iommu_page_response_code
+ */
+struct iommu_page_response {
+ u32 pasid;
+ u32 grpid;
+ u32 code;
+};
+
+struct iopf_fault {
+ struct iommu_fault fault;
+ /* node for pending lists */
+ struct list_head list;
+};
+
+struct iopf_group {
+ struct iopf_fault last_fault;
+ struct list_head faults;
+ /* list node for iommu_fault_param::faults */
+ struct list_head pending_node;
+ struct work_struct work;
+ struct iommu_domain *domain;
+ /* The device's fault data parameter. */
+ struct iommu_fault_param *fault_param;
+};
+
+/**
+ * struct iopf_queue - IO Page Fault queue
+ * @wq: the fault workqueue
+ * @devices: devices attached to this queue
+ * @lock: protects the device list
+ */
+struct iopf_queue {
+ struct workqueue_struct *wq;
+ struct list_head devices;
+ struct mutex lock;
+};
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -47,7 +151,6 @@ struct iommu_fault_event;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -60,7 +163,15 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
+
+#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+#define __IOMMU_DOMAIN_PLATFORM (1U << 5)
+#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested
+ on a stage-2 translation */
+
+#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
* This are the possible domain-types
*
@@ -72,53 +183,73 @@ struct iommu_domain_geometry {
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
+ * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
+ * invalidation.
+ * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
+ * represented by mm_struct's.
+ * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own
+ * dma_api stuff. Do not use in new drivers.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API | \
+ __IOMMU_DOMAIN_DMA_FQ)
+#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM)
+#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED)
struct iommu_domain {
unsigned type;
- const struct iommu_ops *ops;
+ const struct iommu_domain_ops *ops;
+ const struct iommu_dirty_ops *dirty_ops;
+ const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
- iommu_fault_handler_t handler;
- void *handler_token;
struct iommu_domain_geometry geometry;
- void *iova_cookie;
+ struct iommu_dma_cookie *iova_cookie;
+ int (*iopf_handler)(struct iopf_group *group);
+ void *fault_data;
+ union {
+ struct {
+ iommu_fault_handler_t handler;
+ void *handler_token;
+ };
+ struct { /* IOMMU_DOMAIN_SVA */
+ struct mm_struct *mm;
+ int users;
+ /*
+ * Next iommu_domain in mm->iommu_mm->sva-domains list
+ * protected by iommu_sva_lock.
+ */
+ struct list_head next;
+ };
+ };
};
+static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_DMA_API;
+}
+
enum iommu_cap {
- IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
- transactions */
- IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
-};
-
-/*
- * Following constraints are specifc to FSL_PAMUV1:
- * -aperture must be power of 2, and naturally aligned
- * -number of windows must be power of 2, and address space size
- * of each window is determined by aperture size / # of windows
- * -the actual size of the mapped region of a window must be power
- * of 2 starting with 4KB and physical address must be naturally
- * aligned.
- * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
- * The caller can invoke iommu_domain_get_attr to check if the underlying
- * iommu implementation supports these constraints.
- */
-
-enum iommu_attr {
- DOMAIN_ATTR_GEOMETRY,
- DOMAIN_ATTR_PAGING,
- DOMAIN_ATTR_WINDOWS,
- DOMAIN_ATTR_FSL_PAMU_STASH,
- DOMAIN_ATTR_FSL_PAMU_ENABLE,
- DOMAIN_ATTR_FSL_PAMUV1,
- DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
- DOMAIN_ATTR_MAX,
+ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
+ DMA protection and we should too */
+ /*
+ * Per-device flag indicating if enforce_cache_coherency() will work on
+ * this device.
+ */
+ IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+ /*
+ * IOMMU driver does not issue TLB maintenance during .unmap, so can
+ * usefully support the non-strict DMA flush queue.
+ */
+ IOMMU_CAP_DEFERRED_FLUSH,
+ IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
};
/* These are the possible reserved region types */
@@ -146,6 +277,7 @@ enum iommu_resv_type {
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
+ * @free: Callback to free associated memory allocations
*/
struct iommu_resv_region {
struct list_head list;
@@ -153,15 +285,38 @@ struct iommu_resv_region {
size_t length;
int prot;
enum iommu_resv_type type;
+ void (*free)(struct device *dev, struct iommu_resv_region *region);
+};
+
+struct iommu_iort_rmr_data {
+ struct iommu_resv_region rr;
+
+ /* Stream IDs associated with IORT RMR entry */
+ const u32 *sids;
+ u32 num_sids;
};
-/* Per device IOMMU features */
+/**
+ * enum iommu_dev_features - Per device IOMMU features
+ * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
+ * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
+ * enabling %IOMMU_DEV_FEAT_SVA requires
+ * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
+ * Faults themselves instead of relying on the IOMMU. When
+ * supported, this feature must be enabled before and
+ * disabled after %IOMMU_DEV_FEAT_SVA.
+ *
+ * Device drivers enable a feature using iommu_dev_enable_feature().
+ */
enum iommu_dev_features {
- IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */
- IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */
+ IOMMU_DEV_FEAT_SVA,
+ IOMMU_DEV_FEAT_IOPF,
};
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
+typedef unsigned int ioasid_t;
#ifdef CONFIG_IOMMU_API
@@ -169,139 +324,338 @@ enum iommu_dev_features {
* struct iommu_iotlb_gather - Range information for a pending IOTLB flush
*
* @start: IOVA representing the start of the range to be flushed
- * @end: IOVA representing the end of the range to be flushed (exclusive)
+ * @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
*
* This structure is intended to be updated by multiple calls to the
* ->unmap() function in struct iommu_ops before eventually being passed
- * into ->iotlb_sync().
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
*/
struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
+ struct list_head freelist;
+ bool queued;
};
/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+ struct iova_bitmap *bitmap;
+ struct iommu_iotlb_gather *gather;
+};
+
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/**
+ * struct iommu_dirty_ops - domain specific dirty tracking operations
+ * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ * into a bitmap, with each bit representing a page.
+ * Reads the dirty PTE bits and clears them from the IO
+ * pagetables.
+ */
+struct iommu_dirty_ops {
+ int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+ int (*read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
+};
+
+/**
+ * struct iommu_user_data - iommu driver specific user space data info
+ * @type: The data type of the user buffer
+ * @uptr: Pointer to the user buffer for copy_from_user()
+ * @len: The length of the user buffer in bytes
+ *
+ * The user space data is defined by the uAPI in include/uapi/linux/iommufd.h;
+ * @type, @uptr and @len should be copied directly from an iommufd core uAPI struct.
+ */
+struct iommu_user_data {
+ unsigned int type;
+ void __user *uptr;
+ size_t len;
+};
+
+/**
+ * struct iommu_user_data_array - iommu driver specific user space data array
+ * @type: The data type of all the entries in the user buffer array
+ * @uptr: Pointer to the user buffer array
+ * @entry_len: The fixed-width length of an entry in the array, in bytes
+ * @entry_num: The number of total entries in the array
+ *
+ * The user buffer includes an array of requests with format defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommu_user_data_array {
+ unsigned int type;
+ void __user *uptr;
+ size_t entry_len;
+ u32 entry_num;
+};
+
+/**
+ * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @dst_data. Must match with @src_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user(
+ void *dst_data, const struct iommu_user_data *src_data,
+ unsigned int data_type, size_t data_len, size_t min_len)
+{
+	/* validate the pointers before dereferencing src_data below */
+	if (WARN_ON(!dst_data || !src_data))
+		return -EINVAL;
+	if (src_data->type != data_type)
+		return -EINVAL;
+ if (src_data->len < min_len || data_len < src_data->len)
+ return -EINVAL;
+ return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+ src_data->len);
+}
+
+/**
+ * iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+ * @min_last: The last member of the data structure @kdst points to in the
+ * initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+ __iommu_copy_struct_from_user(kdst, user_data, data_type, \
+ sizeof(*kdst), \
+ offsetofend(typeof(*kdst), min_last))
+
+/**
+ * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_array: Pointer to a struct iommu_user_data_array for a user space array
+ * @data_type: The data type of the @dst_data. Must match with @src_array.type
+ * @index: Index to the location in the array to copy user data from
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user_array(
+ void *dst_data, const struct iommu_user_data_array *src_array,
+ unsigned int data_type, unsigned int index, size_t data_len,
+ size_t min_len)
+{
+ struct iommu_user_data src_data;
+
+ if (WARN_ON(!src_array || index >= src_array->entry_num))
+ return -EINVAL;
+ if (!src_array->entry_num)
+ return -EINVAL;
+ src_data.uptr = src_array->uptr + src_array->entry_len * index;
+ src_data.len = src_array->entry_len;
+ src_data.type = src_array->type;
+
+ return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
+ data_len, min_len);
+}
+
+/**
+ * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ * @index: Index to the location in the array to copy user data from
+ * @min_last: The last member of the data structure @kdst points to in the
+ * initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
+ min_last) \
+ __iommu_copy_struct_from_user_array( \
+ kdst, user_array, data_type, index, sizeof(*(kdst)), \
+ offsetofend(typeof(*(kdst)), min_last))
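
A sketch of how a driver's domain_alloc_user() implementation might consume these helpers; struct iommu_hwpt_demo_data, IOMMU_HWPT_DATA_DEMO and the demo_* names are hypothetical placeholders for a real uAPI type:

	struct iommu_hwpt_demo_data {
		__u64 flags;
		__u64 s1_pgtbl;	/* last member of the initial uAPI version */
	};

	static struct iommu_domain *
	demo_domain_alloc_user(struct device *dev, u32 flags,
			       struct iommu_domain *parent,
			       const struct iommu_user_data *user_data)
	{
		struct iommu_hwpt_demo_data data;
		int ret;

		ret = iommu_copy_struct_from_user(&data, user_data,
						  IOMMU_HWPT_DATA_DEMO,
						  s1_pgtbl);
		if (ret)
			return ERR_PTR(ret);
		/* ... allocate and fully initialize the nested domain ... */
		return ERR_PTR(-EOPNOTSUPP);
	}
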
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
- * @domain_alloc: allocate iommu domain
- * @domain_free: free iommu domain
- * @attach_dev: attach device to an iommu domain
- * @detach_dev: detach device from an iommu domain
- * @map: map a physically contiguous memory region to an iommu domain
- * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
- * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
- * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
- * queue
- * @iova_to_phys: translate iova to physical address
+ * @hw_info: report iommu hardware information. The data buffer returned by this
+ * op is allocated in the iommu driver and freed by the caller after
+ * use. The information type is one of enum iommu_hw_info_type defined
+ * in include/uapi/linux/iommufd.h.
+ * @domain_alloc: allocate and return an iommu domain on success, otherwise
+ *                return NULL. The domain is not fully initialized until
+ *                iommu_domain_alloc() returns to the caller.
+ * @domain_alloc_user: Allocate an iommu domain corresponding to the input
+ * parameters as defined in include/uapi/linux/iommufd.h.
+ * Unlike @domain_alloc, it is called only by IOMMUFD and
+ * must fully initialize the new domain before return.
+ * Upon success, if the @user_data is valid and the @parent
+ * points to a kernel-managed domain, the new domain must be
+ * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
+ * NULL while the @user_data can be optionally provided, the
+ * new domain must support __IOMMU_DOMAIN_PAGING.
+ * Upon failure, ERR_PTR must be returned.
+ * @domain_alloc_paging: Allocate an iommu_domain that can be used for
+ * UNMANAGED, DMA, and DMA_FQ domain types.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
 * group and attached to the group's domain
* @device_group: find iommu group for a particular device
- * @domain_get_attr: Query domain attributes
- * @domain_set_attr: Change domain attributes
* @get_resv_regions: Request list of reserved regions for a device
- * @put_resv_regions: Free list of reserved regions for a device
- * @apply_resv_region: Temporary helper call-back for iova reserved ranges
- * @domain_window_enable: Configure and enable a particular window for a domain
- * @domain_window_disable: Disable a particular window for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
- * @dev_has/enable/disable_feat: per device entries to check/enable/disable
+ * @dev_enable/disable_feat: per device entries to enable/disable
* iommu specific features.
- * @dev_feat_enabled: check enabled feature
- * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
- * @aux_get_pasid: get the pasid given an aux-domain
- * @sva_bind: Bind process address space to device
- * @sva_unbind: Unbind process address space from device
- * @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
- * @cache_invalidate: invalidate translation caches
- * @sva_bind_gpasid: bind guest pasid and mm
- * @sva_unbind_gpasid: unbind guest pasid and mm
* @def_domain_type: device default domain type, return value:
* - IOMMU_DOMAIN_IDENTITY: must use an identity domain
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
+ * @default_domain_ops: the default ops for domains
+ * @remove_dev_pasid: Remove any translation configurations of a specific
+ * pasid, so that any DMA transactions with this pasid
+ * will be blocked by the hardware.
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
+ * @identity_domain: An always available, always attachable identity
+ * translation.
+ * @blocked_domain: An always available, always attachable blocking
+ * translation.
+ * @default_domain: If not NULL this will always be set as the default domain.
+ * This should be an IDENTITY/BLOCKED/PLATFORM domain.
+ * Do not use in new drivers.
*/
struct iommu_ops {
- bool (*capable)(enum iommu_cap);
+ bool (*capable)(struct device *dev, enum iommu_cap);
+ void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
- void (*domain_free)(struct iommu_domain *);
+ struct iommu_domain *(*domain_alloc_user)(
+ struct device *dev, u32 flags, struct iommu_domain *parent,
+ const struct iommu_user_data *user_data);
+ struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
- int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather);
- void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_sync_map)(struct iommu_domain *domain);
- void (*iotlb_sync)(struct iommu_domain *domain,
- struct iommu_iotlb_gather *iotlb_gather);
- phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
- int (*domain_get_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
- int (*domain_set_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
- void (*put_resv_regions)(struct device *dev, struct list_head *list);
- void (*apply_resv_region)(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region);
- /* Window handling functions */
- int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot);
- void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
-
- int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
- bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
+ int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
+ bool (*is_attach_deferred)(struct device *dev);
/* Per device IOMMU features */
- bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
- bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
- /* Aux-domain specific attach/detach entries */
- int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
-
- struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
- void *drvdata);
- void (*sva_unbind)(struct iommu_sva *handle);
- int (*sva_get_pasid)(struct iommu_sva *handle);
-
- int (*page_response)(struct device *dev,
- struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
- int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
- struct iommu_cache_invalidate_info *inv_info);
- int (*sva_bind_gpasid)(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data);
-
- int (*sva_unbind_gpasid)(struct device *dev, int pasid);
+ void (*page_response)(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
+ void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
+ const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
struct module *owner;
+ struct iommu_domain *identity_domain;
+ struct iommu_domain *blocked_domain;
+ struct iommu_domain *release_domain;
+ struct iommu_domain *default_domain;
+};
+
+/**
+ * struct iommu_domain_ops - domain specific operations
+ * @attach_dev: attach an iommu domain to a device
+ * Return:
+ * * 0 - success
+ * * EINVAL - can indicate that device and domain are incompatible due to
+ * some previous configuration of the domain, in which case the
+ * driver shouldn't log an error, since it is legitimate for a
+ * caller to test reuse of existing domains. Otherwise, it may
+ * still represent some other fundamental problem
+ * * ENOMEM - out of memory
+ * * ENOSPC - non-ENOMEM type of resource allocation failures
+ * * EBUSY - device is attached to a domain and cannot be changed
+ * * ENODEV - device specific errors, not able to be attached
+ * * <others> - treated as ENODEV by the caller. Use is discouraged
+ * @set_dev_pasid: set an iommu domain to a pasid of device
+ * @map_pages: map a physically contiguous set of pages of the same size to
+ * an iommu domain.
+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
+ * @cache_invalidate_user: Flush hardware cache for user space IO page table.
+ * The @domain must be IOMMU_DOMAIN_NESTED. The @array
+ * passes in the cache invalidation requests, in form
+ * of a driver data structure. The driver must update
+ * array->entry_num to report the number of handled
+ * invalidation requests. The driver data structure
+ * must be defined in include/uapi/linux/iommufd.h
+ * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ * including no-snoop TLPs on PCIe or other platform
+ * specific mechanisms.
+ * @enable_nesting: Enable nesting
+ * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
+ * @free: Release the domain after use.
+ */
+struct iommu_domain_ops {
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
+
+ int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
+ int (*cache_invalidate_user)(struct iommu_domain *domain,
+ struct iommu_user_data_array *array);
+
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova);
+
+ bool (*enforce_cache_coherency)(struct iommu_domain *domain);
+ int (*enable_nesting)(struct iommu_domain *domain);
+ int (*set_pgtable_quirks)(struct iommu_domain *domain,
+ unsigned long quirks);
+
+ void (*free)(struct iommu_domain *domain);
};
/**
@@ -310,40 +664,41 @@ struct iommu_ops {
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
+ * @singleton_group: Used internally for drivers that have only one group
+ * @max_pasids: number of supported PASIDs
*/
struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device *dev;
-};
-
-/**
- * struct iommu_fault_event - Generic fault event
- *
- * Can represent recoverable faults such as a page requests or
- * unrecoverable faults such as DMA or IRQ remapping faults.
- *
- * @fault: fault descriptor
- * @list: pending fault event list, used for tracking responses
- */
-struct iommu_fault_event {
- struct iommu_fault fault;
- struct list_head list;
+ struct iommu_group *singleton_group;
+ u32 max_pasids;
};
/**
* struct iommu_fault_param - per-device IOMMU fault data
- * @handler: Callback function to handle IOMMU faults at device level
- * @data: handler private data
- * @faults: holds the pending faults which needs response
* @lock: protect pending faults list
+ * @users: user counter to manage the lifetime of the data
+ * @rcu: rcu head for kfree_rcu()
+ * @dev: the device that owns this param
+ * @queue: IOPF queue
+ * @queue_list: index into queue->devices
+ * @partial: faults that are part of a Page Request Group for which the last
+ * request hasn't been submitted yet.
+ * @faults: holds the pending faults which need response
*/
struct iommu_fault_param {
- iommu_dev_fault_handler_t handler;
- void *data;
- struct list_head faults;
struct mutex lock;
+ refcount_t users;
+ struct rcu_head rcu;
+
+ struct device *dev;
+ struct iopf_queue *queue;
+ struct list_head queue_list;
+
+ struct list_head partial;
+ struct list_head faults;
};
/**
@@ -353,19 +708,31 @@ struct iommu_fault_param {
* @fwspec: IOMMU fwspec data
* @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
+ * @max_pasids: number of PASIDs this device can consume
+ * @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
+ * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
*/
struct dev_iommu {
struct mutex lock;
- struct iommu_fault_param *fault_param;
+ struct iommu_fault_param __rcu *fault_param;
struct iommu_fwspec *fwspec;
struct iommu_device *iommu_dev;
void *priv;
+ u32 max_pasids;
+ u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
+ u32 shadow_on_flush:1;
};
-int iommu_device_register(struct iommu_device *iommu);
+int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
@@ -374,93 +741,73 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
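
The registration call now carries the ops and the backing hardware device with the handle. A minimal sketch, assuming a hypothetical my_smmu driver that embeds the struct iommu_device:

	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	static const struct iommu_ops my_iommu_ops;	/* callbacks elided */

	struct my_smmu {
		struct iommu_device iommu;	/* handle registered with the core */
	};

	static int my_smmu_probe(struct platform_device *pdev)
	{
		struct my_smmu *smmu;

		smmu = devm_kzalloc(&pdev->dev, sizeof(*smmu), GFP_KERNEL);
		if (!smmu)
			return -ENOMEM;
		/* ops and the hardware device now travel with the handle */
		return iommu_device_register(&smmu->iommu, &my_iommu_ops,
					     &pdev->dev);
	}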
-static inline void __iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
- iommu->ops = ops;
+ return (struct iommu_device *)dev_get_drvdata(dev);
}
-#define iommu_device_set_ops(iommu, ops) \
-do { \
- struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
- __ops->owner = THIS_MODULE; \
- __iommu_device_set_ops(iommu, __ops); \
-} while (0)
-
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
+/**
+ * iommu_get_iommu_dev - Get iommu_device for a device
+ * @dev: an end-point device
+ *
+ * Note that this function must be called from within an iommu_ops
+ * callback to retrieve the iommu_device for a device; the core code
+ * guarantees that it will not invoke an op without an attached iommu.
+ */
+static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
- iommu->fwnode = fwnode;
+ return dev->iommu->iommu_dev;
}
-static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
-{
- return (struct iommu_device *)dev_get_drvdata(dev);
-}
+#define iommu_get_iommu_dev(dev, type, member) \
+ container_of(__iommu_get_iommu_dev(dev), type, member)
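
A sketch of the intended use, reusing the hypothetical my_smmu embedding from the earlier example: inside an iommu_ops callback the driver recovers its own structure via container_of().

	static struct my_smmu *dev_to_my_smmu(struct device *dev)
	{
		/* resolves dev->iommu->iommu_dev to the embedding driver struct */
		return iommu_get_iommu_dev(dev, struct my_smmu, iommu);
	}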
static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
+ .freelist = LIST_HEAD_INIT(gather->freelist),
};
}
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
-
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
-extern int bus_iommu_probe(struct bus_type *bus);
-extern bool iommu_present(struct bus_type *bus);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
-extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
-extern struct iommu_group *iommu_group_get_by_id(int id);
+extern int bus_iommu_probe(const struct bus_type *bus);
+extern bool iommu_present(const struct bus_type *bus);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
+extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
+extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
-extern int iommu_cache_invalidate(struct iommu_domain *domain,
- struct device *dev,
- struct iommu_cache_invalidate_info *inv_info);
-extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid);
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
-extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather);
-extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents, int prot);
-extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern void generic_iommu_put_resv_regions(struct device *dev,
- struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
- enum iommu_resv_type type);
+ enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
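
iommu_alloc_resv_region() now takes an explicit GFP mask. A hedged sketch of a driver's get_resv_regions callback; the MSI window address and size below are made up:

	static void my_get_resv_regions(struct device *dev, struct list_head *head)
	{
		struct iommu_resv_region *region;

		/* reserve a (made-up) 1 MiB MSI doorbell window */
		region = iommu_alloc_resv_region(0xfee00000, 0x100000,
						 IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
						 IOMMU_RESV_MSI, GFP_KERNEL);
		if (region)
			list_add_tail(&region->list, head);
	}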
@@ -482,45 +829,26 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data);
-
-extern int iommu_unregister_device_fault_handler(struct device *dev);
-
-extern int iommu_report_device_fault(struct device *dev,
- struct iommu_fault_event *evt);
-extern int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
-extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
-extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
+int iommu_enable_nesting(struct iommu_domain *domain);
+int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks);
-/* Window handling function prototypes */
-extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t offset, u64 size,
- int prot);
-extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
if (domain->ops->flush_iotlb_all)
domain->ops->flush_iotlb_all(domain);
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
if (domain->ops->iotlb_sync)
@@ -529,29 +857,102 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain,
iommu_iotlb_gather_init(iotlb_gather);
}
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint from the gathered range
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered range
+ * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
+ * than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long start = iova, end = start + size - 1;
+
+ return gather->end != 0 &&
+ (end + 1 < gather->start || start > gather->end + 1);
+}
+
+
+/**
+ * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
+ * where only the address range matters, and simply minimising intermediate
+ * syncs is preferred.
+ */
+static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long end = iova + size - 1;
+
+ if (gather->start > iova)
+ gather->start = iova;
+ if (gather->end < end)
+ gather->end = end;
+}
+
+/**
+ * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * @domain: IOMMU domain to be invalidated
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build invalidation commands based on individual
+ * pages, or with page size/table level hints which cannot be gathered if they
+ * differ.
+ */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
- unsigned long start = iova, end = start + size;
-
/*
* If the new page is disjoint from the current range or is mapped at
* a different granularity, then sync the TLB so that the gather
* structure can be rewritten.
*/
- if (gather->pgsize != size ||
- end < gather->start || start > gather->end) {
- if (gather->pgsize)
- iommu_tlb_sync(domain, gather);
- gather->pgsize = size;
- }
+ if ((gather->pgsize && gather->pgsize != size) ||
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
- if (gather->end < end)
- gather->end = end;
+ gather->pgsize = size;
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
- if (gather->start > start)
- gather->start = start;
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return gather && gather->queued;
+}
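
Taken together, the gather helpers support the usual unmap pattern. A minimal sketch (range arguments are illustrative):

	static size_t my_unmap_range(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
	{
		struct iommu_iotlb_gather gather;
		size_t unmapped;

		iommu_iotlb_gather_init(&gather);
		unmapped = iommu_unmap_fast(domain, iova, size, &gather);
		iommu_iotlb_sync(domain, &gather);	/* flush what was gathered */
		return unmapped;
	}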
+
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+ if (gather)
+ iommu_iotlb_gather_init(gather);
+
+ dirty->bitmap = bitmap;
+ dirty->gather = gather;
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+ if (dirty->bitmap)
+ iova_bitmap_set(dirty->bitmap, iova, length);
+
+ if (dirty->gather)
+ iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
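
A hedged sketch of how a dirty-tracking walker might feed these helpers; the page-table walk itself is elided and my_collect_dirty is hypothetical:

	static void my_collect_dirty(struct iommu_domain *domain,
				     struct iova_bitmap *bitmap,
				     unsigned long iova, size_t pgsize)
	{
		struct iommu_iotlb_gather gather;
		struct iommu_dirty_bitmap dirty;

		iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
		/* a real walker would test-and-clear the HW dirty bit first */
		iommu_dirty_bitmap_record(&dirty, iova, pgsize);
		iommu_iotlb_sync(domain, &gather);
	}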
/* PCI device grouping function */
@@ -560,21 +961,24 @@ extern struct iommu_group *pci_device_group(struct device *dev);
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
+extern struct iommu_group *generic_single_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
* @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
- * @iommu_priv: IOMMU driver private data for this device
- * @num_pasid_bits: number of PASID bits supported by this device
+ * @flags: IOMMU_FWSPEC_* flags
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
+ *
+ * Note that the IDs (and any other information stored in this structure) should be
+ * considered private to the IOMMU device driver and must not be used directly by
+ * IOMMU consumers.
*/
struct iommu_fwspec {
const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
u32 flags;
- u32 num_pasid_bits;
unsigned int num_ids;
u32 ids[];
};
@@ -587,13 +991,22 @@ struct iommu_fwspec {
*/
struct iommu_sva {
struct device *dev;
+ struct iommu_domain *domain;
+ struct list_head handle_item;
+ refcount_t users;
+};
+
+struct iommu_mm_data {
+ u32 pasid;
+ struct list_head sva_domains;
+ struct list_head sva_handles;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
+int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
@@ -611,31 +1024,39 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
static inline void *dev_iommu_priv_get(struct device *dev)
{
- return dev->iommu->priv;
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
}
-static inline void dev_iommu_priv_set(struct device *dev, void *priv)
-{
- dev->iommu->priv = priv;
-}
+void dev_iommu_priv_set(struct device *dev, void *priv);
+extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
-void iommu_release_device(struct device *dev);
-bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
-int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
-void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
-int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
-
-struct iommu_sva *iommu_sva_bind_device(struct device *dev,
- struct mm_struct *mm,
- void *drvdata);
-void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_get_pasid(struct iommu_sva *handle);
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
+
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+
+int iommu_device_claim_dma_owner(struct device *dev, void *owner);
+void iommu_device_release_dma_owner(struct device *dev);
+
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
+ unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -644,23 +1065,20 @@ struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
+struct iommu_dirty_bitmap {};
+struct iommu_dirty_ops {};
-static inline bool iommu_present(struct bus_type *bus)
+static inline bool iommu_present(const struct bus_type *bus)
{
return false;
}
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
return false;
}
-static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
-{
- return NULL;
-}
-
-static inline struct iommu_group *iommu_group_get_by_id(int id)
+static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
return NULL;
}
@@ -686,14 +1104,7 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return -ENODEV;
-}
-
-static inline int iommu_map_atomic(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
return -ENODEV;
}
@@ -711,41 +1122,22 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
return 0;
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot, gfp_t gfp)
{
- return 0;
-}
-
-static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return 0;
+ return -ENODEV;
}
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
}
-static inline int iommu_domain_window_enable(struct iommu_domain *domain,
- u32 wnd_nr, phys_addr_t paddr,
- u64 size, int prot)
-{
- return -ENODEV;
-}
-
-static inline void iommu_domain_window_disable(struct iommu_domain *domain,
- u32 wnd_nr)
-{
-}
-
static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
return 0;
@@ -844,87 +1236,53 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
+static inline int iommu_group_id(struct iommu_group *group)
{
return -ENODEV;
}
-static inline int iommu_unregister_device_fault_handler(struct device *dev)
+static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
{
return 0;
}
-static inline
-int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
-{
- return -ENODEV;
-}
-
-static inline int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg)
+static inline int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev)
{
return -ENODEV;
}
-static inline int iommu_group_id(struct iommu_group *group)
-{
- return -ENODEV;
-}
-
-static inline int iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- return -EINVAL;
-}
-
-static inline int iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- return -EINVAL;
-}
-
-static inline int iommu_device_register(struct iommu_device *iommu)
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
- return -ENODEV;
+ return NULL;
}
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
{
}
-static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
- return NULL;
+ return false;
}
-static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
{
}
-static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
- struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size)
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
{
}
@@ -971,23 +1329,11 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
}
static inline
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool
-iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
-{
- return false;
-}
-
-static inline bool
-iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- return false;
-}
-
static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
@@ -1000,63 +1346,78 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
return -ENODEV;
}
-static inline int
-iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
- return -ENODEV;
+ return NULL;
}
-static inline void
-iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+static inline int iommu_device_use_default_domain(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}
static inline int
-iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
return -ENODEV;
}
-static inline struct iommu_sva *
-iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
- return NULL;
}
-static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
+ return false;
}
-static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
+static inline void iommu_device_release_dma_owner(struct device *dev)
{
- return IOMMU_PASID_INVALID;
}
-static inline int
-iommu_cache_invalidate(struct iommu_domain *domain,
- struct device *dev,
- struct iommu_cache_invalidate_info *inv_info)
+static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
return -ENODEV;
}
-static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data)
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
{
return -ENODEV;
}
-static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, int pasid)
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
{
- return -ENODEV;
}
-static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+static inline struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
+ unsigned int type)
{
return NULL;
}
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
+#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
+void iommu_group_mutex_assert(struct device *dev);
+#else
+static inline void iommu_group_mutex_assert(struct device *dev)
+{
+}
+#endif
+
/**
* iommu_map_sgtable - Map the given buffer to the IOMMU domain
* @domain: The IOMMU domain to perform the mapping
@@ -1067,10 +1428,11 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
* Creates a mapping at @iova for the buffer described by a scatterlist
* stored in the given sg_table object in the provided IOMMU domain.
*/
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
- return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
+ GFP_KERNEL);
}
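
Since the return type is now ssize_t, callers must treat negative values as errors rather than checking for a zero length. An illustrative caller:

	static int my_map_buffer(struct iommu_domain *domain, unsigned long iova,
				 struct sg_table *sgt)
	{
		ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
						   IOMMU_READ | IOMMU_WRITE);

		if (mapped < 0)
			return mapped;	/* an errno now, not a 0 length, on failure */
		return 0;
	}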
#ifdef CONFIG_IOMMU_DEBUGFS
@@ -1080,4 +1442,183 @@ void iommu_debugfs_setup(void);
static inline void iommu_debugfs_setup(void) {}
#endif
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/msi.h>
+
+/* Setup call for arch DMA mapping code */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
+
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
+
+#else /* CONFIG_IOMMU_DMA */
+
+struct msi_desc;
+struct msi_msg;
+
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
+{
+}
+
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ return 0;
+}
+
+static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+
+/*
+ * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
+ * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
+ * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
+ */
+#define TEGRA_STREAM_ID_BYPASS 0x7f
+
+static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (fwspec && fwspec->num_ids == 1) {
+ *stream_id = fwspec->ids[0] & 0xffff;
+ return true;
+ }
+#endif
+
+ return false;
+}
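
An illustrative caller; the register offset is an assumption. Since the helper only writes *stream_id on success, pre-initializing to the bypass value covers the untranslated case:

	static void my_program_sid(struct device *dev, void __iomem *regs)
	{
		u32 sid = TEGRA_STREAM_ID_BYPASS;

		tegra_dev_iommu_get_stream_id(dev, &sid);
		writel(sid, regs + 0x30);	/* register offset is made up */
	}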
+
+#ifdef CONFIG_IOMMU_MM_DATA
+static inline void mm_pasid_init(struct mm_struct *mm)
+{
+ /*
+	 * During dup_mm(), the new mm is memcpy'd from the old one, which makes
+	 * both mms point to the same iommu_mm instance. When either of the two
+	 * mms is released, the iommu_mm instance is freed, leaving the other mm
+	 * exposed to a use-after-free/double-free. To avoid that, the iommu_mm
+	 * pointer of a new mm must be zeroed here.
+ */
+ mm->iommu_mm = NULL;
+}
+
+static inline bool mm_valid_pasid(struct mm_struct *mm)
+{
+ return READ_ONCE(mm->iommu_mm);
+}
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+ if (!iommu_mm)
+ return IOMMU_PASID_INVALID;
+ return iommu_mm->pasid;
+}
+
+void mm_pasid_drop(struct mm_struct *mm);
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+ struct mm_struct *mm);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm);
+#else
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+ return NULL;
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ return IOMMU_PASID_INVALID;
+}
+static inline void mm_pasid_init(struct mm_struct *mm) {}
+static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void mm_pasid_drop(struct mm_struct *mm) {}
+
+static inline struct iommu_domain *
+iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
+{
+ return NULL;
+}
+#endif /* CONFIG_IOMMU_MM_DATA */
+
+#ifdef CONFIG_IOMMU_IOPF
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_flush_dev(struct device *dev);
+struct iopf_queue *iopf_queue_alloc(const char *name);
+void iopf_queue_free(struct iopf_queue *queue);
+int iopf_queue_discard_partial(struct iopf_queue *queue);
+void iopf_free_group(struct iopf_group *group);
+void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status);
+#else
+static inline int
+iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void
+iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+{
+}
+
+static inline int iopf_queue_flush_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void iopf_queue_free(struct iopf_queue *queue)
+{
+}
+
+static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ return -ENODEV;
+}
+
+static inline void iopf_free_group(struct iopf_group *group)
+{
+}
+
+static inline void
+iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+{
+}
+
+static inline void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status)
+{
+}
+#endif /* CONFIG_IOMMU_IOPF */
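
A hedged sketch of the expected setup flow for a driver using the IOPF queue API above; error handling is minimal and my_iopf_init is hypothetical:

	static struct iopf_queue *my_iopf_init(struct device *dev)
	{
		struct iopf_queue *queue = iopf_queue_alloc(dev_name(dev));

		if (!queue)
			return NULL;
		if (iopf_queue_add_device(queue, dev)) {
			iopf_queue_free(queue);
			return NULL;
		}
		return queue;
	}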
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
new file mode 100644
index 000000000000..ffc3a949f837
--- /dev/null
+++ b/include/linux/iommufd.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_IOMMUFD_H
+#define __LINUX_IOMMUFD_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+struct device;
+struct iommufd_device;
+struct page;
+struct iommufd_ctx;
+struct iommufd_access;
+struct file;
+struct iommu_group;
+
+struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
+ struct device *dev, u32 *id);
+void iommufd_device_unbind(struct iommufd_device *idev);
+
+int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
+void iommufd_device_detach(struct iommufd_device *idev);
+
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
+u32 iommufd_device_to_id(struct iommufd_device *idev);
+
+struct iommufd_access_ops {
+ u8 needs_pin_pages : 1;
+ void (*unmap)(void *data, unsigned long iova, unsigned long length);
+};
+
+enum {
+ IOMMUFD_ACCESS_RW_READ = 0,
+ IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
+ /* Set if the caller is in a kthread then rw will use kthread_use_mm() */
+ IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
+
+ /* Only for use by selftest */
+ __IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
+};
+
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx,
+ const struct iommufd_access_ops *ops, void *data, u32 *id);
+void iommufd_access_destroy(struct iommufd_access *access);
+int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
+void iommufd_access_detach(struct iommufd_access *access);
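
A minimal sketch of an in-kernel consumer of the access API, in the spirit of an emulated-device driver; all my_* names and the flow are assumptions, not part of this patch:

	static void my_unmap(void *data, unsigned long iova, unsigned long length)
	{
		/* stop using [iova, iova + length) before the core unmaps it */
	}

	static const struct iommufd_access_ops my_access_ops = {
		.needs_pin_pages = 1,
		.unmap = my_unmap,
	};

	static struct iommufd_access *my_bind(struct iommufd_ctx *ictx, u32 ioas_id)
	{
		struct iommufd_access *access;
		u32 id;

		access = iommufd_access_create(ictx, &my_access_ops, NULL, &id);
		if (IS_ERR(access))
			return access;
		if (iommufd_access_attach(access, ioas_id)) {
			iommufd_access_destroy(access);
			return ERR_PTR(-ENODEV);
		}
		return access;
	}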
+
+void iommufd_ctx_get(struct iommufd_ctx *ictx);
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
+void iommufd_ctx_put(struct iommufd_ctx *ictx);
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
+
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+ unsigned long length, struct page **out_pages,
+ unsigned int flags);
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova, unsigned long length);
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags);
+int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
+int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
+int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
+#else /* !CONFIG_IOMMUFD */
+static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
+{
+}
+
+static inline int iommufd_access_pin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length,
+ struct page **out_pages,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
+static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD */
+#endif
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index bc89ac625f26..19a7b00baff4 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -53,6 +53,7 @@
} \
if (__sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ cpu_relax(); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
@@ -60,8 +61,7 @@
/**
* read_poll_timeout_atomic - Periodically poll an address until a condition is
* met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
+ * @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @delay_us: Time to udelay between reads in us (0 tight-loops). Should
@@ -69,10 +69,15 @@
* Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
* @delay_before_read: if it is true, delay @delay_us before read.
+ * @args: arguments for @op poll
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val.
*
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
+ *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
@@ -80,21 +85,30 @@
delay_before_read, args...) \
({ \
u64 __timeout_us = (timeout_us); \
+ s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
unsigned long __delay_us = (delay_us); \
- ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
- if (delay_before_read && __delay_us) \
+ u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
+ if (delay_before_read && __delay_us) { \
udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
for (;;) { \
(val) = op(args); \
if (cond) \
break; \
- if (__timeout_us && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
+ if (__timeout_us && __left_ns < 0) { \
(val) = op(args); \
break; \
} \
- if (__delay_us) \
+ if (__delay_us) { \
udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
+ cpu_relax(); \
+ if (__timeout_us) \
+ __left_ns--; \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
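
An illustrative caller of the reworked macro: polling a made-up status register for a ready bit every 10 us, for at most 1000 us, without touching timekeeping:

	static int my_wait_ready(void __iomem *base)
	{
		u32 val;

		return read_poll_timeout_atomic(readl, val, val & BIT(0),
						10, 1000, false, base + 0x04);
	}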
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 6c2b06fe8beb..db7fe25f3370 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -10,9 +10,10 @@
#define _LINUX_IOPORT_H
#ifndef __ASSEMBLY__
+#include <linux/bits.h>
#include <linux/compiler.h>
+#include <linux/minmax.h>
#include <linux/types.h>
-#include <linux/bits.h>
/*
* Resources are tree-like, allowing
* nesting etc..
@@ -58,6 +59,10 @@ struct resource {
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+/* IORESOURCE_SYSRAM specific bits. */
+#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */
+#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
@@ -74,7 +79,8 @@ struct resource {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -103,7 +109,7 @@ struct resource {
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
-#define IORESOURCE_MEM_DRIVER_MANAGED (1<<7)
+#define IORESOURCE_MEM_NONPOSTED (1<<7)
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
@@ -136,6 +142,7 @@ enum {
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
IORES_DESC_SOFT_RESERVED = 8,
+ IORES_DESC_CXL = 9,
};
/*
@@ -148,7 +155,7 @@ enum {
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
- { \
+(struct resource) { \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
@@ -166,6 +173,11 @@ enum {
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+ DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
@@ -217,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
@@ -226,12 +238,39 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
return r1->start <= r2->start && r1->end >= r2->end;
}
+/* True if any part of r1 overlaps r2 */
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
+{
+ return r1->start <= r2->end && r1->end >= r2->start;
+}
+
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = max(r1->start, r2->start);
+ r->end = min(r1->end, r2->end);
+ return true;
+}
+
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = min(r1->start, r2->start);
+ r->end = max(r1->end, r2->end);
+ return true;
+}
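
The compound-literal form of DEFINE_RES_NAMED() added above also makes the helpers easy to demonstrate with plain initializers; the addresses here are arbitrary:

	static void my_resource_demo(void)
	{
		struct resource a = DEFINE_RES_MEM(0x1000, 0x2000); /* [0x1000, 0x2fff] */
		struct resource b = DEFINE_RES_MEM(0x2000, 0x2000); /* [0x2000, 0x3fff] */
		struct resource r;

		if (resource_intersection(&a, &b, &r))
			pr_info("overlap: %pR\n", &r);	/* [0x2000, 0x2fff] */
		if (resource_union(&a, &b, &r))
			pr_info("union: %pR\n", &r);	/* [0x1000, 0x3fff] */
	}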
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_muxed(start, n, name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
@@ -248,8 +287,10 @@ extern struct resource * __request_region(struct resource *,
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int release_mem_region_adjustable(struct resource *, resource_size_t,
- resource_size_t);
+extern void release_mem_region_adjustable(resource_size_t, resource_size_t);
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void merge_system_ram_resource(struct resource *res);
#endif
/* Wrappers for managed devices */
@@ -277,6 +318,8 @@ extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern bool iomem_is_exclusive(u64 addr);
+extern bool resource_is_exclusive(struct resource *resource, u64 addr,
+ resource_size_t size);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
@@ -288,25 +331,27 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
-/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
-{
- return (r1->start <= r2->end && r1->end >= r2->start);
-}
-
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align, const char *name);
-#ifdef CONFIG_IO_STRICT_DEVMEM
-void revoke_devmem(struct resource *res);
-#else
-static inline void revoke_devmem(struct resource *res) { };
-#endif
+static inline void irqresource_disabled(struct resource *res, u32 irq)
+{
+ res->start = irq;
+ res->end = irq;
+ res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
+}
+
+extern struct address_space *iomem_get_mapping(void);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index e9bfe6972aed..db1249cd9692 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -6,46 +6,22 @@
#include <linux/sched/rt.h>
#include <linux/iocontext.h>
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT (13)
-#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
/*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
*/
-enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)
/*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
*/
-#define IOPRIO_BE_NR (8)
-
-enum {
- IOPRIO_WHO_PROCESS = 1,
- IOPRIO_WHO_PGRP,
- IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+ unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM (4)
+ return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
/*
* if process has set io priority explicitly, use that. if not, convert
@@ -70,23 +46,42 @@ static inline int task_nice_ioclass(struct task_struct *task)
return IOPRIO_CLASS_BE;
}
+#ifdef CONFIG_BLOCK
/*
- * If the calling process has set an I/O priority, use that. Otherwise, return
+ * If the task has set an I/O priority, use that. Otherwise, return
* the default I/O priority.
+ *
+ * Expected to be called for current task or with task_lock() held to keep
+ * io_context stable.
*/
-static inline int get_current_ioprio(void)
+static inline int __get_task_ioprio(struct task_struct *p)
{
- struct io_context *ioc = current->io_context;
+ struct io_context *ioc = p->io_context;
+ int prio;
- if (ioc)
- return ioc->ioprio;
- return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ if (!ioc)
+ return IOPRIO_DEFAULT;
+
+ if (p != current)
+ lockdep_assert_held(&p->alloc_lock);
+
+ prio = ioc->ioprio;
+ if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+ prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+ return prio;
+}
+#else
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ return IOPRIO_DEFAULT;
}
+#endif /* CONFIG_BLOCK */
-/*
- * For inheritance, return the highest of the two given priorities
- */
-extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+static inline int get_current_ioprio(void)
+{
+ return __get_task_ioprio(current);
+}
extern int set_task_ioprio(struct task_struct *task, int ioprio);
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
new file mode 100644
index 000000000000..f0e99fc7dd8b
--- /dev/null
+++ b/include/linux/ioremap.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IOREMAP_H
+#define _LINUX_IOREMAP_H
+
+#include <linux/kasan.h>
+#include <asm/pgtable.h>
+
+#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
+/*
+ * Ioremap often, but not always, uses the generic vmalloc area. E.g. on
+ * the powerpc architecture, it may use a separate ioremap space.
+ */
+#ifndef IOREMAP_START
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+#endif
+static inline bool is_ioremap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)kasan_reset_tag(x);
+
+ return addr >= IOREMAP_START && addr < IOREMAP_END;
+}
+#else
+static inline bool is_ioremap_addr(const void *x)
+{
+ return false;
+}
+#endif
+
+#endif /* _LINUX_IOREMAP_H */
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
new file mode 100644
index 000000000000..4696abfd311c
--- /dev/null
+++ b/include/linux/iosys-map.h
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer abstraction for IO/system memory
+ */
+
+#ifndef __IOSYS_MAP_H__
+#define __IOSYS_MAP_H__
+
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * When accessing a memory region, depending on its location, users may have to
+ * access it with I/O operations or memory load/store operations. For example,
+ * copying to system memory could be done with memcpy(), copying to I/O memory
+ * would be done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * The user of such pointer may not have information about the mapping of that
+ * region or may want to have a single code path to handle operations on that
+ * buffer, regardless if it's located in system or IO memory. The type
+ * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the
+ * buffer can be passed around to other drivers or have separate duties inside
+ * the same driver for allocation, read and write operations.
+ *
+ * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered
+ * bad style. Rather than accessing its fields directly, use one of the provided
+ * helper functions, or implement your own. For example, instances of
+ * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with
+ * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These
+ * helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeef);
+ *
+ * iosys_map_set_vaddr(&map, 0xdeadbeef);
+ *
+ * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or
+ * iosys_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeef);
+ *
+ * iosys_map_set_vaddr_iomem(&map, 0xdeadbeef);
+ *
+ * Instances of struct iosys_map do not have to be cleaned up, but
+ * can be cleared to NULL with iosys_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * iosys_map_clear(&map);
+ *
+ * Test if a mapping is valid with either iosys_map_is_set() or
+ * iosys_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (iosys_map_is_set(&map) != iosys_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for
+ * equality with iosys_map_is_equal(). Mappings that point to different memory
+ * spaces, system or I/O, are never equal. That's even true if both spaces are
+ * located in the same address space, both mappings contain the same address
+ * value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map sys_map; // refers to system memory
+ * struct iosys_map io_map; // refers to I/O memory
+ *
+ * if (iosys_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct iosys_map can be used to access or manipulate the
+ * buffer memory. Depending on the location of the memory, the provided helpers
+ * will pick the correct operations. Data can be copied into the memory with
+ * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * iosys_map_memcpy_to(&map, src, len);
+ * iosys_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct iosys_map - Pointer to IO/system memory
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the buffer is located in I/O memory, or false
+ * otherwise.
+ */
+struct iosys_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory
+ * @vaddr_: A system-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory
+ * @vaddr_iomem_: An I/O-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \
+ { \
+ .vaddr_iomem = (vaddr_iomem_), \
+ .is_iomem = true, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
+ * @map_: The iosys_map structure to copy from
+ * @offset_: Offset to add to the other mapping
+ *
+ * Initializes a new iosys_map struct based on another passed as argument. It
+ * does a shallow copy of the struct so it's possible to update the back storage
+ * without changing where the original map points to. It is the equivalent of
+ * doing:
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = other_map;
+ * iosys_map_incr(&map, offset);
+ *
+ * Example usage:
+ *
+ * .. code-block:: c
+ *
+ * void foo(struct device *dev, struct iosys_map *base_map)
+ * {
+ * ...
+ * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET);
+ * ...
+ * }
+ *
+ * The advantage of using the initializer over just increasing the offset with
+ * iosys_map_incr() like above is that the new map will always point to the
+ * right place of the buffer during its scope. It reduces the risk of updating
+ * the wrong part of the buffer with no compiler warning about it. If the
+ * assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn
+ * about the use of an uninitialized variable.
+ */
+#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
+ struct iosys_map copy_ = *map_; \
+ iosys_map_incr(&copy_, offset_); \
+ copy_; \
+})
+
+/**
+ * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory
+ * @map: The iosys_map structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory
+ * @map: The iosys_map structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * iosys_map_is_equal - Compares two iosys mapping structures for equality
+ * @lhs: The iosys_map structure
+ * @rhs: An iosys_map structure to compare with
+ *
+ * Two iosys mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool iosys_map_is_equal(const struct iosys_map *lhs,
+ const struct iosys_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * iosys_map_is_null - Tests whether an iosys mapping is NULL
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool iosys_map_is_null(const struct iosys_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * iosys_map_is_set - Tests if the iosys mapping has been set
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool iosys_map_is_set(const struct iosys_map *map)
+{
+ return !iosys_map_is_null(map);
+}
+
+/**
+ * iosys_map_clear - Clears an iosys mapping structure
+ * @map: The iosys_map structure
+ *
+ * Clears all fields to zero, including struct iosys_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset for
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void iosys_map_clear(struct iosys_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+/**
+ * iosys_map_memcpy_to - Memcpy into offset of iosys_map
+ * @dst: The iosys_map structure
+ * @dst_offset: The offset within @dst at which to copy
+ * @src: The source buffer
+ * @len: The number of bytes to copy
+ *
+ * Copies data into an iosys_map at an offset. The source buffer is in
+ * system memory. Depending on the buffer's location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset,
+ const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem + dst_offset, src, len);
+ else
+ memcpy(dst->vaddr + dst_offset, src, len);
+}
+
+/**
+ * iosys_map_memcpy_from - Memcpy from iosys_map into system memory
+ * @dst: Destination in system memory
+ * @src: The iosys_map structure
+ * @src_offset: The offset from which to copy
+ * @len: The number of bytes to copy into @dst
+ *
+ * Copies data from an iosys_map at the given offset. The destination buffer is in
+ * system memory. Depending on the mapping location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ if (src->is_iomem)
+ memcpy_fromio(dst, src->vaddr_iomem + src_offset, len);
+ else
+ memcpy(dst, src->vaddr + src_offset, len);
+}
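A sketch of round-tripping a small header through a mapping; ``struct hdr``
and its values are illustrative, and the same calls work whether ``map``
points to system or I/O memory:

.. code-block:: c

	struct hdr { u32 magic; u32 len; } h = { .magic = 0xabcd1234, .len = 64 };

	iosys_map_memcpy_to(&map, 0, &h, sizeof(h));
	iosys_map_memcpy_from(&h, &map, 0, sizeof(h));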
+
+/**
+ * iosys_map_incr - Increments the address stored in an iosys mapping
+ * @map: The iosys_map structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in an iosys mapping. Depending on the
+ * buffer's location, the correct pointer field is updated.
+ */
+static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+/**
+ * iosys_map_memset - Memset iosys_map
+ * @dst: The iosys_map structure
+ * @offset: Offset from dst where to start setting value
+ * @value: The value to set
+ * @len: The number of bytes to set in dst
+ *
+ * Sets @value in the iosys_map. Depending on the buffer's location, the helper
+ * picks the correct method of accessing the memory.
+ */
+static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ if (dst->is_iomem)
+ memset_io(dst->vaddr_iomem + offset, value, len);
+ else
+ memset(dst->vaddr + offset, value, len);
+}
+
+#ifdef CONFIG_64BIT
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: val_ = readq(vaddr_iomem_)
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: writeq(val_, vaddr_iomem_)
+#else
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64))
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64))
+#endif
+
+#define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: val__ = readb(vaddr_iomem__), \
+ u16: val__ = readw(vaddr_iomem__), \
+ u32: val__ = readl(vaddr_iomem__), \
+ __iosys_map_rd_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_rd_sys(val__, vaddr__, type__) \
+ val__ = READ_ONCE(*(type__ *)(vaddr__))
+
+#define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: writeb(val__, vaddr_iomem__), \
+ u16: writew(val__, vaddr_iomem__), \
+ u32: writel(val__, vaddr_iomem__), \
+ __iosys_map_wr_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_wr_sys(val__, vaddr__, type__) \
+ WRITE_ONCE(*(type__ *)(vaddr__), val__)
+
+/**
+ * iosys_map_rd - Read a C-type value from the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from which to read
+ * @type__: Type of the value being read
+ *
+ * Read a C type value (u8, u16, u32 or u64) from the iosys_map. For other
+ * types, or if the pointer may be unaligned (and problematic for the
+ * architecture), use iosys_map_memcpy_from().
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val_; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+ val_; \
+})
+
+/**
+ * iosys_map_wr - Write a C-type value to the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from the mapping to write to
+ * @type__: Type of the value being written
+ * @val__: Value to write
+ *
+ * Write a C type value (u8, u16, u32 or u64) to the iosys_map. For other
+ * types, or if the pointer may be unaligned (and problematic for the
+ * architecture), use iosys_map_memcpy_to().
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val_ = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+})
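A sketch of reading and writing a 32-bit register image through a mapping;
``CTRL_OFFSET`` and ``STATUS_OFFSET`` are hypothetical byte offsets:

.. code-block:: c

	u32 status;

	iosys_map_wr(&map, CTRL_OFFSET, u32, 0x1);
	status = iosys_map_rd(&map, STATUS_OFFSET, u32);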
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Read a value from the iosys_map, whose layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and
+ * its value read. If the field access would incur an unaligned access, then
+ * either iosys_map_memcpy_from() needs to be used or the architecture must
+ * support it.
+ * For example: suppose there is a @struct foo defined as below and the value
+ * ``foo.field2.inner2`` needs to be read from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * int field1;
+ * struct {
+ * int inner1;
+ * int inner2;
+ * } field2;
+ * int field3;
+ * } __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address | Content |
+ * +==============================+==========================+
+ * | buffer + 0000 | start of mmapped buffer |
+ * | | pointed by iosys_map |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww | ``foo.field2.inner2`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy | end of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz | end of mmapped buffer |
+ * +------------------------------+--------------------------+
+ *
+ * The offsets denoted by wwww, yyyy and zzzz are either calculated
+ * automatically by this macro or not needed. This is the code to read that
+ * value:
+ *
+ * .. code-block:: c
+ *
+ * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
+ struct_type__ *s_; \
+ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s_->field__)); \
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write
+ * @val__: Value to write
+ *
+ * Write a value to the iosys_map, whose layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and
+ * @val__ is written. If the field access would incur an unaligned
+ * access, then either iosys_map_memcpy_to() needs to be used or the
+ * architecture must support it. Refer to iosys_map_rd_field() for expected
+ * usage and memory layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct_type__ *s_; \
+ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s_->field__), val__); \
+})
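Continuing the ``struct foo`` example from iosys_map_rd_field(), a write to
the same member might look like this sketch, with ``map`` and ``offset`` as
above and the value illustrative:

.. code-block:: c

	iosys_map_wr_field(&map, offset, struct foo, field2.inner2, 0xcafe);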
+
+#endif /* __IOSYS_MAP_H__ */
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..270454a6703d
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided: one for handling
+ * mapped kernel addresses and the other for user addresses, which have the
+ * potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process (i.e. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (i.e. 0 means nothing was
+ * processed and a return value of @len means it was processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
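A sketch of a step-function pair that copies iterated data into a flat kernel
buffer passed via @priv; the names are illustrative. Both callbacks return the
amount left unprocessed, so copy_from_user()'s return value can be passed
through directly:

.. code-block:: c

	static size_t copy_step(void *base, size_t progress, size_t len,
				void *priv, void *priv2)
	{
		memcpy(priv + progress, base, len);
		return 0;	/* fully processed */
	}

	static size_t copy_ustep(void __user *base, size_t progress, size_t len,
				 void *priv, void *priv2)
	{
		/* Returns the number of bytes not copied, i.e. not processed. */
		return copy_from_user(priv + progress, base, len);
	}

	/* Pull up to @size bytes out of @iter into @buf: */
	copied = iterate_and_advance(iter, size, buf, copy_ustep, copy_step);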
+
+#endif /* _LINUX_IOV_ITER_H */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637abffee8..83c00fac2acb 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -22,47 +21,8 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
-struct iova_magazine;
-struct iova_cpu_rcache;
-#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
-#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
-
-struct iova_rcache {
- spinlock_t lock;
- unsigned long depot_size;
- struct iova_magazine *depot[MAX_GLOBAL_MAGS];
- struct iova_cpu_rcache __percpu *cpu_rcaches;
-};
-
-struct iova_domain;
-
-/* Call-Back from IOVA code into IOMMU drivers */
-typedef void (* iova_flush_cb)(struct iova_domain *domain);
-
-/* Destructor for per-entry data */
-typedef void (* iova_entry_dtor)(unsigned long data);
-
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE 256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT 10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
- unsigned long iova_pfn;
- unsigned long pages;
- unsigned long data;
- u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned head, tail;
- spinlock_t lock;
-};
+struct iova_rcache;
/* holds all the iova translations for a domain */
struct iova_domain {
@@ -74,27 +34,10 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct iova anchor; /* rbtree lookup anchor */
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
-
- iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
- TLBs */
-
- iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
- iova entry */
- struct timer_list fq_timer; /* Timer to regularily empty the
- flush-queues */
- atomic_t fq_timer_on; /* 1 when timer is active, 0
- when not */
+ struct iova_rcache *rcaches;
+ struct hlist_node cpuhp_dead;
};
static inline unsigned long iova_size(struct iova *iova)
@@ -132,12 +75,12 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
-struct iova *alloc_iova_mem(void);
-void free_iova_mem(struct iova *iova);
+unsigned long iova_rcache_range(void);
+
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
@@ -145,24 +88,15 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
-void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-bool has_iova_flush_queue(struct iova_domain *iovad);
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
+int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
-struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
@@ -173,15 +107,6 @@ static inline void iova_cache_put(void)
{
}
-static inline struct iova *alloc_iova_mem(void)
-{
- return NULL;
-}
-
-static inline void free_iova_mem(struct iova *iova)
-{
-}
-
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
@@ -204,12 +129,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
-static inline void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
-}
-
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
@@ -225,29 +144,12 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad,
return NULL;
}
-static inline void copy_reserved_iova(struct iova_domain *from,
- struct iova_domain *to)
-{
-}
-
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
unsigned long start_pfn)
{
}
-static inline bool has_iova_flush_queue(struct iova_domain *iovad)
-{
- return false;
-}
-
-static inline int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb,
- iova_entry_dtor entry_dtor)
-{
- return -ENODEV;
-}
-
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
@@ -258,18 +160,6 @@ static inline void put_iova_domain(struct iova_domain *iovad)
{
}
-static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova,
- unsigned long pfn_lo,
- unsigned long pfn_hi)
-{
- return NULL;
-}
-
-static inline void free_cpu_cached_iovas(unsigned int cpu,
- struct iova_domain *iovad)
-{
-}
#endif
#endif
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
new file mode 100644
index 000000000000..1c338f5e5b7a
--- /dev/null
+++ b/include/linux/iova_bitmap.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef _IOVA_BITMAP_H_
+#define _IOVA_BITMAP_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct iova_bitmap;
+
+typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length,
+ void *opaque);
+
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER)
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size,
+ u64 __user *data);
+void iova_bitmap_free(struct iova_bitmap *bitmap);
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn);
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length);
+#else
+static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova,
+ size_t length,
+ unsigned long page_size,
+ u64 __user *data)
+{
+ return NULL;
+}
+
+static inline void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+}
+
+static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+}
+#endif
+
+#endif
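A sketch of the intended calling pattern, assuming allocation failures are
reported as an ERR_PTR; ``user_data`` and ``drvdata`` are placeholder names.
The driver callback reports dirty IOVA ranges back through iova_bitmap_set():

.. code-block:: c

	static int mark_dirty(struct iova_bitmap *bitmap, unsigned long iova,
			      size_t length, void *opaque)
	{
		/* consult device/driver state for this range, then: */
		iova_bitmap_set(bitmap, iova, length);
		return 0;
	}

	bitmap = iova_bitmap_alloc(iova, length, PAGE_SIZE, user_data);
	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);
	ret = iova_bitmap_for_each(bitmap, drvdata, mark_dirty);
	iova_bitmap_free(bitmap);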
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 3d9c6750af62..d11c25f5030a 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -35,4 +35,25 @@ static inline unsigned int ip_transport_len(const struct sk_buff *skb)
{
return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
}
+
+static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ u32 len = ntohs(iph->tot_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len : skb->len - skb_network_offset(skb);
+}
+
+static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
+{
+ return iph_totlen(skb, ip_hdr(skb));
+}
+
+/* The IPv4 datagram length is stored in a 16-bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
+
+static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
+{
+ iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
+}
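The asymmetry above supports IPv4 jumbo (BIG TCP) GSO packets: lengths that do
not fit the 16-bit tot_len are stored as 0 and recovered from the skb. A
worked sketch:

.. code-block:: c

	iph_set_totlen(iph, 131072);	/* > IP_MAX_MTU, stores tot_len = 0 */

	/* On the read side, a GSO TCP skb with tot_len == 0 yields: */
	len = skb_ip_totlen(skb);	/* skb->len - skb_network_offset(skb) */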
#endif /* _LINUX_IP_H */
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index e1c9eea6015b..9b1434247aab 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
-#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
#include <linux/uidgid.h>
#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index a06a78c67f19..e8240cf2611a 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -10,6 +10,8 @@
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
struct user_namespace;
@@ -27,7 +29,6 @@ struct ipc_ids {
};
struct ipc_namespace {
- refcount_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -36,8 +37,8 @@ struct ipc_namespace {
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
- atomic_t msg_bytes;
- atomic_t msg_hdrs;
+ struct percpu_counter percpu_msg_bytes;
+ struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -64,6 +65,12 @@ struct ipc_namespace {
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
+ struct ctl_table_set mq_set;
+ struct ctl_table_header *mq_sysctls;
+
+ struct ctl_table_set ipc_set;
+ struct ctl_table_header *ipc_sysctls;
+
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -128,10 +135,20 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags,
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ if (ns) {
+ if (refcount_inc_not_zero(&ns->ns.count))
+ return ns;
+ }
+
+ return NULL;
+}
+
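A sketch of the pattern get_ipc_ns_not_zero() enables: observing another
task's namespace whose refcount may already have dropped to zero. Locking
around the nsproxy access is elided here and left as an assumption:

.. code-block:: c

	rcu_read_lock();
	ns = get_ipc_ns_not_zero(task->nsproxy->ipc_ns);
	rcu_read_unlock();
	if (ns) {
		/* ... the namespace is safe to use ... */
		put_ipc_ns(ns);
	}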
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -148,6 +165,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
@@ -155,15 +177,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
- return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+ return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index ef61676cfe05..a1c9c0d48ebf 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -72,6 +72,11 @@ struct ipmi_recv_msg {
unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
};
+#define INIT_IPMI_RECV_MSG(done_handler) \
+{ \
+ .done = done_handler \
+}
+
/* Allocate and free the receive message. */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
@@ -333,4 +338,9 @@ struct ipmi_smi_info {
/* This is to get the private info of struct ipmi_smi */
extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+#define GET_DEVICE_ID_MAX_RETRY 5
+
+/* Helper function for computing the IPMB checksum of some data. */
+unsigned char ipmb_checksum(unsigned char *data, int size);
+
#endif /* __LINUX_IPMI_H */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index deec18b8944a..5d69820d8b02 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -39,6 +39,59 @@ struct ipmi_smi;
#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
/*
+ * SMI messages
+ *
+ * When communicating with an SMI, messages come in two formats:
+ *
+ * * Normal (to a BMC over a BMC interface)
+ *
+ * * IPMB (over an IPMB bus to another MC)
+ *
+ * When normal, commands are sent using the format defined by a
+ * standard message over KCS (NetFn must be even):
+ *
+ * +-----------+-----+------+
+ * | NetFn/LUN | Cmd | Data |
+ * +-----------+-----+------+
+ *
+ * And responses, similarly, with a completion code added (NetFn must
+ * be odd):
+ *
+ * +-----------+-----+------+------+
+ * | NetFn/LUN | Cmd | CC | Data |
+ * +-----------+-----+------+------+
+ *
+ * With normal messages, only commands are sent and only responses are
+ * received.
+ *
+ * In IPMB mode, we are acting as an IPMB device. Commands will be in
+ * the following format (NetFn must be even):
+ *
+ * +-------------+------+-------------+-----+------+
+ * | NetFn/rsLUN | Addr | rqSeq/rqLUN | Cmd | Data |
+ * +-------------+------+-------------+-----+------+
+ *
+ * Responses use the following format:
+ *
+ * +-------------+------+-------------+-----+------+------+
+ * | NetFn/rqLUN | Addr | rqSeq/rsLUN | Cmd | CC | Data |
+ * +-------------+------+-------------+-----+------+------+
+ *
+ * This is similar to the format defined in the IPMB manual section
+ * 2.11.1 with the checksums and the first address removed. Also, the
+ * address is always the remote address.
+ *
+ * IPMB messages can be commands and responses in both directions.
+ * Received commands are handled as received commands from the message
+ * queue.
+ */
+
+enum ipmi_smi_msg_type {
+ IPMI_SMI_MSG_TYPE_NORMAL = 0,
+ IPMI_SMI_MSG_TYPE_IPMB_DIRECT
+};
+
+/*
* Messages to/from the lower layer. The smi interface will take one
* of these to send. After the send has occurred and a response has
* been received, it will report this same data structure back up to
@@ -54,6 +107,8 @@ struct ipmi_smi;
struct ipmi_smi_msg {
struct list_head link;
+ enum ipmi_smi_msg_type type;
+
long msgid;
void *user_data;
@@ -70,9 +125,19 @@ struct ipmi_smi_msg {
void (*done)(struct ipmi_smi_msg *msg);
};
+#define INIT_IPMI_SMI_MSG(done_handler) \
+{ \
+ .done = done_handler, \
+ .type = IPMI_SMI_MSG_TYPE_NORMAL \
+}
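A sketch of a statically initialized message using the new initializer; the
handler name is illustrative:

.. code-block:: c

	static void smi_msg_done(struct ipmi_smi_msg *msg)
	{
		/* completion handling */
	}

	static struct ipmi_smi_msg halt_msg = INIT_IPMI_SMI_MSG(smi_msg_done);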
+
struct ipmi_smi_handlers {
struct module *owner;
+ /* Capabilities of the SMI. */
+#define IPMI_SMI_CAN_HANDLE_IPMB_DIRECT (1 << 0)
+ unsigned int flags;
+
/*
* The low-level interface cannot start sending messages to
* the upper layer until this function is called. This may
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index a44789d027cc..383a0ea2ab91 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -3,6 +3,7 @@
#define _IPV6_H
#include <uapi/linux/ipv6.h>
+#include <linux/cache.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
@@ -10,9 +11,16 @@
* This structure contains configuration options per IPv6 link.
*/
struct ipv6_devconf {
- __s32 forwarding;
+ /* RX & TX fastpath fields. */
+ __cacheline_group_begin(ipv6_devconf_read_txrx);
+ __s32 disable_ipv6;
__s32 hop_limit;
__s32 mtu6;
+ __s32 forwarding;
+ __s32 disable_policy;
+ __s32 proxy_ndp;
+ __cacheline_group_end(ipv6_devconf_read_txrx);
+
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
@@ -27,11 +35,14 @@ struct ipv6_devconf {
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
+ __s32 regen_min_advance;
__s32 regen_max_retry;
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
+ __s32 accept_ra_min_lft;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -42,7 +53,6 @@ struct ipv6_devconf {
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
- __s32 proxy_ndp;
__s32 accept_source_route;
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -50,9 +60,8 @@ struct ipv6_devconf {
__s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
- __s32 mc_forwarding;
+ atomic_t mc_forwarding;
#endif
- __s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
@@ -60,6 +69,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
+ __s32 accept_untracked_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
@@ -72,9 +82,13 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
- __s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
+ __u32 ioam6_id;
+ __u32 ioam6_id_wide;
+ __u8 ioam6_enabled;
+ __u8 ndisc_evict_nocarrier;
+ __u8 ra_honor_pio_life;
struct ctl_table_header *sysctl_header;
};
@@ -84,7 +98,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -129,6 +142,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
+ __u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@@ -138,6 +152,9 @@ struct inet6_skb_parm {
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
+#define IP6SKB_SEG6 256
+#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -177,17 +194,6 @@ static inline int inet6_sdif(const struct sk_buff *skb)
return 0;
}
-/* can not be used in TCP layer after tcp_v6_fill_cb */
-static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if defined(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
- return true;
-#endif
- return false;
-}
-
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
@@ -202,14 +208,7 @@ struct inet6_cork {
u8 tclass;
};
-/**
- * struct ipv6_pinfo - ipv6 private area
- *
- * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc)
- * this _must_ be the last member, so that inet6_sk_generic
- * is able to calculate its offset from the base struct sock
- * by using the struct proto->slab_obj_size member. -acme
- */
+/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_pktinfo sticky_pktinfo;
@@ -221,28 +220,9 @@ struct ipv6_pinfo {
__be32 flow_label;
__u32 frag_size;
- /*
- * Packed in 16bits.
- * Omit one shift by putting the signed field at MSB.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
- __s16 hop_limit:9;
- __u16 __unused_1:7;
-#else
- __u16 __unused_1:7;
- __s16 hop_limit:9;
-#endif
+ s16 hop_limit;
+ u8 mcast_hops;
-#if defined(__BIG_ENDIAN_BITFIELD)
- /* Packed in 16bits. */
- __s16 mcast_hops:9;
- __u16 __unused_2:6,
- mc_loop:1;
-#else
- __u16 mc_loop:1,
- __unused_2:6;
- __s16 mcast_hops:9;
-#endif
int ucast_oif;
int mcast_oif;
@@ -270,27 +250,16 @@ struct ipv6_pinfo {
} rxopt;
/* sockopt flags */
- __u16 recverr:1,
- sndflow:1,
- repflow:1,
- pmtudisc:3,
- padding:1, /* 1 bit hole */
- srcprefs:3, /* 001: prefer temporary address
+ __u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- dontfrag:1,
- autoflowlabel:1,
- autoflowlabel_set:1,
- mc_all:1,
- recverr_rfc4884:1,
- rtalert_isolate:1;
+ __u8 pmtudisc;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
__u32 dst_cookie;
- __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
@@ -302,6 +271,18 @@ struct ipv6_pinfo {
struct inet6_cork cork;
};
+/* We currently use available bits from inet_sk(sk)->inet_flags,
+ * this could change in the future.
+ */
+#define inet6_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
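A sketch of how the former ipv6_pinfo bitfields are exercised through these
helpers, assuming matching INET_FLAGS_* enum entries (e.g. INET_FLAGS_SNDFLOW)
exist and with ``np`` and ``fl6`` standing for the usual ipv6_pinfo and
flow-info locals:

.. code-block:: c

	if (inet6_test_bit(SNDFLOW, sk))		/* was np->sndflow */
		fl6.flowlabel = np->flow_label;

	inet6_assign_bit(REPFLOW, sk, val != 0);	/* was np->repflow */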
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
struct raw6_sock {
/* inet_sock has to be the first member of raw6_sock */
@@ -310,19 +291,19 @@ struct raw6_sock {
__u32 offset; /* checksum offset */
struct icmp6_filter filter;
__u32 ip6mr_table;
- /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
struct udp6_sock {
struct udp_sock udp;
- /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
struct tcp6_sock {
struct tcp_sock tcp;
- /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
@@ -340,24 +321,9 @@ static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
}
-static inline struct raw6_sock *raw6_sk(const struct sock *sk)
-{
- return (struct raw6_sock *)sk;
-}
+#define raw6_sk(ptr) container_of_const(ptr, struct raw6_sock, inet.sk)
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- int ancestor_size = sizeof(struct inet_sock);
-
- if (sk_from->sk_family == PF_INET6)
- ancestor_size += sizeof(struct ipv6_pinfo);
-
- __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
-}
-
-#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
-#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
inet6_sk(sk)->rxopt.bits.rxinfo)
@@ -374,7 +340,6 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
return ipv6_only_sock(sk);
}
#else
-#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
@@ -388,19 +353,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
return NULL;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return NULL;
-}
-
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
return NULL;
}
#define inet6_rcv_saddr(__sk) NULL
-#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1b7f4dfee35b..97baa937ab5b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -71,6 +71,8 @@ enum irqchip_irq_state;
* it from the spurious interrupt detection
* mechanism and from core side polling.
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
+ * IRQ_HIDDEN - Don't show up in /proc/interrupts
+ * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -97,13 +99,15 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_HIDDEN = (1 << 20),
+ IRQ_NO_DEBUG = (1 << 21),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -114,7 +118,7 @@ enum {
 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
- * all descendent irqchips.
+ * all descendant irqchips.
*/
enum {
IRQ_SET_MASK_OK = 0,
@@ -147,7 +151,9 @@ struct irq_common_data {
#endif
void *handler_data;
struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
cpumask_var_t affinity;
+#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
cpumask_var_t effective_affinity;
#endif
@@ -173,7 +179,7 @@ struct irq_common_data {
struct irq_data {
u32 mask;
unsigned int irq;
- unsigned long hwirq;
+ irq_hw_number_t hwirq;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
@@ -209,37 +215,40 @@ struct irq_data {
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
- * required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
* IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
* irq_chip::irq_set_affinity() when deactivated.
+ * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
+ * the irqchip has the IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress in which
+ * case it must be resent at the next available opportunity.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
- IRQD_SETAFFINITY_PENDING = (1 << 8),
- IRQD_ACTIVATED = (1 << 9),
- IRQD_NO_BALANCING = (1 << 10),
- IRQD_PER_CPU = (1 << 11),
- IRQD_AFFINITY_SET = (1 << 12),
- IRQD_LEVEL = (1 << 13),
- IRQD_WAKEUP_STATE = (1 << 14),
- IRQD_MOVE_PCNTXT = (1 << 15),
- IRQD_IRQ_DISABLED = (1 << 16),
- IRQD_IRQ_MASKED = (1 << 17),
- IRQD_IRQ_INPROGRESS = (1 << 18),
- IRQD_WAKEUP_ARMED = (1 << 19),
- IRQD_FORWARDED_TO_VCPU = (1 << 20),
- IRQD_AFFINITY_MANAGED = (1 << 21),
- IRQD_IRQ_STARTED = (1 << 22),
- IRQD_MANAGED_SHUTDOWN = (1 << 23),
- IRQD_SINGLE_TARGET = (1 << 24),
- IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
- IRQD_CAN_RESERVE = (1 << 26),
- IRQD_MSI_NOMASK_QUIRK = (1 << 27),
- IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
- IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
+ IRQD_SETAFFINITY_PENDING = BIT(8),
+ IRQD_ACTIVATED = BIT(9),
+ IRQD_NO_BALANCING = BIT(10),
+ IRQD_PER_CPU = BIT(11),
+ IRQD_AFFINITY_SET = BIT(12),
+ IRQD_LEVEL = BIT(13),
+ IRQD_WAKEUP_STATE = BIT(14),
+ IRQD_MOVE_PCNTXT = BIT(15),
+ IRQD_IRQ_DISABLED = BIT(16),
+ IRQD_IRQ_MASKED = BIT(17),
+ IRQD_IRQ_INPROGRESS = BIT(18),
+ IRQD_WAKEUP_ARMED = BIT(19),
+ IRQD_FORWARDED_TO_VCPU = BIT(20),
+ IRQD_AFFINITY_MANAGED = BIT(21),
+ IRQD_IRQ_STARTED = BIT(22),
+ IRQD_MANAGED_SHUTDOWN = BIT(23),
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
+ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
+ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -297,7 +306,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
/*
* Must only be called of irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
*/
static inline void irqd_set_single_target(struct irq_data *d)
{
@@ -319,6 +328,11 @@ static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
}
+static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
+}
+
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
@@ -409,29 +423,24 @@ static inline bool irqd_can_reserve(struct irq_data *d)
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}
-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
-{
- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
-}
-
-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
{
- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
}
-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
}
-static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
{
- __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+ __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
}
-static inline bool irqd_affinity_on_activate(struct irq_data *d)
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+ return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
}
#undef __irqd_to_state
@@ -444,7 +453,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
- * @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -491,7 +499,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @flags: chip specific flags
*/
struct irq_chip {
- struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -512,9 +519,10 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
-
+#endif
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
@@ -545,27 +553,34 @@ struct irq_chip {
/*
* irq_chip specific flags
*
- * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
- * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
- * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
- * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
- * when irq enabled
- * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
- * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
- * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
- * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
- * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
+ * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
+ * when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
+ * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
+ * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
+ * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs
+ * in the suspend path if they are in the disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
+ * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip
*/
enum {
- IRQCHIP_SET_TYPE_MASKED = (1 << 0),
- IRQCHIP_EOI_IF_HANDLED = (1 << 1),
- IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
- IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
- IRQCHIP_SKIP_SET_WAKE = (1 << 4),
- IRQCHIP_ONESHOT_SAFE = (1 << 5),
- IRQCHIP_EOI_THREADED = (1 << 6),
- IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
- IRQCHIP_SUPPORTS_NMI = (1 << 8),
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+ IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+ IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
+ IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
+ IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_SUPPORTS_NMI = (1 << 8),
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
+ IRQCHIP_IMMUTABLE = (1 << 11),
};
#include <linux/irqdesc.h>
@@ -589,8 +604,10 @@ struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
+#endif
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
@@ -692,10 +709,11 @@ extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
-irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
-static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+static inline void irq_set_chip_and_handler(unsigned int irq,
+ const struct irq_chip *chip,
irq_flow_handler_t handle)
{
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
@@ -785,7 +803,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
}
/* Set/get chip/data for an IRQ: */
-extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
@@ -858,21 +876,34 @@ static inline int irq_data_get_node(struct irq_data *d)
return irq_common_data_get_node(d->common);
}
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- struct irq_data *d = irq_get_irq_data(irq);
+#ifdef CONFIG_SMP
+ return d->common->affinity;
+#else
+ return cpumask_of(0);
+#endif
+}
- return d ? d->common->affinity : NULL;
+static inline void irq_data_update_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+ cpumask_copy(d->common->affinity, m);
+#endif
}
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
{
- return d->common->affinity;
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_affinity_mask(d) : NULL;
}
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
return d->common->effective_affinity;
}
@@ -887,12 +918,20 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
{
}
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- return d->common->affinity;
+ return irq_data_get_affinity_mask(d);
}
#endif
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_effective_affinity_mask(d) : NULL;
+}
+
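With the accessors now returning const masks, a chip's ->irq_set_affinity()
callback is expected to update the stored mask through the helper rather than
writing it directly. A sketch, with the hardware programming elided:

.. code-block:: c

	static int my_chip_set_affinity(struct irq_data *d,
					const struct cpumask *mask, bool force)
	{
		/* ... program the interrupt routing hardware ... */
		irq_data_update_affinity(d, mask);	/* no-op on !SMP */
		return IRQ_SET_MASK_OK_NOCOPY;
	}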
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
@@ -908,7 +947,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
- irq_alloc_descs(-1, 0, 1, node)
+ irq_alloc_descs(-1, 1, 1, node)
#define irq_alloc_desc_at(at, node) \
irq_alloc_descs(at, at, 1, node)
@@ -923,7 +962,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
#define devm_irq_alloc_desc(dev, node) \
- devm_irq_alloc_descs(dev, -1, 0, 1, node)
+ devm_irq_alloc_descs(dev, -1, 1, 1, node)
#define devm_irq_alloc_desc_at(dev, at, node) \
devm_irq_alloc_descs(dev, at, at, 1, node)
@@ -940,21 +979,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-unsigned int irq_alloc_hwirqs(int cnt, int node);
-static inline unsigned int irq_alloc_hwirq(int node)
-{
- return irq_alloc_hwirqs(1, node);
-}
-void irq_free_hwirqs(unsigned int from, int cnt);
-static inline void irq_free_hwirq(unsigned int irq)
-{
- return irq_free_hwirqs(irq, 1);
-}
-int arch_setup_hwirq(unsigned int irq, int node);
-void arch_teardown_hwirq(unsigned int irq);
-#endif
-
#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq);
#endif
@@ -1108,6 +1132,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq);
+void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler);
@@ -1236,6 +1261,9 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
/*
* Registers a generic IRQ handling function as the top-level IRQ handler in
@@ -1252,6 +1280,15 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
* top-level IRQ handler.
*/
extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
+#else
+#ifndef set_handle_irq
+#define set_handle_irq(handle_irq) \
+ do { \
+ (void)handle_irq; \
+ WARN_ON(1); \
+ } while (0)
+#endif
#endif
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
deleted file mode 100644
index 6e8895cd4d92..000000000000
--- a/include/linux/irq_cpustat.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __irq_cpustat_h
-#define __irq_cpustat_h
-
-/*
- * Contains default mappings for irq_cpustat_t, used by almost every
- * architecture. Some arch (like s390) have per cpu hardware pages and
- * they define their own mappings for irq_stat.
- *
- * Keith Owens <kaos@ocs.com.au> July 2000.
- */
-
-
-/*
- * Simple wrappers reducing source bloat. Define all irq_stat fields
- * here, even ones that are arch dependent. That way we get common
- * definitions instead of differing sets for each arch.
- */
-
-#ifndef __ARCH_IRQ_STAT
-DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu))
-#endif
-
-/* arch dependent irq_stat fields */
-#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
-
-#endif /* __irq_cpustat_h */
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 30823780c192..136f2980cba3 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -3,6 +3,7 @@
#define _LINUX_IRQ_WORK_H
#include <linux/smp_types.h>
+#include <linux/rcuwait.h>
/*
* An entry can be in one of four states:
@@ -14,28 +15,44 @@
*/
struct irq_work {
- union {
- struct __call_single_node node;
- struct {
- struct llist_node llnode;
- atomic_t flags;
- };
- };
+ struct __call_single_node node;
void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
};
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
+ .node = { .u_flags = (_flags), }, \
+ .func = (_func), \
+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f) \
+ struct irq_work name = IRQ_WORK_INIT(_f)
+
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- atomic_set(&work->flags, 0);
- work->func = func;
+ *work = IRQ_WORK_INIT(func);
+}
+
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \
- .flags = ATOMIC_INIT(0), \
- .func = (_f) \
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
@@ -49,6 +66,9 @@ void irq_work_sync(struct irq_work *work);
void irq_work_run(void);
bool irq_work_needs_cpu(void);
void irq_work_single(void *arg);
+
+void arch_irq_work_raise(void);
+
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
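[Editor's sketch] The reworked struct folds the old flags/llnode union into __call_single_node, and the IRQ_WORK_INIT()/DEFINE_IRQ_WORK() initializers are now the supported way to set up a static entry. A minimal usage sketch (driver names hypothetical):

#include <linux/irq_work.h>
#include <linux/printk.h>

static void acme_work_fn(struct irq_work *work)
{
	pr_info("deferred out of NMI context\n");
}

static DEFINE_IRQ_WORK(acme_work, acme_work_fn);

void acme_nmi_handler(void)
{
	/* Safe from NMI/hard-irq context; arch_irq_work_raise() kicks the IPI */
	irq_work_queue(&acme_work);
}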
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 67351aac65ef..d5e6024cb2a8 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -14,8 +14,15 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
+/* Undefined on purpose */
+extern of_irq_init_cb_t typecheck_irq_init_cb;
+
+#define typecheck_irq_init_cb(fn) \
+ (__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
+
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
@@ -23,29 +30,34 @@
*
* @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
- * @compstr: compatible string of the irqchip driver
+ * @compat: compatible string of the irqchip driver
* @fn: initialization function
*/
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+#define IRQCHIP_DECLARE(name, compat, fn) \
+ OF_DECLARE_2(irqchip, name, compat, typecheck_irq_init_cb(fn))
extern int platform_irqchip_probe(struct platform_device *pdev);
#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
static const struct of_device_id drv_name##_irqchip_match_table[] = {
-#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn },
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
+ .data = typecheck_irq_init_cb(fn), },
+
-#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
{}, \
}; \
MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
-static struct platform_driver drv_name##_driver = { \
- .probe = platform_irqchip_probe, \
+static struct platform_driver drv_name##_driver = { \
+ .probe = IS_ENABLED(CONFIG_IRQCHIP) ? \
+ platform_irqchip_probe : NULL, \
.driver = { \
.name = #drv_name, \
.owner = THIS_MODULE, \
.of_match_table = drv_name##_irqchip_match_table, \
.suppress_bind_attrs = true, \
+ __VA_ARGS__ \
}, \
}; \
builtin_platform_driver(drv_name##_driver)
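[Editor's sketch] The typecheck_irq_init_cb() wrapper makes both IRQCHIP_DECLARE() and IRQCHIP_MATCH() reject an init function whose signature deviates from of_irq_init_cb_t at compile time. A hedged sketch of a declaration that passes the check (the "acme" names are invented):

#include <linux/irqchip.h>
#include <linux/of.h>

static int __init acme_intc_init(struct device_node *node,
				 struct device_node *parent)
{
	/* ioremap registers, add an irq_domain, set the chained handler... */
	return 0;
}

/* Compiles only because acme_intc_init matches of_irq_init_cb_t */
IRQCHIP_DECLARE(acme_intc, "acme,intc-v1", acme_intc_init);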
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index fa8c0455c352..1177f3a1aed5 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -7,8 +7,7 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
-#include <linux/types.h>
-#include <linux/ioport.h>
+#include <linux/irqchip/arm-vgic-info.h>
#define GICD_INT_DEF_PRI 0xa0
#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
@@ -16,28 +15,6 @@
(GICD_INT_DEF_PRI << 8) |\
GICD_INT_DEF_PRI)
-enum gic_type {
- GIC_V2,
- GIC_V3,
-};
-
-struct gic_kvm_info {
- /* GIC type */
- enum gic_type type;
- /* Virtual CPU interface */
- struct resource vcpu;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface */
- struct resource vctrl;
- /* vlpi support */
- bool has_v4;
- /* rvpeid support */
- bool has_v4_1;
-};
-
-const struct gic_kvm_info *gic_get_kvm_info(void);
-
struct irq_domain;
struct fwnode_handle;
int gicv2m_init(struct fwnode_handle *parent_handle,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index f6d092fdb93d..728691365464 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -127,6 +127,8 @@
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+#define GICR_CTLR_CES (1UL << 1)
+#define GICR_CTLR_IR (1UL << 2)
#define GICR_CTLR_RWP (1UL << 3)
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
@@ -575,67 +577,11 @@
#define ICC_SRE_EL1_DFB (1U << 1)
#define ICC_SRE_EL1_SRE (1U << 0)
-/*
- * Hypervisor interface registers (SRE only)
- */
-#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
-
-#define ICH_LR_EOI (1ULL << 41)
-#define ICH_LR_GROUP (1ULL << 60)
-#define ICH_LR_HW (1ULL << 61)
-#define ICH_LR_STATE (3ULL << 62)
-#define ICH_LR_PENDING_BIT (1ULL << 62)
-#define ICH_LR_ACTIVE_BIT (1ULL << 63)
-#define ICH_LR_PHYS_ID_SHIFT 32
-#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
-#define ICH_LR_PRIORITY_SHIFT 48
-#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_NPIE (1 << 3)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
-#define ICH_VMCR_ACK_CTL_SHIFT 2
-#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
-#define ICH_VMCR_FIQ_EN_SHIFT 3
-#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
-#define ICH_VMCR_CBPR_SHIFT 4
-#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
-#define ICH_VMCR_EOIM_SHIFT 9
-#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
-#define ICH_VMCR_BPR1_SHIFT 18
-#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
-#define ICH_VMCR_BPR0_SHIFT 21
-#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
-#define ICH_VMCR_PMR_SHIFT 24
-#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICH_VMCR_ENG0_SHIFT 0
-#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
-#define ICH_VMCR_ENG1_SHIFT 1
-#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-
#define ICC_IAR1_EL1_SPURIOUS 0x3ff
#define ICC_SRE_EL2_SRE (1 << 0)
@@ -671,7 +617,7 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
- bool lpi_enabled;
+ u64 flags;
cpumask_t *vpe_table_mask;
void *vpe_l1_base;
} __percpu *rdist;
@@ -680,6 +626,7 @@ struct rdists {
u64 flags;
u32 gicd_typer;
u32 gicd_typer2;
+ int cpuhp_memreserve_state;
bool has_vlpis;
bool has_rvpeid;
bool has_direct_lpi;
@@ -688,6 +635,7 @@ struct rdists {
struct irq_domain;
struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 6976b8331b60..2c63375bbd43 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -39,6 +39,8 @@ struct its_vpe {
irq_hw_number_t vpe_db_lpi;
/* VPE resident */
bool resident;
+ /* VPT parse complete */
+ bool ready;
union {
/* GICv4.0 implementations */
struct {
@@ -104,6 +106,7 @@ enum its_vcpu_info_cmd_type {
PROP_UPDATE_AND_INV_VLPI,
SCHEDULE_VPE,
DESCHEDULE_VPE,
+ COMMIT_VPE,
INVALL_VPE,
PROP_UPDATE_VSGI,
};
@@ -129,6 +132,7 @@ int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
+int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
@@ -141,4 +145,6 @@ int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops);
+bool gic_cpuif_has_vsgi(void);
+
#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 5686711b0f40..2223f95079ce 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -151,12 +151,6 @@ int gic_of_init(struct device_node *node, struct device_node *parent);
*/
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
-/*
- * Legacy platforms not converted to DT yet must use this to init
- * their GIC
- */
-void gic_init(void __iomem *dist , void __iomem *cpu);
-
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
new file mode 100644
index 000000000000..a75b2c7de69d
--- /dev/null
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/arm-vgic-info.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ /* Full GICv2 */
+ GIC_V2,
+ /* Full GICv3, optionally with v2 compat */
+ GIC_V3,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* No interrupt mask, no need to use the above field */
+ bool no_maint_irq_mask;
+ /* Virtual control interface */
+ struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
+ /* Deactivation impaired, subpar stuff */
+ bool no_hw_deactivation;
+};
+
+#ifdef CONFIG_KVM
+void vgic_set_kvm_info(const struct gic_kvm_info *info);
+#else
+static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {}
+#endif
+
+#endif
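[Editor's sketch] A sketch of the intended producer side, assuming a GICv2 driver that fills the structure from firmware data before handing it to KVM (field values and names are placeholders):

#include <linux/irqchip/arm-vgic-info.h>

static struct gic_kvm_info acme_gic_info = {
	.type = GIC_V2,
};

static void acme_gic_setup_kvm_info(struct resource *vcpu, int maint_irq)
{
	acme_gic_info.vcpu = *vcpu;		/* from DT/ACPI */
	acme_gic_info.maint_irq = maint_irq;	/* maintenance PPI */
	vgic_set_kvm_info(&acme_gic_info);	/* stubbed out without KVM */
}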
diff --git a/include/linux/irqchip/irq-ixp4xx.h b/include/linux/irqchip/irq-ixp4xx.h
deleted file mode 100644
index 9395917d6936..000000000000
--- a/include/linux/irqchip/irq-ixp4xx.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQ_IXP4XX_H
-#define __IRQ_IXP4XX_H
-
-#include <linux/ioport.h>
-struct irq_domain;
-
-void ixp4xx_irq_init(resource_size_t irqbase,
- bool is_356);
-struct irq_domain *ixp4xx_get_irq_domain(void);
-
-#endif /* __IRQ_IXP4XX_H */
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
deleted file mode 100644
index cb8455c87c8a..000000000000
--- a/include/linux/irqchip/mmp.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQCHIP_MMP_H
-#define __IRQCHIP_MMP_H
-
-extern struct irq_chip icu_irq_chip;
-
-#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h
deleted file mode 100644
index 4f447e3f0f3a..000000000000
--- a/include/linux/irqchip/mxs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_IRQCHIP_MXS_H
-#define __LINUX_IRQCHIP_MXS_H
-
-extern void icoll_handle_irq(struct pt_regs *);
-
-#endif
diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h
deleted file mode 100644
index a978fc8c7996..000000000000
--- a/include/linux/irqchip/versatile-fpga.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PLAT_FPGA_IRQ_H
-#define PLAT_FPGA_IRQ_H
-
-struct device_node;
-struct pt_regs;
-
-void fpga_handle_irq(struct pt_regs *regs);
-void fpga_irq_init(void __iomem *, const char *, int, int, u32,
- struct device_node *node);
-int fpga_irq_of_init(struct device_node *node,
- struct device_node *parent);
-
-#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 5745491303e0..d9451d456a73 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -32,7 +32,7 @@ struct pt_regs;
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @threads_handled: stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
@@ -102,6 +102,9 @@ struct irq_desc {
int parent_irq;
struct module *owner;
const char *name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+ struct hlist_node resend_node;
+#endif
} ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_SPARSE_IRQ
@@ -113,6 +116,12 @@ static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
+ unsigned int cpu)
+{
+ return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+}
+
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
return container_of(data->common, struct irq_desc, irq_common_data);
@@ -152,39 +161,25 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc)
desc->handle_irq(desc);
}
+int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
+int generic_handle_irq_safe(unsigned int irq);
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+#ifdef CONFIG_IRQ_DOMAIN
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Return -EINVAL if
- * conversion failed. Providing a NULL domain indicates that the
- * conversion has already been done.
+ * conversion failed.
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs);
-
-static inline int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs)
-{
- return __handle_domain_irq(domain, hwirq, true, regs);
-}
-
-#ifdef CONFIG_IRQ_DOMAIN
-int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
- struct pt_regs *regs);
-#endif
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
- return desc->action != NULL;
-}
-
-static inline int irq_has_action(unsigned int irq)
-{
- return irq_desc_has_action(irq_to_desc(irq));
+ return desc && desc->action != NULL;
}
/**
@@ -218,50 +213,42 @@ static inline void irq_set_handler_locked(struct irq_data *data,
* Must be called with irq_desc locked and valid parameters.
*/
static inline void
-irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+irq_set_chip_handler_name_locked(struct irq_data *data,
+ const struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc = irq_data_to_desc(data);
desc->handle_irq = handler;
desc->name = name;
- data->chip = chip;
+ data->chip = (struct irq_chip *)chip;
}
+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);
+
static inline bool irq_balancing_disabled(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
+ return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
}
static inline bool irq_is_percpu(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_PER_CPU;
+ return irq_check_status_bit(irq, IRQ_PER_CPU);
}
static inline bool irq_is_percpu_devid(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
+ return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}
+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class);
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (desc) {
- lockdep_set_class(&desc->lock, lock_class);
- lockdep_set_class(&desc->request_mutex, request_class);
- }
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ __irq_set_lockdep_class(irq, lock_class, request_class);
}
#endif
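[Editor's sketch] With the old __handle_domain_irq() variants gone, demultiplexing drivers call the generic_handle_domain_*() family directly. A sketch for a muxed child interrupt (names hypothetical):

#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

static void acme_demux_one(struct irq_domain *domain, irq_hw_number_t hwirq)
{
	/*
	 * The _safe variant disables local interrupts itself, so it may
	 * also be called from non-irq context; plain
	 * generic_handle_domain_irq() expects to run with interrupts off.
	 */
	if (generic_handle_domain_irq_safe(domain, hwirq))
		pr_warn_ratelimited("unmapped hwirq %lu\n",
				    (unsigned long)hwirq);
}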
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b37350c4fe37..21ecf582a0fe 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -31,22 +31,22 @@
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqdomain_defs.h>
#include <linux/irqhandler.h>
#include <linux/of.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
struct device_node;
+struct fwnode_handle;
struct irq_domain;
-struct of_device_id;
struct irq_chip;
struct irq_data;
+struct irq_desc;
struct cpumask;
struct seq_file;
struct irq_affinity_desc;
-
-/* Number of irqs reserved for a legacy isa controller */
-#define NUM_ISA_INTERRUPTS 16
+struct msi_parent_ops;
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
@@ -66,25 +66,9 @@ struct irq_fwspec {
u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
-/*
- * Should several domains have the same device node, but serve
- * different purposes (for example one domain is for PCI/MSI, and the
- * other for wired IRQs), they can be distinguished using a
- * bus-specific token. Most domains are expected to only carry
- * DOMAIN_BUS_ANY.
- */
-enum irq_domain_bus_token {
- DOMAIN_BUS_ANY = 0,
- DOMAIN_BUS_WIRED,
- DOMAIN_BUS_GENERIC_MSI,
- DOMAIN_BUS_PCI_MSI,
- DOMAIN_BUS_PLATFORM_MSI,
- DOMAIN_BUS_NEXUS,
- DOMAIN_BUS_IPI,
- DOMAIN_BUS_FSL_MC_MSI,
- DOMAIN_BUS_TI_SCI_INTA_MSI,
- DOMAIN_BUS_WAKEUP,
-};
+/* Conversion function from of_phandle_args fields to fwspec */
+void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+ unsigned int count, struct irq_fwspec *fwspec);
/**
* struct irq_domain_ops - Methods for irq_domain objects
@@ -128,62 +112,69 @@ struct irq_domain_ops {
#endif
};
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
struct irq_domain_chip_generic;
/**
* struct irq_domain - Hardware interrupt number translation object
- * @link: Element in global irq_domain list.
- * @name: Name of interrupt domain
- * @ops: pointer to irq_domain methods
- * @host_data: private data pointer for use by owner. Not touched by irq_domain
- * core code.
- * @flags: host per irq_domain flags
- * @mapcount: The number of mapped interrupts
+ * @link: Element in global irq_domain list.
+ * @name: Name of interrupt domain
+ * @ops: Pointer to irq_domain methods
+ * @host_data: Private data pointer for use by owner. Not touched by irq_domain
+ * core code.
+ * @flags: Per irq_domain flags
+ * @mapcount: The number of mapped interrupts
+ * @mutex: Domain lock, hierarchical domains use root domain's lock
+ * @root: Pointer to root domain, or containing structure if non-hierarchical
*
- * Optional elements
- * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
- * to swap it for the of_node via the irq_domain_get_of_node accessor
- * @gc: Pointer to a list of generic chips. There is a helper function for
- * setting up one or more generic chips for interrupt controllers
- * drivers using the generic chip library which uses this pointer.
- * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
- * @debugfs_file: dentry for the domain debugfs file
+ * Optional elements:
+ * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
+ * to swap it for the of_node via the irq_domain_get_of_node accessor
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ * setting up one or more generic chips for interrupt controller
+ * drivers using the generic chip library which uses this pointer.
+ * @dev: Pointer to the device which instantiated the irqdomain
+ * With per device irq domains this is not necessarily the same
+ * as @pm_dev.
+ * @pm_dev: Pointer to a device that can be utilized for power management
+ * purposes related to the irq domain.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
*
- * Revmap data, used internally by irq_domain
- * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
- * support direct mapping
- * @revmap_size: Size of the linear map table @linear_revmap[]
- * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ * Revmap data, used internally by the irq domain code:
+ * @revmap_size: Size of the linear map table @revmap[]
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
- struct list_head link;
- const char *name;
- const struct irq_domain_ops *ops;
- void *host_data;
- unsigned int flags;
- unsigned int mapcount;
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+ unsigned int mapcount;
+ struct mutex mutex;
+ struct irq_domain *root;
/* Optional data */
- struct fwnode_handle *fwnode;
- enum irq_domain_bus_token bus_token;
- struct irq_domain_chip_generic *gc;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
+ struct irq_domain_chip_generic *gc;
+ struct device *dev;
+ struct device *pm_dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- struct irq_domain *parent;
+ struct irq_domain *parent;
#endif
-#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- struct dentry *debugfs_file;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ const struct msi_parent_ops *msi_parent_ops;
#endif
/* reverse map data. The linear map gets appended to the irq_domain */
- irq_hw_number_t hwirq_max;
- unsigned int revmap_direct_max_irq;
- unsigned int revmap_size;
- struct radix_tree_root revmap_tree;
- struct mutex revmap_tree_mutex;
- unsigned int linear_revmap[];
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_size;
+ struct radix_tree_root revmap_tree;
+ struct irq_data __rcu *revmap[] __counted_by(revmap_size);
};
/* Irq domain flags */
@@ -203,15 +194,19 @@ enum {
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
- /* Irq domain implements MSI remapping */
- IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
-
/*
- * Quirk to handle MSI implementations which do not provide
- * masking. Currently known to affect x86, but partially
- * handled in core code.
+ * Irq domain implements isolated MSI, see msi_device_has_isolated_msi()
*/
- IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6),
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI = (1 << 5),
+
+ /* Irq domain doesn't translate anything */
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6),
+
+ /* Irq domain is a MSI parent domain */
+ IRQ_DOMAIN_FLAG_MSI_PARENT = (1 << 8),
+
+ /* Irq domain is a MSI device domain */
+ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
@@ -226,6 +221,13 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
+static inline void irq_domain_set_pm_device(struct irq_domain *d,
+ struct device *dev)
+{
+ if (d)
+ d->pm_dev = dev;
+}
+
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
const char *name, phys_addr_t *pa);
@@ -255,24 +257,29 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
-struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
-extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern struct irq_domain *irq_get_default_host(void);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@@ -322,6 +329,15 @@ static inline struct irq_domain *irq_find_host(struct device_node *node)
return d;
}
+static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
+}
+
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
@@ -336,6 +352,8 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
{
return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
}
+
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
@@ -343,14 +361,10 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod
{
return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
}
-static inline struct irq_domain *irq_domain_add_legacy_isa(
- struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
- host_data);
-}
+
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+#endif
+
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
@@ -380,40 +394,49 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
extern void irq_domain_associate_many(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
-extern void irq_domain_disassociate(struct irq_domain *domain,
- unsigned int irq);
-extern unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
+extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
extern void irq_dispose_mapping(unsigned int virq);
+static inline unsigned int irq_create_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq)
+{
+ return irq_create_mapping_affinity(host, hwirq, NULL);
+}
+
+extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
+
+static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ return __irq_resolve_mapping(domain, hwirq, NULL);
+}
+
/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
- *
- * This is a fast path alternative to irq_find_mapping() that can be
- * called directly by irq controller code to save a handful of
- * instructions. It is always safe to call, but won't find irqs mapped
- * using the radix tree.
*/
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+static inline unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+ unsigned int irq;
+
+ if (__irq_resolve_mapping(domain, hwirq, &irq))
+ return irq;
+
+ return 0;
}
-extern unsigned int irq_find_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern int irq_create_strict_mappings(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
-static inline int irq_create_identity_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq)
+static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+ return irq_find_mapping(domain, hwirq);
}
extern const struct irq_domain_ops irq_domain_simple_ops;
@@ -447,7 +470,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
+ irq_hw_number_t hwirq,
+ const struct irq_chip *chip,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
@@ -490,7 +514,7 @@ extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
unsigned int virq,
irq_hw_number_t hwirq,
- struct irq_chip *chip,
+ const struct irq_chip *chip,
void *chip_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
@@ -509,6 +533,9 @@ extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs);
+extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
+ unsigned int virq);
+
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
@@ -535,12 +562,15 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
- return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT;
}
-extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
@@ -577,18 +607,35 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return false;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
return false;
}
-static inline bool
-irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
{
return false;
}
+
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#ifdef CONFIG_GENERIC_MSI_IRQ
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
+ unsigned int type);
+void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq);
+#else
+static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
+ unsigned int type)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
+{
+ WARN_ON_ONCE(1);
+}
+#endif
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
static inline struct irq_domain *irq_find_matching_fwnode(
@@ -596,10 +643,6 @@ static inline struct irq_domain *irq_find_matching_fwnode(
{
return NULL;
}
-static inline bool irq_domain_check_msi_remap(void)
-{
- return false;
-}
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
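[Editor's sketch] Taken together, the new entry points keep the familiar driver flow: create a domain, then map hwirqs on demand (irq_create_mapping() is now a thin wrapper around irq_create_mapping_affinity()). A minimal sketch with invented names:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops acme_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
};

static int acme_intc_probe(struct device_node *np)
{
	struct irq_domain *d;
	unsigned int virq;

	d = irq_domain_add_linear(np, 32, &acme_domain_ops, NULL);
	if (!d)
		return -ENOMEM;

	virq = irq_create_mapping(d, 5);	/* NULL affinity under the hood */
	return virq ? 0 : -EINVAL;
}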
diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h
new file mode 100644
index 000000000000..5c1fe6f1fcde
--- /dev/null
+++ b/include/linux/irqdomain_defs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQDOMAIN_DEFS_H
+#define _LINUX_IRQDOMAIN_DEFS_H
+
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
+ DOMAIN_BUS_FSL_MC_MSI,
+ DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
+ DOMAIN_BUS_VMD_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSIX,
+ DOMAIN_BUS_DMAR,
+ DOMAIN_BUS_AMDVI,
+ DOMAIN_BUS_PCI_DEVICE_IMS,
+ DOMAIN_BUS_DEVICE_MSI,
+ DOMAIN_BUS_WIRED_TO_MSI,
+};
+
+#endif /* _LINUX_IRQDOMAIN_DEFS_H */
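[Editor's sketch] A short illustration of what the tokens buy: two domains can share one fwnode and still be told apart at lookup time (helper name invented):

#include <linux/irqdomain.h>

/* Returns the PCI/MSI flavour of a controller that also owns a wired domain */
static struct irq_domain *acme_find_msi_domain(struct fwnode_handle *fwnode)
{
	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}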
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3ed4e8771b64..147feebd508c 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -12,7 +12,9 @@
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H
+#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
+#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>
@@ -20,32 +22,19 @@
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
extern void lockdep_softirqs_off(unsigned long ip);
- extern void lockdep_hardirqs_on_prepare(unsigned long ip);
+ extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
#else
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
- static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
-/* Per-task IRQ trace events information. */
-struct irqtrace_events {
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
-};
-
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
@@ -71,14 +60,6 @@ do { \
do { \
__this_cpu_dec(hardirq_context); \
} while (0)
-# define lockdep_softirq_enter() \
-do { \
- current->softirq_context++; \
-} while (0)
-# define lockdep_softirq_exit() \
-do { \
- current->softirq_context--; \
-} while (0)
# define lockdep_hrtimer_enter(__hrtimer) \
({ \
@@ -107,14 +88,14 @@ do { \
current->irq_config = 0; \
} while (0)
-# define lockdep_irq_work_enter(__work) \
+# define lockdep_irq_work_enter(_flags) \
do { \
- if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
current->irq_config = 1; \
} while (0)
-# define lockdep_irq_work_exit(__work) \
+# define lockdep_irq_work_exit(_flags) \
do { \
- if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
current->irq_config = 0; \
} while (0)
@@ -140,6 +121,21 @@ do { \
# define lockdep_irq_work_exit(__work) do { } while (0)
#endif
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
+# define lockdep_softirq_enter() \
+do { \
+ current->softirq_context++; \
+} while (0)
+# define lockdep_softirq_exit() \
+do { \
+ current->softirq_context--; \
+} while (0)
+
+#else
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+#endif
+
#if defined(CONFIG_IRQSOFF_TRACER) || \
defined(CONFIG_PREEMPT_TRACER)
extern void stop_critical_timings(void);
@@ -149,6 +145,17 @@ do { \
# define start_critical_timings() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_IRQFLAGS
+extern void warn_bogus_irq_restore(void);
+#define raw_check_bogus_irq_restore() \
+ do { \
+ if (unlikely(!arch_irqs_disabled())) \
+ warn_bogus_irq_restore(); \
+ } while (0)
+#else
+#define raw_check_bogus_irq_restore() do { } while (0)
+#endif
+
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
@@ -162,6 +169,7 @@ do { \
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
+ raw_check_bogus_irq_restore(); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
@@ -248,4 +256,10 @@ do { \
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+ local_irq_save(_T->flags),
+ local_irq_restore(_T->flags),
+ unsigned long flags)
+
#endif
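[Editor's sketch] The two guards build on the cleanup.h infrastructure, so an irq-off section can be tied to a scope instead of manually paired disable/enable calls. A hedged sketch:

#include <linux/cleanup.h>
#include <linux/irqflags.h>

static unsigned long acme_events;	/* example state touched with irqs off */

void acme_count_event(void)
{
	guard(irqsave)();	/* flags restored automatically at scope exit */
	acme_events++;
}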
diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
new file mode 100644
index 000000000000..c13f0d915097
--- /dev/null
+++ b/include/linux/irqflags_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQFLAGS_TYPES_H
+#define _LINUX_IRQFLAGS_TYPES_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+#endif
+
+#endif /* _LINUX_IRQFLAGS_TYPES_H */
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index c30f454a9518..72dd1eb3a0e7 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
*/
struct irq_desc;
-struct irq_data;
+
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
#endif
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index bd4c066ad39b..d426c7ad92bf 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -3,10 +3,10 @@
#define _LINUX_IRQRETURN_H
/**
- * enum irqreturn
- * @IRQ_NONE interrupt was not from this device or was not handled
- * @IRQ_HANDLED interrupt was handled by this device
- * @IRQ_WAKE_THREAD handler requests to wake the handler thread
+ * enum irqreturn - irqreturn type values
+ * @IRQ_NONE: interrupt was not from this device or was not handled
+ * @IRQ_HANDLED: interrupt was handled by this device
+ * @IRQ_WAKE_THREAD: handler requests to wake the handler thread
*/
enum irqreturn {
IRQ_NONE = (0 << 0),
diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h
new file mode 100644
index 000000000000..61504a8c1b9e
--- /dev/null
+++ b/include/linux/isa-dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_ISA_DMA_H
+#define __LINUX_ISA_DMA_H
+
+#include <asm/dma.h>
+
+#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __LINUX_ISA_DMA_H */
diff --git a/include/linux/isa.h b/include/linux/isa.h
index 41336da0f4e7..4fbbf5e36e08 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -13,7 +13,7 @@
struct isa_driver {
int (*match)(struct device *, unsigned int);
int (*probe)(struct device *, unsigned int);
- int (*remove)(struct device *, unsigned int);
+ void (*remove)(struct device *, unsigned int);
void (*shutdown)(struct device *, unsigned int);
int (*suspend)(struct device *, unsigned int, pm_message_t);
int (*resume)(struct device *, unsigned int);
@@ -38,6 +38,32 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+#define module_isa_driver_init(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq) \
+static int __init __isa_driver##_init(void) \
+{ \
+ if (__num_irq != __num_isa_dev) { \
+ pr_err("%s: Number of irq (%u) does not match number of base (%u)\n", \
+ __isa_driver.driver.name, __num_irq, __num_isa_dev); \
+ return -EINVAL; \
+ } \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_exit(__isa_driver) \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit)
+
/**
 * module_isa_driver() - Helper macro for registering an ISA driver
* @__isa_driver: isa_driver struct
@@ -48,16 +74,22 @@ static inline void isa_unregister_driver(struct isa_driver *d)
* use this macro once, and calling it replaces module_init and module_exit.
*/
#define module_isa_driver(__isa_driver, __num_isa_dev) \
-static int __init __isa_driver##_init(void) \
-{ \
- return isa_register_driver(&(__isa_driver), __num_isa_dev); \
-} \
-module_init(__isa_driver##_init); \
-static void __exit __isa_driver##_exit(void) \
-{ \
- isa_unregister_driver(&(__isa_driver)); \
-} \
-module_exit(__isa_driver##_exit);
+module_isa_driver_init(__isa_driver, __num_isa_dev); \
+module_isa_driver_exit(__isa_driver)
+
+/**
+ * module_isa_driver_with_irq() - Helper macro for registering an ISA driver with irq
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ * @__num_irq: number of IRQ to register
+ *
+ * Helper macro for ISA drivers with irq that do not do anything special in
+ * module init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init and module_exit.
+ */
+#define module_isa_driver_with_irq(__isa_driver, __num_isa_dev, __num_irq) \
+module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq); \
+module_isa_driver_exit(__isa_driver)
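[Editor's sketch] A usage sketch of the irq-aware variant; the counts are checked at init time, so a mismatched table fails loudly with -EINVAL (driver name and counts invented):

#include <linux/isa.h>

static struct isa_driver acme_isa_driver = {
	.driver = {
		.name = "acme_isa",
	},
};

#define ACME_NUM_DEV 4
#define ACME_NUM_IRQ 4

/* Expands to module_init()/module_exit(); errors out if the counts differ */
module_isa_driver_with_irq(acme_isa_driver, ACME_NUM_DEV, ACME_NUM_IRQ);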
/**
 * max_num_isa_dev() - Maximum possible number of ISA devices to register
diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
index 11edb2109a68..dba18c95844b 100644
--- a/include/linux/isapnp.h
+++ b/include/linux/isapnp.h
@@ -75,9 +75,6 @@ static inline int isapnp_proc_done(void) { return 0; }
#endif
/* compat */
-struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from);
struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
@@ -92,9 +89,6 @@ static inline int isapnp_cfg_end(void) { return -ENODEV; }
static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; }
static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; }
-static inline struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from) { return NULL; }
static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index b7b45ca82bea..e2742748104d 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -13,26 +13,30 @@
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
-#include <linux/acpi.h>
+#include <linux/types.h>
/*
- * Logical location of iSCSI Boot Format Table.
- * If the value is NULL there is no iBFT on the machine.
+ * Physical location of iSCSI Boot Format Table.
+ * If the value is 0 there is no iBFT on the machine.
*/
-extern struct acpi_table_ibft *ibft_addr;
+extern phys_addr_t ibft_phys_addr;
+
+#ifdef CONFIG_ISCSI_IBFT_FIND
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
- * mapped address is set in the ibft_addr variable.
+ * physical address is set in the ibft_phys_addr variable.
*/
-#ifdef CONFIG_ISCSI_IBFT_FIND
-unsigned long find_ibft_region(unsigned long *sizep);
+void reserve_ibft_region(void);
+
+/*
+ * Physical bounds to search for the iSCSI Boot Format Table.
+ */
+#define IBFT_START 0x80000 /* 512kB */
+#define IBFT_END 0x100000 /* 1MB */
+
#else
-static inline unsigned long find_ibft_region(unsigned long *sizep)
-{
- *sizep = 0;
- return 0;
-}
+static inline void reserve_ibft_region(void) {}
#endif
#endif /* ISCSI_IBFT_H */
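[Editor's sketch] The old find_ibft_region() callers collapse into a single early reservation hook; a sketch of a call site, assuming an arch setup path (function name invented):

#include <linux/iscsi_ibft.h>

void __init acme_reserve_firmware_tables(void)
{
	/*
	 * Locates the table in the IBFT_START..IBFT_END window and
	 * reserves it; compiles to a no-op without CONFIG_ISCSI_IBFT_FIND.
	 */
	reserve_ibft_region();
}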
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
deleted file mode 100644
index 7de6822d7b1a..000000000000
--- a/include/linux/isicom.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ISICOM_H
-#define _LINUX_ISICOM_H
-
-#define YES 1
-#define NO 0
-
-/*
- * ISICOM Driver definitions ...
- *
- */
-
-#define ISICOM_NAME "ISICom"
-
-/*
- * PCI definitions
- */
-
-#define DEVID_COUNT 9
-#define VENDOR_ID 0x10b5
-
-/*
- * These are now officially allocated numbers
- */
-
-#define ISICOM_NMAJOR 112 /* normal */
-#define ISICOM_CMAJOR 113 /* callout */
-#define ISICOM_MAGIC (('M' << 8) | 'T')
-
-#define WAKEUP_CHARS 256 /* hard coded for now */
-#define TX_SIZE 254
-
-#define BOARD_COUNT 4
-#define PORT_COUNT (BOARD_COUNT*16)
-
-/* character sizes */
-
-#define ISICOM_CS5 0x0000
-#define ISICOM_CS6 0x0001
-#define ISICOM_CS7 0x0002
-#define ISICOM_CS8 0x0003
-
-/* stop bits */
-
-#define ISICOM_1SB 0x0000
-#define ISICOM_2SB 0x0004
-
-/* parity */
-
-#define ISICOM_NOPAR 0x0000
-#define ISICOM_ODPAR 0x0008
-#define ISICOM_EVPAR 0x0018
-
-/* flow control */
-
-#define ISICOM_CTSRTS 0x03
-#define ISICOM_INITIATE_XONXOFF 0x04
-#define ISICOM_RESPOND_XONXOFF 0x08
-
-#define BOARD(line) (((line) >> 4) & 0x3)
-
- /* isi kill queue bitmap */
-
-#define ISICOM_KILLTX 0x01
-#define ISICOM_KILLRX 0x02
-
- /* isi_board status bitmap */
-
-#define FIRMWARE_LOADED 0x0001
-#define BOARD_ACTIVE 0x0002
-#define BOARD_INIT 0x0004
-
- /* isi_port status bitmap */
-
-#define ISI_CTS 0x1000
-#define ISI_DSR 0x2000
-#define ISI_RI 0x4000
-#define ISI_DCD 0x8000
-#define ISI_DTR 0x0100
-#define ISI_RTS 0x0200
-
-
-#define ISI_TXOK 0x0001
-
-#endif /* ISICOM_H */
diff --git a/include/linux/ism.h b/include/linux/ism.h
new file mode 100644
index 000000000000..5428edd90982
--- /dev/null
+++ b/include/linux/ism.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal Shared Memory
+ *
+ * Definitions for the ISM module
+ *
+ * Copyright IBM Corp. 2022
+ */
+#ifndef _ISM_H
+#define _ISM_H
+
+#include <linux/workqueue.h>
+
+struct ism_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/* Unless we gain unexpected popularity, this limit should hold for a while */
+#define MAX_CLIENTS 8
+#define ISM_NR_DMBS 1920
+
+struct ism_dev {
+ spinlock_t lock; /* protects the ism device */
+ struct list_head list;
+ struct pci_dev *pdev;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+ u8 *sba_client_arr; /* entries are indices into 'clients' array */
+ void *priv[MAX_CLIENTS];
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ struct device dev;
+ u64 local_gid;
+ int ieq_idx;
+
+ struct ism_client *subs[MAX_CLIENTS];
+};
+
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct ism_client {
+ const char *name;
+ void (*add)(struct ism_dev *dev);
+ void (*remove)(struct ism_dev *dev);
+ void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
+ /* Parameter dmbemask contains a bit vector with updated DMBEs, if sent
+ * via ism_move_data(). Callback function must handle all active bits
+ * indicated by dmbemask.
+ */
+ void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
+ /* Private area - don't touch! */
+ u8 id;
+};
+
+int ism_register_client(struct ism_client *client);
+int ism_unregister_client(struct ism_client *client);
+static inline void *ism_get_priv(struct ism_dev *dev,
+ struct ism_client *client) {
+ return dev->priv[client->id];
+}
+
+static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
+ void *priv) {
+ dev->priv[client->id] = priv;
+}
+
+int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
+ struct ism_client *client);
+int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
+int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
+ unsigned int offset, void *data, unsigned int size);
+
+const struct smcd_ops *ism_get_smcd_ops(void);
+
+#endif /* _ISM_H */
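[Editor's sketch] A minimal client against the new API; the callbacks here are empty stubs purely for illustration, and the "acme" names are invented:

#include <linux/ism.h>

static void acme_add(struct ism_dev *dev) { }
static void acme_remove(struct ism_dev *dev) { }
static void acme_event(struct ism_dev *dev, struct ism_event *event) { }
static void acme_irq(struct ism_dev *dev, unsigned int bit, u16 dmbemask) { }

static struct ism_client acme_client = {
	.name		= "acme",
	.add		= acme_add,
	.remove		= acme_remove,
	.handle_event	= acme_event,
	.handle_irq	= acme_irq,
};

static int __init acme_ism_init(void)
{
	return ism_register_client(&acme_client);
}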
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index 2917ef990d43..8f972eaca2ed 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -9,8 +9,26 @@
* ---------------------------
* The change attribute (i_version) is mandated by NFSv4 and is mostly for
* knfsd, but is also used for other purposes (e.g. IMA). The i_version must
- * appear different to observers if there was a change to the inode's data or
- * metadata since it was last queried.
+ * appear larger to observers if there was an explicit change to the inode's
+ * data or metadata since it was last queried.
+ *
+ * An explicit change is one that would ordinarily result in a change to the
+ * inode status change time (aka ctime). i_version must appear to change, even
+ * if the ctime does not (since the whole point is to avoid missing updates due
+ * to timestamp granularity). If POSIX or other relevant spec mandates that the
+ * ctime must change due to an operation, then the i_version counter must be
+ * incremented as well.
+ *
+ * Making the i_version update completely atomic with the operation itself would
+ * be prohibitively expensive. Traditionally the kernel has updated the times on
+ * directories after an operation that changes its contents. For regular files,
+ * the ctime is usually updated before the data is copied into the cache for a
+ * write. This means that there is a window of time when an observer can
+ * associate a new timestamp with old file contents. Since the purpose of the
+ * i_version is to allow for better cache coherency, the i_version must always
+ * be updated after the results of the operation are visible. Updating it before
+ * and after a change is also permitted. (Note that no filesystems currently do
+ * this. Fixing that is a work-in-progress).
*
* Observers see the i_version as a 64-bit number that never decreases. If it
* remains the same since it was last checked, then nothing has changed in the
@@ -123,17 +141,12 @@ inode_peek_iversion_raw(const struct inode *inode)
static inline void
inode_set_max_iversion_raw(struct inode *inode, u64 val)
{
- u64 cur, old;
+ u64 cur = inode_peek_iversion_raw(inode);
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
+ do {
if (cur > val)
break;
- old = atomic64_cmpxchg(&inode->i_version, cur, val);
- if (likely(old == cur))
- break;
- cur = old;
- }
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
}
/**
@@ -177,56 +190,7 @@ inode_set_iversion_queried(struct inode *inode, u64 val)
I_VERSION_QUERIED);
}
-/**
- * inode_maybe_inc_iversion - increments i_version
- * @inode: inode with the i_version that should be updated
- * @force: increment the counter even if it's not necessary?
- *
- * Every time the inode is modified, the i_version field must be seen to have
- * changed by any observer.
- *
- * If "force" is set or the QUERIED flag is set, then ensure that we increment
- * the value, and clear the queried flag.
- *
- * In the common case where neither is set, then we can return "false" without
- * updating i_version.
- *
- * If this function returns false, and no other metadata has changed, then we
- * can avoid logging the metadata.
- */
-static inline bool
-inode_maybe_inc_iversion(struct inode *inode, bool force)
-{
- u64 cur, old, new;
-
- /*
- * The i_version field is not strictly ordered with any other inode
- * information, but the legacy inode_inc_iversion code used a spinlock
- * to serialize increments.
- *
- * Here, we add full memory barriers to ensure that any de-facto
- * ordering with other info is preserved.
- *
- * This barrier pairs with the barrier in inode_query_iversion()
- */
- smp_mb();
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
- /* If flag is clear then we needn't do anything */
- if (!force && !(cur & I_VERSION_QUERIED))
- return false;
-
- /* Since lowest bit is flag, add 2 to avoid it */
- new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
-
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
- return true;
-}
-
+bool inode_maybe_inc_iversion(struct inode *inode, bool force);
/**
* inode_inc_iversion - forcibly increment i_version
@@ -288,46 +252,21 @@ inode_peek_iversion(const struct inode *inode)
return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
}
-/**
- * inode_query_iversion - read i_version for later use
- * @inode: inode from which i_version should be read
- *
- * Read the inode i_version counter. This should be used by callers that wish
- * to store the returned i_version for later comparison. This will guarantee
- * that a later query of the i_version will result in a different value if
- * anything has changed.
- *
- * In this implementation, we fetch the current value, set the QUERIED flag and
- * then try to swap it into place with a cmpxchg, if it wasn't already set. If
- * that fails, we try again with the newly fetched value from the cmpxchg.
+/*
+ * For filesystems without any sort of change attribute, the best we can
+ * do is fake one up from the ctime:
*/
-static inline u64
-inode_query_iversion(struct inode *inode)
+static inline u64 time_to_chattr(const struct timespec64 *t)
{
- u64 cur, old, new;
-
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
- /* If flag is already set, then no need to swap */
- if (cur & I_VERSION_QUERIED) {
- /*
- * This barrier (and the implicit barrier in the
- * cmpxchg below) pairs with the barrier in
- * inode_maybe_inc_iversion().
- */
- smp_mb();
- break;
- }
+ u64 chattr = t->tv_sec;
- new = cur | I_VERSION_QUERIED;
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
- return cur >> I_VERSION_QUERIED_SHIFT;
+ chattr <<= 32;
+ chattr += t->tv_nsec;
+ return chattr;
}
+u64 inode_query_iversion(struct inode *inode);
+
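[Editor's sketch] The out-of-lined helpers keep the contract described above; a sketch of the query-then-compare pattern a cache-coherency consumer would use (the storage variable is illustrative):

#include <linux/iversion.h>

static u64 acme_cached_change_attr;

void acme_snapshot(struct inode *inode)
{
	/* Sets the QUERIED flag, so any later change must bump the counter */
	acme_cached_change_attr = inode_query_iversion(inode);
}

bool acme_is_stale(struct inode *inode)
{
	return !inode_eq_iversion(inode, acme_cached_change_attr);
}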
/**
* inode_eq_iversion_raw - check whether the raw i_version counter has changed
* @inode: inode to check
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 08f904943ab2..971f3e826e15 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -54,20 +54,20 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern ushort jbd2_journal_enable_debug;
void __jbd2_debug(int level, const char *file, const char *func,
unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
+#define jbd2_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(n, fmt, a...) /**/
+#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
extern void jbd2_free(void *ptr, size_t size);
#define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_DEFAULT_FAST_COMMIT_BLOCKS 256
#ifdef __KERNEL__
@@ -263,7 +263,12 @@ typedef struct journal_superblock_s
/* 0x0050 */
__u8 s_checksum_type; /* checksum type */
__u8 s_padding2[3];
- __u32 s_padding[42];
+/* 0x0054 */
+ __be32 s_num_fc_blks; /* Number of fast commit blocks */
+ __be32 s_head; /* blocknr of head of log, only uptodate
+ * while the filesystem is clean */
+/* 0x005C */
+ __u32 s_padding[40];
__be32 s_checksum; /* crc32c(superblock) */
/* 0x0100 */
@@ -271,17 +276,6 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
-/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */
-#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
@@ -289,6 +283,7 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020
/* See "journal feature predicate functions" below */
@@ -299,7 +294,8 @@ typedef struct journal_superblock_s
JBD2_FEATURE_INCOMPAT_64BIT | \
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
- JBD2_FEATURE_INCOMPAT_CSUM_V3)
+ JBD2_FEATURE_INCOMPAT_CSUM_V3 | \
+ JBD2_FEATURE_INCOMPAT_FAST_COMMIT)
#ifdef __KERNEL__
@@ -395,7 +391,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode - The jbd_inode type is the structure linking inodes in
+ * struct jbd2_inode - The jbd2_inode type is the structure linking inodes in
* ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
@@ -452,8 +448,8 @@ struct jbd2_inode {
struct jbd2_revoke_table_s;
/**
- * struct handle_s - The handle_s type is the concrete type associated with
- * handle_t.
+ * struct jbd2_journal_handle - The jbd2_journal_handle type is the concrete
+ * type associated with handle_t.
* @h_transaction: Which compound transaction is this update a part of?
* @h_journal: Which journal handle belongs to - used iff h_reserved set.
* @h_rsv_handle: Handle reserved for finishing the logical operation.
@@ -532,6 +528,7 @@ struct transaction_chp_stats_s {
* The transaction keeps track of all of the buffers modified by a
* running transaction, and all of the buffers committed but not yet
* flushed to home for finished transactions.
+ * (Locking Documentation improved by LockDoc)
*/
/*
@@ -547,9 +544,6 @@ struct transaction_chp_stats_s {
* ->j_list_lock
*
* j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
* ->j_list_lock (journal_unmap_buffer)
*
*/
@@ -587,18 +581,22 @@ struct transaction_s
*/
unsigned long t_log_start;
- /* Number of buffers on the t_buffers list [j_list_lock] */
+ /*
+ * Number of buffers on the t_buffers list [j_list_lock, no locks
+ * needed for jbd2 thread]
+ */
int t_nr_buffers;
/*
* Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
+ * modified by this transaction [j_list_lock, no locks needed for
+ * jbd2 thread]
*/
struct journal_head *t_reserved_list;
/*
* Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
+ * transaction [j_list_lock, no locks needed for jbd2 thread]
*/
struct journal_head *t_buffers;
@@ -616,30 +614,23 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
+ * Doubly-linked circular list of metadata buffers being
+ * shadowed by log IO. The IO buffers on the iobuf list and
+ * the shadow buffers on this list match each other one for
+ * one at all times. [j_list_lock, no locks needed for jbd2
+ * thread]
*/
struct journal_head *t_shadow_list;
/*
- * List of inodes whose data we've modified in data=ordered mode.
+ * List of inodes associated with the transaction; e.g., ext4 uses
+ * this to track inodes in data=ordered and data=journal mode that
+ * need special handling on transaction commit; also used by ocfs2.
* [j_list_lock]
*/
struct list_head t_inode_list;
/*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
* Longest time some handle had to wait for running transaction
*/
unsigned long t_max_wait;
@@ -650,12 +641,12 @@ struct transaction_s
unsigned long t_start;
/*
- * When commit was requested
+ * When commit was requested [j_state_lock]
*/
unsigned long t_requested;
/*
- * Checkpointing stats [j_checkpoint_sem]
+ * Checkpointing stats [j_list_lock]
*/
struct transaction_chp_stats_s t_chp_stats;
@@ -747,6 +738,11 @@ jbd2_time_diff(unsigned long start, unsigned long end)
#define JBD2_NR_BATCH 64
+enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
+
+#define JBD2_FC_REPLAY_STOP 0
+#define JBD2_FC_REPLAY_CONTINUE 1
+
/**
* struct journal_s - The journal_s type is the concrete type associated with
* journal_t.
@@ -754,7 +750,8 @@ jbd2_time_diff(unsigned long start, unsigned long end)
struct journal_s
{
/**
- * @j_flags: General journaling state flags [j_state_lock]
+ * @j_flags: General journaling state flags [j_state_lock,
+ * no lock for quick racy checks]
*/
unsigned long j_flags;
@@ -782,11 +779,6 @@ struct journal_s
journal_superblock_t *j_superblock;
/**
- * @j_format_version: Version of the superblock format.
- */
- int j_format_version;
-
- /**
* @j_state_lock: Protect the various scalars in the journal.
*/
rwlock_t j_state_lock;
@@ -794,7 +786,8 @@ struct journal_s
/**
* @j_barrier_count:
*
- * Number of processes waiting to create a barrier lock [j_state_lock]
+ * Number of processes waiting to create a barrier lock [j_state_lock,
+ * no lock for quick racy checks]
*/
int j_barrier_count;
@@ -807,7 +800,8 @@ struct journal_s
* @j_running_transaction:
*
* Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
+ * [j_state_lock, no lock for quick racy checks] [caller holding
+ * open handle]
*/
transaction_t *j_running_transaction;
@@ -858,6 +852,13 @@ struct journal_s
wait_queue_head_t j_wait_reserved;
/**
+ * @j_fc_wait:
+ *
+ * Wait queue to wait for completion of async fast commits.
+ */
+ wait_queue_head_t j_fc_wait;
+
+ /**
* @j_checkpoint_mutex:
*
* Semaphore for locking against concurrent checkpoints.
@@ -875,6 +876,29 @@ struct journal_s
struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
/**
+ * @j_shrinker:
+ *
+ * Journal head shrinker: reclaims the journal heads of buffers
+ * that have already been written back.
+ */
+ struct shrinker *j_shrinker;
+
+ /**
+ * @j_checkpoint_jh_count:
+ *
+ * Number of journal buffers on the checkpoint list. [j_list_lock]
+ */
+ struct percpu_counter j_checkpoint_jh_count;
+
+ /**
+ * @j_shrink_transaction:
+ *
+ * Records the next transaction to be shrunk on the checkpoint list.
+ * [j_list_lock]
+ */
+ transaction_t *j_shrink_transaction;
+
+ /**
* @j_head:
*
* Journal head: identifies the first unused block in the journal.
@@ -915,6 +939,31 @@ struct journal_s
unsigned long j_last;
/**
+ * @j_fc_first:
+ *
+ * The block number of the first fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_first;
+
+ /**
+ * @j_fc_off:
+ *
+ * Number of fast commit blocks currently allocated. Accessed only
+ * during fast commit. Currently only one process can do a fast commit, so
+ * this field is not protected by any lock.
+ */
+ unsigned long j_fc_off;
+
+ /**
+ * @j_fc_last:
+ *
+ * The block number one beyond the last fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_last;
+
+ /**
* @j_dev: Device where we store the journal.
*/
struct block_device *j_dev;
@@ -945,9 +994,16 @@ struct journal_s
struct block_device *j_fs_dev;
/**
- * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_fs_dev_wb_err:
+ *
+ * Records the errseq of the client fs's backing block device.
+ */
+ errseq_t j_fs_dev_wb_err;
+
+ /**
+ * @j_total_len: Total maximum capacity of the journal region on disk.
*/
- unsigned int j_maxlen;
+ unsigned int j_total_len;
/**
* @j_reserved_credits:
@@ -987,7 +1043,7 @@ struct journal_s
* @j_commit_sequence:
*
* Sequence number of the most recently committed transaction
- * [j_state_lock].
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_sequence;
@@ -995,7 +1051,7 @@ struct journal_s
* @j_commit_request:
*
* Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_request;
@@ -1065,6 +1121,13 @@ struct journal_s
struct buffer_head **j_wbuf;
/**
+ * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only
+ * during a fast commit. Currently only one process can do a fast commit, so
+ * this field is not protected by any lock.
+ */
+ struct buffer_head **j_fc_wbuf;
+
+ /**
* @j_wbufsize:
*
* Size of @j_wbuf array.
@@ -1072,6 +1135,13 @@ struct journal_s
int j_wbufsize;
/**
+ * @j_fc_wbufsize:
+ *
+ * Size of @j_fc_wbuf array.
+ */
+ int j_fc_wbufsize;
+
+ /**
* @j_last_sync_writer:
*
* The pid of the last person to run a synchronous operation
@@ -1111,6 +1181,27 @@ struct journal_s
void (*j_commit_callback)(journal_t *,
transaction_t *);
+ /**
+ * @j_submit_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with the JI_WRITE_DATA flag
+ * before we start to write out the transaction to the journal.
+ */
+ int (*j_submit_inode_data_buffers)
+ (struct jbd2_inode *);
+
+ /**
+ * @j_finish_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with the JI_WAIT_DATA flag
+ * after we have written the transaction to the journal
+ * but before we write out the commit block.
+ */
+ int (*j_finish_inode_data_buffers)
+ (struct jbd2_inode *);
+
/*
* Journal statistics
*/
@@ -1170,6 +1261,38 @@ struct journal_s
*/
struct lockdep_map j_trans_commit_map;
#endif
+
+ /**
+ * @j_fc_cleanup_callback:
+ *
+ * Clean-up after fast commit or full commit. JBD2 calls this function
+ * after every commit operation.
+ */
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
+
+ /**
+ * @j_fc_replay_callback:
+ *
+ * File-system specific function that performs replay of a fast
+ * commit. JBD2 calls this function for each fast commit block found in
+ * the journal. This function should return JBD2_FC_REPLAY_CONTINUE
+ * to indicate that the block was processed correctly and more fast
+ * commit replay should continue. A return value of JBD2_FC_REPLAY_STOP
+ * indicates the end of replay (no more blocks remaining). A negative
+ * return value indicates error.
+ */
+ int (*j_fc_replay_callback)(struct journal_s *journal,
+ struct buffer_head *bh,
+ enum passtype pass, int off,
+ tid_t expected_commit_id);
+
+ /**
+ * @j_bmap:
+ *
+ * Bmap function that should be used instead of the generic
+ * VFS bmap function.
+ */
+ int (*j_bmap)(struct journal_s *journal, sector_t *block);
};
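
To make the fast-commit hooks above concrete, here is a rough skeleton of a @j_fc_replay_callback implementation; only the return-value contract comes from this patch, and example_apply_fc_block() is a hypothetical fs-specific parsing helper:

	static int example_fc_replay(struct journal_s *journal,
				     struct buffer_head *bh,
				     enum passtype pass, int off,
				     tid_t expected_commit_id)
	{
		int ret;

		if (pass == PASS_SCAN)
			return JBD2_FC_REPLAY_CONTINUE;	/* nothing to apply yet */

		/* hypothetical: parse the fast commit block and apply it */
		ret = example_apply_fc_block(bh, off, expected_commit_id);
		if (ret < 0)
			return ret;			/* abort replay on error */

		return ret ? JBD2_FC_REPLAY_STOP : JBD2_FC_REPLAY_CONTINUE;
	}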
#define jbd2_might_wait_for_commit(j) \
@@ -1178,11 +1301,22 @@ struct journal_s
rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
+/*
+ * We can support any known requested features iff the
+ * superblock is not in version 1. Otherwise we fail to support any
+ * extended sb features.
+ */
+static inline bool jbd2_format_support_feature(journal_t *j)
+{
+ return j->j_superblock->s_header.h_blocktype !=
+ cpu_to_be32(JBD2_SUPERBLOCK_V1);
+}
+
/* journal feature predicate functions */
#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_compat & \
cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \
} \
@@ -1200,7 +1334,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_ro_compat & \
cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \
} \
@@ -1218,7 +1352,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_incompat & \
cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \
} \
@@ -1240,6 +1374,10 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT)
JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT)
JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
+JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
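
For illustration, the generated predicates read like plain functions (usage sketch, assuming a valid journal_t *journal):

	if (jbd2_has_feature_fast_commit(journal))
		pr_info("fast commits enabled\n");
	if (!jbd2_has_feature_csum3(journal))
		jbd2_set_feature_csum3(journal);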
+
+/* Journal high priority write IO operation flags */
+#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE)
/*
* Journal flag definitions
@@ -1253,6 +1391,15 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
+#define JBD2_CYCLE_RECORD 0x080 /* Cycle the record log over a
+ * clean and empty filesystem
+ * logging area */
+#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */
+#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */
+#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001
+#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002
+#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \
+ JBD2_JOURNAL_FLUSH_ZEROOUT)
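
A usage sketch for the new flush flags (assumed caller; jbd2_journal_flush() with a flags argument is declared further below). A flags value outside JBD2_JOURNAL_FLUSH_VALID is expected to be rejected:

	int err;

	/* checkpoint everything, then discard the freed journal blocks */
	err = jbd2_journal_flush(journal, JBD2_JOURNAL_FLUSH_DISCARD);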
/*
* Function declarations for the journaling transaction and buffer
@@ -1264,9 +1411,7 @@ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1290,7 +1435,9 @@ extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1330,9 +1477,6 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
extern int __init jbd2_journal_init_transaction_cache(void);
@@ -1379,14 +1523,16 @@ void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
-extern int jbd2_journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
+int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
+ size_t offset, size_t length);
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio);
extern int jbd2_journal_stop(handle_t *);
-extern int jbd2_journal_flush (journal_t *);
+extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1407,7 +1553,7 @@ extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
- unsigned long, int);
+ unsigned long, blk_opf_t);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
@@ -1421,6 +1567,8 @@ extern int jbd2_journal_inode_ranged_write(handle_t *handle,
extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
struct jbd2_inode *inode, loff_t start_byte,
loff_t length);
+extern int jbd2_journal_finish_inode_data_buffers(
+ struct jbd2_inode *jinode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
@@ -1493,7 +1641,6 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
*/
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
-int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_transaction_committed(journal_t *journal, tid_t tid);
@@ -1505,6 +1652,21 @@ void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
+/* Fast commit related APIs */
+int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit(journal_t *journal);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
+int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
+int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
+int jbd2_fc_release_bufs(journal_t *journal);
+
+static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+ return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+}
+
/*
* is_journal_abort
*
@@ -1532,6 +1694,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
handle->h_aborted = 1;
}
+static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ /*
+ * Save the original wb_err value of the client fs's bdev mapping; it
+ * is used later to detect async write errors on the fs's metadata.
+ */
+ errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
+}
+
+static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ return errseq_check(&mapping->wb_err,
+ READ_ONCE(journal->j_fs_dev_wb_err));
+}
+
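One plausible pairing of these helpers (an assumption drawn from the comments above, not shown in this patch):

	/* at journal load: record the client bdev's current wb_err */
	jbd2_init_fs_dev_write_error(journal);

	/* later, e.g. around commit: detect async write errors since then */
	if (jbd2_check_fs_dev_write_error(journal))
		jbd2_journal_abort(journal, -EIO);
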
#endif /* __KERNEL__ */
/* Comparison functions for transaction IDs: perform comparisons using
@@ -1565,6 +1746,13 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
return journal->j_chksum_driver != NULL;
}
+static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
+{
+ int num_fc_blocks = be32_to_cpu(jsb->s_num_fc_blks);
+
+ return num_fc_blocks ? num_fc_blocks : JBD2_DEFAULT_FAST_COMMIT_BLOCKS;
+}
+
/*
* Return number of free blocks in the log. Must be called under j_state_lock.
*/
@@ -1592,8 +1780,6 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
/* JBD uses a CRC32 checksum */
#define JBD_MAX_CHECKSUM_SIZE 4
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index cfb62e9f37be..ab7f8c152b89 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -99,6 +99,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
case 2: a += (u32)k[1]<<8; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
@@ -136,6 +137,7 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
case 2: b += k[1]; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
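
For reference, a minimal (assumed) caller of the hash helpers above:

	u32 words[3] = { 0xc0a80001, 0xc0a80002, 80 };

	/* jhash2() takes the length in u32 units, not bytes */
	u32 hash = jhash2(words, ARRAY_SIZE(words), 0xdeadbeef);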
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index fed6ba96c527..d9f1435a5a13 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -3,8 +3,9 @@
#define _LINUX_JIFFIES_H
#include <linux/cache.h>
+#include <linux/limits.h>
#include <linux/math64.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
@@ -71,9 +72,15 @@ extern int register_refined_jiffies(long clock_tick_rate);
#endif
/*
- * The 64-bit value is not atomic - you MUST NOT read it
+ * The 64-bit value is not atomic on 32-bit systems - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
+ *
+ * jiffies and jiffies_64 are at the same address for little-endian systems
+ * and for 64-bit big-endian systems.
+ * On 32-bit big-endian systems, jiffies is the lower 32 bits of jiffies_64
+ * (i.e., at address @jiffies_64 + 4).
+ * See arch/ARCH/kernel/vmlinux.lds.S
*/
extern u64 __cacheline_aligned_in_smp jiffies_64;
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
@@ -81,46 +88,94 @@ extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffi
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
+/**
+ * get_jiffies_64 - read the 64-bit non-atomic jiffies_64 value
+ *
+ * When BITS_PER_LONG < 64, this samples the sequence number in
+ * jiffies_lock to protect the 64-bit read.
+ *
+ * Return: current 64-bit jiffies value
+ */
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
}
#endif
-/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
- * 1. Because people otherwise forget
- * 2. Because if the timer wrap changes in future you won't have to
- * alter your driver code.
+/**
+ * DOC: General information about time_* inlines
+ *
+ * These inlines deal with timer wrapping correctly. You are strongly encouraged
+ * to use them:
*
- * time_after(a,b) returns true if the time a is after time b.
+ * #. Because people otherwise forget
+ * #. Because if the timer wrap changes in future you won't have to alter your
+ * driver code.
+ */
+
+/**
+ * time_after - returns true if the time a is after time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
*
* Do this with "<0" and ">=0" to only test the sign of the result. A
* good compiler would generate better code (and a really good compiler
* wouldn't care). Gcc is currently neither.
+ *
+ * Return: %true if time a is after time b, otherwise %false.
*/
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((b) - (a)) < 0))
+/**
+ * time_before - returns true if the time a is before time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before(a,b) time_after(b,a)
+/**
+ * time_after_eq - returns true if the time a is after or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((a) - (b)) >= 0))
+/**
+ * time_before_eq - returns true if the time a is before or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq(a,b) time_after_eq(b,a)
-/*
- * Calculate whether a is in the range of [b, c].
+/**
+ * time_in_range - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
*/
#define time_in_range(a,b,c) \
(time_after_eq(a,b) && \
time_before_eq(a,c))
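
The canonical use of these helpers is a wrap-safe timeout check; a minimal sketch (done() is a placeholder):

	unsigned long timeout = jiffies + 5 * HZ;	/* five seconds from now */

	while (!done()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}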
-/*
- * Calculate whether a is in the range of [b, c).
+/**
+ * time_in_range_open - Calculate whether a is in the range of [b, c).
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c), otherwise %false.
*/
#define time_in_range_open(a,b,c) \
(time_after_eq(a,b) && \
@@ -128,45 +183,138 @@ static inline u64 get_jiffies_64(void)
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
- * get_jiffies_64() */
+ * get_jiffies_64()). */
+
+/**
+ * time_after64 - returns true if the time a is after time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after time b, otherwise %false.
+ */
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((b) - (a)) < 0))
+/**
+ * time_before64 - returns true if the time a is before time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before64(a,b) time_after64(b,a)
+/**
+ * time_after_eq64 - returns true if the time a is after or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((a) - (b)) >= 0))
+/**
+ * time_before_eq64 - returns true if the time a is before or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq64(a,b) time_after_eq64(b,a)
+/**
+ * time_in_range64 - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
+ */
#define time_in_range64(a, b, c) \
(time_after_eq64(a, b) && \
time_before_eq64(a, c))
/*
- * These four macros compare jiffies and 'a' for convenience.
+ * These eight macros compare jiffies[_64] and 'a' for convenience.
*/
-/* time_is_before_jiffies(a) return true if a is before jiffies */
+/**
+ * time_is_before_jiffies - return true if a is before jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before jiffies, otherwise %false.
+ */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+/**
+ * time_is_before_jiffies64 - return true if a is before jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before jiffies_64, otherwise %false.
+ */
#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
-/* time_is_after_jiffies(a) return true if a is after jiffies */
+/**
+ * time_is_after_jiffies - return true if a is after jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after jiffies, otherwise %false.
+ */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+/**
+ * time_is_after_jiffies64 - return true if a is after jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is after jiffies_64, otherwise %false.
+ */
#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
-/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+/**
+ * time_is_before_eq_jiffies - return true if a is before or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before or the same as jiffies, otherwise %false.
+ */
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+/**
+ * time_is_before_eq_jiffies64 - return true if a is before or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before or the same as jiffies_64, otherwise %false.
+ */
#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
-/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+/**
+ * time_is_after_eq_jiffies - return true if a is after or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after or the same as jiffies, otherwise %false.
+ */
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+/**
+ * time_is_after_eq_jiffies64 - return true if a is after or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true is time a is after or the same as jiffies_64, otherwise %false.
+ */
#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * Have the 32-bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -277,7 +425,7 @@ extern unsigned long preset_lpj;
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
+#else /* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
@@ -289,6 +437,12 @@ extern unsigned long preset_lpj;
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
+/**
+ * jiffies_to_nsecs - Convert jiffies to nanoseconds
+ * @j: jiffies value
+ *
+ * Return: nanoseconds value
+ */
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
@@ -352,12 +506,14 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __msecs_to_jiffies() is called if the value passed does not
+ * code. __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * The HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
@@ -399,12 +555,14 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __usecs_to_jiffies() is called if the value passed does not
+ * code. __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * The HZ range specific helpers _usecs_to_jiffies() are called both
+ * directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
@@ -421,6 +579,7 @@ extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
extern clock_t jiffies_to_clock_t(unsigned long x);
+
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 32809624d422..f0a949b7c973 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -82,10 +82,9 @@ extern bool static_key_initialized;
"%s(): static key '%pS' used before call to jump_label_init()", \
__func__, (key))
-#ifdef CONFIG_JUMP_LABEL
-
struct static_key {
atomic_t enabled;
+#ifdef CONFIG_JUMP_LABEL
/*
* Note:
* To make anonymous unions work with old compilers, the static
@@ -104,13 +103,9 @@ struct static_key {
struct jump_entry *entries;
struct static_key_mod *next;
};
+#endif /* CONFIG_JUMP_LABEL */
};
-#else
-struct static_key {
- atomic_t enabled;
-};
-#endif /* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_JUMP_LABEL
@@ -171,9 +166,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}
-static inline void jump_entry_set_init(struct jump_entry *entry)
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
- entry->key |= 2;
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
}
#endif
@@ -213,22 +220,21 @@ extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
-extern void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
-extern void static_key_slow_inc(struct static_key *key);
+extern bool static_key_slow_inc(struct static_key *key);
+extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
-extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
-extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -239,19 +245,19 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
*/
#define STATIC_KEY_INIT_TRUE \
{ .enabled = { 1 }, \
- { .entries = (void *)JUMP_TYPE_TRUE } }
+ { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = { 0 }, \
- { .entries = (void *)JUMP_TYPE_FALSE } }
+ { .type = JUMP_TYPE_FALSE } }
#else /* !CONFIG_JUMP_LABEL */
#include <linux/atomic.h>
#include <linux/bug.h>
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ return raw_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
@@ -261,23 +267,35 @@ static __always_inline void jump_label_init(void)
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(static_key_count(key) > 0))
+ if (unlikely_notrace(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(static_key_count(key) > 0))
+ if (likely_notrace(static_key_count(key) > 0))
return true;
return false;
}
-static inline void static_key_slow_inc(struct static_key *key)
+static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
+ int v;
+
STATIC_KEY_CHECK_USE(key);
- atomic_inc(&key->enabled);
+ /*
+ * Prevent key->enabled getting negative to follow the same semantics
+ * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
+ */
+ v = atomic_read(&key->enabled);
+ do {
+ if (v < 0 || (v + 1) < 0)
+ return false;
+ } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+ return true;
}
+#define static_key_slow_inc(key) static_key_fast_inc_not_disabled(key)
static inline void static_key_slow_dec(struct static_key *key)
{
@@ -296,11 +314,6 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline int jump_label_apply_nops(struct module *mod)
-{
- return 0;
-}
-
static inline void static_key_enable(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
@@ -382,6 +395,21 @@ struct static_key_false {
[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
}
+#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name)
+#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name)
+#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
+#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name)
+#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name)
+#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)
+
+#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name)
+#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name)
+#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
extern bool ____wrong_branch_error(void);
#define static_key_enabled(x) \
@@ -460,7 +488,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
- likely(branch); \
+ likely_notrace(branch); \
})
#define static_branch_unlikely(x) \
@@ -472,16 +500,20 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
- unlikely(branch); \
+ unlikely_notrace(branch); \
})
#else /* !CONFIG_JUMP_LABEL */
-#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
#endif /* CONFIG_JUMP_LABEL */
+#define static_branch_maybe(config, x) \
+ (IS_ENABLED(config) ? static_branch_likely(x) \
+ : static_branch_unlikely(x))
+
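Illustrative usage of the *_MAYBE helpers (a sketch; CONFIG_FOO and do_fast_path() are hypothetical):

	/* key defaults to true iff CONFIG_FOO is enabled */
	DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO, foo_key);

	static void example(void)
	{
		/* picks static_branch_likely() iff CONFIG_FOO is enabled */
		if (static_branch_maybe(CONFIG_FOO, &foo_key))
			do_fast_path();
	}
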
/*
* Advanced usage; refcount, branch is enabled when: count != 0
*/
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 481273f0c72d..c3f075e8f60c 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,6 +7,7 @@
#define _LINUX_KALLSYMS_H
#include <linux/errno.h>
+#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/mm.h>
@@ -14,32 +15,25 @@
#include <asm/sections.h>
-#define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
- 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#define KSYM_NAME_LEN 512
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+ (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+ (BUILD_ID_SIZE_MAX * 2) + 1)
struct cred;
struct module;
-static inline int is_kernel_inittext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sinittext
- && addr <= (unsigned long)_einittext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
- arch_is_kernel_text(addr))
+ if (__is_kernel_text(addr))
return 1;
return in_gate_area_no_mm(addr);
}
static inline int is_kernel(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ if (__is_kernel(addr))
return 1;
return in_gate_area_no_mm(addr);
}
@@ -54,7 +48,7 @@ static inline int is_ksym_addr(unsigned long addr)
static inline void *dereference_symbol_descriptor(void *ptr)
{
-#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
struct module *mod;
ptr = dereference_kernel_function_descriptor(ptr);
@@ -71,15 +65,19 @@ static inline void *dereference_symbol_descriptor(void *ptr)
return ptr;
}
+/* How and when do we show kallsyms values? */
+extern bool kallsyms_show_value(const struct cred *cred);
+
#ifdef CONFIG_KALLSYMS
+unsigned long kallsyms_sym_address(int idx);
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data);
+int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data);
+
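A sketch of a callback matching the new iterator signature (the names here are illustrative assumptions):

	/* count core-kernel symbols whose names start with "vfs_" */
	static int example_count_vfs(void *data, const char *name,
				     unsigned long addr)
	{
		unsigned int *count = data;

		if (!strncmp(name, "vfs_", 4))
			(*count)++;
		return 0;	/* a non-zero return stops the walk */
	}

	/* usage: kallsyms_on_each_symbol(example_count_vfs, &count); */
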
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
-/* Call a function on each kallsyms symbol in the core kernel */
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long),
- void *data);
-
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -92,14 +90,12 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
-int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-
-/* How and when do we show kallsyms values? */
-extern bool kallsyms_show_value(const struct cred *cred);
#else /* !CONFIG_KALLSYMS */
@@ -108,14 +104,6 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
@@ -137,6 +125,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
{
*buffer = '\0';
@@ -149,21 +143,28 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
return 0;
}
-static inline int lookup_symbol_name(unsigned long addr, char *symname)
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
{
- return -ERANGE;
+ *buffer = '\0';
+ return 0;
}
-static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+static inline int lookup_symbol_name(unsigned long addr, char *symname)
{
return -ERANGE;
}
-static inline bool kallsyms_show_value(const struct cred *cred)
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data)
{
- return false;
+ return -EOPNOTSUPP;
}
+static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
#endif /*CONFIG_KALLSYMS*/
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index ac6aba632f2d..3d6d22a25bdc 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -5,11 +5,17 @@
#include <linux/types.h>
/*
+ * The annotations present in this file are only relevant for the software
+ * KASAN modes that rely on compiler instrumentation, and will be optimized
+ * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
+ */
+
+/*
* __kasan_check_*: Always available when KASAN is enabled. This may be used
* even in compilation units that selectively disable KASAN, but must use KASAN
* to validate access to an address. Never use these in header files!
*/
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bool __kasan_check_read(const volatile void *p, unsigned int size);
bool __kasan_check_write(const volatile void *p, unsigned int size);
#else
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
new file mode 100644
index 000000000000..6f612d69ea0c
--- /dev/null
+++ b/include/linux/kasan-enabled.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_ENABLED_H
+#define _LINUX_KASAN_ENABLED_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+
+static __always_inline bool kasan_enabled(void)
+{
+ return static_branch_likely(&kasan_flag_enabled);
+}
+
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+
+#else /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN);
+}
+
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#endif /* _LINUX_KASAN_ENABLED_H */
diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* _LINUX_KASAN_TAGS_H */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 087fba34b209..70d6a8f6e25d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,32 +2,69 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
+#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
+#include <linux/kernel.h>
+#include <linux/static_key.h>
#include <linux/types.h>
struct kmem_cache;
struct page;
+struct slab;
struct vm_struct;
struct task_struct;
#ifdef CONFIG_KASAN
-#include <linux/pgtable.h>
+#include <linux/linkage.h>
#include <asm/kasan.h>
+#endif
+
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u)
+#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
+#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
+#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+#include <linux/pgtable.h>
+
+/* Software KASAN implementations use shadow memory. */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+/* This matches KASAN_TAG_INVALID. */
+#define KASAN_SHADOW_INIT 0xFE
+#else
+#define KASAN_SHADOW_INIT 0
+#endif
+
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
-extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
-extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
+extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
+
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);
@@ -35,203 +72,530 @@ extern void kasan_enable_current(void);
/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
-void kasan_unpoison_shadow(const void *address, size_t size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_unpoison_task_stack(struct task_struct *task);
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+ unsigned long size)
+{}
-void kasan_alloc_pages(struct page *page, unsigned int order);
-void kasan_free_pages(struct page *page, unsigned int order);
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags);
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_poison_slab(struct page *page);
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
- const void *object);
-
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
- gfp_t flags);
-void kasan_kfree_large(void *ptr, unsigned long ip);
-void kasan_poison_kfree(void *ptr, unsigned long ip);
-void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size, gfp_t flags);
-void * __must_check kasan_krealloc(const void *object, size_t new_size,
- gfp_t flags);
-
-void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
+#ifdef CONFIG_KASAN_HW_TAGS
-struct kasan_cache {
- int alloc_meta_offset;
- int free_meta_offset;
-};
+#else /* CONFIG_KASAN_HW_TAGS */
-/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_has_integrated_init(void)
+{
+ return kasan_hw_tags_enabled();
+}
+
+#ifdef CONFIG_KASAN
+void __kasan_unpoison_range(const void *addr, size_t size);
+static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_range(addr, size);
+}
+
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ __kasan_poison_pages(page, order, init);
+}
+
+bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline bool kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_pages(page, order, init);
+ return false;
+}
+
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
+{
+ if (kasan_enabled())
+ __kasan_poison_slab(slab);
+}
+
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
*/
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
-#else
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-#endif
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_new_object(cache, object);
+}
-int kasan_add_zero_shadow(void *start, unsigned long size);
-void kasan_remove_zero_shadow(void *start, unsigned long size);
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_poison_new_object(cache, object);
+}
-size_t __ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr)
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object);
+static __always_inline void * __must_check kasan_init_slab_obj(
+ struct kmem_cache *cache, const void *object)
{
- kasan_unpoison_shadow(ptr, __ksize(ptr));
+ if (kasan_enabled())
+ return __kasan_init_slab_obj(cache, object);
+ return (void *)object;
}
-size_t kasan_metadata_size(struct kmem_cache *cache);
-bool kasan_save_enable_multi_shot(void);
-void kasan_restore_multi_shot(bool enabled);
+bool __kasan_slab_free(struct kmem_cache *s, void *object,
+ unsigned long ip, bool init);
+static __always_inline bool kasan_slab_free(struct kmem_cache *s,
+ void *object, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_free(s, object, _RET_IP_, init);
+ return false;
+}
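
A sketch of an allocator-side caller (assumed; do_actual_free() is hypothetical). A true return from kasan_slab_free() means KASAN has poisoned the object and, in the Generic mode, may be holding it in quarantine, so the immediate free must be skipped:

	static void example_free(struct kmem_cache *s, void *object)
	{
		if (kasan_slab_free(s, object, false))
			return;		/* KASAN owns the object for now */

		do_actual_free(s, object);
	}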
-#else /* CONFIG_KASAN */
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_kfree_large(ptr, _RET_IP_);
+}
-static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
+ void *object, gfp_t flags, bool init);
+static __always_inline void * __must_check kasan_slab_alloc(
+ struct kmem_cache *s, void *object, gfp_t flags, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_alloc(s, object, flags, init);
+ return object;
+}
-static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
+ const void *object, size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc(s, object, size, flags);
+ return (void *)object;
+}
-static inline void kasan_enable_current(void) {}
-static inline void kasan_disable_current(void) {}
+void * __must_check __kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc_large(ptr, size, flags);
+ return (void *)ptr;
+}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+void * __must_check __kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags);
+static __always_inline void * __must_check kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_krealloc(object, new_size, flags);
+ return (void *)object;
+}
-static inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size,
- slab_flags_t *flags) {}
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
-static inline void kasan_poison_slab(struct page *page) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
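
To make the intended call pattern concrete, here is a minimal sketch (editor's illustration, not part of the patch): a hypothetical one-element cache that poisons slab objects on put and unpoisons them on get; all names are invented for the example.

static void *cached_obj;
static size_t cached_obj_size;

static bool example_cache_put(void *obj, size_t size)
{
	/* Checks for double-free/invalid-free and poisons the object. */
	if (!kasan_mempool_poison_object(obj))
		return false;		/* buggy allocation, do not cache */
	cached_obj = obj;
	cached_obj_size = size;
	return true;
}

static void *example_cache_get(void)
{
	void *obj = cached_obj;

	if (!obj)
		return NULL;
	cached_obj = NULL;
	/* Makes the object accessible again before handing it out. */
	kasan_mempool_unpoison_object(obj, cached_obj_size);
	return obj;
}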
+
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
+{
+ if (kasan_enabled())
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
+}
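
As a usage sketch (editor's illustration, assuming a __ksize()-style size helper is available), a caller can use kasan_check_byte() to bail out on freed or otherwise invalid pointers before touching them:

static size_t example_usable_size(const void *obj)
{
	/* Reports through KASAN and returns false for invalid memory. */
	if (!kasan_check_byte(obj))
		return 0;
	return __ksize(obj);
}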
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_unpoison_range(const void *address, size_t size) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init)
+{
+ return false;
+}
+static inline void kasan_poison_slab(struct slab *slab) {}
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
return (void *)object;
}
-
-static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+{
+ return false;
+}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags, bool init)
{
- return ptr;
+ return object;
}
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags)
{
return (void *)object;
}
+static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+ return (void *)ptr;
+}
static inline void *kasan_krealloc(const void *object, size_t new_size,
gfp_t flags)
{
return (void *)object;
}
-
-static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags)
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
- return object;
+ return true;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
{
- return false;
+ return true;
}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-
-static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+static inline bool kasan_check_byte(const void *address)
{
- return 0;
+ return true;
}
-static inline void kasan_remove_zero_shadow(void *start,
- unsigned long size)
-{}
-
-static inline void kasan_unpoison_slab(const void *ptr) { }
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
#endif /* CONFIG_KASAN */
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+void kasan_unpoison_task_stack(struct task_struct *task);
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
+#else
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
+#endif
+
#ifdef CONFIG_KASAN_GENERIC
-#define KASAN_SHADOW_INIT 0
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
+void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache,
+ bool in_object)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
+static inline void kasan_cache_create(struct kmem_cache *cache,
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
+static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
-#ifdef CONFIG_KASAN_SW_TAGS
-
-#define KASAN_SHADOW_INIT 0xFF
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
-void kasan_init_tags(void);
-
-void *kasan_reset_tag(const void *addr);
+static inline void *kasan_reset_tag(const void *addr)
+{
+ return (void *)arch_kasan_reset_tag(addr);
+}
-bool kasan_report(unsigned long addr, size_t size,
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
+bool kasan_report(const void *addr, size_t size,
bool is_write, unsigned long ip);
-#else /* CONFIG_KASAN_SW_TAGS */
-
-static inline void kasan_init_tags(void) { }
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline void *kasan_reset_tag(const void *addr)
{
return (void *)addr;
}
-#endif /* CONFIG_KASAN_SW_TAGS */
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_report_async(void);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+void __init kasan_init_sw_tags(void);
+#else
+static inline void kasan_init_sw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void kasan_init_hw_tags_cpu(void);
+void __init kasan_init_hw_tags(void);
+#else
+static inline void kasan_init_hw_tags_cpu(void) { }
+static inline void kasan_init_hw_tags(void) { }
+#endif
#ifdef CONFIG_KASAN_VMALLOC
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
-void kasan_poison_vmalloc(const void *start, unsigned long size);
-void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
-#else
+
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
static inline int kasan_populate_vmalloc(unsigned long start,
unsigned long size)
{
return 0;
}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) { }
-static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
-{ }
-static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{ }
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_vmalloc(start, size, flags);
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size);
+static __always_inline void kasan_poison_vmalloc(const void *start,
+ unsigned long size)
+{
+ if (kasan_enabled())
+ __kasan_poison_vmalloc(start, size);
+}
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size) { }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end) {}
-#endif
+ unsigned long free_region_end) { }
+
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ return (void *)start;
+}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when CONFIG_KASAN_VMALLOC is not enabled, as
+ * otherwise shadow memory is allocated by the generic vmalloc handlers.
+ */
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
+
+#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+
+#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index bb2246c8ec13..c40811d79769 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -6,12 +6,7 @@
#include <linux/interrupt.h>
#include <linux/keyboard.h>
-extern struct tasklet_struct keyboard_tasklet;
-
extern char *func_table[MAX_NR_FUNC];
-extern char func_buf[];
-extern char *funcbufptr;
-extern int funcbufsize, funcbufleft;
/*
* kbd->xxx contains the VC-local things (flag settings etc..)
@@ -74,12 +69,6 @@ extern void (*kbd_ledfunc)(unsigned int led);
extern int set_console(int nr);
extern void schedule_console_callback(void);
-/* FIXME: review locking for vt.c callers */
-static inline void set_leds(void)
-{
- tasklet_schedule(&keyboard_tasklet);
-}
-
static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
{
return ((kbd->modeflags >> flag) & 1);
@@ -138,7 +127,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
struct console;
-void compute_shiftstate(void);
+void vt_set_leds_compute_shiftstate(void);
/* defkeymap.c */
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 9d12c970f18f..20d1079e92b4 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -2,8 +2,6 @@
#ifndef __LINUX_KCONFIG_H
#define __LINUX_KCONFIG_H
-/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
-
#include <generated/autoconf.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -53,7 +51,8 @@
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
- * otherwise.
+ * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in
+ * autoconf.h.
*/
#define IS_MODULE(option) __is_defined(option##_MODULE)
@@ -68,7 +67,8 @@
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
+ * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in
+ * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1".
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
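
For illustration (editor's sketch; CONFIG_FOO and foo_init() are hypothetical), IS_ENABLED() lets code use an ordinary C conditional instead of #ifdef, with the dead branch eliminated at compile time:

static void foo_init(void)
{
	if (IS_ENABLED(CONFIG_FOO))		/* y or m */
		pr_info("foo: built-in or modular\n");
	else
		pr_info("foo: disabled\n");
}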
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index da676cdbd727..86c0f1d18998 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -11,14 +11,11 @@ enum kcore_type {
KCORE_RAM,
KCORE_VMEMMAP,
KCORE_USER,
- KCORE_OTHER,
- KCORE_REMAP,
};
struct kcore_list {
struct list_head list;
unsigned long addr;
- unsigned long vaddr;
size_t size;
int type;
};
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index a10e84707d82..b851ba415e03 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_KCOV_H
#define _LINUX_KCOV_H
+#include <linux/sched.h>
#include <uapi/linux/kcov.h>
struct task_struct;
@@ -52,6 +53,42 @@ static inline void kcov_remote_start_usb(u64 id)
kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
}
+/*
+ * The softirq flavor of the kcov_remote_*() functions is a temporary
+ * workaround for kcov's lack of support for nested remote coverage sections
+ * in task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+ if (in_serving_softirq())
+ kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+ if (in_serving_softirq())
+ kcov_remote_stop();
+}
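
A brief usage sketch (editor's illustration; the handler and bus_id are hypothetical): the softirq variants bracket a remote coverage section and collapse to no-ops unless the code is actually running in softirq context:

static void example_usb_completion(u64 bus_id)
{
	kcov_remote_start_usb_softirq(bus_id);
	/* ... handle the completion; coverage is attributed to the handle ... */
	kcov_remote_stop_softirq();
}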
+
+#ifdef CONFIG_64BIT
+typedef unsigned long kcov_u64;
+#else
+typedef unsigned long long kcov_u64;
+#endif
+
+void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases);
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
@@ -66,6 +103,8 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index c5f6c1dcf7e3..92f3843d9ebb 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -1,4 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN access checks and modifiers. These can be used to explicitly check
+ * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H
@@ -7,19 +13,13 @@
#include <linux/compiler_attributes.h>
#include <linux/types.h>
-/*
- * ACCESS TYPE MODIFIERS
- *
- * <none>: normal read access;
- * WRITE : write access;
- * ATOMIC: access is atomic;
- * ASSERT: access is not a regular access, but an assertion;
- * SCOPED: access is a scoped access;
- */
-#define KCSAN_ACCESS_WRITE 0x1
-#define KCSAN_ACCESS_ATOMIC 0x2
-#define KCSAN_ACCESS_ASSERT 0x4
-#define KCSAN_ACCESS_SCOPED 0x8
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
+#define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */
/*
* __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
@@ -36,6 +36,36 @@
*/
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
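
As a short sketch of combining the type bits above (editor's illustration; shared_flags and the helper are hypothetical), an uninstrumented (__no_kcsan) helper can still report its accesses to the runtime explicitly:

static unsigned long shared_flags;

static __no_kcsan void example_set_bit(unsigned long bit)
{
	/* Report a plain compound read-modify-write to the runtime. */
	__kcsan_check_access(&shared_flags, sizeof(shared_flags),
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);
	shared_flags |= bit;
}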
+/*
+ * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
+ * memory orders to the LKMM memory orders and vice-versa!
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE
+
+/**
+ * __kcsan_mb - full memory barrier instrumentation
+ */
+void __kcsan_mb(void);
+
+/**
+ * __kcsan_wmb - write memory barrier instrumentation
+ */
+void __kcsan_wmb(void);
+
+/**
+ * __kcsan_rmb - read memory barrier instrumentation
+ */
+void __kcsan_rmb(void);
+
+/**
+ * __kcsan_release - release barrier instrumentation
+ */
+void __kcsan_release(void);
+
/**
* kcsan_disable_current - disable KCSAN for the current context
*
@@ -99,10 +129,21 @@ void kcsan_set_access_mask(unsigned long mask);
/* Scoped access information. */
struct kcsan_scoped_access {
- struct list_head list;
+ union {
+ struct list_head list; /* scoped_accesses list */
+ /*
+ * Not an entry in scoped_accesses list; stack depth from where
+ * the access was initialized.
+ */
+ int stack_depth;
+ };
+
+ /* Access information. */
const volatile void *ptr;
size_t size;
int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
};
/*
* Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
@@ -148,6 +189,10 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }
+static inline void __kcsan_mb(void) { }
+static inline void __kcsan_wmb(void) { }
+static inline void __kcsan_rmb(void) { }
+static inline void __kcsan_release(void) { }
static inline void kcsan_disable_current(void) { }
static inline void kcsan_enable_current(void) { }
static inline void kcsan_enable_current_nowarn(void) { }
@@ -180,12 +225,47 @@ static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
*/
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
-#else
+#else /* __SANITIZE_THREAD__ */
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }
static inline void __kcsan_enable_current(void) { }
static inline void __kcsan_disable_current(void) { }
-#endif
+#endif /* __SANITIZE_THREAD__ */
+
+#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
+/*
+ * Normal barrier instrumentation is not done via explicit calls, but by mapping
+ * to a repurposed __atomic_signal_fence(), which normally does not generate any
+ * real instructions, but is still intercepted by -fsanitize=thread. This means,
+ * like any other compile-time instrumentation, barrier instrumentation can be
+ * disabled with the __no_kcsan function attribute.
+ *
+ * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ *
+ * These are all macros, like <asm/barrier.h>, since some architectures use them
+ * in non-static inline functions.
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name) \
+ do { \
+ barrier(); \
+ __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
+ barrier(); \
+ } while (0)
+#define kcsan_mb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
+#define kcsan_wmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
+#define kcsan_rmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
+#define kcsan_release() __KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
+#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
+#define kcsan_mb __kcsan_mb
+#define kcsan_wmb __kcsan_wmb
+#define kcsan_rmb __kcsan_rmb
+#define kcsan_release __kcsan_release
+#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+#define kcsan_mb() do { } while (0)
+#define kcsan_wmb() do { } while (0)
+#define kcsan_rmb() do { } while (0)
+#define kcsan_release() do { } while (0)
+#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
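
Roughly how these hooks are meant to compose (editor's sketch modeled on the asm-generic barrier wrappers; __arch_mb() stands in for a real architecture barrier primitive):

#define example_smp_mb()					\
	do {							\
		kcsan_mb();	/* instrumentation only */	\
		__arch_mb();	/* the real barrier */		\
	} while (0)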
/**
* __kcsan_check_read - check regular read access for races
@@ -205,6 +285,15 @@ static inline void __kcsan_disable_current(void) { }
__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
/**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/**
* kcsan_check_read - check regular read access for races
*
* @ptr: address of access
@@ -221,18 +310,30 @@ static inline void __kcsan_disable_current(void) { }
#define kcsan_check_write(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
/*
* Check for atomic accesses: if atomic accesses are not ignored, this simply
* aliases to kcsan_check_access(), otherwise becomes a no-op.
*/
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
-#define kcsan_check_atomic_read(...) do { } while (0)
-#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read_write(...) do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#define kcsan_check_atomic_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif
/**
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 53340d8789f9..c07c71f5ba4f 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -1,4 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
+ * data structures to set up the runtime. See kcsan-checks.h for explicit
+ * checks and modifiers. For more information, see
+ * Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H
@@ -14,6 +21,7 @@
*/
struct kcsan_ctx {
int disable_count; /* disable counter */
+ int disable_scoped; /* disable scoped access counter */
int atomic_next; /* number of following atomic ops */
/*
@@ -41,8 +49,16 @@ struct kcsan_ctx {
*/
unsigned long access_mask;
- /* List of scoped accesses. */
+ /* List of scoped accesses; likely to be empty. */
struct list_head scoped_accesses;
+
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ /*
+ * Scoped access for modeling access reordering to detect missing memory
+ * barriers; only keep 1 to keep fast-path complexity manageable.
+ */
+ struct kcsan_scoped_access reorder_access;
+#endif
};
/**
diff --git a/include/linux/kd.h b/include/linux/kd.h
deleted file mode 100644
index b130a18f860f..000000000000
--- a/include/linux/kd.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_KD_H
-#define _LINUX_KD_H
-
-#include <uapi/linux/kd.h>
-
-#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
-#endif /* _LINUX_KD_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 0125a677b67f..f6c2ddb16b95 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,6 +13,8 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+#include <linux/list.h>
+
 /* Shifted versions of the command enable bits are used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
* go, to have different permissions depending upon whether it is called
@@ -64,6 +66,17 @@ typedef enum {
typedef int (*kdb_func_t)(int, const char **);
+/* The KDB shell command table */
+typedef struct _kdbtab {
+ char *name; /* Command name */
+ kdb_func_t func; /* Function to execute command */
+ char *usage; /* Usage String for this command */
+ char *help; /* Help message for this command */
+ short minlen; /* Minimum legal # cmd chars required */
+ kdb_cmdflags_t flags; /* Command behaviour flags */
+ struct list_head list_node; /* Command list */
+} kdbtab_t;
+
#ifdef CONFIG_KGDB_KDB
#include <linux/init.h>
#include <linux/sched.h>
@@ -183,6 +196,8 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
+extern void kdb_send_sig(struct task_struct *p, int sig);
+
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
@@ -193,19 +208,13 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
#endif /* ! CONFIG_KALLSYMS */
/* Dynamic kdb shell command registration */
-extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
- short, kdb_cmdflags_t);
-extern int kdb_unregister(char *);
+extern int kdb_register(kdbtab_t *cmd);
+extern void kdb_unregister(kdbtab_t *cmd);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
-static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
-static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_cmdflags_t flags) { return 0; }
-static inline int kdb_unregister(char *cmd) { return 0; }
+static inline int kdb_register(kdbtab_t *cmd) { return 0; }
+static inline void kdb_unregister(kdbtab_t *cmd) {}
#endif /* CONFIG_KGDB_KDB */
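
A minimal sketch of the new table-based registration (editor's illustration; the command and all names are hypothetical):

static int kdb_hello(int argc, const char **argv)
{
	kdb_printf("hello from kdb\n");
	return 0;
}

static kdbtab_t hello_cmd = {
	.name	= "hello",
	.func	= kdb_hello,
	.usage	= "hello",
	.help	= "print a test greeting",
};

static int __init hello_kdb_init(void)
{
	/* Pairs with kdb_unregister(&hello_cmd) on teardown. */
	return kdb_register(&hello_cmd);
}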
enum {
KDB_NOT_INITIALIZED,
@@ -215,5 +224,6 @@ enum {
extern int kdbgetintenv(const char *, int *);
extern int kdb_set(int, const char **);
+int kdb_lsmod(int argc, const char **argv);
#endif /* !_KDB_H */
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 85b5151911cf..4856706fbfeb 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -21,61 +21,61 @@
})
/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
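
A quick worked example of the encoding (editor's illustration): for MKDEV(8, 0x10020), new_encode_dev() packs the low 8 minor bits, then the 12-bit major, then the remaining high minor bits, and new_decode_dev() reverses it:

static void example_dev_roundtrip(void)
{
	dev_t dev = MKDEV(8, 0x10020);	/* minor wider than 8 bits */
	u32 enc = new_encode_dev(dev);	/* 0x20 | (8 << 8) | (0x10000 << 12) == 0x10000820 */

	WARN_ON(new_decode_dev(enc) != dev);	/* round-trips losslessly */
}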
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index abd20ef93c98..859f4b0c1b2b 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -17,5 +17,7 @@
#define KPF_ARCH 38
#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
+#define KPF_ARCH_2 41
+#define KPF_ARCH_3 42
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c25b8e41c0ea..be2e8c0a187e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,50 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE:
+ *
+ * This header has accumulated a lot of mutually unrelated content.
+ * The process of splitting it up is in progress while keeping
+ * backward compatibility. That's why it's highly recommended NOT to
+ * include this header from another header file, especially under a
+ * generic or architecture-specific include/ directory.
+ */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
-
-#include <stdarg.h>
+#include <linux/stdarg.h>
+#include <linux/align.h>
+#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/bitops.h>
+#include <linux/hex.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/typecheck.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/sprintf.h>
+#include <linux/static_call_types.h>
+#include <linux/instruction_pointer.h>
+#include <linux/wordpart.h>
+
#include <asm/byteorder.h>
-#include <asm/div64.h>
+
#include <uapi/linux/kernel.h>
#define STACK_MAGIC 0xdeadbeef
-/**
- * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
- * @x: value to repeat
- *
- * NOTE: @x is not checked for > 0xff; larger values produce odd results.
- */
-#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-
/* generic data direction definitions */
#define READ 0
#define WRITE 1
-/**
- * ARRAY_SIZE - get the number of elements in array @arr
- * @arr: array to be sized
- */
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define u64_to_user_ptr(x) ( \
{ \
@@ -53,156 +54,41 @@
} \
)
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-/**
- * round_up - round up to next specified power of 2
- * @x: the value to round
- * @y: multiple to round up to (must be a power of 2)
- *
- * Rounds @x up to next multiple of @y (which must be a power of 2).
- * To perform arbitrary rounding up, use roundup() below.
- */
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-/**
- * round_down - round down to next specified power of 2
- * @x: the value to round
- * @y: multiple to round down to (must be a power of 2)
- *
- * Rounds @x down to next multiple of @y (which must be a power of 2).
- * To perform arbitrary rounding down, use rounddown() below.
- */
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
-#define typeof_member(T, m) typeof(((T*)0)->m)
-
-#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
-
-#define DIV_ROUND_DOWN_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
-
-#define DIV_ROUND_UP_ULL(ll, d) \
- DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
-
-#if BITS_PER_LONG == 32
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
-#else
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
-#endif
-
-/**
- * roundup - round up to the next specified multiple
- * @x: the value to up
- * @y: multiple to round up to
- *
- * Rounds @x up to next multiple of @y. If @y will always be a power
- * of 2, consider using the faster round_up().
- */
-#define roundup(x, y) ( \
-{ \
- typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-/**
- * rounddown - round down to next specified multiple
- * @x: the value to round
- * @y: multiple to round down to
- *
- * Rounds @x down to next multiple of @y. If @y will always be a power
- * of 2, consider using the faster round_down().
- */
-#define rounddown(x, y) ( \
-{ \
- typeof(x) __x = (x); \
- __x - (__x % (y)); \
-} \
-)
+struct completion;
+struct user;
-/*
- * Divide positive or negative dividend by positive or negative divisor
- * and round to closest integer. Result is undefined for negative
- * divisors if the dividend variable type is unsigned and for negative
- * dividends if the divisor variable type is unsigned.
- */
-#define DIV_ROUND_CLOSEST(x, divisor)( \
-{ \
- typeof(x) __x = x; \
- typeof(divisor) __d = divisor; \
- (((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || \
- (((__x) > 0) == ((__d) > 0))) ? \
- (((__x) + ((__d) / 2)) / (__d)) : \
- (((__x) - ((__d) / 2)) / (__d)); \
-} \
-)
-/*
- * Same as above but for u64 dividends. divisor must be a 32-bit
- * number.
- */
-#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
-{ \
- typeof(divisor) __d = divisor; \
- unsigned long long _tmp = (x) + (__d) / 2; \
- do_div(_tmp, __d); \
- _tmp; \
-} \
-)
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
-/*
- * Multiplies an integer by a fraction, while avoiding unnecessary
- * overflow or loss of precision.
- */
-#define mult_frac(x, numer, denom)( \
-{ \
- typeof(x) quot = (x) / (denom); \
- typeof(x) rem = (x) % (denom); \
- (quot * (numer)) + ((rem * (numer)) / (denom)); \
-} \
-)
+extern int __cond_resched(void);
+# define might_resched() __cond_resched()
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define _RET_IP_ (unsigned long)__builtin_return_address(0)
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+extern int __cond_resched(void);
-#define sector_div(a, b) do_div(a, b)
+DECLARE_STATIC_CALL(might_resched, __cond_resched);
-/**
- * upper_32_bits - return bits 32-63 of a number
- * @n: the number we're accessing
- *
- * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
- * the "right shift count >= width of type" warning when that quantity is
- * 32-bits.
- */
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+static __always_inline void might_resched(void)
+{
+ static_call_mod(might_resched)();
+}
-/**
- * lower_32_bits - return bits 0-31 of a number
- * @n: the number we're accessing
- */
-#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-struct completion;
-struct pt_regs;
-struct user;
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
#else
+
# define might_resched() do { } while (0)
-#endif
+
+#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-extern void ___might_sleep(const char *file, int line, int preempt_offset);
-extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_migrate(const char *file, int line);
/**
* might_sleep - annotation for functions that can sleep
@@ -217,7 +103,7 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
*
@@ -226,6 +112,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define cant_sleep() \
do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
+
+/**
+ * cant_migrate - annotation for functions that cannot migrate
+ *
+ * Will print a stack trace if executed in code which is migratable
+ */
+# define cant_migrate() \
+ do { \
+ if (IS_ENABLED(CONFIG_SMP)) \
+ __cant_migrate(__FILE__, __LINE__); \
+ } while (0)
+
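
A small usage sketch (editor's illustration; the helper is hypothetical): annotating a function that may block makes CONFIG_DEBUG_ATOMIC_SLEEP builds complain when it is reached from atomic context:

static void *example_alloc_buffer(size_t len)
{
	might_sleep();	/* splat if called with preemption/IRQs disabled */
	return kmalloc(len, GFP_KERNEL);	/* may sleep for reclaim */
}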
/**
* non_block_start - annotate the start of section where sleeping is prohibited
*
@@ -244,12 +142,12 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
*/
# define non_block_end() WARN_ON(current->non_block_count-- == 0)
#else
- static inline void ___might_sleep(const char *file, int line,
- int preempt_offset) { }
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
+# define cant_migrate() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
# define non_block_start() do { } while (0)
# define non_block_end() do { } while (0)
@@ -257,55 +155,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
-#ifndef CONFIG_PREEMPT_RT
-# define cant_migrate() cant_sleep()
-#else
- /* Placeholder for now */
-# define cant_migrate() do { } while (0)
-#endif
-
-/**
- * abs - return absolute value of an argument
- * @x: the value. If it is unsigned type, it is converted to signed type first.
- * char is treated as if it was signed (regardless of whether it really is)
- * but the macro's return type is preserved as char.
- *
- * Return: an absolute value of x.
- */
-#define abs(x) __abs_choose_expr(x, long long, \
- __abs_choose_expr(x, long, \
- __abs_choose_expr(x, int, \
- __abs_choose_expr(x, short, \
- __abs_choose_expr(x, char, \
- __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), char), \
- (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
- ((void)0)))))))
-
-#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), signed type) || \
- __builtin_types_compatible_p(typeof(x), unsigned type), \
- ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
-
-/**
- * reciprocal_scale - "scale" a value into range [0, ep_ro)
- * @val: value
- * @ep_ro: right open interval endpoint
- *
- * Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, @ep_ro), where the upper interval endpoint is right-open.
- * This is useful, e.g. for accessing a index of an array containing
- * @ep_ro elements, for example. Think of it as sort of modulus, only that
- * the result isn't that of modulo. ;) Note that if initial input is a
- * small value, then result will return 0.
- *
- * Return: a result based on @val in interval [0, @ep_ro).
- */
-static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
-{
- return (u32)(((u64) val * ep_ro) >> 32);
-}
-
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
@@ -314,257 +163,15 @@ void __might_fault(const char *file, int line);
static inline void might_fault(void) { }
#endif
-extern struct atomic_notifier_head panic_notifier_list;
-extern long (*panic_blink)(int state);
-__printf(1, 2)
-void panic(const char *fmt, ...) __noreturn __cold;
-void nmi_panic(struct pt_regs *regs, const char *msg);
-extern void oops_enter(void);
-extern void oops_exit(void);
-extern bool oops_may_print(void);
void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
-
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Preferred over simple_strtoul(). Return code must be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
- */
- if (sizeof(unsigned long) == sizeof(unsigned long long) &&
- __alignof__(unsigned long) == __alignof__(unsigned long long))
- return kstrtoull(s, base, (unsigned long long *)res);
- else
- return _kstrtoul(s, base, res);
-}
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Preferred over simple_strtol(). Return code must be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(long, long long) = 0.
- */
- if (sizeof(long) == sizeof(long long) &&
- __alignof__(long) == __alignof__(long long))
- return kstrtoll(s, base, (long long *)res);
- else
- return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
- return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
- return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
- return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
- return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
- return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
- return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
- return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
- return kstrtoint_from_user(s, count, base, res);
-}
-
-/*
- * Use kstrto<foo> instead.
- *
- * NOTE: simple_strto<foo> does not check for the range overflow and,
- * depending on the input, may give interesting results.
- *
- * Use these functions if and only if you cannot use kstrto<foo>, because
- * the conversion ends on the first non-digit character, which may be far
- * beyond the supported range. It might be useful to parse the strings like
- * 10x50 or 12:21 without altering original string or temporary buffer in use.
- * Keep in mind above caveat.
- */
-
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
-
-extern int num_to_str(char *buf, int size,
- unsigned long long num, unsigned int width);
-
-/* lib/printf utilities */
-
-extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
-extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
-extern __printf(3, 4)
-int snprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(3, 4)
-int scnprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3) __malloc
-char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0) __malloc
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-extern __printf(2, 0)
-const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
-
-extern __scanf(2, 3)
-int sscanf(const char *, const char *, ...);
-extern __scanf(2, 0)
-int vsscanf(const char *, const char *, va_list);
-
-extern int get_option(char **str, int *pint);
-extern char *get_options(const char *str, int nints, int *ints);
-extern unsigned long long memparse(const char *ptr, char **retptr);
-extern bool parse_option_str(const char *str, const char *option);
-extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
-extern int init_kernel_text(unsigned long addr);
-extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-u64 int_pow(u64 base, unsigned int exp);
-unsigned long int_sqrt(unsigned long);
-
-#if BITS_PER_LONG < 64
-u32 int_sqrt64(u64 x);
-#else
-static inline u32 int_sqrt64(u64 x)
-{
- return (u32)int_sqrt(x);
-}
-#endif
-
-#ifdef CONFIG_SMP
-extern unsigned int sysctl_oops_all_cpu_backtrace;
-#else
-#define sysctl_oops_all_cpu_backtrace 0
-#endif /* CONFIG_SMP */
-
extern void bust_spinlocks(int yes);
-extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
-extern int panic_timeout;
-extern unsigned long panic_print;
-extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
-extern int panic_on_warn;
-extern unsigned long panic_on_taint;
-extern bool panic_on_taint_nousertaint;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_panic_on_stackoverflow;
-
-extern bool crash_kexec_post_notifiers;
-/*
- * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
- * holds a CPU number which is executing panic() currently. A value of
- * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
- */
-extern atomic_t panic_cpu;
-#define PANIC_CPU_INVALID -1
-
-/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
-static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
-{
- if (panic_timeout == arch_default_timeout)
- panic_timeout = timeout;
-}
-extern const char *print_tainted(void);
-enum lockdep_ok {
- LOCKDEP_STILL_OK,
- LOCKDEP_NOW_UNRELIABLE
-};
-extern void add_taint(unsigned flag, enum lockdep_ok);
-extern int test_taint(unsigned flag);
-extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
@@ -576,6 +183,7 @@ extern bool early_boot_irqs_disabled;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
@@ -583,71 +191,13 @@ extern enum system_states {
SYSTEM_SUSPEND,
} system_state;
-/* This cannot be an enum because some may be used in assembly source. */
-#define TAINT_PROPRIETARY_MODULE 0
-#define TAINT_FORCED_MODULE 1
-#define TAINT_CPU_OUT_OF_SPEC 2
-#define TAINT_FORCED_RMMOD 3
-#define TAINT_MACHINE_CHECK 4
-#define TAINT_BAD_PAGE 5
-#define TAINT_USER 6
-#define TAINT_DIE 7
-#define TAINT_OVERRIDDEN_ACPI_TABLE 8
-#define TAINT_WARN 9
-#define TAINT_CRAP 10
-#define TAINT_FIRMWARE_WORKAROUND 11
-#define TAINT_OOT_MODULE 12
-#define TAINT_UNSIGNED_MODULE 13
-#define TAINT_SOFTLOCKUP 14
-#define TAINT_LIVEPATCH 15
-#define TAINT_AUX 16
-#define TAINT_RANDSTRUCT 17
-#define TAINT_FLAGS_COUNT 18
-#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
-
-struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
-};
-
-extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
-
-extern const char hex_asc[];
-#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
-#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack(char *buf, u8 byte)
-{
- *buf++ = hex_asc_hi(byte);
- *buf++ = hex_asc_lo(byte);
- return buf;
-}
-
-extern const char hex_asc_upper[];
-#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
-#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack_upper(char *buf, u8 byte)
-{
- *buf++ = hex_asc_upper_hi(byte);
- *buf++ = hex_asc_upper_lo(byte);
- return buf;
-}
-
-extern int hex_to_bin(char ch);
-extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
-extern char *bin2hex(char *dst, const void *src, size_t count);
-
-bool mac_pton(const char *s, u8 *mac);
-
/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
* Use tracing_on/tracing_off when you want to quickly turn on or off
* tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
* file, which gives a means for the kernel and userspace to interact.
* Place a tracing_off() in the kernel where you want tracing to end.
* From user space, examine the trace, and then echo 1 > tracing_on
@@ -665,6 +215,7 @@ enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
+ DUMP_PARAM,
};
#ifdef CONFIG_TRACING
@@ -729,7 +280,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_printk_check_format(fmt, ##args); \
@@ -773,7 +324,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
@@ -795,7 +346,7 @@ extern void trace_dump_stack(int skip);
do { \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
@@ -834,192 +385,6 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/*
- * min()/max()/clamp() macros must accomplish three things:
- *
- * - avoid multiple evaluations of the arguments (so side-effects like
- * "x++" happen only once) when non-constant.
- * - perform strict type-checking (to generate warnings instead of
- * nasty runtime surprises). See the "unnecessary" pointer comparison
- * in __typecheck().
- * - retain the result as a constant expression when called with only
- * constant expressions (to avoid tripping VLA warnings in stack
- * allocation usage).
- */
-#define __typecheck(x, y) \
- (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
-#define __no_side_effects(x, y) \
- (__is_constexpr(x) && __is_constexpr(y))
-
-#define __safe_cmp(x, y) \
- (__typecheck(x, y) && __no_side_effects(x, y))
-
-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
-
-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
- __cmp(unique_x, unique_y, op); })
-
-#define __careful_cmp(x, y, op) \
- __builtin_choose_expr(__safe_cmp(x, y), \
- __cmp(x, y, op), \
- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
-
-/**
- * min - return minimum of two values of the same or compatible types
- * @x: first value
- * @y: second value
- */
-#define min(x, y) __careful_cmp(x, y, <)
-
-/**
- * max - return maximum of two values of the same or compatible types
- * @x: first value
- * @y: second value
- */
-#define max(x, y) __careful_cmp(x, y, >)
-
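To make the single-evaluation guarantee above concrete, a small sketch (not from the patch) of how min() behaves with a side-effecting argument:

	/* Illustrative sketch: the argument with a side effect runs once. */
	int i = 0;
	int a[] = { 3, 1, 4 };
	int m = min(a[i++], 5);
	/* With min(): a[i++] is evaluated exactly once, so m == 3 and i == 1.
	 * A naive ((x) < (y) ? (x) : (y)) macro would evaluate a[i++] twice,
	 * yielding m == a[1] == 1 and i == 2. */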
-/**
- * min3 - return minimum of three values
- * @x: first value
- * @y: second value
- * @z: third value
- */
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
-
-/**
- * max3 - return maximum of three values
- * @x: first value
- * @y: second value
- * @z: third value
- */
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
-
-/**
- * min_not_zero - return the minimum that is _not_ zero, unless both are zero
- * @x: value1
- * @y: value2
- */
-#define min_not_zero(x, y) ({ \
- typeof(x) __x = (x); \
- typeof(y) __y = (y); \
- __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-
-/**
- * clamp - return a value clamped to a given range with strict typechecking
- * @val: current value
- * @lo: lowest allowable value
- * @hi: highest allowable value
- *
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
- * same type as @val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
- */
-
-/**
- * min_t - return minimum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
-
-/**
- * max_t - return maximum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
-
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * @type to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument @val is. This is useful when @val is an unsigned
- * type and @lo and @hi are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-
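A short sketch (hypothetical values) of the signed/unsigned pitfall that clamp_val() is documented to avoid:

	/* Illustrative sketch: literal bounds default to signed int. */
	u8 level = 200;
	u8 v = clamp_val(level, 16, 64);	/* compares in u8; v == 64 */
	/* clamp(level, 16, 64) would trip the strict typecheck (a compiler
	 * warning from the pointer comparison), since 16 and 64 are signed
	 * int while level is u8. */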
-
-/**
- * swap - swap values of @a and @b
- * @a: first value
- * @b: second value
- */
-#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
-
-/* This counts to 12. Any more, and it will return the 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-
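How the counting trick above works, sketched via its expansions (illustrative, not part of the patch):

	/* __COUNT_ARGS() prepends the arguments to a descending 12..0 list
	 * and selects the 14th parameter (_n), which lands on the count: */
	COUNT_ARGS()		/* -> 0 */
	COUNT_ARGS(a)		/* -> 1 */
	COUNT_ARGS(a, b, c)	/* -> 3 */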
-#define __CONCAT(a, b) a ## b
-#define CONCATENATE(a, b) __CONCAT(a, b)
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
-
-/**
- * container_of_safe - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
- */
-#define container_of_safe(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
- ((type *)(__mptr - offsetof(type, member))); })
-
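A minimal usage sketch for container_of(); the struct and helper here are hypothetical:

	struct my_device {
		int id;
		struct list_head node;	/* embedded member */
	};

	/* Recover the enclosing object from a pointer to its member. */
	static struct my_device *to_my_device(struct list_head *entry)
	{
		return container_of(entry, struct my_device, node);
	}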
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
new file mode 100644
index 000000000000..90451e2e12bd
--- /dev/null
+++ b/include/linux/kernel_read_file.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_READ_FILE_H
+#define _LINUX_KERNEL_READ_FILE_H
+
+#include <linux/file.h>
+#include <linux/types.h>
+
+/* This is a list of *what* is being read, not *how* nor *where*. */
+#define __kernel_read_file_id(id) \
+ id(UNKNOWN, unknown) \
+ id(FIRMWARE, firmware) \
+ id(MODULE, kernel-module) \
+ id(KEXEC_IMAGE, kexec-image) \
+ id(KEXEC_INITRAMFS, kexec-initramfs) \
+ id(POLICY, security-policy) \
+ id(X509_CERTIFICATE, x509-certificate) \
+ id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
+enum kernel_read_file_id {
+ __kernel_read_file_id(__fid_enumify)
+};
+
+static const char * const kernel_read_file_str[] = {
+ __kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+ if ((unsigned int)id >= READING_MAX_ID)
+ return kernel_read_file_str[READING_UNKNOWN];
+
+ return kernel_read_file_str[id];
+}
+
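The id() parameter above is an X-macro: the same list expands once into enum constants and once into strings. An illustrative (trimmed) view of the generated code:

	/* Illustrative expansion, not part of the patch: */
	enum kernel_read_file_id {
		READING_UNKNOWN,
		READING_FIRMWARE,
		READING_MODULE,
		/* ... */
		READING_MAX_ID,
	};

	static const char * const kernel_read_file_str[] = {
		"unknown", "firmware", "kernel-module", /* ... */ "",
	};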
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+
+#endif /* _LINUX_KERNEL_READ_FILE_H */
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 89f0745c096d..9935f7ecbfb9 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,6 +28,9 @@ enum cpu_usage_stat {
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
+#ifdef CONFIG_SCHED_CORE
+ CPUTIME_FORCEIDLE,
+#endif
NR_STATS,
};
@@ -49,6 +52,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
+extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
@@ -64,16 +68,26 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
return kstat_cpu(cpu).softirqs[irq];
}
+static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
+{
+ int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ sum += kstat_softirqs_cpu(i, cpu);
+
+ return sum;
+}
+
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-extern unsigned int kstat_irqs(unsigned int irq);
extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
*/
-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
return kstat_cpu(cpu).irqs_sum;
}
@@ -103,6 +117,7 @@ extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
+extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
@@ -115,4 +130,8 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_idle_ticks(unsigned long ticks);
+#ifdef CONFIG_SCHED_CORE
+extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
+#endif
+
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 89f6a4214a70..87c79d076d6d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -6,7 +6,6 @@
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
-#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -14,14 +13,19 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/cache.h>
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
+struct vm_operations_struct;
struct super_block;
struct file_system_type;
struct poll_table_struct;
@@ -31,6 +35,62 @@ struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;
+/*
+ * NR_KERNFS_LOCK_BITS determines the size (NR_KERNFS_LOCKS) of the
+ * hash table of locks.
+ * Having a small hash table would impact scalability, since more and
+ * more kernfs_node objects would end up sharing the same lock, while
+ * a very large hash table would waste memory.
+ *
+ * At the moment size of hash table of locks is being set based on
+ * the number of CPUs as follows:
+ *
+ * NR_CPU NR_KERNFS_LOCK_BITS NR_KERNFS_LOCKS
+ * 1 1 2
+ * 2-3 2 4
+ * 4-7 4 16
+ * 8-15 6 64
+ * 16-31 8 256
+ * 32 and more 10 1024
+ *
+ * The above relation between NR_CPU and the number of locks is based
+ * on internal experimentation: booting qemu with different -smp
+ * values, performing sysfs operations on all CPUs, and observing how
+ * an increase in the number of locks impacts the completion time of
+ * those operations on each CPU.
+ */
+#ifdef CONFIG_SMP
+#define NR_KERNFS_LOCK_BITS (2 * (ilog2(NR_CPUS < 32 ? NR_CPUS : 32)))
+#else
+#define NR_KERNFS_LOCK_BITS 1
+#endif
+
+#define NR_KERNFS_LOCKS (1 << NR_KERNFS_LOCK_BITS)
+
+/*
+ * There's one kernfs_open_file for each open file and one kernfs_open_node
+ * for each kernfs_node with one or more open files.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * kernfs_open_file.
+ *
+ * kernfs_open_files are chained at kernfs_open_node->files, which is
+ * protected by kernfs_global_locks.open_file_mutex[i].
+ *
+ * To reduce contention in sysfs access that would arise from a single
+ * global lock, use an array of locks (e.g. open_file_mutex) and hash the
+ * kernfs_node object address to pick the lock to use.
+ *
+ * Hashed mutexes are safe to use here because the operations using them
+ * don't rely on global exclusion.
+ *
+ * In the future we intend to replace other global locks with hashed ones
+ * as well. kernfs_global_locks acts as a holder for all such hash tables.
+ */
+struct kernfs_global_locks {
+ struct mutex open_file_mutex[NR_KERNFS_LOCKS];
+};
+
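A sketch of how a kernfs_node address would be hashed to pick one of these mutexes; the helper name is hypothetical (the real lookup lives in kernfs internals):

	#include <linux/hash.h>

	/* Hypothetical helper: pick the mutex guarding a node's open files. */
	static struct mutex *open_file_mutex_for(struct kernfs_global_locks *locks,
						 struct kernfs_node *kn)
	{
		int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

		return &locks->open_file_mutex[idx];
	}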
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
@@ -48,10 +108,12 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
+ KERNFS_HIDDEN = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
+ KERNFS_REMOVING = 0x4000,
};
/* @flags for kernfs_create_root() */
@@ -98,6 +160,11 @@ struct kernfs_elem_dir {
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
+ /*
+ * Monotonic revision counter, used to identify if a directory
+ * node has changed during negative dentry revalidation.
+ */
+ unsigned long rev;
};
struct kernfs_elem_symlink {
@@ -106,7 +173,7 @@ struct kernfs_elem_symlink {
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
- struct kernfs_open_node *open;
+ struct kernfs_open_node __rcu *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
@@ -116,7 +183,7 @@ struct kernfs_elem_attr {
 * kernfs node is represented by a single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
- * As long as s_count reference is held, the kernfs_node itself is
+ * As long as count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
@@ -139,23 +206,25 @@ struct kernfs_node {
const void *ns; /* namespace tag */
unsigned int hash; /* ns + name hash */
+ unsigned short flags;
+ umode_t mode;
+
union {
struct kernfs_elem_dir dir;
struct kernfs_elem_symlink symlink;
struct kernfs_elem_attr attr;
};
- void *priv;
-
/*
* 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit,
* the low 32bits are ino and upper generation.
*/
u64 id;
- unsigned short flags;
- umode_t mode;
+ void *priv;
struct kernfs_iattrs *iattr;
+
+ struct rcu_head rcu;
};
/*
@@ -177,22 +246,7 @@ struct kernfs_syscall_ops {
struct kernfs_root *root);
};
-struct kernfs_root {
- /* published fields */
- struct kernfs_node *kn;
- unsigned int flags; /* KERNFS_ROOT_* flags */
-
- /* private fields, do not use outside kernfs proper */
- struct idr ino_idr;
- u32 last_id_lowbits;
- u32 id_highbits;
- struct kernfs_syscall_ops *syscall_ops;
-
- /* list of kernfs_super_info of this root, protected by kernfs_mutex */
- struct list_head supers;
-
- wait_queue_head_t deactivate_waitq;
-};
+struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);
struct kernfs_open_file {
/* published fields */
@@ -264,10 +318,7 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
+ loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
};
/*
@@ -383,6 +434,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
@@ -501,6 +553,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
const struct iattr *iattr)
{ return -ENOSYS; }
+static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt)
+{ return -ENOSYS; }
+
static inline void kernfs_notify(struct kernfs_node *kn) { }
static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
@@ -563,30 +619,6 @@ kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
priv, NULL);
}
-static inline struct kernfs_node *
-kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, kuid_t uid, kgid_t gid,
- loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns)
-{
- struct lock_class_key *key = NULL;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- key = (struct lock_class_key *)&ops->lockdep_key;
-#endif
- return __kernfs_create_file(parent, name, mode, uid, gid,
- size, ops, priv, ns, key);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
- loff_t size, const struct kernfs_ops *ops, void *priv)
-{
- return kernfs_create_file_ns(parent, name, mode,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
- size, ops, priv, NULL);
-}
-
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
const char *name)
{
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9e93bef52968..060835bb82d5 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -15,17 +15,24 @@
#if !defined(__ASSEMBLY__)
-#include <linux/crash_core.h>
+#include <linux/vmcore_info.h>
+#include <linux/crash_reserve.h>
#include <asm/io.h>
+#include <linux/range.h>
#include <uapi/linux/kexec.h>
+#include <linux/verification.h>
+
+extern note_buf_t __percpu *crash_notes;
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/compat.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/kexec.h>
+#include <linux/crash_core.h>
/* Verify architecture specific macros are defined */
@@ -183,46 +190,82 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
-/* Architectures may override the below functions */
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void *arch_kexec_kernel_image_load(struct kimage *image);
-int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int arch_kexec_apply_relocations(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#ifndef arch_kexec_kernel_image_probe
+static inline int
+arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
+{
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+#endif
+
+#ifndef arch_kimage_file_post_load_cleanup
+static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ return kexec_image_post_load_cleanup_default(image);
+}
+#endif
+
#ifdef CONFIG_KEXEC_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
- unsigned long buf_len);
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
+#endif
#endif
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN 4096
-
-struct crash_mem_range {
- u64 start, end;
-};
+#ifndef arch_kexec_locate_mem_hole
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf: Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ return kexec_locate_mem_hole(kbuf);
+}
+#endif
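The #ifndef pattern above gives each hook a generic default while letting an architecture substitute its own; the override is done by defining the macro before this header is parsed, roughly as follows (header location and prototype shown are hypothetical):

	/* In a hypothetical <asm/kexec.h>: */
	#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
	int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);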
-struct crash_mem {
- unsigned int max_nr_ranges;
- unsigned int nr_ranges;
- struct crash_mem_range ranges[];
-};
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("RELA relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
-extern int crash_exclude_mem_range(struct crash_mem *mem,
- unsigned long long mstart,
- unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
- void **addr, unsigned long *sz);
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("REL relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
#endif /* CONFIG_KEXEC_FILE */
#ifdef CONFIG_KEXEC_ELF
@@ -275,6 +318,10 @@ struct kimage {
unsigned int preserve_context : 1;
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
+#ifdef CONFIG_CRASH_HOTPLUG
+ /* If set, allow changes to elfcorehdr of kexec_load'd image */
+ unsigned int update_elfcorehdr:1;
+#endif
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
@@ -300,6 +347,25 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
#endif
+
+#ifdef CONFIG_CRASH_HOTPLUG
+ int hp_action;
+ int elfcorehdr_index;
+ bool elfcorehdr_updated;
+#endif
+
+#ifdef CONFIG_IMA_KEXEC
+ /* Virtual address of IMA measurement buffer for kexec syscall */
+ void *ima_buffer;
+
+ phys_addr_t ima_buffer_addr;
+ size_t ima_buffer_size;
+#endif
+
+ /* Core ELF header buffer */
+ void *elf_headers;
+ unsigned long elf_headers_sz;
+ unsigned long elf_load_addr;
};
/* kexec interface functions */
@@ -309,16 +375,15 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
-extern void __crash_kexec(struct pt_regs *);
-extern void crash_kexec(struct pt_regs *);
-int kexec_should_crash(struct task_struct *);
-int kexec_crash_loaded(void);
-void crash_save_cpu(struct pt_regs *regs, int cpu);
-extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
+
+#ifndef machine_kexec_post_load
+static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
+#endif
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
-extern int kexec_load_disabled;
+
+bool kexec_load_permitted(int kexec_image_type);
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
@@ -326,31 +391,18 @@ extern int kexec_load_disabled;
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS KEXEC_ON_CRASH
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR)
#else
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR)
#endif
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
-
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
-extern note_buf_t __percpu *crash_notes;
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
-int crash_shrink_memory(unsigned long new_size);
-size_t crash_get_memory_size(void);
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-
-void arch_kexec_protect_crashkres(void);
-void arch_kexec_unprotect_crashkres(void);
-
#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
{
@@ -379,6 +431,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
}
#endif
+#ifndef crash_free_reserved_phys_range
+static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE)
+ free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
+}
+#endif
+
static inline unsigned long virt_to_boot_phys(void *addr)
{
return phys_to_boot_phys(__pa((unsigned long)addr));
@@ -397,6 +459,13 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, ...) \
+ printk("%s" fmt, \
+ kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG, \
+ ##__VA_ARGS__)
+
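kexec_dprintk() selects its log level at run time by prepending the level prefix string through the "%s" format. A usage sketch (the message and variable are hypothetical):

	/* Emitted at KERN_INFO when kexec_file_dbg_print is set, otherwise
	 * at KERN_DEBUG: */
	kexec_dprintk("kexec_file: kernel image size 0x%lx\n", kernel_len);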
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
@@ -407,6 +476,12 @@ static inline int kexec_crash_loaded(void) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
#endif /* !defined(__ASSEMBLY__) */
#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 2ab2d6d6aeab..5caf3ce82373 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -29,6 +29,7 @@ struct kernel_pkey_params;
* clear the contents.
*/
struct key_preparsed_payload {
+ const char *orig_description; /* Actual or proposed description (maybe NULL) */
char *description; /* Proposed key description (or NULL) */
union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
@@ -72,6 +73,7 @@ struct key_type {
unsigned int flags;
#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
/* vet a description */
int (*vet_description)(const char *description);
diff --git a/include/linux/key.h b/include/linux/key.h
index 0f2e24f13c2b..943a432da3ae 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -88,6 +88,12 @@ enum key_need_perm {
KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
};
+enum key_lookup_flag {
+ KEY_LOOKUP_CREATE = 0x01,
+ KEY_LOOKUP_PARTIAL = 0x02,
+ KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -289,6 +295,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -360,7 +367,7 @@ static inline struct key *request_key(struct key_type *type,
* completion of keys undergoing construction with a non-interruptible wait.
*/
#define request_key_net(type, description, net, callout_info) \
- request_key_tag(type, description, net->key_domain, callout_info);
+ request_key_tag(type, description, net->key_domain, callout_info)
/**
* request_key_net_rcu - Request a key for a net namespace under RCU conditions
@@ -372,13 +379,21 @@ static inline struct key *request_key(struct key_type *type,
* network namespace are used.
*/
#define request_key_net_rcu(type, description, net) \
- request_key_rcu(type, description, net->key_domain);
+ request_key_rcu(type, description, net->key_domain)
#endif /* CONFIG_NET */
extern int wait_for_key_construction(struct key *key, bool intr);
extern int key_validate(const struct key *key);
+extern key_ref_t key_create(key_ref_t keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags);
+
extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *type,
const char *description,
@@ -475,9 +490,6 @@ do { \
rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table key_sysctls[];
-#endif
/*
* the userspace interface
*/
@@ -503,6 +515,7 @@ extern void key_init(void);
#define key_init() do { } while(0)
#define key_free_user_ns(ns) do { } while(0)
#define key_remove_domain(d) do { } while(0)
+#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h
deleted file mode 100644
index 18f3f5346843..000000000000
--- a/include/linux/keyslot-manager.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2019 Google LLC
- */
-
-#ifndef __LINUX_KEYSLOT_MANAGER_H
-#define __LINUX_KEYSLOT_MANAGER_H
-
-#include <linux/bio.h>
-#include <linux/blk-crypto.h>
-
-struct blk_keyslot_manager;
-
-/**
- * struct blk_ksm_ll_ops - functions to manage keyslots in hardware
- * @keyslot_program: Program the specified key into the specified slot in the
- * inline encryption hardware.
- * @keyslot_evict: Evict key from the specified keyslot in the hardware.
- * The key is provided so that e.g. dm layers can evict
- * keys from the devices that they map over.
- * Returns 0 on success, -errno otherwise.
- *
- * This structure should be provided by storage device drivers when they set up
- * a keyslot manager - this structure holds the function ptrs that the keyslot
- * manager will use to manipulate keyslots in the hardware.
- */
-struct blk_ksm_ll_ops {
- int (*keyslot_program)(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- unsigned int slot);
- int (*keyslot_evict)(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- unsigned int slot);
-};
-
-struct blk_keyslot_manager {
- /*
- * The struct blk_ksm_ll_ops that this keyslot manager will use
- * to perform operations like programming and evicting keys on the
- * device
- */
- struct blk_ksm_ll_ops ksm_ll_ops;
-
- /*
- * The maximum number of bytes supported for specifying the data unit
- * number.
- */
- unsigned int max_dun_bytes_supported;
-
- /*
- * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represents
- * whether a crypto mode and data unit size are supported. The i'th
- * bit of crypto_modes_supported[crypto_mode] is set iff a data unit
- * size of (1 << i) is supported. We only support data unit sizes
- * that are powers of 2.
- */
- unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
-
- /* Device for runtime power management (NULL if none) */
- struct device *dev;
-
- /* Here onwards are *private* fields for internal keyslot manager use */
-
- unsigned int num_slots;
-
- /* Protects programming and evicting keys from the device */
- struct rw_semaphore lock;
-
- /* List of idle slots, with least recently used slot at front */
- wait_queue_head_t idle_slots_wait_queue;
- struct list_head idle_slots;
- spinlock_t idle_slots_lock;
-
- /*
- * Hash table which maps struct *blk_crypto_key to keyslots, so that we
- * can find a key's keyslot in O(1) time rather than O(num_slots).
- * Protected by 'lock'.
- */
- struct hlist_head *slot_hashtable;
- unsigned int log_slot_ht_size;
-
- /* Per-keyslot data */
- struct blk_ksm_keyslot *slots;
-};
-
-int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots);
-
-blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- struct blk_ksm_keyslot **slot_ptr);
-
-unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot);
-
-void blk_ksm_put_slot(struct blk_ksm_keyslot *slot);
-
-bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_config *cfg);
-
-int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key);
-
-void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm);
-
-void blk_ksm_destroy(struct blk_keyslot_manager *ksm);
-
-#endif /* __LINUX_KEYSLOT_MANAGER_H */
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
new file mode 100644
index 000000000000..88100cc9caba
--- /dev/null
+++ b/include/linux/kfence.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
+ * handler integration. For more info see Documentation/dev-tools/kfence.rst.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _LINUX_KFENCE_H
+#define _LINUX_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KFENCE
+
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
+extern unsigned long kfence_sample_interval;
+
+/*
+ * We allocate an even number of pages, as it simplifies calculations to map
+ * address to metadata indices; effectively, the very first page serves as an
+ * extended guard page, but otherwise has no special purpose.
+ */
+#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern char *__kfence_pool;
+
+DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+extern atomic_t kfence_allocation_gate;
+
+/**
+ * is_kfence_address() - check if an address belongs to KFENCE pool
+ * @addr: address to check
+ *
+ * Return: true or false depending on whether the address is within the KFENCE
+ * object range.
+ *
+ * KFENCE objects live in a separate page range and are not to be intermixed
+ * with regular heap objects (e.g. KFENCE objects must never be added to the
+ * allocator freelists). Failing to do so may and will result in heap
+ * corruptions, therefore is_kfence_address() must be used to check whether
+ * an object requires specific handling.
+ *
+ * Note: This function may be used in fast-paths, and is performance critical.
+ * Future changes should take this into account; for instance, we want to avoid
+ * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
+ * constant (until immediate patching support is added to the kernel).
+ */
+static __always_inline bool is_kfence_address(const void *addr)
+{
+ /*
+ * The __kfence_pool != NULL check is required to deal with the case
+ * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+ * the slow-path after the range-check!
+ */
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
+}
+
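The single comparison above folds both bounds checks into one: the subtraction wraps around for addresses below the pool start. The idiom in isolation (a sketch, not from the patch):

	/* One unsigned compare checks base <= p < base + size: if p < base,
	 * (p - base) wraps to a huge value and fails the < test. */
	static bool in_range(unsigned long p, unsigned long base,
			     unsigned long size)
	{
		return (p - base) < size;
	}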
+/**
+ * kfence_alloc_pool_and_metadata() - allocate the KFENCE pool and KFENCE
+ * metadata via memblock
+ */
+void __init kfence_alloc_pool_and_metadata(void);
+
+/**
+ * kfence_init() - perform KFENCE initialization at boot time
+ *
+ * Requires that kfence_alloc_pool_and_metadata() was called before. This sets
+ * up the allocation gate timer, and requires that workqueues are available.
+ */
+void __init kfence_init(void);
+
+/**
+ * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
+ * @s: cache being shut down
+ *
+ * Before shutting down a cache, one must ensure there are no remaining objects
+ * allocated from it. Because KFENCE objects are not referenced from the cache
+ * directly, we need to check them here.
+ *
+ * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
+ * not return if allocated objects still exist: it prints an error message and
+ * simply aborts destruction of a cache, leaking memory.
+ *
+ * If the only such objects are KFENCE objects, we will not leak the entire
+ * cache, but instead try to provide more useful debug info by making allocated
+ * objects "zombie allocations". Objects may then still be used or freed (which
+ * is handled gracefully), but usage will result in showing KFENCE error reports
+ * which include stack traces to the user of the object, the original allocation
+ * site, and caller to shutdown_cache().
+ */
+void kfence_shutdown_cache(struct kmem_cache *s);
+
+/*
+ * Allocate a KFENCE object. Allocators must not call this function directly,
+ * use kfence_alloc() instead.
+ */
+void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
+
+/**
+ * kfence_alloc() - allocate a KFENCE object with a low probability
+ * @s: struct kmem_cache with object requirements
+ * @size: exact size of the object to allocate (can be less than @s->size
+ * e.g. for kmalloc caches)
+ * @flags: GFP flags
+ *
+ * Return:
+ * * NULL - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE object.
+ *
+ * kfence_alloc() should be inserted into the heap allocation fast path,
+ * allowing it to transparently return KFENCE-allocated objects with a low
+ * probability using a static branch (the probability is controlled by the
+ * kfence.sample_interval boot parameter).
+ */
+static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+{
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
+#else
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
+#endif
+ if (likely(atomic_read(&kfence_allocation_gate)))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
+}
+
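A paraphrased sketch of the fast-path integration the comment above describes (the fallback name is hypothetical, not the real slab entry point):

	/* Hypothetical allocator hook: try KFENCE first, fall back otherwise. */
	void *obj = kfence_alloc(s, size, flags);

	if (!obj)
		obj = regular_slab_alloc(s, size, flags);	/* hypothetical */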
+/**
+ * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
+ * @addr: pointer to a heap object
+ *
+ * Return:
+ * * 0 - not a KFENCE object, must call __ksize() instead,
+ * * non-0 - this many bytes can be accessed without causing a memory error.
+ *
+ * kfence_ksize() returns the number of bytes requested for a KFENCE object at
+ * allocation time. This number may be less than the object size of the
+ * corresponding struct kmem_cache.
+ */
+size_t kfence_ksize(const void *addr);
+
+/**
+ * kfence_object_start() - find the beginning of a KFENCE object
+ * @addr: address within a KFENCE-allocated object
+ *
+ * Return: address of the beginning of the object.
+ *
+ * SL[AU]B-allocated objects are laid out within a page one by one, so it is
+ * easy to calculate the beginning of an object given a pointer inside it and
+ * the object size. The same is not true for KFENCE, which places a single
+ * object at either end of the page. This helper function is used to find the
+ * beginning of a KFENCE-allocated object.
+ */
+void *kfence_object_start(const void *addr);
+
+/**
+ * __kfence_free() - release a KFENCE heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Requires: is_kfence_address(addr)
+ *
+ * Release a KFENCE object and mark it as freed.
+ */
+void __kfence_free(void *addr);
+
+/**
+ * kfence_free() - try to release an arbitrary heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Return:
+ * * false - object doesn't belong to KFENCE pool and was ignored,
+ * * true - object was released to KFENCE pool.
+ *
+ * Release a KFENCE object and mark it as freed. May be called on any object,
+ * even non-KFENCE objects, to simplify integration of the hooks into the
+ * allocator's free codepath. The allocator must check the return value to
+ * determine if it was a KFENCE object or not.
+ */
+static __always_inline __must_check bool kfence_free(void *addr)
+{
+ if (!is_kfence_address(addr))
+ return false;
+ __kfence_free(addr);
+ return true;
+}
+
+/**
+ * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
+ * @addr: faulting address
+ * @is_write: is access a write
+ * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
+ *
+ * Return:
+ * * false - address outside KFENCE pool,
+ * * true - page fault handled by KFENCE, no additional handling required.
+ *
+ * A page fault inside KFENCE pool indicates a memory error, such as an
+ * out-of-bounds access, a use-after-free or an invalid memory access. In these
+ * cases KFENCE prints an error message and marks the offending page as
+ * present, so that the kernel can proceed.
+ */
+bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
+#else /* CONFIG_KFENCE */
+
+#define kfence_sample_interval (0)
+
+static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_alloc_pool_and_metadata(void) { }
+static inline void kfence_init(void) { }
+static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
+static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
+static inline size_t kfence_ksize(const void *addr) { return 0; }
+static inline void *kfence_object_start(const void *addr) { return NULL; }
+static inline void __kfence_free(void *addr) { }
+static inline bool __must_check kfence_free(void *addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
+ struct pt_regs *regs)
+{
+ return false;
+}
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
+#endif
+
+#endif /* _LINUX_KFENCE_H */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 86249476b57f..0b35a41440ff 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -688,7 +688,7 @@ __kfifo_uint_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 477b8b7c908f..76e891ee9e37 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/atomic.h>
+#include <linux/kprobes.h>
#ifdef CONFIG_HAVE_ARCH_KGDB
#include <asm/kgdb.h>
#endif
@@ -104,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
*/
/**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
*
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
@@ -228,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
- * kgdb_arch_late - Perform any architecture specific initalization.
+ * kgdb_arch_late - Perform any architecture specific initialization.
*
- * This function will handle the late initalization of any
+ * This function will handle the late initialization of any
* architecture specific callbacks. This is an optional function for
* handling things like late initialization of hw breakpoints. The
* default implementation does nothing.
@@ -324,7 +325,6 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
-extern void kgdb_schedule_breakpoint(void);
extern int kgdb_has_hit_break(unsigned long addr);
extern int
@@ -335,6 +335,23 @@ extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
atomic_t *snd_rdy);
extern void gdbstub_exit(int status);
+/*
+ * kgdb and kprobes both use the same (kprobe) blocklist, which makes sense
+ * given that they are both typically hooked up to the same trap, meaning
+ * that on most architectures one cannot be used to debug the other.
+ *
+ * However, on architectures where kprobes is not (yet) implemented we permit
+ * breakpoints everywhere rather than blocking everything by default.
+ */
+static inline bool kgdb_within_blocklist(unsigned long addr)
+{
+#ifdef CONFIG_KGDB_HONOUR_BLOCKLIST
+ return within_kprobe_blacklist(addr);
+#else
+ return false;
+#endif
+}
+
extern int kgdb_single_step;
extern atomic_t kgdb_active;
#define in_dbg_master() \
@@ -342,9 +359,12 @@ extern atomic_t kgdb_active;
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
extern void kgdb_panic(const char *msg);
+extern void kgdb_free_init_mem(void);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
+static inline void kgdb_free_init_mem(void) { }
+static inline int kgdb_nmicallback(int cpu, void *regs) { return 1; }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index bc45ea1efbf7..f68865e19b0b 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -4,45 +4,33 @@
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
-extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags);
+extern void khugepaged_min_free_kbytes_update(void);
+extern bool current_is_khugepaged(void);
#ifdef CONFIG_SHMEM
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
#else
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
+ return 0;
}
#endif
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
- return __khugepaged_enter(mm);
- return 0;
+ __khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
@@ -50,40 +38,30 @@ static inline void khugepaged_exit(struct mm_struct *mm)
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
__khugepaged_exit(mm);
}
-
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
- if ((khugepaged_always() ||
- (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
- !(vm_flags & VM_NOHUGEPAGE) &&
- !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- if (__khugepaged_enter(vma->vm_mm))
- return -ENOMEM;
- return 0;
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
- return 0;
}
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
return 0;
}
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+
+static inline void khugepaged_min_free_kbytes_update(void)
+{
+}
+
+static inline bool current_is_khugepaged(void)
{
+ return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 34684b2026ab..6a3cd1bf4680 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -29,10 +29,9 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
-extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -107,15 +106,12 @@ static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
-static inline void kmemleak_not_leak_phys(phys_addr_t phys)
-{
-}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..c4cae333deec
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
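A sketch of how a subsystem might use these one-off annotations (the buffer and fill helper are hypothetical):

	/* Hypothetical snippet using the KMSAN annotations: */
	char buf[64];

	kmsan_poison_memory(buf, sizeof(buf), GFP_KERNEL); /* treat as uninit */
	fill_first_half(buf, 32);			   /* hypothetical */
	kmsan_unpoison_memory(buf, 32);		/* first half now initialized */
	kmsan_check_memory(buf, 32);		/* no report expected */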
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..e0c23a32cdf0
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to the buddy allocator or held back to be
+ * used as metadata pages.
+ */
+bool __init __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contain __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * the vmalloc metadata address range. Returns 0 on success; callers must
+ * check for a non-zero return value.
+ */
+int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift);
+
+/**
+ * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_vmap_pages_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+ * virtual memory. Returns 0 on success; callers must check for a non-zero
+ * return value.
+ */
+int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @page: first page of the buffer.
+ * @offset: offset of the buffer within the first page.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffer, if it is copied to the device;
+ * * initializes the buffer, if it is copied from the device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to the device;
+ * * initializes the buffers, if they are copied from the device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline int __must_check kmsan_vmap_pages_range_noflush(
+ unsigned long start, unsigned long end, pgprot_t prot,
+ struct page **pages, unsigned int page_shift)
+{
+ return 0;
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+ return 0;
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(struct page *page, size_t offset,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
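/*
 * A sketch (an assumption, not from this diff) of the contract documented
 * above: both mapping hooks are __must_check, so a caller wiring KMSAN into
 * a vmap path is expected to propagate the failure rather than continue
 * with missing shadow/origin mappings.
 */
static int example_vmap_with_kmsan(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	int err;

	/* ...the real pages are assumed to be mapped at this point... */
	err = kmsan_vmap_pages_range_noflush(start, end, prot, pages,
					     page_shift);
	if (err)
		return err;	/* e.g. -ENOMEM while allocating metadata */
	return 0;
}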
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644
index 000000000000..7287da6f52ef
--- /dev/null
+++ b/include/linux/kmsan_string.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding them.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..929287981afe
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+#include <linux/types.h>
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ bool allow_reporting;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 3378bcbe585e..906521c2329c 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -30,6 +30,16 @@ enum kmsg_dump_reason {
};
/**
+ * struct kmsg_dump_iter - iterator for retrieving kernel messages
+ * @cur_seq: Points to the oldest message to dump
+ * @next_seq: Points after the newest message to dump
+ */
+struct kmsg_dump_iter {
+ u64 cur_seq;
+ u64 next_seq;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -41,31 +51,19 @@ struct kmsg_dumper {
struct list_head list;
void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
enum kmsg_dump_reason max_reason;
- bool active;
bool registered;
-
- /* private state of the kmsg iterator */
- u32 cur_idx;
- u32 next_idx;
- u64 cur_seq;
- u64 next_seq;
};
#ifdef CONFIG_PRINTK
void kmsg_dump(enum kmsg_dump_reason reason);
-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len);
-
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
- char *buf, size_t size, size_t *len);
-
-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out);
-void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
int kmsg_dump_register(struct kmsg_dumper *dumper);
@@ -77,30 +75,19 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
{
}
-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
- bool syslog, const char *line,
- size_t size, size_t *len)
-{
- return false;
-}
-
-static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
const char *line, size_t size, size_t *len)
{
return false;
}
-static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
char *buf, size_t size, size_t *len)
{
return false;
}
-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-{
-}
-
-static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
}
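/*
 * A usage sketch (illustrative, not part of this diff) of the iterator-based
 * dumper API above: the dumper no longer carries private cur_idx/next_idx
 * state, so each dump owns a kmsg_dump_iter on its own stack.
 * write_to_medium() is a hypothetical sink.
 */
extern void write_to_medium(const char *buf, size_t len);	/* hypothetical */

static void demo_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	static char line[1024];
	struct kmsg_dump_iter iter;
	size_t len;

	kmsg_dump_rewind(&iter);	/* start from the oldest message */
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		write_to_medium(line, len);
}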
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ea30529fba08..c8219505a79f 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -19,10 +19,10 @@
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
-#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
@@ -38,7 +38,7 @@ extern char uevent_helper[];
#endif
/* counter to tag the uevent, read only except for the kobject core */
-extern u64 uevent_seqnum;
+extern atomic64_t uevent_seqnum;
/*
* The actions here must match the index to the string array
@@ -66,83 +66,60 @@ struct kobject {
struct list_head entry;
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
-#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- struct delayed_work release;
-#endif
+
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
};
-extern __printf(2, 3)
-int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern __printf(2, 0)
-int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
+__printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}
-extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
-extern __printf(3, 4) __must_check
-int kobject_add(struct kobject *kobj, struct kobject *parent,
- const char *fmt, ...);
-extern __printf(4, 5) __must_check
-int kobject_init_and_add(struct kobject *kobj,
- struct kobj_type *ktype, struct kobject *parent,
- const char *fmt, ...);
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
+__printf(3, 4) __must_check int kobject_add(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, ...);
+__printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj,
+ const struct kobj_type *ktype,
+ struct kobject *parent,
+ const char *fmt, ...);
-extern void kobject_del(struct kobject *kobj);
+void kobject_del(struct kobject *kobj);
-extern struct kobject * __must_check kobject_create(void);
-extern struct kobject * __must_check kobject_create_and_add(const char *name,
- struct kobject *parent);
+struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent);
-extern int __must_check kobject_rename(struct kobject *, const char *new_name);
-extern int __must_check kobject_move(struct kobject *, struct kobject *);
+int __must_check kobject_rename(struct kobject *, const char *new_name);
+int __must_check kobject_move(struct kobject *, struct kobject *);
-extern struct kobject *kobject_get(struct kobject *kobj);
-extern struct kobject * __must_check kobject_get_unless_zero(
- struct kobject *kobj);
-extern void kobject_put(struct kobject *kobj);
+struct kobject *kobject_get(struct kobject *kobj);
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj);
+void kobject_put(struct kobject *kobj);
-extern const void *kobject_namespace(struct kobject *kobj);
-extern void kobject_get_ownership(struct kobject *kobj,
- kuid_t *uid, kgid_t *gid);
-extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
-
-/**
- * kobject_has_children - Returns whether a kobject has children.
- * @kobj: the object to test
- *
- * This will return whether a kobject has other kobjects as children.
- *
- * It does NOT account for the presence of attribute files, only sub
- * directories. It also assumes there is no concurrent addition or
- * removal of such children, and thus relies on external locking.
- */
-static inline bool kobject_has_children(struct kobject *kobj)
-{
- WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
-
- return kobj->sd && kobj->sd->dir.subdirs;
-}
+const void *kobject_namespace(const struct kobject *kobj);
+void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs; /* use default_groups instead */
const struct attribute_group **default_groups;
- const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
- const void *(*namespace)(struct kobject *kobj);
- void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+ const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
+ const void *(*namespace)(const struct kobject *kobj);
+ void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
@@ -154,10 +131,9 @@ struct kobj_uevent_env {
};
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(const struct kobject *kobj);
+ const char *(* const name)(const struct kobject *kobj);
+ int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
@@ -196,12 +172,11 @@ struct kset {
const struct kset_uevent_ops *uevent_ops;
} __randomize_layout;
-extern void kset_init(struct kset *kset);
-extern int __must_check kset_register(struct kset *kset);
-extern void kset_unregister(struct kset *kset);
-extern struct kset * __must_check kset_create_and_add(const char *name,
- const struct kset_uevent_ops *u,
- struct kobject *parent_kobj);
+void kset_init(struct kset *kset);
+int __must_check kset_register(struct kset *kset);
+void kset_unregister(struct kset *kset);
+struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
static inline struct kset *to_kset(struct kobject *kobj)
{
@@ -218,12 +193,12 @@ static inline void kset_put(struct kset *k)
kobject_put(&k->kobj);
}
-static inline struct kobj_type *get_ktype(struct kobject *kobj)
+static inline const struct kobj_type *get_ktype(const struct kobject *kobj)
{
return kobj->ktype;
}
-extern struct kobject *kset_find_obj(struct kset *, const char *);
+struct kobject *kset_find_obj(struct kset *, const char *);
/* The global /sys/kernel/ kobject for people to chain off of */
extern struct kobject *kernel_kobj;
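/*
 * A sketch (illustrative, not part of this diff) of the constified API
 * above: with kobject_init_and_add() taking a const struct kobj_type, the
 * type can live in rodata. struct demo_obj and its names are hypothetical.
 */
struct demo_obj {
	struct kobject kobj;
	int value;
};

static void demo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct demo_obj, kobj));
}

static const struct kobj_type demo_ktype = {
	.release	= demo_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static int demo_create(struct kobject *parent)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int ret;

	if (!obj)
		return -ENOMEM;
	ret = kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "demo%d", 0);
	if (ret)
		kobject_put(&obj->kobj);	/* ->release() frees obj */
	return ret;
}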
diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h
new file mode 100644
index 000000000000..6e36a054c2d6
--- /dev/null
+++ b/include/linux/kobject_api.h
@@ -0,0 +1 @@
+#include <linux/kobject.h>
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 2b5b64256cf4..be707748e7ce 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -47,8 +47,8 @@ struct kobj_ns_type_operations {
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9be1bff4f586..0ff44d6633e3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -3,7 +3,6 @@
#define _LINUX_KPROBES_H
/*
* Kernel Probes (KProbes)
- * include/linux/kprobes.h
*
* Copyright (C) IBM Corporation, 2002, 2004
*
@@ -27,6 +26,8 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <linux/objpool.h>
+#include <linux/rethook.h>
#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
@@ -37,7 +38,7 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
@@ -52,8 +53,6 @@ struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
- int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -81,12 +80,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /*
- * ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it.
- */
- kprobe_fault_handler_t fault_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -109,27 +102,28 @@ struct kprobe {
* this flag is only for optimized_kprobe.
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */
/* Has this kprobe gone? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
/* Is this kprobe really running the optimized path? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Does this kprobe use ftrace? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
@@ -144,6 +138,11 @@ static inline int kprobe_ftrace(struct kprobe *p)
* ignored, due to maxactive being too low.
*
*/
+struct kretprobe_holder {
+ struct kretprobe __rcu *rp;
+ struct objpool_head pool;
+};
+
struct kretprobe {
struct kprobe kp;
kretprobe_handler_t handler;
@@ -151,16 +150,25 @@ struct kretprobe {
int maxactive;
int nmissed;
size_t data_size;
- struct hlist_head free_instances;
- raw_spinlock_t lock;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook *rh;
+#else
+ struct kretprobe_holder *rph;
+#endif
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
- struct hlist_node hlist;
- struct kretprobe *rp;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook_node node;
+#else
+ struct rcu_head rcu;
+ struct llist_node llist;
+ struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
- struct task_struct *task;
void *fp;
+#endif
char data[];
};
@@ -179,19 +187,72 @@ struct kprobe_blacklist_entry {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+extern void kprobe_busy_begin(void);
+extern void kprobe_busy_end(void);
+
+#ifdef CONFIG_KRETPROBES
+/* Check whether @p is used for implementing a trampoline. */
+extern int arch_trampoline_kprobe(struct kprobe *p);
+
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ /* rethook::data is an immutable field, so it can be accessed freely. */
+ return (struct kretprobe *)ri->node.rethook->data;
+}
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return ri->node.ret_addr;
+}
+#else
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr);
+
+void __kretprobe_trampoline(void);
/*
- * For #ifdef avoidance:
+ * Since some architectures use function descriptors rather than plain
+ * function pointers, use dereference_kernel_function_descriptor() to get
+ * the real function address.
*/
-static inline int kprobes_built_in(void)
+static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
- return 1;
+ return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}
-#ifdef CONFIG_KRETPROBES
-extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs);
-extern int arch_trampoline_kprobe(struct kprobe *p);
-#else /* CONFIG_KRETPROBES */
+/* If the trampoline handler is called from a kprobe, use this version. */
+unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer);
+
+static nokprobe_inline
+unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer)
+{
+ unsigned long ret;
+ /*
+ * Set a dummy kprobe to avoid kretprobe recursion.
+ * Since a kretprobe never runs inside a kprobe handler, no kprobe can
+ * be running at this point.
+ */
+ kprobe_busy_begin();
+ ret = __kretprobe_trampoline_handler(regs, frame_pointer);
+ kprobe_busy_end();
+
+ return ret;
+}
+
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
+}
+
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return (unsigned long)ri->ret_addr;
+}
+#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
+
+#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -202,21 +263,15 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
}
#endif /* CONFIG_KRETPROBES */
-extern struct kretprobe_blackpoint kretprobe_blacklist[];
+/* Markers of '_kprobe_blacklist' section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
-static inline void kretprobe_assert(struct kretprobe_instance *ri,
- unsigned long orig_ret_address, unsigned long trampoline_address)
-{
- if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
- printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
- ri->rp, ri->rp->kp.addr);
- BUG();
- }
-}
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
-#else
+#else /* !CONFIG_KPROBES_SANITY_TEST */
static inline int init_test_probes(void)
{
return 0;
@@ -230,8 +285,7 @@ extern int arch_init_kprobes(void);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
-extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
@@ -276,7 +330,7 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
unsigned long *value, char *type, char *sym);
-#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
@@ -307,41 +361,37 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr);
+ kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
-#ifdef CONFIG_SYSCTL
-extern int sysctl_kprobes_optimization;
-extern int proc_kprobes_optimization_handler(struct ctl_table *table,
- int write, void *buffer,
- size_t *length, loff_t *ppos);
-#endif
extern void wait_for_kprobe_optimizer(void);
-#else
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
+#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
+
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs);
+ struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
-#endif
-
-int arch_check_ftrace_location(struct kprobe *p);
+#else
+static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
-void kretprobe_hash_lock(struct task_struct *tsk,
- struct hlist_head **head, unsigned long *flags);
-void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
-struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__this_cpu_read(current_kprobe));
+ return __this_cpu_read(current_kprobe);
}
static inline void reset_current_kprobe(void)
@@ -354,24 +404,26 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
return this_cpu_ptr(&kprobe_ctlblk);
}
-extern struct kprobe kprobe_busy;
-void kprobe_busy_begin(void);
-void kprobe_busy_end(void);
-
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
+
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
+#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
+#define kprobe_flush_task(tk) do {} while (0)
+#else
void kprobe_flush_task(struct task_struct *tk);
-void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
+#endif
+
+void kprobe_free_init_mem(void);
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
@@ -379,19 +431,21 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
void *alloc_insn_page(void);
-void free_insn_page(void *page);
+
+void *alloc_optinsn_page(void);
+void free_optinsn_page(void *page);
int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
char *type, char *sym);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
#else /* !CONFIG_KPROBES: */
-static inline int kprobes_built_in(void)
-{
- return 0;
-}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
return 0;
@@ -404,13 +458,16 @@ static inline struct kprobe *kprobe_running(void)
{
return NULL;
}
+#define kprobe_busy_begin() do {} while (0)
+#define kprobe_busy_end() do {} while (0)
+
static inline int register_kprobe(struct kprobe *p)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
@@ -420,11 +477,11 @@ static inline void unregister_kprobes(struct kprobe **kps, int num)
}
static inline int register_kretprobe(struct kretprobe *rp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
@@ -435,13 +492,16 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
+static inline void kprobe_free_init_mem(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline bool within_kprobe_blacklist(unsigned long addr)
@@ -454,6 +514,7 @@ static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
#endif /* CONFIG_KPROBES */
+
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -468,19 +529,56 @@ static inline bool is_kprobe_insn_slot(unsigned long addr)
{
return false;
}
-#endif
+#endif /* !CONFIG_KPROBES */
+
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
return false;
}
+#endif /* !CONFIG_OPTPROBES */
+
+#ifdef CONFIG_KRETPROBES
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return is_rethook_trampoline(addr);
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
+}
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return (void *)addr == kretprobe_trampoline_addr();
+}
+
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur);
+#endif
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return false;
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return 0;
+}
#endif
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
- if (!kprobes_built_in())
+ if (!IS_ENABLED(CONFIG_KPROBES))
return false;
if (user_mode(regs))
return false;
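/*
 * A sketch (illustrative, not from this diff) of the reworked kretprobe API
 * above: per-instance data still lives in ri->data, and the return address
 * is obtained through get_kretprobe_retaddr() regardless of the rethook
 * backend. The probed symbol is hypothetical.
 */
struct demo_rp_data {
	ktime_t entry;
};

static int demo_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	((struct demo_rp_data *)ri->data)->entry = ktime_get();
	return 0;
}

static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct demo_rp_data *d = (struct demo_rp_data *)ri->data;

	pr_info("returned to %pS after %lld ns\n",
		(void *)get_kretprobe_retaddr(ri),
		ktime_to_ns(ktime_sub(ktime_get(), d->entry)));
	return 0;
}

static struct kretprobe demo_rp = {
	.kp.symbol_name	= "kernel_clone",		/* hypothetical target */
	.entry_handler	= demo_entry,
	.handler	= demo_ret,
	.data_size	= sizeof(struct demo_rp_data),	/* <= KRETPROBE_MAX_DATA_SIZE */
};
/* register_kretprobe(&demo_rp) now returns -EOPNOTSUPP when !CONFIG_KPROBES. */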
diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h
new file mode 100644
index 000000000000..d67e554721d2
--- /dev/null
+++ b/include/linux/kref_api.h
@@ -0,0 +1 @@
+#include <linux/kref.h>
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index 0738389b42b6..1a37a664f915 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 161e8164abcf..401348e9f92b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -15,19 +15,47 @@
#include <linux/sched.h>
#include <linux/sched/coredump.h>
-struct stable_node;
-struct mem_cgroup;
-
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
+
+void ksm_add_vma(struct vm_area_struct *vma);
+int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);
+int ksm_disable(struct mm_struct *mm);
+
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+extern unsigned long ksm_zero_pages;
+
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+ if (is_ksm_zero_pte(pte)) {
+ ksm_zero_pages--;
+ mm->ksm_zero_pages--;
+ }
+}
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
- return __ksm_enter(mm);
+ int ret;
+
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
+ ret = __ksm_enter(mm);
+ if (ret)
+ return ret;
+ }
+
+ if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
+ set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+
return 0;
}
@@ -48,14 +76,32 @@ static inline void ksm_exit(struct mm_struct *mm)
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
+
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
+
+#ifdef CONFIG_MEMORY_FAILURE
+void collect_procs_ksm(struct page *page, struct list_head *to_kill,
+ int force_early);
+#endif
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+#ifdef CONFIG_PROC_FS
+long ksm_process_profit(struct mm_struct *);
+#endif /* CONFIG_PROC_FS */
#else /* !CONFIG_KSM */
+static inline void ksm_add_vma(struct vm_area_struct *vma)
+{
+}
+
+static inline int ksm_disable(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
return 0;
@@ -65,6 +111,17 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
+#ifdef CONFIG_MEMORY_FAILURE
+static inline void collect_procs_ksm(struct page *page,
+ struct list_head *to_kill, int force_early)
+{
+}
+#endif
+
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
@@ -72,18 +129,18 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- return page;
+ return folio;
}
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
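/*
 * A sketch (an assumption, not from this diff) of how a PTE teardown path
 * is expected to use the zeropage accounting above: whenever a present PTE
 * is cleared, check whether it was a KSM-placed zeropage so that both the
 * global and the per-mm counters stay balanced.
 */
static void example_zap_pte(struct mm_struct *mm, pte_t pteval)
{
	if (pte_present(pteval))
		ksm_might_unmap_zero_page(mm, pteval);
}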
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
new file mode 100644
index 000000000000..7fcf29a4e0de
--- /dev/null
+++ b/include/linux/kstrtox.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTRTOX_H
+#define _LINUX_KSTRTOX_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number will be parsed as
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as octal. Otherwise it will be parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+ */
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number will be parsed as
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as octal. Otherwise it will be parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
+
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. It might be useful for parsing strings like
+ * 10x50 or 12:21 without altering the original string or the temporary
+ * buffer in use. Keep the above caveat in mind.
+ */
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+
+#endif /* _LINUX_KSTRTOX_H */
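/*
 * A usage sketch (illustrative, not part of this diff) for the conversion
 * helpers above: base 0 enables the documented auto-detection, and the
 * __must_check return value carries the only error signal.
 */
static int parse_threshold(const char *buf, unsigned long *out)
{
	/* accepts "42", "0x2a" or "052", optionally newline-terminated */
	int ret = kstrtoul(buf, 0, out);

	if (ret)
		return ret;	/* -ERANGE on overflow, -EINVAL on bad input */
	return 0;
}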
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 65b81e0c494d..b11f53c1ba2e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -18,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @arg...: arguments for @namefmt.
+ * @arg: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -33,6 +33,12 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
+bool set_kthread_struct(struct task_struct *p);
+
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
@@ -51,13 +57,39 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
__k; \
})
+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: the cpu to which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
+int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
-bool __kthread_should_park(struct task_struct *k);
+bool kthread_should_stop_or_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
@@ -65,6 +97,8 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
@@ -108,12 +142,6 @@ struct kthread_delayed_work {
struct timer_list timer;
};
-#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
-
#define KTHREAD_WORK_INIT(work, fn) { \
.node = LIST_HEAD_INIT((work).node), \
.func = (fn), \
@@ -125,9 +153,6 @@ struct kthread_delayed_work {
TIMER_IRQSAFE), \
}
-#define DEFINE_KTHREAD_WORKER(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
-
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
@@ -135,19 +160,6 @@ struct kthread_delayed_work {
struct kthread_delayed_work dwork = \
KTHREAD_DELAYED_WORK_INIT(dwork, fn)
-/*
- * kthread_worker.lock needs its own lockdep class key when defined on
- * stack with lockdep enabled. Use the following macros in such cases.
- */
-#ifdef CONFIG_LOCKDEP
-# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ kthread_init_worker(&worker); worker; })
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-#else
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-#endif
-
extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
@@ -211,9 +223,5 @@ void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
-static inline struct cgroup_subsys_state *kthread_blkcg(void)
-{
- return NULL;
-}
#endif
#endif /* _LINUX_KTHREAD_H */
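/*
 * A usage sketch (illustrative, not part of this diff) for
 * kthread_run_on_cpu() above; the thread function and the "demo/%u" name
 * are hypothetical. The name format must end in "%u", which the helper
 * fills with the cpu number.
 */
static int demo_threadfn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *demo_start(unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_run_on_cpu(demo_threadfn, NULL, cpu, "demo/%u");
	/* later: kthread_stop(t), or kthread_stop_put(t) if a ref was taken */
	return t;
}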
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index a12b5523cc18..3a4e723eae0f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -21,12 +21,10 @@
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
-#include <linux/time.h>
-#include <linux/jiffies.h>
#include <asm/bug.h>
-
-/* Nanosecond scalar representation for kernel time values */
-typedef s64 ktime_t;
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/types.h>
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -230,6 +228,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
}
# include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
#endif
diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h
new file mode 100644
index 000000000000..f697d493960f
--- /dev/null
+++ b/include/linux/ktime_api.h
@@ -0,0 +1 @@
+#include <linux/ktime.h>
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
new file mode 100644
index 000000000000..4862c98d80d3
--- /dev/null
+++ b/include/linux/kvm_dirty_ring.h
@@ -0,0 +1,101 @@
+#ifndef KVM_DIRTY_RING_H
+#define KVM_DIRTY_RING_H
+
+#include <linux/kvm.h>
+
+/**
+ * kvm_dirty_ring: KVM internal dirty ring structure
+ *
+ * @dirty_index: free running counter that points to the next slot in
+ * dirty_ring->dirty_gfns, where a new dirty page should go
+ * @reset_index: free running counter that points to the next dirty page
+ * in dirty_ring->dirty_gfns for which the dirty trap needs to
+ * be re-enabled
+ * @size: size of the compact list, dirty_ring->dirty_gfns
+ * @soft_limit: when the number of dirty pages in the list reaches this
+ * limit, the vcpu that owns this ring should exit to userspace
+ * to allow userspace to harvest all the dirty pages
+ * @dirty_gfns: the array to keep the dirty gfns
+ * @index: index of this dirty ring
+ */
+struct kvm_dirty_ring {
+ u32 dirty_index;
+ u32 reset_index;
+ u32 size;
+ u32 soft_limit;
+ struct kvm_dirty_gfn *dirty_gfns;
+ int index;
+};
+
+#ifndef CONFIG_HAVE_KVM_DIRTY_RING
+/*
+ * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o is not
+ * built either, so define these nop functions for such architectures.
+ */
+static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
+{
+ return 0;
+}
+
+static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
+{
+ return true;
+}
+
+static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
+ int index, u32 size)
+{
+ return 0;
+}
+
+static inline int kvm_dirty_ring_reset(struct kvm *kvm,
+ struct kvm_dirty_ring *ring)
+{
+ return 0;
+}
+
+static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
+ u32 slot, u64 offset)
+{
+}
+
+static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
+ u32 offset)
+{
+ return NULL;
+}
+
+static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
+{
+}
+
+#else /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+int kvm_cpu_dirty_log_size(void);
+bool kvm_use_dirty_bitmap(struct kvm *kvm);
+bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
+u32 kvm_dirty_ring_get_rsvd_entries(void);
+int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
+
+/*
+ * Called with kvm->slots_lock held; returns the number of
+ * processed pages.
+ */
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
+
+/*
+ * Pushes a dirty (slot, offset) pair into the vcpu's dirty ring; cannot
+ * fail. A soft-full ring is flagged via KVM_REQ_DIRTY_RING_SOFT_FULL.
+ */
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
+
+/* for use in vm_operations_struct */
+struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
+
+void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
+
+#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+#endif /* KVM_DIRTY_RING_H */
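/*
 * A sketch (illustrative, not part of this diff) of the arithmetic implied
 * by the kernel-doc above: dirty_index and reset_index are free-running u32
 * counters, so their difference is the number of entries still to be
 * harvested, and the soft-full condition compares it against soft_limit.
 */
static inline bool example_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return ring->dirty_index - ring->reset_index >= ring->soft_limit;
}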
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a23076765b4c..48f31dcd318a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -10,7 +10,9 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
@@ -26,6 +28,13 @@
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
+#include <linux/notifier.h>
+#include <linux/ftrace.h>
+#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -34,21 +43,22 @@
#include <linux/kvm_types.h>
#include <asm/kvm_host.h>
+#include <linux/kvm_dirty_ring.h>
-#ifndef KVM_MAX_VCPU_ID
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#ifndef KVM_MAX_VCPU_IDS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif
/*
- * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
- * in kvm, other bits are visible for userspace which are defined in
+ * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally
+ * in kvm; the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
/*
* Bit 63 of the memslot generation number is an "update in-progress flag",
- * e.g. is temporarily set for the duration of install_new_memslots().
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
* This flag effectively creates a unique generation number that is used to
* mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
* i.e. may (or may not) have come from the previous memslots generation.
@@ -70,8 +80,8 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM 1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES 1
#endif
/*
@@ -86,6 +96,7 @@
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
/*
 * error pfns indicate that the gfn is in slot but failed to
@@ -97,6 +108,15 @@ static inline bool is_error_pfn(kvm_pfn_t pfn)
}
/*
+ * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
+ * by a pending signal. Note, the signal may or may not be fatal.
+ */
+static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
+{
+ return pfn == KVM_PFN_ERR_SIGPENDING;
+}
+
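+/*
+ * Illustrative handling (sketch only): a fault path that propagates the
+ * interruption to userspace. The locals are hypothetical.
+ *
+ *	pfn = __gfn_to_pfn_memslot(slot, gfn, false, true, NULL,
+ *				   write_fault, &writable, NULL);
+ *	if (is_sigpending_pfn(pfn))
+ *		return -EINTR;
+ */
+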
+/*
* error_noslot pfns indicate that the gfn can not be
* translated to pfn - it is not in slot or failed to
* translate it to pfn.
@@ -128,6 +148,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
#endif
+static inline bool kvm_is_error_gpa(gpa_t gpa)
+{
+ return gpa == INVALID_GPA;
+}
+
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
static inline bool is_error_page(struct page *page)
@@ -138,15 +163,26 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
+#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
+ */
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_UNBLOCK 2
+#define KVM_REQ_DIRTY_RING_SOFT_FULL 3
+#define KVM_REQUEST_ARCH_BASE 8
+
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
*/
-#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
-#define KVM_REQUEST_ARCH_BASE 8
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
@@ -154,6 +190,12 @@ static inline bool is_error_page(struct page *page)
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+ struct kvm_vcpu *except);
+
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -190,8 +232,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
@@ -201,7 +243,6 @@ struct kvm_async_pf {
struct list_head link;
struct list_head queue;
struct kvm_vcpu *vcpu;
- struct mm_struct *mm;
gpa_t cr2_or_gpa;
unsigned long addr;
struct kvm_arch_async_pf arch;
@@ -216,6 +257,25 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ pte_t pte;
+ unsigned long attributes;
+};
+
+struct kvm_gfn_range {
+ struct kvm_memory_slot *slot;
+ gfn_t start;
+ gfn_t end;
+ union kvm_mmu_notifier_arg arg;
+ bool may_block;
+};
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+#endif
+
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -249,6 +309,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
return !!map->hva;
}
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -266,23 +331,24 @@ struct kvm_vcpu {
#endif
int cpu;
int vcpu_id; /* id given by userspace at creation */
- int vcpu_idx; /* index in kvm->vcpus array */
- int srcu_idx;
+ int vcpu_idx; /* index into kvm->vcpu_array */
+ int ____srcu_idx; /* Don't use this directly. You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+ int srcu_depth;
+#endif
int mode;
u64 requests;
unsigned long guest_debug;
- int pre_pcpu;
- struct list_head blocked_vcpu_list;
-
struct mutex mutex;
struct kvm_run *run;
+#ifndef __KVM_HAVE_ARCH_WQP
struct rcuwait wait;
+#endif
struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
- struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
bool valid_wakeup;
@@ -319,8 +385,169 @@ struct kvm_vcpu {
bool preempted;
bool ready;
struct kvm_vcpu_arch arch;
+ struct kvm_vcpu_stat stat;
+ char stats_id[KVM_STATS_NAME_SIZE];
+ struct kvm_dirty_ring dirty_ring;
+
+ /*
+ * The most recently used memslot by this vCPU and the slots generation
+ * for which it is valid.
+ * No wraparound protection is needed since generations won't overflow in
+ * thousands of years, even assuming 1M memslot operations per second.
+ */
+ struct kvm_memory_slot *last_used_slot;
+ u64 last_used_slot_gen;
};
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
+{
+ /*
+	 * This is running in ioctl context, so it's safe to assume that the
+	 * pending cputime to flush is stime.
+ */
+ instrumentation_begin();
+ vtime_account_guest_enter();
+ instrumentation_end();
+}
+
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
+ /*
+	 * KVM does not hold any references to RCU-protected data when it
+	 * switches the CPU into guest mode. In fact, switching to guest mode
+	 * is very similar to exiting to userspace from an RCU point of view.
+	 * In addition, the CPU may stay in guest mode for quite a long time
+	 * (up to one time slice). Let's treat guest mode as a quiescent state,
+	 * just like we do with user-mode execution.
+ */
+ if (!context_tracking_guest_enter()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
+}
+
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
+{
+ context_tracking_guest_exit();
+}
+
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
+ instrumentation_begin();
+	/* Flush the cputime accrued while running the guest */
+ vtime_account_guest_exit();
+ instrumentation_end();
+}
+
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
+static inline void guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit_irqoff();
+ local_irq_restore(flags);
+}
+
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
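+/*
+ * Illustrative arch vcpu-run ordering (sketch only; error handling and
+ * arch-specific details omitted), per the pairing rules documented above:
+ *
+ *	guest_timing_enter_irqoff();
+ *	guest_state_enter_irqoff();
+ *	<run the guest>
+ *	guest_state_exit_irqoff();
+ *	guest_timing_exit_irqoff();
+ */
+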
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
@@ -338,7 +565,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+/*
+ * Since at idle each memslot belongs to two memslot sets, it has to contain
+ * two embedded nodes for each data structure that it forms a part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
struct kvm_memory_slot {
+ struct hlist_node id_node[2];
+ struct interval_tree_node hva_node[2];
+ struct rb_node gfn_node[2];
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
@@ -346,8 +592,26 @@ struct kvm_memory_slot {
unsigned long userspace_addr;
u32 flags;
short id;
+ u16 as_id;
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+ struct {
+ struct file __rcu *file;
+ pgoff_t pgoff;
+ } gmem;
+#endif
};
+static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
+{
+ return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+}
+
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
+{
+ return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
+}
+
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -377,6 +641,13 @@ struct kvm_hv_sint {
u32 sint;
};
+struct kvm_xen_evtchn {
+ u32 port;
+ u32 vcpu_id;
+ int vcpu_idx;
+ u32 priority;
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
@@ -397,6 +668,7 @@ struct kvm_kernel_irq_routing_entry {
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn xen_evtchn;
};
struct hlist_node link;
};
@@ -409,19 +681,25 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[];
+ struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
-#endif
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
-#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
-#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#define KVM_MEM_SLOTS_NUM SHRT_MAX
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
+
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+ return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
return 0;
@@ -429,25 +707,71 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
#endif
/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
+ * Arch code must define kvm_arch_has_private_mem if support for private memory
+ * is enabled.
*/
+#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
+static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
struct kvm_memslots {
u64 generation;
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
- atomic_t lru_slot;
- int used_slots;
- struct kvm_memory_slot memslots[];
+ atomic_long_t last_used_slot;
+ struct rb_root_cached hva_tree;
+ struct rb_root gfn_tree;
+ /*
+ * The mapping table from slot id to memslot.
+ *
+ * 7-bit bucket count matches the size of the old id to index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
+ int node_idx;
};
struct kvm {
+#ifdef KVM_HAVE_MMU_RWLOCK
+ rwlock_t mmu_lock;
+#else
spinlock_t mmu_lock;
+#endif /* KVM_HAVE_MMU_RWLOCK */
+
struct mutex slots_lock;
+
+ /*
+ * Protects the arch-specific fields of struct kvm_memory_slots in
+ * use by the VM. To be used under the slots_lock (above) or in a
+ * kvm->srcu critical section where acquiring the slots_lock would
+ * lead to deadlock with the synchronize_srcu in
+ * kvm_swap_active_memslots().
+ */
+ struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ unsigned long nr_memslot_pages;
+ /* The two memslot sets - active and inactive (per address space) */
+ struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
+ /* The current active memslot set for each address space */
+ struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
+ struct xarray vcpu_array;
+ /*
+ * Protected by slots_lock, but can be read outside if an
+ * incorrect answer is acceptable.
+ */
+ atomic_t nr_memslots_dirty_logging;
+
+ /* Used to wait for completion of MMU notifiers. */
+ spinlock_t mn_invalidate_lock;
+ unsigned long mn_active_invalidate_count;
+ struct rcuwait mn_memslots_update_rcuwait;
+
+ /* For management / invalidation of gfn_to_pfn_caches */
+ spinlock_t gpc_lock;
+ struct list_head gpc_list;
/*
* created_vcpus is protected by kvm->lock, and is incremented
@@ -456,20 +780,22 @@ struct kvm {
* and is accessed atomically.
*/
atomic_t online_vcpus;
+ int max_vcpus;
int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
-#ifdef CONFIG_HAVE_KVM_EVENTFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct {
spinlock_t lock;
struct list_head items;
+ /* resampler_list update side is protected by resampler_lock. */
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
- struct list_head ioeventfds;
#endif
+ struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
@@ -485,17 +811,17 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+
struct hlist_head irq_ack_notifier_list;
#endif
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ gfn_t mmu_invalidate_range_start;
+ gfn_t mmu_invalidate_range_end;
#endif
- long tlbs_dirty;
struct list_head devices;
u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
@@ -503,7 +829,21 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool override_halt_poll_ns;
unsigned int max_halt_poll_ns;
+ u32 dirty_ring_size;
+ bool dirty_ring_with_bitmap;
+ bool vm_bugged;
+ bool vm_dead;
+
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ struct notifier_block pm_notifier;
+#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+	/* Protected by slots_lock (for writes) and RCU (for reads) */
+ struct xarray mem_attr_array;
+#endif
+ char stats_id[KVM_STATS_NAME_SIZE];
};
#define kvm_err(fmt, ...) \
@@ -532,6 +872,75 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+ kvm->vm_bugged = true;
+ kvm_vm_dead(kvm);
+}
+
+
+#define KVM_BUG(cond, kvm, fmt...) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+#define KVM_BUG_ON(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data. Guest data corruption, suspected or confirmed, that is tied
+ * to and contained within a single VM should *never* BUG() and potentially
+ * panic the host, i.e. use this variant of KVM_BUG() if and only if a KVM
+ * data structure is corrupted and that corruption can have a cascading effect
+ * on other parts of the host and/or on other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
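+/*
+ * Illustrative use of KVM_BUG_ON() (sketch only; "root_invalid" stands in
+ * for a real corruption check): terminate the VM rather than limp along on
+ * corrupted state.
+ *
+ *	if (KVM_BUG_ON(root_invalid, kvm))
+ *		return -EIO;
+ */
+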
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(vcpu->srcu_depth++,
+ "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+ vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(--vcpu->srcu_depth,
+ "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
+
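+/*
+ * Illustrative bracket (sketch only; gpa/val are hypothetical locals): hold
+ * the vCPU's SRCU read lock across work that dereferences memslots.
+ *
+ *	kvm_vcpu_srcu_read_lock(vcpu);
+ *	r = kvm_read_guest(vcpu->kvm, gpa, &val, sizeof(val));
+ *	kvm_vcpu_srcu_read_unlock(vcpu);
+ */
+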
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
@@ -551,19 +960,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- for (idx = 0; \
- idx < atomic_read(&kvm->online_vcpus) && \
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
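+
+/*
+ * Illustrative iteration (sketch only): kick every online vCPU. Note the
+ * index must be an unsigned long now that the iterator is xarray-based.
+ *
+ *	unsigned long i;
+ *	struct kvm_vcpu *vcpu;
+ *
+ *	kvm_for_each_vcpu(i, vcpu, kvm)
+ *		kvm_vcpu_kick(vcpu);
+ */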
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (id < 0)
return NULL;
@@ -577,18 +984,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
- return vcpu->vcpu_idx;
-}
-
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + slots->used_slots; memslot++) \
- if (WARN_ON_ONCE(!memslot->npages)) { \
- } else
-
-void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_destroy_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
@@ -605,7 +1001,7 @@ static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
}
#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
@@ -618,17 +1014,18 @@ static inline void kvm_irqfd_exit(void)
{
}
#endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module);
+int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
+bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
+ as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
@@ -646,21 +1043,126 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
+{
+ return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+bool kvm_are_all_memslots_empty(struct kvm *kvm);
+
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
+
static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;
- if (index < 0)
- return NULL;
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+ if (slot->id == id)
+ return slot;
+ }
+
+ return NULL;
+}
+
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+ struct kvm_memslots *slots;
+ struct rb_node *node;
+ struct kvm_memory_slot *slot;
+};
+
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+ iter->node = rb_next(iter->node);
+ if (!iter->node)
+ return;
- slot = &slots->memslots[index];
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
+}
+
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+ struct kvm_memslots *slots,
+ gfn_t start)
+{
+ int idx = slots->node_idx;
+ struct rb_node *tmp;
+ struct kvm_memory_slot *slot;
+
+ iter->slots = slots;
+
+ /*
+	 * Find the so-called "upper bound" of a key: the first node whose key
+	 * is strictly greater than the searched one (the start gfn in our case).
+ */
+ iter->node = NULL;
+ for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+ slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+ if (start < slot->base_gfn) {
+ iter->node = tmp;
+ tmp = tmp->rb_left;
+ } else {
+ tmp = tmp->rb_right;
+ }
+ }
+
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+	 * the range, so we'll ideally have slot start <= range start.
+ */
+ if (iter->node) {
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > range start.
+ */
+ tmp = rb_prev(iter->node);
+ if (tmp)
+ iter->node = tmp;
+ } else {
+ /* a NULL node below means no slots */
+ iter->node = rb_last(&slots->gfn_tree);
+ }
+
+ if (iter->node) {
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
- WARN_ON(slot->id != id);
- return slot;
+ /*
+ * It is possible in the slot start < range start case that the
+ * found slot ends before or at range start (slot end <= range start)
+ * and so it does not overlap the requested range.
+ *
+ * In such non-overlapping case the next slot (if it exists) will
+ * already have slot start > range start, otherwise the logic above
+ * would have found it instead of the current slot.
+ */
+ if (iter->slot->base_gfn + iter->slot->npages <= start)
+ kvm_memslot_iter_next(iter);
+ }
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+ if (!iter->node)
+ return false;
+
+ /*
+	 * If this slot starts at or beyond the end of the range, so does
+	 * every following one.
+ */
+ return iter->slot->base_gfn < end;
}
+/* Iterate over each memslot at least partially intersecting [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
+ for (kvm_memslot_iter_start(iter, slots, start); \
+ kvm_memslot_iter_is_valid(iter, end); \
+ kvm_memslot_iter_next(iter))
+
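+/*
+ * Illustrative walk (sketch only) over every memslot touching [start, end):
+ *
+ *	struct kvm_memslot_iter iter;
+ *
+ *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+ *		struct kvm_memory_slot *slot = iter.slot;
+ *		...
+ *	}
+ */
+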
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -680,17 +1182,16 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region2 *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
@@ -711,24 +1212,22 @@ unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_accessed(struct page *page);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
- bool atomic, bool *async, bool write_fault,
- bool *writable);
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
+ bool atomic, bool interruptible, bool *async,
+ bool write_fault, bool *writable, hva_t *hva);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
-void kvm_get_pfn(kvm_pfn_t pfn);
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -749,33 +1248,54 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
-#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+#define __kvm_get_guest(kvm, gfn, offset, v) \
({ \
unsigned long __addr = gfn_to_hva(kvm, gfn); \
- type __user *__uaddr = (type __user *)(__addr + offset); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
int __ret = -EFAULT; \
\
if (!kvm_is_error_hva(__addr)) \
- __ret = put_user(value, __uaddr); \
+ __ret = get_user(v, __uaddr); \
+ __ret; \
+})
+
+#define kvm_get_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(v, __uaddr); \
if (!__ret) \
mark_page_dirty(kvm, gfn); \
__ret; \
})
-#define kvm_put_guest(kvm, gpa, value, type) \
+#define kvm_put_guest(kvm, gpa, v) \
({ \
gpa_t __gpa = gpa; \
struct kvm *__kvm = kvm; \
+ \
__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
- offset_in_page(__gpa), (value), type); \
+ offset_in_page(__gpa), v); \
})
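+
+/*
+ * Illustrative use (sketch only): read, then update, a hypothetical u32
+ * counter living in guest memory at @gpa.
+ *
+ *	u32 val;
+ *
+ *	if (!kvm_get_guest(kvm, gpa, val))
+ *		kvm_put_guest(kvm, gpa, val + 1);
+ */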
-int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -783,12 +1303,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool atomic);
-struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -803,35 +1318,137 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+/**
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @kvm: pointer to kvm instance.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
+ * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
+ * the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: guest physical address to map.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
+ * invalidations to be processed. Callers are required to use kvm_gpc_check()
+ * to ensure that the cache is valid before accessing the target page.
+ */
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @hva: userspace virtual address to map.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an unmappable userspace virtual address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
+/**
+ * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ *
+ * @return: %true if the cache is still valid and the address matches.
+ * %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_refresh - update a previously initialized cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
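+/*
+ * Illustrative read-side pattern (sketch only; assumes the cached kernel
+ * mapping lives in gpc->khva and that gpc->lock is the rwlock documented
+ * above):
+ *
+ *	read_lock_irqsave(&gpc->lock, flags);
+ *	while (!kvm_gpc_check(gpc, sizeof(*data))) {
+ *		read_unlock_irqrestore(&gpc->lock, flags);
+ *		if (kvm_gpc_refresh(gpc, sizeof(*data)))
+ *			return;			// unable to map; bail out
+ *		read_lock_irqsave(&gpc->lock, flags);
+ *	}
+ *	data = gpc->khva;			// valid while the lock is held
+ *	read_unlock_irqrestore(&gpc->lock, flags);
+ */
+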
+/**
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the VM's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
+
+static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
+{
+ return gpc->active && !kvm_is_error_gpa(gpc->gpa);
+}
+
+static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
+{
+ return gpc->active && kvm_is_error_gpa(gpc->gpa);
+}
+
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
-void kvm_reload_remote_mmus(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif
-bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except,
- unsigned long *vcpu_bitmap, cpumask_var_t tmp);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -847,10 +1464,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
@@ -860,8 +1474,9 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap);
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg);
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -883,9 +1498,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
-int kvm_arch_init(void *opaque);
-void kvm_arch_exit(void);
-
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
@@ -895,21 +1507,29 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
+#endif
+
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+#else
+static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void *opaque);
-void kvm_arch_hardware_unsetup(void);
-int kvm_arch_check_processor_compat(void *opaque);
+#endif
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
+void kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
@@ -918,20 +1538,39 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm);
*/
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
+}
+#endif
+
+static inline void __kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvfree(kvm);
}
+#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm);
+ __kvm_arch_free_vm(kvm);
}
#endif
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -965,7 +1604,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return false;
}
@@ -980,6 +1619,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+ return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+ return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
* returns true if the virtual interrupt controller is initialized and
@@ -994,15 +1647,24 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
-bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
-bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
+struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
+bool kvm_is_zone_device_page(struct page *page);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1033,55 +1695,91 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
- * search_memslots() and __gfn_to_memslot() are here because they are
- * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
- * gfn_to_memslot() itself isn't here as an inline because that would
- * bloat other code too much.
- *
- * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
+ * Returns a pointer to the memslot if it contains gfn.
+ * Otherwise returns NULL.
*/
static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- int start = 0, end = slots->used_slots;
- int slot = atomic_read(&slots->lru_slot);
- struct kvm_memory_slot *memslots = slots->memslots;
+ if (!slot)
+ return NULL;
- if (unlikely(!slots->used_slots))
+ if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
+ return slot;
+ else
return NULL;
+}
+
+/*
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
+ *
+ * With "approx" set, a memslot is returned even when the address falls
+ * in a hole; in that case, one of the memslots bordering the hole is
+ * returned.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+ struct rb_node *node;
+ int idx = slots->node_idx;
+
+ slot = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+ if (gfn >= slot->base_gfn) {
+ if (gfn < slot->base_gfn + slot->npages)
+ return slot;
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
+ }
- if (gfn >= memslots[slot].base_gfn &&
- gfn < memslots[slot].base_gfn + memslots[slot].npages)
- return &memslots[slot];
+ return approx ? slot : NULL;
+}
- while (start < end) {
- slot = start + (end - start) / 2;
+static inline struct kvm_memory_slot *
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
- }
+ slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+ slot = try_get_memslot(slot, gfn);
+ if (slot)
+ return slot;
- if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
- gfn < memslots[start].base_gfn + memslots[start].npages) {
- atomic_set(&slots->lru_slot, start);
- return &memslots[start];
+ slot = search_memslots(slots, gfn, approx);
+ if (slot) {
+ atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
+ return slot;
}
return NULL;
}
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
- return search_memslots(slots, gfn);
+ return ____gfn_to_memslot(slots, gfn, false);
}
static inline unsigned long
-__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+ * The index was checked originally in search_memslots. To avoid
+	 * The index was checked originally in search_memslots. To prevent
+	 * a malicious guest from building a Spectre gadget out of e.g. page
+	 * table walks, do not let the processor speculate loads outside
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
@@ -1112,17 +1810,21 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}
-static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
- gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
- return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
+ unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ return !kvm_is_error_hva(hva);
}
-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
{
- unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+ lockdep_assert_held(&gpc->lock);
+
+ if (!gpc->memslot)
+ return;
- return kvm_is_error_hva(hva);
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
}
enum kvm_stat_kind {
@@ -1132,47 +1834,261 @@ enum kvm_stat_kind {
struct kvm_stat_data {
struct kvm *kvm;
- struct kvm_stats_debugfs_item *dbgfs_item;
-};
-
-struct kvm_stats_debugfs_item {
- const char *name;
- int offset;
+ const struct _kvm_stats_desc *desc;
enum kvm_stat_kind kind;
- int mode;
};
-#define KVM_DBGFS_GET_MODE(dbgfs_item) \
- ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+struct _kvm_stats_desc {
+ struct kvm_stats_desc desc;
+ char name[KVM_STATS_NAME_SIZE];
+};
-#define VM_STAT(n, x, ...) \
- { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
-#define VCPU_STAT(n, x, ...) \
- { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
+ .bucket_size = bsz
+
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
+#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
+ SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
+
+#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
+ unit, base, exponent, sz, bsz)
+#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
+ unit, base, exponent, sz, 0)
+
+/* Cumulative counter, read/write */
+#define STATS_DESC_COUNTER(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Instantaneous counter, read only */
+#define STATS_DESC_ICOUNTER(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak counter, read/write */
+#define STATS_DESC_PCOUNTER(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Cumulative time in nanosecond */
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9)
+/* Linear histogram for time in nanosecond */
+#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
+ STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz, bsz)
+/* Logarithmic histogram for time in nanosecond */
+#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
+ STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz)
+
+#define KVM_GENERIC_VM_STATS() \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
+
+#define KVM_GENERIC_VCPU_STATS() \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
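+
+/*
+ * Illustrative arch-side table (sketch only; "exits" is a hypothetical
+ * arch-specific field of struct kvm_vcpu_stat):
+ *
+ *	const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ *		KVM_GENERIC_VCPU_STATS(),
+ *		STATS_DESC_COUNTER(VCPU, exits),
+ *	};
+ */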
-extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset);
+
+/**
+ * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the linear histogram's bucket
+ * @bucket_size: the size (width) of a bucket
+ */
+static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
+ u64 value, size_t bucket_size)
+{
+ size_t index = div64_u64(value, bucket_size);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+/**
+ * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the logarithmic histogram's bucket
+ */
+static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
+{
+ size_t index = fls64(value);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
+ kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
+#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
+ kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
+
+
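+/*
+ * Illustrative update (sketch only; wait_ns is a hypothetical duration in
+ * nanoseconds): account a halt wait into the generic logarithmic histogram.
+ *
+ *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, wait_ns);
+ */
+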
+extern const struct kvm_stats_header kvm_vm_stats_header;
+extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_header kvm_vcpu_stats_header;
+extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
+
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
+ * positives, due to shortcuts when handling concurrent invalidations.
+ */
+ if (unlikely(kvm->mmu_invalidate_in_progress)) {
+ /*
+ * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+ * but before updating the range is a KVM bug.
+ */
+ if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+ kvm->mmu_invalidate_range_end == INVALID_GPA))
+ return 1;
+
+ if (gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return 1;
+ }
+
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
+
+/*
+ * This lockless version of the range-based retry check *must* be paired with a
+ * call to the locked version after acquiring mmu_lock, i.e. this is safe to
+ * use only as a pre-check to avoid contending mmu_lock. This version *will*
+ * get false negatives and false positives.
+ */
+static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ /*
+ * Use READ_ONCE() to ensure the in-progress flag and sequence counter
+ * are always read from memory, e.g. so that checking for retry in a
+ * loop won't result in an infinite retry loop. Don't force loads for
+ * start+end, as the key to avoiding infinite retry loops is observing
+ * the 1=>0 transition of in-progress, i.e. getting false negatives
+ * due to stale start+end values is acceptable.
+ */
+ if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
+ gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return true;
+
+ return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
+}
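A sketch of the required pairing, under the same illustrative fault-path
assumptions as above:

	/* Lockless pre-check; false positives and negatives are tolerable. */
	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
		goto retry;

	write_lock(&kvm->mmu_lock);
	/* The authoritative check must be repeated under mmu_lock. */
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		write_unlock(&kvm->mmu_lock);
		goto retry;
	}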
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
@@ -1197,14 +2113,15 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-#ifdef CONFIG_HAVE_KVM_EVENTFD
-
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
@@ -1213,35 +2130,18 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}
-#endif
-#else
-
-static inline void kvm_eventfd_init(struct kvm *kvm) {}
-
-static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
-{
- return -EINVAL;
-}
-
-static inline void kvm_irqfd_release(struct kvm *kvm) {}
-
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm)
-{
-}
-#endif
-
-static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin)
{
- return -ENOSYS;
+ return false;
}
-
-#endif /* CONFIG_HAVE_KVM_EVENTFD */
+#endif /* CONFIG_HAVE_KVM_IRQCHIP */
void kvm_arch_irq_routing_update(struct kvm *kvm);
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
* Ensure the rest of the request is published to kvm_check_request's
@@ -1251,6 +2151,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ /*
+ * Requests that don't require vCPU action should never be logged in
+ * vcpu->requests. The vCPU won't clear the request, so it will stay
+ * logged indefinitely and prevent the vCPU from entering the guest.
+ */
+ BUILD_BUG_ON(!__builtin_constant_p(req) ||
+ (req & KVM_REQUEST_NO_ACTION));
+
+ __kvm_make_request(req, vcpu);
+}
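In other words, compile-time-constant requests take the checked wrapper, and
only callers that receive the request as a runtime value use the
double-underscore variant; a sketch, with KVM_REQ_TLB_FLUSH as an assumed
example request:

	/* Compile-time constant, not NO_ACTION: the checked wrapper is fine. */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	/* Request computed at runtime: must use the unchecked variant. */
	__kvm_make_request(req, vcpu);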
+
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
@@ -1282,7 +2195,9 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool kvm_rebooting;
+#endif
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
@@ -1340,8 +2255,6 @@ struct kvm_device_ops {
int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
@@ -1391,6 +2304,8 @@ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
+ struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
@@ -1429,8 +2344,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
@@ -1455,4 +2369,80 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
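A sketch of paired accounting, assuming a single page-table page allocated
with __get_free_page() (the surrounding allocation code is illustrative):

	void *pt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (pt)
		kvm_account_pgtable_pages(pt, 1);	/* on allocation */
	/* ... */
	kvm_account_pgtable_pages(pt, -1);		/* before freeing */
	free_page((unsigned long)pt);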
+
+/*
+ * This defines how many reserved entries we want to keep before we
+ * kick the vcpu out to userspace to avoid the dirty ring becoming full.
+ * This value can be tuned higher if e.g. PML is enabled on the host.
+ */
+#define KVM_DIRTY_RING_RSVD_ENTRIES 64
+
+/* Max number of entries allowed for each kvm dirty ring */
+#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+
+static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+ gpa_t gpa, gpa_t size,
+ bool is_write, bool is_exec,
+ bool is_private)
+{
+ vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+ vcpu->run->memory_fault.gpa = gpa;
+ vcpu->run->memory_fault.size = size;
+
+ /* RWX flags are not (yet) defined or communicated to userspace. */
+ vcpu->run->memory_fault.flags = 0;
+ if (is_private)
+ vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
+}
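An assumed arch fault-handler shape, showing how the helper is meant to be
used just before exiting to userspace (the surrounding attribute check is
illustrative):

	if (fault_is_private != kvm_mem_is_private(kvm, gfn)) {
		/* Attribute mismatch: let userspace convert the page. */
		kvm_prepare_memory_fault_exit(vcpu, gfn_to_gpa(gfn), PAGE_SIZE,
					      is_write, is_exec,
					      fault_is_private);
		return -EFAULT;	/* KVM_EXIT_MEMORY_FAULT reaches userspace */
	}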
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+ return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
+}
+
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long attrs);
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
+ kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+}
+#else
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+#else
+static inline int kvm_gmem_get_pfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t *pfn, int *max_order)
+{
+ KVM_BUG_ON(1, kvm);
+ return -EIO;
+}
+#endif /* CONFIG_KVM_PRIVATE_MEM */
+
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dac047abdba7..8ad43692e3bb 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -31,7 +31,7 @@ struct kvm_kernel_irqfd_resampler {
/*
* Entry in list of kvm->irqfd.resampler_list. Use for sharing
* resamplers among irqfds on the same gsi.
- * Accessed and modified under kvm->irqfds.resampler_lock
+ * RCU list modified under kvm->irqfds.resampler_lock
*/
struct list_head link;
};
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index a7580f69dda0..d93f6522b2c3 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -6,6 +6,7 @@
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
+struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
@@ -18,7 +19,10 @@ struct kvm_memslots;
enum kvm_mr_change;
+#include <linux/bits.h>
+#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/spinlock_types.h>
#include <asm/kvm_types.h>
@@ -37,7 +41,7 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
-#define GPA_INVALID (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
typedef unsigned long hva_t;
typedef u64 hpa_t;
@@ -55,9 +59,17 @@ struct gfn_to_hva_cache {
struct gfn_to_pfn_cache {
u64 generation;
- gfn_t gfn;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm;
+ struct list_head list;
+ rwlock_t lock;
+ struct mutex refresh_lock;
+ void *khva;
kvm_pfn_t pfn;
- bool dirty;
+ bool active;
+ bool valid;
};
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
@@ -67,14 +79,41 @@ struct gfn_to_pfn_cache {
* MMU flows is problematic, as is triggering reclaim, I/O, etc... while
* holding MMU locks. Note, these caches act more like prefetch buffers than
* classical caches, i.e. objects are not returned to the cache on being freed.
+ *
+ * The @capacity field and @objects array are lazily initialized when the cache
+ * is topped up (__kvm_mmu_topup_memory_cache()).
*/
struct kvm_mmu_memory_cache {
- int nobjs;
gfp_t gfp_zero;
+ gfp_t gfp_custom;
struct kmem_cache *kmem_cache;
- void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
+ int capacity;
+ int nobjs;
+ void **objects;
};
#endif
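A lifecycle sketch, assuming the existing kvm_mmu_topup_memory_cache(),
kvm_mmu_memory_cache_alloc() and kvm_mmu_free_memory_cache() helpers:

	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
	void *pt;
	int r;

	/* Top up outside mmu_lock; capacity and objects are set up lazily. */
	r = kvm_mmu_topup_memory_cache(&cache, 4);
	if (r)
		return r;

	/* Under mmu_lock: guaranteed not to allocate or sleep. */
	pt = kvm_mmu_memory_cache_alloc(&cache);
	/* ... */
	kvm_mmu_free_memory_cache(&cache);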
+#define HALT_POLL_HIST_COUNT 32
+
+struct kvm_vm_stat_generic {
+ u64 remote_tlb_flush;
+ u64 remote_tlb_flush_requests;
+};
+
+struct kvm_vcpu_stat_generic {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_wait_ns;
+ u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
+ u64 blocking;
+};
+
+#define KVM_STATS_NAME_SIZE 48
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
index eb56472f23b2..b5333f9413dc 100644
--- a/include/linux/lapb.h
+++ b/include/linux/lapb.h
@@ -6,6 +6,11 @@
#ifndef LAPB_KERNEL_H
#define LAPB_KERNEL_H
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct net_device;
+
#define LAPB_OK 0
#define LAPB_BADTOKEN 1
#define LAPB_INVALUE 2
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index abe3d95f795b..84f1053cf2a8 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -38,9 +38,6 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
void clear_tsk_latency_tracing(struct task_struct *p);
-int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-
#else
static inline void
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 21a3358a1731..36df927ec4b7 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -98,12 +98,6 @@ int led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data);
-static inline int led_classdev_flash_register(struct device *parent,
- struct led_classdev_flash *fled_cdev)
-{
- return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
-}
-
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
* with support for flash LEDs
@@ -118,15 +112,21 @@ int devm_led_classdev_flash_register_ext(struct device *parent,
struct led_init_data *init_data);
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
static inline int devm_led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
}
-void devm_led_classdev_flash_unregister(struct device *parent,
- struct led_classdev_flash *fled_cdev);
-
/**
* led_set_flash_strobe - setup flash strobe
* @fled_cdev: the flash LED to set strobe on
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
index 5116f9a866cc..db9f34c6736e 100644
--- a/include/linux/led-class-multicolor.h
+++ b/include/linux/led-class-multicolor.h
@@ -30,7 +30,6 @@ static inline struct led_classdev_mc *lcdev_to_mccdev(
return container_of(led_cdev, struct led_classdev_mc, led_cdev);
}
-#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR)
/**
* led_classdev_multicolor_register_ext - register a new object of led_classdev
* class with support for multicolor LEDs
@@ -44,12 +43,6 @@ int led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data);
-static inline int led_classdev_multicolor_register(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{
- return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
-}
-
/**
* led_classdev_multicolor_unregister - unregisters an object of led_classdev
* class with support for multicolor LEDs
@@ -68,23 +61,8 @@ int devm_led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data);
-static inline int devm_led_classdev_multicolor_register(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{
- return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
- NULL);
-}
-
void devm_led_classdev_multicolor_unregister(struct device *parent,
struct led_classdev_mc *mcled_cdev);
-#else
-
-static inline int led_classdev_multicolor_register_ext(struct device *parent,
- struct led_classdev_mc *mcled_cdev,
- struct led_init_data *init_data)
-{
- return -EINVAL;
-}
static inline int led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
@@ -92,20 +70,6 @@ static inline int led_classdev_multicolor_register(struct device *parent,
return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
}
-static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
-static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
- enum led_brightness brightness)
-{
- return -EINVAL;
-}
-
-static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
- struct led_classdev_mc *mcled_cdev,
- struct led_init_data *init_data)
-{
- return -EINVAL;
-}
-
static inline int devm_led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
{
@@ -113,9 +77,4 @@ static inline int devm_led_classdev_multicolor_register(struct device *parent,
NULL);
}
-static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{};
-
-#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */
diff --git a/include/linux/leds-expresswire.h b/include/linux/leds-expresswire.h
new file mode 100644
index 000000000000..a422921f4159
--- /dev/null
+++ b/include/linux/leds-expresswire.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Shared library for Kinetic's ExpressWire protocol.
+ * This protocol works by pulsing the ExpressWire IC's control GPIO.
+ * ktd2692 and ktd2801 are known to use this protocol.
+ */
+
+#ifndef _LEDS_EXPRESSWIRE_H
+#define _LEDS_EXPRESSWIRE_H
+
+#include <linux/types.h>
+
+struct gpio_desc;
+
+struct expresswire_timing {
+ unsigned long poweroff_us;
+ unsigned long detect_delay_us;
+ unsigned long detect_us;
+ unsigned long data_start_us;
+ unsigned long end_of_data_low_us;
+ unsigned long end_of_data_high_us;
+ unsigned long short_bitset_us;
+ unsigned long long_bitset_us;
+};
+
+struct expresswire_common_props {
+ struct gpio_desc *ctrl_gpio;
+ struct expresswire_timing timing;
+};
+
+void expresswire_power_off(struct expresswire_common_props *props);
+void expresswire_enable(struct expresswire_common_props *props);
+void expresswire_start(struct expresswire_common_props *props);
+void expresswire_end(struct expresswire_common_props *props);
+void expresswire_set_bit(struct expresswire_common_props *props, bool bit);
+void expresswire_write_u8(struct expresswire_common_props *props, u8 val);
+
+#endif /* _LEDS_EXPRESSWIRE_H */
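A consumer sketch based only on the declarations above; the timing values are
made-up placeholders, not datasheet numbers, and write_u8 is assumed to frame
the byte with start/end pulses internally:

	struct expresswire_common_props props = {
		.ctrl_gpio = gpiod,		/* from devm_gpiod_get() */
		.timing = {
			.poweroff_us = 700,	/* placeholders */
			.detect_delay_us = 100,
			.detect_us = 270,
			.data_start_us = 5,
			.end_of_data_low_us = 10,
			.end_of_data_high_us = 350,
			.short_bitset_us = 5,
			.long_bitset_us = 15,
		},
	};

	expresswire_power_off(&props);		/* reset the IC */
	expresswire_enable(&props);		/* wake it and run detection */
	expresswire_write_u8(&props, 0x0f);	/* pulse out one register byte */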
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
deleted file mode 100644
index 50d330ed1100..000000000000
--- a/include/linux/leds-tca6507.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TCA6507 LED chip driver.
- *
- * Copyright (C) 2011 Neil Brown <neil@brown.name>
- */
-
-#ifndef __LINUX_TCA6507_H
-#define __LINUX_TCA6507_H
-#include <linux/leds.h>
-
-struct tca6507_platform_data {
- struct led_platform_data leds;
-#ifdef CONFIG_GPIOLIB
- int gpio_base;
- void (*setup)(unsigned gpio_base, unsigned ngpio);
-#endif
-};
-
-#define TCA6507_MAKE_GPIO 1
-#endif /* __LINUX_TCA6507_H*/
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6a8d6409c993..db6b114bb3d9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -10,17 +10,21 @@
#include <dt-bindings/leds/common.h>
#include <linux/device.h>
-#include <linux/kernfs.h>
-#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-struct device;
-struct led_pattern;
+struct attribute_group;
struct device_node;
+struct fwnode_handle;
+struct gpio_desc;
+struct kernfs_node;
+struct led_pattern;
+struct platform_device;
+
/*
* LED Core
*/
@@ -33,6 +37,27 @@ enum led_brightness {
LED_FULL = 255,
};
+enum led_default_state {
+ LEDS_DEFSTATE_OFF = 0,
+ LEDS_DEFSTATE_ON = 1,
+ LEDS_DEFSTATE_KEEP = 2,
+};
+
+/**
+ * struct led_lookup_data - represents a single LED lookup entry
+ *
+ * @list: internal list of all LED lookup entries
+ * @provider: name of led_classdev providing the LED
+ * @dev_id: name of the device associated with this LED
+ * @con_id: name of the LED from the device's point of view
+ */
+struct led_lookup_data {
+ struct list_head list;
+ const char *provider;
+ const char *dev_id;
+ const char *con_id;
+};
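A sketch of how a board file might pair a lookup entry with the led_get() /
devm_led_get() consumers declared further down (all names are illustrative):

	static struct led_lookup_data vibra_led_lookup = {
		.provider = "green:indicator",	/* led_classdev name */
		.dev_id = "1-0032",		/* consumer device name */
		.con_id = "vibra",		/* con_id passed to led_get() */
	};

	led_add_lookup(&vibra_led_lookup);

	/* later, in the consumer driver's probe(): */
	led = devm_led_get(dev, "vibra");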
+
struct led_init_data {
/* device fwnode handle */
struct fwnode_handle *fwnode;
@@ -57,14 +82,17 @@ struct led_init_data {
bool devname_mandatory;
};
+enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode);
+
struct led_hw_trigger_type {
int dummy;
};
struct led_classdev {
const char *name;
- enum led_brightness brightness;
- enum led_brightness max_brightness;
+ unsigned int brightness;
+ unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
@@ -89,6 +117,10 @@ struct led_classdev {
#define LED_BLINK_INVERT 3
#define LED_BLINK_BRIGHTNESS_CHANGE 4
#define LED_BLINK_DISABLE 5
+ /* Brightness off also disables hw-blinking so it is a separate action */
+#define LED_SET_BRIGHTNESS_OFF 6
+#define LED_SET_BRIGHTNESS 7
+#define LED_SET_BLINK 8
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -112,6 +144,10 @@ struct led_classdev {
* match the values specified exactly.
* Deactivate blinking again when the brightness is set to LED_OFF
* via the brightness_set() callback.
+ * For led_blink_set_nosleep() the LED core assumes that blink_set
+ * implementations of drivers which do not use brightness_set_blocking
+ * will not sleep. Therefore, if brightness_set_blocking is not set,
+ * this function must not sleep!
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
@@ -135,6 +171,8 @@ struct led_classdev {
struct work_struct set_brightness_work;
int delayed_set_value;
+ unsigned long delayed_delay_on;
+ unsigned long delayed_delay_off;
#ifdef CONFIG_LEDS_TRIGGERS
/* Protects the trigger data below */
@@ -148,6 +186,49 @@ struct led_classdev {
/* LEDs that have private triggers have this set */
struct led_hw_trigger_type *trigger_type;
+
+ /* Unique trigger name supported by LED set in hw control mode */
+ const char *hw_control_trigger;
+ /*
+ * Check if the LED driver supports the requested mode provided by the
+ * defined supported trigger, to set the LED up in hw control mode.
+ *
+ * Return 0 on success. Return -EOPNOTSUPP when the passed flags are not
+ * supported and software fallback needs to be used.
+ * Return a negative error number in any other case where the check
+ * fails, e.g. because the device is not ready or timed out.
+ */
+ int (*hw_control_is_supported)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Activate hardware control: the LED driver will use the provided flags
+ * from the supported trigger and set the LED up to be driven by hardware,
+ * following the requested mode from the trigger flags.
+ * Deactivate hardware blink control by setting brightness to LED_OFF via
+ * the brightness_set() callback.
+ *
+ * Return 0 on success, a negative error number if applying the flags fails.
+ */
+ int (*hw_control_set)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Get the current hw control mode settings from the LED driver and put
+ * them in flags.
+ * A trigger can use this to get the initial state of an LED already set
+ * in hardware blink control.
+ *
+ * Return 0 on success, a negative error number if parsing the initial
+ * mode fails. An error from this function is NOT FATAL as the device
+ * may be in an initial state not supported by the attached LED trigger.
+ */
+ int (*hw_control_get)(struct led_classdev *led_cdev,
+ unsigned long *flags);
+ /*
+ * Get the device this LED blinks in response to.
+ * e.g. for a PHY LED, it is the network device. If the LED is
+ * not yet associated with a device, return NULL.
+ */
+ struct device *(*hw_control_get_device)(struct led_classdev *led_cdev);
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
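A sketch of the negotiation a trigger is expected to perform with these hooks
(the flag values and the fallback helper are hypothetical; real flags are
defined by the trigger named in hw_control_trigger):

	unsigned long flags = MY_TRIG_RX | MY_TRIG_TX;	/* hypothetical */
	int ret;

	ret = led_cdev->hw_control_is_supported(led_cdev, flags);
	if (ret == -EOPNOTSUPP)
		return start_software_blink(led_cdev);	/* hypothetical */
	if (ret)
		return ret;

	return led_cdev->hw_control_set(led_cdev, flags);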
@@ -193,7 +274,6 @@ static inline int led_classdev_register(struct device *parent,
int devm_led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data);
-
static inline int devm_led_classdev_register(struct device *parent,
struct led_classdev *led_cdev)
{
@@ -205,10 +285,18 @@ void devm_led_classdev_unregister(struct device *parent,
void led_classdev_suspend(struct led_classdev *led_cdev);
void led_classdev_resume(struct led_classdev *led_cdev);
+void led_add_lookup(struct led_lookup_data *led_lookup);
+void led_remove_lookup(struct led_lookup_data *led_lookup);
+
+struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
+struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
+
extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
@@ -221,12 +309,27 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
* software blinking if there is no hardware blinking or if
* the LED refuses the passed values.
*
+ * This function may sleep!
+ *
* Note that if software blinking is active, simply calling
* led_cdev->brightness_set() will not stop the blinking,
- * use led_classdev_brightness_set() instead.
+ * use led_set_brightness() instead.
*/
void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
unsigned long *delay_off);
+
+/**
+ * led_blink_set_nosleep - set blinking, guaranteed to not sleep
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink and is guaranteed not to sleep. Otherwise
+ * it behaves the same as led_blink_set(); see led_blink_set() for details.
+ */
+void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
+ unsigned long delay_off);
+
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -240,6 +343,8 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
*
* If invert is set, led blinks for delay_off first, then for
* delay_on and leave the led on after the on-off cycle.
+ *
+ * This function is guaranteed not to sleep.
*/
void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off,
@@ -253,8 +358,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
@@ -267,8 +371,7 @@ void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
* led_update_brightness - update LED brightness
@@ -356,7 +459,7 @@ struct led_trigger {
struct led_hw_trigger_type *trigger_type;
/* LEDs under control by this trigger (for simple triggers) */
- rwlock_t leddev_list_lock;
+ spinlock_t leddev_list_lock;
struct list_head led_cdevs;
/* Link to next registered trigger */
@@ -384,11 +487,11 @@ void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
void led_trigger_unregister_simple(struct led_trigger *trigger);
void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
-void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
- unsigned long *delay_off);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
+ unsigned long delay_off);
void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert);
void led_trigger_set_default(struct led_classdev *led_cdev);
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
@@ -405,23 +508,6 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
return led_cdev->trigger_data;
}
-/**
- * led_trigger_rename_static - rename a trigger
- * @name: the new trigger name
- * @trig: the LED trigger to rename
- *
- * Change a LED trigger name by copying the string passed in
- * name into current trigger name, which MUST be large
- * enough for the new string.
- *
- * Note that name must NOT point to the same string used
- * during LED registration, as that could lead to races.
- *
- * This is meant to be used on triggers with statically
- * allocated name.
- */
-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
-
#define module_led_trigger(__led_trigger) \
module_driver(__led_trigger, led_trigger_register, \
led_trigger_unregister)
@@ -438,11 +524,11 @@ static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off) {}
+ unsigned long delay_on,
+ unsigned long delay_off) {}
static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
static inline int led_trigger_set(struct led_classdev *led_cdev,
@@ -460,6 +546,24 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
#endif /* CONFIG_LEDS_TRIGGERS */
+/* Trigger specific enum */
+enum led_trigger_netdev_modes {
+ TRIGGER_NETDEV_LINK = 0,
+ TRIGGER_NETDEV_LINK_10,
+ TRIGGER_NETDEV_LINK_100,
+ TRIGGER_NETDEV_LINK_1000,
+ TRIGGER_NETDEV_LINK_2500,
+ TRIGGER_NETDEV_LINK_5000,
+ TRIGGER_NETDEV_LINK_10000,
+ TRIGGER_NETDEV_HALF_DUPLEX,
+ TRIGGER_NETDEV_FULL_DUPLEX,
+ TRIGGER_NETDEV_TX,
+ TRIGGER_NETDEV_RX,
+
+ /* Keep last */
+ __TRIGGER_NETDEV_MAX,
+};
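The modes are meant as bit positions in a trigger mode bitmap; an assumed
usage sketch:

	unsigned long mode = BIT(TRIGGER_NETDEV_LINK) |
			     BIT(TRIGGER_NETDEV_TX) |
			     BIT(TRIGGER_NETDEV_RX);

	/* e.g. handed to hw_control_is_supported()/hw_control_set() above */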
+
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
void ledtrig_disk_activity(bool write);
@@ -504,7 +608,6 @@ struct led_properties {
const char *label;
};
-struct gpio_desc;
typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -522,9 +625,9 @@ struct gpio_led {
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
};
-#define LEDS_GPIO_DEFSTATE_OFF 0
-#define LEDS_GPIO_DEFSTATE_ON 1
-#define LEDS_GPIO_DEFSTATE_KEEP 2
+#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF
+#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON
+#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP
struct gpio_led_platform_data {
int num_leds;
@@ -536,7 +639,7 @@ struct gpio_led_platform_data {
gpio_blink_set_t gpio_blink_set;
};
-#ifdef CONFIG_NEW_LEDS
+#ifdef CONFIG_LEDS_GPIO_REGISTER
struct platform_device *gpio_led_register_device(
int id, const struct gpio_led_platform_data *pdata);
#else
@@ -565,7 +668,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
void led_classdev_notify_brightness_hw_changed(
- struct led_classdev *led_cdev, enum led_brightness brightness);
+ struct led_classdev *led_cdev, unsigned int brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5f550eb27f81..26d68115afb8 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -39,25 +39,9 @@
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
-#undef ATA_DEBUG /* debugging output */
-#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-#undef ATA_NDEBUG /* define to disable quick runtime checks */
-/* note: prints function name for you */
-#ifdef ATA_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef ATA_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* ATA_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* ATA_DEBUG */
-
#define ata_print_version_once(dev, version) \
({ \
static bool __print_once; \
@@ -68,38 +52,6 @@
} \
})
-/* NEW: debug levels */
-#define HAVE_LIBATA_MSG 1
-
-enum {
- ATA_MSG_DRV = 0x0001,
- ATA_MSG_INFO = 0x0002,
- ATA_MSG_PROBE = 0x0004,
- ATA_MSG_WARN = 0x0008,
- ATA_MSG_MALLOC = 0x0010,
- ATA_MSG_CTL = 0x0020,
- ATA_MSG_INTR = 0x0040,
- ATA_MSG_ERR = 0x0080,
-};
-
-#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
-#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
-#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
-#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
-#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
-#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
-#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
-#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
-
-static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
-{
- if (dval < 0 || dval >= (sizeof(u32) * 8))
- return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
- if (!dval)
- return 0;
- return (1 << dval) - 1;
-}
-
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
@@ -138,29 +90,36 @@ enum {
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */
+ ATA_DFLAG_FUA = (1 << 9), /* device supports FUA */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
- ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
-
- ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
- ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
- ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
- ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
- ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
- ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
- ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
- ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
- ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
-
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */
+ ATA_DFLAG_CDL = (1 << 13), /* supports cmd duration limits */
+ ATA_DFLAG_CFG_MASK = (1 << 14) - 1,
+
+ ATA_DFLAG_PIO = (1 << 14), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 15), /* device limited to non-NCQ mode */
+ ATA_DFLAG_SLEEPING = (1 << 16), /* device is sleeping */
+ ATA_DFLAG_DUBIOUS_XFER = (1 << 17), /* data transfer not verified */
+ ATA_DFLAG_NO_UNLOAD = (1 << 18), /* device doesn't support unload */
+ ATA_DFLAG_UNLOCK_HPA = (1 << 19), /* unlock HPA */
+ ATA_DFLAG_INIT_MASK = (1 << 20) - 1,
+
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
+ ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits is enabled */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
-
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
+ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+ ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA | \
+ ATA_DFLAG_CDL),
+
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
@@ -187,7 +146,7 @@ enum {
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
- ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */
+ ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -233,6 +192,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -244,15 +204,18 @@ enum {
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+ ATA_QCFLAG_RTF_FILLED = (1 << 2), /* result TF has been filled */
ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
+ ATA_QCFLAG_HAS_CDL = (1 << 8), /* qc has a CDL descriptor set */
- ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+ ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
+ ATA_QCFLAG_EH_SUCCESS_CMD = (1 << 19), /* EH should fetch sense for this successful cmd */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
@@ -260,6 +223,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -293,7 +260,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -319,7 +286,7 @@ enum {
PORT_DISABLED = 2,
/* encoding various smaller bitmaps into a single
- * unsigned long bitmap
+ * unsigned int bitmap
*/
ATA_NR_PIO_MODES = 7,
ATA_NR_MWDMA_MODES = 5,
@@ -351,8 +318,11 @@ enum {
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
+ ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -380,7 +350,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -390,7 +359,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -422,6 +391,10 @@ enum {
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
+ ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
+ ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
+ ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
+ ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -467,12 +440,9 @@ enum {
};
enum ata_xfer_mask {
- ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
- << ATA_SHIFT_PIO,
- ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
- << ATA_SHIFT_MWDMA,
- ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
- << ATA_SHIFT_UDMA,
+ ATA_MASK_PIO = ((1U << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO,
+ ATA_MASK_MWDMA = ((1U << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA,
+ ATA_MASK_UDMA = ((1U << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA,
};
enum hsm_task_states {
@@ -501,7 +471,7 @@ enum ata_completion_errors {
/*
* Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * alter libata-sata.c (for the ascii descriptions)
*/
enum ata_lpm_policy {
ATA_LPM_UNKNOWN,
@@ -535,6 +505,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
extern struct device_attribute dev_attr_unload_heads;
#ifdef CONFIG_SATA_HOST
extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_ncq_prio_supported;
extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
@@ -559,7 +530,10 @@ struct ata_taskfile {
u8 hob_lbam;
u8 hob_lbah;
- u8 feature;
+ union {
+ u8 error;
+ u8 feature;
+ };
u8 nsect;
u8 lbal;
u8 lbam;
@@ -567,7 +541,10 @@ struct ata_taskfile {
u8 device;
- u8 command; /* IO operation */
+ union {
+ u8 status;
+ u8 command;
+ };
u32 auxiliary; /* auxiliary field */
/* from SATA 3.1 and */
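The new unions make the direction of each shared Task File register explicit;
a sketch of the intent (ATA_CMD_ID_ATA and ATA_ERR are existing constants
from <linux/ata.h>, the flow is illustrative):

	struct ata_taskfile tf = { };

	tf.command = ATA_CMD_ID_ATA;	/* issue: host writes the command reg */
	/* ... the qc completes and the result TF is read back ... */
	if (tf.status & ATA_ERR)	/* same register now reads as status */
		pr_err("IDENTIFY failed, error 0x%x\n", tf.error);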
@@ -670,6 +647,18 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
+struct ata_cpr {
+ u8 num;
+ u8 num_storage_elements;
+ u64 start_lba;
+ u64 num_lbas;
+};
+
+struct ata_cpr_log {
+ u8 nr_cpr;
+ struct ata_cpr cpr[] __counted_by(nr_cpr);
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
@@ -702,9 +691,9 @@ struct ata_device {
unsigned int cdb_len;
/* per-dev xfer mask */
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
@@ -729,6 +718,12 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
+ /* Concurrent positioning ranges */
+ struct ata_cpr_log *cpr_log;
+
+ /* Command Duration Limits log support */
+ u8 cdl[ATA_LOG_CDL_SIZE];
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@@ -839,10 +834,8 @@ struct ata_port {
unsigned int cbl; /* cable type; ATA_CBL_xxx */
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
- unsigned long sas_tag_allocated; /* for sas tag allocation only */
u64 qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -858,11 +851,10 @@ struct ata_port {
struct mutex scsi_scan_mutex;
struct delayed_work hotplug_task;
- struct work_struct scsi_rescan_task;
+ struct delayed_work scsi_rescan_task;
unsigned int hsm_task_state;
- u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
@@ -872,7 +864,7 @@ struct ata_port {
enum ata_lpm_policy target_lpm_policy;
struct timer_list fastdrain_timer;
- unsigned long fastdrain_cnt;
+ unsigned int fastdrain_cnt;
async_cookie_t cookie;
@@ -883,6 +875,7 @@ struct ata_port {
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
/* owned by EH */
+ u8 *ncq_sense_buf;
u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
@@ -901,17 +894,19 @@ struct ata_port_operations {
int (*check_atapi_dma)(struct ata_queued_cmd *qc);
enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
- bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_ncq_fill_rtf)(struct ata_port *ap, u64 done_mask);
/*
* Configuration and exception handling
*/
int (*cable_detect)(struct ata_port *ap);
- unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+ unsigned int (*mode_filter)(struct ata_device *dev, unsigned int xfer_mask);
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf,
+ __le16 *id);
void (*dev_config)(struct ata_device *dev);
@@ -987,12 +982,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1002,9 +991,9 @@ struct ata_port_operations {
struct ata_port_info {
unsigned long flags;
unsigned long link_flags;
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
struct ata_port_operations *port_ops;
void *private_data;
};
@@ -1067,6 +1056,11 @@ static inline int ata_port_is_dummy(struct ata_port *ap)
return ap->ops == &ata_dummy_port_ops;
}
+static inline bool ata_port_is_frozen(const struct ata_port *ap)
+{
+ return ap->pflags & ATA_PFLAG_FROZEN;
+}
+
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
@@ -1081,10 +1075,10 @@ extern void ata_host_get(struct ata_host *host);
extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
extern int ata_scsi_detect(struct scsi_host_template *sht);
@@ -1106,7 +1100,7 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
-extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern void ata_sas_port_suspend(struct ata_port *ap);
extern void ata_sas_port_resume(struct ata_port *ap);
@@ -1121,30 +1115,34 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
-extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
- unsigned long mwdma_mask, unsigned long udma_mask);
-extern void ata_unpack_xfermask(unsigned long xfer_mask,
- unsigned long *pio_mask, unsigned long *mwdma_mask,
- unsigned long *udma_mask);
-extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
-extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
-extern int ata_xfer_mode2shift(unsigned long xfer_mode);
-extern const char *ata_mode_string(unsigned long xfer_mask);
-extern unsigned long ata_id_xfermask(const u16 *id);
+extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
+ unsigned int mwdma_mask,
+ unsigned int udma_mask);
+extern void ata_unpack_xfermask(unsigned int xfer_mask,
+ unsigned int *pio_mask,
+ unsigned int *mwdma_mask,
+ unsigned int *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned int xfer_mask);
+extern unsigned int ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(u8 xfer_mode);
+extern const char *ata_mode_string(unsigned int xfer_mask);
+extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf);
extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id);
+ struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
@@ -1152,12 +1150,13 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1167,11 +1166,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *
* SATA specific code - drivers/ata/libata-sata.c
*/
#ifdef CONFIG_SATA_HOST
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
@@ -1186,13 +1185,14 @@ extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
+ const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
+extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
return NULL;
@@ -1212,7 +1212,7 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
static inline int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing,
+ const unsigned int *timing,
unsigned long deadline,
bool *online,
int (*check_ready)(struct ata_link *))
@@ -1222,28 +1222,27 @@ static inline int sata_link_hardreset(struct ata_link *link,
return -EOPNOTSUPP;
}
static inline int sata_link_resume(struct ata_link *link,
- const unsigned long *params,
+ const unsigned int *params,
unsigned long deadline)
{
return -EOPNOTSUPP;
}
+static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
+{
+ return -EOPNOTSUPP;
+}
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
+ const unsigned int *params, unsigned long deadline);
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
+extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
@@ -1288,7 +1287,7 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
struct platform_device;
-extern int ata_platform_remove_one(struct platform_device *pdev);
+extern void ata_platform_remove_one(struct platform_device *pdev);
/*
* ACPI - drivers/ata/libata-acpi.c
@@ -1302,8 +1301,8 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
}
int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
-unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
- const struct ata_acpi_gtm *gtm);
+unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
@@ -1382,7 +1381,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1397,26 +1396,42 @@ extern struct device_attribute *ata_common_sdev_attrs[];
ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
.dma_need_drain = ata_scsi_dma_need_drain, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
+ .slave_alloc = ata_scsi_slave_alloc, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
-#define ATA_BASE_SHT(drv_name) \
+#define ATA_SUBBASE_SHT(drv_name) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
- .sdev_attrs = ata_common_sdev_attrs
+ .can_queue = drv_qd, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_common_sdev_groups
#ifdef CONFIG_SATA_HOST
-extern struct device_attribute *ata_ncq_sdev_attrs[];
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
- __ATA_BASE_SHT(drv_name), \
- .sdev_attrs = ata_ncq_sdev_attrs, \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_ncq_sdev_groups, \
+ .change_queue_depth = ata_scsi_change_queue_depth
+
+#define ATA_NCQ_SHT_QD(drv_name, drv_qd) \
+ ATA_SUBBASE_SHT_QD(drv_name, drv_qd), \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
#endif
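
The SUBBASE/QD split above lets a low-level driver pick a fixed queue depth
without open-coding the shared template fields. As a rough sketch (the driver
name, queue depth and extra members below are hypothetical, not taken from
this patch), a driver template would now look like:

	static const struct scsi_host_template example_sht = {
		ATA_NCQ_SHT_QD("example_sata", 31),
		.sg_tablesize		= LIBATA_MAX_PRD,
		.dma_boundary		= ATA_DMA_BOUNDARY,
	};
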
@@ -1451,7 +1466,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline bool ata_is_host_link(const struct ata_link *link)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SATA_PMP */
@@ -1462,51 +1477,61 @@ static inline int sata_srst_pmp(struct ata_link *link)
return link->pmp;
}
-/*
- * printk helpers
- */
-__printf(3, 4)
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+#define ata_port_printk(level, ap, fmt, ...) \
+ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__)
#define ata_port_err(ap, fmt, ...) \
- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_port_printk(err, ap, fmt, ##__VA_ARGS__)
#define ata_port_warn(ap, fmt, ...) \
- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__)
#define ata_port_notice(ap, fmt, ...) \
- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__)
#define ata_port_info(ap, fmt, ...) \
- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_port_printk(info, ap, fmt, ##__VA_ARGS__)
#define ata_port_dbg(ap, fmt, ...) \
- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__)
+
+#define ata_link_printk(level, link, fmt, ...) \
+do { \
+ if (sata_pmp_attached((link)->ap) || \
+ (link)->ap->slave_link) \
+ pr_ ## level ("ata%u.%02u: " fmt, \
+ (link)->ap->print_id, \
+ (link)->pmp, \
+ ##__VA_ARGS__); \
+ else \
+ pr_ ## level ("ata%u: " fmt, \
+ (link)->ap->print_id, \
+ ##__VA_ARGS__); \
+} while (0)
#define ata_link_err(link, fmt, ...) \
- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_link_printk(err, link, fmt, ##__VA_ARGS__)
#define ata_link_warn(link, fmt, ...) \
- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_link_printk(warn, link, fmt, ##__VA_ARGS__)
#define ata_link_notice(link, fmt, ...) \
- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_link_printk(notice, link, fmt, ##__VA_ARGS__)
#define ata_link_info(link, fmt, ...) \
- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_link_printk(info, link, fmt, ##__VA_ARGS__)
#define ata_link_dbg(link, fmt, ...) \
- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_link_printk(debug, link, fmt, ##__VA_ARGS__)
+
+#define ata_dev_printk(level, dev, fmt, ...) \
+ pr_ ## level("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
#define ata_dev_err(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__)
#define ata_dev_warn(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__)
#define ata_dev_notice(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__)
#define ata_dev_info(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__)
#define ata_dev_dbg(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
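
With the macro-based helpers, an illustrative call such as

	ata_dev_warn(dev, "failed to enable NCQ, using non-NCQ mode\n");

now expands at compile time to pr_warn("ata%u.%02u: " ...), deriving the port
id and device number from @dev, instead of routing through the removed
out-of-line ata_dev_printk() function with an explicit KERN_* level.
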
void ata_print_version(const struct device *dev, const char *version);
@@ -1536,6 +1561,11 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...);
extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
+static inline void ata_port_desc_misc(struct ata_port *ap, int irq)
+{
+ ata_port_desc(ap, "irq %d", irq);
+ ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy);
+}
static inline bool ata_tag_internal(unsigned int tag)
{
@@ -1680,21 +1710,35 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
(dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
/**
- * ata_ncq_enabled - Test whether NCQ is enabled
- * @dev: ATA device to test for
+ * ata_ncq_supported - Test whether NCQ is supported
+ * @dev: ATA device to test
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if NCQ is enabled for @dev, 0 otherwise.
+ * true if @dev supports NCQ, false otherwise.
*/
-static inline int ata_ncq_enabled(struct ata_device *dev)
+static inline bool ata_ncq_supported(struct ata_device *dev)
{
if (!IS_ENABLED(CONFIG_SATA_HOST))
- return 0;
- return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+ return false;
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
+
+/**
+ * ata_ncq_enabled - Test whether NCQ is enabled
+ * @dev: ATA device to test
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * true if NCQ is enabled for @dev, false otherwise.
+ */
+static inline bool ata_ncq_enabled(struct ata_device *dev)
+{
+ return ata_ncq_supported(dev) && !(dev->flags & ATA_DFLAG_NCQ_OFF);
}
static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
@@ -1742,11 +1786,11 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
- ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+ ATA_QCFLAG_EH)) == ATA_QCFLAG_ACTIVE)
return qc;
return NULL;
@@ -1833,7 +1877,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
@@ -1842,23 +1886,21 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies,
change in future hardware and specs, secondly 0xFF means 'no DMA' but is
   > UDMA_0. Here be dragons */
-static inline int ata_using_mwdma(struct ata_device *adev)
+static inline bool ata_using_mwdma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_MW_DMA_0 &&
+ adev->dma_mode <= XFER_MW_DMA_4;
}
-static inline int ata_using_udma(struct ata_device *adev)
+static inline bool ata_using_udma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_UDMA_0 &&
+ adev->dma_mode <= XFER_UDMA_7;
}
-static inline int ata_dma_enabled(struct ata_device *adev)
+static inline bool ata_dma_enabled(struct ata_device *adev)
{
- return (adev->dma_mode == 0xFF ? 0 : 1);
+ return adev->dma_mode != 0xFF;
}
/**************************************************************************
@@ -1908,8 +1950,6 @@ extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_sff_check_status(struct ata_port *ap);
extern void ata_sff_pause(struct ata_port *ap);
extern void ata_sff_dma_pause(struct ata_port *ap);
-extern int ata_sff_busy_sleep(struct ata_port *ap,
- unsigned long timeout_pat, unsigned long timeout);
extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
@@ -1928,7 +1968,7 @@ extern void ata_sff_queue_delayed_work(struct delayed_work *dwork,
unsigned long delay);
extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
-extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
@@ -1955,10 +1995,10 @@ extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht, void *host_priv, int hflags);
+ const struct scsi_host_template *sht, void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#ifdef CONFIG_ATA_BMDMA
@@ -1994,7 +2034,7 @@ extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht,
+ const struct scsi_host_template *sht,
void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */
@@ -2040,11 +2080,8 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
- status);
-#endif
+ ata_port_dbg(ap, "abnormal Status 0x%X\n", status);
return status;
}
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
index b8dc75f0c830..fc388da6a027 100644
--- a/include/linux/libgcc.h
+++ b/include/linux/libgcc.h
@@ -27,4 +27,11 @@ typedef union {
long long ll;
} DWunion;
+long long notrace __ashldi3(long long u, word_type b);
+long long notrace __ashrdi3(long long u, word_type b);
+word_type notrace __cmpdi2(long long a, long long b);
+long long notrace __lshrdi3(long long u, word_type b);
+long long notrace __muldi3(long long u, long long v);
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b);
+
#endif /* __ASM_LIBGCC_H */
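
For reference, the first of these prototypes is implemented in lib/ashldi3.c
in the usual libgcc style on top of the DWunion type declared above; a sketch
of that common implementation (assuming a 32-bit word_type) is:

	long long notrace __ashldi3(long long u, word_type b)
	{
		DWunion uu, w;
		word_type bm;

		if (b == 0)
			return u;

		uu.ll = u;
		bm = 32 - b;

		if (bm <= 0) {
			/* shifting by a full word or more: low word empties */
			w.s.low = 0;
			w.s.high = (unsigned int) uu.s.low << -bm;
		} else {
			const unsigned int carries = (unsigned int) uu.s.low >> bm;

			w.s.low = (unsigned int) uu.s.low << b;
			w.s.high = ((unsigned int) uu.s.high << b) | carries;
		}

		return w.ll;
	}
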
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 01f251b6e36c..e772aae71843 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -25,8 +25,6 @@ struct badrange {
};
enum {
- /* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
@@ -35,10 +33,16 @@ enum {
NDD_SECURITY_OVERWRITE = 3,
/* tracking whether or not there is a pending device reference */
NDD_WORK_PENDING = 4,
- /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
- NDD_NOBLK = 5,
/* dimm supports namespace labels */
NDD_LABELING = 6,
+ /*
+	 * dimm contents have changed, requiring invalidation of CPU caches prior
+ * to activation of a region that includes this device
+ */
+ NDD_INCOHERENT = 7,
+
+ /* dimm provider wants synchronous registration by __nvdimm_create() */
+ NDD_REGISTER_SYNC = 8,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -63,6 +67,9 @@ enum {
/* Platform provides asynchronous flush mechanism */
ND_REGION_ASYNC = 3,
+ /* Region was created by CXL subsystem */
+ ND_REGION_CXL = 4,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -126,6 +133,7 @@ struct nd_region_desc {
int numa_node;
int target_node;
unsigned long flags;
+ int memregion;
struct device_node *of_node;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
@@ -140,22 +148,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
}
struct nvdimm_bus;
-struct module;
-struct device;
-struct nd_blk_region;
-struct nd_blk_region_desc {
- int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw);
- struct nd_region_desc ndr_desc;
-};
-
-static inline struct nd_blk_region_desc *to_blk_region_desc(
- struct nd_region_desc *ndr_desc)
-{
- return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
-
-}
/*
* Note that separate bits for locked + unlocked are defined so that
@@ -199,6 +191,8 @@ struct nvdimm_security_ops {
int (*overwrite)(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data);
int (*query_overwrite)(struct nvdimm *nvdimm);
+ int (*disable_master)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
};
enum nvdimm_fwa_state {
@@ -258,7 +252,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
-struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
@@ -279,6 +272,8 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
+void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
@@ -295,10 +290,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 53f7e4d0f4b7..9ca9ce4e6e64 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -8,44 +8,59 @@
*/
#include <linux/bitops.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>
-#define PS2_CMD_SETSCALE11 0x00e6
-#define PS2_CMD_SETRES 0x10e8
-#define PS2_CMD_GETID 0x02f2
-#define PS2_CMD_RESET_BAT 0x02ff
+struct ps2dev;
-#define PS2_RET_BAT 0xaa
-#define PS2_RET_ID 0x00
-#define PS2_RET_ACK 0xfa
-#define PS2_RET_NAK 0xfe
-#define PS2_RET_ERR 0xfc
+/**
+ * enum ps2_disposition - indicates how received byte should be handled
+ * @PS2_PROCESS: pass to the main protocol handler, process normally
+ * @PS2_IGNORE: skip the byte
+ * @PS2_ERROR: do not process the byte, abort command in progress
+ */
+enum ps2_disposition {
+ PS2_PROCESS,
+ PS2_IGNORE,
+ PS2_ERROR,
+};
-#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */
-#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */
-#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */
-#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */
+typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8,
+ unsigned int);
+typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8);
+/**
+ * struct ps2dev - represents a device using PS/2 protocol
+ * @serio: a serio port used by the PS/2 device
+ * @cmd_mutex: a mutex ensuring that only one command is executing at a time
+ * @wait: a waitqueue used to signal completion from the serio interrupt handler
+ * @flags: various internal flags indicating stages of PS/2 command execution
+ * @cmdbuf: buffer holding command response
+ * @cmdcnt: outstanding number of bytes of the command response
+ * @nak: a byte transmitted by the device when it refuses command
+ * @pre_receive_handler: checks communication errors and returns disposition
+ * (&enum ps2_disposition) of the received data byte
+ * @receive_handler: main handler of particular PS/2 protocol, such as keyboard
+ * or mouse protocol
+ */
struct ps2dev {
struct serio *serio;
-
- /* Ensures that only one command is executing at a time */
struct mutex cmd_mutex;
-
- /* Used to signal completion from interrupt handler */
wait_queue_head_t wait;
-
unsigned long flags;
u8 cmdbuf[8];
u8 cmdcnt;
u8 nak;
+
+ ps2_pre_receive_handler_t pre_receive_handler;
+ ps2_receive_handler_t receive_handler;
};
-void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
+void ps2_init(struct ps2dev *ps2dev, struct serio *serio,
+ ps2_pre_receive_handler_t pre_receive_handler,
+ ps2_receive_handler_t receive_handler);
int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
@@ -53,9 +68,8 @@ void ps2_end_command(struct ps2dev *ps2dev);
int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
-bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data);
-bool ps2_handle_response(struct ps2dev *ps2dev, u8 data);
-void ps2_cmd_aborted(struct ps2dev *ps2dev);
bool ps2_is_keyboard_id(u8 id);
+irqreturn_t ps2_interrupt(struct serio *serio, u8 data, unsigned int flags);
+
#endif /* _LIBPS2_H */
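
Under the reworked API a protocol driver supplies both byte handlers to
ps2_init() and lets the shared ps2_interrupt() do the ACK/response
bookkeeping. A minimal sketch (the handler names and error policy are
illustrative, not part of this patch):

	static enum ps2_disposition ex_pre_receive(struct ps2dev *ps2dev,
						   u8 data, unsigned int flags)
	{
		if (flags & (SERIO_PARITY | SERIO_TIMEOUT))
			return PS2_ERROR;	/* abort command in progress */

		return PS2_PROCESS;
	}

	static void ex_receive(struct ps2dev *ps2dev, u8 data)
	{
		/* feed @data into the keyboard/mouse protocol state machine */
	}

	...
	ps2_init(ps2dev, serio, ex_pre_receive, ex_receive);
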
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644
index 1db223710b28..000000000000
--- a/include/linux/lightnvm.h
+++ /dev/null
@@ -1,699 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- NVM_IO_OK = 0,
- NVM_IO_REQUEUE = 1,
- NVM_IO_DONE = 2,
- NVM_IO_ERR = 3,
-
- NVM_IOTYPE_NONE = 0,
- NVM_IOTYPE_GC = 1,
-};
-
-/* common format */
-#define NVM_GEN_CH_BITS (8)
-#define NVM_GEN_LUN_BITS (8)
-#define NVM_GEN_BLK_BITS (16)
-#define NVM_GEN_RESERVED (32)
-
-/* 1.2 format */
-#define NVM_12_PG_BITS (16)
-#define NVM_12_PL_BITS (4)
-#define NVM_12_SEC_BITS (4)
-#define NVM_12_RESERVED (8)
-
-/* 2.0 format */
-#define NVM_20_SEC_BITS (24)
-#define NVM_20_RESERVED (8)
-
-enum {
- NVM_OCSSD_SPEC_12 = 12,
- NVM_OCSSD_SPEC_20 = 20,
-};
-
-struct ppa_addr {
- /* Generic structure for all addresses */
- union {
- /* generic device format */
- struct {
- u64 ch : NVM_GEN_CH_BITS;
- u64 lun : NVM_GEN_LUN_BITS;
- u64 blk : NVM_GEN_BLK_BITS;
- u64 reserved : NVM_GEN_RESERVED;
- } a;
-
- /* 1.2 device format */
- struct {
- u64 ch : NVM_GEN_CH_BITS;
- u64 lun : NVM_GEN_LUN_BITS;
- u64 blk : NVM_GEN_BLK_BITS;
- u64 pg : NVM_12_PG_BITS;
- u64 pl : NVM_12_PL_BITS;
- u64 sec : NVM_12_SEC_BITS;
- u64 reserved : NVM_12_RESERVED;
- } g;
-
- /* 2.0 device format */
- struct {
- u64 grp : NVM_GEN_CH_BITS;
- u64 pu : NVM_GEN_LUN_BITS;
- u64 chk : NVM_GEN_BLK_BITS;
- u64 sec : NVM_20_SEC_BITS;
- u64 reserved : NVM_20_RESERVED;
- } m;
-
- struct {
- u64 line : 63;
- u64 is_cached : 1;
- } c;
-
- u64 ppa;
- };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-struct nvm_chk_meta;
-
-typedef int (nvm_id_fn)(struct nvm_dev *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
- struct nvm_chk_meta *);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
- dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
- nvm_id_fn *identity;
- nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb_tbl;
-
- nvm_get_chk_meta_fn *get_chk_meta;
-
- nvm_submit_io_fn *submit_io;
-
- nvm_create_dma_pool_fn *create_dma_pool;
- nvm_destroy_dma_pool_fn *destroy_dma_pool;
- nvm_dev_dma_alloc_fn *dev_dma_alloc;
- nvm_dev_dma_free_fn *dev_dma_free;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/blkdev.h>
-#include <linux/file.h>
-#include <linux/dmapool.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- /* HW Responsibilities */
- NVM_RSP_L2P = 1 << 0,
- NVM_RSP_ECC = 1 << 1,
-
- /* Physical Adressing Mode */
- NVM_ADDRMODE_LINEAR = 0,
- NVM_ADDRMODE_CHANNEL = 1,
-
- /* Plane programming mode for LUN */
- NVM_PLANE_SINGLE = 1,
- NVM_PLANE_DOUBLE = 2,
- NVM_PLANE_QUAD = 4,
-
- /* Status codes */
- NVM_RSP_SUCCESS = 0x0,
- NVM_RSP_NOT_CHANGEABLE = 0x1,
- NVM_RSP_ERR_FAILWRITE = 0x40ff,
- NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
- NVM_RSP_ERR_FAILECC = 0x4281,
- NVM_RSP_ERR_FAILCRC = 0x4004,
- NVM_RSP_WARN_HIGHECC = 0x4700,
-
- /* Device opcodes */
- NVM_OP_PWRITE = 0x91,
- NVM_OP_PREAD = 0x92,
- NVM_OP_ERASE = 0x90,
-
- /* PPA Command Flags */
- NVM_IO_SNGL_ACCESS = 0x0,
- NVM_IO_DUAL_ACCESS = 0x1,
- NVM_IO_QUAD_ACCESS = 0x2,
-
- /* NAND Access Modes */
- NVM_IO_SUSPEND = 0x80,
- NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_ENABLE = 0x200,
-
- /* Block Types */
- NVM_BLK_T_FREE = 0x0,
- NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_GRWN_BAD = 0x2,
- NVM_BLK_T_DEV = 0x4,
- NVM_BLK_T_HOST = 0x8,
-
- /* Memory capabilities */
- NVM_ID_CAP_SLC = 0x1,
- NVM_ID_CAP_CMD_SUSPEND = 0x2,
- NVM_ID_CAP_SCRAMBLE = 0x4,
- NVM_ID_CAP_ENCRYPT = 0x8,
-
- /* Memory types */
- NVM_ID_FMTYPE_SLC = 0,
- NVM_ID_FMTYPE_MLC = 1,
-
- /* Device capabilities */
- NVM_ID_DCAP_BBLKMGMT = 0x1,
- NVM_UD_DCAP_ECC = 0x2,
-};
-
-struct nvm_id_lp_mlc {
- u16 num_pairs;
- u8 pairs[886];
-};
-
-struct nvm_id_lp_tbl {
- __u8 id[8];
- struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_addrf_12 {
- u8 ch_len;
- u8 lun_len;
- u8 blk_len;
- u8 pg_len;
- u8 pln_len;
- u8 sec_len;
-
- u8 ch_offset;
- u8 lun_offset;
- u8 blk_offset;
- u8 pg_offset;
- u8 pln_offset;
- u8 sec_offset;
-
- u64 ch_mask;
- u64 lun_mask;
- u64 blk_mask;
- u64 pg_mask;
- u64 pln_mask;
- u64 sec_mask;
-};
-
-struct nvm_addrf {
- u8 ch_len;
- u8 lun_len;
- u8 chk_len;
- u8 sec_len;
- u8 rsv_len[2];
-
- u8 ch_offset;
- u8 lun_offset;
- u8 chk_offset;
- u8 sec_offset;
- u8 rsv_off[2];
-
- u64 ch_mask;
- u64 lun_mask;
- u64 chk_mask;
- u64 sec_mask;
- u64 rsv_mask[2];
-};
-
-enum {
- /* Chunk states */
- NVM_CHK_ST_FREE = 1 << 0,
- NVM_CHK_ST_CLOSED = 1 << 1,
- NVM_CHK_ST_OPEN = 1 << 2,
- NVM_CHK_ST_OFFLINE = 1 << 3,
-
- /* Chunk types */
- NVM_CHK_TP_W_SEQ = 1 << 0,
- NVM_CHK_TP_W_RAN = 1 << 1,
- NVM_CHK_TP_SZ_SPEC = 1 << 4,
-};
-
-/*
- * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
- * buffer can be used when converting from little endian to cpu addressing.
- */
-struct nvm_chk_meta {
- u8 state;
- u8 type;
- u8 wi;
- u8 rsvd[5];
- u64 slba;
- u64 cnlb;
- u64 wp;
-};
-
-struct nvm_target {
- struct list_head list;
- struct nvm_tgt_dev *dev;
- struct nvm_tgt_type *type;
- struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_TARGET_DEFAULT_OP (101)
-#define NVM_TARGET_MIN_OP (3)
-#define NVM_TARGET_MAX_OP (80)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
- struct nvm_tgt_dev *dev;
-
- struct bio *bio;
-
- union {
- struct ppa_addr ppa_addr;
- dma_addr_t dma_ppa_list;
- };
-
- struct ppa_addr *ppa_list;
-
- void *meta_list;
- dma_addr_t dma_meta_list;
-
- nvm_end_io_fn *end_io;
-
- uint8_t opcode;
- uint16_t nr_ppas;
- uint16_t flags;
-
- u64 ppa_status; /* ppa media status */
- int error;
-
- int is_seq; /* Sequential hint flag. 1.2 only */
-
- void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
- return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
- return rqdata + 1;
-}
-
-static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
-{
- return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-}
-
-enum {
- NVM_BLK_ST_FREE = 0x1, /* Free block */
- NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
- NVM_BLK_ST_BAD = 0x8, /* Bad block */
-};
-
-/* Instance geometry */
-struct nvm_geo {
- /* device reported version */
- u8 major_ver_id;
- u8 minor_ver_id;
-
- /* kernel short version */
- u8 version;
-
- /* instance specific geometry */
- int num_ch;
- int num_lun; /* per channel */
-
- /* calculated values */
- int all_luns; /* across channels */
- int all_chunks; /* across channels */
-
- int op; /* over-provision in instance */
-
- sector_t total_secs; /* across channels */
-
- /* chunk geometry */
- u32 num_chk; /* chunks per lun */
- u32 clba; /* sectors per chunk */
- u16 csecs; /* sector size */
- u16 sos; /* out-of-band area size */
- bool ext; /* metadata in extended data buffer */
- u32 mdts; /* Max data transfer size*/
-
- /* device write constrains */
- u32 ws_min; /* minimum write size */
- u32 ws_opt; /* optimal write size */
- u32 mw_cunits; /* distance required for successful read */
- u32 maxoc; /* maximum open chunks */
- u32 maxocpu; /* maximum open chunks per parallel unit */
-
- /* device capabilities */
- u32 mccap;
-
- /* device timings */
- u32 trdt; /* Avg. Tread (ns) */
- u32 trdm; /* Max Tread (ns) */
- u32 tprt; /* Avg. Tprog (ns) */
- u32 tprm; /* Max Tprog (ns) */
- u32 tbet; /* Avg. Terase (ns) */
- u32 tbem; /* Max Terase (ns) */
-
- /* generic address format */
- struct nvm_addrf addrf;
-
- /* 1.2 compatibility */
- u8 vmnt;
- u32 cap;
- u32 dom;
-
- u8 mtype;
- u8 fmtype;
-
- u16 cpar;
- u32 mpos;
-
- u8 num_pln;
- u8 pln_mode;
- u16 num_pg;
- u16 fpg_sz;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
- /* Device information */
- struct nvm_geo geo;
-
- /* Base ppas for target LUNs */
- struct ppa_addr *luns;
-
- struct request_queue *q;
-
- struct nvm_dev *parent;
- void *map;
-};
-
-struct nvm_dev {
- struct nvm_dev_ops *ops;
-
- struct list_head devices;
-
- /* Device information */
- struct nvm_geo geo;
-
- unsigned long *lun_map;
- void *dma_pool;
-
- /* Backend device */
- struct request_queue *q;
- char name[DISK_NAME_LEN];
- void *private_data;
-
- struct kref ref;
- void *rmap;
-
- struct mutex mlock;
- spinlock_t lock;
-
- /* target management */
- struct list_head area_list;
- struct list_head targets;
-};
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr l;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
- l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
- l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
- l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
- l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
- l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
- l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &geo->addrf;
-
- l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
- l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
- l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
- l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
- }
-
- return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr l;
-
- l.ppa = 0;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
- l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
- l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
- l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
- l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
- l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
- l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &geo->addrf;
-
- l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
- l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
- l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
- l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
- }
-
- return l;
-}
-
-static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
- struct ppa_addr p)
-{
- struct nvm_geo *geo = &dev->geo;
- u64 caddr;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
-
- caddr = (u64)p.g.pg << ppaf->pg_offset;
- caddr |= (u64)p.g.pl << ppaf->pln_offset;
- caddr |= (u64)p.g.sec << ppaf->sec_offset;
- } else {
- caddr = p.m.sec;
- }
-
- return caddr;
-}
-
-static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
- void *addrf, u32 ppa32)
-{
- struct ppa_addr ppa64;
-
- ppa64.ppa = 0;
-
- if (ppa32 == -1) {
- ppa64.ppa = ADDR_EMPTY;
- } else if (ppa32 & (1U << 31)) {
- ppa64.c.line = ppa32 & ((~0U) >> 1);
- ppa64.c.is_cached = 1;
- } else {
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = addrf;
-
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
- ppaf->ch_offset;
- ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
- ppaf->lun_offset;
- ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
- ppaf->blk_offset;
- ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
- ppaf->pg_offset;
- ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
- ppaf->pln_offset;
- ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
- ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = addrf;
-
- ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
- lbaf->ch_offset;
- ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
- lbaf->lun_offset;
- ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
- lbaf->chk_offset;
- ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
- lbaf->sec_offset;
- }
- }
-
- return ppa64;
-}
-
-static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
- void *addrf, struct ppa_addr ppa64)
-{
- u32 ppa32 = 0;
-
- if (ppa64.ppa == ADDR_EMPTY) {
- ppa32 = ~0U;
- } else if (ppa64.c.is_cached) {
- ppa32 |= ppa64.c.line;
- ppa32 |= 1U << 31;
- } else {
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = addrf;
-
- ppa32 |= ppa64.g.ch << ppaf->ch_offset;
- ppa32 |= ppa64.g.lun << ppaf->lun_offset;
- ppa32 |= ppa64.g.blk << ppaf->blk_offset;
- ppa32 |= ppa64.g.pg << ppaf->pg_offset;
- ppa32 |= ppa64.g.pl << ppaf->pln_offset;
- ppa32 |= ppa64.g.sec << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = addrf;
-
- ppa32 |= ppa64.m.grp << lbaf->ch_offset;
- ppa32 |= ppa64.m.pu << lbaf->lun_offset;
- ppa32 |= ppa64.m.chk << lbaf->chk_offset;
- ppa32 |= ppa64.m.sec << lbaf->sec_offset;
- }
- }
-
- return ppa32;
-}
-
-static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
- struct ppa_addr *ppa)
-{
- struct nvm_geo *geo = &dev->geo;
- int last = 0;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- int sec = ppa->g.sec;
-
- sec++;
- if (sec == geo->ws_min) {
- int pg = ppa->g.pg;
-
- sec = 0;
- pg++;
- if (pg == geo->num_pg) {
- int pl = ppa->g.pl;
-
- pg = 0;
- pl++;
- if (pl == geo->num_pln)
- last = 1;
-
- ppa->g.pl = pl;
- }
- ppa->g.pg = pg;
- }
- ppa->g.sec = sec;
- } else {
- ppa->m.sec++;
- if (ppa->m.sec == geo->clba)
- last = 1;
- }
-
- return last;
-}
-
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
- int flags);
-typedef void (nvm_tgt_exit_fn)(void *, bool);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-enum {
- NVM_TGT_F_DEV_L2P = 0,
- NVM_TGT_F_HOST_L2P = 1 << 0,
-};
-
-struct nvm_tgt_type {
- const char *name;
- unsigned int version[3];
- int flags;
-
- /* target entry points */
- const struct block_device_operations *bops;
- nvm_tgt_capacity_fn *capacity;
-
- /* module-specific init/teardown */
- nvm_tgt_init_fn *init;
- nvm_tgt_exit_fn *exit;
-
- /* sysfs */
- nvm_tgt_sysfs_init_fn *sysfs_init;
- nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
- /* For internal use */
- struct list_head list;
- struct module *owner;
-};
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
- int, struct nvm_chk_meta *);
-extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
- int, int);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern void nvm_end_io(struct nvm_rq *);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
- return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
diff --git a/include/linux/limits.h b/include/linux/limits.h
index b568b9c30bbf..38eb7f6f7e88 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -7,8 +7,11 @@
#include <vdso/limits.h>
#define SIZE_MAX (~(size_t)0)
+#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+#define RESOURCE_SIZE_MAX ((resource_size_t)~0)
+
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX >> 1))
#define S8_MIN ((s8)(-S8_MAX - 1))
diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h
index 17b5943727d5..2e4f4c3539c0 100644
--- a/include/linux/linear_range.h
+++ b/include/linux/linear_range.h
@@ -26,6 +26,17 @@ struct linear_range {
unsigned int step;
};
+#define LINEAR_RANGE(_min, _min_sel, _max_sel, _step) \
+ { \
+ .min = _min, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .step = _step, \
+ }
+
+#define LINEAR_RANGE_IDX(_idx, _min, _min_sel, _max_sel, _step) \
+ [_idx] = LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
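These initializers shorten range tables noticeably; for example, a regulator
driver could describe a hypothetical 0.5 V..1.5 V ladder with 50 mV steps
(values in microvolts, selectors 0x0..0x14) as:

	static const struct linear_range ex_buck_volts[] = {
		LINEAR_RANGE(500000, 0x0, 0x14, 50000),
	};
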
unsigned int linear_range_values_in_range(const struct linear_range *r);
unsigned int linear_range_values_in_range_array(const struct linear_range *r,
int ranges);
@@ -41,6 +52,8 @@ int linear_range_get_selector_low(const struct linear_range *r,
int linear_range_get_selector_high(const struct linear_range *r,
unsigned int val, unsigned int *selector,
bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector);
int linear_range_get_selector_low_array(const struct linear_range *r,
int ranges, unsigned int val,
unsigned int *selector, bool *found);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d796ec20d114..5c8865bb59d9 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -36,8 +36,8 @@
__stringify(name))
#endif
-#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE)
/*
* For assembly routines.
@@ -69,8 +69,8 @@
#endif
#ifndef __ALIGN
-#define __ALIGN .align 4,0x90
-#define __ALIGN_STR ".align 4,0x90"
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR __stringify(__ALIGN)
#endif
#ifdef __ASSEMBLY__
@@ -165,7 +165,15 @@
#ifndef SYM_END
#define SYM_END(name, sym_type) \
.type name sym_type ASM_NL \
- .size name, .-name
+ .set .L__sym_size_##name, .-name ASM_NL \
+ .size name, .L__sym_size_##name
+#endif
+
+/* SYM_ALIAS -- use only if you have to */
+#ifndef SYM_ALIAS
+#define SYM_ALIAS(alias, name, linkage) \
+ linkage(alias) ASM_NL \
+ .set alias, name ASM_NL
#endif
/* === code annotations === */
@@ -178,6 +186,11 @@
* Objtool generates debug info for both FUNC & CODE, but needs special
* annotations for each CODE's start (to describe the actual stack frame).
*
+ * Objtool requires that all code be contained in an ELF symbol. Symbol
+ * names with a .L prefix do not emit symbol table entries. .L-prefixed
+ * symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
* ALIAS -- does not generate debug info -- the aliased function will
*/
@@ -195,30 +208,8 @@
SYM_ENTRY(name, linkage, SYM_A_NONE)
#endif
-/*
- * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
- * function
- */
-#ifndef SYM_FUNC_START_LOCAL_ALIAS
-#define SYM_FUNC_START_LOCAL_ALIAS(name) \
- SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
-#endif
-
-/*
- * SYM_FUNC_START_ALIAS -- use where there are two global names for one
- * function
- */
-#ifndef SYM_FUNC_START_ALIAS
-#define SYM_FUNC_START_ALIAS(name) \
- SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
-#endif
-
/* SYM_FUNC_START -- use for global functions */
#ifndef SYM_FUNC_START
-/*
- * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
- * later.
- */
#define SYM_FUNC_START(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
@@ -231,7 +222,6 @@
/* SYM_FUNC_START_LOCAL -- use for local functions */
#ifndef SYM_FUNC_START_LOCAL
-/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
#define SYM_FUNC_START_LOCAL(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
#endif
@@ -254,22 +244,39 @@
SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
#endif
-/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
-#ifndef SYM_FUNC_END_ALIAS
-#define SYM_FUNC_END_ALIAS(name) \
- SYM_END(name, SYM_T_FUNC)
-#endif
-
/*
* SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
* SYM_FUNC_START_WEAK, ...
*/
#ifndef SYM_FUNC_END
-/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
#define SYM_FUNC_END(name) \
SYM_END(name, SYM_T_FUNC)
#endif
+/*
+ * SYM_FUNC_ALIAS -- define a global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS
+#define SYM_FUNC_ALIAS(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_GLOBAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_LOCAL
+#define SYM_FUNC_ALIAS_LOCAL(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_LOCAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_WEAK
+#define SYM_FUNC_ALIAS_WEAK(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_WEAK)
+#endif
+
/* SYM_CODE_START -- use for non-C (special) functions */
#ifndef SYM_CODE_START
#define SYM_CODE_START(name) \
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index f8397f300fcd..287f590ed56b 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -10,6 +10,11 @@ static inline void linkmode_zero(unsigned long *dst)
bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline void linkmode_fill(unsigned long *dst)
+{
+ bitmap_fill(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
{
bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -43,15 +48,6 @@ static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
__set_bit(nr, addr);
}
-static inline void linkmode_set_bit_array(const int *array, int array_size,
- unsigned long *addr)
-{
- int i;
-
- for (i = 0; i < array_size; i++)
- linkmode_set_bit(array[i], addr);
-}
-
static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
{
__clear_bit(nr, addr);
@@ -66,14 +62,18 @@ static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
linkmode_clear_bit(nr, addr);
}
-static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
+static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
{
- __change_bit(nr, addr);
+ return test_bit(nr, addr);
}
-static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
{
- return test_bit(nr, addr);
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
}
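
The reordering keeps linkmode_set_bit_array() next to the single-bit helper
it is built on; seeding a mask from a table still looks like this sketch
(the chosen modes are illustrative):

	static const int ex_modes[] = {
		ETHTOOL_LINK_MODE_Autoneg_BIT,
		ETHTOOL_LINK_MODE_TP_BIT,
		ETHTOOL_LINK_MODE_100baseT_Full_BIT,
	};
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);

	linkmode_zero(supported);
	linkmode_set_bit_array(ex_modes, ARRAY_SIZE(ex_modes), supported);
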
static inline int linkmode_equal(const unsigned long *src1,
diff --git a/include/linux/list.h b/include/linux/list.h
index 0d0d17a10d25..5f4b0a39cf46 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -2,14 +2,16 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
+#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
-#include <linux/kernel.h>
+
+#include <asm/barrier.h>
/*
- * Simple doubly linked list implementation.
+ * Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
@@ -33,14 +35,95 @@
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
- list->prev = list;
+ WRITE_ONCE(list->prev, list);
}
+#ifdef CONFIG_LIST_HARDENED
+
#ifdef CONFIG_DEBUG_LIST
-extern bool __list_add_valid(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-extern bool __list_del_entry_valid(struct list_head *entry);
+# define __list_valid_slowpath
+#else
+# define __list_valid_slowpath __cold __preserve_most
+#endif
+
+/*
+ * Performs the full set of list corruption checks before __list_add().
+ * On list corruption reports a warning, and returns false.
+ */
+extern bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+
+/*
+ * Performs list corruption checks before __list_add(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_add_valid_or_report().
+ */
+static __always_inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, since the immediate dereference of them below would
+ * result in a fault if NULL.
+ *
+ * With the reduced set of checks, we can afford to inline the
+ * checks, which also gives the compiler a chance to elide some
+ * of them completely if they can be proven at compile-time. If
+ * one of the pre-conditions does not hold, the slow-path will
+ * show a report which pre-condition failed.
+ */
+ if (likely(next->prev == prev && prev->next == next && new != prev && new != next))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_add_valid_or_report(new, prev, next);
+ return ret;
+}
+
+/*
+ * Performs the full set of list corruption checks before __list_del_entry().
+ * On list corruption reports a warning, and returns false.
+ */
+extern bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
+
+/*
+ * Performs list corruption checks before __list_del_entry(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_del_entry_valid_or_report().
+ */
+static __always_inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ struct list_head *prev = entry->prev;
+ struct list_head *next = entry->next;
+
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate
+ * dereference of them below would result in a fault.
+ */
+ if (likely(prev->next == entry && next->prev == entry))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_del_entry_valid_or_report(entry);
+ return ret;
+}
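
These validators are called from the list primitives themselves, so the
common case stays inline and only a detected corruption takes the
out-of-line reporting path. Conceptually the pattern in __list_add() is
(a close sketch of the kernel function):

	static inline void __list_add(struct list_head *new,
				      struct list_head *prev,
				      struct list_head *next)
	{
		if (!__list_add_valid(new, prev, next))
			return;

		next->prev = new;
		new->next = next;
		new->prev = prev;
		WRITE_ONCE(prev->next, new);
	}
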
#else
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
@@ -256,8 +339,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
{
return list->prev == head;
}
@@ -267,13 +349,22 @@ static inline int list_is_first(const struct list_head *list,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
+ * list_is_head - tests whether @list is the list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_head(const struct list_head *list, const struct list_head *head)
+{
+ return list == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
@@ -296,7 +387,7 @@ static inline int list_empty(const struct list_head *head)
static inline void list_del_init_careful(struct list_head *entry)
{
__list_del_entry(entry);
- entry->prev = entry;
+ WRITE_ONCE(entry->prev, entry);
smp_store_release(&entry->next, entry);
}
@@ -316,7 +407,7 @@ static inline void list_del_init_careful(struct list_head *entry)
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = smp_load_acquire(&head->next);
- return (next == head) && (next == head->prev);
+ return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
/**
@@ -391,10 +482,9 @@ static inline void list_cut_position(struct list_head *list,
{
if (list_empty(head))
return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
+ if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
- if (entry == head)
+ if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
@@ -555,6 +645,19 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the last element (returns the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_next_entry_circular(pos, head, member) \
+ (list_is_last(&(pos)->member, head) ? \
+ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
+
+/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -563,12 +666,43 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
+ * list_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the first element (returns the last element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_prev_entry_circular(pos, head, member) \
+ (list_is_first(&(pos)->member, head) ? \
+ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
+
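A round-robin consumer can use the circular variants to wrap at either end
without special-casing the head; a sketch with a hypothetical item type and
list:

	struct ex_item {
		struct list_head node;
		int payload;
	};
	static LIST_HEAD(ex_list);

	/* advance the cursor, wrapping from the tail back to the head */
	cur = list_next_entry_circular(cur, &ex_list, node);
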
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
+ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
+
+/**
+ * list_for_each_reverse - iterate backwards over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_reverse(pos, head) \
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ !list_is_head(pos, (head)); \
+ pos = rcu_dereference(pos->next))
/**
* list_for_each_continue - continue iteration over a list
@@ -578,7 +712,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Continue to iterate over a list, continuing after the current position.
*/
#define list_for_each_continue(pos, head) \
- for (pos = pos->next; pos != (head); pos = pos->next)
+ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
@@ -586,7 +720,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
+ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -595,8 +729,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
+ for (pos = (head)->next, n = pos->next; \
+ !list_is_head(pos, (head)); \
+ pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
@@ -606,10 +741,34 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- pos != (head); \
+ !list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
+ * list_count_nodes - count nodes in the list
+ * @head: the head for your list.
+ */
+static inline size_t list_count_nodes(struct list_head *head)
+{
+ struct list_head *pos;
+ size_t count = 0;
+
+ list_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
+/**
+ * list_entry_is_head - test if the entry points to the head of the list
+ * @pos: the type * to cursor
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_entry_is_head(pos, head, member) \
+ list_is_head(&pos->member, (head))
+
+/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
@@ -617,7 +776,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -628,7 +787,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -653,7 +812,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -667,7 +826,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -679,7 +838,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -692,7 +851,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate backwards over list of given type, continuing from current position.
*/
#define list_for_each_entry_from_reverse(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -705,7 +864,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -721,7 +880,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -736,7 +895,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -752,7 +911,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_prev_entry(n, member))
/**
@@ -892,7 +1051,7 @@ static inline void hlist_add_before(struct hlist_node *n,
}
/**
- * hlist_add_behing - add a new entry after the one specified
+ * hlist_add_behind - add a new entry after the one specified
* @n: new entry to be added
* @prev: hlist node to add it after, which must be non-NULL
*/
@@ -960,6 +1119,26 @@ static inline void hlist_move_list(struct hlist_head *old,
old->first = NULL;
}
+/**
+ * hlist_splice_init() - move all entries from one list to another
+ * @from: hlist_head from which entries will be moved
+ * @last: last entry on the @from list
+ * @to: hlist_head to which entries will be moved
+ *
+ * @to can be empty, @from must contain at least @last.
+ */
+static inline void hlist_splice_init(struct hlist_head *from,
+ struct hlist_node *last,
+ struct hlist_head *to)
+{
+ if (to->first)
+ to->first->pprev = &last->next;
+ last->next = to->first;
+ to->first = from->first;
+ from->first->pprev = &to->first;
+ from->first = NULL;
+}
+
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
@@ -1016,4 +1195,19 @@ static inline void hlist_move_list(struct hlist_head *old,
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
+/**
+ * hlist_count_nodes - count nodes in the hlist
+ * @head: the head for your hlist.
+ */
+static inline size_t hlist_count_nodes(struct hlist_head *head)
+{
+ struct hlist_node *pos;
+ size_t count = 0;
+
+ hlist_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
#endif
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 9dcaa3e582c9..792b67ceb631 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
+#include <linux/xarray.h>
struct mem_cgroup;
@@ -23,6 +24,8 @@ enum lru_status {
LRU_SKIP, /* item cannot be locked, skip */
LRU_RETRY, /* item not freeable. May drop the lock
internally, but has to return locked. */
+ LRU_STOP, /* stop lru list walking. May drop the lock
+ internally, but has to return locked. */
};
struct list_lru_one {
@@ -33,8 +36,8 @@ struct list_lru_one {
struct list_lru_memcg {
struct rcu_head rcu;
- /* array of per cgroup lists, indexed by memcg_cache_id */
- struct list_lru_one *lru[];
+ /* array of per cgroup per node lists, indexed by node id */
+ struct list_lru_one node[];
};
struct list_lru_node {
@@ -42,11 +45,7 @@ struct list_lru_node {
spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#ifdef CONFIG_MEMCG_KMEM
- /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg __rcu *memcg_lrus;
-#endif
- long nr_items;
+ long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
@@ -55,6 +54,7 @@ struct list_lru {
struct list_head list;
int shrinker_id;
bool memcg_aware;
+ struct xarray xa;
#endif
};
@@ -64,18 +64,19 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
#define list_lru_init(lru) \
__list_lru_init((lru), false, NULL, NULL)
-#define list_lru_init_key(lru, key) \
- __list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker) \
__list_lru_init((lru), true, NULL, shrinker)
-int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+ gfp_t gfp);
+void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about whether or
@@ -84,24 +85,54 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
* the caller organize itself in a way that elements can be in more than
* one type of list, it is up to the caller to fully remove the item from
* the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
+ * to @lru.
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This assumption
+ * is valid for slab object LRUs such as dentries, inodes, etc.
*
* Return value: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
*
- * This function works analogously as list_lru_add in terms of list
+ * This function works analogously to list_lru_add() in terms of list
* manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * a list are also valid for list_lru_del().
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This assumption
+ * is valid for slab object LRUs such as dentries, inodes, etc.
+ *
+ * Return: true if the list was updated, false otherwise.
+ */
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
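As a hedged sketch of how the _obj variants are meant to be used (the cache type and helpers are invented, and the list_lru is assumed to have been set up with list_lru_init_memcg()), a slab-backed cache never has to plumb nid/memcg itself:

#include <linux/list_lru.h>

struct cache_obj {
        struct list_head lru;   /* must be embedded in a slab object */
        /* ... payload ... */
};

static struct list_lru cache_lru;

static void cache_obj_make_reclaimable(struct cache_obj *obj)
{
        /* NUMA node and memcg are derived from the slab page backing @obj */
        list_lru_add_obj(&cache_lru, &obj->lru);
}

static void cache_obj_pin(struct cache_obj *obj)
{
        /* returns false if @obj was not on the list */
        list_lru_del_obj(&cache_lru, &obj->lru);
}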
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -109,9 +140,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -142,42 +175,42 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
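For illustration, an @isolate callback matching the list_lru_walk_cb typedef above could look like the following sketch; it reuses the hypothetical struct cache_obj from the earlier sketch, obj_is_referenced() is invented, and list_lru_isolate_move() is the stock helper for collecting items on a dispose list:

static enum lru_status cache_isolate(struct list_head *item,
                                     struct list_lru_one *list,
                                     spinlock_t *lock, void *cb_arg)
{
        struct cache_obj *obj = container_of(item, struct cache_obj, lru);
        struct list_head *dispose = cb_arg;     /* caller-provided */

        if (obj_is_referenced(obj))             /* hypothetical test */
                return LRU_ROTATE;              /* keep it, rotate to tail */

        list_lru_isolate_move(list, item, dispose);
        return LRU_REMOVED;
}

/* ... list_lru_walk_one(&cache_lru, nid, memcg, cache_isolate,
 *                       &dispose, &nr_to_walk); ... */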
/**
- * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
* spin_lock_irq().
*/
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
index 20f178c24e9d..453105f74e05 100644
--- a/include/linux/list_sort.h
+++ b/include/linux/list_sort.h
@@ -6,8 +6,9 @@
struct list_head;
+typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,
+ const struct list_head *, const struct list_head *);
+
__attribute__((nonnull(2,3)))
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b));
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
#endif
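A comparison callback matching the new list_cmp_func_t shape could look like this sketch (struct item and its fields are made up); note the const-qualified list_head arguments the typedef now enforces:

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
        int key;
        struct list_head node;
};

static int item_cmp(void *priv, const struct list_head *a,
                    const struct list_head *b)
{
        const struct item *ia = list_entry(a, struct item, node);
        const struct item *ib = list_entry(b, struct item, node);

        /* list_sort() only looks at the sign: ascending by key */
        return ia->key - ib->key;
}

/* ... list_sort(NULL, &item_list, item_cmp); ... */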
diff --git a/include/linux/litex.h b/include/linux/litex.h
new file mode 100644
index 000000000000..f2edb86d5f44
--- /dev/null
+++ b/include/linux/litex.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common LiteX header providing
+ * helper functions for accessing CSRs.
+ *
+ * Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
+ */
+
+#ifndef _LINUX_LITEX_H
+#define _LINUX_LITEX_H
+
+#include <linux/io.h>
+
+static inline void _write_litex_subregister(u32 val, void __iomem *addr)
+{
+ writel((u32 __force)cpu_to_le32(val), addr);
+}
+
+static inline u32 _read_litex_subregister(void __iomem *addr)
+{
+ return le32_to_cpu((__le32 __force)readl(addr));
+}
+
+/*
+ * LiteX SoC Generator, depending on the configuration, can split a single
+ * logical CSR (Control&Status Register) into a series of consecutive physical
+ * registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned,
+ * 32-bit wide logical CSR will be laid out as four 32-bit physical
+ * subregisters, each one containing one byte of meaningful data.
+ *
+ * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which
+ * means that only larger-than-32-bit CSRs will be split across multiple
+ * subregisters (e.g., a 64-bit CSR will be spread across two consecutive
+ * 32-bit subregisters).
+ *
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
+ */
+
+static inline void litex_write8(void __iomem *reg, u8 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write16(void __iomem *reg, u16 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write32(void __iomem *reg, u32 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write64(void __iomem *reg, u64 val)
+{
+ _write_litex_subregister(val >> 32, reg);
+ _write_litex_subregister(val, reg + 4);
+}
+
+static inline u8 litex_read8(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u16 litex_read16(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u32 litex_read32(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u64 litex_read64(void __iomem *reg)
+{
+ return ((u64)_read_litex_subregister(reg) << 32) |
+ _read_litex_subregister(reg + 4);
+}
+
+#endif /* _LINUX_LITEX_H */
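As a usage sketch of the accessors above (the offsets and latch register are invented for illustration), reading a 64-bit LiteX counter CSR that spans two 32-bit subregisters:

#include <linux/litex.h>

#define TIMER_LATCH_OFF         0x00    /* hypothetical 8-bit latch strobe */
#define TIMER_VALUE_OFF         0x04    /* hypothetical 64-bit counter CSR */

static u64 timer_read(void __iomem *base)
{
        /* latch the running counter so both 32-bit halves are coherent */
        litex_write8(base + TIMER_LATCH_OFF, 1);

        /* litex_read64() reads the high word at @reg, low word at @reg + 4 */
        return litex_read64(base + TIMER_VALUE_OFF);
}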
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 2614247a9781..9b9b38e89563 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -13,11 +13,10 @@
#include <linux/ftrace.h>
#include <linux/completion.h>
#include <linux/list.h>
+#include <linux/livepatch_sched.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
-#include <asm/livepatch.h>
-
/* task patch states */
#define KLP_UNDEFINED -1
#define KLP_UNPATCHED 0
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
new file mode 100644
index 000000000000..013794fb5da0
--- /dev/null
+++ b/include/linux/livepatch_sched.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_LIVEPATCH_SCHED_H_
+#define _LINUX_LIVEPATCH_SCHED_H_
+
+#include <linux/jump_label.h>
+#include <linux/static_call_types.h>
+
+#ifdef CONFIG_LIVEPATCH
+
+void __klp_sched_try_switch(void);
+
+#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
+
+static __always_inline void klp_sched_try_switch(void)
+{
+ if (static_branch_unlikely(&klp_sched_try_switch_key))
+ __klp_sched_try_switch();
+}
+
+#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+#else /* !CONFIG_LIVEPATCH */
+static inline void klp_sched_try_switch(void) {}
+static inline void __klp_sched_try_switch(void) {}
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 2e9c7215882b..2c982ff7475a 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -49,7 +49,9 @@
*/
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
struct llist_head {
struct llist_node *first;
@@ -72,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test if a node is on
+ * a list or not, this initialises the node to clearly
+ * not be on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ node->next = node;
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or
+ * some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return node->next != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -197,6 +226,16 @@ static inline struct llist_node *llist_next(struct llist_node *node)
extern bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head);
+
+static inline bool __llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ new_last->next = head->first;
+ head->first = new_first;
+ return new_last->next == NULL;
+}
+
/**
* llist_add - add a new entry
* @new: new entry to be added
@@ -209,6 +248,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
return llist_add_batch(new, new, head);
}
+static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
+{
+ return __llist_add_batch(new, new, head);
+}
+
/**
* llist_del_all - delete all entries from lock-less list
* @head: the head of lock-less list to delete all entries
@@ -222,8 +266,35 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
return xchg(&head->first, NULL);
}
+static inline struct llist_node *__llist_del_all(struct llist_head *head)
+{
+ struct llist_node *first = head->first;
+
+ head->first = NULL;
+ return first;
+}
+
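The double-underscore variants skip the atomics, so they are only safe while no other CPU can observe the list. One plausible pattern (a sketch) is grabbing a shared list atomically, then rebuilding a private one cheaply:

#include <linux/llist.h>

static void reverse_into_local(struct llist_head *shared)
{
        LLIST_HEAD(local);      /* private head, not yet published */
        struct llist_node *n = llist_del_all(shared);   /* atomic grab */

        while (n) {
                struct llist_node *next = n->next;

                __llist_add(n, &local); /* no cmpxchg: list is private */
                n = next;
        }
        /* @local now holds the entries in reverse order */
}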
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
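Putting init_llist_node(), llist_on_list() and llist_del_first_init() together, a sketch (the work_item type is hypothetical; producers are assumed to be serialized per item so the on-list test does not race, and there is a single consumer as llist_del_first() requires):

#include <linux/llist.h>

struct work_item {
        struct llist_node node; /* init_llist_node() at allocation time */
        /* ... payload ... */
};

static void queue_item(struct llist_head *q, struct work_item *w)
{
        /* only queue items that are not already queued */
        if (!llist_on_list(&w->node))
                llist_add(&w->node, q);
}

static struct work_item *pop_item(struct llist_head *q)
{
        /* dequeues and re-marks the node as off-list in one step */
        struct llist_node *n = llist_del_first_init(q);

        return n ? container_of(n, struct work_item, node) : NULL;
}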
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h
new file mode 100644
index 000000000000..625bec0393a1
--- /dev/null
+++ b/include/linux/llist_api.h
@@ -0,0 +1 @@
+#include <linux/llist.h>
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 4a8795b21d77..975e33b793a7 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,8 @@
#include <linux/percpu-defs.h>
#include <linux/lockdep.h>
+#ifndef CONFIG_PREEMPT_RT
+
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -14,26 +16,14 @@ typedef struct {
} local_lock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LL_DEP_MAP_INIT(lockname) \
+# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
- }
-#else
-# define LL_DEP_MAP_INIT(lockname)
-#endif
-
-#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
+ .lock_type = LD_LOCK_PERCPU, \
+ }, \
+ .owner = NULL,
-#define __local_lock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
- lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
-} while (0)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -48,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l)
lock_map_release(&l->dep_map);
}
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+ l->owner = NULL;
+}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ local_lock_debug_init(lock); \
+} while (0)
+
#define __local_lock(lock) \
do { \
preempt_disable(); \
@@ -88,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { }
local_lock_release(this_cpu_ptr(lock)); \
local_irq_restore(flags); \
} while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l) \
+ do { \
+ local_spin_lock_init((l)); \
+ } while (0)
+
+#define __local_lock(__lock) \
+ do { \
+ migrate_disable(); \
+ spin_lock(this_cpu_ptr((__lock))); \
+ } while (0)
+
+#define __local_lock_irq(lock) __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_lock(lock); \
+ } while (0)
+
+#define __local_unlock(__lock) \
+ do { \
+ spin_unlock(this_cpu_ptr((__lock))); \
+ migrate_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
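These internals back the public API in <linux/local_lock.h>; a typical consumer (a sketch, the per-CPU structure is hypothetical) is written once and lets the macros above pick between preempt_disable() and a per-CPU spinlock:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct cpu_stats {
        local_lock_t lock;
        u64 events;
};

static DEFINE_PER_CPU(struct cpu_stats, cpu_stats) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void count_event(void)
{
        /* !RT: disables preemption; RT: migrate_disable() + spinlock */
        local_lock(&cpu_stats.lock);
        this_cpu_inc(cpu_stats.events);
        local_unlock(&cpu_stats.lock);
}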
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 0520c0cd73f4..c53c81242e72 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -20,6 +20,7 @@
/* Dummy declarations */
struct svc_rqst;
struct rpc_task;
+struct rpc_clnt;
/*
* This is the set of functions for lockd->nfsd communication
@@ -27,7 +28,8 @@ struct rpc_task;
struct nlmsvc_binding {
__be32 (*fopen)(struct svc_rqst *,
struct nfs_fh *,
- struct file **);
+ struct file **,
+ int mode);
void (*fclose)(struct file *);
};
@@ -55,6 +57,7 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
+extern struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host);
/*
* NLM client operations provide a means to modify RPC processing of NLM
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 666f5f310a04..1b95fe31051f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -10,6 +10,8 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H
+/* XXX: a lot of this should really be under fs/lockd. */
+
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
@@ -97,21 +99,11 @@ struct nsm_handle {
/*
* Rigorous type checking on sockaddr type conversions
*/
-static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_addr;
-}
-
static inline struct sockaddr *nlm_addr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_addr;
}
-static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_srcaddr;
-}
-
static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_srcaddr;
@@ -129,7 +121,16 @@ struct nlm_lockowner {
uint32_t pid;
};
-struct nlm_wait;
+/*
+ * This is the representation of a blocked client lock.
+ */
+struct nlm_wait {
+ struct list_head b_list; /* linked list */
+ wait_queue_head_t b_wait; /* where to wait on */
+ struct nlm_host *b_host;
+ struct file_lock *b_lock; /* local file lock */
+ __be32 b_status; /* grant callback status */
+};
/*
* Memory chunk for NLM client RPC request.
@@ -154,7 +155,8 @@ struct nlm_rqst {
struct nlm_file {
struct hlist_node f_list; /* linked list */
struct nfs_fh f_handle; /* NFS file handle */
- struct file * f_file; /* VFS file pointer */
+ struct file * f_file[2]; /* VFS file pointers,
+ indexed by O_ flags */
struct nlm_share * f_shares; /* DOS shares */
struct list_head f_blocks; /* blocked locks */
unsigned int f_locks; /* guesstimate # of locks */
@@ -193,15 +195,17 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern const struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[24];
#ifdef CONFIG_LOCKD_V4
-extern const struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
+extern struct timer_list nlmsvc_retry;
+
/*
* Lockd client functions
*/
@@ -209,9 +213,11 @@ struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
void nlmclnt_release_call(struct nlm_rqst *);
-struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
-void nlmclnt_finish_block(struct nlm_wait *block);
-int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
+void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host,
+ struct file_lock *fl);
+void nlmclnt_queue_block(struct nlm_wait *block);
+__be32 nlmclnt_dequeue_block(struct nlm_wait *block);
+int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
__be32 nlmclnt_grant(const struct sockaddr *addr,
const struct nlm_lock *lock);
void nlmclnt_recovery(struct nlm_host *);
@@ -267,6 +273,7 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
/*
* Server-side lock handling
*/
+int lock_to_openmode(struct file_lock *);
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
@@ -275,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
@@ -286,8 +293,9 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
* File handling for the server personality
*/
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
- struct nfs_fh *);
+ struct nlm_lock *);
void nlm_release_file(struct nlm_file *);
+void nlmsvc_put_lockowner(struct nlm_lockowner *);
void nlmsvc_release_lockowner(struct nlm_lock *);
void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
@@ -299,9 +307,15 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+static inline struct file *nlmsvc_file_file(struct nlm_file *file)
+{
+ return file->f_file[O_RDONLY] ?
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
+}
+
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return locks_inode(file->f_file);
+ return file_inode(nlmsvc_file_file(file));
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
@@ -361,12 +375,12 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
- return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file)
- && fl1->fl_pid == fl2->fl_pid
- && fl1->fl_owner == fl2->fl_owner
+ return file_inode(fl1->c.flc_file) == file_inode(fl2->c.flc_file)
+ && fl1->c.flc_pid == fl2->c.flc_pid
+ && fl1->c.flc_owner == fl2->c.flc_owner
&& fl1->fl_start == fl2->fl_start
&& fl1->fl_end == fl2->fl_end
- &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK);
+ &&(fl1->c.flc_type == fl2->c.flc_type || fl2->c.flc_type == F_UNLCK);
}
extern const struct lock_manager_operations nlmsvc_lock_operations;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7ab9f264313f..80cca9426761 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -11,6 +11,7 @@
#define LOCKD_XDR_H
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
@@ -41,6 +42,8 @@ struct nlm_lock {
struct nfs_fh fh;
struct xdr_netobj oh;
u32 svid;
+ u64 lock_start;
+ u64 lock_len;
struct file_lock fl;
};
@@ -49,7 +52,7 @@ struct nlm_lock {
* FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to
* 32 bytes.
*/
-
+
struct nlm_cookie
{
unsigned char data[NLM_MAXCOOKIELEN];
@@ -96,24 +99,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
+bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index e709fe5924f2..72831e35dca3 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -22,27 +22,22 @@
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
+bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
extern const struct rpc_version nlm_version4;
#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6a584b3e5c74..08b0d1d9d78b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -16,10 +16,6 @@
struct task_struct;
-/* for sysctl */
-extern int prove_locking;
-extern int lock_stat;
-
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -54,7 +50,11 @@ struct lock_list {
struct lock_class *class;
struct lock_class *links_to;
const struct lock_trace *trace;
- int distance;
+ u16 distance;
+ /* bitmap of different dependencies from head to this */
+ u8 dep;
+ /* used by BFS to record whether "prev -> this" only has -(*R)-> */
+ u8 only_xr;
/*
* The parent field is used to implement breadth-first search, and the
@@ -82,62 +82,6 @@ struct lock_chain {
u64 chain_key;
};
-#define MAX_LOCKDEP_KEYS_BITS 13
-#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
-#define INITIAL_CHAIN_KEY -1
-
-struct held_lock {
- /*
- * One-way hash of the dependency chain up to this point. We
- * hash the hashes step by step as the dependency chain grows.
- *
- * We use it for dependency-caching and we skip detection
- * passes and dependency-updates if there is a cache-hit, so
- * it is absolutely critical for 100% coverage of the validator
- * to have a unique key value for every unique dependency path
- * that can occur in the system, to make a unique hash value
- * as likely as possible - hence the 64-bit width.
- *
- * The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
- */
- u64 prev_chain_key;
- unsigned long acquire_ip;
- struct lockdep_map *instance;
- struct lockdep_map *nest_lock;
-#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
- u64 holdtime_stamp;
-#endif
- /*
- * class_idx is zero-indexed; it points to the element in
- * lock_classes this held lock instance belongs to. class_idx is in
- * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
- */
- unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
- /*
- * The lock-stack is unified in that the lock chains of interrupt
- * contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
- *
- * The following field is used to detect when we cross into an
- * interrupt context:
- */
- unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1; /* 16 bits */
-
- unsigned int read:2; /* see lock_acquire() comment */
- unsigned int check:1; /* see lock_acquire() comment */
- unsigned int hardirqs_off:1;
- unsigned int references:12; /* 32 bits */
- unsigned int pin_count;
-};
-
/*
* Initialization, self-test and debugging-output methods:
*/
@@ -151,7 +95,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
extern void lockdep_init_task(struct task_struct *task);
/*
- * Split the recrursion counter in two to readily detect 'off' vs recursion.
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
*/
#define LOCKDEP_RECURSION_BITS 16
#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
@@ -181,12 +125,19 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
* to lockdep:
*/
-extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass, short inner, short outer);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}
static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass, short inner)
+ struct lock_class_key *key, int subclass, u8 inner)
{
lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}
@@ -204,24 +155,28 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
* or they are too narrow (they suffer from a false class-split):
*/
#define lockdep_set_class(lock, key) \
- lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
@@ -257,6 +212,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, unsigned long ip);
+extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
+ int read, int check, struct lockdep_map *nest_lock,
+ unsigned long ip);
+
+/* lock_is_held_type() returns */
+#define LOCK_STATE_UNKNOWN -1
+#define LOCK_STATE_NOT_HELD 0
+#define LOCK_STATE_HELD 1
+
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
@@ -274,6 +238,9 @@ extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
+#define lock_set_novalidate_class(l, n, i) \
+ lock_set_class(l, n, &__lockdep_no_validate__, 0, i)
+
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
@@ -290,21 +257,29 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
-#define lockdep_assert_held(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert(cond) \
+ do { WARN_ON(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_once(cond) \
+ do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_not_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
-#define lockdep_assert_held_write(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
- } while (0)
+#define lockdep_assert_held_write(l) \
+ lockdep_assert(lockdep_is_held_type(l, 0))
-#define lockdep_assert_held_read(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
- } while (0)
+#define lockdep_assert_held_read(l) \
+ lockdep_assert(lockdep_is_held_type(l, 1))
-#define lockdep_assert_held_once(l) do { \
- WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert_held_once(l) \
+ lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_none_held_once() \
+ lockdep_assert_once(!current->lockdep_depth)
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
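The reworked assert family reads naturally in locking contracts; a small sketch (the counter type is hypothetical):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct counter {
        spinlock_t lock;
        u64 val;
};

/* Caller must hold c->lock; the check costs nothing without lockdep. */
static void counter_bump(struct counter *c)
{
        lockdep_assert_held(&c->lock);
        c->val++;
}

/* Caller must NOT hold c->lock, since we take it ourselves. */
static void counter_reset(struct counter *c)
{
        lockdep_assert_not_held(&c->lock);

        spin_lock(&c->lock);
        c->val = 0;
        spin_unlock(&c->lock);
}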
@@ -312,6 +287,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
+/*
+ * Must use lock_map_acquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map _name = { \
+ .name = #_name "-wait-type-override", \
+ .wait_type_inner = _wait_type, \
+ .lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
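A sketch of the pattern this macro enables (the function and map names are hypothetical): annotate a section known to only busy-wait, and pair the override map with the _try acquire as the comment above requires:

#include <linux/lockdep.h>

static void poll_for_completion(void)
{
        DEFINE_WAIT_OVERRIDE_MAP(hw_poll_map, LD_WAIT_SPIN);

        lock_map_acquire_try(&hw_poll_map);     /* must be the _try variant */

        /* ... pure polling loop, nothing here may sleep ... */

        lock_map_release(&hw_poll_map);
}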
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_init_task(struct task_struct *task)
@@ -333,9 +318,12 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
-# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_class(l, n, key, s, i) do { (void)(key); } while (0)
+# define lock_set_novalidate_class(l, n, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
@@ -371,12 +359,23 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
#define lockdep_depth(tsk) (0)
+/*
+ * Dummy forward declarations that allow users to write less ifdef-y code
+ * and depend on dead code elimination.
+ */
+extern int lock_is_held(const void *);
+extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1)
+#define lockdep_assert(c) do { } while (0)
+#define lockdep_assert_once(c) do { } while (0)
+
#define lockdep_assert_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
+#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
#define lockdep_recursing(tsk) (0)
@@ -386,15 +385,25 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map __maybe_unused _name = {}
+
#endif /* !LOCKDEP */
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
+
+#define lock_set_cmp_fn(lock, ...) lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
+#else
+#define lock_set_cmp_fn(lock, ...) do { } while (0)
+#endif
+
enum xhlock_context_t {
XHLOCK_HARD,
XHLOCK_SOFT,
XHLOCK_CTX_NR,
};
-#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
@@ -444,23 +453,6 @@ do { \
#endif /* CONFIG_LOCK_STAT */
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
@@ -469,6 +461,20 @@ static inline void print_irqtrace_events(struct task_struct *curr)
}
#endif
+/* Variable used to make lockdep treat read_lock() as recursive in selftests */
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+extern unsigned int force_read_lock_recursive;
+#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+#define force_read_lock_recursive 0
+#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+
+#ifdef CONFIG_LOCKDEP
+extern bool read_lock_is_recursive(void);
+#else /* CONFIG_LOCKDEP */
+/* If !LOCKDEP, the value is meaningless */
+#define read_lock_is_recursive() 0
+#endif
+
/*
* For trivial one-depth nesting of a lock-class, the following
* global define can be used. (Subsystems with multiple levels
@@ -490,7 +496,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define spin_release(l, i) lock_release(l, i)
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i) \
+do { \
+ if (read_lock_is_recursive()) \
+ lock_acquire_shared_recursive(l, s, t, NULL, i); \
+ else \
+ lock_acquire_shared(l, s, t, NULL, i); \
+} while (0)
+
#define rwlock_release(l, i) lock_release(l, i)
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
@@ -507,24 +520,26 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, _THIS_IP_)
+#define lock_map_sync(l) lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock) \
+# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_read(lock) \
+# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_nested(lock, subclass) \
+# define might_lock_nested(lock, subclass) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
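might_lock() and friends let a function that only conditionally takes a lock still record the dependency on every call; a sketch (names hypothetical):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static void maybe_flush(struct mutex *m, bool need_flush)
{
        might_lock(m);          /* dependency is recorded unconditionally */

        if (!need_flush)
                return;

        mutex_lock(m);
        /* ... flush work ... */
        mutex_unlock(m);
}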
@@ -534,44 +549,55 @@ do { \
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
-/*
- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
- * per-cpu variables. This is required because this_cpu_read() will potentially
- * call into preempt/irq-disable and that obviously isn't right. This is also
- * correct because when IRQs are enabled, it doesn't matter if we accidentally
- * read the value from our previous CPU.
- */
+#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
#define lockdep_assert_irqs_enabled() \
do { \
- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_irqs_disabled() \
do { \
- WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_in_irq() \
do { \
- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_no_hardirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
+ !this_cpu_read(hardirqs_enabled))); \
} while (0)
#define lockdep_assert_preemption_enabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
- debug_locks && \
+ __lockdep_enabled && \
(preempt_count() != 0 || \
- !raw_cpu_read(hardirqs_enabled))); \
+ !this_cpu_read(hardirqs_enabled))); \
} while (0)
#define lockdep_assert_preemption_disabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
- debug_locks && \
+ __lockdep_enabled && \
(preempt_count() == 0 && \
- raw_cpu_read(hardirqs_enabled))); \
+ this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (!in_softirq() || in_irq() || in_nmi())); \
} while (0)
#else
@@ -582,9 +608,11 @@ do { \
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_no_hardirq() do { } while (0)
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h
new file mode 100644
index 000000000000..907e66979ab2
--- /dev/null
+++ b/include/linux/lockdep_api.h
@@ -0,0 +1 @@
+#include <linux/lockdep.h>
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index bb35b449f533..70d30d40ea4a 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -21,7 +21,7 @@ enum lockdep_wait_type {
LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
- LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
+ LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
@@ -30,19 +30,30 @@ enum lockdep_wait_type {
LD_WAIT_MAX, /* must be last */
};
+enum lockdep_lock_type {
+ LD_LOCK_NORMAL = 0, /* normal, catch all */
+ LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_WAIT_OVERRIDE, /* annotation */
+ LD_LOCK_MAX,
+};
+
#ifdef CONFIG_LOCKDEP
/*
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need
* the total number of states... :-(
+ *
+ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h, for each
+ * of those we generate 4 states, and additionally we report on USED and USED_READ.
*/
-#define XXX_LOCK_USAGE_STATES (1+2*4)
+#define XXX_LOCK_USAGE_STATES 2
+#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2)
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
*
- * Currently main class (subclass == 0) and signle depth subclass
+ * Currently main class (subclass == 0) and single depth subclass
 * are cached in lockdep_map. This optimization mainly targets
 * rq->lock: double_rq_lock() acquires this highly contended lock with a
 * single depth subclass.
@@ -74,6 +85,11 @@ struct lock_trace;
#define LOCKSTAT_POINTS 4
+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+ const struct lockdep_map *b);
+typedef void (*lock_print_fn)(const struct lockdep_map *map);
+
/*
* The lock-class itself. The order of the structure members matters.
* reinit_class() zeroes the key member and all subsequent members.
@@ -99,6 +115,9 @@ struct lock_class {
struct list_head locks_after, locks_before;
const struct lockdep_subclass_key *key;
+ lock_cmp_fn cmp_fn;
+ lock_print_fn print_fn;
+
unsigned int subclass;
unsigned int dep_gen_id;
@@ -106,17 +125,19 @@ struct lock_class {
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
- const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
+ const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+ const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
- const char *name;
- short wait_type_inner;
- short wait_type_outer;
+ u8 wait_type_inner;
+ u8 wait_type_outer;
+ u8 lock_type;
+ /* u8 hole; */
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
@@ -165,8 +186,10 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
- short wait_type_outer; /* can be taken in this context */
- short wait_type_inner; /* presents this context */
+ u8 wait_type_outer; /* can be taken in this context */
+ u8 wait_type_inner; /* presents this context */
+ u8 lock_type;
+ /* u8 hole; */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
@@ -175,6 +198,63 @@ struct lockdep_map {
struct pin_cookie { unsigned int val; };
+#define MAX_LOCKDEP_KEYS_BITS 13
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest ontop of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also keep do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1; /* 16 bits */
+
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
+ unsigned int sync:1;
+ unsigned int references:11; /* 32 bits */
+ unsigned int pin_count;
+};
+
#else /* !CONFIG_LOCKDEP */
/*
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 99f17cc8e163..c3a1f78bc884 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -38,7 +38,6 @@ extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
extern void lockref_mark_dead(struct lockref *);
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 83a4a3ca3e8a..9f30d087a128 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -18,7 +18,7 @@
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
@@ -26,7 +26,7 @@ int __ilog2_u32(u32 n)
#endif
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
@@ -156,7 +156,8 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? \
- const_ilog2(n) : \
+ ((n) < 2 ? 0 : \
+ 63 - __builtin_clzll(n)) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
@@ -173,7 +174,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h
new file mode 100644
index 000000000000..3fa65c964379
--- /dev/null
+++ b/include/linux/logic_iomem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef __LOGIC_IOMEM_H
+#define __LOGIC_IOMEM_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/**
+ * struct logic_iomem_ops - emulated IO memory ops
+ * @read: read an 8, 16, 32 or 64 bit quantity from the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @write: write an 8, 16, 32 or 64 bit quantity to the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @set: optional, for memset_io()
+ * @copy_from: optional, for memcpy_fromio()
+ * @copy_to: optional, for memcpy_toio()
+ * @unmap: optional, this region is getting unmapped
+ */
+struct logic_iomem_ops {
+ unsigned long (*read)(void *priv, unsigned int offset, int size);
+ void (*write)(void *priv, unsigned int offset, int size,
+ unsigned long val);
+
+ void (*set)(void *priv, unsigned int offset, u8 value, int size);
+ void (*copy_from)(void *priv, void *buffer, unsigned int offset,
+ int size);
+ void (*copy_to)(void *priv, unsigned int offset, const void *buffer,
+ int size);
+
+ void (*unmap)(void *priv);
+};
+
+/**
+ * struct logic_iomem_region_ops - ops for an IO memory handler
+ * @map: map a range in the registered IO memory region, must
+ * fill *ops with the ops and may fill *priv to be passed
+ * to the ops. The offset is given as the offset into the
+ * registered resource region.
+ * The return value is negative for errors, or >= 0 for
+ * success. On success, the return value is added to the
+ * offset for later ops, to allow for partial mappings.
+ */
+struct logic_iomem_region_ops {
+ long (*map)(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv);
+};
+
+/**
+ * logic_iomem_add_region - register an IO memory region
+ * @resource: the resource description for this region
+ * @ops: the IO memory mapping ops for this resource
+ */
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops);
+
+#endif /* __LOGIC_IOMEM_H */
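
Read together, the two structures above form a two-stage protocol: a driver
registers a physical range once with logic_iomem_add_region(), and the
region's map() callback then runs for each actual mapping, handing back the
per-mapping ops and an optional priv cookie that is passed to every later
read/write. A hedged sketch of a trivial backend, assuming
<linux/logic_iomem.h>, <linux/ioport.h>, <linux/sizes.h> and
<linux/string.h>; every fake_* name and the resource range are made up for
illustration:

	static u8 fake_regs[SZ_4K];	/* stands in for device registers */

	static unsigned long fake_read(void *priv, unsigned int offset, int size)
	{
		unsigned long val = 0;

		/* size is 1, 2, 4 or 8; little-endian host assumed here */
		memcpy(&val, fake_regs + offset, size);
		return val;
	}

	static void fake_write(void *priv, unsigned int offset, int size,
			       unsigned long val)
	{
		memcpy(fake_regs + offset, &val, size);
	}

	static const struct logic_iomem_ops fake_ops = {
		.read	= fake_read,
		.write	= fake_write,
		/* .set/.copy_from/.copy_to/.unmap are optional and omitted */
	};

	static long fake_map(unsigned long offset, size_t size,
			     const struct logic_iomem_ops **ops, void **priv)
	{
		*ops = &fake_ops;
		return 0;	/* >= 0: nothing added to later offsets */
	}

	static const struct logic_iomem_region_ops fake_region_ops = {
		.map = fake_map,
	};

	static struct resource fake_res = DEFINE_RES_MEM(0xfe000000, SZ_4K);

	static int __init fake_init(void)
	{
		return logic_iomem_add_region(&fake_res, &fake_region_ops);
	}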
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index 54945aa824b4..babf4e3c28ba 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -39,9 +39,6 @@ struct logic_pio_host_ops {
#ifdef CONFIG_INDIRECT_PIO
u8 logic_inb(unsigned long addr);
-void logic_outb(u8 value, unsigned long addr);
-void logic_outw(u16 value, unsigned long addr);
-void logic_outl(u32 value, unsigned long addr);
u16 logic_inw(unsigned long addr);
u32 logic_inl(unsigned long addr);
void logic_outb(u8 value, unsigned long addr);
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 429d67d815ce..c9afcdd9324c 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
Because of this later property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
- backend storage uppon next resync, if we don't get it right).
+ backend storage upon next resync, if we don't get it right).
What for?
@@ -152,7 +152,7 @@ struct lc_element {
* for paranoia, and for "lc_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
- * it needs to become arch independend u64 */
+ * it needs to become an architecture independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
@@ -199,7 +199,6 @@ struct lru_cache {
unsigned long flags;
- void *lc_private;
const char *name;
/* nr_elements there */
@@ -241,7 +240,6 @@ extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
-extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
@@ -263,7 +261,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
*
* Allows (expects) the set to be "dirty". Note that the reference counts and
* order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
*/
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -275,7 +273,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
+ * still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
@@ -297,6 +295,5 @@ extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
container_of(ptr, type, member)
extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
-extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
#endif
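
The two try-lock variants documented above differ only in whether a "dirty"
set is acceptable; both leave the reference counts and active/lru ordering
free to change, and both pair with lc_unlock(). A hedged usage sketch,
assuming <linux/lru_cache.h> (commit_transaction() is a hypothetical caller):

	static bool commit_transaction(struct lru_cache *lc)
	{
		/* succeeds even while the set is "dirty" */
		if (!lc_try_lock_for_transaction(lc))
			return false;

		/* changing transactions are serialized from here on,
		 * though refcounts and list order may still move */
		/* ... write out the transaction ... */

		lc_unlock(lc);	/* changes to the set may succeed again */
		return true;
	}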
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 28f23b341c1c..97a8b21eb033 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -26,7 +26,7 @@
struct lsm_network_audit {
int netif;
- struct sock *sk;
+ const struct sock *sk;
u16 family;
__be16 dport;
__be16 sport;
@@ -48,13 +48,13 @@ struct lsm_ioctlop_audit {
};
struct lsm_ibpkey_audit {
- u64 subnet_prefix;
- u16 pkey;
+ u64 subnet_prefix;
+ u16 pkey;
};
struct lsm_ibendport_audit {
- char dev_name[IB_DEVICE_NAME_MAX];
- u8 port;
+ const char *dev_name;
+ u8 port;
};
/* Auxiliary data to use in generating the audit record. */
@@ -76,6 +76,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IBENDPORT 14
#define LSM_AUDIT_DATA_LOCKDOWN 15
#define LSM_AUDIT_DATA_NOTIFICATION 16
+#define LSM_AUDIT_DATA_ANONINODE 17
union {
struct path path;
struct dentry *dentry;
@@ -96,6 +97,7 @@ struct common_audit_data {
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
int reason;
+ const char *anonclass;
} u;
/* this union contains LSM specific data */
union {
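
common_audit_data is a tagged union: the LSM_AUDIT_DATA_* constant stored in
the type field selects which member of u is valid, so the new
LSM_AUDIT_DATA_ANONINODE value pairs with the new u.anonclass pointer. A
hedged sketch of how an LSM might fill it in (the class string is purely
illustrative):

	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_ANONINODE;	/* selects the union member */
	ad.u.anonclass = "[io_uring]";		/* illustrative class name */
	/* ... then hand &ad to the usual audit helper, e.g. common_lsm_audit() */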
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 2a8c74d99015..334e00efbde4 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -26,44 +26,47 @@
* #undef LSM_HOOK
* };
*/
-LSM_HOOK(int, 0, binder_set_context_mgr, struct task_struct *mgr)
-LSM_HOOK(int, 0, binder_transaction, struct task_struct *from,
- struct task_struct *to)
-LSM_HOOK(int, 0, binder_transfer_binder, struct task_struct *from,
- struct task_struct *to)
-LSM_HOOK(int, 0, binder_transfer_file, struct task_struct *from,
- struct task_struct *to, struct file *file)
+LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr)
+LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
+ const struct cred *to, const struct file *file)
LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
unsigned int mode)
LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
-LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
const kernel_cap_t *effective, const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts)
-LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb)
LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
-LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
struct fs_context *src_sc)
LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
struct fs_parameter *param)
LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb)
LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
+LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
-LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb)
LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
@@ -76,12 +79,11 @@ LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts,
LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
struct super_block *newsb, unsigned long kern_flags,
unsigned long *set_kern_flags)
-LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val,
- int len, void **mnt_opts)
LSM_HOOK(int, 0, move_mount, const struct path *from_path,
const struct path *to_path)
-LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,
- int mode, const struct qstr *name, void **ctx, u32 *ctxlen)
+LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
+ int mode, const struct qstr *name, const char **xattr_name,
+ void **ctx, u32 *ctxlen)
LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
struct qstr *name, const struct cred *old, struct cred *new)
@@ -92,6 +94,8 @@ LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry,
LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry)
LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
+LSM_HOOK(void, LSM_RET_VOID, path_post_mknod, struct mnt_idmap *idmap,
+ struct dentry *dentry)
LSM_HOOK(int, 0, path_truncate, const struct path *path)
LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry,
const char *old_name)
@@ -99,7 +103,7 @@ LSM_HOOK(int, 0, path_link, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry)
LSM_HOOK(int, 0, path_rename, const struct path *old_dir,
struct dentry *old_dentry, const struct path *new_dir,
- struct dentry *new_dentry)
+ struct dentry *new_dentry, unsigned int flags)
LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode)
LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid)
LSM_HOOK(int, 0, path_chroot, const struct path *path)
@@ -110,11 +114,15 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
-LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
- struct inode *dir, const struct qstr *qstr, const char **name,
- void **value, size_t *len)
+LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count)
+LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
+ const struct qstr *name, const struct inode *context_inode)
LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
umode_t mode)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_create_tmpfile, struct mnt_idmap *idmap,
+ struct inode *inode)
LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry)
@@ -131,19 +139,37 @@ LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry)
LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
bool rcu)
LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
-LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr)
+LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, int ia_valid)
LSM_HOOK(int, 0, inode_getattr, const struct path *path)
-LSM_HOOK(int, 0, inode_setxattr, struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
-LSM_HOOK(int, 0, inode_removexattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry,
+ const char *name)
+LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(int, 0, inode_get_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
-LSM_HOOK(int, 0, inode_killpriv, struct dentry *dentry)
-LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct inode *inode,
- const char *name, void **buffer, bool alloc)
+LSM_HOOK(int, 0, inode_killpriv, struct mnt_idmap *idmap,
+ struct dentry *dentry)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct mnt_idmap *idmap,
+ struct inode *inode, const char *name, void **buffer, bool alloc)
LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
@@ -155,9 +181,12 @@ LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
unsigned long arg)
+LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ unsigned long arg)
LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
@@ -171,6 +200,8 @@ LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
struct fown_struct *fown, int sig)
LSM_HOOK(int, 0, file_receive, struct file *file)
LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, file_post_open, struct file *file, int mask)
+LSM_HOOK(int, 0, file_truncate, struct file *file)
LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
unsigned long clone_flags)
LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
@@ -184,19 +215,24 @@ LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
-LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id)
+LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_load_data, char *buf, loff_t size,
+ enum kernel_load_data_id id, char *description)
LSM_HOOK(int, 0, kernel_read_file, struct file *file,
- enum kernel_read_file_id id)
+ enum kernel_read_file_id id, bool contents)
LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
loff_t size, enum kernel_read_file_id id)
LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
int flags)
LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred *old,
int flags)
+LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred *old)
LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
-LSM_HOOK(void, LSM_RET_VOID, task_getsecid, struct task_struct *p, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
+ struct task_struct *p, u32 *secid)
LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
@@ -213,6 +249,7 @@ LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
struct inode *inode)
+LSM_HOOK(int, 0, userns_create, const struct cred *cred)
LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
u32 *secid)
@@ -242,7 +279,11 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
-LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, u32 *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, u32 size, u32 flags)
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
LSM_HOOK(int, 0, ismaclabel, const char *name)
@@ -253,7 +294,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
u32 *ctxlen)
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -289,17 +330,17 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
-LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
- char __user *optval, int __user *optlen, unsigned len)
-LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
+ sockptr_t optval, sockptr_t optlen, unsigned int len)
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
struct sk_buff *skb, u32 *secid)
LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
struct sock *newsk)
-LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
-LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb,
+LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
const struct request_sock *req)
@@ -309,19 +350,22 @@ LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid)
LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
- struct flowi *fl)
+ struct flowi_common *flic)
LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
LSM_HOOK(int, 0, tun_dev_open, void *security)
-LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep,
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
struct sk_buff *skb)
LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
struct sockaddr *address, int addrlen)
-LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep,
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
struct sock *sk, struct sock *newsk)
+LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -346,10 +390,9 @@ LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
-LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid)
LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl)
+ struct xfrm_policy *xp, const struct flowi_common *flic)
LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
int ckall)
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -361,7 +404,10 @@ LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
-LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
+LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
+LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
+ struct key *key, const void *payload, size_t payload_len,
+ unsigned long flags, bool create)
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -376,10 +422,17 @@ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
-LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
-LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
-LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
-LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
+LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
+LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
#endif /* CONFIG_BPF_SYSCALL */
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
@@ -391,3 +444,9 @@ LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
+LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
+#endif /* CONFIG_IO_URING */
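
Each line in this header is a single LSM_HOOK() invocation, and consumers
build the full hook table by defining the macro, including the header, and
undefining it again, exactly as the preamble comment sketches. A condensed
illustration of that pattern (not a verbatim copy of lsm_hooks.h):

	union security_list_options {
		#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
		#include <linux/lsm_hook_defs.h>
		#undef LSM_HOOK
	};

	/* e.g. the new entry
	 *	LSM_HOOK(int, 0, file_truncate, struct file *file)
	 * expands to the member
	 *	int (*file_truncate)(struct file *file);
	 * with the DEFAULT value (0 here) used as the return when no
	 * loaded LSM supplies the hook.
	 */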
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9e2e3e63719d..a2ade0ffe9e7 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,1501 +25,12 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
+#include <linux/xattr.h>
-/**
- * union security_list_options - Linux Security Module hook function list
- *
- * Security hooks for program execution operations.
- *
- * @bprm_creds_for_exec:
- * If the setup in prepare_exec_creds did not set up @bprm->cred->security
- * properly for executing @bprm->file, update the LSM's portion of
- * @bprm->cred->security to be what commit_creds needs to install for the
- * new program. This hook may also optionally check permissions
- * (e.g. for transitions between security domains).
- * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
- * request libc enable secure mode.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_creds_from_file:
- * If @file is setpcap, suid, sgid or otherwise marked to change
- * privilege upon exec, update @bprm->cred to reflect that change.
- * This is called after finding the binary that will be executed
- * without an interpreter. This ensures that the credentials will not
- * be derived from a script that the binary will need to reopen, which
- * when reopened may end up being a completely different file. This
- * hook may also optionally check permissions (e.g. for transitions
- * between security domains).
- * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
- * request libc enable secure mode.
- * The hook must add to @bprm->per_clear any personality flags that
- * should be cleared from current->personality.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check against the @bprm->cred->security value
- * which was set in the preceding creds_for_exec call. The argv list and
- * envp list are reliably available in @bprm. This hook may be called
- * multiple times during a single execve.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_committing_creds:
- * Prepare to install the new security attributes of a process being
- * transformed by an execve operation, based on the old credentials
- * pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_creds_for_exec hook. @bprm points to the linux_binprm
- * structure. This hook is a good place to perform state changes on the
- * process such as closing open file descriptors to which access will no
- * longer be granted when the attributes are changed. This is called
- * immediately before commit_creds().
- * @bprm_committed_creds:
- * Tidy up after the installation of the new security attributes of a
- * process being transformed by an execve operation. The new credentials
- * have, by this point, been set to @current->cred. @bprm points to the
- * linux_binprm structure. This hook is a good place to perform state
- * changes on the process such as clearing out non-inheritable signal
- * state. This is called immediately after commit_creds().
- *
- * Security hooks for mount using fs_context.
- * [See also Documentation/filesystems/mount_api.rst]
- *
- * @fs_context_dup:
- * Allocate and attach a security structure to sc->security. This pointer
- * is initialised to NULL by the caller.
- * @fc indicates the new filesystem context.
- * @src_fc indicates the original filesystem context.
- * @fs_context_parse_param:
- * Userspace provided a parameter to configure a superblock. The LSM may
- * reject it with an error and may use it for itself, in which case it
- * should return 0; otherwise it should return -ENOPARAM to pass it on to
- * the filesystem.
- * @fc indicates the filesystem context.
- * @param The parameter
- *
- * Security hooks for filesystem operations.
- *
- * @sb_alloc_security:
- * Allocate and attach a security structure to the sb->s_security field.
- * The s_security field is initialized to NULL when the structure is
- * allocated.
- * @sb contains the super_block structure to be modified.
- * Return 0 if operation was successful.
- * @sb_free_security:
- * Deallocate and clear the sb->s_security field.
- * @sb contains the super_block structure to be modified.
- * @sb_free_mnt_opts:
- * Free memory associated with @mnt_opts.
- * @sb_eat_lsm_opts:
- * Eat (scan @orig options) and save them in @mnt_opts.
- * @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @mnt
- * mountpoint.
- * @dentry is a handle on the superblock for the filesystem.
- * Return 0 if permission is granted.
- * @sb_mount:
- * Check permission before an object specified by @dev_name is mounted on
- * the mount point named by @nd. For an ordinary mount, @dev_name
- * identifies a device if the file system type requires a device. For a
- * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
- * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
- * pathname of the object being mounted.
- * @dev_name contains the name for object being mounted.
- * @path contains the path for mount point object.
- * @type contains the filesystem type.
- * @flags contains the mount flags.
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
- * @sb_remount:
- * Extracts security system specific mount options and verifies no changes
- * are being made to those options.
- * @sb superblock being remounted
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_kern_mount:
- * Mount this @sb if allowed by permissions.
- * @sb_show_options:
- * Show (print on @m) mount options for this @sb.
- * @sb_umount:
- * Check permission before the @mnt file system is unmounted.
- * @mnt contains the mounted file system.
- * @flags contains the unmount flags, e.g. MNT_FORCE.
- * Return 0 if permission is granted.
- * @sb_pivotroot:
- * Check permission before pivoting the root filesystem.
- * @old_path contains the path for the new location of the
- * current root (put_old).
- * @new_path contains the path for the new root (new_root).
- * Return 0 if permission is granted.
- * @sb_set_mnt_opts:
- * Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
- * @sb_clone_mnt_opts:
- * Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs filled in
- * @sb_add_mnt_opt:
- * Add one mount @option to @mnt_opts.
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
- * @move_mount:
- * Check permission before a mount is moved.
- * @from_path indicates the mount that is going to be moved.
- * @to_path indicates the mountpoint that will be mounted upon.
- * @dentry_init_security:
- * Compute a context for a dentry as the inode is not yet available
- * since NFSv4 has no label backed by an EA anyway.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @ctx pointer to place the pointer to the resulting context in.
- * @ctxlen point to place the length of the resulting context.
- * @dentry_create_files_as:
- * Compute a context for a dentry as the inode is not yet available
- * and set that context in passed in creds so that new files are
- * created using that context. Context is calculated using the
- * passed in creds and not the creds of the caller.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @old creds which should be used for context calculation
- * @new creds to modify
- *
- *
- * Security hooks for inode operations.
- *
- * @inode_alloc_security:
- * Allocate and attach a security structure to @inode->i_security. The
- * i_security field is initialized to NULL when the inode structure is
- * allocated.
- * @inode contains the inode structure.
- * Return 0 if operation was successful.
- * @inode_free_security:
- * @inode contains the inode structure.
- * Deallocate the inode security structure and set @inode->i_security to
- * NULL.
- * @inode_init_security:
- * Obtain the security attribute name suffix and value to set on a newly
- * created inode and set up the incore security field for the new inode.
- * This hook is called by the fs code as part of the inode creation
- * transaction and provides for atomic labeling of the inode, unlike
- * the post_create/mkdir/... hooks called by the VFS. The hook function
- * is expected to allocate the name and value via kmalloc, with the caller
- * being responsible for calling kfree after using them.
- * If the security module does not use security attributes or does
- * not wish to put a security attribute on this particular inode,
- * then it should return -EOPNOTSUPP to skip this processing.
- * @inode contains the inode structure of the newly created inode.
- * @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
- * @name will be set to the allocated name suffix (e.g. selinux).
- * @value will be set to the allocated attribute value.
- * @len will be set to the length of the value.
- * Returns 0 if @name and @value have been successfully set,
- * -EOPNOTSUPP if no security attribute is needed, or
- * -ENOMEM on memory allocation failure.
- * @inode_create:
- * Check permission to create a regular file.
- * @dir contains inode structure of the parent of the new file.
- * @dentry contains the dentry structure for the file to be created.
- * @mode contains the file mode of the file to be created.
- * Return 0 if permission is granted.
- * @inode_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing
- * link to the file.
- * @dir contains the inode structure of the parent directory
- * of the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @path_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link
- * to the file.
- * @new_dir contains the path structure of the parent directory of
- * the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @inode_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the inode structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @path_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the path structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @inode_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the inode structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @path_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the path structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @inode_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with inode structure @dir.
- * @dir contains the inode structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @path_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with path structure @path.
- * @dir contains the path structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @inode_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the inode structure of parent of the directory
- * to be removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @path_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the path structure of parent of the directory to be
- * removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @inode_mknod:
- * Check permissions when creating a special file (or a socket or a fifo
- * file created via the mknod system call). Note that if mknod operation
- * is being done for a regular file, then the create hook will be called
- * and not this hook.
- * @dir contains the inode structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the device number.
- * Return 0 if permission is granted.
- * @path_mknod:
- * Check permissions when creating a file. Note that this hook is called
- * even if mknod operation is being done for a regular file.
- * @dir contains the path structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the undecoded device number. Use new_decode_dev() to get
- * the decoded device number.
- * Return 0 if permission is granted.
- * @inode_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the inode structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the inode structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the path structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the path structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_chmod:
- * Check for permission to change the mode of the file @path. The new
- * mode is specified in @mode.
- * @path contains the path structure of the file to change the mode.
- * @mode contains the new DAC's permission, which is a bitmask of
- * constants from <include/uapi/linux/stat.h>
- * Return 0 if permission is granted.
- * @path_chown:
- * Check for permission to change owner/group of a file or directory.
- * @path contains the path structure.
- * @uid contains new owner's ID.
- * @gid contains new group's ID.
- * Return 0 if permission is granted.
- * @path_chroot:
- * Check for permission to change root directory.
- * @path contains the path structure.
- * Return 0 if permission is granted.
- * @path_notify:
- * Check permissions before setting a watch on events as defined by @mask,
- * on an object at @path, whose type is defined by @obj_type.
- * @inode_readlink:
- * Check the permission to read the symbolic link.
- * @dentry contains the dentry structure for the file link.
- * Return 0 if permission is granted.
- * @inode_follow_link:
- * Check permission to follow a symbolic link when looking up a pathname.
- * @dentry contains the dentry structure for the link.
- * @inode contains the inode, which itself is not stable in RCU-walk
- * @rcu indicates whether we are in RCU-walk mode.
- * Return 0 if permission is granted.
- * @inode_permission:
- * Check permission before accessing an inode. This hook is called by the
- * existing Linux permission function, so a security module can use it to
- * provide additional checking for existing Linux permission checks.
- * Notice that this hook is called when a file is opened (as well as many
- * other operations), whereas the file_security_ops permission hook is
- * called when the actual read/write operations are performed.
- * @inode contains the inode structure to check.
- * @mask contains the permission mask.
- * Return 0 if permission is granted.
- * @inode_setattr:
- * Check permission before setting file attributes. Note that the kernel
- * call to notify_change is performed from several locations, whenever
- * file attributes change (such as when a file is truncated, chown/chmod
- * operations, transferring disk quotas, etc).
- * @dentry contains the dentry structure for the file.
- * @attr is the iattr structure containing the new file attributes.
- * Return 0 if permission is granted.
- * @path_truncate:
- * Check permission before truncating a file.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_getattr:
- * Check permission before obtaining file attributes.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_setxattr:
- * Check permission before setting the extended attributes
- * @value identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_post_setxattr:
- * Update inode security field after successful setxattr operation.
- * @value identified by @name for @dentry.
- * @inode_getxattr:
- * Check permission before obtaining the extended attributes
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_listxattr:
- * Check permission before obtaining the list of extended attribute
- * names for @dentry.
- * Return 0 if permission is granted.
- * @inode_removexattr:
- * Check permission before removing the extended attribute
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_getsecurity:
- * Retrieve a copy of the extended attribute representation of the
- * security label associated with @name for @inode via @buffer. Note that
- * @name is the remainder of the attribute name after the security prefix
- * has been removed. @alloc is used to specify if the call should return a
- * value via the buffer or just the value length. Return size of buffer on
- * success.
- * @inode_setsecurity:
- * Set the security label associated with @name for @inode from the
- * extended attribute value @value. @size indicates the size of the
- * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- * Note that @name is the remainder of the attribute name after the
- * security. prefix has been removed.
- * Return 0 on success.
- * @inode_listsecurity:
- * Copy the extended attribute names for the security labels
- * associated with @inode into @buffer. The maximum size of @buffer
- * is specified by @buffer_size. @buffer may be NULL to request
- * the size of the buffer required.
- * Returns number of bytes used/required on success.
- * @inode_need_killpriv:
- * Called when an inode has been changed.
- * @dentry is the dentry being changed.
- * Return <0 on error to abort the inode change operation.
- * Return 0 if inode_killpriv does not need to be called.
- * Return >0 if inode_killpriv does need to be called.
- * @inode_killpriv:
- * The setuid bit is being removed. Remove similar security labels.
- * Called with the dentry->d_inode->i_mutex held.
- * @dentry is the dentry being changed.
- * Return 0 on success. If an error is returned, the operation
- * causing the setuid bit removal fails.
- * @inode_getsecid:
- * Get the secid associated with the node.
- * @inode contains a pointer to the inode.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- * @inode_copy_up:
- * A file is about to be copied up from lower layer to upper layer of
- * overlay filesystem. Security module can prepare a set of new creds
- * and modify as need be and return new creds. Caller will switch to
- * new creds temporarily to create new file and release newly allocated
- * creds.
- * @src indicates the union dentry of file that is being copied up.
- * @new pointer to pointer to return newly allocated creds.
- * Returns 0 on success or a negative error code on error.
- * @inode_copy_up_xattr:
- * Filter the xattrs being copied up when a unioned file is copied
- * up from a lower layer to the union/overlay layer.
- * @name indicates the name of the xattr.
- * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if
- * the security module does not know about the attribute, or a negative
- * error code to abort the copy up. Note that the caller is responsible
- * for reading and writing the xattrs as this hook is merely a filter.
- * @d_instantiate:
- * Fill in @inode security information for a @dentry if allowed.
- * @getprocattr:
- * Read attribute @name for process @p and store it into @value if allowed.
- * @setprocattr:
- * Write (set) attribute @name to @value, size @size if allowed.
- *
- * Security hooks for kernfs node operations
- *
- * @kernfs_init_security:
- * Initialize the security context of a newly created kernfs node based
- * on its own and its parent's attributes.
- *
- * @kn_dir the parent kernfs node
- * @kn the new child kernfs node
- *
- * Security hooks for file operations
- *
- * @file_permission:
- * Check file permissions before accessing an open file. This hook is
- * called by various operations that read or write files. A security
- * module can use this hook to perform additional checking on these
- * operations, e.g. to revalidate permissions on use to support privilege
- * bracketing or policy changes. Notice that this hook is used when the
- * actual read/write operations are performed, whereas the
- * inode_security_ops hook is called when a file is opened (as well as
- * many other operations).
- * Caveat: Although this hook can be used to revalidate permissions for
- * various system call operations that read or write files, it does not
- * address the revalidation of permissions for memory-mapped files.
- * Security modules must handle this separately if they need such
- * revalidation.
- * @file contains the file structure being accessed.
- * @mask contains the requested permissions.
- * Return 0 if permission is granted.
- * @file_alloc_security:
- * Allocate and attach a security structure to the file->f_security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @file contains the file structure to secure.
- * Return 0 if the hook is successful and permission is granted.
- * @file_free_security:
- * Deallocate and free any security structures stored in file->f_security.
- * @file contains the file structure being modified.
- * @file_ioctl:
- * @file contains the file structure.
- * @cmd contains the operation to perform.
- * @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg
- * sometimes represents a user space pointer; in other cases, it may be a
- * simple integer value. When @arg represents a user space pointer, it
- * should never be used by the security module.
- * Return 0 if permission is granted.
- * @mmap_addr:
- * Check permissions for a mmap operation at @addr.
- * @addr contains virtual address that will be used for the operation.
- * Return 0 if permission is granted.
- * @mmap_file:
- * Check permissions for a mmap operation. The @file may be NULL, e.g.
- * if mapping anonymous memory.
- * @file contains the file structure for file to map (may be NULL).
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @file_mprotect:
- * Check permissions before changing memory access permissions.
- * @vma contains the memory region to modify.
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * Return 0 if permission is granted.
- * @file_lock:
- * Check permission before performing file locking operations.
- * Note the hook mediates both flock and fcntl style locks.
- * @file contains the file structure.
- * @cmd contains the posix-translated lock operation to perform
- * (e.g. F_RDLCK, F_WRLCK).
- * Return 0 if permission is granted.
- * @file_fcntl:
- * Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg sometimes
- * represents a user space pointer; in other cases, it may be a simple
- * integer value. When @arg represents a user space pointer, it should
- * never be used by the security module.
- * @file contains the file structure.
- * @cmd contains the operation to be performed.
- * @arg contains the operational arguments.
- * Return 0 if permission is granted.
- * @file_set_fowner:
- * Save owner security information (typically from current->security) in
- * file->f_security for later use by the send_sigiotask hook.
- * @file contains the file structure to update.
- * Return 0 on success.
- * @file_send_sigiotask:
- * Check permission for the file owner @fown to send SIGIO or SIGURG to the
- * process @tsk. Note that this hook is sometimes called from interrupt.
- * Note that the fown_struct, @fown, is never outside the context of a
- * struct file, so the file structure (and associated security information)
- * can always be obtained: container_of(fown, struct file, f_owner)
- * @tsk contains the structure of task receiving signal.
- * @fown contains the file owner information.
- * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
- * Return 0 if permission is granted.
- * @file_receive:
- * This hook allows security modules to control the ability of a process
- * to receive an open file descriptor via socket IPC.
- * @file contains the file structure being received.
- * Return 0 if permission is granted.
- * @file_open:
- * Save open-time permission checking state for later use upon
- * file_permission, and recheck access if anything has changed
- * since inode_permission.
- *
- * Security hooks for task operations.
- *
- * @task_alloc:
- * @task task being allocated.
- * @clone_flags contains the flags indicating what should be shared.
- * Handle allocation of task-related resources.
- * Returns a zero on success, negative values on failure.
- * @task_free:
- * @task task about to be freed.
- * Handle release of task-related resources. (Note that this can be called
- * from interrupt context.)
- * @cred_alloc_blank:
- * @cred points to the credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Only allocate sufficient memory and attach to @cred such that
- * cred_transfer() will not get ENOMEM.
- * @cred_free:
- * @cred points to the credentials.
- * Deallocate and clear the cred->security field in a set of credentials.
- * @cred_prepare:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Prepare a new set of credentials by copying the data from the old set.
- * @cred_transfer:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Transfer data from original creds to new creds
- * @cred_getsecid:
- * Retrieve the security identifier of the cred structure @c
- * @c contains the credentials, secid will be placed into @secid.
- * In case of failure, @secid will be set to zero.
- * @kernel_act_as:
- * Set the credentials for a kernel service to act as (subjective context).
- * @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
- * The current task must be the one that nominated @secid.
- * Return 0 if successful.
- * @kernel_create_files_as:
- * Set the file creation context in a set of credentials to be the same as
- * the objective context of the specified inode.
- * @new points to the credentials to be modified.
- * @inode points to the inode to use as a reference.
- * The current task must be the one that nominated @inode.
- * Return 0 if successful.
- * @kernel_module_request:
- * Ability to trigger the kernel to automatically upcall to userspace for
- * userspace to load a kernel module with the given name.
- * @kmod_name name of the module requested by the kernel
- * Return 0 if successful.
- * @kernel_load_data:
- * Load data provided by userspace.
- * @id kernel load data identifier
- * Return 0 if permission is granted.
- * @kernel_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @id kernel read file identifier
- * Return 0 if permission is granted.
- * @kernel_post_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @buf pointer to buffer containing the file contents.
- * @size length of the file contents.
- * @id kernel read file identifier
- * Return 0 if permission is granted.
- * @task_fix_setuid:
- * Update the module's state after setting one or more of the user
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*uid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_fix_setgid:
- * Update the module's state after setting one or more of the group
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*gid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_setpgid:
- * Check permission before setting the process group identifier of the
- * process @p to @pgid.
- * @p contains the task_struct for process being modified.
- * @pgid contains the new pgid.
- * Return 0 if permission is granted.
- * @task_getpgid:
- * Check permission before getting the process group identifier of the
- * process @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsid:
- * Check permission before getting the session identifier of the process
- * @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsecid:
- * Retrieve the security identifier of the process @p.
- * @p contains the task_struct for the process; the secid will be
- * placed into @secid.
- * In case of failure, @secid will be set to zero.
- *
- * @task_setnice:
- * Check permission before setting the nice value of @p to @nice.
- * @p contains the task_struct of process.
- * @nice contains the new nice value.
- * Return 0 if permission is granted.
- * @task_setioprio:
- * Check permission before setting the ioprio value of @p to @ioprio.
- * @p contains the task_struct of process.
- * @ioprio contains the new ioprio value
- * Return 0 if permission is granted.
- * @task_getioprio:
- * Check permission before getting the ioprio value of @p.
- * @p contains the task_struct of process.
- * Return 0 if permission is granted.
- * @task_prlimit:
- * Check permission before getting and/or setting the resource limits of
- * another task.
- * @cred points to the cred structure for the current task.
- * @tcred points to the cred structure for the target task.
- * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the
- * resource limits are being read, modified, or both.
- * Return 0 if permission is granted.
- * @task_setrlimit:
- * Check permission before setting the resource limits of process @p
- * for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (p->signal->rlim + resource).
- * @p points to the task_struct for the target task's group leader.
- * @resource contains the resource whose limit is being set.
- * @new_rlim contains the new limits for @resource.
- * Return 0 if permission is granted.
- * @task_setscheduler:
- * Check permission before setting scheduling policy and/or parameters of
- * process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_getscheduler:
- * Check permission before obtaining scheduling information for process
- * @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_movememory:
- * Check permission before moving memory owned by process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_kill:
- * Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
- * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
- * from the kernel and should typically be permitted.
- * SIGIO signals are handled separately by the send_sigiotask hook in
- * file_security_ops.
- * @p contains the task_struct for process.
- * @info contains the signal information.
- * @sig contains the signal value.
- * @cred contains the cred of the process where the signal originated, or
- * NULL if the current task is the originator.
- * Return 0 if permission is granted.
- * @task_prctl:
- * Check permission before performing a process control operation on the
- * current process.
- * @option contains the operation.
- * @arg2 contains an argument.
- * @arg3 contains an argument.
- * @arg4 contains an argument.
- * @arg5 contains an argument.
- * Return -ENOSYS if no one wanted to handle this op, any other value to
- * cause prctl() to return immediately with that value.
- * @task_to_inode:
- * Set the security attributes for an inode based on an associated task's
- * security attributes, e.g. for /proc/pid inodes.
- * @p contains the task_struct for the task.
- * @inode contains the inode structure for the inode.
- *
- * Security hooks for Netlink messaging.
- *
- * @netlink_send:
- * Save security information for a netlink message so that permission
- * checking can be performed when the message is processed. The security
- * information can be saved using the eff_cap field of the
- * netlink_skb_parms structure. Also may be used to provide fine
- * grained control over message transmission.
- * @sk associated sock of task sending the message.
- * @skb contains the sk_buff structure for the netlink message.
- * Return 0 if the information was successfully saved and message
- * is allowed to be transmitted.
- *
- * Security hooks for Unix domain networking.
- *
- * @unix_stream_connect:
- * Check permissions before establishing a Unix domain stream connection
- * between @sock and @other.
- * @sock contains the sock structure.
- * @other contains the peer sock structure.
- * @newsk contains the new sock structure.
- * Return 0 if permission is granted.
- * @unix_may_send:
- * Check permissions before connecting or sending datagrams from @sock to
- * @other.
- * @sock contains the socket structure.
- * @other contains the peer socket structure.
- * Return 0 if permission is granted.
- *
- * The @unix_stream_connect and @unix_may_send hooks were necessary because
- * Linux provides an alternative to the conventional file name space for Unix
- * domain sockets. Whereas binding and connecting to sockets in the file name
- * space is mediated by the typical file permissions (and caught by the mknod
- * and permission hooks in inode_security_ops), binding and connecting to
- * sockets in the abstract name space is completely unmediated. Sufficient
- * control of Unix domain sockets in the abstract name space isn't possible
- * using only the socket layer hooks, since we need to know the actual target
- * socket, which is not looked up until we are inside the af_unix code.
- *
- * Security hooks for socket operations.
- *
- * @socket_create:
- * Check permissions prior to creating a new socket.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * Return 0 if permission is granted.
- * @socket_post_create:
- * This hook allows a module to update or allocate a per-socket security
- * structure. Note that the security field was not added directly to the
- * socket structure, but rather, the socket security information is stored
- * in the associated inode. Typically, the inode alloc_security hook will
- * allocate and attach security information to
- * SOCK_INODE(sock)->i_security. This hook may be used to update the
- * SOCK_INODE(sock)->i_security field with additional information that
- * wasn't available when the inode was allocated.
- * @sock contains the newly created socket structure.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * @socket_socketpair:
- * Check permissions before creating a fresh pair of sockets.
- * @socka contains the first socket structure.
- * @sockb contains the second socket structure.
- * Return 0 if permission is granted and the connection was established.
- * @socket_bind:
- * Check permission before socket protocol layer bind operation is
- * performed and the socket @sock is bound to the address specified in the
- * @address parameter.
- * @sock contains the socket structure.
- * @address contains the address to bind to.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_connect:
- * Check permission before socket protocol layer connect operation
- * attempts to connect socket @sock to a remote address, @address.
- * @sock contains the socket structure.
- * @address contains the address of remote endpoint.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_listen:
- * Check permission before socket protocol layer listen operation.
- * @sock contains the socket structure.
- * @backlog contains the maximum length for the pending connection queue.
- * Return 0 if permission is granted.
- * @socket_accept:
- * Check permission before accepting a new connection. Note that the new
- * socket, @newsock, has been created and some information copied to it,
- * but the accept operation has not actually been performed.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
- * Return 0 if permission is granted.
- * @socket_sendmsg:
- * Check permission before transmitting a message to another socket.
- * @sock contains the socket structure.
- * @msg contains the message to be transmitted.
- * @size contains the size of message.
- * Return 0 if permission is granted.
- * @socket_recvmsg:
- * Check permission before receiving a message from a socket.
- * @sock contains the socket structure.
- * @msg contains the message structure.
- * @size contains the size of message structure.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @socket_getsockname:
- * Check permission before the local address (name) of the socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getpeername:
- * Check permission before the remote address (name) of a socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getsockopt:
- * Check permissions before retrieving the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to retrieve option from.
- * @optname contains the name of option to retrieve.
- * Return 0 if permission is granted.
- * @socket_setsockopt:
- * Check permissions before setting the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to set options for.
- * @optname contains the name of the option to set.
- * Return 0 if permission is granted.
- * @socket_shutdown:
- * Check permission before all or part of a connection on the socket
- * @sock is shut down.
- * @sock contains the socket structure.
- * @how contains the flag indicating how future sends and receives
- * are handled.
- * Return 0 if permission is granted.
- * @socket_sock_rcv_skb:
- * Check permissions on incoming network packets. This hook is distinct
- * from Netfilter's IP input hooks since it is the first time that the
- * incoming sk_buff @skb has been associated with a particular socket, @sk.
- * Must not sleep inside this hook because some callers hold spinlocks.
- * @sk contains the sock (not socket) associated with the incoming sk_buff.
- * @skb contains the incoming network data.
- * @socket_getpeersec_stream:
- * This hook allows the security module to provide peer socket security
- * state for unix or connected tcp sockets to userspace via getsockopt
- * SO_PEERSEC. For tcp sockets this can be meaningful if the
- * socket is associated with an ipsec SA.
- * @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
- * of the security state.
- * @len as input is the maximum length to copy to userspace provided
- * by the caller.
- * Return 0 if all is well, otherwise, typical getsockopt return
- * values.
- * @socket_getpeersec_dgram:
- * This hook allows the security module to provide peer socket security
- * state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_PEERSEC. The application must first have indicated
- * the IP_PASSSEC option via setsockopt. It can then retrieve the
- * security state returned by this hook for a packet via the SCM_SECURITY
- * ancillary message type.
- * @sock contains the peer socket. May be NULL.
- * @skb is the sk_buff for the packet being queried. May be NULL.
- * @secid pointer to store the secid of the packet.
- * Return 0 on success, error on failure.
- * @sk_alloc_security:
- * Allocate and attach a security structure to the sk->sk_security field,
- * which is used to copy security attributes between local stream sockets.
- * @sk_free_security:
- * Deallocate security structure.
- * @sk_clone_security:
- * Clone/copy security structure.
- * @sk_getsecid:
- * Retrieve the LSM-specific secid for the sock to enable caching
- * of network authorizations.
- * @sock_graft:
- * Sets the socket's isec sid to the sock's sid.
- * @inet_conn_request:
- * Sets the openreq's sid to socket's sid with MLS portion taken
- * from peer sid.
- * @inet_csk_clone:
- * Sets the new child socket's sid to the openreq sid.
- * @inet_conn_established:
- * Sets the connection's peersid to the secmark on skb.
- * @secmark_relabel_packet:
- * check if the process should be allowed to relabel packets to
- * the given secid
- * @secmark_refcount_inc:
- * tells the LSM to increment the number of secmark labeling rules loaded
- * @secmark_refcount_dec:
- * tells the LSM to decrement the number of secmark labeling rules loaded
- * @req_classify_flow:
- * Sets the flow's sid to the openreq sid.
- * @tun_dev_alloc_security:
- * This hook allows a module to allocate a security structure for a TUN
- * device.
- * @security pointer to a security structure pointer.
- * Returns zero on success, negative values on failure.
- * @tun_dev_free_security:
- * This hook allows a module to free the security structure for a TUN
- * device.
- * @security pointer to the TUN device's security structure
- * @tun_dev_create:
- * Check permissions prior to creating a new TUN device.
- * @tun_dev_attach_queue:
- * Check permissions prior to attaching to a TUN device queue.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_attach:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's sock structure.
- * @sk contains the existing sock structure.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_open:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's security structure.
- * @security pointer to the TUN device's security structure.
- *
- * Security hooks for SCTP
- *
- * @sctp_assoc_request:
- * Passes the @ep and @chunk->skb of the association INIT packet to
- * the security module.
- * @ep pointer to sctp endpoint structure.
- * @skb pointer to skbuff of association packet.
- * Return 0 on success, error on failure.
- * @sctp_bind_connect:
- * Validate permissions required for each address associated with sock
- * @sk. Depending on @optname, the addresses will be treated as either
- * for a connect or bind service. The @addrlen is calculated on each
- * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or
- * sizeof(struct sockaddr_in6).
- * @sk pointer to sock structure.
- * @optname name of the option to validate.
- * @address list containing one or more ipv4/ipv6 addresses.
- * @addrlen total length of address(es).
- * Return 0 on success, error on failure.
- * @sctp_sk_clone:
- * Called whenever a new socket is created by accept(2) (i.e. a TCP
- * style socket) or when a socket is 'peeled off', e.g. userspace
- * calls sctp_peeloff(3).
- * @ep pointer to current sctp endpoint structure.
- * @sk pointer to current sock structure.
- * @newsk pointer to new sock structure.
- *
- * Security hooks for Infiniband
- *
- * @ib_pkey_access:
- * Check permission to access a pkey when modifying a QP.
- * @subnet_prefix the subnet prefix of the port being used.
- * @pkey the pkey to be accessed.
- * @sec pointer to a security structure.
- * @ib_endport_manage_subnet:
- * Check permissions to send and receive SMPs on an end port.
- * @dev_name the IB device name (e.g. mlx4_0).
- * @port_num the port number.
- * @sec pointer to a security structure.
- * @ib_alloc_security:
- * Allocate a security structure for Infiniband objects.
- * @sec pointer to a security structure pointer.
- * Returns 0 on success, non-zero on failure.
- * @ib_free_security:
- * Deallocate an Infiniband security structure.
- * @sec contains the security structure to be freed.
- *
- * Security hooks for XFRM operations.
- *
- * @xfrm_policy_alloc_security:
- * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
- * Database used by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level policy update program (e.g., setkey).
- * Allocate a security structure to the xp->security field; the security
- * field is initialized to NULL when the xfrm_policy is allocated.
- * Return 0 if operation was successful (memory to allocate, legal context).
- * @gfp is to specify the context for the allocation
- * @xfrm_policy_clone_security:
- * @old_ctx contains an existing xfrm_sec_ctx.
- * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
- * Allocate a security structure in new_ctxp that contains the
- * information from the old_ctx structure.
- * Return 0 if operation was successful (memory to allocate).
- * @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
- * Deallocate xp->security.
- * @xfrm_policy_delete_security:
- * @ctx contains the xfrm_sec_ctx.
- * Authorize deletion of xp->security.
- * @xfrm_state_alloc:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to sec_ctx. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_alloc_acquire:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @polsec contains the policy's security context.
- * @secid contains the secid from which to take the mls portion of the
- * context.
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to secid. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_free_security:
- * @x contains the xfrm_state.
- * Deallocate x->security.
- * @xfrm_state_delete_security:
- * @x contains the xfrm_state.
- * Authorize deletion of x->security.
- * @xfrm_policy_lookup:
- * @ctx contains the xfrm_sec_ctx for which the access control is being
- * checked.
- * @fl_secid contains the flow security label that is used to authorize
- * access to the policy xp.
- * @dir contains the direction of the flow (input or output).
- * Check permission when a flow selects a xfrm_policy for processing
- * XFRMs on a packet. The hook is called when selecting either a
- * per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted, -ESRCH otherwise, or -errno
- * on other errors.
- * @xfrm_state_pol_flow_match:
- * @x contains the state to match.
- * @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
- * Return 1 if there is a match.
- * @xfrm_decode_session:
- * @skb points to skb to decode.
- * @secid points to the flow key secid to set.
- * @ckall says if all xfrms used should be checked for same secid.
- * Return 0 if ckall is zero or all xfrms used have the same secid.
- *
- * Security hooks affecting all Key Management operations
- *
- * @key_alloc:
- * Permit allocation of a key and assign security data. Note that key does
- * not have a serial number assigned at this point.
- * @key points to the key.
- * @flags is the allocation flags
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_free:
- * Notification of destruction; free security data.
- * @key points to the key.
- * No return value.
- * @key_permission:
- * See whether a specific operational right is granted to a process on a
- * key.
- * @key_ref refers to the key (key pointer + possession attribute bit).
- * @cred points to the credentials to provide the context against which to
- * evaluate the security data on the key.
- * @perm describes the combination of permissions required of this key.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_getsecurity:
- * Get a textual representation of the security context attached to a key
- * for the purposes of honouring KEYCTL_GETSECURITY. This function
- * allocates the storage for the NUL-terminated string and the caller
- * should free it.
- * @key points to the key to be queried.
- * @_buffer points to a pointer that should be set to point to the
- * resulting string (unless there is no label or an error occurs).
- * Return the length of the string (including terminating NUL) or -ve if
- * an error.
- * May also return 0 (and a NULL buffer pointer) if there is no label.
- *
- * Security hooks affecting all System V IPC operations.
- *
- * @ipc_permission:
- * Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
- * Return 0 if permission is granted.
- * @ipc_getsecid:
- * Get the secid associated with the ipc object.
- * @ipcp contains the kernel IPC permission structure.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for individual messages held in System V IPC message queues
- *
- * @msg_msg_alloc_security:
- * Allocate and attach a security structure to the msg->security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @msg contains the message structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_msg_free_security:
- * Deallocate the security structure for this message.
- * @msg contains the message structure to be modified.
- *
- * Security hooks for System V IPC Message Queues
- *
- * @msg_queue_alloc_security:
- * Allocate and attach a security structure to the
- * @perm->security field. The security field is initialized to
- * NULL when the structure is first created.
- * @perm contains the IPC permissions of the message queue.
- * Return 0 if operation was successful and permission is granted.
- * @msg_queue_free_security:
- * Deallocate security field @perm->security for the message queue.
- * @perm contains the IPC permissions of the message queue.
- * @msg_queue_associate:
- * Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
- * message queue identifier for an existing message queue, not when a
- * new message queue is created.
- * @perm contains the IPC permissions of the message queue.
- * @msqflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgctl:
- * Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue with permissions @perm.
- * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @perm contains the IPC permissions of the msg queue. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @msg_queue_msgsnd:
- * Check permission before a message, @msg, is enqueued on the message
- * queue with permissions @perm.
- * @perm contains the IPC permissions of the message queue.
- * @msg contains the message to be enqueued.
- * @msqflg contains operational flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgrcv:
- * Check permission before a message, @msg, is removed from the message
- * queue. The @target task structure contains a pointer to the
- * process that will be receiving the message (not equal to the current
- * process when inline receives are being performed).
- * @perm contains the IPC permissions of the message queue.
- * @msg contains the message destination.
- * @target contains the task structure for recipient process.
- * @type contains the type of message requested.
- * @mode contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Shared Memory Segments
- *
- * @shm_alloc_security:
- * Allocate and attach a security structure to the @perm->security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @perm contains the IPC permissions of the shared memory structure.
- * Return 0 if operation was successful and permission is granted.
- * @shm_free_security:
- * Deallocate the security structure @perm->security for the memory segment.
- * @perm contains the IPC permissions of the shared memory structure.
- * @shm_associate:
- * Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
- * memory region identifier for an existing region, not when a new shared
- * memory region is created.
- * @perm contains the IPC permissions of the shared memory structure.
- * @shmflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @shm_shmctl:
- * Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region with permissions @perm.
- * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @perm contains the IPC permissions of the shared memory structure.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @shm_shmat:
- * Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment with permissions @perm to the data segment of the
- * calling process. The attaching address is specified by @shmaddr.
- * @perm contains the IPC permissions of the shared memory structure.
- * @shmaddr contains the address to attach memory region to.
- * @shmflg contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Semaphores
- *
- * @sem_alloc_security:
- * Allocate and attach a security structure to the @perm->security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @perm contains the IPC permissions of the semaphore.
- * Return 0 if operation was successful and permission is granted.
- * @sem_free_security:
- * Deallocate security structure @perm->security for the semaphore.
- * @perm contains the IPC permissions of the semaphore.
- * @sem_associate:
- * Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
- * identifier for an existing semaphore, not when a new one must be
- * created.
- * @perm contains the IPC permissions of the semaphore.
- * @semflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @sem_semctl:
- * Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore. The @perm may be NULL, e.g. for
- * IPC_INFO or SEM_INFO.
- * @perm contains the IPC permissions of the semaphore. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @sem_semop:
- * Check permissions before performing operations on members of the
- * semaphore set. If the @alter flag is nonzero, the semaphore set
- * may be modified.
- * @perm contains the IPC permissions of the semaphore.
- * @sops contains the operations to perform.
- * @nsops contains the number of operations to perform.
- * @alter contains the flag indicating whether changes are to be made.
- * Return 0 if permission is granted.
- *
- * @binder_set_context_mgr:
- * Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
- * Return 0 if permission is granted.
- * @binder_transaction:
- * Check whether @from is allowed to invoke a binder transaction call
- * to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_binder:
- * Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_file:
- * Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
- * @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
- *
- * @ptrace_access_check:
- * Check permission before allowing the current process to trace the
- * @child process.
- * Security modules may also want to perform a process tracing check
- * during an execve in the bprm_set_creds hook of
- * binprm_security_ops if the process is being traced and its security
- * attributes would be changed by the execve.
- * @child contains the task_struct structure for the target process.
- * @mode contains the PTRACE_MODE flags indicating the form of access.
- * Return 0 if permission is granted.
- * @ptrace_traceme:
- * Check that the @parent process has sufficient permission to trace the
- * current process before allowing the current process to present itself
- * to the @parent process for tracing.
- * @parent contains the task_struct structure for debugger process.
- * Return 0 if permission is granted.
- * @capget:
- * Get the @effective, @inheritable, and @permitted capability sets for
- * the @target process. The hook may also perform permission checking to
- * determine if the current process is allowed to see the capability sets
- * of the @target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if the capability sets were successfully obtained.
- * @capset:
- * Set the @effective, @inheritable, and @permitted capability sets for
- * the current process.
- * @new contains the new credentials structure for target process.
- * @old contains the current credentials structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 and update @new if permission is granted.
- * @capable:
- * Check whether the credentials @cred include the @cap capability
- * in the user namespace @ns.
- * @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in.
- * @cap contains the capability <include/linux/capability.h>.
- * @opts contains options for the capable check <include/linux/security.h>.
- * Return 0 if the capability is granted.
- * @quotactl:
- * Check whether the quotactl syscall is allowed for this @sb.
- * @quota_on:
- * Check whether QUOTAON is allowed for this @dentry.
- * @syslog:
- * Check permission before accessing the kernel message ring or changing
- * logging to the console.
- * See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h>.
- * Return 0 if permission is granted.
- * @settime:
- * Check permission to change the system time.
- * struct timespec64 is defined in <include/linux/time64.h> and timezone
- * is defined in <include/linux/time.h>.
- * @ts contains the new time.
- * @tz contains the new timezone.
- * Return 0 if permission is granted.
- * @vm_enough_memory:
- * Check permissions for allocating a new virtual mapping.
- * @mm contains the mm struct it is being added to.
- * @pages contains the number of pages.
- * Return 0 if permission is granted.
- *
- * @ismaclabel:
- * Check if the extended attribute specified by @name
- * represents a MAC label. Returns 1 if @name is a MAC
- * attribute, otherwise returns 0.
- * @name full extended attribute name to check against
- * LSM as a MAC label.
- *
- * @secid_to_secctx:
- * Convert secid to security context. If secdata is NULL the length of
- * the result will be returned in seclen, but no secdata will be returned.
- * This does mean that the length could change between calls to check the
- * length and the next call which actually allocates and returns the
- * secdata.
- * @secid contains the security ID.
- * @secdata contains the pointer that stores the converted security
- * context.
- * @seclen pointer which contains the length of the data
- * @secctx_to_secid:
- * Convert security context to secid.
- * @secid contains the pointer to the generated security ID.
- * @secdata contains the security context.
- *
- * @release_secctx:
- * Release the security context.
- * @secdata contains the security context.
- * @seclen contains the length of the security context.
- *
- * Security hooks for Audit
- *
- * @audit_rule_init:
- * Allocate and initialize an LSM audit rule structure.
- * @field contains the required Audit action.
- * Field flags are defined in <include/linux/audit.h>.
- * @op contains the operator the rule uses.
- * @rulestr contains the context where the rule will be applied to.
- * @lsmrule contains a pointer to receive the result.
- * Return 0 if @lsmrule has been successfully set,
- * -EINVAL in case of an invalid rule.
- *
- * @audit_rule_known:
- * Specifies whether given @krule contains any fields related to
- * current LSM.
- * @krule contains the audit rule of interest.
- * Return 1 in case of relation found, 0 otherwise.
- *
- * @audit_rule_match:
- * Determine if given @secid matches a rule previously approved
- * by @audit_rule_known.
- * @secid contains the security id in question.
- * @field contains the field which relates to current LSM.
- * @op contains the operator that will be used for matching.
- * @lrule points to the audit rule that will be checked against.
- * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
- *
- * @audit_rule_free:
- * Deallocate the LSM audit rule structure previously allocated by
- * audit_rule_init.
- * @lsmrule contains the allocated rule
- *
- * @inode_invalidate_secctx:
- * Notify the security module that it must revalidate the security context
- * of an inode.
- *
- * @inode_notifysecctx:
- * Notify the security module of what the security context of an inode
- * should be. Initializes the incore security context managed by the
- * security module for this inode. Example usage: NFS client invokes
- * this hook to initialize the security context in its incore inode to the
- * value provided by the server for the file when the server returned the
- * file's attributes to the client.
- * Must be called with inode->i_mutex locked.
- * @inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_setsecctx:
- * Change the security context of an inode. Updates the
- * incore security context managed by the security module and invokes the
- * fs code as needed (via __vfs_setxattr_noperm) to update any backing
- * xattrs that represent the context. Example usage: NFS server invokes
- * this hook to change the security context in its incore inode and on the
- * backing filesystem to a value provided by the client on a SETATTR
- * operation.
- * Must be called with inode->i_mutex locked.
- * @dentry contains the inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_getsecctx:
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
- * @inode we wish to get the security context of.
- * @ctx is a pointer in which to place the allocated security context.
- * @ctxlen points to the place to put the length of @ctx.
- *
- * Security hooks for the general notification queue:
- *
- * @post_notification:
- * Check to see if a watch notification can be posted to a particular
- * queue.
- * @w_cred: The credentials of whoever set the watch.
- * @cred: The event-triggerer's credentials.
- * @n: The notification being posted.
- *
- * @watch_key:
- * Check to see if a process is allowed to watch for event notifications
- * from a key or keyring.
- * @key: The key to watch.
- *
- * Security hooks for using the eBPF maps and programs functionalities through
- * eBPF syscalls.
- *
- * @bpf:
- * Do an initial check for all bpf syscalls after the attribute is
- * copied into the kernel. Security modules can implement their own
- * rules to check the specific cmd they need.
- *
- * @bpf_map:
- * Do a check when the kernel generates and returns a file descriptor for
- * eBPF maps.
- *
- * @map: bpf map that we want to access
- * @mask: the access flags
- *
- * @bpf_prog:
- * Do a check when the kernel generates and returns a file descriptor for
- * eBPF programs.
- *
- * @prog: bpf prog that userspace wants to use.
- *
- * @bpf_map_alloc_security:
- * Initialize the security field inside bpf map.
- *
- * @bpf_map_free_security:
- * Clean up the security information stored inside bpf map.
- *
- * @bpf_prog_alloc_security:
- * Initialize the security field inside bpf program.
- *
- * @bpf_prog_free_security:
- * Clean up the security information stored inside bpf prog.
- *
- * @locked_down:
- * Determine whether a kernel feature that potentially enables arbitrary
- * code execution in kernel space should be permitted.
- *
- * @what: kernel feature being accessed
- *
- * Security hooks for perf events
- *
- * @perf_event_open:
- * Check whether the @type of perf_event_open syscall is allowed.
- * @perf_event_alloc:
- * Allocate and save perf_event security info.
- * @perf_event_free:
- * Release (free) perf_event security info.
- * @perf_event_read:
- * Read perf_event security info if allowed.
- * @perf_event_write:
- * Write perf_event security info if allowed.
- */
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
#include "lsm_hook_defs.h"
@@ -1532,6 +43,18 @@ struct security_hook_heads {
#undef LSM_HOOK
} __randomize_layout;
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @name: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
+};
+
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
@@ -1540,7 +63,7 @@ struct security_hook_list {
struct hlist_node list;
struct hlist_head *head;
union security_list_options hook;
- char *lsm;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
@@ -1550,11 +73,31 @@ struct lsm_blob_sizes {
int lbs_cred;
int lbs_file;
int lbs_inode;
+ int lbs_superblock;
int lbs_ipc;
int lbs_msg_msg;
int lbs_task;
+ int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
};
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The slot to fill in @xattrs, or NULL if @xattrs is NULL.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
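As a usage illustration (not part of this patch), a hook shaped like the current inode_init_security hook could claim one of the slots reserved via lbs_xattr_count; the hook body, xattr suffix, and label value below are hypothetical:

/* Hypothetical sketch only; the xattr suffix and label are illustrative. */
static int example_inode_init_security(struct inode *inode, struct inode *dir,
				       const struct qstr *qstr,
				       struct xattr *xattrs, int *xattr_count)
{
	struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
	char *label;

	if (!xattr)			/* caller supplied no xattr array */
		return 0;

	label = kstrdup("unlabeled", GFP_NOFS);	/* assumed default label */
	if (!label)
		return -ENOMEM;

	xattr->name = "example";	/* suffix only; the fs adds "security." */
	xattr->value = label;
	xattr->value_len = strlen(label) + 1;
	return 0;
}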
+
/*
* LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
* LSM hooks (in include/linux/lsm_hook_defs.h).
@@ -1574,7 +117,7 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- char *lsm);
+ const struct lsm_id *lsmid);
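A hedged registration sketch showing the new lsm_id plumbing end to end; the module name, ID value, and hook are placeholders (real ID numbers live in uapi/linux/lsm.h):

/* Hypothetical sketch only. */
static int example_inode_alloc(struct inode *inode)
{
	return 0;
}

static const struct lsm_id example_lsmid = {
	.name = "example",
	.id = 100,		/* assumed; real values come from uapi/linux/lsm.h */
};

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(inode_alloc_security, example_inode_alloc),
};

static int __init example_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}

DEFINE_LSM(example) = {
	.name = "example",
	.init = example_init,
};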
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
@@ -1582,6 +125,7 @@ extern void security_add_hooks(struct security_hook_list *hooks, int count,
enum lsm_order {
LSM_ORDER_FIRST = -1, /* This is only for capabilities. */
LSM_ORDER_MUTABLE = 0,
+ LSM_ORDER_LAST = 1, /* This is only for integrity. */
};
struct lsm_info {
@@ -1598,44 +142,14 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
- __used __section(.lsm_info.init) \
+ __used __section(".lsm_info.init") \
__aligned(sizeof(unsigned long))
#define DEFINE_EARLY_LSM(lsm) \
static struct lsm_info __early_lsm_##lsm \
- __used __section(.early_lsm_info.init) \
+ __used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
-#ifdef CONFIG_SECURITY_SELINUX_DISABLE
-/*
- * Assuring the safety of deleting a security module is up to
- * the security module involved. This may entail ordering the
- * module's hook list in a particular way, refusing to disable
- * the module once a policy is loaded or any number of other
- * actions better imagined than described.
- *
- * The name of the configuration option reflects the only module
- * that currently uses the mechanism. Any developer who thinks
- * disabling their module is a good idea needs to be at least as
- * careful as the SELinux team.
- */
-static inline void security_delete_hooks(struct security_hook_list *hooks,
- int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- hlist_del_rcu(&hooks[i].list);
-}
-#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
-
-/* Currently required to handle SELinux runtime hook disable. */
-#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
-#define __lsm_ro_after_init
-#else
-#define __lsm_ro_after_init __ro_after_init
-#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
-
extern int lsm_inode_alloc(struct inode *inode);
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns %true, it is safe to go to sleep
+ * after a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the iterator variable
+ * is set to NULL, the iterator removes that entry from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may previously have been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_dequeue() */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the lwq_node embedded in the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may previously have been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+ /* acquire ensures ordering wrt lwq_dequeue() */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
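To make the intended producer/consumer pattern concrete, a hedged usage sketch (not part of the patch); the item type, wait queue, and helper names are assumptions:

/* Hypothetical usage sketch; call lwq_init(&work_queue) once at startup. */
struct work_item {
	int payload;
	struct lwq_node node;
};

static struct lwq work_queue;
static DECLARE_WAIT_QUEUE_HEAD(work_wait);

static void producer(int v)		/* safe in any context, even IRQ */
{
	struct work_item *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	w->payload = v;
	if (lwq_enqueue(&w->node, &work_queue))
		wake_up(&work_wait);	/* queue may have been empty */
}

static void consumer(void)		/* process context only */
{
	struct work_item *w;

	while ((w = lwq_dequeue(&work_queue, struct work_item, node))) {
		/* ... handle w->payload ... */
		kfree(w);
	}
}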
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index a7330eb3ec64..7aab4a769736 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -18,7 +18,6 @@
#ifndef mISDNIF_H
#define mISDNIF_H
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
@@ -587,7 +586,7 @@ extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
void *);
extern void mISDN_unregister_clock(struct mISDNclock *);
-static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev)
{
if (dev)
return dev_get_drvdata(dev);
diff --git a/include/linux/mailbox/arm_mhuv2_message.h b/include/linux/mailbox/arm_mhuv2_message.h
new file mode 100644
index 000000000000..821b9d96daa4
--- /dev/null
+++ b/include/linux/mailbox/arm_mhuv2_message.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM MHUv2 Mailbox Message
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#ifndef _LINUX_ARM_MHUV2_MESSAGE_H_
+#define _LINUX_ARM_MHUV2_MESSAGE_H_
+
+#include <linux/types.h>
+
+/* Data structure for data-transfer protocol */
+struct arm_mhuv2_mbox_msg {
+ void *data;
+ size_t len;
+};
+
+#endif /* _LINUX_ARM_MHUV2_MESSAGE_H_ */
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index 05eea1aef5aa..a8f0070c7aa9 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -28,8 +28,7 @@
* bit 16-27: update value
* bit 31: 1 - update, 0 - no update
*/
-#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
- CMDQ_WFE_WAIT_VALUE)
+#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
/** cmdq event maximum */
#define CMDQ_MAX_EVENT 0x3ff
@@ -60,24 +59,15 @@ enum cmdq_code {
CMDQ_CODE_JUMP = 0x10,
CMDQ_CODE_WFE = 0x20,
CMDQ_CODE_EOC = 0x40,
+ CMDQ_CODE_READ_S = 0x80,
+ CMDQ_CODE_WRITE_S = 0x90,
+ CMDQ_CODE_WRITE_S_MASK = 0x91,
CMDQ_CODE_LOGIC = 0xa0,
};
-enum cmdq_cb_status {
- CMDQ_CB_NORMAL = 0,
- CMDQ_CB_ERROR
-};
-
struct cmdq_cb_data {
- enum cmdq_cb_status sta;
- void *data;
-};
-
-typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
-
-struct cmdq_task_cb {
- cmdq_async_flush_cb cb;
- void *data;
+ int sta;
+ struct cmdq_pkt *pkt;
};
struct cmdq_pkt {
@@ -85,8 +75,6 @@ struct cmdq_pkt {
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
- struct cmdq_task_cb cb;
- struct cmdq_task_cb async_cb;
void *cl;
};
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 9542b41eacfd..31d8046d945e 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -9,12 +9,12 @@
* @data: message payload
*
* This is the structure for data used in mbox_send_message;
- * the maximum length of data buffer is fixed to 12 bytes.
+ * the maximum length of data buffer is fixed to 32 bytes.
* Client is supposed to be aware of this.
*/
struct zynqmp_ipi_message {
size_t len;
- u8 data[0];
+ u8 data[];
};
#endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */
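With a true flexible array member, callers can size the allocation with struct_size() rather than open-coded arithmetic; a minimal hypothetical sketch:

/* Hypothetical allocation sketch using struct_size() from <linux/overflow.h>. */
static struct zynqmp_ipi_message *example_alloc_msg(size_t len)
{
	struct zynqmp_ipi_message *msg;

	msg = kzalloc(struct_size(msg, data, len), GFP_KERNEL);
	if (msg)
		msg->len = len;		/* caller fills msg->data[0..len-1] */
	return msg;
}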
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 65229a45590f..734694912ef7 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -37,6 +37,7 @@ struct mbox_client {
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
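For context, a hedged sketch of a client using the new mbox_bind_client() next to the existing send path; the callback, timeout, and message contents are illustrative assumptions:

/* Hypothetical client sketch; field values are illustrative. */
static void example_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	if (r < 0)
		dev_err(cl->dev, "mailbox send failed: %d\n", r);
}

static int example_attach(struct device *dev, struct mbox_chan *chan)
{
	static struct mbox_client cl;
	static char msg[] = "ping";
	int ret;

	cl.dev = dev;
	cl.tx_done = example_tx_done;
	cl.tx_block = true;
	cl.tx_tout = 500;			/* ms to wait for tx completion */

	ret = mbox_bind_client(chan, &cl);	/* new: binds without a DT lookup */
	if (ret)
		return ret;

	ret = mbox_send_message(chan, msg);
	return ret < 0 ? ret : 0;
}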
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 36d6ce673503..6fee33cb52f5 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -83,6 +83,7 @@ struct mbox_controller {
const struct of_phandle_args *sp);
/* Internal to API */
struct hrtimer poll_hrt;
+ spinlock_t poll_hrt_lock;
struct list_head node;
};
diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h
new file mode 100644
index 000000000000..62674c83bde4
--- /dev/null
+++ b/include/linux/map_benchmark.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 HiSilicon Limited.
+ */
+
+#ifndef _KERNEL_DMA_BENCHMARK_H
+#define _KERNEL_DMA_BENCHMARK_H
+
+#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
+#define DMA_MAP_MAX_THREADS 1024
+#define DMA_MAP_MAX_SECONDS 300
+#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC)
+
+#define DMA_MAP_BIDIRECTIONAL 0
+#define DMA_MAP_TO_DEVICE 1
+#define DMA_MAP_FROM_DEVICE 2
+
+struct map_benchmark {
+ __u64 avg_map_100ns; /* average map latency in 100ns */
+ __u64 map_stddev; /* standard deviation of map latency */
+ __u64 avg_unmap_100ns; /* as above */
+ __u64 unmap_stddev;
+ __u32 threads; /* how many threads will do map/unmap in parallel */
+ __u32 seconds; /* how long the test will last */
+ __s32 node; /* which numa node this benchmark will run on */
+ __u32 dma_bits; /* DMA addressing capability */
+ __u32 dma_dir; /* DMA data direction */
+ __u32 dma_trans_ns; /* time for DMA transmission in ns */
+ __u32 granule; /* how many PAGE_SIZE units to map/unmap at a time */
+};
+#endif /* _KERNEL_DMA_BENCHMARK_H */
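Since this struct doubles as the ioctl ABI, a hedged userspace sketch of driving the benchmark (the debugfs path matches the in-tree selftest and requires CONFIG_DMA_MAP_BENCHMARK):

/* Hypothetical userspace sketch; error handling is abbreviated. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/map_benchmark.h>	/* assumes the header above is visible */

int main(void)
{
	struct map_benchmark map = {
		.threads = 1,
		.seconds = 10,
		.node = -1,			/* no NUMA node binding */
		.dma_bits = 32,
		.dma_dir = DMA_MAP_BIDIRECTIONAL,
		.granule = 1,			/* map one page at a time */
	};
	int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);

	if (fd < 0 || ioctl(fd, DMA_MAP_BENCHMARK, &map) < 0)
		return 1;
	printf("avg map latency: %llu00 ns, stddev: %llu00 ns\n",
	       (unsigned long long)map.avg_map_100ns,
	       (unsigned long long)map.map_stddev);
	return 0;
}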
diff --git a/include/linux/maple.h b/include/linux/maple.h
index 9b140272ee16..9aae44efcfd4 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -5,7 +5,6 @@
#include <mach/maple.h>
struct device;
-extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..a53ad4dabd7e
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,864 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing a 32
+ * or 64 bit value, the offset can fit into 4 bits. 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, the choice between an allocation range type and a
+ * range type is made by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
+ * flag.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0x??1 : Root
+ * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0x010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end;
+ unsigned char gap;
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (i.e. 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted right by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges; pivots may appear in the
+ * subtree with an entry attached to the value, whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * * MAPLE_HEIGHT_MAX - The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+#define MT_FLAGS_ALLOC_WRAPPED 0x0800
+
+#define MAPLE_HEIGHT_MAX 31
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+typedef struct lockdep_map *lockdep_map_p;
+#define mt_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
+
+#define mt_write_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || \
+ lock_is_held_type((mt)->ma_external_lock, 0))
+
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+
+#define mt_on_stack(mt) (mt).ma_external_lock = NULL
+#else
+typedef struct { /* nothing */ } lockdep_map_p;
+#define mt_lock_is_held(mt) 1
+#define mt_write_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#define mt_on_stack(mt) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of the flags is to indicate global states of the tree. This is the
+ * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+ lockdep_map_p ma_external_lock;
+ };
+ unsigned int ma_flags;
+ void __rcu *ma_root;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
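
For example, trees can be declared statically with either helper (illustrative only; the allocation-tree variant opts in to gap tracking):

static DEFINE_MTREE(example_tree);		/* flags = 0 */

static struct maple_tree example_gap_tree =
	MTREE_INIT(example_gap_tree, MT_FLAGS_ALLOC_RANGE);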
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+ spin_lock_nested((&(mt)->ma_lock), subclass)
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes
+ *
+ * All nodes consist of a number of node slots, pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
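
A rough sketch of the basic (internally locked) API declared above; the indices and error handling here are illustrative, not taken from kernel code:

static int example_basic_use(struct maple_tree *mt, void *item)
{
	void *entry;
	int ret;

	/* Associate the range [0x1000, 0x1fff] with item. */
	ret = mtree_store_range(mt, 0x1000, 0x1fff, item, GFP_KERNEL);
	if (ret)
		return ret;

	entry = mtree_load(mt, 0x1234);	/* any index in the range returns item */
	if (entry != item)
		return -EINVAL;

	mtree_erase(mt, 0x1000);	/* removes the whole stored range */
	return 0;
}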
+
+/* Advanced API */
+
+/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale; restart the
+ * operation.
+ * ma_overflow means the search has reached the upper limit of the search.
+ * ma_underflow means the search has reached the lower limit of the search.
+ * ma_error means there was an error; check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either has a request number of nodes or an allocated node. If
+ * state->alloc has a requested number of nodes, the first bit will be set (0x1)
+ * and the remaining bits are the value. If state->alloc is a node, then the
+ * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
+ * storing more allocated nodes, a total number of nodes allocated, and the
+ * node_count in this node. node_count is the number of allocated nodes in this
+ * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
+ * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
+ * by removing a node from the state->alloc node until state->alloc->node_count
+ * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the data may be stale and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+/*
+ * Special values for ma_state.node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = NULL, \
+ .status = ma_start, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .alloc = NULL, \
+ .mas_flags = 0, \
+ }
+
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_range(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+void *mas_find_range_rev(struct ma_state *mas, unsigned long min);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
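
A sketch of a write through the advanced API (illustrative, assuming the tree uses its internal spinlock); mas_store_gfp() may drop and retake the lock to allocate, which is why GFP_KERNEL is usable here:

static int example_store(struct maple_tree *mt, unsigned long first,
			 unsigned long last, void *item)
{
	MA_STATE(mas, mt, first, last);
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, item, GFP_KERNEL);
	mas_unlock(&mas);

	return ret;
}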
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_prev_range(struct ma_state *mas, unsigned long min);
+void *mas_next(struct ma_state *mas, unsigned long max);
+void *mas_next_range(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+/*
+ * This finds an empty area from the highest address to the lowest.
+ * AKA the "topdown" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+
+static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
+ unsigned long addr)
+{
+ memset(mas, 0, sizeof(struct ma_state));
+ mas->tree = tree;
+ mas->index = mas->last = addr;
+ mas->max = ULONG_MAX;
+ mas->status = ma_start;
+ mas->node = NULL;
+}
+
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ return mas->status == ma_active;
+}
+
+static inline bool mas_is_err(struct ma_state *mas)
+{
+ return mas->status == ma_error;
+}
+
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static __always_inline void mas_reset(struct ma_state *mas)
+{
+ mas->status = ma_start;
+ mas->node = NULL;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
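
A minimal iteration sketch, assuming the tree is in RCU mode so read-side protection suffices:

static void example_dump(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX)
		pr_info("entry %p spans [%lx, %lx]\n",
			entry, mas.index, mas.last);
	rcu_read_unlock();
}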
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+/**
+ * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
+ * current location.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Set the internal maple state values to a sub-range.
+ * Please use mas_set_range() if you do not know where you are in the tree.
+ */
+static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
+ unsigned long last)
+{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
+ mas->index = start;
+ mas->last = last;
+}
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas_reset(mas);
+ __mas_set_range(mas, start, last);
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+	mas_set_range(mas, index, index);
+}
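
For instance, one ma_state can be reused for repeated point lookups (a sketch; the caller is assumed to hold whatever lock or RCU protection the tree requires):

static void *example_lookup(struct ma_state *mas, unsigned long index)
{
	mas_set(mas, index);	/* restart the next walk from the root */
	return mas_walk(mas);	/* NULL if nothing is stored at index */
}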
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * Initialise @mt as an empty tree with the default (zero) flags.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ WARN_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ WARN_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to start the search from. Subsequently used as iterator.
+ * @__max: The maximum limit for @index
+ *
+ * This iterator skips all entries which resolve to a NULL pointer,
+ * e.g. entries which have been reserved with XA_ZERO_ENTRY.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
+
+#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index ff7b7607c8cf..693eba9869e4 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -24,12 +24,20 @@
#define MARVELL_PHY_ID_88E3016 0x01410e60
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
+#define MARVELL_PHY_ID_88X2222 0x01410f10
+#define MARVELL_PHY_ID_88Q2110 0x002b0980
+#define MARVELL_PHY_ID_88Q2220 0x002b0b20
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
+#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+
+/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
*/
-#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
+#define MARVELL_PHY_ID_88E6393_FAMILY 0x002b0b9b
#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
diff --git a/include/linux/math.h b/include/linux/math.h
new file mode 100644
index 000000000000..dd4152711de7
--- /dev/null
+++ b/include/linux/math.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MATH_H
+#define _LINUX_MATH_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+#include <uapi/linux/kernel.h>
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+
+/**
+ * round_up - round up to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round up to (must be a power of 2)
+ *
+ * Rounds @x up to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding up, use roundup() below.
+ */
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+/**
+ * round_down - round down to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round down to (must be a power of 2)
+ *
+ * Rounds @x down to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding down, use rounddown() below.
+ */
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
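
Illustrative values (these follow directly from the definitions; the checks assume static_assert from <linux/build_bug.h>):

static_assert(round_up(10, 8) == 16);
static_assert(round_up(16, 8) == 16);	/* already a multiple: unchanged */
static_assert(round_down(10, 8) == 8);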
+
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
+
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+
+#define DIV_ROUND_UP_ULL(ll, d) \
+ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
+
+#if BITS_PER_LONG == 32
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
+#else
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
+#endif
+
+/**
+ * roundup - round up to the next specified multiple
+ * @x: the value to round up
+ * @y: multiple to round up to
+ *
+ * Rounds @x up to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_up().
+ */
+#define roundup(x, y) ( \
+{ \
+ typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+/**
+ * rounddown - round down to next specified multiple
+ * @x: the value to round
+ * @y: multiple to round down to
+ *
+ * Rounds @x down to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_down().
+ */
+#define rounddown(x, y) ( \
+{ \
+ typeof(x) __x = (x); \
+ __x - (__x % (y)); \
+} \
+)
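
roundup() and rounddown() expand to statement expressions, so they cannot sit in static_assert(); a hypothetical runtime self-check illustrates them instead:

static void example_round_checks(void)
{
	WARN_ON(roundup(10, 6) != 12);	/* any multiple, not only powers of 2 */
	WARN_ON(rounddown(10, 6) != 6);
}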
+
+/*
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if the dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
+ */
+#define DIV_ROUND_CLOSEST(x, divisor)( \
+{ \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) > 0 || \
+ ((typeof(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
+} \
+)
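
For example (a hypothetical check; note the halfway case rounds away from zero for signed values):

static void example_div_round_closest(void)
{
	WARN_ON(DIV_ROUND_CLOSEST(7, 2) != 4);		/*  3.5 ->  4 */
	WARN_ON(DIV_ROUND_CLOSEST(-7, 2) != -4);	/* -3.5 -> -4 */
	WARN_ON(DIV_ROUND_CLOSEST(5, 3) != 2);		/* 1.67 ->  2 */
}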
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
+
+#define __STRUCT_FRACT(type) \
+struct type##_fract { \
+ __##type numerator; \
+ __##type denominator; \
+};
+__STRUCT_FRACT(s16)
+__STRUCT_FRACT(u16)
+__STRUCT_FRACT(s32)
+__STRUCT_FRACT(u32)
+#undef __STRUCT_FRACT
+
+/* Calculate "x * n / d" without unnecessary overflow or loss of precision. */
+#define mult_frac(x, n, d) \
+({ \
+ typeof(x) x_ = (x); \
+ typeof(n) n_ = (n); \
+ typeof(d) d_ = (d); \
+ \
+ typeof(x_) q = x_ / d_; \
+ typeof(x_) r = x_ % d_; \
+ q * n_ + r * n_ / d_; \
+})
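
A sketch of why the quotient/remainder split matters: with a 32-bit type the naive x * n would overflow, while mult_frac() stays within range:

static void example_mult_frac(void)
{
	u32 rate = 1000000000;	/* 1 GHz */

	/* rate * 7 = 7e9 exceeds U32_MAX, yet the result is exact. */
	WARN_ON(mult_frac(rate, 7u, 3u) != 2333333333u);
}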
+
+#define sector_div(a, b) do_div(a, b)
+
+/**
+ * abs - return absolute value of an argument
+ * @x: the value. If it is unsigned type, it is converted to signed type first.
+ * char is treated as if it was signed (regardless of whether it really is)
+ * but the macro's return type is preserved as char.
+ *
+ * Return: an absolute value of x.
+ */
+#define abs(x) __abs_choose_expr(x, long long, \
+ __abs_choose_expr(x, long, \
+ __abs_choose_expr(x, int, \
+ __abs_choose_expr(x, short, \
+ __abs_choose_expr(x, char, \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), char), \
+ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
+ ((void)0)))))))
+
+#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), signed type) || \
+ __builtin_types_compatible_p(typeof(x), unsigned type), \
+ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+
+/**
+ * abs_diff - return absolute value of the difference between the arguments
+ * @a: the first argument
+ * @b: the second argument
+ *
+ * @a and @b have to be of the same type. With this restriction we compare
+ * signed to signed and unsigned to unsigned. The result is the subtraction of
+ * the smaller of the two from the bigger, hence the result is always a
+ * positive value.
+ *
+ * Return: an absolute value of the difference between the @a and @b.
+ */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void)(&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); \
+})
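
A small illustrative check of the two helpers above (hypothetical):

static void example_abs_checks(void)
{
	int v = -5;
	u32 a = 3, b = 10;

	WARN_ON(abs(v) != 5);
	WARN_ON(abs_diff(a, b) != 7);	/* argument order does not matter */
}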
+
+/**
+ * reciprocal_scale - "scale" a value into range [0, ep_ro)
+ * @val: value
+ * @ep_ro: right open interval endpoint
+ *
+ * Perform a "reciprocal multiplication" in order to "scale" a value into
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
+ * This is useful, e.g. for accessing an index of an array containing
+ * @ep_ro elements. Think of it as a sort of modulus, only that
+ * the result isn't that of modulo. ;) Note that if the initial input is a
+ * small value, the result will be 0.
+ *
+ * Return: a result based on @val in interval [0, @ep_ro).
+ */
+static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+{
+ return (u32)(((u64) val * ep_ro) >> 32);
+}
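
A typical use is hash-to-bucket selection (a hypothetical helper):

static inline u32 example_pick_bucket(u32 hash, u32 nr_buckets)
{
	return reciprocal_scale(hash, nr_buckets);	/* always < nr_buckets */
}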
+
+u64 int_pow(u64 base, unsigned int exp);
+unsigned long int_sqrt(unsigned long);
+
+#if BITS_PER_LONG < 64
+u32 int_sqrt64(u64 x);
+#else
+static inline u32 int_sqrt64(u64 x)
+{
+ return (u32)int_sqrt(x);
+}
+#endif
+
+#endif /* _LINUX_MATH_H */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 3381d9e33c4e..bf74478926d4 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -3,6 +3,7 @@
#define _LINUX_MATH64_H
#include <linux/types.h>
+#include <linux/math.h>
#include <vdso/math64.h>
#include <asm/div64.h>
@@ -119,6 +120,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
+ *
+ * Return: dividend / divisor
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
@@ -132,6 +135,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
* div_s64 - signed 64bit divide with 32bit divisor
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
+ *
+ * Return: dividend / divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
@@ -156,14 +161,14 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
-static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
@@ -172,7 +177,7 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
#else
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
u32 ah, al;
u64 ret;
@@ -234,6 +239,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
#endif
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+ u64 ret;
+
+ /*
+ * Extract the sign before the multiplication and put it back
+ * afterwards if needed.
+ */
+ ret = mul_u64_u64_shr(abs(a), b, shift);
+
+ if (a < 0)
+ ret = -((s64) ret);
+
+ return ret;
+}
+#endif /* mul_s64_u64_shr */
+
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
@@ -265,6 +288,16 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
#define DIV64_U64_ROUND_UP(ll, d) \
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
@@ -281,7 +314,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
-/*
+/**
+ * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
+
+/**
* DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
deleted file mode 100644
index 593602fc9317..000000000000
--- a/include/linux/max17040_battery.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- */
-
-#ifndef __MAX17040_BATTERY_H_
-#define __MAX17040_BATTERY_H_
-
-struct max17040_platform_data {
- int (*battery_online)(void);
- int (*charger_online)(void);
- int (*charger_enable)(void);
-};
-
-#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 20f1e3ff6013..97e64184767d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -10,16 +10,29 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+	 * e.g. mb_cache_entry_delete_or_get() will fail.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
@@ -29,17 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a758..34dfcc77f505 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
+	/* In the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the dv0 bank */
+# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
@@ -123,7 +125,12 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
-unsigned int mc146818_get_time(struct rtc_time *time);
+bool mc146818_does_rtc_work(void);
+int mc146818_get_time(struct rtc_time *time, int timeout);
int mc146818_set_time(struct rtc_time *time);
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ int timeout,
+ void *param);
+
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 71dd10a3d928..0b971b24a804 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
@@ -76,10 +75,7 @@ struct mcb_device {
struct device *dma_dev;
};
-static inline struct mcb_device *to_mcb_device(struct device *dev)
-{
- return container_of(dev, struct mcb_device, dev);
-}
+#define to_mcb_device(__dev) container_of_const(__dev, struct mcb_device, dev)
/**
* struct mcb_driver - MEN Chameleon Bus device driver
@@ -120,7 +116,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
__mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void mcb_unregister_driver(struct mcb_driver *driver);
#define module_mcb_driver(__mcb_driver) \
- module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver)
extern void mcb_bus_add_devices(const struct mcb_bus *bus);
extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 0ce30ca78db0..139d05b26f82 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -10,139 +10,80 @@
#ifndef MDEV_H
#define MDEV_H
-struct mdev_device;
+#include <linux/device.h>
+#include <linux/uuid.h>
-/*
- * Called by the parent device driver to set the device which represents
- * this mdev in iommu protection scope. By default, the iommu device is
- * NULL, that indicates using vendor defined isolation.
- *
- * @dev: the mediated device that iommu will isolate.
- * @iommu_device: a pci device which represents the iommu for @dev.
- *
- * Return 0 for success, otherwise negative error value.
- */
-int mdev_set_iommu_device(struct device *dev, struct device *iommu_device);
+struct mdev_type;
-struct device *mdev_get_iommu_device(struct device *dev);
+struct mdev_device {
+ struct device dev;
+ guid_t uuid;
+ struct list_head next;
+ struct mdev_type *type;
+ bool active;
+};
-/**
- * struct mdev_parent_ops - Structure to be registered for each parent device to
- * register the device to mdev module.
- *
- * @owner: The module owner.
- * @dev_attr_groups: Attributes of the parent device.
- * @mdev_attr_groups: Attributes of the mediated device.
- * @supported_type_groups: Attributes to define supported types. It is mandatory
- * to provide supported types.
- * @create: Called to allocate basic resources in parent device's
- * driver for a particular mediated device. It is
- * mandatory to provide create ops.
- * @kobj: kobject of type for which 'create' is called.
- * @mdev: mdev_device structure on of mediated device
- * that is being created
- * Returns integer: success (0) or error (< 0)
- * @remove: Called to free resources in parent device's driver for a
- * a mediated device. It is mandatory to provide 'remove'
- * ops.
- * @mdev: mdev_device device structure which is being
- * destroyed
- * Returns integer: success (0) or error (< 0)
- * @open: Open mediated device.
- * @mdev: mediated device.
- * Returns integer: success (0) or error (< 0)
- * @release: release mediated device
- * @mdev: mediated device.
- * @read: Read emulation callback
- * @mdev: mediated device structure
- * @buf: read buffer
- * @count: number of bytes to read
- * @ppos: address.
- * Retuns number on bytes read on success or error.
- * @write: Write emulation callback
- * @mdev: mediated device structure
- * @buf: write buffer
- * @count: number of bytes to be written
- * @ppos: address.
- * Retuns number on bytes written on success or error.
- * @ioctl: IOCTL callback
- * @mdev: mediated device structure
- * @cmd: ioctl command
- * @arg: arguments to ioctl
- * @mmap: mmap callback
- * @mdev: mediated device structure
- * @vma: vma structure
- * Parent device that support mediated device should be registered with mdev
- * module with mdev_parent_ops structure.
- **/
-struct mdev_parent_ops {
- struct module *owner;
- const struct attribute_group **dev_attr_groups;
- const struct attribute_group **mdev_attr_groups;
- struct attribute_group **supported_type_groups;
+struct mdev_type {
+	/* set by the driver before calling mdev_register_parent(): */
+ const char *sysfs_name;
+ const char *pretty_name;
+
+	/* set by the core, can be used by drivers */
+ struct mdev_parent *parent;
- int (*create)(struct kobject *kobj, struct mdev_device *mdev);
- int (*remove)(struct mdev_device *mdev);
- int (*open)(struct mdev_device *mdev);
- void (*release)(struct mdev_device *mdev);
- ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
- size_t count, loff_t *ppos);
- long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+ /* internal only */
+ struct kobject kobj;
+ struct kobject *devices_kobj;
};
-/* interface for exporting mdev supported type attributes */
-struct mdev_type_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf);
- ssize_t (*store)(struct kobject *kobj, struct device *dev,
- const char *buf, size_t count);
+/* embedded into the struct device that the mdev devices hang off */
+struct mdev_parent {
+ struct device *dev;
+ struct mdev_driver *mdev_driver;
+ struct kset *mdev_types_kset;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
+ struct mdev_type **types;
+ unsigned int nr_types;
+ atomic_t available_instances;
};
-#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
-struct mdev_type_attribute mdev_type_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-#define MDEV_TYPE_ATTR_RW(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
-#define MDEV_TYPE_ATTR_RO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
-#define MDEV_TYPE_ATTR_WO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
/**
* struct mdev_driver - Mediated device driver
- * @name: driver name
+ * @device_api: string to return for the device_api sysfs
+ * @max_instances: maximum number of instances supported (optional)
* @probe: called when new device created
* @remove: called when device removed
+ * @get_available: Return the max number of instances that can be created
+ * @show_description: Print a description of the mtype
* @driver: device driver structure
- *
**/
struct mdev_driver {
- const char *name;
- int (*probe)(struct device *dev);
- void (*remove)(struct device *dev);
+ const char *device_api;
+ unsigned int max_instances;
+ int (*probe)(struct mdev_device *dev);
+ void (*remove)(struct mdev_device *dev);
+ unsigned int (*get_available)(struct mdev_type *mtype);
+ ssize_t (*show_description)(struct mdev_type *mtype, char *buf);
struct device_driver driver;
};
-#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-
-void *mdev_get_drvdata(struct mdev_device *mdev);
-void mdev_set_drvdata(struct mdev_device *mdev, void *data);
-const guid_t *mdev_uuid(struct mdev_device *mdev);
-
-extern struct bus_type mdev_bus_type;
-
-int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops);
-void mdev_unregister_device(struct device *dev);
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types);
+void mdev_unregister_parent(struct mdev_parent *parent);
-int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
+int mdev_register_driver(struct mdev_driver *drv);
void mdev_unregister_driver(struct mdev_driver *drv);
-struct device *mdev_parent_dev(struct mdev_device *mdev);
-struct device *mdev_dev(struct mdev_device *mdev);
-struct mdev_device *mdev_from_dev(struct device *dev);
+static inline struct device *mdev_dev(struct mdev_device *mdev)
+{
+ return &mdev->dev;
+}
#endif /* MDEV_H */
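
To illustrate the new registration flow (a hypothetical driver; only the types, parent, and registration call come from this header):

static struct mdev_type example_type = {
	.sysfs_name  = "example-1",
	.pretty_name = "Example mediated device",
};
static struct mdev_type *example_types[] = { &example_type };
static struct mdev_parent example_parent;

/* Called from the physical device's probe path. */
static int example_setup(struct device *dev, struct mdev_driver *drv)
{
	return mdev_register_parent(&example_parent, dev, drv,
				    example_types, ARRAY_SIZE(example_types));
}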
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 5d71e8a8500f..cffabdbce075 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -33,8 +33,16 @@ struct mdiobb_ops {
struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
+ unsigned int override_op_c22;
+ u8 op_c22_read;
+ u8 op_c22_write;
};
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c45(struct mii_bus *bus, int devad, int phy, int reg);
+int mdiobb_write_c45(struct mii_bus *bus, int devad, int phy, int reg, u16 val);
+
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
diff --git a/include/linux/mdio-xpcs.h b/include/linux/mdio-xpcs.h
deleted file mode 100644
index 9a841aa5982d..000000000000
--- a/include/linux/mdio-xpcs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
- * Synopsys DesignWare XPCS helpers
- */
-
-#ifndef __LINUX_MDIO_XPCS_H
-#define __LINUX_MDIO_XPCS_H
-
-#include <linux/phy.h>
-#include <linux/phylink.h>
-
-struct mdio_xpcs_args {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- struct mii_bus *bus;
- int addr;
-};
-
-struct mdio_xpcs_ops {
- int (*validate)(struct mdio_xpcs_args *xpcs,
- unsigned long *supported,
- struct phylink_link_state *state);
- int (*config)(struct mdio_xpcs_args *xpcs,
- const struct phylink_link_state *state);
- int (*get_state)(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state);
- int (*link_up)(struct mdio_xpcs_args *xpcs, int speed,
- phy_interface_t interface);
- int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface);
-};
-
-#if IS_ENABLED(CONFIG_MDIO_XPCS)
-struct mdio_xpcs_ops *mdio_xpcs_get_ops(void);
-#else
-static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
-{
- return NULL;
-}
-#endif
-
-#endif /* __LINUX_MDIO_XPCS_H */
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 898cbf00332a..68f8d2e970d4 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -7,15 +7,9 @@
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
-/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
- * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
- */
-#define MII_ADDR_C45 (1<<30)
-#define MII_DEVADDR_C45_SHIFT 16
-#define MII_REGADDR_C45_MASK GENMASK(15, 0)
-
struct gpio_desc;
struct mii_bus;
struct reset_control;
@@ -44,12 +38,17 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ int reset_state;
struct gpio_desc *reset_gpio;
struct reset_control *reset_ctrl;
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
};
-#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+static inline struct mdio_device *to_mdio_device(const struct device *dev)
+{
+ return container_of(dev, struct mdio_device, dev);
+}
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -57,8 +56,12 @@ struct mdio_driver_common {
int flags;
};
#define MDIO_DEVICE_FLAG_PHY 1
-#define to_mdio_common_driver(d) \
- container_of(d, struct mdio_driver_common, driver)
+
+static inline struct mdio_driver_common *
+to_mdio_common_driver(const struct device_driver *driver)
+{
+ return container_of(driver, struct mdio_driver_common, driver);
+}
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -72,9 +75,17 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
-#define to_mdio_driver(d) \
- container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+static inline struct mdio_driver *
+to_mdio_driver(const struct device_driver *driver)
+{
+ return container_of(to_mdio_common_driver(driver), struct mdio_driver,
+ mdiodrv);
+}
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
@@ -96,6 +107,16 @@ int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
+static inline void mdio_device_get(struct mdio_device *mdiodev)
+{
+ get_device(&mdiodev->dev);
+}
+
+static inline void mdio_device_put(struct mdio_device *mdiodev)
+{
+ mdio_device_free(mdiodev);
+}
+
static inline bool mdio_phy_id_is_c45(int phy_id)
{
return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
@@ -306,7 +327,7 @@ static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
/**
* mii_10gbt_stat_mod_linkmode_lpa_t
* @advertising: target the linkmode advertisement settings
- * @adv: value of the C45 10GBASE-T AN STATUS register
+ * @lpa: value of the C45 10GBASE-T AN STATUS register
*
* A small helper function that translates C45 10GBASE-T AN STATUS register bits
* to linkmode advertisement settings. Other bits in advertising aren't changed.
@@ -322,8 +343,266 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
}
+/**
+ * mii_t1_adv_l_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [15:0] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM);
+}
+
+/**
+ * mii_t1_adv_m_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [31:16] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_100BT1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_1000BT1);
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_l_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [15:0] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_m_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [31:16] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_B10L;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_100BT1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_1000BT1;
+
+ return result;
+}
+
+/**
+ * mii_eee_cap1_mod_linkmode_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates value of following registers to the linkmode:
+ * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
+ * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
+ */
+static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv, val & MDIO_EEE_100TX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ adv, val & MDIO_EEE_1000T);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ adv, val & MDIO_EEE_10GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, val & MDIO_EEE_1000KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, val & MDIO_EEE_10GKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, val & MDIO_EEE_10GKR);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_sup_t()
+ * @adv: target the linkmode settings
+ * @val: register value
+ *
+ * A function that translates value of following registers to the linkmode:
+ * IEEE 802.3-2022 45.2.3.11 "EEE control and capability 2" register (3.21)
+ */
+static inline void mii_eee_cap2_mod_linkmode_sup_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_adv_t()
+ * @adv: the linkmode advertisement settings to modify
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the
+ * linkmode:
+ * IEEE 802.3-2022 45.2.7.16 "EEE advertisement 2" register (7.62)
+ * IEEE 802.3-2022 45.2.7.17 "EEE link partner ability 2" register (7.63)
+ * Note: This function is currently identical to
+ * mii_eee_cap2_mod_linkmode_sup_t(). However, the bits differ for certain
+ * modes that are not yet supported, so separate functions are provided
+ * from the start.
+ */
+static inline void mii_eee_cap2_mod_linkmode_adv_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * linkmode_to_mii_eee_cap1_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates linkmode to value for IEEE 802.3-2018 45.2.7.13
+ * "EEE advertisement 1" register (7.60)
+ */
+static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, adv))
+ result |= MDIO_EEE_100TX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_10GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, adv))
+ result |= MDIO_EEE_1000KX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, adv))
+ result |= MDIO_EEE_10GKX4;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, adv))
+ result |= MDIO_EEE_10GKR;
+
+ return result;
+}
+
+/**
+ * linkmode_to_mii_eee_cap2_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates linkmode to value for IEEE 802.3-2022 45.2.7.16
+ * "EEE advertisement 2" register (7.62)
+ */
+static inline u32 linkmode_to_mii_eee_cap2_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, adv))
+ result |= MDIO_EEE_2_5GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_5GT;
+
+ return result;
+}
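The EEE translators are typically used in pairs: one direction when writing the local advertisement, the other when decoding the link partner's ability registers. A hedged sketch of the read side, assuming phy_read_mmd() and the MDIO_AN_EEE_LPABLE/MDIO_AN_EEE_LPABLE2 constants from the MDIO uapi header:

	static int example_read_eee_lpa(struct phy_device *phydev,
					unsigned long *lp_advertising)
	{
		int val;

		/* EEE link partner ability 1 (register 7.61) */
		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
		if (val < 0)
			return val;
		mii_eee_cap1_mod_linkmode_t(lp_advertising, val);

		/* EEE link partner ability 2 (register 7.63) */
		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
		if (val < 0)
			return val;
		mii_eee_cap2_mod_linkmode_adv_t(lp_advertising, val);

		return 0;
	}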
+
+/**
+ * mii_10base_t1_adv_mod_linkmode_t()
+ * @adv: linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates IEEE 802.3cg-2019 45.2.7.26 "10BASE-T1 AN status"
+ * register (7.527) value to the linkmode.
+ */
+static inline void mii_10base_t1_adv_mod_linkmode_t(unsigned long *adv, u16 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ adv, val & MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L);
+}
+
+/**
+ * linkmode_adv_to_mii_10base_t1_t()
+ * @adv: linkmode advertisement settings
+ *
+ * A function that translates the linkmode to IEEE 802.3cg-2019 45.2.7.25
+ * "10BASE-T1 AN control" register (7.526) value.
+ */
+static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv))
+ result |= MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L;
+
+ return result;
+}
+
+/**
+ * mii_c73_mod_linkmode - convert a Clause 73 advertisement to linkmodes
+ * @adv: linkmode advertisement settings
+ * @lpa: array of three u16s containing the advertisement
+ *
+ * Convert an IEEE 802.3 Clause 73 advertisement to ethtool link modes.
+ */
+static inline void mii_c73_mod_linkmode(unsigned long *adv, u16 *lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_ASM_DIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_1000BASE_KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_CR4);
+ /* 100GBASE_CR10 and 100GBASE_KP4 not implemented */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_CR4);
+ /* 25GBASE_R_S not implemented */
+ /* The 25GBASE_R bit can be used for 25Gbase KR or CR modes */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ adv, lpa[2] & MDIO_AN_C73_2_2500BASE_KX);
+ /* 5GBASE_KR not implemented */
+}
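A decoding sketch for the Clause 73 helper; the three words would come from the AN LP base page ability registers (7.19..7.21, starting at MDIO_AN_LPA), and error handling is elided for brevity:

	static void example_decode_c73_lpa(struct mii_bus *bus, int addr,
					   unsigned long *lp_modes)
	{
		u16 lpa[3];
		int i;

		for (i = 0; i < 3; i++)
			lpa[i] = mdiobus_c45_read(bus, addr, MDIO_MMD_AN,
						  MDIO_AN_LPA + i);

		mii_c73_mod_linkmode(lp_modes, lpa);
	}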
+
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set);
@@ -333,29 +612,98 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum);
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val);
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set);
+
+static inline int __mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return __mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int __mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return __mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
-static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
+static inline int __mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
{
- return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
+ return __mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
}
-static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
- u16 regnum)
+static inline int __mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
{
- return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
+ return __mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
}
-static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
- u16 regnum, u16 val)
+static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
{
- return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum),
- val);
+ return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
}
-static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
+static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_c45_modify(mdiodev->bus, mdiodev->addr, devad, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_c45_modify_changed(struct mdio_device *mdiodev,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
+{
+ return mdiobus_c45_modify_changed(mdiodev->bus, mdiodev->addr, devad,
+ regnum, mask, set);
+}
+
+static inline int mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
u16 regnum)
{
- return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
+ return mdiobus_c45_read(mdiodev->bus, mdiodev->addr, devad, regnum);
+}
+
+static inline int mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
+ u16 regnum, u16 val)
+{
+ return mdiobus_c45_write(mdiodev->bus, mdiodev->addr, devad, regnum,
+ val);
}
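The mdiodev_* wrappers above exist so that drivers for non-PHY MDIO devices (switches, PCS blocks) need not pass mdiodev->bus and mdiodev->addr by hand on every access. A usage sketch with a made-up register layout:

	/* Hypothetical device register, for illustration only */
	#define EXAMPLE_CTRL_REG	0x00
	#define EXAMPLE_CTRL_ENABLE	BIT(0)

	static int example_device_enable(struct mdio_device *mdiodev)
	{
		int val;

		val = mdiodev_read(mdiodev, EXAMPLE_CTRL_REG);
		if (val < 0)
			return val;

		if (val & EXAMPLE_CTRL_ENABLE)
			return 0;	/* already enabled */

		return mdiodev_modify(mdiodev, EXAMPLE_CTRL_REG,
				      EXAMPLE_CTRL_ENABLE,
				      EXAMPLE_CTRL_ENABLE);
	}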
int mdiobus_register_device(struct mdio_device *mdiodev);
@@ -365,6 +713,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
* mdio_module_driver() - Helper macro for registering mdio drivers
+ * @_mdio_driver: driver to register
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
new file mode 100644
index 000000000000..65b550a6fc32
--- /dev/null
+++ b/include/linux/mdio/mdio-i2c.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO I2C bridge
+ *
+ * Copyright (C) 2015 Russell King
+ */
+#ifndef MDIO_I2C_H
+#define MDIO_I2C_H
+
+struct device;
+struct i2c_adapter;
+struct mii_bus;
+
+enum mdio_i2c_proto {
+ MDIO_I2C_NONE,
+ MDIO_I2C_MARVELL_C22,
+ MDIO_I2C_C45,
+ MDIO_I2C_ROLLBALL,
+};
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol);
+
+#endif
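For context, a sketch of how an SFP host might put this header to use; mdio_i2c_alloc() is the only API assumed from here, the rest is generic mdiobus plumbing and the function name is illustrative:

	static struct mii_bus *example_sfp_mdio(struct device *dev,
						struct i2c_adapter *i2c)
	{
		struct mii_bus *bus;
		int ret;

		/* Most copper SFPs use the Marvell Clause 22 convention;
		 * RollBall modules tunnel Clause 45 instead.
		 */
		bus = mdio_i2c_alloc(dev, i2c, MDIO_I2C_MARVELL_C22);
		if (IS_ERR(bus))
			return bus;

		ret = mdiobus_register(bus);
		if (ret) {
			mdiobus_free(bus);
			return ERR_PTR(ret);
		}

		return bus;
	}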
diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
new file mode 100644
index 000000000000..1ce699740af6
--- /dev/null
+++ b/include/linux/mdio/mdio-mscc-miim.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for the MDIO interface of Microsemi network switches.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ * Copyright (C) 2021 Innovative Advantage
+ */
+#ifndef MDIO_MSCC_MIIM_H
+#define MDIO_MSCC_MIIM_H
+
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ const char *name, struct regmap *mii_regmap,
+ int status_offset, bool ignore_read_errors);
+
+#endif
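mscc_miim_setup() layers an MDIO bus on top of an existing regmap. A minimal sketch, assuming the caller has already created the regmap and that the MIIM registers start at offset 0 (both assumptions, as is the bus name):

	static int example_miim_probe(struct device *dev, struct regmap *map)
	{
		struct mii_bus *bus;
		int err;

		err = mscc_miim_setup(dev, &bus, "example-miim", map,
				      0 /* status_offset */,
				      false /* ignore_read_errors */);
		if (err)
			return err;

		return devm_mdiobus_register(dev, bus);
	}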
diff --git a/include/linux/mdio/mdio-regmap.h b/include/linux/mdio/mdio-regmap.h
new file mode 100644
index 000000000000..679d9069846b
--- /dev/null
+++ b/include/linux/mdio/mdio-regmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for MMIO-mapped MDIO devices. Some IPs expose internal PHYs or
+ * PCS within the MMIO-mapped area.
+ *
+ * Copyright (C) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+#ifndef MDIO_REGMAP_H
+#define MDIO_REGMAP_H
+
+#include <linux/phy.h>
+
+struct device;
+struct regmap;
+
+struct mdio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ char name[MII_BUS_ID_SIZE];
+ u8 valid_addr;
+ bool autoscan;
+};
+
+struct mii_bus *devm_mdio_regmap_register(struct device *dev,
+ const struct mdio_regmap_config *config);
+
+#endif
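Registering a regmap-backed bus for a single internal PCS might look roughly like this; the address and naming scheme are illustrative, and autoscan is disabled so that only valid_addr is probed:

	static struct mii_bus *example_pcs_mdio(struct device *dev,
						struct regmap *regmap)
	{
		struct mdio_regmap_config cfg = {
			.parent		= dev,
			.regmap		= regmap,
			.valid_addr	= 0,	/* the sole PCS address */
			.autoscan	= false,
		};

		snprintf(cfg.name, sizeof(cfg.name), "%s-pcs", dev_name(dev));

		return devm_mdio_regmap_register(dev, &cfg);
	}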
diff --git a/include/linux/mdio/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
new file mode 100644
index 000000000000..9e588965dc83
--- /dev/null
+++ b/include/linux/mdio/mdio-xgene.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Applied Micro X-Gene SoC MDIO Driver
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ */
+
+#ifndef __MDIO_XGENE_H__
+#define __MDIO_XGENE_H__
+
+#include <linux/bits.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
+#define BLOCK_DIAG_CSR_OFFSET 0xd000
+#define XGENET_CONFIG_REG_ADDR 0x20
+
+#define MAC_ADDR_REG_OFFSET 0x00
+#define MAC_COMMAND_REG_OFFSET 0x04
+#define MAC_WRITE_REG_OFFSET 0x08
+#define MAC_READ_REG_OFFSET 0x0c
+#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+
+#define CLKEN_OFFSET 0x08
+#define SRST_OFFSET 0x00
+
+#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
+#define MENET_BLOCK_MEM_RDY_ADDR 0x74
+
+#define MAC_CONFIG_1_ADDR 0x00
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+#define SOFT_RESET BIT(31)
+
+#define MII_MGMT_CONFIG_ADDR 0x20
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+
+#define MIIM_COMMAND_ADDR 0x20
+#define MIIM_FIELD_ADDR 0x24
+#define MIIM_CONFIGURATION_ADDR 0x28
+#define MIIM_LINKFAILVECTOR_ADDR 0x2c
+#define MIIM_INDICATOR_ADDR 0x30
+#define MIIMRD_FIELD_ADDR 0x34
+
+#define MDIO_CSR_OFFSET 0x5000
+
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+
+#define HSTMIIMWRDAT_POS 0
+#define HSTMIIMWRDAT_LEN 16
+#define HSTPHYADX_POS 23
+#define HSTPHYADX_LEN 5
+#define HSTREGADX_POS 18
+#define HSTREGADX_LEN 5
+#define HSTLDCMD BIT(3)
+#define HSTMIIMCMD_POS 0
+#define HSTMIIMCMD_LEN 3
+
+#define BUSY_MASK BIT(0)
+#define READ_CYCLE_MASK BIT(0)
+
+enum xgene_enet_cmd {
+ XGENE_ENET_WR_CMD = BIT(31),
+ XGENE_ENET_RD_CMD = BIT(30)
+};
+
+enum {
+ MIIM_CMD_IDLE,
+ MIIM_CMD_LEGACY_WRITE,
+ MIIM_CMD_LEGACY_READ,
+};
+
+enum xgene_mdio_id {
+ XGENE_MDIO_RGMII = 1,
+ XGENE_MDIO_XFI
+};
+
+struct xgene_mdio_pdata {
+ struct clk *clk;
+ struct device *dev;
+ void __iomem *mac_csr_addr;
+ void __iomem *diag_csr_addr;
+ void __iomem *mdio_csr_addr;
+ struct mii_bus *mdio_bus;
+ int mdio_id;
+ spinlock_t mac_lock; /* mac lock */
+};
+
+/* Set the specified value into a bit-field defined by its starting position
+ * and length within a single u64.
+ */
+static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+#define SET_VAL(field, val) \
+ xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
+
+#define SET_BIT(field) \
+ xgene_enet_set_field_value(field ## _POS, 1, 1)
+
+/* Get the value from a bit-field defined by its starting position
+ * and length within the specified u64.
+ */
+static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define GET_VAL(field, src) \
+ xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
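As a worked example of the field helpers, packing a management command addressing PHY 3, register 2 and pulling the 16 data bits back out of a response word might look like this (a sketch; real accesses also poll the indicator register):

	static u16 example_pack_and_parse(u64 resp, u32 *cmd)
	{
		/* (3 & 0x1f) << PHY_ADDR_POS | (2 & 0x1f) << REG_ADDR_POS */
		*cmd = SET_VAL(PHY_ADDR, 3) | SET_VAL(REG_ADDR, 2);

		/* the low 16 bits of the response carry the read data */
		return GET_VAL(HSTMIIMWRDAT, resp);
	}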
+
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
+int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
+int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
+struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
+
+#endif /* __MDIO_XGENE_H__ */
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
new file mode 100644
index 000000000000..506912ad363b
--- /dev/null
+++ b/include/linux/mei_aux.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_MEI_AUX_H
+#define _LINUX_MEI_AUX_H
+
+#include <linux/auxiliary_bus.h>
+
+/**
+ * struct mei_aux_device - mei auxiliary device
+ * @aux_dev: auxiliary device object
+ * @irq: interrupt driving the mei auxiliary device
+ * @bar: mmio resource bar reserved for the mei auxiliary device
+ * @ext_op_mem: resource for extended operational memory,
+ * used in graphics PXP mode.
+ * @slow_firmware: The device has slow underlying firmware.
+ * Such firmware requires larger operation timeouts.
+ */
+struct mei_aux_device {
+ struct auxiliary_device aux_dev;
+ int irq;
+ struct resource bar;
+ struct resource ext_op_mem;
+ bool slow_firmware;
+};
+
+#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct mei_aux_device, aux_dev)
+
+#endif /* _LINUX_MEI_AUX_H */
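On the consumer side, an auxiliary driver's probe routine recovers the enclosing mei_aux_device with the accessor macro; a minimal sketch (the function name is illustrative):

	static int example_mei_aux_probe(struct auxiliary_device *aux_dev,
					 const struct auxiliary_device_id *id)
	{
		struct mei_aux_device *adev =
			auxiliary_dev_to_mei_aux_dev(aux_dev);

		dev_info(&aux_dev->dev, "irq %d, bar %pR\n",
			 adev->irq, &adev->bar);
		return 0;
	}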
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 52aa4821093a..b38a56a13f39 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -11,6 +11,7 @@
struct mei_cl_device;
struct mei_device;
+struct scatterlist;
typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
@@ -30,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
* @rx_work: async work to execute Rx event callback
* @rx_cb: Drivers register this callback to get asynchronous ME
* Rx buffer pending notifications.
- * @notif_work: async work to execute FW notif event callback
+ * @notif_work: async work to execute FW notify event callback
* @notif_cb: Drivers register this callback to get asynchronous ME
* FW notification pending notifications.
*
- * @do_match: wheather device can be matched with a driver
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
* @priv_data: client private data
*/
@@ -68,7 +69,7 @@ struct mei_cl_driver {
int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
- int (*remove)(struct mei_cl_device *cldev);
+ void (*remove)(struct mei_cl_device *cldev);
};
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
@@ -91,10 +92,25 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
mei_cldev_driver_register,\
mei_cldev_driver_unregister)
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length);
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag);
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout);
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag);
+ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
+ size_t length, u8 *vtag);
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
@@ -108,6 +124,14 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
int mei_cldev_enable(struct mei_cl_device *cldev);
int mei_cldev_disable(struct mei_cl_device *cldev);
-bool mei_cldev_enabled(struct mei_cl_device *cldev);
+bool mei_cldev_enabled(const struct mei_cl_device *cldev);
+ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
+ u8 client_id, u32 fence_id,
+ struct scatterlist *sg_in,
+ size_t total_in_len,
+ struct scatterlist *sg_out);
+
+void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size);
+int mei_cldev_dma_unmap(struct mei_cl_device *cldev);
#endif /* _LINUX_MEI_CL_BUS_H */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 5c4a18a91f89..ae4526389261 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -16,10 +16,6 @@
#include <asm/mem_encrypt.h>
-#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
#ifdef CONFIG_AMD_MEM_ENCRYPT
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 9d925db0d355..e2082240586d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
-#ifdef __KERNEL__
/*
* Logical memory blocks.
@@ -28,15 +27,29 @@ extern unsigned long long max_possible_pfn;
/**
* enum memblock_flags - definition of memory region attributes
* @MEMBLOCK_NONE: no special request
- * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * commandline, try keeping this memory region hotunpluggable. Does not apply
+ * to memblocks added ("hotplugged") after early boot.
* @MEMBLOCK_MIRROR: mirrored region
- * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
+ * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
+ * not initialized (only for reserved regions).
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
+ MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
};
/**
@@ -50,7 +63,7 @@ struct memblock_region {
phys_addr_t base;
phys_addr_t size;
enum memblock_flags flags;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int nid;
#endif
};
@@ -86,7 +99,6 @@ struct memblock {
};
extern struct memblock memblock;
-extern int memblock_debug;
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
@@ -98,31 +110,31 @@ void memblock_discard(void);
static inline void memblock_discard(void) {}
#endif
-#define memblock_dbg(fmt, ...) \
- if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
-unsigned long memblock_free_all(void);
-void reset_node_managed_pages(pg_data_t *pgdat);
+void memblock_free_all(void);
+void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);
/* Low level functions */
@@ -136,10 +148,7 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
- phys_addr_t *out_end);
-
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
@@ -166,7 +175,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
/**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -177,7 +186,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, flags, \
+#define __for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
@@ -186,7 +195,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
p_start, p_end, p_nid))
/**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
* type_a and not included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -197,17 +206,40 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
- p_start, p_end, p_nid) \
+#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
+ p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
__next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
- * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, p_start, p_end) \
+ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
+
+/**
+ * for_each_reserved_mem_range - iterate over all reserved memblock areas
* @i: u64 used as loop variable
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
@@ -215,10 +247,9 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
* Walks over reserved areas of memblock. Available as soon as memblock
* is initialized.
*/
-#define for_each_reserved_mem_region(i, p_start, p_end) \
- for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \
- i != (u64)ULLONG_MAX; \
- __next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_NONE, p_start, p_end, NULL)
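With the simplified wrappers, walking early system RAM and the reserved regions no longer requires naming memblock internals; a sketch:

	static void __init example_dump_memblock(void)
	{
		phys_addr_t start, end;
		u64 i;

		for_each_mem_range(i, &start, &end)
			pr_info("memory:   %pa..%pa\n", &start, &end);

		for_each_reserved_mem_range(i, &start, &end)
			pr_info("reserved: %pa..%pa\n", &start, &end);
	}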
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
@@ -235,6 +266,16 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
+static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_RSRV_NOINIT;
+}
+
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -259,7 +300,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
unsigned long *out_spfn,
unsigned long *out_epfn);
/**
- * for_each_free_mem_range_in_zone - iterate through zone specific free
+ * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
* memblock areas
* @i: u64 used as loop variable
* @zone: zone in which all of the memory blocks reside
@@ -279,7 +320,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
/**
- * for_each_free_mem_range_in_zone_from - iterate through zone specific
+ * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
* free memblock areas from a given point
* @i: u64 used as loop variable
* @zone: zone in which all of the memory blocks reside
@@ -311,8 +352,8 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
* soon as memblock is initialized.
*/
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
- for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -328,13 +369,13 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
*/
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
p_nid) \
- for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -353,12 +394,12 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-#define MEMBLOCK_ALLOC_KASAN 1
+#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0
@@ -374,8 +415,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
- phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
{
return memblock_phys_alloc_range(size, align, 0,
MEMBLOCK_ALLOC_ACCESSIBLE);
@@ -391,13 +432,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
-static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_raw(phys_addr_t size,
+static inline void *memblock_alloc_raw(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
@@ -405,7 +446,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size,
NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_from(phys_addr_t size,
+static inline void *memblock_alloc_from(phys_addr_t size,
phys_addr_t align,
phys_addr_t min_addr)
{
@@ -413,41 +454,24 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_low(phys_addr_t size,
+static inline void *memblock_alloc_low(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_node(phys_addr_t size,
+static inline void *memblock_alloc_node(phys_addr_t size,
phys_addr_t align, int nid)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
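A usage sketch for the early allocators, pairing memblock_alloc() with the renamed pointer-based memblock_free() declared earlier in this header:

	static void __init example_early_buffer(void)
	{
		void *buf;

		/* returns zeroed memory, or NULL on failure */
		buf = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
		if (!buf)
			panic("%s: allocation failed\n", __func__);

		/* ... used only during early boot ... */

		/* memblock_free() now takes the virtual address */
		memblock_free(buf, PAGE_SIZE);
	}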
-static inline void __init memblock_free_early(phys_addr_t base,
- phys_addr_t size)
-{
- memblock_free(base, size);
-}
-
-static inline void __init memblock_free_early_nid(phys_addr_t base,
- phys_addr_t size, int nid)
-{
- memblock_free(base, size);
-}
-
-static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
/*
* Set the allocation direction to bottom-up or top-down.
*/
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -457,14 +481,13 @@ static inline void __init memblock_set_bottom_up(bool enable)
* if this is true, that said, memblock will allocate memory
* in bottom-up direction.
*/
-static inline bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
-phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
@@ -476,13 +499,7 @@ bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-extern void __memblock_dump_all(void);
-
-static inline void memblock_dump_all(void)
-{
- if (memblock_debug)
- __memblock_dump_all();
-}
+void memblock_dump_all(void);
/**
* memblock_set_current_limit - Set the current allocation limit to allow
@@ -547,15 +564,23 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
return PFN_UP(reg->base + reg->size);
}
-#define for_each_memblock(memblock_type, region) \
- for (region = memblock.memblock_type.regions; \
- region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region) \
+ for (region = memblock.memory.regions; \
+ region < (memblock.memory.regions + memblock.memory.cnt); \
region++)
-#define for_each_memblock_type(i, memblock_type, rgn) \
- for (i = 0, rgn = &memblock_type->regions[0]; \
- i < memblock_type->cnt; \
- i++, rgn = &memblock_type->regions[i])
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region) \
+ for (region = memblock.reserved.regions; \
+ region < (memblock.reserved.regions + memblock.reserved.cnt); \
+ region++)
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
@@ -568,9 +593,7 @@ extern void *alloc_large_system_hash(const char *tablename,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+#define HASH_ZERO 0x00000002 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
@@ -583,13 +606,12 @@ extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif
#ifdef CONFIG_MEMTEST
-extern void early_memtest(phys_addr_t start, phys_addr_t end);
+void early_memtest(phys_addr_t start, phys_addr_t end);
+void memtest_report_meminfo(struct seq_file *m);
#else
-static inline void early_memtest(phys_addr_t start, phys_addr_t end)
-{
-}
+static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
+static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
-#endif /* __KERNEL__ */
#endif /* _LINUX_MEMBLOCK_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d0b036123c6a..394fd0a887ae 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -14,6 +14,7 @@
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
+#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
@@ -21,6 +22,7 @@
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
+#include <linux/shrinker.h>
struct mem_cgroup;
struct obj_cgroup;
@@ -33,6 +35,10 @@ enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
MEMCG_PERCPU_B,
+ MEMCG_VMALLOC,
+ MEMCG_KMEM,
+ MEMCG_ZSWAP_B,
+ MEMCG_ZSWAPPED,
MEMCG_NR_STAT,
};
@@ -42,6 +48,7 @@ enum memcg_memory_event {
MEMCG_MAX,
MEMCG_OOM,
MEMCG_OOM_KILL,
+ MEMCG_OOM_GROUP_KILL,
MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
@@ -56,7 +63,6 @@ struct mem_cgroup_reclaim_cookie {
#ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
struct mem_cgroup_id {
int id;
@@ -75,12 +81,8 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-struct memcg_vmstats_percpu {
- long stat[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
+struct memcg_vmstats_percpu;
+struct memcg_vmstats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
@@ -88,17 +90,23 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
-struct lruvec_stat {
- long count[NR_VM_NODE_STAT_ITEMS];
+struct lruvec_stats_percpu {
+ /* Local (CPU and cgroup) state */
+ long state[NR_VM_NODE_STAT_ITEMS];
+
+ /* Delta calculation for lockless upward propagation */
+ long state_prev[NR_VM_NODE_STAT_ITEMS];
};
-/*
- * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
- * which have elements charged to this memcg.
- */
-struct memcg_shrinker_map {
- struct rcu_head rcu;
- unsigned long map[];
+struct lruvec_stats {
+ /* Aggregated (CPU and subtree) state */
+ long state[NR_VM_NODE_STAT_ITEMS];
+
+ /* Non-hierarchical (CPU aggregated) state */
+ long state_local[NR_VM_NODE_STAT_ITEMS];
+
+ /* Pending child counts during tree propagation */
+ long state_pending[NR_VM_NODE_STAT_ITEMS];
};
/*
@@ -107,18 +115,14 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
- /* Legacy local VM stats */
- struct lruvec_stat __percpu *lruvec_stat_local;
-
- /* Subtree VM stats (batched updates) */
- struct lruvec_stat __percpu *lruvec_stat_cpu;
- atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
+ struct lruvec_stats lruvec_stats;
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter;
- struct memcg_shrinker_map __rcu *shrinker_map;
+ struct shrinker_info __rcu *shrinker_info;
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
@@ -140,7 +144,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[];
+ struct mem_cgroup_threshold entries[] __counted_by(size);
};
struct mem_cgroup_thresholds {
@@ -154,21 +158,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
-};
-
-#if defined(CONFIG_SMP)
-struct memcg_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name) struct memcg_padding name;
-#else
-#define MEMCG_PADDING(name)
-#endif
-
/*
* Remember four most recent foreign writebacks with dirty pages in this
* cgroup. Inode sharing is expected to be uncommon and, even if we miss
@@ -197,7 +186,7 @@ struct obj_cgroup {
struct mem_cgroup *memcg;
atomic_t nr_charged_bytes;
union {
- struct list_head list;
+ struct list_head list; /* protected by objcg_lock */
struct rcu_head rcu;
};
};
@@ -215,28 +204,36 @@ struct mem_cgroup {
struct mem_cgroup_id id;
/* Accounted resources */
- struct page_counter memory;
- struct page_counter swap;
+ struct page_counter memory; /* Both v1 & v2 */
+
+ union {
+ struct page_counter swap; /* v2 only */
+ struct page_counter memsw; /* v1 only */
+ };
/* Legacy consumer-oriented counters */
- struct page_counter memsw;
- struct page_counter kmem;
- struct page_counter tcpmem;
+ struct page_counter kmem; /* v1 only */
+ struct page_counter tcpmem; /* v1 only */
/* Range enforcement for interrupt charges */
struct work_struct high_work;
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+ unsigned long zswap_max;
+
+ /*
+ * Prevent pages from this memcg from being written back from zswap to
+ * swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
+#endif
+
unsigned long soft_limit;
/* vmpressure notifications */
struct vmpressure vmpressure;
/*
- * Should the accounting and control be hierarchical, per subtree?
- */
- bool use_hierarchy;
-
- /*
* Should the OOM killer kill all belonging tasks, had it kill one?
*/
bool oom_group;
@@ -277,29 +274,20 @@ struct mem_cgroup {
spinlock_t move_lock;
unsigned long move_lock_flags;
- MEMCG_PADDING(_pad1_);
-
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
- */
- atomic_t moving_account;
- struct task_struct *move_lock_task;
-
- /* Legacy local VM stats and events */
- struct memcg_vmstats_percpu __percpu *vmstats_local;
-
- /* Subtree VM stats and events (batched updates) */
- struct memcg_vmstats_percpu __percpu *vmstats_percpu;
+ CACHELINE_PADDING(_pad1_);
- MEMCG_PADDING(_pad2_);
-
- atomic_long_t vmstats[MEMCG_NR_STAT];
- atomic_long_t vmevents[NR_VM_EVENT_ITEMS];
+ /* memory.stat */
+ struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+ /*
+ * Hint of reclaim pressure for socket memory management. Note
+ * that this indicator should NOT be used in legacy cgroup mode
+ * where socket memory is accounted/charged separately.
+ */
unsigned long socket_pressure;
/* Legacy tcp memory accounting */
@@ -307,13 +295,28 @@ struct mem_cgroup {
int tcpmem_pressure;
#ifdef CONFIG_MEMCG_KMEM
- /* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
- enum memcg_kmem_state kmem_state;
- struct obj_cgroup __rcu *objcg;
- struct list_head objcg_list; /* list of inherited objcgs */
+ /*
+ * memcg->objcg is wiped out as a part of the objcg reparenting
+ * process. memcg->orig_objcg preserves a pointer (and a reference)
+ * to the original objcg for the entire life of the memcg.
+ */
+ struct obj_cgroup __rcu *objcg;
+ struct obj_cgroup *orig_objcg;
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
#endif
+ CACHELINE_PADDING(_pad2_);
+
+ /*
+ * set > 0 if pages under this cgroup are moving to other cgroup.
+ */
+ atomic_t moving_account;
+ struct task_struct *move_lock_task;
+
+ struct memcg_vmstats_percpu __percpu *vmstats_percpu;
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
@@ -328,23 +331,243 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
+ struct mem_cgroup_per_node *nodeinfo[];
};
/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
+ * size of first charge trial.
+ * TODO: it may be necessary to use larger numbers on big-iron systems, or
+ * to size this dynamically based on the workload.
*/
-#define MEMCG_CHARGE_BATCH 32U
+#define MEMCG_CHARGE_BATCH 64U
extern struct mem_cgroup *root_mem_cgroup;
-static __always_inline bool memcg_stat_item_in_bytes(int idx)
+enum page_memcg_data_flags {
+ /* page->memcg_data is a pointer to an objcgs vector */
+ MEMCG_DATA_OBJCGS = (1UL << 0),
+ /* page has been accounted as a non-slab kernel page */
+ MEMCG_DATA_KMEM = (1UL << 1),
+ /* the next bit after the last actual flag */
+ __NR_MEMCG_DATA_FLAGS = (1UL << 2),
+};
+
+#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
+
+static inline bool folio_memcg_kmem(struct folio *folio);
+
+/*
+ * After the initialization objcg->memcg is always pointing at
+ * a valid memcg, but can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released:
+ * e.g. acquire the rcu_read_lock or css_set_lock.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
- if (idx == MEMCG_PERCPU_B)
- return true;
- return vmstat_item_in_bytes(idx);
+ return READ_ONCE(objcg->memcg);
+}
+
+/*
+ * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * kmem folios.
+ */
+static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * __folio_objcg - get the object cgroup associated with a kmem folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the object cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper object cgroup pointer. It's not safe to call this function
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * LRU folios.
+ */
+static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
+
+ return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * folio_memcg - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some type of folios, e.g. slab folios or ex-slab folios.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - folio_memcg_lock()
+ * - exclusive reference
+ * - mem_cgroup_trylock_pages()
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return obj_cgroup_memcg(__folio_objcg(folio));
+ return __folio_memcg(folio);
+}
+
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return folio_memcg(page_folio(page));
+}
+
+/**
+ * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some type of folios, e.g. slab folios or ex-slab folios.
+ *
+ * Return: A pointer to the memory cgroup associated with the folio,
+ * or NULL.
+ */
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+{
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
+
+ objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
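A sketch of the lockless pattern described above: the caller enters an RCU read-side critical section before looking up the memcg, which keeps a kmem folio's objcg->memcg pointer from being released underneath it:

	static void example_inspect_folio(struct folio *folio)
	{
		struct mem_cgroup *memcg;

		rcu_read_lock();
		memcg = folio_memcg_rcu(folio);
		if (memcg)
			pr_debug("folio in memcg id %d\n", memcg->id.id);
		rcu_read_unlock();
	}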
+
+/*
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. Unlike folio_memcg(), this function can take any folio
+ * as an argument. It has to be used in cases where it is not known
+ * whether a folio has an associated memory cgroup pointer, an object
+ * cgroups vector, or an object cgroup.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - folio_memcg_lock()
+ * - exclusive reference
+ * - mem_cgroup_trylock_pages()
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+ /*
+ * Because folio->memcg_data might be changed asynchronously
+ * for slabs, READ_ONCE() should be used here.
+ */
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
+
+ if (memcg_data & MEMCG_DATA_OBJCGS)
+ return NULL;
+
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
+
+ objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ if (PageTail(page))
+ return NULL;
+ return folio_memcg_check((struct folio *)page);
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+retry:
+ memcg = obj_cgroup_memcg(objcg);
+ if (unlikely(!css_tryget(&memcg->css)))
+ goto retry;
+ rcu_read_unlock();
+
+ return memcg;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
+ * @folio: Pointer to the folio.
+ *
+ * Checks if the folio has the MemcgKmem flag set. The caller must ensure
+ * that the folio has an associated memory cgroup. It's not safe to call
+ * this function against some types of folios, e.g. slab folios.
+ */
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
+ VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
+ return folio->memcg_data & MEMCG_DATA_KMEM;
+}
+
+#else
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
+#endif
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return folio_memcg_kmem(page_folio(page));
}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
@@ -357,17 +580,20 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
+ *min = *low = 0;
+
if (mem_cgroup_disabled())
- return 0;
+ return;
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
- * mem_cgroup_protected calculation is not robust enough to keep
+ * mem_cgroup_calculate_protection is not robust enough to keep
* the protection invariant for calculated effective values for
* parallel reclaimers with different reclaim target. This is
* especially a problem for tail memcgs (as they have pages on LRU)
@@ -398,65 +624,114 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
*
*/
if (root == memcg)
- return 0;
-
- if (in_low_reclaim)
- return READ_ONCE(memcg->memory.emin);
+ return;
- return max(READ_ONCE(memcg->memory.emin),
- READ_ONCE(memcg->memory.elow));
+ *min = READ_ONCE(memcg->memory.emin);
+ *low = READ_ONCE(memcg->memory.elow);
}
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg);
-static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
/*
* The root memcg doesn't account charges, and doesn't support
- * protection.
+ * protection. The target memcg's protection is ignored, see
+ * mem_cgroup_calculate_protection() and mem_cgroup_protection()
*/
- return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
-
+ return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
+ memcg == target;
}
-static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
- if (!mem_cgroup_supports_protection(memcg))
+ if (mem_cgroup_unprotected(target, memcg))
return false;
return READ_ONCE(memcg->memory.elow) >=
page_counter_read(&memcg->memory);
}
-static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
- if (!mem_cgroup_supports_protection(memcg))
+ if (mem_cgroup_unprotected(target, memcg))
return false;
return READ_ONCE(memcg->memory.emin) >=
page_counter_read(&memcg->memory);
}
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
+void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
+
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
+
+/**
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: Reclaim mode.
+ *
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for folios allocated for swapin.
+ *
+ * Return: 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_charge(folio, mm, gfp);
+}
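A charging sketch for a newly allocated, non-swapin folio, mirroring what page-cache insertion paths do (the surrounding error handling is the caller's responsibility):

	static int example_charge_new_folio(struct folio *folio,
					    struct mm_struct *mm)
	{
		int err;

		err = mem_cgroup_charge(folio, mm, GFP_KERNEL);
		if (err)
			return err;	/* e.g. -ENOMEM over the limit */

		/* ... install the folio; if that fails, undo with: */
		/* mem_cgroup_uncharge(folio); */
		return 0;
	}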
+
+int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ long nr_pages);
+
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp, swp_entry_t entry);
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+void __mem_cgroup_uncharge(struct folio *folio);
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge(folio);
+}
-static struct mem_cgroup_per_node *
-mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
+void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
+static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
- return memcg->nodeinfo[nid];
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_folios(folios);
}
+void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
+void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
+
/**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
+ * @pgdat: pglist_data of the node in question
*
* Returns the lru list vector holding pages for a given @memcg &
- * @node combination. This can be the node lruvec, if the memory
+ * @pgdat combination. This can be the node lruvec, if the memory
* controller is disabled.
*/
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
@@ -473,7 +748,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
if (!memcg)
memcg = root_mem_cgroup;
- mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+ mz = memcg->nodeinfo[pgdat->node_id];
lruvec = &mz->lruvec;
out:
/*
@@ -486,13 +761,39 @@ out:
return lruvec;
}
-struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
+/**
+ * folio_lruvec - return lruvec for isolating/putting an LRU folio
+ * @folio: Pointer to the folio.
+ *
+ * This function relies on folio->mem_cgroup being stable.
+ */
+static inline struct lruvec *folio_lruvec(struct folio *folio)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
+ return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
+}
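
For example (illustrative only), a caller holding a stable memcg binding could combine this with lruvec_page_state(), declared further down, to read a per-lruvec statistic:

	static unsigned long sketch_folio_dirty(struct folio *folio)
	{
		struct lruvec *lruvec = folio_lruvec(folio);

		return lruvec_page_state(lruvec, NR_FILE_DIRTY);
	}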
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
-struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
+struct mem_cgroup *get_mem_cgroup_from_current(void);
+
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flags);
+
+#ifdef CONFIG_DEBUG_VM
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
+#else
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+#endif
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
@@ -509,21 +810,25 @@ static inline void obj_cgroup_get(struct obj_cgroup *objcg)
percpu_ref_get(&objcg->refcnt);
}
+static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
+ unsigned long nr)
+{
+ percpu_ref_get_many(&objcg->refcnt, nr);
+}
+
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
percpu_ref_put(&objcg->refcnt);
}
-/*
- * After the initialization objcg->memcg is always pointing at
- * a valid memcg, but can be atomically swapped to the parent memcg.
- *
- * The caller must ensure that the returned memcg won't be released:
- * e.g. acquire the rcu_read_lock or css_set_lock.
- */
-static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
- return READ_ONCE(objcg->memcg);
+ return !memcg || css_tryget(&memcg->css);
+}
+
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
@@ -539,8 +844,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
-int mem_cgroup_scan_tasks(struct mem_cgroup *,
- int (*)(struct task_struct *, void *), void *);
+void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*)(struct task_struct *, void *), void *arg);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
@@ -551,6 +856,15 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+}
+
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return mem_cgroup_from_css(seq_css(m));
@@ -571,14 +885,11 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
*
- * Returns the parent memcg, or NULL if this is the root or the memory
- * controller is in legacy no-hierarchy mode.
+ * Returns the parent memcg, or NULL if this is the root.
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
- if (!memcg->memory.parent)
- return NULL;
- return mem_cgroup_from_counter(memcg->memory.parent, memory);
+ return mem_cgroup_from_css(memcg->css.parent);
}
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
@@ -586,8 +897,6 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
{
if (root == memcg)
return true;
- if (!root->use_hierarchy)
- return false;
return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}
@@ -605,7 +914,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -615,11 +924,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return !!(memcg->css.flags & CSS_ONLINE);
}
-/*
- * For memory reclaim.
- */
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
@@ -633,7 +937,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -666,48 +970,27 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-#ifdef CONFIG_MEMCG_SWAP
-extern bool cgroup_memory_noswap;
-#endif
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void __unlock_page_memcg(struct mem_cgroup *memcg);
-void unlock_page_memcg(struct page *page);
+void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
-/*
- * idx can be of type enum memcg_stat_item or node_stat_item.
- * Keep in sync with memcg_exact_page_state().
- */
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+/* try to stabilize folio_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
- long x = atomic_long_read(&memcg->vmstats[idx]);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
+ rcu_read_lock();
-/*
- * idx can be of type enum memcg_stat_item or node_stat_item.
- * Keep in sync with memcg_exact_page_state().
- */
-static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
- int idx)
-{
- long x = 0;
- int cpu;
+ if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+ return true;
- for_each_possible_cpu(cpu)
- x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
+ rcu_read_unlock();
+ return false;
}
-void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
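
A minimal sketch of the intended pairing; the page walk itself is hypothetical:

	static void sketch_walk(struct mem_cgroup *memcg)
	{
		if (!mem_cgroup_trylock_pages(memcg))
			return;	/* the helper already dropped the RCU read lock */

		/* rcu_read_lock() is held, so folio_memcg() is stable here */
		/* ... inspect folio_memcg() for the pages of interest ... */

		mem_cgroup_unlock_pages();
	}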
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
@@ -720,37 +1003,23 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
-/**
- * mod_memcg_page_state - update page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * The @page must be locked or the caller must use lock_page_memcg()
- * to prevent double accounting when the page is concurrently being
- * moved to another memcg:
- *
- * lock_page(page) or lock_page_memcg(page)
- * if (TestClearPageState(page))
- * mod_memcg_page_state(page, state, -1);
- * unlock_page(page) or unlock_page_memcg(page)
- *
- * Kernel pages are an exception to this, since they'll never move.
- */
-static inline void __mod_memcg_page_state(struct page *page,
- int idx, int val)
-{
- if (page->mem_cgroup)
- __mod_memcg_state(page->mem_cgroup, idx, val);
-}
-
static inline void mod_memcg_page_state(struct page *page,
int idx, int val)
{
- if (page->mem_cgroup)
- mod_memcg_state(page->mem_cgroup, idx, val);
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = page_memcg(page);
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
+ rcu_read_unlock();
}
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
@@ -761,7 +1030,7 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- x = atomic_long_read(&pn->lruvec_stat[idx]);
+ x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -774,14 +1043,12 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
{
struct mem_cgroup_per_node *pn;
long x = 0;
- int cpu;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
+ x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -789,21 +1056,20 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
+
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
- int val);
-void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
-
-void mod_memcg_obj_state(void *p, int idx, int val);
+void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_slab_state(p, idx, val);
+ __mod_lruvec_kmem_state(p, idx, val);
local_irq_restore(flags);
}
@@ -817,47 +1083,6 @@ static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
local_irq_restore(flags);
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_state(lruvec, idx, val);
- local_irq_restore(flags);
-}
-
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- struct page *head = compound_head(page); /* rmap on tail pages */
- pg_data_t *pgdat = page_pgdat(page);
- struct lruvec *lruvec;
-
- /* Untracked pages have no memcg, no lruvec. Update only the node */
- if (!head->mem_cgroup) {
- __mod_node_page_state(pgdat, idx, val);
- return;
- }
-
- lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
- local_irq_restore(flags);
-}
-
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
-
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count);
@@ -872,11 +1097,13 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
-static inline void count_memcg_page_event(struct page *page,
- enum vm_event_item idx)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
- if (page->mem_cgroup)
- count_memcg_events(page->mem_cgroup, idx, 1);
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, nr);
}
static inline void count_memcg_event_mm(struct mm_struct *mm,
@@ -897,12 +1124,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
+ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+ event == MEMCG_SWAP_FAIL;
+
atomic_long_inc(&memcg->memory_events_local[event]);
- cgroup_file_notify(&memcg->events_local_file);
+ if (!swap_event)
+ cgroup_file_notify(&memcg->events_local_file);
do {
atomic_long_inc(&memcg->memory_events[event]);
- cgroup_file_notify(&memcg->events_file);
+ if (swap_event)
+ cgroup_file_notify(&memcg->swap_events_file);
+ else
+ cgroup_file_notify(&memcg->events_file);
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
break;
@@ -927,16 +1161,56 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
rcu_read_unlock();
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
+void split_page_memcg(struct page *head, int old_order, int new_order);
+
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
-#define MEM_CGROUP_ID_MAX 0
-struct mem_cgroup;
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return NULL;
+}
+
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return false;
+}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
@@ -958,11 +1232,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
- return 0;
+ *min = *low = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -970,31 +1245,69 @@ static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
{
}
-static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ return true;
+}
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
return false;
}
-static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
return false;
}
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
+static inline void mem_cgroup_commit_charge(struct folio *folio,
+ struct mem_cgroup *memcg)
+{
+}
+
+static inline int mem_cgroup_charge(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
+ gfp_t gfp, long nr_pages)
+{
+ return 0;
+}
+
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+{
+}
+
+static inline void mem_cgroup_uncharge(struct folio *folio)
+{
+}
+
+static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
+{
+}
+
+static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
{
}
-static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+static inline void mem_cgroup_replace_folio(struct folio *old,
+ struct folio *new)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
@@ -1004,12 +1317,17 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
return &pgdat->__lruvec;
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
{
+ struct pglist_data *pgdat = folio_pgdat(folio);
return &pgdat->__lruvec;
}
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
@@ -1026,15 +1344,60 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
-static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
+static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
+{
+ return NULL;
+}
+
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return NULL;
}
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irq(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flagsp)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
+ return &pgdat->__lruvec;
+}
+
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -1048,10 +1411,9 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
-static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*fn)(struct task_struct *, void *), void *arg)
{
- return 0;
}
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
@@ -1066,6 +1428,18 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+{
+ return NULL;
+}
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return NULL;
@@ -1108,20 +1482,27 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+static inline void folio_memcg_lock(struct folio *folio)
{
- return NULL;
}
-static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
+static inline void folio_memcg_unlock(struct folio *folio)
{
}
-static inline void unlock_page_memcg(struct page *page)
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
+ /* to match folio_memcg_rcu() */
+ rcu_read_lock();
+ return true;
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
+
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
@@ -1153,17 +1534,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
-{
- return 0;
-}
-
-static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
- int idx)
-{
- return 0;
-}
-
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
int idx,
int nr)
@@ -1176,16 +1546,14 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
{
}
-static inline void __mod_memcg_page_state(struct page *page,
- int idx,
- int nr)
+static inline void mod_memcg_page_state(struct page *page,
+ int idx, int val)
{
}
-static inline void mod_memcg_page_state(struct page *page,
- int idx,
- int nr)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
+ return 0;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
@@ -1200,36 +1568,20 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
- mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
@@ -1237,7 +1589,7 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
__mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
@@ -1245,22 +1597,6 @@ static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_memcg_obj_state(void *p, int idx, int val)
-{
-}
-
-static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
-{
- return 0;
-}
-
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
-{
-}
-
static inline void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
@@ -1273,8 +1609,8 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
{
}
-static inline void count_memcg_page_event(struct page *page,
- int idx)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
}
@@ -1282,133 +1618,93 @@ static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
-#endif /* CONFIG_MEMCG */
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_state(struct mem_cgroup *memcg,
- int idx)
-{
- __mod_memcg_state(memcg, idx, 1);
-}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void split_page_memcg(struct page *head, int old_order, int new_order)
{
- __mod_memcg_state(memcg, idx, -1);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_page_state(struct page *page,
- int idx)
-{
- __mod_memcg_page_state(page, idx, 1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_page_state(struct page *page,
- int idx)
-{
- __mod_memcg_page_state(page, idx, -1);
-}
-
-static inline void __inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- __mod_lruvec_state(lruvec, idx, 1);
-}
-
-static inline void __dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
{
- __mod_lruvec_state(lruvec, idx, -1);
+ return 0;
}
+#endif /* CONFIG_MEMCG */
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_page_state(page, idx, 1);
+ __mod_lruvec_kmem_state(p, idx, 1);
}
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_page_state(page, idx, -1);
+ __mod_lruvec_kmem_state(p, idx, -1);
}
-static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
- __mod_lruvec_slab_state(p, idx, 1);
-}
+ struct mem_cgroup *memcg;
-static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
-{
- __mod_lruvec_slab_state(p, idx, -1);
+ memcg = lruvec_memcg(lruvec);
+ if (!memcg)
+ return NULL;
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ return NULL;
+ return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
- mod_memcg_state(memcg, idx, 1);
+ spin_unlock(&lruvec->lru_lock);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- int idx)
+static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
- mod_memcg_state(memcg, idx, -1);
+ spin_unlock_irq(&lruvec->lru_lock);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_page_state(struct page *page,
- int idx)
+static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+ unsigned long flags)
{
- mod_memcg_page_state(page, idx, 1);
+ spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_page_state(struct page *page,
- int idx)
+/* Test requires a stable page->memcg binding, see page_memcg() */
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
{
- mod_memcg_page_state(page, idx, -1);
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
}
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+/* Don't lock again if the folio's lruvec is already locked */
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
+ struct lruvec *locked_lruvec)
{
- mod_lruvec_state(lruvec, idx, 1);
-}
+ if (locked_lruvec) {
+ if (folio_matches_lruvec(folio, locked_lruvec))
+ return locked_lruvec;
-static inline void dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, -1);
-}
+ unlock_page_lruvec_irq(locked_lruvec);
+ }
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
+ return folio_lruvec_lock_irq(folio);
}
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+/* Don't lock again if the folio's lruvec is already locked */
+static inline void folio_lruvec_relock_irqsave(struct folio *folio,
+ struct lruvec **lruvecp, unsigned long *flags)
{
- mod_lruvec_page_state(page, idx, -1);
-}
+ if (*lruvecp) {
+ if (folio_matches_lruvec(folio, *lruvecp))
+ return;
-static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
-{
- struct mem_cgroup *memcg;
+ unlock_page_lruvec_irqrestore(*lruvecp, *flags);
+ }
- memcg = lruvec_memcg(lruvec);
- if (!memcg)
- return NULL;
- memcg = parent_mem_cgroup(memcg);
- if (!memcg)
- return NULL;
- return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
+ *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}
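
These relock helpers exist for batched LRU processing; a rough sketch (the surrounding function is hypothetical):

	static void sketch_move_folios(struct folio_batch *fbatch)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags;
		unsigned int i;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			/* only cycles the lock when the folio's lruvec changes */
			folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
			/* ... move the folio on its LRU list ... */
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
	}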
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -1418,17 +1714,20 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
struct bdi_writeback *wb);
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
+ struct mem_cgroup *memcg;
+
if (mem_cgroup_disabled())
return;
- if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
- mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+ memcg = folio_memcg(folio);
+ if (unlikely(memcg && &memcg->css != wb->memcg_css))
+ mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}
void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
@@ -1448,7 +1747,7 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
{
}
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
}
@@ -1460,7 +1759,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+ gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
@@ -1469,19 +1769,19 @@ void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
- return true;
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return !!memcg->tcpmem_pressure;
do {
- if (time_before(jiffies, memcg->socket_pressure))
+ if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
}
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id);
+int alloc_shrinker_info(struct mem_cgroup *memcg);
+void free_shrinker_info(struct mem_cgroup *memcg);
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
+void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1491,97 +1791,99 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return false;
}
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id)
+static inline void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
{
}
#endif
#ifdef CONFIG_MEMCG_KMEM
-int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
- unsigned int nr_pages);
-void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
-struct obj_cgroup *get_obj_cgroup_from_current(void);
+/*
+ * The returned objcg pointer is safe to use without additional
+ * protection within a scope. The scope is defined either by
+ * the current task (similar to the "current" global variable)
+ * or by a set_active_memcg() pair.
+ * Please use obj_cgroup_get() to get a reference if the pointer
+ * needs to be used outside of the local scope.
+ */
+struct obj_cgroup *current_obj_cgroup(void);
+struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
-int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
-void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
+static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
+{
+ struct obj_cgroup *objcg = current_obj_cgroup();
-extern struct static_key_false memcg_kmem_enabled_key;
+ if (objcg)
+ obj_cgroup_get(objcg);
-extern int memcg_nr_cache_ids;
-void memcg_get_cache_ids(void);
-void memcg_put_cache_ids(void);
+ return objcg;
+}
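
The difference between the two matters for callers; a hedged sketch (function name and error policy are illustrative):

	static int sketch_charge_bytes(size_t size)
	{
		/* borrowed pointer: valid only within the current scope */
		struct obj_cgroup *objcg = current_obj_cgroup();

		if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size))
			return -ENOMEM;

		/* to keep the pointer past this scope, pin it instead: */
		objcg = get_obj_cgroup_from_current();
		/* ... use objcg, then drop the reference ... */
		if (objcg)
			obj_cgroup_put(objcg);
		return 0;
	}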
-/*
- * Helper macro to loop through all memcg-specific caches. Callers must still
- * check if the cache is valid (it is either valid or NULL).
- * the slab_mutex must be held when looping through those caches
- */
-#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
-static inline bool memcg_kmem_enabled(void)
+extern struct static_key_false memcg_bpf_enabled_key;
+static inline bool memcg_bpf_enabled(void)
{
- return static_branch_likely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_bpf_enabled_key);
}
-static inline bool memcg_kmem_bypass(void)
-{
- if (in_interrupt())
- return true;
+extern struct static_key_false memcg_kmem_online_key;
- /* Allow remote memcg charging in kthread contexts. */
- if ((!current->mm || (current->flags & PF_KTHREAD)) &&
- !current->active_memcg)
- return true;
- return false;
+static inline bool memcg_kmem_online(void)
+{
+ return static_branch_likely(&memcg_kmem_online_key);
}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
- if (memcg_kmem_enabled())
+ if (memcg_kmem_online())
return __memcg_kmem_charge_page(page, gfp, order);
return 0;
}
static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
- if (memcg_kmem_enabled())
+ if (memcg_kmem_online())
__memcg_kmem_uncharge_page(page, order);
}
-static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
- unsigned int nr_pages)
-{
- if (memcg_kmem_enabled())
- return __memcg_kmem_charge(memcg, gfp, nr_pages);
- return 0;
-}
-
-static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
- unsigned int nr_pages)
-{
- if (memcg_kmem_enabled())
- __memcg_kmem_uncharge(memcg, nr_pages);
-}
-
/*
- * helper for accessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
+ * A helper for accessing a memcg's kmem_id, used to look up the
+ * corresponding LRU lists.
*/
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return memcg ? memcg->kmemcg_id : -1;
}
struct mem_cgroup *mem_cgroup_from_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
+
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+ enum vm_event_item idx)
+{
+ struct mem_cgroup *memcg;
+
+ if (!memcg_kmem_online())
+ return;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ count_memcg_events(memcg, idx, 1);
+ rcu_read_unlock();
+}
#else
+static inline bool mem_cgroup_kmem_disabled(void)
+{
+ return true;
+}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
@@ -1603,32 +1905,66 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-#define for_each_memcg_cache_index(_idx) \
- for (; NULL; )
+static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline bool memcg_bpf_enabled(void)
+{
+ return false;
+}
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
{
return false;
}
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}
-static inline void memcg_get_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
+ return NULL;
}
-static inline void memcg_put_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
+ return NULL;
}
-static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+ enum vm_event_item idx)
{
- return NULL;
}
#endif /* CONFIG_MEMCG_KMEM */
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+ return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return true;
+}
+#endif
+
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index 4f1600413f91..e7abf6fa4c52 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -5,9 +5,9 @@
#include <linux/file.h>
#ifdef CONFIG_MEMFD_CREATE
-extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
#else
-static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
return -EINVAL;
}
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..69e781900082
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+#include <linux/notifier.h>
+/*
+ * Each tier covers an abstract distance chunk of size 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that we can accommodate devices with a slightly lower
+ * adistance value (slightly faster) than default DRAM adistance to be part of
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+
+struct memory_tier;
+struct memory_dev_type {
+ /* list of memory types that are part of same tier as this type */
+ struct list_head tier_sibling;
+ /* list of memory types that are managed by one driver */
+ struct list_head list;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+struct access_coordinate;
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+extern struct memory_dev_type *default_dram_type;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void put_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+int register_mt_adistance_algorithm(struct notifier_block *nb);
+int unregister_mt_adistance_algorithm(struct notifier_block *nb);
+int mt_calc_adistance(int node, int *adist);
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source);
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
+
+#else
+
+#define numa_demotion_enabled false
+#define default_dram_type NULL
+/*
+ * The CONFIG_NUMA implementation returns a non-NULL error pointer on failure.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void put_memory_type(struct memory_dev_type *memtype)
+{
+
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+
+static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int mt_calc_adistance(int node, int *adist)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source)
+{
+ return -EIO;
+}
+
+static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
+{
+ return -EIO;
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
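
For context, a driver exposing memory slower than DRAM might drive this interface roughly as follows. This is a sketch: the adistance multiplier and the probe/remove shape are illustrative, and note that the CONFIG_NUMA implementation returns an error pointer rather than NULL.

	static struct memory_dev_type *slow_type;

	static int slowmem_init_node(int nid)
	{
		/* larger abstract distance == slower than default DRAM */
		slow_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM * 5);
		if (IS_ERR(slow_type))
			return PTR_ERR(slow_type);

		init_node_memory_type(nid, slow_type);
		return 0;
	}

	static void slowmem_exit_node(int nid)
	{
		clear_node_memory_type(nid, slow_type);
		put_memory_type(slow_type);
	}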
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 439a89e758d8..c0afee5d126e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -19,17 +19,70 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+/**
+ * struct memory_group - a logical group of memory blocks
+ * @nid: The node id for all memory blocks inside the memory group.
+ * @blocks: List of all memory blocks belonging to this memory group.
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
+ * memory group.
+ * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
+ * memory group.
+ * @is_dynamic: The memory group type: static vs. dynamic
+ * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
+ * number of pages we'll have in this static memory group.
+ * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
+ * in which memory is added/removed in this dynamic memory group.
+ * This granularity defines the alignment of a unit in physical
+ * address space; it has to be at least as big as a single
+ * memory block.
+ *
+ * A memory group logically groups memory blocks; each memory block
+ * belongs to at most one memory group. A memory group corresponds to
+ * a memory device, such as a DIMM or a NUMA node, which spans multiple
+ * memory blocks and might even span multiple non-contiguous physical memory
+ * ranges.
+ *
+ * Modification of members after registration is serialized by memory
+ * hot(un)plug code.
+ */
+struct memory_group {
+ int nid;
+ struct list_head memory_blocks;
+ unsigned long present_kernel_pages;
+ unsigned long present_movable_pages;
+ bool is_dynamic;
+ union {
+ struct {
+ unsigned long max_pages;
+ } s;
+ struct {
+ unsigned long unit_pages;
+ } d;
+ };
+};
+
struct memory_block {
unsigned long start_section_nr;
unsigned long state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
- int phys_device; /* to which fru does this belong? */
- struct device dev;
int nid; /* NID for this memory block */
+ /*
+ * The single zone of this memory block if all PFNs of this memory block
+ * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
+ * managed by a single zone. NULL if multiple zones (including nodes)
+ * apply.
+ */
+ struct zone *zone;
+ struct device dev;
+ struct vmem_altmap *altmap;
+ struct memory_group *group; /* group (if any) for this block */
+ struct list_head group_next; /* next block inside memory group */
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+ atomic_long_t nr_hwpoison;
+#endif
};
int arch_get_memory_phys_device(unsigned long start_pfn);
@@ -43,12 +96,20 @@ int set_memory_block_size_order(unsigned int order);
#define MEM_GOING_ONLINE (1<<3)
#define MEM_CANCEL_ONLINE (1<<4)
#define MEM_CANCEL_OFFLINE (1<<5)
+#define MEM_PREPARE_ONLINE (1<<6)
+#define MEM_FINISH_OFFLINE (1<<7)
struct memory_notify {
+ /*
+ * The altmap_start_pfn and altmap_nr_pages fields are designated for
+ * specifying the altmap range and are exclusively intended for use in
+ * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
+ */
+ unsigned long altmap_start_pfn;
+ unsigned long altmap_nr_pages;
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
- int status_change_nid_high;
int status_change_nid;
};
@@ -59,10 +120,16 @@ struct mem_section;
* Priorities for the hotplug memory callback routines (stored in decreasing
* order in the callback chain)
*/
-#define SLAB_CALLBACK_PRI 1
-#define IPC_CALLBACK_PRI 10
+#define DEFAULT_CALLBACK_PRI 0
+#define SLAB_CALLBACK_PRI 1
+#define HMAT_CALLBACK_PRI 2
+#define CXL_CALLBACK_PRI 5
+#define MM_COMPUTE_BATCH_PRI 10
+#define CPUSET_CALLBACK_PRI 10
+#define MEMTIER_HOTPLUG_PRI 100
+#define KSM_CALLBACK_PRI 100
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_MEMORY_HOTPLUG
static inline void memory_dev_init(void)
{
return;
@@ -78,35 +145,43 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-#else
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
+}
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-int create_memory_block_devices(unsigned long start, unsigned long size);
+int create_memory_block_devices(unsigned long start, unsigned long size,
+ struct vmem_altmap *altmap,
+ struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
-extern struct memory_block *find_memory_block(struct mem_section *);
+extern struct memory_block *find_memory_block(unsigned long section_nr);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-#ifdef CONFIG_MEMORY_HOTPLUG
+extern int memory_group_register_static(int nid, unsigned long max_pages);
+extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
+extern int memory_group_unregister(int mgid);
+struct memory_group *memory_group_find_by_id(int mgid);
+typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
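
A minimal sketch of using this macro; the callback name and body are hypothetical:

	static int example_mem_notify(struct notifier_block *nb,
				      unsigned long action, void *arg)
	{
		struct memory_notify *mn = arg;

		if (action == MEM_ONLINE)
			pr_info("onlined %lu pages from PFN %lu\n",
				mn->nr_pages, mn->start_pfn);
		return NOTIFY_OK;
	}

	/* in an __init path: */
	hotplug_memory_notifier(example_mem_notify, DEFAULT_CALLBACK_PRI);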
-#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
-#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+
+#ifdef CONFIG_NUMA
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context);
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 375515803cd8..7a9ff464608d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -11,39 +11,61 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
+struct memory_group;
struct resource;
struct vmem_altmap;
+struct dev_pagemap;
-#ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
+ * For supporting node-hotadd, we have to allocate a new pgdat.
+ *
+ * If an arch has generic style NODE_DATA(),
+ * node_data[nid] = kzalloc() works well. But it depends on the architecture.
+ *
+ * In general, generic_alloc_nodedata() is used.
+ *
*/
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___pfn = pfn; \
- unsigned long ___nr = pfn_to_section_nr(___pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
- pfn_valid_within(___pfn)) \
- ___page = pfn_to_page(___pfn); \
- ___page; \
-})
+extern pg_data_t *arch_alloc_nodedata(int nid);
+extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+
+#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
+#ifdef CONFIG_NUMA
/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
+ * XXX: node-aware allocation can't be used to get the new node's memory at
+ * this time, because the pgdat for the new node is not allocated/initialized
+ * yet. To use the new node's memory, more consideration will be necessary.
*/
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
+#define generic_alloc_nodedata(nid) \
+({ \
+ memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
+})
+
+extern pg_data_t *node_data[];
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ node_data[nid] = pgdat;
+}
+
+#else /* !CONFIG_NUMA */
+
+/* never called */
+static inline pg_data_t *generic_alloc_nodedata(int nid)
+{
+ BUG();
+ return NULL;
+}
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+struct page *pfn_to_online_page(unsigned long pfn);
/* Types for control the zone type of onlined and offlined memory */
enum {
@@ -57,6 +79,50 @@ enum {
MMOP_ONLINE_MOVABLE,
};
+/* Flags for add_memory() and friends to specify memory hotplug details. */
+typedef int __bitwise mhp_t;
+
+/* No special request */
+#define MHP_NONE ((__force mhp_t)0)
+/*
+ * Allow merging of the added System RAM resource with adjacent,
+ * mergeable resources. After a successful call to add_memory_resource()
+ * with this flag set, the resource pointer must no longer be used as it
+ * might be stale, or the resource might have changed.
+ */
+#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
+
+/*
+ * We want memmap (struct page array) to be self contained.
+ * To do so, we will use the beginning of the hot-added range to build
+ * the page tables for the memmap array that describes the entire range.
+ * Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint, the core kernel can decide to not do this based on
+ * different alignment checks.
+ */
+#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
+/*
+ * The hotplugged memory is completely inaccessible while the memory is
+ * offline. The memory provider will handle MEM_PREPARE_ONLINE /
+ * MEM_FINISH_OFFLINE notifications and make the memory accessible.
+ *
+ * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY,
+ * because the altmap cannot be written (e.g., poisoned) when adding
+ * memory -- before it is set online.
+ *
+ * This allows for adding memory with an altmap that is not currently
+ * made available by a hypervisor. When onlining that memory, the
+ * hypervisor can be instructed to make that memory available, and
+ * the onlining phase will not require any memory allocations, which is
+ * helpful in low-memory situations.
+ */
+#define MHP_OFFLINE_INACCESSIBLE ((__force mhp_t)BIT(3))
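
As a usage sketch, a hotplug driver might combine these flags when adding a range, assuming the add_memory_resource() signature from this tree; nid and res come from the caller:

	static int sketch_add_range(int nid, struct resource *res)
	{
		mhp_t mhp_flags = MHP_MERGE_RESOURCE;

		if (mhp_supports_memmap_on_memory())
			mhp_flags |= MHP_MEMMAP_ON_MEMORY;

		return add_memory_resource(nid, res, mhp_flags);
	}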
+
/*
* Extended parameters for memory hotplug:
* altmap: alternative allocator for memmap array (optional)
@@ -66,8 +132,13 @@ enum {
struct mhp_params {
struct vmem_altmap *altmap;
pgprot_t pgprot;
+ struct dev_pagemap *pgmap;
};
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+bool mhp_supports_memmap_on_memory(void);
+
/*
* Zone resizing functions
*
@@ -95,16 +166,17 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
+extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, bool mhp_off_inaccessible);
+extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
- int online_type, int nid);
-extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
- unsigned long end_pfn);
-extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+ struct zone *zone, struct memory_group *group);
+extern void __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
@@ -118,10 +190,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params);
extern u64 max_mem_size;
-extern int memhp_online_type_from_str(const char *str);
+extern int mhp_online_type_from_str(const char *str);
/* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int memhp_default_online_type;
+extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -129,8 +201,7 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-extern void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap);
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
struct vmem_altmap *altmap);
@@ -149,91 +220,27 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_free_nodedata(pg_data_t *pgdat);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+void get_online_mems(void);
+void put_online_mems(void);
-#ifdef CONFIG_NUMA
-/*
- * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
- * XXX: kmalloc_node() can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
-})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
- node_data[nid] = pgdat;
+ mutex_lock(&pgdat->kswapd_lock);
}
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
+ mutex_unlock(&pgdat->kswapd_lock);
}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
+ mutex_init(&pgdat->kswapd_lock);
}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
-void get_online_mems(void);
-void put_online_mems(void);
-
-void mem_hotplug_begin(void);
-void mem_hotplug_done(void);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
@@ -256,17 +263,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline int mhp_notimplemented(const char *func)
-{
- printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
- dump_stack();
- return -ENOSYS;
-}
-
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
@@ -282,8 +278,24 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline bool mhp_supports_memmap_on_memory(void)
+{
+ return false;
+}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal, non-memory-hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* pgdat resizing functions
@@ -315,51 +327,57 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern int remove_memory(int nid, u64 start, u64 size);
-extern void __remove_memory(int nid, u64 start, u64 size);
-extern int offline_and_remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
return -EINVAL;
}
-static inline int remove_memory(int nid, u64 start, u64 size)
+static inline int remove_memory(u64 start, u64 size)
{
return -EBUSY;
}
-static inline void __remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
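
With the nid parameter dropped, tearing down a hot-added block needs only its physical range. A hedged usage sketch (names are illustrative):

	static void example_teardown(u64 start, u64 size)
	{
		/* Offlines first, then removes; can fail with -EBUSY. */
		if (offline_and_remove_memory(start, size))
			pr_warn("memory %#llx+%#llx is still in use\n",
				start, size);
	}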
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
-
-extern void __ref free_area_init_core_hotplug(int nid);
-extern int __add_memory(int nid, u64 start, u64 size);
-extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource);
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
+extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory_resource(int nid, struct resource *resource,
+ mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
- const char *resource_name);
+ const char *resource_name,
+ mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
+ unsigned long nr_pages,
+ struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
-extern void sparse_remove_section(struct mem_section *ms,
- unsigned long pfn, unsigned long nr_pages,
- unsigned long map_offset, struct vmem_altmap *altmap);
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
- int online_type);
-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
+extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+void arch_remove_linear_mapping(u64 start, u64 size);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f1c74df264d..931b118336f4 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,7 +8,6 @@
#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -18,6 +17,8 @@
struct mm_struct;
+#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */
+
#ifdef CONFIG_NUMA
/*
@@ -46,11 +47,9 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+ nodemask_t nodes; /* interleave/bind/prefer */
+ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
+
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
@@ -92,8 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
return pol;
}
-#define vma_policy(vma) ((vma)->vm_policy)
-
static inline void mpol_get(struct mempolicy *pol)
{
if (pol)
@@ -110,35 +107,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
/*
* Tree of shared policies for a shared memory region.
- * Maintain the policies in a pseudo mm that contains vmas. The vmas
- * carry the policy. As a special twist the pseudo mm is indexed in pages, not
- * bytes, so that we can work with shared memory segments bigger than
- * unsigned long.
*/
-
-struct sp_node {
- struct rb_node nd;
- unsigned long start, end;
- struct mempolicy *policy;
-};
-
struct shared_policy {
struct rb_root root;
rwlock_t lock;
};
+struct sp_node {
+ struct rb_node nd;
+ pgoff_t start, end;
+ struct mempolicy *policy;
+};
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma,
- struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
- unsigned long idx);
+ pgoff_t idx);
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
@@ -150,17 +142,8 @@ extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
-extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
-
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
-{
- struct mempolicy *mpol = get_task_policy(current);
-
- return policy_nodemask(gfp, mpol);
-}
-
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -184,19 +167,31 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return (pol->mode == MPOL_PREFERRED_MANY);
+}
+
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+
#else
struct mempolicy {};
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+ return NULL;
+}
+
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return true;
}
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
{
}
@@ -215,17 +210,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
{
}
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}
static inline struct mempolicy *
-mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
{
return NULL;
}
-#define vma_policy(vma) NULL
+static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx)
+{
+ *ilx = 0;
+ return NULL;
+}
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
@@ -281,7 +281,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+static inline int mpol_misplaced(struct folio *folio,
+ struct vm_area_struct *vma,
unsigned long address)
{
return -1; /* no node preference */
@@ -291,9 +292,10 @@ static inline void mpol_put_task_policy(struct task_struct *task)
{
}
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
- return NULL;
+ return false;
}
+
#endif /* CONFIG_NUMA */
#endif
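
A hedged sketch of the reworked lookup: get_vma_policy() now reports the interleave index through @ilx instead of callers recomputing it (reference handling elided; the node choice is purely illustrative):

	static int example_pick_node(struct vm_area_struct *vma,
				     unsigned long addr)
	{
		pgoff_t ilx;
		struct mempolicy *pol = get_vma_policy(vma, addr, 0, &ilx);

		if (pol && mpol_is_preferred_many(pol))
			return first_node(pol->nodes);
		return NUMA_NO_NODE;
	}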
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 0c964ac107c2..16c5cc807ff6 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -30,6 +30,11 @@ static inline bool mempool_initialized(mempool_t *pool)
return pool->elements != NULL;
}
+static inline bool mempool_is_saturated(mempool_t *pool)
+{
+ return READ_ONCE(pool->curr_nr) >= pool->min_nr;
+}
+
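A hedged sketch of the intended use: a free path can return surplus elements straight to the underlying allocator once the pool is refilled, avoiding the pool's internal locking (assumes a kmalloc-backed pool):

	static void example_put(mempool_t *pool, void *elem)
	{
		if (mempool_is_saturated(pool))
			kfree(elem);	/* pool already holds min_nr elements */
		else
			mempool_free(elem, pool);
	}
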
void mempool_exit(mempool_t *pool);
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
@@ -46,6 +51,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
+extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
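
mempool_alloc_preallocated() draws only from the preallocated elements and returns NULL when none are left, so it never sleeps. A hedged sketch of a caller that prefers the fast path (names are illustrative):

	static void *example_try_alloc(mempool_t *pool, gfp_t gfp_mask)
	{
		void *p = mempool_alloc_preallocated(pool);

		return p ? p : mempool_alloc(pool, gfp_mask); /* may sleep */
	}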
/*
@@ -89,6 +95,19 @@ static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
(void *) size);
}
+void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
+void mempool_kvfree(void *element, void *pool_data);
+
+static inline int mempool_init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size)
+{
+ return mempool_init(pool, min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
+}
+
+static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size)
+{
+ return mempool_create(min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
+}
+
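A hedged usage sketch for the new kvmalloc-backed pools; the element size is fixed at creation and large elements may be vmalloc-backed internally (values are illustrative):

	static mempool_t *example_make_pool(void)
	{
		/* Two preallocated 64 KiB elements. */
		return mempool_create_kvmalloc_pool(2, 64 * 1024);
	}
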
/*
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
* allocates pages of the order specified by pool_data
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index e11595256cac..c01321467789 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -3,9 +3,12 @@
#define _MEMREGION_H_
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/range.h>
+#include <linux/bug.h>
struct memregion_info {
int target_node;
+ struct range range;
};
#ifdef CONFIG_MEMREGION
@@ -16,8 +19,45 @@ static inline int memregion_alloc(gfp_t gfp)
{
return -ENOMEM;
}
-void memregion_free(int id)
+static inline void memregion_free(int id)
{
}
#endif
+
+/**
+ * cpu_cache_invalidate_memregion - drop any CPU cached data for
+ * memregions described by @res_desc
+ * @res_desc: one of the IORES_DESC_* types
+ *
+ * Perform cache maintenance after a memory event / operation that
+ * changes the contents of physical memory in a cache-incoherent manner.
+ * For example, device memory technologies like NVDIMM and CXL have
+ * device secure erase and dynamic region provisioning that can replace
+ * the memory mapped to a given physical address.
+ *
+ * Limit the functionality to architectures that have an efficient way
+ * to writeback and invalidate potentially terabytes of address space at
+ * once. Note that this routine may or may not write back any dirty
+ * contents while performing the invalidation. It is only exported for
+ * the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM'
+ * symbol namespace on bare-metal platforms.
+ *
+ * Returns 0 on success or negative error code on a failure to perform
+ * the cache maintenance.
+ */
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+int cpu_cache_invalidate_memregion(int res_desc);
+bool cpu_cache_has_invalidate_memregion(void);
+#else
+static inline bool cpu_cache_has_invalidate_memregion(void)
+{
+ return false;
+}
+
+static inline int cpu_cache_invalidate_memregion(int res_desc)
+{
+ WARN_ON_ONCE("CPU cache invalidation required");
+ return -ENXIO;
+}
+#endif
#endif /* _MEMREGION_H_ */
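
A hedged sketch of the expected calling convention: probe for support first, then pass one of the IORES_DESC_* descriptors from <linux/ioport.h> (the chosen descriptor is illustrative):

	static int example_after_secure_erase(void)
	{
		if (!cpu_cache_has_invalidate_memregion())
			return -ENXIO;
		return cpu_cache_invalidate_memregion(
				IORES_DESC_PERSISTENT_MEMORY);
	}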
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 5f5b2df06e61..3f7143ade32c 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
+
+#include <linux/mmzone.h>
+#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -16,16 +19,17 @@ struct device;
* @alloc: track pages consumed, private to vmemmap_populate()
*/
struct vmem_altmap {
- const unsigned long base_pfn;
+ unsigned long base_pfn;
const unsigned long end_pfn;
const unsigned long reserve;
unsigned long free;
unsigned long align;
unsigned long alloc;
+ bool inaccessible;
};
/*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types, each with a different
* usage.
*
* MEMORY_DEVICE_PRIVATE:
@@ -36,7 +40,14 @@ struct vmem_altmap {
* must be treated as an opaque object, rather than a "normal" struct page.
*
* A more complete discussion of unaddressable memory may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.rst.
+ * include/linux/hmm.h and Documentation/mm/hmm.rst.
+ *
+ * MEMORY_DEVICE_COHERENT:
+ * Device memory that is cache coherent from both the device's and the CPU's
+ * points of view. This is used on platforms that have an advanced system bus
+ * (like CAPI or CXL). A driver can hotplug the device memory using
+ * ZONE_DEVICE and with that memory type. Any page of a process can be
+ * migrated to such memory. However, no one should be allowed to pin such
+ * memory so that it can always be evicted.
*
* MEMORY_DEVICE_FS_DAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
@@ -46,11 +57,10 @@ struct vmem_altmap {
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
- * MEMORY_DEVICE_DEVDAX:
+ * MEMORY_DEVICE_GENERIC:
* Host memory that has similar access semantics as System RAM i.e. DMA
- * coherent and supports page pinning. In contrast to
- * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
- * character device.
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
@@ -59,34 +69,37 @@ struct vmem_altmap {
enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_COHERENT,
MEMORY_DEVICE_FS_DAX,
- MEMORY_DEVICE_DEVDAX,
+ MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
};
struct dev_pagemap_ops {
/*
- * Called once the page refcount reaches 1. (ZONE_DEVICE pages never
- * reach 0 refcount unless there is a refcount bug. This allows the
- * device driver to implement its own memory management.)
+ * Called once the page refcount reaches 0. The reference count will be
+ * reset to one by the core code after the method is called to prepare
+ * for handing out the page again.
*/
void (*page_free)(struct page *page);
/*
- * Transition the refcount in struct dev_pagemap to the dead state.
- */
- void (*kill)(struct dev_pagemap *pgmap);
-
- /*
- * Wait for refcount in struct dev_pagemap to be idle and reap it.
- */
- void (*cleanup)(struct dev_pagemap *pgmap);
-
- /*
* Used for private (un-addressable) device memory only. Must migrate
* the page back to a CPU accessible page.
*/
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+ /*
+ * Handle a memory failure that happens on a range of pfns. Notify
+ * the processes that are using these pfns, and try to recover the
+ * data on them if necessary. The mf_flags value is finally passed
+ * to the recovery function through the whole notification chain.
+ *
+ * When this is not implemented, or it returns -EOPNOTSUPP, the caller
+ * will fall back to a common handler called mf_generic_kill_procs().
+ */
+ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ unsigned long nr_pages, int mf_flags);
};
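
A hedged sketch of a minimal ops table under the reworked interface: ->memory_failure may simply be omitted, in which case the core falls back to mf_generic_kill_procs() as documented above (the fault handler is a placeholder):

	static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
	{
		return VM_FAULT_SIGBUS;	/* driver-specific migration goes here */
	}

	static const struct dev_pagemap_ops example_pgmap_ops = {
		.migrate_to_ram = example_migrate_to_ram,
	};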
#define PGMAP_ALTMAP_VALID (1 << 0)
@@ -94,29 +107,44 @@ struct dev_pagemap_ops {
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @internal_ref: internal reference if @ref is not provided by the caller
- * @done: completion for @internal_ref
- * @type: memory type: see MEMORY_* in memory_hotplug.h
+ * @done: completion for @ref
+ * @type: memory type: see MEMORY_* above in memremap.h
 * @flags: PGMAP_* flags to specify detailed behavior
+ * @vmemmap_shift: structural definition of how the vmemmap page metadata
+ * is populated, specifically the metadata page order.
+ * A zero value (default) uses base pages as the vmemmap metadata
+ * representation. A bigger value will set up compound struct pages
+ * of the requested order value.
* @ops: method table
* @owner: an opaque pointer identifying the entity that manages this
* instance. Used by various helpers to make sure that no
* foreign ZONE_DEVICE memory is accessed.
+ * @nr_range: number of ranges to be mapped
+ * @range: range to be mapped when nr_range == 1
+ * @ranges: array of ranges to be mapped when nr_range > 1
*/
struct dev_pagemap {
struct vmem_altmap altmap;
- struct resource res;
- struct percpu_ref *ref;
- struct percpu_ref internal_ref;
+ struct percpu_ref ref;
struct completion done;
enum memory_type type;
unsigned int flags;
+ unsigned long vmemmap_shift;
const struct dev_pagemap_ops *ops;
void *owner;
+ int nr_range;
+ union {
+ struct range range;
+ DECLARE_FLEX_ARRAY(struct range, ranges);
+ };
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
@@ -124,16 +152,51 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
return NULL;
}
+static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
+{
+ return 1 << pgmap->vmemmap_shift;
+}
+
+static inline bool is_device_private_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
+static inline bool folio_is_device_private(const struct folio *folio)
+{
+ return is_device_private_page(&folio->page);
+}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+
+static inline bool is_device_coherent_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_COHERENT;
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return is_device_coherent_page(&folio->page);
+}
+
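These predicates let pinning paths reject device memory that must stay migratable. A hedged sketch (an illustrative policy, not the kernel's actual GUP logic):

	static bool example_may_pin(const struct page *page)
	{
		/* Coherent device memory must always remain evictable. */
		if (is_device_private_page(page) ||
		    is_device_coherent_page(page))
			return false;
		return true;
	}
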
#ifdef CONFIG_ZONE_DEVICE
+void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
@@ -159,14 +222,9 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
return NULL;
}
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- return 0;
-}
-
-static inline void vmem_altmap_free(struct vmem_altmap *altmap,
- unsigned long nr_pfns)
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
+ return false;
}
/* when memremap_pages() is disabled all archs can remap a single page */
@@ -179,7 +237,7 @@ static inline unsigned long memremap_compat_align(void)
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
- percpu_ref_put(pgmap->ref);
+ percpu_ref_put(&pgmap->ref);
}
#endif /* _LINUX_MEMREMAP_H_ */
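
A hedged end-to-end sketch of the reworked struct dev_pagemap: a single-range pagemap with the now-embedded refcount, tied to @dev by devm_memremap_pages() (values and names are illustrative):

	static void *example_map(struct device *dev, u64 start, u64 size)
	{
		struct dev_pagemap *pgmap;

		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
		if (!pgmap)
			return ERR_PTR(-ENOMEM);

		pgmap->type = MEMORY_DEVICE_GENERIC;
		pgmap->nr_range = 1;
		pgmap->range = (struct range) {
			.start = start,
			.end = start + size - 1,
		};
		return devm_memremap_pages(dev, pgmap);
	}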
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index da4c65f9435f..ebf73d4ee969 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -281,6 +281,7 @@ struct memstick_host {
struct memstick_dev *card;
unsigned int retries;
+ bool removing;
/* Notify the host that some requests are pending. */
void (*request)(struct memstick_host *host);
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 473545a2c425..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
deleted file mode 100644
index a881d8495186..000000000000
--- a/include/linux/mfd/ab3100.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * AB3100 core access functions
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-
-#include <linux/regulator/machine.h>
-
-struct device;
-
-#ifndef MFD_AB3100_H
-#define MFD_AB3100_H
-
-
-#define AB3100_P1A 0xc0
-#define AB3100_P1B 0xc1
-#define AB3100_P1C 0xc2
-#define AB3100_P1D 0xc3
-#define AB3100_P1E 0xc4
-#define AB3100_P1F 0xc5
-#define AB3100_P1G 0xc6
-#define AB3100_R2A 0xc7
-#define AB3100_R2B 0xc8
-
-/*
- * AB3100, EVENTA1, A2 and A3 event register flags
- * these are catenated into a single 32-bit flag in the code
- * for event notification broadcasts.
- */
-#define AB3100_EVENTA1_ONSWA (0x01<<16)
-#define AB3100_EVENTA1_ONSWB (0x02<<16)
-#define AB3100_EVENTA1_ONSWC (0x04<<16)
-#define AB3100_EVENTA1_DCIO (0x08<<16)
-#define AB3100_EVENTA1_OVER_TEMP (0x10<<16)
-#define AB3100_EVENTA1_SIM_OFF (0x20<<16)
-#define AB3100_EVENTA1_VBUS (0x40<<16)
-#define AB3100_EVENTA1_VSET_USB (0x80<<16)
-
-#define AB3100_EVENTA2_READY_TX (0x01<<8)
-#define AB3100_EVENTA2_READY_RX (0x02<<8)
-#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8)
-#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8)
-#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8)
-#define AB3100_EVENTA2_MIDR (0x20<<8)
-#define AB3100_EVENTA2_BATTERY_REM (0x40<<8)
-#define AB3100_EVENTA2_ALARM (0x80<<8)
-
-#define AB3100_EVENTA3_ADC_TRIG5 (0x01)
-#define AB3100_EVENTA3_ADC_TRIG4 (0x02)
-#define AB3100_EVENTA3_ADC_TRIG3 (0x04)
-#define AB3100_EVENTA3_ADC_TRIG2 (0x08)
-#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10)
-#define AB3100_EVENTA3_ADC_TRIGVTX (0x20)
-#define AB3100_EVENTA3_ADC_TRIG1 (0x40)
-#define AB3100_EVENTA3_ADC_TRIG0 (0x80)
-
-/* AB3100, STR register flags */
-#define AB3100_STR_ONSWA (0x01)
-#define AB3100_STR_ONSWB (0x02)
-#define AB3100_STR_ONSWC (0x04)
-#define AB3100_STR_DCIO (0x08)
-#define AB3100_STR_BOOT_MODE (0x10)
-#define AB3100_STR_SIM_OFF (0x20)
-#define AB3100_STR_BATT_REMOVAL (0x40)
-#define AB3100_STR_VBUS (0x80)
-
-/*
- * AB3100 contains 8 regulators, one external regulator controller
- * and a buck converter, further the LDO E and buck converter can
- * have separate settings if they are in sleep mode, this is
- * modeled as a separate regulator.
- */
-#define AB3100_NUM_REGULATORS 10
-
-/**
- * struct ab3100
- * @access_mutex: lock out concurrent accesses to the AB3100 registers
- * @dev: pointer to the containing device
- * @i2c_client: I2C client for this chip
- * @testreg_client: secondary client for test registers
- * @chip_name: name of this chip variant
- * @chip_id: 8 bit chip ID for this chip variant
- * @event_subscribers: event subscribers are listed here
- * @startup_events: a copy of the first reading of the event registers
- * @startup_events_read: whether the first events have been read
- *
- * This struct is PRIVATE and devices using it should NOT
- * access ANY fields. It is used as a token for calling the
- * AB3100 functions.
- */
-struct ab3100 {
- struct mutex access_mutex;
- struct device *dev;
- struct i2c_client *i2c_client;
- struct i2c_client *testreg_client;
- char chip_name[32];
- u8 chip_id;
- struct blocking_notifier_head event_subscribers;
- u8 startup_events[3];
- bool startup_events_read;
-};
-
-/**
- * struct ab3100_platform_data
- * Data supplied to initialize board connections to the AB3100
- * @reg_constraints: regulator constraints for target board
- * the order of these constraints are: LDO A, C, D, E,
- * F, G, H, K, EXT and BUCK.
- * @reg_initvals: initial values for the regulator registers
- * plus two sleep settings for LDO E and the BUCK converter.
- * exactly AB3100_NUM_REGULATORS+2 values must be sent in.
- * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK,
- * BUCK sleep, LDO D. (LDO D need to be initialized last.)
- * @external_voltage: voltage level of the external regulator.
- */
-struct ab3100_platform_data {
- struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS];
- u8 reg_initvals[AB3100_NUM_REGULATORS+2];
- int external_voltage;
-};
-
-int ab3100_event_register(struct ab3100 *ab3100,
- struct notifier_block *nb);
-int ab3100_event_unregister(struct ab3100 *ab3100,
- struct notifier_block *nb);
-
-#endif /* MFD_AB3100_H */
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 23040b6f1615..7f07cfe44753 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -28,282 +28,6 @@ struct abx500_init_settings {
u8 setting;
};
-/* Battery driver related data */
-/*
- * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
- * with a NTC resistor to both identify the battery and to measure its
- * temperature. Different phone manufactures uses different techniques to both
- * identify the battery and to read its temperature.
- */
-enum abx500_adc_therm {
- ABx500_ADC_THERM_BATCTRL,
- ABx500_ADC_THERM_BATTEMP,
-};
-
-/**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combines the identification resistor with a
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct abx500_res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
- * @voltage: Voltage in mV
- * @capacity: Capacity in percent
- */
-struct abx500_v_to_cap {
- int voltage;
- int capacity;
-};
-
-/* Forward declaration */
-struct abx500_fg;
-
-/**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @overbat_threshold: Over battery threshold, in mV
- * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
- * Resolution in 50 mV step.
- * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
- * Resolution in 50 mV step.
- * @user_cap_limit Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres This is the threshold where we stop reporting
- * battery full while in maintenance, in per cent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debounce_time: Sets battery debounce time
- */
-struct abx500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int overbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debounce_time;
-};
-
-/**
- * struct abx500_charger_maximization - struct used by the board config.
- * @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
- * @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
- */
-struct abx500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct abx500_battery_type - different batteries supported
- * @name: battery technology
- * @resis_high: battery upper resistance limit
- * @resis_low: battery lower resistance limit
- * @charge_full_design: Maximum battery capacity in mAh
- * @nominal_voltage: Nominal voltage of the battery in mV
- * @termination_vol: max voltage upto which battery can be charged
- * @termination_curr battery charging termination current in mA
- * @recharge_cap battery capacity limit that will trigger a new
- * full charging cycle in the case where maintenan-
- * -ce charging has been disabled
- * @normal_cur_lvl: charger current in normal state in mA
- * @normal_vol_lvl: charger voltage in normal state in mV
- * @maint_a_cur_lvl: charger current in maintenance A state in mA
- * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
- * @maint_a_chg_timer_h: charge time in maintenance A state
- * @maint_b_cur_lvl: charger current in maintenance B state in mA
- * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
- * @maint_b_chg_timer_h: charge time in maintenance B state
- * @low_high_cur_lvl: charger current in temp low/high state in mA
- * @low_high_vol_lvl: charger voltage in temp low/high state in mV'
- * @battery_resistance: battery inner resistance in mOhm.
- * @n_r_t_tbl_elements: number of elements in r_to_t_tbl
- * @r_to_t_tbl: table containing resistance to temp points
- * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
- * @v_to_cap_tbl: Voltage to capacity (in %) table
- * @n_batres_tbl_elements number of elements in the batres_tbl
- * @batres_tbl battery internal resistance vs temperature table
- */
-struct abx500_battery_type {
- int name;
- int resis_high;
- int resis_low;
- int charge_full_design;
- int nominal_voltage;
- int termination_vol;
- int termination_curr;
- int recharge_cap;
- int normal_cur_lvl;
- int normal_vol_lvl;
- int maint_a_cur_lvl;
- int maint_a_vol_lvl;
- int maint_a_chg_timer_h;
- int maint_b_cur_lvl;
- int maint_b_vol_lvl;
- int maint_b_chg_timer_h;
- int low_high_cur_lvl;
- int low_high_vol_lvl;
- int battery_resistance;
- int n_temp_tbl_elements;
- const struct abx500_res_to_temp *r_to_t_tbl;
- int n_v_cap_tbl_elements;
- const struct abx500_v_to_cap *v_to_cap_tbl;
- int n_batres_tbl_elements;
- const struct batres_vs_temp *batres_tbl;
-};
-
-/**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct abx500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct abx500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct abx500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct abx500_bm_data - abx500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_now present battery temperature
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @abx500_adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @auto_trig flag to enable auto adc trigger
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @n_chg_out_curr number of elements in array chg_output_curr
- * @n_chg_in_curr number of elements in array chg_input_curr
- * @chg_output_curr charger output current level map
- * @chg_input_curr charger input current level map
- * @maxi maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct abx500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_now;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool autopower_cfg;
- bool ac_enabled;
- bool usb_enabled;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- bool auto_trig;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- int n_chg_out_curr;
- int n_chg_in_curr;
- int *chg_output_curr;
- int *chg_input_curr;
- const struct abx500_maxim_parameters *maxi;
- const struct abx500_bm_capacity_levels *cap_levels;
- struct abx500_battery_type *bat_type;
- const struct abx500_bm_charger_parameters *chg_params;
- const struct abx500_fg_parameters *fg_params;
-};
-
-enum {
- NTC_EXTERNAL = 0,
- NTC_INTERNAL,
-};
-
-int ab8500_bm_of_probe(struct device *dev,
- struct device_node *np,
- struct abx500_bm_data *bm);
-
int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
u8 value);
int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
deleted file mode 100644
index 903e94c189d8..000000000000
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ /dev/null
@@ -1,476 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright ST-Ericsson 2012.
- *
- * Author: Arun Murthy <arun.murthy@stericsson.com>
- */
-
-#ifndef _AB8500_BM_H
-#define _AB8500_BM_H
-
-#include <linux/kernel.h>
-#include <linux/mfd/abx500.h>
-
-/*
- * System control 2 register offsets.
- * bank = 0x02
- */
-#define AB8500_MAIN_WDOG_CTRL_REG 0x01
-#define AB8500_LOW_BAT_REG 0x03
-#define AB8500_BATT_OK_REG 0x04
-/*
- * USB/ULPI register offsets
- * Bank : 0x5
- */
-#define AB8500_USB_LINE_STAT_REG 0x80
-#define AB8500_USB_LINE_CTRL2_REG 0x82
-#define AB8500_USB_LINK1_STAT_REG 0x94
-
-/*
- * Charger / status register offfsets
- * Bank : 0x0B
- */
-#define AB8500_CH_STATUS1_REG 0x00
-#define AB8500_CH_STATUS2_REG 0x01
-#define AB8500_CH_USBCH_STAT1_REG 0x02
-#define AB8500_CH_USBCH_STAT2_REG 0x03
-#define AB8540_CH_USBCH_STAT3_REG 0x04
-#define AB8500_CH_STAT_REG 0x05
-
-/*
- * Charger / control register offfsets
- * Bank : 0x0B
- */
-#define AB8500_CH_VOLT_LVL_REG 0x40
-#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
-#define AB8500_CH_OPT_CRNTLVL_REG 0x42
-#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
-#define AB8500_CH_WD_TIMER_REG 0x50
-#define AB8500_CHARG_WD_CTRL 0x51
-#define AB8500_BTEMP_HIGH_TH 0x52
-#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
-#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
-#define AB8500_BATT_OVV 0x55
-#define AB8500_CHARGER_CTRL 0x56
-#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
-
-/*
- * Charger / main control register offsets
- * Bank : 0x0B
- */
-#define AB8500_MCH_CTRL1 0x80
-#define AB8500_MCH_CTRL2 0x81
-#define AB8500_MCH_IPT_CURLVL_REG 0x82
-#define AB8500_CH_WD_REG 0x83
-
-/*
- * Charger / USB control register offsets
- * Bank : 0x0B
- */
-#define AB8500_USBCH_CTRL1_REG 0xC0
-#define AB8500_USBCH_CTRL2_REG 0xC1
-#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
-#define AB8540_USB_PP_MODE_REG 0xC5
-#define AB8540_USB_PP_CHR_REG 0xC6
-
-/*
- * Gas Gauge register offsets
- * Bank : 0x0C
- */
-#define AB8500_GASG_CC_CTRL_REG 0x00
-#define AB8500_GASG_CC_ACCU1_REG 0x01
-#define AB8500_GASG_CC_ACCU2_REG 0x02
-#define AB8500_GASG_CC_ACCU3_REG 0x03
-#define AB8500_GASG_CC_ACCU4_REG 0x04
-#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
-#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
-#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
-#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
-#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
-#define AB8500_GASG_CC_OFFSET_REG 0x0A
-#define AB8500_GASG_CC_NCOV_ACCU 0x10
-#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
-#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
-#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
-#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
-
-/*
- * Interrupt register offsets
- * Bank : 0x0E
- */
-#define AB8500_IT_SOURCE2_REG 0x01
-#define AB8500_IT_SOURCE21_REG 0x14
-
-/*
- * RTC register offsets
- * Bank: 0x0F
- */
-#define AB8500_RTC_BACKUP_CHG_REG 0x0C
-#define AB8500_RTC_CC_CONF_REG 0x01
-#define AB8500_RTC_CTRL_REG 0x0B
-#define AB8500_RTC_CTRL1_REG 0x11
-
-/*
- * OTP register offsets
- * Bank : 0x15
- */
-#define AB8500_OTP_CONF_15 0x0E
-
-/* GPADC constants from AB8500 spec, UM0836 */
-#define ADC_RESOLUTION 1024
-#define ADC_CH_MAIN_MIN 0
-#define ADC_CH_MAIN_MAX 20030
-#define ADC_CH_VBUS_MIN 0
-#define ADC_CH_VBUS_MAX 20030
-#define ADC_CH_VBAT_MIN 2300
-#define ADC_CH_VBAT_MAX 4800
-#define ADC_CH_BKBAT_MIN 0
-#define ADC_CH_BKBAT_MAX 3200
-
-/* Main charge i/p current */
-#define MAIN_CH_IP_CUR_0P9A 0x80
-#define MAIN_CH_IP_CUR_1P0A 0x90
-#define MAIN_CH_IP_CUR_1P1A 0xA0
-#define MAIN_CH_IP_CUR_1P2A 0xB0
-#define MAIN_CH_IP_CUR_1P3A 0xC0
-#define MAIN_CH_IP_CUR_1P4A 0xD0
-#define MAIN_CH_IP_CUR_1P5A 0xE0
-
-/* ChVoltLevel */
-#define CH_VOL_LVL_3P5 0x00
-#define CH_VOL_LVL_4P0 0x14
-#define CH_VOL_LVL_4P05 0x16
-#define CH_VOL_LVL_4P1 0x1B
-#define CH_VOL_LVL_4P15 0x20
-#define CH_VOL_LVL_4P2 0x25
-#define CH_VOL_LVL_4P6 0x4D
-
-/* ChOutputCurrentLevel */
-#define CH_OP_CUR_LVL_0P1 0x00
-#define CH_OP_CUR_LVL_0P2 0x01
-#define CH_OP_CUR_LVL_0P3 0x02
-#define CH_OP_CUR_LVL_0P4 0x03
-#define CH_OP_CUR_LVL_0P5 0x04
-#define CH_OP_CUR_LVL_0P6 0x05
-#define CH_OP_CUR_LVL_0P7 0x06
-#define CH_OP_CUR_LVL_0P8 0x07
-#define CH_OP_CUR_LVL_0P9 0x08
-#define CH_OP_CUR_LVL_1P4 0x0D
-#define CH_OP_CUR_LVL_1P5 0x0E
-#define CH_OP_CUR_LVL_1P6 0x0F
-#define CH_OP_CUR_LVL_2P 0x3F
-
-/* BTEMP High thermal limits */
-#define BTEMP_HIGH_TH_57_0 0x00
-#define BTEMP_HIGH_TH_52 0x01
-#define BTEMP_HIGH_TH_57_1 0x02
-#define BTEMP_HIGH_TH_62 0x03
-
-/* current is mA */
-#define USB_0P1A 100
-#define USB_0P2A 200
-#define USB_0P3A 300
-#define USB_0P4A 400
-#define USB_0P5A 500
-
-#define LOW_BAT_3P1V 0x20
-#define LOW_BAT_2P3V 0x00
-#define LOW_BAT_RESET 0x01
-#define LOW_BAT_ENABLE 0x01
-
-/* Backup battery constants */
-#define BUP_ICH_SEL_50UA 0x00
-#define BUP_ICH_SEL_150UA 0x04
-#define BUP_ICH_SEL_300UA 0x08
-#define BUP_ICH_SEL_700UA 0x0C
-
-enum bup_vch_sel {
- BUP_VCH_SEL_2P5V,
- BUP_VCH_SEL_2P6V,
- BUP_VCH_SEL_2P8V,
- BUP_VCH_SEL_3P1V,
- /*
- * Note that the following 5 values 2.7v, 2.9v, 3.0v, 3.2v, 3.3v
- * are only available on ab8540. You can't choose these 5
- * voltage on ab8500/ab8505/ab9540.
- */
- BUP_VCH_SEL_2P7V,
- BUP_VCH_SEL_2P9V,
- BUP_VCH_SEL_3P0V,
- BUP_VCH_SEL_3P2V,
- BUP_VCH_SEL_3P3V,
-};
-
-#define BUP_VCH_RANGE 0x02
-#define VBUP33_VRTCN 0x01
-
-/* Battery OVV constants */
-#define BATT_OVV_ENA 0x02
-#define BATT_OVV_TH_3P7 0x00
-#define BATT_OVV_TH_4P75 0x01
-
-/* A value to indicate over voltage */
-#define BATT_OVV_VALUE 4750
-
-/* VBUS OVV constants */
-#define VBUS_OVV_SELECT_MASK 0x78
-#define VBUS_OVV_SELECT_5P6V 0x00
-#define VBUS_OVV_SELECT_5P7V 0x08
-#define VBUS_OVV_SELECT_5P8V 0x10
-#define VBUS_OVV_SELECT_5P9V 0x18
-#define VBUS_OVV_SELECT_6P0V 0x20
-#define VBUS_OVV_SELECT_6P1V 0x28
-#define VBUS_OVV_SELECT_6P2V 0x30
-#define VBUS_OVV_SELECT_6P3V 0x38
-
-#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
-
-/* Fuel Gauge constants */
-#define RESET_ACCU 0x02
-#define READ_REQ 0x01
-#define CC_DEEP_SLEEP_ENA 0x02
-#define CC_PWR_UP_ENA 0x01
-#define CC_SAMPLES_40 0x28
-#define RD_NCONV_ACCU_REQ 0x01
-#define CC_CALIB 0x08
-#define CC_INTAVGOFFSET_ENA 0x10
-#define CC_MUXOFFSET 0x80
-#define CC_INT_CAL_N_AVG_MASK 0x60
-#define CC_INT_CAL_SAMPLES_16 0x40
-#define CC_INT_CAL_SAMPLES_8 0x20
-#define CC_INT_CAL_SAMPLES_4 0x00
-
-/* RTC constants */
-#define RTC_BUP_CH_ENA 0x10
-
-/* BatCtrl Current Source Constants */
-#define BAT_CTRL_7U_ENA 0x01
-#define BAT_CTRL_20U_ENA 0x02
-#define BAT_CTRL_18U_ENA 0x01
-#define BAT_CTRL_16U_ENA 0x02
-#define BAT_CTRL_CMP_ENA 0x04
-#define FORCE_BAT_CTRL_CMP_HIGH 0x08
-#define BAT_CTRL_PULL_UP_ENA 0x10
-
-/* Battery type */
-#define BATTERY_UNKNOWN 00
-
-/* Registers for pcut feature in ab8505 and ab9540 */
-#define AB8505_RTC_PCUT_CTL_STATUS_REG 0x12
-#define AB8505_RTC_PCUT_TIME_REG 0x13
-#define AB8505_RTC_PCUT_MAX_TIME_REG 0x14
-#define AB8505_RTC_PCUT_FLAG_TIME_REG 0x15
-#define AB8505_RTC_PCUT_RESTART_REG 0x16
-#define AB8505_RTC_PCUT_DEBOUNCE_REG 0x17
-
-/* USB Power Path constants for ab8540 */
-#define BUS_VSYS_VOL_SELECT_MASK 0x06
-#define BUS_VSYS_VOL_SELECT_3P6V 0x00
-#define BUS_VSYS_VOL_SELECT_3P325V 0x02
-#define BUS_VSYS_VOL_SELECT_3P9V 0x04
-#define BUS_VSYS_VOL_SELECT_4P3V 0x06
-#define BUS_POWER_PATH_MODE_ENA 0x01
-#define BUS_PP_PRECHG_CURRENT_MASK 0x0E
-#define BUS_POWER_PATH_PRECHG_ENA 0x01
-
-/**
- * struct res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combines the identification resistor with a
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct batres_vs_temp - defines one point in a temp vs battery internal
- * resistance curve.
- * @temp: battery pack temperature in Celsius
- * @resist: battery internal reistance in mOhm
- */
-struct batres_vs_temp {
- int temp;
- int resist;
-};
-
-/* Forward declaration */
-struct ab8500_fg;
-
-/**
- * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
- * Resolution in 50 mV step.
- * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
- * Resolution in 50 mV step.
- * @user_cap_limit Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres This is the threshold where we stop reporting
- * battery full while in maintenance, in per cent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
- */
-struct ab8500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debunce_time;
-};
-
-/**
- * struct ab8500_charger_maximization - struct used by the board config.
- * @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
- * @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
- */
-struct ab8500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct ab8500_bm_capacity_levels - ab8500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct ab8500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct ab8500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct ab8500_bm_data - ab8500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @maxi: maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct ab8500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- const struct ab8500_maxim_parameters *maxi;
- const struct ab8500_bm_capacity_levels *cap_levels;
- const struct ab8500_bm_charger_parameters *chg_params;
- const struct ab8500_fg_parameters *fg_params;
-};
-
-struct ab8500_btemp;
-struct ab8500_gpadc;
-struct ab8500_fg;
-
-#ifdef CONFIG_AB8500_BM
-extern struct abx500_bm_data ab8500_bm_data;
-
-void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
-struct ab8500_btemp *ab8500_btemp_get(void);
-int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
-int ab8500_btemp_get_temp(struct ab8500_btemp *btemp);
-struct ab8500_fg *ab8500_fg_get(void);
-int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
-int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
-int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
-int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
-int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
-
-#else
-static struct abx500_bm_data ab8500_bm_data;
-#endif
-#endif /* _AB8500_BM_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 524a7e4702c2..76d326ea8eba 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -368,7 +368,6 @@ struct ab8500 {
int it_latchhier_num;
};
-struct ab8500_regulator_platform_data;
struct ab8500_codec_platform_data;
struct ab8500_sysctrl_platform_data;
@@ -376,19 +375,13 @@ struct ab8500_sysctrl_platform_data;
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
void (*init) (struct ab8500 *);
- struct ab8500_regulator_platform_data *regulator;
struct ab8500_codec_platform_data *codec;
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
@@ -506,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
void ab8500_override_turn_on_stat(u8 mask, u8 set);
-#ifdef CONFIG_AB8500_DEBUG
-extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-void ab8500_dump_all_banks(struct device *dev);
-void ab8500_debug_register_interrupt(int line);
-#else
static inline void ab8500_dump_all_banks(struct device *dev) {}
static inline void ab8500_debug_register_interrupt(int line) {}
-#endif
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
deleted file mode 100644
index 9b97d284d0ce..000000000000
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2012
- * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
- */
-
-#ifndef _UX500_CHARGALG_H
-#define _UX500_CHARGALG_H
-
-#include <linux/power_supply.h>
-
-/*
- * Valid only for supplies of type:
- * - POWER_SUPPLY_TYPE_MAINS,
- * - POWER_SUPPLY_TYPE_USB,
- * because only those supply types store a pointer to struct ux500_charger
- * as drv_data.
- */
-#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)
-
-/* Forward declaration */
-struct ux500_charger;
-
-struct ux500_charger_ops {
- int (*enable) (struct ux500_charger *, int, int, int);
- int (*check_enable) (struct ux500_charger *, int, int);
- int (*kick_wd) (struct ux500_charger *);
- int (*update_curr) (struct ux500_charger *, int);
-};
-
-/**
- * struct ux500_charger - power supply ux500 charger sub class
- * @psy: power supply base class
- * @ops: ux500 charger operations
- * @max_out_volt: maximum charger output voltage in mV
- * @max_out_curr: maximum charger output current in mA
- * @wdt_refresh: charger watchdog refresh interval
- * @enabled: indicates whether this charger is in use
- * @external: indicates an external charger unit (pm2xxx)
- */
-struct ux500_charger {
- struct power_supply *psy;
- struct ux500_charger_ops ops;
- int max_out_volt;
- int max_out_curr;
- int wdt_refresh;
- bool enabled;
- bool external;
-};
-
-extern struct blocking_notifier_head charger_notifier_list;
-
-#endif
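
The ops table in the deleted header is a plain function-pointer vtable: each
charger driver fills it in and the charging algorithm calls through it. A
hedged sketch of the producer side; the callback name and the limit values are
invented for illustration:

	/* Hypothetical AC charger wiring up ux500_charger_ops. */
	static int acme_charger_enable(struct ux500_charger *charger,
				       int enable, int vset, int iset)
	{
		/* program the hardware with vset (mV) and iset (mA) here */
		return 0;
	}

	static struct ux500_charger ac_chg = {
		.ops = {
			.enable = acme_charger_enable,
		},
		.max_out_volt = 4350,	/* mV */
		.max_out_curr = 1500,	/* mA */
	};
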
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
deleted file mode 100644
index 61e686dbaa74..000000000000
--- a/include/linux/mfd/asic3.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * include/linux/mfd/asic3.h
- *
- * Compaq ASIC3 headers.
- *
- * Copyright 2001 Compaq Computer Corporation.
- * Copyright 2007-2008 OpenedHand Ltd.
- */
-
-#ifndef __ASIC3_H__
-#define __ASIC3_H__
-
-#include <linux/types.h>
-
-struct led_classdev;
-struct asic3_led {
- const char *name;
- const char *default_trigger;
- struct led_classdev *cdev;
-};
-
-struct asic3_platform_data {
- u16 *gpio_config;
- unsigned int gpio_config_num;
-
- unsigned int irq_base;
-
- unsigned int gpio_base;
-
- unsigned int clock_rate;
-
- struct asic3_led *leds;
-};
-
-#define ASIC3_NUM_GPIO_BANKS 4
-#define ASIC3_GPIOS_PER_BANK 16
-#define ASIC3_NUM_GPIOS 64
-#define ASIC3_NR_IRQS (ASIC3_NUM_GPIOS + 6)
-
-#define ASIC3_IRQ_LED0 64
-#define ASIC3_IRQ_LED1 65
-#define ASIC3_IRQ_LED2 66
-#define ASIC3_IRQ_SPI 67
-#define ASIC3_IRQ_SMBUS 68
-#define ASIC3_IRQ_OWM 69
-
-#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
-
-#define ASIC3_GPIO_BANK_A 0
-#define ASIC3_GPIO_BANK_B 1
-#define ASIC3_GPIO_BANK_C 2
-#define ASIC3_GPIO_BANK_D 3
-
-#define ASIC3_GPIO(bank, gpio) \
- ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio))
-#define ASIC3_GPIO_bit(gpio) (1 << ((gpio) & 0xf))
-/* All offsets below are specified with this address bus shift */
-#define ASIC3_DEFAULT_ADDR_SHIFT 2
-
-#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg)
-#define ASIC3_GPIO_OFFSET(base, reg) \
- (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg)
-
-#define ASIC3_GPIO_A_BASE 0x0000
-#define ASIC3_GPIO_B_BASE 0x0100
-#define ASIC3_GPIO_C_BASE 0x0200
-#define ASIC3_GPIO_D_BASE 0x0300
-
-#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4)
-#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \
- (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4)))
-#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio))
-#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100))
-#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100))
-
-#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */
-#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */
-#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */
-#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */
-#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */
-#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */
-#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */
-#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */
-#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */
-#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */
-#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */
-#define ASIC3_GPIO_SLEEP_CONF 0x2c /*
- * R/W bit 1: autosleep
- * 0: disable gposlpout in normal mode,
- * enable gposlpout in sleep mode.
- */
-#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */
-
-/*
- * ASIC3 GPIO config
- *
- * Bits 0..6   GPIO number
- * Bits 7..13  alternate function
- * Bit 14     direction
- * Bit 15     initial value
- */
-#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f)
-#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7)
-#define ASIC3_CONFIG_GPIO_DIR(config) (((config) & (1 << 14)) >> 14)
-#define ASIC3_CONFIG_GPIO_INIT(config) (((config) & (1 << 15)) >> 15)
-#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \
- | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \
- | (((init) & 0x1) << 15))
-#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init))
-#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, 1, (init))
-
-/*
- * Alternate functions
- */
-#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
-#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
-#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
-#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0)
-#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0)
-#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0)
-#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
-#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
-#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
-#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0)
-#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0)
-#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0)
-#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0)
-#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0)
-#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0)
-#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0)
-#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0)
-#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0)
-#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0)
-#define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0)
-#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0)
-#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0)
-#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0)
-
-
-#define ASIC3_SPI_Base 0x0400
-#define ASIC3_SPI_Control 0x0000
-#define ASIC3_SPI_TxData 0x0004
-#define ASIC3_SPI_RxData 0x0008
-#define ASIC3_SPI_Int 0x000c
-#define ASIC3_SPI_Status 0x0010
-
-#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */
-
-#define ASIC3_PWM_0_Base 0x0500
-#define ASIC3_PWM_1_Base 0x0600
-#define ASIC3_PWM_TimeBase 0x0000
-#define ASIC3_PWM_PeriodTime 0x0004
-#define ASIC3_PWM_DutyTime 0x0008
-
-#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
-#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
-
-#define ASIC3_NUM_LEDS 3
-#define ASIC3_LED_0_Base 0x0700
-#define ASIC3_LED_1_Base 0x0800
-#define ASIC3_LED_2_Base 0x0900
-#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */
-#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */
-#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */
-#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */
-
-/* LED TimeBase bits - match ASIC2 */
-#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */
- /* Note: max = 5 on hx4700 */
- /* 0: maximum time base */
- /* 1: maximum time base / 2 */
- /* n: maximum time base / 2^n */
-
-#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
-#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
-#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
-
-#define ASIC3_CLOCK_BASE 0x0A00
-#define ASIC3_CLOCK_CDEX 0x00
-#define ASIC3_CLOCK_SEL 0x04
-
-#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */
-#define CLOCK_CDEX_SOURCE0 (1 << 0)
-#define CLOCK_CDEX_SOURCE1 (1 << 1)
-#define CLOCK_CDEX_SPI (1 << 2)
-#define CLOCK_CDEX_OWM (1 << 3)
-#define CLOCK_CDEX_PWM0 (1 << 4)
-#define CLOCK_CDEX_PWM1 (1 << 5)
-#define CLOCK_CDEX_LED0 (1 << 6)
-#define CLOCK_CDEX_LED1 (1 << 7)
-#define CLOCK_CDEX_LED2 (1 << 8)
-
-/* Clock settings: 1 for 24.576 MHz, 0 for 12.288 MHz */
-#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */
-#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */
-#define CLOCK_CDEX_SMBUS (1 << 11)
-#define CLOCK_CDEX_CONTROL_CX (1 << 12)
-
-#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */
-#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */
-
-#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */
-#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */
-
-/* R/W: INT clock source control (32.768 kHz) */
-#define CLOCK_SEL_CX (1 << 2)
-
-
-#define ASIC3_INTR_BASE 0x0B00
-
-#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */
-#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */
-#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */
-#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */
-
-#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
-#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
-#define ASIC3_INTMASK_MASK0 (1 << 2)
-#define ASIC3_INTMASK_MASK1 (1 << 3)
-#define ASIC3_INTMASK_MASK2 (1 << 4)
-#define ASIC3_INTMASK_MASK3 (1 << 5)
-#define ASIC3_INTMASK_MASK4 (1 << 6)
-#define ASIC3_INTMASK_MASK5 (1 << 7)
-
-#define ASIC3_INTR_PERIPHERAL_A (1 << 0)
-#define ASIC3_INTR_PERIPHERAL_B (1 << 1)
-#define ASIC3_INTR_PERIPHERAL_C (1 << 2)
-#define ASIC3_INTR_PERIPHERAL_D (1 << 3)
-#define ASIC3_INTR_LED0 (1 << 4)
-#define ASIC3_INTR_LED1 (1 << 5)
-#define ASIC3_INTR_LED2 (1 << 6)
-#define ASIC3_INTR_SPI (1 << 7)
-#define ASIC3_INTR_SMBUS (1 << 8)
-#define ASIC3_INTR_OWM (1 << 9)
-
-#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */
-#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */
-
-
-/* Basic control of the SD ASIC */
-#define ASIC3_SDHWCTRL_BASE 0x0E00
-#define ASIC3_SDHWCTRL_SDCONF 0x00
-
-#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
-#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
-#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */
-#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */
-
-/* SD card write protection: 0=high */
-#define ASIC3_SDHWCTRL_LEVWP (1 << 4)
-#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */
-
-/* SD card power supply ctrl 1=enable */
-#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
-
-#define ASIC3_EXTCF_BASE 0x1100
-
-#define ASIC3_EXTCF_SELECT 0x00
-#define ASIC3_EXTCF_RESET 0x04
-
-#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
-#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
-#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */
-#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */
-#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */
-#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */
-#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */
-#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */
-#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */
-#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */
-#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */
-#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */
-#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14)
-#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
-
-/*********************************************
- * The 1-Wire interface (DS1WM) is handled
- * by the ds1wm driver.
- *********************************************/
-
-#define ASIC3_OWM_BASE 0xC00
-
-/*****************************************************************************
- * The SD configuration registers are at a completely different location
- * in memory. They are divided into three sets of registers:
- *
- * SD_CONFIG Core configuration register
- * SD_CTRL Control registers for SD operations
- * SDIO_CTRL Control registers for SDIO operations
- *
- *****************************************************************************/
-#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CTRL_BASE 0x1000
-#define ASIC3_SDIO_CTRL_BASE 0x1200
-
-#define ASIC3_MAP_SIZE_32BIT 0x2000
-#define ASIC3_MAP_SIZE_16BIT 0x1000
-
-/* Functions needed by leds-asic3 */
-
-struct asic3;
-extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
-extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
-
-#endif /* __ASIC3_H__ */
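
The 16-bit GPIO config words in the deleted header pack four fields, and the
extractor macros simply invert the packing. A quick worked example using only
definitions from the header: ASIC3_GPIOA11_PWM0 is ASIC3_CONFIG_GPIO(11, 1, 1, 0),
i.e. 11 | (1 << 7) | (1 << 14) = 0x408b.

	u16 cfg = ASIC3_CONFIG_GPIO(11, 1, 1, 0);	/* 0x408b */

	ASIC3_CONFIG_GPIO_PIN(cfg);	/* -> 11, bits 0..6  */
	ASIC3_CONFIG_GPIO_ALT(cfg);	/* -> 1,  bits 7..13 */
	ASIC3_CONFIG_GPIO_DIR(cfg);	/* -> 1,  bit 14     */
	ASIC3_CONFIG_GPIO_INIT(cfg);	/* -> 0,  bit 15     */
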
diff --git a/include/linux/mfd/atc260x/atc2603c.h b/include/linux/mfd/atc260x/atc2603c.h
new file mode 100644
index 000000000000..07ac640ef3e1
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2603c.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2603C PMIC register definitions
+ *
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2603C_H
+#define __LINUX_MFD_ATC260X_ATC2603C_H
+
+enum atc2603c_irq_def {
+ ATC2603C_IRQ_AUDIO = 0,
+ ATC2603C_IRQ_OV,
+ ATC2603C_IRQ_OC,
+ ATC2603C_IRQ_OT,
+ ATC2603C_IRQ_UV,
+ ATC2603C_IRQ_ALARM,
+ ATC2603C_IRQ_ONOFF,
+ ATC2603C_IRQ_SGPIO,
+ ATC2603C_IRQ_IR,
+ ATC2603C_IRQ_REMCON,
+ ATC2603C_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2603C_PMU_SYS_CTL0 0x00
+#define ATC2603C_PMU_SYS_CTL1 0x01
+#define ATC2603C_PMU_SYS_CTL2 0x02
+#define ATC2603C_PMU_SYS_CTL3 0x03
+#define ATC2603C_PMU_SYS_CTL4 0x04
+#define ATC2603C_PMU_SYS_CTL5 0x05
+#define ATC2603C_PMU_SYS_CTL6 0x06
+#define ATC2603C_PMU_SYS_CTL7 0x07
+#define ATC2603C_PMU_SYS_CTL8 0x08
+#define ATC2603C_PMU_SYS_CTL9 0x09
+#define ATC2603C_PMU_BAT_CTL0 0x0A
+#define ATC2603C_PMU_BAT_CTL1 0x0B
+#define ATC2603C_PMU_VBUS_CTL0 0x0C
+#define ATC2603C_PMU_VBUS_CTL1 0x0D
+#define ATC2603C_PMU_WALL_CTL0 0x0E
+#define ATC2603C_PMU_WALL_CTL1 0x0F
+#define ATC2603C_PMU_SYS_PENDING 0x10
+#define ATC2603C_PMU_DC1_CTL0 0x11
+#define ATC2603C_PMU_DC1_CTL1 0x12 // Undocumented
+#define ATC2603C_PMU_DC1_CTL2 0x13 // Undocumented
+#define ATC2603C_PMU_DC2_CTL0 0x14
+#define ATC2603C_PMU_DC2_CTL1 0x15 // Undocumented
+#define ATC2603C_PMU_DC2_CTL2 0x16 // Undocumented
+#define ATC2603C_PMU_DC3_CTL0 0x17
+#define ATC2603C_PMU_DC3_CTL1 0x18 // Undocumented
+#define ATC2603C_PMU_DC3_CTL2 0x19 // Undocumented
+#define ATC2603C_PMU_DC4_CTL0 0x1A // Undocumented
+#define ATC2603C_PMU_DC4_CTL1 0x1B // Undocumented
+#define ATC2603C_PMU_DC5_CTL0 0x1C // Undocumented
+#define ATC2603C_PMU_DC5_CTL1 0x1D // Undocumented
+#define ATC2603C_PMU_LDO1_CTL 0x1E
+#define ATC2603C_PMU_LDO2_CTL 0x1F
+#define ATC2603C_PMU_LDO3_CTL 0x20
+#define ATC2603C_PMU_LDO4_CTL 0x21 // Undocumented
+#define ATC2603C_PMU_LDO5_CTL 0x22
+#define ATC2603C_PMU_LDO6_CTL 0x23
+#define ATC2603C_PMU_LDO7_CTL 0x24
+#define ATC2603C_PMU_LDO8_CTL 0x25 // Undocumented
+#define ATC2603C_PMU_LDO9_CTL 0x26 // Undocumented
+#define ATC2603C_PMU_LDO10_CTL 0x27 // Undocumented
+#define ATC2603C_PMU_LDO11_CTL 0x28
+#define ATC2603C_PMU_SWITCH_CTL 0x29
+#define ATC2603C_PMU_OV_CTL0 0x2A
+#define ATC2603C_PMU_OV_CTL1 0x2B
+#define ATC2603C_PMU_OV_STATUS 0x2C
+#define ATC2603C_PMU_OV_EN 0x2D
+#define ATC2603C_PMU_OV_INT_EN 0x2E
+#define ATC2603C_PMU_OC_CTL 0x2F
+#define ATC2603C_PMU_OC_STATUS 0x30
+#define ATC2603C_PMU_OC_EN 0x31
+#define ATC2603C_PMU_OC_INT_EN 0x32
+#define ATC2603C_PMU_UV_CTL0 0x33
+#define ATC2603C_PMU_UV_CTL1 0x34
+#define ATC2603C_PMU_UV_STATUS 0x35
+#define ATC2603C_PMU_UV_EN 0x36
+#define ATC2603C_PMU_UV_INT_EN 0x37
+#define ATC2603C_PMU_OT_CTL 0x38
+#define ATC2603C_PMU_CHARGER_CTL0 0x39
+#define ATC2603C_PMU_CHARGER_CTL1 0x3A
+#define ATC2603C_PMU_CHARGER_CTL2 0x3B
+#define ATC2603C_PMU_BAKCHARGER_CTL 0x3C // Undocumented
+#define ATC2603C_PMU_APDS_CTL 0x3D
+#define ATC2603C_PMU_AUXADC_CTL0 0x3E
+#define ATC2603C_PMU_AUXADC_CTL1 0x3F
+#define ATC2603C_PMU_BATVADC 0x40
+#define ATC2603C_PMU_BATIADC 0x41
+#define ATC2603C_PMU_WALLVADC 0x42
+#define ATC2603C_PMU_WALLIADC 0x43
+#define ATC2603C_PMU_VBUSVADC 0x44
+#define ATC2603C_PMU_VBUSIADC 0x45
+#define ATC2603C_PMU_SYSPWRADC 0x46
+#define ATC2603C_PMU_REMCONADC 0x47
+#define ATC2603C_PMU_SVCCADC 0x48
+#define ATC2603C_PMU_CHGIADC 0x49
+#define ATC2603C_PMU_IREFADC 0x4A
+#define ATC2603C_PMU_BAKBATADC 0x4B
+#define ATC2603C_PMU_ICTEMPADC 0x4C
+#define ATC2603C_PMU_AUXADC0 0x4D
+#define ATC2603C_PMU_AUXADC1 0x4E
+#define ATC2603C_PMU_AUXADC2 0x4F
+#define ATC2603C_PMU_ICMADC 0x50
+#define ATC2603C_PMU_BDG_CTL 0x51 // Undocumented
+#define ATC2603C_RTC_CTL 0x52
+#define ATC2603C_RTC_MSALM 0x53
+#define ATC2603C_RTC_HALM 0x54
+#define ATC2603C_RTC_YMDALM 0x55
+#define ATC2603C_RTC_MS 0x56
+#define ATC2603C_RTC_H 0x57
+#define ATC2603C_RTC_DC 0x58
+#define ATC2603C_RTC_YMD 0x59
+#define ATC2603C_EFUSE_DAT 0x5A // Undocumented
+#define ATC2603C_EFUSECRTL1 0x5B // Undocumented
+#define ATC2603C_EFUSECRTL2 0x5C // Undocumented
+#define ATC2603C_PMU_FW_USE0 0x5D // Undocumented
+#define ATC2603C_PMU_FW_USE1 0x5E // Undocumented
+#define ATC2603C_PMU_FW_USE2 0x5F // Undocumented
+#define ATC2603C_PMU_FW_USE3 0x60 // Undocumented
+#define ATC2603C_PMU_FW_USE4 0x61 // Undocumented
+#define ATC2603C_PMU_ABNORMAL_STATUS 0x62
+#define ATC2603C_PMU_WALL_APDS_CTL 0x63
+#define ATC2603C_PMU_REMCON_CTL0 0x64
+#define ATC2603C_PMU_REMCON_CTL1 0x65
+#define ATC2603C_PMU_MUX_CTL0 0x66
+#define ATC2603C_PMU_SGPIO_CTL0 0x67
+#define ATC2603C_PMU_SGPIO_CTL1 0x68
+#define ATC2603C_PMU_SGPIO_CTL2 0x69
+#define ATC2603C_PMU_SGPIO_CTL3 0x6A
+#define ATC2603C_PMU_SGPIO_CTL4 0x6B
+#define ATC2603C_PWMCLK_CTL 0x6C
+#define ATC2603C_PWM0_CTL 0x6D
+#define ATC2603C_PWM1_CTL 0x6E
+#define ATC2603C_PMU_ADC_DBG0 0x70
+#define ATC2603C_PMU_ADC_DBG1 0x71
+#define ATC2603C_PMU_ADC_DBG2 0x72
+#define ATC2603C_PMU_ADC_DBG3 0x73
+#define ATC2603C_PMU_ADC_DBG4 0x74
+#define ATC2603C_IRC_CTL 0x80
+#define ATC2603C_IRC_STAT 0x81
+#define ATC2603C_IRC_CC 0x82
+#define ATC2603C_IRC_KDC 0x83
+#define ATC2603C_IRC_WK 0x84
+#define ATC2603C_IRC_RCC 0x85
+#define ATC2603C_IRC_FILTER 0x86
+
+/* AUDIO_OUT Registers */
+#define ATC2603C_AUDIOINOUT_CTL 0xA0
+#define ATC2603C_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2603C_DAC_DIGITALCTL 0xA2
+#define ATC2603C_DAC_VOLUMECTL0 0xA3
+#define ATC2603C_DAC_ANALOG0 0xA4
+#define ATC2603C_DAC_ANALOG1 0xA5
+#define ATC2603C_DAC_ANALOG2 0xA6
+#define ATC2603C_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2603C_ADC_DIGITALCTL 0xA8
+#define ATC2603C_ADC_HPFCTL 0xA9
+#define ATC2603C_ADC_CTL 0xAA
+#define ATC2603C_AGC_CTL0 0xAB
+#define ATC2603C_AGC_CTL1 0xAC // Undocumented
+#define ATC2603C_AGC_CTL2 0xAD
+#define ATC2603C_ADC_ANALOG0 0xAE
+#define ATC2603C_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2603C_PCM0_CTL 0xB0 // Undocumented
+#define ATC2603C_PCM1_CTL 0xB1 // Undocumented
+#define ATC2603C_PCM2_CTL 0xB2 // Undocumented
+#define ATC2603C_PCMIF_CTL 0xB3 // Undocumented
+
+/* CMU_CONTROL Registers */
+#define ATC2603C_CMU_DEVRST 0xC1 // Undocumented
+
+/* INTS Registers */
+#define ATC2603C_INTS_PD 0xC8
+#define ATC2603C_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2603C_MFP_CTL 0xD0
+#define ATC2603C_PAD_VSEL 0xD1 // Undocumented
+#define ATC2603C_GPIO_OUTEN 0xD2
+#define ATC2603C_GPIO_INEN 0xD3
+#define ATC2603C_GPIO_DAT 0xD4
+#define ATC2603C_PAD_DRV 0xD5
+#define ATC2603C_PAD_EN 0xD6
+#define ATC2603C_DEBUG_SEL 0xD7 // Undocumented
+#define ATC2603C_DEBUG_IE 0xD8 // Undocumented
+#define ATC2603C_DEBUG_OE 0xD9 // Undocumented
+#define ATC2603C_BIST_START 0x0A // Undocumented
+#define ATC2603C_BIST_RESULT 0x0B // Undocumented
+#define ATC2603C_CHIP_VER 0xDC
+
+/* TWSI Registers */
+#define ATC2603C_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2603C_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2603C_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2603C_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2603C_PMU_SYS_CTL0_SGPIOIRQ_WK_EN BIT(11)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2603C_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2603C_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2603C_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2603C_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2603C_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2603C_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2603C_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_PRESS_RESET_IRQ_PD BIT(10)
+#define ATC2603C_PMU_SYS_CTL1_SGPIOIRQ_WK_FLAG BIT(11)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2603C_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2603C_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2603C_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2603C_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_RESET_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_INT_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2603C_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2603C_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2603C_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2603C_INTS_MSK_AUDIO BIT(0)
+#define ATC2603C_INTS_MSK_OV BIT(1)
+#define ATC2603C_INTS_MSK_OC BIT(2)
+#define ATC2603C_INTS_MSK_OT BIT(3)
+#define ATC2603C_INTS_MSK_UV BIT(4)
+#define ATC2603C_INTS_MSK_ALARM BIT(5)
+#define ATC2603C_INTS_MSK_ONOFF BIT(6)
+#define ATC2603C_INTS_MSK_SGPIO BIT(7)
+#define ATC2603C_INTS_MSK_IR BIT(8)
+#define ATC2603C_INTS_MSK_REMCON BIT(9)
+#define ATC2603C_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2603C_CMU_DEVRST_MFP BIT(1)
+#define ATC2603C_CMU_DEVRST_INTS BIT(2)
+#define ATC2603C_CMU_DEVRST_AUDIO BIT(4)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2603C_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2603C_H */
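
One mask above is worth unpacking: ATC2603C_PMU_SYS_CTL0_WK_ALL is
GENMASK(15, 5) & ~BIT(10), i.e. 0xffe0 with bit 10 cleared, giving 0xfbe0 —
every wakeup-enable bit in PMU_SYS_CTL0 except RESTART_EN, which sits inside
the bit range but is not a wakeup source. A hedged sketch of arming all wakeup
sources; the regmap handle is an assumption about the surrounding driver:

	/* Sketch only: assumes a struct regmap for the PMIC already exists. */
	ret = regmap_update_bits(regmap, ATC2603C_PMU_SYS_CTL0,
				 ATC2603C_PMU_SYS_CTL0_WK_ALL,
				 ATC2603C_PMU_SYS_CTL0_WK_ALL);
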
diff --git a/include/linux/mfd/atc260x/atc2609a.h b/include/linux/mfd/atc260x/atc2609a.h
new file mode 100644
index 000000000000..b957d7bd73e9
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2609a.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2609A PMIC register definitions
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2609A_H
+#define __LINUX_MFD_ATC260X_ATC2609A_H
+
+enum atc2609a_irq_def {
+ ATC2609A_IRQ_AUDIO = 0,
+ ATC2609A_IRQ_OV,
+ ATC2609A_IRQ_OC,
+ ATC2609A_IRQ_OT,
+ ATC2609A_IRQ_UV,
+ ATC2609A_IRQ_ALARM,
+ ATC2609A_IRQ_ONOFF,
+ ATC2609A_IRQ_WKUP,
+ ATC2609A_IRQ_IR,
+ ATC2609A_IRQ_REMCON,
+ ATC2609A_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2609A_PMU_SYS_CTL0 0x00
+#define ATC2609A_PMU_SYS_CTL1 0x01
+#define ATC2609A_PMU_SYS_CTL2 0x02
+#define ATC2609A_PMU_SYS_CTL3 0x03
+#define ATC2609A_PMU_SYS_CTL4 0x04
+#define ATC2609A_PMU_SYS_CTL5 0x05
+#define ATC2609A_PMU_SYS_CTL6 0x06
+#define ATC2609A_PMU_SYS_CTL7 0x07
+#define ATC2609A_PMU_SYS_CTL8 0x08
+#define ATC2609A_PMU_SYS_CTL9 0x09
+#define ATC2609A_PMU_BAT_CTL0 0x0A
+#define ATC2609A_PMU_BAT_CTL1 0x0B
+#define ATC2609A_PMU_VBUS_CTL0 0x0C
+#define ATC2609A_PMU_VBUS_CTL1 0x0D
+#define ATC2609A_PMU_WALL_CTL0 0x0E
+#define ATC2609A_PMU_WALL_CTL1 0x0F
+#define ATC2609A_PMU_SYS_PENDING 0x10
+#define ATC2609A_PMU_APDS_CTL0 0x11
+#define ATC2609A_PMU_APDS_CTL1 0x12
+#define ATC2609A_PMU_APDS_CTL2 0x13
+#define ATC2609A_PMU_CHARGER_CTL 0x14
+#define ATC2609A_PMU_BAKCHARGER_CTL 0x15
+#define ATC2609A_PMU_SWCHG_CTL0 0x16
+#define ATC2609A_PMU_SWCHG_CTL1 0x17
+#define ATC2609A_PMU_SWCHG_CTL2 0x18
+#define ATC2609A_PMU_SWCHG_CTL3 0x19
+#define ATC2609A_PMU_SWCHG_CTL4 0x1A
+#define ATC2609A_PMU_DC_OSC 0x1B
+#define ATC2609A_PMU_DC0_CTL0 0x1C
+#define ATC2609A_PMU_DC0_CTL1 0x1D
+#define ATC2609A_PMU_DC0_CTL2 0x1E
+#define ATC2609A_PMU_DC0_CTL3 0x1F
+#define ATC2609A_PMU_DC0_CTL4 0x20
+#define ATC2609A_PMU_DC0_CTL5 0x21
+#define ATC2609A_PMU_DC0_CTL6 0x22
+#define ATC2609A_PMU_DC1_CTL0 0x23
+#define ATC2609A_PMU_DC1_CTL1 0x24
+#define ATC2609A_PMU_DC1_CTL2 0x25
+#define ATC2609A_PMU_DC1_CTL3 0x26
+#define ATC2609A_PMU_DC1_CTL4 0x27
+#define ATC2609A_PMU_DC1_CTL5 0x28
+#define ATC2609A_PMU_DC1_CTL6 0x29
+#define ATC2609A_PMU_DC2_CTL0 0x2A
+#define ATC2609A_PMU_DC2_CTL1 0x2B
+#define ATC2609A_PMU_DC2_CTL2 0x2C
+#define ATC2609A_PMU_DC2_CTL3 0x2D
+#define ATC2609A_PMU_DC2_CTL4 0x2E
+#define ATC2609A_PMU_DC2_CTL5 0x2F
+#define ATC2609A_PMU_DC2_CTL6 0x30
+#define ATC2609A_PMU_DC3_CTL0 0x31
+#define ATC2609A_PMU_DC3_CTL1 0x32
+#define ATC2609A_PMU_DC3_CTL2 0x33
+#define ATC2609A_PMU_DC3_CTL3 0x34
+#define ATC2609A_PMU_DC3_CTL4 0x35
+#define ATC2609A_PMU_DC3_CTL5 0x36
+#define ATC2609A_PMU_DC3_CTL6 0x37
+#define ATC2609A_PMU_DC_ZR 0x38
+#define ATC2609A_PMU_LDO0_CTL0 0x39
+#define ATC2609A_PMU_LDO0_CTL1 0x3A
+#define ATC2609A_PMU_LDO1_CTL0 0x3B
+#define ATC2609A_PMU_LDO1_CTL1 0x3C
+#define ATC2609A_PMU_LDO2_CTL0 0x3D
+#define ATC2609A_PMU_LDO2_CTL1 0x3E
+#define ATC2609A_PMU_LDO3_CTL0 0x3F
+#define ATC2609A_PMU_LDO3_CTL1 0x40
+#define ATC2609A_PMU_LDO4_CTL0 0x41
+#define ATC2609A_PMU_LDO4_CTL1 0x42
+#define ATC2609A_PMU_LDO5_CTL0 0x43
+#define ATC2609A_PMU_LDO5_CTL1 0x44
+#define ATC2609A_PMU_LDO6_CTL0 0x45
+#define ATC2609A_PMU_LDO6_CTL1 0x46
+#define ATC2609A_PMU_LDO7_CTL0 0x47
+#define ATC2609A_PMU_LDO7_CTL1 0x48
+#define ATC2609A_PMU_LDO8_CTL0 0x49
+#define ATC2609A_PMU_LDO8_CTL1 0x4A
+#define ATC2609A_PMU_LDO9_CTL 0x4B
+#define ATC2609A_PMU_OV_INT_EN 0x4C
+#define ATC2609A_PMU_OV_STATUS 0x4D
+#define ATC2609A_PMU_UV_INT_EN 0x4E
+#define ATC2609A_PMU_UV_STATUS 0x4F
+#define ATC2609A_PMU_OC_INT_EN 0x50
+#define ATC2609A_PMU_OC_STATUS 0x51
+#define ATC2609A_PMU_OT_CTL 0x52
+#define ATC2609A_PMU_CM_CTL0 0x53
+#define ATC2609A_PMU_FW_USE0 0x54
+#define ATC2609A_PMU_FW_USE1 0x55
+#define ATC2609A_PMU_ADC12B_I 0x56
+#define ATC2609A_PMU_ADC12B_V 0x57
+#define ATC2609A_PMU_ADC12B_DUMMY 0x58
+#define ATC2609A_PMU_AUXADC_CTL0 0x59
+#define ATC2609A_PMU_AUXADC_CTL1 0x5A
+#define ATC2609A_PMU_BATVADC 0x5B
+#define ATC2609A_PMU_BATIADC 0x5C
+#define ATC2609A_PMU_WALLVADC 0x5D
+#define ATC2609A_PMU_WALLIADC 0x5E
+#define ATC2609A_PMU_VBUSVADC 0x5F
+#define ATC2609A_PMU_VBUSIADC 0x60
+#define ATC2609A_PMU_SYSPWRADC 0x61
+#define ATC2609A_PMU_REMCONADC 0x62
+#define ATC2609A_PMU_SVCCADC 0x63
+#define ATC2609A_PMU_CHGIADC 0x64
+#define ATC2609A_PMU_IREFADC 0x65
+#define ATC2609A_PMU_BAKBATADC 0x66
+#define ATC2609A_PMU_ICTEMPADC 0x67
+#define ATC2609A_PMU_AUXADC0 0x68
+#define ATC2609A_PMU_AUXADC1 0x69
+#define ATC2609A_PMU_AUXADC2 0x6A
+#define ATC2609A_PMU_AUXADC3 0x6B
+#define ATC2609A_PMU_ICTEMPADC_ADJ 0x6C
+#define ATC2609A_PMU_BDG_CTL 0x6D
+#define ATC2609A_RTC_CTL 0x6E
+#define ATC2609A_RTC_MSALM 0x6F
+#define ATC2609A_RTC_HALM 0x70
+#define ATC2609A_RTC_YMDALM 0x71
+#define ATC2609A_RTC_MS 0x72
+#define ATC2609A_RTC_H 0x73
+#define ATC2609A_RTC_DC 0x74
+#define ATC2609A_RTC_YMD 0x75
+#define ATC2609A_EFUSE_DAT 0x76
+#define ATC2609A_EFUSECRTL1 0x77
+#define ATC2609A_EFUSECRTL2 0x78
+#define ATC2609A_PMU_DC4_CTL0 0x79
+#define ATC2609A_PMU_DC4_CTL1 0x7A
+#define ATC2609A_PMU_DC4_CTL2 0x7B
+#define ATC2609A_PMU_DC4_CTL3 0x7C
+#define ATC2609A_PMU_DC4_CTL4 0x7D
+#define ATC2609A_PMU_DC4_CTL5 0x7E
+#define ATC2609A_PMU_DC4_CTL6 0x7F
+#define ATC2609A_PMU_PWR_STATUS 0x80
+#define ATC2609A_PMU_S2_PWR 0x81
+#define ATC2609A_CLMT_CTL0 0x82
+#define ATC2609A_CLMT_DATA0 0x83
+#define ATC2609A_CLMT_DATA1 0x84
+#define ATC2609A_CLMT_DATA2 0x85
+#define ATC2609A_CLMT_DATA3 0x86
+#define ATC2609A_CLMT_ADD0 0x87
+#define ATC2609A_CLMT_ADD1 0x88
+#define ATC2609A_CLMT_OCV_TABLE 0x89
+#define ATC2609A_CLMT_R_TABLE 0x8A
+#define ATC2609A_PMU_PWRON_CTL0 0x8D
+#define ATC2609A_PMU_PWRON_CTL1 0x8E
+#define ATC2609A_PMU_PWRON_CTL2 0x8F
+#define ATC2609A_IRC_CTL 0x90
+#define ATC2609A_IRC_STAT 0x91
+#define ATC2609A_IRC_CC 0x92
+#define ATC2609A_IRC_KDC 0x93
+#define ATC2609A_IRC_WK 0x94
+#define ATC2609A_IRC_RCC 0x95
+
+/* AUDIO_OUT Registers */
+#define ATC2609A_AUDIOINOUT_CTL 0xA0
+#define ATC2609A_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2609A_DAC_DIGITALCTL 0xA2
+#define ATC2609A_DAC_VOLUMECTL0 0xA3
+#define ATC2609A_DAC_ANALOG0 0xA4
+#define ATC2609A_DAC_ANALOG1 0xA5
+#define ATC2609A_DAC_ANALOG2 0xA6
+#define ATC2609A_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2609A_ADC_DIGITALCTL 0xA8
+#define ATC2609A_ADC_HPFCTL 0xA9
+#define ATC2609A_ADC_CTL 0xAA
+#define ATC2609A_AGC_CTL0 0xAB
+#define ATC2609A_AGC_CTL1 0xAC
+#define ATC2609A_AGC_CTL2 0xAD
+#define ATC2609A_ADC_ANALOG0 0xAE
+#define ATC2609A_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2609A_PCM0_CTL 0xB0
+#define ATC2609A_PCM1_CTL 0xB1
+#define ATC2609A_PCM2_CTL 0xB2
+#define ATC2609A_PCMIF_CTL 0xB3
+
+/* CMU_CONTROL Registers */
+#define ATC2609A_CMU_DEVRST 0xC1
+
+/* INTS Registers */
+#define ATC2609A_INTS_PD 0xC8
+#define ATC2609A_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2609A_MFP_CTL 0xD0
+#define ATC2609A_PAD_VSEL 0xD1
+#define ATC2609A_GPIO_OUTEN 0xD2
+#define ATC2609A_GPIO_INEN 0xD3
+#define ATC2609A_GPIO_DAT 0xD4
+#define ATC2609A_PAD_DRV 0xD5
+#define ATC2609A_PAD_EN 0xD6
+#define ATC2609A_DEBUG_SEL 0xD7
+#define ATC2609A_DEBUG_IE 0xD8
+#define ATC2609A_DEBUG_OE 0xD9
+#define ATC2609A_CHIP_VER 0xDC
+
+/* PWSI Registers */
+#define ATC2609A_PWSI_CTL 0xF0
+#define ATC2609A_PWSI_STATUS 0xF1
+
+/* TWSI Registers */
+#define ATC2609A_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2609A_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2609A_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2609A_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2609A_PMU_SYS_CTL0_WKIRQ_WK_EN BIT(11)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2609A_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2609A_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2609A_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2609A_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2609A_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2609A_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2609A_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2609A_PMU_SYS_CTL1_RESTART_WK_FLAG BIT(10)
+#define ATC2609A_PMU_SYS_CTL1_WKIRQ_WK_FLAG BIT(11)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2609A_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2609A_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2609A_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2609A_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LSP_INT_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2609A_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2609A_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2609A_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2609A_INTS_MSK_AUDIO BIT(0)
+#define ATC2609A_INTS_MSK_OV BIT(1)
+#define ATC2609A_INTS_MSK_OC BIT(2)
+#define ATC2609A_INTS_MSK_OT BIT(3)
+#define ATC2609A_INTS_MSK_UV BIT(4)
+#define ATC2609A_INTS_MSK_ALARM BIT(5)
+#define ATC2609A_INTS_MSK_ONOFF BIT(6)
+#define ATC2609A_INTS_MSK_WKUP BIT(7)
+#define ATC2609A_INTS_MSK_IR BIT(8)
+#define ATC2609A_INTS_MSK_REMCON BIT(9)
+#define ATC2609A_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2609A_CMU_DEVRST_AUDIO BIT(0)
+#define ATC2609A_CMU_DEVRST_MFP BIT(1)
+#define ATC2609A_CMU_DEVRST_INTS BIT(2)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2609A_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2609A_H */
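
Note the one-to-one correspondence between enum atc2609a_irq_def and the
ATC2609A_INTS_MSK_* bits: IRQ line n is masked by BIT(n). That lets a
regmap-irq table be written mechanically; a hedged fragment, with the register
offset (0, i.e. a single status/mask register pair) being an assumption:

	static const struct regmap_irq atc2609a_regmap_irqs[] = {
		REGMAP_IRQ_REG(ATC2609A_IRQ_AUDIO,    0, ATC2609A_INTS_MSK_AUDIO),
		REGMAP_IRQ_REG(ATC2609A_IRQ_OV,       0, ATC2609A_INTS_MSK_OV),
		/* ...the remaining entries follow the same pattern... */
		REGMAP_IRQ_REG(ATC2609A_IRQ_POWER_IN, 0, ATC2609A_INTS_MSK_POWERIN),
	};
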
diff --git a/include/linux/mfd/atc260x/core.h b/include/linux/mfd/atc260x/core.h
new file mode 100644
index 000000000000..777b6c345d44
--- /dev/null
+++ b/include/linux/mfd/atc260x/core.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core MFD defines for ATC260x PMICs
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_CORE_H
+#define __LINUX_MFD_ATC260X_CORE_H
+
+#include <linux/mfd/atc260x/atc2603c.h>
+#include <linux/mfd/atc260x/atc2609a.h>
+
+enum atc260x_type {
+ ATC2603A = 0,
+ ATC2603C,
+ ATC2609A,
+};
+
+enum atc260x_ver {
+ ATC260X_A = 0,
+ ATC260X_B,
+ ATC260X_C,
+ ATC260X_D,
+ ATC260X_E,
+ ATC260X_F,
+ ATC260X_G,
+ ATC260X_H,
+};
+
+struct atc260x {
+ struct device *dev;
+
+ struct regmap *regmap;
+ const struct regmap_irq_chip *regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct mutex *regmap_mutex; /* mutex for custom regmap locking */
+
+ const struct mfd_cell *cells;
+ int nr_cells;
+ int irq;
+
+ enum atc260x_type ic_type;
+ enum atc260x_ver ic_ver;
+ const char *type_name;
+ unsigned int rev_reg;
+
+ const struct atc260x_init_regs *init_regs; /* regs for device init */
+};
+
+struct regmap_config;
+
+int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_cfg);
+int atc260x_device_probe(struct atc260x *atc260x);
+
+#endif /* __LINUX_MFD_ATC260X_CORE_H */
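
The two exported helpers suggest a two-step probe in the bus glue: let
atc260x_match_device() identify the variant and fill in the regmap config,
create the regmap on the transport, then hand off to atc260x_device_probe().
A hedged I2C sketch; the devm_regmap_init_i2c() call and the field assignments
are assumptions about how the transport driver is written:

	static int atc260x_i2c_probe(struct i2c_client *client)
	{
		struct regmap_config regmap_cfg;
		struct atc260x *atc260x;
		int ret;

		atc260x = devm_kzalloc(&client->dev, sizeof(*atc260x), GFP_KERNEL);
		if (!atc260x)
			return -ENOMEM;

		atc260x->dev = &client->dev;
		atc260x->irq = client->irq;

		ret = atc260x_match_device(atc260x, &regmap_cfg);
		if (ret)
			return ret;

		atc260x->regmap = devm_regmap_init_i2c(client, &regmap_cfg);
		if (IS_ERR(atc260x->regmap))
			return PTR_ERR(atc260x->regmap);

		return atc260x_device_probe(atc260x);
	}
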
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index fd5957c042da..f1755163dd9f 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,18 +12,22 @@
enum axp20x_variants {
AXP152_ID = 0,
+ AXP192_ID,
AXP202_ID,
AXP209_ID,
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP313A_ID,
AXP803_ID,
AXP806_ID,
AXP809_ID,
AXP813_ID,
+ AXP15060_ID,
NR_AXP20X_VARIANTS,
};
+#define AXP192_DATACACHE(m) (0x06 + (m))
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
@@ -45,6 +49,13 @@ enum axp20x_variants {
#define AXP152_DCDC_FREQ 0x37
#define AXP152_DCDC_MODE 0x80
+#define AXP192_USB_OTG_STATUS 0x04
+#define AXP192_PWR_OUT_CTRL 0x12
+#define AXP192_DCDC2_V_OUT 0x23
+#define AXP192_DCDC1_V_OUT 0x26
+#define AXP192_DCDC3_V_OUT 0x27
+#define AXP192_LDO2_3_V_OUT 0x28
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -91,6 +102,17 @@ enum axp20x_variants {
#define AXP22X_ALDO3_V_OUT 0x2a
#define AXP22X_CHRG_CTRL3 0x35
+#define AXP313A_ON_INDICATE 0x00
+#define AXP313A_OUTPUT_CONTROL 0x10
+#define AXP313A_DCDC1_CONRTOL 0x13
+#define AXP313A_DCDC2_CONRTOL 0x14
+#define AXP313A_DCDC3_CONRTOL 0x15
+#define AXP313A_ALDO1_CONRTOL 0x16
+#define AXP313A_DLDO1_CONRTOL 0x17
+#define AXP313A_SHUTDOWN_CTRL 0x1a
+#define AXP313A_IRQ_EN 0x20
+#define AXP313A_IRQ_STATE 0x21
+
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
#define AXP806_PWR_OUT_CTRL1 0x10
@@ -131,6 +153,39 @@ enum axp20x_variants {
/* Other DCDC regulator control registers are the same as AXP803 */
#define AXP813_DCDC7_V_OUT 0x26
+#define AXP15060_STARTUP_SRC 0x00
+#define AXP15060_PWR_OUT_CTRL1 0x10
+#define AXP15060_PWR_OUT_CTRL2 0x11
+#define AXP15060_PWR_OUT_CTRL3 0x12
+#define AXP15060_DCDC1_V_CTRL 0x13
+#define AXP15060_DCDC2_V_CTRL 0x14
+#define AXP15060_DCDC3_V_CTRL 0x15
+#define AXP15060_DCDC4_V_CTRL 0x16
+#define AXP15060_DCDC5_V_CTRL 0x17
+#define AXP15060_DCDC6_V_CTRL 0x18
+#define AXP15060_ALDO1_V_CTRL 0x19
+#define AXP15060_DCDC_MODE_CTRL1 0x1a
+#define AXP15060_DCDC_MODE_CTRL2 0x1b
+#define AXP15060_OUTPUT_MONITOR_DISCHARGE 0x1e
+#define AXP15060_IRQ_PWROK_VOFF 0x1f
+#define AXP15060_ALDO2_V_CTRL 0x20
+#define AXP15060_ALDO3_V_CTRL 0x21
+#define AXP15060_ALDO4_V_CTRL 0x22
+#define AXP15060_ALDO5_V_CTRL 0x23
+#define AXP15060_BLDO1_V_CTRL 0x24
+#define AXP15060_BLDO2_V_CTRL 0x25
+#define AXP15060_BLDO3_V_CTRL 0x26
+#define AXP15060_BLDO4_V_CTRL 0x27
+#define AXP15060_BLDO5_V_CTRL 0x28
+#define AXP15060_CLDO1_V_CTRL 0x29
+#define AXP15060_CLDO2_V_CTRL 0x2a
+#define AXP15060_CLDO3_V_CTRL 0x2b
+#define AXP15060_CLDO4_V_CTRL 0x2d
+#define AXP15060_CPUSLDO_V_CTRL 0x2e
+#define AXP15060_PWR_WAKEUP_CTRL 0x31
+#define AXP15060_PWR_DISABLE_DOWN_SEQ 0x32
+#define AXP15060_PEK_KEY 0x36
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
@@ -139,6 +194,17 @@ enum axp20x_variants {
#define AXP152_IRQ2_STATE 0x49
#define AXP152_IRQ3_STATE 0x4a
+#define AXP192_IRQ1_EN 0x40
+#define AXP192_IRQ2_EN 0x41
+#define AXP192_IRQ3_EN 0x42
+#define AXP192_IRQ4_EN 0x43
+#define AXP192_IRQ1_STATE 0x44
+#define AXP192_IRQ2_STATE 0x45
+#define AXP192_IRQ3_STATE 0x46
+#define AXP192_IRQ4_STATE 0x47
+#define AXP192_IRQ5_EN 0x4a
+#define AXP192_IRQ5_STATE 0x4d
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -152,7 +218,17 @@ enum axp20x_variants {
#define AXP20X_IRQ5_STATE 0x4c
#define AXP20X_IRQ6_STATE 0x4d
+#define AXP15060_IRQ1_EN 0x40
+#define AXP15060_IRQ2_EN 0x41
+#define AXP15060_IRQ1_STATE 0x48
+#define AXP15060_IRQ2_STATE 0x49
+
/* ADC */
+#define AXP192_GPIO2_V_ADC_H 0x68
+#define AXP192_GPIO2_V_ADC_L 0x69
+#define AXP192_GPIO3_V_ADC_H 0x6a
+#define AXP192_GPIO3_V_ADC_L 0x6b
+
#define AXP20X_ACIN_V_ADC_H 0x56
#define AXP20X_ACIN_V_ADC_L 0x57
#define AXP20X_ACIN_I_ADC_H 0x58
@@ -182,6 +258,8 @@ enum axp20x_variants {
#define AXP20X_IPSOUT_V_HIGH_L 0x7f
/* Power supply */
+#define AXP192_GPIO30_IN_RANGE 0x85
+
#define AXP20X_DCDC_MODE 0x80
#define AXP20X_ADC_EN1 0x82
#define AXP20X_ADC_EN2 0x83
@@ -210,6 +288,16 @@ enum axp20x_variants {
#define AXP152_PWM1_FREQ_Y 0x9c
#define AXP152_PWM1_DUTY_CYCLE 0x9d
+#define AXP192_GPIO0_CTRL 0x90
+#define AXP192_LDO_IO0_V_OUT 0x91
+#define AXP192_GPIO1_CTRL 0x92
+#define AXP192_GPIO2_CTRL 0x93
+#define AXP192_GPIO2_0_STATE 0x94
+#define AXP192_GPIO4_3_CTRL 0x95
+#define AXP192_GPIO4_3_STATE 0x96
+#define AXP192_GPIO2_0_PULL 0x97
+#define AXP192_N_RSTO_CTRL 0x9e
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -222,6 +310,8 @@ enum axp20x_variants {
#define AXP22X_GPIO_STATE 0x94
#define AXP22X_GPIO_PULL_DOWN 0x95
+#define AXP15060_CLDO4_GPIO2_MODESET 0x2c
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -288,6 +378,17 @@ enum axp20x_variants {
/* Regulators IDs */
enum {
+ AXP192_DCDC1 = 0,
+ AXP192_DCDC2,
+ AXP192_DCDC3,
+ AXP192_LDO1,
+ AXP192_LDO2,
+ AXP192_LDO3,
+ AXP192_LDO_IO0,
+ AXP192_REG_ID_MAX
+};
+
+enum {
AXP20X_LDO1 = 0,
AXP20X_LDO2,
AXP20X_LDO3,
@@ -323,6 +424,16 @@ enum {
};
enum {
+ AXP313A_DCDC1 = 0,
+ AXP313A_DCDC2,
+ AXP313A_DCDC3,
+ AXP313A_ALDO1,
+ AXP313A_DLDO1,
+ AXP313A_RTC_LDO,
+ AXP313A_REG_ID_MAX,
+};
+
+enum {
AXP806_DCDCA = 0,
AXP806_DCDCB,
AXP806_DCDCC,
@@ -419,6 +530,33 @@ enum {
AXP813_REG_ID_MAX,
};
+enum {
+ AXP15060_DCDC1 = 0,
+ AXP15060_DCDC2,
+ AXP15060_DCDC3,
+ AXP15060_DCDC4,
+ AXP15060_DCDC5,
+ AXP15060_DCDC6,
+ AXP15060_ALDO1,
+ AXP15060_ALDO2,
+ AXP15060_ALDO3,
+ AXP15060_ALDO4,
+ AXP15060_ALDO5,
+ AXP15060_BLDO1,
+ AXP15060_BLDO2,
+ AXP15060_BLDO3,
+ AXP15060_BLDO4,
+ AXP15060_BLDO5,
+ AXP15060_CLDO1,
+ AXP15060_CLDO2,
+ AXP15060_CLDO3,
+ AXP15060_CLDO4,
+ AXP15060_CPUSLDO,
+ AXP15060_SW,
+ AXP15060_RTC_LDO,
+ AXP15060_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
@@ -432,14 +570,51 @@ enum {
AXP152_IRQ_PEK_SHORT,
AXP152_IRQ_PEK_LONG,
AXP152_IRQ_TIMER,
- AXP152_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_PEK_RIS_EDGE,
AXP152_IRQ_GPIO3_INPUT,
AXP152_IRQ_GPIO2_INPUT,
AXP152_IRQ_GPIO1_INPUT,
AXP152_IRQ_GPIO0_INPUT,
};
+enum axp192_irqs {
+ AXP192_IRQ_ACIN_OVER_V = 1,
+ AXP192_IRQ_ACIN_PLUGIN,
+ AXP192_IRQ_ACIN_REMOVAL,
+ AXP192_IRQ_VBUS_OVER_V,
+ AXP192_IRQ_VBUS_PLUGIN,
+ AXP192_IRQ_VBUS_REMOVAL,
+ AXP192_IRQ_VBUS_V_LOW,
+ AXP192_IRQ_BATT_PLUGIN,
+ AXP192_IRQ_BATT_REMOVAL,
+ AXP192_IRQ_BATT_ENT_ACT_MODE,
+ AXP192_IRQ_BATT_EXIT_ACT_MODE,
+ AXP192_IRQ_CHARG,
+ AXP192_IRQ_CHARG_DONE,
+ AXP192_IRQ_BATT_TEMP_HIGH,
+ AXP192_IRQ_BATT_TEMP_LOW,
+ AXP192_IRQ_DIE_TEMP_HIGH,
+ AXP192_IRQ_CHARG_I_LOW,
+ AXP192_IRQ_DCDC1_V_LONG,
+ AXP192_IRQ_DCDC2_V_LONG,
+ AXP192_IRQ_DCDC3_V_LONG,
+ AXP192_IRQ_PEK_SHORT = 22,
+ AXP192_IRQ_PEK_LONG,
+ AXP192_IRQ_N_OE_PWR_ON,
+ AXP192_IRQ_N_OE_PWR_OFF,
+ AXP192_IRQ_VBUS_VALID,
+ AXP192_IRQ_VBUS_NOT_VALID,
+ AXP192_IRQ_VBUS_SESS_VALID,
+ AXP192_IRQ_VBUS_SESS_END,
+ AXP192_IRQ_LOW_PWR_LVL = 31,
+ AXP192_IRQ_TIMER,
+ AXP192_IRQ_GPIO2_INPUT = 37,
+ AXP192_IRQ_GPIO1_INPUT,
+ AXP192_IRQ_GPIO0_INPUT,
+};
+
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
@@ -472,8 +647,9 @@ enum {
AXP20X_IRQ_LOW_PWR_LVL1,
AXP20X_IRQ_LOW_PWR_LVL2,
AXP20X_IRQ_TIMER,
- AXP20X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP20X_IRQ_PEK_FAL_EDGE,
+ AXP20X_IRQ_PEK_RIS_EDGE,
AXP20X_IRQ_GPIO3_INPUT,
AXP20X_IRQ_GPIO2_INPUT,
AXP20X_IRQ_GPIO1_INPUT,
@@ -502,8 +678,9 @@ enum axp22x_irqs {
AXP22X_IRQ_LOW_PWR_LVL1,
AXP22X_IRQ_LOW_PWR_LVL2,
AXP22X_IRQ_TIMER,
- AXP22X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_PEK_RIS_EDGE,
AXP22X_IRQ_GPIO1_INPUT,
AXP22X_IRQ_GPIO0_INPUT,
};
@@ -545,6 +722,16 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp313a_irqs {
+ AXP313A_IRQ_DIE_TEMP_HIGH,
+ AXP313A_IRQ_DCDC2_V_LOW = 2,
+ AXP313A_IRQ_DCDC3_V_LOW,
+ AXP313A_IRQ_PEK_LONG,
+ AXP313A_IRQ_PEK_SHORT,
+ AXP313A_IRQ_PEK_FAL_EDGE,
+ AXP313A_IRQ_PEK_RIS_EDGE,
+};
+
enum axp803_irqs {
AXP803_IRQ_ACIN_OVER_V = 1,
AXP803_IRQ_ACIN_PLUGIN,
@@ -571,8 +758,9 @@ enum axp803_irqs {
AXP803_IRQ_LOW_PWR_LVL1,
AXP803_IRQ_LOW_PWR_LVL2,
AXP803_IRQ_TIMER,
- AXP803_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_RIS_EDGE,
AXP803_IRQ_PEK_SHORT,
AXP803_IRQ_PEK_LONG,
AXP803_IRQ_PEK_OVER_OFF,
@@ -623,8 +811,9 @@ enum axp809_irqs {
AXP809_IRQ_LOW_PWR_LVL1,
AXP809_IRQ_LOW_PWR_LVL2,
AXP809_IRQ_TIMER,
- AXP809_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_RIS_EDGE,
AXP809_IRQ_PEK_SHORT,
AXP809_IRQ_PEK_LONG,
AXP809_IRQ_PEK_OVER_OFF,
@@ -632,6 +821,23 @@ enum axp809_irqs {
AXP809_IRQ_GPIO0_INPUT,
};
+enum axp15060_irqs {
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV1 = 1,
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV2,
+ AXP15060_IRQ_DCDC1_V_LOW,
+ AXP15060_IRQ_DCDC2_V_LOW,
+ AXP15060_IRQ_DCDC3_V_LOW,
+ AXP15060_IRQ_DCDC4_V_LOW,
+ AXP15060_IRQ_DCDC5_V_LOW,
+ AXP15060_IRQ_DCDC6_V_LOW,
+ AXP15060_IRQ_PEK_LONG,
+ AXP15060_IRQ_PEK_SHORT,
+ AXP15060_IRQ_GPIO1_INPUT,
+ AXP15060_IRQ_PEK_FAL_EDGE,
+ AXP15060_IRQ_PEK_RIS_EDGE,
+ AXP15060_IRQ_GPIO2_INPUT,
+};
+
struct axp20x_dev {
struct device *dev;
int irq;
@@ -696,6 +902,6 @@ int axp20x_device_probe(struct axp20x_dev *axp20x);
*
* This tells the axp20x core to remove the associated MFD devices.
*/
-int axp20x_device_remove(struct axp20x_dev *axp20x);
+void axp20x_device_remove(struct axp20x_dev *axp20x);
#endif /* __LINUX_MFD_AXP20X_H */
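
The switch from int to void for axp20x_device_remove() matches the bus-core
convention that remove paths cannot usefully fail. A sketch of an updated
caller; the I2C glue shown is an assumption, not part of this header:

	static void axp20x_i2c_remove(struct i2c_client *client)
	{
		struct axp20x_dev *axp20x = i2c_get_clientdata(client);

		axp20x_device_remove(axp20x);
	}
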
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
index ed37dc40e82a..f70a810c55f7 100644
--- a/include/linux/mfd/bcm2835-pm.h
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -9,6 +9,7 @@ struct bcm2835_pm {
struct device *dev;
void __iomem *base;
void __iomem *asb;
+ void __iomem *rpivid_asb;
};
#endif /* BCM2835_MFD_PM_H */
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
index eb05569f752b..8efd99d07c9e 100644
--- a/include/linux/mfd/bd9571mwv.h
+++ b/include/linux/mfd/bd9571mwv.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * ROHM BD9571MWV-M driver
+ * ROHM BD9571MWV-M and BD9574MWF-M driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2020 Renesas Electronics Corporation
*
* Based on the TPS65086 driver
*/
@@ -21,11 +14,12 @@
#include <linux/device.h>
#include <linux/regmap.h>
-/* List of registers for BD9571MWV */
+/* List of registers for BD9571MWV and BD9574MWF */
#define BD9571MWV_VENDOR_CODE 0x00
#define BD9571MWV_VENDOR_CODE_VAL 0xdb
#define BD9571MWV_PRODUCT_CODE 0x01
-#define BD9571MWV_PRODUCT_CODE_VAL 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9571MWV 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9574MWF 0x74
#define BD9571MWV_PRODUCT_REVISION 0x02
#define BD9571MWV_I2C_FUSA_MODE 0x10
@@ -55,6 +49,7 @@
#define BD9571MWV_VD33_VID 0x44
#define BD9571MWV_DVFS_VINIT 0x50
+#define BD9574MWF_VD09_VINIT 0x51
#define BD9571MWV_DVFS_SETVMAX 0x52
#define BD9571MWV_DVFS_BOOSTVID 0x53
#define BD9571MWV_DVFS_SETVID 0x54
@@ -68,6 +63,7 @@
#define BD9571MWV_GPIO_INT_SET 0x64
#define BD9571MWV_GPIO_INT 0x65
#define BD9571MWV_GPIO_INTMASK 0x66
+#define BD9574MWF_GPIO_MUX 0x67
#define BD9571MWV_REG_KEEP(n) (0x70 + (n))
@@ -77,6 +73,8 @@
#define BD9571MWV_PROT_ERROR_STATUS2 0x83
#define BD9571MWV_PROT_ERROR_STATUS3 0x84
#define BD9571MWV_PROT_ERROR_STATUS4 0x85
+#define BD9574MWF_PROT_ERROR_STATUS5 0x86
+#define BD9574MWF_SYSTEM_ERROR_STATUS 0x87
#define BD9571MWV_INT_INTREQ 0x90
#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0)
@@ -89,6 +87,12 @@
#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7)
#define BD9571MWV_INT_INTMASK 0x91
+#define BD9574MWF_SSCG_CNT 0xA0
+#define BD9574MWF_POFFB_MRB 0xA1
+#define BD9574MWF_SMRB_WR_PROT 0xA2
+#define BD9574MWF_SMRB_ASSERT 0xA3
+#define BD9574MWF_SMRB_STATUS 0xA4
+
#define BD9571MWV_ACCESS_KEY 0xff
/* Define the BD9571MWV IRQ numbers */
@@ -98,23 +102,8 @@ enum bd9571mwv_irqs {
BD9571MWV_IRQ_MD2_E2,
BD9571MWV_IRQ_PROT_ERR,
BD9571MWV_IRQ_GP,
- BD9571MWV_IRQ_128H_OF,
+ BD9571MWV_IRQ_128H_OF, /* BKUP_HOLD on BD9574MWF */
BD9571MWV_IRQ_WDT_OF,
BD9571MWV_IRQ_BKUP_TRG,
};
-
-/**
- * struct bd9571mwv - state holder for the bd9571mwv driver
- *
- * Device data may be used to access the BD9571MWV chip
- */
-struct bd9571mwv {
- struct device *dev;
- struct regmap *regmap;
-
- /* IRQ Data */
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
#endif /* __LINUX_MFD_BD9571MWV_H */
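
With two product codes defined instead of one, chip detection becomes a
read-and-switch. A hedged sketch; the regmap handle and the error handling
are assumptions about the surrounding driver:

	unsigned int product;
	int ret;

	ret = regmap_read(regmap, BD9571MWV_PRODUCT_CODE, &product);
	if (ret)
		return ret;

	switch (product) {
	case BD9571MWV_PRODUCT_CODE_BD9571MWV:
		/* register BD9571MWV regulators/cells */
		break;
	case BD9571MWV_PRODUCT_CODE_BD9574MWF:
		/* register BD9574MWF regulators/cells */
		break;
	default:
		return -ENODEV;
	}
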
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 4b35baa14d30..e8bcad641d8c 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -28,13 +28,13 @@
.id = (_id), \
}
-#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
-#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \
+#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \
MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
-#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
+#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \
MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
@@ -50,7 +50,7 @@
#define MFD_DEP_LEVEL_HIGH 1
struct irq_domain;
-struct property_entry;
+struct software_node;
/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
struct mfd_cell_acpi_match {
@@ -68,9 +68,6 @@ struct mfd_cell {
int id;
int level;
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
-
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
@@ -78,8 +75,11 @@ struct mfd_cell {
void *platform_data;
size_t pdata_size;
- /* device properties passed to the sub devices drivers */
- const struct property_entry *properties;
+ /* Matches ACPI */
+ const struct mfd_cell_acpi_match *acpi_match;
+
+ /* Software node for the device. */
+ const struct software_node *swnode;
/*
* Device Tree compatible string
@@ -88,18 +88,15 @@ struct mfd_cell {
const char *of_compatible;
/*
- * Address as defined in Device Tree. Used to compement 'of_compatible'
+ * Address as defined in Device Tree. Used to complement 'of_compatible'
* (above) when matching OF nodes with devices that have identical
* compatible strings
*/
- const u64 of_reg;
+ u64 of_reg;
/* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
bool use_of_reg;
- /* Matches ACPI */
- const struct mfd_cell_acpi_match *acpi_match;
-
/*
* These resources can be specified relative to the parent device.
* For accessing hardware you should use resources from the platform dev
@@ -119,20 +116,11 @@ struct mfd_cell {
/* A list of regulator supplies that should be mapped to the MFD
* device rather than the child device when requested
*/
- const char * const *parent_supplies;
int num_parent_supplies;
+ const char * const *parent_supplies;
};
/*
- * Convenience functions for clients using shared cells. Refcounting
- * happens automatically, with the cell's enable/disable callbacks
- * being called only when a device is first being enabled or no other
- * clients are making use of it.
- */
-extern int mfd_cell_enable(struct platform_device *pdev);
-extern int mfd_cell_disable(struct platform_device *pdev);
-
-/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the mfd_cell that created it.
*/
diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h
new file mode 100644
index 000000000000..c39a49269cb7
--- /dev/null
+++ b/include/linux/mfd/cs42l43-regs.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs42l43 register definitions
+ *
+ * Copyright (c) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_REGS_H
+#define CS42L43_CORE_REGS_H
+
+/* Registers */
+#define CS42L43_GEN_INT_STAT_1 0x000000C0
+#define CS42L43_GEN_INT_MASK_1 0x000000C1
+#define CS42L43_DEVID 0x00003000
+#define CS42L43_REVID 0x00003004
+#define CS42L43_RELID 0x0000300C
+#define CS42L43_SFT_RESET 0x00003020
+#define CS42L43_DRV_CTRL1 0x00006004
+#define CS42L43_DRV_CTRL3 0x0000600C
+#define CS42L43_DRV_CTRL4 0x00006010
+#define CS42L43_DRV_CTRL_5 0x00006014
+#define CS42L43_GPIO_CTRL1 0x00006034
+#define CS42L43_GPIO_CTRL2 0x00006038
+#define CS42L43_GPIO_STS 0x0000603C
+#define CS42L43_GPIO_FN_SEL 0x00006040
+#define CS42L43_MCLK_SRC_SEL 0x00007004
+#define CS42L43_CCM_BLK_CLK_CONTROL 0x00007010
+#define CS42L43_SAMPLE_RATE1 0x00007014
+#define CS42L43_SAMPLE_RATE2 0x00007018
+#define CS42L43_SAMPLE_RATE3 0x0000701C
+#define CS42L43_SAMPLE_RATE4 0x00007020
+#define CS42L43_PLL_CONTROL 0x00007034
+#define CS42L43_FS_SELECT1 0x00007038
+#define CS42L43_FS_SELECT2 0x0000703C
+#define CS42L43_FS_SELECT3 0x00007040
+#define CS42L43_FS_SELECT4 0x00007044
+#define CS42L43_PDM_CONTROL 0x0000704C
+#define CS42L43_ASP_CLK_CONFIG1 0x00007058
+#define CS42L43_ASP_CLK_CONFIG2 0x0000705C
+#define CS42L43_OSC_DIV_SEL 0x00007068
+#define CS42L43_ADC_B_CTRL1 0x00008000
+#define CS42L43_ADC_B_CTRL2 0x00008004
+#define CS42L43_DECIM_HPF_WNF_CTRL1 0x0000803C
+#define CS42L43_DECIM_HPF_WNF_CTRL2 0x00008040
+#define CS42L43_DECIM_HPF_WNF_CTRL3 0x00008044
+#define CS42L43_DECIM_HPF_WNF_CTRL4 0x00008048
+#define CS42L43_DMIC_PDM_CTRL 0x0000804C
+#define CS42L43_DECIM_VOL_CTRL_CH1_CH2 0x00008050
+#define CS42L43_DECIM_VOL_CTRL_CH3_CH4 0x00008054
+#define CS42L43_DECIM_VOL_CTRL_UPDATE 0x00008058
+#define CS42L43_INTP_VOLUME_CTRL1 0x00009008
+#define CS42L43_INTP_VOLUME_CTRL2 0x0000900C
+#define CS42L43_AMP1_2_VOL_RAMP 0x00009010
+#define CS42L43_ASP_CTRL 0x0000A000
+#define CS42L43_ASP_FSYNC_CTRL1 0x0000A004
+#define CS42L43_ASP_FSYNC_CTRL2 0x0000A008
+#define CS42L43_ASP_FSYNC_CTRL3 0x0000A00C
+#define CS42L43_ASP_FSYNC_CTRL4 0x0000A010
+#define CS42L43_ASP_DATA_CTRL 0x0000A018
+#define CS42L43_ASP_RX_EN 0x0000A020
+#define CS42L43_ASP_TX_EN 0x0000A024
+#define CS42L43_ASP_RX_CH1_CTRL 0x0000A028
+#define CS42L43_ASP_RX_CH2_CTRL 0x0000A02C
+#define CS42L43_ASP_RX_CH3_CTRL 0x0000A030
+#define CS42L43_ASP_RX_CH4_CTRL 0x0000A034
+#define CS42L43_ASP_RX_CH5_CTRL 0x0000A038
+#define CS42L43_ASP_RX_CH6_CTRL 0x0000A03C
+#define CS42L43_ASP_TX_CH1_CTRL 0x0000A068
+#define CS42L43_ASP_TX_CH2_CTRL 0x0000A06C
+#define CS42L43_ASP_TX_CH3_CTRL 0x0000A070
+#define CS42L43_ASP_TX_CH4_CTRL 0x0000A074
+#define CS42L43_ASP_TX_CH5_CTRL 0x0000A078
+#define CS42L43_ASP_TX_CH6_CTRL 0x0000A07C
+#define CS42L43_OTP_REVISION_ID 0x0000B02C
+#define CS42L43_ASPTX1_INPUT 0x0000C200
+#define CS42L43_ASPTX2_INPUT 0x0000C210
+#define CS42L43_ASPTX3_INPUT 0x0000C220
+#define CS42L43_ASPTX4_INPUT 0x0000C230
+#define CS42L43_ASPTX5_INPUT 0x0000C240
+#define CS42L43_ASPTX6_INPUT 0x0000C250
+#define CS42L43_SWIRE_DP1_CH1_INPUT 0x0000C280
+#define CS42L43_SWIRE_DP1_CH2_INPUT 0x0000C290
+#define CS42L43_SWIRE_DP1_CH3_INPUT 0x0000C2A0
+#define CS42L43_SWIRE_DP1_CH4_INPUT 0x0000C2B0
+#define CS42L43_SWIRE_DP2_CH1_INPUT 0x0000C2C0
+#define CS42L43_SWIRE_DP2_CH2_INPUT 0x0000C2D0
+#define CS42L43_SWIRE_DP3_CH1_INPUT 0x0000C2E0
+#define CS42L43_SWIRE_DP3_CH2_INPUT 0x0000C2F0
+#define CS42L43_SWIRE_DP4_CH1_INPUT 0x0000C300
+#define CS42L43_SWIRE_DP4_CH2_INPUT 0x0000C310
+#define CS42L43_ASRC_INT1_INPUT1 0x0000C400
+#define CS42L43_ASRC_INT2_INPUT1 0x0000C410
+#define CS42L43_ASRC_INT3_INPUT1 0x0000C420
+#define CS42L43_ASRC_INT4_INPUT1 0x0000C430
+#define CS42L43_ASRC_DEC1_INPUT1 0x0000C440
+#define CS42L43_ASRC_DEC2_INPUT1 0x0000C450
+#define CS42L43_ASRC_DEC3_INPUT1 0x0000C460
+#define CS42L43_ASRC_DEC4_INPUT1 0x0000C470
+#define CS42L43_ISRC1INT1_INPUT1 0x0000C500
+#define CS42L43_ISRC1INT2_INPUT1 0x0000C510
+#define CS42L43_ISRC1DEC1_INPUT1 0x0000C520
+#define CS42L43_ISRC1DEC2_INPUT1 0x0000C530
+#define CS42L43_ISRC2INT1_INPUT1 0x0000C540
+#define CS42L43_ISRC2INT2_INPUT1 0x0000C550
+#define CS42L43_ISRC2DEC1_INPUT1 0x0000C560
+#define CS42L43_ISRC2DEC2_INPUT1 0x0000C570
+#define CS42L43_EQ1MIX_INPUT1 0x0000C580
+#define CS42L43_EQ1MIX_INPUT2 0x0000C584
+#define CS42L43_EQ1MIX_INPUT3 0x0000C588
+#define CS42L43_EQ1MIX_INPUT4 0x0000C58C
+#define CS42L43_EQ2MIX_INPUT1 0x0000C590
+#define CS42L43_EQ2MIX_INPUT2 0x0000C594
+#define CS42L43_EQ2MIX_INPUT3 0x0000C598
+#define CS42L43_EQ2MIX_INPUT4 0x0000C59C
+#define CS42L43_SPDIF1_INPUT1 0x0000C600
+#define CS42L43_SPDIF2_INPUT1 0x0000C610
+#define CS42L43_AMP1MIX_INPUT1 0x0000C620
+#define CS42L43_AMP1MIX_INPUT2 0x0000C624
+#define CS42L43_AMP1MIX_INPUT3 0x0000C628
+#define CS42L43_AMP1MIX_INPUT4 0x0000C62C
+#define CS42L43_AMP2MIX_INPUT1 0x0000C630
+#define CS42L43_AMP2MIX_INPUT2 0x0000C634
+#define CS42L43_AMP2MIX_INPUT3 0x0000C638
+#define CS42L43_AMP2MIX_INPUT4 0x0000C63C
+#define CS42L43_AMP3MIX_INPUT1 0x0000C640
+#define CS42L43_AMP3MIX_INPUT2 0x0000C644
+#define CS42L43_AMP3MIX_INPUT3 0x0000C648
+#define CS42L43_AMP3MIX_INPUT4 0x0000C64C
+#define CS42L43_AMP4MIX_INPUT1 0x0000C650
+#define CS42L43_AMP4MIX_INPUT2 0x0000C654
+#define CS42L43_AMP4MIX_INPUT3 0x0000C658
+#define CS42L43_AMP4MIX_INPUT4 0x0000C65C
+#define CS42L43_ASRC_INT_ENABLES 0x0000E000
+#define CS42L43_ASRC_DEC_ENABLES 0x0000E004
+#define CS42L43_PDNCNTL 0x00010000
+#define CS42L43_RINGSENSE_DEB_CTRL 0x0001001C
+#define CS42L43_TIPSENSE_DEB_CTRL 0x00010020
+#define CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS 0x00010028
+#define CS42L43_HS2 0x00010040
+#define CS42L43_HS_STAT 0x00010048
+#define CS42L43_MCU_SW_INTERRUPT 0x00010094
+#define CS42L43_STEREO_MIC_CTRL 0x000100A4
+#define CS42L43_STEREO_MIC_CLAMP_CTRL 0x000100C4
+#define CS42L43_BLOCK_EN2 0x00010104
+#define CS42L43_BLOCK_EN3 0x00010108
+#define CS42L43_BLOCK_EN4 0x0001010C
+#define CS42L43_BLOCK_EN5 0x00010110
+#define CS42L43_BLOCK_EN6 0x00010114
+#define CS42L43_BLOCK_EN7 0x00010118
+#define CS42L43_BLOCK_EN8 0x0001011C
+#define CS42L43_BLOCK_EN9 0x00010120
+#define CS42L43_BLOCK_EN10 0x00010124
+#define CS42L43_BLOCK_EN11 0x00010128
+#define CS42L43_TONE_CH1_CTRL 0x00010134
+#define CS42L43_TONE_CH2_CTRL 0x00010138
+#define CS42L43_MIC_DETECT_CONTROL_1 0x00011074
+#define CS42L43_DETECT_STATUS_1 0x0001107C
+#define CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL 0x00011090
+#define CS42L43_MIC_DETECT_CONTROL_ANDROID 0x000110B0
+#define CS42L43_ISRC1_CTRL 0x00012004
+#define CS42L43_ISRC2_CTRL 0x00013004
+#define CS42L43_CTRL_REG 0x00014000
+#define CS42L43_FDIV_FRAC 0x00014004
+#define CS42L43_CAL_RATIO 0x00014008
+#define CS42L43_SPI_CLK_CONFIG1 0x00016004
+#define CS42L43_SPI_CONFIG1 0x00016010
+#define CS42L43_SPI_CONFIG2 0x00016014
+#define CS42L43_SPI_CONFIG3 0x00016018
+#define CS42L43_SPI_CONFIG4 0x00016024
+#define CS42L43_SPI_STATUS1 0x00016100
+#define CS42L43_SPI_STATUS2 0x00016104
+#define CS42L43_TRAN_CONFIG1 0x00016200
+#define CS42L43_TRAN_CONFIG2 0x00016204
+#define CS42L43_TRAN_CONFIG3 0x00016208
+#define CS42L43_TRAN_CONFIG4 0x0001620C
+#define CS42L43_TRAN_CONFIG5 0x00016220
+#define CS42L43_TRAN_CONFIG6 0x00016224
+#define CS42L43_TRAN_CONFIG7 0x00016228
+#define CS42L43_TRAN_CONFIG8 0x0001622C
+#define CS42L43_TRAN_STATUS1 0x00016300
+#define CS42L43_TRAN_STATUS2 0x00016304
+#define CS42L43_TRAN_STATUS3 0x00016308
+#define CS42L43_TX_DATA 0x00016400
+#define CS42L43_RX_DATA 0x00016600
+#define CS42L43_DACCNFG1 0x00017000
+#define CS42L43_DACCNFG2 0x00017004
+#define CS42L43_HPPATHVOL 0x0001700C
+#define CS42L43_PGAVOL 0x00017014
+#define CS42L43_LOADDETRESULTS 0x00017018
+#define CS42L43_LOADDETENA 0x00017024
+#define CS42L43_CTRL 0x00017028
+#define CS42L43_COEFF_DATA_IN0 0x00018000
+#define CS42L43_COEFF_RD_WR0 0x00018008
+#define CS42L43_INIT_DONE0 0x00018010
+#define CS42L43_START_EQZ0 0x00018014
+#define CS42L43_MUTE_EQ_IN0 0x0001801C
+#define CS42L43_DECIM_INT 0x0001B000
+#define CS42L43_EQ_INT 0x0001B004
+#define CS42L43_ASP_INT 0x0001B008
+#define CS42L43_PLL_INT 0x0001B00C
+#define CS42L43_SOFT_INT 0x0001B010
+#define CS42L43_SWIRE_INT 0x0001B014
+#define CS42L43_MSM_INT 0x0001B018
+#define CS42L43_ACC_DET_INT 0x0001B01C
+#define CS42L43_I2C_TGT_INT 0x0001B020
+#define CS42L43_SPI_MSTR_INT 0x0001B024
+#define CS42L43_SW_TO_SPI_BRIDGE_INT 0x0001B028
+#define CS42L43_OTP_INT 0x0001B02C
+#define CS42L43_CLASS_D_AMP_INT 0x0001B030
+#define CS42L43_GPIO_INT 0x0001B034
+#define CS42L43_ASRC_INT 0x0001B038
+#define CS42L43_HPOUT_INT 0x0001B03C
+#define CS42L43_DECIM_MASK 0x0001B0A0
+#define CS42L43_EQ_MIX_MASK 0x0001B0A4
+#define CS42L43_ASP_MASK 0x0001B0A8
+#define CS42L43_PLL_MASK 0x0001B0AC
+#define CS42L43_SOFT_MASK 0x0001B0B0
+#define CS42L43_SWIRE_MASK 0x0001B0B4
+#define CS42L43_MSM_MASK 0x0001B0B8
+#define CS42L43_ACC_DET_MASK 0x0001B0BC
+#define CS42L43_I2C_TGT_MASK 0x0001B0C0
+#define CS42L43_SPI_MSTR_MASK 0x0001B0C4
+#define CS42L43_SW_TO_SPI_BRIDGE_MASK 0x0001B0C8
+#define CS42L43_OTP_MASK 0x0001B0CC
+#define CS42L43_CLASS_D_AMP_MASK 0x0001B0D0
+#define CS42L43_GPIO_INT_MASK 0x0001B0D4
+#define CS42L43_ASRC_MASK 0x0001B0D8
+#define CS42L43_HPOUT_MASK 0x0001B0DC
+#define CS42L43_DECIM_INT_SHADOW 0x0001B300
+#define CS42L43_EQ_MIX_INT_SHADOW 0x0001B304
+#define CS42L43_ASP_INT_SHADOW 0x0001B308
+#define CS42L43_PLL_INT_SHADOW 0x0001B30C
+#define CS42L43_SOFT_INT_SHADOW 0x0001B310
+#define CS42L43_SWIRE_INT_SHADOW 0x0001B314
+#define CS42L43_MSM_INT_SHADOW 0x0001B318
+#define CS42L43_ACC_DET_INT_SHADOW 0x0001B31C
+#define CS42L43_I2C_TGT_INT_SHADOW 0x0001B320
+#define CS42L43_SPI_MSTR_INT_SHADOW 0x0001B324
+#define CS42L43_SW_TO_SPI_BRIDGE_SHADOW 0x0001B328
+#define CS42L43_OTP_INT_SHADOW 0x0001B32C
+#define CS42L43_CLASS_D_AMP_INT_SHADOW 0x0001B330
+#define CS42L43_GPIO_SHADOW 0x0001B334
+#define CS42L43_ASRC_SHADOW 0x0001B338
+#define CS42L43_HP_OUT_SHADOW 0x0001B33C
+#define CS42L43_BOOT_CONTROL 0x00101000
+#define CS42L43_BLOCK_EN 0x00101008
+#define CS42L43_SHUTTER_CONTROL 0x0010100C
+#define CS42L43_MCU_SW_REV 0x00114000
+#define CS42L43_PATCH_START_ADDR 0x00114004
+#define CS42L43_NEED_CONFIGS 0x0011400C
+#define CS42L43_BOOT_STATUS 0x0011401C
+#define CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS 0x0011F8F8
+#define CS42L43_FW_MISSION_CTRL_NEED_CONFIGS 0x0011FE00
+#define CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS 0x0011FE04
+#define CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x0011FE0C
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x0011FE10
+#define CS42L43_MCU_RAM_MAX 0x0011FFFF
+
+/* CS42L43_DEVID */
+#define CS42L43_DEVID_VAL 0x00042A43
+
+/* CS42L43_GEN_INT_STAT_1 */
+#define CS42L43_INT_STAT_GEN1_MASK 0x00000001
+#define CS42L43_INT_STAT_GEN1_SHIFT 0
+
+/* CS42L43_SFT_RESET */
+#define CS42L43_SFT_RESET_MASK 0xFF000000
+#define CS42L43_SFT_RESET_SHIFT 24
+
+#define CS42L43_SFT_RESET_VAL 0x5A000000
+
+/* CS42L43_DRV_CTRL1 */
+#define CS42L43_ASP_DOUT_DRV_MASK 0x00038000
+#define CS42L43_ASP_DOUT_DRV_SHIFT 15
+#define CS42L43_ASP_FSYNC_DRV_MASK 0x00000E00
+#define CS42L43_ASP_FSYNC_DRV_SHIFT 9
+#define CS42L43_ASP_BCLK_DRV_MASK 0x000001C0
+#define CS42L43_ASP_BCLK_DRV_SHIFT 6
+
+/* CS42L43_DRV_CTRL3 */
+#define CS42L43_I2C_ADDR_DRV_MASK 0x30000000
+#define CS42L43_I2C_ADDR_DRV_SHIFT 28
+#define CS42L43_I2C_SDA_DRV_MASK 0x0C000000
+#define CS42L43_I2C_SDA_DRV_SHIFT 26
+#define CS42L43_PDMOUT2_CLK_DRV_MASK 0x00E00000
+#define CS42L43_PDMOUT2_CLK_DRV_SHIFT 21
+#define CS42L43_PDMOUT2_DATA_DRV_MASK 0x001C0000
+#define CS42L43_PDMOUT2_DATA_DRV_SHIFT 18
+#define CS42L43_PDMOUT1_CLK_DRV_MASK 0x00038000
+#define CS42L43_PDMOUT1_CLK_DRV_SHIFT 15
+#define CS42L43_PDMOUT1_DATA_DRV_MASK 0x00007000
+#define CS42L43_PDMOUT1_DATA_DRV_SHIFT 12
+#define CS42L43_SPI_MISO_DRV_MASK 0x00000038
+#define CS42L43_SPI_MISO_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL4 */
+#define CS42L43_GPIO3_DRV_MASK 0x00000E00
+#define CS42L43_GPIO3_DRV_SHIFT 9
+#define CS42L43_GPIO2_DRV_MASK 0x000001C0
+#define CS42L43_GPIO2_DRV_SHIFT 6
+#define CS42L43_GPIO1_DRV_MASK 0x00000038
+#define CS42L43_GPIO1_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL_5 */
+#define CS42L43_I2C_SCL_DRV_MASK 0x18000000
+#define CS42L43_I2C_SCL_DRV_SHIFT 27
+#define CS42L43_SPI_SCK_DRV_MASK 0x07000000
+#define CS42L43_SPI_SCK_DRV_SHIFT 24
+#define CS42L43_SPI_MOSI_DRV_MASK 0x00E00000
+#define CS42L43_SPI_MOSI_DRV_SHIFT 21
+#define CS42L43_SPI_SSB_DRV_MASK 0x001C0000
+#define CS42L43_SPI_SSB_DRV_SHIFT 18
+#define CS42L43_ASP_DIN_DRV_MASK 0x000001C0
+#define CS42L43_ASP_DIN_DRV_SHIFT 6
+
+/* CS42L43_GPIO_CTRL1 */
+#define CS42L43_GPIO3_POL_MASK 0x00040000
+#define CS42L43_GPIO3_POL_SHIFT 18
+#define CS42L43_GPIO2_POL_MASK 0x00020000
+#define CS42L43_GPIO2_POL_SHIFT 17
+#define CS42L43_GPIO1_POL_MASK 0x00010000
+#define CS42L43_GPIO1_POL_SHIFT 16
+#define CS42L43_GPIO3_LVL_MASK 0x00000400
+#define CS42L43_GPIO3_LVL_SHIFT 10
+#define CS42L43_GPIO2_LVL_MASK 0x00000200
+#define CS42L43_GPIO2_LVL_SHIFT 9
+#define CS42L43_GPIO1_LVL_MASK 0x00000100
+#define CS42L43_GPIO1_LVL_SHIFT 8
+#define CS42L43_GPIO3_DIR_MASK 0x00000004
+#define CS42L43_GPIO3_DIR_SHIFT 2
+#define CS42L43_GPIO2_DIR_MASK 0x00000002
+#define CS42L43_GPIO2_DIR_SHIFT 1
+#define CS42L43_GPIO1_DIR_MASK 0x00000001
+#define CS42L43_GPIO1_DIR_SHIFT 0
+
+/* CS42L43_GPIO_CTRL2 */
+#define CS42L43_GPIO3_DEGLITCH_BYP_MASK 0x00000004
+#define CS42L43_GPIO3_DEGLITCH_BYP_SHIFT 2
+#define CS42L43_GPIO2_DEGLITCH_BYP_MASK 0x00000002
+#define CS42L43_GPIO2_DEGLITCH_BYP_SHIFT 1
+#define CS42L43_GPIO1_DEGLITCH_BYP_MASK 0x00000001
+#define CS42L43_GPIO1_DEGLITCH_BYP_SHIFT 0
+
+/* CS42L43_GPIO_STS */
+#define CS42L43_GPIO3_STS_MASK 0x00000004
+#define CS42L43_GPIO3_STS_SHIFT 2
+#define CS42L43_GPIO2_STS_MASK 0x00000002
+#define CS42L43_GPIO2_STS_SHIFT 1
+#define CS42L43_GPIO1_STS_MASK 0x00000001
+#define CS42L43_GPIO1_STS_SHIFT 0
+
+/* CS42L43_GPIO_FN_SEL */
+#define CS42L43_GPIO3_FN_SEL_MASK 0x00000004
+#define CS42L43_GPIO3_FN_SEL_SHIFT 2
+#define CS42L43_GPIO1_FN_SEL_MASK 0x00000001
+#define CS42L43_GPIO1_FN_SEL_SHIFT 0
+
+/* CS42L43_MCLK_SRC_SEL */
+#define CS42L43_OSC_PLL_MCLK_SEL_MASK 0x00000001
+#define CS42L43_OSC_PLL_MCLK_SEL_SHIFT 0
+
+/* CS42L43_SAMPLE_RATE1..CS42L43_SAMPLE_RATE4 */
+#define CS42L43_SAMPLE_RATE_MASK 0x0000001F
+#define CS42L43_SAMPLE_RATE_SHIFT 0
+
+/* CS42L43_PLL_CONTROL */
+#define CS42L43_PLL_REFCLK_EN_MASK 0x00000008
+#define CS42L43_PLL_REFCLK_EN_SHIFT 3
+#define CS42L43_PLL_REFCLK_DIV_MASK 0x00000006
+#define CS42L43_PLL_REFCLK_DIV_SHIFT 1
+#define CS42L43_PLL_REFCLK_SRC_MASK 0x00000001
+#define CS42L43_PLL_REFCLK_SRC_SHIFT 0
+
+/* CS42L43_FS_SELECT1 */
+#define CS42L43_ASP_RATE_MASK 0x00000003
+#define CS42L43_ASP_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT2 */
+#define CS42L43_ASRC_DEC_OUT_RATE_MASK 0x000000C0
+#define CS42L43_ASRC_DEC_OUT_RATE_SHIFT 6
+#define CS42L43_ASRC_INT_OUT_RATE_MASK 0x00000030
+#define CS42L43_ASRC_INT_OUT_RATE_SHIFT 4
+#define CS42L43_ASRC_DEC_IN_RATE_MASK 0x0000000C
+#define CS42L43_ASRC_DEC_IN_RATE_SHIFT 2
+#define CS42L43_ASRC_INT_IN_RATE_MASK 0x00000003
+#define CS42L43_ASRC_INT_IN_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT3 */
+#define CS42L43_HPOUT_RATE_MASK 0x0000C000
+#define CS42L43_HPOUT_RATE_SHIFT 14
+#define CS42L43_EQZ_RATE_MASK 0x00003000
+#define CS42L43_EQZ_RATE_SHIFT 12
+#define CS42L43_DIAGGEN_RATE_MASK 0x00000C00
+#define CS42L43_DIAGGEN_RATE_SHIFT 10
+#define CS42L43_DECIM_CH4_RATE_MASK 0x00000300
+#define CS42L43_DECIM_CH4_RATE_SHIFT 8
+#define CS42L43_DECIM_CH3_RATE_MASK 0x000000C0
+#define CS42L43_DECIM_CH3_RATE_SHIFT 6
+#define CS42L43_DECIM_CH2_RATE_MASK 0x00000030
+#define CS42L43_DECIM_CH2_RATE_SHIFT 4
+#define CS42L43_DECIM_CH1_RATE_MASK 0x0000000C
+#define CS42L43_DECIM_CH1_RATE_SHIFT 2
+#define CS42L43_AMP1_2_RATE_MASK 0x00000003
+#define CS42L43_AMP1_2_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT4 */
+#define CS42L43_SW_DP7_RATE_MASK 0x00C00000
+#define CS42L43_SW_DP7_RATE_SHIFT 22
+#define CS42L43_SW_DP6_RATE_MASK 0x00300000
+#define CS42L43_SW_DP6_RATE_SHIFT 20
+#define CS42L43_SPDIF_RATE_MASK 0x000C0000
+#define CS42L43_SPDIF_RATE_SHIFT 18
+#define CS42L43_SW_DP5_RATE_MASK 0x00030000
+#define CS42L43_SW_DP5_RATE_SHIFT 16
+#define CS42L43_SW_DP4_RATE_MASK 0x0000C000
+#define CS42L43_SW_DP4_RATE_SHIFT 14
+#define CS42L43_SW_DP3_RATE_MASK 0x00003000
+#define CS42L43_SW_DP3_RATE_SHIFT 12
+#define CS42L43_SW_DP2_RATE_MASK 0x00000C00
+#define CS42L43_SW_DP2_RATE_SHIFT 10
+#define CS42L43_SW_DP1_RATE_MASK 0x00000300
+#define CS42L43_SW_DP1_RATE_SHIFT 8
+#define CS42L43_ISRC2_LOW_RATE_MASK 0x000000C0
+#define CS42L43_ISRC2_LOW_RATE_SHIFT 6
+#define CS42L43_ISRC2_HIGH_RATE_MASK 0x00000030
+#define CS42L43_ISRC2_HIGH_RATE_SHIFT 4
+#define CS42L43_ISRC1_LOW_RATE_MASK 0x0000000C
+#define CS42L43_ISRC1_LOW_RATE_SHIFT 2
+#define CS42L43_ISRC1_HIGH_RATE_MASK 0x00000003
+#define CS42L43_ISRC1_HIGH_RATE_SHIFT 0
+
+/* CS42L43_PDM_CONTROL */
+#define CS42L43_PDM2_CLK_DIV_MASK 0x0000000C
+#define CS42L43_PDM2_CLK_DIV_SHIFT 2
+#define CS42L43_PDM1_CLK_DIV_MASK 0x00000003
+#define CS42L43_PDM1_CLK_DIV_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG1 */
+#define CS42L43_ASP_BCLK_N_MASK 0x03FF0000
+#define CS42L43_ASP_BCLK_N_SHIFT 16
+#define CS42L43_ASP_BCLK_M_MASK 0x000003FF
+#define CS42L43_ASP_BCLK_M_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG2 */
+#define CS42L43_ASP_MASTER_MODE_MASK 0x00000002
+#define CS42L43_ASP_MASTER_MODE_SHIFT 1
+#define CS42L43_ASP_BCLK_INV_MASK 0x00000001
+#define CS42L43_ASP_BCLK_INV_SHIFT 0
+
+/* CS42L43_OSC_DIV_SEL */
+#define CS42L43_OSC_DIV2_EN_MASK 0x00000001
+#define CS42L43_OSC_DIV2_EN_SHIFT 0
+
+/* CS42L43_ADC_B_CTRL1..CS42L43_ADC_B_CTRL2 */
+#define CS42L43_PGA_WIDESWING_MODE_EN_MASK 0x00000080
+#define CS42L43_PGA_WIDESWING_MODE_EN_SHIFT 7
+#define CS42L43_ADC_AIN_SEL_MASK 0x00000010
+#define CS42L43_ADC_AIN_SEL_SHIFT 4
+#define CS42L43_ADC_PGA_GAIN_MASK 0x0000000F
+#define CS42L43_ADC_PGA_GAIN_SHIFT 0
+
+/* CS42L43_DECIM_HPF_WNF_CTRL1..CS42L43_DECIM_HPF_WNF_CTRL4 */
+#define CS42L43_DECIM_WNF_CF_MASK 0x00000070
+#define CS42L43_DECIM_WNF_CF_SHIFT 4
+#define CS42L43_DECIM_WNF_EN_MASK 0x00000008
+#define CS42L43_DECIM_WNF_EN_SHIFT 3
+#define CS42L43_DECIM_HPF_CF_MASK 0x00000006
+#define CS42L43_DECIM_HPF_CF_SHIFT 1
+#define CS42L43_DECIM_HPF_EN_MASK 0x00000001
+#define CS42L43_DECIM_HPF_EN_SHIFT 0
+
+/* CS42L43_DMIC_PDM_CTRL */
+#define CS42L43_PDM2R_INV_MASK 0x00000020
+#define CS42L43_PDM2R_INV_SHIFT 5
+#define CS42L43_PDM2L_INV_MASK 0x00000010
+#define CS42L43_PDM2L_INV_SHIFT 4
+#define CS42L43_PDM1R_INV_MASK 0x00000008
+#define CS42L43_PDM1R_INV_SHIFT 3
+#define CS42L43_PDM1L_INV_MASK 0x00000004
+#define CS42L43_PDM1L_INV_SHIFT 2
+
+/* CS42L43_DECIM_VOL_CTRL_CH1_CH2 */
+#define CS42L43_DECIM2_MUTE_MASK 0x80000000
+#define CS42L43_DECIM2_MUTE_SHIFT 31
+#define CS42L43_DECIM2_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM2_VOL_SHIFT 22
+#define CS42L43_DECIM2_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM2_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM2_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM2_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM1_MUTE_MASK 0x00008000
+#define CS42L43_DECIM1_MUTE_SHIFT 15
+#define CS42L43_DECIM1_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM1_VOL_SHIFT 6
+#define CS42L43_DECIM1_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM1_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM1_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM1_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_CH3_CH4 */
+#define CS42L43_DECIM4_MUTE_MASK 0x80000000
+#define CS42L43_DECIM4_MUTE_SHIFT 31
+#define CS42L43_DECIM4_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM4_VOL_SHIFT 22
+#define CS42L43_DECIM4_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM4_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM4_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM4_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM3_MUTE_MASK 0x00008000
+#define CS42L43_DECIM3_MUTE_SHIFT 15
+#define CS42L43_DECIM3_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM3_VOL_SHIFT 6
+#define CS42L43_DECIM3_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM3_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM3_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM3_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_UPDATE */
+#define CS42L43_DECIM4_VOL_UPDATE_MASK 0x00000008
+#define CS42L43_DECIM4_VOL_UPDATE_SHIFT 3
+#define CS42L43_DECIM3_VOL_UPDATE_MASK 0x00000004
+#define CS42L43_DECIM3_VOL_UPDATE_SHIFT 2
+#define CS42L43_DECIM2_VOL_UPDATE_MASK 0x00000002
+#define CS42L43_DECIM2_VOL_UPDATE_SHIFT 1
+#define CS42L43_DECIM1_VOL_UPDATE_MASK 0x00000001
+#define CS42L43_DECIM1_VOL_UPDATE_SHIFT 0
+
+/* CS42L43_INTP_VOLUME_CTRL1..CS42L43_INTP_VOLUME_CTRL2 */
+#define CS42L43_AMP1_2_VU_MASK 0x00000200
+#define CS42L43_AMP1_2_VU_SHIFT 9
+#define CS42L43_AMP_MUTE_MASK 0x00000100
+#define CS42L43_AMP_MUTE_SHIFT 8
+#define CS42L43_AMP_VOL_MASK 0x000000FF
+#define CS42L43_AMP_VOL_SHIFT 0
+
+/* CS42L43_AMP1_2_VOL_RAMP */
+#define CS42L43_AMP1_2_VD_RAMP_MASK 0x00000070
+#define CS42L43_AMP1_2_VD_RAMP_SHIFT 4
+#define CS42L43_AMP1_2_VI_RAMP_MASK 0x00000007
+#define CS42L43_AMP1_2_VI_RAMP_SHIFT 0
+
+/* CS42L43_ASP_CTRL */
+#define CS42L43_ASP_FSYNC_MODE_MASK 0x00000004
+#define CS42L43_ASP_FSYNC_MODE_SHIFT 2
+#define CS42L43_ASP_BCLK_EN_MASK 0x00000002
+#define CS42L43_ASP_BCLK_EN_SHIFT 1
+#define CS42L43_ASP_FSYNC_EN_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_EN_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL1 */
+#define CS42L43_ASP_FSYNC_M_MASK 0x0007FFFF
+#define CS42L43_ASP_FSYNC_M_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL3 */
+#define CS42L43_ASP_FSYNC_IN_INV_MASK 0x00000002
+#define CS42L43_ASP_FSYNC_IN_INV_SHIFT 1
+#define CS42L43_ASP_FSYNC_OUT_INV_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_OUT_INV_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL4 */
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_MASK 0x00001FFE
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_SHIFT 1
+
+/* CS42L43_ASP_DATA_CTRL */
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_MASK 0x00000008
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_SHIFT 3
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_MASK 0x00000007
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_SHIFT 0
+
+/* CS42L43_ASP_RX_EN */
+#define CS42L43_ASP_RX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_RX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_RX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_RX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_RX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_RX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_RX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_RX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_RX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_RX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_RX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_RX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_TX_EN */
+#define CS42L43_ASP_TX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_TX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_TX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_TX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_TX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_TX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_TX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_TX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_TX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_TX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_TX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_TX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_RX_CH1_CTRL..CS42L43_ASP_TX_CH6_CTRL */
+#define CS42L43_ASP_CH_WIDTH_MASK 0x001F0000
+#define CS42L43_ASP_CH_WIDTH_SHIFT 16
+#define CS42L43_ASP_CH_SLOT_MASK 0x00001FFE
+#define CS42L43_ASP_CH_SLOT_SHIFT 1
+#define CS42L43_ASP_CH_SLOT_PHASE_MASK 0x00000001
+#define CS42L43_ASP_CH_SLOT_PHASE_SHIFT 0
+
+/* CS42L43_ASPTX1_INPUT..CS42L43_AMP4MIX_INPUT4 */
+#define CS42L43_MIXER_VOL_MASK 0x00FE0000
+#define CS42L43_MIXER_VOL_SHIFT 17
+#define CS42L43_MIXER_SRC_MASK 0x000001FF
+#define CS42L43_MIXER_SRC_SHIFT 0
+
+/* CS42L43_ASRC_INT_ENABLES */
+#define CS42L43_ASRC_INT4_EN_MASK 0x00000008
+#define CS42L43_ASRC_INT4_EN_SHIFT 3
+#define CS42L43_ASRC_INT3_EN_MASK 0x00000004
+#define CS42L43_ASRC_INT3_EN_SHIFT 2
+#define CS42L43_ASRC_INT2_EN_MASK 0x00000002
+#define CS42L43_ASRC_INT2_EN_SHIFT 1
+#define CS42L43_ASRC_INT1_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT1_EN_SHIFT 0
+
+/* CS42L43_ASRC_DEC_ENABLES */
+#define CS42L43_ASRC_DEC4_EN_MASK 0x00000008
+#define CS42L43_ASRC_DEC4_EN_SHIFT 3
+#define CS42L43_ASRC_DEC3_EN_MASK 0x00000004
+#define CS42L43_ASRC_DEC3_EN_SHIFT 2
+#define CS42L43_ASRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC2_EN_SHIFT 1
+#define CS42L43_ASRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ASRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_PDNCNTL */
+#define CS42L43_RING_SENSE_EN_MASK 0x00000002
+#define CS42L43_RING_SENSE_EN_SHIFT 1
+
+/* CS42L43_RINGSENSE_DEB_CTRL */
+#define CS42L43_RINGSENSE_INV_MASK 0x00000080
+#define CS42L43_RINGSENSE_INV_SHIFT 7
+#define CS42L43_RINGSENSE_PULLUP_PDNB_MASK 0x00000040
+#define CS42L43_RINGSENSE_PULLUP_PDNB_SHIFT 6
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_RINGSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_RINGSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIPSENSE_DEB_CTRL */
+#define CS42L43_TIPSENSE_INV_MASK 0x00000080
+#define CS42L43_TIPSENSE_INV_SHIFT 7
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_TIPSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_TIPSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS */
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_DB_STS_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_DB_STS_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_DB_STS_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_DB_STS_SHIFT 0
+
+/* CS42L43_HS2 */
+#define CS42L43_HS_CLAMP_DISABLE_MASK 0x10000000
+#define CS42L43_HS_CLAMP_DISABLE_SHIFT 28
+#define CS42L43_HSBIAS_RAMP_MASK 0x0C000000
+#define CS42L43_HSBIAS_RAMP_SHIFT 26
+#define CS42L43_HSDET_MODE_MASK 0x00018000
+#define CS42L43_HSDET_MODE_SHIFT 15
+#define CS42L43_HSDET_MANUAL_MODE_MASK 0x00006000
+#define CS42L43_HSDET_MANUAL_MODE_SHIFT 13
+#define CS42L43_AUTO_HSDET_TIME_MASK 0x00000700
+#define CS42L43_AUTO_HSDET_TIME_SHIFT 8
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_MASK 0x00000080
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_SHIFT 7
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_MASK 0x00000040
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_SHIFT 6
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_MASK 0x00000020
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_SHIFT 5
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_MASK 0x00000010
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_SHIFT 4
+#define CS42L43_HSBIAS_OUT_HS3_SEL_MASK 0x00000008
+#define CS42L43_HSBIAS_OUT_HS3_SEL_SHIFT 3
+#define CS42L43_HSBIAS_OUT_HS4_SEL_MASK 0x00000004
+#define CS42L43_HSBIAS_OUT_HS4_SEL_SHIFT 2
+#define CS42L43_HSGND_HS3_SEL_MASK 0x00000002
+#define CS42L43_HSGND_HS3_SEL_SHIFT 1
+#define CS42L43_HSGND_HS4_SEL_MASK 0x00000001
+#define CS42L43_HSGND_HS4_SEL_SHIFT 0
+
+/* CS42L43_HS_STAT */
+#define CS42L43_HSDET_TYPE_STS_MASK 0x00000007
+#define CS42L43_HSDET_TYPE_STS_SHIFT 0
+
+/* CS42L43_MCU_SW_INTERRUPT */
+#define CS42L43_CONTROL_IND_MASK 0x00000004
+#define CS42L43_CONTROL_IND_SHIFT 2
+#define CS42L43_CONFIGS_IND_MASK 0x00000002
+#define CS42L43_CONFIGS_IND_SHIFT 1
+#define CS42L43_PATCH_IND_MASK 0x00000001
+#define CS42L43_PATCH_IND_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CTRL */
+#define CS42L43_HS2_BIAS_SENSE_EN_MASK 0x00000020
+#define CS42L43_HS2_BIAS_SENSE_EN_SHIFT 5
+#define CS42L43_HS1_BIAS_SENSE_EN_MASK 0x00000010
+#define CS42L43_HS1_BIAS_SENSE_EN_SHIFT 4
+#define CS42L43_HS2_BIAS_EN_MASK 0x00000008
+#define CS42L43_HS2_BIAS_EN_SHIFT 3
+#define CS42L43_HS1_BIAS_EN_MASK 0x00000004
+#define CS42L43_HS1_BIAS_EN_SHIFT 2
+#define CS42L43_JACK_STEREO_CONFIG_MASK 0x00000003
+#define CS42L43_JACK_STEREO_CONFIG_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CLAMP_CTRL */
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK 0x00000002
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_SHIFT 1
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK 0x00000001
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_SHIFT 0
+
+/* CS42L43_BLOCK_EN2 */
+#define CS42L43_SPI_MSTR_EN_MASK 0x00000001
+#define CS42L43_SPI_MSTR_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN3 */
+#define CS42L43_PDM2_DIN_R_EN_MASK 0x00000020
+#define CS42L43_PDM2_DIN_R_EN_SHIFT 5
+#define CS42L43_PDM2_DIN_L_EN_MASK 0x00000010
+#define CS42L43_PDM2_DIN_L_EN_SHIFT 4
+#define CS42L43_PDM1_DIN_R_EN_MASK 0x00000008
+#define CS42L43_PDM1_DIN_R_EN_SHIFT 3
+#define CS42L43_PDM1_DIN_L_EN_MASK 0x00000004
+#define CS42L43_PDM1_DIN_L_EN_SHIFT 2
+#define CS42L43_ADC2_EN_MASK 0x00000002
+#define CS42L43_ADC2_EN_SHIFT 1
+#define CS42L43_ADC1_EN_MASK 0x00000001
+#define CS42L43_ADC1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN4 */
+#define CS42L43_ASRC_DEC_BANK_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC_BANK_EN_SHIFT 1
+#define CS42L43_ASRC_INT_BANK_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN5 */
+#define CS42L43_ISRC2_BANK_EN_MASK 0x00000002
+#define CS42L43_ISRC2_BANK_EN_SHIFT 1
+#define CS42L43_ISRC1_BANK_EN_MASK 0x00000001
+#define CS42L43_ISRC1_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN6 */
+#define CS42L43_MIXER_EN_MASK 0x00000001
+#define CS42L43_MIXER_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN7 */
+#define CS42L43_EQ_EN_MASK 0x00000001
+#define CS42L43_EQ_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN8 */
+#define CS42L43_HP_EN_MASK 0x00000001
+#define CS42L43_HP_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN9 */
+#define CS42L43_TONE_EN_MASK 0x00000001
+#define CS42L43_TONE_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN10 */
+#define CS42L43_AMP2_EN_MASK 0x00000002
+#define CS42L43_AMP2_EN_SHIFT 1
+#define CS42L43_AMP1_EN_MASK 0x00000001
+#define CS42L43_AMP1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN11 */
+#define CS42L43_SPDIF_EN_MASK 0x00000001
+#define CS42L43_SPDIF_EN_SHIFT 0
+
+/* CS42L43_TONE_CH1_CTRL..CS42L43_TONE_CH2_CTRL */
+#define CS42L43_TONE_FREQ_MASK 0x00000070
+#define CS42L43_TONE_FREQ_SHIFT 4
+#define CS42L43_TONE_SEL_MASK 0x0000000F
+#define CS42L43_TONE_SEL_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_1 */
+#define CS42L43_BUTTON_DETECT_MODE_MASK 0x00000018
+#define CS42L43_BUTTON_DETECT_MODE_SHIFT 3
+#define CS42L43_HSBIAS_MODE_MASK 0x00000006
+#define CS42L43_HSBIAS_MODE_SHIFT 1
+#define CS42L43_MIC_LVL_DET_DISABLE_MASK 0x00000001
+#define CS42L43_MIC_LVL_DET_DISABLE_SHIFT 0
+
+/* CS42L43_DETECT_STATUS_1 */
+#define CS42L43_HSDET_DC_STS_MASK 0x01FF0000
+#define CS42L43_HSDET_DC_STS_SHIFT 16
+#define CS42L43_JACKDET_STS_MASK 0x00000080
+#define CS42L43_JACKDET_STS_SHIFT 7
+#define CS42L43_HSBIAS_CLAMP_STS_MASK 0x00000040
+#define CS42L43_HSBIAS_CLAMP_STS_SHIFT 6
+
+/* CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL */
+#define CS42L43_JACKDET_MODE_MASK 0xC0000000
+#define CS42L43_JACKDET_MODE_SHIFT 30
+#define CS42L43_JACKDET_INV_MASK 0x20000000
+#define CS42L43_JACKDET_INV_SHIFT 29
+#define CS42L43_JACKDET_DB_TIME_MASK 0x03000000
+#define CS42L43_JACKDET_DB_TIME_SHIFT 24
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_MASK 0x00800000
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_SHIFT 23
+#define CS42L43_HSBIAS_SENSE_EN_MASK 0x00000080
+#define CS42L43_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK 0x00000040
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_SHIFT 6
+#define CS42L43_JACKDET_SENSE_EN_MASK 0x00000020
+#define CS42L43_JACKDET_SENSE_EN_SHIFT 5
+#define CS42L43_HSBIAS_SENSE_TRIP_MASK 0x00000007
+#define CS42L43_HSBIAS_SENSE_TRIP_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_ANDROID */
+#define CS42L43_HSDET_LVL_COMBWIDTH_MASK 0xC0000000
+#define CS42L43_HSDET_LVL_COMBWIDTH_SHIFT 30
+#define CS42L43_HSDET_LVL2_THRESH_MASK 0x01FF0000
+#define CS42L43_HSDET_LVL2_THRESH_SHIFT 16
+#define CS42L43_HSDET_LVL1_THRESH_MASK 0x000001FF
+#define CS42L43_HSDET_LVL1_THRESH_SHIFT 0
+
+/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */
+#define CS42L43_ISRC_INT2_EN_MASK 0x00000200
+#define CS42L43_ISRC_INT2_EN_SHIFT 9
+#define CS42L43_ISRC_INT1_EN_MASK 0x00000100
+#define CS42L43_ISRC_INT1_EN_SHIFT 8
+#define CS42L43_ISRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ISRC_DEC2_EN_SHIFT 1
+#define CS42L43_ISRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ISRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_CTRL_REG */
+#define CS42L43_PLL_MODE_BYPASS_500_MASK 0x00000004
+#define CS42L43_PLL_MODE_BYPASS_500_SHIFT 2
+#define CS42L43_PLL_MODE_BYPASS_1029_MASK 0x00000002
+#define CS42L43_PLL_MODE_BYPASS_1029_SHIFT 1
+#define CS42L43_PLL_EN_MASK 0x00000001
+#define CS42L43_PLL_EN_SHIFT 0
+
+/* CS42L43_FDIV_FRAC */
+#define CS42L43_PLL_DIV_INT_MASK 0xFF000000
+#define CS42L43_PLL_DIV_INT_SHIFT 24
+#define CS42L43_PLL_DIV_FRAC_BYTE2_MASK 0x00FF0000
+#define CS42L43_PLL_DIV_FRAC_BYTE2_SHIFT 16
+#define CS42L43_PLL_DIV_FRAC_BYTE1_MASK 0x0000FF00
+#define CS42L43_PLL_DIV_FRAC_BYTE1_SHIFT 8
+#define CS42L43_PLL_DIV_FRAC_BYTE0_MASK 0x000000FF
+#define CS42L43_PLL_DIV_FRAC_BYTE0_SHIFT 0
+
+/* CS42L43_CAL_RATIO */
+#define CS42L43_PLL_CAL_RATIO_MASK 0x000000FF
+#define CS42L43_PLL_CAL_RATIO_SHIFT 0
+
+/* CS42L43_SPI_CLK_CONFIG1 */
+#define CS42L43_SCLK_DIV_MASK 0x0000000F
+#define CS42L43_SCLK_DIV_SHIFT 0
+
+/* CS42L43_SPI_CONFIG1 */
+#define CS42L43_SPI_SS_IDLE_DUR_MASK 0x0F000000
+#define CS42L43_SPI_SS_IDLE_DUR_SHIFT 24
+#define CS42L43_SPI_SS_DELAY_DUR_MASK 0x000F0000
+#define CS42L43_SPI_SS_DELAY_DUR_SHIFT 16
+#define CS42L43_SPI_THREE_WIRE_MASK 0x00000100
+#define CS42L43_SPI_THREE_WIRE_SHIFT 8
+#define CS42L43_SPI_DPHA_MASK 0x00000040
+#define CS42L43_SPI_DPHA_SHIFT 6
+#define CS42L43_SPI_CPHA_MASK 0x00000020
+#define CS42L43_SPI_CPHA_SHIFT 5
+#define CS42L43_SPI_CPOL_MASK 0x00000010
+#define CS42L43_SPI_CPOL_SHIFT 4
+#define CS42L43_SPI_SS_SEL_MASK 0x00000007
+#define CS42L43_SPI_SS_SEL_SHIFT 0
+
+/* CS42L43_SPI_CONFIG2 */
+#define CS42L43_SPI_SS_FRC_MASK 0x00000001
+#define CS42L43_SPI_SS_FRC_SHIFT 0
+
+/* CS42L43_SPI_CONFIG3 */
+#define CS42L43_SPI_WDT_ENA_MASK 0x00000001
+#define CS42L43_SPI_WDT_ENA_SHIFT 0
+
+/* CS42L43_SPI_CONFIG4 */
+#define CS42L43_SPI_STALL_ENA_MASK 0x00010000
+#define CS42L43_SPI_STALL_ENA_SHIFT 16
+
+/* CS42L43_SPI_STATUS1 */
+#define CS42L43_SPI_ABORT_STS_MASK 0x00000002
+#define CS42L43_SPI_ABORT_STS_SHIFT 1
+#define CS42L43_SPI_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_DONE_STS_SHIFT 0
+
+/* CS42L43_SPI_STATUS2 */
+#define CS42L43_SPI_RX_DONE_STS_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_STS_SHIFT 4
+#define CS42L43_SPI_TX_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_STS_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG1 */
+#define CS42L43_SPI_START_MASK 0x00000001
+#define CS42L43_SPI_START_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG2 */
+#define CS42L43_SPI_ABORT_MASK 0x00000001
+#define CS42L43_SPI_ABORT_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG3 */
+#define CS42L43_SPI_WORD_SIZE_MASK 0x00070000
+#define CS42L43_SPI_WORD_SIZE_SHIFT 16
+#define CS42L43_SPI_CMD_MASK 0x00000003
+#define CS42L43_SPI_CMD_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG4 */
+#define CS42L43_SPI_TX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG5 */
+#define CS42L43_SPI_RX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG6 */
+#define CS42L43_SPI_TX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_TX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG7 */
+#define CS42L43_SPI_RX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_RX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG8 */
+#define CS42L43_SPI_RX_DONE_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_SHIFT 4
+#define CS42L43_SPI_TX_DONE_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_SHIFT 0
+
+/* CS42L43_TRAN_STATUS1 */
+#define CS42L43_SPI_BUSY_STS_MASK 0x00000100
+#define CS42L43_SPI_BUSY_STS_SHIFT 8
+#define CS42L43_SPI_RX_REQUEST_MASK 0x00000010
+#define CS42L43_SPI_RX_REQUEST_SHIFT 4
+#define CS42L43_SPI_TX_REQUEST_MASK 0x00000001
+#define CS42L43_SPI_TX_REQUEST_SHIFT 0
+
+/* CS42L43_TRAN_STATUS2 */
+#define CS42L43_SPI_TX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TRAN_STATUS3 */
+#define CS42L43_SPI_RX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TX_DATA */
+#define CS42L43_SPI_TX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_TX_DATA_SHIFT 0
+
+/* CS42L43_RX_DATA */
+#define CS42L43_SPI_RX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_RX_DATA_SHIFT 0
+
+/* CS42L43_DACCNFG1 */
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_MASK 0x00000008
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_SHIFT 3
+#define CS42L43_AMP4_INV_MASK 0x00000002
+#define CS42L43_AMP4_INV_SHIFT 1
+#define CS42L43_AMP3_INV_MASK 0x00000001
+#define CS42L43_AMP3_INV_SHIFT 0
+
+/* CS42L43_DACCNFG2 */
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_MASK 0x00000002
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_SHIFT 1
+#define CS42L43_HP_HPF_EN_MASK 0x00000001
+#define CS42L43_HP_HPF_EN_SHIFT 0
+
+/* CS42L43_HPPATHVOL */
+#define CS42L43_AMP4_PATH_VOL_MASK 0x01FF0000
+#define CS42L43_AMP4_PATH_VOL_SHIFT 16
+#define CS42L43_AMP3_PATH_VOL_MASK 0x000001FF
+#define CS42L43_AMP3_PATH_VOL_SHIFT 0
+
+/* CS42L43_PGAVOL */
+#define CS42L43_HP_PATH_VOL_RAMP_MASK 0x0003C000
+#define CS42L43_HP_PATH_VOL_RAMP_SHIFT 14
+#define CS42L43_HP_PATH_VOL_ZC_MASK 0x00002000
+#define CS42L43_HP_PATH_VOL_ZC_SHIFT 13
+#define CS42L43_HP_PATH_VOL_SFT_MASK 0x00001000
+#define CS42L43_HP_PATH_VOL_SFT_SHIFT 12
+#define CS42L43_HP_DIG_VOL_RAMP_MASK 0x00000F00
+#define CS42L43_HP_DIG_VOL_RAMP_SHIFT 8
+#define CS42L43_HP_ANA_VOL_RAMP_MASK 0x0000000F
+#define CS42L43_HP_ANA_VOL_RAMP_SHIFT 0
+
+/* CS42L43_LOADDETRESULTS */
+#define CS42L43_AMP3_RES_DET_MASK 0x00000003
+#define CS42L43_AMP3_RES_DET_SHIFT 0
+
+/* CS42L43_LOADDETENA */
+#define CS42L43_HPLOAD_DET_EN_MASK 0x00000001
+#define CS42L43_HPLOAD_DET_EN_SHIFT 0
+
+/* CS42L43_CTRL */
+#define CS42L43_ADPTPWR_MODE_MASK 0x00000007
+#define CS42L43_ADPTPWR_MODE_SHIFT 0
+
+/* CS42L43_COEFF_RD_WR0 */
+#define CS42L43_WRITE_MODE_MASK 0x00000002
+#define CS42L43_WRITE_MODE_SHIFT 1
+
+/* CS42L43_INIT_DONE0 */
+#define CS42L43_INITIALIZE_DONE_MASK 0x00000001
+#define CS42L43_INITIALIZE_DONE_SHIFT 0
+
+/* CS42L43_START_EQZ0 */
+#define CS42L43_START_FILTER_MASK 0x00000001
+#define CS42L43_START_FILTER_SHIFT 0
+
+/* CS42L43_MUTE_EQ_IN0 */
+#define CS42L43_MUTE_EQ_CH2_MASK 0x00000002
+#define CS42L43_MUTE_EQ_CH2_SHIFT 1
+#define CS42L43_MUTE_EQ_CH1_MASK 0x00000001
+#define CS42L43_MUTE_EQ_CH1_SHIFT 0
+
+/* CS42L43_PLL_INT */
+#define CS42L43_PLL_LOST_LOCK_INT_MASK 0x00000002
+#define CS42L43_PLL_LOST_LOCK_INT_SHIFT 1
+#define CS42L43_PLL_READY_INT_MASK 0x00000001
+#define CS42L43_PLL_READY_INT_SHIFT 0
+
+/* CS42L43_SOFT_INT */
+#define CS42L43_CONTROL_APPLIED_INT_MASK 0x00000010
+#define CS42L43_CONTROL_APPLIED_INT_SHIFT 4
+#define CS42L43_CONTROL_WARN_INT_MASK 0x00000008
+#define CS42L43_CONTROL_WARN_INT_SHIFT 3
+#define CS42L43_PATCH_WARN_INT_MASK 0x00000002
+#define CS42L43_PATCH_WARN_INT_SHIFT 1
+#define CS42L43_PATCH_APPLIED_INT_MASK 0x00000001
+#define CS42L43_PATCH_APPLIED_INT_SHIFT 0
+
+/* CS42L43_MSM_INT */
+#define CS42L43_HP_STARTUP_DONE_INT_MASK 0x00000800
+#define CS42L43_HP_STARTUP_DONE_INT_SHIFT 11
+#define CS42L43_HP_SHUTDOWN_DONE_INT_MASK 0x00000400
+#define CS42L43_HP_SHUTDOWN_DONE_INT_SHIFT 10
+#define CS42L43_HSDET_DONE_INT_MASK 0x00000200
+#define CS42L43_HSDET_DONE_INT_SHIFT 9
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_MASK 0x00000080
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_SHIFT 7
+#define CS42L43_TIPSENSE_PLUG_DB_INT_MASK 0x00000040
+#define CS42L43_TIPSENSE_PLUG_DB_INT_SHIFT 6
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_MASK 0x00000020
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_SHIFT 5
+#define CS42L43_RINGSENSE_PLUG_DB_INT_MASK 0x00000010
+#define CS42L43_RINGSENSE_PLUG_DB_INT_SHIFT 4
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_SHIFT 0
+
+/* CS42L43_ACC_DET_INT */
+#define CS42L43_HS2_BIAS_SENSE_INT_MASK 0x00000800
+#define CS42L43_HS2_BIAS_SENSE_INT_SHIFT 11
+#define CS42L43_HS1_BIAS_SENSE_INT_MASK 0x00000400
+#define CS42L43_HS1_BIAS_SENSE_INT_SHIFT 10
+#define CS42L43_DC_DETECT1_FALSE_INT_MASK 0x00000080
+#define CS42L43_DC_DETECT1_FALSE_INT_SHIFT 7
+#define CS42L43_DC_DETECT1_TRUE_INT_MASK 0x00000040
+#define CS42L43_DC_DETECT1_TRUE_INT_SHIFT 6
+#define CS42L43_HSBIAS_CLAMPED_INT_MASK 0x00000008
+#define CS42L43_HSBIAS_CLAMPED_INT_SHIFT 3
+#define CS42L43_HS3_4_BIAS_SENSE_INT_MASK 0x00000001
+#define CS42L43_HS3_4_BIAS_SENSE_INT_SHIFT 0
+
+/* CS42L43_SPI_MSTR_INT */
+#define CS42L43_IRQ_SPI_STALLING_INT_MASK 0x00000004
+#define CS42L43_IRQ_SPI_STALLING_INT_SHIFT 2
+#define CS42L43_IRQ_SPI_STS_INT_MASK 0x00000002
+#define CS42L43_IRQ_SPI_STS_INT_SHIFT 1
+#define CS42L43_IRQ_SPI_BLOCK_INT_MASK 0x00000001
+#define CS42L43_IRQ_SPI_BLOCK_INT_SHIFT 0
+
+/* CS42L43_SW_TO_SPI_BRIDGE_INT */
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_MASK 0x00000001
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_SHIFT 0
+
+/* CS42L43_CLASS_D_AMP_INT */
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_MASK 0x00002000
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_SHIFT 13
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_MASK 0x00001000
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_SHIFT 12
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_MASK 0x00000800
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_SHIFT 11
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_MASK 0x00000400
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_SHIFT 10
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_MASK 0x00000200
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_SHIFT 9
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_MASK 0x00000100
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_SHIFT 8
+#define CS42L43_AMP2_STARTUP_DONE_INT_MASK 0x00000080
+#define CS42L43_AMP2_STARTUP_DONE_INT_SHIFT 7
+#define CS42L43_AMP1_STARTUP_DONE_INT_MASK 0x00000040
+#define CS42L43_AMP1_STARTUP_DONE_INT_SHIFT 6
+#define CS42L43_AMP2_THERM_SHDN_INT_MASK 0x00000020
+#define CS42L43_AMP2_THERM_SHDN_INT_SHIFT 5
+#define CS42L43_AMP1_THERM_SHDN_INT_MASK 0x00000010
+#define CS42L43_AMP1_THERM_SHDN_INT_SHIFT 4
+#define CS42L43_AMP2_THERM_WARN_INT_MASK 0x00000008
+#define CS42L43_AMP2_THERM_WARN_INT_SHIFT 3
+#define CS42L43_AMP1_THERM_WARN_INT_MASK 0x00000004
+#define CS42L43_AMP1_THERM_WARN_INT_SHIFT 2
+#define CS42L43_AMP2_SCDET_INT_MASK 0x00000002
+#define CS42L43_AMP2_SCDET_INT_SHIFT 1
+#define CS42L43_AMP1_SCDET_INT_MASK 0x00000001
+#define CS42L43_AMP1_SCDET_INT_SHIFT 0
+
+/* CS42L43_GPIO_INT */
+#define CS42L43_GPIO3_FALL_INT_MASK 0x00000020
+#define CS42L43_GPIO3_FALL_INT_SHIFT 5
+#define CS42L43_GPIO3_RISE_INT_MASK 0x00000010
+#define CS42L43_GPIO3_RISE_INT_SHIFT 4
+#define CS42L43_GPIO2_FALL_INT_MASK 0x00000008
+#define CS42L43_GPIO2_FALL_INT_SHIFT 3
+#define CS42L43_GPIO2_RISE_INT_MASK 0x00000004
+#define CS42L43_GPIO2_RISE_INT_SHIFT 2
+#define CS42L43_GPIO1_FALL_INT_MASK 0x00000002
+#define CS42L43_GPIO1_FALL_INT_SHIFT 1
+#define CS42L43_GPIO1_RISE_INT_MASK 0x00000001
+#define CS42L43_GPIO1_RISE_INT_SHIFT 0
+
+/* CS42L43_HPOUT_INT */
+#define CS42L43_HP_ILIMIT_INT_MASK 0x00000002
+#define CS42L43_HP_ILIMIT_INT_SHIFT 1
+#define CS42L43_HP_LOADDET_DONE_INT_MASK 0x00000001
+#define CS42L43_HP_LOADDET_DONE_INT_SHIFT 0
+
+/* CS42L43_BOOT_CONTROL */
+#define CS42L43_LOCK_HW_STS_MASK 0x00000002
+#define CS42L43_LOCK_HW_STS_SHIFT 1
+
+/* CS42L43_BLOCK_EN */
+#define CS42L43_MCU_EN_MASK 0x00000001
+#define CS42L43_MCU_EN_SHIFT 0
+
+/* CS42L43_SHUTTER_CONTROL */
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_MASK 0x00008000
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_SHIFT 15
+#define CS42L43_SPK_SHUTTER_CFG_MASK 0x00000F00
+#define CS42L43_SPK_SHUTTER_CFG_SHIFT 8
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_MASK 0x00000080
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_SHIFT 7
+#define CS42L43_MIC_SHUTTER_CFG_MASK 0x0000000F
+#define CS42L43_MIC_SHUTTER_CFG_SHIFT 0
+
+/* CS42L43_MCU_SW_REV */
+#define CS42L43_BIOS_SUBMINOR_REV_MASK 0xFF000000
+#define CS42L43_BIOS_SUBMINOR_REV_SHIFT 24
+#define CS42L43_BIOS_MINOR_REV_MASK 0x00F00000
+#define CS42L43_BIOS_MINOR_REV_SHIFT 20
+#define CS42L43_BIOS_MAJOR_REV_MASK 0x000F0000
+#define CS42L43_BIOS_MAJOR_REV_SHIFT 16
+#define CS42L43_FW_SUBMINOR_REV_MASK 0x0000FF00
+#define CS42L43_FW_SUBMINOR_REV_SHIFT 8
+#define CS42L43_FW_MINOR_REV_MASK 0x000000F0
+#define CS42L43_FW_MINOR_REV_SHIFT 4
+#define CS42L43_FW_MAJOR_REV_MASK 0x0000000F
+#define CS42L43_FW_MAJOR_REV_SHIFT 0
+
+/* CS42L43_NEED_CONFIGS */
+#define CS42L43_FW_PATCH_NEED_CFG_MASK 0x80000000
+#define CS42L43_FW_PATCH_NEED_CFG_SHIFT 31
+
+/* CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION */
+#define CS42L43_FW_MM_CTRL_MCU_SEL_MASK 0x00000001
+#define CS42L43_FW_MM_CTRL_MCU_SEL_SHIFT 0
+
+/* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F
+
+#endif /* CS42L43_CORE_REGS_H */
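The _MASK/_SHIFT pairs above all follow one convention: mask out the field, then shift the new value into place. A minimal sketch, assuming a live struct regmap for the device; the helper names are illustrative, not the driver's actual code paths.

#include <linux/regmap.h>

/* Program a sample-rate field using its _MASK/_SHIFT pair */
static int cs42l43_set_rate1(struct regmap *regmap, unsigned int rate)
{
	return regmap_update_bits(regmap, CS42L43_SAMPLE_RATE1,
				  CS42L43_SAMPLE_RATE_MASK,
				  rate << CS42L43_SAMPLE_RATE_SHIFT);
}

/* Soft reset: write the magic key into the reset field */
static int cs42l43_soft_reset(struct regmap *regmap)
{
	return regmap_write(regmap, CS42L43_SFT_RESET, CS42L43_SFT_RESET_VAL);
}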
diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h
new file mode 100644
index 000000000000..2239d8585e78
--- /dev/null
+++ b/include/linux/mfd/cs42l43.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 core driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_EXT_H
+#define CS42L43_CORE_EXT_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
+#define CS42L43_N_SUPPLIES 3
+
+struct device;
+struct gpio_desc;
+struct sdw_slave;
+
+enum cs42l43_irq_numbers {
+ CS42L43_PLL_LOST_LOCK,
+ CS42L43_PLL_READY,
+
+ CS42L43_HP_STARTUP_DONE,
+ CS42L43_HP_SHUTDOWN_DONE,
+ CS42L43_HSDET_DONE,
+ CS42L43_TIPSENSE_UNPLUG_DB,
+ CS42L43_TIPSENSE_PLUG_DB,
+ CS42L43_RINGSENSE_UNPLUG_DB,
+ CS42L43_RINGSENSE_PLUG_DB,
+ CS42L43_TIPSENSE_UNPLUG_PDET,
+ CS42L43_TIPSENSE_PLUG_PDET,
+ CS42L43_RINGSENSE_UNPLUG_PDET,
+ CS42L43_RINGSENSE_PLUG_PDET,
+
+ CS42L43_HS2_BIAS_SENSE,
+ CS42L43_HS1_BIAS_SENSE,
+ CS42L43_DC_DETECT1_FALSE,
+ CS42L43_DC_DETECT1_TRUE,
+ CS42L43_HSBIAS_CLAMPED,
+ CS42L43_HS3_4_BIAS_SENSE,
+
+ CS42L43_AMP2_CLK_STOP_FAULT,
+ CS42L43_AMP1_CLK_STOP_FAULT,
+ CS42L43_AMP2_VDDSPK_FAULT,
+ CS42L43_AMP1_VDDSPK_FAULT,
+ CS42L43_AMP2_SHUTDOWN_DONE,
+ CS42L43_AMP1_SHUTDOWN_DONE,
+ CS42L43_AMP2_STARTUP_DONE,
+ CS42L43_AMP1_STARTUP_DONE,
+ CS42L43_AMP2_THERM_SHDN,
+ CS42L43_AMP1_THERM_SHDN,
+ CS42L43_AMP2_THERM_WARN,
+ CS42L43_AMP1_THERM_WARN,
+ CS42L43_AMP2_SCDET,
+ CS42L43_AMP1_SCDET,
+
+ CS42L43_GPIO3_FALL,
+ CS42L43_GPIO3_RISE,
+ CS42L43_GPIO2_FALL,
+ CS42L43_GPIO2_RISE,
+ CS42L43_GPIO1_FALL,
+ CS42L43_GPIO1_RISE,
+
+ CS42L43_HP_ILIMIT,
+ CS42L43_HP_LOADDET_DONE,
+};
+
+struct cs42l43 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct sdw_slave *sdw;
+
+ struct regulator *vdd_p;
+ struct regulator *vdd_d;
+ struct regulator_bulk_data core_supplies[CS42L43_N_SUPPLIES];
+
+ struct gpio_desc *reset;
+
+ int irq;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct work_struct boot_work;
+ struct completion device_attach;
+ struct completion device_detach;
+ struct completion firmware_download;
+ int firmware_error;
+
+ unsigned int sdw_freq;
+ /* Lock to gate control of the PLL and its sources. */
+ struct mutex pll_lock;
+
+ bool sdw_pll_active;
+ bool attached;
+ bool hw_lock;
+};
+
+#endif /* CS42L43_CORE_EXT_H */
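Since struct cs42l43 carries a regmap_irq_chip_data pointer, a child driver would translate the enum cs42l43_irq_numbers values above into Linux virtual IRQs with the standard regmap helper. A hedged sketch; the wrapper name is hypothetical.

#include <linux/regmap.h>

/* Map a chip-level interrupt index onto a Linux virq (or -errno) */
static int cs42l43_get_virq(struct cs42l43 *cs42l43, int hw_irq)
{
	return regmap_irq_get_virq(cs42l43->irq_data, hw_irq);
}

A caller would pass, say, CS42L43_HSDET_DONE and hand the result to request_threaded_irq().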
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index d3f126990ad0..137a2b067512 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -7,7 +7,6 @@
#define DA9055_MAX_REGULATORS 8
struct da9055;
-struct gpio_desc;
enum gpio_select {
NO_GPIO = 0,
@@ -24,16 +23,6 @@ struct da9055_pdata {
/* Enable RTC in RESET Mode */
bool reset_enable;
/*
- * GPI muxed pin to control
- * regulator state A/B, 0 if not available.
- */
- int *gpio_ren;
- /*
- * GPI muxed pin to control
- * regulator set, 0 if not available.
- */
- int *gpio_rsel;
- /*
* Regulator mode control bits value (GPI offset) that
* controls the regulator state, 0 if not available.
*/
@@ -43,7 +32,5 @@ struct da9055_pdata {
* controls the regulator set A/B, 0 if not available.
*/
enum gpio_select *reg_rsel;
- /* GPIO descriptors to enable regulator, NULL if not available */
- struct gpio_desc **ena_gpiods;
};
#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index fa7a43f02f27..8db52324f416 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -36,6 +36,7 @@ enum da9063_variant_codes {
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
PMIC_DA9063_DA = 0x7,
+ PMIC_DA9063_EA = 0x8,
};
/* Interrupts */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 1dbabf1b3cb8..7b8364bd08a0 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -1037,6 +1037,32 @@
#define DA9063_NONKEY_PIN_AUTODOWN 0x02
#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
+/* DA9063_REG_CONFIG_J (addr=0x10F) */
+#define DA9063_TWOWIRE_TO 0x40
+
+/* DA9063_REG_MON_REG_2 (addr=0x115) */
+#define DA9063_LDO1_MON_EN 0x01
+#define DA9063_LDO2_MON_EN 0x02
+#define DA9063_LDO3_MON_EN 0x04
+#define DA9063_LDO4_MON_EN 0x08
+#define DA9063_LDO5_MON_EN 0x10
+#define DA9063_LDO6_MON_EN 0x20
+#define DA9063_LDO7_MON_EN 0x40
+#define DA9063_LDO8_MON_EN 0x80
+
+/* DA9063_REG_MON_REG_3 (addr=0x116) */
+#define DA9063_LDO9_MON_EN 0x01
+#define DA9063_LDO10_MON_EN 0x02
+#define DA9063_LDO11_MON_EN 0x04
+
+/* DA9063_REG_MON_REG_4 (addr=0x117) */
+#define DA9063_BCORE1_MON_EN 0x04
+#define DA9063_BCORE2_MON_EN 0x08
+#define DA9063_BPRO_MON_EN 0x10
+#define DA9063_BIO_MON_EN 0x20
+#define DA9063_BMEM_MON_EN 0x40
+#define DA9063_BPERI_MON_EN 0x80
+
/* DA9063_REG_MON_REG_5 (addr=0x118) */
#define DA9063_MON_A8_IDX_MASK 0x07
#define DA9063_MON_A8_IDX_NONE 0x00
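A minimal sketch of how the new monitor-enable bits might be used, assuming the usual da9063 regmap and that DA9063_REG_MON_REG_2 is the register define matching the addr=0x115 comment above; the helper name is hypothetical.

#include <linux/regmap.h>

static int da9063_enable_ldo1_monitor(struct regmap *regmap)
{
	/* Set the LDO1 voltage-monitor enable bit, leaving others intact */
	return regmap_update_bits(regmap, DA9063_REG_MON_REG_2,
				  DA9063_LDO1_MON_EN, DA9063_LDO1_MON_EN);
}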
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 4b63d3ecdcff..a62de3d155ed 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -720,7 +720,7 @@ static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val)
static inline bool db8500_prcmu_is_ac_wake_requested(void)
{
- return 0;
+ return false;
}
static inline int db8500_prcmu_set_arm_opp(u8 opp)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e6ee2ec35de9..dd0fc891b228 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -186,10 +186,11 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_C3 8
#define PRCMU_FW_PROJECT_U8500_C4 9
#define PRCMU_FW_PROJECT_U9500_MBL 10
-#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
@@ -555,36 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
-
-unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
-void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
-void prcmu_qos_force_opp(int, s32);
-int prcmu_qos_requirement(int pm_qos_class);
-int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
-int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
-void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
-int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-
-#else
-
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
-
-static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -601,17 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
-#endif
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/dm355evm_msp.h b/include/linux/mfd/dm355evm_msp.h
deleted file mode 100644
index 372470350fab..000000000000
--- a/include/linux/mfd/dm355evm_msp.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board
- */
-#ifndef __LINUX_I2C_DM355EVM_MSP
-#define __LINUX_I2C_DM355EVM_MSP
-
-/*
- * Written against Spectrum's writeup for the A4 firmware revision,
- * and tweaked to match source and rev D2 schematics by removing CPLD
- * and NOR flash hooks (which were last appropriate in rev B boards).
- *
- * Note that the firmware supports a flavor of write posting ... to be
- * sure a write completes, issue another read or write.
- */
-
-/* utilities to access "registers" emulated by msp430 firmware */
-extern int dm355evm_msp_write(u8 value, u8 reg);
-extern int dm355evm_msp_read(u8 reg);
-
-
-/* command/control registers */
-#define DM355EVM_MSP_COMMAND 0x00
-# define MSP_COMMAND_NULL 0
-# define MSP_COMMAND_RESET_COLD 1
-# define MSP_COMMAND_RESET_WARM 2
-# define MSP_COMMAND_RESET_WARM_I 3
-# define MSP_COMMAND_POWEROFF 4
-# define MSP_COMMAND_IR_REINIT 5
-#define DM355EVM_MSP_STATUS 0x01
-# define MSP_STATUS_BAD_OFFSET BIT(0)
-# define MSP_STATUS_BAD_COMMAND BIT(1)
-# define MSP_STATUS_POWER_ERROR BIT(2)
-# define MSP_STATUS_RXBUF_OVERRUN BIT(3)
-#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */
-# define MSP_RESET_DC5 BIT(0)
-# define MSP_RESET_TVP5154 BIT(2)
-# define MSP_RESET_IMAGER BIT(3)
-# define MSP_RESET_ETHERNET BIT(4)
-# define MSP_RESET_SYS BIT(5)
-# define MSP_RESET_AIC33 BIT(7)
-
-/* GPIO registers ... bit patterns mostly match the source MSP ports */
-#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */
-#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */
-# define MSP_SWITCH1_SW6_1 BIT(0)
-# define MSP_SWITCH1_SW6_2 BIT(1)
-# define MSP_SWITCH1_SW6_3 BIT(2)
-# define MSP_SWITCH1_SW6_4 BIT(3)
-# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */
-# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */
-#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */
-# define MSP_SWITCH2_SW10 BIT(3)
-# define MSP_SWITCH2_SW11 BIT(4)
-# define MSP_SWITCH2_SW12 BIT(5)
-# define MSP_SWITCH2_SW13 BIT(6)
-# define MSP_SWITCH2_SW14 BIT(7)
-#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */
-# define MSP_SDMMC_0_WP BIT(1)
-# define MSP_SDMMC_0_CD BIT(2) /* active low */
-# define MSP_SDMMC_1_WP BIT(3)
-# define MSP_SDMMC_1_CD BIT(4) /* active low */
-#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */
-#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */
-# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */
-
-/* power supply registers are currently omitted */
-
-/* RTC registers */
-#define DM355EVM_MSP_RTC_0 0x12 /* LSB */
-#define DM355EVM_MSP_RTC_1 0x13
-#define DM355EVM_MSP_RTC_2 0x14
-#define DM355EVM_MSP_RTC_3 0x15 /* MSB */
-
-/* input event queue registers; code == ((HIGH << 8) | LOW) */
-#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */
-#define DM355EVM_MSP_INPUT_HIGH 0x17
-#define DM355EVM_MSP_INPUT_LOW 0x18
-
-#endif /* __LINUX_I2C_DM355EVM_MSP */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index b06171322178..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -2,7 +2,7 @@
/*
* Device driver for regulators in hi655x IC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
@@ -12,6 +12,8 @@
#ifndef __HI655X_PMIC_H
#define __HI655X_PMIC_H
+#include <linux/gpio/consumer.h>
+
/* Hi655x registers are mapped to memory bus in 4 bytes stride */
#define HI655X_STRIDE 4
#define HI655X_BUS_ADDR(x) ((x) << 2)
@@ -50,10 +52,9 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
- int gpio;
+ struct gpio_desc *gpio;
unsigned int ver;
struct regmap_irq_chip_data *irq_data;
};
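The int-to-gpio_desc conversion above moves hi655x to descriptor-based GPIO lookup. A hedged sketch of what the probe-side request could look like; the "pmic" connection ID is an assumption, not taken from the driver.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int hi655x_request_gpio(struct device *dev, struct hi655x_pmic *pmic)
{
	/* Descriptor lookup replaces the old integer GPIO number */
	pmic->gpio = devm_gpiod_get_optional(dev, "pmic", GPIOD_IN);
	return PTR_ERR_OR_ZERO(pmic->gpio);
}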
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
deleted file mode 100644
index 3d3ed67bd969..000000000000
--- a/include/linux/mfd/htc-pasic3.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * HTC PASIC3 driver - LEDs and DS1WM
- *
- * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-#ifndef __PASIC3_H
-#define __PASIC3_H
-
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-extern void pasic3_write_register(struct device *dev, u32 reg, u8 val);
-extern u8 pasic3_read_register(struct device *dev, u32 reg);
-
-/*
- * mask for registers 0x20,0x21,0x22
- */
-#define PASIC3_MASK_LED0 0x04
-#define PASIC3_MASK_LED1 0x08
-#define PASIC3_MASK_LED2 0x40
-
-/*
- * bits in register 0x06
- */
-#define PASIC3_BIT2_LED0 0x08
-#define PASIC3_BIT2_LED1 0x10
-#define PASIC3_BIT2_LED2 0x20
-
-struct pasic3_led {
- struct led_classdev led;
- unsigned int hw_num;
- unsigned int bit2;
- unsigned int mask;
- struct pasic3_leds_machinfo *pdata;
-};
-
-struct pasic3_leds_machinfo {
- unsigned int num_leds;
- unsigned int power_gpio;
- struct pasic3_led *leds;
-};
-
-struct pasic3_platform_data {
- struct pasic3_leds_machinfo *led_pdata;
- unsigned int clock_rate;
-};
-
-#endif
diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h
new file mode 100644
index 000000000000..1db532feeb91
--- /dev/null
+++ b/include/linux/mfd/idt82p33_reg.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT82P33_REG
+#define HAVE_IDT82P33_REG
+
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+
+/* Register address */
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define DPLL1_OPERATING_STS 0x102
+#define DPLL2_OPERATING_STS 0x182
+
+#define DPLL1_CURRENT_FREQ_STS 0x103
+#define DPLL2_CURRENT_FREQ_STS 0x183
+
+#define REG_SOFT_RESET 0x381
+
+#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+#define TOD_TRIGGER(wr_trig, rd_trig) (((wr_trig) & 0xf) << 4 | ((rd_trig) & 0xf))
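+/*
+ * Editor's illustration: TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ * HW_TOD_RD_TRIG_SEL_LSB_TOD_STS) packs both nibbles into 0x99, since
+ * both selectors equal 9 (see the enums below).
+ */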
+
+/* Register bit definitions */
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+#define COMBO_MODE_EN BIT(5)
+#define COMBO_MODE_SHIFT (6)
+#define COMBO_MODE_MASK (0x3)
+
+/* Bit definitions for DPLL_OPERATING_STS register */
+#define OPERATING_STS_MASK (0x7)
+#define OPERATING_STS_SHIFT (0x0)
+
+/* Bit definitions for DPLL_TOD_TRIGGER register */
+#define READ_TRIGGER_MASK (0xF)
+#define READ_TRIGGER_SHIFT (0x0)
+#define WRITE_TRIGGER_MASK (0xF0)
+#define WRITE_TRIGGER_SHIFT (0x4)
+
+/* Bit definitions for REG_SOFT_RESET register */
+#define SOFT_RESET_EN BIT(7)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/** @brief Enumerated type listing DPLL operational states */
+enum dpll_state {
+ DPLL_STATE_FREERUN = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_LOCKED = 4,
+ DPLL_STATE_PRELOCKED2 = 5,
+ DPLL_STATE_PRELOCKED = 6,
+ DPLL_STATE_LOSTPHASE = 7,
+ DPLL_STATE_MAX
+};
+
+#endif
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
new file mode 100644
index 000000000000..0c706085c205
--- /dev/null
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/**
+ * Select FOD5 as the sync_trigger for the Q8 divider.
+ * A transition from logic zero to one triggers a
+ * sync of the Q8 divider.
+ *
+ * Unused when FOD4 is driving the Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD5 as the clock and sync driver for the Q8 divider.
+ * Enable the fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving the Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/**
+ * Select FOD6 as the sync_trigger for the Q11 divider.
+ * A transition from logic zero to one triggers a
+ * sync of the Q11 divider.
+ *
+ * Unused when FOD7 is driving the Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD6 as the clock and sync driver for the Q11 divider.
+ * Enable the fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving the Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_V520 0x0013
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define DPLL0_STATUS 0x0018
+#define DPLL1_STATUS 0x0019
+#define DPLL2_STATUS 0x001a
+#define DPLL3_STATUS 0x001b
+#define DPLL4_STATUS 0x001c
+#define DPLL5_STATUS 0x001d
+#define DPLL6_STATUS 0x001e
+#define DPLL7_STATUS 0x001f
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
+#define DPLL0_FILTER_STATUS 0x0044
+#define DPLL1_FILTER_STATUS 0x004c
+#define DPLL2_FILTER_STATUS 0x0054
+#define DPLL3_FILTER_STATUS 0x005c
+#define DPLL4_FILTER_STATUS 0x0064
+#define DPLL5_FILTER_STATUS 0x006c
+#define DPLL6_FILTER_STATUS 0x0074
+#define DPLL7_FILTER_STATUS 0x007c
+#define DPLLSYS_FILTER_STATUS 0x0084
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+#define GPIO0_TO_7_OUT_V520 0x0002
+#define GPIO8_TO_15_OUT_V520 0x0003
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+#define INPUT_1 0xc1c0
+#define INPUT_2 0xc1d0
+#define INPUT_3 0xc200
+#define INPUT_4 0xc210
+#define INPUT_5 0xc220
+#define INPUT_6 0xc230
+#define INPUT_7 0xc240
+#define INPUT_8 0xc250
+#define INPUT_9 0xc260
+#define INPUT_10 0xc280
+#define INPUT_11 0xc290
+#define INPUT_12 0xc2a0
+#define INPUT_13 0xc2b0
+#define INPUT_14 0xc2c0
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+#define REF_MON_1 0xc2ec
+#define REF_MON_2 0xc300
+#define REF_MON_3 0xc30c
+#define REF_MON_4 0xc318
+#define REF_MON_5 0xc324
+#define REF_MON_6 0xc330
+#define REF_MON_7 0xc33c
+#define REF_MON_8 0xc348
+#define REF_MON_9 0xc354
+#define REF_MON_10 0xc360
+#define REF_MON_11 0xc36c
+#define REF_MON_12 0xc380
+#define REF_MON_13 0xc38c
+#define REF_MON_14 0xc398
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+#define DPLL_MODE_V520 0x003B
+#define DPLL_1 0xc400
+#define DPLL_2 0xc438
+#define DPLL_2_V520 0xc43c
+#define DPLL_3 0xc480
+#define DPLL_4 0xc4b8
+#define DPLL_4_V520 0xc4bc
+#define DPLL_5 0xc500
+#define DPLL_6 0xc538
+#define DPLL_6_V520 0xc53c
+#define DPLL_7 0xc580
+#define SYS_DPLL 0xc5b8
+#define SYS_DPLL_V520 0xc5bc
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+#define DPLL_CTRL_1 0xc63c
+#define DPLL_CTRL_2 0xc680
+#define DPLL_CTRL_3 0xc6bc
+#define DPLL_CTRL_4 0xc700
+#define DPLL_CTRL_5 0xc73c
+#define DPLL_CTRL_6 0xc780
+#define DPLL_CTRL_7 0xc7bc
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+#define DPLL_PHASE_1 0xc81c
+#define DPLL_PHASE_2 0xc820
+#define DPLL_PHASE_3 0xc824
+#define DPLL_PHASE_4 0xc828
+#define DPLL_PHASE_5 0xc82c
+#define DPLL_PHASE_6 0xc830
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+#define DPLL_FREQ_1 0xc840
+#define DPLL_FREQ_2 0xc848
+#define DPLL_FREQ_3 0xc850
+#define DPLL_FREQ_4 0xc858
+#define DPLL_FREQ_5 0xc860
+#define DPLL_FREQ_6 0xc868
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+#define DPLL_PHASE_PULL_IN_1 0xc888
+#define DPLL_PHASE_PULL_IN_2 0xc890
+#define DPLL_PHASE_PULL_IN_3 0xc898
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+#define GPIO_CTRL_V520 0x0011
+#define GPIO_1 0xc8d4
+#define GPIO_2 0xc8e6
+#define GPIO_3 0xc900
+#define GPIO_4 0xc912
+#define GPIO_5 0xc924
+#define GPIO_6 0xc936
+#define GPIO_7 0xc948
+#define GPIO_8 0xc95a
+#define GPIO_9 0xc980
+#define GPIO_10 0xc992
+#define GPIO_11 0xc9a4
+#define GPIO_12 0xc9b6
+#define GPIO_13 0xc9c8
+#define GPIO_14 0xc9da
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+#define OUTPUT_0 0xca14
+#define OUTPUT_0_V520 0xca20
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+#define OUTPUT_1 0xca24
+#define OUTPUT_1_V520 0xca30
+#define OUTPUT_2 0xca34
+#define OUTPUT_2_V520 0xca40
+#define OUTPUT_3 0xca44
+#define OUTPUT_3_V520 0xca50
+#define OUTPUT_4 0xca54
+#define OUTPUT_4_V520 0xca60
+#define OUTPUT_5 0xca64
+#define OUTPUT_5_V520 0xca80
+#define OUTPUT_6 0xca80
+#define OUTPUT_6_V520 0xca90
+#define OUTPUT_7 0xca90
+#define OUTPUT_7_V520 0xcaa0
+#define OUTPUT_8 0xcaa0
+#define OUTPUT_8_V520 0xcab0
+#define OUTPUT_9 0xcab0
+#define OUTPUT_9_V520 0xcac0
+#define OUTPUT_10 0xcac0
+#define OUTPUT_10_V520 0xcad0
+#define OUTPUT_11 0xcad0
+#define OUTPUT_11_V520 0xcae0
+
+#define SERIAL 0xcae0
+#define SERIAL_V520 0xcaf0
+
+#define PWM_ENCODER_0 0xcb00
+#define PWM_ENCODER_1 0xcb08
+#define PWM_ENCODER_2 0xcb10
+#define PWM_ENCODER_3 0xcb18
+#define PWM_ENCODER_4 0xcb20
+#define PWM_ENCODER_5 0xcb28
+#define PWM_ENCODER_6 0xcb30
+#define PWM_ENCODER_7 0xcb38
+#define PWM_DECODER_0 0xcb40
+#define PWM_DECODER_1 0xcb48
+#define PWM_DECODER_1_V520 0xcb4a
+#define PWM_DECODER_2 0xcb50
+#define PWM_DECODER_2_V520 0xcb54
+#define PWM_DECODER_3 0xcb58
+#define PWM_DECODER_3_V520 0xcb5e
+#define PWM_DECODER_4 0xcb60
+#define PWM_DECODER_4_V520 0xcb68
+#define PWM_DECODER_5 0xcb68
+#define PWM_DECODER_5_V520 0xcb80
+#define PWM_DECODER_6 0xcb70
+#define PWM_DECODER_6_V520 0xcb8a
+#define PWM_DECODER_7 0xcb80
+#define PWM_DECODER_7_V520 0xcb94
+#define PWM_DECODER_8 0xcb88
+#define PWM_DECODER_8_V520 0xcb9e
+#define PWM_DECODER_9 0xcb90
+#define PWM_DECODER_9_V520 0xcba8
+#define PWM_DECODER_10 0xcb98
+#define PWM_DECODER_10_V520 0xcbb2
+#define PWM_DECODER_11 0xcba0
+#define PWM_DECODER_11_V520 0xcbbc
+#define PWM_DECODER_12 0xcba8
+#define PWM_DECODER_12_V520 0xcbc6
+#define PWM_DECODER_13 0xcbb0
+#define PWM_DECODER_13_V520 0xcbd0
+#define PWM_DECODER_14 0xcbb8
+#define PWM_DECODER_14_V520 0xcbda
+#define PWM_DECODER_15 0xcbc0
+#define PWM_DECODER_15_V520 0xcbe4
+#define PWM_USER_DATA 0xcbc8
+#define PWM_USER_DATA_V520 0xcbf0
+
+#define TOD_0 0xcbcc
+#define TOD_0_V520 0xcc00
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+#define TOD_CFG_V520 0x0001
+#define TOD_1 0xcbce
+#define TOD_1_V520 0xcc02
+#define TOD_2 0xcbd0
+#define TOD_2_V520 0xcc04
+#define TOD_3 0xcbd2
+#define TOD_3_V520 0xcc06
+
+#define TOD_WRITE_0 0xcc00
+#define TOD_WRITE_0_V520 0xcc10
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+#define TOD_WRITE_1 0xcc10
+#define TOD_WRITE_1_V520 0xcc20
+#define TOD_WRITE_2 0xcc20
+#define TOD_WRITE_2_V520 0xcc30
+#define TOD_WRITE_3 0xcc30
+#define TOD_WRITE_3_V520 0xcc40
+
+#define TOD_READ_PRIMARY_0 0xcc40
+#define TOD_READ_PRIMARY_0_V520 0xcc50
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+#define TOD_READ_PRIMARY_CMD_V520 0x000f
+#define TOD_READ_PRIMARY_1 0xcc50
+#define TOD_READ_PRIMARY_1_V520 0xcc60
+#define TOD_READ_PRIMARY_2 0xcc60
+#define TOD_READ_PRIMARY_2_V520 0xcc80
+#define TOD_READ_PRIMARY_3 0xcc80
+#define TOD_READ_PRIMARY_3_V520 0xcc90
+
+#define TOD_READ_SECONDARY_0 0xcc90
+#define TOD_READ_SECONDARY_0_V520 0xcca0
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_SECONDARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_SECONDARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_SECONDARY_CMD 0x000e
+#define TOD_READ_SECONDARY_CMD_V520 0x000f
+
+#define TOD_READ_SECONDARY_1 0xcca0
+#define TOD_READ_SECONDARY_1_V520 0xccb0
+#define TOD_READ_SECONDARY_2 0xccb0
+#define TOD_READ_SECONDARY_2_V520 0xccc0
+#define TOD_READ_SECONDARY_3 0xccc0
+#define TOD_READ_SECONDARY_3_V520 0xccd0
+
+#define OUTPUT_TDC_CFG 0xccd0
+#define OUTPUT_TDC_CFG_V520 0xcce0
+#define OUTPUT_TDC_0 0xcd00
+#define OUTPUT_TDC_1 0xcd08
+#define OUTPUT_TDC_2 0xcd10
+#define OUTPUT_TDC_3 0xcd18
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+#define SCRATCH_V520 0xcf4c
+
+#define EEPROM 0xcf68
+#define EEPROM_V520 0xcf64
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 only */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
+/* Bit definitions for the DPLL0_STATUS register */
+#define DPLL_STATE_MASK (0xf)
+#define DPLL_STATE_SHIFT (0x0)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_PLL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+enum scsr_read_trig_sel {
+ /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_DISABLE = 0,
+ /* TRIGGER IMMEDIATELY */
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1,
+ /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */
+ SCSR_TOD_READ_TRIG_SEL_TODPPS = 2,
+ /* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */
+ SCSR_TOD_READ_TRIG_SEL_REFCLK = 3,
+ /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */
+ SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4,
+ SCSR_TOD_READ_TRIG_SEL_RESERVED = 5,
+ /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6,
+ /* TRIGGER ON SELECTED GPIO */
+ SCSR_TOD_READ_TRIG_SEL_GPIO = 7,
+ SCSR_TOD_READ_TRIG_SEL_MAX = SCSR_TOD_READ_TRIG_SEL_GPIO,
+};
+
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+#endif
diff --git a/include/linux/mfd/idtRC38xxx_reg.h b/include/linux/mfd/idtRC38xxx_reg.h
new file mode 100644
index 000000000000..ec11872f51ad
--- /dev/null
+++ b/include/linux/mfd/idtRC38xxx_reg.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on PolarBear_CSRs.RevA.xlsx (2023-04-21)
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef MFD_IDTRC38XXX_REG
+#define MFD_IDTRC38XXX_REG
+
+/* GLOBAL */
+#define SOFT_RESET_CTRL (0x15) /* Specific to FC3W */
+#define MISC_CTRL (0x14) /* Specific to FC3A */
+#define APLL_REINIT BIT(1)
+#define APLL_REINIT_VFC3A BIT(2)
+
+#define DEVICE_ID (0x2)
+#define DEVICE_ID_MASK (0x1000) /* Bit 12 is 1 if FC3W and 0 if FC3A */
+#define DEVICE_ID_SHIFT (12)
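+/*
+ * Editor's illustration: ((id & DEVICE_ID_MASK) >> DEVICE_ID_SHIFT)
+ * reads back 1 on FC3W parts and 0 on FC3A parts.
+ */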
+
+/* FOD */
+#define FOD_0 (0x300)
+#define FOD_0_VFC3A (0x400)
+#define FOD_1 (0x340)
+#define FOD_1_VFC3A (0x440)
+#define FOD_2 (0x380)
+#define FOD_2_VFC3A (0x480)
+
+/* TDCAPLL */
+#define TDC_CTRL (0x44a) /* Specific to FC3W */
+#define TDC_ENABLE_CTRL (0x169) /* Specific to FC3A */
+#define TDC_DAC_CAL_CTRL (0x16a) /* Specific to FC3A */
+#define TDC_EN BIT(0)
+#define TDC_DAC_RECAL_REQ BIT(1)
+#define TDC_DAC_RECAL_REQ_VFC3A BIT(0)
+
+#define TDC_FB_DIV_INT_CNFG (0x442)
+#define TDC_FB_DIV_INT_CNFG_VFC3A (0x162)
+#define TDC_FB_DIV_INT_MASK GENMASK(7, 0)
+#define TDC_REF_DIV_CNFG (0x443)
+#define TDC_REF_DIV_CNFG_VFC3A (0x163)
+#define TDC_REF_DIV_CONFIG_MASK GENMASK(2, 0)
+
+/* TIME SYNC CHANNEL */
+#define TIME_CLOCK_SRC (0xa01) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT (0xa00) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT_MASK GENMASK(5, 0)
+
+#define SUB_SYNC_GEN_CNFG (0xa04)
+
+#define TOD_COUNTER_READ_REQ (0xa5f)
+#define TOD_COUNTER_READ_REQ_VFC3A (0x6df)
+#define TOD_SYNC_LOAD_VAL_CTRL (0xa10)
+#define TOD_SYNC_LOAD_VAL_CTRL_VFC3A (0x690)
+#define SYNC_COUNTER_MASK GENMASK_ULL(51, 0)
+#define SUB_SYNC_COUNTER_MASK GENMASK(30, 0)
+#define TOD_SYNC_LOAD_REQ_CTRL (0xa21)
+#define TOD_SYNC_LOAD_REQ_CTRL_VFC3A (0x6a1)
+#define SYNC_LOAD_ENABLE BIT(1)
+#define SUB_SYNC_LOAD_ENABLE BIT(0)
+#define SYNC_LOAD_REQ BIT(0)
+
+#define LPF_MODE_CNFG (0xa80)
+#define LPF_MODE_CNFG_VFC3A (0x700)
+enum lpf_mode {
+ LPF_DISABLED = 0,
+ LPF_WP = 1,
+ LPF_HOLDOVER = 2,
+ LPF_WF = 3,
+ LPF_INVALID = 4
+};
+#define LPF_CTRL (0xa98)
+#define LPF_CTRL_VFC3A (0x718)
+#define LPF_EN BIT(0)
+
+#define LPF_BW_CNFG (0xa81)
+#define LPF_BW_SHIFT GENMASK(7, 3)
+#define LPF_BW_MULT GENMASK(2, 0)
+#define LPF_BW_SHIFT_DEFAULT (0xb)
+#define LPF_BW_MULT_DEFAULT (0x0)
+#define LPF_BW_SHIFT_1PPS (0x5)
+
+#define LPF_WR_PHASE_CTRL (0xaa8)
+#define LPF_WR_PHASE_CTRL_VFC3A (0x728)
+#define LPF_WR_FREQ_CTRL (0xab0)
+#define LPF_WR_FREQ_CTRL_VFC3A (0x730)
+
+#define TIME_CLOCK_TDC_FANOUT_CNFG (0xB00)
+#define TIME_SYNC_TO_TDC_EN BIT(0)
+#define SIG1_MUX_SEL_MASK GENMASK(7, 4)
+#define SIG2_MUX_SEL_MASK GENMASK(11, 8)
+enum tdc_mux_sel {
+ REF0 = 0,
+ REF1 = 1,
+ REF2 = 2,
+ REF3 = 3,
+ REF_CLK5 = 4,
+ REF_CLK6 = 5,
+ DPLL_FB_TO_TDC = 6,
+ DPLL_FB_DIVIDED_TO_TDC = 7,
+ TIME_CLK_DIVIDED = 8,
+ TIME_SYNC = 9,
+};
+
+#define TIME_CLOCK_MEAS_CNFG (0xB04)
+#define TDC_MEAS_MODE BIT(0)
+enum tdc_meas_mode {
+ CONTINUOUS = 0,
+ ONE_SHOT = 1,
+ MEAS_MODE_INVALID = 2,
+};
+
+#define TIME_CLOCK_MEAS_DIV_CNFG (0xB08)
+#define TIME_REF_DIV_MASK GENMASK(29, 24)
+
+#define TIME_CLOCK_MEAS_CTRL (0xB10)
+#define TDC_MEAS_EN BIT(0)
+#define TDC_MEAS_START BIT(1)
+
+#define TDC_FIFO_READ_REQ (0xB2F)
+#define TDC_FIFO_READ (0xB30)
+#define COARSE_MEAS_MASK GENMASK_ULL(39, 13)
+#define FINE_MEAS_MASK GENMASK(12, 0)
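+/*
+ * Editor's sketch: one FIFO sample splits into its two fields with
+ * FIELD_GET(COARSE_MEAS_MASK, sample) (27-bit coarse count) and
+ * FIELD_GET(FINE_MEAS_MASK, sample) (13-bit fine count).
+ */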
+
+#define TDC_FIFO_CTRL (0xB12)
+#define FIFO_CLEAR BIT(0)
+#define TDC_FIFO_STS (0xB38)
+#define FIFO_FULL BIT(1)
+#define FIFO_EMPTY BIT(0)
+#define TDC_FIFO_EVENT (0xB39)
+#define FIFO_OVERRUN BIT(1)
+
+/* DPLL */
+#define MAX_REFERENCE_INDEX (3)
+#define MAX_NUM_REF_PRIORITY (4)
+
+#define MAX_DPLL_INDEX (2)
+
+#define DPLL_STS (0x580)
+#define DPLL_STS_VFC3A (0x571)
+#define DPLL_STATE_STS_MASK (0x70)
+#define DPLL_STATE_STS_SHIFT (4)
+#define DPLL_REF_SEL_STS_MASK (0x6)
+#define DPLL_REF_SEL_STS_SHIFT (1)
+
+#define DPLL_REF_PRIORITY_CNFG (0x502)
+#define DPLL_REFX_PRIORITY_DISABLE_MASK (0xf)
+#define DPLL_REF0_PRIORITY_ENABLE_AND_SET_MASK (0x31)
+#define DPLL_REF1_PRIORITY_ENABLE_AND_SET_MASK (0xc2)
+#define DPLL_REF2_PRIORITY_ENABLE_AND_SET_MASK (0x304)
+#define DPLL_REF3_PRIORITY_ENABLE_AND_SET_MASK (0xc08)
+#define DPLL_REF0_PRIORITY_SHIFT (4)
+#define DPLL_REF1_PRIORITY_SHIFT (6)
+#define DPLL_REF2_PRIORITY_SHIFT (8)
+#define DPLL_REF3_PRIORITY_SHIFT (10)
+
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKED = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_WRITE_FREQUENCY = 3,
+ DPLL_STATE_ACQUIRE = 4,
+ DPLL_STATE_HITLESS_SWITCH = 5,
+ DPLL_STATE_MAX = DPLL_STATE_HITLESS_SWITCH
+};
+
+/* REFMON */
+#define LOSMON_STS_0 (0x81e)
+#define LOSMON_STS_0_VFC3A (0x18e)
+#define LOSMON_STS_1 (0x82e)
+#define LOSMON_STS_1_VFC3A (0x19e)
+#define LOSMON_STS_2 (0x83e)
+#define LOSMON_STS_2_VFC3A (0x1ae)
+#define LOSMON_STS_3 (0x84e)
+#define LOSMON_STS_3_VFC3A (0x1be)
+#define LOS_STS_MASK (0x1)
+
+#define FREQMON_STS_0 (0x874)
+#define FREQMON_STS_0_VFC3A (0x1d4)
+#define FREQMON_STS_1 (0x894)
+#define FREQMON_STS_1_VFC3A (0x1f4)
+#define FREQMON_STS_2 (0x8b4)
+#define FREQMON_STS_2_VFC3A (0x214)
+#define FREQMON_STS_3 (0x8d4)
+#define FREQMON_STS_3_VFC3A (0x234)
+#define FREQ_FAIL_STS_SHIFT (31)
+
+/* Firmware interface */
+#define TIME_CLK_FREQ_ADDR (0xffa0)
+#define XTAL_FREQ_ADDR (0xffa1)
+
+/*
+ * Return the register address or field mask that matches the passed-in firmware version
+ */
+#define IDTFC3_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+#define IDTFC3_FW_FIELD(FW, VER, FIELD) (((FW) < (VER)) ? (FIELD) : (FIELD##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ VFC3W = 1,
+ VFC3A = 2
+};
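+/*
+ * Editor's illustration: with fw = VFC3W, IDTFC3_FW_REG(fw, VFC3A, DPLL_STS)
+ * selects DPLL_STS (0x580) because VFC3W < VFC3A; with fw = VFC3A the macro
+ * token-pastes to DPLL_STS_VFC3A (0x571) instead.
+ */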
+
+/* XTAL_FREQ_ADDR/TIME_CLK_FREQ_ADDR */
+enum {
+ FREQ_MIN = 0,
+ FREQ_25M = 1,
+ FREQ_49_152M = 2,
+ FREQ_50M = 3,
+ FREQ_100M = 4,
+ FREQ_125M = 5,
+ FREQ_250M = 6,
+ FREQ_MAX
+};
+
+struct idtfc3_hw_param {
+ u32 xtal_freq;
+ u32 time_clk_freq;
+};
+
+struct idtfc3_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+static inline void idtfc3_default_hw_param(struct idtfc3_hw_param *hw_param)
+{
+ hw_param->xtal_freq = 49152000;
+ hw_param->time_clk_freq = 25000000;
+}
+
+static inline int idtfc3_set_hw_param(struct idtfc3_hw_param *hw_param,
+ u16 addr, u8 val)
+{
+ if (addr == XTAL_FREQ_ADDR)
+ switch (val) {
+ case FREQ_49_152M:
+ hw_param->xtal_freq = 49152000;
+ break;
+ case FREQ_50M:
+ hw_param->xtal_freq = 50000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if (addr == TIME_CLK_FREQ_ADDR)
+ switch (val) {
+ case FREQ_25M:
+ hw_param->time_clk_freq = 25000000;
+ break;
+ case FREQ_50M:
+ hw_param->time_clk_freq = 50000000;
+ break;
+ case FREQ_100M:
+ hw_param->time_clk_freq = 100000000;
+ break;
+ case FREQ_125M:
+ hw_param->time_clk_freq = 125000000;
+ break;
+ case FREQ_250M:
+ hw_param->time_clk_freq = 250000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EFAULT;
+
+ return 0;
+}
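+/*
+ * Editor's usage sketch (hypothetical helper, not part of the original
+ * header): apply firmware-provided records on top of the defaults using
+ * only the real helpers above; the record walk itself is illustrative.
+ */
+static inline int idtfc3_example_apply_fwrc(struct idtfc3_hw_param *hw_param,
+					    const struct idtfc3_fwrc *rec,
+					    size_t count)
+{
+	size_t i;
+	int err;
+
+	idtfc3_default_hw_param(hw_param);
+	for (i = 0; i < count; i++) {
+		u16 addr = ((u16)rec[i].hiaddr << 8) | rec[i].loaddr;
+
+		err = idtfc3_set_hw_param(hw_param, addr, rec[i].value);
+		if (err == -EINVAL)
+			return err;	/* unsupported frequency code */
+		/* -EFAULT just means this record is not a hw_param; skip */
+	}
+	return 0;
+}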
+
+#endif
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
new file mode 100644
index 000000000000..ee66c9751003
--- /dev/null
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel MAX 10 Board Management Controller chip.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation, Inc.
+ */
+#ifndef __MFD_INTEL_M10_BMC_H
+#define __MFD_INTEL_M10_BMC_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/regmap.h>
+#include <linux/rwsem.h>
+
+#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
+#define M10BMC_N3000_SYS_BASE 0x300800
+#define M10BMC_N3000_SYS_END 0x300fff
+#define M10BMC_N3000_FLASH_BASE 0x10000000
+#define M10BMC_N3000_FLASH_END 0x1fffffff
+#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
+
+#define M10BMC_STAGING_BASE 0x18000000
+#define M10BMC_STAGING_SIZE 0x3800000
+
+/* Register offset of system registers */
+#define NIOS2_N3000_FW_VERSION 0x0
+#define M10BMC_N3000_MAC_LOW 0x10
+#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_N3000_MAC_HIGH 0x14
+#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
+#define M10BMC_N3000_TEST_REG 0x3c
+#define M10BMC_N3000_BUILD_VER 0x68
+#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
+#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
+#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
+
+/* Telemetry registers */
+#define M10BMC_N3000_TELEM_START 0x100
+#define M10BMC_N3000_TELEM_END 0x250
+#define M10BMC_D5005_TELEM_END 0x300
+
+/* Secure update doorbell register, in system register region */
+#define M10BMC_N3000_DOORBELL 0x400
+
+/* Authorization Result register, in system register region */
+#define M10BMC_N3000_AUTH_RESULT 0x404
+
+/* Doorbell register fields */
+#define DRBL_RSU_REQUEST BIT(0)
+#define DRBL_RSU_PROGRESS GENMASK(7, 4)
+#define DRBL_HOST_STATUS GENMASK(11, 8)
+#define DRBL_RSU_STATUS GENMASK(23, 16)
+#define DRBL_PKVL_EEPROM_LOAD_SEC BIT(24)
+#define DRBL_PKVL1_POLL_EN BIT(25)
+#define DRBL_PKVL2_POLL_EN BIT(26)
+#define DRBL_CONFIG_SEL BIT(28)
+#define DRBL_REBOOT_REQ BIT(29)
+#define DRBL_REBOOT_DISABLED BIT(30)
+
+/* Progress states */
+#define RSU_PROG_IDLE 0x0
+#define RSU_PROG_PREPARE 0x1
+#define RSU_PROG_READY 0x3
+#define RSU_PROG_AUTHENTICATING 0x4
+#define RSU_PROG_COPYING 0x5
+#define RSU_PROG_UPDATE_CANCEL 0x6
+#define RSU_PROG_PROGRAM_KEY_HASH 0x7
+#define RSU_PROG_RSU_DONE 0x8
+#define RSU_PROG_PKVL_PROM_DONE 0x9
+
+/* Device and error states */
+#define RSU_STAT_NORMAL 0x0
+#define RSU_STAT_TIMEOUT 0x1
+#define RSU_STAT_AUTH_FAIL 0x2
+#define RSU_STAT_COPY_FAIL 0x3
+#define RSU_STAT_FATAL 0x4
+#define RSU_STAT_PKVL_REJECT 0x5
+#define RSU_STAT_NON_INC 0x6
+#define RSU_STAT_ERASE_FAIL 0x7
+#define RSU_STAT_WEAROUT 0x8
+#define RSU_STAT_NIOS_OK 0x80
+#define RSU_STAT_USER_OK 0x81
+#define RSU_STAT_FACTORY_OK 0x82
+#define RSU_STAT_USER_FAIL 0x83
+#define RSU_STAT_FACTORY_FAIL 0x84
+#define RSU_STAT_NIOS_FLASH_ERR 0x85
+#define RSU_STAT_FPGA_FLASH_ERR 0x86
+
+#define HOST_STATUS_IDLE 0x0
+#define HOST_STATUS_WRITE_DONE 0x1
+#define HOST_STATUS_ABORT_RSU 0x2
+
+#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
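+/*
+ * Editor's illustration: a doorbell value of 0x30 gives
+ * rsu_prog(0x30) == FIELD_GET(DRBL_RSU_PROGRESS, 0x30) == 0x3,
+ * i.e. RSU_PROG_READY.
+ */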
+
+/* interval 100ms and timeout 5s */
+#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
+#define NIOS_HANDSHAKE_TIMEOUT_US (5 * 1000 * 1000)
+
+/* RSU PREP Timeout (2 minutes) to erase flash staging area */
+#define RSU_PREP_INTERVAL_MS 100
+#define RSU_PREP_TIMEOUT_MS (2 * 60 * 1000)
+
+/* RSU Complete Timeout (40 minutes) for full flash update */
+#define RSU_COMPLETE_INTERVAL_MS 1000
+#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
+#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
+#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
+#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
+#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
+#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
+#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
+
+/* Address of 4KB inverted bit vector containing staging area FLASH count */
+#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
+
+#define M10BMC_N6000_INDIRECT_BASE 0x400
+
+#define M10BMC_N6000_SYS_BASE 0x0
+#define M10BMC_N6000_SYS_END 0xfff
+
+#define M10BMC_N6000_DOORBELL 0x1c0
+#define M10BMC_N6000_AUTH_RESULT 0x1c4
+#define AUTH_RESULT_RSU_STATUS GENMASK(23, 16)
+
+#define M10BMC_N6000_BUILD_VER 0x0
+#define NIOS2_N6000_FW_VERSION 0x4
+#define M10BMC_N6000_MAC_LOW 0x20
+#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
+#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
+#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
+#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
+#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
+#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
+#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
+
+#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
+
+#define M10BMC_N6000_FLASH_MUX_CTRL 0x1d0
+#define M10BMC_N6000_FLASH_MUX_SELECTION GENMASK(2, 0)
+#define M10BMC_N6000_FLASH_MUX_IDLE 0
+#define M10BMC_N6000_FLASH_MUX_NIOS 1
+#define M10BMC_N6000_FLASH_MUX_HOST 2
+#define M10BMC_N6000_FLASH_MUX_PFL 4
+#define get_flash_mux(mux) FIELD_GET(M10BMC_N6000_FLASH_MUX_SELECTION, mux)
+
+#define M10BMC_N6000_FLASH_NIOS_REQUEST BIT(4)
+#define M10BMC_N6000_FLASH_HOST_REQUEST BIT(5)
+
+#define M10BMC_N6000_FLASH_CTRL 0x40
+#define M10BMC_N6000_FLASH_WR_MODE BIT(0)
+#define M10BMC_N6000_FLASH_RD_MODE BIT(1)
+#define M10BMC_N6000_FLASH_BUSY BIT(2)
+#define M10BMC_N6000_FLASH_FIFO_SPACE GENMASK(13, 4)
+#define M10BMC_N6000_FLASH_READ_COUNT GENMASK(25, 16)
+
+#define M10BMC_N6000_FLASH_ADDR 0x44
+#define M10BMC_N6000_FLASH_FIFO 0x800
+#define M10BMC_N6000_READ_BLOCK_SIZE 0x800
+#define M10BMC_N6000_FIFO_MAX_BYTES 0x800
+#define M10BMC_N6000_FIFO_WORD_SIZE 4
+#define M10BMC_N6000_FIFO_MAX_WORDS (M10BMC_N6000_FIFO_MAX_BYTES / \
+ M10BMC_N6000_FIFO_WORD_SIZE)
+
+#define M10BMC_FLASH_INT_US 1
+#define M10BMC_FLASH_TIMEOUT_US 10000
+
+/**
+ * struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
+ */
+struct m10bmc_csr_map {
+ unsigned int base;
+ unsigned int build_version;
+ unsigned int fw_version;
+ unsigned int mac_low;
+ unsigned int mac_high;
+ unsigned int doorbell;
+ unsigned int auth_result;
+ unsigned int bmc_prog_addr;
+ unsigned int bmc_reh_addr;
+ unsigned int bmc_magic;
+ unsigned int sr_prog_addr;
+ unsigned int sr_reh_addr;
+ unsigned int sr_magic;
+ unsigned int pr_prog_addr;
+ unsigned int pr_reh_addr;
+ unsigned int pr_magic;
+ unsigned int rsu_update_counter;
+};
+
+/**
+ * struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
+ * @cells: MFD cells
+ * @n_cells: number of MFD cells (the ARRAY_SIZE() of @cells)
+ * @handshake_sys_reg_ranges: array of register ranges for fw handshake regs
+ * @handshake_sys_reg_nranges: number of register ranges for fw handshake regs
+ * @csr_map: the mappings for register definition of MAX10 BMC
+ */
+struct intel_m10bmc_platform_info {
+ struct mfd_cell *cells;
+ int n_cells;
+ const struct regmap_range *handshake_sys_reg_ranges;
+ unsigned int handshake_sys_reg_nranges;
+ const struct m10bmc_csr_map *csr_map;
+};
+
+struct intel_m10bmc;
+
+/**
+ * struct intel_m10bmc_flash_bulk_ops - device specific operations for flash R/W
+ * @read: read a block of data from flash
+ * @write: write a block of data to flash
+ * @lock_write: locks flash access for erase+write
+ * @unlock_write: unlock flash access
+ *
+ * Write must be protected with @lock_write and @unlock_write. While the flash
+ * is locked, @read returns -EBUSY.
+ */
+struct intel_m10bmc_flash_bulk_ops {
+ int (*read)(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size);
+ int (*write)(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size);
+ int (*lock_write)(struct intel_m10bmc *m10bmc);
+ void (*unlock_write)(struct intel_m10bmc *m10bmc);
+};
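+/*
+ * Editor's illustration of the locking protocol documented above
+ * (a sketch, not driver code):
+ *
+ *	err = ops->lock_write(m10bmc);
+ *	if (!err) {
+ *		err = ops->write(m10bmc, buf, offset, size);
+ *		ops->unlock_write(m10bmc);
+ *	}
+ */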
+
+enum m10bmc_fw_state {
+ M10BMC_FW_STATE_NORMAL,
+ M10BMC_FW_STATE_SEC_UPDATE_PREPARE,
+ M10BMC_FW_STATE_SEC_UPDATE_WRITE,
+ M10BMC_FW_STATE_SEC_UPDATE_PROGRAM,
+};
+
+/**
+ * struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
+ * @dev: this device
+ * @regmap: the regmap used to access registers by m10bmc itself
+ * @info: the platform information for MAX10 BMC
+ * @flash_bulk_ops: optional device specific operations for flash R/W
+ * @bmcfw_lock: read/write semaphore to BMC firmware running state
+ * @bmcfw_state: BMC firmware running state. Available only when
+ * handshake_sys_reg_nranges > 0.
+ */
+struct intel_m10bmc {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct intel_m10bmc_platform_info *info;
+ const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
+ struct rw_semaphore bmcfw_lock; /* Protects bmcfw_state */
+ enum m10bmc_fw_state bmcfw_state;
+};
+
+/*
+ * Register access helper functions.
+ *
+ * m10bmc_raw_read - read m10bmc register per addr
+ * m10bmc_sys_read - read m10bmc system register per offset
+ * m10bmc_sys_update_bits - update m10bmc system register per offset
+ */
+static inline int
+m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(m10bmc->regmap, addr, val);
+ if (ret)
+ dev_err(m10bmc->dev, "failed to read raw reg %x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset, unsigned int *val);
+int m10bmc_sys_update_bits(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int msk, unsigned int val);
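+/*
+ * Editor's sketch: a system-register access is expected to resolve its
+ * offset against the per-platform CSR base before the raw regmap access,
+ * roughly:
+ *
+ *	const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+ *
+ *	return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
+ */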
+
+/*
+ * Track the state of the firmware, as it is not available for register
+ * handshakes during secure updates on some MAX 10 cards.
+ */
+void m10bmc_fw_state_set(struct intel_m10bmc *m10bmc, enum m10bmc_fw_state new_state);
+
+/*
+ * MAX10 BMC Core support
+ */
+int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
+extern const struct attribute_group *m10bmc_dev_groups[];
+
+#endif /* __MFD_INTEL_M10_BMC_H */
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
deleted file mode 100644
index 317e8608cf41..000000000000
--- a/include/linux/mfd/intel_msic.h
+++ /dev/null
@@ -1,453 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Core interface for Intel MSIC
- *
- * Copyright (C) 2011, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef __LINUX_MFD_INTEL_MSIC_H__
-#define __LINUX_MFD_INTEL_MSIC_H__
-
-/* ID */
-#define INTEL_MSIC_ID0 0x000 /* RO */
-#define INTEL_MSIC_ID1 0x001 /* RO */
-
-/* IRQ */
-#define INTEL_MSIC_IRQLVL1 0x002
-#define INTEL_MSIC_ADC1INT 0x003
-#define INTEL_MSIC_CCINT 0x004
-#define INTEL_MSIC_PWRSRCINT 0x005
-#define INTEL_MSIC_PWRSRCINT1 0x006
-#define INTEL_MSIC_CHRINT 0x007
-#define INTEL_MSIC_CHRINT1 0x008
-#define INTEL_MSIC_RTCIRQ 0x009
-#define INTEL_MSIC_GPIO0LVIRQ 0x00a
-#define INTEL_MSIC_GPIO1LVIRQ 0x00b
-#define INTEL_MSIC_GPIOHVIRQ 0x00c
-#define INTEL_MSIC_VRINT 0x00d
-#define INTEL_MSIC_OCAUDIO 0x00e
-#define INTEL_MSIC_ACCDET 0x00f
-#define INTEL_MSIC_RESETIRQ1 0x010
-#define INTEL_MSIC_RESETIRQ2 0x011
-#define INTEL_MSIC_MADC1INT 0x012
-#define INTEL_MSIC_MCCINT 0x013
-#define INTEL_MSIC_MPWRSRCINT 0x014
-#define INTEL_MSIC_MPWRSRCINT1 0x015
-#define INTEL_MSIC_MCHRINT 0x016
-#define INTEL_MSIC_MCHRINT1 0x017
-#define INTEL_MSIC_RTCIRQMASK 0x018
-#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
-#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
-#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
-#define INTEL_MSIC_VRINTMASK 0x01c
-#define INTEL_MSIC_OCAUDIOMASK 0x01d
-#define INTEL_MSIC_ACCDETMASK 0x01e
-#define INTEL_MSIC_RESETIRQ1MASK 0x01f
-#define INTEL_MSIC_RESETIRQ2MASK 0x020
-#define INTEL_MSIC_IRQLVL1MSK 0x021
-#define INTEL_MSIC_PBCONFIG 0x03e
-#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
-
-/* GPIO */
-#define INTEL_MSIC_GPIO0LV7CTLO 0x040
-#define INTEL_MSIC_GPIO0LV6CTLO 0x041
-#define INTEL_MSIC_GPIO0LV5CTLO 0x042
-#define INTEL_MSIC_GPIO0LV4CTLO 0x043
-#define INTEL_MSIC_GPIO0LV3CTLO 0x044
-#define INTEL_MSIC_GPIO0LV2CTLO 0x045
-#define INTEL_MSIC_GPIO0LV1CTLO 0x046
-#define INTEL_MSIC_GPIO0LV0CTLO 0x047
-#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
-#define INTEL_MSIC_GPIO1LV6CTLO 0x049
-#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
-#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
-#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
-#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
-#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
-#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
-#define INTEL_MSIC_GPIO0LV7CTLI 0x050
-#define INTEL_MSIC_GPIO0LV6CTLI 0x051
-#define INTEL_MSIC_GPIO0LV5CTLI 0x052
-#define INTEL_MSIC_GPIO0LV4CTLI 0x053
-#define INTEL_MSIC_GPIO0LV3CTLI 0x054
-#define INTEL_MSIC_GPIO0LV2CTLI 0x055
-#define INTEL_MSIC_GPIO0LV1CTLI 0x056
-#define INTEL_MSIC_GPIO0LV0CTLI 0x057
-#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
-#define INTEL_MSIC_GPIO1LV6CTLI 0x059
-#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
-#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
-#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
-#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
-#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
-#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
-#define INTEL_MSIC_PWM0CLKDIV1 0x061
-#define INTEL_MSIC_PWM0CLKDIV0 0x062
-#define INTEL_MSIC_PWM1CLKDIV1 0x063
-#define INTEL_MSIC_PWM1CLKDIV0 0x064
-#define INTEL_MSIC_PWM2CLKDIV1 0x065
-#define INTEL_MSIC_PWM2CLKDIV0 0x066
-#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
-#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
-#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
-#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
-#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
-#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
-#define INTEL_MSIC_GPIO0HV0CTLO 0x070
-#define INTEL_MSIC_GPIO1HV3CTLO 0x071
-#define INTEL_MSIC_GPIO1HV2CTLO 0x072
-#define INTEL_MSIC_GPIO1HV1CTLO 0x073
-#define INTEL_MSIC_GPIO1HV0CTLO 0x074
-#define INTEL_MSIC_GPIO0HV3CTLI 0x075
-#define INTEL_MSIC_GPIO0HV2CTLI 0x076
-#define INTEL_MSIC_GPIO0HV1CTLI 0x077
-#define INTEL_MSIC_GPIO0HV0CTLI 0x078
-#define INTEL_MSIC_GPIO1HV3CTLI 0x079
-#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
-#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
-#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
-
-/* SVID */
-#define INTEL_MSIC_SVIDCTRL0 0x080
-#define INTEL_MSIC_SVIDCTRL1 0x081
-#define INTEL_MSIC_SVIDCTRL2 0x082
-#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
-#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
-#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
-#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
-#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
-#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
-#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
-#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
-
-/* VREG */
-#define INTEL_MSIC_VCCLATCH 0x0c0
-#define INTEL_MSIC_VNNLATCH 0x0c1
-#define INTEL_MSIC_VCCCNT 0x0c2
-#define INTEL_MSIC_SMPSRAMP 0x0c3
-#define INTEL_MSIC_VNNCNT 0x0c4
-#define INTEL_MSIC_VNNAONCNT 0x0c5
-#define INTEL_MSIC_VCC122AONCNT 0x0c6
-#define INTEL_MSIC_V180AONCNT 0x0c7
-#define INTEL_MSIC_V500CNT 0x0c8
-#define INTEL_MSIC_VIHFCNT 0x0c9
-#define INTEL_MSIC_LDORAMP1 0x0ca
-#define INTEL_MSIC_LDORAMP2 0x0cb
-#define INTEL_MSIC_VCC108AONCNT 0x0cc
-#define INTEL_MSIC_VCC108ASCNT 0x0cd
-#define INTEL_MSIC_VCC108CNT 0x0ce
-#define INTEL_MSIC_VCCA100ASCNT 0x0cf
-#define INTEL_MSIC_VCCA100CNT 0x0d0
-#define INTEL_MSIC_VCC180AONCNT 0x0d1
-#define INTEL_MSIC_VCC180CNT 0x0d2
-#define INTEL_MSIC_VCC330CNT 0x0d3
-#define INTEL_MSIC_VUSB330CNT 0x0d4
-#define INTEL_MSIC_VCCSDIOCNT 0x0d5
-#define INTEL_MSIC_VPROG1CNT 0x0d6
-#define INTEL_MSIC_VPROG2CNT 0x0d7
-#define INTEL_MSIC_VEMMCSCNT 0x0d8
-#define INTEL_MSIC_VEMMC1CNT 0x0d9
-#define INTEL_MSIC_VEMMC2CNT 0x0da
-#define INTEL_MSIC_VAUDACNT 0x0db
-#define INTEL_MSIC_VHSPCNT 0x0dc
-#define INTEL_MSIC_VHSNCNT 0x0dd
-#define INTEL_MSIC_VHDMICNT 0x0de
-#define INTEL_MSIC_VOTGCNT 0x0df
-#define INTEL_MSIC_V1P35CNT 0x0e0
-#define INTEL_MSIC_V330AONCNT 0x0e1
-
-/* RESET */
-#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
-#define INTEL_MSIC_ERCONFIG 0x101
-
-/* BURST */
-#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
-#define INTEL_MSIC_BATTIMELIMIT12 0x103
-#define INTEL_MSIC_BATTIMELIMIT3 0x104
-#define INTEL_MSIC_BATTIMEDB 0x105
-#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
-#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
-#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
-
-/* RTC */
-#define INTEL_MSIC_RTCB1 0x140 /* RO */
-#define INTEL_MSIC_RTCB2 0x141 /* RO */
-#define INTEL_MSIC_RTCB3 0x142 /* RO */
-#define INTEL_MSIC_RTCB4 0x143 /* RO */
-#define INTEL_MSIC_RTCOB1 0x144
-#define INTEL_MSIC_RTCOB2 0x145
-#define INTEL_MSIC_RTCOB3 0x146
-#define INTEL_MSIC_RTCOB4 0x147
-#define INTEL_MSIC_RTCAB1 0x148
-#define INTEL_MSIC_RTCAB2 0x149
-#define INTEL_MSIC_RTCAB3 0x14a
-#define INTEL_MSIC_RTCAB4 0x14b
-#define INTEL_MSIC_RTCWAB1 0x14c
-#define INTEL_MSIC_RTCWAB2 0x14d
-#define INTEL_MSIC_RTCWAB3 0x14e
-#define INTEL_MSIC_RTCWAB4 0x14f
-#define INTEL_MSIC_RTCSC1 0x150
-#define INTEL_MSIC_RTCSC2 0x151
-#define INTEL_MSIC_RTCSC3 0x152
-#define INTEL_MSIC_RTCSC4 0x153
-#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
-#define INTEL_MSIC_RTCCONFIG1 0x155
-#define INTEL_MSIC_RTCCONFIG2 0x156
-
-/* CHARGER */
-#define INTEL_MSIC_BDTIMER 0x180
-#define INTEL_MSIC_BATTRMV 0x181
-#define INTEL_MSIC_VBUSDET 0x182
-#define INTEL_MSIC_VBUSDET1 0x183
-#define INTEL_MSIC_ADPHVDET 0x184
-#define INTEL_MSIC_ADPLVDET 0x185
-#define INTEL_MSIC_ADPDETDBDM 0x186
-#define INTEL_MSIC_LOWBATTDET 0x187
-#define INTEL_MSIC_CHRCTRL 0x188
-#define INTEL_MSIC_CHRCVOLTAGE 0x189
-#define INTEL_MSIC_CHRCCURRENT 0x18a
-#define INTEL_MSIC_SPCHARGER 0x18b
-#define INTEL_MSIC_CHRTTIME 0x18c
-#define INTEL_MSIC_CHRCTRL1 0x18d
-#define INTEL_MSIC_PWRSRCLMT 0x18e
-#define INTEL_MSIC_CHRSTWDT 0x18f
-#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
-#define INTEL_MSIC_CHRSAFELMT 0x191
-#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
-#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
-#define INTEL_MSIC_CHRLEDPWM 0x194
-#define INTEL_MSIC_CHRLEDCTRL 0x195
-
-/* ADC */
-#define INTEL_MSIC_ADC1CNTL1 0x1c0
-#define INTEL_MSIC_ADC1CNTL2 0x1c1
-#define INTEL_MSIC_ADC1CNTL3 0x1c2
-#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
-#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
-#define INTEL_MSIC_ADC1ADDR0 0x1c5
-#define INTEL_MSIC_ADC1ADDR1 0x1c6
-#define INTEL_MSIC_ADC1ADDR2 0x1c7
-#define INTEL_MSIC_ADC1ADDR3 0x1c8
-#define INTEL_MSIC_ADC1ADDR4 0x1c9
-#define INTEL_MSIC_ADC1ADDR5 0x1ca
-#define INTEL_MSIC_ADC1ADDR6 0x1cb
-#define INTEL_MSIC_ADC1ADDR7 0x1cc
-#define INTEL_MSIC_ADC1ADDR8 0x1cd
-#define INTEL_MSIC_ADC1ADDR9 0x1ce
-#define INTEL_MSIC_ADC1ADDR10 0x1cf
-#define INTEL_MSIC_ADC1ADDR11 0x1d0
-#define INTEL_MSIC_ADC1ADDR12 0x1d1
-#define INTEL_MSIC_ADC1ADDR13 0x1d2
-#define INTEL_MSIC_ADC1ADDR14 0x1d3
-#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
-#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
-#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
-#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
-#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
-#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
-#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
-#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
-#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
-#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
-#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
-#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
-#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
-#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
-#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
-#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
-#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
-#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
-#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
-#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
-#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
-#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
-#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
-#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
-#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
-#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
-#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
-#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
-#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
-#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
-#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
-#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
-#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
-#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
-#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
-#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
-#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
-#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
-#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
-#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
-#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
-#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
-#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
-#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
-#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
-#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
-#define INTEL_MSIC_CCCNTL 0x202
-#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
-#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
-#define INTEL_MSIC_CCADCHA 0x205 /* RO */
-#define INTEL_MSIC_CCADCLA 0x206 /* RO */
-
-/* AUDIO */
-#define INTEL_MSIC_AUDPLLCTRL 0x240
-#define INTEL_MSIC_DMICBUF0123 0x241
-#define INTEL_MSIC_DMICBUF45 0x242
-#define INTEL_MSIC_DMICGPO 0x244
-#define INTEL_MSIC_DMICMUX 0x245
-#define INTEL_MSIC_DMICCLK 0x246
-#define INTEL_MSIC_MICBIAS 0x247
-#define INTEL_MSIC_ADCCONFIG 0x248
-#define INTEL_MSIC_MICAMP1 0x249
-#define INTEL_MSIC_MICAMP2 0x24a
-#define INTEL_MSIC_NOISEMUX 0x24b
-#define INTEL_MSIC_AUDIOMUX12 0x24c
-#define INTEL_MSIC_AUDIOMUX34 0x24d
-#define INTEL_MSIC_AUDIOSINC 0x24e
-#define INTEL_MSIC_AUDIOTXEN 0x24f
-#define INTEL_MSIC_HSEPRXCTRL 0x250
-#define INTEL_MSIC_IHFRXCTRL 0x251
-#define INTEL_MSIC_VOICETXVOL 0x252
-#define INTEL_MSIC_SIDETONEVOL 0x253
-#define INTEL_MSIC_MUSICSHARVOL 0x254
-#define INTEL_MSIC_VOICETXCTRL 0x255
-#define INTEL_MSIC_HSMIXER 0x256
-#define INTEL_MSIC_DACCONFIG 0x257
-#define INTEL_MSIC_SOFTMUTE 0x258
-#define INTEL_MSIC_HSLVOLCTRL 0x259
-#define INTEL_MSIC_HSRVOLCTRL 0x25a
-#define INTEL_MSIC_IHFLVOLCTRL 0x25b
-#define INTEL_MSIC_IHFRVOLCTRL 0x25c
-#define INTEL_MSIC_DRIVEREN 0x25d
-#define INTEL_MSIC_LINEOUTCTRL 0x25e
-#define INTEL_MSIC_VIB1CTRL1 0x25f
-#define INTEL_MSIC_VIB1CTRL2 0x260
-#define INTEL_MSIC_VIB1CTRL3 0x261
-#define INTEL_MSIC_VIB1SPIPCM_1 0x262
-#define INTEL_MSIC_VIB1SPIPCM_2 0x263
-#define INTEL_MSIC_VIB1CTRL5 0x264
-#define INTEL_MSIC_VIB2CTRL1 0x265
-#define INTEL_MSIC_VIB2CTRL2 0x266
-#define INTEL_MSIC_VIB2CTRL3 0x267
-#define INTEL_MSIC_VIB2SPIPCM_1 0x268
-#define INTEL_MSIC_VIB2SPIPCM_2 0x269
-#define INTEL_MSIC_VIB2CTRL5 0x26a
-#define INTEL_MSIC_BTNCTRL1 0x26b
-#define INTEL_MSIC_BTNCTRL2 0x26c
-#define INTEL_MSIC_PCM1TXSLOT01 0x26d
-#define INTEL_MSIC_PCM1TXSLOT23 0x26e
-#define INTEL_MSIC_PCM1TXSLOT45 0x26f
-#define INTEL_MSIC_PCM1RXSLOT0123 0x270
-#define INTEL_MSIC_PCM1RXSLOT045 0x271
-#define INTEL_MSIC_PCM2TXSLOT01 0x272
-#define INTEL_MSIC_PCM2TXSLOT23 0x273
-#define INTEL_MSIC_PCM2TXSLOT45 0x274
-#define INTEL_MSIC_PCM2RXSLOT01 0x275
-#define INTEL_MSIC_PCM2RXSLOT23 0x276
-#define INTEL_MSIC_PCM2RXSLOT45 0x277
-#define INTEL_MSIC_PCM1CTRL1 0x278
-#define INTEL_MSIC_PCM1CTRL2 0x279
-#define INTEL_MSIC_PCM1CTRL3 0x27a
-#define INTEL_MSIC_PCM2CTRL1 0x27b
-#define INTEL_MSIC_PCM2CTRL2 0x27c
-
-/* HDMI */
-#define INTEL_MSIC_HDMIPUEN 0x280
-#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
-
-/* Physical address of the start of the MSIC interrupt tree in SRAM */
-#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
-
-/**
- * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
- * @gpio_base: base number for the GPIOs
- */
-struct intel_msic_gpio_pdata {
- unsigned gpio_base;
-};
-
-/**
- * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
- * @gpio: GPIO number used for OCD interrupts
- *
- * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
- * the OCD driver as %IORESOURCE_IRQ.
- */
-struct intel_msic_ocd_pdata {
- unsigned gpio;
-};
-
-/* MSIC embedded blocks (subdevices) */
-enum intel_msic_block {
- INTEL_MSIC_BLOCK_TOUCH,
- INTEL_MSIC_BLOCK_ADC,
- INTEL_MSIC_BLOCK_BATTERY,
- INTEL_MSIC_BLOCK_GPIO,
- INTEL_MSIC_BLOCK_AUDIO,
- INTEL_MSIC_BLOCK_HDMI,
- INTEL_MSIC_BLOCK_THERMAL,
- INTEL_MSIC_BLOCK_POWER_BTN,
- INTEL_MSIC_BLOCK_OCD,
-
- INTEL_MSIC_BLOCK_LAST,
-};
-
-/**
- * struct intel_msic_platform_data - platform data for the MSIC driver
- * @irq: array of interrupt numbers, one per device. If @irq is set to %0
- * for a given block, the corresponding platform device is not
- * created. For devices which don't have an interrupt, use %0xff
- * (this is the same as in the SFI spec).
- * @gpio: platform data for the MSIC GPIO driver
- * @ocd: platform data for the MSIC OCD driver
- *
- * Once the MSIC driver is initialized, the register interface is ready to
- * use. All the platform devices for subdevices are created after the
- * register interface is ready so that we can guarantee its availability to
- * the subdevice drivers.
- *
- * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
- * resources of the created platform device.
- */
-struct intel_msic_platform_data {
- int irq[INTEL_MSIC_BLOCK_LAST];
- struct intel_msic_gpio_pdata *gpio;
- struct intel_msic_ocd_pdata *ocd;
-};
-
-struct intel_msic;
-
-extern int intel_msic_reg_read(unsigned short reg, u8 *val);
-extern int intel_msic_reg_write(unsigned short reg, u8 val);
-extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
-extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
-extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
-
-/*
- * pdev_to_intel_msic - gets an MSIC instance from the platform device
- * @pdev: platform device pointer
- *
- * The client drivers need to have a pointer to the MSIC instance if they
- * want to call intel_msic_irq_read(). This macro can be used for
- * convenience to get the MSIC pointer from @pdev where needed. This is
- * _only_ valid for devices which are managed by the MSIC.
- */
-#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent))
-
-extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
- u8 *val);
-
-#endif /* __LINUX_MFD_INTEL_MSIC_H__ */
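
For context on the interface deleted above, a minimal sketch of how an MSIC
subdevice driver consumed it. The probe function is hypothetical and the
choice of INTEL_MSIC_HDMISTATUS is purely illustrative; the three calls are
the ones declared in the removed header:

#include <linux/platform_device.h>
#include <linux/mfd/intel_msic.h>

/* Hypothetical subdevice probe; the register choice is illustrative only. */
static int example_msic_child_probe(struct platform_device *pdev)
{
	/* Children of the MSIC MFD reach the parent instance via drvdata. */
	struct intel_msic *msic = pdev_to_intel_msic(pdev);
	u8 status;
	int ret;

	/* Plain register access goes through the parent MFD driver. */
	ret = intel_msic_reg_read(INTEL_MSIC_HDMISTATUS, &status);
	if (ret)
		return ret;

	/* Interrupt tree registers need the MSIC instance, hence the helper. */
	return intel_msic_irq_read(msic, INTEL_MSIC_HDMISTATUS, &status);
}
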
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 6a88e34cb955..9ba2c1a8d836 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -13,6 +13,14 @@
#include <linux/regmap.h>
+enum intel_cht_wc_models {
+ INTEL_CHT_WC_UNKNOWN,
+ INTEL_CHT_WC_GPD_WIN_POCKET,
+ INTEL_CHT_WC_XIAOMI_MIPAD2,
+ INTEL_CHT_WC_LENOVO_YOGABOOK1,
+ INTEL_CHT_WC_LENOVO_YT3_X90,
+};
+
/**
* struct intel_soc_pmic - Intel SoC PMIC data
* @irq: Master interrupt number of the parent PMIC device
@@ -39,6 +47,7 @@ struct intel_soc_pmic {
struct regmap_irq_chip_data *irq_chip_data_crit;
struct device *dev;
struct intel_scu_ipc_dev *scu;
+ enum intel_cht_wc_models cht_wc_model;
};
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
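
The new cht_wc_model field lets cell drivers key board-specific behaviour
off the model detected by the core instead of re-probing DMI themselves. A
hedged sketch, assuming a hypothetical quirk helper; which models take the
true branch is illustrative only:

#include <linux/mfd/intel_soc_pmic.h>

/* Hypothetical helper: branch on the board model detected by the core. */
static bool example_needs_charger_quirk(struct intel_soc_pmic *pmic)
{
	switch (pmic->cht_wc_model) {
	case INTEL_CHT_WC_GPD_WIN_POCKET:
	case INTEL_CHT_WC_XIAOMI_MIPAD2:
		return true;	/* illustrative choice of models */
	default:
		return false;
	}
}
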
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
index ee48a4321c57..d5caa4c86ecc 100644
--- a/include/linux/mfd/ipaq-micro.h
+++ b/include/linux/mfd/ipaq-micro.h
@@ -75,8 +75,8 @@ struct ipaq_micro_rxdev {
* @id: 4-bit ID of the message
* @tx_len: length of TX data
* @tx_data: TX data to send
- * @rx_len: length of receieved RX data
- * @rx_data: RX data to recieve
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
* @ack: a completion that will be completed when RX is complete
* @node: list node if message gets queued
*/
diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h
index 043d3b6de9ec..ffc86010af74 100644
--- a/include/linux/mfd/iqs62x.h
+++ b/include/linux/mfd/iqs62x.h
@@ -14,6 +14,11 @@
#define IQS624_PROD_NUM 0x43
#define IQS625_PROD_NUM 0x4E
+#define IQS620_HW_NUM_V0 0x82
+#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V3 0x92
+
#define IQS621_ALS_FLAGS 0x16
#define IQS622_ALS_FLAGS 0x14
@@ -28,7 +33,7 @@
#define IQS620_GLBL_EVENT_MASK_PMU BIT(6)
#define IQS62X_NUM_KEYS 16
-#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 5)
+#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 6)
#define IQS62X_EVENT_SIZE 10
@@ -78,6 +83,7 @@ enum iqs62x_event_flag {
/* everything else */
IQS62X_EVENT_SYS_RESET,
+ IQS62X_EVENT_SYS_ATI,
};
struct iqs62x_event_data {
@@ -97,12 +103,10 @@ struct iqs62x_dev_desc {
const char *dev_name;
const struct mfd_cell *sub_devs;
int num_sub_devs;
-
u8 prod_num;
u8 sw_num;
const u8 *cal_regs;
int num_cal_regs;
-
u8 prox_mask;
u8 sar_mask;
u8 hall_mask;
@@ -110,16 +114,12 @@ struct iqs62x_dev_desc {
u8 temp_mask;
u8 als_mask;
u8 ir_mask;
-
u8 prox_settings;
u8 als_flags;
u8 hall_flags;
u8 hyst_shift;
-
u8 interval;
u8 interval_div;
-
- u8 clk_div;
const char *fw_name;
const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE];
};
@@ -130,8 +130,12 @@ struct iqs62x_core {
struct regmap *regmap;
struct blocking_notifier_head nh;
struct list_head fw_blk_head;
+ struct completion ati_done;
struct completion fw_done;
enum iqs62x_ui_sel ui_sel;
+ unsigned long event_cache;
+ u8 sw_num;
+ u8 hw_num;
};
extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS];
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
index 5546688c7da7..fe8174cc8637 100644
--- a/include/linux/mfd/lp873x.h
+++ b/include/linux/mfd/lp873x.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP873X power management chip.
*
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_LP873X_H
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index 43716aca46fa..4c895072d91b 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -14,6 +14,7 @@
enum lp87565_device_type {
LP87565_DEVICE_TYPE_UNKNOWN = 0,
+ LP87565_DEVICE_TYPE_LP87524_Q1,
LP87565_DEVICE_TYPE_LP87561_Q1,
LP87565_DEVICE_TYPE_LP87565_Q1,
};
@@ -221,34 +222,20 @@ enum lp87565_device_type {
#define LP87565_GPIO2_SEL BIT(1)
#define LP87565_GPIO1_SEL BIT(0)
-#define LP87565_GOIO3_OD BIT(6)
-#define LP87565_GOIO2_OD BIT(5)
-#define LP87565_GOIO1_OD BIT(4)
-#define LP87565_GOIO3_DIR BIT(2)
-#define LP87565_GOIO2_DIR BIT(1)
-#define LP87565_GOIO1_DIR BIT(0)
-
-#define LP87565_GOIO3_IN BIT(2)
-#define LP87565_GOIO2_IN BIT(1)
-#define LP87565_GOIO1_IN BIT(0)
-
-#define LP87565_GOIO3_OUT BIT(2)
-#define LP87565_GOIO2_OUT BIT(1)
-#define LP87565_GOIO1_OUT BIT(0)
-
-/* Number of step-down converters available */
-#define LP87565_NUM_BUCK 6
-
-enum LP87565_regulator_id {
- /* BUCK's */
- LP87565_BUCK_0,
- LP87565_BUCK_1,
- LP87565_BUCK_2,
- LP87565_BUCK_3,
- LP87565_BUCK_10,
- LP87565_BUCK_23,
- LP87565_BUCK_3210,
-};
+#define LP87565_GPIO3_OD BIT(6)
+#define LP87565_GPIO2_OD BIT(5)
+#define LP87565_GPIO1_OD BIT(4)
+#define LP87565_GPIO3_DIR BIT(2)
+#define LP87565_GPIO2_DIR BIT(1)
+#define LP87565_GPIO1_DIR BIT(0)
+
+#define LP87565_GPIO3_IN BIT(2)
+#define LP87565_GPIO2_IN BIT(1)
+#define LP87565_GPIO1_IN BIT(0)
+
+#define LP87565_GPIO3_OUT BIT(2)
+#define LP87565_GPIO2_OUT BIT(1)
+#define LP87565_GPIO1_OUT BIT(0)
/**
* struct LP87565 - state holder for the LP87565 driver
@@ -265,5 +252,6 @@ struct lp87565 {
u8 rev;
u8 dev_type;
struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
};
#endif /* __LINUX_MFD_LP87565_H */
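
With the GOIO/GPIO typo fixed above, the bit names line up with the usual
regmap idiom. A minimal sketch: LP87565_REG_GPIO_CONFIG and
LP87565_REG_GPIO_OUT are assumed register names not shown in this hunk, and
the helper itself is hypothetical:

#include <linux/regmap.h>
#include <linux/mfd/lp87565.h>

/* Hypothetical helper: configure GPIO1 as a push-pull output, driven high. */
static int example_lp87565_gpio1_high(struct lp87565 *lp)
{
	int ret;

	/* Output direction, open-drain bit cleared (push-pull). */
	ret = regmap_update_bits(lp->regmap, LP87565_REG_GPIO_CONFIG,
				 LP87565_GPIO1_DIR | LP87565_GPIO1_OD,
				 LP87565_GPIO1_DIR);
	if (ret)
		return ret;

	/* Drive the pin high. */
	return regmap_update_bits(lp->regmap, LP87565_REG_GPIO_OUT,
				  LP87565_GPIO1_OUT, LP87565_GPIO1_OUT);
}
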
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 3d5c480d58ea..51b47966a04d 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -10,7 +10,6 @@
#ifndef __MFD_LP8788_H__
#define __MFD_LP8788_H__
-#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -159,21 +158,17 @@ struct lp8788;
/*
* lp8788_buck1_dvs
- * @gpio : gpio pin number for dvs control
* @vsel : dvs selector for buck v1 register
*/
struct lp8788_buck1_dvs {
- int gpio;
enum lp8788_dvs_sel vsel;
};
/*
* lp8788_buck2_dvs
- * @gpio : two gpio pin numbers are used for dvs
* @vsel : dvs selector for buck v2 register
*/
struct lp8788_buck2_dvs {
- int gpio[LP8788_NUM_BUCK2_DVS];
enum lp8788_dvs_sel vsel;
};
@@ -268,8 +263,8 @@ struct lp8788_vib_platform_data {
* @buck_data : regulator initial data for buck
* @dldo_data : regulator initial data for digital ldo
* @aldo_data : regulator initial data for analog ldo
- * @buck1_dvs : gpio configurations for buck1 dvs
- * @buck2_dvs : gpio configurations for buck2 dvs
+ * @buck1_dvs : configurations for buck1 dvs
+ * @buck2_dvs : configurations for buck2 dvs
* @chg_pdata : platform data for charger driver
* @alarm_sel : rtc alarm selection (1 or 2)
* @bl_pdata : configurable data for backlight driver
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 6ddca2bbb3a8..1fbda1f8967d 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -8,14 +8,14 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
-#include <linux/platform_data/intel-spi.h>
+#include <linux/platform_data/x86/spi-intel.h>
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
/* GPIO compatibility */
-enum {
+enum lpc_gpio_versions {
ICH_I3100_GPIO,
ICH_V5_GPIO,
ICH_V6_GPIO,
@@ -26,11 +26,14 @@ enum {
AVOTON_GPIO,
};
+struct lpc_ich_gpio_info;
+
struct lpc_ich_info {
char name[32];
unsigned int iTCO_version;
- unsigned int gpio_version;
+ enum lpc_gpio_versions gpio_version;
enum intel_spi_type spi_type;
+ const struct lpc_ich_gpio_info *gpio_info;
u8 use_gpio;
};
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index ad2c138105d4..03a8a788424a 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -186,6 +186,7 @@ struct madera {
struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES];
struct regulator *dcvdd;
bool internal_dcvdd;
+ bool reset_errata;
struct madera_pdata pdata;
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 601cbbc10370..32e3470708ed 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -31,7 +31,7 @@ struct pinctrl_map;
* @irq_flags: Mode for primary IRQ (defaults to active low)
* @gpio_base: Base GPIO number
* @gpio_configs: Array of GPIO configurations (See
- * Documentation/driver-api/pinctl.rst)
+ * Documentation/driver-api/pin-control.rst)
* @n_gpio_configs: Number of entries in gpio_configs
* @gpsw: General purpose switch mode setting. Depends on the external
* hardware connected to the switch. (See the SW1_MODE field
diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h
index fe909d177762..b44aeb461d0c 100644
--- a/include/linux/mfd/madera/registers.h
+++ b/include/linux/mfd/madera/registers.h
@@ -1286,566 +1286,438 @@
/* (0x0000) Software_Reset */
#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF
#define MADERA_SW_RST_DEV_ID1_SHIFT 0
-#define MADERA_SW_RST_DEV_ID1_WIDTH 16
/* (0x0001) Hardware_Revision */
#define MADERA_HW_REVISION_MASK 0x00FF
#define MADERA_HW_REVISION_SHIFT 0
-#define MADERA_HW_REVISION_WIDTH 8
/* (0x0020) Tone_Generator_1 */
#define MADERA_TONE2_ENA 0x0002
#define MADERA_TONE2_ENA_MASK 0x0002
#define MADERA_TONE2_ENA_SHIFT 1
-#define MADERA_TONE2_ENA_WIDTH 1
#define MADERA_TONE1_ENA 0x0001
#define MADERA_TONE1_ENA_MASK 0x0001
#define MADERA_TONE1_ENA_SHIFT 0
-#define MADERA_TONE1_ENA_WIDTH 1
/* (0x0021) Tone_Generator_2 */
#define MADERA_TONE1_LVL_0_MASK 0xFFFF
#define MADERA_TONE1_LVL_0_SHIFT 0
-#define MADERA_TONE1_LVL_0_WIDTH 16
/* (0x0022) Tone_Generator_3 */
#define MADERA_TONE1_LVL_MASK 0x00FF
#define MADERA_TONE1_LVL_SHIFT 0
-#define MADERA_TONE1_LVL_WIDTH 8
/* (0x0023) Tone_Generator_4 */
#define MADERA_TONE2_LVL_0_MASK 0xFFFF
#define MADERA_TONE2_LVL_0_SHIFT 0
-#define MADERA_TONE2_LVL_0_WIDTH 16
/* (0x0024) Tone_Generator_5 */
#define MADERA_TONE2_LVL_MASK 0x00FF
#define MADERA_TONE2_LVL_SHIFT 0
-#define MADERA_TONE2_LVL_WIDTH 8
/* (0x0030) PWM_Drive_1 */
#define MADERA_PWM2_ENA 0x0002
#define MADERA_PWM2_ENA_MASK 0x0002
#define MADERA_PWM2_ENA_SHIFT 1
-#define MADERA_PWM2_ENA_WIDTH 1
#define MADERA_PWM1_ENA 0x0001
#define MADERA_PWM1_ENA_MASK 0x0001
#define MADERA_PWM1_ENA_SHIFT 0
-#define MADERA_PWM1_ENA_WIDTH 1
/* (0x00A0) Comfort_Noise_Generator */
#define MADERA_NOISE_GEN_ENA 0x0020
#define MADERA_NOISE_GEN_ENA_MASK 0x0020
#define MADERA_NOISE_GEN_ENA_SHIFT 5
-#define MADERA_NOISE_GEN_ENA_WIDTH 1
#define MADERA_NOISE_GEN_GAIN_MASK 0x001F
#define MADERA_NOISE_GEN_GAIN_SHIFT 0
-#define MADERA_NOISE_GEN_GAIN_WIDTH 5
/* (0x0100) Clock_32k_1 */
#define MADERA_CLK_32K_ENA 0x0040
#define MADERA_CLK_32K_ENA_MASK 0x0040
#define MADERA_CLK_32K_ENA_SHIFT 6
-#define MADERA_CLK_32K_ENA_WIDTH 1
#define MADERA_CLK_32K_SRC_MASK 0x0003
#define MADERA_CLK_32K_SRC_SHIFT 0
-#define MADERA_CLK_32K_SRC_WIDTH 2
/* (0x0101) System_Clock_1 */
#define MADERA_SYSCLK_FRAC 0x8000
#define MADERA_SYSCLK_FRAC_MASK 0x8000
#define MADERA_SYSCLK_FRAC_SHIFT 15
-#define MADERA_SYSCLK_FRAC_WIDTH 1
#define MADERA_SYSCLK_FREQ_MASK 0x0700
#define MADERA_SYSCLK_FREQ_SHIFT 8
-#define MADERA_SYSCLK_FREQ_WIDTH 3
#define MADERA_SYSCLK_ENA 0x0040
#define MADERA_SYSCLK_ENA_MASK 0x0040
#define MADERA_SYSCLK_ENA_SHIFT 6
-#define MADERA_SYSCLK_ENA_WIDTH 1
#define MADERA_SYSCLK_SRC_MASK 0x000F
#define MADERA_SYSCLK_SRC_SHIFT 0
-#define MADERA_SYSCLK_SRC_WIDTH 4
/* (0x0102) Sample_rate_1 */
#define MADERA_SAMPLE_RATE_1_MASK 0x001F
#define MADERA_SAMPLE_RATE_1_SHIFT 0
-#define MADERA_SAMPLE_RATE_1_WIDTH 5
/* (0x0103) Sample_rate_2 */
#define MADERA_SAMPLE_RATE_2_MASK 0x001F
#define MADERA_SAMPLE_RATE_2_SHIFT 0
-#define MADERA_SAMPLE_RATE_2_WIDTH 5
/* (0x0104) Sample_rate_3 */
#define MADERA_SAMPLE_RATE_3_MASK 0x001F
#define MADERA_SAMPLE_RATE_3_SHIFT 0
-#define MADERA_SAMPLE_RATE_3_WIDTH 5
/* (0x0112) Async_clock_1 */
#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700
#define MADERA_ASYNC_CLK_FREQ_SHIFT 8
-#define MADERA_ASYNC_CLK_FREQ_WIDTH 3
#define MADERA_ASYNC_CLK_ENA 0x0040
#define MADERA_ASYNC_CLK_ENA_MASK 0x0040
#define MADERA_ASYNC_CLK_ENA_SHIFT 6
-#define MADERA_ASYNC_CLK_ENA_WIDTH 1
#define MADERA_ASYNC_CLK_SRC_MASK 0x000F
#define MADERA_ASYNC_CLK_SRC_SHIFT 0
-#define MADERA_ASYNC_CLK_SRC_WIDTH 4
/* (0x0113) Async_sample_rate_1 */
#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F
#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0
-#define MADERA_ASYNC_SAMPLE_RATE_1_WIDTH 5
/* (0x0114) Async_sample_rate_2 */
#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F
#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0
-#define MADERA_ASYNC_SAMPLE_RATE_2_WIDTH 5
/* (0x0120) DSP_Clock_1 */
#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700
#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700
#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8
-#define MADERA_DSP_CLK_FREQ_LEGACY_WIDTH 3
#define MADERA_DSP_CLK_ENA 0x0040
#define MADERA_DSP_CLK_ENA_MASK 0x0040
#define MADERA_DSP_CLK_ENA_SHIFT 6
-#define MADERA_DSP_CLK_ENA_WIDTH 1
#define MADERA_DSP_CLK_SRC 0x000F
#define MADERA_DSP_CLK_SRC_MASK 0x000F
#define MADERA_DSP_CLK_SRC_SHIFT 0
-#define MADERA_DSP_CLK_SRC_WIDTH 4
/* (0x0122) DSP_Clock_2 */
#define MADERA_DSP_CLK_FREQ_MASK 0x03FF
#define MADERA_DSP_CLK_FREQ_SHIFT 0
-#define MADERA_DSP_CLK_FREQ_WIDTH 10
/* (0x0149) Output_system_clock */
#define MADERA_OPCLK_ENA 0x8000
#define MADERA_OPCLK_ENA_MASK 0x8000
#define MADERA_OPCLK_ENA_SHIFT 15
-#define MADERA_OPCLK_ENA_WIDTH 1
#define MADERA_OPCLK_DIV_MASK 0x00F8
#define MADERA_OPCLK_DIV_SHIFT 3
-#define MADERA_OPCLK_DIV_WIDTH 5
#define MADERA_OPCLK_SEL_MASK 0x0007
#define MADERA_OPCLK_SEL_SHIFT 0
-#define MADERA_OPCLK_SEL_WIDTH 3
/* (0x014A) Output_async_clock */
#define MADERA_OPCLK_ASYNC_ENA 0x8000
#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000
#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15
-#define MADERA_OPCLK_ASYNC_ENA_WIDTH 1
#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8
#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3
-#define MADERA_OPCLK_ASYNC_DIV_WIDTH 5
#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007
#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0
-#define MADERA_OPCLK_ASYNC_SEL_WIDTH 3
/* (0x0171) FLL1_Control_1 */
#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000
#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12
-#define CS47L92_FLL1_REFCLK_SRC_WIDTH 4
#define MADERA_FLL1_HOLD_MASK 0x0004
#define MADERA_FLL1_HOLD_SHIFT 2
-#define MADERA_FLL1_HOLD_WIDTH 1
#define MADERA_FLL1_FREERUN 0x0002
#define MADERA_FLL1_FREERUN_MASK 0x0002
#define MADERA_FLL1_FREERUN_SHIFT 1
-#define MADERA_FLL1_FREERUN_WIDTH 1
#define MADERA_FLL1_ENA 0x0001
#define MADERA_FLL1_ENA_MASK 0x0001
#define MADERA_FLL1_ENA_SHIFT 0
-#define MADERA_FLL1_ENA_WIDTH 1
/* (0x0172) FLL1_Control_2 */
#define MADERA_FLL1_CTRL_UPD 0x8000
#define MADERA_FLL1_CTRL_UPD_MASK 0x8000
#define MADERA_FLL1_CTRL_UPD_SHIFT 15
-#define MADERA_FLL1_CTRL_UPD_WIDTH 1
#define MADERA_FLL1_N_MASK 0x03FF
#define MADERA_FLL1_N_SHIFT 0
-#define MADERA_FLL1_N_WIDTH 10
/* (0x0173) FLL1_Control_3 */
#define MADERA_FLL1_THETA_MASK 0xFFFF
#define MADERA_FLL1_THETA_SHIFT 0
-#define MADERA_FLL1_THETA_WIDTH 16
/* (0x0174) FLL1_Control_4 */
#define MADERA_FLL1_LAMBDA_MASK 0xFFFF
#define MADERA_FLL1_LAMBDA_SHIFT 0
-#define MADERA_FLL1_LAMBDA_WIDTH 16
/* (0x0175) FLL1_Control_5 */
#define MADERA_FLL1_FRATIO_MASK 0x0F00
#define MADERA_FLL1_FRATIO_SHIFT 8
-#define MADERA_FLL1_FRATIO_WIDTH 4
#define MADERA_FLL1_FB_DIV_MASK 0x03FF
#define MADERA_FLL1_FB_DIV_SHIFT 0
-#define MADERA_FLL1_FB_DIV_WIDTH 10
/* (0x0176) FLL1_Control_6 */
#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
#define MADERA_FLL1_REFCLK_DIV_SHIFT 6
-#define MADERA_FLL1_REFCLK_DIV_WIDTH 2
#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F
#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
-#define MADERA_FLL1_REFCLK_SRC_WIDTH 4
/* (0x0179) FLL1_Control_7 */
#define MADERA_FLL1_GAIN_MASK 0x003c
#define MADERA_FLL1_GAIN_SHIFT 2
-#define MADERA_FLL1_GAIN_WIDTH 4
/* (0x017A) FLL1_EFS_2 */
#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000
#define MADERA_FLL1_PHASE_GAIN_SHIFT 12
-#define MADERA_FLL1_PHASE_GAIN_WIDTH 4
#define MADERA_FLL1_PHASE_ENA_MASK 0x0800
#define MADERA_FLL1_PHASE_ENA_SHIFT 11
-#define MADERA_FLL1_PHASE_ENA_WIDTH 1
/* (0x017A) FLL1_Control_10 */
#define MADERA_FLL1_HP_MASK 0xC000
#define MADERA_FLL1_HP_SHIFT 14
-#define MADERA_FLL1_HP_WIDTH 2
#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000
#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12
-#define MADERA_FLL1_PHASEDET_ENA_WIDTH 1
/* (0x017B) FLL1_Control_11 */
#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E
#define MADERA_FLL1_LOCKDET_THR_SHIFT 1
-#define MADERA_FLL1_LOCKDET_THR_WIDTH 4
#define MADERA_FLL1_LOCKDET_MASK 0x0001
#define MADERA_FLL1_LOCKDET_SHIFT 0
-#define MADERA_FLL1_LOCKDET_WIDTH 1
/* (0x017D) FLL1_Digital_Test_1 */
#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100
#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8
-#define MADERA_FLL1_SYNC_EFS_ENA_WIDTH 1
#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003
#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0
-#define MADERA_FLL1_CLK_VCO_FAST_SRC_WIDTH 2
/* (0x0181) FLL1_Synchroniser_1 */
#define MADERA_FLL1_SYNC_ENA 0x0001
#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
#define MADERA_FLL1_SYNC_ENA_SHIFT 0
-#define MADERA_FLL1_SYNC_ENA_WIDTH 1
/* (0x0182) FLL1_Synchroniser_2 */
#define MADERA_FLL1_SYNC_N_MASK 0x03FF
#define MADERA_FLL1_SYNC_N_SHIFT 0
-#define MADERA_FLL1_SYNC_N_WIDTH 10
/* (0x0183) FLL1_Synchroniser_3 */
#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF
#define MADERA_FLL1_SYNC_THETA_SHIFT 0
-#define MADERA_FLL1_SYNC_THETA_WIDTH 16
/* (0x0184) FLL1_Synchroniser_4 */
#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF
#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0
-#define MADERA_FLL1_SYNC_LAMBDA_WIDTH 16
/* (0x0185) FLL1_Synchroniser_5 */
#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700
#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8
-#define MADERA_FLL1_SYNC_FRATIO_WIDTH 3
/* (0x0186) FLL1_Synchroniser_6 */
#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0
#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6
-#define MADERA_FLL1_SYNCCLK_DIV_WIDTH 2
#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F
#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0
-#define MADERA_FLL1_SYNCCLK_SRC_WIDTH 4
/* (0x0187) FLL1_Synchroniser_7 */
#define MADERA_FLL1_SYNC_GAIN_MASK 0x003c
#define MADERA_FLL1_SYNC_GAIN_SHIFT 2
-#define MADERA_FLL1_SYNC_GAIN_WIDTH 4
#define MADERA_FLL1_SYNC_DFSAT 0x0001
#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001
#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0
-#define MADERA_FLL1_SYNC_DFSAT_WIDTH 1
/* (0x01D1) FLL_AO_Control_1 */
#define MADERA_FLL_AO_HOLD 0x0004
#define MADERA_FLL_AO_HOLD_MASK 0x0004
#define MADERA_FLL_AO_HOLD_SHIFT 2
-#define MADERA_FLL_AO_HOLD_WIDTH 1
#define MADERA_FLL_AO_FREERUN 0x0002
#define MADERA_FLL_AO_FREERUN_MASK 0x0002
#define MADERA_FLL_AO_FREERUN_SHIFT 1
-#define MADERA_FLL_AO_FREERUN_WIDTH 1
#define MADERA_FLL_AO_ENA 0x0001
#define MADERA_FLL_AO_ENA_MASK 0x0001
#define MADERA_FLL_AO_ENA_SHIFT 0
-#define MADERA_FLL_AO_ENA_WIDTH 1
/* (0x01D2) FLL_AO_Control_2 */
#define MADERA_FLL_AO_CTRL_UPD 0x8000
#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000
#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15
-#define MADERA_FLL_AO_CTRL_UPD_WIDTH 1
/* (0x01D6) FLL_AO_Control_6 */
#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F
#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0
-#define MADERA_FLL_AO_REFCLK_SRC_WIDTH 4
/* (0x0200) Mic_Charge_Pump_1 */
#define MADERA_CPMIC_BYPASS 0x0002
#define MADERA_CPMIC_BYPASS_MASK 0x0002
#define MADERA_CPMIC_BYPASS_SHIFT 1
-#define MADERA_CPMIC_BYPASS_WIDTH 1
#define MADERA_CPMIC_ENA 0x0001
#define MADERA_CPMIC_ENA_MASK 0x0001
#define MADERA_CPMIC_ENA_SHIFT 0
-#define MADERA_CPMIC_ENA_WIDTH 1
/* (0x0210) LDO1_Control_1 */
#define MADERA_LDO1_VSEL_MASK 0x07E0
#define MADERA_LDO1_VSEL_SHIFT 5
-#define MADERA_LDO1_VSEL_WIDTH 6
#define MADERA_LDO1_FAST 0x0010
#define MADERA_LDO1_FAST_MASK 0x0010
#define MADERA_LDO1_FAST_SHIFT 4
-#define MADERA_LDO1_FAST_WIDTH 1
#define MADERA_LDO1_DISCH 0x0004
#define MADERA_LDO1_DISCH_MASK 0x0004
#define MADERA_LDO1_DISCH_SHIFT 2
-#define MADERA_LDO1_DISCH_WIDTH 1
#define MADERA_LDO1_BYPASS 0x0002
#define MADERA_LDO1_BYPASS_MASK 0x0002
#define MADERA_LDO1_BYPASS_SHIFT 1
-#define MADERA_LDO1_BYPASS_WIDTH 1
#define MADERA_LDO1_ENA 0x0001
#define MADERA_LDO1_ENA_MASK 0x0001
#define MADERA_LDO1_ENA_SHIFT 0
-#define MADERA_LDO1_ENA_WIDTH 1
/* (0x0213) LDO2_Control_1 */
#define MADERA_LDO2_VSEL_MASK 0x07E0
#define MADERA_LDO2_VSEL_SHIFT 5
-#define MADERA_LDO2_VSEL_WIDTH 6
#define MADERA_LDO2_FAST 0x0010
#define MADERA_LDO2_FAST_MASK 0x0010
#define MADERA_LDO2_FAST_SHIFT 4
-#define MADERA_LDO2_FAST_WIDTH 1
#define MADERA_LDO2_DISCH 0x0004
#define MADERA_LDO2_DISCH_MASK 0x0004
#define MADERA_LDO2_DISCH_SHIFT 2
-#define MADERA_LDO2_DISCH_WIDTH 1
#define MADERA_LDO2_BYPASS 0x0002
#define MADERA_LDO2_BYPASS_MASK 0x0002
#define MADERA_LDO2_BYPASS_SHIFT 1
-#define MADERA_LDO2_BYPASS_WIDTH 1
#define MADERA_LDO2_ENA 0x0001
#define MADERA_LDO2_ENA_MASK 0x0001
#define MADERA_LDO2_ENA_SHIFT 0
-#define MADERA_LDO2_ENA_WIDTH 1
/* (0x0218) Mic_Bias_Ctrl_1 */
#define MADERA_MICB1_EXT_CAP 0x8000
#define MADERA_MICB1_EXT_CAP_MASK 0x8000
#define MADERA_MICB1_EXT_CAP_SHIFT 15
-#define MADERA_MICB1_EXT_CAP_WIDTH 1
#define MADERA_MICB1_LVL_MASK 0x01E0
#define MADERA_MICB1_LVL_SHIFT 5
-#define MADERA_MICB1_LVL_WIDTH 4
#define MADERA_MICB1_ENA 0x0001
#define MADERA_MICB1_ENA_MASK 0x0001
#define MADERA_MICB1_ENA_SHIFT 0
-#define MADERA_MICB1_ENA_WIDTH 1
/* (0x021C) Mic_Bias_Ctrl_5 */
#define MADERA_MICB1D_ENA 0x1000
#define MADERA_MICB1D_ENA_MASK 0x1000
#define MADERA_MICB1D_ENA_SHIFT 12
-#define MADERA_MICB1D_ENA_WIDTH 1
#define MADERA_MICB1C_ENA 0x0100
#define MADERA_MICB1C_ENA_MASK 0x0100
#define MADERA_MICB1C_ENA_SHIFT 8
-#define MADERA_MICB1C_ENA_WIDTH 1
#define MADERA_MICB1B_ENA 0x0010
#define MADERA_MICB1B_ENA_MASK 0x0010
#define MADERA_MICB1B_ENA_SHIFT 4
-#define MADERA_MICB1B_ENA_WIDTH 1
#define MADERA_MICB1A_ENA 0x0001
#define MADERA_MICB1A_ENA_MASK 0x0001
#define MADERA_MICB1A_ENA_SHIFT 0
-#define MADERA_MICB1A_ENA_WIDTH 1
/* (0x021E) Mic_Bias_Ctrl_6 */
#define MADERA_MICB2D_ENA 0x1000
#define MADERA_MICB2D_ENA_MASK 0x1000
#define MADERA_MICB2D_ENA_SHIFT 12
-#define MADERA_MICB2D_ENA_WIDTH 1
#define MADERA_MICB2C_ENA 0x0100
#define MADERA_MICB2C_ENA_MASK 0x0100
#define MADERA_MICB2C_ENA_SHIFT 8
-#define MADERA_MICB2C_ENA_WIDTH 1
#define MADERA_MICB2B_ENA 0x0010
#define MADERA_MICB2B_ENA_MASK 0x0010
#define MADERA_MICB2B_ENA_SHIFT 4
-#define MADERA_MICB2B_ENA_WIDTH 1
#define MADERA_MICB2A_ENA 0x0001
#define MADERA_MICB2A_ENA_MASK 0x0001
#define MADERA_MICB2A_ENA_SHIFT 0
-#define MADERA_MICB2A_ENA_WIDTH 1
/* (0x0225) - HP Ctrl 1L */
#define MADERA_RMV_SHRT_HP1L 0x4000
#define MADERA_RMV_SHRT_HP1L_MASK 0x4000
#define MADERA_RMV_SHRT_HP1L_SHIFT 14
-#define MADERA_RMV_SHRT_HP1L_WIDTH 1
#define MADERA_HP1L_FLWR 0x0004
#define MADERA_HP1L_FLWR_MASK 0x0004
#define MADERA_HP1L_FLWR_SHIFT 2
-#define MADERA_HP1L_FLWR_WIDTH 1
#define MADERA_HP1L_SHRTI 0x0002
#define MADERA_HP1L_SHRTI_MASK 0x0002
#define MADERA_HP1L_SHRTI_SHIFT 1
-#define MADERA_HP1L_SHRTI_WIDTH 1
#define MADERA_HP1L_SHRTO 0x0001
#define MADERA_HP1L_SHRTO_MASK 0x0001
#define MADERA_HP1L_SHRTO_SHIFT 0
-#define MADERA_HP1L_SHRTO_WIDTH 1
/* (0x0226) - HP Ctrl 1R */
#define MADERA_RMV_SHRT_HP1R 0x4000
#define MADERA_RMV_SHRT_HP1R_MASK 0x4000
#define MADERA_RMV_SHRT_HP1R_SHIFT 14
-#define MADERA_RMV_SHRT_HP1R_WIDTH 1
#define MADERA_HP1R_FLWR 0x0004
#define MADERA_HP1R_FLWR_MASK 0x0004
#define MADERA_HP1R_FLWR_SHIFT 2
-#define MADERA_HP1R_FLWR_WIDTH 1
#define MADERA_HP1R_SHRTI 0x0002
#define MADERA_HP1R_SHRTI_MASK 0x0002
#define MADERA_HP1R_SHRTI_SHIFT 1
-#define MADERA_HP1R_SHRTI_WIDTH 1
#define MADERA_HP1R_SHRTO 0x0001
#define MADERA_HP1R_SHRTO_MASK 0x0001
#define MADERA_HP1R_SHRTO_SHIFT 0
-#define MADERA_HP1R_SHRTO_WIDTH 1
/* (0x0293) Accessory_Detect_Mode_1 */
#define MADERA_ACCDET_SRC 0x2000
#define MADERA_ACCDET_SRC_MASK 0x2000
#define MADERA_ACCDET_SRC_SHIFT 13
-#define MADERA_ACCDET_SRC_WIDTH 1
#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080
#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080
#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7
-#define MADERA_ACCDET_POLARITY_INV_ENA_WIDTH 1
#define MADERA_ACCDET_MODE_MASK 0x0007
#define MADERA_ACCDET_MODE_SHIFT 0
-#define MADERA_ACCDET_MODE_WIDTH 3
/* (0x0299) Headphone_Detect_0 */
#define MADERA_HPD_GND_SEL 0x0007
#define MADERA_HPD_GND_SEL_MASK 0x0007
#define MADERA_HPD_GND_SEL_SHIFT 0
-#define MADERA_HPD_GND_SEL_WIDTH 3
#define MADERA_HPD_SENSE_SEL 0x00F0
#define MADERA_HPD_SENSE_SEL_MASK 0x00F0
#define MADERA_HPD_SENSE_SEL_SHIFT 4
-#define MADERA_HPD_SENSE_SEL_WIDTH 4
#define MADERA_HPD_FRC_SEL 0x0F00
#define MADERA_HPD_FRC_SEL_MASK 0x0F00
#define MADERA_HPD_FRC_SEL_SHIFT 8
-#define MADERA_HPD_FRC_SEL_WIDTH 4
#define MADERA_HPD_OUT_SEL 0x7000
#define MADERA_HPD_OUT_SEL_MASK 0x7000
#define MADERA_HPD_OUT_SEL_SHIFT 12
-#define MADERA_HPD_OUT_SEL_WIDTH 3
#define MADERA_HPD_OVD_ENA_SEL 0x8000
#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000
#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15
-#define MADERA_HPD_OVD_ENA_SEL_WIDTH 1
/* (0x029B) Headphone_Detect_1 */
#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600
#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9
-#define MADERA_HP_IMPEDANCE_RANGE_WIDTH 2
#define MADERA_HP_STEP_SIZE 0x0100
#define MADERA_HP_STEP_SIZE_MASK 0x0100
#define MADERA_HP_STEP_SIZE_SHIFT 8
-#define MADERA_HP_STEP_SIZE_WIDTH 1
#define MADERA_HP_CLK_DIV_MASK 0x0018
#define MADERA_HP_CLK_DIV_SHIFT 3
-#define MADERA_HP_CLK_DIV_WIDTH 2
#define MADERA_HP_RATE_MASK 0x0006
#define MADERA_HP_RATE_SHIFT 1
-#define MADERA_HP_RATE_WIDTH 2
#define MADERA_HP_POLL 0x0001
#define MADERA_HP_POLL_MASK 0x0001
#define MADERA_HP_POLL_SHIFT 0
-#define MADERA_HP_POLL_WIDTH 1
/* (0x029C) Headphone_Detect_2 */
#define MADERA_HP_DONE_MASK 0x8000
#define MADERA_HP_DONE_SHIFT 15
-#define MADERA_HP_DONE_WIDTH 1
#define MADERA_HP_LVL_MASK 0x7FFF
#define MADERA_HP_LVL_SHIFT 0
-#define MADERA_HP_LVL_WIDTH 15
/* (0x029D) Headphone_Detect_3 */
#define MADERA_HP_DACVAL_MASK 0x03FF
#define MADERA_HP_DACVAL_SHIFT 0
-#define MADERA_HP_DACVAL_WIDTH 10
/* (0x029F) - Headphone Detect 5 */
#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF
#define MADERA_HP_DACVAL_DOWN_SHIFT 0
-#define MADERA_HP_DACVAL_DOWN_WIDTH 10
/* (0x02A2) Mic_Detect_1_Control_0 */
#define MADERA_MICD1_GND_MASK 0x0007
#define MADERA_MICD1_GND_SHIFT 0
-#define MADERA_MICD1_GND_WIDTH 3
#define MADERA_MICD1_SENSE_MASK 0x00F0
#define MADERA_MICD1_SENSE_SHIFT 4
-#define MADERA_MICD1_SENSE_WIDTH 4
#define MADERA_MICD1_ADC_MODE_MASK 0x8000
#define MADERA_MICD1_ADC_MODE_SHIFT 15
-#define MADERA_MICD1_ADC_MODE_WIDTH 1
/* (0x02A3) Mic_Detect_1_Control_1 */
#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000
#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12
-#define MADERA_MICD_BIAS_STARTTIME_WIDTH 4
#define MADERA_MICD_RATE_MASK 0x0F00
#define MADERA_MICD_RATE_SHIFT 8
-#define MADERA_MICD_RATE_WIDTH 4
#define MADERA_MICD_BIAS_SRC_MASK 0x00F0
#define MADERA_MICD_BIAS_SRC_SHIFT 4
-#define MADERA_MICD_BIAS_SRC_WIDTH 4
#define MADERA_MICD_DBTIME 0x0002
#define MADERA_MICD_DBTIME_MASK 0x0002
#define MADERA_MICD_DBTIME_SHIFT 1
-#define MADERA_MICD_DBTIME_WIDTH 1
#define MADERA_MICD_ENA 0x0001
#define MADERA_MICD_ENA_MASK 0x0001
#define MADERA_MICD_ENA_SHIFT 0
-#define MADERA_MICD_ENA_WIDTH 1
/* (0x02A4) Mic_Detect_1_Control_2 */
#define MADERA_MICD_LVL_SEL_MASK 0x00FF
#define MADERA_MICD_LVL_SEL_SHIFT 0
-#define MADERA_MICD_LVL_SEL_WIDTH 8
/* (0x02A5) Mic_Detect_1_Control_3 */
#define MADERA_MICD_LVL_0 0x0004
@@ -1859,1746 +1731,1341 @@
#define MADERA_MICD_LVL_8 0x0400
#define MADERA_MICD_LVL_MASK 0x07FC
#define MADERA_MICD_LVL_SHIFT 2
-#define MADERA_MICD_LVL_WIDTH 9
#define MADERA_MICD_VALID 0x0002
#define MADERA_MICD_VALID_MASK 0x0002
#define MADERA_MICD_VALID_SHIFT 1
-#define MADERA_MICD_VALID_WIDTH 1
#define MADERA_MICD_STS 0x0001
#define MADERA_MICD_STS_MASK 0x0001
#define MADERA_MICD_STS_SHIFT 0
-#define MADERA_MICD_STS_WIDTH 1
/* (0x02AB) Mic_Detect_1_Control_4 */
#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00
#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8
-#define MADERA_MICDET_ADCVAL_DIFF_WIDTH 8
#define MADERA_MICDET_ADCVAL_MASK 0x007F
#define MADERA_MICDET_ADCVAL_SHIFT 0
-#define MADERA_MICDET_ADCVAL_WIDTH 7
/* (0x02C6) Micd_Clamp_control */
#define MADERA_MICD_CLAMP_OVD 0x0010
#define MADERA_MICD_CLAMP_OVD_MASK 0x0010
#define MADERA_MICD_CLAMP_OVD_SHIFT 4
-#define MADERA_MICD_CLAMP_OVD_WIDTH 1
#define MADERA_MICD_CLAMP_MODE_MASK 0x000F
#define MADERA_MICD_CLAMP_MODE_SHIFT 0
-#define MADERA_MICD_CLAMP_MODE_WIDTH 4
/* (0x02C8) GP_Switch_1 */
#define MADERA_SW2_MODE_MASK 0x000C
#define MADERA_SW2_MODE_SHIFT 2
-#define MADERA_SW2_MODE_WIDTH 2
#define MADERA_SW1_MODE_MASK 0x0003
#define MADERA_SW1_MODE_SHIFT 0
-#define MADERA_SW1_MODE_WIDTH 2
/* (0x02D3) Jack_detect_analogue */
#define MADERA_JD2_ENA 0x0002
#define MADERA_JD2_ENA_MASK 0x0002
#define MADERA_JD2_ENA_SHIFT 1
-#define MADERA_JD2_ENA_WIDTH 1
#define MADERA_JD1_ENA 0x0001
#define MADERA_JD1_ENA_MASK 0x0001
#define MADERA_JD1_ENA_SHIFT 0
-#define MADERA_JD1_ENA_WIDTH 1
/* (0x0300) Input_Enables */
#define MADERA_IN6L_ENA 0x0800
#define MADERA_IN6L_ENA_MASK 0x0800
#define MADERA_IN6L_ENA_SHIFT 11
-#define MADERA_IN6L_ENA_WIDTH 1
#define MADERA_IN6R_ENA 0x0400
#define MADERA_IN6R_ENA_MASK 0x0400
#define MADERA_IN6R_ENA_SHIFT 10
-#define MADERA_IN6R_ENA_WIDTH 1
#define MADERA_IN5L_ENA 0x0200
#define MADERA_IN5L_ENA_MASK 0x0200
#define MADERA_IN5L_ENA_SHIFT 9
-#define MADERA_IN5L_ENA_WIDTH 1
#define MADERA_IN5R_ENA 0x0100
#define MADERA_IN5R_ENA_MASK 0x0100
#define MADERA_IN5R_ENA_SHIFT 8
-#define MADERA_IN5R_ENA_WIDTH 1
#define MADERA_IN4L_ENA 0x0080
#define MADERA_IN4L_ENA_MASK 0x0080
#define MADERA_IN4L_ENA_SHIFT 7
-#define MADERA_IN4L_ENA_WIDTH 1
#define MADERA_IN4R_ENA 0x0040
#define MADERA_IN4R_ENA_MASK 0x0040
#define MADERA_IN4R_ENA_SHIFT 6
-#define MADERA_IN4R_ENA_WIDTH 1
#define MADERA_IN3L_ENA 0x0020
#define MADERA_IN3L_ENA_MASK 0x0020
#define MADERA_IN3L_ENA_SHIFT 5
-#define MADERA_IN3L_ENA_WIDTH 1
#define MADERA_IN3R_ENA 0x0010
#define MADERA_IN3R_ENA_MASK 0x0010
#define MADERA_IN3R_ENA_SHIFT 4
-#define MADERA_IN3R_ENA_WIDTH 1
#define MADERA_IN2L_ENA 0x0008
#define MADERA_IN2L_ENA_MASK 0x0008
#define MADERA_IN2L_ENA_SHIFT 3
-#define MADERA_IN2L_ENA_WIDTH 1
#define MADERA_IN2R_ENA 0x0004
#define MADERA_IN2R_ENA_MASK 0x0004
#define MADERA_IN2R_ENA_SHIFT 2
-#define MADERA_IN2R_ENA_WIDTH 1
#define MADERA_IN1L_ENA 0x0002
#define MADERA_IN1L_ENA_MASK 0x0002
#define MADERA_IN1L_ENA_SHIFT 1
-#define MADERA_IN1L_ENA_WIDTH 1
#define MADERA_IN1R_ENA 0x0001
#define MADERA_IN1R_ENA_MASK 0x0001
#define MADERA_IN1R_ENA_SHIFT 0
-#define MADERA_IN1R_ENA_WIDTH 1
/* (0x0308) Input_Rate */
#define MADERA_IN_RATE_MASK 0xF800
#define MADERA_IN_RATE_SHIFT 11
-#define MADERA_IN_RATE_WIDTH 5
#define MADERA_IN_MODE_MASK 0x0400
#define MADERA_IN_MODE_SHIFT 10
-#define MADERA_IN_MODE_WIDTH 1
/* (0x0309) Input_Volume_Ramp */
#define MADERA_IN_VD_RAMP_MASK 0x0070
#define MADERA_IN_VD_RAMP_SHIFT 4
-#define MADERA_IN_VD_RAMP_WIDTH 3
#define MADERA_IN_VI_RAMP_MASK 0x0007
#define MADERA_IN_VI_RAMP_SHIFT 0
-#define MADERA_IN_VI_RAMP_WIDTH 3
/* (0x030C) HPF_Control */
#define MADERA_IN_HPF_CUT_MASK 0x0007
#define MADERA_IN_HPF_CUT_SHIFT 0
-#define MADERA_IN_HPF_CUT_WIDTH 3
/* (0x0310) IN1L_Control */
#define MADERA_IN1L_HPF_MASK 0x8000
#define MADERA_IN1L_HPF_SHIFT 15
-#define MADERA_IN1L_HPF_WIDTH 1
#define MADERA_IN1_DMIC_SUP_MASK 0x1800
#define MADERA_IN1_DMIC_SUP_SHIFT 11
-#define MADERA_IN1_DMIC_SUP_WIDTH 2
#define MADERA_IN1_MODE_MASK 0x0400
#define MADERA_IN1_MODE_SHIFT 10
-#define MADERA_IN1_MODE_WIDTH 1
#define MADERA_IN1L_PGA_VOL_MASK 0x00FE
#define MADERA_IN1L_PGA_VOL_SHIFT 1
-#define MADERA_IN1L_PGA_VOL_WIDTH 7
/* (0x0311) ADC_Digital_Volume_1L */
#define MADERA_IN1L_SRC_MASK 0x4000
#define MADERA_IN1L_SRC_SHIFT 14
-#define MADERA_IN1L_SRC_WIDTH 1
#define MADERA_IN1L_SRC_SE_MASK 0x2000
#define MADERA_IN1L_SRC_SE_SHIFT 13
-#define MADERA_IN1L_SRC_SE_WIDTH 1
#define MADERA_IN1L_LP_MODE 0x0800
#define MADERA_IN1L_LP_MODE_MASK 0x0800
#define MADERA_IN1L_LP_MODE_SHIFT 11
-#define MADERA_IN1L_LP_MODE_WIDTH 1
#define MADERA_IN_VU 0x0200
#define MADERA_IN_VU_MASK 0x0200
#define MADERA_IN_VU_SHIFT 9
-#define MADERA_IN_VU_WIDTH 1
#define MADERA_IN1L_MUTE 0x0100
#define MADERA_IN1L_MUTE_MASK 0x0100
#define MADERA_IN1L_MUTE_SHIFT 8
-#define MADERA_IN1L_MUTE_WIDTH 1
#define MADERA_IN1L_DIG_VOL_MASK 0x00FF
#define MADERA_IN1L_DIG_VOL_SHIFT 0
-#define MADERA_IN1L_DIG_VOL_WIDTH 8
/* (0x0312) DMIC1L_Control */
#define MADERA_IN1_OSR_MASK 0x0700
#define MADERA_IN1_OSR_SHIFT 8
-#define MADERA_IN1_OSR_WIDTH 3
/* (0x0313) IN1L_Rate_Control */
#define MADERA_IN1L_RATE_MASK 0xF800
#define MADERA_IN1L_RATE_SHIFT 11
-#define MADERA_IN1L_RATE_WIDTH 5
/* (0x0314) IN1R_Control */
#define MADERA_IN1R_HPF_MASK 0x8000
#define MADERA_IN1R_HPF_SHIFT 15
-#define MADERA_IN1R_HPF_WIDTH 1
#define MADERA_IN1R_PGA_VOL_MASK 0x00FE
#define MADERA_IN1R_PGA_VOL_SHIFT 1
-#define MADERA_IN1R_PGA_VOL_WIDTH 7
#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN1_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN1_DMICCLK_SRC_WIDTH 2
/* (0x0315) ADC_Digital_Volume_1R */
#define MADERA_IN1R_SRC_MASK 0x4000
#define MADERA_IN1R_SRC_SHIFT 14
-#define MADERA_IN1R_SRC_WIDTH 1
#define MADERA_IN1R_SRC_SE_MASK 0x2000
#define MADERA_IN1R_SRC_SE_SHIFT 13
-#define MADERA_IN1R_SRC_SE_WIDTH 1
#define MADERA_IN1R_LP_MODE 0x0800
#define MADERA_IN1R_LP_MODE_MASK 0x0800
#define MADERA_IN1R_LP_MODE_SHIFT 11
-#define MADERA_IN1R_LP_MODE_WIDTH 1
#define MADERA_IN1R_MUTE 0x0100
#define MADERA_IN1R_MUTE_MASK 0x0100
#define MADERA_IN1R_MUTE_SHIFT 8
-#define MADERA_IN1R_MUTE_WIDTH 1
#define MADERA_IN1R_DIG_VOL_MASK 0x00FF
#define MADERA_IN1R_DIG_VOL_SHIFT 0
-#define MADERA_IN1R_DIG_VOL_WIDTH 8
/* (0x0317) IN1R_Rate_Control */
#define MADERA_IN1R_RATE_MASK 0xF800
#define MADERA_IN1R_RATE_SHIFT 11
-#define MADERA_IN1R_RATE_WIDTH 5
/* (0x0318) IN2L_Control */
#define MADERA_IN2L_HPF_MASK 0x8000
#define MADERA_IN2L_HPF_SHIFT 15
-#define MADERA_IN2L_HPF_WIDTH 1
#define MADERA_IN2_DMIC_SUP_MASK 0x1800
#define MADERA_IN2_DMIC_SUP_SHIFT 11
-#define MADERA_IN2_DMIC_SUP_WIDTH 2
#define MADERA_IN2_MODE_MASK 0x0400
#define MADERA_IN2_MODE_SHIFT 10
-#define MADERA_IN2_MODE_WIDTH 1
#define MADERA_IN2L_PGA_VOL_MASK 0x00FE
#define MADERA_IN2L_PGA_VOL_SHIFT 1
-#define MADERA_IN2L_PGA_VOL_WIDTH 7
/* (0x0319) ADC_Digital_Volume_2L */
#define MADERA_IN2L_SRC_MASK 0x4000
#define MADERA_IN2L_SRC_SHIFT 14
-#define MADERA_IN2L_SRC_WIDTH 1
#define MADERA_IN2L_SRC_SE_MASK 0x2000
#define MADERA_IN2L_SRC_SE_SHIFT 13
-#define MADERA_IN2L_SRC_SE_WIDTH 1
#define MADERA_IN2L_LP_MODE 0x0800
#define MADERA_IN2L_LP_MODE_MASK 0x0800
#define MADERA_IN2L_LP_MODE_SHIFT 11
-#define MADERA_IN2L_LP_MODE_WIDTH 1
#define MADERA_IN2L_MUTE 0x0100
#define MADERA_IN2L_MUTE_MASK 0x0100
#define MADERA_IN2L_MUTE_SHIFT 8
-#define MADERA_IN2L_MUTE_WIDTH 1
#define MADERA_IN2L_DIG_VOL_MASK 0x00FF
#define MADERA_IN2L_DIG_VOL_SHIFT 0
-#define MADERA_IN2L_DIG_VOL_WIDTH 8
/* (0x031A) DMIC2L_Control */
#define MADERA_IN2_OSR_MASK 0x0700
#define MADERA_IN2_OSR_SHIFT 8
-#define MADERA_IN2_OSR_WIDTH 3
/* (0x031C) IN2R_Control */
#define MADERA_IN2R_HPF_MASK 0x8000
#define MADERA_IN2R_HPF_SHIFT 15
-#define MADERA_IN2R_HPF_WIDTH 1
#define MADERA_IN2R_PGA_VOL_MASK 0x00FE
#define MADERA_IN2R_PGA_VOL_SHIFT 1
-#define MADERA_IN2R_PGA_VOL_WIDTH 7
#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN2_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN2_DMICCLK_SRC_WIDTH 2
/* (0x031D) ADC_Digital_Volume_2R */
#define MADERA_IN2R_SRC_MASK 0x4000
#define MADERA_IN2R_SRC_SHIFT 14
-#define MADERA_IN2R_SRC_WIDTH 1
#define MADERA_IN2R_SRC_SE_MASK 0x2000
#define MADERA_IN2R_SRC_SE_SHIFT 13
-#define MADERA_IN2R_SRC_SE_WIDTH 1
#define MADERA_IN2R_LP_MODE 0x0800
#define MADERA_IN2R_LP_MODE_MASK 0x0800
#define MADERA_IN2R_LP_MODE_SHIFT 11
-#define MADERA_IN2R_LP_MODE_WIDTH 1
#define MADERA_IN2R_MUTE 0x0100
#define MADERA_IN2R_MUTE_MASK 0x0100
#define MADERA_IN2R_MUTE_SHIFT 8
-#define MADERA_IN2R_MUTE_WIDTH 1
#define MADERA_IN2R_DIG_VOL_MASK 0x00FF
#define MADERA_IN2R_DIG_VOL_SHIFT 0
-#define MADERA_IN2R_DIG_VOL_WIDTH 8
/* (0x0320) IN3L_Control */
#define MADERA_IN3L_HPF_MASK 0x8000
#define MADERA_IN3L_HPF_SHIFT 15
-#define MADERA_IN3L_HPF_WIDTH 1
#define MADERA_IN3_DMIC_SUP_MASK 0x1800
#define MADERA_IN3_DMIC_SUP_SHIFT 11
-#define MADERA_IN3_DMIC_SUP_WIDTH 2
#define MADERA_IN3_MODE_MASK 0x0400
#define MADERA_IN3_MODE_SHIFT 10
-#define MADERA_IN3_MODE_WIDTH 1
#define MADERA_IN3L_PGA_VOL_MASK 0x00FE
#define MADERA_IN3L_PGA_VOL_SHIFT 1
-#define MADERA_IN3L_PGA_VOL_WIDTH 7
/* (0x0321) ADC_Digital_Volume_3L */
#define MADERA_IN3L_MUTE 0x0100
#define MADERA_IN3L_MUTE_MASK 0x0100
#define MADERA_IN3L_MUTE_SHIFT 8
-#define MADERA_IN3L_MUTE_WIDTH 1
#define MADERA_IN3L_DIG_VOL_MASK 0x00FF
#define MADERA_IN3L_DIG_VOL_SHIFT 0
-#define MADERA_IN3L_DIG_VOL_WIDTH 8
/* (0x0322) DMIC3L_Control */
#define MADERA_IN3_OSR_MASK 0x0700
#define MADERA_IN3_OSR_SHIFT 8
-#define MADERA_IN3_OSR_WIDTH 3
/* (0x0324) IN3R_Control */
#define MADERA_IN3R_HPF_MASK 0x8000
#define MADERA_IN3R_HPF_SHIFT 15
-#define MADERA_IN3R_HPF_WIDTH 1
#define MADERA_IN3R_PGA_VOL_MASK 0x00FE
#define MADERA_IN3R_PGA_VOL_SHIFT 1
-#define MADERA_IN3R_PGA_VOL_WIDTH 7
#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN3_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN3_DMICCLK_SRC_WIDTH 2
/* (0x0325) ADC_Digital_Volume_3R */
#define MADERA_IN3R_MUTE 0x0100
#define MADERA_IN3R_MUTE_MASK 0x0100
#define MADERA_IN3R_MUTE_SHIFT 8
-#define MADERA_IN3R_MUTE_WIDTH 1
#define MADERA_IN3R_DIG_VOL_MASK 0x00FF
#define MADERA_IN3R_DIG_VOL_SHIFT 0
-#define MADERA_IN3R_DIG_VOL_WIDTH 8
/* (0x0328) IN4L_Control */
#define MADERA_IN4L_HPF_MASK 0x8000
#define MADERA_IN4L_HPF_SHIFT 15
-#define MADERA_IN4L_HPF_WIDTH 1
#define MADERA_IN4_DMIC_SUP_MASK 0x1800
#define MADERA_IN4_DMIC_SUP_SHIFT 11
-#define MADERA_IN4_DMIC_SUP_WIDTH 2
/* (0x0329) ADC_Digital_Volume_4L */
#define MADERA_IN4L_MUTE 0x0100
#define MADERA_IN4L_MUTE_MASK 0x0100
#define MADERA_IN4L_MUTE_SHIFT 8
-#define MADERA_IN4L_MUTE_WIDTH 1
#define MADERA_IN4L_DIG_VOL_MASK 0x00FF
#define MADERA_IN4L_DIG_VOL_SHIFT 0
-#define MADERA_IN4L_DIG_VOL_WIDTH 8
/* (0x032A) DMIC4L_Control */
#define MADERA_IN4_OSR_MASK 0x0700
#define MADERA_IN4_OSR_SHIFT 8
-#define MADERA_IN4_OSR_WIDTH 3
/* (0x032C) IN4R_Control */
#define MADERA_IN4R_HPF_MASK 0x8000
#define MADERA_IN4R_HPF_SHIFT 15
-#define MADERA_IN4R_HPF_WIDTH 1
#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN4_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN4_DMICCLK_SRC_WIDTH 2
/* (0x032D) ADC_Digital_Volume_4R */
#define MADERA_IN4R_MUTE 0x0100
#define MADERA_IN4R_MUTE_MASK 0x0100
#define MADERA_IN4R_MUTE_SHIFT 8
-#define MADERA_IN4R_MUTE_WIDTH 1
#define MADERA_IN4R_DIG_VOL_MASK 0x00FF
#define MADERA_IN4R_DIG_VOL_SHIFT 0
-#define MADERA_IN4R_DIG_VOL_WIDTH 8
/* (0x0330) IN5L_Control */
#define MADERA_IN5L_HPF_MASK 0x8000
#define MADERA_IN5L_HPF_SHIFT 15
-#define MADERA_IN5L_HPF_WIDTH 1
#define MADERA_IN5_DMIC_SUP_MASK 0x1800
#define MADERA_IN5_DMIC_SUP_SHIFT 11
-#define MADERA_IN5_DMIC_SUP_WIDTH 2
/* (0x0331) ADC_Digital_Volume_5L */
#define MADERA_IN5L_MUTE 0x0100
#define MADERA_IN5L_MUTE_MASK 0x0100
#define MADERA_IN5L_MUTE_SHIFT 8
-#define MADERA_IN5L_MUTE_WIDTH 1
#define MADERA_IN5L_DIG_VOL_MASK 0x00FF
#define MADERA_IN5L_DIG_VOL_SHIFT 0
-#define MADERA_IN5L_DIG_VOL_WIDTH 8
/* (0x0332) DMIC5L_Control */
#define MADERA_IN5_OSR_MASK 0x0700
#define MADERA_IN5_OSR_SHIFT 8
-#define MADERA_IN5_OSR_WIDTH 3
/* (0x0334) IN5R_Control */
#define MADERA_IN5R_HPF_MASK 0x8000
#define MADERA_IN5R_HPF_SHIFT 15
-#define MADERA_IN5R_HPF_WIDTH 1
#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN5_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN5_DMICCLK_SRC_WIDTH 2
/* (0x0335) ADC_Digital_Volume_5R */
#define MADERA_IN5R_MUTE 0x0100
#define MADERA_IN5R_MUTE_MASK 0x0100
#define MADERA_IN5R_MUTE_SHIFT 8
-#define MADERA_IN5R_MUTE_WIDTH 1
#define MADERA_IN5R_DIG_VOL_MASK 0x00FF
#define MADERA_IN5R_DIG_VOL_SHIFT 0
-#define MADERA_IN5R_DIG_VOL_WIDTH 8
/* (0x0338) IN6L_Control */
#define MADERA_IN6L_HPF_MASK 0x8000
#define MADERA_IN6L_HPF_SHIFT 15
-#define MADERA_IN6L_HPF_WIDTH 1
#define MADERA_IN6_DMIC_SUP_MASK 0x1800
#define MADERA_IN6_DMIC_SUP_SHIFT 11
-#define MADERA_IN6_DMIC_SUP_WIDTH 2
/* (0x0339) ADC_Digital_Volume_6L */
#define MADERA_IN6L_MUTE 0x0100
#define MADERA_IN6L_MUTE_MASK 0x0100
#define MADERA_IN6L_MUTE_SHIFT 8
-#define MADERA_IN6L_MUTE_WIDTH 1
#define MADERA_IN6L_DIG_VOL_MASK 0x00FF
#define MADERA_IN6L_DIG_VOL_SHIFT 0
-#define MADERA_IN6L_DIG_VOL_WIDTH 8
/* (0x033A) DMIC6L_Control */
#define MADERA_IN6_OSR_MASK 0x0700
#define MADERA_IN6_OSR_SHIFT 8
-#define MADERA_IN6_OSR_WIDTH 3
/* (0x033C) IN6R_Control */
#define MADERA_IN6R_HPF_MASK 0x8000
#define MADERA_IN6R_HPF_SHIFT 15
-#define MADERA_IN6R_HPF_WIDTH 1
/* (0x033D) ADC_Digital_Volume_6R */
#define MADERA_IN6R_MUTE 0x0100
#define MADERA_IN6R_MUTE_MASK 0x0100
#define MADERA_IN6R_MUTE_SHIFT 8
-#define MADERA_IN6R_MUTE_WIDTH 1
#define MADERA_IN6R_DIG_VOL_MASK 0x00FF
#define MADERA_IN6R_DIG_VOL_SHIFT 0
-#define MADERA_IN6R_DIG_VOL_WIDTH 8
/* (0x033E) DMIC6R_Control */
#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN6_DMICCLK_SRC_SHIFT 11
-#define MADERA_IN6_DMICCLK_SRC_WIDTH 2
/* (0x0400) Output_Enables_1 */
#define MADERA_EP_SEL 0x8000
#define MADERA_EP_SEL_MASK 0x8000
#define MADERA_EP_SEL_SHIFT 15
-#define MADERA_EP_SEL_WIDTH 1
#define MADERA_OUT6L_ENA 0x0800
#define MADERA_OUT6L_ENA_MASK 0x0800
#define MADERA_OUT6L_ENA_SHIFT 11
-#define MADERA_OUT6L_ENA_WIDTH 1
#define MADERA_OUT6R_ENA 0x0400
#define MADERA_OUT6R_ENA_MASK 0x0400
#define MADERA_OUT6R_ENA_SHIFT 10
-#define MADERA_OUT6R_ENA_WIDTH 1
#define MADERA_OUT5L_ENA 0x0200
#define MADERA_OUT5L_ENA_MASK 0x0200
#define MADERA_OUT5L_ENA_SHIFT 9
-#define MADERA_OUT5L_ENA_WIDTH 1
#define MADERA_OUT5R_ENA 0x0100
#define MADERA_OUT5R_ENA_MASK 0x0100
#define MADERA_OUT5R_ENA_SHIFT 8
-#define MADERA_OUT5R_ENA_WIDTH 1
#define MADERA_OUT4L_ENA 0x0080
#define MADERA_OUT4L_ENA_MASK 0x0080
#define MADERA_OUT4L_ENA_SHIFT 7
-#define MADERA_OUT4L_ENA_WIDTH 1
#define MADERA_OUT4R_ENA 0x0040
#define MADERA_OUT4R_ENA_MASK 0x0040
#define MADERA_OUT4R_ENA_SHIFT 6
-#define MADERA_OUT4R_ENA_WIDTH 1
#define MADERA_OUT3L_ENA 0x0020
#define MADERA_OUT3L_ENA_MASK 0x0020
#define MADERA_OUT3L_ENA_SHIFT 5
-#define MADERA_OUT3L_ENA_WIDTH 1
#define MADERA_OUT3R_ENA 0x0010
#define MADERA_OUT3R_ENA_MASK 0x0010
#define MADERA_OUT3R_ENA_SHIFT 4
-#define MADERA_OUT3R_ENA_WIDTH 1
#define MADERA_OUT2L_ENA 0x0008
#define MADERA_OUT2L_ENA_MASK 0x0008
#define MADERA_OUT2L_ENA_SHIFT 3
-#define MADERA_OUT2L_ENA_WIDTH 1
#define MADERA_OUT2R_ENA 0x0004
#define MADERA_OUT2R_ENA_MASK 0x0004
#define MADERA_OUT2R_ENA_SHIFT 2
-#define MADERA_OUT2R_ENA_WIDTH 1
#define MADERA_OUT1L_ENA 0x0002
#define MADERA_OUT1L_ENA_MASK 0x0002
#define MADERA_OUT1L_ENA_SHIFT 1
-#define MADERA_OUT1L_ENA_WIDTH 1
#define MADERA_OUT1R_ENA 0x0001
#define MADERA_OUT1R_ENA_MASK 0x0001
#define MADERA_OUT1R_ENA_SHIFT 0
-#define MADERA_OUT1R_ENA_WIDTH 1
/* (0x0408) Output_Rate_1 */
#define MADERA_CP_DAC_MODE_MASK 0x0040
#define MADERA_CP_DAC_MODE_SHIFT 6
-#define MADERA_CP_DAC_MODE_WIDTH 1
#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030
#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4
-#define MADERA_OUT_EXT_CLK_DIV_WIDTH 2
#define MADERA_OUT_CLK_SRC_MASK 0x0007
#define MADERA_OUT_CLK_SRC_SHIFT 0
-#define MADERA_OUT_CLK_SRC_WIDTH 3
/* (0x0409) Output_Volume_Ramp */
#define MADERA_OUT_VD_RAMP_MASK 0x0070
#define MADERA_OUT_VD_RAMP_SHIFT 4
-#define MADERA_OUT_VD_RAMP_WIDTH 3
#define MADERA_OUT_VI_RAMP_MASK 0x0007
#define MADERA_OUT_VI_RAMP_SHIFT 0
-#define MADERA_OUT_VI_RAMP_WIDTH 3
/* (0x0410) Output_Path_Config_1L */
#define MADERA_OUT1_MONO 0x1000
#define MADERA_OUT1_MONO_MASK 0x1000
#define MADERA_OUT1_MONO_SHIFT 12
-#define MADERA_OUT1_MONO_WIDTH 1
#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT1L_ANC_SRC_SHIFT 10
-#define MADERA_OUT1L_ANC_SRC_WIDTH 2
/* (0x0411) DAC_Digital_Volume_1L */
#define MADERA_OUT1L_VU 0x0200
#define MADERA_OUT1L_VU_MASK 0x0200
#define MADERA_OUT1L_VU_SHIFT 9
-#define MADERA_OUT1L_VU_WIDTH 1
#define MADERA_OUT1L_MUTE 0x0100
#define MADERA_OUT1L_MUTE_MASK 0x0100
#define MADERA_OUT1L_MUTE_SHIFT 8
-#define MADERA_OUT1L_MUTE_WIDTH 1
#define MADERA_OUT1L_VOL_MASK 0x00FF
#define MADERA_OUT1L_VOL_SHIFT 0
-#define MADERA_OUT1L_VOL_WIDTH 8
/* (0x0412) Output_Path_Config_1 */
#define MADERA_HP1_GND_SEL_MASK 0x0007
#define MADERA_HP1_GND_SEL_SHIFT 0
-#define MADERA_HP1_GND_SEL_WIDTH 3
/* (0x0414) Output_Path_Config_1R */
#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT1R_ANC_SRC_SHIFT 10
-#define MADERA_OUT1R_ANC_SRC_WIDTH 2
/* (0x0415) DAC_Digital_Volume_1R */
#define MADERA_OUT1R_MUTE 0x0100
#define MADERA_OUT1R_MUTE_MASK 0x0100
#define MADERA_OUT1R_MUTE_SHIFT 8
-#define MADERA_OUT1R_MUTE_WIDTH 1
#define MADERA_OUT1R_VOL_MASK 0x00FF
#define MADERA_OUT1R_VOL_SHIFT 0
-#define MADERA_OUT1R_VOL_WIDTH 8
/* (0x0418) Output_Path_Config_2L */
#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT2L_ANC_SRC_SHIFT 10
-#define MADERA_OUT2L_ANC_SRC_WIDTH 2
/* (0x0419) DAC_Digital_Volume_2L */
#define MADERA_OUT2L_MUTE 0x0100
#define MADERA_OUT2L_MUTE_MASK 0x0100
#define MADERA_OUT2L_MUTE_SHIFT 8
-#define MADERA_OUT2L_MUTE_WIDTH 1
#define MADERA_OUT2L_VOL_MASK 0x00FF
#define MADERA_OUT2L_VOL_SHIFT 0
-#define MADERA_OUT2L_VOL_WIDTH 8
/* (0x041A) Output_Path_Config_2 */
#define MADERA_HP2_GND_SEL_MASK 0x0007
#define MADERA_HP2_GND_SEL_SHIFT 0
-#define MADERA_HP2_GND_SEL_WIDTH 3
/* (0x041C) Output_Path_Config_2R */
#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT2R_ANC_SRC_SHIFT 10
-#define MADERA_OUT2R_ANC_SRC_WIDTH 2
/* (0x041D) DAC_Digital_Volume_2R */
#define MADERA_OUT2R_MUTE 0x0100
#define MADERA_OUT2R_MUTE_MASK 0x0100
#define MADERA_OUT2R_MUTE_SHIFT 8
-#define MADERA_OUT2R_MUTE_WIDTH 1
#define MADERA_OUT2R_VOL_MASK 0x00FF
#define MADERA_OUT2R_VOL_SHIFT 0
-#define MADERA_OUT2R_VOL_WIDTH 8
/* (0x0420) Output_Path_Config_3L */
#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT3L_ANC_SRC_SHIFT 10
-#define MADERA_OUT3L_ANC_SRC_WIDTH 2
/* (0x0421) DAC_Digital_Volume_3L */
#define MADERA_OUT3L_MUTE 0x0100
#define MADERA_OUT3L_MUTE_MASK 0x0100
#define MADERA_OUT3L_MUTE_SHIFT 8
-#define MADERA_OUT3L_MUTE_WIDTH 1
#define MADERA_OUT3L_VOL_MASK 0x00FF
#define MADERA_OUT3L_VOL_SHIFT 0
-#define MADERA_OUT3L_VOL_WIDTH 8
/* (0x0424) Output_Path_Config_3R */
#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT3R_ANC_SRC_SHIFT 10
-#define MADERA_OUT3R_ANC_SRC_WIDTH 2
/* (0x0425) DAC_Digital_Volume_3R */
#define MADERA_OUT3R_MUTE 0x0100
#define MADERA_OUT3R_MUTE_MASK 0x0100
#define MADERA_OUT3R_MUTE_SHIFT 8
-#define MADERA_OUT3R_MUTE_WIDTH 1
#define MADERA_OUT3R_VOL_MASK 0x00FF
#define MADERA_OUT3R_VOL_SHIFT 0
-#define MADERA_OUT3R_VOL_WIDTH 8
/* (0x0428) Output_Path_Config_4L */
#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT4L_ANC_SRC_SHIFT 10
-#define MADERA_OUT4L_ANC_SRC_WIDTH 2
/* (0x0429) DAC_Digital_Volume_4L */
#define MADERA_OUT4L_MUTE 0x0100
#define MADERA_OUT4L_MUTE_MASK 0x0100
#define MADERA_OUT4L_MUTE_SHIFT 8
-#define MADERA_OUT4L_MUTE_WIDTH 1
#define MADERA_OUT4L_VOL_MASK 0x00FF
#define MADERA_OUT4L_VOL_SHIFT 0
-#define MADERA_OUT4L_VOL_WIDTH 8
/* (0x042C) Output_Path_Config_4R */
#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT4R_ANC_SRC_SHIFT 10
-#define MADERA_OUT4R_ANC_SRC_WIDTH 2
/* (0x042D) DAC_Digital_Volume_4R */
#define MADERA_OUT4R_MUTE 0x0100
#define MADERA_OUT4R_MUTE_MASK 0x0100
#define MADERA_OUT4R_MUTE_SHIFT 8
-#define MADERA_OUT4R_MUTE_WIDTH 1
#define MADERA_OUT4R_VOL_MASK 0x00FF
#define MADERA_OUT4R_VOL_SHIFT 0
-#define MADERA_OUT4R_VOL_WIDTH 8
/* (0x0430) Output_Path_Config_5L */
#define MADERA_OUT5_OSR 0x2000
#define MADERA_OUT5_OSR_MASK 0x2000
#define MADERA_OUT5_OSR_SHIFT 13
-#define MADERA_OUT5_OSR_WIDTH 1
#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT5L_ANC_SRC_SHIFT 10
-#define MADERA_OUT5L_ANC_SRC_WIDTH 2
/* (0x0431) DAC_Digital_Volume_5L */
#define MADERA_OUT5L_MUTE 0x0100
#define MADERA_OUT5L_MUTE_MASK 0x0100
#define MADERA_OUT5L_MUTE_SHIFT 8
-#define MADERA_OUT5L_MUTE_WIDTH 1
#define MADERA_OUT5L_VOL_MASK 0x00FF
#define MADERA_OUT5L_VOL_SHIFT 0
-#define MADERA_OUT5L_VOL_WIDTH 8
/* (0x0434) Output_Path_Config_5R */
#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT5R_ANC_SRC_SHIFT 10
-#define MADERA_OUT5R_ANC_SRC_WIDTH 2
/* (0x0435) DAC_Digital_Volume_5R */
#define MADERA_OUT5R_MUTE 0x0100
#define MADERA_OUT5R_MUTE_MASK 0x0100
#define MADERA_OUT5R_MUTE_SHIFT 8
-#define MADERA_OUT5R_MUTE_WIDTH 1
#define MADERA_OUT5R_VOL_MASK 0x00FF
#define MADERA_OUT5R_VOL_SHIFT 0
-#define MADERA_OUT5R_VOL_WIDTH 8
/* (0x0438) Output_Path_Config_6L */
#define MADERA_OUT6_OSR 0x2000
#define MADERA_OUT6_OSR_MASK 0x2000
#define MADERA_OUT6_OSR_SHIFT 13
-#define MADERA_OUT6_OSR_WIDTH 1
#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT6L_ANC_SRC_SHIFT 10
-#define MADERA_OUT6L_ANC_SRC_WIDTH 2
/* (0x0439) DAC_Digital_Volume_6L */
#define MADERA_OUT6L_MUTE 0x0100
#define MADERA_OUT6L_MUTE_MASK 0x0100
#define MADERA_OUT6L_MUTE_SHIFT 8
-#define MADERA_OUT6L_MUTE_WIDTH 1
#define MADERA_OUT6L_VOL_MASK 0x00FF
#define MADERA_OUT6L_VOL_SHIFT 0
-#define MADERA_OUT6L_VOL_WIDTH 8
/* (0x043C) Output_Path_Config_6R */
#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT6R_ANC_SRC_SHIFT 10
-#define MADERA_OUT6R_ANC_SRC_WIDTH 2
/* (0x043D) DAC_Digital_Volume_6R */
#define MADERA_OUT6R_MUTE 0x0100
#define MADERA_OUT6R_MUTE_MASK 0x0100
#define MADERA_OUT6R_MUTE_SHIFT 8
-#define MADERA_OUT6R_MUTE_WIDTH 1
#define MADERA_OUT6R_VOL_MASK 0x00FF
#define MADERA_OUT6R_VOL_SHIFT 0
-#define MADERA_OUT6R_VOL_WIDTH 8
/* (0x0450) - DAC AEC Control 1 */
#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C
#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2
-#define MADERA_AEC1_LOOPBACK_SRC_WIDTH 4
#define MADERA_AEC1_ENA_STS 0x0002
#define MADERA_AEC1_ENA_STS_MASK 0x0002
#define MADERA_AEC1_ENA_STS_SHIFT 1
-#define MADERA_AEC1_ENA_STS_WIDTH 1
#define MADERA_AEC1_LOOPBACK_ENA 0x0001
#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001
#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0
-#define MADERA_AEC1_LOOPBACK_ENA_WIDTH 1
/* (0x0451) DAC_AEC_Control_2 */
#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C
#define MADERA_AEC2_LOOPBACK_SRC_SHIFT 2
-#define MADERA_AEC2_LOOPBACK_SRC_WIDTH 4
#define MADERA_AEC2_ENA_STS 0x0002
#define MADERA_AEC2_ENA_STS_MASK 0x0002
#define MADERA_AEC2_ENA_STS_SHIFT 1
-#define MADERA_AEC2_ENA_STS_WIDTH 1
#define MADERA_AEC2_LOOPBACK_ENA 0x0001
#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001
#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0
-#define MADERA_AEC2_LOOPBACK_ENA_WIDTH 1
/* (0x0458) Noise_Gate_Control */
#define MADERA_NGATE_HOLD_MASK 0x0030
#define MADERA_NGATE_HOLD_SHIFT 4
-#define MADERA_NGATE_HOLD_WIDTH 2
#define MADERA_NGATE_THR_MASK 0x000E
#define MADERA_NGATE_THR_SHIFT 1
-#define MADERA_NGATE_THR_WIDTH 3
#define MADERA_NGATE_ENA 0x0001
#define MADERA_NGATE_ENA_MASK 0x0001
#define MADERA_NGATE_ENA_SHIFT 0
-#define MADERA_NGATE_ENA_WIDTH 1
/* (0x0490) PDM_SPK1_CTRL_1 */
#define MADERA_SPK1R_MUTE 0x2000
#define MADERA_SPK1R_MUTE_MASK 0x2000
#define MADERA_SPK1R_MUTE_SHIFT 13
-#define MADERA_SPK1R_MUTE_WIDTH 1
#define MADERA_SPK1L_MUTE 0x1000
#define MADERA_SPK1L_MUTE_MASK 0x1000
#define MADERA_SPK1L_MUTE_SHIFT 12
-#define MADERA_SPK1L_MUTE_WIDTH 1
#define MADERA_SPK1_MUTE_ENDIAN 0x0100
#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100
#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8
-#define MADERA_SPK1_MUTE_ENDIAN_WIDTH 1
#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF
#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0
-#define MADERA_SPK1_MUTE_SEQ1_WIDTH 8
/* (0x0491) PDM_SPK1_CTRL_2 */
#define MADERA_SPK1_FMT 0x0001
#define MADERA_SPK1_FMT_MASK 0x0001
#define MADERA_SPK1_FMT_SHIFT 0
-#define MADERA_SPK1_FMT_WIDTH 1
/* (0x0492) PDM_SPK2_CTRL_1 */
#define MADERA_SPK2R_MUTE 0x2000
#define MADERA_SPK2R_MUTE_MASK 0x2000
#define MADERA_SPK2R_MUTE_SHIFT 13
-#define MADERA_SPK2R_MUTE_WIDTH 1
#define MADERA_SPK2L_MUTE 0x1000
#define MADERA_SPK2L_MUTE_MASK 0x1000
#define MADERA_SPK2L_MUTE_SHIFT 12
-#define MADERA_SPK2L_MUTE_WIDTH 1
/* (0x04A0) - HP1 Short Circuit Ctrl */
#define MADERA_HP1_SC_ENA 0x1000
#define MADERA_HP1_SC_ENA_MASK 0x1000
#define MADERA_HP1_SC_ENA_SHIFT 12
-#define MADERA_HP1_SC_ENA_WIDTH 1
/* (0x04A1) - HP2 Short Circuit Ctrl */
#define MADERA_HP2_SC_ENA 0x1000
#define MADERA_HP2_SC_ENA_MASK 0x1000
#define MADERA_HP2_SC_ENA_SHIFT 12
-#define MADERA_HP2_SC_ENA_WIDTH 1
/* (0x04A2) - HP3 Short Circuit Ctrl */
#define MADERA_HP3_SC_ENA 0x1000
#define MADERA_HP3_SC_ENA_MASK 0x1000
#define MADERA_HP3_SC_ENA_SHIFT 12
-#define MADERA_HP3_SC_ENA_WIDTH 1
/* (0x04A8) - HP_Test_Ctrl_5 */
#define MADERA_HP1L_ONEFLT 0x0100
#define MADERA_HP1L_ONEFLT_MASK 0x0100
#define MADERA_HP1L_ONEFLT_SHIFT 8
-#define MADERA_HP1L_ONEFLT_WIDTH 1
/* (0x04A9) - HP_Test_Ctrl_6 */
#define MADERA_HP1R_ONEFLT 0x0100
#define MADERA_HP1R_ONEFLT_MASK 0x0100
#define MADERA_HP1R_ONEFLT_SHIFT 8
-#define MADERA_HP1R_ONEFLT_WIDTH 1
/* (0x0500) AIF1_BCLK_Ctrl */
#define MADERA_AIF1_BCLK_INV 0x0080
#define MADERA_AIF1_BCLK_INV_MASK 0x0080
#define MADERA_AIF1_BCLK_INV_SHIFT 7
-#define MADERA_AIF1_BCLK_INV_WIDTH 1
#define MADERA_AIF1_BCLK_MSTR 0x0020
#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020
#define MADERA_AIF1_BCLK_MSTR_SHIFT 5
-#define MADERA_AIF1_BCLK_MSTR_WIDTH 1
#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F
#define MADERA_AIF1_BCLK_FREQ_SHIFT 0
-#define MADERA_AIF1_BCLK_FREQ_WIDTH 5
/* (0x0501) AIF1_Tx_Pin_Ctrl */
#define MADERA_AIF1TX_LRCLK_SRC 0x0008
#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008
#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3
-#define MADERA_AIF1TX_LRCLK_SRC_WIDTH 1
#define MADERA_AIF1TX_LRCLK_INV 0x0004
#define MADERA_AIF1TX_LRCLK_INV_MASK 0x0004
#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2
-#define MADERA_AIF1TX_LRCLK_INV_WIDTH 1
#define MADERA_AIF1TX_LRCLK_MSTR 0x0001
#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001
#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0
-#define MADERA_AIF1TX_LRCLK_MSTR_WIDTH 1
/* (0x0502) AIF1_Rx_Pin_Ctrl */
#define MADERA_AIF1RX_LRCLK_INV 0x0004
#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004
#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2
-#define MADERA_AIF1RX_LRCLK_INV_WIDTH 1
#define MADERA_AIF1RX_LRCLK_FRC 0x0002
#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002
#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1
-#define MADERA_AIF1RX_LRCLK_FRC_WIDTH 1
#define MADERA_AIF1RX_LRCLK_MSTR 0x0001
#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001
#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0
-#define MADERA_AIF1RX_LRCLK_MSTR_WIDTH 1
/* (0x0503) AIF1_Rate_Ctrl */
#define MADERA_AIF1_RATE_MASK 0xF800
#define MADERA_AIF1_RATE_SHIFT 11
-#define MADERA_AIF1_RATE_WIDTH 5
#define MADERA_AIF1_TRI 0x0040
#define MADERA_AIF1_TRI_MASK 0x0040
#define MADERA_AIF1_TRI_SHIFT 6
-#define MADERA_AIF1_TRI_WIDTH 1
/* (0x0504) AIF1_Format */
#define MADERA_AIF1_FMT_MASK 0x0007
#define MADERA_AIF1_FMT_SHIFT 0
-#define MADERA_AIF1_FMT_WIDTH 3
/* (0x0506) AIF1_Rx_BCLK_Rate */
#define MADERA_AIF1RX_BCPF_MASK 0x1FFF
#define MADERA_AIF1RX_BCPF_SHIFT 0
-#define MADERA_AIF1RX_BCPF_WIDTH 13
/* (0x0507) AIF1_Frame_Ctrl_1 */
#define MADERA_AIF1TX_WL_MASK 0x3F00
#define MADERA_AIF1TX_WL_SHIFT 8
-#define MADERA_AIF1TX_WL_WIDTH 6
#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF
#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0
-#define MADERA_AIF1TX_SLOT_LEN_WIDTH 8
/* (0x0508) AIF1_Frame_Ctrl_2 */
#define MADERA_AIF1RX_WL_MASK 0x3F00
#define MADERA_AIF1RX_WL_SHIFT 8
-#define MADERA_AIF1RX_WL_WIDTH 6
#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF
#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0
-#define MADERA_AIF1RX_SLOT_LEN_WIDTH 8
/* (0x0509) AIF1_Frame_Ctrl_3 */
#define MADERA_AIF1TX1_SLOT_MASK 0x003F
#define MADERA_AIF1TX1_SLOT_SHIFT 0
-#define MADERA_AIF1TX1_SLOT_WIDTH 6
/* (0x0519) AIF1_Tx_Enables */
#define MADERA_AIF1TX8_ENA 0x0080
#define MADERA_AIF1TX8_ENA_MASK 0x0080
#define MADERA_AIF1TX8_ENA_SHIFT 7
-#define MADERA_AIF1TX8_ENA_WIDTH 1
#define MADERA_AIF1TX7_ENA 0x0040
#define MADERA_AIF1TX7_ENA_MASK 0x0040
#define MADERA_AIF1TX7_ENA_SHIFT 6
-#define MADERA_AIF1TX7_ENA_WIDTH 1
#define MADERA_AIF1TX6_ENA 0x0020
#define MADERA_AIF1TX6_ENA_MASK 0x0020
#define MADERA_AIF1TX6_ENA_SHIFT 5
-#define MADERA_AIF1TX6_ENA_WIDTH 1
#define MADERA_AIF1TX5_ENA 0x0010
#define MADERA_AIF1TX5_ENA_MASK 0x0010
#define MADERA_AIF1TX5_ENA_SHIFT 4
-#define MADERA_AIF1TX5_ENA_WIDTH 1
#define MADERA_AIF1TX4_ENA 0x0008
#define MADERA_AIF1TX4_ENA_MASK 0x0008
#define MADERA_AIF1TX4_ENA_SHIFT 3
-#define MADERA_AIF1TX4_ENA_WIDTH 1
#define MADERA_AIF1TX3_ENA 0x0004
#define MADERA_AIF1TX3_ENA_MASK 0x0004
#define MADERA_AIF1TX3_ENA_SHIFT 2
-#define MADERA_AIF1TX3_ENA_WIDTH 1
#define MADERA_AIF1TX2_ENA 0x0002
#define MADERA_AIF1TX2_ENA_MASK 0x0002
#define MADERA_AIF1TX2_ENA_SHIFT 1
-#define MADERA_AIF1TX2_ENA_WIDTH 1
#define MADERA_AIF1TX1_ENA 0x0001
#define MADERA_AIF1TX1_ENA_MASK 0x0001
#define MADERA_AIF1TX1_ENA_SHIFT 0
-#define MADERA_AIF1TX1_ENA_WIDTH 1
/* (0x051A) AIF1_Rx_Enables */
#define MADERA_AIF1RX8_ENA 0x0080
#define MADERA_AIF1RX8_ENA_MASK 0x0080
#define MADERA_AIF1RX8_ENA_SHIFT 7
-#define MADERA_AIF1RX8_ENA_WIDTH 1
#define MADERA_AIF1RX7_ENA 0x0040
#define MADERA_AIF1RX7_ENA_MASK 0x0040
#define MADERA_AIF1RX7_ENA_SHIFT 6
-#define MADERA_AIF1RX7_ENA_WIDTH 1
#define MADERA_AIF1RX6_ENA 0x0020
#define MADERA_AIF1RX6_ENA_MASK 0x0020
#define MADERA_AIF1RX6_ENA_SHIFT 5
-#define MADERA_AIF1RX6_ENA_WIDTH 1
#define MADERA_AIF1RX5_ENA 0x0010
#define MADERA_AIF1RX5_ENA_MASK 0x0010
#define MADERA_AIF1RX5_ENA_SHIFT 4
-#define MADERA_AIF1RX5_ENA_WIDTH 1
#define MADERA_AIF1RX4_ENA 0x0008
#define MADERA_AIF1RX4_ENA_MASK 0x0008
#define MADERA_AIF1RX4_ENA_SHIFT 3
-#define MADERA_AIF1RX4_ENA_WIDTH 1
#define MADERA_AIF1RX3_ENA 0x0004
#define MADERA_AIF1RX3_ENA_MASK 0x0004
#define MADERA_AIF1RX3_ENA_SHIFT 2
-#define MADERA_AIF1RX3_ENA_WIDTH 1
#define MADERA_AIF1RX2_ENA 0x0002
#define MADERA_AIF1RX2_ENA_MASK 0x0002
#define MADERA_AIF1RX2_ENA_SHIFT 1
-#define MADERA_AIF1RX2_ENA_WIDTH 1
#define MADERA_AIF1RX1_ENA 0x0001
#define MADERA_AIF1RX1_ENA_MASK 0x0001
#define MADERA_AIF1RX1_ENA_SHIFT 0
-#define MADERA_AIF1RX1_ENA_WIDTH 1
/* (0x0559) AIF2_Tx_Enables */
#define MADERA_AIF2TX8_ENA 0x0080
#define MADERA_AIF2TX8_ENA_MASK 0x0080
#define MADERA_AIF2TX8_ENA_SHIFT 7
-#define MADERA_AIF2TX8_ENA_WIDTH 1
#define MADERA_AIF2TX7_ENA 0x0040
#define MADERA_AIF2TX7_ENA_MASK 0x0040
#define MADERA_AIF2TX7_ENA_SHIFT 6
-#define MADERA_AIF2TX7_ENA_WIDTH 1
#define MADERA_AIF2TX6_ENA 0x0020
#define MADERA_AIF2TX6_ENA_MASK 0x0020
#define MADERA_AIF2TX6_ENA_SHIFT 5
-#define MADERA_AIF2TX6_ENA_WIDTH 1
#define MADERA_AIF2TX5_ENA 0x0010
#define MADERA_AIF2TX5_ENA_MASK 0x0010
#define MADERA_AIF2TX5_ENA_SHIFT 4
-#define MADERA_AIF2TX5_ENA_WIDTH 1
#define MADERA_AIF2TX4_ENA 0x0008
#define MADERA_AIF2TX4_ENA_MASK 0x0008
#define MADERA_AIF2TX4_ENA_SHIFT 3
-#define MADERA_AIF2TX4_ENA_WIDTH 1
#define MADERA_AIF2TX3_ENA 0x0004
#define MADERA_AIF2TX3_ENA_MASK 0x0004
#define MADERA_AIF2TX3_ENA_SHIFT 2
-#define MADERA_AIF2TX3_ENA_WIDTH 1
#define MADERA_AIF2TX2_ENA 0x0002
#define MADERA_AIF2TX2_ENA_MASK 0x0002
#define MADERA_AIF2TX2_ENA_SHIFT 1
-#define MADERA_AIF2TX2_ENA_WIDTH 1
#define MADERA_AIF2TX1_ENA 0x0001
#define MADERA_AIF2TX1_ENA_MASK 0x0001
#define MADERA_AIF2TX1_ENA_SHIFT 0
-#define MADERA_AIF2TX1_ENA_WIDTH 1
/* (0x055A) AIF2_Rx_Enables */
#define MADERA_AIF2RX8_ENA 0x0080
#define MADERA_AIF2RX8_ENA_MASK 0x0080
#define MADERA_AIF2RX8_ENA_SHIFT 7
-#define MADERA_AIF2RX8_ENA_WIDTH 1
#define MADERA_AIF2RX7_ENA 0x0040
#define MADERA_AIF2RX7_ENA_MASK 0x0040
#define MADERA_AIF2RX7_ENA_SHIFT 6
-#define MADERA_AIF2RX7_ENA_WIDTH 1
#define MADERA_AIF2RX6_ENA 0x0020
#define MADERA_AIF2RX6_ENA_MASK 0x0020
#define MADERA_AIF2RX6_ENA_SHIFT 5
-#define MADERA_AIF2RX6_ENA_WIDTH 1
#define MADERA_AIF2RX5_ENA 0x0010
#define MADERA_AIF2RX5_ENA_MASK 0x0010
#define MADERA_AIF2RX5_ENA_SHIFT 4
-#define MADERA_AIF2RX5_ENA_WIDTH 1
#define MADERA_AIF2RX4_ENA 0x0008
#define MADERA_AIF2RX4_ENA_MASK 0x0008
#define MADERA_AIF2RX4_ENA_SHIFT 3
-#define MADERA_AIF2RX4_ENA_WIDTH 1
#define MADERA_AIF2RX3_ENA 0x0004
#define MADERA_AIF2RX3_ENA_MASK 0x0004
#define MADERA_AIF2RX3_ENA_SHIFT 2
-#define MADERA_AIF2RX3_ENA_WIDTH 1
#define MADERA_AIF2RX2_ENA 0x0002
#define MADERA_AIF2RX2_ENA_MASK 0x0002
#define MADERA_AIF2RX2_ENA_SHIFT 1
-#define MADERA_AIF2RX2_ENA_WIDTH 1
#define MADERA_AIF2RX1_ENA 0x0001
#define MADERA_AIF2RX1_ENA_MASK 0x0001
#define MADERA_AIF2RX1_ENA_SHIFT 0
-#define MADERA_AIF2RX1_ENA_WIDTH 1
/* (0x0599) AIF3_Tx_Enables */
#define MADERA_AIF3TX8_ENA 0x0080
#define MADERA_AIF3TX8_ENA_MASK 0x0080
#define MADERA_AIF3TX8_ENA_SHIFT 7
-#define MADERA_AIF3TX8_ENA_WIDTH 1
#define MADERA_AIF3TX7_ENA 0x0040
#define MADERA_AIF3TX7_ENA_MASK 0x0040
#define MADERA_AIF3TX7_ENA_SHIFT 6
-#define MADERA_AIF3TX7_ENA_WIDTH 1
#define MADERA_AIF3TX6_ENA 0x0020
#define MADERA_AIF3TX6_ENA_MASK 0x0020
#define MADERA_AIF3TX6_ENA_SHIFT 5
-#define MADERA_AIF3TX6_ENA_WIDTH 1
#define MADERA_AIF3TX5_ENA 0x0010
#define MADERA_AIF3TX5_ENA_MASK 0x0010
#define MADERA_AIF3TX5_ENA_SHIFT 4
-#define MADERA_AIF3TX5_ENA_WIDTH 1
#define MADERA_AIF3TX4_ENA 0x0008
#define MADERA_AIF3TX4_ENA_MASK 0x0008
#define MADERA_AIF3TX4_ENA_SHIFT 3
-#define MADERA_AIF3TX4_ENA_WIDTH 1
#define MADERA_AIF3TX3_ENA 0x0004
#define MADERA_AIF3TX3_ENA_MASK 0x0004
#define MADERA_AIF3TX3_ENA_SHIFT 2
-#define MADERA_AIF3TX3_ENA_WIDTH 1
#define MADERA_AIF3TX2_ENA 0x0002
#define MADERA_AIF3TX2_ENA_MASK 0x0002
#define MADERA_AIF3TX2_ENA_SHIFT 1
-#define MADERA_AIF3TX2_ENA_WIDTH 1
#define MADERA_AIF3TX1_ENA 0x0001
#define MADERA_AIF3TX1_ENA_MASK 0x0001
#define MADERA_AIF3TX1_ENA_SHIFT 0
-#define MADERA_AIF3TX1_ENA_WIDTH 1
/* (0x059A) AIF3_Rx_Enables */
#define MADERA_AIF3RX8_ENA 0x0080
#define MADERA_AIF3RX8_ENA_MASK 0x0080
#define MADERA_AIF3RX8_ENA_SHIFT 7
-#define MADERA_AIF3RX8_ENA_WIDTH 1
#define MADERA_AIF3RX7_ENA 0x0040
#define MADERA_AIF3RX7_ENA_MASK 0x0040
#define MADERA_AIF3RX7_ENA_SHIFT 6
-#define MADERA_AIF3RX7_ENA_WIDTH 1
#define MADERA_AIF3RX6_ENA 0x0020
#define MADERA_AIF3RX6_ENA_MASK 0x0020
#define MADERA_AIF3RX6_ENA_SHIFT 5
-#define MADERA_AIF3RX6_ENA_WIDTH 1
#define MADERA_AIF3RX5_ENA 0x0010
#define MADERA_AIF3RX5_ENA_MASK 0x0010
#define MADERA_AIF3RX5_ENA_SHIFT 4
-#define MADERA_AIF3RX5_ENA_WIDTH 1
#define MADERA_AIF3RX4_ENA 0x0008
#define MADERA_AIF3RX4_ENA_MASK 0x0008
#define MADERA_AIF3RX4_ENA_SHIFT 3
-#define MADERA_AIF3RX4_ENA_WIDTH 1
#define MADERA_AIF3RX3_ENA 0x0004
#define MADERA_AIF3RX3_ENA_MASK 0x0004
#define MADERA_AIF3RX3_ENA_SHIFT 2
-#define MADERA_AIF3RX3_ENA_WIDTH 1
#define MADERA_AIF3RX2_ENA 0x0002
#define MADERA_AIF3RX2_ENA_MASK 0x0002
#define MADERA_AIF3RX2_ENA_SHIFT 1
-#define MADERA_AIF3RX2_ENA_WIDTH 1
#define MADERA_AIF3RX1_ENA 0x0001
#define MADERA_AIF3RX1_ENA_MASK 0x0001
#define MADERA_AIF3RX1_ENA_SHIFT 0
-#define MADERA_AIF3RX1_ENA_WIDTH 1
/* (0x05B9) AIF4_Tx_Enables */
#define MADERA_AIF4TX2_ENA 0x0002
#define MADERA_AIF4TX2_ENA_MASK 0x0002
#define MADERA_AIF4TX2_ENA_SHIFT 1
-#define MADERA_AIF4TX2_ENA_WIDTH 1
#define MADERA_AIF4TX1_ENA 0x0001
#define MADERA_AIF4TX1_ENA_MASK 0x0001
#define MADERA_AIF4TX1_ENA_SHIFT 0
-#define MADERA_AIF4TX1_ENA_WIDTH 1
/* (0x05BA) AIF4_Rx_Enables */
#define MADERA_AIF4RX2_ENA 0x0002
#define MADERA_AIF4RX2_ENA_MASK 0x0002
#define MADERA_AIF4RX2_ENA_SHIFT 1
-#define MADERA_AIF4RX2_ENA_WIDTH 1
#define MADERA_AIF4RX1_ENA 0x0001
#define MADERA_AIF4RX1_ENA_MASK 0x0001
#define MADERA_AIF4RX1_ENA_SHIFT 0
-#define MADERA_AIF4RX1_ENA_WIDTH 1
/* (0x05C2) SPD1_TX_Control */
#define MADERA_SPD1_VAL2 0x2000
#define MADERA_SPD1_VAL2_MASK 0x2000
#define MADERA_SPD1_VAL2_SHIFT 13
-#define MADERA_SPD1_VAL2_WIDTH 1
#define MADERA_SPD1_VAL1 0x1000
#define MADERA_SPD1_VAL1_MASK 0x1000
#define MADERA_SPD1_VAL1_SHIFT 12
-#define MADERA_SPD1_VAL1_WIDTH 1
#define MADERA_SPD1_RATE_MASK 0x00F0
#define MADERA_SPD1_RATE_SHIFT 4
-#define MADERA_SPD1_RATE_WIDTH 4
#define MADERA_SPD1_ENA 0x0001
#define MADERA_SPD1_ENA_MASK 0x0001
#define MADERA_SPD1_ENA_SHIFT 0
-#define MADERA_SPD1_ENA_WIDTH 1
/* (0x05F5) SLIMbus_RX_Channel_Enable */
#define MADERA_SLIMRX8_ENA 0x0080
#define MADERA_SLIMRX8_ENA_MASK 0x0080
#define MADERA_SLIMRX8_ENA_SHIFT 7
-#define MADERA_SLIMRX8_ENA_WIDTH 1
#define MADERA_SLIMRX7_ENA 0x0040
#define MADERA_SLIMRX7_ENA_MASK 0x0040
#define MADERA_SLIMRX7_ENA_SHIFT 6
-#define MADERA_SLIMRX7_ENA_WIDTH 1
#define MADERA_SLIMRX6_ENA 0x0020
#define MADERA_SLIMRX6_ENA_MASK 0x0020
#define MADERA_SLIMRX6_ENA_SHIFT 5
-#define MADERA_SLIMRX6_ENA_WIDTH 1
#define MADERA_SLIMRX5_ENA 0x0010
#define MADERA_SLIMRX5_ENA_MASK 0x0010
#define MADERA_SLIMRX5_ENA_SHIFT 4
-#define MADERA_SLIMRX5_ENA_WIDTH 1
#define MADERA_SLIMRX4_ENA 0x0008
#define MADERA_SLIMRX4_ENA_MASK 0x0008
#define MADERA_SLIMRX4_ENA_SHIFT 3
-#define MADERA_SLIMRX4_ENA_WIDTH 1
#define MADERA_SLIMRX3_ENA 0x0004
#define MADERA_SLIMRX3_ENA_MASK 0x0004
#define MADERA_SLIMRX3_ENA_SHIFT 2
-#define MADERA_SLIMRX3_ENA_WIDTH 1
#define MADERA_SLIMRX2_ENA 0x0002
#define MADERA_SLIMRX2_ENA_MASK 0x0002
#define MADERA_SLIMRX2_ENA_SHIFT 1
-#define MADERA_SLIMRX2_ENA_WIDTH 1
#define MADERA_SLIMRX1_ENA 0x0001
#define MADERA_SLIMRX1_ENA_MASK 0x0001
#define MADERA_SLIMRX1_ENA_SHIFT 0
-#define MADERA_SLIMRX1_ENA_WIDTH 1
/* (0x05F6) SLIMbus_TX_Channel_Enable */
#define MADERA_SLIMTX8_ENA 0x0080
#define MADERA_SLIMTX8_ENA_MASK 0x0080
#define MADERA_SLIMTX8_ENA_SHIFT 7
-#define MADERA_SLIMTX8_ENA_WIDTH 1
#define MADERA_SLIMTX7_ENA 0x0040
#define MADERA_SLIMTX7_ENA_MASK 0x0040
#define MADERA_SLIMTX7_ENA_SHIFT 6
-#define MADERA_SLIMTX7_ENA_WIDTH 1
#define MADERA_SLIMTX6_ENA 0x0020
#define MADERA_SLIMTX6_ENA_MASK 0x0020
#define MADERA_SLIMTX6_ENA_SHIFT 5
-#define MADERA_SLIMTX6_ENA_WIDTH 1
#define MADERA_SLIMTX5_ENA 0x0010
#define MADERA_SLIMTX5_ENA_MASK 0x0010
#define MADERA_SLIMTX5_ENA_SHIFT 4
-#define MADERA_SLIMTX5_ENA_WIDTH 1
#define MADERA_SLIMTX4_ENA 0x0008
#define MADERA_SLIMTX4_ENA_MASK 0x0008
#define MADERA_SLIMTX4_ENA_SHIFT 3
-#define MADERA_SLIMTX4_ENA_WIDTH 1
#define MADERA_SLIMTX3_ENA 0x0004
#define MADERA_SLIMTX3_ENA_MASK 0x0004
#define MADERA_SLIMTX3_ENA_SHIFT 2
-#define MADERA_SLIMTX3_ENA_WIDTH 1
#define MADERA_SLIMTX2_ENA 0x0002
#define MADERA_SLIMTX2_ENA_MASK 0x0002
#define MADERA_SLIMTX2_ENA_SHIFT 1
-#define MADERA_SLIMTX2_ENA_WIDTH 1
#define MADERA_SLIMTX1_ENA 0x0001
#define MADERA_SLIMTX1_ENA_MASK 0x0001
#define MADERA_SLIMTX1_ENA_SHIFT 0
-#define MADERA_SLIMTX1_ENA_WIDTH 1
/* (0x0E10) EQ1_1 */
#define MADERA_EQ1_B1_GAIN_MASK 0xF800
#define MADERA_EQ1_B1_GAIN_SHIFT 11
-#define MADERA_EQ1_B1_GAIN_WIDTH 5
#define MADERA_EQ1_B2_GAIN_MASK 0x07C0
#define MADERA_EQ1_B2_GAIN_SHIFT 6
-#define MADERA_EQ1_B2_GAIN_WIDTH 5
#define MADERA_EQ1_B3_GAIN_MASK 0x003E
#define MADERA_EQ1_B3_GAIN_SHIFT 1
-#define MADERA_EQ1_B3_GAIN_WIDTH 5
#define MADERA_EQ1_ENA 0x0001
#define MADERA_EQ1_ENA_MASK 0x0001
#define MADERA_EQ1_ENA_SHIFT 0
-#define MADERA_EQ1_ENA_WIDTH 1
/* (0x0E11) EQ1_2 */
#define MADERA_EQ1_B4_GAIN_MASK 0xF800
#define MADERA_EQ1_B4_GAIN_SHIFT 11
-#define MADERA_EQ1_B4_GAIN_WIDTH 5
#define MADERA_EQ1_B5_GAIN_MASK 0x07C0
#define MADERA_EQ1_B5_GAIN_SHIFT 6
-#define MADERA_EQ1_B5_GAIN_WIDTH 5
#define MADERA_EQ1_B1_MODE 0x0001
#define MADERA_EQ1_B1_MODE_MASK 0x0001
#define MADERA_EQ1_B1_MODE_SHIFT 0
-#define MADERA_EQ1_B1_MODE_WIDTH 1
/* (0x0E26) EQ2_1 */
#define MADERA_EQ2_B1_GAIN_MASK 0xF800
#define MADERA_EQ2_B1_GAIN_SHIFT 11
-#define MADERA_EQ2_B1_GAIN_WIDTH 5
#define MADERA_EQ2_B2_GAIN_MASK 0x07C0
#define MADERA_EQ2_B2_GAIN_SHIFT 6
-#define MADERA_EQ2_B2_GAIN_WIDTH 5
#define MADERA_EQ2_B3_GAIN_MASK 0x003E
#define MADERA_EQ2_B3_GAIN_SHIFT 1
-#define MADERA_EQ2_B3_GAIN_WIDTH 5
#define MADERA_EQ2_ENA 0x0001
#define MADERA_EQ2_ENA_MASK 0x0001
#define MADERA_EQ2_ENA_SHIFT 0
-#define MADERA_EQ2_ENA_WIDTH 1
/* (0x0E27) EQ2_2 */
#define MADERA_EQ2_B4_GAIN_MASK 0xF800
#define MADERA_EQ2_B4_GAIN_SHIFT 11
-#define MADERA_EQ2_B4_GAIN_WIDTH 5
#define MADERA_EQ2_B5_GAIN_MASK 0x07C0
#define MADERA_EQ2_B5_GAIN_SHIFT 6
-#define MADERA_EQ2_B5_GAIN_WIDTH 5
#define MADERA_EQ2_B1_MODE 0x0001
#define MADERA_EQ2_B1_MODE_MASK 0x0001
#define MADERA_EQ2_B1_MODE_SHIFT 0
-#define MADERA_EQ2_B1_MODE_WIDTH 1
/* (0x0E3C) EQ3_1 */
#define MADERA_EQ3_B1_GAIN_MASK 0xF800
#define MADERA_EQ3_B1_GAIN_SHIFT 11
-#define MADERA_EQ3_B1_GAIN_WIDTH 5
#define MADERA_EQ3_B2_GAIN_MASK 0x07C0
#define MADERA_EQ3_B2_GAIN_SHIFT 6
-#define MADERA_EQ3_B2_GAIN_WIDTH 5
#define MADERA_EQ3_B3_GAIN_MASK 0x003E
#define MADERA_EQ3_B3_GAIN_SHIFT 1
-#define MADERA_EQ3_B3_GAIN_WIDTH 5
#define MADERA_EQ3_ENA 0x0001
#define MADERA_EQ3_ENA_MASK 0x0001
#define MADERA_EQ3_ENA_SHIFT 0
-#define MADERA_EQ3_ENA_WIDTH 1
/* (0x0E3D) EQ3_2 */
#define MADERA_EQ3_B4_GAIN_MASK 0xF800
#define MADERA_EQ3_B4_GAIN_SHIFT 11
-#define MADERA_EQ3_B4_GAIN_WIDTH 5
#define MADERA_EQ3_B5_GAIN_MASK 0x07C0
#define MADERA_EQ3_B5_GAIN_SHIFT 6
-#define MADERA_EQ3_B5_GAIN_WIDTH 5
#define MADERA_EQ3_B1_MODE 0x0001
#define MADERA_EQ3_B1_MODE_MASK 0x0001
#define MADERA_EQ3_B1_MODE_SHIFT 0
-#define MADERA_EQ3_B1_MODE_WIDTH 1
/* (0x0E52) EQ4_1 */
#define MADERA_EQ4_B1_GAIN_MASK 0xF800
#define MADERA_EQ4_B1_GAIN_SHIFT 11
-#define MADERA_EQ4_B1_GAIN_WIDTH 5
#define MADERA_EQ4_B2_GAIN_MASK 0x07C0
#define MADERA_EQ4_B2_GAIN_SHIFT 6
-#define MADERA_EQ4_B2_GAIN_WIDTH 5
#define MADERA_EQ4_B3_GAIN_MASK 0x003E
#define MADERA_EQ4_B3_GAIN_SHIFT 1
-#define MADERA_EQ4_B3_GAIN_WIDTH 5
#define MADERA_EQ4_ENA 0x0001
#define MADERA_EQ4_ENA_MASK 0x0001
#define MADERA_EQ4_ENA_SHIFT 0
-#define MADERA_EQ4_ENA_WIDTH 1
/* (0x0E53) EQ4_2 */
#define MADERA_EQ4_B4_GAIN_MASK 0xF800
#define MADERA_EQ4_B4_GAIN_SHIFT 11
-#define MADERA_EQ4_B4_GAIN_WIDTH 5
#define MADERA_EQ4_B5_GAIN_MASK 0x07C0
#define MADERA_EQ4_B5_GAIN_SHIFT 6
-#define MADERA_EQ4_B5_GAIN_WIDTH 5
#define MADERA_EQ4_B1_MODE 0x0001
#define MADERA_EQ4_B1_MODE_MASK 0x0001
#define MADERA_EQ4_B1_MODE_SHIFT 0
-#define MADERA_EQ4_B1_MODE_WIDTH 1
/* (0x0E80) DRC1_ctrl1 */
#define MADERA_DRC1L_ENA 0x0002
#define MADERA_DRC1L_ENA_MASK 0x0002
#define MADERA_DRC1L_ENA_SHIFT 1
-#define MADERA_DRC1L_ENA_WIDTH 1
#define MADERA_DRC1R_ENA 0x0001
#define MADERA_DRC1R_ENA_MASK 0x0001
#define MADERA_DRC1R_ENA_SHIFT 0
-#define MADERA_DRC1R_ENA_WIDTH 1
/* (0x0E88) DRC2_ctrl1 */
#define MADERA_DRC2L_ENA 0x0002
#define MADERA_DRC2L_ENA_MASK 0x0002
#define MADERA_DRC2L_ENA_SHIFT 1
-#define MADERA_DRC2L_ENA_WIDTH 1
#define MADERA_DRC2R_ENA 0x0001
#define MADERA_DRC2R_ENA_MASK 0x0001
#define MADERA_DRC2R_ENA_SHIFT 0
-#define MADERA_DRC2R_ENA_WIDTH 1
/* (0x0EC0) HPLPF1_1 */
#define MADERA_LHPF1_MODE 0x0002
#define MADERA_LHPF1_MODE_MASK 0x0002
#define MADERA_LHPF1_MODE_SHIFT 1
-#define MADERA_LHPF1_MODE_WIDTH 1
#define MADERA_LHPF1_ENA 0x0001
#define MADERA_LHPF1_ENA_MASK 0x0001
#define MADERA_LHPF1_ENA_SHIFT 0
-#define MADERA_LHPF1_ENA_WIDTH 1
/* (0x0EC1) HPLPF1_2 */
#define MADERA_LHPF1_COEFF_MASK 0xFFFF
#define MADERA_LHPF1_COEFF_SHIFT 0
-#define MADERA_LHPF1_COEFF_WIDTH 16
/* (0x0EC4) HPLPF2_1 */
#define MADERA_LHPF2_MODE 0x0002
#define MADERA_LHPF2_MODE_MASK 0x0002
#define MADERA_LHPF2_MODE_SHIFT 1
-#define MADERA_LHPF2_MODE_WIDTH 1
#define MADERA_LHPF2_ENA 0x0001
#define MADERA_LHPF2_ENA_MASK 0x0001
#define MADERA_LHPF2_ENA_SHIFT 0
-#define MADERA_LHPF2_ENA_WIDTH 1
/* (0x0EC5) HPLPF2_2 */
#define MADERA_LHPF2_COEFF_MASK 0xFFFF
#define MADERA_LHPF2_COEFF_SHIFT 0
-#define MADERA_LHPF2_COEFF_WIDTH 16
/* (0x0EC8) HPLPF3_1 */
#define MADERA_LHPF3_MODE 0x0002
#define MADERA_LHPF3_MODE_MASK 0x0002
#define MADERA_LHPF3_MODE_SHIFT 1
-#define MADERA_LHPF3_MODE_WIDTH 1
#define MADERA_LHPF3_ENA 0x0001
#define MADERA_LHPF3_ENA_MASK 0x0001
#define MADERA_LHPF3_ENA_SHIFT 0
-#define MADERA_LHPF3_ENA_WIDTH 1
/* (0x0EC9) HPLPF3_2 */
#define MADERA_LHPF3_COEFF_MASK 0xFFFF
#define MADERA_LHPF3_COEFF_SHIFT 0
-#define MADERA_LHPF3_COEFF_WIDTH 16
/* (0x0ECC) HPLPF4_1 */
#define MADERA_LHPF4_MODE 0x0002
#define MADERA_LHPF4_MODE_MASK 0x0002
#define MADERA_LHPF4_MODE_SHIFT 1
-#define MADERA_LHPF4_MODE_WIDTH 1
#define MADERA_LHPF4_ENA 0x0001
#define MADERA_LHPF4_ENA_MASK 0x0001
#define MADERA_LHPF4_ENA_SHIFT 0
-#define MADERA_LHPF4_ENA_WIDTH 1
/* (0x0ECD) HPLPF4_2 */
#define MADERA_LHPF4_COEFF_MASK 0xFFFF
#define MADERA_LHPF4_COEFF_SHIFT 0
-#define MADERA_LHPF4_COEFF_WIDTH 16
/* (0x0ED0) ASRC2_ENABLE */
#define MADERA_ASRC2_IN2L_ENA 0x0008
#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008
#define MADERA_ASRC2_IN2L_ENA_SHIFT 3
-#define MADERA_ASRC2_IN2L_ENA_WIDTH 1
#define MADERA_ASRC2_IN2R_ENA 0x0004
#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004
#define MADERA_ASRC2_IN2R_ENA_SHIFT 2
-#define MADERA_ASRC2_IN2R_ENA_WIDTH 1
#define MADERA_ASRC2_IN1L_ENA 0x0002
#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002
#define MADERA_ASRC2_IN1L_ENA_SHIFT 1
-#define MADERA_ASRC2_IN1L_ENA_WIDTH 1
#define MADERA_ASRC2_IN1R_ENA 0x0001
#define MADERA_ASRC2_IN1R_ENA_MASK 0x0001
#define MADERA_ASRC2_IN1R_ENA_SHIFT 0
-#define MADERA_ASRC2_IN1R_ENA_WIDTH 1
/* (0x0ED2) ASRC2_RATE1 */
#define MADERA_ASRC2_RATE1_MASK 0xF800
#define MADERA_ASRC2_RATE1_SHIFT 11
-#define MADERA_ASRC2_RATE1_WIDTH 5
/* (0x0ED3) ASRC2_RATE2 */
#define MADERA_ASRC2_RATE2_MASK 0xF800
#define MADERA_ASRC2_RATE2_SHIFT 11
-#define MADERA_ASRC2_RATE2_WIDTH 5
/* (0x0EE0) ASRC1_ENABLE */
#define MADERA_ASRC1_IN2L_ENA 0x0008
#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008
#define MADERA_ASRC1_IN2L_ENA_SHIFT 3
-#define MADERA_ASRC1_IN2L_ENA_WIDTH 1
#define MADERA_ASRC1_IN2R_ENA 0x0004
#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004
#define MADERA_ASRC1_IN2R_ENA_SHIFT 2
-#define MADERA_ASRC1_IN2R_ENA_WIDTH 1
#define MADERA_ASRC1_IN1L_ENA 0x0002
#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002
#define MADERA_ASRC1_IN1L_ENA_SHIFT 1
-#define MADERA_ASRC1_IN1L_ENA_WIDTH 1
#define MADERA_ASRC1_IN1R_ENA 0x0001
#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001
#define MADERA_ASRC1_IN1R_ENA_SHIFT 0
-#define MADERA_ASRC1_IN1R_ENA_WIDTH 1
/* (0x0EE2) ASRC1_RATE1 */
#define MADERA_ASRC1_RATE1_MASK 0xF800
#define MADERA_ASRC1_RATE1_SHIFT 11
-#define MADERA_ASRC1_RATE1_WIDTH 5
/* (0x0EE3) ASRC1_RATE2 */
#define MADERA_ASRC1_RATE2_MASK 0xF800
#define MADERA_ASRC1_RATE2_SHIFT 11
-#define MADERA_ASRC1_RATE2_WIDTH 5
/* (0x0EF0) - ISRC1 CTRL 1 */
#define MADERA_ISRC1_FSH_MASK 0xF800
#define MADERA_ISRC1_FSH_SHIFT 11
-#define MADERA_ISRC1_FSH_WIDTH 5
#define MADERA_ISRC1_CLK_SEL_MASK 0x0700
#define MADERA_ISRC1_CLK_SEL_SHIFT 8
-#define MADERA_ISRC1_CLK_SEL_WIDTH 3
/* (0x0EF1) ISRC1_CTRL_2 */
#define MADERA_ISRC1_FSL_MASK 0xF800
#define MADERA_ISRC1_FSL_SHIFT 11
-#define MADERA_ISRC1_FSL_WIDTH 5
/* (0x0EF2) ISRC1_CTRL_3 */
#define MADERA_ISRC1_INT1_ENA 0x8000
#define MADERA_ISRC1_INT1_ENA_MASK 0x8000
#define MADERA_ISRC1_INT1_ENA_SHIFT 15
-#define MADERA_ISRC1_INT1_ENA_WIDTH 1
#define MADERA_ISRC1_INT2_ENA 0x4000
#define MADERA_ISRC1_INT2_ENA_MASK 0x4000
#define MADERA_ISRC1_INT2_ENA_SHIFT 14
-#define MADERA_ISRC1_INT2_ENA_WIDTH 1
#define MADERA_ISRC1_INT3_ENA 0x2000
#define MADERA_ISRC1_INT3_ENA_MASK 0x2000
#define MADERA_ISRC1_INT3_ENA_SHIFT 13
-#define MADERA_ISRC1_INT3_ENA_WIDTH 1
#define MADERA_ISRC1_INT4_ENA 0x1000
#define MADERA_ISRC1_INT4_ENA_MASK 0x1000
#define MADERA_ISRC1_INT4_ENA_SHIFT 12
-#define MADERA_ISRC1_INT4_ENA_WIDTH 1
#define MADERA_ISRC1_DEC1_ENA 0x0200
#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC1_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC1_DEC1_ENA_WIDTH 1
#define MADERA_ISRC1_DEC2_ENA 0x0100
#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC1_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC1_DEC2_ENA_WIDTH 1
#define MADERA_ISRC1_DEC3_ENA 0x0080
#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC1_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC1_DEC3_ENA_WIDTH 1
#define MADERA_ISRC1_DEC4_ENA 0x0040
#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC1_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC1_DEC4_ENA_WIDTH 1
#define MADERA_ISRC1_NOTCH_ENA 0x0001
#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC1_NOTCH_ENA_WIDTH 1
/* (0x0EF3) ISRC2_CTRL_1 */
#define MADERA_ISRC2_FSH_MASK 0xF800
#define MADERA_ISRC2_FSH_SHIFT 11
-#define MADERA_ISRC2_FSH_WIDTH 5
#define MADERA_ISRC2_CLK_SEL_MASK 0x0700
#define MADERA_ISRC2_CLK_SEL_SHIFT 8
-#define MADERA_ISRC2_CLK_SEL_WIDTH 3
/* (0x0EF4) ISRC2_CTRL_2 */
#define MADERA_ISRC2_FSL_MASK 0xF800
#define MADERA_ISRC2_FSL_SHIFT 11
-#define MADERA_ISRC2_FSL_WIDTH 5
/* (0x0EF5) ISRC2_CTRL_3 */
#define MADERA_ISRC2_INT1_ENA 0x8000
#define MADERA_ISRC2_INT1_ENA_MASK 0x8000
#define MADERA_ISRC2_INT1_ENA_SHIFT 15
-#define MADERA_ISRC2_INT1_ENA_WIDTH 1
#define MADERA_ISRC2_INT2_ENA 0x4000
#define MADERA_ISRC2_INT2_ENA_MASK 0x4000
#define MADERA_ISRC2_INT2_ENA_SHIFT 14
-#define MADERA_ISRC2_INT2_ENA_WIDTH 1
#define MADERA_ISRC2_INT3_ENA 0x2000
#define MADERA_ISRC2_INT3_ENA_MASK 0x2000
#define MADERA_ISRC2_INT3_ENA_SHIFT 13
-#define MADERA_ISRC2_INT3_ENA_WIDTH 1
#define MADERA_ISRC2_INT4_ENA 0x1000
#define MADERA_ISRC2_INT4_ENA_MASK 0x1000
#define MADERA_ISRC2_INT4_ENA_SHIFT 12
-#define MADERA_ISRC2_INT4_ENA_WIDTH 1
#define MADERA_ISRC2_DEC1_ENA 0x0200
#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC2_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC2_DEC1_ENA_WIDTH 1
#define MADERA_ISRC2_DEC2_ENA 0x0100
#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC2_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC2_DEC2_ENA_WIDTH 1
#define MADERA_ISRC2_DEC3_ENA 0x0080
#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC2_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC2_DEC3_ENA_WIDTH 1
#define MADERA_ISRC2_DEC4_ENA 0x0040
#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC2_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC2_DEC4_ENA_WIDTH 1
#define MADERA_ISRC2_NOTCH_ENA 0x0001
#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC2_NOTCH_ENA_WIDTH 1
/* (0x0EF6) ISRC3_CTRL_1 */
#define MADERA_ISRC3_FSH_MASK 0xF800
#define MADERA_ISRC3_FSH_SHIFT 11
-#define MADERA_ISRC3_FSH_WIDTH 5
#define MADERA_ISRC3_CLK_SEL_MASK 0x0700
#define MADERA_ISRC3_CLK_SEL_SHIFT 8
-#define MADERA_ISRC3_CLK_SEL_WIDTH 3
/* (0x0EF7) ISRC3_CTRL_2 */
#define MADERA_ISRC3_FSL_MASK 0xF800
#define MADERA_ISRC3_FSL_SHIFT 11
-#define MADERA_ISRC3_FSL_WIDTH 5
/* (0x0EF8) ISRC3_CTRL_3 */
#define MADERA_ISRC3_INT1_ENA 0x8000
#define MADERA_ISRC3_INT1_ENA_MASK 0x8000
#define MADERA_ISRC3_INT1_ENA_SHIFT 15
-#define MADERA_ISRC3_INT1_ENA_WIDTH 1
#define MADERA_ISRC3_INT2_ENA 0x4000
#define MADERA_ISRC3_INT2_ENA_MASK 0x4000
#define MADERA_ISRC3_INT2_ENA_SHIFT 14
-#define MADERA_ISRC3_INT2_ENA_WIDTH 1
#define MADERA_ISRC3_INT3_ENA 0x2000
#define MADERA_ISRC3_INT3_ENA_MASK 0x2000
#define MADERA_ISRC3_INT3_ENA_SHIFT 13
-#define MADERA_ISRC3_INT3_ENA_WIDTH 1
#define MADERA_ISRC3_INT4_ENA 0x1000
#define MADERA_ISRC3_INT4_ENA_MASK 0x1000
#define MADERA_ISRC3_INT4_ENA_SHIFT 12
-#define MADERA_ISRC3_INT4_ENA_WIDTH 1
#define MADERA_ISRC3_DEC1_ENA 0x0200
#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC3_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC3_DEC1_ENA_WIDTH 1
#define MADERA_ISRC3_DEC2_ENA 0x0100
#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC3_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC3_DEC2_ENA_WIDTH 1
#define MADERA_ISRC3_DEC3_ENA 0x0080
#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC3_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC3_DEC3_ENA_WIDTH 1
#define MADERA_ISRC3_DEC4_ENA 0x0040
#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC3_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC3_DEC4_ENA_WIDTH 1
#define MADERA_ISRC3_NOTCH_ENA 0x0001
#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC3_NOTCH_ENA_WIDTH 1
/* (0x0EF9) ISRC4_CTRL_1 */
#define MADERA_ISRC4_FSH_MASK 0xF800
#define MADERA_ISRC4_FSH_SHIFT 11
-#define MADERA_ISRC4_FSH_WIDTH 5
#define MADERA_ISRC4_CLK_SEL_MASK 0x0700
#define MADERA_ISRC4_CLK_SEL_SHIFT 8
-#define MADERA_ISRC4_CLK_SEL_WIDTH 3
/* (0x0EFA) ISRC4_CTRL_2 */
#define MADERA_ISRC4_FSL_MASK 0xF800
#define MADERA_ISRC4_FSL_SHIFT 11
-#define MADERA_ISRC4_FSL_WIDTH 5
/* (0x0EFB) ISRC4_CTRL_3 */
#define MADERA_ISRC4_INT1_ENA 0x8000
#define MADERA_ISRC4_INT1_ENA_MASK 0x8000
#define MADERA_ISRC4_INT1_ENA_SHIFT 15
-#define MADERA_ISRC4_INT1_ENA_WIDTH 1
#define MADERA_ISRC4_INT2_ENA 0x4000
#define MADERA_ISRC4_INT2_ENA_MASK 0x4000
#define MADERA_ISRC4_INT2_ENA_SHIFT 14
-#define MADERA_ISRC4_INT2_ENA_WIDTH 1
#define MADERA_ISRC4_INT3_ENA 0x2000
#define MADERA_ISRC4_INT3_ENA_MASK 0x2000
#define MADERA_ISRC4_INT3_ENA_SHIFT 13
-#define MADERA_ISRC4_INT3_ENA_WIDTH 1
#define MADERA_ISRC4_INT4_ENA 0x1000
#define MADERA_ISRC4_INT4_ENA_MASK 0x1000
#define MADERA_ISRC4_INT4_ENA_SHIFT 12
-#define MADERA_ISRC4_INT4_ENA_WIDTH 1
#define MADERA_ISRC4_DEC1_ENA 0x0200
#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC4_DEC1_ENA_SHIFT 9
-#define MADERA_ISRC4_DEC1_ENA_WIDTH 1
#define MADERA_ISRC4_DEC2_ENA 0x0100
#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC4_DEC2_ENA_SHIFT 8
-#define MADERA_ISRC4_DEC2_ENA_WIDTH 1
#define MADERA_ISRC4_DEC3_ENA 0x0080
#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC4_DEC3_ENA_SHIFT 7
-#define MADERA_ISRC4_DEC3_ENA_WIDTH 1
#define MADERA_ISRC4_DEC4_ENA 0x0040
#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC4_DEC4_ENA_SHIFT 6
-#define MADERA_ISRC4_DEC4_ENA_WIDTH 1
#define MADERA_ISRC4_NOTCH_ENA 0x0001
#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0
-#define MADERA_ISRC4_NOTCH_ENA_WIDTH 1
/* (0x0F00) Clock_Control */
#define MADERA_EXT_NG_SEL_CLR 0x0080
#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080
#define MADERA_EXT_NG_SEL_CLR_SHIFT 7
-#define MADERA_EXT_NG_SEL_CLR_WIDTH 1
#define MADERA_EXT_NG_SEL_SET 0x0040
#define MADERA_EXT_NG_SEL_SET_MASK 0x0040
#define MADERA_EXT_NG_SEL_SET_SHIFT 6
-#define MADERA_EXT_NG_SEL_SET_WIDTH 1
#define MADERA_CLK_R_ENA_CLR 0x0020
#define MADERA_CLK_R_ENA_CLR_MASK 0x0020
#define MADERA_CLK_R_ENA_CLR_SHIFT 5
-#define MADERA_CLK_R_ENA_CLR_WIDTH 1
#define MADERA_CLK_R_ENA_SET 0x0010
#define MADERA_CLK_R_ENA_SET_MASK 0x0010
#define MADERA_CLK_R_ENA_SET_SHIFT 4
-#define MADERA_CLK_R_ENA_SET_WIDTH 1
#define MADERA_CLK_NG_ENA_CLR 0x0008
#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008
#define MADERA_CLK_NG_ENA_CLR_SHIFT 3
-#define MADERA_CLK_NG_ENA_CLR_WIDTH 1
#define MADERA_CLK_NG_ENA_SET 0x0004
#define MADERA_CLK_NG_ENA_SET_MASK 0x0004
#define MADERA_CLK_NG_ENA_SET_SHIFT 2
-#define MADERA_CLK_NG_ENA_SET_WIDTH 1
#define MADERA_CLK_L_ENA_CLR 0x0002
#define MADERA_CLK_L_ENA_CLR_MASK 0x0002
#define MADERA_CLK_L_ENA_CLR_SHIFT 1
-#define MADERA_CLK_L_ENA_CLR_WIDTH 1
#define MADERA_CLK_L_ENA_SET 0x0001
#define MADERA_CLK_L_ENA_SET_MASK 0x0001
#define MADERA_CLK_L_ENA_SET_SHIFT 0
-#define MADERA_CLK_L_ENA_SET_WIDTH 1
/* (0x0F01) ANC_SRC */
#define MADERA_IN_RXANCR_SEL_MASK 0x0070
#define MADERA_IN_RXANCR_SEL_SHIFT 4
-#define MADERA_IN_RXANCR_SEL_WIDTH 3
#define MADERA_IN_RXANCL_SEL_MASK 0x0007
#define MADERA_IN_RXANCL_SEL_SHIFT 0
-#define MADERA_IN_RXANCL_SEL_WIDTH 3
/* (0x0F17) FCL_ADC_reformatter_control */
#define MADERA_FCL_MIC_MODE_SEL 0x000C
#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2
-#define MADERA_FCL_MIC_MODE_SEL_WIDTH 2
/* (0x0F73) FCR_ADC_reformatter_control */
#define MADERA_FCR_MIC_MODE_SEL 0x000C
#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
-#define MADERA_FCR_MIC_MODE_SEL_WIDTH 2
/* (0x10C0) AUXPDM1_CTRL_0 */
#define MADERA_AUXPDM1_SRC_MASK 0x0F00
#define MADERA_AUXPDM1_SRC_SHIFT 8
-#define MADERA_AUXPDM1_SRC_WIDTH 4
#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010
#define MADERA_AUXPDM1_TXEDGE_SHIFT 4
-#define MADERA_AUXPDM1_TXEDGE_WIDTH 1
#define MADERA_AUXPDM1_MSTR_MASK 0x0008
#define MADERA_AUXPDM1_MSTR_SHIFT 3
-#define MADERA_AUXPDM1_MSTR_WIDTH 1
#define MADERA_AUXPDM1_ENABLE_MASK 0x0001
#define MADERA_AUXPDM1_ENABLE_SHIFT 0
-#define MADERA_AUXPDM1_ENABLE_WIDTH 1
/* (0x10C1) AUXPDM1_CTRL_1 */
#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000
#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14
-#define MADERA_AUXPDM1_CLK_FREQ_WIDTH 2
/* (0x1480) DFC1_CTRL_W0 */
#define MADERA_DFC1_RATE_MASK 0x007C
#define MADERA_DFC1_RATE_SHIFT 2
-#define MADERA_DFC1_RATE_WIDTH 5
#define MADERA_DFC1_DITH_ENA 0x0002
#define MADERA_DFC1_DITH_ENA_MASK 0x0002
#define MADERA_DFC1_DITH_ENA_SHIFT 1
-#define MADERA_DFC1_DITH_ENA_WIDTH 1
#define MADERA_DFC1_ENA 0x0001
#define MADERA_DFC1_ENA_MASK 0x0001
#define MADERA_DFC1_ENA_SHIFT 0
-#define MADERA_DFC1_ENA_WIDTH 1
/* (0x1482) DFC1_RX_W0 */
#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00
#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8
-#define MADERA_DFC1_RX_DATA_WIDTH_WIDTH 5
#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007
#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0
-#define MADERA_DFC1_RX_DATA_TYPE_WIDTH 3
/* (0x1484) DFC1_TX_W0 */
#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00
#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8
-#define MADERA_DFC1_TX_DATA_WIDTH_WIDTH 5
#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007
#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0
-#define MADERA_DFC1_TX_DATA_TYPE_WIDTH 3
/* (0x1600) ADSP2_IRQ0 */
#define MADERA_DSP_IRQ2 0x0002
@@ -3636,449 +3103,347 @@
#define MADERA_GP1_LVL 0x8000
#define MADERA_GP1_LVL_MASK 0x8000
#define MADERA_GP1_LVL_SHIFT 15
-#define MADERA_GP1_LVL_WIDTH 1
#define MADERA_GP1_OP_CFG 0x4000
#define MADERA_GP1_OP_CFG_MASK 0x4000
#define MADERA_GP1_OP_CFG_SHIFT 14
-#define MADERA_GP1_OP_CFG_WIDTH 1
#define MADERA_GP1_DB 0x2000
#define MADERA_GP1_DB_MASK 0x2000
#define MADERA_GP1_DB_SHIFT 13
-#define MADERA_GP1_DB_WIDTH 1
#define MADERA_GP1_POL 0x1000
#define MADERA_GP1_POL_MASK 0x1000
#define MADERA_GP1_POL_SHIFT 12
-#define MADERA_GP1_POL_WIDTH 1
#define MADERA_GP1_IP_CFG 0x0800
#define MADERA_GP1_IP_CFG_MASK 0x0800
#define MADERA_GP1_IP_CFG_SHIFT 11
-#define MADERA_GP1_IP_CFG_WIDTH 1
#define MADERA_GP1_FN_MASK 0x03FF
#define MADERA_GP1_FN_SHIFT 0
-#define MADERA_GP1_FN_WIDTH 10
/* (0x1701) GPIO1_CTRL_2 */
#define MADERA_GP1_DIR 0x8000
#define MADERA_GP1_DIR_MASK 0x8000
#define MADERA_GP1_DIR_SHIFT 15
-#define MADERA_GP1_DIR_WIDTH 1
#define MADERA_GP1_PU 0x4000
#define MADERA_GP1_PU_MASK 0x4000
#define MADERA_GP1_PU_SHIFT 14
-#define MADERA_GP1_PU_WIDTH 1
#define MADERA_GP1_PD 0x2000
#define MADERA_GP1_PD_MASK 0x2000
#define MADERA_GP1_PD_SHIFT 13
-#define MADERA_GP1_PD_WIDTH 1
#define MADERA_GP1_DRV_STR_MASK 0x1800
#define MADERA_GP1_DRV_STR_SHIFT 11
-#define MADERA_GP1_DRV_STR_WIDTH 2
/* (0x1800) IRQ1_Status_1 */
#define MADERA_CTRLIF_ERR_EINT1 0x1000
#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000
#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12
-#define MADERA_CTRLIF_ERR_EINT1_WIDTH 1
#define MADERA_SYSCLK_FAIL_EINT1 0x0200
#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200
#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9
-#define MADERA_SYSCLK_FAIL_EINT1_WIDTH 1
#define MADERA_CLOCK_DETECT_EINT1 0x0100
#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100
#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8
-#define MADERA_CLOCK_DETECT_EINT1_WIDTH 1
#define MADERA_BOOT_DONE_EINT1 0x0080
#define MADERA_BOOT_DONE_EINT1_MASK 0x0080
#define MADERA_BOOT_DONE_EINT1_SHIFT 7
-#define MADERA_BOOT_DONE_EINT1_WIDTH 1
/* (0x1801) IRQ1_Status_2 */
#define MADERA_FLLAO_LOCK_EINT1 0x0800
#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800
#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11
-#define MADERA_FLLAO_LOCK_EINT1_WIDTH 1
#define MADERA_FLL3_LOCK_EINT1 0x0400
#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400
#define MADERA_FLL3_LOCK_EINT1_SHIFT 10
-#define MADERA_FLL3_LOCK_EINT1_WIDTH 1
#define MADERA_FLL2_LOCK_EINT1 0x0200
#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200
#define MADERA_FLL2_LOCK_EINT1_SHIFT 9
-#define MADERA_FLL2_LOCK_EINT1_WIDTH 1
#define MADERA_FLL1_LOCK_EINT1 0x0100
#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100
#define MADERA_FLL1_LOCK_EINT1_SHIFT 8
-#define MADERA_FLL1_LOCK_EINT1_WIDTH 1
/* (0x1805) IRQ1_Status_6 */
#define MADERA_MICDET2_EINT1 0x0200
#define MADERA_MICDET2_EINT1_MASK 0x0200
#define MADERA_MICDET2_EINT1_SHIFT 9
-#define MADERA_MICDET2_EINT1_WIDTH 1
#define MADERA_MICDET1_EINT1 0x0100
#define MADERA_MICDET1_EINT1_MASK 0x0100
#define MADERA_MICDET1_EINT1_SHIFT 8
-#define MADERA_MICDET1_EINT1_WIDTH 1
#define MADERA_HPDET_EINT1 0x0001
#define MADERA_HPDET_EINT1_MASK 0x0001
#define MADERA_HPDET_EINT1_SHIFT 0
-#define MADERA_HPDET_EINT1_WIDTH 1
/* (0x1806) IRQ1_Status_7 */
#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020
#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020
#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5
-#define MADERA_MICD_CLAMP_FALL_EINT1_WIDTH 1
#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010
#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010
#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4
-#define MADERA_MICD_CLAMP_RISE_EINT1_WIDTH 1
#define MADERA_JD2_FALL_EINT1 0x0008
#define MADERA_JD2_FALL_EINT1_MASK 0x0008
#define MADERA_JD2_FALL_EINT1_SHIFT 3
-#define MADERA_JD2_FALL_EINT1_WIDTH 1
#define MADERA_JD2_RISE_EINT1 0x0004
#define MADERA_JD2_RISE_EINT1_MASK 0x0004
#define MADERA_JD2_RISE_EINT1_SHIFT 2
-#define MADERA_JD2_RISE_EINT1_WIDTH 1
#define MADERA_JD1_FALL_EINT1 0x0002
#define MADERA_JD1_FALL_EINT1_MASK 0x0002
#define MADERA_JD1_FALL_EINT1_SHIFT 1
-#define MADERA_JD1_FALL_EINT1_WIDTH 1
#define MADERA_JD1_RISE_EINT1 0x0001
#define MADERA_JD1_RISE_EINT1_MASK 0x0001
#define MADERA_JD1_RISE_EINT1_SHIFT 0
-#define MADERA_JD1_RISE_EINT1_WIDTH 1
/* (0x1808) IRQ1_Status_9 */
#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800
#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800
#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11
-#define MADERA_ASRC2_IN2_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400
#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400
#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10
-#define MADERA_ASRC2_IN1_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200
#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200
#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9
-#define MADERA_ASRC1_IN2_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100
#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100
#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8
-#define MADERA_ASRC1_IN1_LOCK_EINT1_WIDTH 1
#define MADERA_DRC2_SIG_DET_EINT1 0x0002
#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002
#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1
-#define MADERA_DRC2_SIG_DET_EINT1_WIDTH 1
#define MADERA_DRC1_SIG_DET_EINT1 0x0001
#define MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001
#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0
-#define MADERA_DRC1_SIG_DET_EINT1_WIDTH 1
/* (0x180A) IRQ1_Status_11 */
#define MADERA_DSP_IRQ16_EINT1 0x8000
#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000
#define MADERA_DSP_IRQ16_EINT1_SHIFT 15
-#define MADERA_DSP_IRQ16_EINT1_WIDTH 1
#define MADERA_DSP_IRQ15_EINT1 0x4000
#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000
#define MADERA_DSP_IRQ15_EINT1_SHIFT 14
-#define MADERA_DSP_IRQ15_EINT1_WIDTH 1
#define MADERA_DSP_IRQ14_EINT1 0x2000
#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000
#define MADERA_DSP_IRQ14_EINT1_SHIFT 13
-#define MADERA_DSP_IRQ14_EINT1_WIDTH 1
#define MADERA_DSP_IRQ13_EINT1 0x1000
#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000
#define MADERA_DSP_IRQ13_EINT1_SHIFT 12
-#define MADERA_DSP_IRQ13_EINT1_WIDTH 1
#define MADERA_DSP_IRQ12_EINT1 0x0800
#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800
#define MADERA_DSP_IRQ12_EINT1_SHIFT 11
-#define MADERA_DSP_IRQ12_EINT1_WIDTH 1
#define MADERA_DSP_IRQ11_EINT1 0x0400
#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400
#define MADERA_DSP_IRQ11_EINT1_SHIFT 10
-#define MADERA_DSP_IRQ11_EINT1_WIDTH 1
#define MADERA_DSP_IRQ10_EINT1 0x0200
#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200
#define MADERA_DSP_IRQ10_EINT1_SHIFT 9
-#define MADERA_DSP_IRQ10_EINT1_WIDTH 1
#define MADERA_DSP_IRQ9_EINT1 0x0100
#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100
#define MADERA_DSP_IRQ9_EINT1_SHIFT 8
-#define MADERA_DSP_IRQ9_EINT1_WIDTH 1
#define MADERA_DSP_IRQ8_EINT1 0x0080
#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080
#define MADERA_DSP_IRQ8_EINT1_SHIFT 7
-#define MADERA_DSP_IRQ8_EINT1_WIDTH 1
#define MADERA_DSP_IRQ7_EINT1 0x0040
#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040
#define MADERA_DSP_IRQ7_EINT1_SHIFT 6
-#define MADERA_DSP_IRQ7_EINT1_WIDTH 1
#define MADERA_DSP_IRQ6_EINT1 0x0020
#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020
#define MADERA_DSP_IRQ6_EINT1_SHIFT 5
-#define MADERA_DSP_IRQ6_EINT1_WIDTH 1
#define MADERA_DSP_IRQ5_EINT1 0x0010
#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010
#define MADERA_DSP_IRQ5_EINT1_SHIFT 4
-#define MADERA_DSP_IRQ5_EINT1_WIDTH 1
#define MADERA_DSP_IRQ4_EINT1 0x0008
#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008
#define MADERA_DSP_IRQ4_EINT1_SHIFT 3
-#define MADERA_DSP_IRQ4_EINT1_WIDTH 1
#define MADERA_DSP_IRQ3_EINT1 0x0004
#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004
#define MADERA_DSP_IRQ3_EINT1_SHIFT 2
-#define MADERA_DSP_IRQ3_EINT1_WIDTH 1
#define MADERA_DSP_IRQ2_EINT1 0x0002
#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002
#define MADERA_DSP_IRQ2_EINT1_SHIFT 1
-#define MADERA_DSP_IRQ2_EINT1_WIDTH 1
#define MADERA_DSP_IRQ1_EINT1 0x0001
#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001
#define MADERA_DSP_IRQ1_EINT1_SHIFT 0
-#define MADERA_DSP_IRQ1_EINT1_WIDTH 1
/* (0x180B) IRQ1_Status_12 */
#define MADERA_SPKOUTR_SC_EINT1 0x0080
#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080
#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7
-#define MADERA_SPKOUTR_SC_EINT1_WIDTH 1
#define MADERA_SPKOUTL_SC_EINT1 0x0040
#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040
#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6
-#define MADERA_SPKOUTL_SC_EINT1_WIDTH 1
#define MADERA_HP3R_SC_EINT1 0x0020
#define MADERA_HP3R_SC_EINT1_MASK 0x0020
#define MADERA_HP3R_SC_EINT1_SHIFT 5
-#define MADERA_HP3R_SC_EINT1_WIDTH 1
#define MADERA_HP3L_SC_EINT1 0x0010
#define MADERA_HP3L_SC_EINT1_MASK 0x0010
#define MADERA_HP3L_SC_EINT1_SHIFT 4
-#define MADERA_HP3L_SC_EINT1_WIDTH 1
#define MADERA_HP2R_SC_EINT1 0x0008
#define MADERA_HP2R_SC_EINT1_MASK 0x0008
#define MADERA_HP2R_SC_EINT1_SHIFT 3
-#define MADERA_HP2R_SC_EINT1_WIDTH 1
#define MADERA_HP2L_SC_EINT1 0x0004
#define MADERA_HP2L_SC_EINT1_MASK 0x0004
#define MADERA_HP2L_SC_EINT1_SHIFT 2
-#define MADERA_HP2L_SC_EINT1_WIDTH 1
#define MADERA_HP1R_SC_EINT1 0x0002
#define MADERA_HP1R_SC_EINT1_MASK 0x0002
#define MADERA_HP1R_SC_EINT1_SHIFT 1
-#define MADERA_HP1R_SC_EINT1_WIDTH 1
#define MADERA_HP1L_SC_EINT1 0x0001
#define MADERA_HP1L_SC_EINT1_MASK 0x0001
#define MADERA_HP1L_SC_EINT1_SHIFT 0
-#define MADERA_HP1L_SC_EINT1_WIDTH 1
/* (0x180E) IRQ1_Status_15 */
#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004
#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
-#define MADERA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
#define MADERA_SPK_OVERHEAT_EINT1 0x0002
#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002
#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1
-#define MADERA_SPK_OVERHEAT_EINT1_WIDTH 1
#define MADERA_SPK_SHUTDOWN_EINT1 0x0001
#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001
#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0
-#define MADERA_SPK_SHUTDOWN_EINT1_WIDTH 1
/* (0x1820) - IRQ1 Status 33 */
#define MADERA_DSP7_BUS_ERR_EINT1 0x0040
#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040
#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6
-#define MADERA_DSP7_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP6_BUS_ERR_EINT1 0x0020
#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020
#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5
-#define MADERA_DSP6_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP5_BUS_ERR_EINT1 0x0010
#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010
#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4
-#define MADERA_DSP5_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP4_BUS_ERR_EINT1 0x0008
#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008
#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3
-#define MADERA_DSP4_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP3_BUS_ERR_EINT1 0x0004
#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004
#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2
-#define MADERA_DSP3_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP2_BUS_ERR_EINT1 0x0002
#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002
#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1
-#define MADERA_DSP2_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP1_BUS_ERR_EINT1 0x0001
#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001
#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0
-#define MADERA_DSP1_BUS_ERR_EINT1_WIDTH 1
/* (0x1845) IRQ1_Mask_6 */
#define MADERA_IM_MICDET2_EINT1 0x0200
#define MADERA_IM_MICDET2_EINT1_MASK 0x0200
#define MADERA_IM_MICDET2_EINT1_SHIFT 9
-#define MADERA_IM_MICDET2_EINT1_WIDTH 1
#define MADERA_IM_MICDET1_EINT1 0x0100
#define MADERA_IM_MICDET1_EINT1_MASK 0x0100
#define MADERA_IM_MICDET1_EINT1_SHIFT 8
-#define MADERA_IM_MICDET1_EINT1_WIDTH 1
#define MADERA_IM_HPDET_EINT1 0x0001
#define MADERA_IM_HPDET_EINT1_MASK 0x0001
#define MADERA_IM_HPDET_EINT1_SHIFT 0
-#define MADERA_IM_HPDET_EINT1_WIDTH 1
/* (0x184E) IRQ1_Mask_15 */
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
-#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002
#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002
#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1
-#define MADERA_IM_SPK_OVERHEAT_EINT1_WIDTH 1
#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001
#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001
#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0
-#define MADERA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1
/* (0x1880) - IRQ1 Raw Status 1 */
#define MADERA_CTRLIF_ERR_STS1 0x1000
#define MADERA_CTRLIF_ERR_STS1_MASK 0x1000
#define MADERA_CTRLIF_ERR_STS1_SHIFT 12
-#define MADERA_CTRLIF_ERR_STS1_WIDTH 1
#define MADERA_SYSCLK_FAIL_STS1 0x0200
#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200
#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9
-#define MADERA_SYSCLK_FAIL_STS1_WIDTH 1
#define MADERA_CLOCK_DETECT_STS1 0x0100
#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100
#define MADERA_CLOCK_DETECT_STS1_SHIFT 8
-#define MADERA_CLOCK_DETECT_STS1_WIDTH 1
#define MADERA_BOOT_DONE_STS1 0x0080
#define MADERA_BOOT_DONE_STS1_MASK 0x0080
#define MADERA_BOOT_DONE_STS1_SHIFT 7
-#define MADERA_BOOT_DONE_STS1_WIDTH 1
/* (0x1881) - IRQ1 Raw Status 2 */
#define MADERA_FLL3_LOCK_STS1 0x0400
#define MADERA_FLL3_LOCK_STS1_MASK 0x0400
#define MADERA_FLL3_LOCK_STS1_SHIFT 10
-#define MADERA_FLL3_LOCK_STS1_WIDTH 1
#define MADERA_FLL2_LOCK_STS1 0x0200
#define MADERA_FLL2_LOCK_STS1_MASK 0x0200
#define MADERA_FLL2_LOCK_STS1_SHIFT 9
-#define MADERA_FLL2_LOCK_STS1_WIDTH 1
#define MADERA_FLL1_LOCK_STS1 0x0100
#define MADERA_FLL1_LOCK_STS1_MASK 0x0100
#define MADERA_FLL1_LOCK_STS1_SHIFT 8
-#define MADERA_FLL1_LOCK_STS1_WIDTH 1
/* (0x1886) - IRQ1 Raw Status 7 */
#define MADERA_MICD_CLAMP_FALL_STS1 0x0020
#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020
#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5
-#define MADERA_MICD_CLAMP_FALL_STS1_WIDTH 1
#define MADERA_MICD_CLAMP_RISE_STS1 0x0010
#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010
#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4
-#define MADERA_MICD_CLAMP_RISE_STS1_WIDTH 1
#define MADERA_JD2_FALL_STS1 0x0008
#define MADERA_JD2_FALL_STS1_MASK 0x0008
#define MADERA_JD2_FALL_STS1_SHIFT 3
-#define MADERA_JD2_FALL_STS1_WIDTH 1
#define MADERA_JD2_RISE_STS1 0x0004
#define MADERA_JD2_RISE_STS1_MASK 0x0004
#define MADERA_JD2_RISE_STS1_SHIFT 2
-#define MADERA_JD2_RISE_STS1_WIDTH 1
#define MADERA_JD1_FALL_STS1 0x0002
#define MADERA_JD1_FALL_STS1_MASK 0x0002
#define MADERA_JD1_FALL_STS1_SHIFT 1
-#define MADERA_JD1_FALL_STS1_WIDTH 1
#define MADERA_JD1_RISE_STS1 0x0001
#define MADERA_JD1_RISE_STS1_MASK 0x0001
#define MADERA_JD1_RISE_STS1_SHIFT 0
-#define MADERA_JD1_RISE_STS1_WIDTH 1
/* (0x188E) - IRQ1 Raw Status 15 */
#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004
#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2
-#define MADERA_SPK_OVERHEAT_WARN_STS1_WIDTH 1
#define MADERA_SPK_OVERHEAT_STS1 0x0002
#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002
#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1
-#define MADERA_SPK_OVERHEAT_STS1_WIDTH 1
#define MADERA_SPK_SHUTDOWN_STS1 0x0001
#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001
#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0
-#define MADERA_SPK_SHUTDOWN_STS1_WIDTH 1
/* (0x1A06) Interrupt_Debounce_7 */
#define MADERA_MICD_CLAMP_DB 0x0010
#define MADERA_MICD_CLAMP_DB_MASK 0x0010
#define MADERA_MICD_CLAMP_DB_SHIFT 4
-#define MADERA_MICD_CLAMP_DB_WIDTH 1
#define MADERA_JD2_DB 0x0004
#define MADERA_JD2_DB_MASK 0x0004
#define MADERA_JD2_DB_SHIFT 2
-#define MADERA_JD2_DB_WIDTH 1
#define MADERA_JD1_DB 0x0001
#define MADERA_JD1_DB_MASK 0x0001
#define MADERA_JD1_DB_SHIFT 0
-#define MADERA_JD1_DB_WIDTH 1
/* (0x1A0E) Interrupt_Debounce_15 */
#define MADERA_SPK_OVERHEAT_WARN_DB 0x0004
#define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2
-#define MADERA_SPK_OVERHEAT_WARN_DB_WIDTH 1
#define MADERA_SPK_OVERHEAT_DB 0x0002
#define MADERA_SPK_OVERHEAT_DB_MASK 0x0002
#define MADERA_SPK_OVERHEAT_DB_SHIFT 1
-#define MADERA_SPK_OVERHEAT_DB_WIDTH 1
/* (0x1A80) IRQ1_CTRL */
#define MADERA_IM_IRQ1 0x0800
#define MADERA_IM_IRQ1_MASK 0x0800
#define MADERA_IM_IRQ1_SHIFT 11
-#define MADERA_IM_IRQ1_WIDTH 1
#define MADERA_IRQ_POL 0x0400
#define MADERA_IRQ_POL_MASK 0x0400
#define MADERA_IRQ_POL_SHIFT 10
-#define MADERA_IRQ_POL_WIDTH 1
/* (0x20004) OTP_HPDET_Cal_1 */
#define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000
#define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000
#define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24
-#define MADERA_OTP_HPDET_CALIB_OFFSET_11_WIDTH 8
#define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000
#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000
#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16
-#define MADERA_OTP_HPDET_CALIB_OFFSET_10_WIDTH 8
#define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00
#define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00
#define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8
-#define MADERA_OTP_HPDET_CALIB_OFFSET_01_WIDTH 8
#define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF
#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF
#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0
-#define MADERA_OTP_HPDET_CALIB_OFFSET_00_WIDTH 8
/* (0x20006) OTP_HPDET_Cal_2 */
#define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00
#define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00
#define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8
-#define MADERA_OTP_HPDET_GRADIENT_1X_WIDTH 8
#define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF
#define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF
#define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0
-#define MADERA_OTP_HPDET_GRADIENT_0X_WIDTH 8
#endif
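
The hunks above systematically drop the MADERA_*_WIDTH macros, leaving each bitfield described by its _MASK and _SHIFT pair alone. A minimal sketch of how a driver can still extract and insert such a field using only those two macros follows; the helper names are hypothetical and not part of the patch.

/*
 * Hedged sketch, assuming only the _MASK/_SHIFT pairs kept by this
 * patch.  Helper names are hypothetical.
 */
#include <linux/mfd/madera/registers.h>

static inline unsigned int madera_get_aif1_rate(unsigned int reg_val)
{
	/* Isolate the field bits, then align them to bit 0. */
	return (reg_val & MADERA_AIF1_RATE_MASK) >> MADERA_AIF1_RATE_SHIFT;
}

static inline unsigned int madera_set_aif1_rate(unsigned int reg_val,
						unsigned int rate)
{
	/* Clear the field, then insert the new value. */
	reg_val &= ~MADERA_AIF1_RATE_MASK;
	reg_val |= (rate << MADERA_AIF1_RATE_SHIFT) & MADERA_AIF1_RATE_MASK;
	return reg_val;
}

The _WIDTH value itself remains recoverable from the mask (hweight of _MASK), which is presumably why the macros could be dropped without loss.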
diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h
new file mode 100644
index 000000000000..762a7d40c843
--- /dev/null
+++ b/include/linux/mfd/max5970.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device driver for regulators in MAX5970 and MAX5978 ICs
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#ifndef _MFD_MAX5970_H
+#define _MFD_MAX5970_H
+
+#include <linux/regmap.h>
+
+#define MAX5970_NUM_SWITCHES 2
+#define MAX5978_NUM_SWITCHES 1
+#define MAX5970_NUM_LEDS 4
+
+struct max5970_data {
+ int num_switches;
+ u32 irng[MAX5970_NUM_SWITCHES];
+ u32 mon_rng[MAX5970_NUM_SWITCHES];
+ u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES];
+};
+
+enum max5970_chip_type {
+ TYPE_MAX5978 = 1,
+ TYPE_MAX5970,
+};
+
+#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
+#define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_H(ch) (0x02 + (ch) * 4)
+#define MAX5970_REG_MON_RANGE 0x18
+#define MAX5970_MON_MASK 0x3
+#define MAX5970_MON(reg, ch) (((reg) >> ((ch) * 2)) & MAX5970_MON_MASK)
+#define MAX5970_MON_MAX_RANGE_UV 16000000
+
+#define MAX5970_REG_CH_UV_WARN_H(ch) (0x1A + (ch) * 10)
+#define MAX5970_REG_CH_UV_WARN_L(ch) (0x1B + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_H(ch) (0x1C + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_L(ch) (0x1D + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_H(ch) (0x1E + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_L(ch) (0x1F + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_H(ch) (0x20 + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_L(ch) (0x21 + (ch) * 10)
+
+#define MAX5970_VAL2REG_H(x) (((x) >> 2) & 0xFF)
+#define MAX5970_VAL2REG_L(x) ((x) & 0x3)
+
+#define MAX5970_REG_DAC_FAST(ch) (0x2E + (ch))
+
+#define MAX5970_FAST2SLOW_RATIO 200
+
+#define MAX5970_REG_STATUS0 0x31
+#define MAX5970_CB_IFAULTF(ch) (1 << (ch))
+#define MAX5970_CB_IFAULTS(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_STATUS1 0x32
+#define STATUS1_PROT_MASK 0x3
+#define STATUS1_PROT(reg) \
+ (((reg) >> 6) & STATUS1_PROT_MASK)
+#define STATUS1_PROT_SHUTDOWN 0
+#define STATUS1_PROT_CLEAR_PG 1
+#define STATUS1_PROT_ALERT_ONLY 2
+
+#define MAX5970_REG_STATUS2 0x33
+#define MAX5970_IRNG_MASK 0x3
+#define MAX5970_IRNG(reg, ch) \
+ (((reg) >> ((ch) * 2)) & MAX5970_IRNG_MASK)
+
+#define MAX5970_REG_STATUS3 0x34
+#define MAX5970_STATUS3_ALERT BIT(4)
+#define MAX5970_STATUS3_PG(ch) BIT(ch)
+
+#define MAX5970_REG_FAULT0 0x35
+#define UV_STATUS_WARN(ch) (1 << (ch))
+#define UV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT1 0x36
+#define OV_STATUS_WARN(ch) (1 << (ch))
+#define OV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT2 0x37
+#define OC_STATUS_WARN(ch) (1 << (ch))
+
+#define MAX5970_REG_CHXEN 0x3b
+#define CHXEN(ch) (3 << ((ch) * 2))
+
+#define MAX5970_REG_LED_FLASH 0x43
+
+#define MAX_REGISTERS 0x49
+#define ADC_MASK 0x3FF
+
+#endif /* _MFD_MAX5970_H */
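
The new header computes per-channel register addresses from a channel index and packs two-bit per-channel fields into shared status registers. A minimal usage sketch, assuming a regmap for the chip is already set up; max5970_read_irng() is a hypothetical helper, not part of the header.

/*
 * Hedged sketch using only macros defined in max5970.h; the helper
 * name is hypothetical.
 */
#include <linux/mfd/max5970.h>
#include <linux/regmap.h>

static int max5970_read_irng(struct regmap *regmap, int ch, u32 *irng)
{
	unsigned int reg;
	int ret;

	ret = regmap_read(regmap, MAX5970_REG_STATUS2, &reg);
	if (ret)
		return ret;

	/* Each channel owns two bits of STATUS2; MAX5970_IRNG() picks them. */
	*irng = MAX5970_IRNG(reg, ch);
	return 0;
}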
diff --git a/include/linux/mfd/max77541.h b/include/linux/mfd/max77541.h
new file mode 100644
index 000000000000..fe5c0a3dc637
--- /dev/null
+++ b/include/linux/mfd/max77541.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MFD_MAX77541_H
+#define __MFD_MAX77541_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* REGISTERS */
+#define MAX77541_REG_INT_SRC 0x00
+#define MAX77541_REG_INT_SRC_M 0x01
+
+#define MAX77541_BIT_INT_SRC_TOPSYS BIT(0)
+#define MAX77541_BIT_INT_SRC_BUCK BIT(1)
+
+#define MAX77541_REG_TOPSYS_INT 0x02
+#define MAX77541_REG_TOPSYS_INT_M 0x03
+
+#define MAX77541_BIT_TOPSYS_INT_TJ_120C BIT(0)
+#define MAX77541_BIT_TOPSYS_INT_TJ_140C BIT(1)
+#define MAX77541_BIT_TOPSYS_INT_TSHDN BIT(2)
+#define MAX77541_BIT_TOPSYS_INT_UVLO BIT(3)
+#define MAX77541_BIT_TOPSYS_INT_ALT_SWO BIT(4)
+#define MAX77541_BIT_TOPSYS_INT_EXT_FREQ_DET BIT(5)
+
+/* REGULATORS */
+#define MAX77541_REG_BUCK_INT 0x20
+#define MAX77541_REG_BUCK_INT_M 0x21
+
+#define MAX77541_BIT_BUCK_INT_M1_POK_FLT BIT(0)
+#define MAX77541_BIT_BUCK_INT_M2_POK_FLT BIT(1)
+#define MAX77541_BIT_BUCK_INT_M1_SCFLT BIT(4)
+#define MAX77541_BIT_BUCK_INT_M2_SCFLT BIT(5)
+
+#define MAX77541_REG_EN_CTRL 0x0B
+
+#define MAX77541_BIT_M1_EN BIT(0)
+#define MAX77541_BIT_M2_EN BIT(1)
+
+#define MAX77541_REG_M1_VOUT 0x23
+#define MAX77541_REG_M2_VOUT 0x33
+
+#define MAX77541_BITS_MX_VOUT GENMASK(7, 0)
+
+#define MAX77541_REG_M1_CFG1 0x25
+#define MAX77541_REG_M2_CFG1 0x35
+
+#define MAX77541_BITS_MX_CFG1_RNG GENMASK(7, 6)
+
+/* ADC */
+#define MAX77541_REG_ADC_INT 0x70
+#define MAX77541_REG_ADC_INT_M 0x71
+
+#define MAX77541_BIT_ADC_INT_CH1_I BIT(0)
+#define MAX77541_BIT_ADC_INT_CH2_I BIT(1)
+#define MAX77541_BIT_ADC_INT_CH3_I BIT(2)
+#define MAX77541_BIT_ADC_INT_CH6_I BIT(5)
+
+#define MAX77541_REG_ADC_DATA_CH1 0x72
+#define MAX77541_REG_ADC_DATA_CH2 0x73
+#define MAX77541_REG_ADC_DATA_CH3 0x74
+#define MAX77541_REG_ADC_DATA_CH6 0x77
+
+/* INTERRUPT MASKS */
+#define MAX77541_REG_INT_SRC_MASK 0x00
+#define MAX77541_REG_TOPSYS_INT_MASK 0x00
+#define MAX77541_REG_BUCK_INT_MASK 0x00
+
+#define MAX77541_MAX_REGULATORS 2
+
+enum max7754x_ids {
+ MAX77540 = 1,
+ MAX77541,
+};
+
+struct regmap;
+struct regmap_irq_chip_data;
+struct i2c_client;
+
+struct max77541 {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ enum max7754x_ids id;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_buck;
+ struct regmap_irq_chip_data *irq_topsys;
+ struct regmap_irq_chip_data *irq_adc;
+};
+
+#endif /* __MFD_MAX77541_H */
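
max77541.h builds its field masks with GENMASK(), so FIELD_GET() from <linux/bitfield.h> pairs naturally with them. A hedged sketch of decoding the BUCK output-voltage range; max77541_get_range() is a hypothetical helper, not part of the header.

/*
 * Hedged sketch; assumes a populated struct max77541 as defined above.
 */
#include <linux/bitfield.h>
#include <linux/mfd/max77541.h>
#include <linux/regmap.h>

static int max77541_get_range(struct max77541 *chip, unsigned int *rng)
{
	unsigned int val;
	int ret;

	ret = regmap_read(chip->regmap, MAX77541_REG_M1_CFG1, &val);
	if (ret)
		return ret;

	/* Bits [7:6] select the BUCK output-voltage range. */
	*rng = FIELD_GET(MAX77541_BITS_MX_CFG1_RNG, val);
	return 0;
}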
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 833e578e051e..ea635d12a741 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -133,35 +133,35 @@ enum max77686_pmic_reg {
/* Reserved: 0x7A-0x7D */
MAX77686_REG_BBAT_CHG = 0x7E,
- MAX77686_REG_32KHZ = 0x7F,
+ MAX77686_REG_32KHZ = 0x7F,
MAX77686_REG_PMIC_END = 0x80,
};
enum max77686_rtc_reg {
- MAX77686_RTC_INT = 0x00,
- MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
MAX77686_RTC_CONTROLM = 0x02,
MAX77686_RTC_CONTROL = 0x03,
MAX77686_RTC_UPDATE0 = 0x04,
/* Reserved: 0x5 */
MAX77686_WTSR_SMPL_CNTL = 0x06,
- MAX77686_RTC_SEC = 0x07,
- MAX77686_RTC_MIN = 0x08,
- MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
MAX77686_RTC_WEEKDAY = 0x0A,
- MAX77686_RTC_MONTH = 0x0B,
- MAX77686_RTC_YEAR = 0x0C,
- MAX77686_RTC_DATE = 0x0D,
- MAX77686_ALARM1_SEC = 0x0E,
- MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_MONTHDAY = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
MAX77686_ALARM1_HOUR = 0x10,
MAX77686_ALARM1_WEEKDAY = 0x11,
MAX77686_ALARM1_MONTH = 0x12,
MAX77686_ALARM1_YEAR = 0x13,
MAX77686_ALARM1_DATE = 0x14,
- MAX77686_ALARM2_SEC = 0x15,
- MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
MAX77686_ALARM2_HOUR = 0x17,
MAX77686_ALARM2_WEEKDAY = 0x18,
MAX77686_ALARM2_MONTH = 0x19,
@@ -352,7 +352,7 @@ enum max77802_rtc_reg {
MAX77802_RTC_WEEKDAY = 0xCA,
MAX77802_RTC_MONTH = 0xCB,
MAX77802_RTC_YEAR = 0xCC,
- MAX77802_RTC_DATE = 0xCD,
+ MAX77802_RTC_MONTHDAY = 0xCD,
MAX77802_RTC_AE1 = 0xCE,
MAX77802_ALARM1_SEC = 0xCF,
MAX77802_ALARM1_MIN = 0xD0,
@@ -441,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 311f7d3d2323..54444ff2a5de 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -405,7 +405,7 @@ enum max77693_haptic_reg {
MAX77693_HAPTIC_REG_END,
};
-/* max77693-pmic LSCNFG configuraton register */
+/* max77693-pmic LSCNFG configuration register */
#define MAX77693_PMIC_LOW_SYS_MASK 0x80
#define MAX77693_PMIC_LOW_SYS_SHIFT 7
diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h
new file mode 100644
index 000000000000..7947e0d697a5
--- /dev/null
+++ b/include/linux/mfd/max77714.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Maxim MAX77714 Register and data structures definition.
+ *
+ * Copyright (C) 2022 Luca Ceresoli
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77714_H_
+#define __LINUX_MFD_MAX77714_H_
+
+#include <linux/bits.h>
+
+#define MAX77714_INT_TOP 0x00
+#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */
+
+#define MAX77714_INT_TOP_ONOFF BIT(1)
+#define MAX77714_INT_TOP_RTC BIT(3)
+#define MAX77714_INT_TOP_GPIO BIT(4)
+#define MAX77714_INT_TOP_LDO BIT(5)
+#define MAX77714_INT_TOP_SD BIT(6)
+#define MAX77714_INT_TOP_GLBL BIT(7)
+
+#define MAX77714_32K_STATUS 0x30
+#define MAX77714_32K_STATUS_SIOSCOK BIT(5)
+#define MAX77714_32K_STATUS_XOSCOK BIT(4)
+#define MAX77714_32K_STATUS_32KSOURCE BIT(3)
+#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3
+#define MAX77714_32K_STATUS_32KLOAD_SHF 1
+#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0)
+
+#define MAX77714_32K_CONFIG 0x31
+#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4)
+
+#define MAX77714_CNFG_GLBL2 0x91
+#define MAX77714_WDTEN BIT(2)
+#define MAX77714_WDTSLPC BIT(3)
+#define MAX77714_TWD_MASK 0x3
+#define MAX77714_TWD_2s 0x0
+#define MAX77714_TWD_16s 0x1
+#define MAX77714_TWD_64s 0x2
+#define MAX77714_TWD_128s 0x3
+
+#define MAX77714_CNFG_GLBL3 0x92
+#define MAX77714_WDTC BIT(0)
+
+#define MAX77714_CNFG2_ONOFF 0x94
+#define MAX77714_WD_RST_WK BIT(5)
+
+/* Interrupts */
+enum {
+ MAX77714_IRQ_TOP_ONOFF,
+ MAX77714_IRQ_TOP_RTC, /* Real-time clock */
+ MAX77714_IRQ_TOP_GPIO, /* GPIOs */
+ MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */
+ MAX77714_IRQ_TOP_SD, /* Step-down regulators */
+ MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */
+};
+
+#endif /* __LINUX_MFD_MAX77714_H_ */
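
An illustrative sketch (not part of the patch) of the watchdog defines above; "map" is a hypothetical regmap for the chip:

#include <linux/regmap.h>
#include <linux/mfd/max77714.h>

static int max77714_wdt_start_128s(struct regmap *map)
{
	int ret;

	/* the TWD timeout field occupies bits [1:0] of CNFG_GLBL2 */
	ret = regmap_update_bits(map, MAX77714_CNFG_GLBL2,
				 MAX77714_TWD_MASK, MAX77714_TWD_128s);
	if (ret)
		return ret;

	/* then enable the watchdog itself */
	return regmap_update_bits(map, MAX77714_CNFG_GLBL2,
				  MAX77714_WDTEN, MAX77714_WDTEN);
}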
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 0bc7454c4dbe..2fb4db67f110 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -198,7 +198,7 @@ enum max77843_irq_muic {
#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
-/* Max77843 charger insterrupts */
+/* Max77843 charger interrupts */
#define MAX77843_CHG_BYP_I BIT(0)
#define MAX77843_CHG_BATP_I BIT(2)
#define MAX77843_CHG_BAT_I BIT(3)
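
A short decoding sketch for the charger interrupt bits above (an assumed helper, not part of the patch):

#include <linux/types.h>
#include <linux/mfd/max77843-private.h>

/* true if the status word read from the charger reports a battery event */
static bool max77843_battery_event(unsigned int chg_int)
{
	return chg_int & (MAX77843_CHG_BATP_I | MAX77843_CHG_BAT_I);
}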
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index e955e2f0a2cc..5c2cc1103437 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -14,13 +14,13 @@
* others and b) it can be enabled simply by using MAX17042 driver.
*/
-#ifndef __LINUX_MFD_MAX8998_H
-#define __LINUX_MFD_MAX8998_H
+#ifndef __LINUX_MFD_MAX8997_H
+#define __LINUX_MFD_MAX8997_H
#include <linux/regulator/consumer.h>
/* MAX8997/8966 regulator IDs */
-enum max8998_regulators {
+enum max8997_regulators {
MAX8997_LDO1 = 0,
MAX8997_LDO2,
MAX8997_LDO3,
@@ -110,8 +110,6 @@ enum max8997_haptic_pwm_divisor {
/**
* max8997_haptic_platform_data
- * @pwm_channel_id: channel number of PWM device
- * valid for MAX8997_EXTERNAL_MODE
* @pwm_period: period in nano second for PWM device
* valid for MAX8997_EXTERNAL_MODE
* @type: motor type
@@ -128,7 +126,6 @@ enum max8997_haptic_pwm_divisor {
* [0 - 255]: available period
*/
struct max8997_haptic_platform_data {
- unsigned int pwm_channel_id;
unsigned int pwm_period;
enum max8997_haptic_motor_type type;
@@ -181,7 +178,6 @@ struct max8997_platform_data {
*
*/
bool ignore_gpiodvs_side_effect;
- int buck125_gpios[3]; /* GPIO of [0]SET1, [1]SET2, [2]SET3 */
int buck125_default_idx; /* Default value of SET1, 2, 3 */
unsigned int buck1_voltage[8]; /* buckx_voltage in uV */
bool buck1_gpiodvs;
@@ -207,4 +203,4 @@ struct max8997_platform_data {
struct max8997_led_platform_data *led_pdata;
};
-#endif /* __LINUX_MFD_MAX8998_H */
+#endif /* __LINUX_MFD_MAX8997_H */
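
A board-file sketch after this change: pwm_channel_id is gone, so the platform data carries only the PWM period and the motor type (the MAX8997_HAPTIC_ERM enumerator is assumed from the same header):

#include <linux/mfd/max8997.h>

static struct max8997_haptic_platform_data example_haptic_pdata = {
	.pwm_period	= 20000000,		/* 20 ms, in nanoseconds */
	.type		= MAX8997_HAPTIC_ERM,	/* assumed motor-type value */
};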
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index 79c020bd0c70..a054e55c8646 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -65,10 +65,7 @@ struct max8998_regulator_data {
* be other than the preset values.
* @buck1_voltage: BUCK1 DVS mode 1 voltage registers
* @buck2_voltage: BUCK2 DVS mode 2 voltage registers
- * @buck1_set1: BUCK1 gpio pin 1 to set output voltage
- * @buck1_set2: BUCK1 gpio pin 2 to set output voltage
* @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
- * @buck2_set3: BUCK2 gpio pin to set output voltage
* @buck2_default_idx: Default for BUCK2 gpio pin.
* @wakeup: Allow to wake up from suspend
* @rtc_delay: LP3974 RTC chip bug that requires delay after a register
@@ -91,10 +88,7 @@ struct max8998_platform_data {
bool buck_voltage_lock;
int buck1_voltage[4];
int buck2_voltage[2];
- int buck1_set1;
- int buck1_set2;
int buck1_default_idx;
- int buck2_set3;
int buck2_default_idx;
bool wakeup;
bool rtc_delay;
diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h
new file mode 100644
index 000000000000..df8e6b1e4bc1
--- /dev/null
+++ b/include/linux/mfd/mt6331/core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_CORE_H__
+#define __MFD_MT6331_CORE_H__
+
+enum mt6331_irq_status_numbers {
+ MT6331_IRQ_STATUS_PWRKEY = 0,
+ MT6331_IRQ_STATUS_HOMEKEY,
+ MT6331_IRQ_STATUS_CHRDET,
+ MT6331_IRQ_STATUS_THR_H,
+ MT6331_IRQ_STATUS_THR_L,
+ MT6331_IRQ_STATUS_BAT_H,
+ MT6331_IRQ_STATUS_BAT_L,
+ MT6331_IRQ_STATUS_RTC,
+ MT6331_IRQ_STATUS_AUDIO,
+ MT6331_IRQ_STATUS_MAD,
+ MT6331_IRQ_STATUS_ACCDET,
+ MT6331_IRQ_STATUS_ACCDET_EINT,
+ MT6331_IRQ_STATUS_ACCDET_NEGV = 12,
+ MT6331_IRQ_STATUS_VDVFS11_OC = 16,
+ MT6331_IRQ_STATUS_VDVFS12_OC,
+ MT6331_IRQ_STATUS_VDVFS13_OC,
+ MT6331_IRQ_STATUS_VDVFS14_OC,
+ MT6331_IRQ_STATUS_GPU_OC,
+ MT6331_IRQ_STATUS_VCORE1_OC,
+ MT6331_IRQ_STATUS_VCORE2_OC,
+ MT6331_IRQ_STATUS_VIO18_OC,
+ MT6331_IRQ_STATUS_LDO_OC,
+ MT6331_IRQ_STATUS_NR,
+};
+
+#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY
+#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1)
+#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC
+#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1)
+
+#endif /* __MFD_MT6331_CORE_H__ */
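
A hedged sketch of how the CON0/CON1 bases above split the flat status numbering into two 16-bit interrupt banks:

#include <linux/mfd/mt6331/core.h>

static void mt6331_irq_to_bank(unsigned int hwirq,
			       unsigned int *bank, unsigned int *bit)
{
	if (hwirq >= MT6331_IRQ_CON1_BASE) {
		*bank = 1;	/* served by MT6331_INT_CON1 */
		*bit = hwirq - MT6331_IRQ_CON1_BASE;
	} else {
		*bank = 0;	/* served by MT6331_INT_CON0 */
		*bit = hwirq - MT6331_IRQ_CON0_BASE;
	}
}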
diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h
new file mode 100644
index 000000000000..e2be6bccd1a7
--- /dev/null
+++ b/include/linux/mfd/mt6331/registers.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_REGISTERS_H__
+#define __MFD_MT6331_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6331_STRUP_CON0 0x0
+#define MT6331_STRUP_CON2 0x2
+#define MT6331_STRUP_CON3 0x4
+#define MT6331_STRUP_CON4 0x6
+#define MT6331_STRUP_CON5 0x8
+#define MT6331_STRUP_CON6 0xA
+#define MT6331_STRUP_CON7 0xC
+#define MT6331_STRUP_CON8 0xE
+#define MT6331_STRUP_CON9 0x10
+#define MT6331_STRUP_CON10 0x12
+#define MT6331_STRUP_CON11 0x14
+#define MT6331_STRUP_CON12 0x16
+#define MT6331_STRUP_CON13 0x18
+#define MT6331_STRUP_CON14 0x1A
+#define MT6331_STRUP_CON15 0x1C
+#define MT6331_STRUP_CON16 0x1E
+#define MT6331_STRUP_CON17 0x20
+#define MT6331_STRUP_CON18 0x22
+#define MT6331_HWCID 0x100
+#define MT6331_SWCID 0x102
+#define MT6331_EXT_PMIC_STATUS 0x104
+#define MT6331_TOP_CON 0x106
+#define MT6331_TEST_OUT 0x108
+#define MT6331_TEST_CON0 0x10A
+#define MT6331_TEST_CON1 0x10C
+#define MT6331_TESTMODE_SW 0x10E
+#define MT6331_EN_STATUS0 0x110
+#define MT6331_EN_STATUS1 0x112
+#define MT6331_EN_STATUS2 0x114
+#define MT6331_OCSTATUS0 0x116
+#define MT6331_OCSTATUS1 0x118
+#define MT6331_OCSTATUS2 0x11A
+#define MT6331_PGSTATUS 0x11C
+#define MT6331_TOPSTATUS 0x11E
+#define MT6331_TDSEL_CON 0x120
+#define MT6331_RDSEL_CON 0x122
+#define MT6331_SMT_CON0 0x124
+#define MT6331_SMT_CON1 0x126
+#define MT6331_SMT_CON2 0x128
+#define MT6331_DRV_CON0 0x12A
+#define MT6331_DRV_CON1 0x12C
+#define MT6331_DRV_CON2 0x12E
+#define MT6331_DRV_CON3 0x130
+#define MT6331_TOP_STATUS 0x132
+#define MT6331_TOP_STATUS_SET 0x134
+#define MT6331_TOP_STATUS_CLR 0x136
+#define MT6331_TOP_CKPDN_CON0 0x138
+#define MT6331_TOP_CKPDN_CON0_SET 0x13A
+#define MT6331_TOP_CKPDN_CON0_CLR 0x13C
+#define MT6331_TOP_CKPDN_CON1 0x13E
+#define MT6331_TOP_CKPDN_CON1_SET 0x140
+#define MT6331_TOP_CKPDN_CON1_CLR 0x142
+#define MT6331_TOP_CKPDN_CON2 0x144
+#define MT6331_TOP_CKPDN_CON2_SET 0x146
+#define MT6331_TOP_CKPDN_CON2_CLR 0x148
+#define MT6331_TOP_CKSEL_CON 0x14A
+#define MT6331_TOP_CKSEL_CON_SET 0x14C
+#define MT6331_TOP_CKSEL_CON_CLR 0x14E
+#define MT6331_TOP_CKHWEN_CON 0x150
+#define MT6331_TOP_CKHWEN_CON_SET 0x152
+#define MT6331_TOP_CKHWEN_CON_CLR 0x154
+#define MT6331_TOP_CKTST_CON0 0x156
+#define MT6331_TOP_CKTST_CON1 0x158
+#define MT6331_TOP_CLKSQ 0x15A
+#define MT6331_TOP_CLKSQ_SET 0x15C
+#define MT6331_TOP_CLKSQ_CLR 0x15E
+#define MT6331_TOP_RST_CON 0x160
+#define MT6331_TOP_RST_CON_SET 0x162
+#define MT6331_TOP_RST_CON_CLR 0x164
+#define MT6331_TOP_RST_MISC 0x166
+#define MT6331_TOP_RST_MISC_SET 0x168
+#define MT6331_TOP_RST_MISC_CLR 0x16A
+#define MT6331_INT_CON0 0x16C
+#define MT6331_INT_CON0_SET 0x16E
+#define MT6331_INT_CON0_CLR 0x170
+#define MT6331_INT_CON1 0x172
+#define MT6331_INT_CON1_SET 0x174
+#define MT6331_INT_CON1_CLR 0x176
+#define MT6331_INT_MISC_CON 0x178
+#define MT6331_INT_MISC_CON_SET 0x17A
+#define MT6331_INT_MISC_CON_CLR 0x17C
+#define MT6331_INT_STATUS_CON0 0x17E
+#define MT6331_INT_STATUS_CON1 0x180
+#define MT6331_OC_GEAR_0 0x182
+#define MT6331_FQMTR_CON0 0x184
+#define MT6331_FQMTR_CON1 0x186
+#define MT6331_FQMTR_CON2 0x188
+#define MT6331_RG_SPI_CON 0x18A
+#define MT6331_DEW_DIO_EN 0x18C
+#define MT6331_DEW_READ_TEST 0x18E
+#define MT6331_DEW_WRITE_TEST 0x190
+#define MT6331_DEW_CRC_SWRST 0x192
+#define MT6331_DEW_CRC_EN 0x194
+#define MT6331_DEW_CRC_VAL 0x196
+#define MT6331_DEW_DBG_MON_SEL 0x198
+#define MT6331_DEW_CIPHER_KEY_SEL 0x19A
+#define MT6331_DEW_CIPHER_IV_SEL 0x19C
+#define MT6331_DEW_CIPHER_EN 0x19E
+#define MT6331_DEW_CIPHER_RDY 0x1A0
+#define MT6331_DEW_CIPHER_MODE 0x1A2
+#define MT6331_DEW_CIPHER_SWRST 0x1A4
+#define MT6331_DEW_RDDMY_NO 0x1A6
+#define MT6331_INT_TYPE_CON0 0x1A8
+#define MT6331_INT_TYPE_CON0_SET 0x1AA
+#define MT6331_INT_TYPE_CON0_CLR 0x1AC
+#define MT6331_INT_TYPE_CON1 0x1AE
+#define MT6331_INT_TYPE_CON1_SET 0x1B0
+#define MT6331_INT_TYPE_CON1_CLR 0x1B2
+#define MT6331_INT_STA 0x1B4
+#define MT6331_BUCK_ALL_CON0 0x200
+#define MT6331_BUCK_ALL_CON1 0x202
+#define MT6331_BUCK_ALL_CON2 0x204
+#define MT6331_BUCK_ALL_CON3 0x206
+#define MT6331_BUCK_ALL_CON4 0x208
+#define MT6331_BUCK_ALL_CON5 0x20A
+#define MT6331_BUCK_ALL_CON6 0x20C
+#define MT6331_BUCK_ALL_CON7 0x20E
+#define MT6331_BUCK_ALL_CON8 0x210
+#define MT6331_BUCK_ALL_CON9 0x212
+#define MT6331_BUCK_ALL_CON10 0x214
+#define MT6331_BUCK_ALL_CON11 0x216
+#define MT6331_BUCK_ALL_CON12 0x218
+#define MT6331_BUCK_ALL_CON13 0x21A
+#define MT6331_BUCK_ALL_CON14 0x21C
+#define MT6331_BUCK_ALL_CON15 0x21E
+#define MT6331_BUCK_ALL_CON16 0x220
+#define MT6331_BUCK_ALL_CON17 0x222
+#define MT6331_BUCK_ALL_CON18 0x224
+#define MT6331_BUCK_ALL_CON19 0x226
+#define MT6331_BUCK_ALL_CON20 0x228
+#define MT6331_BUCK_ALL_CON21 0x22A
+#define MT6331_BUCK_ALL_CON22 0x22C
+#define MT6331_BUCK_ALL_CON23 0x22E
+#define MT6331_BUCK_ALL_CON24 0x230
+#define MT6331_BUCK_ALL_CON25 0x232
+#define MT6331_BUCK_ALL_CON26 0x234
+#define MT6331_VDVFS11_CON0 0x236
+#define MT6331_VDVFS11_CON1 0x238
+#define MT6331_VDVFS11_CON2 0x23A
+#define MT6331_VDVFS11_CON3 0x23C
+#define MT6331_VDVFS11_CON4 0x23E
+#define MT6331_VDVFS11_CON5 0x240
+#define MT6331_VDVFS11_CON6 0x242
+#define MT6331_VDVFS11_CON7 0x244
+#define MT6331_VDVFS11_CON8 0x246
+#define MT6331_VDVFS11_CON9 0x248
+#define MT6331_VDVFS11_CON10 0x24A
+#define MT6331_VDVFS11_CON11 0x24C
+#define MT6331_VDVFS11_CON12 0x24E
+#define MT6331_VDVFS11_CON13 0x250
+#define MT6331_VDVFS11_CON14 0x252
+#define MT6331_VDVFS11_CON18 0x25A
+#define MT6331_VDVFS11_CON19 0x25C
+#define MT6331_VDVFS11_CON20 0x25E
+#define MT6331_VDVFS11_CON21 0x260
+#define MT6331_VDVFS11_CON22 0x262
+#define MT6331_VDVFS11_CON23 0x264
+#define MT6331_VDVFS11_CON24 0x266
+#define MT6331_VDVFS11_CON25 0x268
+#define MT6331_VDVFS11_CON26 0x26A
+#define MT6331_VDVFS11_CON27 0x26C
+#define MT6331_VDVFS12_CON0 0x26E
+#define MT6331_VDVFS12_CON1 0x270
+#define MT6331_VDVFS12_CON2 0x272
+#define MT6331_VDVFS12_CON3 0x274
+#define MT6331_VDVFS12_CON4 0x276
+#define MT6331_VDVFS12_CON5 0x278
+#define MT6331_VDVFS12_CON6 0x27A
+#define MT6331_VDVFS12_CON7 0x27C
+#define MT6331_VDVFS12_CON8 0x27E
+#define MT6331_VDVFS12_CON9 0x280
+#define MT6331_VDVFS12_CON10 0x282
+#define MT6331_VDVFS12_CON11 0x284
+#define MT6331_VDVFS12_CON12 0x286
+#define MT6331_VDVFS12_CON13 0x288
+#define MT6331_VDVFS12_CON14 0x28A
+#define MT6331_VDVFS12_CON18 0x292
+#define MT6331_VDVFS12_CON19 0x294
+#define MT6331_VDVFS12_CON20 0x296
+#define MT6331_VDVFS13_CON0 0x298
+#define MT6331_VDVFS13_CON1 0x29A
+#define MT6331_VDVFS13_CON2 0x29C
+#define MT6331_VDVFS13_CON3 0x29E
+#define MT6331_VDVFS13_CON4 0x2A0
+#define MT6331_VDVFS13_CON5 0x2A2
+#define MT6331_VDVFS13_CON6 0x2A4
+#define MT6331_VDVFS13_CON7 0x2A6
+#define MT6331_VDVFS13_CON8 0x2A8
+#define MT6331_VDVFS13_CON9 0x2AA
+#define MT6331_VDVFS13_CON10 0x2AC
+#define MT6331_VDVFS13_CON11 0x2AE
+#define MT6331_VDVFS13_CON12 0x2B0
+#define MT6331_VDVFS13_CON13 0x2B2
+#define MT6331_VDVFS13_CON14 0x2B4
+#define MT6331_VDVFS13_CON18 0x2BC
+#define MT6331_VDVFS13_CON19 0x2BE
+#define MT6331_VDVFS13_CON20 0x2C0
+#define MT6331_VDVFS14_CON0 0x2C2
+#define MT6331_VDVFS14_CON1 0x2C4
+#define MT6331_VDVFS14_CON2 0x2C6
+#define MT6331_VDVFS14_CON3 0x2C8
+#define MT6331_VDVFS14_CON4 0x2CA
+#define MT6331_VDVFS14_CON5 0x2CC
+#define MT6331_VDVFS14_CON6 0x2CE
+#define MT6331_VDVFS14_CON7 0x2D0
+#define MT6331_VDVFS14_CON8 0x2D2
+#define MT6331_VDVFS14_CON9 0x2D4
+#define MT6331_VDVFS14_CON10 0x2D6
+#define MT6331_VDVFS14_CON11 0x2D8
+#define MT6331_VDVFS14_CON12 0x2DA
+#define MT6331_VDVFS14_CON13 0x2DC
+#define MT6331_VDVFS14_CON14 0x2DE
+#define MT6331_VDVFS14_CON18 0x2E6
+#define MT6331_VDVFS14_CON19 0x2E8
+#define MT6331_VDVFS14_CON20 0x2EA
+#define MT6331_VGPU_CON0 0x300
+#define MT6331_VGPU_CON1 0x302
+#define MT6331_VGPU_CON2 0x304
+#define MT6331_VGPU_CON3 0x306
+#define MT6331_VGPU_CON4 0x308
+#define MT6331_VGPU_CON5 0x30A
+#define MT6331_VGPU_CON6 0x30C
+#define MT6331_VGPU_CON7 0x30E
+#define MT6331_VGPU_CON8 0x310
+#define MT6331_VGPU_CON9 0x312
+#define MT6331_VGPU_CON10 0x314
+#define MT6331_VGPU_CON11 0x316
+#define MT6331_VGPU_CON12 0x318
+#define MT6331_VGPU_CON13 0x31A
+#define MT6331_VGPU_CON14 0x31C
+#define MT6331_VGPU_CON15 0x31E
+#define MT6331_VGPU_CON16 0x320
+#define MT6331_VGPU_CON17 0x322
+#define MT6331_VGPU_CON18 0x324
+#define MT6331_VGPU_CON19 0x326
+#define MT6331_VGPU_CON20 0x328
+#define MT6331_VCORE1_CON0 0x32A
+#define MT6331_VCORE1_CON1 0x32C
+#define MT6331_VCORE1_CON2 0x32E
+#define MT6331_VCORE1_CON3 0x330
+#define MT6331_VCORE1_CON4 0x332
+#define MT6331_VCORE1_CON5 0x334
+#define MT6331_VCORE1_CON6 0x336
+#define MT6331_VCORE1_CON7 0x338
+#define MT6331_VCORE1_CON8 0x33A
+#define MT6331_VCORE1_CON9 0x33C
+#define MT6331_VCORE1_CON10 0x33E
+#define MT6331_VCORE1_CON11 0x340
+#define MT6331_VCORE1_CON12 0x342
+#define MT6331_VCORE1_CON13 0x344
+#define MT6331_VCORE1_CON14 0x346
+#define MT6331_VCORE1_CON15 0x348
+#define MT6331_VCORE1_CON16 0x34A
+#define MT6331_VCORE1_CON17 0x34C
+#define MT6331_VCORE1_CON18 0x34E
+#define MT6331_VCORE1_CON19 0x350
+#define MT6331_VCORE1_CON20 0x352
+#define MT6331_VCORE2_CON0 0x354
+#define MT6331_VCORE2_CON1 0x356
+#define MT6331_VCORE2_CON2 0x358
+#define MT6331_VCORE2_CON3 0x35A
+#define MT6331_VCORE2_CON4 0x35C
+#define MT6331_VCORE2_CON5 0x35E
+#define MT6331_VCORE2_CON6 0x360
+#define MT6331_VCORE2_CON7 0x362
+#define MT6331_VCORE2_CON8 0x364
+#define MT6331_VCORE2_CON9 0x366
+#define MT6331_VCORE2_CON10 0x368
+#define MT6331_VCORE2_CON11 0x36A
+#define MT6331_VCORE2_CON12 0x36C
+#define MT6331_VCORE2_CON13 0x36E
+#define MT6331_VCORE2_CON14 0x370
+#define MT6331_VCORE2_CON15 0x372
+#define MT6331_VCORE2_CON16 0x374
+#define MT6331_VCORE2_CON17 0x376
+#define MT6331_VCORE2_CON18 0x378
+#define MT6331_VCORE2_CON19 0x37A
+#define MT6331_VCORE2_CON20 0x37C
+#define MT6331_VCORE2_CON21 0x37E
+#define MT6331_VIO18_CON0 0x380
+#define MT6331_VIO18_CON1 0x382
+#define MT6331_VIO18_CON2 0x384
+#define MT6331_VIO18_CON3 0x386
+#define MT6331_VIO18_CON4 0x388
+#define MT6331_VIO18_CON5 0x38A
+#define MT6331_VIO18_CON6 0x38C
+#define MT6331_VIO18_CON7 0x38E
+#define MT6331_VIO18_CON8 0x390
+#define MT6331_VIO18_CON9 0x392
+#define MT6331_VIO18_CON10 0x394
+#define MT6331_VIO18_CON11 0x396
+#define MT6331_VIO18_CON12 0x398
+#define MT6331_VIO18_CON13 0x39A
+#define MT6331_VIO18_CON14 0x39C
+#define MT6331_VIO18_CON15 0x39E
+#define MT6331_VIO18_CON16 0x3A0
+#define MT6331_VIO18_CON17 0x3A2
+#define MT6331_VIO18_CON18 0x3A4
+#define MT6331_VIO18_CON19 0x3A6
+#define MT6331_VIO18_CON20 0x3A8
+#define MT6331_BUCK_K_CON0 0x3AA
+#define MT6331_BUCK_K_CON1 0x3AC
+#define MT6331_BUCK_K_CON2 0x3AE
+#define MT6331_BUCK_K_CON3 0x3B0
+#define MT6331_ZCD_CON0 0x400
+#define MT6331_ZCD_CON1 0x402
+#define MT6331_ZCD_CON2 0x404
+#define MT6331_ZCD_CON3 0x406
+#define MT6331_ZCD_CON4 0x408
+#define MT6331_ZCD_CON5 0x40A
+#define MT6331_ISINK0_CON0 0x40C
+#define MT6331_ISINK0_CON1 0x40E
+#define MT6331_ISINK0_CON2 0x410
+#define MT6331_ISINK0_CON3 0x412
+#define MT6331_ISINK0_CON4 0x414
+#define MT6331_ISINK1_CON0 0x416
+#define MT6331_ISINK1_CON1 0x418
+#define MT6331_ISINK1_CON2 0x41A
+#define MT6331_ISINK1_CON3 0x41C
+#define MT6331_ISINK1_CON4 0x41E
+#define MT6331_ISINK2_CON0 0x420
+#define MT6331_ISINK2_CON1 0x422
+#define MT6331_ISINK2_CON2 0x424
+#define MT6331_ISINK2_CON3 0x426
+#define MT6331_ISINK2_CON4 0x428
+#define MT6331_ISINK3_CON0 0x42A
+#define MT6331_ISINK3_CON1 0x42C
+#define MT6331_ISINK3_CON2 0x42E
+#define MT6331_ISINK3_CON3 0x430
+#define MT6331_ISINK3_CON4 0x432
+#define MT6331_ISINK_ANA0 0x434
+#define MT6331_ISINK_ANA1 0x436
+#define MT6331_ISINK_PHASE_DLY 0x438
+#define MT6331_ISINK_EN_CTRL 0x43A
+#define MT6331_ANALDO_CON0 0x500
+#define MT6331_ANALDO_CON1 0x502
+#define MT6331_ANALDO_CON2 0x504
+#define MT6331_ANALDO_CON3 0x506
+#define MT6331_ANALDO_CON4 0x508
+#define MT6331_ANALDO_CON5 0x50A
+#define MT6331_ANALDO_CON6 0x50C
+#define MT6331_ANALDO_CON7 0x50E
+#define MT6331_ANALDO_CON8 0x510
+#define MT6331_ANALDO_CON9 0x512
+#define MT6331_ANALDO_CON10 0x514
+#define MT6331_ANALDO_CON11 0x516
+#define MT6331_ANALDO_CON12 0x518
+#define MT6331_ANALDO_CON13 0x51A
+#define MT6331_SYSLDO_CON0 0x51C
+#define MT6331_SYSLDO_CON1 0x51E
+#define MT6331_SYSLDO_CON2 0x520
+#define MT6331_SYSLDO_CON3 0x522
+#define MT6331_SYSLDO_CON4 0x524
+#define MT6331_SYSLDO_CON5 0x526
+#define MT6331_SYSLDO_CON6 0x528
+#define MT6331_SYSLDO_CON7 0x52A
+#define MT6331_SYSLDO_CON8 0x52C
+#define MT6331_SYSLDO_CON9 0x52E
+#define MT6331_SYSLDO_CON10 0x530
+#define MT6331_SYSLDO_CON11 0x532
+#define MT6331_SYSLDO_CON12 0x534
+#define MT6331_SYSLDO_CON13 0x536
+#define MT6331_SYSLDO_CON14 0x538
+#define MT6331_SYSLDO_CON15 0x53A
+#define MT6331_SYSLDO_CON16 0x53C
+#define MT6331_SYSLDO_CON17 0x53E
+#define MT6331_SYSLDO_CON18 0x540
+#define MT6331_SYSLDO_CON19 0x542
+#define MT6331_SYSLDO_CON20 0x544
+#define MT6331_SYSLDO_CON21 0x546
+#define MT6331_DIGLDO_CON0 0x548
+#define MT6331_DIGLDO_CON1 0x54A
+#define MT6331_DIGLDO_CON2 0x54C
+#define MT6331_DIGLDO_CON3 0x54E
+#define MT6331_DIGLDO_CON4 0x550
+#define MT6331_DIGLDO_CON5 0x552
+#define MT6331_DIGLDO_CON6 0x554
+#define MT6331_DIGLDO_CON7 0x556
+#define MT6331_DIGLDO_CON8 0x558
+#define MT6331_DIGLDO_CON9 0x55A
+#define MT6331_DIGLDO_CON10 0x55C
+#define MT6331_DIGLDO_CON11 0x55E
+#define MT6331_DIGLDO_CON12 0x560
+#define MT6331_DIGLDO_CON13 0x562
+#define MT6331_DIGLDO_CON14 0x564
+#define MT6331_DIGLDO_CON15 0x566
+#define MT6331_DIGLDO_CON16 0x568
+#define MT6331_DIGLDO_CON17 0x56A
+#define MT6331_DIGLDO_CON18 0x56C
+#define MT6331_DIGLDO_CON19 0x56E
+#define MT6331_DIGLDO_CON20 0x570
+#define MT6331_DIGLDO_CON21 0x572
+#define MT6331_DIGLDO_CON22 0x574
+#define MT6331_DIGLDO_CON23 0x576
+#define MT6331_DIGLDO_CON24 0x578
+#define MT6331_DIGLDO_CON25 0x57A
+#define MT6331_DIGLDO_CON26 0x57C
+#define MT6331_DIGLDO_CON27 0x57E
+#define MT6331_DIGLDO_CON28 0x580
+#define MT6331_OTP_CON0 0x600
+#define MT6331_OTP_CON1 0x602
+#define MT6331_OTP_CON2 0x604
+#define MT6331_OTP_CON3 0x606
+#define MT6331_OTP_CON4 0x608
+#define MT6331_OTP_CON5 0x60A
+#define MT6331_OTP_CON6 0x60C
+#define MT6331_OTP_CON7 0x60E
+#define MT6331_OTP_CON8 0x610
+#define MT6331_OTP_CON9 0x612
+#define MT6331_OTP_CON10 0x614
+#define MT6331_OTP_CON11 0x616
+#define MT6331_OTP_CON12 0x618
+#define MT6331_OTP_CON13 0x61A
+#define MT6331_OTP_CON14 0x61C
+#define MT6331_OTP_DOUT_0_15 0x61E
+#define MT6331_OTP_DOUT_16_31 0x620
+#define MT6331_OTP_DOUT_32_47 0x622
+#define MT6331_OTP_DOUT_48_63 0x624
+#define MT6331_OTP_DOUT_64_79 0x626
+#define MT6331_OTP_DOUT_80_95 0x628
+#define MT6331_OTP_DOUT_96_111 0x62A
+#define MT6331_OTP_DOUT_112_127 0x62C
+#define MT6331_OTP_DOUT_128_143 0x62E
+#define MT6331_OTP_DOUT_144_159 0x630
+#define MT6331_OTP_DOUT_160_175 0x632
+#define MT6331_OTP_DOUT_176_191 0x634
+#define MT6331_OTP_DOUT_192_207 0x636
+#define MT6331_OTP_DOUT_208_223 0x638
+#define MT6331_OTP_DOUT_224_239 0x63A
+#define MT6331_OTP_DOUT_240_255 0x63C
+#define MT6331_OTP_VAL_0_15 0x63E
+#define MT6331_OTP_VAL_16_31 0x640
+#define MT6331_OTP_VAL_32_47 0x642
+#define MT6331_OTP_VAL_48_63 0x644
+#define MT6331_OTP_VAL_64_79 0x646
+#define MT6331_OTP_VAL_80_95 0x648
+#define MT6331_OTP_VAL_96_111 0x64A
+#define MT6331_OTP_VAL_112_127 0x64C
+#define MT6331_OTP_VAL_128_143 0x64E
+#define MT6331_OTP_VAL_144_159 0x650
+#define MT6331_OTP_VAL_160_175 0x652
+#define MT6331_OTP_VAL_176_191 0x654
+#define MT6331_OTP_VAL_192_207 0x656
+#define MT6331_OTP_VAL_208_223 0x658
+#define MT6331_OTP_VAL_224_239 0x65A
+#define MT6331_OTP_VAL_240_255 0x65C
+#define MT6331_RTC_MIX_CON0 0x65E
+#define MT6331_RTC_MIX_CON1 0x660
+#define MT6331_AUDDAC_CFG0 0x662
+#define MT6331_AUDBUF_CFG0 0x664
+#define MT6331_AUDBUF_CFG1 0x666
+#define MT6331_AUDBUF_CFG2 0x668
+#define MT6331_AUDBUF_CFG3 0x66A
+#define MT6331_AUDBUF_CFG4 0x66C
+#define MT6331_AUDBUF_CFG5 0x66E
+#define MT6331_AUDBUF_CFG6 0x670
+#define MT6331_AUDBUF_CFG7 0x672
+#define MT6331_AUDBUF_CFG8 0x674
+#define MT6331_IBIASDIST_CFG0 0x676
+#define MT6331_AUDCLKGEN_CFG0 0x678
+#define MT6331_AUDLDO_CFG0 0x67A
+#define MT6331_AUDDCDC_CFG0 0x67C
+#define MT6331_AUDDCDC_CFG1 0x67E
+#define MT6331_AUDNVREGGLB_CFG0 0x680
+#define MT6331_AUD_NCP0 0x682
+#define MT6331_AUD_ZCD_CFG0 0x684
+#define MT6331_AUDPREAMP_CFG0 0x686
+#define MT6331_AUDPREAMP_CFG1 0x688
+#define MT6331_AUDPREAMP_CFG2 0x68A
+#define MT6331_AUDADC_CFG0 0x68C
+#define MT6331_AUDADC_CFG1 0x68E
+#define MT6331_AUDADC_CFG2 0x690
+#define MT6331_AUDADC_CFG3 0x692
+#define MT6331_AUDADC_CFG4 0x694
+#define MT6331_AUDADC_CFG5 0x696
+#define MT6331_AUDDIGMI_CFG0 0x698
+#define MT6331_AUDDIGMI_CFG1 0x69A
+#define MT6331_AUDMICBIAS_CFG0 0x69C
+#define MT6331_AUDMICBIAS_CFG1 0x69E
+#define MT6331_AUDENCSPARE_CFG0 0x6A0
+#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2
+#define MT6331_AUDMADPLL_CFG0 0x6A4
+#define MT6331_AUDMADPLL_CFG1 0x6A6
+#define MT6331_AUDMADPLL_CFG2 0x6A8
+#define MT6331_AUDLDO_NVREG_CFG0 0x6AA
+#define MT6331_AUDLDO_NVREG_CFG1 0x6AC
+#define MT6331_AUDLDO_NVREG_CFG2 0x6AE
+#define MT6331_AUXADC_ADC0 0x700
+#define MT6331_AUXADC_ADC1 0x702
+#define MT6331_AUXADC_ADC2 0x704
+#define MT6331_AUXADC_ADC3 0x706
+#define MT6331_AUXADC_ADC4 0x708
+#define MT6331_AUXADC_ADC5 0x70A
+#define MT6331_AUXADC_ADC6 0x70C
+#define MT6331_AUXADC_ADC7 0x70E
+#define MT6331_AUXADC_ADC8 0x710
+#define MT6331_AUXADC_ADC9 0x712
+#define MT6331_AUXADC_ADC10 0x714
+#define MT6331_AUXADC_ADC11 0x716
+#define MT6331_AUXADC_ADC12 0x718
+#define MT6331_AUXADC_ADC13 0x71A
+#define MT6331_AUXADC_ADC14 0x71C
+#define MT6331_AUXADC_ADC15 0x71E
+#define MT6331_AUXADC_ADC16 0x720
+#define MT6331_AUXADC_ADC17 0x722
+#define MT6331_AUXADC_ADC18 0x724
+#define MT6331_AUXADC_ADC19 0x726
+#define MT6331_AUXADC_STA0 0x728
+#define MT6331_AUXADC_STA1 0x72A
+#define MT6331_AUXADC_RQST0 0x72C
+#define MT6331_AUXADC_RQST0_SET 0x72E
+#define MT6331_AUXADC_RQST0_CLR 0x730
+#define MT6331_AUXADC_RQST1 0x732
+#define MT6331_AUXADC_RQST1_SET 0x734
+#define MT6331_AUXADC_RQST1_CLR 0x736
+#define MT6331_AUXADC_CON0 0x738
+#define MT6331_AUXADC_CON1 0x73A
+#define MT6331_AUXADC_CON2 0x73C
+#define MT6331_AUXADC_CON3 0x73E
+#define MT6331_AUXADC_CON4 0x740
+#define MT6331_AUXADC_CON5 0x742
+#define MT6331_AUXADC_CON6 0x744
+#define MT6331_AUXADC_CON7 0x746
+#define MT6331_AUXADC_CON8 0x748
+#define MT6331_AUXADC_CON9 0x74A
+#define MT6331_AUXADC_CON10 0x74C
+#define MT6331_AUXADC_CON11 0x74E
+#define MT6331_AUXADC_CON12 0x750
+#define MT6331_AUXADC_CON13 0x752
+#define MT6331_AUXADC_CON14 0x754
+#define MT6331_AUXADC_CON15 0x756
+#define MT6331_AUXADC_CON16 0x758
+#define MT6331_AUXADC_CON17 0x75A
+#define MT6331_AUXADC_CON18 0x75C
+#define MT6331_AUXADC_CON19 0x75E
+#define MT6331_AUXADC_CON20 0x760
+#define MT6331_AUXADC_CON21 0x762
+#define MT6331_AUXADC_CON22 0x764
+#define MT6331_AUXADC_CON23 0x766
+#define MT6331_AUXADC_CON24 0x768
+#define MT6331_AUXADC_CON25 0x76A
+#define MT6331_AUXADC_CON26 0x76C
+#define MT6331_AUXADC_CON27 0x76E
+#define MT6331_AUXADC_CON28 0x770
+#define MT6331_AUXADC_CON29 0x772
+#define MT6331_AUXADC_CON30 0x774
+#define MT6331_AUXADC_CON31 0x776
+#define MT6331_AUXADC_CON32 0x778
+#define MT6331_ACCDET_CON0 0x77A
+#define MT6331_ACCDET_CON1 0x77C
+#define MT6331_ACCDET_CON2 0x77E
+#define MT6331_ACCDET_CON3 0x780
+#define MT6331_ACCDET_CON4 0x782
+#define MT6331_ACCDET_CON5 0x784
+#define MT6331_ACCDET_CON6 0x786
+#define MT6331_ACCDET_CON7 0x788
+#define MT6331_ACCDET_CON8 0x78A
+#define MT6331_ACCDET_CON9 0x78C
+#define MT6331_ACCDET_CON10 0x78E
+#define MT6331_ACCDET_CON11 0x790
+#define MT6331_ACCDET_CON12 0x792
+#define MT6331_ACCDET_CON13 0x794
+#define MT6331_ACCDET_CON14 0x796
+#define MT6331_ACCDET_CON15 0x798
+#define MT6331_ACCDET_CON16 0x79A
+#define MT6331_ACCDET_CON17 0x79C
+#define MT6331_ACCDET_CON18 0x79E
+#define MT6331_ACCDET_CON19 0x7A0
+#define MT6331_ACCDET_CON20 0x7A2
+#define MT6331_ACCDET_CON21 0x7A4
+#define MT6331_ACCDET_CON22 0x7A6
+#define MT6331_ACCDET_CON23 0x7A8
+#define MT6331_ACCDET_CON24 0x7AA
+
+#endif /* __MFD_MT6331_REGISTERS_H__ */
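
Many of the control registers above come in CON/CON_SET/CON_CLR triplets: writing a mask to the _SET or _CLR mirror sets or clears just those bits, avoiding a read-modify-write cycle over the bus. A minimal sketch, assuming "map" is the PMIC regmap:

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6331/core.h>
#include <linux/mfd/mt6331/registers.h>

static int mt6331_mask_rtc_irq(struct regmap *map)
{
	/* clear the RTC enable bit in INT_CON0 with a single write */
	return regmap_write(map, MT6331_INT_CON0_CLR,
			    BIT(MT6331_IRQ_STATUS_RTC));
}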
diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h
new file mode 100644
index 000000000000..cd6013eb82d9
--- /dev/null
+++ b/include/linux/mfd/mt6332/core.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_CORE_H__
+#define __MFD_MT6332_CORE_H__
+
+enum mt6332_irq_status_numbers {
+ MT6332_IRQ_STATUS_CHR_COMPLETE = 0,
+ MT6332_IRQ_STATUS_THERMAL_SD,
+ MT6332_IRQ_STATUS_THERMAL_REG_IN,
+ MT6332_IRQ_STATUS_THERMAL_REG_OUT,
+ MT6332_IRQ_STATUS_OTG_OC,
+ MT6332_IRQ_STATUS_CHR_OC,
+ MT6332_IRQ_STATUS_OTG_THERMAL,
+ MT6332_IRQ_STATUS_CHRIN_SHORT,
+ MT6332_IRQ_STATUS_DRVCDT_SHORT,
+ MT6332_IRQ_STATUS_PLUG_IN_FLASH,
+ MT6332_IRQ_STATUS_CHRWDT_FLAG,
+ MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13,
+ MT6332_IRQ_STATUS_OV = 16,
+ MT6332_IRQ_STATUS_BVALID_DET,
+ MT6332_IRQ_STATUS_VBATON_UNDET,
+ MT6332_IRQ_STATUS_CHR_PLUG_IN,
+ MT6332_IRQ_STATUS_CHR_PLUG_OUT,
+ MT6332_IRQ_STATUS_BC11_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23,
+ MT6332_IRQ_STATUS_THR_H = 32,
+ MT6332_IRQ_STATUS_THR_L,
+ MT6332_IRQ_STATUS_BAT_H,
+ MT6332_IRQ_STATUS_BAT_L,
+ MT6332_IRQ_STATUS_M3_H,
+ MT6332_IRQ_STATUS_M3_L,
+ MT6332_IRQ_STATUS_FG_BAT_H,
+ MT6332_IRQ_STATUS_FG_BAT_L,
+ MT6332_IRQ_STATUS_FG_CUR_H,
+ MT6332_IRQ_STATUS_FG_CUR_L,
+ MT6332_IRQ_STATUS_SPKL_D,
+ MT6332_IRQ_STATUS_SPKL_AB,
+ MT6332_IRQ_STATUS_BIF,
+ MT6332_IRQ_STATUS_VWLED_OC = 45,
+ MT6332_IRQ_STATUS_VDRAM_OC = 48,
+ MT6332_IRQ_STATUS_VDVFS2_OC,
+ MT6332_IRQ_STATUS_VRF1_OC,
+ MT6332_IRQ_STATUS_VRF2_OC,
+ MT6332_IRQ_STATUS_VPA_OC,
+ MT6332_IRQ_STATUS_VSBST_OC,
+ MT6332_IRQ_STATUS_LDO_OC,
+ MT6332_IRQ_STATUS_NR,
+};
+
+#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE
+#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1)
+#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV
+#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1)
+#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H
+#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1)
+#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC
+#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1)
+
+#endif /* __MFD_MT6332_CORE_H__ */
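
Since each bank must fit one 16-bit interrupt-control register, the widths derived above can be checked at compile time (a sketch, not part of the patch):

#include <linux/build_bug.h>
#include <linux/mfd/mt6332/core.h>

static_assert(MT6332_IRQ_CON0_BITS <= 16);	/* 14 bits */
static_assert(MT6332_IRQ_CON1_BITS <= 16);	/*  8 bits */
static_assert(MT6332_IRQ_CON2_BITS <= 16);	/* 14 bits */
static_assert(MT6332_IRQ_CON3_BITS <= 16);	/*  7 bits */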
diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h
new file mode 100644
index 000000000000..65e0b86fceac
--- /dev/null
+++ b/include/linux/mfd/mt6332/registers.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_REGISTERS_H__
+#define __MFD_MT6332_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6332_HWCID 0x8000
+#define MT6332_SWCID 0x8002
+#define MT6332_TOP_CON 0x8004
+#define MT6332_DDR_VREF_AP_CON 0x8006
+#define MT6332_DDR_VREF_DQ_CON 0x8008
+#define MT6332_DDR_VREF_CA_CON 0x800A
+#define MT6332_TEST_OUT 0x800C
+#define MT6332_TEST_CON0 0x800E
+#define MT6332_TEST_CON1 0x8010
+#define MT6332_TESTMODE_SW 0x8012
+#define MT6332_TESTMODE_ANA 0x8014
+#define MT6332_TDSEL_CON 0x8016
+#define MT6332_RDSEL_CON 0x8018
+#define MT6332_SMT_CON0 0x801A
+#define MT6332_SMT_CON1 0x801C
+#define MT6332_DRV_CON0 0x801E
+#define MT6332_DRV_CON1 0x8020
+#define MT6332_DRV_CON2 0x8022
+#define MT6332_EN_STATUS0 0x8024
+#define MT6332_OCSTATUS0 0x8026
+#define MT6332_TOP_STATUS 0x8028
+#define MT6332_TOP_STATUS_SET 0x802A
+#define MT6332_TOP_STATUS_CLR 0x802C
+#define MT6332_FLASH_CON0 0x802E
+#define MT6332_FLASH_CON1 0x8030
+#define MT6332_FLASH_CON2 0x8032
+#define MT6332_CORE_CON0 0x8034
+#define MT6332_CORE_CON1 0x8036
+#define MT6332_CORE_CON2 0x8038
+#define MT6332_CORE_CON3 0x803A
+#define MT6332_CORE_CON4 0x803C
+#define MT6332_CORE_CON5 0x803E
+#define MT6332_CORE_CON6 0x8040
+#define MT6332_CORE_CON7 0x8042
+#define MT6332_CORE_CON8 0x8044
+#define MT6332_CORE_CON9 0x8046
+#define MT6332_CORE_CON10 0x8048
+#define MT6332_CORE_CON11 0x804A
+#define MT6332_CORE_CON12 0x804C
+#define MT6332_CORE_CON13 0x804E
+#define MT6332_CORE_CON14 0x8050
+#define MT6332_CORE_CON15 0x8052
+#define MT6332_STA_CON0 0x8054
+#define MT6332_STA_CON1 0x8056
+#define MT6332_STA_CON2 0x8058
+#define MT6332_STA_CON3 0x805A
+#define MT6332_STA_CON4 0x805C
+#define MT6332_STA_CON5 0x805E
+#define MT6332_STA_CON6 0x8060
+#define MT6332_STA_CON7 0x8062
+#define MT6332_CHR_CON0 0x8064
+#define MT6332_CHR_CON1 0x8066
+#define MT6332_CHR_CON2 0x8068
+#define MT6332_CHR_CON3 0x806A
+#define MT6332_CHR_CON4 0x806C
+#define MT6332_CHR_CON5 0x806E
+#define MT6332_CHR_CON6 0x8070
+#define MT6332_CHR_CON7 0x8072
+#define MT6332_CHR_CON8 0x8074
+#define MT6332_CHR_CON9 0x8076
+#define MT6332_CHR_CON10 0x8078
+#define MT6332_CHR_CON11 0x807A
+#define MT6332_CHR_CON12 0x807C
+#define MT6332_CHR_CON13 0x807E
+#define MT6332_CHR_CON14 0x8080
+#define MT6332_CHR_CON15 0x8082
+#define MT6332_BOOST_CON0 0x8084
+#define MT6332_BOOST_CON1 0x8086
+#define MT6332_BOOST_CON2 0x8088
+#define MT6332_BOOST_CON3 0x808A
+#define MT6332_BOOST_CON4 0x808C
+#define MT6332_BOOST_CON5 0x808E
+#define MT6332_BOOST_CON6 0x8090
+#define MT6332_BOOST_CON7 0x8092
+#define MT6332_TOP_CKPDN_CON0 0x8094
+#define MT6332_TOP_CKPDN_CON0_SET 0x8096
+#define MT6332_TOP_CKPDN_CON0_CLR 0x8098
+#define MT6332_TOP_CKPDN_CON1 0x809A
+#define MT6332_TOP_CKPDN_CON1_SET 0x809C
+#define MT6332_TOP_CKPDN_CON1_CLR 0x809E
+#define MT6332_TOP_CKPDN_CON2 0x80A0
+#define MT6332_TOP_CKPDN_CON2_SET 0x80A2
+#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4
+#define MT6332_TOP_CKSEL_CON0 0x80A6
+#define MT6332_TOP_CKSEL_CON0_SET 0x80A8
+#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA
+#define MT6332_TOP_CKSEL_CON1 0x80AC
+#define MT6332_TOP_CKSEL_CON1_SET 0x80AE
+#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0
+#define MT6332_TOP_CKHWEN_CON 0x80B2
+#define MT6332_TOP_CKHWEN_CON_SET 0x80B4
+#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6
+#define MT6332_TOP_CKTST_CON0 0x80B8
+#define MT6332_TOP_CKTST_CON1 0x80BA
+#define MT6332_TOP_RST_CON 0x80BC
+#define MT6332_TOP_RST_CON_SET 0x80BE
+#define MT6332_TOP_RST_CON_CLR 0x80C0
+#define MT6332_TOP_RST_MISC 0x80C2
+#define MT6332_TOP_RST_MISC_SET 0x80C4
+#define MT6332_TOP_RST_MISC_CLR 0x80C6
+#define MT6332_INT_CON0 0x80C8
+#define MT6332_INT_CON0_SET 0x80CA
+#define MT6332_INT_CON0_CLR 0x80CC
+#define MT6332_INT_CON1 0x80CE
+#define MT6332_INT_CON1_SET 0x80D0
+#define MT6332_INT_CON1_CLR 0x80D2
+#define MT6332_INT_CON2 0x80D4
+#define MT6332_INT_CON2_SET 0x80D6
+#define MT6332_INT_CON2_CLR 0x80D8
+#define MT6332_INT_CON3 0x80DA
+#define MT6332_INT_CON3_SET 0x80DC
+#define MT6332_INT_CON3_CLR 0x80DE
+#define MT6332_CHRWDT_CON0 0x80E0
+#define MT6332_CHRWDT_STATUS0 0x80E2
+#define MT6332_INT_STATUS0 0x80E4
+#define MT6332_INT_STATUS1 0x80E6
+#define MT6332_INT_STATUS2 0x80E8
+#define MT6332_INT_STATUS3 0x80EA
+#define MT6332_OC_GEAR_0 0x80EC
+#define MT6332_OC_GEAR_1 0x80EE
+#define MT6332_OC_GEAR_2 0x80F0
+#define MT6332_INT_MISC_CON 0x80F2
+#define MT6332_RG_SPI_CON 0x80F4
+#define MT6332_DEW_DIO_EN 0x80F6
+#define MT6332_DEW_READ_TEST 0x80F8
+#define MT6332_DEW_WRITE_TEST 0x80FA
+#define MT6332_DEW_CRC_SWRST 0x80FC
+#define MT6332_DEW_CRC_EN 0x80FE
+#define MT6332_DEW_CRC_VAL 0x8100
+#define MT6332_DEW_DBG_MON_SEL 0x8102
+#define MT6332_DEW_CIPHER_KEY_SEL 0x8104
+#define MT6332_DEW_CIPHER_IV_SEL 0x8106
+#define MT6332_DEW_CIPHER_EN 0x8108
+#define MT6332_DEW_CIPHER_RDY 0x810A
+#define MT6332_DEW_CIPHER_MODE 0x810C
+#define MT6332_DEW_CIPHER_SWRST 0x810E
+#define MT6332_DEW_RDDMY_NO 0x8110
+#define MT6332_INT_STA 0x8112
+#define MT6332_BIF_CON0 0x8114
+#define MT6332_BIF_CON1 0x8116
+#define MT6332_BIF_CON2 0x8118
+#define MT6332_BIF_CON3 0x811A
+#define MT6332_BIF_CON4 0x811C
+#define MT6332_BIF_CON5 0x811E
+#define MT6332_BIF_CON6 0x8120
+#define MT6332_BIF_CON7 0x8122
+#define MT6332_BIF_CON8 0x8124
+#define MT6332_BIF_CON9 0x8126
+#define MT6332_BIF_CON10 0x8128
+#define MT6332_BIF_CON11 0x812A
+#define MT6332_BIF_CON12 0x812C
+#define MT6332_BIF_CON13 0x812E
+#define MT6332_BIF_CON14 0x8130
+#define MT6332_BIF_CON15 0x8132
+#define MT6332_BIF_CON16 0x8134
+#define MT6332_BIF_CON17 0x8136
+#define MT6332_BIF_CON18 0x8138
+#define MT6332_BIF_CON19 0x813A
+#define MT6332_BIF_CON20 0x813C
+#define MT6332_BIF_CON21 0x813E
+#define MT6332_BIF_CON22 0x8140
+#define MT6332_BIF_CON23 0x8142
+#define MT6332_BIF_CON24 0x8144
+#define MT6332_BIF_CON25 0x8146
+#define MT6332_BIF_CON26 0x8148
+#define MT6332_BIF_CON27 0x814A
+#define MT6332_BIF_CON28 0x814C
+#define MT6332_BIF_CON29 0x814E
+#define MT6332_BIF_CON30 0x8150
+#define MT6332_BIF_CON31 0x8152
+#define MT6332_BIF_CON32 0x8154
+#define MT6332_BIF_CON33 0x8156
+#define MT6332_BIF_CON34 0x8158
+#define MT6332_BIF_CON35 0x815A
+#define MT6332_BIF_CON36 0x815C
+#define MT6332_BATON_CON0 0x815E
+#define MT6332_BIF_CON37 0x8160
+#define MT6332_BIF_CON38 0x8162
+#define MT6332_CHR_CON16 0x8164
+#define MT6332_CHR_CON17 0x8166
+#define MT6332_CHR_CON18 0x8168
+#define MT6332_CHR_CON19 0x816A
+#define MT6332_CHR_CON20 0x816C
+#define MT6332_CHR_CON21 0x816E
+#define MT6332_CHR_CON22 0x8170
+#define MT6332_CHR_CON23 0x8172
+#define MT6332_CHR_CON24 0x8174
+#define MT6332_CHR_CON25 0x8176
+#define MT6332_STA_CON8 0x8178
+#define MT6332_BUCK_ALL_CON0 0x8400
+#define MT6332_BUCK_ALL_CON1 0x8402
+#define MT6332_BUCK_ALL_CON2 0x8404
+#define MT6332_BUCK_ALL_CON3 0x8406
+#define MT6332_BUCK_ALL_CON4 0x8408
+#define MT6332_BUCK_ALL_CON5 0x840A
+#define MT6332_BUCK_ALL_CON6 0x840C
+#define MT6332_BUCK_ALL_CON7 0x840E
+#define MT6332_BUCK_ALL_CON8 0x8410
+#define MT6332_BUCK_ALL_CON9 0x8412
+#define MT6332_BUCK_ALL_CON10 0x8414
+#define MT6332_BUCK_ALL_CON11 0x8416
+#define MT6332_BUCK_ALL_CON12 0x8418
+#define MT6332_BUCK_ALL_CON13 0x841A
+#define MT6332_BUCK_ALL_CON14 0x841C
+#define MT6332_BUCK_ALL_CON15 0x841E
+#define MT6332_BUCK_ALL_CON16 0x8420
+#define MT6332_BUCK_ALL_CON17 0x8422
+#define MT6332_BUCK_ALL_CON18 0x8424
+#define MT6332_BUCK_ALL_CON19 0x8426
+#define MT6332_BUCK_ALL_CON20 0x8428
+#define MT6332_BUCK_ALL_CON21 0x842A
+#define MT6332_BUCK_ALL_CON22 0x842C
+#define MT6332_BUCK_ALL_CON23 0x842E
+#define MT6332_BUCK_ALL_CON24 0x8430
+#define MT6332_BUCK_ALL_CON25 0x8432
+#define MT6332_BUCK_ALL_CON26 0x8434
+#define MT6332_BUCK_ALL_CON27 0x8436
+#define MT6332_VDRAM_CON0 0x8438
+#define MT6332_VDRAM_CON1 0x843A
+#define MT6332_VDRAM_CON2 0x843C
+#define MT6332_VDRAM_CON3 0x843E
+#define MT6332_VDRAM_CON4 0x8440
+#define MT6332_VDRAM_CON5 0x8442
+#define MT6332_VDRAM_CON6 0x8444
+#define MT6332_VDRAM_CON7 0x8446
+#define MT6332_VDRAM_CON8 0x8448
+#define MT6332_VDRAM_CON9 0x844A
+#define MT6332_VDRAM_CON10 0x844C
+#define MT6332_VDRAM_CON11 0x844E
+#define MT6332_VDRAM_CON12 0x8450
+#define MT6332_VDRAM_CON13 0x8452
+#define MT6332_VDRAM_CON14 0x8454
+#define MT6332_VDRAM_CON15 0x8456
+#define MT6332_VDRAM_CON16 0x8458
+#define MT6332_VDRAM_CON17 0x845A
+#define MT6332_VDRAM_CON18 0x845C
+#define MT6332_VDRAM_CON19 0x845E
+#define MT6332_VDRAM_CON20 0x8460
+#define MT6332_VDRAM_CON21 0x8462
+#define MT6332_VDVFS2_CON0 0x8464
+#define MT6332_VDVFS2_CON1 0x8466
+#define MT6332_VDVFS2_CON2 0x8468
+#define MT6332_VDVFS2_CON3 0x846A
+#define MT6332_VDVFS2_CON4 0x846C
+#define MT6332_VDVFS2_CON5 0x846E
+#define MT6332_VDVFS2_CON6 0x8470
+#define MT6332_VDVFS2_CON7 0x8472
+#define MT6332_VDVFS2_CON8 0x8474
+#define MT6332_VDVFS2_CON9 0x8476
+#define MT6332_VDVFS2_CON10 0x8478
+#define MT6332_VDVFS2_CON11 0x847A
+#define MT6332_VDVFS2_CON12 0x847C
+#define MT6332_VDVFS2_CON13 0x847E
+#define MT6332_VDVFS2_CON14 0x8480
+#define MT6332_VDVFS2_CON15 0x8482
+#define MT6332_VDVFS2_CON16 0x8484
+#define MT6332_VDVFS2_CON17 0x8486
+#define MT6332_VDVFS2_CON18 0x8488
+#define MT6332_VDVFS2_CON19 0x848A
+#define MT6332_VDVFS2_CON20 0x848C
+#define MT6332_VDVFS2_CON21 0x848E
+#define MT6332_VDVFS2_CON22 0x8490
+#define MT6332_VDVFS2_CON23 0x8492
+#define MT6332_VDVFS2_CON24 0x8494
+#define MT6332_VDVFS2_CON25 0x8496
+#define MT6332_VDVFS2_CON26 0x8498
+#define MT6332_VDVFS2_CON27 0x849A
+#define MT6332_VRF1_CON0 0x849C
+#define MT6332_VRF1_CON1 0x849E
+#define MT6332_VRF1_CON2 0x84A0
+#define MT6332_VRF1_CON3 0x84A2
+#define MT6332_VRF1_CON4 0x84A4
+#define MT6332_VRF1_CON5 0x84A6
+#define MT6332_VRF1_CON6 0x84A8
+#define MT6332_VRF1_CON7 0x84AA
+#define MT6332_VRF1_CON8 0x84AC
+#define MT6332_VRF1_CON9 0x84AE
+#define MT6332_VRF1_CON10 0x84B0
+#define MT6332_VRF1_CON11 0x84B2
+#define MT6332_VRF1_CON12 0x84B4
+#define MT6332_VRF1_CON13 0x84B6
+#define MT6332_VRF1_CON14 0x84B8
+#define MT6332_VRF1_CON15 0x84BA
+#define MT6332_VRF1_CON16 0x84BC
+#define MT6332_VRF1_CON17 0x84BE
+#define MT6332_VRF1_CON18 0x84C0
+#define MT6332_VRF1_CON19 0x84C2
+#define MT6332_VRF1_CON20 0x84C4
+#define MT6332_VRF1_CON21 0x84C6
+#define MT6332_VRF2_CON0 0x84C8
+#define MT6332_VRF2_CON1 0x84CA
+#define MT6332_VRF2_CON2 0x84CC
+#define MT6332_VRF2_CON3 0x84CE
+#define MT6332_VRF2_CON4 0x84D0
+#define MT6332_VRF2_CON5 0x84D2
+#define MT6332_VRF2_CON6 0x84D4
+#define MT6332_VRF2_CON7 0x84D6
+#define MT6332_VRF2_CON8 0x84D8
+#define MT6332_VRF2_CON9 0x84DA
+#define MT6332_VRF2_CON10 0x84DC
+#define MT6332_VRF2_CON11 0x84DE
+#define MT6332_VRF2_CON12 0x84E0
+#define MT6332_VRF2_CON13 0x84E2
+#define MT6332_VRF2_CON14 0x84E4
+#define MT6332_VRF2_CON15 0x84E6
+#define MT6332_VRF2_CON16 0x84E8
+#define MT6332_VRF2_CON17 0x84EA
+#define MT6332_VRF2_CON18 0x84EC
+#define MT6332_VRF2_CON19 0x84EE
+#define MT6332_VRF2_CON20 0x84F0
+#define MT6332_VRF2_CON21 0x84F2
+#define MT6332_VPA_CON0 0x84F4
+#define MT6332_VPA_CON1 0x84F6
+#define MT6332_VPA_CON2 0x84F8
+#define MT6332_VPA_CON3 0x84FC
+#define MT6332_VPA_CON4 0x84FE
+#define MT6332_VPA_CON5 0x8500
+#define MT6332_VPA_CON6 0x8502
+#define MT6332_VPA_CON7 0x8504
+#define MT6332_VPA_CON8 0x8506
+#define MT6332_VPA_CON9 0x8508
+#define MT6332_VPA_CON10 0x850A
+#define MT6332_VPA_CON11 0x850C
+#define MT6332_VPA_CON12 0x850E
+#define MT6332_VPA_CON13 0x8510
+#define MT6332_VPA_CON14 0x8512
+#define MT6332_VPA_CON15 0x8514
+#define MT6332_VPA_CON16 0x8516
+#define MT6332_VPA_CON17 0x8518
+#define MT6332_VPA_CON18 0x851A
+#define MT6332_VPA_CON19 0x851C
+#define MT6332_VPA_CON20 0x851E
+#define MT6332_VPA_CON21 0x8520
+#define MT6332_VPA_CON22 0x8522
+#define MT6332_VPA_CON23 0x8524
+#define MT6332_VPA_CON24 0x8526
+#define MT6332_VPA_CON25 0x8528
+#define MT6332_VSBST_CON0 0x852A
+#define MT6332_VSBST_CON1 0x852C
+#define MT6332_VSBST_CON2 0x852E
+#define MT6332_VSBST_CON3 0x8530
+#define MT6332_VSBST_CON4 0x8532
+#define MT6332_VSBST_CON5 0x8534
+#define MT6332_VSBST_CON6 0x8536
+#define MT6332_VSBST_CON7 0x8538
+#define MT6332_VSBST_CON8 0x853A
+#define MT6332_VSBST_CON9 0x853C
+#define MT6332_VSBST_CON10 0x853E
+#define MT6332_VSBST_CON11 0x8540
+#define MT6332_VSBST_CON12 0x8542
+#define MT6332_VSBST_CON13 0x8544
+#define MT6332_VSBST_CON14 0x8546
+#define MT6332_VSBST_CON15 0x8548
+#define MT6332_VSBST_CON16 0x854A
+#define MT6332_VSBST_CON17 0x854C
+#define MT6332_VSBST_CON18 0x854E
+#define MT6332_VSBST_CON19 0x8550
+#define MT6332_VSBST_CON20 0x8552
+#define MT6332_VSBST_CON21 0x8554
+#define MT6332_BUCK_K_CON0 0x8556
+#define MT6332_BUCK_K_CON1 0x8558
+#define MT6332_BUCK_K_CON2 0x855A
+#define MT6332_BUCK_K_CON3 0x855C
+#define MT6332_BUCK_K_CON4 0x855E
+#define MT6332_BUCK_K_CON5 0x8560
+#define MT6332_AUXADC_ADC0 0x8800
+#define MT6332_AUXADC_ADC1 0x8802
+#define MT6332_AUXADC_ADC2 0x8804
+#define MT6332_AUXADC_ADC3 0x8806
+#define MT6332_AUXADC_ADC4 0x8808
+#define MT6332_AUXADC_ADC5 0x880A
+#define MT6332_AUXADC_ADC6 0x880C
+#define MT6332_AUXADC_ADC7 0x880E
+#define MT6332_AUXADC_ADC8 0x8810
+#define MT6332_AUXADC_ADC9 0x8812
+#define MT6332_AUXADC_ADC10 0x8814
+#define MT6332_AUXADC_ADC11 0x8816
+#define MT6332_AUXADC_ADC12 0x8818
+#define MT6332_AUXADC_ADC13 0x881A
+#define MT6332_AUXADC_ADC14 0x881C
+#define MT6332_AUXADC_ADC15 0x881E
+#define MT6332_AUXADC_ADC16 0x8820
+#define MT6332_AUXADC_ADC17 0x8822
+#define MT6332_AUXADC_ADC18 0x8824
+#define MT6332_AUXADC_ADC19 0x8826
+#define MT6332_AUXADC_ADC20 0x8828
+#define MT6332_AUXADC_ADC21 0x882A
+#define MT6332_AUXADC_ADC22 0x882C
+#define MT6332_AUXADC_ADC23 0x882E
+#define MT6332_AUXADC_ADC24 0x8830
+#define MT6332_AUXADC_ADC25 0x8832
+#define MT6332_AUXADC_ADC26 0x8834
+#define MT6332_AUXADC_ADC27 0x8836
+#define MT6332_AUXADC_ADC28 0x8838
+#define MT6332_AUXADC_ADC29 0x883A
+#define MT6332_AUXADC_ADC30 0x883C
+#define MT6332_AUXADC_ADC31 0x883E
+#define MT6332_AUXADC_ADC32 0x8840
+#define MT6332_AUXADC_ADC33 0x8842
+#define MT6332_AUXADC_ADC34 0x8844
+#define MT6332_AUXADC_ADC35 0x8846
+#define MT6332_AUXADC_ADC36 0x8848
+#define MT6332_AUXADC_ADC37 0x884A
+#define MT6332_AUXADC_ADC38 0x884C
+#define MT6332_AUXADC_ADC39 0x884E
+#define MT6332_AUXADC_ADC40 0x8850
+#define MT6332_AUXADC_ADC41 0x8852
+#define MT6332_AUXADC_ADC42 0x8854
+#define MT6332_AUXADC_ADC43 0x8856
+#define MT6332_AUXADC_STA0 0x8858
+#define MT6332_AUXADC_STA1 0x885A
+#define MT6332_AUXADC_RQST0 0x885C
+#define MT6332_AUXADC_RQST0_SET 0x885E
+#define MT6332_AUXADC_RQST0_CLR 0x8860
+#define MT6332_AUXADC_RQST1 0x8862
+#define MT6332_AUXADC_RQST1_SET 0x8864
+#define MT6332_AUXADC_RQST1_CLR 0x8866
+#define MT6332_AUXADC_CON0 0x8868
+#define MT6332_AUXADC_CON1 0x886A
+#define MT6332_AUXADC_CON2 0x886C
+#define MT6332_AUXADC_CON3 0x886E
+#define MT6332_AUXADC_CON4 0x8870
+#define MT6332_AUXADC_CON5 0x8872
+#define MT6332_AUXADC_CON6 0x8874
+#define MT6332_AUXADC_CON7 0x8876
+#define MT6332_AUXADC_CON8 0x8878
+#define MT6332_AUXADC_CON9 0x887A
+#define MT6332_AUXADC_CON10 0x887C
+#define MT6332_AUXADC_CON11 0x887E
+#define MT6332_AUXADC_CON12 0x8880
+#define MT6332_AUXADC_CON13 0x8882
+#define MT6332_AUXADC_CON14 0x8884
+#define MT6332_AUXADC_CON15 0x8886
+#define MT6332_AUXADC_CON16 0x8888
+#define MT6332_AUXADC_CON17 0x888A
+#define MT6332_AUXADC_CON18 0x888C
+#define MT6332_AUXADC_CON19 0x888E
+#define MT6332_AUXADC_CON20 0x8890
+#define MT6332_AUXADC_CON21 0x8892
+#define MT6332_AUXADC_CON22 0x8894
+#define MT6332_AUXADC_CON23 0x8896
+#define MT6332_AUXADC_CON24 0x8898
+#define MT6332_AUXADC_CON25 0x889A
+#define MT6332_AUXADC_CON26 0x889C
+#define MT6332_AUXADC_CON27 0x889E
+#define MT6332_AUXADC_CON28 0x88A0
+#define MT6332_AUXADC_CON29 0x88A2
+#define MT6332_AUXADC_CON30 0x88A4
+#define MT6332_AUXADC_CON31 0x88A6
+#define MT6332_AUXADC_CON32 0x88A8
+#define MT6332_AUXADC_CON33 0x88AA
+#define MT6332_AUXADC_CON34 0x88AC
+#define MT6332_AUXADC_CON35 0x88AE
+#define MT6332_AUXADC_CON36 0x88B0
+#define MT6332_AUXADC_CON37 0x88B2
+#define MT6332_AUXADC_CON38 0x88B4
+#define MT6332_AUXADC_CON39 0x88B6
+#define MT6332_AUXADC_CON40 0x88B8
+#define MT6332_AUXADC_CON41 0x88BA
+#define MT6332_AUXADC_CON42 0x88BC
+#define MT6332_AUXADC_CON43 0x88BE
+#define MT6332_AUXADC_CON44 0x88C0
+#define MT6332_AUXADC_CON45 0x88C2
+#define MT6332_AUXADC_CON46 0x88C4
+#define MT6332_AUXADC_CON47 0x88C6
+#define MT6332_STRUP_CONA0 0x8C00
+#define MT6332_STRUP_CONA1 0x8C02
+#define MT6332_STRUP_CONA2 0x8C04
+#define MT6332_STRUP_CON0 0x8C06
+#define MT6332_STRUP_CON2 0x8C08
+#define MT6332_STRUP_CON3 0x8C0A
+#define MT6332_STRUP_CON4 0x8C0C
+#define MT6332_STRUP_CON5 0x8C0E
+#define MT6332_STRUP_CON6 0x8C10
+#define MT6332_STRUP_CON7 0x8C12
+#define MT6332_STRUP_CON8 0x8C14
+#define MT6332_STRUP_CON9 0x8C16
+#define MT6332_STRUP_CON10 0x8C18
+#define MT6332_STRUP_CON11 0x8C1A
+#define MT6332_STRUP_CON12 0x8C1C
+#define MT6332_STRUP_CON13 0x8C1E
+#define MT6332_STRUP_CON14 0x8C20
+#define MT6332_STRUP_CON15 0x8C22
+#define MT6332_STRUP_CON16 0x8C24
+#define MT6332_STRUP_CON17 0x8C26
+#define MT6332_FGADC_CON0 0x8C28
+#define MT6332_FGADC_CON1 0x8C2A
+#define MT6332_FGADC_CON2 0x8C2C
+#define MT6332_FGADC_CON3 0x8C2E
+#define MT6332_FGADC_CON4 0x8C30
+#define MT6332_FGADC_CON5 0x8C32
+#define MT6332_FGADC_CON6 0x8C34
+#define MT6332_FGADC_CON7 0x8C36
+#define MT6332_FGADC_CON8 0x8C38
+#define MT6332_FGADC_CON9 0x8C3A
+#define MT6332_FGADC_CON10 0x8C3C
+#define MT6332_FGADC_CON11 0x8C3E
+#define MT6332_FGADC_CON12 0x8C40
+#define MT6332_FGADC_CON13 0x8C42
+#define MT6332_FGADC_CON14 0x8C44
+#define MT6332_FGADC_CON15 0x8C46
+#define MT6332_FGADC_CON16 0x8C48
+#define MT6332_FGADC_CON17 0x8C4A
+#define MT6332_FGADC_CON18 0x8C4C
+#define MT6332_FGADC_CON19 0x8C4E
+#define MT6332_FGADC_CON20 0x8C50
+#define MT6332_FGADC_CON21 0x8C52
+#define MT6332_FGADC_CON22 0x8C54
+#define MT6332_OTP_CON0 0x8C56
+#define MT6332_OTP_CON1 0x8C58
+#define MT6332_OTP_CON2 0x8C5A
+#define MT6332_OTP_CON3 0x8C5C
+#define MT6332_OTP_CON4 0x8C5E
+#define MT6332_OTP_CON5 0x8C60
+#define MT6332_OTP_CON6 0x8C62
+#define MT6332_OTP_CON7 0x8C64
+#define MT6332_OTP_CON8 0x8C66
+#define MT6332_OTP_CON9 0x8C68
+#define MT6332_OTP_CON10 0x8C6A
+#define MT6332_OTP_CON11 0x8C6C
+#define MT6332_OTP_CON12 0x8C6E
+#define MT6332_OTP_CON13 0x8C70
+#define MT6332_OTP_CON14 0x8C72
+#define MT6332_OTP_DOUT_0_15 0x8C74
+#define MT6332_OTP_DOUT_16_31 0x8C76
+#define MT6332_OTP_DOUT_32_47 0x8C78
+#define MT6332_OTP_DOUT_48_63 0x8C7A
+#define MT6332_OTP_DOUT_64_79 0x8C7C
+#define MT6332_OTP_DOUT_80_95 0x8C7E
+#define MT6332_OTP_DOUT_96_111 0x8C80
+#define MT6332_OTP_DOUT_112_127 0x8C82
+#define MT6332_OTP_DOUT_128_143 0x8C84
+#define MT6332_OTP_DOUT_144_159 0x8C86
+#define MT6332_OTP_DOUT_160_175 0x8C88
+#define MT6332_OTP_DOUT_176_191 0x8C8A
+#define MT6332_OTP_DOUT_192_207 0x8C8C
+#define MT6332_OTP_DOUT_208_223 0x8C8E
+#define MT6332_OTP_DOUT_224_239 0x8C90
+#define MT6332_OTP_DOUT_240_255 0x8C92
+#define MT6332_OTP_VAL_0_15 0x8C94
+#define MT6332_OTP_VAL_16_31 0x8C96
+#define MT6332_OTP_VAL_32_47 0x8C98
+#define MT6332_OTP_VAL_48_63 0x8C9A
+#define MT6332_OTP_VAL_64_79 0x8C9C
+#define MT6332_OTP_VAL_80_95 0x8C9E
+#define MT6332_OTP_VAL_96_111 0x8CA0
+#define MT6332_OTP_VAL_112_127 0x8CA2
+#define MT6332_OTP_VAL_128_143 0x8CA4
+#define MT6332_OTP_VAL_144_159 0x8CA6
+#define MT6332_OTP_VAL_160_175 0x8CA8
+#define MT6332_OTP_VAL_176_191 0x8CAA
+#define MT6332_OTP_VAL_192_207 0x8CAC
+#define MT6332_OTP_VAL_208_223 0x8CAE
+#define MT6332_OTP_VAL_224_239 0x8CB0
+#define MT6332_OTP_VAL_240_255 0x8CB2
+#define MT6332_LDO_CON0 0x8CB4
+#define MT6332_LDO_CON1 0x8CB6
+#define MT6332_LDO_CON2 0x8CB8
+#define MT6332_LDO_CON3 0x8CBA
+#define MT6332_LDO_CON5 0x8CBC
+#define MT6332_LDO_CON6 0x8CBE
+#define MT6332_LDO_CON7 0x8CC0
+#define MT6332_LDO_CON8 0x8CC2
+#define MT6332_LDO_CON9 0x8CC4
+#define MT6332_LDO_CON10 0x8CC6
+#define MT6332_LDO_CON11 0x8CC8
+#define MT6332_LDO_CON12 0x8CCA
+#define MT6332_LDO_CON13 0x8CCC
+#define MT6332_FQMTR_CON0 0x8CCE
+#define MT6332_FQMTR_CON1 0x8CD0
+#define MT6332_FQMTR_CON2 0x8CD2
+#define MT6332_IWLED_CON0 0x8CD4
+#define MT6332_IWLED_DEG 0x8CD6
+#define MT6332_IWLED_STATUS 0x8CD8
+#define MT6332_IWLED_EN_CTRL 0x8CDA
+#define MT6332_IWLED_CON1 0x8CDC
+#define MT6332_IWLED_CON2 0x8CDE
+#define MT6332_IWLED_TRIM0 0x8CE0
+#define MT6332_IWLED_TRIM1 0x8CE2
+#define MT6332_IWLED_CON3 0x8CE4
+#define MT6332_IWLED_CON4 0x8CE6
+#define MT6332_IWLED_CON5 0x8CE8
+#define MT6332_IWLED_CON6 0x8CEA
+#define MT6332_IWLED_CON7 0x8CEC
+#define MT6332_IWLED_CON8 0x8CEE
+#define MT6332_IWLED_CON9 0x8CF0
+#define MT6332_SPK_CON0 0x8CF2
+#define MT6332_SPK_CON1 0x8CF4
+#define MT6332_SPK_CON2 0x8CF6
+#define MT6332_SPK_CON3 0x8CF8
+#define MT6332_SPK_CON4 0x8CFA
+#define MT6332_SPK_CON5 0x8CFC
+#define MT6332_SPK_CON6 0x8CFE
+#define MT6332_SPK_CON7 0x8D00
+#define MT6332_SPK_CON8 0x8D02
+#define MT6332_SPK_CON9 0x8D04
+#define MT6332_SPK_CON10 0x8D06
+#define MT6332_SPK_CON11 0x8D08
+#define MT6332_SPK_CON12 0x8D0A
+#define MT6332_SPK_CON13 0x8D0C
+#define MT6332_SPK_CON14 0x8D0E
+#define MT6332_SPK_CON15 0x8D10
+#define MT6332_SPK_CON16 0x8D12
+#define MT6332_TESTI_CON0 0x8D14
+#define MT6332_TESTI_CON1 0x8D16
+#define MT6332_TESTI_CON2 0x8D18
+#define MT6332_TESTI_CON3 0x8D1A
+#define MT6332_TESTI_CON4 0x8D1C
+#define MT6332_TESTI_CON5 0x8D1E
+#define MT6332_TESTI_CON6 0x8D20
+#define MT6332_TESTI_MUX_CON0 0x8D22
+#define MT6332_TESTI_MUX_CON1 0x8D24
+#define MT6332_TESTI_MUX_CON2 0x8D26
+#define MT6332_TESTI_MUX_CON3 0x8D28
+#define MT6332_TESTI_MUX_CON4 0x8D2A
+#define MT6332_TESTI_MUX_CON5 0x8D2C
+#define MT6332_TESTI_MUX_CON6 0x8D2E
+#define MT6332_TESTO_CON0 0x8D30
+#define MT6332_TESTO_CON1 0x8D32
+#define MT6332_TEST_OMUX_CON0 0x8D34
+#define MT6332_TEST_OMUX_CON1 0x8D36
+#define MT6332_DEBUG_CON0 0x8D38
+#define MT6332_DEBUG_CON1 0x8D3A
+#define MT6332_DEBUG_CON2 0x8D3C
+#define MT6332_FGADC_CON23 0x8D3E
+#define MT6332_FGADC_CON24 0x8D40
+#define MT6332_FGADC_CON25 0x8D42
+#define MT6332_TOP_RST_STATUS 0x8D44
+#define MT6332_TOP_RST_STATUS_SET 0x8D46
+#define MT6332_TOP_RST_STATUS_CLR 0x8D48
+#define MT6332_VDVFS2_CON28 0x8D4A
+
+#endif /* __MFD_MT6332_REGISTERS_H__ */
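
The OTP data above is exposed as sixteen consecutive 16-bit windows, two bytes apart (MT6332_OTP_DOUT_<a>_<b> holds fuse bits a..b). A hedged read helper:

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6332/registers.h>

static int mt6332_read_otp_word(struct regmap *map, unsigned int word,
				unsigned int *val)
{
	if (word >= 16)
		return -EINVAL;
	/* 0x8C74 (bits 0..15) through 0x8C92 (bits 240..255) */
	return regmap_read(map, MT6332_OTP_DOUT_0_15 + 2 * word, val);
}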
diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h
new file mode 100644
index 000000000000..2441611264fd
--- /dev/null
+++ b/include/linux/mfd/mt6357/core.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef __MFD_MT6357_CORE_H__
+#define __MFD_MT6357_CORE_H__
+
+enum mt6357_irq_top_status_shift {
+ MT6357_BUCK_TOP = 0,
+ MT6357_LDO_TOP,
+ MT6357_PSC_TOP,
+ MT6357_SCK_TOP,
+ MT6357_BM_TOP,
+ MT6357_HK_TOP,
+ MT6357_XPP_TOP,
+ MT6357_AUD_TOP,
+ MT6357_MISC_TOP,
+};
+
+enum mt6357_irq_numbers {
+ MT6357_IRQ_VPROC_OC = 0,
+ MT6357_IRQ_VCORE_OC,
+ MT6357_IRQ_VMODEM_OC,
+ MT6357_IRQ_VS1_OC,
+ MT6357_IRQ_VPA_OC,
+ MT6357_IRQ_VCORE_PREOC,
+ MT6357_IRQ_VFE28_OC = 16,
+ MT6357_IRQ_VXO22_OC,
+ MT6357_IRQ_VRF18_OC,
+ MT6357_IRQ_VRF12_OC,
+ MT6357_IRQ_VEFUSE_OC,
+ MT6357_IRQ_VCN33_OC,
+ MT6357_IRQ_VCN28_OC,
+ MT6357_IRQ_VCN18_OC,
+ MT6357_IRQ_VCAMA_OC,
+ MT6357_IRQ_VCAMD_OC,
+ MT6357_IRQ_VCAMIO_OC,
+ MT6357_IRQ_VLDO28_OC,
+ MT6357_IRQ_VUSB33_OC,
+ MT6357_IRQ_VAUX18_OC,
+ MT6357_IRQ_VAUD28_OC,
+ MT6357_IRQ_VIO28_OC,
+ MT6357_IRQ_VIO18_OC,
+ MT6357_IRQ_VSRAM_PROC_OC,
+ MT6357_IRQ_VSRAM_OTHERS_OC,
+ MT6357_IRQ_VIBR_OC,
+ MT6357_IRQ_VDRAM_OC,
+ MT6357_IRQ_VMC_OC,
+ MT6357_IRQ_VMCH_OC,
+ MT6357_IRQ_VEMC_OC,
+ MT6357_IRQ_VSIM1_OC,
+ MT6357_IRQ_VSIM2_OC,
+ MT6357_IRQ_PWRKEY = 48,
+ MT6357_IRQ_HOMEKEY,
+ MT6357_IRQ_PWRKEY_R,
+ MT6357_IRQ_HOMEKEY_R,
+ MT6357_IRQ_NI_LBAT_INT,
+ MT6357_IRQ_CHRDET,
+ MT6357_IRQ_CHRDET_EDGE,
+ MT6357_IRQ_VCDT_HV_DET,
+ MT6357_IRQ_WATCHDOG,
+ MT6357_IRQ_VBATON_UNDET,
+ MT6357_IRQ_BVALID_DET,
+ MT6357_IRQ_OV,
+ MT6357_IRQ_RTC = 64,
+ MT6357_IRQ_FG_BAT0_H = 80,
+ MT6357_IRQ_FG_BAT0_L,
+ MT6357_IRQ_FG_CUR_H,
+ MT6357_IRQ_FG_CUR_L,
+ MT6357_IRQ_FG_ZCV,
+ MT6357_IRQ_BATON_LV = 96,
+ MT6357_IRQ_BATON_HT,
+ MT6357_IRQ_BAT_H = 112,
+ MT6357_IRQ_BAT_L,
+ MT6357_IRQ_AUXADC_IMP,
+ MT6357_IRQ_NAG_C_DLTV,
+ MT6357_IRQ_AUDIO = 128,
+ MT6357_IRQ_ACCDET = 133,
+ MT6357_IRQ_ACCDET_EINT0,
+ MT6357_IRQ_ACCDET_EINT1,
+ MT6357_IRQ_SPI_CMD_ALERT = 144,
+ MT6357_IRQ_NR,
+};
+
+#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC
+#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC
+#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY
+#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC
+#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H
+#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H
+#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO
+#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT
+
+#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1)
+#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1)
+#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1)
+#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1)
+#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1)
+#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1)
+#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1)
+#define MT6357_IRQ_MISC_BITS \
+ (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1)
+
+#define MT6357_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6357_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6357_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6357_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6357_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6357_CORE_H__ */
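
The MT6357_TOP_GEN() helper above is meant to be expanded into a per-bank descriptor table; "struct irq_top_t" and MTK_PMIC_REG_WIDTH (16) are assumed to come from the shared MediaTek PMIC interrupt code, as with MT6358:

#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6357/registers.h>

static const struct irq_top_t mt6357_ints[] = {
	MT6357_TOP_GEN(BUCK),
	MT6357_TOP_GEN(LDO),
	MT6357_TOP_GEN(PSC),
	MT6357_TOP_GEN(SCK),
	MT6357_TOP_GEN(BM),
	MT6357_TOP_GEN(HK),
	MT6357_TOP_GEN(AUD),
	MT6357_TOP_GEN(MISC),
};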
diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h
new file mode 100644
index 000000000000..e24af83b618d
--- /dev/null
+++ b/include/linux/mfd/mt6357/registers.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6357_REGISTERS_H__
+#define __MFD_MT6357_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6357_TOP0_ID 0x0
+#define MT6357_TOP0_REV0 0x2
+#define MT6357_TOP0_DSN_DBI 0x4
+#define MT6357_TOP0_DSN_DXI 0x6
+#define MT6357_HWCID 0x8
+#define MT6357_SWCID 0xa
+#define MT6357_PONSTS 0xc
+#define MT6357_POFFSTS 0xe
+#define MT6357_PSTSCTL 0x10
+#define MT6357_PG_DEB_STS0 0x12
+#define MT6357_PG_SDN_STS0 0x14
+#define MT6357_OC_SDN_STS0 0x16
+#define MT6357_THERMALSTATUS 0x18
+#define MT6357_TOP_CON 0x1a
+#define MT6357_TEST_OUT 0x1c
+#define MT6357_TEST_CON0 0x1e
+#define MT6357_TEST_CON1 0x20
+#define MT6357_TESTMODE_SW 0x22
+#define MT6357_TOPSTATUS 0x24
+#define MT6357_TDSEL_CON 0x26
+#define MT6357_RDSEL_CON 0x28
+#define MT6357_SMT_CON0 0x2a
+#define MT6357_SMT_CON1 0x2c
+#define MT6357_TOP_RSV0 0x2e
+#define MT6357_TOP_RSV1 0x30
+#define MT6357_DRV_CON0 0x32
+#define MT6357_DRV_CON1 0x34
+#define MT6357_DRV_CON2 0x36
+#define MT6357_DRV_CON3 0x38
+#define MT6357_FILTER_CON0 0x3a
+#define MT6357_FILTER_CON1 0x3c
+#define MT6357_FILTER_CON2 0x3e
+#define MT6357_FILTER_CON3 0x40
+#define MT6357_TOP_STATUS 0x42
+#define MT6357_TOP_STATUS_SET 0x44
+#define MT6357_TOP_STATUS_CLR 0x46
+#define MT6357_TOP_TRAP 0x48
+#define MT6357_TOP1_ID 0x80
+#define MT6357_TOP1_REV0 0x82
+#define MT6357_TOP1_DSN_DBI 0x84
+#define MT6357_TOP1_DSN_DXI 0x86
+#define MT6357_GPIO_DIR0 0x88
+#define MT6357_GPIO_DIR0_SET 0x8a
+#define MT6357_GPIO_DIR0_CLR 0x8c
+#define MT6357_GPIO_PULLEN0 0x8e
+#define MT6357_GPIO_PULLEN0_SET 0x90
+#define MT6357_GPIO_PULLEN0_CLR 0x92
+#define MT6357_GPIO_PULLSEL0 0x94
+#define MT6357_GPIO_PULLSEL0_SET 0x96
+#define MT6357_GPIO_PULLSEL0_CLR 0x98
+#define MT6357_GPIO_DINV0 0x9a
+#define MT6357_GPIO_DINV0_SET 0x9c
+#define MT6357_GPIO_DINV0_CLR 0x9e
+#define MT6357_GPIO_DOUT0 0xa0
+#define MT6357_GPIO_DOUT0_SET 0xa2
+#define MT6357_GPIO_DOUT0_CLR 0xa4
+#define MT6357_GPIO_PI0 0xa6
+#define MT6357_GPIO_POE0 0xa8
+#define MT6357_GPIO_MODE0 0xaa
+#define MT6357_GPIO_MODE0_SET 0xac
+#define MT6357_GPIO_MODE0_CLR 0xae
+#define MT6357_GPIO_MODE1 0xb0
+#define MT6357_GPIO_MODE1_SET 0xb2
+#define MT6357_GPIO_MODE1_CLR 0xb4
+#define MT6357_GPIO_MODE2 0xb6
+#define MT6357_GPIO_MODE2_SET 0xb8
+#define MT6357_GPIO_MODE2_CLR 0xba
+#define MT6357_GPIO_MODE3 0xbc
+#define MT6357_GPIO_MODE3_SET 0xbe
+#define MT6357_GPIO_MODE3_CLR 0xc0
+#define MT6357_GPIO_RSV 0xc2
+#define MT6357_TOP2_ID 0x100
+#define MT6357_TOP2_REV0 0x102
+#define MT6357_TOP2_DSN_DBI 0x104
+#define MT6357_TOP2_DSN_DXI 0x106
+#define MT6357_TOP_PAM0 0x108
+#define MT6357_TOP_PAM1 0x10a
+#define MT6357_TOP_CKPDN_CON0 0x10c
+#define MT6357_TOP_CKPDN_CON0_SET 0x10e
+#define MT6357_TOP_CKPDN_CON0_CLR 0x110
+#define MT6357_TOP_CKPDN_CON1 0x112
+#define MT6357_TOP_CKPDN_CON1_SET 0x114
+#define MT6357_TOP_CKPDN_CON1_CLR 0x116
+#define MT6357_TOP_CKSEL_CON0 0x118
+#define MT6357_TOP_CKSEL_CON0_SET 0x11a
+#define MT6357_TOP_CKSEL_CON0_CLR 0x11c
+#define MT6357_TOP_CKSEL_CON1 0x11e
+#define MT6357_TOP_CKSEL_CON1_SET 0x120
+#define MT6357_TOP_CKSEL_CON1_CLR 0x122
+#define MT6357_TOP_CKDIVSEL_CON0 0x124
+#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126
+#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128
+#define MT6357_TOP_CKHWEN_CON0 0x12a
+#define MT6357_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e
+#define MT6357_TOP_CKTST_CON0 0x130
+#define MT6357_TOP_CKTST_CON1 0x132
+#define MT6357_TOP_CLK_CON0 0x134
+#define MT6357_TOP_CLK_CON0_SET 0x136
+#define MT6357_TOP_CLK_CON0_CLR 0x138
+#define MT6357_TOP_DCM_CON0 0x13a
+#define MT6357_TOP_HANDOVER_DEBUG0 0x13c
+#define MT6357_TOP_RST_CON0 0x13e
+#define MT6357_TOP_RST_CON0_SET 0x140
+#define MT6357_TOP_RST_CON0_CLR 0x142
+#define MT6357_TOP_RST_CON1 0x144
+#define MT6357_TOP_RST_CON1_SET 0x146
+#define MT6357_TOP_RST_CON1_CLR 0x148
+#define MT6357_TOP_RST_CON2 0x14a
+#define MT6357_TOP_RST_MISC 0x14c
+#define MT6357_TOP_RST_MISC_SET 0x14e
+#define MT6357_TOP_RST_MISC_CLR 0x150
+#define MT6357_TOP_RST_STATUS 0x152
+#define MT6357_TOP_RST_STATUS_SET 0x154
+#define MT6357_TOP_RST_STATUS_CLR 0x156
+#define MT6357_TOP2_ELR_NUM 0x158
+#define MT6357_TOP2_ELR0 0x15a
+#define MT6357_TOP2_ELR1 0x15c
+#define MT6357_TOP3_ID 0x180
+#define MT6357_TOP3_REV0 0x182
+#define MT6357_TOP3_DSN_DBI 0x184
+#define MT6357_TOP3_DSN_DXI 0x186
+#define MT6357_MISC_TOP_INT_CON0 0x188
+#define MT6357_MISC_TOP_INT_CON0_SET 0x18a
+#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c
+#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e
+#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190
+#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192
+#define MT6357_MISC_TOP_INT_STATUS0 0x194
+#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196
+#define MT6357_TOP_INT_MASK_CON0 0x198
+#define MT6357_TOP_INT_MASK_CON0_SET 0x19a
+#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c
+#define MT6357_TOP_INT_STATUS0 0x19e
+#define MT6357_TOP_INT_RAW_STATUS0 0x1a0
+#define MT6357_TOP_INT_CON0 0x1a2
+#define MT6357_PLT0_ID 0x380
+#define MT6357_PLT0_REV0 0x382
+#define MT6357_PLT0_REV1 0x384
+#define MT6357_PLT0_DSN_DXI 0x386
+#define MT6357_FQMTR_CON0 0x388
+#define MT6357_FQMTR_CON1 0x38a
+#define MT6357_FQMTR_CON2 0x38c
+#define MT6357_TOP_CLK_TRIM 0x38e
+#define MT6357_OTP_CON0 0x390
+#define MT6357_OTP_CON1 0x392
+#define MT6357_OTP_CON2 0x394
+#define MT6357_OTP_CON3 0x396
+#define MT6357_OTP_CON4 0x398
+#define MT6357_OTP_CON5 0x39a
+#define MT6357_OTP_CON6 0x39c
+#define MT6357_OTP_CON7 0x39e
+#define MT6357_OTP_CON8 0x3a0
+#define MT6357_OTP_CON9 0x3a2
+#define MT6357_OTP_CON10 0x3a4
+#define MT6357_OTP_CON11 0x3a6
+#define MT6357_OTP_CON12 0x3a8
+#define MT6357_OTP_CON13 0x3aa
+#define MT6357_OTP_CON14 0x3ac
+#define MT6357_TOP_TMA_KEY 0x3ae
+#define MT6357_TOP_MDB_CONF0 0x3b0
+#define MT6357_TOP_MDB_CONF1 0x3b2
+#define MT6357_TOP_MDB_CONF2 0x3b4
+#define MT6357_PLT0_ELR_NUM 0x3b6
+#define MT6357_PLT0_ELR0 0x3b8
+#define MT6357_PLT0_ELR1 0x3ba
+#define MT6357_SPISLV_ID 0x400
+#define MT6357_SPISLV_REV0 0x402
+#define MT6357_SPISLV_REV1 0x404
+#define MT6357_SPISLV_DSN_DXI 0x406
+#define MT6357_RG_SPI_CON0 0x408
+#define MT6357_DEW_DIO_EN 0x40a
+#define MT6357_DEW_READ_TEST 0x40c
+#define MT6357_DEW_WRITE_TEST 0x40e
+#define MT6357_DEW_CRC_SWRST 0x410
+#define MT6357_DEW_CRC_EN 0x412
+#define MT6357_DEW_CRC_VAL 0x414
+#define MT6357_DEW_DBG_MON_SEL 0x416
+#define MT6357_DEW_CIPHER_KEY_SEL 0x418
+#define MT6357_DEW_CIPHER_IV_SEL 0x41a
+#define MT6357_DEW_CIPHER_EN 0x41c
+#define MT6357_DEW_CIPHER_RDY 0x41e
+#define MT6357_DEW_CIPHER_MODE 0x420
+#define MT6357_DEW_CIPHER_SWRST 0x422
+#define MT6357_DEW_RDDMY_NO 0x424
+#define MT6357_INT_TYPE_CON0 0x426
+#define MT6357_INT_TYPE_CON0_SET 0x428
+#define MT6357_INT_TYPE_CON0_CLR 0x42a
+#define MT6357_INT_STA 0x42c
+#define MT6357_RG_SPI_CON1 0x42e
+#define MT6357_RG_SPI_CON2 0x430
+#define MT6357_RG_SPI_CON3 0x432
+#define MT6357_RG_SPI_CON4 0x434
+#define MT6357_RG_SPI_CON5 0x436
+#define MT6357_RG_SPI_CON6 0x438
+#define MT6357_RG_SPI_CON7 0x43a
+#define MT6357_RG_SPI_CON8 0x43c
+#define MT6357_RG_SPI_CON9 0x43e
+#define MT6357_RG_SPI_CON10 0x440
+#define MT6357_RG_SPI_CON11 0x442
+#define MT6357_RG_SPI_CON12 0x444
+#define MT6357_RG_SPI_CON13 0x446
+#define MT6357_TOP_SPI_CON0 0x448
+#define MT6357_TOP_SPI_CON1 0x44a
+#define MT6357_SCK_TOP_DSN_ID 0x500
+#define MT6357_SCK_TOP_DSN_REV0 0x502
+#define MT6357_SCK_TOP_DBI 0x504
+#define MT6357_SCK_TOP_DXI 0x506
+#define MT6357_SCK_TOP_TPM0 0x508
+#define MT6357_SCK_TOP_TPM1 0x50a
+#define MT6357_SCK_TOP_CON0 0x50c
+#define MT6357_SCK_TOP_CON1 0x50e
+#define MT6357_SCK_TOP_TEST_OUT 0x510
+#define MT6357_SCK_TOP_TEST_CON0 0x512
+#define MT6357_SCK_TOP_CKPDN_CON0 0x514
+#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516
+#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518
+#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a
+#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c
+#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e
+#define MT6357_SCK_TOP_CKTST_CON 0x520
+#define MT6357_SCK_TOP_RST_CON0 0x522
+#define MT6357_SCK_TOP_RST_CON0_SET 0x524
+#define MT6357_SCK_TOP_RST_CON0_CLR 0x526
+#define MT6357_SCK_TOP_INT_CON0 0x528
+#define MT6357_SCK_TOP_INT_CON0_SET 0x52a
+#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c
+#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e
+#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530
+#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532
+#define MT6357_SCK_TOP_INT_STATUS0 0x534
+#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536
+#define MT6357_SCK_TOP_INT_MISC_CON 0x538
+#define MT6357_EOSC_CALI_CON0 0x53a
+#define MT6357_EOSC_CALI_CON1 0x53c
+#define MT6357_RTC_MIX_CON0 0x53e
+#define MT6357_RTC_MIX_CON1 0x540
+#define MT6357_RTC_MIX_CON2 0x542
+#define MT6357_RTC_DSN_ID 0x580
+#define MT6357_RTC_DSN_REV0 0x582
+#define MT6357_RTC_DBI 0x584
+#define MT6357_RTC_DXI 0x586
+#define MT6357_RTC_BBPU 0x588
+#define MT6357_RTC_IRQ_STA 0x58a
+#define MT6357_RTC_IRQ_EN 0x58c
+#define MT6357_RTC_CII_EN 0x58e
+#define MT6357_RTC_AL_MASK 0x590
+#define MT6357_RTC_TC_SEC 0x592
+#define MT6357_RTC_TC_MIN 0x594
+#define MT6357_RTC_TC_HOU 0x596
+#define MT6357_RTC_TC_DOM 0x598
+#define MT6357_RTC_TC_DOW 0x59a
+#define MT6357_RTC_TC_MTH 0x59c
+#define MT6357_RTC_TC_YEA 0x59e
+#define MT6357_RTC_AL_SEC 0x5a0
+#define MT6357_RTC_AL_MIN 0x5a2
+#define MT6357_RTC_AL_HOU 0x5a4
+#define MT6357_RTC_AL_DOM 0x5a6
+#define MT6357_RTC_AL_DOW 0x5a8
+#define MT6357_RTC_AL_MTH 0x5aa
+#define MT6357_RTC_AL_YEA 0x5ac
+#define MT6357_RTC_OSC32CON 0x5ae
+#define MT6357_RTC_POWERKEY1 0x5b0
+#define MT6357_RTC_POWERKEY2 0x5b2
+#define MT6357_RTC_PDN1 0x5b4
+#define MT6357_RTC_PDN2 0x5b6
+#define MT6357_RTC_SPAR0 0x5b8
+#define MT6357_RTC_SPAR1 0x5ba
+#define MT6357_RTC_PROT 0x5bc
+#define MT6357_RTC_DIFF 0x5be
+#define MT6357_RTC_CALI 0x5c0
+#define MT6357_RTC_WRTGR 0x5c2
+#define MT6357_RTC_CON 0x5c4
+#define MT6357_RTC_SEC_CTRL 0x5c6
+#define MT6357_RTC_INT_CNT 0x5c8
+#define MT6357_RTC_SEC_DAT0 0x5ca
+#define MT6357_RTC_SEC_DAT1 0x5cc
+#define MT6357_RTC_SEC_DAT2 0x5ce
+#define MT6357_RTC_SEC_DSN_ID 0x600
+#define MT6357_RTC_SEC_DSN_REV0 0x602
+#define MT6357_RTC_SEC_DBI 0x604
+#define MT6357_RTC_SEC_DXI 0x606
+#define MT6357_RTC_TC_SEC_SEC 0x608
+#define MT6357_RTC_TC_MIN_SEC 0x60a
+#define MT6357_RTC_TC_HOU_SEC 0x60c
+#define MT6357_RTC_TC_DOM_SEC 0x60e
+#define MT6357_RTC_TC_DOW_SEC 0x610
+#define MT6357_RTC_TC_MTH_SEC 0x612
+#define MT6357_RTC_TC_YEA_SEC 0x614
+#define MT6357_RTC_SEC_CK_PDN 0x616
+#define MT6357_RTC_SEC_WRTGR 0x618
+#define MT6357_DCXO_DSN_ID 0x780
+#define MT6357_DCXO_DSN_REV0 0x782
+#define MT6357_DCXO_DSN_DBI 0x784
+#define MT6357_DCXO_DSN_DXI 0x786
+#define MT6357_DCXO_CW00 0x788
+#define MT6357_DCXO_CW00_SET 0x78a
+#define MT6357_DCXO_CW00_CLR 0x78c
+#define MT6357_DCXO_CW01 0x78e
+#define MT6357_DCXO_CW02 0x790
+#define MT6357_DCXO_CW03 0x792
+#define MT6357_DCXO_CW04 0x794
+#define MT6357_DCXO_CW05 0x796
+#define MT6357_DCXO_CW06 0x798
+#define MT6357_DCXO_CW07 0x79a
+#define MT6357_DCXO_CW08 0x79c
+#define MT6357_DCXO_CW09 0x79e
+#define MT6357_DCXO_CW10 0x7a0
+#define MT6357_DCXO_CW11 0x7a2
+#define MT6357_DCXO_CW11_SET 0x7a4
+#define MT6357_DCXO_CW11_CLR 0x7a6
+#define MT6357_DCXO_CW12 0x7a8
+#define MT6357_DCXO_CW13 0x7aa
+#define MT6357_DCXO_CW14 0x7ac
+#define MT6357_DCXO_CW15 0x7ae
+#define MT6357_DCXO_CW16 0x7b0
+#define MT6357_DCXO_CW17 0x7b2
+#define MT6357_DCXO_CW18 0x7b4
+#define MT6357_DCXO_CW19 0x7b6
+#define MT6357_DCXO_CW20 0x7b8
+#define MT6357_DCXO_CW21 0x7ba
+#define MT6357_DCXO_CW22 0x7bc
+#define MT6357_DCXO_ELR_NUM 0x7be
+#define MT6357_DCXO_ELR0 0x7c0
+#define MT6357_PSC_TOP_ID 0x900
+#define MT6357_PSC_TOP_REV0 0x902
+#define MT6357_PSC_TOP_DBI 0x904
+#define MT6357_PSC_TOP_DXI 0x906
+#define MT6357_PSC_TPM0 0x908
+#define MT6357_PSC_TPM1 0x90a
+#define MT6357_PSC_TOP_RSTCTL_0 0x90c
+#define MT6357_PSC_TOP_INT_CON0 0x90e
+#define MT6357_PSC_TOP_INT_CON0_SET 0x910
+#define MT6357_PSC_TOP_INT_CON0_CLR 0x912
+#define MT6357_PSC_TOP_INT_MASK_CON0 0x914
+#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916
+#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918
+#define MT6357_PSC_TOP_INT_STATUS0 0x91a
+#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c
+#define MT6357_PSC_TOP_INT_MISC_CON 0x91e
+#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920
+#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922
+#define MT6357_PSC_TOP_MON_CTL 0x924
+#define MT6357_STRUP_ID 0x980
+#define MT6357_STRUP_REV0 0x982
+#define MT6357_STRUP_DBI 0x984
+#define MT6357_STRUP_DXI 0x986
+#define MT6357_STRUP_ANA_CON0 0x988
+#define MT6357_STRUP_ANA_CON1 0x98a
+#define MT6357_STRUP_ANA_CON2 0x98c
+#define MT6357_STRUP_ELR_NUM 0x98e
+#define MT6357_STRUP_ELR_0 0x990
+#define MT6357_PSEQ_ID 0xa00
+#define MT6357_PSEQ_REV0 0xa02
+#define MT6357_PSEQ_DBI 0xa04
+#define MT6357_PSEQ_DXI 0xa06
+#define MT6357_PPCCTL0 0xa08
+#define MT6357_PPCCTL1 0xa0a
+#define MT6357_PPCCTL2 0xa0c
+#define MT6357_PPCCFG0 0xa0e
+#define MT6357_PPCTST0 0xa10
+#define MT6357_PORFLAG 0xa12
+#define MT6357_STRUP_CON0 0xa14
+#define MT6357_STRUP_CON1 0xa16
+#define MT6357_STRUP_CON2 0xa18
+#define MT6357_STRUP_CON3 0xa1a
+#define MT6357_STRUP_CON4 0xa1c
+#define MT6357_STRUP_CON5 0xa1e
+#define MT6357_STRUP_CON6 0xa20
+#define MT6357_STRUP_CON7 0xa22
+#define MT6357_CPSCFG0 0xa24
+#define MT6357_STRUP_CON9 0xa26
+#define MT6357_STRUP_CON10 0xa28
+#define MT6357_STRUP_CON11 0xa2a
+#define MT6357_STRUP_CON12 0xa2c
+#define MT6357_STRUP_CON13 0xa2e
+#define MT6357_STRUP_CON14 0xa30
+#define MT6357_STRUP_CON15 0xa32
+#define MT6357_STRUP_CON16 0xa34
+#define MT6357_STRUP_CON19 0xa36
+#define MT6357_PSEQ_ELR_NUM 0xa38
+#define MT6357_PSEQ_ELR7 0xa3a
+#define MT6357_PSEQ_ELR8 0xa3c
+#define MT6357_PCHR_DIG_DSN_ID 0xa80
+#define MT6357_PCHR_DIG_DSN_REV0 0xa82
+#define MT6357_PCHR_DIG_DSN_DBI 0xa84
+#define MT6357_PCHR_DIG_DSN_DXI 0xa86
+#define MT6357_CHR_TOP_CON0 0xa88
+#define MT6357_CHR_TOP_CON1 0xa8a
+#define MT6357_CHR_TOP_CON2 0xa8c
+#define MT6357_CHR_TOP_CON3 0xa8e
+#define MT6357_CHR_TOP_CON4 0xa90
+#define MT6357_CHR_TOP_CON5 0xa92
+#define MT6357_CHR_TOP_CON6 0xa94
+#define MT6357_PCHR_DIG_ELR_NUM 0xa96
+#define MT6357_PCHR_ELR0 0xa98
+#define MT6357_PCHR_ELR1 0xa9a
+#define MT6357_PCHR_MACRO_DSN_ID 0xb80
+#define MT6357_PCHR_MACRO_DSN_REV0 0xb82
+#define MT6357_PCHR_MACRO_DSN_DBI 0xb84
+#define MT6357_PCHR_MACRO_DSN_DXI 0xb86
+#define MT6357_CHR_CON0 0xb88
+#define MT6357_CHR_CON1 0xb8a
+#define MT6357_CHR_CON2 0xb8c
+#define MT6357_CHR_CON3 0xb8e
+#define MT6357_CHR_CON4 0xb90
+#define MT6357_CHR_CON5 0xb92
+#define MT6357_CHR_CON6 0xb94
+#define MT6357_CHR_CON7 0xb96
+#define MT6357_CHR_CON8 0xb98
+#define MT6357_CHR_CON9 0xb9a
+#define MT6357_BM_TOP_DSN_ID 0xc00
+#define MT6357_BM_TOP_DSN_REV0 0xc02
+#define MT6357_BM_TOP_DBI 0xc04
+#define MT6357_BM_TOP_DXI 0xc06
+#define MT6357_BM_TPM0 0xc08
+#define MT6357_BM_TPM1 0xc0a
+#define MT6357_BM_TOP_CKPDN_CON0 0xc0c
+#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e
+#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10
+#define MT6357_BM_TOP_CKSEL_CON0 0xc12
+#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14
+#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16
+#define MT6357_BM_TOP_CKTST_CON0 0xc18
+#define MT6357_BM_TOP_RST_CON0 0xc1a
+#define MT6357_BM_TOP_RST_CON0_SET 0xc1c
+#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e
+#define MT6357_BM_TOP_INT_CON0 0xc20
+#define MT6357_BM_TOP_INT_CON0_SET 0xc22
+#define MT6357_BM_TOP_INT_CON0_CLR 0xc24
+#define MT6357_BM_TOP_INT_CON1 0xc26
+#define MT6357_BM_TOP_INT_CON1_SET 0xc28
+#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a
+#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c
+#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e
+#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30
+#define MT6357_BM_TOP_INT_MASK_CON1 0xc32
+#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34
+#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36
+#define MT6357_BM_TOP_INT_STATUS0 0xc38
+#define MT6357_BM_TOP_INT_STATUS1 0xc3a
+#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c
+#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e
+#define MT6357_BM_TOP_INT_MISC_CON 0xc40
+#define MT6357_BM_TOP_DBG_CON 0xc42
+#define MT6357_BM_TOP_RSV0 0xc44
+#define MT6357_FGADC_ANA_DSN_ID 0xc80
+#define MT6357_FGADC_ANA_DSN_REV0 0xc82
+#define MT6357_FGADC_ANA_DSN_DBI 0xc84
+#define MT6357_FGADC_ANA_DSN_DXI 0xc86
+#define MT6357_FGADC_ANA_CON0 0xc88
+#define MT6357_FGADC_ANA_TEST_CON0 0xc8a
+#define MT6357_FGADC_ANA_ELR_NUM 0xc8c
+#define MT6357_FGADC_ANA_ELR0 0xc8e
+#define MT6357_FGADC_ANA_ELR1 0xc90
+#define MT6357_FGADC0_DSN_ID 0xd00
+#define MT6357_FGADC0_DSN_REV0 0xd02
+#define MT6357_FGADC0_DSN_DBI 0xd04
+#define MT6357_FGADC0_DSN_DXI 0xd06
+#define MT6357_FGADC_CON0 0xd08
+#define MT6357_FGADC_CON1 0xd0a
+#define MT6357_FGADC_CON2 0xd0c
+#define MT6357_FGADC_CON3 0xd0e
+#define MT6357_FGADC_CON4 0xd10
+#define MT6357_FGADC_CAR_CON0 0xd12
+#define MT6357_FGADC_CAR_CON1 0xd14
+#define MT6357_FGADC_CAR_CON2 0xd16
+#define MT6357_FGADC_CARTH_CON0 0xd18
+#define MT6357_FGADC_CARTH_CON1 0xd1a
+#define MT6357_FGADC_CARTH_CON2 0xd1c
+#define MT6357_FGADC_CARTH_CON3 0xd1e
+#define MT6357_FGADC_NTER_CON0 0xd20
+#define MT6357_FGADC_NTER_CON1 0xd22
+#define MT6357_FGADC_NTER_CON2 0xd24
+#define MT6357_FGADC_SON_CON0 0xd26
+#define MT6357_FGADC_SON_CON1 0xd28
+#define MT6357_FGADC_SON_CON2 0xd2a
+#define MT6357_FGADC_SON_CON3 0xd2c
+#define MT6357_FGADC_ZCV_CON0 0xd2e
+#define MT6357_FGADC_ZCV_CON1 0xd30
+#define MT6357_FGADC_ZCV_CON2 0xd32
+#define MT6357_FGADC_ZCV_CON3 0xd34
+#define MT6357_FGADC_ZCV_CON4 0xd36
+#define MT6357_FGADC_ZCVTH_CON0 0xd38
+#define MT6357_FGADC_ZCVTH_CON1 0xd3a
+#define MT6357_FGADC_ZCVTH_CON2 0xd3c
+#define MT6357_FGADC1_DSN_ID 0xd80
+#define MT6357_FGADC1_DSN_REV0 0xd82
+#define MT6357_FGADC1_DSN_DBI 0xd84
+#define MT6357_FGADC1_DSN_DXI 0xd86
+#define MT6357_FGADC_R_CON0 0xd88
+#define MT6357_FGADC_CUR_CON0 0xd8a
+#define MT6357_FGADC_CUR_CON1 0xd8c
+#define MT6357_FGADC_CUR_CON2 0xd8e
+#define MT6357_FGADC_CUR_CON3 0xd90
+#define MT6357_FGADC_OFFSET_CON0 0xd92
+#define MT6357_FGADC_OFFSET_CON1 0xd94
+#define MT6357_FGADC_GAIN_CON0 0xd96
+#define MT6357_FGADC_TEST_CON0 0xd98
+#define MT6357_SYSTEM_INFO_CON0 0xd9a
+#define MT6357_SYSTEM_INFO_CON1 0xd9c
+#define MT6357_SYSTEM_INFO_CON2 0xd9e
+#define MT6357_SYSTEM_INFO_CON3 0xda0
+#define MT6357_SYSTEM_INFO_CON4 0xda2
+#define MT6357_BATON_ANA_DSN_ID 0xe00
+#define MT6357_BATON_ANA_DSN_REV0 0xe02
+#define MT6357_BATON_ANA_DSN_DBI 0xe04
+#define MT6357_BATON_ANA_DSN_DXI 0xe06
+#define MT6357_BATON_ANA_CON0 0xe08
+#define MT6357_BATON_ANA_ELR_NUM 0xe0a
+#define MT6357_BATON_ANA_ELR0 0xe0c
+#define MT6357_HK_TOP_ID 0xf80
+#define MT6357_HK_TOP_REV0 0xf82
+#define MT6357_HK_TOP_DBI 0xf84
+#define MT6357_HK_TOP_DXI 0xf86
+#define MT6357_HK_TPM0 0xf88
+#define MT6357_HK_TPM1 0xf8a
+#define MT6357_HK_TOP_CLK_CON0 0xf8c
+#define MT6357_HK_TOP_CLK_CON1 0xf8e
+#define MT6357_HK_TOP_RST_CON0 0xf90
+#define MT6357_HK_TOP_INT_CON0 0xf92
+#define MT6357_HK_TOP_INT_CON0_SET 0xf94
+#define MT6357_HK_TOP_INT_CON0_CLR 0xf96
+#define MT6357_HK_TOP_INT_MASK_CON0 0xf98
+#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a
+#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c
+#define MT6357_HK_TOP_INT_STATUS0 0xf9e
+#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0
+#define MT6357_HK_TOP_MON_CON0 0xfa2
+#define MT6357_HK_TOP_MON_CON1 0xfa4
+#define MT6357_HK_TOP_MON_CON2 0xfa6
+#define MT6357_AUXADC_DSN_ID 0x1000
+#define MT6357_AUXADC_DSN_REV0 0x1002
+#define MT6357_AUXADC_DSN_DBI 0x1004
+#define MT6357_AUXADC_DSN_DXI 0x1006
+#define MT6357_AUXADC_ANA_CON0 0x1008
+#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080
+#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082
+#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084
+#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086
+#define MT6357_AUXADC_ADC0 0x1088
+#define MT6357_AUXADC_ADC1 0x108a
+#define MT6357_AUXADC_ADC2 0x108c
+#define MT6357_AUXADC_ADC3 0x108e
+#define MT6357_AUXADC_ADC4 0x1090
+#define MT6357_AUXADC_ADC5 0x1092
+#define MT6357_AUXADC_ADC6 0x1094
+#define MT6357_AUXADC_ADC7 0x1096
+#define MT6357_AUXADC_ADC8 0x1098
+#define MT6357_AUXADC_ADC9 0x109a
+#define MT6357_AUXADC_ADC10 0x109c
+#define MT6357_AUXADC_ADC11 0x109e
+#define MT6357_AUXADC_ADC12 0x10a0
+#define MT6357_AUXADC_ADC14 0x10a2
+#define MT6357_AUXADC_ADC16 0x10a4
+#define MT6357_AUXADC_ADC17 0x10a6
+#define MT6357_AUXADC_ADC18 0x10a8
+#define MT6357_AUXADC_ADC19 0x10aa
+#define MT6357_AUXADC_ADC20 0x10ac
+#define MT6357_AUXADC_ADC21 0x10ae
+#define MT6357_AUXADC_ADC22 0x10b0
+#define MT6357_AUXADC_ADC23 0x10b2
+#define MT6357_AUXADC_ADC24 0x10b4
+#define MT6357_AUXADC_ADC25 0x10b6
+#define MT6357_AUXADC_ADC26 0x10b8
+#define MT6357_AUXADC_ADC27 0x10ba
+#define MT6357_AUXADC_ADC29 0x10bc
+#define MT6357_AUXADC_ADC30 0x10be
+#define MT6357_AUXADC_ADC31 0x10c0
+#define MT6357_AUXADC_ADC32 0x10c2
+#define MT6357_AUXADC_ADC33 0x10c4
+#define MT6357_AUXADC_ADC34 0x10c6
+#define MT6357_AUXADC_ADC35 0x10c8
+#define MT6357_AUXADC_ADC36 0x10ca
+#define MT6357_AUXADC_ADC38 0x10cc
+#define MT6357_AUXADC_ADC39 0x10ce
+#define MT6357_AUXADC_ADC40 0x10d0
+#define MT6357_AUXADC_ADC41 0x10d2
+#define MT6357_AUXADC_ADC42 0x10d4
+#define MT6357_AUXADC_ADC43 0x10d6
+#define MT6357_AUXADC_ADC46 0x10d8
+#define MT6357_AUXADC_ADC47 0x10da
+#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc
+#define MT6357_AUXADC_DIG_1_ELR0 0x10de
+#define MT6357_AUXADC_DIG_1_ELR1 0x10e0
+#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100
+#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102
+#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104
+#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106
+#define MT6357_AUXADC_STA0 0x1108
+#define MT6357_AUXADC_STA1 0x110a
+#define MT6357_AUXADC_STA2 0x110c
+#define MT6357_AUXADC_RQST0 0x110e
+#define MT6357_AUXADC_RQST0_SET 0x1110
+#define MT6357_AUXADC_RQST0_CLR 0x1112
+#define MT6357_AUXADC_RQST2 0x1114
+#define MT6357_AUXADC_RQST2_SET 0x1116
+#define MT6357_AUXADC_RQST2_CLR 0x1118
+#define MT6357_AUXADC_RQST1 0x111a
+#define MT6357_AUXADC_RQST1_SET 0x111c
+#define MT6357_AUXADC_RQST1_CLR 0x111e
+#define MT6357_AUXADC_CON0 0x1120
+#define MT6357_AUXADC_CON0_SET 0x1122
+#define MT6357_AUXADC_CON0_CLR 0x1124
+#define MT6357_AUXADC_CON1 0x1126
+#define MT6357_AUXADC_CON2 0x1128
+#define MT6357_AUXADC_CON3 0x112a
+#define MT6357_AUXADC_CON4 0x112c
+#define MT6357_AUXADC_CON5 0x112e
+#define MT6357_AUXADC_CON6 0x1130
+#define MT6357_AUXADC_CON7 0x1132
+#define MT6357_AUXADC_CON8 0x1134
+#define MT6357_AUXADC_CON9 0x1136
+#define MT6357_AUXADC_CON10 0x1138
+#define MT6357_AUXADC_CON11 0x113a
+#define MT6357_AUXADC_CON12 0x113c
+#define MT6357_AUXADC_CON13 0x113e
+#define MT6357_AUXADC_CON14 0x1140
+#define MT6357_AUXADC_CON15 0x1142
+#define MT6357_AUXADC_CON16 0x1144
+#define MT6357_AUXADC_CON17 0x1146
+#define MT6357_AUXADC_CON18 0x1148
+#define MT6357_AUXADC_CON19 0x114a
+#define MT6357_AUXADC_CON20 0x114c
+#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180
+#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182
+#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184
+#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186
+#define MT6357_AUXADC_AUTORPT0 0x1188
+#define MT6357_AUXADC_LBAT0 0x118a
+#define MT6357_AUXADC_LBAT1 0x118c
+#define MT6357_AUXADC_LBAT2 0x118e
+#define MT6357_AUXADC_LBAT3 0x1190
+#define MT6357_AUXADC_LBAT4 0x1192
+#define MT6357_AUXADC_LBAT5 0x1194
+#define MT6357_AUXADC_LBAT6 0x1196
+#define MT6357_AUXADC_ACCDET 0x1198
+#define MT6357_AUXADC_DBG0 0x119a
+#define MT6357_AUXADC_IMP0 0x119c
+#define MT6357_AUXADC_IMP1 0x119e
+#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0
+#define MT6357_AUXADC_DIG_3_ELR0 0x11a2
+#define MT6357_AUXADC_DIG_3_ELR1 0x11a4
+#define MT6357_AUXADC_DIG_3_ELR2 0x11a6
+#define MT6357_AUXADC_DIG_3_ELR3 0x11a8
+#define MT6357_AUXADC_DIG_3_ELR4 0x11aa
+#define MT6357_AUXADC_DIG_3_ELR5 0x11ac
+#define MT6357_AUXADC_DIG_3_ELR6 0x11ae
+#define MT6357_AUXADC_DIG_3_ELR7 0x11b0
+#define MT6357_AUXADC_DIG_3_ELR8 0x11b2
+#define MT6357_AUXADC_DIG_3_ELR9 0x11b4
+#define MT6357_AUXADC_DIG_3_ELR10 0x11b6
+#define MT6357_AUXADC_DIG_3_ELR11 0x11b8
+#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200
+#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202
+#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204
+#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206
+#define MT6357_AUXADC_MDRT_0 0x1208
+#define MT6357_AUXADC_MDRT_1 0x120a
+#define MT6357_AUXADC_MDRT_2 0x120c
+#define MT6357_AUXADC_MDRT_3 0x120e
+#define MT6357_AUXADC_MDRT_4 0x1210
+#define MT6357_AUXADC_DCXO_MDRT_0 0x1212
+#define MT6357_AUXADC_DCXO_MDRT_1 0x1214
+#define MT6357_AUXADC_DCXO_MDRT_2 0x1216
+#define MT6357_AUXADC_NAG_0 0x1218
+#define MT6357_AUXADC_NAG_1 0x121a
+#define MT6357_AUXADC_NAG_2 0x121c
+#define MT6357_AUXADC_NAG_3 0x121e
+#define MT6357_AUXADC_NAG_4 0x1220
+#define MT6357_AUXADC_NAG_5 0x1222
+#define MT6357_AUXADC_NAG_6 0x1224
+#define MT6357_AUXADC_NAG_7 0x1226
+#define MT6357_AUXADC_NAG_8 0x1228
+#define MT6357_AUXADC_RSV_1 0x122a
+#define MT6357_AUXADC_ANA_0 0x122c
+#define MT6357_AUXADC_IMP_CG0 0x122e
+#define MT6357_AUXADC_LBAT_CG0 0x1230
+#define MT6357_AUXADC_NAG_CG0 0x1232
+#define MT6357_AUXADC_PRI_NEW 0x1234
+#define MT6357_AUXADC_CHR_TOP_CON2 0x1236
+#define MT6357_BUCK_TOP_DSN_ID 0x1400
+#define MT6357_BUCK_TOP_DSN_REV0 0x1402
+#define MT6357_BUCK_TOP_DBI 0x1404
+#define MT6357_BUCK_TOP_DXI 0x1406
+#define MT6357_BUCK_TOP_PAM0 0x1408
+#define MT6357_BUCK_TOP_PAM1 0x140a
+#define MT6357_BUCK_TOP_CLK_CON0 0x140c
+#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e
+#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416
+#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418
+#define MT6357_BUCK_TOP_INT_CON0 0x141a
+#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c
+#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e
+#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420
+#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422
+#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424
+#define MT6357_BUCK_TOP_INT_STATUS0 0x1426
+#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428
+#define MT6357_BUCK_TOP_STB_CON 0x142a
+#define MT6357_BUCK_TOP_SLP_CON0 0x142c
+#define MT6357_BUCK_TOP_SLP_CON1 0x142e
+#define MT6357_BUCK_TOP_SLP_CON2 0x1430
+#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432
+#define MT6357_BUCK_TOP_OC_CON0 0x1434
+#define MT6357_BUCK_TOP_K_CON0 0x1436
+#define MT6357_BUCK_TOP_K_CON1 0x1438
+#define MT6357_BUCK_TOP_K_CON2 0x143a
+#define MT6357_BUCK_TOP_WDTDBG0 0x143c
+#define MT6357_BUCK_TOP_WDTDBG1 0x143e
+#define MT6357_BUCK_TOP_WDTDBG2 0x1440
+#define MT6357_BUCK_TOP_ELR_NUM 0x1442
+#define MT6357_BUCK_TOP_ELR0 0x1444
+#define MT6357_BUCK_TOP_ELR1 0x1446
+#define MT6357_BUCK_VPROC_DSN_ID 0x1480
+#define MT6357_BUCK_VPROC_DSN_REV0 0x1482
+#define MT6357_BUCK_VPROC_DSN_DBI 0x1484
+#define MT6357_BUCK_VPROC_DSN_DXI 0x1486
+#define MT6357_BUCK_VPROC_CON0 0x1488
+#define MT6357_BUCK_VPROC_CON1 0x148a
+#define MT6357_BUCK_VPROC_CFG0 0x148c
+#define MT6357_BUCK_VPROC_CFG1 0x148e
+#define MT6357_BUCK_VPROC_OP_EN 0x1490
+#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492
+#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494
+#define MT6357_BUCK_VPROC_OP_CFG 0x1496
+#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498
+#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a
+#define MT6357_BUCK_VPROC_SP_CON 0x149c
+#define MT6357_BUCK_VPROC_SP_CFG 0x149e
+#define MT6357_BUCK_VPROC_OC_CFG 0x14a0
+#define MT6357_BUCK_VPROC_DBG0 0x14a2
+#define MT6357_BUCK_VPROC_DBG1 0x14a4
+#define MT6357_BUCK_VPROC_DBG2 0x14a6
+#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8
+#define MT6357_BUCK_VPROC_ELR0 0x14aa
+#define MT6357_BUCK_VCORE_DSN_ID 0x1500
+#define MT6357_BUCK_VCORE_DSN_REV0 0x1502
+#define MT6357_BUCK_VCORE_DSN_DBI 0x1504
+#define MT6357_BUCK_VCORE_DSN_DXI 0x1506
+#define MT6357_BUCK_VCORE_CON0 0x1508
+#define MT6357_BUCK_VCORE_CON1 0x150a
+#define MT6357_BUCK_VCORE_CFG0 0x150c
+#define MT6357_BUCK_VCORE_CFG1 0x150e
+#define MT6357_BUCK_VCORE_OP_EN 0x1510
+#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512
+#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514
+#define MT6357_BUCK_VCORE_OP_CFG 0x1516
+#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518
+#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a
+#define MT6357_BUCK_VCORE_SP_CON 0x151c
+#define MT6357_BUCK_VCORE_SP_CFG 0x151e
+#define MT6357_BUCK_VCORE_OC_CFG 0x1520
+#define MT6357_BUCK_VCORE_DBG0 0x1522
+#define MT6357_BUCK_VCORE_DBG1 0x1524
+#define MT6357_BUCK_VCORE_DBG2 0x1526
+#define MT6357_BUCK_VCORE_ELR_NUM 0x1528
+#define MT6357_BUCK_VCORE_ELR0 0x152a
+#define MT6357_BUCK_VMODEM_DSN_ID 0x1580
+#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582
+#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584
+#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586
+#define MT6357_BUCK_VMODEM_CON0 0x1588
+#define MT6357_BUCK_VMODEM_CON1 0x158a
+#define MT6357_BUCK_VMODEM_CFG0 0x158c
+#define MT6357_BUCK_VMODEM_CFG1 0x158e
+#define MT6357_BUCK_VMODEM_OP_EN 0x1590
+#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592
+#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594
+#define MT6357_BUCK_VMODEM_OP_CFG 0x1596
+#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598
+#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a
+#define MT6357_BUCK_VMODEM_SP_CON 0x159c
+#define MT6357_BUCK_VMODEM_SP_CFG 0x159e
+#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0
+#define MT6357_BUCK_VMODEM_DBG0 0x15a2
+#define MT6357_BUCK_VMODEM_DBG1 0x15a4
+#define MT6357_BUCK_VMODEM_DBG2 0x15a6
+#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8
+#define MT6357_BUCK_VMODEM_ELR0 0x15aa
+#define MT6357_BUCK_VS1_DSN_ID 0x1600
+#define MT6357_BUCK_VS1_DSN_REV0 0x1602
+#define MT6357_BUCK_VS1_DSN_DBI 0x1604
+#define MT6357_BUCK_VS1_DSN_DXI 0x1606
+#define MT6357_BUCK_VS1_CON0 0x1608
+#define MT6357_BUCK_VS1_CON1 0x160a
+#define MT6357_BUCK_VS1_CFG0 0x160c
+#define MT6357_BUCK_VS1_CFG1 0x160e
+#define MT6357_BUCK_VS1_OP_EN 0x1610
+#define MT6357_BUCK_VS1_OP_EN_SET 0x1612
+#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614
+#define MT6357_BUCK_VS1_OP_CFG 0x1616
+#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618
+#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a
+#define MT6357_BUCK_VS1_SP_CON 0x161c
+#define MT6357_BUCK_VS1_SP_CFG 0x161e
+#define MT6357_BUCK_VS1_OC_CFG 0x1620
+#define MT6357_BUCK_VS1_DBG0 0x1622
+#define MT6357_BUCK_VS1_DBG1 0x1624
+#define MT6357_BUCK_VS1_DBG2 0x1626
+#define MT6357_BUCK_VS1_VOTER 0x1628
+#define MT6357_BUCK_VS1_VOTER_SET 0x162a
+#define MT6357_BUCK_VS1_VOTER_CLR 0x162c
+#define MT6357_BUCK_VS1_VOTER_CFG 0x162e
+#define MT6357_BUCK_VS1_ELR_NUM 0x1630
+#define MT6357_BUCK_VS1_ELR0 0x1632
+#define MT6357_BUCK_VPA_DSN_ID 0x1680
+#define MT6357_BUCK_VPA_DSN_REV0 0x1682
+#define MT6357_BUCK_VPA_DSN_DBI 0x1684
+#define MT6357_BUCK_VPA_DSN_DXI 0x1686
+#define MT6357_BUCK_VPA_CON0 0x1688
+#define MT6357_BUCK_VPA_CON1 0x168a
+#define MT6357_BUCK_VPA_CFG0 0x168c
+#define MT6357_BUCK_VPA_CFG1 0x168e
+#define MT6357_BUCK_VPA_OC_CFG 0x1690
+#define MT6357_BUCK_VPA_DBG0 0x1692
+#define MT6357_BUCK_VPA_DBG1 0x1694
+#define MT6357_BUCK_VPA_DBG2 0x1696
+#define MT6357_BUCK_VPA_DLC_CON0 0x1698
+#define MT6357_BUCK_VPA_DLC_CON1 0x169a
+#define MT6357_BUCK_VPA_DLC_CON2 0x169c
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e
+#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0
+#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2
+#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4
+#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6
+#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8
+#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa
+#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac
+#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae
+#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0
+#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2
+#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4
+#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6
+#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8
+#define MT6357_BUCK_ANA_DSN_ID 0x1700
+#define MT6357_BUCK_ANA_DSN_REV0 0x1702
+#define MT6357_BUCK_ANA_DSN_DBI 0x1704
+#define MT6357_BUCK_ANA_DSN_FPI 0x1706
+#define MT6357_SMPS_ANA_CON0 0x1708
+#define MT6357_SMPS_ANA_CON1 0x170a
+#define MT6357_SMPS_ANA_CON2 0x170c
+#define MT6357_VCORE_VPROC_ANA_CON0 0x170e
+#define MT6357_VCORE_VPROC_ANA_CON1 0x1710
+#define MT6357_VCORE_VPROC_ANA_CON2 0x1712
+#define MT6357_VCORE_VPROC_ANA_CON3 0x1714
+#define MT6357_VCORE_VPROC_ANA_CON4 0x1716
+#define MT6357_VCORE_VPROC_ANA_CON5 0x1718
+#define MT6357_VCORE_VPROC_ANA_CON6 0x171a
+#define MT6357_VCORE_VPROC_ANA_CON7 0x171c
+#define MT6357_VCORE_VPROC_ANA_CON8 0x171e
+#define MT6357_VCORE_VPROC_ANA_CON9 0x1720
+#define MT6357_VCORE_VPROC_ANA_CON10 0x1722
+#define MT6357_VCORE_VPROC_ANA_CON11 0x1724
+#define MT6357_VMODEM_ANA_CON0 0x1726
+#define MT6357_VMODEM_ANA_CON1 0x1728
+#define MT6357_VMODEM_ANA_CON2 0x172a
+#define MT6357_VMODEM_ANA_CON3 0x172c
+#define MT6357_VMODEM_ANA_CON4 0x172e
+#define MT6357_VMODEM_ANA_CON5 0x1730
+#define MT6357_VS1_ANA_CON0 0x1732
+#define MT6357_VS1_ANA_CON1 0x1734
+#define MT6357_VS1_ANA_CON2 0x1736
+#define MT6357_VS1_ANA_CON3 0x1738
+#define MT6357_VS1_ANA_CON4 0x173a
+#define MT6357_VS1_ANA_CON5 0x173c
+#define MT6357_VPA_ANA_CON0 0x173e
+#define MT6357_VPA_ANA_CON1 0x1740
+#define MT6357_VPA_ANA_CON2 0x1742
+#define MT6357_VPA_ANA_CON3 0x1744
+#define MT6357_VPA_ANA_CON4 0x1746
+#define MT6357_VPA_ANA_CON5 0x1748
+#define MT6357_BUCK_ANA_ELR_NUM 0x174a
+#define MT6357_SMPS_ELR_0 0x174c
+#define MT6357_SMPS_ELR_1 0x174e
+#define MT6357_SMPS_ELR_2 0x1750
+#define MT6357_SMPS_ELR_3 0x1752
+#define MT6357_SMPS_ELR_4 0x1754
+#define MT6357_SMPS_ELR_5 0x1756
+#define MT6357_VCORE_VPROC_ELR_0 0x1758
+#define MT6357_VCORE_VPROC_ELR_1 0x175a
+#define MT6357_VCORE_VPROC_ELR_2 0x175c
+#define MT6357_VCORE_VPROC_ELR_3 0x175e
+#define MT6357_VCORE_VPROC_ELR_4 0x1760
+#define MT6357_VMODEM_ELR_0 0x1762
+#define MT6357_VMODEM_ELR_1 0x1764
+#define MT6357_VMODEM_ELR_2 0x1766
+#define MT6357_VS1_ELR_0 0x1768
+#define MT6357_VS1_ELR_1 0x176a
+#define MT6357_VPA_ELR_0 0x176c
+#define MT6357_LDO_TOP_ID 0x1880
+#define MT6357_LDO_TOP_REV0 0x1882
+#define MT6357_LDO_TOP_DBI 0x1884
+#define MT6357_LDO_TOP_DXI 0x1886
+#define MT6357_LDO_TPM0 0x1888
+#define MT6357_LDO_TPM1 0x188a
+#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c
+#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e
+#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890
+#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892
+#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894
+#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896
+#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898
+#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a
+#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c
+#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e
+#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0
+#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2
+#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4
+#define MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6
+#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8
+#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa
+#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac
+#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae
+#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0
+#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2
+#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4
+#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6
+#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8
+#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba
+#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc
+#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be
+#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0
+#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2
+#define MT6357_LDO_TOP_INT_CON0 0x18c4
+#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6
+#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8
+#define MT6357_LDO_TOP_INT_CON1 0x18ca
+#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc
+#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce
+#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0
+#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2
+#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4
+#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6
+#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8
+#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da
+#define MT6357_LDO_TOP_INT_STATUS0 0x18dc
+#define MT6357_LDO_TOP_INT_STATUS1 0x18de
+#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0
+#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2
+#define MT6357_LDO_TEST_CON0 0x18e4
+#define MT6357_LDO_TOP_WDT_CON0 0x18e6
+#define MT6357_LDO_TOP_RSV_CON0 0x18e8
+#define MT6357_LDO_TOP_RSV_CON1 0x18ea
+#define MT6357_LDO_OCFB0 0x18ec
+#define MT6357_LDO_LP_PROTECTION 0x18ee
+#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0
+#define MT6357_LDO_GON0_DSN_ID 0x1900
+#define MT6357_LDO_GON0_DSN_REV0 0x1902
+#define MT6357_LDO_GON0_DSN_DBI 0x1904
+#define MT6357_LDO_GON0_DSN_DXI 0x1906
+#define MT6357_LDO_VXO22_CON0 0x1908
+#define MT6357_LDO_VXO22_OP_EN 0x190a
+#define MT6357_LDO_VXO22_OP_EN_SET 0x190c
+#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e
+#define MT6357_LDO_VXO22_OP_CFG 0x1910
+#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912
+#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914
+#define MT6357_LDO_VXO22_CON1 0x1916
+#define MT6357_LDO_VXO22_CON2 0x1918
+#define MT6357_LDO_VXO22_CON3 0x191a
+#define MT6357_LDO_VAUX18_CON0 0x191c
+#define MT6357_LDO_VAUX18_OP_EN 0x191e
+#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920
+#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922
+#define MT6357_LDO_VAUX18_OP_CFG 0x1924
+#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926
+#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928
+#define MT6357_LDO_VAUX18_CON1 0x192a
+#define MT6357_LDO_VAUX18_CON2 0x192c
+#define MT6357_LDO_VAUX18_CON3 0x192e
+#define MT6357_LDO_VAUD28_CON0 0x1930
+#define MT6357_LDO_VAUD28_OP_EN 0x1932
+#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934
+#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936
+#define MT6357_LDO_VAUD28_OP_CFG 0x1938
+#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a
+#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c
+#define MT6357_LDO_VAUD28_CON1 0x193e
+#define MT6357_LDO_VAUD28_CON2 0x1940
+#define MT6357_LDO_VAUD28_CON3 0x1942
+#define MT6357_LDO_VIO28_CON0 0x1944
+#define MT6357_LDO_VIO28_OP_EN 0x1946
+#define MT6357_LDO_VIO28_OP_EN_SET 0x1948
+#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a
+#define MT6357_LDO_VIO28_OP_CFG 0x194c
+#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e
+#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950
+#define MT6357_LDO_VIO28_CON1 0x1952
+#define MT6357_LDO_VIO28_CON2 0x1954
+#define MT6357_LDO_VIO28_CON3 0x1956
+#define MT6357_LDO_VIO18_CON0 0x1958
+#define MT6357_LDO_VIO18_OP_EN 0x195a
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c
+#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e
+#define MT6357_LDO_VIO18_OP_CFG 0x1960
+#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962
+#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964
+#define MT6357_LDO_VIO18_CON1 0x1966
+#define MT6357_LDO_VIO18_CON2 0x1968
+#define MT6357_LDO_VIO18_CON3 0x196a
+#define MT6357_LDO_VDRAM_CON0 0x196c
+#define MT6357_LDO_VDRAM_OP_EN 0x196e
+#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970
+#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972
+#define MT6357_LDO_VDRAM_OP_CFG 0x1974
+#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976
+#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978
+#define MT6357_LDO_VDRAM_CON1 0x197a
+#define MT6357_LDO_VDRAM_CON2 0x197c
+#define MT6357_LDO_VDRAM_CON3 0x197e
+#define MT6357_LDO_GON1_DSN_ID 0x1980
+#define MT6357_LDO_GON1_DSN_REV0 0x1982
+#define MT6357_LDO_GON1_DSN_DBI 0x1984
+#define MT6357_LDO_GON1_DSN_DXI 0x1986
+#define MT6357_LDO_VEMC_CON0 0x1988
+#define MT6357_LDO_VEMC_OP_EN 0x198a
+#define MT6357_LDO_VEMC_OP_EN_SET 0x198c
+#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e
+#define MT6357_LDO_VEMC_OP_CFG 0x1990
+#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992
+#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994
+#define MT6357_LDO_VEMC_CON1 0x1996
+#define MT6357_LDO_VEMC_CON2 0x1998
+#define MT6357_LDO_VEMC_CON3 0x199a
+#define MT6357_LDO_VUSB33_CON0_0 0x199c
+#define MT6357_LDO_VUSB33_OP_EN 0x199e
+#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0
+#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2
+#define MT6357_LDO_VUSB33_OP_CFG 0x19a4
+#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6
+#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8
+#define MT6357_LDO_VUSB33_CON0_1 0x19aa
+#define MT6357_LDO_VUSB33_CON1 0x19ac
+#define MT6357_LDO_VUSB33_CON2 0x19ae
+#define MT6357_LDO_VUSB33_CON3 0x19b0
+#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2
+#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4
+#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6
+#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8
+#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba
+#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc
+#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be
+#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4
+#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6
+#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8
+#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca
+#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc
+#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce
+#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0
+#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2
+#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4
+#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2
+#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4
+#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6
+#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8
+#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea
+#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec
+#define MT6357_LDO_VSRAM_PROC_SP 0x19ee
+#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0
+#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2
+#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4
+#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6
+#define MT6357_LDO_GON1_ELR_NUM 0x19f8
+#define MT6357_LDO_VSRAM_CON0 0x19fa
+#define MT6357_LDO_VSRAM_CON1 0x19fc
+#define MT6357_LDO_VSRAM_CON2 0x19fe
+#define MT6357_LDO_GOFF0_DSN_ID 0x1a00
+#define MT6357_LDO_GOFF0_DSN_REV0 0x1a02
+#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04
+#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06
+#define MT6357_LDO_VFE28_CON0 0x1a08
+#define MT6357_LDO_VFE28_OP_EN 0x1a0a
+#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c
+#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e
+#define MT6357_LDO_VFE28_OP_CFG 0x1a10
+#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12
+#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14
+#define MT6357_LDO_VFE28_CON1 0x1a16
+#define MT6357_LDO_VFE28_CON2 0x1a18
+#define MT6357_LDO_VFE28_CON3 0x1a1a
+#define MT6357_LDO_VRF18_CON0 0x1a1c
+#define MT6357_LDO_VRF18_OP_EN 0x1a1e
+#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20
+#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22
+#define MT6357_LDO_VRF18_OP_CFG 0x1a24
+#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26
+#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28
+#define MT6357_LDO_VRF18_CON1 0x1a2a
+#define MT6357_LDO_VRF18_CON2 0x1a2c
+#define MT6357_LDO_VRF18_CON3 0x1a2e
+#define MT6357_LDO_VRF12_CON0 0x1a30
+#define MT6357_LDO_VRF12_OP_EN 0x1a32
+#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34
+#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36
+#define MT6357_LDO_VRF12_OP_CFG 0x1a38
+#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a
+#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c
+#define MT6357_LDO_VRF12_CON1 0x1a3e
+#define MT6357_LDO_VRF12_CON2 0x1a40
+#define MT6357_LDO_VRF12_CON3 0x1a42
+#define MT6357_LDO_VEFUSE_CON0 0x1a44
+#define MT6357_LDO_VEFUSE_OP_EN 0x1a46
+#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48
+#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a
+#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c
+#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e
+#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50
+#define MT6357_LDO_VEFUSE_CON1 0x1a52
+#define MT6357_LDO_VEFUSE_CON2 0x1a54
+#define MT6357_LDO_VEFUSE_CON3 0x1a56
+#define MT6357_LDO_VCN18_CON0 0x1a58
+#define MT6357_LDO_VCN18_OP_EN 0x1a5a
+#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c
+#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e
+#define MT6357_LDO_VCN18_OP_CFG 0x1a60
+#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62
+#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64
+#define MT6357_LDO_VCN18_CON1 0x1a66
+#define MT6357_LDO_VCN18_CON2 0x1a68
+#define MT6357_LDO_VCN18_CON3 0x1a6a
+#define MT6357_LDO_VCAMA_CON0 0x1a6c
+#define MT6357_LDO_VCAMA_OP_EN 0x1a6e
+#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70
+#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72
+#define MT6357_LDO_VCAMA_OP_CFG 0x1a74
+#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76
+#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78
+#define MT6357_LDO_VCAMA_CON1 0x1a7a
+#define MT6357_LDO_VCAMA_CON2 0x1a7c
+#define MT6357_LDO_VCAMA_CON3 0x1a7e
+#define MT6357_LDO_GOFF1_DSN_ID 0x1a80
+#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82
+#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84
+#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86
+#define MT6357_LDO_VCAMD_CON0 0x1a88
+#define MT6357_LDO_VCAMD_OP_EN 0x1a8a
+#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c
+#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e
+#define MT6357_LDO_VCAMD_OP_CFG 0x1a90
+#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92
+#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94
+#define MT6357_LDO_VCAMD_CON1 0x1a96
+#define MT6357_LDO_VCAMD_CON2 0x1a98
+#define MT6357_LDO_VCAMD_CON3 0x1a9a
+#define MT6357_LDO_VCAMIO_CON0 0x1a9c
+#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e
+#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0
+#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2
+#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4
+#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6
+#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8
+#define MT6357_LDO_VCAMIO_CON1 0x1aaa
+#define MT6357_LDO_VCAMIO_CON2 0x1aac
+#define MT6357_LDO_VCAMIO_CON3 0x1aae
+#define MT6357_LDO_VMC_CON0 0x1ab0
+#define MT6357_LDO_VMC_OP_EN 0x1ab2
+#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4
+#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6
+#define MT6357_LDO_VMC_OP_CFG 0x1ab8
+#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba
+#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc
+#define MT6357_LDO_VMC_CON1 0x1abe
+#define MT6357_LDO_VMC_CON2 0x1ac0
+#define MT6357_LDO_VMC_CON3 0x1ac2
+#define MT6357_LDO_VMCH_CON0 0x1ac4
+#define MT6357_LDO_VMCH_OP_EN 0x1ac6
+#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8
+#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca
+#define MT6357_LDO_VMCH_OP_CFG 0x1acc
+#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace
+#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0
+#define MT6357_LDO_VMCH_CON1 0x1ad2
+#define MT6357_LDO_VMCH_CON2 0x1ad4
+#define MT6357_LDO_VMCH_CON3 0x1ad6
+#define MT6357_LDO_VSIM1_CON0 0x1ad8
+#define MT6357_LDO_VSIM1_OP_EN 0x1ada
+#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc
+#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade
+#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0
+#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2
+#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4
+#define MT6357_LDO_VSIM1_CON1 0x1ae6
+#define MT6357_LDO_VSIM1_CON2 0x1ae8
+#define MT6357_LDO_VSIM1_CON3 0x1aea
+#define MT6357_LDO_VSIM2_CON0 0x1aec
+#define MT6357_LDO_VSIM2_OP_EN 0x1aee
+#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0
+#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2
+#define MT6357_LDO_VSIM2_OP_CFG 0x1af4
+#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6
+#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8
+#define MT6357_LDO_VSIM2_CON1 0x1afa
+#define MT6357_LDO_VSIM2_CON2 0x1afc
+#define MT6357_LDO_VSIM2_CON3 0x1afe
+#define MT6357_LDO_GOFF2_DSN_ID 0x1b00
+#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02
+#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04
+#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06
+#define MT6357_LDO_VIBR_CON0 0x1b08
+#define MT6357_LDO_VIBR_OP_EN 0x1b0a
+#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c
+#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e
+#define MT6357_LDO_VIBR_OP_CFG 0x1b10
+#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12
+#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14
+#define MT6357_LDO_VIBR_CON1 0x1b16
+#define MT6357_LDO_VIBR_CON2 0x1b18
+#define MT6357_LDO_VIBR_CON3 0x1b1a
+#define MT6357_LDO_VCN33_CON0_0 0x1b1c
+#define MT6357_LDO_VCN33_OP_EN 0x1b1e
+#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20
+#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22
+#define MT6357_LDO_VCN33_OP_CFG 0x1b24
+#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26
+#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28
+#define MT6357_LDO_VCN33_CON0_1 0x1b2a
+#define MT6357_LDO_VCN33_CON1 0x1b2c
+#define MT6357_LDO_VCN33_CON2 0x1b2e
+#define MT6357_LDO_VCN33_CON3 0x1b30
+#define MT6357_LDO_VLDO28_CON0_0 0x1b32
+#define MT6357_LDO_VLDO28_OP_EN 0x1b34
+#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36
+#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38
+#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a
+#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c
+#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e
+#define MT6357_LDO_VLDO28_CON0_1 0x1b40
+#define MT6357_LDO_VLDO28_CON1 0x1b42
+#define MT6357_LDO_VLDO28_CON2 0x1b44
+#define MT6357_LDO_VLDO28_CON3 0x1b46
+#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48
+#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a
+#define MT6357_LDO_GOFF3_DSN_ID 0x1b80
+#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82
+#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84
+#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86
+#define MT6357_LDO_VCN28_CON0 0x1b88
+#define MT6357_LDO_VCN28_OP_EN 0x1b8a
+#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c
+#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e
+#define MT6357_LDO_VCN28_OP_CFG 0x1b90
+#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92
+#define MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94
+#define MT6357_LDO_VCN28_CON1 0x1b96
+#define MT6357_LDO_VCN28_CON2 0x1b98
+#define MT6357_LDO_VCN28_CON3 0x1b9a
+#define MT6357_VRTC_CON0 0x1b9c
+#define MT6357_LDO_TREF_CON0 0x1b9e
+#define MT6357_LDO_TREF_OP_EN 0x1ba0
+#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2
+#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4
+#define MT6357_LDO_TREF_OP_CFG 0x1ba6
+#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8
+#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa
+#define MT6357_LDO_TREF_CON1 0x1bac
+#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae
+#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0
+#define MT6357_LDO_ANA0_DSN_ID 0x1c00
+#define MT6357_LDO_ANA0_DSN_REV0 0x1c02
+#define MT6357_LDO_ANA0_DSN_DBI 0x1c04
+#define MT6357_LDO_ANA0_DSN_DXI 0x1c06
+#define MT6357_VFE28_ANA_CON0 0x1c08
+#define MT6357_VFE28_ANA_CON1 0x1c0a
+#define MT6357_VCN28_ANA_CON0 0x1c0c
+#define MT6357_VCN28_ANA_CON1 0x1c0e
+#define MT6357_VAUD28_ANA_CON0 0x1c10
+#define MT6357_VAUD28_ANA_CON1 0x1c12
+#define MT6357_VAUX18_ANA_CON0 0x1c14
+#define MT6357_VAUX18_ANA_CON1 0x1c16
+#define MT6357_VXO22_ANA_CON0 0x1c18
+#define MT6357_VXO22_ANA_CON1 0x1c1a
+#define MT6357_VCN33_ANA_CON0 0x1c1c
+#define MT6357_VCN33_ANA_CON1 0x1c1e
+#define MT6357_VEMC_ANA_CON0 0x1c20
+#define MT6357_VEMC_ANA_CON1 0x1c22
+#define MT6357_VLDO28_ANA_CON0 0x1c24
+#define MT6357_VLDO28_ANA_CON1 0x1c26
+#define MT6357_VIO28_ANA_CON0 0x1c28
+#define MT6357_VIO28_ANA_CON1 0x1c2a
+#define MT6357_VIBR_ANA_CON0 0x1c2c
+#define MT6357_VIBR_ANA_CON1 0x1c2e
+#define MT6357_VSIM1_ANA_CON0 0x1c30
+#define MT6357_VSIM1_ANA_CON1 0x1c32
+#define MT6357_VSIM2_ANA_CON0 0x1c34
+#define MT6357_VSIM2_ANA_CON1 0x1c36
+#define MT6357_VMCH_ANA_CON0 0x1c38
+#define MT6357_VMCH_ANA_CON1 0x1c3a
+#define MT6357_VMC_ANA_CON0 0x1c3c
+#define MT6357_VMC_ANA_CON1 0x1c3e
+#define MT6357_VCAMIO_ANA_CON0 0x1c40
+#define MT6357_VCAMIO_ANA_CON1 0x1c42
+#define MT6357_VCN18_ANA_CON0 0x1c44
+#define MT6357_VCN18_ANA_CON1 0x1c46
+#define MT6357_VRF18_ANA_CON0 0x1c48
+#define MT6357_VRF18_ANA_CON1 0x1c4a
+#define MT6357_VIO18_ANA_CON0 0x1c4c
+#define MT6357_VIO18_ANA_CON1 0x1c4e
+#define MT6357_VDRAM_ANA_CON1 0x1c50
+#define MT6357_VRF12_ANA_CON0 0x1c52
+#define MT6357_VRF12_ANA_CON1 0x1c54
+#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56
+#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58
+#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a
+#define MT6357_VFE28_ELR_0 0x1c5c
+#define MT6357_VCN28_ELR_0 0x1c5e
+#define MT6357_VAUD28_ELR_0 0x1c60
+#define MT6357_VAUX18_ELR_0 0x1c62
+#define MT6357_VXO22_ELR_0 0x1c64
+#define MT6357_VCN33_ELR_0 0x1c66
+#define MT6357_VEMC_ELR_0 0x1c68
+#define MT6357_VLDO28_ELR_0 0x1c6a
+#define MT6357_VIO28_ELR_0 0x1c6c
+#define MT6357_VIBR_ELR_0 0x1c6e
+#define MT6357_VSIM1_ELR_0 0x1c70
+#define MT6357_VSIM2_ELR_0 0x1c72
+#define MT6357_VMCH_ELR_0 0x1c74
+#define MT6357_VMC_ELR_0 0x1c76
+#define MT6357_VCAMIO_ELR_0 0x1c78
+#define MT6357_VCN18_ELR_0 0x1c7a
+#define MT6357_VRF18_ELR_0 0x1c7c
+#define MT6357_LDO_ANA1_DSN_ID 0x1c80
+#define MT6357_LDO_ANA1_DSN_REV0 0x1c82
+#define MT6357_LDO_ANA1_DSN_DBI 0x1c84
+#define MT6357_LDO_ANA1_DSN_DXI 0x1c86
+#define MT6357_VUSB33_ANA_CON0 0x1c88
+#define MT6357_VUSB33_ANA_CON1 0x1c8a
+#define MT6357_VCAMA_ANA_CON0 0x1c8c
+#define MT6357_VCAMA_ANA_CON1 0x1c8e
+#define MT6357_VEFUSE_ANA_CON0 0x1c90
+#define MT6357_VEFUSE_ANA_CON1 0x1c92
+#define MT6357_VCAMD_ANA_CON0 0x1c94
+#define MT6357_VCAMD_ANA_CON1 0x1c96
+#define MT6357_LDO_ANA1_ELR_NUM 0x1c98
+#define MT6357_VUSB33_ELR_0 0x1c9a
+#define MT6357_VCAMA_ELR_0 0x1c9c
+#define MT6357_VEFUSE_ELR_0 0x1c9e
+#define MT6357_VCAMD_ELR_0 0x1ca0
+#define MT6357_VIO18_ELR_0 0x1ca2
+#define MT6357_VDRAM_ELR_0 0x1ca4
+#define MT6357_VRF12_ELR_0 0x1ca6
+#define MT6357_VRTC_ELR_0 0x1ca8
+#define MT6357_VDRAM_ELR_1 0x1caa
+#define MT6357_VDRAM_ELR_2 0x1cac
+#define MT6357_XPP_TOP_ID 0x1e00
+#define MT6357_XPP_TOP_REV0 0x1e02
+#define MT6357_XPP_TOP_DBI 0x1e04
+#define MT6357_XPP_TOP_DXI 0x1e06
+#define MT6357_XPP_TPM0 0x1e08
+#define MT6357_XPP_TPM1 0x1e0a
+#define MT6357_XPP_TOP_TEST_OUT 0x1e0c
+#define MT6357_XPP_TOP_TEST_CON0 0x1e0e
+#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10
+#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12
+#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14
+#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16
+#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18
+#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a
+#define MT6357_XPP_TOP_RST_CON0 0x1e1c
+#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e
+#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20
+#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22
+#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24
+#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26
+#define MT6357_DRIVER_BL_DSN_ID 0x1e80
+#define MT6357_DRIVER_BL_DSN_REV0 0x1e82
+#define MT6357_DRIVER_BL_DSN_DBI 0x1e84
+#define MT6357_DRIVER_BL_DSN_DXI 0x1e86
+#define MT6357_ISINK1_CON0 0x1e88
+#define MT6357_ISINK1_CON1 0x1e8a
+#define MT6357_ISINK1_CON2 0x1e8c
+#define MT6357_ISINK1_CON3 0x1e8e
+#define MT6357_ISINK_ANA1 0x1e90
+#define MT6357_ISINK_PHASE_DLY 0x1e92
+#define MT6357_ISINK_SFSTR 0x1e94
+#define MT6357_ISINK_EN_CTRL 0x1e96
+#define MT6357_ISINK_MODE_CTRL 0x1e98
+#define MT6357_DRIVER_ANA_CON0 0x1e9a
+#define MT6357_ISINK_ANA_CON0 0x1e9c
+#define MT6357_ISINK_ANA_CON1 0x1e9e
+#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0
+#define MT6357_DRIVER_BL_ELR_0 0x1ea2
+#define MT6357_DRIVER_CI_DSN_ID 0x1f00
+#define MT6357_DRIVER_CI_DSN_REV0 0x1f02
+#define MT6357_DRIVER_CI_DSN_DBI 0x1f04
+#define MT6357_DRIVER_CI_DSN_DXI 0x1f06
+#define MT6357_CHRIND_CON0 0x1f08
+#define MT6357_CHRIND_CON1 0x1f0a
+#define MT6357_CHRIND_CON2 0x1f0c
+#define MT6357_CHRIND_CON3 0x1f0e
+#define MT6357_CHRIND_CON4 0x1f10
+#define MT6357_CHRIND_EN_CTRL 0x1f12
+#define MT6357_CHRIND_ANA_CON0 0x1f14
+#define MT6357_DRIVER_DL_DSN_ID 0x1f80
+#define MT6357_DRIVER_DL_DSN_REV0 0x1f82
+#define MT6357_DRIVER_DL_DSN_DBI 0x1f84
+#define MT6357_DRIVER_DL_DSN_DXI 0x1f86
+#define MT6357_ISINK2_CON0 0x1f88
+#define MT6357_ISINK3_CON0 0x1f8a
+#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c
+#define MT6357_AUD_TOP_ID 0x2080
+#define MT6357_AUD_TOP_REV0 0x2082
+#define MT6357_AUD_TOP_DBI 0x2084
+#define MT6357_AUD_TOP_DXI 0x2086
+#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088
+#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a
+#define MT6357_AUD_TOP_CKPDN_CON0 0x208c
+#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e
+#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090
+#define MT6357_AUD_TOP_CKSEL_CON0 0x2092
+#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094
+#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096
+#define MT6357_AUD_TOP_CKTST_CON0 0x2098
+#define MT6357_AUD_TOP_RST_CON0 0x209a
+#define MT6357_AUD_TOP_RST_CON0_SET 0x209c
+#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e
+#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0
+#define MT6357_AUD_TOP_INT_CON0 0x20a2
+#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4
+#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6
+#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8
+#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa
+#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac
+#define MT6357_AUD_TOP_INT_STATUS0 0x20ae
+#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0
+#define MT6357_AUD_TOP_INT_MISC_CON0 0x20b2
+#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4
+#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6
+#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8
+#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba
+#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc
+#define MT6357_AUD_TOP_MON_CON0 0x20be
+#define MT6357_AUDIO_DIG_DSN_ID 0x2100
+#define MT6357_AUDIO_DIG_DSN_REV0 0x2102
+#define MT6357_AUDIO_DIG_DSN_DBI 0x2104
+#define MT6357_AUDIO_DIG_DSN_DXI 0x2106
+#define MT6357_AFE_UL_DL_CON0 0x2108
+#define MT6357_AFE_DL_SRC2_CON0_L 0x210a
+#define MT6357_AFE_UL_SRC_CON0_H 0x210c
+#define MT6357_AFE_UL_SRC_CON0_L 0x210e
+#define MT6357_AFE_TOP_CON0 0x2110
+#define MT6357_AUDIO_TOP_CON0 0x2112
+#define MT6357_AFE_MON_DEBUG0 0x2114
+#define MT6357_AFUNC_AUD_CON0 0x2116
+#define MT6357_AFUNC_AUD_CON1 0x2118
+#define MT6357_AFUNC_AUD_CON2 0x211a
+#define MT6357_AFUNC_AUD_CON3 0x211c
+#define MT6357_AFUNC_AUD_CON4 0x211e
+#define MT6357_AFUNC_AUD_CON5 0x2120
+#define MT6357_AFUNC_AUD_CON6 0x2122
+#define MT6357_AFUNC_AUD_MON0 0x2124
+#define MT6357_AUDRC_TUNE_MON0 0x2126
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a
+#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c
+#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e
+#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130
+#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132
+#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c
+#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e
+#define MT6357_AFE_SGEN_CFG0 0x2140
+#define MT6357_AFE_SGEN_CFG1 0x2142
+#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144
+#define MT6357_AFE_DCCLK_CFG0 0x2146
+#define MT6357_AFE_DCCLK_CFG1 0x2148
+#define MT6357_AUDIO_DIG_CFG 0x214a
+#define MT6357_AFE_AUD_PAD_TOP 0x214c
+#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e
+#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150
+#define MT6357_AUDENC_DSN_ID 0x2180
+#define MT6357_AUDENC_DSN_REV0 0x2182
+#define MT6357_AUDENC_DSN_DBI 0x2184
+#define MT6357_AUDENC_DSN_FPI 0x2186
+#define MT6357_AUDENC_ANA_CON0 0x2188
+#define MT6357_AUDENC_ANA_CON1 0x218a
+#define MT6357_AUDENC_ANA_CON2 0x218c
+#define MT6357_AUDENC_ANA_CON3 0x218e
+#define MT6357_AUDENC_ANA_CON4 0x2190
+#define MT6357_AUDENC_ANA_CON5 0x2192
+#define MT6357_AUDENC_ANA_CON6 0x2194
+#define MT6357_AUDENC_ANA_CON7 0x2196
+#define MT6357_AUDENC_ANA_CON8 0x2198
+#define MT6357_AUDENC_ANA_CON9 0x219a
+#define MT6357_AUDENC_ANA_CON10 0x219c
+#define MT6357_AUDENC_ANA_CON11 0x219e
+#define MT6357_AUDDEC_DSN_ID 0x2200
+#define MT6357_AUDDEC_DSN_REV0 0x2202
+#define MT6357_AUDDEC_DSN_DBI 0x2204
+#define MT6357_AUDDEC_DSN_FPI 0x2206
+#define MT6357_AUDDEC_ANA_CON0 0x2208
+#define MT6357_AUDDEC_ANA_CON1 0x220a
+#define MT6357_AUDDEC_ANA_CON2 0x220c
+#define MT6357_AUDDEC_ANA_CON3 0x220e
+#define MT6357_AUDDEC_ANA_CON4 0x2210
+#define MT6357_AUDDEC_ANA_CON5 0x2212
+#define MT6357_AUDDEC_ANA_CON6 0x2214
+#define MT6357_AUDDEC_ANA_CON7 0x2216
+#define MT6357_AUDDEC_ANA_CON8 0x2218
+#define MT6357_AUDDEC_ANA_CON9 0x221a
+#define MT6357_AUDDEC_ANA_CON10 0x221c
+#define MT6357_AUDDEC_ANA_CON11 0x221e
+#define MT6357_AUDDEC_ANA_CON12 0x2220
+#define MT6357_AUDDEC_ANA_CON13 0x2222
+#define MT6357_AUDDEC_ELR_NUM 0x2224
+#define MT6357_AUDDEC_ELR_0 0x2226
+#define MT6357_AUDZCD_DSN_ID 0x2280
+#define MT6357_AUDZCD_DSN_REV0 0x2282
+#define MT6357_AUDZCD_DSN_DBI 0x2284
+#define MT6357_AUDZCD_DSN_FPI 0x2286
+#define MT6357_ZCD_CON0 0x2288
+#define MT6357_ZCD_CON1 0x228a
+#define MT6357_ZCD_CON2 0x228c
+#define MT6357_ZCD_CON3 0x228e
+#define MT6357_ZCD_CON4 0x2290
+#define MT6357_ZCD_CON5 0x2292
+#define MT6357_ACCDET_DSN_DIG_ID 0x2300
+#define MT6357_ACCDET_DSN_DIG_REV0 0x2302
+#define MT6357_ACCDET_DSN_DBI 0x2304
+#define MT6357_ACCDET_DSN_FPI 0x2306
+#define MT6357_ACCDET_CON0 0x2308
+#define MT6357_ACCDET_CON1 0x230a
+#define MT6357_ACCDET_CON2 0x230c
+#define MT6357_ACCDET_CON3 0x230e
+#define MT6357_ACCDET_CON4 0x2310
+#define MT6357_ACCDET_CON5 0x2312
+#define MT6357_ACCDET_CON6 0x2314
+#define MT6357_ACCDET_CON7 0x2316
+#define MT6357_ACCDET_CON8 0x2318
+#define MT6357_ACCDET_CON9 0x231a
+#define MT6357_ACCDET_CON10 0x231c
+#define MT6357_ACCDET_CON11 0x231e
+#define MT6357_ACCDET_CON12 0x2320
+#define MT6357_ACCDET_CON13 0x2322
+#define MT6357_ACCDET_CON14 0x2324
+#define MT6357_ACCDET_CON15 0x2326
+#define MT6357_ACCDET_CON16 0x2328
+#define MT6357_ACCDET_CON17 0x232a
+#define MT6357_ACCDET_CON18 0x232c
+#define MT6357_ACCDET_CON19 0x232e
+#define MT6357_ACCDET_CON20 0x2330
+#define MT6357_ACCDET_CON21 0x2332
+#define MT6357_ACCDET_CON22 0x2334
+#define MT6357_ACCDET_CON23 0x2336
+#define MT6357_ACCDET_CON24 0x2338
+#define MT6357_ACCDET_CON25 0x233a
+#define MT6357_ACCDET_CON26 0x233c
+#define MT6357_ACCDET_CON27 0x233e
+#define MT6357_ACCDET_CON28 0x2340
+
+#endif /* __MFD_MT6357_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
index c5a11b7458d4..68578e2019b0 100644
--- a/include/linux/mfd/mt6358/core.h
+++ b/include/linux/mfd/mt6358/core.h
@@ -6,12 +6,9 @@
#ifndef __MFD_MT6358_CORE_H__
#define __MFD_MT6358_CORE_H__
-#define MT6358_REG_WIDTH 16
-
struct irq_top_t {
int hwirq_base;
unsigned int num_int_regs;
- unsigned int num_int_bits;
unsigned int en_reg;
unsigned int en_reg_shift;
unsigned int sta_reg;
@@ -25,6 +22,7 @@ struct pmic_irq_data {
unsigned short top_int_status_reg;
bool *enable_hwirq;
bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
};
enum mt6358_irq_top_status_shift {
@@ -146,8 +144,8 @@ enum mt6358_irq_numbers {
{ \
.hwirq_base = MT6358_IRQ_##sp##_BASE, \
.num_int_regs = \
- ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \
- .num_int_bits = MT6358_IRQ_##sp##_BITS, \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
.en_reg = MT6358_##sp##_TOP_INT_CON0, \
.en_reg_shift = 0x6, \
.sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
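
The generator macro above sizes num_int_regs by rounding the per-top
interrupt-bit count up to whole registers, now against the shared
MTK_PMIC_REG_WIDTH constant instead of the removed per-chip define. A
minimal sketch of the idiom, assuming the usual 16-bit register width on
these MediaTek PMICs:

/* Illustrative only: ceiling division of a bit count into registers. */
#define MTK_PMIC_REG_WIDTH 16	/* assumed; provided by the core header */

static inline unsigned int mtk_irq_regs_needed(unsigned int bits)
{
	/* 1..16 bits -> 1 register, 17..32 bits -> 2 registers, ... */
	return ((bits - 1) / MTK_PMIC_REG_WIDTH) + 1;
}
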
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
index 2ad0b312aa28..d83e87298ac4 100644
--- a/include/linux/mfd/mt6358/registers.h
+++ b/include/linux/mfd/mt6358/registers.h
@@ -8,6 +8,8 @@
/* PMIC Registers */
#define MT6358_SWCID 0xa
+#define MT6358_TOPSTATUS 0x28
+#define MT6358_TOP_RST_MISC 0x14c
#define MT6358_MISC_TOP_INT_CON0 0x188
#define MT6358_MISC_TOP_INT_STATUS0 0x194
#define MT6358_TOP_INT_STATUS0 0x19e
@@ -92,6 +94,10 @@
#define MT6358_BUCK_VCORE_CON0 0x1488
#define MT6358_BUCK_VCORE_DBG0 0x149e
#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4
+#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6
+#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1
+#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1
#define MT6358_BUCK_VCORE_ELR0 0x14aa
#define MT6358_BUCK_VGPU_CON0 0x1508
#define MT6358_BUCK_VGPU_DBG0 0x151e
@@ -167,6 +173,9 @@
#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1
#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
@@ -253,6 +262,12 @@
#define MT6358_LDO_VBIF28_CON3 0x1db0
#define MT6358_VCAMA1_ANA_CON0 0x1e08
#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VFE28_ANA_CON0 0x1e10
+#define MT6358_VCN28_ANA_CON0 0x1e14
+#define MT6358_VBIF28_ANA_CON0 0x1e18
+#define MT6358_VAUD28_ANA_CON0 0x1e1c
+#define MT6358_VAUX18_ANA_CON0 0x1e20
+#define MT6358_VXO22_ANA_CON0 0x1e24
#define MT6358_VCN33_ANA_CON0 0x1e28
#define MT6358_VSIM1_ANA_CON0 0x1e2c
#define MT6358_VSIM2_ANA_CON0 0x1e30
@@ -279,4 +294,21 @@
#define MT6358_AUD_TOP_INT_CON0 0x2228
#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+/*
+ * MT6366 has no VCAM*, but has other regulators in its place. The names
+ * keep the MT6358 prefix for ease of use in the regulator driver.
+ */
+#define MT6358_LDO_VSRAM_CON5 0x1bf8
+#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0
+#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1
+#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2
+#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0
+#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1
+#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2
+#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0
+#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6
+#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8
+#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0
+#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0
+
#endif /* __MFD_MT6358_REGISTERS_H__ */
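
The MT6366 names introduced above are plain preprocessor aliases, so a
driver may use either spelling and reach the same MT6358 register. An
illustrative compile-time check, assuming kernel context and that the
aliased MT6358_LDO_VCAMA1_CON0 define exists elsewhere in this header:

#include <linux/build_bug.h>

/* The alias collapses to the MT6358 address at preprocessing time. */
static_assert(MT6358_LDO_VM18_CON0 == MT6358_LDO_VCAMA1_CON0);
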
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7,
+ MT6359_MISC_TOP,
+};
+
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6359_CORE_H__ */
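
As a usage sketch, an interrupt driver would typically expand the
generator macro once per top-level domain to build the pmic_ints table
referenced by struct pmic_irq_data; the table below is hypothetical, but
it mirrors how the MT6358 equivalent is consumed:

/* Hypothetical per-chip table built from MT6359_TOP_GEN() above. */
static const struct irq_top_t mt6359_ints[] = {
	MT6359_TOP_GEN(BUCK),
	MT6359_TOP_GEN(LDO),
	MT6359_TOP_GEN(PSC),
	MT6359_TOP_GEN(SCK),
	MT6359_TOP_GEN(BM),
	MT6359_TOP_GEN(HK),
	MT6359_TOP_GEN(AUD),
	MT6359_TOP_GEN(MISC),
};
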
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2a4394a27b1c
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_TOPSTATUS 0x2a
+#define MT6359_TOP_RST_MISC 0x14c
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h
deleted file mode 100644
index ea1304035d4d..000000000000
--- a/include/linux/mfd/mt6360.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2020 MediaTek Inc.
- */
-
-#ifndef __MT6360_H__
-#define __MT6360_H__
-
-#include <linux/regmap.h>
-
-enum {
- MT6360_SLAVE_PMU = 0,
- MT6360_SLAVE_PMIC,
- MT6360_SLAVE_LDO,
- MT6360_SLAVE_TCPC,
- MT6360_SLAVE_MAX,
-};
-
-#define MT6360_PMU_SLAVEID (0x34)
-#define MT6360_PMIC_SLAVEID (0x1A)
-#define MT6360_LDO_SLAVEID (0x64)
-#define MT6360_TCPC_SLAVEID (0x4E)
-
-struct mt6360_pmu_data {
- struct i2c_client *i2c[MT6360_SLAVE_MAX];
- struct device *dev;
- struct regmap *regmap;
- struct regmap_irq_chip_data *irq_data;
- unsigned int chip_rev;
-};
-
-/* PMU register definition */
-#define MT6360_PMU_DEV_INFO (0x00)
-#define MT6360_PMU_CORE_CTRL1 (0x01)
-#define MT6360_PMU_RST1 (0x02)
-#define MT6360_PMU_CRCEN (0x03)
-#define MT6360_PMU_RST_PAS_CODE1 (0x04)
-#define MT6360_PMU_RST_PAS_CODE2 (0x05)
-#define MT6360_PMU_CORE_CTRL2 (0x06)
-#define MT6360_PMU_TM_PAS_CODE1 (0x07)
-#define MT6360_PMU_TM_PAS_CODE2 (0x08)
-#define MT6360_PMU_TM_PAS_CODE3 (0x09)
-#define MT6360_PMU_TM_PAS_CODE4 (0x0A)
-#define MT6360_PMU_IRQ_IND (0x0B)
-#define MT6360_PMU_IRQ_MASK (0x0C)
-#define MT6360_PMU_IRQ_SET (0x0D)
-#define MT6360_PMU_SHDN_CTRL (0x0E)
-#define MT6360_PMU_TM_INF (0x0F)
-#define MT6360_PMU_I2C_CTRL (0x10)
-#define MT6360_PMU_CHG_CTRL1 (0x11)
-#define MT6360_PMU_CHG_CTRL2 (0x12)
-#define MT6360_PMU_CHG_CTRL3 (0x13)
-#define MT6360_PMU_CHG_CTRL4 (0x14)
-#define MT6360_PMU_CHG_CTRL5 (0x15)
-#define MT6360_PMU_CHG_CTRL6 (0x16)
-#define MT6360_PMU_CHG_CTRL7 (0x17)
-#define MT6360_PMU_CHG_CTRL8 (0x18)
-#define MT6360_PMU_CHG_CTRL9 (0x19)
-#define MT6360_PMU_CHG_CTRL10 (0x1A)
-#define MT6360_PMU_CHG_CTRL11 (0x1B)
-#define MT6360_PMU_CHG_CTRL12 (0x1C)
-#define MT6360_PMU_CHG_CTRL13 (0x1D)
-#define MT6360_PMU_CHG_CTRL14 (0x1E)
-#define MT6360_PMU_CHG_CTRL15 (0x1F)
-#define MT6360_PMU_CHG_CTRL16 (0x20)
-#define MT6360_PMU_CHG_AICC_RESULT (0x21)
-#define MT6360_PMU_DEVICE_TYPE (0x22)
-#define MT6360_PMU_QC_CONTROL1 (0x23)
-#define MT6360_PMU_QC_CONTROL2 (0x24)
-#define MT6360_PMU_QC30_CONTROL1 (0x25)
-#define MT6360_PMU_QC30_CONTROL2 (0x26)
-#define MT6360_PMU_USB_STATUS1 (0x27)
-#define MT6360_PMU_QC_STATUS1 (0x28)
-#define MT6360_PMU_QC_STATUS2 (0x29)
-#define MT6360_PMU_CHG_PUMP (0x2A)
-#define MT6360_PMU_CHG_CTRL17 (0x2B)
-#define MT6360_PMU_CHG_CTRL18 (0x2C)
-#define MT6360_PMU_CHRDET_CTRL1 (0x2D)
-#define MT6360_PMU_CHRDET_CTRL2 (0x2E)
-#define MT6360_PMU_DPDN_CTRL (0x2F)
-#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30)
-#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31)
-#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32)
-#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33)
-#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34)
-#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35)
-#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36)
-#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37)
-#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38)
-#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39)
-#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A)
-#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B)
-#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C)
-#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D)
-#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E)
-#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F)
-#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40)
-#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41)
-#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42)
-#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43)
-#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44)
-#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45)
-#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46)
-#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47)
-#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48)
-#define MT6360_PMU_BC12_CTRL (0x49)
-#define MT6360_PMU_CHG_STAT (0x4A)
-#define MT6360_PMU_RESV1 (0x4B)
-#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E)
-#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F)
-#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50)
-#define MT6360_PMU_TYPEC_OTP_CTRL (0x51)
-#define MT6360_PMU_ADC_BAT_DATA_H (0x52)
-#define MT6360_PMU_ADC_BAT_DATA_L (0x53)
-#define MT6360_PMU_IMID_BACKBST_ON (0x54)
-#define MT6360_PMU_IMID_BACKBST_OFF (0x55)
-#define MT6360_PMU_ADC_CONFIG (0x56)
-#define MT6360_PMU_ADC_EN2 (0x57)
-#define MT6360_PMU_ADC_IDLE_T (0x58)
-#define MT6360_PMU_ADC_RPT_1 (0x5A)
-#define MT6360_PMU_ADC_RPT_2 (0x5B)
-#define MT6360_PMU_ADC_RPT_3 (0x5C)
-#define MT6360_PMU_ADC_RPT_ORG1 (0x5D)
-#define MT6360_PMU_ADC_RPT_ORG2 (0x5E)
-#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F)
-#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60)
-#define MT6360_PMU_CHG_CTRL19 (0x61)
-#define MT6360_PMU_VDDASUPPLY (0x62)
-#define MT6360_PMU_BC12_MANUAL (0x63)
-#define MT6360_PMU_CHGDET_FUNC (0x64)
-#define MT6360_PMU_FOD_CTRL (0x65)
-#define MT6360_PMU_CHG_CTRL20 (0x66)
-#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67)
-#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68)
-#define MT6360_PMU_RESV2 (0x69)
-#define MT6360_PMU_USBID_CTRL1 (0x6D)
-#define MT6360_PMU_USBID_CTRL2 (0x6E)
-#define MT6360_PMU_USBID_CTRL3 (0x6F)
-#define MT6360_PMU_FLED_CFG (0x70)
-#define MT6360_PMU_RESV3 (0x71)
-#define MT6360_PMU_FLED1_CTRL (0x72)
-#define MT6360_PMU_FLED_STRB_CTRL (0x73)
-#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74)
-#define MT6360_PMU_FLED1_TOR_CTRL (0x75)
-#define MT6360_PMU_FLED2_CTRL (0x76)
-#define MT6360_PMU_RESV4 (0x77)
-#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78)
-#define MT6360_PMU_FLED2_TOR_CTRL (0x79)
-#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A)
-#define MT6360_PMU_FLED_VMID_RTM (0x7B)
-#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C)
-#define MT6360_PMU_FLED_PWSEL (0x7D)
-#define MT6360_PMU_FLED_EN (0x7E)
-#define MT6360_PMU_FLED_Hidden1 (0x7F)
-#define MT6360_PMU_RGB_EN (0x80)
-#define MT6360_PMU_RGB1_ISNK (0x81)
-#define MT6360_PMU_RGB2_ISNK (0x82)
-#define MT6360_PMU_RGB3_ISNK (0x83)
-#define MT6360_PMU_RGB_ML_ISNK (0x84)
-#define MT6360_PMU_RGB1_DIM (0x85)
-#define MT6360_PMU_RGB2_DIM (0x86)
-#define MT6360_PMU_RGB3_DIM (0x87)
-#define MT6360_PMU_RESV5 (0x88)
-#define MT6360_PMU_RGB12_Freq (0x89)
-#define MT6360_PMU_RGB34_Freq (0x8A)
-#define MT6360_PMU_RGB1_Tr (0x8B)
-#define MT6360_PMU_RGB1_Tf (0x8C)
-#define MT6360_PMU_RGB1_TON_TOFF (0x8D)
-#define MT6360_PMU_RGB2_Tr (0x8E)
-#define MT6360_PMU_RGB2_Tf (0x8F)
-#define MT6360_PMU_RGB2_TON_TOFF (0x90)
-#define MT6360_PMU_RGB3_Tr (0x91)
-#define MT6360_PMU_RGB3_Tf (0x92)
-#define MT6360_PMU_RGB3_TON_TOFF (0x93)
-#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94)
-#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95)
-#define MT6360_PMU_RESV6 (0x97)
-#define MT6360_PMU_SPARE1 (0x9A)
-#define MT6360_PMU_SPARE2 (0xA0)
-#define MT6360_PMU_SPARE3 (0xB0)
-#define MT6360_PMU_SPARE4 (0xC0)
-#define MT6360_PMU_CHG_IRQ1 (0xD0)
-#define MT6360_PMU_CHG_IRQ2 (0xD1)
-#define MT6360_PMU_CHG_IRQ3 (0xD2)
-#define MT6360_PMU_CHG_IRQ4 (0xD3)
-#define MT6360_PMU_CHG_IRQ5 (0xD4)
-#define MT6360_PMU_CHG_IRQ6 (0xD5)
-#define MT6360_PMU_QC_IRQ (0xD6)
-#define MT6360_PMU_FOD_IRQ (0xD7)
-#define MT6360_PMU_BASE_IRQ (0xD8)
-#define MT6360_PMU_FLED_IRQ1 (0xD9)
-#define MT6360_PMU_FLED_IRQ2 (0xDA)
-#define MT6360_PMU_RGB_IRQ (0xDB)
-#define MT6360_PMU_BUCK1_IRQ (0xDC)
-#define MT6360_PMU_BUCK2_IRQ (0xDD)
-#define MT6360_PMU_LDO_IRQ1 (0xDE)
-#define MT6360_PMU_LDO_IRQ2 (0xDF)
-#define MT6360_PMU_CHG_STAT1 (0xE0)
-#define MT6360_PMU_CHG_STAT2 (0xE1)
-#define MT6360_PMU_CHG_STAT3 (0xE2)
-#define MT6360_PMU_CHG_STAT4 (0xE3)
-#define MT6360_PMU_CHG_STAT5 (0xE4)
-#define MT6360_PMU_CHG_STAT6 (0xE5)
-#define MT6360_PMU_QC_STAT (0xE6)
-#define MT6360_PMU_FOD_STAT (0xE7)
-#define MT6360_PMU_BASE_STAT (0xE8)
-#define MT6360_PMU_FLED_STAT1 (0xE9)
-#define MT6360_PMU_FLED_STAT2 (0xEA)
-#define MT6360_PMU_RGB_STAT (0xEB)
-#define MT6360_PMU_BUCK1_STAT (0xEC)
-#define MT6360_PMU_BUCK2_STAT (0xED)
-#define MT6360_PMU_LDO_STAT1 (0xEE)
-#define MT6360_PMU_LDO_STAT2 (0xEF)
-#define MT6360_PMU_CHG_MASK1 (0xF0)
-#define MT6360_PMU_CHG_MASK2 (0xF1)
-#define MT6360_PMU_CHG_MASK3 (0xF2)
-#define MT6360_PMU_CHG_MASK4 (0xF3)
-#define MT6360_PMU_CHG_MASK5 (0xF4)
-#define MT6360_PMU_CHG_MASK6 (0xF5)
-#define MT6360_PMU_QC_MASK (0xF6)
-#define MT6360_PMU_FOD_MASK (0xF7)
-#define MT6360_PMU_BASE_MASK (0xF8)
-#define MT6360_PMU_FLED_MASK1 (0xF9)
-#define MT6360_PMU_FLED_MASK2 (0xFA)
-#define MT6360_PMU_FAULTB_MASK (0xFB)
-#define MT6360_PMU_BUCK1_MASK (0xFC)
-#define MT6360_PMU_BUCK2_MASK (0xFD)
-#define MT6360_PMU_LDO_MASK1 (0xFE)
-#define MT6360_PMU_LDO_MASK2 (0xFF)
-#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2)
-
-/* MT6360_PMU_IRQ_SET */
-#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1)
-#define MT6360_IRQ_RETRIG BIT(2)
-
-#define CHIP_VEN_MASK (0xF0)
-#define CHIP_VEN_MT6360 (0x50)
-#define CHIP_REV_MASK (0x0F)
-
-#endif /* __MT6360_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 949268581b36..627487e26287 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -12,7 +12,12 @@
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6331_CHIP_ID = 0x20,
+ MT6332_CHIP_ID = 0x20,
+ MT6357_CHIP_ID = 0x57,
MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
+ MT6366_CHIP_ID = 0x66,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 66989a16221a..068ae1c0f0e8 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -36,6 +36,7 @@
#define RTC_AL_MASK_DOW BIT(4)
#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
/* Min, Hour, Dom... register offset to RTC_TC_SEC */
#define RTC_OFFSET_SEC 0
#define RTC_OFFSET_MIN 1
@@ -72,7 +73,6 @@ struct mtk_rtc_data {
};
struct mt6397_rtc {
- struct device *dev;
struct rtc_device *rtc_dev;
/* Protect register access from multiple tasks */
diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h
new file mode 100644
index 000000000000..e5880c346da9
--- /dev/null
+++ b/include/linux/mfd/ntxec.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Jonathan Neuschäfer
+ *
+ * Register access and version information for the Netronix embedded
+ * controller.
+ */
+
+#ifndef NTXEC_H
+#define NTXEC_H
+
+#include <linux/types.h>
+
+struct device;
+struct regmap;
+
+struct ntxec {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/*
+ * Some registers, such as the battery status register (0x41), are
+ * big-endian, but others only have eight significant bits, which are in the
+ * first byte transmitted over I2C (the MSB of the big-endian value).
+ * This convenience function converts an 8-bit value to 16-bit for use in the
+ * second kind of register.
+ */
+static inline u16 ntxec_reg8(u8 value)
+{
+ return value << 8;
+}
+
+/* Known firmware versions */
+#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */
+#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */
+#define NTXEC_VERSION_TOLINO_VISION 0xe135 /* found in Tolino Vision, contains RTC, ADC, PWM, home pad */
+#endif
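
A usage sketch for the helper: for the "eight significant bits" kind of
register, the value is shifted into the high byte before a normal 16-bit
regmap write. The register name and address here are hypothetical:

#include <linux/regmap.h>

#define NTXEC_REG_FOO 0x50	/* hypothetical 8-bit-significant register */

static int ntxec_set_foo(struct ntxec *ec, u8 val)
{
	/* ntxec_reg8() places the 8-bit value in the MSB of the word. */
	return regmap_write(ec->regmap, NTXEC_REG_FOO, ntxec_reg8(val));
}
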
diff --git a/include/linux/mfd/ocelot.h b/include/linux/mfd/ocelot.h
new file mode 100644
index 000000000000..dd72073d2d4f
--- /dev/null
+++ b/include/linux/mfd/ocelot.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2022 Innovative Advantage Inc. */
+
+#ifndef _LINUX_MFD_OCELOT_H
+#define _LINUX_MFD_OCELOT_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct resource;
+
+static inline struct regmap *
+ocelot_regmap_from_resource_optional(struct platform_device *pdev,
+ unsigned int index,
+ const struct regmap_config *config)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+
+ /*
+	 * Don't use _get_and_ioremap_resource() here, since it prints
+	 * "invalid resource" errors, which would simply add confusion.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res) {
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+ return devm_regmap_init_mmio(dev, regs, config);
+ }
+
+	/*
+	 * Fall back to using REG and getting the resource from the parent
+	 * device, which is possible in an MFD configuration.
+	 */
+ if (dev->parent) {
+ res = platform_get_resource(pdev, IORESOURCE_REG, index);
+ if (!res)
+ return NULL;
+
+ return dev_get_regmap(dev->parent, res->name);
+ }
+
+ return NULL;
+}
+
+static inline struct regmap *
+ocelot_regmap_from_resource(struct platform_device *pdev, unsigned int index,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+
+ map = ocelot_regmap_from_resource_optional(pdev, index, config);
+ return map ?: ERR_PTR(-ENOENT);
+}
+
+#endif
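
A short usage sketch of the helpers above: a hypothetical child driver's probe obtains its regmap either from an MMIO resource or, in the MFD case, from the parent device; the regmap_config name is an assumption:

static int example_probe(struct platform_device *pdev)
{
	struct regmap *map;

	/* Index 0: MMIO resource if present, otherwise parent's regmap. */
	map = ocelot_regmap_from_resource(pdev, 0, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... program the block through 'map' ... */
	return 0;
}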
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 1e61c7e9f50d..eda1ffd99c1a 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -16,7 +16,6 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/extcon-provider.h>
-#include <linux/of_gpio.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -129,12 +128,6 @@ struct palmas_pmic_driver_data {
struct regulator_config config);
};
-struct palmas_adc_wakeup_property {
- int adc_channel_number;
- int adc_high_threshold;
- int adc_low_threshold;
-};
-
struct palmas_gpadc_platform_data {
/* Channel 3 current source is only enabled during conversion */
int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */
@@ -153,8 +146,6 @@ struct palmas_gpadc_platform_data {
int start_polarity;
int auto_conversion_period_ms;
- struct palmas_adc_wakeup_property *adc_wakeup1_data;
- struct palmas_adc_wakeup_property *adc_wakeup2_data;
};
struct palmas_reg_init {
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 3f752dc62a6c..539f27f8bd89 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/mfd/pcf50633/backlight.h>
@@ -226,9 +227,6 @@ static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
void pcf50633_irq_free(struct pcf50633 *pcf);
-#ifdef CONFIG_PM
-int pcf50633_irq_suspend(struct pcf50633 *pcf);
-int pcf50633_irq_resume(struct pcf50633 *pcf);
-#endif
+extern const struct dev_pm_ops pcf50633_pm;
#endif
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index e07f6e61cd38..78e167a92483 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -289,6 +289,414 @@ enum rk805_reg {
#define RK805_INT_ALARM_EN (1 << 3)
#define RK805_INT_TIMER_EN (1 << 2)
+/* RK806 */
+#define RK806_POWER_EN0 0x0
+#define RK806_POWER_EN1 0x1
+#define RK806_POWER_EN2 0x2
+#define RK806_POWER_EN3 0x3
+#define RK806_POWER_EN4 0x4
+#define RK806_POWER_EN5 0x5
+#define RK806_POWER_SLP_EN0 0x6
+#define RK806_POWER_SLP_EN1 0x7
+#define RK806_POWER_SLP_EN2 0x8
+#define RK806_POWER_DISCHRG_EN0 0x9
+#define RK806_POWER_DISCHRG_EN1 0xA
+#define RK806_POWER_DISCHRG_EN2 0xB
+#define RK806_BUCK_FB_CONFIG 0xC
+#define RK806_SLP_LP_CONFIG 0xD
+#define RK806_POWER_FPWM_EN0 0xE
+#define RK806_POWER_FPWM_EN1 0xF
+#define RK806_BUCK1_CONFIG 0x10
+#define RK806_BUCK2_CONFIG 0x11
+#define RK806_BUCK3_CONFIG 0x12
+#define RK806_BUCK4_CONFIG 0x13
+#define RK806_BUCK5_CONFIG 0x14
+#define RK806_BUCK6_CONFIG 0x15
+#define RK806_BUCK7_CONFIG 0x16
+#define RK806_BUCK8_CONFIG 0x17
+#define RK806_BUCK9_CONFIG 0x18
+#define RK806_BUCK10_CONFIG 0x19
+#define RK806_BUCK1_ON_VSEL 0x1A
+#define RK806_BUCK2_ON_VSEL 0x1B
+#define RK806_BUCK3_ON_VSEL 0x1C
+#define RK806_BUCK4_ON_VSEL 0x1D
+#define RK806_BUCK5_ON_VSEL 0x1E
+#define RK806_BUCK6_ON_VSEL 0x1F
+#define RK806_BUCK7_ON_VSEL 0x20
+#define RK806_BUCK8_ON_VSEL 0x21
+#define RK806_BUCK9_ON_VSEL 0x22
+#define RK806_BUCK10_ON_VSEL 0x23
+#define RK806_BUCK1_SLP_VSEL 0x24
+#define RK806_BUCK2_SLP_VSEL 0x25
+#define RK806_BUCK3_SLP_VSEL 0x26
+#define RK806_BUCK4_SLP_VSEL 0x27
+#define RK806_BUCK5_SLP_VSEL 0x28
+#define RK806_BUCK6_SLP_VSEL 0x29
+#define RK806_BUCK7_SLP_VSEL 0x2A
+#define RK806_BUCK8_SLP_VSEL 0x2B
+#define RK806_BUCK9_SLP_VSEL 0x2D
+#define RK806_BUCK10_SLP_VSEL 0x2E
+#define RK806_BUCK_DEBUG1 0x30
+#define RK806_BUCK_DEBUG2 0x31
+#define RK806_BUCK_DEBUG3 0x32
+#define RK806_BUCK_DEBUG4 0x33
+#define RK806_BUCK_DEBUG5 0x34
+#define RK806_BUCK_DEBUG6 0x35
+#define RK806_BUCK_DEBUG7 0x36
+#define RK806_BUCK_DEBUG8 0x37
+#define RK806_BUCK_DEBUG9 0x38
+#define RK806_BUCK_DEBUG10 0x39
+#define RK806_BUCK_DEBUG11 0x3A
+#define RK806_BUCK_DEBUG12 0x3B
+#define RK806_BUCK_DEBUG13 0x3C
+#define RK806_BUCK_DEBUG14 0x3D
+#define RK806_BUCK_DEBUG15 0x3E
+#define RK806_BUCK_DEBUG16 0x3F
+#define RK806_BUCK_DEBUG17 0x40
+#define RK806_BUCK_DEBUG18 0x41
+#define RK806_NLDO_IMAX 0x42
+#define RK806_NLDO1_ON_VSEL 0x43
+#define RK806_NLDO2_ON_VSEL 0x44
+#define RK806_NLDO3_ON_VSEL 0x45
+#define RK806_NLDO4_ON_VSEL 0x46
+#define RK806_NLDO5_ON_VSEL 0x47
+#define RK806_NLDO1_SLP_VSEL 0x48
+#define RK806_NLDO2_SLP_VSEL 0x49
+#define RK806_NLDO3_SLP_VSEL 0x4A
+#define RK806_NLDO4_SLP_VSEL 0x4B
+#define RK806_NLDO5_SLP_VSEL 0x4C
+#define RK806_PLDO_IMAX 0x4D
+#define RK806_PLDO1_ON_VSEL 0x4E
+#define RK806_PLDO2_ON_VSEL 0x4F
+#define RK806_PLDO3_ON_VSEL 0x50
+#define RK806_PLDO4_ON_VSEL 0x51
+#define RK806_PLDO5_ON_VSEL 0x52
+#define RK806_PLDO6_ON_VSEL 0x53
+#define RK806_PLDO1_SLP_VSEL 0x54
+#define RK806_PLDO2_SLP_VSEL 0x55
+#define RK806_PLDO3_SLP_VSEL 0x56
+#define RK806_PLDO4_SLP_VSEL 0x57
+#define RK806_PLDO5_SLP_VSEL 0x58
+#define RK806_PLDO6_SLP_VSEL 0x59
+#define RK806_CHIP_NAME 0x5A
+#define RK806_CHIP_VER 0x5B
+#define RK806_OTP_VER 0x5C
+#define RK806_SYS_STS 0x5D
+#define RK806_SYS_CFG0 0x5E
+#define RK806_SYS_CFG1 0x5F
+#define RK806_SYS_OPTION 0x61
+#define RK806_SLEEP_CONFIG0 0x62
+#define RK806_SLEEP_CONFIG1 0x63
+#define RK806_SLEEP_CTR_SEL0 0x64
+#define RK806_SLEEP_CTR_SEL1 0x65
+#define RK806_SLEEP_CTR_SEL2 0x66
+#define RK806_SLEEP_CTR_SEL3 0x67
+#define RK806_SLEEP_CTR_SEL4 0x68
+#define RK806_SLEEP_CTR_SEL5 0x69
+#define RK806_DVS_CTRL_SEL0 0x6A
+#define RK806_DVS_CTRL_SEL1 0x6B
+#define RK806_DVS_CTRL_SEL2 0x6C
+#define RK806_DVS_CTRL_SEL3 0x6D
+#define RK806_DVS_CTRL_SEL4 0x6E
+#define RK806_DVS_CTRL_SEL5 0x6F
+#define RK806_DVS_START_CTRL 0x70
+#define RK806_SLEEP_GPIO 0x71
+#define RK806_SYS_CFG3 0x72
+#define RK806_ON_SOURCE 0x74
+#define RK806_OFF_SOURCE 0x75
+#define RK806_PWRON_KEY 0x76
+#define RK806_INT_STS0 0x77
+#define RK806_INT_MSK0 0x78
+#define RK806_INT_STS1 0x79
+#define RK806_INT_MSK1 0x7A
+#define RK806_GPIO_INT_CONFIG 0x7B
+#define RK806_DATA_REG0 0x7C
+#define RK806_DATA_REG1 0x7D
+#define RK806_DATA_REG2 0x7E
+#define RK806_DATA_REG3 0x7F
+#define RK806_DATA_REG4 0x80
+#define RK806_DATA_REG5 0x81
+#define RK806_DATA_REG6 0x82
+#define RK806_DATA_REG7 0x83
+#define RK806_DATA_REG8 0x84
+#define RK806_DATA_REG9 0x85
+#define RK806_DATA_REG10 0x86
+#define RK806_DATA_REG11 0x87
+#define RK806_DATA_REG12 0x88
+#define RK806_DATA_REG13 0x89
+#define RK806_DATA_REG14 0x8A
+#define RK806_DATA_REG15 0x8B
+#define RK806_TM_REG 0x8C
+#define RK806_OTP_EN_REG 0x8D
+#define RK806_FUNC_OTP_EN_REG 0x8E
+#define RK806_TEST_REG1 0x8F
+#define RK806_TEST_REG2 0x90
+#define RK806_TEST_REG3 0x91
+#define RK806_TEST_REG4 0x92
+#define RK806_TEST_REG5 0x93
+#define RK806_BUCK_VSEL_OTP_REG0 0x94
+#define RK806_BUCK_VSEL_OTP_REG1 0x95
+#define RK806_BUCK_VSEL_OTP_REG2 0x96
+#define RK806_BUCK_VSEL_OTP_REG3 0x97
+#define RK806_BUCK_VSEL_OTP_REG4 0x98
+#define RK806_BUCK_VSEL_OTP_REG5 0x99
+#define RK806_BUCK_VSEL_OTP_REG6 0x9A
+#define RK806_BUCK_VSEL_OTP_REG7 0x9B
+#define RK806_BUCK_VSEL_OTP_REG8 0x9C
+#define RK806_BUCK_VSEL_OTP_REG9 0x9D
+#define RK806_NLDO1_VSEL_OTP_REG0 0x9E
+#define RK806_NLDO1_VSEL_OTP_REG1 0x9F
+#define RK806_NLDO1_VSEL_OTP_REG2 0xA0
+#define RK806_NLDO1_VSEL_OTP_REG3 0xA1
+#define RK806_NLDO1_VSEL_OTP_REG4 0xA2
+#define RK806_PLDO_VSEL_OTP_REG0 0xA3
+#define RK806_PLDO_VSEL_OTP_REG1 0xA4
+#define RK806_PLDO_VSEL_OTP_REG2 0xA5
+#define RK806_PLDO_VSEL_OTP_REG3 0xA6
+#define RK806_PLDO_VSEL_OTP_REG4 0xA7
+#define RK806_PLDO_VSEL_OTP_REG5 0xA8
+#define RK806_BUCK_EN_OTP_REG1 0xA9
+#define RK806_NLDO_EN_OTP_REG1 0xAA
+#define RK806_PLDO_EN_OTP_REG1 0xAB
+#define RK806_BUCK_FB_RES_OTP_REG1 0xAC
+#define RK806_OTP_RESEV_REG0 0xAD
+#define RK806_OTP_RESEV_REG1 0xAE
+#define RK806_OTP_RESEV_REG2 0xAF
+#define RK806_OTP_RESEV_REG3 0xB0
+#define RK806_OTP_RESEV_REG4 0xB1
+#define RK806_BUCK_SEQ_REG0 0xB2
+#define RK806_BUCK_SEQ_REG1 0xB3
+#define RK806_BUCK_SEQ_REG2 0xB4
+#define RK806_BUCK_SEQ_REG3 0xB5
+#define RK806_BUCK_SEQ_REG4 0xB6
+#define RK806_BUCK_SEQ_REG5 0xB7
+#define RK806_BUCK_SEQ_REG6 0xB8
+#define RK806_BUCK_SEQ_REG7 0xB9
+#define RK806_BUCK_SEQ_REG8 0xBA
+#define RK806_BUCK_SEQ_REG9 0xBB
+#define RK806_BUCK_SEQ_REG10 0xBC
+#define RK806_BUCK_SEQ_REG11 0xBD
+#define RK806_BUCK_SEQ_REG12 0xBE
+#define RK806_BUCK_SEQ_REG13 0xBF
+#define RK806_BUCK_SEQ_REG14 0xC0
+#define RK806_BUCK_SEQ_REG15 0xC1
+#define RK806_BUCK_SEQ_REG16 0xC2
+#define RK806_BUCK_SEQ_REG17 0xC3
+#define RK806_HK_TRIM_REG1 0xC4
+#define RK806_HK_TRIM_REG2 0xC5
+#define RK806_BUCK_REF_TRIM_REG1 0xC6
+#define RK806_BUCK_REF_TRIM_REG2 0xC7
+#define RK806_BUCK_REF_TRIM_REG3 0xC8
+#define RK806_BUCK_REF_TRIM_REG4 0xC9
+#define RK806_BUCK_REF_TRIM_REG5 0xCA
+#define RK806_BUCK_OSC_TRIM_REG1 0xCB
+#define RK806_BUCK_OSC_TRIM_REG2 0xCC
+#define RK806_BUCK_OSC_TRIM_REG3 0xCD
+#define RK806_BUCK_OSC_TRIM_REG4 0xCE
+#define RK806_BUCK_OSC_TRIM_REG5 0xCF
+#define RK806_BUCK_TRIM_ZCDIOS_REG1 0xD0
+#define RK806_BUCK_TRIM_ZCDIOS_REG2 0xD1
+#define RK806_NLDO_TRIM_REG1 0xD2
+#define RK806_NLDO_TRIM_REG2 0xD3
+#define RK806_NLDO_TRIM_REG3 0xD4
+#define RK806_PLDO_TRIM_REG1 0xD5
+#define RK806_PLDO_TRIM_REG2 0xD6
+#define RK806_PLDO_TRIM_REG3 0xD7
+#define RK806_TRIM_ICOMP_REG1 0xD8
+#define RK806_TRIM_ICOMP_REG2 0xD9
+#define RK806_EFUSE_CONTROL_REGH 0xDA
+#define RK806_FUSE_PROG_REG 0xDB
+#define RK806_MAIN_FSM_STS_REG 0xDD
+#define RK806_FSM_REG 0xDE
+#define RK806_TOP_RESEV_OFFR 0xEC
+#define RK806_TOP_RESEV_POR 0xED
+#define RK806_BUCK_VRSN_REG1 0xEE
+#define RK806_BUCK_VRSN_REG2 0xEF
+#define RK806_NLDO_RLOAD_SEL_REG1 0xF0
+#define RK806_PLDO_RLOAD_SEL_REG1 0xF1
+#define RK806_PLDO_RLOAD_SEL_REG2 0xF2
+#define RK806_BUCK_CMIN_MX_REG1 0xF3
+#define RK806_BUCK_CMIN_MX_REG2 0xF4
+#define RK806_BUCK_FREQ_SET_REG1 0xF5
+#define RK806_BUCK_FREQ_SET_REG2 0xF6
+#define RK806_BUCK_RS_MEABS_REG1 0xF7
+#define RK806_BUCK_RS_MEABS_REG2 0xF8
+#define RK806_BUCK_RS_ZDLEB_REG1 0xF9
+#define RK806_BUCK_RS_ZDLEB_REG2 0xFA
+#define RK806_BUCK_RSERVE_REG1 0xFB
+#define RK806_BUCK_RSERVE_REG2 0xFC
+#define RK806_BUCK_RSERVE_REG3 0xFD
+#define RK806_BUCK_RSERVE_REG4 0xFE
+#define RK806_BUCK_RSERVE_REG5 0xFF
+
+/* INT_STS Register field definitions */
+#define RK806_INT_STS_PWRON_FALL BIT(0)
+#define RK806_INT_STS_PWRON_RISE BIT(1)
+#define RK806_INT_STS_PWRON BIT(2)
+#define RK806_INT_STS_PWRON_LP BIT(3)
+#define RK806_INT_STS_HOTDIE BIT(4)
+#define RK806_INT_STS_VDC_RISE BIT(5)
+#define RK806_INT_STS_VDC_FALL BIT(6)
+#define RK806_INT_STS_VB_LO BIT(7)
+#define RK806_INT_STS_REV0 BIT(0)
+#define RK806_INT_STS_REV1 BIT(1)
+#define RK806_INT_STS_REV2 BIT(2)
+#define RK806_INT_STS_CRC_ERROR BIT(3)
+#define RK806_INT_STS_SLP3_GPIO BIT(4)
+#define RK806_INT_STS_SLP2_GPIO BIT(5)
+#define RK806_INT_STS_SLP1_GPIO BIT(6)
+#define RK806_INT_STS_WDT BIT(7)
+
+/* SPI command */
+#define RK806_CMD_READ 0
+#define RK806_CMD_WRITE BIT(7)
+#define RK806_CMD_CRC_EN BIT(6)
+#define RK806_CMD_CRC_DIS 0
+#define RK806_CMD_LEN_MSK 0x0f
+#define RK806_REG_H 0x00
+
+#define VERSION_AB 0x01
+
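A sketch of how the SPI command bits above might be combined; this is an assumption derived from the definitions (including the len - 1 length encoding), not taken from a driver:

	u8 cmd[3];

	/* Write 'len' payload bytes, CRC disabled; length assumed len - 1. */
	cmd[0] = RK806_CMD_WRITE | RK806_CMD_CRC_DIS |
		 ((len - 1) & RK806_CMD_LEN_MSK);	/* command byte */
	cmd[1] = reg;			/* register address, low byte */
	cmd[2] = RK806_REG_H;		/* register address, high byte */
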
+enum rk806_reg_id {
+ RK806_ID_DCDC1 = 0,
+ RK806_ID_DCDC2,
+ RK806_ID_DCDC3,
+ RK806_ID_DCDC4,
+ RK806_ID_DCDC5,
+ RK806_ID_DCDC6,
+ RK806_ID_DCDC7,
+ RK806_ID_DCDC8,
+ RK806_ID_DCDC9,
+ RK806_ID_DCDC10,
+
+ RK806_ID_NLDO1,
+ RK806_ID_NLDO2,
+ RK806_ID_NLDO3,
+ RK806_ID_NLDO4,
+ RK806_ID_NLDO5,
+
+ RK806_ID_PLDO1,
+ RK806_ID_PLDO2,
+ RK806_ID_PLDO3,
+ RK806_ID_PLDO4,
+ RK806_ID_PLDO5,
+ RK806_ID_PLDO6,
+ RK806_ID_END,
+};
+
+/* Define the RK806 IRQ numbers */
+enum rk806_irqs {
+ /* INT_STS0 registers */
+ RK806_IRQ_PWRON_FALL,
+ RK806_IRQ_PWRON_RISE,
+ RK806_IRQ_PWRON,
+ RK806_IRQ_PWRON_LP,
+ RK806_IRQ_HOTDIE,
+ RK806_IRQ_VDC_RISE,
+ RK806_IRQ_VDC_FALL,
+ RK806_IRQ_VB_LO,
+
+	/* INT_STS1 registers */
+ RK806_IRQ_REV0,
+ RK806_IRQ_REV1,
+ RK806_IRQ_REV2,
+ RK806_IRQ_CRC_ERROR,
+ RK806_IRQ_SLP3_GPIO,
+ RK806_IRQ_SLP2_GPIO,
+ RK806_IRQ_SLP1_GPIO,
+ RK806_IRQ_WDT,
+};
+
+/* VCC1 Low Voltage Threshold */
+enum rk806_lv_sel {
+ VB_LO_SEL_2800,
+ VB_LO_SEL_2900,
+ VB_LO_SEL_3000,
+ VB_LO_SEL_3100,
+ VB_LO_SEL_3200,
+ VB_LO_SEL_3300,
+ VB_LO_SEL_3400,
+ VB_LO_SEL_3500,
+};
+
+/* System Shutdown Voltage Select */
+enum rk806_uv_sel {
+ VB_UV_SEL_2700,
+ VB_UV_SEL_2800,
+ VB_UV_SEL_2900,
+ VB_UV_SEL_3000,
+ VB_UV_SEL_3100,
+ VB_UV_SEL_3200,
+ VB_UV_SEL_3300,
+ VB_UV_SEL_3400,
+};
+
+/* Pin Function */
+enum rk806_pwrctrl_fun {
+ PWRCTRL_NULL_FUN,
+ PWRCTRL_SLP_FUN,
+ PWRCTRL_POWOFF_FUN,
+ PWRCTRL_RST_FUN,
+ PWRCTRL_DVS_FUN,
+ PWRCTRL_GPIO_FUN,
+};
+
+/* Pin Polarity */
+enum rk806_pin_level {
+ POL_LOW,
+ POL_HIGH,
+};
+
+enum rk806_vsel_ctr_sel {
+ CTR_BY_NO_EFFECT,
+ CTR_BY_PWRCTRL1,
+ CTR_BY_PWRCTRL2,
+ CTR_BY_PWRCTRL3,
+};
+
+enum rk806_dvs_ctr_sel {
+ CTR_SEL_NO_EFFECT,
+ CTR_SEL_DVS_START1,
+ CTR_SEL_DVS_START2,
+ CTR_SEL_DVS_START3,
+};
+
+enum rk806_pin_dr_sel {
+ RK806_PIN_INPUT,
+ RK806_PIN_OUTPUT,
+};
+
+#define RK806_INT_POL_MSK BIT(1)
+#define RK806_INT_POL_H BIT(1)
+#define RK806_INT_POL_L 0
+
+#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
+#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
+#define RK806_SLAVE_RESTART_FUN_OFF 0
+
+#define RK806_SYS_ENB2_2M_MSK BIT(1)
+#define RK806_SYS_ENB2_2M_EN BIT(1)
+#define RK806_SYS_ENB2_2M_OFF 0
+
+enum rk806_int_fun {
+ RK806_INT_ONLY,
+ RK806_INT_ADN_WKUP,
+};
+
+enum rk806_dvs_mode {
+ RK806_DVS_NOT_SUPPORT,
+ RK806_DVS_START1,
+ RK806_DVS_START2,
+ RK806_DVS_START3,
+ RK806_DVS_PWRCTRL1,
+ RK806_DVS_PWRCTRL2,
+ RK806_DVS_PWRCTRL3,
+ RK806_DVS_START_PWRCTR1,
+ RK806_DVS_START_PWRCTR2,
+ RK806_DVS_START_PWRCTR3,
+ RK806_DVS_END,
+};
+
/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
@@ -373,6 +781,7 @@ enum rk805_reg {
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
+#define DEV_RST BIT(2)
#define DEV_OFF BIT(0)
#define RTC_STOP BIT(0)
@@ -437,6 +846,158 @@ enum rk809_reg_id {
#define RK817_RTC_COMP_LSB_REG 0x10
#define RK817_RTC_COMP_MSB_REG 0x11
+/* RK817 Codec Registers */
+#define RK817_CODEC_DTOP_VUCTL 0x12
+#define RK817_CODEC_DTOP_VUCTIME 0x13
+#define RK817_CODEC_DTOP_LPT_SRST 0x14
+#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15
+#define RK817_CODEC_AREF_RTCFG0 0x16
+#define RK817_CODEC_AREF_RTCFG1 0x17
+#define RK817_CODEC_AADC_CFG0 0x18
+#define RK817_CODEC_AADC_CFG1 0x19
+#define RK817_CODEC_DADC_VOLL 0x1a
+#define RK817_CODEC_DADC_VOLR 0x1b
+#define RK817_CODEC_DADC_SR_ACL0 0x1e
+#define RK817_CODEC_DADC_ALC1 0x1f
+#define RK817_CODEC_DADC_ALC2 0x20
+#define RK817_CODEC_DADC_NG 0x21
+#define RK817_CODEC_DADC_HPF 0x22
+#define RK817_CODEC_DADC_RVOLL 0x23
+#define RK817_CODEC_DADC_RVOLR 0x24
+#define RK817_CODEC_AMIC_CFG0 0x27
+#define RK817_CODEC_AMIC_CFG1 0x28
+#define RK817_CODEC_DMIC_PGA_GAIN 0x29
+#define RK817_CODEC_DMIC_LMT1 0x2a
+#define RK817_CODEC_DMIC_LMT2 0x2b
+#define RK817_CODEC_DMIC_NG1 0x2c
+#define RK817_CODEC_DMIC_NG2 0x2d
+#define RK817_CODEC_ADAC_CFG0 0x2e
+#define RK817_CODEC_ADAC_CFG1 0x2f
+#define RK817_CODEC_DDAC_POPD_DACST 0x30
+#define RK817_CODEC_DDAC_VOLL 0x31
+#define RK817_CODEC_DDAC_VOLR 0x32
+#define RK817_CODEC_DDAC_SR_LMT0 0x35
+#define RK817_CODEC_DDAC_LMT1 0x36
+#define RK817_CODEC_DDAC_LMT2 0x37
+#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38
+#define RK817_CODEC_DDAC_RVOLL 0x39
+#define RK817_CODEC_DDAC_RVOLR 0x3a
+#define RK817_CODEC_AHP_ANTI0 0x3b
+#define RK817_CODEC_AHP_ANTI1 0x3c
+#define RK817_CODEC_AHP_CFG0 0x3d
+#define RK817_CODEC_AHP_CFG1 0x3e
+#define RK817_CODEC_AHP_CP 0x3f
+#define RK817_CODEC_ACLASSD_CFG1 0x40
+#define RK817_CODEC_ACLASSD_CFG2 0x41
+#define RK817_CODEC_APLL_CFG0 0x42
+#define RK817_CODEC_APLL_CFG1 0x43
+#define RK817_CODEC_APLL_CFG2 0x44
+#define RK817_CODEC_APLL_CFG3 0x45
+#define RK817_CODEC_APLL_CFG4 0x46
+#define RK817_CODEC_APLL_CFG5 0x47
+#define RK817_CODEC_DI2S_CKM 0x48
+#define RK817_CODEC_DI2S_RSD 0x49
+#define RK817_CODEC_DI2S_RXCR1 0x4a
+#define RK817_CODEC_DI2S_RXCR2 0x4b
+#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c
+#define RK817_CODEC_DI2S_TXCR1 0x4d
+#define RK817_CODEC_DI2S_TXCR2 0x4e
+#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f
+
+/* RK817_CODEC_DI2S_CKM */
+#define RK817_I2S_MODE_MASK (0x1 << 0)
+#define RK817_I2S_MODE_MST (0x1 << 0)
+#define RK817_I2S_MODE_SLV (0x0 << 0)
+
+/* RK817_CODEC_DDAC_MUTE_MIXCTL */
+#define DACMT_MASK (0x1 << 0)
+#define DACMT_ENABLE (0x1 << 0)
+#define DACMT_DISABLE (0x0 << 0)
+
+/* RK817_CODEC_DI2S_RXCR2 */
+#define VDW_RX_24BITS (0x17)
+#define VDW_RX_16BITS (0x0f)
+
+/* RK817_CODEC_DI2S_TXCR2 */
+#define VDW_TX_24BITS (0x17)
+#define VDW_TX_16BITS (0x0f)
+
+/* RK817_CODEC_AMIC_CFG0 */
+#define MIC_DIFF_MASK (0x1 << 7)
+#define MIC_DIFF_DIS (0x0 << 7)
+#define MIC_DIFF_EN (0x1 << 7)
+
+/* RK817 Battery Registers */
+#define RK817_GAS_GAUGE_ADC_CONFIG0 0x50
+#define RK817_GG_EN (0x1 << 7)
+#define RK817_SYS_VOL_ADC_EN (0x1 << 6)
+#define RK817_TS_ADC_EN (0x1 << 5)
+#define RK817_USB_VOL_ADC_EN (0x1 << 4)
+#define RK817_BAT_VOL_ADC_EN (0x1 << 3)
+#define RK817_BAT_CUR_ADC_EN (0x1 << 2)
+
+#define RK817_GAS_GAUGE_ADC_CONFIG1 0x55
+
+#define RK817_VOL_CUR_CALIB_UPD BIT(7)
+
+#define RK817_GAS_GAUGE_GG_CON 0x56
+#define RK817_GAS_GAUGE_GG_STS 0x57
+
+#define RK817_BAT_CON (0x1 << 4)
+#define RK817_RELAX_VOL_UPD (0x3 << 2)
+#define RK817_RELAX_STS (0x1 << 1)
+
+#define RK817_GAS_GAUGE_RELAX_THRE_H 0x58
+#define RK817_GAS_GAUGE_RELAX_THRE_L 0x59
+#define RK817_GAS_GAUGE_OCV_THRE_VOL 0x62
+#define RK817_GAS_GAUGE_OCV_VOL_H 0x63
+#define RK817_GAS_GAUGE_OCV_VOL_L 0x64
+#define RK817_GAS_GAUGE_PWRON_VOL_H 0x6b
+#define RK817_GAS_GAUGE_PWRON_VOL_L 0x6c
+#define RK817_GAS_GAUGE_PWRON_CUR_H 0x6d
+#define RK817_GAS_GAUGE_PWRON_CUR_L 0x6e
+#define RK817_GAS_GAUGE_OFF_CNT 0x6f
+#define RK817_GAS_GAUGE_Q_INIT_H3 0x70
+#define RK817_GAS_GAUGE_Q_INIT_H2 0x71
+#define RK817_GAS_GAUGE_Q_INIT_L1 0x72
+#define RK817_GAS_GAUGE_Q_INIT_L0 0x73
+#define RK817_GAS_GAUGE_Q_PRES_H3 0x74
+#define RK817_GAS_GAUGE_Q_PRES_H2 0x75
+#define RK817_GAS_GAUGE_Q_PRES_L1 0x76
+#define RK817_GAS_GAUGE_Q_PRES_L0 0x77
+#define RK817_GAS_GAUGE_BAT_VOL_H 0x78
+#define RK817_GAS_GAUGE_BAT_VOL_L 0x79
+#define RK817_GAS_GAUGE_BAT_CUR_H 0x7a
+#define RK817_GAS_GAUGE_BAT_CUR_L 0x7b
+#define RK817_GAS_GAUGE_USB_VOL_H 0x7e
+#define RK817_GAS_GAUGE_USB_VOL_L 0x7f
+#define RK817_GAS_GAUGE_SYS_VOL_H 0x80
+#define RK817_GAS_GAUGE_SYS_VOL_L 0x81
+#define RK817_GAS_GAUGE_Q_MAX_H3 0x82
+#define RK817_GAS_GAUGE_Q_MAX_H2 0x83
+#define RK817_GAS_GAUGE_Q_MAX_L1 0x84
+#define RK817_GAS_GAUGE_Q_MAX_L0 0x85
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H 0x8f
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_L 0x90
+#define RK817_GAS_GAUGE_CAL_OFFSET_H 0x91
+#define RK817_GAS_GAUGE_CAL_OFFSET_L 0x92
+#define RK817_GAS_GAUGE_VCALIB0_H 0x93
+#define RK817_GAS_GAUGE_VCALIB0_L 0x94
+#define RK817_GAS_GAUGE_VCALIB1_H 0x95
+#define RK817_GAS_GAUGE_VCALIB1_L 0x96
+#define RK817_GAS_GAUGE_IOFFSET_H 0x97
+#define RK817_GAS_GAUGE_IOFFSET_L 0x98
+#define RK817_GAS_GAUGE_BAT_R1 0x9a
+#define RK817_GAS_GAUGE_BAT_R2 0x9b
+#define RK817_GAS_GAUGE_BAT_R3 0x9c
+#define RK817_GAS_GAUGE_DATA0 0x9d
+#define RK817_GAS_GAUGE_DATA1 0x9e
+#define RK817_GAS_GAUGE_DATA2 0x9f
+#define RK817_GAS_GAUGE_DATA3 0xa0
+#define RK817_GAS_GAUGE_DATA4 0xa1
+#define RK817_GAS_GAUGE_DATA5 0xa2
+#define RK817_GAS_GAUGE_CUR_ADC_K0 0xb0
+
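The Q_* register quadruples above appear to hold 32-bit coulomb-counter values, most significant byte first, as the H3/H2/L1/L0 naming suggests. A hedged sketch of reassembling one value (helper name and byte order are assumptions):

static int example_read_q_pres(struct regmap *map, u32 *val)
{
	u8 buf[4];
	int ret;

	/* H3, H2, L1, L0 are consecutive registers starting at 0x74. */
	ret = regmap_bulk_read(map, RK817_GAS_GAUGE_Q_PRES_H3, buf, 4);
	if (ret)
		return ret;

	*val = ((u32)buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
	return 0;
}
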
#define RK817_POWER_EN_REG(i) (0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
@@ -462,10 +1023,30 @@ enum rk809_reg_id {
#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
#define RK817_BOOST_OTG_CFG (0xde)
+#define RK817_PMIC_CHRG_OUT 0xe4
+#define RK817_CHRG_VOL_SEL (0x07 << 4)
+#define RK817_CHRG_CUR_SEL (0x07 << 0)
+
+#define RK817_PMIC_CHRG_IN 0xe5
+#define RK817_USB_VLIM_EN (0x01 << 7)
+#define RK817_USB_VLIM_SEL (0x07 << 4)
+#define RK817_USB_ILIM_EN (0x01 << 3)
+#define RK817_USB_ILIM_SEL (0x07 << 0)
+#define RK817_PMIC_CHRG_TERM 0xe6
+#define RK817_CHRG_TERM_ANA_DIG (0x01 << 2)
+#define RK817_CHRG_TERM_ANA_SEL (0x03 << 0)
+#define RK817_CHRG_EN (0x01 << 6)
+
+#define RK817_PMIC_CHRG_STS 0xeb
+#define RK817_BAT_EXS BIT(7)
+#define RK817_CHG_STS (0x07 << 4)
+
#define RK817_ID_MSB 0xed
#define RK817_ID_LSB 0xee
#define RK817_SYS_STS 0xf0
+#define RK817_PLUG_IN_STS (0x1 << 6)
+
#define RK817_SYS_CFG(i) (0xf1 + (i))
#define RK817_ON_SOURCE_REG 0xf5
@@ -607,6 +1188,7 @@ enum {
enum {
RK805_ID = 0x8050,
+ RK806_ID = 0x8060,
RK808_ID = 0x0000,
RK809_ID = 0x8090,
RK817_ID = 0x8170,
@@ -614,11 +1196,17 @@ enum {
};
struct rk808 {
- struct i2c_client *i2c;
+ struct device *dev;
struct regmap_irq_chip_data *irq_data;
struct regmap *regmap;
long variant;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
+
+void rk8xx_shutdown(struct device *dev);
+int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap *regmap);
+int rk8xx_suspend(struct device *dev);
+int rk8xx_resume(struct device *dev);
+
#endif /* __LINUX_REGULATOR_RK808_H */
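
With struct rk808 now carrying a generic struct device and the rk8xx_* entry points exported, a bus-specific shim only needs to create a regmap and delegate. A sketch of such a shim (the I2C details and config name are assumptions, not the actual driver):

static int example_i2c_probe(struct i2c_client *client)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Variant and IRQ come from the bus device; the core does the rest. */
	return rk8xx_probe(&client->dev, RK808_ID, client->irq, regmap);
}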
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index fba0df13d9a8..aacb6d51e99c 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -188,6 +188,7 @@
#define RN5T618_CHGOSCSCORESET3 0xd7
#define RN5T618_CHGOSCFREQSET1 0xd8
#define RN5T618_CHGOSCFREQSET2 0xd9
+#define RN5T618_GCHGDET 0xda
#define RN5T618_CONTROL 0xe0
#define RN5T618_SOC 0xe1
#define RN5T618_RE_CAP_H 0xe2
@@ -226,6 +227,15 @@
#define RN5T618_WATCHDOG_WDOGTIM_S 0
#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
+#define RN5T618_POFFHIS_PWRON BIT(0)
+#define RN5T618_POFFHIS_TSHUT BIT(1)
+#define RN5T618_POFFHIS_VINDET BIT(2)
+#define RN5T618_POFFHIS_IODET BIT(3)
+#define RN5T618_POFFHIS_CPU BIT(4)
+#define RN5T618_POFFHIS_WDG BIT(5)
+#define RN5T618_POFFHIS_DCLIM BIT(6)
+#define RN5T618_POFFHIS_N_OE BIT(7)
+
enum {
RN5T618_DCDC1,
RN5T618_DCDC2,
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
deleted file mode 100644
index a57af878fd0c..000000000000
--- a/include/linux/mfd/rohm-bd70528.h
+++ /dev/null
@@ -1,391 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Copyright (C) 2018 ROHM Semiconductors */
-
-#ifndef __LINUX_MFD_BD70528_H__
-#define __LINUX_MFD_BD70528_H__
-
-#include <linux/bits.h>
-#include <linux/device.h>
-#include <linux/mfd/rohm-generic.h>
-#include <linux/mfd/rohm-shared.h>
-#include <linux/regmap.h>
-
-enum {
- BD70528_BUCK1,
- BD70528_BUCK2,
- BD70528_BUCK3,
- BD70528_LDO1,
- BD70528_LDO2,
- BD70528_LDO3,
- BD70528_LED1,
- BD70528_LED2,
-};
-
-struct bd70528_data {
- struct rohm_regmap_dev chip;
- struct mutex rtc_timer_lock;
-};
-
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_LDO_VOLTS 0x20
-
-#define BD70528_REG_BUCK1_EN 0x0F
-#define BD70528_REG_BUCK1_VOLT 0x15
-#define BD70528_REG_BUCK2_EN 0x10
-#define BD70528_REG_BUCK2_VOLT 0x16
-#define BD70528_REG_BUCK3_EN 0x11
-#define BD70528_REG_BUCK3_VOLT 0x17
-#define BD70528_REG_LDO1_EN 0x1b
-#define BD70528_REG_LDO1_VOLT 0x1e
-#define BD70528_REG_LDO2_EN 0x1c
-#define BD70528_REG_LDO2_VOLT 0x1f
-#define BD70528_REG_LDO3_EN 0x1d
-#define BD70528_REG_LDO3_VOLT 0x20
-#define BD70528_REG_LED_CTRL 0x2b
-#define BD70528_REG_LED_VOLT 0x29
-#define BD70528_REG_LED_EN 0x2a
-
-/* main irq registers */
-#define BD70528_REG_INT_MAIN 0x7E
-#define BD70528_REG_INT_MAIN_MASK 0x74
-
-/* 'sub irq' registers */
-#define BD70528_REG_INT_SHDN 0x7F
-#define BD70528_REG_INT_PWR_FLT 0x80
-#define BD70528_REG_INT_VR_FLT 0x81
-#define BD70528_REG_INT_MISC 0x82
-#define BD70528_REG_INT_BAT1 0x83
-#define BD70528_REG_INT_BAT2 0x84
-#define BD70528_REG_INT_RTC 0x85
-#define BD70528_REG_INT_GPIO 0x86
-#define BD70528_REG_INT_OP_FAIL 0x87
-
-#define BD70528_REG_INT_SHDN_MASK 0x75
-#define BD70528_REG_INT_PWR_FLT_MASK 0x76
-#define BD70528_REG_INT_VR_FLT_MASK 0x77
-#define BD70528_REG_INT_MISC_MASK 0x78
-#define BD70528_REG_INT_BAT1_MASK 0x79
-#define BD70528_REG_INT_BAT2_MASK 0x7a
-#define BD70528_REG_INT_RTC_MASK 0x7b
-#define BD70528_REG_INT_GPIO_MASK 0x7c
-#define BD70528_REG_INT_OP_FAIL_MASK 0x7d
-
-/* Reset related 'magic' registers */
-#define BD70528_REG_SHIPMODE 0x03
-#define BD70528_REG_HWRESET 0x04
-#define BD70528_REG_WARMRESET 0x05
-#define BD70528_REG_STANDBY 0x06
-
-/* GPIO registers */
-#define BD70528_REG_GPIO_STATE 0x8F
-
-#define BD70528_REG_GPIO1_IN 0x4d
-#define BD70528_REG_GPIO2_IN 0x4f
-#define BD70528_REG_GPIO3_IN 0x51
-#define BD70528_REG_GPIO4_IN 0x53
-#define BD70528_REG_GPIO1_OUT 0x4e
-#define BD70528_REG_GPIO2_OUT 0x50
-#define BD70528_REG_GPIO3_OUT 0x52
-#define BD70528_REG_GPIO4_OUT 0x54
-
-/* RTC */
-
-#define BD70528_REG_RTC_COUNT_H 0x2d
-#define BD70528_REG_RTC_COUNT_L 0x2e
-#define BD70528_REG_RTC_SEC 0x2f
-#define BD70528_REG_RTC_MINUTE 0x30
-#define BD70528_REG_RTC_HOUR 0x31
-#define BD70528_REG_RTC_WEEK 0x32
-#define BD70528_REG_RTC_DAY 0x33
-#define BD70528_REG_RTC_MONTH 0x34
-#define BD70528_REG_RTC_YEAR 0x35
-
-#define BD70528_REG_RTC_ALM_SEC 0x36
-#define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC
-#define BD70528_REG_RTC_ALM_MINUTE 0x37
-#define BD70528_REG_RTC_ALM_HOUR 0x38
-#define BD70528_REG_RTC_ALM_WEEK 0x39
-#define BD70528_REG_RTC_ALM_DAY 0x3a
-#define BD70528_REG_RTC_ALM_MONTH 0x3b
-#define BD70528_REG_RTC_ALM_YEAR 0x3c
-#define BD70528_REG_RTC_ALM_MASK 0x3d
-#define BD70528_REG_RTC_ALM_REPEAT 0x3e
-#define BD70528_REG_RTC_START BD70528_REG_RTC_SEC
-
-#define BD70528_REG_RTC_WAKE_SEC 0x43
-#define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC
-#define BD70528_REG_RTC_WAKE_MIN 0x44
-#define BD70528_REG_RTC_WAKE_HOUR 0x45
-#define BD70528_REG_RTC_WAKE_CTRL 0x46
-
-#define BD70528_REG_ELAPSED_TIMER_EN 0x42
-#define BD70528_REG_WAKE_EN 0x46
-
-/* WDT registers */
-#define BD70528_REG_WDT_CTRL 0x4A
-#define BD70528_REG_WDT_HOUR 0x49
-#define BD70528_REG_WDT_MINUTE 0x48
-#define BD70528_REG_WDT_SEC 0x47
-
-/* Charger / Battery */
-#define BD70528_REG_CHG_CURR_STAT 0x59
-#define BD70528_REG_CHG_BAT_STAT 0x57
-#define BD70528_REG_CHG_BAT_TEMP 0x58
-#define BD70528_REG_CHG_IN_STAT 0x56
-#define BD70528_REG_CHG_DCIN_ILIM 0x5d
-#define BD70528_REG_CHG_CHG_CURR_WARM 0x61
-#define BD70528_REG_CHG_CHG_CURR_COLD 0x62
-
-/* Masks for main IRQ register bits */
-enum {
- BD70528_INT_SHDN,
-#define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN)
- BD70528_INT_PWR_FLT,
-#define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT)
- BD70528_INT_VR_FLT,
-#define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT)
- BD70528_INT_MISC,
-#define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC)
- BD70528_INT_BAT1,
-#define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1)
- BD70528_INT_RTC,
-#define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC)
- BD70528_INT_GPIO,
-#define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO)
- BD70528_INT_OP_FAIL,
-#define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL)
-};
-
-/* IRQs */
-enum {
- /* Shutdown register IRQs */
- BD70528_INT_LONGPUSH,
- BD70528_INT_WDT,
- BD70528_INT_HWRESET,
- BD70528_INT_RSTB_FAULT,
- BD70528_INT_VBAT_UVLO,
- BD70528_INT_TSD,
- BD70528_INT_RSTIN,
- /* Power failure register IRQs */
- BD70528_INT_BUCK1_FAULT,
- BD70528_INT_BUCK2_FAULT,
- BD70528_INT_BUCK3_FAULT,
- BD70528_INT_LDO1_FAULT,
- BD70528_INT_LDO2_FAULT,
- BD70528_INT_LDO3_FAULT,
- BD70528_INT_LED1_FAULT,
- BD70528_INT_LED2_FAULT,
- /* VR FAULT register IRQs */
- BD70528_INT_BUCK1_OCP,
- BD70528_INT_BUCK2_OCP,
- BD70528_INT_BUCK3_OCP,
- BD70528_INT_LED1_OCP,
- BD70528_INT_LED2_OCP,
- BD70528_INT_BUCK1_FULLON,
- BD70528_INT_BUCK2_FULLON,
- /* PMU register interrupts */
- BD70528_INT_SHORTPUSH,
- BD70528_INT_AUTO_WAKEUP,
- BD70528_INT_STATE_CHANGE,
- /* Charger 1 register IRQs */
- BD70528_INT_BAT_OV_RES,
- BD70528_INT_BAT_OV_DET,
- BD70528_INT_DBAT_DET,
- BD70528_INT_BATTSD_COLD_RES,
- BD70528_INT_BATTSD_COLD_DET,
- BD70528_INT_BATTSD_HOT_RES,
- BD70528_INT_BATTSD_HOT_DET,
- BD70528_INT_CHG_TSD,
- /* Charger 2 register IRQs */
- BD70528_INT_BAT_RMV,
- BD70528_INT_BAT_DET,
- BD70528_INT_DCIN2_OV_RES,
- BD70528_INT_DCIN2_OV_DET,
- BD70528_INT_DCIN2_RMV,
- BD70528_INT_DCIN2_DET,
- BD70528_INT_DCIN1_RMV,
- BD70528_INT_DCIN1_DET,
- /* RTC register IRQs */
- BD70528_INT_RTC_ALARM,
- BD70528_INT_ELPS_TIM,
- /* GPIO register IRQs */
- BD70528_INT_GPIO0,
- BD70528_INT_GPIO1,
- BD70528_INT_GPIO2,
- BD70528_INT_GPIO3,
- /* Invalid operation register IRQs */
- BD70528_INT_BUCK1_DVS_OPFAIL,
- BD70528_INT_BUCK2_DVS_OPFAIL,
- BD70528_INT_BUCK3_DVS_OPFAIL,
- BD70528_INT_LED1_VOLT_OPFAIL,
- BD70528_INT_LED2_VOLT_OPFAIL,
-};
-
-/* Masks */
-#define BD70528_INT_LONGPUSH_MASK 0x1
-#define BD70528_INT_WDT_MASK 0x2
-#define BD70528_INT_HWRESET_MASK 0x4
-#define BD70528_INT_RSTB_FAULT_MASK 0x8
-#define BD70528_INT_VBAT_UVLO_MASK 0x10
-#define BD70528_INT_TSD_MASK 0x20
-#define BD70528_INT_RSTIN_MASK 0x40
-
-#define BD70528_INT_BUCK1_FAULT_MASK 0x1
-#define BD70528_INT_BUCK2_FAULT_MASK 0x2
-#define BD70528_INT_BUCK3_FAULT_MASK 0x4
-#define BD70528_INT_LDO1_FAULT_MASK 0x8
-#define BD70528_INT_LDO2_FAULT_MASK 0x10
-#define BD70528_INT_LDO3_FAULT_MASK 0x20
-#define BD70528_INT_LED1_FAULT_MASK 0x40
-#define BD70528_INT_LED2_FAULT_MASK 0x80
-
-#define BD70528_INT_BUCK1_OCP_MASK 0x1
-#define BD70528_INT_BUCK2_OCP_MASK 0x2
-#define BD70528_INT_BUCK3_OCP_MASK 0x4
-#define BD70528_INT_LED1_OCP_MASK 0x8
-#define BD70528_INT_LED2_OCP_MASK 0x10
-#define BD70528_INT_BUCK1_FULLON_MASK 0x20
-#define BD70528_INT_BUCK2_FULLON_MASK 0x40
-
-#define BD70528_INT_SHORTPUSH_MASK 0x1
-#define BD70528_INT_AUTO_WAKEUP_MASK 0x2
-#define BD70528_INT_STATE_CHANGE_MASK 0x10
-
-#define BD70528_INT_BAT_OV_RES_MASK 0x1
-#define BD70528_INT_BAT_OV_DET_MASK 0x2
-#define BD70528_INT_DBAT_DET_MASK 0x4
-#define BD70528_INT_BATTSD_COLD_RES_MASK 0x8
-#define BD70528_INT_BATTSD_COLD_DET_MASK 0x10
-#define BD70528_INT_BATTSD_HOT_RES_MASK 0x20
-#define BD70528_INT_BATTSD_HOT_DET_MASK 0x40
-#define BD70528_INT_CHG_TSD_MASK 0x80
-
-#define BD70528_INT_BAT_RMV_MASK 0x1
-#define BD70528_INT_BAT_DET_MASK 0x2
-#define BD70528_INT_DCIN2_OV_RES_MASK 0x4
-#define BD70528_INT_DCIN2_OV_DET_MASK 0x8
-#define BD70528_INT_DCIN2_RMV_MASK 0x10
-#define BD70528_INT_DCIN2_DET_MASK 0x20
-#define BD70528_INT_DCIN1_RMV_MASK 0x40
-#define BD70528_INT_DCIN1_DET_MASK 0x80
-
-#define BD70528_INT_RTC_ALARM_MASK 0x1
-#define BD70528_INT_ELPS_TIM_MASK 0x2
-
-#define BD70528_INT_GPIO0_MASK 0x1
-#define BD70528_INT_GPIO1_MASK 0x2
-#define BD70528_INT_GPIO2_MASK 0x4
-#define BD70528_INT_GPIO3_MASK 0x8
-
-#define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1
-#define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2
-#define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4
-#define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10
-#define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20
-
-#define BD70528_DEBOUNCE_MASK 0x3
-
-#define BD70528_DEBOUNCE_DISABLE 0
-#define BD70528_DEBOUNCE_15MS 1
-#define BD70528_DEBOUNCE_30MS 2
-#define BD70528_DEBOUNCE_50MS 3
-
-#define BD70528_GPIO_DRIVE_MASK 0x2
-#define BD70528_GPIO_PUSH_PULL 0x0
-#define BD70528_GPIO_OPEN_DRAIN 0x2
-
-#define BD70528_GPIO_OUT_EN_MASK 0x80
-#define BD70528_GPIO_OUT_ENABLE 0x80
-#define BD70528_GPIO_OUT_DISABLE 0x0
-
-#define BD70528_GPIO_OUT_HI 0x1
-#define BD70528_GPIO_OUT_LO 0x0
-#define BD70528_GPIO_OUT_MASK 0x1
-
-#define BD70528_GPIO_IN_STATE_BASE 1
-
-/* RTC masks to mask out reserved bits */
-
-#define BD70528_MASK_ELAPSED_TIMER_EN 0x1
-/* Mask second, min and hour fields
- * HW would support ALM irq for over 24h
- * (by setting day, month and year too)
- * but as we wish to keep this same as for
- * wake-up we limit ALM to 24H and only
- * unmask sec, min and hour
- */
-#define BD70528_MASK_WAKE_EN 0x1
-
-/* WDT masks */
-#define BD70528_MASK_WDT_EN 0x1
-#define BD70528_MASK_WDT_HOUR 0x1
-#define BD70528_MASK_WDT_MINUTE 0x7f
-#define BD70528_MASK_WDT_SEC 0x7f
-
-#define BD70528_WDT_STATE_BIT 0x1
-#define BD70528_ELAPSED_STATE_BIT 0x2
-#define BD70528_WAKE_STATE_BIT 0x4
-
-/* Charger masks */
-#define BD70528_MASK_CHG_STAT 0x7f
-#define BD70528_MASK_CHG_BAT_TIMER 0x20
-#define BD70528_MASK_CHG_BAT_OVERVOLT 0x10
-#define BD70528_MASK_CHG_BAT_DETECT 0x1
-#define BD70528_MASK_CHG_DCIN1_UVLO 0x1
-#define BD70528_MASK_CHG_DCIN_ILIM 0x3f
-#define BD70528_MASK_CHG_CHG_CURR 0x1f
-#define BD70528_MASK_CHG_TRICKLE_CURR 0x10
-
-/*
- * Note, external battery register is the lonely rider at
- * address 0xc5. See how to stuff that in the regmap
- */
-#define BD70528_MAX_REGISTER 0x94
-
-/* Buck control masks */
-#define BD70528_MASK_RUN_EN 0x4
-#define BD70528_MASK_STBY_EN 0x2
-#define BD70528_MASK_IDLE_EN 0x1
-#define BD70528_MASK_LED1_EN 0x1
-#define BD70528_MASK_LED2_EN 0x10
-
-#define BD70528_MASK_BUCK_VOLT 0xf
-#define BD70528_MASK_LDO_VOLT 0x1f
-#define BD70528_MASK_LED1_VOLT 0x1
-#define BD70528_MASK_LED2_VOLT 0x10
-
-/* Misc irq masks */
-#define BD70528_INT_MASK_SHORT_PUSH 1
-#define BD70528_INT_MASK_AUTO_WAKE 2
-#define BD70528_INT_MASK_POWER_STATE 4
-
-#define BD70528_MASK_BUCK_RAMP 0x10
-#define BD70528_SIFT_BUCK_RAMP 4
-
-#if IS_ENABLED(CONFIG_BD70528_WATCHDOG)
-
-int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state);
-void bd70528_wdt_lock(struct rohm_regmap_dev *data);
-void bd70528_wdt_unlock(struct rohm_regmap_dev *data);
-
-#else /* CONFIG_BD70528_WATCHDOG */
-
-static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable,
- int *old_state)
-{
- return 0;
-}
-
-static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data)
-{
-}
-
-static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
-{
-}
-
-#endif /* CONFIG_BD70528_WATCHDOG */
-
-#endif /* __LINUX_MFD_BD70528_H__ */
diff --git a/include/linux/mfd/rohm-bd71815.h b/include/linux/mfd/rohm-bd71815.h
new file mode 100644
index 000000000000..ec6d9612bebe
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71815.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2021 ROHM Semiconductors.
+ *
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ *
+ * Copyright 2014 Embest Technology Co. Ltd. Inc.
+ *
+ * Author: yanglsh@embest-tech.com
+ */
+
+#ifndef _MFD_BD71815_H
+#define _MFD_BD71815_H
+
+#include <linux/regmap.h>
+
+enum {
+ BD71815_BUCK1 = 0,
+ BD71815_BUCK2,
+ BD71815_BUCK3,
+ BD71815_BUCK4,
+ BD71815_BUCK5,
+ /* General Purpose */
+ BD71815_LDO1,
+ BD71815_LDO2,
+ BD71815_LDO3,
+ /* LDOs for SD Card and SD Card Interface */
+ BD71815_LDO4,
+ BD71815_LDO5,
+ /* LDO for DDR Reference Voltage */
+ BD71815_LDODVREF,
+ /* LDO for Low-Power State Retention */
+ BD71815_LDOLPSR,
+ BD71815_WLED,
+ BD71815_REGULATOR_CNT,
+};
+
+#define BD71815_SUPPLY_STATE_ENABLED 0x1
+
+enum {
+ BD71815_REG_DEVICE = 0,
+ BD71815_REG_PWRCTRL,
+ BD71815_REG_BUCK1_MODE,
+ BD71815_REG_BUCK2_MODE,
+ BD71815_REG_BUCK3_MODE,
+ BD71815_REG_BUCK4_MODE,
+ BD71815_REG_BUCK5_MODE,
+ BD71815_REG_BUCK1_VOLT_H,
+ BD71815_REG_BUCK1_VOLT_L,
+ BD71815_REG_BUCK2_VOLT_H,
+ BD71815_REG_BUCK2_VOLT_L,
+ BD71815_REG_BUCK3_VOLT,
+ BD71815_REG_BUCK4_VOLT,
+ BD71815_REG_BUCK5_VOLT,
+ BD71815_REG_LED_CTRL,
+ BD71815_REG_LED_DIMM,
+ BD71815_REG_LDO_MODE1,
+ BD71815_REG_LDO_MODE2,
+ BD71815_REG_LDO_MODE3,
+ BD71815_REG_LDO_MODE4,
+ BD71815_REG_LDO1_VOLT,
+ BD71815_REG_LDO2_VOLT,
+ BD71815_REG_LDO3_VOLT,
+ BD71815_REG_LDO4_VOLT,
+ BD71815_REG_LDO5_VOLT_H,
+ BD71815_REG_LDO5_VOLT_L,
+ BD71815_REG_BUCK_PD_DIS,
+ BD71815_REG_LDO_PD_DIS,
+ BD71815_REG_GPO,
+ BD71815_REG_OUT32K,
+ BD71815_REG_SEC,
+ BD71815_REG_MIN,
+ BD71815_REG_HOUR,
+ BD71815_REG_WEEK,
+ BD71815_REG_DAY,
+ BD71815_REG_MONTH,
+ BD71815_REG_YEAR,
+ BD71815_REG_ALM0_SEC,
+
+ BD71815_REG_ALM1_SEC = 0x2C,
+
+ BD71815_REG_ALM0_MASK = 0x33,
+ BD71815_REG_ALM1_MASK,
+ BD71815_REG_ALM2,
+ BD71815_REG_TRIM,
+ BD71815_REG_CONF,
+ BD71815_REG_SYS_INIT,
+ BD71815_REG_CHG_STATE,
+ BD71815_REG_CHG_LAST_STATE,
+ BD71815_REG_BAT_STAT,
+ BD71815_REG_DCIN_STAT,
+ BD71815_REG_VSYS_STAT,
+ BD71815_REG_CHG_STAT,
+ BD71815_REG_CHG_WDT_STAT,
+ BD71815_REG_BAT_TEMP,
+ BD71815_REG_IGNORE_0,
+ BD71815_REG_INHIBIT_0,
+ BD71815_REG_DCIN_CLPS,
+ BD71815_REG_VSYS_REG,
+ BD71815_REG_VSYS_MAX,
+ BD71815_REG_VSYS_MIN,
+ BD71815_REG_CHG_SET1,
+ BD71815_REG_CHG_SET2,
+ BD71815_REG_CHG_WDT_PRE,
+ BD71815_REG_CHG_WDT_FST,
+ BD71815_REG_CHG_IPRE,
+ BD71815_REG_CHG_IFST,
+ BD71815_REG_CHG_IFST_TERM,
+ BD71815_REG_CHG_VPRE,
+ BD71815_REG_CHG_VBAT_1,
+ BD71815_REG_CHG_VBAT_2,
+ BD71815_REG_CHG_VBAT_3,
+ BD71815_REG_CHG_LED_1,
+ BD71815_REG_VF_TH,
+ BD71815_REG_BAT_SET_1,
+ BD71815_REG_BAT_SET_2,
+ BD71815_REG_BAT_SET_3,
+ BD71815_REG_ALM_VBAT_TH_U,
+ BD71815_REG_ALM_VBAT_TH_L,
+ BD71815_REG_ALM_DCIN_TH,
+ BD71815_REG_ALM_VSYS_TH,
+ BD71815_REG_VM_IBAT_U,
+ BD71815_REG_VM_IBAT_L,
+ BD71815_REG_VM_VBAT_U,
+ BD71815_REG_VM_VBAT_L,
+ BD71815_REG_VM_BTMP,
+ BD71815_REG_VM_VTH,
+ BD71815_REG_VM_DCIN_U,
+ BD71815_REG_VM_DCIN_L,
+ BD71815_REG_VM_VSYS,
+ BD71815_REG_VM_VF,
+ BD71815_REG_VM_OCI_PRE_U,
+ BD71815_REG_VM_OCI_PRE_L,
+ BD71815_REG_VM_OCV_PRE_U,
+ BD71815_REG_VM_OCV_PRE_L,
+ BD71815_REG_VM_OCI_PST_U,
+ BD71815_REG_VM_OCI_PST_L,
+ BD71815_REG_VM_OCV_PST_U,
+ BD71815_REG_VM_OCV_PST_L,
+ BD71815_REG_VM_SA_VBAT_U,
+ BD71815_REG_VM_SA_VBAT_L,
+ BD71815_REG_VM_SA_IBAT_U,
+ BD71815_REG_VM_SA_IBAT_L,
+ BD71815_REG_CC_CTRL,
+ BD71815_REG_CC_BATCAP1_TH_U,
+ BD71815_REG_CC_BATCAP1_TH_L,
+ BD71815_REG_CC_BATCAP2_TH_U,
+ BD71815_REG_CC_BATCAP2_TH_L,
+ BD71815_REG_CC_BATCAP3_TH_U,
+ BD71815_REG_CC_BATCAP3_TH_L,
+ BD71815_REG_CC_STAT,
+ BD71815_REG_CC_CCNTD_3,
+ BD71815_REG_CC_CCNTD_2,
+ BD71815_REG_CC_CCNTD_1,
+ BD71815_REG_CC_CCNTD_0,
+ BD71815_REG_CC_CURCD_U,
+ BD71815_REG_CC_CURCD_L,
+ BD71815_REG_VM_OCUR_THR_1,
+ BD71815_REG_VM_OCUR_DUR_1,
+ BD71815_REG_VM_OCUR_THR_2,
+ BD71815_REG_VM_OCUR_DUR_2,
+ BD71815_REG_VM_OCUR_THR_3,
+ BD71815_REG_VM_OCUR_DUR_3,
+ BD71815_REG_VM_OCUR_MON,
+ BD71815_REG_VM_BTMP_OV_THR,
+ BD71815_REG_VM_BTMP_OV_DUR,
+ BD71815_REG_VM_BTMP_LO_THR,
+ BD71815_REG_VM_BTMP_LO_DUR,
+ BD71815_REG_VM_BTMP_MON,
+ BD71815_REG_INT_EN_01,
+
+ BD71815_REG_INT_EN_11 = 0x95,
+ BD71815_REG_INT_EN_12,
+ BD71815_REG_INT_STAT,
+ BD71815_REG_INT_STAT_01,
+ BD71815_REG_INT_STAT_02,
+ BD71815_REG_INT_STAT_03,
+ BD71815_REG_INT_STAT_04,
+ BD71815_REG_INT_STAT_05,
+ BD71815_REG_INT_STAT_06,
+ BD71815_REG_INT_STAT_07,
+ BD71815_REG_INT_STAT_08,
+ BD71815_REG_INT_STAT_09,
+ BD71815_REG_INT_STAT_10,
+ BD71815_REG_INT_STAT_11,
+ BD71815_REG_INT_STAT_12,
+ BD71815_REG_INT_UPDATE,
+
+ BD71815_REG_VM_VSYS_U = 0xC0,
+ BD71815_REG_VM_VSYS_L,
+ BD71815_REG_VM_SA_VSYS_U,
+ BD71815_REG_VM_SA_VSYS_L,
+
+ BD71815_REG_VM_SA_IBAT_MIN_U = 0xD0,
+ BD71815_REG_VM_SA_IBAT_MIN_L,
+ BD71815_REG_VM_SA_IBAT_MAX_U,
+ BD71815_REG_VM_SA_IBAT_MAX_L,
+ BD71815_REG_VM_SA_VBAT_MIN_U,
+ BD71815_REG_VM_SA_VBAT_MIN_L,
+ BD71815_REG_VM_SA_VBAT_MAX_U,
+ BD71815_REG_VM_SA_VBAT_MAX_L,
+ BD71815_REG_VM_SA_VSYS_MIN_U,
+ BD71815_REG_VM_SA_VSYS_MIN_L,
+ BD71815_REG_VM_SA_VSYS_MAX_U,
+ BD71815_REG_VM_SA_VSYS_MAX_L,
+ BD71815_REG_VM_SA_MINMAX_CLR,
+
+ BD71815_REG_REX_CCNTD_3 = 0xE0,
+ BD71815_REG_REX_CCNTD_2,
+ BD71815_REG_REX_CCNTD_1,
+ BD71815_REG_REX_CCNTD_0,
+ BD71815_REG_REX_SA_VBAT_U,
+ BD71815_REG_REX_SA_VBAT_L,
+ BD71815_REG_REX_CTRL_1,
+ BD71815_REG_REX_CTRL_2,
+ BD71815_REG_FULL_CCNTD_3,
+ BD71815_REG_FULL_CCNTD_2,
+ BD71815_REG_FULL_CCNTD_1,
+ BD71815_REG_FULL_CCNTD_0,
+ BD71815_REG_FULL_CTRL,
+
+ BD71815_REG_CCNTD_CHG_3 = 0xF0,
+ BD71815_REG_CCNTD_CHG_2,
+
+ BD71815_REG_TEST_MODE = 0xFE,
+ BD71815_MAX_REGISTER,
+};
+
+/* BD71815_REG_BUCK1_MODE bits */
+#define BD71815_BUCK_RAMPRATE_MASK 0xC0
+#define BD71815_BUCK_RAMPRATE_10P00MV 0x0
+#define BD71815_BUCK_RAMPRATE_5P00MV 0x01
+#define BD71815_BUCK_RAMPRATE_2P50MV 0x02
+#define BD71815_BUCK_RAMPRATE_1P25MV 0x03
+
+#define BD71815_BUCK_PWM_FIXED BIT(4)
+#define BD71815_BUCK_SNVS_ON BIT(3)
+#define BD71815_BUCK_RUN_ON BIT(2)
+#define BD71815_BUCK_LPSR_ON BIT(1)
+#define BD71815_BUCK_SUSP_ON BIT(0)
+
+/* BD71815_REG_BUCK1_VOLT_H bits */
+#define BD71815_BUCK_DVSSEL BIT(7)
+#define BD71815_BUCK_STBY_DVS BIT(6)
+#define BD71815_VOLT_MASK 0x3F
+#define BD71815_BUCK1_H_DEFAULT 0x14
+#define BD71815_BUCK1_L_DEFAULT 0x14
+
+/* BD71815_REG_BUCK2_VOLT_H bits */
+#define BD71815_BUCK2_H_DEFAULT 0x14
+#define BD71815_BUCK2_L_DEFAULT 0x14
+
+/* WLED output */
+/* current register mask */
+#define LED_DIMM_MASK 0x3f
+/* LED enable bits at LED_CTRL reg */
+#define LED_CHGDONE_EN BIT(4)
+#define LED_RUN_ON BIT(2)
+#define LED_LPSR_ON BIT(1)
+#define LED_SUSP_ON BIT(0)
+
+/* BD71815_REG_LDO1_CTRL bits */
+#define LDO1_EN BIT(0)
+#define LDO2_EN BIT(1)
+#define LDO3_EN BIT(2)
+#define DVREF_EN BIT(3)
+#define VOSNVS_SW_EN BIT(4)
+
+/* LDO_MODE1_register */
+#define LDO1_SNVS_ON BIT(7)
+#define LDO1_RUN_ON BIT(6)
+#define LDO1_LPSR_ON BIT(5)
+#define LDO1_SUSP_ON BIT(4)
+/* set => register control, unset => GPIO control */
+#define LDO4_MODE_MASK BIT(3)
+#define LDO4_MODE_I2C BIT(3)
+#define LDO4_MODE_GPIO 0
+/* set => register control, unset => start when DCIN connected */
+#define LDO3_MODE_MASK BIT(2)
+#define LDO3_MODE_I2C BIT(2)
+#define LDO3_MODE_DCIN 0
+
+/* LDO_MODE2 register */
+#define LDO3_SNVS_ON BIT(7)
+#define LDO3_RUN_ON BIT(6)
+#define LDO3_LPSR_ON BIT(5)
+#define LDO3_SUSP_ON BIT(4)
+#define LDO2_SNVS_ON BIT(3)
+#define LDO2_RUN_ON BIT(2)
+#define LDO2_LPSR_ON BIT(1)
+#define LDO2_SUSP_ON BIT(0)
+
+
+/* LDO_MODE3 register */
+#define LDO5_SNVS_ON BIT(7)
+#define LDO5_RUN_ON BIT(6)
+#define LDO5_LPSR_ON BIT(5)
+#define LDO5_SUSP_ON BIT(4)
+#define LDO4_SNVS_ON BIT(3)
+#define LDO4_RUN_ON BIT(2)
+#define LDO4_LPSR_ON BIT(1)
+#define LDO4_SUSP_ON BIT(0)
+
+/* LDO_MODE4 register */
+#define DVREF_SNVS_ON BIT(7)
+#define DVREF_RUN_ON BIT(6)
+#define DVREF_LPSR_ON BIT(5)
+#define DVREF_SUSP_ON BIT(4)
+#define LDO_LPSR_SNVS_ON BIT(3)
+#define LDO_LPSR_RUN_ON BIT(2)
+#define LDO_LPSR_LPSR_ON BIT(1)
+#define LDO_LPSR_SUSP_ON BIT(0)
+
+/* BD71815_REG_OUT32K bits */
+#define OUT32K_EN BIT(0)
+#define OUT32K_MODE BIT(1)
+#define OUT32K_MODE_CMOS BIT(1)
+#define OUT32K_MODE_OPEN_DRAIN 0
+
+/* BD71815_REG_BAT_STAT bits */
+#define BAT_DET BIT(5)
+#define BAT_DET_OFFSET 5
+#define BAT_DET_DONE BIT(4)
+#define VBAT_OV BIT(3)
+#define DBAT_DET BIT(0)
+
+/* BD71815_REG_VBUS_STAT bits */
+#define VBUS_DET BIT(0)
+
+#define BD71815_REG_RTC_START BD71815_REG_SEC
+#define BD71815_REG_RTC_ALM_START BD71815_REG_ALM0_SEC
+
+/* BD71815_REG_ALM0_MASK bits */
+#define A0_ONESEC BIT(7)
+
+/* BD71815_REG_INT_EN_00 bits */
+#define ALMALE BIT(0)
+
+/* BD71815_REG_INT_STAT_03 bits */
+#define DCIN_MON_DET BIT(1)
+#define DCIN_MON_RES BIT(0)
+#define POWERON_LONG BIT(2)
+#define POWERON_MID BIT(3)
+#define POWERON_SHORT BIT(4)
+#define POWERON_PRESS BIT(5)
+
+/* BD71815_REG_INT_STAT_08 bits */
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71815_REG_INT_STAT_11 bits */
+#define INT_STAT_11_VF_DET BIT(7)
+#define INT_STAT_11_VF_RES BIT(6)
+#define INT_STAT_11_VF125_DET BIT(5)
+#define INT_STAT_11_VF125_RES BIT(4)
+#define INT_STAT_11_OVTMP_DET BIT(3)
+#define INT_STAT_11_OVTMP_RES BIT(2)
+#define INT_STAT_11_LOTMP_DET BIT(1)
+#define INT_STAT_11_LOTMP_RES BIT(0)
+
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71815_REG_PWRCTRL bits */
+#define RESTARTEN BIT(0)
+
+/* BD71815_REG_GPO bits */
+#define READY_FORCE_LOW BIT(2)
+#define BD71815_GPIO_DRIVE_MASK BIT(4)
+#define BD71815_GPIO_OPEN_DRAIN 0
+#define BD71815_GPIO_CMOS BIT(4)
+
+/* BD71815 interrupt masks */
+enum {
+ BD71815_INT_EN_01_BUCKAST_MASK = 0x0F,
+ BD71815_INT_EN_02_DCINAST_MASK = 0x3E,
+ BD71815_INT_EN_03_DCINAST_MASK = 0x3F,
+ BD71815_INT_EN_04_VSYSAST_MASK = 0xCF,
+ BD71815_INT_EN_05_CHGAST_MASK = 0xFC,
+ BD71815_INT_EN_06_BATAST_MASK = 0xF3,
+ BD71815_INT_EN_07_BMONAST_MASK = 0xFE,
+ BD71815_INT_EN_08_BMONAST_MASK = 0x03,
+ BD71815_INT_EN_09_BMONAST_MASK = 0x07,
+ BD71815_INT_EN_10_BMONAST_MASK = 0x3F,
+ BD71815_INT_EN_11_TMPAST_MASK = 0xFF,
+ BD71815_INT_EN_12_ALMAST_MASK = 0x07,
+};
+/* BD71815 interrupt irqs */
+enum {
+ /* BUCK reg interrupts */
+ BD71815_INT_BUCK1_OCP,
+ BD71815_INT_BUCK2_OCP,
+ BD71815_INT_BUCK3_OCP,
+ BD71815_INT_BUCK4_OCP,
+ BD71815_INT_BUCK5_OCP,
+ BD71815_INT_LED_OVP,
+ BD71815_INT_LED_OCP,
+ BD71815_INT_LED_SCP,
+ /* DCIN1 interrupts */
+ BD71815_INT_DCIN_RMV,
+ BD71815_INT_CLPS_OUT,
+ BD71815_INT_CLPS_IN,
+ BD71815_INT_DCIN_OVP_RES,
+ BD71815_INT_DCIN_OVP_DET,
+ /* DCIN2 interrupts */
+ BD71815_INT_DCIN_MON_RES,
+ BD71815_INT_DCIN_MON_DET,
+ BD71815_INT_WDOG,
+ /* Vsys INT_STAT_04 */
+ BD71815_INT_VSYS_UV_RES,
+ BD71815_INT_VSYS_UV_DET,
+ BD71815_INT_VSYS_LOW_RES,
+ BD71815_INT_VSYS_LOW_DET,
+ BD71815_INT_VSYS_MON_RES,
+ BD71815_INT_VSYS_MON_DET,
+ /* Charger INT_STAT_05 */
+ BD71815_INT_CHG_WDG_TEMP,
+ BD71815_INT_CHG_WDG_TIME,
+ BD71815_INT_CHG_RECHARGE_RES,
+ BD71815_INT_CHG_RECHARGE_DET,
+ BD71815_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71815_INT_CHG_STATE_TRANSITION,
+ /* Battery INT_STAT_06 */
+ BD71815_INT_BAT_TEMP_NORMAL,
+ BD71815_INT_BAT_TEMP_ERANGE,
+ BD71815_INT_BAT_REMOVED,
+ BD71815_INT_BAT_DETECTED,
+ BD71815_INT_THERM_REMOVED,
+ BD71815_INT_THERM_DETECTED,
+ /* Battery Mon 1 INT_STAT_07 */
+ BD71815_INT_BAT_DEAD,
+ BD71815_INT_BAT_SHORTC_RES,
+ BD71815_INT_BAT_SHORTC_DET,
+ BD71815_INT_BAT_LOW_VOLT_RES,
+ BD71815_INT_BAT_LOW_VOLT_DET,
+ BD71815_INT_BAT_OVER_VOLT_RES,
+ BD71815_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 INT_STAT_08 */
+ BD71815_INT_BAT_MON_RES,
+ BD71815_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) INT_STAT_09 */
+ BD71815_INT_BAT_CC_MON1,
+ BD71815_INT_BAT_CC_MON2,
+ BD71815_INT_BAT_CC_MON3,
+ /* Battery Mon 4 INT_STAT_10 */
+ BD71815_INT_BAT_OVER_CURR_1_RES,
+ BD71815_INT_BAT_OVER_CURR_1_DET,
+ BD71815_INT_BAT_OVER_CURR_2_RES,
+ BD71815_INT_BAT_OVER_CURR_2_DET,
+ BD71815_INT_BAT_OVER_CURR_3_RES,
+ BD71815_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature INT_STAT_11 */
+ BD71815_INT_TEMP_BAT_LOW_RES,
+ BD71815_INT_TEMP_BAT_LOW_DET,
+ BD71815_INT_TEMP_BAT_HI_RES,
+ BD71815_INT_TEMP_BAT_HI_DET,
+ BD71815_INT_TEMP_CHIP_OVER_125_RES,
+ BD71815_INT_TEMP_CHIP_OVER_125_DET,
+ BD71815_INT_TEMP_CHIP_OVER_VF_RES,
+ BD71815_INT_TEMP_CHIP_OVER_VF_DET,
+ /* RTC Alarm INT_STAT_12 */
+ BD71815_INT_RTC0,
+ BD71815_INT_RTC1,
+ BD71815_INT_RTC2,
+};
+
+#define BD71815_INT_BUCK1_OCP_MASK BIT(0)
+#define BD71815_INT_BUCK2_OCP_MASK BIT(1)
+#define BD71815_INT_BUCK3_OCP_MASK BIT(2)
+#define BD71815_INT_BUCK4_OCP_MASK BIT(3)
+#define BD71815_INT_BUCK5_OCP_MASK BIT(4)
+#define BD71815_INT_LED_OVP_MASK BIT(5)
+#define BD71815_INT_LED_OCP_MASK BIT(6)
+#define BD71815_INT_LED_SCP_MASK BIT(7)
+
+#define BD71815_INT_DCIN_RMV_MASK BIT(1)
+#define BD71815_INT_CLPS_OUT_MASK BIT(2)
+#define BD71815_INT_CLPS_IN_MASK BIT(3)
+#define BD71815_INT_DCIN_OVP_RES_MASK BIT(4)
+#define BD71815_INT_DCIN_OVP_DET_MASK BIT(5)
+
+#define BD71815_INT_DCIN_MON_RES_MASK BIT(0)
+#define BD71815_INT_DCIN_MON_DET_MASK BIT(1)
+#define BD71815_INT_WDOG_MASK BIT(6)
+
+#define BD71815_INT_VSYS_UV_RES_MASK BIT(0)
+#define BD71815_INT_VSYS_UV_DET_MASK BIT(1)
+#define BD71815_INT_VSYS_LOW_RES_MASK BIT(2)
+#define BD71815_INT_VSYS_LOW_DET_MASK BIT(3)
+#define BD71815_INT_VSYS_MON_RES_MASK BIT(6)
+#define BD71815_INT_VSYS_MON_DET_MASK BIT(7)
+
+#define BD71815_INT_CHG_WDG_TEMP_MASK BIT(2)
+#define BD71815_INT_CHG_WDG_TIME_MASK BIT(3)
+#define BD71815_INT_CHG_RECHARGE_RES_MASK BIT(4)
+#define BD71815_INT_CHG_RECHARGE_DET_MASK BIT(5)
+#define BD71815_INT_CHG_RANGED_TEMP_TRANSITION_MASK BIT(6)
+#define BD71815_INT_CHG_STATE_TRANSITION_MASK BIT(7)
+
+#define BD71815_INT_BAT_TEMP_NORMAL_MASK BIT(0)
+#define BD71815_INT_BAT_TEMP_ERANGE_MASK BIT(1)
+#define BD71815_INT_BAT_REMOVED_MASK BIT(4)
+#define BD71815_INT_BAT_DETECTED_MASK BIT(5)
+#define BD71815_INT_THERM_REMOVED_MASK BIT(6)
+#define BD71815_INT_THERM_DETECTED_MASK BIT(7)
+
+#define BD71815_INT_BAT_DEAD_MASK BIT(1)
+#define BD71815_INT_BAT_SHORTC_RES_MASK BIT(2)
+#define BD71815_INT_BAT_SHORTC_DET_MASK BIT(3)
+#define BD71815_INT_BAT_LOW_VOLT_RES_MASK BIT(4)
+#define BD71815_INT_BAT_LOW_VOLT_DET_MASK BIT(5)
+#define BD71815_INT_BAT_OVER_VOLT_RES_MASK BIT(6)
+#define BD71815_INT_BAT_OVER_VOLT_DET_MASK BIT(7)
+
+#define BD71815_INT_BAT_MON_RES_MASK BIT(0)
+#define BD71815_INT_BAT_MON_DET_MASK BIT(1)
+
+#define BD71815_INT_BAT_CC_MON1_MASK BIT(0)
+#define BD71815_INT_BAT_CC_MON2_MASK BIT(1)
+#define BD71815_INT_BAT_CC_MON3_MASK BIT(2)
+
+#define BD71815_INT_BAT_OVER_CURR_1_RES_MASK BIT(0)
+#define BD71815_INT_BAT_OVER_CURR_1_DET_MASK BIT(1)
+#define BD71815_INT_BAT_OVER_CURR_2_RES_MASK BIT(2)
+#define BD71815_INT_BAT_OVER_CURR_2_DET_MASK BIT(3)
+#define BD71815_INT_BAT_OVER_CURR_3_RES_MASK BIT(4)
+#define BD71815_INT_BAT_OVER_CURR_3_DET_MASK BIT(5)
+
+#define BD71815_INT_TEMP_BAT_LOW_RES_MASK BIT(0)
+#define BD71815_INT_TEMP_BAT_LOW_DET_MASK BIT(1)
+#define BD71815_INT_TEMP_BAT_HI_RES_MASK BIT(2)
+#define BD71815_INT_TEMP_BAT_HI_DET_MASK BIT(3)
+#define BD71815_INT_TEMP_CHIP_OVER_125_RES_MASK BIT(4)
+#define BD71815_INT_TEMP_CHIP_OVER_125_DET_MASK BIT(5)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_RES_MASK BIT(6)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_DET_MASK BIT(7)
+
+#define BD71815_INT_RTC0_MASK BIT(0)
+#define BD71815_INT_RTC1_MASK BIT(1)
+#define BD71815_INT_RTC2_MASK BIT(2)
+
+/* BD71815_REG_CC_CTRL bits */
+#define CCNTRST 0x80
+#define CCNTENB 0x40
+#define CCCALIB 0x20
+
+/* BD71815_REG_CC_CURCD */
+#define CURDIR_Discharging 0x8000
+
+/* BD71815_REG_VM_SA_IBAT */
+#define IBAT_SA_DIR_Discharging 0x8000
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_CLR BIT(4)
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_PMU_STATE_MASK BIT(2)
+
+/* BD71815_REG_LED_CTRL bits */
+#define CHGDONE_LED_EN BIT(4)
+
+#endif /* _MFD_BD71815_H */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index 017a4c01cb31..3b5f3a7db4bd 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -26,11 +26,11 @@ enum {
BD71828_REGULATOR_AMOUNT,
};
-#define BD71828_BUCK1267_VOLTS 0xEF
-#define BD71828_BUCK3_VOLTS 0x10
-#define BD71828_BUCK4_VOLTS 0x20
-#define BD71828_BUCK5_VOLTS 0x10
-#define BD71828_LDO_VOLTS 0x32
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
/* LDO6 is fixed 1.8V voltage */
#define BD71828_LDO_6_VOLTAGE 1800000
@@ -151,6 +151,9 @@ enum {
#define BD71828_REG_GPIO_CTRL3 0x49
#define BD71828_REG_IO_STAT 0xed
+/* clk */
+#define BD71828_REG_OUT32K 0x4b
+
/* RTC */
#define BD71828_REG_RTC_SEC 0x4c
#define BD71828_REG_RTC_MINUTE 0x4d
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index bee2474a8f9f..df2918198d37 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -310,17 +310,4 @@ enum {
BD718XX_PWRBTN_LONG_PRESS_15S
};
-struct bd718xx {
- /*
- * Please keep this as the first member here as some
- * drivers (clk) supporting more than one chip may only know this
- * generic struct 'struct rohm_regmap_dev' and assume it is
- * the first chunk of parent device's private data.
- */
- struct rohm_regmap_dev chip;
-
- int chip_irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/rohm-bd957x.h b/include/linux/mfd/rohm-bd957x.h
new file mode 100644
index 000000000000..acc920b64f75
--- /dev/null
+++ b/include/linux/mfd/rohm-bd957x.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2021 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD957X_H__
+#define __LINUX_MFD_BD957X_H__
+
+enum {
+ BD957X_VD50,
+ BD957X_VD18,
+ BD957X_VDDDR,
+ BD957X_VD10,
+ BD957X_VOUTL1,
+ BD957X_VOUTS1,
+};
+
+/*
+ * The BD9576 has its own IRQ 'blocks' for:
+ * - I2C/thermal,
+ * - Over-voltage protection
+ * - Short-circuit protection
+ * - Over-current protection
+ * - Over-voltage detection
+ * - Under-voltage detection
+ * - Under-voltage protection
+ * - 'system interrupt'.
+ *
+ * Each of the blocks has a status register giving more accurate IRQ source
+ * information - for example, which of the regulators have over-voltage.
+ *
+ * On top of this, there is a "main IRQ" status register where each bit
+ * indicates which of the sub-blocks have active IRQs. Fine. That would fit
+ * regmap-irq main status handling. Except that:
+ * - Only some sub-IRQs can be masked.
+ * - The IRQ informs us about a fault condition, not about fault state
+ *   changes. The IRQ line is kept asserted until the detected condition is
+ *   acked AND cleared in HW. This is annoying for IRQs like the one
+ *   informing of high temperature, because if the IRQ is not disabled it
+ *   keeps the CPU in an IRQ handling loop.
+ *
+ * For now we just use the main-IRQ register as the source of our IRQ
+ * information and bind regmap-irq to it. We leave fine-grained sub-IRQ
+ * register handling to the handlers in the sub-devices. The regulator driver
+ * shall read which regulators are the source of the problem - or whether the
+ * detected error is a regulator temperature error. The sub-drivers also
+ * handle masking of the "sub-IRQs" where this is supported/needed.
+ *
+ * To overcome the problem of HW keeping the IRQ asserted, we call
+ * disable_irq_nosync() from the sub-device handler and add delayed work to
+ * re-enable the IRQ roughly 1 second later. This should keep our CPU out of
+ * a busy-loop.
+ */
+#define IRQS_SILENT_MS 1000
+
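A minimal sketch of the disable/re-enable pattern described in the comment above (structure and names are illustrative, not the actual sub-driver):

static irqreturn_t example_isr(int irq, void *data)
{
	struct example *e = data;

	/* The level IRQ stays asserted in HW; silence it for a while. */
	disable_irq_nosync(irq);
	schedule_delayed_work(&e->reenable_work,
			      msecs_to_jiffies(IRQS_SILENT_MS));
	return IRQ_HANDLED;
}

static void example_reenable(struct work_struct *work)
{
	struct example *e = container_of(work, struct example,
					 reenable_work.work);

	enable_irq(e->irq);
}
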
+enum {
+ BD9576_INT_THERM,
+ BD9576_INT_OVP,
+ BD9576_INT_SCP,
+ BD9576_INT_OCP,
+ BD9576_INT_OVD,
+ BD9576_INT_UVD,
+ BD9576_INT_UVP,
+ BD9576_INT_SYS,
+};
+
+#define BD957X_REG_SMRB_ASSERT 0x15
+#define BD957X_REG_PMIC_INTERNAL_STAT 0x20
+#define BD957X_REG_INT_THERM_STAT 0x23
+#define BD957X_REG_INT_THERM_MASK 0x24
+#define BD957X_REG_INT_OVP_STAT 0x25
+#define BD957X_REG_INT_SCP_STAT 0x26
+#define BD957X_REG_INT_OCP_STAT 0x27
+#define BD957X_REG_INT_OVD_STAT 0x28
+#define BD957X_REG_INT_UVD_STAT 0x29
+#define BD957X_REG_INT_UVP_STAT 0x2a
+#define BD957X_REG_INT_SYS_STAT 0x2b
+#define BD957X_REG_INT_SYS_MASK 0x2c
+#define BD957X_REG_INT_MAIN_STAT 0x30
+#define BD957X_REG_INT_MAIN_MASK 0x31
+
+#define UVD_IRQ_VALID_MASK 0x6F
+#define OVD_IRQ_VALID_MASK 0x2F
+
+#define BD957X_MASK_INT_MAIN_THERM BIT(0)
+#define BD957X_MASK_INT_MAIN_OVP BIT(1)
+#define BD957X_MASK_INT_MAIN_SCP BIT(2)
+#define BD957X_MASK_INT_MAIN_OCP BIT(3)
+#define BD957X_MASK_INT_MAIN_OVD BIT(4)
+#define BD957X_MASK_INT_MAIN_UVD BIT(5)
+#define BD957X_MASK_INT_MAIN_UVP BIT(6)
+#define BD957X_MASK_INT_MAIN_SYS BIT(7)
+#define BD957X_MASK_INT_ALL 0xff
+
+#define BD957X_REG_WDT_CONF 0x16
+
+#define BD957X_REG_POW_TRIGGER1 0x41
+#define BD957X_REG_POW_TRIGGER2 0x42
+#define BD957X_REG_POW_TRIGGER3 0x43
+#define BD957X_REG_POW_TRIGGER4 0x44
+#define BD957X_REG_POW_TRIGGERL1 0x45
+#define BD957X_REG_POW_TRIGGERS1 0x46
+
+#define BD957X_REGULATOR_EN_MASK 0xff
+#define BD957X_REGULATOR_DIS_VAL 0xff
+
+#define BD957X_VSEL_REG_MASK 0xff
+
+#define BD957X_MASK_VOUT1_TUNE 0x87
+#define BD957X_MASK_VOUT2_TUNE 0x87
+#define BD957X_MASK_VOUT3_TUNE 0x1f
+#define BD957X_MASK_VOUT4_TUNE 0x1f
+#define BD957X_MASK_VOUTL1_TUNE 0x87
+
+#define BD957X_REG_VOUT1_TUNE 0x50
+#define BD957X_REG_VOUT2_TUNE 0x53
+#define BD957X_REG_VOUT3_TUNE 0x56
+#define BD957X_REG_VOUT4_TUNE 0x59
+#define BD957X_REG_VOUTL1_TUNE 0x5c
+
+#define BD9576_REG_VOUT1_OVD 0x51
+#define BD9576_REG_VOUT1_UVD 0x52
+#define BD9576_REG_VOUT2_OVD 0x54
+#define BD9576_REG_VOUT2_UVD 0x55
+#define BD9576_REG_VOUT3_OVD 0x57
+#define BD9576_REG_VOUT3_UVD 0x58
+#define BD9576_REG_VOUT4_OVD 0x5a
+#define BD9576_REG_VOUT4_UVD 0x5b
+#define BD9576_REG_VOUTL1_OVD 0x5d
+#define BD9576_REG_VOUTL1_UVD 0x5e
+
+#define BD9576_MASK_XVD 0x7f
+
+#define BD9576_REG_VOUT1S_OCW 0x5f
+#define BD9576_REG_VOUT1S_OCP 0x60
+
+#define BD9576_MASK_VOUT1S_OCW 0x3f
+#define BD9576_MASK_VOUT1S_OCP 0x3f
+
+#define BD957X_MAX_REGISTER 0x61
+
+#endif
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 4283b5b33e04..4eeb22876bad 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -8,10 +8,14 @@
#include <linux/regulator/driver.h>
enum rohm_chip_type {
- ROHM_CHIP_TYPE_BD71837 = 0,
- ROHM_CHIP_TYPE_BD71847,
- ROHM_CHIP_TYPE_BD70528,
+ ROHM_CHIP_TYPE_BD9571,
+ ROHM_CHIP_TYPE_BD9573,
+ ROHM_CHIP_TYPE_BD9574,
+ ROHM_CHIP_TYPE_BD9576,
+ ROHM_CHIP_TYPE_BD71815,
ROHM_CHIP_TYPE_BD71828,
+ ROHM_CHIP_TYPE_BD71837,
+ ROHM_CHIP_TYPE_BD71847,
ROHM_CHIP_TYPE_AMOUNT
};
@@ -20,14 +24,13 @@ struct rohm_regmap_dev {
struct regmap *regmap;
};
-enum {
- ROHM_DVS_LEVEL_UNKNOWN,
- ROHM_DVS_LEVEL_RUN,
- ROHM_DVS_LEVEL_IDLE,
- ROHM_DVS_LEVEL_SUSPEND,
- ROHM_DVS_LEVEL_LPSR,
- ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
-};
+#define ROHM_DVS_LEVEL_RUN BIT(0)
+#define ROHM_DVS_LEVEL_IDLE BIT(1)
+#define ROHM_DVS_LEVEL_SUSPEND BIT(2)
+#define ROHM_DVS_LEVEL_LPSR BIT(3)
+#define ROHM_DVS_LEVEL_SNVS BIT(4)
+#define ROHM_DVS_LEVEL_VALID_AMOUNT 5
+#define ROHM_DVS_LEVEL_UNKNOWN 0
/**
* struct rohm_dvs_config - dynamic voltage scaling register descriptions
@@ -65,6 +68,9 @@ struct rohm_dvs_config {
unsigned int lpsr_reg;
unsigned int lpsr_mask;
unsigned int lpsr_on_mask;
+ unsigned int snvs_reg;
+ unsigned int snvs_mask;
+ unsigned int snvs_on_mask;
};
#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
@@ -73,14 +79,8 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
const struct regulator_desc *desc,
struct regmap *regmap);
-#else
-static inline int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
- struct device_node *np,
- const struct regulator_desc *desc,
- struct regmap *regmap)
-{
- return 0;
-}
+int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel);
#endif
#endif
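
With the DVS levels now encoded as BIT() flags rather than enum values, one
regulator can advertise several supported run levels in a single mask. A
hedged sketch of walking such a mask; the helper name is hypothetical:

#include <linux/bits.h>

/* Hypothetical: program a voltage for every supported DVS level */
static int apply_dvs_levels(unsigned int supported_levels)
{
	unsigned int level;
	int i;

	for (i = 0; i < ROHM_DVS_LEVEL_VALID_AMOUNT; i++) {
		level = BIT(i);		/* RUN, IDLE, SUSPEND, LPSR, SNVS */
		if (!(supported_levels & level))
			continue;
		/* ... look up the per-level voltage from DT and set it ... */
	}

	return 0;
}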
diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h
new file mode 100644
index 000000000000..0379aa207428
--- /dev/null
+++ b/include/linux/mfd/rsmu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core interface for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __LINUX_MFD_RSMU_H
+#define __LINUX_MFD_RSMU_H
+
+#define RSMU_MAX_WRITE_COUNT (255)
+#define RSMU_MAX_READ_COUNT (255)
+
+/* The supported devices are ClockMatrix, Sabre and SnowLotus */
+enum rsmu_type {
+ RSMU_CM = 0x34000,
+ RSMU_SABRE = 0x33810,
+ RSMU_SL = 0x19850,
+};
+
+/**
+ * struct rsmu_ddata - device data structure for sub-devices.
+ *
+ * @dev: i2c/spi device.
+ * @regmap: i2c/spi bus access.
+ * @lock: mutex used by sub devices to make sure a series of
+ * bus access requests are not interrupted.
+ * @type: RSMU device type.
+ * @page: i2c/spi bus driver internal use only.
+ */
+struct rsmu_ddata {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ enum rsmu_type type;
+ u32 page;
+};
+#endif /* __LINUX_MFD_RSMU_H */
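
The lock in struct rsmu_ddata exists so a sub-device can issue a series of
bus accesses without another sub-device repositioning the page register in
between. A minimal sketch with a hypothetical helper name:

#include <linux/mutex.h>
#include <linux/regmap.h>

/* Hypothetical sub-device helper: the two reads must not be torn apart */
static int rsmu_read_pair(struct rsmu_ddata *rsmu, unsigned int reg,
			  unsigned int *lo, unsigned int *hi)
{
	int ret;

	mutex_lock(&rsmu->lock);
	ret = regmap_read(rsmu->regmap, reg, lo);
	if (!ret)
		ret = regmap_read(rsmu->regmap, reg + 1, hi);
	mutex_unlock(&rsmu->lock);

	return ret;
}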
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index f812105c538c..0221f806d139 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -55,21 +55,28 @@ enum rt5033_reg {
};
/* RT5033 Charger state register */
-#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+#define RT5033_CHG_STAT_MASK 0x30
#define RT5033_CHG_STAT_DISCHARGING 0x00
#define RT5033_CHG_STAT_FULL 0x10
#define RT5033_CHG_STAT_CHARGING 0x20
#define RT5033_CHG_STAT_NOT_CHARGING 0x30
-#define RT5033_CHG_STAT_TYPE_MASK 0x60
-#define RT5033_CHG_STAT_TYPE_PRE 0x20
-#define RT5033_CHG_STAT_TYPE_FAST 0x60
/* RT5033 CHGCTRL1 register */
#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_TE_EN_MASK 0x08
+#define RT5033_CHGCTRL1_HZ_MASK 0x02
#define RT5033_CHGCTRL1_MODE_MASK 0x01
/* RT5033 CHGCTRL2 register */
#define RT5033_CHGCTRL2_CV_MASK 0xfc
+#define RT5033_CHGCTRL2_CV_SHIFT 0x02
+
+/* RT5033 DEVICE_ID register */
+#define RT5033_VENDOR_ID_MASK 0xf0
+#define RT5033_CHIP_REV_MASK 0x0f
/* RT5033 CHGCTRL3 register */
#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
@@ -77,28 +84,28 @@ enum rt5033_reg {
#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
/* RT5033 CHGCTRL4 register */
-#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_MIVR_MASK 0xe0
#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+#define RT5033_CHGCTRL4_IPREC_SHIFT 0x03
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
/* RT5033 CHGCTRL5 register */
-#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
-#define RT5033_CHG_MAX_CURRENT 0x0d
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
/* RT5033 RT CTRL1 register */
#define RT5033_RT_CTRL1_UUG_MASK 0x02
-#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
-#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
-#define RT5033_CTRL_BUCKOMS_MASK 0x01
-#define RT5033_CTRL_LDOOMS_MASK 0x02
-#define RT5033_CTRL_SLDOOMS_MASK 0x03
-#define RT5033_CTRL_EN_BUCK_MASK 0x04
-#define RT5033_CTRL_EN_LDO_MASK 0x05
-#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
-#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
+#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
+#define RT5033_CTRL_BUCKOMS_MASK BIT(1)
+#define RT5033_CTRL_LDOOMS_MASK BIT(2)
+#define RT5033_CTRL_SLDOOMS_MASK BIT(3)
+#define RT5033_CTRL_EN_BUCK_MASK BIT(4)
+#define RT5033_CTRL_EN_LDO_MASK BIT(5)
+#define RT5033_CTRL_EN_SAFE_LDO_MASK BIT(6)
+#define RT5033_CTRL_LDO_SLEEP_MASK BIT(7)
/* RT5033 BUCK control register */
#define RT5033_BUCK_CTRL_MASK 0x1f
@@ -107,65 +114,77 @@ enum rt5033_reg {
#define RT5033_LDO_CTRL_MASK 0x1f
/* RT5033 charger property - model, manufacturer */
-
#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
#define RT5033_MANUFACTURER "Richtek Technology Corporation"
/*
- * RT5033 charger fast-charge current lmits (as in CHGCTRL1 register),
- * AICR mode limits the input current for example,
- * the AIRC 100 mode limits the input current to 100 mA.
+ * While RT5033 charger can limit the fast-charge current (as in CHGCTRL1
+ * register), AICR mode limits the input current. For example, the AIRC 100
+ * mode limits the input current to 100 mA.
*/
+#define RT5033_AICR_DISABLE 0x00
#define RT5033_AICR_100_MODE 0x20
#define RT5033_AICR_500_MODE 0x40
#define RT5033_AICR_700_MODE 0x60
#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1000_MODE 0xa0
#define RT5033_AICR_1500_MODE 0xc0
#define RT5033_AICR_2000_MODE 0xe0
-#define RT5033_AICR_MODE_MASK 0xe0
-/* RT5033 use internal timer need to set time */
-#define RT5033_FAST_CHARGE_TIMER4 0x00
-#define RT5033_FAST_CHARGE_TIMER6 0x01
-#define RT5033_FAST_CHARGE_TIMER8 0x02
-#define RT5033_FAST_CHARGE_TIMER9 0x03
-#define RT5033_FAST_CHARGE_TIMER12 0x04
-#define RT5033_FAST_CHARGE_TIMER14 0x05
-#define RT5033_FAST_CHARGE_TIMER16 0x06
+/* RT5033 charger minimum input voltage regulation */
+#define RT5033_CHARGER_MIVR_DISABLE 0x00
+#define RT5033_CHARGER_MIVR_4200MV 0x20
+#define RT5033_CHARGER_MIVR_4300MV 0x40
+#define RT5033_CHARGER_MIVR_4400MV 0x60
+#define RT5033_CHARGER_MIVR_4500MV 0x80
+#define RT5033_CHARGER_MIVR_4600MV 0xa0
+#define RT5033_CHARGER_MIVR_4700MV 0xc0
+#define RT5033_CHARGER_MIVR_4800MV 0xe0
+/* RT5033 uses an internal timer; the fast-charge time must be set */
+#define RT5033_FAST_CHARGE_TIMER4 0x00 /* 4 hrs */
+#define RT5033_FAST_CHARGE_TIMER6 0x08 /* 6 hrs */
+#define RT5033_FAST_CHARGE_TIMER8 0x10 /* 8 hrs */
+#define RT5033_FAST_CHARGE_TIMER10 0x18 /* 10 hrs */
+#define RT5033_FAST_CHARGE_TIMER12 0x20 /* 12 hrs */
+#define RT5033_FAST_CHARGE_TIMER14 0x28 /* 14 hrs */
+#define RT5033_FAST_CHARGE_TIMER16 0x30 /* 16 hrs */
+
+#define RT5033_INT_TIMER_DISABLE 0x00
#define RT5033_INT_TIMER_ENABLE 0x01
-/* RT5033 charger termination enable mask */
-#define RT5033_TE_ENABLE_MASK 0x08
-
/*
- * RT5033 charger opa mode. RT50300 have two opa mode charger mode
- * and boost mode for OTG
+ * RT5033 charger opa mode. RT5033 has two opa modes for OTG: charger mode
+ * and boost mode.
*/
-
#define RT5033_CHARGER_MODE 0x00
#define RT5033_BOOST_MODE 0x01
/* RT5033 charger termination enable */
+#define RT5033_TE_DISABLE 0x00
#define RT5033_TE_ENABLE 0x08
/* RT5033 charger CFO enable */
+#define RT5033_CFO_DISABLE 0x00
#define RT5033_CFO_ENABLE 0x40
/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+#define RT5033_CV_MAX_VOLTAGE 0x1e
/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+#define RT5033_CHG_MAX_PRE_CURRENT 0x03
/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+#define RT5033_CHG_MAX_CURRENT 0x0d
/*
* RT5033 charger const-charge end of charger current (
@@ -181,20 +200,20 @@ enum rt5033_reg {
* RT5033 charger pre-charge threshold volt limits
* (as in CHGCTRL5 register), uV
*/
-
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
/*
- * RT5033 charger enable UUG, If UUG enable MOS auto control by H/W charger
+ * RT5033 charger UUG. It enables MOS auto control by H/W charger
* circuit.
*/
+#define RT5033_CHARGER_UUG_DISABLE 0x00
#define RT5033_CHARGER_UUG_ENABLE 0x02
-/* RT5033 charger High impedance mode */
+/* RT5033 charger high impedance mode */
#define RT5033_CHARGER_HZ_DISABLE 0x00
-#define RT5033_CHARGER_HZ_ENABLE 0x01
+#define RT5033_CHARGER_HZ_ENABLE 0x02
/* RT5033 regulator BUCK output voltage uV */
#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
@@ -247,11 +266,11 @@ enum rt5033_fuel_reg {
#define RT5033_FUEL_BAT_PRESENT 0x02
/* RT5033 PMIC interrupts */
-#define RT5033_PMIC_IRQ_BUCKOCP 2
-#define RT5033_PMIC_IRQ_BUCKLV 3
-#define RT5033_PMIC_IRQ_SAFELDOLV 4
-#define RT5033_PMIC_IRQ_LDOLV 5
-#define RT5033_PMIC_IRQ_OT 6
-#define RT5033_PMIC_IRQ_VDDA_UV 7
+#define RT5033_PMIC_IRQ_BUCKOCP BIT(2)
+#define RT5033_PMIC_IRQ_BUCKLV BIT(3)
+#define RT5033_PMIC_IRQ_SAFELDOLV BIT(4)
+#define RT5033_PMIC_IRQ_LDOLV BIT(5)
+#define RT5033_PMIC_IRQ_OT BIT(6)
+#define RT5033_PMIC_IRQ_VDDA_UV BIT(7)
#endif /* __RT5033_PRIVATE_H__ */
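
The constant-voltage limits above encode a linear range: selector =
(uV - 3650000) / 25000, which tops out at 0x1e (RT5033_CV_MAX_VOLTAGE) for
4.4 V. A sketch of the encoding, assuming RT5033_REG_CHGCTRL2 from the
register enum earlier in this header (not shown in this hunk):

#include <linux/errno.h>
#include <linux/regmap.h>

/* Hypothetical helper: encode a CV limit in uV into CHGCTRL2 */
static int rt5033_set_const_voltage(struct regmap *regmap, unsigned int uV)
{
	unsigned int sel;

	if (uV < RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN ||
	    uV > RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX)
		return -EINVAL;

	/* 3.65 V..4.4 V in 25 mV steps, i.e. selectors 0x00..0x1e */
	sel = (uV - RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN) /
	      RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM;
	if (sel > RT5033_CV_MAX_VOLTAGE)
		sel = RT5033_CV_MAX_VOLTAGE;

	return regmap_update_bits(regmap, RT5033_REG_CHGCTRL2,
				  RT5033_CHGCTRL2_CV_MASK,
				  sel << RT5033_CHGCTRL2_CV_SHIFT);
}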
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
index 3c23b6220c04..bb3d18945d21 100644
--- a/include/linux/mfd/rt5033.h
+++ b/include/linux/mfd/rt5033.h
@@ -12,7 +12,6 @@
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/power_supply.h>
/* RT5033 regulator IDs */
enum rt5033_regulators {
@@ -32,28 +31,4 @@ struct rt5033_dev {
bool wakeup;
};
-struct rt5033_battery {
- struct i2c_client *client;
- struct rt5033_dev *rt5033;
- struct regmap *regmap;
- struct power_supply *psy;
-};
-
-/* RT5033 charger platform data */
-struct rt5033_charger_data {
- unsigned int pre_uamp;
- unsigned int pre_uvolt;
- unsigned int const_uvolt;
- unsigned int eoc_uamp;
- unsigned int fast_uamp;
-};
-
-struct rt5033_charger {
- struct device *dev;
- struct rt5033_dev *rt5033;
- struct power_supply psy;
-
- struct rt5033_charger_data *chg;
-};
-
#endif /* __RT5033_H__ */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
new file mode 100644
index 000000000000..8421d49500bf
--- /dev/null
+++ b/include/linux/mfd/rz-mtu3.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+#ifndef __MFD_RZ_MTU3_H__
+#define __MFD_RZ_MTU3_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* 8-bit shared register offsets macros */
+#define RZ_MTU3_TSTRA 0x080 /* Timer start register A */
+#define RZ_MTU3_TSTRB 0x880 /* Timer start register B */
+
+/* 16-bit shared register offset macros */
+#define RZ_MTU3_TDDRA 0x016 /* Timer dead time data register A */
+#define RZ_MTU3_TDDRB 0x816 /* Timer dead time data register B */
+#define RZ_MTU3_TCDRA 0x014 /* Timer cycle data register A */
+#define RZ_MTU3_TCDRB 0x814 /* Timer cycle data register B */
+#define RZ_MTU3_TCBRA 0x022 /* Timer cycle buffer register A */
+#define RZ_MTU3_TCBRB 0x822 /* Timer cycle buffer register B */
+#define RZ_MTU3_TCNTSA 0x020 /* Timer subcounter A */
+#define RZ_MTU3_TCNTSB 0x820 /* Timer subcounter B */
+
+/*
+ * MTU5 contains 3 timer counter registers and is totally different
+ * from the other channels, so we must keep its offsets separate
+ */
+
+/* 8-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TIER 0 /* Timer interrupt register */
+#define RZ_MTU3_NFCR 1 /* Noise filter control register */
+#define RZ_MTU3_TSR 2 /* Timer status register */
+#define RZ_MTU3_TCR 3 /* Timer control register */
+#define RZ_MTU3_TCR2 4 /* Timer control register 2 */
+
+/* Timer mode register 1 */
+#define RZ_MTU3_TMDR1 5
+#define RZ_MTU3_TMDR1_MD GENMASK(3, 0)
+#define RZ_MTU3_TMDR1_MD_NORMAL FIELD_PREP(RZ_MTU3_TMDR1_MD, 0)
+#define RZ_MTU3_TMDR1_MD_PWMMODE1 FIELD_PREP(RZ_MTU3_TMDR1_MD, 2)
+
+#define RZ_MTU3_TIOR 6 /* Timer I/O control register */
+#define RZ_MTU3_TIORH 6 /* Timer I/O control register H */
+#define RZ_MTU3_TIORL 7 /* Timer I/O control register L */
+/* Only MTU3/4/6/7 have TBTM registers */
+#define RZ_MTU3_TBTM 8 /* Timer buffer operation transfer mode register */
+
+/* 8-bit MTU5 register offset macros */
+#define RZ_MTU3_TSTR 2 /* MTU5 Timer start register */
+#define RZ_MTU3_TCNTCMPCLR 3 /* MTU5 Timer compare match clear register */
+#define RZ_MTU3_TCRU 4 /* Timer control register U */
+#define RZ_MTU3_TCR2U 5 /* Timer control register 2U */
+#define RZ_MTU3_TIORU 6 /* Timer I/O control register U */
+#define RZ_MTU3_TCRV 7 /* Timer control register V */
+#define RZ_MTU3_TCR2V 8 /* Timer control register 2V */
+#define RZ_MTU3_TIORV 9 /* Timer I/O control register V */
+#define RZ_MTU3_TCRW 10 /* Timer control register W */
+#define RZ_MTU3_TCR2W 11 /* Timer control register 2W */
+#define RZ_MTU3_TIORW 12 /* Timer I/O control register W */
+
+/* 16-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TCNT 0 /* Timer counter */
+#define RZ_MTU3_TGRA 1 /* Timer general register A */
+#define RZ_MTU3_TGRB 2 /* Timer general register B */
+#define RZ_MTU3_TGRC 3 /* Timer general register C */
+#define RZ_MTU3_TGRD 4 /* Timer general register D */
+#define RZ_MTU3_TGRE 5 /* Timer general register E */
+#define RZ_MTU3_TGRF 6 /* Timer general register F */
+/* Timer A/D converter start request registers */
+#define RZ_MTU3_TADCR 7 /* control register */
+#define RZ_MTU3_TADCORA 8 /* cycle set register A */
+#define RZ_MTU3_TADCORB 9 /* cycle set register B */
+#define RZ_MTU3_TADCOBRA 10 /* cycle set buffer register A */
+#define RZ_MTU3_TADCOBRB 11 /* cycle set buffer register B */
+
+/* 16-bit MTU5 register offset macros */
+#define RZ_MTU3_TCNTU 0 /* MTU5 Timer counter U */
+#define RZ_MTU3_TGRU 1 /* MTU5 Timer general register U */
+#define RZ_MTU3_TCNTV 2 /* MTU5 Timer counter V */
+#define RZ_MTU3_TGRV 3 /* MTU5 Timer general register V */
+#define RZ_MTU3_TCNTW 4 /* MTU5 Timer counter W */
+#define RZ_MTU3_TGRW 5 /* MTU5 Timer general register W */
+
+/* 32-bit register offset */
+#define RZ_MTU3_TCNTLW 0 /* Timer longword counter */
+#define RZ_MTU3_TGRALW 1 /* Timer longword general register A */
+#define RZ_MTU3_TGRBLW 2 /* Timer longword general register B */
+
+#define RZ_MTU3_TMDR3 0x191 /* MTU1 Timer Mode Register 3 */
+
+/* Macros for setting registers */
+#define RZ_MTU3_TCR_CCLR GENMASK(7, 5)
+#define RZ_MTU3_TCR_CKEG GENMASK(4, 3)
+#define RZ_MTU3_TCR_TPCS GENMASK(2, 0)
+#define RZ_MTU3_TCR_CCLR_TGRA BIT(5)
+#define RZ_MTU3_TCR_CCLR_TGRC FIELD_PREP(RZ_MTU3_TCR_CCLR, 5)
+#define RZ_MTU3_TCR_CKEG_RISING FIELD_PREP(RZ_MTU3_TCR_CKEG, 0)
+
+#define RZ_MTU3_TIOR_IOB GENMASK(7, 4)
+#define RZ_MTU3_TIOR_IOA GENMASK(3, 0)
+#define RZ_MTU3_TIOR_OC_RETAIN 0
+#define RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT 2
+#define RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT 7
+
+#define RZ_MTU3_TIOR_OC_IOA_H_COMP_MATCH \
+ FIELD_PREP(RZ_MTU3_TIOR_IOA, RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT)
+#define RZ_MTU3_TIOR_OC_IOB_TOGGLE \
+ FIELD_PREP(RZ_MTU3_TIOR_IOB, RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT)
+
+enum rz_mtu3_channels {
+ RZ_MTU3_CHAN_0,
+ RZ_MTU3_CHAN_1,
+ RZ_MTU3_CHAN_2,
+ RZ_MTU3_CHAN_3,
+ RZ_MTU3_CHAN_4,
+ RZ_MTU3_CHAN_5,
+ RZ_MTU3_CHAN_6,
+ RZ_MTU3_CHAN_7,
+ RZ_MTU3_CHAN_8,
+ RZ_MTU_NUM_CHANNELS
+};
+
+/**
+ * struct rz_mtu3_channel - MTU3 channel private data
+ *
+ * @dev: device handle
+ * @channel_number: channel number
+ * @lock: Lock to protect channel state
+ * @is_busy: channel state
+ */
+struct rz_mtu3_channel {
+ struct device *dev;
+ unsigned int channel_number;
+ struct mutex lock;
+ bool is_busy;
+};
+
+/**
+ * struct rz_mtu3 - MTU3 core private data
+ *
+ * @clk: MTU3 module clock
+ * @rz_mtu3_channel: HW channels
+ * @priv_data: MTU3 core driver private data
+ */
+struct rz_mtu3 {
+ struct clk *clk;
+ struct rz_mtu3_channel channels[RZ_MTU_NUM_CHANNELS];
+
+ void *priv_data;
+};
+
+static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ if (ch->is_busy) {
+ mutex_unlock(&ch->lock);
+ return false;
+ }
+
+ ch->is_busy = true;
+ mutex_unlock(&ch->lock);
+
+ return true;
+}
+
+static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ ch->is_busy = false;
+ mutex_unlock(&ch->lock);
+}
+
+bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch);
+void rz_mtu3_disable(struct rz_mtu3_channel *ch);
+int rz_mtu3_enable(struct rz_mtu3_channel *ch);
+
+u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off);
+
+void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val);
+void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
+void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
+ u16 pos, u8 val);
+
+#endif /* __MFD_RZ_MTU3_H__ */
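
A typical consumer of this channel API pairs rz_mtu3_request_channel() with
a release on every failure path. A hedged sketch using only helpers and
macros from this header; the function name and period semantics are
illustrative:

#include <linux/errno.h>

static int start_compare_match(struct rz_mtu3_channel *ch, u16 period)
{
	int ret;

	if (!rz_mtu3_request_channel(ch))
		return -EBUSY;

	/* Clear the counter on TGRA compare match, count rising edges */
	rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR,
			      RZ_MTU3_TCR_CCLR_TGRA | RZ_MTU3_TCR_CKEG_RISING);
	rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, period);

	ret = rz_mtu3_enable(ch);
	if (ret)
		rz_mtu3_release_channel(ch);

	return ret;
}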
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index f1631a39acfc..a212b9f72bc9 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -36,8 +36,6 @@
struct gpio_desc;
enum sec_device_type {
- S5M8751X,
- S5M8763X,
S5M8767X,
S2MPA01,
S2MPS11X,
@@ -67,11 +65,8 @@ struct sec_pmic_dev {
struct i2c_client *i2c;
unsigned long device_type;
- int irq_base;
int irq;
struct regmap_irq_chip_data *irq_data;
-
- bool wakeup;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -81,15 +76,8 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
- int device_type;
int num_regulators;
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- bool wakeup;
- bool buck_voltage_lock;
-
int buck_gpios[3];
int buck_ds[3];
unsigned int buck2_voltage[8];
@@ -99,35 +87,12 @@ struct sec_platform_data {
unsigned int buck4_voltage[8];
bool buck4_gpiodvs;
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
int buck_ramp_delay;
- int buck2_ramp_delay;
- int buck34_ramp_delay;
- int buck5_ramp_delay;
- int buck16_ramp_delay;
- int buck7810_ramp_delay;
- int buck9_ramp_delay;
- int buck24_ramp_delay;
- int buck3_ramp_delay;
- int buck7_ramp_delay;
- int buck8910_ramp_delay;
-
- bool buck1_ramp_enable;
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
- bool buck6_ramp_enable;
int buck2_init;
int buck3_init;
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 6cfe4201a106..3fd2775eb9bb 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -194,54 +194,4 @@ enum s5m8767_irq {
#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
#define S5M8767_IRQ_WTSR_MASK (1 << 5)
-enum s5m8763_irq {
- S5M8763_IRQ_DCINF,
- S5M8763_IRQ_DCINR,
- S5M8763_IRQ_JIGF,
- S5M8763_IRQ_JIGR,
- S5M8763_IRQ_PWRONF,
- S5M8763_IRQ_PWRONR,
-
- S5M8763_IRQ_WTSREVNT,
- S5M8763_IRQ_SMPLEVNT,
- S5M8763_IRQ_ALARM1,
- S5M8763_IRQ_ALARM0,
-
- S5M8763_IRQ_ONKEY1S,
- S5M8763_IRQ_TOPOFFR,
- S5M8763_IRQ_DCINOVPR,
- S5M8763_IRQ_CHGRSTF,
- S5M8763_IRQ_DONER,
- S5M8763_IRQ_CHGFAULT,
-
- S5M8763_IRQ_LOBAT1,
- S5M8763_IRQ_LOBAT2,
-
- S5M8763_IRQ_NR,
-};
-
-#define S5M8763_IRQ_DCINF_MASK (1 << 2)
-#define S5M8763_IRQ_DCINR_MASK (1 << 3)
-#define S5M8763_IRQ_JIGF_MASK (1 << 4)
-#define S5M8763_IRQ_JIGR_MASK (1 << 5)
-#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
-#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
-
-#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
-#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
-#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
-#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
-
-#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
-#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
-#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
-#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
-#define S5M8763_IRQ_DONER_MASK (1 << 5)
-#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
-
-#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
-#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
-
-#define S5M8763_ENRAMP (1 << 4)
-
#endif /* __LINUX_MFD_SEC_IRQ_H */
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
deleted file mode 100644
index c534f086ca16..000000000000
--- a/include/linux/mfd/samsung/s5m8763.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- */
-
-#ifndef __LINUX_MFD_S5M8763_H
-#define __LINUX_MFD_S5M8763_H
-
-/* S5M8763 registers */
-enum s5m8763_reg {
- S5M8763_REG_IRQ1,
- S5M8763_REG_IRQ2,
- S5M8763_REG_IRQ3,
- S5M8763_REG_IRQ4,
- S5M8763_REG_IRQM1,
- S5M8763_REG_IRQM2,
- S5M8763_REG_IRQM3,
- S5M8763_REG_IRQM4,
- S5M8763_REG_STATUS1,
- S5M8763_REG_STATUS2,
- S5M8763_REG_STATUSM1,
- S5M8763_REG_STATUSM2,
- S5M8763_REG_CHGR1,
- S5M8763_REG_CHGR2,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
- S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
- S5M8763_REG_ONOFF1,
- S5M8763_REG_ONOFF2,
- S5M8763_REG_ONOFF3,
- S5M8763_REG_ONOFF4,
- S5M8763_REG_BUCK1_VOLTAGE1,
- S5M8763_REG_BUCK1_VOLTAGE2,
- S5M8763_REG_BUCK1_VOLTAGE3,
- S5M8763_REG_BUCK1_VOLTAGE4,
- S5M8763_REG_BUCK2_VOLTAGE1,
- S5M8763_REG_BUCK2_VOLTAGE2,
- S5M8763_REG_BUCK3,
- S5M8763_REG_BUCK4,
- S5M8763_REG_LDO1_LDO2,
- S5M8763_REG_LDO3,
- S5M8763_REG_LDO4,
- S5M8763_REG_LDO5,
- S5M8763_REG_LDO6,
- S5M8763_REG_LDO7,
- S5M8763_REG_LDO7_LDO8,
- S5M8763_REG_LDO9_LDO10,
- S5M8763_REG_LDO11,
- S5M8763_REG_LDO12,
- S5M8763_REG_LDO13,
- S5M8763_REG_LDO14,
- S5M8763_REG_LDO15,
- S5M8763_REG_LDO16,
- S5M8763_REG_BKCHR,
- S5M8763_REG_LBCNFG1,
- S5M8763_REG_LBCNFG2,
-};
-
-/* S5M8763 regulator ids */
-enum s5m8763_regulators {
- S5M8763_LDO1,
- S5M8763_LDO2,
- S5M8763_LDO3,
- S5M8763_LDO4,
- S5M8763_LDO5,
- S5M8763_LDO6,
- S5M8763_LDO7,
- S5M8763_LDO8,
- S5M8763_LDO9,
- S5M8763_LDO10,
- S5M8763_LDO11,
- S5M8763_LDO12,
- S5M8763_LDO13,
- S5M8763_LDO14,
- S5M8763_LDO15,
- S5M8763_LDO16,
- S5M8763_BUCK1,
- S5M8763_BUCK2,
- S5M8763_BUCK3,
- S5M8763_BUCK4,
- S5M8763_AP_EN32KHZ,
- S5M8763_CP_EN32KHZ,
- S5M8763_ENCHGVI,
- S5M8763_ESAFEUSB1,
- S5M8763_ESAFEUSB2,
-};
-
-#define S5M8763_ENRAMP (1 << 4)
-#endif /* __LINUX_MFD_S5M8763_H */
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index 4708c2b8512a..dd95c37ca134 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -57,7 +57,7 @@ enum si476x_mfd_cells {
* @SI476X_POWER_DOWN: In this state all regulators are turned off
* and the reset line is pulled low. The device is completely
* inactive.
- * @SI476X_POWER_UP_FULL: In this state all the power regualtors are
+ * @SI476X_POWER_UP_FULL: In this state all the power regulators are
* turned on, reset line pulled high, IRQ line is enabled(polling is
* active for polling use scenario) and device is turned on with
* POWER_UP command. The device is ready to be used.
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
index 18363b773d07..cb99e16ca947 100644
--- a/include/linux/mfd/si476x-platform.h
+++ b/include/linux/mfd/si476x-platform.h
@@ -10,7 +10,7 @@
#ifndef __SI476X_PLATFORM_H__
#define __SI476X_PLATFORM_H__
-/* It is possible to select one of the four adresses using pins A0
+/* It is possible to select one of the four addresses using pins A0
* and A1 on SI476x */
#define SI476X_I2C_ADDR_1 0x60
#define SI476X_I2C_ADDR_2 0x61
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 90b20550c1c8..06d3f11dc3c9 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -45,6 +45,11 @@
#define STM32_LPTIM_PRESC GENMASK(11, 9)
#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+/* STM32_LPTIM_CKPOL */
+#define STM32_LPTIM_CKPOL_RISING_EDGE 0
+#define STM32_LPTIM_CKPOL_FALLING_EDGE 1
+#define STM32_LPTIM_CKPOL_BOTH_EDGES 2
+
/* STM32_LPTIM_ARR */
#define STM32_LPTIM_MAX_ARR 0xFFFF
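
The new CKPOL values are plain field values, so they pair with the
STM32_LPTIM_CKPOL mask via FIELD_PREP(). A sketch, assuming the field lives
in STM32_LPTIM_CFGR, which is defined earlier in this header (not shown in
this hunk):

#include <linux/bitfield.h>
#include <linux/regmap.h>

static int lptim_count_both_edges(struct regmap *regmap)
{
	return regmap_update_bits(regmap, STM32_LPTIM_CFGR, STM32_LPTIM_CKPOL,
				  FIELD_PREP(STM32_LPTIM_CKPOL,
					     STM32_LPTIM_CKPOL_BOTH_EDGES));
}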
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index f8db83aedb2b..ca35af30745f 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -31,6 +31,7 @@
#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
#define TIM_DCR 0x48 /* DMA control register */
#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_TISEL 0x68 /* Input Selection */
#define TIM_CR1_CEN BIT(0) /* Counter Enable */
#define TIM_CR1_DIR BIT(4) /* Counter Direction */
@@ -82,6 +83,10 @@
#define MAX_TIM_ICPSC 0x3
#define TIM_CR2_MMS_SHIFT 4
#define TIM_CR2_MMS2_SHIFT 20
+#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
+#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
#define TIM_SMCR_TS_SHIFT 4
#define TIM_BDTR_BKF_MASK 0xF
#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
@@ -97,6 +102,15 @@ enum stm32_timers_dmas {
STM32_TIMERS_MAX_DMAS,
};
+/* STM32 Timer may have either a single global interrupt or 4 interrupt lines */
+enum stm32_timers_irqs {
+ STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */
+ STM32_TIMERS_IRQ_UP,
+ STM32_TIMERS_IRQ_TRG_COM,
+ STM32_TIMERS_IRQ_CC,
+ STM32_TIMERS_MAX_IRQS,
+};
+
/**
* struct stm32_timers_dma - STM32 timer DMA handling.
* @completion: end of DMA transfer completion
@@ -118,6 +132,8 @@ struct stm32_timers {
struct regmap *regmap;
u32 max_arr;
struct stm32_timers_dma dma; /* Only to be used by the parent */
+ unsigned int nr_irqs;
+ int irq[STM32_TIMERS_MAX_IRQS];
};
#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
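
A child driver (PWM, trigger, counter) can use the new nr_irqs field to tell
the single-global-interrupt variant from the four-line one. A hypothetical
helper:

/* Pick the IRQ that signals counter update events */
static int get_update_irq(struct stm32_timers *ddata)
{
	if (ddata->nr_irqs == 1)
		return ddata->irq[STM32_TIMERS_IRQ_GLOBAL_BRK];

	return ddata->irq[STM32_TIMERS_IRQ_UP];
}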
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
index 744dce63946e..967a2e486800 100644
--- a/include/linux/mfd/stmfx.h
+++ b/include/linux/mfd/stmfx.h
@@ -113,10 +113,8 @@ struct stmfx {
struct irq_domain *irq_domain;
struct mutex lock; /* IRQ bus lock */
u8 irq_src;
-#ifdef CONFIG_PM
u8 bkp_sysctrl;
u8 bkp_irqoutpin;
-#endif
};
int stmfx_function_enable(struct stmfx *stmfx, u32 func);
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h
index fa3f99f7e9a1..dc00bac24f5a 100644
--- a/include/linux/mfd/stpmic1.h
+++ b/include/linux/mfd/stpmic1.h
@@ -15,7 +15,7 @@
#define RREQ_STATE_SR 0x5
#define VERSION_SR 0x6
-#define SWOFF_PWRCTRL_CR 0x10
+#define MAIN_CR 0x10
#define PADS_PULL_CR 0x11
#define BUCKS_PD_CR 0x12
#define LDO14_PD_CR 0x13
@@ -148,14 +148,14 @@
#define LDO_BYPASS_MASK BIT(7)
/* Main PMIC Control Register
- * SWOFF_PWRCTRL_CR
+ * MAIN_CR
* Address : 0x10
*/
-#define ICC_EVENT_ENABLED BIT(4)
+#define OCP_OFF_DBG BIT(4)
#define PWRCTRL_POLARITY_HIGH BIT(3)
-#define PWRCTRL_PIN_VALID BIT(2)
-#define RESTART_REQUEST_ENABLED BIT(1)
-#define SOFTWARE_SWITCH_OFF_ENABLED BIT(0)
+#define PWRCTRL_ENABLE BIT(2)
+#define RESTART_REQUEST_ENABLE BIT(1)
+#define SOFTWARE_SWITCH_OFF BIT(0)
/* Main PMIC PADS Control Register
* PADS_PULL_CR
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index ea0ccf33a459..021f820f9d52 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -81,8 +81,8 @@
#define SUN4I_GPADC_TEMP_DATA 0x20
#define SUN4I_GPADC_DATA 0x24
-#define SUN4I_GPADC_IRQ_FIFO_DATA 0
-#define SUN4I_GPADC_IRQ_TEMP_DATA 1
+#define SUN4I_GPADC_IRQ_FIFO_DATA 1
+#define SUN4I_GPADC_IRQ_TEMP_DATA 2
/* 10s delay before suspending the IP */
#define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000
diff --git a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h
new file mode 100644
index 000000000000..22f03b2f851e
--- /dev/null
+++ b/include/linux/mfd/sy7636a.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Functions to access the SY7636A power management chip.
+ *
+ * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/
+ */
+
+#ifndef __MFD_SY7636A_H
+#define __MFD_SY7636A_H
+
+#define SY7636A_REG_OPERATION_MODE_CRL 0x00
+/* Set if a GPIO is used to control the regulator */
+#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6)
+#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7)
+#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01
+#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02
+#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff
+#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03
+#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06
+#define SY7636A_REG_FAULT_FLAG 0x07
+#define SY7636A_FAULT_FLAG_PG BIT(0)
+#define SY7636A_REG_TERMISTOR_READOUT 0x08
+
+#define SY7636A_REG_MAX 0x08
+
+#define VCOM_ADJUST_CTRL_MASK 0x1ff
+/* Used to shift the high byte */
+#define VCOM_ADJUST_CTRL_SHIFT 8
+/* Used to scale from VCOM_ADJUST_CTRL to mV */
+#define VCOM_ADJUST_CTRL_SCAL 10000
+
+#define FAULT_FLAG_SHIFT 1
+
+#endif /* __MFD_SY7636A_H */
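
Reading VCOM back means combining the L/H register pair, masking, and
scaling with the defines above. A minimal sketch; the unit of the result
follows the header's own scaling comment:

#include <linux/regmap.h>

static int sy7636a_get_vcom(struct regmap *regmap, int *vcom)
{
	unsigned int lo, hi;
	int ret;

	ret = regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_L, &lo);
	if (ret)
		return ret;

	ret = regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_H, &hi);
	if (ret)
		return ret;

	lo |= hi << VCOM_ADJUST_CTRL_SHIFT;
	*vcom = (lo & VCOM_ADJUST_CTRL_MASK) * VCOM_ADJUST_CTRL_SCAL;

	return 0;
}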
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 7f20e9b502a5..c315903f6dab 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,17 +17,17 @@
struct device_node;
#ifdef CONFIG_MFD_SYSCON
-extern struct regmap *device_node_to_regmap(struct device_node *np);
-extern struct regmap *syscon_node_to_regmap(struct device_node *np);
-extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
-extern struct regmap *syscon_regmap_lookup_by_phandle(
- struct device_node *np,
- const char *property);
-extern struct regmap *syscon_regmap_lookup_by_phandle_args(
- struct device_node *np,
- const char *property,
- int arg_count,
- unsigned int *out_args);
+struct regmap *device_node_to_regmap(struct device_node *np);
+struct regmap *syscon_node_to_regmap(struct device_node *np);
+struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
+struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property);
+struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args);
+struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
+ const char *property);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -59,6 +59,14 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
+ struct device_node *np,
+ const char *property)
+{
+ return NULL;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
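
Per the !CONFIG_MFD_SYSCON stub above, syscon_regmap_lookup_by_phandle_optional()
returns NULL rather than an error pointer when the phandle property is simply
absent, so consumers can treat the syscon as optional. A sketch; the
"vendor,sysctl" property name and the register write are made-up examples:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int probe_optional_syscon(struct device_node *np)
{
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle_optional(np, "vendor,sysctl");
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);	/* lookup existed but failed */
	if (!regmap)
		return 0;		/* property absent: feature disabled */

	return regmap_write(regmap, 0x0, 0x1);	/* arbitrary example write */
}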
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d4b5e527a7a3..09c6b3184bb0 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -451,8 +451,10 @@
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
/* For imx6ul iomux gpr register field define */
-#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
-#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET2_TX_CLK_DIR BIT(18)
+#define IMX6UL_GPR1_ENET1_TX_CLK_DIR BIT(17)
+#define IMX6UL_GPR1_ENET2_CLK_SEL BIT(14)
+#define IMX6UL_GPR1_ENET1_CLK_SEL BIT(13)
#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
diff --git a/include/linux/mfd/syscon/xlnx-vcu.h b/include/linux/mfd/syscon/xlnx-vcu.h
new file mode 100644
index 000000000000..ff7bc3656f6e
--- /dev/null
+++ b/include/linux/mfd/syscon/xlnx-vcu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef __XLNX_VCU_H
+#define __XLNX_VCU_H
+
+#define VCU_ECODER_ENABLE 0x00
+#define VCU_DECODER_ENABLE 0x04
+#define VCU_MEMORY_DEPTH 0x08
+#define VCU_ENC_COLOR_DEPTH 0x0c
+#define VCU_ENC_VERTICAL_RANGE 0x10
+#define VCU_ENC_FRAME_SIZE_X 0x14
+#define VCU_ENC_FRAME_SIZE_Y 0x18
+#define VCU_ENC_COLOR_FORMAT 0x1c
+#define VCU_ENC_FPS 0x20
+#define VCU_MCU_CLK 0x24
+#define VCU_CORE_CLK 0x28
+#define VCU_PLL_BYPASS 0x2c
+#define VCU_ENC_CLK 0x30
+#define VCU_PLL_CLK 0x34
+#define VCU_ENC_VIDEO_STANDARD 0x38
+#define VCU_STATUS 0x3c
+#define VCU_AXI_ENC_CLK 0x40
+#define VCU_AXI_DEC_CLK 0x44
+#define VCU_AXI_MCU_CLK 0x48
+#define VCU_DEC_VIDEO_STANDARD 0x4c
+#define VCU_DEC_FRAME_SIZE_X 0x50
+#define VCU_DEC_FRAME_SIZE_Y 0x54
+#define VCU_DEC_FPS 0x58
+#define VCU_BUFFER_B_FRAME 0x5c
+#define VCU_WPP_EN 0x60
+#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+#endif /* __XLNX_VCU_H */
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
deleted file mode 100644
index 69632c1b07bd..000000000000
--- a/include/linux/mfd/t7l66xb.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * This file contains the definitions for the T7L66XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- */
-#ifndef MFD_T7L66XB_H
-#define MFD_T7L66XB_H
-
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-
-struct t7l66xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* The base for subdevice irqs */
-
- struct tmio_nand_data *nand_data;
-};
-
-
-#define IRQ_T7L66XB_MMC (1)
-#define IRQ_T7L66XB_NAND (3)
-
-#define T7L66XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index bb2b19599761..b84955410e03 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -19,6 +19,9 @@ enum tx3589x_block {
#define TC3589x_RSTCTRL_KBDRST (1 << 1)
#define TC3589x_RSTCTRL_GPIRST (1 << 0)
+#define TC3589x_DKBDMSK_ELINT (1 << 1)
+#define TC3589x_DKBDMSK_EINT (1 << 0)
+
/* Keyboard Configuration Registers */
#define TC3589x_KBDSETTLE_REG 0x01
#define TC3589x_KBDBOUNCE 0x02
@@ -101,6 +104,9 @@ enum tx3589x_block {
#define TC3589x_GPIOODM2 0xE4
#define TC3589x_GPIOODE2 0xE5
+#define TC3589x_DIRECT0 0xEC
+#define TC3589x_DKBDMSK 0xF3
+
#define TC3589x_INT_GPIIRQ 0
#define TC3589x_INT_TI0IRQ 1
#define TC3589x_INT_TI1IRQ 2
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
deleted file mode 100644
index b4888209494a..000000000000
--- a/include/linux/mfd/tc6387xb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file contains the definitions for the TC6387XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- */
-#ifndef MFD_TC6387XB_H
-#define MFD_TC6387XB_H
-
-struct tc6387xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-};
-
-#endif
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
deleted file mode 100644
index fcc8e74f0e8d..000000000000
--- a/include/linux/mfd/tc6393xb.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Toshiba TC6393XB SoC support
- *
- * Copyright(c) 2005-2006 Chris Humbert
- * Copyright(c) 2005 Dirk Opfer
- * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
- * Copyright(c) 2007 Dmitry Baryshkov
- *
- * Based on code written by Sharp/Lineo for 2.4 kernels
- * Based on locomo.c
- */
-
-#ifndef MFD_TC6393XB_H
-#define MFD_TC6393XB_H
-
-#include <linux/fb.h>
-
-/* Also one should provide the CK3P6MI clock */
-struct tc6393xb_platform_data {
- u16 scr_pll2cr; /* PLL2 Control */
- u16 scr_gper; /* GP Enable */
-
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* base for subdevice irqs */
- int gpio_base;
- int (*setup)(struct platform_device *dev);
- void (*teardown)(struct platform_device *dev);
-
- struct tmio_nand_data *nand_data;
- struct tmio_fb_data *fb_data;
-
- unsigned resume_restore : 1; /* make special actions
- to preserve the state
- on suspend/resume */
-};
-
-extern int tc6393xb_lcd_mode(struct platform_device *fb,
- const struct fb_videomode *mode);
-extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
-
-/*
- * Relative to irq_base
- */
-#define IRQ_TC6393_NAND 0
-#define IRQ_TC6393_MMC 1
-#define IRQ_TC6393_OHCI 2
-#define IRQ_TC6393_FB 4
-
-#define TC6393XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index ffc091b77633..4063b0614d90 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -1,22 +1,16 @@
-#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
-#define __LINUX_TI_AM335X_TSCADC_MFD_H
-
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Touch Screen / ADC MFD driver
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+#include <linux/bitfield.h>
#include <linux/mfd/core.h>
+#include <linux/units.h>
#define REG_RAWIRQSTATUS 0x024
#define REG_IRQSTATUS 0x028
@@ -46,13 +40,6 @@
/* IRQ wakeup enable */
#define IRQWKUP_ENB BIT(0)
-/* Step Enable */
-#define STEPENB_MASK (0x1FFFF << 0)
-#define STEPENB(val) ((val) << 0)
-#define ENB(val) (1 << (val))
-#define STPENB_STEPENB STEPENB(0x1FFFF)
-#define STPENB_STEPENB_TC STEPENB(0x1FFF)
-
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_EOS BIT(1)
@@ -65,12 +52,10 @@
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
-#define STEPCONFIG_MODE_MASK (3 << 0)
-#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val))
#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
-#define STEPCONFIG_AVG_MASK (7 << 2)
-#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val))
#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP BIT(5)
#define STEPCONFIG_XNN BIT(6)
@@ -78,70 +63,67 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
-#define STEPCONFIG_RFP(val) ((val) << 12)
-#define STEPCONFIG_RFP_VREFP (0x3 << 12)
-#define STEPCONFIG_INM_MASK (0xF << 15)
-#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val))
+#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3)
+#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
-#define STEPCONFIG_INP_MASK (0xF << 19)
-#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
-#define STEPCONFIG_RFM(val) ((val) << 23)
-#define STEPCONFIG_RFM_VREFN (0x3 << 23)
+#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
+#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3)
/* Delay register */
-#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
-#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
-#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
-#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0)
+#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val))
#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0)
/* Charge Config */
-#define STEPCHARGE_RFP_MASK (7 << 12)
-#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val))
#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
-#define STEPCHARGE_INM_MASK (0xF << 15)
-#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
-#define STEPCHARGE_INP_MASK (0xF << 19)
-#define STEPCHARGE_INP(val) ((val) << 19)
-#define STEPCHARGE_RFM_MASK (3 << 23)
-#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
+#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
/* Charge delay */
-#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
-#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
-#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_SSENB BIT(0)
#define CNTRLREG_STEPID BIT(1)
-#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2)
#define CNTRLREG_POWERDOWN BIT(4)
-#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
-#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
-#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
-#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
-#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
-#define CNTRLREG_TSCENB BIT(7)
+#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val))
+#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1)
+#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2)
+#define CNTRLREG_TSC_ENB BIT(7)
+
+/* Control register bitfields for the MAGADC IP */
+#define CNTRLREG_MAGADCENB BIT(0)
+#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5)
+#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6)
/* FIFO READ Register */
-#define FIFOREAD_DATA_MASK (0xfff << 0)
-#define FIFOREAD_CHNLID_MASK (0xf << 16)
+#define FIFOREAD_DATA_MASK GENMASK(11, 0)
+#define FIFOREAD_CHNLID_MASK GENMASK(19, 16)
/* DMA ENABLE/CLEAR Register */
#define DMA_FIFO0 BIT(0)
#define DMA_FIFO1 BIT(1)
/* Sequencer Status */
-#define SEQ_STATUS BIT(5)
+#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
-#define ADC_CLK 3000000
+#define TSC_ADC_CLK (3 * HZ_PER_MHZ)
+#define MAG_ADC_CLK (13 * HZ_PER_MHZ)
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
#define FIFO1_THRESHOLD 19
@@ -158,21 +140,27 @@
*
* max processing time: 266431 * 308ns = 83ms(approx)
*/
-#define IDLE_TIMEOUT 83 /* milliseconds */
+#define IDLE_TIMEOUT_MS 83 /* milliseconds */
#define TSCADC_CELLS 2
+struct ti_tscadc_data {
+ char *adc_feature_name;
+ char *adc_feature_compatible;
+ char *secondary_feature_name;
+ char *secondary_feature_compatible;
+ unsigned int target_clk_rate;
+};
+
struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
phys_addr_t tscadc_phys_base;
+ const struct ti_tscadc_data *data;
int irq;
- int used_cells; /* 1-2 */
- int tsc_wires;
- int tsc_cell; /* -1 if not used */
- int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
+ u32 ctrl;
u32 reg_se_cache;
bool adc_waiting;
bool adc_in_use;
@@ -194,6 +182,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
return *tscadc_dev;
}
+static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc)
+{
+ return of_device_is_compatible(tscadc->dev->of_node,
+ "ti,am3359-tscadc");
+}
+
void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
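
The switch from open-coded shifts to FIELD_PREP()/GENMASK() leaves the
composed register words unchanged; it only makes the field widths explicit.
For example, a step configuration can still be built purely from the macros
above (the particular combination below is a hypothetical ADC step):

#include <linux/types.h>

/* Software-triggered step, 16-sample averaging, AN4 against ADCREFM */
static u32 make_adc_stepconfig(void)
{
	return STEPCONFIG_MODE_SWCNT | STEPCONFIG_AVG_16 |
	       STEPCONFIG_INP_AN4 | STEPCONFIG_INM_ADCREFM |
	       STEPCONFIG_RFP_VREFP | STEPCONFIG_RFM_VREFN | STEPCONFIG_FIFO1;
}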
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8ba042430d8e..eace8ea6cda0 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -55,7 +55,12 @@
*/
#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-/* BIT(5) is unused */
+/*
+ * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
+ * we don't have documentation for old variants, so we enable only known good
+ * variants with this flag. Can be removed once all variants are known good.
+ */
+#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
/*
* Some controllers have CMD12 automatically
@@ -79,11 +84,6 @@
/* Some controllers have a CBSY bit */
#define TMIO_MMC_HAVE_CBSY BIT(11)
-int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
-int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
-void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
-void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
-
struct dma_chan;
/*
@@ -97,7 +97,6 @@ struct tmio_mmc_data {
unsigned long capabilities2;
unsigned long flags;
u32 ocr_mask; /* available voltages */
- int alignment_shift;
dma_addr_t dma_rx_offset;
unsigned int max_blk_count;
unsigned short max_segs;
diff --git a/include/linux/mfd/tps65010.h b/include/linux/mfd/tps65010.h
index a1fb9bc5311d..5edf1aef1118 100644
--- a/include/linux/mfd/tps65010.h
+++ b/include/linux/mfd/tps65010.h
@@ -28,6 +28,8 @@
#ifndef __LINUX_I2C_TPS65010_H
#define __LINUX_I2C_TPS65010_H
+struct gpio_chip;
+
/*
* ----------------------------------------------------------------------------
* Registers, all 8 bits
@@ -176,12 +178,10 @@ struct i2c_client;
/**
* struct tps65010_board - packages GPIO and LED lines
- * @base: the GPIO number to assign to GPIO-1
* @outmask: bit (N-1) is set to allow GPIO-N to be used as an
* (open drain) output
* @setup: optional callback issued once the GPIOs are valid
* @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
*
* Board data may be used to package the GPIO (and LED) lines for use
* in by the generic GPIO and LED frameworks. The first four GPIOs
@@ -193,12 +193,9 @@ struct i2c_client;
* devices in their initial states using these GPIOs.
*/
struct tps65010_board {
- int base;
unsigned outmask;
-
- int (*setup)(struct i2c_client *client, void *context);
- int (*teardown)(struct i2c_client *client, void *context);
- void *context;
+ int (*setup)(struct i2c_client *client, struct gpio_chip *gc);
+ void (*teardown)(struct i2c_client *client, struct gpio_chip *gc);
};
#endif /* __LINUX_I2C_TPS65010_H */
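
The setup()/teardown() callbacks now receive the gpio_chip directly instead
of an opaque context pointer. A hypothetical board hook matching the new
signature:

#include <linux/gpio/driver.h>
#include <linux/i2c.h>

static int my_board_tps_setup(struct i2c_client *client, struct gpio_chip *gc)
{
	/* the first four lines of gc are GPIO-1..GPIO-4, then the LEDs */
	return 0;
}

static struct tps65010_board my_board = {
	.outmask = 0x0f,	/* allow GPIO-1..GPIO-4 as outputs */
	.setup = my_board_tps_setup,
};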
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index e0a417e53766..9185b5cd8371 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
@@ -21,8 +13,9 @@
#include <linux/regmap.h>
/* List of registers for TPS65086 */
-#define TPS65086_DEVICEID 0x01
-#define TPS65086_IRQ 0x02
+#define TPS65086_DEVICEID1 0x00
+#define TPS65086_DEVICEID2 0x01
+#define TPS65086_IRQ 0x02
#define TPS65086_IRQ_MASK 0x03
#define TPS65086_PMICSTAT 0x04
#define TPS65086_SHUTDNSRC 0x05
@@ -83,10 +76,16 @@
#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
#define TPS65086_IRQ_FAULT_MASK BIT(7)
-/* DEVICEID Register field definitions */
-#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
-#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
-#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
+/* DEVICEID1 Register field definitions */
+#define TPS6508640_ID 0x00
+#define TPS65086401_ID 0x01
+#define TPS6508641_ID 0x10
+#define TPS65086470_ID 0x70
+
+/* DEVICEID2 Register field definitions */
+#define TPS65086_DEVICEID2_PART_MASK GENMASK(3, 0)
+#define TPS65086_DEVICEID2_OTP_MASK GENMASK(5, 4)
+#define TPS65086_DEVICEID2_REV_MASK GENMASK(7, 6)
/* VID Masks */
#define BUCK_VID_MASK GENMASK(7, 1)
@@ -100,6 +99,8 @@ enum tps65086_irqs {
TPS65086_IRQ_FAULT,
};
+struct tps65086_regulator_config;
+
/**
* struct tps65086 - state holder for the tps65086 driver
*
@@ -108,6 +109,8 @@ enum tps65086_irqs {
struct tps65086 {
struct device *dev;
struct regmap *regmap;
+ unsigned int chip_id;
+ const struct tps65086_regulator_config *reg_config;
/* IRQ Data */
int irq;
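
With DEVICEID split into two registers, the core driver can identify the
silicon variant from DEVICEID1 and keep it in the new chip_id field. A
sketch of such a probe-time check; the function name is hypothetical:

#include <linux/errno.h>
#include <linux/regmap.h>

static int tps65086_read_chip_id(struct tps65086 *tps)
{
	int ret;

	ret = regmap_read(tps->regmap, TPS65086_DEVICEID1, &tps->chip_id);
	if (ret)
		return ret;

	switch (tps->chip_id) {
	case TPS6508640_ID:
	case TPS65086401_ID:
	case TPS6508641_ID:
	case TPS65086470_ID:
		return 0;
	default:
		return -ENODEV;
	}
}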
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index db7091824ed0..877d9c41c53d 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65217.h
*
* Functions to access TPS65217 power management chip.
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_TPS65217_H
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index f4ca367e3473..2946be2f15f3 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65218.h
*
- * Functions to access TPS65219 power management chip.
+ * Functions to access TPS65218 power management chip.
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#ifndef __LINUX_MFD_TPS65218_H
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
new file mode 100644
index 000000000000..e6826e34e2a6
--- /dev/null
+++ b/include/linux/mfd/tps65219.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS65219 Power Management IC.
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef MFD_TPS65219_H
+#define MFD_TPS65219_H
+
+#include <linux/bitops.h>
+#include <linux/notifier.h>
+#include <linux/regulator/driver.h>
+
+struct regmap;
+struct regmap_irq_chip_data;
+
+#define TPS65219_1V35 1350000
+#define TPS65219_1V8 1800000
+
+/* TPS chip id list */
+#define TPS65219 0xF0
+
+/* I2C ID for TPS65219 part */
+#define TPS65219_I2C_ID 0x24
+
+/* All register addresses */
+#define TPS65219_REG_TI_DEV_ID 0x00
+#define TPS65219_REG_NVM_ID 0x01
+#define TPS65219_REG_ENABLE_CTRL 0x02
+#define TPS65219_REG_BUCKS_CONFIG 0x03
+#define TPS65219_REG_LDO4_VOUT 0x04
+#define TPS65219_REG_LDO3_VOUT 0x05
+#define TPS65219_REG_LDO2_VOUT 0x06
+#define TPS65219_REG_LDO1_VOUT 0x07
+#define TPS65219_REG_BUCK3_VOUT 0x8
+#define TPS65219_REG_BUCK2_VOUT 0x9
+#define TPS65219_REG_BUCK1_VOUT 0xA
+#define TPS65219_REG_LDO4_SEQUENCE_SLOT 0xB
+#define TPS65219_REG_LDO3_SEQUENCE_SLOT 0xC
+#define TPS65219_REG_LDO2_SEQUENCE_SLOT 0xD
+#define TPS65219_REG_LDO1_SEQUENCE_SLOT 0xE
+#define TPS65219_REG_BUCK3_SEQUENCE_SLOT 0xF
+#define TPS65219_REG_BUCK2_SEQUENCE_SLOT 0x10
+#define TPS65219_REG_BUCK1_SEQUENCE_SLOT 0x11
+#define TPS65219_REG_nRST_SEQUENCE_SLOT 0x12
+#define TPS65219_REG_GPIO_SEQUENCE_SLOT 0x13
+#define TPS65219_REG_GPO2_SEQUENCE_SLOT 0x14
+#define TPS65219_REG_GPO1_SEQUENCE_SLOT 0x15
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_1 0x16
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_2 0x17
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_3 0x18
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_4 0x19
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_1 0x1A
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_2 0x1B
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_3 0x1C
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_4 0x1D
+#define TPS65219_REG_GENERAL_CONFIG 0x1E
+#define TPS65219_REG_MFP_1_CONFIG 0x1F
+#define TPS65219_REG_MFP_2_CONFIG 0x20
+#define TPS65219_REG_STBY_1_CONFIG 0x21
+#define TPS65219_REG_STBY_2_CONFIG 0x22
+#define TPS65219_REG_OC_DEGL_CONFIG 0x23
+/* 'sub irq' MASK registers */
+#define TPS65219_REG_INT_MASK_UV 0x24
+#define TPS65219_REG_MASK_CONFIG 0x25
+
+#define TPS65219_REG_I2C_ADDRESS_REG 0x26
+#define TPS65219_REG_USER_GENERAL_NVM_STORAGE 0x27
+#define TPS65219_REG_MANUFACTURING_VER 0x28
+#define TPS65219_REG_MFP_CTRL 0x29
+#define TPS65219_REG_DISCHARGE_CONFIG 0x2A
+/* main irq registers */
+#define TPS65219_REG_INT_SOURCE 0x2B
+/* 'sub irq' registers */
+#define TPS65219_REG_INT_LDO_3_4 0x2C
+#define TPS65219_REG_INT_LDO_1_2 0x2D
+#define TPS65219_REG_INT_BUCK_3 0x2E
+#define TPS65219_REG_INT_BUCK_1_2 0x2F
+#define TPS65219_REG_INT_SYSTEM 0x30
+#define TPS65219_REG_INT_RV 0x31
+#define TPS65219_REG_INT_TIMEOUT_RV_SD 0x32
+#define TPS65219_REG_INT_PB 0x33
+
+#define TPS65219_REG_INT_LDO_3_4_POS 0
+#define TPS65219_REG_INT_LDO_1_2_POS 1
+#define TPS65219_REG_INT_BUCK_3_POS 2
+#define TPS65219_REG_INT_BUCK_1_2_POS 3
+#define TPS65219_REG_INT_SYS_POS 4
+#define TPS65219_REG_INT_RV_POS 5
+#define TPS65219_REG_INT_TO_RV_POS 6
+#define TPS65219_REG_INT_PB_POS 7
+
+#define TPS65219_REG_USER_NVM_CMD 0x34
+#define TPS65219_REG_POWER_UP_STATUS 0x35
+#define TPS65219_REG_SPARE_2 0x36
+#define TPS65219_REG_SPARE_3 0x37
+#define TPS65219_REG_FACTORY_CONFIG_2 0x41
+
+/* Register field definitions */
+#define TPS65219_DEVID_REV_MASK GENMASK(7, 0)
+#define TPS65219_BUCKS_LDOS_VOUT_VSET_MASK GENMASK(5, 0)
+#define TPS65219_BUCKS_UV_THR_SEL_MASK BIT(6)
+#define TPS65219_BUCKS_BW_SEL_MASK BIT(7)
+#define LDO_BYP_SHIFT 6
+#define TPS65219_LDOS_BYP_CONFIG_MASK BIT(LDO_BYP_SHIFT)
+#define TPS65219_LDOS_LSW_CONFIG_MASK BIT(7)
+/* Regulators enable control */
+#define TPS65219_ENABLE_BUCK1_EN_MASK BIT(0)
+#define TPS65219_ENABLE_BUCK2_EN_MASK BIT(1)
+#define TPS65219_ENABLE_BUCK3_EN_MASK BIT(2)
+#define TPS65219_ENABLE_LDO1_EN_MASK BIT(3)
+#define TPS65219_ENABLE_LDO2_EN_MASK BIT(4)
+#define TPS65219_ENABLE_LDO3_EN_MASK BIT(5)
+#define TPS65219_ENABLE_LDO4_EN_MASK BIT(6)
+/* power ON-OFF sequence slot */
+#define TPS65219_BUCKS_LDOS_SEQUENCE_OFF_SLOT_MASK GENMASK(3, 0)
+#define TPS65219_BUCKS_LDOS_SEQUENCE_ON_SLOT_MASK GENMASK(7, 4)
+/* TODO: Not needed, same mapping as TPS65219_ENABLE_REGNAME_EN, factorize */
+#define TPS65219_STBY1_BUCK1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY1_BUCK2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY1_BUCK3_STBY_EN_MASK BIT(2)
+#define TPS65219_STBY1_LDO1_STBY_EN_MASK BIT(3)
+#define TPS65219_STBY1_LDO2_STBY_EN_MASK BIT(4)
+#define TPS65219_STBY1_LDO3_STBY_EN_MASK BIT(5)
+#define TPS65219_STBY1_LDO4_STBY_EN_MASK BIT(6)
+/* STBY_2 config */
+#define TPS65219_STBY2_GPO1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY2_GPO2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY2_GPIO_STBY_EN_MASK BIT(2)
+/* MFP Control */
+#define TPS65219_MFP_I2C_OFF_REQ_MASK BIT(0)
+#define TPS65219_MFP_STBY_I2C_CTRL_MASK BIT(1)
+#define TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK BIT(2)
+#define TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK BIT(3)
+#define TPS65219_MFP_GPIO_STATUS_MASK BIT(4)
+/* MFP_1 Config */
+#define TPS65219_MFP_1_VSEL_DDR_SEL_MASK BIT(0)
+#define TPS65219_MFP_1_VSEL_SD_POL_MASK BIT(1)
+#define TPS65219_MFP_1_VSEL_RAIL_MASK BIT(2)
+/* MFP_2 Config */
+#define TPS65219_MFP_2_MODE_STBY_MASK GENMASK(1, 0)
+#define TPS65219_MFP_2_MODE_RESET_MASK BIT(2)
+#define TPS65219_MFP_2_EN_PB_VSENSE_DEGL_MASK BIT(3)
+#define TPS65219_MFP_2_EN_PB_VSENSE_MASK GENMASK(5, 4)
+#define TPS65219_MFP_2_WARM_COLD_RESET_MASK BIT(6)
+#define TPS65219_MFP_2_PU_ON_FSD_MASK BIT(7)
+#define TPS65219_MFP_2_EN 0
+#define TPS65219_MFP_2_PB BIT(4)
+#define TPS65219_MFP_2_VSENSE BIT(5)
+/* MASK_UV Config */
+#define TPS65219_REG_MASK_UV_LDO1_UV_MASK BIT(0)
+#define TPS65219_REG_MASK_UV_LDO2_UV_MASK BIT(1)
+#define TPS65219_REG_MASK_UV_LDO3_UV_MASK BIT(2)
+#define TPS65219_REG_MASK_UV_LDO4_UV_MASK BIT(3)
+#define TPS65219_REG_MASK_UV_BUCK1_UV_MASK BIT(4)
+#define TPS65219_REG_MASK_UV_BUCK2_UV_MASK BIT(5)
+#define TPS65219_REG_MASK_UV_BUCK3_UV_MASK BIT(6)
+#define TPS65219_REG_MASK_UV_RETRY_MASK BIT(7)
+/* MASK Config */
+/* SENSOR_N_WARM_MASK already defined in the Thermal Sensor section */
+#define TPS65219_REG_MASK_INT_FOR_RV_MASK BIT(4)
+#define TPS65219_REG_MASK_EFFECT_MASK GENMASK(2, 1)
+#define TPS65219_REG_MASK_INT_FOR_PB_MASK BIT(7)
+/* Undervoltage - Short to GND - Overcurrent */
+/* LDO3-4 */
+#define TPS65219_INT_LDO3_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO3_OC_MASK BIT(1)
+#define TPS65219_INT_LDO3_UV_MASK BIT(2)
+#define TPS65219_INT_LDO4_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO4_OC_MASK BIT(4)
+#define TPS65219_INT_LDO4_UV_MASK BIT(5)
+/* LDO1-2 */
+#define TPS65219_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO1_OC_MASK BIT(1)
+#define TPS65219_INT_LDO1_UV_MASK BIT(2)
+#define TPS65219_INT_LDO2_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO2_OC_MASK BIT(4)
+#define TPS65219_INT_LDO2_UV_MASK BIT(5)
+/* BUCK3 */
+#define TPS65219_INT_BUCK3_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK3_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK3_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK3_UV_MASK BIT(3)
+/* BUCK1-2 */
+#define TPS65219_INT_BUCK1_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK1_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK1_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK1_UV_MASK BIT(3)
+#define TPS65219_INT_BUCK2_SCG_MASK BIT(4)
+#define TPS65219_INT_BUCK2_OC_MASK BIT(5)
+#define TPS65219_INT_BUCK2_NEG_OC_MASK BIT(6)
+#define TPS65219_INT_BUCK2_UV_MASK BIT(7)
+/* Thermal Sensor */
+#define TPS65219_INT_SENSOR_3_WARM_MASK BIT(0)
+#define TPS65219_INT_SENSOR_2_WARM_MASK BIT(1)
+#define TPS65219_INT_SENSOR_1_WARM_MASK BIT(2)
+#define TPS65219_INT_SENSOR_0_WARM_MASK BIT(3)
+#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4)
+#define TPS65219_INT_SENSOR_2_HOT_MASK BIT(5)
+#define TPS65219_INT_SENSOR_1_HOT_MASK BIT(6)
+#define TPS65219_INT_SENSOR_0_HOT_MASK BIT(7)
+/* Residual Voltage */
+#define TPS65219_INT_BUCK1_RV_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_MASK BIT(6)
+/* Residual Voltage ShutDown */
+#define TPS65219_INT_BUCK1_RV_SD_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_SD_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_SD_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_SD_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_SD_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_SD_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_SD_MASK BIT(6)
+#define TPS65219_INT_TIMEOUT_MASK BIT(7)
+/* Power Button */
+#define TPS65219_INT_PB_FALLING_EDGE_DETECT_MASK BIT(0)
+#define TPS65219_INT_PB_RISING_EDGE_DETECT_MASK BIT(1)
+#define TPS65219_INT_PB_REAL_TIME_STATUS_MASK BIT(2)
+
+#define TPS65219_PB_POS 7
+#define TPS65219_TO_RV_POS 6
+#define TPS65219_RV_POS 5
+#define TPS65219_SYS_POS 4
+#define TPS65219_BUCK_1_2_POS 3
+#define TPS65219_BUCK_3_POS 2
+#define TPS65219_LDO_1_2_POS 1
+#define TPS65219_LDO_3_4_POS 0
+
+/* IRQs */
+enum {
+ /* LDO3-4 register IRQs */
+ TPS65219_INT_LDO3_SCG,
+ TPS65219_INT_LDO3_OC,
+ TPS65219_INT_LDO3_UV,
+ TPS65219_INT_LDO4_SCG,
+ TPS65219_INT_LDO4_OC,
+ TPS65219_INT_LDO4_UV,
+ /* LDO1-2 */
+ TPS65219_INT_LDO1_SCG,
+ TPS65219_INT_LDO1_OC,
+ TPS65219_INT_LDO1_UV,
+ TPS65219_INT_LDO2_SCG,
+ TPS65219_INT_LDO2_OC,
+ TPS65219_INT_LDO2_UV,
+ /* BUCK3 */
+ TPS65219_INT_BUCK3_SCG,
+ TPS65219_INT_BUCK3_OC,
+ TPS65219_INT_BUCK3_NEG_OC,
+ TPS65219_INT_BUCK3_UV,
+ /* BUCK1-2 */
+ TPS65219_INT_BUCK1_SCG,
+ TPS65219_INT_BUCK1_OC,
+ TPS65219_INT_BUCK1_NEG_OC,
+ TPS65219_INT_BUCK1_UV,
+ TPS65219_INT_BUCK2_SCG,
+ TPS65219_INT_BUCK2_OC,
+ TPS65219_INT_BUCK2_NEG_OC,
+ TPS65219_INT_BUCK2_UV,
+ /* Thermal Sensor */
+ TPS65219_INT_SENSOR_3_WARM,
+ TPS65219_INT_SENSOR_2_WARM,
+ TPS65219_INT_SENSOR_1_WARM,
+ TPS65219_INT_SENSOR_0_WARM,
+ TPS65219_INT_SENSOR_3_HOT,
+ TPS65219_INT_SENSOR_2_HOT,
+ TPS65219_INT_SENSOR_1_HOT,
+ TPS65219_INT_SENSOR_0_HOT,
+ /* Residual Voltage */
+ TPS65219_INT_BUCK1_RV,
+ TPS65219_INT_BUCK2_RV,
+ TPS65219_INT_BUCK3_RV,
+ TPS65219_INT_LDO1_RV,
+ TPS65219_INT_LDO2_RV,
+ TPS65219_INT_LDO3_RV,
+ TPS65219_INT_LDO4_RV,
+ /* Residual Voltage ShutDown */
+ TPS65219_INT_BUCK1_RV_SD,
+ TPS65219_INT_BUCK2_RV_SD,
+ TPS65219_INT_BUCK3_RV_SD,
+ TPS65219_INT_LDO1_RV_SD,
+ TPS65219_INT_LDO2_RV_SD,
+ TPS65219_INT_LDO3_RV_SD,
+ TPS65219_INT_LDO4_RV_SD,
+ TPS65219_INT_TIMEOUT,
+ /* Power Button */
+ TPS65219_INT_PB_FALLING_EDGE_DETECT,
+ TPS65219_INT_PB_RISING_EDGE_DETECT,
+};
+
+enum tps65219_regulator_id {
+	/* DCDCs */
+ TPS65219_BUCK_1,
+ TPS65219_BUCK_2,
+ TPS65219_BUCK_3,
+ /* LDOs */
+ TPS65219_LDO_1,
+ TPS65219_LDO_2,
+ TPS65219_LDO_3,
+ TPS65219_LDO_4,
+};
+
+/* Number of step-down converters available */
+#define TPS65219_NUM_DCDC 3
+/* Number of LDO voltage regulators available */
+#define TPS65219_NUM_LDO 4
+/* Number of total regulators available */
+#define TPS65219_NUM_REGULATOR (TPS65219_NUM_DCDC + TPS65219_NUM_LDO)
+
+/* Define the TPS65219 IRQ numbers */
+enum tps65219_irqs {
+ /* INT source registers */
+ TPS65219_TO_RV_SD_SET_IRQ,
+ TPS65219_RV_SET_IRQ,
+ TPS65219_SYS_SET_IRQ,
+ TPS65219_BUCK_1_2_SET_IRQ,
+ TPS65219_BUCK_3_SET_IRQ,
+ TPS65219_LDO_1_2_SET_IRQ,
+ TPS65219_LDO_3_4_SET_IRQ,
+ TPS65219_PB_SET_IRQ,
+};
+
+/**
+ * struct tps65219 - tps65219 sub-driver chip access routines
+ *
+ * Device data may be used to access the TPS65219 chip.
+ *
+ * @dev: MFD device
+ * @regmap: Regmap for accessing the device registers
+ * @irq_data: Regmap irq data used for the irq chip
+ * @nb: notifier block for the restart handler
+ */
+struct tps65219 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct notifier_block nb;
+};
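+
+/*
+ * Usage sketch for sub-drivers (illustrative only): read a register
+ * through the shared regmap and map a chip IRQ to a Linux virtual IRQ:
+ *
+ *	unsigned int val;
+ *	int err = regmap_read(tps->regmap, TPS65219_REG_INT_SOURCE, &val);
+ *	int virq = regmap_irq_get_virq(tps->irq_data,
+ *				       TPS65219_INT_PB_RISING_EDGE_DETECT);
+ */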
+
+#endif /* MFD_TPS65219_H */
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index ce4b9e743f7c..f67ef0a4e041 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -749,7 +749,7 @@
#define VDDCTRL_ST_SHIFT 0
-/*Register VDDCTRL_OP (0x28) bit definitios */
+/*Register VDDCTRL_OP (0x28) bit definitions */
#define VDDCTRL_OP_CMD_MASK 0x80
#define VDDCTRL_OP_CMD_SHIFT 7
#define VDDCTRL_OP_SEL_MASK 0x7F
@@ -890,11 +890,6 @@ struct tps65910 {
struct regmap *regmap;
unsigned long id;
- /* Client devices */
- struct tps65910_pmic *pmic;
- struct tps65910_rtc *rtc;
- struct tps65910_power *power;
-
/* Device node parsed board data */
struct tps65910_board *of_plat_data;
@@ -913,39 +908,4 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
return tps65910->id;
}
-static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
- unsigned int *val)
-{
- return regmap_read(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
- unsigned int val)
-{
- return regmap_write(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-
-static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-
-static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask, u8 val)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, val);
-}
-
-static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
-{
- return regmap_irq_get_virq(tps65910->irq_data, irq);
-}
-
#endif /* __LINUX_MFD_TPS65910_H */
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 7943e413deae..860ec0a16c96 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
@@ -322,6 +314,6 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-int tps65912_device_exit(struct tps65912 *tps);
+void tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h
new file mode 100644
index 000000000000..3f7c5e23cd4c
--- /dev/null
+++ b/include/linux/mfd/tps6594.h
@@ -0,0 +1,1020 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS6594 Power Management IC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __LINUX_MFD_TPS6594_H
+#define __LINUX_MFD_TPS6594_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct regmap_irq_chip_data;
+
+/* Chip id list */
+enum pmic_id {
+ TPS6594,
+ TPS6593,
+ LP8764,
+};
+
+/* Macro to get page index from register address */
+#define TPS6594_REG_TO_PAGE(reg) ((reg) >> 8)
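+/*
+ * Example: all page-0 registers below have addresses <= 0xff and thus
+ * map to page 0, while TPS6594_REG_TO_PAGE(TPS6594_REG_WD_ANSWER_REG)
+ * evaluates to 4 (0x401 >> 8), matching the page-4 watchdog block.
+ */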
+
+/* Registers for page 0 of TPS6594 */
+#define TPS6594_REG_DEV_REV 0x01
+
+#define TPS6594_REG_NVM_CODE_1 0x02
+#define TPS6594_REG_NVM_CODE_2 0x03
+
+#define TPS6594_REG_BUCKX_CTRL(buck_inst) (0x04 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_CONF(buck_inst) (0x05 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_1(buck_inst) (0x0e + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_2(buck_inst) (0x0f + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_PG_WINDOW(buck_inst) (0x18 + (buck_inst))
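+
+/*
+ * Example: with zero-based buck instances, TPS6594_REG_BUCKX_CTRL(0)
+ * expands to 0x04 and TPS6594_REG_BUCKX_CTRL(1) to 0x06, i.e. each buck
+ * has a CTRL/CONF register pair two addresses apart, while the
+ * PG_WINDOW registers are packed one address apart from 0x18.
+ */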
+
+#define TPS6594_REG_LDOX_CTRL(ldo_inst) (0x1d + (ldo_inst))
+#define TPS6594_REG_LDORTC_CTRL 0x22
+#define TPS6594_REG_LDOX_VOUT(ldo_inst) (0x23 + (ldo_inst))
+#define TPS6594_REG_LDOX_PG_WINDOW(ldo_inst) (0x27 + (ldo_inst))
+
+#define TPS6594_REG_VCCA_VMON_CTRL 0x2b
+#define TPS6594_REG_VCCA_PG_WINDOW 0x2c
+#define TPS6594_REG_VMON1_PG_WINDOW 0x2d
+#define TPS6594_REG_VMON1_PG_LEVEL 0x2e
+#define TPS6594_REG_VMON2_PG_WINDOW 0x2f
+#define TPS6594_REG_VMON2_PG_LEVEL 0x30
+
+#define TPS6594_REG_GPIOX_CONF(gpio_inst) (0x31 + (gpio_inst))
+#define TPS6594_REG_NPWRON_CONF 0x3c
+#define TPS6594_REG_GPIO_OUT_1 0x3d
+#define TPS6594_REG_GPIO_OUT_2 0x3e
+#define TPS6594_REG_GPIO_IN_1 0x3f
+#define TPS6594_REG_GPIO_IN_2 0x40
+#define TPS6594_REG_GPIOX_OUT(gpio_inst) (TPS6594_REG_GPIO_OUT_1 + (gpio_inst) / 8)
+#define TPS6594_REG_GPIOX_IN(gpio_inst) (TPS6594_REG_GPIO_IN_1 + (gpio_inst) / 8)
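+/*
+ * Example: gpio_inst 0..7 select TPS6594_REG_GPIO_OUT_1/GPIO_IN_1 and
+ * gpio_inst 8..10 select TPS6594_REG_GPIO_OUT_2/GPIO_IN_2, since the
+ * instance index divided by 8 picks the register.
+ */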
+
+#define TPS6594_REG_RAIL_SEL_1 0x41
+#define TPS6594_REG_RAIL_SEL_2 0x42
+#define TPS6594_REG_RAIL_SEL_3 0x43
+
+#define TPS6594_REG_FSM_TRIG_SEL_1 0x44
+#define TPS6594_REG_FSM_TRIG_SEL_2 0x45
+#define TPS6594_REG_FSM_TRIG_MASK_1 0x46
+#define TPS6594_REG_FSM_TRIG_MASK_2 0x47
+#define TPS6594_REG_FSM_TRIG_MASK_3 0x48
+
+#define TPS6594_REG_MASK_BUCK1_2 0x49
+#define TPS6594_REG_MASK_BUCK3_4 0x4a
+#define TPS6594_REG_MASK_BUCK5 0x4b
+#define TPS6594_REG_MASK_LDO1_2 0x4c
+#define TPS6594_REG_MASK_LDO3_4 0x4d
+#define TPS6594_REG_MASK_VMON 0x4e
+#define TPS6594_REG_MASK_GPIO1_8_FALL 0x4f
+#define TPS6594_REG_MASK_GPIO1_8_RISE 0x50
+#define TPS6594_REG_MASK_GPIO9_11 0x51
+#define TPS6594_REG_MASK_STARTUP 0x52
+#define TPS6594_REG_MASK_MISC 0x53
+#define TPS6594_REG_MASK_MODERATE_ERR 0x54
+#define TPS6594_REG_MASK_FSM_ERR 0x56
+#define TPS6594_REG_MASK_COMM_ERR 0x57
+#define TPS6594_REG_MASK_READBACK_ERR 0x58
+#define TPS6594_REG_MASK_ESM 0x59
+
+#define TPS6594_REG_INT_TOP 0x5a
+#define TPS6594_REG_INT_BUCK 0x5b
+#define TPS6594_REG_INT_BUCK1_2 0x5c
+#define TPS6594_REG_INT_BUCK3_4 0x5d
+#define TPS6594_REG_INT_BUCK5 0x5e
+#define TPS6594_REG_INT_LDO_VMON 0x5f
+#define TPS6594_REG_INT_LDO1_2 0x60
+#define TPS6594_REG_INT_LDO3_4 0x61
+#define TPS6594_REG_INT_VMON 0x62
+#define TPS6594_REG_INT_GPIO 0x63
+#define TPS6594_REG_INT_GPIO1_8 0x64
+#define TPS6594_REG_INT_STARTUP 0x65
+#define TPS6594_REG_INT_MISC 0x66
+#define TPS6594_REG_INT_MODERATE_ERR 0x67
+#define TPS6594_REG_INT_SEVERE_ERR 0x68
+#define TPS6594_REG_INT_FSM_ERR 0x69
+#define TPS6594_REG_INT_COMM_ERR 0x6a
+#define TPS6594_REG_INT_READBACK_ERR 0x6b
+#define TPS6594_REG_INT_ESM 0x6c
+
+#define TPS6594_REG_STAT_BUCK1_2 0x6d
+#define TPS6594_REG_STAT_BUCK3_4 0x6e
+#define TPS6594_REG_STAT_BUCK5 0x6f
+#define TPS6594_REG_STAT_LDO1_2 0x70
+#define TPS6594_REG_STAT_LDO3_4 0x71
+#define TPS6594_REG_STAT_VMON 0x72
+#define TPS6594_REG_STAT_STARTUP 0x73
+#define TPS6594_REG_STAT_MISC 0x74
+#define TPS6594_REG_STAT_MODERATE_ERR 0x75
+#define TPS6594_REG_STAT_SEVERE_ERR 0x76
+#define TPS6594_REG_STAT_READBACK_ERR 0x77
+
+#define TPS6594_REG_PGOOD_SEL_1 0x78
+#define TPS6594_REG_PGOOD_SEL_2 0x79
+#define TPS6594_REG_PGOOD_SEL_3 0x7a
+#define TPS6594_REG_PGOOD_SEL_4 0x7b
+
+#define TPS6594_REG_PLL_CTRL 0x7c
+
+#define TPS6594_REG_CONFIG_1 0x7d
+#define TPS6594_REG_CONFIG_2 0x7e
+
+#define TPS6594_REG_ENABLE_DRV_REG 0x80
+
+#define TPS6594_REG_MISC_CTRL 0x81
+
+#define TPS6594_REG_ENABLE_DRV_STAT 0x82
+
+#define TPS6594_REG_RECOV_CNT_REG_1 0x83
+#define TPS6594_REG_RECOV_CNT_REG_2 0x84
+
+#define TPS6594_REG_FSM_I2C_TRIGGERS 0x85
+#define TPS6594_REG_FSM_NSLEEP_TRIGGERS 0x86
+
+#define TPS6594_REG_BUCK_RESET_REG 0x87
+
+#define TPS6594_REG_SPREAD_SPECTRUM_1 0x88
+
+#define TPS6594_REG_FREQ_SEL 0x8a
+
+#define TPS6594_REG_FSM_STEP_SIZE 0x8b
+
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_1 0x8c
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_2 0x8d
+
+#define TPS6594_REG_USER_SPARE_REGS 0x8e
+
+#define TPS6594_REG_ESM_MCU_START_REG 0x8f
+#define TPS6594_REG_ESM_MCU_DELAY1_REG 0x90
+#define TPS6594_REG_ESM_MCU_DELAY2_REG 0x91
+#define TPS6594_REG_ESM_MCU_MODE_CFG 0x92
+#define TPS6594_REG_ESM_MCU_HMAX_REG 0x93
+#define TPS6594_REG_ESM_MCU_HMIN_REG 0x94
+#define TPS6594_REG_ESM_MCU_LMAX_REG 0x95
+#define TPS6594_REG_ESM_MCU_LMIN_REG 0x96
+#define TPS6594_REG_ESM_MCU_ERR_CNT_REG 0x97
+#define TPS6594_REG_ESM_SOC_START_REG 0x98
+#define TPS6594_REG_ESM_SOC_DELAY1_REG 0x99
+#define TPS6594_REG_ESM_SOC_DELAY2_REG 0x9a
+#define TPS6594_REG_ESM_SOC_MODE_CFG 0x9b
+#define TPS6594_REG_ESM_SOC_HMAX_REG 0x9c
+#define TPS6594_REG_ESM_SOC_HMIN_REG 0x9d
+#define TPS6594_REG_ESM_SOC_LMAX_REG 0x9e
+#define TPS6594_REG_ESM_SOC_LMIN_REG 0x9f
+#define TPS6594_REG_ESM_SOC_ERR_CNT_REG 0xa0
+
+#define TPS6594_REG_REGISTER_LOCK 0xa1
+
+#define TPS6594_REG_MANUFACTURING_VER 0xa6
+
+#define TPS6594_REG_CUSTOMER_NVM_ID_REG 0xa7
+
+#define TPS6594_REG_VMON_CONF_REG 0xa8
+
+#define TPS6594_REG_SOFT_REBOOT_REG 0xab
+
+#define TPS6594_REG_RTC_SECONDS 0xb5
+#define TPS6594_REG_RTC_MINUTES 0xb6
+#define TPS6594_REG_RTC_HOURS 0xb7
+#define TPS6594_REG_RTC_DAYS 0xb8
+#define TPS6594_REG_RTC_MONTHS 0xb9
+#define TPS6594_REG_RTC_YEARS 0xba
+#define TPS6594_REG_RTC_WEEKS 0xbb
+
+#define TPS6594_REG_ALARM_SECONDS 0xbc
+#define TPS6594_REG_ALARM_MINUTES 0xbd
+#define TPS6594_REG_ALARM_HOURS 0xbe
+#define TPS6594_REG_ALARM_DAYS 0xbf
+#define TPS6594_REG_ALARM_MONTHS 0xc0
+#define TPS6594_REG_ALARM_YEARS 0xc1
+
+#define TPS6594_REG_RTC_CTRL_1 0xc2
+#define TPS6594_REG_RTC_CTRL_2 0xc3
+#define TPS6594_REG_RTC_STATUS 0xc4
+#define TPS6594_REG_RTC_INTERRUPTS 0xc5
+#define TPS6594_REG_RTC_COMP_LSB 0xc6
+#define TPS6594_REG_RTC_COMP_MSB 0xc7
+#define TPS6594_REG_RTC_RESET_STATUS 0xc8
+
+#define TPS6594_REG_SCRATCH_PAD_REG_1 0xc9
+#define TPS6594_REG_SCRATCH_PAD_REG_2 0xca
+#define TPS6594_REG_SCRATCH_PAD_REG_3 0xcb
+#define TPS6594_REG_SCRATCH_PAD_REG_4 0xcc
+
+#define TPS6594_REG_PFSM_DELAY_REG_1 0xcd
+#define TPS6594_REG_PFSM_DELAY_REG_2 0xce
+#define TPS6594_REG_PFSM_DELAY_REG_3 0xcf
+#define TPS6594_REG_PFSM_DELAY_REG_4 0xd0
+
+/* Registers for page 1 of TPS6594 */
+#define TPS6594_REG_SERIAL_IF_CONFIG 0x11a
+#define TPS6594_REG_I2C1_ID 0x122
+#define TPS6594_REG_I2C2_ID 0x123
+
+/* Registers for page 4 of TPS6594 */
+#define TPS6594_REG_WD_ANSWER_REG 0x401
+#define TPS6594_REG_WD_QUESTION_ANSW_CNT 0x402
+#define TPS6594_REG_WD_WIN1_CFG 0x403
+#define TPS6594_REG_WD_WIN2_CFG 0x404
+#define TPS6594_REG_WD_LONGWIN_CFG 0x405
+#define TPS6594_REG_WD_MODE_REG 0x406
+#define TPS6594_REG_WD_QA_CFG 0x407
+#define TPS6594_REG_WD_ERR_STATUS 0x408
+#define TPS6594_REG_WD_THR_CFG 0x409
+#define TPS6594_REG_DWD_FAIL_CNT_REG 0x40a
+
+/* BUCKX_CTRL register field definition */
+#define TPS6594_BIT_BUCK_EN BIT(0)
+#define TPS6594_BIT_BUCK_FPWM BIT(1)
+#define TPS6594_BIT_BUCK_FPWM_MP BIT(2)
+#define TPS6594_BIT_BUCK_VSEL BIT(3)
+#define TPS6594_BIT_BUCK_VMON_EN BIT(4)
+#define TPS6594_BIT_BUCK_PLDN BIT(5)
+#define TPS6594_BIT_BUCK_RV_SEL BIT(7)
+
+/* BUCKX_CONF register field definition */
+#define TPS6594_MASK_BUCK_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_ILIM GENMASK(5, 3)
+
+/* BUCKX_PG_WINDOW register field definition */
+#define TPS6594_MASK_BUCK_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_UV_THR GENMASK(5, 3)
+
+/* BUCKX VSET */
+#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0)
+
+/* LDOX_CTRL register field definition */
+#define TPS6594_BIT_LDO_EN BIT(0)
+#define TPS6594_BIT_LDO_SLOW_RAMP BIT(1)
+#define TPS6594_BIT_LDO_VMON_EN BIT(4)
+#define TPS6594_MASK_LDO_PLDN GENMASK(6, 5)
+#define TPS6594_BIT_LDO_RV_SEL BIT(7)
+
+/* LDORTC_CTRL register field definition */
+#define TPS6594_BIT_LDORTC_DIS BIT(0)
+
+/* LDOX_VOUT register field definition */
+#define TPS6594_MASK_LDO123_VSET GENMASK(6, 1)
+#define TPS6594_MASK_LDO4_VSET GENMASK(6, 0)
+#define TPS6594_BIT_LDO_BYPASS BIT(7)
+
+/* LDOX_PG_WINDOW register field definition */
+#define TPS6594_MASK_LDO_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_LDO_UV_THR GENMASK(5, 3)
+
+/* VCCA_VMON_CTRL register field definition */
+#define TPS6594_BIT_VMON_EN BIT(0)
+#define TPS6594_BIT_VMON1_EN BIT(1)
+#define TPS6594_BIT_VMON1_RV_SEL BIT(2)
+#define TPS6594_BIT_VMON2_EN BIT(3)
+#define TPS6594_BIT_VMON2_RV_SEL BIT(4)
+#define TPS6594_BIT_VMON_DEGLITCH_SEL BIT(5)
+
+/* VCCA_PG_WINDOW register field definition */
+#define TPS6594_MASK_VCCA_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VCCA_UV_THR GENMASK(5, 3)
+#define TPS6594_BIT_VCCA_PG_SET BIT(6)
+
+/* VMONX_PG_WINDOW register field definition */
+#define TPS6594_MASK_VMONX_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VMONX_UV_THR GENMASK(5, 3)
+#define TPS6594_BIT_VMONX_RANGE BIT(6)
+
+/* GPIOX_CONF register field definition */
+#define TPS6594_BIT_GPIO_DIR BIT(0)
+#define TPS6594_BIT_GPIO_OD BIT(1)
+#define TPS6594_BIT_GPIO_PU_SEL BIT(2)
+#define TPS6594_BIT_GPIO_PU_PD_EN BIT(3)
+#define TPS6594_BIT_GPIO_DEGLITCH_EN BIT(4)
+#define TPS6594_MASK_GPIO_SEL GENMASK(7, 5)
+
+/* NPWRON_CONF register field definition */
+#define TPS6594_BIT_NRSTOUT_OD BIT(0)
+#define TPS6594_BIT_ENABLE_PU_SEL BIT(2)
+#define TPS6594_BIT_ENABLE_PU_PD_EN BIT(3)
+#define TPS6594_BIT_ENABLE_DEGLITCH_EN BIT(4)
+#define TPS6594_BIT_ENABLE_POL BIT(5)
+#define TPS6594_MASK_NPWRON_SEL GENMASK(7, 6)
+
+/* GPIO_OUT_X register field definition */
+#define TPS6594_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst) % 8)
+
+/* GPIO_IN_X register field definition */
+#define TPS6594_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst) % 8)
+#define TPS6594_BIT_NPWRON_IN BIT(3)
+
+/* RAIL_SEL_1 register field definition */
+#define TPS6594_MASK_BUCK1_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_BUCK2_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_BUCK3_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_BUCK4_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_2 register field definition */
+#define TPS6594_MASK_BUCK5_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_LDO1_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_LDO2_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_LDO3_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_3 register field definition */
+#define TPS6594_MASK_LDO4_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_VCCA_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_VMON1_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_VMON2_GRP_SEL GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_1 register field definition */
+#define TPS6594_MASK_MCU_RAIL_TRIG GENMASK(1, 0)
+#define TPS6594_MASK_SOC_RAIL_TRIG GENMASK(3, 2)
+#define TPS6594_MASK_OTHER_RAIL_TRIG GENMASK(5, 4)
+#define TPS6594_MASK_SEVERE_ERR_TRIG GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_2 register field definition */
+#define TPS6594_MASK_MODERATE_ERR_TRIG GENMASK(1, 0)
+
+/* FSM_TRIG_MASK_X register field definition */
+#define TPS6594_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 8)
+#define TPS6594_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 8 + 1)
+
+/* MASK_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
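+
+/*
+ * Example: two buck instances share each MASK_BUCKX register, four bits
+ * apart; TPS6594_BIT_BUCKX_UV_MASK(0) is BIT(1) and
+ * TPS6594_BIT_BUCKX_UV_MASK(1) is BIT(5), since % binds tighter than +
+ * in ((buck_inst) << 2) % 8 + 1.
+ */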
+
+/* MASK_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* MASK_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_MASK BIT(0)
+#define TPS6594_BIT_VCCA_UV_MASK BIT(1)
+#define TPS6594_BIT_VMON1_OV_MASK BIT(2)
+#define TPS6594_BIT_VMON1_UV_MASK BIT(3)
+#define TPS6594_BIT_VMON2_OV_MASK BIT(5)
+#define TPS6594_BIT_VMON2_UV_MASK BIT(6)
+
+/* MASK_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8)
+#define TPS6594_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8 + 3)
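+/*
+ * Example: GPIO1..GPIO8 (gpio_inst 0..7) get one bit per GPIO in the
+ * dedicated FALL/RISE mask registers, while GPIO9..GPIO11
+ * (gpio_inst 8..10) share MASK_GPIO9_11, with falling edges at
+ * BIT(0)..BIT(2) and rising edges at BIT(3)..BIT(5).
+ */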
+
+/* MASK_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_MASK BIT(0)
+#define TPS6594_BIT_ENABLE_MASK BIT(1)
+#define TPS6594_BIT_FSD_MASK BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_MASK BIT(5)
+
+/* MASK_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_MASK BIT(0)
+#define TPS6594_BIT_EXT_CLK_MASK BIT(1)
+#define TPS6594_BIT_TWARN_MASK BIT(3)
+
+/* MASK_MODERATE_ERR register field definition */
+#define TPS6594_BIT_BIST_FAIL_MASK BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_MASK BIT(2)
+#define TPS6594_BIT_SPMI_ERR_MASK BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_MASK BIT(5)
+#define TPS6594_BIT_NINT_READBACK_MASK BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_MASK BIT(7)
+
+/* MASK_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_MASK BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_MASK BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_MASK BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_MASK BIT(3)
+
+/* MASK_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_MASK BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_MASK BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_MASK BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_MASK BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_MASK BIT(7)
+
+/* MASK_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_MASK BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_MASK BIT(3)
+
+/* MASK_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_MASK BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_MASK BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_MASK BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_MASK BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_MASK BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_MASK BIT(5)
+
+/* INT_TOP register field definition */
+#define TPS6594_BIT_BUCK_INT BIT(0)
+#define TPS6594_BIT_LDO_VMON_INT BIT(1)
+#define TPS6594_BIT_GPIO_INT BIT(2)
+#define TPS6594_BIT_STARTUP_INT BIT(3)
+#define TPS6594_BIT_MISC_INT BIT(4)
+#define TPS6594_BIT_MODERATE_ERR_INT BIT(5)
+#define TPS6594_BIT_SEVERE_ERR_INT BIT(6)
+#define TPS6594_BIT_FSM_ERR_INT BIT(7)
+
+/* INT_BUCK register field definition */
+#define TPS6594_BIT_BUCK1_2_INT BIT(0)
+#define TPS6594_BIT_BUCK3_4_INT BIT(1)
+#define TPS6594_BIT_BUCK5_INT BIT(2)
+
+/* INT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_INT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_SC_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_BUCKX_ILIM_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* INT_LDO_VMON register field definition */
+#define TPS6594_BIT_LDO1_2_INT BIT(0)
+#define TPS6594_BIT_LDO3_4_INT BIT(1)
+#define TPS6594_BIT_VCCA_INT BIT(4)
+
+/* INT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_SC_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_LDOX_ILIM_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* INT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_INT BIT(0)
+#define TPS6594_BIT_VCCA_UV_INT BIT(1)
+#define TPS6594_BIT_VMON1_OV_INT BIT(2)
+#define TPS6594_BIT_VMON1_UV_INT BIT(3)
+#define TPS6594_BIT_VMON1_RV_INT BIT(4)
+#define TPS6594_BIT_VMON2_OV_INT BIT(5)
+#define TPS6594_BIT_VMON2_UV_INT BIT(6)
+#define TPS6594_BIT_VMON2_RV_INT BIT(7)
+
+/* INT_GPIO register field definition */
+#define TPS6594_BIT_GPIO9_INT BIT(0)
+#define TPS6594_BIT_GPIO10_INT BIT(1)
+#define TPS6594_BIT_GPIO11_INT BIT(2)
+#define TPS6594_BIT_GPIO1_8_INT BIT(3)
+
+/* INT_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_INT(gpio_inst) BIT(gpio_inst)
+
+/* INT_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_INT BIT(0)
+#define TPS6594_BIT_ENABLE_INT BIT(1)
+#define TPS6594_BIT_RTC_INT BIT(2)
+#define TPS6594_BIT_FSD_INT BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_INT BIT(5)
+
+/* INT_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_INT BIT(0)
+#define TPS6594_BIT_EXT_CLK_INT BIT(1)
+#define TPS6594_BIT_TWARN_INT BIT(3)
+
+/* INT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_INT BIT(0)
+#define TPS6594_BIT_BIST_FAIL_INT BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_INT BIT(2)
+#define TPS6594_BIT_RECOV_CNT_INT BIT(3)
+#define TPS6594_BIT_SPMI_ERR_INT BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_INT BIT(5)
+#define TPS6594_BIT_NINT_READBACK_INT BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_INT BIT(7)
+
+/* INT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_INT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_INT BIT(1)
+#define TPS6594_BIT_PFSM_ERR_INT BIT(2)
+
+/* INT_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_INT BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_INT BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_INT BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_INT BIT(3)
+#define TPS6594_BIT_COMM_ERR_INT BIT(4)
+#define TPS6594_BIT_READBACK_ERR_INT BIT(5)
+#define TPS6594_BIT_ESM_INT BIT(6)
+#define TPS6594_BIT_WD_INT BIT(7)
+
+/* INT_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_INT BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_INT BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_INT BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_INT BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_INT BIT(7)
+
+/* INT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_INT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_INT BIT(3)
+
+/* INT_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_INT BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_INT BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_INT BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_INT BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_INT BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_INT BIT(5)
+
+/* STAT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* STAT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* STAT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_STAT BIT(0)
+#define TPS6594_BIT_VCCA_UV_STAT BIT(1)
+#define TPS6594_BIT_VMON1_OV_STAT BIT(2)
+#define TPS6594_BIT_VMON1_UV_STAT BIT(3)
+#define TPS6594_BIT_VMON2_OV_STAT BIT(5)
+#define TPS6594_BIT_VMON2_UV_STAT BIT(6)
+
+/* STAT_STARTUP register field definition */
+#define TPS6594_BIT_ENABLE_STAT BIT(1)
+
+/* STAT_MISC register field definition */
+#define TPS6594_BIT_EXT_CLK_STAT BIT(1)
+#define TPS6594_BIT_TWARN_STAT BIT(3)
+
+/* STAT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_STAT BIT(0)
+
+/* STAT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_STAT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_STAT BIT(1)
+
+/* STAT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_STAT BIT(0)
+#define TPS6594_BIT_NINT_READBACK_STAT BIT(1)
+#define TPS6594_BIT_NRSTOUT_READBACK_STAT BIT(2)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_STAT BIT(3)
+
+/* PGOOD_SEL_1 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_BUCK2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_BUCK3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_BUCK4 GENMASK(7, 6)
+
+/* PGOOD_SEL_2 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK5 GENMASK(1, 0)
+
+/* PGOOD_SEL_3 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_LDO1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_LDO2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_LDO3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_LDO4 GENMASK(7, 6)
+
+/* PGOOD_SEL_4 register field definition */
+#define TPS6594_BIT_PGOOD_SEL_VCCA BIT(0)
+#define TPS6594_BIT_PGOOD_SEL_VMON1 BIT(1)
+#define TPS6594_BIT_PGOOD_SEL_VMON2 BIT(2)
+#define TPS6594_BIT_PGOOD_SEL_TDIE_WARN BIT(3)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT BIT(4)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT_SOC BIT(5)
+#define TPS6594_BIT_PGOOD_POL BIT(6)
+#define TPS6594_BIT_PGOOD_WINDOW BIT(7)
+
+/* PLL_CTRL register field definition */
+#define TPS6594_MASK_EXT_CLK_FREQ GENMASK(1, 0)
+
+/* CONFIG_1 register field definition */
+#define TPS6594_BIT_TWARN_LEVEL BIT(0)
+#define TPS6594_BIT_TSD_ORD_LEVEL BIT(1)
+#define TPS6594_BIT_I2C1_HS BIT(3)
+#define TPS6594_BIT_I2C2_HS BIT(4)
+#define TPS6594_BIT_EN_ILIM_FSM_CTRL BIT(5)
+#define TPS6594_BIT_NSLEEP1_MASK BIT(6)
+#define TPS6594_BIT_NSLEEP2_MASK BIT(7)
+
+/* CONFIG_2 register field definition */
+#define TPS6594_BIT_BB_CHARGER_EN BIT(0)
+#define TPS6594_BIT_BB_ICHR BIT(1)
+#define TPS6594_MASK_BB_VEOC GENMASK(3, 2)
+#define TPS6594_BB_EOC_RDY BIT(7)
+
+/* ENABLE_DRV_REG register field definition */
+#define TPS6594_BIT_ENABLE_DRV BIT(0)
+
+/* MISC_CTRL register field definition */
+#define TPS6594_BIT_NRSTOUT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC BIT(1)
+#define TPS6594_BIT_LPM_EN BIT(2)
+#define TPS6594_BIT_CLKMON_EN BIT(3)
+#define TPS6594_BIT_AMUXOUT_EN BIT(4)
+#define TPS6594_BIT_SEL_EXT_CLK BIT(5)
+#define TPS6594_MASK_SYNCCLKOUT_FREQ_SEL GENMASK(7, 6)
+
+/* ENABLE_DRV_STAT register field definition */
+#define TPS6594_BIT_EN_DRV_IN BIT(0)
+#define TPS6594_BIT_NRSTOUT_IN BIT(1)
+#define TPS6594_BIT_NRSTOUT_SOC_IN BIT(2)
+#define TPS6594_BIT_FORCE_EN_DRV_LOW BIT(3)
+#define TPS6594_BIT_SPMI_LPM_EN BIT(4)
+
+/* RECOV_CNT_REG_1 register field definition */
+#define TPS6594_MASK_RECOV_CNT GENMASK(3, 0)
+
+/* RECOV_CNT_REG_2 register field definition */
+#define TPS6594_MASK_RECOV_CNT_THR GENMASK(3, 0)
+#define TPS6594_BIT_RECOV_CNT_CLR BIT(4)
+
+/* FSM_I2C_TRIGGERS register field definition */
+#define TPS6594_BIT_TRIGGER_I2C(bit) BIT(bit)
+
+/* FSM_NSLEEP_TRIGGERS register field definition */
+#define TPS6594_BIT_NSLEEP1B BIT(0)
+#define TPS6594_BIT_NSLEEP2B BIT(1)
+
+/* BUCK_RESET_REG register field definition */
+#define TPS6594_BIT_BUCKX_RESET(buck_inst) BIT(buck_inst)
+
+/* SPREAD_SPECTRUM_1 register field definition */
+#define TPS6594_MASK_SS_DEPTH GENMASK(1, 0)
+#define TPS6594_BIT_SS_EN BIT(2)
+
+/* FREQ_SEL register field definition */
+#define TPS6594_BIT_BUCKX_FREQ_SEL(buck_inst) BIT(buck_inst)
+
+/* FSM_STEP_SIZE register field definition */
+#define TPS6594_MASK_PFSM_DELAY_STEP GENMASK(4, 0)
+
+/* LDO_RV_TIMEOUT_REG_1 register field definition */
+#define TPS6594_MASK_LDO1_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO2_RV_TIMEOUT GENMASK(7, 4)
+
+/* LDO_RV_TIMEOUT_REG_2 register field definition */
+#define TPS6594_MASK_LDO3_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO4_RV_TIMEOUT GENMASK(7, 4)
+
+/* USER_SPARE_REGS register field definition */
+#define TPS6594_BIT_USER_SPARE(bit) BIT(bit)
+
+/* ESM_MCU_START_REG register field definition */
+#define TPS6594_BIT_ESM_MCU_START BIT(0)
+
+/* ESM_MCU_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_MCU_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_MCU_EN BIT(6)
+#define TPS6594_BIT_ESM_MCU_MODE BIT(7)
+
+/* ESM_MCU_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0)
+
+/* ESM_SOC_START_REG register field definition */
+#define TPS6594_BIT_ESM_SOC_START BIT(0)
+
+/* ESM_SOC_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_SOC_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_SOC_EN BIT(6)
+#define TPS6594_BIT_ESM_SOC_MODE BIT(7)
+
+/* ESM_SOC_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT GENMASK(4, 0)
+
+/* REGISTER_LOCK register field definition */
+#define TPS6594_BIT_REGISTER_LOCK_STATUS BIT(0)
+
+/* VMON_CONF register field definition */
+#define TPS6594_MASK_VMON1_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_VMON2_SLEW_RATE GENMASK(5, 3)
+
+/* SOFT_REBOOT_REG register field definition */
+#define TPS6594_BIT_SOFT_REBOOT BIT(0)
+
+/* RTC_SECONDS & ALARM_SECONDS register field definition */
+#define TPS6594_MASK_SECOND_0 GENMASK(3, 0)
+#define TPS6594_MASK_SECOND_1 GENMASK(6, 4)
+
+/* RTC_MINUTES & ALARM_MINUTES register field definition */
+#define TPS6594_MASK_MINUTE_0 GENMASK(3, 0)
+#define TPS6594_MASK_MINUTE_1 GENMASK(6, 4)
+
+/* RTC_HOURS & ALARM_HOURS register field definition */
+#define TPS6594_MASK_HOUR_0 GENMASK(3, 0)
+#define TPS6594_MASK_HOUR_1 GENMASK(5, 4)
+#define TPS6594_BIT_PM_NAM BIT(7)
+
+/* RTC_DAYS & ALARM_DAYS register field definition */
+#define TPS6594_MASK_DAY_0 GENMASK(3, 0)
+#define TPS6594_MASK_DAY_1 GENMASK(5, 4)
+
+/* RTC_MONTHS & ALARM_MONTHS register field definition */
+#define TPS6594_MASK_MONTH_0 GENMASK(3, 0)
+#define TPS6594_BIT_MONTH_1 BIT(4)
+
+/* RTC_YEARS & ALARM_YEARS register field definition */
+#define TPS6594_MASK_YEAR_0 GENMASK(3, 0)
+#define TPS6594_MASK_YEAR_1 GENMASK(7, 4)
+
+/* RTC_WEEKS register field definition */
+#define TPS6594_MASK_WEEK GENMASK(2, 0)
+
+/* RTC_CTRL_1 register field definition */
+#define TPS6594_BIT_STOP_RTC BIT(0)
+#define TPS6594_BIT_ROUND_30S BIT(1)
+#define TPS6594_BIT_AUTO_COMP BIT(2)
+#define TPS6594_BIT_MODE_12_24 BIT(3)
+#define TPS6594_BIT_SET_32_COUNTER BIT(5)
+#define TPS6594_BIT_GET_TIME BIT(6)
+#define TPS6594_BIT_RTC_V_OPT BIT(7)
+
+/* RTC_CTRL_2 register field definition */
+#define TPS6594_BIT_XTAL_EN BIT(0)
+#define TPS6594_MASK_XTAL_SEL GENMASK(2, 1)
+#define TPS6594_BIT_LP_STANDBY_SEL BIT(3)
+#define TPS6594_BIT_FAST_BIST BIT(4)
+#define TPS6594_MASK_STARTUP_DEST GENMASK(6, 5)
+#define TPS6594_BIT_FIRST_STARTUP_DONE BIT(7)
+
+/* RTC_STATUS register field definition */
+#define TPS6594_BIT_RUN BIT(1)
+#define TPS6594_BIT_TIMER BIT(5)
+#define TPS6594_BIT_ALARM BIT(6)
+#define TPS6594_BIT_POWER_UP BIT(7)
+
+/* RTC_INTERRUPTS register field definition */
+#define TPS6594_MASK_EVERY GENMASK(1, 0)
+#define TPS6594_BIT_IT_TIMER BIT(2)
+#define TPS6594_BIT_IT_ALARM BIT(3)
+
+/* RTC_RESET_STATUS register field definition */
+#define TPS6594_BIT_RESET_STATUS_RTC BIT(0)
+
+/* SERIAL_IF_CONFIG register field definition */
+#define TPS6594_BIT_I2C_SPI_SEL BIT(0)
+#define TPS6594_BIT_I2C1_SPI_CRC_EN BIT(1)
+#define TPS6594_BIT_I2C2_CRC_EN BIT(2)
+#define TPS6594_MASK_T_CRC GENMASK(7, 3)
+
+/* WD_QUESTION_ANSW_CNT register field definition */
+#define TPS6594_MASK_WD_QUESTION GENMASK(3, 0)
+#define TPS6594_MASK_WD_ANSW_CNT GENMASK(5, 4)
+
+/* WD_MODE_REG register field definition */
+#define TPS6594_BIT_WD_RETURN_LONGWIN BIT(0)
+#define TPS6594_BIT_WD_MODE_SELECT BIT(1)
+#define TPS6594_BIT_WD_PWRHOLD BIT(2)
+
+/* WD_QA_CFG register field definition */
+#define TPS6594_MASK_WD_QUESTION_SEED GENMASK(3, 0)
+#define TPS6594_MASK_WD_QA_LFSR GENMASK(5, 4)
+#define TPS6594_MASK_WD_QA_FDBK GENMASK(7, 6)
+
+/* WD_ERR_STATUS register field definition */
+#define TPS6594_BIT_WD_LONGWIN_TIMEOUT_INT BIT(0)
+#define TPS6594_BIT_WD_TIMEOUT BIT(1)
+#define TPS6594_BIT_WD_TRIG_EARLY BIT(2)
+#define TPS6594_BIT_WD_ANSW_EARLY BIT(3)
+#define TPS6594_BIT_WD_SEQ_ERR BIT(4)
+#define TPS6594_BIT_WD_ANSW_ERR BIT(5)
+#define TPS6594_BIT_WD_FAIL_INT BIT(6)
+#define TPS6594_BIT_WD_RST_INT BIT(7)
+
+/* WD_THR_CFG register field definition */
+#define TPS6594_MASK_WD_RST_TH GENMASK(2, 0)
+#define TPS6594_MASK_WD_FAIL_TH GENMASK(5, 3)
+#define TPS6594_BIT_WD_EN BIT(6)
+#define TPS6594_BIT_WD_RST_EN BIT(7)
+
+/* WD_FAIL_CNT_REG register field definition */
+#define TPS6594_MASK_WD_FAIL_CNT GENMASK(3, 0)
+#define TPS6594_BIT_WD_FIRST_OK BIT(5)
+#define TPS6594_BIT_WD_BAD_EVENT BIT(6)
+
+/* CRC8 polynomial for I2C & SPI protocols */
+#define TPS6594_CRC8_POLYNOMIAL 0x07
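+
+/*
+ * Sketch (illustrative, using the kernel's <linux/crc8.h> helpers;
+ * the table name is arbitrary):
+ *
+ *	DECLARE_CRC8_TABLE(tps6594_crc_table);
+ *	crc8_populate_msb(tps6594_crc_table, TPS6594_CRC8_POLYNOMIAL);
+ *	crc = crc8(tps6594_crc_table, buf, len, CRC8_INIT_VALUE);
+ */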
+
+/* IRQs */
+enum tps6594_irqs {
+ /* INT_BUCK1_2 register */
+ TPS6594_IRQ_BUCK1_OV,
+ TPS6594_IRQ_BUCK1_UV,
+ TPS6594_IRQ_BUCK1_SC,
+ TPS6594_IRQ_BUCK1_ILIM,
+ TPS6594_IRQ_BUCK2_OV,
+ TPS6594_IRQ_BUCK2_UV,
+ TPS6594_IRQ_BUCK2_SC,
+ TPS6594_IRQ_BUCK2_ILIM,
+ /* INT_BUCK3_4 register */
+ TPS6594_IRQ_BUCK3_OV,
+ TPS6594_IRQ_BUCK3_UV,
+ TPS6594_IRQ_BUCK3_SC,
+ TPS6594_IRQ_BUCK3_ILIM,
+ TPS6594_IRQ_BUCK4_OV,
+ TPS6594_IRQ_BUCK4_UV,
+ TPS6594_IRQ_BUCK4_SC,
+ TPS6594_IRQ_BUCK4_ILIM,
+ /* INT_BUCK5 register */
+ TPS6594_IRQ_BUCK5_OV,
+ TPS6594_IRQ_BUCK5_UV,
+ TPS6594_IRQ_BUCK5_SC,
+ TPS6594_IRQ_BUCK5_ILIM,
+ /* INT_LDO1_2 register */
+ TPS6594_IRQ_LDO1_OV,
+ TPS6594_IRQ_LDO1_UV,
+ TPS6594_IRQ_LDO1_SC,
+ TPS6594_IRQ_LDO1_ILIM,
+ TPS6594_IRQ_LDO2_OV,
+ TPS6594_IRQ_LDO2_UV,
+ TPS6594_IRQ_LDO2_SC,
+ TPS6594_IRQ_LDO2_ILIM,
+ /* INT_LDO3_4 register */
+ TPS6594_IRQ_LDO3_OV,
+ TPS6594_IRQ_LDO3_UV,
+ TPS6594_IRQ_LDO3_SC,
+ TPS6594_IRQ_LDO3_ILIM,
+ TPS6594_IRQ_LDO4_OV,
+ TPS6594_IRQ_LDO4_UV,
+ TPS6594_IRQ_LDO4_SC,
+ TPS6594_IRQ_LDO4_ILIM,
+ /* INT_VMON register */
+ TPS6594_IRQ_VCCA_OV,
+ TPS6594_IRQ_VCCA_UV,
+ TPS6594_IRQ_VMON1_OV,
+ TPS6594_IRQ_VMON1_UV,
+ TPS6594_IRQ_VMON1_RV,
+ TPS6594_IRQ_VMON2_OV,
+ TPS6594_IRQ_VMON2_UV,
+ TPS6594_IRQ_VMON2_RV,
+ /* INT_GPIO register */
+ TPS6594_IRQ_GPIO9,
+ TPS6594_IRQ_GPIO10,
+ TPS6594_IRQ_GPIO11,
+ /* INT_GPIO1_8 register */
+ TPS6594_IRQ_GPIO1,
+ TPS6594_IRQ_GPIO2,
+ TPS6594_IRQ_GPIO3,
+ TPS6594_IRQ_GPIO4,
+ TPS6594_IRQ_GPIO5,
+ TPS6594_IRQ_GPIO6,
+ TPS6594_IRQ_GPIO7,
+ TPS6594_IRQ_GPIO8,
+ /* INT_STARTUP register */
+ TPS6594_IRQ_NPWRON_START,
+ TPS6594_IRQ_ENABLE,
+ TPS6594_IRQ_FSD,
+ TPS6594_IRQ_SOFT_REBOOT,
+ /* INT_MISC register */
+ TPS6594_IRQ_BIST_PASS,
+ TPS6594_IRQ_EXT_CLK,
+ TPS6594_IRQ_TWARN,
+ /* INT_MODERATE_ERR register */
+ TPS6594_IRQ_TSD_ORD,
+ TPS6594_IRQ_BIST_FAIL,
+ TPS6594_IRQ_REG_CRC_ERR,
+ TPS6594_IRQ_RECOV_CNT,
+ TPS6594_IRQ_SPMI_ERR,
+ TPS6594_IRQ_NPWRON_LONG,
+ TPS6594_IRQ_NINT_READBACK,
+ TPS6594_IRQ_NRSTOUT_READBACK,
+ /* INT_SEVERE_ERR register */
+ TPS6594_IRQ_TSD_IMM,
+ TPS6594_IRQ_VCCA_OVP,
+ TPS6594_IRQ_PFSM_ERR,
+ /* INT_FSM_ERR register */
+ TPS6594_IRQ_IMM_SHUTDOWN,
+ TPS6594_IRQ_ORD_SHUTDOWN,
+ TPS6594_IRQ_MCU_PWR_ERR,
+ TPS6594_IRQ_SOC_PWR_ERR,
+ /* INT_COMM_ERR register */
+ TPS6594_IRQ_COMM_FRM_ERR,
+ TPS6594_IRQ_COMM_CRC_ERR,
+ TPS6594_IRQ_COMM_ADR_ERR,
+ TPS6594_IRQ_I2C2_CRC_ERR,
+ TPS6594_IRQ_I2C2_ADR_ERR,
+ /* INT_READBACK_ERR register */
+ TPS6594_IRQ_EN_DRV_READBACK,
+ TPS6594_IRQ_NRSTOUT_SOC_READBACK,
+ /* INT_ESM register */
+ TPS6594_IRQ_ESM_SOC_PIN,
+ TPS6594_IRQ_ESM_SOC_FAIL,
+ TPS6594_IRQ_ESM_SOC_RST,
+ /* RTC_STATUS register */
+ TPS6594_IRQ_TIMER,
+ TPS6594_IRQ_ALARM,
+ TPS6594_IRQ_POWER_UP,
+};
+
+#define TPS6594_IRQ_NAME_BUCK1_OV "buck1_ov"
+#define TPS6594_IRQ_NAME_BUCK1_UV "buck1_uv"
+#define TPS6594_IRQ_NAME_BUCK1_SC "buck1_sc"
+#define TPS6594_IRQ_NAME_BUCK1_ILIM "buck1_ilim"
+#define TPS6594_IRQ_NAME_BUCK2_OV "buck2_ov"
+#define TPS6594_IRQ_NAME_BUCK2_UV "buck2_uv"
+#define TPS6594_IRQ_NAME_BUCK2_SC "buck2_sc"
+#define TPS6594_IRQ_NAME_BUCK2_ILIM "buck2_ilim"
+#define TPS6594_IRQ_NAME_BUCK3_OV "buck3_ov"
+#define TPS6594_IRQ_NAME_BUCK3_UV "buck3_uv"
+#define TPS6594_IRQ_NAME_BUCK3_SC "buck3_sc"
+#define TPS6594_IRQ_NAME_BUCK3_ILIM "buck3_ilim"
+#define TPS6594_IRQ_NAME_BUCK4_OV "buck4_ov"
+#define TPS6594_IRQ_NAME_BUCK4_UV "buck4_uv"
+#define TPS6594_IRQ_NAME_BUCK4_SC "buck4_sc"
+#define TPS6594_IRQ_NAME_BUCK4_ILIM "buck4_ilim"
+#define TPS6594_IRQ_NAME_BUCK5_OV "buck5_ov"
+#define TPS6594_IRQ_NAME_BUCK5_UV "buck5_uv"
+#define TPS6594_IRQ_NAME_BUCK5_SC "buck5_sc"
+#define TPS6594_IRQ_NAME_BUCK5_ILIM "buck5_ilim"
+#define TPS6594_IRQ_NAME_LDO1_OV "ldo1_ov"
+#define TPS6594_IRQ_NAME_LDO1_UV "ldo1_uv"
+#define TPS6594_IRQ_NAME_LDO1_SC "ldo1_sc"
+#define TPS6594_IRQ_NAME_LDO1_ILIM "ldo1_ilim"
+#define TPS6594_IRQ_NAME_LDO2_OV "ldo2_ov"
+#define TPS6594_IRQ_NAME_LDO2_UV "ldo2_uv"
+#define TPS6594_IRQ_NAME_LDO2_SC "ldo2_sc"
+#define TPS6594_IRQ_NAME_LDO2_ILIM "ldo2_ilim"
+#define TPS6594_IRQ_NAME_LDO3_OV "ldo3_ov"
+#define TPS6594_IRQ_NAME_LDO3_UV "ldo3_uv"
+#define TPS6594_IRQ_NAME_LDO3_SC "ldo3_sc"
+#define TPS6594_IRQ_NAME_LDO3_ILIM "ldo3_ilim"
+#define TPS6594_IRQ_NAME_LDO4_OV "ldo4_ov"
+#define TPS6594_IRQ_NAME_LDO4_UV "ldo4_uv"
+#define TPS6594_IRQ_NAME_LDO4_SC "ldo4_sc"
+#define TPS6594_IRQ_NAME_LDO4_ILIM "ldo4_ilim"
+#define TPS6594_IRQ_NAME_VCCA_OV "vcca_ov"
+#define TPS6594_IRQ_NAME_VCCA_UV "vcca_uv"
+#define TPS6594_IRQ_NAME_VMON1_OV "vmon1_ov"
+#define TPS6594_IRQ_NAME_VMON1_UV "vmon1_uv"
+#define TPS6594_IRQ_NAME_VMON1_RV "vmon1_rv"
+#define TPS6594_IRQ_NAME_VMON2_OV "vmon2_ov"
+#define TPS6594_IRQ_NAME_VMON2_UV "vmon2_uv"
+#define TPS6594_IRQ_NAME_VMON2_RV "vmon2_rv"
+#define TPS6594_IRQ_NAME_GPIO9 "gpio9"
+#define TPS6594_IRQ_NAME_GPIO10 "gpio10"
+#define TPS6594_IRQ_NAME_GPIO11 "gpio11"
+#define TPS6594_IRQ_NAME_GPIO1 "gpio1"
+#define TPS6594_IRQ_NAME_GPIO2 "gpio2"
+#define TPS6594_IRQ_NAME_GPIO3 "gpio3"
+#define TPS6594_IRQ_NAME_GPIO4 "gpio4"
+#define TPS6594_IRQ_NAME_GPIO5 "gpio5"
+#define TPS6594_IRQ_NAME_GPIO6 "gpio6"
+#define TPS6594_IRQ_NAME_GPIO7 "gpio7"
+#define TPS6594_IRQ_NAME_GPIO8 "gpio8"
+#define TPS6594_IRQ_NAME_NPWRON_START "npwron_start"
+#define TPS6594_IRQ_NAME_ENABLE "enable"
+#define TPS6594_IRQ_NAME_FSD "fsd"
+#define TPS6594_IRQ_NAME_SOFT_REBOOT "soft_reboot"
+#define TPS6594_IRQ_NAME_BIST_PASS "bist_pass"
+#define TPS6594_IRQ_NAME_EXT_CLK "ext_clk"
+#define TPS6594_IRQ_NAME_TWARN "twarn"
+#define TPS6594_IRQ_NAME_TSD_ORD "tsd_ord"
+#define TPS6594_IRQ_NAME_BIST_FAIL "bist_fail"
+#define TPS6594_IRQ_NAME_REG_CRC_ERR "reg_crc_err"
+#define TPS6594_IRQ_NAME_RECOV_CNT "recov_cnt"
+#define TPS6594_IRQ_NAME_SPMI_ERR "spmi_err"
+#define TPS6594_IRQ_NAME_NPWRON_LONG "npwron_long"
+#define TPS6594_IRQ_NAME_NINT_READBACK "nint_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_READBACK "nrstout_readback"
+#define TPS6594_IRQ_NAME_TSD_IMM "tsd_imm"
+#define TPS6594_IRQ_NAME_VCCA_OVP "vcca_ovp"
+#define TPS6594_IRQ_NAME_PFSM_ERR "pfsm_err"
+#define TPS6594_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown"
+#define TPS6594_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown"
+#define TPS6594_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err"
+#define TPS6594_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err"
+#define TPS6594_IRQ_NAME_COMM_FRM_ERR "comm_frm_err"
+#define TPS6594_IRQ_NAME_COMM_CRC_ERR "comm_crc_err"
+#define TPS6594_IRQ_NAME_COMM_ADR_ERR "comm_adr_err"
+#define TPS6594_IRQ_NAME_EN_DRV_READBACK "en_drv_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_SOC_READBACK "nrstout_soc_readback"
+#define TPS6594_IRQ_NAME_ESM_SOC_PIN "esm_soc_pin"
+#define TPS6594_IRQ_NAME_ESM_SOC_FAIL "esm_soc_fail"
+#define TPS6594_IRQ_NAME_ESM_SOC_RST "esm_soc_rst"
+#define TPS6594_IRQ_NAME_TIMER "timer"
+#define TPS6594_IRQ_NAME_ALARM "alarm"
+#define TPS6594_IRQ_NAME_POWERUP "powerup"
+
+/**
+ * struct tps6594 - device private data structure
+ *
+ * @dev: MFD parent device
+ * @chip_id: chip ID
+ * @reg: I2C slave address or SPI chip select number
+ * @use_crc: if true, use CRC for I2C and SPI interface protocols
+ * @regmap: regmap for accessing the device registers
+ * @irq: irq generated by the device
+ * @irq_data: regmap irq data used for the irq chip
+ */
+struct tps6594 {
+ struct device *dev;
+ unsigned long chip_id;
+ unsigned short reg;
+ bool use_crc;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+bool tps6594_is_volatile_reg(struct device *dev, unsigned int reg);
+int tps6594_device_init(struct tps6594 *tps, bool enable_crc);
+
+#endif /* __LINUX_MFD_TPS6594_H */
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
index ffe81127d91c..7807fa329db0 100644
--- a/include/linux/mfd/tps68470.h
+++ b/include/linux/mfd/tps68470.h
@@ -75,6 +75,17 @@
#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+#define TPS68470_CLKCFG2_DRV_STR_2MA 0x05
+#define TPS68470_PLL_OUTPUT_ENABLE 0x02
+#define TPS68470_CLK_SRC_XTAL BIT(0)
+#define TPS68470_PLLSWR_DEFAULT GENMASK(1, 0)
+#define TPS68470_OSC_EXT_CAP_DEFAULT 0x05
+
+#define TPS68470_OUTPUT_A_SHIFT 0x00
+#define TPS68470_OUTPUT_B_SHIFT 0x02
+#define TPS68470_CLK_SRC_SHIFT GENMASK(2, 0)
+#define TPS68470_OSC_EXT_CAP_SHIFT BIT(2)
+
#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
deleted file mode 100644
index 2c75c9c9318f..000000000000
--- a/include/linux/mfd/tps80031.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
-
-#ifndef __LINUX_MFD_TPS80031_H
-#define __LINUX_MFD_TPS80031_H
-
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-/* Pull-ups/Pull-downs */
-#define TPS80031_CFG_INPUT_PUPD1 0xF0
-#define TPS80031_CFG_INPUT_PUPD2 0xF1
-#define TPS80031_CFG_INPUT_PUPD3 0xF2
-#define TPS80031_CFG_INPUT_PUPD4 0xF3
-#define TPS80031_CFG_LDO_PD1 0xF4
-#define TPS80031_CFG_LDO_PD2 0xF5
-#define TPS80031_CFG_SMPS_PD 0xF6
-
-/* Real Time Clock */
-#define TPS80031_SECONDS_REG 0x00
-#define TPS80031_MINUTES_REG 0x01
-#define TPS80031_HOURS_REG 0x02
-#define TPS80031_DAYS_REG 0x03
-#define TPS80031_MONTHS_REG 0x04
-#define TPS80031_YEARS_REG 0x05
-#define TPS80031_WEEKS_REG 0x06
-#define TPS80031_ALARM_SECONDS_REG 0x08
-#define TPS80031_ALARM_MINUTES_REG 0x09
-#define TPS80031_ALARM_HOURS_REG 0x0A
-#define TPS80031_ALARM_DAYS_REG 0x0B
-#define TPS80031_ALARM_MONTHS_REG 0x0C
-#define TPS80031_ALARM_YEARS_REG 0x0D
-#define TPS80031_RTC_CTRL_REG 0x10
-#define TPS80031_RTC_STATUS_REG 0x11
-#define TPS80031_RTC_INTERRUPTS_REG 0x12
-#define TPS80031_RTC_COMP_LSB_REG 0x13
-#define TPS80031_RTC_COMP_MSB_REG 0x14
-#define TPS80031_RTC_RESET_STATUS_REG 0x16
-
-/*PMC Master Module */
-#define TPS80031_PHOENIX_START_CONDITION 0x1F
-#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
-#define TPS80031_STS_HW_CONDITIONS 0x21
-#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
-#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
-#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
-#define TPS80031_PHOENIX_DEV_ON 0x25
-#define TPS80031_STS_PWR_GRP_STATE 0x27
-#define TPS80031_PH_CFG_VSYSLOW 0x28
-#define TPS80031_PH_STS_BOOT 0x29
-#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
-#define TPS80031_PHOENIX_SEQ_CFG 0x2B
-#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
-#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
-#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
-
-/* PMC Slave Module - Broadcast */
-#define TPS80031_BROADCAST_ADDR_ALL 0x31
-#define TPS80031_BROADCAST_ADDR_REF 0x32
-#define TPS80031_BROADCAST_ADDR_PROV 0x33
-#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
-
-/* PMC Slave Module SMPS Regulators */
-#define TPS80031_SMPS4_CFG_TRANS 0x41
-#define TPS80031_SMPS4_CFG_STATE 0x42
-#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
-#define TPS80031_VIO_CFG_TRANS 0x47
-#define TPS80031_VIO_CFG_STATE 0x48
-#define TPS80031_VIO_CFG_FORCE 0x49
-#define TPS80031_VIO_CFG_VOLTAGE 0x4A
-#define TPS80031_VIO_CFG_STEP 0x48
-#define TPS80031_SMPS1_CFG_TRANS 0x53
-#define TPS80031_SMPS1_CFG_STATE 0x54
-#define TPS80031_SMPS1_CFG_FORCE 0x55
-#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
-#define TPS80031_SMPS1_CFG_STEP 0x57
-#define TPS80031_SMPS2_CFG_TRANS 0x59
-#define TPS80031_SMPS2_CFG_STATE 0x5A
-#define TPS80031_SMPS2_CFG_FORCE 0x5B
-#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
-#define TPS80031_SMPS2_CFG_STEP 0x5D
-#define TPS80031_SMPS3_CFG_TRANS 0x65
-#define TPS80031_SMPS3_CFG_STATE 0x66
-#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
-
-/* PMC Slave Module LDO Regulators */
-#define TPS80031_VANA_CFG_TRANS 0x81
-#define TPS80031_VANA_CFG_STATE 0x82
-#define TPS80031_VANA_CFG_VOLTAGE 0x83
-#define TPS80031_LDO2_CFG_TRANS 0x85
-#define TPS80031_LDO2_CFG_STATE 0x86
-#define TPS80031_LDO2_CFG_VOLTAGE 0x87
-#define TPS80031_LDO4_CFG_TRANS 0x89
-#define TPS80031_LDO4_CFG_STATE 0x8A
-#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
-#define TPS80031_LDO3_CFG_TRANS 0x8D
-#define TPS80031_LDO3_CFG_STATE 0x8E
-#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
-#define TPS80031_LDO6_CFG_TRANS 0x91
-#define TPS80031_LDO6_CFG_STATE 0x92
-#define TPS80031_LDO6_CFG_VOLTAGE 0x93
-#define TPS80031_LDOLN_CFG_TRANS 0x95
-#define TPS80031_LDOLN_CFG_STATE 0x96
-#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
-#define TPS80031_LDO5_CFG_TRANS 0x99
-#define TPS80031_LDO5_CFG_STATE 0x9A
-#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
-#define TPS80031_LDO1_CFG_TRANS 0x9D
-#define TPS80031_LDO1_CFG_STATE 0x9E
-#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
-#define TPS80031_LDOUSB_CFG_TRANS 0xA1
-#define TPS80031_LDOUSB_CFG_STATE 0xA2
-#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
-#define TPS80031_LDO7_CFG_TRANS 0xA5
-#define TPS80031_LDO7_CFG_STATE 0xA6
-#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
-
-/* PMC Slave Module External Control */
-#define TPS80031_REGEN1_CFG_TRANS 0xAE
-#define TPS80031_REGEN1_CFG_STATE 0xAF
-#define TPS80031_REGEN2_CFG_TRANS 0xB1
-#define TPS80031_REGEN2_CFG_STATE 0xB2
-#define TPS80031_SYSEN_CFG_TRANS 0xB4
-#define TPS80031_SYSEN_CFG_STATE 0xB5
-
-/* PMC Slave Module Internal Control */
-#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
-#define TPS80031_NRESPWRON_CFG_STATE 0xB8
-#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
-#define TPS80031_CLK32KAO_CFG_STATE 0xBB
-#define TPS80031_CLK32KG_CFG_TRANS 0xBD
-#define TPS80031_CLK32KG_CFG_STATE 0xBE
-#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
-#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
-#define TPS80031_VRTC_CFG_TRANS 0xC3
-#define TPS80031_VRTC_CFG_STATE 0xC4
-#define TPS80031_BIAS_CFG_TRANS 0xC6
-#define TPS80031_BIAS_CFG_STATE 0xC7
-#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
-#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
-#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
-#define TPS80031_RC6MHZ_CFG_STATE 0xCD
-#define TPS80031_TMP_CFG_TRANS 0xCF
-#define TPS80031_TMP_CFG_STATE 0xD0
-
-/* PMC Slave Module resources assignment */
-#define TPS80031_PREQ1_RES_ASS_A 0xD7
-#define TPS80031_PREQ1_RES_ASS_B 0xD8
-#define TPS80031_PREQ1_RES_ASS_C 0xD9
-#define TPS80031_PREQ2_RES_ASS_A 0xDA
-#define TPS80031_PREQ2_RES_ASS_B 0xDB
-#define TPS80031_PREQ2_RES_ASS_C 0xDC
-#define TPS80031_PREQ3_RES_ASS_A 0xDD
-#define TPS80031_PREQ3_RES_ASS_B 0xDE
-#define TPS80031_PREQ3_RES_ASS_C 0xDF
-
-/* PMC Slave Module Miscellaneous */
-#define TPS80031_SMPS_OFFSET 0xE0
-#define TPS80031_SMPS_MULT 0xE3
-#define TPS80031_MISC1 0xE4
-#define TPS80031_MISC2 0xE5
-#define TPS80031_BBSPOR_CFG 0xE6
-#define TPS80031_TMP_CFG 0xE7
-
-/* Battery Charging Controller and Indicator LED */
-#define TPS80031_CONTROLLER_CTRL2 0xDA
-#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
-#define TPS80031_CHARGERUSB_VSYSREG 0xDC
-#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
-#define TPS80031_LINEAR_CHRG_STS 0xDE
-#define TPS80031_CONTROLLER_INT_MASK 0xE0
-#define TPS80031_CONTROLLER_CTRL1 0xE1
-#define TPS80031_CONTROLLER_WDG 0xE2
-#define TPS80031_CONTROLLER_STAT1 0xE3
-#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
-#define TPS80031_CHARGERUSB_INT_MASK 0xE5
-#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
-#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
-#define TPS80031_CHARGERUSB_CTRL1 0xE8
-#define TPS80031_CHARGERUSB_CTRL2 0xE9
-#define TPS80031_CHARGERUSB_CTRL3 0xEA
-#define TPS80031_CHARGERUSB_STAT1 0xEB
-#define TPS80031_CHARGERUSB_VOREG 0xEC
-#define TPS80031_CHARGERUSB_VICHRG 0xED
-#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
-#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
-#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
-#define TPS80031_LED_PWM_CTRL1 0xF4
-#define TPS80031_LED_PWM_CTRL2 0xF5
-
-/* USB On-The-Go */
-#define TPS80031_BACKUP_REG 0xFA
-#define TPS80031_USB_VENDOR_ID_LSB 0x00
-#define TPS80031_USB_VENDOR_ID_MSB 0x01
-#define TPS80031_USB_PRODUCT_ID_LSB 0x02
-#define TPS80031_USB_PRODUCT_ID_MSB 0x03
-#define TPS80031_USB_VBUS_CTRL_SET 0x04
-#define TPS80031_USB_VBUS_CTRL_CLR 0x05
-#define TPS80031_USB_ID_CTRL_SET 0x06
-#define TPS80031_USB_ID_CTRL_CLR 0x07
-#define TPS80031_USB_VBUS_INT_SRC 0x08
-#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
-#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
-#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
-#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
-#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
-#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
-#define TPS80031_USB_ID_INT_SRC 0x0F
-#define TPS80031_USB_ID_INT_LATCH_SET 0x10
-#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
-#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
-#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
-#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
-#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
-#define TPS80031_USB_OTG_ADP_CTRL 0x16
-#define TPS80031_USB_OTG_ADP_HIGH 0x17
-#define TPS80031_USB_OTG_ADP_LOW 0x18
-#define TPS80031_USB_OTG_ADP_RISE 0x19
-#define TPS80031_USB_OTG_REVISION 0x1A
-
-/* Gas Gauge */
-#define TPS80031_FG_REG_00 0xC0
-#define TPS80031_FG_REG_01 0xC1
-#define TPS80031_FG_REG_02 0xC2
-#define TPS80031_FG_REG_03 0xC3
-#define TPS80031_FG_REG_04 0xC4
-#define TPS80031_FG_REG_05 0xC5
-#define TPS80031_FG_REG_06 0xC6
-#define TPS80031_FG_REG_07 0xC7
-#define TPS80031_FG_REG_08 0xC8
-#define TPS80031_FG_REG_09 0xC9
-#define TPS80031_FG_REG_10 0xCA
-#define TPS80031_FG_REG_11 0xCB
-
-/* General Purpose ADC */
-#define TPS80031_GPADC_CTRL 0x2E
-#define TPS80031_GPADC_CTRL2 0x2F
-#define TPS80031_RTSELECT_LSB 0x32
-#define TPS80031_RTSELECT_ISB 0x33
-#define TPS80031_RTSELECT_MSB 0x34
-#define TPS80031_GPSELECT_ISB 0x35
-#define TPS80031_CTRL_P1 0x36
-#define TPS80031_RTCH0_LSB 0x37
-#define TPS80031_RTCH0_MSB 0x38
-#define TPS80031_RTCH1_LSB 0x39
-#define TPS80031_RTCH1_MSB 0x3A
-#define TPS80031_GPCH0_LSB 0x3B
-#define TPS80031_GPCH0_MSB 0x3C
-
-/* SIM, MMC and Battery Detection */
-#define TPS80031_SIMDEBOUNCING 0xEB
-#define TPS80031_SIMCTRL 0xEC
-#define TPS80031_MMCDEBOUNCING 0xED
-#define TPS80031_MMCCTRL 0xEE
-#define TPS80031_BATDEBOUNCING 0xEF
-
-/* Vibrator Driver and PWMs */
-#define TPS80031_VIBCTRL 0x9B
-#define TPS80031_VIBMODE 0x9C
-#define TPS80031_PWM1ON 0xBA
-#define TPS80031_PWM1OFF 0xBB
-#define TPS80031_PWM2ON 0xBD
-#define TPS80031_PWM2OFF 0xBE
-
-/* Control Interface */
-#define TPS80031_INT_STS_A 0xD0
-#define TPS80031_INT_STS_B 0xD1
-#define TPS80031_INT_STS_C 0xD2
-#define TPS80031_INT_MSK_LINE_A 0xD3
-#define TPS80031_INT_MSK_LINE_B 0xD4
-#define TPS80031_INT_MSK_LINE_C 0xD5
-#define TPS80031_INT_MSK_STS_A 0xD6
-#define TPS80031_INT_MSK_STS_B 0xD7
-#define TPS80031_INT_MSK_STS_C 0xD8
-#define TPS80031_TOGGLE1 0x90
-#define TPS80031_TOGGLE2 0x91
-#define TPS80031_TOGGLE3 0x92
-#define TPS80031_PWDNSTATUS1 0x93
-#define TPS80031_PWDNSTATUS2 0x94
-#define TPS80031_VALIDITY0 0x17
-#define TPS80031_VALIDITY1 0x18
-#define TPS80031_VALIDITY2 0x19
-#define TPS80031_VALIDITY3 0x1A
-#define TPS80031_VALIDITY4 0x1B
-#define TPS80031_VALIDITY5 0x1C
-#define TPS80031_VALIDITY6 0x1D
-#define TPS80031_VALIDITY7 0x1E
-
-/* Version number related register */
-#define TPS80031_JTAGVERNUM 0x87
-#define TPS80031_EPROM_REV 0xDF
-
-/* GPADC Trimming Bits. */
-#define TPS80031_GPADC_TRIM0 0xCC
-#define TPS80031_GPADC_TRIM1 0xCD
-#define TPS80031_GPADC_TRIM2 0xCE
-#define TPS80031_GPADC_TRIM3 0xCF
-#define TPS80031_GPADC_TRIM4 0xD0
-#define TPS80031_GPADC_TRIM5 0xD1
-#define TPS80031_GPADC_TRIM6 0xD2
-#define TPS80031_GPADC_TRIM7 0xD3
-#define TPS80031_GPADC_TRIM8 0xD4
-#define TPS80031_GPADC_TRIM9 0xD5
-#define TPS80031_GPADC_TRIM10 0xD6
-#define TPS80031_GPADC_TRIM11 0xD7
-#define TPS80031_GPADC_TRIM12 0xD8
-#define TPS80031_GPADC_TRIM13 0xD9
-#define TPS80031_GPADC_TRIM14 0xDA
-#define TPS80031_GPADC_TRIM15 0xDB
-#define TPS80031_GPADC_TRIM16 0xDC
-#define TPS80031_GPADC_TRIM17 0xDD
-#define TPS80031_GPADC_TRIM18 0xDE
-
-/* TPS80031_CONTROLLER_STAT1 bit fields */
-#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
-#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
-#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
-#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
-#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
-#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
-/* TPS80031_CONTROLLER_INT_MASK bit field */
-#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
-#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
-#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
-#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
-
-#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
-
-/* TPS80031_PHOENIX_DEV_ON bit field */
-#define TPS80031_DEVOFF 0x1
-
-#define TPS80031_EXT_CONTROL_CFG_TRANS 0
-#define TPS80031_EXT_CONTROL_CFG_STATE 1
-
-/* State register field */
-#define TPS80031_STATE_OFF 0x00
-#define TPS80031_STATE_ON 0x01
-#define TPS80031_STATE_MASK 0x03
-
-/* Trans register field */
-#define TPS80031_TRANS_ACTIVE_OFF 0x00
-#define TPS80031_TRANS_ACTIVE_ON 0x01
-#define TPS80031_TRANS_ACTIVE_MASK 0x03
-#define TPS80031_TRANS_SLEEP_OFF 0x00
-#define TPS80031_TRANS_SLEEP_ON 0x04
-#define TPS80031_TRANS_SLEEP_MASK 0x0C
-#define TPS80031_TRANS_OFF_OFF 0x00
-#define TPS80031_TRANS_OFF_ACTIVE 0x10
-#define TPS80031_TRANS_OFF_MASK 0x30
-
-#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
- TPS80031_PWR_REQ_INPUT_PREQ2 | \
- TPS80031_PWR_REQ_INPUT_PREQ3)
-
-/* TPS80031_BBSPOR_CFG bit field */
-#define TPS80031_BBSPOR_CHG_EN 0x8
-#define TPS80031_MAX_REGISTER 0xFF
-
-struct i2c_client;
-
-/* Supported chips */
-enum chips {
- TPS80031 = 0x00000001,
- TPS80032 = 0x00000002,
-};
-
-enum {
- TPS80031_INT_PWRON,
- TPS80031_INT_RPWRON,
- TPS80031_INT_SYS_VLOW,
- TPS80031_INT_RTC_ALARM,
- TPS80031_INT_RTC_PERIOD,
- TPS80031_INT_HOT_DIE,
- TPS80031_INT_VXX_SHORT,
- TPS80031_INT_SPDURATION,
- TPS80031_INT_WATCHDOG,
- TPS80031_INT_BAT,
- TPS80031_INT_SIM,
- TPS80031_INT_MMC,
- TPS80031_INT_RES,
- TPS80031_INT_GPADC_RT,
- TPS80031_INT_GPADC_SW2_EOC,
- TPS80031_INT_CC_AUTOCAL,
- TPS80031_INT_ID_WKUP,
- TPS80031_INT_VBUSS_WKUP,
- TPS80031_INT_ID,
- TPS80031_INT_VBUS,
- TPS80031_INT_CHRG_CTRL,
- TPS80031_INT_EXT_CHRG,
- TPS80031_INT_INT_CHRG,
- TPS80031_INT_RES2,
- TPS80031_INT_BAT_TEMP_OVRANGE,
- TPS80031_INT_BAT_REMOVED,
- TPS80031_INT_VBUS_DET,
- TPS80031_INT_VAC_DET,
- TPS80031_INT_FAULT_WDG,
- TPS80031_INT_LINCH_GATED,
-
- /* Last interrupt id to get the end number */
- TPS80031_INT_NR,
-};
-
-/* TPS80031 Slave IDs */
-#define TPS80031_NUM_SLAVES 4
-#define TPS80031_SLAVE_ID0 0
-#define TPS80031_SLAVE_ID1 1
-#define TPS80031_SLAVE_ID2 2
-#define TPS80031_SLAVE_ID3 3
-
-/* TPS80031 I2C addresses */
-#define TPS80031_I2C_ID0_ADDR 0x12
-#define TPS80031_I2C_ID1_ADDR 0x48
-#define TPS80031_I2C_ID2_ADDR 0x49
-#define TPS80031_I2C_ID3_ADDR 0x4A
-
-enum {
- TPS80031_REGULATOR_VIO,
- TPS80031_REGULATOR_SMPS1,
- TPS80031_REGULATOR_SMPS2,
- TPS80031_REGULATOR_SMPS3,
- TPS80031_REGULATOR_SMPS4,
- TPS80031_REGULATOR_VANA,
- TPS80031_REGULATOR_LDO1,
- TPS80031_REGULATOR_LDO2,
- TPS80031_REGULATOR_LDO3,
- TPS80031_REGULATOR_LDO4,
- TPS80031_REGULATOR_LDO5,
- TPS80031_REGULATOR_LDO6,
- TPS80031_REGULATOR_LDO7,
- TPS80031_REGULATOR_LDOLN,
- TPS80031_REGULATOR_LDOUSB,
- TPS80031_REGULATOR_VBUS,
- TPS80031_REGULATOR_REGEN1,
- TPS80031_REGULATOR_REGEN2,
- TPS80031_REGULATOR_SYSEN,
- TPS80031_REGULATOR_MAX,
-};
-
-/* Different configurations for the rails */
-enum {
- /* USBLDO input selection */
- TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
- TPS80031_USBLDO_INPUT_PMID = 0x00000002,
-
- /* LDO3 output mode */
- TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
-
- /* VBUS configuration */
- TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
- TPS80031_VBUS_SW_ONLY = 0x00000008,
- TPS80031_VBUS_SW_N_ID = 0x00000010,
-};
-
-/* External controls requests */
-enum tps80031_ext_control {
- TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
- TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
- TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
- TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
- TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
- TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
-};
-
-enum tps80031_pupd_pins {
- TPS80031_PREQ1 = 0,
- TPS80031_PREQ2A,
- TPS80031_PREQ2B,
- TPS80031_PREQ2C,
- TPS80031_PREQ3,
- TPS80031_NRES_WARM,
- TPS80031_PWM_FORCE,
- TPS80031_CHRG_EXT_CHRG_STATZ,
- TPS80031_SIM,
- TPS80031_MMC,
- TPS80031_GPADC_START,
- TPS80031_DVSI2C_SCL,
- TPS80031_DVSI2C_SDA,
- TPS80031_CTLI2C_SCL,
- TPS80031_CTLI2C_SDA,
-};
-
-enum tps80031_pupd_settings {
- TPS80031_PUPD_NORMAL,
- TPS80031_PUPD_PULLDOWN,
- TPS80031_PUPD_PULLUP,
-};
-
-struct tps80031 {
- struct device *dev;
- unsigned long chip_info;
- int es_version;
- struct i2c_client *clients[TPS80031_NUM_SLAVES];
- struct regmap *regmap[TPS80031_NUM_SLAVES];
- struct regmap_irq_chip_data *irq_data;
-};
-
-struct tps80031_pupd_init_data {
- int input_pin;
- int setting;
-};
-
-/*
- * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
- *
- * @reg_init_data: The regulator init data.
- * @ext_ctrl_flag: External control flag for sleep/power request control.
- * @config_flags: Configuration flag to configure the rails.
- * It should be the OR of the config enums.
- */
-
-struct tps80031_regulator_platform_data {
- struct regulator_init_data *reg_init_data;
- unsigned int ext_ctrl_flag;
- unsigned int config_flags;
-};
-
-struct tps80031_platform_data {
- int irq_base;
- bool use_power_off;
- struct tps80031_pupd_init_data *pupd_init_data;
- int pupd_init_data_size;
- struct tps80031_regulator_platform_data
- *regulator_pdata[TPS80031_REGULATOR_MAX];
-};
-
-static inline int tps80031_write(struct device *dev, int sid,
- int reg, uint8_t val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_write(tps80031->regmap[sid], reg, val);
-}
-
-static inline int tps80031_writes(struct device *dev, int sid, int reg,
- int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_read(struct device *dev, int sid,
- int reg, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
-
- ret = regmap_read(tps80031->regmap[sid], reg, &ival);
- if (ret < 0) {
- dev_err(dev, "failed reading from reg 0x%02x\n", reg);
- return ret;
- }
-
- *val = ival;
- return ret;
-}
-
-static inline int tps80031_reads(struct device *dev, int sid,
- int reg, int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_set_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg,
- bit_mask, bit_mask);
-}
-
-static inline int tps80031_clr_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
-}
-
-static inline int tps80031_update(struct device *dev, int sid,
- int reg, uint8_t val, uint8_t mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
-}
-
-static inline unsigned long tps80031_get_chip_info(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->chip_info;
-}
-
-static inline int tps80031_get_pmu_version(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->es_version;
-}
-
-static inline int tps80031_irq_get_virq(struct device *dev, int irq)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_irq_get_virq(tps80031->irq_data, irq);
-}
-
-extern int tps80031_ext_power_req_config(struct device *dev,
- unsigned long ext_ctrl_flag, int preq_bit,
- int state_reg_add, int trans_reg_add);
-#endif /*__LINUX_MFD_TPS80031_H */
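For reference, a sub-driver sitting on top of the inline regmap wrappers removed above would have accessed the PMIC roughly as follows. This is a minimal sketch; the function name, slave ID and register choice are assumptions for illustration, not taken from any in-tree driver.

#include <linux/device.h>
#include <linux/mfd/tps80031.h>

/* Hypothetical sketch: enable backup-battery charging by setting
 * TPS80031_BBSPOR_CHG_EN through the helpers deleted above.
 * The slave ID used here is illustrative. */
static int example_enable_bb_charging(struct device *parent)
{
	uint8_t val;
	int ret;

	ret = tps80031_read(parent, TPS80031_SLAVE_ID1,
			    TPS80031_BBSPOR_CFG, &val);
	if (ret < 0)
		return ret;

	if (val & TPS80031_BBSPOR_CHG_EN)
		return 0;	/* already enabled */

	return tps80031_set_bits(parent, TPS80031_SLAVE_ID1,
				 TPS80031_BBSPOR_CFG,
				 TPS80031_BBSPOR_CHG_EN);
}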
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 089e8942223a..85dc406173db 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -69,6 +69,8 @@ enum twl6030_module_ids {
TWL6030_MODULE_GPADC,
TWL6030_MODULE_GASGAUGE,
+ /* A few extra registers before the registers shared with the 6030 */
+ TWL6032_MODULE_CHARGE,
TWL6030_MODULE_LAST,
};
@@ -459,6 +461,7 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6
+#define TWL6030_PHOENIX_DEV_ON 0x06
/*----------------------------------------------------------------------*/
/* Power bus message definitions */
@@ -591,11 +594,6 @@ struct twl4030_gpio_platform_data {
*/
u32 pullups;
u32 pulldowns;
-
- int (*setup)(struct device *dev,
- unsigned gpio, unsigned ngpio);
- int (*teardown)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
@@ -694,61 +692,6 @@ struct twl4030_audio_data {
unsigned int irq_base;
};
-struct twl4030_platform_data {
- struct twl4030_clock_init_data *clock;
- struct twl4030_bci_platform_data *bci;
- struct twl4030_gpio_platform_data *gpio;
- struct twl4030_madc_platform_data *madc;
- struct twl4030_keypad_data *keypad;
- struct twl4030_usb_data *usb;
- struct twl4030_power_data *power;
- struct twl4030_audio_data *audio;
-
- /* Common LDO regulators for TWL4030/TWL6030 */
- struct regulator_init_data *vdac;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
- struct regulator_init_data *vdd1;
- struct regulator_init_data *vdd2;
- struct regulator_init_data *vdd3;
- /* TWL4030 LDO regulators */
- struct regulator_init_data *vpll1;
- struct regulator_init_data *vpll2;
- struct regulator_init_data *vmmc1;
- struct regulator_init_data *vmmc2;
- struct regulator_init_data *vsim;
- struct regulator_init_data *vaux4;
- struct regulator_init_data *vio;
- struct regulator_init_data *vintana1;
- struct regulator_init_data *vintana2;
- struct regulator_init_data *vintdig;
- /* TWL6030 LDO regulators */
- struct regulator_init_data *vmmc;
- struct regulator_init_data *vpp;
- struct regulator_init_data *vusim;
- struct regulator_init_data *vana;
- struct regulator_init_data *vcxio;
- struct regulator_init_data *vusb;
- struct regulator_init_data *clk32kg;
- struct regulator_init_data *v1v8;
- struct regulator_init_data *v2v1;
- /* TWL6032 LDO regulators */
- struct regulator_init_data *ldo1;
- struct regulator_init_data *ldo2;
- struct regulator_init_data *ldo3;
- struct regulator_init_data *ldo4;
- struct regulator_init_data *ldo5;
- struct regulator_init_data *ldo6;
- struct regulator_init_data *ldo7;
- struct regulator_init_data *ldoln;
- struct regulator_init_data *ldousb;
- /* TWL6032 DCDC regulators */
- struct regulator_init_data *smps3;
- struct regulator_init_data *smps4;
- struct regulator_init_data *vio6025;
-};
-
struct twl_regulator_driver_data {
int (*set_voltage)(void *data, int target_uV);
int (*get_voltage)(void *data);
@@ -781,8 +724,6 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base);
#define TWL4030_VAUX3_DEV_GRP 0x1F
#define TWL4030_VAUX3_DEDICATED 0x22
-static inline int twl4030charger_usb_en(int enable) { return 0; }
-
/*----------------------------------------------------------------------*/
/* Linux-specific regulator identifiers ... for now, we only support
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 1fc7450bd8ab..286a724e379a 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -174,35 +174,7 @@
#define TWL6040_GPO_MAX 3
-/* TODO: All platform data struct can be removed */
-struct twl6040_codec_data {
- u16 hs_left_step;
- u16 hs_right_step;
- u16 hf_left_step;
- u16 hf_right_step;
-};
-
-struct twl6040_vibra_data {
- unsigned int vibldrv_res; /* left driver resistance */
- unsigned int vibrdrv_res; /* right driver resistance */
- unsigned int viblmotor_res; /* left motor resistance */
- unsigned int vibrmotor_res; /* right motor resistance */
- int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
- int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
-};
-
-struct twl6040_gpo_data {
- int gpio_base;
-};
-
-struct twl6040_platform_data {
- int audpwron_gpio; /* audio power-on gpio */
-
- struct twl6040_codec_data *codec;
- struct twl6040_vibra_data *vibra;
- struct twl6040_gpo_data *gpo;
-};
-
+struct gpio_desc;
struct regmap;
struct regmap_irq_chips_data;
@@ -218,7 +190,7 @@ struct twl6040 {
struct mfd_cell cells[TWL6040_CELLS];
struct completion ready;
- int audpwron;
+ struct gpio_desc *audpwron;
int power_count;
int rev;
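The audpwron change above swaps a raw GPIO number for a GPIO descriptor. A probe path using the descriptor-based consumer API might look roughly like the sketch below; the "audpwron" con_id is an assumption for illustration.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Sketch of the gpiod pattern enabled by the audpwron conversion.
 * devm_gpiod_get_optional() returns NULL when the GPIO is absent,
 * and gpiod_set_value_cansleep() treats NULL as a no-op. */
static struct gpio_desc *example_get_audpwron(struct device *dev)
{
	return devm_gpiod_get_optional(dev, "audpwron", GPIOD_OUT_LOW);
}

static void example_power_on(struct gpio_desc *audpwron)
{
	gpiod_set_value_cansleep(audpwron, 1);
}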
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h
index 43bcf35afe27..ede237384723 100644
--- a/include/linux/mfd/ucb1x00.h
+++ b/include/linux/mfd/ucb1x00.h
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/mfd/mcp.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#define UCB_IO_DATA 0x00
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
index bb8d2e276668..76a943c83c63 100644
--- a/include/linux/mfd/wcd934x/registers.h
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -18,6 +18,8 @@
#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
@@ -103,21 +105,58 @@
#define WCD934X_ANA_AMIC3 0x0610
#define WCD934X_ANA_AMIC4 0x0611
#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD934X_MBHC_L_DET_EN BIT(7)
+#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD934X_ANA_MBHC_BIAS_EN BIT(0)
#define WCD934X_ANA_MBHC_ZDET 0x0616
#define WCD934X_ANA_MBHC_RESULT_1 0x0617
#define WCD934X_ANA_MBHC_RESULT_2 0x0618
#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MBHC_BTN0 0x061a
+#define WCD934X_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MBHC_BTN1 0x061b
+#define WCD934X_ANA_MBHC_BTN2 0x061c
+#define WCD934X_ANA_MBHC_BTN3 0x061d
+#define WCD934X_ANA_MBHC_BTN4 0x061e
+#define WCD934X_ANA_MBHC_BTN5 0x061f
+#define WCD934X_ANA_MBHC_BTN6 0x0620
+#define WCD934X_ANA_MBHC_BTN7 0x0621
+#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
#define WCD934X_ANA_MICB1 0x0622
#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_MICB_DISABLE 0
+#define WCD934X_MICB_ENABLE 1
+#define WCD934X_MICB_PULL_UP 2
+#define WCD934X_MICB_PULL_DOWN 3
#define WCD934X_ANA_MICB_PULL_UP 0x80
#define WCD934X_ANA_MICB_ENABLE 0x40
#define WCD934X_ANA_MICB_DISABLE 0x0
#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB2_ENABLE BIT(6)
+#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB2_RAMP 0x0624
+#define WCD934X_RAMP_EN_MASK BIT(7)
+#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
#define WCD934X_ANA_MICB3 0x0625
#define WCD934X_ANA_MICB4 0x0626
#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MBHC_CTL_CLK 0x0656
+#define WCD934X_MBHC_CTL_BCS 0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1 0x065b
#define WCD934X_MICB1_TEST_CTL_1 0x066b
#define WCD934X_MICB1_TEST_CTL_2 0x066c
#define WCD934X_MICB2_TEST_CTL_1 0x066e
@@ -141,7 +180,11 @@
#define WCD934X_HPH_CNP_WG_CTL 0x06cc
#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_CNP_WG_TIME 0x06cd
#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_PA_CTL2 0x06d2
+#define WCD934X_HPHPA_GND_R_MASK BIT(6)
+#define WCD934X_HPHPA_GND_L_MASK BIT(4)
#define WCD934X_HPH_L_EN 0x06d3
#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
@@ -152,6 +195,8 @@
#define WCD934X_HPH_OCP_DET_MASK BIT(0)
#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_HPH_R_ATEST 0x06d8
+#define WCD934X_HPHPA_GND_OVR_MASK BIT(1)
#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
@@ -172,7 +217,19 @@
#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_1 0x0720
+#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD934X_MBHC_CTL_RCO_EN BIT(7)
#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722
+#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6)
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723
+#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726
#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
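The new MBHC and MICB definitions above follow the usual GENMASK()/BIT() convention, so the fields can be programmed with FIELD_PREP() against a regmap. A minimal sketch; the regmap handle and raw field value are assumptions for illustration:

#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/wcd934x/registers.h>

/* Sketch: write the MICB2 output-voltage field using its GENMASK()
 * mask. FIELD_PREP() shifts the value into the masked bit positions. */
static int example_set_micb2_vout(struct regmap *regmap, unsigned int vout)
{
	return regmap_update_bits(regmap, WCD934X_ANA_MICB2,
				  WCD934X_ANA_MICB2_VOUT_MASK,
				  FIELD_PREP(WCD934X_ANA_MICB2_VOUT_MASK,
					     vout));
}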
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index c4a940d98912..77b8c0a26674 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -9,13 +9,14 @@
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/mutex.h>
-#include <linux/rwlock_types.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16
+
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
@@ -85,13 +86,15 @@ enum mhi_ch_type {
};
/**
- * struct image_info - Firmware and RDDM table table
- * @mhi_buf - Buffer for firmware and RDDM table
- * @entries - # of entries in table
+ * struct image_info - Firmware and RDDM table
+ * @mhi_buf: Buffer for firmware and RDDM table
+ * @entries: # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
+ /* private: from internal.h */
struct bhi_vec_entry *bhi_vec;
+ /* public: */
u32 entries;
};
@@ -114,6 +117,7 @@ struct mhi_link_info {
* @MHI_EE_WFW: WLAN firmware mode
* @MHI_EE_PTHRU: Passthrough
* @MHI_EE_EDL: Embedded downloader
+ * @MHI_EE_FP: Flash Programmer Environment
*/
enum mhi_ee_type {
MHI_EE_PBL,
@@ -123,7 +127,8 @@ enum mhi_ee_type {
MHI_EE_WFW,
MHI_EE_PTHRU,
MHI_EE_EDL,
- MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
+ MHI_EE_FP,
+ MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
MHI_EE_NOT_SUPPORTED,
MHI_EE_MAX,
@@ -200,7 +205,7 @@ enum mhi_db_brst_mode {
* @num: The number assigned to this channel
* @num_elements: The number of elements that can be queued to this channel
* @local_elements: The local ring length of the channel
- * @event_ring: The event rung index that services this channel
+ * @event_ring: The event ring index that services this channel
* @dir: Direction that data may flow on this channel
* @type: Channel type
* @ee_mask: Execution Environment mask for this channel
@@ -211,7 +216,6 @@ enum mhi_db_brst_mode {
* @offload_channel: The client manages the channel completely
* @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
* @auto_queue: Framework will automatically queue buffers for DL traffic
- * @auto_start: Automatically start (open) this channel
* @wake-capable: Channel capable of waking up the system
*/
struct mhi_channel_config {
@@ -229,7 +233,6 @@ struct mhi_channel_config {
bool offload_channel;
bool doorbell_mode_switch;
bool auto_queue;
- bool auto_start;
bool wake_capable;
};
@@ -263,6 +266,7 @@ struct mhi_event_config {
* struct mhi_controller_config - Root MHI controller configuration
* @max_channels: Maximum number of channels supported
* @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting for the device to be ready (optional)
* @buf_len: Size of automatically allocated buffers. 0 means use default
* @num_channels: Number of channels defined in @ch_cfg
* @ch_cfg: Array of defined channels
@@ -274,9 +278,10 @@ struct mhi_event_config {
struct mhi_controller_config {
u32 max_channels;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 buf_len;
u32 num_channels;
- struct mhi_channel_config *ch_cfg;
+ const struct mhi_channel_config *ch_cfg;
u32 num_events;
struct mhi_event_config *event_cfg;
bool use_bounce_buf;
@@ -288,17 +293,23 @@ struct mhi_controller_config {
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
* controller (required)
* @mhi_dev: MHI device instance for the controller
+ * @debugfs_dentry: MHI controller debugfs directory
* @regs: Base address of MHI MMIO register space (required)
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
- * @fw_image: Firmware image name for normal booting (required)
+ * @fw_image: Firmware image name for normal booting (optional)
+ * @fw_data: Firmware image data content for normal booting, used only
+ * if fw_image is NULL and fbc_download is true (optional)
+ * @fw_sz: Firmware image data size for normal booting, used only if fw_image
+ * is NULL and fbc_download is true (optional)
* @edl_image: Firmware image name for emergency download mode (optional)
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size downloaded through BHIe (optional)
* @seg_len: BHIe vector size (optional)
+ * @reg_len: Length of the MHI MMIO region (required)
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
* @mhi_chan: Points to the channel configuration table
@@ -308,29 +319,28 @@ struct mhi_controller_config {
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
- * @nr_irqs_req: Number of IRQs required to operate (optional)
* @nr_irqs: Number of IRQ allocated by bus master (required)
- * @family_number: MHI controller family number
- * @device_number: MHI controller device number
- * @major_version: MHI controller major revision number
- * @minor_version: MHI controller minor revision number
+ * @serial_number: MHI controller serial number obtained from BHI
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @pm_mutex: Mutex for suspend/resume operation
* @pm_lock: Lock for protecting MHI power management state
* @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting for the device to be ready (optional)
* @pm_state: MHI power management state
* @db_access: DB access states
* @ee: MHI device execution environment
* @dev_state: MHI device state
* @dev_wake: Device wakeup count
* @pending_pkts: Pending packets for the controller
+ * @M0, M2, M3: Counters to track number of device MHI state changes
* @transition_list: List of MHI state transitions
* @transition_lock: Lock for protecting MHI state transition list
* @wlock: Lock for protecting device wakeup
* @mhi_link_info: Device bandwidth info
* @st_worker: State transition worker
+ * @hiprio_wq: High priority workqueue for MHI work such as state transitions
* @state_event: State change event
* @status_cb: CB function to notify power states of the device (required)
* @wake_get: CB function to assert device wake (optional)
@@ -342,28 +352,23 @@ struct mhi_controller_config {
* @unmap_single: CB function to destroy TRE buffer
* @read_reg: Read a MHI register via the physical link (required)
* @write_reg: Write a MHI register via the physical link (required)
+ * @reset: Controller specific reset function (optional)
* @buffer_len: Bounce buffer length
+ * @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
* @fbc_download: MHI host needs to do complete image transfer (optional)
- * @pre_init: MHI host needs to do pre-initialization before power up
* @wake_set: Device wakeup set flag
+ * @irq_flags: irq flags passed to request_irq (optional)
+ * @mru: the default MRU for the MHI device
*
* Fields marked as (required) need to be populated by the controller driver
* before calling mhi_register_controller(). For the fields marked as (optional)
* they can be populated depending on the usecase.
- *
- * The following fields are present for the purpose of implementing any device
- * specific quirks or customizations for specific MHI revisions used in device
- * by the controller drivers. The MHI stack will just populate these fields
- * during mhi_register_controller():
- * family_number
- * device_number
- * major_version
- * minor_version
*/
struct mhi_controller {
struct device *cntrl_dev;
struct mhi_device *mhi_dev;
+ struct dentry *debugfs_dentry;
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
@@ -372,10 +377,13 @@ struct mhi_controller {
dma_addr_t iova_start;
dma_addr_t iova_stop;
const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
const char *edl_image;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
+ size_t reg_len;
struct image_info *fbc_image;
struct image_info *rddm_image;
struct mhi_chan *mhi_chan;
@@ -385,12 +393,8 @@ struct mhi_controller {
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
- u32 nr_irqs_req;
u32 nr_irqs;
- u32 family_number;
- u32 device_number;
- u32 major_version;
- u32 minor_version;
+ u32 serial_number;
struct mhi_event *mhi_event;
struct mhi_cmd *mhi_cmd;
@@ -399,17 +403,20 @@ struct mhi_controller {
struct mutex pm_mutex;
rwlock_t pm_lock;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 pm_state;
u32 db_access;
enum mhi_ee_type ee;
enum mhi_state dev_state;
atomic_t dev_wake;
atomic_t pending_pkts;
+ u32 M0, M2, M3;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
struct mhi_link_info mhi_link_info;
struct work_struct st_worker;
+ struct workqueue_struct *hiprio_wq;
wait_queue_head_t state_event;
void (*status_cb)(struct mhi_controller *mhi_cntrl,
@@ -427,19 +434,22 @@ struct mhi_controller {
u32 *out);
void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
u32 val);
+ void (*reset)(struct mhi_controller *mhi_cntrl);
size_t buffer_len;
+ int index;
bool bounce_buf;
bool fbc_download;
- bool pre_init;
bool wake_set;
+ unsigned long irq_flags;
+ u32 mru;
};
/**
- * struct mhi_device - Structure representing a MHI device which binds
- * to channels
+ * struct mhi_device - Structure representing an MHI device which binds
+ * to channels or is associated with controllers
* @id: Pointer to MHI device ID struct
- * @chan_name: Name of the channel to which the device binds
+ * @name: Name of the associated MHI device
* @mhi_cntrl: Controller the device belongs to
* @ul_chan: UL channel for the device
* @dl_chan: DL channel for the device
@@ -451,7 +461,7 @@ struct mhi_controller {
*/
struct mhi_device {
const struct mhi_device_id *id;
- const char *chan_name;
+ const char *name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
@@ -518,12 +528,24 @@ struct mhi_driver {
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
/**
+ * mhi_alloc_controller - Allocate the MHI Controller structure
+ * Allocate the mhi_controller structure using zero initialized memory
+ */
+struct mhi_controller *mhi_alloc_controller(void);
+
+/**
+ * mhi_free_controller - Free the MHI Controller structure
+ * Free the mhi_controller structure which was previously allocated
+ */
+void mhi_free_controller(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_register_controller - Register MHI controller
* @mhi_cntrl: MHI controller to register
* @config: Configuration to use for the controller
*/
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
- struct mhi_controller_config *config);
+ const struct mhi_controller_config *config);
/**
* mhi_unregister_controller - Unregister MHI controller
@@ -576,6 +598,15 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
/**
+ * mhi_get_free_desc_count - Get transfer ring length
+ * Get # of TD available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
+/**
* mhi_prepare_for_power_up - Do pre-initialization before power up.
* This is optional, call this before power up if
* the controller does not want bus framework to
@@ -593,7 +624,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_sync_power_up - Start MHI power up sequence and wait till the device
- * device enters valid EE state
+ * enters valid EE state
* @mhi_cntrl: MHI controller
*/
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
@@ -624,12 +655,25 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
- * mhi_download_rddm_img - Download ramdump image from device for
- * debugging purpose.
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * have to be in M3 state during resume. But some devices seem to be in an
+ * MHI state other than M3 and yet continue working fine if allowed.
+ * This API is intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_image - Download ramdump image from device for
+ * debugging purpose.
* @mhi_cntrl: MHI controller
* @in_panic: Download rddm image during kernel panic
*/
-int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force device into rddm mode
@@ -638,12 +682,25 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_get_exec_env - Get BHI execution environment of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_get_mhi_state - Get MHI state of the device
* @mhi_cntrl: MHI controller
*/
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
+ * to reset and recover a device.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_device_get - Disable device low power mode
* @mhi_dev: Device associated with the channel
*/
@@ -663,23 +720,41 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev);
void mhi_device_put(struct mhi_device *mhi_dev);
/**
- * mhi_prepare_for_transfer - Setup channel for data transfer
+ * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
* @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
- * mhi_unprepare_from_transfer - Unprepare the channels
+ * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
+ * buffers for DL traffic
* @mhi_dev: Device associated with the channels
- */
-void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
-
-/**
- * mhi_poll - Poll for any available data in DL direction
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ * The MHI core will automatically allocate and queue buffers for the DL traffic.
+ */
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
+ * Issue the RESET channel command and let the
+ * device clean up the context so no incoming
+ * transfers are seen on the host. Free memory
+ * associated with the context on host. If device
+ * is unresponsive, only perform a host side
+ * clean-up. Channels can be reset only if both
+ * host and device execution environments match
+ * and channels are in an ENABLED, STOPPED or
+ * SUSPENDED state.
* @mhi_dev: Device associated with the channels
- * @budget: # of events to process
*/
-int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_queue_dma - Send or receive DMA mapped buffers from client device
@@ -716,4 +791,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
+
#endif /* _MHI_H_ */
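Tying the client-facing pieces of this header together, a minimal MHI client driver could be structured roughly as below. Only the struct layout and helpers come from the header above; the channel name "EXAMPLE", the driver name and the callbacks' bodies are hypothetical.

#include <linux/mhi.h>
#include <linux/module.h>

/* Minimal MHI client sketch. Channels are started in probe and reset
 * in remove, matching the mhi_prepare_for_transfer() documentation. */
static int example_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	/* Start both UL and DL channels before queueing buffers */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void example_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

static void example_dl_cb(struct mhi_device *mhi_dev,
			  struct mhi_result *res)
{
	/* Consume res->buf_addr / res->bytes_xferd here */
}

static void example_ul_cb(struct mhi_device *mhi_dev,
			  struct mhi_result *res)
{
	/* UL transfer completion bookkeeping, if any */
}

static const struct mhi_device_id example_id_table[] = {
	{ .chan = "EXAMPLE" },	/* hypothetical channel name */
	{}
};
MODULE_DEVICE_TABLE(mhi, example_id_table);

static struct mhi_driver example_driver = {
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
	.dl_xfer_cb = example_dl_cb,
	.ul_xfer_cb = example_ul_cb,
	.driver = {
		.name = "mhi_example",
	},
};
module_mhi_driver(example_driver);
MODULE_LICENSE("GPL");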
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..11bf3212f782
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+ u32 mhi_version;
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the buffer in the host
+ * @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
+ */
+struct mhi_ep_buf_info {
+ struct mhi_ep_device *mhi_dev;
+ void *dev_addr;
+ u64 host_addr;
+ size_t size;
+ int code;
+
+ void (*cb)(struct mhi_ep_buf_info *buf_info);
+ void *cb_buf;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Check if the endpoint controller is enabled or not
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+
+ struct mhi_ep_db_info chdb[4];
+ struct mutex event_lock;
+ struct mutex state_lock;
+ spinlock_t list_lock;
+
+ struct list_head st_transition_list;
+ struct list_head ch_db_list;
+
+ struct workqueue_struct *wq;
+ struct work_struct state_work;
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
+ struct kmem_cache *ring_item_cache;
+ struct kmem_cache *ev_ring_el_cache;
+ struct kmem_cache *tre_buf_cache;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 mru;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ u32 index;
+ int irq;
+ bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing a MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
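On the device side, a client of the new endpoint API mirrors the host-side pattern and registers through module_mhi_ep_driver() as documented above. A sketch, with the channel name and driver name again hypothetical:

#include <linux/mhi_ep.h>
#include <linux/module.h>

static int ep_example_probe(struct mhi_ep_device *mhi_ep,
			    const struct mhi_device_id *id)
{
	return 0;
}

static void ep_example_remove(struct mhi_ep_device *mhi_ep)
{
}

static void ep_example_ul_cb(struct mhi_ep_device *mhi_dev,
			     struct mhi_result *result)
{
	/* Host-to-endpoint data lands here */
}

static void ep_example_dl_cb(struct mhi_ep_device *mhi_dev,
			     struct mhi_result *result)
{
	/* Endpoint-to-host transfer completion */
}

static const struct mhi_device_id ep_example_id_table[] = {
	{ .chan = "EXAMPLE" },	/* hypothetical channel name */
	{}
};
MODULE_DEVICE_TABLE(mhi, ep_example_id_table);

static struct mhi_ep_driver ep_example_driver = {
	.id_table = ep_example_id_table,
	.probe = ep_example_probe,
	.remove = ep_example_remove,
	.ul_xfer_cb = ep_example_ul_cb,
	.dl_xfer_cb = ep_example_dl_cb,
	.driver = {
		.name = "mhi_ep_example",
	},
};
module_mhi_ep_driver(ep_example_driver);
MODULE_LICENSE("GPL");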
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
deleted file mode 100644
index e99c789424e0..000000000000
--- a/include/linux/mic_bus.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC Bus driver.
- *
- * This implementation is very similar to the virtio bus driver
- * implementation @ include/linux/virtio.h.
- */
-#ifndef _MIC_BUS_H_
-#define _MIC_BUS_H_
-/*
- * Everything a mbus driver needs to work with any particular mbus
- * implementation.
- */
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-
-struct mbus_device_id {
- __u32 device;
- __u32 vendor;
-};
-
-#define MBUS_DEV_DMA_HOST 2
-#define MBUS_DEV_DMA_MIC 3
-#define MBUS_DEV_ANY_ID 0xffffffff
-
-/**
- * mbus_device - representation of a device using mbus
- * @mmio_va: virtual address of mmio space
- * @hw_ops: the hardware ops supported by this device.
- * @id: the device type identification (used to match it with a driver).
- * @dev: underlying device.
- * be used to communicate with.
- * @index: unique position on the mbus bus
- */
-struct mbus_device {
- void __iomem *mmio_va;
- struct mbus_hw_ops *hw_ops;
- struct mbus_device_id id;
- struct device dev;
- int index;
-};
-
-/**
- * mbus_driver - operations for a mbus I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct mbus_driver {
- struct device_driver driver;
- const struct mbus_device_id *id_table;
- int (*probe)(struct mbus_device *dev);
- void (*scan)(struct mbus_device *dev);
- void (*remove)(struct mbus_device *dev);
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/**
- * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
- */
-struct mbus_hw_ops {
- struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
- irq_handler_t handler,
- irq_handler_t thread_fn,
- const char *name, void *data,
- int intr_src);
- void (*free_irq)(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct mbus_device *mbdev, int num);
-};
-
-struct mbus_device *
-mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, int index,
- void __iomem *mmio_va);
-void mbus_unregister_device(struct mbus_device *mbdev);
-
-int mbus_register_driver(struct mbus_driver *drv);
-void mbus_unregister_driver(struct mbus_driver *drv);
-
-static inline struct mbus_device *dev_to_mbus(struct device *_dev)
-{
- return container_of(_dev, struct mbus_device, dev);
-}
-
-static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
-{
- return container_of(drv, struct mbus_driver, driver);
-}
-
-#endif /* _MIC_BUS_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 75f880c25bb8..591bf5b5e8dc 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -8,6 +8,8 @@
#ifndef _MICREL_PHY_H
#define _MICREL_PHY_H
+#define MICREL_OUI 0x0885
+
#define MICREL_PHY_ID_MASK 0x00fffff0
#define PHY_ID_KSZ8873MLL 0x000e7237
@@ -27,6 +29,9 @@
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
+#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
+#define PHY_ID_LAN8841 0x00221650
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
@@ -36,12 +41,33 @@
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
+#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+/* Device specific MII_BMCR (Reg 0) bits */
+/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */
+#define KSZ886X_BMCR_HP_MDIX BIT(5)
+/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation
+ * (transmit on TXP/TXM pins)
+ */
+#define KSZ886X_BMCR_FORCE_MDI BIT(4)
+/* 1 = Disable auto MDI-X */
+#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3)
+#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2)
+#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
+#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+
+/* PHY Special Control/Status Register (Reg 31) */
+#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+#define KSZ886X_CTRL_FORCE_LINK BIT(3)
+#define KSZ886X_CTRL_PWRSAVE BIT(2)
+#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1)
+
#endif /* _MICREL_PHY_H */
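The new KSZ886X BMCR bits above combine naturally with the generic phy_modify() helper. A sketch of forcing MDI wiring; the function name and the exact bit policy are illustrative assumptions:

#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/micrel_phy.h>

/* Sketch: disable auto MDI-X and select forced-MDI mode using the
 * device-specific BMCR bits defined in this header. */
static int example_force_mdi(struct phy_device *phydev)
{
	return phy_modify(phydev, MII_BMCR,
			  KSZ886X_BMCR_HP_MDIX | KSZ886X_BMCR_FORCE_MDI |
			  KSZ886X_BMCR_DISABLE_AUTO_MDIX,
			  KSZ886X_BMCR_DISABLE_AUTO_MDIX |
			  KSZ886X_BMCR_FORCE_MDI);
}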
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 0f8d1583fa8e..2ce13e8a309b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,8 +7,8 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private);
-typedef void free_page_t(struct page *page, unsigned long private);
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
@@ -18,66 +18,85 @@ struct migration_target_control;
* - zero on page migration success;
*/
#define MIGRATEPAGE_SUCCESS 0
-
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CONTIG_RANGE,
- MR_TYPES
+#define MIGRATEPAGE_UNMAP 1
+
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved. The page
+ * is locked and the driver should not unlock it. The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable. After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page. The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should call
+ * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN. The VM interprets this as a temporary migration failure and
+ * will retry it later. Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function. It may write to @dst->lru.
+ *
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function. The driver should put the isolated page back into
+ * its own data structure.
+ */
+struct movable_operations {
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ int (*migrate_page)(struct page *dst, struct page *src,
+ enum migrate_mode);
+ void (*putback_page)(struct page *);
};
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
+/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
-extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason);
-extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
-extern void putback_movable_page(struct page *page);
-
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
-extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
-extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page, int extra_count);
+void putback_movable_pages(struct list_head *l);
+int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode, int extra_count);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
+bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct folio *dst, struct folio *src);
+void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+ __releases(ptl);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count);
+
#else
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t new,
- free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+ free_folio_t free, unsigned long private,
+ enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
-static inline struct page *alloc_migration_target(struct page *page,
+static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
-
-static inline int migrate_prep(void) { return -ENOSYS; }
-static inline int migrate_prep_local(void) { return -ENOSYS; }
-
-static inline void migrate_page_states(struct page *newpage, struct page *page)
-{
-}
-
-static inline void migrate_page_copy(struct page *newpage,
- struct page *page) {}
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct folio *dst, struct folio *src)
{
return -ENOSYS;
}
@@ -85,13 +104,13 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
-extern int PageMovable(struct page *page);
-extern void __SetPageMovable(struct page *page, struct address_space *mapping);
-extern void __ClearPageMovable(struct page *page);
+bool PageMovable(struct page *page);
+void __SetPageMovable(struct page *page, const struct movable_operations *ops);
+void __ClearPageMovable(struct page *page);
#else
-static inline int PageMovable(struct page *page) { return 0; };
+static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
- struct address_space *mapping)
+ const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
@@ -99,39 +118,39 @@ static inline void __ClearPageMovable(struct page *page)
}
#endif
-#ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
-extern int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node);
-#else
-static inline bool pmd_trans_migrating(pmd_t pmd)
+static inline bool folio_test_movable(struct folio *folio)
{
- return false;
+ return PageMovable(&folio->page);
}
-static inline int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node)
+
+static inline
+const struct movable_operations *folio_movable_ops(struct folio *folio)
{
- return -EAGAIN; /* can't migrate now */
+ VM_BUG_ON(!__folio_test_movable(folio));
+
+ return (const struct movable_operations *)
+ ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
}
-#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
+static inline
+const struct movable_operations *page_movable_ops(struct page *page)
{
- return -EAGAIN;
+ VM_BUG_ON(!__PageMovable(page));
+
+ return (const struct movable_operations *)
+ ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+#ifdef CONFIG_NUMA_BALANCING
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ int node);
+#else
+static inline int migrate_misplaced_folio(struct folio *folio,
+ struct vm_area_struct *vma, int node)
+{
+ return -EAGAIN; /* can't migrate now */
+}
+#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_MIGRATION
@@ -142,7 +161,6 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
-#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_SHIFT 6
@@ -161,6 +179,7 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
enum migrate_vma_direction {
MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};
struct migrate_vma {
@@ -190,11 +209,23 @@ struct migrate_vma {
*/
void *pgmap_owner;
unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
};
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
#endif /* CONFIG_MIGRATION */
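
For orientation, below is a minimal sketch of a driver wiring up the new
movable_operations. All names and bodies are illustrative (zsmalloc and
z3fold are the real in-tree users), and it assumes non-highmem pages so
that page_address() is valid:

static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* The page arrives locked; drop it from the driver's own list,
	 * since the VM may reuse page->lru during migration. */
	list_del_init(&page->lru);
	return true;
}

static int demo_migrate_page(struct page *dst, struct page *src,
			     enum migrate_mode mode)
{
	/* Both pages arrive locked. */
	memcpy(page_address(dst), page_address(src), PAGE_SIZE);
	__ClearPageMovable(src);
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
	/* Migration failed: reinsert the page into driver structures. */
}

static const struct movable_operations demo_mops = {
	.isolate_page	= demo_isolate_page,
	.migrate_page	= demo_migrate_page,
	.putback_page	= demo_putback_page,
};

/* At allocation time the driver would call:
 *	__SetPageMovable(page, &demo_mops);
 */

Returning -EAGAIN from migrate_page keeps the page a retry candidate; any
other error is treated as permanent, per the kernel-doc above. Separately,
the new migrate_device_range()/migrate_device_pages()/migrate_device_finalize()
trio lets device drivers migrate device-private pages by PFN range without
walking a vma.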
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 883c99249033..f37cc03f9369 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -19,4 +19,17 @@ enum migrate_mode {
MIGRATE_SYNC_NO_COPY,
};
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_TYPES
+};
+
#endif /* MIGRATE_MODE_H_INCLUDED */
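
Note that migrate_reason_names[] in mm/debug.c (and the strings in
include/trace/events/migrate.h) must stay in the same order as this enum;
roughly, with the array contents shown here only for illustration:

const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"contig_range",
	"longterm_pin",
	"demotion",
};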
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 219b93cad1dd..d5a959ce4877 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -32,7 +32,7 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
-extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
@@ -355,56 +355,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
}
/**
- * mii_lpa_mod_linkmode_adv_sgmii
- * @lp_advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_LPA bits to
- * linkmode advertisement settings for SGMII.
- * Leaves other bits unchanged.
- */
-static inline void
-mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
-{
- u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_1000HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_1000FULL);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_100HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_100FULL);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_10HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_10FULL);
-}
-
-/**
- * mii_lpa_to_linkmode_adv_sgmii
- * @advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_ADVERTISE bits
- * to linkmode advertisement settings when in SGMII mode.
- * Clears the old value of advertising.
- */
-static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
- u32 lpa)
-{
- linkmode_zero(lp_advertising);
-
- mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
-}
-
-/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
@@ -595,4 +545,39 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
return cap;
}
+/**
+ * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value
+ * @speed: a SPEED_* value
+ * @duplex: a DUPLEX_* value
+ *
+ * Encode the speed and duplex into a BMCR value. 2500, 1000, 100 and 10 Mbps
+ * are supported. 2500 Mbps is encoded as 1000 Mbps. Other speeds are encoded
+ * as 10 Mbps. Unknown duplex values are encoded as half-duplex.
+ */
+static inline u16 mii_bmcr_encode_fixed(int speed, int duplex)
+{
+ u16 bmcr;
+
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr = BMCR_SPEED1000;
+ break;
+
+ case SPEED_100:
+ bmcr = BMCR_SPEED100;
+ break;
+
+ case SPEED_10:
+ default:
+ bmcr = BMCR_SPEED10;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ return bmcr;
+}
+
#endif /* __LINUX_MII_H__ */
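
A quick usage sketch for the new helper (the wrapper function is
hypothetical):

static u16 demo_fixed_bmcr(void)
{
	/* Yields BMCR_SPEED100 | BMCR_FULLDPLX; SPEED_2500 would fold
	 * onto BMCR_SPEED1000 as described in the kernel-doc above. */
	return mii_bmcr_encode_fixed(SPEED_100, DUPLEX_FULL);
}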
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index fa940bbaf8ae..26b04f73f214 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <linux/net_tstamp.h>
struct phy_device;
@@ -51,7 +52,8 @@ struct mii_timestamper {
struct sk_buff *skb, int type);
int (*hwtstamp)(struct mii_timestamper *mii_ts,
- struct ifreq *ifreq);
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
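
A sketch of a hwtstamp implementation adapted to the new signature; the
tx_type/rx_filter fields come from struct kernel_hwtstamp_config, and the
validation shown is illustrative:

static int demo_hwtstamp(struct mii_timestamper *mii_ts,
			 struct kernel_hwtstamp_config *cfg,
			 struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF &&
	    cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG(extack, "unsupported tx_type");
		return -ERANGE;
	}
	/* ...program the hardware from cfg->tx_type / cfg->rx_filter... */
	return 0;
}

Compared with the old ifreq-based callback, the structured config spares
each driver a copy_from_user() and lets errors carry an extack message.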
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
index 44077837385f..d52daf45861b 100644
--- a/include/linux/min_heap.h
+++ b/include/linux/min_heap.h
@@ -35,31 +35,33 @@ static __always_inline
void min_heapify(struct min_heap *heap, int pos,
const struct min_heap_callbacks *func)
{
- void *left, *right, *parent, *smallest;
+ void *left, *right;
void *data = heap->data;
+ void *root = data + pos * func->elem_size;
+ int i = pos, j;
+ /* Find the sift-down path all the way to the leaves. */
for (;;) {
- if (pos * 2 + 1 >= heap->nr)
+ if (i * 2 + 2 >= heap->nr)
break;
+ left = data + (i * 2 + 1) * func->elem_size;
+ right = data + (i * 2 + 2) * func->elem_size;
+ i = func->less(left, right) ? i * 2 + 1 : i * 2 + 2;
+ }
- left = data + ((pos * 2 + 1) * func->elem_size);
- parent = data + (pos * func->elem_size);
- smallest = parent;
- if (func->less(left, smallest))
- smallest = left;
-
- if (pos * 2 + 2 < heap->nr) {
- right = data + ((pos * 2 + 2) * func->elem_size);
- if (func->less(right, smallest))
- smallest = right;
- }
- if (smallest == parent)
- break;
- func->swp(smallest, parent);
- if (smallest == left)
- pos = (pos * 2) + 1;
- else
- pos = (pos * 2) + 2;
+ /* Special case for the last leaf with no sibling. */
+ if (i * 2 + 2 == heap->nr)
+ i = i * 2 + 1;
+
+ /* Backtrack to the correct location. */
+ while (i != pos && func->less(root, data + i * func->elem_size))
+ i = (i - 1) / 2;
+
+ /* Shift the element into its correct place. */
+ j = i;
+ while (i != pos) {
+ i = (i - 1) / 2;
+ func->swp(data + i * func->elem_size, data + j * func->elem_size);
}
}
@@ -70,7 +72,7 @@ void min_heapify_all(struct min_heap *heap,
{
int i;
- for (i = heap->nr / 2; i >= 0; i--)
+ for (i = heap->nr / 2 - 1; i >= 0; i--)
min_heapify(heap, i, func);
}
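
The rewrite replaces the classic top-down sift (two comparisons per level)
with a bottom-up variant: follow the smaller-child path all the way to a
leaf, backtrack to where the displaced root belongs, then shift it into
place, which roughly halves the comparison count on average. Callers are
unaffected; for reference, a usage sketch (assuming the usual int-array
setup for struct min_heap and struct min_heap_callbacks):

static bool less_than(const void *lhs, const void *rhs)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static void swap_ints(void *lhs, void *rhs)
{
	int tmp = *(int *)lhs;

	*(int *)lhs = *(int *)rhs;
	*(int *)rhs = tmp;
}

static const struct min_heap_callbacks int_cb = {
	.elem_size	= sizeof(int),
	.less		= less_than,
	.swp		= swap_ints,
};

static void demo_heapify(void)
{
	int values[] = { 5, 2, 8, 1 };
	struct min_heap heap = {
		.data	= values,
		.nr	= ARRAY_SIZE(values),
		.size	= ARRAY_SIZE(values),
	};

	min_heapify_all(&heap, &int_cb);	/* values[0] is now 1 */
}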
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
new file mode 100644
index 000000000000..2ec559284a9f
--- /dev/null
+++ b/include/linux/minmax.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MINMAX_H
+#define _LINUX_MINMAX_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+/*
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+ * - Avoid multiple evaluations of the arguments (so side-effects like
+ *   "x++" happen only once) when non-constant.
+ * - Retain the result as a constant expression when called with only
+ *   constant expressions (to avoid tripping VLA warnings in stack
+ *   allocation usage).
+ * - Perform signed vs unsigned type-checking (to generate compile
+ *   errors instead of nasty runtime surprises). Specifically:
+ *   - Unsigned char/short are always promoted to signed int and can be
+ *     compared against signed or unsigned arguments.
+ *   - Unsigned arguments can be compared against non-negative signed
+ *     constants.
+ *   - Comparison of a signed argument against an unsigned constant fails
+ *     even if the constant is below __INT_MAX__ and could be cast to int.
+ */
+#define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+/* is_signed_type() isn't a constexpr for pointer types */
+#define __is_signed(x) \
+ __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
+ is_signed_type(typeof(x)), 0)
+
+/* True for a non-negative signed int constant */
+#define __is_noneg_int(x) \
+ (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
+
+#define __types_ok(x, y) \
+ (__is_signed(x) == __is_signed(y) || \
+ __is_signed((x) + 0) == __is_signed((y) + 0) || \
+ __is_noneg_int(x) || __is_noneg_int(y))
+
+#define __cmp_op_min <
+#define __cmp_op_max >
+
+#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
+
+#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ static_assert(__types_ok(x, y), \
+ #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
+ __cmp(op, unique_x, unique_y); })
+
+#define __careful_cmp(op, x, y) \
+ __builtin_choose_expr(__is_constexpr((x) - (y)), \
+ __cmp(op, x, y), \
+ __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
+
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
+ typeof(val) unique_val = (val); \
+ typeof(lo) unique_lo = (lo); \
+ typeof(hi) unique_hi = (hi); \
+ static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
+ (lo) <= (hi), true), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
+ static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
+ __clamp(unique_val, unique_lo, unique_hi); })
+
+#define __careful_clamp(val, lo, hi) ({ \
+ __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
+ __clamp(val, lo, hi), \
+ __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
+ __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define min(x, y) __careful_cmp(min, x, y)
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define max(x, y) __careful_cmp(max, x, y)
+
+/**
+ * umin - return minimum of two non-negative values
+ * Signed types are zero extended to match a larger unsigned type.
+ * @x: first value
+ * @y: second value
+ */
+#define umin(x, y) \
+ __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+/**
+ * umax - return maximum of two non-negative values
+ * @x: first value
+ * @y: second value
+ */
+#define umax(x, y) \
+ __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define max3(x, y, z) max((typeof(x))max(x, y), z)
+
+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
+/**
+ * clamp - return a value clamped to a given range with strict typechecking
+ * @val: current value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
+ *
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val; the check is done via static_assert() in __clamp_once().
+ */
+#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
+
+/*
+ * ...and if you can't take the strict types, you can specify one yourself.
+ *
+ * Or not use min/max/clamp at all, of course.
+ */
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
+
+/*
+ * Do not check the array parameter using __must_be_array().
+ * In the following legitimate use-case, where the "array" passed is a simple
+ * pointer, __must_be_array() would fail the build:
+ * --- 8< ---
+ * int *buff
+ * ...
+ * min = min_array(buff, nb_items);
+ * --- 8< ---
+ *
+ * The first typeof(&(array)[0]) is needed in order to support arrays of both
+ * 'int *buff' and 'int buff[N]' types.
+ *
+ * The array can be an array of const items.
+ * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
+ * to discard the const qualifier for the __element variable.
+ */
+#define __minmax_array(op, array, len) ({ \
+ typeof(&(array)[0]) __array = (array); \
+ typeof(len) __len = (len); \
+ __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
+ while (__len--) \
+ __element = op(__element, __array[__len]); \
+ __element; })
+
+/**
+ * min_array - return minimum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define min_array(array, len) __minmax_array(min, array, len)
+
+/**
+ * max_array - return maximum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define max_array(array, len) __minmax_array(max, array, len)
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * @type to make all the comparisons.
+ */
+#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
+
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
+ * integer type.
+ */
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+
+static inline bool in_range64(u64 val, u64 start, u64 len)
+{
+ return (val - start) < len;
+}
+
+static inline bool in_range32(u32 val, u32 start, u32 len)
+{
+ return (val - start) < len;
+}
+
+/**
+ * in_range - Determine if a value lies within a range.
+ * @val: Value to test.
+ * @start: First value in range.
+ * @len: Number of values in range.
+ *
+ * This is more efficient than "if (start <= val && val < (start + len))".
+ * It also gives a different answer if @start + @len overflows the size of
+ * the type by a sufficient amount to encompass @val. Decide for yourself
+ * which behaviour you want, or prove that start + len never overflow.
+ * Do not blindly replace one form with the other.
+ */
+#define in_range(val, start, len) \
+ ((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
+ in_range32(val, start, len) : in_range64(val, start, len))
+
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
+ */
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#endif /* _LINUX_MINMAX_H */
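
A short sketch of the type rules and the in_range() wraparound caveat
described above (the wrapper function is hypothetical; every call shown
compiles except the one noted):

static void demo_minmax(void)
{
	int s = -1;
	unsigned int u = 1;

	(void)min(u, 5);	/* ok: 5 is a non-negative signed constant */
	(void)min_t(u32, s, u);	/* ok: both arguments are forced to u32 */
	/* min(s, u) would trip the static_assert: signedness mismatch. */

	(void)clamp(7, 0, 10);	/* 7 */

	/* in_range() relies on unsigned wraparound: 10 - 0xfffffff0
	 * wraps to 0x1a, which is < 0x20, so this is true even though
	 * start <= val does not hold. */
	(void)in_range32(10u, 0xfffffff0u, 0x20u);
}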
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
new file mode 100644
index 000000000000..e799b1f8d05b
--- /dev/null
+++ b/include/linux/misc_cgroup.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Miscellaneous cgroup controller.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Vipin Sharma <vipinsh@google.com>
+ */
+#ifndef _MISC_CGROUP_H_
+#define _MISC_CGROUP_H_
+
+/*
+ * Types of misc cgroup entries supported by the host.
+ */
+enum misc_res_type {
+#ifdef CONFIG_KVM_AMD_SEV
+ /* AMD SEV ASIDs resource */
+ MISC_CG_RES_SEV,
+ /* AMD SEV-ES ASIDs resource */
+ MISC_CG_RES_SEV_ES,
+#endif
+ MISC_CG_RES_TYPES
+};
+
+struct misc_cg;
+
+#ifdef CONFIG_CGROUP_MISC
+
+#include <linux/cgroup.h>
+
+/**
+ * struct misc_res: Per cgroup per misc type resource
+ * @max: Maximum limit on the resource.
+ * @usage: Current usage of the resource.
+ * @events: Number of times the resource limit was exceeded.
+ */
+struct misc_res {
+ u64 max;
+ atomic64_t usage;
+ atomic64_t events;
+};
+
+/**
+ * struct misc_cg - Miscellaneous controller's cgroup structure.
+ * @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
+ * @res: Array of misc resources usage in the cgroup.
+ */
+struct misc_cg {
+ struct cgroup_subsys_state css;
+
+ /* misc.events */
+ struct cgroup_file events_file;
+
+ struct misc_res res[MISC_CG_RES_TYPES];
+};
+
+u64 misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+
+/**
+ * css_misc() - Get misc cgroup from the css.
+ * @css: cgroup subsys state object.
+ *
+ * Context: Any context.
+ * Return:
+ * * %NULL - If @css is null.
+ * * struct misc_cg* - misc cgroup pointer of the passed css.
+ */
+static inline struct misc_cg *css_misc(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct misc_cg, css) : NULL;
+}
+
+/*
+ * get_current_misc_cg() - Find and get the misc cgroup of the current task.
+ *
+ * Returned cgroup has its ref count increased by 1. Caller must call
+ * put_misc_cg() to return the reference.
+ *
+ * Return: Misc cgroup to which the current task belongs.
+ */
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return css_misc(task_get_css(current, misc_cgrp_id));
+}
+
+/*
+ * put_misc_cg() - Put the misc cgroup and reduce its ref count.
+ * @cg: cgroup to put.
+ */
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+ if (cg)
+ css_put(&cg->css);
+}
+
+#else /* !CONFIG_CGROUP_MISC */
+
+static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
+{
+ return 0;
+}
+
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
+{
+ return 0;
+}
+
+static inline int misc_cg_try_charge(enum misc_res_type type,
+ struct misc_cg *cg,
+ u64 amount)
+{
+ return 0;
+}
+
+static inline void misc_cg_uncharge(enum misc_res_type type,
+ struct misc_cg *cg,
+ u64 amount)
+{
+}
+
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return NULL;
+}
+
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+}
+
+#endif /* CONFIG_CGROUP_MISC */
+#endif /* _MISC_CGROUP_H_ */
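
The intended charge/uncharge flow, mirroring how the SEV ASID code uses
the controller (resource choice and error handling are illustrative):

static int demo_take_one_sev_asid(void)
{
	struct misc_cg *cg = get_current_misc_cg();
	int ret;

	ret = misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1);
	if (ret) {
		put_misc_cg(cg);
		return ret;
	}

	/* ...use the ASID; once it is released again: */
	misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
	put_misc_cg(cg);
	return 0;
}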
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index c7a93002a3c1..c0fea6ca5076 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -7,9 +7,9 @@
#include <linux/device.h>
/*
- * These allocations are managed by device@lanana.org. If you use an
- * entry that is not in assigned your entry may well be moved and
- * reassigned, or set dynamic if a fixed value is not justified.
+ * These allocations are managed by device@lanana.org. If you need
+ * an entry that is not assigned here, it can be moved and
+ * reassigned or dynamically set if a fixed value is not justified.
*/
#define PSMOUSE_MINOR 1
@@ -44,7 +44,7 @@
#define AGPGART_MINOR 175
#define TOSH_MINOR_DEV 181
#define HWRNG_MINOR 183
-#define MICROCODE_MINOR 184
+/*#define MICROCODE_MINOR 184 unused */
#define KEYPAD_MINOR 185
#define IRNET_MINOR 187
#define D7S_MINOR 193
@@ -93,14 +93,14 @@ extern void misc_deregister(struct miscdevice *misc);
/*
* Helper macro for drivers that don't do anything special in the initcall.
- * This helps in eleminating of boilerplate code.
+ * This helps to eliminate boilerplate code.
*/
#define builtin_misc_device(__misc_device) \
builtin_driver(__misc_device, misc_register)
/*
* Helper macro for drivers that don't do anything special in module init / exit
- * call. This helps in eleminating of boilerplate code.
+ * call. This helps to eliminate boilerplate code.
*/
#define module_misc_device(__misc_device) \
module_driver(__misc_device, misc_register, misc_deregister)
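
For example, module_misc_device() reduces a whole driver's boilerplate to
the following (hypothetical device; also needs <linux/fs.h> and
<linux/module.h>):

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
};

static struct miscdevice demo_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "demo",
	.fops	= &demo_fops,
};
module_misc_device(demo_misc);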
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 06e066e04a4b..27f42f713c89 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H
+#include <linux/auxiliary_bus.h>
#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
@@ -46,7 +47,6 @@
#define DEFAULT_UAR_PAGE_SHIFT 12
-#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
@@ -631,6 +631,7 @@ struct mlx4_caps {
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
@@ -889,6 +890,12 @@ struct mlx4_dev {
u8 uar_page_shift;
};
+struct mlx4_adev {
+ struct auxiliary_device adev;
+ struct mlx4_dev *mdev;
+ int idx;
+};
+
struct mlx4_clock_params {
u64 offset;
u8 bar;
@@ -1087,6 +1094,19 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
(offset & (PAGE_SIZE - 1));
}
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+ return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
+ u8 v2p_p2);
+
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
@@ -1436,7 +1456,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..69825223081f 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -34,8 +34,12 @@
#define MLX4_DRIVER_H
#include <net/devlink.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
#include <linux/mlx4/device.h>
+#define MLX4_ADEV_NAME "mlx4_core"
+
struct mlx4_dev;
#define MLX4_MAC_MASK 0xffffffffffffULL
@@ -54,64 +58,20 @@ enum {
MLX4_INTFF_BONDING = 1 << 0
};
-struct mlx4_interface {
- void * (*add) (struct mlx4_dev *dev);
- void (*remove)(struct mlx4_dev *dev, void *context);
- void (*event) (struct mlx4_dev *dev, void *context,
- enum mlx4_dev_event event, unsigned long param);
- void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
- void (*activate)(struct mlx4_dev *dev, void *context);
- struct list_head list;
+struct mlx4_adrv {
+ struct auxiliary_driver adrv;
enum mlx4_protocol protocol;
int flags;
};
-int mlx4_register_interface(struct mlx4_interface *intf);
-void mlx4_unregister_interface(struct mlx4_interface *intf);
-
-int mlx4_bond(struct mlx4_dev *dev);
-int mlx4_unbond(struct mlx4_dev *dev);
-static inline int mlx4_is_bonded(struct mlx4_dev *dev)
-{
- return !!(dev->flags & MLX4_FLAG_BONDED);
-}
-
-static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
-{
- return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
-}
-
-struct mlx4_port_map {
- u8 port1;
- u8 port2;
-};
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv);
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv);
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+int mlx4_register_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
+int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
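
With mlx4_interface gone, sub-drivers become auxiliary drivers and receive
device events through a notifier chain instead of the old .event callback.
A sketch (the callback body is illustrative):

static int demo_event(struct notifier_block *nb, unsigned long event,
		      void *param)
{
	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		/* ...react to the port coming up... */
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call	= demo_event,
};

/* In the auxiliary driver's probe:
 *	mlx4_register_event_notifier(mdev, &demo_nb);
 * and in remove:
 *	mlx4_unregister_event_notifier(mdev, &demo_nb);
 */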
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9db93e487496..b9a7b1319f5d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -446,6 +446,7 @@ enum {
struct mlx4_wqe_inline_seg {
__be32 byte_count;
+ __u8 data[];
};
enum mlx4_update_qp_attr {
@@ -503,4 +504,5 @@ static inline u16 folded_qp(u32 q)
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+void mlx4_put_qp(struct mlx4_qp *qp);
#endif /* MLX4_QP_H */
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
deleted file mode 100644
index dacf69516002..000000000000
--- a/include/linux/mlx5/accel.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_H__
-#define __MLX5_ACCEL_H__
-
-#include <linux/mlx5/driver.h>
-
-enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
-};
-
-enum mlx5_accel_esp_flags {
- MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
- MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
- MLX5_ACCEL_ESP_ACTION_DECRYPT,
- MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
-enum mlx5_accel_esp_keymats {
- MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
- MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
-};
-
-enum mlx5_accel_esp_replay {
- MLX5_ACCEL_ESP_REPLAY_NONE,
- MLX5_ACCEL_ESP_REPLAY_BMP,
-};
-
-struct aes_gcm_keymat {
- u64 seq_iv;
- enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
-
- u32 salt;
- u32 icv_len;
-
- u32 key_len;
- u32 aes_key[256 / 32];
-};
-
-struct mlx5_accel_esp_xfrm_attrs {
- enum mlx5_accel_esp_action action;
- u32 esn;
- __be32 spi;
- u32 seq;
- u32 tfc_pad;
- u32 flags;
- u32 sa_handle;
- enum mlx5_accel_esp_replay replay_type;
- union {
- struct {
- u32 size;
-
- } bmp;
- } replay;
- enum mlx5_accel_esp_keymats keymat_type;
- union {
- struct aes_gcm_keymat aes_gcm;
- } keymat;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
- u8 is_ipv6;
-};
-
-struct mlx5_accel_esp_xfrm {
- struct mlx5_core_dev *mdev;
- struct mlx5_accel_esp_xfrm_attrs attrs;
-};
-
-enum {
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
-};
-
-enum mlx5_accel_ipsec_cap {
- MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
- MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
- MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
- MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
- MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
-};
-
-#ifdef CONFIG_MLX5_ACCEL
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
-
-#else
-
-static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-
-static inline struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
-static inline int
-mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
-
-#endif /* CONFIG_MLX5_ACCEL */
-#endif /* __MLX5_ACCEL_H__ */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 7bfb67363434..cb15308b5cb0 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -183,6 +183,8 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
complete(&cq->free);
}
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4d3376e20f5e..01275c6e8468 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/bitfield.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -290,9 +291,9 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
-#define MLX5_UMR_MTT_ALIGNMENT 0x40
-#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
-#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+#define MLX5_UMR_FLEX_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
@@ -324,6 +325,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
@@ -346,6 +348,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
+ MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
@@ -358,14 +361,26 @@ enum mlx5_event {
MLX5_EVENT_TYPE_MAX = 0x100,
};
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+};
+
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
+ MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
};
enum {
MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
+ MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};
@@ -380,21 +395,6 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
- MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
- MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
- MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
- MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
- MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
- MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
- MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
- MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
-};
-
-enum {
MLX5_ROCE_VERSION_1 = 0,
MLX5_ROCE_VERSION_2 = 2,
};
@@ -448,6 +448,9 @@ enum {
MLX5_OPCODE_UMR = 0x25,
+ MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,
+
+ MLX5_OPCODE_ACCESS_ASO = 0x2d,
};
enum {
@@ -489,10 +492,6 @@ enum {
};
enum {
- MLX5_CAP_OFF_CMDIF_CSUM = 46,
-};
-
-enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
@@ -535,19 +534,21 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-enum mlx5_fatal_assert_bit_offsets {
- MLX5_RFR_OFFSET = 31,
+enum mlx5_rfr_severity_bit_offsets {
+ MLX5_RFR_BIT_OFFSET = 0x7,
};
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rfr;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
@@ -571,12 +572,17 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd4[1019];
+ __be32 rsvd4[11];
+ __be32 real_time_h;
+ __be32 real_time_l;
+ __be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -703,12 +709,19 @@ struct mlx5_eqe_temp_warning {
__be64 sensor_warning_lsb;
} __packed;
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+} __packed;
+
#define SYNC_RST_STATE_MASK 0xf
enum sync_rst_state_type {
MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+ MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
};
struct mlx5_eqe_sync_fw_update {
@@ -716,6 +729,11 @@ struct mlx5_eqe_sync_fw_update {
u8 sync_rst_state;
};
+struct mlx5_eqe_vhca_state {
+ __be16 ec_function;
+ __be16 function_id;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -735,6 +753,8 @@ union ev_data {
struct mlx5_eqe_temp_warning temp_warning;
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update;
+ struct mlx5_eqe_vhca_state vhca_state;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -780,10 +800,23 @@ struct mlx5_cqe64 {
u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@ -807,7 +840,10 @@ struct mlx5_cqe64 {
__be32 timestamp_l;
__be32 sop_drop_qpn;
__be16 wqe_counter;
- u8 signature;
+ union {
+ u8 signature;
+ u8 validity_iteration_count;
+ };
u8 op_own;
};
@@ -816,7 +852,7 @@ struct mlx5_mini_cqe8 {
__be32 rx_hash_result;
struct {
__be16 checksum;
- __be16 rsvd;
+ __be16 stridx;
};
struct {
__be16 wqe_counter;
@@ -836,6 +872,12 @@ enum {
enum {
MLX5_CQE_FORMAT_CSUM = 0x1,
+ MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
+};
+
+enum {
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
};
#define MLX5_MINI_CQE_ARRAY_SIZE 8
@@ -850,19 +892,20 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
return cqe->op_own >> 4;
}
-static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ /* num_of_mini_cqes is zero based */
+ return get_cqe_opcode(cqe) + 1;
}
-static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+ return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}
static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
@@ -875,7 +918,7 @@ static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
-static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
{
return cqe->l4_l3_hdr_type & 0x1;
}
@@ -890,8 +933,16 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
-#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
-#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
+static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
+}
+
+#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
+#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
+#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
struct mpwrq_cqe_bc {
__be16 filler_consumed_strides;
@@ -938,14 +989,23 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = GENMASK(3, 2),
/* cqe->rss_hash_type[3:2] - IP destination selected for hash
* (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
*/
- CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ CQE_RSS_IP_NONE = 0x0,
+ CQE_RSS_IPV4 = 0x1,
+ CQE_RSS_IPV6 = 0x2,
+ CQE_RSS_RESERVED = 0x3,
+
+ CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
*/
+ CQE_RSS_L4_NONE = 0x0,
+ CQE_RSS_L4_TCP = 0x1,
+ CQE_RSS_L4_UDP = 0x2,
+ CQE_RSS_L4_IPSEC = 0x3,
};
enum {
@@ -1014,7 +1074,7 @@ enum {
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
- * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+ * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
@@ -1051,6 +1111,11 @@ enum {
};
enum {
+ MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
+ MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,
+};
+
+enum {
MLX5_L3_PROT_TYPE_IPV4 = 0,
MLX5_L3_PROT_TYPE_IPV6 = 1,
};
@@ -1074,6 +1139,8 @@ enum {
MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
+ MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
+ MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};
enum {
@@ -1120,6 +1187,8 @@ enum mlx5_flex_parser_protos {
MLX5_FLEX_PROTO_GENEVE = 1 << 3,
MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+ MLX5_FLEX_PROTO_ICMP = 1 << 8,
+ MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
};
/* MLX5 DEV CAPs */
@@ -1130,6 +1199,9 @@ enum mlx5_cap_mode {
HCA_CAP_OPMOD_GET_CUR = 1,
};
+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
enum mlx5_cap_type {
MLX5_CAP_GENERAL = 0,
MLX5_CAP_ETHERNET_OFFLOADS,
@@ -1141,9 +1213,7 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_RESERVED,
- MLX5_CAP_VECTOR_CALC,
- MLX5_CAP_QOS,
+ MLX5_CAP_QOS = 0xc,
MLX5_CAP_DEBUG,
MLX5_CAP_RESERVED_14,
MLX5_CAP_DEV_MEM,
@@ -1152,6 +1222,11 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_MACSEC = 0x1f,
+ MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1166,7 +1241,6 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
- MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
MLX5_MCAM_REGS_NUM = 0x3,
};
@@ -1185,136 +1259,117 @@ enum mlx5_qcam_feature_groups {
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_64(mdev, cap) \
- MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
-#define MLX5_CAP_ETH(mdev, cap) \
- MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+#define MLX5_CAP_GEN_2(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
-#define MLX5_CAP_ETH_MAX(mdev, cap) \
+#define MLX5_CAP_GEN_2_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
- MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
-
-#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
-
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
-
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-
#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-
#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
-#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_eswitch_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_ESW_MAX(mdev, cap) \
- MLX5_GET(e_switch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
#define MLX5_CAP_ODP_MAX(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
-
-#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
- MLX5_GET(vector_calc_cap, \
- mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
#define MLX5_CAP_DEBUG(mdev, cap)\
- MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+ MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
@@ -1326,10 +1381,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
mng_access_reg_cap_mask.access_regs.reg)
-#define MLX5_CAP_MCAM_REG1(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
- mng_access_reg_cap_mask.access_regs1.reg)
-
#define MLX5_CAP_MCAM_REG2(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
@@ -1350,27 +1401,33 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP_DEV_MEM(mdev, cap)\
- MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP64_DEV_MEM(mdev, cap)\
- MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP_TLS(mdev, cap) \
- MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)
+ MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
- MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET(virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET64(virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP_IPSEC(mdev, cap)\
- MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)
+ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+
+#define MLX5_CAP_CRYPTO(mdev, cap)\
+ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
+
+#define MLX5_CAP_MACSEC(mdev, cap)\
+ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1416,6 +1473,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
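
With the RSS hash-type masks now expressed via GENMASK(), and
<linux/bitfield.h> pulled in above, consumers can decode the CQE fields
with FIELD_GET(); a sketch:

static bool demo_cqe_rss_is_tcp(const struct mlx5_cqe64 *cqe)
{
	return FIELD_GET(CQE_RSS_HTYPE_L4, cqe->rss_hash_type) ==
	       CQE_RSS_L4_TCP;
}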
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index c145de0473bc..bf9324a31ae9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -48,6 +48,8 @@
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/mutex.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -56,15 +58,15 @@
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>
+#define MLX5_ADEV_NAME "mlx5_core"
+
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -83,7 +85,7 @@ enum mlx5_sqp_t {
};
enum {
- MLX5_MAX_PORTS = 2,
+ MLX5_MAX_PORTS = 4,
};
enum {
@@ -99,6 +101,8 @@ enum {
};
enum {
+ MLX5_REG_SBPR = 0xb001,
+ MLX5_REG_SBCM = 0xb002,
MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
@@ -124,14 +128,18 @@ enum {
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
+ MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MTCAP = 0x9009,
+ MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
@@ -140,15 +148,20 @@ enum {
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
+ MLX5_REG_MPIR = 0x9059,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MSECQ = 0x9155,
+ MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
};
enum mlx5_qpts_trust_state {
@@ -190,7 +203,8 @@ enum port_state_policy {
enum mlx5_coredev_type {
MLX5_COREDEV_PF,
- MLX5_COREDEV_VF
+ MLX5_COREDEV_VF,
+ MLX5_COREDEV_SF,
};
struct mlx5_field_desc {
@@ -208,6 +222,7 @@ struct mlx5_rsc_debug {
enum mlx5_dev_event {
MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
MLX5_DEV_EVENT_PORT_AFFINITY = 129,
+ MLX5_DEV_EVENT_MULTIPORT_ESW = 130,
};
enum mlx5_port_status {
@@ -258,6 +273,16 @@ enum {
struct mlx5_cmd_stats {
u64 sum;
u64 n;
+ /* number of times command failed */
+ u64 failed;
+ /* number of times command failed on bad status returned by FW */
+ u64 failed_mbox_status;
+ /* errno returned by the last failed command */
+ u32 last_failed_errno;
+ /* last bad status returned by FW */
+ u8 last_failed_mbox_status;
+ /* syndrome returned by FW for the last failed command */
+ u32 last_failed_syndrome;
struct dentry *root;
/* protect command average calculations */
spinlock_t lock;
@@ -266,18 +291,23 @@ struct mlx5_cmd_stats {
struct mlx5_cmd {
struct mlx5_nb nb;
+ /* members which need to be queried or reinitialized on each reload */
+ struct {
+ u16 cmdif_rev;
+ u8 log_sz;
+ u8 log_stride;
+ int max_reg_cmds;
+ unsigned long bitmask;
+ struct semaphore sem;
+ struct semaphore pages_sem;
+ struct semaphore throttle_sem;
+ } vars;
enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
void *cmd_buf;
dma_addr_t dma;
- u16 cmdif_rev;
- u8 log_sz;
- u8 log_stride;
- int max_reg_cmds;
- int events;
- u32 __iomem *vector;
/* protect command queue allocations
*/
@@ -287,11 +317,8 @@ struct mlx5_cmd {
*/
spinlock_t token_lock;
u8 token;
- unsigned long bitmask;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct workqueue_struct *wq;
- struct semaphore sem;
- struct semaphore pages_sem;
int mode;
u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
@@ -299,14 +326,7 @@ struct mlx5_cmd {
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
- struct mlx5_cmd_stats *stats;
-};
-
-struct mlx5_port_caps {
- int gid_table_len;
- int pkey_table_len;
- u8 ext_port_cap;
- bool has_smi;
+ struct xarray stats;
};
struct mlx5_cmd_mailbox {
@@ -358,20 +378,6 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
- MLX5_MKEY_INDIRECT_DEVX,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@ -381,7 +387,6 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
- MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -432,24 +437,19 @@ struct mlx5_core_health {
u8 synd;
u32 fatal_error;
u32 crdump_size;
- /* wq spinlock to synchronize draining */
- spinlock_t wq_lock;
struct workqueue_struct *wq;
unsigned long flags;
struct work_struct fatal_report_work;
struct work_struct report_work;
- struct delayed_work recover_work;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
+ struct devlink_health_reporter *vnic_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
-struct mlx5_qp_table {
- struct notifier_block nb;
-
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
+enum {
+ MLX5_PF_NOTIFY_DISABLE_VF,
+ MLX5_PF_NOTIFY_ENABLE_VF,
};
struct mlx5_vf_context {
@@ -462,12 +462,14 @@ struct mlx5_vf_context {
u8 port_guid_valid:1;
u8 node_guid_valid:1;
enum port_state_policy policy;
+ struct blocking_notifier_head notifier;
};
struct mlx5_core_sriov {
struct mlx5_vf_context *vfs_ctx;
int num_vfs;
u16 max_vfs;
+ u16 max_ec_vfs;
};
struct mlx5_fc_pool {
@@ -493,6 +495,10 @@ struct mlx5_fc_stats {
unsigned long next_query;
unsigned long sampling_interval; /* jiffies */
u32 *bulk_query_out;
+ int bulk_query_len;
+ size_t num_counters;
+ bool bulk_query_alloc_failed;
+ unsigned long next_bulk_query_alloc;
struct mlx5_fc_pool fc_pool;
};
@@ -500,9 +506,15 @@ struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_devcom;
+struct mlx5_devcom_dev;
+struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
+struct mlx5_vhca_state_notifier;
+struct mlx5_sf_dev_table;
+struct mlx5_sf_hw_table;
+struct mlx5_sf_table;
+struct mlx5_crypto_dek_priv;
struct mlx5_rate_limit {
u32 rate;
@@ -512,8 +524,8 @@ struct mlx5_rate_limit {
struct mlx5_rl_entry {
u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
- u16 index;
u64 refcount;
+ u16 index;
u16 uid;
u8 dedicated : 1;
};
@@ -525,6 +537,7 @@ struct mlx5_rl_table {
u32 max_rate;
u32 min_rate;
struct mlx5_rl_entry *rl_entry;
+ u64 refcount;
};
struct mlx5_core_roce {
@@ -533,6 +546,41 @@ struct mlx5_core_roce {
struct mlx5_flow_handle *allow_rule;
};
+enum {
+ MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further device
+ * creation/deletion on driver rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+};
+
+struct mlx5_adev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *mdev;
+ int idx;
+};
+
+struct mlx5_debugfs_entries {
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ struct dentry *pages_debugfs;
+ struct dentry *lag_debugfs;
+};
+
+enum mlx5_func_type {
+ MLX5_PF,
+ MLX5_VF,
+ MLX5_SF,
+ MLX5_HOST_PF,
+ MLX5_EC_VF,
+ MLX5_FUNC_TYPE_NUM,
+};
+
+struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
@@ -542,58 +590,69 @@ struct mlx5_priv {
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct xarray page_root_xa;
- int fw_pages;
atomic_t reg_pages;
struct list_head free_list;
- int vfs_pages;
- int peer_pf_pages;
+ u32 fw_pages;
+ u32 page_counters[MLX5_FUNC_TYPE_NUM];
+ u32 fw_pages_alloc_failed;
+ u32 give_pages_dropped;
+ u32 reclaim_pages_discard;
struct mlx5_core_health health;
+ struct list_head traps;
- /* start: qp staff */
- struct dentry *qp_debugfs;
- struct dentry *eq_debugfs;
- struct dentry *cq_debugfs;
- struct dentry *cmdif_debugfs;
- /* end: qp staff */
+ struct mlx5_debugfs_entries dbg;
/* start: alloc staff */
- /* protect buffer alocation according to numa node */
+ /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc staff */
- struct dentry *dbg_root;
- struct list_head dev_list;
- struct list_head ctx_list;
- spinlock_t ctx_lock;
+ struct mlx5_adev **adev;
+ int adev_idx;
+ int sw_vhca_id;
struct mlx5_events *events;
+ struct mlx5_vhca_events *vhca_events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
- struct mlx5_devcom *devcom;
+ u32 flags;
+ struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_comp_dev *hca_devcom_comp;
+ struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
+#ifdef CONFIG_MLX5_SF
+ struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_sf_dev_table *sf_dev_table;
+ struct mlx5_core_dev *parent_mdev;
+#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ struct mlx5_sf_hw_table *sf_hw_table;
+ struct mlx5_sf_table *sf_table;
+#endif
};
enum mlx5_device_state {
- MLX5_DEVICE_STATE_UNINITIALIZED,
- MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_UP = 1,
MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
MLX5_INTERFACE_STATE_UP = BIT(0),
+ MLX5_BREAK_FW_WAIT = BIT(1),
};
enum mlx5_pci_status {
@@ -615,15 +674,25 @@ struct mlx5_td {
};
struct mlx5e_resources {
- u32 pdn;
- struct mlx5_td td;
- struct mlx5_core_mkey mkey;
- struct mlx5_sq_bfreg bfreg;
+ struct mlx5e_hw_objs {
+ u32 pdn;
+ struct mlx5_td td;
+ u32 mkey;
+ struct mlx5_sq_bfreg bfreg;
+#define MLX5_MAX_NUM_TC 8
+ u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+ bool tisn_valid;
+ } hw_objs;
+ struct net_device *uplink_netdev;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
};
enum mlx5_sw_icm_type {
MLX5_SW_ICM_TYPE_STEERING,
MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
+ MLX5_SW_ICM_TYPE_SW_ENCAP,
};
#define MLX5_MAX_RESERVED_GIDS 8
@@ -640,21 +709,26 @@ struct mlx5_pps {
struct work_struct out_work;
u64 start[MAX_PIN_NUM];
u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
};
-struct mlx5_clock {
- struct mlx5_core_dev *mdev;
- struct mlx5_nb pps_nb;
- seqlock_t lock;
+struct mlx5_timer {
struct cyclecounter cycles;
struct timecounter tc;
- struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+};
+
+struct mlx5_clock {
+ struct mlx5_nb pps_nb;
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
};
struct mlx5_dm;
@@ -666,6 +740,32 @@ struct mlx5_hv_vhca;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MKEY_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MKEY_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ u8 num_cmd_caches;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MKEY_CACHE_ENTRIES];
+};
+
+struct mlx5_hca_cap {
+ u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -676,16 +776,15 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
struct {
- u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -693,9 +792,10 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
struct mlx5_priv priv;
- struct mlx5_profile *profile;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
@@ -708,15 +808,22 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_MLX5_ACCEL
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
-#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
struct mlx5_rsc_dump *rsc_dump;
u32 vsc_addr;
struct mlx5_hv_vhca *hv_vhca;
+ struct mlx5_hwmon *hwmon;
+ u64 num_block_tc;
+ u64 num_block_ipsec;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec_fs *macsec_fs;
+ /* MACsec notifier chain to sync MACsec core and IB database */
+ struct blocking_notifier_head macsec_nh;
+#endif
+ u64 num_ipsec_offloads;
+ struct mlx5_sd *sd;
};
struct mlx5_db {
@@ -767,11 +874,8 @@ struct mlx5_cmd_work_ent {
u64 ts2;
u16 op;
bool polling;
-};
-
-struct mlx5_pas {
- u64 pa;
- u8 log_sz;
+ /* Track the maximum number of completion handlers */
+ refcount_t refcnt;
};
enum phy_port_state {
@@ -805,20 +909,10 @@ struct mlx5_hca_vport_context {
bool grh_required;
};
-static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
-{
- return buf->frags->buf + offset;
-}
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
-static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
-{
- return pci_get_drvdata(pdev);
-}
-
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
@@ -841,6 +935,11 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
+static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
+{
+ return ((u32)1 << log_sz) << log_stride;
+}
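
The helper added above folds the entry count and stride shifts into one 32-bit size
computation; a worked example (values illustrative):

    /* log_sz = 10 (1024 entries), log_stride = 6 (64-byte strides):
     * ((u32)1 << 10) << 6 == 65536 bytes (64 KiB) of work queue */
    u32 wq_bytes = wq_get_byte_sz(10, 6);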
+
static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
u8 log_stride, u8 log_sz,
u16 strides_offset,
@@ -885,10 +984,6 @@ enum {
CMD_ALLOWED_OPCODE_ALL,
};
-int mlx5_cmd_init(struct mlx5_core_dev *dev);
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
-void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
- enum mlx5_cmdif_state cmdif_state);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
@@ -896,7 +991,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;
atomic_t num_inflight;
- struct wait_queue_head wait;
+ struct completion inflight_done;
};
struct mlx5_async_work;
@@ -906,6 +1001,9 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
+ u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
+ void *out; /* pointer to the cmd output buffer */
};
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
@@ -914,7 +1012,9 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work);
-
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
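
The new mlx5_cmd_do()/mlx5_cmd_check() pair splits command execution from FW status
translation, which mlx5_cmd_exec() did in one step. A sketch of the intended calling
pattern (the command choice is illustrative; dev is assumed in scope):

    u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
    u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
    int err;

    MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
    err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
    /* a negative err with the raw FW status still in `out` can be
     * inspected here before translation */
    err = mlx5_cmd_check(dev, err, in, out); /* fold FW status into errno */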
@@ -932,65 +1032,65 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+
+void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages, bool ec_function);
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn);
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
+
+static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
@@ -999,11 +1099,8 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
- struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1020,9 +1117,8 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
-struct cpumask *
-mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
@@ -1043,48 +1139,37 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
-
-enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
-
-enum {
- MLX5_INTERFACE_PROTOCOL_IB = 0,
- MLX5_INTERFACE_PROTOCOL_ETH = 1,
- MLX5_INTERFACE_PROTOCOL_VDPA = 2,
-};
-
-struct mlx5_interface {
- void * (*add)(struct mlx5_core_dev *dev);
- void (*remove)(struct mlx5_core_dev *dev, void *context);
- int (*attach)(struct mlx5_core_dev *dev, void *context);
- void (*detach)(struct mlx5_core_dev *dev, void *context);
- int protocol;
- struct list_head list;
-};
-
-int mlx5_register_interface(struct mlx5_interface *intf);
-void mlx5_unregister_interface(struct mlx5_interface *intf);
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimises event queue dispatching.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue to the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+/* Blocking event notifier used to forward SW events, used for slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
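
A sketch of subscribing a consumer to this blocking chain (the handler is hypothetical;
sleeping is allowed in a blocking notifier, hence "slow path"):

    static int example_sw_event(struct notifier_block *nb,
    			    unsigned long event, void *data)
    {
    	/* handle the slow-path SW event */
    	return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
    	.notifier_call = example_sw_event,
    };

    /* on consumer init / teardown: */
    mlx5_blocking_notifier_register(dev, &example_nb);
    mlx5_blocking_notifier_unregister(dev, &example_nb);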
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
@@ -1092,6 +1177,14 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i);
+
+#define mlx5_lag_for_each_peer_mdev(dev, peer, i) \
+ for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \
+ peer; \
+ peer = mlx5_lag_get_next_peer_mdev(dev, &i))
+
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
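
A sketch of walking LAG peers with the iterator above (do_something_with() is a
hypothetical helper; the macro is assumed to yield bound peers other than dev itself):

    struct mlx5_core_dev *peer;
    int i;

    mlx5_lag_for_each_peer_mdev(dev, peer, i)
    	do_something_with(peer);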
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
@@ -1100,25 +1193,19 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
-#ifdef CONFIG_MLX5_CORE_IPOIB
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *));
-#endif /* CONFIG_MLX5_CORE_IPOIB */
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
+
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
@@ -1133,7 +1220,7 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
-static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
}
@@ -1154,6 +1241,23 @@ static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
return dev->priv.sriov.max_vfs;
}
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+ /* LACP owner conditions:
+ * 1) Function is physical.
+ * 2) LAG is supported by FW.
+ * 3) LAG is managed by driver (currently the only option).
+ */
+ return MLX5_CAP_GEN(dev, vport_group_manager) &&
+ (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+ MLX5_CAP_GEN(dev, lag_master);
+}
+
+static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.sriov.max_ec_vfs;
+}
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
@@ -1194,19 +1298,87 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
return MLX5_CAP_GEN(dev, native_port_num);
}
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- union devlink_param_value val;
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
}
+#ifdef CONFIG_MLX5_MACSEC
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) ||
+ !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs)
+ return false;
+
+ return true;
+}
+#endif
+
+enum {
+ MLX5_OCTWORD = 16,
+};
+
+struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
+ irqreturn_t (*handler)(int, void *),
+ const struct irq_affinity_desc *affdesc,
+ const char *name);
+void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map);
+
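A sketch of the dynamic MSI-X pair above (the handler and name are hypothetical, and a
NULL affinity descriptor is assumed to be acceptable):

    static irqreturn_t example_irq(int irq, void *data)
    {
    	return IRQ_HANDLED;
    }

    struct msi_map map = mlx5_msix_alloc(dev, example_irq, NULL, "example");
    if (map.index >= 0) {
    	/* map.virq is the Linux IRQ number to use */
    	mlx5_msix_free(dev, map);
    }
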
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index e49d8c0d4f26..3705a382276b 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -4,18 +4,18 @@
#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H
-#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_NUM_CMD_EQE (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)
struct mlx5_eq;
+struct mlx5_irq;
struct mlx5_core_dev;
struct mlx5_eq_param {
- u8 irq_index;
int nent;
u64 mask[4];
+ struct mlx5_irq *irq;
};
struct mlx5_eq *
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index c16827eeba9c..df73a2ccc9af 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -7,12 +7,12 @@
#define _MLX5_ESWITCH_
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <net/devlink.h>
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
enum {
- MLX5_ESWITCH_NONE,
MLX5_ESWITCH_LEGACY,
MLX5_ESWITCH_OFFLOADS
};
@@ -29,11 +29,20 @@ enum {
REP_LOADED,
};
+enum mlx5_switchdev_event {
+ MLX5_SWITCHDEV_EVENT_PAIR,
+ MLX5_SWITCHDEV_EVENT_UNPAIR,
+};
+
struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_ops {
int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ int (*event)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data);
};
struct mlx5_eswitch_rep_data {
@@ -48,6 +57,7 @@ struct mlx5_eswitch_rep {
/* Only IB rep is using vport_index */
u16 vport_index;
u32 vlan_refcount;
+ struct mlx5_eswitch *esw;
};
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
@@ -61,10 +71,9 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
- u16 vport_num, u32 sqn);
-
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
+ struct mlx5_eswitch_rep *rep, u32 sqn);
#ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode
@@ -74,19 +83,19 @@ bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
/* Reg C0 usage:
- * Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) >
+ * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT_BITS(12) | ESW_REG_C0_OBJ(16) >
*
- * Highest 8 bits of the reg c0 is the vhca_id, next 8 bits is vport_num,
- * the rest (lowest 16 bits) is left for tc chain tag restoration.
- * VHCA_ID + VPORT comprise the SOURCE_PORT matching.
+ * The highest 4 bits of reg c0 are the PF_NUM (range 0-15), the next 12 bits a
+ * unique non-zero vport id (range 1-4095). The rest (lowest 16 bits) is left
+ * for user data objects managed by a common mapping context.
+ * PFNUM + VPORT comprise the SOURCE_PORT matching.
*/
-#define ESW_VHCA_ID_BITS 8
-#define ESW_VPORT_BITS 8
-#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS)
+#define ESW_VPORT_BITS 12
+#define ESW_PFNUM_BITS 4
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
-#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
-#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\
- 0)
+#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)
static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
{
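
A worked example of the reg c0 layout described above (values illustrative): for PF
number 1 and vport 5,

    u32 pfnum = 1, vport = 5;
    u32 metadata = ((pfnum << ESW_VPORT_BITS) | vport) <<
    	       ESW_SOURCE_PORT_METADATA_OFFSET;
    /* metadata == ((1 << 12) | 5) << 16 == 0x10050000; the low 16 bits
     * remain free for user data objects */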
@@ -95,12 +104,59 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
-u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num);
+
+/* Reg C1 usage:
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
+ *
+ * The highest bit is reserved as a marker bit for other offloads; the next 12 bits
+ * of reg c1 are the encapsulation tunnel id, the next 11 bits the encapsulation tunnel options,
+ * and the lowest 8 bits are used for zone id.
+ *
+ * Zone id is used to restore CT flow when packet misses on chain.
+ *
+ * Tunnel id and options are used together to restore the tunnel info metadata
+ * on miss and to support inner header rewrite by means of implicit chain 0
+ * flows.
+ */
+#define ESW_RESERVED_BITS 1
+#define ESW_ZONE_ID_BITS 8
+#define ESW_TUN_OPTS_BITS 11
+#define ESW_TUN_ID_BITS 12
+#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
+#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
+#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
+#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
+#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
+/* 0x7FF is a reserved mapping */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
+
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
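
A sketch of unpacking reg c1 with the masks above (the register value is illustrative):

    u32 reg_c1 = 0x00112233;	/* e.g. recovered from a mapped context */
    u8 zone = reg_c1 & ESW_ZONE_ID_MASK;			/* low 8 bits */
    u32 tun_opts = (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
    u32 tun = (reg_c1 & ESW_TUN_MASK) >> ESW_TUN_OFFSET;	/* id + opts */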
+
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
-static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
- return MLX5_ESWITCH_NONE;
+ return MLX5_ESWITCH_LEGACY;
}
static inline enum devlink_eswitch_encap_mode
@@ -122,8 +178,7 @@ mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
};
static inline u32
-mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
- int vport_num)
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
{
return 0;
};
@@ -133,6 +188,34 @@ mlx5_eswitch_get_vport_metadata_mask(void)
{
return 0;
}
+
+static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
+static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
+}
+
+static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
+}
+
+/* The returned number is valid only when the dev is an eswitch manager. */
+static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF;
+}
+
#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 92d991d93757..3fb428ce7d1c 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,6 +38,22 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
+};
+
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
@@ -50,6 +66,8 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
+ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
};
#define LEFTOVERS_RULE_NUM 2
@@ -64,33 +82,49 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
+ MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
};
enum {
FDB_BYPASS_PATH,
+ FDB_CRYPTO_INGRESS,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
+ FDB_CRYPTO_EGRESS,
FDB_PER_VPORT,
};
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -98,6 +132,7 @@ struct mlx5_flow_handle;
enum {
FLOW_CONTEXT_HAS_TAG = BIT(0),
+ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
};
struct mlx5_flow_context {
@@ -118,6 +153,10 @@ enum {
MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -131,6 +170,14 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
+ u32 sampler_id;
};
};
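
A sketch of the new range destination (the tables are hypothetical and assumed
pre-created; the assumed semantics are that field values inside [min, max) steer to
hit_ft and anything else to miss_ft):

    struct mlx5_flow_destination dest = {
    	.type = MLX5_FLOW_DESTINATION_TYPE_RANGE,
    	.range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN,
    	.range.hit_ft = hit_ft,		/* hypothetical table */
    	.range.miss_ft = miss_ft,	/* hypothetical table */
    	.range.min = 0,
    	.range.max = 128,
    };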
@@ -154,6 +201,7 @@ struct mlx5_flow_table_attr {
int max_fte;
u32 level;
u32 flags;
+ u16 uid;
struct mlx5_flow_table *next_ft;
struct {
@@ -172,9 +220,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level, u16 vport);
+ struct mlx5_flow_table_attr *ft_attr, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns,
int prio, u32 level);
@@ -190,6 +236,19 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_exe_aso {
+ u32 object_id;
+ u8 type;
+ u8 return_reg_id;
+ union {
+ u32 ctrl_data;
+ struct {
+ u8 meter_idx;
+ u8 init_color;
+ } flow_meter;
+ };
+};
+
struct mlx5_fs_vlan {
u16 ethtype;
u16 vid;
@@ -207,13 +266,15 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
- union {
- u32 ipsec_obj_id;
- uintptr_t esp_id;
- };
+ struct mlx5_flow_act_crypto_params {
+ u8 type;
+ u32 obj_id;
+ } crypto;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
+ struct mlx5_flow_group *fg;
+ struct mlx5_exe_aso exe_aso;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
@@ -236,10 +297,16 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *old_dest);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
+/* As mlx5_fc_create() but doesn't queue the stats refresh work. */
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
@@ -252,13 +319,27 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
+
+struct mlx5_pkt_reformat_params {
+ int type;
+ u8 param_0;
+ u8 param_1;
+ size_t size;
+ void *data;
+};
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
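
A sketch of the reworked allocation call, which now takes the params struct instead of
loose type/size/data arguments (the reformat type and header buffer are illustrative):

    struct mlx5_pkt_reformat_params params = {
    	.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,	/* assumed type */
    	.size = sizeof(encap_hdr),	/* hypothetical header buffer */
    	.data = encap_hdr,
    };
    struct mlx5_pkt_reformat *reformat;

    reformat = mlx5_packet_reformat_alloc(dev, &params,
    					  MLX5_FLOW_NAMESPACE_FDB);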
#endif
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
index 9db21cd0e92c..bc5125bc0561 100644
--- a/include/linux/mlx5/fs_helpers.h
+++ b/include/linux/mlx5/fs_helpers.h
@@ -38,46 +38,6 @@
#define MLX5_FS_IPV4_VERSION 4
#define MLX5_FS_IPV6_VERSION 6
-static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
-static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
- const u32 *match_v, u8 match)
-{
- const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
- outer_headers);
-
- return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
-}
-
-static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
-}
-
-static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
-}
-
-static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
-}
-
static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
const u32 *match_c,
const u32 *match_v, int version)
@@ -131,12 +91,4 @@ mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
MLX5_FS_IPV6_VERSION);
}
-static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c =
- MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
#endif
diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h
new file mode 100644
index 000000000000..f7ff4c2a95d0
--- /dev/null
+++ b/include/linux/mlx5/macsec.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef MLX5_MACSEC_H
+#define MLX5_MACSEC_H
+
+#ifdef CONFIG_MLX5_MACSEC
+struct mlx5_macsec_event_data {
+ struct mlx5_macsec_fs *macsec_fs;
+ void *macdev;
+ u32 fs_id;
+ bool is_tx;
+};
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs);
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx);
+
+#endif
+#endif /* MLX5_MACSEC_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index de1ffb4804d6..c940b329a475 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -64,17 +64,14 @@ enum {
};
enum {
- MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
- MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
- MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
- MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
-};
-
-enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS = 0x1,
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC = 0x15,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
+ MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25,
};
enum {
@@ -83,17 +80,24 @@ enum {
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
};
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -126,6 +130,11 @@ enum {
MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111,
MLX5_CMD_OP_ALLOC_SF = 0x113,
MLX5_CMD_OP_DEALLOC_SF = 0x114,
+ MLX5_CMD_OP_SUSPEND_VHCA = 0x115,
+ MLX5_CMD_OP_RESUME_VHCA = 0x116,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117,
+ MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118,
+ MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -133,6 +142,7 @@ enum {
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
+ MLX5_CMD_OP_MODIFY_MEMIC = 0x207,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -299,6 +309,10 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
+ MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
+ MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
+ MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
MLX5_CMD_OP_MAX
};
@@ -308,6 +322,15 @@ enum {
MLX5_CMD_OP_GENERAL_END = 0xd00,
};
+enum {
+ MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),
+ MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),
+};
+
+enum {
+ MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT = 0x1,
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -339,7 +362,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
- u8 reserved_at_1e[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -368,7 +391,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 reserved_at_37[0x9];
u8 geneve_tlv_option_0_data[0x1];
- u8 reserved_at_41[0x4];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 reserved_at_42[0x3];
u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4];
@@ -390,6 +414,17 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 metadata_reg_c_0[0x1];
};
+/* Table 2170 - Flow Table Fields Supported 2 Format */
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 reserved_at_0[0xe];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x1];
+ u8 tunnel_header_0_1[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 reserved_at_1[0x1];
@@ -400,7 +435,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 reformat[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x1];
+ u8 reset_root_to_default[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
u8 reserved_at_c[0x1];
@@ -420,7 +455,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_1a[0x2];
u8 ipsec_encrypt[0x1];
u8 ipsec_decrypt[0x1];
- u8 reserved_at_1e[0x2];
+ u8 sw_owner_v2[0x1];
+ u8 reserved_at_1f[0x1];
u8 termination_table_raw_traffic[0x1];
u8 reserved_at_21[0x1];
@@ -429,16 +465,31 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x20];
+ u8 reformat_add_esp_trasport[0x1];
+ u8 reformat_l2_to_l3_esp_tunnel[0x1];
+ u8 reformat_add_esp_transport_over_udp[0x1];
+ u8 reformat_del_esp_trasport[0x1];
+ u8 reformat_l3_esp_tunnel_to_l2[0x1];
+ u8 reformat_del_esp_transport_over_udp[0x1];
+ u8 execute_aso[0x1];
+ u8 reserved_at_47[0x19];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x2];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 reserved_at_66[0x2];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reserved_at_6a[0xe];
u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18];
+ u8 reserved_at_80[0x10];
+ u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
- u8 log_max_flow_counter[0x8];
- u8 reserved_at_a8[0x10];
+ u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
@@ -458,6 +509,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 reserved_at_6[0x1a];
};
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -483,7 +550,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x18];
+ u8 reserved_at_c0[0x10];
+ u8 ipv4_ihl[0x4];
+ u8 reserved_at_c4[0x4];
+
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -532,10 +602,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
- u8 reserved_at_d8[0x7];
+ u8 reserved_at_d8[0x6];
+ u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
@@ -550,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0x20];
+ u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
@@ -589,7 +660,13 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 reserved_at_1a0[0x8];
+
+ u8 macsec_syndrome[0x8];
+ u8 ipsec_syndrome[0x8];
+ u8 reserved_at_1b8[0x8];
+
+ u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -619,7 +696,59 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
u8 geneve_tlv_option_0_data[0x20];
- u8 reserved_at_140[0xc0];
+ u8 gtpu_teid[0x20];
+
+ u8 gtpu_msg_type[0x8];
+ u8 gtpu_msg_flags[0x8];
+ u8 reserved_at_170[0x10];
+
+ u8 gtpu_dw_2[0x20];
+
+ u8 gtpu_first_ext_dw_0[0x20];
+
+ u8 gtpu_dw_0[0x20];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc4_bits {
+ u8 prog_sample_field_value_0[0x20];
+
+ u8 prog_sample_field_id_0[0x20];
+
+ u8 prog_sample_field_value_1[0x20];
+
+ u8 prog_sample_field_id_1[0x20];
+
+ u8 prog_sample_field_value_2[0x20];
+
+ u8 prog_sample_field_id_2[0x20];
+
+ u8 prog_sample_field_value_3[0x20];
+
+ u8 prog_sample_field_id_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_fte_match_set_misc5_bits {
+ u8 macsec_tag_0[0x20];
+
+ u8 macsec_tag_1[0x20];
+
+ u8 macsec_tag_2[0x20];
+
+ u8 macsec_tag_3[0x20];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 tunnel_header_2[0x20];
+
+ u8 tunnel_header_3[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -717,7 +846,15 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200];
+ u8 reserved_at_e00[0x700];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@ -728,6 +865,20 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_at_20c0[0x5f40];
};
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+};
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@ -741,9 +892,14 @@ enum {
struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
- u8 reserved_at_8[0xd];
+ u8 reserved_at_8[0x5];
+ u8 fdb_uplink_hairpin[0x1];
+ u8 fdb_multi_path_any_table_limit_regc[0x1];
+ u8 reserved_at_f[0x3];
+ u8 fdb_multi_path_any_table[0x1];
+ u8 reserved_at_13[0x2];
u8 fdb_modify_header_fwd_to_table[0x1];
- u8 reserved_at_16[0x1];
+ u8 fdb_ipv4_ttl_modify[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
@@ -759,7 +915,13 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_at_800[0x1000];
+ u8 reserved_at_800[0xC00];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_esw_fdb;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_bitmask_support_2_esw_fdb;
+
+ u8 reserved_at_1500[0x300];
u8 sw_steering_fdb_action_drop_icm_address_rx[0x40];
@@ -783,9 +945,12 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x3];
+ u8 reserved_at_5[0x1];
+ u8 vport_cvlan_insert_always[0x1];
+ u8 esw_shared_ingress_acl[0x1];
u8 esw_uplink_ingress_acl[0x1];
- u8 reserved_at_9[0x10];
+ u8 root_ft_on_other_esw[0x1];
+ u8 reserved_at_a[0xf];
u8 esw_functions_changed[0x1];
u8 reserved_at_1a[0x1];
u8 ecpf_vport_exists[0x1];
@@ -819,11 +984,17 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x4];
+ u8 reserved_at_7[0x1];
+ u8 nic_sq_scheduling[0x1];
+ u8 nic_bw_share[0x1];
+ u8 nic_rate_limit[0x1];
u8 packet_pacing_uid[0x1];
- u8 reserved_at_c[0x14];
+ u8 log_esw_max_sched_depth[0x4];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0xb];
+ u8 log_max_qos_nic_queue_group[0x5];
+ u8 reserved_at_30[0x10];
u8 packet_pacing_max_rate[0x20];
@@ -840,7 +1011,17 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x700];
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_meter_aso_max_alloc[0x5];
+ u8 reserved_at_130[0x3];
+ u8 log_max_num_meter_aso[0x5];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x6c0];
};
struct mlx5_ifc_debug_cap_bits {
@@ -875,7 +1056,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 scatter_fcs[0x1];
u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
@@ -890,7 +1072,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 tunnel_stateless_ipv4_over_vxlan[0x1];
u8 tunnel_stateless_ip_over_ip[0x1];
u8 insert_trailer[0x1];
- u8 reserved_at_2b[0x5];
+ u8 reserved_at_2b[0x1];
+ u8 tunnel_stateless_ip_over_ip_rx[0x1];
+ u8 tunnel_stateless_ip_over_ip_tx[0x1];
+ u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
@@ -906,11 +1091,22 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
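[Editor's sketch: how the qp_ts_format capability below is typically consumed when creating a QP. The MLX5_TIMESTAMP_FORMAT_* context values appear later in this patch; the helper name is illustrative.]

static u8 choose_qp_ts_format(struct mlx5_core_dev *mdev)
{
	/* A cap of FREE_RUNNING (0) means only free-running stamping is
	 * available; anything else lets the device pick, permitting
	 * real-time timestamps.
	 */
	return MLX5_CAP_ROCE(mdev, qp_ts_format) ==
	       MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ?
	       MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
	       MLX5_TIMESTAMP_FORMAT_DEFAULT;
}
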
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
u8 reserved_at_1[0x3];
u8 sw_r_roce_src_udp_port[0x1];
- u8 reserved_at_5[0x1b];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 fl_rc_qp_when_roce_enabled[0x1];
+ u8 roce_cc_general[0x1];
+ u8 qp_ooo_transmit_default[0x1];
+ u8 reserved_at_9[0x15];
+ u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -950,6 +1146,30 @@ struct mlx5_ifc_sync_steering_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_sync_crypto_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 crypto_type[0x10];
+
+ u8 reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_sync_crypto_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
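[Editor's sketch: driving the new SYNC_CRYPTO command through the standard command interface. The MLX5_CMD_OP_SYNC_CRYPTO opcode name and the crypto_type value are assumptions inferred from this layout; mlx5_cmd_exec_in() is the usual in-tree wrapper.]

static int mlx5_sync_crypto(struct mlx5_core_dev *mdev, u16 crypto_type)
{
	u32 in[MLX5_ST_SZ_DW(sync_crypto_in)] = {};

	MLX5_SET(sync_crypto_in, in, opcode, MLX5_CMD_OP_SYNC_CRYPTO);
	MLX5_SET(sync_crypto_in, in, crypto_type, crypto_type);

	/* Status/syndrome from sync_crypto_out are decoded by the wrapper. */
	return mlx5_cmd_exec_in(mdev, sync_crypto, in);
}
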
struct mlx5_ifc_device_mem_cap_bits {
u8 memic[0x1];
u8 reserved_at_1[0x1f];
@@ -973,11 +1193,23 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
- u8 reserved_at_120[0x20];
+ u8 log_indirect_encap_sw_icm_size[0x8];
+ u8 reserved_at_128[0x10];
+ u8 log_header_modify_pattern_sw_icm_size[0x8];
u8 header_modify_sw_icm_start_address[0x40];
- u8 reserved_at_180[0x680];
+ u8 reserved_at_180[0x40];
+
+ u8 header_modify_pattern_sw_icm_start_address[0x40];
+
+ u8 memic_operations[0x20];
+
+ u8 reserved_at_220[0x20];
+
+ u8 indirect_encap_sw_icm_start_address[0x40];
+
+ u8 reserved_at_280[0x580];
};
struct mlx5_ifc_device_event_cap_bits {
@@ -1005,7 +1237,14 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
u8 max_emulated_devices[0x8];
u8 max_num_virtio_queues[0x18];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x13];
+ u8 desc_group_mkey_supported[0x1];
+ u8 freeze_to_rdy_supported[0x1];
+ u8 reserved_at_d5[0xb];
+
+ u8 reserved_at_e0[0x20];
u8 umem_1_buffer_param_a[0x20];
@@ -1090,33 +1329,6 @@ struct mlx5_ifc_odp_cap_bits {
u8 reserved_at_120[0x6E0];
};
-struct mlx5_ifc_calc_op {
- u8 reserved_at_0[0x10];
- u8 reserved_at_10[0x9];
- u8 op_swap_endianness[0x1];
- u8 op_min[0x1];
- u8 op_xor[0x1];
- u8 op_or[0x1];
- u8 op_and[0x1];
- u8 op_max[0x1];
- u8 op_add[0x1];
-};
-
-struct mlx5_ifc_vector_calc_cap_bits {
- u8 calc_matrix[0x1];
- u8 reserved_at_1[0x1f];
- u8 reserved_at_20[0x8];
- u8 max_vec_count[0x8];
- u8 reserved_at_30[0xd];
- u8 max_chunk_size[0x3];
- struct mlx5_ifc_calc_op calc0;
- struct mlx5_ifc_calc_op calc1;
- struct mlx5_ifc_calc_op calc2;
- struct mlx5_ifc_calc_op calc3;
-
- u8 reserved_at_c0[0x720];
-};
-
struct mlx5_ifc_tls_cap_bits {
u8 tls_1_2_aes_gcm_128[0x1];
u8 tls_1_3_aes_gcm_128[0x1];
@@ -1144,6 +1356,24 @@ struct mlx5_ifc_ipsec_cap_bits {
u8 reserved_at_30[0x7d0];
};
+struct mlx5_ifc_macsec_cap_bits {
+ u8 macsec_epn[0x1];
+ u8 reserved_at_1[0x2];
+ u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_macsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_macsec_full_replay_window[0x8];
+ u8 max_log_macsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x7c0];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -1197,9 +1427,17 @@ enum {
enum {
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
+ MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
+ MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+ MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
+ MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11,
+ MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16,
+ MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
+ MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18,
+ MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19,
};
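
[Editor's sketch: these enable bits are consumed as a mask against the flex_parser_protocols HCA capability word, e.g. before offering GTP-U matching. The helper name is illustrative.]

static bool mlx5_gtpu_parsing_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, flex_parser_protocols) &
	       MLX5_FLEX_PARSER_GTPU_ENABLED;
}
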
enum {
@@ -1222,8 +1460,29 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
+enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+ MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x30];
+ u8 reserved_at_0[0x10];
+ u8 shared_object_to_user_object_allowed[0x1];
+ u8 reserved_at_13[0xe];
+ u8 vhca_resource_manager[0x1];
+
+ u8 hca_cap_2[0x1];
+ u8 create_lag_when_not_master_up[0x1];
+ u8 dtor[0x1];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
u8 vhca_id[0x10];
u8 reserved_at_40[0x40];
@@ -1231,16 +1490,26 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
u8 event_cap[0x1];
- u8 reserved_at_91[0x7];
+ u8 reserved_at_91[0x2];
+ u8 isolate_vl_tc_new[0x1];
+ u8 reserved_at_94[0x4];
u8 prio_tag_required[0x1];
u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];
u8 reserved_at_a0[0x3];
u8 ece_support[0x1];
- u8 reserved_at_a4[0x7];
+ u8 reserved_at_a4[0x5];
+ u8 reg_c_preserve[0x1];
+ u8 reserved_at_aa[0x1];
u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x10];
+ u8 reserved_at_b0[0x1];
+ u8 uplink_follow[0x1];
+ u8 ts_cqe_to_dest_cqn[0x1];
+ u8 reserved_at_b3[0x6];
+ u8 go_back_n[0x1];
+ u8 shampo[0x1];
+ u8 reserved_at_bb[0x5];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@ -1253,9 +1522,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq_sz[0x8];
u8 relaxed_ordering_write[0x1];
- u8 relaxed_ordering_read[0x1];
+ u8 relaxed_ordering_read_pci_enabled[0x1];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0x8];
+ u8 reserved_at_f0[0x6];
+ u8 terminate_scatter_list_mkey[0x1];
+ u8 repeated_mkey[0x1];
u8 dump_fill_mkey[0x1];
u8 reserved_at_f9[0x2];
u8 fast_teardown[0x1];
@@ -1271,13 +1542,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
+ u8 reserved_at_120[0x2];
+ u8 qpc_extension[0x1];
+ u8 reserved_at_123[0x7];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0xa];
+ u8 reserved_at_130[0x2];
+ u8 eth_wqe_too_small[0x1];
+ u8 reserved_at_133[0x6];
+ u8 vnic_env_cq_overrun[0x1];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0x6];
+ u8 reserved_at_140[0x5];
u8 release_all_pages[0x1];
+ u8 must_not_use[0x1];
u8 reserved_at_147[0x2];
u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
@@ -1416,9 +1693,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 rc[0x1];
u8 uar_4k[0x1];
- u8 reserved_at_241[0x9];
+ u8 reserved_at_241[0x7];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 regexp_params[0x1];
u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_251[0x1];
+ u8 umem_uid_0[0x1];
+ u8 reserved_at_253[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
@@ -1430,9 +1712,13 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x8];
+ u8 reserved_at_270[0x3];
+ u8 qp_error_syndrome[0x1];
+ u8 reserved_at_274[0x2];
+ u8 lag_dct[0x2];
u8 lag_tx_port_affinity[0x1];
- u8 reserved_at_279[0x2];
+ u8 lag_native_fdb_selection[0x1];
+ u8 reserved_at_27a[0x1];
u8 lag_master[0x1];
u8 num_lag_ports[0x4];
@@ -1454,15 +1740,25 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_320[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_at_328[0x3];
+ u8 reserved_at_328[0x2];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0xb];
+ u8 reserved_at_330[0x6];
+ u8 pci_sync_for_fw_update_with_driver_unload[0x1];
+ u8 vnic_env_cnt_steering_fail[0x1];
+ u8 vport_counter_local_loopback[0x1];
+ u8 q_counter_aggregation[0x1];
+ u8 q_counter_other_vport[0x1];
u8 log_max_xrcd[0x5];
u8 nic_receive_steering_discard[0x1];
u8 receive_discard_vport_down[0x1];
u8 transmit_discard_vport_down[0x1];
- u8 reserved_at_343[0x5];
+ u8 eq_overrun_count[0x1];
+ u8 reserved_at_344[0x1];
+ u8 invalid_command_count[0x1];
+ u8 quota_exceeded_count[0x1];
+ u8 reserved_at_347[0x1];
u8 log_max_flow_counter_bulk[0x8];
u8 max_flow_counter_15_0[0x10];
@@ -1487,7 +1783,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 log_max_current_uc_list_wr_supported[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1510,7 +1807,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x3];
+ u8 reserved_at_3e8[0x1];
+ u8 silent_mode[0x1];
+ u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1519,11 +1818,17 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
- u8 reserved_at_440[0x20];
+ u8 sq_ts_format[0x2];
+ u8 rq_ts_format[0x2];
+ u8 steering_format_version[0x4];
+ u8 create_qp_start_hint[0x18];
- u8 reserved_at_460[0x3];
+ u8 reserved_at_460[0x1];
+ u8 ats[0x1];
+ u8 cross_vhca_rqt[0x1];
u8 log_max_uctx[0x5];
- u8 reserved_at_468[0x2];
+ u8 reserved_at_468[0x1];
+ u8 crypto[0x1];
u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
@@ -1547,9 +1852,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
- u8 reserved_at_570[0x10];
-
- u8 reserved_at_580[0x33];
+ u8 reserved_at_570[0x9];
+ u8 adv_virtualization[0x1];
+ u8 reserved_at_57a[0x6];
+
+ u8 reserved_at_580[0xb];
+ u8 log_max_dci_stream_channels[0x5];
+ u8 reserved_at_590[0x3];
+ u8 log_max_dci_errored_streams[0x5];
+ u8 reserved_at_598[0x8];
+
+ u8 reserved_at_5a0[0x10];
+ u8 enhanced_cqe_compression[0x1];
+ u8 reserved_at_5b1[0x2];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -1560,7 +1875,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10];
- u8 reserved_at_5e0[0x10];
+ u8 reserved_at_5e0[0x8];
+ u8 flex_parser_id_gtpu_dw_0[0x4];
+ u8 reserved_at_5ec[0x4];
u8 tag_matching[0x1];
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
@@ -1571,14 +1888,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 affiliate_nic_vport_criteria[0x8];
u8 native_port_num[0x8];
u8 num_vhca_ports[0x8];
- u8 reserved_at_618[0x6];
+ u8 flex_parser_id_gtpu_teid[0x4];
+ u8 reserved_at_61c[0x2];
u8 sw_owner_id[0x1];
u8 reserved_at_61f[0x1];
u8 max_num_of_monitor_counters[0x10];
u8 num_ppcnt_monitor_counters[0x10];
- u8 reserved_at_640[0x10];
+ u8 max_num_sf[0x10];
u8 num_q_monitor_counters[0x10];
u8 reserved_at_660[0x20];
@@ -1587,7 +1905,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 sf_set_partition[0x1];
u8 reserved_at_682[0x1];
u8 log_max_sf[0x5];
- u8 reserved_at_688[0x8];
+ u8 apu[0x1];
+ u8 reserved_at_689[0x4];
+ u8 migration[0x1];
+ u8 reserved_at_68e[0x2];
u8 log_min_sf_size[0x8];
u8 max_num_sf_partitions[0x8];
@@ -1602,22 +1923,97 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
- u8 reserved_at_700[0x80];
+ u8 flex_parser_id_gtpu_dw_2[0x4];
+ u8 flex_parser_id_gtpu_first_ext_dw_0[0x4];
+ u8 num_total_dynamic_vf_msix[0x18];
+ u8 reserved_at_720[0x14];
+ u8 dynamic_msix_table_size[0xc];
+ u8 reserved_at_740[0xc];
+ u8 min_dynamic_vf_msix_table_size[0x4];
+ u8 reserved_at_750[0x4];
+ u8 max_dynamic_vf_msix_table_size[0xc];
+
+ u8 reserved_at_760[0x3];
+ u8 log_max_num_header_modify_argument[0x5];
+ u8 reserved_at_768[0x4];
+ u8 log_header_modify_argument_granularity[0x4];
+ u8 reserved_at_770[0x3];
+ u8 log_header_modify_argument_max_alloc[0x5];
+ u8 reserved_at_778[0x8];
+
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+enum {
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20),
+};
- MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
- MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
+enum {
+ MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200,
+};
+
+struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 migratable[0x1];
+ u8 reserved_at_81[0x1f];
+
+ u8 max_reformat_insert_size[0x8];
+ u8 max_reformat_insert_offset[0x8];
+ u8 max_reformat_remove_size[0x8];
+ u8 max_reformat_remove_offset[0x8];
+
+ u8 reserved_at_c0[0x8];
+ u8 migration_multi_load[0x1];
+ u8 migration_tracking_state[0x1];
+ u8 reserved_at_ca[0x6];
+ u8 migration_in_chunks[0x1];
+ u8 reserved_at_d1[0xf];
+
+ u8 cross_vhca_object_to_object_supported[0x20];
+
+ u8 allowed_object_for_other_vhca_access[0x40];
+
+ u8 reserved_at_140[0x60];
+
+ u8 flow_table_type_2_type[0x8];
+ u8 reserved_at_1a8[0x3];
+ u8 log_min_mkey_entity_size[0x5];
+ u8 reserved_at_1b0[0x10];
+
+ u8 reserved_at_1c0[0x60];
+
+ u8 reserved_at_220[0x1];
+ u8 sw_vhca_id_valid[0x1];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_230[0x10];
+
+ u8 reserved_at_240[0xb];
+ u8 ts_cqe_metadata_size2wqe_counter[0x5];
+ u8 reserved_at_250[0x10];
+
+ u8 reserved_at_260[0x120];
+ u8 reserved_at_380[0x10];
+ u8 ec_vf_vport_base[0x10];
+
+ u8 reserved_at_3a0[0x10];
+ u8 max_rqt_vhca_id[0x10];
+
+ u8 reserved_at_3c0[0x440];
+};
+
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
enum mlx5_flow_table_miss_action {
@@ -1632,7 +2028,8 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 destination_eswitch_owner_vhca_id_valid[0x1];
u8 packet_reformat[0x1];
- u8 reserved_at_22[0xe];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -1666,7 +2063,11 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
- u8 reserved_at_a00[0x600];
+ struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
+
+ struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
+
+ u8 reserved_at_e00[0x200];
};
enum {
@@ -1738,7 +2139,21 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 reserved_at_140[0x80];
+
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x4];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
struct mlx5_ifc_cmd_pas_bits pas[];
};
@@ -1748,6 +2163,13 @@ struct mlx5_ifc_rq_num_bits {
u8 rq_num[0x18];
};
+struct mlx5_ifc_rq_vhca_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+ u8 reserved_at_20[0x10];
+ u8 rq_vhca_id[0x10];
+};
+
struct mlx5_ifc_mac_address_layout_bits {
u8 reserved_at_0[0x10];
u8 mac_addr_47_32[0x10];
@@ -1822,6 +2244,17 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
u8 reserved_at_360[0x4a0];
};
+struct mlx5_ifc_cong_control_r_roce_general_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x10];
+ u8 rtt_resp_dscp_valid[0x1];
+ u8 reserved_at_91[0x9];
+ u8 rtt_resp_dscp[0x6];
+
+ u8 reserved_at_a0[0x760];
+};
+
struct mlx5_ifc_cong_control_802_1qau_rp_bits {
u8 reserved_at_0[0x80];
@@ -2669,6 +3102,42 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reset_unload_to;
+
+ u8 reserved_at_1c0[0x20];
+};
+
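[Editor's sketch, shape only: pulling one entry out of a queried DTOR payload with the standard accessors. How to_multiplier scales to_value is defined by the PRM and deliberately left out here; the function name is illustrative.]

static void read_fw_reset_timeout(void *dtor_out, u32 *value, u32 *mult)
{
	void *to = MLX5_ADDR_OF(dtor_reg, dtor_out, fw_reset_to);

	*value = MLX5_GET(default_timeout, to, to_value);
	*mult  = MLX5_GET(default_timeout, to, to_multiplier);
}
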
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -2818,11 +3287,18 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
- u8 reserved_at_10[0x3];
+ u8 reserved_at_10[0x2];
+ u8 isolate_vl_tc[0x1];
u8 pm_state[0x2];
u8 reserved_at_15[0x1];
u8 req_e2e_credit_mode[0x2];
@@ -2846,7 +3322,10 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x6];
+ u8 reserved_at_55[0x1];
+ u8 retry_mode[0x2];
+ u8 ts_format[0x2];
+ u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -2881,10 +3360,12 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_at_3e0[0x8];
+ u8 reserved_at_3e0[0x3];
+ u8 log_num_dci_stream_channels[0x5];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x8];
+ u8 reserved_at_400[0x3];
+ u8 log_num_dci_errored_streams[0x5];
u8 deth_sqpn[0x18];
u8 reserved_at_420[0x20];
@@ -2969,8 +3450,33 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
+struct mlx5_ifc_crypto_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 sw_wrapped_dek[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
@@ -2978,13 +3484,16 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
- struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_macsec_cap_bits macsec_cap;
+ struct mlx5_ifc_crypto_cap_bits crypto_cap;
+ struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
u8 reserved_at_0[0x8000];
};
@@ -3000,8 +3509,9 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
};
enum {
@@ -3010,6 +3520,11 @@ enum {
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
};
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+};
+
struct mlx5_ifc_vlan_bits {
u8 ethtype[0x10];
u8 prio[0x3];
@@ -3017,6 +3532,38 @@ struct mlx5_ifc_vlan_bits {
u8 vid[0xc];
};
+enum {
+ MLX5_FLOW_METER_COLOR_RED = 0x0,
+ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
+ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
+ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum {
+ MLX5_EXE_ASO_FLOW_METER = 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_8[0x14];
+ u8 action[0x1];
+ u8 init_color[0x2];
+ u8 meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x7];
+ u8 aso_object_id[0x18];
+
+ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
+};
+
struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan;
@@ -3029,9 +3576,9 @@ struct mlx5_ifc_flow_context_bits {
u8 action[0x10];
u8 extended_destination[0x1];
- u8 reserved_at_81[0x1];
+ u8 uplink_hairpin_en[0x1];
u8 flow_source[0x2];
- u8 reserved_at_84[0x4];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
@@ -3043,12 +3590,14 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
- u8 ipsec_obj_id[0x20];
+ u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_at_1200[0x600];
+ struct mlx5_ifc_execute_aso_bits execute_aso[4];
+
+ u8 reserved_at_1300[0x500];
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
};
@@ -3116,11 +3665,29 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 transmit_discard_vport_down[0x40];
- u8 reserved_at_140[0xa0];
+ u8 async_eq_overrun[0x20];
+
+ u8 comp_eq_overrun[0x20];
+
+ u8 reserved_at_180[0x20];
+
+ u8 invalid_command[0x20];
+
+ u8 quota_exceeded_command[0x20];
u8 internal_rq_out_of_buffer[0x20];
- u8 reserved_at_200[0xe00];
+ u8 cq_overrun[0x20];
+
+ u8 eth_wqe_too_small[0x20];
+
+ u8 reserved_at_220[0xc0];
+
+ u8 generated_pkt_steering_fail[0x40];
+
+ u8 handled_pkt_steering_fail[0x40];
+
+ u8 reserved_at_360[0xc80];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -3159,8 +3726,8 @@ enum {
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
};
enum {
@@ -3185,7 +3752,7 @@ struct mlx5_ifc_tirc_bits {
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
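
[Editor's sketch: enabling IPv4/IPv6 LRO through the renamed packet_merge_mask field when building a TIR context. The timeout and payload-size values are illustrative; note lro_max_ip_payload_size is encoded in units of 256 bytes.]

static void set_tir_lro(void *tirc)
{
	MLX5_SET(tirc, tirc, packet_merge_mask,
		 MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
		 MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size, (64 * 1024 - 1) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 32);
}
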
@@ -3273,7 +3840,9 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3287,11 +3856,15 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_80[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_a0[0x50];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x8];
+ u8 ts_cqe_to_dest_cqn[0x18];
+ u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
+ u8 qos_queue_group_id[0x10];
u8 reserved_at_120[0x40];
@@ -3306,6 +3879,7 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
};
enum {
@@ -3346,7 +3920,10 @@ struct mlx5_ifc_rqtc_bits {
u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[];
+ union {
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num);
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca);
+ };
};
enum {
@@ -3360,6 +3937,18 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+};
+
+enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -3370,7 +3959,9 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3390,7 +3981,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c0[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0];
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
@@ -3413,6 +4010,11 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ VHCA_ID_TYPE_HW = 0,
+ VHCA_ID_TYPE_SW = 1,
+};
+
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
@@ -3429,13 +4031,18 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xc];
-
+ u8 vhca_id_type[0x1];
+ u8 reserved_at_41[0xb];
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
- u8 reserved_at_60[0xd0];
+ u8 reserved_at_60[0xa0];
+ u8 reserved_at_100[0x1];
+ u8 sd_group[0x3];
+ u8 reserved_at_104[0x1c];
+
+ u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -3486,7 +4093,9 @@ struct mlx5_ifc_mkc_bits {
u8 lw[0x1];
u8 lr[0x1];
u8 access_mode_1_0[0x2];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x2];
+ u8 ma_translation_mode[0x2];
+ u8 reserved_at_1c[0x4];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
@@ -3635,8 +4244,8 @@ struct mlx5_ifc_eqc_bits {
u8 reserved_at_80[0x20];
- u8 reserved_at_a0[0x18];
- u8 intr[0x8];
+ u8 reserved_at_a0[0x14];
+ u8 intr[0xc];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -3762,7 +4371,7 @@ struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x2];
u8 dbr_umem_valid[0x1];
- u8 reserved_at_7[0x1];
+ u8 apu_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -3772,7 +4381,8 @@ struct mlx5_ifc_cqc_bits {
u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x6];
+ u8 cqe_compression_layout[0x2];
u8 reserved_at_20[0x20];
@@ -3788,8 +4398,7 @@ struct mlx5_ifc_cqc_bits {
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_at_a0[0x18];
- u8 c_eqn[0x8];
+ u8 c_eqn_or_apu_element[0x20];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -3818,6 +4427,7 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ struct mlx5_ifc_cong_control_r_roce_general_bits cong_control_r_roce_general;
u8 reserved_at_0[0x800];
};
@@ -3939,13 +4549,19 @@ struct mlx5_ifc_health_buffer_bits {
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -4155,7 +4771,10 @@ struct mlx5_ifc_set_l2_table_entry_in_bits {
u8 reserved_at_c0[0x20];
- u8 reserved_at_e0[0x13];
+ u8 reserved_at_e0[0x10];
+ u8 silent_mode_valid[0x1];
+ u8 silent_mode[0x1];
+ u8 reserved_at_f2[0x1];
u8 vlan_valid[0x1];
u8 vlan[0xc];
@@ -4202,7 +4821,12 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0xe];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
union mlx5_ifc_hca_cap_union_bits capability;
};
@@ -4577,7 +5201,9 @@ struct mlx5_ifc_query_vport_counter_out_bits {
struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
- u8 reserved_at_680[0xa00];
+ struct mlx5_ifc_traffic_counter_bits local_loopback;
+
+ u8 reserved_at_700[0x980];
};
enum {
@@ -4716,7 +5342,11 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 null_mkey[0x20];
- u8 reserved_at_a0[0x60];
+ u8 terminate_scatter_list_mkey[0x20];
+
+ u8 repeated_mkey[0x20];
+
+ u8 reserved_at_a0[0x20];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -4745,6 +5375,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits {
enum {
SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+ SCHEDULING_HIERARCHY_NIC = 0x3,
};
struct mlx5_ifc_query_scheduling_element_in_bits {
@@ -4859,24 +5490,54 @@ struct mlx5_ifc_query_rmp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_cqe_error_syndrome_bits {
+ u8 hw_error_syndrome[0x8];
+ u8 hw_syndrome_type[0x4];
+ u8 reserved_at_c[0x4];
+ u8 vendor_error_syndrome[0x8];
+ u8 syndrome[0x8];
+};
+
+struct mlx5_ifc_qp_context_extension_bits {
+ u8 reserved_at_0[0x60];
+
+ struct mlx5_ifc_cqe_error_syndrome_bits error_syndrome;
+
+ u8 reserved_at_80[0x580];
+};
+
+struct mlx5_ifc_qpc_extension_and_pas_list_in_bits {
+ struct mlx5_ifc_qp_context_extension_bits qpc_data_extension;
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_qp_pas_list_in_bits {
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits {
+ struct mlx5_ifc_qp_pas_list_in_bits qp_pas_list;
+ struct mlx5_ifc_qpc_extension_and_pas_list_in_bits qpc_ext_and_pas_list;
+};
+
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
- u8 ece[0x20];
+ u8 reserved_at_40[0x40];
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 reserved_at_800[0x80];
- u8 pas[][0x40];
+ union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits qp_pas_or_qpc_ext_and_pas;
};
struct mlx5_ifc_query_qp_in_bits {
@@ -4886,7 +5547,8 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 qpn[0x18];
u8 reserved_at_60[0x20];
@@ -5004,10 +5666,15 @@ struct mlx5_ifc_query_q_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x80];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x60];
u8 clear[0x1];
- u8 reserved_at_c1[0x1f];
+ u8 aggregate[0x1];
+ u8 reserved_at_c2[0x1e];
u8 reserved_at_e0[0x18];
u8 counter_set_id[0x8];
@@ -5309,7 +5976,8 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 op_mod[0x10];
u8 other_function[0x1];
- u8 reserved_at_41[0xf];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0xe];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5453,12 +6121,375 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+enum {
+ MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
+};
+
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
+#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
+#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
+
+struct mlx5_ifc_match_definer_match_mask_bits {
+ u8 reserved_at_1c0[5][0x20];
+ u8 match_dw_8[0x20];
+ u8 match_dw_7[0x20];
+ u8 match_dw_6[0x20];
+ u8 match_dw_5[0x20];
+ u8 match_dw_4[0x20];
+ u8 match_dw_3[0x20];
+ u8 match_dw_2[0x20];
+ u8 match_dw_1[0x20];
+ u8 match_dw_0[0x20];
+
+ u8 match_byte_7[0x8];
+ u8 match_byte_6[0x8];
+ u8 match_byte_5[0x8];
+ u8 match_byte_4[0x8];
+
+ u8 match_byte_3[0x8];
+ u8 match_byte_2[0x8];
+ u8 match_byte_1[0x8];
+ u8 match_byte_0[0x8];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+
+ u8 reserved_at_180[0x40];
+
+ union {
+ struct {
+ u8 match_mask[16][0x20];
+ };
+ struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
+ };
+};
+
+struct mlx5_ifc_general_obj_create_param_bits {
+ u8 alias_object[0x1];
+ u8 reserved_at_1[0x2];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_8[0x18];
+};
+
+struct mlx5_ifc_general_obj_query_param_bits {
+ u8 alias_object[0x1];
+ u8 obj_offset[0x1f];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ union {
+ struct mlx5_ifc_general_obj_create_param_bits create;
+ struct mlx5_ifc_general_obj_query_param_bits query;
+ } op_param;
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_allow_other_vhca_access_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x50];
+ u8 object_type_to_be_accessed[0x10];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_c0[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+};
+
+struct mlx5_ifc_allow_other_vhca_access_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_header_arg_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x8];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_create_modify_header_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_modify_header_arg_bits arg;
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
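[Editor's sketch: creating a match definer as a general object, using the command header layouts above. MLX5_CMD_OP_CREATE_GENERAL_OBJECT and MLX5_OBJ_TYPE_MATCH_DEFINER are the standard general-object interface; the dw/byte selector programming is omitted for brevity.]

static int create_definer(struct mlx5_core_dev *mdev, u16 format_id,
			  u32 *obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	void *defn;
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	defn = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, defn, format_id, format_id);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return err;
}
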
+struct mlx5_ifc_alias_context_bits {
+ u8 vhca_id_to_be_accessed[0x10];
+ u8 reserved_at_10[0xd];
+ u8 status[0x3];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_40[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+ u8 metadata[0x80];
+};
+
+struct mlx5_ifc_create_alias_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_alias_context_bits alias_ctx;
+};
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -5624,12 +6655,14 @@ struct mlx5_ifc_query_eq_in_bits {
};
struct mlx5_ifc_packet_reformat_context_in_bits {
- u8 reserved_at_0[0x5];
- u8 reformat_type[0x3];
- u8 reserved_at_8[0xe];
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_10[0x6];
u8 reformat_data_size[0xa];
- u8 reserved_at_20[0x10];
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_28[0x8];
u8 reformat_data[2][0x8];
u8 more_reformat_data[][0x8];
@@ -5669,12 +6702,30 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
+enum {
+ MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
+};
+
enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
+ MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
+ MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
+ MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
+ MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@@ -5795,6 +6846,8 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -5821,7 +6874,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits {
u8 reserved_at_68[0x10];
u8 num_of_actions[0x8];
- union mlx5_ifc_set_add_copy_action_in_auto_bits actions[0];
+ union mlx5_ifc_set_add_copy_action_in_auto_bits actions[];
};
struct mlx5_ifc_dealloc_modify_header_context_out_bits {
@@ -5845,6 +6898,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6181,7 +7246,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@ -6622,7 +7687,12 @@ struct mlx5_ifc_init_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x2];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_70[0x10];
+
u8 sw_owner_id[4][0x20];
};
@@ -7381,7 +8451,7 @@ struct mlx5_ifc_dealloc_uar_out_bits {
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7757,7 +8827,8 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 input_qpn[0x18];
u8 reserved_at_60[0x20];
@@ -7870,7 +8941,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7902,6 +8973,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
@@ -7922,7 +8998,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -7937,7 +9015,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@ -8228,7 +9309,7 @@ struct mlx5_ifc_alloc_uar_out_bits {
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8320,7 +9401,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x38];
+ u8 reserved_at_40[0x33];
+ u8 flow_counter_bulk_log_size[0x5];
u8 flow_counter_bulk[0x8];
};
@@ -8710,6 +9792,8 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_100g_2x[0x10];
u8 fec_override_admin_50g_1x[0x10];
+
+ u8 reserved_at_140[0x140];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -9035,6 +10119,51 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+struct mlx5_ifc_mpir_reg_bits {
+ u8 sdm[0x1];
+ u8 reserved_at_1[0x1b];
+ u8 host_buses[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 local_port[0x8];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+enum {
+ MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
+};
+
+enum {
+ MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
+ MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
+};
+
+struct mlx5_ifc_mtutc_reg_bits {
+ u8 reserved_at_0[0x5];
+ u8 freq_adj_units[0x3];
+ u8 reserved_at_8[0x3];
+ u8 log_max_freq_adjustment[0x5];
+
+ u8 reserved_at_10[0xc];
+ u8 operation[0x4];
+
+ u8 freq_adjustment[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 utc_sec[0x20];
+
+ u8 reserved_at_a0[0x2];
+ u8 utc_nsec[0x1e];
+
+ u8 time_adjustment[0x20];
+};
+
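[Editor's sketch: writing an MTUTC frequency adjustment through the access-register command, as a PTP adjfreq path would. mlx5_core_access_reg() and MLX5_REG_MTUTC are the in-tree interfaces; error handling beyond the return code is elided.]

static int mtutc_adjust_freq(struct mlx5_core_dev *mdev, s32 freq_adj)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)];

	MLX5_SET(mtutc_reg, in, operation,
		 MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
	MLX5_SET(mtutc_reg, in, freq_adjustment, freq_adj);

	/* Last two args: register argument (unused) and write = 1. */
	return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
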
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x68];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -9093,7 +10222,17 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x6e];
+ u8 reserved_at_0[0x50];
+ u8 mtutc_freq_adj_units[0x1];
+ u8 mtutc_time_adjustment_extended_range[0x1];
+ u8 reserved_at_52[0xb];
+ u8 mcia_32dwords[0x1];
+ u8 out_pulse_duration_ns[0x1];
+ u8 npps_period[0x1];
+ u8 reserved_at_60[0xa];
+ u8 reset_state[0x1];
+ u8 ptpcyc2realtime_modify[0x1];
+ u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
u8 reserved_at_6f[0x5];
u8 mark_tx_action_cnp[0x1];
@@ -9114,13 +10253,23 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcqi[0x1];
u8 mcqs[0x1];
- u8 regs_95_to_87[0x9];
+ u8 regs_95_to_90[0x6];
+ u8 mpir[0x1];
+ u8 regs_88_to_87[0x2];
u8 mpegc[0x1];
- u8 regs_85_to_68[0x12];
+ u8 mtutc[0x1];
+ u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
- u8 regs_63_to_32[0x20];
- u8 regs_31_to_0[0x20];
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_41[0x4];
+ u8 mfrl[0x1];
+ u8 regs_39_to_32[0x8];
+
+ u8 regs_31_to_10[0x16];
+ u8 mtmp[0x1];
+ u8 regs_8_to_0[0x9];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
@@ -9138,7 +10287,9 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 mirc[0x1];
u8 regs_97_to_96[0x2];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x09];
+ u8 synce_registers[0x2];
+ u8 regs_84_to_64[0x15];
u8 regs_63_to_32[0x20];
@@ -9260,25 +10411,31 @@ struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
+
u8 entropy_force_cap[0x1];
u8 entropy_calc_cap[0x1];
u8 entropy_gre_calc_cap[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
u8 reserved_at_3f[0x1];
+
u8 entropy_force[0x1];
u8 entropy_calc[0x1];
u8 entropy_gre_calc[0x1];
- u8 reserved_at_43[0x1b];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
struct mlx5_ifc_lane_2_module_mapping_bits {
- u8 reserved_at_0[0x6];
- u8 rx_lane[0x2];
- u8 reserved_at_8[0x6];
- u8 tx_lane[0x2];
+ u8 reserved_at_0[0x4];
+ u8 rx_lane[0x4];
+ u8 reserved_at_8[0x4];
+ u8 tx_lane[0x4];
u8 reserved_at_10[0x8];
u8 module[0x8];
};
@@ -9287,8 +10444,8 @@ struct mlx5_ifc_bufferx_reg_bits {
u8 reserved_at_0[0x6];
u8 lossy[0x1];
u8 epsb[0x1];
- u8 reserved_at_8[0xc];
- u8 size[0xc];
+ u8 reserved_at_8[0x8];
+ u8 size[0x10];
u8 xoff_threshold[0x10];
u8 xon_threshold[0x10];
@@ -9508,6 +10665,7 @@ enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
};
enum {
@@ -9528,6 +10686,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
};
struct mlx5_ifc_initial_seg_bits {
@@ -9580,7 +10739,12 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
- u8 reserved_at_20[0x24];
+ u8 reserved_at_20[0x13];
+ u8 cap_log_min_npps_period[0x5];
+ u8 reserved_at_38[0x3];
+ u8 cap_log_min_out_pulse_duration_ns[0x5];
+
+ u8 reserved_at_40[0x4];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
@@ -9599,7 +10763,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 cap_pin_4_mode[0x4];
u8 field_select[0x20];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 npps_period[0x40];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -9608,7 +10774,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 pin_mode[0x4];
u8 pin[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x2];
+ u8 out_pulse_duration_ns[0x1e];
u8 time_stamp[0x40];
@@ -9759,7 +10926,16 @@ struct mlx5_ifc_mcda_reg_bits {
u8 reserved_at_60[0x20];
- u8 data[0][0x20];
+ u8 data[][0x20];
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
+ MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
+ MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
+ MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+ MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT = 5,
};
enum {
@@ -9780,7 +10956,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
- u8 reserved_at_28[0x8];
+ u8 reserved_at_28[0x4];
+ u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
};
@@ -9792,6 +10969,98 @@ struct mlx5_ifc_mirc_reg_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_pddr_monitor_opcode_bits {
+ u8 reserved_at_0[0x10];
+ u8 monitor_opcode[0x10];
+};
+
+union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits {
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ u8 reserved_at_0[0x20];
+};
+
+enum {
+ /* Monitor opcodes */
+ MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0,
+};
+
+struct mlx5_ifc_pddr_troubleshooting_page_bits {
+ u8 reserved_at_0[0x10];
+ u8 group_opcode[0x10];
+
+ union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode;
+
+ u8 reserved_at_40[0x20];
+
+ u8 status_message[59][0x20];
+};
+
+union mlx5_ifc_pddr_reg_page_data_auto_bits {
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
+ u8 reserved_at_0[0x7c0];
+};
+
+enum {
+ MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1,
+};
+
+struct mlx5_ifc_pddr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_at_12[0xe];
+
+ u8 reserved_at_20[0x18];
+ u8 page_select[0x8];
+
+ union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
+};
+
+struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+};
+
+struct mlx5_ifc_mtcap_reg_bits {
+ u8 reserved_at_0[0x19];
+ u8 sensor_count[0x7];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sensor_map[0x40];
+};
+
+struct mlx5_ifc_mtmp_reg_bits {
+ u8 reserved_at_0[0x14];
+ u8 sensor_index[0xc];
+
+ u8 reserved_at_20[0x10];
+ u8 temperature[0x10];
+
+ u8 mte[0x1];
+ u8 mtr[0x1];
+ u8 reserved_at_42[0xe];
+ u8 max_temperature[0x10];
+
+ u8 tee[0x2];
+ u8 reserved_at_62[0xe];
+ u8 temp_threshold_hi[0x10];
+
+ u8 reserved_at_80[0x10];
+ u8 temp_threshold_lo[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sensor_name_hi[0x20];
+ u8 sensor_name_lo[0x20];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -9806,6 +11075,9 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pamp_reg_bits pamp_reg;
struct mlx5_ifc_paos_reg_bits paos_reg;
struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ struct mlx5_ifc_pddr_reg_bits pddr_reg;
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
struct mlx5_ifc_peir_reg_bits peir_reg;
struct mlx5_ifc_pelc_reg_bits pelc_reg;
struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
@@ -9849,6 +11121,10 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcda_reg_bits mcda_reg;
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
+ struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
+ struct mlx5_ifc_mtcap_reg_bits mtcap_reg;
+ struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
u8 reserved_at_0[0x60e0];
};
@@ -9885,14 +11161,19 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x7];
+ u8 table_of_other_vport[0x1];
+ u8 table_vport_number[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x8];
u8 underlay_qpn[0x18];
- u8 reserved_at_e0[0x120];
+ u8 table_eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_e1[0xf];
+ u8 table_eswitch_owner_vhca_id[0x10];
+ u8 reserved_at_100[0x100];
};
enum {
@@ -10047,7 +11328,68 @@ struct mlx5_ifc_pbmc_reg_bits {
struct mlx5_ifc_bufferx_reg_bits buffer[10];
- u8 reserved_at_2e0[0x40];
+ u8 reserved_at_2e0[0x80];
+};
+
+struct mlx5_ifc_sbpr_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x4];
+ u8 dir[0x2];
+ u8 reserved_at_8[0x14];
+ u8 pool[0x4];
+
+ u8 infi_size[0x1];
+ u8 reserved_at_21[0x7];
+ u8 size[0x18];
+
+ u8 reserved_at_40[0x1c];
+ u8 mode[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_81[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 ext_buff_occupancy[0x18];
+};
+
+struct mlx5_ifc_sbcm_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x6];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 pg_buff[0x6];
+ u8 reserved_at_18[0x6];
+ u8 dir[0x2];
+
+ u8 reserved_at_20[0x1f];
+ u8 exc[0x1];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_a1[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_c0[0x8];
+ u8 min_buff[0x18];
+
+ u8 infi_max[0x1];
+ u8 reserved_at_e1[0x7];
+ u8 max_buff[0x18];
+
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x1c];
+ u8 pool[0x4];
};
struct mlx5_ifc_qtct_reg_bits {
@@ -10121,11 +11463,22 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2,
+};
+
struct mlx5_ifc_lagc_bits {
- u8 reserved_at_0[0x1d];
+ u8 fdb_selection_mode[0x1];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
- u8 reserved_at_20[0x14];
+ u8 reserved_at_20[0xc];
+ u8 active_port[0x4];
+ u8 reserved_at_30[0x4];
u8 tx_remap_affinity_2[0x4];
u8 reserved_at_38[0x4];
u8 tx_remap_affinity_1[0x4];
@@ -10248,6 +11601,41 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+ MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+};
+
+struct mlx5_ifc_modify_memic_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x18];
+ u8 memic_operation_type[0x8];
+
+ u8 memic_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_modify_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_operation_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
struct mlx5_ifc_alloc_memic_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10301,33 +11689,11 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
- u8 reserved_at_80[0x1b];
+ u8 ats[0x1];
+ u8 reserved_at_81[0x1a];
u8 log_page_size[0x5];
u8 page_offset[0x20];
@@ -10653,19 +12019,66 @@ struct mlx5_ifc_affiliated_event_header_bits {
};
enum {
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
};
enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+ MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
};
enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
- MLX5_IPSEC_OBJECT_ICV_LEN_12B,
- MLX5_IPSEC_OBJECT_ICV_LEN_8B,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+ MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+ MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+ MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+ MLX5_IPSEC_ASO_MODE = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+ u8 mode[0x2];
+ u8 window_sz[0x2];
+ u8 soft_lft_arm[0x1];
+ u8 hard_lft_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 esn_event_arm[0x1];
+ u8 reserved_at_20a[0x16];
+
+ u8 remove_flow_pkt_cnt[0x20];
+
+ u8 remove_flow_soft_lft[0x20];
+
+ u8 reserved_at_260[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[0x100];
};
struct mlx5_ifc_ipsec_obj_bits {
@@ -10689,7 +12102,11 @@ struct mlx5_ifc_ipsec_obj_bits {
u8 implicit_iv[0x40];
- u8 reserved_at_100[0x700];
+ u8 reserved_at_100[0x8];
+ u8 ipsec_aso_access_pd[0x18];
+ u8 reserved_at_120[0xe0];
+
+ struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
};
struct mlx5_ifc_create_ipsec_obj_in_bits {
@@ -10712,21 +12129,152 @@ struct mlx5_ifc_modify_ipsec_obj_in_bits {
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
+enum {
+ MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+#define MLX5_MACSEC_ASO_INC_SN 0x2
+#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
+
+struct mlx5_ifc_macsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x1];
+ u8 mode[0x2];
+ u8 window_size[0x2];
+ u8 soft_lifetime_arm[0x1];
+ u8 hard_lifetime_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 epn_event_arm[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 remove_flow_packet_count[0x20];
+
+ u8 remove_flow_soft_lifetime[0x20];
+
+ u8 reserved_at_60[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[8][0x20];
+};
+
+struct mlx5_ifc_macsec_offload_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 confidentiality_en[0x1];
+ u8 reserved_at_41[0x1];
+ u8 epn_en[0x1];
+ u8 epn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 confidentiality_offset[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 epn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sci[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 macsec_aso_access_pd[0x18];
+
+ u8 reserved_at_120[0x60];
+
+ u8 salt[3][0x20];
+
+ u8 reserved_at_1e0[0x20];
+
+ struct mlx5_ifc_macsec_aso_bits macsec_aso;
+};
+
+struct mlx5_ifc_create_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_modify_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+enum {
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_macsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_wrapped_dek_bits {
+ u8 gcm_iv[0x60];
+
+ u8 reserved_at_60[0x20];
+
+ u8 const0[0x1];
+ u8 key_size[0x1];
+ u8 reserved_at_82[0x2];
+ u8 key2_invalid[0x1];
+ u8 reserved_at_85[0x3];
+ u8 pd[0x18];
+
+ u8 key_purpose[0x5];
+ u8 reserved_at_a5[0x13];
+ u8 kek_id[0x8];
+
+ u8 reserved_at_c0[0x40];
+
+ u8 key1[0x8][0x20];
+
+ u8 key2[0x8][0x20];
+
+ u8 reserved_at_300[0x40];
+
+ u8 const1[0x1];
+ u8 reserved_at_341[0x1f];
+
+ u8 reserved_at_360[0x20];
+
+ u8 auth_tag[0x80];
+};
+
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];
- u8 reserved_at_40[0x14];
+ u8 state[0x8];
+ u8 sw_wrapped[0x1];
+ u8 reserved_at_49[0xb];
u8 key_size[0x4];
u8 reserved_at_58[0x4];
- u8 key_type[0x4];
+ u8 key_purpose[0x4];
u8 reserved_at_60[0x8];
u8 pd[0x18];
- u8 reserved_at_80[0x180];
- u8 key[8][0x20];
+ u8 reserved_at_80[0x100];
+
+ u8 opaque[0x40];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 key[8][0x80];
- u8 reserved_at_300[0x500];
+ u8 sw_wrapped_dek[8][0x80];
+
+ u8 reserved_at_a00[0x600];
};
struct mlx5_ifc_create_encryption_key_in_bits {
@@ -10734,14 +12282,135 @@ struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
};
+struct mlx5_ifc_modify_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
+enum {
+ MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2,
+ MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3,
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1];
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 reserved_at_5[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_8[0x18];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x3];
+ u8 cbs_exponent[0x5];
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_50[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 reserved_at_80[0x3];
+ u8 ebs_exponent[0x5];
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_90[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
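/*
 * Editor's note, not part of the patch: per the meter math used by the
 * driver (an assumption, mirroring the PRM's floating-point encoding),
 * the rate and burst pairs above decode roughly as
 *
 *	CIR [bits/s] = 8 * 10^9 * cir_mantissa / 2^cir_exponent
 *	CBS [bytes]  = cbs_mantissa * 2^cbs_exponent
 *
 * and likewise for EIR/EBS.
 */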
+struct mlx5_ifc_flow_meter_aso_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 meter_aso_access_pd[0x18];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
+};
+
+struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
+};
+
+struct mlx5_ifc_int_kek_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 auto_gen[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x8];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x180];
+ u8 key[8][0x80];
+
+ u8 reserved_at_600[0x200];
+};
+
+struct mlx5_ifc_create_int_kek_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_create_int_kek_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_sampler_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 table_type[0x8];
+ u8 level[0x8];
+ u8 reserved_at_50[0xf];
+ u8 ignore_flow_level[0x1];
+
+ u8 sample_ratio[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 sample_table_id[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 default_table_id[0x18];
+
+ u8 sw_steering_icm_address_rx[0x40];
+ u8 sw_steering_icm_address_tx[0x40];
+
+ u8 reserved_at_140[0xa0];
+};
+
+struct mlx5_ifc_create_sampler_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+struct mlx5_ifc_query_sampler_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
};
struct mlx5_ifc_tls_static_params_bits {
@@ -10784,4 +12453,296 @@ enum {
MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
};
+enum {
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1,
+};
+
+struct mlx5_ifc_suspend_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_suspend_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1,
+};
+
+struct mlx5_ifc_resume_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_resume_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 chunk[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 required_umem_size[0x20];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 remaining_total_size[0x40];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_save_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 set_track[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_save_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 actual_image_size[0x20];
+
+ u8 next_required_umem_size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_adv_virtualization_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 pg_track_log_max_num[0x5];
+ u8 pg_track_max_num_range[0x8];
+ u8 pg_track_log_min_addr_space[0x8];
+ u8 pg_track_log_max_addr_space[0x8];
+
+ u8 reserved_at_20[0x3];
+ u8 pg_track_log_min_msg_size[0x5];
+ u8 reserved_at_28[0x3];
+ u8 pg_track_log_max_msg_size[0x5];
+ u8 reserved_at_30[0x3];
+ u8 pg_track_log_min_page_size[0x5];
+ u8 reserved_at_38[0x3];
+ u8 pg_track_log_max_page_size[0x5];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_page_track_report_entry_bits {
+ u8 dirty_address_high[0x20];
+
+ u8 dirty_address_low[0x20];
+};
+
+enum {
+ MLX5_PAGE_TRACK_STATE_TRACKING,
+ MLX5_PAGE_TRACK_STATE_REPORTING,
+ MLX5_PAGE_TRACK_STATE_ERROR,
+};
+
+struct mlx5_ifc_page_track_range_bits {
+ u8 start_address[0x40];
+
+ u8 length[0x40];
+};
+
+struct mlx5_ifc_page_track_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 state[0x4];
+ u8 track_type[0x4];
+ u8 log_addr_space_size[0x8];
+ u8 reserved_at_90[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_98[0x3];
+ u8 log_msg_size[0x5];
+
+ u8 reserved_at_a0[0x8];
+ u8 reporting_qpn[0x18];
+
+ u8 reserved_at_c0[0x18];
+ u8 num_ranges[0x8];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 range_start_address[0x40];
+
+ u8 length[0x40];
+
+ struct mlx5_ifc_page_track_range_bits track_range[0];
+};
+
+struct mlx5_ifc_create_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_modify_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_query_page_track_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_msecq_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x12];
+ u8 network_option[0x2];
+ u8 local_ssm_code[0x4];
+ u8 local_enhanced_ssm_code[0x8];
+
+ u8 local_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+enum {
+ MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2),
+};
+
+enum mlx5_msees_admin_status {
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1,
+};
+
+enum mlx5_msees_oper_status {
+ MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1,
+ MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2,
+ MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3,
+ MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4,
+ MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
+};
+
+enum mlx5_msees_failure_reason {
+ MLX5_MSEES_FAILURE_REASON_UNDEFINED_ERROR = 0x0,
+ MLX5_MSEES_FAILURE_REASON_PORT_DOWN = 0x1,
+ MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF = 0x2,
+ MLX5_MSEES_FAILURE_REASON_NET_SYNCHRONIZER_DEVICE_ERROR = 0x3,
+ MLX5_MSEES_FAILURE_REASON_LACK_OF_RESOURCES = 0x4,
+};
+
+struct mlx5_ifc_msees_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 lp_msb[0x2];
+ u8 reserved_at_14[0xc];
+
+ u8 field_select[0x20];
+
+ u8 admin_status[0x4];
+ u8 oper_status[0x4];
+ u8 ho_acq[0x1];
+ u8 reserved_at_49[0xc];
+ u8 admin_freq_measure[0x1];
+ u8 oper_freq_measure[0x1];
+ u8 failure_reason[0x9];
+
+ u8 frequency_diff[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
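Editor's note: the MTCAP/MTMP layouts added above are ordinary mlx5_ifc bit
structs, driven through the existing MLX5_SET/MLX5_GET accessors and the
access-register command. A minimal sketch, assuming MLX5_REG_MTMP is the
MTMP register id from driver.h and the PRM's 0.125 degC temperature unit:

static int mtmp_read_mdeg(struct mlx5_core_dev *mdev, u16 sensor, int *mdeg)
{
	u32 out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
	int err;

	MLX5_SET(mtmp_reg, in, sensor_index, sensor);
	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTMP, 0, 0);
	if (err)
		return err;

	/* 16-bit signed reading in 0.125 degC units -> millidegrees */
	*mdeg = (s16)MLX5_GET(mtmp_reg, out, temperature) * 125;
	return 0;
}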
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 07d77323f78a..0596472923ad 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,31 +32,6 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
-struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_at_0[0x60];
-
- u8 ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
- u8 ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
- struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
- struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_at_0[0x80];
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
-};
-
struct mlx5_ifc_fpga_shell_caps_bits {
u8 max_num_qps[0x10];
u8 reserved_at_10[0x8];
@@ -387,89 +362,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_tls_extended_cap_bits {
- u8 aes_gcm_128[0x1];
- u8 aes_gcm_256[0x1];
- u8 reserved_at_2[0x1e];
- u8 reserved_at_20[0x20];
- u8 context_capacity_total[0x20];
- u8 context_capacity_rx[0x20];
- u8 context_capacity_tx[0x20];
- u8 reserved_at_a0[0x10];
- u8 tls_counter_size[0x10];
- u8 tls_counters_addr_low[0x20];
- u8 tls_counters_addr_high[0x20];
- u8 rx[0x1];
- u8 tx[0x1];
- u8 tls_v12[0x1];
- u8 tls_v13[0x1];
- u8 lro[0x1];
- u8 ipv6[0x1];
- u8 reserved_at_106[0x1a];
-};
-
-struct mlx5_ifc_ipsec_extended_cap_bits {
- u8 encapsulation[0x20];
-
- u8 reserved_0[0x12];
- u8 v2_command[0x1];
- u8 udp_encap[0x1];
- u8 rx_no_trailer[0x1];
- u8 ipv4_fragment[0x1];
- u8 ipv6[0x1];
- u8 esn[0x1];
- u8 lso[0x1];
- u8 transport_and_tunnel_mode[0x1];
- u8 tunnel_mode[0x1];
- u8 transport_mode[0x1];
- u8 ah_esp[0x1];
- u8 esp[0x1];
- u8 ah[0x1];
- u8 ipv4_options[0x1];
-
- u8 auth_alg[0x20];
-
- u8 enc_alg[0x20];
-
- u8 sa_cap[0x20];
-
- u8 reserved_1[0x10];
- u8 number_of_ipsec_counters[0x10];
-
- u8 ipsec_counters_addr_low[0x20];
- u8 ipsec_counters_addr_high[0x20];
-};
-
-struct mlx5_ifc_ipsec_counters_bits {
- u8 dec_in_packets[0x40];
-
- u8 dec_out_packets[0x40];
-
- u8 dec_bypass_packets[0x40];
-
- u8 enc_in_packets[0x40];
-
- u8 enc_out_packets[0x40];
-
- u8 enc_bypass_packets[0x40];
-
- u8 drop_dec_packets[0x40];
-
- u8 failed_auth_dec_packets[0x40];
-
- u8 drop_enc_packets[0x40];
-
- u8 success_add_sa[0x40];
-
- u8 fail_add_sa[0x40];
-
- u8 success_delete_sa[0x40];
-
- u8 fail_delete_sa[0x40];
-
- u8 dropped_cmd[0x40];
-};
-
enum {
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
@@ -486,131 +378,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits {
u8 reserved_at_c0[0x8];
u8 fpga_qpn[0x18];
};
-enum mlx5_ifc_fpga_ipsec_response_syndrome {
- MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
- MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
- MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
- MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_resp {
- __be32 syndrome;
- union {
- __be32 sw_sa_handle;
- __be32 flags;
- };
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_cmd_opcode {
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
- MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
-};
-
-enum mlx5_ifc_fpga_ipsec_cap {
- MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_cap {
- __be32 cmd;
- __be32 flags;
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_sa_flags {
- MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
- MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
- MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
- MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
- MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
- MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
- MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
-};
-
-enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
- MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_sa_v1 {
- __be32 cmd;
- u8 key_enc[32];
- u8 key_auth[32];
- __be32 sip[4];
- __be32 dip[4];
- union {
- struct {
- __be32 reserved;
- u8 salt_iv[8];
- __be32 salt;
- } __packed gcm;
- struct {
- u8 salt[16];
- } __packed cbc;
- };
- __be32 spi;
- __be32 sw_sa_handle;
- __be16 tfclen;
- u8 enc_mode;
- u8 reserved1[2];
- u8 flags;
- u8 reserved2[2];
-};
-
-struct mlx5_ifc_fpga_ipsec_sa {
- struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
- __be16 udp_sp;
- __be16 udp_dp;
- u8 reserved1[4];
- __be32 esn;
- __be16 vid; /* only 12 bits, rest is reserved */
- __be16 reserved2;
-} __packed;
-
-enum fpga_tls_cmds {
- CMD_SETUP_STREAM = 0x1001,
- CMD_TEARDOWN_STREAM = 0x1002,
- CMD_RESYNC_RX = 0x1003,
-};
-
-#define MLX5_TLS_1_2 (0)
-
-#define MLX5_TLS_ALG_AES_GCM_128 (0)
-#define MLX5_TLS_ALG_AES_GCM_256 (1)
-
-struct mlx5_ifc_tls_cmd_bits {
- u8 command_type[0x20];
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 tls_version[0x2];
- u8 reserved[0x1c];
- u8 swid[0x20];
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 tls_rcd_sn[0x40];
- u8 tcp_sn[0x20];
- u8 tls_implicit_iv[0x20];
- u8 tls_xor_iv[0x40];
- u8 encryption_key[0x100];
- u8 alg[4];
- u8 reserved2[0x1c];
- u8 reserved3[0x4a0];
-};
-
-struct mlx5_ifc_tls_resp_bits {
- u8 syndrome[0x20];
- u8 stream_id[0x20];
- u8 reserved[0x40];
-};
-
-#define MLX5_TLS_COMMAND_SIZE (0x100)
-
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
new file mode 100644
index 000000000000..40371c916cf9
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_IFC_VDPA_H_
+#define __MLX5_IFC_VDPA_H_
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0x20];
+
+ u8 desc_group_mkey[0x20];
+
+ u8 reserved_at_380[0x80];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11,
+ MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
+
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that this object will never
+ * have this many states.
+ */
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
+#endif /* __MLX5_IFC_VDPA_H_ */
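Editor's note: a hedged sketch of how these layouts are exercised. Virtio
net queue objects go through the generic object commands, so the opcode and
the MLX5_OBJ_TYPE_VIRTIO_NET_Q object type are assumed from mlx5_ifc.h
rather than this header:

static int create_vq(struct mlx5_core_dev *mdev, u16 idx, u16 size,
		     u32 *obj_id)
{
	u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_virtio_net_q_in)] = {};
	void *obj, *vq;
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_VIRTIO_NET_Q);
	obj = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
	vq = MLX5_ADDR_OF(virtio_net_q_object, obj, virtio_q_context);
	MLX5_SET(virtio_q, vq, queue_index, idx);
	MLX5_SET(virtio_q, vq, queue_size, size);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return err;
}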
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
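Editor's note: a minimal usage sketch, function names hypothetical. A
sub-function driver registers its unicast MAC in the PF's multi-physical-
function switch (MPFS) so received traffic is forwarded to it; with
CONFIG_MLX5_MPFS=n the stubs above turn both calls into no-ops:

static int example_attach_mac(struct mlx5_core_dev *mdev, u8 *mac)
{
	return mlx5_mpfs_add_mac(mdev, mac);	/* add L2 table entry */
}

static void example_detach_mac(struct mlx5_core_dev *mdev, u8 *mac)
{
	mlx5_mpfs_del_mac(mdev, mac);		/* drop entry on teardown */
}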
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 2d45a6af52a4..26092c78a985 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -45,6 +45,7 @@ enum mlx5_module_id {
MLX5_MODULE_ID_QSFP = 0xC,
MLX5_MODULE_ID_QSFP_PLUS = 0xD,
MLX5_MODULE_ID_QSFP28 = 0x11,
+ MLX5_MODULE_ID_DSFP = 0x1B,
};
enum mlx5_an_status {
@@ -55,13 +56,20 @@ enum mlx5_an_status {
MLX5_AN_LINK_DOWN = 4,
};
-#define MLX5_EEPROM_MAX_BYTES 32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
MLX5E_1000BASE_KX = 1,
@@ -107,8 +115,9 @@ enum mlx5e_ext_link_mode {
MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
- MLX5E_400GAUI_8 = 15,
+ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
MLX5E_EXT_LINK_MODES_NUMBER,
};
@@ -125,7 +134,21 @@ enum mlx5e_connector_type {
MLX5E_CONNECTOR_TYPE_NUMBER,
};
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+enum mlx5_ptys_width {
+ MLX5_PTYS_WIDTH_1X = 1 << 0,
+ MLX5_PTYS_WIDTH_2X = 1 << 1,
+ MLX5_PTYS_WIDTH_4X = 1 << 2,
+ MLX5_PTYS_WIDTH_8X = 1 << 3,
+ MLX5_PTYS_WIDTH_12X = 1 << 4,
+};
+
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
MLX5_GET(reg, out, field))
@@ -133,10 +156,9 @@ enum mlx5e_connector_type {
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port);
+
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
@@ -193,6 +215,8 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
bool *enabled);
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data);
+int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params, u8 *data);
int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
@@ -201,4 +225,14 @@ int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#endif /* __MLX5_PORT_H__ */
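Editor's note: a hedged sketch of the new by-page EEPROM query, constants
illustrative. Unlike mlx5_query_module_eeprom(), which encodes everything
into a flat offset, the params struct names module, page, bank and I2C
address explicitly:

static int example_read_upper_page0(struct mlx5_core_dev *mdev, u8 *buf)
{
	struct mlx5_module_eeprom_query_params params = {
		.i2c_address	= MLX5_I2C_ADDR_LOW,
		.page		= 0,
		.bank		= 0,
		.module_number	= 0,
		.offset		= MLX5_EEPROM_HIGH_PAGE_LENGTH,
		.size		= MLX5_EEPROM_HIGH_PAGE_LENGTH,
	};

	/* assumed to return bytes read on success, negative errno on error */
	return mlx5_query_module_eeprom_by_page(mdev, &params, buf);
}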
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 36492a1342cf..f0e55bf3ec8b 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -36,7 +36,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
-#define MLX5_INVALID_LKEY 0x100
+#define MLX5_TERMINATE_SCATTER_LIST_LKEY cpu_to_be32(0x100)
/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
@@ -162,6 +162,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -202,6 +204,9 @@ struct mlx5_wqe_fmr_seg {
struct mlx5_wqe_ctrl_seg {
__be32 opmod_idx_opcode;
__be32 qpn_ds;
+
+ struct_group(trailer,
+
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
@@ -211,6 +216,8 @@ struct mlx5_wqe_ctrl_seg {
__be32 umr_mkey;
__be32 tis_tir_num;
};
+
+ ); /* end of trailer group */
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@ -245,6 +252,11 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+enum {
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1),
+};
+
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@@ -253,11 +265,14 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
- __be32 rsvd2;
+ __be32 flow_table_metadata;
union {
struct {
__be16 sz;
- u8 start[2];
+ union {
+ u8 start[2];
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
} inline_hdr;
struct {
__be16 type;
@@ -466,6 +481,12 @@ struct mlx5_klm {
__be64 va;
};
+struct mlx5_ksm {
+ __be32 reserved;
+ __be32 key;
+ __be64 va;
+};
+
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
@@ -481,6 +502,16 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+struct mlx5_wqe_flow_update_ctrl_seg {
+ __be32 flow_idx_update;
+ __be32 dest_handle;
+ u8 reserved0[40];
+};
+
+struct mlx5_wqe_header_modify_argument_update_seg {
+ u8 argument_list[64];
+};
+
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
@@ -543,4 +574,11 @@ static inline const char *mlx5_qp_state_str(int state)
}
}
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+ return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
#endif /* MLX5_QP_H */
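Editor's note: a short sketch of the new mlx5_get_qp_default_ts() helper in
use, assuming the ts_format field of the qpc layout in mlx5_ifc.h; callers
stamp the chosen timestamp format into the QP context at creation time:

static void set_default_qp_ts_format(struct mlx5_core_dev *mdev, u32 *in)
{
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	/* free-running counter unless the device reports qp_ts_format */
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
}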
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 028f442530cf..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 4db87bcfce7b..c36cc6d82926 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,14 +36,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
-#define MLX5_VPORT_PF_PLACEHOLDER (1u)
-#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u)
-#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev))
-
-#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \
- MLX5_VPORT_UPLINK_PLACEHOLDER + \
- MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
-
#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
@@ -80,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
@@ -140,4 +133,6 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
+ u16 opmod);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
deleted file mode 100644
index 9c4bedc95504..000000000000
--- a/include/linux/mm-arch-hooks.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Generic mm no-op hooks.
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- */
-#ifndef _LINUX_MM_ARCH_HOOKS_H
-#define _LINUX_MM_ARCH_HOOKS_H
-
-#include <asm/mm-arch-hooks.h>
-
-#ifndef arch_remap
-static inline void arch_remap(struct mm_struct *mm,
- unsigned long old_start, unsigned long old_end,
- unsigned long new_start, unsigned long new_end)
-{
-}
-#define arch_remap arch_remap
-#endif
-
-#endif /* _LINUX_MM_ARCH_HOOKS_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ca6e6a81576b..2c0910bc3e4a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,9 +3,6 @@
#define _LINUX_MM_H
#include <linux/errno.h>
-
-#ifdef __KERNEL__
-
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
@@ -26,24 +23,27 @@
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
-#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
+#include <linux/kasan.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
-struct file_ra_state;
struct user_struct;
-struct writeback_control;
-struct bdi_writeback;
struct pt_regs;
+struct folio_batch;
+extern int sysctl_page_lock_unfairness;
+
+void mm_core_init(void);
void init_mm_internals(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
+#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
static inline void set_max_mapnr(unsigned long limit)
@@ -77,6 +77,7 @@ static inline void totalram_pages_add(long count)
extern void * high_memory;
extern int page_cluster;
+extern const int page_cluster_max;
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
@@ -98,17 +99,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#include <asm/page.h>
#include <asm/processor.h>
-/*
- * Architectures that support memory tagging (assigning tags to memory regions,
- * embedding these tags into addresses that point to these memory regions, and
- * checking that the memory and the pointer tags match on memory accesses)
- * redefine this macro to strip tags from pointers.
- * It's defined as noop for arcitectures that don't support memory tagging.
- */
-#ifndef untagged_addr
-#define untagged_addr(addr) (addr)
-#endif
-
#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
@@ -139,10 +129,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
* define their own version of this macro in <asm/pgtable.h>
*/
#if BITS_PER_LONG == 64
-/* This function must be updated when the size of struct page grows above 80
+/* This function must be updated when the size of struct page grows above 96
 * or reduces below 56. The idea is that the compiler optimizes out the switch()
* statement, and only leaves move/store instructions. Also the compiler can
- * combine write statments if they are both assignments and can be reordered,
+ * combine write statements if they are both assignments and can be reordered,
* this can result in several of the writes here being dropped.
*/
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
@@ -150,12 +140,18 @@ static inline void __mm_zero_struct_page(struct page *page)
{
unsigned long *_pp = (void *)page;
- /* Check that struct page is either 56, 64, 72, or 80 bytes */
+ /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
BUILD_BUG_ON(sizeof(struct page) & 7);
BUILD_BUG_ON(sizeof(struct page) < 56);
- BUILD_BUG_ON(sizeof(struct page) > 80);
+ BUILD_BUG_ON(sizeof(struct page) > 96);
switch (sizeof(struct page)) {
+ case 96:
+ _pp[11] = 0;
+ fallthrough;
+ case 88:
+ _pp[10] = 0;
+ fallthrough;
case 80:
_pp[9] = 0;
fallthrough;
@@ -214,15 +210,30 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
+#else
+#define nth_page(page,n) ((page) + (n))
+#define folio_page_idx(folio, p) ((p) - &(folio)->page)
+#endif
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+/* to align the pointer to the (prev) page boundary */
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
+
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+static inline struct folio *lru_to_folio(struct list_head *head)
+{
+ return list_entry((head)->prev, struct folio, lru);
+}
+
+void setup_initial_init_mm(void *start_code, void *end_code,
+ void *end_data, void *brk);
/*
* Linux kernel virtual memory manager primitives.
@@ -236,6 +247,8 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
+/* Use only if VMA has no other users */
+void __vm_area_free(struct vm_area_struct *vma);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -262,9 +275,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#ifdef CONFIG_MMU
#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
+#else /* CONFIG_MMU */
+#define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
+#define VM_UFFD_MISSING 0
+#endif /* CONFIG_MMU */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
@@ -302,11 +319,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
+#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -322,14 +341,27 @@ extern unsigned int kobjsize(const void *objp);
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+/*
+ * VM_SHADOW_STACK should not be set with VM_SHARED because core mm
+ * lacks support for it.
+ *
+ * These VMAs will get a single end guard page. This helps userspace protect
+ * itself from attacks. A single page is enough for current shadow stack archs
+ * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
+ * for more details on the guard size.
+ */
+# define VM_SHADOW_STACK VM_HIGH_ARCH_5
+#else
+# define VM_SHADOW_STACK VM_NONE
+#endif
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
-#elif defined(CONFIG_IA64)
-# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
# define VM_ARCH_CLEAR VM_SPARC_ADI
@@ -340,12 +372,41 @@ extern unsigned int kobjsize(const void *objp);
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
+#if defined(CONFIG_ARM64_MTE)
+# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
+# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
+#else
+# define VM_MTE VM_NONE
+# define VM_MTE_ALLOWED VM_NONE
+#endif
+
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
+# define VM_UFFD_MINOR_BIT 38
+# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
+#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+# define VM_UFFD_MINOR VM_NONE
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+
+/*
+ * This flag is used to connect VFIO to arch specific KVM code. It
+ * indicates that the memory under this VMA is safe for use with any
+ * non-cacheable memory type inside KVM. Some VFIO devices, on some
+ * platforms, are thought to be unsafe and can cause machine crashes
+ * if KVM does not lock down the memory type.
+ */
+#ifdef CONFIG_64BIT
+#define VM_ALLOW_ANY_UNCACHED_BIT 39
+#define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT)
+#else
+#define VM_ALLOW_ANY_UNCACHED VM_NONE
+#endif
+
/* Bits set in the VMA until the stack is in its final location */
-#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
@@ -365,10 +426,14 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
+#define VM_STACK_EARLY VM_GROWSDOWN
#else
#define VM_STACK VM_GROWSDOWN
+#define VM_STACK_EARLY 0
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
@@ -388,8 +453,8 @@ extern unsigned int kobjsize(const void *objp);
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
-/* This mask is used to clear all the VMA flags used by mlock */
-#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
+/* This mask represents all the VMA flag bits used by mlock */
+#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
@@ -401,51 +466,6 @@ extern unsigned int kobjsize(const void *objp);
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
-extern pgprot_t protection_map[16];
-
-/**
- * Fault flag definitions.
- *
- * @FAULT_FLAG_WRITE: Fault was a write fault.
- * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
- * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
- * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
- * @FAULT_FLAG_TRIED: The fault has been tried once.
- * @FAULT_FLAG_USER: The fault originated in userspace.
- * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
- * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
- * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
- *
- * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
- * whether we would allow page faults to retry by specifying these two
- * fault flags correctly. Currently there can be three legal combinations:
- *
- * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
- * this is the first try
- *
- * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
- * we've already tried at least once
- *
- * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
- *
- * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
- * be used. Note that page faults can be allowed to retry for multiple times,
- * in which case we'll have an initial fault with flags (a) then later on
- * continuous faults with flags (b). We should always try to detect pending
- * signals before a retry to make sure the continuous page faults can still be
- * interrupted if necessary.
- */
-#define FAULT_FLAG_WRITE 0x01
-#define FAULT_FLAG_MKWRITE 0x02
-#define FAULT_FLAG_ALLOW_RETRY 0x04
-#define FAULT_FLAG_RETRY_NOWAIT 0x08
-#define FAULT_FLAG_KILLABLE 0x10
-#define FAULT_FLAG_TRIED 0x20
-#define FAULT_FLAG_USER 0x40
-#define FAULT_FLAG_REMOTE 0x80
-#define FAULT_FLAG_INSTRUCTION 0x100
-#define FAULT_FLAG_INTERRUPTIBLE 0x200
/*
* The default fault flags that should be used by most of the
@@ -457,6 +477,7 @@ extern pgprot_t protection_map[16];
/**
* fault_flag_allow_retry_first - check ALLOW_RETRY the first time
+ * @flags: Fault flags.
*
* This is mostly used for places where we want to try to avoid taking
* the mmap_lock for too long a time when waiting for another condition
@@ -467,7 +488,7 @@ extern pgprot_t protection_map[16];
* Return: true if the page fault allows retry and this is the first
* attempt of the fault handling; false otherwise.
*/
-static inline bool fault_flag_allow_retry_first(unsigned int flags)
+static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
return (flags & FAULT_FLAG_ALLOW_RETRY) &&
(!(flags & FAULT_FLAG_TRIED));
@@ -483,7 +504,8 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags)
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
{ FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
- { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }
+ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \
+ { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" }
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -496,17 +518,26 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags)
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
- struct vm_area_struct *vma; /* Target VMA */
- unsigned int flags; /* FAULT_FLAG_xxx flags */
- gfp_t gfp_mask; /* gfp mask to be used for allocations */
- pgoff_t pgoff; /* Logical page offset based on vma */
- unsigned long address; /* Faulting virtual address */
+ const struct {
+ struct vm_area_struct *vma; /* Target VMA */
+ gfp_t gfp_mask; /* gfp mask to be used for allocations */
+ pgoff_t pgoff; /* Logical page offset based on vma */
+ unsigned long address; /* Faulting virtual address - masked */
+ unsigned long real_address; /* Faulting virtual address - unmasked */
+ };
+ enum fault_flag flags; /* FAULT_FLAG_xxx flags
+ * XXX: should really be 'const' */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
- pte_t orig_pte; /* Value of PTE at the time of fault */
+ union {
+ pte_t orig_pte; /* Value of PTE at the time of fault */
+ pmd_t orig_pmd; /* Value of PMD at the time of fault,
+ * used by PMD fault only.
+ */
+ };
struct page *cow_page; /* Page handler may use for COW fault */
struct page *page; /* ->fault handlers should return a
@@ -524,21 +555,14 @@ struct vm_fault {
* is not NULL, otherwise pmd.
*/
pgtable_t prealloc_pte; /* Pre-allocated pte page table.
- * vm_ops->map_pages() calls
- * alloc_set_pte() from atomic context.
+ * vm_ops->map_pages() sets up a page
+ * table from atomic context.
* do_fault_around() pre-allocates
* page table to avoid allocation from
* atomic context.
*/
};
-/* page entry size for vm->huge_fault() */
-enum page_entry_size {
- PE_SIZE_PTE = 0,
- PE_SIZE_PMD,
- PE_SIZE_PUD,
-};
-
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -546,13 +570,24 @@ enum page_entry_size {
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
+ /**
+ * @close: Called when the VMA is being removed from the MM.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
void (*close)(struct vm_area_struct * area);
- int (*split)(struct vm_area_struct * area, unsigned long addr);
- int (*mremap)(struct vm_area_struct * area);
+ /* Called any time before splitting to check if it's allowed */
+ int (*may_split)(struct vm_area_struct *area, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *area);
+ /*
+ * Called by mprotect() to make driver-specific permission
+ * checks before mprotect() is finalised. The VMA must not
+ * be modified. Returns 0 if mprotect() can proceed.
+ */
+ int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
- vm_fault_t (*huge_fault)(struct vm_fault *vmf,
- enum page_entry_size pe_size);
- void (*map_pages)(struct vm_fault *vmf,
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
+ vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
@@ -564,7 +599,8 @@ struct vm_operations_struct {
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
- * for use by special VMAs that can switch between memory and hardware
+ * for use by special VMAs. See also generic_access_phys() for a generic
+ * implementation useful for any iomem mapping.
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
@@ -595,7 +631,7 @@ struct vm_operations_struct {
* policy.
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
#endif
/*
* Called by vm_normal_page() for special PTEs to find the
@@ -606,14 +642,251 @@ struct vm_operations_struct {
unsigned long addr);
};
-static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+#ifdef CONFIG_NUMA_BALANCING
+static inline void vma_numab_state_init(struct vm_area_struct *vma)
+{
+ vma->numab_state = NULL;
+}
+static inline void vma_numab_state_free(struct vm_area_struct *vma)
+{
+ kfree(vma->numab_state);
+}
+#else
+static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
+static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Try to read-lock a vma. The function is allowed to occasionally yield a
+ * false locked result to avoid performance overhead, in which case we fall
+ * back to using mmap_lock. The function should never yield a false
+ * unlocked result.
+ */
+static inline bool vma_start_read(struct vm_area_struct *vma)
+{
+ /*
+ * Check before locking. A race might cause a false locked result.
+ * We can use READ_ONCE() for the mm_lock_seq here, and don't need
+ * ACQUIRE semantics, because this is just a lockless check whose result
+ * we don't rely on for anything - the mm_lock_seq read against which we
+ * need ordering is below.
+ */
+ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
+ return false;
+
+ if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
+ return false;
+
+ /*
+	 * Overflow might produce a false locked result.
+	 * A false unlocked result is impossible because we modify and check
+ * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
+ * modification invalidates all existing locks.
+ *
+ * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
+ * racing with vma_end_write_all(), we only start reading from the VMA
+ * after it has been unlocked.
+ * This pairs with RELEASE semantics in vma_end_write_all().
+ */
+ if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
+ up_read(&vma->vm_lock->lock);
+ return false;
+ }
+ return true;
+}
+
+static inline void vma_end_read(struct vm_area_struct *vma)
+{
+ rcu_read_lock(); /* keeps vma alive till the end of up_read */
+ up_read(&vma->vm_lock->lock);
+ rcu_read_unlock();
+}
+
+/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+
+ /*
+	 * The current task is holding mmap_write_lock, so neither
+	 * vma->vm_lock_seq nor mm->mm_lock_seq can be concurrently modified.
+ */
+ *mm_lock_seq = vma->vm_mm->mm_lock_seq;
+ return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+/*
+ * Begin writing to a VMA.
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ */
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return;
+
+ down_write(&vma->vm_lock->lock);
+ /*
+ * We should use WRITE_ONCE() here because we can have concurrent reads
+ * from the early lockless pessimistic check in vma_start_read().
+ * We don't really care about the correctness of that early check, but
+ * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+ */
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
+ up_write(&vma->vm_lock->lock);
+}
+
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+}
+
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+ if (!rwsem_is_locked(&vma->vm_lock->lock))
+ vma_assert_write_locked(vma);
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+{
+	/* When detaching, the vma should be write-locked */
+ if (detached)
+ vma_assert_write_locked(vma);
+ vma->detached = detached;
+}
+
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_end_read(vmf->vma);
+ else
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_assert_locked(vmf->vma);
+ else
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+
+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address);
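
Taken together, vma_start_read()/vma_end_read() and lock_vma_under_rcu() let a page-fault handler try the per-VMA lock first and fall back to mmap_lock. A minimal sketch of that pattern, loosely modelled on the arch fault handlers (mm, address, flags and regs are assumed from the surrounding fault context; the exact fallback details vary by architecture):

	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);	/* read-locks the VMA on success */
	if (vma) {
		fault = handle_mm_fault(vma, address,
					flags | FAULT_FLAG_VMA_LOCK, regs);
		/* the RETRY/COMPLETED paths release the lock themselves */
		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
			vma_end_read(vma);
	} else {
		mmap_read_lock(mm);	/* fallback: coarse-grained lock */
		/* ... find the VMA and handle the fault as before ... */
		mmap_read_unlock(mm);
	}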
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+static inline bool vma_start_read(struct vm_area_struct *vma)
+ { return false; }
+static inline void vma_end_read(struct vm_area_struct *vma) {}
+static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+ { mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_mark_detached(struct vm_area_struct *vma,
+ bool detached) {}
+
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+ mmap_assert_locked(vma->vm_mm);
+}
+
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(struct vm_fault *vmf)
{
- static const struct vm_operations_struct dummy_vm_ops = {};
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
+extern const struct vm_operations_struct vma_dummy_vm_ops;
+
+/*
+ * WARNING: vma_init does not initialize vma->vm_lock.
+ * Use vm_area_alloc()/vm_area_free() if vma needs locking.
+ */
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
- vma->vm_ops = &dummy_vm_ops;
+ vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
+ vma_mark_detached(vma, false);
+ vma_numab_state_init(vma);
+}
+
+/* Use when VMA is not part of the VMA tree and needs no locking */
+static inline void vm_flags_init(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ ACCESS_PRIVATE(vma, __vm_flags) = flags;
+}
+
+/*
+ * Use when the VMA is part of the VMA tree and modifications need
+ * coordination. Note: vm_flags_reset() and vm_flags_reset_once() do not
+ * lock the vma; it must be write-locked explicitly beforehand.
+ */
+static inline void vm_flags_reset(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_assert_write_locked(vma);
+ vm_flags_init(vma, flags);
+}
+
+static inline void vm_flags_reset_once(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_assert_write_locked(vma);
+ WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
+}
+
+static inline void vm_flags_set(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ ACCESS_PRIVATE(vma, __vm_flags) |= flags;
+}
+
+static inline void vm_flags_clear(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
+}
+
+/*
+ * Use only if VMA is not part of the VMA tree or has no other users and
+ * therefore needs no locking.
+ */
+static inline void __vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
+}
+
+/*
+ * Use only when the order of set/clear operations is unimportant, otherwise
+ * use vm_flags_{set|clear} explicitly.
+ */
+static inline void vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vma_start_write(vma);
+ __vm_flags_mod(vma, set, clear);
}
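
As a rough illustration of the intended split (hypothetical caller, not part of this patch): vm_flags_init() is for a VMA that is still private to the caller, while vm_flags_set()/vm_flags_clear() are for a VMA already visible in the tree and therefore write-lock it first:

	struct vm_area_struct *vma = vm_area_alloc(mm);	/* calls vma_init() */

	if (!vma)
		return -ENOMEM;
	vm_flags_init(vma, VM_READ | VM_MAYREAD);  /* not in the tree: no lock */

	/* ... link the VMA into the tree under mmap_write_lock ... */

	vm_flags_set(vma, VM_LOCKED);	/* in the tree: write-locks the VMA */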
static inline void vma_set_anonymous(struct vm_area_struct *vma)
@@ -626,6 +899,31 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
return !vma->vm_ops;
}
+/*
+ * Indicate if the VMA is a heap for the given task; for
+ * /proc/PID/maps that is the heap of the main task.
+ */
+static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
+{
+ return vma->vm_start < vma->vm_mm->brk &&
+ vma->vm_end > vma->vm_mm->start_brk;
+}
+
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
+{
+ /*
+ * We make no effort to guess what a given thread considers to be
+	 * its "stack". It's not even well-defined for programs written
+	 * in languages like Go.
+ */
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
+}
+
static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -656,14 +954,121 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline bool is_shared_maywrite(vm_flags_t vm_flags)
+{
+ return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
+ (VM_SHARED | VM_MAYWRITE);
+}
+
+static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+{
+ return is_shared_maywrite(vma->vm_flags);
+}
+
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max - 1);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses mas_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return mas_find(&vmi->mas, ULONG_MAX);
+}
+
+static inline
+struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
+{
+ return mas_next_range(&vmi->mas, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline
+struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
+{
+ return mas_prev_range(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+ return vmi->mas.last + 1;
+}
+
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+ unsigned long count)
+{
+ return mas_expected_entries(&vmi->mas, count);
+}
+
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+ mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store(&vmi->mas, vma);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+ mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+ mas_set(&vmi->mas, addr);
+}
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
+
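A typical traversal with the iterator, as a sketch (VMA_ITERATOR is the declaration helper for struct vma_iterator used elsewhere in this header; the mmap_lock must be held at least for read):

	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
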
#ifdef CONFIG_SHMEM
/*
 * vma_is_shmem() is not inline because it is used only by slow
 * paths in userfault.
*/
bool vma_is_shmem(struct vm_area_struct *vma);
+bool vma_is_anon_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
#endif
int vma_is_stack_for_current(struct vm_area_struct *vma);
@@ -674,6 +1079,38 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
+/*
+ * compound_order() can be called without holding a reference, which means
+ * that niceties like page_folio() don't work. These callers should be
+ * prepared to handle wild return values. For example, PG_head may be
+ * set before the order is initialised, or this may be a tail page.
+ * See compaction.c for some good examples.
+ */
+static inline unsigned int compound_order(struct page *page)
+{
+ struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags))
+ return 0;
+ return folio->_flags_1 & 0xff;
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages. See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_flags_1 & 0xff;
+}
+
#include <linux/huge_mm.h>
/*
@@ -698,17 +1135,29 @@ static inline int put_page_testzero(struct page *page)
return page_ref_dec_and_test(page);
}
+static inline int folio_put_testzero(struct folio *folio)
+{
+ return put_page_testzero(&folio->page);
+}
+
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
* This can be called when MMU is off so it must not access
* any of the virtual mappings.
*/
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
+static inline struct folio *folio_get_nontail_page(struct page *page)
+{
+ if (unlikely(!get_page_unless_zero(page)))
+ return NULL;
+ return (struct folio *)page;
+}
+
extern int page_is_ram(unsigned long pfn);
enum {
@@ -730,11 +1179,6 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-
-#ifndef is_ioremap_addr
-#define is_ioremap_addr(x) is_vmalloc_addr(x)
-#endif
-
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
@@ -749,53 +1193,16 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
- return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline void *kvzalloc(size_t size, gfp_t flags)
-{
- return kvmalloc(size, flags | __GFP_ZERO);
-}
-
-static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return kvmalloc(bytes, flags);
-}
-
-static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
-{
- return kvmalloc_array(n, size, flags | __GFP_ZERO);
-}
-
-extern void kvfree(const void *addr);
-extern void kvfree_sensitive(const void *addr, size_t len);
-
-static inline int head_mapcount(struct page *head)
-{
- return atomic_read(compound_mapcount_ptr(head)) + 1;
-}
-
/*
- * Mapcount of compound page as a whole, does not include mapped sub-pages.
- *
- * Must be called only for compound pages or any their tail sub-pages.
+ * How many times the entire folio is mapped as a single unit (eg by a
+ * PMD or PUD entry). This is probably not what you want, except for
+ * debugging purposes - it does not include PTE-mapped sub-pages; look
+ * at folio_mapcount() or page_mapcount() instead.
*/
-static inline int compound_mapcount(struct page *page)
+static inline int folio_entire_mapcount(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageCompound(page), page);
- page = compound_head(page);
- return head_mapcount(page);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ return atomic_read(&folio->_entire_mapcount) + 1;
}
/*
@@ -808,132 +1215,108 @@ static inline void page_mapcount_reset(struct page *page)
atomic_set(&(page)->_mapcount, -1);
}
-int __page_mapcount(struct page *page);
-
-/*
- * Mapcount of 0-order page; when compound sub-page, includes
- * compound_mapcount().
+/**
+ * page_mapcount() - Number of times this precise page is mapped.
+ * @page: The page.
*
- * Result is undefined for pages which cannot be mapped into userspace.
+ * The number of times this page is mapped. If this page is part of
+ * a large folio, it includes the number of times this page is mapped
+ * as part of that folio.
+ *
+ * The result is undefined for pages which cannot be mapped into userspace.
* For example SLAB or special types of pages. See function page_has_type().
- * They use this place in struct page differently.
+ * They use this field in struct page differently.
*/
static inline int page_mapcount(struct page *page)
{
+ int mapcount = atomic_read(&page->_mapcount) + 1;
+
if (unlikely(PageCompound(page)))
- return __page_mapcount(page);
- return atomic_read(&page->_mapcount) + 1;
-}
+ mapcount += folio_entire_mapcount(page_folio(page));
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
-#else
-static inline int total_mapcount(struct page *page)
-{
- return page_mapcount(page);
-}
-static inline int page_trans_huge_mapcount(struct page *page,
- int *total_mapcount)
-{
- int mapcount = page_mapcount(page);
- if (total_mapcount)
- *total_mapcount = mapcount;
return mapcount;
}
-#endif
-
-static inline struct page *virt_to_head_page(const void *x)
-{
- struct page *page = virt_to_page(x);
-
- return compound_head(page);
-}
-void __put_page(struct page *page);
-
-void put_pages_list(struct list_head *pages);
-
-void split_page(struct page *page, unsigned int order);
+int folio_total_mapcount(struct folio *folio);
-/*
- * Compound pages have a destructor function. Provide a
- * prototype for that function and accessor functions.
- * These are _only_ valid on the head of a compound page.
+/**
+ * folio_mapcount() - Calculate the number of mappings of this folio.
+ * @folio: The folio.
+ *
+ * A large folio tracks both how many times the entire folio is mapped,
+ * and how many times each individual page in the folio is mapped.
+ * This function calculates the total number of times the folio is
+ * mapped.
+ *
+ * Return: The number of times this folio is mapped.
*/
-typedef void compound_page_dtor(struct page *);
-
-/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
-enum compound_dtor_id {
- NULL_COMPOUND_DTOR,
- COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
- HUGETLB_PAGE_DTOR,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- TRANSHUGE_PAGE_DTOR,
-#endif
- NR_COMPOUND_DTORS,
-};
-extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
-
-static inline void set_compound_page_dtor(struct page *page,
- enum compound_dtor_id compound_dtor)
-{
- VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
- page[1].compound_dtor = compound_dtor;
-}
-
-static inline void destroy_compound_page(struct page *page)
-{
- VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
- compound_page_dtors[page[1].compound_dtor](page);
-}
-
-static inline unsigned int compound_order(struct page *page)
+static inline int folio_mapcount(struct folio *folio)
{
- if (!PageHead(page))
- return 0;
- return page[1].compound_order;
+ if (likely(!folio_test_large(folio)))
+ return atomic_read(&folio->_mapcount) + 1;
+ return folio_total_mapcount(folio);
}
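
For instance, a PMD-mapped THP that is mapped once as a whole, with one of its subpages additionally mapped by a PTE, would under this scheme have folio_entire_mapcount() == 1 but folio_mapcount() == 2.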
-static inline bool hpage_pincount_available(struct page *page)
+static inline bool folio_large_is_mapped(struct folio *folio)
{
/*
- * Can the page->hpage_pinned_refcount field be used? That field is in
- * the 3rd page of the compound page, so the smallest (2-page) compound
- * pages cannot support it.
+ * Reading _entire_mapcount below could be omitted if hugetlb
+ * participated in incrementing nr_pages_mapped when compound mapped.
*/
- page = compound_head(page);
- return PageCompound(page) && compound_order(page) > 1;
+ return atomic_read(&folio->_nr_pages_mapped) > 0 ||
+ atomic_read(&folio->_entire_mapcount) >= 0;
}
-static inline int head_pincount(struct page *head)
+/**
+ * folio_mapped - Is this folio mapped into userspace?
+ * @folio: The folio.
+ *
+ * Return: True if any page in this folio is referenced by user page tables.
+ */
+static inline bool folio_mapped(struct folio *folio)
{
- return atomic_read(compound_pincount_ptr(head));
+ if (likely(!folio_test_large(folio)))
+ return atomic_read(&folio->_mapcount) >= 0;
+ return folio_large_is_mapped(folio);
}
-static inline int compound_pincount(struct page *page)
+/*
+ * Return true if this page is mapped into pagetables.
+ * For compound page it returns true if any sub-page of compound page is mapped,
+ * even if this particular sub-page is not itself mapped by any PTE or PMD.
+ */
+static inline bool page_mapped(struct page *page)
{
- VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
- page = compound_head(page);
- return head_pincount(page);
+ if (likely(!PageCompound(page)))
+ return atomic_read(&page->_mapcount) >= 0;
+ return folio_large_is_mapped(page_folio(page));
}
-static inline void set_compound_order(struct page *page, unsigned int order)
+static inline struct page *virt_to_head_page(const void *x)
{
- page[1].compound_order = order;
- page[1].compound_nr = 1U << order;
+ struct page *page = virt_to_page(x);
+
+ return compound_head(page);
}
-/* Returns the number of pages in this potentially compound page. */
-static inline unsigned long compound_nr(struct page *page)
+static inline struct folio *virt_to_folio(const void *x)
{
- if (!PageHead(page))
- return 1;
- return page[1].compound_nr;
+ struct page *page = virt_to_page(x);
+
+ return page_folio(page);
}
+void __folio_put(struct folio *folio);
+
+void put_pages_list(struct list_head *pages);
+
+void split_page(struct page *page, unsigned int order);
+void folio_copy(struct folio *dst, struct folio *src);
+
+unsigned long nr_free_buffer_pages(void);
+
+void destroy_large_folio(struct folio *folio);
+
/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
@@ -946,7 +1329,26 @@ static inline unsigned int page_shift(struct page *page)
return PAGE_SHIFT + compound_order(page);
}
-void free_compound_page(struct page *page);
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return compound_order(page);
+}
+
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+ return PAGE_SIZE << thp_order(page);
+}
#ifdef CONFIG_MMU
/*
@@ -958,13 +1360,15 @@ void free_compound_page(struct page *page);
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
return pte;
}
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct page *page, unsigned int nr, unsigned long addr);
+
vm_fault_t finish_fault(struct vm_fault *vmf);
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -1027,135 +1431,53 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
* back into memory.
*/
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
-#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
-#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
-#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
-#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
-
-/*
- * Define the bit shifts to access each section. For non-existent
- * sections we define the shift as 0; that plus a 0 mask ensures
- * the compiler will optimise away reference to them.
- */
-#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
-#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
-#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
-#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
-
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
- SECTIONS_PGOFF : ZONES_PGOFF)
-#else
-#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
- NODES_PGOFF : ZONES_PGOFF)
-#endif
-
-#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-
-#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
-#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
-#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
-#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
-#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
-
-static inline enum zone_type page_zonenum(const struct page *page)
-{
- ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
-{
- return page_zonenum(page) == ZONE_DEVICE;
-}
-extern void memmap_init_zone_device(struct zone *, unsigned long,
- unsigned long, struct dev_pagemap *);
-#else
-static inline bool is_zone_device_page(const struct page *page)
-{
- return false;
-}
-#endif
-
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-void free_devmap_managed_page(struct page *page);
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool page_is_devmap_managed(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
if (!static_branch_unlikely(&devmap_managed_key))
return false;
if (!is_zone_device_page(page))
return false;
- switch (page->pgmap->type) {
- case MEMORY_DEVICE_PRIVATE:
- case MEMORY_DEVICE_FS_DAX:
- return true;
- default:
- break;
- }
- return false;
+ return __put_devmap_managed_page_refs(page, refs);
}
-
-void put_devmap_managed_page(struct page *page);
-
-#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
+#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
return false;
}
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline void put_devmap_managed_page(struct page *page)
+static inline bool put_devmap_managed_page(struct page *page)
{
+ return put_devmap_managed_page_refs(page, 1);
}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool is_device_private_page(const struct page *page)
-{
- return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
- IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
-}
+/* 127: arbitrary random number, small enough to assemble well */
+#define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
-static inline bool is_pci_p2pdma_page(const struct page *page)
+/**
+ * folio_get - Increment the reference count on a folio.
+ * @folio: The folio.
+ *
+ * Context: May be called in any context, as long as you know that
+ * you have a refcount on the folio. If you do not already have one,
+ * folio_try_get() may be the right interface for you to use.
+ */
+static inline void folio_get(struct folio *folio)
{
- return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
- IS_ENABLED(CONFIG_PCI_P2PDMA) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+ VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
+ folio_ref_inc(folio);
}
-/* 127: arbitrary random number, small enough to assemble well */
-#define page_ref_zero_or_close_to_overflow(page) \
- ((unsigned int) page_ref_count(page) + 127u <= 127u)
-
static inline void get_page(struct page *page)
{
- page = compound_head(page);
- /*
- * Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_refcount.
- */
- VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
- page_ref_inc(page);
+ folio_get(page_folio(page));
}
-bool __must_check try_grab_page(struct page *page, unsigned int flags);
-
static inline __must_check bool try_get_page(struct page *page)
{
page = compound_head(page);
@@ -1165,23 +1487,95 @@ static inline __must_check bool try_get_page(struct page *page)
return true;
}
+/**
+ * folio_put - Decrement the reference count on a folio.
+ * @folio: The folio.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put() unless you can be sure that it wasn't the
+ * last reference.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put(struct folio *folio)
+{
+ if (folio_put_testzero(folio))
+ __folio_put(folio);
+}
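
A minimal pairing sketch, assuming the caller already holds one reference on the folio:

	folio_get(folio);	/* take an extra reference for another user */
	/* ... hand the folio to another context ... */
	folio_put(folio);	/* drop it; may free if this was the last ref */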
+
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
+{
+ if (folio_ref_sub_and_test(folio, refs))
+ __folio_put(folio);
+}
+
+void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
+
+/*
+ * union release_pages_arg - an array of pages or folios
+ *
+ * release_pages() releases a simple array of multiple pages, and
+ * accepts various different forms of said page array: either
+ * a regular old boring array of pages, an array of folios, or
+ * an array of encoded page pointers.
+ *
+ * The transparent union syntax for this kind of "any of these
+ * argument types" is all kinds of ugly, so look away.
+ */
+typedef union {
+ struct page **pages;
+ struct folio **folios;
+ struct encoded_page **encoded_pages;
+} release_pages_arg __attribute__ ((__transparent_union__));
+
+void release_pages(release_pages_arg, int nr);
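
Thanks to the transparent union, the same call site works for each array type; an illustrative sketch (the three arrays are hypothetical):

	release_pages(pages, nr);		/* struct page ** */
	release_pages(folios, nr);		/* struct folio ** */
	release_pages(encoded_pages, nr);	/* struct encoded_page ** */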
+
+/**
+ * folios_put - Decrement the reference count on an array of folios.
+ * @folios: The folios.
+ *
+ * Like folio_put(), but for a batch of folios. This is more efficient
+ * than writing the loop yourself as it will optimise the locks which need
+ * to be taken if the folios are freed. The folios batch is returned
+ * empty and ready to be reused for another batch; there is no need to
+ * reinitialise it.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folios_put(struct folio_batch *folios)
+{
+ folios_put_refs(folios, NULL);
+}
+
static inline void put_page(struct page *page)
{
- page = compound_head(page);
+ struct folio *folio = page_folio(page);
/*
- * For devmap managed pages we need to catch refcount transition from
- * 2 to 1, when refcount reach one it means the page is free and we
- * need to inform the device driver through callback. See
- * include/linux/memremap.h and HMM for details.
+ * For some devmap managed pages we need to catch refcount transition
+ * from 2 to 1:
*/
- if (page_is_devmap_managed(page)) {
- put_devmap_managed_page(page);
+ if (put_devmap_managed_page(&folio->page))
return;
- }
-
- if (put_page_testzero(page))
- __put_page(page);
+ folio_put(folio);
}
/*
@@ -1210,60 +1604,38 @@ static inline void put_page(struct page *page)
* applications that don't have huge page reference counts, this won't be an
* issue.
*
- * Locking: the lockless algorithm described in page_cache_get_speculative()
- * and page_cache_gup_pin_speculative() provides safe operation for
- * get_user_pages and page_mkclean and other calls that race to set up page
- * table entries.
+ * Locking: the lockless algorithm described in folio_try_get_rcu()
+ * provides safe operation for get_user_pages(), page_mkclean() and
+ * other calls that race to set up page table entries.
*/
#define GUP_PIN_COUNTING_BIAS (1U << 10)
void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
+void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
+ bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
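
Pins taken through the pin_user_pages*() family must be dropped with the unpin_user_page*() helpers rather than put_page(). A minimal sketch of the expected pattern (NR_PAGES, start and the DMA step are assumptions):

	struct page *pages[NR_PAGES];
	long got;

	got = pin_user_pages_fast(start, NR_PAGES,
				  FOLL_WRITE | FOLL_LONGTERM, pages);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... DMA into the pinned pages ... */

	unpin_user_pages_dirty_lock(pages, got, true);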
-/**
- * page_maybe_dma_pinned() - report if a page is pinned for DMA.
- *
- * This function checks if a page has been pinned via a call to
- * pin_user_pages*().
- *
- * For non-huge pages, the return value is partially fuzzy: false is not fuzzy,
- * because it means "definitely not pinned for DMA", but true means "probably
- * pinned for DMA, but possibly a false positive due to having at least
- * GUP_PIN_COUNTING_BIAS worth of normal page references".
- *
- * False positives are OK, because: a) it's unlikely for a page to get that many
- * refcounts, and b) all the callers of this routine are expected to be able to
- * deal gracefully with a false positive.
- *
- * For huge pages, the result will be exactly correct. That's because we have
- * more tracking data available: the 3rd struct page in the compound page is
- * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS
- * scheme).
- *
- * For more information, please see Documentation/core-api/pin_user_pages.rst.
- *
- * @page: pointer to page to be queried.
- * @Return: True, if it is likely that the page has been "dma-pinned".
- * False, if the page is definitely not dma-pinned.
- */
-static inline bool page_maybe_dma_pinned(struct page *page)
+static inline bool is_cow_mapping(vm_flags_t flags)
{
- if (hpage_pincount_available(page))
- return compound_pincount(page) > 0;
+ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+#ifndef CONFIG_MMU
+static inline bool is_nommu_shared_mapping(vm_flags_t flags)
+{
/*
- * page_ref_count() is signed. If that refcount overflows, then
- * page_ref_count() returns a negative value, and callers will avoid
- * further incrementing the refcount.
- *
- * Here, for that overflow case, use the signed bit to count a little
- * bit higher via unsigned math, and thus still get an accurate result.
+ * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
+ * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
+ * a file mapping. R/O MAP_PRIVATE mappings might still modify
+ * underlying memory if ptrace is active, so this is only possible if
+ * ptrace does not apply. Note that there is no mprotect() to upgrade
+ * write permissions later.
*/
- return ((unsigned int)page_ref_count(compound_head(page))) >=
- GUP_PIN_COUNTING_BIAS;
+ return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
+#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
@@ -1283,17 +1655,32 @@ static inline int page_zone_id(struct page *page)
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
-extern int page_to_nid(const struct page *page);
+int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
- struct page *p = (struct page *)page;
-
- return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
+ return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
+static inline int folio_nid(const struct folio *folio)
+{
+ return page_to_nid(&folio->page);
+}
+
#ifdef CONFIG_NUMA_BALANCING
+/* page access time bits need to hold at least 4 seconds */
+#define PAGE_ACCESS_TIME_MIN_BITS 12
+#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
+#define PAGE_ACCESS_TIME_BUCKETS \
+ (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
+#else
+#define PAGE_ACCESS_TIME_BUCKETS 0
+#endif
+
+#define PAGE_ACCESS_TIME_MASK \
+ (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
+
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
@@ -1331,41 +1718,65 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
- return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
+ return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page->_last_cpupid;
+ return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
-extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+
+static inline int folio_xchg_access_time(struct folio *folio, int time)
+{
+ int last_time;
+
+ last_time = folio_xchg_last_cpupid(folio,
+ time >> PAGE_ACCESS_TIME_BUCKETS);
+ return last_time << PAGE_ACCESS_TIME_BUCKETS;
+}
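
As a worked example: if LAST_CPUPID_SHIFT were 8, PAGE_ACCESS_TIME_BUCKETS would be 12 - 8 = 4, so access times are stored at a granularity of 2^4 = 16 units and the narrow field still covers the required 2^12 range (about 4 seconds at HZ=1000).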
+
+static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+{
+ unsigned int pid_bit;
+
+ pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
+ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
+ __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
+ }
+}
#else /* !CONFIG_NUMA_BALANCING */
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
+{
+ return folio_nid(folio); /* XXX */
+}
+
+static inline int folio_xchg_access_time(struct folio *folio, int time)
{
- return page_to_nid(page); /* XXX */
+ return 0;
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}
static inline int cpupid_to_nid(int cpupid)
@@ -1401,25 +1812,56 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
return false;
}
+
+static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+{
+}
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
+ * setting tags for all pages to native kernel tag value 0xff, as the default
+ * value 0x00 maps to 0xff.
+ */
+
static inline u8 page_kasan_tag(const struct page *page)
{
- return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ u8 tag = KASAN_TAG_KERNEL;
+
+ if (kasan_enabled()) {
+ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag ^= 0xff;
+ }
+
+ return tag;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
- page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
- page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ unsigned long old_flags, flags;
+
+ if (!kasan_enabled())
+ return;
+
+ tag ^= 0xff;
+ old_flags = READ_ONCE(page->flags);
+ do {
+ flags = old_flags;
+ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
{
- page_kasan_tag_set(page, 0xff);
+ if (kasan_enabled())
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
-#else
+
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
static inline u8 page_kasan_tag(const struct page *page)
{
return 0xff;
@@ -1427,7 +1869,8 @@ static inline u8 page_kasan_tag(const struct page *page)
static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
-#endif
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline struct zone *page_zone(const struct page *page)
{
@@ -1439,6 +1882,16 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
+static inline struct zone *folio_zone(const struct folio *folio)
+{
+ return page_zone(&folio->page);
+}
+
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
+{
+ return page_pgdat(&folio->page);
+}
+
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
@@ -1452,6 +1905,140 @@ static inline unsigned long page_to_section(const struct page *page)
}
#endif
+/**
+ * folio_pfn - Return the Page Frame Number of a folio.
+ * @folio: The folio.
+ *
+ * A folio may contain multiple pages. The pages have consecutive
+ * Page Frame Numbers.
+ *
+ * Return: The Page Frame Number of the first page in the folio.
+ */
+static inline unsigned long folio_pfn(struct folio *folio)
+{
+ return page_to_pfn(&folio->page);
+}
+
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+ return page_folio(pfn_to_page(pfn));
+}
+
+/**
+ * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
+ * @folio: The folio.
+ *
+ * This function checks if a folio has been pinned via a call to
+ * a function in the pin_user_pages() family.
+ *
+ * For small folios, the return value is partially fuzzy: false is not fuzzy,
+ * because it means "definitely not pinned for DMA", but true means "probably
+ * pinned for DMA, but possibly a false positive due to having at least
+ * GUP_PIN_COUNTING_BIAS worth of normal folio references".
+ *
+ * False positives are OK, because: a) it's unlikely for a folio to
+ * get that many refcounts, and b) all the callers of this routine are
+ * expected to be able to deal gracefully with a false positive.
+ *
+ * For large folios, the result will be exactly correct. That's because
+ * we have more tracking data available: the _pincount field is used
+ * instead of the GUP_PIN_COUNTING_BIAS scheme.
+ *
+ * For more information, please see Documentation/core-api/pin_user_pages.rst.
+ *
+ * Return: True, if it is likely that the page has been "dma-pinned".
+ * False, if the page is definitely not dma-pinned.
+ */
+static inline bool folio_maybe_dma_pinned(struct folio *folio)
+{
+ if (folio_test_large(folio))
+ return atomic_read(&folio->_pincount) > 0;
+
+ /*
+ * folio_ref_count() is signed. If that refcount overflows, then
+ * folio_ref_count() returns a negative value, and callers will avoid
+ * further incrementing the refcount.
+ *
+ * Here, for that overflow case, use the sign bit to count a little
+ * bit higher via unsigned math, and thus still get an accurate result.
+ */
+ return ((unsigned int)folio_ref_count(folio)) >=
+ GUP_PIN_COUNTING_BIAS;
+}
+
+static inline bool page_maybe_dma_pinned(struct page *page)
+{
+ return folio_maybe_dma_pinned(page_folio(page));
+}
+
+/*
+ * This should most likely only be called during fork() to see whether we
+ * should break the cow immediately for an anon page on the src mm.
+ *
+ * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
+ */
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct folio *folio)
+{
+ VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
+
+ if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
+ return false;
+
+ return folio_maybe_dma_pinned(folio);
+}
+
+/**
+ * is_zero_page - Query if a page is a zero page
+ * @page: The page to query
+ *
+ * This returns true if @page is one of the permanent zero pages.
+ */
+static inline bool is_zero_page(const struct page *page)
+{
+ return is_zero_pfn(page_to_pfn(page));
+}
+
+/**
+ * is_zero_folio - Query if a folio is a zero page
+ * @folio: The folio to query
+ *
+ * This returns true if @folio is one of the permanent zero pages.
+ */
+static inline bool is_zero_folio(const struct folio *folio)
+{
+ return is_zero_page(&folio->page);
+}
+
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning folios */
+#ifdef CONFIG_MIGRATION
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
+{
+#ifdef CONFIG_CMA
+ int mt = folio_migratetype(folio);
+
+ if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
+ return false;
+#endif
+ /* The zero page can be "pinned" but gets special handling. */
+ if (is_zero_folio(folio))
+ return true;
+
+ /* Coherent device memory must always allow eviction. */
+ if (folio_is_device_coherent(folio))
+ return false;
+
+ /* Otherwise, non-movable zone folios can be pinned. */
+ return !folio_is_zone_movable(folio);
+}
+#else
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
+{
+ return true;
+}
+#endif
+
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
@@ -1474,25 +2061,144 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *page_memcg(struct page *page)
+/**
+ * folio_nr_pages - The number of pages in the folio.
+ * @folio: The folio.
+ *
+ * Return: A positive power of two.
+ */
+static inline long folio_nr_pages(struct folio *folio)
{
- return page->mem_cgroup;
+ if (!folio_test_large(folio))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
+#else
+ return 1L << (folio->_flags_1 & 0xff);
+#endif
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+
+/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+#define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER)
+#else
+#define MAX_FOLIO_NR_PAGES MAX_ORDER_NR_PAGES
+#endif
+
+/*
+ * compound_nr() returns the number of pages in this potentially compound
+ * page. compound_nr() can be called on a tail page, and is defined to
+ * return 1 in that case.
+ */
+static inline unsigned long compound_nr(struct page *page)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(page->mem_cgroup);
-}
+ struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
#else
-static inline struct mem_cgroup *page_memcg(struct page *page)
+ return 1L << (folio->_flags_1 & 0xff);
+#endif
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
{
- return NULL;
+ return folio_nr_pages((struct folio *)page);
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+
+/**
+ * folio_next - Move to the next physical folio.
+ * @folio: The folio we're currently operating on.
+ *
+ * If you have physically contiguous memory which may span more than
+ * one folio (eg a &struct bio_vec), use this function to move from one
+ * folio to the next. Do not use it if the memory is only virtually
+ * contiguous as the folios are almost certainly not adjacent to each
+ * other. This is the folio equivalent to writing ``page++``.
+ *
+ * Context: We assume that the folios are refcounted and/or locked at a
+ * higher level and do not adjust the reference counts.
+ * Return: The next struct folio.
+ */
+static inline struct folio *folio_next(struct folio *folio)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return NULL;
+ return (struct folio *)folio_page(folio, folio_nr_pages(folio));
+}
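
A sketch of walking a physically contiguous buffer folio by folio (first_page, len and process() are hypothetical):

	struct folio *folio = page_folio(first_page);
	size_t seen = 0;

	while (seen < len) {
		process(folio);			/* per-folio work */
		seen += folio_size(folio);
		folio = folio_next(folio);	/* next physical folio */
	}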
+
+/**
+ * folio_shift - The size of the memory described by this folio.
+ * @folio: The folio.
+ *
+ * A folio represents a number of bytes which is a power-of-two in size.
+ * This function tells you which power-of-two the folio is. See also
+ * folio_size() and folio_order().
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The base-2 logarithm of the size of this folio.
+ */
+static inline unsigned int folio_shift(struct folio *folio)
+{
+ return PAGE_SHIFT + folio_order(folio);
+}
+
+/**
+ * folio_size - The number of bytes in a folio.
+ * @folio: The folio.
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The number of bytes in this folio.
+ */
+static inline size_t folio_size(struct folio *folio)
+{
+ return PAGE_SIZE << folio_order(folio);
+}
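
For example, with 4KiB base pages an order-2 folio contains 4 pages: folio_nr_pages() returns 4, folio_size() returns 16384 and folio_shift() returns 14.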
+
+/**
+ * folio_estimated_sharers - Estimate the number of sharers of a folio.
+ * @folio: The folio.
+ *
+ * folio_estimated_sharers() aims to serve as a function to efficiently
+ * estimate the number of processes sharing a folio. This is done by
+ * looking at the precise mapcount of the first subpage in the folio, and
+ * assuming the other subpages are the same. This may not be true for large
+ * folios. If you want exact mapcounts for exact calculations, look at
+ * page_mapcount() or folio_total_mapcount().
+ *
+ * Return: The estimated number of processes sharing a folio.
+ */
+static inline int folio_estimated_sharers(struct folio *folio)
+{
+ return page_mapcount(folio_page(folio, 0));
+}
+
+#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+static inline int arch_make_page_accessible(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+static inline int arch_make_folio_accessible(struct folio *folio)
+{
+ int ret;
+ long i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ ret = arch_make_page_accessible(folio_page(folio, i));
+ if (ret)
+ break;
+ }
+
+ return ret;
}
#endif
@@ -1534,19 +2240,9 @@ void page_address_init(void);
#define page_address_init() do { } while(0)
#endif
-extern void *page_rmapping(struct page *page);
-extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
+static inline void *folio_address(const struct folio *folio)
{
- if (unlikely(PageSwapCache(page)))
- return __page_file_mapping(page);
-
- return page->mapping;
+ return page_address(&folio->page);
}
extern pgoff_t __page_file_index(struct page *page);
@@ -1562,22 +2258,34 @@ static inline pgoff_t page_index(struct page *page)
return page->index;
}
-bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-struct address_space *page_mapping_file(struct page *page);
-
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
+{
+ /*
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
+ */
+ return (uintptr_t)page->lru.next & BIT(1);
+}
+
+/*
+ * Return true only if the folio has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool folio_is_pfmemalloc(const struct folio *folio)
{
/*
- * Page index cannot be this large so this must be
- * a pfmemalloc page.
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
*/
- return page->index == -1UL;
+ return (uintptr_t)folio->lru.next & BIT(1);
}
/*
@@ -1586,12 +2294,12 @@ static inline bool page_is_pfmemalloc(struct page *page)
*/
static inline void set_page_pfmemalloc(struct page *page)
{
- page->index = -1UL;
+ page->lru.next = (void *)BIT(1);
}
static inline void clear_page_pfmemalloc(struct page *page)
{
- page->index = 0;
+ page->lru.next = NULL;
}
/*
@@ -1601,53 +2309,90 @@ extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
+#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
/*
- * Flags passed to show_mem() and show_free_areas() to suppress output in
- * various contexts.
+ * Parameter block passed down to zap_pte_range in exceptional cases.
*/
-#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
+struct zap_details {
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
+};
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit_signals(struct task_struct *t);
+static inline int task_mm_cid(struct task_struct *t)
+{
+ return t->mm_cid;
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
+static inline int task_mm_cid(struct task_struct *t)
+{
+ /*
+ * Use the processor id as a fall-back when the mm cid feature is
+ * disabled. This provides functional per-cpu data structure accesses
+	 * in user-space, although it won't provide the memory usage benefits.
+ */
+ return raw_smp_processor_id();
+}
+#endif
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
-
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
- struct address_space *check_mapping; /* Check page->mapping if set */
- pgoff_t first_index; /* Lowest page->index to unmap */
- pgoff_t last_index; /* Highest page->index to unmap */
-};
+extern int user_shm_lock(size_t, struct ucounts *);
+extern void user_shm_unlock(size_t, struct ucounts *);
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long start, unsigned long end);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
+static inline void zap_vma_pages(struct vm_area_struct *vma)
+{
+ zap_page_range_single(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, NULL);
+}
+void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end, unsigned long tree_end, bool mm_wr_locked);
struct mmu_notifier_range;
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma);
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
- struct mmu_notifier_range *range,
- pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+int follow_pte(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
@@ -1659,9 +2404,11 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
-int invalidate_inode_page(struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio);
+
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+ unsigned long address, struct pt_regs *regs);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
@@ -1702,31 +2449,57 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
+static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
+ unsigned long addr);
+
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, unsigned int gup_flags);
long get_user_pages_remote(struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *locked);
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *locked);
+ int *locked);
+
+/*
+ * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
+ */
+static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
+ unsigned long addr,
+ int gup_flags,
+ struct vm_area_struct **vmap)
+{
+ struct page *page;
+ struct vm_area_struct *vma;
+ int got;
+
+ if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
+ return ERR_PTR(-EINVAL);
+
+ got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
+
+ if (got < 0)
+ return ERR_PTR(got);
+
+ vma = vma_lookup(mm, addr);
+ if (WARN_ON_ONCE(!vma)) {
+ put_page(page);
+ return ERR_PTR(-EINVAL);
+ }
+
+ *vmap = vma;
+ return page;
+}
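/*
 * Editor's note: a hedged usage sketch, not part of this patch. The
 * helper expects the caller to hold mm's mmap lock so that the page
 * and the VMA looked up afterwards stay coherent; read_byte_remote()
 * is a hypothetical name.
 */
static int read_byte_remote(struct mm_struct *mm, unsigned long addr, u8 *out)
{
        struct vm_area_struct *vma;
        struct page *page;
        int err = 0;

        if (mmap_read_lock_killable(mm))
                return -EINTR;
        page = get_user_page_vma_remote(mm, addr, FOLL_FORCE, &vma);
        if (IS_ERR(page)) {
                err = PTR_ERR(page);
        } else {
                u8 *kaddr = kmap_local_page(page);

                *out = kaddr[addr & ~PAGE_MASK];
                kunmap_local(kaddr);
                put_page(page);
        }
        mmap_read_unlock(mm);
        return err;
}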
+
long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
+ unsigned int gup_flags, struct page **pages);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
-long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
+ unsigned int gup_flags, struct page **pages);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1736,88 +2509,25 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
-/* Container for pinned pfns / pages */
-struct frame_vector {
- unsigned int nr_allocated; /* Number of frames we have space for */
- unsigned int nr_frames; /* Number of frames stored in ptrs array */
- bool got_ref; /* Did we pin pages by getting page ref? */
- bool is_pfns; /* Does array contain pages or pfns? */
- void *ptrs[]; /* Array of pinned pfns / pages. Use
- * pfns_vector_pages() or pfns_vector_pfns()
- * for access */
-};
-
-struct frame_vector *frame_vector_create(unsigned int nr_frames);
-void frame_vector_destroy(struct frame_vector *vec);
-int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- unsigned int gup_flags, struct frame_vector *vec);
-void put_vaddr_frames(struct frame_vector *vec);
-int frame_vector_to_pages(struct frame_vector *vec);
-void frame_vector_to_pfns(struct frame_vector *vec);
-
-static inline unsigned int frame_vector_count(struct frame_vector *vec)
-{
- return vec->nr_frames;
-}
-
-static inline struct page **frame_vector_pages(struct frame_vector *vec)
-{
- if (vec->is_pfns) {
- int err = frame_vector_to_pages(vec);
-
- if (err)
- return ERR_PTR(err);
- }
- return (struct page **)(vec->ptrs);
-}
-
-static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
-{
- if (!vec->is_pfns)
- frame_vector_to_pfns(vec);
- return (unsigned long *)(vec->ptrs);
-}
-
struct kvec;
-int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
- struct page **pages);
-int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-
-void __set_page_dirty(struct page *, struct address_space *, int warn);
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
-int redirty_page_for_writepage(struct writeback_control *wbc,
- struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct bdi_writeback *wb);
-int set_page_dirty(struct page *page);
+bool folio_mark_dirty(struct folio *folio);
+bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
-void __cancel_dirty_page(struct page *page);
-static inline void cancel_dirty_page(struct page *page)
-{
- /* Avoid atomic ops, locking, etc. when not actually needed. */
- if (PageDirty(page))
- __cancel_dirty_page(page);
-}
-int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
- bool need_rmap_locks);
+ bool need_rmap_locks, bool for_stack);
/*
* Flags used by change_protection(). For now we make it a bitmap so
@@ -1825,8 +2535,12 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
 * for now all the callers only use one of the flags at the same
* time.
*/
-/* Whether we should allow dirty bit accounting */
-#define MM_CP_DIRTY_ACCT (1UL << 0)
+/*
+ * Whether we should manually check if we can map individual PTEs writable,
+ * because something (e.g., COW, uffd-wp) blocks that from happening for all
+ * PTEs automatically in a writable mapping.
+ */
+#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define MM_CP_PROT_NUMA (1UL << 1)
/* Whether this change is for write protecting */
@@ -1835,20 +2549,35 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
-extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgprot_t newprot,
- unsigned long cp_flags);
-extern int mprotect_fixup(struct vm_area_struct *vma,
- struct vm_area_struct **pprev, unsigned long start,
- unsigned long end, unsigned long newflags);
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
+{
+ /*
+ * We want to check manually if we can change individual PTEs writable
+ * if we can't do that automatically for all PTEs in a mapping. For
+ * private mappings, that's always the case when we have write
+ * permissions as we properly have to handle COW.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ return vma_wants_writenotify(vma, vma->vm_page_prot);
+ return !!(vma->vm_flags & VM_WRITE);
+}
+bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+extern long change_protection(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long cp_flags);
+extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
+ struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags);
/*
 * Doesn't attempt to fault in pages and may return short (fewer pages than requested).
*/
int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
-int pin_user_pages_fast_only(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages);
static inline bool get_user_page_fast_only(unsigned long addr,
unsigned int gup_flags, struct page **pagep)
@@ -1860,55 +2589,45 @@ static inline bool get_user_page_fast_only(unsigned long addr,
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
- long val = atomic_long_read(&mm->rss_stat.count[member]);
-
-#ifdef SPLIT_RSS_COUNTING
- /*
- * counter is updated in asynchronous manner and may go to minus.
- * But it's never be expected number for users.
- */
- if (val < 0)
- val = 0;
-#endif
- return (unsigned long)val;
+ return percpu_counter_read_positive(&mm->rss_stat[member]);
}
-void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
+void mm_trace_rss_stat(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
- long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+ percpu_counter_add(&mm->rss_stat[member], value);
- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
- long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+ percpu_counter_inc(&mm->rss_stat[member]);
- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
- long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+ percpu_counter_dec(&mm->rss_stat[member]);
- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member);
}
-/* Optimized variant when page is already known not to be PageAnon */
-static inline int mm_counter_file(struct page *page)
+/* Optimized variant when folio is already known not to be anon */
+static inline int mm_counter_file(struct folio *folio)
{
- if (PageSwapBacked(page))
+ if (folio_test_swapbacked(folio))
return MM_SHMEMPAGES;
return MM_FILEPAGES;
}
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
{
- if (PageAnon(page))
+ if (folio_test_anon(folio))
return MM_ANONPAGES;
- return mm_counter_file(page);
+ return mm_counter_file(folio);
}
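/*
 * Editor's note: illustrative only. With the per-cpu rss_stat counters
 * above, a mapping path classifies the folio once and bumps the
 * matching counter by its page count; account_mapped_folio() is a
 * hypothetical name.
 */
static inline void account_mapped_folio(struct mm_struct *mm,
                                        struct folio *folio)
{
        /* picks MM_ANONPAGES, MM_SHMEMPAGES or MM_FILEPAGES as above */
        add_mm_counter(mm, mm_counter(folio), folio_nr_pages(folio));
}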
static inline unsigned long get_mm_rss(struct mm_struct *mm)
@@ -1956,14 +2675,6 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
*maxrss = hiwater_rss;
}
-#if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct mm_struct *mm);
-#else
-static inline void sync_mm_rss(struct mm_struct *mm)
-{
-}
-#endif
-
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
@@ -1983,8 +2694,6 @@ static inline int pte_devmap(pte_t pte)
}
#endif
-int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
-
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -2118,42 +2827,93 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU */
+static inline struct ptdesc *virt_to_ptdesc(const void *x)
+{
+ return page_ptdesc(virt_to_page(x));
+}
+
+static inline void *ptdesc_to_virt(const struct ptdesc *pt)
+{
+ return page_to_virt(ptdesc_page(pt));
+}
+
+static inline void *ptdesc_address(const struct ptdesc *pt)
+{
+ return folio_address(ptdesc_folio(pt));
+}
+
+static inline bool pagetable_is_reserved(struct ptdesc *pt)
+{
+ return folio_test_reserved(ptdesc_folio(pt));
+}
+
+/**
+ * pagetable_alloc - Allocate pagetables
+ * @gfp: GFP flags
+ * @order: desired pagetable order
+ *
+ * pagetable_alloc allocates memory for page tables as well as a page table
+ * descriptor to describe that memory.
+ *
+ * Return: The ptdesc describing the allocated page tables.
+ */
+static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order)
+{
+ struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+
+ return page_ptdesc(page);
+}
+
+/**
+ * pagetable_free - Free pagetables
+ * @pt: The page table descriptor
+ *
+ * pagetable_free frees the memory of all page tables described by a page
+ * table descriptor and the memory for the descriptor itself.
+ */
+static inline void pagetable_free(struct ptdesc *pt)
+{
+ struct page *page = ptdesc_page(pt);
+
+ __free_pages(page, compound_order(page));
+}
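/*
 * Editor's note: a minimal sketch of the intended pairing, assuming
 * GFP_PGTABLE_USER from asm-generic/pgalloc.h; a real arch allocator
 * would also run pagetable_pte_ctor()/pagetable_pte_dtor() (defined
 * further down). pte_alloc_one_example() is a hypothetical name.
 */
static pte_t *pte_alloc_one_example(void)
{
        struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_USER, 0);

        if (!ptdesc)
                return NULL;
        /* release with pagetable_free(ptdesc) */
        return (pte_t *)ptdesc_address(ptdesc);
}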
+
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
-extern bool ptlock_alloc(struct page *page);
-extern void ptlock_free(struct page *page);
+bool ptlock_alloc(struct ptdesc *ptdesc);
+void ptlock_free(struct ptdesc *ptdesc);
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return page->ptl;
+ return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
-static inline void ptlock_free(struct page *page)
+static inline void ptlock_free(struct ptdesc *ptdesc)
{
}
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return &page->ptl;
+ return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_page(*pmd));
+ return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}
-static inline bool ptlock_init(struct page *page)
+static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
 * prep_new_page() initializes page->private (and therefore page->ptl)
@@ -2162,10 +2922,10 @@ static inline bool ptlock_init(struct page *page)
 * It can happen if an arch tries to use slab for page table allocation:
 * slab code uses page->slab_cache, which shares storage with page->ptl.
*/
- VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
- if (!ptlock_alloc(page))
+ VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
+ if (!ptlock_alloc(ptdesc))
return false;
- spin_lock_init(ptlock_ptr(page));
+ spin_lock_init(ptlock_ptr(ptdesc));
return true;
}
@@ -2178,40 +2938,49 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
-static inline bool ptlock_init(struct page *page) { return true; }
-static inline void ptlock_free(struct page *page) {}
+static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
-static inline void pgtable_init(void)
+static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
- ptlock_cache_init();
- pgtable_cache_init();
-}
+ struct folio *folio = ptdesc_folio(ptdesc);
-static inline bool pgtable_pte_page_ctor(struct page *page)
-{
- if (!ptlock_init(page))
+ if (!ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_zone_page_state(page, NR_PAGETABLE);
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
-static inline void pgtable_pte_page_dtor(struct page *page)
+static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
+{
+ return __pte_offset_map(pmd, addr, NULL);
+}
+
+pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
+static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp)
{
- ptlock_free(page);
- __ClearPageTable(page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ pte_t *pte;
+
+ __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
+ return pte;
}
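/*
 * Editor's note: a hedged sketch of the new locking discipline, not
 * from this patch. Once PTE pages can be freed underneath the walker,
 * pte_offset_map_lock() may return NULL, which means "the pmd changed,
 * retry the walk", not an error; peek_pte_example() is a hypothetical
 * name.
 */
static int peek_pte_example(struct mm_struct *mm, pmd_t *pmd,
                            unsigned long addr, pte_t *ptent)
{
        spinlock_t *ptl;
        pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

        if (!pte)
                return -EAGAIN;         /* pmd changed under us; retry */
        *ptent = ptep_get(pte);
        pte_unmap_unlock(pte, ptl);     /* defined just below */
        return 0;
}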
-#define pte_offset_map_lock(mm, pmd, address, ptlp) \
-({ \
- spinlock_t *__ptl = pte_lockptr(mm, pmd); \
- pte_t *__pte = pte_offset_map(pmd, address); \
- *(ptlp) = __ptl; \
- spin_lock(__ptl); \
- __pte; \
-})
+pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
#define pte_unmap_unlock(pte, ptl) do { \
spin_unlock(ptl); \
@@ -2233,34 +3002,39 @@ static inline void pgtable_pte_page_dtor(struct page *page)
#if USE_SPLIT_PMD_PTLOCKS
-static struct page *pmd_to_page(pmd_t *pmd)
+static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
return virt_to_page((void *)((unsigned long) pmd & mask));
}
+static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
+{
+ return page_ptdesc(pmd_pgtable_page(pmd));
+}
+
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_to_page(pmd));
+ return ptlock_ptr(pmd_ptdesc(pmd));
}
-static inline bool pgtable_pmd_page_ctor(struct page *page)
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- page->pmd_huge_pte = NULL;
+ ptdesc->pmd_huge_pte = NULL;
#endif
- return ptlock_init(page);
+ return ptlock_init(ptdesc);
}
-static inline void pgtable_pmd_page_dtor(struct page *page)
+static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
+ VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
#endif
- ptlock_free(page);
+ ptlock_free(ptdesc);
}
-#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
@@ -2269,8 +3043,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
-static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
-static inline void pgtable_pmd_page_dtor(struct page *page) {}
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -2283,6 +3057,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
+static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ if (!pmd_ptlock_init(ptdesc))
+ return false;
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ return true;
+}
+
+static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ pmd_ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
@@ -2302,8 +3096,23 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
return ptl;
}
+static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
+}
+
+static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
extern void __init pagecache_init(void);
-extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);
/*
@@ -2315,32 +3124,20 @@ extern void free_initmem(void);
extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
-#ifdef CONFIG_HIGHMEM
-/*
- * Free a highmem page into the buddy system, adjusting totalhigh_pages
- * and totalram_pages.
- */
-extern void free_highmem_page(struct page *page);
-#endif
-
extern void adjust_managed_page_count(struct page *page, long count);
-extern void mem_init_print_info(const char *str);
-extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+extern void reserve_bootmem_region(phys_addr_t start,
+ phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void __free_reserved_page(struct page *page)
+static inline void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
-}
-
-static inline void free_reserved_page(struct page *page)
-{
- __free_reserved_page(page);
adjust_managed_page_count(page, 1);
}
+#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
{
@@ -2348,6 +3145,11 @@ static inline void mark_page_reserved(struct page *page)
adjust_managed_page_count(page, -1);
}
+static inline void free_reserved_ptdesc(struct ptdesc *pt)
+{
+ free_reserved_page(ptdesc_page(pt));
+}
+
/*
* Default method to free all the __init memory into the buddy system.
* The freed pages will be poisoned with pattern "poison" if it's within
@@ -2359,7 +3161,7 @@ static inline unsigned long free_initmem_default(int poison)
extern char __init_begin[], __init_end[];
return free_reserved_area(&__init_begin, &__init_end,
- poison, "unused kernel");
+ poison, "unused kernel image (initmem)");
}
static inline unsigned long get_num_physpages(void)
@@ -2386,7 +3188,7 @@ static inline unsigned long get_num_physpages(void)
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
void free_area_init(unsigned long *max_zone_pfn);
@@ -2397,9 +3199,8 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
-extern unsigned long find_min_pfn_with_active_regions(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
@@ -2407,19 +3208,17 @@ static inline int early_pfn_to_nid(unsigned long pfn)
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
- enum memmap_context, struct vmem_altmap *);
-extern void setup_per_zone_wmarks(void);
-extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+
+extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static inline void show_mem(void)
+{
+ __show_mem(0, NULL, MAX_NR_ZONES - 1);
+}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -2432,12 +3231,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
-/* page_alloc.c */
-extern int min_free_kbytes;
-extern int watermark_boost_factor;
-extern int watermark_scale_factor;
-extern bool arch_has_descending_max_zone_pfns(void);
-
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
@@ -2478,31 +3271,78 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
- return __vma_adjust(vma, start, end, pgoff, insert, NULL);
-}
-extern struct vm_area_struct *vma_merge(struct mm_struct *,
- struct vm_area_struct *prev, unsigned long addr, unsigned long end,
- unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx);
+extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next);
+extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
-extern int split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
- struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long vm_flags,
+ struct mempolicy *policy,
+ struct vm_userfaultfd_ctx uffd_ctx,
+ struct anon_vma_name *anon_name);
+
+/* We are about to modify the VMA's flags. */
+static inline struct vm_area_struct
+*vma_modify_flags(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), vma->vm_userfaultfd_ctx,
+ anon_vma_name(vma));
+}
+
+/* We are about to modify the VMA's flags and/or anon_name. */
+static inline struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
+}
+
+/* We are about to modify the VMA's memory policy. */
+static inline struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol)
+{
+ return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
+ new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+}
+
+/* We are about to modify the VMA's flags and/or uffd context. */
+static inline struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), new_ctx, anon_vma_name(vma));
+}
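/*
 * Editor's note: a hedged sketch of the calling convention, not from
 * this patch. Each vma_modify*() wrapper funnels into vma_modify(),
 * which returns the (possibly merged or split) VMA covering
 * [start, end) or an ERR_PTR(); callers hold the mmap lock for
 * writing. mprotect_step_example() is a hypothetical name.
 */
static struct vm_area_struct *
mprotect_step_example(struct vma_iterator *vmi, struct vm_area_struct *prev,
                      struct vm_area_struct *vma, unsigned long start,
                      unsigned long end, unsigned long newflags)
{
        struct vm_area_struct *merged;

        merged = vma_modify_flags(vmi, prev, vma, start, end, newflags);
        if (IS_ERR(merged))
                return merged;  /* typically ERR_PTR(-ENOMEM) from a split */
        /* merged may differ from vma after merging with prev/next */
        return merged;
}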
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -2521,7 +3361,8 @@ static inline int check_data_rlimit(unsigned long rlim,
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
-extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
@@ -2540,6 +3381,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
+unsigned long randomize_page(unsigned long start, unsigned long range);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
@@ -2548,14 +3390,19 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
- struct list_head *uf, bool downgrade);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
+extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
-extern int do_madvise(unsigned long start, size_t len_in, int behavior);
+extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
+extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -2567,8 +3414,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len)
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
-/* These take the mm semaphore themselves */
-extern int __must_check vm_brk(unsigned long, unsigned long);
+/* This takes the mm semaphore itself */
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
@@ -2595,52 +3441,63 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
-/* mm/page-writeback.c */
-int __must_check write_one_page(struct page *page);
-void task_dirty_inc(struct task_struct *tsk);
-
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr);
/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-extern int expand_downwards(struct vm_area_struct *vma,
- unsigned long address);
-#if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
-#else
- #define expand_upwards(vma, address) (0)
-#endif
+int expand_downwards(struct vm_area_struct *vma, unsigned long address);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
- NULL if none. Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr),
+ * NULL if none. Assume start_addr < end_addr.
+ */
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr, unsigned long end_addr);
+
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct * vma = find_vma(mm,start_addr);
+ return mtree_load(&mm->mm_mt, addr);
+}
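/*
 * Editor's note: a minimal usage sketch, not from this patch. Unlike
 * find_vma(), vma_lookup() returns a VMA only if it actually contains
 * addr, so no extra vm_start check is needed; addr_is_mapped() is a
 * hypothetical name.
 */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        bool ret;

        mmap_read_lock(mm);
        ret = vma_lookup(mm, addr) != NULL;
        mmap_read_unlock(mm);
        return ret;
}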
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
+static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_GROWSDOWN)
+ return stack_guard_gap;
+
+ /* See reasoning around the VM_SHADOW_STACK definition */
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
}
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
+ unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
- if (vma->vm_flags & VM_GROWSDOWN) {
- vm_start -= stack_guard_gap;
- if (vm_start > vma->vm_start)
- vm_start = 0;
- }
+ vm_start -= gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
return vm_start;
}
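/*
 * Editor's note: a worked example. With the default stack_guard_gap of
 * 256 pages and 4K pages, a VM_GROWSDOWN stack has its effective start
 * lowered by 1MB for placement checks; the "vm_start > vma->vm_start"
 * test above clamps the result to 0 if the subtraction wraps below
 * address zero.
 */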
@@ -2665,7 +3522,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
- struct vm_area_struct *vma = find_vma(mm, vm_start);
+ struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
@@ -2693,14 +3550,19 @@ static inline void vma_set_page_prot(struct vm_area_struct *vma)
}
#endif
+void vma_set_file(struct vm_area_struct *vma, struct file *file);
+
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#endif
-struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
+ unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
@@ -2714,8 +3576,6 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
-vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2733,94 +3593,45 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
return VM_FAULT_OOM;
+ else if (err == -EHWPOISON)
+ return VM_FAULT_HWPOISON;
+ return VM_FAULT_SIGBUS;
+}
+
+/*
+ * Convert errno to return value for ->page_mkwrite() calls.
+ *
+ * This should eventually be merged with vmf_error() above, but will need a
+ * careful audit of all vmf_error() callers.
+ */
+static inline vm_fault_t vmf_fs_error(int err)
+{
+ if (err == 0)
+ return VM_FAULT_LOCKED;
+ if (err == -EFAULT || err == -EAGAIN)
+ return VM_FAULT_NOPAGE;
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ /* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}
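/*
 * Editor's note: a sketch of the intended call site, assuming a
 * filesystem whose ->page_mkwrite() leaves the folio locked on
 * success; example_mkwrite() and example_prepare_write() are
 * hypothetical names.
 */
static vm_fault_t example_mkwrite(struct vm_fault *vmf)
{
        int err = example_prepare_write(vmf);   /* 0, -ENOMEM, -EIO, ... */

        /* 0 maps to VM_FAULT_LOCKED, errnos to the matching fault code */
        return vmf_fs_error(err);
}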
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags);
-#define FOLL_WRITE 0x01 /* check pte is writable */
-#define FOLL_TOUCH 0x02 /* mark page accessed */
-#define FOLL_GET 0x04 /* do get_page on page */
-#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
-#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
-#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
- * and return without waiting upon it */
-#define FOLL_POPULATE 0x40 /* fault in page */
-#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
-#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
-#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
-#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
-#define FOLL_MLOCK 0x1000 /* lock present pages */
-#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_COW 0x4000 /* internal GUP flag */
-#define FOLL_ANON 0x8000 /* don't do file mappings */
-#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
-#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
-#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
-#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
-
-/*
- * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
- * other. Here is what they mean, and how to use them:
- *
- * FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is in contrast to
- * iov_iter_get_pages(), whose usages are transient.
- *
- * FIXME: For pages which are part of a filesystem, mappings are subject to the
- * lifetime enforced by the filesystem and we need guarantees that longterm
- * users like RDMA and V4L2 only establish mappings which coordinate usage with
- * the filesystem. Ideas for this coordination include revoking the longterm
- * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
- * added after the problem with filesystems was found FS DAX VMAs are
- * specifically failed. Filesystem pages are still subject to bugs and use of
- * FOLL_LONGTERM should be avoided on those pages.
- *
- * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
- * Currently only get_user_pages() and get_user_pages_fast() support this flag
- * and calls to get_user_pages_[un]locked are specifically not allowed. This
- * is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY.
- *
- * In the CMA case: long term pins in a CMA region would unnecessarily fragment
- * that region. And so, CMA attempts to migrate the page before pinning, when
- * FOLL_LONGTERM is specified.
- *
- * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
- * but an additional pin counting system) will be invoked. This is intended for
- * anything that gets a page reference and then touches page data (for example,
- * Direct IO). This lets the filesystem know that some non-file-system entity is
- * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
- * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
- * a call to unpin_user_page().
- *
- * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
- * and separate refcounting mechanisms, however, and that means that each has
- * its own acquire and release mechanisms:
- *
- * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
- *
- * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
- *
- * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
- * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
- * calls applied to them, and that's perfectly OK. This is a constraint on the
- * callers, not on the pages.)
- *
- * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
- * directly by the caller. That's in order to help avoid mismatches when
- * releasing pages: get_user_pages*() pages must be released via put_page(),
- * while pin_user_pages*() pages must be released via unpin_user_page().
- *
- * Please see Documentation/core-api/pin_user_pages.rst for more information.
- */
-
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
@@ -2832,6 +3643,30 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
return 0;
}
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+ unsigned int flags)
+{
+ /*
+ * If callers don't want to honor NUMA hinting faults, no need to
+ * determine if we would actually have to trigger a NUMA hinting fault.
+ */
+ if (!(flags & FOLL_HONOR_NUMA_FAULT))
+ return true;
+
+ /*
+ * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
+ *
+ * Requiring a fault here even for inaccessible VMAs would mean that
+ * FOLL_FORCE cannot make any progress, because handle_mm_fault()
+ * refuses to process NUMA hinting faults in inaccessible VMAs.
+ */
+ return !vma_is_accessible(vma);
+}
+
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -2840,43 +3675,56 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
pte_fn_t fn, void *data);
#ifdef CONFIG_PAGE_POISONING
-extern bool page_poisoning_enabled(void);
-extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern void __kernel_poison_pages(struct page *page, int numpages);
+extern void __kernel_unpoison_pages(struct page *page, int numpages);
+extern bool _page_poisoning_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+static inline bool page_poisoning_enabled(void)
+{
+ return _page_poisoning_enabled_early;
+}
+/*
+ * For use in fast paths after init_mem_debugging() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool page_poisoning_enabled_static(void)
+{
+ return static_branch_unlikely(&_page_poisoning_enabled);
+}
+static inline void kernel_poison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_poison_pages(page, numpages);
+}
+static inline void kernel_unpoison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_unpoison_pages(page, numpages);
+}
#else
static inline bool page_poisoning_enabled(void) { return false; }
-static inline void kernel_poison_pages(struct page *page, int numpages,
- int enable) { }
+static inline bool page_poisoning_enabled_static(void) { return false; }
+static inline void __kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif
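/*
 * Editor's note: hedged sketch of the intended pairing, not from this
 * patch. The page allocator poisons pages as they are freed and
 * unpoisons (which also verifies the poison pattern) as they are
 * allocated again; both helpers compile to no-ops unless the static
 * key was enabled during early init. poison_cycle_example() is a
 * hypothetical name.
 */
static void poison_cycle_example(struct page *page)
{
        kernel_poison_pages(page, 1);     /* free path: write the pattern */
        kernel_unpoison_pages(page, 1);   /* alloc path: check and clear */
}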
-#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
-DECLARE_STATIC_KEY_TRUE(init_on_alloc);
-#else
-DECLARE_STATIC_KEY_FALSE(init_on_alloc);
-#endif
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
- if (static_branch_unlikely(&init_on_alloc) &&
- !page_poisoning_enabled())
+ if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc))
return true;
return flags & __GFP_ZERO;
}
-#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
-DECLARE_STATIC_KEY_TRUE(init_on_free);
-#else
-DECLARE_STATIC_KEY_FALSE(init_on_free);
-#endif
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
- return static_branch_unlikely(&init_on_free) &&
- !page_poisoning_enabled();
+ return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+ &init_on_free);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void init_debug_pagealloc(void);
-#else
-static inline void init_debug_pagealloc(void) {}
-#endif
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
@@ -2887,8 +3735,8 @@ static inline bool debug_pagealloc_enabled(void)
}
/*
- * For use in fast paths after init_debug_pagealloc() has run, or when a
- * false negative result is not harmful when called too early.
+ * For use in fast paths after mem_debugging_and_hardening_init() has run,
+ * or when a false negative result is not harmful when called too early.
*/
static inline bool debug_pagealloc_enabled_static(void)
{
@@ -2898,28 +3746,76 @@ static inline bool debug_pagealloc_enabled_static(void)
return static_branch_unlikely(&_debug_pagealloc_enabled);
}
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
-
/*
- * When called in DEBUG_PAGEALLOC context, the call should most likely be
- * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
+ * To support DEBUG_PAGEALLOC, the architecture must ensure that
+ * __kernel_map_pages() never fails.
*/
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
+{
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 1);
+}
+
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
- __kernel_map_pages(page, numpages, enable);
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 0);
}
-#ifdef CONFIG_HIBERNATION
-extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
-#ifdef CONFIG_HIBERNATION
-static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+
+extern unsigned int _debug_guardpage_minorder;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool debug_guardpage_enabled(void)
+{
+ return static_branch_unlikely(&_debug_guardpage_enabled);
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+
+ return PageGuard(page);
+}
+
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype);
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+ return __set_page_guard(zone, page, order, migratetype);
+}
+
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype);
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
+{
+ if (!debug_guardpage_enabled())
+ return;
+ __clear_page_guard(zone, page, order, migratetype);
+}
+
+#else /* CONFIG_DEBUG_PAGEALLOC */
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
+static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) { return false; }
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
@@ -2946,7 +3842,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
#endif
void drop_slab(void);
-void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -2965,20 +3860,29 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
- unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+void pmd_init(void *addr);
+void pud_init(void *addr);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
- struct vmem_altmap *altmap);
+ struct vmem_altmap *altmap, struct page *reuse);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next);
+int vmemmap_check_pmd(pmd_t *pmd, int node,
+ unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
+int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
+ int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
@@ -2986,6 +3890,67 @@ void vmemmap_populate_print_last(void);
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ /* number of pfns from base where pfn_to_page() is valid */
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+ altmap->alloc -= nr_pfns;
+}
+#else
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif
+
+#define VMEMMAP_RESERVE_NR 2
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
+static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ unsigned long nr_pages;
+ unsigned long nr_vmemmap_pages;
+
+ if (!pgmap || !is_power_of_2(sizeof(struct page)))
+ return false;
+
+ nr_pages = pgmap_vmemmap_nr(pgmap);
+ nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
+ /*
+ * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
+ * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
+ */
+ return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
+}
+/*
+ * If we don't have an architecture override, use the generic rule
+ */
+#ifndef vmemmap_can_optimize
+#define vmemmap_can_optimize __vmemmap_can_optimize
+#endif
+
+#else
+static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ return false;
+}
+#endif
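/*
 * Editor's note: a worked example of the check above, assuming 4K base
 * pages and a 64-byte struct page. A pgmap covering 2MB spans 512 base
 * pages, so nr_vmemmap_pages = (512 * 64) >> PAGE_SHIFT = 8, which is
 * greater than VMEMMAP_RESERVE_NR (2): the optimization applies,
 * provided no altmap is in use.
 */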
+
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
unsigned long nr_pages);
@@ -2994,19 +3959,82 @@ enum mf_flags {
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
+ MF_UNPOISON = 1 << 4,
+ MF_SW_SIMULATED = 1 << 5,
+ MF_NO_RETRY = 1 << 6,
+ MF_MEM_PRE_REMOVE = 1 << 7,
};
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
-extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
-extern int get_hwpoison_page(struct page *page);
-#define put_hwpoison_page(page) put_page(page)
-extern int sysctl_memory_failure_early_kill;
-extern int sysctl_memory_failure_recovery;
-extern void shake_page(struct page *p, int access);
+extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Sysfs entries for memory failure handling statistics.
+ */
+extern const struct attribute_group memory_failure_attr_group;
+extern void memory_failure_queue(unsigned long pfn, int flags);
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void num_poisoned_pages_inc(unsigned long pfn);
+void num_poisoned_pages_sub(unsigned long pfn, long i);
+struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
+#else
+static inline void memory_failure_queue(unsigned long pfn, int flags)
+{
+}
+
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
+{
+ return 0;
+}
+
+static inline void num_poisoned_pages_inc(unsigned long pfn)
+{
+}
+
+static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
+{
+}
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_KSM)
+void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+ struct vm_area_struct *vma, struct list_head *to_kill,
+ unsigned long ksm_addr);
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+extern void memblk_nr_poison_inc(unsigned long pfn);
+extern void memblk_nr_poison_sub(unsigned long pfn, long i);
+#else
+static inline void memblk_nr_poison_inc(unsigned long pfn)
+{
+}
+static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
+{
+}
+#endif
+
+#ifndef arch_memory_failure
+static inline int arch_memory_failure(unsigned long pfn, int flags)
+{
+ return -ENXIO;
+}
+#endif
+
+#ifndef arch_is_platform_page
+static inline bool arch_is_platform_page(u64 paddr)
+{
+ return false;
+}
+#endif
/*
* Error handlers for various types of pages.
@@ -3023,10 +4051,8 @@ enum mf_action_page_type {
MF_MSG_KERNEL_HIGH_ORDER,
MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
- MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
- MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
@@ -3038,8 +4064,8 @@ enum mf_action_page_type {
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
- MF_MSG_BUDDY_2ND,
MF_MSG_DAX,
+ MF_MSG_UNSPLIT_THP,
MF_MSG_UNKNOWN,
};
@@ -3047,14 +4073,12 @@ enum mf_action_page_type {
extern void clear_huge_page(struct page *page,
unsigned long addr_hint,
unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned long addr_hint,
- struct vm_area_struct *vma,
- unsigned int pages_per_huge_page);
-extern long copy_huge_page_from_user(struct page *dst_page,
- const void __user *usr_src,
- unsigned int pages_per_huge_page,
- bool allow_pagefault);
+int copy_user_large_folio(struct folio *dst, struct folio *src,
+ unsigned long addr_hint,
+ struct vm_area_struct *vma);
+long copy_folio_from_user(struct folio *dst_folio,
+ const void __user *usr_src,
+ bool allow_pagefault);
/**
* vma_is_special_huge - Are transhuge page-table entries considered special?
@@ -3074,33 +4098,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern unsigned int _debug_guardpage_minorder;
-DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
-
-static inline unsigned int debug_guardpage_minorder(void)
-{
- return _debug_guardpage_minorder;
-}
-
-static inline bool debug_guardpage_enabled(void)
-{
- return static_branch_unlikely(&_debug_guardpage_enabled);
-}
-
-static inline bool page_is_guard(struct page *page)
-{
- if (!debug_guardpage_enabled())
- return false;
-
- return PageGuard(page);
-}
-#else
-static inline unsigned int debug_guardpage_minorder(void) { return 0; }
-static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
@@ -3128,5 +4125,81 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
extern int sysctl_nr_trim_pages;
-#endif /* __KERNEL__ */
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
+/**
+ * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
+ * handle them.
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+ * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
+ * check/handling on the vma flags. Return 0 if the check passes, or <0 on error.
+ */
+static inline int seal_check_write(int seals, struct vm_area_struct *vma)
+{
+ if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * write seals are active.
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return -EPERM;
+
+ /*
+ * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
+ * MAP_SHARED and read-only, take care to not allow mprotect to
+ * revert protections on such mappings. Do this only for shared
+ * mappings. For private mappings, we don't need to mask
+ * VM_MAYWRITE as we still want them to be COW-writable.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ vm_flags_clear(vma, VM_MAYWRITE);
+ }
+
+ return 0;
+}
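[Editor's note: a hedged usage sketch for seal_check_write(), showing how a memfd-style ->mmap handler might apply it. struct example_inode_info and its seals field are assumptions for illustration; shmem keeps its seals in shmem_inode_info, other filesystems differ:

struct example_inode_info {
	int seals;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_inode_info *info = file_inode(file)->i_private;
	int ret;

	ret = seal_check_write(info->seals, vma);
	if (ret)
		return ret;	/* -EPERM: writable shared mapping of a sealed file */

	/* ... the handler's normal mmap setup would continue here ... */
	return 0;
}
]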
+
+#ifdef CONFIG_ANON_VMA_NAME
+int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in,
+ struct anon_vma_name *anon_name);
+#else
+static inline int
+madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in, struct anon_vma_name *anon_name) {
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+
+bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
+void accept_memory(phys_addr_t start, phys_addr_t end);
+
+#else
+
+static inline bool range_contains_unaccepted_memory(phys_addr_t start,
+ phys_addr_t end)
+{
+ return false;
+}
+
+static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+}
+
+#endif
+
+static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
+{
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
+
+ return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+}
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h
new file mode 100644
index 000000000000..a5ace2b198b8
--- /dev/null
+++ b/include/linux/mm_api.h
@@ -0,0 +1 @@
+#include <linux/mm.h>
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8fc71e9d7bb0..f4fe593c1400 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,34 +2,48 @@
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H
+#include <linux/atomic.h>
#include <linux/huge_mm.h>
+#include <linux/mm_types.h>
#include <linux/swap.h>
+#include <linux/string.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/swapops.h>
/**
- * page_is_file_lru - should the page be on a file LRU or anon LRU?
- * @page: the page to test
- *
- * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
- * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
- * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
- * functions that manipulate the LRU lists, to sort a page onto the right LRU
- * list.
+ * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
+ * @folio: The folio to test.
*
* We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
* could be as far down as __page_cache_release.
+ *
+ * Return: An integer (not a boolean!) used to sort a folio onto the
+ * right LRU list and to account folios correctly.
+ * 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).
+ * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
+ * ram or swap backed folio.
*/
+static inline int folio_is_file_lru(struct folio *folio)
+{
+ return !folio_test_swapbacked(folio);
+}
+
static inline int page_is_file_lru(struct page *page)
{
- return !PageSwapBacked(page);
+ return folio_is_file_lru(page_folio(page));
}
static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ lockdep_assert_held(&lruvec->lru_lock);
+ WARN_ON_ONCE(nr_pages != (int)nr_pages);
+
__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
@@ -37,7 +51,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
@@ -45,84 +59,535 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
#endif
}
-static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+/**
+ * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
+ * @folio: The folio that was on lru and now has a zero reference.
+ */
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
+ VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
+
+ __folio_clear_lru(folio);
+
+ /* this shouldn't happen, so leave the flags to bad_page() */
+ if (folio_test_active(folio) && folio_test_unevictable(folio))
+ return;
+
+ __folio_clear_active(folio);
+ __folio_clear_unevictable(folio);
}
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+/**
+ * folio_lru_list - Which LRU list should a folio be on?
+ * @folio: The folio to test.
+ *
+ * Return: The LRU list a folio should be on, as an index
+ * into the array of LRU lists.
+ */
+static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add_tail(&page->lru, &lruvec->lists[lru]);
+ enum lru_list lru;
+
+ VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+
+ if (folio_test_unevictable(folio))
+ return LRU_UNEVICTABLE;
+
+ lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+ if (folio_test_active(folio))
+ lru += LRU_ACTIVE;
+
+ return lru;
}
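[Editor's note: the index arithmetic above relies on the enum lru_list layout, in which each active list directly follows its inactive counterpart. A worked table, informational only:

/*
 * anon, inactive  -> LRU_INACTIVE_ANON
 * anon, active    -> LRU_INACTIVE_ANON + LRU_ACTIVE == LRU_ACTIVE_ANON
 * file, inactive  -> LRU_INACTIVE_FILE
 * file, active    -> LRU_INACTIVE_FILE + LRU_ACTIVE == LRU_ACTIVE_FILE
 * unevictable     -> LRU_UNEVICTABLE (tested first, no arithmetic)
 */
]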
-static __always_inline void del_page_from_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+static inline bool lru_gen_enabled(void)
{
- list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
+#else
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
-/**
- * page_lru_base_type - which LRU list type should a page be on?
- * @page: the page to test
- *
- * Used for LRU list index arithmetic.
- *
- * Returns the base LRU type - file or anon - @page should be on.
- */
-static inline enum lru_list page_lru_base_type(struct page *page)
+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#endif
+
+static inline bool lru_gen_in_fault(void)
{
- if (page_is_file_lru(page))
- return LRU_INACTIVE_FILE;
- return LRU_INACTIVE_ANON;
+ return current->in_lru_fault;
}
-/**
- * page_off_lru - which LRU list was page on? clearing its lru flags.
- * @page: the page to test
- *
- * Returns the LRU list a page was on, as an index into the array of LRU
- * lists; and clears its Unevictable or Active flags, ready for freeing.
- */
-static __always_inline enum lru_list page_off_lru(struct page *page)
+static inline int lru_gen_from_seq(unsigned long seq)
{
- enum lru_list lru;
+ return seq % MAX_NR_GENS;
+}
+
+static inline int lru_hist_from_seq(unsigned long seq)
+{
+ return seq % NR_HIST_GENS;
+}
+
+static inline int lru_tier_from_refs(int refs)
+{
+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
+
+ /* see the comment in folio_lru_refs() */
+ return order_base_2(refs + 1);
+}
+
+static inline int folio_lru_refs(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+ bool workingset = flags & BIT(PG_workingset);
- if (PageUnevictable(page)) {
- __ClearPageUnevictable(page);
- lru = LRU_UNEVICTABLE;
- } else {
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- __ClearPageActive(page);
+ /*
+ * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
+ * total number of accesses is N>1, since N=0,1 both map to the first
+ * tier. lru_tier_from_refs() will account for this off-by-one. Also see
+ * the comment on MAX_NR_TIERS.
+ */
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
+}
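[Editor's note: a hedged worked example of the two mappings above, assuming the in-tree default MAX_NR_GENS == 4 and a LRU_REFS_WIDTH of at least 3:

/*
 * lru_gen_from_seq():  seq 0 1 2 3 4 5 ...  ->  gen 0 1 2 3 0 1 ...
 *
 * lru_tier_from_refs() is order_base_2(refs + 1), so tiers grow
 * logarithmically with the access count:
 *   refs 0    -> tier 0
 *   refs 1    -> tier 1
 *   refs 2..3 -> tier 2
 *   refs 4..7 -> tier 3
 */
]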
+
+static inline int folio_lru_gen(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+
+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
+static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+{
+ unsigned long max_seq = lruvec->lrugen.max_seq;
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+
+ /* see the comment on MIN_NR_GENS */
+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
+}
+
+static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);
+
+ if (old_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
+ lrugen->nr_pages[old_gen][type][zone] - delta);
+ if (new_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
+ lrugen->nr_pages[new_gen][type][zone] + delta);
+
+ /* addition */
+ if (old_gen < 0) {
+ if (lru_gen_is_active(lruvec, new_gen))
lru += LRU_ACTIVE;
- }
+ __update_lru_size(lruvec, lru, zone, delta);
+ return;
}
- return lru;
+
+ /* deletion */
+ if (new_gen < 0) {
+ if (lru_gen_is_active(lruvec, old_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, -delta);
+ return;
+ }
+
+ /* promotion */
+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
+ __update_lru_size(lruvec, lru, zone, -delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
+ }
+
+ /* demotion requires isolation, e.g., lru_deactivate_fn() */
+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
-/**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+ * There are four common cases for this folio:
+ * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
+ * generation, and it's protected over the rest below.
+ * 2. If it can't be evicted immediately, i.e., a dirty page pending
+ * writeback, add it to the second youngest generation.
+ * 3. If it should be evicted first, e.g., cold and clean from
+ * folio_rotate_reclaimable(), add it to the oldest generation.
+ * 4. Everything else falls between 2 & 3 above and is added to the
+ * second oldest generation if it's considered inactive, or the
+ * oldest generation otherwise. See lru_gen_is_active().
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ seq = lrugen->max_seq - 1;
+ else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
+ seq = lrugen->min_seq[type];
+ else
+ seq = lrugen->min_seq[type] + 1;
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
+
+ return true;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+
+ if (gen < 0)
+ return false;
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+
+ /* for folio_migrate_flags() */
+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
+ flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+
+ lru_gen_update_size(lruvec, folio, gen, -1);
+ list_del(&folio->lru);
+
+ return true;
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_in_fault(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+static __always_inline
+void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, false))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ if (lru != LRU_UNEVICTABLE)
+ list_add(&folio->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline
+void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, true))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ /* This is not expected to be used on LRU_UNEVICTABLE */
+ list_add_tail(&folio->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline
+void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_del_folio(lruvec, folio, false))
+ return;
+
+ if (lru != LRU_UNEVICTABLE)
+ list_del(&folio->lru);
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ -folio_nr_pages(folio));
+}
+
+#ifdef CONFIG_ANON_VMA_NAME
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
+{
+ if (anon_name)
+ kref_get(&anon_name->kref);
+}
+
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
+{
+ if (anon_name)
+ kref_put(&anon_name->kref, anon_vma_name_free);
+}
+
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+ /* Prevent anon_name refcount saturation early on */
+ if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+ anon_vma_name_get(anon_name);
+ return anon_name;
+ }
+ return anon_vma_name_alloc(anon_name->name);
+}
+
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma)
+{
+ struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+ if (anon_name)
+ new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
+{
+ /*
+ * Not using anon_vma_name because it generates a warning if mmap_lock
+ * is not held, which might be the case here.
+ */
+ anon_vma_name_put(vma->anon_name);
+}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ if (anon_name1 == anon_name2)
+ return true;
+
+ return anon_name1 && anon_name2 &&
+ !strcmp(anon_name1->name, anon_name2->name);
+}
+
+#else /* CONFIG_ANON_VMA_NAME */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ return true;
+}
+
+#endif /* CONFIG_ANON_VMA_NAME */
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
+}
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
+ /*
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+ * Where the increment is constrained by the PTL unlock, it thus
+ * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
+ */
+}
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
+ */
+ atomic_dec(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * Computes the pte marker to copy from the given source entry into dst_vma.
+ * If no marker should be copied, returns 0.
+ * The caller should insert a new pte created with make_pte_marker().
+ */
+static inline pte_marker copy_pte_marker(
+ swp_entry_t entry, struct vm_area_struct *dst_vma)
+{
+ pte_marker srcm = pte_marker_get(entry);
+ /* Always copy error entries. */
+ pte_marker dstm = srcm & PTE_MARKER_POISONED;
+
+ /* Only copy PTE markers if UFFD register matches. */
+ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
+ dstm |= PTE_MARKER_UFFD_WP;
+
+ return dstm;
+}
+#endif
+
+/*
+ * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
+ * replace a none pte. NOTE! This should only be called when *pte is already
+ * cleared so we will never accidentally replace something valuable. Meanwhile
+ * none pte also means we are not demoting the pte, so a tlb flush is not
+ * needed; e.g., when the pte was cleared, the caller should already have
+ * taken care of the tlb flush.
*
- * Returns the LRU list a page should be on, as an index
- * into the array of LRU lists.
+ * Must be called with pgtable lock held so that no thread will see the none
+ * pte, and if they see it, they'll fault and serialize at the pgtable lock.
+ *
+ * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
*/
-static __always_inline enum lru_list page_lru(struct page *page)
+static inline void
+pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *pte, pte_t pteval)
{
- enum lru_list lru;
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ bool arm_uffd_pte = false;
- if (PageUnevictable(page))
- lru = LRU_UNEVICTABLE;
- else {
- lru = page_lru_base_type(page);
- if (PageActive(page))
- lru += LRU_ACTIVE;
- }
- return lru;
+ /* The current status of the pte should be "cleared" before calling */
+ WARN_ON_ONCE(!pte_none(ptep_get(pte)));
+
+ /*
+ * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
+ * thing, because zapping either drops the page outright, or (in
+ * TTU) quickly replaces the present pte with a swap pte. There's
+ * no way of leaking the bit.
+ */
+ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
+ return;
+
+ /* A uffd-wp wr-protected normal pte */
+ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
+ arm_uffd_pte = true;
+
+ /*
+ * A uffd-wp wr-protected swap pte. Note: this should even cover an
+ * existing pte marker with uffd-wp bit set.
+ */
+ if (unlikely(pte_swp_uffd_wp_any(pteval)))
+ arm_uffd_pte = true;
+
+ if (unlikely(arm_uffd_pte))
+ set_pte_at(vma->vm_mm, addr, pte,
+ make_pte_marker(PTE_MARKER_UFFD_WP));
+#endif
}
+
+static inline bool vma_has_recency(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+ return false;
+
+ if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
+ return false;
+
+ return true;
+}
+
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 496c3ff97cce..5240bd7bca33 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -5,15 +5,20 @@
#include <linux/mm_types_task.h>
#include <linux/auxvec.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
+#include <linux/seqlock.h>
+#include <linux/percpu_counter.h>
#include <asm/mmu.h>
@@ -22,6 +27,7 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+#define INIT_PASID 0
struct address_space;
struct mem_cgroup;
@@ -53,16 +59,16 @@ struct mem_cgroup;
* in each subpage, but you may need to restore some of their values
* afterwards.
*
- * SLUB uses cmpxchg_double() to atomically update its freelist and
- * counters. That requires that freelist & counters be adjacent and
- * double-word aligned. We align all struct pages to double-word
- * boundaries, and ensure that 'freelist' is aligned within the
- * struct.
+ * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
+ * That requires that freelist & counters in struct slab be adjacent and
+ * double-word aligned. Because struct slab currently just reinterprets the
+ * bits of struct page, we align all struct pages to double-word boundaries,
+ * and ensure that 'freelist' is aligned within struct slab.
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
#else
-#define _struct_page_alignment
+#define _struct_page_alignment __aligned(sizeof(unsigned long))
#endif
struct page {
@@ -78,13 +84,30 @@ struct page {
struct { /* Page cache and anonymous pages */
/**
* @lru: Pageout list, eg. active_list protected by
- * pgdat->lru_lock. Sometimes used as a generic list
+ * lruvec->lru_lock. Sometimes used as a generic list
* by the page owner.
*/
- struct list_head lru;
+ union {
+ struct list_head lru;
+
+ /* Or, for the Unevictable "LRU list" slot */
+ struct {
+ /* Always even, to negate PageTail */
+ void *__filler;
+ /* Count page's or folio's mlocks */
+ unsigned int mlock_count;
+ };
+
+ /* Or, free page */
+ struct list_head buddy_list;
+ struct list_head pcp_list;
+ };
/* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
- pgoff_t index; /* Our offset within mapping. */
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ unsigned long share; /* share count for fsdax */
+ };
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
@@ -95,66 +118,17 @@ struct page {
};
struct { /* page_pool used by netstack */
/**
- * @dma_addr: might require a 64-bit value even on
- * 32-bit architectures.
+ * @pp_magic: magic value to avoid recycling
+ * non-page_pool allocated pages.
*/
- dma_addr_t dma_addr;
- };
- struct { /* slab, slob and slub */
- union {
- struct list_head slab_list;
- struct { /* Partial pages */
- struct page *next;
-#ifdef CONFIG_64BIT
- int pages; /* Nr of pages left */
- int pobjects; /* Approximate count */
-#else
- short int pages;
- short int pobjects;
-#endif
- };
- };
- struct kmem_cache *slab_cache; /* not slob */
- /* Double-word boundary */
- void *freelist; /* first free object */
- union {
- void *s_mem; /* slab: first object */
- unsigned long counters; /* SLUB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- };
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
-
- /* First tail page only */
- unsigned char compound_dtor;
- unsigned char compound_order;
- atomic_t compound_mapcount;
- unsigned int compound_nr; /* 1 << compound_order */
- };
- struct { /* Second tail page of compound page */
- unsigned long _compound_pad_1; /* compound_head */
- atomic_t hpage_pinned_refcount;
- /* For both global and memcg */
- struct list_head deferred_list;
- };
- struct { /* Page table pages */
- unsigned long _pt_pad_1; /* compound_head */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
- unsigned long _pt_pad_2; /* mapping */
- union {
- struct mm_struct *pt_mm; /* x86 pgds only */
- atomic_t pt_frag_refcount; /* powerpc */
- };
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
};
struct { /* ZONE_DEVICE pages */
/** @pgmap: Points to the hosting device page map. */
@@ -190,19 +164,13 @@ struct page {
* which are currently stored here.
*/
unsigned int page_type;
-
- unsigned int active; /* SLAB */
- int units; /* SLOB */
};
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
atomic_t _refcount;
#ifdef CONFIG_MEMCG
- union {
- struct mem_cgroup *mem_cgroup;
- struct obj_cgroup **obj_cgroups;
- };
+ unsigned long memcg_data;
#endif
/*
@@ -223,19 +191,315 @@ struct page {
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
+
+#ifdef CONFIG_KMSAN
+ /*
+ * KMSAN metadata for this page:
+ * - shadow page: every bit indicates whether the corresponding
+ * bit of the original page is initialized (0) or not (1);
+ * - origin page: every 4 bytes contain an id of the stack trace
+ * where the uninitialized value was created.
+ */
+ struct page *kmsan_shadow;
+ struct page *kmsan_origin;
+#endif
} _struct_page_alignment;
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
+/*
+ * struct encoded_page - a nonexistent type marking this pointer
+ *
+ * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
+ * with the low bits of the pointer indicating extra context-dependent
+ * information. Only used in mmu_gather handling, and this acts as a type
+ * system check on that use.
+ *
+ * We only really have two guaranteed bits in general, although you could
+ * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+ * for more.
+ *
+ * Use the supplied helper functions to encode/decode the pointer and bits.
+ */
+struct encoded_page;
+
+#define ENCODED_PAGE_BITS 3ul
+
+/* Perform rmap removal after we have flushed the TLB. */
+#define ENCODED_PAGE_BIT_DELAY_RMAP 1ul
+
+/*
+ * The next item in an encoded_page array is the "nr_pages" argument, specifying
+ * the number of consecutive pages starting from this page, that all belong to
+ * the same folio. For example, "nr_pages" corresponds to the number of folio
+ * references that must be dropped. If this bit is not set, "nr_pages" is
+ * implicitly 1.
+ */
+#define ENCODED_PAGE_BIT_NR_PAGES_NEXT 2ul
+
+static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
{
- return &page[1].compound_mapcount;
+ BUILD_BUG_ON(flags > ENCODED_PAGE_BITS);
+ return (struct encoded_page *)(flags | (unsigned long)page);
}
-static inline atomic_t *compound_pincount_ptr(struct page *page)
+static inline unsigned long encoded_page_flags(struct encoded_page *page)
{
- return &page[2].hpage_pinned_refcount;
+ return ENCODED_PAGE_BITS & (unsigned long)page;
+}
+
+static inline struct page *encoded_page_ptr(struct encoded_page *page)
+{
+ return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page);
+}
+
+static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr)
+{
+ VM_WARN_ON_ONCE((nr << 2) >> 2 != nr);
+ return (struct encoded_page *)(nr << 2);
+}
+
+static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page)
+{
+ return ((unsigned long)page) >> 2;
}
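[Editor's note: a hedged usage sketch for the encoded_page helpers above. The caller is hypothetical, though mmu_gather batching uses the same idiom of packing a flag into the low pointer bits:

static void example_encoded_page(struct page *page)
{
	struct encoded_page *enc;
	bool delay_rmap;

	/* Pack the delay-rmap flag into the low bits of the pointer. */
	enc = encode_page(page, ENCODED_PAGE_BIT_DELAY_RMAP);

	/* Later, recover both the flag and the clean pointer. */
	delay_rmap = encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP;
	page = encoded_page_ptr(enc);	/* masks the flag bits back off */

	(void)delay_rmap;
}
]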
/*
+ * A swap entry has to fit into a "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
+/**
+ * struct folio - Represents a contiguous set of bytes.
+ * @flags: Identical to the page flags.
+ * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
+ * @mapping: The file this page belongs to, or refers to the anon_vma for
+ * anonymous memory.
+ * @index: Offset within the file, in units of pages. For anonymous memory,
+ * this is the index from the beginning of the mmap.
+ * @private: Filesystem per-folio data (see folio_attach_private()).
+ * @swap: Used for swp_entry_t if folio_test_swapcache().
+ * @_mapcount: Do not access this member directly. Use folio_mapcount() to
+ * find out how many times this folio is mapped by userspace.
+ * @_refcount: Do not access this member directly. Use folio_ref_count()
+ * to find how many references there are to this folio.
+ * @memcg_data: Memory Control Group data.
+ * @virtual: Virtual address in the kernel direct map.
+ * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
+ * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
+ * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+ * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
+ * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
+ *
+ * A folio is a physically, virtually and logically contiguous set
+ * of bytes. It is a power-of-two in size, and it is aligned to that
+ * same power-of-two. It is at least as large as %PAGE_SIZE. If it is
+ * in the page cache, it is at a file offset which is a multiple of that
+ * power-of-two. It may be mapped into userspace at an address which is
+ * at an arbitrary page offset, but its kernel virtual address is aligned
+ * to its size.
+ */
+struct folio {
+ /* private: don't document the anon union */
+ union {
+ struct {
+ /* public: */
+ unsigned long flags;
+ union {
+ struct list_head lru;
+ /* private: avoid cluttering the output */
+ struct {
+ void *__filler;
+ /* public: */
+ unsigned int mlock_count;
+ /* private: */
+ };
+ /* public: */
+ };
+ struct address_space *mapping;
+ pgoff_t index;
+ union {
+ void *private;
+ swp_entry_t swap;
+ };
+ atomic_t _mapcount;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+#if defined(WANT_PAGE_VIRTUAL)
+ void *virtual;
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
+ /* private: the union with struct page is transitional */
+ };
+ struct page page;
+ };
+ union {
+ struct {
+ unsigned long _flags_1;
+ unsigned long _head_1;
+ unsigned long _folio_avail;
+ /* public: */
+ atomic_t _entire_mapcount;
+ atomic_t _nr_pages_mapped;
+ atomic_t _pincount;
+#ifdef CONFIG_64BIT
+ unsigned int _folio_nr_pages;
+#endif
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_1;
+ };
+ union {
+ struct {
+ unsigned long _flags_2;
+ unsigned long _head_2;
+ /* public: */
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
+ /* private: the union with struct page is transitional */
+ };
+ struct {
+ unsigned long _flags_2a;
+ unsigned long _head_2a;
+ /* public: */
+ struct list_head _deferred_list;
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_2;
+ };
+};
+
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
+FOLIO_MATCH(flags, flags);
+FOLIO_MATCH(lru, lru);
+FOLIO_MATCH(mapping, mapping);
+FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(index, index);
+FOLIO_MATCH(private, private);
+FOLIO_MATCH(_mapcount, _mapcount);
+FOLIO_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+FOLIO_MATCH(memcg_data, memcg_data);
+#endif
+#if defined(WANT_PAGE_VIRTUAL)
+FOLIO_MATCH(virtual, virtual);
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+FOLIO_MATCH(_last_cpupid, _last_cpupid);
+#endif
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + sizeof(struct page))
+FOLIO_MATCH(flags, _flags_1);
+FOLIO_MATCH(compound_head, _head_1);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 2 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_2);
+FOLIO_MATCH(compound_head, _head_2);
+FOLIO_MATCH(flags, _flags_2a);
+FOLIO_MATCH(compound_head, _head_2a);
+#undef FOLIO_MATCH
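[Editor's note: the FOLIO_MATCH trick generalizes to any type that overlays struct page: pin each aliased field at compile time with a static_assert over offsetof(). A minimal sketch with a hypothetical overlay type:

struct example_overlay {
	unsigned long flags;		/* must alias page->flags */
	struct list_head lru;		/* must alias page->lru */
};

#define OVERLAY_MATCH(pg, ov) \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct example_overlay, ov))
OVERLAY_MATCH(flags, flags);
OVERLAY_MATCH(lru, lru);
#undef OVERLAY_MATCH
]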
+
+/**
+ * struct ptdesc - Memory descriptor for page tables.
+ * @__page_flags: Same as page flags. Powerpc only.
+ * @pt_rcu_head: For freeing page table pages.
+ * @pt_list: List of used page tables. Used for s390 and x86.
+ * @_pt_pad_1: Padding that aliases with page's compound head.
+ * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
+ * @__page_mapping: Aliases with page->mapping. Unused for page tables.
+ * @pt_index: Used for s390 gmap.
+ * @pt_mm: Used for x86 pgds.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+ * @_pt_pad_2: Padding to ensure proper alignment.
+ * @ptl: Lock for the page table.
+ * @__page_type: Same as page->page_type. Unused for page tables.
+ * @__page_refcount: Same as page refcount.
+ * @pt_memcg_data: Memcg data. Tracked for page tables here.
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct ptdesc {
+ unsigned long __page_flags;
+
+ union {
+ struct rcu_head pt_rcu_head;
+ struct list_head pt_list;
+ struct {
+ unsigned long _pt_pad_1;
+ pgtable_t pmd_huge_pte;
+ };
+ };
+ unsigned long __page_mapping;
+
+ union {
+ pgoff_t pt_index;
+ struct mm_struct *pt_mm;
+ atomic_t pt_frag_refcount;
+ };
+
+ union {
+ unsigned long _pt_pad_2;
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
+#endif
+ };
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long pt_memcg_data;
+#endif
+};
+
+#define TABLE_MATCH(pg, pt) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
+TABLE_MATCH(flags, __page_flags);
+TABLE_MATCH(compound_head, pt_list);
+TABLE_MATCH(compound_head, _pt_pad_1);
+TABLE_MATCH(mapping, __page_mapping);
+TABLE_MATCH(index, pt_index);
+TABLE_MATCH(rcu_head, pt_rcu_head);
+TABLE_MATCH(page_type, __page_type);
+TABLE_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+TABLE_MATCH(memcg_data, pt_memcg_data);
+#endif
+#undef TABLE_MATCH
+static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
+
+#define ptdesc_page(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct page *)(pt), \
+ struct ptdesc *: (struct page *)(pt)))
+
+#define ptdesc_folio(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct folio *)(pt), \
+ struct ptdesc *: (struct folio *)(pt)))
+
+#define page_ptdesc(p) (_Generic((p), \
+ const struct page *: (const struct ptdesc *)(p), \
+ struct page *: (struct ptdesc *)(p)))
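[Editor's note: a plain cast macro would silently strip const; the _Generic wrappers above dispatch on the argument's qualified type, so constness survives the conversion. A hedged illustration with a hypothetical caller:

static inline void example_ptdesc_casts(struct page *p, const struct page *cp)
{
	struct ptdesc *pt = page_ptdesc(p);		/* mutable in, mutable out */
	const struct ptdesc *cpt = page_ptdesc(cp);	/* const in, const out */

	(void)pt;
	(void)cpt;
}
]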
+
+/*
* Used for sizing the vmemmap region on some architectures
*/
#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
@@ -243,6 +507,12 @@ static inline atomic_t *compound_pincount_ptr(struct page *page)
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+/*
+ * page_private can be used on tail pages. However, PagePrivate is only
+ * checked by the VM on the head page. So page_private on the tail pages
+ * should be used for data that's ancillary to the head page (e.g. attaching
+ * buffer heads to tail pages after attaching buffer heads to the head page).
+ */
#define page_private(page) ((page)->private)
static inline void set_page_private(struct page *page, unsigned long private)
@@ -250,6 +520,11 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
+static inline void *folio_get_private(struct folio *folio)
+{
+ return folio->private;
+}
+
struct page_frag_cache {
void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -296,6 +571,73 @@ struct vm_userfaultfd_ctx {
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
+struct anon_vma_name {
+ struct kref kref;
+ /* The name needs to be at the end because it is dynamically sized. */
+ char name[];
+};
+
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
+ */
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+struct anon_vma_name *anon_vma_name_alloc(const char *name);
+void anon_vma_name_free(struct kref *kref);
+#else /* CONFIG_ANON_VMA_NAME */
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+#endif
+
+struct vma_lock {
+ struct rw_semaphore lock;
+};
+
+struct vma_numab_state {
+ /*
+ * Initialised to the time (in 'jiffies') after which this VMA
+ * should be scanned. Delays the first scan of a new VMA by at
+ * least sysctl_numa_balancing_scan_delay:
+ */
+ unsigned long next_scan;
+
+ /*
+ * Time in jiffies when pids_active[] is reset to
+ * detect phase change behaviour:
+ */
+ unsigned long pids_active_reset;
+
+ /*
+ * Approximate tracking of PIDs that trapped a NUMA hinting
+ * fault. May produce false positives due to hash collisions.
+ *
+ * [0] Previous PID tracking
+ * [1] Current PID tracking
+ *
+ * The window moves after pids_active_reset has expired, approximately
+ * every VMA_PID_RESET_PERIOD jiffies:
+ */
+ unsigned long pids_active[2];
+
+ /* MM scan sequence ID when scan first started after VMA creation */
+ int start_scan_seq;
+
+ /*
+ * MM scan sequence ID when the VMA was last completely scanned.
+ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
+ */
+ int prev_scan_seq;
+};
+
/*
* This struct describes a virtual memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
@@ -305,37 +647,55 @@ struct vm_userfaultfd_ctx {};
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
- unsigned long vm_start; /* Our start address within vm_mm. */
- unsigned long vm_end; /* The first byte after our end address
- within vm_mm. */
-
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next, *vm_prev;
+ union {
+ struct {
+ /* VMA covers [vm_start; vm_end) addresses within mm */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ };
+#ifdef CONFIG_PER_VMA_LOCK
+ struct rcu_head vm_rcu; /* Used for deferred freeing. */
+#endif
+ };
- struct rb_node vm_rb;
+ struct mm_struct *vm_mm; /* The address space we belong to. */
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
- * Largest free memory gap in bytes to the left of this VMA.
- * Either between this VMA and vma->vm_prev, or between one of the
- * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
- * get_unmapped_area find a free area of the right size.
+ * Flags, see mm.h.
+ * To modify use vm_flags_{init|reset|set|clear|mod} functions.
*/
- unsigned long rb_subtree_gap;
-
- /* Second cache line starts here. */
-
- struct mm_struct *vm_mm; /* The address space we belong to. */
+ union {
+ const vm_flags_t vm_flags;
+ vm_flags_t __private __vm_flags;
+ };
+#ifdef CONFIG_PER_VMA_LOCK
/*
- * Access permissions of this VMA.
- * See vmf_insert_mixed_prot() for discussion.
+ * Can only be written (using WRITE_ONCE()) while holding both:
+ * - mmap_lock (in write mode)
+ * - vm_lock->lock (in write mode)
+ * Can be read reliably while holding one of:
+ * - mmap_lock (in read or write mode)
+ * - vm_lock->lock (in read or write mode)
+ * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+ * while holding nothing (except RCU to keep the VMA struct allocated).
+ *
+ * This sequence counter is explicitly allowed to overflow; sequence
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
*/
- pgprot_t vm_page_prot;
- unsigned long vm_flags; /* Flags, see mm.h. */
+ int vm_lock_seq;
+ struct vma_lock *vm_lock;
+
+ /* Flag to indicate areas detached from the mm->mm_mt tree */
+ bool detached;
+#endif
/*
* For areas with an address space and backing store,
* linkage into the address_space->i_mmap interval tree.
*/
struct {
struct rb_node rb;
@@ -361,6 +721,14 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
@@ -370,26 +738,45 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ struct vma_numab_state *numab_state; /* NUMA Balancing state */
+#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
-struct core_thread {
- struct task_struct *task;
- struct core_thread *next;
-};
+#ifdef CONFIG_NUMA
+#define vma_policy(vma) ((vma)->vm_policy)
+#else
+#define vma_policy(vma) NULL
+#endif
-struct core_state {
- atomic_t nr_threads;
- struct core_thread dumper;
- struct completion startup;
+#ifdef CONFIG_SCHED_MM_CID
+struct mm_cid {
+ u64 time;
+ int cid;
};
+#endif
struct kioctx_table;
+struct iommu_mm_data;
struct mm_struct {
struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u64 vmacache_seqnum; /* per-thread vmacache */
+ /*
+ * Fields which are often written to are placed in a separate
+ * cache line.
+ */
+ struct {
+ /**
+ * @mm_count: The number of references to &struct
+ * mm_struct (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to
+ * 0, the &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
+ } ____cacheline_aligned_in_smp;
+
+ struct maple_tree mm_mt;
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
@@ -398,12 +785,11 @@ struct mm_struct {
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
+ /* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
#ifdef CONFIG_MEMBARRIER
@@ -427,23 +813,42 @@ struct mm_struct {
*/
atomic_t mm_users;
+#ifdef CONFIG_SCHED_MM_CID
/**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
+ * @pcpu_cid: Per-cpu current cid.
*
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
+ * Keep track of the currently allocated mm_cid for each cpu.
+ * The per-cpu mm_cid values are serialized by their respective
+ * runqueue locks.
*/
- atomic_t mm_count;
-
+ struct mm_cid __percpu *pcpu_cid;
+ /*
+ * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
+ *
+ * When the next mm_cid scan is due (in jiffies).
+ */
+ unsigned long mm_cid_next_scan;
+#endif
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
+ /*
+ * With some kernel configs, mmap_lock currently sits at offset
+ * 0x120 inside 'mm_struct', which is near-optimal: its two hot
+ * fields, 'count' and 'owner', land in two different cachelines,
+ * and when mmap_lock is highly contended both fields are accessed
+ * frequently, so this layout reduces cache bouncing.
+ *
+ * So please be careful when adding new fields before mmap_lock,
+ * which can easily push the two fields into one cacheline.
+ */
struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -451,6 +856,23 @@ struct mm_struct {
* init_mm.mmlist, and are protected
* by mmlist_lock
*/
+#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * This field has lock-like semantics, meaning it is sometimes
+ * accessed with ACQUIRE/RELEASE semantics.
+ * Roughly speaking, incrementing the sequence number is
+ * equivalent to releasing locks on VMAs; reading the sequence
+ * number can be part of taking a read lock on a VMA.
+ *
+ * Can be modified under write mmap_lock using RELEASE
+ * semantics.
+ * Can be read with no other protection when holding write
+ * mmap_lock.
+ * Can be read with ACQUIRE semantics if not holding write
+ * mmap_lock.
+ */
+ int mm_lock_seq;
+#endif
unsigned long hiwater_rss; /* High-watermark of RSS usage */
@@ -464,18 +886,22 @@ struct mm_struct {
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
+
spinlock_t arg_lock; /* protect the below fields */
+
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
+ struct percpu_counter rss_stat[NR_MM_COUNTERS];
struct linux_binfmt *binfmt;
@@ -484,8 +910,6 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
-
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
@@ -515,33 +939,73 @@ struct mm_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and
- * migrate pages to new nodes if necessary.
+ * numa_next_scan is the next time that PTEs will be remapped
+ * PROT_NONE to trigger NUMA hinting faults; such faults gather
+ * statistics and migrate pages to new nodes if necessary.
*/
unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
+ /* Restart point for scanning and remapping PTEs. */
unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
+ /* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
* that can move process memory needs to flush the TLB when
- * moving a PROT_NONE or PROT_NUMA mapped page.
+ * moving a PROT_NONE mapped page.
*/
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ atomic_t tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT
+ struct rcu_head delayed_drop;
+#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
+
+#ifdef CONFIG_IOMMU_MM_DATA
+ struct iommu_mm_data *iommu_mm;
+#endif
+#ifdef CONFIG_KSM
+ /*
+ * Represents how many pages of this process are involved in KSM
+ * merging (not including ksm_zero_pages).
+ */
+ unsigned long ksm_merging_pages;
+ /*
+ * Represents how many pages have been checked for KSM merging,
+ * including both merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
+ /*
+ * Represents how many empty pages have been merged with kernel
+ * zero pages when KSM use_zero_pages is enabled.
+ */
+ unsigned long ksm_zero_pages;
+#endif /* CONFIG_KSM */
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ struct {
+ /* this mm_struct is on lru_gen_mm_list */
+ struct list_head list;
+ /*
+ * Set when switching to this mm_struct, as a hint of
+ * whether it has been used since the last time per-node
+ * page table walkers cleared the corresponding bits.
+ */
+ unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+ /* points to the memcg of "owner" above */
+ struct mem_cgroup *memcg;
+#endif
+ } lru_gen;
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
} __randomize_layout;
/*
@@ -551,6 +1015,8 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
+ MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
@@ -568,96 +1034,176 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}
-struct mmu_gather;
-extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end);
-extern void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end);
+#ifdef CONFIG_LRU_GEN
-static inline void init_tlb_flush_pending(struct mm_struct *mm)
+struct lru_gen_mm_list {
+ /* mm_struct list for page table walkers */
+ struct list_head fifo;
+ /* protects the list above */
+ spinlock_t lock;
+};
+
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+void lru_gen_migrate_mm(struct mm_struct *mm);
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
{
- atomic_set(&mm->tlb_flush_pending, 0);
+ INIT_LIST_HEAD(&mm->lru_gen.list);
+ mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+ mm->lru_gen.memcg = NULL;
+#endif
}
-static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+static inline void lru_gen_use_mm(struct mm_struct *mm)
{
- atomic_inc(&mm->tlb_flush_pending);
/*
- * The only time this value is relevant is when there are indeed pages
- * to flush. And we'll only flush pages after changing them, which
- * requires the PTL.
- *
- * So the ordering here is:
- *
- * atomic_inc(&mm->tlb_flush_pending);
- * spin_lock(&ptl);
- * ...
- * set_pte_at();
- * spin_unlock(&ptl);
- *
- * spin_lock(&ptl)
- * mm_tlb_flush_pending();
- * ....
- * spin_unlock(&ptl);
- *
- * flush_tlb_range();
- * atomic_dec(&mm->tlb_flush_pending);
- *
- * Where the increment if constrained by the PTL unlock, it thus
- * ensures that the increment is visible if the PTE modification is
- * visible. After all, if there is no PTE modification, nobody cares
- * about TLB flushes either.
- *
- * This very much relies on users (mm_tlb_flush_pending() and
- * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
- * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
- * locks (PPC) the unlock of one doesn't order against the lock of
- * another PTL.
- *
- * The decrement is ordered by the flush_tlb_range(), such that
- * mm_tlb_flush_pending() will not return false unless all flushes have
- * completed.
+ * When the bitmap is set, page reclaim knows this mm_struct has been
+ * used since the last time it cleared the bitmap. So it might be worth
+ * walking the page tables of this mm_struct to clear the accessed bit.
*/
+ WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
{
- /*
- * See inc_tlb_flush_pending().
- *
- * This cannot be smp_mb__before_atomic() because smp_mb() simply does
- * not order against TLB invalidate completion, which is what we need.
- *
- * Therefore we must rely on tlb_flush_*() to guarantee order.
- */
- atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline void lru_gen_del_mm(struct mm_struct *mm)
{
- /*
- * Must be called after having acquired the PTL; orders against that
- * PTLs release and therefore ensures that if we observe the modified
- * PTE we must also observe the increment from inc_tlb_flush_pending().
- *
- * That is, it only guarantees to return true if there is a flush
- * pending for _this_ PTL.
- */
- return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
{
- /*
- * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
- * for which there is a TLB flush pending in order to guarantee
- * we've seen both that PTE modification and the increment.
- *
- * (no requirement on actually still holding the PTL, that is irrelevant)
- */
- return atomic_read(&mm->tlb_flush_pending) > 1;
}
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
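A note on the bitmap write above: it is deliberately racy, serving only as a hint to page reclaim that this mm has been used since the walker last cleared it. A minimal sketch of the consuming side, assuming a hypothetical should_walk() helper (the real logic lives in the MGLRU walker in mm/vmscan.c):

	/* Hypothetical consumer of the lru_gen.bitmap hint set by lru_gen_use_mm(). */
	static bool should_walk(struct mm_struct *mm)
	{
		if (!READ_ONCE(mm->lru_gen.bitmap))
			return false;			/* not used since last clear */
		WRITE_ONCE(mm->lru_gen.bitmap, 0);	/* clear before walking */
		return true;
	}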
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = NULL, \
+ .status = ma_start, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
+}
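A minimal usage sketch of the iterator defined above, assuming for_each_vma() from <linux/mm.h> and a read-locked mmap_lock:

	static unsigned long count_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		unsigned long nr = 0;
		VMA_ITERATOR(vmi, mm, 0);	/* start the maple-tree walk at 0 */

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)
			nr++;
		mmap_read_unlock(mm);
		return nr;
	}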
+
+#ifdef CONFIG_SCHED_MM_CID
+
+enum mm_cid_state {
+ MM_CID_UNSET = -1U, /* Unset state has lazy_put flag set. */
+ MM_CID_LAZY_PUT = (1U << 31),
+};
+
+static inline bool mm_cid_is_unset(int cid)
+{
+ return cid == MM_CID_UNSET;
+}
+
+static inline bool mm_cid_is_lazy_put(int cid)
+{
+ return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
+}
+
+static inline bool mm_cid_is_valid(int cid)
+{
+ return !(cid & MM_CID_LAZY_PUT);
+}
+
+static inline int mm_cid_set_lazy_put(int cid)
+{
+ return cid | MM_CID_LAZY_PUT;
+}
+
+static inline int mm_cid_clear_lazy_put(int cid)
+{
+ return cid & ~MM_CID_LAZY_PUT;
+}
+
+/* Accessor for struct mm_struct's cidmask. */
+static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+{
+ unsigned long cid_bitmap = (unsigned long)mm;
+
+ cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ /* Skip cpu_bitmap */
+ cid_bitmap += cpumask_size();
+ return (struct cpumask *)cid_bitmap;
+}
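The accessor above works because the cid mask is carved out of the same trailing allocation as cpu_bitmap. A sketch of the implied size arithmetic (mm_struct_alloc_size() is a hypothetical name; kernel/fork.c computes the equivalent when sizing the mm_struct cache):

	static inline size_t mm_struct_alloc_size(void)
	{
		return offsetof(struct mm_struct, cpu_bitmap)
			+ cpumask_size()	/* cpu_bitmap itself */
			+ mm_cid_size();	/* cid mask placed right behind it */
	}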
+
+static inline void mm_init_cid(struct mm_struct *mm)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
+
+ pcpu_cid->cid = MM_CID_UNSET;
+ pcpu_cid->time = 0;
+ }
+ cpumask_clear(mm_cidmask(mm));
+}
+
+static inline int mm_alloc_cid(struct mm_struct *mm)
+{
+ mm->pcpu_cid = alloc_percpu(struct mm_cid);
+ if (!mm->pcpu_cid)
+ return -ENOMEM;
+ mm_init_cid(mm);
+ return 0;
+}
+
+static inline void mm_destroy_cid(struct mm_struct *mm)
+{
+ free_percpu(mm->pcpu_cid);
+ mm->pcpu_cid = NULL;
+}
+
+static inline unsigned int mm_cid_size(void)
+{
+ return cpumask_size();
+}
+#else /* CONFIG_SCHED_MM_CID */
+static inline void mm_init_cid(struct mm_struct *mm) { }
+static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_destroy_cid(struct mm_struct *mm) { }
+static inline unsigned int mm_cid_size(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MM_CID */
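Illustrative call order for the helpers above, modelled on what the fork and mmdrop paths do; hypothetical_mm_init() is not a real kernel function:

	static int hypothetical_mm_init(struct mm_struct *mm)
	{
		if (mm_alloc_cid(mm))	/* allocates pcpu_cid, clears the cidmask */
			return -ENOMEM;
		/* ... further mm setup; on any later failure, undo with: */
		/* mm_destroy_cid(mm); */
		return 0;
	}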
+
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
+
struct vm_fault;
/**
@@ -676,7 +1222,6 @@ typedef __bitwise unsigned int vm_fault_t;
* @VM_FAULT_OOM: Out Of Memory
* @VM_FAULT_SIGBUS: Bad access
* @VM_FAULT_MAJOR: Page read from storage
- * @VM_FAULT_WRITE: Special case for get_user_pages
* @VM_FAULT_HWPOISON: Hit poisoned small page
* @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded
* in upper bits
@@ -689,6 +1234,7 @@ typedef __bitwise unsigned int vm_fault_t;
* @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
* fsync() to complete (for synchronous page faults
* in DAX)
+ * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
* @VM_FAULT_HINDEX_MASK: mask HINDEX value
*
*/
@@ -696,7 +1242,6 @@ enum vm_fault_reason {
VM_FAULT_OOM = (__force vm_fault_t)0x000001,
VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002,
VM_FAULT_MAJOR = (__force vm_fault_t)0x000004,
- VM_FAULT_WRITE = (__force vm_fault_t)0x000008,
VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010,
VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040,
@@ -706,6 +1251,7 @@ enum vm_fault_reason {
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
+ VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
};
@@ -721,7 +1267,6 @@ enum vm_fault_reason {
{ VM_FAULT_OOM, "OOM" }, \
{ VM_FAULT_SIGBUS, "SIGBUS" }, \
{ VM_FAULT_MAJOR, "MAJOR" }, \
- { VM_FAULT_WRITE, "WRITE" }, \
{ VM_FAULT_HWPOISON, "HWPOISON" }, \
{ VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
{ VM_FAULT_SIGSEGV, "SIGSEGV" }, \
@@ -730,7 +1275,8 @@ enum vm_fault_reason {
{ VM_FAULT_RETRY, "RETRY" }, \
{ VM_FAULT_FALLBACK, "FALLBACK" }, \
{ VM_FAULT_DONE_COW, "DONE_COW" }, \
- { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }
+ { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \
+ { VM_FAULT_COMPLETED, "COMPLETED" }
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
@@ -764,12 +1310,156 @@ enum tlb_flush_reason {
NR_TLB_FLUSH_REASONS,
};
- /*
- * A swap entry has to fit into a "unsigned long", as the entry is hidden
- * in the "index" field of the swapper address space.
- */
-typedef struct {
- unsigned long val;
-} swp_entry_t;
+/**
+ * enum fault_flag - Fault flag definitions.
+ * @FAULT_FLAG_WRITE: Fault was a write fault.
+ * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
+ * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
+ * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
+ * @FAULT_FLAG_TRIED: The fault has been tried once.
+ * @FAULT_FLAG_USER: The fault originated in userspace.
+ * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
+ * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
+ * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
+ * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
+ * COW mapping, making sure that an exclusive anon page is
+ * mapped after the fault.
+ * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
+ * We should only access orig_pte if this flag is set.
+ * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
+ *
+ * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can control
+ * whether page faults may be retried by setting these two fault flags
+ * correctly. Currently there are three legal combinations:
+ *
+ * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
+ * this is the first try
+ *
+ * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
+ * we've already tried at least once
+ *
+ * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
+ *
+ * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
+ * be used. Note that page faults can be allowed to retry multiple times,
+ * in which case we'll have an initial fault with flags (a), followed by
+ * continuous faults with flags (b). We should always try to detect pending
+ * signals before a retry to make sure the continuous page faults can still be
+ * interrupted if necessary.
+ *
+ * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
+ * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
+ * applied to mappings that are not COW mappings.
+ */
+enum fault_flag {
+ FAULT_FLAG_WRITE = 1 << 0,
+ FAULT_FLAG_MKWRITE = 1 << 1,
+ FAULT_FLAG_ALLOW_RETRY = 1 << 2,
+ FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
+ FAULT_FLAG_KILLABLE = 1 << 4,
+ FAULT_FLAG_TRIED = 1 << 5,
+ FAULT_FLAG_USER = 1 << 6,
+ FAULT_FLAG_REMOTE = 1 << 7,
+ FAULT_FLAG_INSTRUCTION = 1 << 8,
+ FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
+ FAULT_FLAG_UNSHARE = 1 << 10,
+ FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
+ FAULT_FLAG_VMA_LOCK = 1 << 12,
+};
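The three legal ALLOW_RETRY/TRIED combinations and the WRITE/UNSHARE exclusion documented above can be encoded as a predicate; fault_flags_valid() here is a hypothetical helper, not kernel API:

	static inline bool fault_flags_valid(enum fault_flag flags)
	{
		/* (!ALLOW_RETRY && TRIED) is the one illegal retry state. */
		if ((flags & FAULT_FLAG_TRIED) && !(flags & FAULT_FLAG_ALLOW_RETRY))
			return false;
		/* WRITE and UNSHARE are mutually exclusive. */
		return !((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_UNSHARE));
	}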
+
+typedef unsigned int __bitwise zap_flags_t;
+
+/*
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
+ *
+ *
+ * FIXME: For pages which are part of a filesystem, mappings are subject to the
+ * lifetime enforced by the filesystem and we need guarantees that longterm
+ * users like RDMA and V4L2 only establish mappings which coordinate usage with
+ * the filesystem. Ideas for this coordination include revoking the longterm
+ * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
+ * added after the problem with filesystems was found, FS DAX VMAs are
+ * specifically failed. Filesystem pages are still subject to bugs and use of
+ * FOLL_LONGTERM should be avoided on those pages.
+ *
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
+ * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_page() to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/core-api/pin_user_pages.rst for more information.
+ */
+
+enum {
+ /* check pte is writable */
+ FOLL_WRITE = 1 << 0,
+ /* do get_page on page */
+ FOLL_GET = 1 << 1,
+ /* give error on hole if it would be zero */
+ FOLL_DUMP = 1 << 2,
+ /* get_user_pages read/write w/o permission */
+ FOLL_FORCE = 1 << 3,
+ /*
+ * if a disk transfer is needed, start the IO and return without waiting
+ * upon it
+ */
+ FOLL_NOWAIT = 1 << 4,
+ /* do not fault in pages */
+ FOLL_NOFAULT = 1 << 5,
+ /* check page is hwpoisoned */
+ FOLL_HWPOISON = 1 << 6,
+ /* don't do file mappings */
+ FOLL_ANON = 1 << 7,
+ /*
+ * FOLL_LONGTERM indicates that the page will be held for an indefinite
+ * time period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
+ */
+ FOLL_LONGTERM = 1 << 8,
+ /* split huge pmd before returning */
+ FOLL_SPLIT_PMD = 1 << 9,
+ /* allow returning PCI P2PDMA pages */
+ FOLL_PCI_P2PDMA = 1 << 10,
+ /* allow interrupts from generic signals */
+ FOLL_INTERRUPTIBLE = 1 << 11,
+ /*
+ * Always honor (trigger) NUMA hinting faults.
+ *
+ * FOLL_WRITE implicitly honors NUMA hinting faults because a
+ * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
+ * apply). get_user_pages_fast_only() always implicitly honors NUMA
+ * hinting faults.
+ */
+ FOLL_HONOR_NUMA_FAULT = 1 << 12,
+
+ /* See also internal only FOLL flags in mm/internal.h */
+};
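A sketch of the acquire/release pairing the comment block above insists on, for a Direct-IO-style user; the pin_user_pages() signature shown matches recent kernels (older ones take an extra vmas argument), and error handling is trimmed:

	static long pin_then_release(unsigned long uaddr, struct page **pages,
				     unsigned long nr)
	{
		long pinned;

		mmap_read_lock(current->mm);
		pinned = pin_user_pages(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM, pages);
		mmap_read_unlock(current->mm);
		if (pinned < 0)
			return pinned;
		/* ... touch page data ... */
		while (pinned--)
			unpin_user_page(pages[pinned]);	/* never put_page() here */
		return 0;
	}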
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index c1bc6731125c..a2f6179b672b 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -9,9 +9,6 @@
*/
#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/atomic.h>
-#include <linux/cpumask.h>
#include <asm/page.h>
@@ -25,18 +22,6 @@
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u64 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
-/*
* When updating this, please also update struct resident_page_types[] in
* kernel/fork.c
*/
@@ -48,18 +33,7 @@ enum {
NR_MM_COUNTERS
};
-#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
-#define SPLIT_RSS_COUNTING
-/* per-thread cached information, */
-struct task_rss_stat {
- int events; /* for synchronization threshold */
- int count[NR_MM_COUNTERS];
-};
-#endif /* USE_SPLIT_PTE_PTLOCKS */
-
-struct mm_rss_stat {
- atomic_long_t count[NR_MM_COUNTERS];
-};
+struct page;
struct page_frag {
struct page *page;
@@ -77,8 +51,8 @@ struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* The arch code makes the following promise: generic code can modify a
- * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
- * needed barriers), then call arch_tlbbatch_flush(), and the entries
+ * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+ * all needed barriers), then call arch_tlbbatch_flush(), and the entries
* will be flushed on all CPUs by the time that arch_tlbbatch_flush()
* returns.
*/
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 6f34c33075f9..dc7048824be8 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -15,6 +15,9 @@
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
+#ifndef MAP_ABOVE4G
+#define MAP_ABOVE4G 0
+#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
@@ -31,6 +34,9 @@
/*
* The historical set of flags that all mmap implementations implicitly
* support when a ->mmap_validate() op is not provided in file_operations.
+ *
+ * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
+ * kernel.
*/
#define LEGACY_MAP_MASK (MAP_SHARED \
| MAP_PRIVATE \
@@ -47,6 +53,7 @@
| MAP_STACK \
| MAP_HUGETLB \
| MAP_32BIT \
+ | MAP_ABOVE4G \
| MAP_HUGE_2MB \
| MAP_HUGE_1GB)
@@ -78,15 +85,16 @@ static inline void vm_unacct_memory(long pages)
}
/*
- * Allow architectures to handle additional protection bits
+ * Allow architectures to handle additional protection and flag bits. The
+ * overriding macros must be defined in the arch-specific asm/mman.h file.
*/
#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif
-#ifndef arch_vm_get_page_prot
-#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#ifndef arch_calc_vm_flag_bits
+#define arch_calc_vm_flag_bits(flags) 0
#endif
#ifndef arch_validate_prot
@@ -103,6 +111,19 @@ static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
#define arch_validate_prot arch_validate_prot
#endif
+#ifndef arch_validate_flags
+/*
+ * This is called from mmap() and mprotect() with the updated vma->vm_flags.
+ *
+ * Returns true if the VM_* flags are valid.
+ */
+static inline bool arch_validate_flags(unsigned long flags)
+{
+ return true;
+}
+#define arch_validate_flags arch_validate_flags
+#endif
+
/*
* Optimisation macro. It is equivalent to:
* (x & bit1) ? bit2 : 0
@@ -133,10 +154,45 @@ static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
- _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
- _calc_vm_trans(flags, MAP_SYNC, VM_SYNC );
+ _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+ _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
+ arch_calc_vm_flag_bits(flags);
}
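A worked instance of the optimisation macro's stated equivalence, using one of the translations above (values are the asm-generic ones; some architectures differ):

	/*
	 * With the generic MAP_LOCKED == 0x2000 and VM_LOCKED == 0x2000:
	 *   _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED)
	 *     == (flags & MAP_LOCKED) ? VM_LOCKED : 0
	 *     == flags & 0x2000	(bits equal: pure mask, no branch)
	 */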
unsigned long vm_commit_limit(void);
+
+/*
+ * Denies creating a writable executable mapping or gaining executable permissions.
+ *
+ * This denies the following:
+ *
+ * a) mmap(PROT_WRITE | PROT_EXEC)
+ *
+ * b) mmap(PROT_WRITE)
+ * mprotect(PROT_EXEC)
+ *
+ * c) mmap(PROT_WRITE)
+ * mprotect(PROT_READ)
+ * mprotect(PROT_EXEC)
+ *
+ * But allows the following:
+ *
+ * d) mmap(PROT_READ | PROT_EXEC)
+ * mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ */
+static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+{
+ if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+ return false;
+
+ if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+ return true;
+
+ if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+ return true;
+
+ return false;
+}
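From userspace, the policy above is switched on per process via the MDWE prctl; a minimal demonstration, assuming PR_SET_MDWE and PR_MDWE_REFUSE_EXEC_GAIN from recent <linux/prctl.h> headers:

	#include <sys/prctl.h>
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0);

		/* case (a): denied outright */
		void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("W|X mmap: %s\n", p == MAP_FAILED ? "denied" : "allowed");

		/* case (b): the mprotect() is what gets refused */
		void *q = mmap(NULL, 4096, PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("mprotect(PROT_EXEC): %s\n",
		       mprotect(q, 4096, PROT_EXEC) ? "denied" : "allowed");
		return 0;
	}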
+
#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 0707671851a8..8d38dcb6d044 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -1,11 +1,102 @@
#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H
+#include <linux/lockdep.h>
+#include <linux/mm_types.h>
#include <linux/mmdebug.h>
+#include <linux/rwsem.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/types.h>
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+DECLARE_TRACEPOINT(mmap_lock_start_locking);
+DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
+DECLARE_TRACEPOINT(mmap_lock_released);
+
+#ifdef CONFIG_TRACING
+
+void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
+void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
+ bool success);
+void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+ if (tracepoint_enabled(mmap_lock_start_locking))
+ __mmap_lock_do_trace_start_locking(mm, write);
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+ if (tracepoint_enabled(mmap_lock_acquire_returned))
+ __mmap_lock_do_trace_acquire_returned(mm, write, success);
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+ if (tracepoint_enabled(mmap_lock_released))
+ __mmap_lock_do_trace_released(mm, write);
+}
+
+#else /* !CONFIG_TRACING */
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+}
+
+#endif /* CONFIG_TRACING */
+
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held_write(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should normally NOT be called manually from other places.
+ * If you want to call this manually anyway, keep in mind that this will release
+ * *all* VMA write locks, including ones from further up the stack.
+ */
+static inline void vma_end_write_all(struct mm_struct *mm)
+{
+ mmap_assert_write_locked(mm);
+ /*
+ * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+ * mmap_lock being held.
+ * We need RELEASE semantics here to ensure that preceding stores into
+ * the VMA take effect before we unlock it with this store.
+ * Pairs with ACQUIRE semantics in vma_start_read().
+ */
+ smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
+}
+#else
+static inline void vma_end_write_all(struct mm_struct *mm) {}
+#endif
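For context, a simplified model of the reader side that the RELEASE store above pairs with; the real vma_start_read() also takes the per-VMA rwsem, so this sketches the sequence check only:

	static inline bool vma_seq_read_ok(struct vm_area_struct *vma)
	{
		/* ACQUIRE pairs with the RELEASE in vma_end_write_all(). */
		int mm_seq = smp_load_acquire(&vma->vm_mm->mm_lock_seq);

		/* Equal sequences mean a writer currently owns this VMA. */
		return READ_ONCE(vma->vm_lock_seq) != mm_seq;
	}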
+
static inline void mmap_init_lock(struct mm_struct *mm)
{
init_rwsem(&mm->mmap_lock);
@@ -13,78 +104,84 @@ static inline void mmap_init_lock(struct mm_struct *mm)
static inline void mmap_write_lock(struct mm_struct *mm)
{
+ __mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
}
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
+ __mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
}
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
- return down_write_killable(&mm->mmap_lock);
-}
+ int ret;
-static inline bool mmap_write_trylock(struct mm_struct *mm)
-{
- return down_write_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
+ return ret;
}
static inline void mmap_write_unlock(struct mm_struct *mm)
{
+ __mmap_lock_trace_released(mm, true);
+ vma_end_write_all(mm);
up_write(&mm->mmap_lock);
}
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+ vma_end_write_all(mm);
downgrade_write(&mm->mmap_lock);
}
static inline void mmap_read_lock(struct mm_struct *mm)
{
+ __mmap_lock_trace_start_locking(mm, false);
down_read(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, true);
}
static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
- return down_read_killable(&mm->mmap_lock);
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
+ return ret;
}
static inline bool mmap_read_trylock(struct mm_struct *mm)
{
- return down_read_trylock(&mm->mmap_lock) != 0;
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, false, ret);
+ return ret;
}
static inline void mmap_read_unlock(struct mm_struct *mm)
{
+ __mmap_lock_trace_released(mm, false);
up_read(&mm->mmap_lock);
}
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
- if (down_read_trylock(&mm->mmap_lock)) {
- rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
- return true;
- }
- return false;
-}
-
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
+ __mmap_lock_trace_released(mm, false);
up_read_non_owner(&mm->mmap_lock);
}
-static inline void mmap_assert_locked(struct mm_struct *mm)
+static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
- lockdep_assert_held(&mm->mmap_lock);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
-}
-
-static inline void mmap_assert_write_locked(struct mm_struct *mm)
-{
- lockdep_assert_held_write(&mm->mmap_lock);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+ return rwsem_is_contended(&mm->mmap_lock);
}
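Typical use of the helper above: long read-side walks poll it so queued writers are not starved. An illustrative pattern:

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (mmap_lock_is_contended(mm)) {
			mmap_read_unlock(mm);
			cond_resched();		/* let the writer through */
			mmap_read_lock(mm);
		}
		/* ... process vma ... */
	}
	mmap_read_unlock(mm);

Note that a real walker must also revalidate its iterator after re-acquiring the lock; that step is omitted here.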
#endif /* _LINUX_MMAP_LOCK_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 7d46411ffaa2..f34407cc2788 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -32,6 +32,7 @@ struct mmc_csd {
unsigned int r2w_factor;
unsigned int max_dtr;
unsigned int erase_size; /* In sectors */
+ unsigned int wp_grp_size;
unsigned int read_blkbits;
unsigned int write_blkbits;
unsigned int capacity;
@@ -52,9 +53,6 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
- u8 max_packed_writes;
- u8 max_packed_reads;
- u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -109,6 +107,7 @@ struct mmc_ext_csd {
u8 raw_hc_erase_gap_size; /* 221 */
u8 raw_erase_timeout_mult; /* 223 */
u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_boot_mult; /* 226 */
u8 raw_sec_trim_mult; /* 229 */
u8 raw_sec_erase_mult; /* 230 */
u8 raw_sec_feature_support;/* 231 */
@@ -139,6 +138,8 @@ struct sd_scr {
unsigned char cmds;
#define SD_SCR_CMD20_SUPPORT (1<<0)
#define SD_SCR_CMD23_SUPPORT (1<<1)
+#define SD_SCR_CMD48_SUPPORT (1<<2)
+#define SD_SCR_CMD58_SUPPORT (1<<3)
};
struct sd_ssr {
@@ -189,6 +190,25 @@ struct sd_switch_caps {
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
};
+struct sd_ext_reg {
+ u8 fno;
+ u8 page;
+ u16 offset;
+ u8 rev;
+ u8 feature_enabled;
+ u8 feature_support;
+/* Power Management Function. */
+#define SD_EXT_POWER_OFF_NOTIFY (1<<0)
+#define SD_EXT_POWER_SUSTENANCE (1<<1)
+#define SD_EXT_POWER_DOWN_MODE (1<<2)
+/* Performance Enhancement Function. */
+#define SD_EXT_PERF_FX_EVENT (1<<0)
+#define SD_EXT_PERF_CARD_MAINT (1<<1)
+#define SD_EXT_PERF_HOST_MAINT (1<<2)
+#define SD_EXT_PERF_CACHE (1<<3)
+#define SD_EXT_PERF_CMD_QUEUE (1<<4)
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -197,7 +217,8 @@ struct sdio_cccr {
wide_bus:1,
high_power:1,
high_speed:1,
- disable_cd:1;
+ disable_cd:1,
+ enable_async_irq:1;
};
struct sdio_cis {
@@ -270,7 +291,11 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
+#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+ bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
unsigned int erase_size; /* erase size in sectors */
@@ -279,6 +304,7 @@ struct mmc_card {
unsigned int eg_boundary; /* don't cross erase-group boundaries */
unsigned int erase_arg; /* erase / trim / discard */
u8 erased_byte; /* value of erased bytes */
+ unsigned int wp_grp_size; /* write group size in sectors */
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
@@ -290,6 +316,8 @@ struct mmc_card {
struct sd_scr scr; /* extra SD information */
struct sd_ssr ssr; /* yet more SD information */
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+ struct sd_ext_reg ext_power; /* SD extension reg for PM */
+ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
@@ -297,6 +325,8 @@ struct mmc_card {
struct sdio_cis cis; /* common tuple info */
struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
+ u8 major_rev; /* major revision number */
+ u8 minor_rev; /* minor revision number */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
struct sdio_func_tuple *tuples; /* unknown common tuples */
@@ -309,7 +339,6 @@ struct mmc_card {
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
- unsigned int bouncesz; /* Bounce buffer size */
struct workqueue_struct *complete_wq; /* Private workqueue */
};
@@ -318,10 +347,16 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+static inline int mmc_card_enable_async_irq(struct mmc_card *card)
+{
+ return card->cccr.enable_async_irq;
+}
+
bool mmc_card_is_blockaddr(struct mmc_card *card);
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
+#define mmc_card_sd_combo(c) ((c)->type == MMC_TYPE_SD_COMBO)
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 29aa50711626..2c7928a50907 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -27,7 +27,6 @@ struct mmc_command {
u32 opcode;
u32 arg;
#define MMC_CMD23_ARG_REL_WR (1 << 31)
-#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
u32 resp[4];
unsigned int flags; /* expected response type */
@@ -162,6 +161,11 @@ struct mmc_request {
bool cap_cmd_during_tfr;
int tag;
+
+#ifdef CONFIG_MMC_CRYPTO
+ const struct bio_crypt_ctx *crypto_ctx;
+ int crypto_key_slot;
+#endif
};
struct mmc_card;
@@ -170,8 +174,8 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
-int mmc_hw_reset(struct mmc_host *host);
-int mmc_sw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
+int mmc_sw_reset(struct mmc_card *card);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c5b6e97cb21a..5894bf912f7b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,6 +15,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
+#include <linux/blk-crypto-profile.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -60,6 +61,8 @@ struct mmc_ios {
#define MMC_TIMING_MMC_DDR52 8
#define MMC_TIMING_MMC_HS200 9
#define MMC_TIMING_MMC_HS400 10
+#define MMC_TIMING_SD_EXP 11
+#define MMC_TIMING_SD_EXP_1_2V 12
unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
@@ -77,8 +80,38 @@ struct mmc_ios {
bool enhanced_strobe; /* hs400es selection */
};
+struct mmc_clk_phase {
+ bool valid;
+ u16 in_deg;
+ u16 out_deg;
+};
+
+#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
+struct mmc_clk_phase_map {
+ struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
+};
+
struct mmc_host;
+enum mmc_err_stat {
+ MMC_ERR_CMD_TIMEOUT,
+ MMC_ERR_CMD_CRC,
+ MMC_ERR_DAT_TIMEOUT,
+ MMC_ERR_DAT_CRC,
+ MMC_ERR_AUTO_CMD,
+ MMC_ERR_ADMA,
+ MMC_ERR_TUNING,
+ MMC_ERR_CMDQ_RED,
+ MMC_ERR_CMDQ_GCE,
+ MMC_ERR_CMDQ_ICCE,
+ MMC_ERR_REQ_TIMEOUT,
+ MMC_ERR_CMDQ_REQ_TIMEOUT,
+ MMC_ERR_ICE_CFG,
+ MMC_ERR_CTRL_TIMEOUT,
+ MMC_ERR_UNEXPECTED_IRQ,
+ MMC_ERR_MAX,
+};
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -139,7 +172,7 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
- /* Check if the card is pulling dat[0:3] low */
+ /* Check if the card is pulling dat[0] low */
int (*card_busy)(struct mmc_host *host);
/* The tuning command opcode value is different for SD and eMMC cards */
@@ -148,6 +181,15 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Execute HS400 tuning depending host driver */
+ int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Optional callback to prepare for SD high-speed tuning */
+ int (*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Optional callback to execute SD high-speed tuning */
+ int (*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
/* Prepare switch to DDR during the HS400 init sequence */
int (*hs400_prepare_ddr)(struct mmc_host *host);
@@ -163,7 +205,8 @@ struct mmc_host_ops {
int (*select_drive_strength)(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
- void (*hw_reset)(struct mmc_host *host);
+ /* Reset the eMMC card via RST_n */
+ void (*card_hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
/*
@@ -172,6 +215,9 @@ struct mmc_host_ops {
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+
+ /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
+ int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
};
struct mmc_cqe_ops {
@@ -284,9 +330,6 @@ struct mmc_host {
u32 ocr_avail_sdio; /* SDIO-specific OCR */
u32 ocr_avail_sd; /* SD-specific OCR */
u32 ocr_avail_mmc; /* MMC-specific OCR */
-#ifdef CONFIG_PM_SLEEP
- struct notifier_block pm_notify;
-#endif
struct wakeup_source *ws; /* Enable consume of uevents */
u32 max_current_330;
u32 max_current_300;
@@ -346,7 +389,7 @@ struct mmc_host {
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
-#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
+#define MMC_CAP_HW_RESET (1 << 31) /* Reset the eMMC card via RST_n */
u32 caps2; /* More host capabilities */
@@ -357,6 +400,8 @@ struct mmc_host {
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
MMC_CAP2_HS200_1_2V_SDR)
+#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */
+#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
@@ -376,6 +421,12 @@ struct mmc_host {
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */
+#ifdef CONFIG_MMC_CRYPTO
+#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */
+#else
+#define MMC_CAP2_CRYPTO 0
+#endif
+#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */
int fixed_drv_type; /* fixed driver type for non-removable media */
@@ -398,14 +449,14 @@ struct mmc_host {
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
unsigned int claimed:1; /* host exclusively claimed */
- unsigned int bus_dead:1; /* bus has been released */
+ unsigned int doing_init_tune:1; /* initial tuning in progress */
unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
- unsigned int use_blk_mq:1; /* use blk-mq */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
+ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -429,11 +480,10 @@ struct mmc_host {
struct mmc_slot slot;
const struct mmc_bus_ops *bus_ops; /* current bus driver */
- unsigned int bus_refs; /* reference counter */
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
- struct delayed_work sdio_irq_work;
+ struct work_struct sdio_irq_work;
bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
@@ -469,20 +519,30 @@ struct mmc_host {
bool cqe_enabled;
bool cqe_on;
+ /* Inline encryption support */
+#ifdef CONFIG_MMC_CRYPTO
+ struct blk_crypto_profile crypto_profile;
+#endif
+
/* Host Software Queue support */
bool hsq_enabled;
+ int hsq_depth;
+ u32 err_stats[MMC_ERR_MAX];
unsigned long private[] ____cacheline_aligned;
};
struct device_node;
struct mmc_host *mmc_alloc_host(int extra, struct device *);
+struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
+void mmc_of_parse_clk_phase(struct device *dev,
+ struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
-int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
+int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);
static inline void *mmc_priv(struct mmc_host *host)
{
@@ -546,6 +606,8 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
#endif
int mmc_regulator_get_supply(struct mmc_host *mmc);
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
static inline int mmc_card_is_removable(struct mmc_host *host)
{
@@ -594,12 +656,26 @@ static inline bool mmc_doing_retune(struct mmc_host *host)
return host->doing_retune == 1;
}
+static inline bool mmc_doing_tune(struct mmc_host *host)
+{
+ return host->doing_retune == 1 || host->doing_init_tune == 1;
+}
+
static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
+static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
+ enum mmc_err_stat stat)
+{
+ host->err_stats[stat] += 1;
+}
+
+int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
+int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index d9a65c6a8816..cf2bcb5da30d 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -99,6 +99,12 @@ static inline bool mmc_op_multi(u32 opcode)
opcode == MMC_READ_MULTIPLE_BLOCK;
}
+static inline bool mmc_op_tuning(u32 opcode)
+{
+ return opcode == MMC_SEND_TUNING_BLOCK ||
+ opcode == MMC_SEND_TUNING_BLOCK_HS200;
+}
+
/*
* MMC_SWITCH argument format:
*
@@ -251,8 +257,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
-#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
-#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
@@ -315,8 +319,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
-#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
-#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
@@ -396,18 +398,12 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
-#define EXT_CSD_PACKED_EVENT_EN BIT(3)
-
/*
* EXCEPTION_EVENT_STATUS field
*/
#define EXT_CSD_URGENT_BKOPS BIT(0)
#define EXT_CSD_DYNCAP_NEEDED BIT(1)
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
-#define EXT_CSD_PACKED_FAILURE BIT(3)
-
-#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
-#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
/*
* BKOPS status level
@@ -445,7 +441,7 @@ static inline bool mmc_ready_for_data(u32 status)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 2236aa540faa..6727576a8755 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -29,6 +29,10 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
+ /* class 11 */
+#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
+#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
+
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
deleted file mode 100644
index 1d42872d22f3..000000000000
--- a/include/linux/mmc/sdhci-pci-data.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_MMC_SDHCI_PCI_DATA_H
-#define LINUX_MMC_SDHCI_PCI_DATA_H
-
-struct pci_dev;
-
-struct sdhci_pci_data {
- struct pci_dev *pdev;
- int slotno;
- int rst_n_gpio; /* Set to -EINVAL if unused */
- int cd_gpio; /* Set to -EINVAL if unused */
- int (*setup)(struct sdhci_pci_data *data);
- void (*cleanup)(struct sdhci_pci_data *data);
-};
-
-extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
- int slotno);
-#endif
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index e28769991e82..1ef400f28642 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -82,7 +82,7 @@
#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */
#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */
#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */
-#define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */
+#define SDIO_SD_REV_3_00 3 /* SD Physical Spec Version 3.00 */
#define SDIO_CCCR_IOEx 0x02
#define SDIO_CCCR_IORx 0x03
@@ -159,6 +159,11 @@
#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXT 0x16
+#define SDIO_INTERRUPT_EXT_SAI (1 << 0)
+#define SDIO_INTERRUPT_EXT_EAI (1 << 1)
+
/*
* Function Basic Registers (FBR)
*/
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index fa2aaab5e57a..478855b8e406 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -51,6 +51,8 @@ struct sdio_func {
u8 *tmpbuf; /* DMA:able scratch buffer */
+ u8 major_rev; /* major revision number */
+ u8 minor_rev; /* minor revision number */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 12036619346c..7fada7a714fe 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -74,7 +74,12 @@
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
+
+#define SDIO_VENDOR_ID_CYPRESS 0x04b4
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
@@ -100,14 +105,26 @@
#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146
#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149
#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a
+#define SDIO_DEVICE_ID_MARVELL_8978_WLAN 0x9159
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
+#define SDIO_VENDOR_ID_REALTEK 0x024c
+#define SDIO_DEVICE_ID_REALTEK_RTW8723BS 0xb723
+#define SDIO_DEVICE_ID_REALTEK_RTW8821BS 0xb821
+#define SDIO_DEVICE_ID_REALTEK_RTW8822BS 0xb822
+#define SDIO_DEVICE_ID_REALTEK_RTW8821CS 0xc821
+#define SDIO_DEVICE_ID_REALTEK_RTW8822CS 0xc822
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_2ANT 0xd723
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_1ANT 0xd724
+#define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821
+
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 4ae2f2908f99..5d3d15e97868 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -15,6 +15,7 @@ struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
+void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce);
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2ad72d2c8cc5..39a7714605a7 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -8,11 +8,12 @@
struct page;
struct vm_area_struct;
struct mm_struct;
+struct vma_iterator;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+void dump_page(const struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -23,6 +24,13 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_FOLIO(cond, folio) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_page(&folio->page, "VM_BUG_ON_FOLIO(" __stringify(cond)")");\
+ BUG(); \
+ } \
+ } while (0)
#define VM_BUG_ON_VMA(cond, vma) \
do { \
if (unlikely(cond)) { \
@@ -37,6 +45,49 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_FOLIO(cond, folio) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(&folio->page, "VM_WARN_ON_ONCE_FOLIO(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_mm(mm); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
@@ -44,14 +95,25 @@ void dump_mm(const struct mm_struct *mm);
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_FOLIO(cond, folio) VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
+#ifdef CONFIG_DEBUG_VM_IRQSOFF
+#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
+#else
+#define VM_WARN_ON_IRQS_ENABLED() do { } while (0)
+#endif
+
#ifdef CONFIG_DEBUG_VIRTUAL
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond)
#else
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 03dee12d2b61..bbaec80c78c5 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -11,7 +11,35 @@
#endif
#ifndef leave_mm
-static inline void leave_mm(int cpu) { }
+static inline void leave_mm(void) { }
+#endif
+
+/*
+ * CPUs that are capable of running user task @p. Must contain at least one
+ * active CPU. It is assumed that the kernel can run on all CPUs, so calling
+ * this for a kernel thread is pointless.
+ *
+ * By default, we assume a sane, homogeneous system.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p) cpu_possible_mask
+# define task_cpu_possible(cpu, p) true
+#else
+# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
+#endif
+
+#ifndef mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+ return -1UL;
+}
+#endif
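Illustrative use of the hook above: strip architecture tag bits (e.g. arm64 MTE tags) from a user pointer before range checks; with the default mask of -1UL this is a no-op. untagged_uaddr() is a hypothetical name:

	static inline unsigned long untagged_uaddr(struct mm_struct *mm,
						   unsigned long addr)
	{
		return addr & mm_untag_mask(mm);
	}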
+
+#ifndef arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+ return true;
+}
#endif
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b8200782dede..f349e08a9dfe 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -33,7 +33,7 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
* access flags). User should soft dirty the page in the end callback to make
- * sure that anyone relying on soft dirtyness catch pages that might be written
+ * sure that anyone relying on soft dirtiness catches pages that might be written
* through non CPU mappings.
*
* @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
@@ -41,7 +41,12 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
* a device driver to possibly ignore the invalidation if the
- * migrate_pgmap_owner field matches the driver's device private pgmap owner.
+ * owner field matches the driver's device private pgmap owner.
+ *
+ * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
+ * longer have exclusive access to the page. When sent during creation of an
+ * exclusive range the owner will be initialised to the value provided by the
+ * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -51,6 +56,7 @@ enum mmu_notifier_event {
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
MMU_NOTIFY_MIGRATE,
+ MMU_NOTIFY_EXCLUSIVE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -161,7 +167,7 @@ struct mmu_notifier_ops {
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
- * droppped on invalidate_range_end() then the driver itself
+ * dropped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
@@ -169,11 +175,11 @@ struct mmu_notifier_ops {
* the last refcount is dropped.
*
* If blockable argument is set to false then the callback cannot
- * sleep and has to return with -EAGAIN. 0 should be returned
- * otherwise. Please note that if invalidate_range_start approves
- * a non-blocking behavior then the same applies to
- * invalidate_range_end.
- *
+ * sleep and has to return with -EAGAIN if sleeping would be required.
+ * 0 should be returned otherwise. Please note that notifiers that can
+ * fail invalidate_range_start are not allowed to implement
+ * invalidate_range_end, as there is no mechanism for informing the
+ * notifier that its start failed.
*/
int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
@@ -181,27 +187,27 @@ struct mmu_notifier_ops {
const struct mmu_notifier_range *range);
/*
- * invalidate_range() is either called between
- * invalidate_range_start() and invalidate_range_end() when the
- * VM has to free pages that where unmapped, but before the
- * pages are actually freed, or outside of _start()/_end() when
- * a (remote) TLB is necessary.
+ * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
+ * which shares page-tables with the CPU. The
+ * invalidate_range_start()/end() callbacks should not be implemented as
+ * arch_invalidate_secondary_tlbs() already catches the points in time when
+ * an external TLB needs to be flushed.
*
- * If invalidate_range() is used to manage a non-CPU TLB with
- * shared page-tables, it not necessary to implement the
- * invalidate_range_start()/end() notifiers, as
- * invalidate_range() alread catches the points in time when an
- * external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/vm/mmu_notifier.rst
+ * This requires arch_invalidate_secondary_tlbs() to be called while
+ * holding the ptl spin-lock and therefore this callback is not allowed
+ * to sleep.
*
- * Note that this function might be called with just a sub-range
- * of what was passed to invalidate_range_start()/end(), if
- * called between those functions.
+ * This is called by architecture code whenever invalidating a TLB
+ * entry. It is assumed that any secondary TLB has the same rules for
+ * when invalidations are required. If this is not the case, architecture
+ * code will need to call this explicitly when required for secondary
+ * TLB invalidation.
*/
- void (*invalidate_range)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ void (*arch_invalidate_secondary_tlbs)(
+ struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -263,13 +269,12 @@ extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
struct mmu_notifier_range {
- struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
- void *migrate_pgmap_owner;
+ void *owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -363,7 +368,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
* mmu_interval_read_retry() will return true.
*
* False is not reliable and only suggests a collision may not have
- * occured. It can be called many times and does not have to hold the user
+ * occurred. It can be called many times and does not have to hold the user
* provided lock.
*
* This call can be used as part of loops and other expensive operations to
@@ -390,10 +395,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
- bool only_end);
-extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
+extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
@@ -455,7 +459,14 @@ mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
-static inline int
+/*
+ * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
+ * can return an error if a notifier can't proceed without blocking, in which
+ * case you're not allowed to modify PTEs in the specified range.
+ *
+ * This is mainly intended for OOM handling.
+ */
+static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
int ret = 0;
@@ -476,21 +487,14 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
might_sleep();
if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, false);
+ __mmu_notifier_invalidate_range_end(range);
}
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
- if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, true);
-}
-
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range(mm, start, end);
+ __mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
@@ -508,12 +512,10 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
enum mmu_notifier_event event,
unsigned flags,
- struct vm_area_struct *vma,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- range->vma = vma;
range->event = event;
range->mm = mm;
range->start = start;
@@ -521,14 +523,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->flags = flags;
}
-static inline void mmu_notifier_range_init_migrate(
- struct mmu_notifier_range *range, unsigned int flags,
- struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long start, unsigned long end, void *pgmap)
+static inline void mmu_notifier_range_init_owner(
+ struct mmu_notifier_range *range,
+ enum mmu_notifier_event event, unsigned int flags,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, void *owner)
{
- mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
- start, end);
- range->migrate_pgmap_owner = pgmap;
+ mmu_notifier_range_init(range, event, flags, mm, start, end);
+ range->owner = owner;
}
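A sketch of a call site updated for the new signature: the event is now
passed explicitly instead of being hard-coded to MMU_NOTIFY_MIGRATE, and
"owner" generalizes the old pgmap owner (the migrate->pgmap_owner field
is assumed here for illustration):

	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
				      vma->vm_mm, start, end,
				      migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);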
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
@@ -579,45 +581,6 @@ static inline void mmu_notifier_range_init_migrate(
__young; \
})
-#define ptep_clear_flush_notify(__vma, __address, __ptep) \
-({ \
- unsigned long ___addr = __address & PAGE_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pte_t ___pte; \
- \
- ___pte = ptep_clear_flush(__vma, __address, __ptep); \
- mmu_notifier_invalidate_range(___mm, ___addr, \
- ___addr + PAGE_SIZE); \
- \
- ___pte; \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pmd_t ___pmd; \
- \
- ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PMD_SIZE); \
- \
- ___pmd; \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pud_t ___pud; \
- \
- ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PUD_SIZE); \
- \
- ___pud; \
-})
-
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -653,10 +616,10 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
range->end = end;
}
-#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
+#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
-#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
- pgmap) \
+#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
+ end, owner) \
_mmu_notifier_range_init(range, start, end)
static inline bool
@@ -708,12 +671,7 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8379432f4f2f..c11b7cde81ef 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
@@ -20,15 +21,21 @@
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
+#include <linux/local_lock.h>
+#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
-#define MAX_ORDER 11
+#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_PAGE_ORDER 10
#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
+
+#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
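A quick sketch of how the renamed constants are meant to be used:
MAX_PAGE_ORDER now names the largest order itself, so per-order arrays
are sized NR_PAGE_ORDERS and loops become inclusive (a zone pointer in
scope is assumed):

	unsigned int order;
	unsigned long nr_free[NR_PAGE_ORDERS];

	for (order = 0; order <= MAX_PAGE_ORDER; order++)
		nr_free[order] = zone->free_area[order].nr_free;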
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
@@ -53,10 +60,7 @@ enum migratetype {
*
* The way to use it is to change migratetype of a range of
* pageblocks to MIGRATE_CMA which can be done by
- * __free_pageblock_cma() function. What is important though
- * is that a range of pageblocks must be aligned to
- * MAX_ORDER_NR_PAGES should biggest page be bigger then
- * a single pageblock.
+ * the __free_pageblock_cma() function.
*/
MIGRATE_CMA,
#endif
@@ -72,9 +76,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+# define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \
+ get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
+# define is_migrate_cma_folio(folio, pfn) false
#endif
static inline bool is_migrate_movable(int mt)
@@ -82,8 +89,19 @@ static inline bool is_migrate_movable(int mt)
return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}
+/*
+ * Check whether a migratetype can be merged with another migratetype.
+ *
+ * It is only mergeable when it can fall back to other migratetypes for
+ * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
+ */
+static inline bool migratetype_is_mergeable(int mt)
+{
+ return mt < MIGRATE_PCPTYPES;
+}
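A hypothetical usage sketch: a caller that wants to repurpose a whole
pageblock should only do so when the current type can later merge back
(the surrounding logic is illustrative, not from this patch):

	/* only convert the pageblock if its type can fall back again */
	if (migratetype_is_mergeable(mt))
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);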
+
#define for_each_migratetype_order(order, type) \
- for (order = 0; order < MAX_ORDER; order++) \
+ for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
@@ -93,40 +111,16 @@ extern int page_group_by_mobility_disabled;
#define get_pageblock_migratetype(page) \
get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
+#define folio_migratetype(folio) \
+ get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
+ MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
-static inline struct page *get_page_from_free_area(struct free_area *area,
- int migratetype)
-{
- return list_first_entry_or_null(&area->free_list[migratetype],
- struct page, lru);
-}
-
-static inline bool free_area_empty(struct free_area *area, int migratetype)
-{
- return list_empty(&area->free_list[migratetype]);
-}
-
struct pglist_data;
-/*
- * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
- * So add a wild amount of padding here to ensure that they fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
@@ -135,10 +129,10 @@ enum numa_stat_item {
NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
- NR_VM_NUMA_STAT_ITEMS
+ NR_VM_NUMA_EVENT_ITEMS
};
#else
-#define NR_VM_NUMA_STAT_ITEMS 0
+#define NR_VM_NUMA_EVENT_ITEMS 0
#endif
enum zone_stat_item {
@@ -152,13 +146,15 @@ enum zone_stat_item {
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
- NR_PAGETABLE, /* used for pagetables */
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ NR_UNACCEPTED,
+#endif
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
@@ -200,6 +196,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
@@ -207,10 +204,40 @@ enum node_stat_item {
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
+ NR_PAGETABLE, /* used for pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
+#ifdef CONFIG_SWAP
+ NR_SWAPCACHE,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ PGPROMOTE_SUCCESS, /* promote successfully */
+ PGPROMOTE_CANDIDATE, /* candidate pages to promote */
+#endif
+ /* PGDEMOTE_*: pages demoted */
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
NR_VM_NODE_STAT_ITEMS
};
/*
+ * Returns true if the item should be printed in THPs (/proc/vmstat
+ * currently prints the number of anon, file and shmem THPs, but the
+ * item is charged in pages).
+ */
+static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return false;
+
+ return item == NR_ANON_THPS ||
+ item == NR_FILE_THPS ||
+ item == NR_SHMEM_THPS ||
+ item == NR_SHMEM_PMDMAPPED ||
+ item == NR_FILE_PMDMAPPED;
+}
+
+/*
* Returns true if the value is measured in bytes (most vmstat values are
* measured in pages). This defines the API part, the internal representation
* might be different.
@@ -252,6 +279,14 @@ enum lru_list {
NR_LRU_LISTS
};
+enum vmscan_throttle_state {
+ VMSCAN_THROTTLE_WRITEBACK,
+ VMSCAN_THROTTLE_ISOLATED,
+ VMSCAN_THROTTLE_NOPROGRESS,
+ VMSCAN_THROTTLE_CONGESTED,
+ NR_VMSCAN_THROTTLE,
+};
+
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
@@ -266,14 +301,312 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
+#define WORKINGSET_ANON 0
+#define WORKINGSET_FILE 1
+#define ANON_AND_FILE 2
+
enum lruvec_flags {
- LRUVEC_CONGESTED, /* lruvec has many dirty pages
- * backed by a congested BDI
- */
+ /*
+ * An lruvec has many dirty pages backed by a congested BDI:
+ * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
+ * It can be cleared by cgroup reclaim or kswapd.
+ * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
+ * It can only be cleared by kswapd.
+ *
+ * Essentially, kswapd can unthrottle an lruvec throttled by cgroup
+ * reclaim, but not vice versa. This only applies to the root cgroup.
+ * The goal is to prevent cgroup reclaim on the root cgroup (e.g.
+ * memory.reclaim) from unthrottling an unbalanced node (that was
+ * throttled by kswapd).
+ */
+ LRUVEC_CGROUP_CONGESTED,
+ LRUVEC_NODE_CONGESTED,
};
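A rough sketch of how a reclaim path might consume the split bits
(reclaim_throttle() and its placement are assumptions; this patch only
defines the two flags):

	/* kswapd throttles on the node-level bit; cgroup reclaim must
	 * not clear it, preserving the asymmetry described above */
	if (test_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags))
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);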
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Evictable pages are divided into multiple generations. The youngest and the
+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in folio->flags stores gen+1 while
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
+ *
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+ * eviction. The first check takes care of the accessed bit set on the initial
+ * fault; the second check makes sure this page hasn't been used since then.
+ * This process, AKA second chance, requires a minimum of two generations,
+ * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
+ * LRU, e.g., /proc/vmstat, these two generations are considered active; the
+ * rest of generations, if they exist, are considered inactive. See
+ * lru_gen_is_active().
+ *
+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
+ * that the aging need not worry about it. And it's set again when a page
+ * considered active is isolated for non-reclaiming purposes, e.g., migration.
+ * See lru_gen_add_folio() and lru_gen_del_folio().
+ *
+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
+ * in folio->flags.
+ */
+#define MIN_NR_GENS 2U
+#define MAX_NR_GENS 4U
+
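A sliding-window sketch of how a sequence number maps onto the
per-generation lists (the helper name is assumed; this patch defines
only the constants):

	/* gen is seq's offset within the window of MAX_NR_GENS lists */
	static inline unsigned long lru_gen_from_seq(unsigned long seq)
	{
		return seq % MAX_NR_GENS;
	}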
+/*
+ * Each generation is divided into multiple tiers. A page accessed N times
+ * through file descriptors is in tier order_base_2(N). A page in the first tier
+ * (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A page in any other tier (N>1) is marked by
+ * PG_referenced and PG_workingset. This implies a minimum of two tiers is
+ * supported without using additional bits in folio->flags.
+ *
+ * In contrast to moving across generations which requires the LRU lock, moving
+ * across tiers only involves atomic operations on folio->flags and therefore
+ * has a negligible cost in the buffered access path. In the eviction path,
+ * comparisons of refaulted/(evicted+protected) from the first tier and the
+ * rest infer whether pages accessed multiple times through file descriptors
+ * are statistically hot and thus worth protecting.
+ *
+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
+ * folio->flags.
+ */
+#define MAX_NR_TIERS 4U
+
+#ifndef __GENERATING_BOUNDS_H
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+
+#ifdef CONFIG_LRU_GEN
+
+enum {
+ LRU_GEN_ANON,
+ LRU_GEN_FILE,
+};
+
+enum {
+ LRU_GEN_CORE,
+ LRU_GEN_MM_WALK,
+ LRU_GEN_NONLEAF_YOUNG,
+ NR_LRU_GEN_CAPS
+};
+
+#define MIN_LRU_BATCH BITS_PER_LONG
+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
+
+/* whether to keep historical stats from evicted generations */
+#ifdef CONFIG_LRU_GEN_STATS
+#define NR_HIST_GENS MAX_NR_GENS
+#else
+#define NR_HIST_GENS 1U
+#endif
+
+/*
+ * The youngest generation number is stored in max_seq for both anon and file
+ * types as they are aged on an equal footing. The oldest generation numbers are
+ * stored in min_seq[] separately for anon and file types as clean file pages
+ * can be evicted regardless of swap constraints.
+ *
+ * Normally anon and file min_seq are in sync. But if swapping is constrained,
+ * e.g., out of swap space, file min_seq is allowed to advance and leave anon
+ * min_seq behind.
+ *
+ * The number of pages in each generation is eventually consistent and therefore
+ * can be transiently negative when reset_batch_size() is pending.
+ */
+struct lru_gen_folio {
+ /* the aging increments the youngest generation number */
+ unsigned long max_seq;
+ /* the eviction increments the oldest generation numbers */
+ unsigned long min_seq[ANON_AND_FILE];
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the exponential moving average of evicted+protected */
+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the first tier doesn't need protection, hence the minus one */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can be modified without holding the LRU lock */
+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* whether the multi-gen LRU is enabled */
+ bool enabled;
+ /* the memcg generation this lru_gen_folio belongs to */
+ u8 gen;
+ /* the list segment this lru_gen_folio belongs to */
+ u8 seg;
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_node list;
+};
+
+enum {
+ MM_LEAF_TOTAL, /* total leaf entries */
+ MM_LEAF_OLD, /* old leaf entries */
+ MM_LEAF_YOUNG, /* young leaf entries */
+ MM_NONLEAF_TOTAL, /* total non-leaf entries */
+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
+ NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS 2
+
+struct lru_gen_mm_state {
+ /* synced with max_seq after each iteration */
+ unsigned long seq;
+ /* where the current iteration continues after */
+ struct list_head *head;
+ /* where the last iteration ended before */
+ struct list_head *tail;
+ /* Bloom filters flip after each iteration */
+ unsigned long *filters[NR_BLOOM_FILTERS];
+ /* the mm stats for debugging */
+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+};
+
+struct lru_gen_mm_walk {
+ /* the lruvec under reclaim */
+ struct lruvec *lruvec;
+ /* max_seq from lru_gen_folio: can be out of date */
+ unsigned long seq;
+ /* the next address within an mm to scan */
+ unsigned long next_addr;
+ /* to batch promoted pages */
+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* to batch the mm stats */
+ int mm_stats[NR_MM_STATS];
+ /* total batched items */
+ int batched;
+ bool can_swap;
+ bool force_scan;
+};
+
+/*
+ * For each node, memcgs are divided into two generations: the old and the
+ * young. For each generation, memcgs are randomly sharded into multiple bins
+ * to improve scalability. For each bin, the hlist_nulls is virtually divided
+ * into three segments: the head, the tail and the default.
+ *
+ * An onlining memcg is added to the tail of a random bin in the old generation.
+ * The eviction starts at the head of a random bin in the old generation. The
+ * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
+ * the old generation, is incremented when all its bins become empty.
+ *
+ * There are four operations:
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
+ * current generation (old or young) and updates its "seg" to "head";
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
+ * current generation (old or young) and updates its "seg" to "tail";
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
+ * generation, updates its "gen" to "old" and resets its "seg" to "default";
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
+ * young generation, updates its "gen" to "young" and resets its "seg" to
+ * "default".
+ *
+ * The events that trigger the above operations are:
+ * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
+ * 2. The first attempt to reclaim a memcg below low, which triggers
+ * MEMCG_LRU_TAIL;
+ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
+ *
+ * Notes:
+ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
+ * of their max_seq counters ensures the eventual fairness to all eligible
+ * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * 2. There are only two valid generations: old (seq) and young (seq+1).
+ * MEMCG_NR_GENS is set to three so that when reading the generation counter
+ * locklessly, a stale value (seq-1) does not wrap around to young.
+ */
+#define MEMCG_NR_GENS 3
+#define MEMCG_NR_BINS 8
+
+struct lru_gen_memcg {
+ /* the per-node memcg generation counter */
+ unsigned long seq;
+ /* each memcg has one lru_gen_folio per node */
+ unsigned long nr_memcgs[MEMCG_NR_GENS];
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
+ /* protects the above */
+ spinlock_t lock;
+};
+
+void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+
+void lru_gen_init_memcg(struct mem_cgroup *memcg);
+void lru_gen_exit_memcg(struct mem_cgroup *memcg);
+void lru_gen_online_memcg(struct mem_cgroup *memcg);
+void lru_gen_offline_memcg(struct mem_cgroup *memcg);
+void lru_gen_release_memcg(struct mem_cgroup *memcg);
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
+static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+}
+
+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
+ /* per lruvec lru_lock for memcg */
+ spinlock_t lru_lock;
/*
* These track the cost of reclaiming one LRU - file or anon -
* over the other. As the observed cost of reclaiming one LRU
@@ -283,17 +616,24 @@ struct lruvec {
unsigned long file_cost;
/* Non-resident age, driven by LRU movement */
atomic_long_t nonresident_age;
- /* Refaults at the time of last reclaim cycle, anon=0, file=1 */
- unsigned long refaults[2];
+ /* Refaults at the time of last reclaim cycle */
+ unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
+#ifdef CONFIG_LRU_GEN
+ /* evictable pages divided into generations */
+ struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ /* to concurrently iterate lru_gen_mm_list */
+ struct lru_gen_mm_state mm_state;
+#endif
+#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+ struct zswap_lruvec_state zswap_lruvec_state;
};
-/* Isolate unmapped pages */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
@@ -306,32 +646,74 @@ enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
+ WMARK_PROMO,
NR_WMARK
};
+/*
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+ * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+ * it should not contribute to serious fragmentation causing THP allocation
+ * failures.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_PCP_THP 1
+#else
+#define NR_PCP_THP 0
+#endif
+#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
+
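A sketch of how a (migratetype, order) pair could index the widened pcp
lists, with the single trailing THP list special-cased (the helper name
is an assumption; only the list counts are defined here):

	static inline unsigned int order_to_pindex(int migratetype, int order)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* THP-sized frees all share the one list past the low orders */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			return NR_LOWORDER_PCP_LISTS;
	#endif
		return (MIGRATE_PCPTYPES * order) + migratetype;
	}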
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
+/*
+ * Flags used in the pcp->flags field.
+ *
+ * PCPF_PREV_FREE_HIGH_ORDER: the previous page freeing was of a
+ * high-order page. This is used to avoid draining the PCP for an
+ * occasional high-order page freeing.
+ *
+ * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
+ * draining it for consecutive high-order page freeings without
+ * allocation, if the CPU's data cache slice is large enough. This
+ * reduces zone lock contention and keeps cache-hot pages reusable.
+ */
+#define PCPF_PREV_FREE_HIGH_ORDER BIT(0)
+#define PCPF_FREE_HIGH_BATCH BIT(1)
+
struct per_cpu_pages {
+ spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
+ int high_min; /* min high watermark */
+ int high_max; /* max high watermark */
int batch; /* chunk size for buddy add/remove */
+ u8 flags; /* protected by pcp->lock */
+ u8 alloc_factor; /* batch scaling factor during allocate */
+#ifdef CONFIG_NUMA
+ u8 expire; /* When 0, remote pagesets are drained */
+#endif
+ short free_count; /* consecutive free count */
/* Lists of pages, one per migrate type stored on the pcp-lists */
- struct list_head lists[MIGRATE_PCPTYPES];
-};
+ struct list_head lists[NR_PCP_LISTS];
+} ____cacheline_aligned_in_smp;
-struct per_cpu_pageset {
- struct per_cpu_pages pcp;
-#ifdef CONFIG_NUMA
- s8 expire;
- u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
-#endif
+struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s8 stat_threshold;
+#endif
+#ifdef CONFIG_NUMA
+ /*
+ * Low priority inaccurate counters that are only folded
+ * on demand. Use a large type to avoid the overhead of
+ * folding during refresh_cpu_vm_stats.
+ */
+ unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};
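Because the NUMA counters are now plain per-CPU unsigned longs rather
than s8 diffs, updating one needs no threshold handling; a minimal
sketch (the helper name is assumed):

	static inline void __count_numa_event(struct zone *zone,
					      enum numa_stat_item item)
	{
		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

		raw_cpu_inc(pzstats->vm_numa_event[item]);
	}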
@@ -352,26 +734,6 @@ enum zone_type {
* DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
* platforms may need both zones as they support peripherals with
* different DMA addressing limitations.
- *
- * Some examples:
- *
- * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
- * rest of the lower 4G.
- *
- * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
- * the specific device.
- *
- * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
- * lower 4G.
- *
- * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
- * depending on the specific device.
- *
- * - s390 uses ZONE_DMA fixed to the lower 2G.
- *
- * - ia64 and riscv only use ZONE_DMA32.
- *
- * - parisc uses neither.
*/
#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
@@ -396,6 +758,55 @@ enum zone_type {
*/
ZONE_HIGHMEM,
#endif
+ /*
+ * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
+ * movable pages with few exceptional cases described below. Main use
+ * cases for ZONE_MOVABLE are to make memory offlining/unplug more
+ * likely to succeed, and to locally limit unmovable allocations - e.g.,
+ * to increase the number of THP/huge pages. Notable special cases are:
+ *
+ * 1. Pinned pages: (long-term) pinning of movable pages might
+ * essentially turn such pages unmovable. Therefore, we do not allow
+ * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
+ * faulted, they come from the right zone right away. However, it is
+ * still possible that the address space already has pages in
+ * ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
+ * touched that memory before pinning). In such cases we migrate them
+ * to a different zone; when migration fails, pinning fails.
+ * 2. memblock allocations: kernelcore/movablecore setups might create
+ * situations where ZONE_MOVABLE contains unmovable allocations
+ * after boot. Memory offlining and allocations fail early.
+ * 3. Memory holes: kernelcore/movablecore setups might create very rare
+ * situations where ZONE_MOVABLE contains memory holes after boot,
+ * for example, if we have sections that are only partially
+ * populated. Memory offlining and allocations fail early.
+ * 4. PG_hwpoison pages: while poisoned pages can be skipped during
+ * memory offlining, such pages cannot be allocated.
+ * 5. Unmovable PG_offline pages: in paravirtualized environments,
+ * hotplugged memory blocks might only partially be managed by the
+ * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
+ * parts not managed by the buddy are unmovable PG_offline pages. In
+ * some cases (virtio-mem), such pages can be skipped during
+ * memory offlining, however, cannot be moved/allocated. These
+ * techniques might use alloc_contig_range() to hide previously
+ * exposed pages from the buddy again (e.g., to implement some sort
+ * of memory unplug in virtio-mem).
+ * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
+ * situations where ZERO_PAGE(0), which is allocated differently
+ * on different platforms, may end up in a movable zone. ZERO_PAGE(0)
+ * cannot be migrated.
+ * 7. Memory-hotplug: when using memmap_on_memory and onlining the
+ * memory to the MOVABLE zone, the vmemmap pages are also placed in
+ * such zone. Such pages cannot really be moved around as they are
+ * self-stored in the range, but they are treated as movable when
+ * the range they describe is about to be offlined.
+ *
+ * In general, no unmovable allocations that degrade memory offlining
+ * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
+ * have to expect that migrating pages in ZONE_MOVABLE can fail (even
+ * if has_unmovable_pages() states that there are no unmovable pages,
+ * there can be false negatives).
+ */
ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
ZONE_DEVICE,
@@ -406,6 +817,8 @@ enum zone_type {
#ifndef __GENERATING_BOUNDS_H
+#define ASYNC_AND_SYNC 2
+
struct zone {
/* Read-mostly fields */
@@ -430,7 +843,15 @@ struct zone {
int node;
#endif
struct pglist_data *zone_pgdat;
- struct per_cpu_pageset __percpu *pageset;
+ struct per_cpu_pages __percpu *per_cpu_pageset;
+ struct per_cpu_zonestat __percpu *per_cpu_zonestats;
+ /*
+ * the high and batch values are copied to individual pagesets for
+ * faster access
+ */
+ int pageset_high_min;
+ int pageset_high_max;
+ int pageset_batch;
#ifndef CONFIG_SPARSEMEM
/*
@@ -452,11 +873,18 @@ struct zone {
* is calculated as:
* present_pages = spanned_pages - absent_pages(pages in holes);
*
+ * present_early_pages is the number of present pages within the zone
+ * that reside on memory available since early boot, excluding
+ * hotplugged memory.
+ *
* managed_pages is present pages managed by the buddy system, which
* is calculated as (reserved_pages includes pages allocated by the
* bootmem allocator):
* managed_pages = present_pages - reserved_pages;
*
+ * cma_pages is the number of present pages assigned for CMA use
+ * (MIGRATE_CMA).
+ *
* So present_pages may be used by memory hotplug or memory power
* management logic to figure out unmanaged pages by checking
* (present_pages - managed_pages). And managed_pages should be used
@@ -475,12 +903,18 @@ struct zone {
* give them a chance of being in the same cacheline.
*
* Write access to present_pages at runtime should be protected by
- * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
- * present_pages should get_online_mems() to get a stable value.
+ * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
+ * present_pages should use get_online_mems() to get a stable value.
*/
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned long present_early_pages;
+#endif
+#ifdef CONFIG_CMA
+ unsigned long cma_pages;
+#endif
const char *name;
@@ -501,10 +935,15 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER];
+ struct free_area free_area[NR_PAGE_ORDERS];
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
+ struct list_head unaccepted_pages;
+#endif
/* zone flags, see below */
unsigned long flags;
@@ -513,7 +952,7 @@ struct zone {
spinlock_t lock;
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -525,8 +964,8 @@ struct zone {
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* pfn where compaction free scanner should start */
unsigned long compact_cached_free_pfn;
- /* pfn where async and sync compaction migration scanner should start */
- unsigned long compact_cached_migrate_pfn[2];
+ /* pfn where compaction migration scanner should start */
+ unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
unsigned long compact_init_migrate_pfn;
unsigned long compact_init_free_pfn;
#endif
@@ -550,10 +989,10 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
- atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
+ atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
@@ -571,6 +1010,8 @@ enum zone_flags {
ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
* Cleared when kswapd is woken.
*/
+ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
+ ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
static inline unsigned long zone_managed_pages(struct zone *zone)
@@ -578,6 +1019,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone)
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
+static inline unsigned long zone_cma_pages(struct zone *zone)
+{
+#ifdef CONFIG_CMA
+ return zone->cma_pages;
+#else
+ return 0;
+#endif
+}
+
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
@@ -598,6 +1048,117 @@ static inline bool zone_is_empty(struct zone *zone)
return zone->spanned_pages == 0;
}
+#ifndef BUILD_VDSO32_64
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+ ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
+ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+ return page_zonenum(&folio->page);
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_DEVICE;
+}
+
+/*
+ * Consecutive zone device pages should not be merged into the same sgl
+ * or bvec segment with other types of pages or if they belong to different
+ * pgmaps. Otherwise getting the pgmap of a given segment is not possible
+ * without scanning the entire segment. This helper returns true if both
+ * pages are not zone device pages, or if both are zone device pages with
+ * the same pgmap.
+ */
+static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
+ const struct page *b)
+{
+ if (is_zone_device_page(a) != is_zone_device_page(b))
+ return false;
+ if (!is_zone_device_page(a))
+ return true;
+ return a->pgmap == b->pgmap;
+}
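A hypothetical caller sketch for segment-merging code (the surrounding
merge logic is illustrative, not from this patch):

	/* refuse to extend a bio/sgl segment across differing pgmaps */
	if (!zone_device_pages_have_same_pgmap(prev_page, page))
		return false;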
+
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+ unsigned long, struct dev_pagemap *);
+#else
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return false;
+}
+static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
+ const struct page *b)
+{
+ return true;
+}
+#endif
+
+static inline bool folio_is_zone_device(const struct folio *folio)
+{
+ return is_zone_device_page(&folio->page);
+}
+
+static inline bool is_zone_movable_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_MOVABLE;
+}
+
+static inline bool folio_is_zone_movable(const struct folio *folio)
+{
+ return folio_zonenum(folio) == ZONE_MOVABLE;
+}
+#endif
+
/*
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
@@ -663,10 +1224,12 @@ struct zonelist {
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+/*
+ * The array of struct pages for flatmem.
+ * It must be declared for SPARSEMEM as well because there are configurations
+ * that rely on that.
+ */
extern struct page *mem_map;
-#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
@@ -676,6 +1239,31 @@ struct deferred_split {
};
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Per NUMA node memory failure handling statistics.
+ */
+struct memory_failure_stats {
+ /*
+ * Number of raw pages poisoned.
+ * Cases not accounted: memory outside kernel control, offline page,
+ * arch-specific memory_failure (SGX), hwpoison_filter() filtered
+ * error events, and unpoison actions from hwpoison_unpoison.
+ */
+ unsigned long total;
+ /*
+ * Recovery results of poisoned raw pages handled by memory_failure,
+ * in sync with mf_result.
+ * total = ignored + failed + delayed + recovered.
+ * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
+ */
+ unsigned long ignored;
+ unsigned long failed;
+ unsigned long delayed;
+ unsigned long recovered;
+};
+#endif
+
/*
* On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
@@ -700,7 +1288,7 @@ typedef struct pglist_data {
struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones; /* number of populated zones in this node */
-#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
+#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
struct page_ext *node_page_ext;
@@ -728,8 +1316,17 @@ typedef struct pglist_data {
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
- struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/end() */
+
+ /* wait queues for throttling reclaim for different reasons. */
+ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
+
+ atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
+ unsigned long nr_reclaim_start; /* nr of pages written while throttled,
+ * sampled when throttling started. */
+#ifdef CONFIG_MEMORY_HOTPLUG
+ struct mutex kswapd_lock;
+#endif
+ struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
@@ -740,6 +1337,7 @@ typedef struct pglist_data {
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
+ bool proactive_compact_trigger;
#endif
/*
* This is a per-node reserve of pages that are not available
@@ -756,8 +1354,7 @@ typedef struct pglist_data {
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
- spinlock_t lru_lock;
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -771,6 +1368,21 @@ typedef struct pglist_data {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /* start time in ms of current promote rate limit period */
+ unsigned int nbp_rl_start;
+ /* number of promote candidate pages at start time of current rate limit period */
+ unsigned long nbp_rl_nr_cand;
+ /* promote threshold in ms */
+ unsigned int nbp_threshold;
+ /* start time in ms of current promote threshold adjustment period */
+ unsigned int nbp_th_start;
+ /*
+ * number of promote candidate pages at start time of current promote
+ * threshold adjustment period
+ */
+ unsigned long nbp_th_nr_cand;
+#endif
/* Fields commonly accessed by the page reclaim scanner */
/*
@@ -782,21 +1394,28 @@ typedef struct pglist_data {
unsigned long flags;
- ZONE_PADDING(_pad2_)
+#ifdef CONFIG_LRU_GEN
+ /* kswap mm walk data */
+ struct lru_gen_mm_walk mm_walk;
+ /* lru_gen_folio list */
+ struct lru_gen_memcg memcg_lru;
+#endif
+
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ struct memory_failure_stats mf_stats;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
-#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
-#else
-#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
-#endif
-#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
@@ -806,11 +1425,6 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
-static inline bool pgdat_is_empty(pg_data_t *pgdat)
-{
- return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
-}
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
@@ -824,10 +1438,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx);
-enum memmap_context {
- MEMMAP_EARLY,
- MEMMAP_HOTPLUG,
+/*
+ * Memory initialization context, used to differentiate memory added by
+ * the platform statically or via the memory hotplug interface.
+ */
+enum meminit_context {
+ MEMINIT_EARLY,
+ MEMINIT_HOTPLUG,
};
+
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
@@ -842,8 +1461,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#endif
}
-extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
-
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
@@ -855,6 +1472,18 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return zone_idx(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return false;
+}
+#endif
+
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -893,22 +1522,11 @@ static inline void zone_set_nid(struct zone *zone, int nid) {}
extern int movable_zone;
-#ifdef CONFIG_HIGHMEM
-static inline int zone_movable_is_highmem(void)
-{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- return movable_zone == ZONE_HIGHMEM;
-#else
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
-#endif
-}
-#endif
-
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM ||
- (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
+ (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
return 0;
#endif
@@ -918,50 +1536,37 @@ static inline int is_highmem_idx(enum zone_type idx)
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
- * @zone - pointer to struct zone variable
+ * @zone: pointer to struct zone variable
+ * Return: 1 for a highmem zone, 0 otherwise
*/
static inline int is_highmem(struct zone *zone)
{
-#ifdef CONFIG_HIGHMEM
return is_highmem_idx(zone_idx(zone));
-#else
- return 0;
-#endif
}
-/* These two functions are used to setup the per zone pages min values */
-struct ctl_table;
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
+#else
+static inline bool has_managed_dma(void)
+{
+ return false;
+}
+#endif
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
- size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
- size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int numa_zonelist_order_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-extern int percpu_pagelist_fraction;
-extern char numa_zonelist_order[];
-#define NUMA_ZONELIST_ORDER_LEN 16
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
-#define NODE_DATA(nid) (&contig_page_data)
-#define NODE_MEM_MAP(nid) mem_map
+static inline struct pglist_data *NODE_DATA(int nid)
+{
+ return &contig_page_data;
+}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
#include <asm/mmzone.h>
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
@@ -969,7 +1574,7 @@ extern struct zone *next_zone(struct zone *zone);
/**
* for_each_online_pgdat - helper macro to iterate over all online nodes
- * @pgdat - pointer to a pg_data_t variable
+ * @pgdat: pointer to a pg_data_t variable
*/
#define for_each_online_pgdat(pgdat) \
for (pgdat = first_online_pgdat(); \
@@ -977,7 +1582,7 @@ extern struct zone *next_zone(struct zone *zone);
pgdat = next_online_pgdat(pgdat))
/**
* for_each_zone - helper macro to iterate over all memory zones
- * @zone - pointer to struct zone variable
+ * @zone: pointer to struct zone variable
*
* The user only needs to declare the zone variable, for_each_zone
* fills it in.
@@ -1016,15 +1621,18 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
/**
* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
- * @z - The cursor used as a starting point for the search
- * @highest_zoneidx - The zone index of the highest zone to return
- * @nodes - An optional nodemask to filter the zonelist with
+ * @z: The cursor used as a starting point for the search
+ * @highest_zoneidx: The zone index of the highest zone to return
+ * @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
* search. The zoneref returned is a cursor that represents the current zone
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
+ *
+ * Return: the next zone at or below highest_zoneidx within the allowed
+ * nodemask using a cursor within a zonelist as a starting point
*/
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
@@ -1037,10 +1645,9 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
- * @zonelist - The zonelist to search for a suitable zone
- * @highest_zoneidx - The zone index of the highest zone to return
- * @nodes - An optional nodemask to filter the zonelist with
- * @return - Zoneref pointer for the first suitable zone found (see below)
+ * @zonelist: The zonelist to search for a suitable zone
+ * @highest_zoneidx: The zone index of the highest zone to return
+ * @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
@@ -1050,6 +1657,8 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
* never NULL). This may happen either genuinely, or due to concurrent nodemask
* update due to cpuset modification.
+ *
+ * Return: Zoneref pointer for the first suitable zone found
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
@@ -1061,11 +1670,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
- * @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->_zonerefs being iterated
- * @zlist - The zonelist being iterated
- * @highidx - The zone index of the highest zone to return
- * @nodemask - Nodemask allowed by the allocator
+ * @zone: The current zone in the iterator
+ * @z: The current pointer within zonelist->_zonerefs being iterated
+ * @zlist: The zonelist being iterated
+ * @highidx: The zone index of the highest zone to return
+ * @nodemask: Nodemask allowed by the allocator
*
 * This iterator iterates through all zones at or below a given zone index and
* within a given nodemask
@@ -1076,7 +1685,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
-#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
for (zone = z->zone; \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
@@ -1085,16 +1694,38 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
- * @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->zones being iterated
- * @zlist - The zonelist being iterated
- * @highidx - The zone index of the highest zone to return
+ * @zone: The current zone in the iterator
+ * @z: The current pointer within zonelist->zones being iterated
+ * @zlist: The zonelist being iterated
+ * @highidx: The zone index of the highest zone to return
*
 * This iterator iterates through all zones at or below a given zone index.
*/
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+/* Whether the 'nodes' are all movable nodes */
+static inline bool movable_only_nodes(nodemask_t *nodes)
+{
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ int nid;
+
+ if (nodes_empty(*nodes))
+ return false;
+
+ /*
+ * We can choose an arbitrary node from the nodemask to get a
+ * zonelist, as they are interlinked. We just need to find
+ * at least one zone that can satisfy kernel allocations.
+ */
+ nid = first_node(*nodes);
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
+ return (!z->zone) ? true : false;
+}
+
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
@@ -1106,8 +1737,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#ifdef CONFIG_SPARSEMEM
/*
- * SECTION_SHIFT #bits space required to store a section #
- *
* PA_SECTION_SHIFT physical address to/from section number
* PFN_SECTION_SHIFT pfn to/from section number
*/
@@ -1122,8 +1751,8 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
@@ -1155,6 +1784,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+ struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
@@ -1219,15 +1849,17 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
+ unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+ if (unlikely(root >= NR_SECTION_ROOTS))
+ return NULL;
+
#ifdef CONFIG_SPARSEMEM_EXTREME
- if (!mem_section)
+ if (!mem_section || !mem_section[root])
return NULL;
#endif
- if (!mem_section[SECTION_NR_TO_ROOT(nr)])
- return NULL;
- return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+ return &mem_section[root][nr & SECTION_ROOT_MASK];
}
-extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);
/*
@@ -1241,15 +1873,32 @@ extern size_t mem_section_usage_size(void);
* (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
* worst combination is powerpc with 256k pages,
* which results in PFN_SECTION_SHIFT equal 6.
- * To sum it up, at least 6 bits are available.
+ * To sum it up, at least 6 bits are available on all architectures.
+ * However, architectures other than powerpc can exceed 6 bits (e.g.
+ * 15 bits are available on x86_64, and 13 bits on arm64 with the
+ * worst case of 64K pages), provided the extra bits are never relied
+ * upon on powerpc.
*/
-#define SECTION_MARKED_PRESENT (1UL<<0)
-#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_IS_EARLY (1UL<<3)
-#define SECTION_MAP_LAST_BIT (1UL<<4)
-#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 3
+enum {
+ SECTION_MARKED_PRESENT_BIT,
+ SECTION_HAS_MEM_MAP_BIT,
+ SECTION_IS_ONLINE_BIT,
+ SECTION_IS_EARLY_BIT,
+#ifdef CONFIG_ZONE_DEVICE
+ SECTION_TAINT_ZONE_DEVICE_BIT,
+#endif
+ SECTION_MAP_LAST_BIT,
+};
+
+#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
+#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
+#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
+#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
+#ifdef CONFIG_ZONE_DEVICE
+#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
+#endif
+#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
+#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
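A sketch of why SECTION_NID_SHIFT tracks SECTION_MAP_LAST_BIT: before
the real mem_map pointer is installed, the early node id is stashed in
the bits above the flags, so the shift must stay just past the last
flag bit (helper names assumed):

	static inline unsigned long sparse_encode_early_nid(int nid)
	{
		return (unsigned long)nid << SECTION_NID_SHIFT;
	}

	static inline int sparse_early_nid(struct mem_section *section)
	{
		return section->section_mem_map >> SECTION_NID_SHIFT;
	}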
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1288,6 +1937,20 @@ static inline int online_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
+#ifdef CONFIG_ZONE_DEVICE
+static inline int online_device_section(struct mem_section *section)
+{
+ unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
+
+ return section && ((section->section_mem_map & flags) == flags);
+}
+#else
+static inline int online_device_section(struct mem_section *section)
+{
+ return 0;
+}
+#endif
+
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
@@ -1295,10 +1958,8 @@ static inline int online_section_nr(unsigned long nr)
#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
-#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
-#endif
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
@@ -1317,7 +1978,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
- return test_bit(idx, ms->usage->subsection_map);
+ return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
@@ -1327,20 +1988,47 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
+/**
+ * pfn_valid - check if there is a valid memory map entry for a PFN
+ * @pfn: the page frame number to check
+ *
+ * Check if there is a valid memory map entry aka struct page for the @pfn.
+ * Note that the availability of the memory map entry does not imply that
+ * there is actual usable memory at that @pfn. The struct page may
+ * represent a hole or an unusable page frame.
+ *
+ * Return: 1 for PFNs that have memory map entries and 0 otherwise
+ */
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ int ret;
+
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Otherwise, a pfn whose upper bits are set but
+ * whose lower bits match a valid pfn could yield a
+ * false positive.
+ */
+ if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
+ return 0;
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- ms = __nr_to_section(pfn_to_section_nr(pfn));
- if (!valid_section(ms))
+ ms = __pfn_to_section(pfn);
+ rcu_read_lock_sched();
+ if (!valid_section(ms)) {
+ rcu_read_unlock_sched();
return 0;
+ }
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
- return early_section(ms) || pfn_section_valid(ms, pfn);
+ ret = early_section(ms) || pfn_section_valid(ms, pfn);
+ rcu_read_unlock_sched();
+
+ return ret;
}
#endif
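The rcu_read_lock_sched() pair above exists because the section usage map can now be freed after an RCU grace period (note the rcu_head added to struct mem_section_usage), so valid_section() and pfn_section_valid() must be evaluated inside the read-side section. Callers keep the familiar check-then-use pattern; a sketch:

	/* Illustrative: only touch the memmap once pfn_valid() said it exists. */
	static struct page *sketch_pfn_to_page_checked(unsigned long pfn)
	{
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}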
@@ -1348,7 +2036,7 @@ static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ return present_section(__pfn_to_section(pfn));
}
static inline unsigned long next_present_section_nr(unsigned long section_nr)
@@ -1376,7 +2064,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#define pfn_to_nid(pfn) (0)
#endif
-#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init() do {} while (0)
@@ -1385,64 +2072,6 @@ void sparse_init(void);
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-/*
- * During memory init memblocks map pfns to nids. The search is expensive and
- * this caches recent lookups. The implementation of __early_pfn_to_nid
- * may treat start/end as pfns or sections.
- */
-struct mminit_pfnnid_cache {
- unsigned long last_start;
- unsigned long last_end;
- int last_nid;
-};
-
-#ifndef early_pfn_valid
-#define early_pfn_valid(pfn) (1)
-#endif
-
-/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
- */
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
-#endif
-
-#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
-/*
- * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
- * associated with it or not. This means that a struct page exists for this
- * pfn. The caller cannot assume the page is fully initialized in general.
- * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
- * will ensure the struct page is fully online and initialized. Special pages
- * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
- *
- * In FLATMEM, it is expected that holes always have valid memmap as long as
- * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
- * that a valid section has a memmap for the entire section.
- *
- * However, an ARM, and maybe other embedded architectures in the future
- * free memmap backing holes to save memory on the assumption the memmap is
- * never used. The page_zone linkages are then broken even though pfn_valid()
- * returns true. A walker of the full memmap must then do this additional
- * check to ensure the memmap they are looking at is sane by making sure
- * the zone and PFN linkages are still valid. This is expensive, but walkers
- * of the full memmap are extremely rare.
- */
-bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone);
-#else
-static inline bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone)
-{
- return true;
-}
-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
new file mode 100644
index 000000000000..cd4d5c8781f5
--- /dev/null
+++ b/include/linux/mnt_idmapping.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MNT_IDMAPPING_H
+#define _LINUX_MNT_IDMAPPING_H
+
+#include <linux/types.h>
+#include <linux/uidgid.h>
+
+struct mnt_idmap;
+struct user_namespace;
+
+extern struct mnt_idmap nop_mnt_idmap;
+extern struct user_namespace init_user_ns;
+
+typedef struct {
+ uid_t val;
+} vfsuid_t;
+
+typedef struct {
+ gid_t val;
+} vfsgid_t;
+
+static_assert(sizeof(vfsuid_t) == sizeof(kuid_t));
+static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
+static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
+static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+
+#ifdef CONFIG_MULTIUSER
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return uid.val;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return gid.val;
+}
+#else
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return 0;
+}
+#endif
+
+static inline bool vfsuid_valid(vfsuid_t uid)
+{
+ return __vfsuid_val(uid) != (uid_t)-1;
+}
+
+static inline bool vfsgid_valid(vfsgid_t gid)
+{
+ return __vfsgid_val(gid) != (gid_t)-1;
+}
+
+static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right)
+{
+ return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right);
+}
+
+static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right)
+{
+ return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right);
+}
+
+/**
+ * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value
+ * @vfsuid: the vfsuid to compare
+ * @kuid: the kuid to compare
+ *
+ * Check whether @vfsuid and @kuid have the same values.
+ *
+ * Return: true if @vfsuid and @kuid have the same value, false if not.
+ * Comparison between two invalid uids returns false.
+ */
+static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid);
+}
+
+/**
+ * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value
+ * @vfsgid: the vfsgid to compare
+ * @kgid: the kgid to compare
+ *
+ * Check whether @vfsgid and @kgid have the same values.
+ *
+ * Return: true if @vfsgid and @kgid have the same value, false if not.
+ * Comparison between two invalid gids returns false.
+ */
+static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
+}
+
+/*
+ * vfs{g,u}ids are created from k{g,u}ids.
+ * We don't allow them to be created from regular {u,g}id.
+ */
+#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) }
+#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) }
+
+#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID)
+#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID)
+
+/*
+ * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare
+ * whether the mapped value is identical to value of a k{g,u}id.
+ */
+#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) }
+#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) }
+
+int vfsgid_in_group_p(vfsgid_t vfsgid);
+
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
+void mnt_idmap_put(struct mnt_idmap *idmap);
+
+vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kuid_t kuid);
+
+vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kgid_t kgid);
+
+kuid_t from_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsuid_t vfsuid);
+
+kgid_t from_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsgid_t vfsgid);
+
+/**
+ * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid: vfsuid to be mapped
+ *
+ * Check whether @vfsuid has a mapping in the filesystem idmapping, i.e.
+ * whether @vfsuid can be mapped down into the filesystem.
+ *
+ * Return: true if @vfsuid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsuid_has_fsmapping(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ vfsuid_t vfsuid)
+{
+ return uid_valid(from_vfsuid(idmap, fs_userns, vfsuid));
+}
+
+static inline bool vfsuid_has_mapping(struct user_namespace *userns,
+ vfsuid_t vfsuid)
+{
+ return from_kuid(userns, AS_KUIDT(vfsuid)) != (uid_t)-1;
+}
+
+/**
+ * vfsuid_into_kuid - convert vfsuid into kuid
+ * @vfsuid: the vfsuid to convert
+ *
+ * This can be used when a vfsuid is committed as a kuid.
+ *
+ * Return: a kuid with the value of @vfsuid
+ */
+static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid)
+{
+ return AS_KUIDT(vfsuid);
+}
+
+/**
+ * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid: vfsgid to be mapped
+ *
+ * Check whether @vfsgid has a mapping in the filesystem idmapping, i.e.
+ * whether @vfsgid can be mapped down into the filesystem.
+ *
+ * Return: true if @vfsgid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsgid_has_fsmapping(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ vfsgid_t vfsgid)
+{
+ return gid_valid(from_vfsgid(idmap, fs_userns, vfsgid));
+}
+
+static inline bool vfsgid_has_mapping(struct user_namespace *userns,
+ vfsgid_t vfsgid)
+{
+ return from_kgid(userns, AS_KGIDT(vfsgid)) != (gid_t)-1;
+}
+
+/**
+ * vfsgid_into_kgid - convert vfsgid into kgid
+ * @vfsgid: the vfsgid to convert
+ *
+ * This can be used when a vfsgid is committed as a kgid.
+ *
+ * Return: a kgid with the value of @vfsgid
+ */
+static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid)
+{
+ return AS_KGIDT(vfsgid);
+}
+
+/**
+ * mapped_fsuid - return caller's fsuid mapped according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsuid. A common example is initializing the i_uid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsuid mapped up according to @idmap.
+ */
+static inline kuid_t mapped_fsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsuid(idmap, fs_userns, VFSUIDT_INIT(current_fsuid()));
+}
+
+/**
+ * mapped_fsgid - return caller's fsgid mapped according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsgid. A common example is initializing the i_gid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsgid mapped up according to @idmap.
+ */
+static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
+}
+
+#endif /* _LINUX_MNT_IDMAPPING_H */
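Taken together, these helpers let a filesystem initialize ownership of a new object from the caller's credentials as seen through the mount's idmapping. A minimal sketch of an inode-creation path (the helper name is made up for illustration):

	/* Illustrative: derive i_uid/i_gid through the mount's idmapping. */
	static void sketch_init_owner(struct mnt_idmap *idmap,
				      struct user_namespace *fs_userns,
				      struct inode *inode)
	{
		inode->i_uid = mapped_fsuid(idmap, fs_userns);
		inode->i_gid = mapped_fsgid(idmap, fs_userns);
	}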
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 5b08a473cdba..7a9a07ea451b 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -9,6 +9,7 @@
#define LINUX_MOD_DEVICETABLE_H
#ifdef __KERNEL__
+#include <linux/mei.h>
#include <linux/types.h>
#include <linux/uuid.h>
typedef unsigned long kernel_ulong_t;
@@ -16,6 +17,10 @@ typedef unsigned long kernel_ulong_t;
#define PCI_ANY_ID (~0)
+enum {
+ PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
/**
* struct pci_device_id - PCI device ID structure
* @vendor: Vendor ID to match (or PCI_ANY_ID)
@@ -34,12 +39,14 @@ typedef unsigned long kernel_ulong_t;
* Best practice is to use driver_data as an index
* into a static list of equivalent device types,
* instead of using it as a pointer.
+ * @override_only: Match only when dev->driver_override is this driver.
*/
struct pci_device_id {
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
__u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
__u32 class, class_mask; /* (class,subclass,prog-if) triplet */
kernel_ulong_t driver_data; /* Data private to the driver */
+ __u32 override_only;
};
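With @override_only set, the entry matches only when userspace has written this driver's name to the device's driver_override attribute, which is how VFIO variant drivers avoid binding by default. A sketch of such a table entry (the vendor value is illustrative):

	static const struct pci_device_id sketch_vfio_ids[] = {
		{ .vendor = 0x8086, .device = PCI_ANY_ID,
		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
		  .override_only = PCI_ID_F_VFIO_DRIVER_OVERRIDE },
		{ }
	};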
@@ -205,7 +212,7 @@ struct css_device_id {
kernel_ulong_t driver_data;
};
-#define ACPI_ID_LEN 9
+#define ACPI_ID_LEN 16
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
@@ -214,6 +221,19 @@ struct acpi_device_id {
__u32 cls_msk;
};
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields are left at
+ * their default (zero) values.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
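Because the macro expands to designated initializers for .cls and .cls_msk, it is used as an initializer fragment inside a match-table entry; for example (the class values are illustrative):

	static const struct acpi_device_id sketch_acpi_ids[] = {
		{ ACPI_DEVICE_CLASS(0x0c0330, 0xffffff) },
		{ }
	};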
#define PNP_ID_LEN 8
#define PNP_MAX_DEVICES 8
@@ -447,6 +467,7 @@ struct hv_vmbus_device_id {
struct rpmsg_device_id {
char name[RPMSG_NAME_SIZE];
+ kernel_ulong_t driver_data;
};
/* i2c */
@@ -828,6 +849,8 @@ struct wmi_device_id {
#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
#define MHI_NAME_SIZE 32
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
@@ -838,4 +861,113 @@ struct mhi_device_id {
kernel_ulong_t driver_data;
};
+#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_MODULE_PREFIX "auxiliary:"
+
+struct auxiliary_device_id {
+ char name[AUXILIARY_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
+/* Surface System Aggregator Module */
+
+#define SSAM_MATCH_TARGET 0x1
+#define SSAM_MATCH_INSTANCE 0x2
+#define SSAM_MATCH_FUNCTION 0x4
+
+struct ssam_device_id {
+ __u8 match_flags;
+
+ __u8 domain;
+ __u8 category;
+ __u8 target;
+ __u8 instance;
+ __u8 function;
+
+ kernel_ulong_t driver_data;
+};
+
+/*
+ * DFL (Device Feature List)
+ *
+ * DFL defines a linked list of feature headers within the device MMIO space to
+ * provide an extensible way of adding features. Software can walk through these
+ * predefined data structures to enumerate features. It is currently used
+ * in FPGA devices. See Documentation/fpga/dfl.rst for more information.
+ *
+ * The dfl bus type is introduced to match the individual feature devices (dfl
+ * devices) for specific dfl drivers.
+ */
+
+/**
+ * struct dfl_device_id - dfl device identifier
+ * @type: DFL FIU type of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @driver_data: driver specific data.
+ */
+struct dfl_device_id {
+ __u16 type;
+ __u16 feature_id;
+ kernel_ulong_t driver_data;
+};
+
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: pointer to driver specific data
+ */
+struct ishtp_device_id {
+ guid_t guid;
+ kernel_ulong_t driver_data;
+};
+
+#define CDX_ANY_ID (0xFFFF)
+
+enum {
+ CDX_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
+/**
+ * struct cdx_device_id - CDX device identifier
+ * @vendor: Vendor ID
+ * @device: Device ID
+ * @subvendor: Subsystem vendor ID (or CDX_ANY_ID)
+ * @subdevice: Subsystem device ID (or CDX_ANY_ID)
+ * @class: Device class
+ * Most drivers do not need to specify class/class_mask
+ * as vendor/device is normally sufficient.
+ * @class_mask: Limit which sub-fields of the class field are compared.
+ * @override_only: Match only when dev->driver_override is this driver.
+ *
+ * Type of entries in the "device ID" table for CDX devices supported by
+ * a CDX device driver.
+ */
+struct cdx_device_id {
+ __u16 vendor;
+ __u16 device;
+ __u16 subvendor;
+ __u16 subdevice;
+ __u32 class;
+ __u32 class_mask;
+ __u32 override_only;
+};
+
+struct vchiq_device_id {
+ char name[32];
+};
+
+/**
+ * struct coreboot_device_id - Identifies a coreboot table entry
+ * @tag: tag ID
+ * @driver_data: driver specific data
+ */
+struct coreboot_device_id {
+ __u32 tag;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index e30ed5fa33a7..1153b0d99a80 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/stat.h>
+#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
@@ -25,13 +26,12 @@
#include <linux/error-injection.h>
#include <linux/tracepoint-defs.h>
#include <linux/srcu.h>
+#include <linux/static_call_types.h>
+#include <linux/dynamic_debug.h>
#include <linux/percpu.h>
#include <asm/module.h>
-/* Not Yet Implemented */
-#define MODULE_SUPPORTED_DEVICE(name)
-
#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
struct modversion_info {
@@ -65,7 +65,7 @@ struct module_version_attribute {
struct module_attribute mattr;
const char *module_name;
const char *version;
-} __attribute__ ((__aligned__(sizeof(void *))));
+};
extern ssize_t __modver_version_show(struct module_attribute *,
struct module_kobject *, char *);
@@ -130,13 +130,17 @@ extern void cleanup_module(void);
#define module_init(initfn) \
static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \
- int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
+ int init_module(void) __copy(initfn) \
+ __attribute__((alias(#initfn))); \
+ ___ADDRESSABLE(init_module, __initdata);
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \
- void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
+ void cleanup_module(void) __copy(exitfn) \
+ __attribute__((alias(#exitfn))); \
+ ___ADDRESSABLE(cleanup_module, __exitdata);
#endif
@@ -265,20 +269,20 @@ extern typeof(name) __mod_##type##__##name##_device_table \
#else
#define MODULE_VERSION(_version) \
MODULE_INFO(version, _version); \
- static struct module_version_attribute ___modver_attr = { \
- .mattr = { \
- .attr = { \
- .name = "version", \
- .mode = S_IRUGO, \
+ static struct module_version_attribute __modver_attr \
+ __used __section("__modver") \
+ __aligned(__alignof__(struct module_version_attribute)) \
+ = { \
+ .mattr = { \
+ .attr = { \
+ .name = "version", \
+ .mode = S_IRUGO, \
+ }, \
+ .show = __modver_version_show, \
}, \
- .show = __modver_version_show, \
- }, \
- .module_name = KBUILD_MODNAME, \
- .version = _version, \
- }; \
- static const struct module_version_attribute \
- __used __attribute__ ((__section__ ("__modver"))) \
- * __moduleparam_const __modver_attr = &___modver_attr
+ .module_name = KBUILD_MODNAME, \
+ .version = _version, \
+ }
#endif
/* Optional firmware file (or files) needed by the module
@@ -286,7 +290,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns))
struct notifier_block;
@@ -317,17 +321,47 @@ struct mod_tree_node {
struct latch_tree_node node;
};
-struct module_layout {
- /* The actual code + data. */
+enum mod_mem_type {
+ MOD_TEXT = 0,
+ MOD_DATA,
+ MOD_RODATA,
+ MOD_RO_AFTER_INIT,
+ MOD_INIT_TEXT,
+ MOD_INIT_DATA,
+ MOD_INIT_RODATA,
+
+ MOD_MEM_NUM_TYPES,
+ MOD_INVALID = -1,
+};
+
+#define mod_mem_type_is_init(type) \
+ ((type) == MOD_INIT_TEXT || \
+ (type) == MOD_INIT_DATA || \
+ (type) == MOD_INIT_RODATA)
+
+#define mod_mem_type_is_core(type) (!mod_mem_type_is_init(type))
+
+#define mod_mem_type_is_text(type) \
+ ((type) == MOD_TEXT || \
+ (type) == MOD_INIT_TEXT)
+
+#define mod_mem_type_is_data(type) (!mod_mem_type_is_text(type))
+
+#define mod_mem_type_is_core_data(type) \
+ (mod_mem_type_is_core(type) && \
+ mod_mem_type_is_data(type))
+
+#define for_each_mod_mem_type(type) \
+ for (enum mod_mem_type (type) = 0; \
+ (type) < MOD_MEM_NUM_TYPES; (type)++)
+
+#define for_class_mod_mem_type(type, class) \
+ for_each_mod_mem_type(type) \
+ if (mod_mem_type_is_##class(type))
+
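These iterators drive the replacement of the old core/init layouts with the per-type module_memory array introduced below; a sketch of how a consumer could total a module's core (non-init) memory with them (not code from this diff):

	/* Illustrative: sum the sizes of all core memory regions. */
	static unsigned int sketch_core_size(const struct module *mod)
	{
		unsigned int size = 0;

		for_class_mod_mem_type(type, core)
			size += mod->mem[type].size;
		return size;
	}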
+struct module_memory {
void *base;
- /* Total size. */
unsigned int size;
- /* The size of the executable code. */
- unsigned int text_size;
- /* Size of RO section of the module (text+rodata) */
- unsigned int ro_size;
- /* Size of RO after init section */
- unsigned int ro_after_init_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
@@ -336,9 +370,9 @@ struct module_layout {
#ifdef CONFIG_MODULES_TREE_LOOKUP
/* Only touch one cacheline for common rbtree-for-core-layout case. */
-#define __module_layout_align ____cacheline_aligned
+#define __module_memory_align ____cacheline_aligned
#else
-#define __module_layout_align
+#define __module_memory_align
#endif
struct mod_kallsyms {
@@ -349,6 +383,14 @@ struct mod_kallsyms {
};
#ifdef CONFIG_LIVEPATCH
+/**
+ * struct klp_modinfo - ELF information preserved from the livepatch module
+ *
+ * @hdr: ELF header
+ * @sechdrs: Section header table
+ * @secstrings: String table for the section headers
+ * @symndx: The symbol table section index
+ */
struct klp_modinfo {
Elf_Ehdr hdr;
Elf_Shdr *sechdrs;
@@ -366,6 +408,11 @@ struct module {
/* Unique handle for this module */
char name[MODULE_NAME_LEN];
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ /* Module build ID */
+ unsigned char build_id[BUILD_ID_SIZE_MAX];
+#endif
+
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
@@ -378,6 +425,11 @@ struct module {
const s32 *crcs;
unsigned int num_syms;
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+ s32 *kcfi_traps;
+ s32 *kcfi_traps_end;
+#endif
+
/* Kernel parameters. */
#ifdef CONFIG_SYSFS
struct mutex param_lock;
@@ -391,18 +443,6 @@ struct module {
const s32 *gpl_crcs;
bool using_gplonly_symbols;
-#ifdef CONFIG_UNUSED_SYMBOLS
- /* unused exported symbols. */
- const struct kernel_symbol *unused_syms;
- const s32 *unused_crcs;
- unsigned int num_unused_syms;
-
- /* GPL-only, unused exported symbols. */
- unsigned int num_unused_gpl_syms;
- const struct kernel_symbol *unused_gpl_syms;
- const s32 *unused_gpl_crcs;
-#endif
-
#ifdef CONFIG_MODULE_SIG
/* Signature was verified. */
bool sig_ok;
@@ -410,11 +450,6 @@ struct module {
bool async_probe_requested;
- /* symbols that will be GPL-only in the near future. */
- const struct kernel_symbol *gpl_future_syms;
- const s32 *gpl_future_crcs;
- unsigned int num_gpl_future_syms;
-
/* Exception table */
unsigned int num_exentries;
struct exception_table_entry *extable;
@@ -422,9 +457,7 @@ struct module {
/* Startup function. */
int (*init)(void);
- /* Core layout: rbtree is accessed frequently, so keep together. */
- struct module_layout core_layout __module_layout_align;
- struct module_layout init_layout;
+ struct module_memory mem[MOD_MEM_NUM_TYPES] __module_memory_align;
/* Arch-specific module values */
struct mod_arch_specific arch;
@@ -474,6 +507,10 @@ struct module {
unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
#endif
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ unsigned int btf_data_size;
+ void *btf_data;
+#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
@@ -498,15 +535,31 @@ struct module {
unsigned long *kprobe_blacklist;
unsigned int num_kprobe_blacklist;
#endif
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+ int num_static_call_sites;
+ struct static_call_site *static_call_sites;
+#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_init_suites;
+ struct kunit_suite **kunit_init_suites;
+ int num_kunit_suites;
+ struct kunit_suite **kunit_suites;
+#endif
+
#ifdef CONFIG_LIVEPATCH
bool klp; /* Is this a livepatch module? */
bool klp_alive;
- /* Elf information */
+ /* ELF information */
struct klp_modinfo *klp_info;
#endif
+#ifdef CONFIG_PRINTK_INDEX
+ unsigned int printk_index_size;
+ struct pi_entry **printk_index_start;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head source_list;
@@ -529,6 +582,9 @@ struct module {
struct error_injection_entry *ei_funcs;
unsigned int num_ei_funcs;
#endif
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+ struct _ddebug_info dyndbg_info;
+#endif
} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
@@ -541,8 +597,6 @@ static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
}
#endif
-extern struct mutex module_mutex;
-
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) require entry into the module during init.*/
@@ -558,18 +612,35 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
+static inline bool within_module_mem_type(unsigned long addr,
+ const struct module *mod,
+ enum mod_mem_type type)
+{
+ unsigned long base, size;
+
+ base = (unsigned long)mod->mem[type].base;
+ size = mod->mem[type].size;
+ return addr - base < size;
+}
+
static inline bool within_module_core(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->core_layout.base <= addr &&
- addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
+ for_class_mod_mem_type(type, core) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module_init(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->init_layout.base <= addr &&
- addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
+ for_class_mod_mem_type(type, init) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module(unsigned long addr, const struct module *mod)
@@ -577,35 +648,12 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
-/* Search for module by name: must hold module_mutex. */
+/* Search for module by name: must be in an RCU-sched critical section. */
struct module *find_module(const char *name);
-struct symsearch {
- const struct kernel_symbol *start, *stop;
- const s32 *crcs;
- enum mod_license {
- NOT_GPL_ONLY,
- GPL_ONLY,
- WILL_BE_GPL_ONLY,
- } license;
- bool unused;
-};
-
-/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
- symnum out of range. */
-int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
- char *name, char *module_name, int *exported);
-
-/* Look for this name: can be of form module:name. */
-unsigned long module_kallsyms_lookup_name(const char *name);
-
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *, unsigned long),
- void *data);
-
-extern void __noreturn __module_put_and_exit(struct module *mod,
+extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
long code);
-#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
+#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
int module_refcount(struct module *mod);
@@ -617,10 +665,46 @@ void symbol_put_addr(void *addr);
to handle the error case (which only happens with rmmod --wait). */
extern void __module_get(struct module *module);
-/* This is the Right Way to get a module: if it fails, it's being removed,
- * so pretend it's not there. */
+/**
+ * try_module_get() - take module refcount unless module is being removed
+ * @module: the module we should check for
+ *
+ * Take a reference on @module only if it is not being removed; the call
+ * fails if removal is already in progress.
+ *
+ * Care must also be taken to ensure the module exists and is alive prior to
+ * usage of this call. This can be guaranteed through two means:
+ *
+ * 1) Direct protection: you know an earlier caller must have increased the
+ * module reference through __module_get(). This can typically be achieved
+ * by having another entity other than the module itself increment the
+ * module reference count.
+ *
+ * 2) Implied protection: there is an implied protection against module
+ * removal. An example of this is the implied protection used by kernfs /
+ * sysfs. The sysfs store / read file operations are guaranteed to exist
+ *    through the use of kernfs's active reference (see kernfs_active()), and
+ *    a sysfs / kernfs file cannot be removed while that file is active.
+ *    Therefore, if a sysfs file is being read or written to, the module
+ *    which created it must still exist. It is therefore safe to use
+ * try_module_get() on module sysfs store / read ops.
+ *
+ * One of the real benefits of try_module_get() is its module_is_live() check,
+ * which lets the caller yield to userspace module removal requests and
+ * gracefully fail if the module is on its way out.
+ *
+ * Returns true if the reference count was successfully incremented.
+ */
extern bool try_module_get(struct module *module);
+/**
+ * module_put() - release a reference count to a module
+ * @module: the module we should release a reference count for
+ *
+ * If you successfully bump a reference count to a module with try_module_get(),
+ * when you are finished you must call module_put() to release that reference
+ * count.
+ */
extern void module_put(struct module *module);
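The canonical get/put pairing in a caller then looks like this (a sketch; the callback is hypothetical):

	/* Illustrative: pin the provider module for the duration of a call. */
	static int sketch_call_op(struct module *owner, int (*op)(void))
	{
		int ret;

		if (!try_module_get(owner))
			return -ENODEV;
		ret = op();
		module_put(owner);
		return ret;
	}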
#else /*!CONFIG_MODULE_UNLOAD*/
@@ -649,17 +733,6 @@ static inline void __module_get(struct module *module)
/* Dereference module function descriptor */
void *dereference_module_function_descriptor(struct module *mod, void *ptr);
-/* For kallsyms to ask for address resolution. namebuf should be at
- * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
- * found, otherwise NULL. */
-const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname,
- char *namebuf);
-int lookup_module_symbol_name(unsigned long addr, char *symname);
-int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-
int register_module_notifier(struct notifier_block *nb);
int unregister_module_notifier(struct notifier_block *nb);
@@ -670,19 +743,15 @@ static inline bool module_requested_async_probing(struct module *module)
return module && module->async_probe_requested;
}
-#ifdef CONFIG_LIVEPATCH
static inline bool is_livepatch_module(struct module *mod)
{
+#ifdef CONFIG_LIVEPATCH
return mod->klp;
-}
-#else /* !CONFIG_LIVEPATCH */
-static inline bool is_livepatch_module(struct module *mod)
-{
+#else
return false;
+#endif
}
-#endif /* CONFIG_LIVEPATCH */
-bool is_module_sig_enforced(void);
void set_module_sig_enforced(void);
#else /* !CONFIG_MODULES... */
@@ -735,7 +804,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
}
/* Get/put a kernel symbol (calls should be symmetric) */
-#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
+#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); })
#define symbol_put(x) do { } while (0)
#define symbol_put_addr(x) do { } while (0)
@@ -754,46 +823,6 @@ static inline void module_put(struct module *module)
#define module_name(mod) "kernel"
-/* For kallsyms to ask for address resolution. NULL means not found. */
-static inline const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname,
- char *namebuf)
-{
- return NULL;
-}
-
-static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
-{
- return -ERANGE;
-}
-
-static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
-{
- return -ERANGE;
-}
-
-static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
- char *type, char *name,
- char *module_name, int *exported)
-{
- return -ERANGE;
-}
-
-static inline unsigned long module_kallsyms_lookup_name(const char *name)
-{
- return 0;
-}
-
-static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int register_module_notifier(struct notifier_block *nb)
{
/* no events will happen anyway, so this can always succeed */
@@ -805,7 +834,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
return 0;
}
-#define module_put_and_exit(code) do_exit(code)
+#define module_put_and_kthread_exit(code) kthread_exit(code)
static inline void print_modules(void)
{
@@ -816,10 +845,6 @@ static inline bool module_requested_async_probing(struct module *module)
return false;
}
-static inline bool is_module_sig_enforced(void)
-{
- return false;
-}
static inline void set_module_sig_enforced(void)
{
@@ -836,8 +861,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
#ifdef CONFIG_SYSFS
extern struct kset *module_kset;
-extern struct kobj_type module_ktype;
-extern int module_sysfs_initialized;
+extern const struct kobj_type module_ktype;
#endif /* CONFIG_SYSFS */
#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
@@ -861,7 +885,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
extern bool retpoline_module_ok(bool has_retpoline);
#else
static inline bool retpoline_module_ok(bool has_retpoline)
@@ -871,15 +895,99 @@ static inline bool retpoline_module_ok(bool has_retpoline)
#endif
#ifdef CONFIG_MODULE_SIG
+bool is_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
}
#else /* !CONFIG_MODULE_SIG */
+static inline bool is_module_sig_enforced(void)
+{
+ return false;
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
}
#endif /* CONFIG_MODULE_SIG */
+#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
+int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *, unsigned long),
+ void *data);
+
+/* For kallsyms to ask for address resolution. namebuf should be at
+ * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
+ * found, otherwise NULL.
+ */
+const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, const unsigned char **modbuildid,
+ char *namebuf);
+int lookup_module_symbol_name(unsigned long addr, char *symname);
+int lookup_module_symbol_attrs(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char *modname,
+ char *name);
+
+/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
+ * symnum out of range.
+ */
+int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported);
+
+/* Look for this name: can be of form module:name. */
+unsigned long module_kallsyms_lookup_name(const char *name);
+
+unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name);
+
+#else /* CONFIG_MODULES && CONFIG_KALLSYMS */
+
+static inline int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *, unsigned long),
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+/* For kallsyms to ask for address resolution. NULL means not found. */
+static inline const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname,
+ const unsigned char **modbuildid,
+ char *namebuf)
+{
+ return NULL;
+}
+
+static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
+{
+ return -ERANGE;
+}
+
+static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -ERANGE;
+}
+
+static inline unsigned long module_kallsyms_lookup_name(const char *name)
+{
+ return 0;
+}
+
+static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
+ const char *name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
+
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
new file mode 100644
index 000000000000..77c9895b9ddb
--- /dev/null
+++ b/include/linux/module_symbol.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_MODULE_SYMBOL_H
+#define _LINUX_MODULE_SYMBOL_H
+
+/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
+static inline bool is_mapping_symbol(const char *str)
+{
+ if (str[0] == '.' && str[1] == 'L')
+ return true;
+ if (str[0] == 'L' && str[1] == '0')
+ return true;
+ return str[0] == '$';
+}
+
+#endif /* _LINUX_MODULE_SYMBOL_H */
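The three patterns cover compiler-generated local labels (".L" and "L0") plus the ELF mapping symbols, which all begin with '$' (e.g. "$a"/"$t"/"$d" on arm, "$x"/"$d" on arm64). A kallsyms-style consumer simply skips them; a sketch:

	/* Illustrative: count only meaningful symbols in a name list. */
	static int sketch_count_real_symbols(const char *names[], int n)
	{
		int i, count = 0;

		for (i = 0; i < n; i++)
			if (!is_mapping_symbol(names[i]))
				count++;
		return count;
	}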
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 4fa67a8b2265..89b1e0ed9811 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -13,6 +13,9 @@
* must be implemented by each architecture.
*/
+/* arch may override to do additional checking of ELF header architecture */
+bool module_elf_check_arch(Elf_Ehdr *hdr);
+
/* Adjust arch-specific sections. Return 0 on success. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -39,6 +42,11 @@ bool module_init_section(const char *name);
*/
bool module_exit_section(const char *name);
+/* Describes whether within_module_init() will consider this an init section
+ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
+ */
+bool module_init_layout_section(const char *sname);
+
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.
@@ -72,6 +80,23 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
+#ifdef CONFIG_LIVEPATCH
+/*
+ * Some architectures (namely x86_64 and ppc64) perform sanity checks when
+ * applying relocations. If a patched module gets unloaded and then later
+ * reloaded (and re-patched), klp re-applies relocations to the replacement
+ * function(s). Any leftover relocations from the previous loading of the
+ * patched module might trigger the sanity checks.
+ *
+ * To prevent that, when unloading a patched module, clear out any relocations
+ * that might trigger arch-specific sanity checks on a future module reload.
+ */
+void clear_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me);
+#endif
#else
static inline int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
@@ -90,13 +115,22 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod);
+#ifdef CONFIG_MODULES
+void flush_module_init_free_work(void);
+#else
+static inline void flush_module_init_free_work(void)
+{
+}
+#endif
+
/* Any cleanup needed when module leaves. */
void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1ad5aa3b86d9..bfb85fd13e1f 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -21,12 +21,12 @@
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
#define __MODULE_INFO(tag, name, info) \
-static const char __UNIQUE_ID(name)[] \
- __used __attribute__((section(".modinfo"), unused, aligned(1))) \
- = __MODULE_INFO_PREFIX __stringify(tag) "=" info
+ static const char __UNIQUE_ID(name)[] \
+ __used __section(".modinfo") __aligned(1) \
+ = __MODULE_INFO_PREFIX __stringify(tag) "=" info
#define __MODULE_PARM_TYPE(name, _type) \
- __MODULE_INFO(parmtype, name##type, #name ":" _type)
+ __MODULE_INFO(parmtype, name##type, #name ":" _type)
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
@@ -118,7 +118,7 @@ struct kparam_array
* you can create your own by defining those variables.
*
* Standard types are:
- * byte, short, ushort, int, uint, long, ulong
+ * byte, hexint, short, ushort, int, uint, long, ulong
* charp: a character pointer
* bool: a bool, values 0/1, y/n, Y/N.
* invbool: the above, only sense-reversed (N = true).
@@ -276,7 +276,7 @@ struct kparam_array
read-only sections (which is part of respective UNIX ABI on these
platforms). So 'const' makes no sense and even causes compile failures
with some compilers. */
-#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+#if defined(CONFIG_ALPHA) || defined(CONFIG_PPC64)
#define __moduleparam_const
#else
#define __moduleparam_const const
@@ -288,12 +288,16 @@ struct kparam_array
/* Default value instead of permissions? */ \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
- __used \
- __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ __used __section("__param") \
+ __aligned(__alignof__(struct kernel_param)) \
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
-/* Obsolete - use module_param_cb() */
+/*
+ * Useful for describing a set/get pair used only once (i.e. for this
+ * parameter). For repeated set/get pairs (i.e. the same struct
+ * kernel_param_ops), use module_param_cb() instead.
+ */
#define module_param_call(name, _set, _get, arg, perm) \
static const struct kernel_param_ops __param_ops_##name = \
{ .flags = 0, .set = _set, .get = _get }; \
@@ -381,6 +385,8 @@ extern bool parameq(const char *name1, const char *name2);
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
+typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg);
+
/* Called on module insert or kernel boot */
extern char *parse_args(const char *name,
char *args,
@@ -388,9 +394,7 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
- void *arg,
- int (*unknown)(char *param, char *val,
- const char *doing, void *arg));
+ void *arg, parse_unknown_fn unknown);
/* Called by module remove. */
#ifdef CONFIG_SYSFS
@@ -431,6 +435,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp);
extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+ unsigned int min, unsigned int max);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
extern const struct kernel_param_ops param_ops_long;
@@ -448,6 +454,11 @@ extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
+extern const struct kernel_param_ops param_ops_hexint;
+extern int param_set_hexint(const char *val, const struct kernel_param *kp);
+extern int param_get_hexint(char *buffer, const struct kernel_param *kp);
+#define param_check_hexint(name, p) param_check_uint(name, p)
+
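Drivers use the new type exactly like the other integer parameter types, the only difference being that the value is parsed and shown in hexadecimal. A sketch ('debug_mask' is a made-up parameter):

	/* Illustrative: a bitmask parameter displayed as hex in sysfs. */
	static unsigned int debug_mask = 0xffff;
	module_param(debug_mask, hexint, 0644);
	MODULE_PARM_DESC(debug_mask, "bitmask of debug categories");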
extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
diff --git a/include/linux/mount.h b/include/linux/mount.h
index de657bd211fa..c34c18b4e8f3 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -11,17 +11,16 @@
#define _LINUX_MOUNT_H
#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/nodemask.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
struct super_block;
-struct vfsmount;
struct dentry;
-struct mnt_namespace;
+struct user_namespace;
+struct mnt_idmap;
+struct file_system_type;
struct fs_context;
+struct file;
+struct path;
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
@@ -30,6 +29,7 @@ struct fs_context;
#define MNT_NODIRATIME 0x10
#define MNT_RELATIME 0x20
#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
+#define MNT_NOSYMFOLLOW 0x80
#define MNT_SHRINKABLE 0x100
#define MNT_WRITE_HOLD 0x200
@@ -46,12 +46,11 @@ struct fs_context;
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
| MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY)
+ | MNT_READONLY | MNT_NOSYMFOLLOW)
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
- MNT_CURSOR)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
#define MNT_INTERNAL 0x4000
@@ -65,34 +64,36 @@ struct fs_context;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
-#define MNT_CURSOR 0x10000000
+#define MNT_ONRB 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
+ struct mnt_idmap *mnt_idmap;
} __randomize_layout;
-struct file; /* forward dec */
-struct path;
+static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
+{
+ /* Pairs with smp_store_release() in do_idmap_mount(). */
+ return smp_load_acquire(&mnt->mnt_idmap);
+}
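Consumers are expected to sample the idmapping once per operation through this accessor rather than dereferencing mnt->mnt_idmap directly, so that a concurrent change to an idmapped mount is observed consistently. A sketch of a typical caller (the helper name is illustrative):

	/* Illustrative: resolve an inode owner into a vfsuid for this mount. */
	static vfsuid_t sketch_inode_vfsuid(struct vfsmount *mnt,
					    struct user_namespace *fs_userns,
					    kuid_t i_uid)
	{
		return make_vfsuid(mnt_idmap(mnt), fs_userns, i_uid);
	}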
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
-extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
-struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
-extern int __mnt_want_write(struct vfsmount *);
-extern void __mnt_drop_write(struct vfsmount *);
+int mnt_get_write_access(struct vfsmount *mnt);
+void mnt_put_write_access(struct vfsmount *mnt);
-struct file_system_type;
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
@@ -105,12 +106,22 @@ extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern dev_t name_to_dev_t(const char *name);
-
-extern unsigned int sysctl_mount_max;
-
extern bool path_is_mountpoint(const struct path *path);
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(const struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+ struct vfsmount *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+extern int cifs_root_data(char **dev, char **opts);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index 490db6886dcc..ac577699edfd 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -2,7 +2,7 @@
/*
* Turris Mox module configuration bus driver
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#ifndef __LINUX_MOXTET_H
@@ -35,8 +35,6 @@ enum turris_mox_module_id {
#define MOXTET_NIRQS 16
-extern struct bus_type moxtet_type;
-
struct moxtet {
struct device *dev;
struct mutex lock;
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index f4f5e90a6844..1bdc39daac0a 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -16,10 +16,8 @@ struct writeback_control;
struct readahead_control;
void mpage_readahead(struct readahead_control *, get_block_t get_block);
-int mpage_readpage(struct page *page, get_block_t get_block);
+int mpage_read_folio(struct folio *folio, get_block_t get_block);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
-int mpage_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
#endif
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 5d906dfbf3ed..eb0d1c1db208 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -40,21 +40,79 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
+#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
+void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
+static inline MPI mpi_new(unsigned int nbits)
+{
+ return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
+}
+
+MPI mpi_copy(MPI a);
+MPI mpi_alloc_like(MPI a);
+void mpi_snatch(MPI w, MPI u);
+MPI mpi_set(MPI w, MPI u);
+MPI mpi_set_ui(MPI w, unsigned long u);
+MPI mpi_alloc_set_ui(unsigned long u);
+void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
+
+/* Constants used to return constant MPIs. See mpi_init if you
+ * want to add more constants.
+ */
+#define MPI_NUMBER_OF_CONSTANTS 6
+enum gcry_mpi_constants {
+ MPI_C_ZERO,
+ MPI_C_ONE,
+ MPI_C_TWO,
+ MPI_C_THREE,
+ MPI_C_FOUR,
+ MPI_C_EIGHT
+};
+
+MPI mpi_const(enum gcry_mpi_constants no);
+
/*-- mpicoder.c --*/
+
+/* Different formats of external big integer representation. */
+enum gcry_mpi_format {
+ GCRYMPI_FMT_NONE = 0,
+ GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
+ GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
+ GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
+ GCRYMPI_FMT_HEX = 4, /* Hex format. */
+ GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
+ GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
+};
+
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
+int mpi_fromstr(MPI val, const char *str);
+MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
+int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
+ size_t buflen, size_t *nwritten, MPI a);
+
+/*-- mpi-mod.c --*/
+void mpi_mod(MPI rem, MPI dividend, MPI divisor);
+
+/* Context used with Barrett reduction. */
+struct barrett_ctx_s;
+typedef struct barrett_ctx_s *mpi_barrett_t;
+
+mpi_barrett_t mpi_barrett_init(MPI m, int copy);
+void mpi_barrett_free(mpi_barrett_t ctx);
+void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
+void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
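Barrett reduction precomputes a per-modulus constant once so that repeated reductions against the same modulus avoid a full division each time. The intended flow with this API (a sketch; error handling elided):

	/* Illustrative: several operations reduced against the same modulus. */
	static void sketch_barrett(MPI r, MPI x, MPI y, MPI m)
	{
		mpi_barrett_t ctx = mpi_barrett_init(m, 0);

		mpi_mod_barrett(r, x, ctx);		/* r = x mod m */
		mpi_mul_barrett(r, r, y, ctx);		/* r = (r * y) mod m */
		mpi_barrett_free(ctx);
	}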
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@@ -62,6 +120,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
+int mpi_cmpabs(MPI u, MPI v);
/*-- mpi-sub-ui.c --*/
int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
@@ -69,13 +128,146 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
+int mpi_test_bit(MPI a, unsigned int n);
+void mpi_set_bit(MPI a, unsigned int n);
+void mpi_set_highbit(MPI a, unsigned int n);
+void mpi_clear_highbit(MPI a, unsigned int n);
+void mpi_clear_bit(MPI a, unsigned int n);
+void mpi_rshift_limbs(MPI a, unsigned int count);
+void mpi_rshift(MPI x, MPI a, unsigned int n);
+void mpi_lshift_limbs(MPI a, unsigned int count);
+void mpi_lshift(MPI x, MPI a, unsigned int n);
+
+/*-- mpi-add.c --*/
+void mpi_add_ui(MPI w, MPI u, unsigned long v);
+void mpi_add(MPI w, MPI u, MPI v);
+void mpi_sub(MPI w, MPI u, MPI v);
+void mpi_addm(MPI w, MPI u, MPI v, MPI m);
+void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-mul.c --*/
+void mpi_mul(MPI w, MPI u, MPI v);
+void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-div.c --*/
+void mpi_tdiv_r(MPI rem, MPI num, MPI den);
+void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
+void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
+
+/*-- mpi-inv.c --*/
+int mpi_invm(MPI x, MPI a, MPI n);
+
+/*-- ec.c --*/
+
+/* Object to represent a point in projective coordinates */
+struct gcry_mpi_point {
+ MPI x;
+ MPI y;
+ MPI z;
+};
+
+typedef struct gcry_mpi_point *MPI_POINT;
+
+/* Models describing an elliptic curve */
+enum gcry_mpi_ec_models {
+ /* The Short Weierstrass equation is
+ * y^2 = x^3 + ax + b
+ */
+ MPI_EC_WEIERSTRASS = 0,
+ /* The Montgomery equation is
+ * by^2 = x^3 + ax^2 + x
+ */
+ MPI_EC_MONTGOMERY,
+ /* The Twisted Edwards equation is
+ * ax^2 + y^2 = 1 + bx^2y^2
+ * Note that we use 'b' instead of the commonly used 'd'.
+ */
+ MPI_EC_EDWARDS
+};
+
+/* Dialects used with elliptic curves */
+enum ecc_dialects {
+ ECC_DIALECT_STANDARD = 0,
+ ECC_DIALECT_ED25519,
+ ECC_DIALECT_SAFECURVE
+};
+
+/* This context is used with all our EC functions. */
+struct mpi_ec_ctx {
+ enum gcry_mpi_ec_models model; /* The model describing this curve. */
+ enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
+ int flags; /* Public key flags (not always used). */
+ unsigned int nbits; /* Number of bits. */
+
+ /* Domain parameters. Note that they may not all be set and if set
+ * the MPIs may be flagged as constant.
+ */
+ MPI p; /* Prime specifying the field GF(p). */
+ MPI a; /* First coefficient of the Weierstrass equation. */
+ MPI b; /* Second coefficient of the Weierstrass equation. */
+ MPI_POINT G; /* Base point (generator). */
+ MPI n; /* Order of G. */
+ unsigned int h; /* Cofactor. */
+
+ /* The actual key. May not be set. */
+ MPI_POINT Q; /* Public key. */
+ MPI d; /* Private key. */
+
+ const char *name; /* Name of the curve. */
+
+ /* This structure is private to mpi/ec.c! */
+ struct {
+ struct {
+ unsigned int a_is_pminus3:1;
+ unsigned int two_inv_p:1;
+ } valid; /* Flags to help setting the helper vars below. */
+
+ int a_is_pminus3; /* True if A = P - 3. */
+
+ MPI two_inv_p;
+
+ mpi_barrett_t p_barrett;
+
+ /* Scratch variables. */
+ MPI scratch[11];
+
+ /* Helper for fast reduction. */
+ /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
+ /* MPI s[10]; */
+ /* MPI c; */
+ } t;
+
+ /* Curve specific computation routines for the field. */
+ void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
+ void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
+ void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
+};
+
+void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ enum ecc_dialects dialect,
+ int flags, MPI p, MPI a, MPI b);
+void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
+MPI_POINT mpi_point_new(unsigned int nbits);
+void mpi_point_release(MPI_POINT p);
+void mpi_point_init(MPI_POINT p);
+void mpi_point_free_parts(MPI_POINT p);
+int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
+void mpi_ec_add_points(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx);
+void mpi_ec_mul_point(MPI_POINT result,
+ MPI scalar, MPI_POINT point,
+ struct mpi_ec_ctx *ctx);
+int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
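The EC API above is driven as init, allocate points, operate, convert to affine, release. A sketch under the assumption that the domain-parameter MPIs are already initialized and that mpi_alloc()/mpi_free() are available:

	/* Sketch: Q = d * G on a short Weierstrass curve; error handling elided. */
	static void example_scalar_mul(MPI p, MPI a, MPI b, MPI d, MPI_POINT G)
	{
		struct mpi_ec_ctx ctx;
		MPI_POINT Q = mpi_point_new(0);
		MPI qx = mpi_alloc(0), qy = mpi_alloc(0);

		mpi_ec_init(&ctx, MPI_EC_WEIERSTRASS, ECC_DIALECT_STANDARD,
			    0, p, a, b);
		mpi_ec_mul_point(Q, d, G, &ctx);	/* projective result */
		if (!mpi_ec_get_affine(qx, qy, Q, &ctx)) {
			/* qx/qy now hold the affine coordinates of Q */
		}
		mpi_point_release(Q);
		mpi_ec_deinit(&ctx);
		mpi_free(qx);
		mpi_free(qy);
	}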
/* inline functions */
/**
* mpi_get_size() - returns max size required to store the number
*
- * @a: A multi precision integer for which we want to allocate a bufer
+ * @a: A multi precision integer for which we want to allocate a buffer
*
* Return: size required to store the number
*/
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 6cbbfe94348c..4c5003afee6c 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -17,11 +17,12 @@ static inline int ip_mroute_opt(int opt)
}
int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
-int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
-int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
+int ipmr_ioctl(struct sock *sk, int cmd, void *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
bool ipmr_rule_default(const struct fib_rule *rule);
+int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
@@ -29,13 +30,13 @@ static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
return -ENOPROTOOPT;
}
-static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
- char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sk, int optname,
+ sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
-static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+static inline int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -ENOIOCTLCMD;
}
@@ -54,6 +55,12 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
{
return true;
}
+
+static inline int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ return 1;
+}
#endif
#define VIFF_STATIC 0x8000
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index bc351a85ce9b..63ef5191cc57 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -27,12 +27,12 @@ struct sock;
#ifdef CONFIG_IPV6_MROUTE
extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
-extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
-extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
extern void ip6_mr_cleanup(void);
+int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
@@ -42,13 +42,13 @@ static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
static inline
int ip6_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+ int optname, sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
static inline
-int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
+int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -ENOIOCTLCMD;
}
@@ -100,6 +100,27 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
#ifdef CONFIG_IPV6_MROUTE
bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
+static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ switch (cmd) {
+ /* These userspace buffers will be consumed by ip6mr_ioctl() */
+ case SIOCGETMIFCNT_IN6: {
+ struct sioc_mif_req6 buffer;
+
+ return sock_ioctl_inout(sk, cmd, arg, &buffer,
+ sizeof(buffer));
+ }
+ case SIOCGETSGCNT_IN6: {
+ struct sioc_sg_req6 buffer;
+
+ return sock_ioctl_inout(sk, cmd, arg, &buffer,
+ sizeof(buffer));
+ }
+ }
+
+ return 1;
+}
#else
static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
@@ -109,5 +130,11 @@ static inline int ip6mr_sk_done(struct sock *sk)
{
return 0;
}
+
+static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ return 1;
+}
#endif
#endif
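Both the IPv4 and IPv6 sk_ioctl helpers return 1 when the command is not a multicast-routing ioctl, so callers can distinguish "not handled" from a real result. A sketch of the intended caller-side pattern (the surrounding handler is hypothetical):

	static int example_proto_ioctl(struct sock *sk, unsigned int cmd,
				       void __user *arg)
	{
		int ret = ip6mr_sk_ioctl(sk, cmd, arg);

		if (ret != 1)
			return ret;	/* handled (or failed) by ip6mr */
		/* ... protocol specific ioctl handling ... */
		return -ENOIOCTLCMD;
	}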
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 8071148f29a6..9dd4bf157255 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -12,6 +12,7 @@
/**
* struct vif_device - interface representor for multicast routing
* @dev: network device being used
+ * @dev_tracker: refcount tracker for @dev reference
* @bytes_in: statistic; bytes ingressing
 * @bytes_out: statistic; bytes egressing
* @pkt_in: statistic; packets ingressing
@@ -25,7 +26,8 @@
* @remote: Remote address for tunnels
*/
struct vif_device {
- struct net_device *dev;
+ struct net_device __rcu *dev;
+ netdevice_tracker dev_tracker;
unsigned long bytes_in, bytes_out;
unsigned long pkt_in, pkt_out;
unsigned long rate_limit;
@@ -50,6 +52,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
struct netlink_ext_ack *extack)
{
@@ -58,7 +61,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
.family = family,
.extack = extack,
},
- .dev = vif->dev,
+ .dev = vif_dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
@@ -71,6 +74,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
unsigned int *ipmr_seq)
{
@@ -78,7 +82,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
.info = {
.family = family,
},
- .dev = vif->dev,
+ .dev = vif_dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
@@ -96,7 +100,8 @@ static inline int mr_call_vif_notifiers(struct net *net,
#define MAXVIFS 32
#endif
-#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
+/* Note: This helper is deprecated. */
+#define VIF_EXISTS(_mrt, _idx) (!!rcu_access_pointer((_mrt)->vif_table[_idx].dev))
/* mfc_flags:
* MFC_STATIC - the entry was added statically (not by a routing daemon)
@@ -303,7 +308,7 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock, struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -358,7 +363,7 @@ static inline int mr_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock, struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack)
{
return -EINVAL;
}
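With @dev now annotated __rcu, readers outside the update-side lock must go through rcu_dereference() under rcu_read_lock(); VIF_EXISTS() keeps using rcu_access_pointer() because it only tests for NULL. A sketch of a compliant reader (the function itself is hypothetical):

	static bool example_vif_dev_running(struct vif_device *vif)
	{
		struct net_device *dev;
		bool running = false;

		rcu_read_lock();
		dev = rcu_dereference(vif->dev);
		if (dev)
			running = netif_running(dev);
		rcu_read_unlock();
		return running;
	}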
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ad679e9d9c0..26d07e23052e 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -2,89 +2,187 @@
#ifndef LINUX_MSI_H
#define LINUX_MSI_H
-#include <linux/kobject.h>
+/*
+ * This header file contains MSI data structures and functions which are
+ * only relevant for:
+ * - Interrupt core code
+ * - PCI/MSI core code
+ * - MSI interrupt domain implementations
+ * - IOMMU, low level VFIO, NTB and other justified exceptions
+ * dealing with low level MSI details.
+ *
+ * Regular device drivers have no business with any of these functions and
+ * especially storing MSI descriptor pointers in random code is considered
+ * abuse.
+ *
+ * Device driver relevant functions are available in <linux/msi_api.h>
+ */
+
+#include <linux/irqdomain_defs.h>
+#include <linux/cpumask.h>
+#include <linux/msi_api.h>
+#include <linux/xarray.h>
+#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/bits.h>
+
+#include <asm/msi.h>
+
+/* Dummy shadow structures if an architecture does not define them */
+#ifndef arch_msi_msg_addr_lo
+typedef struct arch_msi_msg_addr_lo {
+ u32 address_lo;
+} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
+#endif
+
+#ifndef arch_msi_msg_addr_hi
+typedef struct arch_msi_msg_addr_hi {
+ u32 address_hi;
+} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
+#endif
+
+#ifndef arch_msi_msg_data
+typedef struct arch_msi_msg_data {
+ u32 data;
+} __attribute__ ((packed)) arch_msi_msg_data_t;
+#endif
+
+#ifndef arch_is_isolated_msi
+#define arch_is_isolated_msi() false
+#endif
+/**
+ * msi_msg - Representation of a MSI message
+ * @address_lo: Low 32 bits of msi message address
+ * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @address_hi: High 32 bits of msi message address
+ * (only used when device supports it)
+ * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @data: MSI message data (usually 16 bits)
+ * @arch_data: Architecture specific shadow of @data
+ */
struct msi_msg {
- u32 address_lo; /* low 32 bits of msi message address */
- u32 address_hi; /* high 32 bits of msi message address */
- u32 data; /* 16 bits of msi message data */
+ union {
+ u32 address_lo;
+ arch_msi_msg_addr_lo_t arch_addr_lo;
+ };
+ union {
+ u32 address_hi;
+ arch_msi_msg_addr_hi_t arch_addr_hi;
+ };
+ union {
+ u32 data;
+ arch_msi_msg_data_t arch_data;
+ };
};
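The unions keep the generic u32 view and the architecture shadow view at identical offsets, so generic code composing a message is unchanged. A sketch using the standard lower_32_bits()/upper_32_bits() helpers:

	static void example_compose_msg(struct msi_msg *msg, u64 addr, u32 data)
	{
		msg->address_lo = lower_32_bits(addr);
		msg->address_hi = upper_32_bits(addr);
		msg->data = data;
	}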
extern int pci_msi_ignore_mask;
/* Helper functions */
-struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
+struct device_attribute;
+struct irq_domain;
+struct irq_affinity_desc;
+
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
-static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-}
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * platform_msi_desc - Platform device specific msi descriptor data
- * @msi_priv_data: Pointer to platform private data
- * @msi_index: The index of the MSI descriptor for multi MSI
+ * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ *
+ * @msi_mask: [PCI MSI] MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @can_mask: [PCI MSI/X] Masking supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
*/
-struct platform_msi_desc {
- struct platform_msi_priv_data *msi_priv_data;
- u16 msi_index;
+struct pci_msi_desc {
+ union {
+ u32 msi_mask;
+ u32 msix_ctrl;
+ };
+ struct {
+ u8 is_msix : 1;
+ u8 multiple : 3;
+ u8 multi_cap : 3;
+ u8 can_mask : 1;
+ u8 is_64 : 1;
+ u8 is_virtual : 1;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
};
/**
- * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
- * @msi_index: The index of the MSI descriptor
+ * union msi_domain_cookie - Opaque MSI domain specific data
+ * @value: u64 value store
+ * @ptr: Pointer to domain specific data
+ * @iobase: Domain specific IOmem pointer
+ *
+ * The content of this data is implementation defined and used by the MSI
+ * domain to store domain specific information which is required for
+ * interrupt chip callbacks.
*/
-struct fsl_mc_msi_desc {
- u16 msi_index;
+union msi_domain_cookie {
+ u64 value;
+ void *ptr;
+ void __iomem *iobase;
};
/**
- * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
- * @dev_index: TISCI device index
+ * struct msi_desc_data - Generic MSI descriptor data
+ * @dcookie: Cookie for MSI domain specific data which is required
+ * for irq_chip callbacks
+ * @icookie: Cookie for the MSI interrupt instance provided by
+ * the usage site to the allocation function
+ *
+ * The content of this data is implementation defined, e.g. PCI/IMS
+ * implementations define the meaning of the data. The MSI core ignores
+ * this data completely.
*/
-struct ti_sci_inta_msi_desc {
- u16 dev_index;
+struct msi_desc_data {
+ union msi_domain_cookie dcookie;
+ union msi_instance_cookie icookie;
};
+#define MSI_MAX_INDEX ((unsigned int)USHRT_MAX)
+
/**
* struct msi_desc - Descriptor structure for MSI based interrupts
- * @list: List head for management
* @irq: The base interrupt number
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
+ * @sysfs_attr: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
* @write_msi_msg_data: Data parameter for the callback.
*
- * @masked: [PCI MSI/X] Mask bits
- * @is_msix: [PCI MSI/X] True if MSI-X
- * @multiple: [PCI MSI/X] log2 num of messages allocated
- * @multi_cap: [PCI MSI/X] log2 num of messages supported
- * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
- * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
- * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
- * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
- * @mask_pos: [PCI MSI] Mask register position
- * @mask_base: [PCI MSI-X] Mask register base address
- * @platform: [platform] Platform device specific msi descriptor data
- * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
- * @inta: [INTA] TISCI based INTA specific msi descriptor data
+ * @msi_index: Index of the msi descriptor
+ * @pci: PCI specific msi descriptor data
+ * @data: Generic MSI descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
- struct list_head list;
unsigned int irq;
unsigned int nvec_used;
struct device *dev;
@@ -93,52 +191,120 @@ struct msi_desc {
#ifdef CONFIG_IRQ_MSI_IOMMU
const void *iommu_cookie;
#endif
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
void (*write_msi_msg)(struct msi_desc *entry, void *data);
void *write_msi_msg_data;
+ u16 msi_index;
union {
- /* PCI MSI/X specific data */
- struct {
- u32 masked;
- struct {
- u8 is_msix : 1;
- u8 multiple : 3;
- u8 multi_cap : 3;
- u8 maskbit : 1;
- u8 is_64 : 1;
- u8 is_virtual : 1;
- u16 entry_nr;
- unsigned default_irq;
- } msi_attrib;
- union {
- u8 mask_pos;
- void __iomem *mask_base;
- };
- };
-
- /*
- * Non PCI variants add their data structure here. New
- * entries need to use a named structure. We want
- * proper name spaces for this. The PCI part is
- * anonymous for now as it would require an immediate
- * tree wide cleanup.
- */
- struct platform_msi_desc platform;
- struct fsl_mc_msi_desc fsl_mc;
- struct ti_sci_inta_msi_desc inta;
+ struct pci_msi_desc pci;
+ struct msi_desc_data data;
};
};
-/* Helpers to hide struct msi_desc implementation details */
+/*
+ * Filter values for the MSI descriptor iterators and accessor functions.
+ */
+enum msi_desc_filter {
+ /* All descriptors */
+ MSI_DESC_ALL,
+ /* Descriptors which have no interrupt associated */
+ MSI_DESC_NOTASSOCIATED,
+ /* Descriptors which have an interrupt associated */
+ MSI_DESC_ASSOCIATED,
+};
+
+
+/**
+ * struct msi_dev_domain - The internals of MSI domain info per device
+ * @store: Xarray for storing MSI descriptor pointers
+ * @domain:	Pointer to a per device interrupt domain
+ */
+struct msi_dev_domain {
+ struct xarray store;
+ struct irq_domain *domain;
+};
+
+/**
+ * msi_device_data - MSI per device data
+ * @properties: MSI properties which are interesting to drivers
+ * @platform_data: Platform-MSI specific data
+ * @mutex: Mutex protecting the MSI descriptor store
+ * @__domains: Internal data for per device MSI domains
+ * @__iter_idx: Index to search the next entry for iterators
+ */
+struct msi_device_data {
+ unsigned long properties;
+ struct platform_msi_priv_data *platform_data;
+ struct mutex mutex;
+ struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
+ unsigned long __iter_idx;
+};
+
+int msi_setup_device_data(struct device *dev);
+
+void msi_lock_descs(struct device *dev);
+void msi_unlock_descs(struct device *dev);
+
+struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_first_desc - Get the first MSI descriptor of the default irqdomain
+ * @dev: Device to operate on
+ * @filter: Descriptor state filter
+ *
+ * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
+ * must be invoked before the call.
+ *
+ * Return: Pointer to the first MSI descriptor matching the search
+ * criteria, NULL if none found.
+ */
+static inline struct msi_desc *msi_first_desc(struct device *dev,
+ enum msi_desc_filter filter)
+{
+ return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
+}
+
+struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @domid: The id of the interrupt domain which should be walked.
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_domain_for_each_desc(desc, dev, domid, filter) \
+ for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc); \
+ (desc) = msi_next_desc((dev), (domid), (filter)))
+
+/**
+ * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_for_each_desc(desc, dev, filter) \
+ msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
+
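Per the notes above, both iterators are only valid between msi_lock_descs() and msi_unlock_descs(). A sketch of the documented pattern:

	static void example_walk_descs(struct device *dev)
	{
		struct msi_desc *desc;

		msi_lock_descs(dev);
		msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
			/* desc->irq and desc->msi_index are valid here;
			 * removing desc is also safe per the notes above.
			 */
		}
		msi_unlock_descs(dev);
	}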
#define msi_desc_to_dev(desc) ((desc)->dev)
-#define dev_to_msi_list(dev) (&(dev)->msi_list)
-#define first_msi_entry(dev) \
- list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
-#define for_each_msi_entry(desc, dev) \
- list_for_each_entry((desc), dev_to_msi_list((dev)), list)
-#define for_each_msi_entry_safe(desc, tmp, dev) \
- list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
@@ -163,70 +329,90 @@ static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
}
#endif
-#ifdef CONFIG_PCI_MSI
-#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
-#define for_each_pci_msi_entry(desc, pdev) \
- for_each_msi_entry((desc), &(pdev)->dev)
-
-struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
-#else /* CONFIG_PCI_MSI */
-static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
+ struct msi_desc *init_desc);
+/**
+ * msi_insert_msi_desc - Allocate and initialize a MSI descriptor in the
+ * default irqdomain and insert it at @init_desc->msi_index
+ * @dev: Pointer to the device for which the descriptor is allocated
+ * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
+ *
+ * Return: 0 on success or an appropriate failure code.
+ */
+static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
- return NULL;
+ return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
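@init_desc acts as a template: the new descriptor is inserted at @init_desc->msi_index. A sketch filling in only the fields this header clearly requires; whether more template fields are needed is implementation specific:

	static int example_insert_desc(struct device *dev, u16 index)
	{
		struct msi_desc desc = {
			.msi_index = index,
			.nvec_used = 1,
		};

		return msi_insert_msi_desc(dev, &desc);
	}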
-static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+
+void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+
+/**
+ * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
+ * in the default irqdomain
+ *
+ * @dev: Device for which to free the descriptors
+ * @first: Index to start freeing from (inclusive)
+ * @last: Last index to be freed (inclusive)
+ */
+static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
+ unsigned int last)
{
+ msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}
-#endif /* CONFIG_PCI_MSI */
-struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct irq_affinity_desc *affinity);
-void free_msi_entry(struct msi_desc *entry);
-void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
-void pci_msi_mask_irq(struct irq_data *data);
-void pci_msi_unmask_irq(struct irq_data *data);
+/**
+ * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
+ * @dev: Device to free the descriptors
+ */
+static inline void msi_free_msi_descs(struct device *dev)
+{
+ msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
+}
/*
- * The arch hooks to setup up msi irqs. Those functions are
- * implemented as weak symbols so that they /can/ be overriden by
- * architecture specific code if needed.
+ * The arch hooks to set up msi irqs. Default functions are implemented
+ * as weak symbols so that they /can/ be overridden by architecture specific
+ * code if needed. These hooks can only be enabled by the architecture.
+ *
+ * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
+ * stubs with warnings.
*/
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-void arch_restore_msi_irqs(struct pci_dev *dev);
-
-void default_teardown_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev);
-
-struct msi_controller {
- struct module *owner;
- struct device *dev;
- struct device_node *of_node;
- struct list_head list;
-
- int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
- struct msi_desc *desc);
- int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
- int nvec, int type);
- void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
-};
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
+
+/*
+ * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
+ * entries of MSI IRQs.
+ */
+#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
+#ifdef CONFIG_SYSFS
+int msi_device_populate_sysfs(struct device *dev);
+void msi_device_destroy_sysfs(struct device *dev);
+#else /* CONFIG_SYSFS */
+static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
+static inline void msi_device_destroy_sysfs(struct device *dev) { }
+#endif /* !CONFIG_SYSFS */
+#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+/*
+ * The restore hook is still available even for fully irq domain based
+ * setups. Courtesy to XEN/X86.
+ */
+bool arch_restore_msi_irqs(struct pci_dev *dev);
+
+#ifdef CONFIG_GENERIC_MSI_IRQ
#include <linux/irqhandler.h>
-#include <asm/msi.h>
struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
+struct irq_fwspec;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;
@@ -236,18 +422,31 @@ struct msi_domain_info;
* @get_hwirq: Retrieve the resulting hw irq number
* @msi_init: Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
- * @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
- * @msi_finish: Optional callback to finalize the allocation
+ * @prepare_desc: Optional function to prepare the allocated MSI descriptor
+ * in the domain
* @set_desc: Set the msi descriptor for an interrupt
- * @handle_error: Optional error handler if the allocation fails
+ * @domain_alloc_irqs: Optional function to override the default allocation
+ * function.
+ * @domain_free_irqs: Optional function to override the default free
+ * function.
+ * @msi_post_free: Optional function which is invoked after freeing
+ * all interrupts.
+ * @msi_translate: Optional translate callback to support the odd wire to
+ * MSI bridges, e.g. MBIGEN
*
- * @get_hwirq, @msi_init and @msi_free are callbacks used by
- * msi_create_irq_domain() and related interfaces
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
+ * irqdomain.
*
- * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
- * are callbacks used by msi_domain_alloc_irqs() and related
- * interfaces which are based on msi_desc.
+ * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
+ * msi_domain_alloc/free_irqs*() variants.
+ *
+ * @domain_alloc_irqs, @domain_free_irqs can be used to override the
+ * default allocation/free functions (__msi_domain_alloc/free_irqs). This
+ * is initially for a wrapper around XEN's separate MSI universe which can't
+ * be wrapped into the regular irq domains concepts by mere mortals. This
+ * allows msi_domain_alloc/free_irqs to be used universally without having to
+ * special case XEN all over the place.
*/
struct msi_domain_ops {
irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
@@ -259,22 +458,31 @@ struct msi_domain_ops {
void (*msi_free)(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq);
- int (*msi_check)(struct irq_domain *domain,
- struct msi_domain_info *info,
- struct device *dev);
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
- void (*msi_finish)(msi_alloc_info_t *arg, int retval);
+ void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
struct msi_desc *desc);
- int (*handle_error)(struct irq_domain *domain,
- struct msi_desc *desc, int error);
+ int (*domain_alloc_irqs)(struct irq_domain *domain,
+ struct device *dev, int nvec);
+ void (*domain_free_irqs)(struct irq_domain *domain,
+ struct device *dev);
+ void (*msi_post_free)(struct irq_domain *domain,
+ struct device *dev);
+ int (*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq, unsigned int *type);
};
/**
* struct msi_domain_info - MSI interrupt domain data
 * @flags:	Flags to describe features and capabilities
+ * @bus_token: The domain bus token
+ * @hwsize: The hardware table size or the software index limit.
+ * If 0 then the size is considered unlimited and
+ * gets initialized to the maximum software index limit
+ * by the domain creation code.
* @ops: The callback data structure
* @chip: Optional: associated interrupt chip
* @chip_data: Optional: associated interrupt chip data
@@ -284,17 +492,42 @@ struct msi_domain_ops {
* @data: Optional: domain specific data
*/
struct msi_domain_info {
- u32 flags;
- struct msi_domain_ops *ops;
- struct irq_chip *chip;
- void *chip_data;
- irq_flow_handler_t handler;
- void *handler_data;
- const char *handler_name;
- void *data;
+ u32 flags;
+ enum irq_domain_bus_token bus_token;
+ unsigned int hwsize;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
+};
+
+/**
+ * struct msi_domain_template - Template for MSI device domains
+ * @name: Storage for the resulting name. Filled in by the core.
+ * @chip: Interrupt chip for this domain
+ * @ops: MSI domain ops
+ * @info: MSI domain info data
+ */
+struct msi_domain_template {
+ char name[48];
+ struct irq_chip chip;
+ struct msi_domain_ops ops;
+ struct msi_domain_info info;
};
-/* Flags for msi_domain_info */
+/*
+ * Flags for msi_domain_info
+ *
+ * Bits 0-15: Generic MSI functionality which is not subject to restriction
+ * by parent domains
+ *
+ * Bits 16-31: Functionality which depends on the underlying parent domain and
+ * can be masked out by msi_parent_ops::init_dev_msi_info() when
+ * a device MSI domain is initialized.
+ */
enum {
/*
* Init non implemented ops callbacks with default MSI domain
@@ -306,44 +539,119 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
- /* Support multiple PCI MSI interrupts */
- MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
- /* Support PCI MSIX interrupts */
- MSI_FLAG_PCI_MSIX = (1 << 3),
/* Needs early activate, required for PCI */
- MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 2),
/*
* Must reactivate when irq is started even when
* MSI_FLAG_ACTIVATE_EARLY has been set.
*/
- MSI_FLAG_MUST_REACTIVATE = (1 << 5),
+ MSI_FLAG_MUST_REACTIVATE = (1 << 3),
+ /* Populate sysfs on alloc() and destroy it on free() */
+ MSI_FLAG_DEV_SYSFS = (1 << 4),
+ /* Allocate simple MSI descriptors */
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+ /* Use dev->fwnode for MSI device domain creation */
+ MSI_FLAG_USE_DEV_FWNODE = (1 << 7),
+ /* Set parent->dev into domain->pm_dev on device domain creation */
+ MSI_FLAG_PARENT_PM_DEV = (1 << 8),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+
+ /* Mask for the domain specific functionality */
+ MSI_DOMAIN_FLAGS_MASK = GENMASK(31, 16),
+
+ /* Support multiple PCI MSI interrupts */
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 16),
+ /* Support PCI MSIX interrupts */
+ MSI_FLAG_PCI_MSIX = (1 << 17),
/* Is level-triggered capable, using two messages */
- MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
+ MSI_FLAG_LEVEL_CAPABLE = (1 << 18),
+ /* MSI-X entries must be contiguous */
+ MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
+ /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* Support for PCI/IMS */
+ MSI_FLAG_PCI_IMS = (1 << 21),
+};
+
+/**
+ * struct msi_parent_ops - MSI parent domain callbacks and configuration info
+ *
+ * @supported_flags: Required: The supported MSI flags of the parent domain
+ * @required_flags: Optional: The required MSI flags of the parent MSI domain
+ * @bus_select_token: Optional: The bus token of the real parent domain for
+ * irq_domain::select()
+ * @bus_select_mask: Optional: A mask of supported BUS_DOMAINs for
+ * irq_domain::select()
+ * @prefix: Optional: Prefix for the domain and chip name
+ * @init_dev_msi_info: Required: Callback for MSI parent domains to setup parent
+ * domain specific domain flags, domain ops and interrupt chip
+ * callbacks when a per device domain is created.
+ */
+struct msi_parent_ops {
+ u32 supported_flags;
+ u32 required_flags;
+ u32 bus_select_token;
+ u32 bus_select_mask;
+ const char *prefix;
+ bool (*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
};
+bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
+
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force);
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec);
-void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+
+bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *domain_data,
+ void *chip_data);
+void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);
+
+bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
+ enum irq_domain_bus_token bus_token);
+
+int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);
+
+struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
+ const struct irq_affinity_desc *affdesc,
+ union msi_instance_cookie *cookie);
+
+void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
+void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
+
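msi_domain_alloc_irq_at() reports failure through the returned msi_map (see msi_api.h below: a negative @index carries the error code). A sketch of a dynamic allocation at any free index, with a hypothetical per-instance cookie:

	static int example_alloc_dyn(struct device *dev, void *impl_data)
	{
		union msi_instance_cookie icookie = { .ptr = impl_data };
		struct msi_map map;

		map = msi_domain_alloc_irq_at(dev, MSI_SECONDARY_DOMAIN,
					      MSI_ANY_INDEX, NULL, &icookie);
		if (map.index < 0)
			return map.index;	/* allocation error code */
		return map.virq;		/* Linux interrupt number */
	}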
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg);
-void platform_msi_domain_free_irqs(struct device *dev);
/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq, int nvec, msi_alloc_info_t *args);
+void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec);
+
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
unsigned int nvec,
@@ -357,29 +665,49 @@ __platform_msi_create_device_domain(struct device *dev,
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs);
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec);
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
-#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+/* Per device platform MSI */
+int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg);
+void platform_device_msi_free_irqs_all(struct device *dev);
+
+bool msi_device_has_isolated_msi(struct device *dev);
+#else /* CONFIG_GENERIC_MSI_IRQ */
+static inline bool msi_device_has_isolated_msi(struct device *dev)
+{
+ /*
+ * Arguably if the platform does not enable MSI support then it has
+ * "isolated MSI", as an interrupt controller that cannot receive MSIs
+ * is inherently isolated by our definition. The default definition for
+ * arch_is_isolated_msi() is conservative and returns false anyhow.
+ */
+ return arch_is_isolated_msi();
+}
+#endif /* CONFIG_GENERIC_MSI_IRQ */
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
+/* PCI specific interfaces */
+#ifdef CONFIG_PCI_MSI
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
- struct msi_desc *desc);
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
-#else
+#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
return NULL;
}
-#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
+#endif /* !CONFIG_PCI_MSI */
#endif /* LINUX_MSI_H */
diff --git a/include/linux/msi_api.h b/include/linux/msi_api.h
new file mode 100644
index 000000000000..391087ad99b1
--- /dev/null
+++ b/include/linux/msi_api.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_MSI_API_H
+#define LINUX_MSI_API_H
+
+/*
+ * APIs which are relevant for device driver code for allocating and
+ * freeing MSI interrupts and querying the associations between
+ * hardware/software MSI indices and the Linux interrupt number.
+ */
+
+struct device;
+
+/*
+ * Per device interrupt domain related constants.
+ */
+enum msi_domain_ids {
+ MSI_DEFAULT_DOMAIN,
+ MSI_SECONDARY_DOMAIN,
+ MSI_MAX_DEVICE_IRQDOMAINS,
+};
+
+/**
+ * union msi_instance_cookie - MSI instance cookie
+ * @value: u64 value store
+ * @ptr: Pointer to usage site specific data
+ *
+ * This cookie is handed to the IMS allocation function and stored in the
+ * MSI descriptor for the interrupt chip callbacks.
+ *
+ * The content of this cookie is MSI domain implementation defined. For
+ * PCI/IMS implementations this could be a PASID or a pointer to queue
+ * memory.
+ */
+union msi_instance_cookie {
+ u64 value;
+ void *ptr;
+};
+
+/**
+ * msi_map - Mapping between MSI index and Linux interrupt number
+ * @index: The MSI index, e.g. slot in the MSI-X table or
+ * a software managed index if >= 0. If negative
+ * the allocation function failed and it contains
+ * the error code.
+ * @virq: The associated Linux interrupt number
+ */
+struct msi_map {
+ int index;
+ int virq;
+};
+
+/*
+ * Constant to be used for dynamic allocations when the allocation is any
+ * free MSI index, which is either an entry in a hardware table or a
+ * software managed index.
+ */
+#define MSI_ANY_INDEX UINT_MAX
+
+unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index);
+
+/**
+ * msi_get_virq - Lookup the Linux interrupt number for a MSI index on the default interrupt domain
+ * @dev: Device for which the lookup happens
+ * @index: The MSI index to lookup
+ *
+ * Return: The Linux interrupt number on success (> 0), 0 if not found
+ */
+static inline unsigned int msi_get_virq(struct device *dev, unsigned int index)
+{
+ return msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);
+}
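Since a return of 0 means "not found", a typical driver converts it to an error before requesting the interrupt. A sketch (the handler and name are placeholders; request_irq() comes from <linux/interrupt.h>):

	static int example_request(struct device *dev, unsigned int index,
				   irq_handler_t handler, void *priv)
	{
		unsigned int virq = msi_get_virq(dev, index);

		if (!virq)
			return -ENOENT;
		return request_irq(virq, handler, 0, "example", priv);
	}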
+
+#endif
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 3c668cb1e344..6e471436bba5 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -34,7 +34,7 @@ struct mtd_blktrans_dev {
struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
- fmode_t file_mode;
+ bool writable;
};
struct mtd_blktrans_ops {
@@ -77,5 +77,16 @@ extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev);
+/**
+ * module_mtd_blktrans() - Helper macro for registering a mtd blktrans driver
+ * @__mtd_blktrans: mtd_blktrans_ops struct
+ *
+ * Helper macro for mtd blktrans drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_mtd_blktrans(__mtd_blktrans) \
+ module_driver(__mtd_blktrans, register_mtd_blktrans, \
+ deregister_mtd_blktrans)
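Usage sketch for a hypothetical driver; the single macro line stands in for the paired module_init()/module_exit() stubs around register_mtd_blktrans()/deregister_mtd_blktrans():

	static struct mtd_blktrans_ops example_tr = {
		/* .name, .major, .part_bits, readsect/writesect ops, ... */
	};
	module_mtd_blktrans(example_tr);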
#endif /* __MTD_TRANS_H__ */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index fd1ecb821106..947410faf9e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -286,7 +286,8 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[]; /* per-chip data structure for each chip */
+ unsigned long quirks;
+ struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */
};
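The new __counted_by() annotation tells the compiler (and fortified bounds checks) that @chips has @numchips elements, so allocations must size the array and set that member together. A sketch using the standard struct_size() helper from <linux/overflow.h>:

	/* Sketch: cfi is a struct cfi_private *; numchips counts the chips. */
	cfi = kzalloc(struct_size(cfi, chips, numchips), GFP_KERNEL);
	if (cfi)
		cfi->numchips = numchips;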
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index c04f690871ca..9798c1a1d3b6 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -13,6 +13,7 @@
*/
#include <linux/sched.h>
#include <linux/mutex.h>
+#include <linux/wait.h>
typedef enum {
FL_READY,
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
index 2129f7d3b6eb..bb6b7121a542 100644
--- a/include/linux/mtd/hyperbus.h
+++ b/include/linux/mtd/hyperbus.h
@@ -8,6 +8,17 @@
#include <linux/mtd/map.h>
+/* HyperBus command bits */
+#define HYPERBUS_RW 0x80 /* R/W# */
+#define HYPERBUS_RW_WRITE 0
+#define HYPERBUS_RW_READ 0x80
+#define HYPERBUS_AS 0x40 /* Address Space */
+#define HYPERBUS_AS_MEM 0
+#define HYPERBUS_AS_REG 0x40
+#define HYPERBUS_BT 0x20 /* Burst Type */
+#define HYPERBUS_BT_WRAPPED 0
+#define HYPERBUS_BT_LINEAR 0x20
+
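These bits form the top command/address byte of a HyperBus transaction, so composing it is a plain OR of one value from each group. A sketch:

	/* Top CA byte for a linear read from register space. */
	u8 ca_reg_read = HYPERBUS_RW_READ | HYPERBUS_AS_REG | HYPERBUS_BT_LINEAR;
	/* A wrapped-burst write to memory space is simply all-zero fields. */
	u8 ca_mem_write = HYPERBUS_RW_WRITE | HYPERBUS_AS_MEM | HYPERBUS_BT_WRAPPED;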
enum hyperbus_memtype {
HYPERFLASH,
HYPERRAM,
@@ -20,6 +31,7 @@ enum hyperbus_memtype {
* @mtd: pointer to MTD struct
* @ctlr: pointer to HyperBus controller struct
* @memtype: type of memory device: HyperFlash or HyperRAM
+ * @priv: pointer to controller specific per device private data
*/
struct hyperbus_device {
@@ -28,6 +40,7 @@ struct hyperbus_device {
struct mtd_info *mtd;
struct hyperbus_ctlr *ctlr;
enum hyperbus_memtype memtype;
+ void *priv;
};
/**
@@ -76,9 +89,7 @@ int hyperbus_register_device(struct hyperbus_device *hbdev);
/**
* hyperbus_unregister_device - deregister HyperBus slave memory device
* @hbdev: hyperbus_device to be unregistered
- *
- * Return: 0 for success, others for failure.
*/
-int hyperbus_unregister_device(struct hyperbus_device *hbdev);
+void hyperbus_unregister_device(struct hyperbus_device *hbdev);
#endif /* __LINUX_MTD_HYPERBUS_H__ */
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
deleted file mode 100644
index e94b8e128074..000000000000
--- a/include/linux/mtd/latch-addr-flash.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Interface for NOR flash driver whose high address lines are latched
- *
- * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef __LATCH_ADDR_FLASH__
-#define __LATCH_ADDR_FLASH__
-
-struct map_info;
-struct mtd_partition;
-
-struct latch_addr_flash_data {
- unsigned int width;
- unsigned int size;
-
- int (*init)(void *data, int cs);
- void (*done)(void *data);
- void (*set_window)(unsigned long offset, void *data);
- void *data;
-
- unsigned int nr_parts;
- struct mtd_partition *parts;
-};
-
-#endif
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h
index d168c628c0d5..35e971be0950 100644
--- a/include/linux/mtd/lpc32xx_mlc.h
+++ b/include/linux/mtd/lpc32xx_mlc.h
@@ -11,7 +11,7 @@
#include <linux/dmaengine.h>
struct lpc32xx_mlc_platform_data {
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
};
#endif /* __LINUX_MTD_LPC32XX_MLC_H */
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h
index cf54a9f80460..a044b806566b 100644
--- a/include/linux/mtd/lpc32xx_slc.h
+++ b/include/linux/mtd/lpc32xx_slc.h
@@ -11,7 +11,7 @@
#include <linux/dmaengine.h>
struct lpc32xx_slc_platform_data {
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
};
#endif /* __LINUX_MTD_LPC32XX_SLC_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 157357ec1441..8d10d9d2e830 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -40,6 +40,12 @@ struct mtd_erase_region_info {
unsigned long *lockmap; /* If keeping bitmap of locks */
};
+struct mtd_req_stats {
+ unsigned int uncorrectable_errors;
+ unsigned int corrected_bitflips;
+ unsigned int max_bitflips;
+};
+
/**
* struct mtd_oob_ops - oob operation operands
* @mode: operation mode
@@ -70,10 +76,9 @@ struct mtd_oob_ops {
uint32_t ooboffs;
uint8_t *datbuf;
uint8_t *oobbuf;
+ struct mtd_req_stats *stats;
};
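With @stats non-NULL, the driver fills in the counters for this request only. A sketch of an opt-in read through the existing mtd_read_oob() entry point (buffer, offset and length are placeholders):

	struct mtd_req_stats stats = { };
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.len	= len,
		.datbuf	= buf,
		.stats	= &stats,
	};
	int ret = mtd_read_oob(mtd, offset, &ops);

	if (ret >= 0)	/* bitflips are reported via stats, not as errors */
		pr_debug("max bitflips in request: %u\n", stats.max_bitflips);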
-#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
@@ -190,9 +195,6 @@ struct module; /* only needed for owner field in mtd_info */
*/
struct mtd_debug_info {
struct dentry *dfs_dir;
-
- const char *partname;
- const char *partid;
};
/**
@@ -221,7 +223,7 @@ struct mtd_part {
* @partitions_lock: lock protecting accesses to the partition list. Protects
* not only the master partition list, but also all
* sub-partitions.
- * @suspended: et to 1 when the device is suspended, 0 otherwise
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
*
* This struct is embedded in mtd_info and contains master-specific
* properties/fields. The master is the root MTD device from the MTD partition
@@ -229,6 +231,7 @@ struct mtd_part {
*/
struct mtd_master {
struct mutex partitions_lock;
+ struct mutex chrdev_lock;
unsigned int suspended : 1;
};
@@ -333,9 +336,12 @@ struct mtd_info {
int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf);
int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
- size_t len, size_t *retlen, u_char *buf);
+ size_t len, size_t *retlen,
+ const u_char *buf);
int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len);
+ int (*_erase_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
void (*_sync) (struct mtd_info *mtd);
@@ -373,9 +379,11 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
+ struct nvmem_device *otp_user_nvmem;
+ struct nvmem_device *otp_factory_nvmem;
/*
* Parent device from the MTD partition point of view.
@@ -388,10 +396,8 @@ struct mtd_info {
/* List of partitions attached to this MTD device */
struct list_head partitions;
- union {
- struct mtd_part part;
- struct mtd_master master;
- };
+ struct mtd_part part;
+ struct mtd_master master;
};
static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
@@ -515,8 +521,9 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, u_char *buf);
+ size_t *retlen, const u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
+int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
@@ -677,6 +684,7 @@ extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
@@ -706,4 +714,11 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void);
+#else
+static inline bool mtd_check_expert_analysis_mode(void) { return false; }
+#endif
+
+
#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h
new file mode 100644
index 000000000000..0e48c36e6ca0
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mtk.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h
new file mode 100644
index 000000000000..b125926e458c
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mxic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2019 Macronix
+ * Author: Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Header for the Macronix external ECC engine.
+ */
+
+#ifndef __MTD_NAND_ECC_MXIC_H__
+#define __MTD_NAND_ECC_MXIC_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+struct mxic_ecc_engine;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+
+struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
+struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev);
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng);
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap);
+
+#else /* !CONFIG_MTD_NAND_ECC_MXIC */
+
+static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return NULL;
+}
+
+static inline struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {}
+
+static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction,
+ dma_addr_t dirmap)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_MXIC */
+
+#endif /* __MTD_NAND_ECC_MXIC_H__ */
diff --git a/include/linux/mtd/nand-ecc-sw-bch.h b/include/linux/mtd/nand-ecc-sw-bch.h
new file mode 100644
index 000000000000..9da9969505a8
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-bch.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is the header for the NAND BCH ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_BCH_H__
+#define __MTD_NAND_ECC_SW_BCH_H__
+
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_ecc_sw_bch_conf - private software BCH ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @bch: BCH control structure
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_ecc_sw_bch_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ struct bch_control *bch;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+
+int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code);
+int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+int nand_ecc_sw_bch_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
+
+static inline int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) {}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+#endif /* __MTD_NAND_ECC_SW_BCH_H__ */
diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h
new file mode 100644
index 000000000000..c6c71894c575
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-hamming.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is the header for the NAND Hamming ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_HAMMING_H__
+#define __MTD_NAND_ECC_SW_HAMMING_H__
+
+#include <linux/mtd/nand.h>
+
+/**
+ * struct nand_ecc_sw_hamming_conf - private software Hamming ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @sm_order: Smart Media special ordering
+ */
+struct nand_ecc_sw_hamming_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ unsigned int sm_order;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+
+int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand);
+int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
+ unsigned char *code, bool sm_order);
+int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code);
+int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
+ unsigned char *calc_ecc, unsigned int step_size,
+ bool sm_order);
+int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+static inline int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) {}
+
+static inline int ecc_sw_hamming_calculate(const unsigned char *buf,
+ unsigned int step_size,
+ unsigned char *code, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int ecc_sw_hamming_correct(unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc,
+ unsigned int step_size, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#endif /* __MTD_NAND_ECC_SW_HAMMING_H__ */
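For context, a minimal sketch of the step-level helpers, which operate on plain buffers without a struct nand_device; the 256-byte step size, the byte ordering and the function name are assumptions for illustration:

/* Hypothetical sketch: protect one 256-byte block with a 3-byte Hamming
 * code, then detect/correct a single bitflip on read-back. */
static int example_hamming_step(u8 *data, u8 *read_ecc)
{
	const unsigned int step_size = 256;	/* assumed step size */
	const bool sm_order = false;		/* no Smart Media ordering */
	u8 calc_ecc[3];				/* 3 ECC bytes per step */
	int ret;

	ret = ecc_sw_hamming_calculate(data, step_size, calc_ecc, sm_order);
	if (ret)
		return ret;

	/* Returns the number of corrected bitflips, or -EBADMSG when the
	 * error is beyond what a Hamming code can fix. */
	return ecc_sw_hamming_correct(data, read_ecc, calc_ecc, step_size,
				      sm_order);
}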
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index af99041ceaa9..b2996dc987ff 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -83,7 +83,18 @@ struct nand_pos {
};
/**
+ * enum nand_page_io_req_type - Direction of an I/O request
+ * @NAND_PAGE_READ: from the chip, to the controller
+ * @NAND_PAGE_WRITE: from the controller, to the chip
+ */
+enum nand_page_io_req_type {
+ NAND_PAGE_READ = 0,
+ NAND_PAGE_WRITE,
+};
+
+/**
* struct nand_page_io_req - NAND I/O request object
+ * @type: the type of page I/O: read or write
* @pos: the position this I/O request is targeting
* @dataoffs: the offset within the page
* @datalen: number of data bytes to read from/write to this page
@@ -99,6 +110,7 @@ struct nand_pos {
* specific commands/operations.
*/
struct nand_page_io_req {
+ enum nand_page_io_req_type type;
struct nand_pos pos;
unsigned int dataoffs;
unsigned int datalen;
@@ -115,18 +127,77 @@ struct nand_page_io_req {
int mode;
};
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
+
+/**
+ * enum nand_ecc_engine_type - NAND ECC engine type
+ * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
+ * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
+ */
+enum nand_ecc_engine_type {
+ NAND_ECC_ENGINE_TYPE_INVALID,
+ NAND_ECC_ENGINE_TYPE_NONE,
+ NAND_ECC_ENGINE_TYPE_SOFT,
+ NAND_ECC_ENGINE_TYPE_ON_HOST,
+ NAND_ECC_ENGINE_TYPE_ON_DIE,
+};
+
+/**
+ * enum nand_ecc_placement - NAND ECC bytes placement
+ * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
+ * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
+ * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout; the ECC bytes are
+ * interleaved with regular data in the main
+ * area
+ */
+enum nand_ecc_placement {
+ NAND_ECC_PLACEMENT_UNKNOWN,
+ NAND_ECC_PLACEMENT_OOB,
+ NAND_ECC_PLACEMENT_INTERLEAVED,
+};
+
+/**
+ * enum nand_ecc_algo - NAND ECC algorithm
+ * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
+ * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
+ * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
+ * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
+ */
+enum nand_ecc_algo {
+ NAND_ECC_ALGO_UNKNOWN,
+ NAND_ECC_ALGO_HAMMING,
+ NAND_ECC_ALGO_BCH,
+ NAND_ECC_ALGO_RS,
+};
+
/**
* struct nand_ecc_props - NAND ECC properties
+ * @engine_type: ECC engine type
+ * @placement: OOB placement (if relevant)
+ * @algo: ECC algorithm (if relevant)
* @strength: ECC strength
* @step_size: Number of bytes per step
+ * @flags: Misc properties
*/
struct nand_ecc_props {
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
+ enum nand_ecc_algo algo;
unsigned int strength;
unsigned int step_size;
+ unsigned int flags;
};
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+/* NAND ECC misc flags */
+#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
+
/**
* struct nand_bbt - bad block table object
* @cache: in memory BBT cache
@@ -158,10 +229,177 @@ struct nand_ops {
};
/**
+ * struct nand_ecc_context - Context for the ECC engine
+ * @conf: basic ECC engine parameters
+ * @nsteps: number of ECC steps
+ * @total: total number of bytes used for storing ECC codes; this is used by
+ * the generic OOB layouts
+ * @priv: ECC engine driver private data
+ */
+struct nand_ecc_context {
+ struct nand_ecc_props conf;
+ unsigned int nsteps;
+ unsigned int total;
+ void *priv;
+};
+
+/**
+ * struct nand_ecc_engine_ops - ECC engine operations
+ * @init_ctx: given a desired user configuration for the pointed NAND device,
+ * requests the ECC engine driver to setup a configuration with
+ * values it supports.
+ * @cleanup_ctx: clean the context initialized by @init_ctx.
+ * @prepare_io_req: is called before reading/writing a page to prepare the I/O
+ * request to be performed with ECC correction.
+ * @finish_io_req: is called after reading/writing a page to terminate the I/O
+ * request and ensure proper ECC correction.
+ */
+struct nand_ecc_engine_ops {
+ int (*init_ctx)(struct nand_device *nand);
+ void (*cleanup_ctx)(struct nand_device *nand);
+ int (*prepare_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+ int (*finish_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+};
+
+/**
+ * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
+ * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
+ * correction, does not need to copy
+ * data around
+ * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
+ * data into its own area before use
+ */
+enum nand_ecc_engine_integration {
+ NAND_ECC_ENGINE_INTEGRATION_INVALID,
+ NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
+ NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
+};
+
+/**
+ * struct nand_ecc_engine - ECC engine abstraction for NAND devices
+ * @dev: Host device
+ * @node: Private field for registration time
+ * @ops: ECC engine operations
+ * @integration: How the engine is integrated with the host
+ * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
+ * @priv: Private data
+ */
+struct nand_ecc_engine {
+ struct device *dev;
+ struct list_head node;
+ struct nand_ecc_engine_ops *ops;
+ enum nand_ecc_engine_integration integration;
+ void *priv;
+};
+
+void of_get_nand_ecc_user_config(struct nand_device *nand);
+int nand_ecc_init_ctx(struct nand_device *nand);
+void nand_ecc_cleanup_ctx(struct nand_device *nand);
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+int nand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+bool nand_ecc_is_strong_enough(struct nand_device *nand);
+
+#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
+#else
+static inline int
+nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+static inline int
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
+struct device *nand_ecc_get_engine_dev(struct device *host);
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+/**
+ * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
+ * @orig_req: Pointer to the original IO request
+ * @nand: Related NAND device, to have access to its memory organization
+ * @page_buffer_size: Real size of the page buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @spare_databuf: Data bounce buffer
+ * @spare_oobbuf: OOB bounce buffer
+ * @bounce_data: Flag indicating a data bounce buffer is used
+ * @bounce_oob: Flag indicating an OOB bounce buffer is used
+ */
+struct nand_ecc_req_tweak_ctx {
+ struct nand_page_io_req orig_req;
+ struct nand_device *nand;
+ unsigned int page_buffer_size;
+ unsigned int oob_buffer_size;
+ void *spare_databuf;
+ void *spare_oobbuf;
+ bool bounce_data;
+ bool bounce_oob;
+};
+
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_device *nand);
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+
+/**
+ * struct nand_ecc - Information relative to the ECC
+ * @defaults: Default values, depend on the underlying subsystem
+ * @requirements: ECC requirements from the NAND chip perspective
+ * @user_conf: User desires in terms of ECC parameters
+ * @ctx: ECC context for the ECC engine, derived from the device
+ * @requirements, the @user_conf and the @defaults
+ * @ondie_engine: On-die ECC engine reference, if any
+ * @engine: ECC engine actually bound
+ */
+struct nand_ecc {
+ struct nand_ecc_props defaults;
+ struct nand_ecc_props requirements;
+ struct nand_ecc_props user_conf;
+ struct nand_ecc_context ctx;
+ struct nand_ecc_engine *ondie_engine;
+ struct nand_ecc_engine *engine;
+};
+
+/**
* struct nand_device - NAND device
* @mtd: MTD instance attached to the NAND device
* @memorg: memory layout
- * @eccreq: ECC requirements
+ * @ecc: NAND ECC object attached to the NAND device
* @rowconv: position to row address converter
* @bbt: bad block table info
* @ops: NAND operations attached to the NAND device
@@ -169,8 +407,8 @@ struct nand_ops {
* Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
* should declare their own NAND object embedding a nand_device struct (that's
* how inheritance is done).
- * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
- * at device detection time to reflect the NAND device
+ * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
+ * be filled at device detection time to reflect the NAND device
* capabilities/requirements. Once this is done nanddev_init() can be called.
* It will take care of converting NAND information into MTD ones, which means
* the specialized NAND layers should never manually tweak
@@ -179,7 +417,7 @@ struct nand_ops {
struct nand_device {
struct mtd_info mtd;
struct nand_memory_organization memorg;
- struct nand_ecc_props eccreq;
+ struct nand_ecc ecc;
struct nand_row_converter rowconv;
struct nand_bbt bbt;
const struct nand_ops *ops;
@@ -383,6 +621,60 @@ nanddev_get_memorg(struct nand_device *nand)
return &nand->memorg;
}
+/**
+ * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_conf(struct nand_device *nand)
+{
+ return &nand->ecc.ctx.conf;
+}
+
+/**
+ * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_nsteps(struct nand_device *nand)
+{
+ return nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
+{
+ return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
+ * device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_requirements(struct nand_device *nand)
+{
+ return &nand->ecc.requirements;
+}
+
+/**
+ * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
+ * device
+ * @nand: NAND device
+ * @reqs: Requirements
+ */
+static inline void
+nanddev_set_ecc_requirements(struct nand_device *nand,
+ const struct nand_ecc_props *reqs)
+{
+ nand->ecc.requirements = *reqs;
+}
+
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
struct module *owner);
void nanddev_cleanup(struct nand_device *nand);
@@ -624,11 +916,13 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
* layer.
*/
static inline void nanddev_io_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
+ iter->req.type = reqtype;
iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = req->ooboffs;
@@ -698,16 +992,24 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
*
 * Should be used to iterate over the pages contained in an MTD request.
*/
-#define nanddev_io_for_each_page(nand, start, req, iter) \
- for (nanddev_io_iter_init(nand, start, req, iter); \
+#define nanddev_io_for_each_page(nand, type, start, req, iter) \
+ for (nanddev_io_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
-int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+/* ECC related functions */
+int nanddev_ecc_engine_init(struct nand_device *nand);
+void nanddev_ecc_engine_cleanup(struct nand_device *nand);
+
+static inline void *nand_to_ecc_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
/* BBT related functions */
enum nand_bbt_block_status {
NAND_BBT_BLOCK_STATUS_UNKNOWN,
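To illustrate the reworked iterator, a sketch of how a specialized layer might walk an MTD read request now that nanddev_io_for_each_page() takes the request type; read_one_page() is a made-up stand-in for the layer's real page handler:

static int read_one_page(struct nand_device *nand,
			 const struct nand_page_io_req *req); /* hypothetical */

static int example_read_pages(struct nand_device *nand, loff_t from,
			      struct mtd_oob_ops *ops)
{
	struct nand_io_iter iter;
	int ret;

	/* Each iteration fills iter.req, now tagged NAND_PAGE_READ. */
	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = read_one_page(nand, &iter.req);
		if (ret)
			return ret;
	}

	return 0;
}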
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
deleted file mode 100644
index d5956cc48ba9..000000000000
--- a/include/linux/mtd/nand_bch.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
- *
- * This file is the header for the NAND BCH ECC implementation.
- */
-
-#ifndef __MTD_NAND_BCH_H__
-#define __MTD_NAND_BCH_H__
-
-struct mtd_info;
-struct nand_chip;
-struct nand_bch_control;
-
-#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
-
-static inline int mtd_nand_has_bch(void) { return 1; }
-
-/*
- * Calculate BCH ecc code
- */
-int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code);
-
-/*
- * Detect and correct bit errors
- */
-int nand_bch_correct_data(struct nand_chip *chip, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc);
-/*
- * Initialize BCH encoder/decoder
- */
-struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
-/*
- * Release BCH encoder/decoder resources
- */
-void nand_bch_free(struct nand_bch_control *nbc);
-
-#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
-
-static inline int mtd_nand_has_bch(void) { return 0; }
-
-static inline int
-nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code)
-{
- return -1;
-}
-
-static inline int
-nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
-{
- return -ENOTSUPP;
-}
-
-static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
-{
- return NULL;
-}
-
-static inline void nand_bch_free(struct nand_bch_control *nbc) {}
-
-#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
-
-#endif /* __MTD_NAND_BCH_H__ */
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
deleted file mode 100644
index d423916b94f0..000000000000
--- a/include/linux/mtd/nand_ecc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
- * David Woodhouse <dwmw2@infradead.org>
- * Thomas Gleixner <tglx@linutronix.de>
- *
- * This file is the header for the ECC algorithm.
- */
-
-#ifndef __MTD_NAND_ECC_H__
-#define __MTD_NAND_ECC_H__
-
-struct nand_chip;
-
-/*
- * Calculate 3 byte ECC code for eccsize byte block
- */
-void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
- u_char *ecc_code, bool sm_order);
-
-/*
- * Calculate 3 byte ECC code for 256/512 byte block
- */
-int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code);
-
-/*
- * Detect and correct a 1 bit error for eccsize byte block
- */
-int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
- unsigned int eccsize, bool sm_order);
-
-/*
- * Detect and correct a 1 bit error for 256/512 byte block
- */
-int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
-
-#endif /* __MTD_NAND_ECC_H__ */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -11,6 +11,7 @@
#define __LINUX_MTD_ONFI_H
#include <linux/types.h>
+#include <linux/bitfield.h>
/* ONFI version bits */
#define ONFI_VERSION_1_0 BIT(1)
@@ -24,17 +25,22 @@
#define ONFI_VERSION_4_0 BIT(9)
/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+#define ONFI_FEATURE_16_BIT_BUS BIT(0)
+#define ONFI_FEATURE_NV_DDR BIT(5)
+#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7)
/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+#define ONFI_DATA_INTERFACE_SDR 0
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5)
+#define ONFI_TIMING_MODE_0 BIT(0)
+#define ONFI_TIMING_MODE_1 BIT(1)
+#define ONFI_TIMING_MODE_2 BIT(2)
+#define ONFI_TIMING_MODE_3 BIT(3)
+#define ONFI_TIMING_MODE_4 BIT(4)
+#define ONFI_TIMING_MODE_5 BIT(5)
+#define ONFI_TIMING_MODE_UNKNOWN BIT(6)
+#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x))
/* ONFI feature number/address */
#define ONFI_FEATURE_NUMBER 256
@@ -49,7 +55,8 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
+#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
/* rev info and features block */
@@ -93,14 +100,15 @@ struct nand_onfi_params {
/* electrical parameter block */
u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
+ __le16 sdr_timing_modes;
__le16 program_cache_timing_mode;
__le16 t_prog;
__le16 t_bers;
__le16 t_r;
__le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
+ u8 nvddr_timing_modes;
+ u8 nvddr2_timing_modes;
+ u8 nvddr_nvddr2_features;
__le16 clk_pin_capacitance_typ;
__le16 io_pin_capacitance_typ;
__le16 input_pin_capacitance_typ;
@@ -160,7 +168,9 @@ struct onfi_ext_param_page {
* @tBERS: Block erase time
* @tR: Page read time
* @tCCS: Change column setup time
- * @async_timing_mode: Supported asynchronous timing mode
+ * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only)
+ * @sdr_timing_modes: Supported asynchronous/SDR timing modes
+ * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
*/
@@ -170,7 +180,9 @@ struct onfi_params {
u16 tBERS;
u16 tR;
u16 tCCS;
- u16 async_timing_mode;
+ bool fast_tCAD;
+ u16 sdr_timing_modes;
+ u16 nvddr_timing_modes;
u16 vendor_revision;
u8 vendor[88];
};
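A hedged sketch of how the split timing-mode fields might be consumed once the parameter page has been parsed; the selection policy and the function name are assumptions, and enum nand_interface_type comes from rawnand.h:

/* Hypothetical: pick the fastest advertised timing mode, preferring
 * NV-DDR over SDR when the chip supports it. */
static int example_best_onfi_mode(const struct onfi_params *onfi,
				  enum nand_interface_type *type)
{
	if (onfi->nvddr_timing_modes) {
		*type = NAND_NVDDR_IFACE;
		return fls(onfi->nvddr_timing_modes) - 1;
	}

	*type = NAND_SDR_IFACE;
	return fls(onfi->sdr_timing_modes) - 1;
}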
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
index 6166e7c60869..146413d4bdb7 100644
--- a/include/linux/mtd/pfow.h
+++ b/include/linux/mtd/pfow.h
@@ -121,37 +121,4 @@ static inline void send_pfow_command(struct map_info *map,
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
}
-
-static inline void print_drs_error(unsigned dsr)
-{
- int prog_status = (dsr & DSR_RPS) >> 8;
-
- if (!(dsr & DSR_AVAILABLE))
- printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
- if (prog_status & 0x03)
- printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
- "half with 41h command\n");
- else if (prog_status & 0x02)
- printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
- "in region with Control Mode data\n");
- else if (prog_status & 0x01)
- printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
- "with Object Mode data\n");
- if (!(dsr & DSR_READY_STATUS))
- printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
- if (dsr & DSR_ESS)
- printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
- if (dsr & DSR_ERASE_STATUS)
- printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
- if (dsr & DSR_PROGRAM_STATUS)
- printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
- if (dsr & DSR_VPPS)
- printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
- "aborted\n");
- if (dsr & DSR_PSS)
- printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
- if (dsr & DSR_DPS)
- printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
- "on locked block\n");
-}
#endif /* __LINUX_MTD_PFOW_H */
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 2e3f43788d48..0421f12156b5 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[];
+ struct flchip chips[] __counted_by(numchips);
};
/* qinfo_query_info structure contains request information for
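The __counted_by() annotation ties the flexible chips[] array to numchips so that compiler and FORTIFY bounds checks know the element count; a minimal allocation sketch (the function name is assumed) looks like:

/* Hypothetical: numchips must be set before chips[] is indexed, so the
 * recorded bound matches the allocation. */
static struct lpddr_private *example_alloc_lpddr(int numchips)
{
	struct lpddr_private *lpddr;

	lpddr = kzalloc(struct_size(lpddr, chips, numchips), GFP_KERNEL);
	if (!lpddr)
		return NULL;

	lpddr->numchips = numchips;
	return lpddr;
}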
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index a725b620aca2..e84522e31301 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -14,16 +14,17 @@
#define __LINUX_MTD_RAWNAND_H
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
#include <linux/mtd/jedec.h>
-#include <linux/mtd/nand.h>
#include <linux/mtd/onfi.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/types.h>
struct nand_chip;
+struct gpio_desc;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -66,6 +67,8 @@ struct nand_chip;
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_READCACHESEQ 0x31
+#define NAND_CMD_READCACHEEND 0x3f
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
@@ -81,25 +84,6 @@ struct nand_chip;
#define NAND_DATA_IFACE_CHECK_ONLY -1
/*
- * Constants for ECC_MODES
- */
-enum nand_ecc_mode {
- NAND_ECC_INVALID,
- NAND_ECC_NONE,
- NAND_ECC_SOFT,
- NAND_ECC_HW,
- NAND_ECC_HW_SYNDROME,
- NAND_ECC_ON_DIE,
-};
-
-enum nand_ecc_algo {
- NAND_ECC_UNKNOWN,
- NAND_ECC_HAMMING,
- NAND_ECC_BCH,
- NAND_ECC_RS,
-};
-
-/*
* Constants for Hardware ECC
*/
/* Reset Hardware ECC for read */
@@ -116,7 +100,6 @@ enum nand_ecc_algo {
* pages and you want to rely on the default implementation.
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
-#define NAND_ECC_MAXIMIZE BIT(1)
/*
* Option constants for bizarre disfunctionality and real
@@ -242,6 +225,7 @@ enum nand_ecc_algo {
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -250,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
@@ -310,7 +295,8 @@ static const struct nand_ecc_caps __name = { \
/**
* struct nand_ecc_ctrl - Control structure for ECC
- * @mode: ECC mode
+ * @engine_type: ECC engine type
+ * @placement: OOB bytes placement
* @algo: ECC algorithm
* @steps: number of ECC steps per page
* @size: data bytes per ECC step
@@ -320,7 +306,6 @@ static const struct nand_ecc_caps __name = { \
* @prepad: padding information for syndrome based ECC generators
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
- * @priv: pointer to private ECC control data
* @calc_buf: buffer for calculated ECC, size is oobsize.
* @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
@@ -338,7 +323,7 @@ static const struct nand_ecc_caps __name = { \
* controller and always return contiguous in-band and
* out-of-band data even if they're not stored
* contiguously on the NAND chip (e.g.
- * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
* out-of-band data).
* @write_page_raw: function to write a raw page without ECC. This function
* should hide the specific layout used by the ECC
@@ -346,7 +331,7 @@ static const struct nand_ecc_caps __name = { \
* in-band and out-of-band data. ECC controller is
* responsible for doing the appropriate transformations
* to adapt to its specific layout (e.g.
- * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
* out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
@@ -362,7 +347,8 @@ static const struct nand_ecc_caps __name = { \
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
- enum nand_ecc_mode mode;
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
enum nand_ecc_algo algo;
int steps;
int size;
@@ -372,7 +358,6 @@ struct nand_ecc_ctrl {
int prepad;
int postpad;
unsigned int options;
- void *priv;
u8 *calc_buf;
u8 *code_buf;
void (*hwctl)(struct nand_chip *chip, int mode);
@@ -405,8 +390,8 @@ struct nand_ecc_ctrl {
* This struct defines the timing requirements of a SDR NAND chip.
 * This information can be found in every NAND datasheet, and the meaning of
 * the timings is described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
+ * (chapter 4.15 Timing Parameters)
*
* All these timings are expressed in picoseconds.
*
@@ -492,11 +477,127 @@ struct nand_sdr_timings {
};
/**
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of a NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * the timings is described in the ONFI specifications:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tAC_min: Access window of DQ[7:0] from CLK
+ * @tAC_max: Access window of DQ[7:0] from CLK
+ * @tADL_min: ALE to data loading time
+ * @tCAD_min: Command, Address, Data delay
+ * @tCAH_min: Command/Address DQ hold time
+ * @tCALH_min: W/R_n, CLE and ALE hold time
+ * @tCALS_min: W/R_n, CLE and ALE setup time
+ * @tCAS_min: Command/address DQ setup time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCK_min: Average clock cycle time
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDQSCK_min: Start of the access window of DQS from CLK
+ * @tDQSCK_max: End of the access window of DQS from CLK
+ * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
+ * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
+ * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
+ * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
+ * @tDS_min: Data setup time
+ * @tDSC_min: DQS cycle time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tQHS_max: Data hold skew factor
+ * @tRHW_min: Data output cycle to command, address, or data input cycle
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWHR_min: WE# high to RE# low
+ * @tWRCK_min: W/R_n low to data output cycle
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_nvddr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tAC_min;
+ u32 tAC_max;
+ u32 tADL_min;
+ u32 tCAD_min;
+ u32 tCAH_min;
+ u32 tCALH_min;
+ u32 tCALS_min;
+ u32 tCAS_min;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCK_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDQSCK_min;
+ u32 tDQSCK_max;
+ u32 tDQSD_min;
+ u32 tDQSD_max;
+ u32 tDQSHZ_max;
+ u32 tDQSQ_max;
+ u32 tDS_min;
+ u32 tDSC_min;
+ u32 tFEAT_max;
+ u32 tITC_max;
+ u32 tQHS_max;
+ u32 tRHW_min;
+ u32 tRR_min;
+ u32 tRST_max;
+ u32 tWB_max;
+ u32 tWHR_min;
+ u32 tWRCK_min;
+ u32 tWW_min;
+};
+
+/*
+ * While timings related to the data interface itself are mostly different
+ * between SDR and NV-DDR, timings related to the internal chip behavior are
+ * common. IOW, the following entries which describe the internal delays have
+ * the same definition and are shared in both SDR and NV-DDR timing structures:
+ * - tADL_min
+ * - tBERS_max
+ * - tCCS_min
+ * - tFEAT_max
+ * - tPROG_max
+ * - tR_max
+ * - tRR_min
+ * - tRST_max
+ * - tWB_max
+ *
+ * The macros below return the value of a given timing, no matter the interface.
+ */
+#define NAND_COMMON_TIMING_PS(conf, timing_name) \
+ nand_interface_is_sdr(conf) ? \
+ nand_get_sdr_timings(conf)->timing_name : \
+ nand_get_nvddr_timings(conf)->timing_name
+
+#define NAND_COMMON_TIMING_MS(conf, timing_name) \
+ PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+#define NAND_COMMON_TIMING_NS(conf, timing_name) \
+ PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+/**
* enum nand_interface_type - NAND interface type
* @NAND_SDR_IFACE: Single Data Rate interface
+ * @NAND_NVDDR_IFACE: Double Data Rate interface
*/
enum nand_interface_type {
NAND_SDR_IFACE,
+ NAND_NVDDR_IFACE,
};
/**
@@ -505,6 +606,7 @@ enum nand_interface_type {
* @timings: The timing information
* @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
*/
struct nand_interface_config {
enum nand_interface_type type;
@@ -512,24 +614,56 @@ struct nand_interface_config {
unsigned int mode;
union {
struct nand_sdr_timings sdr;
+ struct nand_nvddr_timings nvddr;
};
} timings;
};
/**
+ * nand_interface_is_sdr - check whether the data interface is SDR
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_sdr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_SDR_IFACE;
+}
+
+/**
+ * nand_interface_is_nvddr - check whether the data interface is NV-DDR
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_NVDDR_IFACE;
+}
+
+/**
* nand_get_sdr_timings - get SDR timing from data interface
* @conf: The data interface
*/
static inline const struct nand_sdr_timings *
nand_get_sdr_timings(const struct nand_interface_config *conf)
{
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return ERR_PTR(-EINVAL);
return &conf->timings.sdr;
}
/**
+ * nand_get_nvddr_timings - get NV-DDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_nvddr_timings *
+nand_get_nvddr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_nvddr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.nvddr;
+}
+
+/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
@@ -869,6 +1003,8 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
* @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
@@ -876,6 +1012,7 @@ struct nand_op_parser {
*/
struct nand_operation {
unsigned int cs;
+ bool deassert_wp;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
@@ -887,6 +1024,14 @@ struct nand_operation {
.ninstrs = ARRAY_SIZE(_instrs), \
}
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
@@ -943,7 +1088,7 @@ static inline void nand_op_trace(const char *prefix,
* @exec_op: controller specific method to execute NAND operations.
* This method replaces chip->legacy.cmdfunc(),
* chip->legacy.{read,write}_{buf,byte,word}(),
- * chip->legacy.dev_ready() and chip->legacy.waifunc().
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
* @setup_interface: setup the data interface and timing. If chipnr is set to
* %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
* should not be applied but only checked.
@@ -964,10 +1109,22 @@ struct nand_controller_ops {
*
* @lock: lock used to serialize accesses to the NAND controller
* @ops: NAND controller operations.
+ * @supported_op: NAND controller known-to-be-supported operations,
+ * only writable by the core after initial checking.
+ * @supported_op.data_only_read: The controller supports reading more data from
+ * the bus without restarting an entire read operation or
+ * changing the column.
+ * @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
*/
struct nand_controller {
struct mutex lock;
const struct nand_controller_ops *ops;
+ struct {
+ unsigned int data_only_read: 1;
+ unsigned int cont_read: 1;
+ } supported_op;
+ bool controller_wp;
};
static inline void nand_controller_init(struct nand_controller *nfc)
@@ -1056,6 +1213,16 @@ struct nand_manufacturer {
};
/**
+ * struct nand_secure_region - NAND secure region structure
+ * @offset: Offset of the start of the secure region
+ * @size: Size of the secure region
+ */
+struct nand_secure_region {
+ u64 offset;
+ u64 size;
+};
+
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @base: Inherit from the generic NAND device
* @id: Holds NAND ID
@@ -1100,11 +1267,19 @@ struct nand_manufacturer {
* @lock: Lock protecting the suspended field. Also used to serialize accesses
* to the NAND device
* @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @resume_wq: wait queue to sleep if rawnand is in suspended state.
* @cur_cs: Currently selected target. -1 means no target selected, otherwise we
* should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
* NAND Controller drivers should not modify this value, but they're
* allowed to read it.
* @read_retries: The number of read retry modes supported
+ * @secure_regions: Structure containing the secure regions info
+ * @nr_secure_regions: Number of secure regions
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.pause_page: End of the current sequential cache read operation
+ * @cont_read.last_page: End of the continuous read operation
* @controller: The hardware controller structure which is shared among multiple
* independent devices
* @ecc: The ECC controller structure
@@ -1152,8 +1327,17 @@ struct nand_chip {
/* Internals */
struct mutex lock;
unsigned int suspended : 1;
+ wait_queue_head_t resume_wq;
int cur_cs;
int read_retries;
+ struct nand_secure_region *secure_regions;
+ u8 nr_secure_regions;
+ struct {
+ bool ongoing;
+ unsigned int first_page;
+ unsigned int pause_page;
+ unsigned int last_page;
+ } cont_read;
/* Externals */
struct nand_controller *controller;
@@ -1161,9 +1345,6 @@ struct nand_chip {
void *priv;
};
-extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
-extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
-
static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
{
return container_of(mtd, struct nand_chip, base.mtd);
@@ -1306,7 +1487,8 @@ static inline bool nand_is_slc(struct nand_chip *chip)
}
/**
- * Check if the opcode's address should be sent only on the lower 8 bits
+ * nand_opcode_8bits - Check if the opcode's address should be sent only on the
+ * lower 8 bits
* @command: opcode to check
*/
static inline int nand_opcode_8bits(unsigned int command)
@@ -1323,6 +1505,20 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
+int rawnand_sw_hamming_init(struct nand_chip *chip);
+int rawnand_sw_hamming_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code);
+int rawnand_sw_hamming_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+void rawnand_sw_hamming_cleanup(struct nand_chip *chip);
+int rawnand_sw_bch_init(struct nand_chip *chip);
+int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+void rawnand_sw_bch_cleanup(struct nand_chip *chip);
+
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
@@ -1361,6 +1557,7 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
@@ -1383,6 +1580,8 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
@@ -1407,7 +1606,6 @@ void nand_cleanup(struct nand_chip *chip);
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
-struct gpio_desc;
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
unsigned long timeout_ms);
@@ -1440,4 +1638,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip)
return chip->data_buf;
}
+/* Parse the gpio-cs property */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array);
+
#endif /* __LINUX_MTD_RAWNAND_H */
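As a sketch of the interface-agnostic timing macros introduced above (the timeout policy and the function name are assumptions for illustration):

/* Hypothetical: tPROG_max is one of the timings shared by the SDR and
 * NV-DDR structures, so the common macro works for either interface. */
static unsigned long example_prog_timeout_ms(const struct nand_interface_config *conf)
{
	return NAND_COMMON_TIMING_MS(conf, tPROG_max);
}

A driver could then feed this value to a ready-wait helper such as nand_soft_waitrdy() without caring which data interface is active.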
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index d2c3cf29e0d1..231bd1c3f408 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -9,7 +9,6 @@
#define _MTD_SHARPSL_H
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
struct sharpsl_nand_platform_data {
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 60bac2c0ec45..cdcfe0fd2e7d 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -7,7 +7,6 @@
#define __LINUX_MTD_SPI_NOR_H
#include <linux/bitops.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/spi/spi-mem.h>
@@ -47,10 +46,9 @@
#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
-#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
-#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
-#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
-#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */
+#define SPINOR_OP_SRST 0x99 /* Software Reset */
+#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -83,27 +81,22 @@
#define SPINOR_OP_BP 0x02 /* Byte program */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
-/* Used for S3AN flashes only */
-#define SPINOR_OP_XSE 0x50 /* Sector erase */
-#define SPINOR_OP_XPP 0x82 /* Page program */
-#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
-
-#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
-#define XSR_RDY BIT(7) /* Ready */
-
-
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
-#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+/* Used for GigaDevices and Winbond flashes. */
+#define SPINOR_OP_ESECR 0x44 /* Erase Security registers */
+#define SPINOR_OP_PSECR 0x42 /* Program Security registers */
+#define SPINOR_OP_RSECR 0x48 /* Read Security registers */
+
/* Status Register bits. */
#define SR_WIP BIT(0) /* Write in progress */
#define SR_WEL BIT(1) /* Write enable latch */
@@ -127,14 +120,11 @@
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
-/* Flag Status Register bits */
-#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
-#define FSR_E_ERR BIT(5) /* Erase operation status */
-#define FSR_P_ERR BIT(4) /* Program operation status */
-#define FSR_PT_ERR BIT(1) /* Protection error bit */
-
/* Status Register 2 bits. */
#define SR2_QUAD_EN_BIT1 BIT(1)
+#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */
+#define SR2_LB2 BIT(4) /* Security Register Lock Bit 2 */
+#define SR2_LB3 BIT(5) /* Security Register Lock Bit 3 */
#define SR2_QUAD_EN_BIT7 BIT(7)
/* Supported SPI protocols */
@@ -182,6 +172,7 @@ enum spi_nor_protocol {
SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
+ SNOR_PROTO_8_8_8_DTR = SNOR_PROTO_DTR(8, 8, 8),
};
static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
@@ -228,7 +219,7 @@ struct spi_nor_hwcaps {
* then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
* (Slow) Read.
*/
-#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ_MASK GENMASK(15, 0)
#define SNOR_HWCAPS_READ BIT(0)
#define SNOR_HWCAPS_READ_FAST BIT(1)
#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
@@ -245,11 +236,12 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
-#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_OCTAL GENMASK(15, 11)
#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+#define SNOR_HWCAPS_READ_8_8_8_DTR BIT(15)
/*
* Page Program capabilities.
@@ -260,18 +252,19 @@ struct spi_nor_hwcaps {
* JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
* implements such commands.
*/
-#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
-#define SNOR_HWCAPS_PP BIT(16)
+#define SNOR_HWCAPS_PP_MASK GENMASK(23, 16)
+#define SNOR_HWCAPS_PP BIT(16)
-#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
-#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
-#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
-#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
-#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20)
-#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
-#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
-#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_OCTAL GENMASK(23, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_8_8_8_DTR BIT(23)
#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \
SNOR_HWCAPS_READ_4_4_4 | \
@@ -279,10 +272,14 @@ struct spi_nor_hwcaps {
SNOR_HWCAPS_PP_4_4_4 | \
SNOR_HWCAPS_PP_8_8_8)
+#define SNOR_HWCAPS_X_X_X_DTR (SNOR_HWCAPS_READ_8_8_8_DTR | \
+ SNOR_HWCAPS_PP_8_8_8_DTR)
+
#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \
SNOR_HWCAPS_READ_1_2_2_DTR | \
SNOR_HWCAPS_READ_1_4_4_DTR | \
- SNOR_HWCAPS_READ_1_8_8_DTR)
+ SNOR_HWCAPS_READ_1_8_8_DTR | \
+ SNOR_HWCAPS_READ_8_8_8_DTR)
#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
SNOR_HWCAPS_PP_MASK)
@@ -318,6 +315,22 @@ struct spi_nor_controller_ops {
int (*erase)(struct spi_nor *nor, loff_t offs);
};
+/**
+ * enum spi_nor_cmd_ext - describes the command opcode extension in DTR mode
+ * @SPI_NOR_EXT_NONE: no extension. This is the default, and is used in Legacy
+ * SPI mode
+ * @SPI_NOR_EXT_REPEAT: the extension is same as the opcode
+ * @SPI_NOR_EXT_INVERT: the extension is the bitwise inverse of the opcode
+ * @SPI_NOR_EXT_HEX: the extension is any hex value. The extension and the
+ * opcode combine to form a 16-bit opcode.
+ */
+enum spi_nor_cmd_ext {
+ SPI_NOR_EXT_NONE = 0,
+ SPI_NOR_EXT_REPEAT,
+ SPI_NOR_EXT_INVERT,
+ SPI_NOR_EXT_HEX,
+};
+
/*
* Forward declarations that are used internally by the core and manufacturer
* drivers.
@@ -330,24 +343,34 @@ struct spi_nor_flash_parameter;
* struct spi_nor - Structure for defining the SPI NOR layer
* @mtd: an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
+ * @rww: Read-While-Write (RWW) sync lock
+ * @rww.wait: wait queue for the RWW sync
+ * @rww.ongoing_io: the bus is busy
+ * @rww.ongoing_rd: a read is ongoing on the chip
+ * @rww.ongoing_pe: a program/erase is ongoing on the chip
+ * @rww.used_banks: bitmap of the banks in use
* @dev: pointer to an SPI device or an SPI NOR controller device
* @spimem: pointer to the SPI memory device
* @bouncebuf: bounce buffer used when the buffer passed by the MTD
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
+ * @id: The flash's ID bytes. Always contains
+ * SPI_NOR_MAX_ID_LEN bytes.
* @info: SPI NOR part JEDEC MFR ID and other info
* @manufacturer: SPI NOR manufacturer
- * @page_size: the page size of the SPI NOR
- * @addr_width: number of address bytes
+ * @addr_nbytes: number of address bytes
* @erase_opcode: the opcode for erasing a sector
* @read_opcode: the read opcode
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
* @sst_write_second: used by the SST write operation
* @flags: flag options for the current SPI NOR (SNOR_F_*)
+ * @cmd_ext_type: the command opcode extension type for DTR mode.
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
* @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @sfdp: the SFDP data of the flash
+ * @debugfs_root: pointer to the debugfs directory
* @controller_ops: SPI NOR controller driver specific operations.
* @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
@@ -359,14 +382,21 @@ struct spi_nor_flash_parameter;
struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
+ struct spi_nor_rww {
+ wait_queue_head_t wait;
+ bool ongoing_io;
+ bool ongoing_rd;
+ bool ongoing_pe;
+ unsigned int used_banks;
+ } rww;
struct device *dev;
struct spi_mem *spimem;
u8 *bouncebuf;
size_t bouncebuf_size;
+ u8 *id;
const struct flash_info *info;
const struct spi_nor_manufacturer *manufacturer;
- u32 page_size;
- u8 addr_width;
+ u8 addr_nbytes;
u8 erase_opcode;
u8 read_opcode;
u8 read_dummy;
@@ -376,6 +406,9 @@ struct spi_nor {
enum spi_nor_protocol reg_proto;
bool sst_write_second;
u32 flags;
+ enum spi_nor_cmd_ext cmd_ext_type;
+ struct sfdp *sfdp;
+ struct dentry *debugfs_root;
const struct spi_nor_controller_ops *controller_ops;
@@ -406,7 +439,7 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
* @name: the chip type name
* @hwcaps: the hardware capabilities supported by the controller driver
*
- * The drivers can use this fuction to scan the SPI NOR.
+ * The drivers can use this function to scan the SPI NOR.
* In the scanning, it will try to get all the necessary information to
* fill the mtd_info{} and the spi_nor{}.
*
@@ -417,10 +450,4 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
-/**
- * spi_nor_restore_addr_mode() - restore the status of SPI NOR
- * @nor: the spi_nor structure
- */
-void spi_nor_restore(struct spi_nor *nor);
-
#endif
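For illustration, a controller driver advertising the new octal DTR support might build its capability mask like this (the exact mask choice is an assumption):

/* Hypothetical controller capabilities including the new 8D-8D-8D bits. */
static const struct spi_nor_hwcaps example_hwcaps = {
	.mask = SNOR_HWCAPS_READ |
		SNOR_HWCAPS_READ_FAST |
		SNOR_HWCAPS_READ_8_8_8_DTR |
		SNOR_HWCAPS_PP |
		SNOR_HWCAPS_PP_8_8_8_DTR,
};

spi_nor_scan() then intersects such a mask with what the flash itself supports before selecting the read and program protocols.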
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 7b78c4ba9b3e..5c19ead60499 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -169,7 +169,29 @@
struct spinand_op;
struct spinand_device;
-#define SPINAND_MAX_ID_LEN 4
+#define SPINAND_MAX_ID_LEN 5
+/*
+ * For erase, write and read operations, we have the following timings:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay, and by 20 for the poll delay.
+ * For reset, the timing is 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both the
+ * initial and the poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
/**
* struct spinand_id - SPI NAND id structure
@@ -238,12 +260,17 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+extern const struct spinand_manufacturer xtx_spinand_manufacturer;
/**
* struct spinand_op_variants - SPI NAND operation variants
@@ -287,6 +314,15 @@ struct spinand_ecc_info {
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
/**
+ * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
+ * @status: status of the last wait operation that will be used in case
+ * ->get_status() is not populated by the spinand device.
+ */
+struct spinand_ondie_ecc_conf {
+ u8 status;
+};
+
+/**
* struct spinand_info - Structure used to describe SPI NAND chips
* @model: model name
* @devid: device ID
@@ -358,6 +394,8 @@ struct spinand_info {
struct spinand_dirmap {
struct spi_mem_dirmap_desc *wdesc;
struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc_ecc;
+ struct spi_mem_dirmap_desc *rdesc_ecc;
};
/**
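A sketch of how the new delay constants could drive a status poll; example_read_status() is a made-up helper standing in for a real GET FEATURE status read, and the loop structure is an assumption, not the core's actual wait routine:

static int example_read_status(struct spinand_device *spinand,
			       u8 *status); /* hypothetical */

static int example_wait_erase(struct spinand_device *spinand)
{
	unsigned long timeout = jiffies +
				msecs_to_jiffies(SPINAND_WAITRDY_TIMEOUT_MS);
	u8 status;
	int ret;

	/* The erase cannot possibly complete before the initial delay. */
	usleep_range(SPINAND_ERASE_INITIAL_DELAY_US,
		     SPINAND_ERASE_INITIAL_DELAY_US * 2);

	do {
		ret = example_read_status(spinand, &status);
		if (ret)
			return ret;
		if (!(status & STATUS_BUSY))
			return 0;
		usleep_range(SPINAND_ERASE_POLL_DELAY_US,
			     SPINAND_ERASE_POLL_DELAY_US * 2);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}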
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 7d48ea368c5e..a529347fd75b 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -110,6 +110,7 @@ struct ubi_volume_info {
int name_len;
const char *name;
dev_t cdev;
+ struct device *dev;
};
/**
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
index a4e352b1dfe6..3cac9360588f 100644
--- a/include/linux/mtd/xip.h
+++ b/include/linux/mtd/xip.h
@@ -28,7 +28,7 @@
* those functions so they get relocated to ram.
*/
#ifdef CONFIG_XIP_KERNEL
-#define __xipram noinline __attribute__ ((__section__ (".xiptext")))
+#define __xipram noinline __section(".xiptext")
#endif
/*
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index dcd185cbfe79..67edc4ca2bee 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,79 +19,22 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
+#include <linux/cleanup.h>
+#include <linux/mutex_types.h>
-struct ww_acquire_ctx;
-
-/*
- * Simple, straightforward mutexes with strict semantics:
- *
- * - only one task can hold the mutex at a time
- * - only the owner can unlock the mutex
- * - multiple unlocks are not permitted
- * - recursive locking is not permitted
- * - a mutex object must be initialized via the API
- * - a mutex object must not be initialized via memset or copying
- * - task may not exit with mutex held
- * - memory areas where held locks reside must not be freed
- * - held mutexes must not be reinitialized
- * - mutexes may not be used in hardware or software interrupt
- * contexts such as tasklets and timers
- *
- * These semantics are fully enforced when DEBUG_MUTEXES is
- * enabled. Furthermore, besides enforcing the above rules, the mutex
- * debugging code also implements a number of additional features
- * that make lock debugging easier and faster:
- *
- * - uses symbolic names of mutexes, whenever they are printed in debug output
- * - point-of-acquire tracking, symbolic lookup of function names
- * - list of all locks held in the system, printout of them
- * - owner tracking
- * - detects self-recursing locks and prints out all relevant info
- * - detects multi-task circular deadlocks and prints out all affected
- * locks and tasks (and only those tasks)
- */
-struct mutex {
- atomic_long_t owner;
- spinlock_t wait_lock;
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* Spinner MCS lock */
-#endif
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-struct ww_class;
-struct ww_acquire_ctx;
-
-struct ww_mutex {
- struct mutex base;
- struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_class *ww_class;
-#endif
-};
-
-/*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
- */
-struct mutex_waiter {
- struct list_head list;
- struct task_struct *task;
- struct ww_acquire_ctx *ww_ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
-};
#ifdef CONFIG_DEBUG_MUTEXES
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+# define __DEBUG_MUTEX_INITIALIZER(lockname) \
, .magic = &lockname
extern void mutex_destroy(struct mutex *lock);
@@ -104,6 +47,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
+#ifndef CONFIG_PREEMPT_RT
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -119,19 +63,9 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SLEEP, \
- }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
- , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+ , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -150,6 +84,39 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
extern bool mutex_is_locked(struct mutex *lock);
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+
+#define __MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
+}
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_rt_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+
+#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
+
+#define __mutex_init(mutex, name, key) \
+do { \
+ rt_mutex_base_init(&(mutex)->rtmutex); \
+ __mutex_rt_init((mutex), name, key); \
+} while (0)
+
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+#endif /* CONFIG_PREEMPT_RT */
+
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.rst.
@@ -185,7 +152,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
@@ -199,29 +166,8 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-/*
- * These values are chosen such that FAIL and SUCCESS match the
- * values of the regular mutex_trylock().
- */
-enum mutex_trylock_recursive_enum {
- MUTEX_TRYLOCK_FAILED = 0,
- MUTEX_TRYLOCK_SUCCESS = 1,
- MUTEX_TRYLOCK_RECURSIVE,
-};
-
-/**
- * mutex_trylock_recursive - trylock variant that allows recursive locking
- * @lock: mutex to be locked
- *
- * This function should not be used, _ever_. It is purely for hysterical GEM
- * raisins, and once those are gone this will be removed.
- *
- * Returns:
- * - MUTEX_TRYLOCK_FAILED - trylock failed,
- * - MUTEX_TRYLOCK_SUCCESS - lock acquired,
- * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
- */
-extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock);
+DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
#endif /* __LINUX_MUTEX_H */
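The DEFINE_GUARD()/DEFINE_GUARD_COND() lines above hook mutexes into the scope-based cleanup machinery from <linux/cleanup.h>. A minimal sketch of how driver code is expected to consume these guard classes, assuming the scoped_cond_guard() helper from the same cleanup infrastructure; the demo_* names are illustrative:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_counter;

static int demo_update(int delta)
{
	guard(mutex)(&demo_lock);	/* mutex_unlock() runs on every return path */
	demo_counter += delta;
	return demo_counter;
}

static int demo_try_update(int delta)
{
	/* _try class: the body runs only if mutex_trylock() succeeded */
	scoped_cond_guard(mutex_try, return -EBUSY, &demo_lock) {
		demo_counter += delta;
	}
	return 0;
}

The _intr class works the same way, failing the guard when mutex_lock_interruptible() returns non-zero.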
diff --git a/include/linux/mutex_api.h b/include/linux/mutex_api.h
new file mode 100644
index 000000000000..85ab9491e13e
--- /dev/null
+++ b/include/linux/mutex_api.h
@@ -0,0 +1 @@
+#include <linux/mutex.h>
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
new file mode 100644
index 000000000000..fdf7f515fde8
--- /dev/null
+++ b/include/linux/mutex_types.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MUTEX_TYPES_H
+#define __LINUX_MUTEX_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/osq_lock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in hardware or software interrupt
+ * contexts such as tasklets and timers
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ atomic_long_t owner;
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+ void *magic;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex_base rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#endif /* CONFIG_PREEMPT_RT */
+
+#endif /* __LINUX_MUTEX_TYPES_H */
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5fc6bb2fefad..2e25c838f831 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -14,18 +14,51 @@
struct device;
struct mux_control;
+struct mux_state;
unsigned int mux_control_states(struct mux_control *mux);
-int __must_check mux_control_select(struct mux_control *mux,
- unsigned int state);
-int __must_check mux_control_try_select(struct mux_control *mux,
- unsigned int state);
+int __must_check mux_control_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+int __must_check mux_control_try_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_try_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+
+static inline int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_select(struct mux_state *mstate)
+{
+ return mux_state_select_delay(mstate, 0);
+}
+
+static inline int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_try_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_try_select(struct mux_state *mstate)
+{
+ return mux_state_try_select_delay(mstate, 0);
+}
+
int mux_control_deselect(struct mux_control *mux);
+int mux_state_deselect(struct mux_state *mstate);
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
void mux_control_put(struct mux_control *mux);
struct mux_control *devm_mux_control_get(struct device *dev,
const char *mux_name);
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name);
#endif /* _LINUX_MUX_CONSUMER_H */
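The inline wrappers above keep the old consumer API as zero-delay calls into the new *_delay() variants. A hedged sketch of a consumer that uses the delay argument to let a muxed signal settle; the "adc-mux" name and the surrounding driver are illustrative:

#include <linux/err.h>
#include <linux/mux/consumer.h>

static int demo_sample_channel(struct device *dev, unsigned int channel)
{
	struct mux_control *mux;
	int ret;

	mux = devm_mux_control_get(dev, "adc-mux");
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	/* Select the channel, then give the signal 10 us to settle */
	ret = mux_control_select_delay(mux, channel, 10);
	if (ret < 0)
		return ret;

	/* ... sample the muxed signal here ... */

	return mux_control_deselect(mux);
}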
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 627a2c6bc02d..18824064f8c0 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -12,6 +12,7 @@
#include <dt-bindings/mux/mux.h>
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/semaphore.h>
struct mux_chip;
@@ -33,6 +34,7 @@ struct mux_control_ops {
* @states: The number of mux controller states.
* @idle_state: The mux controller state to use when inactive, or one
* of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ * @last_change: Timestamp of last change
*
* Mux drivers may only change @states and @idle_state, and may only do so
* between allocation and registration of the mux controller. Specifically,
@@ -47,6 +49,8 @@ struct mux_control {
unsigned int states;
int idle_state;
+
+ ktime_t last_change;
};
/**
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 47e5679b48e1..000b126acfb6 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -918,12 +918,4 @@
extern void mv64340_irq_init(unsigned int base);
-/* Watchdog Platform Device, Driver Data */
-#define MV64x60_WDT_NAME "mv64x60_wdt"
-
-struct mv64x60_wdt_pdata {
- int timeout; /* watchdog expiry in seconds, default 10 */
- int bus_clk; /* bus clock in MHz, default 133 */
-};
-
#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 3682ae75c7aa..145169be2ed8 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -8,6 +8,7 @@
#include <linux/mbus.h>
#include <linux/if_ether.h>
+#include <linux/phy.h>
#define MV643XX_ETH_SHARED_NAME "mv643xx_eth"
#define MV643XX_ETH_NAME "mv643xx_eth_port"
@@ -59,6 +60,7 @@ struct mv643xx_eth_platform_data {
*/
int speed;
int duplex;
+ phy_interface_t interface;
/*
* How many RX/TX queues to use.
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
deleted file mode 100644
index 90a803aa42e8..000000000000
--- a/include/linux/n_r3964.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-#ifndef __LINUX_N_R3964_H__
-#define __LINUX_N_R3964_H__
-
-
-#include <linux/param.h>
-#include <uapi/linux/n_r3964.h>
-
-/*
- * Common ascii handshake characters:
- */
-
-#define STX 0x02
-#define ETX 0x03
-#define DLE 0x10
-#define NAK 0x15
-
-/*
- * Timeouts (from milliseconds to jiffies)
- */
-
-#define R3964_TO_QVZ ((550)*HZ/1000)
-#define R3964_TO_ZVZ ((220)*HZ/1000)
-#define R3964_TO_NO_BUF ((400)*HZ/1000)
-#define R3964_NO_TX_ROOM ((100)*HZ/1000)
-#define R3964_TO_RX_PANIC ((4000)*HZ/1000)
-#define R3964_MAX_RETRIES 5
-
-
-enum { R3964_IDLE,
- R3964_TX_REQUEST, R3964_TRANSMITTING,
- R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK,
- R3964_WAIT_FOR_RX_BUF,
- R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT
- };
-
-/*
- * All open file-handles are 'clients' and are stored in a linked list:
- */
-
-struct r3964_message;
-
-struct r3964_client_info {
- spinlock_t lock;
- struct pid *pid;
- unsigned int sig_flags;
-
- struct r3964_client_info *next;
-
- struct r3964_message *first_msg;
- struct r3964_message *last_msg;
- struct r3964_block_header *next_block_to_read;
- int msg_count;
-};
-
-
-
-struct r3964_block_header;
-
-/* internal version of client_message: */
-struct r3964_message {
- int msg_id;
- int arg;
- int error_code;
- struct r3964_block_header *block;
- struct r3964_message *next;
-};
-
-/*
- * Header of received block in rx_buf/tx_buf:
- */
-
-struct r3964_block_header
-{
- unsigned int length; /* length in chars without header */
- unsigned char *data; /* usually data is located
- immediately behind this struct */
- unsigned int locks; /* only used in rx_buffer */
-
- struct r3964_block_header *next;
- struct r3964_client_info *owner; /* =NULL in rx_buffer */
-};
-
-/*
- * If rx_buf hasn't enough space to store R3964_MTU chars,
- * we will reject all incoming STX-requests by sending NAK.
- */
-
-#define RX_BUF_SIZE 4000
-#define TX_BUF_SIZE 4000
-#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100
-
-#define R3964_PARITY 0x0001
-#define R3964_FRAME 0x0002
-#define R3964_OVERRUN 0x0004
-#define R3964_UNKNOWN 0x0008
-#define R3964_BREAK 0x0010
-#define R3964_CHECKSUM 0x0020
-#define R3964_ERROR 0x003f
-#define R3964_BCC 0x4000
-#define R3964_DEBUG 0x8000
-
-
-struct r3964_info {
- spinlock_t lock;
- struct tty_struct *tty;
- unsigned char priority;
- unsigned char *rx_buf; /* ring buffer */
- unsigned char *tx_buf;
-
- struct r3964_block_header *rx_first;
- struct r3964_block_header *rx_last;
- struct r3964_block_header *tx_first;
- struct r3964_block_header *tx_last;
- unsigned int tx_position;
- unsigned int rx_position;
- unsigned char last_rx;
- unsigned char bcc;
- unsigned int blocks_in_rx_queue;
-
- struct mutex read_lock; /* serialize r3964_read */
-
- struct r3964_client_info *firstClient;
- unsigned int state;
- unsigned int flags;
-
- struct timer_list tmr;
- int nRetry;
-};
-
-#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index a4bb992623c4..74e0cc14ebf8 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -36,9 +36,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_JUMPED 0x1000
-#define LOOKUP_ROOT 0x2000
-#define LOOKUP_ROOT_GRABBED 0x0008
/* Scoping flags for lookup. */
#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
@@ -46,6 +43,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
+#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
@@ -59,26 +57,67 @@ static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
return user_path_at_empty(dfd, name, flags, path, NULL);
}
+struct dentry *lookup_one_qstr_excl(const struct qstr *name,
+ struct dentry *base,
+ unsigned int flags);
extern int kern_path(const char *, unsigned, struct path *);
extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
+extern struct dentry *user_path_locked_at(int , const char __user *, struct path *);
+int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
+ struct path *parent, struct qstr *last, int *type,
+ const struct path *root);
+int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *,
+ unsigned int, struct path *);
extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
+struct dentry *lookup_one(struct mnt_idmap *, const char *, struct dentry *, int);
+struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
+ const char *name, struct dentry *base,
+ int len);
+struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
+ const char *name,
+ struct dentry *base, int len);
extern int follow_down_one(struct path *);
-extern int follow_down(struct path *);
+extern int follow_down(struct path *path, unsigned int flags);
extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern int __must_check nd_jump_link(struct path *path);
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * In most filesystems, umask stripping depends on whether or not the
+ * filesystem supports POSIX ACLs. If the filesystem doesn't support them,
+ * umask stripping is done directly here. If the filesystem does support POSIX
+ * ACLs umask stripping is deferred until the filesystem calls
+ * posix_acl_create().
+ *
+ * Some filesystems (like NFSv4) also want to avoid umask stripping by the
+ * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK
+ * to get this effect without declaring that they support POSIX ACLs.
+ *
+ * Returns: mode
+ */
+static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK))
+ mode &= ~current_umask();
+ return mode;
+}
+
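As a sketch of where mode_strip_umask() sits, a filesystem create path would strip the umask before allocating the inode; demo_fs_new_inode() is illustrative, not an in-tree caller:

static struct inode *demo_fs_new_inode(struct inode *dir, umode_t mode)
{
	/*
	 * Apply the VFS umask policy unless POSIX ACLs (or SB_I_NOUMASK)
	 * defer the stripping to posix_acl_create().
	 */
	mode = mode_strip_umask(dir, mode);

	/* ... allocate and initialise the inode with 'mode' ... */
	return NULL;	/* sketch only */
}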
+extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
@@ -98,7 +137,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
static inline bool
retry_estale(const long error, const unsigned int flags)
{
- return error == -ESTALE && !(flags & LOOKUP_REVAL);
+ return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL));
}
#endif /* _LINUX_NAMEI_H */
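retry_estale() now marks the retry case as unlikely(). The pattern it supports looks like this sketch, where demo_path_op() stands in for any path-based operation:

static int demo_path_op(int dfd, const char __user *name, unsigned int flags);

static int demo_user_op(int dfd, const char __user *name)
{
	unsigned int lookup_flags = 0;
	int error;

retry:
	error = demo_path_op(dfd, name, lookup_flags);
	if (retry_estale(error, lookup_flags)) {
		/* Force revalidation and walk the path once more */
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}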
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 55c735997805..b9771ba1ef87 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -8,9 +8,11 @@
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
+#include <linux/perf_event.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
+ NVDIMM_REVALIDATE_REGION,
};
enum nvdimm_claim_class {
@@ -22,11 +24,62 @@ enum nvdimm_claim_class {
NVDIMM_CCLASS_UNKNOWN,
};
+#define NVDIMM_EVENT_VAR(_id) event_attr_##_id
+#define NVDIMM_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
+
+#define NVDIMM_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id, \
+ nvdimm_events_sysfs_show)
+
+/* Event attribute array index */
+#define NVDIMM_PMU_FORMAT_ATTR 0
+#define NVDIMM_PMU_EVENT_ATTR 1
+#define NVDIMM_PMU_CPUMASK_ATTR 2
+#define NVDIMM_PMU_NULL_ATTR 3
+
+/**
+ * struct nvdimm_pmu - data structure for nvdimm perf driver
+ * @pmu: pmu data structure for nvdimm performance stats.
+ * @dev: nvdimm device pointer.
+ * @cpu: designated cpu for counter access.
+ * @node: node for cpu hotplug notifier link.
+ * @cpuhp_state: state for cpu hotplug notification.
+ * @arch_cpumask: cpumask to get designated cpu for counter access.
+ */
+struct nvdimm_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+ /* cpumask provided by arch/platform specific code */
+ struct cpumask arch_cpumask;
+};
+
+struct platform_device;
+
+#ifdef CONFIG_PERF_EVENTS
+extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
+
+int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
+void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
+
+#else
+static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
+static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
+#endif
+
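A hedged sketch of how a platform driver would hook into register_nvdimm_pmu(); the pmu callbacks and event attribute groups are elided, and demo_nvdimm_pmu_probe() is illustrative:

#include <linux/nd.h>
#include <linux/platform_device.h>

static int demo_nvdimm_pmu_probe(struct platform_device *pdev)
{
	struct nvdimm_pmu *nd_pmu;

	nd_pmu = devm_kzalloc(&pdev->dev, sizeof(*nd_pmu), GFP_KERNEL);
	if (!nd_pmu)
		return -ENOMEM;

	/*
	 * Fill in nd_pmu->pmu callbacks and attr_groups before registering.
	 * With CONFIG_PERF_EVENTS=n the stub above returns -ENXIO instead.
	 */
	return register_nvdimm_pmu(nd_pmu, pdev);
}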
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
void (*notify)(struct device *dev, enum nvdimm_event event);
};
@@ -87,31 +140,10 @@ struct nd_namespace_pmem {
struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name;
- u8 *uuid;
+ uuid_t *uuid;
int id;
};
-/**
- * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
- * @alt_name: namespace name supplied in the dimm label
- * @uuid: namespace name supplied in the dimm label
- * @id: ida allocated id
- * @lbasize: blk namespaces have a native sector size when btt not present
- * @size: sum of all the resource ranges allocated to this namespace
- * @num_resources: number of dpa extents to claim
- * @res: discontiguous dpa extents for given dimm
- */
-struct nd_namespace_blk {
- struct nd_namespace_common common;
- char *alt_name;
- u8 *uuid;
- int id;
- unsigned long lbasize;
- resource_size_t size;
- int num_resources;
- struct resource **res;
-};
-
static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
return container_of(dev, struct nd_namespace_io, common.dev);
@@ -124,11 +156,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device
return container_of(nsio, struct nd_namespace_pmem, nsio);
}
-static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
-{
- return container_of(dev, struct nd_namespace_blk, common.dev);
-}
-
/**
* nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
* @ndns: device to read
diff --git a/include/linux/net.h b/include/linux/net.h
index d48ff1180879..15df6d5f27a7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -21,6 +21,7 @@
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
+#include <linux/mm.h>
#include <linux/sockptr.h>
#include <uapi/linux/net.h>
@@ -40,6 +41,9 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define SOCK_SUPPORT_ZC 5
+#define SOCK_CUSTOM_SOCKOPT 6
+#define SOCK_PASSPIDFD 7
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -119,11 +123,30 @@ struct socket {
struct file *file;
struct sock *sk;
- const struct proto_ops *ops;
+ const struct proto_ops *ops; /* Might change with IPV6_ADDRFORM or MPTCP. */
struct socket_wq wq;
};
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
struct vm_area_struct;
struct page;
struct sockaddr;
@@ -132,6 +155,8 @@ struct module;
struct sk_buff;
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
+typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
+
struct proto_ops {
int family;
@@ -182,10 +207,9 @@ struct proto_ops {
size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
- ssize_t (*sendpage) (struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+ void (*splice_eof)(struct socket *sock);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
@@ -194,8 +218,8 @@ struct proto_ops {
*/
int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
- int (*sendpage_locked)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
+	/* Unlike read_sock(), this reads an entire skb at a time. */
+ int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
int (*set_rcvlowat)(struct sock *sk, int val);
@@ -236,7 +260,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
#define sockfd_put(sock) fput(sock->file)
int net_ratelimit(void);
@@ -275,16 +299,26 @@ do { \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
#define net_dbg_ratelimited(fmt, ...) \
- do { \
- if (0) \
- no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
- } while (0)
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
-#define net_get_random_once_wait(buf, nbytes) \
- get_random_once_wait((buf), (nbytes))
+
+/*
+ * E.g. XFS meta- & log-data is in slab pages, or bcache meta
+ * data pages, or other high order pages allocated by
+ * __get_free_pages() without __GFP_COMP, which have a page_count
+ * of 0 and/or have PageSlab() set. We cannot use sendpage for
+ * those, as that does get_page(); put_page(); and would cause
+ * either a VM_BUG directly, or __page_cache_release a page that
+ * would actually still be referenced by someone, leading to some
+ * obscure delayed Oops somewhere else.
+ */
+static inline bool sendpage_ok(struct page *page)
+{
+ return !PageSlab(page) && page_count(page) >= 1;
+}
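With the sendpage entry points removed further down in this file, zero-copy transmission goes through sendmsg() with MSG_SPLICE_PAGES, gated on sendpage_ok(). A sketch under that assumption; demo_xmit_page() is illustrative:

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int demo_xmit_page(struct socket *sock, struct page *page,
			  int offset, size_t len)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

	/* Slab or zero-refcount pages must not be spliced; fall back to copying */
	if (!sendpage_ok(page))
		msg.msg_flags &= ~MSG_SPLICE_PAGES;

	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
	return sock_sendmsg(sock, &msg);
}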
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
@@ -300,10 +334,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
index f41387a8969f..0aa4411528fc 100644
--- a/include/linux/net/intel/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -4,6 +4,8 @@
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
+#include <linux/auxiliary_bus.h>
+
#define I40E_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
@@ -24,11 +26,6 @@ struct i40e_client_version {
u8 rsvd;
};
-enum i40e_client_state {
- __I40E_CLIENT_NULL,
- __I40E_CLIENT_REGISTERED
-};
-
enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_NONE,
__I40E_CLIENT_INSTANCE_OPENED,
@@ -48,7 +45,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[1];
+ struct i40e_qv_info qv_info[] __counted_by(num_vectors);
};
@@ -78,6 +75,7 @@ struct i40e_info {
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
+ struct auxiliary_device *aux_dev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
@@ -100,6 +98,11 @@ struct i40e_info {
u32 fw_build; /* firmware build number */
};
+struct i40e_auxiliary_device {
+ struct auxiliary_device aux_dev;
+ struct i40e_info *ldev;
+};
+
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
@@ -182,13 +185,7 @@ struct i40e_client {
const struct i40e_client_ops *ops; /* client ops provided by the client */
};
-static inline bool i40e_client_is_registered(struct i40e_client *client)
-{
- return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
-}
-
-/* used by clients */
-int i40e_register_client(struct i40e_client *client);
-int i40e_unregister_client(struct i40e_client *client);
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
+void i40e_client_device_unregister(struct i40e_info *ldev);
#endif /* _I40E_CLIENT_H_ */
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
new file mode 100644
index 000000000000..1c1332e4df26
--- /dev/null
+++ b/include/linux/net/intel/iidc.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _IIDC_H_
+#define _IIDC_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dcbnl.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+enum iidc_event_type {
+ IIDC_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_EVENT_AFTER_MTU_CHANGE,
+ IIDC_EVENT_BEFORE_TC_CHANGE,
+ IIDC_EVENT_AFTER_TC_CHANGE,
+ IIDC_EVENT_CRIT_ERR,
+ IIDC_EVENT_NBITS /* must be last */
+};
+
+enum iidc_reset_type {
+ IIDC_PFR,
+ IIDC_CORER,
+ IIDC_GLOBR,
+};
+
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
+#define IIDC_MAX_USER_PRIORITY 8
+#define IIDC_MAX_DSCP_MAPPING 64
+#define IIDC_DSCP_PFC_MODE 0x1
+
+/* Struct to hold per RDMA Qset info */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset and used by RDMA driver
+ * for calls to ice_del_rdma_qset
+ */
+ u32 teid; /* Qset TEID */
+ u16 qs_handle; /* RDMA driver provides this */
+ u16 vport_id; /* VSI index */
+ u8 tc; /* TC branch the Qset should belong to */
+};
+
+struct iidc_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_qos_params {
+ struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+ u8 pfc_mode;
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+};
+
+struct iidc_event {
+ DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
+ u32 reg;
+};
+
+struct ice_pf;
+
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
+
+/* Structure representing auxiliary-driver-tailored information about the core
+ * PCI dev; each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+
+struct iidc_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct ice_pf *pf;
+};
+
+/* structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ /* This event_handler is meant to be a blocking call. For instance,
+ * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
+ * return until the auxiliary driver is ready for the MTU change to
+ * happen.
+ */
+ void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
+};
+
+#endif /* _IIDC_H_*/
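A hedged sketch of the consuming side: an RDMA auxiliary driver recovering its iidc_auxiliary_dev in probe and reacting to an MTU event. The demo_* names are illustrative and error handling is elided:

#include <linux/net/intel/iidc.h>

static void demo_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	if (test_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type)) {
		/*
		 * Quiesce RDMA traffic here; per the comment above, this
		 * must not return until the driver is ready for the change.
		 */
	}
}

static int demo_aux_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	struct iidc_auxiliary_dev *iadev =
		container_of(adev, struct iidc_auxiliary_dev, adev);
	struct iidc_rdma_qset_params qset = { .qs_handle = 0, .tc = 0 };

	return ice_add_rdma_qset(iadev->pf, &qset);
}

static struct iidc_auxiliary_drv demo_adrv = {
	.adrv = {
		.probe		= demo_aux_probe,
	},
	.event_handler	= demo_event_handler,
};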
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
new file mode 100644
index 000000000000..eb01c37e71e0
--- /dev/null
+++ b/include/linux/net_tstamp.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_NET_TIMESTAMPING_H_
+#define _LINUX_NET_TIMESTAMPING_H_
+
+#include <uapi/linux/net_tstamp.h>
+
+enum hwtstamp_source {
+ HWTSTAMP_SOURCE_NETDEV,
+ HWTSTAMP_SOURCE_PHYLIB,
+};
+
+/**
+ * struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config
+ *
+ * @flags: see struct hwtstamp_config
+ * @tx_type: see struct hwtstamp_config
+ * @rx_filter: see struct hwtstamp_config
+ * @ifr: pointer to ifreq structure from the original ioctl request, to pass to
+ * a legacy implementation of a lower driver
+ * @copied_to_user: request was passed to a legacy implementation which already
+ * copied the ioctl request back to user space
+ * @source: indication whether timestamps should come from the netdev or from
+ * an attached phylib PHY
+ *
+ * Prefer using this structure for in-kernel processing of hardware
+ * timestamping configuration, over the inextensible struct hwtstamp_config
+ * exposed to the %SIOCGHWTSTAMP and %SIOCSHWTSTAMP ioctl UAPI.
+ */
+struct kernel_hwtstamp_config {
+ int flags;
+ int tx_type;
+ int rx_filter;
+ struct ifreq *ifr;
+ bool copied_to_user;
+ enum hwtstamp_source source;
+};
+
+static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg,
+ const struct hwtstamp_config *cfg)
+{
+ kernel_cfg->flags = cfg->flags;
+ kernel_cfg->tx_type = cfg->tx_type;
+ kernel_cfg->rx_filter = cfg->rx_filter;
+}
+
+static inline void hwtstamp_config_from_kernel(struct hwtstamp_config *cfg,
+ const struct kernel_hwtstamp_config *kernel_cfg)
+{
+ cfg->flags = kernel_cfg->flags;
+ cfg->tx_type = kernel_cfg->tx_type;
+ cfg->rx_filter = kernel_cfg->rx_filter;
+}
+
+static inline bool kernel_hwtstamp_config_changed(const struct kernel_hwtstamp_config *a,
+ const struct kernel_hwtstamp_config *b)
+{
+ return a->flags != b->flags ||
+ a->tx_type != b->tx_type ||
+ a->rx_filter != b->rx_filter;
+}
+
+#endif /* _LINUX_NET_TIMESTAMPING_H_ */
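A sketch of the intended flow: the ioctl path converts the inextensible UAPI struct to the kernel copy, hands it to the driver, and converts back. demo_drv_hwtstamp_set() is an illustrative driver hook, not an existing callback name:

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int demo_drv_hwtstamp_set(struct net_device *dev,
				 struct kernel_hwtstamp_config *cfg);

static int demo_siocshwtstamp(struct net_device *dev, struct ifreq *ifr)
{
	struct kernel_hwtstamp_config kernel_cfg = { .ifr = ifr };
	struct hwtstamp_config cfg;
	int err;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	hwtstamp_config_to_kernel(&kernel_cfg, &cfg);

	err = demo_drv_hwtstamp_set(dev, &kernel_cfg);
	if (err)
		return err;

	hwtstamp_config_from_kernel(&cfg, &kernel_cfg);
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}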
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 0b17c4322b09..7c2d77d75a88 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -84,10 +84,16 @@ enum {
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
+ NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */
+
+ NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */
+ NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */
+ NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */
+ NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */
/*
* Add your fresh new feature above and remember to update
- * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * netdev_features_strings[] in net/ethtool/common.c and maybe
* some feature mask #defines below. Please also describe it
* in Documentation/networking/netdev-features.rst.
*/
@@ -157,8 +163,13 @@ enum {
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
+#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD)
+#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS)
+#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM)
+#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
+#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
-/* Finds the next feature with the highest number of the range of start till 0.
+/* Finds the highest set feature bit in the range start-1 down to 0.
*/
static inline int find_next_netdev_feature(u64 feature, unsigned long start)
{
@@ -177,7 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
for ((bit) = find_next_netdev_feature((mask_addr), \
NETDEV_FEATURE_COUNT); \
(bit) >= 0; \
- (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+ (bit) = find_next_netdev_feature((mask_addr), (bit)))
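The off-by-one fix above follows from find_next_netdev_feature() searching strictly below its start argument: passing (bit) rather than (bit) - 1 on the next iteration is what keeps every set bit visited exactly once. A sketch of a walk over a feature mask:

#include <linux/netdev_features.h>
#include <linux/printk.h>

static void demo_dump_features(netdev_features_t features)
{
	int bit;

	/* Visits each set bit exactly once, highest-numbered first */
	for_each_netdev_feature(features, bit)
		pr_info("feature bit %d is set\n", bit);
}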
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -207,8 +218,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
- NETIF_F_GSO_SCTP)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
* If one device supports one of these features, then enable them
@@ -234,7 +245,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
/* Changeable features with no special hardware requirements that defaults to off. */
-#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
+#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7bd4fcdd0738..cb37817d6382 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -28,35 +28,42 @@
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
+#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
-#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
-#include <net/xdp.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
+#include <linux/rbtree.h>
+#include <net/net_trackers.h>
+#include <net/net_debug.h>
+#include <net/dropreason-core.h>
struct netpoll_info;
struct device;
+struct ethtool_ops;
+struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;
-
+struct netdev_name_node;
+struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
@@ -69,9 +76,16 @@ struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+struct xdp_frame;
+struct xdp_metadata_ops;
+struct xdp_md;
+
+typedef u32 xdp_features_t;
+void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
+void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
@@ -165,55 +179,65 @@ static inline bool dev_xmit_complete(int rc)
* (unsigned long) so they can be read and written atomically.
*/
+#define NET_DEV_STAT(FIELD) \
+ union { \
+ unsigned long FIELD; \
+ atomic_long_t __##FIELD; \
+ }
+
struct net_device_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long rx_errors;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long tx_dropped;
- unsigned long multicast;
- unsigned long collisions;
- unsigned long rx_length_errors;
- unsigned long rx_over_errors;
- unsigned long rx_crc_errors;
- unsigned long rx_frame_errors;
- unsigned long rx_fifo_errors;
- unsigned long rx_missed_errors;
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
- unsigned long rx_compressed;
- unsigned long tx_compressed;
+ NET_DEV_STAT(rx_packets);
+ NET_DEV_STAT(tx_packets);
+ NET_DEV_STAT(rx_bytes);
+ NET_DEV_STAT(tx_bytes);
+ NET_DEV_STAT(rx_errors);
+ NET_DEV_STAT(tx_errors);
+ NET_DEV_STAT(rx_dropped);
+ NET_DEV_STAT(tx_dropped);
+ NET_DEV_STAT(multicast);
+ NET_DEV_STAT(collisions);
+ NET_DEV_STAT(rx_length_errors);
+ NET_DEV_STAT(rx_over_errors);
+ NET_DEV_STAT(rx_crc_errors);
+ NET_DEV_STAT(rx_frame_errors);
+ NET_DEV_STAT(rx_fifo_errors);
+ NET_DEV_STAT(rx_missed_errors);
+ NET_DEV_STAT(tx_aborted_errors);
+ NET_DEV_STAT(tx_carrier_errors);
+ NET_DEV_STAT(tx_fifo_errors);
+ NET_DEV_STAT(tx_heartbeat_errors);
+ NET_DEV_STAT(tx_window_errors);
+ NET_DEV_STAT(rx_compressed);
+ NET_DEV_STAT(tx_compressed);
};
+#undef NET_DEV_STAT
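Each counter is now a union of an unsigned long and an atomic_long_t sharing the same storage: the layout and every existing reader stay unchanged, while writers that may race can go through the atomic view. A sketch of the atomic side; the direct __field access shown here is what the DEV_STATS_INC()-style accessors elsewhere in the tree are assumed to wrap:

#include <linux/netdevice.h>

static void demo_count_tx_drop(struct net_device *dev)
{
	/* The atomic_long_t view aliases the plain unsigned long field */
	atomic_long_inc(&dev->stats.__tx_dropped);
}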
+/* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for dev_get_stats() sake.
+ */
+struct net_device_core_stats {
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ unsigned long rx_nohandler;
+ unsigned long rx_otherhost_dropped;
+} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
#include <linux/skbuff.h>
-#ifdef CONFIG_RPS
-#include <linux/static_key.h>
-extern struct static_key_false rps_needed;
-extern struct static_key_false rfs_needed;
-#endif
-
struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
+ struct rb_node node;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
-#define NETDEV_HW_ADDR_T_SLAVE 3
-#define NETDEV_HW_ADDR_T_UNICAST 4
-#define NETDEV_HW_ADDR_T_MULTICAST 5
+#define NETDEV_HW_ADDR_T_UNICAST 3
+#define NETDEV_HW_ADDR_T_MULTICAST 4
bool global_use;
int sync_cnt;
int refcount;
@@ -224,6 +248,9 @@ struct netdev_hw_addr {
struct netdev_hw_addr_list {
struct list_head list;
int count;
+
+ /* Auxiliary tree for faster lookup on addition and deletion */
+ struct rb_root tree;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
@@ -235,11 +262,17 @@ struct netdev_hw_addr_list {
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
+#define netdev_for_each_synced_uc_addr(_ha, _dev) \
+ netdev_for_each_uc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
+#define netdev_for_each_synced_mc_addr(_ha, _dev) \
+ netdev_for_each_mc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
struct hh_cache {
unsigned int hh_len;
@@ -263,9 +296,11 @@ struct hh_cache {
* relationship HH alignment <= LL alignment.
*/
#define LL_RESERVED_SPACE(dev) \
- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -294,19 +329,6 @@ enum netdev_state_t {
__LINK_STATE_TESTING,
};
-
-/*
- * This structure holds boot-time configured netdevice settings. They
- * are then used in the device probing.
- */
-struct netdev_boot_setup {
- char name[IFNAMSIZ];
- struct ifmap map;
-};
-#define NETDEV_BOOT_SETUP_MAX 8
-
-int __init netdev_boot_setup(char *str);
-
struct gro_list {
struct list_head list;
int count;
@@ -336,37 +358,49 @@ struct napi_struct {
unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
+ /* CPU actively polling if netpoll is configured */
int poll_owner;
#endif
+ /* CPU on which NAPI has been scheduled for processing */
+ int list_owner;
struct net_device *dev;
struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
struct list_head rx_list; /* Pending GRO_NORMAL skbs */
int rx_count; /* length of rx_list */
+ unsigned int napi_id;
struct hrtimer timer;
+ struct task_struct *thread;
+ /* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
- unsigned int napi_id;
+ int irq;
};
enum {
- NAPI_STATE_SCHED, /* Poll is scheduled */
- NAPI_STATE_MISSED, /* reschedule a napi */
- NAPI_STATE_DISABLE, /* Disable pending */
- NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
- NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_MISSED, /* reschedule a napi */
+ NAPI_STATE_DISABLE, /* Disable pending */
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
+ NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
+	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
+ NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
};
enum {
- NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
- NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
- NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
- NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
- NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
- NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
- NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
+ NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
+ NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
+ NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
};
enum gro_result {
@@ -374,7 +408,6 @@ enum gro_result {
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
- GRO_DROP,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -437,6 +470,34 @@ static inline bool napi_disable_pending(struct napi_struct *n)
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
+static inline bool napi_prefer_busy_poll(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
+}
+
+/**
+ * napi_is_scheduled - test if NAPI is scheduled
+ * @n: NAPI context
+ *
+ * This check is "best-effort". Without any locking, a NAPI can be
+ * scheduled or can terminate right after this check, so the result
+ * may be imprecise.
+ *
+ * NAPI_STATE_SCHED is an internal state; napi_is_scheduled() should
+ * not normally be used. Use napi_schedule() instead.
+ *
+ * Use it only when the driver really needs to know whether a NAPI
+ * is scheduled, for example from a delayed timer that can be skipped
+ * if the NAPI is already scheduled.
+ *
+ * Return: true if NAPI is scheduled, false otherwise.
+ */
+static inline bool napi_is_scheduled(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_SCHED, &n->state);
+}
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -445,11 +506,18 @@ bool napi_schedule_prep(struct napi_struct *n);
*
* Schedule NAPI poll routine to be called if it is not already
* running.
+ * Return true if the NAPI was scheduled, false otherwise.
+ * Refer to napi_schedule_prep() for additional reasons why
+ * a NAPI might not be scheduled.
*/
-static inline void napi_schedule(struct napi_struct *n)
+static inline bool napi_schedule(struct napi_struct *n)
{
- if (napi_schedule_prep(n))
+ if (napi_schedule_prep(n)) {
__napi_schedule(n);
+ return true;
+ }
+
+ return false;
}
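With napi_schedule() now reporting whether it actually scheduled the poll, an interrupt handler can key follow-up work off the return value. A hedged sketch; struct demo_priv and demo_disable_rx_irq() are illustrative:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static void demo_disable_rx_irq(struct demo_priv *priv);

static irqreturn_t demo_rx_isr(int irq, void *data)
{
	struct demo_priv *priv = data;

	/* Only mask further RX interrupts if we really scheduled NAPI */
	if (napi_schedule(&priv->napi))
		demo_disable_rx_irq(priv);

	return IRQ_HANDLED;
}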
/**
@@ -464,43 +532,24 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
__napi_schedule_irqoff(n);
}
-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline bool napi_reschedule(struct napi_struct *napi)
-{
- if (napi_schedule_prep(napi)) {
- __napi_schedule(napi);
- return true;
- }
- return false;
-}
-
-bool napi_complete_done(struct napi_struct *n, int work_done);
/**
- * napi_complete - NAPI processing complete
- * @n: NAPI context
+ * napi_complete_done - NAPI processing complete
+ * @n: NAPI context
+ * @work_done: number of packets processed
*
- * Mark NAPI processing as complete.
- * Consider using napi_complete_done() instead.
+ * Mark NAPI processing as complete. Should only be called if poll budget
+ * has not been completely consumed.
+ * Prefer over napi_complete().
* Return false if device should avoid rearming interrupts.
*/
+bool napi_complete_done(struct napi_struct *n, int work_done);
+
static inline bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}
-/**
- * napi_hash_del - remove a NAPI from global table
- * @napi: NAPI context
- *
- * Warning: caller must observe RCU grace period
- * before freeing memory containing @napi, if
- * this function returns true.
- * Note: core networking stack automatically calls it
- * from netif_napi_del().
- * Drivers might want to call this helper to combine all
- * the needed RCU grace periods into a single one.
- */
-bool napi_hash_del(struct napi_struct *napi);
+int dev_set_threaded(struct net_device *dev, bool threaded);
/**
* napi_disable - prevent NAPI from scheduling
@@ -511,20 +560,7 @@ bool napi_hash_del(struct napi_struct *napi);
*/
void napi_disable(struct napi_struct *n);
-/**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
- *
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
- */
-static inline void napi_enable(struct napi_struct *n)
-{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
-}
+void napi_enable(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -555,8 +591,8 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
unsigned long val, new;
+ val = READ_ONCE(n->state);
do {
- val = READ_ONCE(n->state);
if (val & NAPIF_STATE_DISABLE)
return true;
@@ -564,7 +600,7 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
return false;
new = val | NAPIF_STATE_MISSED;
- } while (cmpxchg(&n->state, val, new) != val);
+ } while (!try_cmpxchg(&n->state, &val, new));
return true;
}
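The conversion above works because try_cmpxchg() writes the observed value back into val on failure, which makes the explicit reload at the top of the loop redundant. The same pattern in isolation, as a sketch:

#include <linux/atomic.h>

static void demo_set_state_flag(unsigned long *state, unsigned long flag)
{
	unsigned long val = READ_ONCE(*state);

	/* On failure, try_cmpxchg() refreshes 'val' with the current value */
	while (!try_cmpxchg(state, &val, val | flag))
		;
}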
@@ -600,8 +636,10 @@ struct netdev_queue {
* read-mostly part
*/
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct Qdisc __rcu *qdisc;
- struct Qdisc *qdisc_sleeping;
+ struct Qdisc __rcu *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
#endif
@@ -613,13 +651,17 @@ struct netdev_queue {
* Number of TX timeouts for this queue
* (/sys/class/net/DEV/Q/trans_timeout)
*/
- unsigned long trans_timeout;
+ atomic_long_t trans_timeout;
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
/*
* write-mostly part
*/
@@ -640,11 +682,30 @@ struct netdev_queue {
extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;
+/*
+ * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
+ *                                     == 1 : For initns only
+ *                                     == 2 : For none.
+ */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return net == &init_net ||
- !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net;
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -663,106 +724,16 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
#endif
}
-#ifdef CONFIG_RPS
-/*
- * This structure holds an RPS map which can be of variable length. The
- * map is an array of CPUs.
- */
-struct rps_map {
- unsigned int len;
- struct rcu_head rcu;
- u16 cpus[];
-};
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
-
-/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
- * tail pointer for that CPU's input queue at the time of last enqueue, and
- * a hardware filter index.
- */
-struct rps_dev_flow {
- u16 cpu;
- u16 filter;
- unsigned int last_qtail;
-};
-#define RPS_NO_FILTER 0xffff
-
-/*
- * The rps_dev_flow_table structure contains a table of flow mappings.
- */
-struct rps_dev_flow_table {
- unsigned int mask;
- struct rcu_head rcu;
- struct rps_dev_flow flows[];
-};
-#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- ((_num) * sizeof(struct rps_dev_flow)))
-
-/*
- * The rps_sock_flow_table contains mappings of flows to the last CPU
- * on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high-order bits
- * of flow hash, lower part is CPU number.
- * rps_cpu_mask is used to partition the space, depending on number of
- * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
- * meaning we use 32-6=26 bits for the hash.
- */
-struct rps_sock_flow_table {
- u32 mask;
-
- u32 ents[] ____cacheline_aligned_in_smp;
-};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
-
-#define RPS_NO_CPU 0xffff
-
-extern u32 rps_cpu_mask;
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
-static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
-{
- if (table && hash) {
- unsigned int index = hash & table->mask;
- u32 val = hash & ~rps_cpu_mask;
-
- /* We only give a hint, preemption can change CPU under us */
- val |= raw_smp_processor_id();
-
- if (table->ents[index] != val)
- table->ents[index] = val;
- }
-}
-
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
-#endif /* CONFIG_RPS */
-/* This structure contains an instance of an RX queue. */
-struct netdev_rx_queue {
-#ifdef CONFIG_RPS
- struct rps_map __rcu *rps_map;
- struct rps_dev_flow_table __rcu *rps_flow_table;
-#endif
- struct kobject kobj;
- struct net_device *dev;
- struct xdp_rxq_info xdp_rxq;
-#ifdef CONFIG_XDP_SOCKETS
- struct xdp_umem *umem;
-#endif
-} ____cacheline_aligned_in_smp;
-
-/*
- * RX queue sysfs structures and functions.
- */
-struct rx_queue_attribute {
- struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
- ssize_t (*store)(struct netdev_rx_queue *queue,
- const char *buf, size_t len);
+/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
+enum xps_map_type {
+ XPS_CPUS = 0,
+ XPS_RXQS,
+ XPS_MAPS_MAX,
};
#ifdef CONFIG_XPS
@@ -782,9 +753,19 @@ struct xps_map {
/*
* This structure holds all XPS maps for device. Maps are indexed by CPU.
+ *
+ * We record in nr_ids the number of cpus/rxqs in use when the struct is
+ * allocated; this helps avoid out-of-bounds memory accesses.
+ *
+ * We record in num_tc the number of traffic classes in use when the struct
+ * is allocated; this is used when navigating the maps to make sure we never
+ * cross their upper bound, since the original dev->num_tc can change in
+ * the meantime.
*/
struct xps_dev_maps {
struct rcu_head rcu;
+ unsigned int nr_ids;
+ s16 num_tc;
struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};
@@ -842,7 +823,69 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
+enum net_device_path_type {
+ DEV_PATH_ETHERNET = 0,
+ DEV_PATH_VLAN,
+ DEV_PATH_BRIDGE,
+ DEV_PATH_PPPOE,
+ DEV_PATH_DSA,
+ DEV_PATH_MTK_WDMA,
+};
+
+struct net_device_path {
+ enum net_device_path_type type;
+ const struct net_device *dev;
+ union {
+ struct {
+ u16 id;
+ __be16 proto;
+ u8 h_dest[ETH_ALEN];
+ } encap;
+ struct {
+ enum {
+ DEV_PATH_BR_VLAN_KEEP,
+ DEV_PATH_BR_VLAN_TAG,
+ DEV_PATH_BR_VLAN_UNTAG,
+ DEV_PATH_BR_VLAN_UNTAG_HW,
+ } vlan_mode;
+ u16 vlan_id;
+ __be16 vlan_proto;
+ } bridge;
+ struct {
+ int port;
+ u16 proto;
+ } dsa;
+ struct {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+ u8 amsdu;
+ } mtk_wdma;
+ };
+};
+
+#define NET_DEVICE_PATH_STACK_MAX 5
+#define NET_DEVICE_PATH_VLAN_MAX 2
+
+struct net_device_path_stack {
+ int num_paths;
+ struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
+};
+
+struct net_device_path_ctx {
+ const struct net_device *dev;
+ u8 daddr[ETH_ALEN];
+
+ int num_vlans;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlan[NET_DEVICE_PATH_VLAN_MAX];
+};
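
For illustration, a minimal sketch of how a stacked device could implement ndo_fill_forward_path() using the descriptors above; example_vlan_priv, example_vlan_dev_priv() and lower_dev are invented names, not part of this patch:

static int example_vlan_fill_forward_path(struct net_device_path_ctx *ctx,
                                          struct net_device_path *path)
{
    struct example_vlan_priv *priv = example_vlan_dev_priv(ctx->dev);

    path->type = DEV_PATH_VLAN;
    path->encap.id = priv->vlan_id;
    path->encap.proto = priv->vlan_proto;
    path->dev = ctx->dev;
    /* Let dev_fill_forward_path() continue on the lower device. */
    ctx->dev = priv->lower_dev;

    return 0;
}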
+
enum tc_setup_type {
+ TC_QUERY_CAPS,
TC_SETUP_QDISC_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
@@ -861,6 +904,8 @@ enum tc_setup_type {
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
+ TC_SETUP_QDISC_HTB,
+ TC_SETUP_ACT,
};
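
As a rough sketch of how the new TC_SETUP_QDISC_HTB type reaches a driver through .ndo_setup_tc (example_setup_htb() is a hypothetical helper):

static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
                            void *type_data)
{
    switch (type) {
    case TC_SETUP_QDISC_HTB:
        return example_setup_htb(dev, type_data);
    default:
        return -EOPNOTSUPP;
    }
}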
/* These structures hold the attributes of bpf state that are being passed
@@ -879,7 +924,7 @@ enum bpf_netdev_command {
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
- XDP_SETUP_XSK_UMEM,
+ XDP_SETUP_XSK_POOL,
};
struct bpf_prog_offload_ops;
@@ -913,9 +958,9 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_SETUP_XSK_UMEM */
+ /* XDP_SETUP_XSK_POOL */
struct {
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
u16 queue_id;
} xsk;
};
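
A minimal sketch of a driver .ndo_bpf handler after the umem-to-pool rename; example_setup_xsk_pool() is hypothetical:

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
    switch (bpf->command) {
    case XDP_SETUP_XSK_POOL:
        /* bpf->xsk.pool may be NULL when the pool is being unbound. */
        return example_setup_xsk_pool(dev, bpf->xsk.pool,
                                      bpf->xsk.queue_id);
    default:
        return -EINVAL;
    }
}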
@@ -927,12 +972,16 @@ struct netdev_bpf {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x);
+ int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
void (*xdo_dev_state_delete) (struct xfrm_state *x);
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
+ int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
+ void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
+ void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
#endif
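
To show why extack was threaded through here, a sketch of a state-add callback that reports the reason for rejection; example_ipsec_offload_ok() is an invented helper:

static int example_xdo_dev_state_add(struct xfrm_state *x,
                                     struct netlink_ext_ack *extack)
{
    if (!example_ipsec_offload_ok(x)) {
        NL_SET_ERR_MSG(extack, "Unsupported xfrm state for offload");
        return -EOPNOTSUPP;
    }
    return 0;
}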
@@ -944,16 +993,6 @@ struct dev_ifalias {
struct devlink;
struct tlsdev_ops;
-struct netdev_name_node {
- struct hlist_node hlist;
- struct list_head list;
- struct net_device *dev;
- const char *name;
-};
-
-int netdev_name_node_alt_create(struct net_device *dev, const char *name);
-int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
-
struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
@@ -1024,9 +1063,18 @@ struct netdev_net_notifier {
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user requests an ioctl which can't be handled by
- * the generic interface code. If not defined ioctls return
- * not supported error code.
+ * Old-style ioctl entry point. This is used internally by the
+ * appletalk and ieee802154 subsystems but is no longer called by
+ * the device ioctl handler.
+ *
+ * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Used by the bonding driver for its device-specific ioctls:
+ * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
+ * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
+ *
+ * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called for Ethernet-specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
+ * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
@@ -1183,12 +1231,28 @@ struct netdev_net_notifier {
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
 * Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
+ * struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
* int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
+ * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
+ * u16 nlmsg_flags, struct netlink_ext_ack *extack);
+ * Adds an MDB entry to dev.
+ * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Deletes the MDB entry from dev.
+ * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Bulk deletes MDB entries from dev.
+ * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
+ * struct netlink_callback *cb);
+ * Dumps MDB entries from dev. The first argument (marker) in the netlink
+ * callback is used by core rtnetlink code.
+ *
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
@@ -1215,19 +1279,6 @@ struct netdev_net_notifier {
* struct netdev_phys_item_id *ppid)
* Called to get the parent ID of the physical port of this device.
*
- * void (*ndo_udp_tunnel_add)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify a driver about the UDP port and socket
- * address family that a UDP tunnel is listening to. It is called only
- * when a new port starts listening. The operation is protected by the
- * RTNL.
- *
- * void (*ndo_udp_tunnel_del)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify the driver about a UDP port and socket
- * address family that the UDP tunnel is not listening to anymore. The
- * operation is protected by the RTNL.
- *
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
* Called by upper layer devices to accelerate switching or other
@@ -1246,11 +1297,6 @@ struct netdev_net_notifier {
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
- * void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
- * This function is used to pass protocol port error state information
- * to the switch driver. The switch driver can react to the proto_down
- * by doing a phys down on the associated switch port.
* int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
@@ -1272,19 +1318,39 @@ struct netdev_net_notifier {
* that got dropped are freed/returned via xdp_return_frame().
* Returns negative number, means general error invoking ndo, meaning
* no frames were xmit'ed and core-caller will free all frames.
+ * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ * struct xdp_buff *xdp);
+ * Get the xmit slave of master device based on the xdp_buff.
* int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
* This function is used to wake up the softirq, ksoftirqd or kthread
* responsible for sending and/or receiving packets on a specific
* queue id bound to an AF_XDP socket. The flags field specifies if
* only RX, only Tx, or both should be woken up using the flags
* XDP_WAKEUP_RX and XDP_WAKEUP_TX.
- * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
- * Get devlink port instance associated with a given netdev.
- * Called with a reference on the netdevice and devlink locks only,
- * rtnl_lock is not held.
* int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
* int cmd);
* Add, change, delete or get information on an IPv4 tunnel.
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
+ * If a device is paired with a peer device, return the peer instance.
+ * The caller must hold the RCU read lock.
+ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
+ * Get the forwarding path to reach the real device from the HW destination address.
+ * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ * const struct skb_shared_hwtstamps *hwtstamps,
+ * bool cycles);
+ * Get hardware timestamp based on normal/adjustable time or free running
+ * cycle counter. This function is required if the physical clock supports a
+ * free-running cycle counter.
+ *
+ * int (*ndo_hwtstamp_get)(struct net_device *dev,
+ * struct kernel_hwtstamp_config *kernel_config);
+ * Get the currently configured hardware timestamping parameters for the
+ * NIC device.
+ *
+ * int (*ndo_hwtstamp_set)(struct net_device *dev,
+ * struct kernel_hwtstamp_config *kernel_config,
+ * struct netlink_ext_ack *extack);
+ * Change the hardware timestamping parameters for the NIC device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1307,6 +1373,15 @@ struct net_device_ops {
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
+ int (*ndo_eth_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocbond)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocwandev)(struct net_device *dev,
+ struct if_settings *ifs);
+ int (*ndo_siocdevprivate)(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
@@ -1411,6 +1486,8 @@ struct net_device_ops {
struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
+ struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
+ struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
@@ -1431,7 +1508,10 @@ struct net_device_ops {
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid);
+ u16 vid, struct netlink_ext_ack *extack);
+ int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1443,6 +1523,23 @@ struct net_device_ops {
const unsigned char *addr,
u16 vid, u32 portid, u32 seq,
struct netlink_ext_ack *extack);
+ int (*ndo_mdb_add)(struct net_device *dev,
+ struct nlattr *tb[],
+ u16 nlmsg_flags,
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del_bulk)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_dump)(struct net_device *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb);
+ int (*ndo_mdb_get)(struct net_device *dev,
+ struct nlattr *tb[], u32 portid,
+ u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
@@ -1463,10 +1560,6 @@ struct net_device_ops {
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
- void (*ndo_udp_tunnel_add)(struct net_device *dev,
- struct udp_tunnel_info *ti);
- void (*ndo_udp_tunnel_del)(struct net_device *dev,
- struct udp_tunnel_info *ti);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1476,8 +1569,6 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
- int (*ndo_change_proto_down)(struct net_device *dev,
- bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
@@ -1487,15 +1578,27 @@ struct net_device_ops {
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
+ struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ struct xdp_buff *xdp);
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
- struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
int (*ndo_tunnel_ctl)(struct net_device *dev,
struct ip_tunnel_parm *p, int cmd);
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
+ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles);
+ int (*ndo_hwtstamp_get)(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config);
+ int (*ndo_hwtstamp_set)(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
};
/**
- * enum net_device_priv_flags - &struct net_device priv_flags
+ * enum netdev_priv_flags - &struct net_device priv_flags
*
 * These are the &struct net_device priv_flags; they are only set internally
* by drivers and used in the kernel. These flags are invisible to
@@ -1538,7 +1641,13 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
- * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
+ * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
+ * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
+ * skb_headlen(skb) == 0 (data starts from frag0)
+ * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
+ * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to
+ * ndo_hwtstamp_set() for all timestamp requests regardless of source,
+ * even if those aren't HWTSTAMP_SOURCE_NETDEV.
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1571,7 +1680,10 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
- IFF_LIVE_RENAME_OK = 1<<30,
+ IFF_NO_ADDRCONF = BIT_ULL(30),
+ IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
+ IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
+ IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33),
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1598,12 +1710,35 @@ enum netdev_priv_flags {
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
#define IFF_TEAM IFF_TEAM
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
+#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM
#define IFF_MACSEC IFF_MACSEC
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
-#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
+#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
+
+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+ ML_PRIV_NONE,
+ ML_PRIV_CAN,
+};
+
+enum netdev_stat_type {
+ NETDEV_PCPU_STAT_NONE,
+ NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
+ NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
+ NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
+};
+
+enum netdev_reg_state {
+ NETREG_UNINITIALIZED = 0,
+ NETREG_REGISTERED, /* completed register_netdevice */
+ NETREG_UNREGISTERING, /* called unregister_netdevice */
+ NETREG_UNREGISTERED, /* completed unregister todo */
+ NETREG_RELEASED, /* called free_netdev */
+ NETREG_DUMMY, /* dummy device for NAPI poll */
+};
/**
* struct net_device - The DEVICE structure.
@@ -1653,12 +1788,8 @@ enum netdev_priv_flags {
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
*
- * @rx_dropped: Dropped packets by core network,
+ * @core_stats: core networking counters,
* do not use this in drivers
- * @tx_dropped: Dropped packets by core network,
- * do not use this in drivers
- * @rx_nohandler: nohandler dropped packets by core network on
- * inactive devices, do not use this in drivers
* @carrier_up_count: Number of times the carrier has been up
* @carrier_down_count: Number of times the carrier has been down
*
@@ -1669,6 +1800,8 @@ enum netdev_priv_flags {
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
+ * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
+ * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
@@ -1679,6 +1812,7 @@ enum netdev_priv_flags {
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
+ * @xdp_features: XDP capability supported by the device
* @priv_flags: Like 'flags' but invisible to userspace,
* see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
@@ -1732,13 +1866,13 @@ enum netdev_priv_flags {
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
- * @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
* @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
* device struct
* @mpls_ptr: mpls_dev struct pointer
+ * @mctp_ptr: MCTP specific data
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1754,8 +1888,7 @@ enum netdev_priv_flags {
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
- * @miniq_ingress: ingress/clsact qdisc specific data for
- * ingress processing
+ * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing
* @ingress_queue: XXX: need comments on this one
* @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
@@ -1773,12 +1906,11 @@ enum netdev_priv_flags {
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
* @xdp_bulkq: XDP device bulk queue
- * @xps_cpus_map: all CPUs map for XPS device
- * @xps_rxqs_map: all RXQs map for XPS device
+ * @xps_maps: all CPUs/RXQs maps for XPS device
*
* @xps_maps: XXX: need comments on this one
- * @miniq_egress: clsact qdisc specific data for
- * egress processing
+ * @tcx_egress: BPF & clsact qdisc specific data for egress processing
+ * @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
@@ -1786,6 +1918,8 @@ enum netdev_priv_flags {
*
* @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
+ * @dev_refcnt: Number of references to this device
+ * @refcnt_tracker: Tracker directory for tracked references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
*
@@ -1800,24 +1934,36 @@ enum netdev_priv_flags {
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
- * @lstats: Loopback statistics
- * @tstats: Tunnel statistics
- * @dstats: Dummy statistics
- * @vstats: Virtual ethernet statistics
+ * @ml_priv_type: Mid-layer private type
+ *
+ * @pcpu_stat_type: Type of device statistics which the core should
+ * allocate/free: none, lstats, tstats, dstats. none
+ * means the driver is handling statistics allocation/
+ * freeing internally.
+ * @lstats: Loopback statistics: packets, bytes
+ * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
+ * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
*
* @garp_port: GARP
* @mrp_port: MRP
*
+ * @dm_private: Drop monitor private
+ *
* @dev: Class/net/name entry
* @sysfs_groups: Space for optional device, statistics and wireless
* sysfs groups
*
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
+ * @stat_ops: Optional ops for queue-aware statistics
*
* @gso_max_size: Maximum size of generic segmentation offload
+ * @tso_max_size: Device (as in HW) limit on the max TSO request size
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
+ * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
+ * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
+ * for IPv4.
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1832,7 +1978,6 @@ enum netdev_priv_flags {
* @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -1840,6 +1985,8 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @threaded: napi threaded mode is enabled
+ *
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
@@ -1851,11 +1998,109 @@ enum netdev_priv_flags {
* @udp_tunnel_nic: UDP tunnel offload state
* @xdp_state: stores info on attached XDP BPF programs
*
+ * @nested_level: Used as a parameter of spin_lock_nested() of
+ * dev->addr_list_lock.
+ * @unlink_list: As netif_addr_lock() can be called recursively,
+ * keep a list of interfaces to be deleted.
+ * @gro_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO)
+ * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO), for IPv4.
+ * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP
+ * zero copy driver
+ *
+ * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
+ * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
+ * @watchdog_dev_tracker: refcount tracker used by watchdog.
+ * @dev_registered_tracker: tracker for reference held while
+ * registered
+ * @offload_xstats_l3: L3 HW stats for this netdevice.
+ *
+ * @devlink_port: Pointer to related devlink port structure.
+ * Assigned by a driver before netdev registration using
+ * SET_NETDEV_DEVLINK_PORT macro. This pointer is static
+ * during the time the netdevice is registered.
+ *
+ * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
+ * where the clock is recovered.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/net_device.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_tx);
+ unsigned long long priv_flags;
+ const struct net_device_ops *netdev_ops;
+ const struct header_ops *header_ops;
+ struct netdev_queue *_tx;
+ netdev_features_t gso_partial_features;
+ unsigned int real_num_tx_queues;
+ unsigned int gso_max_size;
+ unsigned int gso_ipv4_max_size;
+ u16 gso_max_segs;
+ s16 num_tc;
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+#ifdef CONFIG_XPS
+ struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_egress;
+#endif
+ __cacheline_group_end(net_device_read_tx);
+
+ /* TXRX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_txrx);
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+ };
+ unsigned long state;
+ unsigned int flags;
+ unsigned short hard_header_len;
+ netdev_features_t features;
+ struct inet6_dev __rcu *ip6_ptr;
+ __cacheline_group_end(net_device_read_txrx);
+
+ /* RX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_rx);
+ struct bpf_prog __rcu *xdp_prog;
+ struct list_head ptype_specific;
+ int ifindex;
+ unsigned int real_num_rx_queues;
+ struct netdev_rx_queue *_rx;
+ unsigned long gro_flush_timeout;
+ int napi_defer_hard_irqs;
+ unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
+ rx_handler_func_t __rcu *rx_handler;
+ void __rcu *rx_handler_data;
+ possible_net_t nd_net;
+#ifdef CONFIG_NETPOLL
+ struct netpoll_info __rcu *npinfo;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_ingress;
+#endif
+ __cacheline_group_end(net_device_read_rx);
+
char name[IFNAMSIZ];
struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
@@ -1866,7 +2111,6 @@ struct net_device {
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
- int irq;
/*
* Some hardware also needs these fields (state,dev_list,
@@ -1874,36 +2118,43 @@ struct net_device {
* part of the usual set specified in Space.c.
*/
- unsigned long state;
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
struct list_head ptype_all;
- struct list_head ptype_specific;
struct {
struct list_head upper;
struct list_head lower;
} adj_list;
- netdev_features_t features;
+ /* Read-mostly cache-line for fast-path access */
+ xdp_features_t xdp_features;
+ const struct xdp_metadata_ops *xdp_metadata_ops;
+ const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
+ unsigned short gflags;
+
+ unsigned short needed_tailroom;
+
netdev_features_t hw_features;
netdev_features_t wanted_features;
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
- netdev_features_t gso_partial_features;
- int ifindex;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
+ unsigned short type;
+ unsigned char min_header_len;
+ unsigned char name_assign_type;
+
int group;
- struct net_device_stats stats;
+ struct net_device_stats stats; /* not used by modern drivers */
- atomic_long_t rx_dropped;
- atomic_long_t tx_dropped;
- atomic_long_t rx_nohandler;
+ struct net_device_core_stats __percpu *core_stats;
/* Stats to monitor link on/off, flapping */
atomic_t carrier_up_count;
@@ -1913,7 +2164,6 @@ struct net_device {
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
#endif
- const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
@@ -1930,47 +2180,27 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
- const struct header_ops *header_ops;
-
- unsigned int flags;
- unsigned int priv_flags;
-
- unsigned short gflags;
- unsigned short padded;
-
- unsigned char operstate;
+ unsigned int operstate;
unsigned char link_mode;
unsigned char if_port;
unsigned char dma;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned int min_mtu;
- unsigned int max_mtu;
- unsigned short type;
- unsigned short hard_header_len;
- unsigned char min_header_len;
-
- unsigned short needed_headroom;
- unsigned short needed_tailroom;
-
/* Interface address info. */
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
+
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
+ unsigned short padded;
+
spinlock_t addr_list_lock;
- unsigned char name_assign_type;
- bool uc_promisc;
+ int irq;
+
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
@@ -1978,12 +2208,19 @@ struct net_device {
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct list_head unlink_list;
+#endif
unsigned int promiscuity;
unsigned int allmulti;
+ bool uc_promisc;
+#ifdef CONFIG_LOCKDEP
+ unsigned char nested_level;
+#endif
/* Protocol-specific pointers */
-
+ struct in_device __rcu *ip_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -1993,42 +2230,38 @@ struct net_device {
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
-#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
#endif
- struct in_device __rcu *ip_ptr;
-#if IS_ENABLED(CONFIG_DECNET)
- struct dn_dev __rcu *dn_ptr;
-#endif
- struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
#endif
+#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
+#endif
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
+#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct mctp_dev __rcu *mctp_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
/* Interface address info used in eth_type_trans() */
- unsigned char *dev_addr;
+ const unsigned char *dev_addr;
- struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
- unsigned int real_num_rx_queues;
-
- struct bpf_prog __rcu *xdp_prog;
- unsigned long gro_flush_timeout;
- int napi_defer_hard_irqs;
- rx_handler_func_t __rcu *rx_handler;
- void __rcu *rx_handler_data;
-
-#ifdef CONFIG_NET_CLS_ACT
- struct mini_Qdisc __rcu *miniq_ingress;
-#endif
+#define GRO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GRO_MAX_SIZE (8 * 65535u)
+ unsigned int xdp_zc_max_segs;
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
struct nf_hook_entries __rcu *nf_hooks_ingress;
@@ -2043,23 +2276,13 @@ struct net_device {
/*
* Cache lines mostly used on transmit path
*/
- struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
- unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
-#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_cpus_map;
- struct xps_dev_maps __rcu *xps_rxqs_map;
-#endif
-#ifdef CONFIG_NET_CLS_ACT
- struct mini_Qdisc __rcu *miniq_egress;
-#endif
-
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
@@ -2070,17 +2293,17 @@ struct net_device {
u32 proto_down_reason;
struct list_head todo_list;
+
+#ifdef CONFIG_PCPU_DEV_REFCNT
int __percpu *pcpu_refcnt;
+#else
+ refcount_t dev_refcnt;
+#endif
+ struct ref_tracker_dir refcnt_tracker;
struct list_head link_watch_list;
- enum { NETREG_UNINITIALIZED=0,
- NETREG_REGISTERED, /* completed register_netdevice */
- NETREG_UNREGISTERING, /* called unregister_netdevice */
- NETREG_UNREGISTERED, /* completed unregister todo */
- NETREG_RELEASED, /* called free_netdev */
- NETREG_DUMMY, /* dummy device for NAPI poll */
- } reg_state:8;
+ u8 reg_state;
bool dismantle;
@@ -2092,19 +2315,11 @@ struct net_device {
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
-#ifdef CONFIG_NETPOLL
- struct netpoll_info __rcu *npinfo;
-#endif
-
- possible_net_t nd_net;
-
/* mid-layer private */
- union {
- void *ml_priv;
- struct pcpu_lstats __percpu *lstats;
- struct pcpu_sw_netstats __percpu *tstats;
- struct pcpu_dstats __percpu *dstats;
- };
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
+ enum netdev_stat_type pcpu_stat_type:8;
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
@@ -2112,24 +2327,34 @@ struct net_device {
#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
#endif
-
+#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
+ struct dm_hw_stat_delta __rcu *dm_private;
+#endif
struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
const struct rtnl_link_ops *rtnl_link_ops;
+ const struct netdev_stat_ops *stat_ops;
+
/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE 65536
- unsigned int gso_max_size;
-#define GSO_MAX_SEGS 65535
- u16 gso_max_segs;
+#define GSO_MAX_SEGS 65535u
+#define GSO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
+
+#define TSO_LEGACY_MAX_SIZE 65536
+#define TSO_MAX_SIZE UINT_MAX
+ unsigned int tso_max_size;
+#define TSO_MAX_SEGS U16_MAX
+ u16 tso_max_segs;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- s16 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
@@ -2141,9 +2366,9 @@ struct net_device {
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
bool proto_down;
unsigned wol_enabled:1;
+ unsigned threaded:1;
struct list_head net_notifier_list;
@@ -2156,9 +2381,36 @@ struct net_device {
/* protected by rtnl_lock */
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
+
+ u8 dev_addr_shadow[MAX_ADDR_LEN];
+ netdevice_tracker linkwatch_dev_tracker;
+ netdevice_tracker watchdog_dev_tracker;
+ netdevice_tracker dev_registered_tracker;
+ struct rtnl_hw_stats64 *offload_xstats_l3;
+
+ struct devlink_port *devlink_port;
+
+#if IS_ENABLED(CONFIG_DPLL)
+ struct dpll_pin __rcu *dpll_pin;
+#endif
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ /** @page_pools: page pools created for this netdevice */
+ struct hlist_head page_pools;
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
+/*
+ * A driver should use this to assign a devlink port instance to a netdevice
+ * before it registers the netdevice. Therefore devlink_port is static
+ * during the netdev lifetime after it is registered.
+ */
+#define SET_NETDEV_DEVLINK_PORT(dev, port) \
+({ \
+ WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
+ ((dev)->devlink_port = (port)); \
+})
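
Typical probe-time usage, sketched with hypothetical priv/netdev names:

static int example_probe(struct example_priv *priv)
{
    struct net_device *netdev = priv->netdev;

    /* Must run while reg_state is still NETREG_UNINITIALIZED. */
    SET_NETDEV_DEVLINK_PORT(netdev, &priv->devlink_port);

    return register_netdev(netdev);
}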
+
static inline bool netif_elide_gro(const struct net_device *dev)
{
if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
@@ -2195,6 +2447,22 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+static inline void net_prefetch(void *p)
+{
+ prefetch(p);
+#if L1_CACHE_BYTES < 128
+ prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+static inline void net_prefetchw(void *p)
+{
+ prefetchw(p);
+#if L1_CACHE_BYTES < 128
+ prefetchw((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
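
A sketch of the intended use in an RX completion path: warm up to two cache lines of packet data before header parsing (the descriptor helpers are hypothetical):

static void example_rx_one(struct example_rx_ring *ring)
{
    void *data = example_rx_desc_data(ring);

    net_prefetch(data);    /* covers 128 bytes on 64-byte-cache-line hosts */
    example_build_and_pass_skb(ring, data);
}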
+
void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
@@ -2210,6 +2478,7 @@ static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
+ DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
return &dev->_tx[index];
}
@@ -2234,13 +2503,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
#define netdev_lockdep_set_classes(dev) \
{ \
static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
static struct lock_class_key qdisc_xmit_lock_key; \
static struct lock_class_key dev_addr_list_lock_key; \
unsigned int i; \
\
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
lockdep_set_class(&(dev)->addr_list_lock, \
&dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
@@ -2274,6 +2541,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
netdev_set_rx_headroom(dev, -1);
}
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+ enum netdev_ml_priv_type type)
+{
+ if (dev->ml_priv_type != type)
+ return NULL;
+
+ return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+ void *ml_priv,
+ enum netdev_ml_priv_type type)
+{
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+ dev->ml_priv_type, type);
+ WARN(!dev->ml_priv_type && dev->ml_priv,
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+ dev->ml_priv = ml_priv;
+ dev->ml_priv_type = type;
+}
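
A short usage sketch: readers check the tag before trusting ml_priv (struct can_ml_priv stands in for the CAN midlayer's private type):

static struct can_ml_priv *example_get_can_ml(struct net_device *dev)
{
    /* Returns NULL if ml_priv was set by someone other than CAN. */
    return netdev_get_ml_priv(dev, ML_PRIV_CAN);
}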
+
/*
* Net namespace inlines
*/
@@ -2311,159 +2601,93 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type,
+ struct napi_struct *napi);
+
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+ napi->irq = irq;
+}
+
/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value
*/
#define NAPI_POLL_WEIGHT 64
+void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight);
+
/**
- * netif_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add() - initialize a NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+static inline void
+netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
+netif_napi_add_tx_weight(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add_weight(dev, napi, poll, weight);
+}
/**
- * netif_tx_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* This variant of netif_napi_add() should be used from drivers using NAPI
* to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[] and thus polluting that hash table.
*/
-static inline void netif_tx_napi_add(struct net_device *dev,
+static inline void netif_napi_add_tx(struct net_device *dev,
struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
- int weight)
+ int (*poll)(struct napi_struct *, int))
{
- set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
- netif_napi_add(dev, napi, poll, weight);
+ netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
/**
+ * __netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
+ *
+ * Warning: caller must observe RCU grace period before freeing memory
+ * containing @napi. Drivers might want to call this helper to combine
+ * all the needed RCU grace periods into a single one.
+ */
+void __netif_napi_del(struct napi_struct *napi);
+
+/**
* netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* netif_napi_del() removes a NAPI context from the network device NAPI list
*/
-void netif_napi_del(struct napi_struct *napi);
-
-struct napi_gro_cb {
- /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
-
- /* Length of frag0. */
- unsigned int frag0_len;
-
- /* This indicates where we are processing relative to skb->data. */
- int data_offset;
-
- /* This is non-zero if the packet cannot be merged with the new skb. */
- u16 flush;
-
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
- /* Number of segments aggregated. */
- u16 count;
-
- /* Start offset for remote checksum offload */
- u16 gro_remcsum_start;
-
- /* jiffies when first packet was created/queued */
- unsigned long age;
-
- /* Used in ipv6_gro_receive() and foo-over-udp */
- u16 proto;
-
- /* This is non-zero if the packet may be of the same flow. */
- u8 same_flow:1;
-
- /* Used in tunnel GRO receive */
- u8 encap_mark:1;
-
- /* GRO checksum is valid */
- u8 csum_valid:1;
-
- /* Number of checksums via CHECKSUM_UNNECESSARY */
- u8 csum_cnt:3;
-
- /* Free the skb? */
- u8 free:2;
-#define NAPI_GRO_FREE 1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
-
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
- /* Used in GRE, set in fou/gue_gro_receive */
- u8 is_fou:1;
-
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
-
- /* Number of gro_receive callbacks this packet already went through */
- u8 recursion_counter:4;
-
- /* GRO is done by frag_list pointer chaining. */
- u8 is_flist:1;
-
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-
- /* used in skb_gro_receive() slow path */
- struct sk_buff *last;
-};
-
-#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
-
-#define GRO_RECURSION_LIMIT 15
-static inline int gro_recursion_inc_test(struct sk_buff *skb)
-{
- return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
-}
-
-typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
-static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(head, skb);
-}
-
-typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
- struct sk_buff *);
-static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct list_head *head,
- struct sk_buff *skb)
+static inline void netif_napi_del(struct napi_struct *napi)
{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(sk, head, skb);
+ __netif_napi_del(napi);
+ synchronize_net();
}
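
A sketch of the new lifetime pattern: the three-argument netif_napi_add() picks the default weight, and teardown can batch several removals under a single grace period (the poll functions are hypothetical):

static void example_setup_napi(struct example_priv *priv)
{
    netif_napi_add(priv->dev, &priv->rx_napi, example_rx_poll);
    netif_napi_add_tx(priv->dev, &priv->tx_napi, example_tx_poll);
}

static void example_teardown_napi(struct example_priv *priv)
{
    __netif_napi_del(&priv->rx_napi);
    __netif_napi_del(&priv->tx_napi);
    synchronize_net();    /* one RCU grace period covers both */
}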
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
+ netdevice_tracker dev_tracker;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
@@ -2473,6 +2697,7 @@ struct packet_type {
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
+ struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};
@@ -2494,13 +2719,23 @@ struct packet_offload {
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));
+struct pcpu_dstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ struct u64_stats_sync syncp;
+} __aligned(8 * sizeof(u64));
+
struct pcpu_lstats {
u64_stats_t packets;
u64_stats_t bytes;
@@ -2509,6 +2744,28 @@ struct pcpu_lstats {
void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->rx_bytes, len);
+ u64_stats_inc(&tstats->rx_packets);
+ u64_stats_update_end(&tstats->syncp);
+}
+
+static inline void dev_sw_netstats_tx_add(struct net_device *dev,
+ unsigned int packets,
+ unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_bytes, len);
+ u64_stats_add(&tstats->tx_packets, packets);
+ u64_stats_update_end(&tstats->syncp);
+}
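
For illustration, a tunnel-style xmit path bumping the per-CPU SW counters; this assumes dev->tstats points at pcpu_sw_netstats storage and the encap helper is hypothetical:

static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
{
    unsigned int len = skb->len;    /* read before the skb is consumed */

    example_tunnel_encap_and_send(skb);
    dev_sw_netstats_tx_add(dev, 1, len);
    return NETDEV_TX_OK;
}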
+
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
@@ -2536,6 +2793,20 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+#define devm_netdev_alloc_pcpu_stats(dev, type) \
+({ \
+ typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
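
Sketch of probe-time usage: the device-managed variant ties the per-CPU stats lifetime to the struct device, so the error and remove paths need no explicit free_percpu() (pdev/ndev are placeholders):

    ndev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
                                                struct pcpu_sw_netstats);
    if (!ndev->tstats)
        return -ENOMEM;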
+
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
@@ -2552,6 +2823,7 @@ enum netdev_lag_hash {
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
+ NETDEV_LAG_HASH_VLAN_SRCMAC,
NETDEV_LAG_HASH_UNKNOWN,
};
@@ -2592,6 +2864,7 @@ enum netdev_cmd {
NETDEV_PRE_TYPE_CHANGE,
NETDEV_POST_TYPE_CHANGE,
NETDEV_POST_INIT,
+ NETDEV_PRE_UNINIT,
NETDEV_RELEASE,
NETDEV_NOTIFY_PEERS,
NETDEV_JOIN,
@@ -2609,6 +2882,11 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
+ NETDEV_OFFLOAD_XSTATS_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_REPORT_USED,
+ NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
@@ -2659,6 +2937,42 @@ struct netdev_notifier_pre_changeaddr_info {
const unsigned char *dev_addr;
};
+enum netdev_offload_xstats_type {
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
+};
+
+struct netdev_notifier_offload_xstats_info {
+ struct netdev_notifier_info info; /* must be first */
+ enum netdev_offload_xstats_type type;
+
+ union {
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
+ struct netdev_notifier_offload_xstats_rd *report_delta;
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
+ struct netdev_notifier_offload_xstats_ru *report_used;
+ };
+};
+
+int netdev_offload_xstats_enable(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct netlink_ext_ack *extack);
+int netdev_offload_xstats_disable(struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+bool netdev_offload_xstats_enabled(const struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+int netdev_offload_xstats_get(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct rtnl_hw_stats64 *stats, bool *used,
+ struct netlink_ext_ack *extack);
+void
+netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
+ const struct rtnl_hw_stats64 *stats);
+void
+netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
+void netdev_offload_xstats_push_delta(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ const struct rtnl_hw_stats64 *stats);
+
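A sketch of the notifier side of the delta protocol; the HW snapshot helper is invented:

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
    struct netdev_notifier_offload_xstats_info *info = ptr;
    struct rtnl_hw_stats64 stats = {};

    if (event != NETDEV_OFFLOAD_XSTATS_REPORT_DELTA ||
        info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
        return NOTIFY_DONE;

    example_read_hw_l3_stats(netdev_notifier_info_to_dev(&info->info),
                             &stats);
    netdev_offload_xstats_report_delta(info->report_delta, &stats);
    return NOTIFY_OK;
}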
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2679,9 +2993,8 @@ netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
}
int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
-
-
-extern rwlock_t dev_base_lock; /* Device list lock */
+int call_netdevice_notifiers_info(unsigned long val,
+ struct netdev_notifier_info *info);
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
@@ -2703,6 +3016,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */
if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
+#define for_each_netdev_dump(net, d, ifindex) \
+ xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
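
Sketch of the intended dump pattern: the iterator resumes from an ifindex cookie instead of skipping a count of already-dumped entries (the fill helper is hypothetical):

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct net *net = sock_net(skb->sk);
    unsigned long ifindex = cb->args[0];
    struct net_device *dev;

    rcu_read_lock();
    for_each_netdev_dump(net, dev, ifindex) {
        if (example_fill_one(skb, dev) < 0)
            break;    /* resume from this ifindex next time */
    }
    rcu_read_unlock();
    cb->args[0] = ifindex;
    return skb->len;
}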
+
static inline struct net_device *next_net_device(struct net_device *dev)
{
struct list_head *lh;
@@ -2737,11 +3053,9 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
}
int netdev_boot_setup_check(struct net_device *dev);
-unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
@@ -2750,11 +3064,14 @@ void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
+int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
+ struct net_device_path_stack *stack);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
+bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
@@ -2765,9 +3082,31 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return __dev_queue_xmit(skb, sb_dev);
+}
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+ int ret;
+
+ ret = __dev_direct_xmit(skb, queue_id);
+ if (!dev_xmit_complete(ret))
+ kfree_skb(skb);
+ return ret;
+}
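
Note the ownership contract this wrapper preserves: dev_direct_xmit() still consumes the skb on failure, while the new __dev_direct_xmit() leaves it to the caller (AF_XDP recycles the frame instead of freeing it). A caller sketch:

static void example_send_on_queue(struct sk_buff *skb, u16 queue_id)
{
    int ret = dev_direct_xmit(skb, queue_id);

    /* No kfree_skb() here: the wrapper already freed the skb if the
     * driver did not accept it.
     */
    if (!dev_xmit_complete(ret))
        pr_debug("xmit on queue %u failed (%d)\n", queue_id, ret);
}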
+
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -2779,266 +3118,21 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-void synchronize_net(void);
-int init_dummy_netdev(struct net_device *dev);
+void init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
+struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
+ struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *netdev_get_by_index(struct net *net, int ifindex,
+ netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_name(struct net *net, const char *name,
+ netdevice_tracker *tracker, gfp_t gfp);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int netdev_get_name(struct net *net, char *name, int ifindex);
-int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
-int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
-
-static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
-{
- return NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline unsigned int skb_gro_len(const struct sk_buff *skb)
-{
- return skb->len - NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
-{
- NAPI_GRO_CB(skb)->data_offset += len;
-}
-
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
- unsigned int offset)
-{
- return NAPI_GRO_CB(skb)->frag0 + offset;
-}
-
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
-{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-}
-
-static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
- unsigned int offset)
-{
- if (!pskb_may_pull(skb, hlen))
- return NULL;
-
- skb_gro_frag0_invalidate(skb);
- return skb->data + offset;
-}
-
-static inline void *skb_gro_network_header(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
-}
-
-static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
-{
- if (NAPI_GRO_CB(skb)->csum_valid)
- NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
- csum_partial(start, len, 0));
-}
-
-/* GRO checksum functions. These are logical equivalents of the normal
- * checksum functions (in skbuff.h) except that they operate on the GRO
- * offsets and fields in sk_buff.
- */
-
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
-
-static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
-}
-
-static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
- bool zero_okay,
- __sum16 check)
-{
- return ((skb->ip_summed != CHECKSUM_PARTIAL ||
- skb_checksum_start_offset(skb) <
- skb_gro_offset(skb)) &&
- !skb_at_gro_remcsum_start(skb) &&
- NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- (!zero_okay || check));
-}
-
-static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
- __wsum psum)
-{
- if (NAPI_GRO_CB(skb)->csum_valid &&
- !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
- return 0;
-
- NAPI_GRO_CB(skb)->csum = psum;
-
- return __skb_gro_checksum_complete(skb);
-}
-
-static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
-{
- if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
- /* Consume a checksum from CHECKSUM_UNNECESSARY */
- NAPI_GRO_CB(skb)->csum_cnt--;
- } else {
- /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
- * verified a new top level checksum or an encapsulated one
- * during GRO. This saves work if we fallback to normal path.
- */
- __skb_incr_checksum_unnecessary(skb);
- }
-}
-
-#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
- compute_pseudo) \
-({ \
- __sum16 __ret = 0; \
- if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
- __ret = __skb_gro_checksum_validate_complete(skb, \
- compute_pseudo(skb, proto)); \
- if (!__ret) \
- skb_gro_incr_csum_unnecessary(skb); \
- __ret; \
-})
-
-#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
-
-#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
- compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
-
-#define skb_gro_checksum_simple_validate(skb) \
- __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
-
-static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- !NAPI_GRO_CB(skb)->csum_valid);
-}
-
-static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __wsum pseudo)
-{
- NAPI_GRO_CB(skb)->csum = ~pseudo;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-}
-
-#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
-do { \
- if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, \
- compute_pseudo(skb, proto)); \
-} while (0)
-
-struct gro_remcsum {
- int offset;
- __wsum delta;
-};
-
-static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
-{
- grc->offset = 0;
- grc->delta = 0;
-}
-
-static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- unsigned int off, size_t hdrlen,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
-{
- __wsum delta;
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-
- BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
-
- if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
- return ptr;
- }
-
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
-
- delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
- start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
-
- grc->offset = off + hdrlen + offset;
- grc->delta = delta;
-
- return ptr;
-}
-
-static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
- struct gro_remcsum *grc)
-{
- void *ptr;
- size_t plen = grc->offset + sizeof(u16);
-
- if (!grc->delta)
- return;
-
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
-
- remcsum_unadjust((__sum16 *)ptr, grc->delta);
-}
-
-#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- if (PTR_ERR(pp) != -EINPROGRESS)
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- if (PTR_ERR(pp) != -EINPROGRESS) {
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
- }
-}
-#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
-}
-#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -3090,27 +3184,11 @@ static inline bool dev_validate_header(const struct net_device *dev,
return false;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
- int len, int size);
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
-static inline int unregister_gifconf(unsigned int family)
+static inline bool dev_has_header(const struct net_device *dev)
{
- return register_gifconf(family, NULL);
+ return dev->header_ops && dev->header_ops->create;
}
-#ifdef CONFIG_NET_FLOW_LIMIT
-#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
-struct sd_flow_limit {
- u64 count;
- unsigned int num_buckets;
- unsigned int history_head;
- u16 history[FLOW_LIMIT_HISTORY];
- u8 buckets[];
-};
-
-extern int netdev_flow_limit_table_len;
-#endif /* CONFIG_NET_FLOW_LIMIT */
-
/*
* Incoming packets are placed on per-CPU queues
*/
@@ -3121,10 +3199,13 @@ struct softnet_data {
/* stats */
unsigned int processed;
unsigned int time_squeeze;
- unsigned int received_rps;
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
#endif
+
+ bool in_net_rx_action;
+ bool in_napi_threaded_poll;
+
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
#endif
@@ -3138,6 +3219,9 @@ struct softnet_data {
struct {
u16 recursion;
u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
} xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
@@ -3151,10 +3235,17 @@ struct softnet_data {
unsigned int cpu;
unsigned int input_queue_tail;
#endif
+ unsigned int received_rps;
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ /* Another possibly contended cache line */
+ spinlock_t defer_lock ____cacheline_aligned_in_smp;
+ int defer_count;
+ int defer_ipi_scheduled;
+ struct sk_buff *defer_list;
+ call_single_data_t defer_csd;
};
static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -3259,6 +3350,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
+	/* Must be an atomic op; see netif_txq_try_stop(). */
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -3310,6 +3402,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
}
/**
+ * netdev_queue_set_dql_min_limit - set dql minimum limit
+ * @dev_queue: pointer to transmit queue
+ * @min_limit: dql minimum limit
+ *
+ * Forces xmit_more() to return true until the minimum threshold
+ * defined by @min_limit is reached (or until the tx queue is
+ * empty). Warning: to be used with care; misuse will impact
+ * latency.
+ */
+static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
+ unsigned int min_limit)
+{
+#ifdef CONFIG_BQL
+ dev_queue->dql.min_limit = min_limit;
+#endif
+}
+
+static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+	/* Drivers not migrated to BQL will also return 0. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
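
As a usage sketch (not part of this patch; the driver helper and the 4 KB
threshold are illustrative), a driver that wants xmit_more() batching could
raise the DQL floor on each of its transmit queues at setup time:

static void my_dev_init_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* keep xmit_more() true until ~4 KB are in flight */
		netdev_queue_set_dql_min_limit(txq, 4096);
	}
}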
+
+/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
*
@@ -3337,6 +3457,16 @@ static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_qu
#endif
}
+/**
+ * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
+ * @dev_queue: network device queue
+ * @bytes: number of bytes queued to the device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and must
+ * exactly match the @bytes value later passed to netdev_completed_queue().
+ * This is typically called once per packet, from ndo_start_xmit().
+ */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
@@ -3382,13 +3512,14 @@ static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
}
/**
- * netdev_sent_queue - report the number of bytes queued to hardware
- * @dev: network device
- * @bytes: number of bytes queued to the hardware device queue
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
*
- * Report the number of bytes queued for sending/completion to the network
- * device hardware queue. @bytes should be a good approximation and should
- * exactly match netdev_completed_queue() @bytes
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue #0. @bytes should be a good approximation and must
+ * exactly match the @bytes value later passed to netdev_completed_queue().
+ * This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
@@ -3403,6 +3534,15 @@ static inline bool __netdev_sent_queue(struct net_device *dev,
xmit_more);
}
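
A sketch of the BQL pairing the comments above and below describe (the
function names and driver details are hypothetical): the bytes reported at
transmit time must be matched exactly at completion time.

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	/* ... hand the skb to the hardware ring here ... */
	netdev_tx_sent_queue(txq, skb->len);	/* once per packet */
	return NETDEV_TX_OK;
}

static void my_tx_clean(struct netdev_queue *txq, unsigned int pkts,
			unsigned int bytes)
{
	/* once per completion round, with the same byte accounting */
	netdev_tx_completed_queue(txq, pkts, bytes);
}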
+/**
+ * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
+ * @dev_queue: network device queue
+ * @pkts: number of packets (currently ignored)
+ * @bytes: number of bytes dequeued from the device queue
+ *
+ * Must be called at most once per TX completion round (and not per
+ * individual packet), so that BQL can adjust its limits appropriately.
+ */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
@@ -3417,7 +3557,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
* netdev_tx_sent_queue will miss the update and cause the queue to
* be stopped forever
*/
- smp_mb();
+ smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
if (unlikely(dql_avail(&dev_queue->dql) < 0))
return;
@@ -3529,7 +3669,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
}
/**
- * netif_subqueue_stopped - test status of subqueue
+ * __netif_subqueue_stopped - test status of subqueue
* @dev: network device
* @queue_index: sub queue index
*
@@ -3543,6 +3683,13 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev,
return netif_tx_queue_stopped(txq);
}
+/**
+ * netif_subqueue_stopped - test status of subqueue
+ * @dev: network device
+ * @skb: sub queue buffer pointer
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
@@ -3567,7 +3714,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
- u16 index, bool is_rxqs_map);
+ u16 index, enum xps_map_type type);
/**
* netif_attr_test_mask - Test a CPU or Rx queue set in a mask
@@ -3662,7 +3809,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
static inline int __netif_set_xps_queue(struct net_device *dev,
const unsigned long *mask,
- u16 index, bool is_rxqs_map)
+ u16 index, enum xps_map_type type)
{
return 0;
}
@@ -3691,40 +3838,18 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
return 0;
}
#endif
+int netif_set_real_num_queues(struct net_device *dev,
+ unsigned int txq, unsigned int rxq);
-static inline struct netdev_rx_queue *
-__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
-{
- return dev->_rx + rxq;
-}
-
-#ifdef CONFIG_SYSFS
-static inline unsigned int get_netdev_rx_queue_index(
- struct netdev_rx_queue *queue)
-{
- struct net_device *dev = queue->dev;
- int index = queue - dev->_rx;
-
- BUG_ON(index >= dev->num_rx_queues);
- return index;
-}
-#endif
-
-#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
int netif_get_num_default_rss_queues(void);
-enum skb_free_reason {
- SKB_REASON_CONSUMED,
- SKB_REASON_DROPPED,
-};
-
-void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
-void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
+void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
/*
* It is not allowed to call kfree_skb() or consume_skb() from hardware
* interrupt context or with hardware interrupts being disabled.
- * (in_irq() || irqs_disabled())
+ * (in_hardirq() || irqs_disabled())
*
 * We provide four helpers that can be used in the following contexts:
*
@@ -3742,37 +3867,40 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
*/
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
- __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+ dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
- __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+ dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
- __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+ dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_any(struct sk_buff *skb)
{
- __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+ dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}
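
A minimal sketch of the rule above (the ISR and its private state are made
up): from hard interrupt context only the _irq/_any variants are safe, since
plain kfree_skb()/consume_skb() must not run there.

struct my_priv {
	struct sk_buff *pending_skb;
};

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_priv *priv = data;
	struct sk_buff *skb = priv->pending_skb;

	if (skb) {
		priv->pending_skb = NULL;
		dev_kfree_skb_irq(skb);	/* kfree_skb() would be a bug here */
	}
	return IRQ_HANDLED;
}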
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
-int netif_rx_ni(struct sk_buff *skb);
+int __netif_rx(struct sk_buff *skb);
+
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
+void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
-struct packet_offload *gro_find_receive_by_type(__be16 type);
-struct packet_offload *gro_find_complete_by_type(__be16 type);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -3787,73 +3915,118 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
+static inline bool is_socket_ioctl_cmd(unsigned int cmd)
+{
+ return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
+}
+int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
+int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
- bool *need_copyout);
-int dev_ifconf(struct net *net, struct ifconf *, int);
-int dev_ethtool(struct net *net, struct ifreq *);
+ void __user *data, bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int generic_hwtstamp_get_lower(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_cfg);
+int generic_hwtstamp_set_lower(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_cfg,
+ struct netlink_ext_ack *extack);
+int dev_set_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
-void __dev_notify_flags(struct net_device *, unsigned int old_flags,
- unsigned int gchanges);
-int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
-int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ const char *pat, int new_ifindex);
+static inline
+int dev_change_net_namespace(struct net_device *dev, struct net *net,
+ const char *pat)
+{
+ return __dev_change_net_namespace(dev, net, pat, 0);
+}
int __dev_set_mtu(struct net_device *, int);
-int dev_validate_mtu(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
-int dev_set_mtu_ext(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
-int dev_change_tx_queue_len(struct net_device *, unsigned long);
-void dev_set_group(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
-int dev_change_carrier(struct net_device *, bool new_carrier);
-int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid);
-int dev_get_phys_port_name(struct net_device *dev,
- char *name, size_t len);
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
-void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
- u32 value);
+
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
-typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+u8 dev_xdp_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
-int xdp_umem_query(struct net_device *dev, u16 queue_id);
-
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb,
+ const bool check_mtu)
+{
+ const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
+ unsigned int len;
+
+ if (!(dev->flags & IFF_UP))
+ return false;
+
+ if (!check_mtu)
+ return true;
+
+ len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
+ if (skb->len <= len)
+ return true;
+
+	/* if TSO is enabled, the length does not matter: a GSO packet
+	 * can still be segmented to fit before it is forwarded
+	 */
+ if (skb_is_gso(skb))
+ return true;
+
+ return false;
+}
+
+void netdev_core_stats_inc(struct net_device *dev, u32 offset);
+
+#define DEV_CORE_STATS_INC(FIELD) \
+static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
+{ \
+ netdev_core_stats_inc(dev, \
+ offsetof(struct net_device_core_stats, FIELD)); \
+}
+DEV_CORE_STATS_INC(rx_dropped)
+DEV_CORE_STATS_INC(tx_dropped)
+DEV_CORE_STATS_INC(rx_nohandler)
+DEV_CORE_STATS_INC(rx_otherhost_dropped)
+#undef DEV_CORE_STATS_INC
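
Each expansion of the macro above yields a per-field helper; e.g. an RX path
with no registered handler could account the drop like this (sketch,
hypothetical function):

static void my_rx_no_handler(struct net_device *dev, struct sk_buff *skb)
{
	/* generated by DEV_CORE_STATS_INC(rx_nohandler) */
	dev_core_stats_rx_nohandler_inc(dev);
	kfree_skb(skb);
}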
+
static __always_inline int ____dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
- unlikely(!is_skb_forwardable(dev, skb))) {
- atomic_long_inc(&dev->rx_dropped);
+ unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
+ dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb, true);
+ skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
@@ -3861,21 +4034,73 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
+static inline void __dev_put(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_dec(*dev->pcpu_refcnt);
+#else
+ refcount_dec(&dev->dev_refcnt);
+#endif
+ }
+}
-/* Called by rtnetlink.c:rtnl_unlock() */
-void netdev_run_todo(void);
+static inline void __dev_hold(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_inc(*dev->pcpu_refcnt);
+#else
+ refcount_inc(&dev->dev_refcnt);
+#endif
+ }
+}
-/**
- * dev_put - release reference to device
- * @dev: network device
- *
- * Release reference to device to allow it to be freed.
+static inline void __netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
+#endif
+}
+
+/* netdev_tracker_alloc() can upgrade a prior untracked reference
+ * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
*/
-static inline void dev_put(struct net_device *dev)
+static inline void netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ refcount_dec(&dev->refcnt_tracker.no_tracker);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+#endif
+}
+
+static inline void netdev_tracker_free(struct net_device *dev,
+ netdevice_tracker *tracker)
{
- this_cpu_dec(*dev->pcpu_refcnt);
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_free(&dev->refcnt_tracker, tracker);
+#endif
+}
+
+static inline void netdev_hold(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+ if (dev) {
+ __dev_hold(dev);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+ }
+}
+
+static inline void netdev_put(struct net_device *dev,
+ netdevice_tracker *tracker)
+{
+ if (dev) {
+ netdev_tracker_free(dev, tracker);
+ __dev_put(dev);
+ }
}
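
A sketch of the tracked-reference pattern (the binding structure is
hypothetical): the tracker ties each hold to its matching put when
CONFIG_NET_DEV_REFCNT_TRACKER is set, and compiles away otherwise.

struct my_binding {
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void my_bind(struct my_binding *b, struct net_device *dev)
{
	b->dev = dev;
	netdev_hold(dev, &b->dev_tracker, GFP_KERNEL);
}

static void my_unbind(struct my_binding *b)
{
	netdev_put(b->dev, &b->dev_tracker);
	b->dev = NULL;
}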
/**
@@ -3883,10 +4108,38 @@ static inline void dev_put(struct net_device *dev)
* @dev: network device
*
* Hold reference to device to keep it from being freed.
+ * Try using netdev_hold() instead.
*/
static inline void dev_hold(struct net_device *dev)
{
- this_cpu_inc(*dev->pcpu_refcnt);
+ netdev_hold(dev, NULL, GFP_ATOMIC);
+}
+
+/**
+ * dev_put - release reference to device
+ * @dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ * Try using netdev_put() instead.
+ */
+static inline void dev_put(struct net_device *dev)
+{
+ netdev_put(dev, NULL);
+}
+
+static inline void netdev_ref_replace(struct net_device *odev,
+ struct net_device *ndev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+ if (odev)
+ netdev_tracker_free(odev, tracker);
+
+ __dev_hold(ndev);
+ __dev_put(odev);
+
+ if (ndev)
+ __netdev_tracker_alloc(ndev, tracker, gfp);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
@@ -3897,10 +4150,16 @@ static inline void dev_hold(struct net_device *dev)
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
-
-void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
-void linkwatch_forget_dev(struct net_device *dev);
+
+/**
+ * linkwatch_sync_dev - sync linkwatch for the given device
+ * @dev: network device to sync linkwatch for
+ *
+ * Sync linkwatch for the given device, removing it from the
+ * pending work list (if queued).
+ */
+void linkwatch_sync_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -3918,8 +4177,8 @@ unsigned long dev_trans_start(struct net_device *dev);
void __netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
-
void netif_carrier_off(struct net_device *dev);
+void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
@@ -4011,8 +4270,10 @@ static inline bool netif_testing(const struct net_device *dev)
*/
static inline bool netif_oper_up(const struct net_device *dev)
{
- return (dev->operstate == IF_OPER_UP ||
- dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+ unsigned int operstate = READ_ONCE(dev->operstate);
+
+ return operstate == IF_OPER_UP ||
+ operstate == IF_OPER_UNKNOWN /* backward compat */;
}
/**
@@ -4021,7 +4282,7 @@ static inline bool netif_oper_up(const struct net_device *dev)
*
* Check if device has not been removed from system.
*/
-static inline bool netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(const struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
@@ -4108,7 +4369,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4125,33 +4387,50 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+
+ if (likely(ok)) {
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
+/*
+ * txq->trans_start can be read locklessly from dev_watchdog()
+ */
static inline void txq_trans_update(struct netdev_queue *txq)
{
if (txq->xmit_lock_owner != -1)
- txq->trans_start = jiffies;
+ WRITE_ONCE(txq->trans_start, jiffies);
+}
+
+static inline void txq_trans_cond_update(struct netdev_queue *txq)
+{
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(txq->trans_start) != now)
+ WRITE_ONCE(txq->trans_start, now);
}
/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
@@ -4159,8 +4438,7 @@ static inline void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
- if (txq->trans_start != jiffies)
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
}
/**
@@ -4169,27 +4447,7 @@ static inline void netif_trans_update(struct net_device *dev)
*
* Get network device transmit lock
*/
-static inline void netif_tx_lock(struct net_device *dev)
-{
- unsigned int i;
- int cpu;
-
- spin_lock(&dev->tx_global_lock);
- cpu = smp_processor_id();
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* We are the only thread of execution doing a
- * freeze, but we have to grab the _xmit_lock in
- * order to synchronize with threads which are in
- * the ->hard_start_xmit() handler and already
- * checked the frozen bit.
- */
- __netif_tx_lock(txq, cpu);
- set_bit(__QUEUE_STATE_FROZEN, &txq->state);
- __netif_tx_unlock(txq);
- }
-}
+void netif_tx_lock(struct net_device *dev);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
@@ -4197,22 +4455,7 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
netif_tx_lock(dev);
}
-static inline void netif_tx_unlock(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* No need to grab the _xmit_lock here. If the
- * queue is not stopped for another reason, we
- * force a schedule.
- */
- clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
- netif_schedule_queue(txq);
- }
- spin_unlock(&dev->tx_global_lock);
-}
+void netif_tx_unlock(struct net_device *dev);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
@@ -4248,6 +4491,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -4255,22 +4499,29 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
- spin_lock(&dev->addr_list_lock);
-}
+ unsigned char nest_level = 0;
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
- spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
+#ifdef CONFIG_LOCKDEP
+ nest_level = dev->nested_level;
+#endif
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_lock_bh(struct net_device *dev)
{
- spin_lock_bh(&dev->addr_list_lock);
+ unsigned char nest_level = 0;
+
+#ifdef CONFIG_LOCKDEP
+ nest_level = dev->nested_level;
+#endif
+ local_bh_disable();
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_unlock(struct net_device *dev)
@@ -4338,12 +4589,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len);
+
+static inline void
+__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
+{
+ dev_addr_mod(dev, 0, addr, len);
+}
+
+static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, dev->addr_len);
+}
+
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-void dev_addr_flush(struct net_device *dev);
-int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -4433,10 +4696,10 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
-void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
+void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
@@ -4445,21 +4708,29 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
+void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_sw_netstats __percpu *netstats);
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
+
+enum {
+ NESTED_SYNC_IMM_BIT,
+ NESTED_SYNC_TODO_BIT,
+};
-extern int netdev_max_backlog;
-extern int netdev_tstamp_prequeue;
-extern int weight_p;
-extern int dev_weight_rx_bias;
-extern int dev_weight_tx_bias;
-extern int dev_rx_weight;
-extern int dev_tx_weight;
-extern int gro_normal_batch;
+#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
+#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
+
+#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
+#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
+
+struct netdev_nested_priv {
+ unsigned char flags;
+ void *data;
+};
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
@@ -4470,8 +4741,8 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *upper_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
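
With the new signature, per-walk state travels in struct netdev_nested_priv
rather than a bare void pointer. A sketch (callback and counter are
illustrative; the caller must hold the RCU read lock):

static int my_count_uppers(struct net_device *upper_dev,
			   struct netdev_nested_priv *priv)
{
	int *count = priv->data;

	(*count)++;
	return 0;	/* non-zero would abort the walk */
}

static int my_upper_count(struct net_device *dev)
{
	int count = 0;
	struct netdev_nested_priv priv = { .flags = 0, .data = &count };

	netdev_walk_all_upper_dev_rcu(dev, my_count_uppers, &priv);
	return count;
}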
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
@@ -4508,12 +4779,12 @@ struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
@@ -4553,11 +4824,6 @@ int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features);
-struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features, bool tx_path);
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
-
struct netdev_bonding_info {
ifslave slave;
ifbond master;
@@ -4580,11 +4846,6 @@ static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
}
#endif
-static inline
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
- return __skb_gso_segment(skb, features, true);
-}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
@@ -4622,11 +4883,17 @@ static inline void netdev_rx_csum_fault(struct net_device *dev,
void net_enable_timestamp(void);
void net_disable_timestamp(void);
-#ifdef CONFIG_PROC_FS
-int __init dev_proc_init(void);
-#else
-#define dev_proc_init() 0
-#endif
+static inline ktime_t netdev_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (ops->ndo_get_tstamp)
+ return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
+
+ return hwtstamps->hwtstamp;
+}
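
An illustrative caller (hypothetical): fetch a completed TX skb's hardware
timestamp, letting drivers that implement ->ndo_get_tstamp translate
free-running counter cycles themselves.

static ktime_t my_tx_tstamp(struct net_device *dev, struct sk_buff *skb)
{
	const struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	return netdev_get_tstamp(dev, hwts, false);
}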
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
@@ -4659,22 +4926,10 @@ int netdev_class_create_file_ns(const struct class_attribute *class_attr,
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(const struct class_attribute *class_attr)
-{
- return netdev_class_create_file_ns(class_attr, NULL);
-}
-
-static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
-{
- netdev_class_remove_file_ns(class_attr, NULL);
-}
-
extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
-void linkwatch_run_queue(void);
-
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
@@ -4717,6 +4972,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
+void skb_warn_bad_offload(const struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
@@ -4760,24 +5016,10 @@ static inline bool netif_needs_gso(struct sk_buff *skb,
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
-static inline void netif_set_gso_max_size(struct net_device *dev,
- unsigned int size)
-{
- dev->gso_max_size = size;
-}
-
-static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
- int pulled_hlen, u16 mac_offset,
- int mac_len)
-{
- skb->protocol = protocol;
- skb->encapsulation = 1;
- skb_push(skb, pulled_hlen);
- skb_reset_transport_header(skb);
- skb->mac_header = mac_offset;
- skb->network_header = skb->mac_header + mac_len;
- skb->mac_len = mac_len;
-}
+void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
+void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
+void netif_inherit_tso_max(struct net_device *to,
+ const struct net_device *from);
static inline bool netif_is_macsec(const struct net_device *dev)
{
@@ -4824,6 +5066,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev)
return dev->priv_flags & IFF_L3MDEV_SLAVE;
}
+static inline int dev_sdif(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ if (netif_is_l3_slave(dev))
+ return dev->ifindex;
+#endif
+ return 0;
+}
+
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_EBRIDGE;
@@ -4844,6 +5095,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev)
return dev->priv_flags & IFF_OVS_DATAPATH;
}
+static inline bool netif_is_any_bridge_master(const struct net_device *dev)
+{
+ return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
+}
+
static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
@@ -4894,7 +5150,7 @@ static inline void netif_keep_dst(struct net_device *dev)
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
- return dev->priv_flags & IFF_MACSEC;
+ return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -4910,14 +5166,11 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
-static inline bool netdev_unregistering(const struct net_device *dev)
-{
- return dev->reg_state == NETREG_UNREGISTERING;
-}
-
static inline const char *netdev_reg_state(const struct net_device *dev)
{
- switch (dev->reg_state) {
+ u8 reg_state = READ_ONCE(dev->reg_state);
+
+ switch (reg_state) {
case NETREG_UNINITIALIZED: return " (uninitialized)";
case NETREG_REGISTERED: return "";
case NETREG_UNREGISTERING: return " (unregistering)";
@@ -4926,85 +5179,13 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
case NETREG_DUMMY: return " (dummy)";
}
- WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+ WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
return " (unknown)";
}
-__printf(3, 4) __cold
-void netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...);
-__printf(2, 3) __cold
-void netdev_emerg(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_alert(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_crit(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_err(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_warn(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_notice(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_info(const struct net_device *dev, const char *format, ...);
-
-#define netdev_level_once(level, dev, fmt, ...) \
-do { \
- static bool __print_once __read_mostly; \
- \
- if (!__print_once) { \
- __print_once = true; \
- netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
- } \
-} while (0)
-
-#define netdev_emerg_once(dev, fmt, ...) \
- netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
-#define netdev_alert_once(dev, fmt, ...) \
- netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
-#define netdev_crit_once(dev, fmt, ...) \
- netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
-#define netdev_err_once(dev, fmt, ...) \
- netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
-#define netdev_warn_once(dev, fmt, ...) \
- netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
-#define netdev_notice_once(dev, fmt, ...) \
- netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
-#define netdev_info_once(dev, fmt, ...) \
- netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
-
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG) || \
- (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#define netdev_dbg(__dev, format, args...) \
-do { \
- dynamic_netdev_dbg(__dev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#else
-#define netdev_dbg(__dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args); \
-})
-#endif
-
-#if defined(VERBOSE_DEBUG)
-#define netdev_vdbg netdev_dbg
-#else
-
-#define netdev_vdbg(dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* netdev_WARN() acts like dev_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
@@ -5018,74 +5199,6 @@ do { \
WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
-/* netif printk helpers, similar to netdev_printk */
-
-#define netif_printk(priv, type, level, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_printk(level, (dev), fmt, ##args); \
-} while (0)
-
-#define netif_level(level, priv, type, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_##level(dev, fmt, ##args); \
-} while (0)
-
-#define netif_emerg(priv, type, dev, fmt, args...) \
- netif_level(emerg, priv, type, dev, fmt, ##args)
-#define netif_alert(priv, type, dev, fmt, args...) \
- netif_level(alert, priv, type, dev, fmt, ##args)
-#define netif_crit(priv, type, dev, fmt, args...) \
- netif_level(crit, priv, type, dev, fmt, ##args)
-#define netif_err(priv, type, dev, fmt, args...) \
- netif_level(err, priv, type, dev, fmt, ##args)
-#define netif_warn(priv, type, dev, fmt, args...) \
- netif_level(warn, priv, type, dev, fmt, ##args)
-#define netif_notice(priv, type, dev, fmt, args...) \
- netif_level(notice, priv, type, dev, fmt, ##args)
-#define netif_info(priv, type, dev, fmt, args...) \
- netif_level(info, priv, type, dev, fmt, ##args)
-
-#if defined(CONFIG_DYNAMIC_DEBUG) || \
- (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#define netif_dbg(priv, type, netdev, format, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- dynamic_netdev_dbg(netdev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#else
-#define netif_dbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
-/* if @cond then downgrade to debug, else print at @level */
-#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
- do { \
- if (cond) \
- netif_dbg(priv, type, netdev, fmt, ##args); \
- else \
- netif_ ## level(priv, type, netdev, fmt, ##args); \
- } while (0)
-
-#if defined(VERBOSE_DEBUG)
-#define netif_vdbg netif_dbg
-#else
-#define netif_vdbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
@@ -5108,6 +5221,14 @@ do { \
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
+extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+
extern struct net_device *blackhole_netdev;
+/* Note: avoid these macros in the fast path; prefer per-CPU or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+
#endif /* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0101747de549..2683b2b77612 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
+#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
@@ -21,6 +22,16 @@ static inline int NF_DROP_GETERR(int verdict)
return -(verdict >> NF_VERDICT_QBITS);
}
+static __always_inline int
+NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
+{
+ BUILD_BUG_ON(err > 0xffff);
+
+ kfree_skb_reason(skb, reason);
+
+ return ((err << 16) | NF_STOLEN);
+}
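
A sketch of a hook using the helper above (the hook itself is made up): the
skb is freed with a precise drop reason while the netfilter core is told it
was stolen, with the errno encoded in the upper verdict bits.

static unsigned int my_hook(void *priv, struct sk_buff *skb,
			    const struct nf_hook_state *state)
{
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL,
				      EINVAL);

	return NF_ACCEPT;
}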
+
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
@@ -65,8 +76,8 @@ struct nf_hook_ops;
struct sock;
struct nf_hook_state {
- unsigned int hook;
- u_int8_t pf;
+ u8 hook;
+ u8 pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
@@ -77,12 +88,19 @@ struct nf_hook_state {
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
+enum nf_hook_ops_type {
+ NF_HOOK_OP_UNDEFINED,
+ NF_HOOK_OP_NF_TABLES,
+ NF_HOOK_OP_BPF,
+};
+
struct nf_hook_ops {
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
- u_int8_t pf;
+ u8 pf;
+ enum nf_hook_ops_type hook_ops_type:8;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
@@ -237,11 +255,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
-#if IS_ENABLED(CONFIG_DECNET)
- case NFPROTO_DECNET:
- hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
- break;
-#endif
default:
WARN_ON_ONCE(1);
break;
@@ -357,7 +370,6 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict, unsigned short family);
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
@@ -373,15 +385,16 @@ struct nf_nat_hook {
unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
enum nf_nat_manip_type mtype,
enum ip_conntrack_dir dir);
+ void (*remove_nat_bysrc)(struct nf_conn *ct);
};
-extern struct nf_nat_hook __rcu *nf_nat_hook;
+extern const struct nf_nat_hook __rcu *nf_nat_hook;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#if IS_ENABLED(CONFIG_NF_NAT)
- struct nf_nat_hook *nat_hook;
+ const struct nf_nat_hook *nat_hook;
rcu_read_lock();
nat_hook = rcu_dereference(nf_nat_hook);
@@ -434,13 +447,14 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>
-extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
@@ -457,14 +471,15 @@ struct nf_ct_hook {
void (*destroy)(struct nf_conntrack *);
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
const struct sk_buff *);
+ void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+ void (*set_closing)(struct nf_conntrack *nfct);
+ int (*confirm)(struct sk_buff *skb);
};
-extern struct nf_ct_hook __rcu *nf_ct_hook;
+extern const struct nf_ct_hook __rcu *nf_ct_hook;
struct nlattr;
struct nfnl_ct_hook {
- struct nf_conn *(*get_ct)(const struct sk_buff *skb,
- enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -475,9 +490,18 @@ struct nfnl_ct_hook {
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
-/**
+struct nf_defrag_hook {
+ struct module *owner;
+ int (*enable)(struct net *net);
+ void (*disable)(struct net *net);
+};
+
+extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
+extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;
+
+/*
* nf_skb_duplicated - TEE target has sent a packet
*
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
@@ -488,4 +512,9 @@ extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
*/
DECLARE_PER_CPU(bool, nf_skb_duplicated);
+/*
+ * Contains bitmask of ctnetlink event subscribers, if any.
+ * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
+ */
+extern u8 nf_ctnetlink_has_listener;
#endif /*__LINUX_NETFILTER_H*/
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ab192720e2d6..e9f4f845d760 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -124,8 +124,6 @@ struct ip_set_ext {
bool target;
};
-struct ip_set;
-
#define ext_timeout(e, s) \
((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
@@ -188,6 +186,8 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+	/* Cancel ongoing garbage collectors before destroying the set */
+ void (*cancel_gc)(struct ip_set *set);
/* Region-locking is used */
bool region_lock;
};
@@ -198,6 +198,12 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE (1<<14)
+
+/* The max revision number supported by any set type + 1 */
+#define IPSET_REVISION_MAX 9
+
/* The core set type structure */
struct ip_set_type {
struct list_head list;
@@ -215,6 +221,8 @@ struct ip_set_type {
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
+ /* Revision-specific supported (create) flags */
+ u8 create_flags[IPSET_REVISION_MAX+1];
/* Set features to control swapping */
u16 features;
@@ -236,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
/* A generic IP set */
struct ip_set {
+	/* For call_rcu in destroy */
+ struct rcu_head rcu;
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
@@ -509,6 +519,16 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
*skbinfo = ext->skbinfo;
}
+static inline void
+nf_inet_addr_mask_inplace(union nf_inet_addr *a1,
+ const union nf_inet_addr *mask)
+{
+ a1->all[0] &= mask->all[0];
+ a1->all[1] &= mask->all[1];
+ a1->all[2] &= mask->all[2];
+ a1->all[3] &= mask->all[3];
+}
+
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, .target = true,\
.timeout = ip_set_adt_opt_timeout(opt, set) }
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 1db83c931d9c..2770db2fa080 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -2,15 +2,15 @@
#ifndef _NF_CONNTRACK_COMMON_H
#define _NF_CONNTRACK_COMMON_H
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
unsigned int found;
unsigned int invalid;
- unsigned int ignore;
unsigned int insert;
unsigned int insert_failed;
+ unsigned int clash_resolve;
unsigned int drop;
unsigned int early_drop;
unsigned int error;
@@ -18,25 +18,28 @@ struct ip_conntrack_stat {
unsigned int expect_create;
unsigned int expect_delete;
unsigned int search_restart;
+ unsigned int chaintoolong;
};
#define NFCT_INFOMASK 7UL
#define NFCT_PTRMASK ~(NFCT_INFOMASK)
struct nf_conntrack {
- atomic_t use;
+ refcount_t use;
};
void nf_conntrack_destroy(struct nf_conntrack *nfct);
+
+/* like nf_ct_put, but without a module dependency on nf_conntrack */
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
- if (nfct && atomic_dec_and_test(&nfct->use))
+ if (nfct && refcount_dec_and_test(&nfct->use))
nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
if (nfct)
- atomic_inc(&nfct->use);
+ refcount_inc(&nfct->use);
}
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 4561ec0fcea4..81286c499325 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -34,64 +34,63 @@ struct nf_ct_h323_master {
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr, union nf_inet_addr *addr,
__be16 *port);
-void nf_conntrack_h245_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-void nf_conntrack_q931_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_sig_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*set_ras_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- int dataoff,
- H245_TransportAddress *taddr,
- __be16 port, __be16 rtp_port,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp);
-extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+
+struct nfct_h323_nat_hooks {
+ int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- H245_TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+ H245_TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, TransportAddress *taddr,
- int idx, __be16 port,
- struct nf_conntrack_expect *exp);
+ TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_sig_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*set_ras_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*nat_rtp_rtcp)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr,
+ __be16 port, __be16 rtp_port,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+ int (*nat_t120)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_h245)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_callforwarding)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_q931)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, TransportAddress *taddr, int idx,
+ __be16 port, struct nf_conntrack_expect *exp);
+};
+extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook;
#endif
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index a28aa289afdc..c3bdb4370938 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -300,26 +300,22 @@ union pptp_ctrl_union {
struct PptpSetLinkInfo setlink;
};
-extern int
-(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern int
-(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern void
-(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
- struct nf_conntrack_expect *exp_reply);
-
-extern void
-(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
- struct nf_conntrack_expect *exp);
+struct nf_nat_pptp_hook {
+ int (*outbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ int (*inbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ void (*exp_gre)(struct nf_conntrack_expect *exp_orig,
+ struct nf_conntrack_expect *exp_reply);
+ void (*expectfn)(struct nf_conn *ct,
+ struct nf_conntrack_expect *exp);
+};
+extern const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
#endif /* _NF_CONNTRACK_PPTP_H */
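The conversion above collapses four independent function-pointer hooks into one RCU-protected ops structure, so the NAT module publishes everything atomically. A minimal sketch of the publish/teardown side, assuming hypothetical helper functions (nf_nat_pptp_out() and friends are illustrative names, not from this patch):

static const struct nf_nat_pptp_hook pptp_nat_ops = {
	.outbound = nf_nat_pptp_out,	/* hypothetical helpers */
	.inbound  = nf_nat_pptp_in,
	.exp_gre  = pptp_nat_exp_gre,
	.expectfn = pptp_nat_expected,
};

static int __init nf_nat_pptp_init(void)
{
	/* publish the ops; conntrack readers pick them up via RCU */
	RCU_INIT_POINTER(nf_nat_pptp_hook, &pptp_nat_ops);
	return 0;
}

static void __exit nf_nat_pptp_exit(void)
{
	RCU_INIT_POINTER(nf_nat_pptp_hook, NULL);
	synchronize_rcu();	/* wait out in-flight readers */
}

Readers dereference the pointer once under rcu_read_lock() and see either all four callbacks or none, which the old per-pointer scheme could not guarantee.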
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index f33aa6021364..34ce5d2f37a2 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -25,7 +25,6 @@ struct nf_ct_gre_keymap {
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
-void nf_ct_gre_keymap_flush(struct net *net);
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index c620521c42bc..dbc614dfe0d5 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -164,7 +164,7 @@ struct nf_nat_sip_hooks {
unsigned int medialen,
union nf_inet_addr *rtp_addr);
};
-extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
unsigned int datalen, unsigned int *matchoff,
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 89016d08f6a2..e9a9ab34a7cc 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -7,21 +7,33 @@
#include <net/netlink.h>
#include <uapi/linux/netfilter/nfnetlink.h>
+struct nfnl_info {
+ struct net *net;
+ struct sock *sk;
+ const struct nlmsghdr *nlh;
+ const struct nfgenmsg *nfmsg;
+ struct netlink_ext_ack *extack;
+};
+
+enum nfnl_callback_type {
+ NFNL_CB_UNSPEC = 0,
+ NFNL_CB_MUTEX,
+ NFNL_CB_RCU,
+ NFNL_CB_BATCH,
+};
+
struct nfnl_callback {
- int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- const struct nla_policy *policy; /* netlink attribute policy */
- const u_int16_t attr_count; /* number of nlattr's */
+ int (*call)(struct sk_buff *skb, const struct nfnl_info *info,
+ const struct nlattr * const cda[]);
+ const struct nla_policy *policy;
+ enum nfnl_callback_type type;
+ __u16 attr_count;
+};
+
+enum nfnl_abort_action {
+ NFNL_ABORT_NONE = 0,
+ NFNL_ABORT_AUTOLOAD,
+ NFNL_ABORT_VALIDATE,
};
struct nfnetlink_subsystem {
@@ -31,8 +43,8 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
- int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
- void (*cleanup)(struct net *net);
+ int (*abort)(struct net *net, struct sk_buff *skb,
+ enum nfnl_abort_action action);
bool (*valid_genid)(struct net *net, u32 genid);
};
@@ -44,12 +56,41 @@ int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
+void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
return subsys << 8 | msg_type;
}
+static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nfgenmsg *nfmsg;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = version;
+ nfmsg->res_id = res_id;
+}
+
+static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid,
+ u32 seq, int type, int flags,
+ u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
+ if (!nlh)
+ return NULL;
+
+ nfnl_fill_hdr(nlh, family, version, res_id);
+
+ return nlh;
+}
+
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
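With struct nfnl_info, the three per-locking-mode callbacks collapse into a single ->call, and the locking requirement becomes data (enum nfnl_callback_type). A sketch of what a subsystem entry might now look like; the handler and the NFTA_EXAMPLE_NAME attribute are invented for illustration:

static int example_get(struct sk_buff *skb, const struct nfnl_info *info,
		       const struct nlattr * const cda[])
{
	/* net, sk, nlh, nfmsg and extack now travel together in info */
	if (!cda[NFTA_EXAMPLE_NAME]) {
		NL_SET_ERR_MSG(info->extack, "missing name attribute");
		return -EINVAL;
	}
	return 0;
}

static const struct nfnl_callback example_cb[] = {
	[0] = {
		.call		= example_get,
		.type		= NFNL_CB_RCU,	/* run under rcu_read_lock() */
		.attr_count	= 1,
	},
};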
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5deb099d156d..5897f3dbaf7c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -36,8 +36,8 @@ struct xt_action_param {
const void *matchinfo, *targinfo;
};
const struct nf_hook_state *state;
- int fragoff;
unsigned int thoff;
+ u16 fragoff;
bool hotdrop;
};
@@ -158,7 +158,7 @@ struct xt_match {
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
@@ -169,7 +169,7 @@ struct xt_match {
const char *table;
unsigned int matchsize;
unsigned int usersize;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
@@ -199,7 +199,7 @@ struct xt_target {
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
@@ -210,7 +210,7 @@ struct xt_target {
const char *table;
unsigned int targetsize;
unsigned int usersize;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
@@ -229,15 +229,15 @@ struct xt_table {
/* Man behind the curtain... */
struct xt_table_info *private;
+ /* hook ops that register the table with the netfilter core */
+ struct nf_hook_ops *ops;
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
- /* called when table is needed in the given netns */
- int (*table_init)(struct net *net);
-
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -322,6 +322,7 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
int *err);
+struct xt_table *xt_find_table(struct net *net, u8 af, const char *name);
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
@@ -376,7 +377,7 @@ static inline unsigned int xt_write_recseq_begin(void)
* since addend is most likely 1
*/
__this_cpu_add(xt_recseq.sequence, addend);
- smp_wmb();
+ smp_mb();
return addend;
}
@@ -448,7 +449,10 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
-#ifdef CONFIG_COMPAT
+int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net));
+void xt_unregister_template(const struct xt_table *t);
+
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
@@ -529,5 +533,5 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
-#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
#endif /* _X_TABLES_H */
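The removal of ->table_init from struct xt_table pairs with the new template API: the per-netns init function is now supplied at registration time instead of living in the (otherwise const) table definition. A sketch of module init/exit using it, assuming a table definition named packet_filter as in the stock filter table:

static int __net_init iptable_filter_table_init(struct net *net)
{
	/* allocate the initial ruleset and register the table for net */
	return 0;
}

static int __init iptable_filter_init(void)
{
	return xt_register_template(&packet_filter,
				    iptable_filter_table_init);
}

static void __exit iptable_filter_fini(void)
{
	xt_unregister_template(&packet_filter);
}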
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 7d3537c40ec9..a40aaf645fa4 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -51,14 +51,13 @@ struct arpt_error {
extern void *arpt_alloc_initial_table(const struct xt_table *);
int arpt_register_table(struct net *net, const struct xt_table *table,
const struct arpt_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void arpt_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-extern unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+ const struct nf_hook_ops *ops);
+void arpt_unregister_table(struct net *net, const char *name);
+void arpt_unregister_table_pre_exit(struct net *net, const char *name);
+extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_arpt_entry {
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f980edfdd278..743475ca7e9d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
if (!nf_bridge)
return 0;
- return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
+ return nf_bridge->physinif;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
}
static inline struct net_device *
-nf_bridge_get_physindev(const struct sk_buff *skb)
+nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- return nf_bridge ? nf_bridge->physindev : NULL;
+ return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
}
static inline struct net_device *
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2f5c4e6ecd8a..fd533552a062 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -94,12 +94,9 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
+ struct nf_hook_ops *ops;
struct module *me;
};
@@ -108,13 +105,11 @@ struct ebt_table {
extern int ebt_register_table(struct net *net,
const struct ebt_table *table,
- const struct nf_hook_ops *ops,
- struct ebt_table **res);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
- const struct nf_hook_ops *);
-extern unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table);
+ const struct nf_hook_ops *ops);
+extern void ebt_unregister_table(struct net *net, const char *tablename);
+void ebt_unregister_table_pre_exit(struct net *net, const char *tablename);
+extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
@@ -127,4 +122,6 @@ static inline bool ebt_invalid_target(int target)
return (target < -NUM_STANDARD_TARGETS || target >= 0);
}
+int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net));
+void ebt_unregister_template(const struct ebt_table *t);
#endif
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index 8dddfb151f00..a5f7bef1b3a4 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -7,14 +7,6 @@
/* in/out/forward only */
#define NF_ARP_NUMHOOKS 3
-/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
-#define NF_DN_NUMHOOKS 7
-
-#if IS_ENABLED(CONFIG_DECNET)
-/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS NF_DN_NUMHOOKS
-#else
#define NF_MAX_HOOKS NF_INET_NUMHOOKS
-#endif
#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
deleted file mode 100644
index a13774be2eb5..000000000000
--- a/include/linux/netfilter_ingress.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NETFILTER_INGRESS_H_
-#define _NETFILTER_INGRESS_H_
-
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-
-#ifdef CONFIG_NETFILTER_INGRESS
-static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
-{
-#ifdef CONFIG_JUMP_LABEL
- if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
- return false;
-#endif
- return rcu_access_pointer(skb->dev->nf_hooks_ingress);
-}
-
-/* caller must hold rcu_read_lock */
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
- struct nf_hook_state state;
- int ret;
-
- /* Must recheck the ingress hook head, in the event it became NULL
- * after the check in nf_hook_ingress_active evaluated to true.
- */
- if (unlikely(!e))
- return 0;
-
- nf_hook_state_init(&state, NF_NETDEV_INGRESS,
- NFPROTO_NETDEV, skb->dev, NULL, NULL,
- dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e, 0);
- if (ret == 0)
- return -1;
-
- return ret;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev)
-{
- RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
-}
-#else /* CONFIG_NETFILTER_INGRESS */
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev) {}
-#endif /* CONFIG_NETFILTER_INGRESS */
-#endif /* _NETFILTER_INGRESS_H_ */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 082e2c41b7ff..5b70ca868bb1 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -16,7 +16,7 @@ struct ip_rt_info {
u_int32_t mark;
};
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
struct nf_queue_entry;
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index c4676d6feeff..132b0e4a6d4d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -24,15 +24,10 @@
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-
-void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops);
-void ipt_unregister_table_exit(struct net *net, struct xt_table *table);
-
-void ipt_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
+void ipt_unregister_table_pre_exit(struct net *net, const char *name);
+void ipt_unregister_table_exit(struct net *net, const char *name);
/* Standard entry. */
struct ipt_standard {
@@ -68,11 +63,11 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-extern unsigned int ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_ipt_entry {
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 9b67394471e1..61aa48f46dd7 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -42,7 +42,7 @@ struct nf_ipv6_ops {
#if IS_MODULE(CONFIG_IPV6)
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
- int (*route_me_harder)(struct net *net, struct sk_buff *skb);
+ int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
@@ -51,7 +51,7 @@ struct nf_ipv6_ops {
u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
int (*cookie_v6_check)(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie);
+ const struct tcphdr *th);
#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
#endif
}
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
-static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
{
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
@@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
if (!v6_ops)
return -EHOSTUNREACH;
- return v6_ops->route_me_harder(net, skb);
+ return v6_ops->route_me_harder(net, sk, skb);
#elif IS_BUILTIN(CONFIG_IPV6)
- return ip6_route_me_harder(net, skb);
+ return ip6_route_me_harder(net, sk, skb);
#else
return -EHOSTUNREACH;
#endif
@@ -179,16 +179,16 @@ static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
}
static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie)
+ const struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (v6_ops)
- return v6_ops->cookie_v6_check(iph, th, cookie);
+ return v6_ops->cookie_v6_check(iph, th);
#elif IS_BUILTIN(CONFIG_IPV6)
- return __cookie_v6_check(iph, th, cookie);
+ return __cookie_v6_check(iph, th);
#endif
#endif
return 0;
@@ -197,6 +197,8 @@ static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
+int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen);
+
int ipv6_netfilter_init(void);
void ipv6_netfilter_fini(void);
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 1547d5f9ae06..8b8885a73c76 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -26,17 +26,13 @@ extern void *ip6t_alloc_initial_table(const struct xt_table *);
int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct ip6t_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void ip6t_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-void ip6t_unregister_table_exit(struct net *net, struct xt_table *table);
-extern unsigned int ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
-
-#ifdef CONFIG_COMPAT
+ const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
+void ip6t_unregister_table_exit(struct net *net, const char *name);
+extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_ip6t_entry {
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
new file mode 100644
index 000000000000..8676316547cc
--- /dev/null
+++ b/include/linux/netfilter_netdev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETFILTER_NETDEV_H_
+#define _NETFILTER_NETDEV_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
+}
+
+/* caller must hold rcu_read_lock */
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_state state;
+ int ret;
+
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+ if (ret == 0)
+ return -1;
+
+ return ret;
+}
+
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_INGRESS */
+
+#ifdef CONFIG_NETFILTER_EGRESS
+static inline bool nf_hook_egress_active(void)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
+ return false;
+#endif
+ return true;
+}
+
+/**
+ * nf_hook_egress - classify packets before transmission
+ * @skb: packet to be classified
+ * @rc: result code which shall be returned by __dev_queue_xmit() on failure
+ * @dev: netdev whose egress hooks shall be applied to @skb
+ *
+ * Returns @skb on success or %NULL if the packet was consumed or filtered.
+ * Caller must hold rcu_read_lock.
+ *
+ * On ingress, packets are classified first by tc, then by netfilter.
+ * On egress, the order is reversed for symmetry. Conceptually, tc and
+ * netfilter can be thought of as layers, with netfilter layered above tc:
+ * When tc redirects a packet to another interface, netfilter is not applied
+ * because the packet is on the tc layer.
+ *
+ * The nf_skip_egress flag controls whether netfilter is applied on egress.
+ * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
+ * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
+ * called recursively by tunnel drivers such as vxlan, the flag is reverted to
+ * false after sch_handle_egress(). This ensures that netfilter is applied
+ * both on the overlay and underlying network.
+ */
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ struct nf_hook_entries *e;
+ struct nf_hook_state state;
+ int ret;
+
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ if (skb->nf_skip_egress)
+ return skb;
+#endif
+
+ e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
+ if (!e)
+ return skb;
+
+ nf_hook_state_init(&state, NF_NETDEV_EGRESS,
+ NFPROTO_NETDEV, NULL, dev, NULL,
+ dev_net(dev), NULL);
+
+ /* nf assumes rcu_read_lock, not just read_lock_bh */
+ rcu_read_lock();
+ ret = nf_hook_slow(skb, &state, e, 0);
+ rcu_read_unlock();
+
+ if (ret == 1) {
+ return skb;
+ } else if (ret < 0) {
+ *rc = NET_XMIT_DROP;
+ return NULL;
+ } else { /* ret == 0 */
+ *rc = NET_XMIT_SUCCESS;
+ return NULL;
+ }
+}
+#else /* CONFIG_NETFILTER_EGRESS */
+static inline bool nf_hook_egress_active(void)
+{
+ return false;
+}
+
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ return skb;
+}
+#endif /* CONFIG_NETFILTER_EGRESS */
+
+static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
+{
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ skb->nf_skip_egress = skip;
+#endif
+}
+
+static inline void nf_hook_netdev_init(struct net_device *dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
+#endif
+}
+
+#endif /* _NETFILTER_NETDEV_H_ */
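For context, a sketch of how a transmit path could consult the new egress hook; this mirrors the intended call site in __dev_queue_xmit() but is an illustration, not a quote of that function:

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NET_XMIT_SUCCESS;

	rcu_read_lock_bh();
	if (nf_hook_egress_active()) {
		skb = nf_hook_egress(skb, &rc, dev);
		if (!skb) {
			/* consumed (NET_XMIT_SUCCESS) or dropped
			 * (NET_XMIT_DROP); rc already holds the verdict
			 */
			rcu_read_unlock_bh();
			return rc;
		}
	}
	rcu_read_unlock_bh();

	/* hand skb to the qdisc / driver as usual */
	return rc;
}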
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
new file mode 100644
index 000000000000..100cbb261269
--- /dev/null
+++ b/include/linux/netfs.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support services.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/filesystems/netfs_library.rst
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_NETFS_H
+#define _LINUX_NETFS_H
+
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/uio.h>
+
+enum netfs_sreq_ref_trace;
+
+/*
+ * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define folio_test_fscache(folio) folio_test_private_2(folio)
+#define PageFsCache(page) PagePrivate2((page))
+#define SetPageFsCache(page) SetPagePrivate2((page))
+#define ClearPageFsCache(page) ClearPagePrivate2((page))
+#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+
+/**
+ * folio_start_fscache - Start an fscache write on a folio.
+ * @folio: The folio.
+ *
+ * Call this function before writing a folio to a local cache. Starting a
+ * second write before the first one finishes is not allowed.
+ */
+static inline void folio_start_fscache(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
+ folio_get(folio);
+ folio_set_private_2(folio);
+}
+
+/**
+ * folio_end_fscache - End an fscache write on a folio.
+ * @folio: The folio.
+ *
+ * Call this function after the folio has been written to the local cache.
+ * This will wake any sleepers waiting on this folio.
+ */
+static inline void folio_end_fscache(struct folio *folio)
+{
+ folio_end_private_2(folio);
+}
+
+/**
+ * folio_wait_fscache - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
+ *
+ * If this folio is currently being written to a local cache, wait for
+ * the write to finish. Another write may start after this one finishes,
+ * unless the caller holds the folio lock.
+ */
+static inline void folio_wait_fscache(struct folio *folio)
+{
+ folio_wait_private_2(folio);
+}
+
+/**
+ * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
+ *
+ * If this folio is currently being written to a local cache, wait
+ * for the write to finish or for a fatal signal to be received.
+ * Another write may start after this one finishes, unless the caller
+ * holds the folio lock.
+ *
+ * Return:
+ * - 0 if successful.
+ * - -EINTR if a fatal signal was encountered.
+ */
+static inline int folio_wait_fscache_killable(struct folio *folio)
+{
+ return folio_wait_private_2_killable(folio);
+}
+
+static inline void set_page_fscache(struct page *page)
+{
+ folio_start_fscache(page_folio(page));
+}
+
+static inline void end_page_fscache(struct page *page)
+{
+ folio_end_private_2(page_folio(page));
+}
+
+static inline void wait_on_page_fscache(struct page *page)
+{
+ folio_wait_private_2(page_folio(page));
+}
+
+static inline int wait_on_page_fscache_killable(struct page *page)
+{
+ return folio_wait_private_2_killable(page_folio(page));
+}
+
+/* Marks used on xarray-based buffers */
+#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
+#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
+
+enum netfs_io_source {
+ NETFS_FILL_WITH_ZEROES,
+ NETFS_DOWNLOAD_FROM_SERVER,
+ NETFS_READ_FROM_CACHE,
+ NETFS_INVALID_READ,
+ NETFS_UPLOAD_TO_SERVER,
+ NETFS_WRITE_TO_CACHE,
+ NETFS_INVALID_WRITE,
+} __mode(byte);
+
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
+ bool was_async);
+
+/*
+ * Per-inode context. This wraps the VFS inode.
+ */
+struct netfs_inode {
+ struct inode inode; /* The VFS inode */
+ const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cache;
+#endif
+ loff_t remote_i_size; /* Size of the remote file */
+ loff_t zero_point; /* Size after which we assume there's no data
+ * on the server */
+ unsigned long flags;
+#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
+#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
+#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
+#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+};
+
+/*
+ * A netfs group - for instance a ceph snap. This is marked on dirty pages and
+ * pages marked with a group must be flushed before they can be written under
+ * the domain of another group.
+ */
+struct netfs_group {
+ refcount_t ref;
+ void (*free)(struct netfs_group *netfs_group);
+};
+
+/*
+ * Information about a dirty page (attached only if necessary).
+ * folio->private
+ */
+struct netfs_folio {
+ struct netfs_group *netfs_group; /* Filesystem's grouping marker (or NULL). */
+ unsigned int dirty_offset; /* Write-streaming dirty data offset */
+ unsigned int dirty_len; /* Write-streaming dirty data length */
+};
+#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ void *priv = folio_get_private(folio);
+
+ if ((unsigned long)priv & NETFS_FOLIO_INFO)
+ return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
+ return NULL;
+}
+
+static inline struct netfs_group *netfs_folio_group(struct folio *folio)
+{
+ struct netfs_folio *finfo;
+ void *priv = folio_get_private(folio);
+
+ finfo = netfs_folio_info(folio);
+ if (finfo)
+ return finfo->netfs_group;
+ return priv;
+}
+
+/*
+ * Resources required to do operations on a cache.
+ */
+struct netfs_cache_resources {
+ const struct netfs_cache_ops *ops;
+ void *cache_priv;
+ void *cache_priv2;
+ unsigned int debug_id; /* Cookie debug ID */
+ unsigned int inval_counter; /* object->inval_counter at begin_op */
+};
+
+/*
+ * Descriptor for a single component subrequest. Each operation represents an
+ * individual read/write from/to a server, a cache, a journal, etc.
+ *
+ * The buffer iterator is persistent for the life of the subrequest struct and
+ * the pages it points to can be relied on to exist for the duration.
+ */
+struct netfs_io_subrequest {
+ struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct work_struct work;
+ struct list_head rreq_link; /* Link in rreq->subrequests */
+ struct iov_iter io_iter; /* Iterator for this subrequest */
+ loff_t start; /* Where to start the I/O */
+ size_t len; /* Size of the I/O */
+ size_t transferred; /* Amount of data transferred */
+ refcount_t ref;
+ short error; /* 0 or error that occurred */
+ unsigned short debug_index; /* Index in list (for debugging output) */
+ unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned long flags;
+#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
+#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
+#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
+#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
+#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
+};
+
+enum netfs_io_origin {
+ NETFS_READAHEAD, /* This read was triggered by readahead */
+ NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
+ NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
+ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
+ NETFS_DIO_READ, /* This is a direct I/O read */
+ NETFS_DIO_WRITE, /* This is a direct I/O write */
+ nr__netfs_io_origin
+} __mode(byte);
+
+/*
+ * Descriptor for an I/O helper request. This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
+ */
+struct netfs_io_request {
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+ struct inode *inode; /* The file being accessed */
+ struct address_space *mapping; /* The mapping being accessed */
+ struct kiocb *iocb; /* AIO completion vector */
+ struct netfs_cache_resources cache_resources;
+ struct list_head proc_link; /* Link in netfs_iorequests */
+ struct list_head subrequests; /* Contributory I/O operations */
+ struct iov_iter iter; /* Unencrypted-side iterator */
+ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
+ void *netfs_priv; /* Private data for the netfs */
+ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
+ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
+ unsigned int debug_id;
+ unsigned int rsize; /* Maximum read size (0 for none) */
+ unsigned int wsize; /* Maximum write size (0 for none) */
+ unsigned int subreq_counter; /* Next subreq->debug_index */
+ atomic_t nr_outstanding; /* Number of ops in progress */
+ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
+ size_t submitted; /* Amount submitted for I/O so far */
+ size_t len; /* Length of the request */
+ size_t upper_len; /* Length can be extended to here */
+ size_t transferred; /* Amount to be indicated as transferred */
+ short error; /* 0 or error that occurred */
+ enum netfs_io_origin origin; /* Origin of the request */
+ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
+ loff_t i_size; /* Size of the file */
+ loff_t start; /* Start position */
+ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ refcount_t ref;
+ unsigned long flags;
+#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
+#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
+#define NETFS_RREQ_FAILED 4 /* The request failed */
+#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
+#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
+#define NETFS_RREQ_BLOCKED 10 /* We blocked */
+ const struct netfs_request_ops *netfs_ops;
+ void (*cleanup)(struct netfs_io_request *req);
+};
+
+/*
+ * Operations the network filesystem can/must provide to the helpers.
+ */
+struct netfs_request_ops {
+ unsigned int io_request_size; /* Alloc size for netfs_io_request struct */
+ unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */
+ int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ void (*free_request)(struct netfs_io_request *rreq);
+ void (*free_subrequest)(struct netfs_io_subrequest *rreq);
+
+ /* Read request handling */
+ void (*expand_readahead)(struct netfs_io_request *rreq);
+ bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ void (*issue_read)(struct netfs_io_subrequest *subreq);
+ bool (*is_still_valid)(struct netfs_io_request *rreq);
+ int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
+ struct folio **foliop, void **_fsdata);
+ void (*done)(struct netfs_io_request *rreq);
+
+ /* Modification handling */
+ void (*update_i_size)(struct inode *inode, loff_t i_size);
+
+ /* Write request handling */
+ void (*create_write_requests)(struct netfs_io_request *wreq,
+ loff_t start, size_t len);
+ void (*invalidate_cache)(struct netfs_io_request *wreq);
+};
+
+/*
+ * How to handle reading from a hole.
+ */
+enum netfs_read_from_hole {
+ NETFS_READ_HOLE_IGNORE,
+ NETFS_READ_HOLE_CLEAR,
+ NETFS_READ_HOLE_FAIL,
+};
+
+/*
+ * Table of operations for access to a cache.
+ */
+struct netfs_cache_ops {
+ /* End an operation */
+ void (*end_operation)(struct netfs_cache_resources *cres);
+
+ /* Read data from the cache */
+ int (*read)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Write data to the cache */
+ int (*write)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Expand readahead request */
+ void (*expand_readahead)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+
+ /* Prepare a read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
+ loff_t i_size);
+
+ /* Prepare a write operation, working out what part of the write we can
+ * actually do.
+ */
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet);
+
+ /* Prepare an on-demand read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
+ loff_t start, size_t *_len,
+ loff_t i_size,
+ unsigned long *_flags, ino_t ino);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
+};
+
+/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+
+/* High-level write API */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group);
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Address operations API */
+struct readahead_control;
+void netfs_readahead(struct readahead_control *);
+int netfs_read_folio(struct file *, struct folio *);
+int netfs_write_begin(struct netfs_inode *, struct file *,
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+int netfs_launder_folio(struct folio *folio);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
+void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+ bool was_async, enum netfs_sreq_ref_trace what);
+ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
+ struct iov_iter *new,
+ iov_iter_extraction_t extraction_flags);
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs);
+struct netfs_io_subrequest *netfs_create_write_request(
+ struct netfs_io_request *wreq, enum netfs_io_source dest,
+ loff_t start, size_t len, work_func_t worker);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ bool was_async);
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
+
+int netfs_start_io_read(struct inode *inode);
+void netfs_end_io_read(struct inode *inode);
+int netfs_start_io_write(struct inode *inode);
+void netfs_end_io_write(struct inode *inode);
+int netfs_start_io_direct(struct inode *inode);
+void netfs_end_io_direct(struct inode *inode);
+
+/**
+ * netfs_inode - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode. The
+ * context struct is expected to directly follow on from the VFS inode struct.
+ */
+static inline struct netfs_inode *netfs_inode(struct inode *inode)
+{
+ return container_of(inode, struct netfs_inode, inode);
+}
+
+/**
+ * netfs_inode_init - Initialise a netfslib inode context
+ * @ctx: The netfs inode to initialise
+ * @ops: The netfs's operations list
+ * @use_zero_point: True to use the zero_point read optimisation
+ *
+ * Initialise the netfs library context struct. This is expected to follow on
+ * directly from the VFS inode struct.
+ */
+static inline void netfs_inode_init(struct netfs_inode *ctx,
+ const struct netfs_request_ops *ops,
+ bool use_zero_point)
+{
+ ctx->ops = ops;
+ ctx->remote_i_size = i_size_read(&ctx->inode);
+ ctx->zero_point = LLONG_MAX;
+ ctx->flags = 0;
+#if IS_ENABLED(CONFIG_FSCACHE)
+ ctx->cache = NULL;
+#endif
+ /* ->releasepage() drives zero_point */
+ if (use_zero_point) {
+ ctx->zero_point = ctx->remote_i_size;
+ mapping_set_release_always(ctx->inode.i_mapping);
+ }
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @ctx: The netfs inode being resized
+ * @new_i_size: The new file size
+ * @changed_on_server: The change was applied to the server
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
+ bool changed_on_server)
+{
+ if (changed_on_server)
+ ctx->remote_i_size = new_i_size;
+ if (new_i_size < ctx->zero_point)
+ ctx->zero_point = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @ctx: The netfs inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ return ctx->cache;
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _LINUX_NETFS_H */
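netfs_inode() recovers the context by container_of(), so the library expects the filesystem to wrap its inode in struct netfs_inode. A minimal sketch of the embedding and the init call; myfs_inode and myfs_request_ops are hypothetical names:

struct myfs_inode {
	struct netfs_inode netfs;	/* the VFS inode lives at netfs.inode */
	unsigned long myfs_flags;	/* filesystem-private state follows */
};

static void myfs_init_netfs(struct inode *inode)
{
	/* false: leave the zero_point read optimisation disabled */
	netfs_inode_init(netfs_inode(inode), &myfs_request_ops, false);
}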
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index e3e49f0e5c13..5df7340d4dab 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -11,6 +11,8 @@
struct net;
+void do_trace_netlink_extack(const char *msg);
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
@@ -48,7 +50,7 @@ struct netlink_kernel_cfg {
struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
- bool (*compare)(struct net *net, struct sock *sk);
+ void (*release) (struct sock *sk, unsigned long *groups);
};
struct sock *__netlink_kernel_create(struct net *net, int unit,
@@ -62,74 +64,156 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
/* this can be increased when necessary - don't expose to userland */
#define NETLINK_MAX_COOKIE_LEN 20
+#define NETLINK_MAX_FMTMSG_LEN 80
/**
* struct netlink_ext_ack - netlink extended ACK report struct
* @_msg: message string to report - don't access directly, use
* %NL_SET_ERR_MSG
* @bad_attr: attribute with error
+ * @policy: policy for a bad attribute
+ * @miss_type: attribute type which was missing
+ * @miss_nest: nest missing an attribute (%NULL if missing top level attr)
* @cookie: cookie data to return to userspace (for success)
* @cookie_len: actual cookie data length
+ * @_msg_buf: output buffer for formatted message strings - don't access
+ * directly, use %NL_SET_ERR_MSG_FMT
*/
struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
+ const struct nla_policy *policy;
+ const struct nlattr *miss_nest;
+ u16 miss_type;
u8 cookie[NETLINK_MAX_COOKIE_LEN];
u8 cookie_len;
+ char _msg_buf[NETLINK_MAX_FMTMSG_LEN];
};
/* Always use this macro, this allows later putting the
* message into a separate section or such for things
* like translation or listing all possible messages.
- * Currently string formatting is not supported (due
- * to the lack of an output buffer.)
+ * If string formatting is needed use NL_SET_ERR_MSG_FMT.
*/
#define NL_SET_ERR_MSG(extack, msg) do { \
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) \
__extack->_msg = __msg; \
} while (0)
+/* We splice fmt with %s at each end even in the snprintf so that both calls
+ * can use the same string constant, avoiding its duplication in .rodata
+ */
+#define NL_SET_ERR_MSG_FMT(extack, fmt, args...) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (!__extack) \
+ break; \
+ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \
+ "%s" fmt "%s", "", ##args, "") >= \
+ NETLINK_MAX_FMTMSG_LEN) \
+ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \
+ ##args, "\n"); \
+ \
+ do_trace_netlink_extack(__extack->_msg_buf); \
+ \
+ __extack->_msg = __extack->_msg_buf; \
+} while (0)
+
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
-#define NL_SET_BAD_ATTR(extack, attr) do { \
- if ((extack)) \
+#define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \
+ NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args)
+
+#define NL_SET_ERR_MSG_WEAK(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG((extack), msg); \
+} while (0)
+
+#define NL_SET_ERR_MSG_WEAK_MOD(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG_MOD((extack), msg); \
+} while (0)
+
+#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
+ if ((extack)) { \
(extack)->bad_attr = (attr); \
+ (extack)->policy = (pol); \
+ } \
} while (0)
-#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
- static const char __msg[] = msg; \
+#define NL_SET_BAD_ATTR(extack, attr) NL_SET_BAD_ATTR_POLICY(extack, attr, NULL)
+
+#define NL_SET_ERR_MSG_ATTR_POL(extack, attr, pol, msg) do { \
+ static const char __msg[] = msg; \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ do_trace_netlink_extack(__msg); \
+ \
+ if (__extack) { \
+ __extack->_msg = __msg; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+ } \
+} while (0)
+
+#define NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, pol, fmt, args...) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (!__extack) \
+ break; \
+ \
+ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \
+ "%s" fmt "%s", "", ##args, "") >= \
+ NETLINK_MAX_FMTMSG_LEN) \
+ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \
+ ##args, "\n"); \
+ \
+ do_trace_netlink_extack(__extack->_msg_buf); \
+ \
+ __extack->_msg = __extack->_msg_buf; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+} while (0)
+
+#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \
+ NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg)
+
+#define NL_SET_ERR_MSG_ATTR_FMT(extack, attr, msg, args...) \
+ NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, NULL, msg, ##args)
+
+#define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) { \
- __extack->_msg = __msg; \
- __extack->bad_attr = (attr); \
+ __extack->miss_nest = (nest); \
+ __extack->miss_type = (type); \
} \
} while (0)
+#define NL_REQ_ATTR_CHECK(extack, nest, tb, type) ({ \
+ struct nlattr **__tb = (tb); \
+ u32 __attr = (type); \
+ int __retval; \
+ \
+ __retval = !__tb[__attr]; \
+ if (__retval) \
+ NL_SET_ERR_ATTR_MISS((extack), (nest), __attr); \
+ __retval; \
+})
+
static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
u64 cookie)
{
- u64 __cookie = cookie;
-
if (!extack)
return;
- memcpy(extack->cookie, &__cookie, sizeof(__cookie));
- extack->cookie_len = sizeof(__cookie);
-}
-
-static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
- u32 cookie)
-{
- u32 __cookie = cookie;
-
- if (!extack)
- return;
- memcpy(extack->cookie, &__cookie, sizeof(__cookie));
- extack->cookie_len = sizeof(__cookie);
+ memcpy(extack->cookie, &cookie, sizeof(cookie));
+ extack->cookie_len = sizeof(cookie);
}
void netlink_kernel_release(struct sock *sk);
@@ -144,9 +228,12 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
+
+typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data);
+
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
__u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ netlink_filter_fn filter,
void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
@@ -204,6 +291,7 @@ struct netlink_callback {
u16 answer_flags;
u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ int flags;
bool strict_check;
union {
u8 ctx[48];
@@ -215,6 +303,10 @@ struct netlink_callback {
};
};
+#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+ BUILD_BUG_ON(sizeof(type_name) > \
+ sizeof_field(struct netlink_callback, ctx))
+
struct netlink_notify {
struct net *net;
u32 portid;
@@ -228,9 +320,11 @@ struct netlink_dump_control {
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
+ struct netlink_ext_ack *extack;
void *data;
struct module *module;
- u16 min_dump_alloc;
+ u32 min_dump_alloc;
+ int flags;
};
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -261,5 +355,6 @@ bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
+struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast);
#endif /* __LINUX_NETLINK_H */
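Unlike NL_SET_ERR_MSG, the formatted variant writes into the extack's embedded _msg_buf, so it works with runtime values at the cost of the 80-byte NETLINK_MAX_FMTMSG_LEN cap. A usage sketch; the attribute and limit are invented for illustration:

static int example_validate(const struct nlattr *attr,
			    struct netlink_ext_ack *extack)
{
	u32 max = 64, val = nla_get_u32(attr);

	if (val > max) {
		NL_SET_ERR_MSG_FMT(extack, "value %u exceeds limit %u",
				   val, max);
		return -ERANGE;
	}
	return 0;
}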
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e6a2d72e0dc7..bd19c4b91e31 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,6 +24,7 @@ union inet_addr {
struct netpoll {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
char dev_name[IFNAMSIZ];
const char *name;
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 0dc7ad38a0da..ceb70a926b95 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -10,6 +10,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
+#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
/*
@@ -36,14 +37,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
@@ -52,4 +45,23 @@ enum nfs3_stable_how {
/* used by direct.c to mark verf as invalid */
NFS_INVALID_STABLE_HOW = -1
};
+
+#ifdef CONFIG_CRC32
+/**
+ * nfs_fhandle_hash - calculate the crc32 hash for the filehandle
+ * @fh: pointer to filehandle
+ *
+ * Returns a crc32 hash for the filehandle that is compatible with
+ * the one displayed by Wireshark.
+ */
+static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+{
+ return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
+}
+#else /* CONFIG_CRC32 */
+static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+{
+ return 0;
+}
+#endif /* CONFIG_CRC32 */
#endif /* _LINUX_NFS_H */
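The hash is mainly a stable, human-comparable identifier for debug and trace output. A one-line usage sketch (the surrounding function is hypothetical):

static void example_trace_fh(const struct nfs_fh *fh)
{
	/* matches the filehandle hash Wireshark shows for NFS traffic */
	pr_debug("fh hash: 0x%08x\n", nfs_fhandle_hash(fh));
}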
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b8360be141da..ef8d2d618d5b 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,7 +150,7 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
- /* xattr support (RFC8726) */
+ /* xattr support (RFC8276) */
OP_GETXATTR = 72,
OP_SETXATTR = 73,
OP_LISTXATTRS = 74,
@@ -292,6 +292,10 @@ enum nfsstat4 {
NFS4ERR_XATTR2BIG = 10096,
};
+/* error codes for internal client use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
static inline bool seqid_mutating_err(u32 err)
{
/* See RFC 7530, section 9.1.7 */
@@ -385,84 +389,203 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
-enum change_attr_type4 {
- NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
- NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
- NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
- NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
- NFS4_CHANGE_TYPE_IS_UNDEFINED = 4
+/*
+ * Symbol names and values are from RFC 7531 Section 2.
+ * "XDR Description of NFSv4.0"
+ */
+enum {
+ FATTR4_SUPPORTED_ATTRS = 0,
+ FATTR4_TYPE = 1,
+ FATTR4_FH_EXPIRE_TYPE = 2,
+ FATTR4_CHANGE = 3,
+ FATTR4_SIZE = 4,
+ FATTR4_LINK_SUPPORT = 5,
+ FATTR4_SYMLINK_SUPPORT = 6,
+ FATTR4_NAMED_ATTR = 7,
+ FATTR4_FSID = 8,
+ FATTR4_UNIQUE_HANDLES = 9,
+ FATTR4_LEASE_TIME = 10,
+ FATTR4_RDATTR_ERROR = 11,
+ FATTR4_ACL = 12,
+ FATTR4_ACLSUPPORT = 13,
+ FATTR4_ARCHIVE = 14,
+ FATTR4_CANSETTIME = 15,
+ FATTR4_CASE_INSENSITIVE = 16,
+ FATTR4_CASE_PRESERVING = 17,
+ FATTR4_CHOWN_RESTRICTED = 18,
+ FATTR4_FILEHANDLE = 19,
+ FATTR4_FILEID = 20,
+ FATTR4_FILES_AVAIL = 21,
+ FATTR4_FILES_FREE = 22,
+ FATTR4_FILES_TOTAL = 23,
+ FATTR4_FS_LOCATIONS = 24,
+ FATTR4_HIDDEN = 25,
+ FATTR4_HOMOGENEOUS = 26,
+ FATTR4_MAXFILESIZE = 27,
+ FATTR4_MAXLINK = 28,
+ FATTR4_MAXNAME = 29,
+ FATTR4_MAXREAD = 30,
+ FATTR4_MAXWRITE = 31,
+ FATTR4_MIMETYPE = 32,
+ FATTR4_MODE = 33,
+ FATTR4_NO_TRUNC = 34,
+ FATTR4_NUMLINKS = 35,
+ FATTR4_OWNER = 36,
+ FATTR4_OWNER_GROUP = 37,
+ FATTR4_QUOTA_AVAIL_HARD = 38,
+ FATTR4_QUOTA_AVAIL_SOFT = 39,
+ FATTR4_QUOTA_USED = 40,
+ FATTR4_RAWDEV = 41,
+ FATTR4_SPACE_AVAIL = 42,
+ FATTR4_SPACE_FREE = 43,
+ FATTR4_SPACE_TOTAL = 44,
+ FATTR4_SPACE_USED = 45,
+ FATTR4_SYSTEM = 46,
+ FATTR4_TIME_ACCESS = 47,
+ FATTR4_TIME_ACCESS_SET = 48,
+ FATTR4_TIME_BACKUP = 49,
+ FATTR4_TIME_CREATE = 50,
+ FATTR4_TIME_DELTA = 51,
+ FATTR4_TIME_METADATA = 52,
+ FATTR4_TIME_MODIFY = 53,
+ FATTR4_TIME_MODIFY_SET = 54,
+ FATTR4_MOUNTED_ON_FILEID = 55,
+};
+
+/*
+ * Symbol names and values are from RFC 5662 Section 2.
+ * "XDR Description of NFSv4.1"
+ */
+enum {
+ FATTR4_DIR_NOTIF_DELAY = 56,
+ FATTR4_DIRENT_NOTIF_DELAY = 57,
+ FATTR4_DACL = 58,
+ FATTR4_SACL = 59,
+ FATTR4_CHANGE_POLICY = 60,
+ FATTR4_FS_STATUS = 61,
+ FATTR4_FS_LAYOUT_TYPES = 62,
+ FATTR4_LAYOUT_HINT = 63,
+ FATTR4_LAYOUT_TYPES = 64,
+ FATTR4_LAYOUT_BLKSIZE = 65,
+ FATTR4_LAYOUT_ALIGNMENT = 66,
+ FATTR4_FS_LOCATIONS_INFO = 67,
+ FATTR4_MDSTHRESHOLD = 68,
+ FATTR4_RETENTION_GET = 69,
+ FATTR4_RETENTION_SET = 70,
+ FATTR4_RETENTEVT_GET = 71,
+ FATTR4_RETENTEVT_SET = 72,
+ FATTR4_RETENTION_HOLD = 73,
+ FATTR4_MODE_SET_MASKED = 74,
+ FATTR4_SUPPATTR_EXCLCREAT = 75,
+ FATTR4_FS_CHARSET_CAP = 76,
+};
+
+/*
+ * Symbol names and values are from RFC 7863 Section 2.
+ * "XDR Description of NFSv4.2"
+ */
+enum {
+ FATTR4_CLONE_BLKSIZE = 77,
+ FATTR4_SPACE_FREED = 78,
+ FATTR4_CHANGE_ATTR_TYPE = 79,
+ FATTR4_SEC_LABEL = 80,
+};
+
+/*
+ * Symbol names and values are from RFC 8275 Section 5.
+ * "The mode_umask Attribute"
+ */
+enum {
+ FATTR4_MODE_UMASK = 81,
};
+/*
+ * Symbol names and values are from RFC 8276 Section 8.6.
+ * "Numeric Values Assigned to Protocol Extensions"
+ */
+enum {
+ FATTR4_XATTR_SUPPORT = 82,
+};
+
+/*
+ * The following internal definitions enable processing the above
+ * attribute bits within 32-bit word boundaries.
+ */
+
/* Mandatory Attributes */
-#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
-#define FATTR4_WORD0_TYPE (1UL << 1)
-#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
-#define FATTR4_WORD0_CHANGE (1UL << 3)
-#define FATTR4_WORD0_SIZE (1UL << 4)
-#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
-#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
-#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
-#define FATTR4_WORD0_FSID (1UL << 8)
-#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
-#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
-#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS)
+#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE)
+#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE)
+#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE)
+#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT)
+#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT)
+#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR)
+#define FATTR4_WORD0_FSID BIT(FATTR4_FSID)
+#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES)
+#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME)
+#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR)
/* Mandatory in NFSv4.1 */
-#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64)
/* Recommended Attributes */
-#define FATTR4_WORD0_ACL (1UL << 12)
-#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
-#define FATTR4_WORD0_ARCHIVE (1UL << 14)
-#define FATTR4_WORD0_CANSETTIME (1UL << 15)
-#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
-#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
-#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
-#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
-#define FATTR4_WORD0_FILEID (1UL << 20)
-#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
-#define FATTR4_WORD0_FILES_FREE (1UL << 22)
-#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
-#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
-#define FATTR4_WORD0_HIDDEN (1UL << 25)
-#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
-#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
-#define FATTR4_WORD0_MAXLINK (1UL << 28)
-#define FATTR4_WORD0_MAXNAME (1UL << 29)
-#define FATTR4_WORD0_MAXREAD (1UL << 30)
-#define FATTR4_WORD0_MAXWRITE (1UL << 31)
-#define FATTR4_WORD1_MIMETYPE (1UL << 0)
-#define FATTR4_WORD1_MODE (1UL << 1)
-#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
-#define FATTR4_WORD1_NUMLINKS (1UL << 3)
-#define FATTR4_WORD1_OWNER (1UL << 4)
-#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
-#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
-#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
-#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
-#define FATTR4_WORD1_RAWDEV (1UL << 9)
-#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
-#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
-#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
-#define FATTR4_WORD1_SPACE_USED (1UL << 13)
-#define FATTR4_WORD1_SYSTEM (1UL << 14)
-#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
-#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
-#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
-#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
-#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
-#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
-#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
-#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
-#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
-#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
-#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
-#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
-#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
-#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
-#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
-#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
+#define FATTR4_WORD0_ACL BIT(FATTR4_ACL)
+#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT)
+#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE)
+#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME)
+#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE)
+#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING)
+#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED)
+#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE)
+#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID)
+#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL)
+#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE)
+#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL)
+#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS)
+#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN)
+#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS)
+#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE)
+#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK)
+#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME)
+#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD)
+#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE)
+
+#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32)
+#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32)
+#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32)
+#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32)
+#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32)
+#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32)
+#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32)
+#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32)
+#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32)
+#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32)
+#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32)
+#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32)
+#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32)
+#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32)
+#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32)
+#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32)
+#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32)
+#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32)
+#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32)
+#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32)
+#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32)
+#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32)
+#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32)
+#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32)
+#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32)
+
+#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64)
+#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64)
+#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64)
+#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
+#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
+#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -551,13 +674,13 @@ enum {
NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LAYOUTERROR,
-
NFSPROC4_CLNT_COPY_NOTIFY,
NFSPROC4_CLNT_GETXATTR,
NFSPROC4_CLNT_SETXATTR,
NFSPROC4_CLNT_LISTXATTRS,
NFSPROC4_CLNT_REMOVEXATTR,
+ NFSPROC4_CLNT_READ_PLUS,
};
/* nfs41 types */
@@ -717,6 +840,14 @@ struct nl4_server {
} u;
};
+enum nfs4_change_attr_type {
+ NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
+ NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
+ NFS4_CHANGE_TYPE_IS_UNDEFINED = 4,
+};
+
/*
* Options for setxattr. These match the flags for setxattr(2).
*/
@@ -725,4 +856,39 @@ enum nfs4_setxattr_options {
SETXATTR4_CREATE = 1,
SETXATTR4_REPLACE = 2,
};
+
+enum {
+ RCA4_TYPE_MASK_RDATA_DLG = 0,
+ RCA4_TYPE_MASK_WDATA_DLG = 1,
+ RCA4_TYPE_MASK_DIR_DLG = 2,
+ RCA4_TYPE_MASK_FILE_LAYOUT = 3,
+ RCA4_TYPE_MASK_BLK_LAYOUT = 4,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MIN = 8,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MAX = 9,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MIN = 12,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
+};
+
+enum nfs_cb_opnum4 {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+
+ /* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+
+ /* Callback operations new to NFSv4.2 */
+ OP_CB_OFFLOAD = 15,
+
+ OP_CB_ILLEGAL = 10044,
+};
+
#endif
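The block of FATTR4_WORDn macros above encodes one rule: attribute number N lives in 32-bit word N / 32 at bit N % 32, which is why the WORD1 and WORD2 forms subtract 32 and 64 before applying BIT(). A minimal user-space sketch of that arithmetic (the helper fattr4_set is hypothetical, not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Set attribute number 'attr' (0..95) in a three-word bitmap:
 * word = attr / 32, bit = attr % 32, matching FATTR4_WORD0/1/2. */
static void fattr4_set(uint32_t bitmap[3], unsigned int attr)
{
	bitmap[attr / 32] |= BIT(attr % 32);
}

int main(void)
{
	uint32_t bitmap[3] = { 0, 0, 0 };

	fattr4_set(bitmap, 4);	/* FATTR4_SIZE: word 0, bit 4 */
	fattr4_set(bitmap, 33);	/* FATTR4_MODE: word 1, bit 1 */
	fattr4_set(bitmap, 82);	/* FATTR4_XATTR_SUPPORT: word 2, bit 18 */

	printf("%08x %08x %08x\n", (unsigned int)bitmap[0],
	       (unsigned int)bitmap[1], (unsigned int)bitmap[2]);
	return 0;
}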
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a2c6455ea3fa..d59116ac8209 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -31,6 +31,10 @@
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>
+#ifdef CONFIG_NFS_FSCACHE
+#include <linux/netfs.h>
+#endif
+
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs3.h>
@@ -41,9 +45,14 @@
#include <linux/mempool.h>
/*
- * These are the default flags for swap requests
+ * Default limit on the number of transports to different server IPs
+ */
+#define NFS_MAX_TRANSPORTS 16
+
+/*
+ * Size of the NFS directory verifier
*/
-#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
+#define NFS_DIR_VERIFIER_SIZE 2
/*
* NFSv3/v4 Access mode cache entry
@@ -51,7 +60,10 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- const struct cred * cred;
+ kuid_t fsuid;
+ kgid_t fsgid;
+ struct group_info *group_info;
+ u64 timestamp;
__u32 mask;
struct rcu_head rcu_head;
};
@@ -71,14 +83,14 @@ struct nfs_open_context {
fl_owner_t flock_owner;
struct dentry *dentry;
const struct cred *cred;
- struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */
+ struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
unsigned long flags;
-#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
+#define NFS_CONTEXT_FILE_OPEN (4)
int error;
struct list_head list;
@@ -88,11 +100,17 @@ struct nfs_open_context {
struct nfs_open_dir_context {
struct list_head list;
- const struct cred *cred;
+ atomic_t cache_hits;
+ atomic_t cache_misses;
unsigned long attr_gencount;
+ __be32 verf[NFS_DIR_VERIFIER_SIZE];
__u64 dir_cookie;
- __u64 dup_cookie;
- signed char duped;
+ __u64 last_cookie;
+ pgoff_t page_index;
+ unsigned int dtsize;
+ bool force_clear;
+ bool eof;
+ struct rcu_head rcu_head;
};
/*
@@ -143,35 +161,72 @@ struct nfs_inode {
unsigned long attrtimeo_timestamp;
unsigned long attr_gencount;
- /* "Generation counter" for the attribute cache. This is
- * bumped whenever we update the metadata on the
- * server.
- */
- unsigned long cache_change_attribute;
struct rb_root access_cache;
struct list_head access_cache_entry_lru;
struct list_head access_cache_inode_lru;
- /*
- * This is the cookie verifier used for NFSv3 readdir
- * operations
- */
- __be32 cookieverf[2];
-
- atomic_long_t nrequests;
- struct nfs_mds_commit_info commit_info;
+ union {
+ /* Directory */
+ struct {
+ /* "Generation counter" for the attribute cache.
+ * This is bumped whenever we update the metadata
+ * on the server.
+ */
+ unsigned long cache_change_attribute;
+ /*
+ * This is the cookie verifier used for NFSv3 readdir
+ * operations
+ */
+ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
+ };
+ /* Regular file */
+ struct {
+ atomic_long_t nrequests;
+ atomic_long_t redirtied_pages;
+ struct nfs_mds_commit_info commit_info;
+ struct mutex commit_mutex;
+ };
+ };
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Readers: in-flight sillydelete RPC calls */
- /* Writers: rmdir */
- struct rw_semaphore rmdir_sem;
- struct mutex commit_mutex;
-
- /* track last access to cached pages */
- unsigned long page_index;
+ /* Keep track of out-of-order replies.
+ * The ooo array contains start/end pairs of
+ * numbers from the changeid sequence when
+ * the inode's iversion has been updated.
+ * It also contains end/start pairs (i.e. in reverse order)
+ * of sections of the changeid sequence that have
+ * been seen in replies from the server.
+ * Normally these should match, and when both
+ * A:B and B:A are found in ooo, they are both removed.
+ * And if a reply with A:B causes an iversion update
+ * of A:B, then neither is added.
+ * When a reply has pre_change that doesn't match
+ * iversion, then the changeid pair and any consequent
+ * change in iversion ARE added. Later replies
+ * might fill in the gaps, or possibly a gap is caused
+ * by a change from another client.
+ * When a file or directory is opened, if the ooo table
+ * is not empty, then we assume the gaps were due to
+ * another client and we invalidate the cached data.
+ *
+ * We can only track a limited number of concurrent gaps.
+ * Currently that limit is 16.
+ * We allocate the table on demand. If there is insufficient
+ * memory, then we probably cannot cache the file anyway
+ * so there is no loss.
+ */
+ struct {
+ int cnt;
+ struct {
+ u64 start, end;
+ } gap[16];
+ } *ooo;
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
@@ -186,14 +241,15 @@ struct nfs_inode {
/* how many bytes have been written/read and how many bytes queued up */
__u64 write_io;
__u64 read_io;
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache;
-#endif
- struct inode vfs_inode;
-
#ifdef CONFIG_NFS_V4_2
struct nfs4_xattr_cache *xattr_cache;
#endif
+ union {
+ struct inode vfs_inode;
+#ifdef CONFIG_NFS_FSCACHE
+ struct netfs_inode netfs; /* netfs context and VFS inode */
+#endif
+ };
};
struct nfs4_copy_state {
@@ -229,7 +285,6 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */
#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */
#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */
#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */
#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */
#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */
@@ -241,22 +296,24 @@ struct nfs4_copy_state {
BIT(13) /* Deferred cache invalidation */
#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */
+#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */
+#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
| NFS_INO_INVALID_MTIME \
| NFS_INO_INVALID_SIZE \
+ | NFS_INO_INVALID_NLINK \
+ | NFS_INO_INVALID_MODE \
| NFS_INO_INVALID_OTHER) /* inode metadata is invalid */
/*
* Bit offsets in flags field
*/
-#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
-#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
-#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
+#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
@@ -309,15 +366,6 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
-static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
-{
-#ifdef CONFIG_NFS_FSCACHE
- return NFS_I(inode)->fscache;
-#else
- return NULL;
-#endif
-}
-
static inline __u64 NFS_FILEID(const struct inode *inode)
{
return NFS_I(inode)->fileid;
@@ -333,17 +381,15 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
- | NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME;
+ nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
+ NFS_INO_INVALID_SIZE;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
}
-static inline int nfs_server_capable(struct inode *inode, int cap)
+static inline int nfs_server_capable(const struct inode *inode, int cap)
{
return NFS_SERVER(inode)->caps & cap;
}
@@ -368,27 +414,28 @@ extern void nfs_zap_caches(struct inode *);
extern void nfs_set_inode_stale(struct inode *inode);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *);
struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
-extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
+extern int nfs_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
+extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
-extern int nfs_permission(struct inode *, int);
+extern int nfs_permission(struct mnt_idmap *, struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
extern int nfs_attribute_cache_expired(struct inode *inode);
-extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
+extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern int nfs_clear_invalid_mapping(struct address_space *mapping);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_setattr(struct dentry *, struct iattr *);
+extern int nfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
-extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
@@ -404,9 +451,22 @@ extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
extern unsigned long nfs_inc_attr_generation_counter(void);
extern struct nfs_fattr *nfs_alloc_fattr(void);
+extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server);
+
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (label) {
+ kfree(label->label);
+ kfree(label);
+ }
+#endif
+}
static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
{
+ if (fattr)
+ nfs4_label_free(fattr->label);
kfree(fattr);
}
@@ -476,11 +536,11 @@ static inline const struct cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- struct iov_iter *iter);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter);
+int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t nfs_file_direct_read(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
+ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
/*
* linux/fs/nfs/dir.c
@@ -494,14 +554,13 @@ extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
extern void nfs_clear_verifier_delegated(struct inode *inode);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
- struct nfs_fh *fh, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+ struct nfs_fh *fh, struct nfs_fattr *fattr);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr, struct nfs4_label *label);
+ struct nfs_fattr *fattr);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
-extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res,
- bool may_block);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block);
/*
* linux/fs/nfs/symlink.c
@@ -536,10 +595,10 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
* linux/fs/nfs/write.c
*/
extern int nfs_congestion_kb;
-extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
-extern int nfs_flush_incompatible(struct file *file, struct page *page);
-extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
+extern int nfs_update_folio(struct file *file, struct folio *folio,
+ unsigned int offset, unsigned int count);
/*
* Try to write back everything synchronously (but check the
@@ -547,26 +606,26 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
*/
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page *page);
-extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
+void nfs_commit_begin(struct nfs_mds_commit_info *cinfo);
+bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
-static inline int
-nfs_have_writebacks(struct inode *inode)
+static inline bool nfs_have_writebacks(const struct inode *inode)
{
- return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ if (S_ISREG(inode->i_mode))
+ return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ return false;
}
/*
* linux/fs/nfs/read.c
*/
-extern int nfs_readpage(struct file *, struct page *);
-extern int nfs_readpages(struct file *, struct address_space *,
- struct list_head *, unsigned);
-extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
- struct page *);
+int nfs_read_folio(struct file *, struct folio *);
+void nfs_readahead(struct readahead_control *);
/*
* inline functions
@@ -586,8 +645,31 @@ nfs_fileid_to_ino_t(u64 fileid)
return ino;
}
+static inline void nfs_ooo_clear(struct nfs_inode *nfsi)
+{
+ nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+ kfree(nfsi->ooo);
+ nfsi->ooo = NULL;
+}
+
+static inline bool nfs_ooo_test(struct nfs_inode *nfsi)
+{
+ return (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER) ||
+ (nfsi->ooo && nfsi->ooo->cnt > 0);
+}
+
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+/* We need to block new opens while a file is being unlinked.
+ * If it is opened *before* we decide to unlink, we will silly-rename
+ * instead. If it is opened *after*, the open needs to create the file or it will fail.
+ * If we allow the two to race, we could end up with a file that is open
+ * but deleted on the server, resulting in ESTALE.
+ * So use ->d_fsdata to record when the unlink is happening
+ * and block dentry revalidation while it is set.
+ */
+#define NFS_FSDATA_BLOCKED ((void*)1)
# undef ifdebug
# ifdef NFS_DEBUG
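The ooo gap-tracking comment in struct nfs_inode above describes a pairing discipline: a forward A:B entry and a reverse B:A entry cancel each other, and up to 16 unmatched gaps are kept. A minimal user-space sketch of that discipline, assuming the same 16-entry table (ooo_record is a hypothetical name, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ooo_table {
	int cnt;
	struct { uint64_t start, end; } gap[16];
};

/* Record a start/end changeid pair; a matching reverse pair cancels it.
 * Returns false if the table is full and the gap cannot be tracked. */
static bool ooo_record(struct ooo_table *t, uint64_t start, uint64_t end)
{
	int i;

	for (i = 0; i < t->cnt; i++) {
		if (t->gap[i].start == end && t->gap[i].end == start) {
			t->gap[i] = t->gap[--t->cnt]; /* A:B cancels B:A */
			return true;
		}
	}
	if (t->cnt >= 16)
		return false;
	t->gap[t->cnt].start = start;
	t->gap[t->cnt].end = end;
	t->cnt++;
	return true;
}

int main(void)
{
	struct ooo_table t = { .cnt = 0 };

	ooo_record(&t, 5, 6);	/* iversion updated across a gap */
	ooo_record(&t, 6, 5);	/* the reply for that gap arrives */
	printf("outstanding gaps: %d\n", t.cnt);	/* prints 0 */
	return 0;
}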
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7eae72a8762e..92de074e63b9 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -48,6 +48,7 @@ struct nfs_client {
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -62,7 +63,9 @@ struct nfs_client {
u32 cl_minorversion;/* NFSv4 minorversion */
unsigned int cl_nconnect; /* Number of connections */
- const char * cl_principal; /* used for machine cred */
+ unsigned int cl_max_connect; /* max number of xprts allowed */
+ const char * cl_principal; /* used for machine cred */
+ struct xprtsec_parms cl_xprtsec; /* xprt security policy */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -119,13 +122,9 @@ struct nfs_client {
* This is used to generate the mv0 callback address.
*/
char cl_ipaddr[48];
-
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache; /* client index cache cookie */
-#endif
-
struct net *cl_net;
struct list_head pending_cb_stateids;
+ struct rcu_head rcu;
};
/*
@@ -142,7 +141,8 @@ struct nfs_server {
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
atomic_long_t writeback; /* number of writeback pages */
- int flags; /* various flags */
+ unsigned int write_congested;/* flag set when writeback gets too high */
+ unsigned int flags; /* various flags */
/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
@@ -153,7 +153,12 @@ struct nfs_server {
#define NFS_MOUNT_LOCAL_FCNTL 0x200000
#define NFS_MOUNT_SOFTERR 0x400000
#define NFS_MOUNT_SOFTREVAL 0x800000
+#define NFS_MOUNT_WRITE_EAGER 0x01000000
+#define NFS_MOUNT_WRITE_WAIT 0x02000000
+#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
+#define NFS_MOUNT_SHUTDOWN 0x08000000
+ unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
unsigned int rpages; /* read size (in pages) */
@@ -178,7 +183,11 @@ struct nfs_server {
#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */
+ enum nfs4_change_attr_type
+ change_attr_type;/* Description of change attribute */
+
struct nfs_fsid fsid;
+ int s_sysfs_id; /* sysfs dentry index */
__u64 maxfilesize; /* maximum file size */
struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
@@ -187,8 +196,8 @@ struct nfs_server {
struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
- struct nfs_fscache_key *fscache_key; /* unique key for superblock */
- struct fscache_cookie *fscache; /* superblock cookie */
+ struct fscache_volume *fscache; /* superblock cookie */
+ char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
u32 pnfs_blksize; /* layout_blksize attr */
@@ -231,6 +240,7 @@ struct nfs_server {
struct list_head delegations;
struct list_head ss_copies;
+ unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
#define NFS_MIG_IN_TRANSITION (1)
@@ -254,6 +264,9 @@ struct nfs_server {
/* User namespace info */
const struct cred *cred;
+ bool has_sec_mnt_opts;
+ struct kobject kobj;
+ struct rcu_head rcu;
};
/* Server capabilities */
@@ -262,16 +275,9 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
-/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
#define NFS_CAP_LGOPEN (1U << 5)
-#define NFS_CAP_FILEID (1U << 6)
-#define NFS_CAP_MODE (1U << 7)
-#define NFS_CAP_NLINK (1U << 8)
-#define NFS_CAP_OWNER (1U << 9)
-#define NFS_CAP_OWNER_GROUP (1U << 10)
-#define NFS_CAP_ATIME (1U << 11)
-#define NFS_CAP_CTIME (1U << 12)
-#define NFS_CAP_MTIME (1U << 13)
+#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
+#define NFS_CAP_CASE_PRESERVING (1U << 7)
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
@@ -287,5 +293,7 @@ struct nfs_server {
#define NFS_CAP_LAYOUTERROR (1U << 26)
#define NFS_CAP_COPY_NOTIFY (1U << 27)
#define NFS_CAP_XATTR (1U << 28)
-
+#define NFS_CAP_READ_PLUS (1U << 29)
+#define NFS_CAP_FS_LOCATIONS (1U << 30)
+#define NFS_CAP_MOVEABLE (1U << 31)
#endif
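The NFS_CAP_* values above are single-bit server capabilities tested through nfs_server_capable(), shown earlier in this series. A small stand-alone sketch of that pattern (struct server and server_capable are simplified stand-ins, not the kernel types):

#include <stdbool.h>
#include <stdio.h>

#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
#define NFS_CAP_CASE_PRESERVING  (1U << 7)
#define NFS_CAP_XATTR            (1U << 28)

struct server { unsigned int caps; };

/* Mirrors nfs_server_capable(): a capability is a single bit in caps. */
static bool server_capable(const struct server *s, unsigned int cap)
{
	return (s->caps & cap) != 0;
}

int main(void)
{
	struct server s = { .caps = NFS_CAP_XATTR | NFS_CAP_CASE_PRESERVING };

	printf("xattr: %d\n", server_capable(&s, NFS_CAP_XATTR));
	printf("case-insensitive: %d\n",
	       server_capable(&s, NFS_CAP_CASE_INSENSITIVE));
	return 0;
}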
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 027874c36c88..8d946089d151 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -119,16 +119,4 @@ enum nfs_stat_eventcounters {
__NFSIOS_COUNTSMAX,
};
-/*
- * NFS local caching servicing counters
- */
-enum nfs_stat_fscachecounters {
- NFSIOS_FSCACHE_PAGES_READ_OK,
- NFSIOS_FSCACHE_PAGES_READ_FAIL,
- NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
- NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
- NFSIOS_FSCACHE_PAGES_UNCACHED,
- __NFSIOS_FSCACHEMAX,
-};
-
#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index c32c15216da3..1c315f854ea8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -25,6 +25,7 @@
enum {
PG_BUSY = 0, /* nfs_{un}lock_request */
PG_MAPPED, /* page private set for buffered io */
+ PG_FOLIO, /* Tracking a folio (unset for O_DIRECT) */
PG_CLEAN, /* write succeeded */
PG_COMMIT_TO_DS, /* used by pnfs layouts */
PG_INODE_REF, /* extra ref held by inode when in writeback */
@@ -41,7 +42,10 @@ enum {
struct nfs_inode;
struct nfs_page {
struct list_head wb_list; /* Defines state of page: */
- struct page *wb_page; /* page to read in/write out */
+ union {
+ struct page *wb_page; /* page to read in/write out */
+ struct folio *wb_folio;
+ };
struct nfs_lock_context *wb_lock_context; /* lock context info */
pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_MASK */
@@ -55,6 +59,7 @@ struct nfs_page {
unsigned short wb_nio; /* Number of I/O attempts */
};
+struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
@@ -64,6 +69,9 @@ struct nfs_pageio_ops {
unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
struct nfs_page *);
void (*pg_cleanup)(struct nfs_pageio_descriptor *);
+ struct nfs_pgio_mirror *
+ (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
+ u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};
struct nfs_rw_ops {
@@ -97,6 +105,9 @@ struct nfs_pageio_descriptor {
struct pnfs_layout_segment *pg_lseg;
struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *pg_netfs;
+#endif
unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count;
@@ -113,10 +124,15 @@ struct nfs_pageio_descriptor {
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
- struct page *page,
- unsigned int offset,
- unsigned int count);
+extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+ struct page *page,
+ unsigned int pgbase,
+ loff_t offset,
+ unsigned int count);
+extern struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+ struct folio *folio,
+ unsigned int offset,
+ unsigned int count);
extern void nfs_release_request(struct nfs_page *);
@@ -141,7 +157,9 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
@@ -149,6 +167,66 @@ extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+/**
+ * nfs_page_to_folio - Retrieve a struct folio for the request
+ * @req: pointer to a struct nfs_page
+ *
+ * If a folio was assigned to @req, then return it; otherwise return NULL.
+ */
+static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
+{
+ if (test_bit(PG_FOLIO, &req->wb_flags))
+ return req->wb_folio;
+ return NULL;
+}
+
+/**
+ * nfs_page_to_page - Retrieve a struct page for the request
+ * @req: pointer to a struct nfs_page
+ * @pgbase: folio byte offset
+ *
+ * Return the page containing the byte that is at offset @pgbase relative
+ * to the start of the folio.
+ * Note: The request starts at offset @req->wb_pgbase.
+ */
+static inline struct page *nfs_page_to_page(const struct nfs_page *req,
+ size_t pgbase)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return req->wb_page;
+ return folio_page(folio, pgbase >> PAGE_SHIFT);
+}
+
+/**
+ * nfs_page_to_inode - Retrieve an inode for the request
+ * @req: pointer to a struct nfs_page
+ */
+static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return page_file_mapping(req->wb_page)->host;
+ return folio_file_mapping(folio)->host;
+}
+
+/**
+ * nfs_page_max_length - Retrieve the maximum possible length for a request
+ * @req: pointer to a struct nfs_page
+ *
+ * Returns the maximum possible length of a request
+ */
+static inline size_t nfs_page_max_length(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return PAGE_SIZE;
+ return folio_size(folio);
+}
+
/*
* Lock the page of an asynchronous request
*/
@@ -198,8 +276,7 @@ nfs_list_entry(struct list_head *head)
return list_entry(head, struct nfs_page, wb_list);
}
-static inline
-loff_t req_offset(struct nfs_page *req)
+static inline loff_t req_offset(const struct nfs_page *req)
{
return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
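The nfs_page helpers above reduce to shift-and-mask arithmetic: req_offset() combines wb_index and wb_offset into an absolute file offset, and nfs_page_to_page() resolves a byte offset within a folio to a constituent page. A user-space sketch of both computations, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long wb_index = 3;	/* page-cache index of the request */
	unsigned long wb_offset = 100;	/* offset within that page */
	unsigned long pgbase = 9000;	/* byte offset into a 16 KiB folio */

	/* req_offset(): absolute file offset of the request */
	printf("file offset: %lu\n", (wb_index << PAGE_SHIFT) + wb_offset);

	/* nfs_page_to_page(): which page of the folio holds byte pgbase */
	printf("page %lu, offset in page %lu\n",
	       pgbase >> PAGE_SHIFT, pgbase & ~PAGE_MASK);
	return 0;
}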
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
new file mode 100644
index 000000000000..22265b1ff080
--- /dev/null
+++ b/include/linux/nfs_ssc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/nfs_ssc.h
+ *
+ * Author: Dai Ngo <dai.ngo@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/svc.h>
+
+extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
+
+/*
+ * NFS_V4
+ */
+struct nfs4_ssc_client_ops {
+ struct file *(*sco_open)(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh, nfs4_stateid *stateid);
+ void (*sco_close)(struct file *filep);
+};
+
+/*
+ * NFS_FS
+ */
+struct nfs_ssc_client_ops {
+ void (*sco_sb_deactive)(struct super_block *sb);
+};
+
+struct nfs_ssc_client_ops_tbl {
+ const struct nfs4_ssc_client_ops *ssc_nfs4_ops;
+ const struct nfs_ssc_client_ops *ssc_nfs_ops;
+};
+
+extern void nfs42_ssc_register_ops(void);
+extern void nfs42_ssc_unregister_ops(void);
+
+extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops);
+extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops);
+
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh, nfs4_stateid *stateid)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid);
+ return ERR_PTR(-EIO);
+}
+
+static inline void nfs42_ssc_close(struct file *filep)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
+}
+#endif
+
+struct nfsd4_ssc_umount_item {
+ struct list_head nsui_list;
+ bool nsui_busy;
+ /*
+ * nsui_refcnt is initialized to 2: 1 for the list and 1 for the consumer.
+ * The entry is removed when the refcount drops to 1 and nsui_expire expires.
+ */
+ refcount_t nsui_refcnt;
+ unsigned long nsui_expire;
+ struct vfsmount *nsui_vfsmount;
+ char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
+};
+
+/*
+ * NFS_FS
+ */
+extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops);
+extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops);
+
+static inline void nfs_do_sb_deactive(struct super_block *sb)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs_ops)
+ (*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb);
+}
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9408f3252c8e..d09b9773b20c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -15,6 +15,8 @@
#define NFS_DEF_FILE_IO_SIZE (4096U)
#define NFS_MIN_FILE_IO_SIZE (1024U)
+#define NFS_BITMASK_SZ 3
+
struct nfs4_string {
unsigned int len;
char *data;
@@ -150,6 +152,8 @@ struct nfs_fsinfo {
__u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
__u32 clone_blksize; /* granularity of a CLONE operation */
+ enum nfs4_change_attr_type
+ change_attr_type; /* Info about change attr */
__u32 xattr_support; /* User xattrs supported */
};
@@ -273,6 +277,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
const struct cred *cred;
+ struct pnfs_layout_hdr *lo;
gfp_t gfp_flags;
};
@@ -483,7 +488,6 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs4_label *f_label;
struct nfs_seqid * seqid;
const struct nfs_server *server;
fmode_t delegation_type;
@@ -526,6 +530,7 @@ struct nfs_closeargs {
fmode_t fmode;
u32 share_access;
const u32 * bitmask;
+ u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
};
@@ -608,7 +613,8 @@ struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
- const u32 * bitmask;
+ const u32 *bitmask;
+ u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
};
@@ -649,6 +655,7 @@ struct nfs_pgio_args {
unsigned int replen; /* used by read */
struct {
const u32 * bitmask; /* used by write */
+ u32 bitmask_store[NFS_BITMASK_SZ]; /* used by write */
enum nfs3_stable_how stable; /* used by write */
};
};
@@ -657,12 +664,13 @@ struct nfs_pgio_args {
struct nfs_pgio_res {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- __u32 count;
+ __u64 count;
__u32 op_status;
union {
struct {
unsigned int replen; /* used by read */
int eof; /* used by read */
+ void * scratch; /* used by read */
};
struct {
struct nfs_writeverf * verf; /* used by write */
@@ -738,18 +746,30 @@ struct nfs_auth_info {
*/
struct nfs_entry {
__u64 ino;
- __u64 cookie,
- prev_cookie;
+ __u64 cookie;
const char * name;
unsigned int len;
int eof;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
unsigned char d_type;
struct nfs_server * server;
};
+struct nfs_readdir_arg {
+ struct dentry *dentry;
+ const struct cred *cred;
+ __be32 *verf;
+ u64 cookie;
+ struct page **pages;
+ unsigned int page_len;
+ bool plus;
+};
+
+struct nfs_readdir_res {
+ __be32 *verf;
+};
+
/*
* The following types are for NFSv2 only.
*/
@@ -781,9 +801,17 @@ struct nfs_setattrargs {
const struct nfs4_label *label;
};
+enum nfs4_acl_type {
+ NFS4ACL_NONE = 0,
+ NFS4ACL_ACL,
+ NFS4ACL_DACL,
+ NFS4ACL_SACL,
+};
+
struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -795,6 +823,7 @@ struct nfs_setaclres {
struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -803,6 +832,7 @@ struct nfs_getaclargs {
#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
struct nfs_getaclres {
struct nfs4_sequence_res seq_res;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
@@ -812,7 +842,6 @@ struct nfs_getaclres {
struct nfs_setattrres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
const struct nfs_server * server;
};
@@ -1019,7 +1048,6 @@ struct nfs4_create_res {
const struct nfs_server * server;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info dir_cinfo;
};
@@ -1044,7 +1072,6 @@ struct nfs4_getattr_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
};
struct nfs4_link_arg {
@@ -1059,7 +1086,6 @@ struct nfs4_link_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
};
@@ -1076,7 +1102,6 @@ struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
- struct nfs4_label *label;
};
struct nfs4_lookupp_arg {
@@ -1090,7 +1115,6 @@ struct nfs4_lookupp_res {
const struct nfs_server *server;
struct nfs_fattr *fattr;
struct nfs_fh *fh;
- struct nfs4_label *label;
};
struct nfs4_lookup_root_arg {
@@ -1180,6 +1204,8 @@ struct nfs4_server_caps_res {
u32 has_links;
u32 has_symlinks;
u32 fh_expire_type;
+ u32 case_insensitive;
+ u32 case_preserving;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1197,7 +1223,7 @@ struct nfs4_fs_location {
#define NFS4_FS_LOCATIONS_MAXENTRIES 10
struct nfs4_fs_locations {
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
const struct nfs_server *server;
struct nfs4_pathname fs_path;
int nlocations;
@@ -1502,6 +1528,7 @@ struct nfs42_seek_res {
struct nfs42_setxattrargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fh;
+ const u32 *bitmask;
const char *xattr_name;
u32 xattr_flags;
size_t xattr_len;
@@ -1511,6 +1538,8 @@ struct nfs42_setxattrargs {
struct nfs42_setxattrres {
struct nfs4_sequence_res seq_res;
struct nfs4_change_info cinfo;
+ struct nfs_fattr *fattr;
+ const struct nfs_server *server;
};
struct nfs42_getxattrargs {
@@ -1575,6 +1604,7 @@ enum {
NFS_IOHDR_STAT,
NFS_IOHDR_RESEND_PNFS,
NFS_IOHDR_RESEND_MDS,
+ NFS_IOHDR_UNSTABLE_WRITES,
};
struct nfs_io_completion;
@@ -1593,6 +1623,9 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *netfs;
+#endif
int pnfs_error;
int error; /* merge with pnfs_error */
@@ -1611,8 +1644,8 @@ struct nfs_pgio_header {
__u64 mds_offset; /* Filelayout dense stripe */
struct nfs_page_array page_array;
struct nfs_client *ds_clp; /* pNFS data server */
- int ds_commit_idx; /* ds index if ds_clp is set */
- int pgio_mirror_idx;/* mirror index in pgio layer */
+ u32 ds_commit_idx; /* ds index if ds_clp is set */
+ u32 pgio_mirror_idx;/* mirror index in pgio layer */
};
struct nfs_mds_commit_info {
@@ -1679,6 +1712,7 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
+ struct rpc_task task;
const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
@@ -1716,16 +1750,14 @@ struct nfs_rpc_ops {
int (*submount) (struct fs_context *, struct nfs_server *);
int (*try_get_tree) (struct fs_context *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *,
- struct inode *);
+ struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
int (*lookup) (struct inode *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *,
- struct nfs4_label *);
+ struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
- int (*access) (struct inode *, struct nfs_access_entry *);
+ struct nfs_fattr *);
+ int (*access) (struct inode *, struct nfs_access_entry *, const struct cred *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*create) (struct inode *, struct dentry *,
@@ -1740,12 +1772,11 @@ struct nfs_rpc_ops {
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
int (*link) (struct inode *, struct inode *, const struct qstr *);
- int (*symlink) (struct inode *, struct dentry *, struct page *,
+ int (*symlink) (struct inode *, struct dentry *, struct folio *,
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
- int (*readdir) (struct dentry *, const struct cred *,
- u64, struct page **, unsigned int, bool);
+ int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
@@ -1784,16 +1815,12 @@ struct nfs_rpc_ops {
struct nfs_server *(*create_server)(struct fs_context *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+ int (*discover_trunking)(struct nfs_server *, struct nfs_fh *);
+ void (*enable_swap)(struct inode *inode);
+ void (*disable_swap)(struct inode *inode);
};
/*
- * NFS_CALL(getattr, inode, (fattr));
- * into
- * NFS_PROTO(inode)->getattr(fattr);
- */
-#define NFS_CALL(op, inode, args) NFS_PROTO(inode)->op args
-
-/*
* Function vectors etc. for the NFS client
*/
extern const struct nfs_rpc_ops nfs_v2_clientops;
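struct nfs_rpc_ops above is a per-protocol-version vtable; with the NFS_CALL() wrapper macro removed by this hunk, callers dispatch directly through the ops vector. A hypothetical user-space sketch of that dispatch style (the types and names are simplified stand-ins, not the kernel definitions):

#include <stdio.h>

struct rpc_ops {
	int version;
	int (*getattr)(const char *fh);
};

static int v4_getattr(const char *fh)
{
	printf("v4 GETATTR on %s\n", fh);
	return 0;
}

static const struct rpc_ops nfs_v4_ops = {
	.version = 4,
	.getattr = v4_getattr,
};

int main(void)
{
	/* analogous to NFS_PROTO(inode)->getattr(...) */
	const struct rpc_ops *ops = &nfs_v4_ops;

	return ops->getattr("fh0");
}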
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 103d44695323..8e76a79cdc6a 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -38,5 +38,11 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
extern int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl);
+extern bool
+nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
+ struct posix_acl **pacl);
+extern bool
+nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode,
+ struct posix_acl *acl, int encode_entries, int typeflag);
#endif /* __LINUX_NFSACL_H */
diff --git a/include/linux/nitro_enclaves.h b/include/linux/nitro_enclaves.h
new file mode 100644
index 000000000000..d91ef2bfdf47
--- /dev/null
+++ b/include/linux/nitro_enclaves.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _LINUX_NITRO_ENCLAVES_H_
+#define _LINUX_NITRO_ENCLAVES_H_
+
+#include <uapi/linux/nitro_enclaves.h>
+
+#endif /* _LINUX_NITRO_ENCLAVES_H_ */
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index b22782225f27..cbe5fd1dd2e7 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -8,6 +8,8 @@
#ifndef NL802154_H
#define NL802154_H
+#include <net/netlink.h>
+
#define IEEE802154_NL_NAME "802.15.4 MAC"
#define IEEE802154_MCAST_COORD_NAME "coordinator"
#define IEEE802154_MCAST_BEACON_NAME "beacon"
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 499e486b3722..e0bf8367b274 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -47,7 +47,7 @@ enum utf16_endian {
/* nls_base.c */
extern int __register_nls(struct nls_table *, struct module *);
extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *charset);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
#define register_nls(nls) __register_nls((nls), THIS_MODULE)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 750c7f395ca9..f53438eae815 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,19 +7,19 @@
#include <linux/sched.h>
#include <asm/irq.h>
-#if defined(CONFIG_HAVE_NMI_WATCHDOG)
+
+/* Arch-specific watchdogs might need to share extra watchdog-related APIs. */
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
+void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
-bool is_hardlockup(void);
extern int watchdog_user_enabled;
-extern int nmi_watchdog_user_enabled;
-extern int soft_watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
@@ -35,6 +35,7 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */
@@ -69,17 +70,17 @@ static inline void reset_hung_task_detector(void) { }
* 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
* bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
*
- * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
- * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
+ * 'watchdog_softlockup_user_enabled' are variables that are only used as an
* 'interface' between the parameters in /proc/sys/kernel and the internal
* state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
* handled differently because its value is not boolean, and the lockup
* detectors are 'suspended' while 'watchdog_thresh' is equal zero.
*/
-#define NMI_WATCHDOG_ENABLED_BIT 0
-#define SOFT_WATCHDOG_ENABLED_BIT 1
-#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+#define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0
+#define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1
+#define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
+#define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
@@ -88,50 +89,63 @@ extern unsigned int hardlockup_panic;
static inline void hardlockup_detector_disable(void) {}
#endif
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-# define NMI_WATCHDOG_SYSCTL_PERM 0644
+/* Sparc64 has a special implementation that is always enabled. */
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
+void arch_touch_nmi_watchdog(void);
#else
-# define NMI_WATCHDOG_SYSCTL_PERM 0444
+static inline void arch_touch_nmi_watchdog(void) { }
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
+void watchdog_hardlockup_touch_cpu(unsigned int cpu);
+void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
-extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
-extern void hardlockup_detector_perf_disable(void);
-extern void hardlockup_detector_perf_enable(void);
extern void hardlockup_detector_perf_cleanup(void);
-extern int hardlockup_detector_perf_init(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
-static inline void hardlockup_detector_perf_disable(void) { }
-static inline void hardlockup_detector_perf_enable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
-# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
-static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
-static inline void arch_touch_nmi_watchdog(void) {}
-# else
-static inline int hardlockup_detector_perf_init(void) { return 0; }
-# endif
#endif
-void watchdog_nmi_stop(void);
-void watchdog_nmi_start(void);
-int watchdog_nmi_probe(void);
-int watchdog_nmi_enable(unsigned int cpu);
-void watchdog_nmi_disable(unsigned int cpu);
+void watchdog_hardlockup_stop(void);
+void watchdog_hardlockup_start(void);
+int watchdog_hardlockup_probe(void);
+void watchdog_hardlockup_enable(unsigned int cpu);
+void watchdog_hardlockup_disable(unsigned int cpu);
+
+void lockup_detector_reconfigure(void);
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
+void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
+#else
+static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
+#endif
/**
- * touch_nmi_watchdog - restart NMI watchdog timeout.
+ * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
*
- * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
- * may be used to reset the timeout - for code which intentionally
- * disables interrupts for a long time. This call is stateless.
+ * If we support detecting hardlockups, touch_nmi_watchdog() may be
+ * used to pet the watchdog (reset the timeout) - for code which
+ * intentionally disables interrupts for a long time. This call is stateless.
+ *
+ * Though this function has "nmi" in the name, the hardlockup watchdog might
+ * not be backed by NMIs. This function will likely be renamed to
+ * touch_hardlockup_watchdog() in the future.
*/
static inline void touch_nmi_watchdog(void)
{
+ /*
+ * Pass on to the hardlockup detector selected via CONFIG_. Note that
+ * the hardlockup detector may not be arch-specific nor using NMIs
+ * and the arch_touch_nmi_watchdog() function will likely be renamed
+ * in the future.
+ */
arch_touch_nmi_watchdog();
+
touch_softlockup_watchdog();
}
@@ -143,31 +157,31 @@ static inline void touch_nmi_watchdog(void)
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, false);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
return true;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
return true;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
- arch_trigger_cpumask_backtrace(mask, false);
+ arch_trigger_cpumask_backtrace(mask, -1);
return true;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
- arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
+ arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
return true;
}
/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self,
+ int exclude_cpu,
void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
@@ -176,7 +190,7 @@ static inline bool trigger_all_cpu_backtrace(void)
{
return false;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
return false;
}
@@ -192,24 +206,26 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+bool arch_perf_nmi_is_available(void);
#endif
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
- defined(CONFIG_HARDLOCKUP_DETECTOR)
+ defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
-struct ctl_table;
-int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
-
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif
+#ifdef CONFIG_NMI_CHECK_CPU
+void nmi_backtrace_stall_snap(const struct cpumask *btp);
+void nmi_backtrace_stall_check(const struct cpumask *btp);
+#else
+static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
+static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
+#endif
+
#endif
diff --git a/include/linux/node.h b/include/linux/node.h
index 4866f32a02d8..dfc004e4bee7 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -2,15 +2,15 @@
/*
* include/linux/node.h - generic node definition
*
- * This is mainly for topological representation. We define the
- * basic 'struct node' here, which can be embedded in per-arch
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/node.c
- * and system devices are handled in drivers/base/sys.c.
+ * and system devices are handled in drivers/base/sys.c.
*
* Nodes are exported via driverfs in the class/node/devices/
- * directory.
+ * directory.
*/
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
@@ -18,23 +18,34 @@
#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/list.h>
-#include <linux/workqueue.h>
/**
- * struct node_hmem_attrs - heterogeneous memory performance attributes
+ * struct access_coordinate - generic performance coordinates container
*
* @read_bandwidth: Read bandwidth in MB/s
* @write_bandwidth: Write bandwidth in MB/s
* @read_latency: Read latency in nanoseconds
* @write_latency: Write latency in nanoseconds
*/
-struct node_hmem_attrs {
+struct access_coordinate {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
unsigned int write_latency;
};
+/*
+ * ACCESS_COORDINATE_LOCAL correlates to ACCESS CLASS 0
+ * - access_coordinate between target node and nearest initiator node
+ * ACCESS_COORDINATE_CPU correlates to ACCESS CLASS 1
+ * - access_coordinate between target node and nearest CPU node
+ */
+enum access_coordinate_class {
+ ACCESS_COORDINATE_LOCAL,
+ ACCESS_COORDINATE_CPU,
+ ACCESS_COORDINATE_MAX
+};
+
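
A hypothetical provider sketch using the renamed structure; the bandwidth and
latency numbers are illustrative only.

static void report_node_perf(unsigned int nid)
{
	struct access_coordinate coord = {
		.read_bandwidth		= 8192,	/* MB/s */
		.write_bandwidth	= 4096,	/* MB/s */
		.read_latency		= 150,	/* ns */
		.write_latency		= 200,	/* ns */
	};

	/* Class 1: coordinates between the target node and nearest CPU. */
	node_set_perf_attrs(nid, &coord, ACCESS_COORDINATE_CPU);
}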
enum cache_indexing {
NODE_CACHE_DIRECT_MAP,
NODE_CACHE_INDEXED,
@@ -66,8 +77,8 @@ struct node_cache_attrs {
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
- unsigned access);
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access);
#else
static inline void node_add_cache(unsigned int nid,
struct node_cache_attrs *cache_attrs)
@@ -75,8 +86,8 @@ static inline void node_add_cache(unsigned int nid,
}
static inline void node_set_perf_attrs(unsigned int nid,
- struct node_hmem_attrs *hmem_attrs,
- unsigned access)
+ struct access_coordinate *coord,
+ enum access_coordinate_class access)
{
}
#endif
@@ -84,10 +95,6 @@ static inline void node_set_perf_attrs(unsigned int nid,
struct node {
struct device dev;
struct list_head access_list;
-
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
- struct work_struct node_work;
-#endif
#ifdef CONFIG_HMEM_REPORTING
struct list_head cache_attrs;
struct device *cache_dev;
@@ -96,21 +103,22 @@ struct node {
struct memory_block;
extern struct node *node_devices[];
-typedef void (*node_registration_func_t)(struct node *);
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context);
#else
-static inline int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn)
+static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
- return 0;
}
#endif
extern void unregister_node(struct node *node);
#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
/* Core of the node registration - only memory hotplug should use this */
extern int __register_one_node(int nid);
@@ -127,8 +135,8 @@ static inline int register_one_node(int nid)
error = __register_one_node(nid);
if (error)
return error;
- /* link memory sections under this node */
- error = link_mem_sections(nid, start_pfn, end_pfn);
+ register_memory_blocks_under_node(nid, start_pfn, end_pfn,
+ MEMINIT_EARLY);
}
return error;
@@ -141,13 +149,11 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned access);
-
-#ifdef CONFIG_HUGETLBFS
-extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister);
-#endif
+ enum access_coordinate_class access);
#else
+static inline void node_dev_init(void)
+{
+}
static inline int __register_one_node(int nid)
{
return 0;
@@ -171,11 +177,6 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
-
-static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
- node_registration_func_t unreg)
-{
-}
#endif
#define to_node(device) container_of(device, struct node, dev)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 27e7fa36f707..b61438313a73 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -42,11 +42,11 @@
* void nodes_shift_right(dst, src, n) Shift right
* void nodes_shift_left(dst, src, n) Shift left
*
- * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES

+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
* or MAX_NUMNODES
- * int first_unset_node(mask) First node not set in mask, or
+ * unsigned int first_unset_node(mask) First node not set in mask, or
* MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
@@ -90,12 +90,13 @@
* for such situations. See below and CPUMASK_ALLOC also.
*/
-#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
+#include <linux/minmax.h>
+#include <linux/nodemask_types.h>
#include <linux/numa.h>
+#include <linux/random.h>
-typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
/**
@@ -119,7 +120,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
* are called in both __init and non-__init functions, if they are not
- * inlined we will end up with a section mis-match error (of the type of
+ * inlined we will end up with a section mismatch error (of the type of
* freeable items not being freed). So we must use __always_inline here
* to fix the problem. If other functions in the future also end up in
* this situation they will also need to be annotated as __always_inline
@@ -153,7 +154,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
@@ -200,7 +201,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -208,7 +209,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -216,20 +217,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
@@ -260,15 +261,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
+static inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
+static inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -276,7 +277,14 @@ static inline int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-int __next_node_in(int node, const nodemask_t *srcp);
+static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+{
+ unsigned int ret = __next_node(node, srcp);
+
+ if (ret == MAX_NUMNODES)
+ ret = __first_node(srcp);
+ return ret;
+}
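
Inlining __next_node_in() makes the wrap-around visible at the call site. A
sketch of the usual round-robin pattern (hypothetical helper):

static int pick_next_node(int prev, const nodemask_t *allowed)
{
	/* Wraps past the end; MAX_NUMNODES only if the mask is empty. */
	int nid = next_node_in(prev, *allowed);

	return nid == MAX_NUMNODES ? NUMA_NO_NODE : nid;
}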
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
@@ -296,9 +304,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(int,MAX_NUMNODES,
+ return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
@@ -375,14 +383,13 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
}
#if MAX_NUMNODES > 1
-#define for_each_node_mask(node, mask) \
- for ((node) = first_node(mask); \
- (node) < MAX_NUMNODES; \
- (node) = next_node((node), (mask)))
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
-#define for_each_node_mask(node, mask) \
- if (!nodes_empty(mask)) \
- for ((node) = 0; (node) < 1; (node)++)
+#define for_each_node_mask(node, mask) \
+ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */
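
The MAX_NUMNODES == 1 form is rewritten so the macro expands to a single for
statement rather than an if-wrapped loop. Illustrative iteration (equivalent
to nodes_weight(), shown only to demonstrate the macro):

static unsigned int count_set_nodes(const nodemask_t *mask)
{
	unsigned int count = 0;
	int nid;

	for_each_node_mask(nid, *mask)
		count++;
	return count;
}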
/*
@@ -399,6 +406,7 @@ enum node_states {
#endif
N_MEMORY, /* The node has memory(regular, high, movable) */
N_CPU, /* The node has one or more cpus */
+ N_GENERIC_INITIATOR, /* The node has one or more Generic Initiators */
NR_NODE_STATES
};
@@ -435,11 +443,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline int next_online_node(int nid)
+static inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline int next_memory_node(int nid)
+static inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -485,6 +493,7 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
+#define next_memory_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1U
#define nr_online_nodes 1U
@@ -493,14 +502,28 @@ static inline int num_node_state(enum node_states state)
#endif
+static inline int node_random(const nodemask_t *maskp)
+{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
-extern int node_random(const nodemask_t *maskp);
+ int w, bit;
+
+ w = nodes_weight(*maskp);
+ switch (w) {
+ case 0:
+ bit = NUMA_NO_NODE;
+ break;
+ case 1:
+ bit = first_node(*maskp);
+ break;
+ default:
+ bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
+ break;
+ }
+ return bit;
#else
-static inline int node_random(const nodemask_t *mask)
-{
return 0;
-}
#endif
+}
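
With node_random() now inline for all configurations, a hypothetical caller
(assuming numa_node_id() from <linux/topology.h>):

static int pick_interleave_node(void)
{
	int nid = node_random(&node_states[N_ONLINE]);

	/* NUMA_NO_NODE means the mask was empty; fall back to local. */
	return nid == NUMA_NO_NODE ? numa_node_id() : nid;
}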
#define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE]
@@ -514,7 +537,7 @@ static inline int node_random(const nodemask_t *mask)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.
+ * For nodemask scratch area.
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
@@ -527,7 +550,7 @@ static inline int node_random(const nodemask_t *mask)
#define NODEMASK_FREE(m) do {} while (0)
#endif
-/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h
new file mode 100644
index 000000000000..6b28d97ea6ed
--- /dev/null
+++ b/include/linux/nodemask_types.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NODEMASK_TYPES_H
+#define __LINUX_NODEMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/numa.h>
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+
+#endif /* __LINUX_NODEMASK_TYPES_H */
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index c1e79f72cd89..9f0af4f116d9 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -11,6 +11,10 @@
struct task_struct;
+#ifndef barrier_nospec
+# define barrier_nospec() do { } while (0)
+#endif
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 018947611483..45702bdcbceb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -73,6 +73,7 @@ struct raw_notifier_head {
struct srcu_notifier_head {
struct mutex mutex;
+ struct srcu_usage srcuu;
struct srcu_struct srcu;
struct notifier_block __rcu *head;
};
@@ -107,7 +108,8 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
{ \
.mutex = __MUTEX_INITIALIZER(name.mutex), \
.head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
+ .srcuu = __SRCU_USAGE_INIT(name.srcuu), \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
}
#define ATOMIC_NOTIFIER_HEAD(name) \
@@ -150,6 +152,11 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
+extern int atomic_notifier_chain_register_unique_prio(
+ struct atomic_notifier_head *nh, struct notifier_block *nb);
+extern int blocking_notifier_chain_register_unique_prio(
+ struct blocking_notifier_head *nh, struct notifier_block *nb);
+
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
@@ -161,20 +168,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
+ unsigned long val_up, unsigned long val_down, void *v);
+extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
+ unsigned long val_up, unsigned long val_down, void *v);
+
+extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
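
The _robust variants notify the chain with val_up and, when a callback fails,
re-walk the entries already called with val_down so they can unwind. A
hypothetical sketch; MYDEV_UP and MYDEV_DOWN are illustrative event codes:

enum { MYDEV_UP, MYDEV_DOWN };

static BLOCKING_NOTIFIER_HEAD(mydev_chain);

static int mydev_power_on(void *dev)
{
	int ret = blocking_notifier_call_chain_robust(&mydev_chain,
						      MYDEV_UP, MYDEV_DOWN,
						      dev);
	return notifier_to_errno(ret);
}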
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 5fbc4000358f..7d22ea50b098 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -2,12 +2,15 @@
#ifndef _LINUX_NS_COMMON_H
#define _LINUX_NS_COMMON_H
+#include <linux/refcount.h>
+
struct proc_ns_operations;
struct ns_common {
- atomic_long_t stashed;
+ struct dentry *stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
+ refcount_t count;
};
#endif
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index cdb171efc7cb..5601d14e2886 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -29,7 +30,7 @@ struct fs_struct;
* nsproxy is copied.
*/
struct nsproxy {
- atomic_t count;
+ refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -94,6 +95,7 @@ static inline struct cred *nsset_cred(struct nsset *set)
int copy_namespaces(unsigned long flags, struct task_struct *tsk);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
+int exec_task_namespaces(void);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
@@ -101,14 +103,13 @@ int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
- if (atomic_dec_and_test(&ns->count)) {
+ if (refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
- }
}
static inline void get_nsproxy(struct nsproxy *ns)
{
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
#endif
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index eba50b057f6f..4d103ac8f5c7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -86,13 +86,12 @@ extern struct list_head nubus_func_rsrcs;
struct nubus_driver {
struct device_driver driver;
int (*probe)(struct nubus_board *board);
- int (*remove)(struct nubus_board *board);
+ void (*remove)(struct nubus_board *board);
};
-extern struct bus_type nubus_bus_type;
-
/* Generic NuBus interface functions, modelled after the PCI interface */
#ifdef CONFIG_PROC_FS
+extern bool nubus_populate_procfs;
void nubus_proc_init(void);
struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a42df804679e..915033a75731 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
+#include <linux/init.h>
#include <linux/types.h>
#ifdef CONFIG_NODES_SHIFT
@@ -12,6 +13,7 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
#define NUMA_NO_NODE (-1)
+#define NUMA_NO_MEMBLK (-1)
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
@@ -21,24 +23,46 @@
#endif
#ifdef CONFIG_NUMA
+#include <asm/sparsemem.h>
+
/* Generic implementation available */
-int numa_map_to_online_node(int node);
+int numa_nearest_node(int node, unsigned int state);
-/*
- * Optional architecture specific implementation, users need a "depends
- * on $ARCH"
- */
-int phys_to_target_node(phys_addr_t addr);
-#else
-static inline int numa_map_to_online_node(int node)
+#ifndef memory_add_physaddr_to_nid
+int memory_add_physaddr_to_nid(u64 start);
+#endif
+
+#ifndef phys_to_target_node
+int phys_to_target_node(u64 start);
+#endif
+
+#ifndef numa_fill_memblks
+static inline int __init numa_fill_memblks(u64 start, u64 end)
{
- return NUMA_NO_NODE;
+ return NUMA_NO_MEMBLK;
}
+#endif
-static inline int phys_to_target_node(phys_addr_t addr)
+#else /* !CONFIG_NUMA */
+static inline int numa_nearest_node(int node, unsigned int state)
{
return NUMA_NO_NODE;
}
+
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+static inline int phys_to_target_node(u64 start)
+{
+ return 0;
+}
+#endif
+
+#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
+
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+extern const struct attribute_group arch_node_dev_group;
#endif
#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
new file mode 100644
index 000000000000..c1d0bc5d9624
--- /dev/null
+++ b/include/linux/nvme-auth.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include <crypto/kpp.h>
+
+struct nvme_dhchap_key {
+ size_t len;
+ u8 hash;
+ u8 key[];
+};
+
+u32 nvme_auth_get_seqnum(void);
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(u8 hmac_id);
+const char *nvme_auth_digest_name(u8 hmac_id);
+size_t nvme_auth_hmac_hash_len(u8 hmac_id);
+u8 nvme_auth_hmac_id(const char *hmac_name);
+
+u32 nvme_auth_key_struct_size(u32 key_len);
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash);
+void nvme_auth_free_key(struct nvme_dhchap_key *key);
+struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
+struct nvme_dhchap_key *nvme_auth_transform_key(
+ struct nvme_dhchap_key *key, char *nqn);
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen);
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len);
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len);
+
+#endif /* _NVME_AUTH_H */
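
A trivial consumer sketch using two of the helpers declared above
(illustrative only; nvme_auth_hmac_name() is assumed to return NULL for an
unknown hash id):

static void show_negotiated_hash(u8 hmac_id)
{
	const char *name = nvme_auth_hmac_name(hmac_id);

	if (!name)
		return;
	pr_info("DH-HMAC-CHAP hash %s, digest length %zu\n",
		name, nvme_auth_hmac_hash_len(hmac_id));
}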
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2a38f2b477a5..4109f1bd6128 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -7,6 +7,7 @@
#define _NVME_FC_DRIVER_H 1
#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
/*
@@ -184,7 +185,6 @@ enum nvmefc_fcp_datadir {
* @first_sgl: memory for 1st scatter/gather list segment for payload data
* @sg_cnt: number of elements in the scatter/gather list
* @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
- * @sqid: The nvme SQID the command is being issued on
* @done: The callback routine the LLDD is to invoke upon completion of
* the FCP operation. req argument is the pointer to the original
* FCP IO operation.
@@ -193,12 +193,13 @@ enum nvmefc_fcp_datadir {
* while processing the operation. The length of the buffer
* corresponds to the fcprqst_priv_sz value specified in the
* nvme_fc_port_template supplied by the LLDD.
+ * @sqid: The nvme SQID the command is being issued on
*
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @transferred_length: amount of payload data, in bytes, that were
* transferred. Should equal payload_length on success.
- * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
* negative errno value upon failure (ex: -EIO). Note: this is
* NOT a reflection of the NVME CQE completion status. Only the
@@ -218,14 +219,14 @@ struct nvmefc_fcp_req {
int sg_cnt;
enum nvmefc_fcp_datadir io_dir;
- __le16 sqid;
-
void (*done)(struct nvmefc_fcp_req *req);
void *private;
- u32 transferred_length;
+ __le16 sqid;
+
u16 rcv_rsplen;
+ u32 transferred_length;
u32 status;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
@@ -497,6 +498,8 @@ struct nvme_fc_port_template {
int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *rport,
struct nvmefc_ls_rsp *ls_rsp);
+ void (*map_queues)(struct nvme_fc_local_port *localport,
+ struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -561,6 +564,15 @@ int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
void *lsreqbuf, u32 lsreqbuf_len);
+/*
+ * Routine called to get the appid field associated with a request by the LLDD.
+ *
+ * If the return value is NULL: the user/libvirt has not set an appid for the VM.
+ * If the return value is non-NULL: it is the appid associated with the VM.
+ *
+ * @req: IO request from the NVMe-FC layer to the driver
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req);
/*
* *************** LLDD FC-NVME Target/Subsystem API ***************
@@ -718,7 +730,7 @@ enum {
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
- * @port_num: NVME-FC transport subsytem port number
+ * @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
@@ -779,6 +791,10 @@ struct nvmet_fc_target_port {
* LS received.
* Entrypoint is Mandatory.
*
+ * @map_queues: This function lets the driver expose the queue mapping
+ * to the block layer.
+ * Entrypoint is Optional.
+ *
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call
@@ -1041,5 +1057,10 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+/*
+ * Add a define, visible to the compiler, that indicates support
+ * for the feature. Allows conditional compilation in LLDDs.
+ */
+#define NVME_FC_FEAT_UUID 0x0001
#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
new file mode 100644
index 000000000000..e10333d78dbb
--- /dev/null
+++ b/include/linux/nvme-keyring.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Hannes Reinecke, SUSE Labs
+ */
+
+#ifndef _NVME_KEYRING_H
+#define _NVME_KEYRING_H
+
+#if IS_ENABLED(CONFIG_NVME_KEYRING)
+
+key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn);
+
+key_serial_t nvme_keyring_id(void);
+
+#else
+
+static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn)
+{
+ return 0;
+}
+static inline key_serial_t nvme_keyring_id(void)
+{
+ return 0;
+}
+#endif /* !CONFIG_NVME_KEYRING */
+#endif /* _NVME_KEYRING_H */
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3ec8e50efa16..eb2f04d636c8 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -6,6 +6,12 @@
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
+#define NVME_RDMA_IP_PORT 4420
+
+#define NVME_RDMA_MAX_QUEUE_SIZE 256
+#define NVME_RDMA_MAX_METADATA_QUEUE_SIZE 128
+#define NVME_RDMA_DEFAULT_QUEUE_SIZE 128
+
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..e07e8978d691 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -12,11 +12,18 @@
#define NVME_TCP_DISC_PORT 8009
#define NVME_TCP_ADMIN_CCSZ SZ_8K
#define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
};
+enum nvme_tcp_tls_cipher {
+ NVME_TCP_TLS_CIPHER_INVALID = 0,
+ NVME_TCP_TLS_CIPHER_SHA256 = 1,
+ NVME_TCP_TLS_CIPHER_SHA384 = 2,
+};
+
enum nvme_tcp_fatal_error_status {
NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
@@ -114,8 +121,9 @@ struct nvme_tcp_icresp_pdu {
struct nvme_tcp_term_pdu {
struct nvme_tcp_hdr hdr;
__le16 fes;
- __le32 fei;
- __u8 rsvd[8];
+ __le16 feil;
+ __le16 feiu;
+ __u8 rsvd[10];
};
/**
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d92535997687..425573202295 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -22,13 +23,29 @@
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
-#define NVME_RDMA_IP_PORT 4420
-
#define NVME_NSID_ALL 0xffffffff
enum nvme_subsys_type {
- NVME_NQN_DISC = 1, /* Discovery type target subsystem */
- NVME_NQN_NVME = 2, /* NVME type target subsystem */
+ /* Referral to another discovery type target subsystem */
+ NVME_NQN_DISC = 1,
+
+ /* NVME type target subsystem */
+ NVME_NQN_NVME = 2,
+
+ /* Current discovery type target subsystem */
+ NVME_NQN_CURR = 3,
+};
+
+enum nvme_ctrl_type {
+ NVME_CTRL_IO = 1, /* I/O controller */
+ NVME_CTRL_DISC = 2, /* Discovery controller */
+ NVME_CTRL_ADMIN = 3, /* Administrative controller */
+};
+
+enum nvme_dctype {
+ NVME_DCTYPE_NOT_REPORTED = 0,
+ NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
+ NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
@@ -88,6 +105,13 @@ enum {
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
+/* TSAS SECTYPE for TCP transport */
+enum {
+ NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
+ NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+};
+
#define NVME_AQ_DEPTH 32
#define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
@@ -116,6 +140,10 @@ enum {
NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
* Location
*/
+ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
+ * Space Control
+ */
+ NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
@@ -135,10 +163,14 @@ enum {
#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CRTO_CRIMT(crto) ((crto) >> 16)
+#define NVME_CRTO_CRWMT(crto) ((crto) & 0xffff)
+
enum {
NVME_CMBSZ_SQS = 1 << 0,
NVME_CMBSZ_CQS = 1 << 1,
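
A sketch of how a driver might consume the new CRTO register; per the NVMe
base specification the CRWMT/CRIMT fields count 500 ms units (the helper and
its callers are hypothetical):

static unsigned int crto_ready_timeout_ms(u32 crto, bool crime_enabled)
{
	u16 units = crime_enabled ? NVME_CRTO_CRIMT(crto)
				  : NVME_CRTO_CRWMT(crto);

	return units * 500;	/* convert 500 ms units to milliseconds */
}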
@@ -182,8 +214,10 @@ enum {
NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
- NVME_CAP_CSS_NVM = 1 << 0,
- NVME_CAP_CSS_CSI = 1 << 6,
+ NVME_CC_CRIME = 1 << 24,
+};
+
+enum {
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
@@ -194,6 +228,21 @@ enum {
NVME_CSTS_SHST_MASK = 3 << 2,
};
+enum {
+ NVME_CMBMSC_CRE = 1 << 0,
+ NVME_CMBMSC_CMSE = 1 << 1,
+};
+
+enum {
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI = 1 << 6,
+};
+
+enum {
+ NVME_CAP_CRMS_CRWMS = 1ULL << 59,
+ NVME_CAP_CRMS_CRIMS = 1ULL << 60,
+};
+
struct nvme_id_power_state {
__le16 max_power; /* centiwatts */
__u8 rsvd2;
@@ -220,6 +269,7 @@ enum {
enum nvme_ctrl_attr {
NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
NVME_CTRL_ATTR_TBKAS = (1 << 6),
+ NVME_CTRL_ATTR_ELBAS = (1 << 15),
};
struct nvme_id_ctrl {
@@ -238,7 +288,9 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[28];
+ __u8 rsvd100[11];
+ __u8 cntrltype;
+ __u8 fguid[16];
__le16 crdt1;
__le16 crdt2;
__le16 crdt3;
@@ -300,12 +352,15 @@ struct nvme_id_ctrl {
__le16 icdoff;
__u8 ctrattr;
__u8 msdbd;
- __u8 rsvd1804[244];
+ __u8 rsvd1804[2];
+ __u8 dctype;
+ __u8 rsvd1807[241];
struct nvme_id_power_state psd[32];
__u8 vs[1024];
};
enum {
+ NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
@@ -316,6 +371,7 @@ enum {
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
@@ -370,11 +426,25 @@ struct nvme_id_ns {
__le16 endgid;
__u8 nguid[16];
__u8 eui64[8];
- struct nvme_lbaf lbaf[16];
- __u8 rsvd192[192];
+ struct nvme_lbaf lbaf[64];
__u8 vs[3712];
};
+/* I/O Command Set Independent Identify Namespace Data Structure */
+struct nvme_id_ns_cs_indep {
+ __u8 nsfeat;
+ __u8 nmic;
+ __u8 rescap;
+ __u8 fpi;
+ __le32 anagrpid;
+ __u8 nsattr;
+ __u8 rsvd9;
+ __le16 nvmsetid;
+ __le16 endgid;
+ __u8 nstat;
+ __u8 rsvd15[4081];
+};
+
struct nvme_zns_lbafe {
__le64 zsze;
__u8 zdes;
@@ -389,8 +459,7 @@ struct nvme_id_ns_zns {
__le32 rrl;
__le32 frl;
__u8 rsvd20[2796];
- struct nvme_zns_lbafe lbafe[16];
- __u8 rsvd3072[768];
+ struct nvme_zns_lbafe lbafe[64];
__u8 vs[256];
};
@@ -399,6 +468,40 @@ struct nvme_id_ctrl_zns {
__u8 rsvd1[4095];
};
+struct nvme_id_ns_nvm {
+ __le64 lbstm;
+ __u8 pic;
+ __u8 rsvd9[3];
+ __le32 elbaf[64];
+ __u8 rsvd268[3828];
+};
+
+enum {
+ NVME_ID_NS_NVM_STS_MASK = 0x7f,
+ NVME_ID_NS_NVM_GUARD_SHIFT = 7,
+ NVME_ID_NS_NVM_GUARD_MASK = 0x3,
+};
+
+static inline __u8 nvme_elbaf_sts(__u32 elbaf)
+{
+ return elbaf & NVME_ID_NS_NVM_STS_MASK;
+}
+
+static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
+{
+ return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
+}
+
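
Illustrative use of the extended-LBA helpers above (NVME_NVM_NS_64B_GUARD is
defined further down in this patch):

static bool elbaf_has_64b_guard(__le32 elbaf_le)
{
	__u32 elbaf = le32_to_cpu(elbaf_le);

	return nvme_elbaf_guard_type(elbaf) == NVME_NVM_NS_64B_GUARD;
}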
+struct nvme_id_ctrl_nvm {
+ __u8 vsl;
+ __u8 wzsl;
+ __u8 wusl;
+ __u8 dmrl;
+ __le32 dmrsl;
+ __le64 dmsl;
+ __u8 rsvd16[4080];
+};
+
enum {
NVME_ID_CNS_NS = 0x00,
NVME_ID_CNS_CTRL = 0x01,
@@ -406,6 +509,7 @@ enum {
NVME_ID_CNS_NS_DESC_LIST = 0x03,
NVME_ID_CNS_CS_NS = 0x05,
NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_NS_CS_INDEP = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
@@ -439,6 +543,8 @@ enum {
NVME_NS_FEAT_IO_OPT = 1 << 4,
NVME_NS_ATTR_RO = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
+ NVME_NS_FLBAS_LBA_UMASK = 0x60,
+ NVME_NS_FLBAS_LBA_SHIFT = 1,
NVME_NS_FLBAS_META_EXT = 0x10,
NVME_NS_NMIC_SHARED = 1 << 0,
NVME_LBAF_RP_BEST = 0,
@@ -457,6 +563,22 @@ enum {
NVME_NS_DPS_PI_TYPE3 = 3,
};
+enum {
+ NVME_NSTAT_NRDY = 1 << 0,
+};
+
+enum {
+ NVME_NVM_NS_16B_GUARD = 0,
+ NVME_NVM_NS_32B_GUARD = 1,
+ NVME_NVM_NS_64B_GUARD = 2,
+};
+
+static inline __u8 nvme_lbaf_index(__u8 flbas)
+{
+ return (flbas & NVME_NS_FLBAS_LBA_MASK) |
+ ((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT);
+}
+
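
With 64 formats the LBA format index no longer fits in the low FLBAS nibble;
the helper above reassembles the split field. Hypothetical usage:

static const struct nvme_lbaf *current_lbaf(const struct nvme_id_ns *id)
{
	return &id->lbaf[nvme_lbaf_index(id->flbas)];
}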
/* Identify Namespace Metadata Capabilities (MC): */
enum {
NVME_MC_EXTENDED_LBA = (1 << 0),
@@ -522,8 +644,10 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
- NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
+ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+ NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
};
struct nvme_effects_log {
@@ -596,6 +720,10 @@ enum {
};
enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
+enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
@@ -620,8 +748,8 @@ struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
__u8 rsvd2[14];
- __u64 slba;
- __u64 nlb;
+ __le64 slba;
+ __le64 nlb;
__u8 guid[16];
__u8 rsvd48[16];
};
@@ -636,26 +764,55 @@ enum {
NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
+enum nvme_pr_type {
+ NVME_PR_WRITE_EXCLUSIVE = 1,
+ NVME_PR_EXCLUSIVE_ACCESS = 2,
+ NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
+ NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
+ NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
+ NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
+};
+
+enum nvme_eds {
+ NVME_EXTENDED_DATA_STRUCT = 0x1,
+};
+
+struct nvme_registered_ctrl {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 hostid;
+ __le64 rkey;
+};
+
struct nvme_reservation_status {
__le32 gen;
__u8 rtype;
__u8 regctl[2];
__u8 resv5[2];
__u8 ptpls;
- __u8 resv10[13];
- struct {
- __le16 cntlid;
- __u8 rcsts;
- __u8 resv3[5];
- __le64 hostid;
- __le64 rkey;
- } regctl_ds[];
+ __u8 resv10[14];
+ struct nvme_registered_ctrl regctl_ds[];
};
-enum nvme_async_event_type {
- NVME_AER_TYPE_ERROR = 0,
- NVME_AER_TYPE_SMART = 1,
- NVME_AER_TYPE_NOTICE = 2,
+struct nvme_registered_ctrl_ext {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 rkey;
+ __u8 hostid[16];
+ __u8 rsvd32[32];
+};
+
+struct nvme_reservation_status_ext {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[14];
+ __u8 rsvd24[40];
+ struct nvme_registered_ctrl_ext regctl_eds[];
};
/* I/O commands */
@@ -676,6 +833,7 @@ enum nvme_opcode {
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
nvme_cmd_zone_append = 0x7d,
+ nvme_cmd_vendor_start = 0x80,
};
#define nvme_opcode_name(opcode) { opcode, #opcode }
@@ -688,10 +846,15 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_compare), \
nvme_opcode_name(nvme_cmd_write_zeroes), \
nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_verify), \
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
- nvme_opcode_name(nvme_cmd_resv_release))
+ nvme_opcode_name(nvme_cmd_resv_release), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
+ nvme_opcode_name(nvme_cmd_zone_append))
+
/*
@@ -786,12 +949,14 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
+ struct_group(cdws,
__le32 cdw10;
__le32 cdw11;
__le32 cdw12;
__le32 cdw13;
__le32 cdw14;
__le32 cdw15;
+ );
};
struct nvme_rw_command {
@@ -799,7 +964,8 @@ struct nvme_rw_command {
__u8 flags;
__u16 command_id;
__le32 nsid;
- __u64 rsvd2;
+ __le32 cdw2;
+ __le32 cdw3;
__le64 metadata;
union nvme_data_ptr dptr;
__le64 slba;
@@ -835,6 +1001,7 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+ NVME_WZ_DEAC = 1 << 9,
};
struct nvme_dsm_cmd {
@@ -924,6 +1091,13 @@ struct nvme_zone_mgmt_recv_cmd {
enum {
NVME_ZRA_ZONE_REPORT = 0,
NVME_ZRASF_ZONE_REPORT_ALL = 0,
+ NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
+ NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
+ NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
+ NVME_ZRASF_ZONE_STATE_FULL = 0x06,
+ NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
NVME_REPORT_ZONE_PARTIAL = 1,
};
@@ -946,11 +1120,14 @@ enum {
struct nvme_feat_host_behavior {
__u8 acre;
- __u8 resv1[511];
+ __u8 etdas;
+ __u8 lbafee;
+ __u8 resv1[509];
};
enum {
NVME_ENABLE_ACRE = 1,
+ NVME_ENABLE_LBAFEE = 1,
};
/* Admin commands */
@@ -1002,10 +1179,14 @@ enum nvme_admin_opcode {
nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
nvme_admin_opcode_name(nvme_admin_activate_fw), \
nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_dev_self_test), \
nvme_admin_opcode_name(nvme_admin_ns_attach), \
nvme_admin_opcode_name(nvme_admin_keep_alive), \
nvme_admin_opcode_name(nvme_admin_directive_send), \
nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_virtual_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_send), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_recv), \
nvme_admin_opcode_name(nvme_admin_dbbuf), \
nvme_admin_opcode_name(nvme_admin_format_nvm), \
nvme_admin_opcode_name(nvme_admin_security_send), \
@@ -1236,6 +1417,8 @@ enum nvmf_capsule_command {
nvme_fabrics_type_property_set = 0x00,
nvme_fabrics_type_connect = 0x01,
nvme_fabrics_type_property_get = 0x04,
+ nvme_fabrics_type_auth_send = 0x05,
+ nvme_fabrics_type_auth_receive = 0x06,
};
#define nvme_fabrics_type_name(type) { type, #type }
@@ -1243,7 +1426,9 @@ enum nvmf_capsule_command {
__print_symbolic(type, \
nvme_fabrics_type_name(nvme_fabrics_type_property_set), \
nvme_fabrics_type_name(nvme_fabrics_type_connect), \
- nvme_fabrics_type_name(nvme_fabrics_type_property_get))
+ nvme_fabrics_type_name(nvme_fabrics_type_property_get), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))
/*
* If not fabrics command, fctype will be ignored.
@@ -1276,6 +1461,12 @@ struct nvmf_common_command {
#define MAX_DISC_LOGS 255
+/* Discovery log page entry flags (EFLAGS): */
+enum {
+ NVME_DISC_EFLAGS_EPCSD = (1 << 1),
+ NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
+};
+
/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
@@ -1285,7 +1476,8 @@ struct nvmf_disc_rsp_page_entry {
__le16 portid;
__le16 cntlid;
__le16 asqsz;
- __u8 resv8[22];
+ __le16 eflags;
+ __u8 resv10[20];
char trsvcid[NVMF_TRSVCID_SIZE];
__u8 resv64[192];
char subnqn[NVMF_NQN_FIELD_LEN];
@@ -1300,6 +1492,9 @@ struct nvmf_disc_rsp_page_entry {
__u16 pkey;
__u8 resv10[246];
} rdma;
+ struct tcp {
+ __u8 sectype;
+ } tcp;
} tsas;
};
@@ -1332,6 +1527,11 @@ struct nvmf_connect_command {
__u8 resv4[12];
};
+enum {
+ NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
+ NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
+};
+
struct nvmf_connect_data {
uuid_t hostid;
__le16 cntlid;
@@ -1366,6 +1566,200 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvmf_auth_common_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al_tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_send_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_receive_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al;
+ __u8 resv4[16];
+};
+
+/* Value for secp */
+enum {
+ NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
+};
+
+/* Defined value for auth_type */
+enum {
+ NVME_AUTH_COMMON_MESSAGES = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGES = 0x01,
+};
+
+/* Defined messages for auth_id */
+enum {
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
+ NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
+};
+
+struct nvmf_auth_dhchap_protocol_descriptor {
+ __u8 authid;
+ __u8 rsvd;
+ __u8 halen;
+ __u8 dhlen;
+ __u8 idlist[60];
+};
+
+enum {
+ NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
+};
+
+/* Defined hash functions for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_HASH_SHA256 = 0x01,
+ NVME_AUTH_HASH_SHA384 = 0x02,
+ NVME_AUTH_HASH_SHA512 = 0x03,
+ NVME_AUTH_HASH_INVALID = 0xff,
+};
+
+/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_DHGROUP_NULL = 0x00,
+ NVME_AUTH_DHGROUP_2048 = 0x01,
+ NVME_AUTH_DHGROUP_3072 = 0x02,
+ NVME_AUTH_DHGROUP_4096 = 0x03,
+ NVME_AUTH_DHGROUP_6144 = 0x04,
+ NVME_AUTH_DHGROUP_8192 = 0x05,
+ NVME_AUTH_DHGROUP_INVALID = 0xff,
+};
+
+union nvmf_auth_protocol {
+ struct nvmf_auth_dhchap_protocol_descriptor dhchap;
+};
+
+struct nvmf_auth_dhchap_negotiate_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd;
+ __le16 t_id;
+ __u8 sc_c;
+ __u8 napd;
+ union nvmf_auth_protocol auth_protocol[];
+};
+
+struct nvmf_auth_dhchap_challenge_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __u16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 hashid;
+ __u8 dhgid;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of challenge value */
+ __u8 cval[];
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+struct nvmf_auth_dhchap_reply_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 cvalid;
+ __u8 rsvd3;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of response data */
+ __u8 rval[];
+ /* followed by 'hl' bytes of Challenge value */
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+enum {
+ NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
+};
+
+struct nvmf_auth_dhchap_success1_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 rvalid;
+ __u8 rsvd3[7];
+ /* 'hl' bytes of response value */
+ __u8 rval[];
+};
+
+struct nvmf_auth_dhchap_success2_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rsvd2[10];
+};
+
+struct nvmf_auth_dhchap_failure_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rescode;
+ __u8 rescode_exp;
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
+ NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
+ NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
+ NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
+ NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
+};
+
+
struct nvme_dbbuf {
__u8 opcode;
__u8 flags;
@@ -1409,12 +1803,15 @@ struct nvme_command {
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvmf_auth_common_command auth_common;
+ struct nvmf_auth_send_command auth_send;
+ struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
};
};
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
return cmd->common.opcode == nvme_fabrics_command;
}
@@ -1433,7 +1830,7 @@ struct nvme_error_slot {
__u8 resv2[24];
};
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
{
/*
* What a mess...
@@ -1467,20 +1864,31 @@ enum {
NVME_SC_SGL_INVALID_DATA = 0xf,
NVME_SC_SGL_INVALID_METADATA = 0x10,
NVME_SC_SGL_INVALID_TYPE = 0x11,
-
+ NVME_SC_CMB_INVALID_USE = 0x12,
+ NVME_SC_PRP_INVALID_OFFSET = 0x13,
+ NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
+ NVME_SC_OP_DENIED = 0x15,
NVME_SC_SGL_INVALID_OFFSET = 0x16,
- NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
-
+ NVME_SC_RESERVED = 0x17,
+ NVME_SC_HOST_ID_INCONSIST = 0x18,
+ NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
+ NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
+ NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
NVME_SC_SANITIZE_FAILED = 0x1C,
NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
-
+ NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
+ NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
NVME_SC_NS_WRITE_PROTECTED = 0x20,
NVME_SC_CMD_INTERRUPTED = 0x21,
+ NVME_SC_TRANSIENT_TR_ERR = 0x22,
+ NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
+ NVME_SC_INVALID_IO_CMD_SET = 0x2C,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
NVME_SC_RESERVATION_CONFLICT = 0x83,
+ NVME_SC_FORMAT_IN_PROGRESS = 0x84,
/*
* Command Specific Status:
@@ -1513,8 +1921,15 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_CTRL_ID_INVALID = 0x11f,
+ NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
+ NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
+ NVME_SC_RES_ID_INVALID = 0x122,
NVME_SC_PMR_SAN_PROHIBITED = 0x123,
+ NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
+ NVME_SC_ANA_ATTACH_FAILED = 0x125,
/*
* I/O Command Set Specific - NVM commands:
@@ -1563,13 +1978,16 @@ enum {
/*
* Path-related Errors:
*/
+ NVME_SC_INTERNAL_PATH_ERROR = 0x300,
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_CTRL_PATH_ERROR = 0x360,
NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800,
+ NVME_SC_MORE = 0x2000,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 052293f4cbdb..34c0e58dfa26 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -18,14 +18,7 @@ struct device_node;
/* consumer cookie */
struct nvmem_cell;
struct nvmem_device;
-
-struct nvmem_cell_info {
- const char *name;
- unsigned int offset;
- unsigned int bytes;
- unsigned int bit_offset;
- unsigned int nbits;
-};
+struct nvmem_cell_info;
/**
* struct nvmem_cell_lookup - cell lookup entry
@@ -50,6 +43,8 @@ enum {
NVMEM_REMOVE,
NVMEM_CELL_ADD,
NVMEM_CELL_REMOVE,
+ NVMEM_LAYOUT_ADD,
+ NVMEM_LAYOUT_REMOVE,
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -65,6 +60,10 @@ int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val);
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val);
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+ u32 *val);
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+ u64 *val);
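
A hypothetical consumer of the variable-length read helper; "cal-offset" is
an illustrative cell id:

static int read_cal_offset(struct device *dev, u32 *val)
{
	return nvmem_cell_read_variable_le_u32(dev, "cal-offset", val);
}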
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -82,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
const char *nvmem_dev_name(struct nvmem_device *nvmem);
+size_t nvmem_dev_size(struct nvmem_device *nvmem);
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
size_t nentries);
@@ -128,6 +128,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u8(struct device *dev,
+ const char *cell_id, u8 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int nvmem_cell_read_u16(struct device *dev,
const char *cell_id, u16 *val)
{
@@ -146,6 +152,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+ const char *cell_id,
+ u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+ const char *cell_id,
+ u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 06409a6c40bc..f0ba0e03218f 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -9,28 +9,72 @@
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/device.h>
+#include <linux/device/driver.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
struct nvmem_device;
-struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+/* used for vendor specific post processing of cell data */
+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, int index,
+ unsigned int offset, void *buf,
+ size_t bytes);
enum nvmem_type {
NVMEM_TYPE_UNKNOWN = 0,
NVMEM_TYPE_EEPROM,
NVMEM_TYPE_OTP,
NVMEM_TYPE_BATTERY_BACKED,
+ NVMEM_TYPE_FRAM,
};
#define NVMEM_DEVID_NONE (-1)
#define NVMEM_DEVID_AUTO (-2)
/**
+ * struct nvmem_keepout - NVMEM register keepout range.
+ *
+ * @start: The first byte offset to avoid.
+ * @end: One beyond the last byte offset to avoid.
+ * @value: The byte to fill reads with for this region.
+ */
+struct nvmem_keepout {
+ unsigned int start;
+ unsigned int end;
+ unsigned char value;
+};
+
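
A hypothetical provider configuration using keepout ranges; reads within the
protected window are filled with 0xff instead of touching the hardware:

static const struct nvmem_keepout example_keepout[] = {
	{ .start = 0x40, .end = 0x60, .value = 0xff },
};

static const struct nvmem_config example_config = {
	.name		= "example-otp",
	.read_only	= true,
	.keepout	= example_keepout,
	.nkeepout	= ARRAY_SIZE(example_keepout),
};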
+/**
+ * struct nvmem_cell_info - NVMEM cell description
+ * @name: Name.
+ * @offset: Offset within the NVMEM device.
+ * @raw_len: Length of raw data (without post processing).
+ * @bytes: Length of the cell.
+ * @bit_offset: Bit offset if cell is smaller than a byte.
+ * @nbits: Number of bits.
+ * @np: Optional device_node pointer.
+ * @read_post_process: Callback for optional post processing of cell data
+ * on reads.
+ * @priv: Opaque data passed to the read_post_process hook.
+ */
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ size_t raw_len;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+ struct device_node *np;
+ nvmem_cell_post_process_t read_post_process;
+ void *priv;
+};
+
+/**
* struct nvmem_config - NVMEM device configuration
*
* @dev: Parent device.
@@ -39,17 +83,23 @@ enum nvmem_type {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
+ * @fixup_dt_cell_info: Will be called before a cell is added. Can be
+ * used to modify the nvmem_cell_info.
+ * @keepout: Optional array of keepout ranges (sorted ascending by start).
+ * @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
 * @root_only: Device is accessible to root only.
- * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
+ * @of_node: If given, this will be used instead of the parent's of_node.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
* @size: Device size.
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
- * @wp-gpio: Write protect pin
+ * @ignore_wp: Write Protect pin is managed by the provider.
+ * @layout: Fixed layout associated with this nvmem device.
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
@@ -63,13 +113,19 @@ struct nvmem_config {
const char *name;
int id;
struct module *owner;
- struct gpio_desc *wp_gpio;
const struct nvmem_cell_info *cells;
int ncells;
+ bool add_legacy_fixed_of_cells;
+ void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell);
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
enum nvmem_type type;
bool read_only;
bool root_only;
- bool no_of_node;
+ bool ignore_wp;
+ struct nvmem_layout *layout;
+ struct device_node *of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
@@ -100,6 +156,32 @@ struct nvmem_cell_table {
struct list_head node;
};
+/**
+ * struct nvmem_layout - NVMEM layout definitions
+ *
+ * @dev: Device-model layout device.
+ * @nvmem: The underlying NVMEM device.
+ * @add_cells: Will be called if an nvmem device is found which
+ * has this layout. The function will add layout
+ * specific cells with nvmem_add_one_cell().
+ *
+ * An nvmem device can hold a well-defined structure which can be
+ * evaluated at runtime. For example a TLV list, or a list of "name=val"
+ * pairs. An nvmem layout can parse the nvmem device and add appropriate
+ * cells.
+ */
+struct nvmem_layout {
+ struct device dev;
+ struct nvmem_device *nvmem;
+ int (*add_cells)(struct nvmem_layout *layout);
+};
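A minimal sketch, with hypothetical names, of an add_cells() callback that parses the device and registers one cell via nvmem_add_one_cell():

static int foo_layout_add_cells(struct nvmem_layout *layout)
{
	struct nvmem_cell_info info = {
		.name	= "serial-number",	/* assumed on-device layout */
		.offset	= 0x20,
		.bytes	= 8,
	};

	return nvmem_add_one_cell(layout->nvmem, &info);
}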
+
+struct nvmem_layout_driver {
+ struct device_driver driver;
+ int (*probe)(struct nvmem_layout *layout);
+ void (*remove)(struct nvmem_layout *layout);
+};
+
#if IS_ENABLED(CONFIG_NVMEM)
struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
@@ -108,11 +190,21 @@ void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
-int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
-
void nvmem_add_cell_table(struct nvmem_cell_table *table);
void nvmem_del_cell_table(struct nvmem_cell_table *table);
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info);
+
+int nvmem_layout_register(struct nvmem_layout *layout);
+void nvmem_layout_unregister(struct nvmem_layout *layout);
+
+int nvmem_layout_driver_register(struct nvmem_layout_driver *drv);
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv);
+#define module_nvmem_layout_driver(__nvmem_layout_driver) \
+ module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \
+ nvmem_layout_driver_unregister)
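Continuing the hypothetical sketch above, one way a layout driver can hook up add_cells() in probe and register the layout:

static int foo_layout_probe(struct nvmem_layout *layout)
{
	layout->add_cells = foo_layout_add_cells;

	return nvmem_layout_register(layout);
}

static void foo_layout_remove(struct nvmem_layout *layout)
{
	nvmem_layout_unregister(layout);
}

static struct nvmem_layout_driver foo_layout_driver = {
	.driver = {
		.name = "foo-layout",
	},
	.probe	= foo_layout_probe,
	.remove	= foo_layout_remove,
};
module_nvmem_layout_driver(foo_layout_driver);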
+
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
@@ -128,14 +220,42 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
return nvmem_register(c);
}
-static inline int
-devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
+static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
+static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
+static inline int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info)
{
return -EOPNOTSUPP;
}
-static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
-static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
+static inline int nvmem_layout_register(struct nvmem_layout *layout)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {}
#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+
+/**
+ * of_nvmem_layout_get_container() - Get OF node of layout container
+ *
+ * @nvmem: nvmem device
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
+
+#else /* CONFIG_NVMEM && CONFIG_OF */
+
+static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_NVMEM && CONFIG_OF */
+
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/objpool.h b/include/linux/objpool.h
new file mode 100644
index 000000000000..15aff4a17f0c
--- /dev/null
+++ b/include/linux/objpool.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_OBJPOOL_H
+#define _LINUX_OBJPOOL_H
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+
+/*
+ * objpool: ring-array based lockless MPMC queue
+ *
+ * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
+ *
+ * objpool is a scalable implementation of a high-performance queue for
+ * object allocation and reclamation, such as kretprobe instances.
+ *
+ * By leveraging a percpu ring array to mitigate hot spots of memory
+ * contention, it delivers near-linear scalability for highly parallel
+ * scenarios. The objpool is best suited for the following cases:
+ * 1) Memory allocation or reclamation is prohibited or too expensive
+ * 2) Consumers are of different priorities, such as irqs and threads
+ *
+ * Limitations:
+ * 1) Maximum objects (capacity) is fixed after objpool creation
+ * 2) All pre-allocated objects are managed in a percpu ring array,
+ * which consumes more memory than linked lists
+ */
+
+/**
+ * struct objpool_slot - percpu ring array of objpool
+ * @head: head sequence of the local ring array (to retrieve at)
+ * @tail: tail sequence of the local ring array (to append at)
+ * @last: the last sequence number marked as ready for retrieve
+ * @mask: bits mask for modulo capacity to compute array indexes
+ * @entries: object entries on this slot
+ *
+ * Represents a cpu-local, array-based ring buffer whose size is fixed
+ * during initialization of the object pool. On NUMA systems each percpu
+ * slot is allocated from local memory and kept compact in contiguous
+ * memory: the objects assigned to a CPU are stored just after the body
+ * of its objpool_slot.
+ *
+ * The real size of the ring array is far smaller than the value range of
+ * head and tail, typed as uint32_t: [0, 2^32), so only the lower bits
+ * (mask) of head and tail are used as the actual position in the ring
+ * array. In general the ring array acts like a small sliding window,
+ * always moving forward in the loop of [0, 2^32).
+ */
+struct objpool_slot {
+ uint32_t head;
+ uint32_t tail;
+ uint32_t last;
+ uint32_t mask;
+ void *entries[];
+} __packed;
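To make the head/tail masking concrete, an illustrative helper (not part of this header) that maps a sequence number to a ring index:

static inline void *objpool_slot_entry(struct objpool_slot *slot, uint32_t seq)
{
	/* e.g. capacity 16 gives mask 15; seq 0x10003 maps to entries[3] */
	return slot->entries[seq & slot->mask];
}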
+
+struct objpool_head;
+
+/*
+ * caller-specified callback for initial object setup; it is only called
+ * once for each object (just after the memory allocation of the object)
+ */
+typedef int (*objpool_init_obj_cb)(void *obj, void *context);
+
+/* caller-specified cleanup callback for objpool destruction */
+typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);
+
+/**
+ * struct objpool_head - object pooling metadata
+ * @obj_size: object size, aligned to sizeof(void *)
+ * @nr_objs: total objs (to be pre-allocated with objpool)
+ * @nr_cpus: local copy of nr_cpu_ids
+ * @capacity: max objs can be managed by one objpool_slot
+ * @gfp: gfp flags for kmalloc & vmalloc
+ * @ref: refcount of objpool
+ * @flags: flags for objpool management
+ * @cpu_slots: pointer to the array of objpool_slot
+ * @release: resource cleanup callback
+ * @context: caller-provided context
+ */
+struct objpool_head {
+ int obj_size;
+ int nr_objs;
+ int nr_cpus;
+ int capacity;
+ gfp_t gfp;
+ refcount_t ref;
+ unsigned long flags;
+ struct objpool_slot **cpu_slots;
+ objpool_fini_cb release;
+ void *context;
+};
+
+#define OBJPOOL_NR_OBJECT_MAX (1UL << 24) /* maximum number of total objects */
+#define OBJPOOL_OBJECT_SIZE_MAX (1UL << 16) /* maximum size of an object */
+
+/**
+ * objpool_init() - initialize objpool and pre-allocated objects
+ * @pool: the object pool to be initialized, declared by caller
+ * @nr_objs: total objects to be pre-allocated by this object pool
+ * @object_size: size of an object (should be > 0)
+ * @gfp: flags for memory allocation (via kmalloc or vmalloc)
+ * @context: user context for object initialization callback
+ * @objinit: object initialization callback for extra setup
+ * @release: cleanup callback for extra cleanup task
+ *
+ * return value: 0 for success, otherwise error code
+ *
+ * All pre-allocated objects are to be zeroed after memory allocation.
+ * The caller can do extra initialization in the objinit callback, which
+ * is called just after slot allocation and only once for each object.
+ * After that the objpool won't touch any content of the objects. It is
+ * the caller's duty to reinitialize an object after each pop (object
+ * allocation) or to clear it before each push (object reclamation).
+ */
+int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
+ gfp_t gfp, void *context, objpool_init_obj_cb objinit,
+ objpool_fini_cb release);
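A minimal usage sketch with a hypothetical object type; error handling is trimmed and GFP_KERNEL allocation is assumed:

struct foo_obj {
	int id;
};

static int foo_objinit(void *obj, void *context)
{
	struct foo_obj *foo = obj;

	foo->id = -1;	/* extra setup beyond the zeroing objpool already did */
	return 0;
}

static struct objpool_head foo_pool;

static int foo_setup(void)
{
	/* pre-allocate 128 objects, spread across the percpu slots */
	return objpool_init(&foo_pool, 128, sizeof(struct foo_obj),
			    GFP_KERNEL, NULL, foo_objinit, NULL);
}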
+
+/**
+ * objpool_pop() - allocate an object from objpool
+ * @pool: object pool
+ *
+ * return value: object ptr or NULL if failed
+ */
+void *objpool_pop(struct objpool_head *pool);
+
+/**
+ * objpool_push() - reclaim the object and return it to the objpool
+ * @obj: object ptr to be pushed to objpool
+ * @pool: object pool
+ *
+ * return: 0 or error code (it fails only when the user tries to push
+ * the same object multiple times or a foreign object into the objpool)
+ */
+int objpool_push(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_drop() - discard the object and deref objpool
+ * @obj: object ptr to be discarded
+ * @pool: object pool
+ *
+ * return: 0 if objpool was released; -EAGAIN if there are still
+ * outstanding objects
+ *
+ * objpool_drop is normally used to release outstanding objects after
+ * objpool cleanup (objpool_fini). Consider this example: a kretprobe
+ * is unregistered and objpool_fini() is called to release all remaining
+ * objects, but some objects are still in use by unfinished kretprobes
+ * (e.g. blocked in a function such as sys_accept). Only when the last
+ * outstanding object is dropped via objpool_drop() can the whole
+ * objpool be released.
+ */
+int objpool_drop(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_free() - release objpool forcibly (all objects to be freed)
+ * @pool: object pool to be released
+ */
+void objpool_free(struct objpool_head *pool);
+
+/**
+ * objpool_fini() - deref object pool (also releasing unused objects)
+ * @pool: object pool to be dereferenced
+ *
+ * objpool_fini() will try to release all remaining free objects and
+ * then drop an extra reference of the objpool. If all objects have
+ * already been returned to the objpool (so-called synchronous use
+ * cases), the objpool itself is freed as well. But if there are still
+ * outstanding objects (so-called asynchronous use cases, such as a
+ * blocked kretprobe), the objpool won't be released until all the
+ * outstanding objects are dropped. The caller must ensure there are no
+ * concurrent objpool_push() calls in flight; normally RCU is required
+ * to make sure all ongoing objpool_push() calls have finished before
+ * objpool_fini() is called, as is done by test_objpool, kretprobe and
+ * rethook.
+ */
+void objpool_fini(struct objpool_head *pool);
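Continuing the hypothetical sketch above, the synchronous pattern: every popped object is pushed back before objpool_fini(), so the pool is freed immediately:

static void foo_use_and_teardown(void)
{
	struct foo_obj *foo = objpool_pop(&foo_pool);

	if (foo) {
		/* ... use the object, reinitializing it as needed ... */
		objpool_push(foo, &foo_pool);
	}

	/* all objects are back in the pool, so this frees everything */
	objpool_fini(&foo_pool);
}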
+
+#endif /* _LINUX_OBJPOOL_H */
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
new file mode 100644
index 000000000000..b3b8d3dab52d
--- /dev/null
+++ b/include/linux/objtool.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_H
+#define _LINUX_OBJTOOL_H
+
+#include <linux/objtool_types.h>
+
+#ifdef CONFIG_OBJTOOL
+
+#include <asm/asm.h>
+
+#ifndef __ASSEMBLY__
+
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \
+ "987: \n\t" \
+ ".pushsection .discard.unwind_hints\n\t" \
+ /* struct unwind_hint */ \
+ ".long 987b - .\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
+ ".byte " __stringify(sp_reg) "\n\t" \
+ ".byte " __stringify(type) "\n\t" \
+ ".byte " __stringify(signal) "\n\t" \
+ ".balign 4 \n\t" \
+ ".popsection\n\t"
+
+/*
+ * This macro marks the given function's stack frame as "non-standard", which
+ * tells objtool to ignore the function when doing stack metadata validation.
+ * It should only be used in special cases where you're 100% sure it won't
+ * affect the reliability of frame pointers and kernel stack traces.
+ *
+ * For more information, see tools/objtool/Documentation/objtool.txt.
+ */
+#define STACK_FRAME_NON_STANDARD(func) \
+ static void __used __section(".discard.func_stack_frame_non_standard") \
+ *__func_stack_frame_non_standard_##func = func
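For example, with a hypothetical function, the annotation sits at file scope next to the function it exempts:

static void foo_trampoline(void)
{
	/* ... deliberately non-standard stack usage ... */
}
STACK_FRAME_NON_STANDARD(foo_trampoline);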
+
+/*
+ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
+ * for the case where a function is intentionally missing frame pointer setup,
+ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
+ */
+#ifdef CONFIG_FRAME_POINTER
+#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
+#else
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#endif
+
+#define ANNOTATE_NOENDBR \
+ "986: \n\t" \
+ ".pushsection .discard.noendbr\n\t" \
+ ".long 986b\n\t" \
+ ".popsection\n\t"
+
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b\n\t" \
+ ".popsection\n\t"
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * This macro indicates that the following intra-function call is valid.
+ * Any non-annotated intra-function call will cause objtool to issue a warning.
+ */
+#define ANNOTATE_INTRA_FUNCTION_CALL \
+ 999: \
+ .pushsection .discard.intra_function_calls; \
+ .long 999b; \
+ .popsection;
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest. The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack. Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * category. In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer. Such
+ * code needs to be annotated such that objtool can understand it. The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction. Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary. It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
+.Lhere_\@:
+ .pushsection .discard.unwind_hints
+ /* struct unwind_hint */
+ .long .Lhere_\@ - .
+ .short \sp_offset
+ .byte \sp_reg
+ .byte \type
+ .byte \signal
+ .balign 4
+ .popsection
+.endm
+
+.macro STACK_FRAME_NON_STANDARD func:req
+ .pushsection .discard.func_stack_frame_non_standard, "aw"
+ .long \func - .
+ .popsection
+.endm
+
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+ STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
+.macro ANNOTATE_NOENDBR
+.Lhere_\@:
+ .pushsection .discard.noendbr
+ .long .Lhere_\@
+ .popsection
+.endm
+
+/*
+ * Use objtool to validate the entry requirement that all code paths do
+ * VALIDATE_UNRET_END before RET.
+ *
+ * NOTE: The macro must be used at the beginning of a global symbol, otherwise
+ * it will be ignored.
+ */
+.macro VALIDATE_UNRET_BEGIN
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+ (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
+.Lhere_\@:
+ .pushsection .discard.validate_unret
+ .long .Lhere_\@ - .
+ .popsection
+#endif
+.endm
+
+.macro REACHABLE
+.Lhere_\@:
+ .pushsection .discard.reachable
+ .long .Lhere_\@
+ .popsection
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_OBJTOOL */
+
+#ifndef __ASSEMBLY__
+
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t"
+#define STACK_FRAME_NON_STANDARD(func)
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#define ANNOTATE_NOENDBR
+#define ASM_REACHABLE
+#else
+#define ANNOTATE_INTRA_FUNCTION_CALL
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
+.endm
+.macro STACK_FRAME_NON_STANDARD func:req
+.endm
+.macro ANNOTATE_NOENDBR
+.endm
+.macro REACHABLE
+.endm
+#endif
+
+#endif /* CONFIG_OBJTOOL */
+
+#endif /* _LINUX_OBJTOOL_H */
diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h
new file mode 100644
index 000000000000..453a4f4ef39d
--- /dev/null
+++ b/include/linux/objtool_types.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_TYPES_H
+#define _LINUX_OBJTOOL_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+ u8 signal;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in
+ * a truncated and unreliable stack unwind.
+ *
+ * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before
+ * hitting user entry, boot code, or fork entry (when there are no pt_regs
+ * available).
+ *
+ * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
+ * (the caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
+ * points to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
+ * sp_reg+sp_offset points to the iret return frame.
+ *
+ * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function.
+ * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain
+ * location so that it can be restored later.
+ */
+#define UNWIND_HINT_TYPE_UNDEFINED 0
+#define UNWIND_HINT_TYPE_END_OF_STACK 1
+#define UNWIND_HINT_TYPE_CALL 2
+#define UNWIND_HINT_TYPE_REGS 3
+#define UNWIND_HINT_TYPE_REGS_PARTIAL 4
+/* The below hint types don't have corresponding ORC types */
+#define UNWIND_HINT_TYPE_FUNC 5
+#define UNWIND_HINT_TYPE_SAVE 6
+#define UNWIND_HINT_TYPE_RESTORE 7
+
+#endif /* _LINUX_OBJTOOL_TYPES_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 5cf7ae0465d1..a0bedd038a05 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -13,17 +13,14 @@
*/
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
-#include <linux/spinlock.h>
-#include <linux/topology.h>
-#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/list.h>
#include <asm/byteorder.h>
-#include <asm/errno.h>
typedef u32 phandle;
typedef u32 ihandle;
@@ -100,15 +97,25 @@ struct of_reconfig_data {
struct property *old_prop;
};
-/* initialize a node */
-extern struct kobj_type of_node_ktype;
+extern const struct kobj_type of_node_ktype;
extern const struct fwnode_operations of_fwnode_ops;
+
+/**
+ * of_node_init - initialize a devicetree node
+ * @node: Pointer to device node that has been created by kzalloc()
+ *
+ * On return the device_node refcount is set to one. Use of_node_put()
+ * on @node when done to free the memory allocated for it. If the node
+ * is NOT a dynamic node the memory will not be freed. The decision
+ * whether to free the memory is made by node->release(), which is
+ * of_node_release().
+ */
static inline void of_node_init(struct device_node *node)
{
#if defined(CONFIG_OF_KOBJ)
kobject_init(&node->kobj, &of_node_ktype);
#endif
- node->fwnode.ops = &of_fwnode_ops;
+ fwnode_init(&node->fwnode, &of_fwnode_ops);
}
#if defined(CONFIG_OF_KOBJ)
@@ -128,13 +135,13 @@ static inline struct device_node *of_node_get(struct device_node *node)
}
static inline void of_node_put(struct device_node *node) { }
#endif /* !CONFIG_OF_DYNAMIC */
+DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T))
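This enables scope-based cleanup; a sketch with a hypothetical property name, where the reference is dropped automatically on every exit path:

static int foo_check_first_child(struct device_node *parent)
{
	struct device_node *np __free(device_node) =
			of_get_next_child(parent, NULL);

	if (!np)
		return -ENODEV;

	/* no explicit of_node_put(): __free(device_node) handles it */
	return of_property_present(np, "label") ? 0 : -ENOENT;
}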
/* Pointer for first entry in chain of all nodes. */
extern struct device_node *of_root;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
-extern raw_spinlock_t devtree_lock;
/*
* struct device_node flag descriptions
@@ -175,17 +182,12 @@ static inline bool is_of_node(const struct fwnode_handle *fwnode)
&__of_fwnode_handle_node->fwnode : NULL; \
})
-static inline bool of_have_populated_dt(void)
-{
- return of_root != NULL;
-}
-
static inline bool of_node_is_root(const struct device_node *node)
{
return node && (node->parent == NULL);
}
-static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+static inline int of_node_check_flag(const struct device_node *n, unsigned long flag)
{
return test_bit(flag, &n->_flags);
}
@@ -207,7 +209,7 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
}
#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p, unsigned long flag)
{
return test_bit(flag, &p->_flags);
}
@@ -289,6 +291,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_next_reserved_child(
+ const struct device_node *node, struct device_node *prev);
extern struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible);
@@ -342,7 +346,7 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_compatible_match(struct device_node *device,
+extern int of_device_compatible_match(const struct device_node *device,
const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
@@ -350,34 +354,34 @@ extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+extern struct device_node *of_cpu_device_node_get(int cpu);
+extern int of_cpu_node_to_id(struct device_node *np);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
int index);
-
-#define for_each_property_of_node(dn, pp) \
- for (pp = dn->properties; pp != NULL; pp = pp->next)
+extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
-extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern const void *of_device_get_match_data(const struct device *dev);
+extern int of_alias_from_compatible(const struct device_node *node, char *alias,
+ int len);
extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
-extern struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index);
-extern int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name, const char *cells_name, int index,
- struct of_phandle_args *out_args);
+extern int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name, int cell_count,
+ int index, struct of_phandle_args *out_args);
extern int of_parse_phandle_with_args_map(const struct device_node *np,
const char *list_name, const char *stem_name, int index,
struct of_phandle_args *out_args);
-extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
+/* module functions */
+extern ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len);
+extern int of_request_module(const struct device_node *np);
+
/* phandle iterator functions */
extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
const struct device_node *np,
@@ -393,11 +397,21 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
-extern int of_alias_get_alias_list(const struct of_device_id *matches,
- const char *stem, unsigned long *bitmap,
- unsigned int nbits);
-extern int of_machine_is_compatible(const char *compat);
+bool of_machine_compatible_match(const char *const *compats);
+
+/**
+ * of_machine_is_compatible - Test root of device tree for a given compatible value
+ * @compat: compatible string to look for in root node's compatible property.
+ *
+ * Return: true if the root node has the given value in its compatible property.
+ */
+static inline bool of_machine_is_compatible(const char *compat)
+{
+ const char *compats[] = { compat, NULL };
+
+ return of_machine_compatible_match(compats);
+}
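Usage sketch with hypothetical compatible strings:

static const char *const acme_boards[] = {
	"acme,board-v1",
	"acme,board-v2",
	NULL
};

static bool acme_detect(void)
{
	/* single compatible value ... */
	if (of_machine_is_compatible("acme,board-v1"))
		return true;

	/* ... or a NULL-terminated list in one pass */
	return of_machine_compatible_match(acme_boards);
}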
extern int of_add_property(struct device_node *np, struct property *prop);
extern int of_remove_property(struct device_node *np, struct property *prop);
@@ -415,122 +429,6 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
-/**
- * of_property_read_u8_array - Find and read an array of u8 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 8-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * dts entry of array should be like:
- * property = /bits/ 8 <0x50 0x60 0x70>;
- *
- * The out_values is modified only if a valid u8 value can be decoded.
- */
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname,
- u8 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u8_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u16_array - Find and read an array of u16 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 16-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * dts entry of array should be like:
- * property = /bits/ 16 <0x5000 0x6000 0x7000>;
- *
- * The out_values is modified only if a valid u16 value can be decoded.
- */
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname,
- u16 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u16_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u32_array - Find and read an array of 32 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 32-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u32 value can be decoded.
- */
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u32_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u64_array - Find and read an array of 64 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 64-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u64 value can be decoded.
- */
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u64_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
/*
* struct property *prop;
* const __be32 *p;
@@ -552,12 +450,17 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
-extern int of_cpu_node_to_id(struct device_node *np);
-
int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
+phys_addr_t of_dma_get_max_cpu_address(struct device_node *np);
+
+struct kimage;
+void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len,
+ const char *cmdline, size_t extra_fdt_size);
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -647,6 +550,12 @@ static inline struct device_node *of_get_next_available_child(
return NULL;
}
+static inline struct device_node *of_get_next_reserved_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
+
static inline struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name)
{
@@ -655,11 +564,6 @@ static inline struct device_node *of_find_node_with_property(
#define of_fwnode_handle(node) NULL
-static inline bool of_have_populated_dt(void)
-{
- return false;
-}
-
static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible)
{
@@ -679,7 +583,7 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_compatible_match(struct device_node *device,
+static inline int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
return 0;
@@ -716,32 +620,6 @@ static inline int of_property_count_elems_of_size(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
static inline int of_property_read_u32_index(const struct device_node *np,
const char *propname, u32 index, u32 *out_value)
{
@@ -767,6 +645,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ return NULL;
+}
+
+static inline int of_cpu_node_to_id(struct device_node *np)
+{
+ return -ENODEV;
+}
+
static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
return NULL;
@@ -847,18 +735,12 @@ static inline int of_property_read_string_helper(const struct device_node *np,
return -ENOSYS;
}
-static inline struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index)
-{
- return NULL;
-}
-
-static inline int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int index,
- struct of_phandle_args *out_args)
+static inline int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
{
return -ENOSYS;
}
@@ -872,18 +754,22 @@ static inline int of_parse_phandle_with_args_map(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args)
+static inline int of_count_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name)
{
return -ENOSYS;
}
-static inline int of_count_phandle_with_args(struct device_node *np,
- const char *list_name,
- const char *cells_name)
+static inline ssize_t of_modalias(const struct device_node *np, char *str,
+ ssize_t len)
{
- return -ENOSYS;
+ return -ENODEV;
+}
+
+static inline int of_request_module(const struct device_node *np)
+{
+ return -ENODEV;
}
static inline int of_phandle_iterator_init(struct of_phandle_iterator *it,
@@ -917,18 +803,26 @@ static inline int of_alias_get_highest_id(const char *stem)
return -ENOSYS;
}
-static inline int of_alias_get_alias_list(const struct of_device_id *matches,
- const char *stem, unsigned long *bitmap,
- unsigned int nbits)
+static inline int of_machine_is_compatible(const char *compat)
{
- return -ENOSYS;
+ return 0;
}
-static inline int of_machine_is_compatible(const char *compat)
+static inline int of_add_property(struct device_node *np, struct property *prop)
+{
+ return 0;
+}
+
+static inline int of_remove_property(struct device_node *np, struct property *prop)
{
return 0;
}
+static inline bool of_machine_compatible_match(const char *const *compats)
+{
+ return false;
+}
+
static inline bool of_console_check(const struct device_node *dn, const char *name, int index)
{
return false;
@@ -965,7 +859,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
{
}
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p,
+ unsigned long flag)
{
return 0;
}
@@ -978,11 +873,6 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
-static inline int of_cpu_node_to_id(struct device_node *np)
-{
- return -ENODEV;
-}
-
static inline int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
@@ -990,6 +880,16 @@ static inline int of_map_id(struct device_node *np, u32 id,
return -EINVAL;
}
+static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
+{
+ return PHYS_ADDR_MAX;
+}
+
+static inline const void *of_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -1007,6 +907,9 @@ static inline int of_prop_val_eq(struct property *p1, struct property *p2)
!memcmp(p1->value, p2->value, (size_t)p1->length);
}
+#define for_each_property_of_node(dn, pp) \
+ for (pp = dn->properties; pp != NULL; pp = pp->next)
+
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
@@ -1045,13 +948,167 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ
}
/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Return: The device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+static inline struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ struct of_phandle_args args;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error it returns an
+ * appropriate errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * };
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
+static inline int of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ int cell_count = -1;
+
+ /* If cells_name is NULL we assume a cell count of 0 */
+ if (!cells_name)
+ cell_count = 0;
+
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ cell_count, index, out_args);
+}
+
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error it returns an
+ * appropriate errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * };
+ *
+ * phandle2: node2 {
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+
+/**
+ * of_parse_phandle_with_optional_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * Same as of_parse_phandle_with_args() except that if the cells_name property
+ * is not found, cell_count of 0 is assumed.
+ *
+ * This is useful if you have a phandle which didn't have arguments
+ * before and thus doesn't have a '#*-cells' property but is now migrated to
+ * having arguments while retaining backwards compatibility.
+ */
+static inline int of_parse_phandle_with_optional_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ 0, index, out_args);
+}
+
+/**
+ * of_phandle_args_equal() - Compare two of_phandle_args
+ * @a1: First of_phandle_args to compare
+ * @a2: Second of_phandle_args to compare
+ *
+ * Return: True if a1 and a2 are the same (same node pointer, same phandle
+ * args), false otherwise.
+ */
+static inline bool of_phandle_args_equal(const struct of_phandle_args *a1,
+ const struct of_phandle_args *a2)
+{
+ return a1->np == a2->np &&
+ a1->args_count == a2->args_count &&
+ !memcmp(a1->args, a2->args, sizeof(a1->args[0]) * a1->args_count);
+}
+
+/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u8 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u8 and -ENODATA if the
* property does not have a value.
*/
@@ -1068,7 +1125,9 @@ static inline int of_property_count_u8_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u16 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u16 and -ENODATA if the
* property does not have a value.
*/
@@ -1085,7 +1144,9 @@ static inline int of_property_count_u16_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u32 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u32 and -ENODATA if the
* property does not have a value.
*/
@@ -1102,7 +1163,9 @@ static inline int of_property_count_u32_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u64 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u64 and -ENODATA if the
* property does not have a value.
*/
@@ -1123,7 +1186,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
* Search for a property in a device tree node and retrieve a list of
* terminated string values (pointer to data, not a copy) in that property.
*
- * If @out_strs is NULL, the number of strings in the property is returned.
+ * Return: If @out_strs is NULL, the number of strings in the property is returned.
*/
static inline int of_property_read_string_array(const struct device_node *np,
const char *propname, const char **out_strs,
@@ -1139,10 +1202,11 @@ static inline int of_property_read_string_array(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * terminated strings contained in it.
+ *
+ * Return: The number of strings on success, -EINVAL if the property does not
+ * exist, -ENODATA if property does not have a value, and -EILSEQ if the string
+ * is not null-terminated within the length of the property data.
*/
static inline int of_property_count_strings(const struct device_node *np,
const char *propname)
@@ -1156,13 +1220,14 @@ static inline int of_property_count_strings(const struct device_node *np,
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @index: index of the string in the list of strings
- * @out_string: pointer to null terminated return string, modified only if
+ * @output: pointer to null terminated return string, modified only if
* return value is 0.
*
* Search for a property in a device tree node and retrieve a null
* terminated string value (pointer to data, not a copy) in the list of strings
* contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
* property does not have a value, and -EILSEQ if the string is not
* null-terminated within the length of the property data.
*
@@ -1181,8 +1246,10 @@ static inline int of_property_read_string_index(const struct device_node *np,
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
- * Search for a property in a device node.
- * Returns true if the property exists false otherwise.
+ * Search for a boolean property in a device node. Usage on non-boolean
+ * property types is deprecated.
+ *
+ * Return: true if the property exists, false otherwise.
*/
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
@@ -1192,6 +1259,144 @@ static inline bool of_property_read_bool(const struct device_node *np,
return prop ? true : false;
}
+/**
+ * of_property_present - Test if a property is present in a node
+ * @np: device node to search for the property.
+ * @propname: name of the property to be searched.
+ *
+ * Test for a property present in a device node.
+ *
+ * Return: true if the property exists, false otherwise.
+ */
+static inline bool of_property_present(const struct device_node *np, const char *propname)
+{
+ return of_property_read_bool(np, propname);
+}
+
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it.
+ *
+ * dts entry of array should be like:
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u8 value can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
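A short caller-side sketch, with a hypothetical property name, matching the /bits/ 8 dts entry above:

static int foo_read_tuning(const struct device_node *np, u8 vals[3])
{
	/* fails with -EOVERFLOW if the property holds fewer than 3 bytes */
	return of_property_read_u8_array(np, "acme,tuning", vals, 3);
}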
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it.
+ *
+ * dts entry of array should be like:
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u16 value can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u64 value can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
static inline int of_property_read_u8(const struct device_node *np,
const char *propname,
u8 *out_value)
@@ -1257,9 +1462,25 @@ static inline int of_property_read_s32(const struct device_node *np,
#define for_each_child_of_node(parent, child) \
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
+
+#define for_each_child_of_node_scoped(parent, child) \
+ for (struct device_node *child __free(device_node) = \
+ of_get_next_child(parent, NULL); \
+ child != NULL; \
+ child = of_get_next_child(parent, child))
+
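A sketch of the scoped form, with a hypothetical property, where an early return cannot leak a child reference:

static int foo_find_labelled_child(struct device_node *parent)
{
	for_each_child_of_node_scoped(parent, child) {
		if (of_property_present(child, "label"))
			return 0;	/* child reference dropped automatically */
	}

	return -ENODEV;
}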
#define for_each_available_child_of_node(parent, child) \
for (child = of_get_next_available_child(parent, NULL); child != NULL; \
child = of_get_next_available_child(parent, child))
+#define for_each_reserved_child_of_node(parent, child) \
+ for (child = of_get_next_reserved_child(parent, NULL); child != NULL; \
+ child = of_get_next_reserved_child(parent, child))
+
+#define for_each_available_child_of_node_scoped(parent, child) \
+ for (struct device_node *child __free(device_node) = \
+ of_get_next_available_child(parent, NULL); \
+ child != NULL; \
+ child = of_get_next_available_child(parent, child))
#define for_each_of_cpu_node(cpu) \
for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
@@ -1291,18 +1512,22 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
+#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
- __used __section(__##table##_of_table) \
+ __used __section("__" #table "_of_table") \
+ __aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
- static const struct of_device_id __of_table_##name \
- __attribute__((unused)) \
- = { .compatible = compat, \
- .data = (fn == (fn_type)NULL) ? fn : fn }
+ _OF_DECLARE_STUB(table, name, compat, fn, fn_type)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
@@ -1358,6 +1583,8 @@ enum of_reconfig_change {
OF_RECONFIG_CHANGE_REMOVE,
};
+struct notifier_block;
+
#ifdef CONFIG_OF_DYNAMIC
extern int of_reconfig_notifier_register(struct notifier_block *);
extern int of_reconfig_notifier_unregister(struct notifier_block *);
@@ -1402,6 +1629,29 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+
+struct device_node *of_changeset_create_node(struct of_changeset *ocs,
+ struct device_node *parent,
+ const char *full_name);
+int of_changeset_add_prop_string(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name, const char *str);
+int of_changeset_add_prop_string_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const char **str_array, size_t sz);
+int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 *array, size_t sz);
+static inline int of_changeset_add_prop_u32(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 val)
+{
+ return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1);
+}
+
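A composition sketch, with hypothetical node and property names, showing how these helpers build up a changeset (error handling trimmed):

static int foo_populate_changeset(struct of_changeset *ocs,
				  struct device_node *parent)
{
	struct device_node *np;

	np = of_changeset_create_node(ocs, parent, "sensor@0");
	if (!np)
		return -ENOMEM;

	of_changeset_add_prop_string(ocs, np, "compatible", "acme,sensor");
	return of_changeset_add_prop_u32(ocs, np, "reg", 0);
}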
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1427,7 +1677,7 @@ static inline int of_reconfig_get_state_change(unsigned long action,
* of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
* @np: Pointer to the given device_node
*
- * return true if present false otherwise
+ * Return: true if present, false otherwise
*/
static inline bool of_device_is_system_power_controller(const struct device_node *np)
{
@@ -1435,16 +1685,45 @@ static inline bool of_device_is_system_power_controller(const struct device_node
}
/**
+ * of_have_populated_dt() - Has DT been populated by bootloader
+ *
+ * Return: True if a DTB has been populated by the bootloader and it isn't the
+ * empty builtin one. False otherwise.
+ */
+static inline bool of_have_populated_dt(void)
+{
+#ifdef CONFIG_OF
+ return of_property_present(of_root, "compatible");
+#else
+ return false;
+#endif
+}
+
+/*
* Overlay support
*/
enum of_overlay_notify_action {
- OF_OVERLAY_PRE_APPLY = 0,
+ OF_OVERLAY_INIT = 0, /* kzalloc() of ovcs sets this value */
+ OF_OVERLAY_PRE_APPLY,
OF_OVERLAY_POST_APPLY,
OF_OVERLAY_PRE_REMOVE,
OF_OVERLAY_POST_REMOVE,
};
+static inline const char *of_overlay_action_name(enum of_overlay_notify_action action)
+{
+ static const char *const of_overlay_action_name[] = {
+ "init",
+ "pre-apply",
+ "post-apply",
+ "pre-remove",
+ "post-remove",
+ };
+
+ return of_overlay_action_name[action];
+}
+
struct of_overlay_notify_data {
struct device_node *overlay;
struct device_node *target;
@@ -1453,7 +1732,7 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id);
+ int *ovcs_id, struct device_node *target_base);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1462,8 +1741,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id)
+static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id, struct device_node *target_base)
{
return -ENOTSUPP;
}
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 88bc943405cd..26a19daf0d09 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -35,9 +35,27 @@ struct of_pci_range {
for (; of_pci_range_parser_one(parser, range);)
#define for_each_of_range for_each_of_pci_range
+/*
+ * of_range_count - Get the number of "ranges" or "dma-ranges" entries
+ * @parser: Parser state initialized by of_range_parser_init()
+ *
+ * Returns the number of entries or 0 if none.
+ *
+ * Note that calling this within or after the for_each_of_range() iterator
+ * will be inaccurate, as it gives only the number of entries remaining.
+ */
+static inline int of_range_count(const struct of_range_parser *parser)
+{
+ if (!parser || !parser->node || !parser->range || parser->range == parser->end)
+ return 0;
+ return (parser->end - parser->range) / (parser->na + parser->pna + parser->ns);
+}
+
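As a sketch (not part of the patch), the count is taken after parser init and before iterating, since the iterator consumes entries; "bus" is a hypothetical node and CONFIG_OF_ADDRESS is assumed:

#include <linux/of_address.h>

static int example_count_ranges(struct device_node *bus)
{
        struct of_range_parser parser;

        if (of_range_parser_init(&parser, bus))
                return 0; /* no "ranges" property */
        return of_range_count(&parser);
}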
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
+extern const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *addr,
+ phys_addr_t *start, size_t *length);
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
@@ -51,8 +69,10 @@ void __iomem *of_io_request_and_map(struct device_node *device,
* the address space flags too. The PCI version uses a BAR number
* instead of an absolute index
*/
-extern const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags);
+extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags);
+
+int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -61,6 +81,13 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
+extern int of_range_to_resource(struct device_node *np, int index,
+ struct resource *res);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -75,12 +102,17 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags)
+static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags)
{
return NULL;
}
+static inline int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size)
+{
+ return -ENOSYS;
+}
+
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
@@ -100,6 +132,25 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_range_to_resource(struct device_node *np, int index,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
@@ -124,32 +175,27 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
#endif
#define of_range_parser_init of_pci_range_parser_init
-#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
-extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
- u64 *size, unsigned int *flags);
-extern int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res);
-#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
-static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
+static inline const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags)
{
- return -ENOSYS;
+ return __of_get_address(dev, index, -1, size, flags);
}
-static inline const __be32 *of_get_pci_address(struct device_node *dev,
- int bar_no, u64 *size, unsigned int *flags)
+static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags)
{
- return NULL;
+ return __of_get_address(dev, -1, bar_no, size, flags);
}
-static inline int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
+
+static inline int of_address_count(struct device_node *np)
{
- return -ENOSYS;
+ struct resource res;
+ int count = 0;
+
+ while (of_address_to_resource(np, count, &res) == 0)
+ count++;
+
+ return count;
}
-#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
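An illustrative sketch (not from the patch; "np" is hypothetical) combining the new of_address_count() and of_property_read_reg(), the latter reading raw "reg" values without address translation:

#include <linux/of_address.h>
#include <linux/printk.h>

static void example_dump_reg(struct device_node *np)
{
        u64 addr, size;
        int i, count = of_address_count(np);

        for (i = 0; i < count; i++)
                if (!of_property_read_reg(np, i, &addr, &size))
                        pr_info("reg[%d]: %#llx (%#llx bytes)\n", i, addr, size);
}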
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 07ca187fc5e4..9042bca5bb84 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -2,14 +2,11 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
-#include <linux/cpu.h>
-#include <linux/platform_device.h>
-#include <linux/of_platform.h> /* temporary until merge */
-
-#include <linux/of.h>
-#include <linux/mod_devicetable.h>
+#include <linux/device/driver.h>
struct device;
+struct of_device_id;
+struct kobj_uevent_env;
#ifdef CONFIG_OF
extern const struct of_device_id *of_match_device(
@@ -26,34 +23,10 @@ static inline int of_driver_match_device(struct device *dev,
return of_match_device(drv->of_match_table, dev) != NULL;
}
-extern struct platform_device *of_dev_get(struct platform_device *dev);
-extern void of_dev_put(struct platform_device *dev);
-
-extern int of_device_add(struct platform_device *pdev);
-extern int of_device_register(struct platform_device *ofdev);
-extern void of_device_unregister(struct platform_device *ofdev);
-
-extern const void *of_device_get_match_data(const struct device *dev);
-
extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
-extern int of_device_request_module(struct device *dev);
-extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
-extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
-
-static inline void of_device_node_put(struct device *dev)
-{
- of_node_put(dev->of_node);
-}
-
-static inline struct device_node *of_cpu_device_node_get(int cpu)
-{
- struct device *cpu_dev;
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev)
- return of_get_cpu_node(cpu, NULL);
- return of_node_get(cpu_dev->of_node);
-}
+extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
+extern int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env);
int of_dma_configure_id(struct device *dev,
struct device_node *np,
@@ -64,6 +37,9 @@ static inline int of_dma_configure(struct device *dev,
{
return of_dma_configure_id(dev, np, force_dma, NULL);
}
+
+void of_device_make_bus_id(struct device *dev);
+
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -72,49 +48,31 @@ static inline int of_driver_match_device(struct device *dev,
return 0;
}
-static inline void of_device_uevent(struct device *dev,
+static inline void of_device_uevent(const struct device *dev,
struct kobj_uevent_env *env) { }
-static inline const void *of_device_get_match_data(const struct device *dev)
-{
- return NULL;
-}
-
static inline int of_device_modalias(struct device *dev,
char *str, ssize_t len)
{
return -ENODEV;
}
-static inline int of_device_request_module(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int of_device_uevent_modalias(struct device *dev,
+static inline int of_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
}
-static inline void of_device_node_put(struct device *dev) { }
-
-static inline const struct of_device_id *__of_match_device(
+static inline const struct of_device_id *of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
-#define of_match_device(matches, dev) \
- __of_match_device(of_match_ptr(matches), (dev))
-
-static inline struct device_node *of_cpu_device_node_get(int cpu)
-{
- return NULL;
-}
static inline int of_dma_configure_id(struct device *dev,
- struct device_node *np,
- bool force_dma)
+ struct device_node *np,
+ bool force_dma,
+ const u32 *id)
{
return 0;
}
@@ -124,6 +82,9 @@ static inline int of_dma_configure(struct device *dev,
{
return 0;
}
+
+static inline void of_device_make_bus_id(struct device *dev) {}
+
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index acf820e88952..d69ad5bb1eb1 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -58,23 +58,17 @@ extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern unsigned long of_get_flat_dt_root(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
-extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data);
-extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_chosen(char *cmdline);
+extern int early_init_dt_scan_memory(void);
+extern void early_init_dt_check_for_usable_mem_range(void);
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
-extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
-extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
-extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
- bool no_map);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
-extern int early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_root(void);
extern bool early_init_dt_scan(void *params);
extern bool early_init_dt_verify(void *params);
@@ -90,6 +84,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_EARLY_FLATTREE */
+static inline void early_init_dt_check_for_usable_mem_range(void) {}
static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index f821095218b0..d0f66a5e1b2a 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -17,135 +17,22 @@
struct device_node;
-/*
- * This is Linux-specific flags. By default controllers' and Linux' mapping
- * match, but GPIO controllers are free to translate their own flags to
- * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended.
- */
-enum of_gpio_flags {
- OF_GPIO_ACTIVE_LOW = 0x1,
- OF_GPIO_SINGLE_ENDED = 0x2,
- OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_TRANSITORY = 0x8,
- OF_GPIO_PULL_UP = 0x10,
- OF_GPIO_PULL_DOWN = 0x20,
-};
-
#ifdef CONFIG_OF_GPIO
-#include <linux/kernel.h>
-
-/*
- * OF GPIO chip for memory mapped banks
- */
-struct of_mm_gpio_chip {
- struct gpio_chip gc;
- void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
- void __iomem *regs;
-};
-
-static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
-{
- return container_of(gc, struct of_mm_gpio_chip, gc);
-}
-
-extern int of_get_named_gpio_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags);
-
-extern int of_mm_gpiochip_add_data(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc,
- void *data);
-static inline int of_mm_gpiochip_add(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc)
-{
- return of_mm_gpiochip_add_data(np, mm_gc, NULL);
-}
-extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
+extern int of_get_named_gpio(const struct device_node *np,
+ const char *list_name, int index);
#else /* CONFIG_OF_GPIO */
#include <linux/errno.h>
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags)
+static inline int of_get_named_gpio(const struct device_node *np,
+ const char *propname, int index)
{
- if (flags)
- *flags = 0;
-
return -ENOSYS;
}
#endif /* CONFIG_OF_GPIO */
-/**
- * of_gpio_named_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- * @propname: property name containing gpio specifier(s)
- *
- * The function returns the count of GPIOs specified for a node.
- * Note that the empty GPIO specifiers count too. Returns either
- * Number of gpios defined in property,
- * -EINVAL for an incorrectly formed gpios property, or
- * -ENOENT for a missing gpios property
- *
- * Example:
- * gpios = <0
- * &gpio1 1 2
- * 0
- * &gpio2 3 4>;
- *
- * The above example defines four GPIOs, two of which are not specified.
- * This function will return '4'
- */
-static inline int of_gpio_named_count(struct device_node *np, const char* propname)
-{
- return of_count_phandle_with_args(np, propname, "#gpio-cells");
-}
-
-/**
- * of_gpio_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- *
- * Same as of_gpio_named_count, but hard coded to use the 'gpios' property
- */
-static inline int of_gpio_count(struct device_node *np)
-{
- return of_gpio_named_count(np, "gpios");
-}
-
-static inline int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags)
-{
- return of_get_named_gpio_flags(np, "gpios", index, flags);
-}
-
-/**
- * of_get_named_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @propname: Name of property containing gpio specifier(s)
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_named_gpio(struct device_node *np,
- const char *propname, int index)
-{
- return of_get_named_gpio_flags(np, propname, index, NULL);
-}
-
-/**
- * of_get_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_gpio(struct device_node *np, int index)
-{
- return of_get_gpio_flags(np, index, NULL);
-}
-
#endif /* __LINUX_OF_GPIO_H */
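What survives of this header is a single legacy lookup; a sketch of typical use ("np" is hypothetical, and new code should prefer the gpiod API):

#include <linux/gpio.h>
#include <linux/of_gpio.h>

static int example_claim_reset(struct device_node *np)
{
        int ret, gpio = of_get_named_gpio(np, "reset-gpios", 0);

        if (gpio < 0)
                return gpio; /* e.g. -EPROBE_DEFER or -ENOENT */
        ret = gpio_request(gpio, "reset");
        if (ret)
                return ret;
        return gpio_direction_output(gpio, 0); /* legacy numeric GPIO API */
}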
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 4d7756087b6b..a4bea62bfa29 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -41,7 +41,7 @@ struct of_endpoint {
bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
-int of_graph_get_endpoint_count(const struct device_node *np);
+unsigned int of_graph_get_endpoint_count(const struct device_node *np);
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
@@ -68,7 +68,7 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
-static inline int of_graph_get_endpoint_count(const struct device_node *np)
+static inline unsigned int of_graph_get_endpoint_count(const struct device_node *np)
{
return 0;
}
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 16f4b3e87f20..e61cbbe12dac 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -2,34 +2,30 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
-#include <linux/device.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
+struct device;
+struct device_node;
+struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size);
+extern int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ const u32 *id);
-extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id);
+extern void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list);
#else
-static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size)
+static inline int of_iommu_configure(struct device *dev,
+ struct device_node *master_np,
+ const u32 *id)
{
- return -EINVAL;
+ return -ENODEV;
}
-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id)
+static inline void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
{
- return NULL;
}
#endif /* CONFIG_OF_IOMMU */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index e8b78139f78c..d6d3eae2f145 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -20,12 +20,12 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_parse_oldworld(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
+int of_irq_parse_oldworld(const struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+static inline int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
@@ -33,15 +33,14 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
-extern int of_irq_parse_one(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern void of_irq_init(const struct of_device_id *matches);
-
#ifdef CONFIG_OF_IRQ
+extern void of_irq_init(const struct of_device_id *matches);
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
@@ -57,6 +56,14 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
extern void of_msi_configure(struct device *dev, struct device_node *np);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
+static inline void of_irq_init(const struct of_device_id *matches)
+{
+}
+static inline int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
+{
+ return -EINVAL;
+}
static inline int of_irq_count(struct device_node *dev)
{
return 0;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 1efb88d9f892..8a52ef2e6fa6 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -14,9 +14,26 @@
#if IS_ENABLED(CONFIG_OF_MDIO)
bool of_mdiobus_child_is_phy(struct device_node *child);
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np);
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner);
+
+static inline int of_mdiobus_register(struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __of_mdiobus_register(mdio, np, THIS_MODULE);
+}
+
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner);
+
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
+}
+
+struct mdio_device *of_mdio_find_device(struct device_node *np);
struct phy_device *of_phy_find_device(struct device_node *phy_np);
struct phy_device *
of_phy_connect(struct net_device *dev, struct device_node *phy_np,
@@ -25,9 +42,6 @@ of_phy_connect(struct net_device *dev, struct device_node *phy_np,
struct phy_device *
of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
void (*hndlr)(struct net_device *));
-struct phy_device *
-of_phy_attach(struct net_device *dev, struct device_node *phy_np,
- u32 flags, phy_interface_t iface);
struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
int of_phy_register_fixed_link(struct device_node *np);
@@ -74,6 +88,18 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return devm_mdiobus_register(dev, mdio);
+}
+
+static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ return NULL;
+}
+
static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
return NULL;
@@ -94,13 +120,6 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
return NULL;
}
-static inline struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np,
- u32 flags, phy_interface_t iface)
-{
- return NULL;
-}
-
static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 71bbfcf3adcd..d88715a0b3a5 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -8,12 +8,14 @@
#include <linux/phy.h>
-#ifdef CONFIG_OF_NET
+#if defined(CONFIG_OF) && defined(CONFIG_NET)
#include <linux/of.h>
struct net_device;
extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
-extern const void *of_get_mac_address(struct device_node *np);
+extern int of_get_mac_address(struct device_node *np, u8 *mac);
+extern int of_get_mac_address_nvmem(struct device_node *np, u8 *mac);
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np,
@@ -22,9 +24,19 @@ static inline int of_get_phy_mode(struct device_node *np,
return -ENODEV;
}
-static inline const void *of_get_mac_address(struct device_node *np)
+static inline int of_get_mac_address(struct device_node *np, u8 *mac)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
+}
+
+static inline int of_get_mac_address_nvmem(struct device_node *np, u8 *mac)
+{
+ return -ENODEV;
+}
+
+static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ return -ENODEV;
}
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
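With the int-returning of_get_mac_address() above, the caller now owns the buffer; a sketch of the usual fallback pattern ("np" and "ndev" are hypothetical; the eth_hw_addr_* helpers come from etherdevice.h):

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_assign_mac(struct device_node *np, struct net_device *ndev)
{
        u8 mac[ETH_ALEN];

        if (!of_get_mac_address(np, mac))
                eth_hw_addr_set(ndev, mac); /* DT or nvmem provided one */
        else
                eth_hw_addr_random(ndev);   /* fall back to a random MAC */
}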
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 84a966623e78..a2ff1ad48f7f 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -6,11 +6,12 @@
* <benh@kernel.crashing.org>
*/
-#include <linux/device.h>
#include <linux/mod_devicetable.h>
-#include <linux/pm.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
+
+struct device;
+struct device_node;
+struct of_device_id;
+struct platform_device;
/**
* struct of_dev_auxdata - lookup table entry for device names & platform_data
@@ -52,6 +53,11 @@ extern const struct of_device_id of_default_bus_match_table[];
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
struct device *parent);
+
+extern int of_device_add(struct platform_device *pdev);
+extern int of_device_register(struct platform_device *ofdev);
+extern void of_device_unregister(struct platform_device *ofdev);
+
#ifdef CONFIG_OF
extern struct platform_device *of_find_device_by_node(struct device_node *np);
#else
@@ -61,16 +67,18 @@ static inline struct platform_device *of_find_device_by_node(struct device_node
}
#endif
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
+
+#ifdef CONFIG_OF_ADDRESS
/* Platform devices and busses creation */
extern struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent);
extern int of_platform_device_destroy(struct device *dev, void *data);
-extern int of_platform_bus_probe(struct device_node *root,
- const struct of_device_id *matches,
- struct device *parent);
-#ifdef CONFIG_OF_ADDRESS
+
extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
@@ -84,6 +92,18 @@ extern int devm_of_platform_populate(struct device *dev);
extern void devm_of_platform_depopulate(struct device *dev);
#else
+/* Platform devices and busses creation */
+static inline struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ return NULL;
+}
+static inline int of_platform_device_destroy(struct device *dev, void *data)
+{
+ return -ENODEV;
+}
+
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
@@ -107,10 +127,4 @@ static inline int devm_of_platform_populate(struct device *dev)
static inline void devm_of_platform_depopulate(struct device *dev) { }
#endif
-#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
-extern void of_platform_register_reconfig_notifier(void);
-#else
-static inline void of_platform_register_reconfig_notifier(void) { }
-#endif
-
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 8216a4156263..4de2a24cadc9 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -27,11 +27,11 @@ struct reserved_mem_ops {
typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
+#ifdef CONFIG_OF_RESERVED_MEM
+
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
-#ifdef CONFIG_OF_RESERVED_MEM
-
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
int of_reserved_mem_device_init_by_name(struct device *dev,
@@ -39,11 +39,12 @@ int of_reserved_mem_device_init_by_name(struct device *dev,
const char *name);
void of_reserved_mem_device_release(struct device *dev);
-void fdt_init_reserved_mem(void);
-void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size);
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
#else
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
@@ -59,9 +60,6 @@ static inline int of_reserved_mem_device_init_by_name(struct device *dev,
static inline void of_reserved_mem_device_release(struct device *pdev) { }
-static inline void fdt_init_reserved_mem(void) { }
-static inline void fdt_reserved_mem_save_node(unsigned long node,
- const char *uname, phys_addr_t base, phys_addr_t size) { }
static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
return NULL;
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 657d6bf2c064..3921fbed0b28 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,17 +17,17 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
- OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
- OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
+ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
+ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
+ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
+ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
+ OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
- OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */
- OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */
- OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */
- OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -43,10 +43,9 @@ enum OID {
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */
- /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */
- OID_md2, /* 1.2.840.113549.2.2 */
- OID_md4, /* 1.2.840.113549.2.4 */
- OID_md5, /* 1.2.840.113549.2.5 */
+ OID_mskrb5, /* 1.2.840.48018.1.2.2 */
+ OID_krb5, /* 1.2.840.113554.1.2.2 */
+ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
@@ -56,8 +55,16 @@ enum OID {
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
+ OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+ OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */
+
+ OID_spnego, /* 1.3.6.1.5.5.2 */
+
+ OID_IAKerb, /* 1.3.6.1.5.2.5 */
+	OID_PKU2U,			/* 1.3.6.1.5.2.7 */
+ OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
- OID_sha1, /* 1.3.14.3.2.26 */
+ OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
@@ -89,6 +96,10 @@ enum OID {
OID_authorityKeyIdentifier, /* 2.5.29.35 */
OID_extKeyUsage, /* 2.5.29.37 */
+ /* Heimdal mechanisms */
+ OID_NetlogonMechanism, /* 1.2.752.43.14.2 */
+ OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */
+
/* EC-RDSA */
OID_gostCPSignA, /* 1.2.643.2.2.35.1 */
OID_gostCPSignB, /* 1.2.643.2.2.35.2 */
@@ -107,10 +118,33 @@ enum OID {
OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */
OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */
+ /* OSCCA */
+ OID_sm2, /* 1.2.156.10197.1.301 */
+ OID_sm3, /* 1.2.156.10197.1.401 */
+ OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */
+ OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */
+
+ /* TCG defined OIDS for TPM based keys */
+ OID_TPMLoadableKey, /* 2.23.133.10.1.3 */
+ OID_TPMImportableKey, /* 2.23.133.10.1.4 */
+ OID_TPMSealedData, /* 2.23.133.10.1.5 */
+
+ /* CSOR FIPS-202 SHA-3 */
+ OID_sha3_256, /* 2.16.840.1.101.3.4.2.8 */
+ OID_sha3_384, /* 2.16.840.1.101.3.4.2.9 */
+ OID_sha3_512, /* 2.16.840.1.101.3.4.2.10 */
+ OID_id_ecdsa_with_sha3_256, /* 2.16.840.1.101.3.4.3.10 */
+ OID_id_ecdsa_with_sha3_384, /* 2.16.840.1.101.3.4.3.11 */
+ OID_id_ecdsa_with_sha3_512, /* 2.16.840.1.101.3.4.3.12 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_256, /* 2.16.840.1.101.3.4.3.14 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+
OID__NR
};
extern enum OID look_up_OID(const void *data, size_t datasize);
+extern int parse_OID(const void *data, size_t datasize, enum OID *oid);
extern int sprint_oid(const void *, size_t, char *, size_t);
extern int sprint_OID(enum OID, char *, size_t);
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
index c4602364e909..3c2891d85c41 100644
--- a/include/linux/olpc-ec.h
+++ b/include/linux/olpc-ec.h
@@ -56,6 +56,8 @@ extern int olpc_ec_sci_query(u16 *sci_value);
extern bool olpc_ec_wakeup_available(void);
+asmlinkage int xo1_do_sleep(u8 sleep_state);
+
#else
static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 5c5c93ad6b50..6f6c31e3fb93 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -292,15 +292,22 @@ struct omap_system_dma_plat_info {
#define dma_omap15xx() __dma_omap15xx(d)
#define dma_omap16xx() __dma_omap16xx(d)
-#if defined(CONFIG_ARCH_OMAP)
extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
+#if defined(CONFIG_ARCH_OMAP1)
extern void omap_set_dma_priority(int lch, int dst_port, int priority);
+#else
+static inline void omap_set_dma_priority(int lch, int dst_port, int priority)
+{
+}
+#endif
+
extern int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch);
-extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_free_dma(int ch);
+#if IS_ENABLED(CONFIG_USB_OMAP)
+extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_start_dma(int lch);
extern void omap_stop_dma(int lch);
extern void omap_set_dma_transfer_params(int lch, int data_type,
@@ -326,10 +333,12 @@ extern void omap_set_dma_dest_burst_mode(int lch,
extern dma_addr_t omap_get_dma_src_pos(int lch);
extern dma_addr_t omap_get_dma_dst_pos(int lch);
extern int omap_get_dma_active_status(int lch);
+#endif
+
extern int omap_dma_running(void);
-#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
-#include <mach/lcd_dma.h>
+#if IS_ENABLED(CONFIG_FB_OMAP)
+extern int omap_lcd_dma_running(void);
#else
static inline int omap_lcd_dma_running(void)
{
@@ -337,22 +346,4 @@ static inline int omap_lcd_dma_running(void)
}
#endif
-#else /* CONFIG_ARCH_OMAP */
-
-static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
-{
- return NULL;
-}
-
-static inline int omap_request_dma(int dev_id, const char *dev_name,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data, int *dma_ch)
-{
- return -ENODEV;
-}
-
-static inline void omap_free_dma(int ch) { }
-
-#endif /* CONFIG_ARCH_OMAP */
-
#endif /* __LINUX_OMAP_DMA_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index b7bf735960c2..082841908fe7 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -81,9 +81,6 @@ extern int gpmc_configure(int cmd, int wval);
extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_settings *p);
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
-
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
diff --git a/include/linux/once.h b/include/linux/once.h
index 9225ee6d96c7..bc714d414448 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,9 +5,17 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags);
+ unsigned long *flags, struct module *mod);
+
+/* Variant for process contexts only. */
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@@ -16,7 +24,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
* out the condition into a nop. DO_ONCE() guarantees type safety of
* arguments!
*
- * Not that the following is not equivalent ...
+ * Note that the following is not equivalent ...
*
* DO_ONCE(func, arg);
* DO_ONCE(func, arg);
@@ -38,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool ___done = false; \
+ static bool __section(".data.once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
@@ -46,15 +54,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
- &___flags); \
+ &___flags, THIS_MODULE); \
} \
} \
___ret; \
})
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLEEPABLE(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool __section(".data.once") ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
+ ___ret = __do_once_sleepable_start(&___done); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_sleepable_done(&___done, &___once_key,\
+ THIS_MODULE); \
+ } \
+ } \
+ ___ret; \
+ })
+
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
-#define get_random_once_wait(buf, nbytes) \
- DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
+
+#define get_random_sleepable_once(buf, nbytes) \
+ DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
#endif /* _LINUX_ONCE_H */
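The canonical DO_ONCE() consumer is get_random_once(); a sketch of lazy key setup on a hot path (illustrative names, assumes siphash.h):

#include <linux/once.h>
#include <linux/siphash.h>

static siphash_key_t example_key __read_mostly;

static u64 example_hash(const void *data, size_t len)
{
        /* The first caller seeds the key; afterwards the static key is
         * patched so the check costs (nearly) nothing. */
        get_random_once(&example_key, sizeof(example_key));
        return siphash(data, len, &example_key);
}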
diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
new file mode 100644
index 000000000000..b7bce4983638
--- /dev/null
+++ b/include/linux/once_lite.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ONCE_LITE_H
+#define _LINUX_ONCE_LITE_H
+
+#include <linux/types.h>
+
+/* Call a function once. Similar to DO_ONCE(), but does not use jump label
+ * patching via static keys.
+ */
+#define DO_ONCE_LITE(func, ...) \
+ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
+
+#define __ONCE_LITE_IF(condition) \
+ ({ \
+ static bool __section(".data.once") __already_done; \
+ bool __ret_cond = !!(condition); \
+ bool __ret_once = false; \
+ \
+ if (unlikely(__ret_cond && !__already_done)) { \
+ __already_done = true; \
+ __ret_once = true; \
+ } \
+ unlikely(__ret_once); \
+ })
+
+#define DO_ONCE_LITE_IF(condition, func, ...) \
+ ({ \
+ bool __ret_do_once = !!(condition); \
+ \
+ if (__ONCE_LITE_IF(__ret_do_once)) \
+ func(__VA_ARGS__); \
+ \
+ unlikely(__ret_do_once); \
+ })
+
+#endif /* _LINUX_ONCE_LITE_H */
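A sketch of the conditional form (illustrative names): the warning fires at most once, but the macro returns the condition on every call, so the corrective action is never skipped:

#include <linux/once_lite.h>
#include <linux/printk.h>

static int example_clamp(int val, int limit)
{
        if (DO_ONCE_LITE_IF(val > limit, pr_warn, "example: clamping value\n"))
                val = limit; /* clamps every time; warns only once */
        return val;
}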
diff --git a/include/linux/oom.h b/include/linux/oom.h
index f022f581ac29..7d0c9c48a0c5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -55,6 +55,7 @@ struct oom_control {
};
extern struct mutex oom_lock;
+extern struct mutex oom_adj_mutex;
static inline void set_current_oom_origin(void)
{
@@ -77,15 +78,6 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
}
/*
- * Use this helper if tsk->mm != mm and the victim mm needs a special
- * handling. This is guaranteed to stay true after once set.
- */
-static inline bool mm_is_oom_victim(struct mm_struct *mm)
-{
- return test_bit(MMF_OOM_VICTIM, &mm->flags);
-}
-
-/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
@@ -105,8 +97,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
return 0;
}
-bool __oom_reap_task_mm(struct mm_struct *mm);
-
long oom_badness(struct task_struct *p,
unsigned long totalpages);
@@ -122,8 +112,4 @@ extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-/* sysctls */
-extern int sysctl_oom_dump_tasks;
-extern int sysctl_oom_kill_allocating_task;
-extern int sysctl_panic_on_oom;
#endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
deleted file mode 100644
index b2a0f15f11fe..000000000000
--- a/include/linux/oprofile.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file oprofile.h
- *
- * API for machine-specific interrupts to interface
- * to oprofile.
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_H
-#define OPROFILE_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/atomic.h>
-
-/* Each escaped entry is prefixed by ESCAPE_CODE
- * then one of the following codes, then the
- * relevant data.
- * These #defines live in this file so that arch-specific
- * buffer sync'ing code can access them.
- */
-#define ESCAPE_CODE ~0UL
-#define CTX_SWITCH_CODE 1
-#define CPU_SWITCH_CODE 2
-#define COOKIE_SWITCH_CODE 3
-#define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
-#define MODULE_LOADED_CODE 6
-#define CTX_TGID_CODE 7
-#define TRACE_BEGIN_CODE 8
-#define TRACE_END_CODE 9
-#define XEN_ENTER_SWITCH_CODE 10
-#define SPU_PROFILING_CODE 11
-#define SPU_CTX_SWITCH_CODE 12
-#define IBS_FETCH_CODE 13
-#define IBS_OP_CODE 14
-
-struct dentry;
-struct file_operations;
-struct pt_regs;
-
-/* Operations structure to be filled in */
-struct oprofile_operations {
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct dentry * root);
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
- void (*shutdown)(void);
- /* Start delivering interrupts. */
- int (*start)(void);
- /* Stop delivering interrupts. */
- void (*stop)(void);
- /* Arch-specific buffer sync functions.
- * Return value = 0: Success
- * Return value = -1: Failure
- * Return value = 1: Run generic sync function
- */
- int (*sync_start)(void);
- int (*sync_stop)(void);
-
- /* Initiate a stack backtrace. Optional. */
- void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
-
- /* Multiplex between different events. Optional. */
- int (*switch_events)(void);
- /* CPU identification string. */
- char * cpu_type;
-};
-
-/**
- * One-time initialisation. *ops must be set to a filled-in
- * operations structure. This is called even in timer interrupt
- * mode so an arch can set a backtrace callback.
- *
- * If an error occurs, the fields should be left untouched.
- */
-int oprofile_arch_init(struct oprofile_operations * ops);
-
-/**
- * One-time exit/cleanup for the arch.
- */
-void oprofile_arch_exit(void);
-
-/**
- * Add a sample. This may be called from any context.
- */
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
-
-/**
- * Add an extended sample. Use this when the PC is not from the regs, and
- * we cannot determine if we're in kernel mode from the regs.
- *
- * This function does perform a backtrace.
- *
- */
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel);
-
-/**
- * Add an hardware sample.
- */
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task);
-
-/* Use this instead when the PC value is not from the regs. Doesn't
- * backtrace. */
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
-
-/* add a backtrace entry, to be called from the ->backtrace callback */
-void oprofile_add_trace(unsigned long eip);
-
-
-/**
- * Create a file of the given name as a child of the given root, with
- * the specified file operations.
- */
-int oprofilefs_create_file(struct dentry * root,
- char const * name, const struct file_operations * fops);
-
-int oprofilefs_create_file_perm(struct dentry * root,
- char const * name, const struct file_operations * fops, int perm);
-
-/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct dentry * root,
- char const * name, atomic_t * val);
-
-/** create a directory */
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
-
-/**
- * Write the given asciz string to the given user buffer @buf, updating *offset
- * appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
- * updating *offset appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Read an ASCII string for a number from a userspace buffer and fill *val on success.
- * Returns 0 on success, < 0 on error.
- */
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-
-/** lock for read/write safety */
-extern raw_spinlock_t oprofilefs_lock;
-
-/**
- * Add the contents of a circular buffer to the event buffer.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max);
-
-unsigned long oprofile_get_cpu_buffer_size(void);
-void oprofile_cpu_buffer_inc_smpl_lost(void);
-
-/* cpu buffer functions */
-
-struct op_sample;
-
-struct op_entry {
- struct ring_buffer_event *event;
- struct op_sample *sample;
- unsigned long size;
- unsigned long *data;
-};
-
-void oprofile_write_reserve(struct op_entry *entry,
- struct pt_regs * const regs,
- unsigned long pc, int code, int size);
-int oprofile_add_data(struct op_entry *entry, unsigned long val);
-int oprofile_add_data64(struct op_entry *entry, u64 val);
-int oprofile_write_commit(struct op_entry *entry);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-int __init oprofile_perf_init(struct oprofile_operations *ops);
-void oprofile_perf_exit(void);
-char *op_name_from_perf_id(void);
-#else
-static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- pr_info("oprofile: hardware counters not available\n");
- return -ENODEV;
-}
-static inline void oprofile_perf_exit(void) { }
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* OPROFILE_H */
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 5581dbd3bd34..ea8fb31379e3 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -6,11 +6,6 @@
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
*/
-struct optimistic_spin_node {
- struct optimistic_spin_node *next, *prev;
- int locked; /* 1 if lock acquired */
- int cpu; /* encoded CPU # + 1 value */
-};
struct optimistic_spin_queue {
/*
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 93fcef105061..aa691f2119b0 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -3,14 +3,13 @@
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
+#include <linux/limits.h>
+#include <linux/const.h>
/*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -31,10 +30,11 @@
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
-#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
-#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
-#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define __type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_max(t) __type_max(typeof(t))
+#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define type_min(t) __type_min(typeof(t))
/*
* Avoids triggering -Wtype-limits compilation warning,
@@ -43,283 +43,305 @@
#define is_non_negative(a) ((a) > 0 || (a) == 0)
#define is_negative(a) (!(is_non_negative(a)))
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
- * For simplicity and code hygiene, the fallback code below insists on
- * a, b and *d having the same type (similar to the min() and max()
- * macros), whereas gcc's type-generic overflow checkers accept
- * different types. Hence we don't just make check_add_overflow an
- * alias for __builtin_add_overflow, but add type checks similar to
- * below.
+ * Allows for effectively applying __must_check to a macro so we can have
+ * the type-agnostic benefits of the macros while also being able to
+ * enforce that the return value is, in fact, checked.
*/
-#define check_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_add_overflow(__a, __b, __d); \
-})
-
-#define check_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_sub_overflow(__a, __b, __d); \
-})
-
-#define check_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_mul_overflow(__a, __b, __d); \
-})
-
-#else
-
+static inline bool __must_check __must_check_overflow(bool overflow)
+{
+ return unlikely(overflow);
+}
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a + __b; \
- *__d < __a; \
-})
-#define __unsigned_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a - __b; \
- __a < __b; \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns true on wrap-around, false otherwise.
+ *
+ * *@d holds the results of the attempted addition, regardless of whether
+ * wrap-around occurred.
*/
-#define __unsigned_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a * __b; \
- __builtin_constant_p(__b) ? \
- __b > 0 && __a > type_max(typeof(__a)) / __b : \
- __a > 0 && __b > type_max(typeof(__b)) / __a; \
-})
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
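A sketch of the usual pattern, guarding a size computation before allocation (illustrative names):

#include <linux/overflow.h>
#include <linux/slab.h>

static void *example_alloc(size_t hdr_len, size_t payload_len)
{
        size_t bytes;

        if (check_add_overflow(hdr_len, payload_len, &bytes))
                return NULL; /* reject a wrapped size outright */
        return kmalloc(bytes, GFP_KERNEL);
}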
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
+/**
+ * wrapping_add() - Intentionally perform a wrapping addition
+ * @type: type for result of calculation
+ * @a: first addend
+ * @b: second addend
+ *
+ * Return the potentially wrapped-around addition without
+ * tripping any wrap-around sanitizers that may be enabled.
*/
+#define wrapping_add(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_add_overflow(a, b, &__val); \
+ __val; \
+ })
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
+/**
+ * wrapping_assign_add() - Intentionally perform a wrapping increment assignment
+ * @var: variable to be incremented
+ * @offset: amount to add
+ *
+ * Increments @var by @offset with wrap-around, without tripping any
+ * wrap-around sanitizers that may be enabled.
+ *
+ * Returns the new value of @var.
*/
-#define __signed_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a + (u64)__b; \
- (((~(__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
+#define wrapping_assign_add(var, offset) \
+ ({ \
+ typeof(var) *__ptr = &(var); \
+ *__ptr = wrapping_add(typeof(var), *__ptr, offset); \
+ })
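A sketch contrasting the two wrapping forms on a counter that is expected to wrap (illustrative):

static u32 example_bump(u32 seq)
{
        seq = wrapping_add(u32, seq, 1); /* may wrap; sanitizer-silent */
        wrapping_assign_add(seq, 1);     /* in-place variant, same rules */
        return seq;
}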
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns true on wrap-around, false otherwise.
+ *
+ * *@d holds the results of the attempted subtraction, regardless of whether
+ * wrap-around occurred.
*/
-#define __signed_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a - (u64)__b; \
- ((((__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
+/**
+ * wrapping_sub() - Intentionally perform a wrapping subtraction
+ * @type: type for result of calculation
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ *
+ * Return the potentially wrapped-around subtraction without
+ * tripping any wrap-around sanitizers that may be enabled.
*/
+#define wrapping_sub(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_sub_overflow(a, b, &__val); \
+ __val; \
+ })
-#define __signed_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- typeof(a) __tmax = type_max(typeof(a)); \
- typeof(a) __tmin = type_min(typeof(a)); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a * (u64)__b; \
- (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
- (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
- (__b == (typeof(__b))-1 && __a == __tmin); \
-})
-
-
-#define check_add_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_add_overflow(a, b, d), \
- __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_sub_overflow(a, b, d), \
- __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_mul_overflow(a, b, d), \
- __unsigned_mul_overflow(a, b, d))
-
+/**
+ * wrapping_assign_sub() - Intentionally perform a wrapping decrement assign
+ * @var: variable to be decremented
+ * @offset: amount to subtract
+ *
+ * Decrements @var by @offset with wrap-around, without tripping any
+ * wrap-around sanitizers that may be enabled.
+ *
+ * Returns the new value of @var.
+ */
+#define wrapping_assign_sub(var, offset) \
+ ({ \
+ typeof(var) *__ptr = &(var); \
+ *__ptr = wrapping_sub(typeof(var), *__ptr, offset); \
+ })
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
+ *
+ * Returns true on wrap-around, false otherwise.
+ *
+ * *@d holds the results of the attempted multiplication, regardless of whether
+ * wrap-around occurred.
+ */
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
-/** check_shl_overflow() - Calculate a left-shifted value and check overflow
+/**
+ * wrapping_mul() - Intentionally perform a wrapping multiplication
+ * @type: type for result of calculation
+ * @a: first factor
+ * @b: second factor
*
+ * Return the potentially wrapped-around multiplication without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_mul(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_mul_overflow(a, b, &__val); \
+ __val; \
+ })
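
A brief sketch of the multiplication pair (hypothetical callers; the 0x61C88647 constant is the kernel's 32-bit golden-ratio value, used here only as an example multiplier):

	/* Overflow-checked allocation size, assuming <linux/slab.h>: */
	static void *table_alloc(size_t nmemb, size_t size)
	{
		size_t bytes;

		if (check_mul_overflow(nmemb, size, &bytes))
			return NULL;	/* product would wrap */
		return kzalloc(bytes, GFP_KERNEL);
	}

	/* Multiplicative hashing wraps intentionally: */
	static u32 hash_mix32(u32 v)
	{
		return wrapping_mul(u32, v, 0x61C88647);
	}
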
+
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
* @a: Value to be shifted
* @s: How many bits left to shift
* @d: Pointer to where to store the result
*
* Computes *@d = (@a << @s)
*
- * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
* make sense. Example conditions:
- * - 'a << s' causes bits to be lost when stored in *d.
- * - 's' is garbage (e.g. negative) or so large that the result of
- * 'a << s' is guaranteed to be 0.
- * - 'a' is negative.
- * - 'a << s' sets the sign bit, if any, in '*d'.
- *
- * '*d' will hold the results of the attempted shift, but is not
- * considered "safe for use" if false is returned.
+ *
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ * '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if true is returned.
*/
-#define check_shl_overflow(a, s, d) ({ \
+#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
typeof(a) _a = a; \
typeof(s) _s = s; \
typeof(d) _d = d; \
- u64 _a_full = _a; \
+ unsigned long long _a_full = _a; \
unsigned int _to_shift = \
is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
(_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
(*_d >> _to_shift) != _a); \
+}))
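
A short sketch of check_shl_overflow() (hypothetical conversion helper):

	static int blocks_to_bytes(u32 nr_blocks, unsigned int shift, u64 *bytes)
	{
		/* Rejects lost bits, a negative/huge shift, or a set sign bit. */
		if (check_shl_overflow((u64)nr_blocks, shift, bytes))
			return -EOVERFLOW;
		return 0;
	}
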
+
+#define __overflows_type_constexpr(x, T) ( \
+ is_unsigned_type(typeof(x)) ? \
+ (x) > type_max(T) : \
+ is_unsigned_type(typeof(T)) ? \
+ (x) < 0 || (x) > type_max(T) : \
+ (x) < type_min(T) || (x) > type_max(T))
+
+#define __overflows_type(x, T) ({ \
+ typeof(T) v = 0; \
+ check_add_overflow((x), v, &v); \
})
/**
- * array_size() - Calculate size of 2-dimensional array.
+ * overflows_type - helper for checking whether a value overflows a
+ * variable or data type
*
- * @a: dimension one
- * @b: dimension two
+ * @n: source constant value or variable to be checked
+ * @T: destination variable or data type proposed to store @n
*
- * Calculates size of 2-dimensional array: @a * @b.
+ * Compares the @n expression for whether or not it can safely fit in
+ * the storage of the type in @T. @n and @T can have different types.
+ * If @n is a constant expression, this will also resolve to a constant
+ * expression.
*
- * Returns: number of bytes needed to represent the array or SIZE_MAX on
- * overflow.
+ * Returns: true if overflow can occur, false otherwise.
*/
-static inline __must_check size_t array_size(size_t a, size_t b)
+#define overflows_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ __overflows_type_constexpr(n, T), \
+ __overflows_type(n, T))
+
+/**
+ * castable_to_type - like __same_type(), but also allows for casted literals
+ *
+ * @n: variable or constant value
+ * @T: variable or data type
+ *
+ * Unlike the __same_type() macro, this allows a constant value as the
+ * first argument. If this value can be assigned to the second argument's
+ * type without overflow, it returns true. Otherwise, this falls
+ * back to __same_type().
+ */
+#define castable_to_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ !__overflows_type_constexpr(n, T), \
+ __same_type(n, T))
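
Two small sketches (hypothetical struct and field): for constants both helpers resolve at compile time, so they can sit in static_assert(); for variables, overflows_type() degrades to a runtime check_add_overflow() probe.

	static_assert(!overflows_type(100, u8));
	static_assert(overflows_type(1UL << 20, u16));	/* 1 MiB cannot fit in u16 */
	static_assert(castable_to_type(0x7f, s8));
	static_assert(!castable_to_type(0xff, s8));	/* 255 overflows s8 */

	struct hypothetical_pkt {
		u16 len16;
	};

	static int pkt_set_len(struct hypothetical_pkt *p, size_t len)
	{
		if (overflows_type(len, p->len16))	/* runtime check for variables */
			return -E2BIG;
		p->len16 = len;
		return 0;
	}
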
+
+/**
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ * @factor1: first factor
+ * @factor2: second factor
+ *
+ * Returns: @factor1 * @factor2, with both factors promoted to size_t
+ * and any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
+ if (check_mul_overflow(factor1, factor2, &bytes))
return SIZE_MAX;
return bytes;
}
/**
- * array3_size() - Calculate size of 3-dimensional array.
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ * @addend1: first addend
+ * @addend2: second addend
*
- * @a: dimension one
- * @b: dimension two
- * @c: dimension three
- *
- * Calculates size of 3-dimensional array: @a * @b * @c.
- *
- * Returns: number of bytes needed to represent the array or SIZE_MAX on
- * overflow.
+ * Returns: @addend1 + @addend2, with both addends promoted to size_t
+ * and any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
- return SIZE_MAX;
- if (check_mul_overflow(bytes, c, &bytes))
+ if (check_add_overflow(addend1, addend2, &bytes))
return SIZE_MAX;
return bytes;
}
-/*
- * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
- * struct_size() below.
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: @minuend - @subtrahend, with both values promoted to size_t
+ * and any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
- return SIZE_MAX;
- if (check_add_overflow(bytes, c, &bytes))
+ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+ check_sub_overflow(minuend, subtrahend, &bytes))
return SIZE_MAX;
return bytes;
}
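
A composition sketch (hypothetical allocation path): because each helper saturates at SIZE_MAX, a single failed step poisons the whole expression and the allocator then rejects it.

	static void *frames_alloc(size_t frames, size_t frame_sz, size_t hdr_sz)
	{
		/* hdr_sz + frames * frame_sz, saturating to SIZE_MAX on overflow */
		return kvmalloc(size_add(hdr_sz, size_mul(frames, frame_sz)),
				GFP_KERNEL);
	}
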
/**
- * struct_size() - Calculate size of structure with trailing array.
- * @p: Pointer to the structure.
- * @member: Name of the array member.
- * @count: Number of elements in the array.
+ * array_size() - Calculate size of 2-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
*
- * Calculates size of memory needed for structure @p followed by an
- * array of @count number of @member elements.
+ * Calculates size of 2-dimensional array: @a * @b.
*
- * Return: number of bytes needed or SIZE_MAX on overflow.
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
*/
-#define struct_size(p, member, count) \
- __ab_c_size(count, \
- sizeof(*(p)->member) + __must_be_array((p)->member),\
- sizeof(*(p)))
+#define array_size(a, b) size_mul(a, b)
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
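
A sketch for the array helpers (hypothetical 2-D grid with untrusted dimensions):

	static u32 *grid_alloc(size_t rows, size_t cols)
	{
		/* rows * cols * sizeof(u32); SIZE_MAX on overflow fails the alloc */
		return kmalloc(array3_size(rows, cols, sizeof(u32)), GFP_KERNEL);
	}
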
/**
* flex_array_size() - Calculate size of a flexible array member
* within an enclosing structure.
- *
* @p: Pointer to the structure.
* @member: Name of the flexible array member.
* @count: Number of elements in the array.
@@ -330,7 +352,75 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define flex_array_size(p, member, count) \
- array_size(count, \
- sizeof(*(p)->member) + __must_be_array((p)->member))
+ __builtin_choose_expr(__is_constexpr(count), \
+ (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
+ size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
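
A flex_array_size() sketch (hypothetical structure with a trailing flexible array; reused by the struct_size() examples below):

	struct hypothetical_inst {
		size_t count;
		u32 items[];	/* flexible array member */
	};

	static void inst_fill(struct hypothetical_inst *inst, const u32 *src,
			      size_t count)
	{
		/* count * sizeof(u32), saturating rather than wrapping */
		memcpy(inst->items, src, flex_array_size(inst, items, count));
	}
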
+
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates the size of memory needed for the structure pointed to by @p,
+ * followed by an array of @count @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ sizeof(*(p)) + flex_array_size(p, member, count), \
+ size_add(sizeof(*(p)), flex_array_size(p, member, count)))
+
+/**
+ * struct_size_t() - Calculate size of structure with trailing flexible array
+ * @type: structure type name.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates the size of memory needed for structure @type followed by
+ * an array of @count @member elements. Prefer using struct_size()
+ * when possible instead, to keep calculations associated with a specific
+ * instance variable of type @type.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size_t(type, member, count) \
+ struct_size((type *)NULL, member, count)
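
An allocation sketch reusing the hypothetical structure above. struct_size() only needs the pointer's type, so @inst may still be uninitialized; struct_size_t() serves the rarer case where no instance pointer exists.

	static struct hypothetical_inst *inst_alloc(size_t count)
	{
		struct hypothetical_inst *inst;

		inst = kzalloc(struct_size(inst, items, count), GFP_KERNEL);
		if (inst)
			inst->count = count;
		return inst;
	}

	/* Without an instance: struct_size_t(struct hypothetical_inst, items, 16) */
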
+
+/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: initializer expression (could be empty for no init).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer) \
+ _Static_assert(__builtin_constant_p(count), \
+ "onstack flex array members require compile-time const count"); \
+ union { \
+ u8 bytes[struct_size_t(type, member, count)]; \
+ type obj; \
+ } name##_u initializer; \
+ type *name = (type *)&name##_u
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack instance of the @type structure with a
+ * trailing flexible array member.
+ * Use __struct_size(@name) to get its compile-time size afterwards.
+ */
+#define DEFINE_FLEX(type, name, member, count) \
+ _DEFINE_FLEX(type, name, member, count, = {})
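
An on-stack sketch (same hypothetical structure; @count must be a compile-time constant or the _Static_assert() above fires):

	static void inst_send_fixed(void)
	{
		DEFINE_FLEX(struct hypothetical_inst, inst, items, 4);

		inst->count = 4;
		/* inst points into a stack union sized by struct_size_t(..., 4) */
	}
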
#endif /* __LINUX_OVERFLOW_H */
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 54667735cc67..8d6571feb95d 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _LINUX_PACKING_H
diff --git a/include/linux/padata.h b/include/linux/padata.h
index a433f13fc4bf..0146daf34430 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -12,6 +12,7 @@
#ifndef PADATA_H
#define PADATA_H
+#include <linux/refcount.h>
#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -96,7 +97,7 @@ struct parallel_data {
struct padata_shell *ps;
struct padata_list __percpu *reorder_list;
struct padata_serial_queue __percpu *squeue;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned int seq_nr;
unsigned int processed;
int cpu;
@@ -136,6 +137,7 @@ struct padata_shell {
* appropriate for one worker thread to do at once.
* @max_threads: Max threads to use for the job, actual number may be less
* depending on task size and minimum chunk size.
+ * @numa_aware: Distribute jobs round-robin across NUMA nodes with CPUs.
*/
struct padata_mt_job {
void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
@@ -145,6 +147,7 @@ struct padata_mt_job {
unsigned long align;
unsigned long min_chunk;
int max_threads;
+ bool numa_aware;
};
/**
@@ -177,10 +180,6 @@ struct padata_instance {
#ifdef CONFIG_PADATA
extern void __init padata_init(void);
-#else
-static inline void __init padata_init(void) {}
-#endif
-
extern struct padata_instance *padata_alloc(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
@@ -191,4 +190,12 @@ extern void padata_do_serial(struct padata_priv *padata);
extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
+#else
+static inline void __init padata_init(void) {}
+static inline void __init padata_do_multithreaded(struct padata_mt_job *job)
+{
+ job->thread_fn(job->start, job->start + job->size, job->fn_arg);
+}
+#endif
+
#endif
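
A sketch of a padata_mt_job user (hypothetical __init path; the field values are illustrative): with CONFIG_PADATA=n, the new inline fallback above simply runs @thread_fn once over the whole range.

	static void __init range_init(unsigned long start, unsigned long end,
				      void *arg)
	{
		/* ... initialize items in [start, end) ... */
	}

	static void __init ranges_init(unsigned long nr_items)
	{
		struct padata_mt_job job = {
			.thread_fn	= range_init,
			.fn_arg		= NULL,
			.start		= 0,
			.size		= nr_items,
			.align		= 64,
			.min_chunk	= 1024,
			.max_threads	= 8,
			.numa_aware	= true,
		};

		padata_do_multithreaded(&job);
	}
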
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index e200eef6a7fd..7d79818dc065 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -21,16 +21,17 @@
#elif MAX_NR_ZONES <= 8
#define ZONES_SHIFT 3
#else
-#error ZONES_SHIFT -- too many zones configured adjust calculation
+#error ZONES_SHIFT "Too many zones configured"
#endif
+#define ZONES_WIDTH ZONES_SHIFT
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
-
-/* SECTION_SHIFT #bits space required to store a section # */
#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
-
-#endif /* CONFIG_SPARSEMEM */
+#else
+#define SECTIONS_SHIFT 0
+#endif
#ifndef BUILD_VDSO32_64
/*
@@ -54,17 +55,29 @@
#define SECTIONS_WIDTH 0
#endif
-#define ZONES_WIDTH ZONES_SHIFT
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
-#else
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
-#endif
+#else
#define NODES_WIDTH 0
#endif
+/*
+ * Note that this #define MUST have a value so that it can be tested with
+ * the IS_ENABLED() macro.
+ */
+#if NODES_SHIFT != 0 && NODES_WIDTH == 0
+#define NODE_NOT_IN_PAGE_FLAGS 1
+#endif
+
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
#define LAST__PID_SHIFT 8
#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
@@ -77,37 +90,26 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
-#else
-#define KASAN_TAG_WIDTH 0
-#endif
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
- <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
- > BITS_PER_LONG - NR_PAGEFLAGS
-#error "Not enough bits in page flags"
+#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
-/*
- * We are going to use the flags for the page to node mapping if its in
- * there. This includes the case where there is no node, so it is implicit.
- * Note that this #define MUST have a value so that it can be tested with
- * the IS_ENABLED() macro.
- */
-#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
-#define NODE_NOT_IN_PAGE_FLAGS 1
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error "Not enough bits in page flags"
#endif
-#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
-#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
-#endif
+/* see the comment on MAX_NR_TIERS */
+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6be1aa559b1e..652d77805e99 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -68,9 +68,6 @@
* might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
* a result of MADV_FREE).
*
- * PG_uptodate tells whether the page's contents is valid. When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
- *
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
@@ -86,8 +83,7 @@
*/
/*
- * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
- * locked- and dirty-page accounting.
+ * Don't use the pageflags directly. Use the PageFoo macros.
*
* The page flags field is split into two parts, the main flags area
* which extends from the low bits upwards, and the fields area which
@@ -103,13 +99,15 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
+ PG_writeback, /* Page is under writeback */
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
+ PG_head, /* Must be in bit 6 */
+ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_error,
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
@@ -117,8 +115,6 @@ enum pageflags {
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_writeback, /* Page is under writeback */
- PG_head, /* A head page */
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
@@ -132,12 +128,27 @@ enum pageflags {
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+ PG_arch_2,
+ PG_arch_3,
+#endif
__NR_PAGEFLAGS,
+ PG_readahead = PG_reclaim,
+
+ /*
+ * Depending on the way an anonymous folio can be mapped into a page
+ * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
+ * THP), PG_anon_exclusive may be set only for the head page or for
+ * tail pages of an anonymous folio. For now, we only expect it to be
+ * set on tail pages for PTE-mapped THP.
+ */
+ PG_anon_exclusive = PG_mappedtodisk,
+
/* Filesystems */
PG_checked = PG_owner_priv_1,
@@ -160,46 +171,131 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
- /* SLOB */
- PG_slob_free = PG_private,
-
- /* Compound pages. Stored in first tail page's flags */
- PG_double_map = PG_private_2,
-
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* For self-hosted memmap pages */
+ PG_vmemmap_self_hosted = PG_owner_priv_1,
+#endif
+
+ /*
+ * Flags only valid for compound pages. Stored in first tail page's
+ * flags word. Cannot use the first 8 flags or any flag marked as
+ * PF_ANY.
+ */
+
+ /* At least one page in this folio has the hwpoison flag set */
+ PG_has_hwpoisoned = PG_error,
+ PG_hugetlb = PG_active,
+ PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
+#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
+
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
-static inline struct page *compound_head(struct page *page)
+/*
+ * Return the real head page struct iff the @page is a fake head page, otherwise
+ * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
+ */
+static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return page;
+
+ /*
+	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
+	 * head struct page. The alignment check avoids accessing the fields
+	 * (e.g. compound_head) of @page[1], which can avoid touching a
+	 * (possibly) cold cacheline in some cases.
+ */
+ if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
+ test_bit(PG_head, &page->flags)) {
+ /*
+		 * We can safely access the fields of @page[1]: since PG_head
+		 * is set, @page is a compound page composed of at least two
+		 * contiguous pages.
+ */
+ unsigned long head = READ_ONCE(page[1].compound_head);
+
+ if (likely(head & 1))
+ return (const struct page *)(head - 1);
+ }
+ return page;
+}
+#else
+static inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ return page;
+}
+#endif
+
+static __always_inline int page_is_fake_head(const struct page *page)
+{
+ return page_fixed_fake_head(page) != page;
+}
+
+static inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
+ return head - 1;
+ return (unsigned long)page_fixed_fake_head(page);
}
-static __always_inline int PageTail(struct page *page)
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
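A small sketch (hypothetical helper): rewriting compound_head() as a typeof() macro preserves const-ness, so const pages stay const through the call.

	static bool page_is_its_own_head(const struct page *page)
	{
		/* compound_head() returns 'const struct page *' here */
		return compound_head(page) == page;
	}
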
+/**
+ * page_folio - Converts from page to folio.
+ * @p: The page.
+ *
+ * Every page is part of a folio. This function cannot be called on a
+ * NULL pointer.
+ *
+ * Context: Neither a reference nor a lock is required on @p. If the caller
+ * does not hold a reference, this call may race with a folio split, so
+ * it should re-check the folio still contains this page after gaining
+ * a reference on the folio.
+ * Return: The folio which contains this page.
+ */
+#define page_folio(p) (_Generic((p), \
+ const struct page *: (const struct folio *)_compound_head(p), \
+ struct page *: (struct folio *)_compound_head(p)))
+
+/**
+ * folio_page - Return a page from a folio.
+ * @folio: The folio.
+ * @n: The page number to return.
+ *
+ * @n is relative to the start of the folio. This function does not
+ * check that the page number lies within @folio; the caller is presumed
+ * to have a reference to the page.
+ */
+#define folio_page(folio, n) nth_page(&(folio)->page, n)
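
A round-trip sketch (hypothetical helper) using the two new conversions:

	static struct page *head_page_of(struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* folio_page(folio, 0) is the folio's head page */
		return folio_page(folio, 0);
	}
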
+
+static __always_inline int PageTail(const struct page *page)
{
- return READ_ONCE(page->compound_head) & 1;
+ return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}
-static __always_inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(const struct page *page)
{
- return test_bit(PG_head, &page->flags) || PageTail(page);
+ return test_bit(PG_head, &page->flags) ||
+ READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return page->flags == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -210,6 +306,25 @@ static inline void page_init_poison(struct page *page, size_t size)
}
#endif
+static const unsigned long *const_folio_flags(const struct folio *folio,
+ unsigned n)
+{
+ const struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ return &page[n].flags;
+}
+
+static unsigned long *folio_flags(struct folio *folio, unsigned n)
+{
+ struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ return &page[n].flags;
+}
+
/*
* Page flags policies wrt compound pages
*
@@ -223,61 +338,111 @@ static inline void page_init_poison(struct page *page, size_t size)
* for compound page all operations related to the page flag applied to
* head page.
*
- * PF_ONLY_HEAD:
- * for compound page, callers only ever operate on the head page.
- *
* PF_NO_TAIL:
* modifications of the page flag must be done on small or head pages,
* checks can be done on tail pages too.
*
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
+ *
+ * PF_SECOND:
+ * the page flag is stored in the first tail page.
*/
#define PF_POISONED_CHECK(page) ({ \
VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
page; })
#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
-#define PF_ONLY_HEAD(page, enforce) ({ \
- VM_BUG_ON_PGFLAGS(PageTail(page), page); \
- PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
PF_POISONED_CHECK(page); })
+#define PF_SECOND(page, enforce) ({ \
+ VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
+ PF_POISONED_CHECK(&page[1]); })
+
+/* Which page is the flag stored in */
+#define FOLIO_PF_ANY 0
+#define FOLIO_PF_HEAD 0
+#define FOLIO_PF_NO_TAIL 0
+#define FOLIO_PF_NO_COMPOUND 0
+#define FOLIO_PF_SECOND 1
+
+#define FOLIO_HEAD_PAGE 0
+#define FOLIO_SECOND_PAGE 1
/*
* Macros to create function definitions for page flags
*/
+#define FOLIO_TEST_FLAG(name, page) \
+static __always_inline bool folio_test_##name(const struct folio *folio) \
+{ return test_bit(PG_##name, const_folio_flags(folio, page)); }
+
+#define FOLIO_SET_FLAG(name, page) \
+static __always_inline void folio_set_##name(struct folio *folio) \
+{ set_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_CLEAR_FLAG(name, page) \
+static __always_inline void folio_clear_##name(struct folio *folio) \
+{ clear_bit(PG_##name, folio_flags(folio, page)); }
+
+#define __FOLIO_SET_FLAG(name, page) \
+static __always_inline void __folio_set_##name(struct folio *folio) \
+{ __set_bit(PG_##name, folio_flags(folio, page)); }
+
+#define __FOLIO_CLEAR_FLAG(name, page) \
+static __always_inline void __folio_clear_##name(struct folio *folio) \
+{ __clear_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_TEST_SET_FLAG(name, page) \
+static __always_inline bool folio_test_set_##name(struct folio *folio) \
+{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_TEST_CLEAR_FLAG(name, page) \
+static __always_inline bool folio_test_clear_##name(struct folio *folio) \
+{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_FLAG(name, page) \
+FOLIO_TEST_FLAG(name, page) \
+FOLIO_SET_FLAG(name, page) \
+FOLIO_CLEAR_FLAG(name, page)
+
#define TESTPAGEFLAG(uname, lname, policy) \
-static __always_inline int Page##uname(struct page *page) \
- { return test_bit(PG_##lname, &policy(page, 0)->flags); }
+FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
+static __always_inline int Page##uname(const struct page *page) \
+{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
+FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
- { set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
+FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
- { clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
+__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
- { __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
+__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
- { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
+FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
- { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
+FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
- { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -293,32 +458,40 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
-#define TESTPAGEFLAG_FALSE(uname) \
+#define TESTPAGEFLAG_FALSE(uname, lname) \
+static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }
-#define SETPAGEFLAG_NOOP(uname) \
+#define SETPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_set_##lname(struct folio *folio) { } \
static inline void SetPage##uname(struct page *page) { }
-#define CLEARPAGEFLAG_NOOP(uname) \
+#define CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_clear_##lname(struct folio *folio) { } \
static inline void ClearPage##uname(struct page *page) { }
-#define __CLEARPAGEFLAG_NOOP(uname) \
+#define __CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void __folio_clear_##lname(struct folio *folio) { } \
static inline void __ClearPage##uname(struct page *page) { }
-#define TESTSETFLAG_FALSE(uname) \
+#define TESTSETFLAG_FALSE(uname, lname) \
+static inline bool folio_test_set_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestSetPage##uname(struct page *page) { return 0; }
-#define TESTCLEARFLAG_FALSE(uname) \
+#define TESTCLEARFLAG_FALSE(uname, lname) \
+static inline bool folio_test_clear_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestClearPage##uname(struct page *page) { return 0; }
-#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
- SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
+ SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
-#define TESTSCFLAG_FALSE(uname) \
- TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+#define TESTSCFLAG_FALSE(uname, lname) \
+ TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
-PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
+FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
@@ -326,12 +499,12 @@ PAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+ TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
-__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
/* Xen */
@@ -352,10 +525,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
- * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ * - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
-PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
- __CLEARPAGEFLAG(Private, private, PF_ANY)
+PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
@@ -371,8 +543,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
+ TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
#ifdef CONFIG_HIGHMEM
/*
@@ -380,23 +552,27 @@ PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
* available at this point.
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
+#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
-PAGEFLAG_FALSE(HighMem)
+PAGEFLAG_FALSE(HighMem, highmem)
#endif
#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
-#ifdef CONFIG_THP_SWAP
- page = compound_head(page);
-#endif
- return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+ return folio_test_swapbacked(folio) &&
+ test_bit(PG_swapcache, const_folio_flags(folio, 0));
+}
+static __always_inline bool PageSwapCache(const struct page *page)
+{
+ return folio_test_swapcache(page_folio(page));
}
+
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(SwapCache)
+PAGEFLAG_FALSE(SwapCache, swapcache)
#endif
PAGEFLAG(Unevictable, unevictable, PF_HEAD)
@@ -408,35 +584,35 @@ PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
- TESTSCFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
+ TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
-PAGEFLAG_FALSE(Uncached)
+PAGEFLAG_FALSE(Uncached, uncached)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
-extern bool set_hwpoison_free_buddy_page(struct page *page);
+#define MAGIC_HWPOISON 0x48575053U /* HWPS */
+extern void SetPageHWPoisonTakenOff(struct page *page);
+extern void ClearPageHWPoisonTakenOff(struct page *page);
+extern bool take_page_off_buddy(struct page *page);
+extern bool put_page_back_buddy(struct page *page);
#else
-PAGEFLAG_FALSE(HWPoison)
-static inline bool set_hwpoison_free_buddy_page(struct page *page)
-{
- return 0;
-}
+PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
-TESTPAGEFLAG(Young, young, PF_ANY)
-SETPAGEFLAG(Young, young, PF_ANY)
-TESTCLEARFLAG(Young, young, PF_ANY)
-PAGEFLAG(Idle, idle, PF_ANY)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
+FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
+FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
+FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
+FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/*
@@ -447,6 +623,12 @@ PAGEFLAG(Idle, idle, PF_ANY)
*/
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
+#ifdef CONFIG_MEMORY_HOTPLUG
+PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
+#else
+PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
+#endif
+
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
@@ -458,29 +640,56 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
* structure which KSM associates with that merged page. See ksm.h.
*
* PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points a struct address_space.
+ * page and then page->mapping points to a struct movable_operations.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
+ *
+ * For slab pages, since slab reuses the bits in struct page to store its
+ * internal states, the page->mapping does not exist as such, nor do these
+ * flags below. So in order to avoid testing non-existent bits, please
+ * make sure that PageSlab(page) actually evaluates to false before calling
+ * the following functions (e.g., PageAnon). See mm/slab.h.
*/
#define PAGE_MAPPING_ANON 0x1
#define PAGE_MAPPING_MOVABLE 0x2
#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-static __always_inline int PageMappingFlags(struct page *page)
+/*
+ * Unlike the flags above, this flag is used only for fsdax mode. It
+ * indicates that the page->mapping is shared via reflink.
+ */
+#define PAGE_MAPPING_DAX_SHARED ((void *)0x1)
+
+static __always_inline bool folio_mapping_flags(const struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
+}
+
+static __always_inline int PageMappingFlags(const struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool folio_test_anon(const struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+static __always_inline bool PageAnon(const struct page *page)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return folio_test_anon(page_folio(page));
}
-static __always_inline int __PageMovable(struct page *page)
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
+}
+
+static __always_inline int __PageMovable(const struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
PAGE_MAPPING_MOVABLE;
@@ -493,30 +702,61 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool folio_test_ksm(const struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
PAGE_MAPPING_KSM;
}
+
+static __always_inline bool PageKsm(const struct page *page)
+{
+ return folio_test_ksm(page_folio(page));
+}
#else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif
u64 stable_page_flags(struct page *page);
-static inline int PageUptodate(struct page *page)
+/**
+ * folio_xor_flags_has_waiters - Change some folio flags.
+ * @folio: The folio.
+ * @mask: Bits set in this word will be changed.
+ *
+ * This must only be used for flags which are changed with the folio
+ * lock held. For example, it is unsafe to use for PG_dirty as that
+ * can be set without the folio lock held. It can also only be used
+ * on flags which are in the range 0-6 as some of the implementations
+ * only affect those bits.
+ *
+ * Return: Whether there are tasks waiting on the folio.
+ */
+static inline bool folio_xor_flags_has_waiters(struct folio *folio,
+ unsigned long mask)
+{
+ return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
+}
+
+/**
+ * folio_test_uptodate - Is this folio up to date?
+ * @folio: The folio.
+ *
+ * The uptodate flag is set on a folio when every byte in the folio is
+ * at least as new as the corresponding bytes on storage. Anonymous
+ * and CoW folios are always uptodate. If the folio is not uptodate,
+ * some of the bytes in it may be; see the is_partially_uptodate()
+ * address_space operation.
+ */
+static inline bool folio_test_uptodate(const struct folio *folio)
{
- int ret;
- page = compound_head(page);
- ret = test_bit(PG_uptodate, &(page)->flags);
+ bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
/*
- * Must ensure that the data we read out of the page is loaded
- * _after_ we've loaded page->flags to check for PageUptodate.
- * We can skip the barrier if the page is not uptodate, because
+ * Must ensure that the data we read out of the folio is loaded
+ * _after_ we've loaded folio->flags to check the uptodate bit.
+ * We can skip the barrier if the folio is not uptodate, because
* we wouldn't be reading anything from it.
*
- * See SetPageUptodate() for the other side of the story.
+ * See folio_mark_uptodate() for the other side of the story.
*/
if (ret)
smp_rmb();
@@ -524,46 +764,73 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static __always_inline void __SetPageUptodate(struct page *page)
+static inline int PageUptodate(const struct page *page)
+{
+ return folio_test_uptodate(page_folio(page));
+}
+
+static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
- __set_bit(PG_uptodate, &page->flags);
+ __set_bit(PG_uptodate, folio_flags(folio, 0));
}
-static __always_inline void SetPageUptodate(struct page *page)
+static __always_inline void folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
- * so that all previous stores issued in order to bring the page
- * uptodate are actually visible before PageUptodate becomes true.
+ * so that all previous stores issued in order to bring the folio
+ * uptodate are actually visible before folio_test_uptodate becomes true.
*/
smp_wmb();
- set_bit(PG_uptodate, &page->flags);
+ set_bit(PG_uptodate, folio_flags(folio, 0));
+}
+
+static __always_inline void __SetPageUptodate(struct page *page)
+{
+ __folio_mark_uptodate((struct folio *)page);
+}
+
+static __always_inline void SetPageUptodate(struct page *page)
+{
+ folio_mark_uptodate((struct folio *)page);
}
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-int test_clear_page_writeback(struct page *page);
-int __test_set_page_writeback(struct page *page, bool keep_write);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
-#define test_set_page_writeback(page) \
- __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page) \
- __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio) \
+ __folio_start_writeback(folio, false)
+#define folio_start_writeback_keepwrite(folio) \
+ __folio_start_writeback(folio, true)
-static inline void set_page_writeback(struct page *page)
+static __always_inline bool folio_test_head(const struct folio *folio)
{
- test_set_page_writeback(page);
+ return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}
-static inline void set_page_writeback_keepwrite(struct page *page)
+static __always_inline int PageHead(const struct page *page)
{
- test_set_page_writeback_keepwrite(page);
+ PF_POISONED_CHECK(page);
+ return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}
-__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
+__SETPAGEFLAG(Head, head, PF_ANY)
+__CLEARPAGEFLAG(Head, head, PF_ANY)
+CLEARPAGEFLAG(Head, head, PF_ANY)
+
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(const struct folio *folio)
+{
+ return folio_test_head(folio);
+}
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
@@ -581,25 +848,36 @@ static inline void ClearPageCompound(struct page *page)
BUG_ON(!PageHead(page));
ClearPageHead(page);
}
+PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
+#else
+TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#endif
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
-#else
-TESTPAGEFLAG_FALSE(Huge)
-TESTPAGEFLAG_FALSE(HeadHuge)
+int PageHuge(const struct page *page);
+SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-static inline bool page_huge_active(struct page *page)
+/**
+ * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
+ * @folio: The folio to test.
+ *
+ * Context: Any context. Caller should have a reference on the folio to
+ * prevent it from being turned into a tail page.
+ * Return: True for hugetlbfs folios, false for anon folios or folios
+ * belonging to other filesystems.
+ */
+static inline bool folio_test_hugetlb(const struct folio *folio)
{
- return 0;
+ return folio_test_large(folio) &&
+ test_bit(PG_hugetlb, const_folio_flags(folio, 1));
}
+#else
+TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -609,7 +887,7 @@ static inline bool page_huge_active(struct page *page)
* hugetlbfs pages, but not normal pages. PageTransHuge() can only be
* called only in the core VM paths where hugetlbfs pages can't exist.
*/
-static inline int PageTransHuge(struct page *page)
+static inline int PageTransHuge(const struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
return PageHead(page);
@@ -620,109 +898,53 @@ static inline int PageTransHuge(struct page *page)
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
*/
-static inline int PageTransCompound(struct page *page)
+static inline int PageTransCompound(const struct page *page)
{
return PageCompound(page);
}
/*
- * PageTransCompoundMap is the same as PageTransCompound, but it also
- * guarantees the primary MMU has the entire compound page mapped
- * through pmd_trans_huge, which in turn guarantees the secondary MMUs
- * can also map the entire compound page. This allows the secondary
- * MMUs to call get_user_pages() only once for each compound page and
- * to immediately map the entire compound page with a single secondary
- * MMU fault. If there will be a pmd split later, the secondary MMUs
- * will get an update through the MMU notifier invalidation through
- * split_huge_pmd().
- *
- * Unlike PageTransCompound, this is safe to be called only while
- * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount check false
- * positives.
- *
- * We have to treat page cache THP differently since every subpage of it
- * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
- * mapped in the current process so comparing subpage's _mapcount to
- * compound_mapcount to filter out PTE mapped case.
- */
-static inline int PageTransCompoundMap(struct page *page)
-{
- struct page *head;
-
- if (!PageTransCompound(page))
- return 0;
-
- if (PageAnon(page))
- return atomic_read(&page->_mapcount) < 0;
-
- head = compound_head(page);
- /* File THP is PMD mapped and not PTE mapped */
- return atomic_read(&page->_mapcount) ==
- atomic_read(compound_mapcount_ptr(head));
-}
-
-/*
* PageTransTail returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
*/
-static inline int PageTransTail(struct page *page)
+static inline int PageTransTail(const struct page *page)
{
return PageTail(page);
}
+#else
+TESTPAGEFLAG_FALSE(TransHuge, transhuge)
+TESTPAGEFLAG_FALSE(TransCompound, transcompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
+TESTPAGEFLAG_FALSE(TransTail, transtail)
+#endif
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
- * PageDoubleMap indicates that the compound page is mapped with PTEs as well
- * as PMDs.
- *
- * This is required for optimization of rmap operations for THP: we can postpone
- * per small page mapcount accounting (and its overhead from atomic operations)
- * until the first PMD split.
+ * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
+ * compound page.
*
- * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
- * by one. This reference will go away with last compound_mapcount.
- *
- * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
+ * This flag is set by hwpoison handler. Cleared by THP split or free page.
*/
-static inline int PageDoubleMap(struct page *page)
-{
- return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
-}
-
-static inline void SetPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- set_bit(PG_double_map, &page[1].flags);
-}
-
-static inline void ClearPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- clear_bit(PG_double_map, &page[1].flags);
-}
-static inline int TestSetPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- return test_and_set_bit(PG_double_map, &page[1].flags);
-}
+PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+ TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+#else
+PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+ TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+#endif
-static inline int TestClearPageDoubleMap(struct page *page)
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- return test_and_clear_bit(PG_double_map, &page[1].flags);
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
}
-#else
-TESTPAGEFLAG_FALSE(TransHuge)
-TESTPAGEFLAG_FALSE(TransCompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap)
-TESTPAGEFLAG_FALSE(TransTail)
-PAGEFLAG_FALSE(DoubleMap)
- TESTSETFLAG_FALSE(DoubleMap)
- TESTCLEARFLAG_FALSE(DoubleMap)
-#endif
-
/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
@@ -737,39 +959,59 @@ PAGEFLAG_FALSE(DoubleMap)
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
-#define PG_kmemcg 0x00000200
-#define PG_table 0x00000400
-#define PG_guard 0x00000800
+#define PG_table 0x00000200
+#define PG_guard 0x00000400
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+#define folio_test_type(folio, flag) \
+ ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+
+static inline int page_type_has_type(unsigned int page_type)
+{
+ return (int)page_type < PAGE_MAPCOUNT_RESERVE;
+}
-static inline int page_has_type(struct page *page)
+static inline int page_has_type(const struct page *page)
{
- return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
+ return page_type_has_type(page->page_type);
}
-#define PAGE_TYPE_OPS(uname, lname) \
-static __always_inline int Page##uname(struct page *page) \
+#define PAGE_TYPE_OPS(uname, lname, fname) \
+static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
+static __always_inline int folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-}
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+} \
/*
* PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-PAGE_TYPE_OPS(Buddy, buddy)
+PAGE_TYPE_OPS(Buddy, buddy, buddy)
/*
* PageOffline() indicates that the page is logically offline although the
@@ -787,55 +1029,59 @@ PAGE_TYPE_OPS(Buddy, buddy)
* relies on this feature is aware that re-onlining the memory block will
* require to re-set the pages PageOffline() and not giving them to the
* buddy via online_page_callback_t.
+ *
+ * There are drivers that mark a page PageOffline() and expect there won't be
+ * any further access to page content. PFN walkers that read content of random
+ * pages should check PageOffline() and synchronize with such drivers using
+ * page_offline_freeze()/page_offline_thaw().
*/
-PAGE_TYPE_OPS(Offline, offline)
+PAGE_TYPE_OPS(Offline, offline, offline)
-/*
- * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
- * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
- */
-PAGE_TYPE_OPS(Kmemcg, kmemcg)
+extern void page_offline_freeze(void);
+extern void page_offline_thaw(void);
+extern void page_offline_begin(void);
+extern void page_offline_end(void);
/*
* Marks pages in use as page tables.
*/
-PAGE_TYPE_OPS(Table, table)
+PAGE_TYPE_OPS(Table, table, pgtable)
/*
* Marks guardpages used with debug_pagealloc.
*/
-PAGE_TYPE_OPS(Guard, guard)
+PAGE_TYPE_OPS(Guard, guard, guard)
extern bool is_free_buddy_page(struct page *page);
-__PAGEFLAG(Isolated, isolated, PF_ANY);
+PAGEFLAG(Isolated, isolated, PF_ANY);
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
+static __always_inline int PageAnonExclusive(const struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- return PageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void SetPageSlabPfmemalloc(struct page *page)
+static __always_inline void SetPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- SetPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- __ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
#ifdef CONFIG_MMU
@@ -846,25 +1092,33 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is freed. Pages being freed should not have
- * these flags set. It they are, there is a problem.
+ * these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have these flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
*
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
+
+/*
+ * Flags stored in the second page of a compound page. They may overlap
+ * the CHECK_AT_FREE flags above, so they need to be cleared.
+ */
+#define PAGE_FLAGS_SECOND \
+ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
+ 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
@@ -875,16 +1129,21 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* Determine if a page has private stuff, indicating that release routines
* should be invoked upon it.
*/
-static inline int page_has_private(struct page *page)
+static inline int page_has_private(const struct page *page)
{
return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
+static inline bool folio_has_private(const struct folio *folio)
+{
+ return page_has_private(&folio->page);
+}
+
#undef PF_ANY
#undef PF_HEAD
-#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
+#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 572458016331..4ac34392823a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,33 +33,16 @@ static inline bool is_migrate_isolate(int migratetype)
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
-struct page *has_unmovable_pages(struct zone *zone, struct page *page,
- int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
-/*
- * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- */
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype, int flags);
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ int migratetype, int flags, gfp_t gfp_flags);
-/*
- * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
- * target range is [start_pfn, end_pfn)
- */
-void
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ int migratetype);
-/*
- * Test all pages in [start_pfn, end_pfn) are isolated or not.
- */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int isol_flags);
-
-struct page *alloc_migrate_target(struct page *page, unsigned long private);
-
#endif
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 85bd413e784e..8cd858d912c4 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -3,16 +3,17 @@
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/cache.h>
+#include <linux/limits.h>
#include <asm/page.h>
struct page_counter {
+ /*
+ * Make sure 'usage' does not share a cacheline with any other field;
+ * memcg->memory.usage is a hot member of struct mem_cgroup.
+ */
atomic_long_t usage;
- unsigned long min;
- unsigned long low;
- unsigned long high;
- unsigned long max;
- struct page_counter *parent;
+ CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -24,10 +25,18 @@ struct page_counter {
atomic_long_t low_usage;
atomic_long_t children_low_usage;
- /* legacy */
unsigned long watermark;
unsigned long failcnt;
-};
+
+ /* Keep all the read-mostly fields in a separate cacheline. */
+ CACHELINE_PADDING(_pad2_);
+
+ unsigned long min;
+ unsigned long low;
+ unsigned long high;
+ unsigned long max;
+ struct page_counter *parent;
+} ____cacheline_internodealigned_in_smp;
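/*
 * A build-time sanity sketch (not in the patch) of what the padding
 * buys: the hot 'usage' counter and the read-mostly limits must not
 * share a cacheline. Assumes linux/build_bug.h for static_assert().
 */
static_assert(offsetof(struct page_counter, min) -
	      offsetof(struct page_counter, usage) >= SMP_CACHE_BYTES);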
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index cfce186f0c4e..be98564191e6 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -7,19 +7,36 @@
#include <linux/stackdepot.h>
struct pglist_data;
+
+#ifdef CONFIG_PAGE_EXTENSION
+/**
+ * struct page_ext_operations - per page_ext client operations
+ * @offset: Offset to the client's data within page_ext. Offset is returned to
+ * the client by page_ext_init.
+ * @size: The size of the client data within page_ext.
+ * @need: Function that returns true if client requires page_ext.
+ * @init: (optional) Called to initialize client once page_exts are allocated.
+ * @need_shared_flags: True when client is using shared page_ext->flags
+ * field.
+ *
+ * Each Page Extension client must define page_ext_operations in
+ * page_ext_ops array.
+ */
struct page_ext_operations {
size_t offset;
size_t size;
bool (*need)(void);
void (*init)(void);
+ bool need_shared_flags;
};
-#ifdef CONFIG_PAGE_EXTENSION
-
+/*
+ * Users of page_ext->flags must set need_shared_flags to true.
+ */
enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
#endif
@@ -36,22 +53,39 @@ struct page_ext {
unsigned long flags;
};
+extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+static inline bool early_page_ext_enabled(void)
+{
+ return early_page_ext;
+}
+
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
+static inline void page_ext_init_flatmem_late(void)
+{
+}
#else
extern void page_ext_init_flatmem(void);
+extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
#endif
-struct page_ext *lookup_page_ext(const struct page *page);
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
+
+static inline void *page_ext_data(struct page_ext *page_ext,
+ struct page_ext_operations *ops)
+{
+ return (void *)(page_ext) + ops->offset;
+}
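/*
 * Sketch (not part of this patch) of how a page_ext client uses the
 * get/put API: the reference pins the backing storage while
 * page_ext_data() resolves the client's slice via ops->offset.
 * 'struct my_data' and 'my_ops' are hypothetical client definitions.
 */
struct my_data { unsigned long state; };
extern struct page_ext_operations my_ops;

static inline void my_client_update(struct page *page)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct my_data *data;

	if (unlikely(!page_ext))
		return;
	data = page_ext_data(page_ext, &my_ops);
	data->state++;		/* safe while the reference is held */
	page_ext_put(page_ext);
}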
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@@ -63,21 +97,34 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
-static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+static inline bool early_page_ext_enabled(void)
{
+ return false;
}
-static inline struct page_ext *lookup_page_ext(const struct page *page)
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
- return NULL;
}
static inline void page_ext_init(void)
{
}
+static inline void page_ext_init_flatmem_late(void)
+{
+}
+
static inline void page_ext_init_flatmem(void)
{
}
+
+static inline struct page_ext *page_ext_get(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 1e894d34bdce..d8f344840643 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -6,135 +6,142 @@
#include <linux/page-flags.h>
#include <linux/page_ext.h>
-#ifdef CONFIG_IDLE_PAGE_TRACKING
+#ifdef CONFIG_PAGE_IDLE_FLAG
-#ifdef CONFIG_64BIT
-static inline bool page_is_young(struct page *page)
-{
- return PageYoung(page);
-}
-
-static inline void set_page_young(struct page *page)
-{
- SetPageYoung(page);
-}
-
-static inline bool test_and_clear_page_young(struct page *page)
-{
- return TestClearPageYoung(page);
-}
-
-static inline bool page_is_idle(struct page *page)
-{
- return PageIdle(page);
-}
-
-static inline void set_page_idle(struct page *page)
-{
- SetPageIdle(page);
-}
-
-static inline void clear_page_idle(struct page *page)
-{
- ClearPageIdle(page);
-}
-#else /* !CONFIG_64BIT */
+#ifndef CONFIG_64BIT
/*
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-extern struct page_ext_operations page_idle_ops;
-
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_idle;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_idle;
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_64BIT */
-#else /* !CONFIG_IDLE_PAGE_TRACKING */
+#else /* !CONFIG_PAGE_IDLE_FLAG */
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
{
return false;
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
return false;
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(struct folio *folio)
{
return false;
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
}
-#endif /* CONFIG_IDLE_PAGE_TRACKING */
+#endif /* CONFIG_PAGE_IDLE_FLAG */
+static inline bool page_is_young(struct page *page)
+{
+ return folio_test_young(page_folio(page));
+}
+
+static inline void set_page_young(struct page *page)
+{
+ folio_set_young(page_folio(page));
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return folio_test_clear_young(page_folio(page));
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return folio_test_idle(page_folio(page));
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ folio_set_idle(page_folio(page));
+}
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 8679ccd722e8..debdc25f08b9 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,68 +8,70 @@
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int order);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+ unsigned short order, gfp_t gfp_mask);
+extern void __split_page_owner(struct page *page, int old_order,
+ int new_order);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
__reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner(page, order, gfp_mask);
}
-static inline void split_page_owner(struct page *page, unsigned int order)
+static inline void split_page_owner(struct page *page, int old_order,
+ int new_order)
{
if (static_branch_unlikely(&page_owner_inited))
- __split_page_owner(page, order);
+ __split_page_owner(page, old_order, new_order);
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
{
if (static_branch_unlikely(&page_owner_inited))
- __copy_page_owner(oldpage, newpage);
+ __folio_copy_owner(newfolio, old);
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner_migrate_reason(page, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
}
#else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
}
-static inline void split_page_owner(struct page *page,
- unsigned int order)
+static inline void split_page_owner(struct page *page, int old_order,
+ int new_order)
{
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index d27701199a4d..d7c2d33baa7f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -7,13 +7,13 @@
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>
-extern struct tracepoint __tracepoint_page_ref_set;
-extern struct tracepoint __tracepoint_page_ref_mod;
-extern struct tracepoint __tracepoint_page_ref_mod_and_test;
-extern struct tracepoint __tracepoint_page_ref_mod_and_return;
-extern struct tracepoint __tracepoint_page_ref_mod_unless;
-extern struct tracepoint __tracepoint_page_ref_freeze;
-extern struct tracepoint __tracepoint_page_ref_unfreeze;
+DECLARE_TRACEPOINT(page_ref_set);
+DECLARE_TRACEPOINT(page_ref_mod);
+DECLARE_TRACEPOINT(page_ref_mod_and_test);
+DECLARE_TRACEPOINT(page_ref_mod_and_return);
+DECLARE_TRACEPOINT(page_ref_mod_unless);
+DECLARE_TRACEPOINT(page_ref_freeze);
+DECLARE_TRACEPOINT(page_ref_unfreeze);
#ifdef CONFIG_DEBUG_PAGE_REF
@@ -24,7 +24,7 @@ extern struct tracepoint __tracepoint_page_ref_unfreeze;
*
* See trace_##name##_enabled(void) in include/linux/tracepoint.h
*/
-#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
+#define page_ref_tracepoint_active(t) tracepoint_enabled(t)
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
@@ -62,23 +62,50 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+/**
+ * folio_ref_count - The reference count on this folio.
+ * @folio: The folio.
+ *
+ * The refcount is usually incremented by calls to folio_get() and
+ * decremented by calls to folio_put(). Some typical users of the
+ * folio refcount:
+ *
+ * - Each reference from a page table
+ * - The page cache
+ * - Filesystem private data
+ * - The LRU list
+ * - Pipes
+ * - Direct IO which references this page in the process address space
+ *
+ * Return: The number of references to this folio.
+ */
+static inline int folio_ref_count(const struct folio *folio)
{
- return atomic_read(&compound_head(page)->_refcount);
+ return page_ref_count(&folio->page);
+}
+
+static inline int page_count(const struct page *page)
+{
+ return folio_ref_count(page_folio(page));
}
static inline void set_page_count(struct page *page, int v)
{
atomic_set(&page->_refcount, v);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
+ if (page_ref_tracepoint_active(page_ref_set))
__page_ref_set(page, v);
}
+static inline void folio_set_count(struct folio *folio, int v)
+{
+ set_page_count(&folio->page, v);
+}
+
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
@@ -91,102 +118,227 @@ static inline void init_page_count(struct page *page)
static inline void page_ref_add(struct page *page, int nr)
{
atomic_add(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, nr);
}
+static inline void folio_ref_add(struct folio *folio, int nr)
+{
+ page_ref_add(&folio->page, nr);
+}
+
static inline void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -nr);
}
+static inline void folio_ref_sub(struct folio *folio, int nr)
+{
+ page_ref_sub(&folio->page, nr);
+}
+
static inline int page_ref_sub_return(struct page *page, int nr)
{
int ret = atomic_sub_return(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, -nr, ret);
return ret;
}
+static inline int folio_ref_sub_return(struct folio *folio, int nr)
+{
+ return page_ref_sub_return(&folio->page, nr);
+}
+
static inline void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, 1);
}
+static inline void folio_ref_inc(struct folio *folio)
+{
+ page_ref_inc(&folio->page);
+}
+
static inline void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -1);
}
+static inline void folio_ref_dec(struct folio *folio)
+{
+ page_ref_dec(&folio->page);
+}
+
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
return ret;
}
+static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
+{
+ return page_ref_sub_and_test(&folio->page, nr);
+}
+
static inline int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, 1, ret);
return ret;
}
+static inline int folio_ref_inc_return(struct folio *folio)
+{
+ return page_ref_inc_return(&folio->page);
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
return ret;
}
+static inline int folio_ref_dec_and_test(struct folio *folio)
+{
+ return page_ref_dec_and_test(&folio->page);
+}
+
static inline int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, -1, ret);
return ret;
}
-static inline int page_ref_add_unless(struct page *page, int nr, int u)
+static inline int folio_ref_dec_return(struct folio *folio)
+{
+ return page_ref_dec_return(&folio->page);
+}
+
+static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
- int ret = atomic_add_unless(&page->_refcount, nr, u);
+ bool ret = atomic_add_unless(&page->_refcount, nr, u);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
+ if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
+static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
+{
+ return page_ref_add_unless(&folio->page, nr, u);
+}
+
+/**
+ * folio_try_get - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * If you do not already have a reference to a folio, you can attempt to
+ * get one using this function. It may fail if, for example, the folio
+ * has been freed since you found a pointer to it, or it is frozen for
+ * the purposes of splitting or migration.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get(struct folio *folio)
+{
+ return folio_ref_add_unless(folio, 1, 0);
+}
+
+static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
+{
+#ifdef CONFIG_TINY_RCU
+ /*
+ * The caller guarantees the folio will not be freed from interrupt
+ * context, so (on !SMP) we only need preemption to be disabled
+ * and TINY_RCU does that for us.
+ */
+# ifdef CONFIG_PREEMPT_COUNT
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
+# endif
+ VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+ folio_ref_add(folio, count);
+#else
+ if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
+ /* Either the folio has been freed, or will be freed. */
+ return false;
+ }
+#endif
+ return true;
+}
+
+/**
+ * folio_try_get_rcu - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * This is a version of folio_try_get() optimised for non-SMP kernels.
+ * If you are still holding the rcu_read_lock() after looking up the
+ * page and know that the page cannot have its refcount decreased to
+ * zero in interrupt context, you can use this instead of folio_try_get().
+ *
+ * Example users include get_user_pages_fast() (as pages are not unmapped
+ * from interrupt context) and the page cache lookups (as pages are not
+ * truncated from interrupt context). We also know that pages are not
+ * frozen in interrupt context for the purposes of splitting or migration.
+ *
+ * You can also use this function if you're holding a lock that prevents
+ * pages being frozen & removed; eg the i_pages lock for the page cache
+ * or the mmap_lock or page table lock for page tables. In this case,
+ * it will always succeed, and you could have used a plain folio_get(),
+ * but it's sometimes more convenient to have a common function called
+ * from both locked and RCU-protected contexts.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get_rcu(struct folio *folio)
+{
+ return folio_ref_try_add_rcu(folio, 1);
+}
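/*
 * Sketch (not from this patch) of the lockless lookup dance that
 * folio_try_get_rcu() exists for, modelled on the page cache: find a
 * folio, pin it, then re-check that it is still the entry at that
 * index, retrying if it was freed or replaced in the meantime.
 */
static inline struct folio *rcu_lookup_sketch(struct address_space *mapping,
					      pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	folio = xas_load(&xas);
	if (!folio || xa_is_value(folio))
		goto out;
	if (!folio_try_get_rcu(folio))
		goto repeat;		/* freed under us, look again */
	if (unlikely(folio != xas_reload(&xas))) {
		folio_put(folio);	/* raced with removal */
		goto repeat;
	}
out:
	rcu_read_unlock();
	return folio;
}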
+
static inline int page_ref_freeze(struct page *page, int count)
{
int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
+ if (page_ref_tracepoint_active(page_ref_freeze))
__page_ref_freeze(page, count, ret);
return ret;
}
+static inline int folio_ref_freeze(struct folio *folio, int count)
+{
+ return page_ref_freeze(&folio->page, count);
+}
+
static inline void page_ref_unfreeze(struct page *page, int count)
{
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
atomic_set_release(&page->_refcount, count);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
+ if (page_ref_tracepoint_active(page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
+static inline void folio_ref_unfreeze(struct folio *folio, int count)
+{
+ page_ref_unfreeze(&folio->page, count);
+}
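/*
 * Sketch (not part of this patch) of the freeze/unfreeze pattern: a
 * caller holding all expected references can atomically drop the
 * refcount to zero, excluding folio_try_get() and friends while it
 * works, and then restore the count.
 */
static inline bool exclusive_access_sketch(struct folio *folio, int expected)
{
	if (!folio_ref_freeze(folio, expected))
		return false;	/* somebody else holds a reference */
	/* ... no new references can be taken here ... */
	folio_ref_unfreeze(folio, expected);
	return true;
}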
#endif
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
index 3b99e0ec24f2..fe648dfa3a7c 100644
--- a/include/linux/page_reporting.h
+++ b/include/linux/page_reporting.h
@@ -18,6 +18,9 @@ struct page_reporting_dev_info {
/* Current state of page reporting */
atomic_t state;
+
+ /* Minimum page order for reporting */
+ unsigned int order;
};
/* Tear-down and bring-up for page reporting devices */
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
new file mode 100644
index 000000000000..6722941c7cb8
--- /dev/null
+++ b/include/linux/page_table_check.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef __LINUX_PAGE_TABLE_CHECK_H
+#define __LINUX_PAGE_TABLE_CHECK_H
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+#include <linux/jump_label.h>
+
+extern struct static_key_true page_table_check_disabled;
+extern struct page_ext_operations page_table_check_ops;
+
+void __page_table_check_zero(struct page *page, unsigned int order);
+void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
+void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ unsigned int nr);
+void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear(mm, pte);
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_clear(mm, pmd);
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_clear(mm, pud);
+}
+
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_ptes_set(mm, ptep, pte, nr);
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_set(mm, pmdp, pmd);
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
+ pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_set(mm, pudp, pud);
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
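/*
 * Sketch (not part of this patch) of how an architecture wires these
 * hooks into its page-table setters; a simplified set_ptes()-style
 * helper is assumed. The check runs before the entries become visible.
 */
static inline void arch_set_ptes_sketch(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	/* ... architecture-specific store of 'nr' consecutive PTEs ... */
}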
+#else
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+{
+}
+
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
+ pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
+ pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
+#endif /* CONFIG_PAGE_TABLE_CHECK */
+#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index fff52ad370c1..3f2409b968ec 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -37,24 +37,31 @@ extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-/* Huge pages are a constant size */
-#define pageblock_order HUGETLB_PAGE_ORDER
+/*
+ * Huge pages are a constant size, but don't exceed the maximum allocation
+ * granularity.
+ */
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order (MAX_ORDER-1)
+#define pageblock_order MAX_PAGE_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
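/*
 * Sketch (not in the patch) of the helpers above: two pfns fall in the
 * same pageblock iff they share a pageblock start.
 */
static inline bool same_pageblock_sketch(unsigned long pfn1, unsigned long pfn2)
{
	return pageblock_start_pfn(pfn1) == pageblock_start_pfn(pfn2);
}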
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long mask);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7de11dcd534d..2df35e65557d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,7 +16,180 @@
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
-struct pagevec;
+struct folio_batch;
+
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
+static inline void invalidate_remote_inode(struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode))
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+}
+int invalidate_inode_pages2(struct address_space *mapping);
+int invalidate_inode_pages2_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
+void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
+
+int write_inode_now(struct inode *, int sync);
+int filemap_fdatawrite(struct address_space *);
+int filemap_flush(struct address_space *);
+int filemap_fdatawait_keep_errors(struct address_space *mapping);
+int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+static inline int filemap_fdatawait(struct address_space *mapping)
+{
+ return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
+}
+
+bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+int __filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end, int sync_mode);
+int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
+int filemap_check_errors(struct address_space *mapping);
+void __filemap_set_wb_err(struct address_space *mapping, int err);
+int filemap_fdatawrite_wbc(struct address_space *mapping,
+ struct writeback_control *wbc);
+int kiocb_write_and_wait(struct kiocb *iocb, size_t count);
+
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+ return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
+/**
+ * filemap_set_wb_err - set a writeback error on an address_space
+ * @mapping: mapping in which to set writeback error
+ * @err: error to be set in mapping
+ *
+ * When writeback fails in some way, we must record that error so that
+ * userspace can be informed when fsync and the like are called. We endeavor
+ * to report errors on any file that was open at the time of the error. Some
+ * internal callers also need to know when writeback errors have occurred.
+ *
+ * When a writeback error occurs, most filesystems will want to call
+ * filemap_set_wb_err to record the error in the mapping so that it will be
+ * automatically reported whenever fsync is called on the file.
+ */
+static inline void filemap_set_wb_err(struct address_space *mapping, int err)
+{
+ /* Fastpath for common case of no error */
+ if (unlikely(err))
+ __filemap_set_wb_err(mapping, err);
+}
+
+/**
+ * filemap_check_wb_err - has an error occurred since the mark was sampled?
+ * @mapping: mapping to check for writeback errors
+ * @since: previously-sampled errseq_t
+ *
+ * Grab the errseq_t value from the mapping, and see if it has changed "since"
+ * the given value was sampled.
+ *
+ * If it has, report the latest error set; otherwise return 0.
+ */
+static inline int filemap_check_wb_err(struct address_space *mapping,
+ errseq_t since)
+{
+ return errseq_check(&mapping->wb_err, since);
+}
+
+/**
+ * filemap_sample_wb_err - sample the current errseq_t to test for later errors
+ * @mapping: mapping to be sampled
+ *
+ * Writeback errors are always reported relative to a particular sample point
+ * in the past. This function provides those sample points.
+ */
+static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
+{
+ return errseq_sample(&mapping->wb_err);
+}
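/*
 * Sketch (not in this patch) of the sample/check protocol: sample the
 * error sequence before starting writeback, then report anything that
 * was raised since that point.
 */
static inline int sync_and_check_sketch(struct address_space *mapping)
{
	errseq_t since = filemap_sample_wb_err(mapping);
	int err = filemap_write_and_wait(mapping);

	if (err)
		return err;
	return filemap_check_wb_err(mapping, since);
}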
+
+/**
+ * file_sample_sb_err - sample the current errseq_t to test for later errors
+ * @file: file pointer to be sampled
+ *
+ * Grab the most current superblock-level errseq_t value for the given
+ * struct file.
+ */
+static inline errseq_t file_sample_sb_err(struct file *file)
+{
+ return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
+}
+
+/*
+ * Flush file data before changing attributes. Caller must hold any locks
+ * required to prevent further writes to this file until we're done setting
+ * flags.
+ */
+static inline int inode_drain_writes(struct inode *inode)
+{
+ inode_dio_wait(inode);
+ return filemap_write_and_wait(inode->i_mapping);
+}
+
+static inline bool mapping_empty(struct address_space *mapping)
+{
+ return xa_empty(&mapping->i_pages);
+}
+
+/*
+ * mapping_shrinkable - test if page cache state allows inode reclaim
+ * @mapping: the page cache mapping
+ *
+ * This checks the mapping's cache state for the purpose of inode
+ * reclaim and LRU management.
+ *
+ * The caller is expected to hold the i_lock, but is not required to
+ * hold the i_pages lock, which usually protects cache state. That's
+ * because the i_lock and the list_lru lock that protect the inode and
+ * its LRU state don't nest inside the irq-safe i_pages lock.
+ *
+ * Cache deletions are performed under the i_lock, which ensures that
+ * when an inode goes empty, it will reliably get queued on the LRU.
+ *
+ * Cache additions do not acquire the i_lock and may race with this
+ * check, in which case we'll report the inode as shrinkable when it
+ * has cache pages. This is okay: the shrinker also checks the
+ * refcount and the referenced bit, which will be elevated or set in
+ * the process of adding new cache pages to an inode.
+ */
+static inline bool mapping_shrinkable(struct address_space *mapping)
+{
+ void *head;
+
+ /*
+ * On highmem systems, there could be lowmem pressure from the
+ * inodes before there is highmem pressure from the page
+ * cache. Make inodes shrinkable regardless of cache state.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ return true;
+
+ /* Cache completely empty? Shrink away. */
+ head = rcu_access_pointer(mapping->i_pages.xa_head);
+ if (!head)
+ return true;
+
+ /*
+ * The xarray stores single offset-0 entries directly in the
+ * head pointer, which allows non-resident page cache entries
+ * to escape the shadow shrinker's list of xarray nodes. The
+ * inode shrinker needs to pick them up under memory pressure.
+ */
+ if (!xa_is_node(head) && xa_is_value(head))
+ return true;
+
+ return false;
+}
/*
* Bits in mapping->flags.
@@ -29,6 +202,11 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
+ AS_LARGE_FOLIO_SUPPORT = 6,
+ AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES, /* must wait for writeback before modifying
+ folio contents */
+ AS_UNMOVABLE, /* The mapping cannot be moved, ever */
};
/**
@@ -54,7 +232,8 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
__filemap_set_wb_err(mapping, error);
/* Record it in superblock */
- errseq_set(&mapping->host->i_sb->s_wb_err, error);
+ if (mapping->host)
+ errseq_set(&mapping->host->i_sb->s_wb_err, error);
/* Record it in flags for now, for legacy callers */
if (error == -ENOSPC)
@@ -98,6 +277,52 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping)
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
+static inline bool mapping_release_always(const struct address_space *mapping)
+{
+ return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_set_release_always(struct address_space *mapping)
+{
+ set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_clear_release_always(struct address_space *mapping)
+{
+ clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline bool mapping_stable_writes(const struct address_space *mapping)
+{
+ return test_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_set_stable_writes(struct address_space *mapping)
+{
+ set_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_clear_stable_writes(struct address_space *mapping)
+{
+ clear_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_set_unmovable(struct address_space *mapping)
+{
+ /*
+ * It's expected unmovable mappings are also unevictable. Compaction
+ * migrate scanner (isolate_migratepages_block()) relies on this to
+ * reduce page locking.
+ */
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+ set_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
+static inline bool mapping_unmovable(struct address_space *mapping)
+{
+ return test_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return mapping->gfp_mask;
@@ -119,141 +344,217 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-void release_pages(struct page **pages, int nr);
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+ __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
/*
- * speculatively take a reference to a page.
- * If the page is free (_refcount == 0), then _refcount is untouched, and 0
- * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
- *
- * This function must be called inside the same rcu_read_lock() section as has
- * been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
- *
- * Unless an RCU grace period has passed, the count of all pages coming out
- * of the allocator must be considered unstable. page_count may return higher
- * than expected, and put_page must be able to do the right thing when the
- * page has been finished with, no matter what it is subsequently allocated
- * for (because put_page is what is used here to drop an invalid speculative
- * reference).
- *
- * This is the interesting part of the lockless pagecache (and lockless
- * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
- * has the following pattern:
- * 1. find page in radix tree
- * 2. conditionally increment refcount
- * 3. check the page is still in pagecache (if no, goto 1)
- *
- * Remove-side that cares about stability of _refcount (eg. reclaim) has the
- * following (with the i_pages lock held):
- * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
- * B. remove page from pagecache
- * C. free the page
- *
- * There are 2 critical interleavings that matter:
- * - 2 runs before A: in this case, A sees elevated refcount and bails out
- * - A runs before 2: in this case, 2 sees zero refcount and retries;
- * subsequently, B will complete and 1 will find no page, causing the
- * lookup to return NULL.
- *
- * It is possible that between 1 and 2, the page is removed then the exact same
- * page is inserted into the same position in pagecache. That's OK: the
- * old find_get_page using a lock could equally have run before or after
- * such a re-insertion, depending on order that locks are granted.
- *
- * Lookups racing against pagecache insertion isn't a big problem: either 1
- * will find the page or it will not. Likewise, the old find_get_page could run
- * either before the insertion or afterwards, depending on timing.
- */
-static inline int __page_cache_add_speculative(struct page *page, int count)
-{
-#ifdef CONFIG_TINY_RCU
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- /*
- * Preempt must be disabled here - we rely on rcu_read_lock doing
- * this for us.
- *
- * Pagecache won't be truncated from interrupt context, so if we have
- * found a page in the radix tree here, we have pinned its refcount by
- * disabling preempt, and hence no need for the "speculative get" that
- * SMP requires.
- */
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, count);
+ * Large folio support currently depends on THP. These dependencies are
+ * being worked on but are not yet resolved.
+ */
+static inline bool mapping_large_folio_support(struct address_space *mapping)
+{
+ return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ return atomic_read(&mapping->nr_thps);
#else
- if (unlikely(!page_ref_add_unless(page, count, 0))) {
- /*
- * Either the page has been freed, or will be freed.
- * In either case, retry here and the caller should
- * do the right thing (see comments above).
- */
- return 0;
- }
+ return 0;
#endif
- VM_BUG_ON_PAGE(PageTail(page), page);
+}
- return 1;
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ if (!mapping_large_folio_support(mapping))
+ atomic_inc(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
+#endif
}
-static inline int page_cache_get_speculative(struct page *page)
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
- return __page_cache_add_speculative(page, 1);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ if (!mapping_large_folio_support(mapping))
+ atomic_dec(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
+#endif
}
-static inline int page_cache_add_speculative(struct page *page, int count)
+struct address_space *page_mapping(struct page *);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *swapcache_mapping(struct folio *);
+
+/**
+ * folio_file_mapping - Find the mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to. Folios in the swap cache return the mapping of the
+ * swap file or swap device where the data is stored. This is different
+ * from the mapping returned by folio_mapping(). The only reason to
+ * use it is if, like NFS, you return 0 from ->activate_swapfile.
+ *
+ * Do not call this for folios which aren't in the page cache or swap cache.
+ */
+static inline struct address_space *folio_file_mapping(struct folio *folio)
{
- return __page_cache_add_speculative(page, count);
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_mapping(folio);
+
+ return folio->mapping;
}
/**
- * attach_page_private - Attach private data to a page.
- * @page: Page to attach data to.
- * @data: Data to attach to page.
+ * folio_flush_mapping - Find the file mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to. Anonymous folios return NULL, even if they're in
+ * the swap cache. Other kinds of folio also return NULL.
*
- * Attaching private data to a page increments the page's reference count.
- * The data must be detached before the page will be freed.
+ * This is ONLY used by architecture cache flushing code. If you aren't
+ * writing cache flushing code, you want either folio_mapping() or
+ * folio_file_mapping().
*/
-static inline void attach_page_private(struct page *page, void *data)
+static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
- get_page(page);
- set_page_private(page, (unsigned long)data);
- SetPagePrivate(page);
+ if (unlikely(folio_test_swapcache(folio)))
+ return NULL;
+
+ return folio_mapping(folio);
+}
+
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+ return folio_file_mapping(page_folio(page));
}
/**
- * detach_page_private - Detach private data from a page.
- * @page: Page to detach data from.
+ * folio_inode - Get the host inode for this folio.
+ * @folio: The folio.
*
- * Removes the data that was previously attached to the page and decrements
+ * For folios which are in the page cache, return the inode that this folio
+ * belongs to.
+ *
+ * Do not call this for folios which aren't in the page cache.
+ */
+static inline struct inode *folio_inode(struct folio *folio)
+{
+ return folio->mapping->host;
+}
+
+/**
+ * folio_attach_private - Attach private data to a folio.
+ * @folio: Folio to attach data to.
+ * @data: Data to attach to folio.
+ *
+ * Attaching private data to a folio increments the page's reference count.
+ * The data must be detached before the folio will be freed.
+ */
+static inline void folio_attach_private(struct folio *folio, void *data)
+{
+ folio_get(folio);
+ folio->private = data;
+ folio_set_private(folio);
+}
+
+/**
+ * folio_change_private - Change private data on a folio.
+ * @folio: Folio to change the data on.
+ * @data: Data to set on the folio.
+ *
+ * Change the private data attached to a folio and return the old
+ * data. The page must previously have had data attached and the data
+ * must be detached before the folio will be freed.
+ *
+ * Return: Data that was previously attached to the folio.
+ */
+static inline void *folio_change_private(struct folio *folio, void *data)
+{
+ void *old = folio_get_private(folio);
+
+ folio->private = data;
+ return old;
+}
+
+/**
+ * folio_detach_private - Detach private data from a folio.
+ * @folio: Folio to detach data from.
+ *
+ * Removes the data that was previously attached to the folio and decrements
* the refcount on the page.
*
- * Return: Data that was attached to the page.
+ * Return: Data that was attached to the folio.
*/
-static inline void *detach_page_private(struct page *page)
+static inline void *folio_detach_private(struct folio *folio)
{
- void *data = (void *)page_private(page);
+ void *data = folio_get_private(folio);
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
return NULL;
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
+ folio_clear_private(folio);
+ folio->private = NULL;
+ folio_put(folio);
return data;
}
+static inline void attach_page_private(struct page *page, void *data)
+{
+ folio_attach_private(page_folio(page), data);
+}
+
+static inline void *detach_page_private(struct page *page)
+{
+ return folio_detach_private(page_folio(page));
+}
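/*
 * Sketch (not in this patch) of the attach/detach pairing a filesystem
 * might use; 'struct my_fs_state' is hypothetical and assumed to be
 * slab-allocated (hence kfree()).
 */
static inline void my_read_folio_sketch(struct folio *folio,
					struct my_fs_state *state)
{
	folio_attach_private(folio, state);	/* takes a folio reference */
}

static inline void my_release_folio_sketch(struct folio *folio)
{
	struct my_fs_state *state = folio_detach_private(folio);

	kfree(state);	/* the reference taken at attach time is now dropped */
}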
+
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP is disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages).
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER 8
+#endif
+
#ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
- return alloc_pages(gfp, 0);
+ return folio_alloc(gfp, order);
}
#endif
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return &filemap_alloc_folio(gfp, 0)->page;
+}
+
static inline struct page *page_cache_alloc(struct address_space *x)
{
return __page_cache_alloc(mapping_gfp_mask(x));
@@ -264,23 +565,131 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
-#define FGP_ACCESSED 0x00000001
-#define FGP_LOCK 0x00000002
-#define FGP_CREAT 0x00000004
-#define FGP_WRITE 0x00000008
-#define FGP_NOFS 0x00000010
-#define FGP_NOWAIT 0x00000020
-#define FGP_FOR_MMAP 0x00000040
+/**
+ * typedef fgf_t - Flags for getting folios from the page cache.
+ *
+ * Most users of the page cache will not need to use these flags;
+ * there are convenience functions such as filemap_get_folio() and
+ * filemap_lock_folio(). For users which need more control over exactly
+ * what is done with the folios, these flags to __filemap_get_folio()
+ * are available.
+ *
+ * * %FGP_ACCESSED - The folio will be marked accessed.
+ * * %FGP_LOCK - The folio is returned locked.
+ * * %FGP_CREAT - If no folio is present then a new folio is allocated,
+ * added to the page cache and the VM's LRU list. The folio is
+ * returned locked.
+ * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
+ * folio is already in cache. If the folio was allocated, unlock it
+ * before returning so the caller can do the same dance.
+ * * %FGP_WRITE - The folio will be written to by the caller.
+ * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
+ * * %FGP_NOWAIT - Don't block on the folio lock.
+ * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
+ * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
+ * implementation.
+ */
+typedef unsigned int __bitwise fgf_t;
+
+#define FGP_ACCESSED ((__force fgf_t)0x00000001)
+#define FGP_LOCK ((__force fgf_t)0x00000002)
+#define FGP_CREAT ((__force fgf_t)0x00000004)
+#define FGP_WRITE ((__force fgf_t)0x00000008)
+#define FGP_NOFS ((__force fgf_t)0x00000010)
+#define FGP_NOWAIT ((__force fgf_t)0x00000020)
+#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
+#define FGP_STABLE ((__force fgf_t)0x00000080)
+#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
+
+#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+
+/**
+ * fgf_set_order - Encode a length in the fgf_t flags.
+ * @size: The suggested size of the folio to create.
+ *
+ * The caller of __filemap_get_folio() can use this to suggest a preferred
+ * size for the folio that is created. If there is already a folio at
+ * the index, it will be returned, no matter what its size. If a folio
+ * is freshly created, it may be of a different size than requested
+ * due to alignment constraints, memory pressure, or the presence of
+ * other folios at nearby indices.
+ */
+static inline fgf_t fgf_set_order(size_t size)
+{
+ unsigned int shift = ilog2(size);
+
+ if (shift <= PAGE_SHIFT)
+ return 0;
+ return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
+}
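/*
 * Sketch (not part of this patch) of a write path asking the cache for
 * a folio sized to the I/O: encode the byte count in the flags and let
 * __filemap_get_folio() (declared below) choose a suitable order.
 */
static inline struct folio *grab_for_write_sketch(struct address_space *mapping,
						  loff_t pos, size_t len)
{
	fgf_t fgf = FGP_WRITEBEGIN | fgf_set_order(len);

	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgf,
				   mapping_gfp_mask(mapping));
}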
-struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask);
+void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
+struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
+ fgf_t fgp_flags, gfp_t gfp);
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
+ fgf_t fgp_flags, gfp_t gfp);
+
+/**
+ * filemap_get_folio - Find and get a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned with an increased refcount.
+ *
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_get_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, 0, 0);
+}
+
+/**
+ * filemap_lock_folio - Find and lock a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned locked with an increased refcount.
+ *
+ * Context: May sleep.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_lock_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
+}
+
+/**
+ * filemap_grab_folio - grab a folio from the page cache
+ * @mapping: The address space to search
+ * @index: The page index
+ *
+ * Looks up the page cache entry at @mapping & @index. If no folio is found,
+ * a new folio is created. The folio is locked, marked as accessed, and
+ * returned.
+ *
+ * Return: A found or created folio, or ERR_PTR(-ENOMEM) if no folio was
+ * found and a new one could not be created.
+ */
+static inline struct folio *filemap_grab_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(mapping));
+}
/**
* find_get_page - find and get a page reference
@@ -299,7 +708,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
- pgoff_t offset, int fgp_flags)
+ pgoff_t offset, fgf_t fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
@@ -307,20 +716,20 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
- * @offset: the page index
+ * @index: the page index
*
- * Looks up the page cache slot at @mapping & @offset. If there is a
+ * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
- * Otherwise, %NULL is returned.
- *
- * find_lock_page() may sleep.
+ * Context: May sleep.
+ * Return: A struct page or %NULL if there is no page in the cache for this
+ * index.
*/
static inline struct page *find_lock_page(struct address_space *mapping,
- pgoff_t offset)
+ pgoff_t index)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+ return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}
/**
@@ -371,6 +780,67 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
+#define swapcache_index(folio) __page_file_index(&(folio)->page)
+
+/**
+ * folio_index - File index of a folio.
+ * @folio: The folio.
+ *
+ * For a folio which is either in the page cache or the swap cache,
+ * return its index within the address_space it belongs to. If you know
+ * the page is definitely in the page cache, you can look at the folio's
+ * index directly.
+ *
+ * Return: The index (offset in units of pages) of a folio in its file.
+ */
+static inline pgoff_t folio_index(struct folio *folio)
+{
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_index(folio);
+ return folio->index;
+}
+
+/**
+ * folio_next_index - Get the index of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The index of the folio which follows this folio in the file.
+ */
+static inline pgoff_t folio_next_index(struct folio *folio)
+{
+ return folio->index + folio_nr_pages(folio);
+}
+
+/**
+ * folio_file_page - The page for a particular index.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Sometimes after looking up a folio in the page cache, we need to
+ * obtain the specific page for an index (eg a page fault).
+ *
+ * Return: The page containing the file data for this index.
+ */
+static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
+{
+ return folio_page(folio, index & (folio_nr_pages(folio) - 1));
+}
+
+/**
+ * folio_contains - Does this folio contain this index?
+ * @folio: The folio.
+ * @index: The page index within the file.
+ *
+ * Context: The caller should have the folio locked in order to prevent
+ * (eg) shmem from moving the folio between the page cache and swap cache
+ * and changing its index in the middle of the operation.
+ * Return: %true if the folio contains @index, %false otherwise.
+ */
+static inline bool folio_contains(struct folio *folio, pgoff_t index)
+{
+ return index - folio_index(folio) < folio_nr_pages(folio);
+}
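
For illustration, a hedged sketch of how folio_contains() and folio_file_page() combine after a page cache lookup (the helper name is made up):

/* Map a file index to the exact page within a possibly-large folio. */
static struct page *example_index_to_page(struct folio *folio, pgoff_t index)
{
	if (!folio_contains(folio, index))
		return NULL;	/* raced with truncation or reclaim */
	return folio_file_page(folio, index);
}
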
+
/*
* Given the page we found in the page cache, return the page corresponding
* to this index in the file
@@ -384,36 +854,15 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
return head + (index & (thp_nr_pages(head) - 1));
}
-struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
-struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
- unsigned int nr_entries, struct page **entries,
- pgoff_t *indices);
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
- pgoff_t end, unsigned int nr_pages,
- struct page **pages);
-static inline unsigned find_get_pages(struct address_space *mapping,
- pgoff_t *start, unsigned int nr_pages,
- struct page **pages)
-{
- return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
- pages);
-}
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
- pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
- struct page **pages);
-static inline unsigned find_get_pages_tag(struct address_space *mapping,
- pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
- struct page **pages)
-{
- return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
- nr_pages, pages);
-}
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
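
The usual consumer loop for these batched lookups looks roughly like this sketch (folio_batch comes from linux/pagevec.h; example_scan_range is an illustrative name):

static void example_scan_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_mark_accessed(fbatch.folios[i]);
		/* Drops the references taken by the lookup. */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
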
struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index, unsigned flags);
+ pgoff_t index);
/*
* Returns locked page at given index in given cache, creating it if needed.
@@ -424,49 +873,43 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
-extern struct page * read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
+struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
+ gfp_t flags);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
- struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
- pgoff_t index, void *data)
+ pgoff_t index, struct file *file)
+{
+ return read_cache_page(mapping, index, NULL, file);
+}
+
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+ pgoff_t index, struct file *file)
{
- return read_cache_page(mapping, index, NULL, data);
+ return read_cache_folio(mapping, index, NULL, file);
}
/*
- * Get index of the page with in radix-tree
- * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
*/
-static inline pgoff_t page_to_index(struct page *page)
+static inline pgoff_t page_to_pgoff(struct page *page)
{
- pgoff_t pgoff;
+ struct page *head;
if (likely(!PageTransTail(page)))
return page->index;
+ head = compound_head(page);
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index;
- pgoff += page - compound_head(page);
- return pgoff;
-}
-
-/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
- */
-static inline pgoff_t page_to_pgoff(struct page *page)
-{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
- return page_to_index(page);
+ return head->index + page - head;
}
/*
@@ -482,29 +925,52 @@ static inline loff_t page_file_offset(struct page *page)
return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
-extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
- unsigned long address);
+/**
+ * folio_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ */
+static inline loff_t folio_pos(struct folio *folio)
+{
+ return page_offset(&folio->page);
+}
+
+/**
+ * folio_file_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ *
+ * This differs from folio_pos() for folios which belong to a swap file.
+ * NFS is the only filesystem today which needs to use folio_file_pos().
+ */
+static inline loff_t folio_file_pos(struct folio *folio)
+{
+ return page_file_offset(&folio->page);
+}
+
+/*
+ * Get the offset in PAGE_SIZE (even for hugetlb folios).
+ */
+static inline pgoff_t folio_pgoff(struct folio *folio)
+{
+ return folio->index;
+}
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
- if (unlikely(is_vm_hugetlb_page(vma)))
- return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
}
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
- struct page *page;
+ struct folio *folio;
int bit_nr;
int page_match;
};
struct wait_page_queue {
- struct page *page;
+ struct folio *folio;
int bit_nr;
wait_queue_entry_t wait;
};
@@ -512,7 +978,7 @@ struct wait_page_queue {
static inline bool wake_page_match(struct wait_page_queue *wait_page,
struct wait_page_key *key)
{
- if (wait_page->page != key->page)
+ if (wait_page->folio != key->folio)
return false;
key->page_match = 1;
@@ -522,207 +988,248 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
return true;
}
-extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
-extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
-extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags);
-extern void unlock_page(struct page *page);
+void __folio_lock(struct folio *folio);
+int __folio_lock_killable(struct folio *folio);
+vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
+void unlock_page(struct page *page);
+void folio_unlock(struct folio *folio);
+
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
+static inline bool folio_trylock(struct folio *folio)
+{
+ return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
/*
* Return true if the page was successfully locked
*/
static inline int trylock_page(struct page *page)
{
- page = compound_head(page);
- return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+ return folio_trylock(page_folio(page));
}
-/*
- * lock_page may only be called if we have the page's inode pinned.
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
*/
-static inline void lock_page(struct page *page)
+static inline void folio_lock(struct folio *folio)
{
might_sleep();
- if (!trylock_page(page))
- __lock_page(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
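
A common lock-and-revalidate pattern built on folio_lock(); a sketch only, assuming the caller already holds a reference on @folio:

static struct folio *example_lock_and_check(struct folio *folio,
					    struct address_space *mapping)
{
	folio_lock(folio);
	if (folio->mapping != mapping) {	/* raced with truncation */
		folio_unlock(folio);
		return NULL;
	}
	return folio;			/* returned locked */
}
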
-/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals. It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
*/
-static inline int lock_page_killable(struct page *page)
+static inline void lock_page(struct page *page)
{
+ struct folio *folio;
might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
+
+ folio = page_folio(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
-/*
- * lock_page_async - Lock the page, unless this would block. If the page
- * is already locked, then queue a callback when the page becomes unlocked.
- * This callback can then retry the operation.
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
*
- * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
- * was already locked and the callback defined in 'wait' was queued.
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
*/
-static inline int lock_page_async(struct page *page,
- struct wait_page_queue *wait)
+static inline int folio_lock_killable(struct folio *folio)
{
- if (!trylock_page(page))
- return __lock_page_async(page, wait);
+ might_sleep();
+ if (!folio_trylock(folio))
+ return __folio_lock_killable(folio);
return 0;
}
/*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_lock implications depend on flags; see
- * __lock_page_or_retry().
+ * __folio_lock_or_retry().
*/
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags)
+static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
+ struct vm_fault *vmf)
{
might_sleep();
- return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+ if (!folio_trylock(folio))
+ return __folio_lock_or_retry(folio, vmf);
+ return 0;
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+void folio_wait_bit(struct folio *folio, int bit_nr);
+int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
- * Wait for a page to be unlocked.
+ * Wait for a folio to be unlocked.
*
- * This must be called with the caller "holding" the page,
- * ie with increased "page->count" so that the page won't
- * go away during the wait..
+ * This must be called with the caller "holding" the folio,
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
*/
-static inline void wait_on_page_locked(struct page *page)
+static inline void folio_wait_locked(struct folio *folio)
{
- if (PageLocked(page))
- wait_on_page_bit(compound_head(page), PG_locked);
+ if (folio_test_locked(folio))
+ folio_wait_bit(folio, PG_locked);
}
-static inline int wait_on_page_locked_killable(struct page *page)
+static inline int folio_wait_locked_killable(struct folio *folio)
{
- if (!PageLocked(page))
+ if (!folio_test_locked(folio))
return 0;
- return wait_on_page_bit_killable(compound_head(page), PG_locked);
+ return folio_wait_bit_killable(folio, PG_locked);
}
-extern void put_and_wait_on_page_locked(struct page *page);
+static inline void wait_on_page_locked(struct page *page)
+{
+ folio_wait_locked(page_folio(page));
+}
+void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
-extern void end_page_writeback(struct page *page);
+void folio_wait_writeback(struct folio *folio);
+int folio_wait_writeback_killable(struct folio *folio);
+void end_page_writeback(struct page *page);
+void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
-
-void page_endio(struct page *page, bool is_write, int err);
+void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+static inline void __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+{
+ __folio_mark_dirty(page_folio(page), mapping, warn);
+}
+void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
+void __folio_cancel_dirty(struct folio *folio);
+static inline void folio_cancel_dirty(struct folio *folio)
+{
+ /* Avoid atomic ops, locking, etc. when not actually needed. */
+ if (folio_test_dirty(folio))
+ __folio_cancel_dirty(folio);
+}
+bool folio_clear_dirty_for_io(struct folio *folio);
+bool clear_page_dirty_for_io(struct page *page);
+void folio_invalidate(struct folio *folio, size_t offset, size_t length);
+int __set_page_dirty_nobuffers(struct page *page);
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_MIGRATION
+int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+#else
+#define filemap_migrate_folio NULL
+#endif
+void folio_end_private_2(struct folio *folio);
+void folio_wait_private_2(struct folio *folio);
+int folio_wait_private_2_killable(struct folio *folio);
/*
 * Add an arbitrary waiter to a folio's wait queue
*/
-extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
+void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
/*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
*/
-static inline int fault_in_pages_writeable(char __user *uaddr, int size)
-{
- char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return 0;
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
+size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
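
A sketch of how callers typically use these (the helper name is made up): prefault the user buffer before entering a region where page faults are disallowed, treating any unfaulted remainder as -EFAULT.

static int example_prefault(char __user *ubuf, size_t len)
{
	/* fault_in_writeable() returns the number of bytes NOT faulted in. */
	if (fault_in_writeable(ubuf, len))
		return -EFAULT;
	return 0;
}
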
- if (unlikely(uaddr > end))
- return -EFAULT;
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- do {
- if (unlikely(__put_user(0, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK))
- return __put_user(0, end);
-
- return 0;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
-{
- volatile char c;
- const char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return 0;
-
- if (unlikely(uaddr > end))
- return -EFAULT;
-
- do {
- if (unlikely(__get_user(c, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK)) {
- return __get_user(c, end);
- }
-
- (void)c;
- return 0;
-}
-
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
-int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp);
+void filemap_remove_folio(struct folio *folio);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
- struct pagevec *pvec);
+ struct folio_batch *fbatch);
+bool filemap_release_folio(struct folio *folio, gfp_t gfp);
+loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
+ int whence);
-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp, void **shadowp);
-void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
- struct file *, pgoff_t index, unsigned long req_count);
-void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
- struct file *, struct page *, pgoff_t index,
- unsigned long req_count);
-void page_cache_readahead_unbounded(struct address_space *, struct file *,
- pgoff_t index, unsigned long nr_to_read,
- unsigned long lookahead_count);
+bool filemap_range_has_writeback(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
+/**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping: address space within which to check
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Checks whether at least one page exists in the supplied range; usually
+ * used to check if a direct write will trigger writeback. Used by O_DIRECT
+ * read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
*/
-static inline int add_to_page_cache(struct page *page,
- struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+static inline bool filemap_range_needs_writeback(struct address_space *mapping,
+ loff_t start_byte,
+ loff_t end_byte)
{
- int error;
-
- __SetPageLocked(page);
- error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
- if (unlikely(error))
- __ClearPageLocked(page);
- return error;
+ if (!mapping->nrpages)
+ return false;
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+ !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ return false;
+ return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
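
An O_DIRECT read path might apply this check as in the following sketch (iocb and count are assumed caller state; example_dio_precheck is an illustrative name):

static ssize_t example_dio_precheck(struct kiocb *iocb,
				    struct address_space *mapping,
				    size_t count)
{
	if (!(iocb->ki_flags & IOCB_NOWAIT))
		return 0;
	if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
					  iocb->ki_pos + count - 1))
		return -EAGAIN;	/* caller should retry blocking */
	return 0;
}
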
/**
@@ -739,43 +1246,134 @@ static inline int add_to_page_cache(struct page *page,
* @file: The file, used primarily by network filesystems for authentication.
* May be NULL if invoked internally by the filesystem.
* @mapping: Readahead this filesystem object.
+ * @ra: File readahead state. May be NULL.
*/
struct readahead_control {
struct file *file;
struct address_space *mapping;
+ struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
+ bool _workingset;
+ unsigned long _pflags;
};
+#define DEFINE_READAHEAD(ractl, f, r, m, i) \
+ struct readahead_control ractl = { \
+ .file = f, \
+ .mapping = m, \
+ .ra = r, \
+ ._index = i, \
+ }
+
+#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
+
+void page_cache_ra_unbounded(struct readahead_control *,
+ unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct folio *,
+ unsigned long req_count);
+void readahead_expand(struct readahead_control *ractl,
+ loff_t new_start, size_t new_len);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read. The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file, pgoff_t index,
+ unsigned long req_count)
+{
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_sync_ra(&ractl, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @folio: The folio at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file,
+ struct folio *folio, pgoff_t index, unsigned long req_count)
+{
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_async_ra(&ractl, folio, req_count);
+}
+
+static inline struct folio *__readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio;
+
+ BUG_ON(ractl->_batch_count > ractl->_nr_pages);
+ ractl->_nr_pages -= ractl->_batch_count;
+ ractl->_index += ractl->_batch_count;
+
+ if (!ractl->_nr_pages) {
+ ractl->_batch_count = 0;
+ return NULL;
+ }
+
+ folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ ractl->_batch_count = folio_nr_pages(folio);
+
+ return folio;
+}
+
/**
* readahead_page - Get the next page to read.
- * @rac: The current readahead request.
+ * @ractl: The current readahead request.
*
* Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
* and unlock the page once all I/O to that page has completed.
* Return: A pointer to the next page, or %NULL if we are done.
*/
-static inline struct page *readahead_page(struct readahead_control *rac)
+static inline struct page *readahead_page(struct readahead_control *ractl)
{
- struct page *page;
-
- BUG_ON(rac->_batch_count > rac->_nr_pages);
- rac->_nr_pages -= rac->_batch_count;
- rac->_index += rac->_batch_count;
+ struct folio *folio = __readahead_folio(ractl);
- if (!rac->_nr_pages) {
- rac->_batch_count = 0;
- return NULL;
- }
+ return &folio->page;
+}
- page = xa_load(&rac->mapping->i_pages, rac->_index);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- rac->_batch_count = thp_nr_pages(page);
+/**
+ * readahead_folio - Get the next folio to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The folio is locked. The caller should unlock the folio once
+ * all I/O to that folio has completed.
+ * Return: A pointer to the next folio, or %NULL if we are done.
+ */
+static inline struct folio *readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
- return page;
+ if (folio)
+ folio_put(folio);
+ return folio;
}
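
A filesystem ->readahead() implementation commonly drains the request as in this sketch (my_submit_read is a hypothetical I/O submitter, not a real API):

static void example_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		/* Folio is locked; unlock at I/O completion, e.g. via
		 * folio_end_read(folio, success). */
		my_submit_read(folio);
	}
}
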
static inline unsigned int __readahead_batch(struct readahead_control *rac,
@@ -793,20 +1391,12 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
xas_set(&xas, rac->_index);
rcu_read_lock();
xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
+ if (xas_retry(&xas, page))
+ continue;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
array[i++] = page;
rac->_batch_count += thp_nr_pages(page);
-
- /*
- * The page cache isn't using multi-index entries yet,
- * so the xas cursor needs to be manually moved to the
- * next index. This can be removed once the page cache
- * is converted.
- */
- if (PageHead(page))
- xas_set(&xas, rac->_index + rac->_batch_count);
-
if (i == array_sz)
break;
}
@@ -842,9 +1432,9 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(struct readahead_control *rac)
{
- return (loff_t)rac->_nr_pages * PAGE_SIZE;
+ return rac->_nr_pages * PAGE_SIZE;
}
/**
@@ -865,6 +1455,15 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
return rac->_nr_pages;
}
+/**
+ * readahead_batch_length - The number of bytes in the current batch.
+ * @rac: The readahead request.
+ */
+static inline size_t readahead_batch_length(struct readahead_control *rac)
+{
+ return rac->_batch_count * PAGE_SIZE;
+}
+
static inline unsigned long dir_pages(struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
@@ -872,6 +1471,34 @@ static inline unsigned long dir_pages(struct inode *inode)
}
/**
+ * folio_mkwrite_check_truncate - check if folio was truncated
+ * @folio: the folio to check
+ * @inode: the inode to check the folio against
+ *
+ * Return: the number of bytes in the folio up to EOF,
+ * or -EFAULT if the folio was truncated.
+ */
+static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ size_t offset = offset_in_folio(folio, size);
+
+ if (!folio->mapping)
+ return -EFAULT;
+
+ /* folio is wholly inside EOF */
+ if (folio_next_index(folio) - 1 < index)
+ return folio_size(folio);
+ /* folio is wholly past EOF */
+ if (folio->index > index || !offset)
+ return -EFAULT;
+ /* folio is partially inside EOF */
+ return offset;
+}
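
For illustration, the tail of a ->page_mkwrite() handler using the helper above might look like this sketch (dirtying details elided; the function name is made up):

static vm_fault_t example_mkwrite_tail(struct folio *folio,
				       struct inode *inode)
{
	ssize_t len;

	folio_lock(folio);
	len = folio_mkwrite_check_truncate(folio, inode);
	if (len < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;	/* folio was truncated */
	}
	/* ... mark the first @len bytes writable/dirty as needed ... */
	return VM_FAULT_LOCKED;
}
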
+
+/**
* page_mkwrite_check_truncate - check if page was truncated
* @page: the page to check
* @inode: the inode to check the page against
@@ -899,4 +1526,26 @@ static inline int page_mkwrite_check_truncate(struct page *page,
return offset;
}
+/**
+ * i_blocks_per_folio - How many blocks fit in this folio.
+ * @inode: The inode which contains the blocks.
+ * @folio: The folio.
+ *
+ * If the block size is larger than the size of this folio, return zero.
+ *
+ * Context: The caller should hold a refcount on the folio to prevent it
+ * from being split.
+ * Return: The number of filesystem blocks covered by this folio.
+ */
+static inline
+unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+{
+ return folio_size(folio) >> inode->i_blkbits;
+}
+
+static inline
+unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
+{
+ return i_blocks_per_folio(inode, page_folio(page));
+}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 081d934eda64..fcc06c300a72 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -3,89 +3,103 @@
* include/linux/pagevec.h
*
* In many places it is efficient to batch an operation up against multiple
- * pages. A pagevec is a multipage container which is used for that.
+ * folios. A folio_batch is a container which is used for that.
*/
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
-#include <linux/xarray.h>
+#include <linux/types.h>
-/* 15 pointers + header align the pagevec structure to a power of two */
+/* 15 pointers + header align the folio_batch structure to a power of two */
#define PAGEVEC_SIZE 15
-struct page;
-struct address_space;
+struct folio;
-struct pagevec {
+/**
+ * struct folio_batch - A collection of folios.
+ *
+ * The folio_batch is used to amortise the cost of retrieving and
+ * operating on a set of folios. The order of folios in the batch may be
+ * significant (eg delete_from_page_cache_batch()). Some users of the
+ * folio_batch store "exceptional" entries in it which can be removed
+ * by calling folio_batch_remove_exceptionals().
+ */
+struct folio_batch {
unsigned char nr;
+ unsigned char i;
bool percpu_pvec_drained;
- struct page *pages[PAGEVEC_SIZE];
+ struct folio *folios[PAGEVEC_SIZE];
};
-void __pagevec_release(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t start, unsigned nr_entries,
- pgoff_t *indices);
-void pagevec_remove_exceptionals(struct pagevec *pvec);
-unsigned pagevec_lookup_range(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start, pgoff_t end);
-static inline unsigned pagevec_lookup(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start)
-{
- return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
-}
-
-unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, pgoff_t end,
- xa_mark_t tag);
-unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, pgoff_t end,
- xa_mark_t tag, unsigned max_pages);
-static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
+/**
+ * folio_batch_init() - Initialise a batch of folios
+ * @fbatch: The folio batch.
+ *
+ * A freshly initialised folio_batch contains zero folios.
+ */
+static inline void folio_batch_init(struct folio_batch *fbatch)
{
- return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
+ fbatch->nr = 0;
+ fbatch->i = 0;
+ fbatch->percpu_pvec_drained = false;
}
-static inline void pagevec_init(struct pagevec *pvec)
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
{
- pvec->nr = 0;
- pvec->percpu_pvec_drained = false;
+ fbatch->nr = 0;
+ fbatch->i = 0;
}
-static inline void pagevec_reinit(struct pagevec *pvec)
+static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
{
- pvec->nr = 0;
+ return fbatch->nr;
}
-static inline unsigned pagevec_count(struct pagevec *pvec)
+static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
{
- return pvec->nr;
+ return PAGEVEC_SIZE - fbatch->nr;
}
-static inline unsigned pagevec_space(struct pagevec *pvec)
+/**
+ * folio_batch_add() - Add a folio to a batch.
+ * @fbatch: The folio batch.
+ * @folio: The folio to add.
+ *
+ * The folio is added to the end of the batch.
+ * The batch must have previously been initialised using folio_batch_init().
+ *
+ * Return: The number of slots still available.
+ */
+static inline unsigned folio_batch_add(struct folio_batch *fbatch,
+ struct folio *folio)
{
- return PAGEVEC_SIZE - pvec->nr;
+ fbatch->folios[fbatch->nr++] = folio;
+ return folio_batch_space(fbatch);
}
-/*
- * Add a page to a pagevec. Returns the number of slots still available.
+/**
+ * folio_batch_next - Return the next folio to process.
+ * @fbatch: The folio batch being processed.
+ *
+ * Use this function to implement a queue of folios.
+ *
+ * Return: The next folio in the queue, or NULL if the queue is empty.
*/
-static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
+static inline struct folio *folio_batch_next(struct folio_batch *fbatch)
{
- pvec->pages[pvec->nr++] = page;
- return pagevec_space(pvec);
+ if (fbatch->i == fbatch->nr)
+ return NULL;
+ return fbatch->folios[fbatch->i++];
}
-static inline void pagevec_release(struct pagevec *pvec)
+void __folio_batch_release(struct folio_batch *pvec);
+
+static inline void folio_batch_release(struct folio_batch *fbatch)
{
- if (pagevec_count(pvec))
- __pagevec_release(pvec);
+ if (folio_batch_count(fbatch))
+ __folio_batch_release(fbatch);
}
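
On the producer side, a batch is usually drained whenever folio_batch_add() reports it full, as in this sketch (hypothetical helper name):

static void example_batch_put(struct folio_batch *fbatch, struct folio *folio)
{
	/* folio_batch_add() returns the number of slots still free. */
	if (!folio_batch_add(fbatch, folio))
		folio_batch_release(fbatch);
}
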
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
#endif /* _LINUX_PAGEVEC_H */
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index b1cb6b753abb..27cd1e59ccf7 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -6,8 +6,18 @@
struct mm_walk;
+/* Locking requirement during a page walk. */
+enum page_walk_lock {
+ /* mmap_lock should be locked for read to stabilize the vma tree */
+ PGWALK_RDLOCK = 0,
+ /* vma will be write-locked during the walk */
+ PGWALK_WRLOCK = 1,
+ /* vma is expected to be already write-locked during the walk */
+ PGWALK_WRLOCK_VERIFY = 2,
+};
+
/**
- * mm_walk_ops - callbacks for walk_page_range
+ * struct mm_walk_ops - callbacks for walk_page_range
* @pgd_entry: if set, called for each non-empty PGD (top-level) entry
* @p4d_entry: if set, called for each non-empty P4D entry
* @pud_entry: if set, called for each non-empty PUD entry
@@ -15,18 +25,29 @@ struct mm_walk;
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (lowest-level)
- * entry
+ * @pte_entry: if set, called for each PTE (lowest-level) entry,
+ * including empty ones
* @pte_hole: if set, called for each hole at all levels,
- * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
- * 4:PTE. Any folded depths (where PTRS_PER_P?D is equal
- * to 1) are skipped.
- * @hugetlb_entry: if set, called for each hugetlb entry
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
+ * Any folded depths (where PTRS_PER_P?D is equal to 1)
+ * are skipped.
+ * @hugetlb_entry: if set, called for each hugetlb entry. This hook
+ * function is called with the vma lock held, in order to
+ * protect against a concurrent freeing of the pte_t* or
+ * the ptl. In some cases, the hook function needs to drop
+ * and retake the vma lock in order to avoid deadlocks
+ * while calling other functions. In such cases the hook
+ * function must either refrain from accessing the pte or
+ * ptl after dropping the vma lock, or else revalidate
+ * those items after re-acquiring the vma lock and before
+ * accessing them.
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
* "do page table walk over the current vma", returning
* a negative value means "abort current page table walk
* right now" and returning 1 means "skip the current vma"
+ * Note that this callback is not called when the caller
+ * passes in a single VMA as for walk_page_vma().
* @pre_vma: if set, called before starting walk on a non-null vma.
* @post_vma: if set, called after a walk on a non-null vma, provided
* that @pre_vma and the vma walk succeeded.
@@ -55,6 +76,7 @@ struct mm_walk_ops {
int (*pre_vma)(unsigned long start, unsigned long end,
struct mm_walk *walk);
void (*post_vma)(struct mm_walk *walk);
+ enum page_walk_lock walk_lock;
};
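
A minimal walker wiring these ops together, as a sketch (the callback and variable names are made up; only the pte_entry/walk_lock interface is taken from the header). A caller would then run walk_page_range(mm, start, end, &example_walk_ops, &count) with mmap_lock held for read:

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(ptep_get(pte)))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry	= example_pte_entry,
	.walk_lock	= PGWALK_RDLOCK,	/* mmap_lock held for read */
};
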
/*
@@ -71,7 +93,7 @@ enum page_walk_action {
};
/**
- * mm_walk - walk_page_range data
+ * struct mm_walk - walk_page_range data
* @ops: operation to call during the walk
* @mm: mm_struct representing the target process of page table walk
* @pgd: pointer to PGD; only valid with no_vma (otherwise set to NULL)
@@ -99,6 +121,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
pgd_t *pgd,
void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
diff --git a/include/linux/panic.h b/include/linux/panic.h
new file mode 100644
index 000000000000..6717b15e798c
--- /dev/null
+++ b/include/linux/panic.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_H
+#define _LINUX_PANIC_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+extern long (*panic_blink)(int state);
+__printf(1, 2)
+void panic(const char *fmt, ...) __noreturn __cold;
+void nmi_panic(struct pt_regs *regs, const char *msg);
+void check_panic_on_warn(const char *origin);
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern bool oops_may_print(void);
+
+extern int panic_timeout;
+extern unsigned long panic_print;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+extern int panic_on_warn;
+
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
+
+extern int sysctl_panic_on_rcu_stall;
+extern int sysctl_max_rcu_stall_to_panic;
+extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
+extern void __stack_chk_fail(void);
+void abort(void);
+
+/*
+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
+ * holds a CPU number which is executing panic() currently. A value of
+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
+ */
+extern atomic_t panic_cpu;
+#define PANIC_CPU_INVALID -1
+
+/*
+ * Only to be used by arch init code. If the user over-wrote the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
+
+/* This cannot be an enum because some may be used in assembly source. */
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_CPU_OUT_OF_SPEC 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+#define TAINT_FIRMWARE_WORKAROUND 11
+#define TAINT_OOT_MODULE 12
+#define TAINT_UNSIGNED_MODULE 13
+#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
+#define TAINT_AUX 16
+#define TAINT_RANDSTRUCT 17
+#define TAINT_TEST 18
+#define TAINT_FLAGS_COUNT 19
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
+
+struct taint_flag {
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
+ bool module; /* also show as a per-module taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
+
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE,
+};
+
+extern const char *print_tainted(void);
+extern void add_taint(unsigned flag, enum lockdep_ok);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+
+#endif /* _LINUX_PANIC_H */
diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h
new file mode 100644
index 000000000000..41e32483d7a7
--- /dev/null
+++ b/include/linux/panic_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_NOTIFIERS_H
+#define _LINUX_PANIC_NOTIFIERS_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+extern struct atomic_notifier_head panic_notifier_list;
+
+extern bool crash_kexec_post_notifiers;
+
+#endif /* _LINUX_PANIC_NOTIFIERS_H */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 1fb508c19e83..fff39bc30629 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -40,10 +40,6 @@ struct amiga_parport_state {
unsigned char statusdir;/* ciab.ddrb & 7 */
};
-struct ax88796_parport_state {
- unsigned char cpr;
-};
-
struct ip32_parport_state {
unsigned int dcr;
unsigned int ecr;
@@ -55,7 +51,6 @@ struct parport_state {
/* ARC has no state. */
struct ax_parport_state ax;
struct amiga_parport_state amiga;
- struct ax88796_parport_state ax88796;
	/* Atari has no state. */
struct ip32_parport_state ip32;
void *misc;
@@ -297,13 +292,54 @@ int __must_check __parport_register_driver(struct parport_driver *,
* parport_register_driver must be a macro so that KBUILD_MODNAME can
* be expanded
*/
+
+/**
+ * parport_register_driver - register a parallel port device driver
+ * @driver: structure describing the driver
+ *
+ * This can be called by a parallel port device driver in order
+ * to receive notifications about ports being found in the
+ * system, as well as ports no longer available.
+ *
+ * If devmodel is true then the new device model is used
+ * for registration.
+ *
+ * The @driver structure is allocated by the caller and must not be
+ * deallocated until after calling parport_unregister_driver().
+ *
+ * If using the non device model:
+ * The driver's attach() function may block. The port that
+ * attach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so. Calling
+ * parport_register_device() on that port will do this for you.
+ *
+ * The driver's detach() function may block. The port that
+ * detach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so.
+ *
+ *
+ * Returns 0 on success. The non device model always succeeds, but the
+ * new device model can fail and will return the error code.
+ **/
#define parport_register_driver(driver) \
__parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
/* Unregister a high-level driver. */
-extern void parport_unregister_driver (struct parport_driver *);
void parport_unregister_driver(struct parport_driver *);
+/**
+ * module_parport_driver() - Helper macro for registering a modular parport driver
+ * @__parport_driver: struct parport_driver to be used
+ *
+ * Helper macro for parport drivers which do not do anything special in module
+ * init and exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_parport_driver(__parport_driver) \
+ module_driver(__parport_driver, parport_register_driver, parport_unregister_driver)
+
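
A sketch of the boilerplate this macro removes (driver and callback names are illustrative, and assume the devmodel registration path documented above):

static void example_attach(struct parport *port)
{
	pr_info("parport %s arrived\n", port->name);
}

static void example_detach(struct parport *port)
{
	pr_info("parport %s gone\n", port->name);
}

static struct parport_driver example_driver = {
	.name		= "example",
	.match_port	= example_attach,
	.detach		= example_detach,
	.devmodel	= true,
};
module_parport_driver(example_driver);
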
/* If parport_register_driver doesn't fit your needs, perhaps
* parport_find_xxx does. */
extern struct parport *parport_find_number (int);
@@ -478,7 +514,7 @@ extern int parport_device_proc_register(struct pardevice *device);
extern int parport_device_proc_unregister(struct pardevice *device);
/* If PC hardware is the only type supported, we can optimise a bit. */
-#if !defined(CONFIG_PARPORT_NOT_PC)
+#if !defined(CONFIG_PARPORT_NOT_PC) && defined(CONFIG_PARPORT_PC)
#include <linux/parport_pc.h>
#define parport_write_data(p,x) parport_pc_write_data(p,x)
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index 3d6fc576d6a1..f1ec5c10c3b3 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -26,6 +26,9 @@ struct parport_pc_private {
/* Whether or not there's an ECR. */
int ecr;
+ /* Bitmask of writable ECR bits. */
+ unsigned char ecr_writable;
+
/* Number of PWords that FIFO will hold. */
int fifo_depth;
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 89e2b23fb888..dd79f45a37b8 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -29,6 +29,7 @@ typedef struct {
int match_token(char *, const match_table_t table, substring_t args[]);
int match_int(substring_t *, int *result);
+int match_uint(substring_t *s, unsigned int *result);
int match_u64(substring_t *, u64 *result);
int match_octal(substring_t *, int *result);
int match_hex(substring_t *, int *result);
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index 24125778ef3e..abeba356bc3f 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -2,7 +2,8 @@
#ifndef _LINUX_PART_STAT_H
#define _LINUX_PART_STAT_H
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <asm/local.h>
struct disk_stats {
u64 nsecs[NR_STAT_GROUPS];
@@ -25,26 +26,26 @@ struct disk_stats {
#define part_stat_unlock() preempt_enable()
#define part_stat_get_cpu(part, field, cpu) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field)
+ (per_cpu_ptr((part)->bd_stats, (cpu))->field)
#define part_stat_get(part, field) \
part_stat_get_cpu(part, field, smp_processor_id())
#define part_stat_read(part, field) \
({ \
- typeof((part)->dkstats->field) res = 0; \
+ typeof((part)->bd_stats->field) res = 0; \
unsigned int _cpu; \
for_each_possible_cpu(_cpu) \
- res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
+ res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
res; \
})
-static inline void part_stat_set_all(struct hd_struct *part, int value)
+static inline void part_stat_set_all(struct block_device *part, int value)
{
int i;
for_each_possible_cpu(i)
- memset(per_cpu_ptr(part->dkstats, i), value,
+ memset(per_cpu_ptr(part->bd_stats, i), value,
sizeof(struct disk_stats));
}
@@ -54,29 +55,28 @@ static inline void part_stat_set_all(struct hd_struct *part, int value)
part_stat_read(part, field[STAT_DISCARD]))
#define __part_stat_add(part, field, addnd) \
- __this_cpu_add((part)->dkstats->field, addnd)
+ __this_cpu_add((part)->bd_stats->field, addnd)
#define part_stat_add(part, field, addnd) do { \
__part_stat_add((part), field, addnd); \
- if ((part)->partno) \
- __part_stat_add(&part_to_disk((part))->part0, \
- field, addnd); \
+ if ((part)->bd_partno) \
+ __part_stat_add(bdev_whole(part), field, addnd); \
} while (0)
-#define part_stat_dec(gendiskp, field) \
- part_stat_add(gendiskp, field, -1)
-#define part_stat_inc(gendiskp, field) \
- part_stat_add(gendiskp, field, 1)
-#define part_stat_sub(gendiskp, field, subnd) \
- part_stat_add(gendiskp, field, -subnd)
+#define part_stat_dec(part, field) \
+ part_stat_add(part, field, -1)
+#define part_stat_inc(part, field) \
+ part_stat_add(part, field, 1)
+#define part_stat_sub(part, field, subnd) \
+ part_stat_add(part, field, -subnd)
-#define part_stat_local_dec(gendiskp, field) \
- local_dec(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_inc(gendiskp, field) \
- local_inc(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_read(gendiskp, field) \
- local_read(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_read_cpu(gendiskp, field, cpu) \
- local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
+#define part_stat_local_dec(part, field) \
+ local_dec(&(part_stat_get(part, field)))
+#define part_stat_local_inc(part, field) \
+ local_inc(&(part_stat_get(part, field)))
+#define part_stat_local_read(part, field) \
+ local_read(&(part_stat_get(part, field)))
+#define part_stat_local_read_cpu(part, field, cpu) \
+ local_read(&(part_stat_get_cpu(part, field, cpu)))
#endif /* _LINUX_PART_STAT_H */
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 5ba475ca9078..078225b514d4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -84,6 +84,14 @@ extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
void acpi_pci_add_bus(struct pci_bus *bus);
void acpi_pci_remove_bus(struct pci_bus *bus);
+#ifdef CONFIG_PCI
+void pci_acpi_setup(struct device *dev, struct acpi_device *adev);
+void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev);
+#else
+static inline void pci_acpi_setup(struct device *dev, struct acpi_device *adev) {}
+static inline void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) {}
+#endif
+
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
void acpi_pci_slot_enumerate(struct pci_bus *bus);
@@ -122,6 +130,9 @@ static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { }
static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { }
#endif /* CONFIG_PCIE_EDR */
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *));
+void pci_acpi_clear_companion_lookup_hook(void);
+
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
deleted file mode 100644
index 249d4d7fbf18..000000000000
--- a/include/linux/pci-dma-compat.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
-#define PCI_DMA_TODEVICE DMA_TO_DEVICE
-#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE
-#define PCI_DMA_NONE DMA_NONE
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void *
-pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
- return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
-#ifdef CONFIG_PCI
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_coherent_mask(&dev->dev, mask);
-}
-#else
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-#endif
-
-#endif
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
new file mode 100644
index 000000000000..1f14aed4354b
--- /dev/null
+++ b/include/linux/pci-doe.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#ifndef LINUX_PCI_DOE_H
+#define LINUX_PCI_DOE_H
+
+struct pci_doe_mb;
+
+struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
+ u8 type);
+
+int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
+ const void *request, size_t request_sz,
+ void *response, size_t response_sz);
+
+#endif
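
As a usage sketch only (the 0x0001/0 pair is the spec-defined PCI-SIG DOE discovery protocol; the helper name is made up):

static int example_doe_discovery(struct pci_dev *pdev)
{
	struct pci_doe_mb *mb;
	u32 req = 0;	/* discovery request: start at index 0 */
	u32 resp;
	int rc;

	mb = pci_find_doe_mailbox(pdev, 0x0001, 0);
	if (!mb)
		return -ENODEV;

	rc = pci_doe(mb, 0x0001, 0, &req, sizeof(req), &resp, sizeof(resp));
	return rc < 0 ? rc : 0;
}
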
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 1af5cb02ef7f..3a4860bd2758 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -10,6 +10,33 @@
#include <linux/platform_device.h>
/*
+ * Memory address shift values for the byte-level address that
+ * can be used when accessing the PCI Express Configuration Space.
+ */
+
+/*
+ * Enhanced Configuration Access Mechanism (ECAM)
+ *
+ * See PCI Express Base Specification, Revision 5.0, Version 1.0,
+ * Section 7.2.2, Table 7-1, p. 677.
+ */
+#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */
+#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */
+
+#define PCIE_ECAM_BUS_MASK 0xff
+#define PCIE_ECAM_DEVFN_MASK 0xff
+#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */
+
+#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT)
+#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT)
+#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK)
+
+#define PCIE_ECAM_OFFSET(bus, devfn, where) \
+ (PCIE_ECAM_BUS(bus) | \
+ PCIE_ECAM_DEVFN(devfn) | \
+ PCIE_ECAM_REG(where))
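As a worked example of the encoding above (the device address and register offset are illustrative): for bus 1, device 2, function 0, register 0x10, the macro yields (1 << 20) | (0x10 << 12) | 0x10 = 0x110010:

	/* Sketch: reading config register 0x10 of device 01:02.0 through a
	 * mapped ECAM window; "base" is an assumed ioremap'ed pointer.
	 */
	static u32 example_ecam_read(void __iomem *base)
	{
		/* (1 << 20) | (PCI_DEVFN(2, 0) << 12) | 0x10 == 0x110010 */
		return readl(base + PCIE_ECAM_OFFSET(1, PCI_DEVFN(2, 0), 0x10));
	}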
+
+/*
* struct to hold pci ops and bus shift of the config window
* for a PCI controller.
*/
@@ -28,6 +55,7 @@ struct pci_ecam_ops {
struct pci_config_window {
struct resource res;
struct resource busr;
+ unsigned int bus_shift;
void *priv;
const struct pci_ecam_ops *ops;
union {
@@ -51,17 +79,20 @@ extern const struct pci_ecam_ops pci_generic_ecam_ops;
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
+extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */
extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
+extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif
#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev);
-int pci_host_common_remove(struct platform_device *pdev);
+void pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index f42b0fd4b4bc..3e2140d7e31d 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/**
+/*
* PCI Endpoint ConfigFS header file
*
* Copyright (C) 2017 Texas Instruments
@@ -19,7 +19,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group);
#else
static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
{
- return 0;
+ return NULL;
}
static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
@@ -28,7 +28,7 @@ static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
{
- return 0;
+ return NULL;
}
static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index cc66bec8be90..cc2f70d061c8 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Controller* (EPC) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -13,13 +13,25 @@
struct pci_epc;
-enum pci_epc_irq_type {
- PCI_EPC_IRQ_UNKNOWN,
- PCI_EPC_IRQ_LEGACY,
- PCI_EPC_IRQ_MSI,
- PCI_EPC_IRQ_MSIX,
+enum pci_epc_interface_type {
+ UNKNOWN_INTERFACE = -1,
+ PRIMARY_INTERFACE,
+ SECONDARY_INTERFACE,
};
+static inline const char *
+pci_epc_interface_string(enum pci_epc_interface_type type)
+{
+ switch (type) {
+ case PRIMARY_INTERFACE:
+ return "primary";
+ case SECONDARY_INTERFACE:
+ return "secondary";
+ default:
+ return "UNKNOWN interface";
+ }
+}
+
/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
@@ -36,32 +48,39 @@ enum pci_epc_irq_type {
* @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
* from the MSI-X capability register
* @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
+ * @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
+ * @get_features: ops to get the features supported by the EPC
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *epc, u8 func_no,
+ int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, u8 func_no,
+ int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- void (*clear_bar)(struct pci_epc *epc, u8 func_no,
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- int (*map_addr)(struct pci_epc *epc, u8 func_no,
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr, u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc, u8 func_no);
- int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
- int (*get_msix)(struct pci_epc *epc, u8 func_no);
- int (*raise_irq)(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ unsigned int type, u16 interrupt_num);
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
struct module *owner;
};
@@ -96,6 +115,7 @@ struct pci_epc_mem {
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
+ * @list_lock: Mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
* @windows: array of address space of the endpoint controller
* @mem: first window of the endpoint controller, which corresponds to
@@ -103,34 +123,67 @@ struct pci_epc_mem {
* single window.
* @num_windows: number of windows supported by device
* @max_functions: max number of functions that can be configured in this EPC
+ * @max_vfs: Array indicating the maximum number of virtual functions that can
+ * be associated with each physical function
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
- * @notifier: used to notify EPF of any EPC events (like linkup)
*/
struct pci_epc {
struct device dev;
struct list_head pci_epf;
+ struct mutex list_lock;
const struct pci_epc_ops *ops;
struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
unsigned int num_windows;
u8 max_functions;
+ u8 *max_vfs;
struct config_group *group;
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
- struct atomic_notifier_head notifier;
+};
+
+/**
+ * @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC.
+ * @BAR_FIXED: The BAR mask is fixed by the hardware.
+ * @BAR_RESERVED: The BAR should not be touched by an EPF driver.
+ */
+enum pci_epc_bar_type {
+ BAR_PROGRAMMABLE = 0,
+ BAR_FIXED,
+ BAR_RESERVED,
+};
+
+/**
+ * struct pci_epc_bar_desc - hardware description for a BAR
+ * @type: the type of the BAR
+ * @fixed_size: the fixed size, only applicable if type is BAR_FIXED.
+ * @only_64bit: if true, an EPF driver is not allowed to choose if this BAR
+ * should be configured as 32-bit or 64-bit, the EPF driver must
+ * configure this BAR as 64-bit. Additionally, the BAR succeeding
+ * this BAR must be set to type BAR_RESERVED.
+ *
+ * only_64bit should not be set on a BAR of type BAR_RESERVED.
+ * (If BARx is a 64-bit BAR that an EPF driver is not allowed to
+ * touch, then both BARx and BARx+1 must be set to type
+ * BAR_RESERVED.)
+ */
+struct pci_epc_bar_desc {
+ enum pci_epc_bar_type type;
+ u64 fixed_size;
+ bool only_64bit;
};
/**
 * struct pci_epc_features - features supported by an EPC device per function
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @core_init_notifier: indicate if the EPC device can notify the EPF driver
+ * when core initialization is complete
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
- * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
- * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs
- * @bar_fixed_size: Array specifying the size supported by each BAR
+ * @bar: array specifying the hardware description for each BAR
* @align: alignment size required for BAR buffer allocation
*/
struct pci_epc_features {
@@ -138,9 +191,7 @@ struct pci_epc_features {
unsigned int core_init_notifier : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
- u8 reserved_bar;
- u8 bar_fixed_64bit;
- u64 bar_fixed_size[PCI_STD_NUM_BARS];
+ struct pci_epc_bar_desc bar[PCI_STD_NUM_BARS];
size_t align;
};
@@ -161,12 +212,6 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
return dev_get_drvdata(&epc->dev);
}
-static inline int
-pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&epc->notifier, nb);
-}
-
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
@@ -175,34 +220,44 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+void pci_epc_bme_notify(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ unsigned int type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no);
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
- *epc_features);
+ u8 func_no, u8 vfunc_no);
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 6644ff3b0702..adee6a1b35db 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Function* (EPF) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -9,18 +9,17 @@
#ifndef __LINUX_PCI_EPF_H
#define __LINUX_PCI_EPF_H
+#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
struct pci_epf;
-
-enum pci_notify_event {
- CORE_INIT,
- LINK_UP,
-};
+struct pci_epc_features;
+enum pci_epc_interface_type;
enum pci_barno {
+ NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -60,10 +59,27 @@ struct pci_epf_header {
 * @bind: ops to perform when an EPC device has been bound to the EPF device
 * @unbind: ops to perform when a binding has been lost between an EPC device
 * and an EPF device
+ * @add_cfs: ops to initialize function specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
void (*unbind)(struct pci_epf *epf);
+ struct config_group *(*add_cfs)(struct pci_epf *epf,
+ struct config_group *group);
+};
+
+/**
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
+ * @core_init: Callback for the EPC initialization complete event
+ * @link_up: Callback for the EPC link up event
+ * @link_down: Callback for the EPC link down event
+ * @bme: Callback for the EPC BME (Bus Master Enable) event
+ */
+struct pci_epc_event_ops {
+ int (*core_init)(struct pci_epf *epf);
+ int (*link_up)(struct pci_epf *epf);
+ int (*link_down)(struct pci_epf *epf);
+ int (*bme)(struct pci_epf *epf);
};
/**
@@ -78,11 +94,12 @@ struct pci_epf_ops {
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
- int (*probe)(struct pci_epf *epf);
- int (*remove)(struct pci_epf *epf);
+ int (*probe)(struct pci_epf *epf,
+ const struct pci_epf_device_id *id);
+ void (*remove)(struct pci_epf *epf);
struct device_driver driver;
- struct pci_epf_ops *ops;
+ const struct pci_epf_ops *ops;
struct module *owner;
struct list_head epf_group;
const struct pci_epf_device_id *id_table;
@@ -96,6 +113,8 @@ struct pci_epf_driver {
* @phys_addr: physical address that should be mapped to the BAR
* @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
+ * @barno: BAR number
+ * @flags: flags that are set for the BAR
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
@@ -112,12 +131,26 @@ struct pci_epf_bar {
* @header: represents standard configuration header
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
- * @func_no: unique function number within this endpoint device
+ * @msix_interrupts: number of MSI-X interrupts required by this function
+ * @func_no: unique (physical) function number within this endpoint device
+ * @vfunc_no: unique virtual function number within a physical function
* @epc: the EPC device to which this EPF device is bound
+ * @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
+ * @id: Pointer to the EPF device ID
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
- * @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
+ * @sec_epc: the secondary EPC device to which this EPF device is bound
+ * @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
+ * EPC device
+ * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
+ * @sec_epc_func_no: unique (physical) function number within the secondary EPC
+ * @group: configfs group associated with the EPF device
+ * @is_bound: indicates if bind notification to the function driver has been invoked
+ * @is_vf: true - virtual function, false - physical function
+ * @vfunction_num_map: bitmap to manage virtual function number
+ * @pci_vepf: list of virtual endpoint functions associated with this function
+ * @event_ops: Callbacks for capturing the EPC events
*/
struct pci_epf {
struct device dev;
@@ -127,13 +160,27 @@ struct pci_epf {
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
+ u8 vfunc_no;
struct pci_epc *epc;
+ struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
+ const struct pci_epf_device_id *id;
struct list_head list;
- struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
+
+ /* The members below attach a secondary EPC to an endpoint function */
+ struct pci_epc *sec_epc;
+ struct list_head sec_epc_list;
+ struct pci_epf_bar sec_epc_bar[6];
+ u8 sec_epc_func_no;
+ struct config_group *group;
+ unsigned int is_bound;
+ unsigned int is_vf;
+ unsigned long vfunction_num_map;
+ struct list_head pci_vepf;
+ const struct pci_epc_event_ops *event_ops;
};
/**
@@ -164,16 +211,18 @@ static inline void *epf_get_drvdata(struct pci_epf *epf)
return dev_get_drvdata(&epf->dev);
}
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf);
struct pci_epf *pci_epf_create(const char *name);
void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align);
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 8318a97c9c61..2c07aa6b7665 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -30,10 +30,6 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
@@ -83,17 +79,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
-static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
static inline int pci_p2pdma_enable_store(const char *page,
struct pci_dev **p2p_dev, bool *use_p2pdma)
{
@@ -119,16 +104,4 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
-static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
-}
-
-static inline void pci_p2pdma_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir)
-{
- pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
-}
-
#endif /* _LINUX_PCI_P2P_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 835530605c0d..16493426a04f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -23,7 +23,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
-
+#include <linux/args.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
+#include <linux/msi_api.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
@@ -49,6 +50,12 @@
PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_PARITY)
+/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
+#define PCI_NUM_RESET_METHODS 7
+
+#define PCI_RESET_PROBE true
+#define PCI_RESET_DO_RESET false
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -149,6 +156,15 @@ enum pci_interrupt_pin {
#define PCI_NUM_INTX 4
/*
+ * Reading from a device that doesn't respond typically returns ~0. A
+ * successful read from a device may also return ~0, so you need additional
+ * information to reliably identify errors.
+ */
+#define PCI_ERROR_RESPONSE (~0ULL)
+#define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
+#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
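A typical pattern, sketched under the assumption of a driver-owned pdev: treat an all-ones value as a possible error response and confirm it with a presence check:

	/* Sketch: distinguishing a real error from a legitimate ~0 read */
	static int example_check_read(struct pci_dev *pdev)
	{
		u32 val;

		pci_read_config_dword(pdev, PCI_COMMAND, &val);
		if (PCI_POSSIBLE_ERROR(val) && !pci_device_is_present(pdev))
			return -ENODEV;	/* all-ones and device gone: real error */
		return 0;
	}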
+
+/*
* pci_power_t values must match the bits in the Capabilities PME_Support
* and Control/Status PowerState fields in the Power Management capability.
*/
@@ -227,6 +243,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
/* Don't use Relaxed Ordering for TLPs directed at this device */
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ /* Device does honor MSI masking despite saying otherwise */
+ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -281,29 +299,24 @@ enum pci_bus_speed {
PCIE_SPEED_8_0GT = 0x16,
PCIE_SPEED_16_0GT = 0x17,
PCIE_SPEED_32_0GT = 0x18,
+ PCIE_SPEED_64_0GT = 0x19,
PCI_SPEED_UNKNOWN = 0xff,
};
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-struct pci_cap_saved_data {
- u16 cap_nr;
- bool cap_extended;
- unsigned int size;
- u32 data[];
-};
-
-struct pci_cap_saved_state {
- struct hlist_node next;
- struct pci_cap_saved_data cap;
+struct pci_vpd {
+ struct mutex lock;
+ unsigned int len;
+ u8 cap;
};
struct irq_affinity;
struct pcie_link_state;
-struct pci_vpd;
struct pci_sriov;
struct pci_p2pdma;
+struct rcec_ea;
/* The pci_dev structure describes PCI devices */
struct pci_dev {
@@ -327,6 +340,11 @@ struct pci_dev {
u16 aer_cap; /* AER capability offset */
struct aer_stats *aer_stats; /* AER stats for this device */
#endif
+#ifdef CONFIG_PCIEPORTBUS
+ struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
+ struct pci_dev *rcec; /* Associated RCEC device */
+#endif
+ u32 devcap; /* PCIe Device Capabilities */
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -348,8 +366,8 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
- unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
@@ -362,10 +380,6 @@ struct pci_dev {
unsigned int mmio_always_on:1; /* Disallow turning off io/mem
decoding during BAR sizing */
unsigned int wakeup_prepared:1;
- unsigned int runtime_d3cold:1; /* Whether go through runtime
- D3cold, not set for devices
- powered on/off by the
- corresponding bridge */
unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
@@ -373,14 +387,16 @@ struct pci_dev {
user sysfs */
unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
bit manually */
- unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
+ u16 l1ss; /* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
#endif
+ unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
pci_channel_state_t error_state; /* Current connectivity state */
@@ -394,6 +410,7 @@ struct pci_dev {
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+ struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
bool match_driver; /* Skip attaching driver */
@@ -415,12 +432,12 @@ struct pci_dev {
unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
- unsigned int is_managed:1;
+ unsigned int is_managed:1; /* Managed via devres */
+ unsigned int is_msi_managed:1; /* MSI release via devres installed */
unsigned int needs_freset:1; /* Requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
- unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
@@ -445,13 +462,15 @@ struct pci_dev {
unsigned int is_probed:1; /* Device probing in progress */
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
+ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
+ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
+ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
+ spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */
- int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
@@ -459,14 +478,16 @@ struct pci_dev {
unsigned int broken_cmd_compl:1; /* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
+ u16 ptm_cap; /* PTM Capability */
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
- const struct attribute_group **msi_irq_groups;
+ void __iomem *msix_base;
+ raw_spinlock_t msi_lock;
#endif
- struct pci_vpd *vpd;
+ struct pci_vpd vpd;
#ifdef CONFIG_PCIE_DPC
u16 dpc_cap;
unsigned int dpc_rp_extensions:1;
@@ -490,14 +511,24 @@ struct pci_dev {
u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
- struct pci_p2pdma *p2pdma;
+ struct pci_p2pdma __rcu *p2pdma;
+#endif
+#ifdef CONFIG_PCI_DOE
+ struct xarray doe_mbs; /* Data Object Exchange mailboxes */
#endif
u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
unsigned long priv_flags; /* Private flags for the PCI driver */
+
+ /* These methods index pci_reset_fn_methods[] */
+ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -519,29 +550,43 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
+/*
+ * Currently in ACPI spec, for each PCI host bridge, PCI Segment
+ * Group number is limited to a 16-bit value, therefore (int)-1 is
+ * not a valid PCI domain number, and can be used as a sentinel
+ * value indicating ->domain_nr is not set by the driver (and
+ * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
+ * pci_bus_find_domain_nr()).
+ */
+#define PCI_DOMAIN_NR_NOT_SET (-1)
+
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* Root bus */
struct pci_ops *ops;
+ struct pci_ops *child_ops;
void *sysdata;
int busnr;
+ int domain_nr;
struct list_head windows; /* resource_entry */
struct list_head dma_ranges; /* dma ranges resource list */
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
- struct msi_controller *msi;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
+ unsigned int no_inc_mrrs:1; /* No Increase MRRS */
unsigned int native_aer:1; /* OS may use PCIe AER */
unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
unsigned int native_ltr:1; /* OS may use PCIe LTR */
unsigned int native_dpc:1; /* OS may use PCIe DPC */
+ unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
unsigned int preserve_config:1; /* Preserve FW resource setup */
unsigned int size_windows:1; /* Enable root bus sizing */
+ unsigned int msi_domain:1; /* Bridge wants MSI domain */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
@@ -612,7 +657,6 @@ struct pci_bus {
struct resource busn_res; /* Bus numbers routed to this bus */
struct pci_ops *ops; /* Configuration access functions */
- struct msi_controller *msi; /* MSI controller */
void *sysdata; /* Hook for sys-specific extension */
struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
@@ -633,6 +677,7 @@ struct pci_bus {
struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
struct bin_attribute *legacy_mem; /* Legacy mem */
unsigned int is_added:1;
+ unsigned int unsafe_warn:1; /* warned about RW1C config write */
};
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
@@ -668,6 +713,31 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
+/**
+ * pci_is_vga - check if the PCI device is a VGA device
+ * @pdev: PCI device
+ *
+ * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
+ * VGA Base Class and Sub-Classes:
+ *
+ * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
+ * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
+ *
+ * Return true if the PCI device is a VGA device and uses the legacy VGA
+ * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
+ * aliases).
+ */
+static inline bool pci_is_vga(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ return true;
+
+ if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
+ return true;
+
+ return false;
+}
+
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
@@ -806,6 +876,9 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
+
+ /* Allow device driver to record more details of a correctable error */
+ void (*cor_error_detected)(struct pci_dev *dev);
};
@@ -813,7 +886,6 @@ struct module;
/**
* struct pci_driver - PCI driver structure
- * @node: List of driver structures.
* @name: Driver name.
* @id_table: Pointer to table of device IDs the driver is
* interested in. Most drivers should export this
@@ -847,13 +919,27 @@ struct module;
* e.g. drivers/net/e100.c.
* @sriov_configure: Optional driver callback to allow configuration of
* number of VFs to enable via sysfs "sriov_numvfs" file.
+ * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
+ * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
+ * This will change MSI-X Table Size in the VF Message Control
+ * registers.
+ * @sriov_get_vf_total_msix: PF driver callback to get the total number of
+ * MSI-X vectors available for distribution to the VFs.
* @err_handler: See Documentation/PCI/pci-error-recovery.rst
* @groups: Sysfs attribute groups.
+ * @dev_groups: Attributes attached to the device that will be
+ * created once it is bound to the driver.
* @driver: Driver model structure.
* @dynids: List of dynamically added device IDs.
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * Most device drivers need not care about this flag as long
+ * as all DMA is handled through the kernel DMA API. Some
+ * special drivers, for example VFIO drivers, know how to
+ * manage DMA themselves and set this flag so that the IOMMU
+ * layer will allow them to set up and manage their own I/O
+ * address space.
*/
struct pci_driver {
- struct list_head node;
const char *name;
const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
@@ -862,13 +948,20 @@ struct pci_driver {
int (*resume)(struct pci_dev *dev); /* Device woken up */
void (*shutdown)(struct pci_dev *dev);
int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
+ int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
+ u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
const struct pci_error_handlers *err_handler;
const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
struct device_driver driver;
struct pci_dynids dynids;
+ bool driver_managed_dma;
};
-#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
+static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
+{
+ return drv ? container_of(drv, struct pci_driver, driver) : NULL;
+}
/**
* PCI_DEVICE - macro used to describe a specific PCI device
@@ -884,6 +977,35 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
+ * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
+ * override_only flags.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ * @driver_override: the 32 bit PCI Device override_only
+ *
+ * This macro is used to create a struct pci_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, .override_only = (driver_override)
+
+/**
+ * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
+ * "driver_override" PCI device.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID and the driver_override will be set to
+ * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
+ */
+#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
+ PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
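For illustration only, an ID table entry built with this macro might look as follows (the 8086:10d3 IDs are placeholders, not from this patch):

	/* Sketch: match 8086:10d3 only when driver_override selects this driver */
	static const struct pci_device_id example_vfio_ids[] = {
		{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(0x8086, 0x10d3) },
		{ }
	};
	MODULE_DEVICE_TABLE(pci, example_vfio_ids);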
+
+/**
* PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
@@ -950,11 +1072,13 @@ enum {
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
-#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
+#define PCI_IRQ_LEGACY PCI_IRQ_INTX /* Deprecated! Use PCI_IRQ_INTX */
+
/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
@@ -1047,6 +1171,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
+DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
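The DEFINE_FREE() class above enables scope-based reference dropping via the __free() attribute from <linux/cleanup.h>; a minimal sketch, with the lookup arguments purely illustrative:

	/* Sketch: the reference is dropped automatically at end of scope */
	static bool example_dev_present(void)
	{
		struct pci_dev *pdev __free(pci_dev_put) =
			pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, NULL);

		return pdev;	/* pci_dev_put() runs on return */
	}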
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
@@ -1060,13 +1185,16 @@ void pci_sort_breadthfirst(void);
/* Generic PCI functions exported to card drivers */
-int pci_find_capability(struct pci_dev *dev, int cap);
-int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
-int pci_find_ext_capability(struct pci_dev *dev, int cap);
-int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
-int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
-int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
+u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
+u8 pci_find_capability(struct pci_dev *dev, int cap);
+u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
+u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
+u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
+u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
+u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
u64 pci_get_dsn(struct pci_dev *dev);
@@ -1079,6 +1207,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
+struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
+
int pci_dev_present(const struct pci_device_id *ids);
int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
@@ -1111,16 +1241,47 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
u32 clear, u32 set);
+/**
+ * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
+ * @dev: PCI device structure of the PCI Express device
+ * @pos: PCI Express Capability Register
+ * @clear: Clear bitmask
+ * @set: Set bitmask
+ *
+ * Perform a Read-Modify-Write (RMW) operation using @clear and @set
+ * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
+ * Capability Registers are accessed concurrently in RMW fashion, hence
+ * require locking which is handled transparently to the caller.
+ */
+static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
+ int pos,
+ u16 clear, u16 set)
+{
+ switch (pos) {
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_RTCTL:
+ return pcie_capability_clear_and_set_word_locked(dev, pos,
+ clear, set);
+ default:
+ return pcie_capability_clear_and_set_word_unlocked(dev, pos,
+ clear, set);
+ }
+}
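A hedged usage sketch: an RMW on Link Control such as the following is routed through the locked variant by the switch above (pdev is assumed; enabling L1 here is only an example):

	/* Sketch: enable ASPM L1 via a locked RMW on Link Control */
	static void example_set_aspm_l1(struct pci_dev *pdev)
	{
		pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   PCI_EXP_LNKCTL_ASPM_L1);
	}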
+
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
u16 set)
{
@@ -1187,11 +1348,11 @@ void pci_clear_master(struct pci_dev *dev);
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
-#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
+void pci_disable_parity(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
@@ -1207,8 +1368,9 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
+int pcie_link_speed_mbps(struct pci_dev *pdev);
void pcie_print_link_status(struct pci_dev *dev);
-bool pcie_has_flr(struct pci_dev *dev);
+int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1223,6 +1385,15 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
void pci_release_resource(struct pci_dev *dev, int resno);
+static inline int pci_rebar_bytes_to_size(u64 bytes)
+{
+ bytes = roundup_pow_of_two(bytes);
+
+ /* Return BAR size as defined in the resizable BAR specification */
+ return max(ilog2(bytes), 20) - 20;
+}
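The helper encodes sizes per the Resizable BAR capability, where size 0 means 1 MB and each step doubles it. Illustrative values (SZ_* constants assumed from <linux/sizes.h>):

	/* Sketch: sample encodings produced by pci_rebar_bytes_to_size() */
	int a = pci_rebar_bytes_to_size(SZ_1M);		/* 0: 2^20 bytes */
	int b = pci_rebar_bytes_to_size(SZ_256M);	/* 8: 2^28 bytes */
	int c = pci_rebar_bytes_to_size(SZ_4K);		/* 0: rounds up to the 1 MB minimum */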
+
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
@@ -1249,14 +1420,9 @@ int pci_load_saved_state(struct pci_dev *dev,
struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
-struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
-struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
- u16 cap);
-int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
-int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
- u16 cap, unsigned int size);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
@@ -1268,7 +1434,7 @@ bool pci_dev_run_wake(struct pci_dev *dev);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
-void pci_wakeup_bus(struct pci_bus *bus);
+void pci_resume_bus(struct pci_bus *bus);
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
/* For use by arch with custom probe code */
@@ -1276,7 +1442,6 @@ void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
/* Functions for PCI Hotplug drivers to use */
-int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
@@ -1285,7 +1450,8 @@ void pci_unlock_rescan_remove(void);
/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
-int pci_set_vpd_size(struct pci_dev *dev, size_t len);
+ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1299,7 +1465,6 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
-void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
@@ -1313,6 +1478,21 @@ int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
+static inline __must_check struct resource *
+pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
+ unsigned int len, const char *name)
+{
+ return __request_region(&pdev->driver_exclusive_resource, offset, len,
+ name, IORESOURCE_EXCLUSIVE);
+}
+
+static inline void pci_release_config_region(struct pci_dev *pdev,
+ unsigned int offset,
+ unsigned int len)
+{
+ __release_region(&pdev->driver_exclusive_resource, offset, len);
+}
+
/* drivers/pci/bus.c */
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
@@ -1322,16 +1502,51 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
int devm_request_pci_bus_resources(struct device *dev,
struct list_head *resources);
/* Temporary until new and working PCI SBR API in place */
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
-#define pci_bus_for_each_resource(bus, res, i) \
- for (i = 0; \
- (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
- i++)
+#define __pci_bus_for_each_res0(bus, res, ...) \
+ for (unsigned int __b = 0; \
+ (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
+ __b++)
+
+#define __pci_bus_for_each_res1(bus, res, __b) \
+ for (__b = 0; \
+ (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
+ __b++)
+
+/**
+ * pci_bus_for_each_resource - iterate over PCI bus resources
+ * @bus: the PCI bus
+ * @res: pointer to the current resource
+ * @...: optional index of the current resource
+ *
+ * Iterate over PCI bus resources. First walk the PCI bus resource array,
+ * which has at most %PCI_BRIDGE_RESOURCE_NUM entries; after that, continue
+ * with the separate list of additional resources, if it is not empty.
+ * That is why the logical OR is used in the loop condition.
+ *
+ * Possible usage:
+ *
+ * struct pci_bus *bus = ...;
+ * struct resource *res;
+ * unsigned int i;
+ *
+ * // With optional index
+ * pci_bus_for_each_resource(bus, res, i)
+ * pr_info("PCI bus resource[%u]: %pR\n", i, res);
+ *
+ * // Without index
+ * pci_bus_for_each_resource(bus, res)
+ * _do_something_(res);
+ */
+#define pci_bus_for_each_resource(bus, res, ...) \
+ CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
+ (bus, res, __VA_ARGS__)
int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
struct resource *res, resource_size_t size,
@@ -1411,6 +1626,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
@@ -1434,24 +1651,15 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
-/* kmem_cache style wrapper around pci_alloc_consistent() */
-
#include <linux/dmapool.h>
-#define pci_pool dma_pool
-#define pci_pool_create(name, pdev, size, align, allocation) \
- dma_pool_create(name, &pdev->dev, size, align, allocation)
-#define pci_pool_destroy(pool) dma_pool_destroy(pool)
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
-#define pci_pool_zalloc(pool, flags, handle) \
- dma_pool_zalloc(pool, flags, handle)
-#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-
struct msix_entry {
u32 vector; /* Kernel uses to write allocated vector */
u16 entry; /* Driver uses to specify entry, OS writes */
};
+struct msi_domain_template;
+
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
@@ -1470,13 +1678,25 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
struct irq_affinity *affd);
+bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
+struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc);
+void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
+
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+ unsigned int hwsize, void *data);
+struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc);
+void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1503,6 +1723,27 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return 1;
return -ENOSPC;
}
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
+ flags, NULL);
+}
+
+static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
+{ return false; }
+static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc)
+{
+ struct msi_map map = { .index = -ENOSYS, };
+
+ return map;
+}
+
+static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
+{
+}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
@@ -1519,6 +1760,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
{
return cpu_possible_mask;
}
+
+static inline bool pci_create_ims_domain(struct pci_dev *pdev,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *data)
+{ return false; }
+
+static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
+ union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc)
+{
+ struct msi_map map = { .index = -ENOSYS, };
+
+ return map;
+}
+
+static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
+{
+}
+
#endif
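Taken together, a probe path typically uses the allocation API above roughly as follows (the handler, name, and vector count are illustrative):

	/* Sketch: allocate 1..8 vectors of any type, then bind a handler */
	static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t handler)
	{
		int nvec;

		nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
		if (nvec < 0)
			return nvec;

		return request_irq(pci_irq_vector(pdev, 0), handler, 0,
				   "example", pdev);
	}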
/**
@@ -1568,10 +1828,16 @@ extern bool pcie_ports_native;
#define PCIE_LINK_STATE_L1_2 BIT(4)
#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
+#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
+ PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
#ifdef CONFIG_PCIEASPM
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+int pci_enable_link_state(struct pci_dev *pdev, int state);
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1580,6 +1846,10 @@ static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
+static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
@@ -1593,10 +1863,27 @@ static inline bool pci_aer_available(void) { return false; }
bool pci_ats_disabled(void);
+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+void pci_disable_ptm(struct pci_dev *dev);
+bool pcie_ptm_enabled(struct pci_dev *dev);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline bool pcie_ptm_enabled(struct pci_dev *dev)
+{ return false; }
+#endif
+
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_dev_lock(struct pci_dev *dev);
+int pci_dev_trylock(struct pci_dev *dev);
+void pci_dev_unlock(struct pci_dev *dev);
+DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
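The DEFINE_GUARD() above enables scope-based device locking through <linux/cleanup.h>; a minimal sketch, with the operation under the lock purely illustrative:

	/* Sketch: pci_dev_unlock() runs automatically at end of scope */
	static void example_locked_op(struct pci_dev *pdev)
	{
		guard(pci_dev)(pdev);
		pci_save_state(pdev);	/* illustrative operation under the lock */
	}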
+
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
* a PCI domain is defined to be a set of PCI buses which share
@@ -1627,6 +1914,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
#endif
/* Some architectures require additional setup to direct VGA traffic */
@@ -1700,23 +1988,31 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
-#define pci_dev_present(ids) (0)
+static inline struct pci_dev *pci_get_base_class(unsigned int class,
+ struct pci_dev *from)
+{ return NULL; }
+
+static inline int pci_dev_present(const struct pci_device_id *ids)
+{ return 0; }
+
#define no_pci_devices() (1)
#define pci_dev_put(dev) do { } while (0)
static inline void pci_set_master(struct pci_dev *dev) { }
+static inline void pci_clear_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
-static inline int __pci_register_driver(struct pci_driver *drv,
- struct module *owner)
+static inline int __must_check __pci_register_driver(struct pci_driver *drv,
+ struct module *owner,
+ const char *mod_name)
{ return 0; }
static inline int pci_register_driver(struct pci_driver *drv)
{ return 0; }
static inline void pci_unregister_driver(struct pci_driver *drv) { }
-static inline int pci_find_capability(struct pci_dev *dev, int cap)
+static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
{ return 0; }
static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
int cap)
@@ -1732,6 +2028,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
+static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
@@ -1748,6 +2046,10 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
+static inline int pci_register_io_range(struct fwnode_handle *fwnode,
+ phys_addr_t addr, resource_size_t size)
+{ return -EINVAL; }
+
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
@@ -1791,38 +2093,26 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 unsigned int max_vecs, unsigned int flags,
 struct irq_affinity *aff_desc)
{
return -ENOSPC;
}
-#endif /* CONFIG_PCI */
-
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
- return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
- NULL);
+ return -ENOSPC;
}
+#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depending
- * on the architecture, one will be implemented as a wrapper around the
- * other (in drivers/pci/mmap.c).
- *
+/*
* pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
* is expected to be an offset within that region.
*
- * pci_mmap_page_range() is the legacy architecture-specific interface,
- * which accepts a "user visible" resource address converted by
- * pci_resource_to_user(), as used in the legacy mmap() interface in
- * /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0
@@ -1843,16 +2133,27 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
* These helpers provide future and backwards compatibility
* for accessing popular PCI BAR info
*/
-#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
-#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
-#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
-#define pci_resource_len(dev,bar) \
- ((pci_resource_start((dev), (bar)) == 0 && \
- pci_resource_end((dev), (bar)) == \
- pci_resource_start((dev), (bar))) ? 0 : \
- \
- (pci_resource_end((dev), (bar)) - \
- pci_resource_start((dev), (bar)) + 1))
+#define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
+#define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
+#define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
+#define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
+#define pci_resource_len(dev,bar) \
+ (pci_resource_end((dev), (bar)) ? \
+ resource_size(pci_resource_n((dev), (bar))) : 0)
+
+#define __pci_dev_for_each_res0(dev, res, ...) \
+ for (unsigned int __b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
+ __b++)
+
+#define __pci_dev_for_each_res1(dev, res, __b) \
+ for (__b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
+ __b++)
+
+#define pci_dev_for_each_resource(dev, res, ...) \
+ CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
+ (dev, res, __VA_ARGS__)
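
pci_dev_for_each_resource() selects the zero- or one-extra-argument form via COUNT_ARGS(), so callers may either let the macro keep its own hidden index or supply a variable when the BAR number matters. A sketch of both forms, assuming a valid pdev:

        struct resource *res;
        unsigned int i;

        pci_dev_for_each_resource(pdev, res)
                if (resource_size(res))
                        pci_info(pdev, "%pR\n", res);

        pci_dev_for_each_resource(pdev, res, i)
                pci_info(pdev, "BAR %u: %pR\n", i, res);
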
/*
* Similar to the helpers above, these manipulate per-pci_dev
@@ -1909,7 +2210,7 @@ enum pci_fixup_pass {
};
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook) \
__ADDRESSABLE(hook) \
asm(".section " #sec ", \"a\" \n" \
@@ -1918,10 +2219,33 @@ enum pci_fixup_pass {
".long " #class ", " #class_shift " \n" \
".long " #hook " - . \n" \
".previous \n");
+
+/*
+ * Clang's LTO may rename static functions in C, but has no way to
+ * handle such renamings when referenced from inline asm. To work
+ * around this, create global C stubs for these cases.
+ */
+#ifdef CONFIG_LTO_CLANG
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ void stub(struct pci_dev *dev); \
+ void stub(struct pci_dev *dev) \
+ { \
+ hook(dev); \
+ } \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, stub)
+#else
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook)
+#endif
+
#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook) \
__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
- class_shift, hook)
+ class_shift, hook, __UNIQUE_ID(hook))
#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
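
The public DECLARE_PCI_FIXUP_* wrappers are unchanged by the LTO workaround, so existing quirks keep working unmodified. A sketch of a caller (the device ID and the hook body are illustrative only):

        static void quirk_example(struct pci_dev *dev)
        {
                pci_info(dev, "example quirk applied\n");
        }
        DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);
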
@@ -2023,7 +2347,7 @@ void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
-int pcibios_add_device(struct pci_dev *dev);
+int pcibios_device_add(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
@@ -2034,8 +2358,9 @@ int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-extern struct dev_pm_ops pcibios_pm_ops;
+#if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
+extern int pci_create_resource_files(struct pci_dev *dev);
+extern void pci_remove_resource_files(struct pci_dev *dev);
#endif
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
@@ -2054,7 +2379,8 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
-
+int pci_iov_vf_id(struct pci_dev *dev);
+void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
@@ -2082,6 +2408,18 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
}
+
+static inline int pci_iov_vf_id(struct pci_dev *dev)
+{
+ return -ENOSYS;
+}
+
+static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
+ struct pci_driver *pf_driver)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
@@ -2179,6 +2517,11 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
return NULL;
}
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+{
+ return dev->error_state == pci_channel_io_perm_failure;
+}
+
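
A typical use of this newly-exposed helper is to fail fast before touching config space on a surprise-removed device, e.g.:

        if (pci_dev_is_disconnected(pdev))
                return -ENODEV;
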
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
@@ -2197,20 +2540,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
-/* Small Resource Data Type Tag Item Names */
-#define PCI_VPD_STIN_END 0x0f /* End */
-
-#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
-
-#define PCI_VPD_SRDT_TIN_MASK 0x78
-#define PCI_VPD_SRDT_LEN_MASK 0x07
-#define PCI_VPD_LRDT_TIN_MASK 0x7f
-
-#define PCI_VPD_LRDT_TAG_SIZE 3
-#define PCI_VPD_SRDT_TAG_SIZE 1
-
-#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-
#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
@@ -2218,90 +2547,52 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
/**
- * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type length.
- */
-static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
-{
- return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
-}
-
-/**
- * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type Tag item.
- */
-static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
-{
- return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
-}
-
-/**
- * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
- *
- * Returns the extracted Small Resource Data Type length.
- */
-static inline u8 pci_vpd_srdt_size(const u8 *srdt)
-{
- return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
-}
-
-/**
- * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ * pci_vpd_alloc - Allocate buffer and read VPD into it
+ * @dev: PCI device
+ * @size: pointer to field where VPD length is returned
*
- * Returns the extracted Small Resource Data Type Tag Item.
+ * Returns pointer to allocated buffer or an ERR_PTR in case of failure
*/
-static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
-{
- return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
-}
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
/**
- * pci_vpd_info_field_size - Extracts the information field length
- * @info_field: Pointer to the beginning of an information field header
+ * pci_vpd_find_id_string - Locate id string in VPD
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @size: Pointer to field where length of id string is returned
*
- * Returns the extracted information field length.
+ * Returns the index of the id string or -ENOENT if not found.
*/
-static inline u8 pci_vpd_info_field_size(const u8 *info_field)
-{
- return info_field[2];
-}
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
/**
- * pci_vpd_find_tag - Locates the Resource Data Type tag provided
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the vpd buffer
- * @rdt: The Resource Data Type to search for
+ * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @kw: The keyword to search for
+ * @size: Pointer to field where length of found keyword data is returned
*
- * Returns the index where the Resource Data Type was found or
- * -ENOENT otherwise.
+ * Returns the index of the information field keyword data or -ENOENT if
+ * not found.
*/
-int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size);
/**
- * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the buffer area, relative to off, in which to search
- * @kw: The keyword to search for
+ * pci_vpd_check_csum - Check VPD checksum
+ * @buf: Pointer to buffered VPD data
+ * @len: VPD size
*
- * Returns the index where the information field keyword was found or
- * -ENOENT otherwise.
+ * Returns 1 if VPD has no checksum, otherwise 0 or an errno
*/
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
- unsigned int len, const char *kw);
+int pci_vpd_check_csum(const void *buf, unsigned int len);
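
Taken together, the new VPD API replaces the old tag-walking helpers with a read-once-then-search model. A minimal sketch of reading the serial-number keyword (error handling abbreviated, pdev assumed):

        unsigned int size, len;
        void *vpd;
        int off;

        vpd = pci_vpd_alloc(pdev, &size);
        if (IS_ERR(vpd))
                return PTR_ERR(vpd);

        off = pci_vpd_find_ro_info_keyword(vpd, size,
                                           PCI_VPD_RO_KEYWORD_SERIALNO, &len);
        if (off >= 0)
                pci_info(pdev, "serial: %.*s\n", (int)len, (char *)vpd + off);
        kfree(vpd);
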
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2309,6 +2600,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */
static inline struct device_node *
@@ -2398,8 +2690,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
-/* Provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
+#include <linux/dma-mapping.h>
#define pci_printk(level, pdev, fmt, arg...) \
dev_printk(level, &(pdev)->dev, fmt, ##arg)
@@ -2409,6 +2700,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index b482e42d7153..3a10d6ec3ee7 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -44,12 +44,14 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*reset_slot) (struct hotplug_slot *slot, int probe);
+ int (*reset_slot) (struct hotplug_slot *slot, bool probe);
};
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
+ * @slot_list: internal list used to track hotplug PCI slots
+ * @pci_slot: represents a physical slot
* @owner: The module owner of this structure
* @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1ab1e24bcbce..a0c75e467df3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2,7 +2,7 @@
/*
* PCI Class, Vendor and Device IDs
*
- * Please keep sorted.
+ * Please keep sorted by numeric Vendor ID and Device ID.
*
* Do not add new entries to this file unless the definitions
* are shared between multiple drivers.
@@ -51,6 +51,7 @@
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -59,6 +60,8 @@
#define PCI_CLASS_BRIDGE_EISA 0x0602
#define PCI_CLASS_BRIDGE_MC 0x0603
#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400
+#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
@@ -72,6 +75,9 @@
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+/* Interface for SERIAL/MODEM */
+#define PCI_SERIAL_16550_COMPATIBLE 0x02
+
#define PCI_BASE_CLASS_SYSTEM 0x08
#define PCI_CLASS_SYSTEM_PIC 0x0800
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
@@ -81,6 +87,7 @@
#define PCI_CLASS_SYSTEM_RTC 0x0803
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
+#define PCI_CLASS_SYSTEM_RCEC 0x0807
#define PCI_CLASS_SYSTEM_OTHER 0x0880
#define PCI_BASE_CLASS_INPUT 0x09
@@ -144,12 +151,21 @@
#define PCI_CLASS_SP_DPIO 0x1100
#define PCI_CLASS_SP_OTHER 0x1180
+#define PCI_BASE_CLASS_ACCELERATOR 0x12
+#define PCI_CLASS_ACCELERATOR_PROCESSING 0x1200
+
#define PCI_CLASS_OTHERS 0xff
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
#define PCI_VENDOR_ID_LOONGSON 0x0014
+#define PCI_DEVICE_ID_LOONGSON_HDA 0x7a07
+#define PCI_DEVICE_ID_LOONGSON_HDMI 0x7a37
+
+#define PCI_VENDOR_ID_SOLIDIGM 0x025e
+
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -164,6 +180,8 @@
#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+#define PCI_VENDOR_ID_ITTIM 0x0b48
+
#define PCI_VENDOR_ID_COMPAQ 0x0e11
#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
@@ -552,7 +570,19 @@
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0
+#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
+#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
+#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
+#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -629,6 +659,8 @@
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+#define PCI_SUBVENDOR_ID_DELL 0x1028
+
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
#define PCI_DEVICE_ID_MATROX_MIL 0x0519
@@ -880,6 +912,7 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_DRA74x 0xb500
#define PCI_DEVICE_ID_TI_DRA72x 0xb501
@@ -1115,6 +1148,7 @@
#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1489 0x1489
#define PCI_DEVICE_ID_AL_M1533 0x1533
#define PCI_DEVICE_ID_AL_M1535 0x1535
#define PCI_DEVICE_ID_AL_M1541 0x1541
@@ -1685,37 +1719,8 @@
#define PCI_VENDOR_ID_MICROSEMI 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
-#define PCI_DEVICE_ID_RP32INTF 0x0001
-#define PCI_DEVICE_ID_RP8INTF 0x0002
-#define PCI_DEVICE_ID_RP16INTF 0x0003
-#define PCI_DEVICE_ID_RP4QUAD 0x0004
-#define PCI_DEVICE_ID_RP8OCTA 0x0005
-#define PCI_DEVICE_ID_RP8J 0x0006
-#define PCI_DEVICE_ID_RP4J 0x0007
-#define PCI_DEVICE_ID_RP8SNI 0x0008
-#define PCI_DEVICE_ID_RP16SNI 0x0009
-#define PCI_DEVICE_ID_RPP4 0x000A
-#define PCI_DEVICE_ID_RPP8 0x000B
-#define PCI_DEVICE_ID_RP4M 0x000D
-#define PCI_DEVICE_ID_RP2_232 0x000E
-#define PCI_DEVICE_ID_RP2_422 0x000F
-#define PCI_DEVICE_ID_URP32INTF 0x0801
-#define PCI_DEVICE_ID_URP8INTF 0x0802
-#define PCI_DEVICE_ID_URP16INTF 0x0803
-#define PCI_DEVICE_ID_URP8OCTA 0x0805
-#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
-#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
-#define PCI_DEVICE_ID_CRP16INTF 0x0903
#define PCI_VENDOR_ID_CYCLADES 0x120e
-#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
-#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
-#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
-#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
-#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
-#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
-#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
-#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
#define PCI_DEVICE_ID_PC300_RX_2 0x0300
#define PCI_DEVICE_ID_PC300_RX_1 0x0301
#define PCI_DEVICE_ID_PC300_TE_2 0x0310
@@ -1761,6 +1766,10 @@
#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_AX99100 0x9100
+#define PCI_DEVICE_ID_ASIX_AX99100_LB 0x9110
+
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
@@ -1985,24 +1994,6 @@
#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
#define PCI_VENDOR_ID_MOXA 0x1393
-#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
-#define PCI_DEVICE_ID_MOXA_CP102 0x1020
-#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
-#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
-#define PCI_DEVICE_ID_MOXA_C104 0x1040
-#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
-#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
-#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
-#define PCI_DEVICE_ID_MOXA_CT114 0x1140
-#define PCI_DEVICE_ID_MOXA_CP114 0x1141
-#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
-#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
-#define PCI_DEVICE_ID_MOXA_CP132 0x1320
-#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
-#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
-#define PCI_DEVICE_ID_MOXA_C168 0x1680
-#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
-#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define PCI_DEVICE_ID_MOXA_CP204J 0x2040
#define PCI_DEVICE_ID_MOXA_C218 0x2180
#define PCI_DEVICE_ID_MOXA_C320 0x3200
@@ -2062,8 +2053,6 @@
#define PCI_DEVICE_ID_EXAR_XR17V358 0x0358
#define PCI_VENDOR_ID_MICROGATE 0x13c0
-#define PCI_DEVICE_ID_MICROGATE_USC 0x0010
-#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030
#define PCI_VENDOR_ID_3WARE 0x13C1
#define PCI_DEVICE_ID_3WARE_1000 0x1000
@@ -2113,6 +2102,9 @@
#define PCI_DEVICE_ID_ICE_1712 0x1712
#define PCI_DEVICE_ID_VT1724 0x1724
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
@@ -2476,7 +2468,8 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
-#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
+#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
#define PCI_DEVICE_ID_MPC8308 0xc006
#define PCI_DEVICE_ID_MPC8315E 0x00b4
#define PCI_DEVICE_ID_MPC8315 0x00b5
@@ -2568,11 +2561,16 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251
+#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256
+#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259
#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
#define PCI_VENDOR_ID_QMI 0x1a32
@@ -2587,6 +2585,8 @@
#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
+
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
@@ -2598,11 +2598,15 @@
#define PCI_VENDOR_ID_HYGON 0x1d94
+#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
+
#define PCI_VENDOR_ID_HXT 0x1dbf
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
@@ -2652,11 +2656,12 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
-#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
-#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032a
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032c
#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
@@ -2666,26 +2671,33 @@
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82425 0x0486
+#define PCI_DEVICE_ID_INTEL_HDA_CML_H 0x06c8
#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+#define PCI_DEVICE_ID_INTEL_HDA_OAKTRAIL 0x080a
#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
-#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
-#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084f
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
+#define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
+#define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
-#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
-#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
-#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
@@ -2711,20 +2723,26 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
+#define PCI_DEVICE_ID_INTEL_HDA_BDW 0x160c
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_HDA_CPT 0x1c20
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_HDA_PBG 0x1d20
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_HDA_PPT 0x1e20
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
+#define PCI_DEVICE_ID_INTEL_HDA_BSW 0x2284
+#define PCI_DEVICE_ID_INTEL_SST_BSW 0x22a8
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2774,17 +2792,13 @@
#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
-#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
-#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
-#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
-#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
-#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
-#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
@@ -2794,30 +2808,39 @@
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0
-#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5
-#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6
-#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
-#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
+#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25f0
+#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25f5
+#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25f6
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+#define PCI_DEVICE_ID_INTEL_HDA_ICH6 0x2668
#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
+#define PCI_DEVICE_ID_INTEL_HDA_ESB2 0x269a
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
+#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
-#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc
#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
+#define PCI_DEVICE_ID_INTEL_HDA_ICH7 0x27d8
#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
@@ -2828,17 +2851,20 @@
#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH8 0x284b
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
-#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
-#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
-#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_0 0x293e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_1 0x293f
#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
@@ -2855,8 +2881,8 @@
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
-#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70
@@ -2865,7 +2891,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
-#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9c
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
@@ -2890,6 +2916,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
+#define PCI_DEVICE_ID_INTEL_HDA_GML 0x3198
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
@@ -2900,12 +2927,13 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_LP 0x34c8
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
-#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
-#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
@@ -2915,11 +2943,11 @@
#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610
-#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
-#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
@@ -2932,14 +2960,19 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_N 0x38c8
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_0 0x3a3e
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_1 0x3a6e
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_0 0x3b56
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_1 0x3b57
#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
@@ -2950,16 +2983,12 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
-#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
-#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
-#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
-#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
-#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41
#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
@@ -2971,22 +3000,45 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_H 0x3dc8
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
-#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
-#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
-#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
-#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
-#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_H 0x43c8
+#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_2 0x4f92
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_P 0x51c8
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PS 0x51c9
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_0 0x51ca
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_1 0x51cb
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_M 0x51cc
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PX 0x51cd
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_M 0x51ce
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_PX 0x51cf
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_N 0x54c8
+#define PCI_DEVICE_ID_INTEL_HDA_APL 0x5a98
+#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
+#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
@@ -3015,8 +3067,14 @@
#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0
+#define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28
+#define PCI_DEVICE_ID_INTEL_HDA_ARL_S 0x7f50
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_HDA_POULSBO 0x811b
#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
@@ -3025,9 +3083,33 @@
#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
+#define PCI_DEVICE_ID_INTEL_HDA_LPT 0x8c20
+#define PCI_DEVICE_ID_INTEL_HDA_9_SERIES 0x8ca0
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_0 0x8d20
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_1 0x8d21
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_HDA_LKF 0x98c8
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_0 0x9c20
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_1 0x9c21
+#define PCI_DEVICE_ID_INTEL_HDA_WPT_LP 0x9ca0
+#define PCI_DEVICE_ID_INTEL_HDA_SKL_LP 0x9d70
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_LP 0x9d71
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_LP 0x9dc8
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_LP 0xa0c8
+#define PCI_DEVICE_ID_INTEL_HDA_SKL 0xa170
+#define PCI_DEVICE_ID_INTEL_HDA_KBL 0xa171
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_0 0xa1f0
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_1 0xa270
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_H 0xa2f0
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_H 0xa348
+#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
+#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8
+#define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8
+
+#define PCI_VENDOR_ID_WANGXUN 0x8088
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
@@ -3109,6 +3191,8 @@
#define PCI_VENDOR_ID_3COM_2 0xa727
+#define PCI_VENDOR_ID_SOLIDRUN 0xd063
+
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
diff --git a/include/linux/pcs-lynx.h b/include/linux/pcs-lynx.h
new file mode 100644
index 000000000000..7958cccd16f2
--- /dev/null
+++ b/include/linux/pcs-lynx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2020 NXP
+ * Lynx PCS helpers
+ */
+
+#ifndef __LINUX_PCS_LYNX_H
+#define __LINUX_PCS_LYNX_H
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+
+struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr);
+struct phylink_pcs *lynx_pcs_create_fwnode(struct fwnode_handle *node);
+
+void lynx_pcs_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_LYNX_H */
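
Usage follows the create/destroy pattern shared by the PCS helpers added below; a sketch, with bus and addr assumed to come from the caller's MDIO probe code:

        struct phylink_pcs *pcs = lynx_pcs_create_mdiodev(bus, addr);

        if (IS_ERR(pcs))
                return PTR_ERR(pcs);
        /* hand pcs to phylink; on teardown: */
        lynx_pcs_destroy(pcs);
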
diff --git a/include/linux/pcs-rzn1-miic.h b/include/linux/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..56d12b21365d
--- /dev/null
+++ b/include/linux/pcs-rzn1-miic.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_MIIC_H
+#define __LINUX_PCS_MIIC_H
+
+struct phylink;
+struct device_node;
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np);
+
+void miic_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_MIIC_H */
diff --git a/include/linux/pcs/pcs-mtk-lynxi.h b/include/linux/pcs/pcs-mtk-lynxi.h
new file mode 100644
index 000000000000..be3b4ab32f4a
--- /dev/null
+++ b/include/linux/pcs/pcs-mtk-lynxi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PCS_MTK_LYNXI_H
+#define __LINUX_PCS_MTK_LYNXI_H
+
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+
+#define MTK_SGMII_FLAG_PN_SWAP BIT(0)
+struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
+ struct regmap *regmap,
+ u32 ana_rgc3, u32 flags);
+void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs);
+#endif
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
new file mode 100644
index 000000000000..da3a6c30f6d2
--- /dev/null
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ */
+
+#ifndef __LINUX_PCS_XPCS_H
+#define __LINUX_PCS_XPCS_H
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#define NXP_SJA1105_XPCS_ID 0x00000010
+#define NXP_SJA1110_XPCS_ID 0x00000020
+
+/* AN mode */
+#define DW_AN_C73 1
+#define DW_AN_C37_SGMII 2
+#define DW_2500BASEX 3
+#define DW_AN_C37_1000BASEX 4
+#define DW_10GBASER 5
+
+/* device vendor OUI */
+#define DW_OUI_WX 0x0018fc80
+
+/* dev_flag */
+#define DW_DEV_TXGBE BIT(0)
+
+struct xpcs_id;
+
+struct dw_xpcs {
+ struct mdio_device *mdiodev;
+ const struct xpcs_id *id;
+ struct phylink_pcs pcs;
+ phy_interface_t interface;
+ int dev_flag;
+};
+
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface, int speed, int duplex);
+int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ const unsigned long *advertising, unsigned int neg_mode);
+void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
+int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
+ int enable);
+struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
+ phy_interface_t interface);
+void xpcs_destroy(struct dw_xpcs *xpcs);
+
+#endif /* __LINUX_PCS_XPCS_H */
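
A sketch of the XPCS lifecycle, again assuming an MDIO bus and address from the caller; the phylink_pcs embedded in struct dw_xpcs is what gets handed to phylink:

        struct dw_xpcs *xpcs;

        xpcs = xpcs_create_mdiodev(bus, addr, PHY_INTERFACE_MODE_SGMII);
        if (IS_ERR(xpcs))
                return PTR_ERR(xpcs);
        /* use &xpcs->pcs with phylink; on teardown: */
        xpcs_destroy(xpcs);
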
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
deleted file mode 100644
index 2a69db4b60b7..000000000000
--- a/include/linux/pda_power.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Common power driver for PDAs and phones with one or two external
- * power supplies (AC/USB) connected to main and backup batteries,
- * and optional builtin charger.
- *
- * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
- */
-
-#ifndef __PDA_POWER_H__
-#define __PDA_POWER_H__
-
-#define PDA_POWER_CHARGE_AC (1 << 0)
-#define PDA_POWER_CHARGE_USB (1 << 1)
-
-struct device;
-
-struct pda_power_pdata {
- int (*init)(struct device *dev);
- int (*is_ac_online)(void);
- int (*is_usb_online)(void);
- void (*set_charge)(int flags);
- void (*exit)(struct device *dev);
- int (*suspend)(pm_message_t state);
- int (*resume)(void);
-
- char **supplied_to;
- size_t num_supplicants;
-
- unsigned int wait_for_status; /* msecs, default is 500 */
- unsigned int wait_for_charger; /* msecs, default is 500 */
- unsigned int polling_interval; /* msecs, default is 2000 */
-
- unsigned long ac_max_uA; /* current to draw when on AC */
-
- bool use_otg_notifier;
-};
-
-#endif /* __PDA_POWER_H__ */
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
new file mode 100644
index 000000000000..4b4e9a98b37b
--- /dev/null
+++ b/include/linux/pds/pds_adminq.h
@@ -0,0 +1,1269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDS_CORE_ADMINQ_H_
+#define _PDS_CORE_ADMINQ_H_
+
+#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256
+
+enum pds_core_adminq_flags {
+ PDS_AQ_FLAG_FASTPOLL = BIT(1), /* completion poll at 1ms */
+};
+
+/*
+ * enum pds_core_adminq_opcode - AdminQ command opcodes
+ * These commands are only processed on AdminQ, not available in devcmd
+ */
+enum pds_core_adminq_opcode {
+ PDS_AQ_CMD_NOP = 0,
+
+ /* Client control */
+ PDS_AQ_CMD_CLIENT_REG = 6,
+ PDS_AQ_CMD_CLIENT_UNREG = 7,
+ PDS_AQ_CMD_CLIENT_CMD = 8,
+
+ /* LIF commands */
+ PDS_AQ_CMD_LIF_IDENTIFY = 20,
+ PDS_AQ_CMD_LIF_INIT = 21,
+ PDS_AQ_CMD_LIF_RESET = 22,
+ PDS_AQ_CMD_LIF_GETATTR = 23,
+ PDS_AQ_CMD_LIF_SETATTR = 24,
+ PDS_AQ_CMD_LIF_SETPHC = 25,
+
+ PDS_AQ_CMD_RX_MODE_SET = 30,
+ PDS_AQ_CMD_RX_FILTER_ADD = 31,
+ PDS_AQ_CMD_RX_FILTER_DEL = 32,
+
+ /* Queue commands */
+ PDS_AQ_CMD_Q_IDENTIFY = 39,
+ PDS_AQ_CMD_Q_INIT = 40,
+ PDS_AQ_CMD_Q_CONTROL = 41,
+
+ /* SR/IOV commands */
+ PDS_AQ_CMD_VF_GETATTR = 60,
+ PDS_AQ_CMD_VF_SETATTR = 61,
+};
+
+/*
+ * enum pds_core_notifyq_opcode - NotifyQ event codes
+ */
+enum pds_core_notifyq_opcode {
+ PDS_EVENT_LINK_CHANGE = 1,
+ PDS_EVENT_RESET = 2,
+ PDS_EVENT_XCVR = 5,
+ PDS_EVENT_CLIENT = 6,
+};
+
+#define PDS_COMP_COLOR_MASK 0x80
+
+/**
+ * struct pds_core_notifyq_event - Generic event reporting structure
+ * @eid: event number
+ * @ecode: event code
+ *
+ * This is the generic event report struct from which the other
+ * actual events will be formed.
+ */
+struct pds_core_notifyq_event {
+ __le64 eid;
+ __le16 ecode;
+};
+
+/**
+ * struct pds_core_link_change_event - Link change event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_LINK_CHANGE
+ * @link_status: link up/down, with error bits
+ * @link_speed: speed of the network link
+ *
+ * Sent when the network link state changes between UP and DOWN
+ */
+struct pds_core_link_change_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 link_status;
+ __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */
+};
+
+/**
+ * struct pds_core_reset_event - Reset event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_RESET
+ * @reset_code: reset type
+ * @state: 0=pending, 1=complete, 2=error
+ *
+ * Sent when the NIC or some subsystem is going to be or
+ * has been reset.
+ */
+struct pds_core_reset_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 reset_code;
+ u8 state;
+};
+
+/**
+ * struct pds_core_client_event - Client event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_CLIENT
+ * @client_id: client to send the event to
+ * @client_event: wrapped event struct for the client
+ *
+ * Sent when an event needs to be passed on to a client
+ */
+struct pds_core_client_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 client_id;
+ u8 client_event[54];
+};
+
+/**
+ * struct pds_core_notifyq_cmd - Placeholder for building qcq
+ * @data: anonymous field for building the qcq
+ */
+struct pds_core_notifyq_cmd {
+ __le32 data; /* Not used but needed for qcq structure */
+};
+
+/*
+ * union pds_core_notifyq_comp - Overlay of notifyq event structures
+ */
+union pds_core_notifyq_comp {
+ struct {
+ __le64 eid;
+ __le16 ecode;
+ };
+ struct pds_core_notifyq_event event;
+ struct pds_core_link_change_event link_change;
+ struct pds_core_reset_event reset;
+ u8 data[64];
+};
+
+#define PDS_DEVNAME_LEN 32
+/**
+ * struct pds_core_client_reg_cmd - Register a new client with DSC
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_REG
+ * @rsvd: word boundary padding
+ * @devname: text name of client device
+ * @vif_type: what type of device (enum pds_core_vif_types)
+ *
+ * Tell the DSC of the new client, and receive a client_id from DSC.
+ */
+struct pds_core_client_reg_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ char devname[PDS_DEVNAME_LEN];
+ u8 vif_type;
+};
+
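
A sketch of filling this command; the vif_type value is a placeholder for an enum pds_core_vif_types entry (defined elsewhere in this header), and posting the command is left to the driver's AdminQ plumbing:

        struct pds_core_client_reg_cmd cmd = {
                .opcode = PDS_AQ_CMD_CLIENT_REG,
                .vif_type = 1,  /* placeholder vif type value */
        };

        strscpy(cmd.devname, "pds_client0", sizeof(cmd.devname));
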
+/**
+ * struct pds_core_client_reg_comp - Client registration completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @client_id: New id assigned by DSC
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_client_reg_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le16 client_id;
+ u8 rsvd1[9];
+ u8 color;
+};
+
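
The trailing color byte in each completion follows the usual ring convention (an assumption about the device contract, consistent with PDS_COMP_COLOR_MASK above): the producer flips the color each time it wraps the ring, so a descriptor is new only while its color matches what the consumer currently expects. A sketch:

        static bool comp_is_fresh(const struct pds_core_client_reg_comp *comp,
                                  u8 expected_color)
        {
                return (comp->color & PDS_COMP_COLOR_MASK) == expected_color;
        }
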
+/**
+ * struct pds_core_client_unreg_cmd - Unregister a client from DSC
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_UNREG
+ * @rsvd: word boundary padding
+ * @client_id: id of client being removed
+ *
+ * Tell the DSC this client is going away and remove its context.
+ * This uses the generic completion.
+ */
+struct pds_core_client_unreg_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+};
+
+/**
+ * struct pds_core_client_request_cmd - Pass along a wrapped client AdminQ cmd
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_CMD
+ * @rsvd: word boundary padding
+ * @client_id: id of the client the wrapped command is posted for
+ * @client_cmd: the wrapped client command
+ *
+ * Proxy post an adminq command for the client.
+ * This uses the generic completion.
+ */
+struct pds_core_client_request_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+ u8 client_cmd[60];
+};
+
+#define PDS_CORE_MAX_FRAGS 16
+
+#define PDS_CORE_QCQ_F_INITED BIT(0)
+#define PDS_CORE_QCQ_F_SG BIT(1)
+#define PDS_CORE_QCQ_F_INTR BIT(2)
+#define PDS_CORE_QCQ_F_TX_STATS BIT(3)
+#define PDS_CORE_QCQ_F_RX_STATS BIT(4)
+#define PDS_CORE_QCQ_F_NOTIFYQ BIT(5)
+#define PDS_CORE_QCQ_F_CMB_RINGS BIT(6)
+#define PDS_CORE_QCQ_F_CORE BIT(7)
+
+enum pds_core_lif_type {
+ PDS_CORE_LIF_TYPE_DEFAULT = 0,
+};
+
+#define PDS_CORE_IFNAMSIZ 16
+
+/**
+ * enum pds_core_logical_qtype - Logical Queue Types
+ * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
+ * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
+ * @PDS_CORE_QTYPE_RXQ: Receive Queue
+ * @PDS_CORE_QTYPE_TXQ: Transmit Queue
+ * @PDS_CORE_QTYPE_EQ: Event Queue
+ * @PDS_CORE_QTYPE_MAX: Max queue type supported
+ */
+enum pds_core_logical_qtype {
+ PDS_CORE_QTYPE_ADMINQ = 0,
+ PDS_CORE_QTYPE_NOTIFYQ = 1,
+ PDS_CORE_QTYPE_RXQ = 2,
+ PDS_CORE_QTYPE_TXQ = 3,
+ PDS_CORE_QTYPE_EQ = 4,
+
+ PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
+};
+
+/**
+ * union pds_core_lif_config - LIF configuration
+ * @state: LIF state (enum pds_core_lif_state)
+ * @rsvd: Word boundary padding
+ * @name: LIF name
+ * @rsvd2: Word boundary padding
+ * @features: LIF features active (enum pds_core_hw_features)
+ * @queue_count: Queue counts per queue-type
+ * @words: Full union buffer size
+ */
+union pds_core_lif_config {
+ struct {
+ u8 state;
+ u8 rsvd[3];
+ char name[PDS_CORE_IFNAMSIZ];
+ u8 rsvd2[12];
+ __le64 features;
+ __le32 queue_count[PDS_CORE_QTYPE_MAX];
+ } __packed;
+ __le32 words[64];
+};
+
+/**
+ * struct pds_core_lif_status - LIF status register
+ * @eid: most recent NotifyQ event id
+ * @rsvd: padding out to the full 64-byte struct size
+ */
+struct pds_core_lif_status {
+ __le64 eid;
+ u8 rsvd[56];
+};
+
+/**
+ * struct pds_core_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ */
+struct pds_core_lif_info {
+ union pds_core_lif_config config;
+ struct pds_core_lif_status status;
+};
+
+/**
+ * struct pds_core_lif_identity - LIF identity information (type-specific)
+ * @features: LIF features (see enum pds_core_hw_features)
+ * @version: Identify structure version
+ * @hw_index: LIF hardware index
+ * @rsvd: Word boundary padding
+ * @max_nb_sessions: Maximum number of sessions supported
+ * @rsvd2: buffer padding
+ * @config: LIF config struct with features, q counts
+ */
+struct pds_core_lif_identity {
+ __le64 features;
+ u8 version;
+ u8 hw_index;
+ u8 rsvd[2];
+ __le32 max_nb_sessions;
+ u8 rsvd2[120];
+ union pds_core_lif_config config;
+};
+
+/**
+ * struct pds_core_lif_identify_cmd - Get LIF identity info command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_IDENTIFY
+ * @type: LIF type (enum pds_core_lif_type)
+ * @client_id: Client identifier
+ * @ver: Version of identify returned by device
+ * @rsvd: Word boundary padding
+ * @ident_pa: DMA address to receive identity info
+ *
+ * Firmware will copy LIF identity data (struct pds_core_lif_identity)
+ * into the buffer address given.
+ */
+struct pds_core_lif_identify_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le64 ident_pa;
+};
+
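
Since the firmware writes the identity data back by DMA, the buffer must be device-visible. A sketch, where dev is assumed to be the PCI function's struct device:

        struct pds_core_lif_identity *ident;
        struct pds_core_lif_identify_cmd cmd;
        dma_addr_t ident_pa;

        ident = dma_alloc_coherent(dev, sizeof(*ident), &ident_pa, GFP_KERNEL);
        if (!ident)
                return -ENOMEM;

        cmd = (struct pds_core_lif_identify_cmd) {
                .opcode = PDS_AQ_CMD_LIF_IDENTIFY,
                .type = PDS_CORE_LIF_TYPE_DEFAULT,
                .ident_pa = cpu_to_le64(ident_pa),
        };
        /* post cmd via the AdminQ, then dma_free_coherent() the buffer */
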
+/**
+ * struct pds_core_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @ver: Version of identify returned by device
+ * @bytes: Bytes copied into the buffer
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_identify_comp {
+ u8 status;
+ u8 ver;
+ __le16 bytes;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_init_cmd - LIF init command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_INIT
+ * @type: LIF type (enum pds_core_lif_type)
+ * @client_id: Client identifier
+ * @rsvd: Word boundary padding
+ * @info_pa: Destination address for LIF info (struct pds_core_lif_info)
+ */
+struct pds_core_lif_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ __le32 rsvd;
+ __le64 info_pa;
+};
+
+/**
+ * struct pds_core_lif_init_comp - LIF init command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @hw_index: Hardware index of the initialized LIF
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 hw_index;
+ u8 rsvd1[11];
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_reset_cmd - LIF reset command
+ * Will reset only the specified LIF.
+ * @opcode: Opcode PDS_AQ_CMD_LIF_RESET
+ * @rsvd: Word boundary padding
+ * @client_id: Client identifier
+ */
+struct pds_core_lif_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+};
+
+/**
+ * enum pds_core_lif_attr - List of LIF attributes
+ * @PDS_CORE_LIF_ATTR_STATE: LIF state attribute
+ * @PDS_CORE_LIF_ATTR_NAME: LIF name attribute
+ * @PDS_CORE_LIF_ATTR_FEATURES: LIF features attribute
+ * @PDS_CORE_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
+ */
+enum pds_core_lif_attr {
+ PDS_CORE_LIF_ATTR_STATE = 0,
+ PDS_CORE_LIF_ATTR_NAME = 1,
+ PDS_CORE_LIF_ATTR_FEATURES = 4,
+ PDS_CORE_LIF_ATTR_STATS_CTRL = 6,
+};
+
+/**
+ * struct pds_core_lif_setattr_cmd - Set LIF attributes on the NIC
+ * @opcode: Opcode PDS_AQ_CMD_LIF_SETATTR
+ * @attr: Attribute type (enum pds_core_lif_attr)
+ * @client_id: Client identifier
+ * @state: LIF state (enum pds_core_lif_state)
+ * @name: The name string, 0 terminated
+ * @features: Features (enum pds_core_hw_features)
+ * @stats_ctl: Stats control commands (enum pds_core_stats_ctl_cmd)
+ * @rsvd: Command Buffer padding
+ */
+struct pds_core_lif_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 client_id;
+ union {
+ u8 state;
+ char name[PDS_CORE_IFNAMSIZ];
+ __le64 features;
+ u8 stats_ctl;
+ u8 rsvd[60];
+ } __packed;
+};
+
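+/*
+ * Usage sketch (illustrative only, not part of the interface): setting
+ * the LIF name attribute. The client_id value is hypothetical, and
+ * PDS_AQ_CMD_LIF_SETATTR / PDS_CORE_IFNAMSIZ are assumed to be defined
+ * earlier in this header.
+ *
+ *    struct pds_core_lif_setattr_cmd cmd = {
+ *        .opcode = PDS_AQ_CMD_LIF_SETATTR,
+ *        .attr = PDS_CORE_LIF_ATTR_NAME,
+ *        .client_id = cpu_to_le16(client_id),
+ *    };
+ *
+ *    strscpy(cmd.name, "pds0", PDS_CORE_IFNAMSIZ);
+ */
+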
+/**
+ * struct pds_core_lif_setattr_comp - LIF set attr command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @features: Features (enum pds_core_hw_features)
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_setattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_getattr_cmd - Get LIF attributes from the NIC
+ * @opcode: Opcode PDS_AQ_CMD_LIF_GETATTR
+ * @attr: Attribute type (enum pds_core_lif_attr)
+ * @client_id: Client identifier
+ */
+struct pds_core_lif_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 client_id;
+};
+
+/**
+ * struct pds_core_lif_getattr_comp - LIF get attr command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum pds_core_lif_state)
+ * @name: LIF name string, 0 terminated
+ * @features: Features (enum pds_core_hw_features)
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_getattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ u8 state;
+ __le64 features;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+/**
+ * struct pds_core_q_identity - Queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @rsvd: Word boundary padding
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @rsvd2: Word boundary padding
+ */
+struct pds_core_q_identity {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define PDS_CORE_QIDENT_F_CQ 0x01 /* queue has completion ring */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ u8 rsvd2[6];
+};
+
+/**
+ * struct pds_core_q_identify_cmd - queue identify command
+ * @opcode: Opcode PDS_AQ_CMD_Q_IDENTIFY
+ * @type: Logical queue type (enum pds_core_logical_qtype)
+ * @client_id: Client identifier
+ * @ver: Highest queue type version that the driver supports
+ * @rsvd: Word boundary padding
+ * @ident_pa: DMA address to receive the data (struct pds_core_q_identity)
+ */
+struct pds_core_q_identify_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_core_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+/**
+ * struct pds_core_q_init_cmd - Queue init command
+ * @opcode: Opcode PDS_AQ_CMD_Q_INIT
+ * @type: Logical queue type
+ * @client_id: Client identifier
+ * @ver: Queue type version
+ * @rsvd: Word boundary padding
+ * @index: (LIF, qtype) relative admin queue index
+ * @pid: Process ID
+ * @intr_index: Interrupt control register index, or Event queue index
+ * @flags:
+ * IRQ: Interrupt requested on completion
+ * ENA: Enable the queue. If ENA=0 the queue is initialized
+ * but remains disabled, to be later enabled with the
+ * Queue Enable command. If ENA=1, then queue is
+ * initialized and then enabled.
+ * @cos: Class of service for this queue
+ * @ring_size: Queue ring size, encoded as a log2(size), in
+ * number of descriptors. The actual ring size is
+ * (1 << ring_size). For example, to select a ring size
+ * of 64 descriptors write ring_size = 6. The minimum
+ * ring_size value is 2 for a ring of 4 descriptors.
+ * The maximum ring_size value is 12 for a ring of 4k
+ * descriptors. Values of ring_size <2 and >12 are
+ * reserved.
+ * @ring_base: Queue ring base address
+ * @cq_ring_base: Completion queue ring base address
+ */
+struct pds_core_q_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le32 index;
+ __le16 pid;
+ __le16 intr_index;
+ __le16 flags;
+#define PDS_CORE_QINIT_F_IRQ 0x01 /* Request interrupt on completion */
+#define PDS_CORE_QINIT_F_ENA 0x02 /* Enable the queue */
+ u8 cos;
+#define PDS_CORE_QSIZE_MIN_LG2 2
+#define PDS_CORE_QSIZE_MAX_LG2 12
+ u8 ring_size;
+ __le64 ring_base;
+ __le64 cq_ring_base;
+} __packed;
+
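+/*
+ * Sketch (illustrative only) of the log2 ring_size encoding described
+ * above: a 64-descriptor ring is requested as ilog2(64) == 6, within the
+ * PDS_CORE_QSIZE_MIN/MAX_LG2 bounds. The client_id and DMA addresses are
+ * hypothetical.
+ *
+ *    struct pds_core_q_init_cmd cmd = {
+ *        .opcode = PDS_AQ_CMD_Q_INIT,
+ *        .client_id = cpu_to_le16(client_id),
+ *        .flags = cpu_to_le16(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA),
+ *        .ring_size = ilog2(64),    // == 6
+ *        .ring_base = cpu_to_le64(ring_dma_addr),
+ *        .cq_ring_base = cpu_to_le64(cq_dma_addr),
+ *    };
+ */
+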
+/**
+ * struct pds_core_q_init_comp - Queue init command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @hw_index: Hardware Queue ID
+ * @hw_type: Hardware Queue type
+ * @rsvd2: Word boundary padding
+ * @color: Color
+ */
+struct pds_core_q_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le32 hw_index;
+ u8 hw_type;
+ u8 rsvd2[6];
+ u8 color;
+};
+
+/*
+ * enum pds_vdpa_cmd_opcode - vDPA Device commands
+ */
+enum pds_vdpa_cmd_opcode {
+ PDS_VDPA_CMD_INIT = 48,
+ PDS_VDPA_CMD_IDENT = 49,
+ PDS_VDPA_CMD_RESET = 51,
+ PDS_VDPA_CMD_VQ_RESET = 52,
+ PDS_VDPA_CMD_VQ_INIT = 53,
+ PDS_VDPA_CMD_STATUS_UPDATE = 54,
+ PDS_VDPA_CMD_SET_FEATURES = 55,
+ PDS_VDPA_CMD_SET_ATTR = 56,
+};
+
+/**
+ * struct pds_vdpa_cmd - generic command
+ * @opcode: Opcode
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_init_cmd - INIT command
+ * @opcode: Opcode PDS_VDPA_CMD_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_ident - vDPA identification data
+ * @hw_features: vDPA features supported by device
+ * @max_vqs: max queues available (2 queues for a single queuepair)
+ * @max_qlen: log(2) of maximum number of descriptors
+ * @min_qlen: log(2) of minimum number of descriptors
+ *
+ * This struct is used in a DMA block that is set up for the PDS_VDPA_CMD_IDENT
+ * transaction. Set up the DMA block and send its address in the IDENT cmd
+ * data; the DSC will write the ident information, and the DMA block can be
+ * released after reading the answer (see the usage sketch below). If the
+ * completion status is 0 the information is valid, else there was an error
+ * and the data should be considered invalid.
+ */
+struct pds_vdpa_ident {
+ __le64 hw_features;
+ __le16 max_vqs;
+ __le16 max_qlen;
+ __le16 min_qlen;
+};
+
+/**
+ * struct pds_vdpa_ident_cmd - IDENT command
+ * @opcode: Opcode PDS_VDPA_CMD_IDENT
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @len: length of ident info DMA space
+ * @ident_pa: address for DMA of ident info (struct pds_vdpa_ident)
+ * only used for this transaction, then forgotten by DSC
+ */
+struct pds_vdpa_ident_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 len;
+ __le64 ident_pa;
+};
+
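+/*
+ * Usage sketch (illustrative only) of the IDENT flow described above,
+ * with error handling omitted; the dev pointer, vf_id, and the posting
+ * step are hypothetical.
+ *
+ *    struct pds_vdpa_ident *ident;
+ *    dma_addr_t ident_pa;
+ *
+ *    ident = dma_alloc_coherent(dev, sizeof(*ident), &ident_pa, GFP_KERNEL);
+ *
+ *    struct pds_vdpa_ident_cmd cmd = {
+ *        .opcode = PDS_VDPA_CMD_IDENT,
+ *        .vf_id = cpu_to_le16(vf_id),
+ *        .len = cpu_to_le32(sizeof(*ident)),
+ *        .ident_pa = cpu_to_le64(ident_pa),
+ *    };
+ *
+ *    // post cmd; on completion status 0, consume ident->hw_features,
+ *    // ident->max_vqs, etc., then release the block:
+ *    dma_free_coherent(dev, sizeof(*ident), ident, ident_pa);
+ */
+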
+/**
+ * struct pds_vdpa_status_cmd - STATUS_UPDATE command
+ * @opcode: Opcode PDS_VDPA_CMD_STATUS_UPDATE
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @status: new status bits
+ */
+struct pds_vdpa_status_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 status;
+};
+
+/**
+ * enum pds_vdpa_attr - List of VDPA device attributes
+ * @PDS_VDPA_ATTR_MAC: MAC address
+ * @PDS_VDPA_ATTR_MAX_VQ_PAIRS: Max virtqueue pairs
+ */
+enum pds_vdpa_attr {
+ PDS_VDPA_ATTR_MAC = 1,
+ PDS_VDPA_ATTR_MAX_VQ_PAIRS = 2,
+};
+
+/**
+ * struct pds_vdpa_setattr_cmd - SET_ATTR command
+ * @opcode: Opcode PDS_VDPA_CMD_SET_ATTR
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @attr: attribute to be changed (enum pds_vdpa_attr)
+ * @pad: Word boundary padding
+ * @mac: new mac address to be assigned as vdpa device address
+ * @max_vq_pairs: new limit of virtqueue pairs
+ */
+struct pds_vdpa_setattr_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 attr;
+ u8 pad[3];
+ union {
+ u8 mac[6];
+ __le16 max_vq_pairs;
+ } __packed;
+};
+
+/**
+ * struct pds_vdpa_vq_init_cmd - queue init command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id (bit0 clear = rx, bit0 set = tx, qid=N is ctrlq)
+ * @len: log(2) of max descriptor count
+ * @desc_addr: DMA address of descriptor area
+ * @avail_addr: DMA address of available descriptors (aka driver area)
+ * @used_addr: DMA address of used descriptors (aka device area)
+ * @intr_index: interrupt index
+ * @avail_index: initial device position in available ring
+ * @used_index: initial device position in used ring
+ */
+struct pds_vdpa_vq_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+ __le16 len;
+ __le64 desc_addr;
+ __le64 avail_addr;
+ __le64 used_addr;
+ __le16 intr_index;
+ __le16 avail_index;
+ __le16 used_index;
+};
+
+/**
+ * struct pds_vdpa_vq_init_comp - queue init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @hw_qtype: HW queue type, used in doorbell selection
+ * @hw_qindex: HW queue index, used in doorbell selection
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_init_comp {
+ u8 status;
+ u8 hw_qtype;
+ __le16 hw_qindex;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_cmd - queue reset command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_RESET
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id
+ */
+struct pds_vdpa_vq_reset_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_comp - queue reset completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd0: Word boundary padding
+ * @avail_index: current device position in available ring
+ * @used_index: current device position in used ring
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_reset_comp {
+ u8 status;
+ u8 rsvd0;
+ __le16 avail_index;
+ __le16 used_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_set_features_cmd - set hw features
+ * @opcode: Opcode PDS_VDPA_CMD_SET_FEATURES
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @rsvd: Word boundary padding
+ * @features: Feature bit mask
+ */
+struct pds_vdpa_set_features_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le32 rsvd;
+ __le64 features;
+};
+
+#define PDS_LM_DEVICE_STATE_LENGTH 65536
+#define PDS_LM_CHECK_DEVICE_STATE_LENGTH(X) \
+ PDS_CORE_SIZE_CHECK(union, PDS_LM_DEVICE_STATE_LENGTH, X)
+
+/*
+ * enum pds_lm_cmd_opcode - Live Migration Device commands
+ */
+enum pds_lm_cmd_opcode {
+ PDS_LM_CMD_HOST_VF_STATUS = 1,
+
+ /* Device state commands */
+ PDS_LM_CMD_STATE_SIZE = 16,
+ PDS_LM_CMD_SUSPEND = 18,
+ PDS_LM_CMD_SUSPEND_STATUS = 19,
+ PDS_LM_CMD_RESUME = 20,
+ PDS_LM_CMD_SAVE = 21,
+ PDS_LM_CMD_RESTORE = 22,
+
+ /* Dirty page tracking commands */
+ PDS_LM_CMD_DIRTY_STATUS = 32,
+ PDS_LM_CMD_DIRTY_ENABLE = 33,
+ PDS_LM_CMD_DIRTY_DISABLE = 34,
+ PDS_LM_CMD_DIRTY_READ_SEQ = 35,
+ PDS_LM_CMD_DIRTY_WRITE_ACK = 36,
+};
+
+/**
+ * struct pds_lm_cmd - generic command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Structure padding to 60 Bytes
+ */
+struct pds_lm_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[56];
+};
+
+/**
+ * struct pds_lm_state_size_cmd - STATE_SIZE command
+ * @opcode: Opcode PDS_LM_CMD_STATE_SIZE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ */
+struct pds_lm_state_size_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_state_size_comp - STATE_SIZE command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @size: Size of the device state
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_lm_state_size_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 size;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_lm_suspend_resume_type {
+ PDS_LM_SUSPEND_RESUME_TYPE_FULL = 0,
+ PDS_LM_SUSPEND_RESUME_TYPE_P2P = 1,
+};
+
+/**
+ * struct pds_lm_suspend_cmd - SUSPEND command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_suspend_status_cmd - SUSPEND status command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_resume_cmd - RESUME command
+ * @opcode: Opcode PDS_LM_CMD_RESUME
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of resume (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_resume_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_sg_elem {
+ __le64 addr;
+ __le32 len;
+ __le16 rsvd[2];
+};
+
+/**
+ * struct pds_lm_save_cmd - SAVE command
+ * @opcode: Opcode PDS_LM_CMD_SAVE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_save_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
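+/*
+ * Usage sketch (illustrative only): describing the saved device state
+ * with pds_lm_sg_elem entries and pointing a SAVE command at the list.
+ * The sgl, sgl_pa, page_dma, and num values are hypothetical.
+ *
+ *    for (i = 0; i < num; i++) {
+ *        sgl[i].addr = cpu_to_le64(page_dma[i]);
+ *        sgl[i].len = cpu_to_le32(PAGE_SIZE);
+ *    }
+ *
+ *    struct pds_lm_save_cmd cmd = {
+ *        .opcode = PDS_LM_CMD_SAVE,
+ *        .vf_id = cpu_to_le16(vf_id),
+ *        .sgl_addr = cpu_to_le64(sgl_pa),
+ *        .num_sge = cpu_to_le32(num),
+ *    };
+ */
+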
+/**
+ * struct pds_lm_restore_cmd - RESTORE command
+ * @opcode: Opcode PDS_LM_CMD_RESTORE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_restore_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
+/**
+ * union pds_lm_dev_state - device state information
+ * @words: Device state words
+ */
+union pds_lm_dev_state {
+ __le32 words[PDS_LM_DEVICE_STATE_LENGTH / sizeof(__le32)];
+};
+
+enum pds_lm_host_vf_status {
+ PDS_LM_STA_NONE = 0,
+ PDS_LM_STA_IN_PROGRESS,
+ PDS_LM_STA_MAX,
+};
+
+/**
+ * struct pds_lm_dirty_region_info - Memory region info for STATUS and ENABLE
+ * @dma_base: Base address of the DMA-contiguous memory region
+ * @page_count: Number of pages in the memory region
+ * @page_size_log2: Log2 page size in the memory region
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_dirty_region_info {
+ __le64 dma_base;
+ __le32 page_count;
+ u8 page_size_log2;
+ u8 rsvd[3];
+};
+
+/**
+ * struct pds_lm_dirty_status_cmd - DIRTY_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @max_regions: Capacity of the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The lesser of max_regions (from the command) and num_regions (from the
+ * completion) entries of struct pds_lm_dirty_region_info will be written
+ * to regions_dma (see the usage sketch below).
+ *
+ * The max_regions may be zero, in which case regions_dma is ignored. In that
+ * case, the completion will only report the maximum number of regions
+ * supported by the device, and the number of regions currently enabled.
+ */
+struct pds_lm_dirty_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 max_regions;
+ u8 rsvd2[3];
+ __le64 regions_dma;
+} __packed;
+
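+/*
+ * Usage sketch (illustrative only) of the two-step query described above:
+ * first ask with max_regions = 0 to learn the device limits, then ask
+ * again with a real region info buffer. The vf_id value is hypothetical.
+ *
+ *    struct pds_lm_dirty_status_cmd cmd = {
+ *        .opcode = PDS_LM_CMD_DIRTY_STATUS,
+ *        .vf_id = cpu_to_le16(vf_id),
+ *        .max_regions = 0,    // regions_dma is ignored on this pass
+ *    };
+ *
+ *    // the completion reports comp.max_regions and comp.num_regions;
+ *    // re-issue with that many struct pds_lm_dirty_region_info entries
+ *    // behind regions_dma to fetch the region details.
+ */
+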
+/**
+ * enum pds_lm_dirty_bmp_type - Type of dirty page bitmap
+ * @PDS_LM_DIRTY_BMP_TYPE_NONE: No bitmap / disabled
+ * @PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK: Seq/Ack bitmap representation
+ */
+enum pds_lm_dirty_bmp_type {
+ PDS_LM_DIRTY_BMP_TYPE_NONE = 0,
+ PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK = 1,
+};
+
+/**
+ * struct pds_lm_dirty_status_comp - STATUS command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @max_regions: Maximum number of regions supported by the device
+ * @num_regions: Number of regions currently enabled
+ * @bmp_type: Type of dirty bitmap representation
+ * @rsvd2: Word boundary padding
+ * @bmp_type_mask: Mask of supported bitmap types, bit index per type
+ * @rsvd3: Word boundary padding
+ * @color: Color bit
+ *
+ * This completion descriptor is used for STATUS, ENABLE, and DISABLE.
+ */
+struct pds_lm_dirty_status_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 max_regions;
+ u8 num_regions;
+ u8 bmp_type;
+ u8 rsvd2;
+ __le32 bmp_type_mask;
+ u8 rsvd3[3];
+ u8 color;
+};
+
+/**
+ * struct pds_lm_dirty_enable_cmd - DIRTY_ENABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_ENABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @bmp_type: Type of dirty bitmap representation
+ * @num_regions: Number of entries in the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The num_regions must be nonzero, and less than or equal to the maximum
+ * number of regions supported by the device.
+ *
+ * The memory regions should not overlap.
+ *
+ * The information should be initialized by the driver. The device may modify
+ * the information on successful completion, such as by size-aligning the
+ * number of pages in a region.
+ *
+ * The modified number of pages will be greater than or equal to the page count
+ * given in the enable command, and at least as coarsely aligned as the given
+ * value. For example, the count might be aligned to a multiple of 64, but
+ * if the value is already a multiple of 128 or higher, it will not change.
+ * If the driver requires its own minimum alignment of the number of pages, the
+ * driver should account for that already in the region info of this command.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion.
+ */
+struct pds_lm_dirty_enable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 bmp_type;
+ u8 num_regions;
+ u8 rsvd2[2];
+ __le64 regions_dma;
+} __packed;
+
+/**
+ * struct pds_lm_dirty_disable_cmd - DIRTY_DISABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_DISABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ *
+ * Dirty page tracking will be disabled. This may be called in any state, as
+ * long as dirty page tracking is supported by the device, to ensure that dirty
+ * page tracking is disabled.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion. On
+ * success, num_regions will be zero.
+ */
+struct pds_lm_dirty_disable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_dirty_seq_ack_cmd - DIRTY_READ_SEQ or _WRITE_ACK command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_[READ_SEQ|WRITE_ACK]
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @off_bytes: Byte offset in the bitmap
+ * @len_bytes: Number of bytes to transfer
+ * @num_sge: Number of DMA scatter gather elements
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: DMA address of scatter gather list
+ *
+ * Read bytes from the SEQ bitmap, or write bytes into the ACK bitmap.
+ *
+ * This command treats the entire bitmap as a byte buffer. It does not
+ * distinguish between guest memory regions. The driver should refer to the
+ * number of pages in each region, according to PDS_LM_CMD_DIRTY_STATUS, to
+ * determine the region boundaries in the bitmap. Each region will be
+ * represented by exactly the number of bits as the page count for that region,
+ * immediately following the last bit of the previous region.
+ */
+struct pds_lm_dirty_seq_ack_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 off_bytes;
+ __le32 len_bytes;
+ __le16 num_sge;
+ u8 rsvd2[2];
+ __le64 sgl_addr;
+} __packed;
+
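+/*
+ * Sketch (illustrative only) of the bitmap layout math described above:
+ * region N begins immediately after the bits of regions 0..N-1, so its
+ * offset can be accumulated from the per-region page counts reported by
+ * PDS_LM_CMD_DIRTY_STATUS (assuming, for this sketch, that the region
+ * boundary lands on a byte boundary).
+ *
+ *    u64 off_bits = 0;
+ *
+ *    for (i = 0; i < region; i++)
+ *        off_bits += le32_to_cpu(info[i].page_count);
+ *
+ *    cmd.off_bytes = cpu_to_le32(off_bits / BITS_PER_BYTE);
+ */
+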
+/**
+ * struct pds_lm_host_vf_status_cmd - HOST_VF_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_HOST_VF_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @status: Current LM status of host VF driver (enum pds_lm_host_vf_status)
+ */
+struct pds_lm_host_vf_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 status;
+};
+
+union pds_core_adminq_cmd {
+ u8 opcode;
+ u8 bytes[64];
+
+ struct pds_core_client_reg_cmd client_reg;
+ struct pds_core_client_unreg_cmd client_unreg;
+ struct pds_core_client_request_cmd client_request;
+
+ struct pds_core_lif_identify_cmd lif_ident;
+ struct pds_core_lif_init_cmd lif_init;
+ struct pds_core_lif_reset_cmd lif_reset;
+ struct pds_core_lif_setattr_cmd lif_setattr;
+ struct pds_core_lif_getattr_cmd lif_getattr;
+
+ struct pds_core_q_identify_cmd q_ident;
+ struct pds_core_q_init_cmd q_init;
+
+ struct pds_vdpa_cmd vdpa;
+ struct pds_vdpa_init_cmd vdpa_init;
+ struct pds_vdpa_ident_cmd vdpa_ident;
+ struct pds_vdpa_status_cmd vdpa_status;
+ struct pds_vdpa_setattr_cmd vdpa_setattr;
+ struct pds_vdpa_set_features_cmd vdpa_set_features;
+ struct pds_vdpa_vq_init_cmd vdpa_vq_init;
+ struct pds_vdpa_vq_reset_cmd vdpa_vq_reset;
+
+ struct pds_lm_suspend_cmd lm_suspend;
+ struct pds_lm_suspend_status_cmd lm_suspend_status;
+ struct pds_lm_resume_cmd lm_resume;
+ struct pds_lm_state_size_cmd lm_state_size;
+ struct pds_lm_save_cmd lm_save;
+ struct pds_lm_restore_cmd lm_restore;
+ struct pds_lm_host_vf_status_cmd lm_host_vf_status;
+ struct pds_lm_dirty_status_cmd lm_dirty_status;
+ struct pds_lm_dirty_enable_cmd lm_dirty_enable;
+ struct pds_lm_dirty_disable_cmd lm_dirty_disable;
+ struct pds_lm_dirty_seq_ack_cmd lm_dirty_seq_ack;
+};
+
+union pds_core_adminq_comp {
+ struct {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+ };
+ u32 words[4];
+
+ struct pds_core_client_reg_comp client_reg;
+
+ struct pds_core_lif_identify_comp lif_ident;
+ struct pds_core_lif_init_comp lif_init;
+ struct pds_core_lif_setattr_comp lif_setattr;
+ struct pds_core_lif_getattr_comp lif_getattr;
+
+ struct pds_core_q_identify_comp q_ident;
+ struct pds_core_q_init_comp q_init;
+
+ struct pds_vdpa_vq_init_comp vdpa_vq_init;
+ struct pds_vdpa_vq_reset_comp vdpa_vq_reset;
+
+ struct pds_lm_state_size_comp lm_state_size;
+ struct pds_lm_dirty_status_comp lm_dirty_status;
+};
+
+#ifndef __CHECKER__
+static_assert(sizeof(union pds_core_adminq_cmd) == 64);
+static_assert(sizeof(union pds_core_adminq_comp) == 16);
+static_assert(sizeof(union pds_core_notifyq_comp) == 64);
+#endif /* __CHECKER__ */
+
+/* The color bit is a 'done' bit for the completion descriptors
+ * where the meaning alternates between '1' and '0' for alternating
+ * passes through the completion descriptor ring.
+ */
+static inline bool pdsc_color_match(u8 color, bool done_color)
+{
+ return (!!(color & PDS_COMP_COLOR_MASK)) == done_color;
+}
+
+struct pdsc;
+int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll);
+
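+/*
+ * Usage sketch (illustrative only): posting a LIF reset through the
+ * AdminQ. The pdsc pointer and client_id are hypothetical; the color bit
+ * of each completion is typically consumed by the ring handling via
+ * pdsc_color_match() above.
+ *
+ *    union pds_core_adminq_cmd cmd = {
+ *        .lif_reset = {
+ *            .opcode = PDS_AQ_CMD_LIF_RESET,
+ *            .client_id = cpu_to_le16(client_id),
+ *        },
+ *    };
+ *    union pds_core_adminq_comp comp = {};
+ *
+ *    err = pdsc_adminq_post(pdsc, &cmd, &comp, true);
+ */
+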
+#endif /* _PDS_CORE_ADMINQ_H_ */
diff --git a/include/linux/pds/pds_auxbus.h b/include/linux/pds/pds_auxbus.h
new file mode 100644
index 000000000000..214ef12302d0
--- /dev/null
+++ b/include/linux/pds/pds_auxbus.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDSC_AUXBUS_H_
+#define _PDSC_AUXBUS_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct pds_auxiliary_dev {
+ struct auxiliary_device aux_dev;
+ struct pci_dev *vf_pdev;
+ u16 client_id;
+};
+
+int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ union pds_core_adminq_cmd *req,
+ size_t req_len,
+ union pds_core_adminq_comp *resp,
+ u64 flags);
+#endif /* _PDSC_AUXBUS_H_ */
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
new file mode 100644
index 000000000000..30581e2e04cc
--- /dev/null
+++ b/include/linux/pds/pds_common.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_COMMON_H_
+#define _PDS_COMMON_H_
+
+#define PDS_CORE_DRV_NAME "pds_core"
+
+/* the device's internal addressing uses up to 52 bits */
+#define PDS_CORE_ADDR_LEN 52
+#define PDS_CORE_ADDR_MASK (BIT_ULL(PDS_CORE_ADDR_LEN) - 1)
+#define PDS_PAGE_SIZE 4096
+
+enum pds_core_driver_type {
+ PDS_DRIVER_LINUX = 1,
+ PDS_DRIVER_WIN = 2,
+ PDS_DRIVER_DPDK = 3,
+ PDS_DRIVER_FREEBSD = 4,
+ PDS_DRIVER_IPXE = 5,
+ PDS_DRIVER_ESXI = 6,
+};
+
+enum pds_core_vif_types {
+ PDS_DEV_TYPE_CORE = 0,
+ PDS_DEV_TYPE_VDPA = 1,
+ PDS_DEV_TYPE_VFIO = 2,
+ PDS_DEV_TYPE_ETH = 3,
+ PDS_DEV_TYPE_RDMA = 4,
+ PDS_DEV_TYPE_LM = 5,
+
+ /* new ones added before this line */
+ PDS_DEV_TYPE_MAX = 16 /* don't change - used in struct size */
+};
+
+#define PDS_DEV_TYPE_CORE_STR "Core"
+#define PDS_DEV_TYPE_VDPA_STR "vDPA"
+#define PDS_DEV_TYPE_VFIO_STR "vfio"
+#define PDS_DEV_TYPE_ETH_STR "Eth"
+#define PDS_DEV_TYPE_RDMA_STR "RDMA"
+#define PDS_DEV_TYPE_LM_STR "LM"
+
+#define PDS_VDPA_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_VDPA_STR
+#define PDS_VFIO_LM_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_LM_STR "." PDS_DEV_TYPE_VFIO_STR
+
+struct pdsc;
+
+int pdsc_register_notify(struct notifier_block *nb);
+void pdsc_unregister_notify(struct notifier_block *nb);
+void *pdsc_get_pf_struct(struct pci_dev *vf_pdev);
+int pds_client_register(struct pdsc *pf, char *devname);
+int pds_client_unregister(struct pdsc *pf, u16 client_id);
+#endif /* _PDS_COMMON_H_ */
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
new file mode 100644
index 000000000000..17a87c1a55d7
--- /dev/null
+++ b/include/linux/pds/pds_core_if.h
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_CORE_IF_H_
+#define _PDS_CORE_IF_H_
+
+#define PCI_VENDOR_ID_PENSANDO 0x1dd8
+#define PCI_DEVICE_ID_PENSANDO_CORE_PF 0x100c
+#define PCI_DEVICE_ID_VIRTIO_NET_TRANS 0x1000
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define PCI_DEVICE_ID_PENSANDO_VDPA_VF 0x100b
+#define PDS_CORE_BARS_MAX 4
+#define PDS_CORE_PCI_BAR_DBELL 1
+
+/* Bar0 */
+#define PDS_CORE_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
+#define PDS_CORE_BAR0_SIZE 0x8000
+#define PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET 0x0000
+#define PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET 0x0800
+#define PDS_CORE_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
+#define PDS_CORE_BAR0_INTR_STATUS_OFFSET 0x1000
+#define PDS_CORE_BAR0_INTR_CTRL_OFFSET 0x2000
+#define PDS_CORE_DEV_CMD_DONE 0x00000001
+
+#define PDS_CORE_DEVCMD_TIMEOUT 5
+
+#define PDS_CORE_CLIENT_ID 0
+#define PDS_CORE_ASIC_TYPE_CAPRI 0
+
+/*
+ * enum pds_core_cmd_opcode - Device commands
+ */
+enum pds_core_cmd_opcode {
+ /* Core init */
+ PDS_CORE_CMD_NOP = 0,
+ PDS_CORE_CMD_IDENTIFY = 1,
+ PDS_CORE_CMD_RESET = 2,
+ PDS_CORE_CMD_INIT = 3,
+
+ PDS_CORE_CMD_FW_DOWNLOAD = 4,
+ PDS_CORE_CMD_FW_CONTROL = 5,
+
+ /* SR/IOV commands */
+ PDS_CORE_CMD_VF_GETATTR = 60,
+ PDS_CORE_CMD_VF_SETATTR = 61,
+ PDS_CORE_CMD_VF_CTRL = 62,
+
+ /* Add commands before this line */
+ PDS_CORE_CMD_MAX,
+ PDS_CORE_CMD_COUNT
+};
+
+/*
+ * enum pds_core_status_code - Device command return codes
+ */
+enum pds_core_status_code {
+ PDS_RC_SUCCESS = 0, /* Success */
+ PDS_RC_EVERSION = 1, /* Incorrect version for request */
+ PDS_RC_EOPCODE = 2, /* Invalid cmd opcode */
+ PDS_RC_EIO = 3, /* I/O error */
+ PDS_RC_EPERM = 4, /* Permission denied */
+ PDS_RC_EQID = 5, /* Bad qid */
+ PDS_RC_EQTYPE = 6, /* Bad qtype */
+ PDS_RC_ENOENT = 7, /* No such element */
+ PDS_RC_EINTR = 8, /* operation interrupted */
+ PDS_RC_EAGAIN = 9, /* Try again */
+ PDS_RC_ENOMEM = 10, /* Out of memory */
+ PDS_RC_EFAULT = 11, /* Bad address */
+ PDS_RC_EBUSY = 12, /* Device or resource busy */
+ PDS_RC_EEXIST = 13, /* object already exists */
+ PDS_RC_EINVAL = 14, /* Invalid argument */
+ PDS_RC_ENOSPC = 15, /* No space left or alloc failure */
+ PDS_RC_ERANGE = 16, /* Parameter out of range */
+ PDS_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */
+ PDS_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */
+ PDS_RC_ENOSUPP = 19, /* Operation not supported */
+ PDS_RC_ERROR = 29, /* Generic error */
+ PDS_RC_ERDMA = 30, /* Generic RDMA error */
+ PDS_RC_EVFID = 31, /* VF ID does not exist */
+ PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */
+ PDS_RC_ECLIENT = 33, /* No such client id */
+ PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */
+};
+
+/**
+ * struct pds_core_drv_identity - Driver identity information
+ * @drv_type: Driver type (enum pds_core_driver_type)
+ * @os_dist: OS distribution, numeric format
+ * @os_dist_str: OS distribution, string format
+ * @kernel_ver: Kernel version, numeric format
+ * @kernel_ver_str: Kernel version, string format
+ * @driver_ver_str: Driver version, string format
+ */
+struct pds_core_drv_identity {
+ __le32 drv_type;
+ __le32 os_dist;
+ char os_dist_str[128];
+ __le32 kernel_ver;
+ char kernel_ver_str[32];
+ char driver_ver_str[32];
+};
+
+#define PDS_DEV_TYPE_MAX 16
+/**
+ * struct pds_core_dev_identity - Device identity information
+ * @version: Version of device identify
+ * @type: Identify type (0 for now)
+ * @state: Device state
+ * @rsvd: Word boundary padding
+ * @nlifs: Number of LIFs provisioned
+ * @nintrs: Number of interrupts provisioned
+ * @ndbpgs_per_lif: Number of doorbell pages per LIF
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @intr_coal_div: Interrupt coalescing division factor
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @vif_types: How many of each VIF device type is supported
+ */
+struct pds_core_dev_identity {
+ u8 version;
+ u8 type;
+ u8 state;
+ u8 rsvd;
+ __le32 nlifs;
+ __le32 nintrs;
+ __le32 ndbpgs_per_lif;
+ __le32 intr_coal_mult;
+ __le32 intr_coal_div;
+ __le16 vif_types[PDS_DEV_TYPE_MAX];
+};
+
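+/*
+ * Sketch (illustrative only) of the usecs-to-device-units conversion
+ * described for @intr_coal_mult/@intr_coal_div above; the zero-divisor
+ * guard is a hypothetical precaution, not mandated here.
+ *
+ *    u32 mult = le32_to_cpu(ident->intr_coal_mult);
+ *    u32 div = le32_to_cpu(ident->intr_coal_div);
+ *    u32 coal_units = div ? coal_usecs * mult / div : 0;
+ */
+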
+#define PDS_CORE_IDENTITY_VERSION_1 1
+
+/**
+ * struct pds_core_dev_identify_cmd - Driver/device identify command
+ * @opcode: Opcode PDS_CORE_CMD_IDENTIFY
+ * @ver: Highest version of identify supported by driver
+ *
+ * Expects to find driver identification info (struct pds_core_drv_identity)
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while preparing the driver info.
+ */
+struct pds_core_dev_identify_cmd {
+ u8 opcode;
+ u8 ver;
+};
+
+/**
+ * struct pds_core_dev_identify_comp - Device identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @ver: Version of identify returned by device
+ *
+ * Device identification info (struct pds_core_dev_identity) can be found
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while reading the results.
+ */
+struct pds_core_dev_identify_comp {
+ u8 status;
+ u8 ver;
+};
+
+/**
+ * struct pds_core_dev_reset_cmd - Device reset command
+ * @opcode: Opcode PDS_CORE_CMD_RESET
+ *
+ * Resets and clears all LIFs, VDevs, and VIFs on the device.
+ */
+struct pds_core_dev_reset_cmd {
+ u8 opcode;
+};
+
+/**
+ * struct pds_core_dev_reset_comp - Reset command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_dev_reset_comp {
+ u8 status;
+};
+
+/*
+ * struct pds_core_dev_init_data - Pointers and info needed for the Core
+ * initialization PDS_CORE_CMD_INIT command. The in and out structs are
+ * overlays on the pds_core_dev_cmd_regs.data space for passing data down
+ * to the firmware on init, and then returning initialization results.
+ */
+struct pds_core_dev_init_data_in {
+ __le64 adminq_q_base;
+ __le64 adminq_cq_base;
+ __le64 notifyq_cq_base;
+ __le32 flags;
+ __le16 intr_index;
+ u8 adminq_ring_size;
+ u8 notifyq_ring_size;
+};
+
+struct pds_core_dev_init_data_out {
+ __le32 core_hw_index;
+ __le32 adminq_hw_index;
+ __le32 notifyq_hw_index;
+ u8 adminq_hw_type;
+ u8 notifyq_hw_type;
+};
+
+/**
+ * struct pds_core_dev_init_cmd - Core device initialize
+ * @opcode: opcode PDS_CORE_CMD_INIT
+ *
+ * Initializes the core device and sets up the AdminQ and NotifyQ.
+ * Expects to find initialization data (struct pds_core_dev_init_data_in)
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while preparing the driver info.
+ */
+struct pds_core_dev_init_cmd {
+ u8 opcode;
+};
+
+/**
+ * struct pds_core_dev_init_comp - Core init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ *
+ * Initialization result data (struct pds_core_dev_init_data_out)
+ * is found in cmd_regs->data.
+ */
+struct pds_core_dev_init_comp {
+ u8 status;
+};
+
+/**
+ * struct pds_core_fw_download_cmd - Firmware download command
+ * @opcode: opcode
+ * @rsvd: Word boundary padding
+ * @offset: offset of the firmware buffer within the full image
+ * @addr: DMA address of the firmware buffer
+ * @length: number of valid bytes in the firmware buffer
+ */
+struct pds_core_fw_download_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 offset;
+ __le64 addr;
+ __le32 length;
+};
+
+/**
+ * struct pds_core_fw_download_comp - Firmware download completion
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_fw_download_comp {
+ u8 status;
+};
+
+/**
+ * enum pds_core_fw_control_oper - FW control operations
+ * @PDS_CORE_FW_INSTALL_ASYNC: Install firmware asynchronously
+ * @PDS_CORE_FW_INSTALL_STATUS: Firmware installation status
+ * @PDS_CORE_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
+ * @PDS_CORE_FW_ACTIVATE_STATUS: Firmware activate status
+ * @PDS_CORE_FW_UPDATE_CLEANUP: Cleanup any firmware update leftovers
+ * @PDS_CORE_FW_GET_BOOT: Return current active firmware slot
+ * @PDS_CORE_FW_SET_BOOT: Set active firmware slot for next boot
+ * @PDS_CORE_FW_GET_LIST: Return list of installed firmware images
+ */
+enum pds_core_fw_control_oper {
+ PDS_CORE_FW_INSTALL_ASYNC = 0,
+ PDS_CORE_FW_INSTALL_STATUS = 1,
+ PDS_CORE_FW_ACTIVATE_ASYNC = 2,
+ PDS_CORE_FW_ACTIVATE_STATUS = 3,
+ PDS_CORE_FW_UPDATE_CLEANUP = 4,
+ PDS_CORE_FW_GET_BOOT = 5,
+ PDS_CORE_FW_SET_BOOT = 6,
+ PDS_CORE_FW_GET_LIST = 7,
+};
+
+enum pds_core_fw_slot {
+ PDS_CORE_FW_SLOT_INVALID = 0,
+ PDS_CORE_FW_SLOT_A = 1,
+ PDS_CORE_FW_SLOT_B = 2,
+ PDS_CORE_FW_SLOT_GOLD = 3,
+};
+
+/**
+ * struct pds_core_fw_control_cmd - Firmware control command
+ * @opcode: opcode
+ * @rsvd: Word boundary padding
+ * @oper: firmware control operation (enum pds_core_fw_control_oper)
+ * @slot: slot to operate on (enum pds_core_fw_slot)
+ */
+struct pds_core_fw_control_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ u8 oper;
+ u8 slot;
+};
+
+/**
+ * struct pds_core_fw_control_comp - Firmware control completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word alignment space
+ * @slot: Slot number (enum pds_core_fw_slot)
+ * @rsvd1: Struct padding
+ * @color: Color bit
+ */
+struct pds_core_fw_control_comp {
+ u8 status;
+ u8 rsvd[3];
+ u8 slot;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+struct pds_core_fw_name_info {
+#define PDS_CORE_FWSLOT_BUFLEN 8
+#define PDS_CORE_FWVERS_BUFLEN 32
+ char slotname[PDS_CORE_FWSLOT_BUFLEN];
+ char fw_version[PDS_CORE_FWVERS_BUFLEN];
+};
+
+struct pds_core_fw_list_info {
+#define PDS_CORE_FWVERS_LIST_LEN 16
+ u8 num_fw_slots;
+ struct pds_core_fw_name_info fw_names[PDS_CORE_FWVERS_LIST_LEN];
+} __packed;
+
+enum pds_core_vf_attr {
+ PDS_CORE_VF_ATTR_SPOOFCHK = 1,
+ PDS_CORE_VF_ATTR_TRUST = 2,
+ PDS_CORE_VF_ATTR_MAC = 3,
+ PDS_CORE_VF_ATTR_LINKSTATE = 4,
+ PDS_CORE_VF_ATTR_VLAN = 5,
+ PDS_CORE_VF_ATTR_RATE = 6,
+ PDS_CORE_VF_ATTR_STATSADDR = 7,
+};
+
+/**
+ * enum pds_core_vf_link_status - Virtual Function link status
+ * @PDS_CORE_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @PDS_CORE_VF_LINK_STATUS_UP: Link always up
+ * @PDS_CORE_VF_LINK_STATUS_DOWN: Link always down
+ */
+enum pds_core_vf_link_status {
+ PDS_CORE_VF_LINK_STATUS_AUTO = 0,
+ PDS_CORE_VF_LINK_STATUS_UP = 1,
+ PDS_CORE_VF_LINK_STATUS_DOWN = 2,
+};
+
+/**
+ * struct pds_core_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum pds_core_vf_attr)
+ * @vf_index: VF index
+ * @macaddr: mac address
+ * @vlanid: vlan ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats: stats addr struct
+ * @stats.pa: set DMA address for VF stats
+ * @stats.len: length of VF stats space
+ * @pad: force union to specific size
+ */
+struct pds_core_vf_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ struct {
+ __le64 pa;
+ __le32 len;
+ } stats;
+ u8 pad[60];
+ } __packed;
+};
+
+struct pds_core_vf_setattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ __le16 comp_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_core_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum pds_core_vf_attr)
+ * @vf_index: VF index
+ */
+struct pds_core_vf_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+};
+
+struct pds_core_vf_getattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_core_vf_ctrl_opcode {
+ PDS_CORE_VF_CTRL_START_ALL = 0,
+ PDS_CORE_VF_CTRL_START = 1,
+};
+
+/**
+ * struct pds_core_vf_ctrl_cmd - VF control command
+ * @opcode: Opcode for the command
+ * @ctrl_opcode: VF control operation type
+ * @vf_index: VF index; unused when the START_ALL operation is used
+ */
+struct pds_core_vf_ctrl_cmd {
+ u8 opcode;
+ u8 ctrl_opcode;
+ __le16 vf_index;
+};
+
+/**
+ * struct pds_core_vf_ctrl_comp - VF_CTRL command completion.
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_vf_ctrl_comp {
+ u8 status;
+};
+
+/*
+ * union pds_core_dev_cmd - Overlay of core device command structures
+ */
+union pds_core_dev_cmd {
+ u8 opcode;
+ u32 words[16];
+
+ struct pds_core_dev_identify_cmd identify;
+ struct pds_core_dev_init_cmd init;
+ struct pds_core_dev_reset_cmd reset;
+ struct pds_core_fw_download_cmd fw_download;
+ struct pds_core_fw_control_cmd fw_control;
+
+ struct pds_core_vf_setattr_cmd vf_setattr;
+ struct pds_core_vf_getattr_cmd vf_getattr;
+ struct pds_core_vf_ctrl_cmd vf_ctrl;
+};
+
+/*
+ * union pds_core_dev_comp - Overlay of core device completion structures
+ */
+union pds_core_dev_comp {
+ u8 status;
+ u8 bytes[16];
+
+ struct pds_core_dev_identify_comp identify;
+ struct pds_core_dev_reset_comp reset;
+ struct pds_core_dev_init_comp init;
+ struct pds_core_fw_download_comp fw_download;
+ struct pds_core_fw_control_comp fw_control;
+
+ struct pds_core_vf_setattr_comp vf_setattr;
+ struct pds_core_vf_getattr_comp vf_getattr;
+ struct pds_core_vf_ctrl_comp vf_ctrl;
+};
+
+/**
+ * struct pds_core_dev_hwstamp_regs - Hardware current timestamp registers
+ * @tick_low: Low 32 bits of hardware timestamp
+ * @tick_high: High 32 bits of hardware timestamp
+ */
+struct pds_core_dev_hwstamp_regs {
+ u32 tick_low;
+ u32 tick_high;
+};
+
+/**
+ * struct pds_core_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: Asic type
+ * @asic_rev: Asic revision
+ * @fw_status: Firmware status
+ * bit 0 - 1 = fw running
+ * bit 4-7 - 4 bit generation number, changes on fw restart
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
+ * @oprom_regs: oprom_regs to store oprom debug enable/disable and bmp
+ * @rsvd_pad1024: Struct padding
+ * @hwstamp: Hardware current timestamp registers
+ * @rsvd_pad2048: Struct padding
+ */
+struct pds_core_dev_info_regs {
+#define PDS_CORE_DEVINFO_FWVERS_BUFLEN 32
+#define PDS_CORE_DEVINFO_SERIAL_BUFLEN 32
+ u32 signature;
+ u8 version;
+ u8 asic_type;
+ u8 asic_rev;
+#define PDS_CORE_FW_STS_F_STOPPED 0x00
+#define PDS_CORE_FW_STS_F_RUNNING 0x01
+#define PDS_CORE_FW_STS_F_GENERATION 0xF0
+ u8 fw_status;
+ __le32 fw_heartbeat;
+ char fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN];
+ char serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN];
+ u8 oprom_regs[32]; /* reserved */
+ u8 rsvd_pad1024[916];
+ struct pds_core_dev_hwstamp_regs hwstamp; /* on 1k boundary */
+ u8 rsvd_pad2048[1016];
+} __packed;
+
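+/*
+ * Sketch (illustrative only): decoding @fw_status with the masks above.
+ *
+ *    u8 fw_status = ioread8(&info_regs->fw_status);
+ *    bool fw_running = fw_status & PDS_CORE_FW_STS_F_RUNNING;
+ *    u8 fw_generation = fw_status & PDS_CORE_FW_STS_F_GENERATION;  // bits 4-7
+ */
+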
+/**
+ * struct pds_core_dev_cmd_regs - Device command register format (read-write)
+ * @doorbell: Device Cmd Doorbell, write-only
+ * Write a 1 to signal device to process cmd
+ * @done: Command completed indicator, poll for completion
+ * bit 0 == 1 when command is complete
+ * @cmd: Opcode-specific command bytes
+ * @comp: Opcode-specific response bytes
+ * @rsvd: Struct padding
+ * @data: Opcode-specific side-data
+ */
+struct pds_core_dev_cmd_regs {
+ u32 doorbell;
+ u32 done;
+ union pds_core_dev_cmd cmd;
+ union pds_core_dev_comp comp;
+ u8 rsvd[48];
+ u32 data[478];
+} __packed;
+
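+/*
+ * Sketch (illustrative only) of the devcmd handshake implied by @doorbell
+ * and @done: write the command, ring the doorbell, poll for the done bit,
+ * then read the completion. Locking and the PDS_CORE_DEVCMD_TIMEOUT
+ * handling are omitted.
+ *
+ *    memcpy_toio(&cmd_regs->cmd, &cmd, sizeof(cmd));
+ *    iowrite32(1, &cmd_regs->doorbell);
+ *
+ *    while (!(ioread32(&cmd_regs->done) & PDS_CORE_DEV_CMD_DONE))
+ *        usleep_range(100, 200);
+ *
+ *    memcpy_fromio(&comp, &cmd_regs->comp, sizeof(comp));
+ */
+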
+/**
+ * struct pds_core_dev_regs - Device register format for bar 0 page 0
+ * @info: Device info registers
+ * @devcmd: Device command registers
+ */
+struct pds_core_dev_regs {
+ struct pds_core_dev_info_regs info;
+ struct pds_core_dev_cmd_regs devcmd;
+} __packed;
+
+#ifndef __CHECKER__
+static_assert(sizeof(struct pds_core_drv_identity) <= 1912);
+static_assert(sizeof(struct pds_core_dev_identity) <= 1912);
+static_assert(sizeof(union pds_core_dev_cmd) == 64);
+static_assert(sizeof(union pds_core_dev_comp) == 16);
+static_assert(sizeof(struct pds_core_dev_info_regs) == 2048);
+static_assert(sizeof(struct pds_core_dev_cmd_regs) == 2048);
+static_assert(sizeof(struct pds_core_dev_regs) == 4096);
+#endif /* __CHECKER__ */
+
+#endif /* _PDS_CORE_IF_H_ */
diff --git a/include/linux/pds/pds_intr.h b/include/linux/pds/pds_intr.h
new file mode 100644
index 000000000000..56277c37248c
--- /dev/null
+++ b/include/linux/pds/pds_intr.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_INTR_H_
+#define _PDS_INTR_H_
+
+/*
+ * Interrupt control register
+ * @coal_init: Coalescing timer initial value, in
+ * device units. Use @identity->intr_coal_mult
+ * and @identity->intr_coal_div to convert from
+ * usecs to device units:
+ *
+ * coal_init = coal_usecs * coal_mult / coal_div
+ *
+ * When an interrupt is sent the interrupt
+ * coalescing timer current value
+ * (@coalescing_curr) is initialized with this
+ * value and begins counting down. No more
+ * interrupts are sent until the coalescing
+ * timer reaches 0. When @coal_init=0
+ * interrupt coalescing is effectively disabled
+ * and every interrupt assert results in an
+ * interrupt. Reset value: 0
+ * @mask: Interrupt mask. When @mask=1 the interrupt
+ * resource will not send an interrupt. When
+ * @mask=0 the interrupt resource will send an
+ * interrupt if an interrupt event is pending
+ * or on the next interrupt assertion event.
+ * Reset value: 1
+ * @credits: Interrupt credits. This register indicates
+ * how many interrupt events the hardware has
+ * sent. When written by software this
+ * register atomically decrements @int_credits
+ * by the value written. When @int_credits
+ * becomes 0 then the "pending interrupt" bit
+ * in the Interrupt Status register is cleared
+ * by the hardware and any pending but unsent
+ * interrupts are cleared.
+ * !!!IMPORTANT!!! This is a signed register.
+ * @flags: Interrupt control flags
+ * @unmask -- When this bit is written with a 1
+ * the interrupt resource will set mask=0.
+ * @coal_timer_reset -- When this
+ * bit is written with a 1 the
+ * @coalescing_curr will be reloaded with
+ * @coalescing_init to reset the coalescing
+ * timer.
+ * @mask_on_assert: Automatically mask on assertion. When
+ * @mask_on_assert=1 the interrupt resource
+ * will set @mask=1 whenever an interrupt is
+ * sent. When using interrupts in Legacy
+ * Interrupt mode the driver must select
+ * @mask_on_assert=0 for proper interrupt
+ * operation.
+ * @coalescing_curr: Coalescing timer current value, in
+ * microseconds. When this value reaches 0
+ * the interrupt resource is again eligible to
+ * send an interrupt. If an interrupt event
+ * is already pending when @coalescing_curr
+ * reaches 0 the pending interrupt will be
+ * sent, otherwise an interrupt will be sent
+ * on the next interrupt assertion event.
+ */
+struct pds_core_intr {
+ u32 coal_init;
+ u32 mask;
+ u16 credits;
+ u16 flags;
+#define PDS_CORE_INTR_F_UNMASK 0x0001
+#define PDS_CORE_INTR_F_TIMER_RESET 0x0002
+ u32 mask_on_assert;
+ u32 coalescing_curr;
+ u32 rsvd6[3];
+};
+
+#ifndef __CHECKER__
+static_assert(sizeof(struct pds_core_intr) == 32);
+#endif /* __CHECKER__ */
+
+#define PDS_CORE_INTR_CTRL_REGS_MAX 2048
+#define PDS_CORE_INTR_CTRL_COAL_MAX 0x3F
+#define PDS_CORE_INTR_INDEX_NOT_ASSIGNED -1
+
+struct pds_core_intr_status {
+ u32 status[2];
+};
+
+/**
+ * enum pds_core_intr_mask_vals - valid values for mask and mask_assert.
+ * @PDS_CORE_INTR_MASK_CLEAR: unmask interrupt.
+ * @PDS_CORE_INTR_MASK_SET: mask interrupt.
+ */
+enum pds_core_intr_mask_vals {
+ PDS_CORE_INTR_MASK_CLEAR = 0,
+ PDS_CORE_INTR_MASK_SET = 1,
+};
+
+/**
+ * enum pds_core_intr_credits_bits - Bitwise composition of credits values.
+ * @PDS_CORE_INTR_CRED_COUNT: bit mask of credit count, no shift needed.
+ * @PDS_CORE_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit.
+ * @PDS_CORE_INTR_CRED_UNMASK: unmask the interrupt.
+ * @PDS_CORE_INTR_CRED_RESET_COALESCE: reset the coalesce timer.
+ * @PDS_CORE_INTR_CRED_REARM: unmask the interrupt and reset the coalesce timer.
+ */
+enum pds_core_intr_credits_bits {
+ PDS_CORE_INTR_CRED_COUNT = 0x7fffu,
+ PDS_CORE_INTR_CRED_COUNT_SIGNED = 0xffffu,
+ PDS_CORE_INTR_CRED_UNMASK = 0x10000u,
+ PDS_CORE_INTR_CRED_RESET_COALESCE = 0x20000u,
+ PDS_CORE_INTR_CRED_REARM = (PDS_CORE_INTR_CRED_UNMASK |
+ PDS_CORE_INTR_CRED_RESET_COALESCE),
+};
+
+static inline void
+pds_core_intr_coal_init(struct pds_core_intr __iomem *intr_ctrl, u32 coal)
+{
+ iowrite32(coal, &intr_ctrl->coal_init);
+}
+
+static inline void
+pds_core_intr_mask(struct pds_core_intr __iomem *intr_ctrl, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl->mask);
+}
+
+static inline void
+pds_core_intr_credits(struct pds_core_intr __iomem *intr_ctrl,
+ u32 cred, u32 flags)
+{
+ if (WARN_ON_ONCE(cred > PDS_CORE_INTR_CRED_COUNT)) {
+ cred = ioread32(&intr_ctrl->credits);
+ cred &= PDS_CORE_INTR_CRED_COUNT_SIGNED;
+ }
+
+ iowrite32(cred | flags, &intr_ctrl->credits);
+}
+
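+/*
+ * Typical usage sketch (illustrative only): at the end of an interrupt
+ * handler, return the number of processed completions and rearm in a
+ * single credits write, per the register description above.
+ *
+ *    pds_core_intr_credits(&intr_ctrl[irq_index], nr_completed,
+ *                          PDS_CORE_INTR_CRED_REARM);
+ */
+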
+static inline void
+pds_core_intr_clean_flags(struct pds_core_intr __iomem *intr_ctrl, u32 flags)
+{
+ u32 cred;
+
+ cred = ioread32(&intr_ctrl->credits);
+ cred &= PDS_CORE_INTR_CRED_COUNT_SIGNED;
+ cred |= flags;
+ iowrite32(cred, &intr_ctrl->credits);
+}
+
+static inline void
+pds_core_intr_clean(struct pds_core_intr __iomem *intr_ctrl)
+{
+ pds_core_intr_clean_flags(intr_ctrl, PDS_CORE_INTR_CRED_RESET_COALESCE);
+}
+
+static inline void
+pds_core_intr_mask_assert(struct pds_core_intr __iomem *intr_ctrl, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl->mask_on_assert);
+}
+
+#endif /* _PDS_INTR_H_ */
diff --git a/include/linux/pe.h b/include/linux/pe.h
index 8ad71d763a77..fdf9c95709ba 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -11,26 +11,34 @@
#include <linux/types.h>
/*
- * Linux EFI stub v1.0 adds the following functionality:
- * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path,
- * - Loading/starting the kernel from firmware that targets a different
- * machine type, via the entrypoint exposed in the .compat PE/COFF section.
+ * Starting from version v3.0, the major version field should be interpreted as
+ * a bit mask of features supported by the kernel's EFI stub:
+ * - 0x1: initrd loading from the LINUX_EFI_INITRD_MEDIA_GUID device path,
+ * - 0x2: initrd loading using the initrd= command line option, where the file
+ * may be specified using device path notation, and is not required to
+ * reside on the same volume as the loaded kernel image.
*
* The recommended way of loading and starting v1.0 or later kernels is to use
* the LoadImage() and StartImage() EFI boot services, and expose the initrd
* via the LINUX_EFI_INITRD_MEDIA_GUID device path.
*
- * Versions older than v1.0 support initrd loading via the image load options
- * (using initrd=, limited to the volume from which the kernel itself was
- * loaded), or via arch specific means (bootparams, DT, etc).
+ * Versions older than v1.0 may support initrd loading via the image load
+ * options (using initrd=, limited to the volume from which the kernel itself
+ * was loaded), or only via arch specific means (bootparams, DT, etc).
*
- * On x86, LoadImage() and StartImage() can be omitted if the EFI handover
- * protocol is implemented, which can be inferred from the version,
- * handover_offset and xloadflags fields in the bootparams structure.
+ * The minor version field must remain 0x0.
+ * (https://lore.kernel.org/all/efd6f2d4-547c-1378-1faa-53c044dbd297@gmail.com/)
*/
-#define LINUX_EFISTUB_MAJOR_VERSION 0x1
+#define LINUX_EFISTUB_MAJOR_VERSION 0x3
#define LINUX_EFISTUB_MINOR_VERSION 0x0
+/*
+ * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable
+ * Linux kernel images that target the architecture as specified by the PE/COFF
+ * header machine type field.
+ */
+#define LINUX_PE_MAGIC 0x818223cd
+
#define MZ_MAGIC 0x5a4d /* "MZ" */
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
@@ -55,6 +63,9 @@
#define IMAGE_FILE_MACHINE_POWERPC 0x01f0
#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1
#define IMAGE_FILE_MACHINE_R4000 0x0166
+#define IMAGE_FILE_MACHINE_RISCV32 0x5032
+#define IMAGE_FILE_MACHINE_RISCV64 0x5064
+#define IMAGE_FILE_MACHINE_RISCV128 0x5128
#define IMAGE_FILE_MACHINE_SH3 0x01a2
#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3
#define IMAGE_FILE_MACHINE_SH3E 0x01a4
@@ -62,6 +73,8 @@
#define IMAGE_FILE_MACHINE_SH5 0x01a8
#define IMAGE_FILE_MACHINE_THUMB 0x01c2
#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
+#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232
+#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264
/* flags */
#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
@@ -106,6 +119,9 @@
#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT 0x0001
+#define IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT 0x0040
+
/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
#define IMAGE_SCN_RESERVED_0 0x00000001
#define IMAGE_SCN_RESERVED_1 0x00000002
@@ -153,6 +169,7 @@
#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
#define IMAGE_DEBUG_TYPE_CODEVIEW 2
+#define IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS 20
#ifndef __ASSEMBLY__
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
new file mode 100644
index 000000000000..ff8ae9c26c80
--- /dev/null
+++ b/include/linux/peci-cpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_CPU_H
+#define __LINUX_PECI_CPU_H
+
+#include <linux/types.h>
+
+#include "../../arch/x86/include/asm/intel-family.h"
+
+#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
+#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */
+#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */
+#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */
+#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */
+#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */
+#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */
+#define PECI_PCS_MODULE_TEMP 9 /* Per Core DTS Temperature Read */
+#define PECI_PCS_THERMAL_MARGIN 10 /* DTS thermal margin */
+#define PECI_PCS_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */
+#define PECI_PCS_TEMP_TARGET 16 /* Temperature Target Read */
+#define PECI_PCS_TDP_UNITS 30 /* Units for power/energy registers */
+
+struct peci_device;
+
+int peci_temp_read(struct peci_device *device, s16 *temp_raw);
+
+int peci_pcs_read(struct peci_device *device, u8 index,
+ u16 param, u32 *data);
+
+int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev,
+ u8 func, u16 reg, u32 *data);
+
+int peci_ep_pci_local_read(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u32 *data);
+
+int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 address, u32 *data);
+
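+/*
+ * Usage sketch (illustrative only): reading the Temperature Target
+ * package config parameter. The device pointer is hypothetical and the
+ * parameter value of 0 is used purely as an example here.
+ *
+ *    u32 data;
+ *    int ret = peci_pcs_read(device, PECI_PCS_TEMP_TARGET, 0, &data);
+ */
+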
+#endif /* __LINUX_PECI_CPU_H */
diff --git a/include/linux/peci.h b/include/linux/peci.h
new file mode 100644
index 000000000000..9b3d36aff431
--- /dev/null
+++ b/include/linux/peci.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_H
+#define __LINUX_PECI_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/*
+ * Currently we don't support any PECI command over 32 bytes.
+ */
+#define PECI_REQUEST_MAX_BUF_SIZE 32
+
+struct peci_controller;
+struct peci_request;
+
+/**
+ * struct peci_controller_ops - PECI controller specific methods
+ * @xfer: PECI transfer function
+ *
+ * PECI controllers may have different hardware interfaces - the drivers
+ * implementing PECI controllers can use this structure to abstract away those
+ * differences by exposing a common interface for PECI core.
+ */
+struct peci_controller_ops {
+ int (*xfer)(struct peci_controller *controller, u8 addr, struct peci_request *req);
+};
+
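+/*
+ * Sketch (illustrative only): a minimal controller driver wires its
+ * transfer routine into these ops and registers it with
+ * devm_peci_controller_add() below. The my_* names are hypothetical.
+ *
+ *    static int my_peci_xfer(struct peci_controller *controller, u8 addr,
+ *                            struct peci_request *req)
+ *    {
+ *        // drive the hardware using req->tx.buf and req->tx.len, then
+ *        // fill req->rx.buf and set req->rx.len from the response
+ *        return 0;
+ *    }
+ *
+ *    static const struct peci_controller_ops my_peci_ops = {
+ *        .xfer = my_peci_xfer,
+ *    };
+ */
+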
+/**
+ * struct peci_controller - PECI controller
+ * @dev: device object to register PECI controller to the device model
+ * @ops: pointer to device specific controller operations
+ * @bus_lock: lock used to protect multiple callers
+ * @id: PECI controller ID
+ *
+ * PECI controllers usually connect to their drivers using a non-PECI bus,
+ * such as the platform bus.
+ * Each PECI controller can communicate with one or more PECI devices.
+ */
+struct peci_controller {
+ struct device dev;
+ const struct peci_controller_ops *ops;
+ struct mutex bus_lock; /* held for the duration of xfer */
+ u8 id;
+};
+
+struct peci_controller *devm_peci_controller_add(struct device *parent,
+ const struct peci_controller_ops *ops);
+
+static inline struct peci_controller *to_peci_controller(void *d)
+{
+ return container_of(d, struct peci_controller, dev);
+}
+
+/**
+ * struct peci_device - PECI device
+ * @dev: device object to register PECI device to the device model
+ * @controller: manages the bus segment hosting this PECI device
+ * @info: PECI device characteristics
+ * @info.family: device family
+ * @info.model: device model
+ * @info.peci_revision: PECI revision supported by the PECI device
+ * @info.socket_id: the socket ID represented by the PECI device
+ * @addr: address used on the PECI bus connected to the parent controller
+ * @deleted: indicates that PECI device was already deleted
+ *
+ * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus.
+ * The behaviour exposed to the rest of the system is defined by the PECI driver
+ * managing the device.
+ */
+struct peci_device {
+ struct device dev;
+ struct {
+ u16 family;
+ u8 model;
+ u8 peci_revision;
+ u8 socket_id;
+ } info;
+ u8 addr;
+ bool deleted;
+};
+
+static inline struct peci_device *to_peci_device(struct device *d)
+{
+ return container_of(d, struct peci_device, dev);
+}
+
+/**
+ * struct peci_request - PECI request
+ * @device: PECI device to which the request is sent
+ * @tx: TX buffer specific data
+ * @tx.buf: TX buffer
+ * @tx.len: transfer data length in bytes
+ * @rx: RX buffer specific data
+ * @rx.buf: RX buffer
+ * @rx.len: received data length in bytes
+ *
+ * A peci_request represents a request issued by PECI originator (TX) and
+ * a response received from PECI responder (RX).
+ */
+struct peci_request {
+ struct peci_device *device;
+ struct {
+ u8 buf[PECI_REQUEST_MAX_BUF_SIZE];
+ u8 len;
+ } rx, tx;
+};
+
+#endif /* __LINUX_PECI_H */
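
For illustration, a controller driver wires its hardware access into the xfer op and registers through devm_peci_controller_add(). The following is a minimal sketch, not part of this patch: my_hw_xfer(), my_peci_probe() and the underlying FIFO access are assumptions.

#include <linux/err.h>
#include <linux/peci.h>
#include <linux/platform_device.h>

/* Hypothetical wire-level transfer for target @addr. */
static int my_hw_xfer(struct peci_controller *ctrl, u8 addr,
		      struct peci_request *req)
{
	/*
	 * A real driver would push req->tx.buf (req->tx.len bytes) to the
	 * hardware, then fill req->rx.buf and req->rx.len with the reply;
	 * both buffers are bounded by PECI_REQUEST_MAX_BUF_SIZE.
	 */
	return -EIO;	/* placeholder */
}

static const struct peci_controller_ops my_peci_ops = {
	.xfer = my_hw_xfer,
};

static int my_peci_probe(struct platform_device *pdev)
{
	struct peci_controller *ctrl;

	ctrl = devm_peci_controller_add(&pdev->dev, &my_peci_ops);
	return PTR_ERR_OR_ZERO(ctrl);
}

Note that the core holds bus_lock for the duration of xfer, so the implementation does not need its own serialization across requests.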
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 176bfbd52d97..ec3573119923 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -51,7 +51,7 @@
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
- __attribute__((section(".discard"), unused))
+ __section(".discard") __attribute__((unused))
/*
* s390 and alpha modules require percpu variables to be defined as
@@ -310,7 +310,7 @@ extern void __bad_size_call_parameter(void);
#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
-static inline void __this_cpu_preempt_check(const char *op) { }
+static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable) \
@@ -343,31 +343,19 @@ static inline void __this_cpu_preempt_check(const char *op) { }
pscr2_ret__; \
})
-/*
- * Special handling for cmpxchg_double. cmpxchg_double is passed two
- * percpu variables. The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+#define __pcpu_size_call_return2bool(stem, variable, ...) \
({ \
- bool pdcrb_ret__; \
- __verify_pcpu_ptr(&(pcp1)); \
- BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
- VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \
- VM_BUG_ON((unsigned long)(&(pcp2)) != \
- (unsigned long)(&(pcp1)) + sizeof(pcp1)); \
- switch(sizeof(pcp1)) { \
- case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
- case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
- case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
- case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+ bool pscr2_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+ case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+ case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+ case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
default: \
__bad_size_call_parameter(); break; \
} \
- pdcrb_ret__; \
+ pscr2_ret__; \
})
#define __pcpu_size_call(stem, variable, ...) \
@@ -412,7 +400,7 @@ do { \
* instead.
*
* If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
* because the execution thread was rescheduled on another processor or an
* interrupt occurred and the same percpu variable was modified from the
* interrupt context.
@@ -426,9 +414,8 @@ do { \
#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
-#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
+#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
+ __pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1)
@@ -488,11 +475,6 @@ do { \
raw_cpu_cmpxchg(pcp, oval, nval); \
})
-#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({ __this_cpu_preempt_check("cmpxchg_double"); \
- raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
-})
-
#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
@@ -513,9 +495,8 @@ do { \
#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
-#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
+#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
+ __pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
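
A quick illustration of the new try_cmpxchg flavour: like try_cmpxchg(), the expected value is passed by reference and refreshed on failure, so the usual read/retry loop tests a bool instead of re-comparing old values. The per-CPU variable and helper below are hypothetical.

#include <linux/percpu-defs.h>

static DEFINE_PER_CPU(unsigned long, my_state);	/* hypothetical */

static void my_state_set_flag(unsigned long flag)
{
	unsigned long old = this_cpu_read(my_state);

	/* On failure, 'old' is reloaded with the current value. */
	while (!this_cpu_try_cmpxchg(my_state, &old, old | flag))
		;
}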
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 87d8a38bdea1..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -51,9 +51,9 @@
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
@@ -92,18 +92,30 @@ enum {
PERCPU_REF_ALLOW_REINIT = 1 << 2,
};
-struct percpu_ref {
+struct percpu_ref_data {
atomic_long_t count;
- /*
- * The low bit of the pointer indicates whether the ref is in percpu
- * mode; if set, then get/put will manipulate the atomic_t.
- */
- unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
percpu_ref_func_t *confirm_switch;
bool force_atomic:1;
bool allow_reinit:1;
struct rcu_head rcu;
+ struct percpu_ref *ref;
+};
+
+struct percpu_ref {
+ /*
+ * The low bit of the pointer indicates whether the ref is in percpu
+ * mode; if set, then get/put will manipulate the atomic_t.
+ */
+ unsigned long percpu_count_ptr;
+
+ /*
+	 * 'percpu_ref' is often embedded into a user structure, and only
+	 * 'percpu_count_ptr' is required in the fast path. Move the other
+	 * fields into 'percpu_ref_data' so we can reduce the memory
+	 * footprint in the fast path.
+ */
+ struct percpu_ref_data *data;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
@@ -118,6 +130,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);
/**
* percpu_ref_kill - drop the initial ref
@@ -191,7 +204,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
- atomic_long_add(nr, &ref->count);
+ atomic_long_add(nr, &ref->data->count);
rcu_read_unlock();
}
@@ -200,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
*
* This function is safe to call as long as @ref is between init and exit.
*/
@@ -231,7 +244,7 @@ static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
this_cpu_add(*percpu_count, nr);
ret = true;
} else {
- ret = atomic_long_add_unless(&ref->count, nr, 0);
+ ret = atomic_long_add_unless(&ref->data->count, nr, 0);
}
rcu_read_unlock();
@@ -254,6 +267,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
}
/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (likely(__ref_is_percpu(ref, &percpu_count))) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->data->count);
+ }
+ return ret;
+}
+
+/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
@@ -270,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count;
bool ret = false;
rcu_read_lock();
-
- if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
- ret = true;
- } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
- ret = atomic_long_inc_not_zero(&ref->count);
- }
-
+ ret = percpu_ref_tryget_live_rcu(ref);
rcu_read_unlock();
-
return ret;
}
@@ -305,8 +331,8 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
- else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
- ref->release(ref);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+ ref->data->release(ref);
rcu_read_unlock();
}
@@ -339,21 +365,4 @@ static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
- unsigned long __percpu *percpu_count;
-
- if (__ref_is_percpu(ref, &percpu_count))
- return false;
- return !atomic_long_read(&ref->count);
-}
-
#endif
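
For context on the split struct, a typical user still only touches the percpu_ref API; the release callback now lives in the out-of-line percpu_ref_data. A minimal sketch, with my_object and its helpers assumed:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_object {			/* hypothetical user structure */
	struct percpu_ref ref;
};

static void my_object_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct my_object, ref));
}

static struct my_object *my_object_create(void)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* Starts in percpu mode; flags 0, count starts at 1. */
	if (percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}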
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5e033fe1ff4e..36b942b67b7d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* anything we did within this RCU-sched read-side critical section.
*/
if (likely(rcu_sync_is_idle(&sem->rss)))
- __this_cpu_inc(*sem->read_count);
+ this_cpu_inc(*sem->read_count);
else
__percpu_down_read(sem, false); /* Unconditional memory barrier */
/*
@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
* Same as in percpu_down_read().
*/
if (likely(rcu_sync_is_idle(&sem->rss)))
- __this_cpu_inc(*sem->read_count);
+ this_cpu_inc(*sem->read_count);
else
ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
preempt_enable();
@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
* Same as in percpu_down_read().
*/
if (likely(rcu_sync_is_idle(&sem->rss))) {
- __this_cpu_dec(*sem->read_count);
+ this_cpu_dec(*sem->read_count);
} else {
/*
* slowpath; reader will only ever wake a single blocked
@@ -115,15 +115,21 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
* aggregate zero, as that is the only time it matters) they
* will also see our critical section.
*/
- __this_cpu_dec(*sem->read_count);
+ this_cpu_dec(*sem->read_count);
rcuwait_wake_up(&sem->writer);
}
preempt_enable();
}
+extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
+static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
+{
+ return atomic_read(&sem->block);
+}
+
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
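
Usage is unchanged by this hunk; only the reader fast path moved from __this_cpu_* to the IRQ-safe this_cpu_* ops, and the two lock-state predicates were added. A sketch with a hypothetical semaphore:

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);	/* hypothetical */

static void my_reader(void)
{
	percpu_down_read(&my_rwsem);	/* usually just this_cpu_inc() */
	/* read-side critical section */
	percpu_up_read(&my_rwsem);
}

static void my_writer(void)
{
	percpu_down_write(&my_rwsem);	/* waits out all readers */
	/* percpu_is_write_locked() is true here */
	percpu_up_write(&my_rwsem);
}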
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..8c677f185901 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -6,9 +6,9 @@
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
-#include <linux/printk.h>
#include <linux/pfn.h>
#include <linux/init.h>
+#include <linux/cleanup.h>
#include <asm/percpu.h>
@@ -35,15 +35,20 @@
#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
PCPU_MIN_ALLOC_SHIFT)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define PERCPU_DYNAMIC_SIZE_SHIFT 12
+#else
+#define PERCPU_DYNAMIC_SIZE_SHIFT 10
+#endif
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
- * The following two parameters decide how much resource to
- * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
- * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ * The following parameter decides how much resource to preallocate
+ * for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
+ * PERCPU_DYNAMIC_EARLY_SIZE.
*/
-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
-#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
@@ -57,9 +62,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (28 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << PERCPU_DYNAMIC_SIZE_SHIFT)
#else
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
#endif
extern void *pcpu_base_addr;
@@ -95,10 +100,7 @@ extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
-typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
- size_t align);
-typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
-typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
@@ -108,22 +110,18 @@ extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
-#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn);
-#endif
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -131,9 +129,13 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
+extern size_t pcpu_alloc_size(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu_gfp(type, gfp) \
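
The DEFINE_FREE() hook above enables the <linux/cleanup.h> scope-based pattern for per-CPU allocations; roughly, with a hypothetical caller:

#include <linux/cleanup.h>
#include <linux/percpu.h>

static int my_setup(void)			/* hypothetical */
{
	/* Freed via free_percpu() automatically on early returns. */
	u64 __percpu *ctrs __free(free_percpu) =
			alloc_percpu_gfp(u64, GFP_KERNEL);

	if (!ctrs)
		return -ENOMEM;

	/* Transfer ownership on success so cleanup is skipped. */
	my_publish(no_free_ptr(ctrs));		/* hypothetical */
	return 0;
}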
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 01861eebed79..3a44dd1e33d2 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -13,7 +13,9 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
-#include <linux/gfp.h>
+
+/* percpu_counter batch for local add or sub */
+#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
#ifdef CONFIG_SMP
@@ -28,22 +30,35 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key);
-#define percpu_counter_init(fbc, value, gfp) \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, gfp, &__key); \
+ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+ &__key); \
})
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ percpu_counter_destroy_many(fbc, 1);
+}
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
+ s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
@@ -56,6 +71,29 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ return __percpu_counter_limited_add(fbc, limit, amount,
+ percpu_counter_batch);
+}
+
+/*
+ * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
+ * are accumulated in the local per cpu counter and not in fbc->count until
+ * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
+ * writes efficient.
+ * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
+ * used to add up the counts from each CPU to account for all the local
+ * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
+ * should be used when a counter is updated frequently and read rarely.
+ */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
+}
+
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
@@ -98,11 +136,27 @@ struct percpu_counter {
s64 count;
};
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+ s64 amount, gfp_t gfp,
+ u32 nr_counters)
+{
+ u32 i;
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].count = amount;
+
+ return 0;
+}
+
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
gfp_t gfp)
{
- fbc->count = amount;
- return 0;
+ return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+ u32 nr_counters)
+{
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -133,9 +187,39 @@ __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
fbc->count += amount;
- preempt_enable();
+ local_irq_restore(flags);
+}
+
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ unsigned long flags;
+ bool good = false;
+ s64 count;
+
+ if (amount == 0)
+ return true;
+
+ local_irq_save(flags);
+ count = fbc->count + amount;
+ if ((amount > 0 && count <= limit) ||
+ (amount < 0 && count >= limit)) {
+ fbc->count = count;
+ good = true;
+ }
+ local_irq_restore(flags);
+ return good;
+}
+
+/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, amount);
}
static inline void
@@ -193,4 +277,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
percpu_counter_add(fbc, -amount);
}
+static inline void
+percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_local(fbc, -amount);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
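
Putting the local-batch helpers together: writes stay in the per-CPU deltas until they overflow PERCPU_COUNTER_LOCAL_BATCH, so exact reads must go through the summing path. A sketch with a hypothetical counter:

#include <linux/percpu_counter.h>

static struct percpu_counter nr_dirty;		/* hypothetical */

static int my_counter_init(void)
{
	return percpu_counter_init(&nr_dirty, 0, GFP_KERNEL);
}

static void my_page_dirtied(void)
{
	/* Cheap: accumulates in the local per-CPU delta. */
	percpu_counter_add_local(&nr_dirty, 1);
}

static s64 my_nr_dirty(void)
{
	/* An exact value requires summing, not percpu_counter_read(). */
	return percpu_counter_sum(&nr_dirty);
}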
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 5b616dde9a4c..b3b34f6670cf 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -24,8 +24,13 @@
/*
* ARM PMU hw_event flags
*/
-/* Event uses a 64bit counter */
-#define ARMPMU_EVT_64BIT 1
+#define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
+#define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
+#define ARMPMU_EVT_63BIT 0x00004 /* Event uses a 63bit counter */
+
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_63BIT) == ARMPMU_EVT_63BIT);
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
@@ -55,12 +60,6 @@ struct pmu_hw_events {
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
* already have to allocate this struct per cpu.
*/
@@ -73,6 +72,7 @@ enum armpmu_attr_groups {
ARMPMU_ATTR_GROUP_COMMON,
ARMPMU_ATTR_GROUP_EVENTS,
ARMPMU_ATTR_GROUP_FORMATS,
+ ARMPMU_ATTR_GROUP_CAPS,
ARMPMU_NR_ATTR_GROUPS
};
@@ -96,7 +96,6 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
- int (*filter_match)(struct perf_event *event);
int num_events;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
@@ -109,6 +108,8 @@ struct arm_pmu {
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
+ /* store the PMMIR_EL1 to expose slots */
+ u64 reg_pmmir;
/* Only to be used by ACPI probing code */
unsigned long acpi_cpuid;
@@ -160,9 +161,16 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x) do { } while (0)
+#endif
+
+bool arm_pmu_irq_is_nmi(void);
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
-struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(int irq, int cpu);
@@ -173,5 +181,28 @@ void armpmu_free_irq(int irq, int cpu);
#endif /* CONFIG_ARM_PMU */
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
+#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
+
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
#endif /* __ARM_PMU_H__ */
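
The GEN_PMU_FORMAT_ATTR() triple stringifies the ATTR_CFG_FLD_<name>_{CFG,LO,HI} defines into the "cfg:lo-hi" sysfs format string, and ATTR_CFG_GET_FLD() extracts the same bits at event-init time. A sketch with a hypothetical field occupying config[15:0]:

#define ATTR_CFG_FLD_my_event_CFG	config	/* hypothetical field */
#define ATTR_CFG_FLD_my_event_LO	0
#define ATTR_CFG_FLD_my_event_HI	15

/* Expands to PMU_FORMAT_ATTR(my_event, "config:0-15") */
GEN_PMU_FORMAT_ATTR(my_event);

static u64 my_event_from_attr(struct perf_event *event)
{
	return ATTR_CFG_GET_FLD(&event->attr, my_event);
}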
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
new file mode 100644
index 000000000000..46377e134d67
--- /dev/null
+++ b/include/linux/perf/arm_pmuv3.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __PERF_ARM_PMUV3_H
+#define __PERF_ARM_PMUV3_H
+
+#define ARMV8_PMU_MAX_COUNTERS 32
+#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Common architectural and microarchitectural event numbers.
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005
+#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006
+#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007
+#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008
+#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009
+#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A
+#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B
+#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C
+#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D
+#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E
+#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012
+#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018
+#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019
+#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A
+#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B
+#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C
+#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D
+#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020
+#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022
+#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023
+#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033
+#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034
+#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039
+#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A
+#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B
+#define ARMV8_PMUV3_PERFCTR_STALL 0x003C
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F
+
+/* Statistical profiling extension microarchitectural events */
+#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
+#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
+#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
+#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
+
+/* AMUv1 architecture events */
+#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
+#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005
+
+/* long-latency read miss events */
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B
+
+/* Trace buffer events */
+#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C
+#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E
+
+/* Trace unit events */
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B
+
+/* additional latency from alignment events */
+#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
+#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
+#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022
+
+/* Armv8.5 Memory Tagging Extension events */
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026
+
+/* ARMv8 recommended implementation defined event types */
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048
+
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A
+
+#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C
+#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D
+#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E
+#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F
+#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070
+#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071
+#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072
+#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073
+#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074
+#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075
+#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076
+#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077
+#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078
+#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079
+#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A
+
+#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C
+#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D
+#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081
+#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082
+#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083
+#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086
+#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087
+#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F
+#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090
+#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
+#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+/* Mask for writable bits is both P and C fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */
+#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29)
+#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
+
+/* PMMIR_EL1.SLOTS mask */
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
+
+/*
+ * This code is really good
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ PMEVN_CASE(0, case_macro); \
+ PMEVN_CASE(1, case_macro); \
+ PMEVN_CASE(2, case_macro); \
+ PMEVN_CASE(3, case_macro); \
+ PMEVN_CASE(4, case_macro); \
+ PMEVN_CASE(5, case_macro); \
+ PMEVN_CASE(6, case_macro); \
+ PMEVN_CASE(7, case_macro); \
+ PMEVN_CASE(8, case_macro); \
+ PMEVN_CASE(9, case_macro); \
+ PMEVN_CASE(10, case_macro); \
+ PMEVN_CASE(11, case_macro); \
+ PMEVN_CASE(12, case_macro); \
+ PMEVN_CASE(13, case_macro); \
+ PMEVN_CASE(14, case_macro); \
+ PMEVN_CASE(15, case_macro); \
+ PMEVN_CASE(16, case_macro); \
+ PMEVN_CASE(17, case_macro); \
+ PMEVN_CASE(18, case_macro); \
+ PMEVN_CASE(19, case_macro); \
+ PMEVN_CASE(20, case_macro); \
+ PMEVN_CASE(21, case_macro); \
+ PMEVN_CASE(22, case_macro); \
+ PMEVN_CASE(23, case_macro); \
+ PMEVN_CASE(24, case_macro); \
+ PMEVN_CASE(25, case_macro); \
+ PMEVN_CASE(26, case_macro); \
+ PMEVN_CASE(27, case_macro); \
+ PMEVN_CASE(28, case_macro); \
+ PMEVN_CASE(29, case_macro); \
+ PMEVN_CASE(30, case_macro); \
+ default: WARN(1, "Invalid PMEV* index\n"); \
+ } \
+ } while (0)
+
+#endif
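
PMEVN_SWITCH() exists because the PMEVCNTR<n>/PMEVTYPER<n> system registers encode the counter index in the instruction, so a runtime index has to be dispatched to a fixed register name. The arm64 accessors are built from it roughly as follows (a sketch of the arch-side usage; read_sysreg()/write_sysreg() come from the arch headers):

#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static inline unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}

#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static inline void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}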
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
new file mode 100644
index 000000000000..43282e22ebe1
--- /dev/null
+++ b/include/linux/perf/riscv_pmu.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ *
+ */
+
+#ifndef _RISCV_PMU_H
+#define _RISCV_PMU_H
+
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_RISCV_PMU
+
+/*
+ * RISCV_MAX_COUNTERS bounds the number of hardware and firmware
+ * counters a single PMU instance can track.
+ */
+
+#define RISCV_MAX_COUNTERS 64
+#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
+#define RISCV_PMU_SBI_PDEV_NAME "riscv-pmu-sbi"
+#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
+
+#define RISCV_PMU_STOP_FLAG_RESET 1
+
+#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
+
+struct cpu_hw_events {
+ /* number of currently enabled events */
+ int n_events;
+ /* Counter overflow interrupt */
+ int irq;
+ /* currently enabled events */
+ struct perf_event *events[RISCV_MAX_COUNTERS];
+ /* currently enabled hardware counters */
+ DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
+ /* currently enabled firmware counters */
+ DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
+};
+
+struct riscv_pmu {
+ struct pmu pmu;
+ char *name;
+
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+
+ unsigned long cmask;
+ u64 (*ctr_read)(struct perf_event *event);
+ int (*ctr_get_idx)(struct perf_event *event);
+ int (*ctr_get_width)(int idx);
+ void (*ctr_clear_idx)(struct perf_event *event);
+ void (*ctr_start)(struct perf_event *event, u64 init_val);
+ void (*ctr_stop)(struct perf_event *event, unsigned long flag);
+ int (*event_map)(struct perf_event *event, u64 *config);
+ void (*event_init)(struct perf_event *event);
+ void (*event_mapped)(struct perf_event *event, struct mm_struct *mm);
+ void (*event_unmapped)(struct perf_event *event, struct mm_struct *mm);
+ uint8_t (*csr_index)(struct perf_event *event);
+
+ struct cpu_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block riscv_pm_nb;
+};
+
+#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
+
+void riscv_pmu_start(struct perf_event *event, int flags);
+void riscv_pmu_stop(struct perf_event *event, int flags);
+unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
+int riscv_pmu_event_set_period(struct perf_event *event);
+uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
+u64 riscv_pmu_event_update(struct perf_event *event);
+#ifdef CONFIG_RISCV_PMU_LEGACY
+void riscv_pmu_legacy_skip_init(void);
+#else
+static inline void riscv_pmu_legacy_skip_init(void) {}
+#endif
+struct riscv_pmu *riscv_pmu_alloc(void);
+#ifdef CONFIG_RISCV_PMU_SBI
+int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
+#endif
+
+#endif /* CONFIG_RISCV_PMU */
+
+#endif /* _RISCV_PMU_H */
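
A backend fills in the ctr_*/event_* callbacks and registers the result with perf; the core riscv_pmu_* helpers handle period maintenance. A minimal sketch, with the my_* helpers hypothetical:

#include <linux/perf/riscv_pmu.h>

static u64 my_ctr_read(struct perf_event *event)	/* hypothetical */
{
	int idx = event->hw.idx;

	return riscv_pmu_ctr_read_csr(CSR_CYCLE + idx);	/* assumed CSR map */
}

static struct riscv_pmu *my_pmu_setup(void)
{
	struct riscv_pmu *pmu = riscv_pmu_alloc();

	if (!pmu)
		return NULL;

	pmu->ctr_read = my_ctr_read;
	/* ctr_get_idx, ctr_start, ctr_stop, event_map, ... likewise */
	return pmu;
}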
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 04a49ccc7beb..d2a15c0c6f8a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,14 +26,17 @@
# include <asm/local64.h>
#endif
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
struct perf_guest_info_callbacks {
- int (*is_in_guest)(void);
- int (*is_user_mode)(void);
- unsigned long (*get_guest_ip)(void);
- void (*handle_intel_pt_intr)(void);
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <linux/rhashtable-types.h>
#include <asm/hw_breakpoint.h>
#endif
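
The three int callbacks collapse into a single state() bitmask plus get_ip(); a hypervisor might provide them roughly as below. The my_vcpu_* predicates are hypothetical stand-ins for the hypervisor's own state tracking:

static unsigned int my_guest_state(void)
{
	unsigned int state = 0;

	if (!my_vcpu_loaded())			/* hypothetical */
		return 0;

	state |= PERF_GUEST_ACTIVE;
	if (my_vcpu_in_user_mode())		/* hypothetical */
		state |= PERF_GUEST_USER;
	return state;
}

static unsigned long my_guest_get_ip(void)
{
	return my_vcpu_get_pc();		/* hypothetical */
}

static struct perf_guest_info_callbacks my_guest_cbs = {
	.state	= my_guest_state,
	.get_ip	= my_guest_get_ip,
};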
@@ -57,6 +60,8 @@ struct perf_guest_info_callbacks {
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
+#include <linux/static_call.h>
+#include <linux/lockdep.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -90,6 +95,11 @@ struct perf_raw_record {
u32 size;
};
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+ return frag->pad < sizeof(u64);
+}
+
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
@@ -129,6 +139,17 @@ struct hw_perf_event_extra {
};
/**
+ * hw_perf_event::flag values
+ *
+ * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
+ * usage.
+ */
+#define PERF_EVENT_FLAG_ARCH 0x000fffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+
+static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
+
+/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
@@ -166,7 +187,7 @@ struct hw_perf_event {
* creation and event initialization.
*/
struct arch_hw_breakpoint info;
- struct list_head bp_list;
+ struct rhlist_head bp_list;
};
#endif
struct { /* amd_iommu */
@@ -212,17 +233,26 @@ struct hw_perf_event {
*/
u64 sample_period;
- /*
- * The period we started this sample with.
- */
- u64 last_period;
+ union {
+ struct { /* Sampling */
+ /*
+ * The period we started this sample with.
+ */
+ u64 last_period;
- /*
- * However much is left of the current period; note that this is
- * a full 64bit value and allows for generation of periods longer
- * than hardware might allow.
- */
- local64_t period_left;
+ /*
+ * However much is left of the current period;
+ * note that this is a full 64bit value and
+ * allows for generation of periods longer
+ * than hardware might allow.
+ */
+ local64_t period_left;
+ };
+ struct { /* Topdown events counting for context switch */
+ u64 saved_metric;
+ u64 saved_slots;
+ };
+ };
/*
* State for throttling the event, see __perf_event_overflow() and
@@ -241,6 +271,7 @@ struct hw_perf_event {
};
struct perf_event;
+struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
@@ -251,18 +282,20 @@ struct perf_event;
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x01
-#define PERF_PMU_CAP_NO_NMI 0x02
-#define PERF_PMU_CAP_AUX_NO_SG 0x04
-#define PERF_PMU_CAP_EXTENDED_REGS 0x08
-#define PERF_PMU_CAP_EXCLUSIVE 0x10
-#define PERF_PMU_CAP_ITRACE 0x20
-#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
-#define PERF_PMU_CAP_NO_EXCLUDE 0x80
-#define PERF_PMU_CAP_AUX_OUTPUT 0x100
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
struct perf_output_handle;
+#define PMU_NULL_DEV ((void *)(~0UL))
+
/**
* struct pmu - generic performance monitoring unit
*/
@@ -271,6 +304,7 @@ struct pmu {
struct module *module;
struct device *dev;
+ struct device *parent;
const struct attribute_group **attr_groups;
const struct attribute_group **attr_update;
const char *name;
@@ -282,7 +316,7 @@ struct pmu {
int capabilities;
int __percpu *pmu_disable_count;
- struct perf_cpu_context __percpu *pmu_cpu_context;
+ struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -410,14 +444,15 @@ struct pmu {
/*
* Will return the value for perf_event_mmap_page::index for this event,
- * if no implementation is provided it will default to: event->hw.idx + 1.
+ * if no implementation is provided it will default to 0 (see
+ * perf_event_idx_default).
*/
int (*event_idx) (struct perf_event *event); /*optional */
/*
* context-switches callback
*/
- void (*sched_task) (struct perf_event_context *ctx,
+ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
bool sched_in);
/*
@@ -431,8 +466,8 @@ struct pmu {
* implementation and Perf core context switch handling callbacks for usage
* examples.
*/
- void (*swap_task_ctx) (struct perf_event_context *prev,
- struct perf_event_context *next);
+ void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc,
+ struct perf_event_pmu_context *next_epc);
/* optional */
/*
@@ -496,9 +531,10 @@ struct pmu {
/* optional */
/*
- * Filter events for PMU-specific reasons.
+ * Skip programming this PMU on the given CPU. Typically needed for
+ * big.LITTLE things.
*/
- int (*filter_match) (struct perf_event *event); /* optional */
+ bool (*filter) (struct pmu *pmu, int cpu); /* optional */
/*
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
@@ -576,9 +612,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SOFTWARE: Is a software event.
* PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
* from any CPU in the package where it is active.
+ * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
+ * cannot be a group leader. If an event with this flag is detached from the
+ * group it is scheduled out and moved into an unrecoverable ERROR state.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
+#define PERF_EV_CAP_SIBLING BIT(2)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -593,7 +633,10 @@ struct swevent_hlist {
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
#define PERF_ATTACH_ITRACE 0x10
+#define PERF_ATTACH_SCHED_CB 0x20
+#define PERF_ATTACH_CHILD 0x40
+struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
@@ -602,7 +645,23 @@ struct pmu_event_list {
struct list_head list;
};
+/*
+ * event->sibling_list is modified while holding both ctx->lock and ctx->mutex,
+ * as such iteration must hold either lock. However, since ctx->lock is an IRQ
+ * safe lock, and is only held by the CPU doing the modification, having IRQs
+ * disabled is sufficient since it will hold off the IPIs.
+ */
+#ifdef CONFIG_PROVE_LOCKING
+#define lockdep_assert_event_ctx(event) \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (this_cpu_read(hardirqs_enabled) && \
+ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+#define lockdep_assert_event_ctx(event)
+#endif
+
#define for_each_sibling_event(sibling, event) \
+ lockdep_assert_event_ctx(event); \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
@@ -645,7 +704,13 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
+ /*
+ * event->pmu always points to the pmu this event belongs to, whereas
+ * event->pmu_ctx->pmu may point to a different pmu when a group mixing
+ * events of several pmus is created.
+ */
struct pmu *pmu;
void *pmu_private;
@@ -664,16 +729,6 @@ struct perf_event {
u64 total_time_running;
u64 tstamp;
- /*
- * timestamp shadows the actual context timing but it can
- * be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in.
- *
- * ctx_time already accounts for ctx->timestamp. Therefore to
- * compute ctx_time for a sample, simply add perf_clock().
- */
- u64 shadow_ctx_time;
-
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
@@ -681,6 +736,12 @@ struct perf_event {
struct hw_perf_event hw;
struct perf_event_context *ctx;
+ /*
+ * event->pmu_ctx points to the perf_event_pmu_context the event was
+ * added to. For a software event this pmu_ctx may belong to a different
+ * pmu when the sw event is part of a group that also contains non-sw
+ * events.
+ */
+ struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/*
@@ -717,10 +778,14 @@ struct perf_event {
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
- int pending_wakeup;
- int pending_kill;
- int pending_disable;
- struct irq_work pending;
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
+ unsigned int pending_sigtrap;
+ unsigned long pending_addr; /* SIGTRAP */
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+ unsigned int pending_work;
atomic_t event_limit;
@@ -739,12 +804,15 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ atomic64_t lost_samples;
+
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
+ u64 bpf_cookie;
#endif
#ifdef CONFIG_EVENT_TRACING
@@ -763,22 +831,84 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+
+ /*
+ * Certain events get forwarded to another pmu internally by over-
+ * writing the kernel copy of event->attr.type without the user being
+ * aware of it. event->orig_type contains the original 'type'
+ * requested by the user.
+ */
+ __u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
+/*
+ * ,-----------------------[1:n]------------------------.
+ * V V
+ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
+ * | |
+ * `--[n:1]-> pmu <-[1:n]--'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+ * (similar to perf_event_context). Locking is as if it were a member of
+ * perf_event_context; specifically:
+ *
+ * modification, both: ctx->mutex && ctx->lock
+ * reading, either: ctx->mutex || ctx->lock
+ *
+ * There is one exception to this; namely put_pmu_ctx() isn't always called
+ * with ctx->mutex held; this means that as long as we can guarantee the epc
+ * has events the above rules hold.
+ *
+ * Specifically, sys_perf_event_open()'s group_leader case depends on
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore its
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
+ *
+ * perf_event holds a refcount on perf_event_context
+ * perf_event holds a refcount on perf_event_pmu_context
+ */
+struct perf_event_pmu_context {
+ struct pmu *pmu;
+ struct perf_event_context *ctx;
+
+ struct list_head pmu_ctx_entry;
+
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+
+ /* Used to avoid freeing per-cpu perf_event_pmu_context */
+ unsigned int embedded : 1;
+
+ unsigned int nr_events;
+ unsigned int nr_cgroups;
+
+ atomic_t refcount; /* event <-> epc */
+ struct rcu_head rcu_head;
+
+ void *task_ctx_data; /* pmu specific data */
+ /*
+ * Set when one or more (plausibly active) events can't be scheduled
+ * due to pmu overcommit or pmu constraints; tolerant to events that
+ * need not be active due to scheduling constraints, such as cgroups.
+ */
+ int rotate_necessary;
+};
struct perf_event_groups {
struct rb_root tree;
u64 index;
};
+
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
- struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -791,26 +921,21 @@ struct perf_event_context {
*/
struct mutex mutex;
- struct list_head active_ctx_list;
+ struct list_head pmu_ctx_list;
struct perf_event_groups pinned_groups;
struct perf_event_groups flexible_groups;
struct list_head event_list;
- struct list_head pinned_active;
- struct list_head flexible_active;
-
int nr_events;
- int nr_active;
+ int nr_user;
int is_active;
+
+ int nr_task_data;
int nr_stat;
int nr_freq;
int rotate_disable;
- /*
- * Set when nr_events != nr_active, except tolerant to events not
- * necessary to be active due to scheduling constraints, such as cgroups.
- */
- int rotate_necessary;
- refcount_t refcount;
+
+ refcount_t refcount; /* event <-> ctx */
struct task_struct *task;
/*
@@ -818,6 +943,7 @@ struct perf_event_context {
*/
u64 time;
u64 timestamp;
+ u64 timeoffset;
/*
* These fields let us detect when two contexts have both
@@ -830,8 +956,15 @@ struct perf_event_context {
#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
#endif
- void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
+
+ /*
+ * Sum (event->pending_sigtrap + event->pending_work)
+ *
+ * The SIGTRAP is targeted at ctx->task, as such ctx->task must not be
+ * changed until the signal is delivered.
+ */
+ local_t nr_pending;
};
/*
@@ -840,12 +973,13 @@ struct perf_event_context {
*/
#define PERF_NR_CONTEXTS 4
-/**
- * struct perf_event_cpu_context - per cpu event context structure
- */
-struct perf_cpu_context {
- struct perf_event_context ctx;
- struct perf_event_context *task_ctx;
+struct perf_cpu_pmu_context {
+ struct perf_event_pmu_context epc;
+ struct perf_event_pmu_context *task_epc;
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
+
int active_oncpu;
int exclusive;
@@ -853,16 +987,20 @@ struct perf_cpu_context {
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
+};
+
+/**
+ * struct perf_event_cpu_context - per cpu event context structure
+ */
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int online;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
- struct list_head cgrp_cpuctx_entry;
#endif
- struct list_head sched_cb_entry;
- int sched_cb_usage;
-
- int online;
/*
* Per-CPU storage for iterators used in visit_groups_merge. The default
* storage is of size 2 to hold the CPU and any CPU event iterators.
@@ -900,6 +1038,8 @@ struct bpf_perf_event_data_kern {
struct perf_cgroup_info {
u64 time;
u64 timestamp;
+ u64 timeoffset;
+ int active;
};
struct perf_cgroup {
@@ -924,6 +1064,8 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
#ifdef CONFIG_PERF_EVENTS
+extern struct perf_event_context *perf_cpu_task_ctx(void);
+
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
@@ -937,13 +1079,11 @@ extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
-extern int perf_num_counters(void);
-extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
-extern int perf_event_init_task(struct task_struct *child);
+extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
@@ -976,52 +1116,92 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
+
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+
+static inline bool branch_sample_counters(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
+}
+
+static inline bool branch_sample_call_stack(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
struct perf_sample_data {
/*
- * Fields set by perf_sample_data_init(), group so as to
- * minimize the cachelines touched.
+ * Fields set by perf_sample_data_init() unconditionally,
+ * group so as to minimize the cachelines touched.
*/
- u64 addr;
- struct perf_raw_record *raw;
- struct perf_branch_stack *br_stack;
+ u64 sample_flags;
u64 period;
- u64 weight;
- u64 txn;
- union perf_mem_data_src data_src;
+ u64 dyn_size;
/*
- * The other fields, optionally {set,used} by
- * perf_{prepare,output}_sample().
+ * Fields commonly set by __perf_event_header__init_id(),
+ * group so as to minimize the cachelines touched.
*/
u64 type;
- u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 id;
- u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
- struct perf_callchain_entry *callchain;
- u64 aux_size;
/*
- * regs_user may point to task_pt_regs or to regs_user_copy, depending
- * on arch details.
+ * The other fields, optionally {set,used} by
+ * perf_{prepare,output}_sample().
*/
- struct perf_regs regs_user;
- struct pt_regs regs_user_copy;
+ u64 ip;
+ struct perf_callchain_entry *callchain;
+ struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
+ u64 *br_stack_cntr;
+ union perf_sample_weight weight;
+ union perf_mem_data_src data_src;
+ u64 txn;
+ struct perf_regs regs_user;
struct perf_regs regs_intr;
u64 stack_user_size;
- u64 phys_addr;
+ u64 stream_id;
u64 cgroup;
+ u64 addr;
+ u64 phys_addr;
+ u64 data_page_size;
+ u64 code_page_size;
+ u64 aux_size;
} ____cacheline_aligned;
/* default value for data source */
@@ -1029,26 +1209,120 @@ struct perf_sample_data {
PERF_MEM_S(LVL, NA) |\
PERF_MEM_S(SNOOP, NA) |\
PERF_MEM_S(LOCK, NA) |\
- PERF_MEM_S(TLB, NA))
+ PERF_MEM_S(TLB, NA) |\
+ PERF_MEM_S(LVLNUM, NA))
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
/* remaining struct members initialized in perf_prepare_sample() */
- data->addr = addr;
- data->raw = NULL;
- data->br_stack = NULL;
+ data->sample_flags = PERF_SAMPLE_PERIOD;
data->period = period;
- data->weight = 0;
- data->data_src.val = PERF_MEM_NA;
- data->txn = 0;
+ data->dyn_size = 0;
+
+ if (addr) {
+ data->addr = addr;
+ data->sample_flags |= PERF_SAMPLE_ADDR;
+ }
+}
+
+static inline void perf_sample_save_callchain(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs)
+{
+ int size = 1;
+
+ data->callchain = perf_callchain(event, regs);
+ size += data->callchain->nr;
+
+ data->dyn_size += size * sizeof(u64);
+ data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+}
+
+static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
+ struct perf_raw_record *raw)
+{
+ struct perf_raw_frag *frag = &raw->frag;
+ u32 sum = 0;
+ int size;
+
+ do {
+ sum += frag->size;
+ if (perf_raw_frag_last(frag))
+ break;
+ frag = frag->next;
+ } while (1);
+
+ size = round_up(sum + sizeof(u32), sizeof(u64));
+ raw->size = size - sizeof(u32);
+ frag->pad = raw->size - sum;
+
+ data->raw = raw;
+ data->dyn_size += size;
+ data->sample_flags |= PERF_SAMPLE_RAW;
+}
+
+static inline void perf_sample_save_brstack(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct perf_branch_stack *brs,
+ u64 *brs_cntr)
+{
+ int size = sizeof(u64); /* nr */
+
+ if (branch_sample_hw_index(event))
+ size += sizeof(u64);
+ size += brs->nr * sizeof(struct perf_branch_entry);
+
+ /*
+ * The extension space for counters is appended after the
+ * struct perf_branch_stack. It is used to store the occurrences
+ * of events of each branch.
+ */
+ if (brs_cntr)
+ size += brs->nr * sizeof(u64);
+
+ data->br_stack = brs;
+ data->br_stack_cntr = brs_cntr;
+ data->dyn_size += size;
+ data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+}
+
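Taken together, these save helpers let a PMU driver assemble a sample incrementally before handing it to the core. A minimal sketch of an overflow path, assuming the driver already captured a branch stack (example_pmu_overflow and its brs argument are hypothetical, not part of the patch):

/* Hedged sketch, not a real driver: build a sample and deliver it. */
static void example_pmu_overflow(struct perf_event *event,
				 struct perf_branch_stack *brs,
				 struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	if (has_branch_stack(event))
		perf_sample_save_brstack(&data, event, brs, NULL);

	/* A nonzero return asks the driver to stop the event. */
	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}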
+static inline u32 perf_sample_data_size(struct perf_sample_data *data,
+ struct perf_event *event)
+{
+ u32 size = sizeof(struct perf_event_header);
+
+ size += event->header_size + event->id_header_size;
+ size += data->dyn_size;
+
+ return size;
+}
+
+/*
+ * Clear all bitfields in the perf_branch_entry.
+ * The to and from fields are not cleared because they are
+ * systematically modified by caller.
+ */
+static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
+{
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
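As a usage sketch (not tied to any particular driver), an architecture filling its branch buffer would clear the bitfields first and then set only the fields it actually knows:

/* Hedged sketch: fill one branch entry after clearing its bitfields. */
static void example_fill_branch(struct perf_branch_entry *br,
				u64 from, u64 to, bool mispred)
{
	perf_clear_branch_entry_bitfields(br);
	br->from	= from;
	br->to		= to;
	br->mispred	= mispred;
	br->predicted	= !mispred;
}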
extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
-extern void perf_prepare_sample(struct perf_event_header *header,
+extern void perf_prepare_sample(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs);
+extern void perf_prepare_header(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
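With the prototype split this way, an output path first finalizes the sample data and only then derives the header from it. A hedged sketch of the resulting sequence, mirroring what a generic output routine would do:

/* Hedged sketch: prepare-then-output sequence after the split. */
static int example_output(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;
	int err;

	perf_prepare_sample(data, event, regs);
	perf_prepare_header(&header, data, event, regs);

	err = perf_output_begin(&handle, data, event, header.size);
	if (err)
		return err;

	perf_output_sample(&handle, &header, data, event);
	perf_output_end(&handle);
	return 0;
}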
@@ -1068,15 +1342,31 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-is_default_overflow_handler(struct perf_event *event)
+__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
{
- if (likely(event->overflow_handler == perf_event_output_forward))
+ if (likely(overflow_handler == perf_event_output_forward))
return true;
- if (unlikely(event->overflow_handler == perf_event_output_backward))
+ if (unlikely(overflow_handler == perf_event_output_backward))
return true;
return false;
}
+#define is_default_overflow_handler(event) \
+ __is_default_overflow_handler((event)->overflow_handler)
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline bool uses_default_overflow_handler(struct perf_event *event)
+{
+ if (likely(is_default_overflow_handler(event)))
+ return true;
+
+ return __is_default_overflow_handler(event->orig_overflow_handler);
+}
+#else
+#define uses_default_overflow_handler(event) \
+ is_default_overflow_handler(event)
+#endif
+
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
@@ -1116,7 +1406,7 @@ static inline int is_software_event(struct perf_event *event)
*/
static inline int in_software_context(struct perf_event *event)
{
- return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+ return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
}
static inline int is_exclusive_pmu(struct pmu *pmu)
@@ -1166,30 +1456,24 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
* which is guaranteed by us not actually scheduling inside other swevents
* because those disable preemption.
*/
-static __always_inline void
-perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
- if (static_key_false(&perf_swevent_enabled[event_id])) {
- struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
- perf_fetch_caller_regs(regs);
- ___perf_sw_event(event_id, nr, regs, addr);
- }
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(event_id, nr, regs, addr);
}
extern struct static_key_false perf_sched_events;
-static __always_inline bool
-perf_sw_migrate_enabled(void)
+static __always_inline bool __perf_sw_enabled(int swevt)
{
- if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
- return true;
- return false;
+ return static_key_false(&perf_swevent_enabled[swevt]);
}
static inline void perf_event_task_migrate(struct task_struct *task)
{
- if (perf_sw_migrate_enabled())
+ if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
task->sched_migrated = 1;
}
@@ -1199,11 +1483,9 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
- if (perf_sw_migrate_enabled() && task->sched_migrated) {
- struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
-
- perf_fetch_caller_regs(regs);
- ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
+ task->sched_migrated) {
+ __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
task->sched_migrated = 0;
}
}
@@ -1211,7 +1493,15 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
- perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+ if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
+ __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+
+#ifdef CONFIG_CGROUP_PERF
+ if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
+ perf_cgroup_from_task(prev, NULL) !=
+ perf_cgroup_from_task(next, NULL))
+ __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
+#endif
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
@@ -1225,9 +1515,32 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
-extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
-extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+#ifdef CONFIG_GUEST_PERF_EVENTS
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
+DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+
+static inline unsigned int perf_guest_state(void)
+{
+ return static_call(__perf_guest_state)();
+}
+static inline unsigned long perf_guest_get_ip(void)
+{
+ return static_call(__perf_guest_get_ip)();
+}
+static inline unsigned int perf_guest_handle_intel_pt_intr(void)
+{
+ return static_call(__perf_guest_handle_intel_pt_intr)();
+}
+extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+#else
+static inline unsigned int perf_guest_state(void) { return 0; }
+static inline unsigned long perf_guest_get_ip(void) { return 0; }
+static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
+#endif /* CONFIG_GUEST_PERF_EVENTS */
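For illustration, a hypervisor registers a single long-lived callbacks structure; the static calls above are then patched to dispatch into it. A hedged sketch with placeholder callback bodies:

/* Hedged sketch: registering guest-state callbacks, KVM-style. */
static unsigned int example_guest_state(void)   { return 0; }
static unsigned long example_guest_get_ip(void) { return 0; }

static struct perf_guest_info_callbacks example_guest_cbs = {
	.state	= example_guest_state,
	.get_ip	= example_guest_get_ip,
};

static int __init example_guest_init(void)
{
	perf_register_guest_info_callbacks(&example_guest_cbs);
	return 0;
}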
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
@@ -1245,7 +1558,6 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
-extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
@@ -1286,7 +1598,7 @@ extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_proc_update_handler(struct ctl_table *table, int write,
+int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
@@ -1386,13 +1698,17 @@ perf_event_addr_filters(struct perf_event *event)
}
extern void perf_event_addr_filters_sync(struct perf_event *event);
+extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
- struct perf_event *event,
- unsigned int size);
+ struct perf_sample_data *data,
+ struct perf_event *event,
+ unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
@@ -1435,7 +1751,8 @@ perf_event_task_sched_in(struct task_struct *prev,
static inline void
perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next) { }
-static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline int perf_event_init_task(struct task_struct *child,
+ u64 clone_flags) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
@@ -1464,15 +1781,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
-perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
-static inline void
perf_bp_event(struct perf_event *event, void *data) { }
-static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
@@ -1515,11 +1825,6 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
-{
- return frag->pad < sizeof(u64);
-}
-
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
@@ -1535,6 +1840,18 @@ struct perf_pmu_events_ht_attr {
const char *event_str_noht;
};
+struct perf_pmu_events_hybrid_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
+};
+
+struct perf_pmu_format_hybrid_attr {
+ struct device_attribute attr;
+ u64 pmu_type;
+};
+
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
@@ -1551,7 +1868,13 @@ static struct perf_pmu_events_attr _var = { \
.event_str = _str, \
};
-#define PMU_FORMAT_ATTR(_name, _format) \
+#define PMU_EVENT_ATTR_ID(_name, _show, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, _show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
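A driver can then build its sysfs events array from such entries, using the perf_event_sysfs_show() helper declared below; a hedged sketch (the id value is arbitrary):

/* Hedged sketch: one id-based event attribute in an attribute array. */
static struct attribute *example_pmu_event_attrs[] = {
	PMU_EVENT_ATTR_ID(cycles, perf_event_sysfs_show, 0x11),
	NULL,
};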
+#define PMU_FORMAT_ATTR_SHOW(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -1560,6 +1883,9 @@ _name##_show(struct device *dev, \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
+
+#define PMU_FORMAT_ATTR(_name, _format) \
+ PMU_FORMAT_ATTR_SHOW(_name, _format) \
\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
@@ -1572,8 +1898,36 @@ int perf_event_exit_cpu(unsigned int cpu);
#define perf_event_exit_cpu NULL
#endif
-extern void __weak arch_perf_update_userpage(struct perf_event *event,
- struct perf_event_mmap_page *userpg,
- u64 now);
+extern void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg,
+ u64 now);
+
+/*
+ * Snapshot branch stack on software events.
+ *
+ * Branch stack can be very useful in understanding software events. For
+ * example, when a long function, e.g. sys_perf_event_open, returns an
+ * errno, it is not obvious why the function failed. Branch stack could
+ * provide very helpful information in this type of scenario.
+ *
+ * On a software event, it is necessary to stop the hardware branch recorder
+ * quickly. Otherwise, the hardware registers/buffer will be flushed with
+ * entries of the triggering event itself. Therefore, a static call is used
+ * to stop the hardware recorder.
+ */
+
+/*
+ * cnt is the number of slots allocated in the entries array.
+ * Return the number of entries copied into the entries array.
+ */
+typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
+ unsigned int cnt);
+DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
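A consumer invokes the static call directly, so the recorder is stopped with as little skid as possible; a hedged sketch:

/* Hedged sketch: snapshot branches from a software-event path. */
static int example_snapshot_branches(struct perf_branch_entry *entries,
				     unsigned int cnt)
{
	/* Returns how many of the cnt preallocated slots were filled. */
	return static_call(perf_snapshot_branch_stack)(entries, cnt);
}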
+
+#ifndef PERF_NEEDS_LOPWR_CB
+static inline void perf_lopwr_cb(bool mode)
+{
+}
+#endif
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/perf_event_api.h b/include/linux/perf_event_api.h
new file mode 100644
index 000000000000..c2fd6048b790
--- /dev/null
+++ b/include/linux/perf_event_api.h
@@ -0,0 +1 @@
+#include <linux/perf_event.h>
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 2d12e97d5e7b..f632c5725f16 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -20,8 +20,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy);
+ struct pt_regs *regs);
#else
#define PERF_REG_EXTENDED_MASK 0
@@ -42,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task)
}
static inline void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e8cbc2e795d5..85fc7554cd52 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -5,6 +5,9 @@
#include <linux/pfn.h>
#include <asm/pgtable.h>
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
@@ -12,6 +15,7 @@
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
+#include <linux/page_table_check.h>
#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
@@ -29,6 +33,24 @@
#endif
/*
+ * This defines the first usable user address. Platforms
+ * can override its value with custom FIRST_USER_ADDRESS
+ * defined in their respective <asm/pgtable.h>.
+ */
+#ifndef FIRST_USER_ADDRESS
+#define FIRST_USER_ADDRESS 0UL
+#endif
+
+/*
+ * This defines the generic helper for accessing PMD page
+ * table page. Although platforms can still override this
+ * via their respective <asm/pgtable.h>.
+ */
+#ifndef pmd_pgtable
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+/*
 * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
*
* The pXx_index() functions return the index of the entry in the page
@@ -74,21 +96,31 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
#define pte_offset_kernel pte_offset_kernel
#endif
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte))
+#ifdef CONFIG_HIGHPTE
+#define __pte_map(pmd, address) \
+ ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
+#define pte_unmap(pte) do { \
+ kunmap_local((pte)); \
+ rcu_read_unlock(); \
+} while (0)
#else
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte)) /* NOP */
+static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline void pte_unmap(pte_t *pte)
+{
+ rcu_read_unlock();
+}
#endif
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+ return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif
@@ -96,7 +128,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+ return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif
@@ -145,6 +177,108 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
+
+#ifndef pte_batch_hint
+/**
+ * pte_batch_hint - Number of pages that can be added to batch without scanning.
+ * @ptep: Page table pointer for the entry.
+ * @pte: Page table entry.
+ *
+ * Some architectures know that a set of contiguous ptes all map the same
+ * contiguous memory with the same permissions. In this case, the architecture
+ * can provide a hint to aid pte batching without the core code needing to
+ * scan every pte.
+ *
+ * An architecture implementation may ignore the PTE accessed state. Further,
+ * the dirty state must apply atomically to all the PTEs described by the hint.
+ *
+ * May be overridden by the architecture, else pte_batch_hint is always 1.
+ */
+static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
+{
+ return 1;
+}
+#endif
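A caller scanning a PTE range can use the hint to skip ahead in architecture-sized steps; a minimal sketch, assuming the page table lock is held and the range stays within one PMD:

/* Hedged sketch: walk a PTE range in batches suggested by the hint. */
static void example_walk_ptes(pte_t *ptep, unsigned long addr,
			      unsigned int nr)
{
	while (nr) {
		pte_t pte = ptep_get(ptep);
		unsigned int step = min(pte_batch_hint(ptep, pte), nr);

		/* ... operate on [addr, addr + step * PAGE_SIZE) ... */

		ptep += step;
		addr += step * PAGE_SIZE;
		nr -= step;
	}
}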
+
+#ifndef pte_advance_pfn
+static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
+{
+ return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
+}
+#endif
+
+#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
+
+#ifndef set_ptes
+/**
+ * set_ptes - Map consecutive pages to a contiguous range of addresses.
+ * @mm: Address space to map the pages into.
+ * @addr: Address to map the first page at.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @nr: Number of pages to map.
+ *
+ * When nr==1, the initial state of the pte may be present or not present, and
+ * the new state may be present or not present. When nr>1, the initial state of
+ * all ptes must be not present, and the new state must be present.
+ *
+ * May be overridden by the architecture, or the architecture can define
+ * set_pte() and PFN_PTE_SHIFT.
+ *
+ * Context: The caller holds the page table lock. The pages all belong
+ * to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pte, nr);
+
+ arch_enter_lazy_mmu_mode();
+ for (;;) {
+ set_pte(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = pte_next_pfn(pte);
+ }
+ arch_leave_lazy_mmu_mode();
+}
+#endif
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
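A caller mapping a whole folio can therefore replace a loop of per-page set_pte_at() calls with a single set_ptes(); a hedged sketch using the generic mk_pte() and folio_nr_pages() helpers:

/* Hedged sketch: map an entire folio with one set_ptes() call. */
static void example_map_folio(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, struct folio *folio,
			      pgprot_t prot)
{
	pte_t pte = mk_pte(&folio->page, prot);

	/* PTL held; all nr entries start out not-present. */
	set_ptes(mm, addr, ptep, pte, folio_nr_pages(folio));
}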
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -177,12 +311,47 @@ static inline int pudp_set_access_flags(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#endif
+
+#ifndef pmdp_get
+static inline pmd_t pmdp_get(pmd_t *pmdp)
+{
+ return READ_ONCE(*pmdp);
+}
+#endif
+
+#ifndef pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
+{
+ return READ_ONCE(*pudp);
+}
+#endif
+
+#ifndef p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+ return READ_ONCE(*p4dp);
+}
+#endif
+
+#ifndef pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+ return READ_ONCE(*pgdp);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
int r = 1;
if (!pte_young(pte))
r = 0;
@@ -193,7 +362,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
@@ -214,7 +383,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
BUILD_BUG();
return 0;
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -240,21 +409,142 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the accessed bit should override it.
+ */
+static inline bool arch_has_hw_pte_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
+}
+#endif
+
+#ifndef arch_check_zapped_pte
+static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
+ pte_t pte)
+{
+}
+#endif
+
+#ifndef arch_check_zapped_pmd
+static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
pte_t *ptep)
{
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
+ page_table_check_pte_clear(mm, pte);
return pte;
}
#endif
-#ifndef __HAVE_ARCH_PTEP_GET
-static inline pte_t ptep_get(pte_t *ptep)
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ ptep_get_and_clear(mm, addr, ptep);
+}
+
+#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
+/*
+ * For walking the pagetables without holding any locks. Some architectures
+ * (eg x86-32 PAE) cannot load the entries atomically without using expensive
+ * instructions. We are guaranteed that a PTE will only either go from not
+ * present to present, or present to not present -- it will not switch to a
+ * completely different present page without a TLB flush in between, which we
+ * are blocking by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
+ * We load pte_high *after* loading pte_low, which ensures we don't see an older
+ * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
+ * picked up a changed pte high. We might have gotten rubbish values from
+ * pte_low and pte_high, but we are guaranteed that pte_low will not have the
+ * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
+ * operates on present ptes we're safe.
+ */
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ pte_t pte;
+
+ do {
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ } while (unlikely(pte.pte_low != ptep->pte_low));
+
+ return pte;
+}
+#define ptep_get_lockless ptep_get_lockless
+
+#if CONFIG_PGTABLE_LEVELS > 2
+static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
+{
+ pmd_t pmd;
+
+ do {
+ pmd.pmd_low = pmdp->pmd_low;
+ smp_rmb();
+ pmd.pmd_high = pmdp->pmd_high;
+ smp_rmb();
+ } while (unlikely(pmd.pmd_low != pmdp->pmd_low));
+
+ return pmd;
+}
+#define pmdp_get_lockless pmdp_get_lockless
+#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
+
+/*
+ * We require that the PTE can be read atomically.
+ */
+#ifndef ptep_get_lockless
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ return ptep_get(ptep);
+}
+#endif
+
+#ifndef pmdp_get_lockless
+static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
+{
+ return pmdp_get(pmdp);
+}
+static inline void pmdp_get_lockless_sync(void)
{
- return READ_ONCE(*ptep);
}
#endif
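The net effect is that lockless walkers such as GUP-fast can read an entry safely with interrupts disabled, since disabling interrupts blocks the TLB flush IPI that a concurrent unmap must issue; a hedged sketch of that pattern:

/* Hedged sketch: lockless PTE read, GUP-fast style. */
static pte_t example_read_pte_lockless(pte_t *ptep)
{
	unsigned long flags;
	pte_t pte;

	local_irq_save(flags);
	pte = ptep_get_lockless(ptep);
	local_irq_restore(flags);

	return pte;
}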
@@ -265,7 +555,10 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
+
pmd_clear(pmdp);
+ page_table_check_pmd_clear(mm, pmd);
+
return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
@@ -277,6 +570,8 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
+ page_table_check_pud_clear(mm, pud);
+
return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
@@ -293,11 +588,11 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
int full)
{
- return pudp_huge_get_and_clear(mm, address, pudp);
+ return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -307,12 +602,80 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
int full)
{
- pte_t pte;
- pte = ptep_get_and_clear(mm, address, ptep);
+ return ptep_get_and_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef get_and_clear_full_ptes
+/**
+ * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
+ * the same folio, collecting dirty/accessed bits.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ * @full: Whether we are clearing a full mm.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
+ * returned PTE.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, unsigned int nr, int full)
+{
+ pte_t pte, tmp_pte;
+
+ pte = ptep_get_and_clear_full(mm, addr, ptep, full);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
return pte;
}
#endif
+#ifndef clear_full_ptes
+/**
+ * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
+ * folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ * @full: Whether we are clearing a full mm.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_get_and_clear_full().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr, int full)
+{
+ for (;;) {
+ ptep_get_and_clear_full(mm, addr, ptep, full);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
/*
* If two threads concurrently fault at the same page, the thread that
@@ -360,18 +723,63 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
pud_t *pudp);
#endif
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte_mkwrite_novma(pte);
+}
+#endif
+
+#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ return pmd_mkwrite_novma(pmd);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
- pte_t old_pte = *ptep;
+ pte_t old_pte = ptep_get(ptep);
set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
+#ifndef wrprotect_ptes
+/**
+ * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
+ * folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to write-protect.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_set_wrprotect().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ for (;;) {
+ ptep_set_wrprotect(mm, addr, ptep);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
+
/*
* On some architectures hardware does not set page access bit when accessing
- * memory page, it is responsibilty of software setting this bit. It brings
+ * memory page, it is responsibility of software setting this bit. It brings
* out extra page fault penalty to track page access bit. For optimization page
* access bit can be set during all page fault flow on these arches.
* To be differentiate with macro pte_mkyoung, this macro is used on platforms
@@ -385,30 +793,6 @@ static inline pte_t pte_sw_mkyoung(pte_t pte)
#define pte_sw_mkyoung pte_sw_mkyoung
#endif
-#ifndef pte_savedwrite
-#define pte_savedwrite pte_write
-#endif
-
-#ifndef pte_mk_savedwrite
-#define pte_mk_savedwrite pte_mkwrite
-#endif
-
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
-#ifndef pmd_savedwrite
-#define pmd_savedwrite pmd_write
-#endif
-
-#ifndef pmd_mk_savedwrite
-#define pmd_mk_savedwrite pmd_mkwrite
-#endif
-
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
-#endif
-
#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
@@ -427,6 +811,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
@@ -440,6 +825,7 @@ static inline void pudp_set_wrprotect(struct mm_struct *mm,
{
BUILD_BUG();
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif
@@ -468,11 +854,15 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* This is an implementation of pmdp_establish() that is only suitable for an
* architecture that doesn't have hardware dirty/accessed bits. In this case we
- * can't race with CPU which sets these bits and non-atomic aproach is fine.
+ * can't race with CPU which sets these bits and non-atomic approach is fine.
*/
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
@@ -488,6 +878,26 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#endif
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
+
+/*
+ * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
+ * hugepage mapping in the page tables. This function is similar to
+ * pmdp_invalidate(), but should only be used if the access and dirty bits
+ * would not be cleared by the software in the new PMD value. The function
+ * ensures that hardware updates of the access and dirty bits are not lost.
+ *
+ * On certain architectures, doing so can avoid a TLB flush in most cases.
+ * Another TLB flush might still be necessary later if the PMD update itself
+ * requires one (e.g., if protection was made stricter). Even then, the caller
+ * may be able to batch these TLB flush operations, so fewer flushes are
+ * needed overall.
+ */
+extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif
+
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
@@ -538,11 +948,14 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
}
+#endif
+#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
+#define pud_same pud_same
#endif
#ifndef __HAVE_ARCH_P4D_SAME
@@ -633,6 +1046,34 @@ static inline int arch_unmap_one(struct mm_struct *mm,
}
#endif
+/*
+ * Allow architectures to preserve additional metadata associated with
+ * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
+ * prototypes must be defined in the arch-specific asm/pgtable.h file.
+ */
+#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_RESTORE
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
@@ -646,7 +1087,7 @@ static inline int arch_unmap_one(struct mm_struct *mm,
#endif
#ifndef flush_tlb_fix_spurious_fault
-#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif
/*
@@ -777,7 +1218,7 @@ static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
* updates, but to prevent any updates it may make from being lost.
*
* This does not protect against other software modifications of the
- * pte; the appropriate pte lock must be held over the transation.
+ * pte; the appropriate pte lock must be held over the transaction.
*
* Note that this interface is intended to be batchable, meaning that
* ptep_modify_prot_commit may not actually update the pte, but merely
@@ -829,6 +1270,10 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
#define pgprot_device pgprot_noncached
#endif
+#ifndef pgprot_mhp
+#define pgprot_mhp(prot) (prot)
+#endif
+
#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
@@ -854,27 +1299,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#endif
/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
* A facility to provide batching of the reload of page tables and
* other process state with the actual context switch code for
* paravirtualized guests. By convention, only one of the batched
@@ -1010,14 +1434,16 @@ static inline int track_pfn_copy(struct vm_area_struct *vma)
* can be for the entire vma (in which case pfn, size are zero).
*/
static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
+ unsigned long pfn, unsigned long size,
+ bool mm_wr_locked)
{
}
/*
- * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ * untrack_pfn_clear is called while mremapping a pfnmap for a new region
+ * or fails to copy pgtable during duplicate vm area.
*/
-static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+static inline void untrack_pfn_clear(struct vm_area_struct *vma)
{
}
#else
@@ -1028,10 +1454,11 @@ extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
-extern void untrack_pfn_moved(struct vm_area_struct *vma);
+ unsigned long size, bool mm_wr_locked);
+extern void untrack_pfn_clear(struct vm_area_struct *vma);
#endif
+#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
@@ -1055,6 +1482,17 @@ static inline unsigned long my_zero_pfn(unsigned long addr)
return zero_pfn;
}
#endif
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ return 0;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
#ifdef CONFIG_MMU
@@ -1096,17 +1534,17 @@ static inline int pgd_devmap(pgd_t pgd)
#endif
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
return 0;
}
#endif
-/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
-static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+static inline int pud_trans_unstable(pud_t *pud)
{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
@@ -1115,130 +1553,22 @@ static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
pud_clear_bad(pud);
return 1;
}
- return 0;
-}
-
-/* See pmd_trans_unstable for discussion. */
-static inline int pud_trans_unstable(pud_t *pud)
-{
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
-#else
- return 0;
-#endif
-}
-
-#ifndef pmd_read_atomic
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
- /*
- * Depend on compiler for an atomic pmd read. NOTE: this is
- * only going to work, if the pmdval_t isn't larger than
- * an unsigned long.
- */
- return *pmdp;
-}
#endif
-
-#ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
-#endif
-/*
- * This function is meant to be used by sites walking pagetables with
- * the mmap_lock held in read mode to protect against MADV_DONTNEED and
- * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
- * into a null pmd and the transhuge page fault can convert a null pmd
- * into an hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_lock in read mode the pmd becomes
- * stable and stops changing under us only if it's not null and not a
- * transhuge pmd. When those races occurs and this function makes a
- * difference vs the standard pmd_none_or_clear_bad, the result is
- * undefined so behaving like if the pmd was none is safe (because it
- * can return none anyway). The compiler level barrier() is critically
- * important to compute the two checks atomically on the same pmdval.
- *
- * For 32bit kernels with a 64bit large pmd_t this automatically takes
- * care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_lock is hold for reading by the
- * caller (a special atomic read not done by "gcc" as in the generic
- * version above, is also needed when THP is disabled because the page
- * fault can populate the pmd from under us).
- */
-static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
-{
- pmd_t pmdval = pmd_read_atomic(pmd);
- /*
- * The barrier will stabilize the pmdval in a register or on
- * the stack so that it will stop changing under the code.
- *
- * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
- * pmd_read_atomic is allowed to return a not atomic pmdval
- * (for example pointing to an hugepage that has never been
- * mapped in the pmd). The below checks will only care about
- * the low part of the pmd with 32bit PAE x86 anyway, with the
- * exception of pmd_none(). So the important thing is that if
- * the low part of the pmd is found null, the high part will
- * be also null or the pmd_none() check below would be
- * confused.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- barrier();
-#endif
- /*
- * !pmd_present() checks for pmd migration entries
- *
- * The complete check uses is_pmd_migration_entry() in linux/swapops.h
- * But using that requires moving current function and pmd_trans_unstable()
- * to linux/swapops.h to resovle dependency, which is too much code move.
- *
- * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
- * because !pmd_present() pages can only be under migration not swapped
- * out.
- *
- * pmd_none() is preseved for future condition checks on pmd migration
- * entries and not confusing with this function name, although it is
- * redundant with !pmd_present().
- */
- if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
- (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
- return 1;
- if (unlikely(pmd_bad(pmdval))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-/*
- * This is a noop if Transparent Hugepage Support is not built into
- * the kernel. Otherwise it is equivalent to
- * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
- * places that already verified the pmd is not none and they want to
- * walk ptes while holding the mmap sem in read mode (write mode don't
- * need this). If THP is not enabled, the pmd can't go away under the
- * code even if MADV_DONTNEED runs, but if THP is enabled we need to
- * run a pmd_trans_unstable before walking the ptes after
- * split_huge_pmd returns (because it may have run when the pmd become
- * null, but then a page fault can map in a THP and not a regular page).
- */
-static inline int pmd_trans_unstable(pmd_t *pmd)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return pmd_none_or_trans_huge_or_clear_bad(pmd);
-#else
return 0;
-#endif
}
#ifndef CONFIG_NUMA_BALANCING
/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
- * the only case the kernel cares is for NUMA balancing and is only ever set
- * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by default, implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
+ * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
+ * perfectly valid to indicate "no" in that case, which is why the default
+ * implementation answers "always no".
+ *
+ * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
+ * page protection due to NUMA hinting. NUMA hinting faults only apply in
+ * accessible VMAs.
+ *
+ * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
+ * looking at the VMA accessibility is sufficient.
*/
static inline int pte_protnone(pte_t pte)
{
@@ -1257,16 +1587,13 @@ static inline int pmd_protnone(pmd_t pmd)
#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
-int p4d_clear_huge(p4d_t *p4d);
+void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
@@ -1289,10 +1616,7 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
return 0;
@@ -1399,18 +1723,26 @@ typedef unsigned int pgtbl_mod_mask;
#endif /* !__ASSEMBLY__ */
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
+#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+/*
+ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
+ * with physical address space extension, but falls back to
+ * BITS_PER_LONG otherwise.
+ */
+#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
+#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
#endif
#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
+#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
+#ifndef has_transparent_pud_hugepage
+#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+#endif
/*
* On some architectures it depends on the mm if the p4d/pud or pmd
* layer of the page table hierarchy is folded or not.
@@ -1427,6 +1759,16 @@ typedef unsigned int pgtbl_mod_mask;
#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
#endif
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
/*
* p?d_leaf() - true if this entry is a final mapping to a physical address.
* This differs from p?d_huge() by the fact that they are always available (if
@@ -1435,16 +1777,82 @@ typedef unsigned int pgtbl_mod_mask;
* Only meaningful when called on a valid entry.
*/
#ifndef pgd_leaf
-#define pgd_leaf(x) 0
+#define pgd_leaf(x) false
#endif
#ifndef p4d_leaf
-#define p4d_leaf(x) 0
+#define p4d_leaf(x) false
#endif
#ifndef pud_leaf
-#define pud_leaf(x) 0
+#define pud_leaf(x) false
#endif
#ifndef pmd_leaf
-#define pmd_leaf(x) 0
+#define pmd_leaf(x) false
#endif
+#ifndef pgd_leaf_size
+#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
+#endif
+#ifndef p4d_leaf_size
+#define p4d_leaf_size(x) P4D_SIZE
+#endif
+#ifndef pud_leaf_size
+#define pud_leaf_size(x) PUD_SIZE
+#endif
+#ifndef pmd_leaf_size
+#define pmd_leaf_size(x) PMD_SIZE
+#endif
+#ifndef pte_leaf_size
+#define pte_leaf_size(x) PAGE_SIZE
+#endif
+
+/*
+ * Some architectures have MMUs that are configurable or selectable at boot
+ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
+ * helps to have a static maximum value.
+ */
+
+#ifndef MAX_PTRS_PER_PTE
+#define MAX_PTRS_PER_PTE PTRS_PER_PTE
+#endif
+
+#ifndef MAX_PTRS_PER_PMD
+#define MAX_PTRS_PER_PMD PTRS_PER_PMD
+#endif
+
+#ifndef MAX_PTRS_PER_PUD
+#define MAX_PTRS_PER_PUD PTRS_PER_PUD
+#endif
+
+#ifndef MAX_PTRS_PER_P4D
+#define MAX_PTRS_PER_P4D PTRS_PER_P4D
+#endif
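For example, early page tables that must be allocated before the runtime MMU configuration is known can be sized with the maxima; a hedged sketch:

/* Hedged sketch: a static table valid for any runtime MMU configuration. */
static pte_t example_early_pte_table[MAX_PTRS_PER_PTE] __page_aligned_bss;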
+
+/* Description of the effects of mapping type and prot in the current
+ * implementation.
+ * this is due to the limited x86 page protection hardware. The expected
+ * behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ * r: (no) no
+ * w: (no) no
+ * x: (yes) yes
+ */
+#define DECLARE_VM_GET_PAGE_PROT \
+pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+{ \
+ return protection_map[vm_flags & \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
+} \
+EXPORT_SYMBOL(vm_get_page_prot);
+
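An architecture with no special protection handling then only has to provide its protection_map[] and expand the macro; a hedged sketch (PAGE_NONE and PAGE_READONLY stand in for the arch's own protection constants):

/* Hedged sketch: arch code adopting the generic vm_get_page_prot(). */
pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]	= PAGE_NONE,
	[VM_READ]	= PAGE_READONLY,
	/* ... the remaining VM_WRITE/VM_EXEC/VM_SHARED combinations ... */
};

DECLARE_VM_GET_PAGE_PROT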
#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/pgtable_api.h b/include/linux/pgtable_api.h
new file mode 100644
index 000000000000..ff367a4ba8c4
--- /dev/null
+++ b/include/linux/pgtable_api.h
@@ -0,0 +1 @@
+#include <linux/pgtable.h>
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3a09d2bf69ea..3f68b8239bb1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -14,6 +14,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/leds.h>
#include <linux/linkmode.h>
#include <linux/netlink.h>
#include <linux/mdio.h>
@@ -29,6 +30,7 @@
#include <linux/refcount.h>
#include <linux/atomic.h>
+#include <net/eee.h>
#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
@@ -45,44 +47,91 @@
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
+#define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features)
extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
extern const int phy_all_ports_features_array[7];
extern const int phy_10_100_features_array[4];
-extern const int phy_basic_t1_features_array[2];
+extern const int phy_basic_t1_features_array[3];
+extern const int phy_basic_t1s_p2mp_features_array[2];
extern const int phy_gbit_features_array[2];
extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
- * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
- * the attached driver handles the interrupt
+ * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if
+ * the attached MAC driver handles the interrupt
*/
#define PHY_POLL -1
-#define PHY_IGNORE_INTERRUPT -2
+#define PHY_MAC_INTERRUPT -2
#define PHY_IS_INTERNAL 0x00000001
#define PHY_RST_AFTER_CLK_EN 0x00000002
#define PHY_POLL_CABLE_TEST 0x00000004
+#define PHY_ALWAYS_CALL_SUSPEND 0x00000008
#define MDIO_DEVICE_IS_PHY 0x80000000
-/* Interface Mode definitions */
+/**
+ * enum phy_interface_t - Interface Mode definitions
+ *
+ * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
+ * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
+ * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
+ * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role
+ * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
+ * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
+ * @PHY_INTERFACE_MODE_RTBI: Reduced TBI
+ * @PHY_INTERFACE_MODE_SMII: Serial MII
+ * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII
+ * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
+ * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
+ * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR
+ * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
+ * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR
+ * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
+ * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
+ * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_MAX: Bookkeeping
+ *
+ * Describes the interface between the MAC and PHY.
+ */
typedef enum {
PHY_INTERFACE_MODE_NA,
PHY_INTERFACE_MODE_INTERNAL,
@@ -92,6 +141,7 @@ typedef enum {
PHY_INTERFACE_MODE_TBI,
PHY_INTERFACE_MODE_REVMII,
PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_REVRMII,
PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_RGMII_ID,
PHY_INTERFACE_MODE_RGMII_RXID,
@@ -101,30 +151,62 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_PSGMII,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_100BASEX,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_5GBASER,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_25GBASER,
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_QUSGMII,
+ PHY_INTERFACE_MODE_1000BASEKX,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
-/**
- * phy_supported_speeds - return all speeds currently supported by a phy device
- * @phy: The phy device to return supported speeds of.
- * @speeds: buffer to store supported speeds in.
- * @size: size of speeds buffer.
- *
- * Description: Returns the number of supported speeds, and fills
- * the speeds buffer with the supported speeds. If speeds buffer is
- * too small to contain all currently supported speeds, will return as
- * many speeds as can fit.
+/* PHY interface mode bitmap handling */
+#define DECLARE_PHY_INTERFACE_MASK(name) \
+ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX)
+
+static inline void phy_interface_zero(unsigned long *intf)
+{
+ bitmap_zero(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline bool phy_interface_empty(const unsigned long *intf)
+{
+ return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_set_rgmii(unsigned long *intf)
+{
+ __set_bit(PHY_INTERFACE_MODE_RGMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf);
+}
+
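As a usage sketch, a MAC driver might build the set of interface modes it supports with the helpers above (illustrative; the bitmap and function names are assumptions):

/* Sketch: describe the interfaces a hypothetical MAC supports */
static DECLARE_PHY_INTERFACE_MASK(example_interfaces);

static void example_fill_interfaces(void)
{
	phy_interface_zero(example_interfaces);
	phy_interface_set_rgmii(example_interfaces);	/* all four RGMII variants */
	__set_bit(PHY_INTERFACE_MODE_SGMII, example_interfaces);
}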
+/*
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
*/
unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
@@ -134,9 +216,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
* phy_modes - map phy_interface_t enum to device tree binding of phy-mode
* @interface: enum phy_interface_t value
*
- * Description: maps 'enum phy_interface_t' defined in this file
+ * Description: maps enum &phy_interface_t defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
+ * device driver can get PHY interface from device tree.
*/
static inline const char *phy_modes(phy_interface_t interface)
{
@@ -157,6 +239,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rev-mii";
case PHY_INTERFACE_MODE_RMII:
return "rmii";
+ case PHY_INTERFACE_MODE_REVRMII:
+ return "rev-rmii";
case PHY_INTERFACE_MODE_RGMII:
return "rgmii";
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -175,30 +259,41 @@ static inline const char *phy_modes(phy_interface_t interface)
return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
+ case PHY_INTERFACE_MODE_PSGMII:
+ return "psgmii";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
case PHY_INTERFACE_MODE_TRGMII:
return "trgmii";
case PHY_INTERFACE_MODE_1000BASEX:
return "1000base-x";
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return "1000base-kx";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_5GBASER:
+ return "5gbase-r";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
return "xaui";
case PHY_INTERFACE_MODE_10GBASER:
return "10gbase-r";
+ case PHY_INTERFACE_MODE_25GBASER:
+ return "25gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
+ case PHY_INTERFACE_MODE_100BASEX:
+ return "100base-x";
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return "qusgmii";
default:
return "unknown";
}
}
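For example, a driver can use phy_modes() when logging the active interface (a one-line sketch; phydev is assumed to be in scope, and phydev_info() is the helper defined later in this header):

	phydev_info(phydev, "operating in %s mode\n", phy_modes(phydev->interface));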
-
#define PHY_INIT_TIMEOUT 100000
#define PHY_FORCE_TIMEOUT 10
@@ -210,11 +305,20 @@ static inline const char *phy_modes(phy_interface_t interface)
#define MII_BUS_ID_SIZE 61
struct device;
+struct kernel_hwtstamp_config;
struct phylink;
struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
+/**
+ * struct mdio_bus_stats - Statistics counters for MDIO busses
+ * @transfers: Total number of transfers, i.e. @writes + @reads
+ * @errors: Number of MDIO transfers that returned an error
+ * @writes: Number of write transfers
+ * @reads: Number of read transfers
+ * @syncp: Synchronisation for incrementing statistics
+ */
struct mdio_bus_stats {
u64_stats_t transfers;
u64_stats_t errors;
@@ -224,12 +328,24 @@ struct mdio_bus_stats {
struct u64_stats_sync syncp;
};
-/* Represents a shared structure between different phydev's in the same
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @base_addr: Base PHY address of PHY package used to combine PHYs
+ * in one package and for offset calculation of phy_package_read/write
+ * @np: Pointer to the Device Node if PHY package defined in DT
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization flags of the PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydev's in the same
* package, for example a quad PHY. See phy_package_join() and
* phy_package_leave().
*/
struct phy_package_shared {
- int addr;
+ u8 base_addr;
+ /* With PHY package defined in DT this points to the PHY package node */
+ struct device_node *np;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -247,7 +363,14 @@ struct phy_package_shared {
#define PHY_SHARED_F_INIT_DONE 0
#define PHY_SHARED_F_PROBE_DONE 1
-/*
+/**
+ * struct mii_bus - Represents an MDIO bus
+ *
+ * @owner: Who owns this device
+ * @name: User-friendly name for this MDIO device, or driver name
+ * @id: Unique identifier for this bus, typically derived from the bus hierarchy
+ * @priv: Driver private data
+ *
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
*/
@@ -256,65 +379,78 @@ struct mii_bus {
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
+ /** @read: Perform a read transfer on the bus */
int (*read)(struct mii_bus *bus, int addr, int regnum);
+ /** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @read_c45: Perform a C45 read transfer on the bus */
+ int (*read_c45)(struct mii_bus *bus, int addr, int devnum, int regnum);
+ /** @write_c45: Perform a C45 write transfer on the bus */
+ int (*write_c45)(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val);
+ /** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
+
+ /** @stats: Statistic counters per device on the bus */
struct mdio_bus_stats stats[PHY_MAX_ADDR];
- /*
- * A lock to ensure that only one thing can read/write
+ /**
+ * @mdio_lock: A lock to ensure that only one thing can read/write
* the MDIO bus at a time
*/
struct mutex mdio_lock;
+ /** @parent: Parent device of this bus */
struct device *parent;
+ /** @state: State of bus structure */
enum {
MDIOBUS_ALLOCATED = 1,
MDIOBUS_REGISTERED,
MDIOBUS_UNREGISTERED,
MDIOBUS_RELEASED,
} state;
+
+ /** @dev: Kernel device representation */
struct device dev;
- /* list of all PHYs on bus */
+ /** @mdio_map: list of all MDIO devices on bus */
struct mdio_device *mdio_map[PHY_MAX_ADDR];
- /* PHY addresses to be ignored when probing */
+ /** @phy_mask: PHY addresses to be ignored when probing */
u32 phy_mask;
- /* PHY addresses to ignore the TA/read failure */
+ /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */
u32 phy_ignore_ta_mask;
- /*
- * An array of interrupts, each PHY's interrupt at the index
+ /**
+ * @irq: An array of interrupts, each PHY's interrupt at the index
* matching its address
*/
int irq[PHY_MAX_ADDR];
- /* GPIO reset pulse width in microseconds */
+ /** @reset_delay_us: GPIO reset pulse width in microseconds */
int reset_delay_us;
- /* GPIO reset deassert delay in microseconds */
+ /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
int reset_post_delay_us;
- /* RESET GPIO descriptor pointer */
+ /** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
- /* bus capabilities, used for probing */
- enum {
- MDIOBUS_NO_CAP = 0,
- MDIOBUS_C22,
- MDIOBUS_C45,
- MDIOBUS_C22_C45,
- } probe_capabilities;
-
- /* protect access to the shared element */
+ /** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
- /* shared state across different PHYs */
+ /** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
-struct mii_bus *mdiobus_alloc_size(size_t);
+struct mii_bus *mdiobus_alloc_size(size_t size);
+
+/**
+ * mdiobus_alloc - Allocate an MDIO bus structure
+ *
+ * The internal state of the MDIO bus will be set to MDIOBUS_ALLOCATED,
+ * ready for the driver to register the bus.
+ */
static inline struct mii_bus *mdiobus_alloc(void)
{
return mdiobus_alloc_size(0);
@@ -336,50 +472,54 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
}
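A bus provider typically pairs allocation with mdiobus_register(); a minimal sketch (error handling trimmed; the accessor functions and names are hypothetical):

	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();			/* state: MDIOBUS_ALLOCATED */
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-%s", dev_name(dev));
	bus->read = example_mdio_read;		/* hypothetical C22 accessors */
	bus->write = example_mdio_write;
	bus->parent = dev;

	err = mdiobus_register(bus);		/* state: MDIOBUS_REGISTERED */
	if (err)
		mdiobus_free(bus);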
struct mii_bus *mdio_find_bus(const char *mdio_name);
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
-/* PHY state machine states:
+/**
+ * enum phy_state - PHY state machine states:
*
- * DOWN: PHY device and driver are not ready for anything. probe
+ * @PHY_DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will set the state to READY
+ * - PHY driver probe function will set the state to @PHY_READY
*
- * READY: PHY is ready to send and receive packets, but the
+ * @PHY_READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
* probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * UP: The PHY and attached device are ready to do work.
+ * @PHY_UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to NOLINK or RUNNING
+ * - timer moves to @PHY_NOLINK or @PHY_RUNNING
*
- * NOLINK: PHY is up, but not currently plugged in.
- * - irq or timer will set RUNNING if link comes back
- * - phy_stop moves to HALTED
+ * @PHY_NOLINK: PHY is up, but not currently plugged in.
+ * - irq or timer will set @PHY_RUNNING if link comes back
+ * - phy_stop moves to @PHY_HALTED
*
- * RUNNING: PHY is currently up, running, and possibly sending
+ * @PHY_RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - irq or timer will set NOLINK if link goes down
- * - phy_stop moves to HALTED
+ * - irq or timer will set @PHY_NOLINK if link goes down
+ * - phy_stop moves to @PHY_HALTED
*
- * CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * is not expected to work, carrier will be indicated as down. PHY will be
+ * polled once per second, or on interrupt, for its current state.
+ * Once complete, move to @PHY_UP to restart the PHY.
- * - phy_stop aborts the running test and moves to HALTED
+ * - phy_stop aborts the running test and moves to @PHY_HALTED
*
- * HALTED: PHY is up, but no polling or interrupts are done. Or
- * PHY is in an error state.
- * - phy_start moves to UP
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done.
+ * - phy_start moves to @PHY_UP
+ *
+ * @PHY_ERROR: PHY is up, but is in an error state.
+ * - phy_stop moves to @PHY_HALTED
*/
enum phy_state {
PHY_DOWN = 0,
PHY_READY,
PHY_HALTED,
+ PHY_ERROR,
PHY_UP,
PHY_RUNNING,
PHY_NOLINK,
@@ -403,34 +543,96 @@ struct phy_c45_device_ids {
struct macsec_context;
struct macsec_ops;
-/* phy_device: An instance of a PHY
+/**
+ * struct phy_device - An instance of a PHY
*
- * drv: Pointer to the driver for this PHY instance
- * phy_id: UID for this device found during discovery
- * c45_ids: 802.3-c45 Device Identifers if is_c45.
- * is_c45: Set to true if this phy uses clause 45 addressing.
- * is_internal: Set to true if this phy is internal to a MAC.
- * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
- * is_gigabit_capable: Set to true if PHY supports 1000Mbps
- * has_fixups: Set to true if this phy has fixups/quirks.
- * suspended: Set to true if this phy has been suspended successfully.
- * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
- * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
- * loopback_enabled: Set true if this phy has been loopbacked successfully.
- * downshifted_rate: Set true if link speed has been downshifted.
- * state: state of the PHY for management purposes
- * dev_flags: Device-specific flags used by the PHY driver.
- * irq: IRQ number of the PHY's interrupt (-1 if none)
- * phy_timer: The timer for handling the state machine
- * sfp_bus_attached: flag indicating whether the SFP bus has been attached
- * sfp_bus: SFP bus attached to this PHY's fiber port
- * attached_dev: The attached enet driver's device instance ptr
- * adjust_link: Callback for the enet controller to respond to
- * changes in the link state.
- * macsec_ops: MACsec offloading ops.
+ * @mdio: MDIO bus this PHY is on
+ * @drv: Pointer to the driver for this PHY instance
+ * @devlink: Device link between the PHY device and the MAC device, created
+ * when the external PHY used by the current MAC interface is managed by
+ * another MAC interface.
+ * @phy_id: UID for this device found during discovery
+ * @c45_ids: 802.3-c45 Device Identifiers if is_c45.
+ * @is_c45: Set to true if this PHY uses clause 45 addressing.
+ * @is_internal: Set to true if this PHY is internal to a MAC.
+ * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc.
+ * @is_gigabit_capable: Set to true if PHY supports 1000Mbps
+ * @has_fixups: Set to true if this PHY has fixups/quirks.
+ * @suspended: Set to true if this PHY has been suspended successfully.
+ * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus.
+ * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
+ * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
+ * @downshifted_rate: Set true if link speed has been downshifted.
+ * @is_on_sfp_module: Set true if PHY is located on an SFP module.
+ * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming the PHY
+ * @wol_enabled: Set to true if the PHY or the attached MAC have Wake-on-LAN
+ * enabled.
+ * @state: State of the PHY for management purposes
+ * @dev_flags: Device-specific flags used by the PHY driver.
*
- * speed, duplex, pause, supported, advertising, lp_advertising,
- * and autoneg are used like in mii_if_info
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
+ * @irq: IRQ number of the PHY's interrupt (-1 if none)
+ * @phylink: Pointer to phylink instance for this PHY
+ * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
+ * @sfp_bus: SFP bus attached to this PHY's fiber port
+ * @attached_dev: The attached enet driver's device instance ptr
+ * @adjust_link: Callback for the enet controller to respond to changes in
+ * the link state.
+ * @phy_link_change: Callback for phylink for notification of link change
+ * @macsec_ops: MACsec offloading ops.
+ *
+ * @speed: Current link speed
+ * @duplex: Current duplex
+ * @port: Current port
+ * @pause: Current pause
+ * @asym_pause: Current asymmetric pause
+ * @supported: Combined MAC/PHY supported linkmodes
+ * @advertising: Currently advertised linkmodes
+ * @adv_old: Saved advertised while power saving for WoL
+ * @supported_eee: supported PHY EEE linkmodes
+ * @advertising_eee: Currently advertised EEE linkmodes
+ * @eee_enabled: Flag indicating whether the EEE feature is enabled
+ * @enable_tx_lpi: When True, MAC should transmit LPI to PHY
+ * @eee_cfg: User configuration of EEE
+ * @lp_advertising: Current link partner advertised linkmodes
+ * @host_interfaces: PHY interface modes supported by host
+ * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @autoneg: Flag autoneg being used
+ * @rate_matching: Current rate matching mode
+ * @link: Current link state
+ * @autoneg_complete: Flag auto negotiation of the link has completed
+ * @mdix: Current crossover
+ * @mdix_ctrl: User setting of crossover
+ * @pma_extable: Cached value of PMA/PMD Extended Abilities Register
+ * @interrupts: Flag interrupts have been enabled
+ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
+ * handling shall be postponed until PHY has resumed
+ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
+ * requiring a rerun of the interrupt handler after resume
+ * @interface: enum phy_interface_t value
+ * @possible_interfaces: bitmap of interface modes that the attached PHY
+ * will switch between depending on media speed.
+ * @skb: Netlink message for cable diagnostics
+ * @nest: Netlink nest used for cable diagnostics
+ * @ehdr: Netlink header for cable diagnostics
+ * @phy_led_triggers: Array of LED triggers
+ * @phy_num_led_triggers: Number of triggers in @phy_led_triggers
+ * @led_link_trigger: LED trigger for link up/down
+ * @last_triggered: last LED trigger for link speed
+ * @leds: list of PHY LED structures
+ * @master_slave_set: User requested master/slave configuration
+ * @master_slave_get: Current master/slave advertisement
+ * @master_slave_state: Current master/slave configuration
+ * @mii_ts: Pointer to time stamper callbacks
+ * @psec: Pointer to Power Sourcing Equipment control struct
+ * @lock: Mutex for serialization access to PHY
+ * @state_queue: Work queue for state machine
+ * @link_down_events: Number of times link was lost
+ * @shared: Pointer to private data shared by PHYs in one package
+ * @priv: Pointer to driver private data
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -444,7 +646,9 @@ struct phy_device {
/* Information about the PHY type */
/* And management functions */
- struct phy_driver *drv;
+ const struct phy_driver *drv;
+
+ struct device_link *devlink;
u32 phy_id;
@@ -459,6 +663,9 @@ struct phy_device {
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
unsigned downshifted_rate:1;
+ unsigned is_on_sfp_module:1;
+ unsigned mac_managed_pm:1;
+ unsigned wol_enabled:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -467,12 +674,17 @@ struct phy_device {
/* Interrupts are enabled */
unsigned interrupts:1;
+ unsigned irq_suspended:1;
+ unsigned irq_rerun:1;
+
+ int rate_matching;
enum phy_state state;
u32 dev_flags;
phy_interface_t interface;
+ DECLARE_PHY_INTERFACE_MASK(possible_interfaces);
/*
* forced speed & duplex (no autoneg)
@@ -480,6 +692,7 @@ struct phy_device {
*/
int speed;
int duplex;
+ int port;
int pause;
int asym_pause;
u8 master_slave_get;
@@ -493,9 +706,18 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+	/* used for EEE validation and configuration */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
+ bool eee_enabled;
+
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
+ bool enable_tx_lpi;
+ struct eee_config eee_cfg;
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
@@ -504,6 +726,7 @@ struct phy_device {
struct phy_led_trigger *led_link_trigger;
#endif
+ struct list_head leds;
/*
* Interrupt number for this PHY
@@ -535,10 +758,15 @@ struct phy_device {
struct phylink *phylink;
struct net_device *attached_dev;
struct mii_timestamper *mii_ts;
+ struct pse_control *psec;
u8 mdix;
u8 mdix_ctrl;
+ int pma_extable;
+
+ unsigned int link_down_events;
+
void (*phy_link_change)(struct phy_device *phydev, bool up);
void (*adjust_link)(struct net_device *dev);
@@ -547,12 +775,27 @@ struct phy_device {
const struct macsec_ops *macsec_ops;
#endif
};
-#define to_phy_device(d) container_of(to_mdio_device(d), \
- struct phy_device, mdio)
-/* A structure containing possible configuration parameters
+/* Generic phy_device::dev_flags */
+#define PHY_F_NO_IRQ 0x80000000
+
+static inline struct phy_device *to_phy_device(const struct device *dev)
+{
+ return container_of(to_mdio_device(dev), struct phy_device, mdio);
+}
+
+/**
+ * struct phy_tdr_config - Configuration of a TDR raw test
+ *
+ * @first: Distance for first data collection point
+ * @last: Distance for last data collection point
+ * @step: Step between data collection points
+ * @pair: Bitmap of cable pairs to collect data for
+ *
+ * A structure containing possible configuration parameters
* for a TDR cable test. The driver does not need to implement
* all the parameters, but should report what is actually used.
+ * All distances are in centimeters.
*/
struct phy_tdr_config {
u32 first;
@@ -562,18 +805,103 @@ struct phy_tdr_config {
};
#define PHY_PAIR_ALL -1
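For instance, requesting TDR data for all pairs over the first 100 m in 1 m steps (distances in centimeters, per the note above) might look like this sketch:

	static const struct phy_tdr_config example_tdr_cfg = {
		.first	= 100,		/* start at 1 m */
		.last	= 10000,	/* end at 100 m */
		.step	= 100,		/* 1 m resolution */
		.pair	= PHY_PAIR_ALL,
	};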
-/* struct phy_driver: Driver structure for a particular PHY type
+/**
+ * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @version: read-only PLCA register map version. -1 = not available. Ignored
+ * when setting the configuration. Format is the same as reported by the PLCA
+ * IDVER register (31.CA00).
+ * @enabled: PLCA configured mode (enabled/disabled). -1 = not available / don't
+ * set. 0 = disabled, anything else = enabled.
+ * @node_id: the PLCA local node identifier. -1 = not available / don't set.
+ * Allowed values [0 .. 254]. 255 = node disabled.
+ * @node_cnt: the PLCA node count (maximum number of nodes having a TO). Only
+ * meaningful for the coordinator (node_id = 0). -1 = not available / don't
+ * set. Allowed values [1 .. 255].
+ * @to_tmr: The value of the PLCA to_timer in bit-times, which determines the
+ * PLCA transmit opportunity window opening. See IEEE802.3 Clause 148 for
+ * more details. The to_timer shall be set equal over all nodes.
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ * @burst_cnt: controls how many additional frames a node is allowed to send in
+ * single transmit opportunity (TO). The default value of 0 means that the
+ * node is allowed exactly one frame per TO. A value of 1 allows two frames
+ * per TO, and so on. -1 = not available / don't set.
+ * Allowed values [0 .. 255].
+ * @burst_tmr: controls how many bit times to wait for the MAC to send a new
+ * frame before interrupting the burst. This value should be set to a value
+ * greater than the MAC inter-packet gap (which is typically 96 bits).
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ *
+ * A structure containing configuration parameters for setting/getting the PLCA
+ * RS configuration. The driver does not need to implement all the parameters,
+ * but should report what is actually used.
+ */
+struct phy_plca_cfg {
+ int version;
+ int enabled;
+ int node_id;
+ int node_cnt;
+ int to_tmr;
+ int burst_cnt;
+ int burst_tmr;
+};
+
+/**
+ * struct phy_plca_status - Status of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @pst: The PLCA status as reported by the PST bit in the PLCA STATUS
+ * register (31.CA03), indicating BEACON activity.
+ *
+ * A structure containing status information of the PLCA RS configuration.
+ * The driver does not need to implement all the parameters, but should report
+ * what is actually used.
+ */
+struct phy_plca_status {
+ bool pst;
+};
+
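As a sketch, enabling PLCA on follower node 5 while leaving every other parameter untouched could use the ethtool helper declared later in this header (phydev and extack are assumed to be provided by the caller):

	struct phy_plca_cfg cfg = {
		.version	= -1,	/* read-only; ignored on set */
		.enabled	= 1,
		.node_id	= 5,
		.node_cnt	= -1,	/* don't set: coordinator-only */
		.to_tmr		= -1,
		.burst_cnt	= -1,
		.burst_tmr	= -1,
	};
	int err = phy_ethtool_set_plca_cfg(phydev, &cfg, extack);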
+/* Modes for PHY LED configuration */
+enum phy_led_modes {
+ PHY_LED_ACTIVE_LOW = 0,
+ PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1,
+
+ /* keep it last */
+ __PHY_LED_MODES_NUM,
+};
+
+/**
+ * struct phy_led - An LED driven by the PHY
+ *
+ * @list: List of LEDs
+ * @phydev: PHY this LED is attached to
+ * @led_cdev: Standard LED class structure
+ * @index: Number of the LED
+ */
+struct phy_led {
+ struct list_head list;
+ struct phy_device *phydev;
+ struct led_classdev led_cdev;
+ u8 index;
+};
+
+#define to_phy_led(d) container_of(d, struct phy_led, led_cdev)
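to_phy_led() lets LED-class callbacks recover the owning PHY; roughly how a brightness trampoline might use it (a sketch under those assumptions, not the actual phylib implementation, which also serializes access):

static int example_led_set_brightness(struct led_classdev *led_cdev,
				      enum led_brightness value)
{
	struct phy_led *phyled = to_phy_led(led_cdev);
	struct phy_device *phydev = phyled->phydev;

	/* forward to the PHY driver's led_brightness_set() op */
	return phydev->drv->led_brightness_set(phydev, phyled->index, value);
}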
+
+/**
+ * struct phy_driver - Driver structure for a particular PHY type
*
- * driver_data: static driver data
- * phy_id: The result of reading the UID registers of this PHY
+ * @mdiodrv: Data common to all MDIO devices
+ * @phy_id: The result of reading the UID registers of this PHY
* type, and ANDing them with the phy_id_mask. This driver
* only works for PHYs with IDs which match this field
- * name: The friendly name of this PHY type
- * phy_id_mask: Defines the important bits of the phy_id
- * features: A mandatory list of features (speed, duplex, etc)
+ * @name: The friendly name of this PHY type
+ * @phy_id_mask: Defines the important bits of the phy_id
+ * @features: A mandatory list of features (speed, duplex, etc)
* supported by this PHY
- * flags: A bitfield defining certain other features this PHY
+ * @flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * @driver_data: Static driver data
*
* All functions are optional. If config_aneg or read_status
* are not implemented, the phy core uses the genphy versions.
@@ -592,152 +920,263 @@ struct phy_driver {
u32 flags;
const void *driver_data;
- /*
- * Called to issue a PHY software reset
+ /**
+ * @soft_reset: Called to issue a PHY software reset
*/
int (*soft_reset)(struct phy_device *phydev);
- /*
- * Called to initialize the PHY,
+ /**
+ * @config_init: Called to initialize the PHY,
* including after a reset
*/
int (*config_init)(struct phy_device *phydev);
- /*
- * Called during discovery. Used to set
+ /**
+ * @probe: Called during discovery. Used to set
* up device-specific structures, if any
*/
int (*probe)(struct phy_device *phydev);
- /*
- * Probe the hardware to determine what abilities it has.
- * Should only set phydev->supported.
+ /**
+ * @get_features: Probe the hardware to determine what
+ * abilities it has. Should only set phydev->supported.
*/
int (*get_features)(struct phy_device *phydev);
+ /**
+ * @get_rate_matching: Get the supported type of rate matching for a
+ * particular phy interface. This is used by phy consumers to determine
+ * whether to advertise lower-speed modes for that interface. It is
+ * assumed that if a rate matching mode is supported on an interface,
+ * then that interface's rate can be adapted to all slower link speeds
+ * supported by the phy. If the interface is not supported, this should
+ * return %RATE_MATCH_NONE.
+ */
+ int (*get_rate_matching)(struct phy_device *phydev,
+ phy_interface_t iface);
+
/* PHY Power Management */
+ /** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
+ /** @resume: Resume the hardware, restoring state if needed */
int (*resume)(struct phy_device *phydev);
- /*
- * Configures the advertisement and resets
+ /**
+ * @config_aneg: Configures the advertisement and resets
* autonegotiation if phydev->autoneg is on,
* forces the speed to the current settings in phydev
* if phydev->autoneg is off
*/
int (*config_aneg)(struct phy_device *phydev);
- /* Determines the auto negotiation result */
+ /** @aneg_done: Determines the auto negotiation result */
int (*aneg_done)(struct phy_device *phydev);
- /* Determines the negotiated speed and duplex */
+ /** @read_status: Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
- /* Clears any pending interrupts */
- int (*ack_interrupt)(struct phy_device *phydev);
-
- /* Enables or disables interrupts */
- int (*config_intr)(struct phy_device *phydev);
-
- /*
- * Checks if the PHY generated an interrupt.
- * For multi-PHY devices with shared PHY interrupt pin
- * Set interrupt bits have to be cleared.
+ /**
+ * @config_intr: Enables or disables interrupts.
+ * It should also clear any pending interrupts prior to enabling the
+ * IRQs and after disabling them.
*/
- int (*did_interrupt)(struct phy_device *phydev);
+ int (*config_intr)(struct phy_device *phydev);
- /* Override default interrupt handling */
+ /** @handle_interrupt: Override default interrupt handling */
irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
- /* Clears up any memory if needed */
+ /** @remove: Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
- /* Returns true if this is a suitable driver for the given
- * phydev. If NULL, matching is based on phy_id and
- * phy_id_mask.
+ /**
+ * @match_phy_device: Returns true if this is a suitable
+ * driver for the given phydev. If NULL, matching is based on
+ * phy_id and phy_id_mask.
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
- * enable Wake on LAN, so set_wol is provided to be called in the
- * ethernet driver's set_wol function. */
+ /**
+ * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
+ * register changes to enable Wake on LAN, so set_wol is
+ * provided to be called in the ethernet driver's set_wol
+ * function.
+ */
int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /* See set_wol, but for checking whether Wake on LAN is enabled. */
+ /**
+ * @get_wol: See set_wol, but for checking whether Wake on LAN
+ * is enabled.
+ */
void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /*
- * Called to inform a PHY device driver when the core is about to
- * change the link state. This callback is supposed to be used as
- * fixup hook for drivers that need to take action when the link
- * state changes. Drivers are by no means allowed to mess with the
+ /**
+ * @link_change_notify: Called to inform a PHY device driver
+ * when the core is about to change the link state. This
+ * callback is supposed to be used as fixup hook for drivers
+ * that need to take action when the link state
+ * changes. Drivers are by no means allowed to mess with the
* PHY device structure in their implementations.
*/
void (*link_change_notify)(struct phy_device *dev);
- /*
- * Phy specific driver override for reading a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD read function will be used
- * by phy_read_mmd(), which will use either a direct read for
- * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
+ /**
+ * @read_mmd: PHY specific driver override for reading a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD read function
+ * will be used by phy_read_mmd(), which will use either a
+ * direct read for Clause 45 PHYs or an indirect read for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device.
*/
int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
- /*
- * Phy specific driver override for writing a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD write function will be used
- * by phy_write_mmd(), which will use either a direct write for
- * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
- * val is the value to be written.
+ /**
+ * @write_mmd: PHY specific driver override for writing a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD write function
+ * will be used by phy_write_mmd(), which will use either a
+ * direct write for Clause 45 PHYs, or an indirect write for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device. val is the value to be written.
*/
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
+ /** @read_page: Return the current PHY register page number */
int (*read_page)(struct phy_device *dev);
+ /** @write_page: Set the current PHY register page number */
int (*write_page)(struct phy_device *dev, int page);
- /* Get the size and type of the eeprom contained within a plug-in
- * module */
+ /**
+ * @module_info: Get the size and type of the eeprom contained
+ * within a plug-in module
+ */
int (*module_info)(struct phy_device *dev,
struct ethtool_modinfo *modinfo);
- /* Get the eeprom information from the plug-in module */
+ /**
+ * @module_eeprom: Get the eeprom information from the plug-in
+ * module
+ */
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
- /* Start a cable test */
+ /** @cable_test_start: Start a cable test */
int (*cable_test_start)(struct phy_device *dev);
- /* Start a raw TDR cable test */
+ /** @cable_test_tdr_start: Start a raw TDR cable test */
int (*cable_test_tdr_start)(struct phy_device *dev,
const struct phy_tdr_config *config);
- /* Once per second, or on interrupt, request the status of the
- * test.
+ /**
+ * @cable_test_get_status: Once per second, or on interrupt,
+ * request the status of the test.
*/
int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
- /* Get statistics from the phy using ethtool */
+ /* Get statistics from the PHY using ethtool */
+ /** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
+ /** @get_strings: Names of the statistic counters */
void (*get_strings)(struct phy_device *dev, u8 *data);
+ /** @get_stats: Return the statistic counter values */
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
/* Get and Set PHY tunables */
+ /** @get_tunable: Return the value of a tunable */
int (*get_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna, void *data);
+ /** @set_tunable: Set the value of a tunable */
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
+	/** @set_loopback: Set the loopback mode of the PHY */
int (*set_loopback)(struct phy_device *dev, bool enable);
+ /** @get_sqi: Get the signal quality indication */
int (*get_sqi)(struct phy_device *dev);
+ /** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
+
+ /* PLCA RS interface */
+ /** @get_plca_cfg: Return the current PLCA configuration */
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ /** @set_plca_cfg: Set the PLCA configuration */
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg);
+ /** @get_plca_status: Return the current PLCA status info */
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
+
+ /**
+	 * @led_brightness_set: Set a PHY LED brightness. Index
+	 * indicates which of the PHY's LEDs should be set. Value
+ * follows the standard LED class meaning, e.g. LED_OFF,
+ * LED_HALF, LED_FULL.
+ */
+ int (*led_brightness_set)(struct phy_device *dev,
+ u8 index, enum led_brightness value);
+
+ /**
+	 * @led_blink_set: Set a PHY LED to blink. Index indicates
+	 * which of the PHY's LEDs should be configured to blink. Delays
+	 * are in milliseconds and if both are zero then a sensible
+	 * default should be chosen. The call should adjust the
+	 * timings in that case, and also if it can't match the values
+	 * specified exactly.
+ */
+ int (*led_blink_set)(struct phy_device *dev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+ /**
+ * @led_hw_is_supported: Can the HW support the given rules.
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+	 * @rules: The core is interested in these rules
+ *
+ * Return 0 if yes, -EOPNOTSUPP if not, or an error code.
+ */
+ int (*led_hw_is_supported)(struct phy_device *dev, u8 index,
+ unsigned long rules);
+ /**
+ * @led_hw_control_set: Set the HW to control the LED
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+	 * @rules: The rules used to control the LED
+	 *
+	 * Returns 0, or an error code.
+ */
+ int (*led_hw_control_set)(struct phy_device *dev, u8 index,
+ unsigned long rules);
+ /**
+ * @led_hw_control_get: Get how the HW is controlling the LED
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+	 * @rules: Pointer to the rules used to control the LED
+	 *
+	 * Set *@rules to how the HW is currently blinking. Returns 0
+	 * on success, or an error code if the current blinking cannot
+	 * be represented in rules, or some other error happens.
+ */
+ int (*led_hw_control_get)(struct phy_device *dev, u8 index,
+ unsigned long *rules);
+
+ /**
+ * @led_polarity_set: Set the LED polarity modes
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @modes: bitmap of LED polarity modes
+ *
+ * Configure LED with all the required polarity modes in @modes
+ * to make it correctly turn ON or OFF.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*led_polarity_set)(struct phy_device *dev, int index,
+ unsigned long modes);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -749,6 +1188,34 @@ struct phy_driver {
#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+/**
+ * phy_id_compare - compare @id1 with @id2 taking account of @mask
+ * @id1: first PHY ID
+ * @id2: second PHY ID
+ * @mask: the PHY ID mask, set bits are significant in matching
+ *
+ * Return true if the bits from @id1 and @id2 specified by @mask match.
+ * This uses an equivalent test to (@id1 & @mask) == (@id2 & @mask).
+ */
+static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask)
+{
+ return !((id1 ^ id2) & mask);
+}
+
+/**
+ * phydev_id_compare - compare @id with the PHY's Clause 22 ID
+ * @phydev: the PHY device
+ * @id: the PHY ID to be matched
+ *
+ * Compare the @phydev clause 22 ID with the provided @id and return true or
+ * false depending whether it matches, using the bound driver mask. The
+ * @phydev must be bound to a driver.
+ */
+static inline bool phydev_id_compare(struct phy_device *phydev, u32 id)
+{
+ return phy_id_compare(id, phydev->phy_id, phydev->drv->phy_id_mask);
+}
+
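Putting the match macros and phy_id_compare() together, a driver entry might look like this sketch (the ID and names are made up):

#define EXAMPLE_PHY_ID	0x00221610	/* hypothetical OUI/model */

static int example_match_phy_device(struct phy_device *phydev)
{
	/* same test PHY_ID_MATCH_MODEL() would generate */
	return phy_id_compare(phydev->phy_id, EXAMPLE_PHY_ID, GENMASK(31, 4));
}

static struct phy_driver example_drivers[] = {
	{
		PHY_ID_MATCH_MODEL(EXAMPLE_PHY_ID),
		.name			= "Example Gigabit PHY",
		.match_phy_device	= example_match_phy_device,
	},
};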
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
@@ -760,6 +1227,9 @@ struct phy_fixup {
const char *phy_speed_to_str(int speed);
const char *phy_duplex_to_str(unsigned int duplex);
+const char *phy_rate_matching_to_str(int rate_matching);
+
+int phy_interface_num_ports(phy_interface_t interface);
/* A structure for mapping a particular speed and duplex
* combination to a particular SUPPORTED and ADVERTISED value
@@ -809,16 +1279,17 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \
timeout_us, sleep_before_read) \
({ \
- int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \
+ int __ret, __val; \
+ __ret = read_poll_timeout(__val = phy_read, val, \
+ __val < 0 || (cond), \
sleep_us, timeout_us, sleep_before_read, phydev, regnum); \
- if (val < 0) \
- __ret = val; \
+ if (__val < 0) \
+ __ret = __val; \
if (__ret) \
phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
__ret; \
})
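Typical use polls a self-clearing bit; e.g. waiting up to 50 ms for BMCR_RESET to clear, sleeping 1 ms between reads (a sketch using MII_BMCR and BMCR_RESET from linux/mii.h):

	int val, err;

	err = phy_read_poll_timeout(phydev, MII_BMCR, val,
				    !(val & BMCR_RESET),
				    1000, 50000, false);
	if (err)
		return err;	/* timeout or MDIO read failure */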
-
/**
* __phy_read - convenience function for reading a given PHY register
* @phydev: the phy_device struct
@@ -879,62 +1350,60 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
regnum, mask, set);
}
-/**
+/*
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
*/
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+/**
+ * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a
+ * condition is met or a timeout occurs
+ *
+ * @phydev: The phy_device struct
+ * @devaddr: The MMD to read from
+ * @regnum: The register on the MMD to read
+ * @val: Variable to read the register into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if it is true, sleep @sleep_us before read.
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
sleep_us, timeout_us, sleep_before_read) \
({ \
- int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \
+ int __ret, __val; \
+ __ret = read_poll_timeout(__val = phy_read_mmd, val, \
+ __val < 0 || (cond), \
sleep_us, timeout_us, sleep_before_read, \
phydev, devaddr, regnum); \
- if (val < 0) \
- __ret = val; \
+ if (__val < 0) \
+ __ret = __val; \
if (__ret) \
phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
__ret; \
})
-/**
+/*
* __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for __phy_read();
*/
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
-/**
+/*
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for phy_write();
*/
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
-/**
+/*
* __phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for __phy_write();
*/
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
@@ -1068,11 +1537,11 @@ static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
* @phydev: the phy_device struct
*
* NOTE: must be kept in sync with addition/removal of PHY_POLL and
- * PHY_IGNORE_INTERRUPT
+ * PHY_MAC_INTERRUPT
*/
static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
{
- return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+ return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT;
}
/**
@@ -1126,9 +1595,11 @@ static inline bool phy_has_txtstamp(struct phy_device *phydev)
return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
}
-static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static inline int phy_hwtstamp(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, cfg, extack);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
@@ -1159,9 +1630,18 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_on_sfp(struct phy_device *phydev)
+{
+ return phydev->is_on_sfp_module;
+}
+
+/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
- * @mode: the phy_interface_t enum
+ * @mode: the &phy_interface_t enum
*/
static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
{
@@ -1170,11 +1650,11 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
- * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z
* negotiation
* @mode: one of &enum phy_interface_t
*
- * Returns true if the phy interface mode uses the 16-bit negotiation
+ * Returns true if the PHY interface mode uses the 16-bit negotiation
* word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
*/
static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
@@ -1193,7 +1673,7 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
return phy_interface_mode_is_rgmii(phydev->interface);
};
-/*
+/**
* phy_is_pseudo_fixed_link - Convenience function for testing if this
* PHY is the CPU port facing side of an Ethernet switch, or similar.
* @phydev: the phy_device struct
@@ -1217,10 +1697,42 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
#if IS_ENABLED(CONFIG_PHYLIB)
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
+struct phy_device *device_phy_find_device(struct device *dev);
+struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
#else
+static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ return 0;
+}
+static inline
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+ return 0;
+}
+
+static inline
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ return NULL;
+}
+
+static inline struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return NULL;
+}
+
+static inline
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
static inline
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
@@ -1235,6 +1747,7 @@ static inline int phy_device_register(struct phy_device *phy)
static inline void phy_device_free(struct phy_device *phydev) { }
#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
+int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
@@ -1259,10 +1772,13 @@ void phy_disconnect(struct phy_device *phydev);
void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
+int phy_config_aneg(struct phy_device *phydev);
+int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
+bool phy_check_valid(int speed, int duplex, unsigned long *features);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
@@ -1291,10 +1807,6 @@ int phy_start_cable_test_tdr(struct phy_device *phydev,
}
#endif
-int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result);
-int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair,
- u16 cm);
-
static inline void phy_device_reset(struct phy_device *phydev, int value)
{
mdio_device_reset(&phydev->mdio, value);
@@ -1303,6 +1815,9 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
#define phydev_err(_phydev, format, args...) \
dev_err(&_phydev->mdio.dev, format, ##args)
+#define phydev_err_probe(_phydev, err, format, args...) \
+ dev_err_probe(&_phydev->mdio.dev, err, format, ##args)
+
#define phydev_info(_phydev, format, args...) \
dev_info(&_phydev->mdio.dev, format, ##args)
@@ -1345,20 +1860,18 @@ int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
+int genphy_read_master_slave(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_loopback(struct phy_device *phydev, bool enable);
int genphy_soft_reset(struct phy_device *phydev);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
static inline int genphy_config_aneg(struct phy_device *phydev)
{
return __genphy_config_aneg(phydev, false);
}
-static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
-{
- return 0;
-}
static inline int genphy_no_config_intr(struct phy_device *phydev)
{
return 0;
@@ -1370,7 +1883,7 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
/* Clause 37 */
int genphy_c37_config_aneg(struct phy_device *phydev);
-int genphy_c37_read_status(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1380,12 +1893,37 @@ int genphy_c45_read_link(struct phy_device *phydev);
int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
+int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev);
int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev);
+int genphy_c45_read_eee_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_baset1_read_status(struct phy_device *phydev);
int genphy_c45_config_aneg(struct phy_device *phydev);
+int genphy_c45_loopback(struct phy_device *phydev, bool enable);
+int genphy_c45_pma_resume(struct phy_device *phydev);
+int genphy_c45_pma_suspend(struct phy_device *phydev);
+int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
+ unsigned long *lp, bool *is_enabled);
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_keee *data);
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_keee *data);
+int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
+int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
+int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
/* Generic C45 PHY driver */
extern struct phy_driver genphy_c45_driver;
@@ -1409,8 +1947,10 @@ void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
+void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
+void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
@@ -1425,11 +1965,15 @@ int phy_disable_interrupts(struct phy_device *phydev);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface);
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
+void phy_advertise_eee_all(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
+void phy_support_eee(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg);
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
@@ -1456,8 +2000,8 @@ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data);
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
void phy_ethtool_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
@@ -1466,63 +2010,112 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size);
+ int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
-#if IS_ENABLED(CONFIG_PHYLIB)
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
-#endif
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data);
-
-static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+
+int __phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config);
+int __phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
+static inline int phy_package_address(struct phy_device *phydev,
+ unsigned int addr_offset)
{
struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
- if (!shared)
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
return -EIO;
- return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
}
-static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
+static inline int phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ return mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+
+static inline int __phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
}
static inline int phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
static inline int __phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
+int phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
static inline bool __phy_package_set_once(struct phy_device *phydev,
unsigned int b)
{
@@ -1544,7 +2137,7 @@ static inline bool phy_package_probe_once(struct phy_device *phydev)
return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
}
-extern struct bus_type mdio_bus_type;
+extern const struct bus_type mdio_bus_type;
struct mdio_board_info {
const char *bus_id;
@@ -1566,8 +2159,9 @@ static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
/**
- * module_phy_driver() - Helper macro for registering PHY drivers
+ * phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
+ * @__count: number of members in the array
*
* Helper macro for PHY drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/phy/pcie.h b/include/linux/phy/pcie.h
new file mode 100644
index 000000000000..e7ac81764576
--- /dev/null
+++ b/include/linux/phy/pcie.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __PHY_PCIE_H
+#define __PHY_PCIE_H
+
+#define PHY_MODE_PCIE_RC 20
+#define PHY_MODE_PCIE_EP 21
+#define PHY_MODE_PCIE_BIFURCATION 22
+
+#endif
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644
index 000000000000..09931d080a6d
--- /dev/null
+++ b/include/linux/phy/phy-lvds.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential
+ * clock cycle.
+ * @differential_clk_rate: Clock rate, in Hertz, of the LVDS
+ * differential clock.
+ * @lanes: Number of active, consecutive,
+ * data lanes, starting from lane 0,
+ * used for the transmissions.
+ * @is_slave: Boolean, true if the phy is a slave
+ * which works together with a master
+ * phy to support dual link transmission,
+ * otherwise a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of a LVDS phy.
+ */
+struct phy_configure_opts_lvds {
+ unsigned int bits_per_lane_and_dclk_cycle;
+ unsigned long differential_clk_rate;
+ unsigned int lanes;
+ bool is_slave;
+};
+
+#endif /* __PHY_LVDS_H_ */
diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h
index a877ffee845d..1ac128d78dfe 100644
--- a/include/linux/phy/phy-mipi-dphy.h
+++ b/include/linux/phy/phy-mipi-dphy.h
@@ -279,6 +279,9 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
unsigned int bpp,
unsigned int lanes,
struct phy_configure_opts_mipi_dphy *cfg);
+int phy_mipi_dphy_get_default_config_for_hsclk(unsigned long long hs_clk_rate,
+ unsigned int lanes,
+ struct phy_configure_opts_mipi_dphy *cfg);
int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg);
#endif /* __PHY_MIPI_DPHY_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index bcee8eba62b3..03cd5bae92d3 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-lvds.h>
#include <linux/phy/phy-mipi-dphy.h>
struct phy;
@@ -44,6 +45,12 @@ enum phy_mode {
PHY_MODE_DP
};
+enum phy_media {
+ PHY_MEDIA_DEFAULT,
+ PHY_MEDIA_SR,
+ PHY_MEDIA_DAC,
+};
+
/**
* union phy_configure_opts - Opaque generic phy configuration
*
@@ -51,10 +58,13 @@ enum phy_mode {
* the MIPI_DPHY phy mode.
* @dp: Configuration set applicable for phys supporting
* the DisplayPort protocol.
+ * @lvds: Configuration set applicable for phys supporting
+ * the LVDS phy mode.
*/
union phy_configure_opts {
struct phy_configure_opts_mipi_dphy mipi_dphy;
struct phy_configure_opts_dp dp;
+ struct phy_configure_opts_lvds lvds;
};
/**
@@ -64,6 +74,8 @@ union phy_configure_opts {
* @power_on: powering on the phy
* @power_off: powering off the phy
* @set_mode: set the mode of the phy
+ * @set_media: set the media type of the phy (optional)
+ * @set_speed: set the speed of the phy (optional)
* @reset: resetting the phy
* @calibrate: calibrate the phy
* @release: ops to be performed while the consumer relinquishes the PHY
@@ -75,6 +87,8 @@ struct phy_ops {
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode);
+ int (*set_media)(struct phy *phy, enum phy_media media);
+ int (*set_speed)(struct phy *phy, int speed);
/**
* @configure:
@@ -108,6 +122,11 @@ struct phy_ops {
union phy_configure_opts *opts);
int (*reset)(struct phy *phy);
int (*calibrate)(struct phy *phy);
+
+ /* notify phy connect status change */
+ int (*connect)(struct phy *phy, int port);
+ int (*disconnect)(struct phy *phy, int port);
+
void (*release)(struct phy *phy);
struct module *owner;
};
@@ -115,10 +134,12 @@ struct phy_ops {
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
+ * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer)
* @mode: PHY mode
*/
struct phy_attrs {
u32 bus_width;
+ u32 max_link_rate;
enum phy_mode mode;
};
@@ -132,6 +153,7 @@ struct phy_attrs {
* @power_count: used to protect when the PHY is used by multiple consumers
* @attrs: used to specify PHY specific attributes
* @pwr: power regulator associated with the phy
+ * @debugfs: debugfs directory
*/
struct phy {
struct device dev;
@@ -142,6 +164,7 @@ struct phy {
int power_count;
struct phy_attrs attrs;
struct regulator *pwr;
+ struct dentry *debugfs;
};
/**
@@ -158,7 +181,7 @@ struct phy_provider {
struct module *owner;
struct list_head list;
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
};
/**
@@ -213,6 +236,8 @@ int phy_power_off(struct phy *phy);
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode);
#define phy_set_mode(phy, mode) \
phy_set_mode_ext(phy, mode, 0)
+int phy_set_media(struct phy *phy, enum phy_media media);
+int phy_set_speed(struct phy *phy, int speed);
int phy_configure(struct phy *phy, union phy_configure_opts *opts);
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts);
@@ -223,6 +248,8 @@ static inline enum phy_mode phy_get_mode(struct phy *phy)
}
int phy_reset(struct phy *phy);
int phy_calibrate(struct phy *phy);
+int phy_notify_connect(struct phy *phy, int port);
+int phy_notify_disconnect(struct phy *phy, int port);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -232,11 +259,12 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
phy->attrs.bus_width = bus_width;
}
struct phy *phy_get(struct device *dev, const char *string);
-struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
struct phy *devm_phy_optional_get(struct device *dev, const char *string);
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
+struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
+ const char *con_id);
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index);
void of_phy_put(struct phy *phy);
@@ -244,7 +272,7 @@ void phy_put(struct device *dev, struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_get(struct device_node *np, const char *con_id);
struct phy *of_phy_simple_xlate(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
struct phy *phy_create(struct device *dev, struct device_node *node,
const struct phy_ops *ops);
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
@@ -254,11 +282,11 @@ void devm_phy_destroy(struct device *dev, struct phy *phy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
void of_phy_provider_unregister(struct phy_provider *phy_provider);
void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider);
@@ -342,6 +370,20 @@ static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode,
#define phy_set_mode(phy, mode) \
phy_set_mode_ext(phy, mode, 0)
+static inline int phy_set_media(struct phy *phy, enum phy_media media)
+{
+ if (!phy)
+ return 0;
+ return -ENODEV;
+}
+
+static inline int phy_set_speed(struct phy *phy, int speed)
+{
+ if (!phy)
+ return 0;
+ return -ENODEV;
+}
+
static inline enum phy_mode phy_get_mode(struct phy *phy)
{
return PHY_MODE_INVALID;
@@ -361,6 +403,20 @@ static inline int phy_calibrate(struct phy *phy)
return -ENOSYS;
}
+static inline int phy_notify_connect(struct phy *phy, int index)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_notify_disconnect(struct phy *phy, int index)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_configure(struct phy *phy,
union phy_configure_opts *opts)
{
@@ -394,12 +450,6 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
return ERR_PTR(-ENOSYS);
}
-static inline struct phy *phy_optional_get(struct device *dev,
- const char *string)
-{
- return ERR_PTR(-ENOSYS);
-}
-
static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
return ERR_PTR(-ENOSYS);
@@ -418,6 +468,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_of_phy_optional_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return NULL;
+}
+
static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
struct device_node *np,
int index)
@@ -443,7 +500,7 @@ static inline struct phy *of_phy_get(struct device_node *np, const char *con_id)
}
static inline struct phy *of_phy_simple_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
return ERR_PTR(-ENOSYS);
}
@@ -473,7 +530,7 @@ static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
static inline struct phy_provider *__of_phy_provider_register(
struct device *dev, struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
@@ -481,7 +538,7 @@ static inline struct phy_provider *__of_phy_provider_register(
static inline struct phy_provider *__devm_of_phy_provider_register(struct device
*dev, struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 71d956935405..6ca51e0080ec 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef PHY_TEGRA_XUSB_H
@@ -8,6 +8,7 @@
struct tegra_xusb_padctl;
struct device;
+enum usb_device_speed;
struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
@@ -20,7 +21,17 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
bool val);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy);
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port);
+int tegra_xusb_padctl_get_port_number(struct phy *phy);
+int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy);
+
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 52bc8e487ef7..1acafd86ab13 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -2,6 +2,8 @@
#ifndef __PHY_FIXED_H
#define __PHY_FIXED_H
+#include <linux/types.h>
+
struct fixed_phy_status {
int link;
int speed;
@@ -12,6 +14,7 @@ struct fixed_phy_status {
struct device_node;
struct gpio_desc;
+struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h
new file mode 100644
index 000000000000..1279f48c8a70
--- /dev/null
+++ b/include/linux/phylib_stubs.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Stubs for the Network PHY library
+ */
+
+#include <linux/rtnetlink.h>
+
+struct kernel_hwtstamp_config;
+struct netlink_ext_ack;
+struct phy_device;
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+
+extern const struct phylib_stubs *phylib_stubs;
+
+struct phylib_stubs {
+ int (*hwtstamp_get)(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config);
+ int (*hwtstamp_set)(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+};
+
+static inline int phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config)
+{
+ /* phylib_register_stubs() and phylib_unregister_stubs()
+ * also run under rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return -EOPNOTSUPP;
+
+ return phylib_stubs->hwtstamp_get(phydev, config);
+}
+
+static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ /* phylib_register_stubs() and phylib_unregister_stubs()
+ * also run under rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return -EOPNOTSUPP;
+
+ return phylib_stubs->hwtstamp_set(phydev, config, extack);
+}
+
+#else
+
+static inline int phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index c36fb41a7d90..9a57deefcb07 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -9,6 +9,7 @@ struct device_node;
struct ethtool_cmd;
struct fwnode_handle;
struct net_device;
+struct phylink;
enum {
MLO_PAUSE_NONE,
@@ -20,6 +21,76 @@ enum {
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+
+ /* PCS "negotiation" mode.
+ * PHYLINK_PCS_NEG_NONE - protocol has no inband capability
+ * PHYLINK_PCS_NEG_OUTBAND - some out of band or fixed link setting
+ * PHYLINK_PCS_NEG_INBAND_DISABLED - inband mode disabled, e.g.
+ * 1000base-X with autoneg off
+ * PHYLINK_PCS_NEG_INBAND_ENABLED - inband mode enabled
+ * Additionally, this can be tested using bitmasks:
+ * PHYLINK_PCS_NEG_INBAND - inband mode selected
+ * PHYLINK_PCS_NEG_ENABLED - negotiation mode enabled
+ */
+ PHYLINK_PCS_NEG_NONE = 0,
+ PHYLINK_PCS_NEG_ENABLED = BIT(4),
+ PHYLINK_PCS_NEG_OUTBAND = BIT(5),
+ PHYLINK_PCS_NEG_INBAND = BIT(6),
+ PHYLINK_PCS_NEG_INBAND_DISABLED = PHYLINK_PCS_NEG_INBAND,
+ PHYLINK_PCS_NEG_INBAND_ENABLED = PHYLINK_PCS_NEG_INBAND |
+ PHYLINK_PCS_NEG_ENABLED,
+
+ /* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
+ * autonegotiation advertisement. They correspond to the PAUSE and
+ * ASM_DIR bits defined by 802.3, respectively.
+ *
+ * The following table lists the values of tx_pause and rx_pause which
+ * might be requested in mac_link_up. The exact values depend on either
+ * the results of autonegotiation (if MLO_PAUSE_AN is set) or user
+ * configuration (if MLO_PAUSE_AN is not set).
+ *
+ * MAC_SYM_PAUSE  MAC_ASYM_PAUSE  MLO_PAUSE_AN  tx_pause/rx_pause
+ * =============  ==============  ============  ==================
+ * 0              0               0             0/0
+ * 0              0               1             0/0
+ * 0              1               0             0/0, 0/1, 1/0, 1/1
+ * 0              1               1             0/0, 1/0
+ * 1              0               0             0/0, 1/1
+ * 1              0               1             0/0, 1/1
+ * 1              1               0             0/0, 0/1, 1/0, 1/1
+ * 1              1               1             0/0, 0/1, 1/1
+ *
+ * If you set MAC_ASYM_PAUSE, the user may request any combination of
+ * tx_pause and rx_pause. You do not have to support these
+ * combinations.
+ *
+ * However, you should support combinations of tx_pause and rx_pause
+ * which might be the result of autonegotiation. For example, don't set
+ * MAC_SYM_PAUSE unless your device can support tx_pause and rx_pause
+ * at the same time.
+ */
+ MAC_SYM_PAUSE = BIT(0),
+ MAC_ASYM_PAUSE = BIT(1),
+ MAC_10HD = BIT(2),
+ MAC_10FD = BIT(3),
+ MAC_10 = MAC_10HD | MAC_10FD,
+ MAC_100HD = BIT(4),
+ MAC_100FD = BIT(5),
+ MAC_100 = MAC_100HD | MAC_100FD,
+ MAC_1000HD = BIT(6),
+ MAC_1000FD = BIT(7),
+ MAC_1000 = MAC_1000HD | MAC_1000FD,
+ MAC_2500FD = BIT(8),
+ MAC_5000FD = BIT(9),
+ MAC_10000FD = BIT(10),
+ MAC_20000FD = BIT(11),
+ MAC_25000FD = BIT(12),
+ MAC_40000FD = BIT(13),
+ MAC_50000FD = BIT(14),
+ MAC_56000FD = BIT(15),
+ MAC_100000FD = BIT(16),
+ MAC_200000FD = BIT(17),
+ MAC_400000FD = BIT(18),
};
static inline bool phylink_autoneg_inband(unsigned int mode)
@@ -36,8 +107,11 @@ static inline bool phylink_autoneg_inband(unsigned int mode)
* @speed: link speed, one of the SPEED_* constants.
* @duplex: link duplex mode, one of DUPLEX_* constants.
* @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @rate_matching: rate matching being performed, one of the RATE_MATCH_*
+ * constants. If rate matching is taking place, then the speed/duplex of
+ * the medium link mode (@speed and @duplex) and the speed/duplex of the phy
+ * interface mode (@interface) are different.
* @link: true if the link is up.
- * @an_enabled: true if autonegotiation is enabled/desired.
* @an_complete: true if autonegotiation has completed.
*/
struct phylink_link_state {
@@ -47,8 +121,8 @@ struct phylink_link_state {
int speed;
int duplex;
int pause;
+ int rate_matching;
unsigned int link:1;
- unsigned int an_enabled:1;
unsigned int an_complete:1;
};
@@ -61,47 +135,53 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
- * @pcs_poll: MAC PCS cannot provide link change interrupt
* @poll_fixed_state: if true, starts link_poll,
* if MAC link is at %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicates the MAC driver is responsible for PHY PM.
+ * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by the MAC/PCS.
+ * @mac_capabilities: MAC pause/speed/duplex capabilities.
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
- bool pcs_poll;
bool poll_fixed_state;
+ bool mac_managed_pm;
+ bool ovr_an_inband;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ unsigned long mac_capabilities;
};
+void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
+
/**
* struct phylink_mac_ops - MAC operations structure.
- * @validate: Validate and update the link configuration.
- * @mac_pcs_get_state: Read the current link state from the hardware.
+ * @mac_get_caps: Get MAC capabilities for interface mode.
+ * @mac_select_pcs: Select a PCS for the interface mode.
* @mac_prepare: prepare for a major reconfiguration of the interface.
* @mac_config: configure the MAC for the selected mode and state.
* @mac_finish: finish a major reconfiguration of the interface.
- * @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
*
* The individual methods are described more fully below.
*/
struct phylink_mac_ops {
- void (*validate)(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state);
- void (*mac_pcs_get_state)(struct phylink_config *config,
- struct phylink_link_state *state);
+ unsigned long (*mac_get_caps)(struct phylink_config *config,
+ phy_interface_t interface);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
int (*mac_finish)(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
- void (*mac_an_restart)(struct phylink_config *config);
void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
void (*mac_link_up)(struct phylink_config *config,
@@ -112,48 +192,32 @@ struct phylink_mac_ops {
#if 0 /* For kernel-doc purposes only. */
/**
- * validate - Validate and update the link configuration
+ * mac_get_caps() - Get MAC capabilities for interface mode.
* @config: a pointer to a &struct phylink_config.
- * @supported: ethtool bitmask for supported link modes.
- * @state: a pointer to a &struct phylink_link_state.
- *
- * Clear bits in the @supported and @state->advertising masks that
- * are not supportable by the MAC.
- *
- * Note that the PHY may be able to transform from one connection
- * technology to another, so, eg, don't clear 1000BaseX just
- * because the MAC is unable to BaseX mode. This is more about
- * clearing unsupported speeds and duplex settings. The port modes
- * should not be cleared; phylink_set_port_modes() will help with this.
- *
- * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
- * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
- * based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly. See phylink_helper_basex_speed().
+ * @interface: PHY interface mode.
*
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
- * MAC driver to return all supported link modes.
- *
- * If the @state->interface mode is not supported, then the @supported
- * mask must be cleared.
+ * Optional method. When not provided, config->mac_capabilities will be used.
+ * When implemented, this returns the MAC capabilities for the specified
+ * interface mode where there is some special handling required by the MAC
+ * driver (e.g. not supporting half-duplex in certain interface modes.)
*/
-void validate(struct phylink_config *config, unsigned long *supported,
- struct phylink_link_state *state);
-
+unsigned long mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface);
/**
- * mac_pcs_get_state() - Read the current inband link state from the hardware
+ * mac_select_pcs() - Select a PCS for the interface mode.
* @config: a pointer to a &struct phylink_config.
- * @state: a pointer to a &struct phylink_link_state.
+ * @interface: PHY interface mode for PCS
*
- * Read the current inband link state from the MAC PCS, reporting the
- * current speed in @state->speed, duplex mode in @state->duplex, pause
- * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
- * negotiation completion state in @state->an_complete, and link up state
- * in @state->link. If possible, @state->lp_advertising should also be
- * populated.
+ * Return the &struct phylink_pcs for the specified interface mode, or
+ * NULL if none is required, or an error pointer on error.
+ *
+ * This must not modify any state. It is used to query which PCS should
+ * be used. Phylink will use this during validation to ensure that the
+ * configuration is valid, and when setting a configuration to internally
+ * set the PCS that will be used.
*/
-void mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state);
+struct phylink_pcs *mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface);
/**
* mac_prepare() - prepare to change the PHY interface mode
@@ -191,8 +255,9 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* guaranteed to be correct, and so any mac_config() implementation must
* never reference these fields.
*
- * (this requires a rewrite - please refer to mac_link_up() for situations
- * where the PCS and MAC are not tightly integrated.)
+ * This will only be called to reconfigure the MAC for a "major" change in
+ * e.g. interface mode. It will not be called for changes in speed, duplex
+ * or pause modes or to change the in-band advertisement.
*
* In all negotiation modes, as defined by @mode, @state->pause indicates the
* pause settings which should be applied as follows. If %MLO_PAUSE_AN is not
@@ -224,7 +289,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* 1000base-X or Cisco SGMII mode depending on the @state->interface
* mode). In both cases, link state management (whether the link
* is up or not) is performed by the MAC, and reported via the
- * mac_pcs_get_state() callback. Changes in link state must be made
+ * pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
* Interface mode specific details are mentioned below.
@@ -273,12 +338,6 @@ int mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
/**
- * mac_an_restart() - restart 802.3z BaseX autonegotiation
- * @config: a pointer to a &struct phylink_config.
- */
-void mac_an_restart(struct phylink_config *config);
-
-/**
* mac_link_down() - take the link down
* @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
@@ -330,18 +389,30 @@ struct phylink_pcs_ops;
/**
* struct phylink_pcs - PHYLINK PCS instance
* @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @phylink: pointer to the owning &struct phylink instance
+ * @neg_mode: provide PCS neg mode via "mode" argument
* @poll: poll the PCS for link changes
*
* This structure is designed to be embedded within the PCS private data,
* and will be passed between phylink and the PCS.
+ *
+ * The @phylink member is private to phylink and must not be touched by
+ * the PCS driver.
*/
struct phylink_pcs {
const struct phylink_pcs_ops *ops;
+ struct phylink *phylink;
+ bool neg_mode;
bool poll;
};
/**
* struct phylink_pcs_ops - MAC PCS operations structure.
+ * @pcs_validate: validate the link configuration.
+ * @pcs_enable: enable the PCS.
+ * @pcs_disable: disable the PCS.
+ * @pcs_pre_config: pre-mac_config method (for errata)
+ * @pcs_post_config: post-mac_config method (for errata)
* @pcs_get_state: read the current MAC PCS link state from the hardware.
* @pcs_config: configure the MAC PCS for the selected mode and state.
* @pcs_an_restart: restart 802.3z BaseX autonegotiation.
@@ -349,19 +420,56 @@ struct phylink_pcs {
* (where necessary).
*/
struct phylink_pcs_ops {
+ int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+ int (*pcs_enable)(struct phylink_pcs *pcs);
+ void (*pcs_disable)(struct phylink_pcs *pcs);
+ void (*pcs_pre_config)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+ int (*pcs_post_config)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
void (*pcs_get_state)(struct phylink_pcs *pcs,
struct phylink_link_state *state);
- int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
+ int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac);
void (*pcs_an_restart)(struct phylink_pcs *pcs);
- void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
+ void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
};
#if 0 /* For kernel-doc purposes only. */
/**
+ * pcs_validate() - validate the link configuration.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a const pointer to a &struct phylink_link_state.
+ *
+ * Validate the interface mode, and advertising's autoneg bit, removing any
+ * media ethtool link modes that would not be supportable from the supported
+ * mask. Phylink will propagate the changes to the advertising mask. See the
+ * &struct phylink_mac_ops validate() method.
+ *
+ * Returns -EINVAL if the interface mode/autoneg mode is not supported.
+ * Returns non-zero positive if the link state can be supported.
+ */
+int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+
+/**
+ * pcs_enable() - enable the PCS.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ */
+int pcs_enable(struct phylink_pcs *pcs);
+
+/**
+ * pcs_disable() - disable the PCS.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ */
+void pcs_disable(struct phylink_pcs *pcs);
+
+/**
* pcs_get_state() - Read the current inband link state from the hardware
* @pcs: a pointer to a &struct phylink_pcs.
* @state: a pointer to a &struct phylink_link_state.
@@ -372,9 +480,6 @@ struct phylink_pcs_ops {
* negotiation completion state in @state->an_complete, and link up state
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
- *
- * When present, this overrides mac_pcs_get_state() in &struct
- * phylink_mac_ops.
*/
void pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state);
@@ -382,7 +487,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
/**
* pcs_config() - Configure the PCS mode and advertisement
* @pcs: a pointer to a &struct phylink_pcs.
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @neg_mode: link negotiation mode (see below)
* @interface: interface mode to be used
* @advertising: advertisement ethtool link mode mask
* @permit_pause_to_mac: permit forwarding pause resolution to MAC
@@ -400,8 +505,12 @@ void pcs_get_state(struct phylink_pcs *pcs,
* For 1000BASE-X, the advertisement should be programmed into the PCS.
*
* For most 10GBASE-R, there is no advertisement.
+ *
+ * The %neg_mode argument should be tested via the phylink_mode_*() family of
+ * functions, or for PCS that set pcs->neg_mode true, should be tested
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
-int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
bool permit_pause_to_mac);
@@ -417,7 +526,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
/**
* pcs_link_up() - program the PCS for the resolved link configuration
* @pcs: a pointer to a &struct phylink_pcs.
- * @mode: link autonegotiation mode
+ * @neg_mode: link negotiation mode (see below)
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
* @duplex: link duplex
@@ -426,26 +535,38 @@ void pcs_an_restart(struct phylink_pcs *pcs);
* the resolved link parameters. For example, a PCS operating in SGMII
* mode without in-band AN needs to be manually configured for the link
* and duplex setting. Otherwise, this should be a no-op.
+ *
+ * The %neg_mode argument should be tested via the phylink_mode_*() family of
+ * functions, or for PCS that set pcs->neg_mode true, should be tested
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
-void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
#endif
-struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
- phy_interface_t iface,
- const struct phylink_mac_ops *mac_ops);
-void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs);
+struct phylink *phylink_create(struct phylink_config *,
+ const struct fwnode_handle *,
+ phy_interface_t,
+ const struct phylink_mac_ops *);
void phylink_destroy(struct phylink *);
+bool phylink_expects_phy(struct phylink *pl);
int phylink_connect_phy(struct phylink *, struct phy_device *);
int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ const struct fwnode_handle *fwnode,
+ u32 flags);
void phylink_disconnect_phy(struct phylink *);
void phylink_mac_change(struct phylink *, bool up);
+void phylink_pcs_change(struct phylink_pcs *, bool up);
void phylink_start(struct phylink *);
void phylink_stop(struct phylink *);
+void phylink_suspend(struct phylink *pl, bool mac_wol);
+void phylink_resume(struct phylink *pl);
+
void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
@@ -460,8 +581,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
int phylink_init_eee(struct phylink *, bool);
-int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
-int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee);
+int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
int phylink_speed_down(struct phylink *pl, bool sync);
int phylink_speed_up(struct phylink *pl);
@@ -476,18 +597,48 @@ int phylink_speed_up(struct phylink *pl);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
-void phylink_helper_basex_speed(struct phylink_link_state *state);
+/**
+ * phylink_get_link_timer_ns - return the PCS link timer value
+ * @interface: link &typedef phy_interface_t mode
+ *
+ * Return the PCS link timer setting in nanoseconds for the PHY @interface
+ * mode, or -EINVAL if not appropriate.
+ */
+static inline int phylink_get_link_timer_ns(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return 1600000;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return 10000000;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ u16 bmsr, u16 lpa);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
-int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
- phy_interface_t interface,
- const unsigned long *advertising);
-int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising);
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs,
phy_interface_t interface,
- const unsigned long *advertising);
+ const unsigned long *advertising,
+ unsigned int neg_mode);
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+void phylink_resolve_c73(struct phylink_link_state *state);
+
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
+
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa);
#endif
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 176d6cf80e7c..a3aad9b4074c 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,18 +2,12 @@
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
+#include <linux/pid_types.h>
#include <linux/rculist.h>
-#include <linux/wait.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
-
-enum pid_type
-{
- PIDTYPE_PID,
- PIDTYPE_TGID,
- PIDTYPE_PGID,
- PIDTYPE_SID,
- PIDTYPE_MAX,
-};
+#include <linux/sched.h>
+#include <linux/wait.h>
/*
* What is struct pid?
@@ -51,6 +45,8 @@ enum pid_type
* find_pid_ns() using the int nr and struct pid_namespace *ns.
*/
+#define RESERVED_PIDS 300
+
struct upid {
int nr;
struct pid_namespace *ns;
@@ -61,22 +57,26 @@ struct pid
refcount_t count;
unsigned int level;
spinlock_t lock;
+ struct dentry *stashed;
+ u64 ino;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
struct hlist_head inodes;
/* wait queue for pidfd notifications */
wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
- struct upid numbers[1];
+ struct upid numbers[];
};
extern struct pid init_struct_pid;
-extern const struct file_operations pidfd_fops;
-
struct file;
-extern struct pid *pidfd_pid(const struct file *file);
+struct pid *pidfd_pid(const struct file *file);
+struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
+int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret);
+void do_notify_pidfd(struct task_struct *task);
static inline struct pid *get_pid(struct pid *pid)
{
@@ -106,9 +106,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
-struct pid_namespace;
-extern struct pid_namespace init_pid_ns;
-
extern int pid_max;
extern int pid_max_min, pid_max_max;
@@ -211,4 +208,127 @@ pid_t pid_vnr(struct pid *pid);
} \
task = tg___; \
} while_each_pid_task(pid, type, task)
+
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->thread_pid;
+}
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * task_xid_nr_ns() : id seen from the ns specified;
+ *
+ * see also pid_nr() etc in include/linux/pid.h
+ */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
+
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+}
+
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state).
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+static inline int pid_alive(const struct task_struct *p)
+{
+ return p->thread_pid != NULL;
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
+}
+
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+}
+
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
+/* Obsolete, do not use: */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
+
+/**
+ * is_global_init - check if a task structure is init. Since init
+ * is free to have sub-threads, we need to check tgid.
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
+ */
+static inline int is_global_init(struct task_struct *tsk)
+{
+ return task_tgid_nr(tsk) == 1;
+}
+
#endif /* _LINUX_PID_H */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 5a5cb45ac57e..f9f9931e02d6 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,7 +8,6 @@
#include <linux/workqueue.h>
#include <linux/threads.h>
#include <linux/nsproxy.h>
-#include <linux/kref.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
@@ -17,8 +16,14 @@
struct fs_pin;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+/* modes for vm.memfd_noexec sysctl */
+#define MEMFD_NOEXEC_SCOPE_EXEC 0 /* MFD_EXEC implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1 /* MFD_NOEXEC_SEAL implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */
+#endif
+
struct pid_namespace {
- struct kref kref;
struct idr idr;
struct rcu_head rcu;
unsigned int pid_allocated;
@@ -33,6 +38,9 @@ struct pid_namespace {
struct ucounts *ucounts;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+ int memfd_noexec_scope;
+#endif
} __randomize_layout;
extern struct pid_namespace init_pid_ns;
@@ -43,10 +51,27 @@ extern struct pid_namespace init_pid_ns;
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
if (ns != &init_pid_ns)
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ int scope = MEMFD_NOEXEC_SCOPE_EXEC;
+
+ for (; ns; ns = ns->parent)
+ scope = max(scope, READ_ONCE(ns->memfd_noexec_scope));
+
+ return scope;
+}
+#else
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+#endif
+
extern struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
@@ -61,6 +86,11 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+
static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns)
{
@@ -88,4 +118,9 @@ extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
+static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
+{
+ return task_active_pid_ns(tsk) == &init_pid_ns;
+}
+
#endif /* _LINUX_PID_NS_H */
diff --git a/include/linux/pid_types.h b/include/linux/pid_types.h
new file mode 100644
index 000000000000..c2aee1d91dcf
--- /dev/null
+++ b/include/linux/pid_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PID_TYPES_H
+#define _LINUX_PID_TYPES_H
+
+enum pid_type {
+ PIDTYPE_PID,
+ PIDTYPE_TGID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX,
+};
+
+struct pid_namespace;
+extern struct pid_namespace init_pid_ns;
+
+#endif /* _LINUX_PID_TYPES_H */
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
new file mode 100644
index 000000000000..75bdf9807802
--- /dev/null
+++ b/include/linux/pidfs.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PID_FS_H
+#define _LINUX_PID_FS_H
+
+struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
+void __init pidfs_init(void);
+
+#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 019fecd75d0c..73de70362b98 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -12,40 +12,44 @@
#define __LINUX_PINCTRL_CONSUMER_H
#include <linux/err.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/pinctrl-state.h>
+struct device;
+struct gpio_chip;
+
/* This struct is private to the core and should be regarded as a cookie */
struct pinctrl;
struct pinctrl_state;
-struct device;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
-extern bool pinctrl_gpio_can_use_line(unsigned gpio);
-extern int pinctrl_gpio_request(unsigned gpio);
-extern void pinctrl_gpio_free(unsigned gpio);
-extern int pinctrl_gpio_direction_input(unsigned gpio);
-extern int pinctrl_gpio_direction_output(unsigned gpio);
-extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
-
-extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
-extern void pinctrl_put(struct pinctrl *p);
-extern struct pinctrl_state * __must_check pinctrl_lookup_state(
- struct pinctrl *p,
- const char *name);
-extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
-
-extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
-extern void devm_pinctrl_put(struct pinctrl *p);
-extern int pinctrl_select_default_state(struct device *dev);
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset);
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config);
+
+struct pinctrl * __must_check pinctrl_get(struct device *dev);
+void pinctrl_put(struct pinctrl *p);
+struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name);
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
+
+struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+void devm_pinctrl_put(struct pinctrl *p);
+int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
-extern int pinctrl_pm_select_default_state(struct device *dev);
-extern int pinctrl_pm_select_sleep_state(struct device *dev);
-extern int pinctrl_pm_select_idle_state(struct device *dev);
+int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_sleep_state(struct device *dev);
+int pinctrl_pm_select_idle_state(struct device *dev);
#else
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
@@ -63,31 +67,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
-static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+static inline bool
+pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
{
return true;
}
-static inline int pinctrl_gpio_request(unsigned gpio)
+static inline int
+pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline void pinctrl_gpio_free(unsigned gpio)
+static inline void
+pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
}
-static inline int pinctrl_gpio_direction_input(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_direction_output(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+static inline int
+pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
return 0;
}
@@ -101,9 +112,8 @@ static inline void pinctrl_put(struct pinctrl *p)
{
}
-static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
- struct pinctrl *p,
- const char *name)
+static inline struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name)
{
return NULL;
}
@@ -145,8 +155,8 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#endif /* CONFIG_PINCTRL */
-static inline struct pinctrl * __must_check pinctrl_get_select(
- struct device *dev, const char *name)
+static inline struct pinctrl * __must_check pinctrl_get_select(struct device *dev,
+ const char *name)
{
struct pinctrl *p;
struct pinctrl_state *s;
@@ -171,14 +181,13 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
return p;
}
-static inline struct pinctrl * __must_check pinctrl_get_select_default(
- struct device *dev)
+static inline struct pinctrl * __must_check pinctrl_get_select_default(struct device *dev)
{
return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
}
-static inline struct pinctrl * __must_check devm_pinctrl_get_select(
- struct device *dev, const char *name)
+static inline struct pinctrl * __must_check devm_pinctrl_get_select(struct device *dev,
+ const char *name)
{
struct pinctrl *p;
struct pinctrl_state *s;
@@ -203,8 +212,7 @@ static inline struct pinctrl * __must_check devm_pinctrl_get_select(
return p;
}
-static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(
- struct device *dev)
+static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(struct device *dev)
{
return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
}
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index a48ff69acddd..bb6653af4f92 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -14,11 +14,17 @@
#ifndef PINCTRL_DEVINFO_H
#define PINCTRL_DEVINFO_H
+struct device;
+
#ifdef CONFIG_PINCTRL
+#include <linux/device.h>
+
/* The device core acts as a consumer toward pinctrl */
#include <linux/pinctrl/consumer.h>
+struct pinctrl;
+
/**
* struct dev_pin_info - pin state container for devices
* @p: pinctrl handle for the containing device
@@ -40,9 +46,15 @@ struct dev_pin_info {
extern int pinctrl_bind_pins(struct device *dev);
extern int pinctrl_init_done(struct device *dev);
-#else
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ if (!dev->pins)
+ return NULL;
-struct device;
+ return dev->pins->p;
+}
+
+#else
/* Stubs if we're not using pinctrl */
@@ -56,5 +68,10 @@ static inline int pinctrl_init_done(struct device *dev)
return 0;
}
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_PINCTRL */
#endif /* PINCTRL_DEVINFO_H */
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index e987dc9fd2af..673e96df453b 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -11,7 +11,7 @@
#ifndef __LINUX_PINCTRL_MACHINE_H
#define __LINUX_PINCTRL_MACHINE_H
-#include <linux/bug.h>
+#include <linux/array_size.h>
#include <linux/pinctrl/pinctrl-state.h>
@@ -47,7 +47,7 @@ struct pinctrl_map_mux {
struct pinctrl_map_configs {
const char *group_or_pin;
unsigned long *configs;
- unsigned num_configs;
+ unsigned int num_configs;
};
/**
@@ -149,16 +149,18 @@ struct pinctrl_map {
#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \
PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
+struct pinctrl_map;
+
#ifdef CONFIG_PINCTRL
extern int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps);
+ unsigned int num_maps);
extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
extern void pinctrl_provide_dummies(void);
#else
static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps)
+ unsigned int num_maps)
{
return 0;
}
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 6aeb711f7cd1..a65d3d078e58 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -11,9 +11,12 @@
#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
#define __LINUX_PINCTRL_PINCONF_GENERIC_H
-#include <linux/device.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/machine.h>
+struct device_node;
+
struct pinctrl_dev;
struct pinctrl_map;
@@ -35,7 +38,8 @@ struct pinctrl_map;
* impedance.
* @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
* impedance to GROUND). If the argument is != 0 pull-down is enabled,
- * if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
+ * the value is interpreted by the driver and can be custom or an SI unit
+ * such as Ohms.
* @PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: the pin will be pulled up or down based
* on embedded knowledge of the controller hardware, like current mux
* function. The pull direction and possibly strength too will normally
@@ -46,7 +50,8 @@ struct pinctrl_map;
* @PIN_CONFIG_BIAS_DISABLE.
* @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
* impedance to VDD). If the argument is != 0 pull-up is enabled,
- * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
+ * the value is interpreted by the driver and can be custom or an SI unit
+ * such as Ohms.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
* which are then pulled up with an external resistor. Setting this
@@ -76,32 +81,35 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
- * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
+ * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
* to indicate low power mode, argument 0 turns low power mode off.
+ * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM
+ * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
+ * value on the line. Use argument 1 to indicate high level, argument 0 to
+ * indicate low level. (Please see Documentation/driver-api/pin-control.rst,
+ * section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
* without driving a value there. For most platforms this reduces to
* enable the output buffers and then let the pin controller current
* configuration (eg. the currently selected mux function) drive values on
* the line. Use argument 1 to enable output mode, argument 0 to disable
* it.
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
- * value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
- * section "GPIO mode pitfalls" for a discussion around this parameter.)
+ * @PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: this will configure the output impedance
+ * of the pin with the value passed as argument. The argument is in ohms.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
- * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
- * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
- * this parameter (on a custom format) tells the driver which alternative
- * slew rate to use.
* @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs)
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
- * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
+ * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ * this parameter (on a custom format) tells the driver which alternative
+ * slew rate to use.
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -124,14 +132,16 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
- PIN_CONFIG_LOW_POWER_MODE,
- PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_MODE_LOW_POWER,
+ PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SKEW_DELAY,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
- PIN_CONFIG_SKEW_DELAY,
- PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
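Values of this enum are packed together with a 24-bit argument into the unsigned long config words the pinconf API passes around; the packing helper is declared later in this same header. A minimal sketch (the 10000 Ohm pull strength is a made-up, driver-interpreted argument, per the updated kernel-doc above):

static void foo_build_configs(unsigned long *configs)
{
	/* Enable pull-up; argument may be custom or in Ohms */
	configs[0] = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 10000);
	/* Drive the pin high while in GPIO mode */
	configs[1] = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, 1);
}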
@@ -183,33 +193,33 @@ struct pinconf_generic_params {
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
+ unsigned int *reserved_maps, unsigned int *num_maps,
enum pinctrl_map_type type);
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps, enum pinctrl_map_type type);
+ unsigned int *num_maps, enum pinctrl_map_type type);
void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
-static inline int pinconf_generic_dt_node_to_map_group(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_GROUP);
}
-static inline int pinconf_generic_dt_node_to_map_pin(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_PIN);
}
-static inline int pinconf_generic_dt_node_to_map_all(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_all(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned int *num_maps)
{
/*
* passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index f8a8215e9021..770ec2221156 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -40,25 +40,25 @@ struct pinconf_ops {
bool is_generic;
#endif
int (*pin_config_get) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *config);
int (*pin_config_set) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *config);
int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned selector);
+ unsigned int selector);
void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned long config);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 2aef59df93d7..9a8189ffd0f2 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -11,20 +11,40 @@
#ifndef __LINUX_PINCTRL_PINCTRL_H
#define __LINUX_PINCTRL_PINCTRL_H
-#include <linux/radix-tree.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl-state.h>
-#include <linux/pinctrl/devinfo.h>
+#include <linux/types.h>
struct device;
+struct device_node;
+struct gpio_chip;
+struct module;
+struct seq_file;
+
+struct pin_config_item;
+struct pinconf_generic_params;
+struct pinconf_ops;
struct pinctrl_dev;
struct pinctrl_map;
struct pinmux_ops;
-struct pinconf_ops;
-struct pin_config_item;
-struct gpio_chip;
-struct device_node;
+
+/**
+ * struct pingroup - provides information on pingroup
+ * @name: a name for pingroup
+ * @pins: an array of pins in the pingroup
+ * @npins: number of pins in the pingroup
+ */
+struct pingroup {
+ const char *name;
+ const unsigned int *pins;
+ size_t npins;
+};
+
+/* Convenience macro to define a single named or anonymous pingroup */
+#define PINCTRL_PINGROUP(_name, _pins, _npins) \
+(struct pingroup) { \
+ .name = _name, \
+ .pins = _pins, \
+ .npins = _npins, \
+}
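Because PINCTRL_PINGROUP() expands to a compound literal, it is handiest at function scope. A minimal sketch with hypothetical pin numbers (ARRAY_SIZE comes from linux/array_size.h):

static const unsigned int i2c0_pins[] = { 4, 5 };	/* hypothetical */

static void foo_describe_group(void)
{
	struct pingroup grp = PINCTRL_PINGROUP("i2c0_grp", i2c0_pins,
					       ARRAY_SIZE(i2c0_pins));

	/* hand grp off to driver-internal bookkeeping here */
	(void)grp;
}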
/**
* struct pinctrl_pin_desc - boards/machines provide information on their
@@ -34,7 +54,7 @@ struct device_node;
* @drv_data: driver-defined per-pin data. pinctrl core does not touch this
*/
struct pinctrl_pin_desc {
- unsigned number;
+ unsigned int number;
const char *name;
void *drv_data;
};
@@ -51,8 +71,8 @@ struct pinctrl_pin_desc {
* @id: an ID number for the chip in this range
* @base: base offset of the GPIO range
* @pin_base: base pin number of the GPIO range if pins == NULL
- * @pins: enumeration of pins in GPIO range or NULL
* @npins: number of pins in the GPIO range, including the base number
+ * @pins: enumeration of pins in GPIO range or NULL
* @gc: an optional pointer to a gpio_chip
*/
struct pinctrl_gpio_range {
@@ -61,8 +81,8 @@ struct pinctrl_gpio_range {
unsigned int id;
unsigned int base;
unsigned int pin_base;
- unsigned const *pins;
unsigned int npins;
+ unsigned int const *pins;
struct gpio_chip *gc;
};
@@ -88,18 +108,18 @@ struct pinctrl_gpio_range {
struct pinctrl_ops {
int (*get_groups_count) (struct pinctrl_dev *pctldev);
const char *(*get_group_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_group_pins) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const unsigned **pins,
- unsigned *num_pins);
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins);
void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps);
+ struct pinctrl_map **map, unsigned int *num_maps);
void (*dt_free_map) (struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
};
/**
@@ -173,7 +193,7 @@ extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *ranges,
- unsigned nranges);
+ unsigned int nranges);
extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
@@ -183,8 +203,28 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
unsigned int pin);
extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- const char *pin_group, const unsigned **pins,
- unsigned *num_pins);
+ const char *pin_group, const unsigned int **pins,
+ unsigned int *num_pins);
+
+/**
+ * struct pinfunction - Description about a function
+ * @name: Name of the function
+ * @groups: An array of groups for this function
+ * @ngroups: Number of groups in @groups
+ */
+struct pinfunction {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+};
+
+/* Convenience macro to define a single named pinfunction */
+#define PINCTRL_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ }
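PINCTRL_PINFUNCTION() is the same compound-literal convenience for functions; a sketch with hypothetical group names:

static const char * const foo_uart_groups[] = { "uart0_grp", "uart1_grp" };

static void foo_describe_function(void)
{
	struct pinfunction func = PINCTRL_PINFUNCTION("uart", foo_uart_groups,
						      ARRAY_SIZE(foo_uart_groups));

	(void)func;
}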
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 9a647fa5c8f1..d6f7b58d6ad0 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -11,11 +11,10 @@
#ifndef __LINUX_PINCTRL_PINMUX_H
#define __LINUX_PINCTRL_PINMUX_H
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl.h>
+#include <linux/types.h>
struct pinctrl_dev;
+struct pinctrl_gpio_range;
/**
* struct pinmux_ops - pinmux operations, to be implemented by pin controller
@@ -58,26 +57,26 @@ struct pinctrl_dev;
* the pin request.
*/
struct pinmux_ops {
- int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
- int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*request) (struct pinctrl_dev *pctldev, unsigned int offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned int offset);
int (*get_functions_count) (struct pinctrl_dev *pctldev);
const char *(*get_function_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_function_groups) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const char * const **groups,
- unsigned *num_groups);
- int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
- unsigned group_selector);
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups);
+ int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset,
+ unsigned int offset,
bool input);
bool strict;
};
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 50afd0d0084c..8ff23bf5a819 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -48,6 +48,7 @@ struct pipe_buffer {
* @files: number of struct file referring this pipe (protected by ->i_lock)
* @r_counter: reader counter
* @w_counter: writer counter
+ * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
@@ -61,15 +62,16 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
-#ifdef CONFIG_WATCH_QUEUE
- bool note_loss;
-#endif
unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
+ bool poll_usage;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
@@ -123,6 +125,22 @@ struct pipe_buf_operations {
};
/**
+ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
+ * i.e. it was created with O_NOTIFICATION_PIPE
+ * @pipe: The pipe to check
+ *
+ * Return: true if pipe is a watch queue, false otherwise.
+ */
+static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
+{
+#ifdef CONFIG_WATCH_QUEUE
+ return pipe->watch_queue != NULL;
+#else
+ return false;
+#endif
+}
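A typical caller simply refuses work on notification pipes, since their contents are kernel-generated messages. A hedged sketch; the -EXDEV choice mirrors what the splice code does, but is an assumption here:

static int foo_splice_check(struct pipe_inode_info *pipe)
{
	/* Notification pipes cannot be spliced into */
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;

	return 0;
}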
+
+/**
* pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
@@ -155,23 +173,23 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
- * pipe_space_for_user - Return number of slots available to userspace
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
- * @pipe: The pipe info structure
+ * pipe_buf - Return the pipe buffer for the specified slot in the pipe ring
+ * @pipe: The pipe to access
+ * @slot: The slot of interest
*/
-static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
- struct pipe_inode_info *pipe)
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+ unsigned int slot)
{
- unsigned int p_occupancy, p_space;
+ return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
- p_occupancy = pipe_occupancy(head, tail);
- if (p_occupancy >= pipe->max_usage)
- return 0;
- p_space = pipe->ring_size - p_occupancy;
- if (p_space > pipe->max_usage)
- p_space = pipe->max_usage;
- return p_space;
+/**
+ * pipe_head_buf - Return the pipe buffer at the head of the pipe ring
+ * @pipe: The pipe to access
+ */
+static inline struct pipe_buffer *pipe_head_buf(const struct pipe_inode_info *pipe)
+{
+ return pipe_buf(pipe, pipe->head);
}
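head and tail are free-running counters and ring_size is a power of two, so pipe_buf() just masks the slot index into range. A minimal sketch of walking every occupied slot, using pipe_empty() from earlier in this header:

static void foo_walk_pipe(struct pipe_inode_info *pipe)
{
	unsigned int tail = pipe->tail;

	while (!pipe_empty(pipe->head, tail)) {
		struct pipe_buffer *buf = pipe_buf(pipe, tail);

		/* buf->page, buf->offset and buf->len describe the data */
		(void)buf;
		tail++;
	}
}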
/**
@@ -227,6 +245,15 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
return buf->ops->try_steal(pipe, buf);
}
+static inline void pipe_discard_from(struct pipe_inode_info *pipe,
+ unsigned int old_head)
+{
+ unsigned int mask = pipe->ring_size - 1;
+
+ while (pipe->head > old_head)
+ pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]);
+}
+
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
@@ -236,12 +263,9 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
-extern unsigned int pipe_max_size;
-extern unsigned long pipe_user_pages_hard;
-extern unsigned long pipe_user_pages_soft;
-
-/* Drop the inode semaphore and wait for a pipe event, atomically */
-void pipe_wait(struct pipe_inode_info *pipe);
+/* Wait for a pipe to be readable/writable while dropping the pipe lock */
+void pipe_wait_readable(struct pipe_inode_info *);
+void pipe_wait_writable(struct pipe_inode_info *);
struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
@@ -253,22 +277,18 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
-#ifdef CONFIG_WATCH_QUEUE
unsigned long account_pipe_buffers(struct user_struct *user,
unsigned long old, unsigned long new);
bool too_many_pipe_buffers_soft(unsigned long user_bufs);
bool too_many_pipe_buffers_hard(unsigned long user_bufs);
bool pipe_is_unprivileged_user(void);
-#endif
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
-#ifdef CONFIG_WATCH_QUEUE
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
-#endif
-long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+long pipe_fcntl(struct file *, unsigned int, unsigned int arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
-unsigned int round_pipe_size(unsigned long size);
+unsigned int round_pipe_size(unsigned int size);
#endif
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 2955ba976048..86be8bf27b41 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,6 +4,8 @@
#include <linux/mm.h>
+#define ARCH_DEFAULT_PKEY 0
+
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
#else /* ! CONFIG_ARCH_HAS_PKEYS */
@@ -44,10 +46,6 @@ static inline bool arch_pkeys_enabled(void)
return false;
}
-static inline void copy_init_pkru_to_fpregs(void)
-{
-}
-
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
#endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 174601554b06..2f1b952d596a 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -152,19 +152,12 @@ struct packet_stacked_data
};
#define PSD_POOL_SIZE 64
-struct pktcdvd_kobj
-{
- struct kobject kobj;
- struct pktcdvd_device *pd;
-};
-#define to_pktcdvdkobj(_k) \
- ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))
-
struct pktcdvd_device
{
- struct block_device *bdev; /* dev attached */
+ struct file *bdev_file; /* dev attached */
+ /* handle acquired for bdev during pkt_open_dev() */
+ struct file *f_open_bdev;
dev_t pkt_dev; /* our dev */
- char name[20];
struct packet_settings settings;
struct packet_stats stats;
int refcnt; /* Open count */
@@ -183,6 +176,8 @@ struct pktcdvd_device
spinlock_t lock; /* Serialize access to bio_queue */
struct rb_root bio_queue; /* Work queue of bios we need to handle */
int bio_queue_size; /* Number of nodes in bio_queue */
+ bool congested; /* Someone is waiting for bio_queue_size
+ * to drop. */
sector_t current_sector; /* Keep track of where the elevator is */
atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
/* needs to be run. */
@@ -195,8 +190,6 @@ struct pktcdvd_device
int write_congestion_on;
struct device *dev; /* sysfs pktcdvd[0-7] dev */
- struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */
- struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */
struct dentry *dfs_d_root; /* debugfs: devname directory */
struct dentry *dfs_f_info; /* debugfs: info file */
diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h
deleted file mode 100644
index 0e0d3df9bf72..000000000000
--- a/include/linux/pl353-smc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ARM PL353 SMC Driver Header
- *
- * Copyright (C) 2012 - 2018 Xilinx, Inc
- */
-
-#ifndef __LINUX_PL353_SMC_H
-#define __LINUX_PL353_SMC_H
-
-enum pl353_smc_ecc_mode {
- PL353_SMC_ECCMODE_BYPASS = 0,
- PL353_SMC_ECCMODE_APB = 1,
- PL353_SMC_ECCMODE_MEM = 2
-};
-
-enum pl353_smc_mem_width {
- PL353_SMC_MEM_WIDTH_8 = 0,
- PL353_SMC_MEM_WIDTH_16 = 1
-};
-
-u32 pl353_smc_get_ecc_val(int ecc_reg);
-bool pl353_smc_ecc_is_busy(void);
-int pl353_smc_get_nand_int_status_raw(void);
-void pl353_smc_clr_nand_int(void);
-int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode);
-int pl353_smc_set_ecc_pg_size(unsigned int pg_sz);
-int pl353_smc_set_buswidth(unsigned int bw);
-void pl353_smc_set_cycles(u32 timings[]);
-#endif
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
deleted file mode 100644
index e371e08f04bc..000000000000
--- a/include/linux/platform_data/ad5755.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Analog Devices Inc.
- */
-#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
-#define __LINUX_PLATFORM_DATA_AD5755_H__
-
-enum ad5755_mode {
- AD5755_MODE_VOLTAGE_0V_5V = 0,
- AD5755_MODE_VOLTAGE_0V_10V = 1,
- AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
- AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
- AD5755_MODE_CURRENT_4mA_20mA = 4,
- AD5755_MODE_CURRENT_0mA_20mA = 5,
- AD5755_MODE_CURRENT_0mA_24mA = 6,
-};
-
-enum ad5755_dc_dc_phase {
- AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
- AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
- AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
- AD5755_DC_DC_PHASE_90_DEGREE = 3,
-};
-
-enum ad5755_dc_dc_freq {
- AD5755_DC_DC_FREQ_250kHZ = 0,
- AD5755_DC_DC_FREQ_410kHZ = 1,
- AD5755_DC_DC_FREQ_650kHZ = 2,
-};
-
-enum ad5755_dc_dc_maxv {
- AD5755_DC_DC_MAXV_23V = 0,
- AD5755_DC_DC_MAXV_24V5 = 1,
- AD5755_DC_DC_MAXV_27V = 2,
- AD5755_DC_DC_MAXV_29V5 = 3,
-};
-
-enum ad5755_slew_rate {
- AD5755_SLEW_RATE_64k = 0,
- AD5755_SLEW_RATE_32k = 1,
- AD5755_SLEW_RATE_16k = 2,
- AD5755_SLEW_RATE_8k = 3,
- AD5755_SLEW_RATE_4k = 4,
- AD5755_SLEW_RATE_2k = 5,
- AD5755_SLEW_RATE_1k = 6,
- AD5755_SLEW_RATE_500 = 7,
- AD5755_SLEW_RATE_250 = 8,
- AD5755_SLEW_RATE_125 = 9,
- AD5755_SLEW_RATE_64 = 10,
- AD5755_SLEW_RATE_32 = 11,
- AD5755_SLEW_RATE_16 = 12,
- AD5755_SLEW_RATE_8 = 13,
- AD5755_SLEW_RATE_4 = 14,
- AD5755_SLEW_RATE_0_5 = 15,
-};
-
-enum ad5755_slew_step_size {
- AD5755_SLEW_STEP_SIZE_1 = 0,
- AD5755_SLEW_STEP_SIZE_2 = 1,
- AD5755_SLEW_STEP_SIZE_4 = 2,
- AD5755_SLEW_STEP_SIZE_8 = 3,
- AD5755_SLEW_STEP_SIZE_16 = 4,
- AD5755_SLEW_STEP_SIZE_32 = 5,
- AD5755_SLEW_STEP_SIZE_64 = 6,
- AD5755_SLEW_STEP_SIZE_128 = 7,
- AD5755_SLEW_STEP_SIZE_256 = 8,
-};
-
-/**
- * struct ad5755_platform_data - AD5755 DAC driver platform data
- * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
- * compensation register is used.
- * @dc_dc_phase: DC-DC converter phase.
- * @dc_dc_freq: DC-DC converter frequency.
- * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
- * @dac.mode: The mode to be used for the DAC output.
- * @dac.ext_current_sense_resistor: Whether an external current sense resistor
- * is used.
- * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
- * @dac.slew.enable: Whether to enable digital slew.
- * @dac.slew.rate: Slew rate of the digital slew.
- * @dac.slew.step_size: Slew step size of the digital slew.
- **/
-struct ad5755_platform_data {
- bool ext_dc_dc_compenstation_resistor;
- enum ad5755_dc_dc_phase dc_dc_phase;
- enum ad5755_dc_dc_freq dc_dc_freq;
- enum ad5755_dc_dc_maxv dc_dc_maxv;
-
- struct {
- enum ad5755_mode mode;
- bool ext_current_sense_resistor;
- bool enable_voltage_overrange;
- struct {
- bool enable;
- enum ad5755_slew_rate rate;
- enum ad5755_slew_step_size step_size;
- } slew;
- } dac[4];
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h
deleted file mode 100644
index b1fd1530c9a5..000000000000
--- a/include/linux/platform_data/ad7291.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IIO_AD7291_H__
-#define __IIO_AD7291_H__
-
-/**
- * struct ad7291_platform_data - AD7291 platform data
- * @use_external_ref: Whether to use an external or internal reference voltage
- */
-struct ad7291_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h
deleted file mode 100644
index 3e0ffe2d5d3d..000000000000
--- a/include/linux/platform_data/ad7298.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD7298 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD7298_H__
-#define __LINUX_PLATFORM_DATA_AD7298_H__
-
-/**
- * struct ad7298_platform_data - Platform data for the ad7298 ADC driver
- * @ext_ref: Whether to use an external reference voltage.
- **/
-struct ad7298_platform_data {
- bool ext_ref;
-};
-
-#endif /* IIO_ADC_AD7298_H_ */
diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h
deleted file mode 100644
index c2bd0a13bea1..000000000000
--- a/include/linux/platform_data/ad7303.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Analog Devices AD7303 DAC driver
- *
- * Copyright 2013 Analog Devices Inc.
- */
-
-#ifndef __IIO_ADC_AD7303_H__
-#define __IIO_ADC_AD7303_H__
-
-/**
- * struct ad7303_platform_data - AD7303 platform data
- * @use_external_ref: If set to true use an external voltage reference connected
- * to the REF pin, otherwise use the internal reference derived from Vdd.
- */
-struct ad7303_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h
index 576c7f962c4e..7c697e58f02a 100644
--- a/include/linux/platform_data/ad7793.h
+++ b/include/linux/platform_data/ad7793.h
@@ -40,7 +40,7 @@ enum ad7793_bias_voltage {
* enum ad7793_refsel - AD7793 reference voltage selection
* @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+)
* and REFIN1(-).
- * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and
+ * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+)
* and REFIN1(-). Only valid for AD7795/AD7796.
* @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference.
*/
diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h
index 732af46b2d16..9b4dca6ae70b 100644
--- a/include/linux/platform_data/ad7887.h
+++ b/include/linux/platform_data/ad7887.h
@@ -13,13 +13,9 @@
* second input channel, and Vref is internally connected to Vdd. If set to
* false the device is used in single channel mode and AIN1/Vref is used as
* VREF input.
- * @use_onchip_ref: Whether to use the onchip reference. If set to true the
- * internal 2.5V reference is used. If set to false a external reference is
- * used.
*/
struct ad7887_platform_data {
bool en_dual;
- bool use_onchip_ref;
};
#endif /* IIO_ADC_AD7887_H_ */
diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h
deleted file mode 100644
index 86667235077a..000000000000
--- a/include/linux/platform_data/adau1977.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ADAU1977/ADAU1978/ADAU1979 driver
- *
- * Copyright 2014 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__
-#define __LINUX_PLATFORM_DATA_ADAU1977_H__
-
-/**
- * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting
- * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V
- * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V
- * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V
- * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V
- * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V
- * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V
- * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V
- * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V
- * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
- */
-enum adau1977_micbias {
- ADAU1977_MICBIAS_5V0 = 0x0,
- ADAU1977_MICBIAS_5V5 = 0x1,
- ADAU1977_MICBIAS_6V0 = 0x2,
- ADAU1977_MICBIAS_6V5 = 0x3,
- ADAU1977_MICBIAS_7V0 = 0x4,
- ADAU1977_MICBIAS_7V5 = 0x5,
- ADAU1977_MICBIAS_8V0 = 0x6,
- ADAU1977_MICBIAS_8V5 = 0x7,
- ADAU1977_MICBIAS_9V0 = 0x8,
-};
-
-/**
- * struct adau1977_platform_data - Platform configuration data for the ADAU1977
- * @micbias: Specifies the voltage for the MICBIAS pin
- */
-struct adau1977_platform_data {
- enum adau1977_micbias micbias;
-};
-
-#endif
diff --git a/include/linux/platform_data/adp5588.h b/include/linux/platform_data/adp5588.h
deleted file mode 100644
index 6d3f7d911a92..000000000000
--- a/include/linux/platform_data/adp5588.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- */
-
-#ifndef _ADP5588_H
-#define _ADP5588_H
-
-#define DEV_ID 0x00 /* Device ID */
-#define CFG 0x01 /* Configuration Register1 */
-#define INT_STAT 0x02 /* Interrupt Status Register */
-#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
-#define Key_EVENTA 0x04 /* Key Event Register A */
-#define Key_EVENTB 0x05 /* Key Event Register B */
-#define Key_EVENTC 0x06 /* Key Event Register C */
-#define Key_EVENTD 0x07 /* Key Event Register D */
-#define Key_EVENTE 0x08 /* Key Event Register E */
-#define Key_EVENTF 0x09 /* Key Event Register F */
-#define Key_EVENTG 0x0A /* Key Event Register G */
-#define Key_EVENTH 0x0B /* Key Event Register H */
-#define Key_EVENTI 0x0C /* Key Event Register I */
-#define Key_EVENTJ 0x0D /* Key Event Register J */
-#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
-#define UNLOCK1 0x0F /* Unlock Key1 */
-#define UNLOCK2 0x10 /* Unlock Key2 */
-#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
-#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
-#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
-#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
-#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
-#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
-#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
-#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
-#define GPI_EM1 0x20 /* GPI Event Mode 1 */
-#define GPI_EM2 0x21 /* GPI Event Mode 2 */
-#define GPI_EM3 0x22 /* GPI Event Mode 3 */
-#define GPIO_DIR1 0x23 /* GPIO Data Direction */
-#define GPIO_DIR2 0x24 /* GPIO Data Direction */
-#define GPIO_DIR3 0x25 /* GPIO Data Direction */
-#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
-#define Debounce_DIS1 0x29 /* Debounce Disable */
-#define Debounce_DIS2 0x2A /* Debounce Disable */
-#define Debounce_DIS3 0x2B /* Debounce Disable */
-#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
-#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
-#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
-#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
-#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
-#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
-#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
-#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
-#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
-#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
-#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
-#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
-#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
-#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
-#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
-#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
-#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
-#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
-
-#define ADP5588_DEVICE_ID_MASK 0xF
-
- /* Configuration Register1 */
-#define ADP5588_AUTO_INC (1 << 7)
-#define ADP5588_GPIEM_CFG (1 << 6)
-#define ADP5588_OVR_FLOW_M (1 << 5)
-#define ADP5588_INT_CFG (1 << 4)
-#define ADP5588_OVR_FLOW_IEN (1 << 3)
-#define ADP5588_K_LCK_IM (1 << 2)
-#define ADP5588_GPI_IEN (1 << 1)
-#define ADP5588_KE_IEN (1 << 0)
-
-/* Interrupt Status Register */
-#define ADP5588_CMP2_INT (1 << 5)
-#define ADP5588_CMP1_INT (1 << 4)
-#define ADP5588_OVR_FLOW_INT (1 << 3)
-#define ADP5588_K_LCK_INT (1 << 2)
-#define ADP5588_GPI_INT (1 << 1)
-#define ADP5588_KE_INT (1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define ADP5588_K_LCK_EN (1 << 6)
-#define ADP5588_LCK21 0x30
-#define ADP5588_KEC 0xF
-
-#define ADP5588_MAXGPIO 18
-#define ADP5588_BANK(offs) ((offs) >> 3)
-#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
-
-/* Put one of these structures in i2c_board_info platform_data */
-
-#define ADP5588_KEYMAPSIZE 80
-
-#define GPI_PIN_ROW0 97
-#define GPI_PIN_ROW1 98
-#define GPI_PIN_ROW2 99
-#define GPI_PIN_ROW3 100
-#define GPI_PIN_ROW4 101
-#define GPI_PIN_ROW5 102
-#define GPI_PIN_ROW6 103
-#define GPI_PIN_ROW7 104
-#define GPI_PIN_COL0 105
-#define GPI_PIN_COL1 106
-#define GPI_PIN_COL2 107
-#define GPI_PIN_COL3 108
-#define GPI_PIN_COL4 109
-#define GPI_PIN_COL5 110
-#define GPI_PIN_COL6 111
-#define GPI_PIN_COL7 112
-#define GPI_PIN_COL8 113
-#define GPI_PIN_COL9 114
-
-#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
-#define GPI_PIN_ROW_END GPI_PIN_ROW7
-#define GPI_PIN_COL_BASE GPI_PIN_COL0
-#define GPI_PIN_COL_END GPI_PIN_COL9
-
-#define GPI_PIN_BASE GPI_PIN_ROW_BASE
-#define GPI_PIN_END GPI_PIN_COL_END
-
-#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
-
-struct adp5588_gpi_map {
- unsigned short pin;
- unsigned short sw_evt;
-};
-
-struct adp5588_kpad_platform_data {
- int rows; /* Number of rows */
- int cols; /* Number of columns */
- const unsigned short *keymap; /* Pointer to keymap */
- unsigned short keymapsize; /* Keymap size */
- unsigned repeat:1; /* Enable key repeat */
- unsigned en_keylock:1; /* Enable Key Lock feature */
- unsigned short unlock_key1; /* Unlock Key 1 */
- unsigned short unlock_key2; /* Unlock Key 2 */
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
- const struct adp5588_gpio_platform_data *gpio_data;
-};
-
-struct i2c_client; /* forward declaration */
-
-struct adp5588_gpio_platform_data {
- int gpio_start; /* GPIO Chip base # */
- const char *const *names;
- unsigned irq_base; /* interrupt base # */
- unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif
diff --git a/include/linux/platform_data/amd_xdma.h b/include/linux/platform_data/amd_xdma.h
new file mode 100644
index 000000000000..b5e23e14bac8
--- /dev/null
+++ b/include/linux/platform_data/amd_xdma.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ * This information is used to match the channel when requesting a DMA channel
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+ enum dma_transfer_direction dir;
+};
+
+#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum dma channels in each direction
+ * @device_map_cnt: Number of entries in @device_map
+ * @device_map: Slave map entries used to match channel requests
+ */
+struct xdma_platdata {
+ u32 max_dma_channels;
+ u32 device_map_cnt;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
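Boards wire consumers to XDMA channels through an ordinary dma_slave_map table, with XDMA_FILTER_PARAM() stashing the per-channel matching info in the map's param slot. A sketch under hypothetical device and channel names (struct dma_slave_map is { devname, slave, param } from linux/dmaengine.h):

static struct xdma_chan_info foo_h2c_info = {
	.dir = DMA_MEM_TO_DEV,	/* host-to-card */
};

static struct dma_slave_map foo_map[] = {
	{ "foo-dev", "h2c", XDMA_FILTER_PARAM(&foo_h2c_info) },
};

static struct xdma_platdata foo_pdata = {
	.max_dma_channels = 4,
	.device_map = foo_map,
	.device_map_cnt = ARRAY_SIZE(foo_map),
};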
diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h
deleted file mode 100644
index 2107d0d992dd..000000000000
--- a/include/linux/platform_data/asoc-mx27vis.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_ASOC_MX27VIS_H
-#define __PLATFORM_DATA_ASOC_MX27VIS_H
-
-struct snd_mx27vis_platform_data {
- int amp_gain0_gpio;
- int amp_gain1_gpio;
- int amp_mutel_gpio;
- int amp_muter_gpio;
-};
-
-#endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */
diff --git a/include/linux/platform_data/asoc-palm27x.h b/include/linux/platform_data/asoc-palm27x.h
deleted file mode 100644
index 22b69a393a57..000000000000
--- a/include/linux/platform_data/asoc-palm27x.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _INCLUDE_PALMASOC_H_
-#define _INCLUDE_PALMASOC_H_
-
-struct palm27x_asoc_info {
- int jack_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h
new file mode 100644
index 000000000000..7b5b9e20fbf5
--- /dev/null
+++ b/include/linux/platform_data/asoc-pxa.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_PXA_AUDIO_H__
+#define __SOC_PXA_AUDIO_H__
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+
+/*
+ * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
+ * a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
+ *
+ * reset_gpio should only be specified for pxa27x CPUs where a silicon
+ * bug prevents correct operation of the reset line. If not specified,
+ * the default behaviour on these CPUs is to consider gpio 113 as the
+ * AC97 reset line, which is the default on most boards.
+ */
+typedef struct {
+ int (*startup)(struct snd_pcm_substream *, void *);
+ void (*shutdown)(struct snd_pcm_substream *, void *);
+ void (*suspend)(void *);
+ void (*resume)(void *);
+ void *priv;
+ int reset_gpio;
+ void *codec_pdata[AC97_BUS_MAX_DEVICES];
+} pxa2xx_audio_ops_t;
+
+extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
+extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
+
+#endif
diff --git a/include/linux/platform_data/asoc-s3c24xx_simtec.h b/include/linux/platform_data/asoc-s3c24xx_simtec.h
deleted file mode 100644
index 1a7efc98d108..000000000000
--- a/include/linux/platform_data/asoc-s3c24xx_simtec.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Simtec Audio support.
-*/
-
-/**
- * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
- * @use_mpllin: Select codec clock from MPLLin
- * @output_cdclk: Need to output CDCLK to the codec
- * @have_mic: Set if we have a MIC socket
- * @have_lout: Set if we have a LineOut socket
- * @amp_gpio: GPIO pin to enable the AMP
- * @amp_gain: Option GPIO to control AMP gain
- */
-struct s3c24xx_audio_simtec_pdata {
- unsigned int use_mpllin:1;
- unsigned int output_cdclk:1;
-
- unsigned int have_mic:1;
- unsigned int have_lout:1;
-
- int amp_gpio;
- int amp_gain[2];
-
- void (*startup)(void);
-};
diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h
deleted file mode 100644
index b8d0f730dda8..000000000000
--- a/include/linux/platform_data/asoc-ux500-msp.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __MSP_H
-#define __MSP_H
-
-#include <linux/platform_data/dma-ste-dma40.h>
-
-/* Platform data structure for a MSP I2S-device */
-struct msp_i2s_platform_data {
- int id;
- struct stedma40_chan_cfg *msp_i2s_dma_rx;
- struct stedma40_chan_cfg *msp_i2s_dma_tx;
-};
-
-#endif
diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h
deleted file mode 100644
index f20eaeb827ce..000000000000
--- a/include/linux/platform_data/at91_adc.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2011 Free Electrons
- */
-
-#ifndef _AT91_ADC_H_
-#define _AT91_ADC_H_
-
-enum atmel_adc_ts_type {
- ATMEL_ADC_TOUCHSCREEN_NONE = 0,
- ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
- ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
-};
-
-/**
- * struct at91_adc_trigger - description of triggers
- * @name: name of the trigger advertised to the user
- * @value: value to set in the ADC's trigger setup register
- to enable the trigger
- * @is_external: Does the trigger rely on an external pin?
- */
-struct at91_adc_trigger {
- const char *name;
- u8 value;
- bool is_external;
-};
-
-/**
- * struct at91_adc_data - platform data for ADC driver
- * @channels_used: channels in use on the board as a bitmask
- * @startup_time: startup time of the ADC in microseconds
- * @trigger_list: Triggers available in the ADC
- * @trigger_number: Number of triggers available in the ADC
- * @use_external_triggers: does the board has external triggers availables
- * @vref: Reference voltage for the ADC in millivolts
- * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires)
- */
-struct at91_adc_data {
- unsigned long channels_used;
- u8 startup_time;
- struct at91_adc_trigger *trigger_list;
- u8 trigger_number;
- bool use_external_triggers;
- u16 vref;
- enum atmel_adc_ts_type touchscreen_type;
-};
-
-extern void __init at91_add_device_adc(struct at91_adc_data *data);
-#endif
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h
deleted file mode 100644
index fccf969dc4da..000000000000
--- a/include/linux/platform_data/ata-samsung_cf.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung CF-ATA platform_device info
-*/
-
-#ifndef __ATA_SAMSUNG_CF_H
-#define __ATA_SAMSUNG_CF_H __FILE__
-
-/**
- * struct s3c_ide_platdata - S3C IDE driver platform data.
- * @setup_gpio: Setup the external GPIO pins to the right state for data
- * transfer in true-ide mode.
- */
-struct s3c_ide_platdata {
- void (*setup_gpio)(void);
-};
-
-/*
- * s3c_ide_set_platdata() - Setup the platform specifc data for IDE driver.
- * @pdata: Platform data for IDE driver.
- */
-extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata);
-
-/* architecture-specific IDE configuration */
-extern void s3c64xx_ide_setup_gpio(void);
-extern void s5pv210_ide_setup_gpio(void);
-
-#endif /*__ATA_SAMSUNG_CF_H */
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 99e6069c5fd8..73f63be509c4 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -6,18 +6,6 @@
#ifndef __ATMEL_H__
#define __ATMEL_H__
- /* Compact Flash */
-struct at91_cf_data {
- int irq_pin; /* I/O IRQ */
- int det_pin; /* Card detect */
- int vcc_pin; /* power switching */
- int rst_pin; /* card reset */
- u8 chipselect; /* EBI Chip Select number */
- u8 flags;
-#define AT91_CF_TRUE_IDE 0x01
-#define AT91_IDE_SWAP_A0_A2 0x02
-};
-
/* FIXME: this needs a better location, but gets stuff building again */
#ifdef CONFIG_ATMEL_PM
extern int at91_suspend_entering_slow_clock(void);
diff --git a/include/linux/platform_data/bcm7038_wdt.h b/include/linux/platform_data/bcm7038_wdt.h
new file mode 100644
index 000000000000..e18cfd9ec8f9
--- /dev/null
+++ b/include/linux/platform_data/bcm7038_wdt.h
@@ -0,0 +1,8 @@
+#ifndef __BCM7038_WDT_PDATA_H
+#define __BCM7038_WDT_PDATA_H
+
+struct bcm7038_wdt_platform_data {
+ const char *clk_name;
+};
+
+#endif /* __BCM7038_WDT_PDATA_H */
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 54a06a4d2618..596ca4f95cfa 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -8,7 +8,7 @@
struct device;
struct bd6107_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..ec99b7b73d1d 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 201 Broadcom Corporation
+ * Copyright (c) 2016 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry {
*/
struct brcmfmac_pd_cc {
int table_size;
- struct brcmfmac_pd_cc_entry table[0];
+ struct brcmfmac_pd_cc_entry table[];
};
/**
@@ -178,7 +178,7 @@ struct brcmfmac_platform_data {
void (*power_off)(void);
char *fw_alternative_path;
int device_count;
- struct brcmfmac_pd_device devices[0];
+ struct brcmfmac_pd_device devices[];
};
diff --git a/include/linux/platform_data/brcmnand.h b/include/linux/platform_data/brcmnand.h
new file mode 100644
index 000000000000..8b8777985dce
--- /dev/null
+++ b/include/linux/platform_data/brcmnand.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BRCMNAND_PLAT_DATA_H
+#define BRCMNAND_PLAT_DATA_H
+
+struct brcmnand_platform_data {
+ int chip_select;
+ const char * const *part_probe_types;
+ unsigned int ecc_stepsize;
+ unsigned int ecc_strength;
+};
+
+#endif /* BRCMNAND_PLAT_DATA_H */
diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h
index b9f682459f08..11a2a23fd9b2 100644
--- a/include/linux/platform_data/clk-fch.h
+++ b/include/linux/platform_data/clk-fch.h
@@ -12,7 +12,7 @@
struct fch_clk_data {
void __iomem *base;
- u32 is_rv;
+ char *name;
};
#endif /* __CLK_FCH_H */
diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h
deleted file mode 100644
index 8429e73911a1..000000000000
--- a/include/linux/platform_data/clk-u300.h
+++ /dev/null
@@ -1 +0,0 @@
-void __init u300_clk_init(void __iomem *base);
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 1fcfe9e63cb9..ecc47d5fe239 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -13,8 +13,8 @@
#ifndef __CROS_EC_COMMANDS_H
#define __CROS_EC_COMMANDS_H
-
-
+#include <linux/bits.h>
+#include <linux/types.h>
#define BUILD_ASSERT(_cond)
@@ -51,10 +51,14 @@
/*
* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
* and they tell the kernel that so we have to think of it as two parts.
+ *
+ * Other BIOSes report only the I/O port region spanned by the Microchip
+ * MEC series EC; an attempt to address a larger region may fail.
*/
-#define EC_HOST_CMD_REGION0 0x800
-#define EC_HOST_CMD_REGION1 0x880
-#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_REGION0 0x800
+#define EC_HOST_CMD_REGION1 0x880
+#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_MEC_REGION_SIZE 0x8
/* EC command register bit functions */
#define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */
@@ -783,7 +787,7 @@ struct ec_host_response {
*
* Packets always start with a request or response header. They are followed
* by data_len bytes of data. If the data_crc_present flag is set, the data
- * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1
+ * bytes are followed by a CRC-8 of that data, using x^8 + x^2 + x + 1
* polynomial.
*
* Host algorithm when sending a request q:
@@ -1078,7 +1082,7 @@ struct ec_params_get_cmd_versions_v1 {
} __ec_align2;
/**
- * struct ec_response_get_cmd_version - Response to the get command versions.
+ * struct ec_response_get_cmd_versions - Response to the get command versions.
* @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
* a desired version.
*/
@@ -1284,6 +1288,30 @@ enum ec_feature_code {
EC_FEATURE_SCP = 39,
/* The MCU is an Integrated Sensor Hub */
EC_FEATURE_ISH = 40,
+ /* New TCPMv2 TYPEC_ prefaced commands supported */
+ EC_FEATURE_TYPEC_CMD = 41,
+ /*
+ * The EC will wait for direction from the AP to enter Type-C alternate
+ * modes or USB4.
+ */
+ EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY = 42,
+ /*
+ * The EC will wait for an acknowledge from the AP after setting the
+ * mux.
+ */
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43,
+ /*
+ * The EC supports entering and residing in S4.
+ */
+ EC_FEATURE_S4_RESIDENCY = 44,
+ /*
+ * The EC supports the AP directing mux sets for the board.
+ */
+ EC_FEATURE_TYPEC_AP_MUX_SET = 45,
+ /*
+ * The EC supports the AP composing VDMs for us to send.
+ */
+ EC_FEATURE_TYPEC_AP_VDM_SEND = 46,
};
#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
@@ -1419,7 +1447,7 @@ struct ec_response_flash_info_2 {
uint16_t num_banks_total;
/* Number of banks described in banks array. */
uint16_t num_banks_desc;
- struct ec_flash_bank banks[0];
+ struct ec_flash_bank banks[];
} __ec_align4;
/*
@@ -2420,12 +2448,12 @@ struct ec_response_motion_sense_fifo_info {
/* Total amount of vector lost */
uint16_t total_lost;
/* Lost events since the last fifo_info, per sensors */
- uint16_t lost[0];
+ uint16_t lost[];
} __ec_todo_packed;
struct ec_response_motion_sense_fifo_data {
uint32_t number_data;
- struct ec_response_motion_sensor_data data[0];
+ struct ec_response_motion_sensor_data data[];
} __ec_todo_packed;
/* List supported activity recognition */
@@ -2673,7 +2701,7 @@ struct ec_response_motion_sense {
* Sensor data is truncated if response_max is too small
* for holding all the data.
*/
- struct ec_response_motion_sensor_data sensor[0];
+ DECLARE_FLEX_ARRAY(struct ec_response_motion_sensor_data, sensor);
} dump;
/* Used for MOTIONSENSE_CMD_INFO. */
@@ -3093,7 +3121,7 @@ struct ec_response_tmp006_get_calibration_v1 {
uint8_t algorithm;
uint8_t num_params;
uint8_t reserved[2];
- float val[0];
+ float val[];
} __ec_align4;
struct ec_params_tmp006_set_calibration_v1 {
@@ -3101,7 +3129,7 @@ struct ec_params_tmp006_set_calibration_v1 {
uint8_t algorithm;
uint8_t num_params;
uint8_t reserved;
- float val[0];
+ float val[];
} __ec_align4;
@@ -3374,6 +3402,9 @@ enum ec_mkbp_event {
/* Send an incoming CEC message to the AP */
EC_MKBP_EVENT_CEC_MESSAGE = 9,
+ /* Peripheral device charger event */
+ EC_MKBP_EVENT_PCHG = 12,
+
/* Number of MKBP events */
EC_MKBP_EVENT_COUNT,
};
@@ -3450,11 +3481,15 @@ struct ec_response_get_next_event_v1 {
#define EC_MKBP_VOL_UP 1
#define EC_MKBP_VOL_DOWN 2
#define EC_MKBP_RECOVERY 3
+#define EC_MKBP_BRI_UP 4
+#define EC_MKBP_BRI_DOWN 5
+#define EC_MKBP_SCREEN_LOCK 6
/* Switches */
#define EC_MKBP_LID_OPEN 0
#define EC_MKBP_TABLET_MODE 1
#define EC_MKBP_BASE_ATTACHED 2
+#define EC_MKBP_FRONT_PROXIMITY 3
/* Run keyboard factory test scanning */
#define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068
@@ -3926,60 +3961,52 @@ struct ec_response_i2c_passthru {
} __ec_align1;
/*****************************************************************************/
-/* Power button hang detect */
-
+/* AP hang detect */
#define EC_CMD_HANG_DETECT 0x009F
-/* Reasons to start hang detection timer */
-/* Power button pressed */
-#define EC_HANG_START_ON_POWER_PRESS BIT(0)
-
-/* Lid closed */
-#define EC_HANG_START_ON_LID_CLOSE BIT(1)
-
- /* Lid opened */
-#define EC_HANG_START_ON_LID_OPEN BIT(2)
-
-/* Start of AP S3->S0 transition (booting or resuming from suspend) */
-#define EC_HANG_START_ON_RESUME BIT(3)
+#define EC_HANG_DETECT_MIN_TIMEOUT 5
+#define EC_HANG_DETECT_MAX_TIMEOUT 65535
-/* Reasons to cancel hang detection */
+/* EC hang detect commands */
+enum ec_hang_detect_cmds {
+ /* Reload AP hang detect timer. */
+ EC_HANG_DETECT_CMD_RELOAD = 0x0,
-/* Power button released */
-#define EC_HANG_STOP_ON_POWER_RELEASE BIT(8)
+ /* Stop AP hang detect timer. */
+ EC_HANG_DETECT_CMD_CANCEL = 0x1,
-/* Any host command from AP received */
-#define EC_HANG_STOP_ON_HOST_COMMAND BIT(9)
+ /* Configure watchdog with given reboot timeout and
+ * cancel currently running AP hang detect timer.
+ */
+ EC_HANG_DETECT_CMD_SET_TIMEOUT = 0x2,
-/* Stop on end of AP S0->S3 transition (suspending or shutting down) */
-#define EC_HANG_STOP_ON_SUSPEND BIT(10)
+ /* Get last hang status - whether the AP boot was clear or not */
+ EC_HANG_DETECT_CMD_GET_STATUS = 0x3,
-/*
- * If this flag is set, all the other fields are ignored, and the hang detect
- * timer is started. This provides the AP a way to start the hang timer
- * without reconfiguring any of the other hang detect settings. Note that
- * you must previously have configured the timeouts.
- */
-#define EC_HANG_START_NOW BIT(30)
-
-/*
- * If this flag is set, all the other fields are ignored (including
- * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer
- * without reconfiguring any of the other hang detect settings.
- */
-#define EC_HANG_STOP_NOW BIT(31)
+	/* Clear last hang status. Called when the AP is rebooting or
+	 * shutting down gracefully.
+ */
+ EC_HANG_DETECT_CMD_CLEAR_STATUS = 0x4
+};
struct ec_params_hang_detect {
- /* Flags; see EC_HANG_* */
- uint32_t flags;
-
- /* Timeout in msec before generating host event, if enabled */
- uint16_t host_event_timeout_msec;
+ uint16_t command; /* enum ec_hang_detect_cmds */
+ /* Timeout in seconds before generating reboot */
+ uint16_t reboot_timeout_sec;
+} __ec_align2;
- /* Timeout in msec before generating warm reboot, if enabled */
- uint16_t warm_reboot_timeout_msec;
-} __ec_align4;
+/* Status codes that describe whether the AP booted normally or a hang was
+ * detected and the EC reset the AP.
+ */
+enum ec_hang_detect_status {
+ EC_HANG_DETECT_AP_BOOT_NORMAL = 0x0,
+ EC_HANG_DETECT_AP_BOOT_EC_WDT = 0x1,
+ EC_HANG_DETECT_AP_BOOT_COUNT,
+};
+struct ec_response_hang_detect {
+ uint8_t status; /* enum ec_hang_detect_status */
+} __ec_align1;
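A hedged sketch of how the AP side might drive the reworked interface (the function names and the use of host command version 0 are assumptions; error handling is elided):

	/* Arm the EC watchdog with a 30 s reboot timeout. */
	static int example_hang_detect_arm(struct cros_ec_device *ec_dev)
	{
		struct ec_params_hang_detect req = {
			.command = EC_HANG_DETECT_CMD_SET_TIMEOUT,
			.reboot_timeout_sec = 30, /* within MIN/MAX_TIMEOUT */
		};

		return cros_ec_cmd(ec_dev, 0, EC_CMD_HANG_DETECT,
				   &req, sizeof(req), NULL, 0);
	}

	/* Reload the timer periodically so the EC does not reset the AP. */
	static int example_hang_detect_pet(struct cros_ec_device *ec_dev)
	{
		struct ec_params_hang_detect req = {
			.command = EC_HANG_DETECT_CMD_RELOAD,
		};

		return cros_ec_cmd(ec_dev, 0, EC_CMD_HANG_DETECT,
				   &req, sizeof(req), NULL, 0);
	}

cros_ec_cmd() is the helper declared in cros_ec_proto.h further down in this patch.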
/*****************************************************************************/
/* Commands for battery charging */
@@ -4215,6 +4242,7 @@ enum ec_device_event {
EC_DEVICE_EVENT_TRACKPAD,
EC_DEVICE_EVENT_DSP,
EC_DEVICE_EVENT_WIFI,
+ EC_DEVICE_EVENT_WLC,
};
enum ec_device_event_param {
@@ -4400,8 +4428,20 @@ struct ec_response_i2c_passthru_protect {
* These commands are for sending and receiving message via HDMI CEC
*/
+#define EC_CEC_MAX_PORTS 16
+
#define MAX_CEC_MSG_LEN 16
+/*
+ * Helper macros for packing/unpacking cec_events.
+ * bits[27:0] : bitmask of events from enum mkbp_cec_event
+ * bits[31:28]: port number
+ */
+#define EC_MKBP_EVENT_CEC_PACK(events, port) \
+ (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28))
+#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0))
+#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf)
+
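A short sketch of the layout these macros implement (values illustrative; EC_MKBP_CEC_HAVE_DATA is defined with the other mkbp_cec_event bits below):

	uint32_t ev = EC_MKBP_EVENT_CEC_PACK(EC_MKBP_CEC_HAVE_DATA, 1);
	uint32_t events = EC_MKBP_EVENT_CEC_GET_EVENTS(ev); /* the event bits */
	int port = EC_MKBP_EVENT_CEC_GET_PORT(ev);          /* == 1 */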
/* CEC message from the AP to be written on the CEC bus */
#define EC_CMD_CEC_WRITE_MSG 0x00B8
@@ -4413,19 +4453,54 @@ struct ec_params_cec_write {
uint8_t msg[MAX_CEC_MSG_LEN];
} __ec_align1;
+/**
+ * struct ec_params_cec_write_v1 - Message to write to the CEC bus
+ * @port: CEC port to write the message on
+ * @msg_len: length of msg in bytes
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write_v1 {
+ uint8_t port;
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* CEC message read from a CEC bus reported back to the AP */
+#define EC_CMD_CEC_READ_MSG 0x00B9
+
+/**
+ * struct ec_params_cec_read - Read a message from the CEC bus
+ * @port: CEC port to read a message on
+ */
+struct ec_params_cec_read {
+ uint8_t port;
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_read - Message read from the CEC bus
+ * @msg_len: length of msg in bytes
+ * @msg: message content read from the CEC bus
+ */
+struct ec_response_cec_read {
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
/* Set various CEC parameters */
#define EC_CMD_CEC_SET 0x00BA
/**
* struct ec_params_cec_set - CEC parameters set
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to set the parameter on
* @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
* or 1 to enable CEC functionality, in case cmd is
* CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
* address between 0 and 15 or 0xff to unregister
*/
struct ec_params_cec_set {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
uint8_t val;
} __ec_align1;
@@ -4435,9 +4510,11 @@ struct ec_params_cec_set {
/**
* struct ec_params_cec_get - CEC parameters get
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to get the parameter on
*/
struct ec_params_cec_get {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
} __ec_align1;
/**
@@ -4451,6 +4528,17 @@ struct ec_response_cec_get {
uint8_t val;
} __ec_align1;
+/* Get the number of CEC ports */
+#define EC_CMD_CEC_PORT_COUNT 0x00C1
+
+/**
+ * struct ec_response_cec_port_count - CEC port count response
+ * @port_count: number of CEC ports
+ */
+struct ec_response_cec_port_count {
+ uint8_t port_count;
+} __ec_align1;
+
/* CEC parameters command */
enum cec_command {
/* CEC reading, writing and events enable */
@@ -4465,6 +4553,8 @@ enum mkbp_cec_event {
EC_MKBP_CEC_SEND_OK = BIT(0),
/* Outgoing message was not acknowledged */
EC_MKBP_CEC_SEND_FAILED = BIT(1),
+ /* Incoming message can be read out by AP */
+ EC_MKBP_CEC_HAVE_DATA = BIT(2),
};
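When the EC raises EC_MKBP_CEC_HAVE_DATA, the AP fetches the frame with the new EC_CMD_CEC_READ_MSG. A hedged sketch (function name and command version are assumptions):

	static int example_cec_read(struct cros_ec_device *ec_dev, uint8_t port)
	{
		struct ec_params_cec_read req = { .port = port };
		struct ec_response_cec_read resp;
		int ret;

		ret = cros_ec_cmd(ec_dev, 0, EC_CMD_CEC_READ_MSG,
				  &req, sizeof(req), &resp, sizeof(resp));
		if (ret < 0)
			return ret;

		/* resp.msg[0..resp.msg_len - 1] holds the CEC frame. */
		return resp.msg_len;
	}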
/*****************************************************************************/
@@ -4598,6 +4688,7 @@ enum ec_codec_i2s_rx_subcmd {
EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_RESET = 0x5,
EC_CODEC_I2S_RX_SUBCMD_COUNT,
};
@@ -4729,6 +4820,7 @@ enum ec_reboot_cmd {
EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */
EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */
+ EC_REBOOT_COLD_AP_OFF = 8, /* Cold-reboot and don't boot AP */
};
/* Flags for ec_params_reboot_ec.reboot_flags */
@@ -5076,7 +5168,7 @@ struct ec_response_pd_log {
uint8_t type; /* event type : see PD_EVENT_xx below */
uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */
uint16_t data; /* type-defined data payload */
- uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */
+ uint8_t payload[]; /* optional additional data payload: 0..16 bytes */
} __ec_align4;
/* The timestamp is the microsecond counter shifted to get about a ms. */
@@ -5445,6 +5537,133 @@ struct ec_response_rollback_info {
/* Issue AP reset */
#define EC_CMD_AP_RESET 0x0125
+/*
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+ uint8_t port_count;
+} __ec_align1;
+
+/*
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+ uint32_t error; /* enum pchg_error */
+ uint8_t state; /* enum pchg_state state */
+ uint8_t battery_percentage;
+ uint8_t unused0;
+ uint8_t unused1;
+ /* Fields added in version 1 */
+ uint32_t fw_version;
+ uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+ /* Charger is reset and not initialized. */
+ PCHG_STATE_RESET = 0,
+ /* Charger is initialized or disabled. */
+ PCHG_STATE_INITIALIZED,
+ /* Charger is enabled and ready to detect a device. */
+ PCHG_STATE_ENABLED,
+ /* Device is in proximity. */
+ PCHG_STATE_DETECTED,
+ /* Device is being charged. */
+ PCHG_STATE_CHARGING,
+ /* Device is fully charged. It implies DETECTED (& not charging). */
+ PCHG_STATE_FULL,
+ /* In download (a.k.a. firmware update) mode */
+ PCHG_STATE_DOWNLOAD,
+ /* In download mode. Ready for receiving data. */
+ PCHG_STATE_DOWNLOADING,
+ /* Device is ready for data communication. */
+ PCHG_STATE_CONNECTED,
+ /* Put no more entry below */
+ PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+ [PCHG_STATE_RESET] = "RESET", \
+ [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+ [PCHG_STATE_ENABLED] = "ENABLED", \
+ [PCHG_STATE_DETECTED] = "DETECTED", \
+ [PCHG_STATE_CHARGING] = "CHARGING", \
+ [PCHG_STATE_FULL] = "FULL", \
+ [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+ [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+ [PCHG_STATE_CONNECTED] = "CONNECTED", \
+ }
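Typical use of the initializer above, mapping enum pchg_state to a printable name (a sketch):

	static const char * const pchg_state_text[] = EC_PCHG_STATE_TEXT;

	/* pchg_state_text[PCHG_STATE_CHARGING] == "CHARGING" */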
+
+/*
+ * Update firmware of peripheral chip
+ */
+#define EC_CMD_PCHG_UPDATE 0x0136
+
+/* Port number is encoded in bits[31:28]. */
+#define EC_MKBP_PCHG_PORT_SHIFT 28
+/* Utility macro for converting MKBP event to port number. */
+#define EC_MKBP_PCHG_EVENT_TO_PORT(e) (((e) >> EC_MKBP_PCHG_PORT_SHIFT) & 0xf)
+/* Utility macro for extracting event bits. */
+#define EC_MKBP_PCHG_EVENT_MASK(e) ((e) \
+ & GENMASK(EC_MKBP_PCHG_PORT_SHIFT-1, 0))
+
+#define EC_MKBP_PCHG_UPDATE_OPENED BIT(0)
+#define EC_MKBP_PCHG_WRITE_COMPLETE BIT(1)
+#define EC_MKBP_PCHG_UPDATE_CLOSED BIT(2)
+#define EC_MKBP_PCHG_UPDATE_ERROR BIT(3)
+#define EC_MKBP_PCHG_DEVICE_EVENT BIT(4)
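A hedged sketch of decoding a PCHG MKBP event word with the macros above (the function name is illustrative):

	static void example_pchg_event(uint32_t ev)
	{
		int port = EC_MKBP_PCHG_EVENT_TO_PORT(ev);   /* bits[31:28] */
		uint32_t bits = EC_MKBP_PCHG_EVENT_MASK(ev); /* bits[27:0] */

		pr_debug("PCHG port %d events %#x\n", port, bits);
		if (bits & EC_MKBP_PCHG_DEVICE_EVENT) {
			/* e.g. re-poll EC_CMD_PCHG for this port */
		}
	}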
+
+enum ec_pchg_update_cmd {
+ /* Reset chip to normal mode. */
+ EC_PCHG_UPDATE_CMD_RESET_TO_NORMAL = 0,
+ /* Reset and put a chip in update (a.k.a. download) mode. */
+ EC_PCHG_UPDATE_CMD_OPEN,
+ /* Write a block of data containing FW image. */
+ EC_PCHG_UPDATE_CMD_WRITE,
+ /* Close update session. */
+ EC_PCHG_UPDATE_CMD_CLOSE,
+ /* End of commands */
+ EC_PCHG_UPDATE_CMD_COUNT,
+};
+
+struct ec_params_pchg_update {
+ /* PCHG port number */
+ uint8_t port;
+ /* enum ec_pchg_update_cmd */
+ uint8_t cmd;
+ /* Padding */
+ uint8_t reserved0;
+ uint8_t reserved1;
+ /* Version of new firmware */
+ uint32_t version;
+ /* CRC32 of new firmware */
+ uint32_t crc32;
+ /* Address in chip memory where <data> is written to */
+ uint32_t addr;
+ /* Size of <data> */
+ uint32_t size;
+ /* Partial data of new firmware */
+ uint8_t data[];
+} __ec_align4;
+
+BUILD_ASSERT(EC_PCHG_UPDATE_CMD_COUNT
+ < BIT(sizeof(((struct ec_params_pchg_update *)0)->cmd)*8));
+
+struct ec_response_pchg_update {
+ /* Block size */
+ uint32_t block_size;
+} __ec_align4;
+
+
/*****************************************************************************/
/* Voltage regulator controls */
@@ -5528,6 +5747,253 @@ struct ec_response_regulator_get_voltage {
uint32_t voltage_mv;
} __ec_align4;
+/*
+ * Gather all discovery information for the given port and partner type.
+ *
+ * Note that if discovery has not yet completed, only the currently completed
+ * responses will be filled in. If the discovery data structures change
+ * while the command is running, BUSY will be returned.
+ *
+ * VDO field sizes are set to the maximum possible number of VDOs a VDM may
+ * contain, while the number of SVIDs here is selected to fit within the PROTO2
+ * maximum parameter size.
+ */
+#define EC_CMD_TYPEC_DISCOVERY 0x0131
+
+enum typec_partner_type {
+ TYPEC_PARTNER_SOP = 0,
+ TYPEC_PARTNER_SOP_PRIME = 1,
+};
+
+struct ec_params_typec_discovery {
+ uint8_t port;
+ uint8_t partner_type; /* enum typec_partner_type */
+} __ec_align1;
+
+struct svid_mode_info {
+ uint16_t svid;
+ uint16_t mode_count; /* Number of modes partner sent */
+ uint32_t mode_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
+};
+
+struct ec_response_typec_discovery {
+ uint8_t identity_count; /* Number of identity VDOs partner sent */
+ uint8_t svid_count; /* Number of SVIDs partner sent */
+ uint16_t reserved;
+ uint32_t discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
+ struct svid_mode_info svids[];
+} __ec_align1;
+
+/* USB Type-C commands for AP-controlled device policy. */
+#define EC_CMD_TYPEC_CONTROL 0x0132
+
+enum typec_control_command {
+ TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ TYPEC_CONTROL_COMMAND_TBT_UFP_REPLY,
+ TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+ TYPEC_CONTROL_COMMAND_BIST_SHARE_MODE,
+ TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
+};
+
+/* Replies the AP may specify to the TBT EnterMode command as a UFP */
+enum typec_tbt_ufp_reply {
+ TYPEC_TBT_UFP_REPLY_NAK,
+ TYPEC_TBT_UFP_REPLY_ACK,
+};
+
+struct typec_usb_mux_set {
+ uint8_t mux_index; /* Index of the mux to set in the chain */
+ uint8_t mux_flags; /* USB_PD_MUX_*-encoded USB mux state to set */
+} __ec_align1;
+
+#define VDO_MAX_SIZE 7
+
+struct typec_vdm_req {
+ /* VDM data, including VDM header */
+ uint32_t vdm_data[VDO_MAX_SIZE];
+ /* Number of 32-bit fields filled in */
+ uint8_t vdm_data_objects;
+ /* Partner to address - see enum typec_partner_type */
+ uint8_t partner_type;
+} __ec_align1;
+
+struct ec_params_typec_control {
+ uint8_t port;
+ uint8_t command; /* enum typec_control_command */
+ uint16_t reserved;
+
+ /*
+ * This section will be interpreted based on |command|. Define a
+ * placeholder structure to avoid having to increase the size and bump
+ * the command version when adding new sub-commands.
+ */
+ union {
+ uint32_t clear_events_mask;
+ uint8_t mode_to_enter; /* enum typec_mode */
+ uint8_t tbt_ufp_reply; /* enum typec_tbt_ufp_reply */
+ struct typec_usb_mux_set mux_params;
+		/* Used for SEND_VDM_REQ */
+ struct typec_vdm_req vdm_req_params;
+ uint8_t placeholder[128];
+ };
+} __ec_align1;
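A sketch of filling the request for TYPEC_CONTROL_COMMAND_SEND_VDM_REQ. The VDM header value is only illustrative (a structured Discover Identity header), not mandated by this interface:

	struct ec_params_typec_control req = {
		.port = 0,
		.command = TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
		.vdm_req_params = {
			.vdm_data = { 0xff008001 }, /* illustrative header */
			.vdm_data_objects = 1,
			.partner_type = TYPEC_PARTNER_SOP,
		},
	};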
+
+/*
+ * Gather all status information for a port.
+ *
+ * Note: this covers many of the return fields from the deprecated
+ * EC_CMD_USB_PD_CONTROL command, except those that are redundant with the
+ * discovery data. The "enum pd_cc_states" is defined with the deprecated
+ * EC_CMD_USB_PD_CONTROL command.
+ *
+ * This also combines in the EC_CMD_USB_PD_MUX_INFO flags.
+ */
+#define EC_CMD_TYPEC_STATUS 0x0133
+
+/*
+ * Power role.
+ *
+ * Note this is also used for PD header creation, and values align to those in
+ * the Power Delivery Specification Revision 3.0 (See
+ * 6.2.1.1.4 Port Power Role).
+ */
+enum pd_power_role {
+ PD_ROLE_SINK = 0,
+ PD_ROLE_SOURCE = 1
+};
+
+/*
+ * Data role.
+ *
+ * Note this is also used for PD header creation, and the first two values
+ * align to those in the Power Delivery Specification Revision 3.0 (See
+ * 6.2.1.1.6 Port Data Role).
+ */
+enum pd_data_role {
+ PD_ROLE_UFP = 0,
+ PD_ROLE_DFP = 1,
+ PD_ROLE_DISCONNECTED = 2,
+};
+
+enum pd_vconn_role {
+ PD_ROLE_VCONN_OFF = 0,
+ PD_ROLE_VCONN_SRC = 1,
+};
+
+/*
+ * Note: BIT(0) may be used to determine whether the polarity is CC1 or CC2,
+ * regardless of whether a debug accessory is connected.
+ */
+enum tcpc_cc_polarity {
+ /*
+ * _CCx: is used to indicate the polarity while not connected to
+ * a Debug Accessory. Only one CC line will assert a resistor and
+ * the other will be open.
+ */
+ POLARITY_CC1 = 0,
+ POLARITY_CC2 = 1,
+
+ /*
+ * _CCx_DTS is used to indicate the polarity while connected to a
+ * SRC Debug Accessory. Assert resistors on both lines.
+ */
+ POLARITY_CC1_DTS = 2,
+ POLARITY_CC2_DTS = 3,
+
+ /*
+ * The current TCPC code relies on these specific POLARITY values.
+	 * A check is added so that, if the list grows for any reason, it
+	 * gives a hint that other places may need to be adjusted.
+ */
+ POLARITY_COUNT
+};
+
+#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0)
+#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1)
+#define PD_STATUS_EVENT_HARD_RESET BIT(2)
+#define PD_STATUS_EVENT_DISCONNECTED BIT(3)
+#define PD_STATUS_EVENT_MUX_0_SET_DONE BIT(4)
+#define PD_STATUS_EVENT_MUX_1_SET_DONE BIT(5)
+#define PD_STATUS_EVENT_VDM_REQ_REPLY BIT(6)
+#define PD_STATUS_EVENT_VDM_REQ_FAILED BIT(7)
+#define PD_STATUS_EVENT_VDM_ATTENTION BIT(8)
+
+struct ec_params_typec_status {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_typec_status {
+ uint8_t pd_enabled; /* PD communication enabled - bool */
+ uint8_t dev_connected; /* Device connected - bool */
+ uint8_t sop_connected; /* Device is SOP PD capable - bool */
+ uint8_t source_cap_count; /* Number of Source Cap PDOs */
+
+ uint8_t power_role; /* enum pd_power_role */
+ uint8_t data_role; /* enum pd_data_role */
+ uint8_t vconn_role; /* enum pd_vconn_role */
+ uint8_t sink_cap_count; /* Number of Sink Cap PDOs */
+
+ uint8_t polarity; /* enum tcpc_cc_polarity */
+ uint8_t cc_state; /* enum pd_cc_states */
+ uint8_t dp_pin; /* DP pin mode (MODE_DP_IN_[A-E]) */
+ uint8_t mux_state; /* USB_PD_MUX* - encoded mux state */
+
+ char tc_state[32]; /* TC state name */
+
+ uint32_t events; /* PD_STATUS_EVENT bitmask */
+
+ /*
+ * BCD PD revisions for partners
+ *
+	 * The format has the PD major revision in the upper nibble and the
+	 * PD minor revision in the next nibble. The following two nibbles
+	 * are currently 0.
+	 * e.g. PD 3.2 would map to 0x3200
+ *
+ * PD major/minor will be 0 if no PD device is connected.
+ */
+ uint16_t sop_revision;
+ uint16_t sop_prime_revision;
+
+ uint32_t source_cap_pdos[7]; /* Max 7 PDOs can be present */
+
+ uint32_t sink_cap_pdos[7]; /* Max 7 PDOs can be present */
+} __ec_align1;
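Helpers one might write to decode the BCD revision fields (the names are not part of this interface):

	static inline unsigned int pd_rev_major(uint16_t bcd)
	{
		return bcd >> 12;
	}

	static inline unsigned int pd_rev_minor(uint16_t bcd)
	{
		return (bcd >> 8) & 0xf;
	}

	/* pd_rev_major(0x3200) == 3, pd_rev_minor(0x3200) == 2: PD 3.2 */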
+
+/*
+ * Gather the response to the most recent VDM REQ from the AP and pop
+ * the oldest VDM:Attention from the DPM queue.
+ */
+#define EC_CMD_TYPEC_VDM_RESPONSE 0x013C
+
+struct ec_params_typec_vdm_response {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_typec_vdm_response {
+ /* Number of 32-bit fields filled in */
+ uint8_t vdm_data_objects;
+ /* Partner to address - see enum typec_partner_type */
+ uint8_t partner_type;
+ /* enum ec_status describing VDM response */
+ uint16_t vdm_response_err;
+ /* VDM data, including VDM header */
+ uint32_t vdm_response[VDO_MAX_SIZE];
+ /* Number of 32-bit Attention fields filled in */
+ uint8_t vdm_attention_objects;
+ /* Number of remaining messages to consume */
+ uint8_t vdm_attention_left;
+ /* Reserved */
+ uint16_t reserved1;
+ /* VDM:Attention contents */
+ uint32_t vdm_attention[2];
+} __ec_align1;
+
+#undef VDO_MAX_SIZE
+
/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
@@ -5789,7 +6255,7 @@ struct ec_response_fp_encryption_status {
struct ec_response_tp_frame_info {
uint32_t n_frames;
- uint32_t frame_sizes[0];
+ uint32_t frame_sizes[];
} __ec_align4;
/* Create a snapshot of current frame readings */
@@ -5899,6 +6365,13 @@ struct ec_params_charger_control {
uint8_t allow_charging;
} __ec_align_size1;
+/* Get ACK from the USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_ACK 0x0603
+
+struct ec_params_usb_pd_mux_ack {
+ uint8_t port; /* USB-C port number */
+} __ec_align1;
+
/*****************************************************************************/
/*
* Reserve a range of host commands for board-specific, experimental, or
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 4a415ae851ef..8865e350c12a 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -9,6 +9,7 @@
#define __LINUX_CROS_EC_PROTO_H
#include <linux/device.h>
+#include <linux/lockdep_types.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
@@ -21,6 +22,9 @@
#define CROS_EC_DEV_SCP_NAME "cros_scp"
#define CROS_EC_DEV_TP_NAME "cros_tp"
+#define CROS_EC_DEV_EC_INDEX 0
+#define CROS_EC_DEV_PD_INDEX 1
+
/*
* The EC is unresponsive for a time after a reboot command. Add a
* simple delay to make sure that the bus stays locked.
@@ -38,6 +42,13 @@
#define EC_MAX_RESPONSE_OVERHEAD 32
/*
+ * EC panic is not covered by the standard (0-F) ACPI notify values.
+ * B0 is chosen arbitrarily to notify an EC panic; it falls in the 84-BF
+ * device-specific ACPI notify range.
+ */
+#define ACPI_NOTIFY_CROS_EC_PANIC 0xB0
+
+/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
*/
enum {
@@ -69,15 +80,13 @@ struct cros_ec_command {
uint32_t outsize;
uint32_t insize;
uint32_t result;
- uint8_t data[0];
+ uint8_t data[];
};
/**
* struct cros_ec_device - Information about a ChromeOS EC device.
* @phys_name: Name of physical comms layer (e.g. 'i2c-4').
* @dev: Device pointer for physical comms device
- * @was_wake_device: True if this device was set to wake the system from
- * sleep at the last suspend.
* @cros_class: The class structure for this device.
* @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
* @offset: Is within EC_LPC_ADDR_MEMMAP region.
@@ -114,6 +123,8 @@ struct cros_ec_command {
* command. The caller should check msg.result for the EC's result
* code.
* @pkt_xfer: Send packet to EC and get response.
+ * @lockdep_key: Lockdep class for each instance. Unused if CONFIG_LOCKDEP is
+ * not enabled.
* @lock: One transaction at a time.
* @mkbp_event_supported: 0 if MKBP not supported. Otherwise its value is
* the maximum supported version of the MKBP host event
@@ -123,6 +134,15 @@ struct cros_ec_command {
* @event_data: Raw payload transferred with the MKBP event.
* @event_size: Size in bytes of the event data.
* @host_event_wake_mask: Mask of host events that cause wake from suspend.
+ * @suspend_timeout_ms: The timeout in milliseconds between when the sleep
+ *                      event is received and when the EC will declare sleep
+ * transition failure if the sleep signal is not
+ * asserted. See also struct
+ * ec_params_host_sleep_event_v1 in cros_ec_commands.h.
+ * @last_resume_result: The number of sleep power signal transitions that
+ * occurred since the suspend message. The high bit
+ * indicates a timeout occurred. See also struct
+ * ec_response_host_sleep_event_v1 in cros_ec_commands.h.
* @last_event_time: exact time from the hard irq when we got notified of
* a new event.
* @notifier_ready: The notifier_block to let the kernel re-query EC
@@ -132,12 +152,12 @@ struct cros_ec_command {
* main EC.
* @pd: The platform_device used by the mfd driver to interface with the
* PD behind an EC.
+ * @panic_notifier: EC panic notifier.
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
const char *phys_name;
struct device *dev;
- bool was_wake_device;
struct class *cros_class;
int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest);
@@ -159,6 +179,7 @@ struct cros_ec_device {
struct cros_ec_command *msg);
int (*pkt_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
+ struct lock_class_key lockdep_key;
struct mutex lock;
u8 mkbp_event_supported;
bool host_sleep_v1;
@@ -168,12 +189,15 @@ struct cros_ec_device {
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
+ u16 suspend_timeout_ms;
ktime_t last_event_time;
struct notifier_block notifier_ready;
/* The platform devices used by the mfd driver */
struct platform_device *ec;
struct platform_device *pd;
+
+ struct blocking_notifier_head panic_notifier;
};
/**
@@ -205,7 +229,7 @@ struct cros_ec_dev {
struct cros_ec_debugfs *debug_info;
bool has_kb_wake_angle;
u16 cmd_offset;
- u32 features[2];
+ struct ec_response_get_features features;
};
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
@@ -216,6 +240,9 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
@@ -227,10 +254,13 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata,
+ size_t outsize, void *indata, size_t insize);
+
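cros_ec_cmd() bundles message allocation, transfer, and status checking for one host command round trip. A hedged usage sketch, querying the peripheral charger port count added earlier in this patch (the function name and command version are assumptions):

	static int example_pchg_port_count(struct cros_ec_device *ec_dev)
	{
		struct ec_response_pchg_count resp;
		int ret;

		ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PCHG_COUNT,
				  NULL, 0, &resp, sizeof(resp));

		return ret < 0 ? ret : resp.port_count;
	}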
/**
* cros_ec_get_time_ns() - Return time in ns.
*
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
index bc208c64e3d7..1ef91c36f609 100644
--- a/include/linux/platform_data/davinci-cpufreq.h
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -16,4 +16,10 @@ struct davinci_cpufreq_config {
int (*init)(void);
};
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
#endif /* _MACH_DAVINCI_CPUFREQ_H */
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 5d1fb0d78a22..c8645b2ed3c0 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci Audio Serial Port support
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DAVINCI_ASP_H
@@ -96,6 +88,7 @@ enum {
MCASP_VERSION_2, /* DA8xx/OMAPL1x */
MCASP_VERSION_3, /* TI81xx/AM33xx */
MCASP_VERSION_4, /* DRA7xxx */
+ MCASP_VERSION_OMAP, /* OMAP4/5 */
};
enum mcbsp_clk_input_pin {
diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h
deleted file mode 100644
index 069637e6004f..000000000000
--- a/include/linux/platform_data/dma-atmel.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Header file for the Atmel AHB DMA Controller driver
- *
- * Copyright (C) 2008 Atmel Corporation
- */
-#ifndef AT_HDMAC_H
-#define AT_HDMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct at_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @cap_mask: dma_capability flags supported by the platform
- */
-struct at_dma_platform_data {
- unsigned int nr_channels;
- dma_cap_mask_t cap_mask;
-};
-
-/**
- * struct at_dma_slave - Controller-specific information about a slave
- * @dma_dev: required DMA master device
- * @cfg: Platform-specific initializer for the CFG register
- */
-struct at_dma_slave {
- struct device *dma_dev;
- u32 cfg;
-};
-
-
-/* Platform-configurable bits in CFG */
-#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
-
-#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
-#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
-#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */
-#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */
-#define ATC_SRC_H2SEL_SW (0x0 << 9)
-#define ATC_SRC_H2SEL_HW (0x1 << 9)
-#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
-#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */
-#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */
-#define ATC_DST_H2SEL_SW (0x0 << 13)
-#define ATC_DST_H2SEL_HW (0x1 << 13)
-#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
-#define ATC_SOD (0x1 << 16) /* Stop On Done */
-#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
-#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
-#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
-#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
-#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
-#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
-#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
-#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
-#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
-#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
-
-
-#endif /* AT_HDMAC_H */
diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h
deleted file mode 100644
index 4cca529f8d56..000000000000
--- a/include/linux/platform_data/dma-coh901318.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data for the COH901318 DMA controller
- * Copyright (C) 2007-2013 ST-Ericsson
- */
-
-#ifndef PLAT_COH901318_H
-#define PLAT_COH901318_H
-
-#ifdef CONFIG_COH901318
-
-/* We only support the U300 DMA channels */
-#define U300_DMA_MSL_TX_0 0
-#define U300_DMA_MSL_TX_1 1
-#define U300_DMA_MSL_TX_2 2
-#define U300_DMA_MSL_TX_3 3
-#define U300_DMA_MSL_TX_4 4
-#define U300_DMA_MSL_TX_5 5
-#define U300_DMA_MSL_TX_6 6
-#define U300_DMA_MSL_RX_0 7
-#define U300_DMA_MSL_RX_1 8
-#define U300_DMA_MSL_RX_2 9
-#define U300_DMA_MSL_RX_3 10
-#define U300_DMA_MSL_RX_4 11
-#define U300_DMA_MSL_RX_5 12
-#define U300_DMA_MSL_RX_6 13
-#define U300_DMA_MMCSD_RX_TX 14
-#define U300_DMA_MSPRO_TX 15
-#define U300_DMA_MSPRO_RX 16
-#define U300_DMA_UART0_TX 17
-#define U300_DMA_UART0_RX 18
-#define U300_DMA_APEX_TX 19
-#define U300_DMA_APEX_RX 20
-#define U300_DMA_PCM_I2S0_TX 21
-#define U300_DMA_PCM_I2S0_RX 22
-#define U300_DMA_PCM_I2S1_TX 23
-#define U300_DMA_PCM_I2S1_RX 24
-#define U300_DMA_XGAM_CDI 25
-#define U300_DMA_XGAM_PDI 26
-#define U300_DMA_SPI_TX 27
-#define U300_DMA_SPI_RX 28
-#define U300_DMA_GENERAL_PURPOSE_0 29
-#define U300_DMA_GENERAL_PURPOSE_1 30
-#define U300_DMA_GENERAL_PURPOSE_2 31
-#define U300_DMA_GENERAL_PURPOSE_3 32
-#define U300_DMA_GENERAL_PURPOSE_4 33
-#define U300_DMA_GENERAL_PURPOSE_5 34
-#define U300_DMA_GENERAL_PURPOSE_6 35
-#define U300_DMA_GENERAL_PURPOSE_7 36
-#define U300_DMA_GENERAL_PURPOSE_8 37
-#define U300_DMA_UART1_TX 38
-#define U300_DMA_UART1_RX 39
-
-#define U300_DMA_DEVICE_CHANNELS 32
-#define U300_DMA_CHANNELS 40
-
-/**
- * coh901318_filter_id() - DMA channel filter function
- * @chan: dma channel handle
- * @chan_id: id of dma channel to be filter out
- *
- * In dma_request_channel() it specifies what channel id to be requested
- */
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
-#else
-static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- return false;
-}
-#endif
-
-#endif /* PLAT_COH901318_H */
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index fbbeb2f6189b..860ba4bc5ead 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -26,6 +26,7 @@ struct device;
* @dst_id: dst request line
* @m_master: memory master for transfers on allocated channel
* @p_master: peripheral master for transfers on allocated channel
+ * @channels: mask of the channels permitted for allocation (zero value means any)
 * @hs_polarity: set active-low polarity of the handshake interface
*/
struct dw_dma_slave {
@@ -34,41 +35,45 @@ struct dw_dma_slave {
u8 dst_id;
u8 m_master;
u8 p_master;
+ u8 channels;
bool hs_polarity;
};
/**
* struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_masters: Number of AHB masters supported by the controller
* @nr_channels: Number of channels supported by hardware (max 8)
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
- * @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
* @max_burst: Maximum value of burst transaction size supported by hardware
* per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH).
* @protctl: Protection control signals setting per channel.
+ * @quirks: Optional platform quirks.
*/
struct dw_dma_platform_data {
- unsigned int nr_channels;
+ u32 nr_masters;
+ u32 nr_channels;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
+ u32 chan_allocation_order;
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
- unsigned int block_size;
- unsigned char nr_masters;
- unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
- unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 chan_priority;
+ u32 block_size;
+ u32 data_width[DW_DMA_MAX_NR_MASTERS];
+ u32 multi_block[DW_DMA_MAX_NR_CHANNELS];
u32 max_burst[DW_DMA_MAX_NR_CHANNELS];
#define CHAN_PROTCTL_PRIVILEGED BIT(0)
#define CHAN_PROTCTL_BUFFERABLE BIT(1)
#define CHAN_PROTCTL_CACHEABLE BIT(2)
#define CHAN_PROTCTL_MASK GENMASK(2, 0)
- unsigned char protctl;
+ u32 protctl;
+#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0)
+ u32 quirks;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
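A sketch of the new @channels restriction from the slave side; the request-line ids are assumptions, and zero keeps the previous "any channel" behaviour:

	struct dw_dma_slave slave = {
		.src_id = 1,
		.dst_id = 2,
		.m_master = 0,
		.p_master = 1,
		.channels = BIT(0) | BIT(1), /* allow only channels 0 and 1 */
	};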
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
index c65b412b2b33..611bae193c1c 100644
--- a/include/linux/platform_data/dma-hsu.h
+++ b/include/linux/platform_data/dma-hsu.h
@@ -8,7 +8,7 @@
#ifndef _PLATFORM_DATA_DMA_HSU_H
#define _PLATFORM_DATA_DMA_HSU_H
-#include <linux/device.h>
+struct device;
struct hsu_dma_slave {
struct device *dma_dev;
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
deleted file mode 100644
index 30e676b36b24..000000000000
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MACH_MXC_SDMA_H__
-#define __MACH_MXC_SDMA_H__
-
-/**
- * struct sdma_script_start_addrs - SDMA script start pointers
- *
- * start addresses of the different functions in the physical
- * address space of the SDMA engine.
- */
-struct sdma_script_start_addrs {
- s32 ap_2_ap_addr;
- s32 ap_2_bp_addr;
- s32 ap_2_ap_fixed_addr;
- s32 bp_2_ap_addr;
- s32 loopback_on_dsp_side_addr;
- s32 mcu_interrupt_only_addr;
- s32 firi_2_per_addr;
- s32 firi_2_mcu_addr;
- s32 per_2_firi_addr;
- s32 mcu_2_firi_addr;
- s32 uart_2_per_addr;
- s32 uart_2_mcu_addr;
- s32 per_2_app_addr;
- s32 mcu_2_app_addr;
- s32 per_2_per_addr;
- s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_addr;
- s32 per_2_shp_addr;
- s32 mcu_2_shp_addr;
- s32 ata_2_mcu_addr;
- s32 mcu_2_ata_addr;
- s32 app_2_per_addr;
- s32 app_2_mcu_addr;
- s32 shp_2_per_addr;
- s32 shp_2_mcu_addr;
- s32 mshc_2_mcu_addr;
- s32 mcu_2_mshc_addr;
- s32 spdif_2_mcu_addr;
- s32 mcu_2_spdif_addr;
- s32 asrc_2_mcu_addr;
- s32 ext_mem_2_ipu_addr;
- s32 descrambler_addr;
- s32 dptc_dvfs_addr;
- s32 utra_addr;
- s32 ram_code_start_addr;
- /* End of v1 array */
- s32 mcu_2_ssish_addr;
- s32 ssish_2_mcu_addr;
- s32 hdmi_dma_addr;
- /* End of v2 array */
- s32 zcanfd_2_mcu_addr;
- s32 zqspi_2_mcu_addr;
- s32 mcu_2_ecspi_addr;
- /* End of v3 array */
- s32 mcu_2_zqspi_addr;
- /* End of v4 array */
-};
-
-/**
- * struct sdma_platform_data - platform specific data for SDMA engine
- *
- * @fw_name The firmware name
- * @script_addrs SDMA scripts addresses in SDMA ROM
- */
-struct sdma_platform_data {
- char *fw_name;
- struct sdma_script_start_addrs *script_addrs;
-};
-
-#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
deleted file mode 100644
index 8bec5484dc86..000000000000
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SRAM Memory Management
- *
- * Copyright (c) 2011 Marvell Semiconductors Inc.
- */
-
-#ifndef __DMA_MMP_TDMA_H
-#define __DMA_MMP_TDMA_H
-
-#include <linux/genalloc.h>
-
-/* ARBITRARY: SRAM allocations are multiples of this 2^N size */
-#define SRAM_GRANULARITY 512
-
-enum sram_type {
- MMP_SRAM_UNDEFINED = 0,
- MMP_ASRAM,
- MMP_ISRAM,
-};
-
-struct sram_platdata {
- char *pool_name;
- int granularity;
-};
-
-#ifdef CONFIG_MMP_SRAM
-extern struct gen_pool *sram_get_gpool(char *pool_name);
-#else
-static inline struct gen_pool *sram_get_gpool(char *pool_name)
-{
- return NULL;
-}
-#endif
-
-#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
deleted file mode 100644
index 96d02dbeea67..000000000000
--- a/include/linux/platform_data/dma-s3c24xx.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * S3C24XX DMA handling
- *
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- */
-
-/* Helper to encode the source selection constraints for early s3c socs. */
-#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
-
-enum s3c24xx_dma_bus {
- S3C24XX_DMA_APB,
- S3C24XX_DMA_AHB,
-};
-
-/**
- * @bus: on which bus does the peripheral reside - AHB or APB.
- * @handshake: is a handshake with the peripheral necessary
- * @chansel: channel selection information, depending on variant; reqsel for
- * s3c2443 and later and channel-selection map for earlier SoCs
- * see CHANSEL doc in s3c2443-dma.c
- */
-struct s3c24xx_dma_channel {
- enum s3c24xx_dma_bus bus;
- bool handshake;
- u16 chansel;
-};
-
-struct dma_slave_map;
-
-/**
- * struct s3c24xx_dma_platdata - platform specific settings
- * @num_phy_channels: number of physical channels
- * @channels: array of virtual channel descriptions
- * @num_channels: number of virtual channels
- * @slave_map: dma slave map matching table
- * @slavecnt: number of elements in slave_map
- */
-struct s3c24xx_dma_platdata {
- int num_phy_channels;
- struct s3c24xx_dma_channel *channels;
- int num_channels;
- const struct dma_slave_map *slave_map;
- int slavecnt;
-};
-
-struct dma_chan;
-bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
deleted file mode 100644
index 10641633facc..000000000000
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
- */
-
-
-#ifndef STE_DMA40_H
-#define STE_DMA40_H
-
-#include <linux/dmaengine.h>
-#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-
-/*
- * Maxium size for a single dma descriptor
- * Size is limited to 16 bits.
- * Size is in the units of addr-widths (1,2,4,8 bytes)
- * Larger transfers will be split up to multiple linked desc
- */
-#define STEDMA40_MAX_SEG_SIZE 0xFFFF
-
-/* dev types for memcpy */
-#define STEDMA40_DEV_DST_MEMORY (-1)
-#define STEDMA40_DEV_SRC_MEMORY (-1)
-
-enum stedma40_mode {
- STEDMA40_MODE_LOGICAL = 0,
- STEDMA40_MODE_PHYSICAL,
- STEDMA40_MODE_OPERATION,
-};
-
-enum stedma40_mode_opt {
- STEDMA40_PCHAN_BASIC_MODE = 0,
- STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
- STEDMA40_PCHAN_MODULO_MODE,
- STEDMA40_PCHAN_DOUBLE_DST_MODE,
- STEDMA40_LCHAN_SRC_PHY_DST_LOG,
- STEDMA40_LCHAN_SRC_LOG_DST_PHY,
-};
-
-#define STEDMA40_ESIZE_8_BIT 0x0
-#define STEDMA40_ESIZE_16_BIT 0x1
-#define STEDMA40_ESIZE_32_BIT 0x2
-#define STEDMA40_ESIZE_64_BIT 0x3
-
-/* The value 4 indicates that PEN-reg shall be set to 0 */
-#define STEDMA40_PSIZE_PHY_1 0x4
-#define STEDMA40_PSIZE_PHY_2 0x0
-#define STEDMA40_PSIZE_PHY_4 0x1
-#define STEDMA40_PSIZE_PHY_8 0x2
-#define STEDMA40_PSIZE_PHY_16 0x3
-
-/*
- * The number of elements differ in logical and
- * physical mode
- */
-#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
-#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
-#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
-#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
-
-/* Maximum number of possible physical channels */
-#define STEDMA40_MAX_PHYS 32
-
-enum stedma40_flow_ctrl {
- STEDMA40_NO_FLOW_CTRL,
- STEDMA40_FLOW_CTRL,
-};
-
-/**
- * struct stedma40_half_channel_info - dst/src channel configuration
- *
- * @big_endian: true if the src/dst should be read as big endian
- * @data_width: Data width of the src/dst hardware
- * @p_size: Burst size
- * @flow_ctrl: Flow control on/off.
- */
-struct stedma40_half_channel_info {
- bool big_endian;
- enum dma_slave_buswidth data_width;
- int psize;
- enum stedma40_flow_ctrl flow_ctrl;
-};
-
-/**
- * struct stedma40_chan_cfg - Structure to be filled by client drivers.
- *
- * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @high_priority: true if high-priority
- * @realtime: true if realtime mode is to be enabled. Only available on DMA40
- * version 3+, i.e DB8500v2+
- * @mode: channel mode: physical, logical, or operation
- * @mode_opt: options for the chosen channel mode
- * @dev_type: src/dst device type (driver uses dir to figure out which)
- * @src_info: Parameters for dst half channel
- * @dst_info: Parameters for dst half channel
- * @use_fixed_channel: if true, use physical channel specified by phy_channel
- * @phy_channel: physical channel to use, only if use_fixed_channel is true
- *
- * This structure has to be filled by the client drivers.
- * It is recommended to do all dma configurations for clients in the machine.
- *
- */
-struct stedma40_chan_cfg {
- enum dma_transfer_direction dir;
- bool high_priority;
- bool realtime;
- enum stedma40_mode mode;
- enum stedma40_mode_opt mode_opt;
- int dev_type;
- struct stedma40_half_channel_info src_info;
- struct stedma40_half_channel_info dst_info;
-
- bool use_fixed_channel;
- int phy_channel;
-};
-
-/**
- * struct stedma40_platform_data - Configuration struct for the dma device.
- *
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
- * @disabled_channels: A vector, ending with -1, that marks physical channels
- * that are for different reasons not available for the driver.
- * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
- * which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
- * certain use cases.
- * @num_of_soft_lli_chans: The number of channels that needs to be configured
- * to use SoftLLI.
- * @use_esram_lcla: flag for mapping the lcla into esram region
- * @num_of_memcpy_chans: The number of channels reserved for memcpy.
- * @num_of_phy_chans: The number of physical channels implemented in HW.
- * 0 means reading the number of channels from DMA HW but this is only valid
- * for 'multiple of 4' channels, like 8.
- */
-struct stedma40_platform_data {
- int disabled_channels[STEDMA40_MAX_PHYS];
- int *soft_lli_chans;
- int num_of_soft_lli_chans;
- bool use_esram_lcla;
- int num_of_memcpy_chans;
- int num_of_phy_chans;
-};
-
-#ifdef CONFIG_STE_DMA40
-
-/**
- * stedma40_filter() - Provides stedma40_chan_cfg to the
- * ste_dma40 dma driver via the dmaengine framework.
- * does some checking of what's provided.
- *
- * Never directly called by client. It used by dmaengine.
- * @chan: dmaengine handle.
- * @data: Must be of type: struct stedma40_chan_cfg and is
- * the configuration of the framework.
- *
- *
- */
-
-bool stedma40_filter(struct dma_chan *chan, void *data);
-
-/**
- * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
- * (=device)
- *
- * @chan: dmaengine handle
- * @addr: source or destination physicall address.
- * @size: bytes to transfer
- * @direction: direction of transfer
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct scatterlist sg;
- sg_init_table(&sg, 1);
- sg.dma_address = addr;
- sg.length = size;
-
- return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
-}
-
-#else
-static inline bool stedma40_filter(struct dma_chan *chan, void *data)
-{
- return false;
-}
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- return NULL;
-}
-#endif
-
-#endif
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h
deleted file mode 100644
index a2c56fcd0534..000000000000
--- a/include/linux/platform_data/efm32-spi.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_spi_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_spi_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */
diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h
deleted file mode 100644
index ccbb8f11db75..000000000000
--- a/include/linux/platform_data/efm32-uart.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *
- *
- */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__
-#define __LINUX_PLATFORM_DATA_EFM32_UART_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_uart_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_uart_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
new file mode 100644
index 000000000000..54d672dd6f7d
--- /dev/null
+++ b/include/linux/platform_data/emc2305.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_EMC2305__
+#define __LINUX_PLATFORM_DATA_EMC2305__
+
+#define EMC2305_PWM_MAX 5
+
+/**
+ * struct emc2305_platform_data - EMC2305 driver platform data
+ * @max_state: maximum cooling state of the cooling device;
+ * @pwm_num: number of active channels;
+ * @pwm_separate: separate PWM settings for every channel;
+ * @pwm_min: array of minimum PWM per channel;
+ */
+struct emc2305_platform_data {
+ u8 max_state;
+ u8 pwm_num;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+};
+
+#endif
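An illustrative initializer for two separately controlled channels (all values are assumptions, not recommendations):

	static const struct emc2305_platform_data example_emc2305_pdata = {
		.max_state = 10,
		.pwm_num = 2,
		.pwm_separate = true,
		.pwm_min = { 30, 30 },
	};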
diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h
deleted file mode 100644
index 6f652ea0c6ae..000000000000
--- a/include/linux/platform_data/eth_ixp4xx.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_ETH_IXP4XX
-#define __PLATFORM_DATA_ETH_IXP4XX
-
-#include <linux/types.h>
-
-#define IXP4XX_ETH_NPEA 0x00
-#define IXP4XX_ETH_NPEB 0x10
-#define IXP4XX_ETH_NPEC 0x20
-
-/* Information about built-in Ethernet MAC interfaces */
-struct eth_plat_info {
- u8 phy; /* MII PHY ID, 0 - 31 */
- u8 rxq; /* configurable, currently 0 - 31 only */
- u8 txreadyq;
- u8 hwaddr[6];
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index e182a46e609f..b82e44662efe 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* DaVinci GPIO Platform Related Defines
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DAVINCI_GPIO_PLATFORM_H
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
deleted file mode 100644
index ff1be737bad6..000000000000
--- a/include/linux/platform_data/gpio-dwapb.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2014 Intel Corporation.
- */
-
-#ifndef GPIO_DW_APB_H
-#define GPIO_DW_APB_H
-
-struct dwapb_port_property {
- struct fwnode_handle *fwnode;
- unsigned int idx;
- unsigned int ngpio;
- unsigned int gpio_base;
- int irq[32];
- bool irq_shared;
-};
-
-struct dwapb_platform_data {
- struct dwapb_port_property *properties;
- unsigned int nports;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 8b30b14b47d3..cdd8cfb424f5 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -85,6 +85,7 @@
* omap2+ specific GPIO registers
*/
#define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_SYSCONFIG 0x0010
#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
#define OMAP24XX_GPIO_IRQENABLE2 0x002c
@@ -108,6 +109,7 @@
#define OMAP24XX_GPIO_SETDATAOUT 0x0094
#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_SYSCONFIG 0x0010
#define OMAP4_GPIO_EOI 0x0020
#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
@@ -142,12 +144,10 @@
#define OMAP_MAX_GPIO_LINES 192
-#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
-#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
+ u16 sysconfig;
u16 direction;
u16 datain;
u16 dataout;
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index 9e46678edb2a..255d51c9d36d 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -19,7 +19,7 @@
#define AMD_FCH_GPIO_REG_GPIO49 0x40
#define AMD_FCH_GPIO_REG_GPIO50 0x41
#define AMD_FCH_GPIO_REG_GPIO51 0x42
-#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 0x43
+#define AMD_FCH_GPIO_REG_GPIO55_DEVSLP0 0x43
#define AMD_FCH_GPIO_REG_GPIO57 0x44
#define AMD_FCH_GPIO_REG_GPIO58 0x45
#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 1a8b5b1946fe..323fbf5f7613 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -8,7 +8,7 @@
struct device;
struct gpio_backlight_platform_data {
- struct device *fbdev;
+ struct device *dev;
};
#endif
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
index c9cc4e32435d..dcca6c5e23bb 100644
--- a/include/linux/platform_data/gpmc-omap.h
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -136,6 +136,13 @@ struct gpmc_device_timings {
#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+/* Wait pin polarity values */
+#define GPMC_WAITPINPOLARITY_INVALID UINT_MAX
+#define GPMC_WAITPINPOLARITY_ACTIVE_LOW 0
+#define GPMC_WAITPINPOLARITY_ACTIVE_HIGH 1
+
+#define GPMC_WAITPIN_INVALID UINT_MAX
+
struct gpmc_settings {
bool burst_wrap; /* enables wrap bursting */
bool burst_read; /* enables read page/burst mode */
@@ -149,6 +156,7 @@ struct gpmc_settings {
u32 device_width; /* device bus width (8 or 16 bit) */
u32 mux_add_data; /* multiplex address & data */
u32 wait_pin; /* wait-pin to be used */
+ u32 wait_pin_polarity;
};
/* Data for each chip select */
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
index 37a8f554da00..70e8a6bec0f6 100644
--- a/include/linux/platform_data/gsc_hwmon.h
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -7,6 +7,7 @@ enum gsc_hwmon_mode {
mode_voltage_24bit,
mode_voltage_raw,
mode_voltage_16bit,
+ mode_fan,
mode_max,
};
@@ -28,18 +29,17 @@ struct gsc_hwmon_channel {
/**
* struct gsc_hwmon_platform_data - platform data for gsc_hwmon driver
- * @channels: pointer to array of gsc_hwmon_channel structures
- * describing channels
* @nchannels: number of elements in @channels array
* @vreference: voltage reference (mV)
* @resolution: ADC bit resolution
* @fan_base: register base for FAN controller
+ * @channels: array of gsc_hwmon_channel structures describing channels
*/
struct gsc_hwmon_platform_data {
- const struct gsc_hwmon_channel *channels;
int nchannels;
unsigned int resolution;
unsigned int vreference;
unsigned int fan_base;
+ struct gsc_hwmon_channel channels[] __counted_by(nchannels);
};
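With the trailing flexible array, a provider allocates the whole platform data in one shot, and __counted_by(nchannels) lets FORTIFY/UBSAN bounds-check accesses to @channels. A hedged sketch (struct_size() is from <linux/overflow.h>):

	static struct gsc_hwmon_platform_data *
	example_alloc_pdata(struct device *dev, int nchannels)
	{
		struct gsc_hwmon_platform_data *pdata;

		pdata = devm_kzalloc(dev,
				     struct_size(pdata, channels, nchannels),
				     GFP_KERNEL);
		if (pdata)
			pdata->nchannels = nchannels;
		return pdata;
	}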
#endif
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
new file mode 100644
index 000000000000..8748680e9e3c
--- /dev/null
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Hirschmann Hellcreek TSN switch platform data.
+ *
+ * Copyright (C) 2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HIRSCHMANN_HELLCREEK_H_
+#define _HIRSCHMANN_HELLCREEK_H_
+
+#include <linux/types.h>
+
+struct hellcreek_platform_data {
+ const char *name; /* Switch name */
+	int num_ports;		/* Number of switch ports */
+ int is_100_mbits; /* Is it configured to 100 or 1000 mbit/s */
+ int qbv_support; /* Qbv support on front TSN ports */
+ int qbv_on_cpu_port; /* Qbv support on the CPU port */
+ int qbu_support; /* Qbu support on front TSN ports */
+	u16 module_id;		/* Module identification */
+};
+
+#endif /* _HIRSCHMANN_HELLCREEK_H_ */
diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h
deleted file mode 100644
index 014c4a5a7e13..000000000000
--- a/include/linux/platform_data/i2c-designware.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2014 Intel Corporation.
- */
-
-#ifndef I2C_DESIGNWARE_H
-#define I2C_DESIGNWARE_H
-
-struct dw_i2c_platform_data {
- unsigned int i2c_scl_freq;
-};
-
-#endif
diff --git a/include/linux/platform_data/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h
index a907774fd177..545639bcca72 100644
--- a/include/linux/platform_data/i2c-gpio.h
+++ b/include/linux/platform_data/i2c-gpio.h
@@ -16,16 +16,25 @@
* isn't actively driven high when setting the output value high.
* gpio_get_value() must return the actual pin state even if the
* pin is configured as an output.
+ * @sda_is_output_only: SDA output drivers can't be turned off.
+ * This is for clients that can only read SDA/SCL.
+ * @sda_has_no_pullup: SDA is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
* @scl_is_open_drain: SCL is set up as open drain. Same requirements
* as for sda_is_open_drain apply.
* @scl_is_output_only: SCL output drivers cannot be turned off.
+ * @scl_has_no_pullup: SCL is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
*/
struct i2c_gpio_platform_data {
int udelay;
int timeout;
unsigned int sda_is_open_drain:1;
+ unsigned int sda_is_output_only:1;
+ unsigned int sda_has_no_pullup:1;
unsigned int scl_is_open_drain:1;
unsigned int scl_is_output_only:1;
+ unsigned int scl_has_no_pullup:1;
};
#endif /* _LINUX_I2C_GPIO_H */
diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h
deleted file mode 100644
index c628bb5e1061..000000000000
--- a/include/linux/platform_data/i2c-hid.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * HID over I2C protocol implementation
- *
- * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
- * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#ifndef __LINUX_I2C_HID_H
-#define __LINUX_I2C_HID_H
-
-#include <linux/regulator/consumer.h>
-#include <linux/types.h>
-
-/**
- * struct i2chid_platform_data - used by hid over i2c implementation.
- * @hid_descriptor_address: i2c register where the HID descriptor is stored.
- * @supplies: regulators for powering on the device.
- * @post_power_delay_ms: delay after powering on before device is usable.
- *
- * Note that it is the responsibility of the platform driver (or the acpi 5.0
- * driver, or the flattened device tree) to setup the irq related to the gpio in
- * the struct i2c_board_info.
- * The platform driver should also setup the gpio according to the device:
- *
- * A typical example is the following:
- * irq = gpio_to_irq(intr_gpio);
- * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info
- * gpio_request(intr_gpio, "elan-irq");
- * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP);
- */
-struct i2c_hid_platform_data {
- u16 hid_descriptor_address;
- struct regulator_bulk_data supplies[2];
- int post_power_delay_ms;
-};
-
-#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
index 2543c2a1c9ae..e2e895768311 100644
--- a/include/linux/platform_data/i2c-mux-reg.h
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -17,7 +17,6 @@
* @n_values: Number of multiplexer channels
* @little_endian: Indicating if the register is in little endian
* @write_only: Reading the register is not allowed by hardware
- * @classes: Optional I2C auto-detection classes
* @idle: Value to write to mux when idle
* @idle_in_use: indicate if idle value is in use
* @reg: Virtual address of the register to switch channel
@@ -30,7 +29,6 @@ struct i2c_mux_reg_platform_data {
int n_values;
bool little_endian;
bool write_only;
- const unsigned int *classes;
u32 idle;
bool idle_in_use;
void __iomem *reg;
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
index 93974f4cfba1..f05b37521f67 100644
--- a/include/linux/platform_data/invensense_mpu6050.h
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -12,7 +12,7 @@
* mounting matrix retrieved from device-tree)
*
* Contains platform specific information on how to configure the MPU6050 to
- * work on this platform. The orientation matricies are 3x3 rotation matricies
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
* that are applied to the data to rotate from the mounting orientation to the
* platform orientation. The values must be one of 0, 1, or -1 and each row and
* column should have exactly 1 non-zero value.
diff --git a/include/linux/platform_data/irda-pxaficp.h b/include/linux/platform_data/irda-pxaficp.h
deleted file mode 100644
index bd35ddcf3068..000000000000
--- a/include/linux/platform_data/irda-pxaficp.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_ARCH_IRDA_H
-#define ASMARM_ARCH_IRDA_H
-
-/* board specific transceiver capabilities */
-
-#define IR_OFF 1
-#define IR_SIRMODE 2
-#define IR_FIRMODE 4
-
-struct pxaficp_platform_data {
- int transceiver_cap;
- void (*transceiver_mode)(struct device *dev, int mode);
- int (*startup)(struct device *dev);
- void (*shutdown)(struct device *dev);
- int gpio_pwdown; /* powerdown GPIO for the IrDA chip */
- bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */
-};
-
-extern void pxa_set_ficp_info(struct pxaficp_platform_data *info);
-
-#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
-void pxa2xx_transceiver_mode(struct device *dev, int mode);
-#endif
-
-#endif
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
deleted file mode 100644
index 7db59c917575..000000000000
--- a/include/linux/platform_data/irda-sa11x0.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/include/asm/mach/irda.h
- *
- * Copyright (C) 2004 Russell King.
- */
-#ifndef __ASM_ARM_MACH_IRDA_H
-#define __ASM_ARM_MACH_IRDA_H
-
-struct irda_platform_data {
- int (*startup)(struct device *);
- void (*shutdown)(struct device *);
- int (*set_power)(struct device *, unsigned int state);
- void (*set_speed)(struct device *, unsigned int speed);
-};
-
-#endif
diff --git a/include/linux/platform_data/jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h
deleted file mode 100644
index b3f066d63059..000000000000
--- a/include/linux/platform_data/jz4740/jz4740_nand.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 SoC NAND controller driver
- */
-
-#ifndef __JZ4740_NAND_H__
-#define __JZ4740_NAND_H__
-
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#define JZ_NAND_NUM_BANKS 4
-
-struct jz_nand_platform_data {
- int num_partitions;
- struct mtd_partition *partitions;
-
- unsigned char banks[JZ_NAND_NUM_BANKS];
-
- void (*ident_callback)(struct platform_device *, struct mtd_info *,
- struct mtd_partition **, int *num_partitions);
-};
-
-#endif
diff --git a/include/linux/platform_data/keyboard-pxa930_rotary.h b/include/linux/platform_data/keyboard-pxa930_rotary.h
deleted file mode 100644
index 3271aa01cbe8..000000000000
--- a/include/linux/platform_data/keyboard-pxa930_rotary.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_PXA930_ROTARY_H
-#define __ASM_ARCH_PXA930_ROTARY_H
-
-/* NOTE:
- *
- * rotary can be interpreted either as a relative input event (e.g.
- * REL_WHEEL or REL_HWHEEL) or as a specific key event (e.g. UP/DOWN
- * or LEFT/RIGHT), depending on whether up_key & down_key are assigned
- * or rel_code is assigned a non-zero value. When all are non-zero,
- * up_key and down_key will be preferred.
- */
-struct pxa930_rotary_platform_data {
- int up_key;
- int down_key;
- int rel_code;
-};
-
-void __init pxa930_set_rotarykey_info(struct pxa930_rotary_platform_data *info);
-
-#endif /* __ASM_ARCH_PXA930_ROTARY_H */
diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h
index 3e7c64c854f4..f3f1311cdf3a 100644
--- a/include/linux/platform_data/keypad-omap.h
+++ b/include/linux/platform_data/keypad-omap.h
@@ -19,9 +19,6 @@ struct omap_kp_platform_data {
bool rep;
unsigned long delay;
bool dbounce;
- /* specific to OMAP242x*/
- unsigned int *row_gpios;
- unsigned int *col_gpios;
};
/* Group (0..3) -- when multiple keys are pressed, only the
diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h
index 63f05eb23827..4927cfc5158c 100644
--- a/include/linux/platform_data/lcd-mipid.h
+++ b/include/linux/platform_data/lcd-mipid.h
@@ -15,10 +15,8 @@ enum mipid_test_result {
#ifdef __KERNEL__
struct mipid_platform_data {
- int nreset_gpio;
int data_lines;
- void (*shutdown)(struct mipid_platform_data *pdata);
void (*set_bklight_level)(struct mipid_platform_data *pdata,
int level);
int (*get_bklight_level)(struct mipid_platform_data *pdata);
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 3441064713a3..3cc8db0b12b5 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -73,6 +73,9 @@ struct lp55xx_platform_data {
/* Clock configuration */
u8 clock_mode;
+ /* Charge pump mode */
+ u32 charge_pump_mode;
+
/* optional enable GPIO */
struct gpio_desc *enable_gpiod;
diff --git a/include/linux/platform_data/leds-omap.h b/include/linux/platform_data/leds-omap.h
deleted file mode 100644
index dd1a3ec86fe4..000000000000
--- a/include/linux/platform_data/leds-omap.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2006 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- */
-#ifndef ASMARM_ARCH_LED_H
-#define ASMARM_ARCH_LED_H
-
-struct omap_led_config {
- struct led_classdev cdev;
- s16 gpio;
-};
-
-struct omap_led_platform_data {
- s16 nr_leds;
- struct omap_led_config *leds;
-};
-
-#endif
diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h
deleted file mode 100644
index 6091337ce4bf..000000000000
--- a/include/linux/platform_data/leds-pca963x.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PCA963X LED chip driver.
- *
- * Copyright 2012 bct electronic GmbH
- * Copyright 2013 Qtechnology A/S
- */
-
-#ifndef __LINUX_PCA963X_H
-#define __LINUX_PCA963X_H
-#include <linux/leds.h>
-
-enum pca963x_outdrv {
- PCA963X_OPEN_DRAIN,
- PCA963X_TOTEM_POLE, /* aka push-pull */
-};
-
-enum pca963x_blink_type {
- PCA963X_SW_BLINK,
- PCA963X_HW_BLINK,
-};
-
-enum pca963x_direction {
- PCA963X_NORMAL,
- PCA963X_INVERTED,
-};
-
-struct pca963x_platform_data {
- struct led_platform_data leds;
- enum pca963x_outdrv outdrv;
- enum pca963x_blink_type blink_type;
- enum pca963x_direction dir;
-};
-
-#endif /* __LINUX_PCA963X_H*/
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
deleted file mode 100644
index 64f8d14876e0..000000000000
--- a/include/linux/platform_data/leds-s3c24xx.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2006 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX - LEDs GPIO connector
-*/
-
-#ifndef __LEDS_S3C24XX_H
-#define __LEDS_S3C24XX_H
-
-struct s3c24xx_led_platdata {
- char *name;
- char *def_trigger;
-};
-
-#endif /* __LEDS_S3C24XX_H */
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
index c9da8d402750..95d85c1394bc 100644
--- a/include/linux/platform_data/lv5207lp.h
+++ b/include/linux/platform_data/lv5207lp.h
@@ -8,7 +8,7 @@
struct device;
struct lv5207lp_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int max_value;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
deleted file mode 100644
index aa5b5562d6f7..000000000000
--- a/include/linux/platform_data/macb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- */
-#ifndef __MACB_PDATA_H__
-#define __MACB_PDATA_H__
-
-#include <linux/clk.h>
-
-/**
- * struct macb_platform_data - platform data for MACB Ethernet
- * @pclk: platform clock
- * @hclk: AHB clock
- */
-struct macb_platform_data {
- struct clk *pclk;
- struct clk *hclk;
-};
-
-#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/max732x.h b/include/linux/platform_data/max732x.h
index f231c635faec..423999207cd5 100644
--- a/include/linux/platform_data/max732x.h
+++ b/include/linux/platform_data/max732x.h
@@ -7,17 +7,5 @@
struct max732x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
-
- /* interrupt base */
- int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
};
#endif /* __LINUX_I2C_MAX732X_H */
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
index 8a5f9f0b2c52..724e1f57b81f 100644
--- a/include/linux/platform_data/mdio-bcm-unimac.h
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -1,11 +1,14 @@
#ifndef __MDIO_BCM_UNIMAC_PDATA_H
#define __MDIO_BCM_UNIMAC_PDATA_H
+struct clk;
+
struct unimac_mdio_pdata {
u32 phy_mask;
int (*wait_func)(void *data);
void *wait_func_data;
const char *bus_name;
+ struct clk *clk;
};
#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
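
The new @clk member lets a platform hand the MDIO block's interface clock through platform data. A hedged board-side sketch; the PHY mask is hypothetical, and a NULL clock simply leaves clock handling to the driver:

#include <linux/bits.h>
#include <linux/platform_data/mdio-bcm-unimac.h>

static struct unimac_mdio_pdata example_unimac_pdata = {
	.phy_mask = (u32)~BIT(4),	/* scan only the PHY at address 4 */
	.clk	  = NULL,		/* optional interface clock handle */
};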
diff --git a/include/linux/platform_data/media/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h
deleted file mode 100644
index 8cfa76b6e1e1..000000000000
--- a/include/linux/platform_data/media/camera-mx2.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mx2-cam.h - i.MX27/i.MX25 camera driver header file
- *
- * Copyright (C) 2003, Intel Corporation
- * Copyright (C) 2008, Sascha Hauer <s.hauer@pengutronix.de>
- * Copyright (C) 2010, Baruch Siach <baruch@tkos.co.il>
- */
-
-#ifndef __MACH_MX2_CAM_H_
-#define __MACH_MX2_CAM_H_
-
-#define MX2_CAMERA_EXT_VSYNC (1 << 1)
-#define MX2_CAMERA_CCIR (1 << 2)
-#define MX2_CAMERA_CCIR_INTERLACE (1 << 3)
-#define MX2_CAMERA_HSYNC_HIGH (1 << 4)
-#define MX2_CAMERA_GATED_CLOCK (1 << 5)
-#define MX2_CAMERA_INV_DATA (1 << 6)
-#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7)
-
-/**
- * struct mx2_camera_platform_data - optional platform data for mx2_camera
- * @flags: any combination of MX2_CAMERA_*
- * @clk: clock rate of the csi block / 2
- */
-struct mx2_camera_platform_data {
- unsigned long flags;
- unsigned long clk;
-};
-
-#endif /* __MACH_MX2_CAM_H_ */
diff --git a/include/linux/platform_data/media/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h
deleted file mode 100644
index 781c004e5596..000000000000
--- a/include/linux/platform_data/media/camera-mx3.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mx3_camera.h - i.MX3x camera driver header file
- *
- * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef _MX3_CAMERA_H_
-#define _MX3_CAMERA_H_
-
-#include <linux/device.h>
-
-#define MX3_CAMERA_CLK_SRC 1
-#define MX3_CAMERA_EXT_VSYNC 2
-#define MX3_CAMERA_DP 4
-#define MX3_CAMERA_PCP 8
-#define MX3_CAMERA_HSP 0x10
-#define MX3_CAMERA_VSP 0x20
-#define MX3_CAMERA_DATAWIDTH_4 0x40
-#define MX3_CAMERA_DATAWIDTH_8 0x80
-#define MX3_CAMERA_DATAWIDTH_10 0x100
-#define MX3_CAMERA_DATAWIDTH_15 0x200
-
-#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \
- MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15)
-
-struct v4l2_async_subdev;
-
-/**
- * struct mx3_camera_pdata - i.MX3x camera platform data
- * @flags: MX3_CAMERA_* flags
- * @mclk_10khz: master clock frequency in 10kHz units
- * @dma_dev: IPU DMA device to match against in channel allocation
- */
-struct mx3_camera_pdata {
- unsigned long flags;
- unsigned long mclk_10khz;
- struct device *dma_dev;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
-};
-
-#endif
diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h
deleted file mode 100644
index 293b61b60c9d..000000000000
--- a/include/linux/platform_data/media/coda.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2013 Philipp Zabel, Pengutronix
- */
-#ifndef PLATFORM_CODA_H
-#define PLATFORM_CODA_H
-
-struct device;
-
-struct coda_platform_data {
- struct device *iram_dev;
-};
-
-#endif
diff --git a/include/linux/platform_data/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h
deleted file mode 100644
index 457321e917b9..000000000000
--- a/include/linux/platform_data/media/s5p_hdmi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Driver header for S5P HDMI chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- */
-
-#ifndef S5P_HDMI_H
-#define S5P_HDMI_H
-
-struct i2c_board_info;
-
-/**
- * @hdmiphy_bus: controller id for HDMIPHY bus
- * @hdmiphy_info: template for HDMIPHY I2C device
- * @mhl_bus: controller id for MHL control bus
- * @mhl_info: template for MHL I2C device
- * @hpd_gpio: GPIO for Hot-Plug-Detect pin
- *
- * NULL pointer for *_info fields indicates that
- * the corresponding chip is not present
- */
-struct s5p_hdmi_platform_data {
- int hdmiphy_bus;
- struct i2c_board_info *hdmiphy_info;
- int mhl_bus;
- struct i2c_board_info *mhl_info;
- int hpd_gpio;
-};
-
-#endif /* S5P_HDMI_H */
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index ea1cc6d829e9..8c659db4da6b 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -20,10 +20,32 @@
#define __MICROCHIP_KSZ_H
#include <linux/types.h>
+#include <linux/platform_data/dsa.h>
+
+enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9563_CHIP_ID = 0x00956300,
+ KSZ8567_CHIP_ID = 0x00856700,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
struct ksz_platform_data {
+ /* Must be first such that dsa_register_switch() can access it */
+ struct dsa_chip_data cd;
u32 chip_id;
- u16 enabled_ports;
};
#endif
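
The placement comment on @cd is load-bearing: the DSA core reads dev->platform_data as a struct dsa_chip_data, so keeping @cd first lets the same pointer serve both roles. A sketch with an arbitrary chip choice:

#include <linux/platform_data/microchip-ksz.h>

static struct ksz_platform_data example_ksz_pdata = {
	/* .cd must stay the first member: dsa_register_switch() treats
	 * dev->platform_data as a struct dsa_chip_data. */
	.chip_id = KSZ9477_CHIP_ID,
};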
diff --git a/include/linux/platform_data/mlxcpld.h b/include/linux/platform_data/mlxcpld.h
new file mode 100644
index 000000000000..d7610b528856
--- /dev/null
+++ b/include/linux/platform_data/mlxcpld.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/*
+ * Mellanox I2C multiplexer support in CPLD
+ *
+ * Copyright (C) 2016-2020 Mellanox Technologies
+ */
+
+#ifndef _LINUX_I2C_MLXCPLD_H
+#define _LINUX_I2C_MLXCPLD_H
+
+/* Platform data for the CPLD I2C multiplexers */
+
+/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
+ * @chan_ids: channels array
+ * @num_adaps: number of adapters
+ * @sel_reg_addr: mux select register offset in CPLD space
+ * @reg_size: register size in bytes
+ * @handle: handle to be passed by callback
+ * @completion_notify: callback to notify when all the adapters are created
+ */
+struct mlxcpld_mux_plat_data {
+ int *chan_ids;
+ int num_adaps;
+ int sel_reg_addr;
+ u8 reg_size;
+ void *handle;
+ int (*completion_notify)(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[]);
+};
+
+#endif /* _LINUX_I2C_MLXCPLD_H */
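
A sketch of per-mux platform data for the new header; the register offset and channel ids are hypothetical:

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/platform_data/mlxcpld.h>

static int example_mux_ready(void *handle, struct i2c_adapter *parent,
			     struct i2c_adapter *adapters[])
{
	/* called once all channel adapters have been created */
	return 0;
}

static int example_chan_ids[] = { 2, 3, 4, 5 };

static struct mlxcpld_mux_plat_data example_mux_pdata = {
	.chan_ids	   = example_chan_ids,
	.num_adaps	   = ARRAY_SIZE(example_chan_ids),
	.sel_reg_addr	   = 0x20,
	.reg_size	   = 1,
	.completion_notify = example_mux_ready,
};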
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 1af9c01563f9..0b9f81a6f753 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -1,34 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2017-2020 Mellanox Technologies Ltd.
*/
#ifndef __LINUX_PLATFORM_DATA_MLXREG_H
@@ -53,12 +25,75 @@ enum mlxreg_wdt_type {
};
/**
+ * enum mlxreg_hotplug_kind - kind of hotplug entry
+ *
+ * @MLXREG_HOTPLUG_DEVICE_NA: do not care;
+ * @MLXREG_HOTPLUG_LC_PRESENT: entry for line card presence in/out events;
+ * @MLXREG_HOTPLUG_LC_VERIFIED: entry for line card verification status events
+ * coming after line card security signature validation;
+ * @MLXREG_HOTPLUG_LC_POWERED: entry for line card power on/off events;
+ * @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming
+ * after hardware-firmware synchronization handshake;
+ * @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card
+ * PHYs ready / unready state;
+ * @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware
+ * availability / unavailability for the ports on line card;
+ * @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive
+ * event indicates that system should power off the line
+ * card for which this event has been received;
+ */
+enum mlxreg_hotplug_kind {
+ MLXREG_HOTPLUG_DEVICE_NA = 0,
+ MLXREG_HOTPLUG_LC_PRESENT = 1,
+ MLXREG_HOTPLUG_LC_VERIFIED = 2,
+ MLXREG_HOTPLUG_LC_POWERED = 3,
+ MLXREG_HOTPLUG_LC_SYNCED = 4,
+ MLXREG_HOTPLUG_LC_READY = 5,
+ MLXREG_HOTPLUG_LC_ACTIVE = 6,
+ MLXREG_HOTPLUG_LC_THERMAL = 7,
+};
+
+/**
+ * enum mlxreg_hotplug_device_action - hotplug device action required for
+ * driver's connectivity
+ *
+ * @MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: probe device for 'on' event, remove
+ * for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: probe platform device for 'on'
+ * event, remove for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_NO_ACTION: no connectivity action is required;
+ */
+enum mlxreg_hotplug_device_action {
+ MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION = 0,
+ MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION = 1,
+ MLXREG_HOTPLUG_DEVICE_NO_ACTION = 2,
+};
+
+/**
+ * struct mlxreg_core_hotplug_notifier - hotplug notifier block:
+ *
+ * @identity: notifier identity name;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ */
+struct mlxreg_core_hotplug_notifier {
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+};
+
+/**
* struct mlxreg_hotplug_device - I2C device data:
*
* @adapter: I2C device adapter;
* @client: I2C device client;
* @brdinfo: device board information;
* @nr: I2C adapter number to which the device is to be attached;
+ * @pdev: platform device, if device is instantiated as a platform device;
+ * @action: action to be performed upon event receiving;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ * @notifier: pointer to event notifier block;
*
* Structure represents I2C hotplug device static data (board topology) and
* dynamic data (related kernel objects handles).
@@ -68,6 +103,11 @@ struct mlxreg_hotplug_device {
struct i2c_client *client;
struct i2c_board_info *brdinfo;
int nr;
+ struct platform_device *pdev;
+ enum mlxreg_hotplug_device_action action;
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+ struct mlxreg_core_hotplug_notifier *notifier;
};
/**
@@ -79,12 +119,18 @@ struct mlxreg_hotplug_device {
* @bit: attribute effective bit;
* @capability: attribute capability register;
* @reg_prsnt: attribute presence register;
+ * @reg_sync: attribute sync register;
+ * @reg_pwr: attribute power register;
+ * @reg_ena: attribute enable register;
* @mode: access mode;
* @np: pointer to platform node associated with attribute;
* @hpdev: hotplug device data;
+ * @notifier: pointer to event notifier block;
* @health_cntr: dynamic device health indication counter;
* @attached: true if device has been attached after good health indication;
* @regnum: number of registers occupied by multi-register attribute;
+ * @slot: slot number, at which device is located;
+ * @secured: if set indicates that entry access is secured;
*/
struct mlxreg_core_data {
char label[MLXREG_CORE_LABEL_MAX_SIZE];
@@ -93,18 +139,25 @@ struct mlxreg_core_data {
u32 bit;
u32 capability;
u32 reg_prsnt;
+ u32 reg_sync;
+ u32 reg_pwr;
+ u32 reg_ena;
umode_t mode;
struct device_node *np;
struct mlxreg_hotplug_device hpdev;
+ struct mlxreg_core_hotplug_notifier *notifier;
u32 health_cntr;
bool attached;
u8 regnum;
+ u8 slot;
+ u8 secured;
};
/**
* struct mlxreg_core_item - same type components controlled by the driver:
*
* @data: component data;
+ * @kind: kind of hotplug attribute;
* @aggr_mask: group aggregation mask;
* @reg: group interrupt status register;
* @mask: group interrupt mask;
@@ -117,6 +170,7 @@ struct mlxreg_core_data {
*/
struct mlxreg_core_item {
struct mlxreg_core_data *data;
+ enum mlxreg_hotplug_kind kind;
u32 aggr_mask;
u32 reg;
u32 mask;
@@ -137,6 +191,7 @@ struct mlxreg_core_item {
* @features: supported features of device;
* @version: implementation version;
* @identity: device identity name;
+ * @capability: device capability register;
*/
struct mlxreg_core_platform_data {
struct mlxreg_core_data *data;
@@ -145,6 +200,7 @@ struct mlxreg_core_platform_data {
u32 features;
u32 version;
char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ u32 capability;
};
/**
@@ -160,6 +216,9 @@ struct mlxreg_core_platform_data {
* @mask_low: low aggregation interrupt common mask;
* @deferred_nr: I2C adapter number that must exist prior to probing;
* @shift_nr: I2C adapter numbers must be incremented by this value;
+ * @addr: mapped resource address;
+ * @handle: handle to be passed by callback;
+ * @completion_notify: callback to notify when platform driver probing is done;
*/
struct mlxreg_core_hotplug_platform_data {
struct mlxreg_core_item *items;
@@ -172,6 +231,9 @@ struct mlxreg_core_hotplug_platform_data {
u32 mask_low;
int deferred_nr;
int shift_nr;
+ void __iomem *addr;
+ void *handle;
+ int (*completion_notify)(void *handle, int id);
};
#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */
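
A consumer-side sketch of the new notifier block; how the block is handed to the hotplug driver is platform-specific and not shown here:

#include <linux/printk.h>
#include <linux/platform_data/mlxreg.h>

static int example_lc_event(void *handle, enum mlxreg_hotplug_kind kind,
			    u8 action)
{
	if (kind == MLXREG_HOTPLUG_LC_POWERED)
		pr_info("line card powered %s\n", action ? "on" : "off");
	return 0;
}

static struct mlxreg_core_hotplug_notifier example_lc_notifier = {
	.identity	= "example-lc",
	.user_handler	= example_lc_event,
};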
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
deleted file mode 100644
index cba1184b364c..000000000000
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2010 Wolfram Sang <kernel@pengutronix.de>
- */
-
-#ifndef __ASM_ARCH_IMX_ESDHC_H
-#define __ASM_ARCH_IMX_ESDHC_H
-
-#include <linux/types.h>
-
-enum wp_types {
- ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
- ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
- ESDHC_WP_GPIO, /* external gpio pin for WP */
-};
-
-enum cd_types {
- ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
- ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
- ESDHC_CD_GPIO, /* external gpio pin for CD */
- ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
-};
-
-/**
- * struct esdhc_platform_data - platform data for esdhc on i.MX
- *
- * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
- *
- * @wp_type: type of write_protect method (see wp_types enum above)
- * @cd_type: type of card_detect method (see cd_types enum above)
- */
-
-struct esdhc_platform_data {
- enum wp_types wp_type;
- enum cd_types cd_type;
- int max_bus_width;
- unsigned int delay_line;
- unsigned int tuning_step; /* The delay cell steps in tuning procedure */
- unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
- unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
-};
-#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index f0b8947e6b07..054d0c3c5ec5 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -20,8 +20,6 @@ struct omap_mmc_platform_data {
* maximum frequency on the MMC bus */
unsigned int max_freq;
- /* switch the bus to a new slot */
- int (*switch_slot)(struct device *dev, int slot);
/* initialize board-specific MMC functionality, can be NULL if
* not supported */
int (*init)(struct device *dev);
@@ -108,8 +106,7 @@ struct omap_mmc_platform_data {
const char *name;
u32 ocr_mask;
- /* Card detection IRQs */
- int card_detect_irq;
+ /* Card detection */
int (*card_detect)(struct device *dev, int slot);
unsigned int ban_openended:1;
diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h
deleted file mode 100644
index 33310b11cbdd..000000000000
--- a/include/linux/platform_data/mmc-s3cmci.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ARCH_MCI_H
-#define _ARCH_MCI_H
-
-/**
- * struct s3c24xx_mci_pdata - sd/mmc controller platform data
- * @no_wprotect: Set this to indicate there is no write-protect switch.
- * @no_detect: Set this if there is no detect switch.
- * @wprotect_invert: Invert the default sense of the write protect switch.
- * @use_dma: Set to allow the use of DMA.
- * @gpio_detect: GPIO number for the card detect line.
- * @gpio_wprotect: GPIO number for the write protect line.
- * @ocr_avail: The mask of the available power states, non-zero to use.
- * @set_power: Callback to control the power mode.
- *
- * The @gpio_detect is used for card detection when @no_detect is unset,
- * and the default sense is that 0 returned from gpio_get_value() means
- * that a card is inserted. If @detect_invert is set, then the value from
- * gpio_get_value() is inverted, which makes 1 mean card inserted.
- *
- * The driver will use @gpio_wprotect to signal whether the card is write
- * protected if @no_wprotect is not set. A 0 returned from gpio_get_value()
- * means the card is read/write, and 1 means read-only. The @wprotect_invert
- * will invert the value returned from gpio_get_value().
- *
- * Card power is set by @ocr_avail, using MMC_VDD_ constants if it is set
- * to a non-zero value, otherwise the default of 3.2-3.4V is used.
- */
-struct s3c24xx_mci_pdata {
- unsigned int no_wprotect:1;
- unsigned int no_detect:1;
- unsigned int wprotect_invert:1;
- unsigned int use_dma:1;
-
- unsigned long ocr_avail;
- void (*set_power)(unsigned char power_mode,
- unsigned short vdd);
-};
-
-/**
- * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device
- * @pdata: The platform data
- *
- * Copy the platform data supplied by @pdata so that this can be marked
- * __initdata.
- */
-extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata);
-
-#endif /* _ARCH_NCI_H */
diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h
deleted file mode 100644
index 83428d8ee18d..000000000000
--- a/include/linux/platform_data/mmp_audio.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * MMP Platform AUDIO Management
- *
- * Copyright (c) 2011 Marvell Semiconductors Inc.
- */
-
-#ifndef MMP_AUDIO_H
-#define MMP_AUDIO_H
-
-struct mmp_audio_platdata {
- u32 period_max_capture;
- u32 buffer_max_capture;
- u32 period_max_playback;
- u32 buffer_max_playback;
-};
-
-#endif /* MMP_AUDIO_H */
diff --git a/include/linux/platform_data/mouse-pxa930_trkball.h b/include/linux/platform_data/mouse-pxa930_trkball.h
deleted file mode 100644
index ba0ac7a30d8c..000000000000
--- a/include/linux/platform_data/mouse-pxa930_trkball.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_PXA930_TRKBALL_H
-#define __ASM_ARCH_PXA930_TRKBALL_H
-
-struct pxa930_trkball_platform_data {
- int x_filter;
- int y_filter;
-};
-
-#endif /* __ASM_ARCH_PXA930_TRKBALL_H */
-
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index 03e92c71b3fa..dd474dd44848 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -60,15 +60,16 @@ struct davinci_nand_pdata { /* platform_data */
struct mtd_partition *parts;
unsigned nr_parts;
- /* none == NAND_ECC_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_SOFT
- * else == NAND_ECC_HW, according to ecc_bits
+ /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
+ * soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
- enum nand_ecc_mode ecc_mode;
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
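
Under the renamed fields, a 4-bit hardware-ECC configuration might look like the sketch below; the placement value is an assumption, so consult the driver for the exact pairing:

#include <linux/mtd/nand.h>
#include <linux/platform_data/mtd-davinci.h>

static struct davinci_nand_pdata example_davinci_nand_pdata = {
	.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST,
	.ecc_placement = NAND_ECC_PLACEMENT_INTERLEAVED, /* assumption */
	.ecc_bits      = 4,
};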
diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h
deleted file mode 100644
index d1230030c6db..000000000000
--- a/include/linux/platform_data/mtd-mxc_nand.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
- */
-
-#ifndef __ASM_ARCH_NAND_H
-#define __ASM_ARCH_NAND_H
-
-#include <linux/mtd/partitions.h>
-
-struct mxc_nand_platform_data {
- unsigned int width; /* data bus width in bytes */
- unsigned int hw_ecc:1; /* 0 if suppress hardware ECC */
- unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */
- struct mtd_partition *parts; /* partition table */
- int nr_parts; /* size of parts */
-};
-#endif /* __ASM_ARCH_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index de6ada739121..8c2f1f185353 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -7,6 +7,7 @@
#define _MTD_NAND_OMAP2_H
#include <linux/mtd/partitions.h>
+#include <linux/mod_devicetable.h>
#define GPMC_BCH_NUM_REMAINDER 8
@@ -61,4 +62,11 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
};
-#endif
+
+static const struct of_device_id omap_nand_ids[] = {
+ { .compatible = "ti,omap2-nand", },
+ { .compatible = "ti,am64-nand", },
+ {},
+};
+
+#endif /* _MTD_NAND_OMAP2_H */
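
Moving the of_device_id table into the header lets consumers share one match list; a driver-side sketch, assuming the usual platform_device plumbing:

#include <linux/platform_device.h>
#include <linux/platform_data/mtd-nand-omap2.h>

static struct platform_driver example_omap_nand_driver = {
	.driver = {
		.name		= "omap2-nand",
		.of_match_table	= omap_nand_ids,
	},
	/* .probe/.remove omitted in this sketch */
};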
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index 08675b16f9e1..25390fc3e795 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -49,7 +49,7 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
- enum nand_ecc_mode ecc_mode;
+ enum nand_ecc_engine_type engine_type;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
index c510734405bb..89d0ec6f7d46 100644
--- a/include/linux/platform_data/net-cw1200.h
+++ b/include/linux/platform_data/net-cw1200.h
@@ -14,8 +14,6 @@ struct cw1200_platform_data_spi {
/* All others are optional */
bool have_5ghz;
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata,
@@ -30,8 +28,6 @@ struct cw1200_platform_data_sdio {
/* All others are optional */
bool have_5ghz;
bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int irq; /* IRQ line or 0 to use SDIO IRQ */
int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
deleted file mode 100644
index 9e75ac8d19be..000000000000
--- a/include/linux/platform_data/nfcmrvl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
-
-#ifndef _NFCMRVL_PTF_H_
-#define _NFCMRVL_PTF_H_
-
-struct nfcmrvl_platform_data {
- /*
- * Generic
- */
-
- /* GPIO that is wired to RESET_N signal */
- int reset_n_io;
- /* Tell if transport is muxed in HCI one */
- unsigned int hci_muxed;
-
- /*
- * UART specific
- */
-
- /* Tell if UART needs flow control at init */
- unsigned int flow_control;
- /* Tell if firmware supports break control for power management */
- unsigned int break_control;
-
-
- /*
- * I2C specific
- */
-
- unsigned int irq;
- unsigned int irq_polarity;
-};
-
-#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
deleted file mode 100644
index b324d03e580c..000000000000
--- a/include/linux/platform_data/ntc_thermistor.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * ntc_thermistor.h - NTC Thermistors
- *
- * Copyright (C) 2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- */
-#ifndef _LINUX_NTC_H
-#define _LINUX_NTC_H
-
-struct iio_channel;
-
-enum ntc_thermistor_type {
- TYPE_B57330V2103,
- TYPE_B57891S0103,
- TYPE_NCPXXWB473,
- TYPE_NCPXXWF104,
- TYPE_NCPXXWL333,
- TYPE_NCPXXXH103,
-};
-
-struct ntc_thermistor_platform_data {
- /*
- * Exactly one of read_uV and read_ohm should be provided; both
- * callbacks should return a negative value on error.
- *
- * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use
- * read_uV()
- *
- * How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc_thermistor.rst
- *
- * pullup/down_ohm: 0 for infinite / not-connected
- *
- * chan: iio_channel pointer to communicate with the ADC which the
- * thermistor is using for conversion of the analog values.
- */
- int (*read_uv)(struct ntc_thermistor_platform_data *);
- unsigned int pullup_uv;
-
- unsigned int pullup_ohm;
- unsigned int pulldown_ohm;
- enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect;
- struct iio_channel *chan;
-
- int (*read_ohm)(void);
-};
-
-#endif /* _LINUX_NTC_H */
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index 0dd851ea1c72..7fcb55fe21c9 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -37,9 +37,6 @@ struct omap_tw4030_pdata {
bool has_digimic0;
bool has_digimic1;
u8 has_linein;
-
- /* Jack detect GPIO or <= 0 if it is not implemented */
- int jack_detect;
};
#endif /* _OMAP_TWL4030_H_ */
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 4eb53e023997..3c3787c4d96c 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -11,21 +11,8 @@ struct pca953x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
- /* initial polarity inversion setting */
- u32 invert;
-
/* interrupt base */
int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- const char *const *names;
};
#endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/platform_data/pcf857x.h b/include/linux/platform_data/pcf857x.h
deleted file mode 100644
index 11d4ed78c7f4..000000000000
--- a/include/linux/platform_data/pcf857x.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PCF857X_H
-#define __LINUX_PCF857X_H
-
-/**
- * struct pcf857x_platform_data - data to set up pcf857x driver
- * @gpio_base: number of the chip's first GPIO
- * @n_latch: optional bit-inverse of initial register value; if
- * you leave this initialized to zero the driver will act
- * like the chip was just reset
- * @setup: optional callback issued once the GPIOs are valid
- * @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
- *
- * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
- * the i2c_board_info used with the pcf857x driver must provide its
- * platform_data (pointer to one of these structures) with at least
- * the gpio_base value initialized.
- *
- * The @setup callback may be used with the kind of board-specific glue
- * which hands the (now-valid) GPIOs to other drivers, or which puts
- * devices in their initial states using these GPIOs.
- *
- * These GPIO chips are only "quasi-bidirectional"; read the chip specs
- * to understand the behavior. They don't have separate registers to
- * record which pins are used for input or output, record which output
- * values are driven, or provide access to input values. That must be
- * inferred by reading the chip's value and knowing the last value written
- * to it. If you leave n_latch initialized to zero, that last written
- * value is presumed to be all ones (as if the chip were just reset).
- */
-struct pcf857x_platform_data {
- unsigned gpio_base;
- unsigned n_latch;
-
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif /* __LINUX_PCF857X_H */
diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/include/linux/platform_data/pcmcia-pxa2xx_viper.h
deleted file mode 100644
index a23b58aff9e1..000000000000
--- a/include/linux/platform_data/pcmcia-pxa2xx_viper.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCOM_PCMCIA_H
-#define __ARCOM_PCMCIA_H
-
-struct arcom_pcmcia_pdata {
- int cd_gpio;
- int rdy_gpio;
- int pwr_gpio;
- void (*reset)(int state);
-};
-
-#endif
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
index 644af1d89cfa..7037ba7a53ca 100644
--- a/include/linux/platform_data/pm33xx.h
+++ b/include/linux/platform_data/pm33xx.h
@@ -54,11 +54,8 @@ struct am33xx_pm_platform_data {
void (*begin_suspend)(void);
void (*finish_suspend)(void);
struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
- void __iomem *(*get_rtc_base_addr)(void);
void (*save_context)(void);
void (*restore_context)(void);
- void (*prepare_rtc_suspend)(void);
- void (*prepare_rtc_resume)(void);
int (*check_off_mode_enable)(void);
};
diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h
index ff9c35dca59d..bc99cc6a3c5f 100644
--- a/include/linux/platform_data/pxa2xx_udc.h
+++ b/include/linux/platform_data/pxa2xx_udc.h
@@ -25,4 +25,10 @@ struct pxa2xx_udc_mach_info {
int gpio_pullup; /* high == pullup activated */
};
+#ifdef CONFIG_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph() do {} while (0)
+#endif
+
#endif
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
deleted file mode 100644
index 22c53825528f..000000000000
--- a/include/linux/platform_data/rtc-ds2404.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ds2404.h - platform data structure for the DS2404 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- */
-
-#ifndef __LINUX_DS2404_H
-#define __LINUX_DS2404_H
-
-struct ds2404_platform_data {
-
- unsigned int gpio_rst;
- unsigned int gpio_clk;
- unsigned int gpio_dq;
-};
-#endif
diff --git a/include/linux/platform_data/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h
deleted file mode 100644
index e55d82cebf80..000000000000
--- a/include/linux/platform_data/rtc-v3020.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * v3020.h - Registers definition and platform data structure for the v3020 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 8D Technologies inc.
- */
-#ifndef __LINUX_V3020_H
-#define __LINUX_V3020_H
-
-/* The v3020 has only one data pin but which one
- * is used depends on the board. */
-struct v3020_platform_data {
- int leftshift; /* (1<<(leftshift)) & readl() */
-
- unsigned int use_gpio:1;
- unsigned int gpio_cs;
- unsigned int gpio_wr;
- unsigned int gpio_rd;
- unsigned int gpio_io;
-};
-
-#define V3020_STATUS_0 0x00
-#define V3020_STATUS_1 0x01
-#define V3020_SECONDS 0x02
-#define V3020_MINUTES 0x03
-#define V3020_HOURS 0x04
-#define V3020_MONTH_DAY 0x05
-#define V3020_MONTH 0x06
-#define V3020_YEAR 0x07
-#define V3020_WEEK_DAY 0x08
-#define V3020_WEEK 0x09
-
-#define V3020_IS_COMMAND(val) ((val)>=0x0E)
-
-#define V3020_CMD_RAM2CLOCK 0x0E
-#define V3020_CMD_CLOCK2RAM 0x0F
-
-#endif /* __LINUX_V3020_H */
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
deleted file mode 100644
index 4dc9b8760166..000000000000
--- a/include/linux/platform_data/s3c-hsudc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * S3C24XX USB 2.0 High-speed USB controller gadget driver
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
- * Each endpoint can be configured as either in or out endpoint. Endpoints
- * can be configured for Bulk or Interrupt transfer mode.
-*/
-
-#ifndef __LINUX_USB_S3C_HSUDC_H
-#define __LINUX_USB_S3C_HSUDC_H
-
-/**
- * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
- * @epnum: Number of endpoints to be instantiated by the controller driver.
- * @gpio_init: Platform specific USB related GPIO initialization.
- * @gpio_uninit: Platform specific USB related GPIO uninitialization.
- *
- * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
- * controllers.
- */
-struct s3c24xx_hsudc_platdata {
- unsigned int epnum;
- void (*gpio_init)(void);
- void (*gpio_uninit)(void);
-};
-
-#endif /* __LINUX_USB_S3C_HSUDC_H */
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
deleted file mode 100644
index 0844b21372c7..000000000000
--- a/include/linux/platform_data/serial-imx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- */
-
-#ifndef ASMARM_ARCH_UART_H
-#define ASMARM_ARCH_UART_H
-
-#define IMXUART_HAVE_RTSCTS (1<<0)
-
-struct imxuart_platform_data {
- unsigned int flags;
-};
-
-#endif
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/platform_data/sh_mmcif.h
index e25533b95d9f..6eb914f958f9 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/platform_data/sh_mmcif.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/linux/mmc/sh_mmcif.h
- *
* platform data for eMMC driver
*
* Copyright (C) 2010 Renesas Solutions Corp.
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index fe815d7d9f58..6c19d4fbbe39 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,9 +10,7 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <linux/kernel.h>
-
-#include <drm/drm_mode.h>
+#include <video/videomode.h>
enum shmob_drm_clk_source {
SHMOB_DRM_CLK_BUS,
@@ -20,72 +18,21 @@ enum shmob_drm_clk_source {
SHMOB_DRM_CLK_EXTERNAL,
};
-enum shmob_drm_interface {
- SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_RGB12B, /* 12bpp */
- SHMOB_DRM_IFACE_RGB16, /* 16bpp */
- SHMOB_DRM_IFACE_RGB18, /* 18bpp */
- SHMOB_DRM_IFACE_RGB24, /* 24bpp */
- SHMOB_DRM_IFACE_YUV422, /* 16bpp */
- SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */
- SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */
- SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */
- SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_SYS16A, /* 16bpp */
- SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */
- SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */
- SHMOB_DRM_IFACE_SYS18, /* 18bpp */
- SHMOB_DRM_IFACE_SYS24, /* 24bpp */
-};
-
-struct shmob_drm_backlight_data {
- const char *name;
- int max_brightness;
- int (*get_brightness)(void);
- int (*set_brightness)(int brightness);
-};
-
struct shmob_drm_panel_data {
unsigned int width_mm; /* Panel width in mm */
unsigned int height_mm; /* Panel height in mm */
- struct drm_mode_modeinfo mode;
-};
-
-struct shmob_drm_sys_interface_data {
- unsigned int read_latch:6;
- unsigned int read_setup:8;
- unsigned int read_cycle:8;
- unsigned int read_strobe:8;
- unsigned int write_setup:8;
- unsigned int write_cycle:8;
- unsigned int write_strobe:8;
- unsigned int cs_setup:3;
- unsigned int vsync_active_high:1;
- unsigned int vsync_dir_input:1;
+ struct videomode mode;
};
-#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
-#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
-#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
-#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
-#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
-
struct shmob_drm_interface_data {
- enum shmob_drm_interface interface;
- struct shmob_drm_sys_interface_data sys;
+ unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */
unsigned int clk_div;
- unsigned int flags;
};
struct shmob_drm_platform_data {
enum shmob_drm_clk_source clk_source;
struct shmob_drm_interface_data iface;
struct shmob_drm_panel_data panel;
- struct shmob_drm_backlight_data backlight;
};
#endif /* __SHMOB_DRM_H__ */
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
deleted file mode 100644
index 14680d2a98f7..000000000000
--- a/include/linux/platform_data/sht3x.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Sensirion AG, Switzerland
- * Author: David Frey <david.frey@sensirion.com>
- * Author: Pascal Sachs <pascal.sachs@sensirion.com>
- */
-
-#ifndef __SHT3X_H_
-#define __SHT3X_H_
-
-struct sht3x_platform_data {
- bool blocking_io;
- bool high_precision;
-};
-#endif /* __SHT3X_H_ */
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index c71a2dd66143..5f412a615532 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -105,10 +105,12 @@ struct si5351_clkout_config {
* @clk_xtal: xtal input clock
* @clk_clkin: clkin input clock
* @pll_src: array of pll source clock setting
+ * @pll_reset: array indicating if plls should be reset after setting the rate
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
enum si5351_pll_src pll_src[2];
+ bool pll_reset[2];
struct si5351_clkout_config clkout[8];
};
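
With the new flag, board code can request a PLL soft reset after rate changes on a per-PLL basis; a sketch with an arbitrary source selection:

#include <linux/platform_data/si5351.h>

static struct si5351_platform_data example_si5351_pdata = {
	.pll_src   = { SI5351_PLL_SRC_XTAL, SI5351_PLL_SRC_XTAL },
	.pll_reset = { true, false },	/* reset PLLA only after set_rate */
};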
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index ca8337695c2a..4f94d52ac99f 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -16,11 +16,13 @@
#define SIMPLEFB_FORMATS \
{ \
{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
+ { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \
{ "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
{ "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
{ "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
{ "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
+ { "x8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {0, 0}, DRM_FORMAT_XBGR8888 }, \
{ "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \
{ "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
{ "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h
deleted file mode 100644
index 81a388ff58cc..000000000000
--- a/include/linux/platform_data/spi-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#ifndef _ATH79_SPI_PLATFORM_H
-#define _ATH79_SPI_PLATFORM_H
-
-struct ath79_spi_platform_data {
- unsigned bus_num;
- unsigned num_chipselect;
-};
-
-#endif /* _ATH79_SPI_PLATFORM_H */
diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h
deleted file mode 100644
index efaa596848c9..000000000000
--- a/include/linux/platform_data/spi-clps711x.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CLPS711X SPI bus driver definitions
- *
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
- */
-
-#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-
-/* Board specific platform_data */
-struct spi_clps711x_pdata {
- int *chipselect; /* Array of GPIO-numbers */
- int num_chipselect; /* Total count of GPIOs */
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index 65fd5ffd257c..f0db674f07b8 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -12,5 +12,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
u32 sample_sel;
+ u32 tick_delay;
};
#endif
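
A sketch of per-chip config with the new field; the delay value is hypothetical and expressed in controller tick units:

#include <linux/platform_data/spi-mt65xx.h>

static const struct mtk_chip_config example_mtk_chip_config = {
	.sample_sel = 0,
	.tick_delay = 2,	/* hypothetical: delay sampling by two ticks */
};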
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 773daf7915a3..1d6e6c424fc6 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -16,7 +16,6 @@ struct platform_device;
* struct s3c64xx_spi_csinfo - ChipSelect description
* @fb_delay: Slave specific feedback delay.
* Refer to FB_CLK_SEL register definition in SPI chapter.
- * @line: Custom 'identity' of the CS line.
*
* This is per SPI-Slave Chipselect information.
* Allocate and initialize one in machine init code and make the
@@ -24,45 +23,36 @@ struct platform_device;
*/
struct s3c64xx_spi_csinfo {
u8 fb_delay;
- unsigned line;
};
/**
* struct s3c64xx_spi_info - SPI Controller defining structure
* @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
* @num_cs: Number of CS this controller emulates.
+ * @no_cs: Used when CS line is not connected.
* @cfg_gpio: Configure pins for this SPI controller.
*/
struct s3c64xx_spi_info {
int src_clk_nr;
int num_cs;
bool no_cs;
+ bool polling;
int (*cfg_gpio)(void);
};
/**
* s3c64xx_spi_set_platdata - SPI Controller configure callback by the board
* initialization code.
- * @cfg_gpio: Pointer to gpio setup function.
* @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
* @num_cs: Number of elements in the 'cs' array.
*
* Call this from machine init code for each SPI Controller that
* has some chips attached to it.
*/
-extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
+extern void s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs);
/* defined by architecture to configure gpio */
extern int s3c64xx_spi0_cfg_gpio(void);
-extern int s3c64xx_spi1_cfg_gpio(void);
-extern int s3c64xx_spi2_cfg_gpio(void);
extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
#endif /*__SPI_S3C64XX_H */
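
Board init shrinks accordingly under the reduced helper signature; a sketch:

#include <linux/init.h>
#include <linux/platform_data/spi-s3c64xx.h>

static void __init example_board_init_spi(void)
{
	/* clock source index 0, one chip select; pin setup now comes
	 * from the exported s3c64xx_spi0_cfg_gpio()/s3c64xx_spi0_pdata
	 * rather than a per-call function pointer. */
	s3c64xx_spi0_set_platdata(0, 1);
}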
diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h
deleted file mode 100644
index 3f9e632d6f63..000000000000
--- a/include/linux/platform_data/ssm2518.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SSM2518 amplifier audio driver
- *
- * Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__
-#define __LINUX_PLATFORM_DATA_SSM2518_H__
-
-/**
- * struct ssm2518_platform_data - Platform data for the ssm2518 driver
- * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is
- * hardwired.
- */
-struct ssm2518_platform_data {
- int enable_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h
deleted file mode 100644
index 61db674f36cc..000000000000
--- a/include/linux/platform_data/st33zp24.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
- * Copyright (C) 2009 - 2016 STMicroelectronics
- */
-#ifndef __ST33ZP24_H__
-#define __ST33ZP24_H__
-
-#define TPM_ST33_I2C "st33zp24-i2c"
-#define TPM_ST33_SPI "st33zp24-spi"
-
-struct st33zp24_platform_data {
- int io_lpcpd;
-};
-
-#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index e40b28ca892e..a657830232ae 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -13,8 +13,9 @@
/**
* struct st_sensors_platform_data - Platform data for the ST sensors
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
- * Available only for accelerometer and pressure sensors.
+ * Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * Magnetometer DRDY is supported only on LSM9DS0 and LSM303D.
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index c59999ce044e..eb556f988d57 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -50,6 +50,12 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_OTG BIT(30)
+#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29)
+#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
+#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
+#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
+#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23)
#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22)
diff --git a/include/linux/platform_data/timer-ixp4xx.h b/include/linux/platform_data/timer-ixp4xx.h
deleted file mode 100644
index ee92ae7edaed..000000000000
--- a/include/linux/platform_data/timer-ixp4xx.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TIMER_IXP4XX_H
-#define __TIMER_IXP4XX_H
-
-#include <linux/ioport.h>
-
-void __init ixp4xx_timer_setup(resource_size_t timerbase,
- int timer_irq,
- unsigned int timer_freq);
-
-#endif
diff --git a/include/linux/platform_data/tps68470.h b/include/linux/platform_data/tps68470.h
new file mode 100644
index 000000000000..e605a2cab07f
--- /dev/null
+++ b/include/linux/platform_data/tps68470.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __PDATA_TPS68470_H
+#define __PDATA_TPS68470_H
+
+enum tps68470_regulators {
+ TPS68470_CORE,
+ TPS68470_ANA,
+ TPS68470_VCM,
+ TPS68470_VIO,
+ TPS68470_VSIO,
+ TPS68470_AUX1,
+ TPS68470_AUX2,
+ TPS68470_NUM_REGULATORS
+};
+
+struct regulator_init_data;
+
+struct tps68470_regulator_platform_data {
+ const struct regulator_init_data *reg_init_data[TPS68470_NUM_REGULATORS];
+};
+
+struct tps68470_clk_consumer {
+ const char *consumer_dev_name;
+ const char *consumer_con_id;
+};
+
+struct tps68470_clk_platform_data {
+ unsigned int n_consumers;
+ struct tps68470_clk_consumer consumers[];
+};
+
+#endif
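As a hedged sketch, a board-mapping driver could instantiate the clock platform data like this (the consumer device name and con_id are hypothetical; static initialization of the flexible consumers[] array is the GCC pattern this struct is laid out for):

	static struct tps68470_clk_platform_data tps68470_clk_pdata = {
		.n_consumers = 1,
		.consumers = {
			{
				/* hypothetical sensor that takes its xvclk from the PMIC */
				.consumer_dev_name = "i2c-INT347A:00",
				.consumer_con_id = "xvclk",
			},
		},
	};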
diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h
deleted file mode 100644
index 9cf9309c3f24..000000000000
--- a/include/linux/platform_data/tsl2563.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_TSL2563_H
-#define __LINUX_TSL2563_H
-
-struct tsl2563_platform_data {
- int cover_comp_gain;
-};
-
-#endif /* __LINUX_TSL2563_H */
diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h
index 973c1bb32168..c8f6de685306 100644
--- a/include/linux/platform_data/uio_dmem_genirq.h
+++ b/include/linux/platform_data/uio_dmem_genirq.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_dmem_genirq.h
*
* Copyright (C) 2012 Damian Hobson-Garcia
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UIO_DMEM_GENIRQ_H
diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h
index 31f2e22661bc..f76fa393b802 100644
--- a/include/linux/platform_data/uio_pruss.h
+++ b/include/linux/platform_data/uio_pruss.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_pruss.h
*
* Platform data for uio_pruss driver
*
* Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UIO_PRUSS_H_
diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h
deleted file mode 100644
index ad9794d09bc8..000000000000
--- a/include/linux/platform_data/usb-ehci-mxc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
-#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
-
-struct mxc_usbh_platform_data {
- int (*init)(struct platform_device *pdev);
- int (*exit)(struct platform_device *pdev);
-
- unsigned int portsc;
- struct usb_phy *otg;
-};
-
-#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
-
diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h
deleted file mode 100644
index 97a670f3d8fb..000000000000
--- a/include/linux/platform_data/usb-mx2.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
- */
-
-#ifndef __ASM_ARCH_MX21_USBH
-#define __ASM_ARCH_MX21_USBH
-
-enum mx21_usbh_xcvr {
- /* Values below as used by hardware (HWMODE register) */
- MX21_USBXCVR_TXDIF_RXDIF = 0,
- MX21_USBXCVR_TXDIF_RXSE = 1,
- MX21_USBXCVR_TXSE_RXDIF = 2,
- MX21_USBXCVR_TXSE_RXSE = 3,
-};
-
-struct mx21_usbh_platform_data {
-	enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */
-	enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */
-	u16		enable_host1:1,
-		enable_host2:1,
-		enable_otg_host:1, /* enable "OTG" port (as host) */
-		host1_xcverless:1, /* transceiverless host1 port */
-		host1_txenoe:1, /* output enable host1 transmit enable */
-		otg_ext_xcvr:1, /* external transceiver for OTG port */
- unused:10;
-};
-
-#endif /* __ASM_ARCH_MX21_USBH */
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index 5e70d667031c..580978e468f8 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* usb-omap.h - Platform data for the various OMAP USB IPs
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
- *
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#define OMAP3_HS_USB_PORTS 3
diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h
index 43b5ce139c37..e7b8dc92a269 100644
--- a/include/linux/platform_data/usb-omap1.h
+++ b/include/linux/platform_data/usb-omap1.h
@@ -48,6 +48,10 @@ struct omap_usb_config {
u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
int (*ocpi_enable)(void);
+
+ void (*lb_reset)(void);
+
+ int (*transceiver_power)(int on);
};
#endif /* __LINUX_USB_OMAP1_H */
diff --git a/include/linux/platform_data/usb-pxa3xx-ulpi.h b/include/linux/platform_data/usb-pxa3xx-ulpi.h
deleted file mode 100644
index 4d31a5cbdeb1..000000000000
--- a/include/linux/platform_data/usb-pxa3xx-ulpi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PXA3xx U2D header
- *
- * Copyright (C) 2010 CompuLab Ltd.
- *
- * Igor Grinberg <grinberg@compulab.co.il>
- */
-#ifndef __PXA310_U2D__
-#define __PXA310_U2D__
-
-#include <linux/usb/ulpi.h>
-
-struct pxa3xx_u2d_platform_data {
-
-#define ULPI_SER_6PIN (1 << 0)
-#define ULPI_SER_3PIN (1 << 1)
- unsigned int ulpi_mode;
-
- int (*init)(struct device *);
- void (*exit)(struct device *);
-};
-
-
-/* Start PXA3xx U2D host */
-int pxa3xx_u2d_start_hc(struct usb_bus *host);
-/* Stop PXA3xx U2D host */
-void pxa3xx_u2d_stop_hc(struct usb_bus *host);
-
-extern void pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info);
-
-#endif /* __PXA310_U2D__ */
diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h
deleted file mode 100644
index 07394819d03b..000000000000
--- a/include/linux/platform_data/usb-s3c2410_udc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* arch/arm/plat-samsung/include/plat/udc.h
- *
- * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
- *
- * Changelog:
- * 14-Mar-2005 RTP Created file
- * 02-Aug-2005 RTP File rename
- * 07-Sep-2005 BJD Minor cleanups, changed cmd to enum
- * 18-Jan-2007 HMW Add per-platform vbus_draw function
-*/
-
-#ifndef __ASM_ARM_ARCH_UDC_H
-#define __ASM_ARM_ARCH_UDC_H
-
-enum s3c2410_udc_cmd_e {
- S3C2410_UDC_P_ENABLE = 1, /* Pull-up enable */
- S3C2410_UDC_P_DISABLE = 2, /* Pull-up disable */
- S3C2410_UDC_P_RESET = 3, /* UDC reset, in case of */
-};
-
-struct s3c2410_udc_mach_info {
- void (*udc_command)(enum s3c2410_udc_cmd_e);
- void (*vbus_draw)(unsigned int ma);
-
- unsigned int pullup_pin;
- unsigned int pullup_pin_inverted;
-
- unsigned int vbus_pin;
- unsigned char vbus_pin_inverted;
-};
-
-extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
-
-struct s3c24xx_hsudc_platdata;
-
-extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd);
-
-#endif /* __ASM_ARM_ARCH_UDC_H */
diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h
index d01ef97ddf36..f3c942f396f8 100644
--- a/include/linux/platform_data/usb3503.h
+++ b/include/linux/platform_data/usb3503.h
@@ -12,6 +12,7 @@ enum usb3503_mode {
USB3503_MODE_UNKNOWN,
USB3503_MODE_HUB,
USB3503_MODE_STANDBY,
+ USB3503_MODE_BYPASS,
};
struct usb3503_platform_data {
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
deleted file mode 100644
index de6a4ad41e76..000000000000
--- a/include/linux/platform_data/ux500_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST Ericsson SA 2011
- *
- * STE Ux500 Watchdog platform data
- */
-#ifndef __UX500_WDT_H
-#define __UX500_WDT_H
-
-/**
- * struct ux500_wdt_data
- */
-struct ux500_wdt_data {
- unsigned int timeout;
- bool has_28_bits_resolution;
-};
-
-#endif /* __UX500_WDT_H */
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
deleted file mode 100644
index 02812651af7d..000000000000
--- a/include/linux/platform_data/video-imxfb.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This structure describes the machine which we are running on.
- */
-#ifndef __MACH_IMXFB_H__
-#define __MACH_IMXFB_H__
-
-#include <linux/fb.h>
-
-#define PCR_TFT (1 << 31)
-#define PCR_COLOR (1 << 30)
-#define PCR_PBSIZ_1 (0 << 28)
-#define PCR_PBSIZ_2 (1 << 28)
-#define PCR_PBSIZ_4 (2 << 28)
-#define PCR_PBSIZ_8 (3 << 28)
-#define PCR_BPIX_1 (0 << 25)
-#define PCR_BPIX_2 (1 << 25)
-#define PCR_BPIX_4 (2 << 25)
-#define PCR_BPIX_8 (3 << 25)
-#define PCR_BPIX_12 (4 << 25)
-#define PCR_BPIX_16 (5 << 25)
-#define PCR_BPIX_18 (6 << 25)
-#define PCR_PIXPOL (1 << 24)
-#define PCR_FLMPOL (1 << 23)
-#define PCR_LPPOL (1 << 22)
-#define PCR_CLKPOL (1 << 21)
-#define PCR_OEPOL (1 << 20)
-#define PCR_SCLKIDLE (1 << 19)
-#define PCR_END_SEL (1 << 18)
-#define PCR_END_BYTE_SWAP (1 << 17)
-#define PCR_REV_VS (1 << 16)
-#define PCR_ACD_SEL (1 << 15)
-#define PCR_ACD(x) (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL (1 << 7)
-#define PCR_SHARP (1 << 6)
-#define PCR_PCD(x) ((x) & 0x3f)
-
-#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK (1 << 15)
-#define PWMR_SCR1 (1 << 10)
-#define PWMR_SCR0 (1 << 9)
-#define PWMR_CC_EN (1 << 8)
-#define PWMR_PW(x) ((x) & 0xff)
-
-#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x) (((x) & 0xf))
-
-struct imx_fb_videomode {
- struct fb_videomode mode;
- u32 pcr;
- bool aus_mode;
- unsigned char bpp;
-};
-
-struct imx_fb_platform_data {
- struct imx_fb_videomode *mode;
- int num_modes;
-
- u_int pwmr;
- u_int lscr1;
- u_int dmacr;
-
- int (*init)(struct platform_device *);
- void (*exit)(struct platform_device *);
-};
-
-#endif /* ifndef __MACH_IMXFB_H__ */
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
deleted file mode 100644
index d03dc322a616..000000000000
--- a/include/linux/platform_data/video-mx3fb.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef __ASM_ARCH_MX3FB_H__
-#define __ASM_ARCH_MX3FB_H__
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-/* Proprietary FB_SYNC_ flags */
-#define FB_SYNC_OE_ACT_HIGH 0x80000000
-#define FB_SYNC_CLK_INVERT 0x40000000
-#define FB_SYNC_DATA_INVERT 0x20000000
-#define FB_SYNC_CLK_IDLE_EN 0x10000000
-#define FB_SYNC_SHARP_MODE 0x08000000
-#define FB_SYNC_SWAP_RGB 0x04000000
-#define FB_SYNC_CLK_SEL_EN 0x02000000
-
-/*
- * Specify the way your display is connected. The IPU can arbitrarily
- * map the internal colors to the external data lines. We only support
- * the following mappings at the moment.
- */
-enum disp_data_mapping {
- /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
- IPU_DISP_DATA_MAPPING_RGB666,
- /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
- IPU_DISP_DATA_MAPPING_RGB565,
- /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
- IPU_DISP_DATA_MAPPING_RGB888,
-};
-
-/**
- * struct mx3fb_platform_data - mx3fb platform data
- *
- * @dma_dev: pointer to the dma-device, used for dma-slave connection
- * @mode: pointer to a platform-provided per mxc_register_fb() videomode
- */
-struct mx3fb_platform_data {
- struct device *dma_dev;
- const char *name;
- const struct fb_videomode *mode;
- int num_modes;
- enum disp_data_mapping disp_data_fmt;
-};
-
-#endif
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index b3d574778326..6333bac166a5 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -8,7 +8,6 @@
*/
#include <linux/fb.h>
-#include <mach/regs-lcd.h>
/*
* Supported LCD connections
@@ -153,6 +152,27 @@ struct pxafb_mach_info {
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
unsigned long pxafb_get_hsync_time(struct device *dev);
+/* smartpanel related */
+#define SMART_CMD_A0 (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND (0x1 << 9)
+#define SMART_CMD_WRITE_DATA ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC (0x3 << 9)
+#define SMART_CMD_NOOP (0x4 << 9)
+#define SMART_CMD_INTERRUPT (0x5 << 9)
+
+#define SMART_CMD(x) (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x) (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+
+/* SMART_DELAY() provides a software-controlled delay primitive that can be
+ * inserted between command sequences; the otherwise unused command 0x6 is
+ * used for it, and the delay ranges from 0 ms to 255 ms.
+ */
+#define SMART_CMD_DELAY (0x6 << 9)
+#define SMART_DELAY(ms) (SMART_CMD_DELAY | ((ms) & 0xff))
+
#ifdef CONFIG_FB_PXA_SMARTPANEL
extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
extern int pxafb_smart_flush(struct fb_info *info);
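The new smartpanel macros compose into command sequences fed to pxafb_smart_queue(). A hedged sketch of an init sequence (the 0x01/0x29 register values are made-up panel commands, not from any datasheet):

	static void smart_lcd_init(struct fb_info *info)
	{
		uint16_t cmds[] = {
			SMART_CMD(0x01),		/* hypothetical: soft reset */
			SMART_DELAY(120),		/* 120 ms delay via unused command 0x6 */
			SMART_CMD(0x29),		/* hypothetical: display on */
			SMART_DAT(0x00),
			SMART_CMD_WAIT_FOR_VSYNC,
		};

		pxafb_smart_queue(info, cmds, ARRAY_SIZE(cmds));
		pxafb_smart_flush(info);
	}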
diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h
index 43e8da9fb447..6d74e507dbd2 100644
--- a/include/linux/platform_data/voltage-omap.h
+++ b/include/linux/platform_data/voltage-omap.h
@@ -29,7 +29,6 @@ struct omap_volt_data {
struct voltagedomain;
struct voltagedomain *voltdm_lookup(const char *name);
-int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt);
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm);
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
unsigned long volt);
diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h
deleted file mode 100644
index d525a0feb9e1..000000000000
--- a/include/linux/platform_data/wan_ixp4xx_hss.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H
-#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H
-
-#include <linux/types.h>
-
-/* Information about built-in HSS (synchronous serial) interfaces */
-struct hss_plat_info {
- int (*set_clock)(int port, unsigned int clock_type);
- int (*open)(int port, void *pdev,
- void (*set_carrier_cb)(void *pdev, int carrier));
- void (*close)(int port, void *pdev);
- u8 txreadyq;
- u32 timer_freq;
-};
-
-#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 897b8332a39f..ab1c7deff118 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -49,6 +49,7 @@
#define ASUS_WMI_DEVID_LED4 0x00020014
#define ASUS_WMI_DEVID_LED5 0x00020015
#define ASUS_WMI_DEVID_LED6 0x00020016
+#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
@@ -57,11 +58,19 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+/* This can only be used to disable the screen, not to re-enable it */
+#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
+/* Writing a brightness re-enables the screen if disabled */
+#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
/* Misc */
+#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
#define ASUS_WMI_DEVID_CAMERA 0x00060013
+#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -75,6 +84,20 @@
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
+#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_MID_FAN_CTRL 0x00110031
+#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
+#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
+#define ASUS_WMI_DEVID_MID_FAN_CURVE 0x00110032
+
+/* Tunables for ASUS ROG laptops */
+#define ASUS_WMI_DEVID_PPT_PL2_SPPT 0x001200A0
+#define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3
+#define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0
+#define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1
+#define ASUS_WMI_DEVID_PPT_FPPT 0x001200C1
+#define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0
+#define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
@@ -88,6 +111,29 @@
/* Keyboard dock */
#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
+/* Charging mode - 1=Barrel, 2=USB */
+#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+
+/* MCU powersave mode */
+#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2
+
+/* eGPU connected? 1 == true */
+#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
+/* egpu on/off */
+#define ASUS_WMI_DEVID_EGPU 0x00090019
+
+/* dgpu on/off */
+#define ASUS_WMI_DEVID_DGPU 0x00090020
+
+/* gpu mux switch, 0 = dGPU, 1 = Optimus */
+#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
+
+/* TUF laptop RGB modes/colours */
+#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+
+/* TUF laptop RGB power/state */
+#define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 207e1a317800..7f132029316a 100644
--- a/include/linux/platform_data/x86/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -15,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpt_clk_init(void);
+int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/x86/mlxcpld.h b/include/linux/platform_data/x86/mlxcpld.h
deleted file mode 100644
index b08dcb183fca..000000000000
--- a/include/linux/platform_data/x86/mlxcpld.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
- *
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _LINUX_I2C_MLXCPLD_H
-#define _LINUX_I2C_MLXCPLD_H
-
-/* Platform data for the CPLD I2C multiplexers */
-
-/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
- * @adap_ids - adapter array
- * @num_adaps - number of adapters
- * @sel_reg_addr - mux select register offset in CPLD space
- */
-struct mlxcpld_mux_plat_data {
- int *adap_ids;
- int num_adaps;
- int sel_reg_addr;
-};
-
-#endif /* _LINUX_I2C_MLXCPLD_H */
diff --git a/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
new file mode 100644
index 000000000000..23d60130272c
--- /dev/null
+++ b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+#define __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+
+#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
+
+/**
+ * enum wmi_brightness_method - WMI method IDs
+ * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
+ * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
+ */
+enum wmi_brightness_method {
+ WMI_BRIGHTNESS_METHOD_LEVEL = 1,
+ WMI_BRIGHTNESS_METHOD_SOURCE = 2,
+ WMI_BRIGHTNESS_METHOD_MAX
+};
+
+/**
+ * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
+ * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
+ * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
+ * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
+ * is only valid when the WMI method is
+ * %WMI_BRIGHTNESS_METHOD_LEVEL.
+ */
+enum wmi_brightness_mode {
+ WMI_BRIGHTNESS_MODE_GET = 0,
+ WMI_BRIGHTNESS_MODE_SET = 1,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
+ WMI_BRIGHTNESS_MODE_MAX
+};
+
+/**
+ * enum wmi_brightness_source - Backlight brightness control source selection
+ * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
+ * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
+ * system's Embedded Controller (EC).
+ * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
+ * DisplayPort AUX channel.
+ */
+enum wmi_brightness_source {
+ WMI_BRIGHTNESS_SOURCE_GPU = 1,
+ WMI_BRIGHTNESS_SOURCE_EC = 2,
+ WMI_BRIGHTNESS_SOURCE_AUX = 3,
+ WMI_BRIGHTNESS_SOURCE_MAX
+};
+
+/**
+ * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
+ * @mode: Pass in an &enum wmi_brightness_mode value to select between
+ * getting or setting a value.
+ * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
+ * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
+ * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
+ * @ret: Out parameter returning retrieved value when operating in
+ * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
+ * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
+ * @ignored: Padding; not used. The ACPI method expects a 24-byte params struct.
+ *
+ * This is the parameters structure for the WmiBrightnessNotify ACPI method as
+ * wrapped by WMI. The value passed in to @val or returned by @ret will be a
+ * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
+ * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
+ */
+struct wmi_brightness_args {
+ u32 mode;
+ u32 val;
+ u32 ret;
+ u32 ignored[3];
+};
+
+#endif
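A hedged sketch of how a WMI driver might issue a get-level call with this parameter struct (the wmidev_evaluate_method() plumbing is an assumption about the caller, not part of this header):

	static int wmi_brightness_get_level(struct wmi_device *wdev, u32 *level)
	{
		struct wmi_brightness_args args = {
			.mode = WMI_BRIGHTNESS_MODE_GET,
		};
		struct acpi_buffer buf = { sizeof(args), &args };
		acpi_status status;

		/* the same 24-byte buffer is used for input and output */
		status = wmidev_evaluate_method(wdev, 0, WMI_BRIGHTNESS_METHOD_LEVEL,
						&buf, &buf);
		if (ACPI_FAILURE(status))
			return -EIO;

		*level = args.ret;
		return 0;
	}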
diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h
new file mode 100644
index 000000000000..a1d5fddc8f13
--- /dev/null
+++ b/include/linux/platform_data/x86/p2sb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ */
+
+#ifndef _PLATFORM_DATA_X86_P2SB_H
+#define _PLATFORM_DATA_X86_P2SB_H
+
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+struct pci_bus;
+struct resource;
+
+#if IS_BUILTIN(CONFIG_P2SB)
+
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem);
+
+#else /* CONFIG_P2SB */
+
+static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_P2SB is not set */
+
+#endif /* _PLATFORM_DATA_X86_P2SB_H */
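A hedged usage sketch: a caller resolves the hidden P2SB device's BAR and maps a window of it (passing a NULL bus and devfn 0 to fall back to the platform defaults is an assumption about the implementation):

	struct resource mem;
	void __iomem *regs;
	int ret;

	ret = p2sb_bar(NULL, 0, &mem);
	if (ret)
		return ret;

	regs = ioremap(mem.start, resource_size(&mem));
	if (!regs)
		return -ENOMEM;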
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 022bcea9edec..161e4bc1c9ee 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Intel Atom SOC Power Management Controller Header File
- * Copyright (c) 2014, Intel Corporation.
+ * Intel Atom SoC Power Management Controller Header File
+ * Copyright (c) 2014-2015,2022 Intel Corporation.
*/
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -41,13 +43,26 @@
BIT_ORED_DEDICATED_IRQ_GPSC | \
BIT_SHARED_IRQ_GPSS)
+/* External clk generator settings */
+#define PMC_CLK_CTL_OFFSET 0x60
+#define PMC_CLK_CTL_SIZE 4
+#define PMC_CLK_NUM 6
+#define PMC_CLK_CTL_GATED_ON_D3 0x0
+#define PMC_CLK_CTL_FORCE_ON 0x1
+#define PMC_CLK_CTL_FORCE_OFF 0x2
+#define PMC_CLK_CTL_RESERVED 0x3
+#define PMC_MASK_CLK_CTL GENMASK(1, 0)
+#define PMC_MASK_CLK_FREQ BIT(2)
+#define PMC_CLK_FREQ_XTAL (0 << 2) /* 25 MHz */
+#define PMC_CLK_FREQ_PLL (1 << 2) /* 19.2 MHz */
+
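A hedged sketch of decoding one clock's control field with the new defines, using the pmc_atom_read() accessor declared at the end of this header:

	static int pmc_clk_is_forced_on(int clk)	/* clk: 0 .. PMC_CLK_NUM - 1 */
	{
		u32 ctl;
		int ret;

		ret = pmc_atom_read(PMC_CLK_CTL_OFFSET + clk * PMC_CLK_CTL_SIZE, &ctl);
		if (ret)
			return ret;

		return (ctl & PMC_MASK_CLK_CTL) == PMC_CLK_CTL_FORCE_ON;
	}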
/* The timers accumulate time spent in sleep state */
#define PMC_S0IR_TMR 0x80
#define PMC_S0I1_TMR 0x84
#define PMC_S0I2_TMR 0x88
#define PMC_S0I3_TMR 0x8C
#define PMC_S0_TMR 0x90
-/* Sleep state counter is in units of of 32us */
+/* Sleep state counter is in units of 32us */
#define PMC_TMR_SHIFT 5
/* Power status of power islands */
@@ -102,14 +117,14 @@
#define BIT_SCC_SDIO BIT(9)
#define BIT_SCC_SDCARD BIT(10)
#define BIT_SCC_MIPI BIT(11)
-#define BIT_HDA BIT(12)
+#define BIT_HDA BIT(12) /* CHT datasheet: reserved */
#define BIT_LPE BIT(13)
#define BIT_OTG BIT(14)
-#define BIT_USH BIT(15)
-#define BIT_GBE BIT(16)
-#define BIT_SATA BIT(17)
-#define BIT_USB_EHCI BIT(18)
-#define BIT_SEC BIT(19)
+#define BIT_USH BIT(15) /* CHT datasheet: reserved */
+#define BIT_GBE BIT(16) /* CHT datasheet: reserved */
+#define BIT_SATA BIT(17) /* CHT datasheet: reserved */
+#define BIT_USB_EHCI BIT(18) /* CHT datasheet: XHCI! */
+#define BIT_SEC BIT(19) /* BYT datasheet: reserved */
#define BIT_PCIE_PORT0 BIT(20)
#define BIT_PCIE_PORT1 BIT(21)
#define BIT_PCIE_PORT2 BIT(22)
@@ -139,11 +154,10 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
-extern int pmc_atom_write(int offset, u32 value);
#endif /* PMC_ATOM_H */
diff --git a/include/linux/platform_data/x86/pwm-lpss.h b/include/linux/platform_data/x86/pwm-lpss.h
new file mode 100644
index 000000000000..752c06b47cc8
--- /dev/null
+++ b/include/linux/platform_data/x86/pwm-lpss.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Intel Low Power Subsystem PWM controller driver */
+
+#ifndef __PLATFORM_DATA_X86_PWM_LPSS_H
+#define __PLATFORM_DATA_X86_PWM_LPSS_H
+
+#include <linux/types.h>
+
+struct device;
+
+struct pwm_lpss_chip;
+
+struct pwm_lpss_boardinfo {
+ unsigned long clk_rate;
+ unsigned int npwm;
+ unsigned long base_unit_bits;
+ /*
+	 * Some versions of the IP may get stuck in the state machine if the
+	 * enable bit is not set; the update bit then reports busy status
+	 * until reset. Other versions may behave differently.
+ */
+ bool bypass;
+ /*
+ * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
+	 * messes with the PWM0 controller's state.
+ */
+ bool other_devices_aml_touches_pwm_regs;
+};
+
+struct pwm_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
+ const struct pwm_lpss_boardinfo *info);
+
+#endif /* __PLATFORM_DATA_X86_PWM_LPSS_H */
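A hedged sketch of a glue driver filling in the board info and probing (the clock rate and bit width mirror typical LPSS values but are illustrative here):

	static const struct pwm_lpss_boardinfo hypothetical_info = {
		.clk_rate = 19200000,
		.npwm = 1,
		.base_unit_bits = 22,
	};

	chip = devm_pwm_lpss_probe(dev, base, &hypothetical_info);
	if (IS_ERR(chip))
		return PTR_ERR(chip);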
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
new file mode 100644
index 000000000000..2d7f7120ba6b
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+
+#include <linux/types.h>
+
+#define SIMATIC_IPC_DEVICE_NONE 0
+#define SIMATIC_IPC_DEVICE_227D 1
+#define SIMATIC_IPC_DEVICE_427E 2
+#define SIMATIC_IPC_DEVICE_127E 3
+#define SIMATIC_IPC_DEVICE_227E 4
+#define SIMATIC_IPC_DEVICE_227G 5
+#define SIMATIC_IPC_DEVICE_BX_21A 6
+#define SIMATIC_IPC_DEVICE_BX_39A 7
+#define SIMATIC_IPC_DEVICE_BX_59A 8
+
+struct simatic_ipc_platform {
+ u8 devmode;
+};
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H */
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
new file mode 100644
index 000000000000..8d8b3b919674
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_H
+
+#include <linux/dmi.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#define SIMATIC_IPC_DMI_ENTRY_OEM 129
+/* binary type */
+#define SIMATIC_IPC_DMI_TYPE 0xff
+#define SIMATIC_IPC_DMI_GROUP 0x05
+#define SIMATIC_IPC_DMI_ENTRY 0x02
+#define SIMATIC_IPC_DMI_TID 0x02
+
+enum simatic_ipc_station_ids {
+ SIMATIC_IPC_INVALID_STATION_ID = 0,
+ SIMATIC_IPC_IPC227D = 0x00000501,
+ SIMATIC_IPC_IPC427D = 0x00000701,
+ SIMATIC_IPC_IPC227E = 0x00000901,
+ SIMATIC_IPC_IPC277E = 0x00000902,
+ SIMATIC_IPC_IPC427E = 0x00000A01,
+ SIMATIC_IPC_IPC477E = 0x00000A02,
+ SIMATIC_IPC_IPC127E = 0x00000D01,
+ SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC277G = 0x00000F02,
+ SIMATIC_IPC_IPCBX_39A = 0x00001001,
+ SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ SIMATIC_IPC_IPCBX_21A = 0x00001101,
+ SIMATIC_IPC_IPCBX_56A = 0x00001201,
+ SIMATIC_IPC_IPCBX_59A = 0x00001202,
+};
+
+static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+{
+ struct {
+ u8 type; /* type (0xff = binary) */
+ u8 len; /* len of data entry */
+ u8 group;
+ u8 entry;
+ u8 tid;
+ __le32 station_id; /* station id (LE) */
+ } __packed * data_entry = (void *)data + sizeof(struct dmi_header);
+
+ while ((u8 *)data_entry < data + max_len) {
+ if (data_entry->type == SIMATIC_IPC_DMI_TYPE &&
+ data_entry->len == sizeof(*data_entry) &&
+ data_entry->group == SIMATIC_IPC_DMI_GROUP &&
+ data_entry->entry == SIMATIC_IPC_DMI_ENTRY &&
+ data_entry->tid == SIMATIC_IPC_DMI_TID) {
+ return le32_to_cpu(data_entry->station_id);
+ }
+ data_entry = (void *)((u8 *)(data_entry) + data_entry->len);
+ }
+
+ return SIMATIC_IPC_INVALID_STATION_ID;
+}
+
+static inline void
+simatic_ipc_find_dmi_entry_helper(const struct dmi_header *dh, void *_data)
+{
+ u32 *id = _data;
+
+ if (dh->type != SIMATIC_IPC_DMI_ENTRY_OEM)
+ return;
+
+ *id = simatic_ipc_get_station_id((u8 *)dh, dh->length);
+}
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_H */
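A hedged sketch of how a driver could use the helper above together with dmi_walk() to discover the station id:

	u32 station_id = SIMATIC_IPC_INVALID_STATION_ID;

	dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id);
	if (station_id == SIMATIC_IPC_INVALID_STATION_ID)
		return -ENODEV;	/* not a SIMATIC IPC */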
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
new file mode 100644
index 000000000000..a5705189e2ac
--- /dev/null
+++ b/include/linux/platform_data/x86/soc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers for Intel SoC model detection
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_DATA_X86_SOC_H
+#define __PLATFORM_DATA_X86_SOC_H
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_X86)
+
+#include <linux/mod_devicetable.h>
+
+#include <asm/cpu_device_id.h>
+
+#define SOC_INTEL_IS_CPU(soc, type) \
+static inline bool soc_intel_is_##soc(void) \
+{ \
+ static const struct x86_cpu_id soc##_cpu_ids[] = { \
+ X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \
+ {} \
+ }; \
+ const struct x86_cpu_id *id; \
+ \
+ id = x86_match_cpu(soc##_cpu_ids); \
+ if (id) \
+ return true; \
+ return false; \
+}
+
+SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+
+#undef SOC_INTEL_IS_CPU
+
+#else /* IS_ENABLED(CONFIG_X86) */
+
+static inline bool soc_intel_is_byt(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cht(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_apl(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_glk(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cml(void)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_X86) */
+
+#endif /* __PLATFORM_DATA_X86_SOC_H */
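A hedged sketch of the intended call sites: drivers branch on the SoC generation without open-coding x86 CPU model tables (the quirk hook is hypothetical):

	if (soc_intel_is_byt() || soc_intel_is_cht())
		apply_bytcht_quirk();	/* hypothetical Bay/Cherry Trail quirk */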
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/x86/spi-intel.h
index 7f53a5c6f35e..a512ec37abbb 100644
--- a/include/linux/platform_data/intel-spi.h
+++ b/include/linux/platform_data/x86/spi-intel.h
@@ -6,8 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#ifndef INTEL_SPI_PDATA_H
-#define INTEL_SPI_PDATA_H
+#ifndef SPI_INTEL_PDATA_H
+#define SPI_INTEL_PDATA_H
enum intel_spi_type {
INTEL_SPI_BYT = 1,
@@ -19,11 +19,13 @@ enum intel_spi_type {
/**
* struct intel_spi_boardinfo - Board specific data for Intel SPI driver
* @type: Type which this controller is compatible with
- * @writeable: The chip is writeable
+ * @set_writeable: Try to make the chip writeable (optional)
+ * @data: Data to be passed to @set_writeable; can be %NULL
*/
struct intel_spi_boardinfo {
enum intel_spi_type type;
- bool writeable;
+ bool (*set_writeable)(void __iomem *base, void *data);
+ void *data;
};
-#endif /* INTEL_SPI_PDATA_H */
+#endif /* SPI_INTEL_PDATA_H */
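The writeable flag becomes a callback, so chipset glue can attempt to unlock the flash at probe time. A hedged sketch of the new style (the unlock logic is a placeholder):

	static bool hypothetical_set_writeable(void __iomem *base, void *data)
	{
		/* e.g. flip a BIOS-control bit behind @base; claim success here */
		return true;
	}

	static const struct intel_spi_boardinfo byt_info = {
		.type = INTEL_SPI_BYT,
		.set_writeable = hypothetical_set_writeable,
	};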
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 77a2aada106d..7a41c72c1959 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -15,6 +15,7 @@
#define PLATFORM_DEVID_NONE (-1)
#define PLATFORM_DEVID_AUTO (-2)
+struct irq_affinity;
struct mfd_cell;
struct property_entry;
struct platform_device_id;
@@ -30,7 +31,11 @@ struct platform_device {
struct resource *resource;
const struct platform_device_id *id_entry;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
@@ -52,9 +57,14 @@ extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
unsigned int, unsigned int);
+extern struct resource *platform_get_mem_or_io(struct platform_device *,
+ unsigned int);
+
extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
+
+#ifdef CONFIG_HAS_IOMEM
extern void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res);
@@ -62,14 +72,42 @@ extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
extern void __iomem *
-devm_platform_ioremap_resource_wc(struct platform_device *pdev,
- unsigned int index);
-extern void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name);
+#else
+
+static inline void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+
+static inline void __iomem *
+devm_platform_ioremap_resource(struct platform_device *pdev,
+ unsigned int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif
+
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
+extern int devm_platform_get_irqs_affinity(struct platform_device *dev,
+ struct irq_affinity *affd,
+ unsigned int minvec,
+ unsigned int maxvec,
+ int **irqs);
extern struct resource *platform_get_resource_byname(struct platform_device *,
unsigned int,
const char *);
@@ -191,21 +229,38 @@ extern int platform_device_add_resources(struct platform_device *pdev,
unsigned int num);
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
-extern int platform_device_add_properties(struct platform_device *pdev,
- const struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
struct platform_driver {
int (*probe)(struct platform_device *);
+
+ /*
+	 * Traditionally the remove callback returned an int, which however is
+	 * ignored by the driver core. This led to wrong expectations by driver
+	 * authors who thought that returning an error code was a valid error
+	 * handling strategy. To convert to a callback returning void, new
+	 * drivers should implement .remove_new() until the conversion is done;
+	 * eventually .remove() itself will return void.
+ */
int (*remove)(struct platform_device *);
+ void (*remove_new)(struct platform_device *);
+
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
struct device_driver driver;
const struct platform_device_id *id_table;
bool prevent_deferred_probe;
+ /*
+	 * Most device drivers need not care about this flag as long as all
+	 * DMA is handled through the kernel DMA API. Some special drivers,
+	 * for example VFIO drivers, know how to manage DMA themselves and
+	 * set this flag so that the IOMMU layer will allow them to set up
+	 * and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
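A hedged sketch of a driver opting into the void-returning callback (foo_probe() is a hypothetical probe routine):

	static void foo_remove(struct platform_device *pdev)
	{
		/* tear down; errors cannot be reported from here anyway */
	}

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.remove_new = foo_remove,
		.driver = {
			.name = "foo",
		},
	};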
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
@@ -324,8 +379,6 @@ extern int platform_pm_restore(struct device *dev);
#define platform_pm_restore NULL
#endif
-extern int platform_dma_configure(struct device *dev);
-
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
.suspend = platform_pm_suspend, \
@@ -350,4 +403,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
}
#endif /* CONFIG_SUPERH */
+/* For now only SuperH uses it */
+void early_platform_cleanup(void);
+
#endif /* _PLATFORM_DEVICE_H_ */
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
new file mode 100644
index 000000000000..e5cbb6841f3a
--- /dev/null
+++ b/include/linux/platform_profile.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Platform profile sysfs interface
+ *
+ * See Documentation/userspace-api/sysfs-platform_profile.rst for more
+ * information.
+ */
+
+#ifndef _PLATFORM_PROFILE_H_
+#define _PLATFORM_PROFILE_H_
+
+#include <linux/bitops.h>
+
+/*
+ * If more options are added please update profile_names array in
+ * platform_profile.c and sysfs-platform_profile documentation.
+ */
+
+enum platform_profile_option {
+ PLATFORM_PROFILE_LOW_POWER,
+ PLATFORM_PROFILE_COOL,
+ PLATFORM_PROFILE_QUIET,
+ PLATFORM_PROFILE_BALANCED,
+ PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ PLATFORM_PROFILE_PERFORMANCE,
+	PLATFORM_PROFILE_LAST, /* must always be last */
+};
+
+struct platform_profile_handler {
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int (*profile_get)(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile);
+ int (*profile_set)(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile);
+};
+
+int platform_profile_register(struct platform_profile_handler *pprof);
+int platform_profile_remove(void);
+void platform_profile_notify(void);
+
+#endif /*_PLATFORM_PROFILE_H_*/
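A hedged sketch of a platform driver wiring up a handler (the EC accesses are stubbed out; only the registration pattern is the point):

	static int foo_profile_get(struct platform_profile_handler *pprof,
				   enum platform_profile_option *profile)
	{
		*profile = PLATFORM_PROFILE_BALANCED;	/* hypothetical EC readback */
		return 0;
	}

	static int foo_profile_set(struct platform_profile_handler *pprof,
				   enum platform_profile_option profile)
	{
		return 0;				/* hypothetical EC write */
	}

	static struct platform_profile_handler foo_pprof = {
		.profile_get = foo_profile_get,
		.profile_set = foo_profile_set,
	};

	/* advertise the supported profiles, then register */
	set_bit(PLATFORM_PROFILE_LOW_POWER, foo_pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, foo_pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, foo_pprof.choices);
	platform_profile_register(&foo_pprof);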
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 66bab1bca35c..8c1c8adf7fe9 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -73,18 +73,11 @@
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
-#include <linux/kernel.h>
+#include <linux/container_of.h>
#include <linux/list.h>
+#include <linux/plist_types.h>
-struct plist_head {
- struct list_head node_list;
-};
-
-struct plist_node {
- int prio;
- struct list_head prio_list;
- struct list_head node_list;
-};
+#include <asm/bug.h>
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
diff --git a/include/linux/plist_types.h b/include/linux/plist_types.h
new file mode 100644
index 000000000000..c37e784330af
--- /dev/null
+++ b/include/linux/plist_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_PLIST_TYPES_H
+#define _LINUX_PLIST_TYPES_H
+
+#include <linux/types.h>
+
+struct plist_head {
+ struct list_head node_list;
+};
+
+struct plist_node {
+ int prio;
+ struct list_head prio_list;
+ struct list_head node_list;
+};
+
+#endif /* _LINUX_PLIST_TYPES_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index a30a4b54df52..97b0e23363c8 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -20,7 +21,6 @@
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
-extern void (*pm_power_off_prepare)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
@@ -35,11 +35,19 @@ static inline void pm_vt_switch_unregister(struct device *dev)
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
+#ifdef CONFIG_CXL_SUSPEND
+bool cxl_mem_active(void);
+#else
+static inline bool cxl_mem_active(void)
+{
+ return false;
+}
+#endif
+
/*
* Device power management
*/
-struct device;
#ifdef CONFIG_PM
extern const char power_group_name[]; /* = "power" */
@@ -301,55 +309,133 @@ struct dev_pm_ops {
int (*runtime_idle)(struct device *dev);
};
+#define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend = pm_sleep_ptr(suspend_fn), \
+ .resume = pm_sleep_ptr(resume_fn), \
+ .freeze = pm_sleep_ptr(suspend_fn), \
+ .thaw = pm_sleep_ptr(resume_fn), \
+ .poweroff = pm_sleep_ptr(suspend_fn), \
+ .restore = pm_sleep_ptr(resume_fn),
+
+#define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_late = pm_sleep_ptr(suspend_fn), \
+ .resume_early = pm_sleep_ptr(resume_fn), \
+ .freeze_late = pm_sleep_ptr(suspend_fn), \
+ .thaw_early = pm_sleep_ptr(resume_fn), \
+ .poweroff_late = pm_sleep_ptr(suspend_fn), \
+ .restore_early = pm_sleep_ptr(resume_fn),
+
+#define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = pm_sleep_ptr(suspend_fn), \
+ .resume_noirq = pm_sleep_ptr(resume_fn), \
+ .freeze_noirq = pm_sleep_ptr(suspend_fn), \
+ .thaw_noirq = pm_sleep_ptr(resume_fn), \
+ .poweroff_noirq = pm_sleep_ptr(suspend_fn), \
+ .restore_noirq = pm_sleep_ptr(resume_fn),
+
+#define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ .runtime_suspend = suspend_fn, \
+ .runtime_resume = resume_fn, \
+ .runtime_idle = idle_fn,
+
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend = suspend_fn, \
- .resume = resume_fn, \
- .freeze = suspend_fn, \
- .thaw = resume_fn, \
- .poweroff = suspend_fn, \
- .restore = resume_fn,
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_late = suspend_fn, \
- .resume_early = resume_fn, \
- .freeze_late = suspend_fn, \
- .thaw_early = resume_fn, \
- .poweroff_late = suspend_fn, \
- .restore_early = resume_fn,
+ LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_noirq = suspend_fn, \
- .resume_noirq = resume_fn, \
- .freeze_noirq = suspend_fn, \
- .thaw_noirq = resume_fn, \
- .poweroff_noirq = suspend_fn, \
- .restore_noirq = resume_fn,
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
- .runtime_suspend = suspend_fn, \
- .runtime_resume = resume_fn, \
- .runtime_idle = idle_fn,
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
+#define _DEFINE_DEV_PM_OPS(name, \
+ suspend_fn, resume_fn, \
+ runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+const struct dev_pm_ops name = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+}
+
+#define _EXPORT_PM_OPS(name, license, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, license, ns); \
+ const struct dev_pm_ops name
+
+#define _DISCARD_PM_OPS(name, license, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
+#else
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+#define EXPORT_PM_FN_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#else
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+#endif
+
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
+
+#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
+
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_SIMPLE_DEV_PM_OPS() or EXPORT_GPL_SIMPLE_DEV_PM_OPS() instead.
*/
+#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+
+/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
@@ -367,6 +453,9 @@ const struct dev_pm_ops __maybe_unused name = { \
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
+ *
+ * Deprecated. You most likely don't want this macro. Use
+ * DEFINE_RUNTIME_DEV_PM_OPS() instead.
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops __maybe_unused name = { \
@@ -374,11 +463,17 @@ const struct dev_pm_ops __maybe_unused name = { \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
-#ifdef CONFIG_PM
-#define pm_ptr(_ptr) (_ptr)
-#else
-#define pm_ptr(_ptr) NULL
-#endif
+/*
+ * Use this if you want to have the suspend and resume callbacks be called
+ * with IRQs disabled.
+ */
+#define DEFINE_NOIRQ_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops name = { \
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+}
+
+#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
+#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
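A hedged sketch of how the new macros and pm_sleep_ptr() combine in a driver: the callbacks stay syntactically referenced in every configuration, so they always compile, while the pointer collapses to NULL and the code is discarded when CONFIG_PM_SLEEP is off (foo_suspend/foo_resume are hypothetical):

	static int foo_suspend(struct device *dev) { return 0; }
	static int foo_resume(struct device *dev) { return 0; }

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm = pm_sleep_ptr(&foo_pm_ops),
		},
	};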
/*
* PM_EVENT_ messages
@@ -500,6 +595,7 @@ const struct dev_pm_ops __maybe_unused name = { \
*/
enum rpm_status {
+ RPM_INVALID = -1,
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
@@ -537,6 +633,8 @@ struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;
#ifdef CONFIG_PM_CLK
+ unsigned int clock_op_might_sleep;
+ struct mutex clock_mutex;
struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -564,8 +662,8 @@ struct pm_subsys_data {
struct dev_pm_info {
pm_message_t power_state;
- unsigned int can_wakeup:1;
- unsigned int async_suspend:1;
+ bool can_wakeup:1;
+ bool async_suspend:1;
bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
@@ -583,33 +681,36 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
- unsigned int must_resume:1; /* Owned by the PM core */
- unsigned int may_skip_resume:1; /* Set by subsystems */
+ bool async_in_progress:1; /* Owned by the PM core */
+ bool must_resume:1; /* Owned by the PM core */
+ bool may_skip_resume:1; /* Set by subsystems */
#else
- unsigned int should_wakeup:1;
+ bool should_wakeup:1;
#endif
#ifdef CONFIG_PM
struct hrtimer suspend_timer;
- unsigned long timer_expires;
+ u64 timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
- unsigned int idle_notification:1;
- unsigned int request_pending:1;
- unsigned int deferred_resume:1;
- unsigned int runtime_auto:1;
+ bool idle_notification:1;
+ bool request_pending:1;
+ bool deferred_resume:1;
+ bool needs_force_resume:1;
+ bool runtime_auto:1;
bool ignore_children:1;
- unsigned int no_callbacks:1;
- unsigned int irq_safe:1;
- unsigned int use_autosuspend:1;
- unsigned int timer_autosuspends:1;
- unsigned int memalloc_noio:1;
+ bool no_callbacks:1;
+ bool irq_safe:1;
+ bool use_autosuspend:1;
+ bool timer_autosuspends:1;
+ bool memalloc_noio:1;
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
+ enum rpm_status last_status;
int runtime_error;
int autosuspend_delay;
u64 last_busy;
@@ -634,6 +735,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
* @dismiss: Called after unsuccessful driver probe and after driver removal.
+ * @set_performance_state: Called to request a new performance state.
*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions instead of
@@ -646,6 +748,7 @@ struct dev_pm_domain {
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
+ int (*set_performance_state)(struct device *dev, unsigned int state);
};
/*
@@ -719,11 +822,11 @@ extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
-extern void __suspend_report_result(const char *function, void *fn, int ret);
+extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret);
-#define suspend_report_result(fn, ret) \
+#define suspend_report_result(dev, fn, ret) \
do { \
- __suspend_report_result(__func__, fn, ret); \
+ __suspend_report_result(__func__, dev, fn, ret); \
} while (0)
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
@@ -763,7 +866,7 @@ static inline int dpm_suspend_start(pm_message_t state)
return 0;
}
-#define suspend_report_result(fn, ret) do {} while (0)
+#define suspend_report_result(dev, fn, ret) do {} while (0)
static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
deleted file mode 100644
index b8fac96f05aa..000000000000
--- a/include/linux/pm2301_charger.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PM2301 charger driver.
- *
- * Copyright (C) 2012 ST Ericsson Corporation
- *
- * Contact: Olivier LAUNAY (olivier.launay@stericsson.com)
- */
-
-#ifndef __LINUX_PM2301_H
-#define __LINUX_PM2301_H
-
-/**
- * struct pm2xxx_bm_charger_parameters - Charger specific parameters
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct pm2xxx_bm_charger_parameters {
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct pm2xxx_bm_data - pm2xxx battery management data
- * @enable_overshoot: flag to enable VBAT overshoot control
- * @chg_params: charger parameters
- */
-struct pm2xxx_bm_data {
- bool enable_overshoot;
- const struct pm2xxx_bm_charger_parameters *chg_params;
-};
-
-struct pm2xxx_charger_platform_data {
- char **supplied_to;
- size_t num_supplicants;
- int i2c_bus;
- const char *label;
- int gpio_irq_number;
- unsigned int lpn_gpio;
- int irq_type;
-};
-
-struct pm2xxx_platform_data {
- struct pm2xxx_charger_platform_data *wall_charger;
- struct pm2xxx_bm_data *battery;
-};
-
-#endif /* __LINUX_PM2301_H */
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 8ddc7860e131..68669ce18720 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -47,6 +47,7 @@ extern void pm_clk_remove(struct device *dev, const char *con_id);
extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
+extern int devm_pm_clk_create(struct device *dev);
#else
static inline bool pm_clk_no_clocks(struct device *dev)
{
@@ -83,13 +84,17 @@ static inline void pm_clk_remove(struct device *dev, const char *con_id)
static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
{
}
+static inline int devm_pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
#endif
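
A sketch of how the new managed helper would typically pair with pm_clk_add() (declared elsewhere in this same header) in a probe path; the driver is hypothetical:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int my_probe(struct platform_device *pdev)
{
	int ret;

	/* Managed: the clock list is released automatically on unbind. */
	ret = devm_pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* Add the device's (first) clock to its PM clock list. */
	return pm_clk_add(&pdev->dev, NULL);
}
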
#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
+extern void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb);
#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
+static inline void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ee11502a575b..772d3280d35f 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -9,6 +9,7 @@
#define _LINUX_PM_DOMAIN_H
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/err.h>
@@ -16,6 +17,34 @@
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
+#include <linux/time64.h>
+
+/*
+ * Flags to control the behaviour when attaching a device to its PM domains.
+ *
+ * PD_FLAG_NO_DEV_LINK: As the default behaviour creates a device-link
+ * for every PM domain that gets attached, this
+ * flag can be used to skip that.
+ *
+ * PD_FLAG_DEV_LINK_ON: Add the DL_FLAG_RPM_ACTIVE flag to power on the
+ * supplier and its PM domain when creating the
+ * device-links.
+ *
+ */
+#define PD_FLAG_NO_DEV_LINK BIT(0)
+#define PD_FLAG_DEV_LINK_ON BIT(1)
+
+struct dev_pm_domain_attach_data {
+ const char * const *pd_names;
+ const u32 num_pd_names;
+ const u32 pd_flags;
+};
+
+struct dev_pm_domain_list {
+ struct device **pd_devs;
+ struct device_link **pd_links;
+ u32 num_pds;
+};
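
A sketch of a consumer attaching to several PM domains through the new list API; the domain names are hypothetical and would come from the consumer's DT binding:

#include <linux/pm_domain.h>

static int my_probe(struct device *dev)
{
	static const char * const pd_names[] = { "perf", "mem" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
		.pd_flags = PD_FLAG_DEV_LINK_ON,
	};
	struct dev_pm_domain_list *pd_list;
	int ret;

	ret = dev_pm_domain_attach_list(dev, &pd_data, &pd_list);
	if (ret < 0)
		return ret;

	/* ... drive each domain via pd_list->pd_devs[i] ... */
	dev_pm_domain_detach_list(pd_list);
	return 0;
}
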
/*
* Flags to control the behaviour of a genpd.
@@ -55,6 +84,14 @@
*
* GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain
* powered on except for system suspend.
+ *
+ * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
+ * components' next wakeup when determining the
+ * optimal idle state.
+ *
+ * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states,
+ * but its corresponding OPP tables are not
+ * described in DT; they are provided directly by FW.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -62,10 +99,19 @@
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
+#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
+#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
enum gpd_status {
- GPD_STATE_ACTIVE = 0, /* PM domain is active */
- GPD_STATE_POWER_OFF, /* PM domain is off */
+ GENPD_STATE_ON = 0, /* PM domain is on */
+ GENPD_STATE_OFF, /* PM domain is off */
+};
+
+enum genpd_notification {
+ GENPD_NOTIFY_PRE_OFF = 0,
+ GENPD_NOTIFY_OFF,
+ GENPD_NOTIFY_PRE_ON,
+ GENPD_NOTIFY_ON,
};
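
The four notification values pair up around each power-off and power-on transition. A sketch of a consumer saving and restoring context from a genpd power notifier (hypothetical callback; registration shown in the trailing comment):

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int my_genpd_notify(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Save context before the domain powers down. */
		break;
	case GENPD_NOTIFY_ON:
		/* Restore context after the domain powers up. */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_genpd_notify };

/* In probe: dev_pm_genpd_add_notifier(dev, &my_nb); */
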
struct dev_power_governor {
@@ -78,17 +124,27 @@ struct gpd_dev_ops {
int (*stop)(struct device *dev);
};
+struct genpd_governor_data {
+ s64 max_off_time_ns;
+ bool max_off_time_changed;
+ ktime_t next_wakeup;
+ ktime_t next_hrtimer;
+ bool cached_power_down_ok;
+ bool cached_power_down_state_idx;
+};
+
struct genpd_power_state {
s64 power_off_latency_ns;
s64 power_on_latency_ns;
s64 residency_ns;
+ u64 usage;
+ u64 rejected;
struct fwnode_handle *fwnode;
- ktime_t idle_time;
+ u64 idle_time;
void *data;
};
struct genpd_lock_ops;
-struct dev_pm_opp;
struct opp_table;
struct generic_pm_domain {
@@ -99,6 +155,7 @@ struct generic_pm_domain {
struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
+ struct genpd_governor_data *gd; /* Data used by a genpd governor. */
struct work_struct power_off_work;
struct fwnode_handle *provider; /* Identity of the domain provider */
bool has_provider;
@@ -110,18 +167,14 @@ struct generic_pm_domain {
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
cpumask_var_t cpus; /* A cpumask of the attached CPUs */
+ bool synced_poweroff; /* A consumer needs a synced poweroff */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
struct opp_table *opp_table; /* OPP table of the genpd */
- unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
- struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
- s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
- bool max_off_time_changed;
- bool cached_power_down_ok;
- bool cached_power_down_state_idx;
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
@@ -132,8 +185,8 @@ struct generic_pm_domain {
unsigned int state_count);
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
- ktime_t on_time;
- ktime_t accounting_time;
+ u64 on_time;
+ u64 accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
@@ -165,6 +218,7 @@ struct gpd_timing_data {
s64 suspend_latency_ns;
s64 resume_latency_ns;
s64 effective_constraint_ns;
+ ktime_t next_wakeup;
bool constraint_changed;
bool cached_suspend_ok;
};
@@ -176,10 +230,13 @@ struct pm_domain_data {
struct generic_pm_domain_data {
struct pm_domain_data base;
- struct gpd_timing_data td;
+ struct gpd_timing_data *td;
struct notifier_block nb;
+ struct notifier_block *power_nb;
int cpu;
unsigned int performance_state;
+ unsigned int default_pstate;
+ unsigned int rpm_pstate;
void *data;
};
@@ -204,6 +261,11 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
+int dev_pm_genpd_remove_notifier(struct device *dev);
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
+ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
+void dev_pm_genpd_synced_poweroff(struct device *dev);
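
For instance, a device that knows when its next event fires can pass that hint to the governor, which (for domains created with GENPD_FLAG_MIN_RESIDENCY) then avoids idle states whose residency would not pay off. A minimal sketch, with a hypothetical helper name:

#include <linux/ktime.h>
#include <linux/pm_domain.h>

/* Hypothetical helper: tell genpd when the next interrupt is due. */
static void my_arm_event(struct device *dev, u32 delay_us)
{
	dev_pm_genpd_set_next_wakeup(dev, ktime_add_us(ktime_get(), delay_us));
}
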
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -242,31 +304,52 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd,
}
static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_genpd_set_performance_state(struct device *dev,
unsigned int state)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_genpd_add_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_genpd_remove_notifier(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{ }
+
+static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
+{
+ return KTIME_MAX;
}
+static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
+{ }
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
-void pm_genpd_syscore_poweroff(struct device *dev);
-void pm_genpd_syscore_poweron(struct device *dev);
+void dev_pm_genpd_suspend(struct device *dev);
+void dev_pm_genpd_resume(struct device *dev);
#else
-static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
-static inline void pm_genpd_syscore_poweron(struct device *dev) {}
+static inline void dev_pm_genpd_suspend(struct device *dev) {}
+static inline void dev_pm_genpd_resume(struct device *dev) {}
#endif
/* OF PM domain providers */
struct of_device_id;
-typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+typedef struct generic_pm_domain *(*genpd_xlate_t)(const struct of_phandle_args *args,
void *data);
struct genpd_onecell_data {
@@ -281,16 +364,14 @@ int of_genpd_add_provider_simple(struct device_node *np,
int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
-int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
-int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec);
-int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec);
+int of_genpd_add_device(const struct of_phandle_args *args, struct device *dev);
+int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec);
+int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec);
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -301,31 +382,31 @@ struct device *genpd_dev_pm_attach_by_name(struct device *dev,
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void of_genpd_del_provider(struct device_node *np) {}
-static inline int of_genpd_add_device(struct of_phandle_args *args,
+static inline int of_genpd_add_device(const struct of_phandle_args *args,
struct device *dev)
{
return -ENODEV;
}
-static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
+static inline int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec)
{
return -ENODEV;
}
-static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
+static inline int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec)
{
return -ENODEV;
}
@@ -336,13 +417,6 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
-static inline unsigned int
-pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp)
-{
- return 0;
-}
-
static inline int genpd_dev_pm_attach(struct device *dev)
{
return 0;
@@ -363,7 +437,7 @@ static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -373,9 +447,14 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
struct device *dev_pm_domain_attach_by_name(struct device *dev,
const char *name);
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list);
void dev_pm_domain_detach(struct device *dev, bool power_off);
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list);
int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
@@ -391,13 +470,25 @@ static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
{
return NULL;
}
+static inline int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ return 0;
+}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {}
static inline int dev_pm_domain_start(struct device *dev)
{
return 0;
}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
+static inline int dev_pm_domain_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_PM_DOMAIN_H */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index dbb484524f82..065a47382302 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -16,6 +16,7 @@
#include <linux/notifier.h>
struct clk;
+struct cpufreq_frequency_table;
struct regulator;
struct dev_pm_opp;
struct device;
@@ -32,73 +33,92 @@ enum dev_pm_opp_event {
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
* @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
* @u_amp: Maximum current drawn by the device in microamperes
+ * @u_watt: Power used by the device in microwatts
*
- * This structure stores the voltage/current values for a single power supply.
+ * This structure stores the voltage/current/power values for a single power
+ * supply.
*/
struct dev_pm_opp_supply {
unsigned long u_volt;
unsigned long u_volt_min;
unsigned long u_volt_max;
unsigned long u_amp;
+ unsigned long u_watt;
};
-/**
- * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
- * @avg: Average bandwidth corresponding to this OPP (in icc units)
- * @peak: Peak bandwidth corresponding to this OPP (in icc units)
- *
- * This structure stores the bandwidth values for a single interconnect path.
- */
-struct dev_pm_opp_icc_bw {
- u32 avg;
- u32 peak;
-};
+typedef int (*config_regulators_t)(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count);
+
+typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down);
/**
- * struct dev_pm_opp_info - OPP freq/voltage/current values
- * @rate: Target clk rate in hz
- * @supplies: Array of voltage/current values for all power supplies
+ * struct dev_pm_opp_config - Device OPP configuration values
+ * @clk_names: Clk names, NULL terminated array.
+ * @config_clks: Custom set clk helper.
+ * @prop_name: Name to postfix to properties.
+ * @config_regulators: Custom set regulator helper.
+ * @supported_hw: Array of hierarchy of versions to match.
+ * @supported_hw_count: Number of elements in the array.
+ * @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
+ * @genpd_names: NULL terminated array of pointers containing names of genpds to
+ * attach. Mutually exclusive with required_devs.
+ * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
+ * exclusive with required_devs.
+ * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
*
- * This structure stores the freq/voltage/current values for a single OPP.
+ * This structure contains platform specific OPP configurations for the device.
*/
-struct dev_pm_opp_info {
- unsigned long rate;
- struct dev_pm_opp_supply *supplies;
+struct dev_pm_opp_config {
+ /* NULL terminated */
+ const char * const *clk_names;
+ config_clks_t config_clks;
+ const char *prop_name;
+ config_regulators_t config_regulators;
+ const unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char * const *regulator_names;
+ const char * const *genpd_names;
+ struct device ***virt_devs;
+ struct device **required_devs;
};
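
A sketch of the consolidated configuration call that replaces the old per-resource setters; the clock and regulator names are hypothetical:

#include <linux/pm_opp.h>

static int my_opp_init(struct device *dev)
{
	static const char * const clk_names[] = { "core", NULL };
	static const char * const reg_names[] = { "vdd", NULL };
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.regulator_names = reg_names,
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	/* ... dev_pm_opp_set_rate(dev, target_freq) ... */
	dev_pm_opp_clear_config(token);
	return 0;
}
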
+#define OPP_LEVEL_UNSET U32_MAX
+
/**
- * struct dev_pm_set_opp_data - Set OPP data
- * @old_opp: Old OPP info
- * @new_opp: New OPP info
- * @regulators: Array of regulator pointers
- * @regulator_count: Number of regulators
- * @clk: Pointer to clk
- * @dev: Pointer to the struct device
- *
- * This structure contains all information required for setting an OPP.
+ * struct dev_pm_opp_data - The data to use to initialize an OPP.
+ * @turbo: Flag to indicate whether the OPP is to be marked turbo or not.
+ * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if
+ * level field isn't used.
+ * @freq: The clock rate in Hz for the OPP.
+ * @u_volt: The voltage in uV for the OPP.
*/
-struct dev_pm_set_opp_data {
- struct dev_pm_opp_info old_opp;
- struct dev_pm_opp_info new_opp;
-
- struct regulator **regulators;
- unsigned int regulator_count;
- struct clk *clk;
- struct device *dev;
+struct dev_pm_opp_data {
+ bool turbo;
+ unsigned int level;
+ unsigned long freq;
+ unsigned long u_volt;
};
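
A sketch of registering one OPP at runtime with the new descriptor; the frequency and voltage are made-up values:

#include <linux/pm_opp.h>

static int my_register_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.level = OPP_LEVEL_UNSET,	/* level field unused here */
		.freq = 800000000,		/* 800 MHz */
		.u_volt = 1000000,		/* 1.0 V */
	};

	return dev_pm_opp_add_dynamic(dev, &data);
}
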
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
-unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
+
+unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index);
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index);
+
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
@@ -110,20 +130,42 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level);
+
+struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
+
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level);
+
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level);
+
+struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned int *level);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index);
+
void dev_pm_opp_put(struct dev_pm_opp *opp);
-int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt);
+int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
+
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
@@ -138,34 +180,31 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq);
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
-void dev_pm_opp_put_clkname(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+void dev_pm_opp_clear_config(int token);
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
+
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
-int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp);
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_sync_regulators(struct device *dev);
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
@@ -175,7 +214,17 @@ static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
return 0;
}
-static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+static inline int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
return 0;
}
@@ -185,6 +234,13 @@ static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
return 0;
}
+static inline
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ return 0;
+}
+
static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
return false;
@@ -218,39 +274,76 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
-static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt)
+static inline int
+dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -281,77 +374,57 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
-}
-
-static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
-{
- return ERR_PTR(-ENOTSUPP);
-}
-
-static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
-{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static inline int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
+static inline int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
+static inline void dev_pm_opp_clear_config(int token) {}
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name)
+static inline int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
+static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table, struct dev_pm_opp *src_opp)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
-
static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
return -EOPNOTSUPP;
}
static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
@@ -367,12 +440,34 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
{
}
+static inline int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_PM_OPP */
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table)
+{
+}
+#endif
+
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
+int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
+int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -388,21 +483,31 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
+static inline int devm_pm_opp_of_add_table(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
@@ -411,7 +516,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum
static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
@@ -427,7 +532,7 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
static inline int dev_pm_opp_of_register_em(struct device *dev,
struct cpumask *cpus)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_unregister_em(struct device *dev)
@@ -436,13 +541,174 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#endif
+/* OPP Configuration helpers */
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct dev_pm_opp_data data = {
+ .freq = freq,
+ .u_volt = u_volt,
+ };
+
+ return dev_pm_opp_add_dynamic(dev, &data);
+}
+
+/* Regulators helpers */
+static inline int dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* Supported-hw helpers */
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_supported_hw(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* clkname helpers */
+static inline int dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_clkname(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* config-regulators helpers */
+static inline int dev_pm_opp_set_config_regulators(struct device *dev,
+ config_regulators_t helper)
+{
+ struct dev_pm_opp_config config = {
+ .config_regulators = helper,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_config_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+/* genpd helpers */
+static inline int dev_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_detach_genpd(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* prop-name helpers */
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct dev_pm_opp_config config = {
+ .prop_name = name,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_prop_name(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_freq_indexed(opp, 0);
+}
+
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 6245caa18034..d39dc863f612 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -22,6 +22,40 @@
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+/*
+ * Use this for defining a set of PM operations to be used in all situations
+ * (system suspend, hibernation or runtime PM).
+ *
+ * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
+ * macro, which uses the provided callbacks for both runtime PM and system
+ * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
+ * and pm_runtime_force_resume() for its system sleep callbacks.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
+ */
+#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
+ pm_runtime_force_resume, suspend_fn, \
+ resume_fn, idle_fn)
+
+#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
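
A sketch of a platform driver wiring these up: only the runtime callbacks are written, and system sleep falls back to pm_runtime_force_suspend()/resume() as described above. The driver name is hypothetical:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
	/* Quiesce the hardware. */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* Reinitialize the hardware. */
	return 0;
}

DEFINE_RUNTIME_DEV_PM_OPS(my_pm_ops, my_runtime_suspend,
			  my_runtime_resume, NULL);

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-dev",
		.pm = pm_ptr(&my_pm_ops),
	},
};
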
+
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
@@ -38,7 +72,8 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
+extern int pm_runtime_get_if_active(struct device *dev);
+extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -51,26 +86,14 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
-extern void pm_runtime_update_max_time_suspended(struct device *dev,
- s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
+extern void pm_runtime_release_supplier(struct device_link *link);
-/**
- * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
- * @dev: Target device.
- *
- * Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
- */
-static inline int pm_runtime_get_if_in_use(struct device *dev)
-{
- return pm_runtime_get_if_active(dev, false);
-}
+extern int devm_pm_runtime_enable(struct device *dev);
/**
* pm_suspend_ignore_children - Set runtime PM behavior regarding children.
@@ -128,7 +151,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
* pm_runtime_active - Check whether or not a device is runtime-active.
* @dev: Target device.
*
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
* %RPM_ACTIVE, or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
@@ -241,8 +264,7 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
-static inline int pm_runtime_get_if_active(struct device *dev,
- bool ign_usage_count)
+static inline int pm_runtime_get_if_active(struct device *dev)
{
return -EINVAL;
}
@@ -254,6 +276,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
@@ -266,7 +290,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
-static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
+static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
@@ -276,11 +300,11 @@ static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
+static inline void pm_runtime_release_supplier(struct device_link *link) {}
#endif /* !CONFIG_PM */
@@ -382,6 +406,9 @@ static inline int pm_runtime_get(struct device *dev)
* The possible return values of this function are the same as for
* pm_runtime_resume() and the runtime PM usage counter of @dev remains
* incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
*/
static inline int pm_runtime_get_sync(struct device *dev)
{
@@ -389,6 +416,27 @@ static inline int pm_runtime_get_sync(struct device *dev)
}
/**
+ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Resume @dev synchronously and if that is successful, increment its runtime
+ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
+ * incremented or a negative error code otherwise.
+ */
+static inline int pm_runtime_resume_and_get(struct device *dev)
+{
+ int ret;
+
+ ret = __pm_runtime_resume(dev, RPM_GET_PUT);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
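
Unlike pm_runtime_get_sync(), the usage counter is already dropped when this helper fails, so the error path needs no extra put. A sketch of the intended calling pattern:

static int my_start(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage counter already dropped */

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}
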
+
+/**
* pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
* @dev: Target device.
*
@@ -401,6 +449,18 @@ static inline int pm_runtime_put(struct device *dev)
}
/**
+ * __pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ */
+static inline int __pm_runtime_put_autosuspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
+}
+
+/**
* pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
* @dev: Target device.
*
@@ -479,7 +539,7 @@ static inline int pm_runtime_set_active(struct device *dev)
}
/**
- * pm_runtime_set_suspended - Set runtime PM status to "active".
+ * pm_runtime_set_suspended - Set runtime PM status to "suspended".
* @dev: Target device.
*
* Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
@@ -514,6 +574,10 @@ static inline void pm_runtime_disable(struct device *dev)
* Allow the runtime PM autosuspend mechanism to be used for @dev whenever
* requested (or "autosuspend" will be handled as direct runtime-suspend for
* it).
+ *
+ * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
+ * at driver exit time unless your driver initially enabled pm_runtime
+ * with devm_pm_runtime_enable() (which handles it for you).
*/
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
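
A sketch of the usual autosuspend pattern built from these helpers; the 2 s delay is a made-up value, and devm_pm_runtime_enable() also undoes the autosuspend setup on removal, per the note above:

#include <linux/pm_runtime.h>

static int my_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s idle window */
	pm_runtime_use_autosuspend(dev);
	return devm_pm_runtime_enable(dev);
}

static void my_io_done(struct device *dev)
{
	/* Restart the idle window, then queue an autosuspend. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
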
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index cd5b62db9084..d9642c6cf852 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -1,15 +1,5 @@
-/*
- * pm_wakeirq.h - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* pm_wakeirq.h - Device wakeirq helper functions */
#ifndef _LINUX_PM_WAKEIRQ_H
#define _LINUX_PM_WAKEIRQ_H
@@ -17,11 +7,9 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
- int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
-extern void dev_pm_enable_wake_irq(struct device *dev);
-extern void dev_pm_disable_wake_irq(struct device *dev);
#else /* !CONFIG_PM */
@@ -35,15 +23,12 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
-static inline void dev_pm_clear_wake_irq(struct device *dev)
-{
-}
-
-static inline void dev_pm_enable_wake_irq(struct device *dev)
+static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
+ return 0;
}
-static inline void dev_pm_disable_wake_irq(struct device *dev)
+static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index aa3da6611533..6eb9adaef52b 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -84,6 +84,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline bool device_wakeup_path(struct device *dev)
+{
+ return dev->power.wakeup_path;
+}
+
static inline void device_set_wakeup_path(struct device *dev)
{
dev->power.wakeup_path = true;
@@ -104,7 +109,6 @@ extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
extern int device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
-extern int device_init_wakeup(struct device *dev, bool val);
extern int device_set_wakeup_enable(struct device *dev, bool enable);
extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
@@ -162,16 +166,14 @@ static inline int device_set_wakeup_enable(struct device *dev, bool enable)
return 0;
}
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline bool device_may_wakeup(struct device *dev)
{
- device_set_wakeup_capable(dev, val);
- device_set_wakeup_enable(dev, val);
- return 0;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
-static inline bool device_may_wakeup(struct device *dev)
+static inline bool device_wakeup_path(struct device *dev)
{
- return dev->power.can_wakeup && dev->power.should_wakeup;
+ return false;
}
static inline void device_set_wakeup_path(struct device *dev) {}
@@ -192,6 +194,16 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
#endif /* !CONFIG_PM_SLEEP */
+static inline bool device_awake_path(struct device *dev)
+{
+ return device_wakeup_path(dev);
+}
+
+static inline void device_set_awake_path(struct device *dev)
+{
+ device_set_wakeup_path(dev);
+}
+
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
return pm_wakeup_ws_event(ws, msec, false);
@@ -207,4 +219,27 @@ static inline void pm_wakeup_hard_event(struct device *dev)
return pm_wakeup_dev_event(dev, 0, true);
}
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc. Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
+ */
+static inline int device_init_wakeup(struct device *dev, bool enable)
+{
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ return device_wakeup_enable(dev);
+ } else {
+ device_wakeup_disable(dev);
+ device_set_wakeup_capable(dev, false);
+ return 0;
+ }
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
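
A sketch of a probe path marking a device as a wakeup source and handing its interrupt to the PM core; the driver is hypothetical and dev_pm_set_wake_irq() comes from pm_wakeirq.h above:

#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int my_probe(struct device *dev, int irq)
{
	int ret;

	/* Declare the device wakeup-capable and enable wakeup by default. */
	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	/* Let the PM core arm this IRQ for wakeup during suspend. */
	return dev_pm_set_wake_irq(dev, irq);
}
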
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 1ea5bae708a1..fa9f08164c36 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -34,6 +34,45 @@
*/
#define PMBUS_WRITE_PROTECTED BIT(1)
+/*
+ * PMBUS_NO_CAPABILITY
+ *
+ * Some PMBus chips don't respond with valid data when reading the CAPABILITY
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use CAPABILITY to determine its behavior.
+ */
+#define PMBUS_NO_CAPABILITY BIT(2)
+
+/*
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+ *
+ * Some PMBus chips end up in an undefined state when trying to read an
+ * unsupported register. For such chips, it is necessary to reset the
+ * chip pmbus controller to a known state after a failed register check.
+ * This can be done by reading a known register. By setting this flag the
+ * driver will try to read the STATUS register after each failed
+ * register check. This read may fail, but it will put the chip in a
+ * known state.
+ */
+#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
+
+/*
+ * PMBUS_NO_WRITE_PROTECT
+ *
+ * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use the WRITE_PROTECT command to determine its behavior.
+ */
+#define PMBUS_NO_WRITE_PROTECT BIT(4)
+
+/*
+ * PMBUS_USE_COEFFICIENTS_CMD
+ *
+ * When this flag is set the PMBus core driver will use the COEFFICIENTS
+ * register to initialize the coefficients for the direct mode format.
+ */
+#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
+
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
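
A sketch of how a board file might pass these quirk flags for a chip with a broken CAPABILITY register (hypothetical platform data; only the flags field is shown in this hunk):

#include <linux/pmbus.h>

static struct pmbus_platform_data my_pmbus_pdata = {
	.flags = PMBUS_NO_CAPABILITY | PMBUS_READ_STATUS_AFTER_FAILED_CHECK,
};
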
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 52453a24a24f..c677442d007c 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -13,7 +13,7 @@
#include <uapi/linux/pmu.h>
-extern int find_via_pmu(void);
+extern int __init find_via_pmu(void);
extern int pmu_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
deleted file mode 100644
index 17d7d0d20eca..000000000000
--- a/include/linux/pnfs_osd_xdr.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * pNFS-osd on-the-wire data structures
- *
- * Copyright (C) 2007 Panasas Inc. [year of first publication]
- * All rights reserved.
- *
- * Benny Halevy <bhalevy@panasas.com>
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * See the file COPYING included with this distribution for more details.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __PNFS_OSD_XDR_H__
-#define __PNFS_OSD_XDR_H__
-
-#include <linux/nfs_fs.h>
-
-/*
- * draft-ietf-nfsv4-minorversion-22
- * draft-ietf-nfsv4-pnfs-obj-12
- */
-
-/* Layout Structure */
-
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-/* struct pnfs_osd_data_map4 {
- * uint32_t odm_num_comps;
- * length4 odm_stripe_unit;
- * uint32_t odm_group_width;
- * uint32_t odm_group_depth;
- * uint32_t odm_mirror_cnt;
- * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
- * };
- */
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-/* struct pnfs_osd_objid4 {
- * deviceid4 oid_device_id;
- * uint64_t oid_partition_id;
- * uint64_t oid_object_id;
- * };
- */
-struct pnfs_osd_objid {
- struct nfs4_deviceid oid_device_id;
- u64 oid_partition_id;
- u64 oid_object_id;
-};
-
-/* For printout. I use:
- * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
- * BE style
- */
-#define _DEVID_LO(oid_device_id) \
- (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
-
-#define _DEVID_HI(oid_device_id) \
- (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
-
-enum pnfs_osd_version {
- PNFS_OSD_MISSING = 0,
- PNFS_OSD_VERSION_1 = 1,
- PNFS_OSD_VERSION_2 = 2
-};
-
-struct pnfs_osd_opaque_cred {
- u32 cred_len;
- void *cred;
-};
-
-enum pnfs_osd_cap_key_sec {
- PNFS_OSD_CAP_KEY_SEC_NONE = 0,
- PNFS_OSD_CAP_KEY_SEC_SSV = 1,
-};
-
-/* struct pnfs_osd_object_cred4 {
- * pnfs_osd_objid4 oc_object_id;
- * pnfs_osd_version4 oc_osd_version;
- * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
- * opaque oc_capability_key<>;
- * opaque oc_capability<>;
- * };
- */
-struct pnfs_osd_object_cred {
- struct pnfs_osd_objid oc_object_id;
- u32 oc_osd_version;
- u32 oc_cap_key_sec;
- struct pnfs_osd_opaque_cred oc_cap_key;
- struct pnfs_osd_opaque_cred oc_cap;
-};
-
-/* struct pnfs_osd_layout4 {
- * pnfs_osd_data_map4 olo_map;
- * uint32_t olo_comps_index;
- * pnfs_osd_object_cred4 olo_components<>;
- * };
- */
-struct pnfs_osd_layout {
- struct pnfs_osd_data_map olo_map;
- u32 olo_comps_index;
- u32 olo_num_comps;
- struct pnfs_osd_object_cred *olo_comps;
-};
-
-/* Device Address */
-enum pnfs_osd_targetid_type {
- OBJ_TARGET_ANON = 1,
- OBJ_TARGET_SCSI_NAME = 2,
- OBJ_TARGET_SCSI_DEVICE_ID = 3,
-};
-
-/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
- * case OBJ_TARGET_SCSI_NAME:
- * string oti_scsi_name<>;
- *
- * case OBJ_TARGET_SCSI_DEVICE_ID:
- * opaque oti_scsi_device_id<>;
- *
- * default:
- * void;
- * };
- *
- * union pnfs_osd_targetaddr4 switch (bool ota_available) {
- * case TRUE:
- * netaddr4 ota_netaddr;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_deviceaddr4 {
- * pnfs_osd_targetid4 oda_targetid;
- * pnfs_osd_targetaddr4 oda_targetaddr;
- * uint64_t oda_lun;
- * opaque oda_systemid<>;
- * pnfs_osd_object_cred4 oda_root_obj_cred;
- * opaque oda_osdname<>;
- * };
- */
-struct pnfs_osd_targetid {
- u32 oti_type;
- struct nfs4_string oti_scsi_device_id;
-};
-
-/* struct netaddr4 {
- * // see struct rpcb in RFC1833
- * string r_netid<>; // network id
- * string r_addr<>; // universal address
- * };
- */
-struct pnfs_osd_net_addr {
- struct nfs4_string r_netid;
- struct nfs4_string r_addr;
-};
-
-struct pnfs_osd_targetaddr {
- u32 ota_available;
- struct pnfs_osd_net_addr ota_netaddr;
-};
-
-struct pnfs_osd_deviceaddr {
- struct pnfs_osd_targetid oda_targetid;
- struct pnfs_osd_targetaddr oda_targetaddr;
- u8 oda_lun[8];
- struct nfs4_string oda_systemid;
- struct pnfs_osd_object_cred oda_root_obj_cred;
- struct nfs4_string oda_osdname;
-};
-
-/* LAYOUTCOMMIT: layoutupdate */
-
-/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
- * case TRUE:
- * int64_t dsu_delta;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_layoutupdate4 {
- * pnfs_osd_deltaspaceused4 olu_delta_space_used;
- * bool olu_ioerr_flag;
- * };
- */
-struct pnfs_osd_layoutupdate {
- u32 dsu_valid;
- s64 dsu_delta;
- u32 olu_ioerr_flag;
-};
-
-/* LAYOUTRETURN: I/O Error Report */
-
-enum pnfs_osd_errno {
- PNFS_OSD_ERR_EIO = 1,
- PNFS_OSD_ERR_NOT_FOUND = 2,
- PNFS_OSD_ERR_NO_SPACE = 3,
- PNFS_OSD_ERR_BAD_CRED = 4,
- PNFS_OSD_ERR_NO_ACCESS = 5,
- PNFS_OSD_ERR_UNREACHABLE = 6,
- PNFS_OSD_ERR_RESOURCE = 7
-};
-
-/* struct pnfs_osd_ioerr4 {
- * pnfs_osd_objid4 oer_component;
- * length4 oer_comp_offset;
- * length4 oer_comp_length;
- * bool oer_iswrite;
- * pnfs_osd_errno4 oer_errno;
- * };
- */
-struct pnfs_osd_ioerr {
- struct pnfs_osd_objid oer_component;
- u64 oer_comp_offset;
- u64 oer_comp_length;
- u32 oer_iswrite;
- u32 oer_errno;
-};
-
-/* OSD XDR Client API */
-/* Layout helpers */
-/* Layout decoding is done in two parts:
- * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part
- * of the layout. @iter members need not be initialized.
- * Returned:
- * @layout members are set. (@layout->olo_comps set to NULL).
- *
- * Zero on success, or negative error if passed xdr is broken.
- *
- * 2. Then call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
- * false, to decode the next component.
- * Returned:
- * true if there is more to decode, or false when done or on error.
- *
- * Example:
- * struct pnfs_osd_xdr_decode_layout_iter iter;
- * struct pnfs_osd_layout layout;
- * struct pnfs_osd_object_cred comp;
- * int status;
- *
- * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
- * if (unlikely(status))
- * goto err;
- * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
- * // All of @comp strings point to inside the xdr_buffer
- * // or scratch buffer. Copy them out to user memory, e.g.:
- * copy_single_comp(dest_comp++, &comp);
- * }
- * if (unlikely(status))
- * goto err;
- */
-
-struct pnfs_osd_xdr_decode_layout_iter {
- unsigned total_comps;
- unsigned decoded_comps;
-};
-
-extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
-
-extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
- int *err);
-
-/* Device Info helpers */
-
-/* Note: All strings inside @deviceaddr point to space inside @p.
- * @p should stay valid while @deviceaddr is in use.
- */
-extern void pnfs_osd_xdr_decode_deviceaddr(
- struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
-
-/* layoutupdate (layout_commit) xdr helpers */
-extern int
-pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
- struct pnfs_osd_layoutupdate *lou);
-
-/* osd_ioerror encoding (layout_return) */
-extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
-extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
-
-#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index c2a7cfbca713..ddbe7c3ca4ce 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -291,7 +291,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
struct pnp_fixup {
char id[7];
- void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
+ void (*quirk_function) (struct pnp_dev *dev); /* fixup function */
};
/* config parameters */
@@ -419,8 +419,8 @@ struct pnp_protocol {
/* protocol specific suspend/resume */
bool (*can_wakeup) (struct pnp_dev *dev);
- int (*suspend) (struct pnp_dev * dev, pm_message_t state);
- int (*resume) (struct pnp_dev * dev);
+ int (*suspend) (struct pnp_dev *dev, pm_message_t state);
+ int (*resume) (struct pnp_dev *dev);
/* used by pnp layer only (look but don't touch) */
unsigned char number; /* protocol number */
@@ -435,7 +435,7 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern struct bus_type pnp_bus_type;
+extern const struct bus_type pnp_bus_type;
#if defined(CONFIG_PNP)
@@ -492,7 +492,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0; }
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
index dc8ae5d8db03..1f0ee2459f2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -27,11 +27,7 @@
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/page_poison.c **********/
-#ifdef CONFIG_PAGE_POISONING_ZERO
-#define PAGE_POISON 0x00
-#else
#define PAGE_POISON 0xaa
-#endif
/********** mm/page_alloc.c ************/
@@ -82,4 +78,21 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
+/********** net/core/page_pool.c **********/
+#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+
+/********** net/core/skbuff.c **********/
+#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+/********** net/ **********/
+#define NET_PTR_POISON ((void *)(0x801 + POISON_POINTER_DELTA))
+
+/********** kernel/bpf/ **********/
+#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+
+/********** VFS **********/
+#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
+
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
#endif
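The poison values added above follow the usual pattern: a distinctive, non-dereferenceable address is stored in a pointer field that must never be followed again. A minimal sketch (the struct and helper are hypothetical; only SKB_LIST_POISON_NEXT comes from this header):

#include <linux/poison.h>

struct node {			/* hypothetical list node */
	struct node *next;
};

static void node_unlink(struct node *n)
{
	/*
	 * Poison instead of NULL: a later dereference faults at a
	 * recognizable address, so use-after-unlink shows up clearly
	 * in the oops rather than as a silent NULL-pointer walk.
	 */
	n->next = (struct node *)SKB_LIST_POISON_NEXT;
}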
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 1cdc32b1f1b0..d1ea4f3714a8 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -8,19 +8,13 @@
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
-#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>
-extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
-#ifdef __clang__
-#define MAX_STACK_ALLOC 768
-#else
#define MAX_STACK_ALLOC 832
-#endif
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
diff --git a/include/linux/polynomial.h b/include/linux/polynomial.h
new file mode 100644
index 000000000000..9e074a0bb6fa
--- /dev/null
+++ b/include/linux/polynomial.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ */
+
+#ifndef _POLYNOMIAL_H
+#define _POLYNOMIAL_H
+
+/*
+ * struct polynomial_term - one term descriptor of a polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: distributed divider per each degree.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct polynomial_term {
+ unsigned int deg;
+ long coef;
+ long divider;
+ long divider_leftover;
+};
+
+/*
+ * struct polynomial - a polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms, last term must have degree of 0
+ */
+struct polynomial {
+ long total_divider;
+ struct polynomial_term terms[];
+};
+
+long polynomial_calc(const struct polynomial *poly, long data);
+
+#endif
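As a usage sketch, here is how a driver might describe a simple conversion for polynomial_calc(); the numbers and the resulting formula are made up, and only the struct layout and the degree-0 terminator rule come from this header. Existing users populate the flexible terms[] array via static initialization, which GCC permits as an extension.

#include <linux/polynomial.h>

/*
 * Hypothetical conversion, roughly T = (-100 * x + 50000) / 1000,
 * assuming unit dividers. The last term must have degree 0.
 */
static const struct polynomial temp_poly = {
	.total_divider = 1000,
	.terms = {
		{ .deg = 1, .coef = -100,  .divider = 1, .divider_leftover = 1 },
		{ .deg = 0, .coef = 50000, .divider = 1, .divider_leftover = 1 },
	},
};

/* long temp_mdegc = polynomial_calc(&temp_poly, raw_sample); */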
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 468328b1e1dd..ef8619f48920 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -14,6 +14,7 @@
#include <linux/rwsem.h>
struct posix_clock;
+struct posix_clock_context;
/**
* struct posix_clock_operations - functional interface to the clock
@@ -50,18 +51,18 @@ struct posix_clock_operations {
/*
* Optional character device methods:
*/
- long (*ioctl) (struct posix_clock *pc,
- unsigned int cmd, unsigned long arg);
+ long (*ioctl)(struct posix_clock_context *pccontext, unsigned int cmd,
+ unsigned long arg);
- int (*open) (struct posix_clock *pc, fmode_t f_mode);
+ int (*open)(struct posix_clock_context *pccontext, fmode_t f_mode);
- __poll_t (*poll) (struct posix_clock *pc,
- struct file *file, poll_table *wait);
+ __poll_t (*poll)(struct posix_clock_context *pccontext, struct file *file,
+ poll_table *wait);
- int (*release) (struct posix_clock *pc);
+ int (*release)(struct posix_clock_context *pccontext);
- ssize_t (*read) (struct posix_clock *pc,
- uint flags, char __user *buf, size_t cnt);
+ ssize_t (*read)(struct posix_clock_context *pccontext, uint flags,
+ char __user *buf, size_t cnt);
};
/**
@@ -91,6 +92,24 @@ struct posix_clock {
};
/**
+ * struct posix_clock_context - represents clock file operations context
+ *
+ * @clk: Pointer to the clock
+ * @private_clkdata: Pointer to user data
+ *
+ * Drivers should use struct posix_clock_context in their character
+ * device file operation methods to access the posix clock.
+ *
+ * Drivers can store a private data structure during the open operation
+ * if they have specific information that is required in other file
+ * operations.
+ */
+struct posix_clock_context {
+ struct posix_clock *clk;
+ void *private_clkdata;
+};
+
+/**
* posix_clock_register() - register a new clock
* @clk: Pointer to the clock. Caller must provide 'ops' field
* @dev: Pointer to the initialized device. Caller must provide
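To illustrate the new context-based methods, a minimal sketch of a driver stashing per-open state in @private_clkdata (the my_* names are hypothetical; the signatures match the ops table above):

#include <linux/posix-clock.h>
#include <linux/slab.h>

struct my_state { int flags; };	/* hypothetical per-open data */

static int my_open(struct posix_clock_context *pccontext, fmode_t f_mode)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	pccontext->private_clkdata = st;	/* retrieved in later ops */
	return 0;
}

static int my_release(struct posix_clock_context *pccontext)
{
	kfree(pccontext->private_clkdata);
	pccontext->private_clkdata = NULL;
	return 0;
}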
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 896c16d2c5fb..dc7b738de299 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -2,40 +2,16 @@
#ifndef _linux_POSIX_TIMERS_H
#define _linux_POSIX_TIMERS_H
-#include <linux/spinlock.h>
-#include <linux/list.h>
#include <linux/alarmtimer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/posix-timers_types.h>
+#include <linux/spinlock.h>
#include <linux/timerqueue.h>
-#include <linux/task_work.h>
struct kernel_siginfo;
struct task_struct;
-/*
- * Bit fields within a clockid:
- *
- * The most significant 29 bits hold either a pid or a file descriptor.
- *
- * Bit 2 indicates whether a cpu clock refers to a thread or a process.
- *
- * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
- *
- * A clockid is invalid if bits 2, 1, and 0 are all set.
- */
-#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
-#define CPUCLOCK_PERTHREAD(clock) \
- (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
-
-#define CPUCLOCK_PERTHREAD_MASK 4
-#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
-#define CPUCLOCK_CLOCK_MASK 3
-#define CPUCLOCK_PROF 0
-#define CPUCLOCK_VIRT 1
-#define CPUCLOCK_SCHED 2
-#define CPUCLOCK_MAX 3
-#define CLOCKFD CPUCLOCK_MAX
-#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
{
@@ -63,16 +39,18 @@ static inline int clockid_to_fd(const clockid_t clk)
* cpu_timer - Posix CPU timer representation for k_itimer
* @node: timerqueue node to queue in the task/sig
* @head: timerqueue head on which this timer is queued
- * @task: Pointer to target task
+ * @pid: Pointer to target task PID
* @elist: List head for the expiry list
* @firing: Timer is currently firing
+ * @handling: Pointer to the task which handles expiry
*/
struct cpu_timer {
- struct timerqueue_node node;
- struct timerqueue_head *head;
- struct pid *pid;
- struct list_head elist;
- int firing;
+ struct timerqueue_node node;
+ struct timerqueue_head *head;
+ struct pid *pid;
+ struct list_head elist;
+ int firing;
+ struct task_struct __rcu *handling;
};
static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
@@ -82,12 +60,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
return timerqueue_add(head, &ctmr->node);
}
-static inline void cpu_timer_dequeue(struct cpu_timer *ctmr)
+static inline bool cpu_timer_queued(struct cpu_timer *ctmr)
{
- if (ctmr->head) {
+ return !!ctmr->head;
+}
+
+static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr)
+{
+ if (cpu_timer_queued(ctmr)) {
timerqueue_del(ctmr->head, &ctmr->node);
ctmr->head = NULL;
+ return true;
}
+ return false;
}
static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
@@ -100,42 +85,6 @@ static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
ctmr->node.expires = exp;
}
-/**
- * posix_cputimer_base - Container per posix CPU clock
- * @nextevt: Earliest-expiration cache
- * @tqhead: timerqueue head for cpu_timers
- */
-struct posix_cputimer_base {
- u64 nextevt;
- struct timerqueue_head tqhead;
-};
-
-/**
- * posix_cputimers - Container for posix CPU timer related data
- * @bases: Base container for posix CPU clocks
- * @timers_active: Timers are queued.
- * @expiry_active: Timer expiry is active. Used for
- * process wide timers to avoid multiple
- * task trying to handle expiry concurrently
- *
- * Used in task_struct and signal_struct
- */
-struct posix_cputimers {
- struct posix_cputimer_base bases[CPUCLOCK_MAX];
- unsigned int timers_active;
- unsigned int expiry_active;
-};
-
-/**
- * posix_cputimers_work - Container for task work based posix CPU timer expiry
- * @work: The task work to be scheduled
- * @scheduled: @work has been scheduled already, no further processing
- */
-struct posix_cputimers_work {
- struct callback_head work;
- unsigned int scheduled;
-};
-
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@@ -168,7 +117,6 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
.bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \
},
#else
-struct posix_cputimers { };
struct cpu_timer { };
#define INIT_CPU_TIMERS(s)
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
@@ -177,8 +125,10 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
#endif
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void clear_posix_cputimers_work(struct task_struct *p);
void posix_cputimers_init_work(void);
#else
+static inline void clear_posix_cputimers_work(struct task_struct *p) { }
static inline void posix_cputimers_init_work(void) { }
#endif
@@ -244,7 +194,7 @@ void posix_cpu_timers_exit_group(struct task_struct *task);
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
u64 *newval, u64 *oldval);
-void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
+int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
void posixtimer_rearm(struct kernel_siginfo *info);
#endif
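Since cpu_timer_dequeue() now reports whether the timer was actually queued, a caller can fold the pending check into the removal, as in this small sketch (the wrapper function is hypothetical):

#include <linux/posix-timers.h>
#include <linux/printk.h>

static void cancel_cpu_timer(struct cpu_timer *ctmr)
{
	/* true only if the timer sat on a queue and was removed here */
	if (cpu_timer_dequeue(ctmr))
		pr_debug("timer was pending; removed from its queue\n");
}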
diff --git a/include/linux/posix-timers_types.h b/include/linux/posix-timers_types.h
new file mode 100644
index 000000000000..a4712c1008c9
--- /dev/null
+++ b/include/linux/posix-timers_types.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _linux_POSIX_TIMERS_TYPES_H
+#define _linux_POSIX_TIMERS_TYPES_H
+
+#include <linux/mutex_types.h>
+#include <linux/timerqueue_types.h>
+#include <linux/types.h>
+
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
+#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
+#define CPUCLOCK_PERTHREAD(clock) \
+ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
+
+#define CPUCLOCK_PERTHREAD_MASK 4
+#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
+#define CPUCLOCK_CLOCK_MASK 3
+#define CPUCLOCK_PROF 0
+#define CPUCLOCK_VIRT 1
+#define CPUCLOCK_SCHED 2
+#define CPUCLOCK_MAX 3
+#define CLOCKFD CPUCLOCK_MAX
+#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
+
+#ifdef CONFIG_POSIX_TIMERS
+
+/**
+ * posix_cputimer_base - Container per posix CPU clock
+ * @nextevt: Earliest-expiration cache
+ * @tqhead: timerqueue head for cpu_timers
+ */
+struct posix_cputimer_base {
+ u64 nextevt;
+ struct timerqueue_head tqhead;
+};
+
+/**
+ * posix_cputimers - Container for posix CPU timer related data
+ * @bases: Base container for posix CPU clocks
+ * @timers_active: Timers are queued.
+ * @expiry_active: Timer expiry is active. Used for
+ * process-wide timers to avoid multiple
+ * tasks trying to handle expiry concurrently
+ *
+ * Used in task_struct and signal_struct
+ */
+struct posix_cputimers {
+ struct posix_cputimer_base bases[CPUCLOCK_MAX];
+ unsigned int timers_active;
+ unsigned int expiry_active;
+};
+
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @mutex: Mutex held around expiry in context of this task work
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ struct mutex mutex;
+ unsigned int scheduled;
+};
+
+#else /* CONFIG_POSIX_TIMERS */
+
+struct posix_cputimers { };
+
+#endif /* CONFIG_POSIX_TIMERS */
+
+#endif /* _linux_POSIX_TIMERS_TYPES_H */
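The clockid bit layout documented above decodes as in this sketch (describe_clockid() is hypothetical; the macros are the ones defined in this header):

#include <linux/posix-timers_types.h>
#include <linux/printk.h>

static void describe_clockid(clockid_t clk)
{
	pid_t pid = CPUCLOCK_PID(clk);			/* upper 29 bits, inverted */
	int per_thread = CPUCLOCK_PERTHREAD(clk);	/* bit 2 */
	int type = CPUCLOCK_WHICH(clk);			/* bits 1:0 */

	/* type: CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED or CLOCKFD */
	pr_info("pid=%d per_thread=%d type=%d\n", pid, per_thread, type);
}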
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 90797f1b421d..0e65b3d634d9 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -15,6 +15,8 @@
#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
+struct user_namespace;
+
struct posix_acl_entry {
short e_tag;
unsigned short e_perm;
@@ -61,38 +63,54 @@ posix_acl_release(struct posix_acl *acl)
extern void posix_acl_init(struct posix_acl *, int);
extern struct posix_acl *posix_acl_alloc(int, gfp_t);
-extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
-extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
-extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+int set_posix_acl(struct mnt_idmap *, struct dentry *, int,
+ struct posix_acl *);
+
+struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
#ifdef CONFIG_FS_POSIX_ACL
-extern int posix_acl_chmod(struct inode *, umode_t);
+int posix_acl_chmod(struct mnt_idmap *, struct dentry *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
-extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
+int posix_acl_update_mode(struct mnt_idmap *, struct inode *, umode_t *,
+ struct posix_acl **);
-extern int simple_set_acl(struct inode *, struct posix_acl *, int);
+int simple_set_acl(struct mnt_idmap *, struct dentry *,
+ struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
-struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
void forget_cached_acl(struct inode *inode, int type);
void forget_all_cached_acls(struct inode *inode);
+int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
+int posix_acl_permission(struct mnt_idmap *, struct inode *,
+ const struct posix_acl *, int);
static inline void cache_no_acl(struct inode *inode)
{
inode->i_acl = NULL;
inode->i_default_acl = NULL;
}
+
+int vfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl);
+struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+int vfs_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name);
+int posix_acl_listxattr(struct inode *inode, char **buffer,
+ ssize_t *remaining_size);
#else
-static inline int posix_acl_chmod(struct inode *inode, umode_t mode)
+static inline int posix_acl_chmod(struct mnt_idmap *idmap,
+ struct dentry *dentry, umode_t mode)
{
return 0;
}
@@ -117,8 +135,33 @@ static inline int posix_acl_create(struct inode *inode, umode_t *mode,
static inline void forget_all_cached_acls(struct inode *inode)
{
}
+
+static inline int vfs_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
+ struct posix_acl *acl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int vfs_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+{
+ return -EOPNOTSUPP;
+}
+static inline int posix_acl_listxattr(struct inode *inode, char **buffer,
+ ssize_t *remaining_size)
+{
+ return 0;
+}
#endif /* CONFIG_FS_POSIX_ACL */
-struct posix_acl *get_acl(struct inode *inode, int type);
+struct posix_acl *get_inode_acl(struct inode *inode, int type);
#endif /* __LINUX_POSIX_ACL_H */
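A hedged sketch of the idmap-aware helpers declared above (drop_default_acl() is hypothetical; error handling is trimmed to the essentials):

#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>

static int drop_default_acl(struct mnt_idmap *idmap, struct dentry *dentry)
{
	struct posix_acl *acl;

	/* Fetch (and immediately release) the ACL to see if one exists. */
	acl = vfs_get_acl(idmap, dentry, XATTR_NAME_POSIX_ACL_DEFAULT);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	posix_acl_release(acl);

	return vfs_remove_acl(idmap, dentry, XATTR_NAME_POSIX_ACL_DEFAULT);
}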
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 2387709991b5..e86f3b731da2 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -33,23 +33,43 @@ posix_acl_xattr_count(size_t size)
}
#ifdef CONFIG_FS_POSIX_ACL
-void posix_acl_fix_xattr_from_user(void *value, size_t size);
-void posix_acl_fix_xattr_to_user(void *value, size_t size);
+struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
+ const void *value, size_t size);
#else
-static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
-{
-}
-static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
+static inline struct posix_acl *
+posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
+ size_t size)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif
-struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
- const void *value, size_t size);
int posix_acl_to_xattr(struct user_namespace *user_ns,
const struct posix_acl *acl, void *buffer, size_t size);
+static inline const char *posix_acl_xattr_name(int type)
+{
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ return XATTR_NAME_POSIX_ACL_ACCESS;
+ case ACL_TYPE_DEFAULT:
+ return XATTR_NAME_POSIX_ACL_DEFAULT;
+ }
+
+ return "";
+}
+
+static inline int posix_acl_type(const char *name)
+{
+ if (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0)
+ return ACL_TYPE_ACCESS;
+ else if (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)
+ return ACL_TYPE_DEFAULT;
+
+ return -1;
+}
-extern const struct xattr_handler posix_acl_access_xattr_handler;
-extern const struct xattr_handler posix_acl_default_xattr_handler;
+/* These are legacy handlers. Don't use them for new code. */
+extern const struct xattr_handler nop_posix_acl_access;
+extern const struct xattr_handler nop_posix_acl_default;
#endif /* _POSIX_ACL_XATTR_H */
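The new name/type helpers pair up as in this sketch (is_acl_xattr() is hypothetical):

#include <linux/posix_acl_xattr.h>

static bool is_acl_xattr(const char *name)
{
	int type = posix_acl_type(name);

	/* posix_acl_type() returns -1 for anything that is not an ACL name */
	return type == ACL_TYPE_ACCESS || type == ACL_TYPE_DEFAULT;
}

/* And the reverse mapping: posix_acl_xattr_name(ACL_TYPE_ACCESS)
 * yields XATTR_NAME_POSIX_ACL_ACCESS. */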
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
deleted file mode 100644
index 51976b52f373..000000000000
--- a/include/linux/power/ab8500.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson 2013
- * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
- */
-
-#ifndef PWR_AB8500_H
-#define PWR_AB8500_H
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
-extern const int ab8500_temp_tbl_a_size;
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
-extern const int ab8500_temp_tbl_b_size;
-
-#endif /* PWR_AB8500_H */
diff --git a/include/linux/power/bq25890_charger.h b/include/linux/power/bq25890_charger.h
new file mode 100644
index 000000000000..c706ddb77a08
--- /dev/null
+++ b/include/linux/power/bq25890_charger.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for the TI bq25890 battery charger driver.
+ */
+
+#ifndef _BQ25890_CHARGER_H_
+#define _BQ25890_CHARGER_H_
+
+struct regulator_init_data;
+
+struct bq25890_platform_data {
+ const struct regulator_init_data *regulator_init_data;
+};
+
+#endif
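Board code would hand the charger its regulator constraints roughly like this (a sketch; the constraint values are placeholders):

#include <linux/power/bq25890_charger.h>
#include <linux/regulator/machine.h>

static const struct regulator_init_data bq25890_vbus_init = {
	.constraints = {
		.name = "vbus-boost",	/* placeholder name */
	},
};

static const struct bq25890_platform_data bq25890_pdata = {
	.regulator_init_data = &bq25890_vbus_init,
};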
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 987d9652aa4e..b9e5bd2b42d3 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_BQ27X00_BATTERY_H__
#define __LINUX_BQ27X00_BATTERY_H__
+#include <linux/power_supply.h>
+
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
@@ -32,6 +34,8 @@ enum bq27xxx_chip {
BQ27621,
BQ27Z561,
BQ28Z610,
+ BQ34Z100,
+ BQ78Z100,
};
struct bq27xxx_device_info;
@@ -52,13 +56,11 @@ struct bq27xxx_reg_cache {
int capacity;
int energy;
int flags;
- int power_avg;
int health;
};
struct bq27xxx_device_info {
struct device *dev;
- int id;
enum bq27xxx_chip chip;
u32 opts;
const char *name;
@@ -67,7 +69,9 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
+ bool removed;
unsigned long last_update;
+ union power_supply_propval last_status;
struct delayed_work work;
struct power_supply *bat;
struct list_head list;
@@ -78,5 +82,6 @@ struct bq27xxx_device_info {
void bq27xxx_battery_update(struct bq27xxx_device_info *di);
int bq27xxx_battery_setup(struct bq27xxx_device_info *di);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di);
+extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops;
#endif
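The declarations above imply a driver lifecycle along these lines; a sketch with the bus accessors and device wiring omitted (my_probe() and anything not declared in this header are assumptions):

#include <linux/device.h>
#include <linux/power/bq27xxx_battery.h>

static int my_probe(struct device *dev)
{
	struct bq27xxx_device_info *di;

	di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
	if (!di)
		return -ENOMEM;

	di->dev = dev;
	di->chip = BQ27000;
	/* di->bus read/write accessors must be wired up before setup */

	return bq27xxx_battery_setup(di);	/* paired with _teardown() */
}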
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index ae94dcebd936..45e228b353ea 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -31,22 +31,16 @@ enum polling_modes {
CM_POLL_CHARGING_ONLY,
};
-enum cm_event_types {
- CM_EVENT_UNKNOWN = 0,
- CM_EVENT_BATT_FULL,
- CM_EVENT_BATT_IN,
- CM_EVENT_BATT_OUT,
- CM_EVENT_BATT_OVERHEAT,
- CM_EVENT_BATT_COLD,
- CM_EVENT_EXT_PWR_IN_OUT,
- CM_EVENT_CHG_START_STOP,
- CM_EVENT_OTHERS,
+enum cm_batt_temp {
+ CM_BATT_OK = 0,
+ CM_BATT_OVERHEAT,
+ CM_BATT_COLD,
};
/**
* struct charger_cable
* @extcon_name: the name of extcon device.
- * @name: the name of charger cable(external connector).
+ * @name: the name of the cable connector
* @extcon_dev: the extcon device.
* @wq: the workqueue to control charger according to the state of
* charger cable. If charger cable is attached, enable charger.
@@ -62,9 +56,10 @@ enum cm_event_types {
struct charger_cable {
const char *extcon_name;
const char *name;
+ struct extcon_dev *extcon_dev;
+ u64 extcon_type;
/* The charger-manager use Extcon framework */
- struct extcon_specific_cable_nb extcon_dev;
struct work_struct wq;
struct notifier_block nb;
@@ -131,11 +126,10 @@ struct charger_regulator {
* @psy_name: the name of power-supply-class for charger manager
* @polling_mode:
* Determine which polling mode will be used
- * @fullbatt_vchkdrop_ms:
* @fullbatt_vchkdrop_uV:
* Check voltage drop after the battery is fully charged.
- * If it has dropped more than fullbatt_vchkdrop_uV after
- * fullbatt_vchkdrop_ms, CM will restart charging.
+ * If it has dropped more than fullbatt_vchkdrop_uV,
+ * CM will restart charging.
* @fullbatt_uV: voltage in microvolt
* If VBATT >= fullbatt_uV, it is assumed to be full.
* @fullbatt_soc: state of Charge in %
@@ -172,7 +166,6 @@ struct charger_desc {
enum polling_modes polling_mode;
unsigned int polling_interval_ms;
- unsigned int fullbatt_vchkdrop_ms;
unsigned int fullbatt_vchkdrop_uV;
unsigned int fullbatt_uV;
unsigned int fullbatt_soc;
@@ -211,9 +204,6 @@ struct charger_desc {
* @charger_stat: array of power_supply for chargers
* @tzd_batt : thermal zone device for battery
* @charger_enabled: the state of charger
- * @fullbatt_vchk_jiffies_at:
- * jiffies at the time full battery check will occur.
- * @fullbatt_vchk_work: work queue for full battery check
* @emergency_stop:
* When setting true, stop charging
* @psy_name_buf: the name of power-supply-class for charger manager
@@ -224,6 +214,7 @@ struct charger_desc {
* saved status of battery before entering suspend-to-RAM
* @charging_start_time: saved start time of enabling charging
* @charging_end_time: saved end time of disabling charging
+ * @battery_status: Current battery status
*/
struct charger_manager {
struct list_head entry;
@@ -235,9 +226,6 @@ struct charger_manager {
#endif
bool charger_enabled;
- unsigned long fullbatt_vchk_jiffies_at;
- struct delayed_work fullbatt_vchk_work;
-
int emergency_stop;
char psy_name_buf[PSY_NAME_MAX + 1];
@@ -246,13 +234,8 @@ struct charger_manager {
u64 charging_start_time;
u64 charging_end_time;
+
+ int battery_status;
};
-#if IS_ENABLED(CONFIG_CHARGER_MANAGER)
-extern void cm_notify_event(struct power_supply *psy,
- enum cm_event_types type, char *msg);
-#else
-static inline void cm_notify_event(struct power_supply *psy,
- enum cm_event_types type, char *msg) { }
-#endif
#endif /* _CHARGER_MANAGER_H */
diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h
deleted file mode 100644
index 40f9c7628f7b..000000000000
--- a/include/linux/power/generic-adc-battery.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012, Anish Kumar <anish198519851985@gmail.com>
- */
-
-#ifndef GENERIC_ADC_BATTERY_H
-#define GENERIC_ADC_BATTERY_H
-
-/**
- * struct gab_platform_data - platform_data for generic adc iio battery driver.
- * @battery_info: recommended structure to specify static power supply
- * parameters
- * @cal_charge: calculate charge level.
- * @gpio_charge_finished: gpio for the charger.
- * @gpio_inverted: Should be 1 if the GPIO is active low otherwise 0
- * @jitter_delay: delay required after the interrupt to check battery
- * status. Default is 10 ms.
- */
-struct gab_platform_data {
- struct power_supply_info battery_info;
- int (*cal_charge)(long value);
- int gpio_charge_finished;
- bool gpio_inverted;
- int jitter_delay;
-};
-
-#endif /* GENERIC_ADC_BATTERY_H */
diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h
index 5a5a8de98181..c0b7657ac1df 100644
--- a/include/linux/power/gpio-charger.h
+++ b/include/linux/power/gpio-charger.h
@@ -13,18 +13,12 @@
* struct gpio_charger_platform_data - platform_data for gpio_charger devices
* @name: Name for the chargers power_supply device
* @type: Type of the charger
- * @gpio: GPIO which is used to indicate the chargers status
- * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0
* @supplied_to: Array of battery names to which this chargers supplies power
* @num_supplicants: Number of entries in the supplied_to array
*/
struct gpio_charger_platform_data {
const char *name;
enum power_supply_type type;
-
- int gpio;
- int gpio_active_low;
-
char **supplied_to;
size_t num_supplicants;
};
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index d55c746ac56e..c417abd2ab70 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -69,7 +69,7 @@ enum max17042_register {
MAX17042_RelaxCFG = 0x2A,
MAX17042_MiscCFG = 0x2B,
MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
+ MAX17042_TOFF = 0x2D,
MAX17042_CGAIN = 0x2E,
MAX17042_COFF = 0x2F,
@@ -78,7 +78,7 @@ enum max17042_register {
MAX17042_T_empty = 0x34,
MAX17042_FullCAP0 = 0x35,
- MAX17042_LAvg_empty = 0x36,
+ MAX17042_IAvg_empty = 0x36,
MAX17042_FCTC = 0x37,
MAX17042_RCOMP0 = 0x38,
MAX17042_TempCo = 0x39,
@@ -110,13 +110,14 @@ enum max17042_register {
MAX17042_VFSOC = 0xFF,
};
+/* Registers specific to max17055 only */
enum max17055_register {
MAX17055_QRes = 0x0C,
+ MAX17055_RCell = 0x14,
MAX17055_TTF = 0x20,
- MAX17055_V_empty = 0x3A,
- MAX17055_TIMER = 0x3E,
+ MAX17055_DieTemp = 0x34,
MAX17055_USER_MEM = 0x40,
- MAX17055_RGAIN = 0x42,
+ MAX17055_RGAIN = 0x43,
MAX17055_ConvgCfg = 0x49,
MAX17055_VFRemCap = 0x4A,
@@ -155,13 +156,14 @@ enum max17055_register {
MAX17055_AtAvCap = 0xDF,
};
-/* Registers specific to max17047/50 */
+/* Registers specific to max17047/50/55 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
MAX17047_FullSOCThr = 0x13,
MAX17047_QRTbl10 = 0x22,
MAX17047_QRTbl20 = 0x32,
MAX17047_V_empty = 0x3A,
+ MAX17047_TIMER = 0x3E,
MAX17047_QRTbl30 = 0x42,
};
@@ -219,7 +221,7 @@ struct max17042_config_data {
u16 fullcap; /* 0x10 */
u16 fullcapnom; /* 0x23 */
u16 socempty; /* 0x33 */
- u16 lavg_empty; /* 0x36 */
+ u16 iavg_empty; /* 0x36 */
u16 dqacc; /* 0x45 */
u16 dpacc; /* 0x46 */
u16 qrtbl00; /* 0x12 */
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
deleted file mode 100644
index 02f94a1b323b..000000000000
--- a/include/linux/power/max8903_charger.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- */
-
-#ifndef __MAX8903_CHARGER_H__
-#define __MAX8903_CHARGER_H__
-
-struct max8903_pdata {
- /*
- * GPIOs
- * cen, chg, flt, dcm and usus are optional.
- * dok and uok are not optional depending on the status of
- * dc_valid and usb_valid.
- */
- int cen; /* Charger Enable input */
- int dok; /* DC(Adapter) Power OK output */
- int uok; /* USB Power OK output */
- int chg; /* Charger status output */
- int flt; /* Fault output */
- int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
- int usus; /* USB Suspend Input (1: suspended) */
-
- /*
- * DC(Adapter/TA) is wired
- * When dc_valid is true,
- * dok should be valid.
- *
- * At least one of dc_valid or usb_valid should be true.
- */
- bool dc_valid;
- /*
- * USB is wired
- * When usb_valid is true,
- * uok should be valid.
- */
- bool usb_valid;
-};
-
-#endif /* __MAX8903_CHARGER_H__ */
diff --git a/include/linux/power/power_on_reason.h b/include/linux/power/power_on_reason.h
new file mode 100644
index 000000000000..95a1ec0c403c
--- /dev/null
+++ b/include/linux/power/power_on_reason.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Kamel Bouhra <kamel.bouhara@bootlin.com>
+ */
+
+#ifndef POWER_ON_REASON_H
+#define POWER_ON_REASON_H
+
+#define POWER_ON_REASON_REGULAR "regular power-up"
+#define POWER_ON_REASON_RTC "RTC wakeup"
+#define POWER_ON_REASON_WATCHDOG "watchdog timeout"
+#define POWER_ON_REASON_SOFTWARE "software reset"
+#define POWER_ON_REASON_RST_BTN "reset button action"
+#define POWER_ON_REASON_CPU_CLK_FAIL "CPU clock failure"
+#define POWER_ON_REASON_XTAL_FAIL "crystal oscillator failure"
+#define POWER_ON_REASON_BROWN_OUT "brown-out reset"
+#define POWER_ON_REASON_UNKNOWN "unknown reason"
+
+#endif /* POWER_ON_REASON_H */
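These strings are intended to be reported verbatim, for example through a sysfs attribute; a sketch (the attribute, its placement, and the detection logic are all hypothetical):

#include <linux/device.h>
#include <linux/power/power_on_reason.h>

/* In a real driver this would be derived from a reset-status register. */
static const char *boot_reason = POWER_ON_REASON_WATCHDOG;

static ssize_t power_on_reason_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", boot_reason);
}
static DEVICE_ATTR_RO(power_on_reason);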
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 971c9264179e..3a2c79dfc1ff 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -155,6 +155,7 @@ struct omap_sr {
struct voltagedomain *voltdm;
struct dentry *dbg_dir;
unsigned int irq;
+ struct clk *fck;
int srid;
int ip_type;
int nvalue_count;
@@ -169,6 +170,7 @@ struct omap_sr {
u32 senp_mod;
u32 senn_mod;
void __iomem *base;
+ unsigned long enabled:1;
};
/**
@@ -271,8 +273,6 @@ struct omap_sr_nvalue_table {
* @senn_avgweight SENNAVGWEIGHT value of the sr AVGWEIGHT register
* @senp_avgweight SENPAVGWEIGHT value of the sr AVGWEIGHT register
* @nvalue_count: Number of distinct nvalues in the nvalue table
- * @enable_on_init: whether this sr module needs to enabled at
- * boot up or not.
* @nvalue_table: table containing the efuse offsets and nvalues
* corresponding to them.
* @voltdm: Pointer to the voltage domain associated with the SR
@@ -288,7 +288,6 @@ struct omap_sr_data {
u32 senn_avgweight;
u32 senp_avgweight;
int nvalue_count;
- bool enable_on_init;
struct omap_sr_nvalue_table *nvalue_table;
struct voltagedomain *voltdm;
};
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
deleted file mode 100644
index e0b687a4d20c..000000000000
--- a/include/linux/power/smb347-charger.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Summit Microelectronics SMB347 Battery Charger Driver
- *
- * Copyright (C) 2011, Intel Corporation
- *
- * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
- * Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef SMB347_CHARGER_H
-#define SMB347_CHARGER_H
-
-#include <linux/types.h>
-#include <linux/power_supply.h>
-
-enum {
- /* use the default compensation method */
- SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
-
- SMB347_SOFT_TEMP_COMPENSATE_NONE,
- SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
- SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
-};
-
-/* Use default factory programmed value for hard/soft temperature limit */
-#define SMB347_TEMP_USE_DEFAULT -273
-
-/*
- * Charging enable can be controlled by software (via i2c) by
- * smb347-charger driver or by EN pin (active low/high).
- */
-enum smb347_chg_enable {
- SMB347_CHG_ENABLE_SW,
- SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
- SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
-};
-
-/**
- * struct smb347_charger_platform_data - platform data for SMB347 charger
- * @battery_info: Information about the battery
- * @max_charge_current: maximum current (in uA) the battery can be charged
- * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
- * @pre_charge_current: current (in uA) to use in pre-charging phase
- * @termination_current: current (in uA) used to determine when the
- * charging cycle terminates
- * @pre_to_fast_voltage: voltage (in uV) threshold used for transitioning from
- * pre-charge to fast charge mode
- * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
- * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
- * input
- * @chip_temp_threshold: die temperature where device starts limiting charge
- * current [%100 - %130] (in degree C)
- * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
- * granularity is 5 deg C.
- * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
- * granularity is 5 deg C.
- * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
- * granularity is 5 deg C.
- * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
- * granularity is 5 deg C.
- * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
- * @soft_temp_limit_compensation: compensation method when soft temperature
- * limit is hit
- * @charge_current_compensation: current (in uA) for charging compensation
- * current when temperature hits soft limits
- * @use_mains: AC/DC input can be used
- * @use_usb: USB input can be used
- * @use_usb_otg: USB OTG output can be used (not implemented yet)
- * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
- * @enable_control: how charging enable/disable is controlled
- * (driver/pin controls)
- *
- * @use_mains, @use_usb, and @use_usb_otg are means to enable/disable
- * hardware support for these. This is useful when we want to have for
- * example OTG charging controlled via OTG transceiver driver and not by
- * the SMB347 hardware.
- *
- * Hard and soft temperature limit values are given as described in the
- * device data sheet and assuming NTC beta value is %3750. Even if this is
- * not the case, these values should be used. They can be mapped to the
- * corresponding NTC beta values with the help of table %2 in the data
- * sheet. So for example if NTC beta is %3375 and we want to program hard
- * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
- *
- * If zero value is given in any of the current and voltage values, the
- * factory programmed default will be used. For soft/hard temperature
- * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
- */
-struct smb347_charger_platform_data {
- struct power_supply_info battery_info;
- unsigned int max_charge_current;
- unsigned int max_charge_voltage;
- unsigned int pre_charge_current;
- unsigned int termination_current;
- unsigned int pre_to_fast_voltage;
- unsigned int mains_current_limit;
- unsigned int usb_hc_current_limit;
- unsigned int chip_temp_threshold;
- int soft_cold_temp_limit;
- int soft_hot_temp_limit;
- int hard_cold_temp_limit;
- int hard_hot_temp_limit;
- bool suspend_on_hard_temp_limit;
- unsigned int soft_temp_limit_compensation;
- unsigned int charge_current_compensation;
- bool use_mains;
- bool use_usb;
- bool use_usb_otg;
- int irq_gpio;
- enum smb347_chg_enable enable_control;
-};
-
-#endif /* SMB347_CHARGER_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 97cc4b85bf61..8e5705a56b85 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -49,6 +49,7 @@ enum {
POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */
POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */
POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */
+ POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */
};
enum {
@@ -66,6 +67,7 @@ enum {
POWER_SUPPLY_HEALTH_WARM,
POWER_SUPPLY_HEALTH_COOL,
POWER_SUPPLY_HEALTH_HOT,
+ POWER_SUPPLY_HEALTH_NO_BATTERY,
};
enum {
@@ -132,6 +134,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */
POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! */
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
POWER_SUPPLY_PROP_INPUT_POWER_LIMIT,
@@ -186,6 +189,7 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */
POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */
POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
+ POWER_SUPPLY_TYPE_WIRELESS, /* Wireless */
};
enum power_supply_usb_type {
@@ -201,6 +205,12 @@ enum power_supply_usb_type {
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
};
+enum power_supply_charge_behaviour {
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE,
+};
+
enum power_supply_notifier_events {
PSY_EVENT_PROP_CHANGED,
};
@@ -232,6 +242,7 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
+ u8 charge_behaviours;
const enum power_supply_usb_type *usb_types;
size_t num_usb_types;
const enum power_supply_property *properties;
@@ -291,6 +302,7 @@ struct power_supply {
bool initialized;
bool removing;
atomic_t use_cnt;
+ struct power_supply_battery_info *battery_info;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
@@ -339,43 +351,433 @@ struct power_supply_resistance_temp_table {
int resistance; /* internal resistance percent */
};
+struct power_supply_vbat_ri_table {
+ int vbat_uv; /* Battery voltage in microvolt */
+ int ri_uohm; /* Internal resistance in microohm */
+};
+
+/**
+ * struct power_supply_maintenance_charge_table - setting for maintenance charging
+ * @charge_current_max_ua: maintenance charging current that is used to keep
+ * the charge of the battery full as current is consumed after full charging.
+ * The corresponding charge_voltage_max_uv is used as a safeguard: when we
+ * reach this voltage the maintenance charging current is turned off. It is
+ * turned back on if we fall below this voltage.
+ * @charge_voltage_max_uv: maintenance charging voltage that is usually a bit
+ * lower than the constant_charge_voltage_max_uv. We can apply this setting's
+ * charge_current_max_ua until we get back up to this voltage.
+ * @safety_timer_minutes: maintenance charging safety timer, with an expiry
+ * time in minutes. We will only use maintenance charging in this setting
+ * for a certain amount of time, then we will first move to the next
+ * maintenance charge current and voltage pair in respective array and wait
+ * for the next safety timer timeout, or, if we reached the last maintenance
+ * charging setting, disable charging until we reach
+ * charge_restart_voltage_uv and restart ordinary CC/CV charging from there.
+ * These timers should be chosen to align with the typical discharge curve
+ * for the battery.
+ *
+ * Ordinary CC/CV charging will stop charging when the charge current goes
+ * below charge_term_current_ua, and then restart it (if the device is still
+ * plugged into the charger) at charge_restart_voltage_uv. This happens in most
+ * consumer products because the power usage while connected to a charger is
+ * not zero, and devices are not manufactured to draw power directly from the
+ * charger: instead they will at all times drain the battery a little, like
+ * the power used in standby mode. This will over time give a charge graph
+ * such as this:
+ *
+ * Energy
+ * ^ ... ... ... ... ... ... ...
+ * | . . . . . . . . . . . . .
+ * | .. . .. . .. . .. . .. . .. . ..
+ * |. .. .. .. .. .. ..
+ * +-------------------------------------------------------------------> t
+ *
+ * Practically this means that the Li-ions are wandering back and forth in the
+ * battery and this causes degeneration of the battery anode and cathode.
+ * To prolong the life of the battery, maintenance charging is applied after
+ * reaching charge_term_current_ua to hold up the charge in the battery while
+ * consuming power, thus lowering the wear on the battery:
+ *
+ * Energy
+ * ^ .......................................
+ * | . ......................
+ * | ..
+ * |.
+ * +-------------------------------------------------------------------> t
+ *
+ * Maintenance charging uses the voltages from this table: a table of settings
+ * is traversed using a slightly lower current and voltage than what is used for
+ * CC/CV charging. The maintenance charging will for safety reasons not go on
+ * indefinitely: we lower the current and voltage with successive maintenance
+ * settings, then disable charging completely after we reach the last one,
+ * and after that we do not restart charging until we reach
+ * charge_restart_voltage_uv (see struct power_supply_battery_info) and restart
+ * ordinary CC/CV charging from there.
+ *
+ * As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged
+ * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for up to
+ * 60 hours, then maintenance charged at 600mA and 4100mV for up to 200 hours.
+ * After this the charge cycle is restarted waiting for
+ * charge_restart_voltage_uv.
+ *
+ * For most mobile electronics this type of maintenance charging is enough for
+ * the user to disconnect the device and make use of it before both maintenance
+ * charging cycles are complete, if the current and voltage has been chosen
+ * appropriately. These need to be determined from battery discharge curves
+ * and expected standby current.
+ *
+ * If the voltage anyway drops to charge_restart_voltage_uv during maintenance
+ * charging, ordinary CC/CV charging is restarted. This can happen if the
+ * device is e.g. actively used during charging, so more current is drawn than
+ * the expected stand-by current. Also overvoltage protection will be applied
+ * as usual.
+ */
+struct power_supply_maintenance_charge_table {
+ int charge_current_max_ua;
+ int charge_voltage_max_uv;
+ int charge_safety_timer_minutes;
+};
+
#define POWER_SUPPLY_OCV_TEMP_MAX 20
-/*
+/**
+ * struct power_supply_battery_info - information about batteries
+ * @technology: from the POWER_SUPPLY_TECHNOLOGY_* enum
+ * @energy_full_design_uwh: energy content when fully charged in microwatt
+ * hours
+ * @charge_full_design_uah: charge content when fully charged in microampere
+ * hours
+ * @voltage_min_design_uv: minimum voltage across the poles when the battery
+ * is at minimum voltage level in microvolts. If the voltage drops below this
+ * level the battery will need precharging when using CC/CV charging.
+ * @voltage_max_design_uv: voltage across the poles when the battery is fully
+ * charged in microvolts. This is the "nominal voltage" i.e. the voltage
+ * printed on the label of the battery.
+ * @tricklecharge_current_ua: the tricklecharge current used when trickle
+ * charging the battery in microamperes. This is the charging phase when the
+ * battery is completely empty and we need to carefully trickle in some
+ * charge until we reach the precharging voltage.
+ * @precharge_current_ua: current to use in the precharge phase in microamperes,
+ * the precharge rate is limited by limiting the current to this value.
+ * @precharge_voltage_max_uv: the maximum voltage allowed when precharging in
+ * microvolts. When we pass this voltage we will nominally switch over to the
+ * CC (constant current) charging phase defined by constant_charge_current_ua
+ * and constant_charge_voltage_max_uv.
+ * @charge_term_current_ua: when the current in the CV (constant voltage)
+ * charging phase drops below this value in microamperes the charging will
+ * terminate completely and not restart until the voltage over the battery
+ * poles reach charge_restart_voltage_uv unless we use maintenance charging.
+ * @charge_restart_voltage_uv: when the battery has been fully charged by
+ * CC/CV charging and charging has been disabled, and the voltage subsequently
+ * drops below this value in microvolts, the charging will be restarted
+ * (typically using CV charging).
+ * @overvoltage_limit_uv: If the voltage exceeds the nominal voltage
+ * voltage_max_design_uv and we reach this voltage level, all charging must
+ * stop and emergency procedures take place, such as shutting down the system
+ * in some cases.
+ * @constant_charge_current_max_ua: current in microamperes to use in the CC
+ * (constant current) charging phase. The charging rate is limited
+ * by this current. This is the main charging phase and as the current is
+ * constant into the battery the voltage slowly ascends to
+ * constant_charge_voltage_max_uv.
+ * @constant_charge_voltage_max_uv: voltage in microvolts signifying the end of
+ * the CC (constant current) charging phase and the beginning of the CV
+ * (constant voltage) charging phase.
+ * @maintenance_charge: an array of maintenance charging settings to be used
+ * after the main CC/CV charging phase is complete.
+ * @maintenance_charge_size: the number of maintenance charging settings in
+ * maintenance_charge.
+ * @alert_low_temp_charge_current_ua: The charging current to use if the battery
+ * enters low alert temperature, i.e. if the internal temperature is between
+ * temp_alert_min and temp_min. No matter the charging phase, this
+ * and alert_low_temp_charge_voltage_uv will be applied.
+ * @alert_low_temp_charge_voltage_uv: Same as alert_low_temp_charge_current_ua,
+ * but for the charging voltage.
+ * @alert_high_temp_charge_current_ua: The charging current to use if the
+ * battery enters high alert temperature, i.e. if the internal temperature is
+ * between temp_alert_max and temp_max. No matter the charging phase, this
+ * and alert_high_temp_charge_voltage_uv will be applied, usually lowering
+ * the charging current as an evasive maneuver.
+ * @alert_high_temp_charge_voltage_uv: Same as
+ * alert_high_temp_charge_current_ua, but for the charging voltage.
+ * @factory_internal_resistance_uohm: the internal resistance of the battery
+ * at fabrication time, expressed in microohms. This resistance will vary
+ * depending on the lifetime and charge of the battery, so this is just a
+ * nominal ballpark figure. This internal resistance is given for the state
+ * when the battery is discharging.
+ * @factory_internal_resistance_charging_uohm: the internal resistance of the
+ * battery at fabrication time while charging, expressed in microohms.
+ * The charging process will affect the internal resistance of the battery
+ * so this value provides a better resistance under these circumstances.
+ * This resistance will vary depending on the lifetime and charge of the
+ * battery, so this is just a nominal ballpark figure.
+ * @ocv_temp: array indicating the open circuit voltage (OCV) capacity
+ * temperature indices. This is an array of temperatures in degrees Celsius
+ * indicating which capacity table to use for a certain temperature, since
+ * the capacity for reasons of chemistry will be different at different
+ * temperatures. Determining capacity is a multivariate problem and the
+ * temperature is the first variable we determine.
+ * @temp_ambient_alert_min: the battery will go outside of operating conditions
+ * when the ambient temperature goes below this temperature in degrees
+ * Celsius.
+ * @temp_ambient_alert_max: the battery will go outside of operating conditions
+ * when the ambient temperature goes above this temperature in degrees
+ * Celsius.
+ * @temp_alert_min: the battery should issue an alert if the internal
+ * temperature goes below this temperature in degrees Celsius.
+ * @temp_alert_max: the battery should issue an alert if the internal
+ * temperature goes above this temperature in degrees Celsius.
+ * @temp_min: the battery will go outside of operating conditions when
+ * the internal temperature goes below this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @temp_max: the battery will go outside of operating conditions when
+ * the internal temperature goes above this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @ocv_table: for each entry in ocv_temp there is a corresponding entry in
+ * ocv_table and a size for each entry in ocv_table_size. These arrays
+ * determine the capacity in percent in relation to the voltage in microvolts
+ * at the indexed temperature.
+ * @ocv_table_size: for each entry in ocv_temp this array is giving the size of
+ * each entry in the array of capacity arrays in ocv_table.
+ * @resist_table: this is a table that correlates a battery temperature to the
+ * expected internal resistance at this temperature. The resistance is given
+ * as a percentage of factory_internal_resistance_uohm. Knowing the
+ * resistance of the battery is usually necessary for calculating the open
+ * circuit voltage (OCV) that is then used with the ocv_table to calculate
+ * the capacity of the battery. The resist_table must be ordered descending
+ * by temperature: highest temperature with lowest resistance first, lowest
+ * temperature with highest resistance last.
+ * @resist_table_size: the number of items in the resist_table.
+ * @vbat2ri_discharging: this is a table that correlates Battery voltage (VBAT)
+ * to internal resistance (Ri). The resistance is given in microohm for the
+ * corresponding voltage in microvolts. The internal resistance is used to
+ * determine the open circuit voltage so that we can determine the capacity
+ * of the battery. These voltages to resistance tables apply when the battery
+ * is discharging. The table must be ordered descending by voltage: highest
+ * voltage first.
+ * @vbat2ri_discharging_size: the number of items in the vbat2ri_discharging
+ * table.
+ * @vbat2ri_charging: same function as vbat2ri_discharging but for the state
+ * when the battery is charging. Being under charge changes the battery's
+ * internal resistance characteristics, so a separate table is needed.
+ * The table must be ordered descending by voltage: highest voltage first.
+ * @vbat2ri_charging_size: the number of items in the vbat2ri_charging
+ * table.
+ * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance
+ * in ohms for this battery, if an identification resistor is mounted
+ * between a third battery terminal and ground. This scheme is used by many
+ * mobile device batteries.
+ * @bti_resistance_tolerance: The tolerance in percent of the BTI resistance,
+ * for example 10 for +/- 10%. If bti_resistance_ohm is set to 7000 and the
+ * tolerance is 10%, we will detect a proper battery if the BTI resistance
+ * is between 6300 and 7700 ohms.
+ *
* This is the recommended struct to manage static battery parameters,
* populated by power_supply_get_battery_info(). Most platform drivers should
* use these for consistency.
+ *
* Its field names must correspond to elements in enum power_supply_property.
- * The default field value is -EINVAL.
- * Power supply class itself doesn't use this.
+ * The default field value is -EINVAL or NULL for pointers.
+ *
+ * CC/CV CHARGING:
+ *
+ * The charging parameters here assume a CC/CV charging scheme. This method
+ * is most common with Lithium Ion batteries (other methods are possible) and
+ * looks as follows:
+ *
+ * ^ Battery voltage
+ * | --- overvoltage_limit_uv
+ * |
+ * | ...................................................
+ * | .. constant_charge_voltage_max_uv
+ * | ..
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .. precharge_voltage_max_uv
+ * | ..
+ * |. (trickle charging)
+ * +------------------------------------------------------------------> time
+ *
+ * ^ Current into the battery
+ * |
+ * | ............. constant_charge_current_max_ua
+ * | . .
+ * | . .
+ * | . .
+ * | . .
+ * | . ..
+ * | . ....
+ * | . .....
+ * | ... precharge_current_ua ....... charge_term_current_ua
+ * | . .
+ * | . .
+ * |.... tricklecharge_current_ua .
+ * | .
+ * +-----------------------------------------------------------------> time
+ *
+ * The two diagrams share the same time axis, so the voltage and current
+ * curves follow each other.
+ *
+ * For an empty battery, CC/CV charging proceeds over time like this:
+ *
+ * 1. When the battery is completely empty it may need to be charged with
+ * an especially small current so that electrons just "trickle in";
+ * this is the tricklecharge_current_ua.
+ *
+ * 2. Next a small initial pre-charge current (precharge_current_ua)
+ * is applied while the voltage is below precharge_voltage_max_uv, until
+ * we reach precharge_voltage_max_uv. CAUTION: in some texts this is
+ * referred to as "trickle charging", but the Linux kernel uses that term
+ * differently; see step 1 above!
+ *
+ * 3. Then the main charging current is applied, which is called the constant
+ * current (CC) phase. A current regulator is set up to allow
+ * constant_charge_current_max_ua of current to flow into the battery.
+ * The chemical reaction in the battery will make the voltage go up as
+ * charge goes into the battery. This current is applied until we reach
+ * the constant_charge_voltage_max_uv voltage.
+ *
+ * 4. At this voltage we switch over to the constant voltage (CV) phase. This
+ * means we allow current to go into the battery, but we keep the voltage
+ * fixed. This current will continue to charge the battery while keeping
+ * the voltage the same. A chemical reaction in the battery goes on
+ * storing energy without affecting the voltage. Over time the current
+ * will slowly drop and when we reach charge_term_current_ua we will
+ * end the constant voltage phase.
+ *
+ * After this the battery is fully charged. If we do not support maintenance
+ * charging, charging will not restart until power dissipation makes the
+ * voltage fall to charge_restart_voltage_uv; at that point we restart
+ * charging at the appropriate phase, usually inside the CV phase.
+ *
+ * If we support maintenance charging, the voltage is instead kept high
+ * after the CV phase with a very low current. This keeps the battery
+ * topped up while covering the consumption of the powered device for as
+ * long as it remains connected to the charger.
+ *
+ * All charging MUST terminate if the overvoltage_limit_uv is ever reached.
+ * Overcharging Lithium Ion cells can be DANGEROUS and lead to fire or
+ * explosions.
+ *
+ * DETERMINING BATTERY CAPACITY:
+ *
+ * Several members of the struct deal with trying to determine the remaining
+ * capacity in the battery, usually as a percentage of charge. In practice
+ * many chargers use a so-called fuel gauge or coulomb counter that measures
+ * how much charge goes into the battery and how much goes out (+/- leak
+ * consumption). This does not help if we do not know how much capacity the
+ * battery has to begin with, such as when it is first used or was taken out
+ * and charged in a separate charger. Therefore many capacity algorithms use
+ * the open circuit voltage with a look-up table to determine the rough
+ * capacity of the battery. The open circuit voltage can be conceptualized
+ * with an ideal voltage source (V) in series with an internal resistance (Ri)
+ * like this:
+ *
+ * +-------> IBAT >----------------+
+ * | ^ |
+ * [ ] Ri | |
+ * | | VBAT |
+ * o <---------- | |
+ * +| ^ | [ ] Rload
+ * .---. | | |
+ * | V | | OCV | |
+ * '---' | | |
+ * | | | |
+ * GND +-------------------------------+
+ *
+ * If we disconnect the load (here simplified as a fixed resistance Rload)
+ * and measure VBAT with an infinite impedance voltage meter we will get
+ * VBAT = OCV and this assumption is sometimes made even under load, assuming
+ * Rload is insignificant. However this will be of dubious quality because the
+ * load is rarely that small and Ri is strongly nonlinear depending on
+ * temperature and how much capacity is left in the battery due to the
+ * chemistry involved.
+ *
+ * In many practical applications we cannot just disconnect the battery from
+ * the load, so instead we often try to measure the instantaneous IBAT (the
+ * current out from the battery), estimate the Ri and thus calculate the
+ * voltage drop over Ri and compensate like this:
+ *
+ * OCV = VBAT - (IBAT * Ri)
+ *
+ * The tables vbat2ri_discharging and vbat2ri_charging are used to determine
+ * (by interpolation) the Ri from the VBAT under load. These curves are highly
+ * nonlinear and may need many datapoints but can be found in datasheets for
+ * some batteries. This gives the compensated open circuit voltage (OCV)
+ * for the battery even under load. Using this method also compensates for
+ * temperature changes in the environment: these change the internal
+ * resistance, which in turn affects VBAT under load, so correlating VBAT
+ * to Ri takes both remaining capacity and temperature into consideration.
+ *
+ * Alternatively a manufacturer can specify how the capacity of the battery
+ * depends on the battery temperature, which is the main factor affecting
+ * Ri. As we know, all chemical reactions are faster when it is warm and
+ * slower when it is cold. For example, you can put in 1500 mAh and only get
+ * 800 mAh out before the voltage drops too low. This effect is also highly
+ * nonlinear, and this is the purpose of resist_table: it takes a temperature
+ * and tells us what percentage of Ri the specified temperature corresponds
+ * to. Usually we have 100% of factory_internal_resistance_uohm at 25
+ * degrees Celsius.
+ *
+ * The power supply class itself doesn't use this struct as of now.
*/
struct power_supply_battery_info {
- int energy_full_design_uwh; /* microWatt-hours */
- int charge_full_design_uah; /* microAmp-hours */
- int voltage_min_design_uv; /* microVolts */
- int voltage_max_design_uv; /* microVolts */
- int tricklecharge_current_ua; /* microAmps */
- int precharge_current_ua; /* microAmps */
- int precharge_voltage_max_uv; /* microVolts */
- int charge_term_current_ua; /* microAmps */
- int charge_restart_voltage_uv; /* microVolts */
- int overvoltage_limit_uv; /* microVolts */
- int constant_charge_current_max_ua; /* microAmps */
- int constant_charge_voltage_max_uv; /* microVolts */
- int factory_internal_resistance_uohm; /* microOhms */
- int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
+ unsigned int technology;
+ int energy_full_design_uwh;
+ int charge_full_design_uah;
+ int voltage_min_design_uv;
+ int voltage_max_design_uv;
+ int tricklecharge_current_ua;
+ int precharge_current_ua;
+ int precharge_voltage_max_uv;
+ int charge_term_current_ua;
+ int charge_restart_voltage_uv;
+ int overvoltage_limit_uv;
+ int constant_charge_current_max_ua;
+ int constant_charge_voltage_max_uv;
+ struct power_supply_maintenance_charge_table *maintenance_charge;
+ int maintenance_charge_size;
+ int alert_low_temp_charge_current_ua;
+ int alert_low_temp_charge_voltage_uv;
+ int alert_high_temp_charge_current_ua;
+ int alert_high_temp_charge_voltage_uv;
+ int factory_internal_resistance_uohm;
+ int factory_internal_resistance_charging_uohm;
+ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];
+ int temp_ambient_alert_min;
+ int temp_ambient_alert_max;
+ int temp_alert_min;
+ int temp_alert_max;
+ int temp_min;
+ int temp_max;
struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
struct power_supply_resistance_temp_table *resist_table;
int resist_table_size;
+ struct power_supply_vbat_ri_table *vbat2ri_discharging;
+ int vbat2ri_discharging_size;
+ struct power_supply_vbat_ri_table *vbat2ri_charging;
+ int vbat2ri_charging_size;
+ int bti_resistance_ohm;
+ int bti_resistance_tolerance;
};
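
To make the CC/CV description above concrete, here is a minimal sketch of how a charger driver might derive the current to program from these fields. It is illustrative only and not part of this patch; the phase thresholds (in particular the trickle-charge cutoff) and the function name are invented:

    /* Hedged sketch: pick a CC/CV charging current from battery info.
     * The trickle cutoff (half the pre-charge ceiling) is an assumption,
     * not something the struct mandates; unset fields hold -EINVAL.
     */
    static int select_charge_current_ua(const struct power_supply_battery_info *info,
                                        int vbat_uv)
    {
            if (vbat_uv >= info->overvoltage_limit_uv)
                    return 0;       /* MUST stop: overvoltage is dangerous */
            if (vbat_uv < info->precharge_voltage_max_uv / 2)
                    return info->tricklecharge_current_ua;  /* deeply drained */
            if (vbat_uv < info->precharge_voltage_max_uv)
                    return info->precharge_current_ua;      /* pre-charge phase */
            /* CC phase; the current tapers off in the CV phase and charging
             * terminates when it drops to charge_term_current_ua. */
            return info->constant_charge_current_max_ua;
    }
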
-extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern struct power_supply *power_supply_get_by_name(const char *name);
extern void power_supply_put(struct power_supply *psy);
+#else
+static inline void power_supply_put(struct power_supply *psy) {}
+static inline struct power_supply *power_supply_get_by_name(const char *name)
+{ return NULL; }
+#endif
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
@@ -390,10 +792,17 @@ devm_power_supply_get_by_phandle(struct device *dev, const char *property)
{ return NULL; }
#endif /* CONFIG_OF */
+extern const enum power_supply_property power_supply_battery_info_properties[];
+extern const size_t power_supply_battery_info_properties_size;
extern int power_supply_get_battery_info(struct power_supply *psy,
- struct power_supply_battery_info *info);
+ struct power_supply_battery_info **info_out);
extern void power_supply_put_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info);
+extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info *info,
+ enum power_supply_property psp);
+extern int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
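
For reference, a hedged caller-side sketch (not part of this diff) of the reworked power_supply_get_battery_info(), which now allocates the info and returns it through a double pointer; it must be balanced with power_supply_put_battery_info(). The psy variable and program_cc_current() are assumed context:

    struct power_supply_battery_info *info;
    int ret;

    ret = power_supply_get_battery_info(psy, &info);
    if (ret)
            return ret;     /* no battery description available */

    /* Unset fields keep the documented -EINVAL default. */
    if (info->constant_charge_current_max_ua != -EINVAL)
            program_cc_current(info->constant_charge_current_max_ua);  /* invented */

    power_supply_put_battery_info(psy, info);
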
extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
int table_len, int ocv);
extern struct power_supply_battery_ocv_table *
@@ -404,12 +813,43 @@ extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
extern int
power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
int table_len, int temp);
+extern int power_supply_vbat2ri(struct power_supply_battery_info *info,
+ int vbat_uv, bool charging);
+extern struct power_supply_maintenance_charge_table *
+power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index);
+extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
+ int resistance);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
-extern int power_supply_set_input_current_limit_from_supplier(
- struct power_supply *psy);
+int power_supply_get_property_from_supplier(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
extern int power_supply_set_battery_charged(struct power_supply *psy);
+static inline bool
+power_supply_supports_maintenance_charging(struct power_supply_battery_info *info)
+{
+ struct power_supply_maintenance_charge_table *mt;
+
+ mt = power_supply_get_maintenance_charging_setting(info, 0);
+
+ return (mt != NULL);
+}
+
+static inline bool
+power_supply_supports_vbat2ri(struct power_supply_battery_info *info)
+{
+ return ((info->vbat2ri_discharging != NULL) &&
+ info->vbat2ri_discharging_size > 0);
+}
+
+static inline bool
+power_supply_supports_temp2ri(struct power_supply_battery_info *info)
+{
+ return ((info->resist_table != NULL) &&
+ info->resist_table_size > 0);
+}
+
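
Tying the capacity discussion together, a speculative sketch (not in the patch) that combines the helpers declared above: look up Ri, compensate VBAT for the IR drop, then feed the resulting OCV into the capacity tables. The exact parameter list of power_supply_batinfo_ocv2cap() is assumed here, the vbat_uv/ibat_ua readings are assumed to come from fuel-gauge hardware, and div_s64() is from linux/math64.h:

    /* Hedged sketch: estimate remaining capacity (in percent) under load. */
    static int estimate_capacity(struct power_supply_battery_info *info,
                                 int vbat_uv, int ibat_ua, int temp, bool charging)
    {
            int ri_uohm, ocv_uv;

            if (power_supply_supports_vbat2ri(info))
                    ri_uohm = power_supply_vbat2ri(info, vbat_uv, charging);
            else if (power_supply_supports_temp2ri(info))
                    /* resist_table yields a percentage of the factory Ri */
                    ri_uohm = info->factory_internal_resistance_uohm *
                            power_supply_temp2resist_simple(info->resist_table,
                                                            info->resist_table_size,
                                                            temp) / 100;
            else
                    return -EINVAL;

            /* OCV = VBAT - (IBAT * Ri); uA * uOhm / 1e6 gives uV */
            ocv_uv = vbat_uv - div_s64((s64)ibat_ua * ri_uohm, 1000000);

            return power_supply_batinfo_ocv2cap(info, ocv_uv, temp);
    }
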
#ifdef CONFIG_POWER_SUPPLY
extern int power_supply_is_system_supplied(void);
#else
@@ -419,9 +859,16 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
extern int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+#else
+static inline int power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{ return 0; }
+#endif
extern int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp);
extern void power_supply_external_power_changed(struct power_supply *psy);
@@ -448,8 +895,7 @@ extern int power_supply_powers(struct power_supply *psy, struct device *dev);
#define to_power_supply(device) container_of(device, struct power_supply, dev)
extern void *power_supply_get_drvdata(struct power_supply *psy);
-/* For APM emulation, think legacy userspace. */
-extern struct class *power_supply_class;
+extern int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data));
static inline bool power_supply_is_amp_property(enum power_supply_property psp)
{
@@ -469,12 +915,12 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_BOOT:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
static inline bool power_supply_is_watt_property(enum power_supply_property psp)
@@ -497,12 +943,12 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_POWER_NOW:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
#ifdef CONFIG_POWER_SUPPLY_HWMON
@@ -518,4 +964,28 @@ static inline
void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {}
#endif
+#ifdef CONFIG_SYSFS
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf);
+
+int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf);
+#else
+static inline
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_behaviour_parse(unsigned int available_behaviours,
+ const char *buf)
+{
+ return -EOPNOTSUPP;
+}
+#endif
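
As a usage illustration for the sysfs helpers above (a hedged sketch, not from this diff; my_battery, its drvdata layout and my_apply_behaviour() are invented):

    static ssize_t charge_behaviour_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
    {
            struct my_battery *bat = dev_get_drvdata(dev);
            int behaviour;

            /* Maps the user string to an enum value, or a negative errno. */
            behaviour = power_supply_charge_behaviour_parse(bat->available_behaviours, buf);
            if (behaviour < 0)
                    return behaviour;

            my_apply_behaviour(bat, behaviour);
            return count;
    }
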
+
#endif /* __LINUX_POWER_SUPPLY_H__ */
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
index 4537f57f9e42..3d557bbcd2c7 100644
--- a/include/linux/powercap.h
+++ b/include/linux/powercap.h
@@ -44,19 +44,18 @@ struct powercap_control_type_ops {
};
/**
- * struct powercap_control_type- Defines a powercap control_type
- * @name: name of control_type
+ * struct powercap_control_type - Defines a powercap control_type
* @dev: device for this control_type
* @idr: idr to have unique id for its child
- * @root_node: Root holding power zones for this control_type
+ * @nr_zones: counter for number of zones of this type
* @ops: Pointer to callback struct
- * @node_lock: mutex for control type
+ * @lock: mutex for control type
* @allocated: This is possible that client owns the memory
* used by this structure. In this case
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @ctrl_inst: link to the control_type list
+ * @node: linked-list node
*
* Defines powercap control_type. This acts as a container for power
* zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
@@ -129,7 +128,7 @@ struct powercap_zone_ops {
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @constraint_ptr: List of constraints for this zone.
+ * @constraints: List of constraints for this zone.
*
* This defines a power zone instance. The fields of this structure are
* private, and should not be used by client drivers.
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
index 9d3ffc8f5ea6..fb847e47f148 100644
--- a/include/linux/ppp-comp.h
+++ b/include/linux/ppp-comp.h
@@ -9,7 +9,7 @@
#include <uapi/linux/ppp-comp.h>
-
+struct compstat;
struct module;
/*
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 98966064ee68..45e6e427ceb8 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -20,6 +20,8 @@
#include <linux/poll.h>
#include <net/net_namespace.h>
+struct net_device_path;
+struct net_device_path_ctx;
struct ppp_channel;
struct ppp_channel_ops {
@@ -28,6 +30,9 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ int (*fill_forward_path)(struct net_device_path_ctx *,
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index 9d2b388fae1a..b7e57fdbd413 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -11,4 +11,18 @@
#include <uapi/linux/ppp_defs.h>
#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c)
+
+/**
+ * ppp_proto_is_valid - checks if PPP protocol is valid
+ * @proto: PPP protocol
+ *
+ * Assumes proto is not compressed.
+ * Protocol is valid if the value is odd and the least significant bit of the
+ * most significant octet is 0 (see RFC 1661, section 2).
+ */
+static inline bool ppp_proto_is_valid(u16 proto)
+{
+ return !!((proto & 0x0101) == 0x0001);
+}
+
#endif /* _PPP_DEFS_H_ */
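
A quick illustration of the rule (not part of the patch); PPP_IP is 0x0021:

    ppp_proto_is_valid(0x0021);     /* true: odd value, even MSB octet */
    ppp_proto_is_valid(0x0022);     /* false: value is even */
    ppp_proto_is_valid(0x0121);     /* false: LSB of the MSB octet is set */
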
diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h
deleted file mode 100644
index 7bf49908be06..000000000000
--- a/include/linux/pps-gpio.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * pps-gpio.h -- PPS client for GPIOs
- *
- * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca>
- */
-
-#ifndef _PPS_GPIO_H
-#define _PPS_GPIO_H
-
-struct pps_gpio_platform_data {
- struct gpio_desc *gpio_pin;
- struct gpio_desc *echo_pin;
- bool assert_falling_edge;
- bool capture_clear;
- unsigned int echo_active_ms;
-};
-
-#endif /* _PPS_GPIO_H */
diff --git a/include/linux/pr.h b/include/linux/pr.h
index 94ceec713afe..3003daec28a5 100644
--- a/include/linux/pr.h
+++ b/include/linux/pr.h
@@ -4,6 +4,18 @@
#include <uapi/linux/pr.h>
+struct pr_keys {
+ u32 generation;
+ u32 num_keys;
+ u64 keys[];
+};
+
+struct pr_held_reservation {
+ u64 key;
+ u32 generation;
+ enum pr_type type;
+};
+
struct pr_ops {
int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
u32 flags);
@@ -14,6 +26,19 @@ struct pr_ops {
int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort);
int (*pr_clear)(struct block_device *bdev, u64 key);
+ /*
+ * pr_read_keys - Read the registered keys and return them in the
+ * pr_keys->keys array. The keys array will have been allocated at the
+ * end of the pr_keys struct, and pr_keys->num_keys must be set to the
+ * number of keys the array can hold. If there are more than can fit
+ * in the array, success will still be returned and pr_keys->num_keys
+ * will reflect the total number of keys the device contains, so the
+ * caller can retry with a larger array.
+ */
+ int (*pr_read_keys)(struct block_device *bdev,
+ struct pr_keys *keys_info);
+ int (*pr_read_reservation)(struct block_device *bdev,
+ struct pr_held_reservation *rsv);
};
#endif /* LINUX_PR_H */
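
The retry protocol described in the pr_read_keys comment could be driven from the caller like this hedged sketch (not part of the patch; allocation flags and error handling are simplified):

    /* Hedged sketch: grow the buffer until all registered keys fit. */
    static struct pr_keys *read_all_keys(struct block_device *bdev,
                                         const struct pr_ops *ops)
    {
            u32 num_keys = 16;      /* arbitrary initial guess */
            struct pr_keys *keys;

            for (;;) {
                    keys = kzalloc(struct_size(keys, keys, num_keys), GFP_KERNEL);
                    if (!keys)
                            return NULL;
                    keys->num_keys = num_keys;
                    if (ops->pr_read_keys(bdev, keys) < 0) {
                            kfree(keys);
                            return NULL;
                    }
                    if (keys->num_keys <= num_keys)
                            return keys;            /* everything fitted */
                    num_keys = keys->num_keys;      /* device holds more: retry */
                    kfree(keys);
            }
    }
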
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index aa16e6468f91..f7f1e5251c67 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -9,19 +9,13 @@
#define _LINUX_PRANDOM_H
#include <linux/types.h>
-#include <linux/percpu.h>
-
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
+#include <linux/once.h>
+#include <linux/random.h>
struct rnd_state {
__u32 s1, s2, s3, s4;
};
-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
-
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
@@ -29,23 +23,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
#define prandom_init_once(pcpu_state) \
DO_ONCE(prandom_seed_full_state, (pcpu_state))
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
-}
-
/*
* Handle minimum values for seeds
*/
@@ -61,7 +38,7 @@ static inline u32 __seed(u32 x, u32 m)
*/
static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
state->s1 = __seed(i, 2U);
state->s2 = __seed(i, 8U);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7d9c1c0e149c..7233e9cf1bab 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,7 +8,8 @@
*/
#include <linux/linkage.h>
-#include <linux/list.h>
+#include <linux/cleanup.h>
+#include <linux/types.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -77,31 +78,69 @@
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ * 0 - normal context
+ * 1 - softirq context
+ * 2 - hardirq context
+ * 3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ level += !!(pc & (NMI_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ return level;
+}
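
A hedged sketch of the intended use (perf-style recursion protection; ctx and its rec array, one slot per context level, are invented):

    unsigned char level = interrupt_context_level();

    if (ctx->rec[level]++)
            return -EBUSY;  /* already running at this context level */
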
+
+/*
+ * These macro definitions avoid redundant invocations of preempt_count()
+ * because such invocations would result in redundant loads given that
+ * preempt_count() is commonly implemented with READ_ONCE().
+ */
+
+#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
+# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
+#else
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
+#endif
/*
- * Are we doing bottom half or hardware interrupt processing?
+ * Macros to retrieve the current execution context:
*
- * in_irq() - We're in (hard) IRQ context
+ * in_nmi() - We're in NMI context
+ * in_hardirq() - We're in hard IRQ context
+ * in_serving_softirq() - We're in softirq context
+ * in_task() - We're in task context
+ */
+#define in_nmi() (nmi_count())
+#define in_hardirq() (hardirq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#ifdef CONFIG_PREEMPT_RT
+# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
+#else
+# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+#endif
+
+/*
+ * The following macros are deprecated and should not be used in new code:
+ * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
- * in_serving_softirq() - We're in softirq context
- * in_nmi() - We're in NMI context
- * in_task() - We're in task context
- *
- * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
- * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#define in_nmi() (preempt_count() & NMI_MASK)
-#define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
@@ -115,7 +154,12 @@
/*
* The preempt_count offset after spin_lock()
*/
-#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#if !defined(CONFIG_PREEMPT_RT)
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET 0
+#endif
/*
* The preempt_count offset needed for things like:
@@ -316,40 +360,125 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier);
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
- INIT_HLIST_NODE(&notifier->link);
+ /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
+ notifier->link.next = NULL;
+ notifier->link.pprev = NULL;
notifier->ops = ops;
}
#endif
-/**
- * migrate_disable - Prevent migration of the current task
+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
*
- * Maps to preempt_disable() which also disables preemption. Use
- * migrate_disable() to annotate that the intent is to prevent migration,
- * but not necessarily preemption.
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete. Thereby suffering
+ * a reduction in bandwidth in the exact duration of the migrate_disable()
+ * section.
+ *
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ * it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task, which under preempt_disable() could've instantly
+ * migrated away when another CPU becomes available, is now constrained
+ * by the ability to push the higher priority task away, which might itself be
+ * in a migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but it stays in the
+ * system, and as long as it remains unbounded, the system is not fully
+ * deterministic.
+ *
+ *
+ * The reason we have it anyway.
+ *
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives into becoming preemptible, they would also allow
+ * migration. This turns out to break a bunch of per-cpu usage. To this end,
+ * all these primitives employ migrate_disable() to restore this implicit
+ * assumption.
+ *
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(), alternatively we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers like semi-partitioned depend on
+ * migration, so migrate_disable() is not only a problem for
+ * work-conserving schedulers.
*
- * Can be invoked nested like preempt_disable() and needs the corresponding
- * number of migrate_enable() invocations.
*/
-static __always_inline void migrate_disable(void)
-{
- preempt_disable();
-}
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void) { }
+static inline void migrate_enable(void) { }
+
+#endif /* CONFIG_SMP */
/**
- * migrate_enable - Allow migration of the current task
+ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
*
- * Counterpart to migrate_disable().
+ * Use for code which requires preemption protection inside a critical
+ * section which has preemption disabled implicitly on non-PREEMPT_RT
+ * enabled kernels, by e.g.:
+ * - holding a spinlock/rwlock
+ * - soft interrupt context
+ * - regular interrupt handlers
*
- * As migrate_disable() can be invoked nested, only the outermost invocation
- * reenables migration.
+ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
+ * interrupt context and regular interrupt handlers are preemptible and
+ * only prevent migration. preempt_disable_nested() ensures that preemption
+ * is disabled for cases which require CPU local serialization even on
+ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
*
- * Currently mapped to preempt_enable().
+ * The use cases are code sequences which are not serialized by a
+ * particular lock instance, e.g.:
+ * - seqcount write side critical sections where the seqcount is not
+ * associated to a particular lock and therefore the automatic
+ * protection mechanism does not work. This prevents a live lock
+ * against a preempting high priority reader.
+ * - RMW per CPU variable updates like vmstat.
+ */
+/* Macro to avoid header recursion hell vs. lockdep */
+#define preempt_disable_nested() \
+do { \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ preempt_disable(); \
+ else \
+ lockdep_assert_preemption_disabled(); \
+} while (0)
+
+/**
+ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
*/
-static __always_inline void migrate_enable(void)
+static __always_inline void preempt_enable_nested(void)
{
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
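
A hedged sketch tying the nested helpers and the guards together (not from the patch; my_counter is an invented per-CPU variable, and the caller of the first fragment is assumed to hold a spinlock, which disables preemption implicitly on !PREEMPT_RT):

    /* RMW of a per-CPU counter, serialized even on PREEMPT_RT. */
    preempt_disable_nested();
    raw_cpu_add(my_counter, 1);
    preempt_enable_nested();

    /* Scope-based alternative using the guards defined above: */
    scoped_guard(preempt)
            raw_cpu_add(my_counter, 1);
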
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index 13eafebf3549..b068e2e60939 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
+struct page;
/*
prefetch(x) attempts to pre-emptively get the memory pointed to
by address "x" into the CPU L1 cache.
@@ -24,11 +25,10 @@
prefetch() should be defined by the architecture, if not, the
#define below provides a no-op define.
- There are 3 prefetch() macros:
+ There are 2 prefetch() macros:
prefetch(x) - prefetches the cacheline at "x" for read
prefetchw(x) - prefetches the cacheline at "x" for write
- spin_lock_prefetch(x) - prefetches the spinlock *x for taking
there is also PREFETCH_STRIDE which is the architecure-preferred
"lookahead" size for prefetching streamed operations.
@@ -43,10 +43,6 @@
#define prefetchw(x) __builtin_prefetch(x,1)
#endif
-#ifndef ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) prefetchw(x)
-#endif
-
#ifndef PREFETCH_STRIDE
#define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
#endif
@@ -62,4 +58,11 @@ static inline void prefetch_range(void *addr, size_t len)
#endif
}
+static inline void prefetch_page_address(struct page *page)
+{
+#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
+ prefetch(page);
+#endif
+}
+
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 34c1a7be3e01..955e31860095 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -2,16 +2,18 @@
#ifndef __KERNEL_PRINTK__
#define __KERNEL_PRINTK__
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
#include <linux/linkage.h>
-#include <linux/cache.h>
#include <linux/ratelimit_types.h>
+#include <linux/once_lite.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
+extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
+
#define PRINTK_MAX_SINGLE_HEADER_LEN 2
static inline int printk_get_level(const char *buffer)
@@ -42,8 +44,6 @@ static inline const char *printk_skip_headers(const char *buffer)
return buffer;
}
-#define CONSOLE_EXT_LOG_MAX 8192
-
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
@@ -67,16 +67,7 @@ extern int console_printk[];
#define minimum_console_loglevel (console_printk[2])
#define default_console_loglevel (console_printk[3])
-static inline void console_silent(void)
-{
- console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-}
-
-static inline void console_verbose(void)
-{
- if (console_loglevel)
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
+extern void console_verbose(void);
/* strlen("ratelimit") + 1 */
#define DEVKMSG_STR_MAX_SIZE 10
@@ -147,34 +138,34 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-#ifdef CONFIG_PRINTK_NMI
-extern void printk_nmi_enter(void);
-extern void printk_nmi_exit(void);
-extern void printk_nmi_direct_enter(void);
-extern void printk_nmi_direct_exit(void);
-#else
-static inline void printk_nmi_enter(void) { }
-static inline void printk_nmi_exit(void) { }
-static inline void printk_nmi_direct_enter(void) { }
-static inline void printk_nmi_direct_exit(void) { }
-#endif /* PRINTK_NMI */
+struct dev_printk_info;
#ifdef CONFIG_PRINTK
-asmlinkage __printf(5, 0)
+asmlinkage __printf(4, 0)
int vprintk_emit(int facility, int level,
- const char *dict, size_t dictlen,
+ const struct dev_printk_info *dev_info,
const char *fmt, va_list args);
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
-int printk(const char *fmt, ...);
+int _printk(const char *fmt, ...);
/*
* Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
*/
-__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
+
+extern void __printk_safe_enter(void);
+extern void __printk_safe_exit(void);
+/*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+#define printk_deferred_enter __printk_safe_enter
+#define printk_deferred_exit __printk_safe_exit
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -189,10 +180,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int
-devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf,
- size_t *lenp, loff_t *ppos);
-
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
@@ -202,9 +189,9 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
+extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
-extern void printk_safe_flush(void);
-extern void printk_safe_flush_on_panic(void);
+void printk_trigger_flush(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -212,15 +199,24 @@ int vprintk(const char *s, va_list args)
return 0;
}
static inline __printf(1, 2) __cold
-int printk(const char *s, ...)
+int _printk(const char *s, ...)
{
return 0;
}
static inline __printf(1, 2) __cold
-int printk_deferred(const char *s, ...)
+int _printk_deferred(const char *s, ...)
{
return 0;
}
+
+static inline void printk_deferred_enter(void)
+{
+}
+
+static inline void printk_deferred_exit(void)
+{
+}
+
static inline int printk_ratelimit(void)
{
return 0;
@@ -265,19 +261,73 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
-static inline void dump_stack(void)
+static inline void dump_stack_lvl(const char *log_lvl)
{
}
-static inline void printk_safe_flush(void)
+static inline void dump_stack(void)
{
}
-
-static inline void printk_safe_flush_on_panic(void)
+static inline void printk_trigger_flush(void)
{
}
#endif
+bool this_cpu_in_panic(void);
+
+#ifdef CONFIG_SMP
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+#endif /* CONFIG_SMP */
+
+/**
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ * cpu-reentrant spinning lock.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ * to be passed to printk_cpu_sync_put_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ * * This function is reentrant on the same CPU. Therefore the calling
+ * code must not assume exclusive access to data if code accessing the
+ * data can run reentrant or within NMI context on the same CPU.
+ *
+ * * If there exists usage of this function from NMI context, it becomes
+ * unsafe to perform any type of locking or spinning to wait for other
+ * CPUs after calling this function from any context. This includes
+ * using spinlocks or any other busy-waiting synchronization methods.
+ */
+#define printk_cpu_sync_get_irqsave(flags) \
+ for (;;) { \
+ local_irq_save(flags); \
+ if (__printk_cpu_sync_try_get()) \
+ break; \
+ local_irq_restore(flags); \
+ __printk_cpu_sync_wait(); \
+ }
+
+/**
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
+ */
+#define printk_cpu_sync_put_irqrestore(flags) \
+ do { \
+ __printk_cpu_sync_put(); \
+ local_irq_restore(flags); \
+ } while (0)
+
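
For illustration, a hedged sketch mirroring how dump_stack()-style code keeps a multi-line report from interleaving with output from other CPUs:

    unsigned long flags;

    printk_cpu_sync_get_irqsave(flags);
    printk("CPU report line 1\n");
    printk("CPU report line 2\n");
    printk_cpu_sync_put_irqrestore(flags);
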
extern int kptr_restrict;
/**
@@ -297,6 +347,117 @@ extern int kptr_restrict;
#define pr_fmt(fmt) fmt
#endif
+struct module;
+
+#ifdef CONFIG_PRINTK_INDEX
+struct pi_entry {
+ const char *fmt;
+ const char *func;
+ const char *file;
+ unsigned int line;
+
+ /*
+ * While printk and pr_* have the level stored in the string at compile
+ * time, some subsystems dynamically add it at runtime through the
+ * format string. For these dynamic cases, we allow the subsystem to
+ * tell us the level at compile time.
+ *
+ * NULL indicates that the level, if any, is stored in fmt.
+ */
+ const char *level;
+
+ /*
+ * The format string used by various subsystem specific printk()
+ * wrappers to prefix the message.
+ *
+ * Note that the static prefix defined by the pr_fmt() macro is stored
+ * directly in the message format (@fmt), not here.
+ */
+ const char *subsys_fmt_prefix;
+} __packed;
+
+#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \
+ do { \
+ if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \
+ /*
+ * We check __builtin_constant_p multiple times here
+ * for the same input because GCC will produce an error
+ * if we try to assign a static variable to fmt if it
+ * is not a constant, even with the outer if statement.
+ */ \
+ static const struct pi_entry _entry \
+ __used = { \
+ .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .level = __builtin_constant_p(_level) ? (_level) : NULL, \
+ .subsys_fmt_prefix = _subsys_fmt_prefix,\
+ }; \
+ static const struct pi_entry *_entry_ptr \
+ __used __section(".printk_index") = &_entry; \
+ } \
+ } while (0)
+
+#else /* !CONFIG_PRINTK_INDEX */
+#define __printk_index_emit(...) do {} while (0)
+#endif /* CONFIG_PRINTK_INDEX */
+
+/*
+ * Some subsystems have their own custom printk that applies a va_format to a
+ * generic format, for example, to include a device number or other metadata
+ * alongside the format supplied by the caller.
+ *
+ * In order to store these in the way they would be emitted by the printk
+ * infrastructure, the subsystem provides us with the start, fixed string, and
+ * any subsequent text in the format string.
+ *
+ * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed
+ * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the
+ * first one.
+ *
+ * subsys_fmt_prefix must be known at compile time, or compilation will fail
+ * (since this is a mistake). If fmt or level is not known at compile time, no
+ * index entry will be made (since this can legitimately happen).
+ */
+#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \
+ __printk_index_emit(fmt, level, subsys_fmt_prefix)
+
+#define printk_index_wrap(_p_func, _fmt, ...) \
+ ({ \
+ __printk_index_emit(_fmt, NULL, NULL); \
+ _p_func(_fmt, ##__VA_ARGS__); \
+ })
+
+
+/**
+ * printk - print a kernel message
+ * @fmt: format string
+ *
+ * This is printk(). It can be called from any context. We want it to work.
+ *
+ * If printk indexing is enabled, _printk() is called from printk_index_wrap.
+ * Otherwise, printk is simply #defined to _printk.
+ *
+ * We try to grab the console_lock. If we succeed, it's easy - we log the
+ * output and call the console drivers. If we fail to get the semaphore, we
+ * place the output into the log buffer and return. The current holder of
+ * the console_sem will notice the new output in console_unlock(); and will
+ * send it to the consoles before releasing the lock.
+ *
+ * One effect of this deferred printing is that code which calls printk() and
+ * then changes console_loglevel may break. This is because console_loglevel
+ * is inspected when the actual printing occurs.
+ *
+ * See also:
+ * printf(3)
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
+#define printk_deferred(fmt, ...) \
+ printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
+
/**
* pr_emerg - Print an emergency-level message
* @fmt: format string
@@ -432,27 +593,9 @@ extern int kptr_restrict;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
-({ \
- static bool __section(.data.once) __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
-({ \
- static bool __section(.data.once) __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk_deferred(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
#else
#define printk_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
new file mode 100644
index 000000000000..24da8364b919
--- /dev/null
+++ b/include/linux/prmt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef CONFIG_ACPI_PRMT
+void init_prmt(void);
+#else
+static inline void init_prmt(void) { }
+#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 2df965cd0974..0b2a89854440 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -30,7 +30,9 @@ struct proc_ops {
unsigned int proc_flags;
int (*proc_open)(struct inode *, struct file *);
ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *);
ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
+ /* mandatory unless nonseekable_open() or equivalent is used */
loff_t (*proc_lseek)(struct file *, loff_t, int);
int (*proc_release)(struct inode *, struct file *);
__poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
@@ -63,6 +65,7 @@ struct proc_fs_info {
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
+ struct rcu_head rcu;
};
static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
@@ -79,6 +82,7 @@ extern void proc_flush_pid(struct pid *);
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
+struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool);
extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
@@ -107,7 +111,16 @@ extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
-extern void *PDE_DATA(const struct inode *);
+
+/*
+ * Obtain the private data passed by the user through proc_create_data() or
+ * a related helper.
+ */
+static inline void *pde_data(const struct inode *inode)
+{
+ return inode->i_private;
+}
+
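
A hedged sketch of the renamed accessor in use (not from this diff; my_proc_show() is an invented seq_file show callback):

    static int my_proc_open(struct inode *inode, struct file *file)
    {
            /* Recover the cookie registered via proc_create_data(). */
            return single_open(file, my_proc_show, pde_data(inode));
    }
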
extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
@@ -146,6 +159,9 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+void arch_report_meminfo(struct seq_file *m);
+void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task);
+
#else /* CONFIG_PROC_FS */
static inline void proc_root_init(void)
@@ -161,6 +177,11 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
+static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, void *data, bool force_lookup)
+{
+ return NULL;
+}
static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
@@ -170,12 +191,20 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
#define proc_create_seq(name, mode, parent, ops) ({NULL;})
#define proc_create_single(name, mode, parent, show) ({NULL;})
#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_ops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
+
+static inline struct proc_dir_entry *
+proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops)
+{ return NULL; }
+
+static inline struct proc_dir_entry *
+proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops, void *data)
+{ return NULL; }
static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
-static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+static inline void *pde_data(const struct inode *inode) {BUG(); return NULL;}
static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; }
static inline void proc_remove(struct proc_dir_entry *de) {}
@@ -183,8 +212,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
+#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
#define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
+#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
{
@@ -198,7 +229,7 @@ struct net;
static inline struct proc_dir_entry *proc_net_mkdir(
struct net *net, const char *name, struct proc_dir_entry *parent)
{
- return proc_mkdir_data(name, 0, parent, net);
+ return _proc_mkdir(name, 0, parent, net, true);
}
struct ns_common;
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 75807ecef880..5ea470eb4d76 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -66,13 +66,12 @@ static inline void proc_free_inum(unsigned int inum) {}
static inline int ns_alloc_inum(struct ns_common *ns)
{
- atomic_long_set(&ns->stashed, 0);
+ WRITE_ONCE(ns->stashed, NULL);
return proc_alloc_inum(&ns->inum);
}
#define ns_free_inum(ns) proc_free_inum((ns)->inum)
-extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
extern int ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
diff --git a/include/linux/profile.h b/include/linux/profile.h
index bad18ca43150..11db1ec516e2 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -15,7 +15,6 @@
#define KVM_PROFILING 4
struct proc_dir_entry;
-struct pt_regs;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
@@ -32,11 +31,6 @@ static inline int create_proc_profile(void)
}
#endif
-enum profile_type {
- PROFILE_TASK_EXIT,
- PROFILE_MUNMAP
-};
-
#ifdef CONFIG_PROFILING
extern int prof_on __read_mostly;
@@ -67,25 +61,6 @@ static inline void profile_hit(int type, void *ip)
struct task_struct;
struct mm_struct;
-/* task is in do_exit() */
-void profile_task_exit(struct task_struct * task);
-
-/* task is dead, free task struct ? Returns 1 if
- * the task was taken, 0 if the task should be freed.
- */
-int profile_handoff_task(struct task_struct * task);
-
-/* sys_munmap */
-void profile_munmap(unsigned long addr);
-
-int task_handoff_register(struct notifier_block * n);
-int task_handoff_unregister(struct notifier_block * n);
-
-int profile_event_register(enum profile_type, struct notifier_block * n);
-int profile_event_unregister(enum profile_type, struct notifier_block * n);
-
-struct pt_regs;
-
#else
#define prof_on 0
@@ -110,29 +85,6 @@ static inline void profile_hit(int type, void *ip)
return;
}
-static inline int task_handoff_register(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int task_handoff_unregister(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-#define profile_task_exit(a) do { } while (0)
-#define profile_handoff_task(a) (0)
-#define profile_munmap(a) do { } while (0)
#endif /* CONFIG_PROFILING */
diff --git a/include/linux/property.h b/include/linux/property.h
index 9f805c442819..3a1045eb786c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -10,8 +10,11 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/args.h>
+#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/fwnode.h>
+#include <linux/stddef.h>
#include <linux/types.h>
struct device;
@@ -25,31 +28,29 @@ enum dev_prop_type {
DEV_PROP_REF,
};
-enum dev_dma_attr {
- DEV_DMA_NOT_SUPPORTED,
- DEV_DMA_NON_COHERENT,
- DEV_DMA_COHERENT,
-};
-
-struct fwnode_handle *dev_fwnode(struct device *dev);
+const struct fwnode_handle *__dev_fwnode_const(const struct device *dev);
+struct fwnode_handle *__dev_fwnode(struct device *dev);
+#define dev_fwnode(dev) \
+ _Generic((dev), \
+ const struct device *: __dev_fwnode_const, \
+ struct device *: __dev_fwnode)(dev)
-bool device_property_present(struct device *dev, const char *propname);
-int device_property_read_u8_array(struct device *dev, const char *propname,
+bool device_property_present(const struct device *dev, const char *propname);
+int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval);
-int device_property_read_u16_array(struct device *dev, const char *propname,
+int device_property_read_u16_array(const struct device *dev, const char *propname,
u16 *val, size_t nval);
-int device_property_read_u32_array(struct device *dev, const char *propname,
+int device_property_read_u32_array(const struct device *dev, const char *propname,
u32 *val, size_t nval);
-int device_property_read_u64_array(struct device *dev, const char *propname,
+int device_property_read_u64_array(const struct device *dev, const char *propname,
u64 *val, size_t nval);
-int device_property_read_string_array(struct device *dev, const char *propname,
+int device_property_read_string_array(const struct device *dev, const char *propname,
const char **val, size_t nval);
-int device_property_read_string(struct device *dev, const char *propname,
+int device_property_read_string(const struct device *dev, const char *propname,
const char **val);
-int device_property_match_string(struct device *dev,
+int device_property_match_string(const struct device *dev,
const char *propname, const char *string);
-bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname);
int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
@@ -71,6 +72,65 @@ int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val);
int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string);
+
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+
+static inline bool fwnode_device_is_big_endian(const struct fwnode_handle *fwnode)
+{
+ if (fwnode_property_present(fwnode, "big-endian"))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ fwnode_property_present(fwnode, "native-endian"))
+ return true;
+ return false;
+}
+
+static inline
+bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat)
+{
+ return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
+}
+
+/**
+ * device_is_big_endian - check if a device has BE registers
+ * @dev: Pointer to the struct device
+ *
+ * Returns: true if the device has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the device has a "native-endian" property.
+ * Returns false otherwise.
+ *
+ * Callers would nominally use ioread32be/iowrite32be if
+ * device_is_big_endian() == true, or readl/writel otherwise.
+ */
+static inline bool device_is_big_endian(const struct device *dev)
+{
+ return fwnode_device_is_big_endian(dev_fwnode(dev));
+}
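
Following the kernel-doc note above, a register accessor would switch between BE and LE MMIO helpers on this predicate; a sketch with hypothetical names, assuming <linux/io.h>:

static u32 foo_read_reg(struct device *dev, void __iomem *base,
			unsigned int offset)
{
	/* "big-endian", or a BE kernel plus "native-endian" */
	if (device_is_big_endian(dev))
		return ioread32be(base + offset);
	return readl(base + offset);
}
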
+
+/**
+ * device_is_compatible - match 'compatible' property of the device with a given string
+ * @dev: Pointer to the struct device
+ * @compat: The string to match 'compatible' property with
+ *
+ * Returns: true if matches, otherwise false.
+ */
+static inline bool device_is_compatible(const struct device *dev, const char *compat)
+{
+ return fwnode_device_is_compatible(dev_fwnode(dev), compat);
+}
+
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char * const *array, size_t n);
+
+static inline
+int device_property_match_property_string(const struct device *dev,
+ const char *propname,
+ const char * const *array, size_t n)
+{
+ return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n);
+}
+
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
@@ -82,9 +142,15 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *fwnode_get_name(const struct fwnode_handle *fwnode);
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
+bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name);
+
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_parent(
- struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
+
+#define fwnode_for_each_parent_node(fwnode, parent) \
+ for (parent = fwnode_get_parent(fwnode); parent; \
+ parent = fwnode_get_next_parent(parent))
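
fwnode_get_next_parent() releases the reference on the node it is handed, so a fwnode_for_each_parent_node() loop that runs to completion leaves no reference to put; breaking out early would require an explicit fwnode_handle_put(parent). A sketch computing a node's depth this way (hypothetical name; fwnode_count_parents() below does essentially this):

static unsigned int foo_node_depth(const struct fwnode_handle *fwnode)
{
	struct fwnode_handle *parent;
	unsigned int depth = 0;

	fwnode_for_each_parent_node(fwnode, parent)
		depth++;

	return depth;
}
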
+
unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
unsigned int depth);
@@ -101,75 +167,82 @@ struct fwnode_handle *fwnode_get_next_available_child_node(
for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
child = fwnode_get_next_available_child_node(fwnode, child))
-struct fwnode_handle *device_get_next_child_node(
- struct device *dev, struct fwnode_handle *child);
+struct fwnode_handle *device_get_next_child_node(const struct device *dev,
+ struct fwnode_handle *child);
#define device_for_each_child_node(dev, child) \
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
-struct fwnode_handle *fwnode_get_named_child_node(
- const struct fwnode_handle *fwnode, const char *childname);
-struct fwnode_handle *device_get_named_child_node(struct device *dev,
+struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname);
+struct fwnode_handle *device_get_named_child_node(const struct device *dev,
const char *childname);
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
-int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
-unsigned int device_get_child_node_count(struct device *dev);
+unsigned int device_get_child_node_count(const struct device *dev);
-static inline bool device_property_read_bool(struct device *dev,
+static inline bool device_property_read_bool(const struct device *dev,
const char *propname)
{
return device_property_present(dev, propname);
}
-static inline int device_property_read_u8(struct device *dev,
+static inline int device_property_read_u8(const struct device *dev,
const char *propname, u8 *val)
{
return device_property_read_u8_array(dev, propname, val, 1);
}
-static inline int device_property_read_u16(struct device *dev,
+static inline int device_property_read_u16(const struct device *dev,
const char *propname, u16 *val)
{
return device_property_read_u16_array(dev, propname, val, 1);
}
-static inline int device_property_read_u32(struct device *dev,
+static inline int device_property_read_u32(const struct device *dev,
const char *propname, u32 *val)
{
return device_property_read_u32_array(dev, propname, val, 1);
}
-static inline int device_property_read_u64(struct device *dev,
+static inline int device_property_read_u64(const struct device *dev,
const char *propname, u64 *val)
{
return device_property_read_u64_array(dev, propname, val, 1);
}
-static inline int device_property_count_u8(struct device *dev, const char *propname)
+static inline int device_property_count_u8(const struct device *dev, const char *propname)
{
return device_property_read_u8_array(dev, propname, NULL, 0);
}
-static inline int device_property_count_u16(struct device *dev, const char *propname)
+static inline int device_property_count_u16(const struct device *dev, const char *propname)
{
return device_property_read_u16_array(dev, propname, NULL, 0);
}
-static inline int device_property_count_u32(struct device *dev, const char *propname)
+static inline int device_property_count_u32(const struct device *dev, const char *propname)
{
return device_property_read_u32_array(dev, propname, NULL, 0);
}
-static inline int device_property_count_u64(struct device *dev, const char *propname)
+static inline int device_property_count_u64(const struct device *dev, const char *propname)
{
return device_property_read_u64_array(dev, propname, NULL, 0);
}
+static inline int device_property_string_array_count(const struct device *dev,
+ const char *propname)
+{
+ return device_property_read_string_array(dev, propname, NULL, 0);
+}
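
Since the _count helpers are plain reads with a NULL buffer, the usual pattern is count, allocate, read. A hedged sketch using a hypothetical "modes" property and devm allocation (assumes <linux/device.h>):

static int foo_read_modes(struct device *dev, const char ***modes)
{
	int n = device_property_string_array_count(dev, "modes");

	if (n < 0)
		return n;	/* property missing or not a string array */

	*modes = devm_kcalloc(dev, n, sizeof(**modes), GFP_KERNEL);
	if (!*modes)
		return -ENOMEM;

	return device_property_read_string_array(dev, "modes", *modes, n);
}
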
+
static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)
{
@@ -224,6 +297,13 @@ static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode,
return fwnode_property_read_u64_array(fwnode, propname, NULL, 0);
}
+static inline int
+fwnode_property_string_array_count(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+}
+
struct software_node;
/**
@@ -238,6 +318,13 @@ struct software_node_ref_args {
u64 args[NR_FWNODE_REFERENCE_ARGS];
};
+#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
+(const struct software_node_ref_args) { \
+ .node = _ref_, \
+ .nargs = COUNT_ARGS(__VA_ARGS__), \
+ .args = { __VA_ARGS__ }, \
+}
+
/**
* struct property_entry - "Built-in" device property representation.
* @name: Name of the property.
@@ -269,24 +356,14 @@ struct property_entry {
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
-
-#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \
- sizeof(((struct property_entry *)NULL)->value._elem_[0])
-
-#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \
- _val_, _len_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = (_len_) * (_elsize_), \
- .type = DEV_PROP_##_Type_, \
- { .pointer = _val_ }, \
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = (_len_) * sizeof_field(struct property_entry, value._elem_[0]), \
+ .type = DEV_PROP_##_Type_, \
+ { .pointer = _val_ }, \
}
-#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
- __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
- __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
- _Type_, _val_, _len_)
-
#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
@@ -297,10 +374,14 @@ struct property_entry {
__PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+
#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \
- __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
- sizeof(struct software_node_ref_args), \
- REF, _val_, _len_)
+(struct property_entry) { \
+ .name = _name_, \
+ .length = (_len_) * sizeof(struct software_node_ref_args), \
+ .type = DEV_PROP_REF, \
+ { .pointer = _val_ }, \
+}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
@@ -312,13 +393,13 @@ struct property_entry {
PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
-#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
+#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
(struct property_entry) { \
.name = _name_, \
- .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ .length = sizeof_field(struct property_entry, value._elem_[0]), \
.is_inline = true, \
.type = DEV_PROP_##_Type_, \
{ .value = { ._elem_[0] = _val_ } }, \
@@ -335,46 +416,34 @@ struct property_entry {
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
__PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_)
-#define PROPERTY_ENTRY_BOOL(_name_) \
-(struct property_entry) { \
- .name = _name_, \
- .is_inline = true, \
-}
-
#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) \
(struct property_entry) { \
.name = _name_, \
.length = sizeof(struct software_node_ref_args), \
.type = DEV_PROP_REF, \
- { .pointer = &(const struct software_node_ref_args) { \
- .node = _ref_, \
- .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
- .args = { __VA_ARGS__ }, \
- } }, \
+ { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \
+}
+
+#define PROPERTY_ENTRY_BOOL(_name_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .is_inline = true, \
}
struct property_entry *
property_entries_dup(const struct property_entry *properties);
-
void property_entries_free(const struct property_entry *properties);
-int device_add_properties(struct device *dev,
- const struct property_entry *properties);
-void device_remove_properties(struct device *dev);
-
-bool device_dma_supported(struct device *dev);
-
-enum dev_dma_attr device_get_dma_attr(struct device *dev);
+bool device_dma_supported(const struct device *dev);
+enum dev_dma_attr device_get_dma_attr(const struct device *dev);
-const void *device_get_match_data(struct device *dev);
+const void *device_get_match_data(const struct device *dev);
int device_get_phy_mode(struct device *dev);
+int fwnode_get_phy_mode(const struct fwnode_handle *fwnode);
-void *device_get_mac_address(struct device *dev, char *addr, int alen);
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index);
-int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
- char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
@@ -385,11 +454,8 @@ struct fwnode_handle *fwnode_graph_get_remote_port(
const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_endpoint(
const struct fwnode_handle *fwnode);
-struct fwnode_handle *
-fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
- u32 endpoint);
-static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
+static inline bool fwnode_graph_is_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_property_present(fwnode, "remote-endpoint");
}
@@ -402,7 +468,8 @@ static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
* one.
* @FWNODE_GRAPH_DEVICE_DISABLED: That the device to which the remote
* endpoint of the given endpoint belongs to,
- * may be disabled.
+ * may be disabled, or that the endpoint is not
+ * connected.
*/
#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0)
#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1)
@@ -410,14 +477,35 @@ static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags);
+unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode,
+ unsigned long flags);
-#define fwnode_graph_for_each_endpoint(fwnode, child) \
- for (child = NULL; \
- (child = fwnode_graph_get_next_endpoint(fwnode, child)); )
+#define fwnode_graph_for_each_endpoint(fwnode, child) \
+ for (child = fwnode_graph_get_next_endpoint(fwnode, NULL); child; \
+ child = fwnode_graph_get_next_endpoint(fwnode, child))
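
Usage of the reworked iterator is unchanged; only the first lookup moved out of the loop test. A sketch that walks and parses every endpoint of a node (hypothetical function):

static void foo_scan_endpoints(const struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ep;
	struct fwnode_endpoint endpoint;

	fwnode_graph_for_each_endpoint(fwnode, ep) {
		if (fwnode_graph_parse_endpoint(ep, &endpoint))
			continue;
		pr_info("port %u, endpoint %u\n", endpoint.port, endpoint.id);
	}
}
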
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+typedef void *(*devcon_match_fn_t)(const struct fwnode_handle *fwnode, const char *id,
+ void *data);
+
+void *fwnode_connection_find_match(const struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match);
+
+static inline void *device_connection_find_match(const struct device *dev,
+ const char *con_id, void *data,
+ devcon_match_fn_t match)
+{
+ return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match);
+}
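
A devcon_match_fn_t callback inspects a candidate connection and returns an opaque cookie to claim it, or NULL to keep searching. A minimal illustrative sketch (connection id hypothetical):

static void *foo_devcon_match(const struct fwnode_handle *fwnode,
			      const char *id, void *data)
{
	/* claim the connection only when the remote node is usable */
	return fwnode_device_is_available(fwnode) ? data : NULL;
}

/* e.g. fwnode_connection_find_match(fwnode, "foo-link", dev, foo_devcon_match) */
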
+
+int fwnode_connection_find_matches(const struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len);
+
/* -------------------------------------------------------------------------- */
/* Software fwnode support - when HW description is incomplete or missing */
@@ -433,6 +521,13 @@ struct software_node {
const struct property_entry *properties;
};
+#define SOFTWARE_NODE(_name_, _properties_, _parent_) \
+ (struct software_node) { \
+ .name = _name_, \
+ .properties = _properties_, \
+ .parent = _parent_, \
+ }
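
Together with the PROPERTY_ENTRY_* initializers earlier in this header, the new macro lets a node be described declaratively; a sketch with hypothetical names and values, which could then be attached via device_add_software_node() declared further down:

static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }	/* sentinel */
};

static const struct software_node foo_node = SOFTWARE_NODE("foo", foo_props, NULL);
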
+
bool is_software_node(const struct fwnode_handle *fwnode);
const struct software_node *
to_software_node(const struct fwnode_handle *fwnode);
@@ -442,20 +537,22 @@ const struct software_node *
software_node_find_by_name(const struct software_node *parent,
const char *name);
-int software_node_register_nodes(const struct software_node *nodes);
-void software_node_unregister_nodes(const struct software_node *nodes);
-
int software_node_register_node_group(const struct software_node **node_group);
void software_node_unregister_node_group(const struct software_node **node_group);
int software_node_register(const struct software_node *node);
void software_node_unregister(const struct software_node *node);
-int software_node_notify(struct device *dev, unsigned long action);
-
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent);
void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+int device_add_software_node(struct device *dev, const struct software_node *node);
+void device_remove_software_node(struct device *dev);
+
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent);
+
#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
new file mode 100644
index 000000000000..c9a31c567e85
--- /dev/null
+++ b/include/linux/pruss_driver.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS sub-system specific definitions
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRUSS_DRIVER_H_
+#define _PRUSS_DRIVER_H_
+
+#include <linux/mutex.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/types.h>
+#include <linux/err.h>
+
+/*
+ * enum pruss_gp_mux_sel - PRUSS GPI/O Mux modes for the
+ * PRUSS_GPCFG0/1 registers
+ *
+ * NOTE: The values below are the most common ones, but there
+ * are some exceptions like on 66AK2G, where the RESERVED and MII2
+ * values are interchanged. Also, this bit-field does not exist on
+ * AM335x SoCs.
+ */
+enum pruss_gp_mux_sel {
+ PRUSS_GP_MUX_SEL_GP,
+ PRUSS_GP_MUX_SEL_ENDAT,
+ PRUSS_GP_MUX_SEL_RESERVED,
+ PRUSS_GP_MUX_SEL_SD,
+ PRUSS_GP_MUX_SEL_MII2,
+ PRUSS_GP_MUX_SEL_MAX,
+};
+
+/*
+ * enum pruss_gpi_mode - PRUSS GPI configuration modes, used
+ * to program the PRUSS_GPCFG0/1 registers
+ */
+enum pruss_gpi_mode {
+ PRUSS_GPI_MODE_DIRECT,
+ PRUSS_GPI_MODE_PARALLEL,
+ PRUSS_GPI_MODE_28BIT_SHIFT,
+ PRUSS_GPI_MODE_MII,
+ PRUSS_GPI_MODE_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
+
+/*
+ * enum pruss_mem - PRUSS memory range identifiers
+ */
+enum pruss_mem {
+ PRUSS_MEM_DRAM0 = 0,
+ PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2,
+ PRUSS_MEM_MAX,
+};
+
+/**
+ * struct pruss_mem_region - PRUSS memory region structure
+ * @va: kernel virtual address of the PRUSS memory region
+ * @pa: physical (bus) address of the PRUSS memory region
+ * @size: size of the PRUSS memory region
+ */
+struct pruss_mem_region {
+ void __iomem *va;
+ phys_addr_t pa;
+ size_t size;
+};
+
+/**
+ * struct pruss - PRUSS parent structure
+ * @dev: pruss device pointer
+ * @cfg_base: base iomap for CFG region
+ * @cfg_regmap: regmap for config region
+ * @mem_regions: data for each of the PRUSS memory regions
+ * @mem_in_use: tracks which memory regions are currently in use
+ * @lock: mutex to serialize access to resources
+ * @core_clk_mux: clk handle for PRUSS CORE_CLK_MUX
+ * @iep_clk_mux: clk handle for PRUSS IEP_CLK_MUX
+ */
+struct pruss {
+ struct device *dev;
+ void __iomem *cfg_base;
+ struct regmap *cfg_regmap;
+ struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
+ struct pruss_mem_region *mem_in_use[PRUSS_MEM_MAX];
+ struct mutex lock; /* PRU resource lock */
+ struct clk *core_clk_mux;
+ struct clk *iep_clk_mux;
+};
+
+#if IS_ENABLED(CONFIG_TI_PRUSS)
+
+struct pruss *pruss_get(struct rproc *rproc);
+void pruss_put(struct pruss *pruss);
+int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
+ struct pruss_mem_region *region);
+int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region);
+int pruss_cfg_get_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 *mux);
+int pruss_cfg_set_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 mux);
+int pruss_cfg_gpimode(struct pruss *pruss, enum pruss_pru_id pru_id,
+ enum pruss_gpi_mode mode);
+int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable);
+int pruss_cfg_xfr_enable(struct pruss *pruss, enum pru_type pru_type,
+ bool enable);
+
+#else
+
+static inline struct pruss *pruss_get(struct rproc *rproc)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pruss_put(struct pruss *pruss) { }
+
+static inline int pruss_request_mem_region(struct pruss *pruss,
+ enum pruss_mem mem_id,
+ struct pruss_mem_region *region)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
+ enum pruss_pru_id pru_id, u8 *mux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
+ enum pruss_pru_id pru_id, u8 mux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_gpimode(struct pruss *pruss,
+ enum pruss_pru_id pru_id,
+ enum pruss_gpi_mode mode)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
+ enum pru_type pru_type,
+ bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TI_PRUSS */
+
+#endif /* _PRUSS_DRIVER_H_ */
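
A client-side sketch of the interface above: look up the PRUSS behind a PRU remoteproc handle, claim the shared RAM region, and drop everything on the way out (hypothetical driver code, CONFIG_TI_PRUSS assumed; obtaining the rproc is elided):

static int foo_use_pruss(struct rproc *pru)
{
	struct pruss_mem_region shram;
	struct pruss *pruss;
	int ret;

	pruss = pruss_get(pru);
	if (IS_ERR(pruss))
		return PTR_ERR(pruss);

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2, &shram);
	if (ret)
		goto out_put;

	/* ... program shram.va, hand shram.pa to the PRU firmware ... */

	pruss_release_mem_region(pruss, &shram);
out_put:
	pruss_put(pruss);
	return ret;
}
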
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 14ad9b9ebcd6..4ca0060a3fc4 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -18,7 +18,7 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_suspend_enter(u32 state);
bool psci_power_state_is_valid(u32 state);
-int psci_set_osi_mode(void);
+int psci_set_osi_mode(bool enable);
bool psci_has_osi_support(void);
struct psci_operations {
@@ -34,6 +34,15 @@ struct psci_operations {
extern struct psci_operations psci_ops;
+struct psci_0_1_function_ids {
+ u32 cpu_suspend;
+ u32 cpu_on;
+ u32 cpu_off;
+ u32 migrate;
+};
+
+struct psci_0_1_function_ids get_psci_0_1_function_ids(void);
+
#if defined(CONFIG_ARM_PSCI_FW)
int __init psci_dt_init(void);
#else
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
new file mode 100644
index 000000000000..fb724c65c77b
--- /dev/null
+++ b/include/linux/pse-pd/pse.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+#ifndef _LINUX_PSE_CONTROLLER_H
+#define _LINUX_PSE_CONTROLLER_H
+
+#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <uapi/linux/ethtool.h>
+
+struct phy_device;
+struct pse_controller_dev;
+
+/**
+ * struct pse_control_config - PSE control/channel configuration.
+ *
+ * @admin_control: set PoDL PSE admin control as described in
+ * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl
+ */
+struct pse_control_config {
+ enum ethtool_podl_pse_admin_state admin_control;
+};
+
+/**
+ * struct pse_control_status - PSE control/channel status.
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus.
+ */
+struct pse_control_status {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+};
+
+/**
+ * struct pse_controller_ops - PSE controller driver callbacks
+ *
+ * @ethtool_get_status: get PSE control status for ethtool interface
+ * @ethtool_set_config: set PSE control configuration over ethtool interface
+ */
+struct pse_controller_ops {
+ int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+ int (*ethtool_set_config)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+};
+
+struct module;
+struct device_node;
+struct of_phandle_args;
+struct pse_control;
+
+/**
+ * struct pse_controller_dev - PSE controller entity that might
+ * provide multiple PSE controls
+ * @ops: a pointer to device specific struct pse_controller_ops
+ * @owner: kernel module of the PSE controller driver
+ * @list: internal list of PSE controller devices
+ * @pse_control_head: head of internal list of requested PSE controls
+ * @dev: corresponding driver model device struct
+ * @of_pse_n_cells: number of cells in PSE line specifiers
+ * @of_xlate: translation function to map a PSE specifier as found in the
+ * device tree to the id given to the PSE control ops
+ * @nr_lines: number of PSE controls in this controller device
+ * @lock: Mutex for serialization access to the PSE controller
+ */
+struct pse_controller_dev {
+ const struct pse_controller_ops *ops;
+ struct module *owner;
+ struct list_head list;
+ struct list_head pse_control_head;
+ struct device *dev;
+ int of_pse_n_cells;
+ int (*of_xlate)(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec);
+ unsigned int nr_lines;
+ struct mutex lock;
+};
+
+#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
+int pse_controller_register(struct pse_controller_dev *pcdev);
+void pse_controller_unregister(struct pse_controller_dev *pcdev);
+struct device;
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev);
+
+struct pse_control *of_pse_control_get(struct device_node *node);
+void pse_control_put(struct pse_control *psec);
+
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+
+#else
+
+static inline struct pse_control *of_pse_control_get(struct device_node *node)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline void pse_control_put(struct pse_control *psec)
+{
+}
+
+static inline int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
+#endif
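
A consumer-side sketch: resolve a PSE control from a device-tree node and query it through the ethtool-facing helpers (hypothetical code, CONFIG_PSE_CONTROLLER assumed; error reporting abbreviated):

static int foo_get_pse_state(struct device_node *np,
			     struct netlink_ext_ack *extack)
{
	struct pse_control_status status = {};
	struct pse_control *psec;
	int ret;

	psec = of_pse_control_get(np);
	if (IS_ERR(psec))
		return PTR_ERR(psec);

	ret = pse_ethtool_get_status(psec, extack, &status);
	pse_control_put(psec);
	return ret;
}
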
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index eceda1d1407a..730f77381d55 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,7 +5,7 @@
struct pseudo_fs_context {
const struct super_operations *ops;
- const struct xattr_handler **xattr;
+ const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 7361023f3fdd..e0745873e3f2 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
@@ -5,6 +6,8 @@
#include <linux/psi_types.h>
#include <linux/sched.h>
#include <linux/poll.h>
+#include <linux/cgroup-defs.h>
+#include <linux/cgroup.h>
struct seq_file;
struct css_set;
@@ -16,27 +19,28 @@ extern struct psi_group psi_system;
void psi_init(void);
-void psi_task_change(struct task_struct *task, int clear, int set);
-void psi_task_switch(struct task_struct *prev, struct task_struct *next,
- bool sleep);
-
-void psi_memstall_tick(struct task_struct *task, int cpu);
void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
+ enum psi_res res, struct file *file,
+ struct kernfs_open_file *of);
+void psi_trigger_destroy(struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+ poll_table *wait);
#ifdef CONFIG_CGROUPS
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+}
+
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
void cgroup_move_task(struct task_struct *p, struct css_set *to);
-
-struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, size_t nbytes, enum psi_res res);
-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
-
-__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
- poll_table *wait);
+void psi_cgroup_restart(struct psi_group *group);
#endif
#else /* CONFIG_PSI */
@@ -58,6 +62,7 @@ static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
{
rcu_assign_pointer(p->cgroups, to);
}
+static inline void psi_cgroup_restart(struct psi_group *group) {}
#endif
#endif /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index b95f3211566a..f1fd3a8044e0 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_TYPES_H
#define _LINUX_PSI_TYPES_H
@@ -15,12 +16,15 @@ enum psi_task_count {
NR_MEMSTALL,
NR_RUNNING,
/*
- * This can't have values other than 0 or 1 and could be
- * implemented as a bit flag. But for now we still have room
- * in the first cacheline of psi_group_cpu, and this way we
- * don't have to special case any state tracking for it.
+ * For IO and CPU stalls the presence of running/oncpu tasks
+ * in the domain means a partial rather than a full stall.
+ * For memory it's not so simple because of page reclaimers:
+ * they are running/oncpu while representing a stall. To tell
+ * whether a domain has productivity left or not, we need to
+ * distinguish between regular running (i.e. productive)
+ * threads and memstall ones.
*/
- NR_ONCPU,
+ NR_MEMSTALL_RUNNING,
NR_PSI_TASK_COUNTS = 4,
};
@@ -28,14 +32,20 @@ enum psi_task_count {
#define TSK_IOWAIT (1 << NR_IOWAIT)
#define TSK_MEMSTALL (1 << NR_MEMSTALL)
#define TSK_RUNNING (1 << NR_RUNNING)
-#define TSK_ONCPU (1 << NR_ONCPU)
+#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING)
+
+/* Only one task can be scheduled, no corresponding task count */
+#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)
/* Resources that workloads could be stalled on */
enum psi_res {
PSI_IO,
PSI_MEM,
PSI_CPU,
- NR_PSI_RESOURCES = 3,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ,
+#endif
+ NR_PSI_RESOURCES,
};
/*
@@ -50,11 +60,21 @@ enum psi_states {
PSI_MEM_SOME,
PSI_MEM_FULL,
PSI_CPU_SOME,
+ PSI_CPU_FULL,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ_FULL,
+#endif
/* Only per-CPU, to weigh the CPU in the global average: */
PSI_NONIDLE,
- NR_PSI_STATES = 6,
+ NR_PSI_STATES,
};
+/* Use one bit in the state mask to track TSK_ONCPU */
+#define PSI_ONCPU (1 << NR_PSI_STATES)
+
+/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
+#define PSI_STATE_RESCHEDULE (1 << (NR_PSI_STATES + 1))
+
enum psi_aggregators {
PSI_AVGS = 0,
PSI_POLL,
@@ -117,6 +137,9 @@ struct psi_trigger {
/* Wait queue for polling */
wait_queue_head_t event_wait;
+ /* Kernfs file for cgroup triggers */
+ struct kernfs_open_file *of;
+
/* Pending event flag */
int event;
@@ -129,11 +152,17 @@ struct psi_trigger {
*/
u64 last_event_time;
- /* Refcounting to prevent premature destruction */
- struct kref refcount;
+ /* Deferred event(s) from previous ratelimit window */
+ bool pending_event;
+
+ /* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
+ enum psi_aggregators aggregator;
};
struct psi_group {
+ struct psi_group *parent;
+ bool enabled;
+
/* Protects data used by the aggregator */
struct mutex avgs_lock;
@@ -148,33 +177,40 @@ struct psi_group {
/* Aggregator work control */
struct delayed_work avgs_work;
+ /* Unprivileged triggers against N*PSI_FREQ windows */
+ struct list_head avg_triggers;
+ u32 avg_nr_triggers[NR_PSI_STATES - 1];
+
/* Total stall times and sampled pressure averages */
u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
unsigned long avg[NR_PSI_STATES - 1][3];
- /* Monitor work control */
- struct task_struct __rcu *poll_task;
- struct timer_list poll_timer;
- wait_queue_head_t poll_wait;
- atomic_t poll_wakeup;
+ /* Monitor RT polling work control */
+ struct task_struct __rcu *rtpoll_task;
+ struct timer_list rtpoll_timer;
+ wait_queue_head_t rtpoll_wait;
+ atomic_t rtpoll_wakeup;
+ atomic_t rtpoll_scheduled;
/* Protects data used by the monitor */
- struct mutex trigger_lock;
-
- /* Configured polling triggers */
- struct list_head triggers;
- u32 nr_triggers[NR_PSI_STATES - 1];
- u32 poll_states;
- u64 poll_min_period;
-
- /* Total stall times at the start of monitor activation */
- u64 polling_total[NR_PSI_STATES - 1];
- u64 polling_next_update;
- u64 polling_until;
+ struct mutex rtpoll_trigger_lock;
+
+ /* Configured RT polling triggers */
+ struct list_head rtpoll_triggers;
+ u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
+ u32 rtpoll_states;
+ u64 rtpoll_min_period;
+
+ /* Total stall times at the start of RT polling monitor activation */
+ u64 rtpoll_total[NR_PSI_STATES - 1];
+ u64 rtpoll_next_update;
+ u64 rtpoll_until;
};
#else /* CONFIG_PSI */
+#define NR_PSI_RESOURCES 0
+
struct psi_group { };
#endif /* CONFIG_PSI */
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
new file mode 100644
index 000000000000..c1dc87fc536b
--- /dev/null
+++ b/include/linux/psp-platform-access.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_PLATFORM_ACCESS_H
+#define __PSP_PLATFORM_ACCESS_H
+
+#include <linux/psp.h>
+
+enum psp_platform_access_msg {
+ PSP_CMD_NONE = 0x0,
+ PSP_I2C_REQ_BUS_CMD = 0x64,
+ PSP_DYNAMIC_BOOST_GET_NONCE,
+ PSP_DYNAMIC_BOOST_SET_UID,
+ PSP_DYNAMIC_BOOST_GET_PARAMETER,
+ PSP_DYNAMIC_BOOST_SET_PARAMETER,
+};
+
+struct psp_req_buffer_hdr {
+ u32 payload_size;
+ u32 status;
+} __packed;
+
+struct psp_request {
+ struct psp_req_buffer_hdr header;
+ void *buf;
+} __packed;
+
+/**
+ * psp_send_platform_access_msg() - Send a message to control platform features
+ * @msg: message type to send to the PSP mailbox
+ * @req: request buffer carrying the payload; status is written back on return
+ *
+ * This function is intended to be used by drivers outside of ccp to communicate
+ * with the platform.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: unknown error (see kernel log)
+ */
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg, struct psp_request *req);
+
+/**
+ * psp_ring_platform_doorbell() - Ring platform doorbell
+ * @msg: doorbell message to send
+ * @result: location where the device error code is stored on -%EIO
+ *
+ * This function is intended to be used by drivers outside of ccp to ring the
+ * platform doorbell with a message.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: error will be stored in result argument
+ */
+int psp_ring_platform_doorbell(int msg, u32 *result);
+
+/**
+ * psp_check_platform_access_status() - Checks whether platform features are ready
+ *
+ * This function is intended to be used by drivers outside of ccp to determine
+ * if platform features have initialized.
+ *
+ * Returns:
+ * 0 platform features are ready
+ * -%ENODEV platform features are not ready or present
+ */
+int psp_check_platform_access_status(void);
+
+#endif /* __PSP_PLATFORM_ACCESS_H */
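
A hedged sketch of the mailbox calling convention: a driver-private request embeds the common header, sets payload_size, and is cast to struct psp_request, mirroring how an i2c arbitration request could be framed (the payload layout here is hypothetical):

struct foo_psp_req {
	struct psp_req_buffer_hdr hdr;
	u32 arg;			/* hypothetical payload */
} __packed;

static int foo_psp_send(u32 arg)
{
	struct foo_psp_req req = {
		.hdr.payload_size = sizeof(req),
		.arg = arg,
	};

	return psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD,
					    (struct psp_request *)&req);
}
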
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 49d155cd2dfe..3705c2044fc0 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -14,14 +14,6 @@
#include <uapi/linux/psp-sev.h>
-#ifdef CONFIG_X86
-#include <linux/mem_encrypt.h>
-
-#define __psp_pa(x) __sme_pa(x)
-#else
-#define __psp_pa(x) __pa(x)
-#endif
-
#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
/**
@@ -52,6 +44,7 @@ enum sev_cmd {
SEV_CMD_DF_FLUSH = 0x00A,
SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
SEV_CMD_GET_ID = 0x00C,
+ SEV_CMD_INIT_EX = 0x00D,
/* Guest commands */
SEV_CMD_DECOMMISSION = 0x020,
@@ -66,12 +59,14 @@ enum sev_cmd {
SEV_CMD_LAUNCH_MEASURE = 0x033,
SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
SEV_CMD_LAUNCH_FINISH = 0x035,
+ SEV_CMD_ATTESTATION_REPORT = 0x036,
/* Guest migration commands (outgoing) */
SEV_CMD_SEND_START = 0x040,
SEV_CMD_SEND_UPDATE_DATA = 0x041,
SEV_CMD_SEND_UPDATE_VMSA = 0x042,
SEV_CMD_SEND_FINISH = 0x043,
+ SEV_CMD_SEND_CANCEL = 0x044,
/* Guest migration commands (incoming) */
SEV_CMD_RECEIVE_START = 0x050,
@@ -83,6 +78,36 @@ enum sev_cmd {
SEV_CMD_DBG_DECRYPT = 0x060,
SEV_CMD_DBG_ENCRYPT = 0x061,
+ /* SNP specific commands */
+ SEV_CMD_SNP_INIT = 0x081,
+ SEV_CMD_SNP_SHUTDOWN = 0x082,
+ SEV_CMD_SNP_PLATFORM_STATUS = 0x083,
+ SEV_CMD_SNP_DF_FLUSH = 0x084,
+ SEV_CMD_SNP_INIT_EX = 0x085,
+ SEV_CMD_SNP_SHUTDOWN_EX = 0x086,
+ SEV_CMD_SNP_DECOMMISSION = 0x090,
+ SEV_CMD_SNP_ACTIVATE = 0x091,
+ SEV_CMD_SNP_GUEST_STATUS = 0x092,
+ SEV_CMD_SNP_GCTX_CREATE = 0x093,
+ SEV_CMD_SNP_GUEST_REQUEST = 0x094,
+ SEV_CMD_SNP_ACTIVATE_EX = 0x095,
+ SEV_CMD_SNP_LAUNCH_START = 0x0A0,
+ SEV_CMD_SNP_LAUNCH_UPDATE = 0x0A1,
+ SEV_CMD_SNP_LAUNCH_FINISH = 0x0A2,
+ SEV_CMD_SNP_DBG_DECRYPT = 0x0B0,
+ SEV_CMD_SNP_DBG_ENCRYPT = 0x0B1,
+ SEV_CMD_SNP_PAGE_SWAP_OUT = 0x0C0,
+ SEV_CMD_SNP_PAGE_SWAP_IN = 0x0C1,
+ SEV_CMD_SNP_PAGE_MOVE = 0x0C2,
+ SEV_CMD_SNP_PAGE_MD_INIT = 0x0C3,
+ SEV_CMD_SNP_PAGE_SET_STATE = 0x0C6,
+ SEV_CMD_SNP_PAGE_RECLAIM = 0x0C7,
+ SEV_CMD_SNP_PAGE_UNSMASH = 0x0C8,
+ SEV_CMD_SNP_CONFIG = 0x0C9,
+ SEV_CMD_SNP_DOWNLOAD_FIRMWARE_EX = 0x0CA,
+ SEV_CMD_SNP_COMMIT = 0x0CB,
+ SEV_CMD_SNP_VLEK_LOAD = 0x0CD,
+
SEV_CMD_MAX,
};
@@ -100,6 +125,26 @@ struct sev_data_init {
u32 tmr_len; /* In */
} __packed;
+/**
+ * struct sev_data_init_ex - INIT_EX command parameters
+ *
+ * @length: len of the command buffer read by the PSP
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: len of tmr_address
+ * @nv_address: system physical address used for PSP NV storage
+ * @nv_len: len of nv_address
+ */
+struct sev_data_init_ex {
+ u32 length; /* In */
+ u32 flags; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+ u32 reserved; /* In */
+ u64 nv_address; /* In/Out */
+ u32 nv_len; /* In */
+} __packed;
+
#define SEV_INIT_FLAGS_SEV_ES 0x01
/**
@@ -325,11 +370,11 @@ struct sev_data_send_start {
u64 pdh_cert_address; /* In */
u32 pdh_cert_len; /* In */
u32 reserved1;
- u64 plat_cert_address; /* In */
- u32 plat_cert_len; /* In */
+ u64 plat_certs_address; /* In */
+ u32 plat_certs_len; /* In */
u32 reserved2;
- u64 amd_cert_address; /* In */
- u32 amd_cert_len; /* In */
+ u64 amd_certs_address; /* In */
+ u32 amd_certs_len; /* In */
u32 reserved3;
u64 session_address; /* In */
u32 session_len; /* In/Out */
@@ -392,6 +437,15 @@ struct sev_data_send_finish {
} __packed;
/**
+ * struct sev_data_send_cancel - SEND_CANCEL command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_send_cancel {
+ u32 handle; /* In */
+} __packed;
+
+/**
* struct sev_data_receive_start - RECEIVE_START command parameters
*
* @handle: handle of the VM to perform receive operation
@@ -483,12 +537,285 @@ struct sev_data_dbg {
u32 len; /* In */
} __packed;
+/**
+ * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters
+ *
+ * @handle: handle of the VM
+ * @mnonce: a random nonce that will be included in the report.
+ * @address: physical address where the report will be copied.
+ * @len: length of the physical buffer.
+ */
+struct sev_data_attestation_report {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u8 mnonce[16]; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_snp_download_firmware - SNP_DOWNLOAD_FIRMWARE command params
+ *
+ * @address: physical address of firmware image
+ * @len: length of the firmware image
+ */
+struct sev_data_snp_download_firmware {
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_activate - SNP_ACTIVATE command params
+ *
+ * @gctx_paddr: system physical address guest context page
+ * @asid: ASID to bind to the guest
+ */
+struct sev_data_snp_activate {
+ u64 gctx_paddr; /* In */
+ u32 asid; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_addr - generic SNP command params
+ *
+ * @address: physical address of generic data param
+ */
+struct sev_data_snp_addr {
+ u64 address; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_snp_launch_start - SNP_LAUNCH_START command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @policy: guest policy
+ * @ma_gctx_paddr: system physical address of migration agent
+ * @ma_en: the guest is associated with a migration agent
+ * @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the
+ * purpose of guest-assisted migration.
+ * @rsvd: reserved
+ * @gosvw: guest OS-visible workarounds, as defined by hypervisor
+ */
+struct sev_data_snp_launch_start {
+ u64 gctx_paddr; /* In */
+ u64 policy; /* In */
+ u64 ma_gctx_paddr; /* In */
+ u32 ma_en:1; /* In */
+ u32 imi_en:1; /* In */
+ u32 rsvd:30;
+ u8 gosvw[16]; /* In */
+} __packed;
+
+/* SNP support page type */
+enum {
+ SNP_PAGE_TYPE_NORMAL = 0x1,
+ SNP_PAGE_TYPE_VMSA = 0x2,
+ SNP_PAGE_TYPE_ZERO = 0x3,
+ SNP_PAGE_TYPE_UNMEASURED = 0x4,
+ SNP_PAGE_TYPE_SECRET = 0x5,
+ SNP_PAGE_TYPE_CPUID = 0x6,
+
+ SNP_PAGE_TYPE_MAX
+};
+
+/**
+ * struct sev_data_snp_launch_update - SNP_LAUNCH_UPDATE command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @page_size: page size 0 indicates 4K and 1 indicates 2MB page
+ * @page_type: encoded page type
+ * @imi_page: indicates that this page is part of the IMI (Incoming Migration
+ * Image) of the guest
+ * @rsvd: reserved
+ * @rsvd2: reserved
+ * @address: system physical address of destination page to encrypt
+ * @rsvd3: reserved
+ * @vmpl1_perms: VMPL permission mask for VMPL1
+ * @vmpl2_perms: VMPL permission mask for VMPL2
+ * @vmpl3_perms: VMPL permission mask for VMPL3
+ * @rsvd4: reserved
+ */
+struct sev_data_snp_launch_update {
+ u64 gctx_paddr; /* In */
+ u32 page_size:1; /* In */
+ u32 page_type:3; /* In */
+ u32 imi_page:1; /* In */
+ u32 rsvd:27;
+ u32 rsvd2;
+ u64 address; /* In */
+ u32 rsvd3:8;
+ u32 vmpl1_perms:8; /* In */
+ u32 vmpl2_perms:8; /* In */
+ u32 vmpl3_perms:8; /* In */
+ u32 rsvd4;
+} __packed;
+
+/**
+ * struct sev_data_snp_launch_finish - SNP_LAUNCH_FINISH command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @id_block_paddr: system physical address of ID block
+ * @id_auth_paddr: system physical address of ID block authentication structure
+ * @id_block_en: indicates whether ID block is present
+ * @auth_key_en: indicates whether author key is present in authentication structure
+ * @rsvd: reserved
+ * @host_data: host-supplied data for guest, not interpreted by firmware
+ */
+struct sev_data_snp_launch_finish {
+ u64 gctx_paddr;
+ u64 id_block_paddr;
+ u64 id_auth_paddr;
+ u8 id_block_en:1;
+ u8 auth_key_en:1;
+ u64 rsvd:62;
+ u8 host_data[32];
+} __packed;
+
+/**
+ * struct sev_data_snp_guest_status - SNP_GUEST_STATUS command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @address: system physical address of guest status page
+ */
+struct sev_data_snp_guest_status {
+ u64 gctx_paddr;
+ u64 address;
+} __packed;
+
+/**
+ * struct sev_data_snp_page_reclaim - SNP_PAGE_RECLAIM command params
+ *
+ * @paddr: system physical address of page to be claimed. The 0th bit in the
+ * address indicates the page size. 0h indicates 4KB and 1h indicates
+ * 2MB page.
+ */
+struct sev_data_snp_page_reclaim {
+ u64 paddr;
+} __packed;
+
+/**
+ * struct sev_data_snp_page_unsmash - SNP_PAGE_UNSMASH command params
+ *
+ * @paddr: system physical address of page to be unsmashed. The 0th bit in the
+ * address indicates the page size. 0h indicates 4 KB and 1h indicates
+ * 2 MB page.
+ */
+struct sev_data_snp_page_unsmash {
+ u64 paddr;
+} __packed;
+
+/**
+ * struct sev_data_snp_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @src_addr: source address of data to operate on
+ * @dst_addr: destination address of data to operate on
+ */
+struct sev_data_snp_dbg {
+ u64 gctx_paddr; /* In */
+ u64 src_addr; /* In */
+ u64 dst_addr; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_guest_request - SNP_GUEST_REQUEST command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @req_paddr: system physical address of request page
+ * @res_paddr: system physical address of response page
+ */
+struct sev_data_snp_guest_request {
+ u64 gctx_paddr; /* In */
+ u64 req_paddr; /* In */
+ u64 res_paddr; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_init_ex - SNP_INIT_EX structure
+ *
+ * @init_rmp: indicate that the RMP should be initialized.
+ * @list_paddr_en: indicate that list_paddr is valid
+ * @rsvd: reserved
+ * @rsvd1: reserved
+ * @list_paddr: system physical address of range list
+ * @rsvd2: reserved
+ */
+struct sev_data_snp_init_ex {
+ u32 init_rmp:1;
+ u32 list_paddr_en:1;
+ u32 rsvd:30;
+ u32 rsvd1;
+ u64 list_paddr;
+ u8 rsvd2[48];
+} __packed;
+
+/**
+ * struct sev_data_range - RANGE structure
+ *
+ * @base: system physical address of first byte of range
+ * @page_count: number of 4KB pages in this range
+ * @rsvd: reserved
+ */
+struct sev_data_range {
+ u64 base;
+ u32 page_count;
+ u32 rsvd;
+} __packed;
+
+/**
+ * struct sev_data_range_list - RANGE_LIST structure
+ *
+ * @num_elements: number of elements in RANGE_ARRAY
+ * @rsvd: reserved
+ * @ranges: array of num_elements of type RANGE
+ */
+struct sev_data_range_list {
+ u32 num_elements;
+ u32 rsvd;
+ struct sev_data_range ranges[];
+} __packed;
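
A sketch of building a single-entry RANGE_LIST with the flexible array above; struct_size() from <linux/overflow.h> sizes the trailing array, and kzalloc() stands in for whatever firmware-visible allocation the caller actually needs:

static struct sev_data_range_list *foo_make_range_list(u64 base, u32 npages)
{
	struct sev_data_range_list *list;

	list = kzalloc(struct_size(list, ranges, 1), GFP_KERNEL);
	if (!list)
		return NULL;

	list->num_elements = 1;
	list->ranges[0].base = base;
	list->ranges[0].page_count = npages;
	return list;
}
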
+
+/**
+ * struct sev_data_snp_shutdown_ex - SNP_SHUTDOWN_EX structure
+ *
+ * @len: length of the command buffer read by the PSP
+ * @iommu_snp_shutdown: Disable enforcement of SNP in the IOMMU
+ * @rsvd1: reserved
+ */
+struct sev_data_snp_shutdown_ex {
+ u32 len;
+ u32 iommu_snp_shutdown:1;
+ u32 rsvd1:31;
+} __packed;
+
+/**
+ * struct sev_platform_init_args
+ *
+ * @error: SEV firmware error code
+ * @probe: True if this is being called as part of CCP module probe, which
+ * will defer SEV_INIT/SEV_INIT_EX firmware initialization until needed
+ * unless psp_init_on_probe module param is set
+ */
+struct sev_platform_init_args {
+ int error;
+ bool probe;
+};
+
+/**
+ * struct sev_data_snp_commit - SNP_COMMIT structure
+ *
+ * @len: length of the command buffer read by the PSP
+ */
+struct sev_data_snp_commit {
+ u32 len;
+} __packed;
+
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
/**
* sev_platform_init - perform SEV INIT command
*
- * @error: SEV command return code
+ * @args: struct sev_platform_init_args to pass in arguments
*
* Returns:
* 0 if the SEV successfully processed the command
@@ -497,7 +824,7 @@ struct sev_data_dbg {
* -%ETIMEDOUT if the SEV command timed out
* -%EIO if the SEV returned a non-zero return code
*/
-int sev_platform_init(int *error);
+int sev_platform_init(struct sev_platform_init_args *args);
/**
* sev_platform_status - perform SEV PLATFORM_STATUS command
@@ -597,14 +924,32 @@ int sev_guest_df_flush(int *error);
*/
int sev_guest_decommission(struct sev_data_decommission *data, int *error);
+/**
+ * sev_do_cmd - issue an SEV or an SEV-SNP command
+ *
+ * @cmd: SEV or SEV-SNP firmware command to issue
+ * @data: arguments for firmware command
+ * @psp_ret: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the PSP device is not available
+ * -%ENOTSUPP if PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if PSP device returned a non-zero return code
+ */
+int sev_do_cmd(int cmd, void *data, int *psp_ret);
+
void *psp_copy_user_blob(u64 uaddr, u32 len);
+void *snp_alloc_firmware_page(gfp_t mask);
+void snp_free_firmware_page(void *addr);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
static inline int
sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
-static inline int sev_platform_init(int *error) { return -ENODEV; }
+static inline int sev_platform_init(struct sev_platform_init_args *args) { return -ENODEV; }
static inline int
sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; }
@@ -613,6 +958,9 @@ static inline int
sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; }
static inline int
+sev_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
+
+static inline int
sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; }
static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
@@ -622,6 +970,13 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int
static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
+static inline void *snp_alloc_firmware_page(gfp_t mask)
+{
+ return NULL;
+}
+
+static inline void snp_free_firmware_page(void *addr) { }
+
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_SEV_H__ */
diff --git a/include/linux/psp.h b/include/linux/psp.h
new file mode 100644
index 000000000000..92e60aeef21e
--- /dev/null
+++ b/include/linux/psp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_H
+#define __PSP_H
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x) __sme_pa(x)
+#else
+#define __psp_pa(x) __pa(x)
+#endif
+
+/*
+ * Fields and bits used by most PSP mailboxes
+ *
+ * Note: Some mailboxes (such as SEV) have extra bits or different meanings
+ * and should include an appropriate local definition in their source file.
+ */
+#define PSP_CMDRESP_STS GENMASK(15, 0)
+#define PSP_CMDRESP_CMD GENMASK(23, 16)
+#define PSP_CMDRESP_RESERVED GENMASK(29, 24)
+#define PSP_CMDRESP_RECOVERY BIT(30)
+#define PSP_CMDRESP_RESP BIT(31)
+
+#define PSP_DRBL_MSG PSP_CMDRESP_CMD
+#define PSP_DRBL_RING BIT(0)
+
+#endif /* __PSP_H */
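
A decoding sketch for a raw command/response word using the masks above, with FIELD_GET() from <linux/bitfield.h> (register read elided; return conventions hypothetical):

static int foo_parse_cmdresp(u32 reg)
{
	if (reg & PSP_CMDRESP_RECOVERY)
		return -EBUSY;		/* mailbox is in recovery mode */
	if (!(reg & PSP_CMDRESP_RESP))
		return -EAGAIN;		/* command still in flight */

	return FIELD_GET(PSP_CMDRESP_STS, reg);	/* firmware status */
}
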
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index eb93a54cff31..638507a3c8ff 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
+#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/types.h>
@@ -57,6 +57,9 @@ struct pstore_info;
* @size: size of @buf
* @ecc_notice_size:
* ECC information for @buf
+ * @priv: pointer for backend specific use, will be
+ * kfree()d by the pstore core if non-NULL
+ * when the record is freed.
*
* Valid for PSTORE_TYPE_DMESG @type:
*
@@ -74,6 +77,7 @@ struct pstore_record {
char *buf;
ssize_t size;
ssize_t ecc_notice_size;
+ void *priv;
int count;
enum kmsg_dump_reason reason;
@@ -87,7 +91,7 @@ struct pstore_record {
* @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
- * @buf_lock: semaphore to serialize access to @buf
+ * @buf_lock: spinlock to serialize access to @buf
* @buf: preallocated crash dump buffer
* @bufsize: size of @buf available for crash dump bytes (must match
* smallest number of bytes available for writing to a
@@ -178,7 +182,7 @@ struct pstore_info {
struct module *owner;
const char *name;
- struct semaphore buf_lock;
+ spinlock_t buf_lock;
char *buf;
size_t bufsize;
diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h
index 61e914522b01..924ca07aafbd 100644
--- a/include/linux/pstore_blk.h
+++ b/include/linux/pstore_blk.h
@@ -8,80 +8,17 @@
#include <linux/pstore_zone.h>
/**
- * typedef pstore_blk_panic_write_op - panic write operation to block device
- *
- * @buf: the data to write
- * @start_sect: start sector to block device
- * @sects: sectors count on buf
- *
- * Return: On success, zero should be returned. Others excluding -ENOMSG
- * mean error. -ENOMSG means to try next zone.
- *
- * Panic write to block device must be aligned to SECTOR_SIZE.
- */
-typedef int (*pstore_blk_panic_write_op)(const char *buf, sector_t start_sect,
- sector_t sects);
-
-/**
- * struct pstore_blk_info - pstore/blk registration details
- *
- * @major: Which major device number to support with pstore/blk
- * @flags: The supported PSTORE_FLAGS_* from linux/pstore.h.
- * @panic_write:The write operation only used for the panic case.
- * This can be NULL, but is recommended to avoid losing
- * crash data if the kernel's IO path or work queues are
- * broken during a panic.
- * @devt: The dev_t that pstore/blk has attached to.
- * @nr_sects: Number of sectors on @devt.
- * @start_sect: Starting sector on @devt.
- */
-struct pstore_blk_info {
- unsigned int major;
- unsigned int flags;
- pstore_blk_panic_write_op panic_write;
-
- /* Filled in by pstore/blk after registration. */
- dev_t devt;
- sector_t nr_sects;
- sector_t start_sect;
-};
-
-int register_pstore_blk(struct pstore_blk_info *info);
-void unregister_pstore_blk(unsigned int major);
-
-/**
* struct pstore_device_info - back-end pstore/blk driver structure.
*
- * @total_size: The total size in bytes pstore/blk can use. It must be greater
- * than 4096 and be multiple of 4096.
* @flags: Refer to macro starting with PSTORE_FLAGS defined in
* linux/pstore.h. It means what front-ends this device support.
* Zero means all backends for compatible.
- * @read: The general read operation. Both of the function parameters
- * @size and @offset are relative value to bock device (not the
- * whole disk).
- * On success, the number of bytes should be returned, others
- * means error.
- * @write: The same as @read, but the following error number:
- * -EBUSY means try to write again later.
- * -ENOMSG means to try next zone.
- * @erase: The general erase operation for device with special removing
- * job. Both of the function parameters @size and @offset are
- * relative value to storage.
- * Return 0 on success and others on failure.
- * @panic_write:The write operation only used for panic case. It's optional
- * if you do not care panic log. The parameters are relative
- * value to storage.
- * On success, the number of bytes should be returned, others
- * excluding -ENOMSG mean error. -ENOMSG means to try next zone.
+ * @zone: The struct pstore_zone_info details.
+ *
*/
struct pstore_device_info {
- unsigned long total_size;
unsigned int flags;
- pstore_zone_read_op read;
- pstore_zone_write_op write;
- pstore_zone_erase_op erase;
- pstore_zone_write_op panic_write;
+ struct pstore_zone_info zone;
};
int register_pstore_device(struct pstore_device_info *dev);
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9f16afec7290..9d65ff94e216 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -8,28 +8,7 @@
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/pstore.h>
-#include <linux/types.h>
-
-/*
- * Choose whether access to the RAM zone requires locking or not. If a zone
- * can be written to from different CPUs like with ftrace for example, then
- * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
- */
-#define PRZ_FLAG_NO_LOCK BIT(0)
-/*
- * If a PRZ should only have a single-boot lifetime, this marks it as
- * getting wiped after its contents get copied out after boot.
- */
-#define PRZ_FLAG_ZAP_OLD BIT(1)
-
-struct persistent_ram_buffer;
-struct rs_control;
struct persistent_ram_ecc_info {
int block_size;
@@ -39,84 +18,6 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
-/**
- * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
- * used as a pstore backend
- *
- * @paddr: physical address of the mapped RAM area
- * @size: size of mapping
- * @label: unique name of this PRZ
- * @type: frontend type for this PRZ
- * @flags: holds PRZ_FLAGS_* bits
- *
- * @buffer_lock:
- * locks access to @buffer "size" bytes and "start" offset
- * @buffer:
- * pointer to actual RAM area managed by this PRZ
- * @buffer_size:
- * bytes in @buffer->data (not including any trailing ECC bytes)
- *
- * @par_buffer:
- * pointer into @buffer->data containing ECC bytes for @buffer->data
- * @par_header:
- * pointer into @buffer->data containing ECC bytes for @buffer header
- * (i.e. all fields up to @data)
- * @rs_decoder:
- * RSLIB instance for doing ECC calculations
- * @corrected_bytes:
- * ECC corrected bytes accounting since boot
- * @bad_blocks:
- * ECC uncorrectable bytes accounting since boot
- * @ecc_info:
- * ECC configuration details
- *
- * @old_log:
- * saved copy of @buffer->data prior to most recent wipe
- * @old_log_size:
- * bytes contained in @old_log
- *
- */
-struct persistent_ram_zone {
- phys_addr_t paddr;
- size_t size;
- void *vaddr;
- char *label;
- enum pstore_type_id type;
- u32 flags;
-
- raw_spinlock_t buffer_lock;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
-
- char *par_buffer;
- char *par_header;
- struct rs_control *rs_decoder;
- int corrected_bytes;
- int bad_blocks;
- struct persistent_ram_ecc_info ecc_info;
-
- char *old_log;
- size_t old_log_size;
-};
-
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags, char *label);
-void persistent_ram_free(struct persistent_ram_zone *prz);
-void persistent_ram_zap(struct persistent_ram_zone *prz);
-
-int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
-int persistent_ram_write_user(struct persistent_ram_zone *prz,
- const void __user *s, unsigned int count);
-
-void persistent_ram_save_old(struct persistent_ram_zone *prz);
-size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
-void *persistent_ram_old(struct persistent_ram_zone *prz);
-void persistent_ram_free_old(struct persistent_ram_zone *prz);
-ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
- char *str, size_t len);
-
/*
* Ramoops platform data
* @mem_size memory size for ramoops
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
index 2a3a95586425..8dbd51ea8626 100644
--- a/include/linux/ptdump.h
+++ b/include/linux/ptdump.h
@@ -18,6 +18,16 @@ struct ptdump_state {
const struct ptdump_range *range;
};
+bool ptdump_walk_pgd_level_core(struct seq_file *m,
+ struct mm_struct *mm, pgd_t *pgd,
+ bool checkwx, bool dmesg);
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd);
+bool ptdump_check_wx(void);
+
+static inline void debug_checkwx(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_WX))
+ ptdump_check_wx();
+}
#endif /* _LINUX_PTDUMP_H */
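
A usage sketch (an assumption about call sites, not mandated by this header):
architectures typically invoke debug_checkwx() once the final kernel mappings
are in place, e.g. at the end of their mark_rodata_ro() implementation:

	void mark_rodata_ro(void)
	{
		/* ... make .rodata read-only, drop init mappings ... */
		debug_checkwx();	/* no-op unless CONFIG_DEBUG_WX */
	}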
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 1a941efcaa62..1fbf9d6c20ef 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -2,7 +2,7 @@
#ifndef _INCLUDE_PTI_H
#define _INCLUDE_PTI_H
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index dd00fa41f7e7..1b5a953c6bbc 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -10,8 +10,12 @@
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
+#include <asm/unaligned.h>
#include <linux/ip.h>
+#include <linux/ktime.h>
#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
#define PTP_CLASS_NONE 0x00 /* not a PTP event message */
#define PTP_CLASS_V1 0x01 /* protocol version 1 */
@@ -31,12 +35,20 @@
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6)
+#define PTP_MSGTYPE_SYNC 0x0
+#define PTP_MSGTYPE_DELAY_REQ 0x1
+#define PTP_MSGTYPE_PDELAY_REQ 0x2
+#define PTP_MSGTYPE_PDELAY_RESP 0x3
+
#define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
#define OFF_PTP_SEQUENCE_ID 30
-#define OFF_PTP_CONTROL 32 /* PTPv1 only */
+
+/* PTP header flag fields */
+#define PTP_FLAG_TWOSTEP BIT(1)
/* Below defines should actually be removed at some point in time. */
#define IP6_HLEN 40
@@ -44,6 +56,30 @@
#define OFF_IHL 14
#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
+struct clock_identity {
+ u8 id[8];
+} __packed;
+
+struct port_identity {
+ struct clock_identity clock_identity;
+ __be16 port_number;
+} __packed;
+
+struct ptp_header {
+ u8 tsmt; /* transportSpecific | messageType */
+ u8 ver; /* reserved | versionPTP */
+ __be16 message_length;
+ u8 domain_number;
+ u8 reserved1;
+ u8 flag_field[2];
+ __be64 correction;
+ __be32 reserved2;
+ struct port_identity source_port_identity;
+ __be16 sequence_id;
+ u8 control;
+ u8 log_message_interval;
+} __packed;
+
#if defined(CONFIG_NET_PTP_CLASSIFY)
/**
* ptp_classify_raw - classify a PTP packet
@@ -57,6 +93,120 @@
*/
unsigned int ptp_classify_raw(const struct sk_buff *skb);
+/**
+ * ptp_parse_header - Get pointer to the PTP v2 header
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function takes care of the VLAN, UDP, IPv4 and IPv6 headers. The length
+ * is checked.
+ *
+ * Note, internally skb_mac_header() is used. Make sure that the @skb is
+ * initialized accordingly.
+ *
+ * Return: Pointer to the ptp v2 header or NULL if not found
+ */
+struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type);
+
+/**
+ * ptp_get_msgtype - Extract ptp message type from given header
+ * @hdr: ptp header
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function returns the message type for a given ptp header. It takes care
+ * of the different ptp header versions (v1 or v2).
+ *
+ * Return: The message type
+ */
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+ u8 msgtype;
+
+ if (unlikely(type & PTP_CLASS_V1)) {
+ /* msg type is located at the control field for ptp v1 */
+ msgtype = hdr->control;
+ } else {
+ msgtype = hdr->tsmt & 0x0f;
+ }
+
+ return msgtype;
+}
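
Taken together, the classifier and the helpers above support driver code along
these lines; a minimal sketch (xmit_needs_two_step_ts() is hypothetical):

	static bool xmit_needs_two_step_ts(struct sk_buff *skb)
	{
		unsigned int type = ptp_classify_raw(skb);
		struct ptp_header *hdr;

		if (type == PTP_CLASS_NONE)
			return false;

		hdr = ptp_parse_header(skb, type);
		if (!hdr)
			return false;

		/* Sync with the twoStepFlag set expects a follow-up message. */
		return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC &&
		       (hdr->flag_field[0] & PTP_FLAG_TWOSTEP);
	}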
+
+/**
+ * ptp_check_diff8 - Computes new checksum (when altering a 64-bit field)
+ * @old: old field value
+ * @new: new field value
+ * @oldsum: previous checksum
+ *
+ * This function can be used to calculate a new checksum when only a single
+ * field is changed. Similar to ip_vs_check_diff*() in ip_vs.h.
+ *
+ * Return: Updated checksum
+ */
+static inline __wsum ptp_check_diff8(__be64 old, __be64 new, __wsum oldsum)
+{
+ __be64 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+
+/**
+ * ptp_header_update_correction - Update PTP header's correction field
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ * @hdr: ptp header
+ * @correction: new correction value
+ *
+ * This updates the correction field of a PTP header and updates the UDP
+ * checksum (if UDP is used as transport). It is needed for hardware capable of
+ * one-step P2P that does not already modify the correction field of Pdelay_Req
+ * event messages on ingress.
+ */
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+ __be64 correction_old;
+ struct udphdr *uhdr;
+
+ /* previous correction value is required for checksum update. */
+ memcpy(&correction_old, &hdr->correction, sizeof(correction_old));
+
+ /* write new correction value */
+ put_unaligned_be64((u64)correction, &hdr->correction);
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ /* locate udp header */
+ uhdr = (struct udphdr *)((char *)hdr - sizeof(struct udphdr));
+ break;
+ default:
+ return;
+ }
+
+ /* update checksum */
+ uhdr->check = csum_fold(ptp_check_diff8(correction_old,
+ hdr->correction,
+ ~csum_unfold(uhdr->check)));
+ if (!uhdr->check)
+ uhdr->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/**
+ * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function evaluates whether the given skb is a PTP Sync message.
+ *
+ * Return: true if sync message, false otherwise
+ */
+bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type);
+
void __init ptp_classifier_init(void);
#else
static inline void ptp_classifier_init(void)
@@ -66,5 +216,28 @@ static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
{
return PTP_CLASS_NONE;
}
+static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb,
+ unsigned int type)
+{
+ return NULL;
+}
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+ /* The return is meaningless. This stub is never executed, since
+ * ptp_parse_header() never returns a header for it to act on.
+ */
+ return PTP_MSGTYPE_SYNC;
+}
+static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
+{
+ return false;
+}
+
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index d3e8ba5c7125..6e4b8206c7d0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -11,7 +11,23 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
+#define PTP_CLOCK_NAME_LEN 32
+/**
+ * struct ptp_clock_request - request PTP clock event
+ *
+ * @type: The type of the request.
+ * EXTTS: Configure external trigger timestamping
+ * PEROUT: Configure periodic output signal (e.g. PPS)
+ * PPS: trigger internal PPS event for input
+ * into kernel PPS subsystem
+ * @extts: describes configuration for external trigger timestamping.
+ * This is only valid when event == PTP_CLK_REQ_EXTTS.
+ * @perout: describes configuration for periodic output.
+ * This is only valid when event == PTP_CLK_REQ_PEROUT.
+ */
struct ptp_clock_request {
enum {
@@ -29,6 +45,8 @@ struct system_device_crosststamp;
/**
* struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ * @pre_ts: system timestamp before capturing PHC
+ * @post_ts: system timestamp after capturing PHC
*/
struct ptp_system_timestamp {
struct timespec64 pre_ts;
@@ -59,14 +77,14 @@ struct ptp_system_timestamp {
* nominal frequency in parts per million, but with a
* 16 bit binary fractional field.
*
- * @adjfreq: Adjusts the frequency of the hardware clock.
- * This method is deprecated. New drivers should implement
- * the @adjfine method instead.
- * parameter delta: Desired frequency offset from nominal frequency
- * in parts per billion
+ * @adjphase: Indicates that the PHC should use an internal servo
+ * algorithm to correct the provided phase offset.
+ * parameter delta: PHC servo phase adjustment target
+ * in nanoseconds.
*
- * @adjphase: Adjusts the phase offset of the hardware clock.
- * parameter delta: Desired change in nanoseconds.
+ * @getmaxphase: Advertises maximum offset that can be provided
+ * to the hardware clock's phase control functionality
+ * through adjphase.
*
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
@@ -92,6 +110,32 @@ struct ptp_system_timestamp {
* @settime64: Set the current time on the hardware clock.
* parameter ts: Time value to set.
*
+ * @getcycles64: Reads the current free running cycle counter from the hardware
+ * clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettime64 or @gettimex64 will be used as default
+ * implementation.
+ * parameter ts: Holds the result.
+ *
+ * @getcyclesx64: Reads the current free running cycle counter from the
+ * hardware clock and optionally also the system clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettimex64 will be used as default implementation if
+ * available.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps
+ * from the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
+ * @getcrosscycles: Reads the current free running cycle counter from the
+ * hardware clock and system clock simultaneously.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @getcrosststamp will be used as default implementation if
+ * available.
+ * parameter cts: Contains timestamp (device,system) pair,
+ * where system time is realtime and monotonic.
+ *
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
* parameter on: Caller passes one to enable or zero to disable.
@@ -121,7 +165,7 @@ struct ptp_system_timestamp {
struct ptp_clock_info {
struct module *owner;
- char name[16];
+ char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
@@ -130,8 +174,8 @@ struct ptp_clock_info {
int pps;
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
- int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
+ s32 (*getmaxphase)(struct ptp_clock_info *ptp);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
@@ -139,6 +183,11 @@ struct ptp_clock_info {
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
+ int (*getcycles64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*getcyclesx64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
+ int (*getcrosscycles)(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
@@ -151,6 +200,7 @@ struct ptp_clock;
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
+ PTP_CLOCK_EXTOFF,
PTP_CLOCK_PPS,
PTP_CLOCK_PPSUSR,
};
@@ -161,6 +211,7 @@ enum ptp_clock_events {
* @type: One of the ptp_clock_events enumeration values.
* @index: Identifies the source of the event.
* @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @offset: When the event occurred (%PTP_CLOCK_EXTOFF only).
* @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
*/
@@ -169,11 +220,84 @@ struct ptp_clock_event {
int index;
union {
u64 timestamp;
+ s64 offset;
struct pps_event_time pps_times;
};
};
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+/**
+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ *
+ * @ppm: Parts per million, but with a 16 bit binary fractional field
+ */
+static inline long scaled_ppm_to_ppb(long ppm)
+{
+ /*
+ * The 'freq' field in the 'struct timex' is in parts per
+ * million, but with a 16 bit binary fractional field.
+ *
+ * We want to calculate
+ *
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * which simplifies to
+ *
+ * ppb = scaled_ppm * 125 / 2^13
+ */
+ s64 ppb = 1 + ppm;
+
+ ppb *= 125;
+ ppb >>= 13;
+ return (long)ppb;
+}
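
As a quick sanity check of the arithmetic (commentary, not part of the
header): 1 ppm corresponds to 65536 in scaled-ppm units, so

	scaled_ppm_to_ppb(65536)
		= ((1 + 65536) * 125) >> 13
		= 8192125 >> 13
		= 1000	/* ppb, i.e. exactly 1 ppm */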
+
+/**
+ * diff_by_scaled_ppm - Calculate difference using scaled ppm
+ * @base: the base increment value to adjust
+ * @scaled_ppm: scaled parts per million to adjust by
+ * @diff: on return, the absolute value of calculated diff
+ *
+ * Calculate the difference to adjust the base increment using scaled parts
+ * per million.
+ *
+ * Use mul_u64_u64_div_u64 to perform the difference calculation to avoid
+ * possible overflow.
+ *
+ * Returns: true if scaled_ppm is negative, false otherwise
+ */
+static inline bool diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff)
+{
+ bool negative = false;
+
+ if (scaled_ppm < 0) {
+ negative = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, 1000000ULL << 16);
+
+ return negative;
+}
+
+/**
+ * adjust_by_scaled_ppm - Adjust a base increment by scaled parts per million
+ * @base: the base increment value to adjust
+ * @scaled_ppm: scaled parts per million frequency adjustment
+ *
+ * Helper function which calculates a new increment value based on the
+ * requested scaled parts per million adjustment.
+ */
+static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
+{
+ u64 diff;
+
+ if (diff_by_scaled_ppm(base, scaled_ppm, &diff))
+ return base - diff;
+
+ return base + diff;
+}
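
A minimal ->adjfine() sketch built on the helpers above, for a hypothetical
device with a programmable increment register (FOO_BASE_INCVAL and
foo_write_incval() are made up for illustration):

	static int foo_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
	{
		u64 incval = adjust_by_scaled_ppm(FOO_BASE_INCVAL, scaled_ppm);

		foo_write_incval(incval);
		return 0;
	}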
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
* ptp_clock_register() - register a PTP hardware clock driver
@@ -217,14 +341,6 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
extern int ptp_clock_index(struct ptp_clock *ptp);
/**
- * scaled_ppm_to_ppb() - convert scaled ppm to ppb
- *
- * @ppm: Parts per million, but with a 16 bit binary fractional field
- */
-
-extern s32 scaled_ppm_to_ppb(long ppm);
-
-/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
* The caller must hold ptp_clock::pincfg_mux. Drivers do not have
@@ -251,6 +367,11 @@ int ptp_find_pin(struct ptp_clock *ptp,
* should most likely call ptp_find_pin() directly from their
* ptp_clock_info::enable() method.
*
+* @ptp: The clock obtained from ptp_clock_register().
+* @func: One of the ptp_pin_function enumerated values.
+* @chan: The particular functional channel to find.
+* Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
+* or -1 if the auxiliary function cannot be found.
*/
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
@@ -287,11 +408,50 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{ return -1; }
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
+#endif
+
+#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
+/*
+ * These are called by the network core, and don't work if PTP is in
+ * a loadable module.
+ */
+
+/**
+ * ptp_get_vclocks_index() - get the indices of all vclocks on a pclock;
+ * the caller is responsible for freeing
+ * the memory behind vclock_index
+ *
+ * @pclock_index: phc index of the ptp pclock.
+ * @vclock_index: pointer to a pointer that receives the vclock indices.
+ *
+ * Return: number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert timestamp to a ptp vclock time
+ *
+ * @hwtstamp: timestamp
+ * @vclock_index: phc index of ptp vclock.
+ *
+ * Returns converted timestamp, or 0 on error.
+ */
+ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index);
+#else
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
+ int vclock_index)
+{ return 0; }
#endif
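
Consumer-side sketch (assumed usage, simplified from the RX timestamping
path): the networking core maps a pclock timestamp into the requested vclock
domain before handing it to a socket:

	static ktime_t foo_stamp_for_socket(const struct skb_shared_hwtstamps *hwts,
					    int vclock_index)
	{
		/* ptp_convert_timestamp() returns 0 if the conversion fails */
		return ptp_convert_timestamp(&hwts->hwtstamp, vclock_index);
	}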
diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h
new file mode 100644
index 000000000000..e8c74fa3f455
--- /dev/null
+++ b/include/linux/ptp_kvm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#ifndef _PTP_KVM_H_
+#define _PTP_KVM_H_
+
+#include <linux/clocksource_ids.h>
+#include <linux/types.h>
+
+struct timespec64;
+
+int kvm_arch_ptp_init(void);
+void kvm_arch_ptp_exit(void);
+int kvm_arch_ptp_get_clock(struct timespec64 *ts);
+int kvm_arch_ptp_get_crosststamp(u64 *cycle,
+ struct timespec64 *tspec, enum clocksource_ids *cs_id);
+
+#endif /* _PTP_KVM_H_ */
diff --git a/include/linux/ptp_mock.h b/include/linux/ptp_mock.h
new file mode 100644
index 000000000000..72eb401034d9
--- /dev/null
+++ b/include/linux/ptp_mock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Mock-up PTP Hardware Clock driver for virtual network devices
+ *
+ * Copyright 2023 NXP
+ */
+
+#ifndef _PTP_MOCK_H_
+#define _PTP_MOCK_H_
+
+struct device;
+struct mock_phc;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_MOCK)
+
+struct mock_phc *mock_phc_create(struct device *dev);
+void mock_phc_destroy(struct mock_phc *phc);
+int mock_phc_index(struct mock_phc *phc);
+
+#else
+
+static inline struct mock_phc *mock_phc_create(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mock_phc_destroy(struct mock_phc *phc)
+{
+}
+
+static inline int mock_phc_index(struct mock_phc *phc)
+{
+ return -1;
+}
+
+#endif
+
+#endif /* _PTP_MOCK_H_ */
diff --git a/include/linux/ptp_pch.h b/include/linux/ptp_pch.h
new file mode 100644
index 000000000000..7ba643b62c15
--- /dev/null
+++ b/include/linux/ptp_pch.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP PCH
+ *
+ * Copyright 2019 Linaro Ltd.
+ *
+ * Author Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef _PTP_PCH_H_
+#define _PTP_PCH_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+
+#endif /* _PTP_PCH_H_ */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 2a9df80ea887..90507d4afcd6 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
-#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
@@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT 31
-#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT 30
-#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
-
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
@@ -60,7 +53,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
-extern void ptrace_notify(int exit_code);
+extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent,
const struct cred *ptracer_cred);
@@ -155,8 +148,7 @@ static inline bool ptrace_event_enabled(struct task_struct *task, int event)
static inline void ptrace_event(int event, unsigned long message)
{
if (unlikely(ptrace_event_enabled(current, event))) {
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
+ ptrace_notify((event << 8) | SIGTRAP, message);
} else if (event == PTRACE_EVENT_EXEC) {
/* legacy EXEC report via SIGTRAP */
if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
@@ -171,7 +163,7 @@ static inline void ptrace_event(int event, unsigned long message)
*
* Check whether @event is enabled and, if so, report @event and @pid
* to the ptrace parent. @pid is reported as the pid_t seen from the
- * the ptrace parent's pid namespace.
+ * ptrace parent's pid namespace.
*
* Called without locks.
*/
@@ -362,29 +354,25 @@ static inline void user_single_step_report(struct pt_regs *regs)
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with the siglock held, to decide whether or not it's
- * necessary to release the siglock and call arch_ptrace_stop() with the
- * same @code and @info arguments. It can be defined to a constant if
- * arch_ptrace_stop() is never required, or always is. On machines where
- * this makes sense, it should be defined to a quick test to optimize out
- * calling arch_ptrace_stop() when it would be superfluous. For example,
- * if the thread has not been back to user mode since the last stop, the
- * thread state might indicate that nothing needs to be done.
+ * necessary to release the siglock and call arch_ptrace_stop(). It can be
+ * defined to a constant if arch_ptrace_stop() is never required, or always
+ * is. On machines where this makes sense, it should be defined to a quick
+ * test to optimize out calling arch_ptrace_stop() when it would be
+ * superfluous. For example, if the thread has not been back to user mode
+ * since the last stop, the thread state might indicate that nothing needs
+ * to be done.
*
* This is guaranteed to be invoked once before a task stops for ptrace and
* may include arch-specific operations necessary prior to a ptrace stop.
*/
-#define arch_ptrace_stop_needed(code, info) (0)
+#define arch_ptrace_stop_needed() (0)
#endif
#ifndef arch_ptrace_stop
/**
* arch_ptrace_stop - Do machine-specific work before stopping for ptrace
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with no locks held when arch_ptrace_stop_needed() has
* just returned nonzero. It is allowed to block, e.g. for user memory
@@ -394,27 +382,98 @@ static inline void user_single_step_report(struct pt_regs *regs)
* we only do it when the arch requires it for this particular stop, as
* indicated by arch_ptrace_stop_needed().
*/
-#define arch_ptrace_stop(code, info) do { } while (0)
+#define arch_ptrace_stop() do { } while (0)
#endif
#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif
-/*
- * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
- * on *all* architectures; the only reason to have a per-arch definition
- * is optimisation.
- */
-#ifndef signal_pt_regs
-#define signal_pt_regs() task_pt_regs(current)
-#endif
-
#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
+#ifndef exception_ip
+#define exception_ip(x) instruction_pointer(x)
+#endif
+
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline int ptrace_report_syscall(unsigned long message)
+{
+ int ptrace = current->ptrace;
+ int signr;
+
+ if (!(ptrace & PT_PTRACED))
+ return 0;
+
+ signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0),
+ message);
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (signr)
+ send_sig(signr, current, 1);
+
+ return fatal_signal_pending(current);
+}
+
+/**
+ * ptrace_report_syscall_entry - task is about to attempt a system call
+ * @regs: user register state of current task
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
+ * entered the kernel for a system call. Full user register state is
+ * available here. Changing the values in @regs can affect the system
+ * call number and arguments to be tried. It is safe to block here,
+ * preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call. That must prevent normal entry so no system call is
+ * made. If @task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return. It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int ptrace_report_syscall_entry(
+ struct pt_regs *regs)
+{
+ return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
+}
+
+/**
+ * ptrace_report_syscall_exit - task has just finished a system call
+ * @regs: user register state of current task
+ * @step: nonzero if simulating single-step or block-step
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
+ * the current task has just finished an attempted system call. Full
+ * user register state is available here. It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
+{
+ if (step)
+ user_single_step_report(regs);
+ else
+ ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
+}
#endif
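
For context, a simplified sketch of how the report helpers slot into the
generic syscall entry path (foo_syscall_trace_enter() is illustrative, not
the actual kernel/entry/common.c code):

	static long foo_syscall_trace_enter(struct pt_regs *regs, long syscall)
	{
		if (ptrace_report_syscall_entry(regs))
			return -1L;	/* tracer asked to abort the syscall */

		return syscall;
	}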
diff --git a/include/linux/ptrace_api.h b/include/linux/ptrace_api.h
new file mode 100644
index 000000000000..26e7d275ad8d
--- /dev/null
+++ b/include/linux/ptrace_api.h
@@ -0,0 +1 @@
+#include <linux/ptrace.h>
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
index b950e961cfa8..d7dc1559427f 100644
--- a/include/linux/purgatory.h
+++ b/include/linux/purgatory.h
@@ -3,7 +3,7 @@
#define _LINUX_PURGATORY_H
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <uapi/linux/kexec.h>
struct kexec_sha_region {
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index a13ff383fa1d..4a6568dfdf3f 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -6,9 +6,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
-struct pwm_capture;
-struct seq_file;
-
struct pwm_chip;
/**
@@ -44,8 +41,8 @@ struct pwm_args {
};
enum {
- PWMF_REQUESTED = 1 << 0,
- PWMF_EXPORTED = 1 << 1,
+ PWMF_REQUESTED = 0,
+ PWMF_EXPORTED = 1,
};
/*
@@ -54,12 +51,17 @@ enum {
* @duty_cycle: PWM duty cycle (in nanoseconds)
* @polarity: PWM polarity
* @enabled: PWM enabled status
+ * @usage_power: If set, the PWM driver is only required to maintain the power
+ * output but has more freedom regarding signal form.
+ * If supported, the signal can be optimized, for example to
+ * improve EMI by phase shifting individual channels.
*/
struct pwm_state {
u64 period;
u64 duty_cycle;
enum pwm_polarity polarity;
bool enabled;
+ bool usage_power;
};
/**
@@ -67,9 +69,7 @@ struct pwm_state {
* @label: name of the PWM device
* @flags: flags associated with the PWM device
* @hwpwm: per-chip relative index of the PWM device
- * @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
- * @chip_data: chip-private data associated with the PWM device
* @args: PWM arguments
* @state: last applied state
* @last: last implemented state (for PWM_DEBUG)
@@ -78,9 +78,7 @@ struct pwm_device {
const char *label;
unsigned long flags;
unsigned int hwpwm;
- unsigned int pwm;
struct pwm_chip *chip;
- void *chip_data;
struct pwm_args args;
struct pwm_state state;
@@ -91,6 +89,11 @@ struct pwm_device {
* pwm_get_state() - retrieve the current PWM state
* @pwm: PWM device
* @state: state to fill with the current PWM state
+ *
+ * The returned PWM state represents the state that was applied by a previous call to
+ * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware
+ * state (if supported) or the default settings.
*/
static inline void pwm_get_state(const struct pwm_device *pwm,
struct pwm_state *state)
@@ -107,12 +110,6 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
-{
- if (pwm)
- pwm->state.period = period;
-}
-
static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -122,12 +119,6 @@ static inline u64 pwm_get_period(const struct pwm_device *pwm)
return state.period;
}
-static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
-{
- if (pwm)
- pwm->state.duty_cycle = duty;
-}
-
static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -153,20 +144,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
}
/**
- * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep()
* @pwm: PWM device
* @state: state to fill with the prepared PWM state
*
* This function prepares a state that can later be tweaked and applied
- * to the PWM device with pwm_apply_state(). This is a convenient function
+ * to the PWM device with pwm_apply_might_sleep(). This is a convenient function
* that first retrieves the current PWM state and then replaces the period
* and polarity fields with the reference values defined in pwm->args.
* Once the function returns, you can adjust the ->enabled and ->duty_cycle
- * fields according to your needs before calling pwm_apply_state().
+ * fields according to your needs before calling pwm_apply_might_sleep().
*
* ->duty_cycle is initially set to zero to avoid cases where the current
* ->duty_cycle value exceeds the pwm_args->period one, which would trigger
- * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle
* first.
*/
static inline void pwm_init_state(const struct pwm_device *pwm,
@@ -183,6 +174,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
state->period = args.period;
state->polarity = args.polarity;
state->duty_cycle = 0;
+ state->usage_power = false;
}
/**
@@ -221,7 +213,7 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
*
* pwm_init_state(pwm, &state);
* pwm_set_relative_duty_cycle(&state, 50, 100);
- * pwm_apply_state(pwm, &state);
+ * pwm_apply_might_sleep(pwm, &state);
*
* This function returns -EINVAL if @duty_cycle and/or @scale are
* inconsistent (@scale == 0 or @duty_cycle > @scale).
@@ -241,6 +233,16 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
}
/**
+ * struct pwm_capture - PWM capture data
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ */
+struct pwm_capture {
+ unsigned int period;
+ unsigned int duty_cycle;
+};
+
+/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
@@ -249,11 +251,6 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
* @get_state: get the current PWM state. This function is only
* called once per PWM device when the PWM chip is
* registered.
- * @owner: helps prevent removal of modules exporting active PWMs
- * @config: configure duty cycles and period length for this PWM
- * @set_polarity: configure the polarity of this PWM
- * @enable: enable PWM output toggling
- * @disable: disable PWM output toggling
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
@@ -262,60 +259,65 @@ struct pwm_ops {
struct pwm_capture *result, unsigned long timeout);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state);
- void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state);
- struct module *owner;
-
- /* Only used by legacy drivers */
- int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
};
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
* @ops: callbacks for this PWM controller
- * @base: number of first PWM controlled by this chip
+ * @owner: module providing this chip
+ * @id: unique number of this PWM chip
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
- * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
- * @list: list node for internal use
+ * @atomic: can the driver's ->apply() be called in atomic context
+ * @driver_data: Private pointer for driver specific info
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
struct device *dev;
const struct pwm_ops *ops;
- int base;
+ struct module *owner;
+ unsigned int id;
unsigned int npwm;
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
- unsigned int of_pwm_n_cells;
+ bool atomic;
/* only used internally by the PWM framework */
- struct list_head list;
+ void *driver_data;
struct pwm_device *pwms;
};
-/**
- * struct pwm_capture - PWM capture data
- * @period: period of the PWM signal (in nanoseconds)
- * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
- */
-struct pwm_capture {
- unsigned int period;
- unsigned int duty_cycle;
-};
+static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
+{
+ return chip->dev;
+}
+
+static inline void *pwmchip_get_drvdata(struct pwm_chip *chip)
+{
+ /*
+ * After pwm_chip got a dedicated struct device, this can be replaced by
+ * dev_get_drvdata(&chip->dev);
+ */
+ return chip->driver_data;
+}
+
+static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data)
+{
+ /*
+ * After pwm_chip got a dedicated struct device, this can be replaced by
+ * dev_set_drvdata(&chip->dev, data);
+ */
+ chip->driver_data = data;
+}
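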
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
-struct pwm_device *pwm_request(int pwm_id, const char *label);
-void pwm_free(struct pwm_device *pwm);
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -343,7 +345,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
state.duty_cycle = duty_ns;
state.period = period_ns;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -364,7 +366,7 @@ static inline int pwm_enable(struct pwm_device *pwm)
return 0;
state.enabled = true;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -383,89 +385,116 @@ static inline void pwm_disable(struct pwm_device *pwm)
return;
state.enabled = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/**
+ * pwm_might_sleep() - is pwm_apply_atomic() supported?
+ * @pwm: PWM device
+ *
+ * Returns: false if pwm_apply_atomic() can be called from atomic context.
+ */
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+ return !pwm->chip->atomic;
}
/* PWM provider APIs */
int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
unsigned long timeout);
-int pwm_set_chip_data(struct pwm_device *pwm, void *data);
-void *pwm_get_chip_data(struct pwm_device *pwm);
-int pwmchip_add_with_polarity(struct pwm_chip *chip,
- enum pwm_polarity polarity);
-int pwmchip_add(struct pwm_chip *chip);
-int pwmchip_remove(struct pwm_chip *chip);
+void pwmchip_put(struct pwm_chip *chip);
+struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
+struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
+
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
+#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)
+void pwmchip_remove(struct pwm_chip *chip);
+
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
+#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
+
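
Putting the new allocation API together, a probe function might look roughly
like this (struct foo_pwm and foo_pwm_ops are hypothetical, and
pwmchip_get_drvdata() is assumed to return the sizeof_priv area reserved by
devm_pwmchip_alloc()):

	static int foo_pwm_probe(struct platform_device *pdev)
	{
		struct pwm_chip *chip;
		struct foo_pwm *priv;

		chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*priv));
		if (IS_ERR(chip))
			return PTR_ERR(chip);

		priv = pwmchip_get_drvdata(chip);
		chip->ops = &foo_pwm_ops;

		return devm_pwmchip_add(&pdev->dev, chip);
	}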
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
-struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
+ const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
void pwm_put(struct pwm_device *pwm);
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
-void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
#else
-static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
{
- return ERR_PTR(-ENODEV);
+ return true;
}
-static inline void pwm_free(struct pwm_device *pwm)
+static inline int pwm_apply_might_sleep(struct pwm_device *pwm,
+ const struct pwm_state *state)
{
+ might_sleep();
+ return -EOPNOTSUPP;
}
-static inline int pwm_apply_state(struct pwm_device *pwm,
- const struct pwm_state *state)
+static inline int pwm_apply_atomic(struct pwm_device *pwm,
+ const struct pwm_state *state)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pwm_adjust_config(struct pwm_device *pwm)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
int period_ns)
{
+ might_sleep();
return -EINVAL;
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
+static inline int pwm_enable(struct pwm_device *pwm)
{
+ might_sleep();
return -EINVAL;
}
-static inline int pwm_enable(struct pwm_device *pwm)
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+ might_sleep();
+}
+
+static inline int pwm_capture(struct pwm_device *pwm,
+ struct pwm_capture *result,
+ unsigned long timeout)
{
return -EINVAL;
}
-static inline void pwm_disable(struct pwm_device *pwm)
+static inline void pwmchip_put(struct pwm_chip *chip)
{
}
-static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+static inline struct pwm_chip *pwmchip_alloc(struct device *parent,
+ unsigned int npwm,
+ size_t sizeof_priv)
{
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-static inline void *pwm_get_chip_data(struct pwm_device *pwm)
+static inline struct pwm_chip *devm_pwmchip_alloc(struct device *parent,
+ unsigned int npwm,
+ size_t sizeof_priv)
{
- return NULL;
+ return pwmchip_alloc(parent, npwm, sizeof_priv);
}
static inline int pwmchip_add(struct pwm_chip *chip)
@@ -473,12 +502,12 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
-static inline int pwmchip_add_inversed(struct pwm_chip *chip)
+static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
}
-static inline int pwmchip_remove(struct pwm_chip *chip)
+static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
{
return -EINVAL;
}
@@ -487,36 +516,26 @@ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -524,12 +543,9 @@ static inline struct pwm_device *
devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
-
-static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
-{
-}
#endif
static inline void pwm_apply_args(struct pwm_device *pwm)
@@ -560,8 +576,16 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
state.enabled = false;
state.polarity = pwm->args.polarity;
state.period = pwm->args.period;
+ state.usage_power = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/* only for backwards-compatibility, new code should not use this */
+static inline int pwm_apply_state(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ return pwm_apply_might_sleep(pwm, state);
}
struct pwm_lookup {
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 06086cb93b6f..cdd2ac366bc7 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,7 +8,6 @@
#include <linux/backlight.h>
struct platform_pwm_backlight_data {
- int pwm_id;
unsigned int max_brightness;
unsigned int dft_brightness;
unsigned int lth_brightness;
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 6facf27865f9..cd1973e6ac4b 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * pxa2xx_ssp.h
- *
- * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@@ -13,13 +11,19 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
-#ifndef __LINUX_SSP_H
-#define __LINUX_SSP_H
+#ifndef __LINUX_PXA2XX_SSP_H
+#define __LINUX_PXA2XX_SSP_H
-#include <linux/list.h>
+#include <linux/bits.h>
+#include <linux/compiler_types.h>
#include <linux/io.h>
-#include <linux/of.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct device_node;
/*
* SSP Serial Port Registers
@@ -34,7 +38,6 @@
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
-#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -43,130 +46,130 @@
#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
/* Common PXA2xx bits first */
-#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
+#define SSCR0_DSS GENMASK(3, 0) /* Data Size Select (mask) */
#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
-#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
+#define SSCR0_FRF GENMASK(5, 4) /* FRame Format (mask) */
#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */
#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */
#define SSCR0_National (0x2 << 4) /* National Microwire */
-#define SSCR0_ECS (1 << 6) /* External clock select */
-#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+#define SSCR0_ECS BIT(6) /* External clock select */
+#define SSCR0_SSE BIT(7) /* Synchronous Serial Port Enable */
#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */
/* PXA27x, PXA3xx */
-#define SSCR0_EDSS (1 << 20) /* Extended data size select */
-#define SSCR0_NCS (1 << 21) /* Network clock select */
-#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */
-#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */
-#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
+#define SSCR0_EDSS BIT(20) /* Extended data size select */
+#define SSCR0_NCS BIT(21) /* Network clock select */
+#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
-#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
-#define SSCR0_ACS (1 << 30) /* Audio clock select */
-#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
-
-
-#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
-#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
-#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
-#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */
-#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
-#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
-
-#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */
-#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
-#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
-#define SSSR_BSY (1 << 4) /* SSP Busy */
-#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */
-#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
-#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
+#define SSCR0_FPCKE BIT(29) /* FIFO packing enable */
+#define SSCR0_ACS BIT(30) /* Audio clock select */
+#define SSCR0_MOD BIT(31) /* Mode (normal or network) */
+
+#define SSCR1_RIE BIT(0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE BIT(1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM BIT(2) /* Loop-Back Mode */
+#define SSCR1_SPO BIT(3) /* Motorola SPI SSPSCLK polarity setting */
+#define SSCR1_SPH BIT(4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS BIT(5) /* Microwire Transmit Data Size */
+
+#define SSSR_ALT_FRM_MASK GENMASK(1, 0) /* Masks the SFRM signal number */
+#define SSSR_TNF BIT(2) /* Transmit FIFO Not Full */
+#define SSSR_RNE BIT(3) /* Receive FIFO Not Empty */
+#define SSSR_BSY BIT(4) /* SSP Busy */
+#define SSSR_TFS BIT(5) /* Transmit FIFO Service Request */
+#define SSSR_RFS BIT(6) /* Receive FIFO Service Request */
+#define SSSR_ROR BIT(7) /* Receive FIFO Overrun */
#define RX_THRESH_DFLT 8
#define TX_THRESH_DFLT 8
-#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */
-#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
+#define SSSR_TFL_MASK GENMASK(11, 8) /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK GENMASK(15, 12) /* Receive FIFO Level mask */
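
One practical upside of the GENMASK() form (a side note; linux/bitfield.h is
assumed): the masks compose directly with FIELD_GET()/FIELD_PREP(), e.g.

	#include <linux/bitfield.h>

	/* read the current transmit FIFO level out of SSSR */
	static inline u32 foo_tx_fifo_level(struct ssp_device *ssp)
	{
		return FIELD_GET(SSSR_TFL_MASK, pxa_ssp_read_reg(ssp, SSSR));
	}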
-#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TFT GENMASK(9, 6) /* Transmit FIFO Threshold (mask) */
#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
-#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
+#define SSCR1_RFT GENMASK(13, 10) /* Receive FIFO Threshold (mask) */
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
#define RX_THRESH_CE4100_DFLT 2
#define TX_THRESH_CE4100_DFLT 2
-#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
-#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
+#define CE4100_SSSR_TFL_MASK GENMASK(9, 8) /* Transmit FIFO Level mask */
+#define CE4100_SSSR_RFL_MASK GENMASK(13, 12) /* Receive FIFO Level mask */
-#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
+#define CE4100_SSCR1_TFT GENMASK(7, 6) /* Transmit FIFO Threshold (mask) */
#define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
-#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
+#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+/* Intel Quark X1000 */
+#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
+
/* QUARK_X1000 SSCR0 bit definition */
-#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
-#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_FRF GENMASK(6, 5) /* FRame Format (mask) */
#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
#define RX_THRESH_QUARK_X1000_DFLT 1
#define TX_THRESH_QUARK_X1000_DFLT 16
-#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
-#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
+#define QUARK_X1000_SSSR_TFL_MASK GENMASK(12, 8) /* Transmit FIFO Level mask */
+#define QUARK_X1000_SSSR_RFL_MASK GENMASK(17, 13) /* Receive FIFO Level mask */
-#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_TFT GENMASK(10, 6) /* Transmit FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
-#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_RFT GENMASK(15, 11) /* Receive FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
-#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
-#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
-#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
-#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
-#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
-#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
-#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
-#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
-#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
-#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
-#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
-#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
-#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
-#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
-#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
-#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */
-#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
-#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
-#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
-
-#define SSSR_BCE (1 << 23) /* Bit Count Error */
-#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
-#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
-#define SSSR_EOC (1 << 20) /* End Of Chain */
-#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
-#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
+#define SSCR1_EFWR BIT(14) /* Enable FIFO Write/Read */
+#define SSCR1_STRF BIT(15) /* Select FIFO or EFWR */
+#define SSCR1_IFS BIT(16) /* Invert Frame Signal */
+#define SSCR1_PINTE BIT(18) /* Peripheral Trailing Byte Interrupt Enable */
+#define SSCR1_TINTE BIT(19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_RSRE BIT(20) /* Receive Service Request Enable */
+#define SSCR1_TSRE BIT(21) /* Transmit Service Request Enable */
+#define SSCR1_TRAIL BIT(22) /* Trailing Byte */
+#define SSCR1_RWOT BIT(23) /* Receive Without Transmit */
+#define SSCR1_SFRMDIR BIT(24) /* Frame Direction */
+#define SSCR1_SCLKDIR BIT(25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_ECRB BIT(26) /* Enable Clock request B */
+#define SSCR1_ECRA BIT(27) /* Enable Clock Request A */
+#define SSCR1_SCFR BIT(28) /* Slave Clock free Running */
+#define SSCR1_EBCEI BIT(29) /* Enable Bit Count Error interrupt */
+#define SSCR1_TTE BIT(30) /* TXD Tristate Enable */
+#define SSCR1_TTELP BIT(31) /* TXD Tristate Enable Last Phase */
+
+#define SSSR_PINT BIT(18) /* Peripheral Trailing Byte Interrupt */
+#define SSSR_TINT BIT(19) /* Receiver Time-out Interrupt */
+#define SSSR_EOC BIT(20) /* End Of Chain */
+#define SSSR_TUR BIT(21) /* Transmit FIFO Under Run */
+#define SSSR_CSS BIT(22) /* Clock Synchronisation Status */
+#define SSSR_BCE BIT(23) /* Bit Count Error */
#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
-#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
-#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
+#define SSPSP_SFRMP BIT(2) /* Serial Frame Polarity */
+#define SSPSP_ETDS BIT(3) /* End of Transfer Data State */
#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
-#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
+#define SSPSP_FSRT BIT(25) /* Frame Sync Relative Timing */
/* PXA3xx */
#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */
#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */
#define SSPSP_TIMING_MASK (0x7f8001f0)
-#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
-#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
#define SSACD_ACDS_1 (0)
#define SSACD_ACDS_2 (1)
@@ -174,18 +177,39 @@
#define SSACD_ACDS_8 (3)
#define SSACD_ACDS_16 (4)
#define SSACD_ACDS_32 (5)
+#define SSACD_SCDB BIT(3) /* SSPSYSCLK Divider Bypass */
#define SSACD_SCDB_4X (0)
#define SSACD_SCDB_1X (1)
-#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
+#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
+#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
+
+/* Intel Merrifield SSP */
+#define SFIFOL 0x68 /* FIFO level */
+#define SFIFOTT 0x6c /* FIFO trigger threshold */
+
+#define RX_THRESH_MRFLD_DFLT 16
+#define TX_THRESH_MRFLD_DFLT 16
+
+#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
+#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
+
+#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
+#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
+#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
+#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
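A sketch of programming the new Merrifield trigger register with the default levels (assuming the same read/write accessors as the other SSP registers):

	u32 sfifott = SFIFOTT_TxThresh(TX_THRESH_MRFLD_DFLT) |
		      SFIFOTT_RxThresh(RX_THRESH_MRFLD_DFLT);

	pxa_ssp_write_reg(ssp, SFIFOTT, sfifott);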
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
+#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
#define SSITF_TxLoThresh(x) (((x) - 1) << 8)
-#define SSITF_TxHiThresh(x) ((x) - 1)
#define SSIRF 0x48 /* RX FIFO trigger level */
#define SSIRF_RxThresh(x) ((x) - 1)
+/* LPT/WPT SSP */
+#define SSCR2 (0x40) /* SSP Command / Status 2 */
+#define SSPSP2 (0x44) /* SSP Programmable Serial Protocol 2 */
+
enum pxa_ssp_type {
SSP_UNDEFINED = 0,
PXA25x_SSP, /* pxa 210, 250, 255, 26x */
@@ -196,13 +220,16 @@ enum pxa_ssp_type {
MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MRFLD_SSP,
QUARK_X1000_SSP,
- LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
LPSS_BXT_SSP,
LPSS_CNL_SSP,
+ SSP_MAX
};
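The new SSP_MAX sentinel lets per-type lookup tables be sized directly from the enum; a minimal sketch (the table name and contents here are hypothetical):

	static const char * const ssp_type_name[SSP_MAX] = {
		[PXA25x_SSP]	= "pxa25x",
		[MRFLD_SSP]	= "merrifield",
		/* unlisted types default to NULL */
	};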
struct ssp_device {
@@ -245,6 +272,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
+static inline void pxa_ssp_enable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
+static inline void pxa_ssp_disable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
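The new pxa_ssp_enable()/pxa_ssp_disable() helpers factor out the read-modify-write of SSCR0_SSE; a typical call pattern when reconfiguring a port might be (sketch; new_sscr1 stands for a locally computed register value):

	pxa_ssp_disable(ssp);			/* quiesce the port */
	pxa_ssp_write_reg(ssp, SSCR1, new_sscr1);
	pxa_ssp_enable(ssp);			/* restart with new settings */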
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@@ -263,4 +306,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
-#endif
+#endif /* __LINUX_PXA2XX_SSP_H */
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
deleted file mode 100644
index 2e1193a3fb5f..000000000000
--- a/include/linux/qcom_scm.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- */
-#ifndef __QCOM_SCM_H
-#define __QCOM_SCM_H
-
-#include <linux/err.h>
-#include <linux/types.h>
-#include <linux/cpumask.h>
-
-#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
-#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
-#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
-#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
-
-struct qcom_scm_hdcp_req {
- u32 addr;
- u32 val;
-};
-
-struct qcom_scm_vmperm {
- int vmid;
- int perm;
-};
-
-enum qcom_scm_ocmem_client {
- QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
- QCOM_SCM_OCMEM_GRAPHICS_ID,
- QCOM_SCM_OCMEM_VIDEO_ID,
- QCOM_SCM_OCMEM_LP_AUDIO_ID,
- QCOM_SCM_OCMEM_SENSORS_ID,
- QCOM_SCM_OCMEM_OTHER_OS_ID,
- QCOM_SCM_OCMEM_DEBUG_ID,
-};
-
-enum qcom_scm_sec_dev_id {
- QCOM_SCM_MDSS_DEV_ID = 1,
- QCOM_SCM_OCMEM_DEV_ID = 5,
- QCOM_SCM_PCIE0_DEV_ID = 11,
- QCOM_SCM_PCIE1_DEV_ID = 12,
- QCOM_SCM_GFX_DEV_ID = 18,
- QCOM_SCM_UFS_DEV_ID = 19,
- QCOM_SCM_ICE_DEV_ID = 20,
-};
-
-enum qcom_scm_ice_cipher {
- QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
- QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
- QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
- QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
-};
-
-#define QCOM_SCM_VMID_HLOS 0x3
-#define QCOM_SCM_VMID_MSS_MSA 0xF
-#define QCOM_SCM_VMID_WLAN 0x18
-#define QCOM_SCM_VMID_WLAN_CE 0x19
-#define QCOM_SCM_PERM_READ 0x4
-#define QCOM_SCM_PERM_WRITE 0x2
-#define QCOM_SCM_PERM_EXEC 0x1
-#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
-#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-
-#if IS_ENABLED(CONFIG_QCOM_SCM)
-extern bool qcom_scm_is_available(void);
-
-extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
-extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
-
-extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size);
-extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size);
-extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
-extern int qcom_scm_pas_shutdown(u32 peripheral);
-extern bool qcom_scm_pas_supported(u32 peripheral);
-
-extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
-extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
-
-extern bool qcom_scm_restore_sec_cfg_available(void);
-extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
-extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
-extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
-extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src,
- const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt);
-
-extern bool qcom_scm_ocmem_lock_available(void);
-extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode);
-extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size);
-
-extern bool qcom_scm_ice_available(void);
-extern int qcom_scm_ice_invalidate_key(u32 index);
-extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size);
-
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
-
-extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
- { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
- { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
- { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
- { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src, const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
- u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
- { return -ENODEV; }
-#endif
-#endif
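The removed header is a clean example of the usual Kconfig stub idiom: real prototypes when CONFIG_QCOM_SCM is enabled, inline -ENODEV/false stubs otherwise, so callers compile and link either way. The pattern in the abstract (names hypothetical; needs linux/errno.h for the stub):

	#if IS_ENABLED(CONFIG_FOO)
	int foo_do_thing(u32 arg);
	#else
	static inline int foo_do_thing(u32 arg) { return -ENODEV; }
	#endif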
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 977807e1be53..827624840ee2 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
@@ -47,10 +47,10 @@
#define ISCSI_CDU_TASK_SEG_TYPE 0
#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ETH_CDU_TASK_SEG_TYPE 2
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
#define MSTORM_QZONE_SIZE 16
@@ -60,9 +60,12 @@
#define PSTORM_QZONE_SIZE 0
#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+#define ETH_MAX_RXQ_VF_DEFAULT 16
+#define ETH_MAX_RXQ_VF_DOUBLE 48
+#define ETH_MAX_RXQ_VF_QUAD 112
+
+#define ETH_RGSRC_CTX_SIZE 6
+#define ETH_TGSRC_CTX_SIZE 6
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
@@ -89,8 +92,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 42
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 59
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -112,6 +115,7 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
@@ -133,7 +137,7 @@
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
@@ -144,7 +148,7 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
/*****************/
/* CDU CONSTANTS */
@@ -162,6 +166,7 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d)
/*****************/
/* DQ CONSTANTS */
@@ -302,6 +307,9 @@
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
@@ -325,6 +333,13 @@
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+ (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+ (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \
+ (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -360,6 +375,7 @@
/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
@@ -379,7 +395,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB_E4 12
+#define PIS_PER_SB 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
@@ -700,9 +716,16 @@ enum mf_mode {
MAX_MF_MODE
};
+/* Per-protocol packet duplication enable bit vector. If a bit is set,
+ * offloaded traffic is duplicated to the LL2 debug queue.
+ */
+struct offload_pkt_dup_enable {
+ __le16 enable_vector;
+};
+
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
@@ -717,6 +740,12 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+/* Pstorm packet duplication config */
+struct pstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved[3];
+};
+
struct regpair {
__le32 lo;
__le32 hi;
@@ -728,10 +757,24 @@ struct rdma_eqe_destroy_qp {
u8 reserved[4];
};
+/* RoCE Suspend Event Data */
+struct rdma_eqe_suspend_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
/* RDMA Event Data Union */
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+ struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+/* Tstorm packet duplication config */
+struct tstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved;
+ __le32 cid;
};
struct tstorm_queue_zone {
@@ -891,6 +934,15 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in legacy mode, without DEMS */
+struct db_legacy_wo_dems_addr {
+ __le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2
+};
+
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
@@ -907,6 +959,31 @@ struct db_pwm_addr {
};
/* Parameters to RDMA firmware, passed in EDPM doorbell */
+struct db_rdma_24b_icid_dpm_params {
+ __le32 params;
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
@@ -1220,21 +1297,41 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* Searcher Table struct */
+struct src_entry_header {
+ __le32 flags;
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1
+#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2
+ __le32 magic_number;
+ struct regpair next_ptr;
+};
+
+/* Enumeration for address type */
+enum src_header_next_ptr_type_enum {
+ e_physical_addr,
+ e_logical_addr,
+ MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
+};
+
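Consuming a searcher-table entry could use the GET_FIELD helper defined earlier in this header (sketch; ent is a struct src_entry_header pointer and follow_physical() is a hypothetical callback):

	u32 flags = le32_to_cpu(ent->flags);

	if (!GET_FIELD(flags, SRC_ENTRY_HEADER_EMPTY) &&
	    GET_FIELD(flags, SRC_ENTRY_HEADER_NEXT_PTR_TYPE) == e_physical_addr)
		follow_physical(&ent->next_ptr);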
/* Status block structure */
-struct status_block_e4 {
- __le16 pi_array[PIS_PER_SB_E4];
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
__le32 sb_num;
-#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
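Dropping the E4 suffix leaves the access pattern unchanged; extracting the 24-bit producer index from the renamed struct stays a one-liner (sketch; sb is a hypothetical struct status_block pointer):

	u32 prod = GET_FIELD(le32_to_cpu(sb->prod_index),
			     STATUS_BLOCK_PROD_INDEX);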
/* Tdif context */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cd1207ad4ada..c84e08bc6802 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -67,6 +67,7 @@
/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
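(The new constant is simple arithmetic: 128 indirection-table entries at one bit apiece span 128 / 32 = 4 of the 32-bit mask registers.)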
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 68eda1c21cde..7ba0abc867f1 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -150,49 +150,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct e4_ystorm_fcoe_task_ag_ctx {
+struct ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -206,73 +206,73 @@ struct e4_ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct e4_tstorm_fcoe_task_ag_ctx {
+struct tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -352,49 +352,49 @@ struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct e4_mstorm_fcoe_task_ag_ctx {
+struct mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -440,56 +440,56 @@ struct mstorm_fcoe_task_st_ctx {
struct scsi_cached_sges data_desc;
};
-struct e4_ustorm_fcoe_task_ag_ctx {
+struct ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -499,18 +499,18 @@ struct e4_ustorm_fcoe_task_ag_ctx {
};
/* FCoE task context */
-struct e4_fcoe_task_context {
+struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
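The fcoe_common.h hunks above are a mechanical rename that drops the now-redundant E4_ generation prefix; field manipulation is otherwise unchanged. A sketch with the renamed macros (tctx is a hypothetical struct fcoe_task_context pointer; SET_FIELD comes from common_hsi.h):

	u8 flags1 = tctx->tstorm_ag_context.flags1;

	SET_FIELD(flags1, TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF, 1);
	tctx->tstorm_ag_context.flags1 = flags1;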
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 157019f716f1..1a60285a01e3 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -714,49 +714,49 @@ struct ystorm_iscsi_task_st_ctx {
union iscsi_task_hdr pdu_hdr;
};
-struct e4_ystorm_iscsi_task_ag_ctx {
+struct ystorm_iscsi_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 TTT;
u8 byte3;
@@ -764,49 +764,49 @@ struct e4_ystorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_mstorm_iscsi_task_ag_ctx {
+struct mstorm_iscsi_task_ag_ctx {
u8 cdu_validation;
u8 byte1;
__le16 task_cid;
u8 flags0;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
u8 flags1;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -814,56 +814,56 @@ struct e4_mstorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_ustorm_iscsi_task_ag_ctx {
+struct ustorm_iscsi_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
@@ -952,14 +952,14 @@ struct ustorm_iscsi_task_st_ctx {
};
/* iscsi task context */
-struct e4_iscsi_task_context {
+struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
- struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
struct regpair mstorm_ag_padding[2];
- struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
struct mstorm_iscsi_task_st_ctx mstorm_st_context;
struct ustorm_iscsi_task_st_ctx ustorm_st_context;
struct rdif_task_context rdif_context;
@@ -1431,73 +1431,73 @@ struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct e4_tstorm_iscsi_task_ag_ctx {
+struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
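(The renamed TSTORM masks keep the same bit packing, so they are still consumed through the GET_FIELD()/SET_FIELD() helpers that qed_if.h defines alongside MASK_FIELD(). A minimal sketch under that assumption; the function below is hypothetical, not part of this patch:

	#include <linux/qed/qed_if.h>		/* GET_FIELD()/SET_FIELD() */
	#include <linux/qed/iscsi_common.h>	/* struct tstorm_iscsi_task_ag_ctx */

	/* Hypothetical: enable CF0 and read back NIBBLE0 from the same context. */
	static void tstorm_flags_sketch(struct tstorm_iscsi_task_ag_ctx *ctx)
	{
		u8 nibble0 = GET_FIELD(ctx->flags0, TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0);

		SET_FIELD(ctx->flags3, TSTORM_ISCSI_TASK_AG_CTX_CF0EN, 1);
		(void)nibble0;
	}
)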
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
new file mode 100644
index 000000000000..cc7c7481a0e0
--- /dev/null
+++ b/include/linux/qed/nvmetcp_common.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef __NVMETCP_COMMON__
+#define __NVMETCP_COMMON__
+
+#include "tcp_common.h"
+#include <linux/nvme-tcp.h>
+
+#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
+#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
+
+/* NVMeTCP firmware function init parameters */
+struct nvmetcp_spe_func_init {
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 flags;
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
+ u8 debug_flags;
+ __le16 reserved1;
+ u8 params;
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[5];
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
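(The single-bit fields in flags and params follow the same MASK/SHIFT convention, so a driver would pack them with SET_FIELD(). A hedged sketch; the surrounding ramrod plumbing is assumed and the helper name is illustrative:

	static void nvmetcp_func_init_flags_sketch(struct nvmetcp_spe_func_init *init)
	{
		SET_FIELD(init->flags, NVMETCP_SPE_FUNC_INIT_COUNTERS_EN, 1);
		SET_FIELD(init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
	}
)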
+
+/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
+struct nvmetcp_init_ramrod_params {
+ struct nvmetcp_spe_func_init nvmetcp_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+/* NVMeTCP Ramrod Command IDs */
+enum nvmetcp_ramrod_cmd_id {
+ NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_NVMETCP_RAMROD_CMD_ID
+};
+
+struct nvmetcp_glbl_queue_entry {
+ struct regpair cq_pbl_addr;
+ struct regpair reserved;
+};
+
+/* NVMeTCP conn level EQEs */
+enum nvmetcp_eqe_opcode {
+ NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
+ NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
+ NVMETCP_EVENT_TYPE_OFFLOAD_CONN, /* Response after option 2 offload Ramrod */
+ NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
+ NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
+ NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
+ NVMETCP_EVENT_TYPE_RESERVED0,
+ NVMETCP_EVENT_TYPE_RESERVED1,
+ NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
+ NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
+ MAX_NVMETCP_EQE_OPCODE
+};
+
+struct nvmetcp_conn_offload_section {
+ struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
+ __le16 cccid_max_range; /* CCCID max value - used for validation */
+ __le16 reserved[3];
+};
+
+/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
+struct nvmetcp_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
+ u8 default_cq;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 initial_ack;
+
+ struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
+};
+
+/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
+struct nvmetcp_spe_conn_offload {
+ __le16 reserved;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct nvmetcp_conn_offload_params nvmetcp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
+struct nvmetcp_conn_update_ramrod_params {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 flags;
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 3
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
+ u8 reserved3[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 reserved4[5];
+};
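(These HD_EN/DD_EN bits are what carry the header/data digest choices down to firmware on a connection update. A sketch of the packing, with the helper name hypothetical:

	static void nvmetcp_update_digests_sketch(struct nvmetcp_conn_update_ramrod_params *p,
						  bool hdr_digest, bool data_digest)
	{
		SET_FIELD(p->flags, NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN, hdr_digest);
		SET_FIELD(p->flags, NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN, data_digest);
	}
)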
+
+/* NVMeTCP connection termination request */
+struct nvmetcp_spe_conn_termination {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 abortive;
+ u8 reserved2[7];
+ struct regpair reserved3;
+ struct regpair reserved4;
+};
+
+struct nvmetcp_dif_flags {
+ u8 flags;
+};
+
+enum nvmetcp_wqe_type {
+ NVMETCP_WQE_TYPE_NORMAL,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH,
+ NVMETCP_WQE_TYPE_IC,
+ MAX_NVMETCP_WQE_TYPE
+};
+
+struct nvmetcp_wqe {
+ __le16 task_id;
+ u8 flags;
+#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
+#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
+#define NVMETCP_WQE_NUM_SGES_MASK 0xF
+#define NVMETCP_WQE_NUM_SGES_SHIFT 3
+#define NVMETCP_WQE_RESPONSE_MASK 0x1
+#define NVMETCP_WQE_RESPONSE_SHIFT 7
+ struct nvmetcp_dif_flags prot_flags;
+ __le32 contlen_cdbsize;
+#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
+#define NVMETCP_WQE_CONT_LEN_SHIFT 0
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
+};
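(A WQE packs its type, SGE count and response bit into flags, and the continuation length into the low 24 bits of contlen_cdbsize. A minimal sketch of filling one for a normal request; the helper is hypothetical and error checking is elided:

	static void nvmetcp_fill_wqe_sketch(struct nvmetcp_wqe *wqe, u16 task_id,
					    u8 num_sges, u32 cont_len)
	{
		u8 flags = 0;
		u32 dword = 0;

		SET_FIELD(flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_NORMAL);
		SET_FIELD(flags, NVMETCP_WQE_NUM_SGES, num_sges);
		SET_FIELD(dword, NVMETCP_WQE_CONT_LEN, cont_len);

		wqe->task_id = cpu_to_le16(task_id);
		wqe->flags = flags;
		wqe->contlen_cdbsize = cpu_to_le32(dword);
	}
)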
+
+struct nvmetcp_host_cccid_itid_entry {
+ __le16 itid;
+};
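(Per nvmetcp_conn_offload_section above, firmware resolves host CCCIDs through a flat table of these entries, indexed by CCCID and bounded by cccid_max_range. A sketch of populating one entry; table allocation and DMA mapping are assumed:

	static void nvmetcp_set_cccid_itid_sketch(struct nvmetcp_host_cccid_itid_entry *tbl,
						  u16 cccid, u16 itid)
	{
		tbl[cccid].itid = cpu_to_le16(itid);
	}
)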
+
+struct nvmetcp_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
+};
+
+struct nvmetcp_eqe_data {
+ __le16 icid;
+ __le16 conn_id;
+ __le16 reserved;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
+#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
+};
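(The error PDU opcode in an EQE is only meaningful when its VALID bit is set, so decoding goes through GET_FIELD() in two steps. A hedged sketch:

	static u8 nvmetcp_eqe_pdu_opcode_sketch(const struct nvmetcp_eqe_data *eqe)
	{
		u8 v = eqe->error_pdu_opcode_reserved;

		if (!GET_FIELD(v, NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID))
			return 0;	/* opcode field is not valid for this event */

		return GET_FIELD(v, NVMETCP_EQE_DATA_ERROR_PDU_OPCODE);
	}
)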
+
+enum nvmetcp_task_type {
+ NVMETCP_TASK_TYPE_HOST_WRITE,
+ NVMETCP_TASK_TYPE_HOST_READ,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,
+ NVMETCP_TASK_TYPE_RESERVED0,
+ NVMETCP_TASK_TYPE_CLEANUP,
+ NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,
+ MAX_NVMETCP_TASK_TYPE
+};
+
+struct nvmetcp_db_data {
+ u8 params;
+#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
+#define NVMETCP_DB_DATA_DEST_SHIFT 0
+#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
+#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
+#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
+#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags; /* a bit for every DQ counter flag in the CM context that DQ can increment */
+ __le16 sq_prod;
+};
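(The params byte references enum db_dest and enum db_agg_cmd_sel from common_hsi.h, already pulled in via qed_if.h. A sketch of preparing doorbell data before ringing it; the destination/command values below are illustrative, not prescribed by this header:

	static void nvmetcp_init_db_sketch(struct nvmetcp_db_data *db, u16 sq_prod)
	{
		u8 params = 0;

		SET_FIELD(params, NVMETCP_DB_DATA_DEST, DB_DEST_XCM);
		SET_FIELD(params, NVMETCP_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);

		db->params = params;
		db->sq_prod = cpu_to_le16(sq_prod);
	}
)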
+
+struct nvmetcp_fw_nvmf_cqe {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_icresp_mdata {
+ u8 digest;
+ u8 cpda;
+ __le16 pfv;
+ __le32 maxdata;
+ __le16 rsvd[4];
+};
+
+union nvmetcp_fw_cqe_data {
+ struct nvmetcp_fw_nvmf_cqe nvme_cqe;
+ struct nvmetcp_icresp_mdata icresp_mdata;
+};
+
+struct nvmetcp_fw_cqe {
+ __le16 conn_id;
+ u8 cqe_type;
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1;
+ union nvmetcp_fw_cqe_data cqe_data;
+ struct regpair task_opaque;
+ __le32 reserved[6];
+};
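(cqe_error_status_bits is itself a small bitmap, so per-error checks use GET_FIELD() against the masks above. A minimal sketch:

	static bool nvmetcp_cqe_has_dif_err_sketch(const struct nvmetcp_fw_cqe *cqe)
	{
		return GET_FIELD(cqe->cqe_error_status_bits,
				 CQE_ERROR_BITMAP_DIF_ERR_BITS) != 0;
	}
)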
+
+enum nvmetcp_fw_cqes_type {
+ NVMETCP_FW_CQE_TYPE_NORMAL = 1,
+ NVMETCP_FW_CQE_TYPE_RESERVED0,
+ NVMETCP_FW_CQE_TYPE_RESERVED1,
+ NVMETCP_FW_CQE_TYPE_CLEANUP,
+ NVMETCP_FW_CQE_TYPE_DUMMY,
+ MAX_NVMETCP_FW_CQES_TYPE
+};
+
+struct ystorm_nvmetcp_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 reserved0;
+ __le32 buffer_offset;
+ __le16 cccid;
+ struct nvmetcp_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
+};
+
+struct ystorm_nvmetcp_task_rxmit_opt {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_task_hdr {
+ __le32 reg[18];
+};
+
+struct nvmetcp_task_hdr_aligned {
+ struct nvmetcp_task_hdr task_hdr;
+ __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */
+};
+
+struct e5_tdif_task_context {
+ __le32 reserved[16];
+};
+
+struct e5_rdif_task_context {
+ __le32 reserved[12];
+};
+
+struct ystorm_nvmetcp_task_st_ctx {
+ struct ystorm_nvmetcp_task_state state;
+ struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
+ struct nvmetcp_task_hdr_aligned pdu_hdr;
+};
+
+struct mstorm_nvmetcp_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair reserved0;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct ustorm_nvmetcp_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair reserved0;
+ __le32 reg1_map;
+#define REG1_NUM_SGES_MASK 0xF
+#define REG1_NUM_SGES_SHIFT 0
+#define REG1_RESERVED1_MASK 0xFFFFFFF
+#define REG1_RESERVED1_SHIFT 4
+ u8 flags2;
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 reserved3;
+ __le16 tqe_opaque[2];
+ __le32 reserved5;
+ __le32 nvme_tcp_opaque_lo;
+ __le32 nvme_tcp_opaque_hi;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
+ u8 flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+ u8 cq_rss_number;
+};
+
+struct e5_ystorm_nvmetcp_task_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state_and_core_id */;
+ __le16 word0 /* icid */;
+ u8 flags0;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ __le32 TTT;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_mstorm_nvmetcp_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+ u8 flags3;
+ __le32 reg0;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_ustorm_nvmetcp_task_ag_ctx {
+ u8 reserved;
+ u8 state_and_core_id;
+ __le16 icid;
+ u8 flags0;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+ u8 flags4;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ u8 byte2;
+ u8 byte3;
+ u8 reserved8;
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ __le16 word1;
+ __le16 next_tid;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct e5_nvmetcp_task_context {
+ struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
+ struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct e5_tdif_task_context tdif_context;
+ struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
+ struct regpair ustorm_ag_padding[2];
+ struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e5_rdif_task_context rdif_context;
+};
+
+#endif /* __NVMETCP_COMMON__*/
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 4d58dc8943f0..a84063492c71 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -19,7 +19,7 @@ enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
- /* Chain is a single page (next ptr) is unrequired */
+ /* Chain is a single page, next ptr is not required */
QED_CHAIN_MODE_SINGLE,
/* Page pointers are located in a side list */
@@ -56,13 +56,13 @@ struct qed_chain_pbl_u32 {
};
struct qed_chain_u16 {
- /* Cyclic index of next element to produce/consme */
+ /* Cyclic index of next element to produce/consume */
u16 prod_idx;
u16 cons_idx;
};
struct qed_chain_u32 {
- /* Cyclic index of next element to produce/consme */
+ /* Cyclic index of next element to produce/consume */
u32 prod_idx;
u32 cons_idx;
};
@@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
}
/**
- * @brief qed_chain_advance_page -
+ * qed_chain_advance_page(): Advance the next element across pages for a
+ * linked chain.
*
- * Advance the next element accros pages for a linked chain
+ * @p_chain: Chain.
+ * @p_next_elem: Pointer to the next element to advance.
+ * @idx_to_inc: Index to increment.
+ * @page_to_inc: Page index to increment.
*
- * @param p_chain
- * @param p_next_elem
- * @param idx_to_inc
- * @param page_to_inc
+ * Return: Void.
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
@@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
} while (0)
/**
- * @brief qed_chain_return_produced -
+ * qed_chain_return_produced(): A chain in which the driver "Produces"
+ * elements should use this API
+ * to indicate previous produced elements
+ * are now consumed.
*
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
@@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_produce -
+ * qed_chain_produce(): A chain in which the driver "Produces"
+ * elements should use this to get a pointer to
+ * the next element which can be "Produced". It's driver
+ * responsibility to validate that the chain has room for
+ * new element.
*
- * A chain in which the driver "Produces" elements should use this to get
- * a pointer to the next element which can be "Produced". It's driver
- * responsibility to validate that the chain has room for new element.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to next element
+ * Return: void*, a pointer to next element.
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
@@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
}
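(Per the reworded kernel-doc, the room check stays with the caller; qed_chain_get_elem_left() elsewhere in this header is the natural guard. A hedged sketch of the intended produce pattern, with the wrapper name hypothetical:

	/* Produce one element only if the chain has room; NULL otherwise. */
	static void *qed_chain_produce_if_room_sketch(struct qed_chain *p_chain)
	{
		if (!qed_chain_get_elem_left(p_chain))
			return NULL;

		return qed_chain_produce(p_chain);
	}
)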
/**
- * @brief qed_chain_get_capacity -
- *
- * Get the maximum number of BDs in chain
+ * qed_chain_get_capacity(): Get the maximum number of BDs in chain.
*
- * @param p_chain
- * @param num
+ * @p_chain: Chain.
*
- * @return number of unusable BDs
+ * Return: number of usable BDs.
*/
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
@@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_recycle_consumed -
+ * qed_chain_recycle_consumed(): Returns an element which was
+ * previously consumed;
+ * Increments producers so they could
+ * be written to FW.
*
- * Returns an element which was previously consumed;
- * Increments producers so they could be written to FW.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
@@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_consume -
+ * qed_chain_consume(): A Chain in which the driver utilizes data written
+ * by a different source (i.e., FW) should use this to
+ * access passed buffers.
*
- * A Chain in which the driver utilizes data written by a different source
- * (i.e., FW) should use this to access passed buffers.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to the next buffer written
+ * Return: void*, a pointer to the next buffer written.
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
@@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_reset - Resets the chain to its start state
+ * qed_chain_reset(): Resets the chain to its start state.
+ *
+ * @p_chain: pointer to a previously allocated chain.
*
- * @param p_chain pointer to a previously allocted chain
+ * Return: Void.
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
@@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_get_last_elem -
+ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
+ * chain.
*
- * Returns a pointer to the last element of the chain
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*
+ * Return: void*.
*/
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
@@ -563,10 +565,13 @@ out:
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given value
+ * qed_chain_set_prod(): sets the prod to the given value.
+ *
+ * @p_chain: Chain.
+ * @prod_idx: Prod Idx.
+ * @p_prod_elem: Prod elem.
*
- * @param prod_idx
- * @param p_prod_elem
+ * Return: Void.
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
@@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
}
/**
- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ * qed_chain_pbl_zero_mem(): set chain memory to 0.
+ *
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 812a4d751163..e1bf3219b4e6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -145,12 +145,6 @@ struct qed_filter_mcast_params {
unsigned char mac[64][ETH_ALEN];
};
-union qed_filter_type_params {
- enum qed_filter_rx_mode_type accept_flags;
- struct qed_filter_ucast_params ucast;
- struct qed_filter_mcast_params mcast;
-};
-
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
@@ -158,11 +152,6 @@ enum qed_filter_type {
QED_MAX_FILTER_TYPES,
};
-struct qed_filter_params {
- enum qed_filter_type type;
- union qed_filter_type_params filter;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -314,8 +303,14 @@ struct qed_eth_ops {
int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
- int (*filter_config)(struct qed_dev *cdev,
- struct qed_filter_params *params);
+ int (*filter_config_rx_mode)(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type);
+
+ int (*filter_config_ucast)(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params);
+
+ int (*filter_config_mcast)(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params);
int (*fastpath_stop)(struct qed_dev *cdev);
@@ -336,7 +331,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
- int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 16752eca5cbd..0d3b6ed21628 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -67,16 +67,13 @@ struct qed_fcoe_cb_ops {
u32 (*get_login_failures)(void *cookie);
};
-void qed_fcoe_set_pf_params(struct qed_dev *cdev,
- struct qed_fcoe_pf_params *params);
-
/**
* struct qed_fcoe_ops - qed FCoE operations.
* @common: common operations pointer
* @fill_dev_info: fills FCoE specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register FCoE operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -96,7 +93,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * return 0 on sucesss, otherwise error value.
+ * return 0 on success, otherwise error value.
* @release_conn: release a previously acquired fcoe connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cdd73afc4c46..6dc4943d8aec 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -7,6 +7,7 @@
#ifndef _QED_IF_H
#define _QED_IF_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -21,6 +22,10 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <net/devlink.h>
+
+#define QED_TX_SWS_TIMER_DFLT 500
+#define QED_TWO_MSL_TIMER_DFLT 4000
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -540,6 +545,22 @@ struct qed_iscsi_pf_params {
u8 bdq_pbl_num_entries[3];
};
+struct qed_nvmetcp_pf_params {
+ u64 glbl_q_params_addr;
+ u16 cq_num_entries;
+ u16 num_cons;
+ u16 num_tasks;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u16 min_rto;
+};
+
struct qed_rdma_pf_params {
/* Supplied to QED during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -558,6 +579,7 @@ struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_nvmetcp_pf_params nvmetcp_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
@@ -569,7 +591,7 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -594,7 +616,6 @@ enum qed_hw_err_type {
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
- QED_DEV_TYPE_E5,
};
struct qed_dev_info {
@@ -631,6 +652,7 @@ struct qed_dev_info {
bool wol_support;
bool smart_an;
+ bool esl;
/* MBI version */
u32 mbi_version;
@@ -660,6 +682,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
QED_PROTOCOL_FCOE,
};
@@ -780,6 +803,17 @@ enum qed_nvm_flash_cmd {
QED_NVM_FLASH_CMD_NVM_MAX,
};
+struct qed_devlink {
+ struct qed_dev *cdev;
+ struct devlink_health_reporter *fw_reporter;
+};
+
+struct qed_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev, struct qed_link_output *link);
@@ -794,47 +828,47 @@ struct qed_common_cb_ops {
struct qed_selftest_ops {
/**
- * @brief selftest_interrupt - Perform interrupt test
+ * selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_interrupt)(struct qed_dev *cdev);
/**
- * @brief selftest_memory - Perform memory test
+ * selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_memory)(struct qed_dev *cdev);
/**
- * @brief selftest_register - Perform register test
+ * selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_register)(struct qed_dev *cdev);
/**
- * @brief selftest_clock - Perform clock test
+ * selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
- * @brief selftest_nvram - Perform nvram test
+ * selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
@@ -845,10 +879,9 @@ struct qed_common_ops {
struct qed_dev* (*probe)(struct pci_dev *dev,
struct qed_probe_params *params);
- void (*remove)(struct qed_dev *cdev);
+ void (*remove)(struct qed_dev *cdev);
- int (*set_power_state)(struct qed_dev *cdev,
- pci_power_t state);
+ int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);
void (*set_name) (struct qed_dev *cdev, char name[]);
@@ -856,93 +889,100 @@ struct qed_common_ops {
* PF params required for the call before slowpath_start is
* documented within the qed_pf_params structure definition.
*/
- void (*update_pf_params)(struct qed_dev *cdev,
- struct qed_pf_params *params);
- int (*slowpath_start)(struct qed_dev *cdev,
- struct qed_slowpath_params *params);
+ void (*update_pf_params)(struct qed_dev *cdev,
+ struct qed_pf_params *params);
+
+ int (*slowpath_start)(struct qed_dev *cdev,
+ struct qed_slowpath_params *params);
- int (*slowpath_stop)(struct qed_dev *cdev);
+ int (*slowpath_stop)(struct qed_dev *cdev);
/* Requests to use `cnt' interrupts for fastpath.
* upon success, returns number of interrupts allocated for fastpath.
*/
- int (*set_fp_int)(struct qed_dev *cdev,
- u16 cnt);
+ int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);
/* Fills `info' with pointers required for utilizing interrupts */
- int (*get_fp_int)(struct qed_dev *cdev,
- struct qed_int_info *info);
-
- u32 (*sb_init)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id,
- enum qed_sb_type type);
-
- u32 (*sb_release)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id,
- enum qed_sb_type type);
-
- void (*simd_handler_config)(struct qed_dev *cdev,
- void *token,
- int index,
- void (*handler)(void *));
-
- void (*simd_handler_clean)(struct qed_dev *cdev,
- int index);
- int (*dbg_grc)(struct qed_dev *cdev,
- void *buffer, u32 *num_dumped_bytes);
+ int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
+
+ u32 (*sb_init)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ u32 (*sb_release)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ void (*simd_handler_config)(struct qed_dev *cdev,
+ void *token,
+ int index,
+ void (*handler)(void *));
+
+ void (*simd_handler_clean)(struct qed_dev *cdev, int index);
+
+ int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int (*dbg_grc_size)(struct qed_dev *cdev);
- int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+ int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);
- int (*dbg_all_data_size) (struct qed_dev *cdev);
+ int (*dbg_all_data_size)(struct qed_dev *cdev);
+
+ int (*report_fatal_error)(struct devlink *devlink,
+ enum qed_hw_err_type err_type);
/**
- * @brief can_link_change - can the instance change the link or not
+ * can_link_change(): can the instance change the link or not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return true if link-change is allowed, false otherwise.
+ * Return: true if link-change is allowed, false otherwise.
*/
bool (*can_link_change)(struct qed_dev *cdev);
/**
- * @brief set_link - set links according to params
+ * set_link(): set links according to params.
*
- * @param cdev
- * @param params - values used to override the default link configuration
+ * @cdev: Qed dev pointer.
+ * @params: values used to override the default link configuration.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
- * @brief get_link - returns the current link state.
+ * get_link(): returns the current link state.
+ *
+ * @cdev: Qed dev pointer.
+ * @if_link: structure to be filled with current link configuration.
*
- * @param cdev
- * @param if_link - structure to be filled with current link configuration.
+ * Return: Void.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
- * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ * drain(): drains chip in case Tx completions fail to arrive due to pause.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Int.
*/
int (*drain)(struct qed_dev *cdev);
/**
- * @brief update_msglvl - update module debug level
+ * update_msglvl(): update module debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Debug module.
+ * @dp_level: Debug level.
+ *
+ * Return: Void.
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
@@ -956,70 +996,73 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
- * @brief nvm_flash - Flash nvm data.
+ * nvm_flash(): Flash nvm data.
*
- * @param cdev
- * @param name - file containing the data
+ * @cdev: Qed dev pointer.
+ * @name: file containing the data.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_flash)(struct qed_dev *cdev, const char *name);
/**
- * @brief nvm_get_image - reads an entire image from nvram
+ * nvm_get_image(): reads an entire image from nvram.
*
- * @param cdev
- * @param type - type of the request nvram image
- * @param buf - preallocated buffer to fill with the image
- * @param len - length of the allocated buffer
+ * @cdev: Qed dev pointer.
+ * @type: type of the request nvram image.
+ * @buf: preallocated buffer to fill with the image.
+ * @len: length of the allocated buffer.
*
- * @return 0 on success, error otherwise
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_get_image)(struct qed_dev *cdev,
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief set_coalesce - Configure Rx coalesce value in usec
+ * set_coalesce(): Configure Rx coalesce value in usec.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- * @param qid - Queue index
- * @param sb_id - Status Block Id
+ * @cdev: Qed dev pointer.
+ * @rx_coal: Rx coalesce value in usec.
+ * @tx_coal: Tx coalesce value in usec.
+ * @handle: Handle.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev,
u16 rx_coal, u16 tx_coal, void *handle);
/**
- * @brief set_led - Configure LED mode
+ * set_led() - Configure LED mode.
*
- * @param cdev
- * @param mode - LED mode
+ * @cdev: Qed dev pointer.
+ * @mode: LED mode.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
- * @brief attn_clr_enable - Prevent attentions from being reasserted
+ * attn_clr_enable(): Prevent attentions from being reasserted.
+ *
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
*
- * @param cdev
- * @param clr_enable
+ * Return: Void.
*/
void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_is_32b - doorbell is 32b pr 64b
- * @param db_is_user - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int (*db_recovery_add)(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -1028,116 +1071,143 @@ struct qed_common_ops {
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * db_recovery_del(): remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
- * entry to delete.
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
*/
int (*db_recovery_del)(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
/**
- * @brief recovery_process - Trigger a recovery process
+ * recovery_process(): Trigger a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_process)(struct qed_dev *cdev);
/**
- * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ * recovery_prolog(): Execute the prolog operations of a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_prolog)(struct qed_dev *cdev);
/**
- * @brief update_drv_state - API to inform the change in the driver state.
+ * update_drv_state(): API to inform the change in the driver state.
*
- * @param cdev
- * @param active
+ * @cdev: Qed dev pointer.
+ * @active: Active.
*
+ * Return: Int.
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
- * @brief update_mac - API to inform the change in the mac address
+ * update_mac(): API to inform the change in the mac address.
*
- * @param cdev
- * @param mac
+ * @cdev: Qed dev pointer.
+ * @mac: MAC.
*
+ * Return: Int.
*/
- int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
/**
- * @brief update_mtu - API to inform the change in the mtu
+ * update_mtu(): API to inform the change in the mtu.
*
- * @param cdev
- * @param mtu
+ * @cdev: Qed dev pointer.
+ * @mtu: MTU.
*
+ * Return: Int.
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
- * @brief update_wol - update of changes in the WoL configuration
+ * update_wol(): Update of changes in the WoL configuration.
+ *
+ * @cdev: Qed dev pointer.
+ * @enabled: true iff WoL should be enabled.
*
- * @param cdev
- * @param enabled - true iff WoL should be enabled.
+ * Return: Int.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
/**
- * @brief read_module_eeprom
+ * read_module_eeprom(): Read EEPROM.
*
- * @param cdev
- * @param buf - buffer
- * @param dev_addr - PHY device memory region
- * @param offset - offset into eeprom contents to be read
- * @param len - buffer length, i.e., max bytes to be read
+ * @cdev: Qed dev pointer.
+ * @buf: buffer.
+ * @dev_addr: PHY device memory region.
+ * @offset: offset into eeprom contents to be read.
+ * @len: buffer length, i.e., max bytes to be read.
+ *
+ * Return: Int.
*/
int (*read_module_eeprom)(struct qed_dev *cdev,
char *buf, u8 dev_addr, u32 offset, u32 len);
/**
- * @brief get_affin_hwfn_idx
+ * get_affin_hwfn_idx(): Get affine HW function.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: u8.
*/
u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param buf - buffer
- * @param cmd - NVM CFG command id
- * @param entity_id - Entity id
+ * read_nvm_cfg(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @cmd: NVM CFG command id.
+ * @entity_id: Entity id.
*
+ * Return: Int.
*/
int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
u32 entity_id);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param cmd - NVM CFG command id
+ * read_nvm_cfg_len(): Read NVM config attribute value length.
*
- * @return config id length, 0 on error.
+ * @cdev: Qed dev pointer.
+ * @cmd: NVM CFG command id.
+ *
+ * Return: config id length, 0 on error.
*/
int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
/**
- * @brief set_grc_config - Configure value for grc config id.
- * @param cdev
- * @param cfg_id - grc config id
- * @param val - grc config value
+ * set_grc_config(): Configure value for grc config id.
+ *
+ * @cdev: Qed dev pointer.
+ * @cfg_id: grc config id.
+ * @val: grc config value.
*
+ * Return: Int.
*/
int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
+
+ struct devlink* (*devlink_register)(struct qed_dev *cdev);
+
+ void (*devlink_unregister)(struct devlink *devlink);
+
+ __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);
+
+ int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg);
+
+ int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
};
#define MASK_FIELD(_name, _value) \
@@ -1357,7 +1427,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_E4_PROD_INDEX_MASK;
+ STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
@@ -1368,18 +1438,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/**
+ * qed_sb_ack(): This function creates an update command for interrupts
+ * that is written to the IGU.
*
- * @brief This function creates an update command for interrupts that is
- * written to the IGU.
- *
- * @param sb_info - This is the structure allocated and
- * initialized per status block. Assumption is
- * that it was initialized using qed_sb_init
- * @param int_cmd - Enable/Disable/Nop
- * @param upd_flg - whether igu consumer should be
- * updated.
+ * @sb_info: This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init
+ * @int_cmd: Enable/Disable/Nop
+ * @upd_flg: Whether igu consumer should be updated.
*
- * @return inline void
+ * Return: Void.
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 04180d9af560..fbf7973ae9ba 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -133,7 +133,7 @@ struct qed_iscsi_cb_ops {
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -152,7 +152,7 @@ struct qed_iscsi_cb_ops {
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
@@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops {
* @param stats - pointer to struck that would be filled
* we stats
* @return 0 on success, error otherwise.
- * @change_mac Change MAC of interface
+ * @change_mac: Change MAC of interface
* @param cdev
* @param handle - the connection handle.
* @param mac - new MAC to configure.
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 2f64ed79cee9..5b67cd03276e 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -12,14 +12,13 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TCP_ULP,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_OOO,
QED_LL2_TYPE_RESERVED2,
@@ -209,57 +208,57 @@ enum qed_ll2_xmit_flags {
struct qed_ll2_ops {
/**
- * @brief start - initializes ll2
+ * start(): Initializes ll2.
*
- * @param cdev
- * @param params - protocol driver configuration for the ll2.
+ * @cdev: Qed dev pointer.
+ * @params: Protocol driver configuration for the ll2.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
/**
- * @brief stop - stops the ll2
+ * stop(): Stops the ll2.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*stop)(struct qed_dev *cdev);
/**
- * @brief start_xmit - transmits an skb over the ll2 interface
+ * start_xmit(): Transmits an skb over the ll2 interface.
*
- * @param cdev
- * @param skb
- * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
+ * @cdev: Qed dev pointer.
+ * @skb: SKB.
+ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags);
/**
- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+ * register_cb_ops(): Protocol driver register the callback for Rx/Tx
* packets. Should be called before `start'.
*
- * @param cdev
- * @param cookie - to be passed to the callback functions.
- * @param ops - the callback functions to register for Rx / Tx.
+ * @cdev: Qed dev pointer.
+ * @cookie: to be passed to the callback functions.
+ * @ops: the callback functions to register for Rx / Tx.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
void (*register_cb_ops)(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie);
/**
- * @brief get LL2 related statistics
+ * get_stats(): Get LL2 related statistics.
*
- * @param cdev
- * @param stats - pointer to struct that would be filled with stats
+ * @cdev: Qed dev pointer.
+ * @stats: Pointer to struct that would be filled with stats.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
new file mode 100644
index 000000000000..bbfbfba51f37
--- /dev/null
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_IF_H
+#define _QED_NVMETCP_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+
+#define QED_NVMETCP_MAX_IO_SIZE 0x800000
+#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
+#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
+#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
+
+typedef int (*nvmetcp_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+
+struct qed_dev_nvmetcp_info {
+ struct qed_dev_info common;
+ u8 port_id; /* Physical port */
+ u8 num_cqs;
+};
+
+#define MAX_TID_BLOCKS_NVMETCP (512)
+struct qed_nvmetcp_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
+};
+
+struct qed_nvmetcp_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_nvmetcp_params_offload {
+ /* FW initializations */
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+ u16 nvmetcp_cccid_max_range;
+ u8 default_cq;
+
+ /* Networking and TCP stack initializations */
+ struct qed_nvmetcp_id_params src;
+ struct qed_nvmetcp_id_params dst;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 cwnd;
+ u16 mss;
+ u16 vlan_id;
+ bool timestamp_en;
+ bool delayed_ack_en;
+ bool tcp_keep_alive_en;
+ bool ecn_en;
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u8 ttl;
+ u8 tos_or_tc;
+ u8 rcv_wnd_scale;
+};
+
+struct qed_nvmetcp_params_update {
+ u32 max_io_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+
+ /* Placeholder: pfv, cpda, hpda */
+
+ bool hdr_digest_en;
+ bool data_digest_en;
+};
+
+struct qed_nvmetcp_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct nvmetcp_sge {
+ struct regpair sge_addr; /* SGE address */
+ __le32 sge_len; /* SGE length */
+ __le32 reserved;
+};
+
+/* IO path HSI function SGL params */
+struct storage_sgl_task_params {
+ struct nvmetcp_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+/* IO path HSI function FW task context params */
+struct nvmetcp_task_params {
+ void *context; /* Output parameter - set/filled by the HSI function */
+ struct nvmetcp_wqe *sqe;
+ u32 tx_io_size; /* in bytes (Without DIF, if exists) */
+ u32 rx_io_size; /* in bytes (Without DIF, if exists) */
+ u16 conn_icid;
+ u16 itid;
+ struct regpair opq; /* qedn_task_ctx address */
+ u16 host_cccid;
+ u8 cq_rss_number;
+ bool send_write_incapsule;
+};
+
+/**
+ * struct qed_nvmetcp_ops - qed NVMeTCP operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills NVMeTCP specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register nvmetcp operations
+ * @param cdev
+ * @param ops - specified using qed_nvmetcp_cb_ops
+ * @param cookie - driver private
+ * @start: start nvmetcp in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * @return 0 on success, otherwise error value.
+ * @stop: stop nvmetcp in FW
+ * @param cdev
+ * @return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new nvmetcp connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired nvmetcp connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clears all tasks in the sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @add_src_tcp_port_filter: Add source TCP port filter
+ * @param cdev
+ * @param src_port
+ * @remove_src_tcp_port_filter: Remove source TCP port filter
+ * @param cdev
+ * @param src_port
+ * @add_dst_tcp_port_filter: Add destination TCP port filter
+ * @param cdev
+ * @param dest_port
+ * @remove_dst_tcp_port_filter: Remove destination TCP port filter
+ * @param cdev
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev
+ * @init_read_io: Init read IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_write_io: Init write IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_icreq_exchange: Exchange ICReq.
+ * @task_params
+ * @init_conn_req_pdu_hdr
+ * @tx_sgl_task_params
+ * @rx_sgl_task_params
+ * @init_task_cleanup: Init task cleanup.
+ * @task_params
+ */
+struct qed_nvmetcp_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context, nvmetcp_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*clear_all_filters)(struct qed_dev *cdev);
+
+ void (*init_read_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_write_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+
+ void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
+void qed_put_nvmetcp_ops(void);
+#endif
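
For orientation, here is a minimal consumer sketch of the new interface. The ops and structures are taken from the header above; the probe function, labels, and the immediate teardown are invented for illustration and are not the actual flow of any upper-layer driver.

#include <linux/errno.h>
#include <linux/qed/qed_nvmetcp_if.h>

static struct qed_nvmetcp_tid tasks; /* 512 block pointers: too big for the stack */

static int qedn_probe_sketch(struct qed_dev *cdev)
{
	const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();
	struct qed_dev_nvmetcp_info info;
	void __iomem *doorbell;
	u32 handle, fw_cid;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		goto out_put;

	/* Start the FW; qed fills @tasks with the per-task TID layout. */
	rc = ops->start(cdev, &tasks, NULL, NULL);
	if (rc)
		goto out_put;

	/* Acquire a connection; @handle identifies it from here on. */
	rc = ops->acquire_conn(cdev, &handle, &fw_cid, &doorbell);
	if (rc)
		goto out_stop;

	/* ... offload_conn()/update_conn() and I/O would follow here ... */

	ops->release_conn(cdev, handle);
out_stop:
	ops->stop(cdev);
out_put:
	qed_put_nvmetcp_ops();
	return rc;
}
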
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index f464d85e88a4..3b76c07fbcf8 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -242,10 +242,8 @@ struct qed_rdma_register_tid_in_params {
bool pbl_two_level;
u8 pbl_page_size_log;
u8 page_size_log;
- u32 fbo;
u64 length;
u64 vaddr;
- bool zbva;
bool phy_mr;
bool dma_mr;
@@ -664,7 +662,8 @@ struct qed_rdma_ops {
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address, u8 *new_mac_address);
+ u8 *old_mac_address,
+ const u8 *new_mac_address);
int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h
index 072da2f6da37..0d5564a59a59 100644
--- a/include/linux/qed/qede_rdma.h
+++ b/include/linux/qed/qede_rdma.h
@@ -20,7 +20,8 @@ enum qede_rdma_event {
QEDE_UP,
QEDE_DOWN,
QEDE_CHANGE_ADDR,
- QEDE_CLOSE
+ QEDE_CLOSE,
+ QEDE_CHANGE_MTU,
};
struct qede_rdma_event_work {
@@ -54,6 +55,7 @@ void qede_rdma_dev_event_open(struct qede_dev *dev);
void qede_rdma_dev_event_close(struct qede_dev *dev);
void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
void qede_rdma_event_changeaddr(struct qede_dev *edr);
+void qede_rdma_event_change_mtu(struct qede_dev *edev);
#else
static inline int qede_rdma_dev_add(struct qede_dev *dev,
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index bab078b25834..6dfed163ab6c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -27,6 +27,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 27aab84fcbaa..07071e64abf3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -91,7 +91,7 @@ extern bool qid_valid(struct kqid qid);
*
* When there is no mapping defined for the user-namespace, type,
* qid tuple, an invalid kqid is returned. Callers are expected to
- * test for and handle handle invalid kqids being returned.
+ * test for and handle invalid kqids being returned.
* Invalid kqids may be tested for using qid_valid().
*/
static inline struct kqid make_kqid(struct user_namespace *from,
@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
#define DQ_FAKE_B 3 /* no limits only usage */
#define DQ_READ_B 4 /* dquot was read into memory */
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting
+ * to be cleaned up */
+#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\
* for the mask of entries set via SETQUOTA\
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
@@ -448,17 +450,18 @@ struct quota_format_type {
};
/**
- * Quota state flags - they actually come in two flavors - for users and groups.
+ * Quota state flags - they come in three flavors - for users, groups and projects.
*
* Actual typed flags layout:
- * USRQUOTA GRPQUOTA
- * DQUOT_USAGE_ENABLED 0x0001 0x0002
- * DQUOT_LIMITS_ENABLED 0x0004 0x0008
- * DQUOT_SUSPENDED 0x0010 0x0020
+ * USRQUOTA GRPQUOTA PRJQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002 0x0004
+ * DQUOT_LIMITS_ENABLED 0x0008 0x0010 0x0020
+ * DQUOT_SUSPENDED 0x0040 0x0080 0x0100
*
* Following bits are used for non-typed flags:
- * DQUOT_QUOTA_SYS_FILE 0x0040
- * DQUOT_NEGATIVE_USAGE 0x0080
+ * DQUOT_QUOTA_SYS_FILE 0x0200
+ * DQUOT_NEGATIVE_USAGE 0x0400
+ * DQUOT_NOLIST_DIRTY 0x0800
*/
enum {
_DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9cf0cd3dc88c..06cc8888199e 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -20,16 +20,14 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
}
/* i_mutex must be held */
-static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+static inline bool is_quota_modification(struct mnt_idmap *idmap,
+ struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE) ||
- (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
- (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ return ((ia->ia_valid & ATTR_SIZE) ||
+ i_uid_needs_update(idmap, ia, inode) ||
+ i_gid_needs_update(idmap, ia, inode));
}
-int kernel_quotactl(unsigned int cmd, const char __user *special,
- qid_t id, void __user *addr);
-
#if defined(CONFIG_QUOTA)
#define quota_error(sb, fmt, args...) \
@@ -59,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
{
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return true;
- if (atomic_read(&dquot->dq_count) > 1)
+ if (atomic_read(&dquot->dq_count) > 0)
return true;
return false;
}
@@ -76,7 +74,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
int dquot_alloc_inode(struct inode *inode);
-int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
+void dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(struct inode *inode);
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
@@ -118,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
-int dquot_transfer(struct inode *inode, struct iattr *iattr);
+int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *iattr);
static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
{
@@ -237,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode)
{
}
-static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+static inline int dquot_transfer(struct mnt_idmap *idmap,
+ struct inode *inode, struct iattr *iattr)
{
return 0;
}
@@ -257,10 +257,9 @@ static inline void __dquot_free_space(struct inode *inode, qsize_t number,
inode_sub_bytes(inode, number);
}
-static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+static inline void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
inode_add_bytes(inode, number);
- return 0;
}
static inline int dquot_reclaim_space_nodirty(struct inode *inode,
@@ -358,14 +357,10 @@ static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
}
-static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
+static inline void dquot_claim_block(struct inode *inode, qsize_t nr)
{
- int ret;
-
- ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
- if (!ret)
- mark_inode_dirty_sync(inode);
- return ret;
+ dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
}
static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
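
The switch of dquot_claim_space_nodirty()/dquot_claim_block() to void means the claim step can no longer fail. Here is a hedged sketch of the usual delayed-allocation pattern (the surrounding filesystem logic is invented):

#include <linux/quotaops.h>

static int fs_delalloc_commit_sketch(struct inode *inode, qsize_t nr)
{
	int rc;

	/* Reserve quota up front, at write time; this can still fail. */
	rc = dquot_reserve_block(inode, nr);
	if (rc)
		return rc;

	/* ... allocate the on-disk blocks ... */

	/* Convert the reservation into real usage; void, cannot fail. */
	dquot_claim_block(inode, nr);
	return 0;
}
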
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c2a9f7c90727..eae67015ce51 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -9,8 +9,11 @@
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/kernel.h>
+#include <linux/gfp_types.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/math.h>
+#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
@@ -376,7 +379,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
* radix_tree_next_slot - find next slot in chunk
*
* @slot: pointer to current slot
- * @iter: pointer to interator state
+ * @iter: pointer to iterator state
* @flags: RADIX_TREE_ITER_*, should be constant
* Returns: pointer to next slot, or NULL if there are no more left
*
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 154e954b711d..98030accf641 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -10,17 +10,9 @@
#ifdef __KERNEL__
-/* Set to 1 to use kernel-wide empty_zero_page */
-#define RAID6_USE_EMPTY_ZERO_PAGE 0
#include <linux/blkdev.h>
-/* We need a pre-zeroed page... if we don't want to use the kernel-provided
- one define it here */
-#if RAID6_USE_EMPTY_ZERO_PAGE
-# define raid6_empty_zero_page empty_zero_page
-#else
extern const char raid6_empty_zero_page[PAGE_SIZE];
-#endif
#else /* ! __KERNEL__ */
/* Used for testing in user space */
@@ -81,7 +73,7 @@ struct raid6_calls {
void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
- int prefer; /* Has special performance attribute */
+ int priority; /* Relative priority ranking if non-zero */
};
/* Selected algorithm */
@@ -92,8 +84,6 @@ extern const struct raid6_calls raid6_intx1;
extern const struct raid6_calls raid6_intx2;
extern const struct raid6_calls raid6_intx4;
extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
extern const struct raid6_calls raid6_mmxx1;
extern const struct raid6_calls raid6_mmxx2;
extern const struct raid6_calls raid6_sse1x1;
@@ -116,6 +106,8 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -131,6 +123,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 2a9fee8ddae3..51b811b62322 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -11,13 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long *, unsigned long *);
- void (*do_3)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
- void (*do_4)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
- void (*do_5)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+ void (*do_2)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_3)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_4)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_5)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
};
#endif
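
A hedged sketch of a xor_block_template conforming to the new const/__restrict prototypes; the routine and template names are invented:

#include <linux/raid/xor.h>

static void xor_sketch_2(unsigned long bytes,
			 unsigned long * __restrict p1,
			 const unsigned long * __restrict p2)
{
	unsigned long words = bytes / sizeof(unsigned long);

	/* __restrict tells the compiler the buffers do not alias,
	 * enabling wider vectorization of this loop. */
	while (words--)
		*p1++ ^= *p2++;
}

static struct xor_block_template xor_template_sketch = {
	.name = "sketch",
	.do_2 = xor_sketch_2,
	/* .do_3/.do_4/.do_5 omitted; same pattern with more sources */
};
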
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 5cdfcb873a8f..e50416ba9cd9 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -11,7 +11,7 @@ struct raid_template {
};
struct raid_function_template {
- void *cookie;
+ const void *cookie;
int (*is_raid)(struct device *);
void (*get_resync)(struct device *);
void (*get_state)(struct device *);
@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
-
-int __must_check raid_component_add(struct raid_template *, struct device *,
- struct device *);
-
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 917528d102c4..d506dc63dd47 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -7,6 +7,7 @@
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev);
extern int ramfs_init_fs_context(struct fs_context *fc);
+extern void ramfs_kill_sb(struct super_block *sb);
#ifdef CONFIG_MMU
static inline int
diff --git a/include/linux/random.h b/include/linux/random.h
index f45b8be3e3c4..b0a940af4fff 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -1,60 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/linux/random.h
- *
- * Include file for the random number generator.
- */
+
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/once.h>
#include <uapi/linux/random.h>
-struct random_ready_callback {
- struct list_head list;
- void (*func)(struct random_ready_callback *rdy);
- struct module *owner;
-};
+struct notifier_block;
-extern void add_device_randomness(const void *, unsigned int);
-extern void add_bootloader_randomness(const void *, unsigned int);
+void add_device_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+void add_interrupt_randomness(int irq) __latent_entropy;
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
- add_device_randomness((const void *)&latent_entropy,
- sizeof(latent_entropy));
-}
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
-static inline void add_latent_entropy(void) {}
+ add_device_randomness(NULL, 0);
#endif
+}
-extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-
-extern void get_random_bytes(void *buf, int nbytes);
-extern int wait_for_random_bytes(void);
-extern int __init rand_initialize(void);
-extern bool rng_is_initialized(void);
-extern int add_random_ready_callback(struct random_ready_callback *rdy);
-extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
-
-#ifndef MODULE
-extern const struct file_operations random_fops, urandom_fops;
+#if IS_ENABLED(CONFIG_VMGENID)
+void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+int register_random_vmfork_notifier(struct notifier_block *nb);
+int unregister_random_vmfork_notifier(struct notifier_block *nb);
+#else
+static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
+void get_random_bytes(void *buf, size_t len);
+u8 get_random_u8(void);
+u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
-static inline unsigned int get_random_int(void)
-{
- return get_random_u32();
-}
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
@@ -64,52 +50,101 @@ static inline unsigned long get_random_long(void)
#endif
}
+u32 __get_random_u32_below(u32 ceil);
+
/*
- * On 64-bit architectures, protect against non-terminated C string overflows
- * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ * Returns a random integer in the interval [0, ceil), with uniform
+ * distribution, suitable for all uses. Fastest when ceil is a constant, but
+ * still fast for variable ceil as well.
*/
-#ifdef CONFIG_64BIT
-# ifdef __LITTLE_ENDIAN
-# define CANARY_MASK 0xffffffffffffff00UL
-# else /* big endian, 64 bits: */
-# define CANARY_MASK 0x00ffffffffffffffUL
-# endif
-#else /* 32 bits: */
-# define CANARY_MASK 0xffffffffUL
-#endif
+static inline u32 get_random_u32_below(u32 ceil)
+{
+ if (!__builtin_constant_p(ceil))
+ return __get_random_u32_below(ceil);
+
+ /*
+ * For the fast path, below, all operations on ceil are precomputed by
+ * the compiler, so this incurs no overhead for checking pow2, doing
+ * divisions, or branching based on integer size. The resultant
+ * algorithm does traditional reciprocal multiplication (typically
+ * optimized by the compiler into shifts and adds), rejecting samples
+ * whose lower half would indicate a range indivisible by ceil.
+ */
+ BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
+ if (ceil <= 1)
+ return 0;
+ for (;;) {
+ if (ceil <= 1U << 8) {
+ u32 mult = ceil * get_random_u8();
+ if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
+ return mult >> 8;
+ } else if (ceil <= 1U << 16) {
+ u32 mult = ceil * get_random_u16();
+ if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
+ return mult >> 16;
+ } else {
+ u64 mult = (u64)ceil * get_random_u32();
+ if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
+ return mult >> 32;
+ }
+ }
+}
-static inline unsigned long get_random_canary(void)
+/*
+ * Returns a random integer in the interval (floor, U32_MAX], with uniform
+ * distribution, suitable for all uses. Fastest when floor is a constant, but
+ * still fast for variable floor as well.
+ */
+static inline u32 get_random_u32_above(u32 floor)
{
- unsigned long val = get_random_long();
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
+ "get_random_u32_above() must take floor < U32_MAX");
+ return floor + 1 + get_random_u32_below(U32_MAX - floor);
+}
- return val & CANARY_MASK;
+/*
+ * Returns a random integer in the interval [floor, ceil], with uniform
+ * distribution, suitable for all uses. Fastest when floor and ceil are
+ * constant, but still fast for variable floor and ceil as well.
+ */
+static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
+ (floor > ceil || ceil - floor == U32_MAX),
+ "get_random_u32_inclusive() must take floor <= ceil");
+ return floor + get_random_u32_below(ceil - floor + 1);
}
+void __init random_init_early(const char *command_line);
+void __init random_init(void);
+bool rng_is_initialized(void);
+int wait_for_random_bytes(void);
+int execute_with_initialized_rng(struct notifier_block *nb);
+
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
* Returns the result of the call to wait_for_random_bytes. */
-static inline int get_random_bytes_wait(void *buf, int nbytes)
+static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
int ret = wait_for_random_bytes();
get_random_bytes(buf, nbytes);
return ret;
}
-#define declare_get_random_var_wait(var) \
- static inline int get_random_ ## var ## _wait(var *out) { \
+#define declare_get_random_var_wait(name, ret_type) \
+ static inline int get_random_ ## name ## _wait(ret_type *out) { \
int ret = wait_for_random_bytes(); \
if (unlikely(ret)) \
return ret; \
- *out = get_random_ ## var(); \
+ *out = get_random_ ## name(); \
return 0; \
}
-declare_get_random_var_wait(u32)
-declare_get_random_var_wait(u64)
-declare_get_random_var_wait(int)
-declare_get_random_var_wait(long)
+declare_get_random_var_wait(u8, u8)
+declare_get_random_var_wait(u16, u16)
+declare_get_random_var_wait(u32, u32)
+declare_get_random_var_wait(u64, u64)
+declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
-unsigned long randomize_page(unsigned long start, unsigned long range);
-
/*
* This is designed to be standalone for just prandom
* users, but for now we include it from <linux/random.h>
@@ -117,45 +152,13 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
*/
#include <linux/prandom.h>
-#ifdef CONFIG_ARCH_RANDOM
-# include <asm/archrandom.h>
-#else
-static inline bool __must_check arch_get_random_long(unsigned long *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- return false;
-}
+#ifdef CONFIG_SMP
+int random_prepare_cpu(unsigned int cpu);
+int random_online_cpu(unsigned int cpu);
#endif
-/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
- */
-#ifndef arch_get_random_seed_long_early
-static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_long(v);
-}
-#endif
-
-#ifndef arch_get_random_long_early
-static inline bool __init arch_get_random_long_early(unsigned long *v)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_long(v);
-}
+#ifndef MODULE
+extern const struct file_operations random_fops, urandom_fops;
#endif
#endif /* _LINUX_RANDOM_H */
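
Usage of the new bounded helpers, for orientation (illustrative only). As a worked instance of the rejection test above: for a constant ceil of 10, the fast path multiplies a random u8 by 10 and accepts the draw only when the low byte of the product is at least 256 % 10 == 6, which keeps the high byte uniformly distributed over [0, 9].

#include <linux/random.h>

static void bounded_random_sketch(void)
{
	u32 d6    = get_random_u32_inclusive(1, 6);	/* [1, 6] */
	u32 index = get_random_u32_below(10);		/* [0, 9] */
	u32 big   = get_random_u32_above(100);		/* (100, U32_MAX] */
	u8  byte  = get_random_u8();

	(void)d6; (void)index; (void)big; (void)byte;
}
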
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
new file mode 100644
index 000000000000..5d868505a94e
--- /dev/null
+++ b/include/linux/randomize_kstack.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RANDOMIZE_KSTACK_H
+#define _LINUX_RANDOMIZE_KSTACK_H
+
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/percpu-defs.h>
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+ randomize_kstack_offset);
+DECLARE_PER_CPU(u32, kstack_offset);
+
+/*
+ * Do not use this anywhere else in the kernel. This is used here because
+ * it provides an arch-agnostic way to grow the stack with correct
+ * alignment. Also, since this use is being explicitly masked to a max of
+ * 10 bits, stack-clash style attacks are unlikely. For more details see
+ * "VLAs" in Documentation/process/deprecated.rst
+ *
+ * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
+ * only with Clang and not GCC). Initializing the unused area on each syscall
+ * entry is expensive, and generating an implicit call to memset() may also be
+ * problematic (such as in noinstr functions). Therefore, if the compiler
+ * supports it (which it should if it initializes allocas), always use the
+ * "uninitialized" variant of the builtin.
+ */
+#if __has_builtin(__builtin_alloca_uninitialized)
+#define __kstack_alloca __builtin_alloca_uninitialized
+#else
+#define __kstack_alloca __builtin_alloca
+#endif
+
+/*
+ * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
+ * "VLA" from being unbounded (see above). 10 bits leaves enough room for
+ * per-arch offset masks to reduce entropy (by removing higher bits, since
+ * high entropy may overly constrain usable stack space), and for
+ * compiler/arch-specific stack alignment to remove the lower bits.
+ */
+#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)
+
+/**
+ * add_random_kstack_offset - Increase stack utilization by previously
+ * chosen random offset
+ *
+ * This should be used in the syscall entry path when interrupts and
+ * preempt are disabled, and after user registers have been stored to
+ * the stack. For testing the resulting entropy, please see:
+ * tools/testing/selftests/lkdtm/stack-entropy.sh
+ */
+#define add_random_kstack_offset() do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
+ /* Keep allocation even after "ptr" loses scope. */ \
+ asm volatile("" :: "r"(ptr) : "memory"); \
+ } \
+} while (0)
+
+/**
+ * choose_random_kstack_offset - Choose the random offset for the next
+ * add_random_kstack_offset()
+ *
+ * This should only be used during syscall exit when interrupts and
+ * preempt are disabled. This position in the syscall flow is done to
+ * frustrate attacks from userspace attempting to learn the next offset:
+ * - Maximize the timing uncertainty visible from userspace: if the
+ * offset is chosen at syscall entry, userspace has much more control
+ * over the timing between choosing offsets. "How long will we be in
+ * kernel mode?" tends to be more difficult to predict than "how long
+ * will we be in user mode?"
+ * - Reduce the lifetime of the new offset sitting in memory during
+ * kernel mode execution. Exposure of "thread-local" memory content
+ * (e.g. current, percpu, etc) tends to be easier than arbitrary
+ * location memory exposure.
+ */
+#define choose_random_kstack_offset(rand) do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+ offset ^= (rand); \
+ raw_cpu_write(kstack_offset, offset); \
+ } \
+} while (0)
+#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+#define add_random_kstack_offset() do { } while (0)
+#define choose_random_kstack_offset(rand) do { } while (0)
+#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+
+#endif
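
A hedged sketch of the intended call sites, assuming invented arch glue; real architectures wire these macros into their actual syscall entry/exit paths and typically feed a cheap hardware counter rather than get_random_u16():

#include <linux/randomize_kstack.h>
#include <linux/random.h>

static void arch_syscall_entry_sketch(void)
{
	/* Grow the stack by the offset chosen at the previous exit. */
	add_random_kstack_offset();
	/* ... store user registers, dispatch the syscall ... */
}

static void arch_syscall_exit_sketch(void)
{
	/* Pick the offset for the *next* entry while still in the kernel. */
	choose_random_kstack_offset(get_random_u16());
}
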
diff --git a/include/linux/range.h b/include/linux/range.h
index d1fbeb664012..6ad0b73cb7ad 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -1,12 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RANGE_H
#define _LINUX_RANGE_H
+#include <linux/types.h>
struct range {
u64 start;
u64 end;
};
+static inline u64 range_len(const struct range *range)
+{
+ return range->end - range->start + 1;
+}
+
+static inline bool range_contains(struct range *r1, struct range *r2)
+{
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
@@ -20,12 +31,4 @@ int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
-#define MAX_RESOURCE ((resource_size_t)~0)
-static inline resource_size_t cap_resource(u64 val)
-{
- if (val > MAX_RESOURCE)
- return MAX_RESOURCE;
-
- return val;
-}
#endif
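
A small illustrative check of the new helpers; note that ->end is inclusive, so range_len() adds one:

#include <linux/range.h>

static bool range_sketch(void)
{
	struct range outer = { .start = 0x1000, .end = 0x1fff };
	struct range inner = { .start = 0x1800, .end = 0x18ff };

	/* 0x1fff - 0x1000 + 1 == 0x1000 bytes, and outer spans inner. */
	return range_len(&outer) == 0x1000 && range_contains(&outer, &inner);
}
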
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 1f4048bf2674..a64182bc72ad 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -25,6 +25,7 @@ void log_non_standard_event(const guid_t *sec_type,
const guid_t *fru_id, const char *fru_text,
const u8 sev, const u8 *err, const u32 len);
void log_arm_hw_error(struct cper_sec_proc_arm *err);
+
#else
static inline void
log_non_standard_event(const guid_t *sec_type,
@@ -35,4 +36,21 @@ static inline void
log_arm_hw_error(struct cper_sec_proc_arm *err) { return; }
#endif
+struct atl_err {
+ u64 addr;
+ u64 ipid;
+ u32 cpu;
+};
+
+#if IS_ENABLED(CONFIG_AMD_ATL)
+void amd_atl_register_decoder(unsigned long (*f)(struct atl_err *));
+void amd_atl_unregister_decoder(void);
+void amd_retire_dram_row(struct atl_err *err);
+unsigned long amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
+#else
+static inline void amd_retire_dram_row(struct atl_err *err) { }
+static inline unsigned long
+amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err) { return -EINVAL; }
+#endif /* CONFIG_AMD_ATL */
+
#endif /* __RAS_H__ */
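
A hedged sketch of the decoder hand-off, assuming CONFIG_AMD_ATL=y; the translation itself is a placeholder, as a real decoder walks the data-fabric registers:

#include <linux/ras.h>

static unsigned long my_decode_sketch(struct atl_err *err)
{
	/* Placeholder: pretend the MCA address is already a system address. */
	return err->addr;
}

static void atl_register_sketch(void)
{
	amd_atl_register_decoder(my_decode_sketch);
	/* amd_convert_umc_mca_addr_to_sys_addr() now resolves via the hook. */
	amd_atl_unregister_decoder();
}
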
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index b676aa419eef..002266693e50 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -4,7 +4,7 @@
#include <linux/bits.h>
#include <linux/param.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define DEFAULT_RATELIMIT_BURST 10
@@ -23,12 +23,16 @@ struct ratelimit_state {
unsigned long flags;
};
-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
+#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ .flags = flags_init, \
}
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
+
#define RATELIMIT_STATE_INIT_DISABLED \
RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
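
For comparison, the two initializers side by side in a hedged sketch; RATELIMIT_MSG_ON_RELEASE is assumed here as the example flag carried by flags_init:

#include <linux/printk.h>
#include <linux/ratelimit.h>

/* Old spelling, now a wrapper passing flags_init == 0. */
static DEFINE_RATELIMIT_STATE(rs_plain, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

/* New spelling with an explicit flag at init time. */
static struct ratelimit_state rs_flags =
	RATELIMIT_STATE_INIT_FLAGS(rs_flags, DEFAULT_RATELIMIT_INTERVAL,
				   DEFAULT_RATELIMIT_BURST,
				   RATELIMIT_MSG_ON_RELEASE);

static void ratelimit_sketch(void)
{
	if (__ratelimit(&rs_flags))
		pr_info("within the rate budget\n");
}
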
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d7db17996322..f7edca369eda 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -17,24 +17,14 @@
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/rbtree_types.h>
+
#include <linux/stddef.h>
#include <linux/rcupdate.h>
-struct rb_node {
- unsigned long __rb_parent_color;
- struct rb_node *rb_right;
- struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
- /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root {
- struct rb_node *rb_node;
-};
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
-#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
typeof(*pos), field); 1; }); \
pos = n)
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
- struct rb_root rb_root;
- struct rb_node *rb_leftmost;
-};
-
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
-
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
@@ -141,12 +114,18 @@ static inline void rb_insert_color_cached(struct rb_node *node,
rb_insert_color(node, &root->rb_root);
}
-static inline void rb_erase_cached(struct rb_node *node,
- struct rb_root_cached *root)
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
+ struct rb_node *leftmost = NULL;
+
if (root->rb_leftmost == node)
- root->rb_leftmost = rb_next(node);
+ leftmost = root->rb_leftmost = rb_next(node);
+
rb_erase(node, &root->rb_root);
+
+ return leftmost;
}
static inline void rb_replace_node_cached(struct rb_node *victim,
@@ -158,4 +137,198 @@ static inline void rb_replace_node_cached(struct rb_node *victim,
rb_replace_node(victim, new, &root->rb_root);
}
+/*
+ * The below helper functions use 2 operators with 3 different
+ * calling conventions. The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * rb_find().
+ *
+ * The reason for this is to allow the find() interface without requiring an
+ * on-stack dummy object, which might not be feasible due to object size.
+ */
+
+/**
+ * rb_add_cached() - insert @node into the leftmost cached tree @tree
+ * @node: node to insert
+ * @tree: leftmost cached tree to insert @node into
+ * @less: operator defining the (partial) node order
+ *
+ * Returns @node when it is the new leftmost, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+
+ return leftmost ? node : NULL;
+}
+
+/**
+ * rb_add() - insert @node into @tree
+ * @node: node to insert
+ * @tree: tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+}
+
+/**
+ * rb_find_add() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
+ * rb_find() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = node->rb_left;
+ else if (c > 0)
+ node = node->rb_right;
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
+ * rb_find_first() - find the first @key in @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ *
+ * Returns the leftmost node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_first(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+ struct rb_node *match = NULL;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c <= 0) {
+ if (!c)
+ match = node;
+ node = node->rb_left;
+ } else if (c > 0) {
+ node = node->rb_right;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * rb_next_match() - find the next node after @node matching @key
+ * @key: key to match
+ * @node: current matching node
+ * @cmp: operator defining node order
+ *
+ * Returns the next node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_next_match(const void *key, struct rb_node *node,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ node = rb_next(node);
+ if (node && cmp(key, node))
+ node = NULL;
+ return node;
+}
+
+/**
+ * rb_for_each() - iterates a subtree matching @key
+ * @node: iterator
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ */
+#define rb_for_each(node, key, tree, cmp) \
+ for ((node) = rb_find_first((key), (tree), (cmp)); \
+ (node); (node) = rb_next_match((key), (node), (cmp)))
+
#endif /* _LINUX_RBTREE_H */
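
A hedged sketch tying the two calling conventions together: demo_less() drives insertion via rb_add(), while demo_cmp() keyed on a bare u64 drives lookup via rb_find(), so no on-stack dummy node is needed. All demo_* names are invented.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rb;
	u64 key;
};

/* less(a, b) convention used by rb_add(). */
static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_node, rb)->key <
	       rb_entry(b, struct demo_node, rb)->key;
}

/* comp(key, node) convention used by rb_find(). */
static int demo_cmp(const void *key, const struct rb_node *node)
{
	u64 k = *(const u64 *)key;
	u64 nk = rb_entry(node, struct demo_node, rb)->key;

	return k < nk ? -1 : k > nk ? 1 : 0;
}

static void demo_insert(struct rb_root *tree, struct demo_node *n)
{
	rb_add(&n->rb, tree, demo_less);
}

static struct demo_node *demo_lookup(struct rb_root *tree, u64 key)
{
	struct rb_node *node = rb_find(&key, tree, demo_cmp);

	return node ? rb_entry(node, struct demo_node, rb) : NULL;
}
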
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index d1c53e9d8c75..6dbc5a1bf6a8 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,6 +60,32 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
+static __always_inline struct rb_node *
+rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *),
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ augment->propagate(parent, NULL); /* suboptimal */
+ rb_insert_augmented_cached(node, tree, leftmost, augment);
+
+ return leftmost ? node : NULL;
+}
+
/*
* Template for declaring augmented rbtree callbacks (generic case)
*
@@ -156,13 +182,13 @@ RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
- rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+ rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
}
static inline void rb_set_parent_color(struct rb_node *rb,
struct rb_node *p, int color)
{
- rb->__rb_parent_color = (unsigned long)p | color;
+ rb->__rb_parent_color = (unsigned long)p + color;
}
static inline void
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 7d012faa509a..6a0999c26c7c 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -42,8 +42,8 @@ struct latch_tree_node {
};
struct latch_tree_root {
- seqcount_t seq;
- struct rb_root tree[2];
+ seqcount_latch_t seq;
+ struct rb_root tree[2];
};
/**
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
do {
seq = raw_read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (read_seqcount_retry(&root->seq, seq));
+ } while (raw_read_seqcount_latch_retry(&root->seq, seq));
return node;
}
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
new file mode 100644
index 000000000000..45b6ecde3665
--- /dev/null
+++ b/include/linux/rbtree_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_RBTREE_TYPES_H
+#define _LINUX_RBTREE_TYPES_H
+
+struct rb_node {
+ unsigned long __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+#endif
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index b8e094b125ee..78feb8ba7358 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -20,6 +20,8 @@
#ifndef __LINUX_RCU_NODE_TREE_H
#define __LINUX_RCU_NODE_TREE_H
+#include <linux/math.h>
+
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
* CONFIG_RCU_FANOUT_LEAF.
diff --git a/include/linux/rcu_notifier.h b/include/linux/rcu_notifier.h
new file mode 100644
index 000000000000..5640f024773b
--- /dev/null
+++ b/include/linux/rcu_notifier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update notifiers, initially RCU CPU stall notifier.
+ * Separate from rcupdate.h to avoid #include loops.
+ *
+ * Copyright (C) 2023 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCU_NOTIFIER_H
+#define __LINUX_RCU_NOTIFIER_H
+
+// Actions for RCU CPU stall notifier calls.
+#define RCU_STALL_NOTIFY_NORM 1
+#define RCU_STALL_NOTIFY_EXP 2
+
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+int rcu_stall_chain_notifier_register(struct notifier_block *n);
+int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
+
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+// No RCU CPU stall warnings in Tiny RCU.
+static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
+static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
+
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+#endif /* __LINUX_RCU_NOTIFIER_H */
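
A hedged sketch of a module hooking the new stall notifier chain; only the two register/unregister entry points and the RCU_STALL_NOTIFY_* actions come from the header, the rest is standard notifier boilerplate:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/rcu_notifier.h>

static int stall_cb_sketch(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	pr_info("RCU stall observed (%s grace period)\n",
		action == RCU_STALL_NOTIFY_EXP ? "expedited" : "normal");
	return NOTIFY_OK;
}

static struct notifier_block stall_nb = { .notifier_call = stall_cb_sketch };

static int __init stall_sketch_init(void)
{
	return rcu_stall_chain_notifier_register(&stall_nb);
}

static void __exit stall_sketch_exit(void)
{
	rcu_stall_chain_notifier_unregister(&stall_nb);
}

module_init(stall_sketch_init);
module_exit(stall_sketch_exit);
MODULE_LICENSE("GPL");
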
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index b36afe7b22c9..659d13a7ddaa 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -63,6 +63,146 @@ struct rcu_cblist {
#define RCU_NEXT_TAIL 3
#define RCU_CBLIST_NSEGS 4
+
+/*
+ * ==NOCB Offloading state machine==
+ *
+ *
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
+ * | allowing nocb_timer to be armed. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * --------------------------------------- ----------------------------------|
+ * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
+ * | | | |
+ * | | | |
+ * | CB kthread woke up and | | GP kthread woke up and |
+ * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
+ * | Processes callbacks concurrently | | |
+ * | with rcu_core(), holding | | |
+ * | nocb_lock. | | |
+ * --------------------------------------- -----------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP | |
+ * | SEGCBLIST_KTHREAD_CB |
+ * | |
+ * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
+ * | handling callbacks. Enable bypass queueing. |
+ * ----------------------------------------------------------------------------
+ */
+
+
+
+/*
+ * ==NOCB De-Offloading state machine==
+ *
+ *
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | ignores callbacks. Bypass enqueue is enabled. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | handles callbacks concurrently. Bypass enqueue is enabled. |
+ * | Invoke RCU core to make sure it is not preempted mid-way, which would |
+ * | leave some urgent work unattended for up to a jiffy. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
+ * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
+ * | bypass enqueue. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * ---------------------------------------------------------------------------|
+ * | | |
+ * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
+ * | | |
+ * | GP kthread woke up and | CB kthread woke up and |
+ * | acknowledged the fact that | acknowledged the fact that |
+ * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
+ * | | The CB kthread goes to sleep |
+ * | The callbacks from the target CPU | until it is re-offloaded, if ever.|
+ * | will be ignored from the GP kthread | |
+ * | loop. | |
+ * ----------------------------------------------------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
+ * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ */
+#define SEGCBLIST_ENABLED BIT(0)
+#define SEGCBLIST_RCU_CORE BIT(1)
+#define SEGCBLIST_LOCKING BIT(2)
+#define SEGCBLIST_KTHREAD_CB BIT(3)
+#define SEGCBLIST_KTHREAD_GP BIT(4)
+#define SEGCBLIST_OFFLOADED BIT(5)
+
struct rcu_segcblist {
struct rcu_head *head;
struct rcu_head **tails[RCU_CBLIST_NSEGS];
@@ -72,8 +212,8 @@ struct rcu_segcblist {
#else
long len;
#endif
- u8 enabled;
- u8 offloaded;
+ long seglen[RCU_CBLIST_NSEGS];
+ u8 flags;
};
#define RCU_SEGCBLIST_INITIALIZER(n) \
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index 0027d4c8087c..3860dbb9107a 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -37,7 +37,6 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
}
extern void rcu_sync_init(struct rcu_sync *);
-extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 7a6fc9956510..3dc1e58865f7 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -11,15 +11,6 @@
#include <linux/rcupdate.h>
/*
- * Why is there no list_empty_rcu()? Because list_empty() serves this
- * purpose. The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller. Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
-/*
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
* @list: list to be initialized
*
@@ -63,9 +54,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \
"RCU-list traversed in non-reader section!"); \
})
+
+#define __list_check_srcu(cond) \
+ ({ \
+ RCU_LOCKDEP_WARN(!(cond), \
+ "RCU-list traversed without holding the required lock!");\
+ })
#else
#define __list_check_rcu(dummy, cond, extra...) \
({ check_arg_count_one(extra); })
+
+#define __list_check_srcu(cond) ({ })
#endif
/*
@@ -310,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
*
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
*
* if (!list_empty_rcu(mylist)) {
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
* do_something(bar);
* }
*
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
+ *
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
*
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head. However, it neither dereferences this pointer nor provides
+ * this pointer to its caller. Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu(). Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
*
* See list_first_or_null_rcu for an alternative.
*/
@@ -348,7 +355,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
})
/**
- * list_next_or_null_rcu - get the first element from a list
+ * list_next_or_null_rcu - get the next element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
@@ -386,6 +393,25 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
+ * list_for_each_entry_srcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ * @cond: lockdep expression for the lock required to traverse the list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by srcu_read_lock().
+ * The lockdep expression srcu_read_lock_held() can be passed as the
+ * cond argument from the read side.
+ */
+#define list_for_each_entry_srcu(pos, head, member, cond) \
+ for (__list_check_srcu(cond), \
+ pos = list_entry_rcu((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
+
+/**
* list_entry_lockless - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
@@ -684,6 +710,27 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
&(pos)->member)), typeof(*(pos)), member))
/**
+ * hlist_for_each_entry_srcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ * @cond: lockdep expression for the lock required to traverse the list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by srcu_read_lock().
+ * The lockdep expression srcu_read_lock_held() can be passed as the
+ * cond argument from the read side.
+ */
+#define hlist_for_each_entry_srcu(pos, head, member, cond) \
+ for (__list_check_srcu(cond), \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
* hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index ff3e94779e73..89186c499dd4 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,7 +101,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
{
struct hlist_nulls_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
@@ -137,9 +137,9 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
last = i;
if (last) {
- n->next = last->next;
+ WRITE_ONCE(n->next, last->next);
n->pprev = &last->next;
- rcu_assign_pointer(hlist_next_rcu(last), n);
+ rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
}
@@ -161,7 +161,7 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
*
 * The barrier() is needed to make sure the compiler doesn't cache the first element [1],
* as this loop can be restarted [2]
- * [1] Documentation/core-api/atomic_ops.rst around line 114
+ * [1] Documentation/memory-barriers.txt around line 1533
* [2] Documentation/RCU/rculist_nulls.rst around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
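
Illustrative sketch, not part of the patch: the SLAB_TYPESAFE_BY_RCU lookup pattern that motivates the WRITE_ONCE() changes above. struct obj, obj_hash, the member field, obj_equal(), and obj_tryget() are all hypothetical.

	static struct obj *obj_lookup(unsigned int slot, unsigned long key)
	{
		struct hlist_nulls_node *node;
		struct obj *obj;

		rcu_read_lock();
	begin:
		hlist_nulls_for_each_entry_rcu(obj, node, &obj_hash[slot], member) {
			if (obj_equal(obj, key)) {
				if (!obj_tryget(obj))	/* may fail on a recycled object */
					goto begin;	/* restart the lookup */
				goto out;
			}
		}
		/* A nulls mismatch means the object moved chains: restart. */
		if (get_nulls_value(node) != slot)
			goto begin;
		obj = NULL;
	out:
		rcu_read_unlock();
		return obj;
	}
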
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d15d46db61f7..17d7ed5f3ae6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -27,12 +27,13 @@
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
+#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-#define ulong2long(a) (*(long *)(&(a)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
@@ -40,6 +41,31 @@ void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
+struct rcu_gp_oldstate;
+unsigned long get_completed_synchronize_rcu(void);
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_rcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
+ * get_completed_synchronize_rcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
+
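Illustrative sketch, not part of the patch: how the polled grace-period pieces fit together. struct foo is hypothetical; the functions are those declared here and in rcutiny.h/rcutree.h.

	static void remove_and_free(struct foo *p)
	{
		unsigned long cookie;

		/* ... unlink p so new readers cannot find it ... */
		cookie = get_state_synchronize_rcu();	/* snapshot GP state */
		/* ... other teardown work ... */
		if (!poll_state_synchronize_rcu(cookie))
			synchronize_rcu();	/* no GP elapsed yet: wait */
		kfree(p);			/* pre-existing readers are done */
	}

	/* A structure can also be marked "already safe" by storing
	 * get_completed_synchronize_rcu() and later comparing with
	 * same_state_synchronize_rcu(). */
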
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
@@ -51,10 +77,16 @@ void __rcu_read_unlock(void);
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TINY_RCU
+#define rcu_read_unlock_strict() do { } while (0)
+#else
+void rcu_read_unlock_strict(void);
+#endif
+
static inline void __rcu_read_lock(void)
{
preempt_disable();
@@ -63,6 +95,8 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
preempt_enable();
+ if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+ rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
@@ -72,12 +106,25 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_RCU_LAZY
+void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
+#else
+static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+#endif
+
/* Internal to kernel */
void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
-void rcu_report_dead(unsigned int cpu);
-void rcutree_migrate_callbacks(int cpu);
+
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -87,45 +134,24 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-void rcu_user_enter(void);
-void rcu_user_exit(void);
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
#else
-static inline void rcu_user_enter(void) { }
-static inline void rcu_user_exit(void) { }
-#endif /* CONFIG_NO_HZ_FULL */
+static inline void rcu_irq_work_resched(void) { }
+#endif
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
+int rcu_nocb_cpu_offload(int cpu);
+int rcu_nocb_cpu_deoffload(int cpu);
+void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
+static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
+static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
+static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-/**
- * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
- * @a: Code that RCU needs to pay attention to.
- *
- * RCU read-side critical sections are forbidden in the inner idle loop,
- * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
- * will happily ignore any such read-side critical sections. However,
- * things like powertop need tracepoints in the inner idle loop.
- *
- * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
- * will tell RCU that it needs to pay attention, invoke its argument
- * (in this example, calling the do_something_with_RCU() function),
- * and then tell RCU to go back to ignoring this CPU. It is permissible
- * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
- * on the order of a million or so, even on 32-bit systems). It is
- * not legal to block within RCU_NONIDLE(), nor is it permissible to
- * transfer control either into or out of RCU_NONIDLE()'s statement.
- */
-#define RCU_NONIDLE(a) \
- do { \
- rcu_irq_enter_irqson(); \
- do { a; } while (0); \
- rcu_irq_exit_irqson(); \
- } while (0)
-
/*
* Note a quasi-voluntary context switch for RCU-tasks's benefit.
* This is a macro rather than an inline function to avoid #include hell.
@@ -146,14 +172,25 @@ void synchronize_rcu_tasks(void);
# define synchronize_rcu_tasks synchronize_rcu
# endif
-# ifdef CONFIG_TASKS_RCU_TRACE
-# define rcu_tasks_trace_qs(t) \
- do { \
- if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
- !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
- smp_store_release(&(t)->trc_reader_checked, true); \
- smp_mb(); /* Readers partitioned by store. */ \
- } \
+# ifdef CONFIG_TASKS_TRACE_RCU
+// Bits for ->trc_reader_special.b.need_qs field.
+#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
+#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
+
+u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
+void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+
+# define rcu_tasks_trace_qs(t) \
+ do { \
+ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
+ \
+ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
+ likely(!___rttq_nesting)) { \
+ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
+ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
+ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
+ rcu_tasks_trace_qs_blkd(t); \
+ } \
} while (0)
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
@@ -162,7 +199,7 @@ void synchronize_rcu_tasks(void);
#define rcu_tasks_qs(t, preempt) \
do { \
rcu_tasks_classic_qs((t), (preempt)); \
- rcu_tasks_trace_qs((t)); \
+ rcu_tasks_trace_qs(t); \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
@@ -172,17 +209,32 @@ void synchronize_rcu_tasks_rude(void);
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
+ * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
+ *
+ * As an accident of implementation, an RCU Tasks Trace grace period also
+ * acts as an RCU grace period. However, this could change at any time.
+ * Code relying on this accident must call this function to verify that
+ * this accident is still happening.
+ *
+ * You have been warned!
+ */
+static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
+
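Illustrative sketch, not part of the patch: the intended use, modeled on what a Tasks Trace user such as BPF might do. struct my_obj and its rh field are hypothetical.

	static void my_obj_free_rcu(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct my_obj, rh));
	}

	static void my_obj_free_trace(struct rcu_head *rhp)
	{
		if (rcu_trace_implies_rcu_gp())
			my_obj_free_rcu(rhp);	/* skip the extra grace period */
		else
			call_rcu(rhp, my_obj_free_rcu);
	}

	/* usage: call_rcu_tasks_trace(&obj->rh, my_obj_free_trace); */
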
+/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
@@ -195,6 +247,37 @@ do { \
cond_resched(); \
} while (0)
+/**
+ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
+ * @old_ts: jiffies at start of processing.
+ *
+ * This helper is for long-running softirq handlers, such as NAPI threads in
+ * networking. The caller should initialize the variable passed in as @old_ts
+ * at the beginning of the softirq handler. When invoked frequently, this macro
+ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
+ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
+ * modifies its old_ts argument.
+ *
+ * Because regions of code that have disabled softirq act as RCU read-side
+ * critical sections, this macro should be invoked with softirq (and
+ * preemption) enabled.
+ *
+ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would
+ * have more chance to invoke schedule() calls and provide necessary quiescent
+ * states. By contrast, calling cond_resched() alone won't achieve the same
+ * effect because cond_resched() does not provide RCU-Tasks quiescent states.
+ */
+#define rcu_softirq_qs_periodic(old_ts) \
+do { \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
+ time_after(jiffies, (old_ts) + HZ / 10)) { \
+ preempt_disable(); \
+ rcu_softirq_qs(); \
+ preempt_enable(); \
+ (old_ts) = jiffies; \
+ } \
+} while (0)
+
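Illustrative sketch, not part of the patch: the documented usage pattern, with hypothetical NAPI-style helpers.

	static void my_poll_loop(void)
	{
		unsigned long old_ts = jiffies;	/* caller-initialized, as documented */

		while (have_more_packets()) {		/* hypothetical */
			process_one_packet();		/* hypothetical */
			rcu_softirq_qs_periodic(old_ts); /* QS at most every 100ms */
		}
	}
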
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -234,6 +317,11 @@ bool rcu_lockdep_current_cpu_online(void);
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void rcu_lock_acquire(struct lockdep_map *map)
@@ -241,15 +329,16 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}
+static inline void rcu_try_lock_acquire(struct lockdep_map *map)
+{
+ lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
+}
+
static inline void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, _THIS_IP_);
}
-extern struct lockdep_map rcu_lock_map;
-extern struct lockdep_map rcu_bh_lock_map;
-extern struct lockdep_map rcu_sched_lock_map;
-extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
@@ -259,6 +348,7 @@ int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_try_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
@@ -281,6 +371,11 @@ static inline int rcu_read_lock_any_held(void)
return !preemptible();
}
+static inline int debug_lockdep_rcu_enabled(void)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
@@ -289,11 +384,18 @@ static inline int rcu_read_lock_any_held(void)
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
+ *
+ * This checks debug_lockdep_rcu_enabled() before checking (c) to
+ * prevent early boot splats due to lockdep not yet being initialized,
+ * and rechecks it after checking (c) to prevent false-positive splats
+ * due to races with lockdep being disabled. See commit 3066820034b5dd
+ * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
- static bool __section(.data.unlikely) __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ static bool __section(".data.unlikely") __warned; \
+ if (debug_lockdep_rcu_enabled() && (c) && \
+ debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
@@ -312,7 +414,8 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \
@@ -320,7 +423,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#else /* #ifdef CONFIG_PROVE_RCU */
-#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -340,32 +443,48 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
-#define __rcu_access_pointer(p, space) \
+#define __unrcu_pointer(p, local) \
+({ \
+ typeof(*p) *local = (typeof(*p) *__force)(p); \
+ rcu_check_sparse(p, __rcu); \
+ ((typeof(*p) __force __kernel *)(local)); \
+})
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))
+
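Illustrative sketch, not part of the patch: the xchg() use case named in the kernel-doc above, with a hypothetical global gp.

	static struct foo __rcu *gp;

	static struct foo *take_gp(void)
	{
		/* Atomically claim the pointer; strip __rcu from the result. */
		return unrcu_pointer(xchg(&gp, NULL));
	}
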
+#define __rcu_access_pointer(p, local, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
rcu_check_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(_________p1)); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_check(p, c, space) \
+#define __rcu_dereference_check(p, local, c, space) \
({ \
/* Dependency order vs. p above. */ \
- typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_check_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_protected(p, c, space) \
+#define __rcu_dereference_protected(p, local, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
-#define rcu_dereference_raw(p) \
+#define __rcu_dereference_raw(p, local) \
({ \
/* Dependency order vs. p above. */ \
- typeof(p) ________p1 = READ_ONCE(p); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ typeof(p) local = READ_ONCE(p); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
+#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
@@ -444,15 +563,23 @@ do { \
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
+ * Within an RCU read-side critical section, there is little reason to
+ * use rcu_access_pointer().
+ *
+ * It is usually best to test the rcu_access_pointer() return value
+ * directly in order to avoid accidental dereferences being introduced
+ * by later inattentive changes. In other words, assigning the
+ * rcu_access_pointer() return value to a local variable results in an
+ * accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
- * access to the pointer was removed at least one grace period ago, as
- * is the case in the context of the RCU callback that is freeing up
- * the data, or after a synchronize_rcu() returns. This can be useful
- * when tearing down multi-linked structures after a grace period
- * has elapsed.
+ * access to the pointer was removed at least one grace period ago, as is
+ * the case in the context of the RCU callback that is freeing up the data,
+ * or after a synchronize_rcu() returns. This can be useful when tearing
+ * down multi-linked structures after a grace period has elapsed. However,
+ * rcu_dereference_protected() is normally preferred for this use case.
*/
-#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
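
Illustrative sketch, not part of the patch: the recommended direct test with no local variable, reusing the hypothetical gp.

	static void maybe_schedule_cleanup(void)
	{
		if (rcu_access_pointer(gp))	/* NULL test only; never dereferenced */
			schedule_cleanup_work();	/* hypothetical */
	}
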
/**
* rcu_dereference_check() - rcu_dereference with debug checking
@@ -488,17 +615,24 @@ do { \
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu(), and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -506,9 +640,15 @@ do { \
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu(), and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_sched_held(), \
__rcu)
/*
@@ -518,7 +658,8 @@ do { \
* The no-tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
-#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -537,7 +678,7 @@ do { \
* but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
+ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)
/**
@@ -597,6 +738,12 @@ do { \
* sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
*
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() is guaranteed to be waited for.
+ *
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -649,33 +796,12 @@ static __always_inline void rcu_read_lock(void)
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -691,9 +817,11 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -709,8 +837,8 @@ static inline void rcu_read_lock_bh(void)
"rcu_read_lock_bh() used illegally while idle");
}
-/*
- * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+/**
+ * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
@@ -726,9 +854,12 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
@@ -751,10 +882,10 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
__acquire(RCU_SCHED);
}
-/*
- * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+/**
+ * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section
*
- * See rcu_read_lock_sched for more information.
+ * See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
{
@@ -832,19 +963,10 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
-/*
- * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
- */
-#define __kvfree_rcu(head, offset) \
- do { \
- BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
- kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
- } while (0)
-
/**
* kfree_rcu() - kfree an object after a grace period.
- * @ptr: pointer to kfree
- * @rhf: the name of the struct rcu_head within the type of @ptr.
+ * @ptr: pointer to kfree for double-argument invocations.
+ * @rhf: the name of the struct rcu_head within the type of @ptr.
*
 * Many RCU callback functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
@@ -857,61 +979,57 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kvfree_rcu(). If this error is triggered, you can
+ * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
- * Note that the allowable offset might decrease in the future, for example,
- * to allow something like kmem_cache_free_rcu().
+ * The object to be freed can be allocated either by kmalloc() or
+ * kmem_cache_alloc().
+ *
+ * Note that the allowable offset might decrease in the future.
*
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
-#define kfree_rcu(ptr, rhf) \
-do { \
- typeof (ptr) ___p = (ptr); \
- \
- if (___p) \
- __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
-} while (0)
+#define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
+#define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
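
Illustrative sketch, not part of the patch: the double-argument form with a hypothetical structure; the rcu_head must sit within the first 4096 bytes.

	struct foo {
		int a;
		struct rcu_head rh;	/* offset must be below 4096 */
	};

	static void foo_release(struct foo *p)
	{
		kfree_rcu(p, rh);	/* frees p after a grace period */
	}
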
/**
- * kvfree_rcu() - kvfree an object after a grace period.
- *
- * This macro consists of one or two arguments and it is
- * based on whether an object is head-less or not. If it
- * has a head then a semantic stays the same as it used
- * to be before:
- *
- * kvfree_rcu(ptr, rhf);
- *
- * where @ptr is a pointer to kvfree(), @rhf is the name
- * of the rcu_head structure within the type of @ptr.
+ * kfree_rcu_mightsleep() - kfree an object after a grace period.
+ * @ptr: pointer to kfree for single-argument invocations.
*
 * When it comes to the head-less variant, only one argument
 * is passed, and that is just a pointer which has to be
 * freed after a grace period. Therefore the semantics are
*
- * kvfree_rcu(ptr);
+ * kfree_rcu_mightsleep(ptr);
*
- * where @ptr is a pointer to kvfree().
+ * where @ptr is the pointer to be freed by kvfree().
*
 * Please note, the head-less way of freeing may only be
 * used from a context that allows a might_sleep()
 * annotation. Otherwise, please switch and embed the
* rcu_head structure within the type of @ptr.
*/
-#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
- kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+
+#define kvfree_rcu_arg_2(ptr, rhf) \
+do { \
+ typeof (ptr) ___p = (ptr); \
+ \
+ if (___p) { \
+ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
+ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
+ } \
+} while (0)
-#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
-#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
#define kvfree_rcu_arg_1(ptr) \
do { \
typeof(ptr) ___p = (ptr); \
\
if (___p) \
- kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+ kvfree_call_rcu(NULL, (void *) (___p)); \
} while (0)
/*
@@ -945,7 +1063,7 @@ static inline void rcu_head_init(struct rcu_head *rhp)
}
/**
- * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
+ * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
* @rhp: The rcu_head structure to test.
* @f: The function passed to call_rcu() along with @rhp.
*
@@ -972,4 +1090,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
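Illustrative sketch, not part of the patch: the scope-based guard this DEFINE_LOCK_GUARD_0() enables via <linux/cleanup.h>, reusing the hypothetical gp and struct foo.

	static int read_datum(void)
	{
		struct foo *p;

		guard(rcu)();	/* rcu_read_lock() now, rcu_read_unlock() at return */
		p = rcu_dereference(gp);
		return p ? p->datum : -1;
	}
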
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index d9015aac78c6..eda493200663 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -11,10 +11,10 @@
#include <linux/sched.h>
#include <linux/rcupdate.h>
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
extern struct lockdep_map rcu_trace_lock_map;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
static inline int rcu_read_lock_trace_held(void)
{
return lock_is_held(&rcu_trace_lock_map);
@@ -31,7 +31,7 @@ static inline int rcu_read_lock_trace_held(void)
#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
+void rcu_read_unlock_trace_special(struct task_struct *t);
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,6 +50,7 @@ static inline void rcu_read_lock_trace(void)
struct task_struct *t = current;
WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+ barrier();
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
t->trc_reader_special.b.need_mb)
smp_mb(); // Pairs with update-side barriers
@@ -72,17 +73,29 @@ static inline void rcu_read_unlock_trace(void)
rcu_lock_release(&rcu_trace_lock_map);
nesting = READ_ONCE(t->trc_reader_nesting) - 1;
+ barrier(); // Critical section before disabling.
+ // Disable IPI-based setting of .need_qs.
+ WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
WRITE_ONCE(t->trc_reader_nesting, nesting);
return; // We assume shallow reader nesting.
}
- rcu_read_unlock_trace_special(t, nesting);
+ WARN_ON_ONCE(nesting != 0);
+ rcu_read_unlock_trace_special(t);
}
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
-
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
+#else
+/*
+ * The BPF JIT forms these addresses even when it doesn't call these
+ * functions, so provide definitions that result in runtime errors.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
+static inline void rcu_read_lock_trace(void) { BUG(); }
+static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
#endif /* __LINUX_RCUPDATE_TRACE_H */
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 699b938358bf..d07f0848802e 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -8,6 +8,7 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/sched.h>
/*
* Structure allowing asynchronous waiting on RCU.
@@ -42,6 +43,11 @@ do { \
* call_srcu() function, with this wrapper supplying the pointer to the
* corresponding srcu_struct.
*
+ * Note that call_rcu_hurry() should be used instead of call_rcu()
+ * because in kernels built with CONFIG_RCU_LAZY=y the delay between the
+ * invocation of call_rcu() and that of the corresponding RCU callback
+ * can be multiple seconds.
+ *
* The first argument tells Tiny RCU's _wait_rcu_gp() not to
* bother waiting for RCU. The reason for this is because anywhere
* synchronize_rcu_mult() can be called is automatically already a full
@@ -50,4 +56,13 @@ do { \
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+static inline void cond_resched_rcu(void)
+{
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+#endif
+}
+
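Illustrative sketch, not part of the patch: cond_resched_rcu() in a long-running reader. Because the read-side critical section is briefly exited, the cursor must be pinned by a reference or the scan restarted; process() and foo_list are hypothetical.

	static void scan_all(void)
	{
		struct foo *p;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &foo_list, list) {
			process(p);		/* hypothetical */
			cond_resched_rcu();	/* exits and re-enters the
						 * critical section; p must be
						 * pinned across this point */
		}
		rcu_read_unlock();
	}
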
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
new file mode 100644
index 000000000000..2c8bfd0f1b6b
--- /dev/null
+++ b/include/linux/rcuref.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RCUREF_H
+#define _LINUX_RCUREF_H
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/limits.h>
+#include <linux/lockdep.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate.h>
+
+#define RCUREF_ONEREF 0x00000000U
+#define RCUREF_MAXREF 0x7FFFFFFFU
+#define RCUREF_SATURATED 0xA0000000U
+#define RCUREF_RELEASED 0xC0000000U
+#define RCUREF_DEAD 0xE0000000U
+#define RCUREF_NOREF 0xFFFFFFFFU
+
+/**
+ * rcuref_init - Initialize a rcuref reference count with the given reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count, typically '1'
+ */
+static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
+{
+ atomic_set(&ref->refcnt, cnt - 1);
+}
+
+/**
+ * rcuref_read - Read the number of held reference counts of a rcuref
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned int rcuref_read(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= RCUREF_RELEASED ? 0 : c + 1;
+}
+
+extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
+
+/**
+ * rcuref_get - Acquire one reference on a rcuref reference count
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See documentation in lib/rcuref.c
+ *
+ * Return:
+ * False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already
+ *
+ * True if a reference was successfully acquired
+ */
+static inline __must_check bool rcuref_get(rcuref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
+ return true;
+
+ /* Handle the cases inside the saturation and dead zones */
+ return rcuref_get_slowpath(ref);
+}
+
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+
+/*
+ * Internal helper. Do not invoke directly.
+ */
+static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
+ "suspicious rcuref_put_rcusafe() usage");
+ /*
+ * Unconditionally decrease the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+ return false;
+
+ /*
+ * Handle the last reference drop and cases inside the saturation
+ * and dead zones.
+ */
+ return rcuref_put_slowpath(ref);
+}
+
+/**
+ * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Can be invoked from contexts which guarantee that no grace period can
+ * happen which would free the object concurrently if the decrement drops
+ * the last reference and the slowpath races against a concurrent get() and
+ * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release the
+ * object which is protected by the reference counter.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
+{
+ return __rcuref_put(ref);
+}
+
+/**
+ * rcuref_put -- Release one reference for a rcuref reference count
+ * @ref: Pointer to the reference count
+ *
+ * Can be invoked from any context.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return:
+ *
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+static inline __must_check bool rcuref_put(rcuref_t *ref)
+{
+ bool released;
+
+ preempt_disable();
+ released = __rcuref_put(ref);
+ preempt_enable();
+ return released;
+}
+
+#endif
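
Illustrative sketch, not part of the patch: an rcuref_t lifecycle tying the helpers above together; struct obj is hypothetical.

	struct obj {
		rcuref_t ref;
		struct rcu_head rh;
	};

	static struct obj *obj_alloc(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o)
			rcuref_init(&o->ref, 1);	/* one initial reference */
		return o;
	}

	static struct obj *obj_tryget(struct obj *o)
	{
		return rcuref_get(&o->ref) ? o : NULL; /* fails after last put */
	}

	static void obj_put(struct obj *o)
	{
		if (rcuref_put(&o->ref))
			kfree_rcu(o, rh);	/* last reference: free after GP */
	}
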
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5cc9637cac16..d9ac7b136aea 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,12 +14,43 @@
#include <asm/param.h> /* for HZ */
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
-static inline unsigned long get_state_synchronize_rcu(void)
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
+
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
{
- return 0;
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
+}
+
+unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
+unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
static inline void cond_synchronize_rcu(unsigned long oldstate)
@@ -27,6 +58,31 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu(rgosp->rgos_norm);
+}
+
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
+{
+ return start_poll_synchronize_rcu();
+}
+
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
+
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
+{
+ cond_synchronize_rcu(oldstate);
+}
+
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
+}
+
extern void rcu_barrier(void);
static inline void synchronize_rcu_expedited(void)
@@ -42,19 +98,28 @@ static inline void synchronize_rcu_expedited(void)
*/
extern void kvfree(const void *addr);
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
if (head) {
- call_rcu(head, func);
+ call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
return;
}
// kvfree_rcu(one_arg) call.
might_sleep();
synchronize_rcu();
- kvfree((void *) func);
+ kvfree(ptr);
}
+#ifdef CONFIG_KASAN_GENERIC
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+#else
+static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+{
+ __kvfree_call_rcu(head, ptr);
+}
+#endif
+
void rcu_qs(void);
static inline void rcu_softirq_qs(void)
@@ -68,26 +133,20 @@ static inline void rcu_softirq_qs(void)
rcu_tasks_qs(current, (preempt)); \
} while (0)
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+static inline int rcu_needs_cpu(void)
{
- *nextevt = KTIME_MAX;
return 0;
}
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
/*
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
*/
-static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
@@ -95,15 +154,10 @@ static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
-#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #ifndef CONFIG_SRCU */
-static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline bool __rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }
@@ -117,6 +171,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
-static inline void rcu_cpu_starting(unsigned int cpu) { }
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d2f4064ebd1d..254244202ea9 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -19,37 +19,73 @@
void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
-int rcu_needs_cpu(u64 basem, u64 *nextevt);
+int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
/*
* Note a virtualization-based context switch. This is simply a
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
* to save a few bytes. The caller must have disabled interrupts.
*/
-static inline void rcu_virt_note_context_switch(int cpu)
+static inline void rcu_virt_note_context_switch(void)
{
rcu_note_context_switch(false);
}
void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
void rcu_barrier(void);
-bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+ unsigned long rgos_exp;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4
+
+/**
+ * same_state_synchronize_rcu_full - Are two old-state values identical?
+ * @rgosp1: First old-state value.
+ * @rgosp2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or get_completed_synchronize_rcu_full(). Returns @true if the two
+ * values are identical and @false otherwise. This allows structures
+ * whose lifetimes are tracked by old-state values to push these values
+ * to a list header, allowing those structures to be slightly smaller.
+ *
+ * Note that equality is judged on a bitwise basis, so that an
+ * @rcu_gp_oldstate structure with an already-completed state in one field
+ * will compare not-equal to a structure with an already-completed state
+ * in the other field. After all, the @rcu_gp_oldstate structure is opaque
+ * so how did such a situation come to pass in the first place?
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
+}
+
+unsigned long start_poll_synchronize_rcu_expedited(void);
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
+void cond_synchronize_rcu_expedited(unsigned long oldstate);
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+unsigned long start_poll_synchronize_rcu(void);
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
-
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
@@ -57,14 +93,16 @@ void rcu_irq_exit_check_preempt(void);
static inline void rcu_irq_exit_check_preempt(void) { }
#endif
+struct task_struct;
+void rcu_preempt_deferred_qs(struct task_struct *t);
+
void exit_rcu(void);
void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
-bool __rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif
@@ -72,9 +110,21 @@ void rcu_all_qs(void);
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
-int rcutree_offline_cpu(unsigned int cpu);
+void rcutree_report_cpu_starting(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+#else
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+#define rcutree_offline_cpu NULL
+#endif
+
+void rcutree_migrate_callbacks(int cpu);
+
+/* Called from hotplug and also arm64 early secondary boot failure */
+void rcutree_report_cpu_dead(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 61c56cca95c4..27343424225c 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -47,15 +47,11 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
rcu_assign_pointer(w->task, current);
}
-static inline void finish_rcuwait(struct rcuwait *w)
-{
- rcu_assign_pointer(w->task, NULL);
- __set_current_state(TASK_RUNNING);
-}
+extern void finish_rcuwait(struct rcuwait *w);
-#define rcuwait_wait_event(w, condition, state) \
+#define ___rcuwait_wait_event(w, condition, state, ret, cmd) \
({ \
- int __ret = 0; \
+ long __ret = ret; \
prepare_to_rcuwait(w); \
for (;;) { \
/* \
@@ -71,10 +67,27 @@ static inline void finish_rcuwait(struct rcuwait *w)
break; \
} \
\
- schedule(); \
+ cmd; \
} \
finish_rcuwait(w); \
__ret; \
})
+#define rcuwait_wait_event(w, condition, state) \
+ ___rcuwait_wait_event(w, condition, state, 0, schedule())
+
+#define __rcuwait_wait_event_timeout(w, condition, state, timeout) \
+ ___rcuwait_wait_event(w, ___wait_cond_timeout(condition), \
+ state, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define rcuwait_wait_event_timeout(w, condition, state, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __rcuwait_wait_event_timeout(w, condition, \
+ state, timeout); \
+ __ret; \
+})
+
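Illustrative sketch, not part of the patch: the waiter/waker pairing, including the new timeout variant; the done flag is hypothetical.

	static struct rcuwait my_wait;
	static bool done;

	static void wait_for_done(void)
	{
		rcuwait_init(&my_wait);
		rcuwait_wait_event(&my_wait, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
	}

	/* Timeout variant: returns remaining jiffies, or 0 on timeout. */
	static long wait_for_done_timeout(void)
	{
		return rcuwait_wait_event_timeout(&my_wait, READ_ONCE(done),
						  TASK_UNINTERRUPTIBLE, HZ);
	}

	static void signal_done(void)
	{
		WRITE_ONCE(done, true);
		rcuwait_wake_up(&my_wait);
	}
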
#endif /* _LINUX_RCUWAIT_H_ */
diff --git a/include/linux/rcuwait_api.h b/include/linux/rcuwait_api.h
new file mode 100644
index 000000000000..f962e28544dd
--- /dev/null
+++ b/include/linux/rcuwait_api.h
@@ -0,0 +1 @@
+#include <linux/rcuwait.h>
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 3734cd8f38a8..abcdde4df697 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -7,6 +7,7 @@
#include <uapi/linux/reboot.h>
struct device;
+struct sys_off_handler;
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
@@ -62,6 +63,106 @@ extern void machine_shutdown(void);
struct pt_regs;
extern void machine_crash_shutdown(struct pt_regs *);
+void do_kernel_power_off(void);
+
+/*
+ * sys-off handler API.
+ */
+
+/*
+ * Standard sys-off priority levels. Users are expected to set priorities
+ * relative to the standard levels.
+ *
+ * SYS_OFF_PRIO_PLATFORM: Use this for platform-level handlers.
+ *
+ * SYS_OFF_PRIO_LOW: Use this for handler of last resort.
+ *
+ * SYS_OFF_PRIO_DEFAULT: Use this for normal handlers.
+ *
+ * SYS_OFF_PRIO_HIGH: Use this for higher priority handlers.
+ *
+ * SYS_OFF_PRIO_FIRMWARE: Use this if handler uses firmware call.
+ */
+#define SYS_OFF_PRIO_PLATFORM -256
+#define SYS_OFF_PRIO_LOW -128
+#define SYS_OFF_PRIO_DEFAULT 0
+#define SYS_OFF_PRIO_HIGH 192
+#define SYS_OFF_PRIO_FIRMWARE 224
+
+enum sys_off_mode {
+ /**
+ * @SYS_OFF_MODE_POWER_OFF_PREPARE:
+ *
+ * Handlers prepare system to be powered off. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_POWER_OFF:
+ *
+ * Handlers power-off system. Handlers are disallowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF,
+
+ /**
+ * @SYS_OFF_MODE_RESTART_PREPARE:
+ *
+ * Handlers prepare system to be restarted. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_RESTART:
+ *
+ * Handlers restart system. Handlers are disallowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART,
+};
+
+/**
+ * struct sys_off_data - sys-off callback argument
+ *
+ * @mode: Mode ID. Currently used only by the sys-off restart mode,
+ * see enum reboot_mode for the available modes.
+ * @cb_data: User's callback data.
+ * @cmd: Command string. Currently used only by the sys-off restart mode,
+ * NULL otherwise.
+ * @dev: Device of the sys-off handler. Only if known (devm_register_*),
+ * NULL otherwise.
+ */
+struct sys_off_data {
+ int mode;
+ void *cb_data;
+ const char *cmd;
+ struct device *dev;
+};
+
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+void unregister_sys_off_handler(struct sys_off_handler *handler);
+
+int devm_register_sys_off_handler(struct device *dev,
+ enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_power_off_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_restart_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int register_platform_power_off(void (*power_off)(void));
+void unregister_platform_power_off(void (*power_off)(void));
+
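Illustrative sketch, not part of the patch: registering a power-off handler with the new API. struct my_pmic and my_pmic_shutdown() are hypothetical; callbacks return notifier-style values from <linux/notifier.h>.

	static int my_power_off(struct sys_off_data *data)
	{
		struct my_pmic *pmic = data->cb_data;

		my_pmic_shutdown(pmic);	/* hypothetical: cut power here */
		return NOTIFY_DONE;
	}

	static int my_probe(struct device *dev, struct my_pmic *pmic)
	{
		return devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
						     SYS_OFF_PRIO_DEFAULT,
						     my_power_off, pmic);
	}
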
/*
 * Architecture-independent implementations of sys_reboot commands.
*/
@@ -70,15 +171,23 @@ extern void kernel_restart_prepare(char *cmd);
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
+extern bool kernel_can_power_off(void);
-extern int C_A_D; /* for sysctl */
void ctrl_alt_del(void);
-#define POWEROFF_CMD_PATH_LEN 256
-extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
+void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
+
+static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, false);
+}
+
+static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, true);
+}
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
new file mode 100644
index 000000000000..8eac4f3d5254
--- /dev/null
+++ b/include/linux/ref_tracker.h
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_REF_TRACKER_H
+#define _LINUX_REF_TRACKER_H
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
+
+struct ref_tracker;
+
+struct ref_tracker_dir {
+#ifdef CONFIG_REF_TRACKER
+ spinlock_t lock;
+ unsigned int quarantine_avail;
+ refcount_t untracked;
+ refcount_t no_tracker;
+ bool dead;
+ struct list_head list; /* List of active trackers */
+ struct list_head quarantine; /* List of dead trackers */
+ char name[32];
+#endif
+};
+
+#ifdef CONFIG_REF_TRACKER
+
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count,
+ const char *name)
+{
+ INIT_LIST_HEAD(&dir->list);
+ INIT_LIST_HEAD(&dir->quarantine);
+ spin_lock_init(&dir->lock);
+ dir->quarantine_avail = quarantine_count;
+ dir->dead = false;
+ refcount_set(&dir->untracked, 1);
+ refcount_set(&dir->no_tracker, 1);
+ strscpy(dir->name, name, sizeof(dir->name));
+ stack_depot_init();
+}
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp, gfp_t gfp);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp);
+
+#else /* CONFIG_REF_TRACKER */
+
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count,
+ const char *name)
+{
+}
+
+static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+}
+
+static inline void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline int ref_tracker_dir_snprint(struct ref_tracker_dir *dir,
+ char *buf, size_t size)
+{
+ return 0;
+}
+
+static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _LINUX_REF_TRACKER_H */
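
A hedged usage sketch of the new API (the "example" names are hypothetical): each acquired reference allocates a tracker that must be handed back on release, so leaked references can later be dumped with ref_tracker_dir_print().

	struct example_obj {
		struct ref_tracker_dir dir;	/* init: ref_tracker_dir_init(&dir, 16, "example") */
	};

	static int example_obj_get(struct example_obj *o, struct ref_tracker **trackerp)
	{
		return ref_tracker_alloc(&o->dir, trackerp, GFP_KERNEL);
	}

	static int example_obj_put(struct example_obj *o, struct ref_tracker **trackerp)
	{
		return ref_tracker_free(&o->dir, trackerp);
	}
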
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0e3ee25eb156..59b3b752394d 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -96,22 +96,11 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
+#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>
struct mutex;
-/**
- * struct refcount_t - variant of atomic_t specialized for reference counts
- * @refs: atomic_t counter field
- *
- * The counter saturates at REFCOUNT_SATURATED and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free bugs.
- */
-typedef struct refcount_struct {
- atomic_t refs;
-} refcount_t;
-
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)
@@ -147,6 +136,25 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+{
+ int old = refcount_read(r);
+
+ do {
+ if (!old)
+ break;
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+ return old;
+}
+
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
@@ -167,17 +175,21 @@ static inline unsigned int refcount_read(const refcount_t *r)
*/
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
- int old = refcount_read(r);
+ return __refcount_add_not_zero(i, r, NULL);
+}
- do {
- if (!old)
- break;
- } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+static inline __signed_wrap
+void __refcount_add(int i, refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_add_relaxed(i, &r->refs);
- if (unlikely(old < 0 || old + i < 0))
- refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+ if (oldp)
+ *oldp = old;
- return old;
+ if (unlikely(!old))
+ refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+ else if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
/**
@@ -198,12 +210,12 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
*/
static inline void refcount_add(int i, refcount_t *r)
{
- int old = atomic_fetch_add_relaxed(i, &r->refs);
+ __refcount_add(i, r, NULL);
+}
- if (unlikely(!old))
- refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
- else if (unlikely(old < 0 || old + i < 0))
- refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero(1, r, oldp);
}
/**
@@ -221,7 +233,12 @@ static inline void refcount_add(int i, refcount_t *r)
*/
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
- return refcount_add_not_zero(1, r);
+ return __refcount_inc_not_zero(r, NULL);
+}
+
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+ __refcount_add(1, r, oldp);
}
/**
@@ -238,7 +255,26 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
*/
static inline void refcount_inc(refcount_t *r)
{
- refcount_add(1, r);
+ __refcount_inc(r, NULL);
+}
+
+static inline __must_check __signed_wrap
+bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_sub_release(i, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (old == i) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ if (unlikely(old < 0 || old - i < 0))
+ refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+ return false;
}
/**
@@ -263,17 +299,12 @@ static inline void refcount_inc(refcount_t *r)
*/
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
- int old = atomic_fetch_sub_release(i, &r->refs);
-
- if (old == i) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- if (unlikely(old < 0 || old - i < 0))
- refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+ return __refcount_sub_and_test(i, r, NULL);
+}
- return false;
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
+{
+ return __refcount_sub_and_test(1, r, oldp);
}
/**
@@ -291,7 +322,18 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
*/
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return __refcount_dec_and_test(r, NULL);
+}
+
+static inline void __refcount_dec(refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_sub_release(1, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old <= 1))
+ refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
/**
@@ -306,15 +348,14 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
*/
static inline void refcount_dec(refcount_t *r)
{
- if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
- refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
+ __refcount_dec(r, NULL);
}
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags);
+ unsigned long *flags) __cond_acquires(lock);
#endif /* _LINUX_REFCOUNT_H */
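
A short sketch (names hypothetical) of what the new double-underscore variants buy callers: the pre-operation counter value is reported through @oldp, which is handy for debugging and tracing.

	struct example_obj {
		refcount_t refs;
	};

	static bool example_obj_tryget(struct example_obj *o)
	{
		int old;

		if (!__refcount_inc_not_zero(&o->refs, &old))
			return false;	/* object already on its way out */
		pr_debug("refcount was %d before get\n", old);
		return true;
	}
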
diff --git a/include/linux/refcount_api.h b/include/linux/refcount_api.h
new file mode 100644
index 000000000000..5f032589f568
--- /dev/null
+++ b/include/linux/refcount_api.h
@@ -0,0 +1 @@
+#include <linux/refcount.h>
diff --git a/include/linux/refcount_types.h b/include/linux/refcount_types.h
new file mode 100644
index 000000000000..162004f06edf
--- /dev/null
+++ b/include/linux/refcount_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_REFCOUNT_TYPES_H
+#define _LINUX_REFCOUNT_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#endif /* _LINUX_REFCOUNT_TYPES_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 1970ed59d49f..b743241cfb7c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -24,9 +24,11 @@ struct module;
struct clk;
struct device;
struct device_node;
+struct fsi_device;
struct i2c_client;
struct i3c_device;
struct irq_domain;
+struct mdio_device;
struct slim_device;
struct spi_device;
struct spmi_device;
@@ -36,12 +38,28 @@ struct regmap_field;
struct snd_ac97;
struct sdw_slave;
+/*
+ * regmap_mdio address encoding. IEEE 802.3ae clause 45 addresses consist of a
+ * device address and a register address.
+ */
+#define REGMAP_MDIO_C45_DEVAD_SHIFT 16
+#define REGMAP_MDIO_C45_DEVAD_MASK GENMASK(20, 16)
+#define REGMAP_MDIO_C45_REGNUM_MASK GENMASK(15, 0)
+
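A hypothetical helper (not part of the patch) showing how the encoding above composes a clause 45 address from an MMD device address and register number:

	#define EXAMPLE_MDIO_C45_ADDR(devad, regnum)			\
		((((devad) << REGMAP_MDIO_C45_DEVAD_SHIFT) &		\
		  REGMAP_MDIO_C45_DEVAD_MASK) |				\
		 ((regnum) & REGMAP_MDIO_C45_REGNUM_MASK))
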
+/*
+ * regmap.reg_shift indicates by how much we must shift registers prior to
+ * performing any operation. It is a signed value; positive values mean
+ * downshifting the register's address, while negative values mean upshifting.
+ */
+#define REGMAP_UPSHIFT(s) (-(s))
+#define REGMAP_DOWNSHIFT(s) (s)
+
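As a sketch (values hypothetical), a driver whose registers are word-indexed behind a fixed offset could describe that with the new fields:

	static const struct regmap_config example_cfg = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_shift = REGMAP_DOWNSHIFT(2),	/* byte address -> word index */
		.reg_base = 0x100,			/* block offset added to every access */
	};
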
/* An enum of all the supported cache types */
enum regcache_type {
REGCACHE_NONE,
REGCACHE_RBTREE,
- REGCACHE_COMPRESSED,
REGCACHE_FLAT,
+ REGCACHE_MAPLE,
};
/**
@@ -236,6 +254,11 @@ typedef void (*regmap_unlock)(void *);
* @reg_stride: The register address stride. Valid register addresses are a
* multiple of this value. If set to 0, a value of 1 will be
* used.
+ * @reg_shift: The number of bits to shift the register before performing any
+ *		operations. A positive value downshifts the register address,
+ *		and a negative value upshifts it.
+ * @reg_base: Value to be added to every register address before performing any
+ * operation.
* @pad_bits: Number of bits of padding between register and value.
* @val_bits: Number of bits in a register value, mandatory.
*
@@ -289,13 +312,30 @@ typedef void (*regmap_unlock)(void *);
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that if filled will be used to perform
+ *		     all the update_bits (read-modify-write) operations. Should
+ *		     only be provided if the function requires special locking
+ *		     and register handling, and the operation cannot be
+ *		     represented as a simple update_bits operation on a bus
+ *		     such as SPI, I2C, etc.
+ * @read: Optional callback that if filled will be used to perform all the
+ * bulk reads from the registers. Data is returned in the buffer used
+ * to transmit data.
+ * @write: Same as above for writing.
+ * @max_raw_read: Max raw read size that can be used on the device.
+ * @max_raw_write: Max raw write size that can be used on the device.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
 *	      This field is a duplicate of a similar field in
 *	      'struct regmap_bus' and serves the exact same purpose.
* Use it only for "no-bus" cases.
+ * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
+ * access can be distinguished.
* @max_register: Optional, specifies the maximum valid register address.
+ * @max_register_is_0: Optional, specifies that zero value in @max_register
+ * should be taken into account. This is a workaround to
+ * apply handling of @max_register for regmap that contains
+ * only one register.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
* @rd_table: As above, for read access.
@@ -315,6 +355,10 @@ typedef void (*regmap_unlock)(void *);
* masks are used.
* @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
* if they are both empty.
+ * @use_relaxed_mmio: If set, MMIO R/W operations will not use memory barriers.
+ * This can avoid load on devices which don't require strict
+ * orderings, but drivers should carefully add any explicit
+ * memory barriers when they may require them.
* @use_single_read: If set, converts the bulk read operation into a series of
* single read operations. This is useful for a device that
* does not support bulk read.
@@ -339,15 +383,19 @@ typedef void (*regmap_unlock)(void *);
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
* @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
*/
struct regmap_config {
const char *name;
int reg_bits;
int reg_stride;
+ int reg_shift;
+ unsigned int reg_base;
int pad_bits;
int val_bits;
@@ -365,10 +413,20 @@ struct regmap_config {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+ size_t max_raw_read;
+ size_t max_raw_write;
bool fast_io;
+ bool io_port;
unsigned int max_register;
+ bool max_register_is_0;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
@@ -387,6 +445,7 @@ struct regmap_config {
bool use_single_read;
bool use_single_write;
+ bool use_relaxed_mmio;
bool can_multi_write;
enum regmap_endian reg_format_endian;
@@ -396,8 +455,11 @@ struct regmap_config {
unsigned int num_ranges;
bool use_hwlock;
+ bool use_raw_spinlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
+
+ bool can_sleep;
};
/**
@@ -453,8 +515,12 @@ typedef int (*regmap_hw_read)(void *context,
void *val_buf, size_t val_size);
typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
unsigned int *val);
+typedef int (*regmap_hw_reg_noinc_read)(void *context, unsigned int reg,
+ void *val, size_t val_count);
typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
unsigned int val);
+typedef int (*regmap_hw_reg_noinc_write)(void *context, unsigned int reg,
+ const void *val, size_t val_count);
typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
@@ -468,6 +534,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of
* struct regmap_config).
+ * @free_on_exit: kfree this structure when the regmap is freed
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
@@ -475,6 +542,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* must serialise with respect to non-async I/O.
* @reg_write: Write a single register value to the given register address. This
* write operation has to complete when returning from the function.
+ * @reg_write_noinc: Write multiple register values to the same register. This
+ *	write operation has to complete when returning from the function.
* @reg_update_bits: Update bits operation to be used against volatile
* registers, intended for devices supporting some mechanism
 *		     for setting/clearing bits without having to
@@ -497,13 +566,16 @@ typedef void (*regmap_hw_free_context)(void *context);
*/
struct regmap_bus {
bool fast_io;
+ bool free_on_exit;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
regmap_hw_reg_write reg_write;
+ regmap_hw_reg_noinc_write reg_noinc_write;
regmap_hw_reg_update_bits reg_update_bits;
regmap_hw_read read;
regmap_hw_reg_read reg_read;
+ regmap_hw_reg_noinc_read reg_noinc_read;
regmap_hw_free_context free_context;
regmap_hw_async_alloc async_alloc;
u8 read_flag_mask;
@@ -530,6 +602,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -567,6 +643,18 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -578,6 +666,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -612,6 +704,10 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -620,6 +716,15 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
* for each call. No-op if CONFIG_LOCKDEP is not set.
@@ -674,6 +779,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* regmap_init_sccb() - Initialise register map
*
* @i2c: Device that will be interacted with
@@ -806,6 +924,45 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
sdw, config)
+/**
+ * regmap_init_sdw_mbq() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.
+ */
+#define regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * regmap_init_fsi() - Initialise register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_fsi, #config, fsi_dev, \
+ config)
/**
* devm_regmap_init() - Initialise managed register map
@@ -839,6 +996,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
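A probe sketch under hypothetical config values (clause 22 MDIO has 5-bit register numbers and 16-bit values):

	static const struct regmap_config example_mdio_cfg = {
		.reg_bits = 5,
		.val_bits = 16,
	};

	static int example_mdio_probe(struct mdio_device *mdiodev)
	{
		struct regmap *map;

		map = devm_regmap_init_mdio(mdiodev, &example_mdio_cfg);
		return PTR_ERR_OR_ZERO(map);
	}
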
+/**
* devm_regmap_init_sccb() - Initialise managed register map
*
* @i2c: Device that will be interacted with
@@ -966,6 +1137,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
sdw, config)
/**
+ * devm_regmap_init_sdw_mbq() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
* devm_regmap_init_slimbus() - Initialise managed register map
*
* @slimbus: Device that will be interacted with
@@ -993,6 +1178,35 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \
i3c, config)
+/**
+ * devm_regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * devm_regmap_init_fsi() - Initialise managed register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_fsi, #config, \
+ fsi_dev, config)
+
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
void regmap_mmio_detach_clk(struct regmap *map);
void regmap_exit(struct regmap *map);
@@ -1064,6 +1278,7 @@ static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
+bool regmap_might_sleep(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
size_t regmap_get_raw_read_max(struct regmap *map);
@@ -1077,6 +1292,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
void regcache_cache_only(struct regmap *map, bool enable);
void regcache_cache_bypass(struct regmap *map, bool enable);
void regcache_mark_dirty(struct regmap *map);
+bool regcache_reg_cached(struct regmap *map, unsigned int reg);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
@@ -1150,6 +1366,18 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
struct regmap *regmap, struct reg_field reg_field);
void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void regmap_field_bulk_free(struct regmap_field *field);
+int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
+ struct regmap_field **field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field);
+
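A sketch of the bulk allocator (field layout hypothetical): one call materializes a whole array of fields, device-managed.

	static const struct reg_field example_fields[] = {
		REG_FIELD(0x00, 0, 3),	/* reg 0x00, bits [3:0] */
		REG_FIELD(0x04, 4, 7),	/* reg 0x04, bits [7:4] */
	};

	static int example_alloc_fields(struct device *dev, struct regmap *map,
					struct regmap_field *out[2])
	{
		return devm_regmap_field_bulk_alloc(dev, map, out, example_fields,
						    ARRAY_SIZE(example_fields));
	}
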
int regmap_field_read(struct regmap_field *field, unsigned int *val);
int regmap_field_update_bits_base(struct regmap_field *field,
unsigned int mask, unsigned int val,
@@ -1180,6 +1408,22 @@ static inline int regmap_field_update_bits(struct regmap_field *field,
NULL, false, false);
}
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, bits, NULL, false,
+ false);
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, 0, NULL, false,
+ false);
+}
+
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits);
+
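The new field helpers mirror the register-level set/clear operations; a hedged sketch:

	static int example_field_gate(struct regmap_field *f, bool on)
	{
		return on ? regmap_field_set_bits(f, BIT(0))
			  : regmap_field_clear_bits(f, BIT(0));
	}
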
static inline int
regmap_field_force_update_bits(struct regmap_field *field,
unsigned int mask, unsigned int val)
@@ -1268,6 +1512,8 @@ struct regmap_irq_sub_irq_map {
unsigned int *offset;
};
+struct regmap_irq_chip_data;
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
@@ -1292,40 +1538,62 @@ struct regmap_irq_sub_irq_map {
* main_status set.
*
* @status_base: Base status register address.
- * @mask_base: Base mask register address.
- * @mask_writeonly: Base mask register is write only.
- * @unmask_base: Base unmask register address. for chips who have
- * separate mask and unmask registers
+ * @mask_base: Base mask register address. Mask bits are set to 1 when an
+ * interrupt is masked, 0 when unmasked.
+ * @unmask_base: Base unmask register address. Unmask bits are set to 1 when
+ * an interrupt is unmasked and 0 when masked.
* @ack_base: Base ack address. If zero then the chip is clear on read.
* Using zero value is possible with @use_ack bit.
* @wake_base: Base address for wake enables. If zero unsupported.
- * @type_base: Base address for irq type. If zero unsupported.
+ * @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
 * @init_ack_masked: Ack all masked interrupts once during initialization.
- * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set
+ * both @mask_base and @unmask_base. If false, mask and unmask bits are
+ * inverted (which is deprecated behavior); if true, bits will not be
+ * inverted and the registers keep their normal behavior. Note that if
+ * you use only one of @mask_base or @unmask_base, this flag has no
+ * effect and is unnecessary. Any new drivers that set both @mask_base
+ * and @unmask_base should set this to true to avoid relying on the
+ * deprecated behavior.
* @use_ack: Use @ack register even if it is zero.
* @ack_invert: Inverted ack register: cleared bits for ack.
+ * @clear_ack: Use this if writing 1 and then 0 (or vice versa) to the ack
+ *	register is needed to clear interrupts.
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
- * @type_invert: Invert the type flags.
- * @type_in_mask: Use the mask registers for controlling irq type. For
- * interrupts defining type_rising/falling_mask use mask_base
- * for edge configuration and never update bits in type_base.
+ * @type_in_mask: Use the mask registers for controlling irq type. Use this if
+ * the hardware provides separate bits for rising/falling edge
+ * or low/high level interrupts and they should be combined into
+ * a single logical interrupt. Use &struct regmap_irq_type data
+ * to define the mask bit for each irq type.
* @clear_on_unmask: For chips with interrupts cleared on read: read the status
* registers before unmasking interrupts to clear any bits
* set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
+ * @no_status: No status register: all interrupts assumed generated by device.
*
* @num_regs: Number of registers in each control bank.
+ *
* @irqs: Descriptors for individual IRQs. Interrupt numbers are
* assigned based on the index in the array of the interrupt.
* @num_irqs: Number of descriptors.
- * @num_type_reg: Number of type registers.
- * @type_reg_stride: Stride to use for chips where type registers are not
- * contiguous.
+ * @num_config_bases: Number of config base registers.
+ * @num_config_regs: Number of config registers for each config base register.
+ *
* @handle_pre_irq: Driver specific callback to handle interrupt from device
* before regmap_irq_handler process the interrupts.
* @handle_post_irq: Driver specific callback to handle interrupt from device
* after handling the interrupts in regmap_irq_handler().
+ * @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be
+ * in the range [0, num_regs)
+ * @set_type_config: Callback used for configuring irq types.
+ * @get_irq_reg: Callback for mapping (base register, index) pairs to register
+ * addresses. The base register will be one of @status_base,
+ * @mask_base, etc., @main_status, or any of @config_base.
+ *	     The index will be in the range [0, num_main_regs) for the
+ *	     main status base, [0, num_config_regs) for any config
+ *	     register base, and [0, num_regs) for any other base.
+ * If unspecified then regmap_irq_get_irq_reg_linear() is used.
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when
* driver specific pre/post interrupt handler is called.
*
@@ -1346,33 +1614,45 @@ struct regmap_irq_chip {
unsigned int unmask_base;
unsigned int ack_base;
unsigned int wake_base;
- unsigned int type_base;
+ const unsigned int *config_base;
unsigned int irq_reg_stride;
- bool mask_writeonly:1;
- bool init_ack_masked:1;
- bool mask_invert:1;
- bool use_ack:1;
- bool ack_invert:1;
- bool wake_invert:1;
- bool runtime_pm:1;
- bool type_invert:1;
- bool type_in_mask:1;
- bool clear_on_unmask:1;
+ unsigned int init_ack_masked:1;
+ unsigned int mask_unmask_non_inverted:1;
+ unsigned int use_ack:1;
+ unsigned int ack_invert:1;
+ unsigned int clear_ack:1;
+ unsigned int status_invert:1;
+ unsigned int wake_invert:1;
+ unsigned int type_in_mask:1;
+ unsigned int clear_on_unmask:1;
+ unsigned int runtime_pm:1;
+ unsigned int no_status:1;
int num_regs;
const struct regmap_irq *irqs;
int num_irqs;
- int num_type_reg;
- unsigned int type_reg_stride;
+ int num_config_bases;
+ int num_config_regs;
int (*handle_pre_irq)(void *irq_drv_data);
int (*handle_post_irq)(void *irq_drv_data);
+ int (*handle_mask_sync)(int index, unsigned int mask_buf_def,
+ unsigned int mask_buf, void *irq_drv_data);
+ int (*set_type_config)(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx,
+ void *irq_drv_data);
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
void *irq_drv_data;
};
-struct regmap_irq_chip_data;
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data,
+ int idx, void *irq_drv_data);
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
@@ -1593,6 +1873,27 @@ regmap_field_force_update_bits(struct regmap_field *field,
return -EINVAL;
}
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_test_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_fields_write(struct regmap_field *field,
unsigned int id, unsigned int val)
{
@@ -1641,6 +1942,12 @@ static inline int regmap_get_reg_stride(struct regmap *map)
return -EINVAL;
}
+static inline bool regmap_might_sleep(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return true;
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regset.h b/include/linux/regset.h
index c3403f328257..9061266dd8de 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -46,6 +46,18 @@ static inline int membuf_write(struct membuf *s, const void *v, size_t size)
return s->left;
}
+static inline struct membuf membuf_at(const struct membuf *s, size_t offs)
+{
+ struct membuf n = *s;
+
+ if (offs > n.left)
+ offs = n.left;
+ n.p += offs;
+ n.left -= offs;
+
+ return n;
+}
+
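A sketch (not from the patch) of filling a fixed-offset slice of a regset buffer without advancing the parent membuf:

	static int example_fill_slice(struct membuf to)
	{
		struct membuf slice = membuf_at(&to, 8);	/* view starting at byte 8 */
		u64 val = 0;

		return membuf_write(&slice, &val, sizeof(val));
	}
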
/* current s->p must be aligned for v; v must be a scalar */
#define membuf_store(s, v) \
({ \
@@ -263,15 +275,15 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
return 0;
}
-static inline int user_regset_copyin_ignore(unsigned int *pos,
- unsigned int *count,
- const void **kbuf,
- const void __user **ubuf,
- const int start_pos,
- const int end_pos)
+static inline void user_regset_copyin_ignore(unsigned int *pos,
+ unsigned int *count,
+ const void **kbuf,
+ const void __user **ubuf,
+ const int start_pos,
+ const int end_pos)
{
if (*count == 0)
- return 0;
+ return;
BUG_ON(*pos < start_pos);
if (end_pos < 0 || *pos < end_pos) {
unsigned int copy = (end_pos < 0 ? *count
@@ -283,7 +295,6 @@ static inline int user_regset_copyin_ignore(unsigned int *pos,
*pos += copy;
*count -= copy;
}
- return 0;
}
extern int regset_get(struct task_struct *target,
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
deleted file mode 100644
index 3ab1ddf151a2..000000000000
--- a/include/linux/regulator/ab8500.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
- * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
- * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __LINUX_MFD_AB8500_REGULATOR_H
-#define __LINUX_MFD_AB8500_REGULATOR_H
-
-#include <linux/platform_device.h>
-
-/* AB8500 regulators */
-enum ab8500_regulator_id {
- AB8500_LDO_AUX1,
- AB8500_LDO_AUX2,
- AB8500_LDO_AUX3,
- AB8500_LDO_INTCORE,
- AB8500_LDO_TVOUT,
- AB8500_LDO_AUDIO,
- AB8500_LDO_ANAMIC1,
- AB8500_LDO_ANAMIC2,
- AB8500_LDO_DMIC,
- AB8500_LDO_ANA,
- AB8500_NUM_REGULATORS,
-};
-
-/* AB8505 regulators */
-enum ab8505_regulator_id {
- AB8505_LDO_AUX1,
- AB8505_LDO_AUX2,
- AB8505_LDO_AUX3,
- AB8505_LDO_AUX4,
- AB8505_LDO_AUX5,
- AB8505_LDO_AUX6,
- AB8505_LDO_INTCORE,
- AB8505_LDO_ADC,
- AB8505_LDO_AUDIO,
- AB8505_LDO_ANAMIC1,
- AB8505_LDO_ANAMIC2,
- AB8505_LDO_AUX8,
- AB8505_LDO_ANA,
- AB8505_NUM_REGULATORS,
-};
-
-/* AB8500 and AB8505 register initialization */
-struct ab8500_regulator_reg_init {
- int id;
- u8 mask;
- u8 value;
-};
-
-#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
- { \
- .id = _id, \
- .mask = _mask, \
- .value = _value, \
- }
-
-/* AB8500 registers */
-enum ab8500_regulator_reg {
- AB8500_REGUREQUESTCTRL2,
- AB8500_REGUREQUESTCTRL3,
- AB8500_REGUREQUESTCTRL4,
- AB8500_REGUSYSCLKREQ1HPVALID1,
- AB8500_REGUSYSCLKREQ1HPVALID2,
- AB8500_REGUHWHPREQ1VALID1,
- AB8500_REGUHWHPREQ1VALID2,
- AB8500_REGUHWHPREQ2VALID1,
- AB8500_REGUHWHPREQ2VALID2,
- AB8500_REGUSWHPREQVALID1,
- AB8500_REGUSWHPREQVALID2,
- AB8500_REGUSYSCLKREQVALID1,
- AB8500_REGUSYSCLKREQVALID2,
- AB8500_REGUMISC1,
- AB8500_VAUDIOSUPPLY,
- AB8500_REGUCTRL1VAMIC,
- AB8500_VPLLVANAREGU,
- AB8500_VREFDDR,
- AB8500_EXTSUPPLYREGU,
- AB8500_VAUX12REGU,
- AB8500_VRF1VAUX3REGU,
- AB8500_VAUX1SEL,
- AB8500_VAUX2SEL,
- AB8500_VRF1VAUX3SEL,
- AB8500_REGUCTRL2SPARE,
- AB8500_REGUCTRLDISCH,
- AB8500_REGUCTRLDISCH2,
- AB8500_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8505 registers */
-enum ab8505_regulator_reg {
- AB8505_REGUREQUESTCTRL1,
- AB8505_REGUREQUESTCTRL2,
- AB8505_REGUREQUESTCTRL3,
- AB8505_REGUREQUESTCTRL4,
- AB8505_REGUSYSCLKREQ1HPVALID1,
- AB8505_REGUSYSCLKREQ1HPVALID2,
- AB8505_REGUHWHPREQ1VALID1,
- AB8505_REGUHWHPREQ1VALID2,
- AB8505_REGUHWHPREQ2VALID1,
- AB8505_REGUHWHPREQ2VALID2,
- AB8505_REGUSWHPREQVALID1,
- AB8505_REGUSWHPREQVALID2,
- AB8505_REGUSYSCLKREQVALID1,
- AB8505_REGUSYSCLKREQVALID2,
- AB8505_REGUVAUX4REQVALID,
- AB8505_REGUMISC1,
- AB8505_VAUDIOSUPPLY,
- AB8505_REGUCTRL1VAMIC,
- AB8505_VSMPSAREGU,
- AB8505_VSMPSBREGU,
- AB8505_VSAFEREGU, /* NOTE! PRCMU register */
- AB8505_VPLLVANAREGU,
- AB8505_EXTSUPPLYREGU,
- AB8505_VAUX12REGU,
- AB8505_VRF1VAUX3REGU,
- AB8505_VSMPSASEL1,
- AB8505_VSMPSASEL2,
- AB8505_VSMPSASEL3,
- AB8505_VSMPSBSEL1,
- AB8505_VSMPSBSEL2,
- AB8505_VSMPSBSEL3,
- AB8505_VSAFESEL1, /* NOTE! PRCMU register */
- AB8505_VSAFESEL2, /* NOTE! PRCMU register */
- AB8505_VSAFESEL3, /* NOTE! PRCMU register */
- AB8505_VAUX1SEL,
- AB8505_VAUX2SEL,
- AB8505_VRF1VAUX3SEL,
- AB8505_VAUX4REQCTRL,
- AB8505_VAUX4REGU,
- AB8505_VAUX4SEL,
- AB8505_REGUCTRLDISCH,
- AB8505_REGUCTRLDISCH2,
- AB8505_REGUCTRLDISCH3,
- AB8505_CTRLVAUX5,
- AB8505_CTRLVAUX6,
- AB8505_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8500 external regulators */
-struct ab8500_ext_regulator_cfg {
- bool hwreq; /* requires hw mode or high power mode */
-};
-
-enum ab8500_ext_regulator_id {
- AB8500_EXT_SUPPLY1,
- AB8500_EXT_SUPPLY2,
- AB8500_EXT_SUPPLY3,
- AB8500_NUM_EXT_REGULATORS,
-};
-
-/* AB8500 regulator platform data */
-struct ab8500_regulator_platform_data {
- int num_reg_init;
- struct ab8500_regulator_reg_init *reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
- int num_ext_regulator;
- struct regulator_init_data *ext_regulator;
-};
-
-#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 2024944fd2f7..4660582a3302 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/suspend.h>
+#include <regulator/regulator.h>
struct device;
struct notifier_block;
@@ -85,42 +86,6 @@ struct regulator_dev;
#define REGULATOR_MODE_STANDBY 0x8
/*
- * Regulator notifier events.
- *
- * UNDER_VOLTAGE Regulator output is under voltage.
- * OVER_CURRENT Regulator output current is too high.
- * REGULATION_OUT Regulator output is out of regulation.
- * FAIL Regulator output has failed.
- * OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator forcibly shut down by software.
- * VOLTAGE_CHANGE Regulator voltage changed.
- * Data passed is old voltage cast to (void *).
- * DISABLE Regulator was disabled.
- * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
- * Data passed is "struct pre_voltage_change_data"
- * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
- * Data passed is old voltage cast to (void *).
- * PRE_DISABLE Regulator is about to be disabled
- * ABORT_DISABLE Regulator disable failed for some reason
- *
- * NOTE: These events can be OR'ed together when passed into handler.
- */
-
-#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
-#define REGULATOR_EVENT_OVER_CURRENT 0x02
-#define REGULATOR_EVENT_REGULATION_OUT 0x04
-#define REGULATOR_EVENT_FAIL 0x08
-#define REGULATOR_EVENT_OVER_TEMP 0x10
-#define REGULATOR_EVENT_FORCE_DISABLE 0x20
-#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
-#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
-#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
-#define REGULATOR_EVENT_PRE_DISABLE 0x400
-#define REGULATOR_EVENT_ABORT_DISABLE 0x800
-#define REGULATOR_EVENT_ENABLE 0x1000
-
-/*
* Regulator errors that can be queried using regulator_get_error_flags
*
* UNDER_VOLTAGE Regulator output is under voltage.
@@ -138,6 +103,10 @@ struct regulator_dev;
#define REGULATOR_ERROR_FAIL BIT(4)
#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -157,10 +126,13 @@ struct regulator;
/**
* struct regulator_bulk_data - Data used for bulk regulator operations.
*
- * @supply: The name of the supply. Initialised by the user before
- * using the bulk regulator APIs.
- * @consumer: The regulator consumer for the supply. This will be managed
- * by the bulk API.
+ * @supply: The name of the supply. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @init_load_uA: After getting the regulator, regulator_set_load() will be
+ * called with this load. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -168,6 +140,7 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
+ int init_load_uA;
struct regulator *consumer;
/* private: Internal use */
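
A hedged sketch of describing supplies up front with the new @init_load_uA field (supply names and loads hypothetical):

	static struct regulator_bulk_data example_supplies[] = {
		{ .supply = "vdd", .init_load_uA = 5000 },
		{ .supply = "vio" },	/* no load request for this one */
	};
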
@@ -189,6 +162,8 @@ struct regulator *__must_check regulator_get_optional(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
+int devm_regulator_get_enable(struct device *dev, const char *id);
+int devm_regulator_get_enable_optional(struct device *dev, const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
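
With the new helpers, a consumer that only needs a supply switched on for the device's lifetime never has to hold a regulator handle; a sketch with a hypothetical "vdd" supply:

	static int example_probe(struct device *dev)
	{
		return devm_regulator_get_enable(dev, "vdd");
	}
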
@@ -208,17 +183,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
int devm_regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id);
int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
const char *const *alias_id,
int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
- const char *const *id,
- int num_id);
/* regulator output control and status */
int __must_check regulator_enable(struct regulator *regulator);
@@ -229,10 +199,21 @@ int regulator_disable_deferred(struct regulator *regulator, int ms);
int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_exclusive(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers);
int __must_check regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id);
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
@@ -332,6 +313,23 @@ regulator_get_exclusive(struct device *dev, const char *id)
}
static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return -ENODEV;
+}
+
+static inline int devm_regulator_get_enable_optional(struct device *dev,
+ const char *id)
+{
+ return -ENODEV;
+}
+
+static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
return ERR_PTR(-ENODEV);
@@ -352,6 +350,10 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+}
+
static inline int regulator_register_supply_alias(struct device *dev,
const char *id,
struct device *alias_dev,
@@ -388,11 +390,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id)
-{
-}
-
static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
@@ -402,11 +399,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_bulk_unregister_supply_alias(
- struct device *dev, const char *const *id, int num_id)
-{
-}
-
static inline int regulator_enable(struct regulator *regulator)
{
return 0;
@@ -446,12 +438,25 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
}
+static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers)
+{
+ return 0;
+}
+
static inline int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
return 0;
}
+static inline int devm_regulator_bulk_get_enable(struct device *dev,
+ int num_consumers,
+ const char * const *id)
+{
+ return 0;
+}
+
static inline int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers)
{
@@ -486,6 +491,11 @@ static inline int regulator_get_voltage(struct regulator *regulator)
return -EINVAL;
}
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+ return -EINVAL;
+}
+
static inline int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
@@ -578,6 +588,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator
return 0;
}
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
static inline void *regulator_get_drvdata(struct regulator *regulator)
{
return NULL;
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 5f86824bd117..73291f280a23 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -52,7 +52,6 @@ struct regulator_coupler {
#ifdef CONFIG_REGULATOR
int regulator_coupler_register(struct regulator_coupler *coupler);
-const char *rdev_get_name(struct regulator_dev *rdev);
int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state);
@@ -69,10 +68,6 @@ static inline int regulator_coupler_register(struct regulator_coupler *coupler)
{
return 0;
}
-static inline const char *rdev_get_name(struct regulator_dev *rdev)
-{
- return NULL;
-}
static inline int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state)
diff --git a/include/linux/regulator/da9121.h b/include/linux/regulator/da9121.h
new file mode 100644
index 000000000000..62d9d257dc25
--- /dev/null
+++ b/include/linux/regulator/da9121.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DA9121 Single-channel dual-phase 10A buck converter
+ * DA9130 Single-channel dual-phase 10A buck converter (Automotive)
+ * DA9217 Single-channel dual-phase 6A buck converter
+ * DA9122 Dual-channel single-phase 5A buck converter
+ * DA9131 Dual-channel single-phase 5A buck converter (Automotive)
+ * DA9220 Dual-channel single-phase 3A buck converter
+ * DA9132 Dual-channel single-phase 3A buck converter (Automotive)
+ *
+ * Copyright (C) 2020 Dialog Semiconductor
+ *
+ * Authors: Adam Ward, Dialog Semiconductor
+ */
+
+#ifndef __LINUX_REGULATOR_DA9121_H
+#define __LINUX_REGULATOR_DA9121_H
+
+#include <linux/regulator/machine.h>
+
+struct gpio_desc;
+
+enum {
+ DA9121_IDX_BUCK1,
+ DA9121_IDX_BUCK2,
+ DA9121_IDX_MAX
+};
+
+struct da9121_pdata {
+ int num_buck;
+ struct gpio_desc *gpiod_ren[DA9121_IDX_MAX];
+ struct device_node *reg_node[DA9121_IDX_MAX];
+ struct regulator_init_data *init_data[DA9121_IDX_MAX];
+};
+
+#endif
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
index f90df9ee703e..d58ff273157e 100644
--- a/include/linux/regulator/db8500-prcmu.h
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -35,10 +35,4 @@ enum db8500_regulator_id {
DB8500_NUM_REGULATORS
};
-/*
- * Exported interface for CPUIdle only. This function is called with all
- * interrupts turned off.
- */
-int power_state_active_is_enabled(void);
-
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 8539f34ae42b..22a07c0900a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,14 +40,18 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
+enum regulator_detection_severity {
+	/* Hardware shuts down voltage outputs if the condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
+};
+
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
-{ \
- .min = _min_uV, \
- .min_sel = _min_sel, \
- .max_sel = _max_sel, \
- .step = _step_uV, \
-}
+ LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
/**
* struct regulator_ops - regulator operations.
@@ -78,8 +82,31 @@ enum regulator_status {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ *
+ * - REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ *
+ *	- REGULATOR_SEVERITY_ERR should indicate that the over-current
+ *	  situation is caused by an unrecoverable error but HW does not
+ *	  perform an automatic shut down.
+ *
+ *	- REGULATOR_SEVERITY_WARN should indicate a situation where the
+ *	  hardware is still believed to not be damaged but a board-specific
+ *	  recovery action is needed. If lim_uA is 0 the limit should not
+ *	  be changed but the detection should just be enabled/disabled as
+ *	  requested.
+ *
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ *	voltage situation detection. Detection can be configured for the same
+ *	severities as over current protection. Units of uV.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ *	under voltage situation detection. Detection can be configured for the
+ *	same severities as over current protection. Units of uV.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ *	temperature situation detection. Detection can be configured for the
+ *	same severities as over current protection. Units of degrees Kelvin.
*
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
@@ -143,8 +170,15 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
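
A hedged sketch of a driver wiring up the reworked over-current callback; the EXAMPLE_* register and bit names are hypothetical:

	#define EXAMPLE_OCP_REG		0x12
	#define EXAMPLE_OCP_EN		BIT(0)

	static int example_set_ocp(struct regulator_dev *rdev, int lim_uA,
				   int severity, bool enable)
	{
		if (severity != REGULATOR_SEVERITY_PROT)
			return -EOPNOTSUPP;	/* only HW shutdown is wired up */
		if (lim_uA)
			return -EINVAL;		/* limit is fixed; only on/off */
		return regmap_update_bits(rdev->regmap, EXAMPLE_OCP_REG,
					  EXAMPLE_OCP_EN, enable ? EXAMPLE_OCP_EN : 0);
	}
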
@@ -223,6 +257,8 @@ enum regulator_type {
* @name: Identifying name for the regulator.
* @supply_name: Identifying the regulator supply
* @of_match: Name used to identify regulator in DT.
+ * @of_match_full_name: A flag to indicate that the of_match string, if
+ * present, should be matched against the node full_name.
* @regulators_node: Name of node containing regulator definitions in DT.
* @of_parse_cb: Optional callback called only if of_match is present.
* Will be called for each regulator parsed from DT, during
@@ -251,11 +287,12 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @linear_range_selectors: A constant table of voltage range selectors.
- * If pickable ranges are used each range must
- * have corresponding selector here.
+ * @linear_range_selectors_bitfield: A constant table of voltage range
+ * selectors as bitfield values. If
+ * pickable ranges are used each range
+ * must have corresponding selector here.
* @n_linear_ranges: Number of entries in the @linear_ranges (and in
- * linear_range_selectors if used) table(s).
+ * linear_range_selectors_bitfield if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
* @curr_table: Current limit mapping table (if table based mapping)
*
@@ -302,6 +339,13 @@ enum regulator_type {
* @pull_down_val_on: Enabling value for control when using regmap
* set_pull_down
*
+ * @ramp_reg: Register for controlling the regulator ramp-rate.
+ * @ramp_mask: Bitmask for the ramp-rate control register.
+ * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values
+ *	should be given in units of V/S (uV/uS). See
+ *	regulator_set_ramp_delay_regmap().
+ * @n_ramp_values: Number of elements in @ramp_delay_table.
+ *
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
*
@@ -314,6 +358,7 @@ struct regulator_desc {
const char *name;
const char *supply_name;
const char *of_match;
+ bool of_match_full_name;
const char *regulators_node;
int (*of_parse_cb)(struct device_node *,
const struct regulator_desc *,
@@ -335,7 +380,7 @@ struct regulator_desc {
int min_dropout_uV;
const struct linear_range *linear_ranges;
- const unsigned int *linear_range_selectors;
+ const unsigned int *linear_range_selectors_bitfield;
int n_linear_ranges;
@@ -370,6 +415,10 @@ struct regulator_desc {
unsigned int pull_down_reg;
unsigned int pull_down_mask;
unsigned int pull_down_val_on;
+ unsigned int ramp_reg;
+ unsigned int ramp_mask;
+ const unsigned int *ramp_delay_table;
+ unsigned int n_ramp_values;
unsigned int enable_time;
@@ -406,6 +455,130 @@ struct regulator_config {
struct gpio_desc *ena_gpiod;
};
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator which status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Amount of associated regulators.
+ * @data: Driver data pointer given at regulator_irq_desc.
+ * @opaque: Value storage for IC driver. Core does not update this. ICs
+ * may want to store status register value here at map_event and
+ * compare contents at 'renable' callback to see if new problems
+ * have been added to status. If that is the case it may be
+ * desirable to return REGULATOR_ERROR_CLEARED and not
+ * REGULATOR_ERROR_ON to allow IRQ fire again and to generate
+ * notifications also for the new issues.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ *
+ * @name: The visible name for the IRQ
+ * @fatal_cnt: If this IRQ is used to signal a HW-damaging condition it may
+ *		be best to shut down the regulator(s) or reboot the SoC when
+ *		error handling repeatedly fails. If fatal_cnt is given, IRQ
+ *		handling is aborted after it has failed fatal_cnt times and
+ *		the die() callback (if populated) is called. If die() is not
+ *		populated, a system power-off is attempted in order to
+ *		prevent any further damage.
+ * @reread_ms: The time to wait before the worker attempts to re-read the
+ *		status if reading the IC fails. If no time is given, the
+ *		re-read is done immediately.
+ * @irq_off_ms: The time the IRQ is kept disabled before re-evaluating the
+ *		status, for devices which keep the IRQ disabled for the
+ *		duration of the error. If this is not given, the IRQ is left
+ *		enabled and renable is not called.
+ * @skip_off: If set to true, the IRQ handler will check whether any of the
+ *	      associated regulators are enabled before taking further action.
+ *	      If none of them are enabled, a spurious IRQ is assumed and
+ *	      IRQ_NONE is returned.
+ * @high_prio: Boolean to indicate that a high-priority workqueue should be
+ *	      used.
+ * @data: Driver private data pointer which will be passed as such to
+ * the renable, map_event and die callbacks in regulator_irq_data.
+ * @die: Protection callback. If IC status reading or recovery actions fail
+ *	 fatal_cnt times, this callback is called or the system is powered
+ *	 off. This callback should implement a final protection attempt,
+ *	 such as disabling the regulator. If the protection succeeded, die()
+ *	 may return 0. If anything else is returned, the core assumes that
+ *	 the final protection failed and attempts a power-off as a last
+ *	 resort.
+ * @map_event: Driver callback to map IRQ status into regulator devices with
+ *		events / errors. NOTE: the callback MUST initialize both the
+ *		errors and notifs for all rdevs which it signals as having
+ *		active events, as the core does not clean the map data.
+ *		REGULATOR_FAILED_RETRY can be returned to indicate that
+ *		reading the status from the IC failed. If this is repeated
+ *		fatal_cnt times, the core will call the die() callback or
+ *		power off the system as a last resort to protect the HW.
+ * @renable: Optional callback to check the status (if the HW supports it)
+ *	      before re-enabling the IRQ. If implemented, this should clear
+ *	      the error flags so that errors fetched by
+ *	      regulator_get_error_flags() are updated. If the callback is
+ *	      not implemented, errors are assumed to be cleared and the IRQ
+ *	      is re-enabled. REGULATOR_FAILED_RETRY can be returned to
+ *	      indicate that reading the status from the IC failed. If this
+ *	      is repeated 'fatal_cnt' times, the core will call the die()
+ *	      callback or, if die() is not populated, attempt to power off
+ *	      the system as a last resort to protect the HW.
+ *	      Returning zero indicates that the problem in HW has been
+ *	      solved and the IRQ will be re-enabled. Returning
+ *	      REGULATOR_ERROR_ON indicates that the error condition is
+ *	      still active and keeps the IRQ disabled. Please note that
+ *	      returning REGULATOR_ERROR_ON does not retrigger evaluation of
+ *	      which events are active, nor does it resend notifications. If
+ *	      that is needed, you probably want to return zero and allow
+ *	      the IRQ to retrigger, causing the events to be re-evaluated
+ *	      and re-sent.
+ *
+ * This structure is used for registering the regulator IRQ notification
+ * helper.
+ */
+struct regulator_irq_desc {
+ const char *name;
+ int fatal_cnt;
+ int reread_ms;
+ int irq_off_ms;
+ bool skip_off;
+ bool high_prio;
+ void *data;
+
+ int (*die)(struct regulator_irq_data *rid);
+ int (*map_event)(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
+ int (*renable)(struct regulator_irq_data *rid);
+};
+
+/*
+ * Return values for regulator IRQ helpers.
+ */
+enum {
+ REGULATOR_ERROR_CLEARED,
+ REGULATOR_FAILED_RETRY,
+ REGULATOR_ERROR_ON,
+};
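
As a rough illustration of the contract described above, consider a minimal
map_event sketch for a single rdev. my_read_status() and MY_OCP_BIT are
hypothetical stand-ins for driver-specific status handling; returning 0
reports a successfully mapped status, in the style of
regulator_irq_map_event_simple() declared below:

	static int my_map_event(int irq, struct regulator_irq_data *rid,
				unsigned long *dev_mask)
	{
		struct regulator_err_state *state = &rid->states[0];
		unsigned int status;

		/* Hypothetical regmap read of the IRQ status register */
		if (my_read_status(rid->data, &status))
			return REGULATOR_FAILED_RETRY;

		*dev_mask = 0;
		state->errors = 0;
		state->notifs = 0;

		if (status & MY_OCP_BIT) {
			/* MUST fill both errors and notifs for signalled rdevs */
			*dev_mask = BIT(0);
			state->errors = REGULATOR_ERROR_OVER_CURRENT;
			state->notifs = REGULATOR_EVENT_OVER_CURRENT;
		}

		return 0;
	}
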
+
/*
* struct coupling_desc
*
@@ -469,21 +642,70 @@ struct regulator_dev {
unsigned int is_switch:1;
/* time when this regulator was disabled last time */
- unsigned long last_off_jiffy;
+ ktime_t last_off;
+ int cached_err;
+ bool use_cached_err;
+ spinlock_t err_lock;
};
+/*
+ * Convert error flags to corresponding notifications.
+ *
+ * Can be used by drivers which use the notification helpers to find out the
+ * correct notification flags based on the error flags. Drivers can avoid
+ * storing both the supported notification and error flags, which may save a
+ * few bytes.
+ */
+static inline int regulator_err2notif(int err)
+{
+ switch (err) {
+ case REGULATOR_ERROR_UNDER_VOLTAGE:
+ return REGULATOR_EVENT_UNDER_VOLTAGE;
+ case REGULATOR_ERROR_OVER_CURRENT:
+ return REGULATOR_EVENT_OVER_CURRENT;
+ case REGULATOR_ERROR_REGULATION_OUT:
+ return REGULATOR_EVENT_REGULATION_OUT;
+ case REGULATOR_ERROR_FAIL:
+ return REGULATOR_EVENT_FAIL;
+ case REGULATOR_ERROR_OVER_TEMP:
+ return REGULATOR_EVENT_OVER_TEMP;
+ case REGULATOR_ERROR_UNDER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_CURRENT_WARN:
+ return REGULATOR_EVENT_OVER_CURRENT_WARN;
+ case REGULATOR_ERROR_OVER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_TEMP_WARN:
+ return REGULATOR_EVENT_OVER_TEMP_WARN;
+ }
+ return 0;
+}
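
A sketch of the intended use: a driver that stores only its supported error
flags can derive the matching notification mask bit by bit (the my_* name is
hypothetical):

	static unsigned long my_errs_to_notifs(int errs)
	{
		unsigned long notifs = 0;
		int i;

		/* Translate each set error bit into its event counterpart */
		for (i = 0; i < BITS_PER_TYPE(int); i++)
			if (errs & BIT(i))
				notifs |= regulator_err2notif(BIT(i));

		return notifs;
	}
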
+
struct regulator_dev *
-regulator_register(const struct regulator_desc *regulator_desc,
+regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
struct regulator_dev *
devm_regulator_register(struct device *dev,
const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
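
A minimal probe-time sketch of the devm variant, reusing the hypothetical
my_buck_desc from the earlier sketch and assuming the regmap lives in an MFD
parent:

	static int my_regulator_probe(struct platform_device *pdev)
	{
		struct regulator_config cfg = { };
		struct regulator_dev *rdev;

		cfg.dev = &pdev->dev;
		cfg.regmap = dev_get_regmap(pdev->dev.parent, NULL);

		rdev = devm_regulator_register(&pdev->dev, &my_buck_desc, &cfg);
		if (IS_ERR(rdev))
			return dev_err_probe(&pdev->dev, PTR_ERR(rdev),
					     "failed to register my-buck\n");

		return 0;
	}
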
void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs, struct regulator_dev **rdev,
+ int rdev_amount);
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount);
+void regulator_irq_helper_cancel(void **handle);
+int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
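
Tying the pieces together, a sketch of registering the notification helper for
one rdev that can signal over-current errors. The irq_off_ms value and the
my_* names are hypothetical; my_map_event is the sketch shown earlier:

	static const struct regulator_irq_desc my_irq_desc = {
		.name = "my-pmic-ocp",
		.map_event = my_map_event,
		.irq_off_ms = 1000,	/* re-evaluate after one second */
	};

	/* In probe, after devm_regulator_register() has produced 'rdev': */
	helper = devm_regulator_irq_helper(&pdev->dev, &my_irq_desc, irq, 0,
					   REGULATOR_ERROR_OVER_CURRENT, NULL,
					   &rdev, 1);
	if (IS_ERR(helper))
		return PTR_ERR(helper);
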
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
@@ -532,9 +754,10 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
int min_uA, int max_uA);
int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
-
-void regulator_lock(struct regulator_dev *rdev);
-void regulator_unlock(struct regulator_dev *rdev);
+int regulator_find_closest_bigger(unsigned int target, const unsigned int *table,
+ unsigned int num_sel, unsigned int *sel);
+int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
/*
* Helper functions intended to be used by regulator drivers prior registering
@@ -543,4 +766,16 @@ void regulator_unlock(struct regulator_dev *rdev);
int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
unsigned int selector);
+int regulator_desc_list_voltage_linear(const struct regulator_desc *desc,
+ unsigned int selector);
+
+#ifdef CONFIG_REGULATOR
+const char *rdev_get_name(struct regulator_dev *rdev);
+#else
+static inline const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
index fdeb312cdabd..c223e50ff9f7 100644
--- a/include/linux/regulator/gpio-regulator.h
+++ b/include/linux/regulator/gpio-regulator.h
@@ -42,6 +42,7 @@ struct gpio_regulator_state {
/**
* struct gpio_regulator_config - config structure
* @supply_name: Name of the regulator supply
+ * @input_supply: Name of the input regulator supply
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
@@ -62,6 +63,7 @@ struct gpio_regulator_state {
*/
struct gpio_regulator_config {
const char *supply_name;
+ const char *input_supply;
unsigned enabled_at_boot:1;
unsigned startup_delay;
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index d780dbb8b423..b62e45aa1dd3 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -10,7 +10,7 @@
#include <linux/regulator/machine.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#define LP872X_MAX_REGULATORS 9
@@ -40,11 +40,6 @@ enum lp872x_regulator_id {
LP872X_ID_MAX,
};
-enum lp872x_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
-};
-
enum lp872x_dvs_sel {
SEL_V1,
SEL_V2,
@@ -52,14 +47,14 @@ enum lp872x_dvs_sel {
/**
* lp872x_dvs
- * @gpio : gpio pin number for dvs control
+ * @gpio : gpio descriptor for dvs control
* @vsel : dvs selector for buck v1 or buck v2 register
* @init_state : initial dvs pin state
*/
struct lp872x_dvs {
- int gpio;
+ struct gpio_desc *gpio;
enum lp872x_dvs_sel vsel;
- enum lp872x_dvs_state init_state;
+ enum gpiod_flags init_state;
};
/**
@@ -78,14 +73,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
- * @enable_gpio : gpio pin number for enable control
+ * @enable_gpio : gpio descriptor for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
- int enable_gpio;
+ struct gpio_desc *enable_gpio;
};
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 8a56f033b6cd..0cd76d264727 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -49,6 +49,13 @@ struct regulator;
#define DISABLE_IN_SUSPEND 1
#define ENABLE_IN_SUSPEND 2
+/*
+ * Default time window (in milliseconds) following a critical under-voltage
+ * event during which less critical actions can be safely carried out by the
+ * system.
+ */
+#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -83,6 +90,14 @@ struct regulator_state {
bool changeable;
};
+#define REGULATOR_NOTIF_LIMIT_DISABLE -1
+#define REGULATOR_NOTIF_LIMIT_ENABLE -2
+struct notification_limit {
+ int prot;
+ int err;
+ int warn;
+};
+
/**
* struct regulation_constraints - regulator operating constraints.
*
@@ -100,6 +115,11 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @over_curr_limits: Limits for acting on over current.
+ * @over_voltage_limits: Limits for acting on over voltage.
+ * @under_voltage_limits: Limits for acting on under voltage.
+ * @temp_limits: Limits for acting on over temperature.
+ *
* @max_spread: Max possible spread between coupled regulators
* @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
@@ -114,8 +134,15 @@ struct regulator_state {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @system_critical: Set if the regulator is critical to system stability or
+ * functionality.
* @over_current_protection: Auto disable on over current event.
*
+ * @over_current_detection: Configure over current limits.
+ * @over_voltage_detection: Configure over voltage limits.
+ * @under_voltage_detection: Configure under voltage limits.
+ * @over_temp_detection: Configure over temperature limits.
+ *
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
* @state_disk: State for regulator when system is suspended in disk mode.
@@ -135,6 +162,13 @@ struct regulator_state {
* regulator_active_discharge values are used for
* initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
+ * @uv_less_critical_window_ms: Specifies the time window (in milliseconds)
+ * following a critical under-voltage (UV) event
+ * during which less critical actions can be
+ * safely carried out by the system (for example
+ * logging). After this time window, more
+ * critical actions (for example preventing
+ * HW damage) should be taken.
*/
struct regulation_constraints {
@@ -172,6 +206,10 @@ struct regulation_constraints {
struct regulator_state state_disk;
struct regulator_state state_mem;
struct regulator_state state_standby;
+ struct notification_limit over_curr_limits;
+ struct notification_limit over_voltage_limits;
+ struct notification_limit under_voltage_limits;
+ struct notification_limit temp_limits;
suspend_state_t initial_state; /* suspend state to set at init */
/* mode to set on startup */
@@ -182,6 +220,7 @@ struct regulation_constraints {
unsigned int settling_time_up;
unsigned int settling_time_down;
unsigned int enable_time;
+ unsigned int uv_less_critical_window_ms;
unsigned int active_discharge;
@@ -192,7 +231,12 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned system_critical:1; /* critical to system stability */
unsigned over_current_protection:1; /* auto disable on over current */
+ unsigned over_current_detection:1; /* notify on over current */
+ unsigned over_voltage_detection:1; /* notify on over voltage */
+ unsigned under_voltage_detection:1; /* notify on under voltage */
+ unsigned over_temp_detection:1; /* notify on over temperature */
};
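
These constraints are normally parsed from the devicetree, but the new limit
fields can also be filled in directly by machine code. A sketch with
hypothetical thresholds, disabling the WARN and protection levels and
requesting an ERROR level notification at 850 mV:

	static struct regulator_init_data my_buck_init_data = {
		.constraints = {
			.min_uV = 900000,
			.max_uV = 1100000,
			.system_critical = 1,
			.under_voltage_detection = 1,
			.uv_less_critical_window_ms =
				REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS,
			.under_voltage_limits = {
				.err = 850000,
				.warn = REGULATOR_NOTIF_LIMIT_DISABLE,
				.prot = REGULATOR_NOTIF_LIMIT_DISABLE,
			},
		},
	};
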
/**
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index 8313e7ed6aec..a225e9eeb30d 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -48,10 +48,6 @@
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
* device register.
- * @enable_gpio: Enable GPIO. If EN pin is controlled through GPIO from host
- * then GPIO number can be provided. If no GPIO controlled then
- * it should be -1.
- * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
* @dvs_def_state: Default state of dvs. 1 if it is high else 0.
*/
struct max8973_regulator_platform_data {
@@ -59,8 +55,6 @@ struct max8973_regulator_platform_data {
unsigned long control_flags;
unsigned long junction_temp_warning;
bool enable_ext_control;
- int enable_gpio;
- int dvs_gpio;
unsigned dvs_def_state:1;
};
diff --git a/include/linux/regulator/mt6315-regulator.h b/include/linux/regulator/mt6315-regulator.h
new file mode 100644
index 000000000000..3b80d3f3910c
--- /dev/null
+++ b/include/linux/regulator/mt6315-regulator.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6315_H
+#define __LINUX_REGULATOR_MT6315_H
+
+#define MT6315_RP 3
+#define MT6315_PP 6
+#define MT6315_SP 7
+
+enum {
+ MT6315_VBUCK1 = 0,
+ MT6315_VBUCK2,
+ MT6315_VBUCK3,
+ MT6315_VBUCK4,
+ MT6315_VBUCK_MAX,
+};
+
+/* Register */
+#define MT6315_TOP2_ELR7 0x139
+#define MT6315_TOP_TMA_KEY 0x39F
+#define MT6315_TOP_TMA_KEY_H 0x3A0
+#define MT6315_BUCK_TOP_CON0 0x1440
+#define MT6315_BUCK_TOP_CON1 0x1443
+#define MT6315_BUCK_TOP_ELR0 0x1449
+#define MT6315_BUCK_TOP_ELR2 0x144B
+#define MT6315_BUCK_TOP_ELR4 0x144D
+#define MT6315_BUCK_TOP_ELR6 0x144F
+#define MT6315_VBUCK1_DBG0 0x1499
+#define MT6315_VBUCK1_DBG4 0x149D
+#define MT6315_VBUCK2_DBG0 0x1519
+#define MT6315_VBUCK2_DBG4 0x151D
+#define MT6315_VBUCK3_DBG0 0x1599
+#define MT6315_VBUCK3_DBG4 0x159D
+#define MT6315_VBUCK4_DBG0 0x1619
+#define MT6315_VBUCK4_DBG4 0x161D
+#define MT6315_BUCK_TOP_4PHASE_ANA_CON42 0x16B1
+
+#define PROTECTION_KEY_H 0x9C
+#define PROTECTION_KEY 0xEA
+
+#endif /* __LINUX_REGULATOR_MT6315_H */
diff --git a/include/linux/regulator/mt6331-regulator.h b/include/linux/regulator/mt6331-regulator.h
new file mode 100644
index 000000000000..2801a9879c14
--- /dev/null
+++ b/include/linux/regulator/mt6331-regulator.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6331_H
+#define __LINUX_REGULATOR_MT6331_H
+
+enum {
+ /* BUCK */
+ MT6331_ID_VDVFS11 = 0,
+ MT6331_ID_VDVFS12,
+ MT6331_ID_VDVFS13,
+ MT6331_ID_VDVFS14,
+ MT6331_ID_VCORE2,
+ MT6331_ID_VIO18,
+ /* LDO */
+ MT6331_ID_VTCXO1,
+ MT6331_ID_VTCXO2,
+ MT6331_ID_AVDD32_AUD,
+ MT6331_ID_VAUXA32,
+ MT6331_ID_VCAMA,
+ MT6331_ID_VIO28,
+ MT6331_ID_VCAM_AF,
+ MT6331_ID_VMC,
+ MT6331_ID_VMCH,
+ MT6331_ID_VEMC33,
+ MT6331_ID_VGP1,
+ MT6331_ID_VSIM1,
+ MT6331_ID_VSIM2,
+ MT6331_ID_VMIPI,
+ MT6331_ID_VIBR,
+ MT6331_ID_VGP4,
+ MT6331_ID_VCAMD,
+ MT6331_ID_VUSB10,
+ MT6331_ID_VCAM_IO,
+ MT6331_ID_VSRAM_DVFS1,
+ MT6331_ID_VGP2,
+ MT6331_ID_VGP3,
+ MT6331_ID_VRTC,
+ MT6331_ID_VDIG18,
+ MT6331_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6331_H */
diff --git a/include/linux/regulator/mt6332-regulator.h b/include/linux/regulator/mt6332-regulator.h
new file mode 100644
index 000000000000..af5e3ed31029
--- /dev/null
+++ b/include/linux/regulator/mt6332-regulator.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6332_H
+#define __LINUX_REGULATOR_MT6332_H
+
+enum {
+ /* BUCK */
+ MT6332_ID_VDRAM = 0,
+ MT6332_ID_VDVFS2,
+ MT6332_ID_VPA,
+ MT6332_ID_VRF1,
+ MT6332_ID_VRF2,
+ MT6332_ID_VSBST,
+ /* LDO */
+ MT6332_ID_VAUXB32,
+ MT6332_ID_VBIF28,
+ MT6332_ID_VDIG18,
+ MT6332_ID_VSRAM_DVFS2,
+ MT6332_ID_VUSB33,
+ MT6332_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6332_H */
diff --git a/include/linux/regulator/mt6357-regulator.h b/include/linux/regulator/mt6357-regulator.h
new file mode 100644
index 000000000000..238b1ee77ea6
--- /dev/null
+++ b/include/linux/regulator/mt6357-regulator.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6357_H
+#define __LINUX_REGULATOR_MT6357_H
+
+enum {
+ /* Bucks */
+ MT6357_ID_VCORE,
+ MT6357_ID_VMODEM,
+ MT6357_ID_VPA,
+ MT6357_ID_VPROC,
+ MT6357_ID_VS1,
+
+ /* LDOs */
+ MT6357_ID_VAUX18,
+ MT6357_ID_VAUD28,
+ MT6357_ID_VCAMA,
+ MT6357_ID_VCAMD,
+ MT6357_ID_VCAMIO,
+ MT6357_ID_VCN18,
+ MT6357_ID_VCN28,
+ MT6357_ID_VCN33_BT,
+ MT6357_ID_VCN33_WIFI,
+ MT6357_ID_VDRAM,
+ MT6357_ID_VEFUSE,
+ MT6357_ID_VEMC,
+ MT6357_ID_VFE28,
+ MT6357_ID_VIBR,
+ MT6357_ID_VIO18,
+ MT6357_ID_VIO28,
+ MT6357_ID_VLDO28,
+ MT6357_ID_VMC,
+ MT6357_ID_VMCH,
+ MT6357_ID_VRF12,
+ MT6357_ID_VRF18,
+ MT6357_ID_VSIM1,
+ MT6357_ID_VSIM2,
+ MT6357_ID_VSRAM_OTHERS,
+ MT6357_ID_VSRAM_PROC,
+ MT6357_ID_VUSB33,
+ MT6357_ID_VXO22,
+
+ MT6357_ID_RG_MAX,
+};
+
+#define MT6357_MAX_REGULATOR MT6357_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6357_H */
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
index 1cc304946d09..562386f9b80e 100644
--- a/include/linux/regulator/mt6358-regulator.h
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -41,8 +41,7 @@ enum {
MT6358_ID_VIO28,
MT6358_ID_VA12,
MT6358_ID_VRF18,
- MT6358_ID_VCN33_BT,
- MT6358_ID_VCN33_WIFI,
+ MT6358_ID_VCN33,
MT6358_ID_VCAMA2,
MT6358_ID_VMC,
MT6358_ID_VLDO28,
@@ -51,6 +50,49 @@ enum {
MT6358_ID_RG_MAX,
};
+enum {
+ MT6366_ID_VDRAM1 = 0,
+ MT6366_ID_VCORE,
+ MT6366_ID_VPA,
+ MT6366_ID_VPROC11,
+ MT6366_ID_VPROC12,
+ MT6366_ID_VGPU,
+ MT6366_ID_VS2,
+ MT6366_ID_VMODEM,
+ MT6366_ID_VS1,
+ MT6366_ID_VDRAM2,
+ MT6366_ID_VSIM1,
+ MT6366_ID_VIBR,
+ MT6366_ID_VRF12,
+ MT6366_ID_VIO18,
+ MT6366_ID_VUSB,
+ MT6366_ID_VCN18,
+ MT6366_ID_VFE28,
+ MT6366_ID_VSRAM_PROC11,
+ MT6366_ID_VCN28,
+ MT6366_ID_VSRAM_OTHERS,
+ MT6366_ID_VSRAM_GPU,
+ MT6366_ID_VXO22,
+ MT6366_ID_VEFUSE,
+ MT6366_ID_VAUX18,
+ MT6366_ID_VMCH,
+ MT6366_ID_VBIF28,
+ MT6366_ID_VSRAM_PROC12,
+ MT6366_ID_VEMC,
+ MT6366_ID_VIO28,
+ MT6366_ID_VA12,
+ MT6366_ID_VRF18,
+ MT6366_ID_VCN33,
+ MT6366_ID_VMC,
+ MT6366_ID_VAUD28,
+ MT6366_ID_VSIM2,
+ MT6366_ID_VM18,
+ MT6366_ID_VMDDR,
+ MT6366_ID_VSRAM_CORE,
+ MT6366_ID_RG_MAX,
+};
+
#define MT6358_MAX_REGULATOR MT6358_ID_RG_MAX
+#define MT6366_MAX_REGULATOR MT6366_ID_RG_MAX
#endif /* __LINUX_REGULATOR_MT6358_H */
diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h
new file mode 100644
index 000000000000..6d6e5a58f482
--- /dev/null
+++ b/include/linux/regulator/mt6359-regulator.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6359_H
+#define __LINUX_REGULATOR_MT6359_H
+
+enum {
+ MT6359_ID_VS1 = 0,
+ MT6359_ID_VGPU11,
+ MT6359_ID_VMODEM,
+ MT6359_ID_VPU,
+ MT6359_ID_VCORE,
+ MT6359_ID_VS2,
+ MT6359_ID_VPA,
+ MT6359_ID_VPROC2,
+ MT6359_ID_VPROC1,
+ MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VAUD18 = 10,
+ MT6359_ID_VSIM1,
+ MT6359_ID_VIBR,
+ MT6359_ID_VRF12,
+ MT6359_ID_VUSB,
+ MT6359_ID_VSRAM_PROC2,
+ MT6359_ID_VIO18,
+ MT6359_ID_VCAMIO,
+ MT6359_ID_VCN18,
+ MT6359_ID_VFE28,
+ MT6359_ID_VCN13,
+ MT6359_ID_VCN33_1_BT,
+ MT6359_ID_VCN33_1_WIFI,
+ MT6359_ID_VAUX18,
+ MT6359_ID_VSRAM_OTHERS,
+ MT6359_ID_VEFUSE,
+ MT6359_ID_VXO22,
+ MT6359_ID_VRFCK,
+ MT6359_ID_VBIF28,
+ MT6359_ID_VIO28,
+ MT6359_ID_VEMC,
+ MT6359_ID_VCN33_2_BT,
+ MT6359_ID_VCN33_2_WIFI,
+ MT6359_ID_VA12,
+ MT6359_ID_VA09,
+ MT6359_ID_VRF18,
+ MT6359_ID_VSRAM_MD,
+ MT6359_ID_VUFS,
+ MT6359_ID_VM18,
+ MT6359_ID_VBBCK,
+ MT6359_ID_VSRAM_PROC1,
+ MT6359_ID_VSIM2,
+ MT6359_ID_VSRAM_OTHERS_SSHUB,
+ MT6359_ID_RG_MAX,
+};
+
+#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6359_H */
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index 1bbd3014f906..505c908dbb81 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -147,6 +147,9 @@ enum {
#define BUCK6_FPWM 0x04
#define BUCK6_ENMODE_MASK 0x03
+/* PCA9450_REG_BUCK123_PRESET_EN bit */
+#define BUCK123_PRESET_EN 0x80
+
/* PCA9450_BUCK1OUT_DVS0 bits */
#define BUCK1OUT_DVS0_MASK 0x7F
#define BUCK1OUT_DVS0_DEFAULT 0x14
@@ -193,11 +196,11 @@ enum {
/* PCA9450_REG_LDO3_VOLT bits */
#define LDO3_EN_MASK 0xC0
-#define LDO3OUT_MASK 0x0F
+#define LDO3OUT_MASK 0x1F
/* PCA9450_REG_LDO4_VOLT bits */
#define LDO4_EN_MASK 0xC0
-#define LDO4OUT_MASK 0x0F
+#define LDO4OUT_MASK 0x1F
/* PCA9450_REG_LDO5_VOLT bits */
#define LDO5L_EN_MASK 0xC0
@@ -216,4 +219,18 @@ enum {
#define IRQ_THERM_105 0x02
#define IRQ_THERM_125 0x01
+/* PCA9450_REG_RESET_CTRL bits */
+#define WDOG_B_CFG_MASK 0xC0
+#define WDOG_B_CFG_NONE 0x00
+#define WDOG_B_CFG_WARM 0x40
+#define WDOG_B_CFG_COLD_LDO12 0x80
+#define WDOG_B_CFG_COLD 0xC0
+
+/* PCA9450_REG_CONFIG2 bits */
+#define I2C_LT_MASK 0x03
+#define I2C_LT_FORCE_DISABLE 0x00
+#define I2C_LT_ON_STANDBY_RUN 0x01
+#define I2C_LT_ON_RUN 0x02
+#define I2C_LT_FORCE_ENABLE 0x03
+
#endif /* __LINUX_REG_PCA9450_H__ */
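
A sketch of using the new reset-control bits, assuming the driver already
holds a regmap for the device and that these bits belong to the
PCA9450_REG_RESET_CTRL register defined elsewhere in this header:

	/* Select the cold-reset (LDO12) variant on WDOG_B assertion */
	static int my_pca9450_setup_wdog_b(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, PCA9450_REG_RESET_CTRL,
					  WDOG_B_CFG_MASK,
					  WDOG_B_CFG_COLD_LDO12);
	}
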
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index d47e668d9ca8..c964fe8ab698 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -63,10 +63,4 @@
#define PFUZE3001_VLDO3 8
#define PFUZE3001_VLDO4 9
-struct regulator_init_data;
-
-struct pfuze_regulator_platform_data {
- struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
-};
-
#endif /* __LINUX_REG_PFUZE100_H */
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
index 94a90c06f1e5..398e74a1d941 100644
--- a/include/linux/regulator/tps62360.h
+++ b/include/linux/regulator/tps62360.h
@@ -19,10 +19,6 @@
* @en_discharge: Enable discharge the output capacitor via internal
* register.
* @en_internal_pulldn: internal pull down enable or not.
- * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with
- * fixed logic.
- * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with
- * fixed logic.
* @vsel0_def_state: Default state of vsel0. 1 if it is high else 0.
* @vsel1_def_state: Default state of vsel1. 1 if it is high else 0.
*/
@@ -30,8 +26,6 @@ struct tps62360_regulator_platform_data {
struct regulator_init_data *reg_init_data;
bool en_discharge;
bool en_internal_pulldn;
- int vsel0_gpio;
- int vsel1_gpio;
int vsel0_def_state;
int vsel1_def_state;
};
diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h
index b5dba0628951..2249ee697f8b 100644
--- a/include/linux/regulator/userspace-consumer.h
+++ b/include/linux/regulator/userspace-consumer.h
@@ -21,6 +21,7 @@ struct regulator_userspace_consumer_data {
struct regulator_bulk_data *supplies;
bool init_on;
+ bool no_autoswitch;
};
#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */
diff --git a/include/linux/relay.h b/include/linux/relay.h
index e13a333e7c37..72b876dd5cb8 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -62,7 +62,7 @@ struct rchan
size_t subbuf_size; /* sub-buffer size */
size_t n_subbufs; /* number of sub-buffers per buffer */
size_t alloc_size; /* total buffer size allocated */
- struct rchan_callbacks *cb; /* client callbacks */
+ const struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
@@ -89,6 +89,8 @@ struct rchan_callbacks
* The client should return 1 to continue logging, 0 to stop
* logging.
*
+ * This callback is optional.
+ *
* NOTE: subbuf_start will also be invoked when the buffer is
* created, so that the first sub-buffer can be initialized
* if necessary. In this case, prev_subbuf will be NULL.
@@ -102,25 +104,6 @@ struct rchan_callbacks
size_t prev_padding);
/*
- * buf_mapped - relay buffer mmap notification
- * @buf: the channel buffer
- * @filp: relay file pointer
- *
- * Called when a relay file is successfully mmapped
- */
- void (*buf_mapped)(struct rchan_buf *buf,
- struct file *filp);
-
- /*
- * buf_unmapped - relay buffer unmap notification
- * @buf: the channel buffer
- * @filp: relay file pointer
- *
- * Called when a relay file is successfully unmapped
- */
- void (*buf_unmapped)(struct rchan_buf *buf,
- struct file *filp);
- /*
* create_buf_file - create file to represent a relay channel buffer
* @filename: the name of the file to create
* @parent: the parent of the file to create
@@ -141,6 +124,8 @@ struct rchan_callbacks
* cause relay_open() to create a single global buffer rather
* than the default set of per-cpu buffers.
*
+ * This callback is mandatory.
+ *
* See Documentation/filesystems/relay.rst for more info.
*/
struct dentry *(*create_buf_file)(const char *filename,
@@ -158,6 +143,8 @@ struct rchan_callbacks
* channel buffer.
*
* The callback should return 0 if successful, negative if not.
+ *
+ * This callback is mandatory.
*/
int (*remove_buf_file)(struct dentry *dentry);
};
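
With buf_mapped/buf_unmapped gone, a complete client now needs only the two
mandatory callbacks. A minimal sketch backed by debugfs and the stock
relay_file_operations; note the const qualifier matching the constified
relay_open() below:

	static struct dentry *my_create_buf_file(const char *filename,
						 struct dentry *parent,
						 umode_t mode,
						 struct rchan_buf *buf,
						 int *is_global)
	{
		return debugfs_create_file(filename, mode, parent, buf,
					   &relay_file_operations);
	}

	static int my_remove_buf_file(struct dentry *dentry)
	{
		debugfs_remove(dentry);
		return 0;
	}

	static const struct rchan_callbacks my_relay_cbs = {
		.create_buf_file = my_create_buf_file,
		.remove_buf_file = my_remove_buf_file,
	};

	/* chan = relay_open("my-trace", NULL, 256 * 1024, 4, &my_relay_cbs, NULL); */
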
@@ -170,7 +157,7 @@ struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
size_t subbuf_size,
size_t n_subbufs,
- struct rchan_callbacks *cb,
+ const struct rchan_callbacks *cb,
void *private_data);
extern int relay_late_setup_files(struct rchan *chan,
const char *base_filename,
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 2fa68bf5aa4f..b4795698d8c2 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -243,7 +243,7 @@ struct fw_rsc_trace {
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
- * @notifyid is a unique rproc-wide notify index for this vring. This notify
+ * @notifyid: a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
* @pa: physical address
@@ -266,18 +266,18 @@ struct fw_rsc_vdev_vring {
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
- * @notifyid is a unique rproc-wide notify index for this vdev. This notify
+ * @notifyid: a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote processor, to let it know that the
* status/features of this vdev have changed.
- * @dfeatures specifies the virtio device features supported by the firmware
- * @gfeatures is a place holder used by the host to write back the
+ * @dfeatures: specifies the virtio device features supported by the firmware
+ * @gfeatures: a place holder used by the host to write back the
* negotiated features that are supported by both sides.
- * @config_len is the size of the virtio config space of this vdev. The config
+ * @config_len: the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
- * @status is a place holder where the host will indicate its virtio progress.
- * @num_of_vrings indicates how many vrings are described in this vdev header
+ * @status: a place holder where the host will indicate its virtio progress.
+ * @num_of_vrings: indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
- * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
+ * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote processors
@@ -287,16 +287,17 @@ struct fw_rsc_vdev_vring {
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
- * Note: unlike virtualization systems, the term 'host' here means
- * the Linux side which is running remoteproc to control the remote
- * processors. We use the name 'gfeatures' to comply with virtio's terms,
- * though there isn't really any virtualized guest OS here: it's the host
- * which is responsible for negotiating the final features.
- * Yeah, it's a bit confusing.
- *
- * Note: immediately following this structure is the virtio config space for
- * this vdev (which is specific to the vdev; for more info, read the virtio
- * spec). the size of the config space is specified by @config_len.
+ * Note:
+ * 1. unlike virtualization systems, the term 'host' here means
+ * the Linux side which is running remoteproc to control the remote
+ * processors. We use the name 'gfeatures' to comply with virtio's terms,
+ * though there isn't really any virtualized guest OS here: it's the host
+ * which is responsible for negotiating the final features.
+ * Yeah, it's a bit confusing.
+ *
+ * 2. immediately following this structure is the virtio config space for
+ * this vdev (which is specific to the vdev; for more info, read the virtio
+ * spec). The size of the config space is specified by @config_len.
*/
struct fw_rsc_vdev {
u32 id;
@@ -315,6 +316,7 @@ struct rproc;
/**
* struct rproc_mem_entry - memory entry descriptor
* @va: virtual address
+ * @is_iomem: io memory
* @dma: dma address
* @len: length, in bytes
* @da: device address
@@ -329,6 +331,7 @@ struct rproc;
*/
struct rproc_mem_entry {
void *va;
+ bool is_iomem;
dma_addr_t dma;
size_t len;
u32 da;
@@ -361,20 +364,23 @@ enum rsc_handling_status {
* @start: power on the device and boot it
* @stop: power off the device
* @attach: attach to a device that is already powered up
+ * @detach: detach from a device, leaving it powered up
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
* @handle_rsc: optional platform hook to handle vendor resources. Should return
- * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a
- * negative value on error
- * @load_rsc_table: load resource table from firmware image
- * @find_loaded_rsc_table: find the loaded resouce table
+ * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled
+ * and a negative value on error
+ * @find_loaded_rsc_table: find the loaded resource table from firmware image
+ * @get_loaded_rsc_table: get resource table installed in memory
+ * by external entity
* @load: load firmware to memory, where the remote processor
* expects to find it
* @sanity_check: sanity check the fw image
* @get_boot_addr: get boot address to entry point specified in firmware
* @panic: optional callback to react to system panic, core will delay
* panic at least the returned number of milliseconds
+ * @coredump: collect firmware dump after the subsystem is shut down
*/
struct rproc_ops {
int (*prepare)(struct rproc *rproc);
@@ -382,17 +388,21 @@ struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
int (*attach)(struct rproc *rproc);
+ int (*detach)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
- void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len);
+ void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
int offset, int avail);
struct resource_table *(*find_loaded_rsc_table)(
struct rproc *rproc, const struct firmware *fw);
+ struct resource_table *(*get_loaded_rsc_table)(
+ struct rproc *rproc, size_t *size);
int (*load)(struct rproc *rproc, const struct firmware *fw);
int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
unsigned long (*panic)(struct rproc *rproc);
+ void (*coredump)(struct rproc *rproc);
};
/**
@@ -403,6 +413,8 @@ struct rproc_ops {
* @RPROC_RUNNING: device is up and running
* @RPROC_CRASHED: device has crashed; need to start recovery
* @RPROC_DELETED: device is deleted
+ * @RPROC_ATTACHED: device has been booted by another entity and the core
+ * has attached to it
* @RPROC_DETACHED: device has been booted by another entity and waiting
* for the core to attach to it
* @RPROC_LAST: just keep this one at the end
@@ -419,15 +431,16 @@ enum rproc_state {
RPROC_RUNNING = 2,
RPROC_CRASHED = 3,
RPROC_DELETED = 4,
- RPROC_DETACHED = 5,
- RPROC_LAST = 6,
+ RPROC_ATTACHED = 5,
+ RPROC_DETACHED = 6,
+ RPROC_LAST = 7,
};
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
* @RPROC_WATCHDOG: watchdog bite
- * @RPROC_FATAL_ERROR fatal error
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index, so the value of the
* elements should always be something sane.
@@ -442,16 +455,16 @@ enum rproc_crash_type {
/**
* enum rproc_dump_mechanism - Coredump options for core
- * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with
- recovery
- * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
- recovery until all segments are read
* @RPROC_COREDUMP_DISABLED: Don't perform any dump
+ * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with
+ * recovery
+ * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
+ * recovery until all segments are read
*/
enum rproc_dump_mechanism {
- RPROC_COREDUMP_DEFAULT,
- RPROC_COREDUMP_INLINE,
RPROC_COREDUMP_DISABLED,
+ RPROC_COREDUMP_ENABLED,
+ RPROC_COREDUMP_INLINE,
};
/**
@@ -462,6 +475,7 @@ enum rproc_dump_mechanism {
* @priv: private data associated with the dump_segment
* @dump: custom dump function to fill device memory segment associated
* with coredump
+ * @offset: offset of the segment
*/
struct rproc_dump_segment {
struct list_head node;
@@ -476,6 +490,20 @@ struct rproc_dump_segment {
};
/**
+ * enum rproc_features - features supported
+ *
+ * @RPROC_FEAT_ATTACH_ON_RECOVERY: The remote processor does not need help
+ * from Linux to recover, such as firmware
+ * loading. Linux just needs to attach after
+ * recovery.
+ */
+enum rproc_features {
+ RPROC_FEAT_ATTACH_ON_RECOVERY,
+ RPROC_MAX_FEATURES,
+};
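
A sketch of how a platform driver might advertise this capability, shown with
direct bitmap accessors on the @features field declared in struct rproc below:

	/* In the platform driver, after allocating the rproc: */
	set_bit(RPROC_FEAT_ATTACH_ON_RECOVERY, rproc->features);

	/* The core can then recover by re-attaching instead of reloading: */
	bool can_reattach = test_bit(RPROC_FEAT_ATTACH_ON_RECOVERY,
				     rproc->features);
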
+
+/**
* struct rproc - represents a physical remote processor device
* @node: list node of this rproc object
* @domain: iommu domain
@@ -503,15 +531,20 @@ struct rproc_dump_segment {
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect
+ * @clean_table: copy of the resource table without modifications. Used
+ * when a remote processor is attached to or detached from the core
* @cached_table: copy of the resource table
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
* @auto_boot: flag to indicate if remote processor should be auto-started
- * @autonomous: true if an external entity has booted the remote processor
+ * @sysfs_read_only: flag to make remoteproc sysfs files read only
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
- * @char_dev: character device of the rproc
+ * @elf_class: firmware ELF class
+ * @elf_machine: firmware ELF machine
+ * @cdev: character device of the rproc
* @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @cdev release
+ * @features: indicate remoteproc features
*/
struct rproc {
struct list_head node;
@@ -540,17 +573,19 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
+ struct resource_table *clean_table;
struct resource_table *cached_table;
size_t table_sz;
bool has_iommu;
bool auto_boot;
- bool autonomous;
+ bool sysfs_read_only;
struct list_head dump_segments;
int nb_vdev;
u8 elf_class;
u16 elf_machine;
struct cdev cdev;
bool cdev_put_on_release;
+ DECLARE_BITMAP(features, RPROC_MAX_FEATURES);
};
/**
@@ -578,7 +613,7 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @len: length, in bytes
+ * @num: vring size
* @da: device address
* @align: vring alignment
* @notifyid: rproc-specific unique vring index
@@ -587,7 +622,7 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- int len;
+ int num;
u32 da;
u32 align;
int notifyid;
@@ -597,21 +632,19 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
- * @refcount: reference counter for the vdev and vring allocations
* @subdev: handle for registering the vdev as a rproc subdevice
+ * @pdev: remoteproc virtio platform device
* @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
- * @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
* @index: vdev position versus other vdev declared in resource table
*/
struct rproc_vdev {
- struct kref refcount;
struct rproc_subdev subdev;
- struct device dev;
+ struct platform_device *pdev;
unsigned int id;
struct list_head node;
@@ -652,8 +685,16 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...);
int rproc_boot(struct rproc *rproc);
-void rproc_shutdown(struct rproc *rproc);
+int rproc_shutdown(struct rproc *rproc);
+int rproc_detach(struct rproc *rproc);
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
+void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
dma_addr_t da, size_t size,
@@ -664,18 +705,6 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
void *priv);
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
-static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
-{
- return container_of(vdev->dev.parent, struct rproc_vdev, dev);
-}
-
-static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
-{
- struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
-
- return rvdev->rproc;
-}
-
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
index b47416f7aeb8..7c2b7cc9fe6c 100644
--- a/include/linux/remoteproc/mtk_scp.h
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -41,6 +41,8 @@ enum scp_ipi_id {
SCP_IPI_ISP_FRAME,
SCP_IPI_FD_CMD,
SCP_IPI_CROS_HOST_CMD,
+ SCP_IPI_VDEC_LAT,
+ SCP_IPI_VDEC_CORE,
SCP_IPI_NS_SERVICE = 0xFF,
SCP_IPI_MAX = 0x100,
};
diff --git a/include/linux/remoteproc/pruss.h b/include/linux/remoteproc/pruss.h
new file mode 100644
index 000000000000..039b50d58df2
--- /dev/null
+++ b/include/linux/remoteproc/pruss.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS Subsystem user interfaces
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - http://www.ti.com
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef __LINUX_PRUSS_H
+#define __LINUX_PRUSS_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define PRU_RPROC_DRVNAME "pru-rproc"
+
+/**
+ * enum pruss_pru_id - PRU core identifiers
+ * @PRUSS_PRU0: PRU Core 0.
+ * @PRUSS_PRU1: PRU Core 1.
+ * @PRUSS_NUM_PRUS: Total number of PRU Cores available.
+ */
+enum pruss_pru_id {
+ PRUSS_PRU0 = 0,
+ PRUSS_PRU1,
+ PRUSS_NUM_PRUS,
+};
+
+/*
+ * enum pru_ctable_idx - Configurable Constant table index identifiers
+ */
+enum pru_ctable_idx {
+ PRU_C24 = 0,
+ PRU_C25,
+ PRU_C26,
+ PRU_C27,
+ PRU_C28,
+ PRU_C29,
+ PRU_C30,
+ PRU_C31,
+};
+
+struct device_node;
+struct rproc;
+
+#if IS_ENABLED(CONFIG_PRU_REMOTEPROC)
+
+struct rproc *pru_rproc_get(struct device_node *np, int index,
+ enum pruss_pru_id *pru_id);
+void pru_rproc_put(struct rproc *rproc);
+int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr);
+
+#else
+
+static inline struct rproc *
+pru_rproc_get(struct device_node *np, int index, enum pruss_pru_id *pru_id)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pru_rproc_put(struct rproc *rproc) { }
+
+static inline int pru_rproc_set_ctable(struct rproc *rproc,
+ enum pru_ctable_idx c, u32 addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_PRU_REMOTEPROC */
+
+static inline bool is_pru_rproc(struct device *dev)
+{
+ const char *drv_name = dev_driver_string(dev);
+
+ if (strncmp(drv_name, PRU_RPROC_DRVNAME, sizeof(PRU_RPROC_DRVNAME)))
+ return false;
+
+ return true;
+}
+
+#endif /* __LINUX_PRUSS_H */
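
A sketch of the consumer flow for a hypothetical PRU client driver: look up
PRU 0 through a phandle list in the client's node, program a constant-table
entry, and keep the reference for later use (the my_* names and the 0x0100
base address are illustrative only):

	static int my_pru_client_probe(struct platform_device *pdev)
	{
		enum pruss_pru_id pru_id;
		struct rproc *pru;
		int ret;

		pru = pru_rproc_get(pdev->dev.of_node, 0, &pru_id);
		if (IS_ERR(pru))
			return dev_err_probe(&pdev->dev, PTR_ERR(pru),
					     "failed to get PRU\n");

		/* Point constant-table entry C28 at an illustrative base */
		ret = pru_rproc_set_ctable(pru, PRU_C28, 0x0100);
		if (ret)
			pru_rproc_put(pru);

		return ret;
	}
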
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
index 647051662174..82b211518136 100644
--- a/include/linux/remoteproc/qcom_rproc.h
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -3,8 +3,6 @@
struct notifier_block;
-#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
-
/**
* enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc
* processor.
@@ -26,6 +24,8 @@ struct qcom_ssr_notify_data {
bool crashed;
};
+#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
+
void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index daf5cf64c6a6..a365f67131ec 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -2,6 +2,16 @@
#ifndef _RESCTRL_H
#define _RESCTRL_H
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pid.h>
+
+/* CLOSID, RMID value used by the default control group */
+#define RESCTRL_RESERVED_CLOSID 0
+#define RESCTRL_RESERVED_RMID 0
+
+#define RESCTRL_PICK_ANY_CPU -1
+
#ifdef CONFIG_PROC_CPU_RESCTRL
int proc_resctrl_show(struct seq_file *m,
@@ -11,4 +21,287 @@ int proc_resctrl_show(struct seq_file *m,
#endif
+/* max value for struct rdt_domain's mbps_val */
+#define MBA_MAX_MBPS U32_MAX
+
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE: Configuration applies to instruction fetches.
+ * @CDP_DATA: Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+ CDP_NONE,
+ CDP_CODE,
+ CDP_DATA,
+};
+
+#define CDP_NUM_TYPES (CDP_DATA + 1)
+
+/*
+ * Event IDs, the values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+enum resctrl_event_id {
+ QOS_L3_OCCUP_EVENT_ID = 0x01,
+ QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
+ QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+};
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl: new ctrl value to be loaded
+ * @have_new_ctrl: whether the user provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+ u32 new_ctrl;
+ bool have_new_ctrl;
+};
+
+/**
+ * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @cpu_mask: which CPUs share this resource
+ * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
+ * @mbm_total: saved state for MBM total bandwidth
+ * @mbm_local: saved state for MBM local bandwidth
+ * @mbm_over: worker to periodically read MBM h/w counters
+ * @cqm_limbo: worker to periodically read CQM h/w counters
+ * @mbm_work_cpu: worker CPU for MBM h/w counters
+ * @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @plr: pseudo-locked region (if any) associated with domain
+ * @staged_config: parsed configuration to be applied
+ * @mbps_val: When mba_sc is enabled, this holds the array of user
+ * specified control values for mba_sc in MBps, indexed
+ * by closid
+ */
+struct rdt_domain {
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ struct pseudo_lock_region *plr;
+ struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len: Length of the cache bit mask
+ * @min_cbm_bits: Minimum number of consecutive bits to be set.
+ * The value 0 means the architecture can support
+ * zero CBM.
+ * @shareable_bits: Bitmask of shareable resource with other
+ * executing entities
+ * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid.
+ * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
+ * level has CPU scope.
+ */
+struct resctrl_cache {
+ unsigned int cbm_len;
+ unsigned int min_cbm_bits;
+ unsigned int shareable_bits;
+ bool arch_has_sparse_bitmasks;
+ bool arch_has_per_cpu_cfg;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core
+ * always using smallest bandwidth percentage
+ * assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled per thread
+ */
+enum membw_throttle_mode {
+ THREAD_THROTTLE_UNDEFINED = 0,
+ THREAD_THROTTLE_MAX,
+ THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw: Minimum memory bandwidth percentage user can request
+ * @bw_gran: Granularity at which the memory bandwidth is allocated
+ * @delay_linear: True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode: Bandwidth throttling mode when threads request
+ * different memory bandwidths
+ * @mba_sc: True if the MBA software controller (mba_sc) is enabled
+ * @mb_map: Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+ u32 min_bw;
+ u32 bw_gran;
+ u32 delay_linear;
+ bool arch_needs_linear;
+ enum membw_throttle_mode throttle_mode;
+ bool mba_sc;
+ u32 *mb_map;
+};
+
+struct rdt_parse_data;
+struct resctrl_schema;
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid: The index of the resource
+ * @alloc_capable: Is allocation available on this machine
+ * @mon_capable: Is monitor feature available on this machine
+ * @num_rmid: Number of RMIDs available
+ * @cache_level: Which cache level defines scope of this resource
+ * @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
+ * @domains: RCU list of all domains for this resource
+ * @name: Name to use in "schemata" file.
+ * @data_width: Character width of data when displaying
+ * @default_ctrl: Specifies default cache cbm or memory B/W percent.
+ * @format_str: Per resource format string to show domain value
+ * @parse_ctrlval: Per resource function pointer to parse control values
+ * @evt_list: List of monitoring events
+ * @fflags: flags to choose base and info files
+ * @cdp_capable: Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+ int rid;
+ bool alloc_capable;
+ bool mon_capable;
+ int num_rmid;
+ int cache_level;
+ struct resctrl_cache cache;
+ struct resctrl_membw membw;
+ struct list_head domains;
+ char *name;
+ int data_width;
+ u32 default_ctrl;
+ const char *format_str;
+ int (*parse_ctrlval)(struct rdt_parse_data *data,
+ struct resctrl_schema *s,
+ struct rdt_domain *d);
+ struct list_head evt_list;
+ unsigned long fflags;
+ bool cdp_capable;
+};
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ * user-space
+ * @list: Member of resctrl_schema_all.
+ * @name: The name to use in the "schemata" file.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res: The resource structure exported by the architecture to describe
+ * the hardware that is configured by this schema.
+ * @num_closid: The number of closid that can be used with this schema. When
+ * features like CDP are enabled, this will be lower than the
+ * hardware supports for the resource.
+ */
+struct resctrl_schema {
+ struct list_head list;
+ char name[8];
+ enum resctrl_conf_type conf_type;
+ struct rdt_resource *res;
+ u32 num_closid;
+};
+
+/* The number of closid supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type type);
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_online_cpu(unsigned int cpu);
+void resctrl_offline_cpu(unsigned int cpu);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ * for this resource and domain.
+ * @r: resource that the counter should be read from.
+ * @d: domain that the counter should be read from.
+ * @closid: closid that matches the rmid. Depending on the architecture, the
+ * counter may match traffic of both @closid and @rmid, or @rmid
+ * only.
+ * @rmid: rmid of the counter to read.
+ * @eventid: eventid to read, e.g. L3 occupancy.
+ * @val: result of the counter read in bytes.
+ * @arch_mon_ctx: An architecture specific value from
+ * resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
+ * the hardware monitor allocated for this read request.
+ *
+ * Some architectures need to sleep when first programming some of the
+ * counters (specifically, arm64's MPAM cache occupancy counters can return
+ * 'not ready' for a short period of time). Call from a non-migratable
+ * process context on a CPU that belongs to domain @d, e.g. use
+ * smp_call_on_cpu() or schedule_work_on(). This function can be called with
+ * interrupts masked, e.g. using smp_call_function_any(), but may
+ * consistently return an error.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, u32 rmid, enum resctrl_event_id eventid,
+ u64 *val, void *arch_mon_ctx);
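
Since the call may sleep, a caller that is not already running on one of the
domain's CPUs can bounce the read there. A sketch with a hypothetical wrapper
type, assuming a NULL @arch_mon_ctx is acceptable on architectures that do
not use it:

	struct my_rmid_read {
		struct rdt_resource *r;
		struct rdt_domain *d;
		u32 closid, rmid;
		u64 val;
	};

	static int my_rmid_read_fn(void *arg)
	{
		struct my_rmid_read *rr = arg;

		/* Runs in process context on the target CPU and may sleep */
		return resctrl_arch_rmid_read(rr->r, rr->d, rr->closid,
					      rr->rmid, QOS_L3_OCCUP_EVENT_ID,
					      &rr->val, NULL);
	}

	/* smp_call_on_cpu(cpumask_any(&d->cpu_mask), my_rmid_read_fn, &rr, false); */
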
+
+/**
+ * resctrl_arch_rmid_read_context_check() - warn about invalid contexts
+ *
+ * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
+ * resctrl_arch_rmid_read() is called with preemption disabled.
+ *
+ * The contract with resctrl_arch_rmid_read() is that if interrupts
+ * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
+ * IPI, (and fail if the call needed to sleep), while most of the time
+ * the work is scheduled, allowing the call to sleep.
+ */
+static inline void resctrl_arch_rmid_read_context_check(void)
+{
+ if (!irqs_disabled())
+ might_sleep();
+}
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ * and eventid.
+ * @r: The domain's resource.
+ * @d: The rmid's domain.
+ * @closid: closid that matches the rmid. Depending on the architecture, the
+ * counter may match traffic of both @closid and @rmid, or @rmid only.
+ * @rmid: The rmid whose counter values should be reset.
+ * @eventid: The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, u32 rmid,
+ enum resctrl_event_id eventid);
+
+/**
+ * resctrl_arch_reset_rmid_all() - Reset all private state associated with
+ * all rmids and eventids.
+ * @r: The resctrl resource.
+ * @d: The domain for which all architectural counter state will
+ * be cleared.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
+
#endif /* _RESCTRL_H */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ec35814e0bbb..357df16ede32 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -60,6 +60,9 @@ struct reset_control_lookup {
* @reset_control_head: head of internal list of requested reset controls
* @dev: corresponding driver model device struct
* @of_node: corresponding device tree node as phandle target
+ * @of_args: for reset-gpios controllers: corresponding phandle args with
+ * of_node and GPIO number complementing of_node; either this or
+ * of_node should be present
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
* device tree to id as given to the reset control ops, defaults
@@ -73,12 +76,14 @@ struct reset_controller_dev {
struct list_head reset_control_head;
struct device *dev;
struct device_node *of_node;
+ const struct of_phandle_args *of_args;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec);
unsigned int nr_resets;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
@@ -88,5 +93,26 @@ int devm_reset_controller_register(struct device *dev,
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
unsigned int num_entries);
+#else
+static inline int reset_controller_register(struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_unregister(struct reset_controller_dev *rcdev)
+{
+}
+
+static inline int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries)
+{
+}
+#endif
#endif
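
For context, a hedged sketch of a provider registering itself through this
API; all foo_* names and the line count are hypothetical:

static int foo_reset_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	/* assert reset line @id in (hypothetical) hardware */
	return 0;
}

static int foo_reset_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	/* release reset line @id */
	return 0;
}

static const struct reset_control_ops foo_reset_ops = {
	.assert   = foo_reset_assert,
	.deassert = foo_reset_deassert,
};

static int foo_reset_probe(struct platform_device *pdev)
{
	struct reset_controller_dev *rcdev;

	rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
	if (!rcdev)
		return -ENOMEM;

	rcdev->owner = THIS_MODULE;
	rcdev->ops = &foo_reset_ops;
	rcdev->of_node = pdev->dev.of_node;
	rcdev->nr_resets = 4;	/* hypothetical line count */

	/* With CONFIG_RESET_CONTROLLER=n this now compiles to a no-op. */
	return devm_reset_controller_register(&pdev->dev, rcdev);
}
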
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 05aa9f440f48..514ddf003efc 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -10,15 +10,37 @@ struct device;
struct device_node;
struct reset_control;
+/**
+ * struct reset_control_bulk_data - Data used for bulk reset control operations.
+ *
+ * @id: reset control consumer ID
+ * @rstc: struct reset_control * to store the associated reset control
+ *
+ * The reset APIs provide a series of reset_control_bulk_*() API calls as
+ * a convenience to consumers which require multiple reset controls.
+ * This structure is used to manage data for these calls.
+ */
+struct reset_control_bulk_data {
+ const char *id;
+ struct reset_control *rstc;
+};
+
#ifdef CONFIG_RESET_CONTROLLER
int reset_control_reset(struct reset_control *rstc);
+int reset_control_rearm(struct reset_control *rstc);
int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
int reset_control_status(struct reset_control *rstc);
int reset_control_acquire(struct reset_control *rstc);
void reset_control_release(struct reset_control *rstc);
+int reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs);
+void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
+
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
bool optional, bool acquired);
@@ -26,10 +48,18 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared,
bool optional, bool acquired);
void reset_control_put(struct reset_control *rstc);
+int __reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired);
+void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs);
+
int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
bool optional, bool acquired);
+int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired);
struct reset_control *devm_reset_control_array_get(struct device *dev,
bool shared, bool optional);
@@ -46,6 +76,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
return 0;
}
+static inline int reset_control_rearm(struct reset_control *rstc)
+{
+ return 0;
+}
+
static inline int reset_control_assert(struct reset_control *rstc)
{
return 0;
@@ -95,6 +130,48 @@ static inline struct reset_control *__reset_control_get(
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int
+reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline void
+reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+}
+
+static inline int
+__reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ return optional ? 0 : -EOPNOTSUPP;
+}
+
+static inline void
+reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+}
+
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional,
@@ -103,6 +180,14 @@ static inline struct reset_control *__devm_reset_control_get(
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int
+__devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ return optional ? 0 : -EOPNOTSUPP;
+}
+
static inline struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
@@ -155,6 +240,23 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
}
/**
+ * reset_control_bulk_get_exclusive - Lookup and obtain exclusive references to
+ * multiple reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to exclusive reset controls and
+ * returns 0, or a negative errno on failure.
+ */
+static inline int __must_check
+reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+}
+
+/**
 * reset_control_get_exclusive_released - Lookup and obtain a temporarily
* exclusive reference to a reset
* controller.
@@ -176,6 +278,48 @@ __must_check reset_control_get_exclusive_released(struct device *dev,
}
/**
+ * reset_control_bulk_get_exclusive_released - Lookup and obtain temporarily
+ * exclusive references to multiple reset
+ * controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to exclusive reset controls and
+ * returns 0, or a negative errno on failure.
+ * Reset controls returned by this function must be acquired via
+ * reset_control_bulk_acquire() before they can be used and should be released
+ * via reset_control_bulk_release() afterwards.
+ */
+static inline int __must_check
+reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+}
+
+/**
+ * reset_control_bulk_get_optional_exclusive_released - Lookup and obtain optional
+ * temporarily exclusive references to multiple
+ * reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_exclusive_released(). If any of
+ * the requested resets are not specified in the device tree, this function
+ * returns 0 instead of an error, and the missing rstcs are set to NULL.
+ *
+ * See reset_control_bulk_get_exclusive_released() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+}
+
+/**
* reset_control_get_shared - Lookup and obtain a shared reference to a
* reset controller.
* @dev: device to be reset by the controller
@@ -204,6 +348,23 @@ static inline struct reset_control *reset_control_get_shared(
}
/**
+ * reset_control_bulk_get_shared - Lookup and obtain shared references to
+ * multiple reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to shared reset controls and
+ * returns 0, or a negative errno on failure.
+ */
+static inline int __must_check
+reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+}
+
+/**
* reset_control_get_optional_exclusive - optional reset_control_get_exclusive()
* @dev: device to be reset by the controller
* @id: reset line name
@@ -221,6 +382,26 @@ static inline struct reset_control *reset_control_get_optional_exclusive(
}
/**
+ * reset_control_bulk_get_optional_exclusive - optional
+ * reset_control_bulk_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_exclusive(). If any of the
+ * requested resets are not specified in the device tree, this function sets
+ * them to NULL instead of returning an error.
+ *
+ * See reset_control_bulk_get_exclusive() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+}
+
+/**
* reset_control_get_optional_shared - optional reset_control_get_shared()
* @dev: device to be reset by the controller
* @id: reset line name
@@ -238,6 +419,26 @@ static inline struct reset_control *reset_control_get_optional_shared(
}
/**
+ * reset_control_bulk_get_optional_shared - optional
+ * reset_control_bulk_get_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_shared(). If the requested resets
+ * are not specified in the device tree, this function sets them to NULL
+ * instead of returning an error.
+ *
+ * See reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+}
+
+/**
* of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
* to a reset controller.
* @node: device to be reset by the controller
@@ -254,6 +455,26 @@ static inline struct reset_control *of_reset_control_get_exclusive(
}
/**
+ * of_reset_control_get_optional_exclusive - Lookup and obtain an optional exclusive
+ * reference to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of of_reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_optional_exclusive(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, false, true, true);
+}
+
+/**
* of_reset_control_get_shared - Lookup and obtain a shared reference
* to a reset controller.
* @node: device to be reset by the controller
@@ -343,6 +564,26 @@ __must_check devm_reset_control_get_exclusive(struct device *dev,
}
/**
+ * devm_reset_control_bulk_get_exclusive - resource managed
+ * reset_control_bulk_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_bulk_get_exclusive() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+}
+
+/**
* devm_reset_control_get_exclusive_released - resource managed
* reset_control_get_exclusive_released()
* @dev: device to be reset by the controller
@@ -362,6 +603,65 @@ __must_check devm_reset_control_get_exclusive_released(struct device *dev,
}
/**
+ * devm_reset_control_bulk_get_exclusive_released - resource managed
+ * reset_control_bulk_get_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_exclusive_released(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_exclusive_released() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+}
+
+/**
+ * devm_reset_control_get_optional_exclusive_released - resource managed
+ * reset_control_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed-and-optional variant of reset_control_get_exclusive_released(). For
+ * reset controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_get_exclusive_released() for more information.
+ */
+static inline struct reset_control *
+__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, false, true, false);
+}
+
+/**
+ * devm_reset_control_bulk_get_optional_exclusive_released - resource managed
+ * reset_control_bulk_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_exclusive_released(). For reset
+ * controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_bulk_get_optional_exclusive_released() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+}
+
+/**
* devm_reset_control_get_shared - resource managed reset_control_get_shared()
* @dev: device to be reset by the controller
* @id: reset line name
@@ -377,6 +677,26 @@ static inline struct reset_control *devm_reset_control_get_shared(
}
/**
+ * devm_reset_control_bulk_get_shared - resource managed
+ * reset_control_bulk_get_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_shared(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+}
+
+/**
* devm_reset_control_get_optional_exclusive - resource managed
* reset_control_get_optional_exclusive()
* @dev: device to be reset by the controller
@@ -395,6 +715,26 @@ static inline struct reset_control *devm_reset_control_get_optional_exclusive(
}
/**
+ * devm_reset_control_bulk_get_optional_exclusive - resource managed
+ * reset_control_bulk_get_optional_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_exclusive(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_optional_exclusive() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+}
+
+/**
* devm_reset_control_get_optional_shared - resource managed
* reset_control_get_optional_shared()
* @dev: device to be reset by the controller
@@ -413,6 +753,26 @@ static inline struct reset_control *devm_reset_control_get_optional_shared(
}
/**
+ * devm_reset_control_bulk_get_optional_shared - resource managed
+ * reset_control_bulk_get_optional_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_shared(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_optional_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+}
+
+/**
* devm_reset_control_get_exclusive_by_index - resource managed
* reset_control_get_exclusive()
* @dev: device to be reset by the controller
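
Taken together, the bulk API is meant to be used roughly as in this sketch;
the reset line names and the surrounding foo_* consumer are hypothetical:

static int foo_power_on(struct device *dev)
{
	struct reset_control_bulk_data resets[] = {
		{ .id = "bus"  },
		{ .id = "core" },
	};
	int ret;

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(resets),
						    resets);
	if (ret)
		return ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
	if (ret)
		return ret;

	/* ... enable clocks/power while everything is held in reset ... */

	return reset_control_bulk_deassert(ARRAY_SIZE(resets), resets);
}
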
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h
index bb4af7b5eb36..c77b6999518a 100644
--- a/include/linux/reset/bcm63xx_pmb.h
+++ b/include/linux/reset/bcm63xx_pmb.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
*
* Copyright (C) 2015, Broadcom Corporation
* Author: Florian Fainelli <f.fainelli@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __BCM63XX_PMB_H
#define __BCM63XX_PMB_H
diff --git a/include/linux/resource.h b/include/linux/resource.h
index bdf491cbcab7..4fdbc0c3f315 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -8,7 +8,5 @@
struct task_struct;
void getrusage(struct task_struct *p, int who, struct rusage *ru);
-int do_prlimit(struct task_struct *tsk, unsigned int resource,
- struct rlimit *new_rlim, struct rlimit *old_rlim);
#endif
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index bba2920e9c05..13f17676c5f4 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -7,8 +7,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/time64.h>
+struct __kernel_timespec;
struct timespec;
struct old_timespec32;
struct pollfd;
@@ -23,6 +23,7 @@ enum timespec_type {
* System call restart block.
*/
struct restart_block {
+ unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
/* For futex_wait and futex_wait_requeue_pi */
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
new file mode 100644
index 000000000000..e0135e0adae0
--- /dev/null
+++ b/include/linux/resume_user_mode.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LINUX_RESUME_USER_MODE_H
+#define LINUX_RESUME_USER_MODE_H
+
+#include <linux/sched.h>
+#include <linux/task_work.h>
+#include <linux/memcontrol.h>
+#include <linux/rseq.h>
+#include <linux/blk-cgroup.h>
+
+/**
+ * set_notify_resume - cause resume_user_mode_work() to be called
+ * @task: task that will call resume_user_mode_work()
+ *
+ * Calling this arranges that @task will call resume_user_mode_work()
+ * before returning to user mode. If it's already running in user mode,
+ * it will enter the kernel and call resume_user_mode_work() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+ kick_process(task);
+}
+
+
+/**
+ * resume_user_mode_work - Perform work before returning to user mode
+ * @regs: user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted. The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void resume_user_mode_work(struct pt_regs *regs)
+{
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ /*
+ * This barrier pairs with task_work_add()->set_notify_resume() after
+ * hlist_add_head(task->task_works);
+ */
+ smp_mb__after_atomic();
+ if (unlikely(task_work_pending(current)))
+ task_work_run();
+
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ if (unlikely(current->cached_requested_key)) {
+ key_put(current->cached_requested_key);
+ current->cached_requested_key = NULL;
+ }
+#endif
+
+ mem_cgroup_handle_over_high(GFP_KERNEL);
+ blkcg_maybe_throttle_current();
+
+ rseq_handle_notify_resume(NULL, regs);
+}
+
+#endif /* LINUX_RESUME_USER_MODE_H */
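
The intended call site is the architecture's exit-to-user path. A hedged
sketch follows; the loop is simplified, and _TIF_WORK_MASK and the exact flag
handling are arch-specific assumptions:

/* Simplified, hypothetical arch exit-to-user loop. */
static void exit_to_user_mode_loop(struct pt_regs *regs)
{
	unsigned long ti_work = read_thread_flags();

	while (ti_work & _TIF_WORK_MASK) {
		local_irq_enable();

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();
		if (ti_work & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		local_irq_disable();
		ti_work = read_thread_flags();
	}
}
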
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
new file mode 100644
index 000000000000..ba60962805f6
--- /dev/null
+++ b/include/linux/rethook.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Return hooking with list-based shadow stack.
+ */
+#ifndef _LINUX_RETHOOK_H
+#define _LINUX_RETHOOK_H
+
+#include <linux/compiler.h>
+#include <linux/objpool.h>
+#include <linux/kallsyms.h>
+#include <linux/llist.h>
+#include <linux/rcupdate.h>
+
+struct rethook_node;
+
+typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
+
+/**
+ * struct rethook - The rethook management data structure.
+ * @data: The user-defined data storage.
+ * @handler: The user-defined return hook handler.
+ * @pool: The pool of struct rethook_node.
+ * @ref: The reference counter.
+ * @rcu: The rcu_head for deferred freeing.
+ *
+ * Don't embed this in another data structure: it is self-destructive and
+ * frees itself once all of its rethook_node entries have been freed.
+ */
+struct rethook {
+ void *data;
+ /*
+	 * To avoid sparse warnings, this uses a raw function pointer with
+	 * __rcu instead of rethook_handler_t. It must match the
+	 * rethook_handler_t signature.
+ */
+ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
+ struct objpool_head pool;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct rethook_node - The rethook shadow-stack entry node.
+ * @rcu: The rcu_head for deferred freeing.
+ * @llist: The llist, linked to a struct task_struct::rethooks.
+ * @rethook: The pointer to the struct rethook.
+ * @ret_addr: The storage for the real return address.
+ * @frame: The storage for the frame pointer.
+ *
+ * You can embed this in your own extended data structure to store any data
+ * on each entry of the shadow stack.
+ */
+struct rethook_node {
+ struct rcu_head rcu;
+ struct llist_node llist;
+ struct rethook *rethook;
+ unsigned long ret_addr;
+ unsigned long frame;
+};
+
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num);
+void rethook_stop(struct rethook *rh);
+void rethook_free(struct rethook *rh);
+struct rethook_node *rethook_try_get(struct rethook *rh);
+void rethook_recycle(struct rethook_node *node);
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur);
+
+/* Arch dependent code must implement arch_* and trampoline code */
+void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+void arch_rethook_trampoline(void);
+
+/**
+ * is_rethook_trampoline() - Check whether the address is the rethook trampoline
+ * @addr: The address to be checked
+ *
+ * Return true if @addr is the rethook trampoline address.
+ */
+static inline bool is_rethook_trampoline(unsigned long addr)
+{
+ return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline);
+}
+
+/* If the architecture needs to fixup the return address, implement it. */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr);
+
+/* Generic trampoline handler, arch code must prepare asm stub */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame);
+
+#ifdef CONFIG_RETHOOK
+void rethook_flush_task(struct task_struct *tk);
+#else
+#define rethook_flush_task(tsk) do { } while (0)
+#endif
+
+#endif
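
A hedged sketch of the expected usage from an entry-side probe: allocate a
node pool, grab a node on function entry, and let the trampoline invoke the
handler on return. The my_* names are hypothetical, and rethook_alloc() is
assumed to report failure via ERR_PTR():

static void my_ret_handler(struct rethook_node *node, void *data,
			   unsigned long ret_addr, struct pt_regs *regs)
{
	pr_info("return to %pS\n", (void *)ret_addr);
}

static struct rethook *my_rh;

static int __init my_probe_init(void)
{
	/* 32 preallocated nodes, no extra per-node data. */
	my_rh = rethook_alloc(NULL, my_ret_handler,
			      sizeof(struct rethook_node), 32);
	return IS_ERR(my_rh) ? PTR_ERR(my_rh) : 0;
}

/* Called from an entry probe with the function-entry pt_regs. */
static void my_entry_hook(struct pt_regs *regs)
{
	struct rethook_node *node = rethook_try_get(my_rh);

	if (node)	/* may be NULL if the pool is exhausted */
		rethook_hook(node, regs, false);
}
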
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 8ad2487a86d5..373003ace639 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -138,6 +138,17 @@ void rfkill_unregister(struct rfkill *rfkill);
void rfkill_destroy(struct rfkill *rfkill);
/**
+ * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state
+ * with a reason
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current hardware block state to set
+ * @reason: one of &enum rfkill_hard_block_reasons
+ *
+ * Prefer rfkill_set_hw_state() if you don't need to pass a special reason.
+ */
+bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked, unsigned long reason);
+/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
* @blocked: the current hardware block state to set
@@ -156,7 +167,11 @@ void rfkill_destroy(struct rfkill *rfkill);
* should be blocked) so that drivers need not keep track of the soft
* block state -- which they might not be able to.
*/
-bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
+static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+{
+ return rfkill_set_hw_state_reason(rfkill, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
@@ -215,6 +230,13 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
bool rfkill_blocked(struct rfkill *rfkill);
/**
+ * rfkill_soft_blocked - Query soft rfkill block state
+ *
+ * @rfkill: rfkill struct to query
+ */
+bool rfkill_soft_blocked(struct rfkill *rfkill);
+
+/**
* rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
@@ -256,6 +278,13 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
{
}
+static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked,
+ unsigned long reason)
+{
+ return blocked;
+}
+
static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
return blocked;
@@ -279,6 +308,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
return false;
}
+static inline bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+
static inline enum rfkill_type rfkill_find_type(const char *name)
{
return RFKILL_TYPE_ALL;
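
A sketch of a driver reporting a hardware kill switch with an explicit
reason; the GPIO-reading helper is hypothetical:

static irqreturn_t wlan_killswitch_irq(int irq, void *data)
{
	struct rfkill *rfkill = data;
	bool blocked = wlan_killswitch_asserted();	/* hypothetical helper */

	/* Equivalent to rfkill_set_hw_state(), but the reason is explicit. */
	rfkill_set_hw_state_reason(rfkill, blocked, RFKILL_HARD_BLOCK_SIGNAL);
	return IRQ_HANDLED;
}
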
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 57467cbf4c5b..b6f3797277ff 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -12,7 +12,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/workqueue_types.h>
struct rhash_head {
struct rhash_head __rcu *next;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 68dab3e08aad..5b5357c0bd8c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -323,29 +323,36 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
* When we write to a bucket without unlocking, we use rht_assign_locked().
*/
-static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt)
{
- local_bh_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bkt);
lock_map_acquire(&tbl->dep_map);
+ return flags;
}
-static inline void rht_lock_nested(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bucket,
- unsigned int subclass)
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bucket,
+ unsigned int subclass)
{
- local_bh_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bucket);
lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+ return flags;
}
static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+ struct rhash_lock_head __rcu **bkt,
+ unsigned long flags)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
- local_bh_enable();
+ local_irq_restore(flags);
}
static inline struct rhash_head *__rht_ptr(
@@ -393,7 +400,8 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
- struct rhash_head *obj)
+ struct rhash_head *obj,
+ unsigned long flags)
{
if (rht_is_a_nulls(obj))
obj = NULL;
@@ -401,7 +409,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
- local_bh_enable();
+ local_irq_restore(flags);
}
/**
@@ -706,6 +714,7 @@ static inline void *__rhashtable_insert_fast(
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
+ unsigned long flags;
unsigned int hash;
int elasticity;
void *data;
@@ -720,11 +729,11 @@ static inline void *__rhashtable_insert_fast(
if (!bkt)
goto out;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
@@ -756,9 +765,9 @@ slow_path:
RCU_INIT_POINTER(list->rhead.next, head);
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
data = NULL;
goto out;
}
@@ -785,7 +794,7 @@ slow_path:
}
atomic_inc(&ht->nelems);
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
@@ -797,7 +806,7 @@ out:
return data;
out_unlock:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
goto out;
}
@@ -991,6 +1000,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -999,7 +1009,7 @@ static inline int __rhashtable_remove_fast_one(
if (!bkt)
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
@@ -1043,14 +1053,14 @@ static inline int __rhashtable_remove_fast_one(
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else {
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
}
goto unlocked;
}
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
unlocked:
if (err > 0) {
atomic_dec(&ht->nelems);
@@ -1143,6 +1153,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -1158,7 +1169,7 @@ static inline int __rhashtable_replace_fast(
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
@@ -1169,15 +1180,15 @@ static inline int __rhashtable_replace_fast(
rcu_assign_pointer(obj_new->next, obj_old->next);
if (pprev) {
rcu_assign_pointer(*pprev, obj_new);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else {
- rht_assign_unlock(tbl, bkt, obj_new);
+ rht_assign_unlock(tbl, bkt, obj_new, flags);
}
err = 0;
goto unlocked;
}
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
unlocked:
return err;
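
The new calling convention for the bucket lock, in sketch form: the IRQ flags
returned by rht_lock() must be passed through to the matching unlock. The
example_ function below is illustrative only:

static void example_bucket_update(struct bucket_table *tbl,
				  struct rhash_lock_head __rcu **bkt,
				  struct rhash_head *new_head)
{
	unsigned long flags;

	flags = rht_lock(tbl, bkt);	/* now saves and disables interrupts */

	if (new_head)
		/* publish a new chain head and unlock in one step */
		rht_assign_unlock(tbl, bkt, new_head, flags);
	else
		/* nothing to publish: plain unlock restores IRQ state */
		rht_unlock(tbl, bkt, flags);
}
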
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 136ea0997e6d..dc5ae4e96aee 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -61,7 +61,8 @@ enum ring_buffer_type {
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
-u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
+u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
+ struct ring_buffer_event *event);
/*
* ring_buffer_discard_commit will remove an event that has not
@@ -97,10 +98,12 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+typedef bool (*ring_buffer_cond_fn)(void *data);
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
+ ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
-
+ struct file *filp, poll_table *poll_table, int full);
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
@@ -112,8 +115,7 @@ void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long length);
-int ring_buffer_unlock_commit(struct trace_buffer *buffer,
- struct ring_buffer_event *event);
+int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length, void *data);
@@ -141,6 +143,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
@@ -180,7 +183,7 @@ unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cp
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
-u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
@@ -191,15 +194,24 @@ bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
+struct buffer_data_read_page;
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+ struct buffer_data_read_page *page);
+int ring_buffer_read_page(struct trace_buffer *buffer,
+ struct buffer_data_read_page *data_page,
size_t len, int cpu, int full);
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
struct trace_seq;
int ring_buffer_print_entry_header(struct trace_seq *s);
-int ring_buffer_print_page_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);
+
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
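
A hedged sketch of the extended ring_buffer_wait() call, assuming the
condition callback returning true ends the wait; the my_* names are
hypothetical:

struct my_wait_ctx {
	atomic_t stop;
};

static bool my_wait_cond(void *data)
{
	struct my_wait_ctx *ctx = data;

	/* Returning true is assumed to terminate the wait. */
	return atomic_read(&ctx->stop) != 0;
}

static int my_wait_for_data(struct trace_buffer *buffer,
			    struct my_wait_ctx *ctx)
{
	/* full == 0: wake on any data on any CPU. */
	return ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0,
				my_wait_cond, ctx);
}
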
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index d637742e5039..e49c32b0f394 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -444,9 +444,6 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
/* Misc driver helpers */
extern u16 rio_local_get_device_id(struct rio_mport *port);
extern void rio_local_set_device_id(struct rio_mport *port, u16 did);
-extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
-extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
- struct rio_dev *from);
extern int rio_init_mports(void);
#endif /* LINUX_RIO_DRV_H */
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 4846f72759b2..c7e2f21dd5c1 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -9,18 +9,6 @@
#ifndef LINUX_RIO_IDS_H
#define LINUX_RIO_IDS_H
-#define RIO_VID_FREESCALE 0x0002
-#define RIO_DID_MPC8560 0x0003
-
-#define RIO_VID_TUNDRA 0x000d
-#define RIO_DID_TSI500 0x0500
-#define RIO_DID_TSI568 0x0568
-#define RIO_DID_TSI572 0x0572
-#define RIO_DID_TSI574 0x0574
-#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
-#define RIO_DID_TSI577 0x0577
-#define RIO_DID_TSI578 0x0578
-
#define RIO_VID_IDT 0x0038
#define RIO_DID_IDT70K200 0x0310
#define RIO_DID_IDTCPS8 0x035c
@@ -33,7 +21,6 @@
#define RIO_DID_IDTCPS1616 0x0379
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
-#define RIO_DID_TSI721 0x80ab
#define RIO_DID_IDTRXS1632 0x80e5
#define RIO_DID_IDTRXS2448 0x80e6
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3a6adfa70fb0..b7944a833668 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -11,6 +11,8 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/memremap.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -39,12 +41,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+	 * Count of child anon_vmas. Equal to the number of anon_vmas that
+	 * have ->parent pointing to this one, including itself.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
@@ -86,19 +91,15 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_MIGRATION = 0x1, /* migration mode */
- TTU_MUNLOCK = 0x2, /* munlock mode */
-
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
- TTU_IGNORE_ACCESS = 0x10, /* don't age */
- TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
+ TTU_HWPOISON = 0x20, /* do convert pte to hwpoison entry */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
};
#ifdef CONFIG_MMU
@@ -120,6 +121,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
down_write(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+ return down_write_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
@@ -130,6 +136,11 @@ static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
down_read(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
+{
+ return down_read_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
up_read(&anon_vma->root->rwsem);
@@ -160,50 +171,476 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *page_get_anon_vma(struct page *page);
+struct anon_vma *folio_get_anon_vma(struct folio *folio);
+
+/* RMAP flags, currently only relevant for some anon rmap operations. */
+typedef int __bitwise rmap_t;
+
+/*
+ * No special request: A mapped anonymous (sub)page is possibly shared between
+ * processes.
+ */
+#define RMAP_NONE ((__force rmap_t)0)
-/* bitflags for do_page_add_anon_rmap() */
-#define RMAP_EXCLUSIVE 0x01
-#define RMAP_COMPOUND 0x02
+/* The anonymous (sub)page is exclusive to a single process. */
+#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
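
A hedged sketch of how a caller might combine these flags when mapping an
anonymous page, e.g. on a swapin-style path; the surrounding logic is
simplified and the map_anon_page() wrapper is hypothetical:

static void map_anon_page(struct folio *folio, struct page *page,
			  struct vm_area_struct *vma, unsigned long addr,
			  bool exclusive)
{
	rmap_t rmap_flags = RMAP_NONE;

	if (exclusive)	/* the page is known private to this process */
		rmap_flags |= RMAP_EXCLUSIVE;

	folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
}
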
+
+/*
+ * Internally, we're using an enum to specify the granularity. We make the
+ * compiler emit specialized code for each granularity.
+ */
+enum rmap_level {
+ RMAP_LEVEL_PTE = 0,
+ RMAP_LEVEL_PMD,
+};
+
+static inline void __folio_rmap_sanity_checks(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ /* hugetlb folios are handled separately. */
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
+ /*
+ * TODO: we get driver-allocated folios that have nothing to do with
+ * the rmap using vm_insert_page(); therefore, we cannot assume that
+ * folio_test_large_rmappable() holds for large folios. We should
+ * handle any desired mapcount+stats accounting for these folios in
+ * VM_MIXEDMAP VMAs separately, and then sanity-check here that
+ * we really only get rmappable folios.
+ */
+
+ VM_WARN_ON_ONCE(nr_pages <= 0);
+ VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
+ VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ break;
+ case RMAP_LEVEL_PMD:
+ /*
+ * We don't support folios larger than a single PMD yet. So
+ * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * a single "entire" mapping of the folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
+ break;
+ default:
+ VM_WARN_ON_ONCE(true);
+ }
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
-void page_move_anon_rmap(struct page *, struct vm_area_struct *);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, int);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void page_add_file_rmap(struct page *, bool);
-void page_remove_rmap(struct page *, bool);
-
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
-void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
-
-static inline void page_dup_rmap(struct page *page, bool compound)
+void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
+void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
+ folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
+void folio_add_anon_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address);
+void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_add_file_rmap_pte(folio, page, vma) \
+ folio_add_file_rmap_ptes(folio, page, 1, vma)
+void folio_add_file_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_remove_rmap_pte(folio, page, vma) \
+ folio_remove_rmap_ptes(folio, page, 1, vma)
+void folio_remove_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+
+void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address, rmap_t flags);
+void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address);
+
+/* See folio_try_dup_anon_rmap_*() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+
+ if (PageAnonExclusive(&folio->page)) {
+ if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ return 0;
+}
+
+/* See folio_try_share_anon_rmap_*() */
+static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+static inline void hugetlb_add_file_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+
+ atomic_inc(&folio->_entire_mapcount);
+}
+
+static inline void hugetlb_remove_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
+ atomic_dec(&folio->_entire_mapcount);
+}
+
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
+}
+
+/**
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages)
+{
+ __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+}
+#define folio_dup_file_rmap_pte(folio, page) \
+ folio_dup_file_rmap_ptes(folio, page, 1)
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma,
+ enum rmap_level level)
+{
+ bool maybe_pinned;
+ int i;
+
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /*
+ * If this folio may have been pinned by the parent process,
+	 * don't allow the mappings to be duplicated; instead require that,
+	 * e.g., the subpage is copied immediately for the child, so that
+	 * we can always guarantee the pinned folio won't be randomly
+	 * replaced in the future on write faults.
+ */
+ maybe_pinned = likely(!folio_is_device_private(folio)) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio));
+
+ /*
+	 * No need to check+clear for already shared PTEs/PMDs of the
+	 * folio. But if any page is PageAnonExclusive, we must fall back
+	 * to copying if the folio may be pinned.
+ */
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ if (unlikely(maybe_pinned)) {
+ for (i = 0; i < nr_pages; i++)
+ if (PageAnonExclusive(page + i))
+ return -EBUSY;
+ }
+ do {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ if (PageAnonExclusive(page)) {
+ if (unlikely(maybe_pinned))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @src_vma: The vm area from which the mappings are duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mappings can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mappings succeeds, the duplicated PTEs have to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * has succeeded.
+ *
+ * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma)
+{
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
+ RMAP_LEVEL_PTE);
+}
+#define folio_try_dup_anon_rmap_pte(folio, page, vma) \
+ folio_try_dup_anon_rmap_ptes(folio, page, 1, vma)
+
+/**
+ * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @src_vma: The vm area from which the mapping is duplicated
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * has succeeded.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *src_vma)
{
- atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
+static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /* device private folios cannot get pinned via GUP. */
+ if (unlikely(folio_is_device_private(folio))) {
+ ClearPageAnonExclusive(page);
+ return 0;
+ }
+
+ /*
+	 * We have to make sure that when we clear PageAnonExclusive, the
+	 * page is not pinned and that concurrent GUP-fast won't succeed in
+	 * concurrently pinning the page.
+ *
+ * Conceptually, PageAnonExclusive clearing consists of:
+ * (A1) Clear PTE
+ * (A2) Check if the page is pinned; back off if so.
+ * (A3) Clear PageAnonExclusive
+ * (A4) Restore PTE (optional, but certainly not writable)
+ *
+ * When clearing PageAnonExclusive, we cannot possibly map the page
+ * writable again, because anon pages that may be shared must never
+ * be writable. So in any case, if the PTE was writable it cannot
+ * be writable anymore afterwards and there would be a PTE change. Only
+ * if the PTE wasn't writable, there might not be a PTE change.
+ *
+ * Conceptually, GUP-fast pinning of an anon page consists of:
+ * (B1) Read the PTE
+ * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
+ * (B3) Pin the mapped page
+ * (B4) Check if the PTE changed by re-reading it; back off if so.
+ * (B5) If the original PTE is not writable, check if
+ * PageAnonExclusive is not set; back off if so.
+ *
+ * If the PTE was writable, we only have to make sure that GUP-fast
+ * observes a PTE change and properly backs off.
+ *
+ * If the PTE was not writable, we have to make sure that GUP-fast either
+ * detects a (temporary) PTE change or that PageAnonExclusive is cleared
+ * and properly backs off.
+ *
+ * Consequently, when clearing PageAnonExclusive(), we have to make
+ * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
+ * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
+ * and (B5) happen in the right memory order.
+ *
+ * We assume that there might not be a memory barrier after
+ * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
+ * so we use explicit ones here.
+ */
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+/**
+ * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
+ * mapped by a PTE possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The mapped exclusive page
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
+ * fork() to duplicate mappings, but instead to prepare for KSM or temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
+ *
+ * Marking the mapped page shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped page possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
+ struct page *page)
+{
+ return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
+ * range mapped by a PMD possibly shared to
+ * prepare for temporary unmapping
+ * @folio: The folio to share the mapping of
+ * @page: The first page to share the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
+ * fork() to duplicate a mapping, but instead to prepare for temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
+ *
+ * Marking the mapped pages shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
}
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked,
+int folio_referenced(struct folio *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_migrate(struct folio *folio, enum ttu_flags flags);
+void try_to_unmap(struct folio *, enum ttu_flags flags);
+
+int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct page **pages,
+ void *arg);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
-/* Look for migarion entries rather than present PTEs */
+/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
struct page_vma_mapped_walk {
- struct page *page;
+ unsigned long pfn;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
@@ -212,9 +649,30 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = page_to_pfn(_page), \
+ .nr_pages = compound_nr(_page), \
+ .pgoff = page_to_pgoff(_page), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
+
+#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = folio_pfn(_folio), \
+ .nr_pages = folio_nr_pages(_folio), \
+ .pgoff = folio_pgoff(_folio), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
+
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- if (pvmw->pte)
+	/* HugeTLB ptes point at the page table entry directly, without pte_offset_map(), so must not be pte_unmap()'ed. */
+ if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
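
A typical consumer of the pfn-based walk state looks like the sketch
below; the callback body is hypothetical, but the DEFINE_FOLIO_VMA_WALK
plus page_vma_mapped_walk() loop is the intended pattern.

static bool demo_referenced_one(struct folio *folio, struct vm_area_struct *vma,
				unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int *referenced = arg;

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.pte (or pvmw.pmd) now refers to one mapping of the folio */
		if (pvmw.pte && ptep_clear_flush_young_notify(vma, pvmw.address,
							      pvmw.pte))
			(*referenced)++;
	}
	/*
	 * No explicit page_vma_mapped_walk_done() needed here: the walk
	 * drops its locks itself when it returns false. Call it only when
	 * breaking out of the loop early.
	 */
	return true;
}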
@@ -233,27 +691,21 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
-int page_mkclean(struct page *);
+int folio_mkclean(struct folio *);
-/*
- * called in munlock()/munmap() path to check for other vmas holding
- * the page mlocked.
- */
-void try_to_munlock(struct page *);
+int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
+ struct vm_area_struct *vma);
-void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
-/*
- * Called by memory-failure.c to kill processes.
- */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
/*
* rmap_walk_control: To control rmap traversing for specific needs
*
* arg: passed to rmap_one() and invalid_vma()
+ * try_lock: bail out if the rmap lock is contended
+ * contended: indicate the rmap traversal bailed out due to lock contention
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
@@ -261,27 +713,31 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
*/
struct rmap_walk_control {
void *arg;
+ bool try_lock;
+ bool contended;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
- bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
- int (*done)(struct page *page);
- struct anon_vma *(*anon_lock)(struct page *page);
+ int (*done)(struct folio *folio);
+ struct anon_vma *(*anon_lock)(struct folio *folio,
+ struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+ struct rmap_walk_control *rwc);
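
A minimal sketch of driving the folio-based reverse-map walk with the
new try_lock/contended fields; the callback and counter are made up for
illustration.

static bool demo_count_one(struct folio *folio, struct vm_area_struct *vma,
			   unsigned long addr, void *arg)
{
	int *mappings = arg;

	(*mappings)++;
	return true;	/* keep scanning the remaining VMAs */
}

static int demo_count_mappings(struct folio *folio)
{
	int mappings = 0;
	struct rmap_walk_control rwc = {
		.arg = &mappings,
		.rmap_one = demo_count_one,
		.try_lock = true,	/* bail instead of blocking on the rmap lock */
	};

	rmap_walk(folio, &rwc);
	return rwc.contended ? -EBUSY : mappings;
}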
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
-static inline int page_referenced(struct page *page, int is_locked,
+static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
@@ -289,14 +745,18 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
+{
+}
-static inline int page_mkclean(struct page *page)
+static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
-
-
#endif /* CONFIG_MMU */
+static inline int page_mkclean(struct page *page)
+{
+ return folio_mkclean(page_folio(page));
+}
#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 8ed37f93f3c8..ab7eea01ab42 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -102,15 +102,16 @@ struct rmi_2d_sensor_platform_data {
};
/**
- * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip.
+ * struct rmi_gpio_data - overrides defaults for a single F30/F3A GPIOs/LED
+ * chip.
* @buttonpad - the touchpad is a buttonpad, so enable only the first actual
* button that is found.
- * @trackstick_buttons - Set when the function 30 is handling the physical
+ * @trackstick_buttons - Set when the function 30 or 3a is handling the physical
* buttons of the trackstick (as a PS/2 passthrough device).
- * @disable - the touchpad incorrectly reports F30 and it should be ignored.
+ * @disable - the touchpad incorrectly reports F30/F3A and it should be ignored.
* This is a special case which is due to misconfigured firmware.
*/
-struct rmi_f30_data {
+struct rmi_gpio_data {
bool buttonpad;
bool trackstick_buttons;
bool disable;
@@ -218,7 +219,7 @@ struct rmi_device_platform_data {
/* function handler pdata */
struct rmi_2d_sensor_platform_data sensor_pdata;
struct rmi_f01_power_management power_management;
- struct rmi_f30_data f30_data;
+ struct rmi_gpio_data gpio_data;
};
/**
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
index 4e78651371ba..847c9a06101b 100644
--- a/include/linux/root_dev.h
+++ b/include/linux/root_dev.h
@@ -9,15 +9,8 @@
enum {
Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
Root_CIFS = MKDEV(UNNAMED_MAJOR, 254),
+ Root_Generic = MKDEV(UNNAMED_MAJOR, 253),
Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0),
- Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1),
- Root_FD0 = MKDEV(FLOPPY_MAJOR, 0),
- Root_HDA1 = MKDEV(IDE0_MAJOR, 1),
- Root_HDA2 = MKDEV(IDE0_MAJOR, 2),
- Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1),
- Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2),
- Root_HDC1 = MKDEV(IDE1_MAJOR, 1),
- Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0),
};
extern dev_t ROOT_DEV;
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 9fe156d1c018..90d8e4475f80 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -17,8 +17,8 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-
-#define RPMSG_ADDR_ANY 0xFFFFFFFF
+#include <linux/rpmsg/byteorder.h>
+#include <uapi/linux/rpmsg.h>
struct rpmsg_device;
struct rpmsg_endpoint;
@@ -41,31 +41,37 @@ struct rpmsg_channel_info {
 * rpmsg_device - device that belongs to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
* @announce: if set, rpmsg will announce the creation/removal of this channel
+ * @little_endian: True if transport is using little endian byte representation
*/
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
- char *driver_override;
+ const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
bool announce;
+ bool little_endian;
const struct rpmsg_device_ops *ops;
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @flow_cb: remote flow control callback handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@@ -88,6 +94,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
+ rpmsg_flowcontrol_cb_t flow_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@@ -102,6 +109,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
+ * @flowcontrol: invoked when remote side flow control request is received
*/
struct rpmsg_driver {
struct device_driver drv;
@@ -109,12 +117,64 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+ int (*flowcontrol)(struct rpmsg_device *, void *, bool);
};
+static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
+{
+ if (!rpdev)
+ return __rpmsg16_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg16_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg16 cpu_to_rpmsg16(struct rpmsg_device *rpdev, u16 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg16(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg16(rpdev->little_endian, val);
+}
+
+static inline u32 rpmsg32_to_cpu(struct rpmsg_device *rpdev, __rpmsg32 val)
+{
+ if (!rpdev)
+ return __rpmsg32_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg32_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg32 cpu_to_rpmsg32(struct rpmsg_device *rpdev, u32 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg32(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg32(rpdev->little_endian, val);
+}
+
+static inline u64 rpmsg64_to_cpu(struct rpmsg_device *rpdev, __rpmsg64 val)
+{
+ if (!rpdev)
+ return __rpmsg64_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg64_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg64(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg64(rpdev->little_endian, val);
+}
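
A driver-side sketch of the accessors above, using a hypothetical wire
structure; the point is that every on-wire field is __rpmsg-typed and
converted per device, so one driver works with both byte orders.

struct demo_msg {			/* hypothetical wire format */
	__rpmsg32 cmd;
	__rpmsg16 len;
} __packed;

static int demo_rx_cb(struct rpmsg_device *rpdev, void *data, int len,
		      void *priv, u32 src)
{
	struct demo_msg *msg = data;
	u32 cmd = rpmsg32_to_cpu(rpdev, msg->cmd);	/* honours rpdev->little_endian */
	u16 plen = rpmsg16_to_cpu(rpdev, msg->len);

	dev_dbg(&rpdev->dev, "cmd %u, %u payload bytes from %u\n", cmd, plen, src);
	return 0;
}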
+
#if IS_ENABLED(CONFIG_RPMSG)
-int register_rpmsg_device(struct rpmsg_device *dev);
-void unregister_rpmsg_device(struct rpmsg_device *dev);
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override);
+int rpmsg_register_device(struct rpmsg_device *rpdev);
+int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
@@ -135,17 +195,30 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst);
+
#else
-static inline int register_rpmsg_device(struct rpmsg_device *dev)
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
{
return -ENXIO;
}
-static inline void unregister_rpmsg_device(struct rpmsg_device *dev)
+static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
+{
+ return -ENXIO;
+}
+
+static inline int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo)
{
/* This shouldn't be possible */
WARN_ON(1);
+
+ return -ENXIO;
}
static inline int __register_rpmsg_driver(struct rpmsg_driver *drv,
@@ -177,7 +250,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev
/* This shouldn't be possible */
WARN_ON(1);
- return ERR_PTR(-ENXIO);
+ return NULL;
}
static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
@@ -242,6 +315,22 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
return 0;
}
+static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/rpmsg/byteorder.h b/include/linux/rpmsg/byteorder.h
new file mode 100644
index 000000000000..c0f565dbad6d
--- /dev/null
+++ b/include/linux/rpmsg/byteorder.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Follows implementation found in linux/virtio_byteorder.h
+ */
+#ifndef _LINUX_RPMSG_BYTEORDER_H
+#define _LINUX_RPMSG_BYTEORDER_H
+#include <linux/types.h>
+#include <uapi/linux/rpmsg_types.h>
+
+static inline bool rpmsg_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
+
+static inline u16 __rpmsg16_to_cpu(bool little_endian, __rpmsg16 val)
+{
+ if (little_endian)
+ return le16_to_cpu((__force __le16)val);
+ else
+ return be16_to_cpu((__force __be16)val);
+}
+
+static inline __rpmsg16 __cpu_to_rpmsg16(bool little_endian, u16 val)
+{
+ if (little_endian)
+ return (__force __rpmsg16)cpu_to_le16(val);
+ else
+ return (__force __rpmsg16)cpu_to_be16(val);
+}
+
+static inline u32 __rpmsg32_to_cpu(bool little_endian, __rpmsg32 val)
+{
+ if (little_endian)
+ return le32_to_cpu((__force __le32)val);
+ else
+ return be32_to_cpu((__force __be32)val);
+}
+
+static inline __rpmsg32 __cpu_to_rpmsg32(bool little_endian, u32 val)
+{
+ if (little_endian)
+ return (__force __rpmsg32)cpu_to_le32(val);
+ else
+ return (__force __rpmsg32)cpu_to_be32(val);
+}
+
+static inline u64 __rpmsg64_to_cpu(bool little_endian, __rpmsg64 val)
+{
+ if (little_endian)
+ return le64_to_cpu((__force __le64)val);
+ else
+ return be64_to_cpu((__force __be64)val);
+}
+
+static inline __rpmsg64 __cpu_to_rpmsg64(bool little_endian, u64 val)
+{
+ if (little_endian)
+ return (__force __rpmsg64)cpu_to_le64(val);
+ else
+ return (__force __rpmsg64)cpu_to_be64(val);
+}
+
+#endif /* _LINUX_RPMSG_BYTEORDER_H */
diff --git a/include/linux/rpmsg/ns.h b/include/linux/rpmsg/ns.h
new file mode 100644
index 000000000000..a7804edd6d58
--- /dev/null
+++ b/include/linux/rpmsg/ns.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_RPMSG_NS_H
+#define _LINUX_RPMSG_NS_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg/byteorder.h>
+#include <linux/types.h>
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * This message is sent across to publish a new service, or announce
+ * about its removal. When we receive these messages, an appropriate
+ * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
+ * or ->remove() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ __rpmsg32 addr;
+ __rpmsg32 flags;
+} __packed;
+
+/**
+ * enum rpmsg_ns_flags - dynamic name service announcement flags
+ *
+ * @RPMSG_NS_CREATE: a new remote service was just created
+ * @RPMSG_NS_DESTROY: a known remote service was just destroyed
+ */
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+/* Address 53 is reserved for advertising remote services */
+#define RPMSG_NS_ADDR (53)
+
+int rpmsg_ns_register_device(struct rpmsg_device *rpdev);
+
+#endif
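
As an illustration of the layout above, announcing a hypothetical
service from an existing endpoint could look like this sketch (the
channel name is made up; the fixed RPMSG_NS_ADDR destination is the
real convention):

static int demo_announce_service(struct rpmsg_device *rpdev)
{
	struct rpmsg_ns_msg nsm = {
		.addr  = cpu_to_rpmsg32(rpdev, rpdev->ept->addr),
		.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_CREATE),
	};

	strscpy(nsm.name, "demo-service", sizeof(nsm.name));
	return rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
}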
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index daded9fddf36..bfbd48f435fa 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -5,26 +5,30 @@
#include <linux/device.h>
-struct qcom_glink;
+struct qcom_glink_smem;
+
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
+void qcom_glink_ssr_notify(const char *ssr_name);
+#else
+static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
+#endif
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
-struct qcom_glink *qcom_glink_smem_register(struct device *parent,
- struct device_node *node);
-void qcom_glink_smem_unregister(struct qcom_glink *glink);
-void qcom_glink_ssr_notify(const char *ssr_name);
+struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
+ struct device_node *node);
+void qcom_glink_smem_unregister(struct qcom_glink_smem *glink);
#else
-static inline struct qcom_glink *
+static inline struct qcom_glink_smem *
qcom_glink_smem_register(struct device *parent,
struct device_node *node)
{
return NULL;
}
-static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
-static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
+static inline void qcom_glink_smem_unregister(struct qcom_glink_smem *glink) {}
#endif
#endif
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
index 2e92d7407a85..3379bf4e1cb1 100644
--- a/include/linux/rpmsg/qcom_smd.h
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -11,7 +11,7 @@ struct qcom_smd_edge;
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node);
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+void qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
#else
@@ -22,9 +22,8 @@ qcom_smd_register_edge(struct device *parent,
return NULL;
}
-static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+static inline void qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
- return 0;
}
#endif
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
new file mode 100644
index 000000000000..bc8af3eb5598
--- /dev/null
+++ b/include/linux/rseq.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _LINUX_RSEQ_H
+#define _LINUX_RSEQ_H
+
+#ifdef CONFIG_RSEQ
+
+#include <linux/preempt.h>
+#include <linux/sched.h>
+
+/*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+ */
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
+ RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
+ RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
+};
+
+enum rseq_event_mask {
+ RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
+ RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
+ RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
+};
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+ if (t->rseq)
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+}
+
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ if (current->rseq)
+ __rseq_handle_notify_resume(ksig, regs);
+}
+
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ preempt_disable();
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ preempt_enable();
+ rseq_handle_notify_resume(ksig, regs);
+}
+
+/* rseq_preempt() requires preemption to be disabled. */
+static inline void rseq_preempt(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/* rseq_migrate() requires preemption to be disabled. */
+static inline void rseq_migrate(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/*
+ * If the parent process has a registered restartable sequences area, the
+ * child inherits it. Unregister rseq for a clone with CLONE_VM set.
+ */
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+ if (clone_flags & CLONE_VM) {
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+ } else {
+ t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
+ t->rseq_sig = current->rseq_sig;
+ t->rseq_event_mask = current->rseq_event_mask;
+ }
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+}
+
+#else
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+}
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_preempt(struct task_struct *t)
+{
+}
+static inline void rseq_migrate(struct task_struct *t)
+{
+}
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+}
+static inline void rseq_execve(struct task_struct *t)
+{
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+void rseq_syscall(struct pt_regs *regs);
+
+#else
+
+static inline void rseq_syscall(struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_RSEQ_H */
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 238bb85243d3..a04dacbdc8ae 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -10,7 +10,6 @@
#ifndef _RSLIB_H_
#define _RSLIB_H_
-#include <linux/list.h>
#include <linux/types.h> /* for gfp_t */
#include <linux/gfp.h> /* for GFP_KERNEL */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 22d1575e4991..5f8e438a0312 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -66,6 +66,8 @@ struct rtc_class_ops {
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
+ int (*param_get)(struct device *, struct rtc_param *param);
+ int (*param_set)(struct device *, struct rtc_param *param);
};
struct rtc_device;
@@ -80,6 +82,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
+#define RTC_NO_CDEV 1
struct rtc_device {
struct device dev;
@@ -107,25 +110,43 @@ struct rtc_device {
struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
int pie_enabled;
struct work_struct irqwork;
- /* Some hardware can't support UIE mode */
- int uie_unsupported;
-
- /* Number of nsec it takes to set the RTC clock. This influences when
- * the set ops are called. An offset:
- * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s
- * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s
- * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s
- */
- long set_offset_nsec;
- bool registered;
+ /*
+ * This offset specifies the update timing of the RTC.
+ *
+ * tsched     t1 write(t2.tv_sec - 1sec)	t2 RTC increments seconds
+ *
+ * The offset defines how tsched is computed so that the write to
+ * the RTC (t2.tv_sec - 1sec) is correct versus the time required
+ * for the transport of the write and the time which the RTC needs
+ * to increment seconds the first time after the write (t2).
+ *
+ * For directly accessible RTCs, tsched ~= t1 because the write time
+ * is negligible. For RTCs behind slow buses, the transport time is
+ * significant and has to be taken into account.
+ *
+ * The time between the write (t1) and the first increment after
+ * the write (t2) is RTC specific. For an MC146818 RTC it's 500ms,
+ * for many others it's exactly 1 second. Consult the datasheet.
+ *
+ * The value of this offset is also used to calculate the value to
+ * be written (t2.tv_sec - 1sec) at tsched.
+ *
+ * The default value for this is NSEC_PER_SEC plus a 10 msec default
+ * transport time. The offset can be adjusted by drivers so that the
+ * calculation of the value to be written at tsched becomes
+ * correct:
+ *
+ * newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0
+ */
+ unsigned long set_offset_nsec;
- /* Old ABI support */
- bool nvram_old_abi;
- struct bin_attribute *nvram;
+ unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)];
time64_t range_min;
timeu64_t range_max;
+ timeu64_t alarm_offset_max;
time64_t start_secs;
time64_t offset_secs;
bool set_start_time;
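
A worked example of the scheduling rule in the comment above, assuming
the documented default set_offset_nsec = NSEC_PER_SEC + 10 msec and a
target increment at t2 = 10.000 s wall time:

	tsched = t2 - set_offset_nsec = 10.000 s - 1.010 s = 8.990 s
	(tsched + set_offset_nsec) % NSEC_PER_SEC == 0		(holds)
	newval = tsched + set_offset_nsec - NSEC_PER_SEC = 9.000 s = t2.tv_sec - 1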
@@ -161,11 +182,10 @@ extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const struct rtc_class_ops *ops,
struct module *owner);
struct rtc_device *devm_rtc_allocate_device(struct device *dev);
-int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
+int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -205,41 +225,25 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
-/* Determine if we can call to driver to set the time. Drivers can only be
- * called to set a second aligned time value, and the field set_offset_nsec
- * specifies how far away from the second aligned time to call the driver.
- *
- * This also computes 'to_set' which is the time we are trying to set, and has
- * a zero in tv_nsecs, such that:
- * to_set - set_delay_nsec == now +/- FUZZ
+/**
+ * rtc_bound_alarmtime() - Return alarm time bound by rtc limit
+ * @rtc: Pointer to rtc device structure
+ * @requested: Requested alarm timeout
*
+ * Return: Alarm timeout bound by maximum alarm time supported by rtc.
*/
-static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec,
- struct timespec64 *to_set,
- const struct timespec64 *now)
+static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc,
+ ktime_t requested)
{
- /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
- const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
- struct timespec64 delay = {.tv_sec = 0,
- .tv_nsec = set_offset_nsec};
-
- *to_set = timespec64_add(*now, delay);
-
- if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
- to_set->tv_nsec = 0;
- return true;
- }
-
- if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
- to_set->tv_sec++;
- to_set->tv_nsec = 0;
- return true;
- }
- return false;
+ if (rtc->alarm_offset_max &&
+ rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested))
+ return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC);
+
+ return requested;
}
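
For example, an alarm-based timer clamps its requested expiry before
programming the hardware; a fragment, with rtc, now and delta assumed
from the surrounding (hypothetical) driver context:

	ktime_t requested = ktime_add(now, delta);
	ktime_t bounded = rtc_bound_alarmtime(rtc, requested);

	if (bounded != requested) {
		/* the RTC cannot reach the expiry in one shot; the caller
		 * must re-arm when the bounded alarm fires early */
	}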
-#define rtc_register_device(device) \
- __rtc_register_device(THIS_MODULE, device)
+#define devm_rtc_register_device(device) \
+ __devm_rtc_register_device(THIS_MODULE, device)
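
Under the devm-only registration flow this resolves to, a probe routine
looks like the following sketch (the ops and ranges are hypothetical):

static int demo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &demo_rtc_ops;	/* hypothetical rtc_class_ops */
	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	rtc->range_max = RTC_TIMESTAMP_END_2099;

	/* expands to __devm_rtc_register_device(THIS_MODULE, rtc) */
	return devm_rtc_register_device(rtc);
}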
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
extern int rtc_hctosys_ret;
@@ -248,16 +252,14 @@ extern int rtc_hctosys_ret;
#endif
#ifdef CONFIG_RTC_NVMEM
-int rtc_nvmem_register(struct rtc_device *rtc,
- struct nvmem_config *nvmem_config);
-void rtc_nvmem_unregister(struct rtc_device *rtc);
+int devm_rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config);
#else
-static inline int rtc_nvmem_register(struct rtc_device *rtc,
- struct nvmem_config *nvmem_config)
+static inline int devm_rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config)
{
return 0;
}
-static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
#endif
#ifdef CONFIG_RTC_INTF_SYSFS
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 67ee9d20cc5a..5a41c3bbcbe3 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -46,7 +46,6 @@ struct ds1685_priv {
u32 regstep;
int irq_num;
bool bcd_mode;
- bool no_irq;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
deleted file mode 100644
index b31f2856733d..000000000000
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
- * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-#ifndef _SIRFSOC_RTC_IOBRG_H_
-#define _SIRFSOC_RTC_IOBRG_H_
-
-struct regmap_config;
-
-extern void sirfsoc_rtc_iobrg_besyncing(void);
-
-extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
-
-extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
-struct regmap *devm_regmap_init_iobg(struct device *dev,
- const struct regmap_config *config);
-
-#endif
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 6fd615a0eea9..7d049883a08a 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -13,12 +13,39 @@
#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H
+#include <linux/compiler.h>
#include <linux/linkage.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/rbtree_types.h>
+#include <linux/spinlock_types_raw.h>
extern int max_lock_depth; /* for sysctl */
+struct rt_mutex_base {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+};
+
+#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
+{ \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \
+ .waiters = RB_ROOT_CACHED, \
+ .owner = NULL \
+}
+
+/**
+ * rt_mutex_base_is_locked - is the rtmutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
+{
+ return READ_ONCE(lock->owner) != NULL;
+}
+
+extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+
/**
* The rt_mutex structure
*
@@ -28,15 +55,7 @@ extern int max_lock_depth; /* for sysctl */
* @owner: the mutex owner
*/
struct rt_mutex {
- raw_spinlock_t wait_lock;
- struct rb_root_cached waiters;
- struct task_struct *owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
- int save_state;
- const char *name, *file;
- int line;
- void *magic;
-#endif
+ struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -46,78 +65,56 @@ struct rt_mutex_waiter;
struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
- extern int rt_mutex_debug_check_no_locks_freed(const void *from,
- unsigned long len);
- extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
- static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
- unsigned long len)
- {
- return 0;
- }
-# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
#endif
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- , .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
+#define rt_mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
__rt_mutex_init(mutex, __func__, &__key); \
} while (0)
- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
-#else
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
-# define rt_mutex_debug_task_free(t) do { } while (0)
-#endif
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
- , .dep_map = { .name = #mutexname }
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ .dep_map = { \
+ .name = #mutexname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT_CACHED \
- , .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-/**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline int rt_mutex_is_locked(struct rt_mutex *lock)
-{
- return lock->owner != NULL;
-}
-
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
-extern void rt_mutex_destroy(struct rt_mutex *lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+
#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif
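
The nest_lock annotation mirrors mutex_lock_nest_lock(): lockdep
accepts taking many rt_mutexes of one class as long as a single outer
lock serializes them. A fragment with hypothetical parent/child
structures:

	rt_mutex_lock(&parent->lock);
	list_for_each_entry(child, &parent->children, node)
		rt_mutex_lock_nest_lock(&child->lock, &parent->lock);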
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
-
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index bb9cb84114c1..cdfc897f1e3c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -10,23 +10,32 @@
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+
+static inline int rtnetlink_maybe_send(struct sk_buff *skb, struct net *net,
+ u32 pid, u32 group, int echo)
+{
+ return !skb ? 0 : rtnetlink_send(skb, net, pid, group, echo);
+}
+
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
- u32 group, struct nlmsghdr *nlh, gfp_t flags);
+ u32 group, const struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, gfp_t flags,
+ u32 portid, const struct nlmsghdr *nlh);
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
gfp_t flags, int *new_nsid,
- int new_ifindex);
+ int new_ifindex, u32 portid,
+ const struct nlmsghdr *nlh);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
- gfp_t flags);
+ gfp_t flags, u32 portid, const struct nlmsghdr *nlh);
/* RTNL is used as a global lock for all changes to network configuration */
@@ -38,6 +47,7 @@ extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
+extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;
@@ -61,16 +71,6 @@ static inline bool lockdep_rtnl_is_held(void)
rcu_dereference_check(p, lockdep_rtnl_is_held())
/**
- * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
- * @p: The pointer to read, prior to dereference
- *
- * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh()
- * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh()
- */
-#define rcu_dereference_bh_rtnl(p) \
- rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
-
-/**
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
@@ -80,6 +80,18 @@ static inline bool lockdep_rtnl_is_held(void)
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
+/**
+ * rcu_replace_pointer_rtnl - replace an RCU pointer under rtnl_lock, returning
+ * its old value
+ * @rp: RCU pointer, whose value is returned
+ * @p: regular pointer
+ *
+ * Perform a replacement under rtnl_lock, where @rp is an RCU-annotated
+ * pointer. The old value of @rp is returned, and @rp is set to @p
+ */
+#define rcu_replace_pointer_rtnl(rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held())
+
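
A sketch of the intended pattern: swap a configuration object under
RTNL and dispose of the old one after a grace period (the struct and
field names are hypothetical).

	struct demo_cfg *old;

	ASSERT_RTNL();
	old = rcu_replace_pointer_rtnl(dev->demo_cfg, new_cfg);
	if (old)
		kfree_rcu(old, rcu);	/* freed once readers are done */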
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return rtnl_dereference(dev->ingress_queue);
@@ -100,6 +112,7 @@ void net_dec_ingress_queue(void);
#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
+void netdev_xmit_skip_txqueue(bool skip);
#endif
void rtnetlink_init(void);
@@ -134,4 +147,31 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int (*vlan_fill)(struct sk_buff *skb,
struct net_device *dev,
u32 filter_mask));
+
+extern void rtnl_offload_xstats_notify(struct net_device *dev);
+
+static inline int rtnl_has_listeners(const struct net *net, u32 group)
+{
+ struct sock *rtnl = net->rtnl;
+
+ return netlink_has_listeners(rtnl, group);
+}
+
+/**
+ * rtnl_notify_needed - check if notification is needed
+ * @net: Pointer to the net namespace
+ * @nlflags: netlink ingress message flags
+ * @group: rtnl group
+ *
+ * Based on the ingress message flags and rtnl group, returns true
+ * if a notification is needed, false otherwise.
+ */
+static inline bool
+rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
+{
+ return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
+}
+
+void netdev_set_operstate(struct net_device *dev, int newstate);
+
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 745f5e73f99a..4612ef09a0c7 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -60,6 +60,7 @@
#define SD_EXIST (1 << 16)
#define DELINK_INT GPIO0_INT
#define MS_OC_INT (1 << 23)
+#define SD_OVP_INT (1 << 23)
#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
@@ -80,8 +81,10 @@
#define OC_INT_EN (1 << 23)
#define DELINK_INT_EN GPIO0_INT_EN
#define MS_OC_INT_EN (1 << 23)
+#define SD_OVP_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
+#define RTSX_DUM_REG 0x1C
/*
* macros for easy use
@@ -582,6 +585,7 @@
#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
+#define CDGW 0xFE53
#define WAKE_SEL_CTL 0xFE54
#define PCLK_CTL 0xFE55
#define PCLK_MODE_SEL 0x20
@@ -658,6 +662,23 @@
#define PM_WAKE_EN 0x01
#define PM_CTRL4 0xFF47
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01 << 0)
+#define RTS5261_FW_EA_MODE_MASK (0x01 << 5)
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01 << 0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01 << 7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01 << 6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01 << 5)
+#define RTS5261_MCU_BUS_SEL_MASK (0x01 << 4)
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03 << 2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01 << 2)
+#define RTS5261_MCU_CLOCK_GATING (0x01 << 1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01 << 0)
+
#define REG_CFG_OOBS_OFF_TIMER 0xFEA6
#define REG_CFG_OOBS_ON_TIMER 0xFEA7
#define REG_CFG_VCM_ON_TIMER 0xFEA8
@@ -701,6 +722,13 @@
#define RTS5260_DVCC_TUNE_MASK 0x70
#define RTS5260_DVCC_33 0x70
+/*RTS5261*/
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07 << 5)
+#define RTS5261_LDO1_OCP_EN (0x01 << 4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03 << 2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01 << 1)
+
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
#define LDO_VCC_REF_1V2 0x20
@@ -739,8 +767,13 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5264_AUTOLOAD_CFG2 0xFF7D
+#define RTS5264_CHIP_RST_N_SEL (1 << 6)
+
#define RTS5260_AUTOLOAD_CFG4 0xFF7F
#define RTS5260_MIMO_DISABLE 0x8A
+/*RTS5261*/
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
#define RTS5260_REG_GPIO_CTL0 0xFC1A
#define RTS5260_REG_GPIO_MASK 0x01
@@ -1040,6 +1073,9 @@
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
+#define PCR_SETTING_REG4 0x818
+#define PCR_SETTING_REG5 0x81C
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
@@ -1068,7 +1104,7 @@ struct pcr_ops {
unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
- void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state, bool runtime);
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
@@ -1082,6 +1118,7 @@ struct pcr_ops {
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
@@ -1173,7 +1210,6 @@ struct rtsx_pcr {
unsigned int card_exist;
struct delayed_work carddet_work;
- struct delayed_work idle_work;
spinlock_t lock;
struct mutex pcr_mutex;
@@ -1191,6 +1227,7 @@ struct rtsx_pcr {
#define EXTRA_CAPS_MMC_HS200 (1 << 4)
#define EXTRA_CAPS_MMC_8BIT (1 << 5)
#define EXTRA_CAPS_NO_MMC (1 << 7)
+#define EXTRA_CAPS_SD_EXPRESS (1 << 8)
u32 extra_caps;
#define IC_VER_A 0
@@ -1204,6 +1241,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
@@ -1229,6 +1267,7 @@ struct rtsx_pcr {
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
+ u8 ovp_stat;
u8 rtd3_en;
};
@@ -1239,12 +1278,15 @@ struct rtsx_pcr {
#define PID_5260 0x5260
#define PID_5261 0x5261
#define PID_5228 0x5228
+#define PID_5264 0x5264
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
#define is_version(pcr, pid, ver) \
(CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define is_version_higher_than(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version > (ver))
#define pcr_dbg(pcr, fmt, arg...) \
dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h
index 159729cffd8e..3247ed8e9ff0 100644
--- a/include/linux/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
@@ -54,8 +54,6 @@ struct rtsx_ucr {
struct usb_device *pusb_dev;
struct usb_interface *pusb_intf;
struct usb_sg_request current_sg;
- unsigned char *iobuf;
- dma_addr_t iobuf_dma;
struct timer_list sg_timer;
struct mutex dev_mutex;
diff --git a/include/linux/rv.h b/include/linux/rv.h
new file mode 100644
index 000000000000..8883b41d88ec
--- /dev/null
+++ b/include/linux/rv.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime Verification.
+ *
+ * For further information, see: kernel/trace/rv/rv.c.
+ */
+#ifndef _LINUX_RV_H
+#define _LINUX_RV_H
+
+#define MAX_DA_NAME_LEN 24
+
+#ifdef CONFIG_RV
+/*
+ * Deterministic automaton per-object variables.
+ */
+struct da_monitor {
+ bool monitoring;
+ unsigned int curr_state;
+};
+
+/*
+ * Per-task RV monitor count, currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think about
+ * adding more or developing a dynamic method. So far, none of
+ * these are justified.
+ */
+#define RV_PER_TASK_MONITORS 1
+#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+
+/*
+ * Further monitor types are expected, so make this a union.
+ */
+union rv_task_monitor {
+ struct da_monitor da_mon;
+};
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor {
+ const char *name;
+ const char *description;
+ void (*react)(char *msg);
+};
+#endif
+
+struct rv_monitor {
+ const char *name;
+ const char *description;
+ bool enabled;
+ int (*enable)(void);
+ void (*disable)(void);
+ void (*reset)(void);
+#ifdef CONFIG_RV_REACTORS
+ void (*react)(char *msg);
+#endif
+};
+
+bool rv_monitoring_on(void);
+int rv_unregister_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor);
+int rv_get_task_monitor_slot(void);
+void rv_put_task_monitor_slot(int slot);
+
+#ifdef CONFIG_RV_REACTORS
+bool rv_reacting_on(void);
+int rv_unregister_reactor(struct rv_reactor *reactor);
+int rv_register_reactor(struct rv_reactor *reactor);
+#endif /* CONFIG_RV_REACTORS */
+
+#endif /* CONFIG_RV */
+#endif /* _LINUX_RV_H */
diff --git a/include/linux/rw_hint.h b/include/linux/rw_hint.h
new file mode 100644
index 000000000000..309ca72f2dfb
--- /dev/null
+++ b/include/linux/rw_hint.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RW_HINT_H
+#define _LINUX_RW_HINT_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler_attributes.h>
+#include <uapi/linux/fcntl.h>
+
+/* Block storage write lifetime hint values. */
+enum rw_hint {
+ WRITE_LIFE_NOT_SET = RWH_WRITE_LIFE_NOT_SET,
+ WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
+ WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
+ WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
+ WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
+ WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
+} __packed;
+
+/* Sparse ignores __packed annotations on enums, hence the #ifndef below. */
+#ifndef __CHECKER__
+static_assert(sizeof(enum rw_hint) == 1);
+#endif
+
+#endif /* _LINUX_RW_HINT_H */
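
The one-byte guarantee matters when the hint is embedded in
space-sensitive structures; a sketch of such an embedding (the
container struct is hypothetical):

struct demo_io_state {
	u8		flags;
	enum rw_hint	write_hint;	/* occupies a single byte */
};
static_assert(sizeof(struct demo_io_state) == 2);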
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
new file mode 100644
index 000000000000..29c4e4f243e4
--- /dev/null
+++ b/include/linux/rwbase_rt.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWBASE_RT_H
+#define _LINUX_RWBASE_RT_H
+
+#include <linux/rtmutex.h>
+#include <linux/atomic.h>
+
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
+struct rwbase_rt {
+ atomic_t readers;
+ struct rt_mutex_base rtmutex;
+};
+
+#define __RWBASE_INITIALIZER(name) \
+{ \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \
+}
+
+#define init_rwbase_rt(rwbase) \
+ do { \
+ rt_mutex_base_init(&(rwbase)->rtmutex); \
+ atomic_set(&(rwbase)->readers, READER_BIAS); \
+ } while (0)
+
+
+static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) != READER_BIAS;
+}
+
+static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
+{
+ WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
+}
+
+static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) > 0;
+}
+
+#endif /* _LINUX_RWBASE_RT_H */
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 3dcd617e65ae..c0ef596f340b 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -30,31 +30,16 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#endif
-
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
- do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
- do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
@@ -70,6 +55,12 @@ do { \
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
+#else
+#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
+#endif
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_irqsave(lock, flags) \
@@ -128,4 +119,11 @@ do { \
1 : ({ local_irq_restore(flags); 0; }); \
})
+#ifdef arch_rwlock_is_contended
+#define rwlock_is_contended(lock) \
+ arch_rwlock_is_contended(&(lock)->raw_lock)
+#else
+#define rwlock_is_contended(lock) ((void)(lock), 0)
+#endif /* arch_rwlock_is_contended */
+
#endif /* __LINUX_RWLOCK_H */
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11be..dceb0a59b692 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -17,6 +17,7 @@
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
@@ -157,8 +158,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
- do_raw_read_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
return flags;
}
@@ -184,8 +184,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
- do_raw_write_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
return flags;
}
@@ -211,6 +210,13 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
+static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000000..8544ff05e594
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_RT_H
+#error Do not #include directly. Use <linux/spinlock.h>.
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+ struct lock_class_key *key);
+#else
+static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name,
+ struct lock_class_key *key)
+{
+}
+#endif
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ init_rwbase_rt(&(rwl)->rwbase); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+extern void rt_read_lock(rwlock_t *rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock);
+extern void rt_write_lock(rwlock_t *rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
+extern int rt_write_trylock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock);
+
+static __always_inline void read_lock(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_irq(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_read_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+
+static __always_inline void read_unlock(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void write_lock(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+{
+ rt_write_lock_nested(rwlock, subclass);
+}
+#else
+#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
+#endif
+
+static __always_inline void write_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_irq(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_write_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = write_trylock(lock); \
+ __locked; \
+})
+
+static __always_inline void write_unlock(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_write_unlock(rwlock);
+}
+
+#define rwlock_is_contended(lock) (((void)(lock), 0))
+
+#endif /* __LINUX_RWLOCK_RT_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 3bd03e18061c..1948442e7750 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,9 +1,23 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
/*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- * and initializers
+ * generic rwlock type definitions and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
@@ -21,16 +35,6 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_CONFIG, \
- }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
@@ -46,4 +50,29 @@ typedef struct {
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+typedef struct {
+ struct rwbase_rt rwbase;
+ atomic_t readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define __RWLOCK_RT_INITIALIZER(name) \
+{ \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ RW_DEP_MAP_INIT(name) \
+}
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 25e3fde85617..4f1c18992f76 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,11 +11,24 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+#include <linux/cleanup.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ },
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
@@ -53,26 +66,26 @@ struct rw_semaphore {
#endif
};
-/* In all implementations count != 0 means locked */
+#define RWSEM_UNLOCKED_VALUE 0UL
+#define RWSEM_WRITER_LOCKED (1UL << 0)
+#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
- return atomic_long_read(&sem->count) != 0;
+ return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}
-#define RWSEM_UNLOCKED_VALUE 0L
-#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
+}
-/* Common initializer macros and functions */
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
+}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SLEEP, \
- },
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
+/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
@@ -110,7 +123,7 @@ do { \
/*
* This is the same regardless of which rwsem implementation that is being used.
- * It is just a heuristic meant to be called by somebody alreadying holding the
+ * It is just a heuristic meant to be called by somebody already holding the
* rwsem to see if somebody from an incompatible type is wanting access to the
* lock.
*/
@@ -119,10 +132,84 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+struct rw_semaphore {
+ struct rwbase_rt rwbase;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ __RWSEM_DEP_MAP_INIT(name) \
+ }
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
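/*
 * Illustration (hypothetical code): both initialization styles below give
 * each semaphore its own lockdep class.
 */
static DECLARE_RWSEM(example_static_sem);

struct example_obj {
	struct rw_semaphore sem;
};

static void example_obj_init(struct example_obj *obj)
{
	/* One lock class is created per init_rwsem() call site. */
	init_rwsem(&obj->sem);
}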
+static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
+{
+ return rw_base_is_locked(&sem->rwbase);
+}
+
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(!rwsem_is_locked(sem));
+}
+
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+ rw_base_assert_held_write(sem);
+}
+
+static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+ return rw_base_is_contended(&sem->rwbase);
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
+static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ lockdep_assert_held(sem);
+ else
+ rwsem_assert_held_nolockdep(sem);
+}
+
+static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ lockdep_assert_held_write(sem);
+ else
+ rwsem_assert_held_write_nolockdep(sem);
+}
+
/*
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
@@ -151,6 +238,13 @@ extern void up_read(struct rw_semaphore *sem);
*/
extern void up_write(struct rw_semaphore *sem);
+DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
+
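/*
 * Illustrative sketch of the guard API defined above (names are
 * hypothetical): the semaphore is released automatically when the guard
 * goes out of scope, including on early returns.
 */
static int example_read_value(struct rw_semaphore *sem, const int *value)
{
	guard(rwsem_read)(sem);

	if (!*value)
		return -ENODATA;	/* up_read() runs automatically */
	return *value;
}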
/*
* downgrade write lock to read lock
*/
@@ -171,6 +265,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -179,7 +274,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
-} while (0);
+} while (0)
/*
* Take/release a lock when not the owner will release it.
@@ -191,6 +286,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
+# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
deleted file mode 100644
index 833871dcf6fd..000000000000
--- a/include/linux/s3c_adc_battery.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _S3C_ADC_BATTERY_H
-#define _S3C_ADC_BATTERY_H
-
-struct s3c_adc_bat_thresh {
- int volt; /* mV */
- int cur; /* mA */
- int level; /* percent */
-};
-
-struct s3c_adc_bat_pdata {
- int (*init)(void);
- void (*exit)(void);
- void (*enable_charger)(void);
- void (*disable_charger)(void);
-
- int gpio_charge_finished;
- int gpio_inverted;
-
- const struct s3c_adc_bat_thresh *lut_noac;
- unsigned int lut_noac_cnt;
- const struct s3c_adc_bat_thresh *lut_acin;
- unsigned int lut_acin_cnt;
-
- const unsigned int volt_channel;
- const unsigned int current_channel;
- const unsigned int backup_volt_channel;
-
- const unsigned int volt_samples;
- const unsigned int current_samples;
- const unsigned int backup_volt_samples;
-
- const unsigned int volt_mult;
- const unsigned int current_mult;
- const unsigned int backup_volt_mult;
- const unsigned int internal_impedance;
-
- const unsigned int backup_volt_max;
- const unsigned int backup_volt_min;
-};
-
-#endif
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e40d019c3d9d..d662cf136021 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -9,8 +9,17 @@
#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H
-#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/minmax.h>
+#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/wait.h>
struct seq_file;
@@ -19,24 +28,14 @@ struct seq_file;
*/
struct sbitmap_word {
/**
- * @depth: Number of bits being used in @word/@cleared
- */
- unsigned long depth;
-
- /**
* @word: word holding free bits
*/
- unsigned long word ____cacheline_aligned_in_smp;
+ unsigned long word;
/**
* @cleared: word holding cleared bits
*/
unsigned long cleared ____cacheline_aligned_in_smp;
-
- /**
- * @swap_lock: Held while swapping word <-> cleared
- */
- spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -62,9 +61,22 @@ struct sbitmap {
unsigned int map_nr;
/**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
+
+ /**
* @map: Allocated bitmap.
*/
struct sbitmap_word *map;
+
+ /*
+ * @alloc_hint: Cache of last successfully allocated or freed bit.
+ *
+ * This is per-cpu, which allows multiple users to stick to different
+ * cachelines until the map is exhausted.
+ */
+ unsigned int __percpu *alloc_hint;
};
#define SBQ_WAIT_QUEUES 8
@@ -75,11 +87,6 @@ struct sbitmap {
*/
struct sbq_wait_state {
/**
- * @wait_cnt: Number of frees remaining before we wake up.
- */
- atomic_t wait_cnt;
-
- /**
* @wait: Wait queue.
*/
wait_queue_head_t wait;
@@ -100,14 +107,6 @@ struct sbitmap_queue {
*/
struct sbitmap sb;
- /*
- * @alloc_hint: Cache of last successfully allocated or freed bit.
- *
- * This is per-cpu, which allows multiple users to stick to different
- * cachelines until the map is exhausted.
- */
- unsigned int __percpu *alloc_hint;
-
/**
* @wake_batch: Number of bits which must be freed before we wake up any
* waiters.
@@ -130,15 +129,21 @@ struct sbitmap_queue {
atomic_t ws_active;
/**
- * @round_robin: Allocate bits in strict round-robin order.
+ * @min_shallow_depth: The minimum shallow depth which may be passed to
+ * sbitmap_queue_get_shallow()
*/
- bool round_robin;
+ unsigned int min_shallow_depth;
/**
- * @min_shallow_depth: The minimum shallow depth which may be passed to
- * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
+ * @completion_cnt: Number of bits cleared passed to the
+ * wakeup function.
*/
- unsigned int min_shallow_depth;
+ atomic_t completion_cnt;
+
+ /**
+ * @wakeup_cnt: Number of thread wake ups issued.
+ */
+ atomic_t wakeup_cnt;
};
/**
@@ -149,11 +154,24 @@ struct sbitmap_queue {
* given, a good default is chosen.
* @flags: Allocation flags.
* @node: Memory node to allocate on.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ * starting from the last allocated bit. This is less efficient
+ * than the default behavior (false).
+ * @alloc_hint: If true, apply percpu hint for where to start searching for
+ * a free bit.
*
* Return: Zero on success or negative errno on failure.
*/
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node);
+ gfp_t flags, int node, bool round_robin, bool alloc_hint);
+
+/* sbitmap internal helper */
+static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
+{
+ if (index == sb->map_nr - 1)
+ return sb->depth - (index << sb->shift);
+ return 1U << sb->shift;
+}
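/*
 * Illustration (hypothetical helper): the per-word depths sum back up to
 * the total depth; only the last word can be shorter than 1 << shift.
 */
static unsigned int example_total_depth(const struct sbitmap *sb)
{
	unsigned int i, total = 0;

	for (i = 0; i < sb->map_nr; i++)
		total += __map_depth(sb, i);
	return total;	/* always equals sb->depth */
}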
/**
* sbitmap_free() - Free memory used by a &struct sbitmap.
@@ -161,7 +179,8 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
*/
static inline void sbitmap_free(struct sbitmap *sb)
{
- kfree(sb->map);
+ free_percpu(sb->alloc_hint);
+ kvfree(sb->map);
sb->map = NULL;
}
@@ -178,22 +197,17 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
/**
* sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
* @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
- * @round_robin: If true, be stricter about allocation order; always allocate
- * starting from the last allocated bit. This is less efficient
- * than the default behavior (false).
*
* This operation provides acquire barrier semantics if it succeeds.
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+int sbitmap_get(struct sbitmap *sb);
/**
* sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
* limiting the depth used from each word.
* @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
* @shallow_depth: The maximum number of bits to allocate from a single word.
*
* This rather specific operation allows for having multiple users with
@@ -205,8 +219,7 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
- unsigned long shallow_depth);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
@@ -247,7 +260,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
while (scanned < sb->depth) {
unsigned long word;
unsigned int depth = min_t(unsigned int,
- sb->map[index].depth - nr,
+ __map_depth(sb, index) - nr,
sb->depth - scanned);
scanned += depth;
@@ -320,10 +333,16 @@ static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int b
set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}
-static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
- unsigned int bitnr)
+/*
+ * Pair of sbitmap_get, and this one applies both cleared bit and
+ * allocation hint.
+ */
+static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
- clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+ sbitmap_deferred_clear_bit(sb, bitnr);
+
+ if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
+ *raw_cpu_ptr(sb->alloc_hint) = bitnr;
}
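/*
 * Hypothetical allocate/release pairing (sb is an initialized bitmap):
 */
static void example_use_bit(struct sbitmap *sb)
{
	int nr = sbitmap_get(sb);

	if (nr >= 0) {
		/* ... bit nr is owned by this context ... */
		sbitmap_put(sb, nr);	/* deferred clear plus hint update */
	}
}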
static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
@@ -331,6 +350,24 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+static inline int sbitmap_calculate_shift(unsigned int depth)
+{
+ int shift = ilog2(BITS_PER_LONG);
+
+ /*
+ * If the bitmap is small, shrink the number of bits per word so
+ * we spread over a few cachelines, at least. If less than 4
+ * bits, just forget about it, it's not going to work optimally
+ * anyway.
+ */
+ if (depth >= 4) {
+ while ((4U << shift) > depth)
+ shift--;
+ }
+
+ return shift;
+}
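/*
 * Worked example, assuming BITS_PER_LONG == 64: for depth == 16 the loop
 * halves the bits per word while four words would overshoot (256, 128, 64
 * and 32 are all > 16), ending at shift == 2, i.e. 4 bits per word spread
 * across four words.
 */
static int example_shift(void)
{
	return sbitmap_calculate_shift(16);	/* 2 when BITS_PER_LONG == 64 */
}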
+
/**
* sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
* @sb: Bitmap to show.
@@ -340,6 +377,16 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
*/
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
+
+/**
+ * sbitmap_weight() - Return the number of set and not cleared bits in a
+ * &struct sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: Number of bits that are set and not cleared.
+ */
+unsigned int sbitmap_weight(const struct sbitmap *sb);
+
/**
* sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
* seq_file.
@@ -374,11 +421,21 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
kfree(sbq->ws);
- free_percpu(sbq->alloc_hint);
sbitmap_free(&sbq->sb);
}
/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of shares.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this will calculate the wake
+ * batch according to the number of users. This interface is for HCTX
+ * shared tags or queue shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users);
+
+/**
* sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
* @sbq: Bitmap queue to resize.
* @depth: New number of bits to resize to.
@@ -399,7 +456,20 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
- * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
+ * @sbq: Bitmap queue to allocate from.
+ * @nr_tags: number of tags requested
+ * @offset: offset to add to returned bits
+ *
+ * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
+ * a bit in the mask returned, and the caller must add @offset to the value to
+ * get the absolute tag value.
+ */
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset);
+
+/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled.
* @sbq: Bitmap queue to allocate from.
@@ -411,8 +481,8 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq);
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int shallow_depth);
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
@@ -435,32 +505,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
- * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
- * sbitmap_queue, limiting the depth used from each word.
- * @sbq: Bitmap queue to allocate from.
- * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
- * sbitmap_queue_clear()).
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- * See sbitmap_get_shallow().
- *
- * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
- * initializing @sbq.
- *
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
- */
-static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int *cpu,
- unsigned int shallow_depth)
-{
- int nr;
-
- *cpu = get_cpu();
- nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
- put_cpu();
- return nr;
-}
-
-/**
* sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
* minimum shallow depth that will be used.
* @sbq: Bitmap queue in question.
@@ -487,6 +531,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu);
+/**
+ * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @offset: offset for each tag in array
+ * @tags: array of tags
+ * @nr_tags: number of tags in array
+ */
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags);
+
static inline int sbq_index_inc(int index)
{
return (index + 1) & (SBQ_WAIT_QUEUES - 1);
@@ -526,8 +581,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
* sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
* on a &struct sbitmap_queue.
* @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
*/
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 45cf7b69d852..77df3d7b18a6 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -16,15 +16,12 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
+#ifdef CONFIG_NEED_SG_DMA_FLAGS
+ unsigned int dma_flags;
+#endif
};
/*
- * Since the above length field is an unsigned int, below we define the maximum
- * length in bytes that can be stored in one scatterlist entry.
- */
-#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
-
-/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
@@ -45,6 +42,12 @@ struct sg_table {
unsigned int orig_nents; /* original size of list */
};
+struct sg_append_table {
+ struct sg_table sgt; /* The scatter list table */
+ struct scatterlist *prv; /* last populated sge in the table */
+ unsigned int total_nents; /* Total entries in the table */
+};
+
/*
* Notes on SG table design.
*
@@ -69,10 +72,27 @@ struct sg_table {
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg) ((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+ return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+ return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_END;
+}
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -92,7 +112,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+ BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
@@ -121,12 +141,36 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
sg->length = len;
}
+/**
+ * sg_set_folio - Set sg entry to point at given folio
+ * @sg: SG entry
+ * @folio: The folio
+ * @len: Length of data
+ * @offset: Offset into folio
+ *
+ * Description:
+ * Use this function to set an sg entry pointing at a folio, never assign
+ * the folio directly. We encode sg table information in the lower bits
+ * of the folio pointer. See sg_page() for looking up the page belonging
+ * to an sg entry.
+ *
+ **/
+static inline void sg_set_folio(struct scatterlist *sg, struct folio *folio,
+ size_t len, size_t offset)
+{
+ WARN_ON_ONCE(len > UINT_MAX);
+ WARN_ON_ONCE(offset > UINT_MAX);
+ sg_assign_page(sg, &folio->page);
+ sg->offset = offset;
+ sg->length = len;
+}
+
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+ return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
/**
@@ -165,6 +209,22 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sgtable_dma_sg(sgt, sg, i) \
for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
+static inline void __sg_chain(struct scatterlist *chain_sg,
+ struct scatterlist *sgl)
+{
+ /*
+ * offset and length are unused for chain entry. Clear them.
+ */
+ chain_sg->offset = 0;
+ chain_sg->length = 0;
+
+ /*
+ * Set lowest bit to indicate a link pointer, and make sure to clear
+ * the termination bit if it happens to be set.
+ */
+ chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END;
+}
+
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@@ -178,18 +238,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
- /*
- * offset and length are unused for chain entry. Clear them.
- */
- prv[prv_nents - 1].offset = 0;
- prv[prv_nents - 1].length = 0;
-
- /*
- * Set lowest bit to indicate a link pointer, and make sure to clear
- * the termination bit if it happens to be set.
- */
- prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN)
- & ~SG_END;
+ __sg_chain(&prv[prv_nents - 1], sgl);
}
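/*
 * Hypothetical chaining sketch: the last entry of the first list is
 * sacrificed as the link pointer, so it cannot carry data of its own.
 */
static void example_chain(void)
{
	static struct scatterlist first[8], second[16];

	sg_init_table(first, 8);
	sg_init_table(second, 16);
	sg_chain(first, 8, second);	/* first[7] now links to second[0] */
}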
/**
@@ -223,6 +272,108 @@ static inline void sg_unmark_end(struct scatterlist *sg)
sg->page_link &= ~SG_END;
}
+/*
+ * On 64-bit architectures there is a 4-byte padding in struct scatterlist
+ * (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). Use this padding for DMA
+ * flags bits to indicate when a specific dma address is a bus address or the
+ * buffer may have been bounced via SWIOTLB.
+ */
+#ifdef CONFIG_NEED_SG_DMA_FLAGS
+
+#define SG_DMA_BUS_ADDRESS (1 << 0)
+#define SG_DMA_SWIOTLB (1 << 1)
+
+/**
+ * sg_dma_is_bus_address - Return whether a given segment was marked
+ * as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if sg_dma_mark_bus_address() has been called on
+ * this segment.
+ **/
+static inline bool sg_dma_is_bus_address(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks the passed in sg entry to indicate that the dma_address is
+ * a bus address and doesn't need to be unmapped. This should only be
+ * used by dma_map_sg() implementations to mark bus addresses
+ * so they can be properly cleaned up in dma_unmap_sg().
+ **/
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Clears the bus address mark.
+ **/
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_is_swiotlb - Return whether the scatterlist was marked for SWIOTLB
+ * bouncing
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if the scatterlist was marked for SWIOTLB bouncing. Not all
+ * elements may have been bounced, so the caller would have to check
+ * individual SG entries with is_swiotlb_buffer().
+ */
+static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_SWIOTLB;
+}
+
+/**
+ * sg_dma_mark_swiotlb - Mark the scatterlist for SWIOTLB bouncing
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks a scatterlist for SWIOTLB bouncing. Not all SG entries may be
+ * bounced.
+ */
+static inline void sg_dma_mark_swiotlb(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_SWIOTLB;
+}
+
+#else
+
+static inline bool sg_dma_is_bus_address(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+}
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+}
+static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_swiotlb(struct scatterlist *sg)
+{
+}
+
+#endif /* CONFIG_NEED_SG_DMA_FLAGS */
+
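/*
 * Hedged sketch of an unmap path honouring the flag; the driver context
 * is hypothetical, the sg and DMA helpers are the existing ones.
 */
static void example_unmap_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg)) {
			/* Bus addresses bypass the IOMMU: nothing to unmap. */
			sg_dma_unmark_bus_address(sg);
			continue;
		}
		dma_unmap_page(dev, sg_dma_address(sg), sg_dma_len(sg), dir);
	}
}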
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
@@ -281,18 +432,51 @@ typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
- sg_free_fn *);
+ sg_free_fn *, unsigned int);
void sg_free_table(struct sg_table *);
+void sg_free_append_table(struct sg_append_table *sgt);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
-int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, unsigned int max_segment,
- gfp_t gfp_mask);
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, gfp_t gfp_mask);
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned int offset, unsigned long size,
+ unsigned int max_segment,
+ unsigned int left_pages, gfp_t gfp_mask);
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size,
+ unsigned int max_segment, gfp_t gfp_mask);
+
+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node. A user
+ * may provide an offset into the first page and a size of valid data in
+ * the buffer specified by the page array. The returned sg table is
+ * released by sg_free_table().
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages,
+ unsigned int n_pages,
+ unsigned int offset,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+ size, UINT_MAX, gfp_mask);
+}
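/*
 * End-to-end usage sketch built on the wrapper above (hypothetical
 * example; dev and pages come from the caller).
 */
static int example_map_pages(struct device *dev, struct page **pages,
			     unsigned int n_pages, unsigned long size)
{
	struct sg_table sgt;
	int ret;

	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		return ret;

	ret = dma_map_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
	if (!ret) {
		/* ... hand sgt.sgl to the hardware ... */
		dma_unmap_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
	}
	sg_free_table(&sgt);
	return ret;
}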
#ifdef CONFIG_SGL_ALLOC
struct scatterlist *sgl_alloc_order(unsigned long long length,
@@ -474,7 +658,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
- * long as an interation is enclosed between start and stop, the user
+ * long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
diff --git a/include/linux/sched.h b/include/linux/sched.h
index afe01e232935..3c2abbc587b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -10,41 +10,55 @@
#include <uapi/linux/sched.h>
#include <asm/current.h>
-
-#include <linux/pid.h>
-#include <linux/sem.h>
+#include <asm/processor.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+
+#include <linux/cache.h>
+#include <linux/irqflags_types.h>
+#include <linux/smp_types.h>
+#include <linux/pid_types.h>
+#include <linux/sem_types.h>
#include <linux/shm.h>
-#include <linux/kcov.h>
-#include <linux/mutex.h>
-#include <linux/plist.h>
-#include <linux/hrtimer.h>
-#include <linux/irqflags.h>
-#include <linux/seccomp.h>
-#include <linux/nodemask.h>
-#include <linux/rcupdate.h>
-#include <linux/refcount.h>
+#include <linux/kmsan_types.h>
+#include <linux/mutex_types.h>
+#include <linux/plist_types.h>
+#include <linux/hrtimer_types.h>
+#include <linux/timer_types.h>
+#include <linux/seccomp_types.h>
+#include <linux/nodemask_types.h>
+#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
+#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
-#include <linux/posix-timers.h>
-#include <linux/rseq.h>
-#include <linux/seqlock.h>
+#include <linux/posix-timers_types.h>
+#include <linux/restart_block.h>
+#include <uapi/linux/rseq.h>
+#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
+#include <linux/rv.h>
+#include <linux/livepatch_sched.h>
+#include <linux/uidgid_types.h>
+#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-struct backing_dev_info;
struct bio_list;
struct blk_plug;
+struct bpf_local_storage;
+struct bpf_run_ctx;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
+struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
@@ -57,47 +71,60 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
-struct sched_param;
+struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct task_struct;
+struct user_event_mm;
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
- * We have two separate sets of flags: task->state
+ * We have two separate sets of flags: task->__state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
-/* Used in tsk->state: */
-#define TASK_RUNNING 0x0000
-#define TASK_INTERRUPTIBLE 0x0001
-#define TASK_UNINTERRUPTIBLE 0x0002
-#define __TASK_STOPPED 0x0004
-#define __TASK_TRACED 0x0008
+/* Used in tsk->__state: */
+#define TASK_RUNNING 0x00000000
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define __TASK_STOPPED 0x00000004
+#define __TASK_TRACED 0x00000008
/* Used in tsk->exit_state: */
-#define EXIT_DEAD 0x0010
-#define EXIT_ZOMBIE 0x0020
+#define EXIT_DEAD 0x00000010
+#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* Used in tsk->state again: */
-#define TASK_PARKED 0x0040
-#define TASK_DEAD 0x0080
-#define TASK_WAKEKILL 0x0100
-#define TASK_WAKING 0x0200
-#define TASK_NOLOAD 0x0400
-#define TASK_NEW 0x0800
-#define TASK_STATE_MAX 0x1000
+/* Used in tsk->__state again: */
+#define TASK_PARKED 0x00000040
+#define TASK_DEAD 0x00000080
+#define TASK_WAKEKILL 0x00000100
+#define TASK_WAKING 0x00000200
+#define TASK_NOLOAD 0x00000400
+#define TASK_NEW 0x00000800
+#define TASK_RTLOCK_WAIT 0x00001000
+#define TASK_FREEZABLE 0x00002000
+#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
+#define TASK_FROZEN 0x00008000
+#define TASK_STATE_MAX 0x00010000
+
+#define TASK_ANY (TASK_STATE_MAX-1)
+
+/*
+ * DO NOT ADD ANY NEW USERS !
+ */
+#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
@@ -110,13 +137,11 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-
-#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
/*
* Special states are those that do not use the normal wait-loop pattern. See
@@ -125,32 +150,39 @@ struct task_group;
#define is_special_task_state(state) \
((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
-#define __set_current_state(state_value) \
- do { \
- WARN_ON_ONCE(is_special_task_state(state_value));\
- current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
- } while (0)
-
-#define set_current_state(state_value) \
- do { \
- WARN_ON_ONCE(is_special_task_state(state_value));\
- current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define debug_normal_state_change(state_value) \
+ do { \
+ WARN_ON_ONCE(is_special_task_state(state_value)); \
+ current->task_state_change = _THIS_IP_; \
} while (0)
-#define set_special_state(state_value) \
+# define debug_special_state_change(state_value) \
do { \
- unsigned long flags; /* may shadow */ \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
- raw_spin_lock_irqsave(&current->pi_lock, flags); \
current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
- raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
+
+# define debug_rtlock_wait_set_state() \
+ do { \
+ current->saved_state_change = current->task_state_change;\
+ current->task_state_change = _THIS_IP_; \
+ } while (0)
+
+# define debug_rtlock_wait_restore_state() \
+ do { \
+ current->task_state_change = current->saved_state_change;\
+ } while (0)
+
#else
+# define debug_normal_state_change(cond) do { } while (0)
+# define debug_special_state_change(cond) do { } while (0)
+# define debug_rtlock_wait_set_state() do { } while (0)
+# define debug_rtlock_wait_restore_state() do { } while (0)
+#endif
+
/*
- * set_current_state() includes a barrier so that the write of current->state
+ * set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
@@ -173,9 +205,9 @@ struct task_group;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* where wake_up_state()/try_to_wake_up() executes a full memory barrier before
- * accessing p->state.
+ * accessing p->__state.
*
- * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
@@ -187,29 +219,87 @@ struct task_group;
* Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
- current->state = (state_value)
+ do { \
+ debug_normal_state_change((state_value)); \
+ WRITE_ONCE(current->__state, (state_value)); \
+ } while (0)
#define set_current_state(state_value) \
- smp_store_mb(current->state, (state_value))
+ do { \
+ debug_normal_state_change((state_value)); \
+ smp_store_mb(current->__state, (state_value)); \
+ } while (0)
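/*
 * The canonical wait loop these macros serve, with example_condition as a
 * hypothetical placeholder:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (example_condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */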
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
- * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
- * will not collide with our state change.
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
*/
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
+ \
raw_spin_lock_irqsave(&current->pi_lock, flags); \
- current->state = (state_value); \
+ debug_special_state_change((state_value)); \
+ WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
-#endif
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired. These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ * current_save_and_set_rtlock_wait_state();
+ * for (;;) {
+ * if (try_lock())
+ * break;
+ * raw_spin_unlock_irq(&lock->wait_lock);
+ * schedule_rtlock();
+ * raw_spin_lock_irq(&lock->wait_lock);
+ * set_current_state(TASK_RTLOCK_WAIT);
+ * }
+ * current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ current->saved_state = current->__state; \
+ debug_rtlock_wait_set_state(); \
+ WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0);
+
+#define current_restore_rtlock_saved_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ debug_rtlock_wait_restore_state(); \
+ WRITE_ONCE(current->__state, current->saved_state); \
+ current->saved_state = TASK_RUNNING; \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0);
+
+#define get_current_state() READ_ONCE(current->__state)
-/* Task command name length: */
-#define TASK_COMM_LEN 16
+/*
+ * Define the task command name length as an enum, so that it is visible
+ * to BPF programs.
+ */
+enum {
+ TASK_COMM_LEN = 16,
+};
extern void scheduler_tick(void);
@@ -223,6 +313,9 @@ extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
@@ -286,6 +379,10 @@ extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif
+struct sched_param {
+ int sched_priority;
+};
+
struct sched_info {
#ifdef CONFIG_SCHED_INFO
/* Cumulative counters: */
@@ -326,34 +423,6 @@ struct load_weight {
u32 inv_weight;
};
-/**
- * struct util_est - Estimation utilization of FAIR tasks
- * @enqueued: instantaneous estimated utilization of a task/cpu
- * @ewma: the Exponential Weighted Moving Average (EWMA)
- * utilization of a task
- *
- * Support data structure to track an Exponential Weighted Moving Average
- * (EWMA) of a FAIR task's utilization. New samples are added to the moving
- * average each time a task completes an activation. Sample's weight is chosen
- * so that the EWMA will be relatively insensitive to transient changes to the
- * task's workload.
- *
- * The enqueued attribute has a slightly different meaning for tasks and cpus:
- * - task: the task's util_avg at last task dequeue time
- * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
- * Thus, the util_est.enqueued of a task represents the contribution on the
- * estimated utilization of the CPU where that task is currently enqueued.
- *
- * Only for tasks we track a moving average of the past instantaneous
- * estimated utilization. This allows to absorb sporadic drops in utilization
- * of an otherwise almost periodic task.
- */
-struct util_est {
- unsigned int enqueued;
- unsigned int ewma;
-#define UTIL_EST_WEIGHT_SHIFT 2
-} __attribute__((__aligned__(sizeof(u64))));
-
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
@@ -408,9 +477,20 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
- struct util_est util_est;
+ unsigned int util_est;
} ____cacheline_aligned;
+/*
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est at dequeue time.
+ * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
+ * it is safe to use MSB.
+ */
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -426,7 +506,9 @@ struct sched_statistics {
u64 block_start;
u64 block_max;
- u64 exec_max;
+ s64 sum_block_runtime;
+
+ s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
@@ -444,25 +526,32 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_forceidle_sum;
#endif
-};
+#endif /* CONFIG_SCHEDSTATS */
+} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
struct rb_node run_node;
+ u64 deadline;
+ u64 min_vruntime;
+
struct list_head group_node;
unsigned int on_rq;
u64 exec_start;
u64 sum_exec_runtime;
- u64 vruntime;
u64 prev_sum_exec_runtime;
+ u64 vruntime;
+ s64 vlag;
+ u64 slice;
u64 nr_migrations;
- struct sched_statistics statistics;
-
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
@@ -503,6 +592,9 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
+typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+
struct sched_dl_entity {
struct rb_node rb_node;
@@ -533,10 +625,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
@@ -551,10 +639,10 @@ struct sched_dl_entity {
* overruns.
*/
unsigned int dl_throttled : 1;
- unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
+ unsigned int dl_server : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -569,7 +657,29 @@ struct sched_dl_entity {
* timer is needed to decrease the active utilization at the correct
* time.
*/
- struct hrtimer inactive_timer;
+ struct hrtimer inactive_timer;
+
+ /*
+ * Bits for DL-server functionality. Also see the comment near
+ * dl_server_update().
+ *
+ * @rq the runqueue this server is for
+ *
+ * @server_has_tasks() returns true if @server_pick returns a
+ * runnable task.
+ */
+ struct rq *rq;
+ dl_server_has_tasks_f server_has_tasks;
+ dl_server_pick_f server_pick;
+
+#ifdef CONFIG_RT_MUTEXES
+ /*
+ * Priority Inheritance. When a DEADLINE scheduling entity is boosted
+ * pi_se points to the donor, otherwise points to the dl_se it belongs
+ * to (the original one/itself).
+ */
+ struct sched_dl_entity *pi_se;
+#endif
};
#ifdef CONFIG_UCLAMP_TASK
@@ -628,6 +738,13 @@ struct wake_q_node {
struct wake_q_node *next;
};
+struct kmap_ctrl {
+#ifdef CONFIG_KMAP_LOCAL
+ int idx;
+ pte_t pteval[KM_MAX_IDX];
+#endif
+};
+
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -636,8 +753,10 @@ struct task_struct {
*/
struct thread_info thread_info;
#endif
- /* -1 unrunnable, 0 runnable, >0 stopped: */
- volatile long state;
+ unsigned int __state;
+
+ /* saved state for "spinlock sleepers" */
+ unsigned int saved_state;
/*
* This begins the randomizable portion of task_struct. Only
@@ -654,10 +773,6 @@ struct task_struct {
#ifdef CONFIG_SMP
int on_cpu;
struct __call_single_node wake_entry;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -679,13 +794,21 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
- const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+ struct sched_dl_entity dl;
+ struct sched_dl_entity *dl_server;
+ const struct sched_class *sched_class;
+
+#ifdef CONFIG_SCHED_CORE
+ struct rb_node core_node;
+ unsigned long core_cookie;
+ unsigned int core_occupation;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
- struct sched_dl_entity dl;
#ifdef CONFIG_UCLAMP_TASK
/*
@@ -700,6 +823,8 @@ struct task_struct {
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
+ struct sched_statistics stats;
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
@@ -712,7 +837,13 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
+ cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
+ void *migration_pending;
+#ifdef CONFIG_SMP
+ unsigned short migration_disabled;
+#endif
+ unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -727,14 +858,17 @@ struct task_struct {
u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
struct list_head rcu_tasks_holdout_list;
+ int rcu_tasks_exit_cpu;
+ struct list_head rcu_tasks_exit_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
int trc_ipi_to_cpu;
union rcu_special trc_reader_special;
- bool trc_reader_checked;
struct list_head trc_holdout_list;
+ struct list_head trc_blkd_node;
+ int trc_blkd_cpu;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
struct sched_info sched_info;
@@ -747,13 +881,8 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
+ struct address_space *faults_disabled_mapping;
- /* Per-thread vma caching: */
- struct vmacache vmacache;
-
-#ifdef SPLIT_RSS_COUNTING
- struct task_rss_stat rss_stat;
-#endif
int exit_state;
int exit_code;
int exit_signal;
@@ -769,17 +898,31 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
- unsigned sched_remote_wakeup:1;
-#ifdef CONFIG_PSI
- unsigned sched_psi_wake_requeue:1;
-#endif
/* Force alignment to the next boundary: */
unsigned :0;
/* Unserialized, strictly 'current' */
- /* Bit to tell LSMs we're in execve(): */
+ /*
+ * This field must not be in the scheduler word above due to wakelist
+ * queueing no longer being serialized by p->on_cpu. However:
+ *
+ * p->XXX = X; ttwu()
+ * schedule() if (p->on_rq && ..) // false
+ * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
+ * deactivate_task() ttwu_queue_wakelist())
+ * p->on_rq = 0; p->sched_remote_wakeup = Y;
+ *
+ * guarantees all stores of 'current' are visible before
+ * ->sched_remote_wakeup gets used, so it can be in this word.
+ */
+ unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+ unsigned sched_rt_mutex:1;
+#endif
+
+ /* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
@@ -788,6 +931,10 @@ struct task_struct {
#ifdef CONFIG_MEMCG
unsigned in_user_fault:1;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* whether the LRU algorithm may apply to this access */
+ unsigned in_lru_fault:1;
+#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
@@ -804,6 +951,24 @@ struct task_struct {
/* Stalled due to lack of memory */
unsigned in_memstall:1;
#endif
+#ifdef CONFIG_PAGE_OWNER
+ /* Used by page_owner=on to detect recursion in page tracking. */
+ unsigned in_page_owner:1;
+#endif
+#ifdef CONFIG_EVENTFD
+ /* Recursion prevention for eventfd_signal() */
+ unsigned in_eventfd:1;
+#endif
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
+ unsigned pasid_activated:1;
+#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+ unsigned reported_split_lock:1;
+#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -847,7 +1012,6 @@ struct task_struct {
/* PID/PID hash table linkage. */
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
- struct list_head thread_group;
struct list_head thread_node;
struct completion *vfork_done;
@@ -858,6 +1022,9 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
+ /* PF_KTHREAD | PF_IO_WORKER */
+ void *worker_private;
+
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -935,6 +1102,10 @@ struct task_struct {
/* Open file information: */
struct files_struct *files;
+#ifdef CONFIG_IO_URING
+ struct io_uring_task *io_uring;
+#endif
+
/* Namespaces: */
struct nsproxy *nsproxy;
@@ -960,6 +1131,7 @@ struct task_struct {
unsigned int sessionid;
#endif
struct seccomp seccomp;
+ struct syscall_user_dispatch syscall_dispatch;
/* Thread group tracking: */
u64 parent_exec_id;
@@ -999,6 +1171,9 @@ struct task_struct {
int softirq_context;
int irq_config;
#endif
+#ifdef CONFIG_PREEMPT_RT
+ int softirq_disable_cnt;
+#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
@@ -1008,7 +1183,7 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
-#ifdef CONFIG_UBSAN
+#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
@@ -1018,16 +1193,12 @@ struct task_struct {
/* Stacked block device info: */
struct bio_list *bio_list;
-#ifdef CONFIG_BLOCK
/* Stack plugging: */
struct blk_plug *plug;
-#endif
/* VM state: */
struct reclaim_state *reclaim_state;
- struct backing_dev_info *backing_dev_info;
-
struct io_context *io_context;
#ifdef CONFIG_COMPACTION
@@ -1053,7 +1224,7 @@ struct task_struct {
#ifdef CONFIG_CPUSETS
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
- /* Seqence number to catch updates: */
+ /* Sequence number to catch updates: */
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
@@ -1079,7 +1250,7 @@ struct task_struct {
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
@@ -1090,6 +1261,7 @@ struct task_struct {
/* Protected by alloc_lock: */
struct mempolicy *mempolicy;
short il_prev;
+ u8 il_weight;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -1144,6 +1316,7 @@ struct task_struct {
#ifdef CONFIG_RSEQ
struct rseq __user *rseq;
+ u32 rseq_len;
u32 rseq_sig;
/*
* RmW on rseq_event_mask must be performed atomically
@@ -1152,12 +1325,15 @@ struct task_struct {
unsigned long rseq_event_mask;
#endif
- struct tlbflush_unmap_batch tlb_ubc;
+#ifdef CONFIG_SCHED_MM_CID
+ int mm_cid; /* Current cid in mm */
+ int last_mm_cid; /* Most recent cid in mm */
+ int migrate_from_cpu;
+ int mm_cid_active; /* Whether cid bitmap is active */
+ struct callback_head cid_work;
+#endif
- union {
- refcount_t rcu_users;
- struct rcu_head rcu;
- };
+ struct tlbflush_unmap_batch tlb_ubc;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
@@ -1192,7 +1368,7 @@ struct task_struct {
u64 timer_slack_ns;
u64 default_timer_slack_ns;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth;
#endif
@@ -1201,6 +1377,17 @@ struct task_struct {
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events kcsan_save_irqtrace;
#endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ int kcsan_stack_depth;
+#endif
+#endif
+
+#ifdef CONFIG_KMSAN
+ struct kmsan_ctx kmsan_ctx;
+#endif
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ struct kunit *kunit_test;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -1225,9 +1412,6 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACING
- /* State flags for use by tracers: */
- unsigned long trace;
-
/* Bitmask and counter of trace recursion: */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
@@ -1269,8 +1453,12 @@ struct task_struct {
struct mem_cgroup *active_memcg;
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ struct obj_cgroup *objcg;
+#endif
+
#ifdef CONFIG_BLK_CGROUP
- struct request_queue *throttle_queue;
+ struct gendisk *throttle_disk;
#endif
#ifdef CONFIG_UPROBES
@@ -1280,12 +1468,19 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+ struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+ unsigned long saved_state_change;
+# endif
#endif
+ struct rcu_head rcu;
+ refcount_t rcu_users;
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
+ struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
@@ -1301,6 +1496,12 @@ struct task_struct {
/* Used by LSM modules for access restriction: */
void *security;
#endif
+#ifdef CONFIG_BPF_SYSCALL
+ /* Used by BPF task local storage */
+ struct bpf_local_storage __rcu *bpf_storage;
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
+#endif
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
unsigned long lowest_stack;
@@ -1308,11 +1509,45 @@ struct task_struct {
#endif
#ifdef CONFIG_X86_MCE
+ void __user *mce_vaddr;
+ __u64 mce_kflags;
u64 mce_addr;
__u64 mce_ripv : 1,
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
+ int mce_count;
+#endif
+
+#ifdef CONFIG_KRETPROBES
+ struct llist_head kretprobe_instances;
+#endif
+#ifdef CONFIG_RETHOOK
+ struct llist_head rethooks;
+#endif
+
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+ /*
+	 * If L1D flush is supported on mm context switch,
+	 * this callback head is used to queue kill work
+	 * for tasks that are not running on SMT-disabled
+	 * cores.
+ */
+ struct callback_head l1d_flush_kill;
+#endif
+
+#ifdef CONFIG_RV
+ /*
+	 * Per-task RV monitor. Currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think
+ * about adding more or developing a dynamic method. So far,
+ * none of these are justified.
+ */
+ union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+#endif
+
+#ifdef CONFIG_USER_EVENTS
+ struct user_event_mm *user_event_mm;
#endif
/*
@@ -1332,130 +1567,35 @@ struct task_struct {
*/
};
-static inline struct pid *task_pid(struct task_struct *task)
-{
- return task->thread_pid;
-}
-
-/*
- * the helpers to get the task's different pids as they are seen
- * from various namespaces
- *
- * task_xid_nr() : global id, i.e. the id seen from the init namespace;
- * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
- * current.
- * task_xid_nr_ns() : id seen from the ns specified;
- *
- * see also pid_nr() etc in include/linux/pid.h
- */
-pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
-
-static inline pid_t task_pid_nr(struct task_struct *tsk)
-{
- return tsk->pid;
-}
-
-static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
-}
-
-static inline pid_t task_pid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
-}
-
-
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
-{
- return tsk->tgid;
-}
-
-/**
- * pid_alive - check that a task structure is not stale
- * @p: Task structure to be checked.
- *
- * Test if a process is not yet dead (at most zombie state)
- * If pid_alive fails, then pointers within the task structure
- * can be stale and must not be dereferenced.
- *
- * Return: 1 if the process is alive. 0 otherwise.
- */
-static inline int pid_alive(const struct task_struct *p)
-{
- return p->thread_pid != NULL;
-}
-
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
-}
-
-static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
-}
-
-
-static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
-}
-
-static inline pid_t task_session_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
-}
-
-static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
-}
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
-}
-
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
-/* Obsolete, do not use: */
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
-{
- return task_pgrp_nr_ns(tsk, &init_pid_ns);
-}
-
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
-static inline unsigned int task_state_index(struct task_struct *tsk)
+static inline unsigned int __task_state_index(unsigned int tsk_state,
+ unsigned int tsk_exit_state)
{
- unsigned int tsk_state = READ_ONCE(tsk->state);
- unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
+ unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
- if (tsk_state == TASK_IDLE)
+ if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
+ /*
+ * We're lying here, but rather than expose a completely new task state
+ * to userspace, we can make this appear as if the task has gone through
+ * a regular rt_mutex_lock() call.
+ */
+ if (tsk_state & TASK_RTLOCK_WAIT)
+ state = TASK_UNINTERRUPTIBLE;
+
return fls(state);
}
+static inline unsigned int task_state_index(struct task_struct *tsk)
+{
+ return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
+}
+
static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
@@ -1470,53 +1610,44 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
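
A minimal sketch of what the split buys (hypothetical caller, not part of the
patch): __task_state_index() can now be fed a raw state word directly.

    #include <linux/sched.h>

    /* A task blocked on an RT spinlock carries TASK_RTLOCK_WAIT in its
     * state word, yet reports as the familiar 'D', per the remap in
     * __task_state_index() above. */
    static char rtlock_waiter_state_char(void)
    {
            unsigned int idx = __task_state_index(TASK_RTLOCK_WAIT, 0);

            return task_index_to_char(idx); /* 'D' */
    }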
-/**
- * is_global_init - check if a task structure is init. Since init
- * is free to have sub-threads we need to check tgid.
- * @tsk: Task structure to be checked.
- *
- * Check if a task structure is the first user space task the kernel created.
- *
- * Return: 1 if the task structure is init. 0 otherwise.
- */
-static inline int is_global_init(struct task_struct *tsk)
-{
- return task_tgid_nr(tsk) == 1;
-}
-
extern struct pid *cad_pid;
/*
* Per process flags
*/
+#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
+#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* Dumped core */
#define PF_SIGNALED 0x00000400 /* Killed by a signal */
-#define PF_MEMALLOC 0x00000800 /* Allocating memory */
+#define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
+#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
+#define PF__HOLE__00010000 0x00010000
#define PF_KSWAPD 0x00020000 /* I am kswapd */
-#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
-#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
+#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nofs_save() */
+#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
* I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_MEMALLOC_NORECLAIM 0x00800000 /* All allocation requests will clear __GFP_DIRECT_RECLAIM */
+#define PF_MEMALLOC_NOWARN 0x01000000 /* All allocation requests will inherit __GFP_NOWARN */
+#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
-#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
-#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning.
+ * See memalloc_pin_save() */
+#define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
+#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
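
A sketch of how these flags are typically tested (hypothetical predicate; the
flag names come from the table above):

    #include <linux/sched.h>

    /* Hypothetical predicate: kernel-side workers that userspace must not
     * retarget. Note PF_IO_WORKER now sits at 0x00000010; the PF__HOLE__*
     * entries mark the freed bit positions. */
    static bool task_is_pinned_worker(const struct task_struct *tsk)
    {
            return (tsk->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
                   (tsk->flags & PF_NO_SETAFFINITY);
    }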
/*
@@ -1547,7 +1678,7 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
@@ -1616,10 +1747,27 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p);
+extern int dl_bw_alloc(int cpu, u64 dl_bw);
+extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
+
+/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+
+/**
+ * set_cpus_allowed_ptr - set CPU affinity mask of a task
+ * @p: the task
+ * @new_mask: CPU affinity mask
+ *
+ * Return: zero if successful, or a negative error code
+ */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
+extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
+extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
+extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -1630,6 +1778,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
return -EINVAL;
return 0;
}
+static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
+{
+ if (src->user_cpus_ptr)
+ return -EINVAL;
+ return 0;
+}
+static inline void release_user_cpus_ptr(struct task_struct *p)
+{
+ WARN_ON(p->user_cpus_ptr);
+}
+
+static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+ return 0;
+}
#endif
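
A sketch against the kernel-doc above (hypothetical helper, not part of the
patch):

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Pin @p to a single CPU; returns 0 or a negative error code,
     * per the set_cpus_allowed_ptr() documentation. */
    static int pin_task_to_cpu(struct task_struct *p, int cpu)
    {
            return set_cpus_allowed_ptr(p, cpumask_of(cpu));
    }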
extern int yield_to(struct task_struct *p, bool preempt);
@@ -1677,9 +1840,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
-#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
-#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
@@ -1693,10 +1854,7 @@ extern struct thread_info init_thread_info;
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
-static inline struct thread_info *task_thread_info(struct task_struct *task)
-{
- return &task->thread_info;
-}
+# define task_thread_info(task) (&(task)->thread_info)
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -1753,15 +1911,12 @@ static __always_inline void scheduler_ipi(void)
*/
preempt_fold_need_resched();
}
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
-{
- return 1;
-}
#endif
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
+
/*
* Set thread flags in other task's structures.
* See asm/thread_info.h for TIF_xxxx flags available:
@@ -1818,45 +1973,132 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
-#else
-static inline int _cond_resched(void) { return 0; }
-#endif
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+extern int __cond_resched(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+void sched_dynamic_klp_enable(void);
+void sched_dynamic_klp_disable(void);
+
+DECLARE_STATIC_CALL(cond_resched, __cond_resched);
+
+static __always_inline int _cond_resched(void)
+{
+ return static_call_mod(cond_resched)();
+}
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_cond_resched(void);
+
+static __always_inline int _cond_resched(void)
+{
+ return dynamic_cond_resched();
+}
+
+#else /* !CONFIG_PREEMPTION */
+
+static inline int _cond_resched(void)
+{
+ klp_sched_try_switch();
+ return __cond_resched();
+}
+
+#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
+
+static inline int _cond_resched(void)
+{
+ klp_sched_try_switch();
+ return 0;
+}
+
+#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
#define cond_resched() ({ \
- ___might_sleep(__FILE__, __LINE__, 0); \
+ __might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
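
Whichever variant _cond_resched() resolves to (static call, static key, or
plain function), callers keep the familiar pattern; a sketch with a
hypothetical process_one():

    #include <linux/sched.h>

    extern void process_one(unsigned long item);   /* hypothetical work */

    static void process_all(const unsigned long *items, unsigned long n)
    {
            unsigned long i;

            for (i = 0; i < n; i++) {
                    process_one(items[i]);
                    cond_resched();         /* voluntary preemption point */
            }
    }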
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
+
+#define MIGHT_RESCHED_RCU_SHIFT 8
+#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
-#define cond_resched_lock(lock) ({ \
- ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
- __cond_resched_lock(lock); \
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non-RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside an RCU read-side critical section.
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS \
+ (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
+#define cond_resched_lock(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_lock(lock); \
+})
+
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_read(lock); \
+})
+
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_write(lock); \
})
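
A sketch of the lock-holding variant (hypothetical counter drain; the lock
and counter are assumptions):

    #include <linux/spinlock.h>
    #include <linux/sched.h>

    static void drain_counter(spinlock_t *lock, unsigned long *cnt)
    {
            spin_lock(lock);
            while (*cnt) {
                    (*cnt)--;
                    /* Drops and re-takes @lock if a reschedule is due;
                     * returns nonzero when that happened. */
                    cond_resched_lock(lock);
            }
            spin_unlock(lock);
    }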
-static inline void cond_resched_rcu(void)
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
{
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
- rcu_read_unlock();
- cond_resched();
- rcu_read_lock();
+ return IS_ENABLED(CONFIG_PREEMPT);
+}
+
#endif
+
+static inline bool preempt_model_rt(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_RT);
}
/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
*/
-static inline int spin_needbreak(spinlock_t *lock)
+static inline bool preempt_model_preemptible(void)
{
-#ifdef CONFIG_PREEMPTION
- return spin_is_contended(lock);
-#else
- return 0;
-#endif
+ return preempt_model_full() || preempt_model_rt();
}
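
A sketch of why a run-time check is needed (hypothetical tuning helper):
under PREEMPT_DYNAMIC, IS_ENABLED(CONFIG_PREEMPTION) is true even when the
kernel boots with preempt=none.

    #include <linux/sched.h>

    static unsigned int pick_batch_size(void)
    {
            /* Long batches are fine when involuntary preemption can
             * interrupt them; keep them short otherwise. */
            return preempt_model_preemptible() ? 1024 : 64;
    }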
static __always_inline bool need_resched(void)
@@ -1871,11 +2113,7 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- return READ_ONCE(p->cpu);
-#else
return READ_ONCE(task_thread_info(p)->cpu);
-#endif
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
@@ -1893,6 +2131,12 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long get_wchan(struct task_struct *p);
+extern struct task_struct *cpu_curr_snapshot(int cpu);
+
+#include <linux/spinlock.h>
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -1915,137 +2159,32 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
-#ifdef CONFIG_RSEQ
-
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
- RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
- RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
- RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
- RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
- RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
- RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
- if (t->rseq)
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-}
-
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
-}
-
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
- rseq_handle_notify_resume(ksig, regs);
-}
-
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
-
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
-
-/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
- */
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
- } else {
- t->rseq = current->rseq;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_mask = current->rseq_event_mask;
- }
-}
-
-static inline void rseq_execve(struct task_struct *t)
-{
- t->rseq = NULL;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
-}
-
-#else
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_preempt(struct task_struct *t)
-{
-}
-static inline void rseq_migrate(struct task_struct *t)
-{
-}
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
-}
-static inline void rseq_execve(struct task_struct *t)
+#ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
{
+ /*
+	 * To guard against lock-holder preemption, skip spinning if the
+	 * task is not on a CPU, or if its CPU has been preempted.
+ */
+ return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}
-#endif
-
-#ifdef CONFIG_DEBUG_RSEQ
-
-void rseq_syscall(struct pt_regs *regs);
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu);
+#endif /* CONFIG_SMP */
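
A sketch of the optimistic-spin pattern owner_on_cpu() serves in the
mutex/rwsem slow paths (SMP-only; the acquire callback is a hypothetical
assumption):

    #include <linux/sched.h>

    static bool spin_while_owner_runs(struct task_struct *owner,
                                      bool (*try_acquire)(void *), void *arg)
    {
            while (owner_on_cpu(owner)) {
                    if (try_acquire(arg))   /* hypothetical acquisition test */
                            return true;
                    cpu_relax();
            }
            return false;   /* owner scheduled out: stop burning cycles */
    }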
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
+extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+ unsigned long uaddr);
+extern int sched_core_idle_cpu(int cpu);
#else
-
-static inline void rseq_syscall(struct pt_regs *regs)
-{
-}
-
+static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
+static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
-const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
-char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
-int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
-
-const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
-
-int sched_trace_rq_cpu(struct rq *rq);
-int sched_trace_rq_nr_running(struct rq *rq);
-
-const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#endif
diff --git a/include/linux/sched/affinity.h b/include/linux/sched/affinity.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/affinity.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 867d588314e0..196f0ca351a2 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -12,7 +12,16 @@
*
* Please use one of the three interfaces below.
*/
-extern unsigned long long notrace sched_clock(void);
+extern u64 sched_clock(void);
+
+#if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
+extern u64 sched_clock_noinstr(void);
+#else
+static __always_inline u64 sched_clock_noinstr(void)
+{
+ return sched_clock();
+}
+#endif
/*
* See the comment in kernel/sched/clock.c
@@ -45,7 +54,12 @@ static inline u64 cpu_clock(int cpu)
return sched_clock();
}
-static inline u64 local_clock(void)
+static __always_inline u64 local_clock_noinstr(void)
+{
+ return sched_clock_noinstr();
+}
+
+static __always_inline u64 local_clock(void)
{
return sched_clock();
}
@@ -79,10 +93,9 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu);
}
-static inline u64 local_clock(void)
-{
- return sched_clock_cpu(raw_smp_processor_id());
-}
+extern u64 local_clock_noinstr(void);
+extern u64 local_clock(void);
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/cond_resched.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ecdc6542070f..02f5090ffea2 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -57,7 +57,8 @@ static inline int get_dumpable(struct mm_struct *mm)
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
-#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
+#define MMF_VM_HUGEPAGE 17 /* set when mm is available for
+ khugepaged */
/*
* This one-shot flag is dropped due to necessity of changing exe once again
* on NFS restore
@@ -70,11 +71,37 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
-#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter on its own. We're aggressive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifecycle of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
+
+#define MMF_HAS_MDWE 28
+#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
+
+
+#define MMF_HAS_MDWE_NO_INHERIT 29
+
+#define MMF_VM_MERGE_ANY 30
+#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
+ MMF_VM_MERGE_ANY_MASK)
+
+static inline unsigned long mmf_init_flags(unsigned long flags)
+{
+ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
+ flags &= ~((1UL << MMF_HAS_MDWE) |
+ (1UL << MMF_HAS_MDWE_NO_INHERIT));
+ return flags & MMF_INIT_MASK;
+}
#endif /* _LINUX_SCHED_COREDUMP_H */
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 3ed5aa18593f..bdd31ab93bc5 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -26,7 +26,12 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
static inline unsigned long map_util_freq(unsigned long util,
unsigned long freq, unsigned long cap)
{
- return (freq + (freq >> 2)) * util / cap;
+ return freq * util / cap;
+}
+
+static inline unsigned long map_util_perf(unsigned long util)
+{
+ return util + (util >> 2);
}
#endif /* CONFIG_CPU_FREQ */
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 6c9f19a33865..5f8fd5b24a2e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -8,25 +8,17 @@
* cputime accounting APIs:
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm/cputime.h>
-
-#ifndef cputime_to_nsecs
-# define cputime_to_nsecs(__ct) \
- (cputime_to_usecs(__ct) * NSEC_PER_USEC)
-#endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
+ return false;
}
static inline u64 task_gtime(struct task_struct *t)
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 1aff00b65f3c..df3aca89d4f5 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_DEADLINE_H
+#define _LINUX_SCHED_DEADLINE_H
/*
* SCHED_DEADLINE tasks has negative priorities, reflecting
@@ -6,6 +8,8 @@
* NORMAL/BATCH tasks.
*/
+#include <linux/sched.h>
+
#define MAX_DL_PRIO 0
static inline int dl_prio(int prio)
@@ -32,3 +36,5 @@ extern void dl_add_task_root_domain(struct task_struct *p);
extern void dl_clear_root_domain(struct root_domain *rd);
#endif /* CONFIG_SMP */
+
+#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index 00c45a0e6abe..b5035afa2396 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
-extern void show_state_filter(unsigned long state_filter);
+extern void show_state_filter(unsigned int state_filter);
static inline void show_state(void)
{
@@ -43,7 +43,7 @@ extern void proc_sched_set_task(struct task_struct *p);
#endif
/* Attach to any functions which should be ignored in wchan output. */
-#define __sched __attribute__((__section__(".sched.text")))
+#define __sched __section(".sched.text")
/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 9a62ffdd296f..412cdaba33eb 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -11,8 +11,10 @@ extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_wait_empty(unsigned int cpu);
extern int sched_cpu_dying(unsigned int cpu);
#else
+# define sched_cpu_wait_empty NULL
# define sched_cpu_dying NULL
#endif
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 22873d276be6..478084f9105e 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -11,7 +11,11 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};
+#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
+#else
+static inline void wake_up_if_idle(int cpu) { }
+#endif
/*
* Idle thread specific functions to determine the need_resched
@@ -19,12 +23,37 @@ extern void wake_up_if_idle(int cpu);
*/
#ifdef TIF_POLLING_NRFLAG
-static inline void __current_set_polling(void)
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+static __always_inline void __current_set_polling(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
+ arch_set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
}
-static inline bool __must_check current_set_polling_and_test(void)
+static __always_inline void __current_clr_polling(void)
+{
+ arch_clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline void __current_set_polling(void)
+{
+ set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+static __always_inline void __current_clr_polling(void)
+{
+ clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
+
+static __always_inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
@@ -37,12 +66,7 @@ static inline bool __must_check current_set_polling_and_test(void)
return unlikely(tif_need_resched());
}
-static inline void __current_clr_polling(void)
-{
- clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
+static __always_inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
@@ -69,7 +93,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
-static inline void current_clr_polling(void)
+static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index cc9f393e2a70..2b461129d1fa 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,59 +2,74 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
-enum hk_flags {
- HK_FLAG_TIMER = 1,
- HK_FLAG_RCU = (1 << 1),
- HK_FLAG_MISC = (1 << 2),
- HK_FLAG_SCHED = (1 << 3),
- HK_FLAG_TICK = (1 << 4),
- HK_FLAG_DOMAIN = (1 << 5),
- HK_FLAG_WQ = (1 << 6),
- HK_FLAG_MANAGED_IRQ = (1 << 7),
- HK_FLAG_KTHREAD = (1 << 8),
+enum hk_type {
+ HK_TYPE_TIMER,
+ HK_TYPE_RCU,
+ HK_TYPE_MISC,
+ HK_TYPE_SCHED,
+ HK_TYPE_TICK,
+ HK_TYPE_DOMAIN,
+ HK_TYPE_WQ,
+ HK_TYPE_MANAGED_IRQ,
+ HK_TYPE_KTHREAD,
+ HK_TYPE_MAX
};
#ifdef CONFIG_CPU_ISOLATION
DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
-extern int housekeeping_any_cpu(enum hk_flags flags);
-extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
-extern bool housekeeping_enabled(enum hk_flags flags);
-extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
-extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
+extern int housekeeping_any_cpu(enum hk_type type);
+extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
+extern bool housekeeping_enabled(enum hk_type type);
+extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
+extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
extern void __init housekeeping_init(void);
#else
-static inline int housekeeping_any_cpu(enum hk_flags flags)
+static inline int housekeeping_any_cpu(enum hk_type type)
{
return smp_processor_id();
}
-static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
+static inline const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
return cpu_possible_mask;
}
-static inline bool housekeeping_enabled(enum hk_flags flags)
+static inline bool housekeeping_enabled(enum hk_type type)
{
return false;
}
static inline void housekeeping_affine(struct task_struct *t,
- enum hk_flags flags) { }
+ enum hk_type type) { }
+
+static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
+{
+ return true;
+}
+
static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */
-static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
+static inline bool housekeeping_cpu(int cpu, enum hk_type type)
{
#ifdef CONFIG_CPU_ISOLATION
if (static_branch_unlikely(&housekeeping_overridden))
- return housekeeping_test_cpu(cpu, flags);
+ return housekeeping_test_cpu(cpu, type);
#endif
return true;
}
+static inline bool cpu_is_isolated(int cpu)
+{
+ return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
+ cpuset_cpu_is_isolated(cpu);
+}
+
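
A sketch using the new enum interface (hypothetical placement helper, not
part of the patch):

    #include <linux/sched/isolation.h>

    /* Prefer a housekeeping CPU for deferred work so isolated CPUs
     * (excluded from HK_TYPE_DOMAIN) stay undisturbed. */
    static int deferred_work_cpu(void)
    {
            return housekeeping_any_cpu(HK_TYPE_DOMAIN);
    }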
#endif /* _LINUX_SCHED_ISOLATION_H */
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index d2b4204ba4d3..68876d0a7ef9 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,7 +19,10 @@ struct task_struct;
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
-#define JOBCTL_TASK_WORK_BIT 24 /* set by TWA_SIGNAL */
+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */
+
+#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -29,10 +32,13 @@ struct task_struct;
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
-#define JOBCTL_TASK_WORK (1UL << JOBCTL_TASK_WORK_BIT)
+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT)
+
+#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
-#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK)
+#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index f889e332912f..b6543f9d78d6 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -28,7 +28,7 @@ extern struct mm_struct *mm_alloc(void);
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
@@ -36,6 +36,11 @@ static inline void mmgrab(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
}
+static inline void smp_mb__after_mmgrab(void)
+{
+ smp_mb__after_atomic();
+}
+
extern void __mmdrop(struct mm_struct *mm);
static inline void mmdrop(struct mm_struct *mm)
@@ -49,29 +54,61 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT
/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_lock for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * It also has to be called when mmgrab() is used in the context of
- * the process, but then the mm_count refcount is transferred outside
- * the context of the process to run down_write() on that pinned mm.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_lock
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_lock for reading that must call
- * this function. Generally if the mmap_lock is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_lock for writing before
- * invoking the ->core_dump methods.
+ * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
+ * by far the least expensive way to do that.
*/
-static inline bool mmget_still_valid(struct mm_struct *mm)
+static inline void __mmdrop_delayed(struct rcu_head *rhp)
+{
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+ __mmdrop(mm);
+}
+
+/*
+ * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
+ * kernels via RCU.
+ */
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ /* Provides a full memory barrier. See mmdrop() */
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+static inline void mmdrop_sched(struct mm_struct *mm)
{
- return likely(!mm->core_state);
+ mmdrop(mm);
+}
+#endif
+
+/* Helpers for lazy TLB mm refcounting */
+static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
+ mmgrab(mm);
+}
+
+static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
+ mmdrop(mm);
+ } else {
+ /*
+ * mmdrop_lazy_tlb must provide a full memory barrier, see the
+		 * membarrier comment in finish_task_switch(), which relies on this.
+ */
+ smp_mb();
+ }
+}
+
+static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
+ mmdrop_sched(mm);
+ else
+ smp_mb(); /* see mmdrop_lazy_tlb() above */
}
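
A sketch of the intended pairing (hypothetical caller):

    #include <linux/sched/mm.h>

    /* Pin @mm as a lazy active_mm across a window. Without
     * CONFIG_MMU_LAZY_TLB_REFCOUNT the pair degenerates to a full
     * memory barrier instead of refcount traffic. */
    static void with_lazy_mm(struct mm_struct *mm, void (*fn)(void))
    {
            mmgrab_lazy_tlb(mm);
            fn();
            mmdrop_lazy_tlb(mm);
    }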
/**
@@ -87,7 +124,7 @@ static inline bool mmget_still_valid(struct mm_struct *mm)
*
* Use mmput() to release the reference acquired by mmget().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
@@ -131,6 +168,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
extern unsigned long
@@ -140,6 +185,15 @@ extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
+
+unsigned long
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+unsigned long
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
@@ -165,7 +219,8 @@ static inline bool in_vfork(struct task_struct *tsk)
* another oom-unkillable task does this it should blame itself.
*/
rcu_read_lock();
- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
+ ret = tsk->vfork_done &&
+ rcu_dereference(tsk->real_parent)->mm == tsk->mm;
rcu_read_unlock();
return ret;
@@ -175,36 +230,109 @@ static inline bool in_vfork(struct task_struct *tsk)
* Applies per-task gfp context to the given allocation flags.
* PF_MEMALLOC_NOIO implies GFP_NOIO
* PF_MEMALLOC_NOFS implies GFP_NOFS
+ * PF_MEMALLOC_PIN implies !__GFP_MOVABLE
*/
static inline gfp_t current_gfp_context(gfp_t flags)
{
unsigned int pflags = READ_ONCE(current->flags);
- if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+ if (unlikely(pflags & (PF_MEMALLOC_NOIO |
+ PF_MEMALLOC_NOFS |
+ PF_MEMALLOC_NORECLAIM |
+ PF_MEMALLOC_NOWARN |
+ PF_MEMALLOC_PIN))) {
/*
- * NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precedence
+ * Stronger flags before weaker flags:
+ * NORECLAIM implies NOIO, which in turn implies NOFS
*/
- if (pflags & PF_MEMALLOC_NOIO)
+ if (pflags & PF_MEMALLOC_NORECLAIM)
+ flags &= ~__GFP_DIRECT_RECLAIM;
+ else if (pflags & PF_MEMALLOC_NOIO)
flags &= ~(__GFP_IO | __GFP_FS);
else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
+
+ if (pflags & PF_MEMALLOC_NOWARN)
+ flags |= __GFP_NOWARN;
+
+ if (pflags & PF_MEMALLOC_PIN)
+ flags &= ~__GFP_MOVABLE;
}
return flags;
}
#ifdef CONFIG_LOCKDEP
-extern void __fs_reclaim_acquire(void);
-extern void __fs_reclaim_release(void);
+extern void __fs_reclaim_acquire(unsigned long ip);
+extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
-static inline void __fs_reclaim_acquire(void) { }
-static inline void __fs_reclaim_release(void) { }
+static inline void __fs_reclaim_acquire(unsigned long ip) { }
+static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
+/* Any memory-allocation retry loop should use
+ * memalloc_retry_wait(), and pass the flags for the most
+ * constrained allocation attempt that might have failed.
+ * This provides useful documentation of where loops are,
+ * and a central place to fine tune the waiting as the MM
+ * implementation changes.
+ */
+static inline void memalloc_retry_wait(gfp_t gfp_flags)
+{
+ /* We use io_schedule_timeout because waiting for memory
+	 * typically includes waiting for dirty pages to be
+ * written out, which requires IO.
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ gfp_flags = current_gfp_context(gfp_flags);
+ if (gfpflags_allow_blocking(gfp_flags) &&
+ !(gfp_flags & __GFP_NORETRY))
+ /* Probably waited already, no need for much more */
+ io_schedule_timeout(1);
+ else
+ /* Probably didn't wait, and has now released a lock,
+ * so now is a good time to wait
+ */
+ io_schedule_timeout(HZ/50);
+}
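
A sketch of the retry loop the comment above prescribes (hypothetical
allocation site):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *alloc_must_succeed(size_t size, gfp_t gfp)
    {
            void *p;

            while (!(p = kmalloc(size, gfp)))
                    memalloc_retry_wait(gfp);   /* centralised back-off */
            return p;
    }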
+
+/**
+ * might_alloc - Mark possible allocation sites
+ * @gfp_mask: gfp_t flags that would be used to allocate
+ *
+ * Similar to might_sleep() and other annotations, this can be used in functions
+ * that might allocate, but often don't. Compiles to nothing without
+ * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
+ */
+static inline void might_alloc(gfp_t gfp_mask)
+{
+ fs_reclaim_acquire(gfp_mask);
+ fs_reclaim_release(gfp_mask);
+
+ might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+}
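
A sketch of the annotation in use (hypothetical cache API; the fast path
rarely allocates, which is exactly when the annotation earns its keep):

    #include <linux/sched/mm.h>

    struct cache;                                           /* hypothetical */
    extern bool fast_insert(struct cache *c, void *item);   /* hypothetical */
    extern int slow_insert(struct cache *c, void *item, gfp_t gfp);

    static int cache_insert(struct cache *c, void *item, gfp_t gfp)
    {
            might_alloc(gfp);   /* lockdep sees the potential allocation
                                 * even when the fast path wins */
            if (fast_insert(c, item))
                    return 0;
            return slow_insert(c, item, gfp);
    }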
+
+/**
+ * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
+ *
+ * This allows PF_* flags to be conveniently added, irrespective of current
+ * value, and then the old version restored with memalloc_flags_restore().
+ */
+static inline unsigned memalloc_flags_save(unsigned flags)
+{
+ unsigned oldflags = ~current->flags & flags;
+ current->flags |= flags;
+ return oldflags;
+}
+
+static inline void memalloc_flags_restore(unsigned flags)
+{
+ current->flags &= ~flags;
+}
+
/**
* memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
*
@@ -214,13 +342,12 @@ static inline void fs_reclaim_release(gfp_t gfp_mask) { }
* point of view. Use memalloc_noio_restore to end the scope with flags
* returned by this function.
*
- * This function is safe to be used from any context.
+ * Context: This function is safe to be used from any context.
+ * Return: The saved flags to be passed to memalloc_noio_restore.
*/
static inline unsigned int memalloc_noio_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
- current->flags |= PF_MEMALLOC_NOIO;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC_NOIO);
}
/**
@@ -233,7 +360,7 @@ static inline unsigned int memalloc_noio_save(void)
*/
static inline void memalloc_noio_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
+ memalloc_flags_restore(flags);
}
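
The canonical pairing, as a sketch (hypothetical resume-path buffer):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    /* Memory allocated here must not recurse into I/O, so open an
     * NOIO scope around the allocation. */
    static void *resume_buffer(size_t size)
    {
            unsigned int noio = memalloc_noio_save();
            void *buf = kmalloc(size, GFP_KERNEL); /* acts as GFP_NOIO here */

            memalloc_noio_restore(noio);
            return buf;
    }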
/**
@@ -245,13 +372,12 @@ static inline void memalloc_noio_restore(unsigned int flags)
* point of view. Use memalloc_nofs_restore to end the scope with flags
* returned by this function.
*
- * This function is safe to be used from any context.
+ * Context: This function is safe to be used from any context.
+ * Return: The saved flags to be passed to memalloc_nofs_restore.
*/
static inline unsigned int memalloc_nofs_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
- current->flags |= PF_MEMALLOC_NOFS;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC_NOFS);
}
/**
@@ -264,79 +390,115 @@ static inline unsigned int memalloc_nofs_save(void)
*/
static inline void memalloc_nofs_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
+ memalloc_flags_restore(flags);
}
+/**
+ * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
+ *
+ * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
+ * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
+ * prevents entering reclaim and allows access to all memory reserves. This
+ * should only be used when the caller guarantees the allocation will allow more
+ * memory to be freed very shortly, i.e. it needs to allocate some memory in
+ * the process of freeing memory, and cannot reclaim due to potential recursion.
+ *
+ * Users of this scope have to be extremely careful to not deplete the reserves
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory. Usage of a
+ * pre-allocated pool (e.g. mempool) should always be considered before using
+ * this scope.
+ *
+ * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC
+ *
+ * Context: This function should not be used in an interrupt context, as
+ * interrupt context does not get PF_MEMALLOC access to reserves.
+ * See __gfp_pfmemalloc_flags().
+ * Return: The saved flags to be passed to memalloc_noreclaim_restore.
+ */
static inline unsigned int memalloc_noreclaim_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC;
- current->flags |= PF_MEMALLOC;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC);
}
+/**
+ * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit __GFP_MEMALLOC scope started by the
+ * memalloc_noreclaim_save() function. Always pass the value returned by
+ * the pairing memalloc_noreclaim_save() call.
+ */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC) | flags;
-}
-
-#ifdef CONFIG_CMA
-static inline unsigned int memalloc_nocma_save(void)
-{
- unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
-
- current->flags |= PF_MEMALLOC_NOCMA;
- return flags;
+ memalloc_flags_restore(flags);
}
-static inline void memalloc_nocma_restore(unsigned int flags)
-{
- current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
-}
-#else
-static inline unsigned int memalloc_nocma_save(void)
+/**
+ * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
+ *
+ * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
+ * All further allocations will implicitly remove the __GFP_MOVABLE flag, which
+ * will constrain the allocations to zones that allow long-term pinning, i.e.
+ * not ZONE_MOVABLE zones.
+ *
+ * Return: The saved flags to be passed to memalloc_pin_restore.
+ */
+static inline unsigned int memalloc_pin_save(void)
{
- return 0;
+ return memalloc_flags_save(PF_MEMALLOC_PIN);
}
-static inline void memalloc_nocma_restore(unsigned int flags)
+/**
+ * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit ~__GFP_MOVABLE scope started by the memalloc_pin_save()
+ * function. Always pass the value returned by the pairing
+ * memalloc_pin_save() call.
+ */
+static inline void memalloc_pin_restore(unsigned int flags)
{
+ memalloc_flags_restore(flags);
}
-#endif
#ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
* @memcg: memcg to charge.
*
* This function marks the beginning of the remote memcg charging scope. All the
* __GFP_ACCOUNT allocations till the end of the scope will be charged to the
* given memcg.
*
- * NOTE: This function is not nesting safe.
- */
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
-{
- WARN_ON_ONCE(current->active_memcg);
- current->active_memcg = memcg;
-}
-
-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
+ * Please make sure that the caller has a reference to the passed memcg structure,
+ * so its lifetime is guaranteed to exceed the scope between two
+ * set_active_memcg() calls.
*
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
*/
-static inline void memalloc_unuse_memcg(void)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
{
- current->active_memcg = NULL;
+ struct mem_cgroup *old;
+
+ if (!in_task()) {
+ old = this_cpu_read(int_active_memcg);
+ this_cpu_write(int_active_memcg, memcg);
+ } else {
+ old = current->active_memcg;
+ current->active_memcg = memcg;
+ }
+
+ return old;
}
#else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
-{
-}
-
-static inline void memalloc_unuse_memcg(void)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
{
+ return NULL;
}
#endif
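
A sketch of the nesting-safe pattern the NOTE prescribes (hypothetical
helper; assumes the caller holds a reference on @memcg):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *alloc_charged(struct mem_cgroup *memcg, size_t size)
    {
            struct mem_cgroup *old = set_active_memcg(memcg);
            void *p = kmalloc(size, GFP_KERNEL_ACCOUNT);

            set_active_memcg(old);  /* restore the outer charging scope */
            return p;
    }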
@@ -348,10 +510,13 @@ enum {
MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};
enum {
MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
+ MEMBARRIER_FLAG_RSEQ = (1U << 1),
};
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
@@ -370,6 +535,8 @@ static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
extern void membarrier_exec_mmap(struct mm_struct *mm);
+extern void membarrier_update_current_mm(struct mm_struct *next_mm);
+
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -384,6 +551,9 @@ static inline void membarrier_exec_mmap(struct mm_struct *mm)
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
+static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
+{
+}
#endif
#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index 3988762efe15..52b22c5c396d 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -15,13 +15,23 @@
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
+enum numa_vmaskip_reason {
+ NUMAB_SKIP_UNSUITABLE,
+ NUMAB_SKIP_SHARED_RO,
+ NUMAB_SKIP_INACCESSIBLE,
+ NUMAB_SKIP_SCAN_DELAY,
+ NUMAB_SKIP_PID_INACTIVE,
+ NUMAB_SKIP_IGNORE_PID,
+ NUMAB_SKIP_SEQ_COMPLETED,
+};
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p, bool final);
-extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
- int src_nid, int dst_cpu);
+bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
+ int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
int flags)
@@ -38,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
- struct page *page, int src_nid, int dst_cpu)
+ struct folio *folio, int src_nid, int dst_cpu)
{
return true;
}
diff --git a/include/linux/sched/posix-timers.h b/include/linux/sched/posix-timers.h
new file mode 100644
index 000000000000..523a381d6c88
--- /dev/null
+++ b/include/linux/sched/posix-timers.h
@@ -0,0 +1 @@
+#include <linux/posix-timers.h>
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index 7d64feafc408..ab83d85e1183 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -11,16 +11,9 @@
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
* tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
* values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
*/
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define MAX_RT_PRIO 100
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
@@ -34,15 +27,6 @@
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
* Convert nice value [19,-20] to rlimit style value [1,40].
*/
static inline long nice_to_rlimit(long nice)
diff --git a/include/linux/sched/rseq_api.h b/include/linux/sched/rseq_api.h
new file mode 100644
index 000000000000..cf2af72693e1
--- /dev/null
+++ b/include/linux/sched/rseq_api.h
@@ -0,0 +1 @@
+#include <linux/rseq.h>
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index e5af028c08b4..b2b9e6eb9683 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
}
#ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
*/
@@ -39,20 +43,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
}
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return tsk->pi_blocked_on != NULL;
-}
#else
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return false;
-}
#endif
extern void normalize_rt_tasks(void);
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
new file mode 100644
index 000000000000..b04a5d04dee9
--- /dev/null
+++ b/include/linux/sched/sd_flags.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sched-domains (multiprocessor balancing) flag declarations.
+ */
+
+#ifndef SD_FLAG
+# error "Incorrect import of SD flags definitions"
+#endif
+
+/*
+ * Hierarchical metaflags
+ *
+ * SHARED_CHILD: These flags are meant to be set from the base domain upwards.
+ * If a domain has this flag set, all of its children should have it set. This
+ * is usually because the flag describes some shared resource (all CPUs in that
+ * domain share the same resource), or because they are tied to a scheduling
+ * behaviour that we want to disable at some point in the hierarchy for
+ * scalability reasons.
+ *
+ * In those cases it doesn't make sense to have the flag set for a domain but
+ * not have it in (some of) its children: sched domains ALWAYS span their child
+ * domains, so operations done with parent domains will cover CPUs in the lower
+ * child domains.
+ *
+ *
+ * SHARED_PARENT: These flags are meant to be set from the highest domain
+ * downwards. If a domain has this flag set, all of its parents should have it
+ * set. This is usually for topology properties that start to appear above a
+ * certain level (e.g. domain starts spanning CPUs outside of the base CPU's
+ * socket).
+ */
+#define SDF_SHARED_CHILD 0x1
+#define SDF_SHARED_PARENT 0x2
+
+/*
+ * Behavioural metaflags
+ *
+ * NEEDS_GROUPS: These flags are only relevant if the domain they are set on has
+ * more than one group. This is usually for balancing flags (load balancing
+ * involves equalizing a metric between groups), or for flags describing some
+ * shared resource (which would be shared between groups).
+ */
+#define SDF_NEEDS_GROUPS 0x4
+
+/*
+ * Balance when about to become idle
+ *
+ * SHARED_CHILD: Set from the base domain up to cpuset.sched_relax_domain_level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on exec
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_EXEC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on fork, clone
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_FORK, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on wakeup
+ *
+ * SHARED_CHILD: Set from the base domain up to cpuset.sched_relax_domain_level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_WAKE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Consider waking task on waking CPU.
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ */
+SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD)
+
+/*
+ * Domain members have different CPU capacities
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * asymmetry is detected.
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members have different CPU capacities spanning all unique CPU
+ * capacity values.
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * all available CPU capacities are visible
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU capacity (i.e. SMT)
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ * CPU capacity.
+ * NEEDS_GROUPS: Capacity is shared between groups.
+ */
+SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU cluster (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU Last Level Caches
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ * the same cache(s).
+ * NEEDS_GROUPS: Caches are shared between groups.
+ */
+SD_FLAG(SD_SHARE_LLC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Only a single load balancing instance
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE. Could be set from a
+ * different level upwards, but it doesn't change that if a
+ * domain has this flag set, then all of its parents need to have
+ * it too (otherwise the serialization doesn't make sense).
+ * NEEDS_GROUPS: No point in preserving domain if it has a single group.
+ */
+SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Place busy tasks earlier in the domain
+ *
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
+
+/*
+ * Prefer to place tasks in a sibling domain
+ *
+ * Set up until domains start spanning NUMA nodes. Close to being a SHARED_CHILD
+ * flag, but cleared below domains with SD_ASYM_CPUCAPACITY.
+ *
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
+
+/*
+ * sched_groups of this level overlap
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE.
+ * NEEDS_GROUPS: Overlaps can only exist with more than one group.
+ */
+SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Cross-node balancing
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE.
+ * NEEDS_GROUPS: No point in preserving domain if it has a single group.
+ */
+SD_FLAG(SD_NUMA, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
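
Each consumer of this table defines SD_FLAG(name, mflags) before including the header. The sched/topology.h hunk later in this diff instantiates it twice, once for flag indexes and once for bit values:

/* As done in include/linux/sched/topology.h below: */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG

#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
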
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1bad18a1d8ba..0a0e23c45406 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -9,6 +9,7 @@
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
+#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
@@ -72,6 +73,17 @@ struct multiprocess_signals {
struct hlist_node node;
};
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -83,6 +95,7 @@ struct signal_struct {
refcount_t sigcnt;
atomic_t live;
int nr_threads;
+ int quick_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -98,18 +111,16 @@ struct signal_struct {
/* thread group exit support */
int group_exit_code;
- /* overloaded:
- * - notify group_exit_task when ->count is equal to notify_count
- * - everyone except group_exit_task is stopped during signal delivery
- * of fatal signals, group_exit_task processes the signal.
- */
+ /* notify group_exec_task when notify_count is less than or equal to 0 */
int notify_count;
- struct task_struct *group_exit_task;
+ struct task_struct *group_exec_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
+ struct core_state *core_state; /* coredumping support */
+
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
@@ -125,7 +136,7 @@ struct signal_struct {
#ifdef CONFIG_POSIX_TIMERS
/* POSIX.1b Interval Timers */
- int posix_timer_id;
+ unsigned int next_posix_timer_id;
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
@@ -228,12 +239,13 @@ struct signal_struct {
* credential calculations
* (notably ptrace)
* Deprecated; do not use in new code.
- * Use exec_update_mutex instead.
- */
- struct mutex exec_update_mutex; /* Held while task_struct is being
- * updated during exec, and may have
- * inconsistent permissions.
+ * Use exec_update_lock instead.
*/
+ struct rw_semaphore exec_update_lock; /* Held while task_struct is
+ * being updated during exec,
+ * and may have inconsistent
+ * permissions.
+ */
} __randomize_layout;
/*
@@ -242,7 +254,6 @@ struct signal_struct {
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
-#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
* Pending notifications to parent.
*/
@@ -258,31 +269,25 @@ struct signal_struct {
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
- WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
-static inline int signal_group_exit(const struct signal_struct *sig)
-{
- return (sig->flags & SIGNAL_GROUP_EXIT) ||
- (sig->group_exit_task != NULL);
-}
-
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
- sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+ kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
struct task_struct *task = current;
kernel_siginfo_t __info;
+ enum pid_type __type;
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info);
+ ret = dequeue_signal(task, &task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
@@ -291,42 +296,32 @@ static inline int kernel_dequeue_signal(void)
static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
- if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+ current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
+ }
spin_unlock_irq(&current->sighand->siglock);
schedule();
}
-#ifdef __ARCH_SI_TRAPNO
-# define ___ARCH_SI_TRAPNO(_a1) , _a1
-#else
-# define ___ARCH_SI_TRAPNO(_a1)
-#endif
-#ifdef __ia64__
-# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
-#else
-# define ___ARCH_SI_IA64(_a1, _a2, _a3)
-#endif
-int force_sig_fault_to_task(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
-int force_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
-int send_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
+int force_sig_fault_to_task(int sig, int code, void __user *addr,
+ struct task_struct *t);
+int force_sig_fault(int sig, int code, void __user *addr);
+int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);
int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
+int send_sig_perf(void __user *addr, u32 type, u64 sig_data);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
+int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
+ struct task_struct *t);
+int force_sig_seccomp(int syscall, int reason, bool force_coredump);
extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
@@ -340,6 +335,8 @@ extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
+extern void force_fatal_sig(int);
+extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
@@ -347,17 +344,55 @@ extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+static inline void clear_notify_signal(void)
+{
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ smp_mb__after_atomic();
+}
+
+/*
+ * Returns 'true' if kick_process() is needed to force a transition from
+ * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
+ */
+static inline bool __set_notify_signal(struct task_struct *task)
+{
+ return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
+ !wake_up_state(task, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * Called to break out of interruptible wait loops, and enter the
+ * exit_to_user_mode_loop().
+ */
+static inline void set_notify_signal(struct task_struct *task)
+{
+ if (__set_notify_signal(task))
+ kick_process(task);
+}
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return -ERESTARTNOINTR;
}
-static inline int signal_pending(struct task_struct *p)
+static inline int task_sigpending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
+static inline int signal_pending(struct task_struct *p)
+{
+ /*
+ * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
+ * behavior in terms of ensuring that we break out of wait loops
+ * so that notify signal callbacks can be processed.
+ */
+ if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
+ return 1;
+ return task_sigpending(p);
+}
+
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
@@ -365,10 +400,10 @@ static inline int __fatal_signal_pending(struct task_struct *p)
static inline int fatal_signal_pending(struct task_struct *p)
{
- return signal_pending(p) && __fatal_signal_pending(p);
+ return task_sigpending(p) && __fatal_signal_pending(p);
}
-static inline int signal_pending_state(long state, struct task_struct *p)
+static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
@@ -398,19 +433,28 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags,
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
-extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+ unsigned int state = 0;
+ if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+ t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+ state = TASK_WAKEKILL | __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
- signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+ unsigned int state = 0;
+ if (resume) {
+ t->jobctl &= ~JOBCTL_TRACED;
+ state = __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
void task_join_group_stop(struct task_struct *task);
@@ -502,7 +546,7 @@ extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
static inline void restore_saved_sigmask_unless(bool interrupted)
{
if (interrupted)
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+ WARN_ON(!signal_pending(current));
else
restore_saved_sigmask();
}
@@ -524,6 +568,17 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
+static inline int __on_sig_stack(unsigned long sp)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return sp >= current->sas_ss_sp &&
+ sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+ return sp > current->sas_ss_sp &&
+ sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
+}
+
/*
* True if we are on the alternate signal stack.
*/
@@ -541,13 +596,7 @@ static inline int on_sig_stack(unsigned long sp)
if (current->sas_ss_flags & SS_AUTODISARM)
return 0;
-#ifdef CONFIG_STACK_GROWSUP
- return sp >= current->sas_ss_sp &&
- sp - current->sas_ss_sp < current->sas_ss_size;
-#else
- return sp > current->sas_ss_sp &&
- sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
+ return __on_sig_stack(sp);
}
static inline int sas_ss_flags(unsigned long sp)
@@ -591,17 +640,18 @@ extern void flush_itimer_signals(void);
extern bool current_is_single_threaded(void);
/*
- * Careful: do_each_thread/while_each_thread is a double loop so
- * 'break' will not work as expected - use goto instead.
+ * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
+ * otherwise next_thread(t) will never reach g after list_del_rcu(g).
*/
-#define do_each_thread(g, t) \
- for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
-
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define for_other_threads(p, t) \
+ for (t = p; (t = next_thread(t)) != p; )
+
#define __for_each_thread(signal, t) \
- list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
+ lockdep_is_held(&tasklist_lock))
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
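
The new for_other_threads() walks every thread in p's group except p itself. A hedged sketch of a caller (locking per the comment above; the function name is illustrative):

/* Sketch: wake all sibling threads; caller holds the appropriate locks. */
static void my_wake_siblings(struct task_struct *p)
{
	struct task_struct *t;

	for_other_threads(p, t)
		signal_wake_up(t, false);
}
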
@@ -660,22 +710,31 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
return p1->signal == p2->signal;
}
-static inline struct task_struct *next_thread(const struct task_struct *p)
+/*
+ * returns NULL if p is the last thread in the thread group
+ */
+static inline struct task_struct *__next_thread(struct task_struct *p)
+{
+ return list_next_or_null_rcu(&p->signal->thread_head,
+ &p->thread_node,
+ struct task_struct,
+ thread_node);
+}
+
+static inline struct task_struct *next_thread(struct task_struct *p)
{
- return list_entry_rcu(p->thread_group.next,
- struct task_struct, thread_group);
+ return __next_thread(p) ?: p->group_leader;
}
static inline int thread_group_empty(struct task_struct *p)
{
- return list_empty(&p->thread_group);
+ return thread_group_leader(p) &&
+ list_is_last(&p->thread_node, &p->signal->thread_head);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern bool thread_group_exited(struct pid *pid);
-
extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);
@@ -695,6 +754,12 @@ static inline void unlock_task_sighand(struct task_struct *task,
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_task_sighand_held(struct task_struct *task);
+#else
+static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
+#endif
+
static inline unsigned long task_rlimit(const struct task_struct *task,
unsigned int limit)
{
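
Several of the signal.h changes above exist so that TWA_SIGNAL task_work can break interruptible waits: signal_pending() now reports TIF_NOTIFY_SIGNAL as well as real signals. A hedged sketch of the waiter-side pattern this enables (my_wait_for_event() and its condition are illustrative):

/* Sketch: a wait loop that both signals and notify-signals can break. */
static int my_wait_for_event(struct wait_queue_head *wq, bool (*done)(void))
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (done())
			break;
		if (signal_pending(current)) {	/* signal or TIF_NOTIFY_SIGNAL */
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(wq, &wait);
	return ret;
}
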
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 59d3736c454c..fb1e295e7e63 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -17,4 +17,4 @@ static inline bool sched_smt_active(void) { return false; }
void arch_smt_update(void);
-#endif
+#endif /* _LINUX_SCHED_SMT_H */
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 568286411b43..0108a38bb64d 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -3,6 +3,7 @@
#define _LINUX_SCHED_STAT_H
#include <linux/percpu.h>
+#include <linux/kconfig.h>
/*
* Various counters maintained by the scheduler and fork(),
@@ -16,21 +17,14 @@ extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
-extern unsigned long nr_running(void);
+extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(int cpu);
+extern unsigned int nr_iowait(void);
+extern unsigned int nr_iowait_cpu(int cpu);
static inline int sched_info_on(void)
{
-#ifdef CONFIG_SCHEDSTATS
- return 1;
-#elif defined(CONFIG_TASK_DELAY_ACCT)
- extern int delayacct_on;
- return delayacct_on;
-#else
- return 0;
-#endif
+ return IS_ENABLED(CONFIG_SCHED_INFO);
}
#ifdef CONFIG_SCHEDSTATS
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3c31ba88aca5..5a64582b086b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -4,98 +4,29 @@
#include <linux/types.h>
-struct ctl_table;
-
#ifdef CONFIG_DETECT_HUNG_TASK
-
-#ifdef CONFIG_SMP
-extern unsigned int sysctl_hung_task_all_cpu_backtrace;
-#else
-#define sysctl_hung_task_all_cpu_backtrace 0
-#endif /* CONFIG_SMP */
-
-extern int sysctl_hung_task_check_count;
-extern unsigned int sysctl_hung_task_panic;
+/* used for hung_task and block/ */
extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_check_interval_secs;
-extern int sysctl_hung_task_warnings;
-int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
SCHED_TUNABLESCALING_LINEAR,
SCHED_TUNABLESCALING_END,
};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_size;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern __read_mostly unsigned int sysctl_sched_migration_cost;
-extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length, loff_t *ppos);
-#endif
-
-/*
- * control realtime throttling:
- *
- * /proc/sys/kernel/sched_rt_period_us
- * /proc/sys/kernel/sched_rt_runtime_us
- */
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-extern unsigned int sysctl_sched_dl_period_max;
-extern unsigned int sysctl_sched_dl_period_min;
+#define NUMA_BALANCING_DISABLED 0x0
+#define NUMA_BALANCING_NORMAL 0x1
+#define NUMA_BALANCING_MEMORY_TIERING 0x2
-#ifdef CONFIG_UCLAMP_TASK
-extern unsigned int sysctl_sched_uclamp_util_min;
-extern unsigned int sysctl_sched_uclamp_util_max;
-extern unsigned int sysctl_sched_uclamp_util_min_rt_default;
-#endif
-
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-#endif
-
-extern int sysctl_sched_rr_timeslice;
-extern int sched_rr_timeslice;
-
-int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-extern unsigned int sysctl_sched_energy_aware;
-int sched_energy_aware_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
+#ifdef CONFIG_NUMA_BALANCING
+extern int sysctl_numa_balancing_mode;
+#else
+#define sysctl_numa_balancing_mode 0
#endif
#endif /* _LINUX_SCHED_SYSCTL_H */
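
sysctl_numa_balancing_mode replaces the old boolean: it is a bit mask of the NUMA_BALANCING_* values above. A hedged one-line check (the helper name is illustrative):

/* Sketch: is memory-tiering-aware NUMA balancing enabled? */
static inline bool my_numa_tiering_enabled(void)
{
	return sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING;
}
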
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index a98965007eef..d362aacf9f89 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -7,6 +7,8 @@
* functionality:
*/
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -23,7 +25,12 @@ struct kernel_clone_args {
int __user *pidfd;
int __user *child_tid;
int __user *parent_tid;
+ const char *name;
int exit_signal;
+ u32 kthread:1;
+ u32 io_thread:1;
+ u32 user_worker:1;
+ u32 no_files:1;
unsigned long stack;
unsigned long stack_size;
unsigned long tls;
@@ -31,6 +38,9 @@ struct kernel_clone_args {
/* Number of elements in *set_tid */
size_t set_tid_size;
int cgroup;
+ int idle;
+ int (*fn)(void *);
+ void *fn_arg;
struct cgroup *cgrp;
struct css_set *cset;
};
@@ -47,27 +57,27 @@ extern spinlock_t mmlist_lock;
extern union thread_union init_thread_union;
extern struct task_struct init_task;
-#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
-#endif /* #ifdef CONFIG_PROVE_RCU */
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
+extern void mm_cache_init(void);
extern void proc_caches_init(void);
extern void fork_init(void);
extern void release_task(struct task_struct * p);
-extern int copy_thread(unsigned long, unsigned long, unsigned long,
- struct task_struct *, unsigned long);
+extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
extern void flush_thread(void);
@@ -78,15 +88,19 @@ static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
-extern long _do_fork(struct kernel_clone_args *kargs);
+extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+struct task_struct *copy_process(struct pid *pid, int trace, int node,
+ struct kernel_clone_args *args);
+struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
-struct mm_struct *copy_init_mm(void);
-extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
+ unsigned long flags);
+extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
@@ -106,13 +120,51 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
}
extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
- if (refcount_dec_and_test(&t->usage))
+ if (!refcount_dec_and_test(&t->usage))
+ return;
+
+ /*
+ * In !RT, it is always safe to call __put_task_struct().
+ * Under RT, we can only call it in preemptible context.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
+
+ lock_map_acquire_try(&put_task_map);
__put_task_struct(t);
+ lock_map_release(&put_task_map);
+ return;
+ }
+
+ /*
+ * under PREEMPT_RT, we can't call put_task_struct
+ * in atomic context because it will indirectly
+ * acquire sleeping locks.
+ *
+ * call_rcu() will schedule delayed_put_task_struct_rcu()
+ * to be called in process context.
+ *
+ * __put_task_struct() is called when
+ * refcount_dec_and_test(&t->usage) succeeds.
+ *
+ * This means that it can't "conflict" with
+ * put_task_struct_rcu_user() which abuses ->rcu the same
+ * way; rcu_users has a reference so task->usage can't be
+ * zero after rcu_users 1 -> 0 transition.
+ *
+ * delayed_free_task() also uses ->rcu, but it is only called
+ * when it fails to fork a process. Therefore, there is no
+ * way it can conflict with put_task_struct().
+ */
+ call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
+DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
+
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
if (refcount_sub_and_test(nr, &t->usage))
@@ -121,6 +173,9 @@ static inline void put_task_struct_many(struct task_struct *t, int nr)
void put_task_struct_rcu_user(struct task_struct *task);
+/* Free all architecture-specific resources held by a thread. */
+void release_thread(struct task_struct *dead_task);
+
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
@@ -157,7 +212,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
@@ -173,4 +228,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
#endif /* _LINUX_SCHED_TASK_H */
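
DEFINE_FREE(put_task, ...) and DEFINE_GUARD(task_lock, ...) hook these helpers into the <linux/cleanup.h> scope-based cleanup machinery. A hedged sketch of a caller (my_inspect_task() is illustrative):

/* Sketch: reference and lock are dropped automatically at scope exit. */
static int my_inspect_task(pid_t nr)
{
	struct task_struct *p __free(put_task) = NULL;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);
	rcu_read_unlock();

	if (!p)
		return -ESRCH;

	guard(task_lock)(p);	/* task_lock(p) now, task_unlock(p) on return */
	/* ->mm, ->files, ->comm are stable here */
	return 0;
}
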
diff --git a/include/linux/sched/task_flags.h b/include/linux/sched/task_flags.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/task_flags.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 2413427e439c..ccd72b978e1f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/magic.h>
+#include <linux/refcount.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
@@ -16,16 +17,20 @@
* try_get_task_stack() instead. task_stack_page will return a pointer
* that could get freed out from under you.
*/
-static inline void *task_stack_page(const struct task_struct *task)
+static __always_inline void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
#define setup_thread_stack(new,old) do { } while(0)
-static inline unsigned long *end_of_stack(const struct task_struct *task)
+static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+#else
return task->stack;
+#endif
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
@@ -75,6 +80,8 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
static inline void put_task_stack(struct task_struct *tsk) {}
#endif
+void exit_task_stack_account(struct task_struct *tsk);
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
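
With the CONFIG_STACK_GROWSUP variant above, end_of_stack() always points at the word holding the canary, which is exactly what task_stack_end_corrupted() tests. A hedged sketch of the same check spelled out (the function name is illustrative):

/* Sketch: report a clobbered stack-end canary. */
static void my_check_stack(struct task_struct *tsk)
{
	unsigned long *end = end_of_stack(tsk);

	if (*end != STACK_END_MAGIC)
		pr_err("%s/%d: kernel stack canary overwritten\n",
		       tsk->comm, task_pid_nr(tsk));
}
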
diff --git a/include/linux/sched/thread_info_api.h b/include/linux/sched/thread_info_api.h
new file mode 100644
index 000000000000..2c60fbc16c08
--- /dev/null
+++ b/include/linux/sched/thread_info_api.h
@@ -0,0 +1 @@
+#include <linux/thread_info.h>
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 820511289857..18572c9ea724 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -11,32 +11,48 @@
*/
#ifdef CONFIG_SMP
-#define SD_BALANCE_NEWIDLE 0x0001 /* Balance when about to become idle */
-#define SD_BALANCE_EXEC 0x0002 /* Balance on exec */
-#define SD_BALANCE_FORK 0x0004 /* Balance on fork, clone */
-#define SD_BALANCE_WAKE 0x0008 /* Balance on wakeup */
-#define SD_WAKE_AFFINE 0x0010 /* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY 0x0020 /* Domain members have different CPU capacities */
-#define SD_SHARE_CPUCAPACITY 0x0040 /* Domain members share CPU capacity */
-#define SD_SHARE_POWERDOMAIN 0x0080 /* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES 0x0100 /* Domain members share CPU pkg resources */
-#define SD_SERIALIZE 0x0200 /* Only a single load balancing instance */
-#define SD_ASYM_PACKING 0x0400 /* Place busy groups earlier in the domain */
-#define SD_PREFER_SIBLING 0x0800 /* Prefer to place tasks in a sibling domain */
-#define SD_OVERLAP 0x1000 /* sched_domains of this level overlap */
-#define SD_NUMA 0x2000 /* cross-node balancing */
+/* Generate SD flag indexes */
+#define SD_FLAG(name, mflags) __##name,
+enum {
+ #include <linux/sched/sd_flags.h>
+ __SD_FLAG_CNT,
+};
+#undef SD_FLAG
+/* Generate SD flag bits */
+#define SD_FLAG(name, mflags) name = 1 << __##name,
+enum {
+ #include <linux/sched/sd_flags.h>
+};
+#undef SD_FLAG
+
+#ifdef CONFIG_SCHED_DEBUG
+
+struct sd_flag_debug {
+ unsigned int meta_flags;
+ char *name;
+};
+extern const struct sd_flag_debug sd_flag_debug[];
+
+#endif
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
- return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+ return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
+}
+#endif
+
+#ifdef CONFIG_SCHED_CLUSTER
+static inline int cpu_cluster_flags(void)
+{
+ return SD_CLUSTER | SD_SHARE_LLC;
}
#endif
#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
- return SD_SHARE_PKG_RESOURCES;
+ return SD_SHARE_LLC;
}
#endif
@@ -65,6 +81,7 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
+ int nr_idle_scan;
};
struct sched_domain {
@@ -77,6 +94,7 @@ struct sched_domain {
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
+ unsigned int imb_numa_nr; /* Nr running tasks that allows a NUMA imbalance */
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -89,9 +107,7 @@ struct sched_domain {
/* idle_balance() stats */
u64 max_newidle_lb_cost;
- unsigned long next_decay_max_lb_cost;
-
- u64 avg_scan_cost; /* select_idle_sibling */
+ unsigned long last_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
@@ -160,7 +176,9 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
+bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_resources(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
@@ -185,7 +203,7 @@ struct sched_domain_topology_level {
#endif
};
-extern void set_sched_topology(struct sched_domain_topology_level *tl);
+extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
@@ -209,13 +227,31 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
{
}
+static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
return true;
}
+static inline bool cpus_share_resources(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
#endif /* !CONFIG_SMP */
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern void rebuild_sched_domains_energy(void);
+#else
+static inline void rebuild_sched_domains_energy(void)
+{
+}
+#endif
+
#ifndef arch_scale_cpu_capacity
/**
* arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
@@ -242,13 +278,21 @@ unsigned long arch_scale_thermal_pressure(int cpu)
}
#endif
-#ifndef arch_set_thermal_pressure
+#ifndef arch_update_thermal_pressure
static __always_inline
-void arch_set_thermal_pressure(const struct cpumask *cpus,
- unsigned long th_pressure)
+void arch_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_frequency)
{ }
#endif
+#ifndef arch_scale_freq_ref
+static __always_inline
+unsigned int arch_scale_freq_ref(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
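
cpus_share_resources() (cluster level) joins cpus_share_cache() (LLC level) as a topology predicate. A hedged sketch of a wake-placement heuristic built on them (my_pick_cpu() is illustrative, not the scheduler's actual policy):

/* Sketch: keep a wakee near its previous CPU if topology says it's cheap. */
static int my_pick_cpu(int prev_cpu, int this_cpu)
{
	if (cpus_share_resources(prev_cpu, this_cpu))	/* same cluster */
		return prev_cpu;
	if (cpus_share_cache(prev_cpu, this_cpu))	/* same LLC */
		return prev_cpu;
	return this_cpu;
}
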
diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h
index 3c3e049224ae..969aaf5ef9d6 100644
--- a/include/linux/sched/types.h
+++ b/include/linux/sched/types.h
@@ -20,4 +20,4 @@ struct task_cputime {
unsigned long long sum_exec_runtime;
};
-#endif
+#endif /* _LINUX_SCHED_TYPES_H */
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index a8ec3b6093fc..4cc52698e214 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/percpu_counter.h>
#include <linux/refcount.h>
#include <linux/ratelimit.h>
@@ -12,19 +13,9 @@
*/
struct user_struct {
refcount_t __count; /* reference count */
- atomic_t processes; /* How many processes does this user have? */
- atomic_t sigpending; /* How many pending signals does this user have? */
-#ifdef CONFIG_FANOTIFY
- atomic_t fanotify_listeners;
-#endif
#ifdef CONFIG_EPOLL
- atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
-#endif
-#ifdef CONFIG_POSIX_MQUEUE
- /* protected by mq_lock */
- unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+ struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */
#endif
- unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
@@ -33,7 +24,8 @@ struct user_struct {
kuid_t uid;
#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
- defined(CONFIG_NET) || defined(CONFIG_IO_URING)
+ defined(CONFIG_NET) || defined(CONFIG_IO_URING) || \
+ defined(CONFIG_VFIO_PCI_ZDEV_KVM) || IS_ENABLED(CONFIG_IOMMUFD)
atomic_long_t locked_vm;
#endif
#ifdef CONFIG_WATCH_QUEUE
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
new file mode 100644
index 000000000000..bc60243d43b3
--- /dev/null
+++ b/include/linux/sched/vhost_task.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_VHOST_TASK_H
+#define _LINUX_SCHED_VHOST_TASK_H
+
+struct vhost_task;
+
+struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
+ const char *name);
+void vhost_task_start(struct vhost_task *vtsk);
+void vhost_task_stop(struct vhost_task *vtsk);
+void vhost_task_wake(struct vhost_task *vtsk);
+
+#endif /* _LINUX_SCHED_VHOST_TASK_H */
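
A hedged sketch of the lifecycle this new API implies (the worker body and names are illustrative; the fn return-value contract belongs to the vhost core):

/* Sketch: create, run, and tear down a vhost task. */
static bool my_worker(void *data)
{
	/* drain queued work here; return value per the vhost core's contract */
	return false;
}

static int my_vhost_demo(void)
{
	struct vhost_task *vtsk;

	vtsk = vhost_task_create(my_worker, NULL, "vhost-demo");
	if (!vtsk)
		return -ENOMEM;

	vhost_task_start(vtsk);
	vhost_task_wake(vtsk);	/* kick the worker once */
	vhost_task_stop(vtsk);	/* stop and wait for exit */
	return 0;
}
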
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 26a2013ac39c..06cd8fb2f409 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -42,8 +42,11 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define DEFINE_WAKE_Q(name) \
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+#define WAKE_Q_HEAD_INITIALIZER(name) \
+ { WAKE_Q_TAIL, &name.first }
+
+#define DEFINE_WAKE_Q(name) \
+ struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name)
static inline void wake_q_init(struct wake_q_head *head)
{
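
WAKE_Q_HEAD_INITIALIZER() factors the old DEFINE_WAKE_Q() body out so a wake queue can be initialized inside a larger initializer. A hedged sketch (my_obj is illustrative):

/* Sketch: static initialization of an embedded wake queue. */
struct my_obj {
	int state;
	struct wake_q_head wq;
};

static struct my_obj my_obj = {
	.state = 0,
	.wq = WAKE_Q_HEAD_INITIALIZER(my_obj.wq),
};
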
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 528718e4ed52..cb41c5edb4d4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -5,6 +5,8 @@
#ifndef LINUX_SCHED_CLOCK
#define LINUX_SCHED_CLOCK
+#include <linux/types.h>
+
#ifdef CONFIG_GENERIC_SCHED_CLOCK
/**
* struct clock_read_data - data required to read from sched_clock()
@@ -14,7 +16,7 @@
* @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
* clocks.
* @read_sched_clock: Current clock source (or dummy source when suspended).
- * @mult: Multipler for scaled math conversion.
+ * @mult: Multiplier for scaled math conversion.
* @shift: Shift value for scaled math conversion.
*
* Care must be taken when updating this structure; it is read by
diff --git a/include/linux/scif.h b/include/linux/scif.h
deleted file mode 100644
index eeb250b73c4b..000000000000
--- a/include/linux/scif.h
+++ /dev/null
@@ -1,1339 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef __SCIF_H__
-#define __SCIF_H__
-
-#include <linux/types.h>
-#include <linux/poll.h>
-#include <linux/device.h>
-#include <linux/scif_ioctl.h>
-
-#define SCIF_ACCEPT_SYNC 1
-#define SCIF_SEND_BLOCK 1
-#define SCIF_RECV_BLOCK 1
-
-enum {
- SCIF_PROT_READ = (1 << 0),
- SCIF_PROT_WRITE = (1 << 1)
-};
-
-enum {
- SCIF_MAP_FIXED = 0x10,
- SCIF_MAP_KERNEL = 0x20,
-};
-
-enum {
- SCIF_FENCE_INIT_SELF = (1 << 0),
- SCIF_FENCE_INIT_PEER = (1 << 1),
- SCIF_SIGNAL_LOCAL = (1 << 4),
- SCIF_SIGNAL_REMOTE = (1 << 5)
-};
-
-enum {
- SCIF_RMA_USECPU = (1 << 0),
- SCIF_RMA_USECACHE = (1 << 1),
- SCIF_RMA_SYNC = (1 << 2),
- SCIF_RMA_ORDERED = (1 << 3)
-};
-
-/* End of SCIF Admin Reserved Ports */
-#define SCIF_ADMIN_PORT_END 1024
-
-/* End of SCIF Reserved Ports */
-#define SCIF_PORT_RSVD 1088
-
-typedef struct scif_endpt *scif_epd_t;
-typedef struct scif_pinned_pages *scif_pinned_pages_t;
-
-/**
- * struct scif_range - SCIF registered range used in kernel mode
- * @cookie: cookie used internally by SCIF
- * @nr_pages: number of pages of PAGE_SIZE
- * @prot_flags: R/W protection
- * @phys_addr: Array of bus addresses
- * @va: Array of kernel virtual addresses backed by the pages in the phys_addr
- * array. The va is populated only when called on the host for a remote
- * SCIF connection on MIC. This is required to support the use case of DMA
- * between MIC and another device which is not a SCIF node, e.g., an IB or
- * Ethernet NIC.
- */
-struct scif_range {
- void *cookie;
- int nr_pages;
- int prot_flags;
- dma_addr_t *phys_addr;
- void __iomem **va;
-};
-
-/**
- * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll
- * @epd: SCIF endpoint
- * @events: requested events
- * @revents: returned events
- */
-struct scif_pollepd {
- scif_epd_t epd;
- __poll_t events;
- __poll_t revents;
-};
-
-/**
- * scif_peer_dev - representation of a peer SCIF device
- *
- * Peer devices show up as PCIe devices for the mgmt node but not the cards.
- * The mgmt node discovers all the cards on the PCIe bus and informs the other
- * cards about their peers. Upon notification of a peer a node adds a peer
- * device to the peer bus to maintain symmetry in the way devices are
- * discovered across all nodes in the SCIF network.
- *
- * @dev: underlying device
- * @dnode: The destination node which this device will communicate with.
- */
-struct scif_peer_dev {
- struct device dev;
- u8 dnode;
-};
-
-/**
- * scif_client - representation of a SCIF client
- * @name: client name
- * @probe: client method called when a peer device is registered
- * @remove: client method called when a peer device is unregistered
- * @si: subsys_interface used internally for implementing SCIF clients
- */
-struct scif_client {
- const char *name;
- void (*probe)(struct scif_peer_dev *spdev);
- void (*remove)(struct scif_peer_dev *spdev);
- struct subsys_interface si;
-};
-
-#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
-#define SCIF_REGISTER_FAILED ((off_t)-1)
-#define SCIF_MMAP_FAILED ((void *)-1)
-
-/**
- * scif_open() - Create an endpoint
- *
- * Return:
- * Upon successful completion, scif_open() returns an endpoint descriptor to
- * be used in subsequent SCIF functions calls to refer to that endpoint;
- * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
- * returned and errno is set to indicate the error; in kernel mode a NULL
- * scif_epd_t is returned.
- *
- * Errors:
- * ENOMEM - Insufficient kernel memory was available
- */
-scif_epd_t scif_open(void);
-
-/**
- * scif_bind() - Bind an endpoint to a port
- * @epd: endpoint descriptor
- * @pn: port number
- *
- * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
- * local node. If pn is zero, a port number greater than or equal to
- * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
- * exactly one local port. Ports below 1024 can only be bound by system (or
- * root) processes or by processes executed by privileged users.
- *
- * Return:
- * Upon successful completion, scif_bind() returns the port number to which epd
- * is bound; otherwise in user mode -1 is returned and errno is set to
- * indicate the error; in kernel mode the negative of one of the following
- * errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINVAL - the endpoint or the port is already bound
- * EISCONN - The endpoint is already connected
- * ENOSPC - No port number available for assignment
- * EACCES - The port requested is protected and the user is not the superuser
- */
-int scif_bind(scif_epd_t epd, u16 pn);
-
-/**
- * scif_listen() - Listen for connections on an endpoint
- * @epd: endpoint descriptor
- * @backlog: maximum pending connection requests
- *
- * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
- * an endpoint that will be used to accept incoming connection requests. Once
- * so marked, the endpoint is said to be in the listening state and may not be
- * used as the endpoint of a connection.
- *
- * The endpoint, epd, must have been bound to a port.
- *
- * The backlog argument defines the maximum length to which the queue of
- * pending connections for epd may grow. If a connection request arrives when
- * the queue is full, the client may receive an error with an indication that
- * the connection was refused.
- *
- * Return:
- * Upon successful completion, scif_listen() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINVAL - the endpoint is not bound to a port
- * EISCONN - The endpoint is already connected or listening
- */
-int scif_listen(scif_epd_t epd, int backlog);
-
-/**
- * scif_connect() - Initiate a connection on a port
- * @epd: endpoint descriptor
- * @dst: global id of port to which to connect
- *
- * The scif_connect() function requests the connection of endpoint epd to remote
- * port dst. If the connection is successful, a peer endpoint, bound to dst, is
- * created on node dst.node. On successful return, the connection is complete.
- *
- * If the endpoint epd has not already been bound to a port, scif_connect()
- * will bind it to an unused local port.
- *
- * A connection is terminated when an endpoint of the connection is closed,
- * either explicitly by scif_close(), or when a process that owns one of the
- * endpoints of the connection is terminated.
- *
- * In user space, scif_connect() supports an asynchronous connection mode
- * if the application has set the O_NONBLOCK flag on the endpoint via the
- * fcntl() system call. Setting this flag will result in the calling process
- * not waiting during scif_connect().
- *
- * Return:
- * Upon successful completion, scif_connect() returns the port ID to which the
- * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
- * set to indicate the error; in kernel mode the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNREFUSED - The destination was not listening for connections or refused
- * the connection request
- * EINVAL - dst.port is not a valid port ID
- * EISCONN - The endpoint is already connected
- * ENOMEM - No buffer space is available
- * ENODEV - The destination node does not exist, or the node is lost, or once
- * existed but is not currently in the network since it may have crashed
- * ENOSPC - No port number available for assignment
- * EOPNOTSUPP - The endpoint is listening and cannot be connected
- */
-int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
-
-/**
- * scif_accept() - Accept a connection on an endpoint
- * @epd: endpoint descriptor
- * @peer: global id of port to which connected
- * @newepd: new connected endpoint descriptor
- * @flags: flags
- *
- * The scif_accept() call extracts the first connection request from the queue
- * of pending connections for the port on which epd is listening. scif_accept()
- * creates a new endpoint, bound to the same port as epd, and allocates a new
- * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
- * endpoint is connected to the endpoint through which the connection was
- * requested. epd is unaffected by this call, and remains in the listening
- * state.
- *
- * On successful return, peer holds the global port identifier (node id and
- * local port number) of the port which requested the connection.
- *
- * A connection is terminated when an endpoint of the connection is closed,
- * either explicitly by scif_close(), or when a process that owns one of the
- * endpoints of the connection is terminated.
- *
- * The number of connections that can (subsequently) be accepted on epd is only
- * limited by system resources (memory).
- *
- * The flags argument is formed by OR'ing together zero or more of the
- * following values.
- * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
- * SCIF_ACCEPT_SYNC is not in flags, and no pending
- * connections are present on the queue, scif_accept()
- * fails with an EAGAIN error
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when there is a connection request. In kernel mode, the scif_poll()
- * function may be used for this purpose. A readable event will be delivered
- * when a connection is requested.
- *
- * Return:
- * Upon successful completion, scif_accept() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
- * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete
- * its connection request
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINTR - Interrupted function
- * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
- * NULL, or newepd is NULL
- * ENODEV - The requesting node is lost, or once existed but is not currently
- * in the network since it may have crashed
- * ENOMEM - Not enough space
- * ENOENT - Secondary part of epd registration failed
- */
-int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
- *newepd, int flags);
-
-/**
- * scif_close() - Close an endpoint
- * @epd: endpoint descriptor
- *
- * scif_close() closes an endpoint and performs necessary teardown of
- * facilities associated with that endpoint.
- *
- * If epd is a listening endpoint then it will no longer accept connection
- * requests on the port to which it is bound. Any pending connection requests
- * are rejected.
- *
- * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
- * which are in-process through epd or its peer endpoint will complete before
- * scif_close() returns. Registered windows of the local and peer endpoints are
- * released as if scif_unregister() was called against each window.
- *
- * Closing a SCIF endpoint does not affect local registered memory mapped by
- * a SCIF endpoint on a remote node. The local memory remains mapped by the peer
- * SCIF endpoint until explicitly removed by the peer calling munmap(..).
- *
- * If the peer endpoint's receive queue is not empty at the time that epd is
- * closed, then the peer endpoint can be passed as the endpoint parameter to
- * scif_recv() until the receive queue is empty.
- *
- * epd is freed and may no longer be accessed.
- *
- * Return:
- * Upon successful completion, scif_close() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- */
-int scif_close(scif_epd_t epd);
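
For the record, the connect/accept flow the deleted documentation describes, as a hedged sketch against the removed prototypes (listener side; error handling abbreviated):

/* Sketch of the (removed) SCIF listener handshake. */
static int my_scif_listener(void)
{
	struct scif_port_id peer;
	scif_epd_t lep, nep;
	int ret;

	lep = scif_open();		/* NULL on failure in kernel mode */
	if (!lep)
		return -ENOMEM;

	ret = scif_bind(lep, 0);	/* 0: assign a port >= SCIF_PORT_RSVD */
	if (ret < 0)
		goto out;

	ret = scif_listen(lep, 16);
	if (ret < 0)
		goto out;

	ret = scif_accept(lep, &peer, &nep, SCIF_ACCEPT_SYNC);
	if (!ret)
		scif_close(nep);
out:
	scif_close(lep);
	return ret;
}
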
-
-/**
- * scif_send() - Send a message
- * @epd: endpoint descriptor
- * @msg: message buffer address
- * @len: message length
- * @flags: blocking mode flags
- *
- * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
- * are copied from memory starting at address msg. On successful execution the
- * return value of scif_send() is the number of bytes that were sent, and is
- * zero if no bytes were sent because len was zero. scif_send() may be called
- * only when the endpoint is in a connected state.
- *
- * If a scif_send() call is non-blocking, then it sends only those bytes which
- * can be sent without waiting, up to a maximum of len bytes.
- *
- * If a scif_send() call is blocking, then it normally returns after sending
- * all len bytes. If a blocking call is interrupted or the connection is
- * reset, the call is considered successful if some bytes were sent or len is
- * zero, otherwise the call is considered unsuccessful.
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when the send queue is not full. In kernel mode, the scif_poll() function
- * may be used for this purpose.
- *
- * It is recommended that scif_send()/scif_recv() only be used for short
- * control-type message communication between SCIF endpoints. The SCIF RMA
- * APIs are expected to provide better performance for transfer sizes of
- * 1024 bytes or longer for the current MIC hardware and software
- * implementation.
- *
- * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
- * is passed as the flags argument.
- *
- * Return:
- * Upon successful completion, scif_send() returns the number of bytes sent;
- * otherwise in user mode -1 is returned and errno is set to indicate the
- * error; in kernel mode the negative of one of the following errors is
- * returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or len is negative
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-int scif_send(scif_epd_t epd, void *msg, int len, int flags);
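
A sketch of a short blocking control-message send over a connected endpoint, per the semantics above:

        static int send_cmd(scif_epd_t epd)
        {
                char cmd[8] = "reset";
                int rc;

                /* SCIF_SEND_BLOCK: return only once all bytes have been sent. */
                rc = scif_send(epd, cmd, sizeof(cmd), SCIF_SEND_BLOCK);
                return rc < 0 ? rc : 0; /* kernel mode: negative errno */
        }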
-
-/**
- * scif_recv() - Receive a message
- * @epd: endpoint descriptor
- * @msg: message buffer address
- * @len: message buffer length
- * @flags: blocking mode flags
- *
- * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
- * data are copied to memory starting at address msg. On successful execution
- * the return value of scif_recv() is the number of bytes that were received,
- * and is zero if no bytes were received because len was zero. scif_recv() may
- * be called only when the endpoint is in a connected state.
- *
- * If a scif_recv() call is non-blocking, then it receives only those bytes
- * which can be received without waiting, up to a maximum of len bytes.
- *
- * If a scif_recv() call is blocking, then it normally returns after receiving
- * all len bytes. If the blocking call was interrupted due to a disconnection,
- * subsequent calls to scif_recv() will copy all bytes received up to the point
- * of disconnection.
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when data is available to be received. In kernel mode, the scif_poll()
- * function may be used for this purpose.
- *
- * It is recommended that scif_send()/scif_recv() only be used for short
- * control-type message communication between SCIF endpoints. The SCIF RMA
- * APIs are expected to provide better performance for transfer sizes of
- * 1024 bytes or longer for the current MIC hardware and software
- * implementation.
- *
- * scif_recv() will block until the entire message is received if
- * SCIF_RECV_BLOCK is passed as the flags argument.
- *
- * Return:
- * Upon successful completion, scif_recv() returns the number of bytes
- * received; otherwise in user mode -1 is returned and errno is set to
- * indicate the error; in kernel mode the negative of one of the following
- * errors is returned.
- *
- * Errors:
- * EAGAIN - The destination node is returning from a low power state
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or len is negative
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
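
The matching receive side, again a sketch for a connected endpoint:

        static int recv_cmd(scif_epd_t epd)
        {
                char cmd[8];
                int rc;

                /* SCIF_RECV_BLOCK: return only once the full buffer is filled. */
                rc = scif_recv(epd, cmd, sizeof(cmd), SCIF_RECV_BLOCK);
                return rc < 0 ? rc : 0;
        }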
-
-/**
- * scif_register() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @addr: starting virtual address
- * @len: length of range
- * @offset: offset of window
- * @prot_flags: read/write protection flags
- * @map_flags: mapping flags
- *
- * The scif_register() function opens a window, a range of whole pages of the
- * registered address space of the endpoint epd, starting at offset po and
- * continuing for len bytes. The value of po, further described below, is a
- * function of the parameters offset and len, and the value of map_flags. Each
- * page of the window represents the physical memory page which backs the
- * corresponding page of the range of virtual address pages starting at addr
- * and continuing for len bytes. addr and len are constrained to be multiples
- * of the page size. A successful scif_register() call returns po.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register() will not replace any existing
- * registration; an error is returned if any page within the range [offset,
- * offset + len - 1] intersects an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po value so chosen will
- * be an area of the registered address space that the implementation deems
- * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
- * granting the implementation complete freedom in selecting po, subject to
- * constraints described below. A non-zero value of offset is taken to be a
- * suggestion of an offset near which the mapping should be placed. When the
- * implementation selects a value for po, it does not replace any extant
- * window. In all cases, po will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to mmap(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Subsequent operations which change the memory pages to which virtual
- * addresses are mapped (such as mmap(), munmap()) have no effect on
- * existing windows.
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations from the window
- * SCIF_PROT_WRITE - allow write operations to the window
- *
- * Return:
- * Upon successful completion, scif_register() returns the offset at which the
- * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
- * is (off_t)-1) is returned and errno is set to indicate the error; in
- * kernel mode the negative of one of the following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
- * [offset, offset + len - 1] are already registered
- * EAGAIN - The mapping could not be performed due to lack of resources
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED
- * is set in map_flags and offset is not a multiple of the page size, or addr
- * is not a multiple of the page size, or len is not a multiple of the page
- * size or is 0, or offset is negative
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
- int prot_flags, int map_flags);
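
A sketch of opening a window over a page-aligned buffer, letting the implementation pick po (buf and len are assumed to be multiples of the page size; kernel mode shown, where failure is a negative errno rather than SCIF_REGISTER_FAILED):

        static off_t expose_window(scif_epd_t epd, void *buf, size_t len)
        {
                off_t po;

                po = scif_register(epd, buf, len, 0,
                                   SCIF_PROT_READ | SCIF_PROT_WRITE, 0);
                /* On success, po is the window offset to share with the peer. */
                return po;
        }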
-
-/**
- * scif_unregister() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @offset: start of range to unregister
- * @len: length of range to unregister
- *
- * The scif_unregister() function closes those previously registered windows
- * which are entirely within the range [offset, offset + len - 1]. It is an
- * error to specify a range which intersects only a subrange of a window.
- *
- * On a successful return, pages within the window may no longer be specified
- * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
- * scif_vwriteto(), scif_get_pages(), and scif_fence_signal(). The window,
- * however, continues to exist until all previous references against it are
- * removed. A window is referenced if there is a mapping to it created by
- * mmap(), or if scif_get_pages() was called against the window
- * (and the pages have not been returned via scif_put_pages()). A window is
- * also referenced while an RMA, in which some range of the window is a source
- * or destination, is in progress. Finally a window is referenced while some
- * offset in that window was specified to scif_fence_signal(), and the RMAs
- * marked by that call to scif_fence_signal() have not completed. While a
- * window is in this state, its registered address space pages are not
- * available for use in a new registered window.
- *
- * When all such references to the window have been removed, its references to
- * all the physical pages which it represents are removed. Similarly, the
- * registered address space pages of the window become available for
- * registration in a new window.
- *
- * Return:
- * Upon successful completion, scif_unregister() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned. In the event of an
- * error, no windows are unregistered.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
- * window, or offset is negative
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
- * registered address space of epd
- */
-int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
-
-/**
- * scif_readfrom() - Copy from a remote address space
- * @epd: endpoint descriptor
- * @loffset: offset in local registered address space to
- * which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space
- * from which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_readfrom() copies len bytes from the remote registered address space of
- * the peer of endpoint epd, starting at the offset roffset to the local
- * registered address space of epd, starting at the offset loffset.
- *
- * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
- * roffset + len - 1] must be within some registered window or windows of the
- * local and remote nodes. A range may intersect multiple registered windows,
- * but only if those windows are contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations
- * complete is non-deterministic. The synchronization functions,
- * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
- * to synchronize to the completion of asynchronous RMA operations on the
- * same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * The optimal DMA performance will likely be realized if both
- * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if loffset and roffset are not
- * cacheline aligned but are separated by some multiple of 64. The lowest level
- * of performance is likely if loffset and roffset are not separated by a
- * multiple of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_readfrom() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
- * address space of epd, or the range [roffset, roffset + len - 1] is invalid
- * for the registered address space of the peer of epd, or loffset or roffset
- * is negative
- */
-int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t
- roffset, int rma_flags);
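
A sketch of a synchronous DMA pull from the peer, per the flag semantics above (both offsets assumed to lie within registered windows):

        static int pull_sync(scif_epd_t epd, off_t loffset, off_t roffset,
                             size_t len)
        {
                /* SCIF_RMA_SYNC: busy-waits until the DMA transfer completes. */
                return scif_readfrom(epd, loffset, len, roffset, SCIF_RMA_SYNC);
        }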
-
-/**
- * scif_writeto() - Copy to a remote address space
- * @epd: endpoint descriptor
- * @loffset: offset in local registered address space
- * from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to
- * which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_writeto() copies len bytes from the local registered address space of
- * epd, starting at the offset loffset to the remote registered address space
- * of the peer of endpoint epd, starting at the offset roffset.
- *
- * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
- * roffset + len - 1] must be within some registered window or windows of the
- * local and remote nodes. A range may intersect multiple registered windows,
- * but only if those windows are contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations
- * complete is non-deterministic. The synchronization functions,
- * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
- * to synchronize to the completion of asynchronous RMA operations on the
- * same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * The optimal DMA performance will likely be realized if both
- * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if loffset and roffset are not cacheline
- * aligned but are separated by some multiple of 64. The lowest level of
- * performance is likely if loffset and roffset are not separated by a multiple
- * of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_writeto() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
- * address space of epd, or the range [roffset, roffset + len - 1] is invalid
- * for the registered address space of the peer of epd, or loffset or roffset
- * is negative
- */
-int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
- roffset, int rma_flags);
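
A sketch of an asynchronous push whose final cacheline lands last, so the peer can poll it as a completion hint:

        static int push_ordered(scif_epd_t epd, off_t loffset, off_t roffset,
                                size_t len)
        {
                /* Asynchronous: pair with scif_fence_mark()/scif_fence_wait()
                 * (or scif_fence_signal()) to observe completion. */
                return scif_writeto(epd, loffset, len, roffset,
                                    SCIF_RMA_ORDERED);
        }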
-
-/**
- * scif_vreadfrom() - Copy from a remote address space
- * @epd: endpoint descriptor
- * @addr: address to which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space
- * from which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_vreadfrom() copies len bytes from the remote registered address
- * space of the peer of endpoint epd, starting at the offset roffset, to local
- * memory, starting at addr.
- *
- * The specified range [roffset, roffset + len - 1] must be within some
- * registered window or windows of the remote nodes. The range may
- * intersect multiple registered windows, but only if those windows are
- * contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations
- * complete is non-deterministic. The synchronization functions,
- * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
- * to synchronize to the completion of asynchronous RMA operations on the
- * same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
- * the specified local memory range may remain in a pinned state even after
- * the specified transfer completes. This may reduce overhead if some or all of
- * the same virtual address range is referenced in a subsequent call of
- * scif_vreadfrom() or scif_vwriteto().
- *
- * The optimal DMA performance will likely be realized if both
- * addr and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if addr and roffset are not
- * cacheline aligned but are separated by some multiple of 64. The lowest level
- * of performance is likely if addr and roffset are not separated by a
- * multiple of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_USECACHE - enable registration caching
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for
- * the registered address space of the peer of epd
- */
-int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
- int rma_flags);
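
A sketch of pulling into an ordinary (unregistered) local buffer while asking that its backing pages stay pinned for later reuse:

        static int vpull_cached(scif_epd_t epd, void *buf, size_t len,
                                off_t roffset)
        {
                return scif_vreadfrom(epd, buf, len, roffset,
                                      SCIF_RMA_USECACHE | SCIF_RMA_SYNC);
        }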
-
-/**
- * scif_vwriteto() - Copy to a remote address space
- * @epd: endpoint descriptor
- * @addr: address from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to
- * which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
- * the remote registered address space of the peer of endpoint epd, starting at
- * the offset roffset.
- *
- * The specified range [roffset, roffset + len - 1] must be within some
- * registered window or windows of the remote nodes. The range may intersect
- * multiple registered windows, but only if those windows are contiguous in the
- * registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations
- * complete is non-deterministic. The synchronization functions,
- * scif_fence_mark()/scif_fence_wait() and scif_fence_signal(), can be used
- * to synchronize to the completion of asynchronous RMA operations on the
- * same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
- * the specified local memory range may remain in a pinned state even after
- * the specified transfer completes. This may reduce overhead if some or all of
- * the same virtual address range is referenced in a subsequent call of
- * scif_vreadfrom() or scif_vwriteto().
- *
- * The optimal DMA performance will likely be realized if both
- * addr and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if addr and roffset are not cacheline
- * aligned but are separated by some multiple of 64. The lowest level of
- * performance is likely if addr and roffset are not separated by a multiple
- * of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_USECACHE - allow registration caching
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for
- * the registered address space of the peer of epd
- */
-int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
- int rma_flags);
-
-/**
- * scif_fence_mark() - Mark previously issued RMAs
- * @epd: endpoint descriptor
- * @flags: control flags
- * @mark: marked value returned as output.
- *
- * scif_fence_mark() returns after marking the current set of all uncompleted
- * RMAs initiated through the endpoint epd or the current set of all
- * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
- * marked with a value returned at mark. The application may subsequently call
- * scif_fence_wait(), passing the value returned at mark, to await completion
- * of all RMAs so marked.
- *
- * The flags argument has exactly one of the following values.
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
- * epd are marked
- * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
- * of endpoint epd are marked
- *
- * Return:
- * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENOMEM - Insufficient kernel memory was available
- */
-int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
-
-/**
- * scif_fence_wait() - Wait for completion of marked RMAs
- * @epd: endpoint descriptor
- * @mark: mark request
- *
- * scif_fence_wait() returns after all RMAs marked with mark have completed.
- * The value passed in mark must have been obtained in a previous call to
- * scif_fence_mark().
- *
- * Return:
- * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENOMEM - Insufficient kernel memory was available
- */
-int scif_fence_wait(scif_epd_t epd, int mark);
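
Taken together with scif_fence_mark(), the usual completion wait is a two-step sketch like this (epd assumed connected, with asynchronous RMAs outstanding):

        static int quiesce_local_rmas(scif_epd_t epd)
        {
                int mark, rc;

                /* Mark every uncompleted RMA initiated through epd... */
                rc = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
                if (rc < 0)
                        return rc;
                /* ...then block until every marked RMA has completed. */
                return scif_fence_wait(epd, mark);
        }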
-
-/**
- * scif_fence_signal() - Request a memory update on completion of RMAs
- * @epd: endpoint descriptor
- * @loff: local offset
- * @lval: local value to write at loff
- * @roff: remote offset
- * @rval: remote value to write at roff
- * @flags: flags
- *
- * scif_fence_signal() returns after marking the current set of all uncompleted
- * RMAs initiated through the endpoint epd or marking the current set of all
- * uncompleted RMAs initiated through the peer of endpoint epd.
- *
- * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
- * marked set, lval is written to memory at the address corresponding to offset
- * loff in the local registered address space of epd. loff must be within a
- * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
- * of the RMAs in the marked set, rval is written to memory at the address
- * corresponding to offset roff in the remote registered address space of epd.
- * roff must be within a remote registered window of the peer of epd. Note
- * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
- *
- * The flags argument is formed by OR'ing together the following.
- * Exactly one of the following values.
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
- * epd are marked
- * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
- * of endpoint epd are marked
- * One or more of the following values.
- * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
- * memory at the address corresponding to offset loff in the local
- * registered address space of epd.
- * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
- * memory at the address corresponding to offset roff in the remote
- * registered address space of epd.
- *
- * Return:
- * Upon successful completion, scif_fence_signal() returns 0; otherwise in
- * user mode -1 is returned and errno is set to indicate the error; in kernel
- * mode the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - loff is invalid for the registered address space of epd, or roff is
- * invalid for the registered address space of the peer of epd
- */
-int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
- u64 rval, int flags);
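
A doorbell sketch: once all locally initiated RMAs complete, 1 is written into the peer's window at roff (roff assumed DWORD aligned within a remote registered window):

        static int ring_remote_doorbell(scif_epd_t epd, off_t roff)
        {
                return scif_fence_signal(epd, 0, 0, roff, 1ULL,
                                         SCIF_FENCE_INIT_SELF |
                                         SCIF_SIGNAL_REMOTE);
        }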
-
-/**
- * scif_get_node_ids() - Return information about online nodes
- * @nodes: array in which to return online node IDs
- * @len: number of entries in the nodes array
- * @self: address to place the node ID of the local node
- *
- * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
- * nodes in the SCIF network. If there is not enough space in nodes, as
- * indicated by the len parameter, only len node IDs are returned in nodes. The
- * return value of scif_get_node_ids() is the total number of nodes currently in
- * the SCIF network. By checking the return value against the len parameter,
- * the user may determine if enough space for nodes was allocated.
- *
- * The node ID of the local node is returned at self.
- *
- * Return:
- * Upon successful completion, scif_get_node_ids() returns the actual number of
- * online nodes in the SCIF network including 'self'; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode no
- * errors are returned.
- */
-int scif_get_node_ids(u16 *nodes, int len, u16 *self);
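
For example, a sketch that checks whether the caller's array was large enough (16 is an arbitrary guess at the node count):

        static void dump_nodes(void)
        {
                u16 nodes[16], self;
                int total;

                total = scif_get_node_ids(nodes, 16, &self);
                if (total > 16)
                        pr_info("SCIF: %d nodes online, only 16 returned\n",
                                total);
        }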
-
-/**
- * scif_pin_pages() - Pin a set of pages
- * @addr: Virtual address of range to pin
- * @len: Length of range to pin
- * @prot_flags: Page protection flags
- * @map_flags: Page classification flags
- * @pinned_pages: Handle to pinned pages
- *
- * scif_pin_pages() pins (locks in physical memory) the physical pages which
- * back the range of virtual address pages starting at addr and continuing for
- * len bytes. addr and len are constrained to be multiples of the page size. A
- * successful scif_pin_pages() call returns a handle to pinned_pages which may
- * be used in subsequent calls to scif_register_pinned_pages().
- *
- * The pages will remain pinned as long as there is a reference against the
- * scif_pinned_pages_t value returned by scif_pin_pages() and until
- * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A
- * reference is added to a scif_pinned_pages_t value each time a window is
- * created by calling scif_register_pinned_pages() and passing the
- * scif_pinned_pages_t value. A reference is removed from a
- * scif_pinned_pages_t value each time such a window is deleted.
- *
- * Subsequent operations which change the memory pages to which virtual
- * addresses are mapped (such as mmap(), munmap()) have no effect on the
- * scif_pinned_pages_t value or windows created against it.
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations against the pages
- * SCIF_PROT_WRITE - allow write operations against the pages
- * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a
- * kernel space address. By default, addr is interpreted as a user space
- * address.
- *
- * Return:
- * Upon successful completion, scif_pin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - prot_flags is invalid, map_flags is invalid, or addr or len is
- * not a multiple of the page size
- * ENOMEM - Not enough space
- */
-int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
- scif_pinned_pages_t *pinned_pages);
-
-/**
- * scif_unpin_pages() - Unpin a set of pages
- * @pinned_pages: Handle to pinned pages to be unpinned
- *
- * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
- * windows against pinned_pages. The physical pages represented by pinned_pages
- * will remain pinned until all windows previously registered against
- * pinned_pages are deleted (the window is scif_unregister()'d and all
- * references to the window are removed; see scif_unregister()).
- *
- * pinned_pages must have been obtained from a previous call to scif_pin_pages().
- * After calling scif_unpin_pages(), it is an error to pass pinned_pages to
- * scif_register_pinned_pages().
- *
- * Return:
- * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - pinned_pages is not valid
- */
-int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
-
-/**
- * scif_register_pinned_pages() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @pinned_pages: Handle to pinned pages
- * @offset: Registered address space offset
- * @map_flags: Flags which control where pages are mapped
- *
- * The scif_register_pinned_pages() function opens a window, a range of whole
- * pages of the registered address space of the endpoint epd, starting at
- * offset po. The value of po, further described below, is a function of the
- * parameters offset and pinned_pages, and the value of map_flags. Each page of
- * the window represents a corresponding physical memory page of the range
- * represented by pinned_pages; the length of the window is the same as the
- * length of range represented by pinned_pages. A successful
- * scif_register_pinned_pages() call returns po as the return value.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register_pinned_pages() will not replace any
- * existing registration; an error is returned if any page of the new window
- * would intersect an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po so chosen will be an
- * area of the registered address space that the implementation deems suitable
- * for a mapping of the required size. An offset value of 0 is interpreted as
- * granting the implementation complete freedom in selecting po, subject to
- * constraints described below. A non-zero value of offset is taken to be a
- * suggestion of an offset near which the mapping should be placed. When the
- * implementation selects a value for po, it does not replace any extant
- * window. In all cases, po will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Windows created by scif_register_pinned_pages() are unregistered by
- * scif_unregister().
- *
- * The map_flags argument can be set to SCIF_MAP_FIXED to request that the
- * window be placed at the exact offset specified.
- *
- * Return:
- * Upon successful completion, scif_register_pinned_pages() returns the offset
- * at which the mapping was placed (po); otherwise the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window
- * would intersect an existing window
- * EAGAIN - The mapping could not be performed due to lack of resources
- * ECONNRESET - Connection reset by peer
- * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and
- * offset is not a multiple of the page size, or offset is negative
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register_pinned_pages(scif_epd_t epd,
- scif_pinned_pages_t pinned_pages,
- off_t offset, int map_flags);
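
Putting scif_pin_pages() and scif_register_pinned_pages() together, a sketch of exposing a pinned kernel buffer (kbuf and len assumed page-size multiples):

        static off_t expose_pinned(scif_epd_t epd, void *kbuf, size_t len)
        {
                scif_pinned_pages_t pp;
                off_t po;
                int rc;

                rc = scif_pin_pages(kbuf, len,
                                    SCIF_PROT_READ | SCIF_PROT_WRITE,
                                    SCIF_MAP_KERNEL, &pp);
                if (rc < 0)
                        return rc;

                po = scif_register_pinned_pages(epd, pp, 0, 0);
                if (po < 0)
                        scif_unpin_pages(pp);   /* no window was created */
                return po;
        }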
-
-/**
- * scif_get_pages() - Add references to remote registered pages
- * @epd: endpoint descriptor
- * @offset: remote registered offset
- * @len: length of range of pages
- * @pages: returned scif_range structure
- *
- * scif_get_pages() returns the addresses of the physical pages represented by
- * those pages of the registered address space of the peer of epd, starting at
- * offset and continuing for len bytes. offset and len are constrained to be
- * multiples of the page size.
- *
- * All of the pages in the specified range [offset, offset + len - 1] must be
- * within a single window of the registered address space of the peer of epd.
- *
- * The addresses are returned as a virtually contiguous array pointed to by the
- * phys_addr component of the scif_range structure whose address is returned in
- * pages. The nr_pages component of scif_range is the length of the array. The
- * prot_flags component of scif_range holds the protection flag value passed
- * when the pages were registered.
- *
- * Each physical page whose address is returned by scif_get_pages() remains
- * available and will not be released for reuse until the scif_range structure
- * is returned in a call to scif_put_pages(). The scif_range structure returned
- * by scif_get_pages() must be unmodified.
- *
- * It is an error to call scif_close() on an endpoint while any scif_range
- * structure obtained from that endpoint has not yet been returned via
- * scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_get_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors:
- * ECONNRESET - Connection reset by peer.
- * EINVAL - offset is not a multiple of the page size, or offset is negative, or
- * len is not a multiple of the page size
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid
- * for the registered address space of the peer of epd
- */
-int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
- struct scif_range **pages);
-
-/**
- * scif_put_pages() - Remove references from remote registered pages
- * @pages: pages to be returned
- *
- * scif_put_pages() releases a scif_range structure previously obtained by
- * calling scif_get_pages(). The physical pages represented by pages may
- * be reused when the window which represented those pages is unregistered.
- * Therefore, those pages must not be accessed after calling scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_put_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors:
- * EINVAL - pages does not point to a valid scif_range structure, or
- * the scif_range structure pointed to by pages was already returned
- * ENODEV - The remote node is lost or exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- */
-int scif_put_pages(struct scif_range *pages);
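
A borrow-and-return sketch over one remote window (offset and len assumed page multiples inside a single peer window):

        static int peek_remote_pages(scif_epd_t epd, off_t offset, size_t len)
        {
                struct scif_range *range;
                int rc;

                rc = scif_get_pages(epd, offset, len, &range);
                if (rc < 0)
                        return rc;
                /* range->nr_pages entries of range->phys_addr are now stable. */
                return scif_put_pages(range);   /* drop refs before scif_close() */
        }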
-
-/**
- * scif_poll() - Wait for some event on an endpoint
- * @epds: Array of endpoint descriptors
- * @nepds: Length of epds
- * @timeout: Upper limit on time for which scif_poll() will block
- *
- * scif_poll() waits for one of a set of endpoints to become ready to perform
- * an I/O operation.
- *
- * The epds argument specifies the endpoint descriptors to be examined and the
- * events of interest for each endpoint descriptor. epds is a pointer to an
- * array with one member for each open endpoint descriptor of interest.
- *
- * The number of items in the epds array is specified in nepds. The epd field
- * of scif_pollepd is an endpoint descriptor of an open endpoint. The field
- * events is a bitmask specifying the events which the application is
- * interested in. The field revents is an output parameter, filled by the
- * kernel with the events that actually occurred. The bits returned in revents
- * can include any of those specified in events, or one of the values EPOLLERR,
- * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events
- * field, and will be set in the revents field whenever the corresponding
- * condition is true.)
- *
- * If none of the events requested (and no error) has occurred for any of the
- * endpoint descriptors, then scif_poll() blocks until one of the events occurs.
- *
- * The timeout argument specifies an upper limit on the time for which
- * scif_poll() will block, in milliseconds. Specifying a negative value in
- * timeout means an infinite timeout.
- *
- * The following bits may be set in events and returned in revents.
- * EPOLLIN - Data may be received without blocking. For a connected
- * endpoint, this means that scif_recv() may be called without blocking. For a
- * listening endpoint, this means that scif_accept() may be called without
- * blocking.
- * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this
- * means that scif_send() may be called without blocking. EPOLLOUT may also be
- * used to block waiting for a non-blocking connect to complete. This bit value
- * has no meaning for a listening endpoint and is ignored if specified.
- *
- * The following bits are only returned in revents, and are ignored if set in
- * events.
- * EPOLLERR - An error occurred on the endpoint
- * EPOLLHUP - The connection to the peer endpoint was disconnected
- * EPOLLNVAL - The specified endpoint descriptor is invalid.
- *
- * Return:
- * Upon successful completion, scif_poll() returns a non-negative value. A
- * positive value indicates the total number of endpoint descriptors that have
- * been selected (that is, endpoint descriptors for which the revents member is
- * non-zero). A value of 0 indicates that the call timed out and no endpoint
- * descriptors have been selected. Otherwise in user mode -1 is returned and
- * errno is set to indicate the error; in kernel mode the negative of one of
- * the following errors is returned.
- *
- * Errors:
- * EINTR - A signal occurred before any requested event
- * EINVAL - The nepds argument is greater than {OPEN_MAX}
- * ENOMEM - There was no space to allocate file descriptor tables
- */
-int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout);
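
For illustration, a sketch waiting up to one second for either of two endpoints to become readable:

        static int wait_readable(scif_epd_t epd0, scif_epd_t epd1)
        {
                struct scif_pollepd pe[2] = {
                        { .epd = epd0, .events = EPOLLIN },
                        { .epd = epd1, .events = EPOLLIN },
                };
                int ready;

                ready = scif_poll(pe, 2, 1000); /* timeout in ms */
                if (ready <= 0)
                        return ready;   /* 0: timed out; < 0: -errno */
                /* pe[i].revents now reports EPOLLIN/EPOLLERR/... per endpoint */
                return ready;
        }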
-
-/**
- * scif_client_register() - Register a SCIF client
- * @client: client to be registered
- *
- * scif_client_register() registers a SCIF client. The probe() method
- * of the client is called when SCIF peer devices come online and the
- * remove() method is called when the peer devices disappear.
- *
- * Return:
- * Upon successful completion, scif_client_register() returns a non-negative
- * value. Otherwise the return value is the same as subsys_interface_register()
- * in the kernel.
- */
-int scif_client_register(struct scif_client *client);
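
A registration sketch following the description above; my_probe/my_remove are placeholder callbacks, and the scif_client field names and the SCIF peer-device argument are assumptions, not confirmed by this excerpt:

        static void my_probe(struct scif_peer_dev *spdev)  { /* peer online */ }
        static void my_remove(struct scif_peer_dev *spdev) { /* peer gone */ }

        static struct scif_client my_client = {
                .name   = "my_scif_user",       /* assumed field names */
                .probe  = my_probe,
                .remove = my_remove,
        };

        /* rc = scif_client_register(&my_client); */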
-
-/**
- * scif_client_unregister() - Unregister a SCIF client
- * @client: client to be unregistered
- *
- * scif_client_unregister() unregisters a SCIF client.
- *
- * Return:
- * None
- */
-void scif_client_unregister(struct scif_client *client);
-
-#endif /* __SCIF_H__ */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 7e5dd7d1e221..b807141acc14 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -2,18 +2,20 @@
/*
* SCMI Message Protocol driver header
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#ifndef _LINUX_SCMI_PROTOCOL_H
#define _LINUX_SCMI_PROTOCOL_H
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/types.h>
-#define SCMI_MAX_STR_SIZE 16
-#define SCMI_MAX_NUM_RATES 16
+#define SCMI_MAX_STR_SIZE 64
+#define SCMI_SHORT_NAME_MAX_SIZE 16
+#define SCMI_MAX_NUM_RATES 16
/**
* struct scmi_revision_info - version information structure
@@ -35,13 +37,20 @@ struct scmi_revision_info {
u8 num_protocols;
u8 num_agents;
u32 impl_ver;
- char vendor_id[SCMI_MAX_STR_SIZE];
- char sub_vendor_id[SCMI_MAX_STR_SIZE];
+ char vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
+ char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_clock_info {
char name[SCMI_MAX_STR_SIZE];
+ unsigned int enable_latency;
bool rate_discrete;
+ bool rate_changed_notifications;
+ bool rate_change_requested_notifications;
+ bool state_ctrl_forbidden;
+ bool rate_ctrl_forbidden;
+ bool parent_ctrl_forbidden;
+ bool extended_config;
union {
struct {
int num_rates;
@@ -53,12 +62,29 @@ struct scmi_clock_info {
u64 step_size;
} range;
};
+ int num_parents;
+ u32 *parents;
+};
+
+enum scmi_power_scale {
+ SCMI_POWER_BOGOWATTS,
+ SCMI_POWER_MILLIWATTS,
+ SCMI_POWER_MICROWATTS
};
struct scmi_handle;
+struct scmi_device;
+struct scmi_protocol_handle;
+
+enum scmi_clock_oem_config {
+ SCMI_CLOCK_CFG_DUTY_CYCLE = 0x1,
+ SCMI_CLOCK_CFG_PHASE,
+ SCMI_CLOCK_CFG_OEM_START = 0x80,
+ SCMI_CLOCK_CFG_OEM_END = 0xFF,
+};
/**
- * struct scmi_clk_ops - represents the various operations provided
+ * struct scmi_clk_proto_ops - represents the various operations provided
* by SCMI Clock Protocol
*
* @count_get: get the count of clocks provided by SCMI
@@ -67,30 +93,55 @@ struct scmi_handle;
* @rate_set: set the clock rate of a clock
* @enable: enables the specified clock
* @disable: disables the specified clock
+ * @state_get: get the status of the specified clock
+ * @config_oem_get: get the value of an OEM specific clock config
+ * @config_oem_set: set the value of an OEM specific clock config
+ * @parent_get: get the parent id of a clock
+ * @parent_set: set the parent of a clock
*/
-struct scmi_clk_ops {
- int (*count_get)(const struct scmi_handle *handle);
+struct scmi_clk_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_clock_info *(*info_get)
- (const struct scmi_handle *handle, u32 clk_id);
- int (*rate_get)(const struct scmi_handle *handle, u32 clk_id,
+ const struct scmi_clock_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 *rate);
- int (*rate_set)(const struct scmi_handle *handle, u32 clk_id,
+ int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 rate);
- int (*enable)(const struct scmi_handle *handle, u32 clk_id);
- int (*disable)(const struct scmi_handle *handle, u32 clk_id);
+ int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool *enabled, bool atomic);
+ int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 *oem_val, u32 *attributes, bool atomic);
+ int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 oem_val, bool atomic);
+ int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id);
+ int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id);
+};
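
As a usage sketch of these renamed clock ops (ph and clk_ops are assumed to have already been obtained from the SCMI core by a protocol user; error handling trimmed):

        u64 rate;
        int rc;

        rc = clk_ops->rate_get(ph, clk_id, &rate);
        if (!rc)
                rc = clk_ops->rate_set(ph, clk_id, rate * 2);
        if (!rc)
                /* atomic == false: a sleeping transport may be used */
                rc = clk_ops->enable(ph, clk_id, false);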
+
+struct scmi_perf_domain_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool set_perf;
};
/**
- * struct scmi_perf_ops - represents the various operations provided
+ * struct scmi_perf_proto_ops - represents the various operations provided
* by SCMI Performance Protocol
*
+ * @num_domains_get: gets the number of supported performance domains
+ * @info_get: get the information of a performance domain
* @limits_set: sets limits on the performance level of a domain
* @limits_get: gets limits on the performance level of a domain
* @level_set: sets the performance level of a domain
* @level_get: gets the performance level of a domain
- * @device_domain_id: gets the scmi domain id for a given device
* @transition_latency_get: gets the DVFS transition latency for a given device
+ * @rate_limit_get: gets the minimum time (us) required between successive
+ * requests
* @device_opps_add: adds all the OPPs for a given device
* @freq_set: sets the frequency for a given device using sustained frequency
* to sustained performance level mapping
@@ -98,33 +149,46 @@ struct scmi_clk_ops {
* to sustained performance level mapping
* @est_power_get: gets the estimated power cost for a given performance domain
* at a given frequency
+ * @fast_switch_possible: indicates if fast DVFS switching is possible or not
+ * for a given device
+ * @fast_switch_rate_limit: gets the minimum time (us) required between
+ * successive fast_switching requests
+ * @power_scale_get: indicates if the power values provided are in milliWatts
+ * or in some other (abstract) scale
*/
-struct scmi_perf_ops {
- int (*limits_set)(const struct scmi_handle *handle, u32 domain,
+struct scmi_perf_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_perf_domain_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain);
+ int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 max_perf, u32 min_perf);
- int (*limits_get)(const struct scmi_handle *handle, u32 domain,
+ int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *max_perf, u32 *min_perf);
- int (*level_set)(const struct scmi_handle *handle, u32 domain,
+ int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 level, bool poll);
- int (*level_get)(const struct scmi_handle *handle, u32 domain,
+ int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *level, bool poll);
- int (*device_domain_id)(struct device *dev);
- int (*transition_latency_get)(const struct scmi_handle *handle,
- struct device *dev);
- int (*device_opps_add)(const struct scmi_handle *handle,
- struct device *dev);
- int (*freq_set)(const struct scmi_handle *handle, u32 domain,
+ int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
+ int (*rate_limit_get)(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit);
+ int (*device_opps_add)(const struct scmi_protocol_handle *ph,
+ struct device *dev, u32 domain);
+ int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long rate, bool poll);
- int (*freq_get)(const struct scmi_handle *handle, u32 domain,
+ int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, bool poll);
- int (*est_power_get)(const struct scmi_handle *handle, u32 domain,
+ int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, unsigned long *power);
- bool (*fast_switch_possible)(const struct scmi_handle *handle,
- struct device *dev);
+ bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
+ u32 domain);
+ int (*fast_switch_rate_limit)(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit);
+ enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
};
/**
- * struct scmi_power_ops - represents the various operations provided
+ * struct scmi_power_proto_ops - represents the various operations provided
* by SCMI Power Protocol
*
* @num_domains_get: get the count of power domains provided by SCMI
@@ -132,9 +196,10 @@ struct scmi_perf_ops {
* @state_set: sets the power state of a power domain
* @state_get: gets the power state of a power domain
*/
-struct scmi_power_ops {
- int (*num_domains_get)(const struct scmi_handle *handle);
- char *(*name_get)(const struct scmi_handle *handle, u32 domain);
+struct scmi_power_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
#define SCMI_POWER_STATE_TYPE_SHIFT 30
#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
#define SCMI_POWER_STATE_PARAM(type, id) \
@@ -142,19 +207,186 @@ struct scmi_power_ops {
((id) & SCMI_POWER_STATE_ID_MASK))
#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0)
#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0)
- int (*state_set)(const struct scmi_handle *handle, u32 domain,
+ int (*state_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 state);
- int (*state_get)(const struct scmi_handle *handle, u32 domain,
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *state);
};
+/**
+ * struct scmi_sensor_reading - represent a timestamped read
+ *
+ * Used by @reading_get_timestamped method.
+ *
+ * @value: The signed value read from the sensor.
+ * @timestamp: An unsigned timestamp for the sensor read, as provided by
+ * SCMI platform. Set to zero when not available.
+ */
+struct scmi_sensor_reading {
+ long long value;
+ unsigned long long timestamp;
+};
+
+/**
+ * struct scmi_range_attrs - specifies a sensor or axis values' range
+ * @min_range: The minimum value which can be represented by the sensor/axis.
+ * @max_range: The maximum value which can be represented by the sensor/axis.
+ */
+struct scmi_range_attrs {
+ long long min_range;
+ long long max_range;
+};
+
+/**
+ * struct scmi_sensor_axis_info - describes one sensor axis
+ * @id: The axis ID.
+ * @type: Axis type. Chosen amongst one of @enum scmi_sensor_class.
+ * @scale: Power-of-10 multiplier applied to the axis unit.
+ * @name: NULL-terminated string representing the axis name as advertised by
+ * the SCMI platform.
+ * @extended_attrs: Flag to indicate the presence of additional extended
+ * attributes for this axis.
+ * @resolution: Extended attribute representing the resolution of the axis.
+ * Set to 0 if not reported by this axis.
+ * @exponent: Extended attribute representing the power-of-10 multiplier that
+ * is applied to the resolution field. Set to 0 if not reported by
+ * this axis.
+ * @attrs: Extended attributes representing minimum and maximum values
+ * measurable by this axis. Set to 0 if not reported by this axis.
+ */
+struct scmi_sensor_axis_info {
+ unsigned int id;
+ unsigned int type;
+ int scale;
+ char name[SCMI_MAX_STR_SIZE];
+ bool extended_attrs;
+ unsigned int resolution;
+ int exponent;
+ struct scmi_range_attrs attrs;
+};
+
+/**
+ * struct scmi_sensor_intervals_info - describes number and type of available
+ * update intervals
+ * @segmented: Flag for segmented intervals' representation. When True there
+ * will be exactly 3 intervals in @desc, with each entry
+ * representing a member of a segment in this order:
+ * {lowest update interval, highest update interval, step size}
+ * @count: Number of intervals described in @desc.
+ * @desc: Array of @count interval descriptor bitmasks, laid out as detailed
+ * in the SCMI specification; they can be accessed using the
+ * accompanying macros.
+ * @prealloc_pool: A minimal preallocated pool of descriptor entries used to
+ * avoid dynamic allocations smaller than 64 bytes for small
+ * @count values.
+ */
+struct scmi_sensor_intervals_info {
+ bool segmented;
+ unsigned int count;
+#define SCMI_SENS_INTVL_SEGMENT_LOW 0
+#define SCMI_SENS_INTVL_SEGMENT_HIGH 1
+#define SCMI_SENS_INTVL_SEGMENT_STEP 2
+ unsigned int *desc;
+#define SCMI_SENS_INTVL_GET_SECS(x) FIELD_GET(GENMASK(20, 5), (x))
+#define SCMI_SENS_INTVL_GET_EXP(x) \
+ ({ \
+ int __signed_exp = FIELD_GET(GENMASK(4, 0), (x)); \
+ \
+ if (__signed_exp & BIT(4)) \
+ __signed_exp |= GENMASK(31, 5); \
+ __signed_exp; \
+ })
+#define SCMI_MAX_PREALLOC_POOL 16
+ unsigned int prealloc_pool[SCMI_MAX_PREALLOC_POOL];
+};
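
To make the descriptor layout concrete, a decoding sketch (illustrative only; assumes <linux/bitfield.h> and printk are available in the including unit): bits [20:5] carry a seconds value and bits [4:0] a signed power-of-10 exponent, so an interval amounts to secs x 10^exp seconds:

static void example_decode_interval(unsigned int desc)
{
	unsigned int secs = SCMI_SENS_INTVL_GET_SECS(desc);
	int exp = SCMI_SENS_INTVL_GET_EXP(desc);  /* sign-extended from 5 bits */

	/* e.g. secs == 15, exp == -1 describes a 1.5 second interval */
	pr_info("update interval: %u x 10^%d seconds\n", secs, exp);
}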
+
+/**
+ * struct scmi_sensor_info - represents information related to one of the
+ * available sensors.
+ * @id: Sensor ID.
+ * @type: Sensor type, chosen from @enum scmi_sensor_class.
+ * @scale: Power-of-10 multiplier applied to the sensor unit.
+ * @num_trip_points: Number of maximum configurable trip points.
+ * @async: Flag for asynchronous read support.
+ * @update: Flag for continuous update notification support.
+ * @timestamped: Flag for timestamped read support.
+ * @tstamp_scale: Power-of-10 multiplier applied to the sensor timestamps to
+ * represent them in seconds.
+ * @num_axis: Number of supported axes, if any. Reported as 0 for scalar sensors.
+ * @axis: Pointer to an array of @num_axis descriptors.
+ * @intervals: Descriptor of available update intervals.
+ * @sensor_config: A bitmask reporting the current sensor configuration as
+ * detailed in the SCMI specification: it can be accessed and
+ * modified through the accompanying macros.
+ * @name: NULL-terminated string representing sensor name as advertised by
+ * SCMI platform.
+ * @extended_scalar_attrs: Flag to indicate the presence of additional extended
+ * attributes for this sensor.
+ * @sensor_power: Extended attribute representing the average power
+ * consumed by the sensor in microwatts (uW) when it is active.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @resolution: Extended attribute representing the resolution of the sensor.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @exponent: Extended attribute representing the power-of-10 multiplier that is
+ * applied to the resolution field.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @scalar_attrs: Extended attributes representing minimum and maximum
+ * measurable values by this sensor.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ */
struct scmi_sensor_info {
- u32 id;
- u8 type;
- s8 scale;
- u8 num_trip_points;
+ unsigned int id;
+ unsigned int type;
+ int scale;
+ unsigned int num_trip_points;
bool async;
+ bool update;
+ bool timestamped;
+ int tstamp_scale;
+ unsigned int num_axis;
+ struct scmi_sensor_axis_info *axis;
+ struct scmi_sensor_intervals_info intervals;
+ unsigned int sensor_config;
+#define SCMI_SENS_CFG_UPDATE_SECS_MASK GENMASK(31, 16)
+#define SCMI_SENS_CFG_GET_UPDATE_SECS(x) \
+ FIELD_GET(SCMI_SENS_CFG_UPDATE_SECS_MASK, (x))
+
+#define SCMI_SENS_CFG_UPDATE_EXP_MASK GENMASK(15, 11)
+#define SCMI_SENS_CFG_GET_UPDATE_EXP(x) \
+ ({ \
+ int __signed_exp = \
+ FIELD_GET(SCMI_SENS_CFG_UPDATE_EXP_MASK, (x)); \
+ \
+ if (__signed_exp & BIT(4)) \
+ __signed_exp |= GENMASK(31, 5); \
+ __signed_exp; \
+ })
+
+#define SCMI_SENS_CFG_ROUND_MASK GENMASK(10, 9)
+#define SCMI_SENS_CFG_ROUND_AUTO 2
+#define SCMI_SENS_CFG_ROUND_UP 1
+#define SCMI_SENS_CFG_ROUND_DOWN 0
+
+#define SCMI_SENS_CFG_TSTAMP_ENABLED_MASK BIT(1)
+#define SCMI_SENS_CFG_TSTAMP_ENABLE 1
+#define SCMI_SENS_CFG_TSTAMP_DISABLE 0
+#define SCMI_SENS_CFG_IS_TSTAMP_ENABLED(x) \
+ FIELD_GET(SCMI_SENS_CFG_TSTAMP_ENABLED_MASK, (x))
+
+#define SCMI_SENS_CFG_SENSOR_ENABLED_MASK BIT(0)
+#define SCMI_SENS_CFG_SENSOR_ENABLE 1
+#define SCMI_SENS_CFG_SENSOR_DISABLE 0
char name[SCMI_MAX_STR_SIZE];
+#define SCMI_SENS_CFG_IS_ENABLED(x) FIELD_GET(BIT(0), (x))
+ bool extended_scalar_attrs;
+ unsigned int sensor_power;
+ unsigned int resolution;
+ int exponent;
+ struct scmi_range_attrs scalar_attrs;
};
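
The accessors above only extract fields; composing a configuration word would go through FIELD_PREP() from <linux/bitfield.h>. A sketch enabling a sensor together with timestamped reads (rounding left at its default):

static u32 example_sensor_cfg(void)
{
	return FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
			  SCMI_SENS_CFG_SENSOR_ENABLE) |
	       FIELD_PREP(SCMI_SENS_CFG_TSTAMP_ENABLED_MASK,
			  SCMI_SENS_CFG_TSTAMP_ENABLE);
}

The result would then be handed to the sensor protocol's config_set operation defined further down.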
/*
@@ -163,34 +395,137 @@ struct scmi_sensor_info {
*/
enum scmi_sensor_class {
NONE = 0x0,
+ UNSPEC = 0x1,
TEMPERATURE_C = 0x2,
+ TEMPERATURE_F = 0x3,
+ TEMPERATURE_K = 0x4,
VOLTAGE = 0x5,
CURRENT = 0x6,
POWER = 0x7,
ENERGY = 0x8,
+ CHARGE = 0x9,
+ VOLTAMPERE = 0xA,
+ NITS = 0xB,
+ LUMENS = 0xC,
+ LUX = 0xD,
+ CANDELAS = 0xE,
+ KPA = 0xF,
+ PSI = 0x10,
+ NEWTON = 0x11,
+ CFM = 0x12,
+ RPM = 0x13,
+ HERTZ = 0x14,
+ SECS = 0x15,
+ MINS = 0x16,
+ HOURS = 0x17,
+ DAYS = 0x18,
+ WEEKS = 0x19,
+ MILS = 0x1A,
+ INCHES = 0x1B,
+ FEET = 0x1C,
+ CUBIC_INCHES = 0x1D,
+ CUBIC_FEET = 0x1E,
+ METERS = 0x1F,
+ CUBIC_CM = 0x20,
+ CUBIC_METERS = 0x21,
+ LITERS = 0x22,
+ FLUID_OUNCES = 0x23,
+ RADIANS = 0x24,
+ STERADIANS = 0x25,
+ REVOLUTIONS = 0x26,
+ CYCLES = 0x27,
+ GRAVITIES = 0x28,
+ OUNCES = 0x29,
+ POUNDS = 0x2A,
+ FOOT_POUNDS = 0x2B,
+ OUNCE_INCHES = 0x2C,
+ GAUSS = 0x2D,
+ GILBERTS = 0x2E,
+ HENRIES = 0x2F,
+ FARADS = 0x30,
+ OHMS = 0x31,
+ SIEMENS = 0x32,
+ MOLES = 0x33,
+ BECQUERELS = 0x34,
+ PPM = 0x35,
+ DECIBELS = 0x36,
+ DBA = 0x37,
+ DBC = 0x38,
+ GRAYS = 0x39,
+ SIEVERTS = 0x3A,
+ COLOR_TEMP_K = 0x3B,
+ BITS = 0x3C,
+ BYTES = 0x3D,
+ WORDS = 0x3E,
+ DWORDS = 0x3F,
+ QWORDS = 0x40,
+ PERCENTAGE = 0x41,
+ PASCALS = 0x42,
+ COUNTS = 0x43,
+ GRAMS = 0x44,
+ NEWTON_METERS = 0x45,
+ HITS = 0x46,
+ MISSES = 0x47,
+ RETRIES = 0x48,
+ OVERRUNS = 0x49,
+ UNDERRUNS = 0x4A,
+ COLLISIONS = 0x4B,
+ PACKETS = 0x4C,
+ MESSAGES = 0x4D,
+ CHARS = 0x4E,
+ ERRORS = 0x4F,
+ CORRECTED_ERRS = 0x50,
+ UNCORRECTABLE_ERRS = 0x51,
+ SQ_MILS = 0x52,
+ SQ_INCHES = 0x53,
+ SQ_FEET = 0x54,
+ SQ_CM = 0x55,
+ SQ_METERS = 0x56,
+ RADIANS_SEC = 0x57,
+ BPM = 0x58,
+ METERS_SEC_SQUARED = 0x59,
+ METERS_SEC = 0x5A,
+ CUBIC_METERS_SEC = 0x5B,
+ MM_MERCURY = 0x5C,
+ RADIANS_SEC_SQUARED = 0x5D,
+ OEM_UNIT = 0xFF
};
/**
- * struct scmi_sensor_ops - represents the various operations provided
+ * struct scmi_sensor_proto_ops - represents the various operations provided
* by SCMI Sensor Protocol
*
* @count_get: get the count of sensors provided by SCMI
* @info_get: get the information of the specified sensor
* @trip_point_config: selects and configures a trip-point of interest
* @reading_get: gets the current value of the sensor
+ * @reading_get_timestamped: gets the current value and timestamp, when
+ * available, of the sensor (as of the v3.0 spec).
+ * Supports multi-axis sensors when the sensor
+ * provides them and the @readings array holds
+ * @count entries equal to the sensor's num_axis.
+ * @config_get: Get the sensor's current configuration
+ * @config_set: Set the sensor's current configuration
*/
-struct scmi_sensor_ops {
- int (*count_get)(const struct scmi_handle *handle);
- const struct scmi_sensor_info *(*info_get)
- (const struct scmi_handle *handle, u32 sensor_id);
- int (*trip_point_config)(const struct scmi_handle *handle,
+struct scmi_sensor_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_sensor_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 sensor_id);
+ int (*trip_point_config)(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value);
- int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
+ int (*reading_get)(const struct scmi_protocol_handle *ph, u32 sensor_id,
u64 *value);
+ int (*reading_get_timestamped)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings);
+ int (*config_get)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 *sensor_config);
+ int (*config_set)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 sensor_config);
};
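
A sketch of a timestamped multi-axis read against these ops (the fixed 8-slot buffer is an assumption of the example):

static int example_read_axes(const struct scmi_sensor_proto_ops *ops,
			     const struct scmi_protocol_handle *ph,
			     const struct scmi_sensor_info *s)
{
	struct scmi_sensor_reading readings[8];

	if (!s->num_axis || s->num_axis > ARRAY_SIZE(readings))
		return -EINVAL;

	/* One reading per axis; each entry carries a value and a timestamp. */
	return ops->reading_get_timestamped(ph, s->id, s->num_axis, readings);
}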
/**
- * struct scmi_reset_ops - represents the various operations provided
+ * struct scmi_reset_proto_ops - represents the various operations provided
* by SCMI Reset Protocol
*
* @num_domains_get: get the count of reset domains provided by SCMI
@@ -200,20 +535,217 @@ struct scmi_sensor_ops {
* @assert: explicitly assert reset signal of the specified reset domain
* @deassert: explicitly deassert reset signal of the specified reset domain
*/
-struct scmi_reset_ops {
- int (*num_domains_get)(const struct scmi_handle *handle);
- char *(*name_get)(const struct scmi_handle *handle, u32 domain);
- int (*latency_get)(const struct scmi_handle *handle, u32 domain);
- int (*reset)(const struct scmi_handle *handle, u32 domain);
- int (*assert)(const struct scmi_handle *handle, u32 domain);
- int (*deassert)(const struct scmi_handle *handle, u32 domain);
+struct scmi_reset_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
+ int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*reset)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*assert)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain);
+};
+
+enum scmi_voltage_level_mode {
+ SCMI_VOLTAGE_LEVEL_SET_AUTO,
+ SCMI_VOLTAGE_LEVEL_SET_SYNC,
+};
+
+/**
+ * struct scmi_voltage_info - describe one available SCMI Voltage Domain
+ *
+ * @id: the domain ID as advertised by the platform
+ * @segmented: defines the layout of the entries of array @levels_uv.
+ * - when True the entries are to be interpreted as triplets,
+ * each defining a segment representing a range of equally
+ * spaced voltages: <lowest_volt>, <highest_volt>, <step_uV>
+ * - when False the entries simply represent a single discrete
+ * supported voltage level
+ * @negative_volts_allowed: True if any of the entries of @levels_uv represent
+ * a negative voltage.
+ * @async_level_set: True when the voltage domain supports asynchronous level
+ * set commands.
+ * @name: name assigned to the Voltage Domain by platform
+ * @num_levels: number of total entries in @levels_uv.
+ * @levels_uv: array of entries describing the available voltage levels for
+ * this domain.
+ */
+struct scmi_voltage_info {
+ unsigned int id;
+ bool segmented;
+ bool negative_volts_allowed;
+ bool async_level_set;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int num_levels;
+#define SCMI_VOLTAGE_SEGMENT_LOW 0
+#define SCMI_VOLTAGE_SEGMENT_HIGH 1
+#define SCMI_VOLTAGE_SEGMENT_STEP 2
+ int *levels_uv;
+};
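
A sketch of expanding the segmented layout described under @segmented (each triplet is <low, high, step> in uV; that @num_levels counts array entries, so segments occupy three slots each, follows from the kernel-doc above):

static void example_walk_segments(const struct scmi_voltage_info *vi)
{
	unsigned int i;

	if (!vi->segmented)
		return;	/* non-segmented: each entry is one discrete level */

	for (i = 0; i + SCMI_VOLTAGE_SEGMENT_STEP < vi->num_levels; i += 3) {
		int low = vi->levels_uv[i + SCMI_VOLTAGE_SEGMENT_LOW];
		int high = vi->levels_uv[i + SCMI_VOLTAGE_SEGMENT_HIGH];
		int step = vi->levels_uv[i + SCMI_VOLTAGE_SEGMENT_STEP];
		int uv;

		if (step <= 0)
			continue;

		for (uv = low; uv <= high; uv += step)
			;	/* each uv is one supported voltage level */
	}
}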
+
+/**
+ * struct scmi_voltage_proto_ops - represents the various operations provided
+ * by SCMI Voltage Protocol
+ *
+ * @num_domains_get: get the count of voltage domains provided by SCMI
+ * @info_get: get the information of the specified domain
+ * @config_set: set the config for the specified domain
+ * @config_get: get the config of the specified domain
+ * @level_set: set the voltage level for the specified domain
+ * @level_get: get the voltage level of the specified domain
+ */
+struct scmi_voltage_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_voltage_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*config_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 config);
+#define SCMI_VOLTAGE_ARCH_STATE_OFF 0x0
+#define SCMI_VOLTAGE_ARCH_STATE_ON 0x7
+ int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *config);
+ int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ enum scmi_voltage_level_mode mode, s32 volt_uV);
+ int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ s32 *volt_uV);
+};
+
+/**
+ * struct scmi_powercap_info - Describe one available Powercap domain
+ *
+ * @id: Domain ID as advertised by the platform.
+ * @notify_powercap_cap_change: CAP change notification support.
+ * @notify_powercap_measurement_change: MEASUREMENTS change notifications
+ * support.
+ * @async_powercap_cap_set: Asynchronous CAP set support.
+ * @powercap_cap_config: CAP configuration support.
+ * @powercap_monitoring: Monitoring (measurements) support.
+ * @powercap_pai_config: PAI configuration support.
+ * @powercap_scale_mw: Domain reports power data in milliwatt units.
+ * @powercap_scale_uw: Domain reports power data in microwatt units.
+ * Note that, when both @powercap_scale_mw and
+ * @powercap_scale_uw are set to false, the domain
+ * reports power data on an abstract linear scale.
+ * @name: name assigned to the Powercap Domain by platform.
+ * @min_pai: Minimum configurable PAI.
+ * @max_pai: Maximum configurable PAI.
+ * @pai_step: Step size between two consecutive PAI values.
+ * @min_power_cap: Minimum configurable CAP.
+ * @max_power_cap: Maximum configurable CAP.
+ * @power_cap_step: Step size between two consecutive CAP values.
+ * @sustainable_power: Maximum sustainable power consumption for this domain
+ * under normal conditions.
+ * @accuracy: The accuracy with which the power is measured and reported in
+ * integral multiples of 0.001 percent.
+ * @parent_id: Identifier of the containing parent power capping domain, or the
+ * value 0xFFFFFFFF if this powercap domain is a root domain not
+ * contained in any other domain.
+ */
+struct scmi_powercap_info {
+ unsigned int id;
+ bool notify_powercap_cap_change;
+ bool notify_powercap_measurement_change;
+ bool async_powercap_cap_set;
+ bool powercap_cap_config;
+ bool powercap_monitoring;
+ bool powercap_pai_config;
+ bool powercap_scale_mw;
+ bool powercap_scale_uw;
+ bool fastchannels;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int min_pai;
+ unsigned int max_pai;
+ unsigned int pai_step;
+ unsigned int min_power_cap;
+ unsigned int max_power_cap;
+ unsigned int power_cap_step;
+ unsigned int sustainable_power;
+ unsigned int accuracy;
+#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL
+ unsigned int parent_id;
+ struct scmi_fc_info *fc_info;
+};
+
+/**
+ * struct scmi_powercap_proto_ops - represents the various operations provided
+ * by SCMI Powercap Protocol
+ *
+ * @num_domains_get: get the count of powercap domains provided by SCMI.
+ * @info_get: get the information for the specified domain.
+ * @cap_get: get the current CAP value for the specified domain.
+ * On SCMI platforms supporting powercap zone disabling, this could
+ * report a zero value for a zone where powercapping is disabled.
+ * @cap_set: set the CAP value for the specified domain to the provided value;
+ * if the domain supports setting the CAP with an asynchronous
+ * command, this request triggers an asynchronous transfer; if
+ * @ignore_dresp is set to true, the call returns immediately
+ * without waiting for the related delayed response.
+ * Note that the requested powercap value must NOT be zero, even if
+ * the platform supports disabling a powercap by setting its cap to
+ * zero (since SCMI v3.2): dedicated operations,
+ * @cap_enable_set/@cap_enable_get, should be used for that.
+ * @cap_enable_set: enable or disable the powercapping on the specified domain,
+ * if supported by the SCMI platform implementation.
+ * Note that, per the SCMI specification, the platform can
+ * silently ignore our disable request and enforce some other
+ * powercap value requested by another agent on the system: for
+ * this reason @cap_get and @cap_enable_get will always report
+ * the final platform view of the powercaps.
+ * @cap_enable_get: get the current CAP enable status for the specified domain.
+ * @pai_get: get the current PAI value for the specified domain.
+ * @pai_set: set the PAI value for the specified domain to the provided value.
+ * @measurements_get: retrieve the current average power measurements for the
+ * specified domain and the related PAI upon which they are
+ * calculated.
+ * @measurements_threshold_set: set the desired low and high power thresholds
+ * to be used when registering for notification
+ * of type POWERCAP_MEASUREMENTS_NOTIFY with this
+ * powercap domain.
+ * Note that this must be called at least once
+ * before registering any callback with the usual
+ * @scmi_notify_ops; moreover, if this method is
+ * called while measurement notifications are
+ * already enabled, it transparently triggers a
+ * proper update of the power thresholds configured
+ * in the SCMI backend server.
+ * @measurements_threshold_get: get the currently configured low and high power
+ * thresholds used when registering callbacks for
+ * notification POWERCAP_MEASUREMENTS_NOTIFY.
+ */
+struct scmi_powercap_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_powercap_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *power_cap);
+ int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 power_cap, bool ignore_dresp);
+ int (*cap_enable_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable);
+ int (*cap_enable_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool *enable);
+ int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *pai);
+ int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 pai);
+ int (*measurements_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power, u32 *pai);
+ int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high);
+ int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high);
};
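
A sketch of the asynchronous cap-set flow documented above: request a non-zero cap without waiting for the delayed response, then read back the value the platform actually enforced:

static int example_set_cap(const struct scmi_powercap_proto_ops *ops,
			   const struct scmi_protocol_handle *ph,
			   u32 domain, u32 cap)
{
	int ret;
	u32 enforced;

	/* cap must be non-zero; disabling goes through cap_enable_set() */
	ret = ops->cap_set(ph, domain, cap, true /* ignore_dresp */);
	if (ret)
		return ret;

	return ops->cap_get(ph, domain, &enforced);
}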
/**
* struct scmi_notify_ops - represents notifications' operations provided by
* SCMI core
- * @register_event_notifier: Register a notifier_block for the requested event
- * @unregister_event_notifier: Unregister a notifier_block for the requested
+ * @devm_event_notifier_register: Managed registration of a notifier_block for
+ * the requested event
+ * @devm_event_notifier_unregister: Managed unregistration of a notifier_block
+ * for the requested event
+ * @event_notifier_register: Register a notifier_block for the requested event
+ * @event_notifier_unregister: Unregister a notifier_block for the requested
* event
*
* A user can register/unregister its own notifier_block against the wanted
@@ -221,7 +753,9 @@ struct scmi_reset_ops {
* tuple: (proto_id, evt_id, src_id) using the provided register/unregister
* interface where:
*
- * @handle: The handle identifying the platform instance to use
+ * @sdev: The scmi_device to use when calling the devres managed ops
+ * (those prefixed with devm_)
+ * @handle: The handle identifying the platform instance to use, when not
+ * calling the devres managed ops
* @proto_id: The protocol ID as in SCMI Specification
* @evt_id: The message ID of the desired event as in SCMI Specification
* @src_id: A pointer to the desired source ID if different sources are
@@ -244,11 +778,21 @@ struct scmi_reset_ops {
* @report: A custom struct describing the specific event delivered
*/
struct scmi_notify_ops {
- int (*register_event_notifier)(const struct scmi_handle *handle,
- u8 proto_id, u8 evt_id, u32 *src_id,
+ int (*devm_event_notifier_register)(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*devm_event_notifier_unregister)(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*event_notifier_register)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
struct notifier_block *nb);
- int (*unregister_event_notifier)(const struct scmi_handle *handle,
- u8 proto_id, u8 evt_id, u32 *src_id,
+ int (*event_notifier_unregister)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
struct notifier_block *nb);
};
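
A registration sketch for the devres-managed variant (passing a NULL @src_id to match all sources is an assumption of this example; the report struct is defined later in this file):

static int example_notif_cb(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct scmi_sensor_update_report *r = data;

	pr_debug("sensor %u sent %u readings\n",
		 r->sensor_id, r->readings_count);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_notif_cb,
};

static int example_register(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;

	/* Unregistration happens automatically on driver detach. */
	return handle->notify_ops->devm_event_notifier_register(sdev,
			SCMI_PROTOCOL_SENSOR, SCMI_EVENT_SENSOR_UPDATE,
			NULL, &example_nb);
}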
@@ -257,41 +801,37 @@ struct scmi_notify_ops {
*
* @dev: pointer to the SCMI device
* @version: pointer to the structure containing SCMI version information
- * @power_ops: pointer to set of power protocol operations
- * @perf_ops: pointer to set of performance protocol operations
- * @clk_ops: pointer to set of clock protocol operations
- * @sensor_ops: pointer to set of sensor protocol operations
- * @reset_ops: pointer to set of reset protocol operations
+ * @devm_protocol_acquire: devres managed method to get hold of a protocol,
+ * causing its initialization and related resource
+ * accounting
+ * @devm_protocol_get: devres managed method to acquire a protocol and get
+ * its specific operations and a dedicated protocol handle
+ * @devm_protocol_put: devres managed method to release a protocol
+ * @is_transport_atomic: method to check if the underlying transport for this
+ * instance handle is configured to support atomic
+ * transactions for commands.
+ * Some users of the SCMI stack in the upper layers may
+ * want to know whether SCMI command transactions
+ * associated with this handle will never sleep, and
+ * act accordingly.
+ * An optional atomic threshold value could be returned
+ * where configured.
* @notify_ops: pointer to set of notifications related operations
- * @perf_priv: pointer to private data structure specific to performance
- * protocol(for internal use only)
- * @clk_priv: pointer to private data structure specific to clock
- * protocol(for internal use only)
- * @power_priv: pointer to private data structure specific to power
- * protocol(for internal use only)
- * @sensor_priv: pointer to private data structure specific to sensors
- * protocol(for internal use only)
- * @reset_priv: pointer to private data structure specific to reset
- * protocol(for internal use only)
- * @notify_priv: pointer to private data structure specific to notifications
- * (for internal use only)
*/
struct scmi_handle {
struct device *dev;
struct scmi_revision_info *version;
- struct scmi_perf_ops *perf_ops;
- struct scmi_clk_ops *clk_ops;
- struct scmi_power_ops *power_ops;
- struct scmi_sensor_ops *sensor_ops;
- struct scmi_reset_ops *reset_ops;
- struct scmi_notify_ops *notify_ops;
- /* for protocol internal use */
- void *perf_priv;
- void *clk_priv;
- void *power_priv;
- void *sensor_priv;
- void *reset_priv;
- void *notify_priv;
+
+ int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev,
+ u8 proto);
+ const void __must_check *
+ (*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
+ struct scmi_protocol_handle **ph);
+ void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
+ bool (*is_transport_atomic)(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold);
+
+ const struct scmi_notify_ops *notify_ops;
};
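
A sketch of the atomic-transport query on the reworked handle; upper layers can use it to decide between sleeping and polling completion paths:

static bool example_can_poll(const struct scmi_handle *handle)
{
	unsigned int atomic_threshold = 0;	/* optional, transport-configured */

	/* True when command transactions on this handle never sleep. */
	return handle->is_transport_atomic(handle, &atomic_threshold);
}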
enum scmi_std_protocol {
@@ -302,6 +842,17 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_CLOCK = 0x14,
SCMI_PROTOCOL_SENSOR = 0x15,
SCMI_PROTOCOL_RESET = 0x16,
+ SCMI_PROTOCOL_VOLTAGE = 0x17,
+ SCMI_PROTOCOL_POWERCAP = 0x18,
+};
+
+enum scmi_system_events {
+ SCMI_SYSTEM_SHUTDOWN,
+ SCMI_SYSTEM_COLDRESET,
+ SCMI_SYSTEM_WARMRESET,
+ SCMI_SYSTEM_POWERUP,
+ SCMI_SYSTEM_SUSPEND,
+ SCMI_SYSTEM_MAX
};
struct scmi_device {
@@ -314,11 +865,6 @@ struct scmi_device {
#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name);
-void scmi_device_destroy(struct scmi_device *scmi_dev);
-
struct scmi_device_id {
u8 protocol_id;
const char *name;
@@ -335,7 +881,7 @@ struct scmi_driver {
#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver)
-#ifdef CONFIG_ARM_SCMI_PROTOCOL
+#if IS_REACHABLE(CONFIG_ARM_SCMI_PROTOCOL)
int scmi_driver_register(struct scmi_driver *driver,
struct module *owner, const char *mod_name);
void scmi_driver_unregister(struct scmi_driver *driver);
@@ -366,18 +912,36 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
#define module_scmi_driver(__scmi_driver) \
module_driver(__scmi_driver, scmi_register, scmi_unregister)
-typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
-int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
-void scmi_protocol_unregister(int protocol_id);
+/**
+ * module_scmi_protocol() - Helper macro for registering a scmi protocol
+ * @__scmi_protocol: scmi_protocol structure
+ *
+ * Helper macro for scmi drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_scmi_protocol(__scmi_protocol) \
+ module_driver(__scmi_protocol, \
+ scmi_protocol_register, scmi_protocol_unregister)
+
+struct scmi_protocol;
+int scmi_protocol_register(const struct scmi_protocol *proto);
+void scmi_protocol_unregister(const struct scmi_protocol *proto);
/* SCMI Notification API - Custom Event Reports */
enum scmi_notification_events {
SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 0x1,
SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
+ SCMI_EVENT_SENSOR_UPDATE = 0x1,
SCMI_EVENT_RESET_ISSUED = 0x0,
SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0,
+ SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0,
+ SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1,
};
struct scmi_power_state_changed_report {
@@ -387,12 +951,30 @@ struct scmi_power_state_changed_report {
unsigned int power_state;
};
+struct scmi_clock_rate_notif_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int clock_id;
+ unsigned long long rate;
+};
+
+struct scmi_system_power_state_notifier_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0))
+ unsigned int flags;
+ unsigned int system_state;
+ unsigned int timeout;
+};
+
struct scmi_perf_limits_report {
ktime_t timestamp;
unsigned int agent_id;
unsigned int domain_id;
unsigned int range_max;
unsigned int range_min;
+ unsigned long range_max_freq;
+ unsigned long range_min_freq;
};
struct scmi_perf_level_report {
@@ -400,6 +982,7 @@ struct scmi_perf_level_report {
unsigned int agent_id;
unsigned int domain_id;
unsigned int performance_level;
+ unsigned long performance_level_freq;
};
struct scmi_sensor_trip_point_report {
@@ -409,6 +992,14 @@ struct scmi_sensor_trip_point_report {
unsigned int trip_point_desc;
};
+struct scmi_sensor_update_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int readings_count;
+ struct scmi_sensor_reading readings[];
+};
+
struct scmi_reset_issued_report {
ktime_t timestamp;
unsigned int agent_id;
@@ -424,4 +1015,18 @@ struct scmi_base_error_report {
unsigned long long reports[];
};
+struct scmi_powercap_cap_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_cap;
+ unsigned int pai;
+};
+
+struct scmi_powercap_meas_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power;
+};
#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index afbf8037d8db..d2176a56828a 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -51,6 +51,14 @@ struct scpi_sensor_info {
* OPP is an index to the list returned by @dvfs_get_info
* @dvfs_get_info: returns the DVFS capabilities of the given power
* domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: get the list of capabilities for the sensors
+ * @sensor_get_info: get the information of the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
*/
struct scpi_ops {
u32 (*get_version)(void);
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index eab7081392d5..75303c126285 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -4,6 +4,132 @@
#include <uapi/linux/screen_info.h>
+#include <linux/bits.h>
+
+/**
+ * SCREEN_INFO_MAX_RESOURCES - maximum number of resources per screen_info
+ */
+#define SCREEN_INFO_MAX_RESOURCES 3
+
+struct pci_dev;
+struct resource;
+
+static inline bool __screen_info_has_lfb(unsigned int type)
+{
+ return (type == VIDEO_TYPE_VLFB) || (type == VIDEO_TYPE_EFI);
+}
+
+static inline u64 __screen_info_lfb_base(const struct screen_info *si)
+{
+ u64 lfb_base = si->lfb_base;
+
+ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ lfb_base |= (u64)si->ext_lfb_base << 32;
+
+ return lfb_base;
+}
+
+static inline void __screen_info_set_lfb_base(struct screen_info *si, u64 lfb_base)
+{
+ si->lfb_base = lfb_base & GENMASK_ULL(31, 0);
+ si->ext_lfb_base = (lfb_base & GENMASK_ULL(63, 32)) >> 32;
+
+ if (si->ext_lfb_base)
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ else
+ si->capabilities &= ~VIDEO_CAPABILITY_64BIT_BASE;
+}
+
+static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned int type)
+{
+ u64 lfb_size = si->lfb_size;
+
+ if (type == VIDEO_TYPE_VLFB)
+ lfb_size <<= 16;
+ return lfb_size;
+}
+
+static inline unsigned int __screen_info_video_type(unsigned int type)
+{
+ switch (type) {
+ case VIDEO_TYPE_MDA:
+ case VIDEO_TYPE_CGA:
+ case VIDEO_TYPE_EGAM:
+ case VIDEO_TYPE_EGAC:
+ case VIDEO_TYPE_VGAC:
+ case VIDEO_TYPE_VLFB:
+ case VIDEO_TYPE_PICA_S3:
+ case VIDEO_TYPE_MIPS_G364:
+ case VIDEO_TYPE_SGI:
+ case VIDEO_TYPE_TGAC:
+ case VIDEO_TYPE_SUN:
+ case VIDEO_TYPE_SUNPCI:
+ case VIDEO_TYPE_PMAC:
+ case VIDEO_TYPE_EFI:
+ return type;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * screen_info_video_type() - Decodes the video type from struct screen_info
+ * @si: an instance of struct screen_info
+ *
+ * Returns:
+ * A VIDEO_TYPE_ constant representing si's type of video display, or 0 otherwise.
+ */
+static inline unsigned int screen_info_video_type(const struct screen_info *si)
+{
+ unsigned int type;
+
+ // check if display output is on
+ if (!si->orig_video_isVGA)
+ return 0;
+
+ // check for a known VIDEO_TYPE_ constant
+ type = __screen_info_video_type(si->orig_video_isVGA);
+ if (type)
+ return si->orig_video_isVGA;
+
+ // check if text mode has been initialized
+ if (!si->orig_video_lines || !si->orig_video_cols)
+ return 0;
+
+ // 80x25 text, mono
+ if (si->orig_video_mode == 0x07) {
+ if ((si->orig_video_ega_bx & 0xff) != 0x10)
+ return VIDEO_TYPE_EGAM;
+ else
+ return VIDEO_TYPE_MDA;
+ }
+
+ // EGA/VGA, 16 colors
+ if ((si->orig_video_ega_bx & 0xff) != 0x10) {
+ if (si->orig_video_isVGA)
+ return VIDEO_TYPE_VGAC;
+ else
+ return VIDEO_TYPE_EGAC;
+ }
+
+ // the rest...
+ return VIDEO_TYPE_CGA;
+}
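
A sketch tying the helpers together: recover the full 64-bit framebuffer range from a legacy screen_info (VLFB sizes are stored in 64 KiB units, which __screen_info_lfb_size() expands):

static void example_lfb_range(const struct screen_info *si,
			      u64 *base, u64 *size)
{
	unsigned int type = screen_info_video_type(si);

	*base = __screen_info_lfb_base(si);	/* folds in ext_lfb_base */
	*size = __screen_info_lfb_size(si, type);
}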
+
+ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
+
+#if defined(CONFIG_PCI)
+void screen_info_apply_fixups(void);
+struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
+#else
+static inline void screen_info_apply_fixups(void)
+{ }
+static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
+{
+ return NULL;
+}
+#endif
+
extern struct screen_info screen_info;
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 6dec390cf154..4ab5bdc898cf 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -15,24 +15,18 @@
#ifdef CONFIG_SHADOW_CALL_STACK
-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE SZ_1K
+#define SCS_ORDER 0
+#define SCS_SIZE (PAGE_SIZE << SCS_ORDER)
#define GFP_SCS (GFP_KERNEL | __GFP_ZERO)
/* An illegal pointer value to mark the end of the shadow stack. */
#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA)
-/* Allocate a static per-CPU shadow stack */
-#define DEFINE_SCS(name) \
- DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \
-
#define task_scs(tsk) (task_thread_info(tsk)->scs_base)
#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp)
+void *scs_alloc(int node);
+void scs_free(void *s);
void scs_init(void);
int scs_prepare(struct task_struct *tsk, int node);
void scs_release(struct task_struct *tsk);
@@ -59,13 +53,33 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
}
+DECLARE_STATIC_KEY_FALSE(dynamic_scs_enabled);
+
+static inline bool scs_is_dynamic(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return false;
+ return static_branch_likely(&dynamic_scs_enabled);
+}
+
+static inline bool scs_is_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return true;
+ return scs_is_dynamic();
+}
+
#else /* CONFIG_SHADOW_CALL_STACK */
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
static inline void scs_init(void) {}
static inline void scs_task_reset(struct task_struct *tsk) {}
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
static inline void scs_release(struct task_struct *tsk) {}
static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
+static inline bool scs_is_enabled(void) { return false; }
+static inline bool scs_is_dynamic(void) { return false; }
#endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 76731230bbc5..836a7e200f39 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -98,6 +98,7 @@ enum sctp_cid {
SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
+ SCTP_CID_PAD = 0x84,
}; /* enum */
@@ -221,7 +222,7 @@ struct sctp_datahdr {
__be16 stream;
__be16 ssn;
__u32 ppid;
- __u8 payload[];
+ /* __u8 payload[]; */
};
struct sctp_data_chunk {
@@ -269,7 +270,7 @@ struct sctp_inithdr {
__be16 num_outbound_streams;
__be16 num_inbound_streams;
__be32 initial_tsn;
- __u8 params[];
+ /* __u8 params[]; */
};
struct sctp_init_chunk {
@@ -384,7 +385,7 @@ struct sctp_sackhdr {
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- union sctp_sack_variable variable[];
+ /* union sctp_sack_variable variable[]; */
};
struct sctp_sack_chunk {
@@ -410,6 +411,12 @@ struct sctp_heartbeat_chunk {
};
+/* PAD chunk could be bundled with heartbeat chunk to probe pmtu */
+struct sctp_pad_chunk {
+ struct sctp_chunkhdr uh;
+};
+
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
@@ -436,7 +443,7 @@ struct sctp_shutdown_chunk {
struct sctp_errhdr {
__be16 cause;
__be16 length;
- __u8 variable[];
+ /* __u8 variable[]; */
};
struct sctp_operr_chunk {
@@ -482,11 +489,13 @@ enum sctp_error {
* 11 Restart of an association with new addresses
* 12 User Initiated Abort
* 13 Protocol Violation
+ * 14 Restart of an Association with New Encapsulation Port
*/
SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
+ SCTP_ERROR_NEW_ENCAP_PORT = cpu_to_be16(0x0e),
/* ADDIP Section 3.3 New Error Causes
*
@@ -594,7 +603,7 @@ struct sctp_fwdtsn_skip {
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_fwdtsn_skip skip[];
+ /* struct sctp_fwdtsn_skip skip[]; */
};
struct sctp_fwdtsn_chunk {
@@ -611,7 +620,7 @@ struct sctp_ifwdtsn_skip {
struct sctp_ifwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_ifwdtsn_skip skip[];
+ /* struct sctp_ifwdtsn_skip skip[]; */
};
struct sctp_ifwdtsn_chunk {
@@ -658,7 +667,7 @@ struct sctp_addip_param {
struct sctp_addiphdr {
__be32 serial;
- __u8 params[];
+ /* __u8 params[]; */
};
struct sctp_addip_chunk {
@@ -718,7 +727,7 @@ struct sctp_addip_chunk {
struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
- __u8 hmac[];
+ /* __u8 hmac[]; */
};
struct sctp_auth_chunk {
@@ -733,7 +742,7 @@ struct sctp_infox {
struct sctp_reconf_chunk {
struct sctp_chunkhdr chunk_hdr;
- __u8 params[];
+ /* __u8 params[]; */
};
struct sctp_strreset_outreq {
@@ -793,4 +802,27 @@ enum {
SCTP_FLOWLABEL_VAL_MASK = 0xfffff
};
+/* UDP Encapsulation
+ * draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.html#section-4-4
+ *
+ * The error cause indicating a "Restart of an Association with
+ * New Encapsulation Port"
+ *
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cause Code = 14 | Cause Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Current Encapsulation Port | New Encapsulation Port |
+ * +-------------------------------+-------------------------------+
+ */
+struct sctp_new_encap_port_hdr {
+ __be16 cur_port;
+ __be16 new_port;
+};
+
+/* Round an int up to the next multiple of 4. */
+#define SCTP_PAD4(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define SCTP_TRUNC4(s) ((s)&~3)
+
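A small orientation sketch: chunk TLVs are laid out on 4-byte boundaries, so walking them advances by the padded length, e.g. SCTP_PAD4(5) == 8 and SCTP_TRUNC4(5) == 4:

static inline int example_next_chunk_off(int off, __u16 chunk_length)
{
	/* lengths in headers are unpadded; on-the-wire layout is padded */
	return off + SCTP_PAD4(chunk_length);
}
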
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
deleted file mode 100644
index a2404a2bbd10..000000000000
--- a/include/linux/sdb.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the official version 1.1 of sdb.h
- */
-#ifndef __SDB_H__
-#define __SDB_H__
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-/*
- * All structures are 64 bytes long and are expected
- * to live in an array, one for each interconnect.
- * Most fields of the structures are shared among the
- * various types, and most-specific fields are at the
- * beginning (for alignment reasons, and to keep the
- * magic number at the head of the interconnect record
- */
-
-/* Product, 40 bytes at offset 24, 8-byte aligned
- *
- * device_id is vendor-assigned; version is device-specific,
- * date is hex (e.g 0x20120501), name is UTF-8, blank-filled
- * and not terminated with a 0 byte.
- */
-struct sdb_product {
- uint64_t vendor_id; /* 0x18..0x1f */
- uint32_t device_id; /* 0x20..0x23 */
- uint32_t version; /* 0x24..0x27 */
- uint32_t date; /* 0x28..0x2b */
- uint8_t name[19]; /* 0x2c..0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/*
- * Component, 56 bytes at offset 8, 8-byte aligned
- *
- * The address range is first to last, inclusive
- * (for example 0x100000 - 0x10ffff)
- */
-struct sdb_component {
- uint64_t addr_first; /* 0x08..0x0f */
- uint64_t addr_last; /* 0x10..0x17 */
- struct sdb_product product; /* 0x18..0x3f */
-};
-
-/* Type of the SDB record */
-enum sdb_record_type {
- sdb_type_interconnect = 0x00,
- sdb_type_device = 0x01,
- sdb_type_bridge = 0x02,
- sdb_type_integration = 0x80,
- sdb_type_repo_url = 0x81,
- sdb_type_synthesis = 0x82,
- sdb_type_empty = 0xFF,
-};
-
-/* Type 0: interconnect (first of the array)
- *
- * sdb_records is the length of the table including this first
- * record, version is 1. The bus type is enumerated later.
- */
-#define SDB_MAGIC 0x5344422d /* "SDB-" */
-struct sdb_interconnect {
- uint32_t sdb_magic; /* 0x00-0x03 */
- uint16_t sdb_records; /* 0x04-0x05 */
- uint8_t sdb_version; /* 0x06 */
- uint8_t sdb_bus_type; /* 0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 1: device
- *
- * class is 0 for "custom device", other values are
- * to be standardized; ABI version is for the driver,
- * bus-specific bits are defined by each bus (see below)
- */
-struct sdb_device {
- uint16_t abi_class; /* 0x00-0x01 */
- uint8_t abi_ver_major; /* 0x02 */
- uint8_t abi_ver_minor; /* 0x03 */
- uint32_t bus_specific; /* 0x04-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 2: bridge
- *
- * child is the address of the nested SDB table
- */
-struct sdb_bridge {
- uint64_t sdb_child; /* 0x00-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 0x80: integration
- *
- * all types with bit 7 set are meta-information, so
- * software can ignore the types it doesn't know. Here we
- * just provide product information for an aggregate device
- */
-struct sdb_integration {
- uint8_t reserved[24]; /* 0x00-0x17 */
- struct sdb_product product; /* 0x08-0x3f */
-};
-
-/* Type 0x81: Top module repository url
- *
- * again, an informative field that software can ignore
- */
-struct sdb_repo_url {
- uint8_t repo_url[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0x82: Synthesis tool information
- *
- * this informative record
- */
-struct sdb_synthesis {
- uint8_t syn_name[16]; /* 0x00-0x0f */
- uint8_t commit_id[16]; /* 0x10-0x1f */
- uint8_t tool_name[8]; /* 0x20-0x27 */
- uint32_t tool_version; /* 0x28-0x2b */
- uint32_t date; /* 0x2c-0x2f */
- uint8_t user_name[15]; /* 0x30-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0xff: empty
- *
- * this allows keeping empty slots during development,
- * so they can be filled later with minimal efforts and
- * no misleading description is ever shipped -- hopefully.
- * It can also be used to pad a table to a desired length.
- */
-struct sdb_empty {
- uint8_t reserved[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* The type of bus, for bus-specific flags */
-enum sdb_bus_type {
- sdb_wishbone = 0x00,
- sdb_data = 0x01,
-};
-
-#define SDB_WB_WIDTH_MASK 0x0f
-#define SDB_WB_ACCESS8 0x01
-#define SDB_WB_ACCESS16 0x02
-#define SDB_WB_ACCESS32 0x04
-#define SDB_WB_ACCESS64 0x08
-#define SDB_WB_LITTLE_ENDIAN 0x80
-
-#define SDB_DATA_READ 0x04
-#define SDB_DATA_WRITE 0x02
-#define SDB_DATA_EXEC 0x01
-
-#endif /* __SDB_H__ */
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
deleted file mode 100644
index 00e8b3b614f0..000000000000
--- a/include/linux/sdla.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)if_ifrad.h 0.20 13 Apr 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Structure packing
- *
- * 0.20 Mike McLagan New flags for S508 buffer handling
- */
-#ifndef SDLA_H
-#define SDLA_H
-
-#include <uapi/linux/sdla.h>
-
-
-/* important Z80 window addresses */
-#define SDLA_CONTROL_WND 0xE000
-
-#define SDLA_502_CMD_BUF 0xEF60
-#define SDLA_502_RCV_BUF 0xA900
-#define SDLA_502_TXN_AVAIL 0xFFF1
-#define SDLA_502_RCV_AVAIL 0xFFF2
-#define SDLA_502_EVENT_FLAGS 0xFFF3
-#define SDLA_502_MDM_STATUS 0xFFF4
-#define SDLA_502_IRQ_INTERFACE 0xFFFD
-#define SDLA_502_IRQ_PERMISSION 0xFFFE
-#define SDLA_502_DATA_OFS 0x0010
-
-#define SDLA_508_CMD_BUF 0xE000
-#define SDLA_508_TXBUF_INFO 0xF100
-#define SDLA_508_RXBUF_INFO 0xF120
-#define SDLA_508_EVENT_FLAGS 0xF003
-#define SDLA_508_MDM_STATUS 0xF004
-#define SDLA_508_IRQ_INTERFACE 0xF010
-#define SDLA_508_IRQ_PERMISSION 0xF011
-#define SDLA_508_TSE_OFFSET 0xF012
-
-/* Event flags */
-#define SDLA_EVENT_STATUS 0x01
-#define SDLA_EVENT_DLCI_STATUS 0x02
-#define SDLA_EVENT_BAD_DLCI 0x04
-#define SDLA_EVENT_LINK_DOWN 0x40
-
-/* IRQ Trigger flags */
-#define SDLA_INTR_RX 0x01
-#define SDLA_INTR_TX 0x02
-#define SDLA_INTR_MODEM 0x04
-#define SDLA_INTR_COMPLETE 0x08
-#define SDLA_INTR_STATUS 0x10
-#define SDLA_INTR_TIMER 0x20
-
-/* DLCI status bits */
-#define SDLA_DLCI_DELETED 0x01
-#define SDLA_DLCI_ACTIVE 0x02
-#define SDLA_DLCI_WAITING 0x04
-#define SDLA_DLCI_NEW 0x08
-#define SDLA_DLCI_INCLUDED 0x40
-
-/* valid command codes */
-#define SDLA_INFORMATION_WRITE 0x01
-#define SDLA_INFORMATION_READ 0x02
-#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03
-#define SDLA_SET_DLCI_CONFIGURATION 0x10
-#define SDLA_READ_DLCI_CONFIGURATION 0x11
-#define SDLA_DISABLE_COMMUNICATIONS 0x12
-#define SDLA_ENABLE_COMMUNICATIONS 0x13
-#define SDLA_READ_DLC_STATUS 0x14
-#define SDLA_READ_DLC_STATISTICS 0x15
-#define SDLA_FLUSH_DLC_STATISTICS 0x16
-#define SDLA_LIST_ACTIVE_DLCI 0x17
-#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18
-#define SDLA_ADD_DLCI 0x20
-#define SDLA_DELETE_DLCI 0x21
-#define SDLA_ACTIVATE_DLCI 0x22
-#define SDLA_DEACTIVATE_DLCI 0x23
-#define SDLA_READ_MODEM_STATUS 0x30
-#define SDLA_SET_MODEM_STATUS 0x31
-#define SDLA_READ_COMMS_ERR_STATS 0x32
-#define SDLA_FLUSH_COMMS_ERR_STATS 0x33
-#define SDLA_READ_CODE_VERSION 0x40
-#define SDLA_SET_IRQ_TRIGGER 0x50
-#define SDLA_GET_IRQ_TRIGGER 0x51
-
-/* In channel signal types */
-#define SDLA_ICS_LINK_VERIFY 0x02
-#define SDLA_ICS_STATUS_ENQ 0x03
-
-/* modem status flags */
-#define SDLA_MODEM_DTR_HIGH 0x01
-#define SDLA_MODEM_RTS_HIGH 0x02
-#define SDLA_MODEM_DCD_HIGH 0x08
-#define SDLA_MODEM_CTS_HIGH 0x20
-
-/* used for RET_MODEM interpretation */
-#define SDLA_MODEM_DCD_LOW 0x01
-#define SDLA_MODEM_CTS_LOW 0x02
-
-/* return codes */
-#define SDLA_RET_OK 0x00
-#define SDLA_RET_COMMUNICATIONS 0x01
-#define SDLA_RET_CHANNEL_INACTIVE 0x02
-#define SDLA_RET_DLCI_INACTIVE 0x03
-#define SDLA_RET_DLCI_CONFIG 0x04
-#define SDLA_RET_BUF_TOO_BIG 0x05
-#define SDLA_RET_NO_DATA 0x05
-#define SDLA_RET_BUF_OVERSIZE 0x06
-#define SDLA_RET_CIR_OVERFLOW 0x07
-#define SDLA_RET_NO_BUFS 0x08
-#define SDLA_RET_TIMEOUT 0x0A
-#define SDLA_RET_MODEM 0x10
-#define SDLA_RET_CHANNEL_OFF 0x11
-#define SDLA_RET_CHANNEL_ON 0x12
-#define SDLA_RET_DLCI_STATUS 0x13
-#define SDLA_RET_DLCI_UNKNOWN 0x14
-#define SDLA_RET_COMMAND_INVALID 0x1F
-
-/* Configuration flags */
-#define SDLA_DIRECT_RECV 0x0080
-#define SDLA_TX_NO_EXCEPT 0x0020
-#define SDLA_NO_ICF_MSGS 0x1000
-#define SDLA_TX50_RX50 0x0000
-#define SDLA_TX70_RX30 0x2000
-#define SDLA_TX30_RX70 0x4000
-
-/* IRQ selection flags */
-#define SDLA_IRQ_RECEIVE 0x01
-#define SDLA_IRQ_TRANSMIT 0x02
-#define SDLA_IRQ_MODEM_STAT 0x04
-#define SDLA_IRQ_COMMAND 0x08
-#define SDLA_IRQ_CHANNEL 0x10
-#define SDLA_IRQ_TIMER 0x20
-
-/* definitions for PC memory mapping */
-#define SDLA_8K_WINDOW 0x01
-#define SDLA_S502_SEG_A 0x10
-#define SDLA_S502_SEG_C 0x20
-#define SDLA_S502_SEG_D 0x00
-#define SDLA_S502_SEG_E 0x30
-#define SDLA_S507_SEG_A 0x00
-#define SDLA_S507_SEG_B 0x40
-#define SDLA_S507_SEG_C 0x80
-#define SDLA_S507_SEG_E 0xC0
-#define SDLA_S508_SEG_A 0x00
-#define SDLA_S508_SEG_C 0x10
-#define SDLA_S508_SEG_D 0x08
-#define SDLA_S508_SEG_E 0x18
-
-/* SDLA adapter port constants */
-#define SDLA_IO_EXTENTS 0x04
-
-#define SDLA_REG_CONTROL 0x00
-#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */
-#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */
-#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */
-
-#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */
-#define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */
-#define SDLA_S508_STS 0x01 /* status reg for 508 */
-#define SDLA_S508_IDR 0x02 /* ID reg for 508 */
-
-/* control register flags */
-#define SDLA_S502A_START 0x00 /* start the CPU */
-#define SDLA_S502A_INTREQ 0x02
-#define SDLA_S502A_INTEN 0x04
-#define SDLA_S502A_HALT 0x08 /* halt the CPU */
-#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */
-
-#define SDLA_S502E_CPUEN 0x01
-#define SDLA_S502E_ENABLE 0x02
-#define SDLA_S502E_INTACK 0x04
-
-#define SDLA_S507_ENABLE 0x01
-#define SDLA_S507_IRQ3 0x00
-#define SDLA_S507_IRQ4 0x20
-#define SDLA_S507_IRQ5 0x40
-#define SDLA_S507_IRQ7 0x60
-#define SDLA_S507_IRQ10 0x80
-#define SDLA_S507_IRQ11 0xA0
-#define SDLA_S507_IRQ12 0xC0
-#define SDLA_S507_IRQ15 0xE0
-
-#define SDLA_HALT 0x00
-#define SDLA_CPUEN 0x02
-#define SDLA_MEMEN 0x04
-#define SDLA_S507_EPROMWR 0x08
-#define SDLA_S507_EPROMCLK 0x10
-#define SDLA_S508_INTRQ 0x08
-#define SDLA_S508_INTEN 0x10
-
-struct sdla_cmd {
- char opp_flag;
- char cmd;
- short length;
- char retval;
- short dlci;
- char flags;
- short rxlost_int;
- long rxlost_app;
- char reserve[2];
- char data[SDLA_MAX_DATA]; /* transfer data buffer */
-} __attribute__((packed));
-
-struct intr_info {
- char flags;
- short txlen;
- char irq;
- char flags2;
- short timeout;
-} __attribute__((packed));
-
-/* found in the 508's control window at RXBUF_INFO */
-struct buf_info {
- unsigned short rse_num;
- unsigned long rse_base;
- unsigned long rse_next;
- unsigned long buf_base;
- unsigned short reserved;
- unsigned long buf_top;
-} __attribute__((packed));
-
-/* structure pointed to by rse_base in RXBUF_INFO struct */
-struct buf_entry {
- char opp_flag;
- short length;
- short dlci;
- char flags;
- short timestamp;
- short reserved[2];
- long buf_addr;
-} __attribute__((packed));
-
-#endif
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 02aef2844c38..709ad84809e1 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,12 +3,14 @@
#define _LINUX_SECCOMP_H
#include <uapi/linux/seccomp.h>
+#include <linux/seccomp_types.h>
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
SECCOMP_FILTER_FLAG_SPEC_ALLOW | \
SECCOMP_FILTER_FLAG_NEW_LISTENER | \
- SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
+ SECCOMP_FILTER_FLAG_TSYNC_ESRCH | \
+ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV)
/* sizeof() the first published struct seccomp_notif_addfd */
#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24
@@ -20,29 +22,11 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
-struct seccomp_filter;
-/**
- * struct seccomp - the state of a seccomp'ed process
- *
- * @mode: indicates one of the valid values above for controlled
- * system calls available to a process.
- * @filter: must always point to a valid seccomp-filter or NULL as it is
- * accessed without locking during system call entry.
- *
- * @filter must only be accessed from the context of current as there
- * is no read locking.
- */
-struct seccomp {
- int mode;
- atomic_t filter_count;
- struct seccomp_filter *filter;
-};
-
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
{
- if (unlikely(test_thread_flag(TIF_SECCOMP)))
+ if (unlikely(test_syscall_work(SECCOMP)))
return __secure_computing(NULL);
return 0;
}
@@ -62,8 +46,6 @@ static inline int seccomp_mode(struct seccomp *s)
#include <linux/errno.h>
-struct seccomp { };
-struct seccomp_filter { };
struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
@@ -121,4 +103,13 @@ static inline long seccomp_get_metadata(struct task_struct *task,
return -EINVAL;
}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
+
+#ifdef CONFIG_SECCOMP_CACHE_DEBUG
+struct seq_file;
+struct pid_namespace;
+struct pid;
+
+int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
+#endif
#endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/seccomp_types.h b/include/linux/seccomp_types.h
new file mode 100644
index 000000000000..cf0a0355024f
--- /dev/null
+++ b/include/linux/seccomp_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SECCOMP_TYPES_H
+#define _LINUX_SECCOMP_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_SECCOMP
+
+struct seccomp_filter;
+/**
+ * struct seccomp - the state of a seccomp'ed process
+ *
+ * @mode: indicates one of the valid seccomp mode values (SECCOMP_MODE_*)
+ *        for controlled system calls available to a process.
+ * @filter_count: number of seccomp filters
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ * accessed without locking during system call entry.
+ *
+ * @filter must only be accessed from the context of current as there
+ * is no read locking.
+ */
+struct seccomp {
+ int mode;
+ atomic_t filter_count;
+ struct seccomp_filter *filter;
+};
+
+#else
+
+struct seccomp { };
+struct seccomp_filter { };
+
+#endif
+
+#endif /* _LINUX_SECCOMP_TYPES_H */
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
new file mode 100644
index 000000000000..35f3a4a8ceb1
--- /dev/null
+++ b/include/linux/secretmem.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_SECRETMEM_H
+#define _LINUX_SECRETMEM_H
+
+#ifdef CONFIG_SECRETMEM
+
+extern const struct address_space_operations secretmem_aops;
+
+static inline bool folio_is_secretmem(struct folio *folio)
+{
+ struct address_space *mapping;
+
+ /*
+ * Using folio_mapping() is quite slow because of the actual call
+ * instruction.
+ * We know that secretmem pages are not compound and LRU so we can
+ * save a couple of cycles here.
+ */
+ if (folio_test_large(folio) || !folio_test_lru(folio))
+ return false;
+
+ mapping = (struct address_space *)
+ ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);
+
+ if (!mapping || mapping != folio->mapping)
+ return false;
+
+ return mapping->a_ops == &secretmem_aops;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma);
+bool secretmem_active(void);
+
+#else
+
+static inline bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool folio_is_secretmem(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool secretmem_active(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SECRETMEM */
+
+#endif /* _LINUX_SECRETMEM_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 0a0a03b36a3b..41a8f667bdfa 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -23,6 +23,7 @@
#ifndef __LINUX_SECURITY_H
#define __LINUX_SECURITY_H
+#include <linux/kernel_read_file.h>
#include <linux/key.h>
#include <linux/capability.h>
#include <linux/fs.h>
@@ -30,6 +31,9 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
+#include <linux/sockptr.h>
+#include <linux/bpf.h>
+#include <uapi/linux/lsm.h>
struct linux_binprm;
struct cred;
@@ -58,6 +62,7 @@ struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
+struct lsm_ctx;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -66,7 +71,7 @@ struct watch_notification;
/* If capable is being called by a setid function */
#define CAP_OPT_INSETID BIT(2)
-/* LSM Agnostic defines for fs_context::lsm_flags */
+/* LSM Agnostic defines for security_sb_set_mnt_opts() flags */
#define SECURITY_LSM_NATIVE_LABELS 1
struct ctl_table;
@@ -113,23 +118,31 @@ enum lockdown_reason {
LOCKDOWN_IOPORT,
LOCKDOWN_MSR,
LOCKDOWN_ACPI_TABLES,
+ LOCKDOWN_DEVICE_TREE,
LOCKDOWN_PCMCIA_CIS,
LOCKDOWN_TIOCSSERIAL,
LOCKDOWN_MODULE_PARAMETERS,
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
+ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_RTAS_ERROR_INJECTION,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
- LOCKDOWN_BPF_READ,
+ LOCKDOWN_BPF_READ_KERNEL,
+ LOCKDOWN_DBG_READ_KERNEL,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
LOCKDOWN_XMON_RW,
+ LOCKDOWN_XFRM_SECRET,
LOCKDOWN_CONFIDENTIALITY_MAX,
};
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+extern u32 lsm_active_cnt;
+extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
@@ -137,19 +150,22 @@ extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
-extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_capget(const struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
-extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
-extern int cap_inode_need_killpriv(struct dentry *dentry);
-extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_inode_getsecurity(struct inode *inode, const char *name,
- void **buffer, bool alloc);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+int cap_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name);
+int cap_inode_need_killpriv(struct dentry *dentry);
+int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
+int cap_inode_getsecurity(struct mnt_idmap *idmap,
+ struct inode *inode, const char *name, void **buffer,
+ bool alloc);
extern int cap_mmap_addr(unsigned long addr);
extern int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
@@ -166,14 +182,14 @@ struct sk_buff;
struct sock;
struct sockaddr;
struct socket;
-struct flowi;
+struct flowi_common;
struct dst_entry;
struct xfrm_selector;
struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-struct sctp_endpoint;
+struct sctp_association;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -250,18 +266,19 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
extern int early_security_init(void);
+extern u64 lsm_name_to_attr(const char *name);
/* Security operations */
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to, struct file *file);
+int security_binder_set_context_mgr(const struct cred *mgr);
+int security_binder_transaction(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to, const struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
-int security_capget(struct task_struct *target,
+int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
@@ -273,24 +290,27 @@ int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts);
-int security_quotactl(int cmds, int type, int id, struct super_block *sb);
+int security_quotactl(int cmds, int type, int id, const struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_creds_for_exec(struct linux_binprm *bprm);
-int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
-void security_bprm_committing_creds(struct linux_binprm *bprm);
-void security_bprm_committed_creds(struct linux_binprm *bprm);
+void security_bprm_committing_creds(const struct linux_binprm *bprm);
+void security_bprm_committed_creds(const struct linux_binprm *bprm);
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
+void security_sb_delete(struct super_block *sb);
void security_sb_free(struct super_block *sb);
void security_free_mnt_opts(void **mnt_opts);
int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
+int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts);
int security_sb_remount(struct super_block *sb, void *mnt_opts);
-int security_sb_kern_mount(struct super_block *sb);
+int security_sb_kern_mount(const struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(const char *dev_name, const struct path *path,
@@ -305,12 +325,11 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags);
-int security_add_mnt_opt(const char *option, const char *val,
- int len, void **mnt_opts);
int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
+ const struct qstr *name,
+ const char **xattr_name, void **ctx,
+ u32 *ctxlen);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct qstr *name,
const struct cred *old,
@@ -322,10 +341,12 @@ void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
initxattrs initxattrs, void *fs_data);
-int security_old_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, const char **name,
- void **value, size_t *len);
+int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+void security_inode_post_create_tmpfile(struct mnt_idmap *idmap,
+ struct inode *inode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -341,18 +362,38 @@ int security_inode_readlink(struct dentry *dentry);
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu);
int security_inode_permission(struct inode *inode, int mask);
-int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
+int security_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *attr);
+void security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ int ia_valid);
int security_inode_getattr(const struct path *path);
-int security_inode_setxattr(struct dentry *dentry, const char *name,
+int security_inode_setxattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
+int security_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+void security_inode_post_set_acl(struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+int security_inode_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+int security_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+void security_inode_post_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int security_inode_getxattr(struct dentry *dentry, const char *name);
int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct dentry *dentry, const char *name);
+int security_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name);
+void security_inode_post_removexattr(struct dentry *dentry, const char *name);
int security_inode_need_killpriv(struct dentry *dentry);
-int security_inode_killpriv(struct dentry *dentry);
-int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc);
+int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
+int security_inode_getsecurity(struct mnt_idmap *idmap,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
@@ -362,8 +403,11 @@ int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
+void security_file_release(struct file *file);
void security_file_free(struct file *file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
int security_mmap_addr(unsigned long addr);
@@ -376,6 +420,8 @@ int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file);
+int security_file_post_open(struct file *file, int mask);
+int security_file_truncate(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -386,18 +432,24 @@ void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
-int security_kernel_load_data(enum kernel_load_data_id id);
-int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
+int security_kernel_load_data(enum kernel_load_data_id id, bool contents);
+int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description);
+int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
int security_task_fix_setgid(struct cred *new, const struct cred *old,
int flags);
+int security_task_fix_setgroups(struct cred *new, const struct cred *old);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
-void security_task_getsecid(struct task_struct *p, u32 *secid);
+void security_current_getsecid_subj(u32 *secid);
+void security_task_getsecid_obj(struct task_struct *p, u32 *secid);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -413,6 +465,7 @@ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
+int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
int security_msg_msg_alloc(struct msg_msg *msg);
@@ -437,10 +490,13 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags);
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 size, u32 flags);
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size);
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -451,6 +507,8 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
+ void *val, size_t val_len, u64 id, u64 flags);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -468,6 +526,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
return 0;
}
+static inline u64 lsm_name_to_attr(const char *name)
+{
+ return LSM_ATTR_UNDEF;
+}
+
static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -487,26 +550,26 @@ static inline int early_security_init(void)
return 0;
}
-static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+static inline int security_binder_set_context_mgr(const struct cred *mgr)
{
return 0;
}
-static inline int security_binder_transaction(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transaction(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to,
- struct file *file)
+static inline int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
+ const struct file *file)
{
return 0;
}
@@ -522,7 +585,7 @@ static inline int security_ptrace_traceme(struct task_struct *parent)
return cap_ptrace_traceme(parent);
}
-static inline int security_capget(struct task_struct *target,
+static inline int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
@@ -548,7 +611,7 @@ static inline int security_capable(const struct cred *cred,
}
static inline int security_quotactl(int cmds, int type, int id,
- struct super_block *sb)
+ const struct super_block *sb)
{
return 0;
}
@@ -580,7 +643,7 @@ static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
}
static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
- struct file *file)
+ const struct file *file)
{
return cap_bprm_creds_from_file(bprm, file);
}
@@ -590,14 +653,19 @@ static inline int security_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committing_creds(const struct linux_binprm *bprm)
{
}
-static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committed_creds(const struct linux_binprm *bprm)
{
}
+static inline int security_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ return 0;
+}
static inline int security_fs_context_dup(struct fs_context *fc,
struct fs_context *src_fc)
{
@@ -614,6 +682,9 @@ static inline int security_sb_alloc(struct super_block *sb)
return 0;
}
+static inline void security_sb_delete(struct super_block *sb)
+{ }
+
static inline void security_sb_free(struct super_block *sb)
{ }
@@ -629,6 +700,13 @@ static inline int security_sb_remount(struct super_block *sb,
return 0;
}
+static inline int security_sb_mnt_opts_compat(struct super_block *sb,
+ void *mnt_opts)
+{
+ return 0;
+}
+
static inline int security_sb_kern_mount(struct super_block *sb)
{
return 0;
@@ -679,12 +757,6 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
return 0;
}
-static inline int security_add_mnt_opt(const char *option, const char *val,
- int len, void **mnt_opts)
-{
- return 0;
-}
-
static inline int security_move_mount(const struct path *from_path,
const struct path *to_path)
{
@@ -708,6 +780,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
+ const char **xattr_name,
void **ctx,
u32 *ctxlen)
{
@@ -732,13 +805,11 @@ static inline int security_inode_init_security(struct inode *inode,
return 0;
}
-static inline int security_old_inode_init_security(struct inode *inode,
- struct inode *dir,
- const struct qstr *qstr,
- const char **name,
- void **value, size_t *len)
+static inline int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int security_inode_create(struct inode *dir,
@@ -748,6 +819,10 @@ static inline int security_inode_create(struct inode *dir,
return 0;
}
+static inline void
+security_inode_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode)
+{ }
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
@@ -814,23 +889,62 @@ static inline int security_inode_permission(struct inode *inode, int mask)
return 0;
}
-static inline int security_inode_setattr(struct dentry *dentry,
- struct iattr *attr)
+static inline int security_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct iattr *attr)
{
return 0;
}
+static inline void
+security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ int ia_valid)
+{ }
+
static inline int security_inode_getattr(const struct path *path)
{
return 0;
}
-static inline int security_inode_setxattr(struct dentry *dentry,
- const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setxattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}
+static inline int security_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return 0;
+}
+
+static inline void security_inode_post_set_acl(struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{ }
+
+static inline int security_inode_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline int security_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline void security_inode_post_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{ }
+
static inline void security_inode_post_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{ }
@@ -846,25 +960,34 @@ static inline int security_inode_listxattr(struct dentry *dentry)
return 0;
}
-static inline int security_inode_removexattr(struct dentry *dentry,
- const char *name)
+static inline int security_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *name)
{
- return cap_inode_removexattr(dentry, name);
+ return cap_inode_removexattr(idmap, dentry, name);
}
+static inline void security_inode_post_removexattr(struct dentry *dentry,
+ const char *name)
+{ }
+
static inline int security_inode_need_killpriv(struct dentry *dentry)
{
return cap_inode_need_killpriv(dentry);
}
-static inline int security_inode_killpriv(struct dentry *dentry)
+static inline int security_inode_killpriv(struct mnt_idmap *idmap,
+ struct dentry *dentry)
{
- return cap_inode_killpriv(dentry);
+ return cap_inode_killpriv(idmap, dentry);
}
-static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
+static inline int security_inode_getsecurity(struct mnt_idmap *idmap,
+ struct inode *inode,
+ const char *name, void **buffer,
+ bool alloc)
{
- return -EOPNOTSUPP;
+ return cap_inode_getsecurity(idmap, inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -908,6 +1031,9 @@ static inline int security_file_alloc(struct file *file)
return 0;
}
+static inline void security_file_release(struct file *file)
+{ }
+
static inline void security_file_free(struct file *file)
{ }
@@ -917,6 +1043,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
return 0;
}
+static inline int security_file_ioctl_compat(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
@@ -968,6 +1101,16 @@ static inline int security_file_open(struct file *file)
return 0;
}
+static inline int security_file_post_open(struct file *file, int mask)
+{
+ return 0;
+}
+
+static inline int security_file_truncate(struct file *file)
+{
+ return 0;
+}
+
static inline int security_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
@@ -997,6 +1140,11 @@ static inline void security_transfer_creds(struct cred *new,
{
}
+static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = 0;
+}
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -1013,13 +1161,21 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
-static inline int security_kernel_load_data(enum kernel_load_data_id id)
+static inline int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
+{
+ return 0;
+}
+
+static inline int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description)
{
return 0;
}
static inline int security_kernel_read_file(struct file *file,
- enum kernel_read_file_id id)
+ enum kernel_read_file_id id,
+ bool contents)
{
return 0;
}
@@ -1045,6 +1201,12 @@ static inline int security_task_fix_setgid(struct cred *new,
return 0;
}
+static inline int security_task_fix_setgroups(struct cred *new,
+ const struct cred *old)
+{
+ return 0;
+}
+
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -1060,7 +1222,12 @@ static inline int security_task_getsid(struct task_struct *p)
return 0;
}
-static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
+static inline void security_current_getsecid_subj(u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
*secid = 0;
}
@@ -1127,6 +1294,11 @@ static inline int security_task_prctl(int option, unsigned long arg2,
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }
+static inline int security_create_user_ns(const struct cred *cred)
+{
+ return 0;
+}
+
static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
@@ -1233,14 +1405,28 @@ static inline void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, const char *lsm,
- char *name, char **value)
+static inline int security_getselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_setselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ u32 size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_getprocattr(struct task_struct *p, int lsmid,
+ const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(const char *lsm, char *name,
- void *value, size_t size)
+static inline int security_setprocattr(int lsmid, const char *name, void *value,
+ size_t size)
{
return -EINVAL;
}
@@ -1291,6 +1477,12 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
+ u32 *uctx_len, void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -1336,16 +1528,18 @@ int security_socket_getsockopt(struct socket *sock, int level, int optname);
int security_socket_setsockopt(struct socket *sock, int level, int optname);
int security_socket_shutdown(struct socket *sock, int how);
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len);
+int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
+ sockptr_t optlen, unsigned int len);
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
-void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
-void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
+void security_sk_classify_flow(const struct sock *sk,
+ struct flowi_common *flic);
+void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic);
void security_sock_graft(struct sock*sk, struct socket *parent);
-int security_inet_conn_request(struct sock *sk,
+int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req);
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
@@ -1360,11 +1554,14 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb);
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen);
-void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk);
+int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb);
+int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
@@ -1470,8 +1667,10 @@ static inline int security_sock_rcv_skb(struct sock *sk,
return 0;
}
-static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len)
+static inline int security_socket_getpeersec_stream(struct socket *sock,
+ sockptr_t optval,
+ sockptr_t optlen,
+ unsigned int len)
{
return -ENOPROTOOPT;
}
@@ -1494,11 +1693,13 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
-static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+static inline void security_sk_classify_flow(const struct sock *sk,
+ struct flowi_common *flic)
{
}
-static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+static inline void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
{
}
@@ -1506,7 +1707,7 @@ static inline void security_sock_graft(struct sock *sk, struct socket *parent)
{
}
-static inline int security_inet_conn_request(struct sock *sk,
+static inline int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return 0;
@@ -1564,7 +1765,7 @@ static inline int security_tun_dev_open(void *security)
return 0;
}
-static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+static inline int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
@@ -1577,11 +1778,22 @@ static inline int security_sctp_bind_connect(struct sock *sk, int optname,
return 0;
}
-static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+static inline void security_sctp_sk_clone(struct sctp_association *asoc,
struct sock *sk,
struct sock *newsk)
{
}
+
+static inline int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -1622,12 +1834,12 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
-int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl);
+ const struct flowi_common *flic);
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
-void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -1673,13 +1885,14 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x)
return 0;
}
-static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return 0;
}
static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl)
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic)
{
return 1;
}
@@ -1689,7 +1902,8 @@ static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
return 0;
}
-static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+static inline void security_skb_classify_flow(struct sk_buff *skb,
+ struct flowi_common *flic)
{
}
@@ -1701,6 +1915,7 @@ int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t m
int security_path_rmdir(const struct path *dir, struct dentry *dentry);
int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev);
+void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry);
int security_path_truncate(const struct path *path);
int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name);
@@ -1735,6 +1950,10 @@ static inline int security_path_mknod(const struct path *dir, struct dentry *den
return 0;
}
+static inline void security_path_post_mknod(struct mnt_idmap *idmap,
+ struct dentry *dentry)
+{ }
+
static inline int security_path_truncate(const struct path *path)
{
return 0;
@@ -1786,6 +2005,9 @@ void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm);
int security_key_getsecurity(struct key *key, char **_buffer);
+void security_key_post_create_or_update(struct key *keyring, struct key *key,
+ const void *payload, size_t payload_len,
+ unsigned long flags, bool create);
#else
@@ -1813,6 +2035,14 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
+static inline void security_key_post_create_or_update(struct key *keyring,
+ struct key *key,
+ const void *payload,
+ size_t payload_len,
+ unsigned long flags,
+ bool create)
+{ }
+
#endif
#endif /* CONFIG_KEYS */
@@ -1894,15 +2124,22 @@ static inline void securityfs_remove(struct dentry *dentry)
union bpf_attr;
struct bpf_map;
struct bpf_prog;
-struct bpf_prog_aux;
+struct bpf_token;
#ifdef CONFIG_SECURITY
extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
-extern int security_bpf_map_alloc(struct bpf_map *map);
+extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token);
extern void security_bpf_map_free(struct bpf_map *map);
-extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
-extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
+extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token);
+extern void security_bpf_prog_free(struct bpf_prog *prog);
+extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path);
+extern void security_bpf_token_free(struct bpf_token *token);
+extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
unsigned int size)
@@ -1920,7 +2157,8 @@ static inline int security_bpf_prog(struct bpf_prog *prog)
return 0;
}
-static inline int security_bpf_map_alloc(struct bpf_map *map)
+static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
{
return 0;
}
@@ -1928,13 +2166,33 @@ static inline int security_bpf_map_alloc(struct bpf_map *map)
static inline void security_bpf_map_free(struct bpf_map *map)
{ }
-static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
{
return 0;
}
-static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
+static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
+
+static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+{
+ return 0;
+}
+
+static inline void security_bpf_token_free(struct bpf_token *token)
+{ }
+
+static inline int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ return 0;
+}
+
+static inline int security_bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
@@ -1976,4 +2234,25 @@ static inline int security_perf_event_write(struct perf_event *event)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_PERF_EVENTS */
+#ifdef CONFIG_IO_URING
+#ifdef CONFIG_SECURITY
+extern int security_uring_override_creds(const struct cred *new);
+extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
+#else
+static inline int security_uring_override_creds(const struct cred *new)
+{
+ return 0;
+}
+static inline int security_uring_sqpoll(void)
+{
+ return 0;
+}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_IO_URING */
+
#endif /* ! __LINUX_SECURITY_H */
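
A pattern worth noting in the hunks above: every security_*() hook gains a matching !CONFIG_SECURITY stub that returns 0 or falls back to the capability code, so call sites never need #ifdefs. A minimal sketch of that convention using the newly added file_post_open hook (the wrapper name and the MAY_READ mask are illustrative):

    #include <linux/fs.h>
    #include <linux/security.h>

    /* Hypothetical wrapper: the pre-open and new post-open hooks are
     * called unconditionally; with CONFIG_SECURITY=n both stubs above
     * simply return 0. */
    static int example_open_checks(struct file *file)
    {
        int ret;

        ret = security_file_open(file);
        if (ret)
            return ret;
        return security_file_post_open(file, MAY_READ);
    }
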
diff --git a/include/linux/sed-opal-key.h b/include/linux/sed-opal-key.h
new file mode 100644
index 000000000000..0ca03054e8f6
--- /dev/null
+++ b/include/linux/sed-opal-key.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SED key operations.
+ *
+ * Copyright (C) 2023 IBM Corporation
+ *
+ * These are the accessor functions (read/write) for SED Opal
+ * keys. Specific keystores can provide overrides.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#ifdef CONFIG_PSERIES_PLPKS_SED
+int sed_read_key(char *keyname, char *key, u_int *keylen);
+int sed_write_key(char *keyname, char *key, u_int keylen);
+#else
+static inline
+int sed_read_key(char *keyname, char *key, u_int *keylen)
+{
+ return -EOPNOTSUPP;
+}
+static inline
+int sed_write_key(char *keyname, char *key, u_int keylen)
+{
+ return -EOPNOTSUPP;
+}
+#endif
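
The accessors degrade to -EOPNOTSUPP when no keystore backend (CONFIG_PSERIES_PLPKS_SED) is built in, so callers can probe unconditionally. A hedged sketch, assuming the caller supplies its own buffer and uses the "opal-boot-pin" name that sed-opal.h below defines as OPAL_AUTH_KEY:

    #include <linux/sed-opal-key.h>

    /* Illustrative only: buffer sizing and error policy are assumptions. */
    static int load_boot_pin(char *buf, u_int buflen)
    {
        u_int keylen = buflen;
        int ret;

        ret = sed_read_key("opal-boot-pin", buf, &keylen);
        if (ret)
            return ret;   /* -EOPNOTSUPP without a keystore backend */
        return keylen;
    }
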
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 1ac0d712a9c3..2ac50822554e 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -11,7 +11,8 @@
#define LINUX_OPAL_H
#include <uapi/linux/sed-opal.h>
-#include <linux/kernel.h>
+#include <linux/compiler_types.h>
+#include <linux/types.h>
struct opal_dev;
@@ -24,6 +25,9 @@ bool opal_unlock_from_suspend(struct opal_dev *dev);
struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
+#define OPAL_AUTH_KEY "opal-boot-pin"
+#define OPAL_AUTH_KEY_PREV "opal-boot-pin-prev"
+
static inline bool is_sed_ioctl(unsigned int cmd)
{
switch (cmd) {
@@ -43,6 +47,11 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
case IOC_OPAL_GENERIC_TABLE_RW:
+ case IOC_OPAL_GET_STATUS:
+ case IOC_OPAL_GET_LR_STATUS:
+ case IOC_OPAL_GET_GEOMETRY:
+ case IOC_OPAL_DISCOVERY:
+ case IOC_OPAL_REVERT_LSP:
return true;
}
return false;
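
is_sed_ioctl() exists so a block driver can route the whole Opal command set with a single test; the new IOC_OPAL_* cases extend that set without touching callers. A sketch of the usual dispatch shape (driver names are placeholders; compare the NVMe ioctl path):

    #include <linux/errno.h>
    #include <linux/sed-opal.h>

    static int my_sec_ioctl(struct opal_dev *opal, unsigned int cmd,
                            void __user *arg)
    {
        if (is_sed_ioctl(cmd))
            return sed_ioctl(opal, cmd, arg);
        return -ENOTTY;   /* not an Opal command */
    }
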
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 5b890ef5b59f..bab7d30d3446 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -14,17 +14,16 @@
struct tty_struct;
struct vc_data;
-extern void clear_selection(void);
-extern int set_selection_user(const struct tiocl_selection __user *sel,
- struct tty_struct *tty);
-extern int set_selection_kernel(struct tiocl_selection *v,
- struct tty_struct *tty);
-extern int paste_selection(struct tty_struct *tty);
-extern int sel_loadlut(char __user *p);
-extern int mouse_reporting(void);
-extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry);
-
-bool vc_is_sel(struct vc_data *vc);
+void clear_selection(void);
+int set_selection_user(const struct tiocl_selection __user *sel,
+ struct tty_struct *tty);
+int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty);
+int paste_selection(struct tty_struct *tty);
+int sel_loadlut(u32 __user *lut);
+int mouse_reporting(void);
+void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry);
+
+bool vc_is_sel(const struct vc_data *vc);
extern int console_blanked;
@@ -33,22 +32,21 @@ extern unsigned char default_red[];
extern unsigned char default_grn[];
extern unsigned char default_blu[];
-extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed);
-extern u16 screen_glyph(struct vc_data *vc, int offset);
-extern u32 screen_glyph_unicode(struct vc_data *vc, int offset);
-extern void complement_pos(struct vc_data *vc, int offset);
-extern void invert_screen(struct vc_data *vc, int offset, int count, int shift);
+unsigned short *screen_pos(const struct vc_data *vc, int w_offset, bool viewed);
+u16 screen_glyph(const struct vc_data *vc, int offset);
+u32 screen_glyph_unicode(const struct vc_data *vc, int offset);
+void complement_pos(struct vc_data *vc, int offset);
+void invert_screen(struct vc_data *vc, int offset, int count, bool viewed);
-extern void getconsxy(struct vc_data *vc, unsigned char *p);
-extern void putconsxy(struct vc_data *vc, unsigned char *p);
+void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]);
+void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]);
-extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org);
-extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
-extern void vcs_scr_updated(struct vc_data *vc);
+u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org);
+void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
+void vcs_scr_updated(struct vc_data *vc);
-extern int vc_uniscr_check(struct vc_data *vc);
-extern void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed,
- unsigned int row, unsigned int col,
- unsigned int nr);
+int vc_uniscr_check(struct vc_data *vc);
+void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
+ unsigned int row, unsigned int col, unsigned int nr);
#endif
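
The getconsxy()/putconsxy() prototypes now use `unsigned char xy[static 2]`, which encodes the minimum buffer size in the type and lets newer compilers warn on undersized arguments. A save/restore sketch under that contract, assuming the console's x-then-y byte order (function name hypothetical):

    #include <linux/selection.h>

    static void with_cursor_saved(struct vc_data *vc)
    {
        unsigned char xy[2];    /* anything smaller is now diagnosable */

        getconsxy(vc, xy);      /* assumed: xy[0] = column, xy[1] = row */
        /* ... temporarily move the cursor and draw ... */
        putconsxy(vc, xy);
    }
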
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 5608a500c43e..c4deefe42aeb 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -3,25 +3,17 @@
#define _LINUX_SEM_H
#include <uapi/linux/sem.h>
+#include <linux/sem_types.h>
struct task_struct;
-struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
-struct sysv_sem {
- struct sem_undo_list *undo_list;
-};
-
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
#else
-struct sysv_sem {
- /* empty */
-};
-
static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
return 0;
diff --git a/include/linux/sem_types.h b/include/linux/sem_types.h
new file mode 100644
index 000000000000..73df1971a7ae
--- /dev/null
+++ b/include/linux/sem_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SEM_TYPES_H
+#define _LINUX_SEM_TYPES_H
+
+struct sem_undo_list;
+
+struct sysv_sem {
+#ifdef CONFIG_SYSVIPC
+ struct sem_undo_list *undo_list;
+#endif
+};
+
+#endif /* _LINUX_SEM_TYPES_H */
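
Folding the #ifdef into the struct body lets headers that only embed struct sysv_sem include this small _types.h instead of <linux/sem.h> and its uapi dependency. A sketch of the embedding side (the struct name stands in for task_struct):

    #include <linux/sem_types.h>

    struct embedder {                  /* stand-in for task_struct */
        struct sysv_sem sysvsem;       /* no members when !CONFIG_SYSVIPC */
    };
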
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 6694d0019a68..04655faadc2d 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -25,8 +25,14 @@ struct semaphore {
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
-#define DEFINE_SEMAPHORE(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+/*
+ * Unlike mutexes, binary semaphores do not have an owner, so up() can
+ * be called in a different thread from the one which called down().
+ * It is also safe to call down_trylock() and up() from interrupt
+ * context.
+ */
+#define DEFINE_SEMAPHORE(_name, _n) \
+ struct semaphore _name = __SEMAPHORE_INITIALIZER(_name, _n)
static inline void sema_init(struct semaphore *sem, int val)
{
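
DEFINE_SEMAPHORE() now takes the initial count explicitly, and the new comment spells out why a semaphore, unlike a mutex, may be released from a different context than the one that acquired it. A sketch of the IRQ-completion pattern this permits (all names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/semaphore.h>

    static DEFINE_SEMAPHORE(done_sem, 0);   /* starts unavailable */

    static irqreturn_t hw_done_irq(int irq, void *dev_id)
    {
        up(&done_sem);          /* explicitly safe from IRQ context */
        return IRQ_HANDLED;
    }

    static void wait_for_hw(void)
    {
        down(&done_sem);        /* sleeps until the interrupt fires */
    }
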
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index fb0205d87d3c..fe41da005970 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_SEQ_BUF_H
#define _LINUX_SEQ_BUF_H
-#include <linux/fs.h>
+#include <linux/bug.h>
+#include <linux/minmax.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
/*
* Trace sequences are used to allow a function to call several other functions
@@ -10,27 +13,32 @@
*/
/**
- * seq_buf - seq buffer structure
+ * struct seq_buf - seq buffer structure
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
- * @readpos: The next position to read in the buffer.
*/
struct seq_buf {
char *buffer;
size_t size;
size_t len;
- loff_t readpos;
};
+#define DECLARE_SEQ_BUF(NAME, SIZE) \
+ struct seq_buf NAME = { \
+ .buffer = (char[SIZE]) { 0 }, \
+ .size = SIZE, \
+ }
+
static inline void seq_buf_clear(struct seq_buf *s)
{
s->len = 0;
- s->readpos = 0;
+ if (s->size)
+ s->buffer[0] = '\0';
}
static inline void
-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
+seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
{
s->buffer = buf;
s->size = size;
@@ -39,7 +47,7 @@ seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
/*
* A seq_buf has a buffer that might overflow. When this happens,
- * the len and size are set to be equal.
+ * len is set to be greater than size.
*/
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
@@ -72,11 +80,40 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
+ * seq_buf_str - get NUL-terminated C string from seq_buf
+ * @s: the seq_buf handle
+ *
+ * This makes sure that the buffer in @s is NUL-terminated and
+ * safe to read as a string.
+ *
+ * Note, if this is called when the buffer has overflowed, then
+ * the last byte of the buffer is zeroed, and the len will still
+ * point past it.
+ *
+ * After this function is called, s->buffer is safe to use
+ * in string operations.
+ *
+ * Returns: @s->buffer after making sure it is terminated.
+ */
+static inline const char *seq_buf_str(struct seq_buf *s)
+{
+ if (WARN_ON(s->size == 0))
+ return "";
+
+ if (seq_buf_buffer_left(s))
+ s->buffer[s->len] = 0;
+ else
+ s->buffer[s->size - 1] = 0;
+
+ return s->buffer;
+}
+
+/**
* seq_buf_get_buf - get buffer to write arbitrary data to
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
*
- * Return the number of bytes available in the buffer, or zero if
+ * Returns: the number of bytes available in the buffer, or zero if
* there's no space.
*/
static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
@@ -98,7 +135,7 @@ static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
* @num: the number of bytes to commit
*
* Commit @num bytes of data written to a buffer previously acquired
- * by seq_buf_get. To signal an error condition, or that the data
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
* didn't fit in the available space, pass a negative @num value.
*/
static inline void seq_buf_commit(struct seq_buf *s, int num)
@@ -118,7 +155,7 @@ extern __printf(2, 0)
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
- int cnt);
+ size_t start, int cnt);
extern int seq_buf_puts(struct seq_buf *s, const char *str);
extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
@@ -134,4 +171,6 @@ extern int
seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
#endif
+void seq_buf_do_printk(struct seq_buf *s, const char *lvl);
+
#endif /* _LINUX_SEQ_BUF_H */
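
With readpos gone and seq_buf_str() guaranteeing NUL termination even on overflow, a seq_buf can now be declared, filled, and printed in three lines. A minimal sketch (function name and format string are illustrative):

    #include <linux/printk.h>
    #include <linux/seq_buf.h>

    static void report_flags(unsigned int flags)
    {
        DECLARE_SEQ_BUF(sb, 64);        /* buffer and bookkeeping in one */

        seq_buf_printf(&sb, "flags=%#x", flags);
        pr_info("%s\n", seq_buf_str(&sb));   /* always NUL-terminated */
    }
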
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 813614d4b71f..234bcdb1fba4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/cpumask.h>
@@ -107,6 +108,7 @@ void seq_pad(struct seq_file *m, char c);
char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
int seq_write(struct seq_file *seq, const void *data, size_t len);
@@ -125,8 +127,30 @@ void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num
void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
unsigned long long v, unsigned int width);
-void seq_escape(struct seq_file *m, const char *s, const char *esc);
-void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz);
+void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
+ unsigned int flags, const char *esc);
+
+static inline void seq_escape_str(struct seq_file *m, const char *src,
+ unsigned int flags, const char *esc)
+{
+ seq_escape_mem(m, src, strlen(src), flags, esc);
+}
+
+/**
+ * seq_escape - print string into buffer, escaping some characters
+ * @m: target buffer
+ * @s: NUL-terminated string
+ * @esc: set of characters that need escaping
+ *
+ * Puts the string into the buffer, replacing each occurrence of a character
+ * from @esc with its usual octal escape.
+ *
+ * Use seq_has_overflowed() to check for errors.
+ */
+static inline void seq_escape(struct seq_file *m, const char *s, const char *esc)
+{
+ seq_escape_str(m, s, ESCAPE_OCTAL, esc);
+}
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
@@ -138,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
+void *single_start(struct seq_file *, loff_t *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
int single_release(struct inode *, struct file *);
@@ -145,6 +170,10 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#ifdef CONFIG_BINARY_PRINTF
+void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
+#endif
+
#define DEFINE_SEQ_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
@@ -178,12 +207,27 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
-#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
+#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __name ## _show, inode->i_private); \
} \
\
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, pde_data(inode)); \
+} \
+ \
static const struct proc_ops __name ## _proc_ops = { \
.proc_open = __name ## _open, \
.proc_read = seq_read, \
@@ -220,18 +264,19 @@ static inline void seq_show_option(struct seq_file *m, const char *name,
/**
* seq_show_option_n - display mount options with appropriate escapes
- * where @value must be a specific length.
+ * where @value must be a specific length (i.e.
+ * not NUL-terminated).
* @m: the seq_file handle
* @name: the mount option name
* @value: the mount option name's value, cannot be NULL
- * @length: the length of @value to display
+ * @length: the exact length of @value to display, must be constant expression
*
* This is a macro since this uses "length" to define the size of the
* stack buffer.
*/
#define seq_show_option_n(m, name, value, length) { \
char val_buf[length + 1]; \
- strncpy(val_buf, value, length); \
+ memcpy(val_buf, value, length); \
val_buf[length] = '\0'; \
seq_show_option(m, name, val_buf); \
}
@@ -248,6 +293,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
/*
* Helpers for iteration over hlist_head-s in seq_files
*/
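
DEFINE_SHOW_STORE_ATTRIBUTE() extends the DEFINE_SHOW_ATTRIBUTE() boilerplate to read-write files: it expects <name>_show() and <name>_write() to already exist and emits <name>_open plus <name>_fops. A minimal sketch (all names hypothetical):

    #include <linux/seq_file.h>

    static int mystate_show(struct seq_file *m, void *v)
    {
        seq_puts(m, "ok\n");
        return 0;
    }

    static ssize_t mystate_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
    {
        /* parse @buf and apply the new state here */
        return count;
    }

    DEFINE_SHOW_STORE_ATTRIBUTE(mystate);   /* emits mystate_fops */
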
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
index 0fdbe1ddd8d1..79638395bc32 100644
--- a/include/linux/seq_file_net.h
+++ b/include/linux/seq_file_net.h
@@ -3,13 +3,15 @@
#define __SEQ_FILE_NET_H__
#include <linux/seq_file.h>
+#include <net/net_trackers.h>
struct net;
extern struct net init_net;
struct seq_net_private {
#ifdef CONFIG_NET_NS
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
#endif
};
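
Pairing the netns pointer with a netns_tracker lets reference leaks be attributed to their holder. A hedged sketch of the matching acquire/release, assuming CONFIG_NET_NS and the get_net_track()/put_net_track() helpers from <net/net_namespace.h>:

    #include <linux/seq_file_net.h>
    #include <net/net_namespace.h>

    static void hold_net_ref(struct seq_net_private *p, struct net *net)
    {
        p->net = get_net_track(net, &p->ns_tracker, GFP_KERNEL);
    }

    static void drop_net_ref(struct seq_net_private *p)
    {
        put_net_track(p->net, &p->ns_tracker);
    }
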
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 962d9768945f..d90d8ee29d81 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -18,6 +18,7 @@
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
+#include <linux/seqlock_types.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
@@ -37,37 +38,6 @@
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
-/*
- * Sequence counters (seqcount_t)
- *
- * This is the raw counting mechanism, without any writer protection.
- *
- * Write side critical sections must be serialized and non-preemptible.
- *
- * If readers can be invoked from hardirq or softirq contexts,
- * interrupts or bottom halves must also be respectively disabled before
- * entering the write section.
- *
- * This mechanism can't be used if the protected data contains pointers,
- * as the writer can invalidate a pointer that a reader is following.
- *
- * If the write serialization mechanism is one of the common kernel
- * locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKTYPE_t) instead.
- *
- * If it's desired to automatically handle the sequence counter writer
- * serialization and non-preemptibility requirements, use a sequential
- * lock (seqlock_t) instead.
- *
- * See Documentation/locking/seqlock.rst
- */
-typedef struct seqcount {
- unsigned sequence;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} seqcount_t;
-
static inline void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
@@ -117,7 +87,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
/*
- * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ * Sequence counters with associated locks (seqcount_LOCKNAME_t)
*
* A sequence counter which associates the lock used for writer
* serialization at initialization time. This enables lockdep to validate
@@ -131,128 +101,170 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* See Documentation/locking/seqlock.rst
*/
-#ifdef CONFIG_LOCKDEP
-#define __SEQ_LOCK(expr) expr
-#else
-#define __SEQ_LOCK(expr)
-#endif
-
-/**
- * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPR associated
+/*
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
- * @lock: Pointer to the associated spinlock
+ * @lock: Pointer to the associated lock
*
- * A plain sequence counter with external writer synchronization by a
- * spinlock. The spinlock is associated to the sequence count in the
+ * A plain sequence counter with external writer synchronization by
+ * LOCKNAME @lock. The lock is associated to the sequence counter in the
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
+ *
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
*/
-/**
+/*
* seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
* @s: Pointer to the seqcount_LOCKNAME_t instance
- * @lock: Pointer to the associated LOCKTYPE
+ * @lock: Pointer to the associated lock
*/
+#define seqcount_LOCKNAME_init(s, _lock, lockname) \
+ do { \
+ seqcount_##lockname##_t *____s = (s); \
+ seqcount_init(&____s->seqcount); \
+ __SEQ_LOCK(____s->lock = (_lock)); \
+ } while (0)
+
+#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
+#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
+
/*
- * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
- * @locktype: actual typename
- * @lockname: name
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
+ *
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
+ * @lockbase: prefix for associated lock/unlock
*/
-#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
-typedef struct seqcount_##lockname { \
- seqcount_t seqcount; \
- __SEQ_LOCK(locktype *lock); \
-} seqcount_##lockname##_t; \
- \
-static __always_inline void \
-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
+static __always_inline seqcount_t * \
+__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
- seqcount_init(&s->seqcount); \
- __SEQ_LOCK(s->lock = lock); \
+ return &s->seqcount; \
} \
\
-static __always_inline seqcount_t * \
-__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
+static __always_inline const seqcount_t * \
+__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
+static __always_inline unsigned \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+{ \
+ unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return seq; \
+ \
+ if (preemptible && unlikely(seq & 1)) { \
+ __SEQ_LOCK(lockbase##_lock(s->lock)); \
+ __SEQ_LOCK(lockbase##_unlock(s->lock)); \
+ \
+ /* \
+ * Re-read the sequence counter since the (possibly \
+ * preempted) writer made progress. \
+ */ \
+ seq = READ_ONCE(s->seqcount.sequence); \
+ } \
+ \
+ return seq; \
+} \
+ \
static __always_inline bool \
-__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
- return preemptible; \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return preemptible; \
+ \
+ /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
+ return false; \
} \
\
static __always_inline void \
-__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ __SEQ_LOCK(lockdep_assert_held(s->lock)); \
}
/*
* __seqprop() for seqcount_t
*/
-static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
return s;
}
-static inline bool __seqcount_preemptible(seqcount_t *s)
+static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
+{
+ return s;
+}
+
+static inline unsigned __seqprop_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
+static inline bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
}
-static inline void __seqcount_assert(seqcount_t *s)
+static inline void __seqprop_assert(const seqcount_t *s)
{
lockdep_assert_preemption_disabled();
}
-SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
-SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
+#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
-/**
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
+
+/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
* @name: Name of the seqcount_LOCKNAME_t instance
- * @lock: Pointer to the associated LOCKTYPE
+ * @lock: Pointer to the associated LOCKNAME
*/
-#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
__SEQ_LOCK(.lock = (assoc_lock)) \
}
-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop
#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqcount_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
- __seqprop_case((s), mutex, prop), \
- __seqprop_case((s), ww_mutex, prop))
+ __seqprop_case((s), mutex, prop))
-#define __seqcount_ptr(s) __seqprop(s, ptr)
-#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
-#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)
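
[Illustration, not part of this patch: the _Generic dispatch above selects the accessor from the static type of the counter. Assuming a hypothetical seqcount_spinlock_t instance:]

	static seqcount_spinlock_t demo_seq;

	static void demo(void)
	{
		/*
		 * seqprop_ptr(&demo_seq) expands, via _Generic, to
		 * __seqprop_spinlock_ptr(&demo_seq) and returns
		 * &demo_seq.seqcount.
		 */
		seqcount_t *sc = seqprop_ptr(&demo_seq);

		(void)sc;
	}
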
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -265,56 +277,45 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
- __read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned __seq; \
+ \
+ while ((__seq = seqprop_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ __seq; \
+})
/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) \
- raw_read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_t_begin(s);
- smp_rmb();
- return ret;
-}
+({ \
+ unsigned _seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ _seq; \
+})
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
- read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_t_begin(s);
-}
+({ \
+ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})
/**
* raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_read_seqcount opens a read critical section of the given
* seqcount_t, without any lockdep checking, and without checking or
@@ -324,20 +325,18 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
- raw_read_seqcount_t(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned __seq = seqprop_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ __seq; \
+})
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_seqcount_begin opens a read critical section of the given
* seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -352,20 +351,17 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
- raw_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
-{
- /*
- * If the counter is odd, let read_seqcount_retry() fail
- * by decrementing the counter.
- */
- return raw_read_seqcount_t(s) & ~1;
-}
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})
/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -379,9 +375,9 @@ static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- __read_seqcount_t_retry(__seqcount_ptr(s), start)
+ do___read_seqcount_retry(seqprop_const_ptr(s), start)
-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return unlikely(READ_ONCE(s->sequence) != start);
@@ -389,7 +385,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
/**
* read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* read_seqcount_retry closes the read critical section of given
@@ -399,27 +395,29 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- read_seqcount_t_retry(__seqcount_ptr(s), start)
+ do_read_seqcount_retry(seqprop_const_ptr(s), start)
-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_t_retry(s, start);
+ return do___read_seqcount_retry(s, start);
}
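
[Illustration, not part of this patch: the canonical read-side retry loop over a plain seqcount_t. data_seq, data_x and data_y are hypothetical.]

	static seqcount_t data_seq = SEQCNT_ZERO(data_seq);
	static int data_x, data_y;

	static void read_data(int *x, int *y)
	{
		unsigned seq;

		do {
			seq = read_seqcount_begin(&data_seq);
			*x = data_x;
			*y = data_y;
		} while (read_seqcount_retry(&data_seq, seq));
	}
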
/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_begin()
*/
#define raw_write_seqcount_begin(s) \
do { \
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+ do_raw_write_seqcount_begin(seqprop_ptr(s)); \
} while (0)
-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -428,17 +426,19 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_end()
*/
#define raw_write_seqcount_end(s) \
do { \
- raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+ do_raw_write_seqcount_end(seqprop_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
-static inline void raw_write_seqcount_t_end(seqcount_t *s)
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
@@ -448,76 +448,77 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
/**
* write_seqcount_begin_nested() - start a seqcount_t write section with
* custom lockdep nesting level
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
+ * Context: check write_seqcount_begin()
*/
#define write_seqcount_begin_nested(s, subclass) \
do { \
- __seqcount_assert_lock_held(s); \
+ seqprop_assert(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+ do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
} while (0)
-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- raw_write_seqcount_t_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
* write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * write_seqcount_begin opens a write side critical section of the given
- * seqcount_t.
- *
- * Context: seqcount_t write side critical sections must be serialized and
- * non-preemptible. If readers can be invoked from hardirq or softirq
+ * Context: sequence counter write side sections must be serialized and
+ * non-preemptible. Preemption will be automatically disabled if and
+ * only if the seqcount write serialization lock is associated and
+ * preemptible. If readers can be invoked from hardirq or softirq
* context, interrupts or bottom halves must be respectively disabled.
*/
#define write_seqcount_begin(s) \
do { \
- __seqcount_assert_lock_held(s); \
+ seqprop_assert(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- write_seqcount_t_begin(__seqcount_ptr(s)); \
+ do_write_seqcount_begin(seqprop_ptr(s)); \
} while (0)
-static inline void write_seqcount_t_begin(seqcount_t *s)
+static inline void do_write_seqcount_begin(seqcount_t *s)
{
- write_seqcount_t_begin_nested(s, 0);
+ do_write_seqcount_begin_nested(s, 0);
}
/**
* write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * The write section must've been opened with write_seqcount_begin().
+ * Context: Preemption will be automatically re-enabled if and only if
+ * the seqcount write serialization lock is associated and preemptible.
*/
#define write_seqcount_end(s) \
do { \
- write_seqcount_t_end(__seqcount_ptr(s)); \
+ do_write_seqcount_end(seqprop_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
-static inline void write_seqcount_t_end(seqcount_t *s)
+static inline void do_write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, _RET_IP_);
- raw_write_seqcount_t_end(s);
+ do_raw_write_seqcount_end(s);
}
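
[Illustration, not part of this patch: the matching write side for the reader sketched earlier. For a plain seqcount_t the caller must serialize writers and keep the section non-preemptible; with a seqcount_LOCKNAME_t, write_seqcount_begin() additionally asserts the associated lock as shown above.]

	static void write_data(int x, int y)
	{
		write_seqcount_begin(&data_seq);
		data_x = x;
		data_y = y;
		write_seqcount_end(&data_seq);
	}
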
/**
* raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* This can be used to provide an ordering guarantee instead of the usual
* consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -527,7 +528,7 @@ static inline void write_seqcount_t_end(seqcount_t *s)
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
- * neither writes before and after the barrier are enclosed in a seq-writer
+ * neither writes before nor after the barrier are enclosed in a seq-writer
* critical section that would ensure readers are aware of ongoing writes::
*
* seqcount_t seq;
@@ -557,9 +558,9 @@ static inline void write_seqcount_t_end(seqcount_t *s)
* }
*/
#define raw_write_seqcount_barrier(s) \
- raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+ do_raw_write_seqcount_barrier(seqprop_ptr(s))
-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -571,15 +572,15 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
/**
* write_seqcount_invalidate() - invalidate in-progress seqcount_t read
* side operations
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* After write_seqcount_invalidate, no seqcount_t read side operations
* will complete successfully and see data older than this.
*/
#define write_seqcount_invalidate(s) \
- write_seqcount_t_invalidate(__seqcount_ptr(s))
+ do_write_seqcount_invalidate(seqprop_ptr(s))
-static inline void write_seqcount_t_invalidate(seqcount_t *s)
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
kcsan_nestable_atomic_begin();
@@ -587,34 +588,71 @@ static inline void write_seqcount_t_invalidate(seqcount_t *s)
kcsan_nestable_atomic_end();
}
-/**
- * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+/*
+ * Latch sequence counters (seqcount_latch_t)
+ *
+ * A sequence counter variant where the counter even/odd value is used to
+ * switch between two copies of protected data. This allows the read path,
+ * typically NMIs, to safely interrupt the write side critical section.
*
- * Use seqcount_t latching to switch between two storage places protected
- * by a sequence counter. Doing so allows having interruptible, preemptible,
- * seqcount_t write side critical sections.
+ * As the write sections are fully preemptible, no special handling for
+ * PREEMPT_RT is needed.
+ */
+typedef struct {
+ seqcount_t seqcount;
+} seqcount_latch_t;
+
+/**
+ * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
+ * @seq_name: Name of the seqcount_latch_t instance
+ */
+#define SEQCNT_LATCH_ZERO(seq_name) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+}
+
+/**
+ * seqcount_latch_init() - runtime initializer for seqcount_latch_t
+ * @s: Pointer to the seqcount_latch_t instance
+ */
+#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
+
+/**
+ * raw_read_seqcount_latch() - pick even/odd latch data copy
+ * @s: Pointer to seqcount_latch_t
*
- * Check raw_write_seqcount_latch() for more details and a full reader and
- * writer usage example.
+ * See raw_write_seqcount_latch() for details and a full reader/writer
+ * usage example.
*
* Return: sequence counter raw value. Use the lowest bit as an index for
- * picking which data copy to read. The full counter value must then be
- * checked with read_seqcount_retry().
+ * picking which data copy to read. The full counter must then be checked
+ * with raw_read_seqcount_latch_retry().
*/
-#define raw_read_seqcount_latch(s) \
- raw_read_seqcount_t_latch(__seqcount_ptr(s))
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+{
+ /*
+ * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
+ * Due to the dependent load, a full smp_rmb() is not needed.
+ */
+ return READ_ONCE(s->seqcount.sequence);
+}
-static inline int raw_read_seqcount_t_latch(seqcount_t *s)
+/**
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * @s: Pointer to seqcount_latch_t
+ * @start: count, from raw_read_seqcount_latch()
+ *
+ * Return: true if a read section retry is required, else false
+ */
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
- /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- int seq = READ_ONCE(s->sequence); /* ^^^ */
- return seq;
+ smp_rmb();
+ return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}
/**
- * raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
+ * @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -633,7 +671,7 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
* The basic form is a data structure like::
*
* struct latch_struct {
- * seqcount_t seq;
+ * seqcount_latch_t seq;
* struct data_struct data[2];
* };
*
@@ -643,13 +681,13 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
* void latch_modify(struct latch_struct *latch, ...)
* {
* smp_wmb(); // Ensure that the last data[1] update is visible
- * latch->seq++;
+ * latch->seq.sequence++;
* smp_wmb(); // Ensure that the seqcount update is visible
*
* modify(latch->data[0], ...);
*
* smp_wmb(); // Ensure that the data[0] update is visible
- * latch->seq++;
+ * latch->seq.sequence++;
* smp_wmb(); // Ensure that the seqcount update is visible
*
* modify(latch->data[1], ...);
@@ -668,8 +706,8 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
- * // read_seqcount_retry() includes needed smp_rmb()
- * } while (read_seqcount_retry(&latch->seq, seq));
+ * // This includes needed smp_rmb()
+ * } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
@@ -688,39 +726,21 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)
* to miss an entire modification sequence, once it resumes it might
* observe the new entry.
*
- * NOTE:
+ * NOTE2:
*
 * When data is a dynamic data structure, one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
-#define raw_write_seqcount_latch(s) \
- raw_write_seqcount_t_latch(__seqcount_ptr(s))
-
-static inline void raw_write_seqcount_t_latch(seqcount_t *s)
+static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
- smp_wmb(); /* prior stores before incrementing "sequence" */
- s->sequence++;
- smp_wmb(); /* increment "sequence" before following stores */
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->seqcount.sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
}
-/*
- * Sequential locks (seqlock_t)
- *
- * Sequence counters with an embedded spinlock for writer serialization
- * and non-preemptibility.
- *
- * For more info, see:
- * - Comments on top of seqcount_t
- * - Documentation/locking/seqlock.rst
- */
-typedef struct {
- struct seqcount seqcount;
- spinlock_t lock;
-} seqlock_t;
-
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
- .seqcount = SEQCNT_ZERO(lockname), \
+ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
@@ -730,12 +750,12 @@ typedef struct {
*/
#define seqlock_init(sl) \
do { \
- seqcount_init(&(sl)->seqcount); \
spin_lock_init(&(sl)->lock); \
+ seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
} while (0)
/**
- * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t
+ * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
* @sl: Name of the seqlock_t instance
*/
#define DEFINE_SEQLOCK(sl) \
@@ -778,6 +798,12 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
return read_seqcount_retry(&sl->seqcount, start);
}
+/*
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
+ */
+
/**
* write_seqlock() - start a seqlock_t write side critical section
* @sl: Pointer to seqlock_t
@@ -794,7 +820,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
@@ -806,7 +832,7 @@ static inline void write_seqlock(seqlock_t *sl)
*/
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}
@@ -820,7 +846,7 @@ static inline void write_sequnlock(seqlock_t *sl)
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
@@ -833,7 +859,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}
@@ -847,7 +873,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
@@ -859,7 +885,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -868,7 +894,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_t_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
return flags;
}
@@ -897,7 +923,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_t_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
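
[Illustration, not part of this patch: typical seqlock_t usage with hypothetical names. The writer takes the embedded spinlock; readers retry on the sequence count.]

	static DEFINE_SEQLOCK(stats_lock);
	static u64 stats_packets;

	static u64 stats_read(void)
	{
		unsigned seq;
		u64 val;

		do {
			seq = read_seqbegin(&stats_lock);
			val = stats_packets;
		} while (read_seqretry(&stats_lock, seq));

		return val;
	}

	static void stats_inc(void)
	{
		write_seqlock(&stats_lock);
		stats_packets++;
		write_sequnlock(&stats_lock);
	}
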
diff --git a/include/linux/seqlock_api.h b/include/linux/seqlock_api.h
new file mode 100644
index 000000000000..be91e7d3b826
--- /dev/null
+++ b/include/linux/seqlock_api.h
@@ -0,0 +1 @@
+#include <linux/seqlock.h>
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
new file mode 100644
index 000000000000..dfdf43e3fa3d
--- /dev/null
+++ b/include/linux/seqlock_types.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SEQLOCK_TYPES_H
+#define __LINUX_SEQLOCK_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/mutex_types.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+typedef struct seqcount {
+ unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} seqcount_t;
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t;
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
+
+/*
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
+ *
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
+ */
+typedef struct {
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index 3cca2b8fac43..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
- SEQNO_FENCE_WAIT_GEQUAL,
- SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
- struct dma_fence base;
-
- const struct dma_fence_ops *ops;
- struct dma_buf *sync_buf;
- uint32_t seqno_ofs;
- enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
- if (fence->ops != &seqno_fence_ops)
- return NULL;
- return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it's
- * destroyed, but actual lifetime of sync_buf may be longer if one of the
- * callers take a reference to it.
- *
- * Certain hardware have instructions to insert this type of wait condition
- * in the command stream, so no intervention from software would be needed.
- * This type of fence can be destroyed before completed, however a reference
- * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
- * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
- * device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, it doesn't have to be
- * done as soon as possible, just before there's any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
- struct dma_buf *sync_buf, uint32_t context,
- uint32_t seqno_ofs, uint32_t seqno,
- enum seqno_fence_condition cond,
- const struct dma_fence_ops *ops)
-{
- BUG_ON(!fence || !sync_buf || !ops);
- BUG_ON(!ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
-
- /*
- * ops is used in dma_fence_init for get_driver_name, so needs to be
- * initialized first
- */
- fence->ops = ops;
- dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
- get_dma_buf(sync_buf);
- fence->sync_buf = sync_buf;
- fence->seqno_ofs = seqno_ofs;
- fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 9f14f9c12ec4..ff78efc1f60d 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/delay.h>
@@ -25,7 +27,7 @@ struct serdev_device;
* not sleep.
*/
struct serdev_device_ops {
- int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ size_t (*receive_buf)(struct serdev_device *, const u8 *, size_t);
void (*write_wakeup)(struct serdev_device *);
};
@@ -80,7 +82,7 @@ enum serdev_parity {
* serdev controller structures
*/
struct serdev_controller_ops {
- int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
@@ -91,17 +93,20 @@ struct serdev_controller_ops {
void (*wait_until_sent)(struct serdev_controller *, long);
int (*get_tiocm)(struct serdev_controller *);
int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
+ int (*break_ctl)(struct serdev_controller *ctrl, unsigned int break_state);
};
/**
* struct serdev_controller - interface to the serdev controller
* @dev: Driver model representation of the device.
+ * @host: Serial port hardware controller device
* @nr: number identifier for this controller/bus.
* @serdev: Pointer to slave device for this controller.
* @ops: Controller operations.
*/
struct serdev_controller {
struct device dev;
+ struct device *host;
unsigned int nr;
struct serdev_device *serdev;
const struct serdev_controller_ops *ops;
@@ -164,7 +169,9 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *);
int serdev_device_add(struct serdev_device *);
void serdev_device_remove(struct serdev_device *);
-struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+ struct device *parent,
+ size_t size);
int serdev_controller_add(struct serdev_controller *);
void serdev_controller_remove(struct serdev_controller *);
@@ -178,9 +185,9 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
serdev->ops->write_wakeup(serdev);
}
-static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
- const unsigned char *data,
- size_t count)
+static inline size_t serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const u8 *data,
+ size_t count)
{
struct serdev_device *serdev = ctrl->serdev;
@@ -197,12 +204,13 @@ void serdev_device_close(struct serdev_device *);
int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+int serdev_device_write_buf(struct serdev_device *, const u8 *, size_t);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
+int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
+ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -240,7 +248,7 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
static inline int serdev_device_write_buf(struct serdev_device *serdev,
- const unsigned char *buf,
+ const u8 *buf,
size_t count)
{
return -ENODEV;
@@ -248,14 +256,19 @@ static inline int serdev_device_write_buf(struct serdev_device *serdev,
static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
- size_t count, unsigned long timeout)
+static inline int serdev_device_break_ctl(struct serdev_device *serdev, int break_state)
+{
+ return -EOPNOTSUPP;
+}
+static inline ssize_t serdev_device_write(struct serdev_device *sdev,
+ const u8 *buf, size_t count,
+ unsigned long timeout)
{
return -ENODEV;
}
@@ -278,18 +291,10 @@ static inline bool serdev_device_get_cts(struct serdev_device *serdev)
static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms)
{
- unsigned long timeout;
bool signal;
- timeout = jiffies + msecs_to_jiffies(timeout_ms);
- while (time_is_after_jiffies(timeout)) {
- signal = serdev_device_get_cts(serdev);
- if (signal == state)
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return readx_poll_timeout(serdev_device_get_cts, serdev, signal, signal == state,
+ 2000, timeout_ms * 1000);
}
static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable)
@@ -311,11 +316,13 @@ struct tty_driver;
#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx);
int serdev_tty_port_unregister(struct tty_port *port);
#else
static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx)
{
@@ -327,4 +334,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port)
}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+struct acpi_resource;
+struct acpi_resource_uart_serialbus;
+
+#ifdef CONFIG_ACPI
+bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart);
+#else
+static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
+
#endif /*_LINUX_SERDEV_H */
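
[Illustration, not part of this patch: a client implementing the new size_t-based receive_buf signature. All names are hypothetical.]

	static size_t demo_receive_buf(struct serdev_device *serdev,
				       const u8 *data, size_t count)
	{
		/* Consume the data; return how many bytes were accepted. */
		return count;
	}

	static void demo_write_wakeup(struct serdev_device *serdev)
	{
		/* Resume any transmit work deferred for lack of write room. */
	}

	static const struct serdev_device_ops demo_serdev_ops = {
		.receive_buf	= demo_receive_buf,
		.write_wakeup	= demo_write_wakeup,
	};
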
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 0916107c77f9..bfda927dde15 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -9,9 +9,29 @@
#ifndef _LINUX_SERIAL_H
#define _LINUX_SERIAL_H
-#include <asm/page.h>
#include <uapi/linux/serial.h>
+#include <uapi/linux/serial_reg.h>
+#define UART_IER_ALL_INTR (UART_IER_MSI | \
+ UART_IER_RLSI | \
+ UART_IER_THRI | \
+ UART_IER_RDI)
+
+/* Helper for dealing with UART_LCR_WLEN* defines */
+#define UART_LCR_WLEN(x) ((x) - 5)
+
+/* FIFO and shifting register empty */
+#define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline bool uart_lsr_tx_empty(u16 lsr)
+{
+ return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
+}
+
+#define UART_MSR_STATUS_BITS (UART_MSR_DCD | \
+ UART_MSR_RI | \
+ UART_MSR_DSR | \
+ UART_MSR_CTS)
/*
* Counters of the input lines (CTS, DSR, RI, CD) interrupts
@@ -23,11 +43,6 @@ struct async_icount {
__u32 buf_overrun;
};
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE PAGE_SIZE
-
#include <linux/compiler.h>
#endif /* _LINUX_SERIAL_H */
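
[Illustration, not part of this patch: using the new uart_lsr_tx_empty() helper, assuming the port's LSR is read through its serial_in() hook.]

	static bool demo_tx_done(struct uart_port *port)
	{
		u16 lsr = port->serial_in(port, UART_LSR);

		/* True only once both the FIFO and the shift register drained. */
		return uart_lsr_tx_empty(lsr);
	}
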
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 2b70f736b091..fd59ed2cca53 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -7,32 +7,52 @@
#ifndef _LINUX_SERIAL_8250_H
#define _LINUX_SERIAL_8250_H
+#include <linux/errno.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
+struct uart_8250_port;
+
/*
* This is the platform device platform_data structure
+ *
+ * @mapsize: Port size for ioremap()
+ * @bugs: Port bugs
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *up)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *up, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
struct plat_serial8250_port {
unsigned long iobase; /* io base address */
void __iomem *membase; /* ioremap cookie or NULL */
resource_size_t mapbase; /* resource base */
+ resource_size_t mapsize;
+ unsigned int uartclk; /* UART clock rate */
unsigned int irq; /* interrupt number */
unsigned long irqflags; /* request_irq flags */
- unsigned int uartclk; /* UART clock rate */
void *private_data;
unsigned char regshift; /* register shift */
unsigned char iotype; /* UPIO_* */
unsigned char hub6;
unsigned char has_sysrq; /* supports magic SysRq */
- upf_t flags; /* UPF_* flags */
unsigned int type; /* If UPF_FIXED_TYPE */
+ upf_t flags; /* UPF_* flags */
+ u16 bugs; /* port bugs */
unsigned int (*serial_in)(struct uart_port *, int);
void (*serial_out)(struct uart_port *, int, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -74,6 +94,7 @@ struct uart_8250_port;
struct uart_8250_ops {
int (*setup_irq)(struct uart_8250_port *);
void (*release_irq)(struct uart_8250_port *);
+ void (*setup_timer)(struct uart_8250_port *);
};
struct uart_8250_em485 {
@@ -89,23 +110,29 @@ struct uart_8250_em485 {
* their own 8250 ports without registering their own
* platform device. Using these will make your driver
* dependent on the 8250 driver.
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *port)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *port, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
-
struct uart_8250_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
u32 capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- bool fifo_bug; /* min RX trigger if enabled */
+ u16 bugs; /* port bugs */
unsigned int tx_loadsz; /* transmit fifo load size */
unsigned char acr;
unsigned char fcr;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
unsigned char canary; /* non-zero during system sleep
@@ -121,7 +148,8 @@ struct uart_8250_port {
* be immediately processed.
*/
#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
+ u16 lsr_saved_flags;
+ u16 lsr_save_mask;
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -129,8 +157,8 @@ struct uart_8250_port {
const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
- int (*dl_read)(struct uart_8250_port *);
- void (*dl_write)(struct uart_8250_port *, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
struct uart_8250_em485 *em485;
void (*rs485_start_tx)(struct uart_8250_port *);
@@ -146,34 +174,30 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
return container_of(up, struct uart_8250_port, port);
}
-int serial8250_register_8250_port(struct uart_8250_port *);
+int serial8250_register_8250_port(const struct uart_8250_port *);
void serial8250_unregister_port(int line);
void serial8250_suspend_port(int line);
void serial8250_resume_port(int line);
-extern int early_serial_setup(struct uart_port *port);
-
-extern int early_serial8250_setup(struct earlycon_device *device,
- const char *options);
-extern void serial8250_update_uartclk(struct uart_port *port,
- unsigned int uartclk);
-extern void serial8250_do_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old);
-extern void serial8250_do_set_ldisc(struct uart_port *port,
- struct ktermios *termios);
-extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
-extern int serial8250_do_startup(struct uart_port *port);
-extern void serial8250_do_shutdown(struct uart_port *port);
-extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate);
-extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
-extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot,
- unsigned int quot_frac);
-extern int fsl8250_handle_irq(struct uart_port *port);
+int early_serial_setup(struct uart_port *port);
+int early_serial8250_setup(struct earlycon_device *device, const char *options);
+
+void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk);
+void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old);
+void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios);
+unsigned int serial8250_do_get_mctrl(struct uart_port *port);
+int serial8250_do_startup(struct uart_port *port);
+void serial8250_do_shutdown(struct uart_port *port);
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate);
+void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
+void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac);
+int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr);
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
void serial8250_init_port(struct uart_8250_port *up);
@@ -183,8 +207,21 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
int serial8250_console_exit(struct uart_port *port);
-extern void serial8250_set_isa_configurator(void (*v)
- (int port, struct uart_port *up,
- u32 *capabilities));
+void serial8250_set_isa_configurator(void (*v)(int port, struct uart_port *up,
+ u32 *capabilities));
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+extern int hp300_setup_serial_console(void) __init;
+#else
+static inline int hp300_setup_serial_console(void) { return 0; }
+#endif
+
+#ifdef CONFIG_SERIAL_8250_RT288X
+int rt288x_setup(struct uart_port *p);
+int au_platform_setup(struct plat_serial8250_port *p);
+#else
+static inline int rt288x_setup(struct uart_port *p) { return -ENODEV; }
+static inline int au_platform_setup(struct plat_serial8250_port *p) { return -ENODEV; }
+#endif
#endif
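
[Illustration, not part of this patch: a divisor-latch pair matching the widened u32 dl_read()/dl_write() hooks, using the standard DLL/DLM registers. This sketch assumes the caller has already set UART_LCR_DLAB; names are hypothetical.]

	static u32 demo_dl_read(struct uart_8250_port *up)
	{
		struct uart_port *port = &up->port;

		/* Low byte from DLL, high byte from DLM. */
		return port->serial_in(port, UART_DLL) |
		       port->serial_in(port, UART_DLM) << 8;
	}

	static void demo_dl_write(struct uart_8250_port *up, u32 value)
	{
		struct uart_port *port = &up->port;

		port->serial_out(port, UART_DLL, value & 0xff);
		port->serial_out(port, UART_DLM, (value >> 8) & 0xff);
	}
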
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 01fc4d9c9c54..0a0f6e21d40e 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -28,12 +28,348 @@
struct uart_port;
struct serial_struct;
+struct serial_port_device;
struct device;
struct gpio_desc;
-/*
+/**
+ * struct uart_ops -- interface between serial_core and the driver
+ *
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/driver-api/serial/driver.rst for details.
+ * physical hardware.
+ *
+ * @tx_empty: ``unsigned int ()(struct uart_port *port)``
+ *
+ * This function tests whether the transmitter fifo and shifter for the
+ * @port is empty. If it is empty, this function should return
+ * %TIOCSER_TEMT, otherwise return 0. If the port does not support this
+ * operation, then it should return %TIOCSER_TEMT.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)``
+ *
+ * This function sets the modem control lines for @port to the state
+ * described by @mctrl. The relevant bits of @mctrl are:
+ *
+ * - %TIOCM_RTS RTS signal.
+ * - %TIOCM_DTR DTR signal.
+ * - %TIOCM_OUT1 OUT1 signal.
+ * - %TIOCM_OUT2 OUT2 signal.
+ * - %TIOCM_LOOP Set the port into loopback mode.
+ *
+ * If the appropriate bit is set, the signal should be driven
+ * active. If the bit is clear, the signal should be driven
+ * inactive.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @get_mctrl: ``unsigned int ()(struct uart_port *port)``
+ *
+ * Returns the current state of modem control inputs of @port. The state
+ * of the outputs should not be returned, since the core keeps track of
+ * their state. The state information should include:
+ *
+ * - %TIOCM_CAR state of DCD signal
+ * - %TIOCM_CTS state of CTS signal
+ * - %TIOCM_DSR state of DSR signal
+ * - %TIOCM_RI state of RI signal
+ *
+ * The bit is set if the signal is currently driven active. If
+ * the port does not support CTS, DCD or DSR, the driver should
+ * indicate that the signal is permanently active. If RI is
+ * not available, the signal should not be indicated as active.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_tx: ``void ()(struct uart_port *port)``
+ *
+ * Stop transmitting characters. This might be due to the CTS line
+ * becoming inactive or the tty layer indicating we want to stop
+ * transmission due to an %XOFF character.
+ *
+ * The driver should stop transmitting characters as soon as possible.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @start_tx: ``void ()(struct uart_port *port)``
+ *
+ * Start transmitting characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @throttle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that input buffers for the line discipline are
+ * close to full, and it should somehow signal that no more characters
+ * should be sent to the serial port.
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @unthrottle() and termios modification by the
+ * tty layer.
+ *
+ * @unthrottle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that characters can now be sent to the serial
+ * port without fear of overrunning the input buffers of the line
+ * disciplines.
+ *
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @throttle() and termios modification by the
+ * tty layer.
+ *
+ * @send_xchar: ``void ()(struct uart_port *port, char ch)``
+ *
+ * Transmit a high priority character, even if the port is stopped. This
+ * is used to implement XON/XOFF flow control and tcflow(). If the serial
+ * driver does not implement this function, the tty core will append the
+ * character to the circular buffer and then call start_tx() / stop_tx()
+ * to flush the data out.
+ *
+ * Do not transmit if @ch == '\0' (%__DISABLED_CHAR).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @start_rx: ``void ()(struct uart_port *port)``
+ *
+ * Start receiving characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_rx: ``void ()(struct uart_port *port)``
+ *
+ * Stop receiving characters; the @port is in the process of being closed.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @enable_ms: ``void ()(struct uart_port *port)``
+ *
+ * Enable the modem status interrupts.
+ *
+ * This method may be called multiple times. Modem status interrupts
+ * should be disabled when the @shutdown() method is called.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @break_ctl: ``void ()(struct uart_port *port, int ctl)``
+ *
+ * Control the transmission of a break signal. If @ctl is nonzero, the
+ * break signal should be transmitted. The signal should be terminated
+ * when another call is made with a zero @ctl.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @startup: ``int ()(struct uart_port *port)``
+ *
+ * Grab any interrupt resources and initialise any low level driver state.
+ * Enable the port for reception. It should not activate RTS nor DTR;
+ * this will be done via a separate call to @set_mctrl().
+ *
+ * This method will only be called when the port is initially opened.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: globally disabled.
+ *
+ * @shutdown: ``void ()(struct uart_port *port)``
+ *
+ * Disable the @port, disable any break condition that may be in effect,
+ * and free any interrupt resources. It should not disable RTS nor DTR;
+ * this will have already been done via a separate call to @set_mctrl().
+ *
+ * Drivers must not access @port->state once this call has completed.
+ *
+ * This method will only be called when there are no more users of this
+ * @port.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: caller dependent.
+ *
+ * @flush_buffer: ``void ()(struct uart_port *port)``
+ *
+ * Flush any write buffers, reset any DMA state and stop any ongoing DMA
+ * transfers.
+ *
+ * This will be called whenever the @port->state->xmit circular buffer is
+ * cleared.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new,
+ * struct ktermios *old)``
+ *
+ * Change the @port parameters, including word length, parity, stop bits.
+ * Update @port->read_status_mask and @port->ignore_status_mask to
+ * indicate the types of events we are interested in receiving. Relevant
+ * ktermios::c_cflag bits are:
+ *
+ * - %CSIZE - word size
+ * - %CSTOPB - 2 stop bits
+ * - %PARENB - parity enable
+ * - %PARODD - odd parity (when %PARENB is in force)
+ * - %ADDRB - address bit (changed through uart_port::rs485_config()).
+ * - %CREAD - enable reception of characters (if not set, still receive
+ * characters from the port, but throw them away).
+ * - %CRTSCTS - if set, enable CTS status change reporting.
+ * - %CLOCAL - if not set, enable modem status change reporting.
+ *
+ * Relevant ktermios::c_iflag bits are:
+ *
+ * - %INPCK - enable frame and parity error events to be passed to the TTY
+ * layer.
+ * - %BRKINT / %PARMRK - both of these enable break events to be passed to
+ * the TTY layer.
+ * - %IGNPAR - ignore parity and framing errors.
+ * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun
+ * errors as well.
+ *
+ * The interaction of the ktermios::c_iflag bits is as follows (parity
+ * error given as an example):
+ *
+ * ============ ======= ======= =========================================
+ * Parity error  INPCK   IGNPAR  Result
+ * ============ ======= ======= =========================================
+ * n/a 0 n/a character received, marked as %TTY_NORMAL
+ * None 1 n/a character received, marked as %TTY_NORMAL
+ * Yes 1 0 character received, marked as %TTY_PARITY
+ * Yes 1 1 character discarded
+ * ============ ======= ======= =========================================
+ *
+ * Other flags may be used (e.g., xon/xoff characters) if the hardware
+ * supports hardware-assisted "soft" flow control.
+ *
+ * Locking: caller holds tty_port->mutex.
+ * Interrupts: caller dependent.
+ * This call must not sleep.
+ *
+ * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)``
+ *
+ * Notifier for line discipline change. See
+ * Documentation/driver-api/tty/tty_ldisc.rst.
+ *
+ * Locking: caller holds tty_port->mutex.
+ *
+ * @pm: ``void ()(struct uart_port *port, unsigned int state,
+ * unsigned int oldstate)``
+ *
+ * Perform any power management related activities on the specified @port.
+ * @state indicates the new state (defined by enum uart_pm_state), and
+ * @oldstate indicates the previous state.
+ *
+ * This function should not be used to grab any resources.
+ *
+ * This will be called when the @port is initially opened and finally
+ * closed, except when the @port is also the system console. This will
+ * occur even if %CONFIG_PM is not set.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @type: ``const char *()(struct uart_port *port)``
+ *
+ * Return a pointer to a string constant describing the specified @port,
+ * or return %NULL, in which case the string 'unknown' is substituted.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @release_port: ``void ()(struct uart_port *port)``
+ *
+ * Release any memory and IO region resources currently in use by the
+ * @port.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @request_port: ``int ()(struct uart_port *port)``
+ *
+ * Request any memory and IO region resources required by the port. If any
+ * fail, no resources should be registered when this function returns, and
+ * it should return -%EBUSY on failure.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @config_port: ``void ()(struct uart_port *port, int type)``
+ *
+ * Perform any autoconfiguration steps required for the @port. @type
+ * contains a bit mask of the required configuration. %UART_CONFIG_TYPE
+ * indicates that the port requires detection and identification.
+ * @port->type should be set to the type found, or %PORT_UNKNOWN if no
+ * port was detected.
+ *
+ * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
+ * which should be probed using standard kernel autoprobing techniques.
+ * This is not necessary on platforms where ports have interrupts
+ * internally hard-wired (e.g., system-on-a-chip implementations).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @verify_port: ``int ()(struct uart_port *port,
+ * struct serial_struct *serinfo)``
+ *
+ * Verify the new serial port information contained within @serinfo is
+ * suitable for this port type.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Perform any port specific IOCTLs. IOCTL commands must be defined using
+ * the standard numbering system found in <asm/ioctl.h>.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @poll_init: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to perform the minimal hardware initialization needed to
+ * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this
+ * should not request interrupts.
+ *
+ * Locking: %tty_mutex and tty_port->mutex taken.
+ * Interrupts: n/a.
+ *
+ * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)``
+ *
+ * Called by kgdb to write a single character @ch directly to the serial
+ * @port. It can and should busy-wait until there is space in the TX FIFO.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep.
+ *
+ * @poll_get_char: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to read a single character directly from the serial
+ * @port. If data is available, it should be returned; otherwise the
+ * function should return %NO_POLL_CHAR immediately.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep.
*/
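
To make the @set_termios() mask handling documented above concrete, here is a
minimal sketch of how a driver might derive the two masks. The MYUART_STAT_*
bits are hypothetical placeholders for a driver's own receive-status flags:

	static void myuart_set_termios(struct uart_port *port, struct ktermios *termios,
				       const struct ktermios *old)
	{
		/* Events to be reported to the TTY layer */
		port->read_status_mask = MYUART_STAT_OVERRUN;	/* hypothetical bits */
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= MYUART_STAT_FRAME | MYUART_STAT_PARITY;
		if (termios->c_iflag & (BRKINT | PARMRK))
			port->read_status_mask |= MYUART_STAT_BREAK;

		/* Events to be dropped silently, per the table above */
		port->ignore_status_mask = 0;
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= MYUART_STAT_FRAME | MYUART_STAT_PARITY;
		if (termios->c_iflag & IGNBRK) {
			port->ignore_status_mask |= MYUART_STAT_BREAK;
			/* Ignoring parity and break implies ignoring overruns too */
			if (termios->c_iflag & IGNPAR)
				port->ignore_status_mask |= MYUART_STAT_OVERRUN;
		}
	}
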
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
@@ -45,32 +381,19 @@ struct uart_ops {
void (*unthrottle)(struct uart_port *);
void (*send_xchar)(struct uart_port *, char ch);
void (*stop_rx)(struct uart_port *);
+ void (*start_rx)(struct uart_port *);
void (*enable_ms)(struct uart_port *);
void (*break_ctl)(struct uart_port *, int ctl);
int (*startup)(struct uart_port *);
void (*shutdown)(struct uart_port *);
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
-
- /*
- * Return a string describing the type of the port
- */
const char *(*type)(struct uart_port *);
-
- /*
- * Release IO and memory resources used by the port.
- * This includes iounmap if necessary.
- */
void (*release_port)(struct uart_port *);
-
- /*
- * Request IO and memory resources used by the port.
- * This includes iomapping the port if necessary.
- */
int (*request_port)(struct uart_port *);
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
@@ -100,7 +423,7 @@ struct uart_icount {
__u32 buf_overrun;
};
-typedef unsigned int __bitwise upf_t;
+typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
struct uart_port {
@@ -111,7 +434,7 @@ struct uart_port {
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -132,18 +455,22 @@ struct uart_port {
unsigned int old);
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
+ struct ktermios *termios,
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
+ unsigned int ctrl_id; /* optional serial core controller id */
+ unsigned int port_id; /* optional serial core port id */
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
unsigned int fifosize; /* tx fifo size */
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
+
unsigned char iotype; /* io access style */
- unsigned char quirks; /* internal quirks */
+#define UPIO_UNKNOWN ((unsigned char)~0U) /* UCHAR_MAX */
#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
#define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */
@@ -153,7 +480,9 @@ struct uart_port {
#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */
- /* quirks must be updated while holding port mutex */
+ unsigned char quirks; /* internal quirks */
+
+ /* internal quirks must be updated while holding port mutex */
#define UPQ_NO_TXEN_TEST BIT(0)
unsigned int read_status_mask; /* driver specific */
@@ -171,7 +500,7 @@ struct uart_port {
* assigned from the serial_struct flags in uart_set_info()
* [for bit definitions in the UPF_CHANGE_MASK]
*
- * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
+ * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
@@ -190,23 +519,24 @@ struct uart_port {
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
+#define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19))
/* Port has hardware-assisted h/w flow control */
-#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
-#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20))
+#define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21))
#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
-#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
-#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
-#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
-#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
-#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
+#define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22))
+#define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23))
+#define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24))
+#define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25))
+#define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26))
/* The exact UART type is known and should not be probed. */
-#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
-#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
-#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
-#define UPF_DEAD ((__force upf_t) (1 << 30))
-#define UPF_IOREMAP ((__force upf_t) (1 << 31))
+#define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27))
+#define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28))
+#define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29))
+#define UPF_DEAD ((__force upf_t) BIT_ULL(30))
+#define UPF_IOREMAP ((__force upf_t) BIT_ULL(31))
+#define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32))
#define __UPF_CHANGE_MASK 0x17fff
#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
@@ -229,9 +559,9 @@ struct uart_port {
#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5))
- int hw_stopped; /* sw-assisted CTS flow state */
+ bool hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
+ unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
unsigned int custom_divisor;
@@ -239,24 +569,107 @@ struct uart_port {
unsigned int minor;
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
- struct device *dev; /* parent device */
+ struct device *dev; /* serial port physical parent device */
+ struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
- unsigned int sysrq_ch; /* char for sysrq */
+ u8 sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
+ unsigned char console_reinit;
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
+ struct gpio_desc *rs485_rx_during_tx_gpio; /* Output GPIO that sets the state of RS485 RX during TX */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
+/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock(struct uart_port *up)
+{
+ spin_lock(&up->lock);
+}
+
+/**
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+ spin_lock_irq(&up->lock);
+}
+
+/**
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ */
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_trylock - Try to lock the UART port
+ * @up: Pointer to UART port structure
+ *
+ * Returns: true if the lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+ return spin_trylock(&up->lock);
+}
+
+/**
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ *
+ * Returns: true if the lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ return spin_trylock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_unlock - Unlock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock(struct uart_port *up)
+{
+ spin_unlock(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+ spin_unlock_irq(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+ * @up: Pointer to UART port structure
+ * @flags: The saved interrupt flags for restore
+ */
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
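
As a usage sketch (not part of the patch): a driver routine running in process
context, while the port interrupt may fire concurrently, pairs the
irqsave/irqrestore variants. MYUART_IER is a hypothetical register offset:

	static void myuart_update_ier(struct uart_port *port, u8 bits)
	{
		unsigned long flags;

		uart_port_lock_irqsave(port, &flags);	/* note: flags passed by pointer */
		writeb(bits, port->membase + MYUART_IER);
		uart_port_unlock_irqrestore(port, flags);	/* flags passed by value */
	}
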
+
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
@@ -299,6 +712,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of the circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
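
A hedged usage sketch: a DMA TX-complete callback (function name hypothetical)
that has just pushed @count bytes out of the circular buffer could account for
them and wake writers as follows:

	static void myuart_dma_tx_complete(struct uart_port *port, unsigned int count)
	{
		uart_port_lock(port);
		uart_xmit_advance(port, count);		/* move tail, bump icount.tx */
		if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
			uart_write_wakeup(port);
		uart_port_unlock(port);
	}
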
+
struct module;
struct tty_driver;
@@ -321,20 +751,136 @@ struct uart_driver {
void uart_write_wakeup(struct uart_port *port);
+/**
+ * enum UART_TX_FLAGS -- flags for uart_port_tx_flags()
+ *
+ * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer
+ */
+enum UART_TX_FLAGS {
+ UART_TX_NOSTOP = BIT(0),
+};
+
+#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \
+ for_test, for_post) \
+({ \
+ struct uart_port *__port = (uport); \
+ struct circ_buf *xmit = &__port->state->xmit; \
+ unsigned int pending; \
+ \
+ for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \
+ if (__port->x_char) { \
+ (ch) = __port->x_char; \
+ (put_char); \
+ __port->x_char = 0; \
+ continue; \
+ } \
+ \
+ if (uart_circ_empty(xmit) || uart_tx_stopped(__port)) \
+ break; \
+ \
+ (ch) = xmit->buf[xmit->tail]; \
+ (put_char); \
+ xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; \
+ } \
+ \
+ (tx_done); \
+ \
+ pending = uart_circ_chars_pending(xmit); \
+ if (pending < WAKEUP_CHARS) { \
+ uart_write_wakeup(__port); \
+ \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \
+ __port->ops->tx_empty(__port)) \
+ __port->ops->stop_tx(__port); \
+ } \
+ \
+ pending; \
+})
+
+/**
+ * uart_port_tx_limited -- transmit helper for uart_port with count limiting
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * This helper transmits characters from the xmit buffer to the hardware using
+ * @put_char(). It does so until @count characters are sent and while @tx_ready
+ * evaluates to true.
+ *
+ * Returns: the number of characters in the xmit buffer when done.
+ *
+ * The expressions in the macro parameters shall be designed as follows:
+ * * **tx_ready:** should evaluate to true if the HW can accept more data to
+ * be sent. This parameter can be %true, which means the HW is always ready.
+ * * **put_char:** shall write @ch to the device of @port.
+ * * **tx_done:** when the write loop is done, this can perform an arbitrary
+ * action before the potential invocation of ops->stop_tx() happens. If the
+ * driver does not need to do anything, use e.g. ({}).
+ *
+ * For all of them, @port->lock is held, interrupts are locally disabled and
+ * the expressions must not sleep.
+ */
+#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
+
+/**
+ * uart_port_tx -- transmit helper for uart_port
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx(port, ch, tx_ready, put_char) \
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({}))
+
+/**
+ * uart_port_tx_flags -- transmit helper for uart_port with flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({}))
/*
* Baud rate helpers.
*/
void uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud);
unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min,
+ const struct ktermios *old, unsigned int min,
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
+/*
+ * Calculates FIFO drain time.
+ */
+static inline unsigned long uart_fifo_timeout(struct uart_port *port)
+{
+ u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;
+
+ /* Add 0.02 seconds of slop */
+ fifo_timeout += 20 * NSEC_PER_MSEC;
+
+ return max(nsecs_to_jiffies(fifo_timeout), 1UL);
+}
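
For example (a sketch, with a hypothetical helper name): a driver that must
wait for the transmitter to drain before reconfiguring the port can bound its
busy-wait with this value:

	static void myuart_wait_for_drain(struct uart_port *port)
	{
		unsigned long deadline = jiffies + uart_fifo_timeout(port);

		/* tx_empty() returns non-zero (TIOCSER_TEMT) once the FIFO is empty */
		while (!port->ops->tx_empty(port) && time_before(jiffies, deadline))
			cpu_relax();
	}
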
+
/* Base timer interval for polling */
-static inline int uart_poll_timeout(struct uart_port *port)
+static inline unsigned long uart_poll_timeout(struct uart_port *port)
{
- int timeout = port->timeout;
+ unsigned long timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
@@ -345,7 +891,7 @@ static inline int uart_poll_timeout(struct uart_port *port)
struct earlycon_device {
struct console *con;
struct uart_port port;
- char options[16]; /* e.g., 115200n8 */
+ char options[32]; /* e.g., 115200n8 */
unsigned int baud;
};
@@ -356,8 +902,8 @@ struct earlycon_id {
int (*setup)(struct earlycon_device *, const char *options);
};
-extern const struct earlycon_id *__earlycon_table[];
-extern const struct earlycon_id *__earlycon_table_end[];
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
@@ -365,25 +911,18 @@ extern const struct earlycon_id *__earlycon_table_end[];
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
-#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
- static const struct earlycon_id unique_id \
- EARLYCON_USED_OR_UNUSED __initconst \
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
+ EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \
+ __aligned(__alignof__(struct earlycon_id)) \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }; \
- static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
- __section(__earlycon_table) \
- * const __PASTE(__p, unique_id) = &unique_id
-
-#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- _OF_EARLYCON_DECLARE(_name, compat, fn, \
- __UNIQUE_ID(__earlycon_##_name))
+ .setup = fn }
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
-extern int of_setup_earlycon(const struct earlycon_id *match,
- unsigned long node,
- const char *options);
+int of_setup_earlycon(const struct earlycon_id *match, unsigned long node,
+ const char *options);
#ifdef CONFIG_SERIAL_EARLYCON
extern bool earlycon_acpi_spcr_enable __initdata;
@@ -393,6 +932,17 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
+/* Variant of uart_console_registered() when the console_list_lock is held. */
+static inline bool uart_console_registered_locked(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered_locked(port->cons);
+}
+
+static inline bool uart_console_registered(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered(port->cons);
+}
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
@@ -404,7 +954,7 @@ int uart_set_options(struct uart_port *port, struct console *co, int baud,
struct tty_driver *uart_console_device(struct console *co, int *index);
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
- void (*putchar)(struct uart_port *, int));
+ void (*putchar)(struct uart_port *, unsigned char));
/*
* Port/driver registration/removal
@@ -412,8 +962,11 @@ void uart_console_write(struct uart_port *port, const char *s,
int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_match_port(struct uart_port *port1, struct uart_port *port2);
+void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
+int uart_read_port_properties(struct uart_port *port);
+int uart_read_and_validate_port_properties(struct uart_port *port);
+bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2);
/*
* Power Management
@@ -433,7 +986,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if ((tty && tty->stopped) || port->hw_stopped)
+ if ((tty && tty->flow.stopped) || port->hw_stopped)
return 1;
return 0;
}
@@ -454,20 +1007,20 @@ static inline bool uart_softcts_mode(struct uart_port *uport)
* The following are helper functions for the low level drivers.
*/
-extern void uart_handle_dcd_change(struct uart_port *uport,
- unsigned int status);
-extern void uart_handle_cts_change(struct uart_port *uport,
- unsigned int status);
+void uart_handle_dcd_change(struct uart_port *uport, bool active);
+void uart_handle_cts_change(struct uart_port *uport, bool active);
+
+void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, u8 ch, u8 flag);
-extern void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag);
+void uart_xchar_out(struct uart_port *uport, int offset);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
#define SYSRQ_TIMEOUT (HZ * 5)
-bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch);
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -486,7 +1039,7 @@ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -505,35 +1058,59 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c
return 0;
}
-static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock_irqrestore(&port->lock, irqflags);
+ uart_port_unlock(port);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock_irqrestore(&port->lock, irqflags);
+ uart_port_unlock(port);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ u8 sysrq_ch;
+
+ if (!port->has_sysrq) {
+ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
-static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+{
+ uart_port_unlock(port);
+}
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
{
- spin_unlock_irqrestore(&port->lock, irqflags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
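
The intended calling pattern for the sysrq helpers, sketched as a hypothetical
RX interrupt handler (the myuart_* accessors and MYUART_STAT_OVERRUN are
placeholders):

	static irqreturn_t myuart_rx_irq(int irq, void *dev_id)
	{
		struct uart_port *port = dev_id;
		u8 ch, status;

		uart_port_lock(port);
		while (myuart_rx_ready(port)) {		/* hypothetical */
			status = myuart_read_status(port);	/* hypothetical */
			ch = myuart_read_char(port);		/* hypothetical */
			if (uart_prepare_sysrq_char(port, ch))
				continue;
			uart_insert_char(port, status, MYUART_STAT_OVERRUN, ch, TTY_NORMAL);
		}
		tty_flip_buffer_push(&port->state->port);
		/* Drops port->lock, then handles any queued sysrq outside of it */
		uart_unlock_and_check_sysrq(port);
		return IRQ_HANDLED;
	}
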
diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h
deleted file mode 100644
index 619d748dcd44..000000000000
--- a/include/linux/serial_pnx8xxx.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Embedded Alley Solutions, source@embeddedalley.com.
- */
-
-#ifndef _LINUX_SERIAL_PNX8XXX_H
-#define _LINUX_SERIAL_PNX8XXX_H
-
-#include <linux/serial_core.h>
-
-#define PNX8XXX_NR_PORTS 2
-
-struct pnx8xxx_port {
- struct uart_port port;
- struct timer_list timer;
- unsigned int old_status;
-};
-
-/* register offsets */
-#define PNX8XXX_LCR 0
-#define PNX8XXX_MCR 0x004
-#define PNX8XXX_BAUD 0x008
-#define PNX8XXX_CFG 0x00c
-#define PNX8XXX_FIFO 0x028
-#define PNX8XXX_ISTAT 0xfe0
-#define PNX8XXX_IEN 0xfe4
-#define PNX8XXX_ICLR 0xfe8
-#define PNX8XXX_ISET 0xfec
-#define PNX8XXX_PD 0xff4
-#define PNX8XXX_MID 0xffc
-
-#define PNX8XXX_UART_LCR_TXBREAK (1<<30)
-#define PNX8XXX_UART_LCR_PAREVN 0x10000000
-#define PNX8XXX_UART_LCR_PAREN 0x08000000
-#define PNX8XXX_UART_LCR_2STOPB 0x04000000
-#define PNX8XXX_UART_LCR_8BIT 0x01000000
-#define PNX8XXX_UART_LCR_TX_RST 0x00040000
-#define PNX8XXX_UART_LCR_RX_RST 0x00020000
-#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000
-
-#define PNX8XXX_UART_MCR_SCR 0xFF000000
-#define PNX8XXX_UART_MCR_DCD 0x00800000
-#define PNX8XXX_UART_MCR_CTS 0x00100000
-#define PNX8XXX_UART_MCR_LOOP 0x00000010
-#define PNX8XXX_UART_MCR_RTS 0x00000002
-#define PNX8XXX_UART_MCR_DTR 0x00000001
-
-#define PNX8XXX_UART_INT_TX 0x00000080
-#define PNX8XXX_UART_INT_EMPTY 0x00000040
-#define PNX8XXX_UART_INT_RCVTO 0x00000020
-#define PNX8XXX_UART_INT_RX 0x00000010
-#define PNX8XXX_UART_INT_RXOVRN 0x00000008
-#define PNX8XXX_UART_INT_FRERR 0x00000004
-#define PNX8XXX_UART_INT_BREAK 0x00000002
-#define PNX8XXX_UART_INT_PARITY 0x00000001
-#define PNX8XXX_UART_INT_ALLRX 0x0000003F
-#define PNX8XXX_UART_INT_ALLTX 0x000000C0
-
-#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000
-#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16)
-#define PNX8XXX_UART_FIFO_RXBRK 0x00008000
-#define PNX8XXX_UART_FIFO_RXFE 0x00004000
-#define PNX8XXX_UART_FIFO_RXPAR 0x00002000
-#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00
-#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF
-
-#endif
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 463ed28d2b27..1672cf0810ef 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -83,7 +83,7 @@
#define S3C2410_UCON_RXIRQMODE (1<<0)
#define S3C2410_UCON_RXFIFO_TOI (1<<7)
#define S3C2443_UCON_RXERR_IRQEN (1<<6)
-#define S3C2443_UCON_LOOPBACK (1<<5)
+#define S3C2410_UCON_LOOPBACK (1<<5)
#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -246,6 +246,25 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+
+#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI)
+#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
+ APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
+
+#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
+#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+
#ifndef __ASSEMBLY__
#include <linux/serial_core.h>
@@ -254,7 +273,7 @@
* serial port
*
* the pointer is set up by the machine-specific initialisation from the
- * arch/arm/mach-s3c2410/ directory.
+ * arch/arm/mach-s3c/ directory.
*/
struct s3c2410_uartcfg {
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 6c27d413da92..7ca41af93b37 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -15,7 +15,7 @@
#include <linux/mod_devicetable.h>
#include <uapi/linux/serio.h>
-extern struct bus_type serio_bus;
+extern const struct bus_type serio_bus;
struct serio {
void *port_data;
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 860e0f843c12..95ac8398ee72 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -14,6 +14,16 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#ifndef set_memory_rox
+static inline int set_memory_rox(unsigned long addr, int numpages)
+{
+ int ret = set_memory_ro(addr, numpages);
+ if (ret)
+ return ret;
+ return set_memory_x(addr, numpages);
+}
+#endif
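
A usage sketch (hypothetical caller): where code previously called
set_memory_ro() and set_memory_x() back to back, e.g. when sealing a page of
generated code, the combined helper keeps the two-step ordering in one call:

	static int seal_generated_code(void *page_aligned_buf)
	{
		/* One page: made read-only first, then executable */
		return set_memory_rox((unsigned long)page_aligned_buf, 1);
	}
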
+
#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
static inline int set_direct_map_invalid_noflush(struct page *page)
{
@@ -23,16 +33,33 @@ static inline int set_direct_map_default_noflush(struct page *page)
{
return 0;
}
-#endif
-#ifndef set_mce_nospec
-static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+static inline bool kernel_page_present(struct page *page)
{
- return 0;
+ return true;
+}
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. ARM64, can disable direct map modifications at
+ * boot time. Let them override this query.
+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+ return true;
}
+#define can_set_direct_map can_set_direct_map
#endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-#ifndef clear_mce_nospec
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn);
+int clear_mce_nospec(unsigned long pfn);
+#else
+static inline int set_mce_nospec(unsigned long pfn)
+{
+ return 0;
+}
static inline int clear_mce_nospec(unsigned long pfn)
{
return 0;
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
deleted file mode 100644
index e0e1597ef9e6..000000000000
--- a/include/linux/sfi.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_H
-#define _LINUX_SFI_H
-
-#include <linux/init.h>
-#include <linux/types.h>
-
-/* Table signatures reserved by the SFI specification */
-#define SFI_SIG_SYST "SYST"
-#define SFI_SIG_FREQ "FREQ"
-#define SFI_SIG_IDLE "IDLE"
-#define SFI_SIG_CPUS "CPUS"
-#define SFI_SIG_MTMR "MTMR"
-#define SFI_SIG_MRTC "MRTC"
-#define SFI_SIG_MMAP "MMAP"
-#define SFI_SIG_APIC "APIC"
-#define SFI_SIG_XSDT "XSDT"
-#define SFI_SIG_WAKE "WAKE"
-#define SFI_SIG_DEVS "DEVS"
-#define SFI_SIG_GPIO "GPIO"
-
-#define SFI_SIGNATURE_SIZE 4
-#define SFI_OEM_ID_SIZE 6
-#define SFI_OEM_TABLE_ID_SIZE 8
-
-#define SFI_NAME_LEN 16
-
-#define SFI_SYST_SEARCH_BEGIN 0x000E0000
-#define SFI_SYST_SEARCH_END 0x000FFFFF
-
-#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
- ((ptable->header.len - sizeof(struct sfi_table_header)) / \
- (sizeof(entry_type)))
-/*
- * Table structures must be byte-packed to match the SFI specification,
- * as they are provided by the BIOS.
- */
-struct sfi_table_header {
- char sig[SFI_SIGNATURE_SIZE];
- u32 len;
- u8 rev;
- u8 csum;
- char oem_id[SFI_OEM_ID_SIZE];
- char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
-} __packed;
-
-struct sfi_table_simple {
- struct sfi_table_header header;
- u64 pentry[1];
-} __packed;
-
-/* Comply with UEFI spec 2.1 */
-struct sfi_mem_entry {
- u32 type;
- u64 phys_start;
- u64 virt_start;
- u64 pages;
- u64 attrib;
-} __packed;
-
-struct sfi_cpu_table_entry {
- u32 apic_id;
-} __packed;
-
-struct sfi_cstate_table_entry {
- u32 hint; /* MWAIT hint */
- u32 latency; /* latency in ms */
-} __packed;
-
-struct sfi_apic_table_entry {
- u64 phys_addr; /* phy base addr for APIC reg */
-} __packed;
-
-struct sfi_freq_table_entry {
- u32 freq_mhz; /* in MHZ */
- u32 latency; /* transition latency in ms */
- u32 ctrl_val; /* value to write to PERF_CTL */
-} __packed;
-
-struct sfi_wake_table_entry {
- u64 phys_addr; /* pointer to where the wake vector locates */
-} __packed;
-
-struct sfi_timer_table_entry {
- u64 phys_addr; /* phy base addr for the timer */
- u32 freq_hz; /* in HZ */
- u32 irq;
-} __packed;
-
-struct sfi_rtc_table_entry {
- u64 phys_addr; /* phy base addr for the RTC */
- u32 irq;
-} __packed;
-
-struct sfi_device_table_entry {
- u8 type; /* bus type, I2C, SPI or ...*/
-#define SFI_DEV_TYPE_SPI 0
-#define SFI_DEV_TYPE_I2C 1
-#define SFI_DEV_TYPE_UART 2
-#define SFI_DEV_TYPE_HSI 3
-#define SFI_DEV_TYPE_IPC 4
-#define SFI_DEV_TYPE_SD 5
-
- u8 host_num; /* attached to host 0, 1...*/
- u16 addr;
- u8 irq;
- u32 max_freq;
- char name[SFI_NAME_LEN];
-} __packed;
-
-struct sfi_gpio_table_entry {
- char controller_name[SFI_NAME_LEN];
- u16 pin_no;
- char pin_name[SFI_NAME_LEN];
-} __packed;
-
-typedef int (*sfi_table_handler) (struct sfi_table_header *table);
-
-#ifdef CONFIG_SFI
-extern void __init sfi_init(void);
-extern int __init sfi_platform_init(void);
-extern void __init sfi_init_late(void);
-extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
- sfi_table_handler handler);
-
-extern int sfi_disabled;
-static inline void disable_sfi(void)
-{
- sfi_disabled = 1;
-}
-
-#else /* !CONFIG_SFI */
-
-static inline void sfi_init(void)
-{
-}
-
-static inline void sfi_init_late(void)
-{
-}
-
-#define sfi_disabled 0
-
-static inline int sfi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- sfi_table_handler handler)
-{
- return -1;
-}
-
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_H*/
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
deleted file mode 100644
index a6e555cbe05c..000000000000
--- a/include/linux/sfi_acpi.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_ACPI_H
-#define _LINUX_SFI_ACPI_H
-
-#include <linux/acpi.h>
-#include <linux/sfi.h>
-
-#ifdef CONFIG_SFI
-extern int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *));
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- if (!acpi_table_parse(signature, handler))
- return 0;
-
- return sfi_acpi_table_parse(signature, NULL, NULL, handler);
-}
-#else /* !CONFIG_SFI */
-static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *))
-{
- return -1;
-}
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- return acpi_table_parse(signature, handler);
-}
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_ACPI_H*/
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 38893e4dd0f0..9346cd44814d 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -332,39 +332,39 @@ enum {
/* SFP EEPROM registers */
enum {
- SFP_PHYS_ID = 0x00,
- SFP_PHYS_EXT_ID = 0x01,
- SFP_CONNECTOR = 0x02,
- SFP_COMPLIANCE = 0x03,
- SFP_ENCODING = 0x0b,
- SFP_BR_NOMINAL = 0x0c,
- SFP_RATE_ID = 0x0d,
- SFP_LINK_LEN_SM_KM = 0x0e,
- SFP_LINK_LEN_SM_100M = 0x0f,
- SFP_LINK_LEN_50UM_OM2_10M = 0x10,
- SFP_LINK_LEN_62_5UM_OM1_10M = 0x11,
- SFP_LINK_LEN_COPPER_1M = 0x12,
- SFP_LINK_LEN_50UM_OM4_10M = 0x12,
- SFP_LINK_LEN_50UM_OM3_10M = 0x13,
- SFP_VENDOR_NAME = 0x14,
- SFP_VENDOR_OUI = 0x25,
- SFP_VENDOR_PN = 0x28,
- SFP_VENDOR_REV = 0x38,
- SFP_OPTICAL_WAVELENGTH_MSB = 0x3c,
- SFP_OPTICAL_WAVELENGTH_LSB = 0x3d,
- SFP_CABLE_SPEC = 0x3c,
- SFP_CC_BASE = 0x3f,
- SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */
- SFP_BR_MAX = 0x42,
- SFP_BR_MIN = 0x43,
- SFP_VENDOR_SN = 0x44,
- SFP_DATECODE = 0x54,
- SFP_DIAGMON = 0x5c,
- SFP_ENHOPTS = 0x5d,
- SFP_SFF8472_COMPLIANCE = 0x5e,
- SFP_CC_EXT = 0x5f,
+ SFP_PHYS_ID = 0,
+ SFP_PHYS_EXT_ID = 1,
SFP_PHYS_EXT_ID_SFP = 0x04,
+
+ SFP_CONNECTOR = 2,
+ SFP_COMPLIANCE = 3,
+ SFP_ENCODING = 11,
+ SFP_BR_NOMINAL = 12,
+ SFP_RATE_ID = 13,
+ SFF_RID_8079 = 0x01,
+ SFF_RID_8431_RX_ONLY = 0x02,
+ SFF_RID_8431_TX_ONLY = 0x04,
+ SFF_RID_8431 = 0x06,
+ SFF_RID_10G8G = 0x0e,
+
+ SFP_LINK_LEN_SM_KM = 14,
+ SFP_LINK_LEN_SM_100M = 15,
+ SFP_LINK_LEN_50UM_OM2_10M = 16,
+ SFP_LINK_LEN_62_5UM_OM1_10M = 17,
+ SFP_LINK_LEN_COPPER_1M = 18,
+ SFP_LINK_LEN_50UM_OM4_10M = 18,
+ SFP_LINK_LEN_50UM_OM3_10M = 19,
+ SFP_VENDOR_NAME = 20,
+ SFP_VENDOR_OUI = 37,
+ SFP_VENDOR_PN = 40,
+ SFP_VENDOR_REV = 56,
+ SFP_OPTICAL_WAVELENGTH_MSB = 60,
+ SFP_OPTICAL_WAVELENGTH_LSB = 61,
+ SFP_CABLE_SPEC = 60,
+ SFP_CC_BASE = 63,
+
+ SFP_OPTIONS = 64, /* 2 bytes, MSB, LSB */
SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
SFP_OPTIONS_PAGING_A2 = BIT(12),
SFP_OPTIONS_RETIMER = BIT(11),
@@ -378,11 +378,20 @@ enum {
SFP_OPTIONS_TX_FAULT = BIT(3),
SFP_OPTIONS_LOS_INVERTED = BIT(2),
SFP_OPTIONS_LOS_NORMAL = BIT(1),
+
+ SFP_BR_MAX = 66,
+ SFP_BR_MIN = 67,
+ SFP_VENDOR_SN = 68,
+ SFP_DATECODE = 84,
+
+ SFP_DIAGMON = 92,
SFP_DIAGMON_DDM = BIT(6),
SFP_DIAGMON_INT_CAL = BIT(5),
SFP_DIAGMON_EXT_CAL = BIT(4),
SFP_DIAGMON_RXPWR_AVG = BIT(3),
SFP_DIAGMON_ADDRMODE = BIT(2),
+
+ SFP_ENHOPTS = 93,
SFP_ENHOPTS_ALARMWARN = BIT(7),
SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6),
SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5),
@@ -390,6 +399,8 @@ enum {
SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3),
SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2),
SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1),
+
+ SFP_SFF8472_COMPLIANCE = 94,
SFP_SFF8472_COMPLIANCE_NONE = 0x00,
SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01,
SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02,
@@ -399,68 +410,71 @@ enum {
SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06,
SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07,
SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08,
+
+ SFP_CC_EXT = 95,
};
/* SFP Diagnostics */
enum {
/* Alarms and warnings are stored MSB at the lower address, then LSB */
- SFP_TEMP_HIGH_ALARM = 0x00,
- SFP_TEMP_LOW_ALARM = 0x02,
- SFP_TEMP_HIGH_WARN = 0x04,
- SFP_TEMP_LOW_WARN = 0x06,
- SFP_VOLT_HIGH_ALARM = 0x08,
- SFP_VOLT_LOW_ALARM = 0x0a,
- SFP_VOLT_HIGH_WARN = 0x0c,
- SFP_VOLT_LOW_WARN = 0x0e,
- SFP_BIAS_HIGH_ALARM = 0x10,
- SFP_BIAS_LOW_ALARM = 0x12,
- SFP_BIAS_HIGH_WARN = 0x14,
- SFP_BIAS_LOW_WARN = 0x16,
- SFP_TXPWR_HIGH_ALARM = 0x18,
- SFP_TXPWR_LOW_ALARM = 0x1a,
- SFP_TXPWR_HIGH_WARN = 0x1c,
- SFP_TXPWR_LOW_WARN = 0x1e,
- SFP_RXPWR_HIGH_ALARM = 0x20,
- SFP_RXPWR_LOW_ALARM = 0x22,
- SFP_RXPWR_HIGH_WARN = 0x24,
- SFP_RXPWR_LOW_WARN = 0x26,
- SFP_LASER_TEMP_HIGH_ALARM = 0x28,
- SFP_LASER_TEMP_LOW_ALARM = 0x2a,
- SFP_LASER_TEMP_HIGH_WARN = 0x2c,
- SFP_LASER_TEMP_LOW_WARN = 0x2e,
- SFP_TEC_CUR_HIGH_ALARM = 0x30,
- SFP_TEC_CUR_LOW_ALARM = 0x32,
- SFP_TEC_CUR_HIGH_WARN = 0x34,
- SFP_TEC_CUR_LOW_WARN = 0x36,
- SFP_CAL_RXPWR4 = 0x38,
- SFP_CAL_RXPWR3 = 0x3c,
- SFP_CAL_RXPWR2 = 0x40,
- SFP_CAL_RXPWR1 = 0x44,
- SFP_CAL_RXPWR0 = 0x48,
- SFP_CAL_TXI_SLOPE = 0x4c,
- SFP_CAL_TXI_OFFSET = 0x4e,
- SFP_CAL_TXPWR_SLOPE = 0x50,
- SFP_CAL_TXPWR_OFFSET = 0x52,
- SFP_CAL_T_SLOPE = 0x54,
- SFP_CAL_T_OFFSET = 0x56,
- SFP_CAL_V_SLOPE = 0x58,
- SFP_CAL_V_OFFSET = 0x5a,
- SFP_CHKSUM = 0x5f,
-
- SFP_TEMP = 0x60,
- SFP_VCC = 0x62,
- SFP_TX_BIAS = 0x64,
- SFP_TX_POWER = 0x66,
- SFP_RX_POWER = 0x68,
- SFP_LASER_TEMP = 0x6a,
- SFP_TEC_CUR = 0x6c,
-
- SFP_STATUS = 0x6e,
+ SFP_TEMP_HIGH_ALARM = 0,
+ SFP_TEMP_LOW_ALARM = 2,
+ SFP_TEMP_HIGH_WARN = 4,
+ SFP_TEMP_LOW_WARN = 6,
+ SFP_VOLT_HIGH_ALARM = 8,
+ SFP_VOLT_LOW_ALARM = 10,
+ SFP_VOLT_HIGH_WARN = 12,
+ SFP_VOLT_LOW_WARN = 14,
+ SFP_BIAS_HIGH_ALARM = 16,
+ SFP_BIAS_LOW_ALARM = 18,
+ SFP_BIAS_HIGH_WARN = 20,
+ SFP_BIAS_LOW_WARN = 22,
+ SFP_TXPWR_HIGH_ALARM = 24,
+ SFP_TXPWR_LOW_ALARM = 26,
+ SFP_TXPWR_HIGH_WARN = 28,
+ SFP_TXPWR_LOW_WARN = 30,
+ SFP_RXPWR_HIGH_ALARM = 32,
+ SFP_RXPWR_LOW_ALARM = 34,
+ SFP_RXPWR_HIGH_WARN = 36,
+ SFP_RXPWR_LOW_WARN = 38,
+ SFP_LASER_TEMP_HIGH_ALARM = 40,
+ SFP_LASER_TEMP_LOW_ALARM = 42,
+ SFP_LASER_TEMP_HIGH_WARN = 44,
+ SFP_LASER_TEMP_LOW_WARN = 46,
+ SFP_TEC_CUR_HIGH_ALARM = 48,
+ SFP_TEC_CUR_LOW_ALARM = 50,
+ SFP_TEC_CUR_HIGH_WARN = 52,
+ SFP_TEC_CUR_LOW_WARN = 54,
+ SFP_CAL_RXPWR4 = 56,
+ SFP_CAL_RXPWR3 = 60,
+ SFP_CAL_RXPWR2 = 64,
+ SFP_CAL_RXPWR1 = 68,
+ SFP_CAL_RXPWR0 = 72,
+ SFP_CAL_TXI_SLOPE = 76,
+ SFP_CAL_TXI_OFFSET = 78,
+ SFP_CAL_TXPWR_SLOPE = 80,
+ SFP_CAL_TXPWR_OFFSET = 82,
+ SFP_CAL_T_SLOPE = 84,
+ SFP_CAL_T_OFFSET = 86,
+ SFP_CAL_V_SLOPE = 88,
+ SFP_CAL_V_OFFSET = 90,
+ SFP_CHKSUM = 95,
+
+ SFP_TEMP = 96,
+ SFP_VCC = 98,
+ SFP_TX_BIAS = 100,
+ SFP_TX_POWER = 102,
+ SFP_RX_POWER = 104,
+ SFP_LASER_TEMP = 106,
+ SFP_TEC_CUR = 108,
+
+ SFP_STATUS = 110,
SFP_STATUS_TX_DISABLE = BIT(7),
SFP_STATUS_TX_DISABLE_FORCE = BIT(6),
+ SFP_STATUS_RS0_SELECT = BIT(3),
SFP_STATUS_TX_FAULT = BIT(2),
SFP_STATUS_RX_LOS = BIT(1),
- SFP_ALARM0 = 0x70,
+ SFP_ALARM0 = 112,
SFP_ALARM0_TEMP_HIGH = BIT(7),
SFP_ALARM0_TEMP_LOW = BIT(6),
SFP_ALARM0_VCC_HIGH = BIT(5),
@@ -470,11 +484,11 @@ enum {
SFP_ALARM0_TXPWR_HIGH = BIT(1),
SFP_ALARM0_TXPWR_LOW = BIT(0),
- SFP_ALARM1 = 0x71,
+ SFP_ALARM1 = 113,
SFP_ALARM1_RXPWR_HIGH = BIT(7),
SFP_ALARM1_RXPWR_LOW = BIT(6),
- SFP_WARN0 = 0x74,
+ SFP_WARN0 = 116,
SFP_WARN0_TEMP_HIGH = BIT(7),
SFP_WARN0_TEMP_LOW = BIT(6),
SFP_WARN0_VCC_HIGH = BIT(5),
@@ -484,13 +498,16 @@ enum {
SFP_WARN0_TXPWR_HIGH = BIT(1),
SFP_WARN0_TXPWR_LOW = BIT(0),
- SFP_WARN1 = 0x75,
+ SFP_WARN1 = 117,
SFP_WARN1_RXPWR_HIGH = BIT(7),
SFP_WARN1_RXPWR_LOW = BIT(6),
- SFP_EXT_STATUS = 0x76,
- SFP_VSL = 0x78,
- SFP_PAGE = 0x7f,
+ SFP_EXT_STATUS = 118,
+ SFP_EXT_STATUS_RS1_SELECT = BIT(3),
+ SFP_EXT_STATUS_PWRLVL_SELECT = BIT(0),
+
+ SFP_VSL = 120,
+ SFP_PAGE = 127,
};
struct fwnode_handle;
@@ -535,17 +552,21 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
+ unsigned long *support, unsigned long *interfaces);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
+int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
+void sfp_upstream_set_signal_rate(struct sfp_bus *bus, unsigned int rate_kbd);
void sfp_bus_put(struct sfp_bus *bus);
-struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops);
void sfp_bus_del_upstream(struct sfp_bus *bus);
@@ -565,7 +586,8 @@ static inline bool sfp_may_have_phy(struct sfp_bus *bus,
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
- unsigned long *support)
+ unsigned long *support,
+ unsigned long *interfaces)
{
}
@@ -587,6 +609,13 @@ static inline int sfp_get_module_eeprom(struct sfp_bus *bus,
return -EOPNOTSUPP;
}
+static inline int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void sfp_upstream_start(struct sfp_bus *bus)
{
}
@@ -595,11 +624,17 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
+static inline void sfp_upstream_set_signal_rate(struct sfp_bus *bus,
+ unsigned int rate_kbd)
+{
+}
+
static inline void sfp_bus_put(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+static inline struct sfp_bus *
+sfp_bus_find_fwnode(const struct fwnode_handle *fwnode)
{
return NULL;
}
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index c255273b0281..27ae79191bdc 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -13,9 +13,9 @@
/*
* Convert back and forth between INTEVT and IRQ values.
*/
-#ifdef CONFIG_CPU_HAS_INTEVT
-#define evt2irq(evt) (((evt) >> 5) - 16)
-#define irq2evt(irq) (((irq) + 16) << 5)
+#ifdef CONFIG_CPU_HAS_INTEVT /* Avoid IRQ0 (invalid for platform devices) */
+#define evt2irq(evt) ((evt) >> 5)
+#define irq2evt(irq) ((irq) << 5)
#else
#define evt2irq(evt) (evt)
#define irq2evt(irq) (irq)
@@ -97,7 +97,10 @@ struct intc_hw_desc {
unsigned int nr_subgroups;
};
-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
+#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a, \
+ typeof(NULL): 0, \
+ default: sizeof(a)))
+#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
#define INTC_HW_DESC(vectors, groups, mask_regs, \
prio_regs, sense_regs, ack_regs) \
diff --git a/include/linux/shm.h b/include/linux/shm.h
index d8e69aed3d32..c55bef0538e5 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -2,12 +2,12 @@
#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
-#include <linux/list.h>
+#include <linux/types.h>
#include <asm/page.h>
-#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
struct file;
+struct task_struct;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a5a5d1d4d7b1..a4c15db2f5e5 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -9,40 +9,70 @@
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>
+#include <linux/userfaultfd_k.h>
/* inode in-kernel data */
+#ifdef CONFIG_TMPFS_QUOTA
+#define SHMEM_MAXQUOTAS 2
+#endif
+
struct shmem_inode_info {
spinlock_t lock;
unsigned int seals; /* shmem seals */
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
- struct list_head shrinklist; /* shrinkable hpage inodes */
- struct list_head swaplist; /* chain of maybes on swap */
+ union {
+ struct offset_ctx dir_offsets; /* stable directory offsets */
+ struct {
+ struct list_head shrinklist; /* shrinkable hpage inodes */
+ struct list_head swaplist; /* chain of maybes on swap */
+ };
+ };
+ struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ pgoff_t fallocend; /* highest fallocate endindex */
+ unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
+#ifdef CONFIG_TMPFS_QUOTA
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
+#endif
struct inode vfs_inode;
};
+#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+
+struct shmem_quota_limits {
+ qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
+ qsize_t usrquota_ihardlimit; /* Default user quota inode hard limit */
+ qsize_t grpquota_bhardlimit; /* Default group quota block hard limit */
+ qsize_t grpquota_ihardlimit; /* Default group quota inode hard limit */
+};
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
unsigned long max_inodes; /* How many inodes are allowed */
- unsigned long free_inodes; /* How many are left for allocation */
- spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ unsigned long free_ispace; /* How much ispace left for allocation */
+ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
umode_t mode; /* Mount mode for root directory */
unsigned char huge; /* Whether to try for hugepages */
kuid_t uid; /* Mount uid for root directory */
kgid_t gid; /* Mount gid for root directory */
bool full_inums; /* If i_ino should be uint or ino_t */
+ bool noswap; /* ignores VM reclaim / swap requests */
ino_t next_ino; /* The next per-sb inode number to use */
ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */
struct mempolicy *mpol; /* default memory policy for mappings */
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shrinkable inodes */
unsigned long shrinklist_len; /* Length of shrinklist */
+ struct shmem_quota_limits qlimits; /* Default quota limits */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
@@ -54,7 +84,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
* Functions in mm/shmem.c called directly from elsewhere:
*/
extern const struct fs_parameter_spec shmem_fs_parameters[];
-extern int shmem_init(void);
+extern void shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
@@ -65,9 +95,9 @@ extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-extern bool shmem_mapping(struct address_space *mapping);
+bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
@@ -78,26 +108,40 @@ extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
-extern int shmem_unuse(unsigned int type, bool frontswap,
- unsigned long *fs_pages_to_unuse);
+int shmem_unuse(unsigned int type);
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags);
+#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+#else
+static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
+{
+ return 0;
+}
+#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
+ SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
- SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */
- SGP_HUGE, /* like SGP_CACHE, huge pages preferred */
SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-extern int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
+ enum sgp_type sgp);
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
+
+static inline struct folio *shmem_read_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
+}
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
@@ -115,24 +159,46 @@ static inline bool shmem_file(struct file *file)
return shmem_mapping(file->f_mapping);
}
+/*
+ * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
+ * beyond i_size's notion of EOF, which fallocate has committed to reserving:
+ * which split_huge_page() must therefore not delete. This use of a single
+ * "fallocend" per inode errs on the side of not deleting a reservation when
+ * in doubt: there are plenty of cases when it preserves unreserved pages.
+ */
+static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
+{
+ return max(eof, SHMEM_I(inode)->fallocend);
+}
+
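Callers treat the value returned by shmem_fallocend() as the boundary below
which a fallocate reservation may still exist. A hedged sketch of such a
caller (the helper name and usage are assumptions, not mm/shmem.c code):

	static pgoff_t shmem_removal_start(struct inode *inode)
	{
		pgoff_t eof = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

		/* Never remove pages below the fallocate reservation end. */
		return shmem_fallocend(inode, eof);
	}
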
extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
+#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
-extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep);
-extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr);
-#else
-#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
- dst_addr) ({ BUG(); 0; })
-#endif
+ uffd_flags_t flags,
+ struct folio **foliop);
+#else /* !CONFIG_SHMEM */
+#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
+ src_addr, flags, foliop) ({ BUG(); 0; })
+#endif /* CONFIG_SHMEM */
+#endif /* CONFIG_USERFAULTFD */
+
+/*
+ * Used space is stored as an unsigned 64-bit value in bytes, but the
+ * quota core supports only signed 64-bit values, so use that as the
+ * limit.
+ */
+#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
+#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL
+
+#ifdef CONFIG_TMPFS_QUOTA
+extern const struct dquot_operations shmem_quota_operations;
+extern struct quota_format_type shmem_quota_format;
+#endif /* CONFIG_TMPFS_QUOTA */
#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 0f80123650e2..1a00be90d93a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -2,9 +2,31 @@
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+
+#define SHRINKER_UNIT_BITS BITS_PER_LONG
+
+/*
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to the memcg.
+ */
+struct shrinker_info_unit {
+ atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
+ DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
+};
+
+struct shrinker_info {
+ struct rcu_head rcu;
+ int map_nr_max;
+ struct shrinker_info_unit *unit[];
+};
+
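Each shrinker_info_unit covers SHRINKER_UNIT_BITS shrinker ids, so locating a
shrinker's bitmap bit is a divide/modulo pair. A sketch with assumed helper
names:

	static inline struct shrinker_info_unit *
	shrinker_unit_of(struct shrinker_info *info, int shrinker_id)
	{
		return info->unit[shrinker_id / SHRINKER_UNIT_BITS];
	}

	static inline int shrinker_bit_of(int shrinker_id)
	{
		return shrinker_id % SHRINKER_UNIT_BITS;
	}
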
/*
* This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
@@ -67,29 +89,72 @@ struct shrinker {
int seeks; /* seeks to recreate an obj */
unsigned flags;
+ /*
+ * The reference count of this shrinker. A registered shrinker has an
+ * initial refcount of 1; lookup operations are then allowed to take
+ * temporary references via shrinker_try_get(). During unregistration
+ * the initial refcount is dropped, and the shrinker is freed
+ * asynchronously via RCU once its refcount reaches 0.
+ */
+ refcount_t refcount;
+ struct completion done; /* used to wait for refcount to reach 0 */
+ struct rcu_head rcu;
+
+ void *private_data;
+
/* These are for internal use */
struct list_head list;
#ifdef CONFIG_MEMCG
/* ID in shrinker_idr */
int id;
#endif
+#ifdef CONFIG_SHRINKER_DEBUG
+ int debugfs_id;
+ const char *name;
+ struct dentry *debugfs_entry;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-/* Flags */
-#define SHRINKER_NUMA_AWARE (1 << 0)
-#define SHRINKER_MEMCG_AWARE (1 << 1)
+/* Internal flags */
+#define SHRINKER_REGISTERED BIT(0)
+#define SHRINKER_ALLOCATED BIT(1)
+
+/* Flags for users to use */
+#define SHRINKER_NUMA_AWARE BIT(2)
+#define SHRINKER_MEMCG_AWARE BIT(3)
/*
* It just makes sense when the shrinker is also MEMCG_AWARE for now,
* non-MEMCG_AWARE shrinker should not have this flag set.
*/
-#define SHRINKER_NONSLAB (1 << 2)
+#define SHRINKER_NONSLAB BIT(4)
-extern int prealloc_shrinker(struct shrinker *shrinker);
-extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int register_shrinker(struct shrinker *shrinker);
-extern void unregister_shrinker(struct shrinker *shrinker);
-extern void free_prealloced_shrinker(struct shrinker *shrinker);
-#endif
+__printf(2, 3)
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
+void shrinker_register(struct shrinker *shrinker);
+void shrinker_free(struct shrinker *shrinker);
+
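A minimal lifecycle sketch for the dynamic API above, assuming a fictional
"foo" cache with assumed foo_count/foo_scan callbacks; error handling is
trimmed beyond the allocation check:

	static struct shrinker *foo_shrinker;

	static int foo_shrinker_init(void)
	{
		foo_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "foo-cache");
		if (!foo_shrinker)
			return -ENOMEM;

		foo_shrinker->count_objects = foo_count; /* assumed callbacks */
		foo_shrinker->scan_objects  = foo_scan;
		foo_shrinker->private_data  = foo_cache;

		shrinker_register(foo_shrinker);
		return 0;
	}

	/* ... and shrinker_free(foo_shrinker) on teardown. */
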
+static inline bool shrinker_try_get(struct shrinker *shrinker)
+{
+ return refcount_inc_not_zero(&shrinker->refcount);
+}
+
+static inline void shrinker_put(struct shrinker *shrinker)
+{
+ if (refcount_dec_and_test(&shrinker->refcount))
+ complete(&shrinker->done);
+}
+
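shrinker_try_get()/shrinker_put() are intended for lockless walkers. A sketch
of the expected pattern, assuming an RCU-protected shrinker_list like the one
mm/shrinker.c iterates:

	static void walk_shrinkers(void)
	{
		struct shrinker *shrinker;

		rcu_read_lock();
		list_for_each_entry_rcu(shrinker, &shrinker_list, list) {
			if (!shrinker_try_get(shrinker))
				continue;
			rcu_read_unlock();

			/* ... run the shrinker outside the RCU section ... */

			rcu_read_lock();
			shrinker_put(shrinker);
		}
		rcu_read_unlock();
	}
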
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
+static inline __printf(2, 3)
+int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_SHRINKER_DEBUG */
+#endif /* _LINUX_SHRINKER_H */
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 7bbc0e9cf084..f19816832f05 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -3,6 +3,7 @@
#define _LINUX_SIGNAL_H
#include <linux/bug.h>
+#include <linux/list.h>
#include <linux/signal_types.h>
#include <linux/string.h>
@@ -40,9 +41,11 @@ enum siginfo_layout {
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
+ SIL_FAULT_TRAPNO,
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
+ SIL_FAULT_PERF_EVENT,
SIL_CHLD,
SIL_RT,
SIL_SYS,
@@ -124,7 +127,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
-#include <linux/string.h>
#define _SIG_SET_BINOP(name, op) \
static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
@@ -238,6 +240,7 @@ static inline void siginitset(sigset_t *set, unsigned long mask)
memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1));
break;
case 2: set->sig[1] = 0;
+ break;
case 1: ;
}
}
@@ -250,6 +253,7 @@ static inline void siginitsetinv(sigset_t *set, unsigned long mask)
memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1));
break;
case 2: set->sig[1] = -1;
+ break;
case 1: ;
}
}
@@ -279,7 +283,8 @@ extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
-extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
@@ -458,13 +463,31 @@ int __save_altstack(stack_t __user *, unsigned long);
unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
} while (0);
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+bool sigaltstack_size_valid(size_t ss_size);
+#else
+static inline bool sigaltstack_size_valid(size_t size) { return true; }
+#endif /* !CONFIG_DYNAMIC_SIGFRAME */
+
#ifdef CONFIG_PROC_FS
struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
#endif
+#ifndef arch_untagged_si_addr
+/*
+ * Given a fault address and a signal and si_code which correspond to the
+ * _sigfault union member, returns the address that must appear in si_addr if
+ * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags.
+ */
+static inline void __user *arch_untagged_si_addr(void __user *addr,
+ unsigned long sig,
+ unsigned long si_code)
+{
+ return addr;
+}
+#endif
+
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index f8a90ae9c6ec..caf4f7a59ab9 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -6,13 +6,15 @@
* Basic signal handling related data type definitions:
*/
-#include <linux/list.h>
+#include <linux/types.h>
#include <uapi/linux/signal.h>
typedef struct kernel_siginfo {
__SIGINFO;
} kernel_siginfo_t;
+struct ucounts;
+
/*
* Real Time signals may be queued.
*/
@@ -21,7 +23,7 @@ struct sigqueue {
struct list_head list;
int flags;
kernel_siginfo_t info;
- struct user_struct *user;
+ struct ucounts *ucounts;
};
/* flags values. */
@@ -68,4 +70,19 @@ struct ksignal {
int sig;
};
+/* Used to kill the race between sigaction and forced signals */
+#define SA_IMMUTABLE 0x00800000
+
+#ifndef __ARCH_UAPI_SA_FLAGS
+#ifdef SA_RESTORER
+#define __ARCH_UAPI_SA_FLAGS SA_RESTORER
+#else
+#define __ARCH_UAPI_SA_FLAGS 0
+#endif
+#endif
+
+#define UAPI_SA_FLAGS \
+ (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
+ SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS)
+
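UAPI_SA_FLAGS enumerates the bits userspace may observe, while kernel-internal
bits such as SA_IMMUTABLE must stay hidden. A hedged illustration (the helper
name is an assumption):

	static unsigned long sa_flags_to_user(unsigned long sa_flags)
	{
		/* Hide kernel-internal bits like SA_IMMUTABLE. */
		return sa_flags & UAPI_SA_FLAGS;
	}
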
#endif /* _LINUX_SIGNAL_TYPES_H */
diff --git a/include/linux/siox.h b/include/linux/siox.h
index da7225bf1877..6bfbda3f634c 100644
--- a/include/linux/siox.h
+++ b/include/linux/siox.h
@@ -36,7 +36,7 @@ bool siox_device_connected(struct siox_device *sdevice);
struct siox_driver {
int (*probe)(struct siox_device *sdevice);
- int (*remove)(struct siox_device *sdevice);
+ void (*remove)(struct siox_device *sdevice);
void (*shutdown)(struct siox_device *sdevice);
/*
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index bf21591a9e5e..9153e77382e1 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
@@ -21,15 +20,15 @@ typedef struct {
u64 key[2];
} siphash_key_t;
+#define siphash_aligned_key_t siphash_key_t __aligned(16)
+
static inline bool siphash_key_is_zero(const siphash_key_t *key)
{
return !(key->key[0] | key->key[1]);
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +81,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
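A usage sketch for siphash() with the new aligned key type; the key variable,
struct layout, and the net_get_random_once() call are illustrative
assumptions:

	static siphash_aligned_key_t flow_key;

	struct flow_tuple {
		__be32 saddr, daddr;
		__be16 sport, dport;
	};

	static u64 flow_hash(const struct flow_tuple *t)
	{
		net_get_random_once(&flow_key, sizeof(flow_key));
		/* Alignment is checked (or skipped) inside siphash(). */
		return siphash(t, sizeof(*t), &flow_key);
	}
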
@@ -96,10 +94,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,11 +131,38 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
+/*
+ * These macros expose the raw SipHash and HalfSipHash permutations.
+ * Do not use them directly! If you think you have a use for them,
+ * be sure to CC the maintainer of this file explaining why.
+ */
+
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
+ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
+ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
+ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
+
+#define HSIPHASH_CONST_0 0U
+#define HSIPHASH_CONST_1 0U
+#define HSIPHASH_CONST_2 0x6c796765U
+#define HSIPHASH_CONST_3 0x74656462U
+
#endif /* _LINUX_SIPHASH_H */
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
deleted file mode 100644
index 50161b6afb61..000000000000
--- a/include/linux/sirfsoc_dma.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SIRFSOC_DMA_H_
-#define _SIRFSOC_DMA_H_
-
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
-
-#endif
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index 9874f6f67537..c3a00b967d18 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -44,6 +44,20 @@
#define SZ_2G 0x80000000
#define SZ_4G _AC(0x100000000, ULL)
+#define SZ_8G _AC(0x200000000, ULL)
+#define SZ_16G _AC(0x400000000, ULL)
+#define SZ_32G _AC(0x800000000, ULL)
+#define SZ_64G _AC(0x1000000000, ULL)
+#define SZ_128G _AC(0x2000000000, ULL)
+#define SZ_256G _AC(0x4000000000, ULL)
+#define SZ_512G _AC(0x8000000000, ULL)
+
+#define SZ_1T _AC(0x10000000000, ULL)
+#define SZ_2T _AC(0x20000000000, ULL)
+#define SZ_4T _AC(0x40000000000, ULL)
+#define SZ_8T _AC(0x80000000000, ULL)
+#define SZ_16T _AC(0x100000000000, ULL)
+#define SZ_32T _AC(0x200000000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 04a18e01b362..0c7c67b3a87b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -23,116 +23,128 @@
#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
-#include <linux/net.h>
-#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
-#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
-#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
+#include <linux/llist.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
+#include <net/net_debug.h>
+#include <net/dropreason-core.h>
+#include <net/netmem.h>
-/* The interface for checksum offload between the stack and networking drivers
+/**
+ * DOC: skb checksums
+ *
+ * The interface for checksum offload between the stack and networking drivers
* is as follows...
*
- * A. IP checksum related features
+ * IP checksum related features
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Drivers advertise checksum offload capabilities in the features of a device.
* From the stack's point of view these are capabilities offered by the driver.
* A driver typically only advertises features that it is capable of offloading
* to its device.
*
- * The checksum related features are:
- *
- * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
- * IP (one's complement) checksum for any combination
- * of protocols or protocol layering. The checksum is
- * computed and set in a packet per the CHECKSUM_PARTIAL
- * interface (see below).
- *
- * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv4. These are specifically
- * unencapsulated packets of the form IPv4|TCP or
- * IPv4|UDP where the Protocol field in the IPv4 header
- * is TCP or UDP. The IPv4 header may contain IP options.
- * This feature cannot be set in features for a device
- * with NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv6. These are specifically
- * unencapsulated packets of the form IPv6|TCP or
- * IPv6|UDP where the Next Header field in the IPv6
- * header is either TCP or UDP. IPv6 extension headers
- * are not supported with this feature. This feature
- * cannot be set in features for a device with
- * NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- * This flag is only used to disable the RX checksum
- * feature for a device. The stack will accept receive
- * checksum indication in packets received on a device
- * regardless of whether NETIF_F_RXCSUM is set.
- *
- * B. Checksumming of received packets by device. Indication of checksum
- * verification is set in skb->ip_summed. Possible values are:
- *
- * CHECKSUM_NONE:
+ * .. flat-table:: Checksum related device features
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_HW_CSUM
+ * - The driver (or its device) is able to compute one
+ * IP (one's complement) checksum for any combination
+ * of protocols or protocol layering. The checksum is
+ * computed and set in a packet per the CHECKSUM_PARTIAL
+ * interface (see below).
+ *
+ * * - %NETIF_F_IP_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv4. These are specifically
+ * unencapsulated packets of the form IPv4|TCP or
+ * IPv4|UDP where the Protocol field in the IPv4 header
+ * is TCP or UDP. The IPv4 header may contain IP options.
+ * This feature cannot be set in features for a device
+ * with NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_IPV6_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv6. These are specifically
+ * unencapsulated packets of the form IPv6|TCP or
+ * IPv6|UDP where the Next Header field in the IPv6
+ * header is either TCP or UDP. IPv6 extension headers
+ * are not supported with this feature. This feature
+ * cannot be set in features for a device with
+ * NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_RXCSUM
+ * - Driver (device) performs receive checksum offload.
+ * This flag is only used to disable the RX checksum
+ * feature for a device. The stack will accept receive
+ * checksum indication in packets received on a device
+ * regardless of whether NETIF_F_RXCSUM is set.
+ *
+ * Checksumming of received packets by device
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Indication of checksum verification is set in &sk_buff.ip_summed.
+ * Possible values are:
+ *
+ * - %CHECKSUM_NONE
*
* Device did not checksum this packet e.g. due to lack of capabilities.
* The packet contains full (though not verified) checksum in packet but
* not in skb->csum. Thus, skb->csum is undefined in this case.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
* The hardware you're dealing with doesn't calculate the full checksum
- * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
- * if their checksums are okay. skb->csum is still undefined in this case
+ * (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+ * for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY
+ * if their checksums are okay. &sk_buff.csum is still undefined in this case
* though. A driver or device must never modify the checksum field in the
* packet even if checksum is verified.
*
- * CHECKSUM_UNNECESSARY is applicable to following protocols:
- * TCP: IPv6 and IPv4.
- * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * %CHECKSUM_UNNECESSARY is applicable to following protocols:
+ *
+ * - TCP: IPv6 and IPv4.
+ * - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
* zero UDP checksum for either IPv4 or IPv6, the networking stack
* may perform further validation in this case.
- * GRE: only if the checksum is present in the header.
- * SCTP: indicates the CRC in SCTP header has been validated.
- * FCOE: indicates the CRC in FC frame has been validated.
+ * - GRE: only if the checksum is present in the header.
+ * - SCTP: indicates the CRC in SCTP header has been validated.
+ * - FCOE: indicates the CRC in FC frame has been validated.
*
- * skb->csum_level indicates the number of consecutive checksums found in
- * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * &sk_buff.csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as %CHECKSUM_UNNECESSARY.
* For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
* and a device is able to verify the checksums for UDP (possibly zero),
- * GRE (checksum flag is set) and TCP, skb->csum_level would be set to
+ * GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to
* two. If the device were only able to verify the UDP checksum and not
* GRE, either because it doesn't support GRE checksum or because GRE
* checksum is bad, skb->csum_level would be set to zero (TCP checksum is
* not considered in this case).
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
*
* This is the most generic way. The device supplied checksum of the _whole_
- * packet as seen by netif_rx() and fills in skb->csum. This means the
+ * packet as seen by netif_rx() and fills in &sk_buff.csum. This means the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
* Notes:
+ *
* - Even if device supports only some protocols, but is able to produce
* skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
* - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* A checksum is set up to be offloaded to a device as described in the
* output description for CHECKSUM_PARTIAL. This may occur on a packet
@@ -144,14 +156,18 @@
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
- * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
- * in the skb->ip_summed for a packet. Values are:
+ * Checksumming on transmit for non-GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * CHECKSUM_PARTIAL:
+ * The stack requests checksum offload in the &sk_buff.ip_summed for a packet.
+ * Values are:
+ *
+ * - %CHECKSUM_PARTIAL
*
* The driver is required to checksum the packet as seen by hard_start_xmit()
- * from skb->csum_start up to the end, and to record/write the checksum at
- * offset skb->csum_start + skb->csum_offset. A driver may verify that the
+ * from &sk_buff.csum_start up to the end, and to record/write the checksum at
+ * offset &sk_buff.csum_start + &sk_buff.csum_offset.
+ * A driver may verify that the
* csum_start and csum_offset values are valid values given the length and
* offset of the packet, but it should not attempt to validate that the
* checksum refers to a legitimate transport layer checksum -- it is the
@@ -163,55 +179,66 @@
* checksum calculation to the device, or call skb_checksum_help (in the case
* that the device does not support offload for a particular checksum).
*
- * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
- * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
+ * %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of
+ * %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate
* checksum offload capability.
- * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
+ * skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based
* on network device checksumming capabilities: if a packet does not match
- * them, skb_checksum_help or skb_crc32c_help (depending on the value of
- * csum_not_inet, see item D.) is called to resolve the checksum.
+ * them, skb_checksum_help() or skb_crc32c_help() (depending on the value of
+ * &sk_buff.csum_not_inet, see :ref:`crc`)
+ * is called to resolve the checksum.
*
- * CHECKSUM_NONE:
+ * - %CHECKSUM_NONE
*
* The skb was already checksummed by the protocol, or a checksum is not
* required.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
* This has the same meaning as CHECKSUM_NONE for checksum offload on
* output.
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
+ *
* Not used in checksum output. If a driver observes a packet with this value
- * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
- *
- * D. Non-IP checksum (CRC) offloads
- *
- * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
- * offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set csum_start and csum_offset accordingly, set ip_summed to
- * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
- * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
- * A driver that supports both IP checksum offload and SCTP CRC32c offload
- * must verify which offload is configured for a packet by testing the
- * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
- * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
- *
- * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
- * offloading the FCOE CRC in a packet. To perform this offload the stack
- * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note that there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
- * both IP checksum offload and FCOE CRC offload must verify which offload
- * is configured for a packet, presumably by inspecting packet headers.
- *
- * E. Checksumming on output with GSO.
- *
- * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
+ * set in skbuff, it should treat the packet as if %CHECKSUM_NONE were set.
+ *
+ * .. _crc:
+ *
+ * Non-IP checksum (CRC) offloads
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * .. flat-table::
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_SCTP_CRC
+ * - This feature indicates that a device is capable of
+ * offloading the SCTP CRC in a packet. To perform this offload the stack
+ * will set csum_start and csum_offset accordingly, set ip_summed to
+ * %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication
+ * in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c.
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload
+ * must verify which offload is configured for a packet by testing the
+ * value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to
+ * resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
+ *
+ * * - %NETIF_F_FCOE_CRC
+ * - This feature indicates that a device is capable of offloading the FCOE
+ * CRC in a packet. To perform this offload the stack will set ip_summed
+ * to %CHECKSUM_PARTIAL and set csum_start and csum_offset
+ * accordingly. Note that there is no indication in the skbuff that the
+ * %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
+ * both IP checksum offload and FCOE CRC offload must verify which offload
+ * is configured for a packet, presumably by inspecting packet headers.
+ *
+ * Checksumming on output with GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * In the case of a GSO packet (skb_is_gso() is true), checksum offload
* is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
- * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
+ * gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as
* part of the GSO operation is implied. If a checksum is being offloaded
- * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
+ * with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and
* csum_offset are set to refer to the outermost checksum being offloaded
* (two offloaded checksums are possible with UDP encapsulation).
*/
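For CHECKSUM_PARTIAL on transmit, a driver lacking hardware offload resolves
the checksum in software, essentially what skb_checksum_help() does; this
sketch is illustrative, not a drop-in replacement:

	static void xmit_csum_fixup(struct sk_buff *skb)
	{
		unsigned int off = skb_checksum_start_offset(skb);
		__wsum csum = skb_checksum(skb, off, skb->len - off, 0);

		/* Record the folded checksum at csum_start + csum_offset. */
		*(__sum16 *)(skb->data + off + skb->csum_offset) = csum_fold(csum);
	}
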
@@ -228,6 +255,14 @@
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, given that struct skb_shared_info
+ * must itself be aligned?
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
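A worked example of SKB_HEAD_ALIGN, assuming 64-byte cache lines and a
320-byte struct skb_shared_info (both vary by configuration):

	SKB_HEAD_ALIGN(1500) = SKB_DATA_ALIGN(1500) + SKB_DATA_ALIGN(320)
	                     = 1536 + 320
	                     = 1856 bytes to allocate
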
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -247,6 +282,7 @@ struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
+struct ts_config;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
@@ -258,8 +294,9 @@ struct nf_bridge_info {
u8 pkt_otherhost:1;
u8 in_prerouting:1;
u8 bridged_dnat:1;
+ u8 sabotage_in_done:1;
__u16 frag_max_size;
- struct net_device *physindev;
+ int physinif;
/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
@@ -283,15 +320,26 @@ struct nf_bridge_info {
* and read by ovs to recirc_id.
*/
struct tc_skb_ext {
- __u32 chain;
+ union {
+ u64 act_miss_cookie;
+ __u32 chain;
+ };
__u16 mru;
+ __u16 zone;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+ u8 act_miss:1; /* Set if act_miss_cookie is used */
+ u8 l2_miss:1; /* Set by bridge upon FDB or MDB miss */
};
#endif
struct sk_buff_head {
- /* These two members must be first. */
- struct sk_buff *next;
- struct sk_buff *prev;
+ /* These two members must be first to match sk_buff. */
+ struct_group_tagged(sk_buff_list, list,
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ );
__u32 qlen;
spinlock_t lock;
@@ -299,18 +347,12 @@ struct sk_buff_head {
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list we
- * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
- * buffers which do not start on a page boundary.
- *
- * Since GRO uses frags we allocate at least 16 regardless of page
- * size.
- */
-#if (65536/PAGE_SIZE + 1) < 16
-#define MAX_SKB_FRAGS 16UL
-#else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+#ifndef CONFIG_MAX_SKB_FRAGS
+# define CONFIG_MAX_SKB_FRAGS 17
#endif
+
+#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
+
extern int sysctl_max_skb_frags;
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
@@ -318,7 +360,11 @@ extern int sysctl_max_skb_frags;
*/
#define GSO_BY_FRAGS 0xFFFF
-typedef struct bio_vec skb_frag_t;
+typedef struct skb_frag {
+ netmem_ref netmem;
+ unsigned int len;
+ unsigned int offset;
+} skb_frag_t;
/**
* skb_frag_size() - Returns the size of a skb fragment
@@ -326,7 +372,7 @@ typedef struct bio_vec skb_frag_t;
*/
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
- return frag->bv_len;
+ return frag->len;
}
/**
@@ -336,7 +382,7 @@ static inline unsigned int skb_frag_size(const skb_frag_t *frag)
*/
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
- frag->bv_len = size;
+ frag->len = size;
}
/**
@@ -346,7 +392,7 @@ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
*/
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
- frag->bv_len += delta;
+ frag->len += delta;
}
/**
@@ -356,7 +402,7 @@ static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
- frag->bv_len -= delta;
+ frag->len -= delta;
}
/**
@@ -366,7 +412,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
- if (PageHighMem(p))
+ if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
return true;
#endif
return false;
@@ -376,7 +422,7 @@ static inline bool skb_frag_must_loop(struct page *p)
* skb_frag_foreach_page - loop over pages in a fragment
*
* @f: skb frag to operate on
- * @f_off: offset from start of f->bv_page
+ * @f_off: offset from start of f->netmem
* @f_len: length from f_off to loop over
* @p: (temp var) current page
* @p_off: (temp var) offset from start of current page,
@@ -399,12 +445,12 @@ static inline bool skb_frag_must_loop(struct page *p)
copied += p_len, p++, p_off = 0, \
p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
-#define HAVE_HW_TIME_STAMP
-
/**
* struct skb_shared_hwtstamps - hardware time stamps
- * @hwtstamp: hardware time stamp transformed into duration
- * since arbitrary point in time
+ * @hwtstamp: hardware time stamp transformed into duration
+ * since arbitrary point in time
+ * @netdev_data: address/cookie of network device driver used as
+ * reference to actual hardware time stamp
*
* Software time stamps generated by ktime_get_real() are stored in
* skb->tstamp.
@@ -416,7 +462,10 @@ static inline bool skb_frag_must_loop(struct page *p)
* &skb_shared_info. Use skb_hwtstamps() to get a pointer.
*/
struct skb_shared_hwtstamps {
- ktime_t hwtstamp;
+ union {
+ ktime_t hwtstamp;
+ void *netdev_data;
+ };
};
/* Definitions for tx_flags in struct skb_shared_info */
@@ -430,27 +479,53 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* device driver supports TX zero-copy buffers */
- SKBTX_DEV_ZEROCOPY = 1 << 3,
+ /* generate hardware time stamp based on cycles if supported */
+ SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,
/* generate wifi status information (where possible) */
SKBTX_WIFI_STATUS = 1 << 4,
+ /* determine hardware time stamp based on time or cycles */
+ SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
+
+ /* generate software time stamp when entering packet scheduling */
+ SKBTX_SCHED_TSTAMP = 1 << 6,
+};
+
+#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
+ SKBTX_SCHED_TSTAMP)
+#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
+ SKBTX_HW_TSTAMP_USE_CYCLES | \
+ SKBTX_ANY_SW_TSTAMP)
+
+/* Definitions for flags in struct skb_shared_info */
+enum {
+ /* use zcopy routines */
+ SKBFL_ZEROCOPY_ENABLE = BIT(0),
+
/* This indicates at least one fragment might be overwritten
* (as in vmsplice(), sendfile() ...)
* If we need to compute a TX checksum, we'll need to copy
* all frags to avoid possible bad checksum
*/
- SKBTX_SHARED_FRAG = 1 << 5,
+ SKBFL_SHARED_FRAG = BIT(1),
- /* generate software time stamp when entering packet scheduling */
- SKBTX_SCHED_TSTAMP = 1 << 6,
+ /* segment contains only zerocopy data and should not be
+ * charged to the kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
+
+ SKBFL_DONT_ORPHAN = BIT(3),
+
+ /* page references are managed by the ubuf_info, so it's safe to
+ * use frags only up until ubuf_info is released
+ */
+ SKBFL_MANAGED_FRAG_REFS = BIT(4),
};
-#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
-#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP)
-#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
+ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -461,7 +536,15 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct ubuf_info *, bool zerocopy_success);
+ void (*callback)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
+ refcount_t refcnt;
+ u8 flags;
+};
+
+struct ubuf_info_msgzc {
+ struct ubuf_info ubuf;
+
union {
struct {
unsigned long desc;
@@ -474,7 +557,6 @@ struct ubuf_info {
u32 bytelen;
};
};
- refcount_t refcnt;
struct mmpin {
struct user_struct *user;
@@ -483,34 +565,26 @@ struct ubuf_info {
};
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+#define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \
+ ubuf)
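A zerocopy completion callback can recover its ubuf_info_msgzc container via
uarg_to_msgzc(); the callback name and body here are assumptions:

	static void foo_zerocopy_done(struct sk_buff *skb,
				      struct ubuf_info *uarg, bool success)
	{
		struct ubuf_info_msgzc *zc = uarg_to_msgzc(uarg);

		/* zc->id .. zc->id + zc->len describe the completed range. */
	}
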
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
-struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
-
-static inline void sock_zerocopy_get(struct ubuf_info *uarg)
-{
- refcount_inc(&uarg->refcnt);
-}
-
-void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-
-void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
-
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
-int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
- struct msghdr *msg, int len,
- struct ubuf_info *uarg);
+/* Preserve some data across TX submission and completion.
+ *
+ * Note, this state is stored in the driver. Extending the layout
+ * might need some special care.
+ */
+struct xsk_tx_metadata_compl {
+ __u64 *tx_timestamp;
+};
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- __u8 __unused;
+ __u8 flags;
__u8 meta_len;
__u8 nr_frags;
__u8 tx_flags;
@@ -518,7 +592,10 @@ struct skb_shared_info {
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
struct sk_buff *frag_list;
- struct skb_shared_hwtstamps hwtstamps;
+ union {
+ struct skb_shared_hwtstamps hwtstamps;
+ struct xsk_tx_metadata_compl xsk_meta;
+ };
unsigned int gso_type;
u32 tskey;
@@ -526,6 +603,7 @@ struct skb_shared_info {
* Warning : all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
+ unsigned int xdp_frags_size;
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
@@ -535,16 +613,32 @@ struct skb_shared_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-/* We divide dataref into two halves. The higher 16 bits hold references
- * to the payload part of skb->data. The lower 16 bits hold references to
- * the entire skb->data. A clone of a headerless skb holds the length of
- * the header in skb->hdr_len.
- *
- * All users must obey the rule that the skb->data reference count must be
- * greater than or equal to the payload reference count.
- *
- * Holding a reference to the payload part means that the user does not
- * care about modifications to the header part of skb->data.
+/**
+ * DOC: dataref and headerless skbs
+ *
+ * Transport layers send out clones of payload skbs they hold for
+ * retransmissions. To allow lower layers of the stack to prepend their headers
+ * we split &skb_shared_info.dataref into two halves.
+ * The lower 16 bits count the overall number of references.
+ * The higher 16 bits indicate how many of the references are payload-only.
+ * skb_header_cloned() checks if skb is allowed to add / write the headers.
+ *
+ * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
+ * (via __skb_header_release()). Any clone created from marked skb will get
+ * &sk_buff.hdr_len populated with the available headroom.
+ * If it is the only clone in existence, it can modify the headroom
+ * at will. The sequence of calls inside the transport layer is::
+ *
+ * <alloc skb>
+ * skb_reserve()
+ * __skb_header_release()
+ * skb_clone()
+ * // send the clone down the stack
+ *
+ * This is not a very generic construct and it depends on the transport layers
+ * doing the right thing. In practice there's usually only one payload-only skb.
+ * Having multiple payload-only skbs with different lengths of hdr_len is not
+ * possible. The payload-only skbs should never leave their owner.
*/
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
@@ -609,6 +703,46 @@ typedef unsigned char *sk_buff_data_t;
#endif
/**
+ * DOC: Basic sk_buff geometry
+ *
+ * struct sk_buff itself is a metadata structure and does not hold any packet
+ * data. All the data is held in associated buffers.
+ *
+ * &sk_buff.head points to the main "head" buffer. The head buffer is divided
+ * into two parts:
+ *
+ * - data buffer, containing headers and sometimes payload;
+ * this is the part of the skb operated on by the common helpers
+ * such as skb_put() or skb_pull();
+ * - shared info (struct skb_shared_info) which holds an array of pointers
+ * to read-only data in the (page, offset, length) format.
+ *
+ * Optionally &skb_shared_info.frag_list may point to another skb.
+ *
+ * Basic diagram may look like this::
+ *
+ * ---------------
+ * | sk_buff |
+ * ---------------
+ * ,--------------------------- + head
+ * / ,----------------- + data
+ * / / ,----------- + tail
+ * | | | , + end
+ * | | | |
+ * v v v v
+ * -----------------------------------------------
+ * | headroom | data | tailroom | skb_shared_info |
+ * -----------------------------------------------
+ * + [page frag]
+ * + [page frag]
+ * + [page frag]
+ * + [page frag] ---------
+ * + frag_list --> | sk_buff |
+ * ---------
+ *
+ */
+
+/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
@@ -617,6 +751,7 @@ typedef unsigned char *sk_buff_data_t;
* for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @list: queue head
+ * @ll_node: anchor in an llist (eg socket defer_list)
* @sk: Socket we are owned by
* @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
* fragmentation management
@@ -624,7 +759,6 @@ typedef unsigned char *sk_buff_data_t;
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
- * @sp: the security path, used for xfrm
* @len: Length of actual data
* @data_len: Data length
* @mac_len: Length of link layer header
@@ -649,14 +783,15 @@ typedef unsigned char *sk_buff_data_t;
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
+ * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
* @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
+ * @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
- * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @hash: the packet hash
@@ -664,6 +799,8 @@ typedef unsigned char *sk_buff_data_t;
* @head_frag: skb was allocated from page fragments,
* not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @pp_recycle: mark the packet for recycling instead of freeing (implies
+ * page_pool support on driver)
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
@@ -683,13 +820,19 @@ typedef unsigned char *sk_buff_data_t;
* CHECKSUM_UNNECESSARY (max 3)
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
+ * @slow_gro: state present at GRO time, slower prepare step required
+ * @mono_delivery_time: When set, skb->tstamp has the
+ * delivery_time in mono clock base (i.e. EDT). Otherwise, the
+ * skb->tstamp has the (rcv) timestamp at ingress and
+ * delivery_time at egress.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
+ * @alloc_cpu: CPU which did the skb allocation.
* @secmark: security marking
* @mark: Generic packet mark
* @reserved_tailroom: (aka @mark) number of bytes of free space available
* at the tail of an sk_buff
- * @vlan_present: VLAN tag is present
+ * @vlan_all: vlan fields (proto & tci)
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
@@ -701,6 +844,7 @@ typedef unsigned char *sk_buff_data_t;
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
+ * @kcov_handle: KCOV remote handle for remote coverage collection
* @tail: Tail pointer
* @end: End pointer
* @head: Head of buffer
@@ -713,7 +857,7 @@ typedef unsigned char *sk_buff_data_t;
struct sk_buff {
union {
struct {
- /* These two members must be first. */
+ /* These two members must be first to match sk_buff_head. */
struct sk_buff *next;
struct sk_buff *prev;
@@ -728,6 +872,7 @@ struct sk_buff {
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
+ struct llist_node ll_node;
};
union {
@@ -753,6 +898,9 @@ struct sk_buff {
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;
+#ifdef CONFIG_NET_SOCK_MSG
+ unsigned long _sk_redir;
+#endif
};
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -774,7 +922,7 @@ struct sk_buff {
#else
#define CLONED_MASK 1
#endif
-#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+#define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset)
/* private: */
__u8 __cloned_offset[0];
@@ -784,85 +932,85 @@ struct sk_buff {
fclone:2,
peeked:1,
head_frag:1,
- pfmemalloc:1;
+ pfmemalloc:1,
+ pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
__u8 active_extensions;
#endif
- /* fields enclosed in headers_start/headers_end are copied
+
+ /* Fields enclosed in headers group are copied
* using a single memcpy() in __copy_skb_header()
*/
- /* private: */
- __u32 headers_start[0];
- /* public: */
-
-/* if you move pkt_type around you also must adapt those constants */
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_TYPE_MAX (7 << 5)
-#else
-#define PKT_TYPE_MAX 7
-#endif
-#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+ struct_group(headers,
/* private: */
__u8 __pkt_type_offset[0];
/* public: */
- __u8 pkt_type:3;
+ __u8 pkt_type:3; /* see PKT_TYPE_MAX */
__u8 ignore_df:1;
- __u8 nf_trace:1;
+ __u8 dst_pending_confirm:1;
__u8 ip_summed:2;
__u8 ooo_okay:1;
+ /* private: */
+ __u8 __mono_tc_offset[0];
+ /* public: */
+ __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
+#ifdef CONFIG_NET_XGRESS
+ __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
+ __u8 tc_skip_classify:1;
+#endif
+ __u8 remcsum_offload:1;
+ __u8 csum_complete_sw:1;
+ __u8 csum_level:2;
+ __u8 inner_protocol_type:1;
+
__u8 l4_hash:1;
__u8 sw_hash:1;
+#ifdef CONFIG_WIRELESS
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
+#endif
__u8 no_fcs:1;
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT 7
-#else
-#define PKT_VLAN_PRESENT_BIT 0
-#endif
-#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
- /* private: */
- __u8 __pkt_vlan_present_offset[0];
- /* public: */
- __u8 vlan_present:1;
- __u8 csum_complete_sw:1;
- __u8 csum_level:2;
- __u8 csum_not_inet:1;
- __u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
+#if IS_ENABLED(CONFIG_IP_VS)
__u8 ipvs_property:1;
- __u8 inner_protocol_type:1;
- __u8 remcsum_offload:1;
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ __u8 nf_trace:1;
+#endif
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
__u8 offload_l3_fwd_mark:1;
#endif
-#ifdef CONFIG_NET_CLS_ACT
- __u8 tc_skip_classify:1;
- __u8 tc_at_ingress:1;
-#endif
-#ifdef CONFIG_NET_REDIRECT
__u8 redirected:1;
+#ifdef CONFIG_NET_REDIRECT
__u8 from_ingress:1;
#endif
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ __u8 nf_skip_egress:1;
+#endif
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
#endif
+ __u8 slow_gro:1;
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ __u8 csum_not_inet:1;
+#endif
-#ifdef CONFIG_NET_SCHED
+#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
+ u16 alloc_cpu;
+
union {
__wsum csum;
struct {
@@ -873,8 +1021,13 @@ struct sk_buff {
__u32 priority;
int skb_iif;
__u32 hash;
- __be16 vlan_proto;
- __u16 vlan_tci;
+ union {
+ u32 vlan_all;
+ struct {
+ __be16 vlan_proto;
+ __u16 vlan_tci;
+ };
+ };
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
union {
unsigned int napi_id;
@@ -904,9 +1057,11 @@ struct sk_buff {
__u16 network_header;
__u16 mac_header;
- /* private: */
- __u32 headers_end[0];
- /* public: */
+#ifdef CONFIG_KCOV
+ u64 kcov_handle;
+#endif
+
+ ); /* end headers group */
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
@@ -917,11 +1072,31 @@ struct sk_buff {
refcount_t users;
#ifdef CONFIG_SKB_EXTENSIONS
- /* only useable after checking ->active_extensions != 0 */
+ /* only usable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
#endif
};
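With the vlan_present bit gone, VLAN presence can be derived from the
vlan_all alias added to the struct above; a hedged illustration (the helper
name is an assumption):

	static inline bool skb_has_vlan_tag(const struct sk_buff *skb)
	{
		/* A non-zero proto/tci pair means a tag was accelerated in. */
		return skb->vlan_all != 0;
	}
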
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
+#endif
+#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
+
+/* if you move tc_at_ingress or mono_delivery_time
+ * around, you also must adapt these constants.
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7)
+#define TC_AT_INGRESS_MASK (1 << 6)
+#else
+#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0)
+#define TC_AT_INGRESS_MASK (1 << 1)
+#endif
+#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset)
+
#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
@@ -974,6 +1149,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
@@ -990,6 +1166,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
@@ -1033,6 +1210,15 @@ static inline unsigned int skb_napi_id(const struct sk_buff *skb)
#endif
}
+static inline bool skb_wifi_acked_valid(const struct sk_buff *skb)
+{
+#ifdef CONFIG_WIRELESS
+ return skb->wifi_acked_valid;
+#else
+ return 0;
+#endif
+}
+
/**
* skb_unref - decrement the skb's reference count
* @skb: buffer
@@ -1051,12 +1237,47 @@ static inline bool skb_unref(struct sk_buff *skb)
return true;
}
+static inline bool skb_data_unref(const struct sk_buff *skb,
+				  struct skb_shared_info *shinfo)
+{
+	int bias;
+
+	/* An uncloned skb is the sole owner of its data. */
+	if (!skb->cloned)
+		return true;
+
+	/* A headerless (nohdr) skb holds one header reference plus one
+	 * payload reference; see SKB_DATAREF_SHIFT.
+	 */
+	bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
+
+	/* If ours are the last references, pair with the release barrier;
+	 * otherwise drop them and report whether any remain.
+	 */
+	if (atomic_read(&shinfo->dataref) == bias)
+		smp_rmb();
+	else if (atomic_sub_return(bias, &shinfo->dataref))
+		return false;
+
+	return true;
+}
+
+void __fix_address
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+
+/**
+ * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
+ * @skb: buffer to free
+ */
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
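Callers that know why a packet is being dropped should prefer
kfree_skb_reason(), so drop monitors can attribute the loss; the surrounding
check and function here are illustrative:

	static int foo_rx(struct sk_buff *skb)
	{
		if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr)))) {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
			return NET_RX_DROP;
		}
		return NET_RX_SUCCESS;
	}
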
void skb_release_head_state(struct sk_buff *skb);
-void kfree_skb(struct sk_buff *skb);
-void kfree_skb_list(struct sk_buff *segs);
+void kfree_skb_list_reason(struct sk_buff *segs,
+ enum skb_drop_reason reason);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
+static inline void kfree_skb_list(struct sk_buff *segs)
+{
+ kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
@@ -1068,7 +1289,6 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
@@ -1080,6 +1300,10 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
+void skb_attempt_defer_free(struct sk_buff *skb);
+
+struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
+struct sk_buff *slab_build_skb(void *data);
/**
* alloc_skb - allocate a network buffer
@@ -1117,7 +1341,7 @@ struct sk_buff_fclones {
*
* Returns true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that this didn't happen.
*/
static inline bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
@@ -1128,7 +1352,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
return skb->fclone == SKB_FCLONE_ORIG &&
refcount_read(&fclones->fclone_ref) > 1 &&
- fclones->skb2.sk == sk;
+ READ_ONCE(fclones->skb2.sk) == sk;
}
/**
@@ -1161,6 +1385,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
+struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
@@ -1188,7 +1413,7 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
- int offset, size_t size);
+ int offset, size_t size, size_t max_frags);
struct skb_seq_state {
__u32 lower_offset;
@@ -1198,6 +1423,7 @@ struct skb_seq_state {
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
+ __u32 frag_off;
};
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
@@ -1279,10 +1505,10 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
+ const void *data, int hlen_proto);
static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
int thoff, u8 ip_proto)
@@ -1295,15 +1521,14 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
unsigned int key_count);
struct bpf_flow_dissector;
-bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
- __be16 proto, int nhoff, int hlen, unsigned int flags);
+u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+ __be16 proto, int nhoff, int hlen, unsigned int flags);
bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
- void *target_container,
- void *data, __be16 proto, int nhoff, int hlen,
- unsigned int flags);
+ void *target_container, const void *data,
+ __be16 proto, int nhoff, int hlen, unsigned int flags);
static inline bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -1325,9 +1550,9 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
const struct sk_buff *skb,
- struct flow_keys_basic *flow, void *data,
- __be16 proto, int nhoff, int hlen,
- unsigned int flags)
+ struct flow_keys_basic *flow,
+ const void *data, __be16 proto,
+ int nhoff, int hlen, unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
@@ -1346,8 +1571,8 @@ void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
- u16 *ctinfo_map,
- size_t mapsize);
+ u16 *ctinfo_map, size_t mapsize,
+ bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -1392,6 +1617,16 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to->l4_hash = from->l4_hash;
};
+static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+#ifdef CONFIG_TLS_DEVICE
+ return skb2->decrypted - skb1->decrypted;
+#else
+ return 0;
+#endif
+}
+
static inline void skb_copy_decrypted(struct sk_buff *to,
const struct sk_buff *from)
{
@@ -1410,6 +1645,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end;
}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+ skb->end = offset;
+}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -1420,8 +1660,35 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end - skb->head;
}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+ skb->end = skb->head + offset;
+}
#endif
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
+
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+
+void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success);
+
+int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, struct iov_iter *from,
+ size_t length);
+
+static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
+ struct msghdr *msg, int len)
+{
+ return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len);
+}
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+ struct msghdr *msg, int len,
+ struct ubuf_info *uarg);
+
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
@@ -1432,11 +1699,38 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
- bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+ bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_zcopy_managed(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+ return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
+static inline void net_zcopy_get(struct ubuf_info *uarg)
+{
+ refcount_inc(&uarg->refcnt);
+}
+
+static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
+{
+ skb_shinfo(skb)->destructor_arg = uarg;
+ skb_shinfo(skb)->flags |= uarg->flags;
+}
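/*
 * Editor's sketch (hypothetical caller): the manual form of what
 * skb_zcopy_set() does below - take one uarg reference per skb, then
 * bind it so shinfo inherits the zerocopy flags.
 */
static void example_share_uarg(struct sk_buff *skb, struct ubuf_info *uarg)
{
	net_zcopy_get(uarg);		/* one reference per attached skb */
	skb_zcopy_init(skb, uarg);	/* destructor_arg + shinfo flags */
}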
+
static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
bool *have_ref)
{
@@ -1444,16 +1738,15 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
if (unlikely(have_ref && *have_ref))
*have_ref = false;
else
- sock_zerocopy_get(uarg);
- skb_shinfo(skb)->destructor_arg = uarg;
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ net_zcopy_get(uarg);
+ skb_zcopy_init(skb, uarg);
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
@@ -1466,41 +1759,55 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
-/* Release a reference on a zerocopy structure */
-static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+static inline void net_zcopy_put(struct ubuf_info *uarg)
{
- struct ubuf_info *uarg = skb_zcopy(skb);
+ if (uarg)
+ uarg->callback(NULL, uarg, true);
+}
+static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
+{
if (uarg) {
- if (skb_zcopy_is_nouarg(skb)) {
- /* no notification callback */
- } else if (uarg->callback == sock_zerocopy_callback) {
- uarg->zerocopy = uarg->zerocopy && zerocopy;
- sock_zerocopy_put(uarg);
- } else {
- uarg->callback(uarg, zerocopy);
- }
-
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (uarg->callback == msg_zerocopy_callback)
+ msg_zerocopy_put_abort(uarg, have_uref);
+ else if (have_uref)
+ net_zcopy_put(uarg);
}
}
-/* Abort a zerocopy operation and revert zckey on error in send syscall */
-static inline void skb_zcopy_abort(struct sk_buff *skb)
+/* Release a reference on a zerocopy structure */
+static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg, false);
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (!skb_zcopy_is_nouarg(skb))
+ uarg->callback(skb, uarg, zerocopy_success);
+
+ skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
+void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
+
+static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
+{
+ if (unlikely(skb_zcopy_managed(skb)))
+ __skb_zcopy_downgrade_managed(skb);
+}
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
}
+static inline void skb_poison_list(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ skb->next = SKB_LIST_POISON_NEXT;
+#endif
+}
+
/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb) \
for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
@@ -1639,6 +1946,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
return 0;
}
+/* This variant of skb_unclone() makes sure skb->truesize
+ * and skb_end_offset() are not changed whenever a new skb->head is needed.
+ *
+ * Indeed there is no guarantee that two kmalloc(X) calls return chunks
+ * with the same ksize() when various debugging features are in place.
+ */
+int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_cloned(skb))
+ return __skb_unclone_keeptruesize(skb, pri);
+ return 0;
+}
+
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
@@ -1669,8 +1992,10 @@ static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
}
/**
- * __skb_header_release - release reference to header
- * @skb: buffer to operate on
+ * __skb_header_release() - allow clones to use the headroom
+ * @skb: buffer to operate on
+ *
+ * See "DOC: dataref and headerless skbs".
*/
static inline void __skb_header_release(struct sk_buff *skb)
{
@@ -1723,7 +2048,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and forward
* and a couple of other messy ones. The normal one is tcpdumping
- * a packet thats being forwarded.
+ * a packet that's being forwarded.
*/
/**
@@ -1906,9 +2231,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
*/
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
- WRITE_ONCE(next->prev, newsk);
- WRITE_ONCE(prev->next, newsk);
- list->qlen++;
+ WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
+ WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
+ WRITE_ONCE(list->qlen, list->qlen + 1);
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,
@@ -2003,7 +2328,7 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
- __skb_insert(newsk, prev, prev->next, list);
+ __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
}
void skb_append(struct sk_buff *old, struct sk_buff *newsk,
@@ -2013,7 +2338,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
- __skb_insert(newsk, next->prev, next, list);
+ __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
}
/**
@@ -2126,11 +2451,56 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
+static inline void skb_frag_fill_netmem_desc(skb_frag_t *frag,
+ netmem_ref netmem, int off,
+ int size)
+{
+ frag->netmem = netmem;
+ frag->offset = off;
+ skb_frag_size_set(frag, size);
+}
+
+static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
+ struct page *page,
+ int off, int size)
+{
+ skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
+}
+
+static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,
+ int i, netmem_ref netmem,
+ int off, int size)
+{
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, netmem, off, size);
+}
+
+static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
+ int i, struct page *page,
+ int off, int size)
+{
+ __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,
+ size);
+}
+
+/**
+ * skb_len_add - adds a number of bytes to the len fields of skb
+ * @skb: buffer to add len to
+ * @delta: number of bytes to add
+ */
+static inline void skb_len_add(struct sk_buff *skb, int delta)
+{
+ skb->len += delta;
+ skb->data_len += delta;
+ skb->truesize += delta;
+}
+
/**
- * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * __skb_fill_netmem_desc - initialise a fragment in an skb
* @skb: buffer containing fragment to be initialised
- * @i: paged fragment index to initialise
- * @page: the page to use for this fragment
+ * @i: fragment index to initialise
+ * @netmem: the netmem to use for this fragment
- * @off: the offset to the data with @page
+ * @off: the offset to the data within @netmem
* @size: the length of the data
*
@@ -2139,23 +2509,33 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
*
* Does not take any additional reference on the fragment.
*/
-static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = netmem_to_page(netmem);
- /*
- * Propagate page pfmemalloc to the skb if we can. The problem is
+ __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
+
+ /* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
*/
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
-
page = compound_head(page);
if (page_is_pfmemalloc(page))
- skb->pfmemalloc = true;
+ skb->pfmemalloc = true;
+}
+
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
+}
+
+static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, netmem, off, size);
+ skb_shinfo(skb)->nr_frags = i + 1;
}
/**
@@ -2175,12 +2555,40 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
- __skb_fill_page_desc(skb, i, page, off, size);
- skb_shinfo(skb)->nr_frags = i + 1;
+ skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize);
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data within @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc; use it when the page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+ struct page *page, int off,
+ int size)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+ shinfo->nr_frags = i + 1;
+}
+
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize);
+
+static inline void skb_add_rx_frag(struct sk_buff *skb, int i,
+ struct page *page, int off, int size,
+ unsigned int truesize)
+{
+ skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size,
+ truesize);
+}
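/*
 * Editor's sketch (hypothetical Rx completion handler): callers keep the
 * page-based API while skb_add_rx_frag() now routes through the netmem
 * variant via page_to_netmem(). The helper also updates skb->len,
 * skb->data_len and skb->truesize, so no manual accounting is needed.
 */
static void example_rx_append(struct sk_buff *skb, struct page *page,
			      unsigned int len, unsigned int truesize)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
			len, truesize);
}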
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);
@@ -2222,6 +2630,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ if (WARN_ONCE(!skb->len, "%s\n", __func__))
+ DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
+
/*
* Add data to an sk_buff
*/
@@ -2285,6 +2701,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->data -= len;
skb->len += len;
return skb->data;
@@ -2293,8 +2711,17 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->len -= len;
- BUG_ON(skb->len < skb->data_len);
+ if (unlikely(skb->len < skb->data_len)) {
+#if defined(CONFIG_DEBUG_NET)
+ skb->len += len;
+ pr_err("__skb_pull(len=%u)\n", len);
+ skb_dump(KERN_ERR, skb, false);
+#endif
+ BUG();
+ }
return skb->data += len;
}
@@ -2303,29 +2730,39 @@ static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
+void *skb_pull_data(struct sk_buff *skb, size_t len);
+
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline enum skb_drop_reason
+pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
- if (len > skb_headlen(skb) &&
- !__pskb_pull_tail(skb, len - skb_headlen(skb)))
- return NULL;
- skb->len -= len;
- return skb->data += len;
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
+ if (likely(len <= skb_headlen(skb)))
+ return SKB_NOT_DROPPED_YET;
+
+ if (unlikely(len > skb->len))
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
+
+ if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb))))
+ return SKB_DROP_REASON_NOMEM;
+
+ return SKB_NOT_DROPPED_YET;
}
-static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
- return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
+ return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
-static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
- if (likely(len <= skb_headlen(skb)))
- return true;
- if (unlikely(len > skb->len))
- return false;
- return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
+ if (!pskb_may_pull(skb, len))
+ return NULL;
+
+ skb->len -= len;
+ return skb->data += len;
}
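/*
 * Editor's sketch (hypothetical parser): the calling pattern the rework
 * above enables - propagate the precise drop reason instead of a bool,
 * so the caller can hand it straight to kfree_skb_reason().
 */
static enum skb_drop_reason example_pull_udp(struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	reason = pskb_may_pull_reason(skb, sizeof(struct udphdr));
	if (reason != SKB_NOT_DROPPED_YET)
		return reason;

	/* sizeof(struct udphdr) bytes are now linear at skb->data */
	return SKB_NOT_DROPPED_YET;
}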
void skb_condense(struct sk_buff *skb);
@@ -2474,6 +2911,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
+{
+ return skb->inner_network_header > 0;
+}
+
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
@@ -2497,6 +2939,7 @@ static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->head + skb->transport_header;
}
@@ -2528,8 +2971,14 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
skb->network_header += offset;
}
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+ return skb->mac_header != (typeof(skb->mac_header))~0U;
+}
+
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->head + skb->mac_header;
}
@@ -2540,12 +2989,13 @@ static inline int skb_mac_offset(const struct sk_buff *skb)
static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->network_header - skb->mac_header;
}
-static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+static inline void skb_unset_mac_header(struct sk_buff *skb)
{
- return skb->mac_header != (typeof(skb->mac_header))~0U;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
}
static inline void skb_reset_mac_header(struct sk_buff *skb)
@@ -2603,6 +3053,7 @@ static inline int skb_transport_offset(const struct sk_buff *skb)
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->transport_header - skb->network_header;
}
@@ -2765,8 +3216,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
if (likely(!skb_zcopy(skb)))
return 0;
- if (!skb_zcopy_is_nouarg(skb) &&
- skb_uarg(skb)->callback == sock_zerocopy_callback)
+ if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
@@ -2780,24 +3230,59 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
}
/**
- * __skb_queue_purge - empty a list
+ * __skb_queue_purge_reason - empty a list
* @list: list to empty
+ * @reason: drop reason
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-static inline void __skb_queue_purge(struct sk_buff_head *list)
+static inline void __skb_queue_purge_reason(struct sk_buff_head *list,
+ enum skb_drop_reason reason)
{
struct sk_buff *skb;
+
while ((skb = __skb_dequeue(list)) != NULL)
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
+}
+
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ __skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
+}
+
+void skb_queue_purge_reason(struct sk_buff_head *list,
+ enum skb_drop_reason reason);
+
+static inline void skb_queue_purge(struct sk_buff_head *list)
+{
+ skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
}
-void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
+void skb_errqueue_purge(struct sk_buff_head *list);
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for use as a receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __netdev_alloc_frag_align(fragsz, -align);
+}
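/*
 * Editor's sketch (hypothetical caller): requesting an aligned fragment.
 * align must be a power of two - the mask passed down is -align, which
 * clears the low bits, while ~0u means "no alignment constraint".
 */
static void *example_rx_buffer(unsigned int size)
{
	return netdev_alloc_frag_align(size, SMP_CACHE_BYTES);
}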
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
@@ -2856,7 +3341,20 @@ static inline void skb_free_frag(void *addr)
page_frag_free(addr);
}
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+ return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __napi_alloc_frag_align(fragsz, -align);
+}
+
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
@@ -2866,8 +3364,8 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
}
void napi_consume_skb(struct sk_buff *skb, int budget);
-void __kfree_skb_flush(void);
-void __kfree_skb_defer(struct sk_buff *skb);
+void napi_skb_free_stolen_head(struct sk_buff *skb);
+void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
/**
* __dev_alloc_pages - allocate page for network Rx
@@ -2882,7 +3380,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
unsigned int order)
{
/* This piece of code contains several assumptions.
- * 1. This is for device Rx, therefor a cold page is preferred.
+ * 1. This is for device Rx, therefore a cold page is preferred.
* 2. The expectation is the user wants a compound page.
- * 3. If requesting a order 0 page it will not be compound
+ * 3. If requesting an order 0 page it will not be compound
* due to the check to see if order has a value in prep_new_page
@@ -2918,12 +3416,28 @@ static inline struct page *dev_alloc_page(void)
}
/**
+ * dev_page_is_reusable - check whether a page can be reused for network Rx
+ * @page: the page to test
+ *
+ * A page shouldn't be considered for reusing/recycling if it was allocated
+ * under memory pressure or at a distant memory node.
+ *
+ * Returns false if this page should be returned to the page allocator, true
+ * otherwise.
+ */
+static inline bool dev_page_is_reusable(const struct page *page)
+{
+ return likely(page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page));
+}
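/*
 * Editor's sketch (hypothetical driver recycle path): pages that fail
 * the test - pfmemalloc pages or pages from a remote NUMA node - go back
 * to the page allocator instead of the driver's Rx cache. Assumes the
 * caller owns a reference either way.
 */
static void example_recycle(struct page *page, struct page **cache)
{
	if (dev_page_is_reusable(page))
		*cache = page;		/* keep our reference for reuse */
	else
		put_page(page);
}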
+
+/**
* skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
* @page: The page that was allocated from skb_alloc_page
* @skb: The skb that may need pfmemalloc set
*/
-static inline void skb_propagate_pfmemalloc(struct page *page,
- struct sk_buff *skb)
+static inline void skb_propagate_pfmemalloc(const struct page *page,
+ struct sk_buff *skb)
{
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -2935,7 +3449,7 @@ static inline void skb_propagate_pfmemalloc(struct page *page,
*/
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
- return frag->bv_offset;
+ return frag->offset;
}
/**
@@ -2945,7 +3459,7 @@ static inline unsigned int skb_frag_off(const skb_frag_t *frag)
*/
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
- frag->bv_offset += delta;
+ frag->offset += delta;
}
/**
@@ -2955,7 +3469,7 @@ static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
- frag->bv_offset = offset;
+ frag->offset = offset;
}
/**
@@ -2966,7 +3480,7 @@ static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
static inline void skb_frag_off_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_offset = fragfrom->bv_offset;
+ fragto->offset = fragfrom->offset;
}
/**
@@ -2977,7 +3491,7 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto,
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
- return frag->bv_page;
+ return netmem_to_page(frag->netmem);
}
/**
@@ -3003,15 +3517,45 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom);
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog);
+bool napi_pp_put_page(struct page *page, bool napi_safe);
+
+static inline void
+skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe)
+{
+#ifdef CONFIG_PAGE_POOL
+ if (skb->pp_recycle && napi_pp_put_page(page, napi_safe))
+ return;
+#endif
+ put_page(page);
+}
+
+static inline void
+napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
+{
+ struct page *page = skb_frag_page(frag);
+
+#ifdef CONFIG_PAGE_POOL
+ if (recycle && napi_pp_put_page(page, napi_safe))
+ return;
+#endif
+ put_page(page);
+}
+
/**
* __skb_frag_unref - release a reference on a paged fragment.
* @frag: the paged fragment
+ * @recycle: recycle the page if allocated via page_pool
*
- * Releases a reference on the paged fragment @frag.
+ * Releases a reference on the paged fragment @frag
+ * or recycles the page via the page_pool API.
*/
-static inline void __skb_frag_unref(skb_frag_t *frag)
+static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- put_page(skb_frag_page(frag));
+ napi_frag_unref(frag, recycle, false);
}
/**
@@ -3023,7 +3567,10 @@ static inline void __skb_frag_unref(skb_frag_t *frag)
*/
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
- __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (!skb_zcopy_managed(skb))
+ __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
}
/**
@@ -3062,33 +3609,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
static inline void skb_frag_page_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_page = fragfrom->bv_page;
-}
-
-/**
- * __skb_frag_set_page - sets the page contained in a paged fragment
- * @frag: the paged fragment
- * @page: the page to set
- *
- * Sets the fragment @frag to contain @page.
- */
-static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
-{
- frag->bv_page = page;
-}
-
-/**
- * skb_frag_set_page - sets the page contained in a paged fragment of an skb
- * @skb: the buffer
- * @f: the fragment offset
- * @page: the page to set
- *
- * Sets the @f'th fragment of @skb to contain @page.
- */
-static inline void skb_frag_set_page(struct sk_buff *skb, int f,
- struct page *page)
-{
- __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
+ fragto->netmem = fragfrom->netmem;
}
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
@@ -3253,6 +3774,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
return __skb_put_padto(skb, len, true);
}
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
+ __must_check;
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
@@ -3313,7 +3837,7 @@ static inline int skb_linearize(struct sk_buff *skb)
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}
/**
@@ -3354,7 +3878,12 @@ __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- __skb_postpull_rcsum(skb, start, len, 0);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = wsum_negate(csum_partial(start, len,
+ wsum_negate(skb->csum)));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
static __always_inline void
@@ -3515,8 +4044,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
- int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
@@ -3545,12 +4073,13 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
- int len, __wsum csum);
+ int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
+int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3558,16 +4087,18 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
-bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
-int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
+int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+int skb_eth_pop(struct sk_buff *skb);
+int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
+ const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
@@ -3600,14 +4131,13 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
static inline void * __must_check
-__skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *data, int hlen, void *buffer)
+__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
+ const void *data, int hlen, void *buffer)
{
- if (hlen - offset >= len)
- return data + offset;
+ if (likely(hlen - offset >= len))
+ return (void *)data + offset;
- if (!skb ||
- skb_copy_bits(skb, offset, buffer, len) < 0)
+ if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
return NULL;
return buffer;
@@ -3620,6 +4150,14 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb_headlen(skb), buffer);
}
+static inline void * __must_check
+skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
+{
+ if (likely(skb_headlen(skb) - offset >= len))
+ return skb->data + offset;
+ return NULL;
+}
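/*
 * Editor's sketch (hypothetical classifier): the canonical
 * skb_header_pointer() pattern - a direct pointer when the bytes are
 * linear, otherwise a copy into the caller's stack buffer.
 */
static bool example_is_tcp_syn(const struct sk_buff *skb, int thoff)
{
	struct tcphdr _th;
	const struct tcphdr *th;

	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
	if (!th)
		return false;	/* packet too short */
	return th->syn && !th->ack;
}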
+
/**
* skb_needs_linearize - check if we need to linearize a given skb
* depending on the given device features.
@@ -3719,6 +4257,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
+ skb->mono_delivery_time = 0;
}
static inline ktime_t net_timedelta(ktime_t t)
@@ -3726,8 +4265,53 @@ static inline ktime_t net_timedelta(ktime_t t)
return ktime_sub(ktime_get_real(), t);
}
-static inline ktime_t net_invalid_timestamp(void)
+static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
+ bool mono)
{
+ skb->tstamp = kt;
+ skb->mono_delivery_time = kt && mono;
+}
+
+DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
+
+/* Used in the ingress path to clear the delivery_time and,
+ * if needed, set skb->tstamp to the (rcv) timestamp.
+ */
+static inline void skb_clear_delivery_time(struct sk_buff *skb)
+{
+ if (skb->mono_delivery_time) {
+ skb->mono_delivery_time = 0;
+ if (static_branch_unlikely(&netstamp_needed_key))
+ skb->tstamp = ktime_get_real();
+ else
+ skb->tstamp = 0;
+ }
+}
+
+static inline void skb_clear_tstamp(struct sk_buff *skb)
+{
+ if (skb->mono_delivery_time)
+ return;
+
+ skb->tstamp = 0;
+}
+
+static inline ktime_t skb_tstamp(const struct sk_buff *skb)
+{
+ if (skb->mono_delivery_time)
+ return 0;
+
+ return skb->tstamp;
+}
+
+static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
+{
+ if (!skb->mono_delivery_time && skb->tstamp)
+ return skb->tstamp;
+
+ if (static_branch_unlikely(&netstamp_needed_key) || cond)
+ return ktime_get_real();
+
return 0;
}
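/*
 * Editor's sketch (hypothetical call sites): the convention these
 * helpers encode. A mono delivery time rides in skb->tstamp inside the
 * stack, and skb_tstamp() hides it from receive-timestamp readers.
 */
static void example_set_edt(struct sk_buff *skb, ktime_t edt)
{
	skb_set_delivery_time(skb, edt, true);	/* mono delivery time */
}

static ktime_t example_rcv_tstamp(const struct sk_buff *skb)
{
	return skb_tstamp(skb);	/* 0 while tstamp holds a delivery time */
}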
@@ -3747,10 +4331,13 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
{
const void *a = skb_metadata_end(skb_a);
const void *b = skb_metadata_end(skb_b);
- /* Using more efficient varaiant than plain call to memcmp(). */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 diffs = 0;
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ BITS_PER_LONG != 64)
+ goto slow;
+
+ /* Using a more efficient variant than a plain call to memcmp(). */
switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
@@ -3770,11 +4357,11 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
+ default:
+slow:
+ return memcmp(a - meta_len, b - meta_len, meta_len);
}
return diffs;
-#else
- return memcmp(a - meta_len, b - meta_len, meta_len);
-#endif
}
static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
@@ -3835,7 +4422,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);
-void __skb_tstamp_tx(struct sk_buff *orig_skb,
+void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype);
@@ -4093,7 +4680,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
return;
}
- if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
__skb_checksum_complete(skb);
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
}
@@ -4125,6 +4712,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ skb->slow_gro |= !!nfct;
skb->_nfct = nfct;
#endif
}
@@ -4143,6 +4731,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MPTCP)
SKB_EXT_MPTCP,
#endif
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ SKB_EXT_MCTP,
+#endif
SKB_EXT_NUM, /* must be last */
};
@@ -4253,7 +4844,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
static inline void nf_reset_trace(struct sk_buff *skb)
{
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
skb->nf_trace = 0;
#endif
}
@@ -4273,7 +4864,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
#endif
@@ -4284,6 +4875,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
+ dst->slow_gro = src->slow_gro;
__nf_copy(dst, src, true);
}
@@ -4372,75 +4964,6 @@ static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
#endif
}
-/* Keeps track of mac header offset relative to skb->head.
- * It is useful for TSO of Tunneling protocol. e.g. GRE.
- * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header.
- * Keeps track of level of encapsulation of network headers.
- */
-struct skb_gso_cb {
- union {
- int mac_offset;
- int data_offset;
- };
- int encap_level;
- __wsum csum;
- __u16 csum_start;
-};
-#define SKB_GSO_CB_OFFSET 32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
-
-static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
-{
- return (skb_mac_header(inner_skb) - inner_skb->head) -
- SKB_GSO_CB(inner_skb)->mac_offset;
-}
-
-static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
-{
- int new_headroom, headroom;
- int ret;
-
- headroom = skb_headroom(skb);
- ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
- if (ret)
- return ret;
-
- new_headroom = skb_headroom(skb);
- SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
- return 0;
-}
-
-static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
-{
- /* Do not update partial checksums if remote checksum is enabled. */
- if (skb->remcsum_offload)
- return;
-
- SKB_GSO_CB(skb)->csum = res;
- SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
-}
-
-/* Compute the checksum for a gso segment. First compute the checksum value
- * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
- * then add in skb->csum (checksum from csum_start to end of packet).
- * skb->csum and csum_start are then updated to reflect the checksum of the
- * resultant packet starting from the transport header-- the resultant checksum
- * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
- * header.
- */
-static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
-{
- unsigned char *csum_start = skb_transport_header(skb);
- int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
- __wsum partial = SKB_GSO_CB(skb)->csum;
-
- SKB_GSO_CB(skb)->csum = res;
- SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
-
- return csum_fold(csum_partial(csum_start, plen, partial));
-}
-
static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
@@ -4520,9 +5043,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
*/
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
-#ifdef DEBUG
- BUG_ON(skb->ip_summed != CHECKSUM_NONE);
-#endif
+ DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
}
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
@@ -4573,29 +5094,76 @@ static inline __wsum lco_csum(struct sk_buff *skb)
static inline bool skb_is_redirected(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_REDIRECT
return skb->redirected;
-#else
- return false;
-#endif
}
static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
-#ifdef CONFIG_NET_REDIRECT
skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
skb->from_ingress = from_ingress;
if (skb->from_ingress)
- skb->tstamp = 0;
+ skb_clear_tstamp(skb);
#endif
}
static inline void skb_reset_redirect(struct sk_buff *skb)
{
-#ifdef CONFIG_NET_REDIRECT
skb->redirected = 0;
+}
+
+static inline void skb_set_redirected_noclear(struct sk_buff *skb,
+ bool from_ingress)
+{
+ skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
+ skb->from_ingress = from_ingress;
+#endif
+}
+
+static inline bool skb_csum_is_sctp(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ return skb->csum_not_inet;
+#else
+ return false;
+#endif
+}
+
+static inline void skb_reset_csum_not_inet(struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ skb->csum_not_inet = 0;
#endif
}
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+ const u64 kcov_handle)
+{
+#ifdef CONFIG_KCOV
+ skb->kcov_handle = kcov_handle;
+#endif
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+#ifdef CONFIG_KCOV
+ return skb->kcov_handle;
+#else
+ return 0;
+#endif
+}
+
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
+{
+#ifdef CONFIG_PAGE_POOL
+ skb->pp_recycle = 1;
+#endif
+}
+
+ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
+ ssize_t maxsize, gfp_t gfp);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 1e9ed840b9fc..e65ec3fd2799 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -29,7 +29,7 @@ struct sk_msg_sg {
u32 end;
u32 size;
u32 copybreak;
- unsigned long copy;
+ DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
/* The extra two elements:
* 1) used for chaining the front and sections when the list becomes
* partitioned (e.g. end < start). The crypto APIs require the
@@ -38,7 +38,6 @@ struct sk_msg_sg {
*/
struct scatterlist data[MAX_MSG_FRAGS + 2];
};
-static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
@@ -56,12 +55,14 @@ struct sk_msg {
struct sk_psock_progs {
struct bpf_prog *msg_parser;
- struct bpf_prog *skb_parser;
+ struct bpf_prog *stream_parser;
+ struct bpf_prog *stream_verdict;
struct bpf_prog *skb_verdict;
};
enum sk_psock_state_bits {
SK_PSOCK_TX_ENABLED,
+ SK_PSOCK_RX_STRP_ENABLED,
};
struct sk_psock_link {
@@ -70,14 +71,7 @@ struct sk_psock_link {
void *link_raw;
};
-struct sk_psock_parser {
- struct strparser strp;
- bool enabled;
- void (*saved_data_ready)(struct sock *sk);
-};
-
struct sk_psock_work_state {
- struct sk_buff *skb;
u32 len;
u32 off;
};
@@ -88,25 +82,37 @@ struct sk_psock {
u32 apply_bytes;
u32 cork_bytes;
u32 eval;
+ bool redir_ingress; /* undefined if sk_redir is null */
struct sk_msg *cork;
struct sk_psock_progs progs;
- struct sk_psock_parser parser;
+#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+ struct strparser strp;
+#endif
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
+ spinlock_t ingress_lock;
unsigned long state;
struct list_head link;
spinlock_t link_lock;
refcount_t refcnt;
void (*saved_unhash)(struct sock *sk);
+ void (*saved_destroy)(struct sock *sk);
void (*saved_close)(struct sock *sk, long timeout);
void (*saved_write_space)(struct sock *sk);
+ void (*saved_data_ready)(struct sock *sk);
+ /* psock_update_sk_prot may be called with restore=false many times
+ * so the handler must be safe for this case. It will be called
+ * exactly once with restore=true when the psock is being destroyed
+ * and psock refcnt is zero, but before an RCU grace period.
+ */
+ int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
+ bool restore);
struct proto *sk_proto;
+ struct mutex work_mutex;
struct sk_psock_work_state work_state;
- struct work_struct work;
- union {
- struct rcu_head rcu;
- struct work_struct gc;
- };
+ struct delayed_work work;
+ struct sock *sk_pair;
+ struct rcu_work rwork;
};
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
@@ -127,6 +133,9 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
+int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags);
+bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
@@ -169,11 +178,6 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
#define sk_msg_iter_next(msg, which) \
sk_msg_iter_var_next(msg->sg.which)
-static inline void sk_msg_clear_meta(struct sk_msg *msg)
-{
- memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
-}
-
static inline void sk_msg_init(struct sk_msg *msg)
{
BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
@@ -232,7 +236,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
- if (test_bit(msg->sg.start, &msg->sg.copy)) {
+ if (test_bit(msg->sg.start, msg->sg.copy)) {
msg->data = NULL;
msg->data_end = NULL;
} else {
@@ -251,7 +255,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
sg_set_page(sge, page, len, offset);
sg_unmark_end(sge);
- __set_bit(msg->sg.end, &msg->sg.copy);
+ __set_bit(msg->sg.end, msg->sg.copy);
msg->sg.size += len;
sk_msg_iter_next(msg, end);
}
@@ -260,9 +264,9 @@ static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
do {
if (copy_state)
- __set_bit(i, &msg->sg.copy);
+ __set_bit(i, msg->sg.copy);
else
- __clear_bit(i, &msg->sg.copy);
+ __clear_bit(i, msg->sg.copy);
sk_msg_iter_var_next(i);
if (i == msg->sg.end)
break;
@@ -281,13 +285,81 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
- return rcu_dereference_sk_user_data(sk);
+ return __rcu_dereference_sk_user_data_with_flags(sk,
+ SK_USER_DATA_PSOCK);
+}
+
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ kfree_skb(skb);
}
static inline void sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
- list_add_tail(&msg->list, &psock->ingress_msg);
+ spin_lock_bh(&psock->ingress_lock);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ list_add_tail(&msg->list, &psock->ingress_msg);
+ else {
+ sk_msg_free(psock->sk, msg);
+ kfree(msg);
+ }
+ spin_unlock_bh(&psock->ingress_lock);
+}
+
+static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
+{
+ struct sk_msg *msg;
+
+ spin_lock_bh(&psock->ingress_lock);
+ msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ if (msg)
+ list_del(&msg->list);
+ spin_unlock_bh(&psock->ingress_lock);
+ return msg;
+}
+
+static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
+{
+ struct sk_msg *msg;
+
+ spin_lock_bh(&psock->ingress_lock);
+ msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ spin_unlock_bh(&psock->ingress_lock);
+ return msg;
+}
+
+static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
+ struct sk_msg *msg)
+{
+ struct sk_msg *ret;
+
+ spin_lock_bh(&psock->ingress_lock);
+ if (list_is_last(&msg->list, &psock->ingress_msg))
+ ret = NULL;
+ else
+ ret = list_next_entry(msg, list);
+ spin_unlock_bh(&psock->ingress_lock);
+ return ret;
}
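/*
 * Editor's sketch (hypothetical teardown path): draining the ingress
 * list with the lock-protected dequeue helper above; sk_msg_free() is
 * declared elsewhere in this header.
 */
static void example_drain_ingress(struct sk_psock *psock)
{
	struct sk_msg *msg;

	while ((msg = sk_psock_dequeue_msg(psock)) != NULL) {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}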
static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
@@ -295,19 +367,45 @@ static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
return psock ? list_empty(&psock->ingress_msg) : true;
}
+static inline void kfree_sk_msg(struct sk_msg *msg)
+{
+ if (msg->skb)
+ consume_skb(msg->skb);
+ kfree(msg);
+}
+
static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
struct sock *sk = psock->sk;
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
struct sk_psock *sk_psock_init(struct sock *sk, int node);
+void sk_psock_stop(struct sk_psock *psock);
+#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
+#else
+static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+{
+}
+
+static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+{
+}
+#endif
+
+void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
+void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg);
@@ -325,8 +423,6 @@ static inline void sk_psock_free_link(struct sk_psock_link *link)
struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
-void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
-
static inline void sk_psock_cork_free(struct sk_psock *psock)
{
if (psock->cork) {
@@ -336,60 +432,11 @@ static inline void sk_psock_cork_free(struct sk_psock *psock)
}
}
-static inline void sk_psock_update_proto(struct sock *sk,
- struct sk_psock *psock,
- struct proto *ops)
-{
- /* Initialize saved callbacks and original proto only once, since this
- * function may be called multiple times for a psock, e.g. when
- * psock->progs.msg_parser is updated.
- *
- * Since we've not installed the new proto, psock is not yet in use and
- * we can initialize it without synchronization.
- */
- if (!psock->sk_proto) {
- struct proto *orig = READ_ONCE(sk->sk_prot);
-
- psock->saved_unhash = orig->unhash;
- psock->saved_close = orig->close;
- psock->saved_write_space = sk->sk_write_space;
-
- psock->sk_proto = orig;
- }
-
- /* Pairs with lockless read in sk_clone_lock() */
- WRITE_ONCE(sk->sk_prot, ops);
-}
-
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
- sk->sk_prot->unhash = psock->saved_unhash;
- if (inet_csk_has_ulp(sk)) {
- tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
- } else {
- sk->sk_write_space = psock->saved_write_space;
- /* Pairs with lockless read in sk_clone_lock() */
- WRITE_ONCE(sk->sk_prot, psock->sk_proto);
- }
-}
-
-static inline void sk_psock_set_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- return test_bit(bit, &psock->state);
+ if (psock->psock_update_sk_prot)
+ psock->psock_update_sk_prot(sk, psock, true);
}
static inline struct sk_psock *sk_psock_get(struct sock *sk)
@@ -404,8 +451,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk)
return psock;
}
-void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
-void sk_psock_destroy(struct rcu_head *rcu);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
@@ -416,8 +461,8 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
- if (psock->parser.enabled)
- psock->parser.saved_data_ready(sk);
+ if (psock->saved_data_ready)
+ psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
}
@@ -446,7 +491,8 @@ static inline int psock_replace_prog(struct bpf_prog **pprog,
static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
psock_set_prog(&progs->msg_parser, NULL);
- psock_set_prog(&progs->skb_parser, NULL);
+ psock_set_prog(&progs->stream_parser, NULL);
+ psock_set_prog(&progs->stream_verdict, NULL);
psock_set_prog(&progs->skb_verdict, NULL);
}
@@ -456,6 +502,58 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
if (!psock)
return false;
- return psock->parser.enabled;
+ return !!psock->saved_data_ready;
+}
+
+#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
+
+#define BPF_F_STRPARSER (1UL << 1)
+
+/* We only have two bits so far. */
+#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)
+
+static inline bool skb_bpf_strparser(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return sk_redir & BPF_F_STRPARSER;
+}
+
+static inline void skb_bpf_set_strparser(struct sk_buff *skb)
+{
+ skb->_sk_redir |= BPF_F_STRPARSER;
+}
+
+static inline bool skb_bpf_ingress(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return sk_redir & BPF_F_INGRESS;
+}
+
+static inline void skb_bpf_set_ingress(struct sk_buff *skb)
+{
+ skb->_sk_redir |= BPF_F_INGRESS;
+}
+
+static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
+ bool ingress)
+{
+ skb->_sk_redir = (unsigned long)sk_redir;
+ if (ingress)
+ skb->_sk_redir |= BPF_F_INGRESS;
+}
+
+static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
+}
+
+static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
+{
+ skb->_sk_redir = 0;
}
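/*
 * Editor's sketch (hypothetical verdict/consumer pair): the low bits of
 * skb->_sk_redir carry BPF_F_INGRESS/BPF_F_STRPARSER and the remaining
 * bits hold the target socket pointer, as the helpers above encode.
 */
static void example_verdict(struct sk_buff *skb, struct sock *target)
{
	skb_bpf_set_redir(skb, target, true);	/* redirect to ingress */
}

static struct sock *example_consume(struct sk_buff *skb)
{
	struct sock *sk = skb_bpf_redirect_fetch(skb);

	skb_bpf_redirect_clear(skb);	/* don't leak the stale pointer */
	return sk;
}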
+#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 24df2393ec03..e53cbfa18325 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -12,33 +12,78 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
+#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
+#include <linux/cleanup.h>
+#include <linux/hash.h>
+
+enum _slab_flag_bits {
+ _SLAB_CONSISTENCY_CHECKS,
+ _SLAB_RED_ZONE,
+ _SLAB_POISON,
+ _SLAB_KMALLOC,
+ _SLAB_HWCACHE_ALIGN,
+ _SLAB_CACHE_DMA,
+ _SLAB_CACHE_DMA32,
+ _SLAB_STORE_USER,
+ _SLAB_PANIC,
+ _SLAB_TYPESAFE_BY_RCU,
+ _SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+ _SLAB_DEBUG_OBJECTS,
+#endif
+ _SLAB_NOLEAKTRACE,
+ _SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+ _SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+ _SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+ _SLAB_KASAN,
+#endif
+ _SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+ _SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+ _SLAB_RECLAIM_ACCOUNT,
+#endif
+ _SLAB_OBJECT_POISON,
+ _SLAB_CMPXCHG_DOUBLE,
+ _SLAB_FLAGS_LAST_BIT
+};
+#define __SLAB_FLAG_BIT(nr) ((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED ((slab_flags_t __force)(0U))
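
This replaces hand-maintained hex constants with an enum of bit positions, so conditional flags drop out of the bit space entirely instead of leaving holes. A minimal sketch of the pattern (plain C illustration, not kernel code):

    enum _flag_bits { _F_RED_ZONE, _F_POISON, _F_FLAGS_LAST_BIT };

    #define FLAG_BIT(nr)  (1U << (nr))
    #define F_RED_ZONE    FLAG_BIT(_F_RED_ZONE)   /* 0x1 */
    #define F_POISON      FLAG_BIT(_F_POISON)     /* 0x2 */

    /* the sentinel tracks how many bits are in use, enabling static checks */
    _Static_assert(_F_FLAGS_LAST_BIT <= 32, "flag space exhausted");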
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled; otherwise they are no-ops
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS __SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE __SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
-#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
+/* Indicate a kmalloc slab */
+#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32 __SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
@@ -51,16 +96,18 @@
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
- * rcu_read_lock()
- * again:
+ * begin:
+ * rcu_read_lock();
* obj = lockless_lookup(key);
* if (obj) {
* if (!try_get_ref(obj)) // might fail for free objects
- * goto again;
+ * rcu_read_unlock();
+ * goto begin;
*
* if (obj->key != key) { // not the object we expected
* put_ref(obj);
- * goto again;
+ * rcu_read_unlock();
+ * goto begin;
* }
* }
* rcu_read_unlock();
@@ -74,52 +121,87 @@
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
+ * Note that it is not possible to acquire a lock within a structure
+ * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
+ * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
+ * are not zeroed before being given to the slab, which means that any
+ * locks must be initialized after each and every kmem_cache_alloc().
+ * Alternatively, make the ctor passed to kmem_cache_create() initialize
+ * the locks at page-allocation time, as is done in __i915_request_ctor(),
+ * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
+ * to safely acquire those ctor-initialized locks under rcu_read_lock()
+ * protection.
+ *
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
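
The loop the comment describes, written out as a sketch; struct obj, lockless_lookup(), try_get_ref() and put_ref() are placeholders, not kernel APIs. The re-check of obj->key is what guards against the slot having been recycled for a different object of the same type:

    struct obj *lookup_get(unsigned long key)
    {
        struct obj *obj;

    begin:
        rcu_read_lock();
        obj = lockless_lookup(key);          /* may find a recycled object */
        if (obj) {
            if (!try_get_ref(obj)) {         /* refcount_inc_not_zero()-style */
                rcu_read_unlock();
                goto begin;
            }
            if (obj->key != key) {           /* slot was reused: drop and retry */
                put_ref(obj);
                rcu_read_unlock();
                goto begin;
            }
        }
        rcu_read_unlock();
        return obj;
    }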
/* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
-/* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
+#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
-#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
-# define SLAB_DEBUG_OBJECTS 0
+# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_UNUSED
#endif
/* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE __SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
+
+/*
+ * Prevent merging with compatible kmem caches. This flag should be used
+ * cautiously. Valid use cases:
+ *
+ * - caches created for self-tests (e.g. kunit)
+ * - general caches created and used by a subsystem, only when a
+ * (subsystem-specific) debug option is enabled
+ * - performance critical caches, should be very rare and consulted with slab
+ * maintainers, and not used together with CONFIG_SLUB_TINY
+ */
+#define SLAB_NO_MERGE __SLAB_FLAG_BIT(_SLAB_NO_MERGE)
/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB __SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
-# define SLAB_FAILSLAB 0
+# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
-# define SLAB_ACCOUNT 0
+# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
#endif
-#ifdef CONFIG_KASAN
-#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
+#ifdef CONFIG_KASAN_GENERIC
+#define SLAB_KASAN __SLAB_FLAG_BIT(_SLAB_KASAN)
#else
-#define SLAB_KASAN 0
+#define SLAB_KASAN __SLAB_FLAG_UNUSED
+#endif
+
+/*
+ * Ignore user specified debugging flags.
+ * Intended for caches created for self-tests so they have only flags
+ * specified in the code and other flags are ignored.
+ */
+#define SLAB_NO_USER_FLAGS __SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
+
+#ifdef CONFIG_KFENCE
+#define SLAB_SKIP_KFENCE __SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
+#else
+#define SLAB_SKIP_KFENCE __SLAB_FLAG_UNUSED
#endif
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
+#ifndef CONFIG_SLUB_TINY
+#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
+#else
+#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_UNUSED
+#endif
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
-
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -135,15 +217,13 @@
#include <linux/kasan.h>
+struct list_lru;
struct mem_cgroup;
/*
* struct kmem_cache related prototypes
*/
-void __init kmem_cache_init(void);
bool slab_is_available(void);
-extern bool usercopy_fallback;
-
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
@@ -152,8 +232,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -181,33 +261,49 @@ int kmem_cache_shrink(struct kmem_cache *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kfree_sensitive(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
+void kfree(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
-#define kzfree(x) kfree_sensitive(x) /* For backward compatibility */
+DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
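
The DEFINE_FREE() hook ties kfree() into the scope-based cleanup machinery from <linux/cleanup.h>, so an annotated local is freed automatically on every exit path. A hedged sketch (use() is a hypothetical consumer):

    static int example(size_t len)
    {
        void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

        if (!buf)
            return -ENOMEM;
        if (len < 16)
            return -EINVAL;     /* buf is kfree()d automatically here */

        return use(buf);        /* and freed here once use() returns */
    }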
-#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
- bool to_user);
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
+size_t ksize(const void *objp);
+
+#ifdef CONFIG_PRINTK
+bool kmem_dump_obj(void *object);
#else
-static inline void __check_heap_object(const void *ptr, unsigned long n,
- struct page *page, bool to_user) { }
+static inline bool kmem_dump_obj(void *object) { return false; }
#endif
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ * Setting ARCH_DMA_MINALIGN in arch headers allows that.
*/
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#ifdef ARCH_HAS_DMA_MINALIGN
+#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
-#else
+#endif
+#endif
+
+#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#elif ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif
/*
@@ -220,9 +316,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#endif
/*
- * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
- * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
- * aligned pointers.
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline unsigned int arch_slab_minalign(void)
+{
+ return ARCH_SLAB_MINALIGN;
+}
+#endif
+
+/*
+ * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
+ * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
+ * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
*/
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
@@ -232,54 +340,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
* Kmalloc array related definitions
*/
-#ifdef CONFIG_SLAB
-/*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
- */
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
/*
* SLUB directly allocates requests fitting in to an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 3
-#endif
-#endif
-
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
-#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
-/* Maximum order allocatable via the slab allocagtor */
+/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
/*
@@ -300,41 +375,85 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
(KMALLOC_MIN_SIZE) : 16)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies
+#else
+#define RANDOM_KMALLOC_CACHES_NR 0
+#endif
+
/*
* Whenever changing this, take care of that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
+ *
+ * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
+ * is for accounted but unreclaimable and non-dma objects. All the other
+ * kmem caches can have both accounted and unaccounted objects.
*/
enum kmalloc_cache_type {
KMALLOC_NORMAL = 0,
+#ifndef CONFIG_ZONE_DMA
+ KMALLOC_DMA = KMALLOC_NORMAL,
+#endif
+#ifndef CONFIG_MEMCG_KMEM
+ KMALLOC_CGROUP = KMALLOC_NORMAL,
+#endif
+ KMALLOC_RANDOM_START = KMALLOC_NORMAL,
+ KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
+#ifdef CONFIG_SLUB_TINY
+ KMALLOC_RECLAIM = KMALLOC_NORMAL,
+#else
KMALLOC_RECLAIM,
+#endif
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ KMALLOC_CGROUP,
+#endif
NR_KMALLOC_TYPES
};
-#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
-static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
+/*
+ * Define gfp bits that should not be set for KMALLOC_NORMAL.
+ */
+#define KMALLOC_NOT_NORMAL_BITS \
+ (__GFP_RECLAIMABLE | \
+ (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
+ (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
+
+extern unsigned long random_kmalloc_seed;
+
+static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
-#ifdef CONFIG_ZONE_DMA
/*
* The most common case is KMALLOC_NORMAL, so test for it
- * with a single branch for both flags.
+ * with a single branch for all the relevant flags.
*/
- if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
+ if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+ /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
+ return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
+ ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
+#else
return KMALLOC_NORMAL;
+#endif
/*
- * At least one of the flags has to be set. If both are, __GFP_DMA
- * is more important.
+ * At least one of the flags has to be set. Their priorities in
+ * decreasing order are:
+ * 1) __GFP_DMA
+ * 2) __GFP_RECLAIMABLE
+ * 3) __GFP_ACCOUNT
*/
- return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
-#else
- return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
-#endif
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
+ return KMALLOC_DMA;
+ if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
+ return KMALLOC_RECLAIM;
+ else
+ return KMALLOC_CGROUP;
}
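
Worked examples of the mapping above, assuming CONFIG_ZONE_DMA=y, CONFIG_MEMCG_KMEM=y and CONFIG_RANDOM_KMALLOC_CACHES=n:

    /*
     * kmalloc_type(GFP_KERNEL, _RET_IP_)                     == KMALLOC_NORMAL
     * kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT, _RET_IP_)     == KMALLOC_CGROUP
     * kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE, _RET_IP_) == KMALLOC_RECLAIM
     * kmalloc_type(GFP_DMA | __GFP_RECLAIMABLE, _RET_IP_)    == KMALLOC_DMA
     *                                         (__GFP_DMA has highest priority)
     */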
/*
@@ -344,8 +463,14 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
* 1 = 65 .. 96 bytes
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
+ *
+ * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
+ * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
+ * Callers where !size_is_constant should only be test modules, where runtime
+ * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
*/
-static __always_inline unsigned int kmalloc_index(size_t size)
+static __always_inline unsigned int __kmalloc_index(size_t size,
+ bool size_is_constant)
{
if (!size)
return 0;
@@ -376,21 +501,34 @@ static __always_inline unsigned int kmalloc_index(size_t size)
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
- if (size <= 64 * 1024 * 1024) return 26;
- BUG();
+
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
+ BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
+ else
+ BUG();
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
-#endif /* !CONFIG_SLOB */
+static_assert(PAGE_SHIFT <= 20);
+#define kmalloc_index(s) __kmalloc_index(s, true)
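
Worked examples of the size-to-index mapping, assuming KMALLOC_MIN_SIZE is 8 so the low buckets exist; note how a 100-byte request misses the 96-byte bucket and lands in kmalloc-128:

    /*
     * kmalloc_index(8)   == 3   ->  kmalloc-8
     * kmalloc_index(96)  == 1   ->  kmalloc-96
     * kmalloc_index(100) == 7   ->  kmalloc-128
     * kmalloc_index(192) == 2   ->  kmalloc-192
     * kmalloc_index(200) == 8   ->  kmalloc-256
     */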
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache.
+ * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+ gfp_t gfpflags) __assume_slab_alignment __malloc;
+void kmem_cache_free(struct kmem_cache *s, void *objp);
/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -399,94 +537,35 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
static __always_inline void kfree_bulk(size_t size, void **p)
{
kmem_cache_free_bulk(NULL, size, p);
}
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
-#else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
- return kmem_cache_alloc(s, flags);
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
-
-#else /* CONFIG_TRACING */
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+ __alloc_size(1);
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
+ __malloc;
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_kmalloc_alignment __alloc_size(3);
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-#endif /* CONFIG_TRACING */
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_kmalloc_alignment
+ __alloc_size(4);
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+ __alloc_size(1);
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
-#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+ __alloc_size(1);
/**
- * kmalloc - allocate memory
+ * kmalloc - allocate kernel memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: describe the allocation context
*
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
@@ -496,7 +575,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* to be at least to the size.
*
* The @flags argument may be one of the GFP flags defined at
- * include/linux/gfp.h and described at
+ * include/linux/gfp_types.h and described at
* :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
*
* The recommended usage of the @flags is described at
@@ -513,12 +592,12 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* %GFP_ATOMIC
* Allocation will not sleep. May use emergency pools.
*
- * %GFP_HIGHUSER
- * Allocate memory from high memory on behalf of user.
- *
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
+ * %__GFP_ZERO
+ * Zero the allocated memory before returning. Also see kzalloc().
+ *
* %__GFP_HIGH
* This allocation has high priority and may use emergency pools.
*
@@ -537,43 +616,35 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) && size) {
unsigned int index;
-#endif
+
if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
- index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
- return kmem_cache_alloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
+ index = kmalloc_index(size);
+ return kmalloc_trace(
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, size);
-#endif
}
return __kmalloc(size, flags);
}
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
-#ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE) {
- unsigned int i = kmalloc_index(size);
+ if (__builtin_constant_p(size) && size) {
+ unsigned int index;
- if (!i)
- return ZERO_SIZE_PTR;
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][i],
- flags, node, size);
+ index = kmalloc_index(size);
+ return kmalloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
+ flags, node, size);
}
-#endif
return __kmalloc_node(size, flags, node);
}
@@ -583,7 +654,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -595,16 +666,42 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
}
/**
+ * krealloc_array - reallocate memory for an array.
+ * @p: pointer to the memory chunk to reallocate
+ * @new_n: new number of elements to alloc
+ * @new_size: new size of a single member of the array
+ * @flags: the type of memory to allocate (see kmalloc)
+ */
+static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
+ size_t new_n,
+ size_t new_size,
+ gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return krealloc(p, bytes, flags);
+}
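
Because check_mul_overflow() fails closed, a doubling-growth pattern needs no manual overflow math. A minimal sketch (ordinary kernel context assumed):

    static u32 *grow_array(u32 *arr, size_t *nr)
    {
        u32 *tmp = krealloc_array(arr, *nr * 2, sizeof(*arr), GFP_KERNEL);

        if (!tmp)               /* on failure the old array is untouched */
            return NULL;
        *nr *= 2;
        return tmp;
    }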
+
+/**
* kcalloc - allocate memory for an array. The memory is set to zero.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ _RET_IP_)
+
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
@@ -613,12 +710,12 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
- __kmalloc_track_caller(size, flags, _RET_IP_)
+ __kmalloc_node_track_caller(size, flags, \
+ NUMA_NO_NODE, _RET_IP_)
-static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
- int node)
+static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
{
size_t bytes;
@@ -629,25 +726,11 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
return __kmalloc_node(bytes, flags, node);
}
-static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node_track_caller(size, flags, node, \
- _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
- kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
/*
* Shortcuts
*/
@@ -661,7 +744,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | __GFP_ZERO);
}
@@ -672,20 +755,65 @@ static inline void *kzalloc(size_t size, gfp_t flags)
* @flags: the type of memory to allocate (see kmalloc).
* @node: memory node from which to allocate
*/
-static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
+static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return kvmalloc(bytes, flags);
+}
+
+static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kvmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
+extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+ __realloc_size(3);
+extern void kvfree(const void *addr);
+DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
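
kvmalloc() and friends try kmalloc() first and fall back to vmalloc() when contiguous pages are unavailable; kvfree() dispatches to the matching free routine either way. A hedged sketch (struct entry is hypothetical):

    static int build_table(size_t nr)
    {
        struct entry *tbl = kvcalloc(nr, sizeof(*tbl), GFP_KERNEL);

        if (!tbl)
            return -ENOMEM;
        /* ... fill and use the table ... */
        kvfree(tbl);      /* correct for both kmalloc and vmalloc backing */
        return 0;
    }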
+
+extern void kvfree_sensitive(const void *addr, size_t len);
+
unsigned int kmem_cache_size(struct kmem_cache *s);
-void __init kmem_cache_init_late(void);
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
-#else
-#define slab_prepare_cpu NULL
-#define slab_dead_cpu NULL
-#endif
+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
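
The intended pattern is to round up before allocating and remember the full bucket size, rather than allocating and then probing with ksize(). A minimal sketch:

    static void *alloc_full_bucket(size_t want, size_t *got)
    {
        size_t bucket = kmalloc_size_roundup(want);   /* e.g. 126 -> 128 */
        void *buf = kmalloc(bucket, GFP_KERNEL);

        if (buf)
            *got = bucket;  /* every byte up to *got is safely usable */
        return buf;
    }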
+
+void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
deleted file mode 100644
index 9eb430c163c2..000000000000
--- a/include/linux/slab_def.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLAB_DEF_H
-#define _LINUX_SLAB_DEF_H
-
-#include <linux/reciprocal_div.h>
-
-/*
- * Definitions unique to the original Linux SLAB allocator.
- */
-
-struct kmem_cache {
- struct array_cache __percpu *cpu_cache;
-
-/* 1) Cache tunables. Protected by slab_mutex */
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
-
- unsigned int size;
- struct reciprocal_value reciprocal_buffer_size;
-/* 2) touched by every alloc & free from the backend */
-
- slab_flags_t flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 3) cache_grow/shrink */
- /* order of pgs per slab (2^n) */
- unsigned int gfporder;
-
- /* force GFP flags, e.g. GFP_DMA */
- gfp_t allocflags;
-
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- struct kmem_cache *freelist_cache;
- unsigned int freelist_size;
-
- /* constructor func */
- void (*ctor)(void *obj);
-
-/* 4) cache creation/removal */
- const char *name;
- struct list_head list;
- int refcount;
- int object_size;
- int align;
-
-/* 5) statistics */
-#ifdef CONFIG_DEBUG_SLAB
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
-
- /*
- * If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. 'size' contains the total
- * object size including these internal fields, while 'obj_offset'
- * and 'object_size' contain the offset to the user object and its
- * size.
- */
- int obj_offset;
-#endif /* CONFIG_DEBUG_SLAB */
-
-#ifdef CONFIG_KASAN
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
- void *x)
-{
- void *object = x - (x - page->s_mem) % cache->size;
- void *last_object = page->s_mem + (cache->num - 1) * cache->size;
-
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct page *page, void *obj)
-{
- u32 offset = (obj - page->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
- const struct page *page)
-{
- return cache->num;
-}
-
-#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
index 12c9719b2a55..3042385b7b40 100644
--- a/include/linux/slimbus.h
+++ b/include/linux/slimbus.h
@@ -10,7 +10,7 @@
#include <linux/completion.h>
#include <linux/mod_devicetable.h>
-extern struct bus_type slimbus_bus;
+extern const struct bus_type slimbus_bus;
/**
* struct slim_eaddr - Enumeration address for a SLIMbus device
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index 1be0ed5befa1..000000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-
-enum stat_item {
- ALLOC_FASTPATH, /* Allocation from cpu slab */
- ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slab */
- FREE_SLOWPATH, /* Freeing not to cpu slab */
- FREE_FROZEN, /* Freeing to frozen slab */
- FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
- FREE_REMOVE_PARTIAL, /* Freeing removes last object */
- ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
- ALLOC_SLAB, /* Cpu slab acquired from page allocator */
- ALLOC_REFILL, /* Refill cpu slab from slab freelist */
- ALLOC_NODE_MISMATCH, /* Switching cpu slab */
- FREE_SLAB, /* Slab freed to the page allocator */
- CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
- DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
- DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
- DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
- DEACTIVATE_BYPASS, /* Implicit deactivation */
- ORDER_FALLBACK, /* Number of times fallback was necessary */
- CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
- CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
- CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
- CPU_PARTIAL_FREE, /* Refill cpu partial on free */
- CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
- CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
- NR_SLUB_STAT_ITEMS };
-
-struct kmem_cache_cpu {
- void **freelist; /* Pointer to next available object */
- unsigned long tid; /* Globally unique transaction id */
- struct page *page; /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct page *partial; /* Partially allocated frozen slabs */
-#endif
-#ifdef CONFIG_SLUB_STATS
- unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
- struct kmem_cache_cpu __percpu *cpu_slab;
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects max;
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN
- struct kasan_cache kasan_info;
-#endif
-
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_cpu_partial(s) ((s)->cpu_partial)
-#define slub_set_cpu_partial(s, n) \
-({ \
- slub_cpu_partial(s) = (n); \
-})
-#else
-#define slub_cpu_partial(s) (0)
-#define slub_set_cpu_partial(s, n)
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
-
-#ifdef CONFIG_SYSFS
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void object_err(struct kmem_cache *s, struct page *page,
- u8 *object, char *reason);
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
- void *x) {
- void *object = x - (x - page_address(page)) % cache->size;
- void *last_object = page_address(page) +
- (page->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct page *page, void *obj)
-{
- return __obj_to_index(cache, page_address(page), obj);
-}
-
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
- const struct page *page)
-{
- return page->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
deleted file mode 100644
index 8cace8189e74..000000000000
--- a/include/linux/smc911x.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SMC911X_H__
-#define __SMC911X_H__
-
-#define SMC911X_USE_16BIT (1 << 0)
-#define SMC911X_USE_32BIT (1 << 1)
-
-struct smc911x_platdata {
- unsigned long flags;
- unsigned long irq_flags; /* IRQF_... */
- int irq_polarity;
-};
-
-#endif /* __SMC911X_H__ */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 80d557ef8a11..fcd61dfe2af3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -21,21 +21,23 @@ typedef bool (*smp_cond_func_t)(int cpu, void *info);
* structure shares (partial) layout with struct irq_work
*/
struct __call_single_data {
- union {
- struct __call_single_node node;
- struct {
- struct llist_node llist;
- unsigned int flags;
- };
- };
+ struct __call_single_node node;
smp_call_func_t func;
void *info;
};
+#define CSD_INIT(_func, _info) \
+ (struct __call_single_data){ .func = (_func), .info = (_info), }
+
/* Use __aligned() to avoid to use 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
__aligned(sizeof(struct __call_single_data));
+#define INIT_CSD(_csd, _func, _info) \
+do { \
+ *(_csd) = CSD_INIT((_func), (_info)); \
+} while (0)
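
CSD_INIT()/INIT_CSD() replace open-coded member assignment when preparing a call_single_data_t. A hedged sketch pairing INIT_CSD() with smp_call_function_single_async() (my_func is hypothetical, and the csd must not be reused until the previous call has completed):

    static call_single_data_t my_csd;

    static void kick_remote_cpu(int cpu, void *arg)
    {
        INIT_CSD(&my_csd, my_func, arg);
        /* fire and forget: my_func(arg) runs on the target cpu */
        smp_call_function_single_async(cpu, &my_csd);
    }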
+
/*
* Enqueue a llist_node on the call_single_queue; be very careful, read
* flush_smp_call_function_queue() in detail.
@@ -48,35 +50,70 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
/*
- * Call a function on all processors
+ * Functions used to stop CPUs in panic. All have default weak definitions.
+ * Architecture-dependent code may override them.
*/
-void on_each_cpu(smp_call_func_t func, void *info, int wait);
+void __noreturn panic_smp_self_stop(void);
+void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
+void crash_smp_send_stop(void);
/*
- * Call a function on processors specified by mask, which might include
- * the local one.
+ * Call a function on all processors
*/
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
+static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
+}
+
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
+ */
+static inline void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ on_each_cpu_cond_mask(NULL, func, info, wait, mask);
+}
/*
* Call a function on each processor for which the supplied function
* cond_func returns a positive value. This may include the local
- * processor.
+ * processor. May be used during early boot while early_boot_irqs_disabled is
+ * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
*/
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait);
-
-void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait, const struct cpumask *mask);
+static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
+ smp_call_func_t func, void *info, bool wait)
+{
+ on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+}
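
With all variants funneled through on_each_cpu_cond_mask(), a conditional cross-call needs only a predicate. A hedged sketch (pending_work and do_flush are hypothetical):

    static DEFINE_PER_CPU(int, pending_work);       /* hypothetical per-CPU state */

    static bool cpu_needs_flush(int cpu, void *info)
    {
        return per_cpu(pending_work, cpu) != 0;     /* run only where needed */
    }

    static void flush_where_needed(void *info)
    {
        /* wait=true: returns once do_flush() has finished everywhere */
        on_each_cpu_cond(cpu_needs_flush, do_flush, info, true);
    }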
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+/*
+ * Architecture specific boot CPU setup. Defined as empty weak function in
+ * init/main.c. Architectures can override it.
+ */
+void smp_prepare_boot_cpu(void);
#ifdef CONFIG_SMP
#include <linux/preempt.h>
-#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
@@ -94,8 +131,15 @@ extern void smp_send_stop(void);
/*
* sends a 'reschedule' event to another CPU:
*/
-extern void smp_send_reschedule(int cpu);
-
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline so it can't be passed as callback reason, but the
+ * callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({ \
+ trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \
+ arch_smp_send_reschedule(cpu); \
+})
/*
* Prepare machine for booting other CPUs.
@@ -133,12 +177,6 @@ void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
generic_smp_call_function_single_interrupt
-/*
- * Mark the boot cpu "online" so that it can call console drivers in
- * printk() and can access its per-cpu storage.
- */
-void smp_prepare_boot_cpu(void);
-
extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
@@ -165,7 +203,6 @@ static inline void up_smp_call_function(smp_call_func_t func, void *info)
(up_smp_call_function(func, info))
static inline void smp_send_reschedule(int cpu) { }
-#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
@@ -180,6 +217,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
+#define setup_max_cpus 0
+
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
@@ -223,7 +262,7 @@ static inline int get_boot_cpu_id(void)
* regular asm read for the stable.
*/
#ifndef __smp_processor_id
-#define __smp_processor_id(x) raw_smp_processor_id(x)
+#define __smp_processor_id() raw_smp_processor_id()
#endif
#ifdef CONFIG_DEBUG_PREEMPT
diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h
index 364b3ae3e41d..2e8461af8df6 100644
--- a/include/linux/smp_types.h
+++ b/include/linux/smp_types.h
@@ -61,6 +61,9 @@ struct __call_single_node {
unsigned int u_flags;
atomic_t a_flags;
};
+#ifdef CONFIG_64BIT
+ u16 src, dst;
+#endif
};
#endif /* __LINUX_SMP_TYPES_H */
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
index 1a136271ba6a..1a6a851d2cf8 100644
--- a/include/linux/smscphy.h
+++ b/include/linux/smscphy.h
@@ -28,4 +28,48 @@
#define MII_LAN83C185_MODE_POWERDOWN 0xC0 /* Power Down mode */
#define MII_LAN83C185_MODE_ALL 0xE0 /* All capable mode */
+int smsc_phy_config_intr(struct phy_device *phydev);
+irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev);
+int smsc_phy_config_init(struct phy_device *phydev);
+int lan87xx_read_status(struct phy_device *phydev);
+int smsc_phy_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data);
+int smsc_phy_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data);
+int smsc_phy_probe(struct phy_device *phydev);
+
+#define MII_LAN874X_PHY_MMD_WOL_WUCSR 0x8010
+#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGA 0x8011
+#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGB 0x8012
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK0 0x8021
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK1 0x8022
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK2 0x8023
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK3 0x8024
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK4 0x8025
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK5 0x8026
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK6 0x8027
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK7 0x8028
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRA 0x8061
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRB 0x8062
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRC 0x8063
+#define MII_LAN874X_PHY_MMD_MCFGR 0x8064
+
+#define MII_LAN874X_PHY_PME1_SET (2 << 13)
+#define MII_LAN874X_PHY_PME2_SET (2 << 11)
+#define MII_LAN874X_PHY_PME_SELF_CLEAR BIT(9)
+#define MII_LAN874X_PHY_WOL_PFDA_FR BIT(7)
+#define MII_LAN874X_PHY_WOL_WUFR BIT(6)
+#define MII_LAN874X_PHY_WOL_MPR BIT(5)
+#define MII_LAN874X_PHY_WOL_BCAST_FR BIT(4)
+#define MII_LAN874X_PHY_WOL_PFDAEN BIT(3)
+#define MII_LAN874X_PHY_WOL_WUEN BIT(2)
+#define MII_LAN874X_PHY_WOL_MPEN BIT(1)
+#define MII_LAN874X_PHY_WOL_BCSTEN BIT(0)
+
+#define MII_LAN874X_PHY_WOL_FILTER_EN BIT(15)
+#define MII_LAN874X_PHY_WOL_FILTER_MCASTTEN BIT(9)
+#define MII_LAN874X_PHY_WOL_FILTER_BCSTEN BIT(8)
+
+#define MII_LAN874X_PHY_PME_SELF_CLEAR_DELAY 0x1000 /* 81 milliseconds */
+
#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/soc/andes/irq.h b/include/linux/soc/andes/irq.h
new file mode 100644
index 000000000000..edc3182d6e66
--- /dev/null
+++ b/include/linux/soc/andes/irq.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Andes Technology Corporation
+ */
+#ifndef __ANDES_IRQ_H
+#define __ANDES_IRQ_H
+
+/* Andes PMU irq number */
+#define ANDES_RV_IRQ_PMOVI 18
+#define ANDES_RV_IRQ_LAST ANDES_RV_IRQ_PMOVI
+#define ANDES_SLI_CAUSE_BASE 256
+
+/* Andes PMU related registers */
+#define ANDES_CSR_SLIE 0x9c4
+#define ANDES_CSR_SLIP 0x9c5
+#define ANDES_CSR_SCOUNTEROF 0x9d4
+
+#endif /* __ANDES_IRQ_H */
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
new file mode 100644
index 000000000000..8c9ca857ccf6
--- /dev/null
+++ b/include/linux/soc/apple/rtkit.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC Library
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple's SoCs come with various co-processors running their RTKit operating
+ * system. This protocol library is used by client drivers to use the
+ * features provided by them.
+ */
+#ifndef _LINUX_APPLE_RTKIT_H_
+#define _LINUX_APPLE_RTKIT_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_client.h>
+
+/*
+ * Struct to represent a shared memory buffer shared with an RTKit co-processor.
+ *
+ * @buffer: Shared memory buffer allocated inside normal RAM.
+ * @iomem: Shared memory buffer controlled by the co-processors.
+ * @size: Size of the shared memory buffer.
+ * @iova: Device VA of shared memory buffer.
+ * @is_mapped: Shared memory buffer is managed by the co-processor.
+ * @private: Private data pointer for the parent driver.
+ */
+
+struct apple_rtkit_shmem {
+ void *buffer;
+ void __iomem *iomem;
+ size_t size;
+ dma_addr_t iova;
+ bool is_mapped;
+ void *private;
+};
+
+/*
+ * Struct to represent implementation-specific RTKit operations.
+ *
+ * @crashed: Called when the co-processor has crashed. Runs in process
+ * context.
+ * @recv_message: Function called when a message from RTKit is received
+ * on a non-system endpoint. Called from a worker thread.
+ * @recv_message_early:
+ * Like recv_message, but called from atomic context. It
+ * should return true if it handled the message. If it
+ * returns false, the message will be passed on to the
+ * worker thread.
+ * @shmem_setup: Setup shared memory buffer. If bfr.is_mapped is true the
+ * buffer is managed by the co-processor and needs to be mapped.
+ * Otherwise the buffer is managed by Linux and needs to be
+ * allocated. If not specified dma_alloc_coherent is used.
+ * Called in process context.
+ * @shmem_destroy: Undo the shared memory buffer setup in shmem_setup. If not
+ * specified dma_free_coherent is used. Called in process
+ * context.
+ */
+struct apple_rtkit_ops {
+ void (*crashed)(void *cookie);
+ void (*recv_message)(void *cookie, u8 endpoint, u64 message);
+ bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
+ int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
+ void (*shmem_destroy)(void *cookie, struct apple_rtkit_shmem *bfr);
+};
+
+struct apple_rtkit;
+
+/*
+ * Initializes the internal state required to handle RTKit. This
+ * should usually be called within _probe.
+ *
+ * @dev: Pointer to the device node this coprocessor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
+
+/*
+ * Non-devm version of devm_apple_rtkit_init. Must be freed with
+ * apple_rtkit_free.
+ *
+ * @dev: Pointer to the device node this coprocessor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
+
+/*
+ * Free an instance of apple_rtkit.
+ */
+void apple_rtkit_free(struct apple_rtkit *rtk);
+
+/*
+ * Reinitialize internal structures. Must only be called while the co-processor
+ * is held in reset.
+ */
+int apple_rtkit_reinit(struct apple_rtkit *rtk);
+
+/*
+ * Handle RTKit's boot process. Should be called after the CPU of the
+ * co-processor has been started.
+ */
+int apple_rtkit_boot(struct apple_rtkit *rtk);
+
+/*
+ * Quiesce the co-processor.
+ */
+int apple_rtkit_quiesce(struct apple_rtkit *rtk);
+
+/*
+ * Wake the co-processor up from hibernation mode.
+ */
+int apple_rtkit_wake(struct apple_rtkit *rtk);
+
+/*
+ * Shut down the co-processor.
+ */
+int apple_rtkit_shutdown(struct apple_rtkit *rtk);
+
+/*
+ * Put the co-processor into idle mode.
+ */
+int apple_rtkit_idle(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit is running and ready to handle messages.
+ */
+bool apple_rtkit_is_running(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit has crashed.
+ */
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk);
+
+/*
+ * Starts an endpoint. Must be called after boot but before any messages can be
+ * sent or received from that endpoint.
+ */
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint);
+
+/*
+ * Send a message to the given endpoint.
+ *
+ * @rtk: RTKit reference
+ * @ep: target endpoint
+ * @message: message to be sent
+ * @completion: will be completed once the message has been submitted
+ * to the hardware FIFO. Can be NULL.
+ * @atomic: if set to true this function can be called from atomic
+ * context.
+ */
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+ struct completion *completion, bool atomic);
+
+/*
+ * Process incoming messages in atomic context.
+ * This only guarantees that messages arrive as far as the recv_message_early
+ * callback; drivers expecting to handle incoming messages synchronously
+ * by calling this function must do it that way.
+ * Will return 1 if some data was processed, 0 if none was, or a
+ * negative error code on failure.
+ *
+ * @rtk: RTKit reference
+ */
+int apple_rtkit_poll(struct apple_rtkit *rtk);
+
+#endif /* _LINUX_APPLE_RTKIT_H_ */
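
A hedged sketch of the client-driver shape this header anticipates; the callbacks, probe wiring and names are illustrative, and the usual ERR_PTR convention is assumed for the init return value:

    static void my_crashed(void *cookie)
    {
        /* recover, or mark the device dead */
    }

    static void my_recv(void *cookie, u8 endpoint, u64 message)
    {
        /* handle a co-processor message (worker-thread context) */
    }

    static const struct apple_rtkit_ops my_ops = {
        .crashed      = my_crashed,
        .recv_message = my_recv,
    };

    static int my_probe(struct device *dev)
    {
        struct apple_rtkit *rtk;

        /* mbox_name == NULL: fall back to mailbox index 0 */
        rtk = devm_apple_rtkit_init(dev, dev, NULL, 0, &my_ops);
        if (IS_ERR(rtk))
            return PTR_ERR(rtk);
        return apple_rtkit_boot(rtk);  /* after the co-processor CPU starts */
    }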
diff --git a/include/linux/soc/apple/sart.h b/include/linux/soc/apple/sart.h
new file mode 100644
index 000000000000..2249bf6cde09
--- /dev/null
+++ b/include/linux/soc/apple/sart.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target them. Unlike a proper
+ * IOMMU, no remapping can be done.
+ */
+
+#ifndef _LINUX_SOC_APPLE_SART_H_
+#define _LINUX_SOC_APPLE_SART_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct apple_sart;
+
+/*
+ * Get a reference to the SART attached to dev.
+ *
+ * Looks for the phandle reference in apple,sart and returns a pointer
+ * to the corresponding apple_sart struct to be used with
+ * apple_sart_add_allowed_region and apple_sart_remove_allowed_region.
+ */
+struct apple_sart *devm_apple_sart_get(struct device *dev);
+
+/*
+ * Adds the region [paddr, paddr+size] to the DMA allow list.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region to be used for DMA
+ * @size: Size of the region to be used for DMA.
+ */
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size);
+
+/*
+ * Removes the region [paddr, paddr+size] from the DMA allow list.
+ *
+ * Note that the exact same paddr and size that were passed to
+ * apple_sart_add_allowed_region have to be used here.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region no longer used for DMA
+ * @size: Size of the region no longer used for DMA.
+ */
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size);
+
+#endif /* _LINUX_SOC_APPLE_SART_H_ */
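
As a usage sketch, the add/remove calls bracket the lifetime of a DMA region. This assumes devm_apple_sart_get() also reports errors via ERR_PTR; the wrapper names are illustrative:

/* Sketch: open a DMA window through the SART filter; names illustrative. */
#include <linux/soc/apple/sart.h>

static int my_dma_window_open(struct device *dev, struct apple_sart **sartp,
			      phys_addr_t paddr, size_t size)
{
	struct apple_sart *sart = devm_apple_sart_get(dev);

	if (IS_ERR(sart))
		return PTR_ERR(sart);

	*sartp = sart;
	return apple_sart_add_allowed_region(sart, paddr, size);
}

static void my_dma_window_close(struct apple_sart *sart, phys_addr_t paddr,
				size_t size)
{
	/* must match the paddr/size passed when the region was added */
	apple_sart_remove_allowed_region(sart, paddr, size);
}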
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
index 8e884e0dda0a..f2b768852777 100644
--- a/include/linux/soc/brcmstb/brcmstb.h
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -2,6 +2,8 @@
#ifndef __BRCMSTB_SOC_H
#define __BRCMSTB_SOC_H
+#include <linux/kconfig.h>
+
static inline u32 BRCM_ID(u32 reg)
{
return reg >> 28 ? reg >> 16 : reg >> 8;
@@ -12,6 +14,8 @@ static inline u32 BRCM_REV(u32 reg)
return reg & 0xff;
}
+#if IS_ENABLED(CONFIG_SOC_BRCMSTB)
+
/*
* Helper functions for getting family or product id from the
* SoC driver.
@@ -19,4 +23,16 @@ static inline u32 BRCM_REV(u32 reg)
u32 brcmstb_get_family_id(void);
u32 brcmstb_get_product_id(void);
+#else
+static inline u32 brcmstb_get_family_id(void)
+{
+ return 0;
+}
+
+static inline u32 brcmstb_get_product_id(void)
+{
+ return 0;
+}
+#endif
+
#endif /* __BRCMSTB_SOC_H */
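
With the stubs in place a consumer compiles in both configurations and can treat the 0 fallback as "identification unavailable". A sketch, with the family ID purely illustrative:

/* Sketch: degrade gracefully when CONFIG_SOC_BRCMSTB is disabled. */
#include <linux/types.h>
#include <linux/soc/brcmstb/brcmstb.h>

static bool my_is_known_family(void)
{
	u32 family = brcmstb_get_family_id();

	if (!family)		/* stub returned 0: SoC driver not built in */
		return false;

	return family == 0x7445;	/* illustrative family ID */
}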
diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h
new file mode 100644
index 000000000000..f526ac33afea
--- /dev/null
+++ b/include/linux/soc/ixp4xx/cpu.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IXP4XX cpu type detection
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ */
+
+#ifndef __SOC_IXP4XX_CPU_H__
+#define __SOC_IXP4XX_CPU_H__
+
+#include <linux/io.h>
+#include <linux/regmap.h>
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/* Processor id value in CP15 Register 0 */
+#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */
+#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0
+
+#define IXP43X_PROCESSOR_ID_VALUE 0x69054040
+#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0
+
+#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */
+#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0
+
+/* Feature register in the expansion bus controller */
+#define IXP4XX_EXP_CNFG2 0x2c
+
+/* "fuse" bits of IXP_EXP_CFG2 */
+/* All IXP4xx CPUs */
+#define IXP4XX_FEATURE_RCOMP (1 << 0)
+#define IXP4XX_FEATURE_USB_DEVICE (1 << 1)
+#define IXP4XX_FEATURE_HASH (1 << 2)
+#define IXP4XX_FEATURE_AES (1 << 3)
+#define IXP4XX_FEATURE_DES (1 << 4)
+#define IXP4XX_FEATURE_HDLC (1 << 5)
+#define IXP4XX_FEATURE_AAL (1 << 6)
+#define IXP4XX_FEATURE_HSS (1 << 7)
+#define IXP4XX_FEATURE_UTOPIA (1 << 8)
+#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9)
+#define IXP4XX_FEATURE_NPEC_ETH (1 << 10)
+#define IXP4XX_FEATURE_RESET_NPEA (1 << 11)
+#define IXP4XX_FEATURE_RESET_NPEB (1 << 12)
+#define IXP4XX_FEATURE_RESET_NPEC (1 << 13)
+#define IXP4XX_FEATURE_PCI (1 << 14)
+#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16)
+#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22)
+#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \
+ IXP4XX_FEATURE_USB_DEVICE | \
+ IXP4XX_FEATURE_HASH | \
+ IXP4XX_FEATURE_AES | \
+ IXP4XX_FEATURE_DES | \
+ IXP4XX_FEATURE_HDLC | \
+ IXP4XX_FEATURE_AAL | \
+ IXP4XX_FEATURE_HSS | \
+ IXP4XX_FEATURE_UTOPIA | \
+ IXP4XX_FEATURE_NPEB_ETH0 | \
+ IXP4XX_FEATURE_NPEC_ETH | \
+ IXP4XX_FEATURE_RESET_NPEA | \
+ IXP4XX_FEATURE_RESET_NPEB | \
+ IXP4XX_FEATURE_RESET_NPEC | \
+ IXP4XX_FEATURE_PCI | \
+ IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \
+ IXP4XX_FEATURE_XSCALE_MAX_FREQ)
+
+/* IXP43x/46x CPUs */
+#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15)
+#define IXP4XX_FEATURE_USB_HOST (1 << 18)
+#define IXP4XX_FEATURE_NPEA_ETH (1 << 19)
+#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \
+ IXP4XX_FEATURE_ECC_TIMESYNC | \
+ IXP4XX_FEATURE_USB_HOST | \
+ IXP4XX_FEATURE_NPEA_ETH)
+
+/* IXP46x CPU (including IXP455) only */
+#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20)
+#define IXP4XX_FEATURE_RSA (1 << 21)
+#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \
+ IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \
+ IXP4XX_FEATURE_RSA)
+
+#ifdef CONFIG_ARCH_IXP4XX
+#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \
+ IXP43X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \
+ IXP46X_PROCESSOR_ID_VALUE)
+static inline u32 cpu_ixp4xx_features(struct regmap *rmap)
+{
+ u32 val;
+
+ regmap_read(rmap, IXP4XX_EXP_CNFG2, &val);
+ /* For some reason this register is inverted */
+ val = ~val;
+ if (cpu_is_ixp42x_rev_a0())
+ return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP |
+ IXP4XX_FEATURE_AES);
+ if (cpu_is_ixp42x())
+ return val & IXP42X_FEATURE_MASK;
+ if (cpu_is_ixp43x())
+ return val & IXP43X_FEATURE_MASK;
+ return val & IXP46X_FEATURE_MASK;
+}
+#else
+#define cpu_is_ixp42x_rev_a0() 0
+#define cpu_is_ixp42x() 0
+#define cpu_is_ixp43x() 0
+#define cpu_is_ixp46x() 0
+static inline u32 cpu_ixp4xx_features(struct regmap *rmap)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SOC_IXP4XX_CPU_H__ */
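
In use, a hypothetical consumer hands cpu_ixp4xx_features() the syscon regmap of the expansion bus controller and gates optional hardware on the fuse bits. A sketch, with the feature choice illustrative:

/* Sketch: gate a driver feature on the IXP4xx fuse bits. */
#include <linux/soc/ixp4xx/cpu.h>

static bool my_have_npeb_eth0(struct regmap *exp_rmap)
{
	return cpu_ixp4xx_features(exp_rmap) & IXP4XX_FEATURE_NPEB_ETH0;
}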
diff --git a/include/linux/soc/ixp4xx/npe.h b/include/linux/soc/ixp4xx/npe.h
index 2a91f465d456..9efeac777da1 100644
--- a/include/linux/soc/ixp4xx/npe.h
+++ b/include/linux/soc/ixp4xx/npe.h
@@ -3,6 +3,7 @@
#define __IXP4XX_NPE_H
#include <linux/kernel.h>
+#include <linux/regmap.h>
extern const char *npe_names[];
@@ -17,6 +18,7 @@ struct npe_regs {
struct npe {
struct npe_regs __iomem *regs;
+ struct regmap *rmap;
int id;
int valid;
};
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
new file mode 100644
index 000000000000..d683251a0b40
--- /dev/null
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __SOC_OTX2_ASM_H
+#define __SOC_OTX2_ASM_H
+
+#include <linux/types.h>
+#if defined(CONFIG_ARM64)
+/*
+ * otx2_lmt_flush is used for the LMT store operation.
+ * On the octeontx2 platform CPT instruction enqueue and
+ * NIX packet send are only possible via LMTST
+ * operations, which use the LDEOR instruction targeting
+ * the co-processor address.
+ */
+#define otx2_lmt_flush(ioaddr) \
+({ \
+ u64 result = 0; \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldeor xzr, %x[rf], [%[rs]]" \
+ : [rf]"=r" (result) \
+ : [rs]"r" (ioaddr)); \
+ (result); \
+})
+/*
+ * STEORL is a store to memory with release semantics.
+ * It avoids the need for a DMB barrier after each LMTST
+ * operation.
+ */
+#define cn10k_lmt_flush(val, addr) \
+({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "steorl %x[rf],[%[rs]]" \
+ : [rf] "+r"(val) \
+ : [rs] "r"(addr)); \
+})
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ asm volatile (".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+#else
+#define otx2_lmt_flush(ioaddr) ({ 0; })
+#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#define otx2_atomic64_fetch_add(incr, ptr) ({ incr; })
+#endif
+
+#endif /* __SOC_OTX2_ASM_H */
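
A short sketch of how the primitives compose; the LMT I/O address and the counter are illustrative. It submits an LMTST and accounts for it with the LSE fetch-add, neither of which needs an explicit barrier:

/* Sketch: LMTST submit plus LSE fetch-add accounting; names illustrative. */
#include <linux/io.h>
#include <linux/soc/marvell/octeontx2/asm.h>

static u64 my_lmtst_submit(void __iomem *lmt_ioaddr, u64 *tx_count)
{
	u64 status = otx2_lmt_flush(lmt_ioaddr);	/* LDEOR to co-processor */

	if (status)
		otx2_atomic64_fetch_add(1, tx_count);	/* ldadda, no extra DMB */
	return status;
}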
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index fd25f0148566..6c6cccc848f4 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -2,6 +2,400 @@
#ifndef __SOC_MEDIATEK_INFRACFG_H
#define __SOC_MEDIATEK_INFRACFG_H
+#define MT8365_INFRA_TOPAXI_PROTECTEN_STA1 0x228
+#define MT8365_INFRA_TOPAXI_PROTECTEN_SET 0x2a0
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CLR 0x2a4
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM_M0 BIT(1)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MDMCU_M1 BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_0 BIT(10)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_1 BIT(11)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_AP2CONN_AHB BIT(13)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CONN2INFRA_AHB BIT(14)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MFG_M0 BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_INFRA2MFG BIT(22)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_STA1 0x258
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_SET 0x2a8
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CLR 0x2ac
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU2AP BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_0 BIT(16)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_1 BIT(17)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CONN2INFRA_AXI_GALS_MST BIT(18)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CAM2MM_AXI_GALS_MST BIT(19)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU_CBIP_GALS_MST BIT(20)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_INFRA2CONN_AHB_GALS_SLV BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_INFRA_GALS_ADB BIT(24)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_MP1_L2C_AFIFO BIT(27)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_AUDIO_M BIT(28)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_M BIT(30)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_S BIT(31)
+
+#define MT8365_INFRA_NAO_TOPAXI_SI0_STA 0x0
+#define MT8365_INFRA_NAO_TOPAXI_SI0_CTRL_UPDATED BIT(24)
+#define MT8365_INFRA_NAO_TOPAXI_SI2_STA 0x28
+#define MT8365_INFRA_NAO_TOPAXI_SI2_CTRL_UPDATED BIT(14)
+#define MT8365_INFRA_TOPAXI_SI0_CTL 0x200
+#define MT8365_INFRA_TOPAXI_SI0_WAY_EN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_SI2_CTL 0x234
+#define MT8365_INFRA_TOPAXI_SI2_WAY_EN_PERI_M1 BIT(5)
+
+#define MT8365_SMI_COMMON_CLAMP_EN 0x3c0
+#define MT8365_SMI_COMMON_CLAMP_EN_SET 0x3c4
+#define MT8365_SMI_COMMON_CLAMP_EN_CLR 0x3c8
+
+#define MT8195_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258
+#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8195_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8195_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8195_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8195_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8195_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8195_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+#define MT8195_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8195_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8195_TOP_AXI_PROT_EN_2_STA1 0x724
+#define MT8195_TOP_AXI_PROT_EN_VDNR_SET 0xb84
+#define MT8195_TOP_AXI_PROT_EN_VDNR_CLR 0xb88
+#define MT8195_TOP_AXI_PROT_EN_VDNR_STA1 0xb90
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_SET 0xbb8
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_CLR 0xbbc
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_STA1 0xbc4
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbcc
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbd0
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbd8
+#define MT8195_TOP_AXI_PROT_EN_MM_2_SET 0xdcc
+#define MT8195_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0
+#define MT8195_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8
+
+#define MT8195_TOP_AXI_PROT_EN_VDOSYS0 BIT(6)
+#define MT8195_TOP_AXI_PROT_EN_VPPSYS0 BIT(10)
+#define MT8195_TOP_AXI_PROT_EN_MFG1 BIT(11)
+#define MT8195_TOP_AXI_PROT_EN_MFG1_2ND GENMASK(22, 21)
+#define MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND BIT(23)
+#define MT8195_TOP_AXI_PROT_EN_1_MFG1 GENMASK(20, 19)
+#define MT8195_TOP_AXI_PROT_EN_1_CAM BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_2_CAM BIT(0)
+#define MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND GENMASK(6, 5)
+#define MT8195_TOP_AXI_PROT_EN_2_MFG1 BIT(7)
+#define MT8195_TOP_AXI_PROT_EN_2_AUDIO (BIT(9) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_2_ADSP (BIT(12) | GENMASK(16, 14))
+#define MT8195_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2) | BIT(4))
+#define MT8195_TOP_AXI_PROT_EN_MM_IPE BIT(1)
+#define MT8195_TOP_AXI_PROT_EN_MM_IMG BIT(3)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0 GENMASK(21, 17)
+#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1 GENMASK(8, 5)
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC (BIT(9) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1 (BIT(10) | BIT(12))
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0 BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1 BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND BIT(23)
+#define MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(24)
+#define MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND BIT(25)
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(26)
+#define MT8195_TOP_AXI_PROT_EN_MM_WPESYS BIT(27)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND BIT(28)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND BIT(29)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1 GENMASK(31, 30)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND (GENMASK(1, 0) | BIT(4) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC BIT(2)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1 (BIT(3) | BIT(15))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_CAM (BIT(5) | BIT(17))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1 (GENMASK(7, 6) | BIT(18))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0 GENMASK(9, 8)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1 BIT(10)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND BIT(12)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_IPE BIT(16)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2 BIT(21)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0 BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS GENMASK(24, 23)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX BIT(1)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX BIT(2)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0 (BIT(11) | BIT(28))
+#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1 (BIT(12) | BIT(29))
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0 BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1 BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1 (BIT(17) | BIT(19))
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0 BIT(20)
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0 BIT(21)
+
+#define MT8192_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258
+#define MT8192_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8192_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8192_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8192_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8192_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8192_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8192_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+#define MT8192_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8192_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8192_TOP_AXI_PROT_EN_2_STA1 0x724
+#define MT8192_TOP_AXI_PROT_EN_VDNR_SET 0xb84
+#define MT8192_TOP_AXI_PROT_EN_VDNR_CLR 0xb88
+#define MT8192_TOP_AXI_PROT_EN_VDNR_STA1 0xb90
+#define MT8192_TOP_AXI_PROT_EN_MM_2_SET 0xdcc
+#define MT8192_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0
+#define MT8192_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8
+
+#define MT8192_TOP_AXI_PROT_EN_DISP (BIT(6) | BIT(23))
+#define MT8192_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(18))
+#define MT8192_TOP_AXI_PROT_EN_CONN_2ND BIT(14)
+#define MT8192_TOP_AXI_PROT_EN_MFG1 GENMASK(22, 21)
+#define MT8192_TOP_AXI_PROT_EN_1_CONN BIT(10)
+#define MT8192_TOP_AXI_PROT_EN_1_MFG1 BIT(21)
+#define MT8192_TOP_AXI_PROT_EN_1_CAM BIT(22)
+#define MT8192_TOP_AXI_PROT_EN_2_CAM BIT(0)
+#define MT8192_TOP_AXI_PROT_EN_2_ADSP BIT(3)
+#define MT8192_TOP_AXI_PROT_EN_2_AUDIO BIT(4)
+#define MT8192_TOP_AXI_PROT_EN_2_MFG1 GENMASK(6, 5)
+#define MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND BIT(7)
+#define MT8192_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2))
+#define MT8192_TOP_AXI_PROT_EN_MM_DISP (BIT(0) | BIT(2) | \
+ BIT(10) | BIT(12) | \
+ BIT(14) | BIT(16) | \
+ BIT(24) | BIT(26))
+#define MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND (BIT(1) | BIT(3))
+#define MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND (BIT(1) | BIT(3) | \
+ BIT(15) | BIT(17) | \
+ BIT(25) | BIT(27))
+#define MT8192_TOP_AXI_PROT_EN_MM_ISP2 BIT(14)
+#define MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND BIT(15)
+#define MT8192_TOP_AXI_PROT_EN_MM_IPE BIT(16)
+#define MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND BIT(17)
+#define MT8192_TOP_AXI_PROT_EN_MM_VDEC BIT(24)
+#define MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND BIT(25)
+#define MT8192_TOP_AXI_PROT_EN_MM_VENC BIT(26)
+#define MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(27)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP BIT(8)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP (BIT(8) | BIT(12))
+#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND BIT(9)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND (BIT(9) | BIT(13))
+#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP BIT(12)
+#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13)
+#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21)
+
+#define MT8188_TOP_AXI_PROT_EN_SET 0x2A0
+#define MT8188_TOP_AXI_PROT_EN_CLR 0x2A4
+#define MT8188_TOP_AXI_PROT_EN_STA 0x228
+#define MT8188_TOP_AXI_PROT_EN_1_SET 0x2A8
+#define MT8188_TOP_AXI_PROT_EN_1_CLR 0x2AC
+#define MT8188_TOP_AXI_PROT_EN_1_STA 0x258
+#define MT8188_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8188_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8188_TOP_AXI_PROT_EN_2_STA 0x724
+
+#define MT8188_TOP_AXI_PROT_EN_MM_SET 0x2D4
+#define MT8188_TOP_AXI_PROT_EN_MM_CLR 0x2D8
+#define MT8188_TOP_AXI_PROT_EN_MM_STA 0x2EC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_SET 0xDCC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CLR 0xDD0
+#define MT8188_TOP_AXI_PROT_EN_MM_2_STA 0xDD8
+
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET 0xB84
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR 0xB88
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA 0xB90
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xBCC
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xBD0
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA 0xBD8
+
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP1 BIT(11)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP2 BIT(7)
+#define MT8188_TOP_AXI_PROT_EN_1_MFG1_STEP3 BIT(19)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP4 BIT(5)
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP5 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP6 BIT(17)
+
+#define MT8188_TOP_AXI_PROT_EN_PEXTP_MAC_P0_STEP1 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_PEXTP_MAC_P0_STEP2 (BIT(8) | BIT(18) | BIT(30))
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_ETHER_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_HDMI_TX_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP1 GENMASK(31, 29)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP2 (GENMASK(4, 3) | BIT(28))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP1 (GENMASK(16, 14) | BIT(23) | \
+ BIT(27))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP2 (GENMASK(19, 17) | GENMASK(26, 25))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP1 GENMASK(11, 8)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP2 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP2 BIT(12)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP2 BIT(13)
+
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP1 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP2 GENMASK(9, 8)
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP3 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP4 (BIT(1) | BIT(4) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0_STEP5 (BIT(20))
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS0_STEP1 (GENMASK(18, 17) | GENMASK(21, 20))
+#define MT8188_TOP_AXI_PROT_EN_VDOSYS0_STEP2 BIT(6)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0_STEP3 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP2 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDOSYS1_STEP3 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_DP_TX_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_EDP_TX_STEP1 BIT(22)
+
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP1 GENMASK(6, 5)
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP2 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS1_STEP3 BIT(18)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP2 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC0_STEP1 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP1 BIT(14)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP2 BIT(29)
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP1 (BIT(9) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP2 BIT(26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VENC_STEP3 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP1 (BIT(1) | BIT(3))
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP2 BIT(25)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_VCORE_STEP3 BIT(16)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP1 GENMASK(27, 26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP2 GENMASK(25, 24)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP1 (BIT(2) | BIT(4))
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_VCORE_STEP2 BIT(0)
+#define MT8188_TOP_AXI_PROT_EN_1_CAM_VCORE_STEP3 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP4 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_VCORE_STEP5 BIT(17)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP2 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP3 GENMASK(29, 28)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP4 BIT(1)
+
+#define MT8188_SMI_COMMON_CLAMP_EN_STA 0x3C0
+#define MT8188_SMI_COMMON_CLAMP_EN_SET 0x3C4
+#define MT8188_SMI_COMMON_CLAMP_EN_CLR 0x3C8
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VDO0 GENMASK(3, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VPP1 GENMASK(2, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_IPE_TO_VPP1 BIT(0)
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBA_TO_VPP0 GENMASK(3, 2)
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBB_TO_VDO0 GENMASK(3, 2)
+
+#define MT8188_SMI_LARB10_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11A_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11C_RESET_ADDR 0xC
+#define MT8188_SMI_LARB12_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11B_RESET_ADDR 0xC
+#define MT8188_SMI_LARB15_RESET_ADDR 0xC
+#define MT8188_SMI_LARB16B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB16A_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17A_RESET_ADDR 0xA0
+
+#define MT8188_SMI_LARB10_RESET BIT(0)
+#define MT8188_SMI_LARB11A_RESET BIT(0)
+#define MT8188_SMI_LARB11C_RESET BIT(0)
+#define MT8188_SMI_LARB12_RESET BIT(8)
+#define MT8188_SMI_LARB11B_RESET BIT(0)
+#define MT8188_SMI_LARB15_RESET BIT(0)
+#define MT8188_SMI_LARB16B_RESET BIT(4)
+#define MT8188_SMI_LARB17B_RESET BIT(4)
+#define MT8188_SMI_LARB16A_RESET BIT(4)
+#define MT8188_SMI_LARB17A_RESET BIT(4)
+
+#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0)
+#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4)
+#define MT8186_TOP_AXI_PROT_EN_STA (0x228)
+#define MT8186_TOP_AXI_PROT_EN_1_SET (0x2A8)
+#define MT8186_TOP_AXI_PROT_EN_1_CLR (0x2AC)
+#define MT8186_TOP_AXI_PROT_EN_1_STA (0x258)
+#define MT8186_TOP_AXI_PROT_EN_2_SET (0x2B0)
+#define MT8186_TOP_AXI_PROT_EN_2_CLR (0x2B4)
+#define MT8186_TOP_AXI_PROT_EN_2_STA (0x26C)
+#define MT8186_TOP_AXI_PROT_EN_3_SET (0x2B8)
+#define MT8186_TOP_AXI_PROT_EN_3_CLR (0x2BC)
+#define MT8186_TOP_AXI_PROT_EN_3_STA (0x2C8)
+
+/* MFG1 */
+#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1 (GENMASK(28, 27))
+#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP2 (GENMASK(22, 21))
+#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP3 (BIT(25))
+#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4 (BIT(29))
+/* DIS */
+#define MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1 (GENMASK(12, 11))
+#define MT8186_TOP_AXI_PROT_EN_DIS_STEP2 (GENMASK(2, 1) | GENMASK(11, 10))
+/* IMG */
+#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1 (BIT(23))
+#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2 (BIT(15))
+/* IPE */
+#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1 (BIT(24))
+#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2 (BIT(16))
+/* CAM */
+#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1 (GENMASK(22, 21))
+#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2 (GENMASK(14, 13))
+/* VENC */
+#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1 (BIT(31))
+#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2 (BIT(19))
+/* VDEC */
+#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1 (BIT(30))
+#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2 (BIT(17))
+/* WPE */
+#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1 (BIT(17))
+#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2 (BIT(16))
+/* CONN_ON */
+#define MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1 (BIT(18))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2 (BIT(14))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3 (BIT(13))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4 (BIT(16))
+/* ADSP_TOP */
+#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1 (GENMASK(12, 11))
+#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2 (GENMASK(1, 0))
+
+#define MT8183_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258
+#define MT8183_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8183_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8183_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8183_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8183_TOP_AXI_PROT_EN_MCU_SET 0x2c4
+#define MT8183_TOP_AXI_PROT_EN_MCU_CLR 0x2c8
+#define MT8183_TOP_AXI_PROT_EN_MCU_STA1 0x2e4
+#define MT8183_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8183_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8183_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+
+#define MT8183_TOP_AXI_PROT_EN_DISP (BIT(10) | BIT(11))
+#define MT8183_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(14))
+#define MT8183_TOP_AXI_PROT_EN_MFG (BIT(21) | BIT(22))
+#define MT8183_TOP_AXI_PROT_EN_CAM BIT(28)
+#define MT8183_TOP_AXI_PROT_EN_VPU_TOP BIT(27)
+#define MT8183_TOP_AXI_PROT_EN_1_DISP (BIT(16) | BIT(17))
+#define MT8183_TOP_AXI_PROT_EN_1_MFG GENMASK(21, 19)
+#define MT8183_TOP_AXI_PROT_EN_MM_ISP (BIT(3) | BIT(8))
+#define MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND BIT(10)
+#define MT8183_TOP_AXI_PROT_EN_MM_CAM (BIT(4) | BIT(5) | \
+ BIT(9) | BIT(13))
+#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP (GENMASK(9, 6) | \
+ BIT(12))
+#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND (BIT(10) | BIT(11))
+#define MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(11)
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND (BIT(0) | BIT(2) | \
+ BIT(4))
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND (BIT(1) | BIT(3) | \
+ BIT(5))
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0 BIT(6)
+#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1 BIT(7)
+
+#define MT8183_SMI_COMMON_CLAMP_EN 0x3c0
+#define MT8183_SMI_COMMON_CLAMP_EN_SET 0x3c4
+#define MT8183_SMI_COMMON_CLAMP_EN_CLR 0x3c8
+
+#define MT8183_SMI_COMMON_SMI_CLAMP_DISP GENMASK(7, 0)
+#define MT8183_SMI_COMMON_SMI_CLAMP_VENC BIT(1)
+#define MT8183_SMI_COMMON_SMI_CLAMP_ISP BIT(2)
+#define MT8183_SMI_COMMON_SMI_CLAMP_CAM (BIT(3) | BIT(4))
+#define MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP (BIT(5) | BIT(6))
+#define MT8183_SMI_COMMON_SMI_CLAMP_VDEC BIT(7)
+
#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
@@ -21,6 +415,14 @@
#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+#define MT8167_TOP_AXI_PROT_EN_MM_EMI BIT(1)
+#define MT8167_TOP_AXI_PROT_EN_MCU_MFG BIT(2)
+#define MT8167_TOP_AXI_PROT_EN_CONN_EMI BIT(4)
+#define MT8167_TOP_AXI_PROT_EN_MFG_EMI BIT(5)
+#define MT8167_TOP_AXI_PROT_EN_CONN_MCU BIT(8)
+#define MT8167_TOP_AXI_PROT_EN_MCU_CONN BIT(9)
+#define MT8167_TOP_AXI_PROT_EN_MCU_MM BIT(11)
+
#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1)
#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2)
#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8)
@@ -32,6 +434,17 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
+#define INFRA_TOPAXI_PROTECTEN 0x0220
+#define INFRA_TOPAXI_PROTECTSTA1 0x0228
+#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
+#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
+
+#define MT8192_INFRA_CTRL 0x290
+#define MT8192_INFRA_CTRL_DISABLE_MFG2ACP BIT(9)
+
+#define REG_INFRA_MISC 0xf00
+#define F_DDR_4GB_SUPPORT_EN BIT(13)
+
int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
bool reg_update);
int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
				      bool reg_update);
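
The SET/CLR/STA offsets above are consumed by these two helpers. A power-domain sketch — the MT8183 MFG mask is just one example — asserts protection before the domain drops and releases it afterwards:

/* Sketch: bracket a power transition with bus protection; mask illustrative. */
#include <linux/regmap.h>
#include <linux/soc/mediatek/infracfg.h>

static int my_mfg_power_cycle(struct regmap *infracfg)
{
	int ret;

	ret = mtk_infracfg_set_bus_protection(infracfg,
					      MT8183_TOP_AXI_PROT_EN_MFG, true);
	if (ret)
		return ret;

	/* ... power the domain down and back up ... */

	return mtk_infracfg_clear_bus_protection(infracfg,
						 MT8183_TOP_AXI_PROT_EN_MFG,
						 true);
}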
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 2249ecaf77e4..649955d2cf5c 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -11,7 +11,8 @@
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/timer.h>
-#define CMDQ_NO_TIMEOUT 0xffffffffu
+#define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0)))
+#define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1))
struct cmdq_pkt;
@@ -22,14 +23,12 @@ struct cmdq_client_reg {
};
struct cmdq_client {
- spinlock_t lock;
- u32 pkt_cnt;
struct mbox_client client;
struct mbox_chan *chan;
- struct timer_list timer;
- u32 timeout_ms; /* in unit of microsecond */
};
+#if IS_ENABLED(CONFIG_MTK_CMDQ)
+
/**
* cmdq_dev_get_client_reg() - parse cmdq client reg from the device
* node of CMDQ client
@@ -49,13 +48,10 @@ int cmdq_dev_get_client_reg(struct device *dev,
* cmdq_mbox_create() - create CMDQ mailbox client and channel
* @dev: device of CMDQ mailbox client
* @index: index of CMDQ mailbox channel
- * @timeout: timeout of a pkt execution by GCE, in unit of microsecond, set
- * CMDQ_NO_TIMEOUT if a timer is not used.
*
* Return: CMDQ mailbox client pointer
*/
-struct cmdq_client *cmdq_mbox_create(struct device *dev, int index,
- u32 timeout);
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index);
/**
* cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel
@@ -102,14 +98,90 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+/**
+ * cmdq_pkt_read_s() - append read_s command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @reg_idx: the CMDQ internal register ID to cache read data
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
+ u16 reg_idx);
+
+/**
+ * cmdq_pkt_write_s() - append write_s command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @src_reg_idx: the CMDQ internal register ID which caches the source value
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Supports writing a value to a physical address without a subsys. Use
+ * CMDQ_ADDR_HIGH() to get the high half of the address and call
+ * cmdq_pkt_assign() to load it into an internal register. Use
+ * CMDQ_ADDR_LOW() to derive the addr_low parameter when calling this
+ * function.
+ */
+int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx);
+
+/**
+ * cmdq_pkt_write_s_mask() - append write_s with mask command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @src_reg_idx: the CMDQ internal register ID which caches the source value
+ * @mask: the specified target address mask, use U32_MAX if no need
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Supports writing a value to a physical address without a subsys. Use
+ * CMDQ_ADDR_HIGH() to get the high half of the address and call
+ * cmdq_pkt_assign() to load it into an internal register. Use
+ * CMDQ_ADDR_LOW() to derive the addr_low parameter when calling this
+ * function.
+ */
+int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask);
+
+/**
+ * cmdq_pkt_write_s_value() - append write_s command to the CMDQ packet which
+ * writes a value to a physical address
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @value: the specified target value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value);
+
+/**
+ * cmdq_pkt_write_s_mask_value() - append write_s command with mask to the CMDQ
+ * packet which writes a value to a physical
+ * address
+ * @pkt: the CMDQ packet
+ * @high_addr_reg_idx: internal register ID which contains high address of pa
+ * @addr_low: low address of pa
+ * @value: the specified target value
+ * @mask: the specified target mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask);
+
/**
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
* @pkt: the CMDQ packet
- * @event: the desired event type to "wait and CLEAR"
+ * @event: the desired event type to wait for
+ * @clear: whether to clear the event after it arrives
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
/**
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
@@ -176,6 +248,17 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
/**
+ * cmdq_pkt_jump() - Append jump command to the CMDQ packet, asking GCE
+ * to execute an instruction that changes the current thread's PC to
+ * a physical address which should contain further instructions.
+ * @pkt: the CMDQ packet
+ * @addr: physical address of target instruction buffer
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr);
+
+/**
* cmdq_pkt_finalize() - Append EOC and jump command to pkt.
* @pkt: the CMDQ packet
*
@@ -187,8 +270,6 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
* cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
* packet and call back at the end of done packet
* @pkt: the CMDQ packet
- * @cb: called at the end of done packet
- * @data: this data will pass back to cb
*
* Return: 0 for success; else the error code is returned
*
@@ -196,19 +277,118 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
* at the end of done packet. Note that this is an ASYNC function. When the
* function returned, it may or may not be finished.
*/
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
- void *data);
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt);
-/**
- * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet
- * @pkt: the CMDQ packet
- *
- * Return: 0 for success; else the error code is returned
- *
- * Trigger CMDQ to execute the CMDQ packet. Note that this is a
- * synchronous flush function. When the function returned, the recorded
- * commands have been done.
- */
-int cmdq_pkt_flush(struct cmdq_pkt *pkt);
+#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
+
+static inline int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx)
+{
+ return -ENODEV;
+}
+
+static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void cmdq_mbox_destroy(struct cmdq_client *client) { }
+
+static inline struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void cmdq_pkt_destroy(struct cmdq_pkt *pkt) { }
+
+static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 reg_idx)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
+{
+ return -EINVAL;
+}
+
+#endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */
#endif /* __MTK_CMDQ_H__ */
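
The write_s kernel-doc above prescribes a two-step sequence: load the CMDQ_ADDR_HIGH() half of the physical address into an internal register with cmdq_pkt_assign(), then pass CMDQ_ADDR_LOW() as addr_low. A sketch of that sequence; the internal register index is illustrative:

/* Sketch: write a value to a raw physical address via write_s. */
#include <linux/types.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

static int my_write_pa(struct cmdq_pkt *pkt, dma_addr_t pa, u32 value)
{
	const u8 idx = 0;	/* illustrative internal register index */
	int ret;

	ret = cmdq_pkt_assign(pkt, idx, CMDQ_ADDR_HIGH(pa));
	if (ret)
		return ret;

	return cmdq_pkt_write_s_value(pkt, idx, CMDQ_ADDR_LOW(pa), value);
}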
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 7bab5d9a3d31..4885b065b849 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -6,9 +6,84 @@
#ifndef __MTK_MMSYS_H
#define __MTK_MMSYS_H
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
enum mtk_ddp_comp_id;
struct device;
+enum mtk_dpi_out_format_con {
+ MTK_DPI_RGB888_SDR_CON,
+ MTK_DPI_RGB888_DDR_CON,
+ MTK_DPI_RGB565_SDR_CON,
+ MTK_DPI_RGB565_DDR_CON
+};
+
+enum mtk_ddp_comp_id {
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_AAL1,
+ DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER0,
+ DDP_COMPONENT_DITHER1,
+ DDP_COMPONENT_DP_INTF0,
+ DDP_COMPONENT_DP_INTF1,
+ DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_DPI1,
+ DDP_COMPONENT_DSC0,
+ DDP_COMPONENT_DSC1,
+ DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DSI1,
+ DDP_COMPONENT_DSI2,
+ DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_ETHDR_MIXER,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_MDP_RDMA0,
+ DDP_COMPONENT_MDP_RDMA1,
+ DDP_COMPONENT_MDP_RDMA2,
+ DDP_COMPONENT_MDP_RDMA3,
+ DDP_COMPONENT_MDP_RDMA4,
+ DDP_COMPONENT_MDP_RDMA5,
+ DDP_COMPONENT_MDP_RDMA6,
+ DDP_COMPONENT_MDP_RDMA7,
+ DDP_COMPONENT_MERGE0,
+ DDP_COMPONENT_MERGE1,
+ DDP_COMPONENT_MERGE2,
+ DDP_COMPONENT_MERGE3,
+ DDP_COMPONENT_MERGE4,
+ DDP_COMPONENT_MERGE5,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_OD1,
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_OVL_2L2,
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_PADDING0,
+ DDP_COMPONENT_PADDING1,
+ DDP_COMPONENT_PADDING2,
+ DDP_COMPONENT_PADDING3,
+ DDP_COMPONENT_PADDING4,
+ DDP_COMPONENT_PADDING5,
+ DDP_COMPONENT_PADDING6,
+ DDP_COMPONENT_PADDING7,
+ DDP_COMPONENT_POSTMASK0,
+ DDP_COMPONENT_PWM0,
+ DDP_COMPONENT_PWM1,
+ DDP_COMPONENT_PWM2,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_RDMA4,
+ DDP_COMPONENT_UFOE,
+ DDP_COMPONENT_WDMA0,
+ DDP_COMPONENT_WDMA1,
+ DDP_COMPONENT_ID_MAX,
+};
+
void mtk_mmsys_ddp_connect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next);
@@ -17,4 +92,24 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next);
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val);
+
+void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width,
+ int height, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha,
+ u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_vpp_rsz_merge_config(struct device *dev, u32 id, bool enable,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_vpp_rsz_dcm_config(struct device *dev, bool enable,
+ struct cmdq_pkt *cmdq_pkt);
+
#endif /* __MTK_MMSYS_H */
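
For reference, the connect/disconnect pair is symmetric. A sketch routing one illustrative display path:

/* Sketch: route OVL0 into RDMA0 for the lifetime of the display path. */
#include <linux/soc/mediatek/mtk-mmsys.h>

static void my_path_enable(struct device *mmsys_dev)
{
	mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_OVL0,
			      DDP_COMPONENT_RDMA0);
}

static void my_path_disable(struct device *mmsys_dev)
{
	mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_OVL0,
				 DDP_COMPONENT_RDMA0);
}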
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
new file mode 100644
index 000000000000..635218e3ac68
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef MTK_MUTEX_H
+#define MTK_MUTEX_H
+
+struct regmap;
+struct device;
+struct mtk_mutex;
+
+enum mtk_mutex_mod_index {
+ /* MDP table index */
+ MUTEX_MOD_IDX_MDP_RDMA0,
+ MUTEX_MOD_IDX_MDP_RSZ0,
+ MUTEX_MOD_IDX_MDP_RSZ1,
+ MUTEX_MOD_IDX_MDP_TDSHP0,
+ MUTEX_MOD_IDX_MDP_WROT0,
+ MUTEX_MOD_IDX_MDP_WDMA,
+ MUTEX_MOD_IDX_MDP_AAL0,
+ MUTEX_MOD_IDX_MDP_CCORR0,
+ MUTEX_MOD_IDX_MDP_HDR0,
+ MUTEX_MOD_IDX_MDP_COLOR0,
+ MUTEX_MOD_IDX_MDP_RDMA1,
+ MUTEX_MOD_IDX_MDP_RDMA2,
+ MUTEX_MOD_IDX_MDP_RDMA3,
+ MUTEX_MOD_IDX_MDP_STITCH0,
+ MUTEX_MOD_IDX_MDP_FG0,
+ MUTEX_MOD_IDX_MDP_FG1,
+ MUTEX_MOD_IDX_MDP_FG2,
+ MUTEX_MOD_IDX_MDP_FG3,
+ MUTEX_MOD_IDX_MDP_HDR1,
+ MUTEX_MOD_IDX_MDP_HDR2,
+ MUTEX_MOD_IDX_MDP_HDR3,
+ MUTEX_MOD_IDX_MDP_AAL1,
+ MUTEX_MOD_IDX_MDP_AAL2,
+ MUTEX_MOD_IDX_MDP_AAL3,
+ MUTEX_MOD_IDX_MDP_RSZ2,
+ MUTEX_MOD_IDX_MDP_RSZ3,
+ MUTEX_MOD_IDX_MDP_MERGE2,
+ MUTEX_MOD_IDX_MDP_MERGE3,
+ MUTEX_MOD_IDX_MDP_TDSHP1,
+ MUTEX_MOD_IDX_MDP_TDSHP2,
+ MUTEX_MOD_IDX_MDP_TDSHP3,
+ MUTEX_MOD_IDX_MDP_COLOR1,
+ MUTEX_MOD_IDX_MDP_COLOR2,
+ MUTEX_MOD_IDX_MDP_COLOR3,
+ MUTEX_MOD_IDX_MDP_OVL0,
+ MUTEX_MOD_IDX_MDP_OVL1,
+ MUTEX_MOD_IDX_MDP_PAD0,
+ MUTEX_MOD_IDX_MDP_PAD1,
+ MUTEX_MOD_IDX_MDP_PAD2,
+ MUTEX_MOD_IDX_MDP_PAD3,
+ MUTEX_MOD_IDX_MDP_TCC0,
+ MUTEX_MOD_IDX_MDP_TCC1,
+ MUTEX_MOD_IDX_MDP_WROT1,
+ MUTEX_MOD_IDX_MDP_WROT2,
+ MUTEX_MOD_IDX_MDP_WROT3,
+
+ MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */
+};
+
+enum mtk_mutex_sof_index {
+ MUTEX_SOF_IDX_SINGLE_MODE,
+
+ MUTEX_SOF_IDX_MAX /* ALWAYS keep at the end */
+};
+
+struct mtk_mutex *mtk_mutex_get(struct device *dev);
+int mtk_mutex_prepare(struct mtk_mutex *mutex);
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_mutex_enable(struct mtk_mutex *mutex);
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex,
+ void *pkt);
+void mtk_mutex_disable(struct mtk_mutex *mutex);
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_mutex_unprepare(struct mtk_mutex *mutex);
+void mtk_mutex_put(struct mtk_mutex *mutex);
+void mtk_mutex_acquire(struct mtk_mutex *mutex);
+void mtk_mutex_release(struct mtk_mutex *mutex);
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx,
+ bool clear);
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx);
+
+#endif /* MTK_MUTEX_H */
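
The declarations imply a get → prepare → add_comp → enable lifecycle, mirrored by disable → remove_comp → unprepare → put on teardown. A sketch, assuming mtk_mutex_get() reports errors via ERR_PTR; the component choice and error mapping are illustrative:

/* Sketch: acquire and arm a display mutex; component and errno illustrative. */
#include <linux/err.h>
#include <linux/soc/mediatek/mtk-mmsys.h>	/* for enum mtk_ddp_comp_id */
#include <linux/soc/mediatek/mtk-mutex.h>

static struct mtk_mutex *my_mutex_setup(struct device *dev)
{
	struct mtk_mutex *mutex = mtk_mutex_get(dev);

	if (IS_ERR(mutex))
		return mutex;

	if (mtk_mutex_prepare(mutex)) {
		mtk_mutex_put(mutex);
		return ERR_PTR(-EIO);	/* illustrative error mapping */
	}

	mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_enable(mutex);
	return mutex;
}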
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
index 082398e0cfb1..0761128b4354 100644
--- a/include/linux/soc/mediatek/mtk_sip_svc.h
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -22,4 +22,7 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
ARM_SMCCC_OWNER_SIP, fn_id)
+/* IOMMU related SMC call */
+#define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514)
+
#endif
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..a476648858a6
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,332 @@
+#ifndef __MTK_WED_H
+#define __MTK_WED_H
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/regmap.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#define MTK_WED_TX_QUEUES 2
+#define MTK_WED_RX_QUEUES 2
+#define MTK_WED_RX_PAGE_QUEUES 3
+
+#define WED_WO_STA_REC 0x6
+
+struct mtk_wed_hw;
+struct mtk_wdma_desc;
+
+enum mtk_wed_wo_cmd {
+ MTK_WED_WO_CMD_WED_CFG,
+ MTK_WED_WO_CMD_WED_RX_STAT,
+ MTK_WED_WO_CMD_RRO_SER,
+ MTK_WED_WO_CMD_DBG_INFO,
+ MTK_WED_WO_CMD_DEV_INFO,
+ MTK_WED_WO_CMD_BSS_INFO,
+ MTK_WED_WO_CMD_STA_REC,
+ MTK_WED_WO_CMD_DEV_INFO_DUMP,
+ MTK_WED_WO_CMD_BSS_INFO_DUMP,
+ MTK_WED_WO_CMD_STA_REC_DUMP,
+ MTK_WED_WO_CMD_BA_INFO_DUMP,
+ MTK_WED_WO_CMD_FBCMD_Q_DUMP,
+ MTK_WED_WO_CMD_FW_LOG_CTRL,
+ MTK_WED_WO_CMD_LOG_FLUSH,
+ MTK_WED_WO_CMD_CHANGE_STATE,
+ MTK_WED_WO_CMD_CPU_STATS_ENABLE,
+ MTK_WED_WO_CMD_CPU_STATS_DUMP,
+ MTK_WED_WO_CMD_EXCEPTION_INIT,
+ MTK_WED_WO_CMD_PROF_CTRL,
+ MTK_WED_WO_CMD_STA_BA_DUMP,
+ MTK_WED_WO_CMD_BA_CTRL_DUMP,
+ MTK_WED_WO_CMD_RXCNT_CTRL,
+ MTK_WED_WO_CMD_RXCNT_INFO,
+ MTK_WED_WO_CMD_SET_CAP,
+ MTK_WED_WO_CMD_CCIF_RING_DUMP,
+ MTK_WED_WO_CMD_WED_END
+};
+
+struct mtk_wed_bm_desc {
+ __le32 buf0;
+ __le32 token;
+} __packed __aligned(4);
+
+enum mtk_wed_bus_tye {
+ MTK_WED_BUS_PCIE,
+ MTK_WED_BUS_AXI,
+};
+
+#define MTK_WED_RING_CONFIGURED BIT(0)
+struct mtk_wed_ring {
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ u32 desc_size;
+ int size;
+ u32 flags;
+
+ u32 reg_base;
+ void __iomem *wpdma;
+};
+
+struct mtk_wed_wo_rx_stats {
+ __le16 wlan_idx;
+ __le16 tid;
+ __le32 rx_pkt_cnt;
+ __le32 rx_byte_cnt;
+ __le32 rx_err_cnt;
+ __le32 rx_drop_cnt;
+};
+
+struct mtk_wed_buf {
+ void *p;
+ dma_addr_t phy_addr;
+};
+
+struct mtk_wed_device {
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+ struct device *dev;
+ struct mtk_wed_hw *hw;
+ bool init_done, running;
+ int wdma_idx;
+ int irq;
+ u8 version;
+
+ /* used by wlan driver */
+ u32 rev_id;
+
+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+ struct mtk_wed_ring ind_cmd_ring;
+
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } tx_buf_ring;
+
+ struct {
+ int size;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } rx_buf_ring;
+
+ struct {
+ struct mtk_wed_ring ring;
+ dma_addr_t miod_phys;
+ dma_addr_t fdbk_phys;
+ } rro;
+
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } hw_rro;
+
+ /* filled by driver: */
+ struct {
+ union {
+ struct platform_device *platform_dev;
+ struct pci_dev *pci_dev;
+ };
+ enum mtk_wed_bus_tye bus_type;
+ void __iomem *base;
+ u32 phy_base;
+ u32 id;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
+ u32 wpdma_mask;
+ u32 wpdma_tx;
+ u32 wpdma_txfree;
+ u32 wpdma_rx_glo;
+ u32 wpdma_rx;
+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+ u32 wpdma_rx_pg;
+
+ bool wcid_512;
+ bool hw_rro;
+ bool msi;
+
+ u16 token_start;
+ unsigned int nbuf;
+ unsigned int rx_nbuf;
+ unsigned int rx_npkt;
+ unsigned int rx_size;
+ unsigned int amsdu_max_len;
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
+ u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
+ struct {
+ u8 se_group_nums;
+ u16 win_size;
+ u16 particular_sid;
+ u32 ack_sn_addr;
+ dma_addr_t particular_se_phys;
+ dma_addr_t addr_elem_phys[1024];
+ } ind_cmd;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
+ void (*release_rx_buf)(struct mtk_wed_device *wed);
+ void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
+ struct mtk_wed_wo_rx_stats *stats);
+ int (*reset)(struct mtk_wed_device *wed);
+ void (*reset_complete)(struct mtk_wed_device *wed);
+ } wlan;
+#endif
+};
+
+struct mtk_wed_ops {
+ int (*attach)(struct mtk_wed_device *dev);
+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs, bool reset);
+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs, bool reset);
+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
+ void *data, int len);
+ void (*detach)(struct mtk_wed_device *dev);
+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
+ u32 reason, u32 hash);
+
+ void (*stop)(struct mtk_wed_device *dev);
+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
+ void (*reset_dma)(struct mtk_wed_device *dev);
+
+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
+
+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data);
+ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+ bool reset);
+ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
+};
+
+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+
+static inline int
+mtk_wed_device_attach(struct mtk_wed_device *dev)
+{
+ int ret = -ENODEV;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ rcu_read_lock();
+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
+ if (dev->ops)
+ ret = dev->ops->attach(dev);
+ else
+ rcu_read_unlock();
+
+ if (ret)
+ dev->ops = NULL;
+#endif
+
+ return ret;
+}
+
+static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (dev->version == 3)
+ return dev->wlan.hw_rro;
+
+ return dev->version != 1;
+#else
+ return false;
+#endif
+}
+
+static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ return dev->version == 3;
+#else
+ return false;
+#endif
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+#define mtk_wed_device_active(_dev) !!(_dev)->ops
+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
+#define mtk_wed_device_reg_read(_dev, _reg) \
+ (_dev)->ops->reg_read(_dev, _reg)
+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
+ (_dev)->ops->reg_write(_dev, _reg, _val)
+#define mtk_wed_device_irq_get(_dev, _mask) \
+ (_dev)->ops->irq_get(_dev, _mask)
+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
+ (_dev)->ops->irq_set_mask(_dev, _mask)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
+#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
+#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
+#else
+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+{
+ return false;
+}
+#define mtk_wed_device_detach(_dev) do {} while (0)
+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
+#define mtk_wed_device_reg_read(_dev, _reg) 0
+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
+#define mtk_wed_device_stop(_dev) do {} while (0)
+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
+#endif
+
+#endif
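
mtk_wed_device_attach() above already shows the consumer side of the RCU-protected ops pointer. A wlan-driver sketch — all field values are illustrative — fills the wlan sub-struct and attaches, treating failure as "run without offload":

/* Sketch: register a PCIe wlan device with WED; values illustrative. */
#include <linux/soc/mediatek/mtk_wed.h>

static void my_wed_init(struct mtk_wed_device *wed, struct pci_dev *pdev)
{
	wed->wlan.pci_dev = pdev;
	wed->wlan.bus_type = MTK_WED_BUS_PCIE;
	wed->wlan.nbuf = 4096;		/* illustrative ring sizing */
	wed->wlan.token_start = 0;

	if (mtk_wed_device_attach(wed))
		return;	/* no WED hardware/driver: continue without offload */

	/* mtk_wed_device_active(wed) is now true; the ring-setup and
	 * irq macros above can be used against the real registers. */
}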
diff --git a/include/linux/soc/mmp/cputype.h b/include/linux/soc/mmp/cputype.h
index 221790761e8e..f13d127fadc4 100644
--- a/include/linux/soc/mmp/cputype.h
+++ b/include/linux/soc/mmp/cputype.h
@@ -26,29 +26,7 @@
extern unsigned int mmp_chip_id;
-#ifdef CONFIG_CPU_PXA168
-static inline int cpu_is_pxa168(void)
-{
- return (((read_cpuid_id() >> 8) & 0xff) == 0x84) &&
- ((mmp_chip_id & 0xfff) == 0x168);
-}
-#else
-#define cpu_is_pxa168() (0)
-#endif
-
-/* cpu_is_pxa910() is shared on both pxa910 and pxa920 */
-#ifdef CONFIG_CPU_PXA910
-static inline int cpu_is_pxa910(void)
-{
- return (((read_cpuid_id() >> 8) & 0xff) == 0x84) &&
- (((mmp_chip_id & 0xfff) == 0x910) ||
- ((mmp_chip_id & 0xfff) == 0x920));
-}
-#else
-#define cpu_is_pxa910() (0)
-#endif
-
-#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
+#if defined(CONFIG_MACH_MMP2_DT)
static inline int cpu_is_mmp2(void)
{
return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
diff --git a/include/linux/soc/pxa/cpu.h b/include/linux/soc/pxa/cpu.h
new file mode 100644
index 000000000000..5782450ee45c
--- /dev/null
+++ b/include/linux/soc/pxa/cpu.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Nicolas Pitre
+ * Created: Jun 15, 2001
+ * Copyright: MontaVista Software Inc.
+ */
+
+#ifndef __SOC_PXA_CPU_H
+#define __SOC_PXA_CPU_H
+
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/*
+ * CPU Stepping CPU_ID JTAG_ID
+ *
+ * PXA210 B0 0x69052922 0x2926C013
+ * PXA210 B1 0x69052923 0x3926C013
+ * PXA210 B2 0x69052924 0x4926C013
+ * PXA210 C0 0x69052D25 0x5926C013
+ *
+ * PXA250 A0 0x69052100 0x09264013
+ * PXA250 A1 0x69052101 0x19264013
+ * PXA250 B0 0x69052902 0x29264013
+ * PXA250 B1 0x69052903 0x39264013
+ * PXA250 B2 0x69052904 0x49264013
+ * PXA250 C0 0x69052D05 0x59264013
+ *
+ * PXA255 A0 0x69052D06 0x69264013
+ *
+ * PXA26x A0 0x69052903 0x39264013
+ * PXA26x B0 0x69052D05 0x59264013
+ *
+ * PXA27x A0 0x69054110 0x09265013
+ * PXA27x A1 0x69054111 0x19265013
+ * PXA27x B0 0x69054112 0x29265013
+ * PXA27x B1 0x69054113 0x39265013
+ * PXA27x C0 0x69054114 0x49265013
+ * PXA27x C5 0x69054117 0x79265013
+ *
+ * PXA30x A0 0x69056880 0x0E648013
+ * PXA30x A1 0x69056881 0x1E648013
+ * PXA31x A0 0x69056890 0x0E649013
+ * PXA31x A1 0x69056891 0x1E649013
+ * PXA31x A2 0x69056892 0x2E649013
+ * PXA32x B1 0x69056825 0x5E642013
+ * PXA32x B2 0x69056826 0x6E642013
+ *
+ * PXA930 B0 0x69056835 0x5E643013
+ * PXA930 B1 0x69056837 0x7E643013
+ * PXA930 B2 0x69056838 0x8E643013
+ *
+ * PXA935 A0 0x56056931 0x1E653013
+ * PXA935 B0 0x56056936 0x6E653013
+ * PXA935 B1 0x56056938 0x8E653013
+ */
+#ifdef CONFIG_PXA25x
+#define __cpu_is_pxa210(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf3f0; \
+ _id == 0x2120; \
+ })
+
+#define __cpu_is_pxa250(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf3ff; \
+ _id <= 0x2105; \
+ })
+
+#define __cpu_is_pxa255(id) \
+ ({ \
+ unsigned int _id = (id) & 0xffff; \
+ _id == 0x2d06; \
+ })
+
+#define __cpu_is_pxa25x(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf300; \
+ _id == 0x2100; \
+ })
+#else
+#define __cpu_is_pxa210(id) (0)
+#define __cpu_is_pxa250(id) (0)
+#define __cpu_is_pxa255(id) (0)
+#define __cpu_is_pxa25x(id) (0)
+#endif
+
+#ifdef CONFIG_PXA27x
+#define __cpu_is_pxa27x(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x411; \
+ })
+#else
+#define __cpu_is_pxa27x(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA300
+#define __cpu_is_pxa300(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x688; \
+ })
+#else
+#define __cpu_is_pxa300(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA310
+#define __cpu_is_pxa310(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x689; \
+ })
+#else
+#define __cpu_is_pxa310(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA320
+#define __cpu_is_pxa320(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x603 || _id == 0x682; \
+ })
+#else
+#define __cpu_is_pxa320(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA930
+#define __cpu_is_pxa930(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x683; \
+ })
+#else
+#define __cpu_is_pxa930(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA935
+#define __cpu_is_pxa935(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x693; \
+ })
+#else
+#define __cpu_is_pxa935(id) (0)
+#endif
+
+#define cpu_is_pxa210() \
+ ({ \
+ __cpu_is_pxa210(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa250() \
+ ({ \
+ __cpu_is_pxa250(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa255() \
+ ({ \
+ __cpu_is_pxa255(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa25x() \
+ ({ \
+ __cpu_is_pxa25x(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa27x() \
+ ({ \
+ __cpu_is_pxa27x(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa300() \
+ ({ \
+ __cpu_is_pxa300(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa310() \
+ ({ \
+ __cpu_is_pxa310(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa320() \
+ ({ \
+ __cpu_is_pxa320(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa930() \
+ ({ \
+ __cpu_is_pxa930(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa935() \
+ ({ \
+ __cpu_is_pxa935(read_cpuid_id()); \
+ })
+
+
+/*
+ * CPUID Core Generation Bit
+ * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
+ */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
+#define __cpu_is_pxa2xx(id) \
+ ({ \
+ unsigned int _id = (id) >> 13 & 0x7; \
+ _id <= 0x2; \
+ })
+#else
+#define __cpu_is_pxa2xx(id) (0)
+#endif
+
+#ifdef CONFIG_PXA3xx
+#define __cpu_is_pxa3xx(id) \
+ ({ \
+ __cpu_is_pxa300(id) \
+ || __cpu_is_pxa310(id) \
+ || __cpu_is_pxa320(id) \
+ || __cpu_is_pxa93x(id); \
+ })
+#else
+#define __cpu_is_pxa3xx(id) (0)
+#endif
+
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
+#define __cpu_is_pxa93x(id) \
+ ({ \
+ __cpu_is_pxa930(id) \
+ || __cpu_is_pxa935(id); \
+ })
+#else
+#define __cpu_is_pxa93x(id) (0)
+#endif
+
+#define cpu_is_pxa2xx() \
+ ({ \
+ __cpu_is_pxa2xx(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa3xx() \
+ ({ \
+ __cpu_is_pxa3xx(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa93x() \
+ ({ \
+ __cpu_is_pxa93x(read_cpuid_id()); \
+ })
+
+#endif
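
As a quick illustration of how these predicates are meant to be consumed, here is a minimal sketch of a probe-time check; the function and messages are hypothetical, not part of this patch.

#include <linux/printk.h>
#include <linux/soc/pxa/cpu.h>

/* Hypothetical example: pick SoC-specific quirks at probe time. */
static void example_apply_soc_quirks(void)
{
	if (cpu_is_pxa25x())
		pr_info("PXA25x detected\n");
	else if (cpu_is_pxa27x())
		pr_info("PXA27x detected\n");
	else if (cpu_is_pxa3xx())
		pr_info("PXA3xx family detected\n");
}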
diff --git a/include/linux/soc/pxa/mfp.h b/include/linux/soc/pxa/mfp.h
new file mode 100644
index 000000000000..39779cbed0c0
--- /dev/null
+++ b/include/linux/soc/pxa/mfp.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common Multi-Function Pin Definitions
+ *
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * 2007-8-21: eric miao <eric.miao@marvell.com>
+ * initial version
+ */
+
+#ifndef __ASM_PLAT_MFP_H
+#define __ASM_PLAT_MFP_H
+
+#define mfp_to_gpio(m) ((m) % 256)
+
+/* list of all the configurable MFP pins */
+enum {
+ MFP_PIN_INVALID = -1,
+
+ MFP_PIN_GPIO0 = 0,
+ MFP_PIN_GPIO1,
+ MFP_PIN_GPIO2,
+ MFP_PIN_GPIO3,
+ MFP_PIN_GPIO4,
+ MFP_PIN_GPIO5,
+ MFP_PIN_GPIO6,
+ MFP_PIN_GPIO7,
+ MFP_PIN_GPIO8,
+ MFP_PIN_GPIO9,
+ MFP_PIN_GPIO10,
+ MFP_PIN_GPIO11,
+ MFP_PIN_GPIO12,
+ MFP_PIN_GPIO13,
+ MFP_PIN_GPIO14,
+ MFP_PIN_GPIO15,
+ MFP_PIN_GPIO16,
+ MFP_PIN_GPIO17,
+ MFP_PIN_GPIO18,
+ MFP_PIN_GPIO19,
+ MFP_PIN_GPIO20,
+ MFP_PIN_GPIO21,
+ MFP_PIN_GPIO22,
+ MFP_PIN_GPIO23,
+ MFP_PIN_GPIO24,
+ MFP_PIN_GPIO25,
+ MFP_PIN_GPIO26,
+ MFP_PIN_GPIO27,
+ MFP_PIN_GPIO28,
+ MFP_PIN_GPIO29,
+ MFP_PIN_GPIO30,
+ MFP_PIN_GPIO31,
+ MFP_PIN_GPIO32,
+ MFP_PIN_GPIO33,
+ MFP_PIN_GPIO34,
+ MFP_PIN_GPIO35,
+ MFP_PIN_GPIO36,
+ MFP_PIN_GPIO37,
+ MFP_PIN_GPIO38,
+ MFP_PIN_GPIO39,
+ MFP_PIN_GPIO40,
+ MFP_PIN_GPIO41,
+ MFP_PIN_GPIO42,
+ MFP_PIN_GPIO43,
+ MFP_PIN_GPIO44,
+ MFP_PIN_GPIO45,
+ MFP_PIN_GPIO46,
+ MFP_PIN_GPIO47,
+ MFP_PIN_GPIO48,
+ MFP_PIN_GPIO49,
+ MFP_PIN_GPIO50,
+ MFP_PIN_GPIO51,
+ MFP_PIN_GPIO52,
+ MFP_PIN_GPIO53,
+ MFP_PIN_GPIO54,
+ MFP_PIN_GPIO55,
+ MFP_PIN_GPIO56,
+ MFP_PIN_GPIO57,
+ MFP_PIN_GPIO58,
+ MFP_PIN_GPIO59,
+ MFP_PIN_GPIO60,
+ MFP_PIN_GPIO61,
+ MFP_PIN_GPIO62,
+ MFP_PIN_GPIO63,
+ MFP_PIN_GPIO64,
+ MFP_PIN_GPIO65,
+ MFP_PIN_GPIO66,
+ MFP_PIN_GPIO67,
+ MFP_PIN_GPIO68,
+ MFP_PIN_GPIO69,
+ MFP_PIN_GPIO70,
+ MFP_PIN_GPIO71,
+ MFP_PIN_GPIO72,
+ MFP_PIN_GPIO73,
+ MFP_PIN_GPIO74,
+ MFP_PIN_GPIO75,
+ MFP_PIN_GPIO76,
+ MFP_PIN_GPIO77,
+ MFP_PIN_GPIO78,
+ MFP_PIN_GPIO79,
+ MFP_PIN_GPIO80,
+ MFP_PIN_GPIO81,
+ MFP_PIN_GPIO82,
+ MFP_PIN_GPIO83,
+ MFP_PIN_GPIO84,
+ MFP_PIN_GPIO85,
+ MFP_PIN_GPIO86,
+ MFP_PIN_GPIO87,
+ MFP_PIN_GPIO88,
+ MFP_PIN_GPIO89,
+ MFP_PIN_GPIO90,
+ MFP_PIN_GPIO91,
+ MFP_PIN_GPIO92,
+ MFP_PIN_GPIO93,
+ MFP_PIN_GPIO94,
+ MFP_PIN_GPIO95,
+ MFP_PIN_GPIO96,
+ MFP_PIN_GPIO97,
+ MFP_PIN_GPIO98,
+ MFP_PIN_GPIO99,
+ MFP_PIN_GPIO100,
+ MFP_PIN_GPIO101,
+ MFP_PIN_GPIO102,
+ MFP_PIN_GPIO103,
+ MFP_PIN_GPIO104,
+ MFP_PIN_GPIO105,
+ MFP_PIN_GPIO106,
+ MFP_PIN_GPIO107,
+ MFP_PIN_GPIO108,
+ MFP_PIN_GPIO109,
+ MFP_PIN_GPIO110,
+ MFP_PIN_GPIO111,
+ MFP_PIN_GPIO112,
+ MFP_PIN_GPIO113,
+ MFP_PIN_GPIO114,
+ MFP_PIN_GPIO115,
+ MFP_PIN_GPIO116,
+ MFP_PIN_GPIO117,
+ MFP_PIN_GPIO118,
+ MFP_PIN_GPIO119,
+ MFP_PIN_GPIO120,
+ MFP_PIN_GPIO121,
+ MFP_PIN_GPIO122,
+ MFP_PIN_GPIO123,
+ MFP_PIN_GPIO124,
+ MFP_PIN_GPIO125,
+ MFP_PIN_GPIO126,
+ MFP_PIN_GPIO127,
+
+ MFP_PIN_GPIO128,
+ MFP_PIN_GPIO129,
+ MFP_PIN_GPIO130,
+ MFP_PIN_GPIO131,
+ MFP_PIN_GPIO132,
+ MFP_PIN_GPIO133,
+ MFP_PIN_GPIO134,
+ MFP_PIN_GPIO135,
+ MFP_PIN_GPIO136,
+ MFP_PIN_GPIO137,
+ MFP_PIN_GPIO138,
+ MFP_PIN_GPIO139,
+ MFP_PIN_GPIO140,
+ MFP_PIN_GPIO141,
+ MFP_PIN_GPIO142,
+ MFP_PIN_GPIO143,
+ MFP_PIN_GPIO144,
+ MFP_PIN_GPIO145,
+ MFP_PIN_GPIO146,
+ MFP_PIN_GPIO147,
+ MFP_PIN_GPIO148,
+ MFP_PIN_GPIO149,
+ MFP_PIN_GPIO150,
+ MFP_PIN_GPIO151,
+ MFP_PIN_GPIO152,
+ MFP_PIN_GPIO153,
+ MFP_PIN_GPIO154,
+ MFP_PIN_GPIO155,
+ MFP_PIN_GPIO156,
+ MFP_PIN_GPIO157,
+ MFP_PIN_GPIO158,
+ MFP_PIN_GPIO159,
+ MFP_PIN_GPIO160,
+ MFP_PIN_GPIO161,
+ MFP_PIN_GPIO162,
+ MFP_PIN_GPIO163,
+ MFP_PIN_GPIO164,
+ MFP_PIN_GPIO165,
+ MFP_PIN_GPIO166,
+ MFP_PIN_GPIO167,
+ MFP_PIN_GPIO168,
+ MFP_PIN_GPIO169,
+ MFP_PIN_GPIO170,
+ MFP_PIN_GPIO171,
+ MFP_PIN_GPIO172,
+ MFP_PIN_GPIO173,
+ MFP_PIN_GPIO174,
+ MFP_PIN_GPIO175,
+ MFP_PIN_GPIO176,
+ MFP_PIN_GPIO177,
+ MFP_PIN_GPIO178,
+ MFP_PIN_GPIO179,
+ MFP_PIN_GPIO180,
+ MFP_PIN_GPIO181,
+ MFP_PIN_GPIO182,
+ MFP_PIN_GPIO183,
+ MFP_PIN_GPIO184,
+ MFP_PIN_GPIO185,
+ MFP_PIN_GPIO186,
+ MFP_PIN_GPIO187,
+ MFP_PIN_GPIO188,
+ MFP_PIN_GPIO189,
+ MFP_PIN_GPIO190,
+ MFP_PIN_GPIO191,
+
+ MFP_PIN_GPIO255 = 255,
+
+ MFP_PIN_GPIO0_2,
+ MFP_PIN_GPIO1_2,
+ MFP_PIN_GPIO2_2,
+ MFP_PIN_GPIO3_2,
+ MFP_PIN_GPIO4_2,
+ MFP_PIN_GPIO5_2,
+ MFP_PIN_GPIO6_2,
+ MFP_PIN_GPIO7_2,
+ MFP_PIN_GPIO8_2,
+ MFP_PIN_GPIO9_2,
+ MFP_PIN_GPIO10_2,
+ MFP_PIN_GPIO11_2,
+ MFP_PIN_GPIO12_2,
+ MFP_PIN_GPIO13_2,
+ MFP_PIN_GPIO14_2,
+ MFP_PIN_GPIO15_2,
+ MFP_PIN_GPIO16_2,
+ MFP_PIN_GPIO17_2,
+
+ MFP_PIN_ULPI_STP,
+ MFP_PIN_ULPI_NXT,
+ MFP_PIN_ULPI_DIR,
+
+ MFP_PIN_nXCVREN,
+ MFP_PIN_DF_CLE_nOE,
+ MFP_PIN_DF_nADV1_ALE,
+ MFP_PIN_DF_SCLK_E,
+ MFP_PIN_DF_SCLK_S,
+ MFP_PIN_nBE0,
+ MFP_PIN_nBE1,
+ MFP_PIN_DF_nADV2_ALE,
+ MFP_PIN_DF_INT_RnB,
+ MFP_PIN_DF_nCS0,
+ MFP_PIN_DF_nCS1,
+ MFP_PIN_nLUA,
+ MFP_PIN_nLLA,
+ MFP_PIN_DF_nWE,
+ MFP_PIN_DF_ALE_nWE,
+ MFP_PIN_DF_nRE_nOE,
+ MFP_PIN_DF_ADDR0,
+ MFP_PIN_DF_ADDR1,
+ MFP_PIN_DF_ADDR2,
+ MFP_PIN_DF_ADDR3,
+ MFP_PIN_DF_IO0,
+ MFP_PIN_DF_IO1,
+ MFP_PIN_DF_IO2,
+ MFP_PIN_DF_IO3,
+ MFP_PIN_DF_IO4,
+ MFP_PIN_DF_IO5,
+ MFP_PIN_DF_IO6,
+ MFP_PIN_DF_IO7,
+ MFP_PIN_DF_IO8,
+ MFP_PIN_DF_IO9,
+ MFP_PIN_DF_IO10,
+ MFP_PIN_DF_IO11,
+ MFP_PIN_DF_IO12,
+ MFP_PIN_DF_IO13,
+ MFP_PIN_DF_IO14,
+ MFP_PIN_DF_IO15,
+ MFP_PIN_DF_nCS0_SM_nCS2,
+ MFP_PIN_DF_nCS1_SM_nCS3,
+ MFP_PIN_SM_nCS0,
+ MFP_PIN_SM_nCS1,
+ MFP_PIN_DF_WEn,
+ MFP_PIN_DF_REn,
+ MFP_PIN_DF_CLE_SM_OEn,
+ MFP_PIN_DF_ALE_SM_WEn,
+ MFP_PIN_DF_RDY0,
+ MFP_PIN_DF_RDY1,
+
+ MFP_PIN_SM_SCLK,
+ MFP_PIN_SM_BE0,
+ MFP_PIN_SM_BE1,
+ MFP_PIN_SM_ADV,
+ MFP_PIN_SM_ADVMUX,
+ MFP_PIN_SM_RDY,
+
+ MFP_PIN_MMC1_DAT7,
+ MFP_PIN_MMC1_DAT6,
+ MFP_PIN_MMC1_DAT5,
+ MFP_PIN_MMC1_DAT4,
+ MFP_PIN_MMC1_DAT3,
+ MFP_PIN_MMC1_DAT2,
+ MFP_PIN_MMC1_DAT1,
+ MFP_PIN_MMC1_DAT0,
+ MFP_PIN_MMC1_CMD,
+ MFP_PIN_MMC1_CLK,
+ MFP_PIN_MMC1_CD,
+ MFP_PIN_MMC1_WP,
+
+ /* additional pins on PXA930 */
+ MFP_PIN_GSIM_UIO,
+ MFP_PIN_GSIM_UCLK,
+ MFP_PIN_GSIM_UDET,
+ MFP_PIN_GSIM_nURST,
+ MFP_PIN_PMIC_INT,
+ MFP_PIN_RDY,
+
+ /* additional pins on MMP2 */
+ MFP_PIN_TWSI1_SCL,
+ MFP_PIN_TWSI1_SDA,
+ MFP_PIN_TWSI4_SCL,
+ MFP_PIN_TWSI4_SDA,
+ MFP_PIN_CLK_REQ,
+
+ MFP_PIN_MAX,
+};
+
+/*
+ * a possible MFP configuration is represented by a 32-bit integer
+ *
+ * bit 0.. 9 - MFP Pin Number (1024 Pins Maximum)
+ * bit 10..12 - Alternate Function Selection
+ * bit 13..15 - Drive Strength
+ * bit 16..18 - Low Power Mode State
+ * bit 19..20 - Low Power Mode Edge Detection
+ * bit 21..23 - Run Mode Pull State
+ *
+ * to facilitate the definition, the following macros are provided
+ *
+ * MFP_CFG_DEFAULT - default MFP configuration value, with
+ * alternate function = 0,
+ * drive strength = fast 3mA (MFP_DS03X)
+ * low power mode = default
+ * edge detection = none
+ *
+ * MFP_CFG - default MFPR value with alternate function
+ * MFP_CFG_DRV - default MFPR value with alternate function and
+ * pin drive strength
+ * MFP_CFG_LPM - default MFPR value with alternate function and
+ * low power mode
+ * MFP_CFG_X - default MFPR value with alternate function,
+ * pin drive strength and low power mode
+ */
+
+typedef unsigned long mfp_cfg_t;
+
+#define MFP_PIN(x) ((x) & 0x3ff)
+
+#define MFP_AF0 (0x0 << 10)
+#define MFP_AF1 (0x1 << 10)
+#define MFP_AF2 (0x2 << 10)
+#define MFP_AF3 (0x3 << 10)
+#define MFP_AF4 (0x4 << 10)
+#define MFP_AF5 (0x5 << 10)
+#define MFP_AF6 (0x6 << 10)
+#define MFP_AF7 (0x7 << 10)
+#define MFP_AF_MASK (0x7 << 10)
+#define MFP_AF(x) (((x) >> 10) & 0x7)
+
+#define MFP_DS01X (0x0 << 13)
+#define MFP_DS02X (0x1 << 13)
+#define MFP_DS03X (0x2 << 13)
+#define MFP_DS04X (0x3 << 13)
+#define MFP_DS06X (0x4 << 13)
+#define MFP_DS08X (0x5 << 13)
+#define MFP_DS10X (0x6 << 13)
+#define MFP_DS13X (0x7 << 13)
+#define MFP_DS_MASK (0x7 << 13)
+#define MFP_DS(x) (((x) >> 13) & 0x7)
+
+#define MFP_LPM_DEFAULT (0x0 << 16)
+#define MFP_LPM_DRIVE_LOW (0x1 << 16)
+#define MFP_LPM_DRIVE_HIGH (0x2 << 16)
+#define MFP_LPM_PULL_LOW (0x3 << 16)
+#define MFP_LPM_PULL_HIGH (0x4 << 16)
+#define MFP_LPM_FLOAT (0x5 << 16)
+#define MFP_LPM_INPUT (0x6 << 16)
+#define MFP_LPM_STATE_MASK (0x7 << 16)
+#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7)
+
+#define MFP_LPM_EDGE_NONE (0x0 << 19)
+#define MFP_LPM_EDGE_RISE (0x1 << 19)
+#define MFP_LPM_EDGE_FALL (0x2 << 19)
+#define MFP_LPM_EDGE_BOTH (0x3 << 19)
+#define MFP_LPM_EDGE_MASK (0x3 << 19)
+#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3)
+
+#define MFP_PULL_NONE (0x0 << 21)
+#define MFP_PULL_LOW (0x1 << 21)
+#define MFP_PULL_HIGH (0x2 << 21)
+#define MFP_PULL_BOTH (0x3 << 21)
+#define MFP_PULL_FLOAT (0x4 << 21)
+#define MFP_PULL_MASK (0x7 << 21)
+#define MFP_PULL(x) (((x) >> 21) & 0x7)
+
+#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\
+ MFP_LPM_EDGE_NONE | MFP_PULL_NONE)
+
+#define MFP_CFG(pin, af) \
+ ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af))
+
+#define MFP_CFG_DRV(pin, af, drv) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv))
+
+#define MFP_CFG_LPM(pin, af, lpm) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm))
+
+#define MFP_CFG_X(pin, af, drv, lpm) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm))
+
+#if defined(CONFIG_PXA3xx) || defined(CONFIG_ARCH_MMP)
+/*
+ * Each MFP pin has an MFPR register. Since the register offset varies
+ * between processors, processor-specific code should initialize the pin
+ * offsets with the functions below.
+ *
+ * mfp_init_base() - accepts a virtual base for all MFPR registers and
+ * initializes the MFP table to a default state
+ *
+ * mfp_init_addr() - accepts a table of "mfp_addr_map" structures, each
+ * representing a range of MFP pins from "start" to "end", with the offset
+ * beginning at "offset"; to define a single pin, let "end" = -1
+ *
+ * use
+ *
+ * MFP_ADDR_X() to define a range of pins
+ * MFP_ADDR() to define a single pin
+ * MFP_ADDR_END to signal the end of pin offset definitions
+ */
+struct mfp_addr_map {
+ unsigned int start;
+ unsigned int end;
+ unsigned long offset;
+};
+
+#define MFP_ADDR_X(start, end, offset) \
+ { MFP_PIN_##start, MFP_PIN_##end, offset }
+
+#define MFP_ADDR(pin, offset) \
+ { MFP_PIN_##pin, -1, offset }
+
+#define MFP_ADDR_END { MFP_PIN_INVALID, 0 }
+
+void mfp_init_base(void __iomem *mfpr_base);
+void mfp_init_addr(struct mfp_addr_map *map);
+
+/*
+ * mfp_{read, write}() - for direct read/write access to the MFPR register
+ * mfp_config() - for configuring a group of MFPR registers
+ * mfp_config_lpm() - configuring all low power MFPR registers for suspend
+ * mfp_config_run() - configuring all run time MFPR registers after resume
+ */
+unsigned long mfp_read(int mfp);
+void mfp_write(int mfp, unsigned long mfpr_val);
+void mfp_config(unsigned long *mfp_cfgs, int num);
+void mfp_config_run(void);
+void mfp_config_lpm(void);
+#endif /* CONFIG_PXA3xx || CONFIG_ARCH_MMP */
+
+#endif /* __ASM_PLAT_MFP_H */
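
For context, a minimal sketch of the intended usage: a board file declares a pin table with the MFP_CFG*() helpers and hands it to mfp_config(). The pins and functions below are hypothetical.

#include <linux/kernel.h>
#include <linux/soc/pxa/mfp.h>

/* Hypothetical board pin setup (requires CONFIG_PXA3xx or CONFIG_ARCH_MMP). */
static unsigned long example_board_pin_config[] = {
	MFP_CFG(GPIO19, AF2),			/* alternate function 2 */
	MFP_CFG_LPM(GPIO20, AF0, PULL_LOW),	/* pulled low in low-power mode */
};

static void example_board_init_pins(void)
{
	mfp_config(example_board_pin_config, ARRAY_SIZE(example_board_pin_config));
}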
diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h
new file mode 100644
index 000000000000..4feb1dded3ec
--- /dev/null
+++ b/include/linux/soc/pxa/smemc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PXA_REGS_H
+#define __PXA_REGS_H
+
+#include <linux/types.h>
+
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio);
+void pxa_smemc_set_pcmcia_socket(int nr);
+int pxa2xx_smemc_get_sdram_rows(void);
+unsigned int pxa3xx_smemc_get_memclkdiv(void);
+void __iomem *pxa_smemc_get_mdrefr(void);
+
+/*
+ * Once fully converted to the clock framework, all these functions should be
+ * removed, and replaced with a clk_get(NULL, "core").
+ */
+#ifdef CONFIG_PXA25x
+extern unsigned pxa25x_get_clk_frequency_khz(int);
+#else
+#define pxa25x_get_clk_frequency_khz(x) (0)
+#endif
+
+#ifdef CONFIG_PXA27x
+extern unsigned pxa27x_get_clk_frequency_khz(int);
+#else
+#define pxa27x_get_clk_frequency_khz(x) (0)
+#endif
+
+#endif
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 7f0bc3cf4d61..7161a3183eda 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -7,8 +7,9 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <dt-bindings/soc/qcom,apr.h>
+#include <dt-bindings/soc/qcom,gpr.h>
-extern struct bus_type aprbus;
+extern const struct bus_type aprbus;
#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
@@ -75,10 +76,65 @@ struct apr_resp_pkt {
int payload_size;
};
+struct gpr_hdr {
+ uint32_t version:4;
+ uint32_t hdr_size:4;
+ uint32_t pkt_size:24;
+ uint32_t dest_domain:8;
+ uint32_t src_domain:8;
+ uint32_t reserved:16;
+ uint32_t src_port;
+ uint32_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+} __packed;
+
+struct gpr_pkt {
+ struct gpr_hdr hdr;
+ uint32_t payload[];
+};
+
+struct gpr_resp_pkt {
+ struct gpr_hdr hdr;
+ void *payload;
+ int payload_size;
+};
+
+#define GPR_HDR_SIZE sizeof(struct gpr_hdr)
+#define GPR_PKT_VER 0x0
+#define GPR_PKT_HEADER_WORD_SIZE ((sizeof(struct gpr_pkt) + 3) >> 2)
+#define GPR_PKT_HEADER_BYTE_SIZE (GPR_PKT_HEADER_WORD_SIZE << 2)
+
+#define GPR_BASIC_RSP_RESULT 0x02001005
+
+struct gpr_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+#define GPR_BASIC_EVT_ACCEPTED 0x02001006
+
+struct gpr_ibasic_rsp_accepted_t {
+ uint32_t opcode;
+};
+
/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
+typedef int (*gpr_port_cb) (struct gpr_resp_pkt *d, void *priv, int op);
+struct packet_router;
+struct pkt_router_svc {
+ struct device *dev;
+ gpr_port_cb callback;
+ struct packet_router *pr;
+ spinlock_t lock;
+ int id;
+ void *priv;
+};
+
+typedef struct pkt_router_svc gpr_port_t;
+
struct apr_device {
struct device dev;
uint16_t svc_id;
@@ -86,21 +142,26 @@ struct apr_device {
uint32_t version;
char name[APR_NAME_SIZE];
const char *service_path;
- spinlock_t lock;
+ struct pkt_router_svc svc;
struct list_head node;
};
+typedef struct apr_device gpr_device_t;
+
#define to_apr_device(d) container_of(d, struct apr_device, dev)
+#define svc_to_apr_device(d) container_of(d, struct apr_device, svc)
struct apr_driver {
int (*probe)(struct apr_device *sl);
- int (*remove)(struct apr_device *sl);
+ void (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
struct apr_resp_pkt *d);
+ int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
struct device_driver driver;
const struct apr_device_id *id_table;
};
+typedef struct apr_driver gpr_driver_t;
#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
/*
@@ -113,7 +174,7 @@ void apr_driver_unregister(struct apr_driver *drv);
/**
* module_apr_driver() - Helper macro for registering a aprbus driver
- * @__aprbus_driver: aprbus_driver struct
+ * @__apr_driver: apr_driver struct
*
* Helper macro for aprbus drivers which do not do anything special in
* module init/exit. This eliminates a lot of boilerplate. Each module
@@ -123,7 +184,14 @@ void apr_driver_unregister(struct apr_driver *drv);
#define module_apr_driver(__apr_driver) \
module_driver(__apr_driver, apr_driver_register, \
apr_driver_unregister)
+#define module_gpr_driver(__gpr_driver) module_apr_driver(__gpr_driver)
int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+gpr_port_t *gpr_alloc_port(gpr_device_t *gdev, struct device *dev,
+ gpr_port_cb cb, void *priv);
+void gpr_free_port(gpr_port_t *port);
+int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt);
+int gpr_send_pkt(gpr_device_t *gdev, struct gpr_pkt *pkt);
+
#endif /* __QCOM_APR_H_ */
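
A minimal sketch of a GPR client built on the new typedefs and port API; the callback logic and the NULL private pointer are illustrative assumptions, not taken from this patch.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/apr.h>

/* Hypothetical GPR port callback: report the status of basic responses. */
static int example_gpr_callback(struct gpr_resp_pkt *d, void *priv, int op)
{
	struct gpr_ibasic_rsp_result_t *result = d->payload;

	if (d->hdr.opcode == GPR_BASIC_RSP_RESULT)
		pr_info("gpr response: opcode %#x status %u\n",
			result->opcode, result->status);
	return 0;
}

static int example_gpr_probe(gpr_device_t *gdev)
{
	gpr_port_t *port;

	port = gpr_alloc_port(gdev, &gdev->dev, example_gpr_callback, NULL);
	return PTR_ERR_OR_ZERO(port);
}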
diff --git a/include/linux/qcom-geni-se.h b/include/linux/soc/qcom/geni-se.h
index 8f385fbe5a0e..0f038a1a0330 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -8,11 +8,24 @@
#include <linux/interconnect.h>
-/* Transfer mode supported by GENI Serial Engines */
+/**
+ * enum geni_se_xfer_mode: Transfer modes supported by Serial Engines
+ *
+ * @GENI_SE_INVALID: Invalid mode
+ * @GENI_SE_FIFO: FIFO mode. Data is transferred through the SE FIFO
+ * using programmed I/O
+ * @GENI_SE_DMA: Serial Engine DMA mode. Data is transferred
+ * by the DMA engine internal to the SE
+ * @GENI_GPI_DMA: GPI DMA mode. Data is transferred using a DMA engine
+ * configured by firmware residing on a GSI engine. The names GSI and
+ * GPI are used interchangeably and refer to the same DMA engine
+ */
enum geni_se_xfer_mode {
GENI_SE_INVALID,
GENI_SE_FIFO,
GENI_SE_DMA,
+ GENI_GPI_DMA,
};
/* Protocols supported by GENI Serial Engines */
@@ -22,6 +35,7 @@ enum geni_se_protocol_type {
GENI_SE_UART,
GENI_SE_I2C,
GENI_SE_I3C,
+ GENI_SE_SPI_SLAVE,
};
struct geni_wrapper;
@@ -47,8 +61,6 @@ struct geni_icc_path {
* @num_clk_levels: Number of valid clock levels in clk_perf_tbl
* @clk_perf_tbl: Table of clock frequency input to serial engine clock
* @icc_paths: Array of ICC paths for SE
- * @opp_table: Pointer to the OPP table
- * @has_opp_table: Specifies if the SE has an OPP table
*/
struct geni_se {
void __iomem *base;
@@ -58,17 +70,18 @@ struct geni_se {
unsigned int num_clk_levels;
unsigned long *clk_perf_tbl;
struct geni_icc_path icc_paths[3];
- struct opp_table *opp_table;
- bool has_opp_table;
};
/* Common SE registers */
#define GENI_FORCE_DEFAULT_REG 0x20
+#define GENI_OUTPUT_CTRL 0x24
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
+#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
+#define SE_GENI_CFG_SEQ_START 0x84
#define SE_GENI_DMA_MODE_EN 0x258
#define SE_GENI_M_CMD0 0x600
#define SE_GENI_M_CMD_CTRL_REG 0x604
@@ -93,6 +106,7 @@ struct geni_se {
#define SE_DMA_TX_FSM_RST 0xc58
#define SE_DMA_RX_IRQ_STAT 0xd40
#define SE_DMA_RX_IRQ_CLR 0xd44
+#define SE_DMA_RX_LEN_IN 0xd54
#define SE_DMA_RX_FSM_RST 0xd58
#define SE_HW_PARAM_0 0xe24
#define SE_HW_PARAM_1 0xe28
@@ -100,6 +114,9 @@ struct geni_se {
/* GENI_FORCE_DEFAULT_REG fields */
#define FORCE_DEFAULT BIT(0)
+/* GENI_OUTPUT_CTRL fields */
+#define GENI_IO_MUX_0_EN BIT(0)
+
/* GENI_STATUS fields */
#define M_GENI_CMD_ACTIVE BIT(0)
#define S_GENI_CMD_ACTIVE BIT(12)
@@ -109,6 +126,9 @@ struct geni_se {
#define CLK_DIV_MSK GENMASK(15, 4)
#define CLK_DIV_SHFT 4
+/* GENI_IF_DISABLE_RO fields */
+#define FIFO_IF_DISABLE (BIT(0))
+
/* GENI_FW_REVISION_RO fields */
#define FW_REV_PROTOCOL_MSK GENMASK(15, 8)
#define FW_REV_PROTOCOL_SHFT 8
@@ -116,6 +136,9 @@ struct geni_se {
/* GENI_CLK_SEL fields */
#define CLK_SEL_MSK GENMASK(2, 0)
+/* SE_GENI_CFG_SEQ_START fields */
+#define START_TRIGGER BIT(0)
+
/* SE_GENI_DMA_MODE_EN */
#define GENI_DMA_MODE_EN BIT(0)
@@ -155,6 +178,7 @@ struct geni_se {
#define M_GP_IRQ_3_EN BIT(12)
#define M_GP_IRQ_4_EN BIT(13)
#define M_GP_IRQ_5_EN BIT(14)
+#define M_TX_FIFO_NOT_EMPTY_EN BIT(21)
#define M_IO_DATA_DEASSERT_EN BIT(22)
#define M_IO_DATA_ASSERT_EN BIT(23)
#define M_RX_FIFO_RD_ERR_EN BIT(24)
@@ -222,6 +246,8 @@ struct geni_se {
#define RX_SBE BIT(2)
#define RX_RESET_DONE BIT(3)
#define RX_FLUSH_DONE BIT(4)
+#define RX_DMA_PARITY_ERR BIT(5)
+#define RX_DMA_BREAK GENMASK(8, 7)
#define RX_GENI_GP_IRQ GENMASK(10, 5)
#define RX_GENI_CANCEL_IRQ BIT(11)
#define RX_GENI_GP_IRQ_EXT GENMASK(13, 12)
@@ -229,12 +255,22 @@ struct geni_se {
/* SE_HW_PARAM_0 fields */
#define TX_FIFO_WIDTH_MSK GENMASK(29, 24)
#define TX_FIFO_WIDTH_SHFT 24
+/*
+ * For QUP HW version >= 3.10, the TX FIFO depth grows to 256 bytes,
+ * with the depth field occupying bits 16 to 23
+ */
+#define TX_FIFO_DEPTH_MSK_256_BYTES GENMASK(23, 16)
#define TX_FIFO_DEPTH_MSK GENMASK(21, 16)
#define TX_FIFO_DEPTH_SHFT 16
/* SE_HW_PARAM_1 fields */
#define RX_FIFO_WIDTH_MSK GENMASK(29, 24)
#define RX_FIFO_WIDTH_SHFT 24
+/*
+ * For QUP HW version >= 3.10, the RX FIFO depth grows to 256 bytes,
+ * with the depth field occupying bits 16 to 23
+ */
+#define RX_FIFO_DEPTH_MSK_256_BYTES GENMASK(23, 16)
#define RX_FIFO_DEPTH_MSK GENMASK(21, 16)
#define RX_FIFO_DEPTH_SHFT 16
@@ -248,6 +284,9 @@ struct geni_se {
#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+/* QUP SE VERSION value for major number 2 and minor number 5 */
+#define QUP_SE_VERSION_2_5 0x20050000
+
/*
* Define bandwidth thresholds that cause the underlying Core 2X interconnect
* clock to run at the named frequency. These baseline values are recommended
@@ -296,7 +335,7 @@ static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params)
u32 m_cmd;
m_cmd = (cmd << M_OPCODE_SHFT) | (params & M_PARAMS_MSK);
- writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0);
+ writel(m_cmd, se->base + SE_GENI_M_CMD0);
}
/**
@@ -316,7 +355,7 @@ static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params)
s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
s_cmd |= (cmd << S_OPCODE_SHFT);
s_cmd |= (params & S_PARAMS_MSK);
- writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0);
+ writel(s_cmd, se->base + SE_GENI_S_CMD0);
}
/**
@@ -372,7 +411,8 @@ static inline void geni_se_abort_s_cmd(struct geni_se *se)
/**
* geni_se_get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
- * @se: Pointer to the concerned serial engine.
+ * based on QUP HW version
+ * @se: Pointer to the concerned serial engine.
*
* This function is used to get the depth i.e. number of elements in the
* TX fifo of the serial engine.
@@ -381,11 +421,20 @@ static inline void geni_se_abort_s_cmd(struct geni_se *se)
*/
static inline u32 geni_se_get_tx_fifo_depth(struct geni_se *se)
{
- u32 val;
+ u32 val, hw_version, hw_major, hw_minor, tx_fifo_depth_mask;
+
+ hw_version = geni_se_get_qup_hw_version(se);
+ hw_major = GENI_SE_VERSION_MAJOR(hw_version);
+ hw_minor = GENI_SE_VERSION_MINOR(hw_version);
+
+ if ((hw_major == 3 && hw_minor >= 10) || hw_major > 3)
+ tx_fifo_depth_mask = TX_FIFO_DEPTH_MSK_256_BYTES;
+ else
+ tx_fifo_depth_mask = TX_FIFO_DEPTH_MSK;
val = readl_relaxed(se->base + SE_HW_PARAM_0);
- return (val & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT;
+ return (val & tx_fifo_depth_mask) >> TX_FIFO_DEPTH_SHFT;
}
/**
@@ -408,7 +457,8 @@ static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se)
/**
* geni_se_get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
- * @se: Pointer to the concerned serial engine.
+ * based on QUP HW version
+ * @se: Pointer to the concerned serial engine.
*
* This function is used to get the depth i.e. number of elements in the
* RX fifo of the serial engine.
@@ -417,11 +467,20 @@ static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se)
*/
static inline u32 geni_se_get_rx_fifo_depth(struct geni_se *se)
{
- u32 val;
+ u32 val, hw_version, hw_major, hw_minor, rx_fifo_depth_mask;
+
+ hw_version = geni_se_get_qup_hw_version(se);
+ hw_major = GENI_SE_VERSION_MAJOR(hw_version);
+ hw_minor = GENI_SE_VERSION_MINOR(hw_version);
+
+ if ((hw_major == 3 && hw_minor >= 10) || hw_major > 3)
+ rx_fifo_depth_mask = RX_FIFO_DEPTH_MSK_256_BYTES;
+ else
+ rx_fifo_depth_mask = RX_FIFO_DEPTH_MSK;
val = readl_relaxed(se->base + SE_HW_PARAM_1);
- return (val & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT;
+ return (val & rx_fifo_depth_mask) >> RX_FIFO_DEPTH_SHFT;
}
void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr);
@@ -441,9 +500,13 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
unsigned int *index, unsigned long *res_freq,
bool exact);
+void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
+
int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
dma_addr_t *iova);
+void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
+
int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
dma_addr_t *iova);
@@ -459,7 +522,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
int geni_icc_enable(struct geni_se *se);
int geni_icc_disable(struct geni_se *se);
-
-void geni_remove_earlycon_icc_vote(void);
#endif
#endif
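
Since the depth helpers now select the field mask from the QUP hardware version, callers stay unchanged. A logging-only sketch (hypothetical function name):

#include <linux/printk.h>
#include <linux/soc/qcom/geni-se.h>

/* Hypothetical example: the helpers hide the version-dependent bit layout. */
static void example_report_fifo_geometry(struct geni_se *se)
{
	u32 tx_depth = geni_se_get_tx_fifo_depth(se);	/* words */
	u32 tx_width = geni_se_get_tx_fifo_width(se);	/* bits per word */

	pr_info("TX FIFO: %u words x %u bits\n", tx_depth, tx_width);
}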
diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h
index 9e1ece58e55b..72b9231e9fdd 100644
--- a/include/linux/soc/qcom/irq.h
+++ b/include/linux/soc/qcom/irq.h
@@ -7,7 +7,7 @@
#define GPIO_NO_WAKE_IRQ ~0U
-/**
+/*
 * QCOM-specific IRQ domain flags that distinguish the handling of wakeup
* capable interrupts by different interrupt controllers.
*
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 90b864655822..1a886666bbb6 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -16,6 +16,7 @@
#define LLCC_AUDIO 6
#define LLCC_MDMHPGRW 7
#define LLCC_MDM 8
+#define LLCC_MODHW 9
#define LLCC_CMPT 10
#define LLCC_GPUHTW 11
#define LLCC_GPU 12
@@ -26,9 +27,37 @@
#define LLCC_MDMHPFX 20
#define LLCC_MDMPNG 21
#define LLCC_AUDHW 22
+#define LLCC_NPU 23
+#define LLCC_WLHW 24
+#define LLCC_PIMEM 25
+#define LLCC_ECC 26
+#define LLCC_CVP 28
+#define LLCC_MODPE 29
+#define LLCC_APTCM 30
+#define LLCC_WRCACHE 31
+#define LLCC_CVPFW 32
+#define LLCC_CPUSS1 33
+#define LLCC_CAMEXP0 34
+#define LLCC_CPUMTE 35
+#define LLCC_CPUHWT 36
+#define LLCC_MDMCLAD2 37
+#define LLCC_CAMEXP1 38
+#define LLCC_CMPTHCP 39
+#define LLCC_LCPDARE 40
+#define LLCC_AENPU 45
+#define LLCC_ISLAND1 46
+#define LLCC_ISLAND2 47
+#define LLCC_ISLAND3 48
+#define LLCC_ISLAND4 49
+#define LLCC_CAMEXP2 50
+#define LLCC_CAMEXP3 51
+#define LLCC_CAMEXP4 52
+#define LLCC_DISP_WB 53
+#define LLCC_DISP_1 54
+#define LLCC_VIDVSP 64
/**
- * llcc_slice_desc - Cache slice descriptor
+ * struct llcc_slice_desc - Cache slice descriptor
* @slice_id: llcc slice id
* @slice_size: Size allocated for the llcc slice
*/
@@ -38,11 +67,8 @@ struct llcc_slice_desc {
};
/**
- * llcc_edac_reg_data - llcc edac registers data for each error type
+ * struct llcc_edac_reg_data - llcc edac registers data for each error type
* @name: Name of the error
- * @synd_reg: Syndrome register address
- * @count_status_reg: Status register address to read the error count
- * @ways_status_reg: Status register address to read the error ways
* @reg_cnt: Number of registers
* @count_mask: Mask value to get the error count
* @ways_mask: Mask value to get the error ways
@@ -51,9 +77,6 @@ struct llcc_slice_desc {
*/
struct llcc_edac_reg_data {
char *name;
- u64 synd_reg;
- u64 count_status_reg;
- u64 ways_status_reg;
u32 reg_cnt;
u32 count_mask;
u32 ways_mask;
@@ -61,30 +84,60 @@ struct llcc_edac_reg_data {
u8 ways_shift;
};
+struct llcc_edac_reg_offset {
+ /* LLCC TRP registers */
+ u32 trp_ecc_error_status0;
+ u32 trp_ecc_error_status1;
+ u32 trp_ecc_sb_err_syn0;
+ u32 trp_ecc_db_err_syn0;
+ u32 trp_ecc_error_cntr_clear;
+ u32 trp_interrupt_0_status;
+ u32 trp_interrupt_0_clear;
+ u32 trp_interrupt_0_enable;
+
+ /* LLCC Common registers */
+ u32 cmn_status0;
+ u32 cmn_interrupt_0_enable;
+ u32 cmn_interrupt_2_enable;
+
+ /* LLCC DRP registers */
+ u32 drp_ecc_error_cfg;
+ u32 drp_ecc_error_cntr_clear;
+ u32 drp_interrupt_status;
+ u32 drp_interrupt_clear;
+ u32 drp_interrupt_enable;
+ u32 drp_ecc_error_status0;
+ u32 drp_ecc_error_status1;
+ u32 drp_ecc_sb_err_syn0;
+ u32 drp_ecc_db_err_syn0;
+};
+
/**
- * llcc_drv_data - Data associated with the llcc driver
- * @regmap: regmap associated with the llcc device
+ * struct llcc_drv_data - Data associated with the llcc driver
+ * @regmaps: regmaps associated with the llcc device
* @bcast_regmap: regmap associated with llcc broadcast offset
* @cfg: pointer to the data structure for slice configuration
+ * @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
* @max_slices: max slices as read from device tree
* @num_banks: Number of llcc banks
* @bitmap: Bit map to track the active slice ids
- * @offsets: Pointer to the bank offsets array
* @ecc_irq: interrupt for llcc cache error detection and reporting
+ * @version: Indicates the LLCC version
*/
struct llcc_drv_data {
- struct regmap *regmap;
+ struct regmap **regmaps;
struct regmap *bcast_regmap;
const struct llcc_slice_config *cfg;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
u32 cfg_size;
u32 max_slices;
u32 num_banks;
unsigned long *bitmap;
- u32 *offsets;
int ecc_irq;
+ u32 version;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)
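
For reference, a sketch of how a client claims one of these slice IDs through the existing slice API; error handling is abbreviated and the choice of LLCC_GPU is illustrative.

#include <linux/err.h>
#include <linux/soc/qcom/llcc-qcom.h>

/* Hypothetical example: claim and activate the GPU system-cache slice. */
static int example_claim_gpu_slice(void)
{
	struct llcc_slice_desc *desc;
	int ret;

	desc = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = llcc_slice_activate(desc);
	if (ret)
		llcc_slice_putd(desc);
	return ret;
}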
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index e600baec6825..9e8e60421192 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -10,8 +10,14 @@
struct device;
struct firmware;
+struct qcom_scm_pas_metadata;
+
+#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
ssize_t qcom_mdt_get_size(const struct firmware *fw);
+int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
@@ -21,6 +27,48 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
-void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len);
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ const char *fw_name, struct device *dev);
+
+#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
+
+static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *pas_metadata_ctx)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load_no_init(struct device *dev,
+ const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline void *qcom_mdt_read_metadata(const struct firmware *fw,
+ size_t *data_len, const char *fw_name,
+ struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
#endif
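
A sketch of the common request-load-release sequence these prototypes serve; the firmware name and pas_id are hypothetical.

#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>

/* Hypothetical example: load split firmware into a preallocated carveout. */
static int example_mdt_load(struct device *dev, void *mem_region,
			    phys_addr_t mem_phys, size_t mem_size)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example.mdt", dev);
	if (ret)
		return ret;

	ret = qcom_mdt_load(dev, fw, "example.mdt", 1 /* pas_id */,
			    mem_region, mem_phys, mem_size, NULL);
	release_firmware(fw);
	return ret;
}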
diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
new file mode 100644
index 000000000000..fd124aa18c81
--- /dev/null
+++ b/include/linux/soc/qcom/pmic_glink.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#ifndef __SOC_QCOM_PMIC_GLINK_H__
+#define __SOC_QCOM_PMIC_GLINK_H__
+
+struct pmic_glink;
+struct pmic_glink_client;
+
+#define PMIC_GLINK_OWNER_BATTMGR 32778
+#define PMIC_GLINK_OWNER_USBC 32779
+#define PMIC_GLINK_OWNER_USBC_PAN 32780
+
+#define PMIC_GLINK_REQ_RESP 1
+#define PMIC_GLINK_NOTIFY 2
+
+struct pmic_glink_hdr {
+ __le32 owner;
+ __le32 type;
+ __le32 opcode;
+};
+
+int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
+
+struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv);
+
+#endif
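
A sketch of client registration against this interface; the callbacks do nothing useful and exist only to show the wiring.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/pmic_glink.h>

/* Hypothetical message callback: peek at the common header. */
static void example_glink_cb(const void *data, size_t len, void *priv)
{
	const struct pmic_glink_hdr *hdr = data;

	pr_info("pmic_glink: opcode %u\n", le32_to_cpu(hdr->opcode));
}

static void example_glink_pdr(void *priv, int state)
{
	/* React to protection-domain restarts here. */
}

static int example_glink_register(struct device *dev)
{
	struct pmic_glink_client *client;

	client = devm_pmic_glink_register_client(dev, PMIC_GLINK_OWNER_BATTMGR,
						 example_glink_cb,
						 example_glink_pdr, NULL);
	return PTR_ERR_OR_ZERO(client);
}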
diff --git a/include/linux/soc/qcom/qcom-pbs.h b/include/linux/soc/qcom/qcom-pbs.h
new file mode 100644
index 000000000000..8a46209ccf13
--- /dev/null
+++ b/include/linux/soc/qcom/qcom-pbs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _QCOM_PBS_H
+#define _QCOM_PBS_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device_node;
+struct pbs_dev;
+
+#if IS_ENABLED(CONFIG_QCOM_PBS)
+int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap);
+struct pbs_dev *get_pbs_client_device(struct device *client_dev);
+#else
+static inline int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap)
+{
+ return -ENODEV;
+}
+
+static inline struct pbs_dev *get_pbs_client_device(struct device *client_dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#endif
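
A sketch of the lookup-then-trigger pattern the stubs mirror; the bitmap value is hypothetical.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/soc/qcom/qcom-pbs.h>

/* Hypothetical example: fire the first PBS trigger bit for this client. */
static int example_pbs_trigger(struct device *dev)
{
	struct pbs_dev *pbs = get_pbs_client_device(dev);

	if (IS_ERR(pbs))
		return PTR_ERR(pbs);

	return qcom_pbs_trigger_event(pbs, BIT(0));
}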
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
new file mode 100644
index 000000000000..7361ca028752
--- /dev/null
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_AOSS_H__
+#define __QCOM_AOSS_H__
+
+#include <linux/err.h>
+#include <linux/device.h>
+
+struct qmp;
+
+#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
+
+int qmp_send(struct qmp *qmp, const char *fmt, ...);
+struct qmp *qmp_get(struct device *dev);
+void qmp_put(struct qmp *qmp);
+
+#else
+
+static inline int qmp_send(struct qmp *qmp, const char *fmt, ...)
+{
+ return -ENODEV;
+}
+
+static inline struct qmp *qmp_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void qmp_put(struct qmp *qmp)
+{
+}
+
+#endif
+
+#endif
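
A sketch of the get/send/put lifecycle; the message string is a made-up example of the textual QMP format.

#include <linux/err.h>
#include <linux/soc/qcom/qcom_aoss.h>

/* Hypothetical example: send one formatted QMP message and release it. */
static int example_qmp_notify(struct device *dev)
{
	struct qmp *qmp;
	int ret;

	qmp = qmp_get(dev);
	if (IS_ERR(qmp))
		return PTR_ERR(qmp);

	ret = qmp_send(qmp, "{class: example, res: %d}", 1);
	qmp_put(qmp);
	return ret;
}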
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index e712f94b89fc..469e02d2aa0d 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -16,7 +16,7 @@
struct socket;
/**
- * qmi_header - wireformat header of QMI messages
+ * struct qmi_header - wireformat header of QMI messages
* @type: type of message
* @txn_id: transaction id
* @msg_id: message id
@@ -75,7 +75,7 @@ struct qmi_elem_info {
enum qmi_array_type array_type;
u8 tlv_type;
u32 offset;
- struct qmi_elem_info *ei_array;
+ const struct qmi_elem_info *ei_array;
};
#define QMI_RESULT_SUCCESS_V01 0
@@ -93,7 +93,7 @@ struct qmi_elem_info {
#define QMI_ERR_NOT_SUPPORTED_V01 94
/**
- * qmi_response_type_v01 - common response header (decoded)
+ * struct qmi_response_type_v01 - common response header (decoded)
* @result: result of the transaction
* @error: error value, when @result is QMI_RESULT_FAILURE_V01
*/
@@ -102,7 +102,7 @@ struct qmi_response_type_v01 {
u16 error;
};
-extern struct qmi_elem_info qmi_response_type_v01_ei[];
+extern const struct qmi_elem_info qmi_response_type_v01_ei[];
/**
* struct qmi_service - context to track lookup-results
@@ -173,7 +173,7 @@ struct qmi_txn {
struct completion completion;
int result;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
void *dest;
};
@@ -189,7 +189,7 @@ struct qmi_msg_handler {
unsigned int type;
unsigned int msg_id;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
size_t decoded_size;
void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -249,23 +249,23 @@ void qmi_handle_release(struct qmi_handle *qmi);
ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
- int msg_id, size_t len, struct qmi_elem_info *ei,
+ int msg_id, size_t len, const struct qmi_elem_info *ei,
const void *c_struct);
void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
- unsigned int txn_id, struct qmi_elem_info *ei,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
const void *c_struct);
int qmi_decode_message(const void *buf, size_t len,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
void qmi_txn_cancel(struct qmi_txn *txn);
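
With the const-ification, element tables can now live in rodata. A minimal sketch of such a table for a response-only message follows; the message layout is hypothetical, while TLV type 0x02 is the conventional slot for the common response.

#include <linux/soc/qcom/qmi.h>

/* Hypothetical decoded message carrying only the common response TLV. */
struct example_resp_msg {
	struct qmi_response_type_v01 resp;
};

static const struct qmi_elem_info example_resp_msg_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct example_resp_msg, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
	},
};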
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index da304ce8c8f7..8190878645f9 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -2,10 +2,13 @@
#ifndef __QCOM_SMD_RPM_H__
#define __QCOM_SMD_RPM_H__
+#include <linux/types.h>
+
struct qcom_smd_rpm;
-#define QCOM_SMD_RPM_ACTIVE_STATE 0
-#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_STATE_NUM 2
/*
* Constants used for addressing resources in the RPM.
@@ -19,20 +22,43 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
#define QCOM_SMD_RPM_LDOA 0x616f646c
#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_LDOE 0x656f646c
+#define QCOM_SMD_RPM_RWCX 0x78637772
+#define QCOM_SMD_RPM_RWMX 0x786d7772
+#define QCOM_SMD_RPM_RWLC 0x636c7772
+#define QCOM_SMD_RPM_RWLM 0x6d6c7772
#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
#define QCOM_SMD_RPM_NCPA 0x6170636E
#define QCOM_SMD_RPM_NCPB 0x6270636E
#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_QUP_CLK 0x707571
#define QCOM_SMD_RPM_SMPA 0x61706d73
#define QCOM_SMD_RPM_SMPB 0x62706d73
+#define QCOM_SMD_RPM_SMPE 0x65706d73
#define QCOM_SMD_RPM_SPDM 0x63707362
#define QCOM_SMD_RPM_VSA 0x00617376
#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d
#define QCOM_SMD_RPM_IPA_CLK 0x617069
#define QCOM_SMD_RPM_CE_CLK 0x6563
#define QCOM_SMD_RPM_AGGR_CLK 0x72676761
+#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768
+#define QCOM_SMD_RPM_PKA_CLK 0x616b70
+#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
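
A sketch of how the new request structure pairs with qcom_rpm_smd_write(), whose remaining parameters are resource type, resource id, buffer, and length; the resource id and rate below are hypothetical.

#include <linux/kernel.h>
#include <linux/soc/qcom/smd-rpm.h>

/* Hypothetical example: vote a clock rate in kHz on the active state. */
static int example_vote_clk_rate(struct qcom_smd_rpm *rpm, u32 rate_khz)
{
	struct clk_smd_rpm_req req = {
		.key	= cpu_to_le32(QCOM_RPM_SMD_KEY_RATE),
		.nbytes	= cpu_to_le32(sizeof(u32)),
		.value	= cpu_to_le32(rate_khz),
	};

	return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				  QCOM_SMD_RPM_MISC_CLK, 1, &req, sizeof(req));
}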
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index 86e1b358688a..a36a3b9d4929 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -4,6 +4,7 @@
#define QCOM_SMEM_HOST_ANY -1
+bool qcom_smem_is_available(void);
int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
@@ -11,4 +12,6 @@ int qcom_smem_get_free_space(unsigned host);
phys_addr_t qcom_smem_virt_to_phys(void *p);
+int qcom_smem_get_soc_id(u32 *id);
+
#endif
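
A sketch pairing the two new helpers; purely illustrative.

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

/* Hypothetical example: print the SoC ID once SMEM has probed. */
static void example_print_soc_id(void)
{
	u32 soc_id;

	if (qcom_smem_is_available() && !qcom_smem_get_soc_id(&soc_id))
		pr_info("SoC ID: %u\n", soc_id);
}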
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 63ad8cddad14..652c0158baac 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -14,6 +14,7 @@ struct qcom_smem_state_ops {
#ifdef CONFIG_QCOM_SMEM_STATE
struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
void qcom_smem_state_put(struct qcom_smem_state *);
int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value);
@@ -29,6 +30,13 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
return ERR_PTR(-EINVAL);
}
+static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void qcom_smem_state_put(struct qcom_smem_state *state)
{
}
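
A sketch of the devm variant, which removes the explicit put from unwind paths; the "stop" con_id is a common convention but hypothetical here.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/soc/qcom/smem_state.h>

/* Hypothetical example: acquire a managed state handle and assert its bit. */
static int example_signal_stop(struct device *dev)
{
	struct qcom_smem_state *state;
	unsigned int bit;

	state = devm_qcom_smem_state_get(dev, "stop", &bit);
	if (IS_ERR(state))
		return PTR_ERR(state);

	return qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
}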
diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h
new file mode 100644
index 000000000000..e78777bb0f4a
--- /dev/null
+++ b/include/linux/soc/qcom/socinfo.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_SOCINFO_H__
+#define __QCOM_SOCINFO_H__
+
+/*
+ * SMEM item ID, used to acquire a handle to the corresponding
+ * SMEM region.
+ */
+#define SMEM_HW_SW_BUILD_ID 137
+
+#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
+
+/* Socinfo SMEM item structure */
+struct socinfo {
+ __le32 fmt;
+ __le32 id;
+ __le32 ver;
+ char build_id[SMEM_SOCINFO_BUILD_ID_LENGTH];
+ /* Version 2 */
+ __le32 raw_id;
+ __le32 raw_ver;
+ /* Version 3 */
+ __le32 hw_plat;
+ /* Version 4 */
+ __le32 plat_ver;
+ /* Version 5 */
+ __le32 accessory_chip;
+ /* Version 6 */
+ __le32 hw_plat_subtype;
+ /* Version 7 */
+ __le32 pmic_model;
+ __le32 pmic_die_rev;
+ /* Version 8 */
+ __le32 pmic_model_1;
+ __le32 pmic_die_rev_1;
+ __le32 pmic_model_2;
+ __le32 pmic_die_rev_2;
+ /* Version 9 */
+ __le32 foundry_id;
+ /* Version 10 */
+ __le32 serial_num;
+ /* Version 11 */
+ __le32 num_pmics;
+ __le32 pmic_array_offset;
+ /* Version 12 */
+ __le32 chip_family;
+ __le32 raw_device_family;
+ __le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_subset_parts;
+ __le32 nsubset_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
+ /* Version 16 */
+ __le32 feature_code;
+ __le32 pcode;
+ __le32 npartnamemap_offset;
+ __le32 nnum_partname_mapping;
+ /* Version 17 */
+ __le32 oem_variant;
+ /* Version 18 */
+ __le32 num_kvps;
+ __le32 kvps_offset;
+ /* Version 19 */
+ __le32 num_func_clusters;
+ __le32 boot_cluster;
+ __le32 boot_core;
+};
+
+#endif
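
A sketch of mapping this item via SMEM; note the item may be truncated to the advertised format version, which the size check below only gestures at.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/socinfo.h>

/* Hypothetical example: dump the build ID from the socinfo SMEM item. */
static void example_dump_build_id(void)
{
	struct socinfo *info;
	size_t size;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, &size);
	if (!IS_ERR(info) && size >= offsetof(struct socinfo, raw_id))
		pr_info("build id: %.32s\n", info->build_id);
}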
diff --git a/include/linux/soc/renesas/r9a06g032-sysctrl.h b/include/linux/soc/renesas/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..066dfb15cbdd
--- /dev/null
+++ b/include/linux/soc/renesas/r9a06g032-sysctrl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+
+#ifdef CONFIG_CLK_R9A06G032
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val);
+#else
+static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; }
+#endif
+
+#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
index 7899a5b8c247..1f1fe8bfaa76 100644
--- a/include/linux/soc/renesas/rcar-rst.h
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -4,8 +4,10 @@
#ifdef CONFIG_RST_RCAR
int rcar_rst_read_mode_pins(u32 *mode);
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr);
#else
static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
+static inline int rcar_rst_set_rproc_boot_addr(u64 boot_addr) { return -ENODEV; }
#endif
#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h
index 8bca6763f99c..62f0e2531068 100644
--- a/include/linux/soc/samsung/exynos-chipid.h
+++ b/include/linux/soc/samsung/exynos-chipid.h
@@ -9,10 +9,8 @@
#define __LINUX_SOC_EXYNOS_CHIPID_H
#define EXYNOS_CHIPID_REG_PRO_ID 0x00
-#define EXYNOS_SUBREV_MASK (0xf << 4)
-#define EXYNOS_MAINREV_MASK (0xf << 0)
-#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \
- EXYNOS_MAINREV_MASK)
+#define EXYNOS_REV_PART_MASK 0xf
+#define EXYNOS_REV_PART_SHIFT 4
#define EXYNOS_MASK 0xfffff000
#define EXYNOS_CHIPID_REG_PKG_ID 0x04
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index a4f5516cc956..2bd9d12d9a52 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -10,6 +10,7 @@
#define __LINUX_SOC_EXYNOS_PMU_H
struct regmap;
+struct device_node;
enum sys_powerdown {
SYS_AFTR,
@@ -20,12 +21,20 @@ enum sys_powerdown {
extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
#ifdef CONFIG_EXYNOS_PMU
-extern struct regmap *exynos_get_pmu_regmap(void);
+struct regmap *exynos_get_pmu_regmap(void);
+struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname);
#else
static inline struct regmap *exynos_get_pmu_regmap(void)
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname)
+{
+ return ERR_PTR(-ENODEV);
+}
#endif
#endif /* __LINUX_SOC_EXYNOS_PMU_H */
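
A sketch of the new phandle-based lookup; the property name is an assumption for illustration.

#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>

/* Hypothetical example: resolve the PMU regmap through a DT phandle. */
static int example_get_pmu_regmap(struct device_node *np)
{
	struct regmap *pmu;

	pmu = exynos_get_pmu_regmap_by_phandle(np, "samsung,pmu-syscon");
	return PTR_ERR_OR_ZERO(pmu);
}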
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index fc9250fb3133..aa840ed043e1 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -611,12 +611,6 @@
#define EXYNOS5420_FSYS2_OPTION 0x4168
#define EXYNOS5420_PSGEN_OPTION 0x4188
-/* For EXYNOS_CENTRAL_SEQ_OPTION */
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1	BIT(17)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
-
#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
diff --git a/include/linux/soc/samsung/s3c-pm.h b/include/linux/soc/samsung/s3c-pm.h
new file mode 100644
index 000000000000..5b23d85d20ab
--- /dev/null
+++ b/include/linux/soc/samsung/s3c-pm.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Tomasz Figa <t.figa@samsung.com>
+ * Copyright (c) 2004 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Written by Ben Dooks, <ben@simtec.co.uk>
+ */
+
+#ifndef __LINUX_SOC_SAMSUNG_S3C_PM_H
+#define __LINUX_SOC_SAMSUNG_S3C_PM_H __FILE__
+
+#include <linux/types.h>
+
+/* PM debug functions */
+
+#define S3C_PMDBG(fmt...) pr_debug(fmt)
+
+static inline void s3c_pm_save_uarts(bool is_s3c24xx) { }
+static inline void s3c_pm_restore_uarts(bool is_s3c24xx) { }
+
+/* suspend memory checking */
+
+#ifdef CONFIG_SAMSUNG_PM_CHECK
+extern void s3c_pm_check_prepare(void);
+extern void s3c_pm_check_restore(void);
+extern void s3c_pm_check_cleanup(void);
+extern void s3c_pm_check_store(void);
+#else
+#define s3c_pm_check_prepare() do { } while (0)
+#define s3c_pm_check_restore() do { } while (0)
+#define s3c_pm_check_cleanup() do { } while (0)
+#define s3c_pm_check_store() do { } while (0)
+#endif
+
+#endif
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h
index c5f663bba9c2..60e274d1b821 100644
--- a/include/linux/soc/sunxi/sunxi_sram.h
+++ b/include/linux/soc/sunxi/sunxi_sram.h
@@ -14,6 +14,6 @@
#define _SUNXI_SRAM_H_
int sunxi_sram_claim(struct device *dev);
-int sunxi_sram_release(struct device *dev);
+void sunxi_sram_release(struct device *dev);
#endif /* _SUNXI_SRAM_H_ */
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
index 5a472eca5ee4..39b022b92598 100644
--- a/include/linux/soc/ti/k3-ringacc.h
+++ b/include/linux/soc/ti/k3-ringacc.h
@@ -67,6 +67,10 @@ struct k3_ring;
* few times. It's usable when the same ring is used as Free Host PD ring
* for different flows, for example.
* Note: Locking should be done by consumer if required
+ * @dma_dev: Master device that uses and accesses the ring
+ * memory when the mode is K3_RINGACC_RING_MODE_RING. Memory allocations
+ * should be done using this device.
+ * @asel: Address Space Select value for physical addresses
*/
struct k3_ring_cfg {
u32 size;
@@ -74,6 +78,9 @@ struct k3_ring_cfg {
enum k3_ring_mode mode;
#define K3_RINGACC_RING_SHARED BIT(1)
u32 flags;
+
+ struct device *dma_dev;
+ u32 asel;
};
#define K3_RINGACC_RING_ID_ANY (-1)
@@ -245,4 +252,19 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem);
u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring);
+/* DMA ring support */
+struct ti_sci_handle;
+
+/**
+ * struct k3_ringacc_init_data - Initialization data for DMA rings
+ */
+struct k3_ringacc_init_data {
+ const struct ti_sci_handle *tisci;
+ u32 tisci_dev_id;
+ u32 num_rings;
+};
+
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data);
+
#endif /* __SOC_TI_K3_RINGACC_API_H_ */
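
A sketch of filling the extended configuration; the ring size, element size, and ASEL value are hypothetical.

#include <linux/soc/ti/k3-ringacc.h>

/* Hypothetical example: configure a ring whose memory the DMA device owns. */
static int example_cfg_dma_ring(struct k3_ring *ring, struct device *dma_dev)
{
	struct k3_ring_cfg cfg = {
		.size		= 128,
		.elm_size	= K3_RINGACC_RING_ELSIZE_8,
		.mode		= K3_RINGACC_RING_MODE_RING,
		.dma_dev	= dma_dev,
		.asel		= 15,
	};

	return k3_ringacc_ring_cfg(ring, &cfg);
}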
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 7127ec301537..18d806a8e52c 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
index c75ef99c99ca..175f466ebcc3 100644
--- a/include/linux/soc/ti/knav_qmss.h
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Keystone Navigator Queue Management Sub-System header
*
@@ -5,15 +6,6 @@
* Author: Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __SOC_TI_KNAV_QMSS_H__
diff --git a/include/linux/soc/ti/omap1-io.h b/include/linux/soc/ti/omap1-io.h
new file mode 100644
index 000000000000..9a60f45899d3
--- /dev/null
+++ b/include/linux/soc/ti/omap1-io.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ARCH_OMAP_IO_H
+#define __ASM_ARCH_OMAP_IO_H
+
+#ifndef __ASSEMBLER__
+#include <linux/types.h>
+
+#ifdef CONFIG_ARCH_OMAP1
+/*
+ * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+ */
+extern u8 omap_readb(u32 pa);
+extern u16 omap_readw(u32 pa);
+extern u32 omap_readl(u32 pa);
+extern void omap_writeb(u8 v, u32 pa);
+extern void omap_writew(u16 v, u32 pa);
+extern void omap_writel(u32 v, u32 pa);
+#elif defined(CONFIG_COMPILE_TEST)
+static inline u8 omap_readb(u32 pa) { return 0; }
+static inline u16 omap_readw(u32 pa) { return 0; }
+static inline u32 omap_readl(u32 pa) { return 0; }
+static inline void omap_writeb(u8 v, u32 pa) { }
+static inline void omap_writew(u16 v, u32 pa) { }
+static inline void omap_writel(u32 v, u32 pa) { }
+#endif
+#endif
+
+/*
+ * ----------------------------------------------------------------------------
+ * System control registers
+ * ----------------------------------------------------------------------------
+ */
+#define MOD_CONF_CTRL_0 0xfffe1080
+#define MOD_CONF_CTRL_1 0xfffe1110
+
+/*
+ * ---------------------------------------------------------------------------
+ * UPLD
+ * ---------------------------------------------------------------------------
+ */
+#define ULPD_REG_BASE (0xfffe0800)
+#define ULPD_IT_STATUS (ULPD_REG_BASE + 0x14)
+#define ULPD_SETUP_ANALOG_CELL_3 (ULPD_REG_BASE + 0x24)
+#define ULPD_CLOCK_CTRL (ULPD_REG_BASE + 0x30)
+# define DIS_USB_PVCI_CLK (1 << 5) /* no USB/FAC synch */
+# define USB_MCLK_EN (1 << 4) /* enable W4_USB_CLKO */
+#define ULPD_SOFT_REQ (ULPD_REG_BASE + 0x34)
+# define SOFT_UDC_REQ (1 << 4)
+# define SOFT_USB_CLK_REQ (1 << 3)
+# define SOFT_DPLL_REQ (1 << 0)
+#define ULPD_DPLL_CTRL (ULPD_REG_BASE + 0x3c)
+#define ULPD_STATUS_REQ (ULPD_REG_BASE + 0x40)
+#define ULPD_APLL_CTRL (ULPD_REG_BASE + 0x4c)
+#define ULPD_POWER_CTRL (ULPD_REG_BASE + 0x50)
+#define ULPD_SOFT_DISABLE_REQ_REG (ULPD_REG_BASE + 0x68)
+# define DIS_MMC2_DPLL_REQ (1 << 11)
+# define DIS_MMC1_DPLL_REQ (1 << 10)
+# define DIS_UART3_DPLL_REQ (1 << 9)
+# define DIS_UART2_DPLL_REQ (1 << 8)
+# define DIS_UART1_DPLL_REQ (1 << 7)
+# define DIS_USB_HOST_DPLL_REQ (1 << 6)
+#define ULPD_SDW_CLK_DIV_CTRL_SEL (ULPD_REG_BASE + 0x74)
+#define ULPD_CAM_CLK_CTRL (ULPD_REG_BASE + 0x7c)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Clocks
+ * ----------------------------------------------------------------------------
+ */
+#define CLKGEN_REG_BASE (0xfffece00)
+#define ARM_CKCTL (CLKGEN_REG_BASE + 0x0)
+#define ARM_IDLECT1 (CLKGEN_REG_BASE + 0x4)
+#define ARM_IDLECT2 (CLKGEN_REG_BASE + 0x8)
+#define ARM_EWUPCT (CLKGEN_REG_BASE + 0xC)
+#define ARM_RSTCT1 (CLKGEN_REG_BASE + 0x10)
+#define ARM_RSTCT2 (CLKGEN_REG_BASE + 0x14)
+#define ARM_SYSST (CLKGEN_REG_BASE + 0x18)
+#define ARM_IDLECT3 (CLKGEN_REG_BASE + 0x24)
+
+#define CK_RATEF 1
+#define CK_IDLEF 2
+#define CK_ENABLEF 4
+#define CK_SELECTF 8
+#define SETARM_IDLE_SHIFT
+
+/* DPLL control registers */
+#define DPLL_CTL (0xfffecf00)
+
+/* DSP clock control. Must use __raw_readw() and __raw_writew() with these */
+#define DSP_CONFIG_REG_BASE IOMEM(0xe1008000)
+#define DSP_CKCTL (DSP_CONFIG_REG_BASE + 0x0)
+#define DSP_IDLECT1 (DSP_CONFIG_REG_BASE + 0x4)
+#define DSP_IDLECT2 (DSP_CONFIG_REG_BASE + 0x8)
+#define DSP_RSTCT2 (DSP_CONFIG_REG_BASE + 0x14)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Pulse-Width Light
+ * ----------------------------------------------------------------------------
+ */
+#define OMAP_PWL_BASE 0xfffb5800
+#define OMAP_PWL_ENABLE (OMAP_PWL_BASE + 0x00)
+#define OMAP_PWL_CLK_ENABLE (OMAP_PWL_BASE + 0x04)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Pin multiplexing registers
+ * ----------------------------------------------------------------------------
+ */
+#define FUNC_MUX_CTRL_0 0xfffe1000
+#define FUNC_MUX_CTRL_1 0xfffe1004
+#define FUNC_MUX_CTRL_2 0xfffe1008
+#define COMP_MODE_CTRL_0 0xfffe100c
+#define FUNC_MUX_CTRL_3 0xfffe1010
+#define FUNC_MUX_CTRL_4 0xfffe1014
+#define FUNC_MUX_CTRL_5 0xfffe1018
+#define FUNC_MUX_CTRL_6 0xfffe101C
+#define FUNC_MUX_CTRL_7 0xfffe1020
+#define FUNC_MUX_CTRL_8 0xfffe1024
+#define FUNC_MUX_CTRL_9 0xfffe1028
+#define FUNC_MUX_CTRL_A 0xfffe102C
+#define FUNC_MUX_CTRL_B 0xfffe1030
+#define FUNC_MUX_CTRL_C 0xfffe1034
+#define FUNC_MUX_CTRL_D 0xfffe1038
+#define PULL_DWN_CTRL_0 0xfffe1040
+#define PULL_DWN_CTRL_1 0xfffe1044
+#define PULL_DWN_CTRL_2 0xfffe1048
+#define PULL_DWN_CTRL_3 0xfffe104c
+#define PULL_DWN_CTRL_4 0xfffe10ac
+
+/* OMAP-1610 specific multiplexing registers */
+#define FUNC_MUX_CTRL_E 0xfffe1090
+#define FUNC_MUX_CTRL_F 0xfffe1094
+#define FUNC_MUX_CTRL_10 0xfffe1098
+#define FUNC_MUX_CTRL_11 0xfffe109c
+#define FUNC_MUX_CTRL_12 0xfffe10a0
+#define PU_PD_SEL_0 0xfffe10b4
+#define PU_PD_SEL_1 0xfffe10b8
+#define PU_PD_SEL_2 0xfffe10bc
+#define PU_PD_SEL_3 0xfffe10c0
+#define PU_PD_SEL_4 0xfffe10c4
+
+#endif
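
A minimal usage sketch, assuming the omap_readw()/omap_writew() accessors declared above are available: requesting the USB clock from the ULPD block is a plain read-modify-write on ULPD_SOFT_REQ. The function name is illustrative only.

/* Hedged sketch: ask the ULPD to run the USB clock. */
static void example_usb_clk_request(void)
{
	u16 w;

	w = omap_readw(ULPD_SOFT_REQ);
	w |= SOFT_USB_CLK_REQ;		/* soft request for the USB clock */
	omap_writew(w, ULPD_SOFT_REQ);
}
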
diff --git a/include/linux/soc/ti/omap1-mux.h b/include/linux/soc/ti/omap1-mux.h
new file mode 100644
index 000000000000..59c239b5569c
--- /dev/null
+++ b/include/linux/soc/ti/omap1-mux.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __SOC_TI_OMAP1_MUX_H
+#define __SOC_TI_OMAP1_MUX_H
+/*
+ * This should not really be a global header; it reflects the
+ * traditional way that omap1 does pin muxing without the
+ * pinctrl subsystem.
+ */
+
+enum omap7xx_index {
+ /* OMAP 730 keyboard */
+ E2_7XX_KBR0,
+ J7_7XX_KBR1,
+ E1_7XX_KBR2,
+ F3_7XX_KBR3,
+ D2_7XX_KBR4,
+ C2_7XX_KBC0,
+ D3_7XX_KBC1,
+ E4_7XX_KBC2,
+ F4_7XX_KBC3,
+ E3_7XX_KBC4,
+
+ /* USB */
+ AA17_7XX_USB_DM,
+ W16_7XX_USB_PU_EN,
+ W17_7XX_USB_VBUSI,
+ W18_7XX_USB_DMCK_OUT,
+ W19_7XX_USB_DCRST,
+
+ /* MMC */
+ MMC_7XX_CMD,
+ MMC_7XX_CLK,
+ MMC_7XX_DAT0,
+
+ /* I2C */
+ I2C_7XX_SCL,
+ I2C_7XX_SDA,
+
+ /* SPI */
+ SPI_7XX_1,
+ SPI_7XX_2,
+ SPI_7XX_3,
+ SPI_7XX_4,
+ SPI_7XX_5,
+ SPI_7XX_6,
+
+ /* UART */
+ UART_7XX_1,
+ UART_7XX_2,
+};
+
+enum omap1xxx_index {
+	/* UART1 (BT_UART_GATING) */
+ UART1_TX = 0,
+ UART1_RTS,
+
+	/* UART2 (COM_UART_GATING) */
+ UART2_TX,
+ UART2_RX,
+ UART2_CTS,
+ UART2_RTS,
+
+ /* UART3 (GIGA_UART_GATING) */
+ UART3_TX,
+ UART3_RX,
+ UART3_CTS,
+ UART3_RTS,
+ UART3_CLKREQ,
+ UART3_BCLK, /* 12MHz clock out */
+ Y15_1610_UART3_RTS,
+
+ /* PWT & PWL */
+ PWT,
+ PWL,
+
+ /* USB master generic */
+ R18_USB_VBUS,
+ R18_1510_USB_GPIO0,
+ W4_USB_PUEN,
+ W4_USB_CLKO,
+ W4_USB_HIGHZ,
+ W4_GPIO58,
+
+ /* USB1 master */
+ USB1_SUSP,
+ USB1_SEO,
+ W13_1610_USB1_SE0,
+ USB1_TXEN,
+ USB1_TXD,
+ USB1_VP,
+ USB1_VM,
+ USB1_RCV,
+ USB1_SPEED,
+ R13_1610_USB1_SPEED,
+ R13_1710_USB1_SE0,
+
+ /* USB2 master */
+ USB2_SUSP,
+ USB2_VP,
+ USB2_TXEN,
+ USB2_VM,
+ USB2_RCV,
+ USB2_SEO,
+ USB2_TXD,
+
+ /* OMAP-1510 GPIO */
+ R18_1510_GPIO0,
+ R19_1510_GPIO1,
+ M14_1510_GPIO2,
+
+ /* OMAP1610 GPIO */
+ P18_1610_GPIO3,
+ Y15_1610_GPIO17,
+
+ /* OMAP-1710 GPIO */
+ R18_1710_GPIO0,
+ V2_1710_GPIO10,
+ N21_1710_GPIO14,
+ W15_1710_GPIO40,
+
+ /* MPUIO */
+ MPUIO2,
+ N15_1610_MPUIO2,
+ MPUIO4,
+ MPUIO5,
+ T20_1610_MPUIO5,
+ W11_1610_MPUIO6,
+ V10_1610_MPUIO7,
+ W11_1610_MPUIO9,
+ V10_1610_MPUIO10,
+ W10_1610_MPUIO11,
+ E20_1610_MPUIO13,
+ U20_1610_MPUIO14,
+ E19_1610_MPUIO15,
+
+ /* MCBSP2 */
+ MCBSP2_CLKR,
+ MCBSP2_CLKX,
+ MCBSP2_DR,
+ MCBSP2_DX,
+ MCBSP2_FSR,
+ MCBSP2_FSX,
+
+ /* MCBSP3 */
+ MCBSP3_CLKX,
+
+ /* Misc ballouts */
+ BALLOUT_V8_ARMIO3,
+ N20_HDQ,
+
+ /* OMAP-1610 MMC2 */
+ W8_1610_MMC2_DAT0,
+ V8_1610_MMC2_DAT1,
+ W15_1610_MMC2_DAT2,
+ R10_1610_MMC2_DAT3,
+ Y10_1610_MMC2_CLK,
+ Y8_1610_MMC2_CMD,
+ V9_1610_MMC2_CMDDIR,
+ V5_1610_MMC2_DATDIR0,
+ W19_1610_MMC2_DATDIR1,
+ R18_1610_MMC2_CLKIN,
+
+ /* OMAP-1610 External Trace Interface */
+ M19_1610_ETM_PSTAT0,
+ L15_1610_ETM_PSTAT1,
+ L18_1610_ETM_PSTAT2,
+ L19_1610_ETM_D0,
+ J19_1610_ETM_D6,
+ J18_1610_ETM_D7,
+
+ /* OMAP16XX GPIO */
+ P20_1610_GPIO4,
+ V9_1610_GPIO7,
+ W8_1610_GPIO9,
+ N20_1610_GPIO11,
+ N19_1610_GPIO13,
+ P10_1610_GPIO22,
+ V5_1610_GPIO24,
+ AA20_1610_GPIO_41,
+ W19_1610_GPIO48,
+ M7_1610_GPIO62,
+ V14_16XX_GPIO37,
+ R9_16XX_GPIO18,
+ L14_16XX_GPIO49,
+
+ /* OMAP-1610 uWire */
+ V19_1610_UWIRE_SCLK,
+ U18_1610_UWIRE_SDI,
+ W21_1610_UWIRE_SDO,
+ N14_1610_UWIRE_CS0,
+ P15_1610_UWIRE_CS3,
+ N15_1610_UWIRE_CS1,
+
+ /* OMAP-1610 SPI */
+ U19_1610_SPIF_SCK,
+ U18_1610_SPIF_DIN,
+ P20_1610_SPIF_DIN,
+ W21_1610_SPIF_DOUT,
+ R18_1610_SPIF_DOUT,
+ N14_1610_SPIF_CS0,
+ N15_1610_SPIF_CS1,
+ T19_1610_SPIF_CS2,
+ P15_1610_SPIF_CS3,
+
+ /* OMAP-1610 Flash */
+ L3_1610_FLASH_CS2B_OE,
+ M8_1610_FLASH_CS2B_WE,
+
+ /* First MMC */
+ MMC_CMD,
+ MMC_DAT1,
+ MMC_DAT2,
+ MMC_DAT0,
+ MMC_CLK,
+ MMC_DAT3,
+
+ /* OMAP-1710 MMC CMDDIR and DATDIR0 */
+ M15_1710_MMC_CLKI,
+ P19_1710_MMC_CMDDIR,
+ P20_1710_MMC_DATDIR0,
+
+ /* OMAP-1610 USB0 alternate pin configuration */
+ W9_USB0_TXEN,
+ AA9_USB0_VP,
+ Y5_USB0_RCV,
+ R9_USB0_VM,
+ V6_USB0_TXD,
+ W5_USB0_SE0,
+ V9_USB0_SPEED,
+ V9_USB0_SUSP,
+
+ /* USB2 */
+ W9_USB2_TXEN,
+ AA9_USB2_VP,
+ Y5_USB2_RCV,
+ R9_USB2_VM,
+ V6_USB2_TXD,
+ W5_USB2_SE0,
+
+ /* 16XX UART */
+ R13_1610_UART1_TX,
+ V14_16XX_UART1_RX,
+ R14_1610_UART1_CTS,
+ AA15_1610_UART1_RTS,
+ R9_16XX_UART2_RX,
+ L14_16XX_UART3_RX,
+
+ /* I2C OMAP-1610 */
+ I2C_SCL,
+ I2C_SDA,
+
+ /* Keypad */
+ F18_1610_KBC0,
+ D20_1610_KBC1,
+ D19_1610_KBC2,
+ E18_1610_KBC3,
+ C21_1610_KBC4,
+ G18_1610_KBR0,
+ F19_1610_KBR1,
+ H14_1610_KBR2,
+ E20_1610_KBR3,
+ E19_1610_KBR4,
+ N19_1610_KBR5,
+
+ /* Power management */
+ T20_1610_LOW_PWR,
+
+ /* MCLK Settings */
+ V5_1710_MCLK_ON,
+ V5_1710_MCLK_OFF,
+ R10_1610_MCLK_ON,
+ R10_1610_MCLK_OFF,
+
+ /* CompactFlash controller */
+ P11_1610_CF_CD2,
+ R11_1610_CF_IOIS16,
+ V10_1610_CF_IREQ,
+ W10_1610_CF_RESET,
+ W11_1610_CF_CD1,
+
+ /* parallel camera */
+ J15_1610_CAM_LCLK,
+ J18_1610_CAM_D7,
+ J19_1610_CAM_D6,
+ J14_1610_CAM_D5,
+ K18_1610_CAM_D4,
+ K19_1610_CAM_D3,
+ K15_1610_CAM_D2,
+ K14_1610_CAM_D1,
+ L19_1610_CAM_D0,
+ L18_1610_CAM_VS,
+ L15_1610_CAM_HS,
+ M19_1610_CAM_RSTZ,
+ Y15_1610_CAM_OUTCLK,
+
+ /* serial camera */
+ H19_1610_CAM_EXCLK,
+ Y12_1610_CCP_CLKP,
+ W13_1610_CCP_CLKM,
+ W14_1610_CCP_DATAP,
+ Y14_1610_CCP_DATAM,
+};
+
+#ifdef CONFIG_OMAP_MUX
+extern int omap_cfg_reg(unsigned long reg_cfg);
+#else
+static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; }
+#endif
+
+#endif
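
A usage sketch, assuming a board file built with CONFIG_OMAP_MUX (with it disabled, the stub above turns the calls into no-ops). The function name and the chosen pins are illustrative.

/* Hedged sketch: mux the UART1 and first-MMC pins at board init. */
static void __init example_board_mux_init(void)
{
	omap_cfg_reg(UART1_TX);
	omap_cfg_reg(UART1_RTS);
	omap_cfg_reg(MMC_CMD);
	omap_cfg_reg(MMC_CLK);
	omap_cfg_reg(MMC_DAT0);
}
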
diff --git a/include/linux/soc/ti/omap1-soc.h b/include/linux/soc/ti/omap1-soc.h
new file mode 100644
index 000000000000..a42d9aa68648
--- /dev/null
+++ b/include/linux/soc/ti/omap1-soc.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OMAP cpu type detection
+ *
+ * Copyright (C) 2004, 2008 Nokia Corporation
+ *
+ * Copyright (C) 2009-11 Texas Instruments.
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * Added OMAP4/5 specific defines - Santosh Shilimkar<santosh.shilimkar@ti.com>
+ */
+
+#ifndef __ASM_ARCH_OMAP_CPU_H
+#define __ASM_ARCH_OMAP_CPU_H
+
+/*
+ * Test if multicore OMAP support is needed
+ */
+#undef MULTI_OMAP1
+#undef OMAP_NAME
+
+#ifdef CONFIG_ARCH_OMAP15XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap1510
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap16xx
+# endif
+#endif
+
+/*
+ * omap_rev bits:
+ * CPU id bits (0730, 1510, 1710, 2422...) [31:16]
+ * CPU revision (See _REV_ defined in cpu.h) [15:08]
+ * CPU class bits (15xx, 16xx, 24xx, 34xx...) [07:00]
+ */
+unsigned int omap_rev(void);
+
+/*
+ * Get the CPU revision for OMAP devices
+ */
+#define GET_OMAP_REVISION() ((omap_rev() >> 8) & 0xff)
+
+/*
+ * Macros to group OMAP into cpu classes.
+ * These can be used in most places.
+ * cpu_is_omap15xx(): True for OMAP1510, OMAP5910 and OMAP310
+ * cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710
+ */
+#define GET_OMAP_CLASS (omap_rev() & 0xff)
+
+#define IS_OMAP_CLASS(class, id) \
+static inline int is_omap ##class (void) \
+{ \
+ return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff)
+
+#define IS_OMAP_SUBCLASS(subclass, id) \
+static inline int is_omap ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_CLASS(15xx, 0x15)
+IS_OMAP_CLASS(16xx, 0x16)
+
+#define cpu_is_omap15xx() 0
+#define cpu_is_omap16xx() 0
+
+#if defined(MULTI_OMAP1)
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() is_omap15xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() is_omap16xx()
+# endif
+#else
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() 1
+# endif
+#endif
+
+/*
+ * Macros to detect individual cpu types.
+ * These are only rarely needed.
+ * cpu_is_omap310(): True for OMAP310
+ * cpu_is_omap1510(): True for OMAP1510
+ * cpu_is_omap1610(): True for OMAP1610
+ * cpu_is_omap1611(): True for OMAP1611
+ * cpu_is_omap5912(): True for OMAP5912
+ * cpu_is_omap1621(): True for OMAP1621
+ * cpu_is_omap1710(): True for OMAP1710
+ */
+#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff)
+
+#define IS_OMAP_TYPE(type, id) \
+static inline int is_omap ##type (void) \
+{ \
+ return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_TYPE(310, 0x0310)
+IS_OMAP_TYPE(1510, 0x1510)
+IS_OMAP_TYPE(1610, 0x1610)
+IS_OMAP_TYPE(1611, 0x1611)
+IS_OMAP_TYPE(5912, 0x1611)
+IS_OMAP_TYPE(1621, 0x1621)
+IS_OMAP_TYPE(1710, 0x1710)
+
+#define cpu_is_omap310() 0
+#define cpu_is_omap1510() 0
+#define cpu_is_omap1610() 0
+#define cpu_is_omap5912() 0
+#define cpu_is_omap1611() 0
+#define cpu_is_omap1621() 0
+#define cpu_is_omap1710() 0
+
+#define cpu_class_is_omap1() 1
+
+/*
+ * Whether we have MULTI_OMAP1 or not, we still need to distinguish
+ * between 310 and 1510, and between 1611B/5912 and 1710.
+ */
+
+#if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap310
+# undef cpu_is_omap1510
+# define cpu_is_omap310() is_omap310()
+# define cpu_is_omap1510() is_omap1510()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap1610
+# undef cpu_is_omap1611
+# undef cpu_is_omap5912
+# undef cpu_is_omap1621
+# undef cpu_is_omap1710
+# define cpu_is_omap1610() is_omap1610()
+# define cpu_is_omap1611() is_omap1611()
+# define cpu_is_omap5912() is_omap5912()
+# define cpu_is_omap1621() is_omap1621()
+# define cpu_is_omap1710() is_omap1710()
+#endif
+
+#endif
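
A sketch of how the detection macros compose, assuming omap_rev() is implemented by the platform code; the pr_info() reporting is illustrative.

/* Hedged sketch: decode omap_rev() and branch on the CPU class. */
static void example_report_cpu(void)
{
	pr_info("OMAP type %04x, class %02x, rev %02x\n",
		GET_OMAP_TYPE, GET_OMAP_CLASS, GET_OMAP_REVISION());

	if (cpu_is_omap16xx())
		pr_info("16xx-specific setup would go here\n");
}
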
diff --git a/include/linux/soc/ti/omap1-usb.h b/include/linux/soc/ti/omap1-usb.h
new file mode 100644
index 000000000000..67488698601a
--- /dev/null
+++ b/include/linux/soc/ti/omap1-usb.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_TI_OMAP1_USB
+#define __SOC_TI_OMAP1_USB
+/*
+ * Constants in this file are used all over the place, in platform
+ * code, as well as the udc, phy and ohci drivers.
+ * This is not a great design, but unlikely to get fixed after
+ * such a long time. Don't do this elsewhere.
+ */
+
+#define OMAP1_OTG_BASE 0xfffb0400
+#define OMAP1_UDC_BASE 0xfffb4000
+
+#define OMAP2_UDC_BASE 0x4805e200
+#define OMAP2_OTG_BASE 0x4805e300
+#define OTG_BASE OMAP1_OTG_BASE
+#define UDC_BASE OMAP1_UDC_BASE
+
+/*
+ * OTG and transceiver registers, for OMAPs starting with ARM926
+ */
+#define OTG_REV (OTG_BASE + 0x00)
+#define OTG_SYSCON_1 (OTG_BASE + 0x04)
+# define USB2_TRX_MODE(w) (((w)>>24)&0x07)
+# define USB1_TRX_MODE(w) (((w)>>20)&0x07)
+# define USB0_TRX_MODE(w) (((w)>>16)&0x07)
+# define OTG_IDLE_EN (1 << 15)
+# define HST_IDLE_EN (1 << 14)
+# define DEV_IDLE_EN (1 << 13)
+# define OTG_RESET_DONE (1 << 2)
+# define OTG_SOFT_RESET (1 << 1)
+#define OTG_SYSCON_2 (OTG_BASE + 0x08)
+# define OTG_EN (1 << 31)
+# define USBX_SYNCHRO (1 << 30)
+# define OTG_MST16 (1 << 29)
+# define SRP_GPDATA (1 << 28)
+# define SRP_GPDVBUS (1 << 27)
+# define SRP_GPUVBUS(w) (((w)>>24)&0x07)
+# define A_WAIT_VRISE(w) (((w)>>20)&0x07)
+# define B_ASE_BRST(w) (((w)>>16)&0x07)
+# define SRP_DPW (1 << 14)
+# define SRP_DATA (1 << 13)
+# define SRP_VBUS (1 << 12)
+# define OTG_PADEN (1 << 10)
+# define HMC_PADEN (1 << 9)
+# define UHOST_EN (1 << 8)
+# define HMC_TLLSPEED (1 << 7)
+# define HMC_TLLATTACH (1 << 6)
+# define OTG_HMC(w) (((w)>>0)&0x3f)
+#define OTG_CTRL (OTG_BASE + 0x0c)
+# define OTG_USB2_EN (1 << 29)
+# define OTG_USB2_DP (1 << 28)
+# define OTG_USB2_DM (1 << 27)
+# define OTG_USB1_EN (1 << 26)
+# define OTG_USB1_DP (1 << 25)
+# define OTG_USB1_DM (1 << 24)
+# define OTG_USB0_EN (1 << 23)
+# define OTG_USB0_DP (1 << 22)
+# define OTG_USB0_DM (1 << 21)
+# define OTG_ASESSVLD (1 << 20)
+# define OTG_BSESSEND (1 << 19)
+# define OTG_BSESSVLD (1 << 18)
+# define OTG_VBUSVLD (1 << 17)
+# define OTG_ID (1 << 16)
+# define OTG_DRIVER_SEL (1 << 15)
+# define OTG_A_SETB_HNPEN (1 << 12)
+# define OTG_A_BUSREQ (1 << 11)
+# define OTG_B_HNPEN (1 << 9)
+# define OTG_B_BUSREQ (1 << 8)
+# define OTG_BUSDROP (1 << 7)
+# define OTG_PULLDOWN (1 << 5)
+# define OTG_PULLUP (1 << 4)
+# define OTG_DRV_VBUS (1 << 3)
+# define OTG_PD_VBUS (1 << 2)
+# define OTG_PU_VBUS (1 << 1)
+# define OTG_PU_ID (1 << 0)
+#define OTG_IRQ_EN (OTG_BASE + 0x10) /* 16-bit */
+# define DRIVER_SWITCH (1 << 15)
+# define A_VBUS_ERR (1 << 13)
+# define A_REQ_TMROUT (1 << 12)
+# define A_SRP_DETECT (1 << 11)
+# define B_HNP_FAIL (1 << 10)
+# define B_SRP_TMROUT (1 << 9)
+# define B_SRP_DONE (1 << 8)
+# define B_SRP_STARTED (1 << 7)
+# define OPRT_CHG (1 << 0)
+#define OTG_IRQ_SRC (OTG_BASE + 0x14) /* 16-bit */
+	/* same bits as in OTG_IRQ_EN */
+#define OTG_OUTCTRL (OTG_BASE + 0x18) /* 16-bit */
+# define OTGVPD (1 << 14)
+# define OTGVPU (1 << 13)
+# define OTGPUID (1 << 12)
+# define USB2VDR (1 << 10)
+# define USB2PDEN (1 << 9)
+# define USB2PUEN (1 << 8)
+# define USB1VDR (1 << 6)
+# define USB1PDEN (1 << 5)
+# define USB1PUEN (1 << 4)
+# define USB0VDR (1 << 2)
+# define USB0PDEN (1 << 1)
+# define USB0PUEN (1 << 0)
+#define OTG_TEST (OTG_BASE + 0x20) /* 16-bit */
+#define OTG_VENDOR_CODE (OTG_BASE + 0xfc) /* 16-bit */
+
+/*-------------------------------------------------------------------------*/
+
+/* OMAP1 */
+#define USB_TRANSCEIVER_CTRL (0xfffe1000 + 0x0064)
+# define CONF_USB2_UNI_R (1 << 8)
+# define CONF_USB1_UNI_R (1 << 7)
+# define CONF_USB_PORT0_R(x) (((x)>>4)&0x7)
+# define CONF_USB0_ISOLATE_R (1 << 3)
+# define CONF_USB_PWRDN_DM_R (1 << 2)
+# define CONF_USB_PWRDN_DP_R (1 << 1)
+
+#endif
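
A usage sketch, assuming the omap_readl() accessor declared in the omap1-io header above is used for these physical register addresses; the helper name is illustrative.

/* Hedged sketch: test whether the OTG controller sees a valid VBUS. */
static bool example_vbus_valid(void)
{
	return !!(omap_readl(OTG_CTRL) & OTG_VBUSVLD);
}
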
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
index 1f6e76d423cf..543da257a5f2 100644
--- a/include/linux/soc/ti/ti-msgmgr.h
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -1,26 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Texas Instruments' Message Manager
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef TI_MSGMGR_H
#define TI_MSGMGR_H
+struct mbox_chan;
+
/**
* struct ti_msgmgr_message - Message Manager structure
* @len: Length of data in the Buffer
* @buf: Buffer pointer
+ * @chan_rx: Expected channel for response, must be provided to use polled rx
+ * @timeout_rx_ms: Timeout value to use if polling for response
*
* This is the structure for data used in mbox_send_message;
* the length of data buffer used depends on the SoC integration
@@ -30,6 +26,8 @@
struct ti_msgmgr_message {
size_t len;
u8 *buf;
+ struct mbox_chan *chan_rx;
+ int timeout_rx_ms;
};
#endif /* TI_MSGMGR_H */
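
A sketch of the new polled-receive path, assuming tx_chan and rx_chan were obtained with mbox_request_channel(); the buffer and timeout values are illustrative.

/* Hedged sketch: send a message and poll rx_chan for the response. */
static int example_send_polled(struct mbox_chan *tx_chan,
			       struct mbox_chan *rx_chan,
			       u8 *buf, size_t len)
{
	struct ti_msgmgr_message msg = {
		.buf		= buf,
		.len		= len,
		.chan_rx	= rx_chan,	/* expected response channel */
		.timeout_rx_ms	= 50,		/* poll up to 50 ms */
	};

	return mbox_send_message(tx_chan, &msg);
}
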
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index e3aa8b14612e..4dba2f2aff6f 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -18,6 +18,4 @@ struct irq_domain
struct irq_domain *parent);
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
struct ti_sci_resource *res);
-unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index);
-void ti_sci_inta_msi_domain_free_irqs(struct device *dev);
#endif /* __INCLUDE_LINUX_IRQCHIP_TI_SCI_INTA_H */
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index cf27b080e148..bd0d11af76c5 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -196,6 +196,22 @@ struct ti_sci_clk_ops {
};
/**
+ * struct ti_sci_resource_desc - Description of TI SCI resource instance range.
+ * @start: Start index of the first resource range.
+ * @num: Number of resources in the first range.
+ * @start_sec: Start index of the second resource range.
+ * @num_sec: Number of resources in the second range.
+ * @res_map: Bitmap to manage the allocation of these resources.
+ */
+struct ti_sci_resource_desc {
+ u16 start;
+ u16 num;
+ u16 start_sec;
+ u16 num_sec;
+ unsigned long *res_map;
+};
+
+/**
* struct ti_sci_rm_core_ops - Resource management core operations
* @get_range: Get a range of resources belonging to ti sci host.
* @get_range_from_shost: Get a range of resources belonging to
@@ -209,15 +225,15 @@ struct ti_sci_clk_ops {
* - dev_id: TISCI device ID.
* - subtype: Resource assignment subtype that is being requested
* from the given device.
- * - range_start: Start index of the resource range
- * - range_end: Number of resources in the range
+ * - desc: Pointer to ti_sci_resource_desc to be updated with the resource
+ * range start index and number of resources
*/
struct ti_sci_rm_core_ops {
int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id,
- u8 subtype, u16 *range_start, u16 *range_num);
+ u8 subtype, struct ti_sci_resource_desc *desc);
int (*get_range_from_shost)(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num);
+ struct ti_sci_resource_desc *desc);
};
#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0
@@ -259,30 +275,46 @@ struct ti_sci_rm_irq_ops {
#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4)
/* RA config.order_id parameter is valid for RM ring configure TISCI message */
#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5)
+/* RA config.virtid parameter is valid for RM ring configure TISCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_VIRTID_VALID BIT(6)
+/* RA config.asel parameter is valid for RM ring configure TISCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ASEL_VALID BIT(7)
#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \
(TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \
TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \
TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \
TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \
- TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID)
+ TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_ASEL_VALID)
+
+/**
+ * struct ti_sci_msg_rm_ring_cfg - Ring configuration
+ *
+ * Parameters for Navigator Subsystem ring configuration
+ * See @ti_sci_msg_rm_ring_cfg_req
+ */
+struct ti_sci_msg_rm_ring_cfg {
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+ u16 virtid;
+ u8 asel;
+};
/**
* struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations
- * @config: configure the SoC Navigator Subsystem Ring Accelerator ring
- * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring
- * configuration
+ * @set_cfg: configure the SoC Navigator Subsystem Ring Accelerator ring
*/
struct ti_sci_rm_ringacc_ops {
- int (*config)(const struct ti_sci_handle *handle,
- u32 valid_params, u16 nav_id, u16 index,
- u32 addr_lo, u32 addr_hi, u32 count, u8 mode,
- u8 size, u8 order_id
- );
- int (*get_config)(const struct ti_sci_handle *handle,
- u32 nav_id, u32 index, u8 *mode,
- u32 *addr_lo, u32 *addr_hi, u32 *count,
- u8 *size, u8 *order_id);
+ int (*set_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params);
};
/**
@@ -320,6 +352,9 @@ struct ti_sci_rm_psil_ops {
#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2
#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3
+#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_TCHAN 0
+#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN 1
+
/* UDMAP TX/RX channel valid_params common declarations */
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0)
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1)
@@ -345,6 +380,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg {
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11)
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12)
#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID BIT(15)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID BIT(16)
u16 nav_id;
u16 index;
u8 tx_pause_on_err;
@@ -362,6 +399,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg {
u16 fdepth;
u8 tx_sched_priority;
u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
};
/**
@@ -521,18 +560,6 @@ struct ti_sci_handle {
#define TI_SCI_RESOURCE_NULL 0xffff
/**
- * struct ti_sci_resource_desc - Description of TI SCI resource instance range.
- * @start: Start index of the resource.
- * @num: Number of resources.
- * @res_map: Bitmap to manage the allocation of these resources.
- */
-struct ti_sci_resource_desc {
- u16 start;
- u16 num;
- unsigned long *res_map;
-};
-
-/**
* struct ti_sci_resource - Structure representing a resource assigned
* to a device.
* @sets: Number of sets available from this resource type
@@ -618,7 +645,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
static inline struct ti_sci_resource *
devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
- u32 dev_id, u32 sub_type);
+ u32 dev_id, u32 sub_type)
{
return ERR_PTR(-EINVAL);
}
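
A sketch of a caller migrating to the consolidated op, assuming the handle exposes the ring ops as handle->ops.rm_ring_ops per this header's ti_sci_ops layout; the nav_id, index, mode and size values are illustrative.

/* Hedged sketch: configure one ring via the new set_cfg op. */
static int example_ring_cfg(const struct ti_sci_handle *handle,
			    u16 nav_id, u16 index,
			    dma_addr_t ring_mem, u32 count)
{
	struct ti_sci_msg_rm_ring_cfg cfg = {
		.valid_params	= TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
		.nav_id		= nav_id,
		.index		= index,
		.addr_lo	= lower_32_bits(ring_mem),
		.addr_hi	= upper_32_bits(ring_mem),
		.count		= count,
		.mode		= 0,		/* SoC-specific ring mode */
		.size		= 0,		/* SoC-specific element size */
	};

	return handle->ops.rm_ring_ops.set_cfg(handle, &cfg);
}
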
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 15fe980a27ea..110978dc9af1 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -13,6 +13,7 @@ struct nlmsghdr;
struct sock;
struct sock_diag_handler {
+ struct module *owner;
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
int (*get_info)(struct sk_buff *skb, struct sock *sk);
@@ -22,10 +23,27 @@ struct sock_diag_handler {
int sock_diag_register(const struct sock_diag_handler *h);
void sock_diag_unregister(const struct sock_diag_handler *h);
-void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+struct sock_diag_inet_compat {
+ struct module *owner;
+ int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr);
+void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr);
+
+u64 __sock_gen_cookie(struct sock *sk);
+
+static inline u64 sock_gen_cookie(struct sock *sk)
+{
+ u64 cookie;
+
+ preempt_disable();
+ cookie = __sock_gen_cookie(sk);
+ preempt_enable();
+
+ return cookie;
+}
-u64 sock_gen_cookie(struct sock *sk);
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
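
A sketch of a diag module adopting the new owner field, which lets the core pin the module while a dump is in flight. The family, callback and function names are illustrative.

/* Hedged sketch: register a sock_diag handler with module ownership. */
static int example_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return 0;	/* illustrative stub */
}

static const struct sock_diag_handler example_diag_handler = {
	.owner	= THIS_MODULE,	/* new field */
	.family	= AF_UNIX,	/* illustrative choice */
	.dump	= example_diag_dump,
};

static int __init example_diag_init(void)
{
	return sock_diag_register(&example_diag_handler);
}
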
diff --git a/include/linux/socket.h b/include/linux/socket.h
index e9cb30d8cbfb..139c330ccf2c 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -14,6 +14,8 @@ struct file;
struct pid;
struct cred;
struct socket;
+struct sock;
+struct sk_buff;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -31,7 +33,10 @@ typedef __kernel_sa_family_t sa_family_t;
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
- char sa_data[14]; /* 14 bytes of protocol address */
+ union {
+ char sa_data_min[14]; /* Minimum 14 bytes of protocol address */
+ DECLARE_FLEX_ARRAY(char, sa_data);
+ };
};
struct linger {
@@ -50,6 +55,9 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
+
+ int msg_inq; /* output, data left in socket */
+
struct iov_iter msg_iter; /* data */
/*
@@ -62,9 +70,13 @@ struct msghdr {
void __user *msg_control_user;
};
bool msg_control_is_user : 1;
- __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ bool msg_get_inq : 1;/* return INQ after receive */
unsigned int msg_flags; /* flags on received message */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
+ struct ubuf_info *msg_ubuf;
+ int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
};
struct user_msghdr {
@@ -85,7 +97,7 @@ struct mmsghdr {
/*
* POSIX 1003.1g - ancillary data object information
- * Ancillary data consits of a sequence of pairs of
+ * Ancillary data consists of a sequence of pairs of
* (cmsghdr, cmsg_data[])
*/
@@ -165,6 +177,7 @@ static inline size_t msg_data_left(struct msghdr *msg)
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
#define SCM_SECURITY 0x03 /* rw: security label */
+#define SCM_PIDFD 0x04 /* ro: pidfd (int) */
struct ucred {
__u32 pid;
@@ -223,8 +236,11 @@ struct ucred {
* reuses AF_INET address family
*/
#define AF_XDP 44 /* XDP sockets */
+#define AF_MCTP 45 /* Management component
+ * transport protocol
+ */
-#define AF_MAX 45 /* For now.. */
+#define AF_MAX 46 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -274,6 +290,7 @@ struct ucred {
#define PF_QIPCRTR AF_QIPCRTR
#define PF_SMC AF_SMC
#define PF_XDP AF_XDP
+#define PF_MCTP AF_MCTP
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -302,7 +319,6 @@ struct ucred {
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
-#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
@@ -311,6 +327,7 @@ struct ucred {
*/
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
+#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
descriptor received through
@@ -321,6 +338,9 @@ struct ucred {
#define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */
#endif
+/* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */
+#define MSG_INTERNAL_SENDMSG_FLAGS \
+ (MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_DECRYPTED)
/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_IP 0
@@ -360,6 +380,10 @@ struct ucred {
#define SOL_KCM 281
#define SOL_TLS 282
#define SOL_XDP 283
+#define SOL_MPTCP 284
+#define SOL_MCTP 285
+#define SOL_SMC 286
+#define SOL_VSOCK 287
/* IPX options */
#define IPX_TYPE 1
@@ -398,17 +422,9 @@ extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
struct user_msghdr __user *umsg,
struct sockaddr __user *uaddr,
unsigned int flags);
-extern int sendmsg_copy_msghdr(struct msghdr *msg,
- struct user_msghdr __user *umsg, unsigned flags,
- struct iovec **iov);
-extern int recvmsg_copy_msghdr(struct msghdr *msg,
- struct user_msghdr __user *umsg, unsigned flags,
- struct sockaddr __user **uaddr,
- struct iovec **iov);
-extern int __copy_msghdr_from_user(struct msghdr *kmsg,
- struct user_msghdr __user *umsg,
- struct sockaddr __user **save_addr,
- struct iovec __user **uiov, size_t *nsegs);
+extern int __copy_msghdr(struct msghdr *kmsg,
+ struct user_msghdr *umsg,
+ struct sockaddr __user **save_addr);
/* helpers which do the actual work for syscalls */
extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -417,13 +433,13 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern int __sys_accept4_file(struct file *file, unsigned file_flags,
- struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags,
- unsigned long nofile);
+extern struct file *do_accept(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
+extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
@@ -436,7 +452,6 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len);
extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
+extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
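
A sketch of the msg_get_inq/msg_inq handshake from a protocol's point of view: recvmsg() checks the request bit and, if set, reports the unread byte count back. sk_unread_bytes() is a placeholder, not a real helper, and the signature follows the common recvmsg shape.

/* Hedged sketch: report residual queue length when requested. */
static int example_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int flags, int *addr_len)
{
	int copied = 0;

	/* ... copy data into msg->msg_iter, update copied ... */

	if (msg->msg_get_inq)
		msg->msg_inq = sk_unread_bytes(sk);	/* placeholder */

	return copied;
}
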
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index ea193414298b..307961b41541 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -55,6 +55,29 @@ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
return copy_from_sockptr_offset(dst, src, 0, size);
}
+static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
+ sockptr_t src, size_t usize)
+{
+ size_t size = min(ksize, usize);
+ size_t rest = max(ksize, usize) - size;
+
+ if (!sockptr_is_kernel(src))
+ return copy_struct_from_user(dst, ksize, src.user, size);
+
+ if (usize < ksize) {
+ memset(dst + size, 0, rest);
+ } else if (usize > ksize) {
+ char *p = src.kernel;
+
+ while (rest--) {
+ if (*p++)
+ return -E2BIG;
+ }
+ }
+ memcpy(dst, src.kernel, size);
+ return 0;
+}
+
static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
const void *src, size_t size)
{
@@ -64,6 +87,11 @@ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
return 0;
}
+static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
+{
+ return copy_to_sockptr_offset(dst, 0, src, size);
+}
+
static inline void *memdup_sockptr(sockptr_t src, size_t len)
{
void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
@@ -102,4 +130,12 @@ static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
return strncpy_from_user(dst, src.user, count);
}
+static inline int check_zeroed_sockptr(sockptr_t src, size_t offset,
+ size_t size)
+{
+ if (!sockptr_is_kernel(src))
+ return check_zeroed_user(src.user + offset, size);
+ return memchr_inv(src.kernel + offset, 0, size) == NULL;
+}
+
#endif /* _LINUX_SOCKPTR_H */
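
A sketch of the intended use of copy_struct_from_sockptr(): a versioned setsockopt struct where older binaries pass a shorter object (the missing tail is zero-filled) and newer ones may pass a longer object (a non-zero unknown tail returns -E2BIG). The struct and option handling are illustrative.

/* Hedged sketch: version-tolerant option copy. */
struct example_opt {
	__u32 flags;
	__u32 timeout_ms;
	/* new fields may be appended in later ABI revisions */
};

static int example_setsockopt(sockptr_t optval, unsigned int optlen)
{
	struct example_opt opt;
	int err;

	err = copy_struct_from_sockptr(&opt, sizeof(opt), optval, optlen);
	if (err)
		return err;

	/* ... apply opt.flags / opt.timeout_ms ... */
	return 0;
}
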
diff --git a/include/linux/softirq.h b/include/linux/softirq.h
new file mode 100644
index 000000000000..c73d7dcb4cb5
--- /dev/null
+++ b/include/linux/softirq.h
@@ -0,0 +1 @@
+#include <linux/interrupt.h>
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
index 374d0fdb0743..1e3c92feea6e 100644
--- a/include/linux/sony-laptop.h
+++ b/include/linux/sony-laptop.h
@@ -31,7 +31,7 @@
#if IS_ENABLED(CONFIG_SONY_LAPTOP)
int sony_pic_camera_command(int command, u8 value);
#else
-static inline int sony_pic_camera_command(int command, u8 value) { return 0; };
+static inline int sony_pic_camera_command(int command, u8 value) { return 0; }
#endif
#endif /* __KERNEL__ */
diff --git a/include/linux/sort.h b/include/linux/sort.h
index b5898725fe9d..e163287ac6c1 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -6,7 +6,7 @@
void sort_r(void *base, size_t num, size_t size,
cmp_r_func_t cmp_func,
- swap_func_t swap_func,
+ swap_r_func_t swap_func,
const void *priv);
void sort(void *base, size_t num, size_t size,
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 76052f12c9f7..66f814b63a43 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -4,7 +4,12 @@
#ifndef __SOUNDWIRE_H
#define __SOUNDWIRE_H
+#include <linux/bug.h>
+#include <linux/lockdep_types.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
struct sdw_bus;
struct sdw_slave;
@@ -38,7 +43,8 @@ struct sdw_slave;
#define SDW_FRAME_CTRL_BITS 48
#define SDW_MAX_DEVICES 11
-#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
+#define SDW_MAX_PORTS 15
+#define SDW_VALID_PORT_RANGE(n) ((n) < SDW_MAX_PORTS && (n) >= 1)
enum {
SDW_PORT_DIRN_SINK = 0,
@@ -123,6 +129,12 @@ enum sdw_dpn_grouping {
SDW_BLK_GRP_CNT_4 = 3,
};
+/* block packing mode enum */
+enum sdw_dpn_pkg_mode {
+ SDW_BLK_PKG_PER_PORT = 0,
+ SDW_BLK_PKG_PER_CHANNEL = 1
+};
+
/**
* enum sdw_stream_type: data stream type
*
@@ -355,6 +367,12 @@ struct sdw_dpn_prop {
* @dp0_prop: Data Port 0 properties
* @src_dpn_prop: Source Data Port N properties
* @sink_dpn_prop: Sink Data Port N properties
+ * @scp_int1_mask: SCP_INT1_MASK desired settings
+ * @quirks: bitmask identifying deltas from the MIPI specification
+ * @clock_reg_supported: the Peripheral implements the clock base and scale
+ * registers introduced with the SoundWire 1.2 specification. SDCA devices
+ * do not need to set this boolean property as the registers are required.
+ * @use_domain_irq: call actual IRQ handler on slave, as well as callback
*/
struct sdw_slave_prop {
u32 mipi_revision;
@@ -376,8 +394,14 @@ struct sdw_slave_prop {
struct sdw_dp0_prop *dp0_prop;
struct sdw_dpn_prop *src_dpn_prop;
struct sdw_dpn_prop *sink_dpn_prop;
+ u8 scp_int1_mask;
+ u32 quirks;
+ bool clock_reg_supported;
+ bool use_domain_irq;
};
+#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
+
/**
* struct sdw_master_prop - Master properties
* @revision: MIPI spec version of the implementation
@@ -395,6 +419,7 @@ struct sdw_slave_prop {
* command
* @mclk_freq: clock reference passed to SoundWire Master, in Hz.
* @hw_disabled: if true, the Master is not functional, typically due to pin-mux
+ * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
*/
struct sdw_master_prop {
u32 revision;
@@ -411,8 +436,29 @@ struct sdw_master_prop {
u32 err_threshold;
u32 mclk_freq;
bool hw_disabled;
+ u64 quirks;
};
+/* Definitions for Master quirks */
+
+/*
+ * In a number of platforms bus clashes are reported after a hardware
+ * reset but without any explanations or evidence of a real problem.
+ * The following quirk will discard all initial bus clash interrupts
+ * but will leave the detection on should real bus clashes happen
+ */
+#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH BIT(0)
+
+/*
+ * Some Slave devices have known issues with incorrect parity errors
+ * reported after a hardware reset. However during integration unexplained
+ * parity errors can be reported by Slave devices, possibly due to electrical
+ * issues at the Master level.
+ * The following quirk will discard all initial parity errors but will leave
+ * the detection on should real parity errors happen.
+ */
+#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY BIT(1)
+
int sdw_master_read_prop(struct sdw_bus *bus);
int sdw_slave_read_prop(struct sdw_slave *slave);
@@ -440,6 +486,11 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
+struct sdw_extended_slave_id {
+ int link_id;
+ struct sdw_slave_id id;
+};
+
/*
* Helper macros to extract the MIPI-defined IDs
*
@@ -455,20 +506,28 @@ struct sdw_slave_id {
*
* The MIPI DisCo for SoundWire defines in addition the link_id as bits 51:48
*/
-
-#define SDW_DISCO_LINK_ID(adr) (((adr) >> 48) & GENMASK(3, 0))
-#define SDW_VERSION(adr) (((adr) >> 44) & GENMASK(3, 0))
-#define SDW_UNIQUE_ID(adr) (((adr) >> 40) & GENMASK(3, 0))
-#define SDW_MFG_ID(adr) (((adr) >> 24) & GENMASK(15, 0))
-#define SDW_PART_ID(adr) (((adr) >> 8) & GENMASK(15, 0))
-#define SDW_CLASS_ID(adr) ((adr) & GENMASK(7, 0))
+#define SDW_DISCO_LINK_ID_MASK GENMASK_ULL(51, 48)
+#define SDW_VERSION_MASK GENMASK_ULL(47, 44)
+#define SDW_UNIQUE_ID_MASK GENMASK_ULL(43, 40)
+#define SDW_MFG_ID_MASK GENMASK_ULL(39, 24)
+#define SDW_PART_ID_MASK GENMASK_ULL(23, 8)
+#define SDW_CLASS_ID_MASK GENMASK_ULL(7, 0)
+
+#define SDW_DISCO_LINK_ID(addr) FIELD_GET(SDW_DISCO_LINK_ID_MASK, addr)
+#define SDW_VERSION(addr) FIELD_GET(SDW_VERSION_MASK, addr)
+#define SDW_UNIQUE_ID(addr) FIELD_GET(SDW_UNIQUE_ID_MASK, addr)
+#define SDW_MFG_ID(addr) FIELD_GET(SDW_MFG_ID_MASK, addr)
+#define SDW_PART_ID(addr) FIELD_GET(SDW_PART_ID_MASK, addr)
+#define SDW_CLASS_ID(addr) FIELD_GET(SDW_CLASS_ID_MASK, addr)
/**
* struct sdw_slave_intr_status - Slave interrupt status
+ * @sdca_cascade: set if the Slave device reports an SDCA interrupt
* @control_port: control port status
* @port: data port status
*/
struct sdw_slave_intr_status {
+ bool sdca_cascade;
u8 control_port;
u8 port[15];
};
@@ -520,13 +579,15 @@ struct sdw_prepare_ch {
* enum sdw_port_prep_ops: Prepare operations for Data Port
*
* @SDW_OPS_PORT_PRE_PREP: Pre prepare operation for the Port
- * @SDW_OPS_PORT_PREP: Prepare operation for the Port
+ * @SDW_OPS_PORT_PRE_DEPREP: Pre deprepare operation for the Port
* @SDW_OPS_PORT_POST_PREP: Post prepare operation for the Port
+ * @SDW_OPS_PORT_POST_DEPREP: Post deprepare operation for the Port
*/
enum sdw_port_prep_ops {
SDW_OPS_PORT_PRE_PREP = 0,
- SDW_OPS_PORT_PREP = 1,
- SDW_OPS_PORT_POST_PREP = 2,
+ SDW_OPS_PORT_PRE_DEPREP,
+ SDW_OPS_PORT_POST_PREP,
+ SDW_OPS_PORT_POST_DEPREP,
};
/**
@@ -540,6 +601,10 @@ enum sdw_port_prep_ops {
* @bandwidth: Current bandwidth
* @col: Active columns
* @row: Active rows
+ * @s_data_mode: NORMAL, STATIC or PRBS mode for all Slave ports
+ * @m_data_mode: NORMAL, STATIC or PRBS mode for all Master ports. The value
+ * should be the same to detect transmission issues, but can be different to
+ * test the interrupt reports
*/
struct sdw_bus_params {
enum sdw_reg_bank curr_bank;
@@ -549,6 +614,8 @@ struct sdw_bus_params {
unsigned int bandwidth;
unsigned int col;
unsigned int row;
+ int s_data_mode;
+ int m_data_mode;
};
/**
@@ -560,6 +627,7 @@ struct sdw_bus_params {
* @update_status: Update Slave status
* @bus_config: Update the bus config for Slave
* @port_prep: Prepare the port with parameters
+ * @clk_stop: handle imp-def sequences before and after prepare and de-prepare
*/
struct sdw_slave_ops {
int (*read_prop)(struct sdw_slave *sdw);
@@ -572,7 +640,6 @@ struct sdw_slave_ops {
int (*port_prep)(struct sdw_slave *slave,
struct sdw_prepare_ch *prepare_ch,
enum sdw_port_prep_ops pre_ops);
- int (*get_clk_stop_mode)(struct sdw_slave *slave);
int (*clk_stop)(struct sdw_slave *slave,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type);
@@ -583,19 +650,17 @@ struct sdw_slave_ops {
* struct sdw_slave - SoundWire Slave
* @id: MIPI device ID
* @dev: Linux device
+ * @irq: IRQ number
* @status: Status reported by the Slave
* @bus: Bus handle
- * @ops: Slave callback ops
* @prop: Slave properties
* @debugfs: Slave debugfs
* @node: node for bus list
* @port_ready: Port ready completion flag for each Slave port
+ * @m_port_map: static Master port map for each Slave port
* @dev_num: Current Device Number, values can be 0 or dev_num_sticky
* @dev_num_sticky: one-time static Device Number assigned by Bus
* @probed: boolean tracking driver state
- * @probe_complete: completion utility to control potential races
- * on startup between driver probe/initialization and SoundWire
- * Slave state changes/implementation-defined interrupts
* @enumeration_complete: completion utility to control potential races
* on startup between device enumeration and read/write access to the
* Slave device
@@ -606,27 +671,34 @@ struct sdw_slave_ops {
* between the Master suspending and the codec resuming, and make sure that
* when the Master triggered a reset the Slave is properly enumerated and
* initialized
+ * @first_interrupt_done: status flag tracking if the interrupt handling
+ * for a Slave happens for the first time after enumeration
+ * @is_mockup_device: status flag used to squelch errors in the command/control
+ * protocol for SoundWire mockup devices
+ * @sdw_dev_lock: mutex used to protect callbacks/remove races
*/
struct sdw_slave {
struct sdw_slave_id id;
struct device dev;
+ int irq;
enum sdw_slave_status status;
struct sdw_bus *bus;
- const struct sdw_slave_ops *ops;
struct sdw_slave_prop prop;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
struct list_head node;
- struct completion *port_ready;
- enum sdw_clk_stop_mode curr_clk_stop_mode;
+ struct completion port_ready[SDW_MAX_PORTS];
+ unsigned int m_port_map[SDW_MAX_PORTS];
u16 dev_num;
u16 dev_num_sticky;
bool probed;
- struct completion probe_complete;
struct completion enumeration_complete;
struct completion initialization_complete;
u32 unattach_request;
+ bool first_interrupt_done;
+ bool is_mockup_device;
+ struct mutex sdw_dev_lock; /* protect callbacks/remove races */
};
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
@@ -777,35 +849,45 @@ struct sdw_defer {
/**
* struct sdw_master_ops - Master driver ops
* @read_prop: Read Master properties
+ * @override_adr: Override value read from firmware (quirk for buggy firmware)
* @xfer_msg: Transfer message callback
- * @xfer_msg_defer: Defer version of transfer message callback
- * @reset_page_addr: Reset the SCP page address registers
+ * @xfer_msg_defer: Defer version of transfer message callback. The message is handled with the
+ * bus struct @sdw_defer
* @set_bus_conf: Set the bus configuration
* @pre_bank_switch: Callback for pre bank switch
* @post_bank_switch: Callback for post bank switch
+ * @read_ping_status: Read status from PING frames, reported with two bits per Device.
+ * Bits 31:24 are reserved.
+ * @get_device_num: Callback for vendor-specific device_number allocation
+ * @put_device_num: Callback for vendor-specific device_number release
+ * @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
-
+ u64 (*override_adr)
+ (struct sdw_bus *bus, u64 addr);
enum sdw_command_response (*xfer_msg)
(struct sdw_bus *bus, struct sdw_msg *msg);
enum sdw_command_response (*xfer_msg_defer)
- (struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer);
- enum sdw_command_response (*reset_page_addr)
- (struct sdw_bus *bus, unsigned int dev_num);
+ (struct sdw_bus *bus);
int (*set_bus_conf)(struct sdw_bus *bus,
struct sdw_bus_params *params);
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
-
+ u32 (*read_ping_status)(struct sdw_bus *bus);
+ int (*get_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*put_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*new_peripheral_assigned)(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num);
};
/**
* struct sdw_bus - SoundWire bus
* @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
* @md: Master device
- * @link_id: Link id number, can be 0 to N, unique for each Master
+ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
+ * @link_id: Link id number, can be 0 to N, unique for each Controller
* @id: bus system-wide unique id
* @slaves: list of Slaves on this bus
* @assigned: Bitmap for Slave device numbers.
@@ -821,22 +903,31 @@ struct sdw_master_ops {
* is used to compute and program bus bandwidth, clock, frame shape,
* transport and port parameters
* @debugfs: Bus debugfs
+ * @domain: IRQ domain
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
* @bank_switch_timeout: Bank switch timeout computed
* @multi_link: Store bus property that indicates if multi links
* are supported. This flag is populated by drivers after reading
* appropriate firmware (ACPI/DT).
+ * @hw_sync_min_links: Number of links used by a stream above which
+ * hardware-based synchronization is required. This value is only
+ * meaningful if multi_link is set. If set to 1, hardware-based
+ * synchronization will be used even if a stream only uses a single
+ * SoundWire segment.
*/
struct sdw_bus {
struct device *dev;
struct sdw_master_device *md;
+ int controller_id;
unsigned int link_id;
int id;
struct list_head slaves;
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
+ struct lock_class_key bus_lock_key;
struct mutex msg_lock;
+ struct lock_class_key msg_lock_key;
int (*compute_params)(struct sdw_bus *bus);
const struct sdw_master_ops *ops;
const struct sdw_master_port_ops *port_ops;
@@ -846,16 +937,21 @@ struct sdw_bus {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
bool multi_link;
+ int hw_sync_min_links;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
struct fwnode_handle *fwnode);
void sdw_bus_master_delete(struct sdw_bus *bus);
+void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay);
+
/**
* sdw_port_config: Master or Slave Port configuration
*
@@ -941,20 +1037,16 @@ struct sdw_stream_runtime {
struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
void sdw_release_stream(struct sdw_stream_runtime *stream);
+
+int sdw_compute_params(struct sdw_bus *bus);
+
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream);
-int sdw_stream_add_slave(struct sdw_slave *slave,
- struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream);
-int sdw_stream_remove_slave(struct sdw_slave *slave,
- struct sdw_stream_runtime *stream);
int sdw_startup_stream(void *sdw_substream);
int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
@@ -965,11 +1057,111 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus);
int sdw_bus_clk_stop(struct sdw_bus *bus);
int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
-/* messaging and data APIs */
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
+void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+
+#if IS_ENABLED(CONFIG_SOUNDWIRE)
+
+int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ const struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
+int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream);
+/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
-int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+
+#else
+
+static inline int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ const struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+/* messaging and data APIs */
+static inline int sdw_read(struct sdw_slave *slave, u32 addr)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+#endif /* CONFIG_SOUNDWIRE */
#endif /* __SOUNDWIRE_H */
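
A sketch of the FIELD_GET-based decode, splitting a 64-bit DisCo _ADR into the sdw_slave_id fields defined in this header; this mirrors what the sdw_extract_slave_id() declared above is expected to do.

/* Hedged sketch: decode a DisCo _ADR value into its ID fields. */
static void example_decode_adr(u64 addr, struct sdw_slave_id *id)
{
	id->sdw_version	= SDW_VERSION(addr);
	id->unique_id	= SDW_UNIQUE_ID(addr);
	id->mfg_id	= SDW_MFG_ID(addr);
	id->part_id	= SDW_PART_ID(addr);
	id->class_id	= SDW_CLASS_ID(addr);
}
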
diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h
new file mode 100644
index 000000000000..28a4eb77717f
--- /dev/null
+++ b/include/linux/soundwire/sdw_amd.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2023-24 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __SDW_AMD_H
+#define __SDW_AMD_H
+
+#include <linux/acpi.h>
+#include <linux/soundwire/sdw.h>
+
+/* AMD pm_runtime quirk definitions */
+
+/*
+ * Force the clock to stop(ClockStopMode0) when suspend callback
+ * is invoked.
+ */
+#define AMD_SDW_CLK_STOP_MODE 1
+
+/*
+ * Stop the bus when runtime suspend/system level suspend callback
+ * is invoked. If set, a complete bus reset and re-enumeration will
+ * be performed when the bus restarts. In-band wake interrupts are
+ * not supported in this mode.
+ */
+#define AMD_SDW_POWER_OFF_MODE 2
+#define ACP_SDW0 0
+#define ACP_SDW1 1
+#define AMD_SDW_MAX_MANAGER_COUNT 2
+
+struct acp_sdw_pdata {
+ u16 instance;
+ /* mutex to protect acp common register access */
+ struct mutex *acp_sdw_lock;
+};
+
+/**
+ * struct sdw_amd_dai_runtime: AMD sdw dai runtime data
+ *
+ * @name: SoundWire stream name
+ * @stream: stream runtime
+ * @bus: Bus handle
+ * @stream_type: Stream type
+ */
+struct sdw_amd_dai_runtime {
+ char *name;
+ struct sdw_stream_runtime *stream;
+ struct sdw_bus *bus;
+ enum sdw_stream_type stream_type;
+};
+
+/**
+ * struct amd_sdw_manager - amd manager driver context
+ * @bus: bus handle
+ * @dev: linux device
+ * @mmio: SoundWire registers mmio base
+ * @acp_mmio: acp registers mmio base
+ * @amd_sdw_irq_thread: SoundWire manager irq workqueue
+ * @amd_sdw_work: peripheral status work queue
+ * @acp_sdw_lock: mutex to protect acp share register access
+ * @status: peripheral devices status array
+ * @num_din_ports: number of input ports
+ * @num_dout_ports: number of output ports
+ * @cols_index: Column index in frame shape
+ * @rows_index: Rows index in frame shape
+ * @instance: SoundWire manager instance
+ * @quirks: SoundWire manager quirks
+ * @wake_en_mask: wake enable mask per SoundWire manager
+ * @clk_stopped: flag set to true when clock is stopped
+ * @power_mode_mask: flag interprets amd SoundWire manager power mode
+ * @dai_runtime_array: dai runtime array
+ */
+struct amd_sdw_manager {
+ struct sdw_bus bus;
+ struct device *dev;
+
+ void __iomem *mmio;
+ void __iomem *acp_mmio;
+
+ struct work_struct amd_sdw_irq_thread;
+ struct work_struct amd_sdw_work;
+ /* mutex to protect acp common register access */
+ struct mutex *acp_sdw_lock;
+
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+
+ int num_din_ports;
+ int num_dout_ports;
+
+ int cols_index;
+ int rows_index;
+
+ u32 instance;
+ u32 quirks;
+ u32 wake_en_mask;
+ u32 power_mode_mask;
+ bool clk_stopped;
+
+ struct sdw_amd_dai_runtime **dai_runtime_array;
+};
+
+/**
+ * struct sdw_amd_acpi_info - SoundWire AMD information found in ACPI tables
+ * @handle: ACPI controller handle
+ * @count: maximum number of SoundWire manager links supported on the AMD platform.
+ * @link_mask: bit-wise mask listing links enabled by BIOS menu
+ */
+struct sdw_amd_acpi_info {
+ acpi_handle handle;
+ int count;
+ u32 link_mask;
+};
+
+/**
+ * struct sdw_amd_ctx - context allocated by the controller driver probe
+ *
+ * @count: link count
+ * @num_slaves: total number of devices exposed across all enabled links
+ * @link_mask: bit-wise mask listing SoundWire links reported by the
+ * Controller
+ * @ids: array of slave_id, representing Slaves exposed across all enabled
+ * links
+ * @pdev: platform device structure
+ */
+struct sdw_amd_ctx {
+ int count;
+ int num_slaves;
+ u32 link_mask;
+ struct sdw_extended_slave_id *ids;
+ struct platform_device *pdev[AMD_SDW_MAX_MANAGER_COUNT];
+};
+
+/**
+ * struct sdw_amd_res - SoundWire AMD global resource structure,
+ * typically populated by the DSP driver/Legacy driver
+ *
+ * @addr: acp pci device resource start address
+ * @reg_range: ACP register range
+ * @link_mask: bit-wise mask listing links selected by the DSP driver/
+ * legacy driver
+ * @count: link count
+ * @mmio_base: mmio base of SoundWire registers
+ * @handle: ACPI parent handle
+ * @parent: parent device
+ * @dev: device implementing hwparams and free callbacks
+ * @acp_lock: mutex protecting acp common registers access
+ */
+struct sdw_amd_res {
+ u32 addr;
+ u32 reg_range;
+ u32 link_mask;
+ int count;
+ void __iomem *mmio_base;
+ acpi_handle handle;
+ struct device *parent;
+ struct device *dev;
+ /* use to protect acp common registers access */
+ struct mutex *acp_lock;
+};
+
+int sdw_amd_probe(struct sdw_amd_res *res, struct sdw_amd_ctx **ctx);
+
+void sdw_amd_exit(struct sdw_amd_ctx *ctx);
+
+int sdw_amd_get_slave_info(struct sdw_amd_ctx *ctx);
+
+int amd_sdw_scan_controller(struct sdw_amd_acpi_info *info);
+#endif
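
A sketch of the expected handshake between a parent driver and this layer, assuming the parent has already ioremapped the controller and owns the shared ACP lock; all values are illustrative.

/* Hedged sketch: hand resources to the AMD SoundWire layer. */
static int example_amd_sdw_init(struct device *parent, acpi_handle handle,
				void __iomem *mmio, struct mutex *acp_lock,
				struct sdw_amd_ctx **ctx)
{
	struct sdw_amd_res res = {
		.mmio_base = mmio,
		.handle	   = handle,
		.parent	   = parent,
		.dev	   = parent,
		.acp_lock  = acp_lock,
		.count	   = AMD_SDW_MAX_MANAGER_COUNT,
		.link_mask = BIT(ACP_SDW0) | BIT(ACP_SDW1),
	};

	return sdw_amd_probe(&res, ctx);
}
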
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 120ffddc03d2..00bb22d96ae5 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -7,6 +7,175 @@
#include <linux/irqreturn.h>
#include <linux/soundwire/sdw.h>
+/*********************************************************************
+ * cAVS and ACE1.x definitions
+ *********************************************************************/
+
+#define SDW_SHIM_BASE 0x2C000
+#define SDW_ALH_BASE 0x2C800
+#define SDW_SHIM_BASE_ACE 0x38000
+#define SDW_ALH_BASE_ACE 0x24000
+#define SDW_LINK_BASE 0x30000
+#define SDW_LINK_SIZE 0x10000
+
+/* Intel SHIM Registers Definition */
+/* LCAP */
+#define SDW_SHIM_LCAP 0x0
+#define SDW_SHIM_LCAP_LCOUNT_MASK GENMASK(2, 0)
+
+/* LCTL */
+#define SDW_SHIM_LCTL 0x4
+
+#define SDW_SHIM_LCTL_SPA BIT(0)
+#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0)
+#define SDW_SHIM_LCTL_CPA BIT(8)
+#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8)
+
+/* SYNC */
+#define SDW_SHIM_SYNC 0xC
+
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
+#define SDW_SHIM_SYNC_SYNCCPU BIT(15)
+#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
+#define SDW_SHIM_SYNC_CMDSYNC BIT(16)
+#define SDW_SHIM_SYNC_SYNCGO BIT(24)
+
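
As an aside on how these masks are meant to be consumed, a hedged sketch using the <linux/bitfield.h> helpers to program the sync period for a 38.4 MHz clock; the shim pointer and the handshake behaviour of SYNCCPU are assumptions about the controller driver, not guarantees made by this header.

#include <linux/bitfield.h>
#include <linux/io.h>

/* Sketch only: 'shim' is assumed to point at SDW_SHIM_BASE inside the DSP BAR. */
static void example_program_syncprd(void __iomem *shim)
{
	u32 sync = readl(shim + SDW_SHIM_SYNC);

	sync &= ~SDW_SHIM_SYNC_SYNCPRD;
	sync |= FIELD_PREP(SDW_SHIM_SYNC_SYNCPRD, SDW_SHIM_SYNC_SYNCPRD_VAL_38_4);
	sync |= SDW_SHIM_SYNC_SYNCCPU;	/* assumed handshake bit, cleared by hardware */
	writel(sync, shim + SDW_SHIM_SYNC);
}
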
+/* Control stream capabilities and channel mask */
+#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
+#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
+#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
+#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
+#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
+
+/* PCM Stream capabilities */
+#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
+
+#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0)
+#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4)
+#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8)
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
+
+/* PCM Stream Channel Count */
+#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
+
+#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0)
+#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4)
+#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8)
+#define SDW_SHIM_PCMSYCM_DIR BIT(15)
+
+/* IO control */
+#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
+
+#define SDW_SHIM_IOCTL_MIF BIT(0)
+#define SDW_SHIM_IOCTL_CO BIT(1)
+#define SDW_SHIM_IOCTL_COE BIT(2)
+#define SDW_SHIM_IOCTL_DO BIT(3)
+#define SDW_SHIM_IOCTL_DOE BIT(4)
+#define SDW_SHIM_IOCTL_BKE BIT(5)
+#define SDW_SHIM_IOCTL_WPDD BIT(6)
+#define SDW_SHIM_IOCTL_CIBD BIT(8)
+#define SDW_SHIM_IOCTL_DIBD BIT(9)
+
+/* Wake Enable */
+#define SDW_SHIM_WAKEEN 0x190
+
+#define SDW_SHIM_WAKEEN_ENABLE BIT(0)
+
+/* Wake Status */
+#define SDW_SHIM_WAKESTS 0x192
+
+#define SDW_SHIM_WAKESTS_STATUS BIT(0)
+
+/* AC Timing control */
+#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
+
+#define SDW_SHIM_CTMCTL_DACTQE BIT(0)
+#define SDW_SHIM_CTMCTL_DODS BIT(1)
+#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
+
+/* Intel ALH Register definitions */
+#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
+#define SDW_ALH_NUM_STREAMS 64
+
+#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
+#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
+#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
+
+/*********************************************************************
+ * ACE2.x definitions for SHIM registers - only accessible when the
+ * HDAudio extended link LCTL.SPA/CPA = 1.
+ *********************************************************************/
+/* x variable is link index */
+#define SDW_SHIM2_GENERIC_BASE(x) (0x00030000 + 0x8000 * (x))
+#define SDW_IP_BASE(x) (0x00030100 + 0x8000 * (x))
+#define SDW_SHIM2_VS_BASE(x) (0x00036000 + 0x8000 * (x))
+
+/* SHIM2 Generic Registers */
+/* Read-only capabilities */
+#define SDW_SHIM2_LECAP 0x00
+#define SDW_SHIM2_LECAP_HDS BIT(0) /* unset -> Host mode */
+#define SDW_SHIM2_LECAP_MLC GENMASK(3, 1) /* Number of Lanes */
+
+/* PCM Stream capabilities */
+#define SDW_SHIM2_PCMSCAP 0x10
+#define SDW_SHIM2_PCMSCAP_ISS GENMASK(3, 0) /* Input-only streams */
+#define SDW_SHIM2_PCMSCAP_OSS GENMASK(7, 4) /* Output-only streams */
+#define SDW_SHIM2_PCMSCAP_BSS GENMASK(12, 8) /* Bidirectional streams */
+
+/* Read-only PCM Stream Channel Count, y variable is stream */
+#define SDW_SHIM2_PCMSYCHC(y) (0x14 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHC_CS GENMASK(3, 0) /* Channels Supported */
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM2_PCMSYCHM(y) (0x16 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHM_LCHAN GENMASK(3, 0) /* Lowest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_HCHAN GENMASK(7, 4) /* Highest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_STRM GENMASK(13, 8) /* HDaudio stream tag */
+#define SDW_SHIM2_PCMSYCHM_DIR BIT(15) /* HDaudio stream direction */
+
+/* SHIM2 vendor-specific registers */
+#define SDW_SHIM2_INTEL_VS_LVSCTL 0x04
+#define SDW_SHIM2_INTEL_VS_LVSCTL_FCG BIT(26)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_MLCS GENMASK(29, 27)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_DCGD BIT(30)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_ICGD BIT(31)
+
+#define SDW_SHIM2_MLCS_XTAL_CLK 0x0
+#define SDW_SHIM2_MLCS_CARDINAL_CLK 0x1
+#define SDW_SHIM2_MLCS_AUDIO_PLL_CLK 0x2
+#define SDW_SHIM2_MLCS_MCLK_INPUT_CLK 0x3
+#define SDW_SHIM2_MLCS_WOV_RING_OSC_CLK 0x4
+
+#define SDW_SHIM2_INTEL_VS_WAKEEN 0x08
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWE BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_WAKESTS 0x0A
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWS BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_IOCTL 0x0C
+#define SDW_SHIM2_INTEL_VS_IOCTL_MIF BIT(0)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CO BIT(1)
+#define SDW_SHIM2_INTEL_VS_IOCTL_COE BIT(2)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DO BIT(3)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DOE BIT(4)
+#define SDW_SHIM2_INTEL_VS_IOCTL_BKE BIT(5)
+#define SDW_SHIM2_INTEL_VS_IOCTL_WPDD BIT(6)
+#define SDW_SHIM2_INTEL_VS_IOCTL_ODC BIT(7)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CIBD BIT(8)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DIBD BIT(9)
+#define SDW_SHIM2_INTEL_VS_IOCTL_HAMIFD BIT(10)
+
+#define SDW_SHIM2_INTEL_VS_ACTMCTL 0x0E
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE BIT(0)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODS BIT(1)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE BIT(2)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS GENMASK(4, 3)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE BIT(5)
+
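
For orientation, a small sketch reading the lane count of link `link_id` from the read-only LECAP register; `mmio` is assumed to be the controller BAR that the absolute offsets above are relative to, and the 32-bit access width is an assumption of this sketch.

#include <linux/bitfield.h>
#include <linux/io.h>

/* Sketch: number of lanes reported by SHIM2 LECAP for one link. */
static u32 example_shim2_lane_count(void __iomem *mmio, int link_id)
{
	void __iomem *shim = mmio + SDW_SHIM2_GENERIC_BASE(link_id);

	return FIELD_GET(SDW_SHIM2_LECAP_MLC, readl(shim + SDW_SHIM2_LECAP));
}
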
/**
* struct sdw_intel_stream_params_data: configuration passed during
* the @params_stream callback, e.g. for interaction with DSP
@@ -40,6 +209,7 @@ struct sdw_intel_ops {
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
+ int (*trigger)(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai);
};
/**
@@ -58,7 +228,7 @@ struct sdw_intel_acpi_info {
u32 link_mask;
};
-struct sdw_intel_link_res;
+struct sdw_intel_link_dev;
/* Intel clock-stop/pm_runtime quirk definitions */
@@ -94,10 +264,7 @@ struct sdw_intel_link_res;
*/
#define SDW_INTEL_CLK_STOP_BUS_RESET BIT(3)
-struct sdw_intel_slave_id {
- int link_id;
- struct sdw_slave_id id;
-};
+struct hdac_bus;
/**
* struct sdw_intel_ctx - context allocated by the controller
@@ -109,13 +276,15 @@ struct sdw_intel_slave_id {
* Controller
* @num_slaves: total number of devices exposed across all enabled links
* @handle: ACPI parent handle
- * @links: information for each link (controller-specific and kept
+ * @ldev: information for each link (controller-specific and kept
* opaque here)
* @ids: array of slave_id, representing Slaves exposed across all enabled
* links
* @link_list: list to handle interrupts across all links
* @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
* @shim_mask: flags to track initialization of SHIM shared registers
+ * @shim_base: SDW SHIM base offset.
+ * @alh_base: SDW ALH base offset.
*/
struct sdw_intel_ctx {
int count;
@@ -123,17 +292,20 @@ struct sdw_intel_ctx {
u32 link_mask;
int num_slaves;
acpi_handle handle;
- struct sdw_intel_link_res *links;
- struct sdw_intel_slave_id *ids;
+ struct sdw_intel_link_dev **ldev;
+ struct sdw_extended_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
u32 shim_mask;
+ u32 shim_base;
+ u32 alh_base;
};
/**
* struct sdw_intel_res - Soundwire Intel global resource structure,
* typically populated by the DSP driver
*
+ * @hw_ops: abstraction for platform ops
* @count: link count
* @mmio_base: mmio base of SoundWire registers
* @irq: interrupt number
@@ -146,8 +318,15 @@ struct sdw_intel_ctx {
* machine-specific quirks are handled in the DSP driver.
* @clock_stop_quirks: mask array of possible behaviors requested by the
* DSP driver. The quirks are common for all links for now.
+ * @shim_base: SDW SHIM base offset.
+ * @alh_base: SDW ALH base offset.
+ * @ext: extended HDaudio link support
+ * @hbus: hdac_bus pointer, needed for power management
+ * @eml_lock: mutex protecting shared registers in the HDaudio multi-link
+ * space
*/
struct sdw_intel_res {
+ const struct sdw_intel_hw_ops *hw_ops;
int count;
void __iomem *mmio_base;
int irq;
@@ -157,6 +336,11 @@ struct sdw_intel_res {
struct device *dev;
u32 link_mask;
u32 clock_stop_quirks;
+ u32 shim_base;
+ u32 alh_base;
+ bool ext;
+ struct hdac_bus *hbus;
+ struct mutex *eml_lock;
};
/*
@@ -183,8 +367,72 @@ int sdw_intel_startup(struct sdw_intel_ctx *ctx);
void sdw_intel_exit(struct sdw_intel_ctx *ctx);
-void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable);
-
irqreturn_t sdw_intel_thread(int irq, void *dev_id);
+#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
+
+struct sdw_intel;
+
+/**
+ * struct sdw_intel_hw_ops - SoundWire ops for Intel platforms.
+ * @debugfs_init: initialize all debugfs capabilities
+ * @debugfs_exit: close and cleanup debugfs capabilities
+ * @register_dai: read all PDI information and register DAIs
+ * @check_clock_stop: log an error message if the clock is not stopped.
+ * @start_bus: normal start
+ * @start_bus_after_reset: start after reset
+ * @start_bus_after_clock_stop: start after mode0 clock stop
+ * @stop_bus: stop all bus activity
+ * @link_power_up: power-up using chip-specific helpers
+ * @link_power_down: power-down with chip-specific helpers
+ * @shim_check_wake: check if a wake was received
+ * @shim_wake: enable/disable in-band wake management
+ * @pre_bank_switch: helper for bus management
+ * @post_bank_switch: helper for bus management
+ * @sync_arm: helper for multi-link synchronization
+ * @sync_go_unlocked: helper for multi-link synchronization -
+ * shim_lock is assumed to be locked at higher level
+ * @sync_go: helper for multi-link synchronization
+ * @sync_check_cmdsync_unlocked: helper for multi-link synchronization
+ * and bank switch - shim_lock is assumed to be locked at higher level
+ * @program_sdi: helper for codec command/control based on dev_num
+ */
+struct sdw_intel_hw_ops {
+ void (*debugfs_init)(struct sdw_intel *sdw);
+ void (*debugfs_exit)(struct sdw_intel *sdw);
+
+ int (*register_dai)(struct sdw_intel *sdw);
+
+ void (*check_clock_stop)(struct sdw_intel *sdw);
+ int (*start_bus)(struct sdw_intel *sdw);
+ int (*start_bus_after_reset)(struct sdw_intel *sdw);
+ int (*start_bus_after_clock_stop)(struct sdw_intel *sdw);
+ int (*stop_bus)(struct sdw_intel *sdw, bool clock_stop);
+
+ int (*link_power_up)(struct sdw_intel *sdw);
+ int (*link_power_down)(struct sdw_intel *sdw);
+
+ int (*shim_check_wake)(struct sdw_intel *sdw);
+ void (*shim_wake)(struct sdw_intel *sdw, bool wake_enable);
+
+ int (*pre_bank_switch)(struct sdw_intel *sdw);
+ int (*post_bank_switch)(struct sdw_intel *sdw);
+
+ void (*sync_arm)(struct sdw_intel *sdw);
+ int (*sync_go_unlocked)(struct sdw_intel *sdw);
+ int (*sync_go)(struct sdw_intel *sdw);
+ bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
+
+ void (*program_sdi)(struct sdw_intel *sdw, int dev_num);
+};
+
+extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
+extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
+
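
The split into two exported ops tables suggests that the DSP driver selects one per hardware generation when populating struct sdw_intel_res; a hedged sketch, where the generation predicate is hypothetical:

/* Hypothetical selection helper; the real generation check lives in the DSP driver. */
static const struct sdw_intel_hw_ops *example_select_hw_ops(bool is_ace2x)
{
	return is_ace2x ? &sdw_intel_lnl_hw_ops : &sdw_intel_cnl_hw_ops;
}
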
+/*
+ * IDA min selected to allow for 5 unconstrained devices per link,
+ * and 6 system-unique Device Numbers for wake-capable devices.
+ */
+
+#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+
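
A sketch of the allocation policy the comment above implies: wake-capable peripherals draw system-unique device numbers from a shared IDA starting at the minimum, leaving the numbers below it free per link. SDW_MAX_DEVICES is assumed to be the bus-level limit from <linux/soundwire/sdw.h>.

#include <linux/idr.h>

static DEFINE_IDA(example_peripheral_ida);

/* Sketch: system-unique device number for a wake-capable peripheral. */
static int example_alloc_wake_dev_num(void)
{
	return ida_alloc_range(&example_peripheral_ida, SDW_INTEL_DEV_NUM_IDA_MIN,
			       SDW_MAX_DEVICES, GFP_KERNEL);
}
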
#endif
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index 5d3c271af7d1..138bec908c40 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -5,13 +5,6 @@
#define __SDW_REGISTERS_H
/*
- * typically we define register and shifts but if one observes carefully,
- * the shift can be generated from MASKS using few bit primitaives like ffs
- * etc, so we use that and avoid defining shifts
- */
-#define SDW_REG_SHIFT(n) (ffs(n) - 1)
-
-/*
* SDW registers as defined by MIPI 1.2 Spec
*/
#define SDW_REGADDR GENMASK(14, 0)
@@ -48,6 +41,12 @@
#define SDW_DP0_INT_IMPDEF1 BIT(5)
#define SDW_DP0_INT_IMPDEF2 BIT(6)
#define SDW_DP0_INT_IMPDEF3 BIT(7)
+#define SDW_DP0_INTERRUPTS (SDW_DP0_INT_TEST_FAIL | \
+ SDW_DP0_INT_PORT_READY | \
+ SDW_DP0_INT_BRA_FAILURE | \
+ SDW_DP0_INT_IMPDEF1 | \
+ SDW_DP0_INT_IMPDEF2 | \
+ SDW_DP0_INT_IMPDEF3)
#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2)
#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4)
@@ -248,6 +247,11 @@
#define SDW_DPN_INT_IMPDEF1 BIT(5)
#define SDW_DPN_INT_IMPDEF2 BIT(6)
#define SDW_DPN_INT_IMPDEF3 BIT(7)
+#define SDW_DPN_INTERRUPTS (SDW_DPN_INT_TEST_FAIL | \
+ SDW_DPN_INT_PORT_READY | \
+ SDW_DPN_INT_IMPDEF1 | \
+ SDW_DPN_INT_IMPDEF2 | \
+ SDW_DPN_INT_IMPDEF3)
#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0)
#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2)
@@ -305,4 +309,36 @@
#define SDW_CASC_PORT_MASK_INTSTAT3 1
#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2
+/*
+ * v1.2 device - SDCA address mapping
+ *
+ * Spec definition
+ * Bits Contents
+ * 31 0 (required by addressing range)
+ * 30:26 0b10000 (Control Prefix)
+ * 25 0 (Reserved)
+ * 24:22 Function Number [2:0]
+ * 21 Entity[6]
+ * 20:19 Control Selector[5:4]
+ * 18 0 (Reserved)
+ * 17:15 Control Number[5:3]
+ * 14 Next
+ * 13 MBQ
+ * 12:7 Entity[5:0]
+ * 6:3 Control Selector[3:0]
+ * 2:0 Control Number[2:0]
+ */
+
+#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
+ (((fun) & 0x7) << 22) | \
+ (((ent) & 0x40) << 15) | \
+ (((ent) & 0x3f) << 7) | \
+ (((ctl) & 0x30) << 15) | \
+ (((ctl) & 0x0f) << 3) | \
+ (((ch) & 0x38) << 12) | \
+ ((ch) & 0x07))
+
+#define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13))
+#define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14))
+
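
As a worked example of the mapping above, the address for Function 1, Entity 2, Control Selector 3, Control Number 0 can be composed and then decorated with the MBQ bit for a 16-bit access; the regmap below is a hypothetical, MBQ-aware peripheral regmap.

#include <linux/regmap.h>

/* Sketch: write a 16-bit SDCA control through an assumed MBQ-aware regmap. */
static int example_sdca_write(struct regmap *regmap)
{
	u32 reg = SDW_SDCA_CTL(1, 2, 3, 0);	/* fun=1, ent=2, ctl=3, ch=0 */

	return regmap_write(regmap, SDW_SDCA_MBQ_CTL(reg), 0x1234);
}
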
#endif /* __SDW_REGISTERS_H */
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 52eb66cd11bc..693320b4f5c2 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -4,9 +4,9 @@
#ifndef __SOUNDWIRE_TYPES_H
#define __SOUNDWIRE_TYPES_H
-extern struct bus_type sdw_bus_type;
-extern struct device_type sdw_slave_type;
-extern struct device_type sdw_master_type;
+extern const struct bus_type sdw_bus_type;
+extern const struct device_type sdw_slave_type;
+extern const struct device_type sdw_master_type;
static inline int is_sdw_slave(const struct device *dev)
{
@@ -21,7 +21,7 @@ static inline int is_sdw_slave(const struct device *dev)
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
-int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env);
+int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env);
/**
* module_sdw_driver() - Helper macro for registering a Soundwire driver
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 1a5eaef3b7f2..a04c1c34c344 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -1,17 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/spi/ads7846.h */
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-enum ads7846_filter {
- ADS7846_FILTER_OK,
- ADS7846_FILTER_REPEAT,
- ADS7846_FILTER_IGNORE,
-};
-
struct ads7846_platform_data {
u16 model; /* 7843, 7845, 7846, 7873. */
u16 vref_delay_usecs; /* 0 for external vref; etc */
@@ -46,15 +35,9 @@ struct ads7846_platform_data {
u16 debounce_tol; /* tolerance used for filtering */
u16 debounce_rep; /* additional consecutive good readings
* required after the first two */
- int gpio_pendown; /* the GPIO used to decide the pendown
- * state if get_pendown_state == NULL */
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
- int (*filter_init) (const struct ads7846_platform_data *pdata,
- void **filter_data);
- int (*filter) (void *filter_data, int data_idx, int *val);
- void (*filter_cleanup)(void *filter_data);
void (*wait_for_sync)(void);
bool wakeup;
unsigned long irq_flags;
diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h
index 2d42641499a6..3b74c3750caf 100644
--- a/include/linux/spi/altera.h
+++ b/include/linux/spi/altera.h
@@ -5,13 +5,16 @@
#ifndef __LINUX_SPI_ALTERA_H
#define __LINUX_SPI_ALTERA_H
+#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
+#define ALTERA_SPI_MAX_CS 32
+
/**
* struct altera_spi_platform_data - Platform data of the Altera SPI driver
- * @mode_bits: Mode bits of SPI master.
+ * @mode_bits: Mode bits of SPI host.
* @num_chipselect: Number of chipselects.
* @bits_per_word_mask: bitmask of supported bits_per_word for transfers.
* @num_devices: Number of devices that shall be added when the driver
@@ -26,4 +29,22 @@ struct altera_spi_platform_data {
struct spi_board_info *devices;
};
+struct altera_spi {
+ int irq;
+ int len;
+ int count;
+ int bytes_per_word;
+ u32 imr;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ struct regmap *regmap;
+ u32 regoff;
+ struct device *dev;
+};
+
+extern irqreturn_t altera_spi_irq(int irq, void *dev);
+extern void altera_spi_init_host(struct spi_controller *host);
#endif /* __LINUX_SPI_ALTERA_H */
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
deleted file mode 100644
index d278576ab692..000000000000
--- a/include/linux/spi/at86rf230.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AT86RF230/RF231 driver
- *
- * Copyright (C) 2009-2012 Siemens AG
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#ifndef AT86RF230_H
-#define AT86RF230_H
-
-struct at86rf230_platform_data {
- int rstn;
- int slp_tr;
- int dig2;
- u8 xtal_trim;
-};
-
-#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
deleted file mode 100644
index 449bacf10700..000000000000
--- a/include/linux/spi/cc2520.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Header file for cc2520 radio driver
- *
- * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
- * Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
- * P Sowjanya <sowjanyap@cdac.in>
- */
-
-#ifndef __CC2520_H
-#define __CC2520_H
-
-struct cc2520_platform_data {
- int fifo;
- int fifop;
- int cca;
- int sfd;
- int reset;
- int vreg;
-};
-
-#endif
diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h
index 0b857616919c..fc6c1515dc54 100644
--- a/include/linux/spi/corgi_lcd.h
+++ b/include/linux/spi/corgi_lcd.h
@@ -15,4 +15,6 @@ struct corgi_lcd_platform_data {
void (*kick_battery)(void);
};
+void corgi_lcd_limit_intensity(int limit);
+
#endif /* __LINUX_SPI_CORGI_LCD_H */
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index aceccf9c71fb..1cca3dd5a748 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -14,7 +14,7 @@
struct spi_eeprom {
u32 byte_len;
char name[10];
- u16 page_size; /* for writes */
+ u32 page_size; /* for writes */
u16 flags;
#define EE_ADDR1 0x0001 /* 8 bit addrs */
#define EE_ADDR2 0x0002 /* 16 bit addrs */
diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h
deleted file mode 100644
index 6d19b09139d0..000000000000
--- a/include/linux/spi/ifx_modem.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_IFX_MODEM_H
-#define LINUX_IFX_MODEM_H
-
-struct ifx_modem_platform_data {
- unsigned short tx_pwr; /* modem power threshold */
- unsigned char modem_type; /* Modem type */
- unsigned long max_hz; /* max SPI frequency */
- unsigned short use_dma:1; /* spi protocol driver supplies
- dma-able addrs */
-};
-#define IFX_MODEM_6160 1
-#define IFX_MODEM_6260 2
-
-#endif
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
deleted file mode 100644
index f237b2d062e9..000000000000
--- a/include/linux/spi/lms283gf05.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD
- *
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
-*/
-
-#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
-#define _INCLUDE_LINUX_SPI_LMS283GF05_H_
-
-struct lms283gf05_pdata {
- unsigned long reset_gpio;
- bool reset_inverted;
-};
-
-#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */
diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h
index 433c20e2f46e..e392c53758bc 100644
--- a/include/linux/spi/max7301.h
+++ b/include/linux/spi/max7301.h
@@ -2,7 +2,7 @@
#ifndef LINUX_SPI_MAX7301_H
#define LINUX_SPI_MAX7301_H
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*
* Some registers must be read back to modify.
@@ -31,6 +31,6 @@ struct max7301_platform_data {
u32 input_pullup_active;
};
-extern int __max730x_remove(struct device *dev);
+extern void __max730x_remove(struct device *dev);
extern int __max730x_probe(struct max7301 *ts);
#endif
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index 778ae8eb1f3e..9ad9a06e488d 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -35,16 +35,7 @@ struct mmc_spi_platform_data {
void (*setpower)(struct device *, unsigned int maskval);
};
-#ifdef CONFIG_OF
extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi);
extern void mmc_spi_put_pdata(struct spi_device *spi);
-#else
-static inline struct mmc_spi_platform_data *
-mmc_spi_get_pdata(struct spi_device *spi)
-{
- return spi->dev.platform_data;
-}
-static inline void mmc_spi_put_pdata(struct spi_device *spi) {}
-#endif /* CONFIG_OF */
#endif /* __LINUX_SPI_MMC_SPI_H */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 31f00c7f4f59..ca2cd4e30ead 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -2,25 +2,28 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
-#ifndef __linux_pxa2xx_spi_h
-#define __linux_pxa2xx_spi_h
+#ifndef __LINUX_SPI_PXA2XX_SPI_H
+#define __LINUX_SPI_PXA2XX_SPI_H
-#include <linux/pxa2xx_ssp.h>
+#include <linux/dmaengine.h>
+#include <linux/types.h>
-#define PXA2XX_CS_ASSERT (0x01)
-#define PXA2XX_CS_DEASSERT (0x02)
+#include <linux/pxa2xx_ssp.h>
struct dma_chan;
-/* device.platform_data for SSP controller devices */
+/*
+ * The platform data for SSP controller devices
+ * (resides in device.platform_data).
+ */
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
u8 dma_burst_size;
- bool is_slave;
+ bool is_target;
/* DMA engine specific config */
- bool (*dma_filter)(struct dma_chan *chan, void *param);
+ dma_filter_fn dma_filter;
void *tx_param;
void *rx_param;
@@ -28,8 +31,11 @@ struct pxa2xx_spi_controller {
struct ssp_device ssp;
};
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
+/*
+ * The controller specific data for SPI target devices
+ * (resides in spi_board_info.controller_data),
+ * copied to spi_device.platform_data ... mostly for
+ * DMA tuning.
*/
struct pxa2xx_spi_chip {
u8 tx_threshold;
@@ -37,9 +43,6 @@ struct pxa2xx_spi_chip {
u8 rx_threshold;
u8 dma_burst_size;
u32 timeout;
- u8 enable_loopback;
- int gpio_cs;
- void (*cs_control)(u32 command);
};
#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
@@ -49,4 +52,5 @@ struct pxa2xx_spi_chip {
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
-#endif
+
+#endif /* __LINUX_SPI_PXA2XX_SPI_H */
diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h
deleted file mode 100644
index c91d10b82f08..000000000000
--- a/include/linux/spi/s3c24xx.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - SPI Controller platform_device info
-*/
-
-#ifndef __LINUX_SPI_S3C24XX_H
-#define __LINUX_SPI_S3C24XX_H __FILE__
-
-struct s3c2410_spi_info {
- int pin_cs; /* simple gpio cs */
- unsigned int num_cs; /* total chipselects */
- int bus_num; /* bus number to use. */
-
- unsigned int use_fiq:1; /* use fiq */
-
- void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable);
- void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
-};
-
-extern int s3c24xx_set_fiq(unsigned int irq, bool on);
-
-#endif /* __LINUX_SPI_S3C24XX_H */
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index dc2a0cbd210d..f950d280461b 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -3,8 +3,8 @@
#define __SPI_SH_MSIOF_H__
enum {
- MSIOF_SPI_MASTER,
- MSIOF_SPI_SLAVE,
+ MSIOF_SPI_HOST,
+ MSIOF_SPI_TARGET,
};
struct sh_msiof_spi_info {
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 159463cc659c..f866d5c8ed32 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -89,6 +89,7 @@ enum spi_mem_data_dir {
* @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dtr: whether the data should be sent in DTR mode or not
+ * @data.ecc: whether error correction is required or not
* @data.dir: direction of the transfer
* @data.nbytes: number of data bytes to send/receive. Can be zero if the
* operation does not involve transferring data
@@ -100,6 +101,7 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u16 opcode;
} cmd;
@@ -107,6 +109,7 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u64 val;
} addr;
@@ -114,11 +117,14 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
} dummy;
struct {
u8 buswidth;
u8 dtr : 1;
+ u8 ecc : 1;
+ u8 __pad : 6;
enum spi_mem_data_dir dir;
unsigned int nbytes;
union {
@@ -223,10 +229,12 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
/**
* struct spi_controller_mem_ops - SPI memory operations
* @adjust_op_size: shrink the data xfer of an operation to match controller's
- * limitations (can be alignment of max RX/TX size
+ * limitations (can be alignment or max RX/TX size
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * not all drivers provide supports_op(), so exec_op() can return -EOPNOTSUPP
+ * if the op is not supported by the driver/controller
* @get_name: get a custom name for the SPI mem device from the controller.
* This might be needed if the controller driver has been ported
* to use the SPI mem layer and a custom name is used to keep
@@ -250,6 +258,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* the currently mapped area), and the caller of
* spi_mem_dirmap_write() is responsible for calling it again in
* this case.
+ * @poll_status: poll memory device status until (status & mask) == match or
+ * until the timeout has expired. It fills the data buffer with
+ * the last status value.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
@@ -274,9 +285,28 @@ struct spi_controller_mem_ops {
u64 offs, size_t len, void *buf);
ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
+ int (*poll_status)(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms);
};
/**
+ * struct spi_controller_mem_caps - SPI memory controller capabilities
+ * @dtr: Supports DTR operations
+ * @ecc: Supports operations with error correction
+ */
+struct spi_controller_mem_caps {
+ bool dtr;
+ bool ecc;
+};
+
+#define spi_mem_controller_is_capable(ctlr, cap) \
+ ((ctlr)->mem_caps && (ctlr)->mem_caps->cap)
+
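
A controller driver would typically consult these capabilities from its supports_op() hook; a minimal sketch rejecting ECC operations on controllers that do not advertise them:

/* Sketch of a controller supports_op() built on the capability helper. */
static bool example_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (op->data.ecc && !spi_mem_controller_is_capable(ctlr, ecc))
		return false;

	return spi_mem_default_supports_op(mem, op);
}
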
+/**
* struct spi_mem_driver - SPI memory driver
* @spidrv: inherit from a SPI driver
* @probe: probe a SPI memory. Usually where detection/initialization takes
@@ -310,7 +340,6 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
-
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
@@ -333,7 +362,6 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
{
return false;
}
-
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
@@ -360,6 +388,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms);
+
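
A usage sketch of the polling helper: wait until an assumed busy bit (bit 0 of a hypothetical 0x05 status read, typical for SPI NOR but not part of this API) clears, with a 20 us poll interval and a 1 s timeout.

/* Sketch: poll a hypothetical status opcode until the busy bit clears. */
static int example_wait_ready(struct spi_mem *mem)
{
	u8 status;
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(1, &status, 1));

	/* mask=BIT(0), match=0: done when the busy bit reads back as zero. */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 20, 1000);
}
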
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 99380c0825db..c459809efee4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,30 +6,42 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
-#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
-#include <linux/gpio/consumer.h>
-#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
+
+#include <uapi/linux/spi/spi.h>
+
+/* Max no. of CS supported per spi device */
+#define SPI_CS_CNT_MAX 16
struct dma_chan;
-struct property_entry;
+struct software_node;
+struct ptp_system_timestamp;
struct spi_controller;
struct spi_transfer;
struct spi_controller_mem_ops;
+struct spi_controller_mem_caps;
+struct spi_message;
/*
* INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
* and SPI infrastructure.
*/
-extern struct bus_type spi_bus_type;
+extern const struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
- * @lock: lock protecting this structure
+ * @syncp: seqcount to protect members in this struct for per-cpu update
+ * on 32-bit systems
*
* @messages: number of spi-messages handled
* @transfers: number of spi_transfers handled
@@ -47,48 +59,55 @@ extern struct bus_type spi_bus_type;
* @bytes_rx: number of bytes received from device
*
* @transfer_bytes_histo:
- * transfer bytes histogramm
+ * transfer bytes histogram
*
* @transfers_split_maxsize:
* number of transfers that have been split because of
* maxsize limit
*/
struct spi_statistics {
- spinlock_t lock; /* lock for the whole structure */
+ struct u64_stats_sync syncp;
- unsigned long messages;
- unsigned long transfers;
- unsigned long errors;
- unsigned long timedout;
+ u64_stats_t messages;
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t timedout;
- unsigned long spi_sync;
- unsigned long spi_sync_immediate;
- unsigned long spi_async;
+ u64_stats_t spi_sync;
+ u64_stats_t spi_sync_immediate;
+ u64_stats_t spi_async;
- unsigned long long bytes;
- unsigned long long bytes_rx;
- unsigned long long bytes_tx;
+ u64_stats_t bytes;
+ u64_stats_t bytes_rx;
+ u64_stats_t bytes_tx;
#define SPI_STATISTICS_HISTO_SIZE 17
- unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+ u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
- unsigned long transfers_split_maxsize;
+ u64_stats_t transfers_split_maxsize;
};
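
Reading the counters back follows the usual u64_stats pattern; a hedged sketch summing one field over all CPUs (the exact fetch_begin/fetch_retry variant depends on kernel version):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Sketch: aggregate the per-cpu 'messages' counter. */
static u64 example_sum_messages(struct spi_statistics __percpu *pcpu_stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct spi_statistics *stats = per_cpu_ptr(pcpu_stats, cpu);
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			val = u64_stats_read(&stats->messages);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += val;
	}
	return total;
}
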
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr);
-
-#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
- do { \
- unsigned long flags; \
- spin_lock_irqsave(&(stats)->lock, flags); \
- (stats)->field += count; \
- spin_unlock_irqrestore(&(stats)->lock, flags); \
+#define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_add(&__lstats->field, count); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
} while (0)
-#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
- SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+#define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_inc(&__lstats->field); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
+ } while (0)
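
Call sites stay one-liners with these macros; a sketch of how the core might bump the per-cpu counters on both the controller and the device:

/* Sketch: increment per-cpu message counters from a message-issue path. */
static void example_count_message(struct spi_controller *ctlr, struct spi_device *spi)
{
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, messages);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, messages);
}
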
/**
* struct spi_delay - SPI delay information
@@ -105,16 +124,18 @@ struct spi_delay {
extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer);
/**
* struct spi_device - Controller side proxy for an SPI slave device
* @dev: Driver model representation of the device.
* @controller: SPI controller used with the device.
- * @master: Copy of controller, for backwards compatibility.
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
+ * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
+ * the corresponding physical CS for logical CS i.
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
* The "active low" default for chipselect mode can be overridden
@@ -137,15 +158,19 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* for driver coldplugging, and in uevents used for hotplugging
* @driver_override: If the name of a driver is written to this attribute, then
* the device will bind to the named driver and only the named driver.
- * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
- * not using a GPIO line) use cs_gpiod in new drivers by opting in on
- * the spi_master.
- * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * Do not set directly, because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
- *
- * @statistics: statistics for the spi_device
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
+ * @pcpu_statistics: statistics for the spi_device
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -159,46 +184,57 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
struct spi_device {
struct device dev;
struct spi_controller *controller;
- struct spi_controller *master; /* compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
+ u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
+#define SPI_NO_TX BIT(31) /* No transmit wire */
+#define SPI_NO_RX BIT(30) /* No receive wire */
+ /*
+ * TPM specification defines flow control over SPI. Client device
+ * can insert a wait state on MISO when address is transmitted by
+ * controller on MOSI. Detecting the wait state in software is only
+ * possible for full duplex controllers. For controllers that support
+ * only half-duplex, the wait state detection needs to be implemented
+ * in hardware. TPM devices would set this flag when hardware flow
+ * control is expected from SPI controller.
+ */
+#define SPI_TPM_HW_FLOW BIT(29) /* TPM HW flow control */
+ /*
+ * All bits defined above should be covered by SPI_MODE_KERNEL_MASK.
+ * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart,
+ * which is defined in 'include/uapi/linux/spi/spi.h'.
+ * The bits defined here are from bit 31 downwards, while in
+ * SPI_MODE_USER_MASK are from 0 upwards.
+ * These bits must not overlap. A static assert check should make sure of that.
+ * If adding extra bits, make sure to decrease the bit index below as well.
+ */
+#define SPI_MODE_KERNEL_MASK (~(BIT(29) - 1))
u32 mode;
-#define SPI_CPHA 0x01 /* clock phase */
-#define SPI_CPOL 0x02 /* clock polarity */
-#define SPI_MODE_0 (0|0) /* (original MicroWire) */
-#define SPI_MODE_1 (0|SPI_CPHA)
-#define SPI_MODE_2 (SPI_CPOL|0)
-#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
-#define SPI_CS_HIGH 0x04 /* chipselect active high? */
-#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
-#define SPI_3WIRE 0x10 /* SI/SO signals shared */
-#define SPI_LOOP 0x20 /* loopback mode */
-#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
-#define SPI_READY 0x80 /* slave pulls low to pause */
-#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
-#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
-#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
-#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
-#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
-#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
-#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
-#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- int cs_gpio; /* LEGACY: chip select gpio */
- struct gpio_desc *cs_gpiod; /* chip select gpio desc */
- struct spi_delay word_delay; /* inter-word delay */
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+ struct spi_delay word_delay; /* Inter-word delay */
+ /* CS delays */
+ struct spi_delay cs_setup;
+ struct spi_delay cs_hold;
+ struct spi_delay cs_inactive;
- /* the statistics */
- struct spi_statistics statistics;
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
+
+ /* Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
+ * multiple chip selects and memories are connected in parallel,
+ * more than one bit needs to be set in cs_index_mask.
+ */
+ u32 cs_index_mask : SPI_CS_CNT_MAX;
/*
- * likely need more hooks for more protocol options affecting how
+ * Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
@@ -207,12 +243,16 @@ struct spi_device {
*/
};
-static inline struct spi_device *to_spi_device(struct device *dev)
+/* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */
+static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
+ "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
+
+static inline struct spi_device *to_spi_device(const struct device *dev)
{
return dev ? container_of(dev, struct spi_device, dev) : NULL;
}
-/* most drivers won't need to care about device refcounting */
+/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
return (spi && get_device(&spi->dev)) ? spi : NULL;
@@ -225,7 +265,7 @@ static inline void spi_dev_put(struct spi_device *spi)
}
/* ctldata is for the bus_controller driver's runtime state */
-static inline void *spi_get_ctldata(struct spi_device *spi)
+static inline void *spi_get_ctldata(const struct spi_device *spi)
{
return spi->controller_state;
}
@@ -235,29 +275,57 @@ static inline void spi_set_ctldata(struct spi_device *spi, void *state)
spi->controller_state = state;
}
-/* device driver data */
+/* Device driver data */
static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
dev_set_drvdata(&spi->dev, data);
}
-static inline void *spi_get_drvdata(struct spi_device *spi)
+static inline void *spi_get_drvdata(const struct spi_device *spi)
{
return dev_get_drvdata(&spi->dev);
}
-struct spi_message;
-struct spi_transfer;
+static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
+{
+ return spi->chip_select[idx];
+}
+
+static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
+{
+ spi->chip_select[idx] = chipselect;
+}
+
+static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
+{
+ return spi->cs_gpiod[idx];
+}
+
+static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
+{
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
+}
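
Drivers are expected to go through these accessors rather than touching the arrays directly; a sketch of a set_cs() handler that drives either the GPIO or a hypothetical native chip-select register for logical CS 0:

/* Hypothetical hardware hook standing in for a native chip-select register write. */
static void example_hw_set_cs(u8 cs, bool enable)
{
}

static void example_set_cs(struct spi_device *spi, bool enable)
{
	struct gpio_desc *cs_gpiod = spi_get_csgpiod(spi, 0);

	if (cs_gpiod)
		gpiod_set_value(cs_gpiod, enable);	/* GPIO-backed chip select */
	else
		example_hw_set_cs(spi_get_chipselect(spi, 0), enable);
}
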
/**
* struct spi_driver - Host side "protocol" driver
* @id_table: List of SPI devices supported by this driver
- * @probe: Binds this driver to the spi device. Drivers can verify
+ * @probe: Binds this driver to the SPI device. Drivers can verify
* that the device is actually present, and may need to configure
* characteristics (such as bits_per_word) which weren't needed for
* the initial configuration done during system setup.
- * @remove: Unbinds this driver from the spi device
+ * @remove: Unbinds this driver from the SPI device
* @shutdown: Standard shutdown callback used during system state
* transitions such as powerdown/halt and kexec
* @driver: SPI device drivers should initialize the name and owner
@@ -278,7 +346,7 @@ struct spi_transfer;
struct spi_driver {
const struct spi_device_id *id_table;
int (*probe)(struct spi_device *spi);
- int (*remove)(struct spi_device *spi);
+ void (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
struct device_driver driver;
};
@@ -301,7 +369,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
-/* use a define to avoid include chaining to get THIS_MODULE */
+extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+
+/* Use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
@@ -339,11 +409,14 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @slave: indicates that this is an SPI slave controller
+ * @target: indicates that this is an SPI target controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @max_message_size: function that returns the max message size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @io_mutex: mutex for physical bus access
+ * @add_lock: mutex to avoid adding devices to the same chipselect
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -358,18 +431,28 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
+ * @dma_map_dev: device which can be used for DMA mapping
+ * @cur_rx_dma_dev: device which is currently used for RX DMA mapping
+ * @cur_tx_dma_dev: device which is currently used for TX DMA mapping
* @queued: whether this controller is providing an internal message queue
* @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
+ * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
- * @idling: the device is entering idle state
* @cur_msg: the currently in-flight message
- * @cur_msg_prepared: spi_prepare_message was called for the currently
- * in-flight message
+ * @cur_msg_completion: a completion for the current in-flight message
+ * @cur_msg_incomplete: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to check if the driver has
+ * already called spi_finalize_current_message().
+ * @cur_msg_need_completion: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to signal the context that
+ * is running spi_finalize_current_message() that it needs to complete()
* @cur_msg_mapped: message has been mapped for DMA
- * @last_cs_enable: was enable true on the last call to set_cs.
+ * @fallback: fallback to PIO if a DMA transfer returns failure with
+ * SPI_TRANS_FAIL_NO_START.
 * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
+ * @last_cs: the last chip_select recorded by set_cs; -1 when no chip
+ * is selected
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -392,6 +475,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
*
* @set_cs: set the logic level of the chip select line. May be called
* from interrupt context.
+ * @optimize_message: optimize the message for reuse
+ * @unoptimize_message: release resources allocated by optimize_message
* @prepare_message: set up the controller to transfer a single message,
* for example doing DMA mapping. Called from threaded
* context.
@@ -401,40 +486,35 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the flag SPI_TRANS_FAIL_IO to
+ * spi_transfer->error first, before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
* This field is optional and should only be implemented if the
* controller has native support for memory like operations.
+ * @mem_caps: controller capabilities for the handling of memory operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- * deasserted. If @cs_change_delay is used from @spi_transfer, then the
- * two delays will be added up.
- * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
- * CS number. Any individual value may be -ENOENT for CS lines that
- * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
- * in new drivers.
- * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+ * @target_abort: abort the ongoing transfer request on an SPI target controller
+ * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
- * GPIO descriptors rather than using global GPIO numbers grabbed by the
- * driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
- * and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have
+ * the cs_gpiod assigned if a GPIO line is found for the chipselect.
* @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
* fill in this field with the first unused native CS, to be used by SPI
* controller drivers that need to drive a native CS when using GPIO CS.
* @max_native_cs: When cs_gpiods is used, and this field is filled in,
* spi_register_controller() will validate all native CS (including the
* unused native CS) against this value.
- * @statistics: statistics for the spi_controller
+ * @pcpu_statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -449,8 +529,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
* @irq_flags: Interrupt enable state during PTP system timestamping
- * @fallback: fallback to pio if dma transfer return failure with
- * SPI_TRANS_FAIL_NO_START.
+ * @queue_empty: signal green light for opportunistically skipping the queue
+ * for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -468,20 +549,22 @@ struct spi_controller {
struct list_head list;
- /* other than negative (== assign one dynamically), bus_num is fully
- * board-specific. usually that simplifies to being SOC-specific.
- * example: one SOC has three SPI controllers, numbered 0..2,
- * and one board's schematics might show it using SPI-2. software
+ /*
+ * Other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. Usually that simplifies to being SoC-specific.
+ * example: one SoC has three SPI controllers, numbered 0..2,
+ * and one board's schematics might show it using SPI-2. Software
* would normally use bus_num=2 for that controller.
*/
s16 bus_num;
- /* chipselects will be integral to many controllers; some others
+ /*
+ * Chipselects will be integral to many controllers; some others
* might use board-specific GPIOs.
*/
u16 num_chipselect;
- /* some SPI controllers pose alignment requirements on DMAable
+ /* Some SPI controllers pose alignment requirements on DMAable
* buffers; let protocol drivers know about these requirements.
*/
u16 dma_alignment;
@@ -492,31 +575,43 @@ struct spi_controller {
/* spi_device.mode flags override flags for this controller */
u32 buswidth_override_bits;
- /* bitmask of supported bits_per_word for transfers */
+ /* Bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
- /* limits on transfer speed */
+ /* Limits on transfer speed */
u32 min_speed_hz;
u32 max_speed_hz;
- /* other constraints relevant to this driver */
+ /* Other constraints relevant to this driver */
u16 flags;
-#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
-#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
-#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
-#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
-#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
+#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */
+#define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */
+#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
+#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
+#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
+#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The spi-controller has multi chip select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
-#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+ /* Flag indicating if the allocation of this struct is devres-managed */
+ bool devm_allocated;
- /* flag indicating this is an SPI slave controller */
- bool slave;
+ union {
+ /* Flag indicating this is an SPI slave controller */
+ bool slave;
+ /* Flag indicating this is an SPI target controller */
+ bool target;
+ };
/*
- * on some hardware transfer / message size may be constrained
- * the limit may depend on device transfer settings
+ * On some hardware, transfer/message size may be constrained;
+ * the limit may depend on device transfer settings.
*/
size_t (*max_transfer_size)(struct spi_device *spi);
size_t (*max_message_size)(struct spi_device *spi);
@@ -524,14 +619,18 @@ struct spi_controller {
/* I/O mutex */
struct mutex io_mutex;
- /* lock and mutex for SPI bus locking */
+ /* Used to avoid adding the same CS twice */
+ struct mutex add_lock;
+
+ /* Lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
- /* flag indicating that the SPI bus is locked for exclusive use */
+ /* Flag indicating that the SPI bus is locked for exclusive use */
bool bus_lock_flag;
- /* Setup mode and clock, etc (spi driver may call many times).
+ /*
+ * Setup mode and clock, etc (SPI driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
* device are active. DO NOT UPDATE SHARED REGISTERS in ways
@@ -547,21 +646,21 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
- int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
- struct spi_delay *hold, struct spi_delay *inactive);
+ int (*set_cs_timing)(struct spi_device *spi);
- /* bidirectional bulk transfers
+ /*
+ * Bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
* just to add the message to the queue.
* + For now there's no remove-from-queue operation, or
* any other request management
- * + To a given spi_device, message queueing is pure fifo
+ * + To a given spi_device, message queueing is pure FIFO
*
* + The controller's main job is to process its message queue,
* selecting a chip (for masters), then transferring data
* + If there are multiple spi_device children, the i/o queue
- * arbitration algorithm is unspecified (round robin, fifo,
+ * arbitration algorithm is unspecified (round robin, FIFO,
* priority, reservations, preemption, etc)
*
* + Chipselect stays active during the entire message
@@ -572,7 +671,7 @@ struct spi_controller {
int (*transfer)(struct spi_device *spi,
struct spi_message *mesg);
- /* called on release() to free memory provided by spi_controller */
+ /* Called on release() to free memory provided by spi_controller */
void (*cleanup)(struct spi_device *spi);
/*
@@ -585,6 +684,9 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
+ struct device *dma_map_dev;
+ struct device *cur_rx_dma_dev;
+ struct device *cur_tx_dma_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -598,19 +700,23 @@ struct spi_controller {
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
- bool idling;
+ struct completion cur_msg_completion;
+ bool cur_msg_incomplete;
+ bool cur_msg_need_completion;
bool busy;
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_prepared;
bool cur_msg_mapped;
- bool last_cs_enable;
- bool last_cs_mode_high;
bool fallback;
+ bool last_cs_mode_high;
+ s8 last_cs[SPI_CS_CNT_MAX];
+ u32 last_cs_index_mask : SPI_CS_CNT_MAX;
struct completion xfer_completion;
size_t max_dma_len;
+ int (*optimize_message)(struct spi_message *msg);
+ int (*unoptimize_message)(struct spi_message *msg);
int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
int (*transfer_one_message)(struct spi_controller *ctlr,
struct spi_message *mesg);
@@ -619,11 +725,14 @@ struct spi_controller {
struct spi_message *message);
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
- int (*slave_abort)(struct spi_controller *ctlr);
+ union {
+ int (*slave_abort)(struct spi_controller *ctlr);
+ int (*target_abort)(struct spi_controller *ctlr);
+ };
/*
* These hooks are for drivers that use a generic implementation
- * of transfer_one_message() provied by the core.
+ * of transfer_one_message() provided by the core.
*/
void (*set_cs)(struct spi_device *spi, bool enable);
int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
@@ -633,27 +742,22 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
+ const struct spi_controller_mem_caps *mem_caps;
- /* CS delays */
- struct spi_delay cs_setup;
- struct spi_delay cs_hold;
- struct spi_delay cs_inactive;
-
- /* gpio chip select */
- int *cs_gpios;
+ /* GPIO chip select */
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
- u8 unused_native_cs;
- u8 max_native_cs;
+ s8 unused_native_cs;
+ s8 max_native_cs;
- /* statistics */
- struct spi_statistics statistics;
+ /* Statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
- /* dummy data for full duplex devices */
+ /* Dummy data for full duplex devices */
void *dummy_rx;
void *dummy_tx;
@@ -667,6 +771,10 @@ struct spi_controller {
/* Interrupt enable state during PTP system timestamping */
unsigned long irq_flags;
+
+ /* Flag for enabling opportunistic skipping of the queue in spi_sync */
+ bool queue_empty;
+ bool must_async;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -698,6 +806,11 @@ static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
}
+static inline bool spi_controller_is_target(struct spi_controller *ctlr)
+{
+ return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
+}
+
/* PM calls that need to be issued by the driver */
extern int spi_controller_suspend(struct spi_controller *ctlr);
extern int spi_controller_resume(struct spi_controller *ctlr);
@@ -715,7 +828,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off);
-/* the spi driver core manages memory for the spi_controller classdev */
+/* The SPI driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
@@ -734,12 +847,67 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
return __spi_alloc_controller(host, size, true);
}
+static inline struct spi_controller *spi_alloc_host(struct device *dev,
+ unsigned int size)
+{
+ return __spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *spi_alloc_target(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __spi_alloc_controller(dev, size, true);
+}
+
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave);
+
+static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
+ unsigned int size)
+{
+ return __devm_spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __devm_spi_alloc_controller(dev, size, true);
+}
+
+static inline struct spi_controller *devm_spi_alloc_host(struct device *dev,
+ unsigned int size)
+{
+ return __devm_spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *devm_spi_alloc_target(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __devm_spi_alloc_controller(dev, size, true);
+}
+
extern int spi_register_controller(struct spi_controller *ctlr);
extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
-extern struct spi_controller *spi_busnum_to_master(u16 busnum);
+#if IS_ENABLED(CONFIG_ACPI)
+extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
+extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index);
+int acpi_spi_count_resources(struct acpi_device *adev);
+#endif
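
For orientation, a minimal probe() sketch using the allocation and registration helpers above (devm_spi_alloc_host() and devm_spi_register_controller()); struct my_spi, my_spi_transfer_one() and the platform-device context are hypothetical, not part of this patch:

    /* Hypothetical driver state; not part of the patch. */
    struct my_spi {
    	void __iomem *regs;
    };

    static int my_spi_probe(struct platform_device *pdev)
    {
    	struct spi_controller *ctlr;

    	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct my_spi));
    	if (!ctlr)
    		return -ENOMEM;

    	ctlr->num_chipselect = 1;
    	ctlr->transfer_one = my_spi_transfer_one;	/* hypothetical hook */

    	/* Controller is unregistered (and freed) automatically on unbind. */
    	return devm_spi_register_controller(&pdev->dev, ctlr);
    }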
/*
* SPI resource management while processing a SPI message
@@ -750,29 +918,20 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
void *res);
/**
- * struct spi_res - spi resource management structure
+ * struct spi_res - SPI resource management structure
* @entry: list entry
* @release: release code called prior to freeing this resource
* @data: extra data allocated for the specific use-case
*
- * this is based on ideas from devres, but focused on life-cycle
- * management during spi_message processing
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
*/
struct spi_res {
struct list_head entry;
spi_res_release_t release;
- unsigned long long data[]; /* guarantee ull alignment */
+ unsigned long long data[]; /* Guarantee ull alignment */
};
-extern void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp);
-extern void spi_res_add(struct spi_message *message, void *res);
-extern void spi_res_free(void *res);
-
-extern void spi_res_release(struct spi_controller *ctlr,
- struct spi_message *message);
-
/*---------------------------------------------------------------------------*/
/*
@@ -783,7 +942,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
*
* The spi_messages themselves consist of a series of read+write transfer
* segments. Those segments always read the same number of bits as they
- * write; but one or the other is easily ignored by passing a null buffer
+ * write; but one or the other is easily ignored by passing a NULL buffer
* pointer. (This is unlike most types of I/O API, because SPI hardware
* is full duplex.)
*
@@ -794,8 +953,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
/**
* struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe memory), or NULL
- * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_buf: data to be written (DMA-safe memory), or NULL
+ * @rx_buf: data to be read (DMA-safe memory), or NULL
* @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
* @tx_nbits: number of bits used for writing. If 0 the default
@@ -807,19 +966,18 @@ extern void spi_res_release(struct spi_controller *ctlr,
* transfer. If 0 the default (from @spi_device) is used.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
+ * @dummy_data: indicates that the transfer is a dummy-bytes transfer.
+ * @cs_off: performs the transfer with chipselect off.
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
* @delay: delay to be introduced after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
- * @delay_usecs: microseconds to delay after this transfer before
- * (optionally) changing the chipselect status, then starting
- * the next transfer or completing this @spi_message.
* @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
- * transfer this transfer. Set to 0 if the spi bus driver does
+ * transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
@@ -848,16 +1006,16 @@ extern void spi_res_release(struct spi_controller *ctlr,
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
* @timestamped: true if the transfer has been timestamped
- * @error: Error status logged by spi controller driver.
+ * @error: Error status logged by SPI controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
* In some cases, they may also want to provide DMA addresses for
* the data being transferred; that may reduce overhead, when the
- * underlying driver uses dma.
+ * underlying driver uses DMA.
*
- * If the transmit buffer is null, zeroes will be shifted out
- * while filling @rx_buf. If the receive buffer is null, the data
+ * If the transmit buffer is NULL, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is NULL, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
@@ -891,7 +1049,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* Some devices need protocol transactions to be built from a series of
* spi_message submissions, where the content of one message is determined
* by the results of previous messages and where the whole transaction
- * ends when the chipselect goes intactive.
+ * ends when the chipselect goes inactive.
*
 * When SPI can transfer in 1x, 2x, or 4x, it can get this transfer information
 * from the device through @tx_nbits and @rx_nbits. In bi-direction, these
@@ -905,28 +1063,35 @@ extern void spi_res_release(struct spi_controller *ctlr,
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
- /* it's ok if tx_buf == rx_buf (right?)
- * for MicroWire, one buffer must be null
- * buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping
+ /*
+ * It's okay if tx_buf == rx_buf (right?).
+ * For MicroWire, one buffer must be NULL.
+ * Buffers must work with dma_*map_single() calls, unless
+ * spi_message.is_dma_mapped reports a pre-existing mapping.
*/
const void *tx_buf;
void *rx_buf;
unsigned len;
+#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
+ u16 error;
+
dma_addr_t tx_dma;
dma_addr_t rx_dma;
struct sg_table tx_sg;
struct sg_table rx_sg;
+ unsigned dummy_data:1;
+ unsigned cs_off:1;
unsigned cs_change:1;
unsigned tx_nbits:3;
unsigned rx_nbits:3;
-#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
-#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
-#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
+ unsigned timestamped:1;
+#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
u8 bits_per_word;
- u16 delay_usecs;
struct spi_delay delay;
struct spi_delay cs_change_delay;
struct spi_delay word_delay;
@@ -939,29 +1104,28 @@ struct spi_transfer {
struct ptp_system_timestamp *ptp_sts;
- bool timestamped;
-
struct list_head transfer_list;
-
-#define SPI_TRANS_FAIL_NO_START BIT(0)
- u16 error;
};
/**
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
+ * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual
* addresses for each transfer buffer
+ * @pre_optimized: peripheral driver pre-optimized the message
+ * @optimized: the message is in the optimized state
+ * @prepared: spi_prepare_message was called for this message
+ * @status: zero for success, else negative errno
* @complete: called to report transaction completions
* @context: the argument to complete() when it's called
* @frame_length: the total number of bytes in the message
* @actual_length: the total number of bytes that were transferred in all
* successful segments
- * @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
- * @resources: for resource management when the spi message is processed
+ * @opt_state: for use by the controller driver between optimize and unoptimize calls
+ * @resources: for resource management when the SPI message is processed
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -984,7 +1148,16 @@ struct spi_message {
unsigned is_dma_mapped:1;
- /* REVISIT: we might want a flag affecting the behavior of the
+ /* spi_optimize_message() was called for this message */
+ bool pre_optimized;
+ /* __spi_optimize_message() was called for this message */
+ bool optimized;
+
+ /* spi_prepare_message() was called for this message */
+ bool prepared;
+
+ /*
+ * REVISIT: we might want a flag affecting the behavior of the
* last transfer ... allowing things like "read 16 bit length L"
* immediately followed by "read L bytes". Basically imposing
* a specific message scheduling algorithm.
@@ -995,21 +1168,27 @@ struct spi_message {
* tell them about such special cases.
*/
- /* completion is reported through a callback */
+ /* Completion is reported through a callback */
+ int status;
void (*complete)(void *context);
void *context;
unsigned frame_length;
unsigned actual_length;
- int status;
- /* for optional use by whatever driver currently owns the
+ /*
+ * For optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_controller controller driver.
*/
struct list_head queue;
void *state;
+ /*
+ * Optional state for use by controller driver between calls to
+ * __spi_optimize_message() and __spi_unoptimize_message().
+ */
+ void *opt_state;
- /* list of spi_res reources when the spi message is processed */
+ /* List of spi_res resources when the SPI message is processed */
struct list_head resources;
};
@@ -1040,21 +1219,13 @@ spi_transfer_del(struct spi_transfer *t)
static inline int
spi_transfer_delay_exec(struct spi_transfer *t)
{
- struct spi_delay d;
-
- if (t->delay_usecs) {
- d.value = t->delay_usecs;
- d.unit = SPI_DELAY_UNIT_USECS;
- return spi_delay_exec(&d, NULL);
- }
-
return spi_delay_exec(&t->delay, t);
}
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
- * @xfers: An array of spi transfers
+ * @xfers: An array of SPI transfers
* @num_xfers: Number of items in the xfer array
*
* This function initializes the given spi_message and adds each spi_transfer in
@@ -1071,26 +1242,27 @@ struct spi_transfer *xfers, unsigned int num_xfers)
spi_message_add_tail(&xfers[i], m);
}
-/* It's fine to embed message and transaction structures in other data
+/*
+ * It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
-
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
- struct spi_message *m;
-
- m = kzalloc(sizeof(struct spi_message)
- + ntrans * sizeof(struct spi_transfer),
- flags);
- if (m) {
- unsigned i;
- struct spi_transfer *t = (struct spi_transfer *)(m + 1);
-
- spi_message_init_no_memset(m);
- for (i = 0; i < ntrans; i++, t++)
- spi_message_add_tail(t, m);
- }
- return m;
+ struct spi_message_with_transfers {
+ struct spi_message m;
+ struct spi_transfer t[];
+ } *mwt;
+ unsigned i;
+
+ mwt = kzalloc(struct_size(mwt, t, ntrans), flags);
+ if (!mwt)
+ return NULL;
+
+ spi_message_init_no_memset(&mwt->m);
+ for (i = 0; i < ntrans; i++)
+ spi_message_add_tail(&mwt->t[i], &mwt->m);
+
+ return &mwt->m;
}
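
To make the embedding comment above concrete, a hedged sketch (struct my_dev is hypothetical); note that spi_message_init() clears only the message itself, so the containing structure is assumed to be zero-initialized, e.g. via kzalloc():

    struct my_dev {				/* hypothetical peripheral state */
    	struct spi_device *spi;
    	struct spi_message msg;
    	struct spi_transfer xfer[2];
    };

    static void my_dev_init_msg(struct my_dev *d, const void *tx, void *rx,
    			    size_t len)
    {
    	spi_message_init(&d->msg);	/* clears the message only */
    	d->xfer[0].tx_buf = tx;
    	d->xfer[0].len = len;
    	spi_message_add_tail(&d->xfer[0], &d->msg);
    	d->xfer[1].rx_buf = rx;
    	d->xfer[1].len = len;
    	spi_message_add_tail(&d->xfer[1], &d->msg);
    }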
static inline void spi_message_free(struct spi_message *m)
@@ -1098,16 +1270,13 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-extern int spi_set_cs_timing(struct spi_device *spi,
- struct spi_delay *setup,
- struct spi_delay *hold,
- struct spi_delay *inactive);
+extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg);
+extern void spi_unoptimize_message(struct spi_message *msg);
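
A minimal sketch of the optimize/unoptimize pairing declared above, for a message that is submitted repeatedly; my_run_repeated() is hypothetical and error handling is trimmed to the essentials:

    static int my_run_repeated(struct spi_device *spi, struct spi_message *msg,
    			   int iterations)
    {
    	int i, ret;

    	ret = spi_optimize_message(spi, msg);	/* once, up front */
    	if (ret)
    		return ret;

    	for (i = 0; i < iterations; i++) {
    		ret = spi_sync(spi, msg);	/* hot path, already validated */
    		if (ret)
    			break;
    	}

    	spi_unoptimize_message(msg);		/* once, at teardown */
    	return ret;
    }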
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_async_locked(struct spi_device *spi,
- struct spi_message *message);
extern int spi_slave_abort(struct spi_device *spi);
+extern int spi_target_abort(struct spi_device *spi);
static inline size_t
spi_max_message_size(struct spi_device *spi)
@@ -1129,7 +1298,7 @@ spi_max_transfer_size(struct spi_device *spi)
if (ctlr->max_transfer_size)
tr_max = ctlr->max_transfer_size(spi);
- /* transfer size limit must not be greater than messsage size limit */
+ /* Transfer size limit must not be greater than message size limit */
return min(tr_max, msg_max);
}
@@ -1145,7 +1314,7 @@ spi_max_transfer_size(struct spi_device *spi)
*/
static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
{
- u32 bpw_mask = spi->master->bits_per_word_mask;
+ u32 bpw_mask = spi->controller->bits_per_word_mask;
if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw)))
return true;
@@ -1153,6 +1322,23 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
return false;
}
+/**
+ * spi_controller_xfer_timeout - Compute a suitable timeout value
+ * @ctlr: SPI controller
+ * @xfer: Transfer descriptor
+ *
+ * Compute a relevant timeout value for the given transfer. We derive the time
+ * that it would take on a single data line and take twice this amount of time
+ * with a minimum of 500ms to avoid false positives on loaded systems.
+ *
+ * Returns: Transfer timeout value in milliseconds.
+ */
+static inline unsigned int spi_controller_xfer_timeout(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ return max(xfer->len * 8 * 2 / (xfer->speed_hz / 1000), 500U);
+}
+
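
As a worked example of the formula above (numbers invented for illustration): a 64 KiB transfer at 1 MHz yields 65536 * 8 * 2 / (1000000 / 1000) = 1048 ms, roughly one second, while a 1 KiB transfer at the same clock computes to only 16 ms and is therefore raised to the 500 ms floor.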
/*---------------------------------------------------------------------------*/
/* SPI transfer replacement methods which make use of spi_res */
@@ -1166,7 +1352,7 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* replacements that have occurred
* so that they can get reverted
* @release: some extra release code to get executed prior to
- * relasing this structure
+ * releasing this structure
* @extradata: pointer to some extra data if requested or NULL
* @replaced_transfers: transfers that have been replaced and which need
* to get restored
@@ -1176,9 +1362,9 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* @inserted_transfers: array of spi_transfers of array-size @inserted,
* that have been replacing replaced_transfers
*
- * note: that @extradata will point to @inserted_transfers[@inserted]
+ * Note that @extradata will point to @inserted_transfers[@inserted]
* if some extra allocation is requested, so alignment will be the same
- * as for spi_transfers
+ * as for spi_transfers.
*/
struct spi_replaced_transfers {
spi_replaced_release_t release;
@@ -1189,27 +1375,21 @@ struct spi_replaced_transfers {
struct spi_transfer inserted_transfers[];
};
-extern struct spi_replaced_transfers *spi_replace_transfers(
- struct spi_message *msg,
- struct spi_transfer *xfer_first,
- size_t remove,
- size_t insert,
- spi_replaced_release_t release,
- size_t extradatasize,
- gfp_t gfp);
-
/*---------------------------------------------------------------------------*/
/* SPI transfer transformation methods */
extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxsize,
- gfp_t gfp);
+ size_t maxsize);
+extern int spi_split_transfers_maxwords(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ size_t maxwords);
/*---------------------------------------------------------------------------*/
-/* All these synchronous SPI transfer routines are utilities layered
+/*
+ * All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
*/
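
A minimal synchronous-transfer sketch using these utilities (my_read_id() is hypothetical); leaving tx_buf NULL shifts out zeroes, per the spi_transfer documentation above:

    static int my_read_id(struct spi_device *spi, u8 *id, size_t len)
    {
    	struct spi_transfer t = {
    		.rx_buf	= id,	/* tx_buf left NULL: zeroes shift out */
    		.len	= len,
    	};

    	return spi_sync_transfer(spi, &t, 1);
    }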
@@ -1289,7 +1469,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
return spi_sync_transfer(spi, &t, 1);
}
-/* this copies txbuf and rxbuf data; for small transfers only! */
+/* This copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
void *rxbuf, unsigned n_rx);
@@ -1312,7 +1492,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 1);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1337,7 +1517,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 2);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1352,7 +1532,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
*
* Callable only from contexts that can sleep.
*
- * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * Return: the (unsigned) sixteen bit number returned by the device in CPU
* endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
@@ -1380,7 +1560,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* As a rule, SPI devices can't be probed. Instead, board init code
* provides a table listing the devices which are present, with enough
* information to bind and set up the device's driver. There's basic
- * support for nonstatic configurations too; enough to handle adding
+ * support for non-static configurations too; enough to handle adding
* parport adapters, or microcontrollers acting as USB-to-SPI bridges.
*/
@@ -1389,7 +1569,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* @modalias: Initializes spi_device.modalias; identifies the driver.
* @platform_data: Initializes spi_device.platform_data; the particular
* data stored there is driver-specific.
- * @properties: Additional device properties for the device.
+ * @swnode: Software node for the device.
* @controller_data: Initializes spi_device.controller_data; some
* controllers need hints about hardware setup, e.g. for DMA.
* @irq: Initializes spi_device.irq; depends on how the board is wired.
@@ -1417,25 +1597,26 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* are active in some dynamic board configuration models.
*/
struct spi_board_info {
- /* the device name and module name are coupled, like platform_bus;
+ /*
+ * The device name and module name are coupled, like platform_bus;
* "modalias" is normally the driver name.
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
- * device properties are copied and attached to spi_device,
- * irq is copied too
+ * IRQ is copied too.
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
- const struct property_entry *properties;
+ const struct software_node *swnode;
void *controller_data;
int irq;
- /* slower signaling on noisy or low voltage boards */
+ /* Slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
- /* bus_num is board specific and matches the bus_num of some
+ /*
+ * bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
* chip_select reflects how this chip is wired to that master;
@@ -1444,12 +1625,14 @@ struct spi_board_info {
u16 bus_num;
u16 chip_select;
- /* mode becomes spi_device.mode, and is essential for chips
+ /*
+ * mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u32 mode;
- /* ... may need additional spi_device chip config data here.
+ /*
+ * ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
* needed to behave without being bound to a driver:
* - quirks like clock rate mattering when not selected
@@ -1460,19 +1643,20 @@ struct spi_board_info {
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
-/* board init code may ignore whether SPI is configured or not */
+/* Board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
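
To illustrate the board table described above (names and wiring are invented for this sketch, not taken from the patch):

    static struct spi_board_info my_board_info[] __initdata = {
    	{
    		.modalias	= "my-sensor",	/* hypothetical driver */
    		.max_speed_hz	= 1000000,	/* conservative: noisy board */
    		.bus_num	= 0,
    		.chip_select	= 1,
    		.mode		= SPI_MODE_3,
    	},
    };

    /* From board/machine init code: */
    spi_register_board_info(my_board_info, ARRAY_SIZE(my_board_info));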
-/* If you're hotplugging an adapter with devices (parport, usb, etc)
+/*
+ * If you're hotplugging an adapter with devices (parport, USB, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_controller().
*
* You can also use spi_alloc_device() and spi_add_device() to use a two
- * stage registration sequence for each spi_device. This gives the caller
+ * stage registration sequence for each spi_device. This gives the caller
* some more control over the spi_device structure before it is registered,
 * but requires the caller to initialize fields that would otherwise
* be defined using the board info.
@@ -1491,49 +1675,13 @@ extern void spi_unregister_device(struct spi_device *spi);
extern const struct spi_device_id *
spi_get_device_id(const struct spi_device *sdev);
+extern const void *
+spi_get_device_match_data(const struct spi_device *sdev);
+
static inline bool
spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
-/* OF support code */
-#if IS_ENABLED(CONFIG_OF)
-
-/* must call put_device() when done with returned spi_device device */
-extern struct spi_device *
-of_find_spi_device_by_node(struct device_node *node);
-
-#else
-
-static inline struct spi_device *
-of_find_spi_device_by_node(struct device_node *node)
-{
- return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_OF) */
-
-/* Compatibility layer */
-#define spi_master spi_controller
-
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
-#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
-#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
-#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
-
-#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
-#define spi_master_set_devdata(_ctlr, _data) \
- spi_controller_set_devdata(_ctlr, _data)
-#define spi_master_get(_ctlr) spi_controller_get(_ctlr)
-#define spi_master_put(_ctlr) spi_controller_put(_ctlr)
-#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
-#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
-
-#define spi_register_master(_ctlr) spi_register_controller(_ctlr)
-#define devm_spi_register_master(_dev, _ctlr) \
- devm_spi_register_controller(_dev, _ctlr)
-#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
-
#endif /* __LINUX_SPI_H */
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 4444c2a992cb..b930eca2ef7b 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -10,7 +10,7 @@ struct spi_bitbang {
u8 use_dma;
u16 flags; /* extra spi->mode support */
- struct spi_master *master;
+ struct spi_controller *ctlr;
/* setup_transfer() changes clock and/or wordsize to match settings
* for this transfer; zeroes restore defaults from spi_device.
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index 9e7e83d8645b..5f0e1407917a 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -15,8 +15,8 @@
*/
/**
- * struct spi_gpio_platform_data - parameter for bitbanged SPI master
- * @num_chipselect: how many slaves to allow
+ * struct spi_gpio_platform_data - parameter for bitbanged SPI host controller
+ * @num_chipselect: how many target devices to allow
*/
struct spi_gpio_platform_data {
u16 num_chipselect;
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
index c15d69d28e68..3934ce789d87 100644
--- a/include/linux/spi/xilinx_spi.h
+++ b/include/linux/spi/xilinx_spi.h
@@ -15,6 +15,7 @@ struct xspi_platform_data {
u8 bits_per_word;
struct spi_board_info *devices;
u8 num_devices;
+ bool force_irq;
};
#endif /* __LINUX_SPI_XILINX_SPI_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f2f12d746dbd..3fcd20de6ca8 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
+#define __LINUX_INSIDE_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
@@ -12,6 +13,8 @@
* asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
+ * linux/spinlock_types_raw:
+ * The raw types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
@@ -31,6 +34,8 @@
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
+ * linux/spinlock_types_raw:
+ * The raw RT types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
@@ -53,10 +58,10 @@
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
-#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
@@ -76,7 +81,7 @@
#define LOCK_SECTION_END \
".previous\n\t"
-#define __lockfunc __attribute__((section(".spinlock.text")))
+#define __lockfunc __section(".spinlock.text")
/*
* Pull the arch_spinlock_t and arch_rwlock_t definitions:
@@ -168,12 +173,11 @@ do { \
* Architectures that can implement ACQUIRE better need to take care.
*/
#ifndef smp_mb__after_spinlock
-#define smp_mb__after_spinlock() do { } while (0)
+#define smp_mb__after_spinlock() kcsan_mb()
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
@@ -184,18 +188,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
mmiowb_spin_lock();
}
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
- __acquire(lock);
- arch_spin_lock_flags(&lock->raw_lock, *flags);
- mmiowb_spin_lock();
-}
-
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&(lock)->raw_lock);
@@ -308,8 +300,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
1 : ({ local_irq_restore(flags); 0; }); \
})
-/* Include rwlock functions */
+#ifndef CONFIG_PREEMPT_RT
+/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
+#endif
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -320,6 +314,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
+/* Non PREEMPT_RT kernel, map to raw spinlocks: */
+#ifndef CONFIG_PREEMPT_RT
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -454,6 +451,41 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+#else /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
+ * but reflects a general need for low latency.)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return spin_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically this does not depend on CONFIG_PREEMPTION, but reflects
+ * a general need for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
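
As context (not part of the patch), spin_needbreak() is typically used in the classic lock-break pattern; my_more_work() and my_do_one_item() below are hypothetical stand-ins:

    spin_lock(&my_lock);
    while (my_more_work()) {		/* hypothetical work predicate */
    	my_do_one_item();
    	if (spin_needbreak(&my_lock)) {
    		spin_unlock(&my_lock);
    		cond_resched();		/* let the waiter in */
    		spin_lock(&my_lock);
    	}
    }
    spin_unlock(&my_lock);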
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -476,6 +508,15 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
+#define atomic_dec_and_raw_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+
+extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
@@ -493,4 +534,76 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
void free_bucket_spinlocks(spinlock_t *locks);
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+ raw_spin_lock(_T->lock),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
+ raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+ raw_spin_lock_irq(_T->lock),
+ raw_spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+ raw_spin_lock_irqsave(_T->lock, _T->flags),
+ raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+ raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+ spin_lock(_T->lock),
+ spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+ spin_lock_irq(_T->lock),
+ spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ spin_trylock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+ spin_lock_irqsave(_T->lock, _T->flags),
+ spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+ spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+ read_lock(_T->lock),
+ read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+ read_lock_irq(_T->lock),
+ read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+ read_lock_irqsave(_T->lock, _T->flags),
+ read_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+ write_lock(_T->lock),
+ write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+ write_lock_irq(_T->lock),
+ write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+ write_lock_irqsave(_T->lock, _T->flags),
+ write_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
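
For orientation, a hedged sketch of how the guards defined above are consumed; guard() and scoped_guard() come from <linux/cleanup.h> (included earlier in this file), and struct my_obj is a hypothetical type:

    static int my_update(struct my_obj *obj)
    {
    	guard(spinlock_irqsave)(&obj->lock);	/* released on any return */

    	if (!obj->active)
    		return -ENODEV;		/* no explicit unlock needed */
    	obj->count++;
    	return 0;
    }

    /* Block-scoped form: */
    scoped_guard(spinlock, &obj->lock)
    	obj->count++;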
diff --git a/include/linux/spinlock_api.h b/include/linux/spinlock_api.h
new file mode 100644
index 000000000000..6338b27f98df
--- /dev/null
+++ b/include/linux/spinlock_api.h
@@ -0,0 +1 @@
+#include <linux/spinlock.h>
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 19a9be9d97ee..89eb6f4c659c 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * do_raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
- do_raw_spin_lock_flags(lock, &flags);
-#endif
return flags;
}
@@ -187,6 +178,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
+/* PREEMPT_RT has its own rwlock implementation */
+#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock_api_smp.h>
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..819aeba1c87e 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -59,6 +59,7 @@
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 000000000000..61c49b16f69a
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_INSIDE_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key, bool percpu);
+#else
+static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key, bool percpu)
+{
+}
+#endif
+
+#define spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key, false); \
+} while (0)
+
+#define local_spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key, true); \
+} while (0)
+
+extern void rt_spin_lock(spinlock_t *lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
+extern void rt_spin_unlock(spinlock_t *lock);
+extern void rt_spin_lock_unlock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock);
+extern int rt_spin_trylock(spinlock_t *lock);
+
+static __always_inline void spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+# define __spin_lock_nested(lock, subclass) \
+ rt_spin_lock_nested(lock, subclass)
+
+# define __spin_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+# define __spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __spin_lock_nested(lock, subclass); \
+ } while (0)
+
+#else
+ /*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_irqsave_nested(lock, flags, subclass) \
+ spin_lock_irqsave(((void)(subclass), (lock)), flags)
+#endif
+
+#define spin_lock_nested(lock, subclass) \
+ __spin_lock_nested(lock, subclass)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+ __spin_lock_nest_lock(lock, nest_lock)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+ __spin_lock_irqsave_nested(lock, flags, subclass)
+
+static __always_inline void spin_lock_bh(spinlock_t *lock)
+{
+ /* Investigate: Drop bh when blocking ? */
+ local_bh_disable();
+ rt_spin_lock(lock);
+}
+
+static __always_inline void spin_lock_irq(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+
+static __always_inline void spin_unlock(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+ local_bh_enable();
+}
+
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
+ unsigned long flags)
+{
+ rt_spin_unlock(lock);
+}
+
+#define spin_trylock(lock) \
+ __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_bh(lock) \
+ __cond_lock(lock, rt_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock) \
+ __cond_lock(lock, rt_spin_trylock(lock))
+
+#define __spin_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = spin_trylock(lock); \
+ __locked; \
+})
+
+#define spin_trylock_irqsave(lock, flags) \
+ __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
+
+#define spin_is_contended(lock) (((void)(lock), 0))
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_base_is_locked(&lock->lock);
+}
+
+#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
+
+#include <linux/rwlock_rt.h>
+
+#endif
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index b981caafe8bf..2dfa35ffec76 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,65 +9,11 @@
* Released under the General Public License (GPL).
*/
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep_types.h>
+#include <linux/spinlock_types_raw.h>
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RAW_SPIN_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SPIN, \
- }
-# define SPIN_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_CONFIG, \
- }
-#else
-# define RAW_SPIN_DEP_MAP_INIT(lockname)
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- RAW_SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+#ifndef CONFIG_PREEMPT_RT
+/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
typedef struct spinlock {
union {
struct raw_spinlock rlock;
@@ -96,6 +42,35 @@ typedef struct spinlock {
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+/* PREEMPT_RT kernels map spinlock to rt_mutex */
+#include <linux/rtmutex.h>
+
+typedef struct spinlock {
+ struct rt_mutex_base lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED(name) \
+ { \
+ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+ SPIN_DEP_MAP_INIT(name) \
+ }
+
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \
+ { \
+ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+ LOCAL_SPIN_DEP_MAP_INIT(name) \
+ }
+
+#define DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644
index 000000000000..91cb36b65a17
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,73 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep_types.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RAW_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SPIN, \
+ }
+# define SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
+ }
+#else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
+# define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+{ \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ RAW_SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index c09b6407ae1b..7f86a2016ac5 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly"
#endif
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112c1bbe..c87204247592 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5c47013f708e..9dec4861d09f 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -38,6 +38,7 @@ struct splice_desc {
struct file *file; /* file to read/write */
void *data; /* cookie */
} u;
+ void (*splice_eof)(struct splice_desc *sd); /* Unexpected EOF handler */
loff_t pos; /* file position */
loff_t *opos; /* sendfile: output position */
size_t num_spliced; /* number of bytes already spliced */
@@ -67,23 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
- struct splice_desc *, splice_actor *);
-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
-extern ssize_t add_to_pipe(struct pipe_inode_info *,
- struct pipe_buffer *);
-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-extern long do_splice(struct file *in, loff_t __user *off_in,
- struct file *out, loff_t __user *off_out,
- size_t len, unsigned int flags);
+ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags,
+ splice_actor *actor);
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+ struct splice_desc *sd, splice_actor *actor);
+ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf);
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd,
+ splice_direct_actor *actor);
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags);
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len);
-extern long do_tee(struct file *in, struct file *out, size_t len,
- unsigned int flags);
+static inline long splice_copy_file_range(struct file *in, loff_t pos_in,
+ struct file *out, loff_t pos_out,
+ size_t len)
+{
+ return splice_file_range(in, &pos_in, out, &pos_out, len);
+}
+
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
+ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 394a3f68bad5..28e8c8bd3944 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -120,6 +120,9 @@ static inline void spmi_controller_put(struct spmi_controller *ctrl)
int spmi_controller_add(struct spmi_controller *ctrl);
void spmi_controller_remove(struct spmi_controller *ctrl);
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size);
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl);
+
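
A minimal probe() sketch for the devm SPMI helpers added above, assuming the allocator reports failure via ERR_PTR(); struct my_ctrl and the read_cmd/write_cmd implementations are placeholders:

    static int my_spmi_probe(struct platform_device *pdev)
    {
    	struct spmi_controller *ctrl;

    	ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(struct my_ctrl));
    	if (IS_ERR(ctrl))
    		return PTR_ERR(ctrl);

    	ctrl->read_cmd = my_spmi_read_cmd;	/* hypothetical ops */
    	ctrl->write_cmd = my_spmi_write_cmd;

    	/* Removed automatically when the parent device unbinds. */
    	return devm_spmi_controller_add(&pdev->dev, ctrl);
    }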
/**
* struct spmi_driver - SPMI slave device driver
* @driver: SPMI device drivers should initialize name and owner field of
@@ -138,6 +141,7 @@ struct spmi_driver {
struct device_driver driver;
int (*probe)(struct spmi_device *sdev);
void (*remove)(struct spmi_device *sdev);
+ void (*shutdown)(struct spmi_device *sdev);
};
static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
@@ -163,6 +167,9 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv)
module_driver(__spmi_driver, spmi_driver_register, \
spmi_driver_unregister)
+struct device_node;
+
+struct spmi_device *spmi_find_device_by_of_node(struct device_node *np);
int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
size_t len);
diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
new file mode 100644
index 000000000000..33dcbec71925
--- /dev/null
+++ b/include/linux/sprintf.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_SPRINTF_H_
+#define _LINUX_KERNEL_SPRINTF_H_
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
+
+__printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
+__printf(2, 0) int vsprintf(char *buf, const char *, va_list);
+__printf(3, 4) int snprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...);
+__printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+__printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
+
+__scanf(2, 3) int sscanf(const char *, const char *, ...);
+__scanf(2, 0) int vsscanf(const char *, const char *, va_list);
+
+/* These are for specific cases, do not use without real need */
+extern bool no_hash_pointers;
+int no_hash_pointers_enable(char *str);
+
+#endif /* _LINUX_KERNEL_SPRINTF_H_ */
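
A small usage sketch (not from the patch) showing why scnprintf() exists alongside snprintf(): it returns the number of bytes actually written rather than the number that would have been written, so repeated appends into a fixed buffer stay bounded; my_fill() is hypothetical:

    static size_t my_fill(char *buf, size_t size)
    {
    	size_t len = 0;

    	/* scnprintf() returns bytes actually written (excluding the NUL),
    	 * so "size - len" cannot underflow across repeated appends.
    	 */
    	len += scnprintf(buf + len, size - len, "state=%d ", 1);
    	len += scnprintf(buf + len, size - len, "count=%lu\n", 42UL);
    	return len;
    }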
diff --git a/include/linux/sram.h b/include/linux/sram.h
index 4fb405fb0480..d7dee19505c6 100644
--- a/include/linux/sram.h
+++ b/include/linux/sram.h
@@ -1,15 +1,5 @@
-/*
- * Generic SRAM Driver Interface
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Generic SRAM Driver Interface */
#ifndef __LINUX_SRAM_H__
#define __LINUX_SRAM_H__
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e432cc92c73d..236610e4a8fa 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -47,11 +47,8 @@ int init_srcu_struct(struct srcu_struct *ssp);
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
-#elif defined(CONFIG_SRCU)
-#error "Unknown SRCU implementation specified to kernel configuration"
#else
-/* Dummy definition for things like notifiers. Actual use gets link error. */
-struct srcu_struct { };
+#error "Unknown SRCU implementation specified to kernel configuration"
#endif
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
@@ -60,6 +57,25 @@ void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
+unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
+unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
+bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+
+#ifdef CONFIG_NEED_SRCU_NMI_SAFE
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+#else
+static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+{
+ return __srcu_read_lock(ssp);
+}
+static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+{
+ __srcu_read_unlock(ssp, idx);
+}
+#endif /* CONFIG_NEED_SRCU_NMI_SAFE */
+
+void srcu_init(void);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -86,6 +102,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
return lock_is_held(&ssp->dep_map);
}
+/*
+ * Annotations provide deadlock detection for SRCU.
+ *
+ * Similar to other lockdep annotations, except there is an additional
+ * srcu_lock_sync(), which is basically an empty *write*-side critical section,
+ * see lock_sync() for more information.
+ */
+
+/* Annotates a srcu_read_lock() */
+static inline void srcu_lock_acquire(struct lockdep_map *map)
+{
+ lock_map_acquire_read(map);
+}
+
+/* Annotates a srcu_read_lock() */
+static inline void srcu_lock_release(struct lockdep_map *map)
+{
+ lock_map_release(map);
+}
+
+/* Annotates a synchronize_srcu() */
+static inline void srcu_lock_sync(struct lockdep_map *map)
+{
+ lock_map_sync(map);
+}
+
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
@@ -93,8 +135,24 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
return 1;
}
+#define srcu_lock_acquire(m) do { } while (0)
+#define srcu_lock_release(m) do { } while (0)
+#define srcu_lock_sync(m) do { } while (0)
+
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#define SRCU_NMI_UNKNOWN 0x0
+#define SRCU_NMI_UNSAFE 0x1
+#define SRCU_NMI_SAFE 0x2
+
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
+void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
+#else
+static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
+ bool nmi_safe) { }
+#endif
+
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
@@ -108,7 +166,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -151,8 +210,26 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
+ srcu_check_nmi_safety(ssp, false);
retval = __srcu_read_lock(ssp);
- rcu_lock_acquire(&(ssp)->dep_map);
+ srcu_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
+/**
+ * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but in an NMI-safe manner.
+ * See srcu_read_lock() for more information.
+ */
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+{
+ int retval;
+
+ srcu_check_nmi_safety(ssp, true);
+ retval = __srcu_read_lock_nmisafe(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}
@@ -162,11 +239,40 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
+ srcu_check_nmi_safety(ssp, false);
retval = __srcu_read_lock(ssp);
return retval;
}
/**
+ * srcu_down_read - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter a semaphore-like SRCU read-side critical section. Note that
+ * SRCU read-side critical sections may be nested. However, it is
+ * illegal to call anything that waits on an SRCU grace period for the
+ * same srcu_struct, whether directly or indirectly. Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited(). But if you want lockdep to help you
+ * keep this stuff straight, you should instead use srcu_read_lock().
+ *
+ * The semaphore-like nature of srcu_down_read() means that the matching
+ * srcu_up_read() can be invoked from some other context, for example,
+ * from some other task or from an irq handler. However, neither
+ * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
+ *
+ * Calls to srcu_down_read() may be nested, similar to the manner in
+ * which calls to down_read() may be nested.
+ */
+static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+{
+ WARN_ON_ONCE(in_nmi());
+ srcu_check_nmi_safety(ssp, false);
+ return __srcu_read_lock(ssp);
+}
+
+/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
@@ -177,14 +283,49 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
- rcu_lock_release(&(ssp)->dep_map);
+ srcu_check_nmi_safety(ssp, false);
+ srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock(ssp, idx);
}
+/**
+ * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section, but in an NMI-safe manner.
+ */
+static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(idx & ~0x1);
+ srcu_check_nmi_safety(ssp, true);
+ rcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_nmisafe(ssp, idx);
+}
+
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
+ srcu_check_nmi_safety(ssp, false);
+ __srcu_read_unlock(ssp, idx);
+}
+
+/**
+ * srcu_up_read - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section, but not necessarily from
+ * the same context as the matching srcu_down_read().
+ */
+static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(idx & ~0x1);
+ WARN_ON_ONCE(in_nmi());
+ srcu_check_nmi_safety(ssp, false);
__srcu_read_unlock(ssp, idx);
}
@@ -202,4 +343,9 @@ static inline void smp_mb__after_srcu_read_unlock(void)
/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
+DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
+ _T->idx = srcu_read_lock(_T->lock),
+ srcu_read_unlock(_T->lock, _T->idx),
+ int idx)
+
#endif
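Editorial sketch (not part of the patch) of how the pieces above fit together: a conventional reader pairs srcu_read_lock()/srcu_read_unlock() in one context, srcu_down_read()/srcu_up_read() allow the release to happen elsewhere, and the DEFINE_LOCK_GUARD_1() definition enables scope-based release. The srcu_struct name and helpers are hypothetical.

#include <linux/srcu.h>
#include <linux/cleanup.h>

DEFINE_STATIC_SRCU(my_srcu);

static void conventional_reader(void)
{
	int idx = srcu_read_lock(&my_srcu);

	/* ... fetch pointers with srcu_dereference(p, &my_srcu) ... */
	srcu_read_unlock(&my_srcu, idx);
}

static void scoped_reader(void)
{
	guard(srcu)(&my_srcu);	/* srcu_read_unlock() runs at scope exit */
	/* ... read-side critical section ... */
}

/* Semaphore-like handoff: taken here ... */
static int start_io(void)
{
	return srcu_down_read(&my_srcu);
}

/* ... and released from another context, e.g. an irq handler (never NMI). */
static void io_done(int idx)
{
	srcu_up_read(&my_srcu, idx);
}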
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 5a5a1941ca15..447133171d95 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -15,9 +15,10 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
- short srcu_idx; /* Current reader array element. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
+ unsigned long srcu_idx; /* Current reader array element in bit 0x2. */
+ unsigned long srcu_idx_max; /* Furthest future srcu_idx request. */
struct swait_queue_head srcu_wq;
/* Last srcu_read_unlock() wakes GP. */
struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */
@@ -30,7 +31,7 @@ struct srcu_struct {
void srcu_drive_gp(struct work_struct *wp);
-#define __SRCU_STRUCT_INIT(name, __ignored) \
+#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored) \
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
@@ -43,9 +44,13 @@ void srcu_drive_gp(struct work_struct *wp);
* Tree SRCU, which needs some per-CPU data.
*/
#define DEFINE_SRCU(name) \
- struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
#define DEFINE_STATIC_SRCU(name) \
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+
+// Dummy structure for srcu_notifier_head.
+struct srcu_usage { };
+#define __SRCU_USAGE_INIT(name) { }
void synchronize_srcu(struct srcu_struct *ssp);
@@ -59,8 +64,8 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(ssp->srcu_idx);
- WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+ idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
return idx;
}
@@ -80,11 +85,13 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
{
int idx;
- idx = READ_ONCE(ssp->srcu_idx) & 0x1;
- pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
tt, tf, idx,
- READ_ONCE(ssp->srcu_lock_nesting[!idx]),
- READ_ONCE(ssp->srcu_lock_nesting[idx]));
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
+ data_race(READ_ONCE(ssp->srcu_idx)),
+ data_race(READ_ONCE(ssp->srcu_idx_max)));
}
#endif
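A worked illustration (editorial, following the arithmetic shown in __srcu_read_lock() above) of the new Tiny SRCU index encoding: the grace-period counter lives in ->srcu_idx, readers derive the array element from bit 0x2 of (->srcu_idx + 1), and the element therefore flips once per two counter increments.

/*
 * srcu_idx:        0  1  2  3  4  5 ...
 * reader element:  0  1  1  0  0  1 ...
 */
static inline int tiny_srcu_reader_element(unsigned long srcu_idx)
{
	return ((srcu_idx + 1) & 0x2) >> 1;	/* same math as __srcu_read_lock() */
}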
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..8f3f72480e78 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -23,8 +23,9 @@ struct srcu_struct;
*/
struct srcu_data {
/* Read-side state. */
- unsigned long srcu_lock_count[2]; /* Locks per CPU. */
- unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
+ atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */
+ atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */
+ int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */
/* Update-side state. */
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -47,11 +48,9 @@ struct srcu_data {
*/
struct srcu_node {
spinlock_t __private lock;
- unsigned long srcu_have_cbs[4]; /* GP seq for children */
- /* having CBs, but only */
- /* is > ->srcu_gq_seq. */
- unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
- /* have CBs for given GP? */
+ unsigned long srcu_have_cbs[4]; /* GP seq for children having CBs, but only */
+ /* if greater than ->srcu_gp_seq. */
+ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs have CBs for given GP? */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
struct srcu_node *srcu_parent; /* Next up in tree. */
int grplo; /* Least CPU for node. */
@@ -59,21 +58,25 @@ struct srcu_node {
};
/*
- * Per-SRCU-domain structure, similar in function to rcu_state.
+ * Per-SRCU-domain structure, update-side data linked from srcu_struct.
*/
-struct srcu_struct {
- struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */
+struct srcu_usage {
+ struct srcu_node *node; /* Combining tree. */
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
+ int srcu_size_state; /* Small-to-big transition state. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters and size state. */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
- unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ unsigned long srcu_gp_start; /* Last GP start timestamp (jiffies) */
unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
- struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ unsigned long srcu_size_jiffies; /* Current contention-measurement interval. */
+ unsigned long srcu_n_lock_retries; /* Contention events in current interval. */
+ unsigned long srcu_n_exp_nodelay; /* # expedited no-delays in current GP phase. */
+ bool sda_is_static; /* May ->sda be passed to free_percpu()? */
unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
struct completion srcu_barrier_completion;
@@ -81,24 +84,71 @@ struct srcu_struct {
atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
/* callback for the barrier */
/* operation. */
+ unsigned long reschedule_jiffies;
+ unsigned long reschedule_count;
struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct srcu_struct *srcu_ssp;
+};
+
+/*
+ * Per-SRCU-domain structure, similar in function to rcu_state.
+ */
+struct srcu_struct {
+ unsigned int srcu_idx; /* Current rdr array element. */
+ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+ struct srcu_usage *srcu_sup; /* Update-side data. */
};
+// Values for size state variable (->srcu_size_state). Once the state
+// has been set to SRCU_SIZE_ALLOC, the grace-period code advances through
+// this state machine one step per grace period until the SRCU_SIZE_BIG state
+// is reached. Otherwise, the state machine remains in the SRCU_SIZE_SMALL
+// state indefinitely.
+#define SRCU_SIZE_SMALL 0 // No srcu_node combining tree, ->node == NULL
+#define SRCU_SIZE_ALLOC 1 // An srcu_node tree is being allocated, initialized,
+ // and then referenced by ->node. It will not be used.
+#define SRCU_SIZE_WAIT_BARRIER 2 // The srcu_node tree starts being used by everything
+ // except call_srcu(), especially by srcu_barrier().
+ // By the end of this state, all CPUs and threads
+ // are aware of this tree's existence.
+#define SRCU_SIZE_WAIT_CALL 3 // The srcu_node tree starts being used by call_srcu().
+ // By the end of this state, all of the call_srcu()
+ // invocations that were running on a non-boot CPU
+ // and using the boot CPU's callback queue will have
+ // completed.
+#define SRCU_SIZE_WAIT_CBS1 4 // Don't trust the ->srcu_have_cbs[] grace-period
+#define SRCU_SIZE_WAIT_CBS2 5 // sequence elements or the ->srcu_data_have_cbs[]
+#define SRCU_SIZE_WAIT_CBS3 6 // CPU-bitmask elements until all four elements of
+#define SRCU_SIZE_WAIT_CBS4 7 // each array have been initialized.
+#define SRCU_SIZE_BIG 8 // The srcu_node combining tree is fully initialized
+ // and all aspects of it are being put to use.
+
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE 0
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
-#define __SRCU_STRUCT_INIT(name, pcpu_name) \
-{ \
- .sda = &pcpu_name, \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = -1UL, \
- .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
- __SRCU_DEP_MAP_INIT(name) \
+#define __SRCU_USAGE_INIT(name) \
+{ \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .srcu_gp_seq_needed = -1UL, \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
+}
+
+#define __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+ .srcu_sup = &usage_name, \
+ __SRCU_DEP_MAP_INIT(name)
+
+#define __SRCU_STRUCT_INIT_MODULE(name, usage_name) \
+{ \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+}
+
+#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name) \
+{ \
+ .sda = &pcpu_name, \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
}
/*
@@ -121,15 +171,18 @@ struct srcu_struct {
* See include/linux/percpu-defs.h for the rules on per-CPU variables.
*/
#ifdef MODULE
-# define __DEFINE_SRCU(name, is_static) \
- is_static struct srcu_struct name; \
- struct srcu_struct * const __srcu_struct_##name \
+# define __DEFINE_SRCU(name, is_static) \
+ static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage); \
+ extern struct srcu_struct * const __srcu_struct_##name; \
+ struct srcu_struct * const __srcu_struct_##name \
__section("___srcu_struct_ptrs") = &name
#else
-# define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
- is_static struct srcu_struct name = \
- __SRCU_STRUCT_INIT(name, name##_srcu_data)
+# define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
+ static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
+ is_static struct srcu_struct name = \
+ __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data)
#endif
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
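Editorial sketch of how the srcu_struct/srcu_usage split is consumed (names invented). Static definitions wire up ->srcu_sup at build time through the macros above; dynamically initialized users still call init_srcu_struct(), which is presumably where the run-time setup of ->srcu_sup happens.

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(static_domain);		/* ->srcu_sup set up at build time */

static struct srcu_struct dyn_domain;

static int __init my_driver_init(void)
{
	return init_srcu_struct(&dyn_domain);	/* run-time setup, incl. ->srcu_sup */
}

static void my_driver_exit(void)
{
	cleanup_srcu_struct(&dyn_domain);
}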
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 0d5a2691e7e9..1f326da289d3 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -285,7 +285,7 @@ struct ssb_device {
/* Go from struct device to struct ssb_device. */
static inline
-struct ssb_device * dev_to_ssb_dev(struct device *dev)
+struct ssb_device * dev_to_ssb_dev(const struct device *dev)
{
struct __ssb_dev_wrapper *wrap;
wrap = container_of(dev, struct __ssb_dev_wrapper, dev);
diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h
index 3f8bc973d67d..19253bfacd1a 100644
--- a/include/linux/ssb/ssb_driver_extif.h
+++ b/include/linux/ssb/ssb_driver_extif.h
@@ -197,7 +197,7 @@ struct ssb_extif {
static inline bool ssb_extif_available(struct ssb_extif *extif)
{
- return 0;
+ return false;
}
static inline
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 31593b34608e..28c145a51e57 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -76,7 +76,7 @@ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
if (dev)
return !!(dev->dev->bus->sprom.boardflags_lo &
SSB_GIGE_BFL_ROBOSWITCH);
- return 0;
+ return false;
}
/* Returns whether we can only do one DMA at once. */
@@ -86,7 +86,7 @@ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
if (dev)
return ((dev->dev->bus->chip_id == 0x4785) &&
(dev->dev->bus->chip_rev < 2));
- return 0;
+ return false;
}
/* Returns whether we must flush posted writes. */
@@ -95,7 +95,7 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return (dev->dev->bus->chip_id == 0x4785);
- return 0;
+ return false;
}
/* Get the device MAC address */
@@ -159,7 +159,7 @@ static inline void ssb_gige_exit(void)
static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
{
@@ -167,19 +167,19 @@ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
}
static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 24d49c732341..3c6caa5abc7c 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -1,24 +1,258 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * A generic stack depot implementation
+ * Stack depot - a stack trace storage that avoids duplication.
+ *
+ * Stack depot is intended to be used by subsystems that need to store and
+ * later retrieve many potentially duplicated stack traces without wasting
+ * memory.
+ *
+ * For example, KASAN needs to save allocation and free stack traces for each
+ * object. Storing two stack traces per object requires a lot of memory (e.g.
+ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
+ * stack traces often repeat, using stack depot saves about 100x the space.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
#ifndef _LINUX_STACKDEPOT_H
#define _LINUX_STACKDEPOT_H
+#include <linux/gfp.h>
+
typedef u32 depot_stack_handle_t;
+/*
+ * Number of bits in the handle that stack depot doesn't use. Users may store
+ * information in them via stack_depot_set/get_extra_bits.
+ */
+#define STACK_DEPOT_EXTRA_BITS 5
+
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
+
+#ifdef CONFIG_STACKDEPOT
+/* Compact structure that stores a reference to a stack. */
+union handle_parts {
+ depot_stack_handle_t handle;
+ struct {
+ /* pool_index is offset by 1 */
+ u32 pool_index : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
+ };
+};
+
+struct stack_record {
+ struct list_head hash_list; /* Links in the hash table */
+ u32 hash; /* Hash in hash table */
+ u32 size; /* Number of stored frames */
+ union handle_parts handle; /* Constant after initialization */
+ refcount_t count;
+ union {
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ struct {
+ /*
+ * An important invariant of the implementation is that a
+ * stack record is placed onto the freelist only when its
+ * refcount is zero. Because stack records with a zero
+ * refcount are never considered valid, it is safe to
+ * union @entries and freelist management state below.
+ * Conversely, as soon as an entry is off the freelist
+ * and its refcount becomes non-zero, the below must not
+ * be accessed until being placed back on the freelist.
+ */
+ struct list_head free_list; /* Links in the freelist */
+ unsigned long rcu_state; /* RCU cookie */
+ };
+ };
+};
+#endif
+
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
+
+#define STACK_DEPOT_FLAGS_NUM 2
+#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
+/*
+ * Using stack depot requires its initialization, which can be done in 3 ways:
+ *
+ * 1. Selecting CONFIG_STACKDEPOT_ALWAYS_INIT. This option is suitable in
+ * scenarios where it's known at compile time that stack depot will be used.
+ * Enabling this config makes the kernel initialize stack depot in mm_init().
+ *
+ * 2. Calling stack_depot_request_early_init() during early boot, before
+ * stack_depot_early_init() in mm_init() completes. For example, this can
+ * be done when evaluating kernel boot parameters.
+ *
+ * 3. Calling stack_depot_init(). Possible after boot is complete. This option
+ * is recommended for modules initialized later in the boot process, after
+ * mm_init() completes.
+ *
+ * stack_depot_init() and stack_depot_request_early_init() can be called
+ * regardless of whether CONFIG_STACKDEPOT is enabled and are no-ops when this
+ * config is disabled. The save/fetch/print stack depot functions can only be
+ * called from the code that makes sure CONFIG_STACKDEPOT is enabled _and_
+ * initializes stack depot via one of the ways listed above.
+ */
+#ifdef CONFIG_STACKDEPOT
+int stack_depot_init(void);
+
+void __init stack_depot_request_early_init(void);
+
+/* Must be only called from mm_init(). */
+int __init stack_depot_early_init(void);
+#else
+static inline int stack_depot_init(void) { return 0; }
+
+static inline void stack_depot_request_early_init(void) { }
+
+static inline int stack_depot_early_init(void) { return 0; }
+#endif
+
+/**
+ * stack_depot_save_flags - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
+ * @depot_flags: Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
+ *
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
+ *
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
+ *
+ * If the provided stack trace comes from the interrupt context, only the part
+ * up to the interrupt entry is saved.
+ *
+ * Context: Any context, but STACK_DEPOT_FLAG_CAN_ALLOC must be left unset if
+ * alloc_pages() cannot be used from the current context. Currently
+ * this is the case for contexts where neither %GFP_ATOMIC nor
+ * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
+ *
+ * Return: Handle of the stack trace stored in depot, 0 on failure
+ */
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags,
+ depot_flags_t depot_flags);
+
+/**
+ * stack_depot_save - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
+ *
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
+ *
+ * Return: Handle of the stack trace stored in depot, 0 on failure
+ */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
+/**
+ * __stack_depot_get_stack_record - Get a pointer to a stack_record struct
+ *
+ * @handle: Stack depot handle
+ *
+ * This function is only for internal purposes.
+ *
+ * Return: Returns a pointer to a stack_record struct
+ */
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle);
+
+/**
+ * stack_depot_fetch - Fetch a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @entries: Pointer to store the address of the stack trace
+ *
+ * Return: Number of frames for the fetched stack
+ */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
-unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+/**
+ * stack_depot_print - Print a stack trace from stack depot
+ *
+ * @stack: Stack depot handle returned from stack_depot_save()
+ */
+void stack_depot_print(depot_stack_handle_t stack);
+
+/**
+ * stack_depot_snprint - Print a stack trace from stack depot into a buffer
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @buf: Pointer to the print buffer
+ * @size: Size of the print buffer
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed
+ */
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces);
+
+/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @extra_bits: Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits);
+
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle: Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
#endif
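A minimal usage sketch for the API documented above (editorial; the helper names are invented). STACK_DEPOT_FLAG_GET takes a reference that must later be dropped with stack_depot_put().

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void report_and_release(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr = stack_depot_fetch(handle, &entries);

	stack_trace_print(entries, nr, 0);
	stack_depot_put(handle);	/* drop the STACK_DEPOT_FLAG_GET reference */
}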
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
index 3d5c3271a9a8..3be2cb564710 100644
--- a/include/linux/stackleak.h
+++ b/include/linux/stackleak.h
@@ -14,19 +14,73 @@
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
#include <asm/stacktrace.h>
+#include <linux/linkage.h>
+
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+ /*
+ * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+ * which we must not corrupt.
+ */
+ return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
+/*
+ * The address immediately after the highest address on tsk's stack which we
+ * can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_high_bound(const struct task_struct *tsk)
+{
+ /*
+ * The task's pt_regs lives at the top of the task stack and will be
+ * overwritten by exception entry, so there's no need to erase them.
+ */
+ return (unsigned long)task_pt_regs(tsk);
+}
+
+/*
+ * Find the address immediately above the poisoned region of the stack, where
+ * that region falls between 'low' (inclusive) and 'high' (exclusive).
+ */
+static __always_inline unsigned long
+stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
+{
+ const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+ unsigned int poison_count = 0;
+ unsigned long poison_high = high;
+ unsigned long sp = high;
+
+ while (sp > low && poison_count < depth) {
+ sp -= sizeof(unsigned long);
+
+ if (*(unsigned long *)sp == STACKLEAK_POISON) {
+ poison_count++;
+ } else {
+ poison_count = 0;
+ poison_high = sp;
+ }
+ }
+
+ return poison_high;
+}
static inline void stackleak_task_init(struct task_struct *t)
{
- t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+ t->lowest_stack = stackleak_task_low_bound(t);
# ifdef CONFIG_STACKLEAK_METRICS
t->prev_lowest_stack = t->lowest_stack;
# endif
}
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
-int stack_erasing_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
+asmlinkage void noinstr stackleak_erase(void);
+asmlinkage void noinstr stackleak_erase_on_task_stack(void);
+asmlinkage void noinstr stackleak_erase_off_task_stack(void);
+void __no_caller_saved_registers noinstr stackleak_track_stack(void);
#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
static inline void stackleak_task_init(struct task_struct *t) { }
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 4c678c4fec58..9c88707d9a0f 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,6 +6,25 @@
#include <linux/sched.h>
#include <linux/random.h>
+/*
+ * On 64-bit architectures, protect against non-terminated C string overflows
+ * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ */
+#ifdef CONFIG_64BIT
+# ifdef __LITTLE_ENDIAN
+# define CANARY_MASK 0xffffffffffffff00UL
+# else /* big endian, 64 bits: */
+# define CANARY_MASK 0x00ffffffffffffffUL
+# endif
+#else /* 32 bits: */
+# define CANARY_MASK 0xffffffffUL
+#endif
+
+static inline unsigned long get_random_canary(void)
+{
+ return get_random_long() & CANARY_MASK;
+}
+
#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
# include <asm/stackprotector.h>
#else
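Editorial illustration of the canary masking above: on little-endian 64-bit, CANARY_MASK zeroes the low byte, so a runaway string copy terminates on the canary instead of walking past it. The helper below is hypothetical; arch code typically does this in its boot_init_stack_canary().

/*
 * raw canary:    0x1122334455667788
 * after masking: 0x1122334455667700   (raw & CANARY_MASK)
 */
static void init_task_canary(struct task_struct *tsk)
{
	tsk->stack_canary = get_random_canary();
}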
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index b7af8cc13eda..97455880ac41 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -8,35 +8,17 @@
struct task_struct;
struct pt_regs;
-#ifdef CONFIG_STACKTRACE
-void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
- int spaces);
-int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
- unsigned int nr_entries, int spaces);
-unsigned int stack_trace_save(unsigned long *store, unsigned int size,
- unsigned int skipnr);
-unsigned int stack_trace_save_tsk(struct task_struct *task,
- unsigned long *store, unsigned int size,
- unsigned int skipnr);
-unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
- unsigned int size, unsigned int skipnr);
-unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
-
-/* Internal interfaces. Do not use in generic code */
#ifdef CONFIG_ARCH_STACKWALK
/**
* stack_trace_consume_fn - Callback for arch_stack_walk()
* @cookie: Caller supplied pointer handed back by arch_stack_walk()
* @addr: The stack entry address to consume
- * @reliable: True when the stack entry is reliable. Required by
- * some printk based consumers.
*
* Return: True, if the entry was consumed or skipped
* False, if there is no space left to store
*/
-typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
- bool reliable);
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);
/**
* arch_stack_walk - Architecture specific function to walk the stack
* @consume_entry: Callback which is invoked by the architecture code for
@@ -55,12 +37,48 @@ typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
*/
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * arch_stack_walk_reliable - Architecture specific function to walk the
+ * stack reliably
+ *
+ * @consume_entry: Callback which is invoked by the architecture code for
+ * each entry.
+ * @cookie: Caller supplied pointer which is handed back to
+ * @consume_entry
+ * @task: Pointer to a task struct, can be NULL
+ *
+ * This function returns an error if it detects any unreliable
+ * features of the stack. Otherwise it guarantees that the stack
+ * trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is
+ * inactive and its stack is pinned.
+ */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task);
+
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs);
+#endif /* CONFIG_ARCH_STACKWALK */
-#else /* CONFIG_ARCH_STACKWALK */
+#ifdef CONFIG_STACKTRACE
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
+ int spaces);
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
+ unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+ unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+
+#ifndef CONFIG_ARCH_STACKWALK
+/* Internal interfaces. Do not use in generic code */
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
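Editorial sketch of the slimmed-down callback signature above (the 'reliable' argument is gone); all names are invented. The callback returns true to keep walking and false to stop.

#include <linux/stacktrace.h>

struct count_cookie {
	unsigned int seen;
};

static bool count_entry(void *cookie, unsigned long addr)
{
	struct count_cookie *c = cookie;

	c->seen++;
	return true;		/* false would stop the walk */
}

static unsigned int count_stack_entries(struct task_struct *task)
{
	struct count_cookie c = { };

	arch_stack_walk(count_entry, &c, task, NULL);	/* CONFIG_ARCH_STACKWALK only */
	return c.seen;
}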
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
index 8b369a41c03c..09f994ac87df 100644
--- a/include/linux/start_kernel.h
+++ b/include/linux/start_kernel.h
@@ -8,8 +8,6 @@
/* Define the prototype for start_kernel here, rather than cluttering
up something else. */
-extern asmlinkage void __init start_kernel(void);
-extern void __init arch_call_rest_init(void);
-extern void __ref rest_init(void);
+extern asmlinkage void __init __noreturn start_kernel(void);
#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 56614af83d4a..52150570d37a 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -19,8 +19,6 @@
#include <linux/time.h>
#include <linux/uidgid.h>
-#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE)
-
struct kstat {
u32 result_mask; /* What fields the user got */
umode_t mode;
@@ -36,6 +34,10 @@ struct kstat {
STATX_ATTR_ENCRYPTED | \
STATX_ATTR_VERITY \
)/* Attrs corresponding to FS_*_FL flags */
+#define KSTAT_ATTR_VFS_FLAGS \
+ (STATX_ATTR_IMMUTABLE | \
+ STATX_ATTR_APPEND \
+ ) /* Attrs corresponding to S_* flags that are enforced by the VFS */
u64 ino;
dev_t dev;
dev_t rdev;
@@ -48,6 +50,17 @@ struct kstat {
struct timespec64 btime; /* File creation time */
u64 blocks;
u64 mnt_id;
+ u32 dio_mem_align;
+ u32 dio_offset_align;
+ u64 change_cookie;
};
+/* These definitions are internal to the kernel for now. Mainly used by nfsd. */
+
+/* mask values */
+#define STATX_CHANGE_COOKIE 0x40000000U /* Want/got stx_change_attr */
+
+/* file attribute values */
+#define STATX_ATTR_CHANGE_MONOTONIC 0x8000000000000000ULL /* version monotonically increases */
+
#endif
diff --git a/include/linux/statfs.h b/include/linux/statfs.h
index 9bc69edb8f18..02c862686ea3 100644
--- a/include/linux/statfs.h
+++ b/include/linux/statfs.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <asm/statfs.h>
+#include <asm/byteorder.h>
struct kstatfs {
long f_type;
@@ -40,8 +41,21 @@ struct kstatfs {
#define ST_NOATIME 0x0400 /* do not update access times */
#define ST_NODIRATIME 0x0800 /* do not update directory access times */
#define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */
+#define ST_NOSYMFOLLOW 0x2000 /* do not follow symlinks */
struct dentry;
extern int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid);
+static inline __kernel_fsid_t u64_to_fsid(u64 v)
+{
+ return (__kernel_fsid_t){.val = {(u32)v, (u32)(v>>32)}};
+}
+
+/* Fold a 16-byte UUID into a 64-bit fsid */
+static inline __kernel_fsid_t uuid_to_fsid(__u8 *uuid)
+{
+ return u64_to_fsid(le64_to_cpup((void *)uuid) ^
+ le64_to_cpup((void *)(uuid + sizeof(u64))));
+}
+
#endif
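Worked example (editorial, with made-up bytes) of the UUID folding above: the two little-endian 64-bit halves are XORed, and u64_to_fsid() then splits the result into the two 32-bit val[] slots.

static __kernel_fsid_t example_fsid(void)
{
	__u8 uuid[16] = {
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
	};

	/* 0x0807060504030201 ^ 0x1817161514131211 = 0x1010101010101010,
	 * so val[0] == val[1] == 0x10101010 here. */
	return uuid_to_fsid(uuid);
}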
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
new file mode 100644
index 000000000000..141e6b176a1b
--- /dev/null
+++ b/include/linux/static_call.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STATIC_CALL_H
+#define _LINUX_STATIC_CALL_H
+
+/*
+ * Static call support
+ *
+ * Static calls use code patching to hard-code function pointers into direct
+ * branch instructions. They give the flexibility of function pointers, but
+ * with improved performance. This is especially important for cases where
+ * retpolines would otherwise be used, as retpolines can significantly impact
+ * performance.
+ *
+ *
+ * API overview:
+ *
+ * DECLARE_STATIC_CALL(name, func);
+ * DEFINE_STATIC_CALL(name, func);
+ * DEFINE_STATIC_CALL_NULL(name, typename);
+ * DEFINE_STATIC_CALL_RET0(name, typename);
+ *
+ * __static_call_return0;
+ *
+ * static_call(name)(args...);
+ * static_call_cond(name)(args...);
+ * static_call_update(name, func);
+ * static_call_query(name);
+ *
+ * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
+ *
+ * Usage example:
+ *
+ * # Start with the following functions (with identical prototypes):
+ * int func_a(int arg1, int arg2);
+ * int func_b(int arg1, int arg2);
+ *
+ * # Define a 'my_name' reference, associated with func_a() by default
+ * DEFINE_STATIC_CALL(my_name, func_a);
+ *
+ * # Call func_a()
+ * static_call(my_name)(arg1, arg2);
+ *
+ * # Update 'my_name' to point to func_b()
+ * static_call_update(my_name, &func_b);
+ *
+ * # Call func_b()
+ * static_call(my_name)(arg1, arg2);
+ *
+ *
+ * Implementation details:
+ *
+ * This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
+ * Otherwise basic indirect calls are used (with function pointers).
+ *
+ * Each static_call() site calls into a trampoline associated with the name.
+ * The trampoline has a direct branch to the default function. Updates to a
+ * name will modify the trampoline's branch destination.
+ *
+ * If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
+ * themselves will be patched at runtime to call the functions directly,
+ * rather than calling through the trampoline. This requires objtool or a
+ * compiler plugin to detect all the static_call() sites and annotate them
+ * in the .static_call_sites section.
+ *
+ *
+ * Notes on NULL function pointers:
+ *
+ * Static_call()s support NULL functions, with many of the caveats that
+ * regular function pointers have.
+ *
+ * Clearly calling a NULL function pointer is 'BAD', so too for
+ * static_call()s (although when HAVE_STATIC_CALL it might not be immediately
+ * fatal). A NULL static_call can be the result of:
+ *
+ * DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
+ *
+ * which is equivalent to declaring a NULL function pointer with just a
+ * typename:
+ *
+ * void (*my_func_ptr)(int arg1) = NULL;
+ *
+ * or using static_call_update() with a NULL function. In both cases the
+ * HAVE_STATIC_CALL implementation will patch the trampoline with a RET
+ * instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
+ * architectures can patch the trampoline call to a NOP.
+ *
+ * In all cases, any argument evaluation is unconditional. Unlike a regular
+ * conditional function pointer call:
+ *
+ * if (my_func_ptr)
+ * my_func_ptr(arg1)
+ *
+ * where the argument evaluation also depends on the pointer value.
+ *
+ * When calling a static_call that can be NULL, use:
+ *
+ * static_call_cond(name)(arg1);
+ *
+ * which will include the required value tests to avoid NULL-pointer
+ * dereferences.
+ *
+ * To query which function is currently set to be called, use:
+ *
+ * func = static_call_query(name);
+ *
+ *
+ * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
+ *
+ * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
+ * conditional void function call, DEFINE_STATIC_CALL_RET0 /
+ * __static_call_return0 optimize the do-nothing return-0 function.
+ *
+ * This feature is strictly UB per the C standard (since it casts a function
+ * pointer to a different signature) and relies on the architecture ABI to
+ * make things work. In particular it relies on Caller Stack-cleanup and the
+ * whole return register being clobbered for short return values. All normal
+ * CDECL style ABIs conform.
+ *
+ * In particular the x86_64 implementation replaces the 5 byte CALL
+ * instruction at the callsite with a 5 byte clear of the RAX register,
+ * completely eliding any function call overhead.
+ *
+ * Notably argument setup is unconditional.
+ *
+ *
+ * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
+ *
+ * The difference is that the _TRAMP variant tries to only export the
+ * trampoline with the result that a module can use static_call{,_cond}() but
+ * not static_call_update().
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/static_call_types.h>
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+#include <asm/static_call.h>
+
+/*
+ * Either @site or @tramp can be NULL.
+ */
+extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);
+
+#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
+
+#else
+#define STATIC_CALL_TRAMP_ADDR(name) NULL
+#endif
+
+#define static_call_update(name, func) \
+({ \
+ typeof(&STATIC_CALL_TRAMP(name)) __F = (func); \
+ __static_call_update(&STATIC_CALL_KEY(name), \
+ STATIC_CALL_TRAMP_ADDR(name), __F); \
+})
+
+#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+extern int __init static_call_init(void);
+
+extern void static_call_force_reinit(void);
+
+struct static_call_mod {
+ struct static_call_mod *next;
+ struct module *mod; /* for vmlinux, mod == NULL */
+ struct static_call_site *sites;
+};
+
+/* For finding the key associated with a trampoline */
+struct static_call_tramp_key {
+ s32 tramp;
+ s32 key;
+};
+
+extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
+extern int static_call_mod_init(struct module *mod);
+extern int static_call_text_reserved(void *start, void *end);
+
+extern long __static_call_return0(void);
+
+#define DEFINE_STATIC_CALL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = _func, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+
+#define DEFINE_STATIC_CALL_NULL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = NULL, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
+#define static_call_cond(name) (void)__static_call(name)
+
+#define EXPORT_STATIC_CALL(name) \
+ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name) \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)); \
+ ARCH_ADD_TRAMP_KEY(name)
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)); \
+ ARCH_ADD_TRAMP_KEY(name)
+
+#elif defined(CONFIG_HAVE_STATIC_CALL)
+
+static inline int static_call_init(void) { return 0; }
+
+#define DEFINE_STATIC_CALL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = _func, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+
+#define DEFINE_STATIC_CALL_NULL(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = NULL, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
+#define static_call_cond(name) (void)__static_call(name)
+
+static inline
+void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+{
+ cpus_read_lock();
+ WRITE_ONCE(key->func, func);
+ arch_static_call_transform(NULL, tramp, func, false);
+ cpus_read_unlock();
+}
+
+static inline int static_call_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+
+extern long __static_call_return0(void);
+
+#define EXPORT_STATIC_CALL(name) \
+ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name) \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+#else /* Generic implementation */
+
+static inline int static_call_init(void) { return 0; }
+
+static inline long __static_call_return0(void)
+{
+ return 0;
+}
+
+#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = _func_init, \
+ }
+
+#define DEFINE_STATIC_CALL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, _func)
+
+#define DEFINE_STATIC_CALL_NULL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
+
+static inline void __static_call_nop(void) { }
+
+/*
+ * This horrific hack takes care of two things:
+ *
+ * - it ensures the compiler will only load the function pointer ONCE,
+ * which avoids a reload race.
+ *
+ * - it ensures the argument evaluation is unconditional, similar
+ * to the HAVE_STATIC_CALL variant.
+ *
+ * Sadly current GCC/Clang (10 for both) do not optimize this properly
+ * and will emit an indirect call for the NULL case :-(
+ */
+#define __static_call_cond(name) \
+({ \
+ void *func = READ_ONCE(STATIC_CALL_KEY(name).func); \
+ if (!func) \
+ func = &__static_call_nop; \
+ (typeof(STATIC_CALL_TRAMP(name))*)func; \
+})
+
+#define static_call_cond(name) (void)__static_call_cond(name)
+
+static inline
+void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+{
+ WRITE_ONCE(key->func, func);
+}
+
+static inline int static_call_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+
+#define EXPORT_STATIC_CALL(name) EXPORT_SYMBOL(STATIC_CALL_KEY(name))
+#define EXPORT_STATIC_CALL_GPL(name) EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
+
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
+#endif /* _LINUX_STATIC_CALL_H */
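Editorial usage sketch tying the pieces above together (all names hypothetical): a NULL-able hook defined with DEFINE_STATIC_CALL_NULL(), typed from a real function's prototype, and called through static_call_cond() so the call is elided until a target is installed.

#include <linux/static_call.h>
#include <linux/printk.h>

static void log_event(int code)
{
	pr_info("event %d\n", code);
}

/* Typed from log_event()'s prototype, but starts out NULL: */
DEFINE_STATIC_CALL_NULL(event_hook, log_event);

static void raise_event(int code)
{
	static_call_cond(event_hook)(code);	/* no-op until updated */
}

static void enable_logging(void)
{
	static_call_update(event_hook, log_event);
}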
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
new file mode 100644
index 000000000000..5a00b8b2cf9f
--- /dev/null
+++ b/include/linux/static_call_types.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _STATIC_CALL_TYPES_H
+#define _STATIC_CALL_TYPES_H
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/compiler.h>
+
+#define STATIC_CALL_KEY_PREFIX __SCK__
+#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
+#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
+#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name))
+
+#define STATIC_CALL_TRAMP_PREFIX __SCT__
+#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
+#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
+#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
+#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
+
+/*
+ * Flags in the low bits of static_call_site::key.
+ */
+#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
+#define STATIC_CALL_SITE_INIT 2UL /* init section */
+#define STATIC_CALL_SITE_FLAGS 3UL
+
+/*
+ * The static call site table needs to be created by external tooling (objtool
+ * or a compiler plugin).
+ */
+struct static_call_site {
+ s32 addr;
+ s32 key;
+};
+
+#define DECLARE_STATIC_CALL(name, func) \
+ extern struct static_call_key STATIC_CALL_KEY(name); \
+ extern typeof(func) STATIC_CALL_TRAMP(name);
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+/*
+ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+ * the symbol table so that objtool can reference it when it generates the
+ * .static_call_sites section.
+ */
+#define __STATIC_CALL_ADDRESSABLE(name) \
+ __ADDRESSABLE(STATIC_CALL_KEY(name))
+
+#define __static_call(name) \
+({ \
+ __STATIC_CALL_ADDRESSABLE(name); \
+ __raw_static_call(name); \
+})
+
+struct static_call_key {
+ void *func;
+ union {
+ /* bit 0: 0 = mods, 1 = sites */
+ unsigned long type;
+ struct static_call_mod *mods;
+ struct static_call_site *sites;
+ };
+};
+
+#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#define __STATIC_CALL_ADDRESSABLE(name)
+#define __static_call(name) __raw_static_call(name)
+
+struct static_call_key {
+ void *func;
+};
+
+#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#ifdef MODULE
+#define __STATIC_CALL_MOD_ADDRESSABLE(name)
+#define static_call_mod(name) __raw_static_call(name)
+#else
+#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
+#define static_call_mod(name) __static_call(name)
+#endif
+
+#define static_call(name) __static_call(name)
+
+#else
+
+struct static_call_key {
+ void *func;
+};
+
+#define static_call(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
+#endif /* _STATIC_CALL_TYPES_H */
diff --git a/include/linux/stdarg.h b/include/linux/stdarg.h
new file mode 100644
index 000000000000..c8dc7f4f390c
--- /dev/null
+++ b/include/linux/stdarg.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_STDARG_H
+#define _LINUX_STDARG_H
+
+typedef __builtin_va_list va_list;
+#define va_start(v, l) __builtin_va_start(v, l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v, T) __builtin_va_arg(v, T)
+#define va_copy(d, s) __builtin_va_copy(d, s)
+
+#endif
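Editorial sketch of the standard pattern these macros support (the function is invented): walk a variable argument list between va_start() and va_end().

#include <linux/stdarg.h>

static int sum_ints(int count, ...)
{
	va_list args;
	int i, total = 0;

	va_start(args, count);
	for (i = 0; i < count; i++)
		total += va_arg(args, int);
	va_end(args);

	return total;
}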
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 998a4ba28eba..929d67710cc5 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -13,14 +13,10 @@ enum {
};
#undef offsetof
-#ifdef __compiler_offsetof
-#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
-#else
-#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
-#endif
+#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
/**
- * sizeof_field(TYPE, MEMBER)
+ * sizeof_field() - Report the size of a struct field in bytes
*
* @TYPE: The structure containing the field of interest
* @MEMBER: The field to return the size of
@@ -28,7 +24,7 @@ enum {
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/**
- * offsetofend(TYPE, MEMBER)
+ * offsetofend() - Report the offset of a struct field within the struct
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
@@ -36,4 +32,65 @@ enum {
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_attr() - Create a struct_group() with trailing attributes
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes to apply
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes structure attributes argument.
+ */
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+ __struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ __DECLARE_FLEX_ARRAY(TYPE, NAME)
+
#endif
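Editorial sketch of struct_group() in use (struct and field names invented): the anonymous mirror keeps normal member access working, while the named mirror gives memcpy() a single bounded region covering the grouped members.

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct packet {
	u16 len;
	struct_group(header,
		u8 src[6];
		u8 dst[6];
		u16 proto;
	);
	u8 payload[];
};

static void copy_header(struct packet *to, const struct packet *from)
{
	/* One bounded copy covering src, dst, and proto: */
	memcpy(&to->header, &from->header, sizeof(to->header));
}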
diff --git a/include/linux/stm.h b/include/linux/stm.h
index c6f577ab6f21..3b22689512be 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -57,7 +57,7 @@ struct stm_device;
*
* Normally, an STM device will have a range of masters available to software
* and the rest being statically assigned to various hardware trace sources.
- * The former is defined by the the range [@sw_start..@sw_end] of the device
+ * The former is defined by the range [@sw_start..@sw_end] of the device
* description. That is, the lowest master that can be allocated to software
 * writers is @sw_start and data from this writer will appear as @sw_start
* master in the STP stream.
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index bd964c31d333..dfa1828cd756 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -76,11 +76,14 @@
| DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
| DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+struct stmmac_priv;
+
 /* Platform data for platform device structure's platform_data field */
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
unsigned int has_xpcs;
+ unsigned int xpcs_an_inband;
int *irqs;
int probed_phy_irq;
bool needs_reset;
@@ -95,6 +98,8 @@ struct stmmac_dma_cfg {
int mixed_burst;
bool aal;
bool eame;
+ bool multi_msi_en;
+ bool dche;
};
#define AXI_BLEN 7
@@ -112,7 +117,9 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
+ struct mutex lock;
int enable;
+ u32 btr_reserve[2];
u32 btr_offset[2];
u32 btr[2];
u32 ctr[2];
@@ -120,6 +127,7 @@ struct stmmac_est {
u32 gcl_unaligned[EST_GCL];
u32 gcl[EST_GCL];
u32 gcl_size;
+ u32 max_sdu[MTL_MAX_TX_QUEUES];
};
struct stmmac_rxq_cfg {
@@ -132,6 +140,7 @@ struct stmmac_rxq_cfg {
struct stmmac_txq_cfg {
u32 weight;
+ bool coe_unsupported;
u8 mode_to_use;
/* Credit Base Shaper parameters */
u32 send_slope;
@@ -143,17 +152,103 @@ struct stmmac_txq_cfg {
int tbs_en;
};
+/* FPE link state */
+enum stmmac_fpe_state {
+ FPE_STATE_OFF = 0,
+ FPE_STATE_CAPABLE = 1,
+ FPE_STATE_ENTERING_ON = 2,
+ FPE_STATE_ON = 3,
+};
+
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+enum stmmac_fpe_task_state_t {
+ __FPE_REMOVING,
+ __FPE_TASK_SCHED,
+};
+
+struct stmmac_fpe_cfg {
+ bool enable; /* FPE enable */
+ bool hs_enable; /* FPE handshake enable */
+ enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
+ enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+};
+
+struct stmmac_safety_feature_cfg {
+ u32 tsoee;
+ u32 mrxpee;
+ u32 mestee;
+ u32 mrxee;
+ u32 mtxee;
+ u32 epsi;
+ u32 edpp;
+ u32 prtyen;
+ u32 tmouten;
+};
+
+/* Addresses that may be customized by a platform */
+struct dwmac4_addrs {
+ u32 dma_chan;
+ u32 dma_chan_offset;
+ u32 mtl_chan;
+ u32 mtl_chan_offset;
+ u32 mtl_ets_ctrl;
+ u32 mtl_ets_ctrl_offset;
+ u32 mtl_txq_weight;
+ u32 mtl_txq_weight_offset;
+ u32 mtl_send_slp_cred;
+ u32 mtl_send_slp_cred_offset;
+ u32 mtl_high_cred;
+ u32 mtl_high_cred_offset;
+ u32 mtl_low_cred;
+ u32 mtl_low_cred_offset;
+};
+
+#define STMMAC_FLAG_HAS_INTEGRATED_PCS BIT(0)
+#define STMMAC_FLAG_SPH_DISABLE BIT(1)
+#define STMMAC_FLAG_USE_PHY_WOL BIT(2)
+#define STMMAC_FLAG_HAS_SUN8I BIT(3)
+#define STMMAC_FLAG_TSO_EN BIT(4)
+#define STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP BIT(5)
+#define STMMAC_FLAG_VLAN_FAIL_Q_EN BIT(6)
+#define STMMAC_FLAG_MULTI_MSI_EN BIT(7)
+#define STMMAC_FLAG_EXT_SNAPSHOT_EN BIT(8)
+#define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9)
+#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10)
+#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
+#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12)
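These flags fold what used to be individual bool members (see has_sun8i, tso_en and en_tx_lpi_clockgating being removed further down) into a single bitmask. A hedged usage sketch with hypothetical variable names:

	/* platform glue sets capability bits ... */
	plat->flags |= STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE;

	/* ... and the core driver tests them */
	if (plat->flags & STMMAC_FLAG_TSO_EN)
		dev->hw_features |= NETIF_F_TSO;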
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
- int interface;
+ /* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media
+ * ^ ^
+ * mac_interface phy_interface
+ *
+ * mac_interface is the MAC-side interface, which may be the same
+ * as phy_interface if there is no intervening PCS. If there is a
+ * PCS, then mac_interface describes the interface mode between the
+ * MAC and PCS, and phy_interface describes the interface mode
+ * between the PCS and PHY.
+ */
+ phy_interface_t mac_interface;
+ /* phy_interface is the PHY-side interface - the interface used by
+ * an attached PHY.
+ */
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
- struct device_node *phylink_node;
+ struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_est *est;
+ struct stmmac_fpe_cfg *fpe_cfg;
+ struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
int has_gmac;
int enh_desc;
@@ -170,33 +265,54 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
+ u32 host_dma_width;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;
u8 tx_sched_algorithm;
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
- void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
+ void (*speed_mode_2500)(struct net_device *ndev, void *priv);
+ void (*ptp_clk_freq_config)(struct stmmac_priv *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
struct mac_device_info *(*setup)(void *priv);
+ int (*clks_config)(void *priv, bool enabled);
+ int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
+ void *ctx);
+ void (*dump_debug_regs)(void *priv);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
unsigned int clk_ref_rate;
+ unsigned int mult_fact_100ns;
s32 ptp_max_adj;
+ u32 cdc_error_adj;
struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
int has_gmac4;
- bool has_sun8i;
- bool tso_en;
int rss_en;
int mac_port_sel_speed;
- bool en_tx_lpi_clockgating;
int has_xgmac;
+ u8 vlan_fail_q;
+ unsigned int eee_usecs_rate;
+ struct pci_dev *pdev;
+ int int_snapshot_num;
+ int msi_mac_vec;
+ int msi_wol_vec;
+ int msi_lpi_vec;
+ int msi_sfty_ce_vec;
+ int msi_sfty_ue_vec;
+ int msi_rx_base_vec;
+ int msi_tx_base_vec;
+ const struct dwmac4_addrs *dwmac4_addrs;
+ unsigned int flags;
};
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 76d8b09384a7..ea7a74ea7389 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -24,6 +24,7 @@ typedef int (*cpu_stop_fn_t)(void *arg);
struct cpu_stop_work {
struct list_head list; /* cpu_stopper->works */
cpu_stop_fn_t fn;
+ unsigned long caller;
void *arg;
struct cpu_stop_done *done;
};
@@ -36,6 +37,8 @@ void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
+extern void print_stop_info(const char *log_lvl, struct task_struct *task);
+
#else /* CONFIG_SMP */
#include <linux/workqueue.h>
@@ -80,6 +83,8 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
return false;
}
+static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }
+
#endif /* CONFIG_SMP */
/*
@@ -119,11 +124,27 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
*/
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
+/**
+ * stop_core_cpuslocked: - stop all threads on just one core
+ * @cpu: any cpu in the targeted core
+ * @fn: the function to run
+ * @data: the data ptr for @fn()
+ *
+ * Same as above, but instead of every CPU, only the logical CPUs of a
+ * single core are affected.
+ *
+ * Context: Must be called from within a cpus_read_lock() protected region.
+ *
+ * Return: 0 if all executions of @fn returned 0, or a non-zero return
+ * value if any of them returned non-zero.
+ */
+int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
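A hedged call-site sketch (the callback is hypothetical); note the cpus_read_lock() context requirement stated above:

	static int core_quiesce(void *arg)
	{
		/* Runs on every sibling thread of the chosen core,
		 * while the other threads spin in the stopper. */
		return 0;
	}

	cpus_read_lock();
	ret = stop_core_cpuslocked(cpu, core_quiesce, NULL);
	cpus_read_unlock();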
+
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
-static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
unsigned long flags;
@@ -134,14 +155,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
return ret;
}
-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int
+stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
return stop_machine_cpuslocked(fn, data, cpus);
}
-static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int
+stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus)
{
return stop_machine(fn, data, cpus);
}
diff --git a/include/linux/string.h b/include/linux/string.h
index 9b7a0632e87a..9ba8b4597009 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,11 +2,15 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
-
+#include <linux/args.h>
+#include <linux/array_size.h>
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
-#include <stdarg.h>
+#include <linux/err.h> /* for ERR_PTR() */
+#include <linux/errno.h> /* for E2BIG */
+#include <linux/overflow.h> /* for check_mul_overflow() */
+#include <linux/stdarg.h>
#include <uapi/linux/string.h>
extern char *strndup_user(const char __user *, long);
@@ -14,6 +18,44 @@ extern void *memdup_user(const void __user *, size_t);
extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
+/**
+ * memdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: an ERR_PTR() on failure. Result is physically
+ * contiguous, to be freed by kfree().
+ */
+static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return memdup_user(src, nbytes);
+}
+
+/**
+ * vmemdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: an ERR_PTR() on failure. The result may not be
+ * physically contiguous. Use kvfree() to free.
+ */
+static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return vmemdup_user(src, nbytes);
+}
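A minimal caller sketch (the struct name is hypothetical); the check_mul_overflow() above means callers no longer have to compute n * size safely by hand:

	struct entry *entries;

	entries = memdup_array_user(uptr, n, sizeof(*entries));
	if (IS_ERR(entries))
		return PTR_ERR(entries);
	/* ... use entries[0..n-1] ... */
	kfree(entries);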
+
/*
* Include machine specific inline routines
*/
@@ -25,15 +67,79 @@ extern char * strcpy(char *,const char *);
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
-#ifndef __HAVE_ARCH_STRSCPY
-ssize_t strscpy(char *, const char *, size_t);
-#endif
+ssize_t sized_strscpy(char *, const char *, size_t);
+
+/*
+ * The 2 argument style can only be used when dst is an array with a
+ * known size.
+ */
+#define __strscpy0(dst, src, ...) \
+ sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst))
+#define __strscpy1(dst, src, size) sized_strscpy(dst, src, size)
-/* Wraps calls to strscpy()/memset(), no arch specific code required */
-ssize_t strscpy_pad(char *dest, const char *src, size_t count);
+#define __strscpy_pad0(dst, src, ...) \
+ sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst))
+#define __strscpy_pad1(dst, src, size) sized_strscpy_pad(dst, src, size)
+
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ * @dst: Where to copy the string to
+ * @src: Where to copy the string from
+ * @...: Size of destination buffer (optional)
+ *
+ * Copy the source string @src, or as much of it as fits, into the
+ * destination @dst buffer. The behavior is undefined if the string
+ * buffers overlap. The destination @dst buffer is always NUL terminated,
+ * unless it's zero-sized.
+ *
+ * The size argument @... is only required when @dst is not an array, or
+ * when the copy needs to be smaller than sizeof(@dst).
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zero padded. If padding is desired please use strscpy_pad().
+ *
+ * Returns the number of characters copied in @dst (not including the
+ * trailing %NUL) or -E2BIG if the destination size is 0 or the copy from @src was
+ * truncated.
+ */
+#define strscpy(dst, src, ...) \
+ CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
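A short sketch of the two calling styles (buffer names hypothetical); strscpy_pad() below accepts the same forms:

	char name[16];

	/* dst is an array: the size is inferred via sizeof(dst) */
	if (strscpy(name, src) < 0)
		return -E2BIG;	/* src was truncated */

	/* dst is a pointer, or a shorter copy is wanted: pass a size */
	strscpy(buf, src, buflen);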
+
+#define sized_strscpy_pad(dest, src, count) ({ \
+ char *__dst = (dest); \
+ const char *__src = (src); \
+ const size_t __count = (count); \
+ ssize_t __wrote; \
+ \
+ __wrote = sized_strscpy(__dst, __src, __count); \
+ if (__wrote >= 0 && __wrote < __count) \
+ memset(__dst + __wrote + 1, 0, __count - __wrote - 1); \
+ __wrote; \
+})
+
+/**
+ * strscpy_pad() - Copy a C-string into a sized buffer
+ * @dst: Where to copy the string to
+ * @src: Where to copy the string from
+ * @...: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always %NUL terminated, unless it's zero-sized.
+ *
+ * If the source string is shorter than the destination buffer, the
+ * remaining bytes in the buffer will be filled with %NUL bytes.
+ *
+ * For full explanation of why you may want to consider using the
+ * 'strscpy' functions please see the function docstring for strscpy().
+ *
+ * Returns:
+ * * The number of characters copied (not including the trailing %NULs)
+ * * -E2BIG if count is 0 or @src was truncated.
+ */
+#define strscpy_pad(dst, src, ...) \
+ CONCATENATE(__strscpy_pad, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
@@ -161,41 +267,38 @@ extern int bcmp(const void *,const void *,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
-static inline __must_check unsigned long memcpy_mcsafe(void *dst,
- const void *src, size_t cnt)
-{
- memcpy(dst, src, cnt);
- return 0;
-}
-#endif
#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
#endif
+
void *memchr_inv(const void *s, int c, size_t n);
-char *strreplace(char *s, char old, char new);
+char *strreplace(char *str, char old, char new);
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
+extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
+extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp);
+/* lib/argv_split.c */
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
-extern bool sysfs_streq(const char *s1, const char *s2);
-extern int kstrtobool(const char *s, bool *res);
-static inline int strtobool(const char *s, bool *res)
-{
- return kstrtobool(s, res);
-}
+/* lib/cmdline.c */
+extern int get_option(char **str, int *pint);
+extern char *get_options(const char *str, int nints, int *ints);
+extern unsigned long long memparse(const char *ptr, char **retptr);
+extern bool parse_option_str(const char *str, const char *option);
+extern char *next_arg(char *args, char **param, char **val);
+extern bool sysfs_streq(const char *s1, const char *s2);
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
@@ -262,264 +365,97 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
-#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
-#define __RENAME(x) __asm__(#x)
-
-void fortify_panic(const char *name) __noreturn __cold;
-void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
-void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
-void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
-void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
-
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+#include <linux/fortify-string.h>
+#endif
+#ifndef unsafe_memcpy
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ memcpy(dst, src, bytes)
+#endif
-#ifdef CONFIG_KASAN
-extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
-extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
-extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
-extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
-extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
-extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
-extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
-#else
-#define __underlying_memchr __builtin_memchr
-#define __underlying_memcmp __builtin_memcmp
-#define __underlying_memcpy __builtin_memcpy
-#define __underlying_memmove __builtin_memmove
-#define __underlying_memset __builtin_memset
-#define __underlying_strcat __builtin_strcat
-#define __underlying_strcpy __builtin_strcpy
-#define __underlying_strlen __builtin_strlen
-#define __underlying_strncat __builtin_strncat
-#define __underlying_strncpy __builtin_strncpy
-#endif
-
-__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_strncpy(p, q, size);
-}
-
-__FORTIFY_INLINE char *strcat(char *p, const char *q)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (p_size == (size_t)-1)
- return __underlying_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
- fortify_panic(__func__);
- return p;
-}
-
-__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
-{
- __kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
-
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
- (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
- return __underlying_strlen(p);
- ret = strnlen(p, p_size);
- if (p_size <= ret)
- fortify_panic(__func__);
- return ret;
-}
-
-extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
-__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
-{
- size_t p_size = __builtin_object_size(p, 0);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
- if (p_size <= ret && maxlen != ret)
- fortify_panic(__func__);
- return ret;
-}
-
-/* defined after fortified strlen to reuse it */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
-{
- size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __real_strlcpy(p, q, size);
- ret = strlen(q);
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- if (__builtin_constant_p(len) && len >= p_size)
- __write_overflow();
- if (len >= p_size)
- fortify_panic(__func__);
- __underlying_memcpy(p, q, len);
- p[len] = '\0';
- }
- return ret;
-}
-
-/* defined after fortified strlen and strnlen to reuse them */
-__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
-{
- size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __underlying_strncat(p, q, count);
- p_len = strlen(p);
- copy_len = strnlen(q, count);
- if (p_size < p_len + copy_len + 1)
- fortify_panic(__func__);
- __underlying_memcpy(p + p_len, q, copy_len);
- p[p_len + copy_len] = '\0';
- return p;
-}
-
-__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_memset(p, c, size);
-}
-
-__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __write_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memcpy(p, q, size);
-}
-
-__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __write_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memmove(p, q, size);
-}
-
-extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
-__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_memscan(p, c, size);
-}
-
-__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __read_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memcmp(p, q, size);
-}
-
-__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_memchr(p, c, size);
-}
-
-void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
-__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_memchr_inv(p, c, size);
-}
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad);
-extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
-__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_kmemdup(p, size, gfp);
-}
+/**
+ * strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ * @pad: Padding character to fill any remaining bytes of @dest after copy
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * an explicit padding character. If padding is not required, use strtomem().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy_and_pad(dest, _dest_len, src, \
+ strnlen(src, min(_src_len, _dest_len)), pad); \
+} while (0)
-/* defined after fortified strlen and memcpy to reuse them */
-__FORTIFY_INLINE char *strcpy(char *p, const char *q)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __underlying_strcpy(p, q);
- memcpy(p, q, strlen(q) + 1);
- return p;
-}
+/**
+ * strtomem - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * without trailing padding. If padding is required, use strtomem_pad().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
+} while (0)
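A hedged sketch of both helpers on a wire-format buffer (struct name hypothetical):

	struct wire {
		u8 tag[8] __nonstring;	/* not NUL-terminated on the wire */
	} w;

	strtomem_pad(w.tag, "abc", 0);	/* copies 3 bytes, zero-fills 5 */
	strtomem(w.tag, "abcdef");	/* copies 6 bytes, rest untouched */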
-/* Don't use these outside the FORITFY_SOURCE implementation */
-#undef __underlying_memchr
-#undef __underlying_memcmp
-#undef __underlying_memcpy
-#undef __underlying_memmove
-#undef __underlying_memset
-#undef __underlying_strcat
-#undef __underlying_strcpy
-#undef __underlying_strlen
-#undef __underlying_strncat
-#undef __underlying_strncpy
-#endif
+/**
+ * memset_after - Set a value after a struct member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: The struct member after which the byte writing starts
+ *
+ * This is good for clearing padding following the given member.
+ */
+#define memset_after(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetofend(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetofend(typeof(*(obj)), member)); \
+})
/**
- * memcpy_and_pad - Copy one buffer to another with padding
- * @dest: Where to copy to
- * @dest_len: The destination buffer size
- * @src: Where to copy from
- * @count: The number of bytes to copy
- * @pad: Character to use for padding if space is left in destination.
+ * memset_startat - Set a value starting at a member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: struct member to start writing at
+ *
+ * Note that if there is padding between the prior member and the target
+ * member, memset_after() should be used to clear the prior padding.
*/
-static inline void memcpy_and_pad(void *dest, size_t dest_len,
- const void *src, size_t count, int pad)
-{
- if (dest_len > count) {
- memcpy(dest, src, count);
- memset(dest + count, pad, dest_len - count);
- } else
- memcpy(dest, src, dest_len);
-}
+#define memset_startat(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetof(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetof(typeof(*(obj)), member)); \
+})
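A hedged sketch contrasting the two (struct layout hypothetical):

	struct reply {
		u32	id;
		u16	flags;
		/* 2 bytes of implicit padding here */
		u8	reserved[8];
	} r;

	r.id = 1;
	r.flags = 0x2;
	memset_after(&r, 0, flags);	/* clears the padding AND 'reserved' */

	/* By contrast, memset_startat() begins at a named member, so it
	 * would miss the padding that follows 'flags':
	 */
	memset_startat(&r, 0, reserved);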
/**
* str_has_prefix - Test if a string has a given prefix
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
new file mode 100644
index 000000000000..d9ebe20229f8
--- /dev/null
+++ b/include/linux/string_choices.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STRING_CHOICES_H_
+#define _LINUX_STRING_CHOICES_H_
+
+#include <linux/types.h>
+
+static inline const char *str_enable_disable(bool v)
+{
+ return v ? "enable" : "disable";
+}
+
+static inline const char *str_enabled_disabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
+static inline const char *str_hi_lo(bool v)
+{
+ return v ? "hi" : "lo";
+}
+#define str_lo_hi(v) str_hi_lo(!(v))
+
+static inline const char *str_high_low(bool v)
+{
+ return v ? "high" : "low";
+}
+#define str_low_high(v) str_high_low(!(v))
+
+static inline const char *str_read_write(bool v)
+{
+ return v ? "read" : "write";
+}
+#define str_write_read(v) str_read_write(!(v))
+
+static inline const char *str_on_off(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *str_yes_no(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+/**
+ * str_plural - Return the simple pluralization based on English counts
+ * @num: Number used for deciding pluralization
+ *
+ * If @num is 1, returns the empty string, otherwise returns "s".
+ */
+static inline const char *str_plural(size_t num)
+{
+ return num == 1 ? "" : "s";
+}
+
+#endif
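These helpers are trivial, but they replace a recurring and typo-prone ternary pattern in log messages. A usage sketch with hypothetical variables:

	pr_info("TSO %s\n", str_enabled_disabled(tso_on));
	pr_info("found %zu device%s\n", n, str_plural(n));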
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 86f150c2a6b6..e93fbb5b0c01 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -2,29 +2,46 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
+#include <linux/bits.h>
#include <linux/ctype.h>
+#include <linux/string_choices.h>
+#include <linux/string.h>
#include <linux/types.h>
+struct device;
struct file;
struct task_struct;
-/* Descriptions of the types of units to
- * print in */
+static inline bool string_is_terminated(const char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
+/* Descriptions of the types of units to print in */
enum string_size_units {
STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
STRING_UNITS_2, /* use binary powers of 2^10 */
+ STRING_UNITS_MASK = BIT(0),
+
+ /* Modifiers */
+ STRING_UNITS_NO_SPACE = BIT(30),
+ STRING_UNITS_NO_BYTES = BIT(31),
};
-void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
- char *buf, int len);
+int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len);
+
+int parse_int_array_user(const char __user *from, size_t count, int **array);
-#define UNESCAPE_SPACE 0x01
-#define UNESCAPE_OCTAL 0x02
-#define UNESCAPE_HEX 0x04
-#define UNESCAPE_SPECIAL 0x08
+#define UNESCAPE_SPACE BIT(0)
+#define UNESCAPE_OCTAL BIT(1)
+#define UNESCAPE_HEX BIT(2)
+#define UNESCAPE_SPECIAL BIT(3)
#define UNESCAPE_ANY \
(UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
+#define UNESCAPE_ALL_MASK GENMASK(3, 0)
+
int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
static inline int string_unescape_inplace(char *buf, unsigned int flags)
@@ -42,22 +59,24 @@ static inline int string_unescape_any_inplace(char *buf)
return string_unescape_any(buf, buf, 0);
}
-#define ESCAPE_SPACE 0x01
-#define ESCAPE_SPECIAL 0x02
-#define ESCAPE_NULL 0x04
-#define ESCAPE_OCTAL 0x08
+#define ESCAPE_SPACE BIT(0)
+#define ESCAPE_SPECIAL BIT(1)
+#define ESCAPE_NULL BIT(2)
+#define ESCAPE_OCTAL BIT(3)
#define ESCAPE_ANY \
(ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
-#define ESCAPE_NP 0x10
+#define ESCAPE_NP BIT(4)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
-#define ESCAPE_HEX 0x20
+#define ESCAPE_HEX BIT(5)
+#define ESCAPE_NA BIT(6)
+#define ESCAPE_NAP BIT(7)
+#define ESCAPE_APPEND BIT(8)
+
+#define ESCAPE_ALL_MASK GENMASK(8, 0)
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only);
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz);
-
static inline int string_escape_mem_any_np(const char *src, size_t isz,
char *dst, size_t osz, const char *only)
{
@@ -94,4 +113,11 @@ char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp);
+
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
+void kfree_strarray(char **array, size_t n);
+
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);
+
#endif
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
index 841cec8ed525..0e84cbe65270 100644
--- a/include/linux/stringify.h
+++ b/include/linux/stringify.h
@@ -9,4 +9,6 @@
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
+#define FILE_LINE __FILE__ ":" __stringify(__LINE__)
+
#endif /* !__LINUX_STRINGIFY_H */
diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h
index 3a11fa41a131..c505f30e8b68 100644
--- a/include/linux/sungem_phy.h
+++ b/include/linux/sungem_phy.h
@@ -2,6 +2,8 @@
#ifndef __SUNGEM_PHY_H__
#define __SUNGEM_PHY_H__
+#include <linux/types.h>
+
struct mii_phy;
/* Operations supported by any kind of PHY */
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 98da816b5fc2..61e58327b1aa 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -99,6 +99,7 @@ struct rpc_auth_create_args {
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
+#define RPCAUTH_LOOKUP_ASYNC 0x02 /* Don't block waiting for memory */
/*
* Client authentication ops
@@ -119,6 +120,7 @@ struct rpc_authops {
struct rpcsec_gss_info *);
int (*key_timeout)(struct rpc_auth *,
struct rpc_cred *);
+ int (*ping)(struct rpc_clnt *clnt);
};
struct rpc_credops {
@@ -143,6 +145,7 @@ struct rpc_credops {
extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
+extern const struct rpc_authops authtls_ops;
int __init rpc_init_authunix(void);
int __init rpcauth_init_module(void);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index d796058cdff2..f22bf915dcf6 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
(c) 2008 NetApp. All Rights Reserved.
-NetApp provides this source code under the GPL v2 License.
-The GPL v2 license is available at
-http://opensource.org/licenses/gpl-license.php.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
@@ -34,7 +20,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
-void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+ const struct rpc_timeout *to);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
@@ -83,4 +70,3 @@ static inline void xprt_free_bc_request(struct rpc_rqst *req)
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
#endif /* _LINUX_SUNRPC_BC_XPRT_H */
-
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 10891b70fc7b..35766963dd14 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -14,6 +14,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/kstrtox.h>
#include <linux/proc_fs.h>
/*
@@ -45,7 +46,8 @@
*/
struct cache_head {
struct hlist_node cache_list;
- time64_t expiry_time; /* After time time, don't use the data */
+ time64_t expiry_time; /* After time expiry_time, don't use
+ * the data */
time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was
* sent, else this is when update was
 * received, though it is always set to
@@ -54,10 +56,14 @@ struct cache_head {
struct kref ref;
unsigned long flags;
};
-#define CACHE_VALID 0 /* Entry contains valid data */
-#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
-#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
-#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
+
+/* cache_head.flags */
+enum {
+ CACHE_VALID, /* Entry contains valid data */
+ CACHE_NEGATIVE, /* Negative entry - there is no match for the key */
+	CACHE_PENDING,	/* An upcall has been sent but no reply received yet */
+ CACHE_CLEANED, /* Entry has been cleaned from cache */
+};
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
@@ -119,17 +125,17 @@ struct cache_detail {
struct net *net;
};
-
/* this must be embedded in any request structure that
* identifies an object that will want a callback on
* a cache fill
*/
struct cache_req {
struct cache_deferred_req *(*defer)(struct cache_req *req);
- int thread_wait; /* How long (jiffies) we can block the
- * current thread to wait for updates.
- */
+ unsigned long thread_wait; /* How long (jiffies) we can block the
+ * current thread to wait for updates.
+ */
};
+
/* this must be embedded in a deferred_request that is being
* delayed awaiting cache-fill
*/
@@ -298,17 +304,18 @@ static inline int get_time(char **bpp, time64_t *time)
return 0;
}
-static inline time64_t get_expiry(char **bpp)
+static inline int get_expiry(char **bpp, time64_t *rvp)
{
- time64_t rv;
+ int error;
struct timespec64 boot;
- if (get_time(bpp, &rv))
- return 0;
- if (rv < 0)
- return 0;
+ error = get_time(bpp, rvp);
+ if (error)
+ return error;
+
getboottime64(&boot);
- return rv - boot.tv_sec;
+ (*rvp) -= boot.tv_sec;
+ return 0;
}
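The signature change means callers can now distinguish a parse error from a legitimately zero expiry. A hedged caller sketch (variable names hypothetical):

	time64_t expiry;
	int err;

	err = get_expiry(&bp, &expiry);
	if (err)
		return err;	/* malformed input, previously silent */
	/* expiry is now relative to boot time */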
#endif /* _LINUX_SUNRPC_CACHE_H_ */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 02e7a5863d28..5321585c778f 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -14,6 +14,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/refcount.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
@@ -29,15 +30,23 @@
#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
+struct rpc_sysfs_client {
+ struct kobject kobject;
+ struct net *net;
+ struct rpc_clnt *clnt;
+ struct rpc_xprt_switch *xprt_switch;
+};
+
/*
* The high-level client handle
*/
struct rpc_clnt {
- atomic_t cl_count; /* Number of references */
+ refcount_t cl_count; /* Number of references */
unsigned int cl_clid; /* client id */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
+ atomic_t cl_pid; /* task PID counter */
spinlock_t cl_lock; /* spinlock */
struct rpc_xprt __rcu * cl_xprt; /* transport */
const struct rpc_procinfo *cl_procinfo; /* procedure info */
@@ -54,7 +63,9 @@ struct rpc_clnt {
cl_discrtry : 1,/* disconnect before retry */
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
- cl_chatty : 1;/* be verbose */
+ cl_chatty : 1,/* be verbose */
+ cl_shutdown : 1;/* rpc immediate -EIO */
+ struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
@@ -71,6 +82,7 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
+ struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */
/* cl_work is only needed after cl_xpi is no longer used,
* and that are of similar size
*/
@@ -79,6 +91,8 @@ struct rpc_clnt {
struct work_struct cl_work;
};
const struct cred *cl_cred;
+ unsigned int cl_max_connect; /* max number of transports not to the same IP */
+ struct super_block *pipefs_sb;
};
/*
@@ -125,6 +139,7 @@ struct rpc_create_args {
const char *servername;
const char *nodename;
const struct rpc_program *program;
+ struct rpc_stat *stats;
u32 prognumber; /* overrides program->number */
u32 version;
rpc_authflavor_t authflavor;
@@ -133,6 +148,10 @@ struct rpc_create_args {
char *client_name;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
const struct cred *cred;
+ unsigned int max_connect;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct rpc_add_xprt_test {
@@ -154,6 +173,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
+#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -227,13 +247,18 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *,
struct rpc_xprt_switch *,
struct rpc_xprt *,
void *);
+void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *);
+void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
+ struct rpc_add_xprt_test *);
const char *rpc_proc_name(const struct rpc_task *task);
-void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
+void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
+void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
+void rpc_clnt_disconnect(struct rpc_clnt *clnt);
void rpc_cleanup_clids(void);
static inline int rpc_reply_expected(struct rpc_task *task)
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index e8f8ffe7448b..78a80bf3fdcb 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/sunrpc/gss_krb5_types.h
- *
* Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
* lib/gssapi/krb5/gssapiP_krb5.h, and others
*
@@ -36,6 +34,9 @@
*
*/
+#ifndef _LINUX_SUNRPC_GSS_KRB5_H
+#define _LINUX_SUNRPC_GSS_KRB5_H
+
#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
@@ -44,80 +45,15 @@
/* Length of constant used in key derivation */
#define GSS_KRB5_K5CLENGTH (5)
-/* Maximum key length (in bytes) for the supported crypto algorithms*/
+/* Maximum key length (in bytes) for the supported crypto algorithms */
#define GSS_KRB5_MAX_KEYLEN (32)
-/* Maximum checksum function output for the supported crypto algorithms */
-#define GSS_KRB5_MAX_CKSUM_LEN (20)
+/* Maximum checksum function output for the supported enctypes */
+#define GSS_KRB5_MAX_CKSUM_LEN (24)
/* Maximum blocksize for the supported crypto algorithms */
#define GSS_KRB5_MAX_BLOCKSIZE (16)
-struct krb5_ctx;
-
-struct gss_krb5_enctype {
- const u32 etype; /* encryption (key) type */
- const u32 ctype; /* checksum type */
- const char *name; /* "friendly" name */
- const char *encrypt_name; /* crypto encrypt name */
- const char *cksum_name; /* crypto checksum name */
- const u16 signalg; /* signing algorithm */
- const u16 sealalg; /* sealing algorithm */
- const u32 blocksize; /* encryption blocksize */
- const u32 conflen; /* confounder length
- (normally the same as
- the blocksize) */
- const u32 cksumlength; /* checksum length */
- const u32 keyed_cksum; /* is it a keyed cksum? */
- const u32 keybytes; /* raw key len, in bytes */
- const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
- void *iv, void *in, void *out,
- int length); /* encryption function */
- u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
- void *iv, void *in, void *out,
- int length); /* decryption function */
- u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *in,
- struct xdr_netobj *out); /* complete key generation */
- u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
- struct xdr_buf *buf,
- struct page **pages); /* v2 encryption function */
- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
- struct xdr_buf *buf, u32 *headskip,
- u32 *tailskip); /* v2 decryption function */
-};
-
-/* krb5_ctx flags definitions */
-#define KRB5_CTX_FLAG_INITIATOR 0x00000001
-#define KRB5_CTX_FLAG_CFX 0x00000002
-#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
-
-struct krb5_ctx {
- int initiate; /* 1 = initiating, 0 = accepting */
- u32 enctype;
- u32 flags;
- const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_sync_skcipher *enc;
- struct crypto_sync_skcipher *seq;
- struct crypto_sync_skcipher *acceptor_enc;
- struct crypto_sync_skcipher *initiator_enc;
- struct crypto_sync_skcipher *acceptor_enc_aux;
- struct crypto_sync_skcipher *initiator_enc_aux;
- u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
- u8 cksum[GSS_KRB5_MAX_KEYLEN];
- atomic_t seq_send;
- atomic64_t seq_send64;
- time64_t endtime;
- struct xdr_netobj mech_used;
- u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
- u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
- u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
-};
-
/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)
@@ -141,17 +77,21 @@ enum sgn_alg {
SGN_ALG_MD2_5 = 0x0001,
SGN_ALG_DES_MAC = 0x0002,
SGN_ALG_3 = 0x0003, /* not published */
- SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */
SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004
};
enum seal_alg {
SEAL_ALG_NONE = 0xffff,
SEAL_ALG_DES = 0x0000,
SEAL_ALG_1 = 0x0001, /* not published */
- SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */
SEAL_ALG_DES3KD = 0x0002
};
+/*
+ * These values are assigned by IANA and published via the
+ * subregistry at the link below:
+ *
+ * https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-2
+ */
#define CKSUMTYPE_CRC32 0x0001
#define CKSUMTYPE_RSA_MD4 0x0002
#define CKSUMTYPE_RSA_MD4_DES 0x0003
@@ -162,6 +102,10 @@ enum seal_alg {
#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define CKSUMTYPE_CMAC_CAMELLIA128 0x0011
+#define CKSUMTYPE_CMAC_CAMELLIA256 0x0012
+#define CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
+#define CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
/* from gssapi_err_krb5.h */
@@ -182,6 +126,11 @@ enum seal_alg {
/* per Kerberos v5 protocol spec crypto types from the wire.
* these get mapped to linux kernel crypto routines.
+ *
+ * These values are assigned by IANA and published via the
+ * subregistry at the link below:
+ *
+ * https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-1
*/
#define ENCTYPE_NULL 0x0000
#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
@@ -195,8 +144,12 @@ enum seal_alg {
#define ENCTYPE_DES3_CBC_SHA1 0x0010
#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
+#define ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
#define ENCTYPE_ARCFOUR_HMAC 0x0017
#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
+#define ENCTYPE_CAMELLIA256_CTS_CMAC 0x001A
#define ENCTYPE_UNKNOWN 0x01ff
/*
@@ -218,112 +171,4 @@ enum seal_alg {
#define KG_USAGE_INITIATOR_SEAL (24)
#define KG_USAGE_INITIATOR_SIGN (25)
-/*
- * This compile-time check verifies that we will not exceed the
- * slack space allotted by the client and server auth_gss code
- * before they call gss_wrap().
- */
-#define GSS_KRB5_MAX_SLACK_NEEDED \
- (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
- + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
- + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
- + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
- + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
- + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
- + 4 + 4 /* RPC verifier */ \
- + GSS_KRB5_TOK_HDR_LEN \
- + GSS_KRB5_MAX_CKSUM_LEN)
-
-u32
-make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *cksumkey,
- unsigned int usage, struct xdr_netobj *cksumout);
-
-u32
-make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *key,
- unsigned int usage, struct xdr_netobj *cksum);
-
-u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
- struct xdr_netobj *);
-
-u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
- struct xdr_netobj *);
-
-u32
-gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
- struct xdr_buf *outbuf, struct page **pages);
-
-u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
- struct xdr_buf *buf);
-
-
-u32
-krb5_encrypt(struct crypto_sync_skcipher *key,
- void *iv, void *in, void *out, int length);
-
-u32
-krb5_decrypt(struct crypto_sync_skcipher *key,
- void *iv, void *in, void *out, int length);
-
-int
-gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
- int offset, struct page **pages);
-
-int
-gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
- int offset);
-
-s32
-krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *key,
- int direction,
- u32 seqnum, unsigned char *cksum, unsigned char *buf);
-
-s32
-krb5_get_seq_num(struct krb5_ctx *kctx,
- unsigned char *cksum,
- unsigned char *buf, int *direction, u32 *seqnum);
-
-int
-xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
-
-u32
-krb5_derive_key(const struct gss_krb5_enctype *gk5e,
- const struct xdr_netobj *inkey,
- struct xdr_netobj *outkey,
- const struct xdr_netobj *in_constant,
- gfp_t gfp_mask);
-
-u32
-gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key);
-
-u32
-gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key);
-
-u32
-gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
- struct xdr_buf *buf,
- struct page **pages);
-
-u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
- struct xdr_buf *buf, u32 *plainoffset,
- u32 *plainlen);
-
-int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *cipher,
- unsigned char *cksum);
-
-int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *cipher,
- s32 seqnum);
-void
-gss_krb5_make_confounder(char *p, u32 conflen);
+#endif /* _LINUX_SUNRPC_GSS_KRB5_H */
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
deleted file mode 100644
index 981c89cef19d..000000000000
--- a/include/linux/sunrpc/gss_krb5_enctypes.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Define the string that exports the set of kernel-supported
- * Kerberos enctypes. This list is sent via upcall to gssd, and
- * is also exposed via the nfsd /proc API. The consumers generally
- * treat this as an ordered list, where the first item in the list
- * is the most preferred.
- */
-
-#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
-#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
-
-#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
-
-/*
- * NB: This list includes encryption types that were deprecated
- * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC).
- *
- * ENCTYPE_AES256_CTS_HMAC_SHA1_96
- * ENCTYPE_AES128_CTS_HMAC_SHA1_96
- * ENCTYPE_DES3_CBC_SHA1
- * ENCTYPE_ARCFOUR_HMAC
- */
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23"
-
-#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
-
-/*
- * NB: This list includes encryption types that were deprecated
- * by RFC 8429 and RFC 6649.
- *
- * ENCTYPE_AES256_CTS_HMAC_SHA1_96
- * ENCTYPE_AES128_CTS_HMAC_SHA1_96
- * ENCTYPE_DES3_CBC_SHA1
- * ENCTYPE_ARCFOUR_HMAC
- * ENCTYPE_DES_CBC_MD5
- * ENCTYPE_DES_CBC_CRC
- * ENCTYPE_DES_CBC_MD4
- */
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
-
-#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
-
-#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index bea40d9f03a1..c4b0eb2b2f04 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -10,9 +10,6 @@
#define RPC_VERSION 2
-/* size of an XDR encoding unit in bytes, i.e. 32bit */
-#define XDR_UNIT (4)
-
/* spec defines authentication flavor as an unsigned 32 bit integer */
typedef u32 rpc_authflavor_t;
@@ -23,6 +20,7 @@ enum rpc_auth_flavors {
RPC_AUTH_DES = 3,
RPC_AUTH_KRB = 4,
RPC_AUTH_GSS = 6,
+ RPC_AUTH_TLS = 7,
RPC_AUTH_MAXFLAVOR = 8,
/* pseudoflavors: */
RPC_AUTH_GSS_KRB5 = 390003,
@@ -36,6 +34,11 @@ enum rpc_auth_flavors {
RPC_AUTH_GSS_SPKMP = 390011,
};
+/* Maximum size (in octets) of the machinename in an AUTH_UNIX
+ * credential (per RFC 5531 Appendix A)
+ */
+#define RPC_MAX_MACHINENAME (255)
+
/* Maximum size (in bytes) of an rpc credential or verifier */
#define RPC_MAX_AUTH_SIZE (400)
@@ -143,7 +146,7 @@ typedef __be32 rpc_fraghdr;
/*
* Well-known netids. See:
*
- * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
+ * https://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
*/
#define RPCBIND_NETID_UDP "udp"
#define RPCBIND_NETID_TCP "tcp"
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cd188a527d16..3b35b6f6533a 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -92,6 +92,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
char __user *, size_t);
extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
+/* returns true if the msg is in-flight, i.e., already eaten by the peer */
+static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg)
+{
+	return (msg->copied != 0 && list_empty(&msg->list));
+}
+
struct rpc_clnt;
extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
extern int rpc_remove_client_dir(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index df696efdd675..0c77ba488bba 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -38,6 +38,17 @@ struct rpc_wait {
};
/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+ unsigned long to_initval, /* initial timeout */
+ to_maxval, /* max timeout */
+ to_increment; /* if !exponential */
+ unsigned int to_retries; /* max # of retries */
+ unsigned char to_exponential;
+};
+
+/*
* This is the RPC task struct
*/
struct rpc_task {
@@ -61,8 +72,6 @@ struct rpc_task {
struct rpc_wait tk_wait; /* RPC wait */
} u;
- int tk_rpc_status; /* Result of last RPC operation */
-
/*
* RPC call state
*/
@@ -82,6 +91,8 @@ struct rpc_task {
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
+
+ int tk_rpc_status; /* Result of last RPC operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
@@ -90,8 +101,7 @@ struct rpc_task {
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
- tk_cred_retry : 2,
- tk_rebind_retry : 2;
+ tk_cred_retry : 2;
};
typedef void (*rpc_action)(struct rpc_task *);
@@ -121,9 +131,9 @@ struct rpc_task_setup {
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
-#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
@@ -139,6 +149,7 @@ struct rpc_task_setup {
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
+#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
@@ -148,25 +159,13 @@ struct rpc_task_setup {
#define RPC_TASK_MSG_PIN_WAIT 5
#define RPC_TASK_SIGNALLED 6
-#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_clear_running(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
-#define rpc_clear_queued(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
@@ -198,7 +197,7 @@ struct rpc_wait_queue {
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
- unsigned short qlen; /* total # tasks waiting in queue */
+ unsigned int qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
const char * name;
@@ -217,14 +216,21 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ struct rpc_timeout *timeout);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+void rpc_task_try_cancel(struct rpc_task *task, int error);
void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data);
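A sketch of a caller-supplied @fnmatch predicate for rpc_cancel_tasks() (illustrative; the pid-based policy is an assumption, not from this diff):

static bool example_match_owner(const struct rpc_task *task, const void *data)
{
	return task->tk_owner == *(const pid_t *)data;	/* match by owner pid */
}

/* e.g.: rpc_cancel_tasks(clnt, -EIO, example_match_owner, &pid); */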
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
@@ -263,7 +269,7 @@ int rpc_malloc(struct rpc_task *);
void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
-int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
+int rpc_wait_for_completion_task(struct rpc_task *task);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct net;
void rpc_show_tasks(struct net *);
@@ -273,11 +279,7 @@ void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
extern struct workqueue_struct *xprtiod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
-
-static inline int rpc_wait_for_completion_task(struct rpc_task *task)
-{
- return __rpc_wait_for_completion_task(task, NULL);
-}
+gfp_t rpc_task_gfp_mask(void);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static inline const char * rpc_qname(const struct rpc_wait_queue *q)
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index d94d4f410507..3ce1550d1beb 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -43,22 +43,6 @@ struct net;
#ifdef CONFIG_PROC_FS
int rpc_proc_init(struct net *);
void rpc_proc_exit(struct net *);
-#else
-static inline int rpc_proc_init(struct net *net)
-{
- return 0;
-}
-
-static inline void rpc_proc_exit(struct net *net)
-{
-}
-#endif
-
-#ifdef MODULE
-void rpc_modcount(struct inode *, int);
-#endif
-
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
void rpc_proc_unregister(struct net *,const char *);
void rpc_proc_zero(const struct rpc_program *);
@@ -69,7 +53,14 @@ void svc_proc_unregister(struct net *, const char *);
void svc_seq_show(struct seq_file *,
const struct svc_stat *);
#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void rpc_proc_exit(struct net *net)
+{
+}
static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; }
static inline void rpc_proc_unregister(struct net *net, const char *p) {}
static inline void rpc_proc_zero(const struct rpc_program *p) {}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 386628b36bc7..23617da0e565 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,16 +17,10 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
-
-/* statistics for svc_pool structures */
-struct svc_pool_stats {
- atomic_long_t packets;
- unsigned long sockets_queued;
- atomic_long_t threads_woken;
- atomic_long_t threads_timedout;
-};
+#include <linux/pagevec.h>
/*
*
@@ -40,36 +34,27 @@ struct svc_pool_stats {
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
- spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
- unsigned int sp_nrthreads; /* # of threads in pool */
+ struct lwq sp_xprts; /* pending transports */
+ atomic_t sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
- struct svc_pool_stats sp_stats; /* statistics on pool operation */
-#define SP_TASK_PENDING (0) /* still work to do even if no
- * xprt is queued. */
-#define SP_CONGESTED (1)
- unsigned long sp_flags;
-} ____cacheline_aligned_in_smp;
-
-struct svc_serv;
-
-struct svc_serv_ops {
- /* Callback to use when last thread exits. */
- void (*svo_shutdown)(struct svc_serv *, struct net *);
+ struct llist_head sp_idle_threads; /* idle server threads */
- /* function for service threads to run */
- int (*svo_function)(void *);
+ /* statistics on pool operation */
+ struct percpu_counter sp_messages_arrived;
+ struct percpu_counter sp_sockets_queued;
+ struct percpu_counter sp_threads_woken;
- /* queue up a transport for servicing */
- void (*svo_enqueue_xprt)(struct svc_xprt *);
-
- /* set up thread (or whatever) execution context */
- int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+ unsigned long sp_flags;
+} ____cacheline_aligned_in_smp;
- /* optional module to count when adding threads (pooled svcs only) */
- struct module *svo_module;
+/* bits for sp_flags */
+enum {
+ SP_TASK_PENDING, /* still work to do even if no xprt is queued */
+ SP_NEED_VICTIM, /* One thread needs to agree to exit */
+ SP_VICTIM_REMAINS, /* One thread needs to actually exit */
};
+
/*
* RPC service.
*
@@ -101,28 +86,23 @@ struct svc_serv {
unsigned int sv_nrpools; /* number of thread pools */
struct svc_pool * sv_pools; /* array of thread pools */
- const struct svc_serv_ops *sv_ops; /* server operations */
+ int (*sv_threadfn)(void *data);
+
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
- wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
- * entries in the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/*
- * We use sv_nrthreads as a reference count. svc_destroy() drops
- * this refcount, so we need to bump it up around operations that
- * change the number of threads. Horrible, but there it is.
- * Should be called with the "service mutex" held.
- */
-static inline void svc_get(struct svc_serv *serv)
-{
- serv->sv_nrthreads++;
-}
+/* This is used by pool_stats to find and lock an svc */
+struct svc_info {
+ struct svc_serv *serv;
+ struct mutex *mutex;
+};
+
+void svc_destroy(struct svc_serv **svcp);
/*
* Maximum payload size supported by a kernel RPC server.
@@ -152,16 +132,15 @@ static inline void svc_get(struct svc_serv *serv)
extern u32 svc_max_payload(const struct svc_rqst *rqstp);
/*
- * RPC Requsts and replies are stored in one or more pages.
+ * RPC Requests and replies are stored in one or more pages.
* We maintain an array of pages for each server thread.
* Requests are copied into these pages as they arrive. Remaining
* pages are available to write the reply into.
*
- * Pages are sent using ->sendpage so each server thread needs to
- * allocate more to replace those used in sending. To help keep track
- * of these pages we have a receive list where all pages initialy live,
- * and a send list where pages are moved to when there are to be part
- * of a reply.
+ * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
+ * needs to allocate more to replace those used in sending. To help keep track
+ * of these pages we have a receive list where all pages initially live, and a
+ * send list where pages are moved to when they are to be part of a reply.
*
* We use xdr_buf for holding responses as it fits well with NFS
* read responses (that have a header, and some data pages, and possibly
@@ -181,53 +160,13 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
+ 2 + 1)
-static inline u32 svc_getnl(struct kvec *iov)
-{
- __be32 val, *vp;
- vp = iov->iov_base;
- val = *vp++;
- iov->iov_base = (void*)vp;
- iov->iov_len -= sizeof(__be32);
- return ntohl(val);
-}
-
-static inline void svc_putnl(struct kvec *iov, u32 val)
-{
- __be32 *vp = iov->iov_base + iov->iov_len;
- *vp = htonl(val);
- iov->iov_len += sizeof(__be32);
-}
-
-static inline __be32 svc_getu32(struct kvec *iov)
-{
- __be32 val, *vp;
- vp = iov->iov_base;
- val = *vp++;
- iov->iov_base = (void*)vp;
- iov->iov_len -= sizeof(__be32);
- return val;
-}
-
-static inline void svc_ungetu32(struct kvec *iov)
-{
- __be32 *vp = (__be32 *)iov->iov_base;
- iov->iov_base = (void *)(vp - 1);
- iov->iov_len += sizeof(*vp);
-}
-
-static inline void svc_putu32(struct kvec *iov, __be32 val)
-{
- __be32 *vp = iov->iov_base + iov->iov_len;
- *vp = val;
- iov->iov_len += sizeof(__be32);
-}
-
/*
* The context of a single thread, including the request currently being
* processed.
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -245,14 +184,17 @@ struct svc_rqst {
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
- size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
+ struct xdr_stream rq_arg_stream;
+ struct xdr_stream rq_res_stream;
+ struct page *rq_scratch_page;
struct xdr_buf rq_res;
struct page *rq_pages[RPCSVC_MAXPAGES + 1];
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
+ struct folio_batch rq_fbatch;
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
@@ -262,23 +204,14 @@ struct svc_rqst {
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
int rq_cachetype; /* catering to nfsd */
-#define RQ_SECURE (0) /* secure port */
-#define RQ_LOCAL (1) /* local request */
-#define RQ_USEDEFERRAL (2) /* use deferral */
-#define RQ_DROPME (3) /* drop current reply */
-#define RQ_SPLICE_OK (4) /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
-#define RQ_VICTIM (5) /* about to be shut down */
-#define RQ_BUSY (6) /* request is busy */
-#define RQ_DATA (7) /* request has data */
-#define RQ_AUTHERR (8) /* Request status is auth error */
unsigned long rq_flags; /* flags field */
ktime_t rq_qtime; /* enqueue time */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
+ __be32 *rq_accept_statp;
void * rq_auth_data; /* flavor-specific data */
+ __be32 rq_auth_stat; /* authentication status */
int rq_auth_slack; /* extra space xdr code
* should leave in head
* for krb5i, krb5p.
@@ -294,13 +227,24 @@ struct svc_rqst {
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
- spinlock_t rq_lock; /* per-request lock */
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ unsigned long bc_to_initval;
+ unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
+ unsigned int rq_status_counter; /* RPC processing counter */
+};
+
+/* bits for rq_flags */
+enum {
+ RQ_SECURE, /* secure port */
+ RQ_LOCAL, /* local request */
+ RQ_USEDEFERRAL, /* use deferral */
+ RQ_DROPME, /* drop current reply */
+ RQ_VICTIM, /* Have agreed to shut down */
+ RQ_DATA, /* request has data */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
@@ -338,38 +282,26 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
-/*
- * Check buffer bounds after decoding arguments
+/**
+ * svc_thread_should_stop - check if this thread should stop
+ * @rqstp: the thread that might need to stop
+ *
+ * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
+ * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
+ * the victim using this function. It should then promptly call
+ * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
+ * so the task waiting for a thread to exit can wake and continue.
+ *
+ * Return values:
+ * %true: caller should invoke svc_exit_thread()
+ * %false: caller should do nothing
*/
-static inline int
-xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
+static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
{
- char *cp = (char *)p;
- struct kvec *vec = &rqstp->rq_arg.head[0];
- return cp >= (char*)vec->iov_base
- && cp <= (char*)vec->iov_base + vec->iov_len;
-}
-
-static inline int
-xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
-{
- struct kvec *vec = &rqstp->rq_res.head[0];
- char *cp = (char*)p;
-
- vec->iov_len = cp - (char*)vec->iov_base;
+ if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
- return vec->iov_len <= PAGE_SIZE;
-}
-
-static inline void svc_free_res_pages(struct svc_rqst *rqstp)
-{
- while (rqstp->rq_next_page != rqstp->rq_respages) {
- struct page **pp = --rqstp->rq_next_page;
- if (*pp) {
- put_page(*pp);
- *pp = NULL;
- }
- }
+ return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
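A sketch of the shutdown handshake the kernel-doc above describes, in the shape of a service thread function (assumed form; real thread functions do more per iteration):

static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;

	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);	/* wait for and handle one request */

	/* We were chosen as the victim: clear SP_VICTIM_REMAINS and
	 * wake whoever is waiting for the pool to shrink. */
	svc_exit_thread(rqstp);
	return 0;
}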
struct svc_deferred_req {
@@ -379,15 +311,15 @@ struct svc_deferred_req {
size_t addrlen;
struct sockaddr_storage daddr; /* where reply must come from */
size_t daddrlen;
+ void *xprt_ctxt;
struct cache_deferred_req handle;
- size_t xprt_hlen;
int argslen;
__be32 args[];
};
struct svc_process_info {
union {
- int (*dispatch)(struct svc_rqst *, __be32 *);
+ int (*dispatch)(struct svc_rqst *rqstp);
struct {
unsigned int lovers;
unsigned int hivers;
@@ -407,8 +339,7 @@ struct svc_program {
const struct svc_version **pg_vers; /* version array */
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
- struct svc_stat * pg_stats; /* rpc statistics */
- int (*pg_authenticate)(struct svc_rqst *);
+ enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp);
__be32 (*pg_init_request)(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
@@ -426,7 +357,7 @@ struct svc_version {
u32 vs_vers; /* version number */
u32 vs_nproc; /* number of procedures */
const struct svc_procedure *vs_proc; /* per-procedure info */
- unsigned int *vs_count; /* call counts */
+ unsigned long __percpu *vs_count; /* call counts */
u32 vs_xdrsize; /* xdrsize needed for this version */
/* Don't register with rpcbind */
@@ -438,11 +369,8 @@ struct svc_version {
/* Need xprt with congestion control */
bool vs_need_cong_ctrl;
- /* Override dispatch function (e.g. when caching replies).
- * A return value of 0 means drop the request.
- * vs_dispatch == NULL means use default dispatcher.
- */
- int (*vs_dispatch)(struct svc_rqst *, __be32 *);
+ /* Dispatch function */
+ int (*vs_dispatch)(struct svc_rqst *rqstp);
};
/*
@@ -452,83 +380,61 @@ struct svc_procedure {
/* process the request: */
__be32 (*pc_func)(struct svc_rqst *);
/* XDR decode args: */
- int (*pc_decode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_decode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR encode result: */
- int (*pc_encode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_encode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR free result: */
void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
+ unsigned int pc_argzero; /* how much of argument to clear */
unsigned int pc_ressize; /* result struct size */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
+ const char * pc_name; /* for display */
};
/*
- * Mode for mapping cpus to pools.
- */
-enum {
- SVC_POOL_AUTO = -1, /* choose one of the others */
- SVC_POOL_GLOBAL, /* no mapping, just a single global pool
- * (legacy & UP mode) */
- SVC_POOL_PERCPU, /* one pool per cpu */
- SVC_POOL_PERNODE /* one pool per numa node */
-};
-
-struct svc_pool_map {
- int count; /* How many svc_servs use us */
- int mode; /* Note: int not enum to avoid
- * warnings about "enumeration value
- * not handled in switch" */
- unsigned int npools;
- unsigned int *pool_to; /* maps pool id to cpu or node */
- unsigned int *to_pool; /* maps cpu or node to pool id */
-};
-
-extern struct svc_pool_map svc_pool_map;
-
-/*
* Function prototypes.
*/
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- const struct svc_serv_ops *);
+ int (*threadfn)(void *data));
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
struct svc_pool *pool, int node);
-struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
- struct svc_pool *pool, int node);
+bool svc_rqst_replace_page(struct svc_rqst *rqstp,
+ struct page *page);
+void svc_rqst_release_pages(struct svc_rqst *rqstp);
void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
-unsigned int svc_pool_map_get(void);
-void svc_pool_map_put(void);
-struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- const struct svc_serv_ops *);
+struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ struct svc_stat *stats,
+ unsigned int bufsize,
+ int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
-void svc_destroy(struct svc_serv *);
-void svc_shutdown_net(struct svc_serv *, struct net *);
-int svc_process(struct svc_rqst *);
-int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
- struct svc_rqst *);
+int svc_pool_stats_open(struct svc_info *si, struct file *file);
+void svc_process(struct svc_rqst *rqstp);
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
const unsigned short, const unsigned short);
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
-struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
+void svc_pool_wake_idle_thread(struct svc_pool *pool);
+struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
-int svc_encode_read_payload(struct svc_rqst *rqstp,
- unsigned int offset,
- unsigned int length);
+const char * svc_proc_name(const struct svc_rqst *rqstp);
+int svc_encode_result_payload(struct svc_rqst *rqstp,
+ unsigned int offset,
+ unsigned int length);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct page **pages,
- struct kvec *first, size_t total);
+ struct xdr_buf *payload);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
struct kvec *first, void *p,
size_t total);
-__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
__be32 svc_generic_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
struct svc_process_info *procinfo);
@@ -557,4 +463,108 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}
+/**
+ * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
+ * @rqstp: controlling server RPC transaction context
+ *
+ */
+static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct xdr_buf *buf = &rqstp->rq_arg;
+ struct kvec *argv = buf->head;
+
+ WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len);
+ buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
+
+ xdr_init_decode(xdr, buf, argv->iov_base, NULL);
+ xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
+}
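A sketch of a ->pc_decode method consuming the stream that svcxdr_init_decode() prepares (hypothetical procedure; storing the result is elided):

static bool example_decode_args(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	u32 value;

	if (xdr_stream_decode_u32(xdr, &value) < 0)
		return false;	/* reported as GARBAGE_ARGS */
	/* a real decoder would store @value in rqstp->rq_argp here */
	return true;
}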
+
+/**
+ * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding
+ * @rqstp: controlling server RPC transaction context
+ *
+ */
+static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct xdr_buf *buf = &rqstp->rq_res;
+ struct kvec *resv = buf->head;
+
+ xdr_reset_scratch_buffer(xdr);
+
+ xdr->buf = buf;
+ xdr->iov = resv;
+ xdr->p = resv->iov_base + resv->iov_len;
+ xdr->end = resv->iov_base + PAGE_SIZE;
+ buf->len = resv->iov_len;
+ xdr->page_ptr = buf->pages - 1;
+ buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
+ xdr->rqst = NULL;
+}
+
+/**
+ * svcxdr_encode_opaque_pages - Insert pages into an xdr_stream
+ * @rqstp: controlling server RPC transaction context
+ * @xdr: xdr_stream to be updated
+ * @pages: array of pages to insert
+ * @base: starting offset of first data byte in @pages
+ * @len: number of data bytes in @pages to insert
+ *
+ * After the @pages are added, the tail iovec is instantiated pointing
+ * to the end of the head buffer, and the stream is set up to encode
+ * subsequent items into the tail.
+ */
+static inline void svcxdr_encode_opaque_pages(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct page **pages,
+ unsigned int base,
+ unsigned int len)
+{
+ xdr_write_pages(xdr, pages, base, len);
+ xdr->page_ptr = rqstp->rq_next_page - 1;
+}
+
+/**
+ * svcxdr_set_auth_slack - Reserve response buffer space for the auth flavor
+ * @rqstp: RPC transaction
+ * @slack: buffer space to reserve for the transaction's security flavor
+ *
+ * Set the request's slack space requirement, and set aside that much
+ * space in the rqstp's rq_res.head for use when the auth wraps the Reply.
+ */
+static inline void svcxdr_set_auth_slack(struct svc_rqst *rqstp, int slack)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct xdr_buf *buf = &rqstp->rq_res;
+ struct kvec *resv = buf->head;
+
+ rqstp->rq_auth_slack = slack;
+
+ xdr->end -= XDR_QUADLEN(slack);
+ buf->buflen -= rqstp->rq_auth_slack;
+
+ WARN_ON(xdr->iov != resv);
+ WARN_ON(xdr->p > xdr->end);
+}
+
+/**
+ * svcxdr_set_accept_stat - Reserve space for the accept_stat field
+ * @rqstp: RPC transaction context
+ *
+ * Return values:
+ * %true: Success
+ * %false: No response buffer space was available
+ */
+static inline bool svcxdr_set_accept_stat(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+
+ rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT);
+ if (unlikely(!rqstp->rq_accept_statp))
+ return false;
+ *rqstp->rq_accept_statp = rpc_success;
+ return true;
+}
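Since rq_accept_statp points directly into the reply stream, a dispatcher can rewrite the default rpc_success after the procedure runs; a minimal sketch (assumed dispatcher behavior, not from this diff):

static inline void example_set_accept_stat(struct svc_rqst *rqstp, __be32 stat)
{
	if (rqstp->rq_accept_statp)
		*rqstp->rq_accept_statp = stat;	/* e.g. rpc_prog_mismatch */
}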
+
#endif /* SUNRPC_SVC_H */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 9dc3a3b88391..24cd199dd6f3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -47,6 +47,9 @@
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
+#include <linux/sunrpc/svc_rdma_pcl.h>
+
+#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -62,16 +65,12 @@ extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
+extern struct workqueue_struct *svcrdma_wq;
-extern atomic_t rdma_stat_recv;
-extern atomic_t rdma_stat_read;
-extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
-extern atomic_t rdma_stat_rq_starve;
-extern atomic_t rdma_stat_rq_poll;
-extern atomic_t rdma_stat_rq_prod;
-extern atomic_t rdma_stat_sq_poll;
-extern atomic_t rdma_stat_sq_prod;
+extern struct percpu_counter svcrdma_stat_read;
+extern struct percpu_counter svcrdma_stat_recv;
+extern struct percpu_counter svcrdma_stat_sq_starve;
+extern struct percpu_counter svcrdma_stat_write;
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
@@ -92,11 +91,14 @@ struct svcxprt_rdma {
struct ib_pd *sc_pd;
spinlock_t sc_send_lock;
- struct list_head sc_send_ctxts;
+ struct llist_head sc_send_ctxts;
spinlock_t sc_rw_ctxt_lock;
- struct list_head sc_rw_ctxts;
+ struct llist_head sc_rw_ctxts;
+ u32 sc_pending_recvs;
+ u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
+ struct list_head sc_read_complete_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
@@ -106,7 +108,6 @@ struct svcxprt_rdma {
wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
unsigned long sc_flags;
- struct list_head sc_read_complete_q;
struct work_struct sc_work;
struct llist_head sc_recv_ctxts;
@@ -116,6 +117,13 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
+static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
+{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ return container_of(xprt, struct svcxprt_rdma, sc_xprt);
+}
+
/*
* Default connection parameters
*/
@@ -127,6 +135,43 @@ enum {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/**
+ * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/**
+ * svc_rdma_send_cid_init - Initialize a Send Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/*
+ * A chunk context tracks all I/O for moving one Read or Write
+ * chunk. This is a set of rdma_rw's that handle data movement
+ * for all segments of one chunk.
+ */
+struct svc_rdma_chunk_ctxt {
+ struct rpc_rdma_cid cc_cid;
+ struct ib_cqe cc_cqe;
+ struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
+ int cc_sqecount;
+};
+
struct svc_rdma_recv_ctxt {
struct llist_node rc_node;
struct list_head rc_list;
@@ -135,28 +180,69 @@ struct svc_rdma_recv_ctxt {
struct rpc_rdma_cid rc_cid;
struct ib_sge rc_recv_sge;
void *rc_recv_buf;
- struct xdr_buf rc_arg;
struct xdr_stream rc_stream;
- bool rc_temp;
u32 rc_byte_len;
- unsigned int rc_page_count;
- unsigned int rc_hdr_count;
u32 rc_inv_rkey;
- __be32 *rc_write_list;
- __be32 *rc_reply_chunk;
- unsigned int rc_read_payload_offset;
- unsigned int rc_read_payload_length;
+ __be32 rc_msgtype;
+
+ /* State for pulling a Read chunk */
+ unsigned int rc_pageoff;
+ unsigned int rc_curpage;
+ unsigned int rc_readbytes;
+ struct xdr_buf rc_saved_arg;
+ struct svc_rdma_chunk_ctxt rc_cc;
+
+ struct svc_rdma_pcl rc_call_pcl;
+
+ struct svc_rdma_pcl rc_read_pcl;
+ struct svc_rdma_chunk *rc_cur_result_payload;
+ struct svc_rdma_pcl rc_write_pcl;
+ struct svc_rdma_pcl rc_reply_pcl;
+
+ unsigned int rc_page_count;
struct page *rc_pages[RPCSVC_MAXPAGES];
};
+/*
+ * State for sending a Write chunk.
+ * - Tracks progress of writing one chunk over all its segments
+ * - Stores arguments for the SGL constructor functions
+ */
+struct svc_rdma_write_info {
+ struct svcxprt_rdma *wi_rdma;
+ struct list_head wi_list;
+
+ const struct svc_rdma_chunk *wi_chunk;
+
+ /* write state of this chunk */
+ unsigned int wi_seg_off;
+ unsigned int wi_seg_no;
+
+ /* SGL constructor arguments */
+ const struct xdr_buf *wi_xdr;
+ unsigned char *wi_base;
+ unsigned int wi_next_off;
+
+ struct svc_rdma_chunk_ctxt wi_cc;
+ struct work_struct wi_work;
+};
+
struct svc_rdma_send_ctxt {
- struct list_head sc_list;
+ struct llist_node sc_node;
struct rpc_rdma_cid sc_cid;
+ struct work_struct sc_work;
+ struct svcxprt_rdma *sc_rdma;
struct ib_send_wr sc_send_wr;
+ struct ib_send_wr *sc_wr_chain;
+ int sc_sqecount;
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
struct xdr_stream sc_stream;
+
+ struct list_head sc_write_info_list;
+ struct svc_rdma_write_info sc_reply_info;
+
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
@@ -171,24 +257,39 @@ extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
+extern struct svc_rdma_recv_ctxt *
+ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
-extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
+extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
+extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc);
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
-extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
- struct svc_rqst *rqstp,
- struct svc_rdma_recv_ctxt *head, __be32 *p);
-extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
- __be32 *wr_ch, struct xdr_buf *xdr,
- unsigned int offset,
- unsigned long length);
-extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
- const struct svc_rdma_recv_ctxt *rctxt,
- struct xdr_buf *xdr);
+extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc,
+ enum dma_data_direction dir);
+extern void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
+extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
+extern int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_pcl *write_pcl,
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct xdr_buf *xdr);
+extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct xdr_buf *xdr);
+extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *head);
/* svc_rdma_sendto.c */
extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
@@ -196,19 +297,21 @@ extern struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_send(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt);
+extern int svc_rdma_post_send(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
- const struct svc_rdma_recv_ctxt *rctxt,
- struct xdr_buf *xdr);
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
+ const struct xdr_buf *xdr);
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
struct svc_rdma_recv_ctxt *rctxt,
int status);
+extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail);
extern int svc_rdma_sendto(struct svc_rqst *);
-extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
- unsigned int length);
+extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
+ unsigned int length);
/* svc_rdma_transport.c */
extern struct svc_xprt_class svc_rdma_class;
diff --git a/include/linux/sunrpc/svc_rdma_pcl.h b/include/linux/sunrpc/svc_rdma_pcl.h
new file mode 100644
index 000000000000..7516ad0fae80
--- /dev/null
+++ b/include/linux/sunrpc/svc_rdma_pcl.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates
+ */
+
+#ifndef SVC_RDMA_PCL_H
+#define SVC_RDMA_PCL_H
+
+#include <linux/list.h>
+
+struct svc_rdma_segment {
+ u32 rs_handle;
+ u32 rs_length;
+ u64 rs_offset;
+};
+
+struct svc_rdma_chunk {
+ struct list_head ch_list;
+
+ u32 ch_position;
+ u32 ch_length;
+ u32 ch_payload_length;
+
+ u32 ch_segcount;
+ struct svc_rdma_segment ch_segments[];
+};
+
+struct svc_rdma_pcl {
+ unsigned int cl_count;
+ struct list_head cl_chunks;
+};
+
+/**
+ * pcl_init - Initialize a parsed chunk list
+ * @pcl: parsed chunk list to initialize
+ *
+ */
+static inline void pcl_init(struct svc_rdma_pcl *pcl)
+{
+ INIT_LIST_HEAD(&pcl->cl_chunks);
+}
+
+/**
+ * pcl_is_empty - Return true if parsed chunk list is empty
+ * @pcl: parsed chunk list
+ *
+ */
+static inline bool pcl_is_empty(const struct svc_rdma_pcl *pcl)
+{
+ return list_empty(&pcl->cl_chunks);
+}
+
+/**
+ * pcl_first_chunk - Return first chunk in a parsed chunk list
+ * @pcl: parsed chunk list
+ *
+ * Returns the first chunk in the list, or NULL if the list is empty.
+ */
+static inline struct svc_rdma_chunk *
+pcl_first_chunk(const struct svc_rdma_pcl *pcl)
+{
+ if (pcl_is_empty(pcl))
+ return NULL;
+ return list_first_entry(&pcl->cl_chunks, struct svc_rdma_chunk,
+ ch_list);
+}
+
+/**
+ * pcl_next_chunk - Return next chunk in a parsed chunk list
+ * @pcl: a parsed chunk list
+ * @chunk: chunk in @pcl
+ *
+ * Returns the next chunk in the list, or NULL if @chunk is already last.
+ */
+static inline struct svc_rdma_chunk *
+pcl_next_chunk(const struct svc_rdma_pcl *pcl, struct svc_rdma_chunk *chunk)
+{
+ if (list_is_last(&chunk->ch_list, &pcl->cl_chunks))
+ return NULL;
+ return list_next_entry(chunk, ch_list);
+}
+
+/**
+ * pcl_for_each_chunk - Iterate over chunks in a parsed chunk list
+ * @pos: the loop cursor
+ * @pcl: a parsed chunk list
+ */
+#define pcl_for_each_chunk(pos, pcl) \
+ for (pos = list_first_entry(&(pcl)->cl_chunks, struct svc_rdma_chunk, ch_list); \
+ &pos->ch_list != &(pcl)->cl_chunks; \
+ pos = list_next_entry(pos, ch_list))
+
+/**
+ * pcl_for_each_segment - Iterate over segments in a parsed chunk
+ * @pos: the loop cursor
+ * @chunk: a parsed chunk
+ */
+#define pcl_for_each_segment(pos, chunk) \
+ for (pos = &(chunk)->ch_segments[0]; \
+ pos <= &(chunk)->ch_segments[(chunk)->ch_segcount - 1]; \
+ pos++)
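A sketch that exercises both iterators above (illustrative helper): total the registered length of every segment in a parsed chunk list.

static inline u32 example_pcl_total_length(struct svc_rdma_pcl *pcl)
{
	struct svc_rdma_chunk *chunk;
	struct svc_rdma_segment *seg;
	u32 total = 0;

	pcl_for_each_chunk(chunk, pcl)
		pcl_for_each_segment(seg, chunk)
			total += seg->rs_length;
	return total;
}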
+
+/**
+ * pcl_chunk_end_offset - Return offset of byte range following @chunk
+ * @chunk: a parsed chunk
+ *
+ * Returns starting offset of the region just after @chunk
+ */
+static inline unsigned int
+pcl_chunk_end_offset(const struct svc_rdma_chunk *chunk)
+{
+ return xdr_align_size(chunk->ch_position + chunk->ch_payload_length);
+}
+
+struct svc_rdma_recv_ctxt;
+
+extern void pcl_free(struct svc_rdma_pcl *pcl);
+extern bool pcl_alloc_call(struct svc_rdma_recv_ctxt *rctxt, __be32 *p);
+extern bool pcl_alloc_read(struct svc_rdma_recv_ctxt *rctxt, __be32 *p);
+extern bool pcl_alloc_write(struct svc_rdma_recv_ctxt *rctxt,
+ struct svc_rdma_pcl *pcl, __be32 *p);
+extern int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl,
+ const struct xdr_buf *xdr,
+ int (*actor)(const struct xdr_buf *,
+ void *),
+ void *data);
+
+#endif /* SVC_RDMA_PCL_H */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index aca35ab5cff2..8e20cd60e2e7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -21,13 +21,13 @@ struct svc_xprt_ops {
int (*xpo_has_wspace)(struct svc_xprt *);
int (*xpo_recvfrom)(struct svc_rqst *);
int (*xpo_sendto)(struct svc_rqst *);
- int (*xpo_read_payload)(struct svc_rqst *, unsigned int,
- unsigned int);
- void (*xpo_release_rqst)(struct svc_rqst *);
+ int (*xpo_result_payload)(struct svc_rqst *, unsigned int,
+ unsigned int);
+ void (*xpo_release_ctxt)(struct svc_xprt *xprt, void *ctxt);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
- void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
+ void (*xpo_handshake)(struct svc_xprt *xprt);
};
struct svc_xprt_class {
@@ -54,22 +54,8 @@ struct svc_xprt {
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
-#define XPT_BUSY 0 /* enqueued/receiving */
-#define XPT_CONN 1 /* conn pending */
-#define XPT_CLOSE 2 /* dead or dying */
-#define XPT_DATA 3 /* data pending */
-#define XPT_TEMP 4 /* connected transport */
-#define XPT_DEAD 6 /* transport closed */
-#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
-#define XPT_DEFERRED 8 /* deferred request pending */
-#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_LISTENER 10 /* listening endpoint */
-#define XPT_CACHE_AUTH 11 /* cache auth info */
-#define XPT_LOCAL 12 /* connection from loopback interface */
-#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
-#define XPT_CONG_CTRL 14 /* has congestion control */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
@@ -88,11 +74,33 @@ struct svc_xprt {
struct list_head xpt_users; /* callbacks on free */
struct net *xpt_net;
+ netns_tracker ns_tracker;
const struct cred *xpt_cred;
struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
+/* flag bits for xpt_flags */
+enum {
+ XPT_BUSY, /* enqueued/receiving */
+ XPT_CONN, /* conn pending */
+ XPT_CLOSE, /* dead or dying */
+ XPT_DATA, /* data pending */
+ XPT_TEMP, /* connected transport */
+ XPT_DEAD, /* transport closed */
+ XPT_CHNGBUF, /* need to change snd/rcv buf sizes */
+ XPT_DEFERRED, /* deferred request pending */
+ XPT_OLD, /* used for xprt aging mark+sweep */
+ XPT_LISTENER, /* listening endpoint */
+ XPT_CACHE_AUTH, /* cache auth info */
+ XPT_LOCAL, /* connection from loopback interface */
+ XPT_KILL_TEMP, /* call xpo_kill_temp_xprt before closing */
+ XPT_CONG_CTRL, /* has congestion control */
+ XPT_HANDSHAKE, /* xprt requests a handshake */
+ XPT_TLS_SESSION, /* transport-layer security established */
+ XPT_PEER_AUTH, /* peer has been authenticated */
+};
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
@@ -127,14 +135,16 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
-int svc_create_xprt(struct svc_serv *, const char *, struct net *,
- const int, const unsigned short, int,
- const struct cred *);
-void svc_xprt_do_enqueue(struct svc_xprt *xprt);
+int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
+ struct net *net, const int family,
+ const unsigned short port, int flags,
+ const struct cred *cred);
+void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net);
+void svc_xprt_received(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
-void svc_close_xprt(struct svc_xprt *xprt);
+void svc_xprt_close(struct svc_xprt *xprt);
int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
@@ -143,6 +153,7 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt);
void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *);
+void svc_xprt_deferred_close(struct svc_xprt *xprt);
static inline void svc_xprt_get(struct svc_xprt *xprt)
{
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index b0003866a249..61c455f1e1f5 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -83,6 +83,19 @@ struct auth_domain {
struct rcu_head rcu_head;
};
+enum svc_auth_status {
+ SVC_GARBAGE = 1,
+ SVC_SYSERR,
+ SVC_VALID,
+ SVC_NEGATIVE,
+ SVC_OK,
+ SVC_DROP,
+ SVC_CLOSE,
+ SVC_DENIED,
+ SVC_PENDING,
+ SVC_COMPLETE,
+};
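A sketch of an ->accept method returning the new enum (hypothetical flavor): SVC_OK lets processing continue, while SVC_CLOSE aborts the request and closes a stream transport.

static enum svc_auth_status example_accept(struct svc_rqst *rqstp)
{
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;	/* reply buffer exhausted */
	return SVC_OK;
}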
+
/*
* Each authentication flavour registers an auth_ops
* structure.
@@ -98,6 +111,8 @@ struct auth_domain {
* is (probably) already in place. Certainly space is
* reserved for it.
* DROP - simply drop the request. It may have been deferred
+ * CLOSE - like SVC_DROP, but request is definitely lost.
+ * If there is a tcp connection, it should be closed.
* GARBAGE - rpc garbage_args error
* SYSERR - rpc system_err error
* DENIED - authp holds reason for denial.
@@ -111,60 +126,45 @@ struct auth_domain {
*
* release() is given a request after the procedure has been run.
* It should sign/encrypt the results if needed
- * It should return:
- * OK - the resbuf is ready to be sent
- * DROP - the reply should be quitely dropped
- * DENIED - authp holds a reason for MSG_DENIED
- * SYSERR - rpc system_err
*
* domain_release()
* This call releases a domain.
+ *
* set_client()
- * Givens a pending request (struct svc_rqst), finds and assigns
+ * Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
+ *
+ * pseudoflavor()
+ * Returns RPC_AUTH pseudoflavor in use by @rqstp.
*/
struct auth_ops {
char * name;
struct module *owner;
int flavour;
- int (*accept)(struct svc_rqst *rq, __be32 *authp);
- int (*release)(struct svc_rqst *rq);
- void (*domain_release)(struct auth_domain *);
- int (*set_client)(struct svc_rqst *rq);
-};
-#define SVC_GARBAGE 1
-#define SVC_SYSERR 2
-#define SVC_VALID 3
-#define SVC_NEGATIVE 4
-#define SVC_OK 5
-#define SVC_DROP 6
-#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
- * lost so if there is a tcp connection, it
- * should be closed
- */
-#define SVC_DENIED 8
-#define SVC_PENDING 9
-#define SVC_COMPLETE 10
+ enum svc_auth_status (*accept)(struct svc_rqst *rqstp);
+ int (*release)(struct svc_rqst *rqstp);
+ void (*domain_release)(struct auth_domain *dom);
+ enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+ rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp);
+};
struct svc_xprt;
-extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
+extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
+extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
-extern int svc_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
-extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
-extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
-extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(struct net *net);
extern void svcauth_unix_info_release(struct svc_xprt *xpt);
-extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index b7ac7fe68306..7c78ec6356b9 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -36,6 +36,10 @@ struct svc_sock {
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
+ struct page_frag_cache sk_frag_cache;
+
+ struct completion sk_handshake_done;
+
struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
@@ -52,19 +56,15 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_close_net(struct svc_serv *, struct net *);
-int svc_recv(struct svc_rqst *, long);
-int svc_send(struct svc_rqst *);
+void svc_recv(struct svc_rqst *rqstp);
+void svc_send(struct svc_rqst *rqstp);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
-bool svc_alien_sock(struct net *net, int fd);
-int svc_addsock(struct svc_serv *serv, const int fd,
- char *name_return, const size_t len,
- const struct cred *cred);
+int svc_addsock(struct svc_serv *serv, struct net *net,
+ const int fd, char *name_return, const size_t len,
+ const struct cred *cred);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
-void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 5a6a81b7cd9f..2f8dc47f1eb0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -20,13 +20,19 @@ struct bio_vec;
struct rpc_rqst;
/*
+ * Size of an XDR encoding unit in bytes, i.e. 32 bits,
+ * as defined in Section 3 of RFC 4506. All encoded
+ * XDR data items are aligned on a boundary of 32 bits.
+ */
+#define XDR_UNIT sizeof(__be32)
+
+/*
* Buffer adjustment
*/
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
/*
- * Generic opaque `network object.' At the kernel level, this type
- * is used only by lockd.
+ * Generic opaque `network object.'
*/
#define XDR_MAX_NETOBJ 1024
struct xdr_netobj {
@@ -89,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX)
#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT)
#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS)
+#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS)
#define rpc_call cpu_to_be32(RPC_CALL)
#define rpc_reply cpu_to_be32(RPC_REPLY)
@@ -128,10 +135,12 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
-void xdr_terminate_string(struct xdr_buf *, const u32);
-size_t xdr_buf_pagecount(struct xdr_buf *buf);
+void xdr_terminate_string(const struct xdr_buf *, const u32);
+size_t xdr_buf_pagecount(const struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+ const struct xdr_buf *xdr);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -181,15 +190,14 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
/*
* XDR buffer helper functions
*/
-extern void xdr_shift_buf(struct xdr_buf *, size_t);
-extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
-extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *);
+extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
-extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
-extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
+extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
+extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
-extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
-extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
+extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32);
+extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *);
struct xdr_array2_desc;
typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
@@ -200,9 +208,9 @@ struct xdr_array2_desc {
xdr_xcode_elem_t xcode;
};
-extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+extern int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
-extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+extern int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
size_t len);
@@ -218,6 +226,7 @@ struct xdr_stream {
struct kvec *iov; /* pointer to the current kvec */
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
+ void *page_kaddr; /* kmapped address of the current page */
unsigned int nwords; /* Remaining decode buffer length */
struct rpc_rqst *rqst; /* For debugging */
@@ -233,22 +242,91 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
+extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, struct rpc_rqst *rqst);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
-extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes);
+extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
+extern void xdr_truncate_decode(struct xdr_stream *xdr, size_t len);
extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
+extern unsigned int xdr_page_pos(const struct xdr_stream *xdr);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
-extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
+extern void xdr_finish_decode(struct xdr_stream *xdr);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
-extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
+extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
+extern void xdr_set_pagelen(struct xdr_stream *, unsigned int len);
+extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
+ unsigned int len);
+extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int target, unsigned int length);
+extern unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int length);
+
+/**
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+static inline void
+xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+ xdr->scratch.iov_base = buf;
+ xdr->scratch.iov_len = buflen;
+}
+
+/**
+ * xdr_set_scratch_page - Attach a scratch buffer for decoding data
+ * @xdr: pointer to xdr_stream struct
+ * @page: an anonymous page
+ *
+ * See xdr_set_scratch_buffer().
+ */
+static inline void
+xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page)
+{
+ xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE);
+}
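A sketch of scratch-page use during decode (assumed caller setup): with a scratch page attached, xdr_inline_decode() can linearize an item that straddles a page boundary in the receive buffer.

static inline __be32 *example_decode_span(struct xdr_stream *xdr,
					  struct page *scratch, size_t nbytes)
{
	xdr_set_scratch_page(xdr, scratch);
	return xdr_inline_decode(xdr, nbytes);	/* NULL on buffer overflow */
}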
+
+/**
+ * xdr_reset_scratch_buffer - Clear scratch buffer information
+ * @xdr: pointer to xdr_stream struct
+ *
+ * See xdr_set_scratch_buffer().
+ */
+static inline void
+xdr_reset_scratch_buffer(struct xdr_stream *xdr)
+{
+ xdr_set_scratch_buffer(xdr, NULL, 0);
+}
+
+/**
+ * xdr_commit_encode - Ensure all data is written to xdr->buf
+ * @xdr: pointer to xdr_stream
+ *
+ * Handle encoding across page boundaries by giving the caller a
+ * temporary location to write to, then later copying the data into
+ * place. __xdr_commit_encode() does that copying.
+ */
+static inline void xdr_commit_encode(struct xdr_stream *xdr)
+{
+ if (unlikely(xdr->scratch.iov_len))
+ __xdr_commit_encode(xdr);
+}
/**
* xdr_stream_remaining - Return the number of bytes remaining in the stream
@@ -271,6 +349,11 @@ ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str,
size_t size);
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
+ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
+ void **body, unsigned int *body_len);
+ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
+ void *body, unsigned int body_len);
+
/**
* xdr_align_size - Calculate padded size of an object
* @n: Size of an object being XDR encoded (in bytes)
@@ -281,7 +364,7 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
static inline size_t
xdr_align_size(size_t n)
{
- const size_t mask = sizeof(__u32) - 1;
+ const size_t mask = XDR_UNIT - 1;
return (n + mask) & ~mask;
}
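Worked example: with XDR_UNIT == 4 the mask is 3, so xdr_align_size(5) == (5 + 3) & ~3 == 8 and xdr_align_size(8) == 8; every encoded object is padded out to the next 32-bit boundary.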
@@ -311,7 +394,7 @@ static inline size_t xdr_pad_size(size_t n)
*/
static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
{
- const size_t len = sizeof(__be32);
+ const size_t len = XDR_UNIT;
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
@@ -330,7 +413,7 @@ static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
*/
static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
{
- const size_t len = sizeof(__be32);
+ const size_t len = XDR_UNIT;
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
@@ -340,6 +423,40 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
}
/**
+ * xdr_encode_bool - Encode a boolean item
+ * @p: address in a buffer into which to encode
+ * @n: boolean value to encode
+ *
+ * Return value:
+ * Address of item following the encoded boolean
+ */
+static inline __be32 *xdr_encode_bool(__be32 *p, u32 n)
+{
+ *p++ = n ? xdr_one : xdr_zero;
+ return p;
+}
+
+/**
+ * xdr_stream_encode_bool - Encode a boolean item
+ * @xdr: pointer to xdr_stream
+ * @n: boolean value to encode
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline int xdr_stream_encode_bool(struct xdr_stream *xdr, __u32 n)
+{
+ const size_t len = XDR_UNIT;
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ xdr_encode_bool(p, n);
+ return len;
+}
+
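/*
 * Usage sketch (illustrative): encode a boolean flag followed by a
 * 32-bit count, propagating -EMSGSIZE on buffer overflow. The field
 * layout is hypothetical.
 */
static int example_encode_flag_and_count(struct xdr_stream *xdr,
					 bool present, u32 count)
{
	if (xdr_stream_encode_bool(xdr, present) < 0)
		return -EMSGSIZE;
	if (xdr_stream_encode_u32(xdr, count) < 0)
		return -EMSGSIZE;
	return 0;
}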
+/**
* xdr_stream_encode_u32 - Encode a 32-bit integer
* @xdr: pointer to xdr_stream
* @n: integer to encode
@@ -361,6 +478,27 @@ xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n)
}
/**
+ * xdr_stream_encode_be32 - Encode a big-endian 32-bit integer
+ * @xdr: pointer to xdr_stream
+ * @n: integer to encode
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_encode_be32(struct xdr_stream *xdr, __be32 n)
+{
+ const size_t len = sizeof(n);
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = n;
+ return len;
+}
+
+/**
* xdr_stream_encode_u64 - Encode a 64-bit integer
* @xdr: pointer to xdr_stream
* @n: 64-bit integer to encode
@@ -501,6 +639,27 @@ static inline bool xdr_item_is_present(const __be32 *p)
}
/**
+ * xdr_stream_decode_bool - Decode a boolean
+ * @xdr: pointer to xdr_stream
+ * @ptr: pointer to a u32 in which to store the result
+ *
+ * Return values:
+ * %0 on success
+ * %-EBADMSG on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_decode_bool(struct xdr_stream *xdr, __u32 *ptr)
+{
+ const size_t count = sizeof(*ptr);
+ __be32 *p = xdr_inline_decode(xdr, count);
+
+ if (unlikely(!p))
+ return -EBADMSG;
+ *ptr = (*p != xdr_zero);
+ return 0;
+}
+
+/**
* xdr_stream_decode_u32 - Decode a 32-bit integer
* @xdr: pointer to xdr_stream
* @ptr: location to store integer
@@ -522,6 +681,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr)
}
/**
+ * xdr_stream_decode_u64 - Decode a 64-bit integer
+ * @xdr: pointer to xdr_stream
+ * @ptr: location to store 64-bit integer
+ *
+ * Return values:
+ * %0 on success
+ * %-EBADMSG on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr)
+{
+ const size_t count = sizeof(*ptr);
+ __be32 *p = xdr_inline_decode(xdr, count);
+
+ if (unlikely(!p))
+ return -EBADMSG;
+ xdr_decode_hyper(p, ptr);
+ return 0;
+}
+
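/*
 * Usage sketch (illustrative): decode a 64-bit value followed by a
 * boolean, as a caller of these helpers typically would. The pairing of
 * fields is hypothetical.
 */
static int example_decode_pair(struct xdr_stream *xdr, u64 *val, u32 *flag)
{
	if (xdr_stream_decode_u64(xdr, val) < 0)
		return -EBADMSG;
	if (xdr_stream_decode_bool(xdr, flag) < 0)
		return -EBADMSG;
	return 0;
}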
+/**
* xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data
* @xdr: pointer to xdr_stream
* @ptr: location to store data
@@ -599,6 +779,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
+ return -EBADMSG;
p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
return -EBADMSG;
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index a603d48d2b2c..81b952649d35 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -30,17 +30,6 @@
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-/*
- * This describes a timeout strategy
- */
-struct rpc_timeout {
- unsigned long to_initval, /* initial timeout */
- to_maxval, /* max timeout */
- to_increment; /* if !exponential */
- unsigned int to_retries; /* max # of retries */
- unsigned char to_exponential;
-};
-
enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
@@ -53,9 +42,11 @@ enum rpc_display_format_t {
struct rpc_task;
struct rpc_xprt;
+struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -120,7 +111,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
@@ -128,6 +119,21 @@ struct rpc_rqst {
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
+/* RPC transport layer security policies */
+enum xprtsec_policies {
+ RPC_XPRTSEC_NONE = 0,
+ RPC_XPRTSEC_TLS_ANON,
+ RPC_XPRTSEC_TLS_X509,
+};
+
+struct xprtsec_parms {
+ enum xprtsec_policies policy;
+
+ /* authentication material */
+ key_serial_t cert_serial;
+ key_serial_t privkey_serial;
+};
+
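/*
 * Setup sketch (illustrative): a client wanting an x.509-authenticated
 * TLS transport fills in the policy and the keyring serials of its
 * certificate and private key before transport creation. The serial
 * values below are placeholders, not real keys.
 */
static const struct xprtsec_parms example_xprtsec = {
	.policy		= RPC_XPRTSEC_TLS_X509,
	.cert_serial	= 0x12345678,	/* placeholder */
	.privkey_serial	= 0x23456789,	/* placeholder */
};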
struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -138,10 +144,15 @@ struct rpc_xprt_ops {
void (*rpcbind)(struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
+ int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
+ size_t buflen);
+ unsigned short (*get_srcport)(struct rpc_xprt *xprt);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- void (*prepare_request)(struct rpc_rqst *req);
+ int (*prepare_request)(struct rpc_rqst *req,
+ struct xdr_buf *buf);
int (*send_request)(struct rpc_rqst *req);
+ void (*abort_send_request)(struct rpc_rqst *req);
void (*wait_for_reply_request)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_request)(struct rpc_task *task);
@@ -180,11 +191,14 @@ enum xprt_transports {
XPRT_TRANSPORT_RDMA = 256,
XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_LOCAL = 257,
+ XPRT_TRANSPORT_TCP_TLS = 258,
};
+struct rpc_sysfs_xprt;
struct rpc_xprt {
struct kref kref; /* Reference count */
const struct rpc_xprt_ops *ops; /* transport methods */
+ unsigned int id; /* transport id */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
@@ -222,6 +236,7 @@ struct rpc_xprt {
*/
unsigned long bind_timeout,
reestablish_timeout;
+ struct xprtsec_parms xprtsec;
unsigned int connect_cookie; /* A cookie that gets bumped
every time the transport
is reconnected */
@@ -247,6 +262,7 @@ struct rpc_xprt {
struct rpc_task * snd_task; /* Task blocked in send */
struct list_head xmit_queue; /* Send queue */
+ atomic_long_t xmit_queuelen;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -280,13 +296,16 @@ struct rpc_xprt {
} stat;
struct net *xprt_net;
+ netns_tracker ns_tracker;
const char *servername;
const char *address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *debugfs; /* debugfs directory */
- atomic_t inject_disconnect;
#endif
struct rcu_head rcu;
+ const struct xprt_class *xprt_class;
+ struct rpc_sysfs_xprt *xprt_sysfs;
+ bool main; /* mark if this is the 1st transport */
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -322,6 +341,9 @@ struct xprt_create {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *bc_xps;
unsigned int flags;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct xprt_class {
@@ -330,6 +352,7 @@ struct xprt_class {
struct rpc_xprt * (*setup)(struct xprt_create *);
struct module *owner;
char name[32];
+ const char * netid[];
};
/*
@@ -347,10 +370,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req);
-void xprt_request_prepare(struct rpc_rqst *req);
bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
-void xprt_request_enqueue_receive(struct rpc_task *task);
+int xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
@@ -366,6 +388,9 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
+void xprt_cleanup_ids(void);
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
@@ -384,7 +409,7 @@ xprt_disable_swap(struct rpc_xprt *xprt)
*/
int xprt_register_transport(struct xprt_class *type);
int xprt_unregister_transport(struct xprt_class *type);
-int xprt_load_transport(const char *);
+int xprt_find_transport_ident(const char *);
void xprt_wait_for_reply_request_def(struct rpc_task *task);
void xprt_wait_for_reply_request_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
@@ -404,6 +429,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void xprt_unlock_connect(struct rpc_xprt *, void *);
+void xprt_release_write(struct rpc_xprt *, struct rpc_task *);
/*
* Reserved bit positions in xprt->state
@@ -415,9 +441,12 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
+#define XPRT_OFFLINE (7)
+#define XPRT_REMOVE (8)
#define XPRT_CONGESTED (9)
#define XPRT_CWND_WAIT (10)
#define XPRT_WRITE_SPACE (11)
+#define XPRT_SND_IS_COOKIE (12)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
@@ -488,21 +517,7 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-extern unsigned int rpc_inject_disconnect;
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
- if (!rpc_inject_disconnect)
- return;
- if (atomic_dec_return(&xprt->inject_disconnect))
- return;
- atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
- xprt->ops->inject_disconnect(xprt);
-}
-#else
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
-}
-#endif
-
+void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
#endif /* _LINUX_SUNRPC_XPRT_H */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index c6cce3fbf29d..c0514c684b2c 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -10,12 +10,15 @@
#define _NET_SUNRPC_XPRTMULTIPATH_H
struct rpc_xprt_iter_ops;
+struct rpc_sysfs_xprt_switch;
struct rpc_xprt_switch {
spinlock_t xps_lock;
struct kref xps_kref;
+ unsigned int xps_id;
unsigned int xps_nxprts;
unsigned int xps_nactive;
+ unsigned int xps_nunique_destaddr_xprts;
atomic_long_t xps_queuelen;
struct list_head xps_xprt_list;
@@ -23,6 +26,7 @@ struct rpc_xprt_switch {
const struct rpc_xprt_iter_ops *xps_iter_ops;
+ struct rpc_sysfs_xprt_switch *xps_sysfs;
struct rcu_head xps_rcu;
};
@@ -51,7 +55,7 @@ extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
- struct rpc_xprt *xprt);
+ struct rpc_xprt *xprt, bool offline);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
@@ -59,8 +63,13 @@ extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
+extern void xprt_iter_init_listoffline(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
+extern void xprt_iter_rewind(struct rpc_xprt_iter *xpi);
+
extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *newswitch);
@@ -71,4 +80,7 @@ extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
const struct sockaddr *sap);
+
+extern void xprt_multipath_cleanup_ids(void);
+
#endif
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 3c1423ee74b4..700a1e6c047c 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -57,9 +57,11 @@ struct sock_xprt {
struct work_struct error_worker;
struct work_struct recv_worker;
struct mutex recv_mutex;
+ struct completion handshake_done;
struct sockaddr_storage srcaddr;
unsigned short srcport;
int xprt_err;
+ struct rpc_clnt *clnt;
/*
* UDP socket buffer size parameters
@@ -88,5 +90,8 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_WRITE (5)
#define XPRT_SOCK_WAKE_PENDING (6)
#define XPRT_SOCK_WAKE_DISCONNECT (7)
+#define XPRT_SOCK_CONNECT_SENT (8)
+#define XPRT_SOCK_NOSPACE (9)
+#define XPRT_SOCK_IGNORE_RECV (10)
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h
index 7e75bb0346d0..bf0d365f471c 100644
--- a/include/linux/sunxi-rsb.h
+++ b/include/linux/sunxi-rsb.h
@@ -59,7 +59,7 @@ static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev,
struct sunxi_rsb_driver {
struct device_driver driver;
int (*probe)(struct sunxi_rsb_device *rdev);
- int (*remove)(struct sunxi_rsb_device *rdev);
+ void (*remove)(struct sunxi_rsb_device *rdev);
};
static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d)
diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h
deleted file mode 100644
index 8d3376775813..000000000000
--- a/include/linux/superhyway.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * include/linux/superhyway.h
- *
- * SuperHyway Bus definitions
- *
- * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __LINUX_SUPERHYWAY_H
-#define __LINUX_SUPERHYWAY_H
-
-#include <linux/device.h>
-
-/*
- * SuperHyway IDs
- */
-#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183
-
-struct superhyway_vcr_info {
- u8 perr_flags; /* P-port Error flags */
- u8 merr_flags; /* Module Error flags */
- u16 mod_vers; /* Module Version */
- u16 mod_id; /* Module ID */
- u8 bot_mb; /* Bottom Memory block */
- u8 top_mb; /* Top Memory block */
-};
-
-struct superhyway_ops {
- int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr);
- int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr);
-};
-
-struct superhyway_bus {
- struct superhyway_ops *ops;
-};
-
-extern struct superhyway_bus superhyway_channels[];
-
-struct superhyway_device_id {
- unsigned int id;
- unsigned long driver_data;
-};
-
-struct superhyway_device;
-extern struct bus_type superhyway_bus_type;
-
-struct superhyway_driver {
- char *name;
-
- const struct superhyway_device_id *id_table;
- struct device_driver drv;
-
- int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id);
- void (*remove)(struct superhyway_device *dev);
-};
-
-#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv)
-
-struct superhyway_device {
- char name[32];
-
- struct device dev;
-
- struct superhyway_device_id id;
- struct superhyway_driver *drv;
- struct superhyway_bus *bus;
-
- int num_resources;
- struct resource *resource;
- struct superhyway_vcr_info vcr;
-};
-
-#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev)
-
-#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev)
-#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
-
-static inline int
-superhyway_read_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info *vcr)
-{
- return dev->bus->ops->read_vcr(base, vcr);
-}
-
-static inline int
-superhyway_write_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info vcr)
-{
- return dev->bus->ops->write_vcr(base, vcr);
-}
-
-extern int superhyway_scan_bus(struct superhyway_bus *);
-
-/* drivers/sh/superhyway/superhyway.c */
-int superhyway_register_driver(struct superhyway_driver *);
-void superhyway_unregister_driver(struct superhyway_driver *);
-int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *);
-int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices);
-
-/* drivers/sh/superhyway/superhyway-sysfs.c */
-extern const struct attribute_group *superhyway_dev_groups[];
-
-#endif /* __LINUX_SUPERHYWAY_H */
-
diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h
new file mode 100644
index 000000000000..8e3e86c7d78c
--- /dev/null
+++ b/include/linux/surface_acpi_notify.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Interface for Surface ACPI Notify (SAN) driver.
+ *
+ * Provides access to discrete GPU notifications sent from ACPI via the SAN
+ * driver, which are not handled by this driver directly.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H
+#define _LINUX_SURFACE_ACPI_NOTIFY_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+/**
+ * struct san_dgpu_event - Discrete GPU ACPI event.
+ * @category: Category of the event.
+ * @target: Target ID of the event source.
+ * @command: Command ID of the event.
+ * @instance: Instance ID of the event source.
+ * @length: Length of the event's payload data (in bytes).
+ * @payload: Pointer to the event's payload data.
+ */
+struct san_dgpu_event {
+ u8 category;
+ u8 target;
+ u8 command;
+ u8 instance;
+ u16 length;
+ u8 *payload;
+};
+
+int san_client_link(struct device *client);
+int san_dgpu_notifier_register(struct notifier_block *nb);
+int san_dgpu_notifier_unregister(struct notifier_block *nb);
+
+#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */
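/*
 * Registration sketch (illustrative, not part of this header): a client
 * driver linking against the SAN device and subscribing to dGPU events.
 * The callback body is hypothetical; only san_client_link() and the
 * notifier register/unregister calls belong to this interface.
 */
static int example_dgpu_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct san_dgpu_event *evt = data;

	pr_debug("dgpu event: cat=%u cid=%u len=%u\n",
		 evt->category, evt->command, evt->length);
	return NOTIFY_OK;
}

static struct notifier_block example_dgpu_nb = {
	.notifier_call = example_dgpu_notify,
};

/* In probe(): san_client_link(dev), then
 * san_dgpu_notifier_register(&example_dgpu_nb); unregister on remove.
 */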
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
new file mode 100644
index 000000000000..5b67f0f47d80
--- /dev/null
+++ b/include/linux/surface_aggregator/controller.h
@@ -0,0 +1,994 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module (SSAM) controller interface.
+ *
+ * Main communication interface for the SSAM EC. Provides a controller
+ * managing access and communication to and from the SSAM EC, as well as main
+ * communication structures and definitions.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
+#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+
+/* -- Main data types and definitions --------------------------------------- */
+
+/**
+ * enum ssam_event_flags - Flags for enabling/disabling SAM events
+ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
+ */
+enum ssam_event_flags {
+ SSAM_EVENT_SEQUENCED = BIT(0),
+};
+
+/**
+ * struct ssam_event - SAM event sent from the EC to the host.
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
+ * @target_id: Target ID of the event source.
+ * @command_id: Command ID of the event.
+ * @instance_id: Instance ID of the event source.
+ * @length: Length of the event payload in bytes.
+ * @data: Event payload data.
+ */
+struct ssam_event {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 length;
+ u8 data[] __counted_by(length);
+};
+
+/**
+ * enum ssam_request_flags - Flags for SAM requests.
+ *
+ * @SSAM_REQUEST_HAS_RESPONSE:
+ * Specifies that the request expects a response. If not set, the request
+ * will be directly completed after its underlying packet has been
+ * transmitted. If set, the request transport system waits for a response
+ * of the request.
+ *
+ * @SSAM_REQUEST_UNSEQUENCED:
+ * Specifies that the request should be transmitted via an unsequenced
+ * packet. If set, the request must not have a response, meaning that this
+ * flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive.
+ */
+enum ssam_request_flags {
+ SSAM_REQUEST_HAS_RESPONSE = BIT(0),
+ SSAM_REQUEST_UNSEQUENCED = BIT(1),
+};
+
+/**
+ * struct ssam_request - SAM request description.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @target_id: ID of the request's target.
+ * @command_id: Command ID of the request.
+ * @instance_id: Instance ID of the request's target.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ * @length: Length of the request payload in bytes.
+ * @payload: Request payload data.
+ *
+ * This struct fully describes a SAM request with payload. It is intended to
+ * help set up the actual transport struct, e.g. &struct ssam_request_sync,
+ * and specifically its raw message data via ssam_request_write_data().
+ */
+struct ssam_request {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 flags;
+ u16 length;
+ const u8 *payload;
+};
+
+/**
+ * struct ssam_response - Response buffer for SAM request.
+ * @capacity: Capacity of the buffer, in bytes.
+ * @length: Length of the actual data stored in the memory pointed to by
+ * @pointer, in bytes. Set by the transport system.
+ * @pointer: Pointer to the buffer's memory, storing the response payload data.
+ */
+struct ssam_response {
+ size_t capacity;
+ size_t length;
+ u8 *pointer;
+};
+
+struct ssam_controller;
+
+struct ssam_controller *ssam_get_controller(void);
+struct ssam_controller *ssam_client_bind(struct device *client);
+int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
+
+struct device *ssam_controller_device(struct ssam_controller *c);
+
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
+void ssam_controller_put(struct ssam_controller *c);
+
+void ssam_controller_statelock(struct ssam_controller *c);
+void ssam_controller_stateunlock(struct ssam_controller *c);
+
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+ struct ssam_controller *ctrl,
+ const struct ssam_request *spec);
+
+
+/* -- Synchronous request interface. ---------------------------------------- */
+
+/**
+ * struct ssam_request_sync - Synchronous SAM request struct.
+ * @base: Underlying SSH request.
+ * @comp: Completion used to signal full completion of the request. After the
+ * request has been submitted, this struct may only be modified or
+ * deallocated after the completion has been signaled.
+ * @resp: Buffer to store the response.
+ * @status: Status of the request, set after the base request has been
+ * completed or has failed.
+ */
+struct ssam_request_sync {
+ struct ssh_request base;
+ struct completion comp;
+ struct ssam_response *resp;
+ int status;
+};
+
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+ struct ssam_request_sync **rqst,
+ struct ssam_span *buffer);
+
+void ssam_request_sync_free(struct ssam_request_sync *rqst);
+
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
+ enum ssam_request_flags flags);
+
+/**
+ * ssam_request_sync_set_data - Set message data of a synchronous request.
+ * @rqst: The request.
+ * @ptr: Pointer to the request message data.
+ * @len: Length of the request message data.
+ *
+ * Set the request message data of a synchronous request. The provided buffer
+ * needs to live until the request has been completed.
+ */
+static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
+ u8 *ptr, size_t len)
+{
+ ssh_request_set_data(&rqst->base, ptr, len);
+}
+
+/**
+ * ssam_request_sync_set_resp - Set response buffer of a synchronous request.
+ * @rqst: The request.
+ * @resp: The response buffer.
+ *
+ * Sets the response buffer of a synchronous request. This buffer will store
+ * the response of the request after it has been completed. May be %NULL if no
+ * response is expected.
+ */
+static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
+ struct ssam_response *resp)
+{
+ rqst->resp = resp;
+}
+
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+ struct ssam_request_sync *rqst);
+
+/**
+ * ssam_request_sync_wait - Wait for completion of a synchronous request.
+ * @rqst: The request to wait for.
+ *
+ * Wait for completion and release of a synchronous request. After this
+ * function terminates, the request is guaranteed to have left the transport
+ * system. After successful submission of a request, this function must be
+ * called before accessing the response of the request, freeing the request,
+ * or freeing any of the buffers associated with the request.
+ *
+ * This function must not be called if the request has not been submitted yet
+ * and may lead to a deadlock/infinite wait if a subsequent request submission
+ * fails in that case, due to the completion never triggering.
+ *
+ * Return: Returns the status of the given request, which is set on completion
+ * of the packet. This value is zero on success and negative on failure.
+ */
+static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
+{
+ wait_for_completion(&rqst->comp);
+ return rqst->status;
+}
+
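/*
 * Lifecycle sketch (illustrative): the allocate/init/submit/wait/free
 * sequence described above, approximating what ssam_request_do_sync()
 * below does internally. Assumes @spec describes a valid request and
 * that struct ssam_span exposes its buffer as ->ptr (defined in
 * serial_hub.h).
 */
static int example_do_request(struct ssam_controller *ctrl,
			      const struct ssam_request *spec,
			      struct ssam_response *rsp)
{
	struct ssam_request_sync *rqst;
	struct ssam_span buf;
	ssize_t len;
	int status;

	status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
	if (status)
		return status;

	status = ssam_request_sync_init(rqst, spec->flags);
	if (status)
		goto out;

	ssam_request_sync_set_resp(rqst, rsp);

	len = ssam_request_write_data(&buf, ctrl, spec);
	if (len < 0) {
		status = len;
		goto out;
	}
	ssam_request_sync_set_data(rqst, buf.ptr, len);

	status = ssam_request_sync_submit(ctrl, rqst);
	if (!status)
		status = ssam_request_sync_wait(rqst);
out:
	ssam_request_sync_free(rqst);
	return status;
}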
+int ssam_request_do_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp);
+
+int ssam_request_do_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf);
+
+/**
+ * ssam_request_do_sync_onstack - Execute a synchronous request on the stack.
+ * @ctrl: The controller via which the request is submitted.
+ * @rqst: The request specification.
+ * @rsp: The response buffer.
+ * @payload_len: The (maximum) request payload length.
+ *
+ * Allocates a synchronous request with specified payload length on the stack,
+ * fully initializes it via the provided request specification, submits it,
+ * and finally waits for its completion before returning its status. This
+ * helper macro essentially allocates the request message buffer on the stack
+ * and then calls ssam_request_do_sync_with_buffer().
+ *
+ * Note: The @payload_len parameter specifies the maximum payload length, used
+ * for buffer allocation. The actual payload length may be smaller.
+ *
+ * Return: Returns the status of the request or any failure during setup, i.e.
+ * zero on success and a negative value on failure.
+ */
+#define ssam_request_do_sync_onstack(ctrl, rqst, rsp, payload_len) \
+ ({ \
+ u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
+ struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
+ \
+ ssam_request_do_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
+ })
+
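/*
 * Usage sketch for the macro above (illustrative): a one-off request
 * expecting an 8-byte response, with the message buffer on the stack.
 * All target/command IDs are placeholders, not real SAM IDs.
 */
static int example_onstack(struct ssam_controller *ctrl, __le64 *ret)
{
	struct ssam_request rqst = {
		.target_category = 0x01,	/* placeholder */
		.target_id	 = 0x01,	/* placeholder */
		.command_id	 = 0x02,	/* placeholder */
		.instance_id	 = 0x00,
		.flags		 = SSAM_REQUEST_HAS_RESPONSE,
		.length		 = 0,
		.payload	 = NULL,
	};
	struct ssam_response rsp = {
		.capacity = sizeof(*ret),
		.length	  = 0,
		.pointer  = (u8 *)ret,
	};

	return ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, 0);
}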
+/**
+ * __ssam_retry - Retry request in case of I/O errors or timeouts.
+ * @request: The request function to execute. Must return an integer.
+ * @n: Number of tries.
+ * @args: Arguments for the request function.
+ *
+ * Executes the given request function, i.e. calls @request. In case the
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
+ * or underlying packet timed out), @request will be re-executed again, up to
+ * @n times in total.
+ *
+ * Return: Returns the return value of the last execution of @request.
+ */
+#define __ssam_retry(request, n, args...) \
+ ({ \
+ int __i, __s = 0; \
+ \
+ for (__i = (n); __i > 0; __i--) { \
+ __s = request(args); \
+ if (__s != -ETIMEDOUT && __s != -EREMOTEIO) \
+ break; \
+ } \
+ __s; \
+ })
+
+/**
+ * ssam_retry - Retry request in case of I/O errors or timeouts up to three
+ * times in total.
+ * @request: The request function to execute. Must return an integer.
+ * @args: Arguments for the request function.
+ *
+ * Executes the given request function, i.e. calls @request. In case the
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
+ * or underlying packet timed out), @request will be re-executed again, up to
+ * three times in total.
+ *
+ * See __ssam_retry() for a more generic macro for this purpose.
+ *
+ * Return: Returns the return value of the last execution of @request.
+ */
+#define ssam_retry(request, args...) \
+ __ssam_retry(request, 3, args)
+
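/*
 * Usage sketch: wrap an existing request call in ssam_retry() so that
 * -EREMOTEIO and -ETIMEDOUT are retried up to three times in total.
 * example_onstack() is the illustrative helper sketched earlier.
 */
static int example_retried(struct ssam_controller *ctrl, __le64 *ret)
{
	return ssam_retry(example_onstack, ctrl, ret);
}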
+/**
+ * struct ssam_request_spec - Blue-print specification of SAM request.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @target_id: ID of the request's target.
+ * @command_id: Command ID of the request.
+ * @instance_id: Instance ID of the request's target.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ *
+ * Blue-print specification for a SAM request. This struct describes the
+ * unique static parameters of a request (i.e. type) without specifying any of
+ * its instance-specific data (e.g. payload). It is intended to be used as base
+ * for defining simple request functions via the
+ * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros.
+ */
+struct ssam_request_spec {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u8 flags;
+};
+
+/**
+ * struct ssam_request_spec_md - Blue-print specification for multi-device SAM
+ * request.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @command_id: Command ID of the request.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ *
+ * Blue-print specification for a multi-device SAM request, i.e. a request
+ * that is applicable to multiple device instances, described by their
+ * individual target and instance IDs. This struct describes the unique static
+ * parameters of a request (i.e. type) without specifying any of its
+ * instance-specific data (e.g. payload) and without specifying any of its
+ * device specific IDs (i.e. target and instance ID). It is intended to be
+ * used as base for defining simple multi-device request functions via the
+ * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()``
+ * families of macros.
+ */
+struct ssam_request_spec_md {
+ u8 target_category;
+ u8 command_id;
+ u8 flags;
+};
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function
+ * with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. The
+ * generated function takes care of setting up the request struct and buffer
+ * allocation, as well as execution of the request itself, returning once the
+ * request has been fully completed. The required transport buffer will be
+ * allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
+ * controller via which the request is being sent.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
+ static int name(struct ssam_controller *ctrl) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with
+ * argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. The generated function takes care of setting up the request
+ * struct, buffer allocation, as well as execution of the request itself,
+ * returning once the request has been fully completed. The required transport
+ * buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent. The request
+ * argument is specified via the ``arg`` pointer.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \
+ static int name(struct ssam_controller *ctrl, const atype *arg) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
+ }
+
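/*
 * Usage sketch for the macro above: this invocation expands to a
 * function "static int __example_set_mode(struct ssam_controller *ctrl,
 * const u8 *arg)". All IDs below are placeholders.
 */
SSAM_DEFINE_SYNC_REQUEST_W(__example_set_mode, u8, {
	.target_category = 0x03,	/* placeholder */
	.target_id	 = 0x01,	/* placeholder */
	.command_id	 = 0x0a,	/* placeholder */
	.instance_id	 = 0x00,
});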
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with
+ * return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the
+ * request itself, returning once the request has been fully completed. The
+ * required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``ctrl`` parameter is
+ * the controller via which the request is sent. The request's return value is
+ * written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_WR() - Define synchronous SAM request function with
+ * both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent. The
+ * request argument is specified via the ``arg`` pointer. The request's return
+ * value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
+ * request function with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. Device
+ * specifying parameters are not hard-coded, but instead must be provided to
+ * the function. The generated function takes care of setting up the request
+ * struct, buffer allocation, as well as execution of the request itself,
+ * returning once the request has been fully completed. The required transport
+ * buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent, ``tid`` the
+ * target ID for the request, and ``iid`` the instance ID.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM
+ * request function with argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request struct, buffer allocation, as well as execution of
+ * the request itself, returning once the request has been fully completed.
+ * The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the
+ * status of the request, which is zero on success and negative on failure.
+ * The ``ctrl`` parameter is the controller via which the request is sent,
+ * ``tid`` the target ID for the request, and ``iid`` the instance ID. The
+ * request argument is specified via the ``arg`` pointer.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM
+ * request function with return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent, ``tid``
+ * the target ID for the request, and ``iid`` the instance ID. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_WR() - Define synchronous multi-device SAM
+ * request function with both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg, rtype *ret)``,
+ * returning the status of the request, which is zero on success and negative
+ * on failure. The ``ctrl`` parameter is the controller via which the request
+ * is sent, ``tid`` the target ID for the request, and ``iid`` the instance ID.
+ * The request argument is specified via the ``arg`` pointer. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, \
+ const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+
+#define SSAM_NOTIF_STATE_SHIFT 2
+#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
+
+/**
+ * enum ssam_notif_flags - Flags used in return values from SSAM notifier
+ * callback functions.
+ *
+ * @SSAM_NOTIF_HANDLED:
+ * Indicates that the notification has been handled. This flag should be
+ * set by the handler if the handler can act/has acted upon the event
+ * provided to it. This flag should not be set if the handler is not a
+ * primary handler intended for the provided event.
+ *
+ * If this flag has not been set by any handler after the notifier chain
+ * has been traversed, a warning will be emitted, stating that the event
+ * has not been handled.
+ *
+ * @SSAM_NOTIF_STOP:
+ * Indicates that the notifier traversal should stop. If this flag is
+ * returned from a notifier callback, notifier chain traversal will
+ * immediately stop and any remaining notifiers will not be called. This
+ * flag is automatically set when ssam_notifier_from_errno() is called
+ * with a negative error value.
+ */
+enum ssam_notif_flags {
+ SSAM_NOTIF_HANDLED = BIT(0),
+ SSAM_NOTIF_STOP = BIT(1),
+};
+
+struct ssam_event_notifier;
+
+typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf,
+ const struct ssam_event *event);
+
+/**
+ * struct ssam_notifier_block - Base notifier block for SSAM event
+ * notifications.
+ * @node: The node for the list of notifiers.
+ * @fn: The callback function of this notifier. This function takes the
+ * respective notifier block and event as input and should return
+ * a notifier value, which can either be obtained from the flags
+ * provided in &enum ssam_notif_flags, converted from a standard
+ * error value via ssam_notifier_from_errno(), or a combination of
+ * both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``).
+ * @priority: Priority value determining the order in which notifier callbacks
+ * will be called. A higher value means higher priority, i.e. the
+ * associated callback will be executed earlier than other (lower
+ * priority) callbacks.
+ */
+struct ssam_notifier_block {
+ struct list_head node;
+ ssam_notifier_fn_t fn;
+ int priority;
+};
+
+/**
+ * ssam_notifier_from_errno() - Convert standard error value to notifier
+ * return code.
+ * @err: The error code to convert, must be negative (in case of failure) or
+ * zero (in case of success).
+ *
+ * Return: Returns the notifier return value obtained by converting the
+ * specified @err value. In case @err is negative, the %SSAM_NOTIF_STOP flag
+ * will be set, causing notifier call chain traversal to abort.
+ */
+static inline u32 ssam_notifier_from_errno(int err)
+{
+ if (WARN_ON(err > 0) || err == 0)
+ return 0;
+ else
+ return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
+}
+
+/**
+ * ssam_notifier_to_errno() - Convert notifier return code to standard error
+ * value.
+ * @ret: The notifier return value to convert.
+ *
+ * Return: Returns the negative error value encoded in @ret or zero if @ret
+ * indicates success.
+ */
+static inline int ssam_notifier_to_errno(u32 ret)
+{
+ return -(ret >> SSAM_NOTIF_STATE_SHIFT);
+}
+
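/*
 * Callback sketch (illustrative): handle one known command ID, combine
 * the handler's error code with %SSAM_NOTIF_HANDLED as described above,
 * and let all other events pass through. The command ID and
 * example_consume_event() are hypothetical.
 */
static u32 example_notify(struct ssam_event_notifier *nf,
			  const struct ssam_event *event)
{
	int err;

	if (event->command_id != 0x2a)	/* placeholder ID */
		return 0;		/* not ours; continue traversal */

	err = example_consume_event(event);	/* hypothetical handler */
	return ssam_notifier_from_errno(err) | SSAM_NOTIF_HANDLED;
}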
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_event_registry - Registry specification used for enabling events.
+ * @target_category: Target category for the event registry requests.
+ * @target_id: Target ID for the event registry requests.
+ * @cid_enable: Command ID for the event-enable request.
+ * @cid_disable: Command ID for the event-disable request.
+ *
+ * This struct describes a SAM event registry via the minimal collection of
+ * SAM IDs specifying the requests to use for enabling and disabling an event.
+ * The individual event to be enabled/disabled itself is specified via &struct
+ * ssam_event_id.
+ */
+struct ssam_event_registry {
+ u8 target_category;
+ u8 target_id;
+ u8 cid_enable;
+ u8 cid_disable;
+};
+
+/**
+ * struct ssam_event_id - Unique event ID used for enabling events.
+ * @target_category: Target category of the event source.
+ * @instance: Instance ID of the event source.
+ *
+ * This struct specifies the event to be enabled/disabled via an externally
+ * provided registry. It does not specify the registry to be used itself, this
+ * is done via &struct ssam_event_registry.
+ */
+struct ssam_event_id {
+ u8 target_category;
+ u8 instance;
+};
+
+/**
+ * enum ssam_event_mask - Flags specifying how events are matched to notifiers.
+ *
+ * @SSAM_EVENT_MASK_NONE:
+ * Run the callback for any event with matching target category. Do not
+ * do any additional filtering.
+ *
+ * @SSAM_EVENT_MASK_TARGET:
+ * In addition to filtering by target category, only execute the notifier
+ * callback for events whose target ID matches that of the registry
+ * used for enabling/disabling the event.
+ *
+ * @SSAM_EVENT_MASK_INSTANCE:
+ * In addition to filtering by target category, only execute the notifier
+ * callback for events whose instance ID matches the instance ID
+ * used when enabling the event.
+ *
+ * @SSAM_EVENT_MASK_STRICT:
+ * Do all the filtering above.
+ */
+enum ssam_event_mask {
+ SSAM_EVENT_MASK_TARGET = BIT(0),
+ SSAM_EVENT_MASK_INSTANCE = BIT(1),
+
+ SSAM_EVENT_MASK_NONE = 0,
+ SSAM_EVENT_MASK_STRICT =
+ SSAM_EVENT_MASK_TARGET
+ | SSAM_EVENT_MASK_INSTANCE,
+};
+
+/**
+ * SSAM_EVENT_REGISTRY() - Define a new event registry.
+ * @tc: Target category for the event registry requests.
+ * @tid: Target ID for the event registry requests.
+ * @cid_en: Command ID for the event-enable request.
+ * @cid_dis: Command ID for the event-disable request.
+ *
+ * Return: Returns the &struct ssam_event_registry specified by the given
+ * parameters.
+ */
+#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis) \
+ ((struct ssam_event_registry) { \
+ .target_category = (tc), \
+ .target_id = (tid), \
+ .cid_enable = (cid_en), \
+ .cid_disable = (cid_dis), \
+ })
+
+#define SSAM_EVENT_REGISTRY_SAM \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, SSAM_SSH_TID_SAM, 0x0b, 0x0c)
+
+#define SSAM_EVENT_REGISTRY_KIP \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, SSAM_SSH_TID_KIP, 0x27, 0x28)
+
+#define SSAM_EVENT_REGISTRY_REG(tid) \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, tid, 0x01, 0x02)
+
+/**
+ * enum ssam_event_notifier_flags - Flags for event notifiers.
+ * @SSAM_EVENT_NOTIFIER_OBSERVER:
+ * The corresponding notifier acts as observer. Registering a notifier
+ * with this flag set will not attempt to enable any event. Equally,
+ * unregistering will not attempt to disable any event. Note that a
+ * notifier with this flag may not even correspond to a certain event at
+ * all, only to a specific event target category. Event matching will not
+ * be influenced by this flag.
+ */
+enum ssam_event_notifier_flags {
+ SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0),
+};
+
+/**
+ * struct ssam_event_notifier - Notifier block for SSAM events.
+ * @base: The base notifier block with callback function and priority.
+ * @event: The event for which this block will receive notifications.
+ * @event.reg: Registry via which the event will be enabled/disabled.
+ * @event.id: ID specifying the event.
+ * @event.mask: Flags determining how events are matched to the notifier.
+ * @event.flags: Flags used for enabling the event.
+ * @flags: Notifier flags (see &enum ssam_event_notifier_flags).
+ */
+struct ssam_event_notifier {
+ struct ssam_notifier_block base;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+
+ unsigned long flags;
+};
+
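/*
 * Setup sketch (illustrative): tie a callback, registry, and event ID
 * together and register the notifier. example_notify() is the sketch
 * from the previous section; the target category is a placeholder.
 */
static struct ssam_event_notifier example_notif = {
	.base.fn	= example_notify,
	.base.priority	= 1,
	.event.reg	= SSAM_EVENT_REGISTRY_KIP,
	.event.id	= {
		.target_category = 0x15,	/* placeholder */
		.instance	 = 0x00,
	},
	.event.mask	= SSAM_EVENT_MASK_STRICT,
	.event.flags	= SSAM_EVENT_SEQUENCED,
};

/* In probe(): status = ssam_notifier_register(ctrl, &example_notif); */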
+int ssam_notifier_register(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n);
+
+int __ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n, bool disable);
+
+/**
+ * ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(ctrl, n, true);
+}
+
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
new file mode 100644
index 000000000000..8cd8c38cf3f3
--- /dev/null
+++ b/include/linux/surface_aggregator/device.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module (SSAM) bus and client-device subsystem.
+ *
+ * Main interface for the surface-aggregator bus, surface-aggregator client
+ * devices, and respective drivers building on top of the SSAM controller.
+ * Provides support for non-platform/non-ACPI SSAM clients via dedicated
+ * subsystem.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
+#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+
+/* -- Surface System Aggregator Module bus. --------------------------------- */
+
+/**
+ * enum ssam_device_domain - SAM device domain.
+ * @SSAM_DOMAIN_VIRTUAL: Virtual device.
+ * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub.
+ */
+enum ssam_device_domain {
+ SSAM_DOMAIN_VIRTUAL = 0x00,
+ SSAM_DOMAIN_SERIALHUB = 0x01,
+};
+
+/**
+ * enum ssam_virtual_tc - Target categories for the virtual SAM domain.
+ * @SSAM_VIRTUAL_TC_HUB: Device hub category.
+ */
+enum ssam_virtual_tc {
+ SSAM_VIRTUAL_TC_HUB = 0x00,
+};
+
+/**
+ * struct ssam_device_uid - Unique identifier for SSAM device.
+ * @domain: Domain of the device.
+ * @category: Target category of the device.
+ * @target: Target ID of the device.
+ * @instance: Instance ID of the device.
+ * @function: Sub-function of the device. This field can be used to split a
+ * single SAM device into multiple virtual subdevices to separate
+ * different functionality of that device and allow one driver per
+ * such functionality.
+ */
+struct ssam_device_uid {
+ u8 domain;
+ u8 category;
+ u8 target;
+ u8 instance;
+ u8 function;
+};
+
+/*
+ * Special values for device matching.
+ *
+ * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and
+ * SSAM_SDEV() exclusively. Specifically, they are used to initialize the
+ * match_flags member of the device ID structure. Do not use them directly
+ * with struct ssam_device_id or struct ssam_device_uid.
+ */
+#define SSAM_SSH_TID_ANY 0xffff
+#define SSAM_SSH_IID_ANY 0xffff
+#define SSAM_SSH_FUN_ANY 0xffff
+
+/**
+ * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
+ * parameters.
+ * @d: Domain of the device.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters. See &struct
+ * ssam_device_uid for details regarding the parameters. The special values
+ * %SSAM_SSH_TID_ANY, %SSAM_SSH_IID_ANY, and %SSAM_SSH_FUN_ANY can be used to
+ * specify that matching should ignore target ID, instance ID, and/or
+ * sub-function, respectively. This macro initializes the ``match_flags``
+ * field based on the given parameters.
+ *
+ * Note: The parameters @d and @cat must be valid &u8 values, the parameters
+ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_SSH_TID_ANY,
+ * %SSAM_SSH_IID_ANY, or %SSAM_SSH_FUN_ANY, respectively. Other non-&u8
+ * values are not allowed.
+ */
+#define SSAM_DEVICE(d, cat, tid, iid, fun) \
+ .match_flags = (((tid) != SSAM_SSH_TID_ANY) ? SSAM_MATCH_TARGET : 0) \
+ | (((iid) != SSAM_SSH_IID_ANY) ? SSAM_MATCH_INSTANCE : 0) \
+ | (((fun) != SSAM_SSH_FUN_ANY) ? SSAM_MATCH_FUNCTION : 0), \
+ .domain = d, \
+ .category = cat, \
+ .target = __builtin_choose_expr((tid) != SSAM_SSH_TID_ANY, (tid), 0), \
+ .instance = __builtin_choose_expr((iid) != SSAM_SSH_IID_ANY, (iid), 0), \
+ .function = __builtin_choose_expr((fun) != SSAM_SSH_FUN_ANY, (fun), 0)
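+
+/*
+ * For example (illustrative): SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, 0x15,
+ * SSAM_SSH_TID_ANY, 0x00, 0x00) sets SSAM_MATCH_INSTANCE and
+ * SSAM_MATCH_FUNCTION in ``match_flags`` but not SSAM_MATCH_TARGET, so
+ * matching ignores the target ID while requiring instance 0x00 and
+ * function 0x00.
+ */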
+
+/**
+ * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
+ * the given parameters.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters in the
+ * virtual domain. See &struct ssam_device_uid for details regarding the
+ * parameters. The special values %SSAM_SSH_TID_ANY, %SSAM_SSH_IID_ANY, and
+ * %SSAM_SSH_FUN_ANY can be used to specify that matching should ignore
+ * target ID, instance ID, and/or sub-function, respectively. This macro
+ * initializes the ``match_flags`` field based on the given parameters.
+ *
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
+ * @iid, and @fun must be either valid &u8 values or %SSAM_SSH_TID_ANY,
+ * %SSAM_SSH_IID_ANY, or %SSAM_SSH_FUN_ANY, respectively. Other non-&u8
+ * values are not allowed.
+ */
+#define SSAM_VDEV(cat, tid, iid, fun) \
+ SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, SSAM_SSH_TID_##tid, iid, fun)
+
+/**
+ * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
+ * with the given parameters.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters in the SSH
+ * domain. See &struct ssam_device_uid for details regarding the parameters.
+ * The special values %SSAM_SSH_TID_ANY, %SSAM_SSH_IID_ANY, and
+ * %SSAM_SSH_FUN_ANY can be used to specify that matching should ignore target
+ * ID, instance ID, and/or sub-function, respectively. This macro initializes
+ * the ``match_flags`` field based on the given parameters.
+ *
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
+ * @iid, and @fun must be either valid &u8 values or %SSAM_SSH_TID_ANY,
+ * %SSAM_SSH_IID_ANY, or %SSAM_SSH_FUN_ANY, respectively. Other non-&u8 values
+ * are not allowed.
+ */
+#define SSAM_SDEV(cat, tid, iid, fun) \
+ SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, SSAM_SSH_TID_##tid, iid, fun)
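+
+/*
+ * Usage sketch (illustrative only): building a driver match table with
+ * SSAM_SDEV(). The ``HID`` and ``KIP`` arguments are suffixes of the
+ * target category and target ID enums in serial_hub.h; the table is
+ * terminated by an empty entry.
+ *
+ *	static const struct ssam_device_id example_match[] = {
+ *		{ SSAM_SDEV(HID, KIP, SSAM_SSH_IID_ANY, 0x00) },
+ *		{ },
+ *	};
+ */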
+
+/**
+ * enum ssam_device_flags - Flags for SSAM client devices.
+ * @SSAM_DEVICE_HOT_REMOVED_BIT:
+ * The device has been hot-removed. Further communication with it may time
+ * out and should be avoided.
+ */
+enum ssam_device_flags {
+ SSAM_DEVICE_HOT_REMOVED_BIT = 0,
+};
+
+/**
+ * struct ssam_device - SSAM client device.
+ * @dev: Driver model representation of the device.
+ * @ctrl: SSAM controller managing this device.
+ * @uid: UID identifying the device.
+ * @flags: Device state flags, see &enum ssam_device_flags.
+ */
+struct ssam_device {
+ struct device dev;
+ struct ssam_controller *ctrl;
+
+ struct ssam_device_uid uid;
+
+ unsigned long flags;
+};
+
+/**
+ * struct ssam_device_driver - SSAM client device driver.
+ * @driver: Base driver model structure.
+ * @match_table: Match table specifying which devices the driver should bind to.
+ * @probe: Called when the driver is being bound to a device.
+ * @remove: Called when the driver is being unbound from the device.
+ */
+struct ssam_device_driver {
+ struct device_driver driver;
+
+ const struct ssam_device_id *match_table;
+
+ int (*probe)(struct ssam_device *sdev);
+ void (*remove)(struct ssam_device *sdev);
+};
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+extern const struct device_type ssam_device_type;
+
+/**
+ * is_ssam_device() - Check if the given device is a SSAM client device.
+ * @d: The device to test the type of.
+ *
+ * Return: Returns %true if the specified device is of type &struct
+ * ssam_device, i.e. the device type points to %ssam_device_type, and %false
+ * otherwise.
+ */
+static inline bool is_ssam_device(struct device *d)
+{
+ return d->type == &ssam_device_type;
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline bool is_ssam_device(struct device *d)
+{
+ return false;
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+/**
+ * to_ssam_device() - Casts the given device to a SSAM client device.
+ * @d: The device to cast.
+ *
+ * Casts the given &struct device to a &struct ssam_device. The caller has to
+ * ensure that the given device is actually enclosed in a &struct ssam_device,
+ * e.g. by calling is_ssam_device().
+ *
+ * Return: Returns a pointer to the &struct ssam_device wrapping the given
+ * device @d.
+ */
+#define to_ssam_device(d) container_of_const(d, struct ssam_device, dev)
+
+/**
+ * to_ssam_device_driver() - Casts the given device driver to a SSAM client
+ * device driver.
+ * @d: The driver to cast.
+ *
+ * Casts the given &struct device_driver to a &struct ssam_device_driver. The
+ * caller has to ensure that the given driver is actually enclosed in a
+ * &struct ssam_device_driver.
+ *
+ * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
+ * given device driver @d.
+ */
+#define to_ssam_device_driver(d) container_of_const(d, struct ssam_device_driver, driver)
+
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
+ const struct ssam_device_uid uid);
+
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev);
+
+const void *ssam_device_get_match_data(const struct ssam_device *dev);
+
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+ struct ssam_device_uid uid);
+
+int ssam_device_add(struct ssam_device *sdev);
+void ssam_device_remove(struct ssam_device *sdev);
+
+/**
+ * ssam_device_mark_hot_removed() - Mark the given device as hot-removed.
+ * @sdev: The device to mark as hot-removed.
+ *
+ * Mark the device as having been hot-removed. This signals drivers using the
+ * device that communication with the device should be avoided and may lead to
+ * timeouts.
+ */
+static inline void ssam_device_mark_hot_removed(struct ssam_device *sdev)
+{
+ dev_dbg(&sdev->dev, "marking device as hot-removed\n");
+ set_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
+ * ssam_device_is_hot_removed() - Check if the given device has been
+ * hot-removed.
+ * @sdev: The device to check.
+ *
+ * Checks if the given device has been marked as hot-removed. See
+ * ssam_device_mark_hot_removed() for more details.
+ *
+ * Return: Returns ``true`` if the device has been marked as hot-removed.
+ */
+static inline bool ssam_device_is_hot_removed(struct ssam_device *sdev)
+{
+ return test_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
+ * ssam_device_get() - Increment reference count of SSAM client device.
+ * @sdev: The device to increment the reference count of.
+ *
+ * Increments the reference count of the given SSAM client device by
+ * incrementing the reference count of the enclosed &struct device via
+ * get_device().
+ *
+ * See ssam_device_put() for the counter-part of this function.
+ *
+ * Return: Returns the device provided as input.
+ */
+static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev)
+{
+ return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL;
+}
+
+/**
+ * ssam_device_put() - Decrement reference count of SSAM client device.
+ * @sdev: The device to decrement the reference count of.
+ *
+ * Decrements the reference count of the given SSAM client device by
+ * decrementing the reference count of the enclosed &struct device via
+ * put_device().
+ *
+ * See ssam_device_get() for the counter-part of this function.
+ */
+static inline void ssam_device_put(struct ssam_device *sdev)
+{
+ if (sdev)
+ put_device(&sdev->dev);
+}
+
+/**
+ * ssam_device_get_drvdata() - Get driver-data of SSAM client device.
+ * @sdev: The device to get the driver-data from.
+ *
+ * Return: Returns the driver-data of the given device, previously set via
+ * ssam_device_set_drvdata().
+ */
+static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
+{
+ return dev_get_drvdata(&sdev->dev);
+}
+
+/**
+ * ssam_device_set_drvdata() - Set driver-data of SSAM client device.
+ * @sdev: The device to set the driver-data of.
+ * @data: The data to set the device's driver-data pointer to.
+ */
+static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
+{
+ dev_set_drvdata(&sdev->dev, data);
+}
+
+int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
+void ssam_device_driver_unregister(struct ssam_device_driver *d);
+
+/**
+ * ssam_device_driver_register() - Register a SSAM client device driver.
+ * @drv: The driver to register.
+ */
+#define ssam_device_driver_register(drv) \
+ __ssam_device_driver_register(drv, THIS_MODULE)
+
+/**
+ * module_ssam_device_driver() - Helper macro for SSAM device driver
+ * registration.
+ * @drv: The driver managed by this module.
+ *
+ * Helper macro to register a SSAM device driver via module_init() and
+ * module_exit(). This macro may only be used once per module and replaces the
+ * aforementioned definitions.
+ */
+#define module_ssam_device_driver(drv) \
+ module_driver(drv, ssam_device_driver_register, \
+ ssam_device_driver_unregister)
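+
+/*
+ * Minimal driver skeleton (an illustrative sketch, not taken from an actual
+ * driver; all names below are placeholders):
+ *
+ *	static int example_probe(struct ssam_device *sdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct ssam_device_driver example_driver = {
+ *		.probe = example_probe,
+ *		.match_table = example_match,
+ *		.driver = {
+ *			.name = "example",
+ *		},
+ *	};
+ *	module_ssam_device_driver(example_driver);
+ */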
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node);
+void ssam_remove_clients(struct device *dev);
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ return 0;
+}
+
+static inline void ssam_remove_clients(struct device *dev) {}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+/**
+ * ssam_register_clients() - Register all client devices defined under the
+ * given parent device.
+ * @dev: The parent device under which clients should be registered.
+ * @ctrl: The controller with which the clients should be registered.
+ *
+ * Register all clients that have been defined via firmware nodes as children
+ * of the given (parent) device. The respective child firmware nodes will be
+ * associated with the correspondingly created child devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_register_clients(struct device *dev, struct ssam_controller *ctrl)
+{
+ return __ssam_register_clients(dev, ctrl, dev_fwnode(dev));
+}
+
+/**
+ * ssam_device_register_clients() - Register all client devices defined under
+ * the given SSAM parent device.
+ * @sdev: The parent device under which clients should be registered.
+ *
+ * Register all clients that have been defined via firmware nodes as children
+ * of the given (parent) device. The respective child firmware nodes will be
+ * associated with the correspondingly created child devices.
+ *
+ * The controller used by the parent device will be used to instantiate the new
+ * devices. See ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_device_register_clients(struct ssam_device *sdev)
+{
+ return ssam_register_clients(&sdev->dev, sdev->ctrl);
+}
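+
+/*
+ * A typical pattern (illustrative): a hub driver calls
+ * ssam_device_register_clients() from its probe() callback to instantiate
+ * the devices described below its firmware node, and ssam_remove_clients()
+ * on the same device from its remove() callback to tear them down again.
+ */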
+
+
+/* -- Helpers for client-device requests. ----------------------------------- */
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM
+ * request function with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. Device
+ * specifying parameters are not hard-coded, but instead are provided via the
+ * client device, specifically its UID, supplied when calling this function.
+ * The generated function takes care of setting up the request struct, buffer
+ * allocation, as well as execution of the request itself, returning once the
+ * request has been fully completed. The required transport buffer will be
+ * allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev)``, returning the status of the request, which is zero on success and
+ * negative on failure. The ``sdev`` parameter specifies both the target
+ * device of the request and by association the controller via which the
+ * request is sent.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \
+ static int name(struct ssam_device *sdev) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM
+ * request function with argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``sdev`` parameter specifies
+ * both the target device of the request and by association the controller via
+ * which the request is sent. The request's argument is specified via the
+ * ``arg`` pointer.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \
+ static int name(struct ssam_device *sdev, const atype *arg) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM
+ * request function with return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, rtype *ret)``, returning the status of the request, which is zero on
+ * success and negative on failure. The ``sdev`` parameter specifies both the
+ * target device of the request and by association the controller via which
+ * the request is sent. The request's return value is written to the memory
+ * pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \
+ static int name(struct ssam_device *sdev, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, ret); \
+ }
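+
+/*
+ * For example (an illustrative sketch; the target category and command ID
+ * below are placeholders, not a documented request):
+ *
+ *	SSAM_DEFINE_SYNC_REQUEST_CL_R(example_get_state, __le32, {
+ *		.target_category = SSAM_SSH_TC_BAT,
+ *		.command_id      = 0x01,
+ *	});
+ *
+ * defines ``static int example_get_state(struct ssam_device *sdev,
+ * __le32 *ret)``, which sends the request to the target and instance given
+ * by the device's UID.
+ */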
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_WR() - Define synchronous client-device SAM
+ * request function with argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``sdev`` parameter
+ * specifies both the target device of the request and by association the
+ * controller via which the request is sent. The request's argument is
+ * specified via the ``arg`` pointer. The request's return value is written to
+ * the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_WR(name, atype, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_WR(__raw_##name, atype, rtype, spec) \
+ static int name(struct ssam_device *sdev, const atype *arg, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg, ret); \
+ }
+
+
+/* -- Helpers for client-device notifiers. ---------------------------------- */
+
+/**
+ * ssam_device_notifier_register() - Register an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier should be registered on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier. Increment the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the event is not
+ * marked as an observer and is currently not enabled, it will be enabled
+ * during this call. If the notifier is marked as an observer, no attempt will
+ * be made at enabling any event and no reference count will be modified.
+ *
+ * Notifiers marked as observers do not need to be associated with one specific
+ * event, i.e. as long as no event matching is performed, only the event target
+ * category needs to be set.
+ *
+ * Return: Returns zero on success, %-ENOSPC if %INT_MAX notifiers have
+ * already been registered for the event ID/type associated with the notifier
+ * block, %-ENOMEM if the corresponding event entry could not be allocated,
+ * and %-ENODEV if the device is marked as hot-removed. If this is the
+ * first time that a notifier block is registered for the specific associated
+ * event, returns the status of the event-enable EC-command.
+ */
+static inline int ssam_device_notifier_register(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ /*
+ * Note that this check does not provide any guarantees whatsoever as
+ * hot-removal could happen at any point and we can't protect against
+ * it. Nevertheless, if we can detect hot-removal, bail early to avoid
+ * communication timeouts.
+ */
+ if (ssam_device_is_hot_removed(sdev))
+ return -ENODEV;
+
+ return ssam_notifier_register(sdev->ctrl, n);
+}
+
+/**
+ * ssam_device_notifier_unregister() - Unregister an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * In case the device has been marked as hot-removed, the event will not be
+ * disabled on the EC, as in those cases any attempt at doing so may time out.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_device_notifier_unregister(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(sdev->ctrl, n,
+ !ssam_device_is_hot_removed(sdev));
+}
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
new file mode 100644
index 000000000000..d8dbef6b7fc2
--- /dev/null
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface Serial Hub (SSH) protocol and communication interface.
+ *
+ * Lower-level communication layers and SSH protocol definitions for the
+ * Surface System Aggregator Module (SSAM). Provides the interface for basic
+ * packet- and request-based communication with the SSAM EC via SSH.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
+#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
+
+#include <linux/crc-itu-t.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+
+/* -- Data structures for SAM-over-SSH communication. ----------------------- */
+
+/**
+ * enum ssh_frame_type - Frame types for SSH frames.
+ *
+ * @SSH_FRAME_TYPE_DATA_SEQ:
+ * Indicates a data frame, followed by a payload with the length specified
+ * in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning
+ * that an ACK is required.
+ *
+ * @SSH_FRAME_TYPE_DATA_NSQ:
+ * Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
+ * message does not have to be ACKed.
+ *
+ * @SSH_FRAME_TYPE_ACK:
+ * Indicates an ACK message.
+ *
+ * @SSH_FRAME_TYPE_NAK:
+ * Indicates an error response for previously sent frame. In general, this
+ * means that the frame and/or payload is malformed, e.g. a CRC is wrong.
+ * For command-type payloads, this can also mean that the command is
+ * invalid.
+ */
+enum ssh_frame_type {
+ SSH_FRAME_TYPE_DATA_SEQ = 0x80,
+ SSH_FRAME_TYPE_DATA_NSQ = 0x00,
+ SSH_FRAME_TYPE_ACK = 0x40,
+ SSH_FRAME_TYPE_NAK = 0x04,
+};
+
+/**
+ * struct ssh_frame - SSH communication frame.
+ * @type: The type of the frame. See &enum ssh_frame_type.
+ * @len: The length of the frame payload directly following the CRC for this
+ * frame. Does not include the final CRC for that payload.
+ * @seq: The sequence number for this message/exchange.
+ */
+struct ssh_frame {
+ u8 type;
+ __le16 len;
+ u8 seq;
+} __packed;
+
+static_assert(sizeof(struct ssh_frame) == 4);
+
+/*
+ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
+
+/**
+ * enum ssh_payload_type - Type indicator for the SSH payload.
+ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
+ * payload.
+ */
+enum ssh_payload_type {
+ SSH_PLD_TYPE_CMD = 0x80,
+};
+
+/**
+ * struct ssh_command - Payload of a command-type frame.
+ * @type: The type of the payload. See &enum ssh_payload_type. Should be
+ * SSH_PLD_TYPE_CMD for this struct.
+ * @tc: Command target category.
+ * @tid: Target ID. Indicates the target of the message.
+ * @sid: Source ID. Indicates the source of the message.
+ * @iid: Instance ID.
+ * @rqid: Request ID. Used to match requests with responses and differentiate
+ * between responses and events.
+ * @cid: Command ID.
+ */
+struct ssh_command {
+ u8 type;
+ u8 tc;
+ u8 tid;
+ u8 sid;
+ u8 iid;
+ __le16 rqid;
+ u8 cid;
+} __packed;
+
+static_assert(sizeof(struct ssh_command) == 8);
+
+/*
+ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
+ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
+
+/*
+ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
+ *
+ * This is the minimum number of bytes required to form a message. The actual
+ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
+ */
+#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
+
+/*
+ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
+ *
+ * This is the length of a SSH control message, which is equal to a SSH
+ * message without any payload.
+ */
+#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
+
+/**
+ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
+ * @payload_size: Length of the payload inside the SSH frame.
+ *
+ * Return: Returns the length of a SSH message with payload of specified size.
+ */
+#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + (payload_size))
+
+/**
+ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
+ * @payload_size: Length of the command payload.
+ *
+ * Return: Returns the length of a SSH command message with command payload of
+ * specified size.
+ */
+#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
+ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + (payload_size))
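+
+/*
+ * For example: with sizeof(struct ssh_frame) == 4 and sizeof(struct
+ * ssh_command) == 8, SSH_MSG_LEN_BASE evaluates to 4 + 3 * 2 = 10 bytes,
+ * so a command message with an n-byte command payload is
+ * SSH_COMMAND_MESSAGE_LENGTH(n) == 18 + n bytes long.
+ */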
+
+/**
+ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
+ * frame.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_frame field in the
+ * raw SSH message data. Takes SYN bytes (u16) preceding the frame into
+ * account.
+ */
+#define SSH_MSGOFFSET_FRAME(field) \
+ (sizeof(u16) + offsetof(struct ssh_frame, field))
+
+/**
+ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field
+ * in command.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_command field in
+ * the raw SSH message data. Takes SYN bytes (u16) preceding the frame and the
+ * frame CRC (u16) between frame and command into account.
+ */
+#define SSH_MSGOFFSET_COMMAND(field) \
+ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
+ + offsetof(struct ssh_command, field))
+
+/*
+ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16.
+ */
+#define SSH_MSG_SYN ((u16)0x55aa)
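+
+/*
+ * Putting the above together, a command message has the following on-wire
+ * layout (sizes in bytes; CRC placement as described for the
+ * SSH_MSGOFFSET_*() macros above):
+ *
+ *	+-----+-----------+-----------+-----------------------+-------------+
+ *	| SYN | ssh_frame | frame CRC | ssh_command + payload | payload CRC |
+ *	|  2  |     4     |     2     |    8 + payload len    |      2      |
+ *	+-----+-----------+-----------+-----------------------+-------------+
+ */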
+
+/**
+ * ssh_crc() - Compute CRC for SSH messages.
+ * @buf: The pointer pointing to the data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ *
+ * Return: Returns the CRC computed on the provided data, as used for SSH
+ * messages.
+ */
+static inline u16 ssh_crc(const u8 *buf, size_t len)
+{
+ return crc_itu_t(0xffff, buf, len);
+}
+
+/*
+ * SSH_NUM_EVENTS - The number of reserved event IDs.
+ *
+ * The number of reserved event IDs, used for registering an SSH event
+ * handler. Valid event IDs are numbers below or equal to this value, with
+ * the exception of zero, which is not an event ID. Thus, this is also the
+ * absolute maximum number of event handlers that can be registered.
+ */
+#define SSH_NUM_EVENTS 38
+
+/*
+ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
+ */
+#define SSH_NUM_TARGETS 2
+
+/**
+ * ssh_rqid_next_valid() - Return the next valid request ID.
+ * @rqid: The current request ID.
+ *
+ * Return: Returns the next valid request ID, following the current request ID
+ * provided to this function. This function skips any request IDs reserved for
+ * events.
+ */
+static inline u16 ssh_rqid_next_valid(u16 rqid)
+{
+ return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
+}
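+
+/*
+ * For example: request IDs 1 through SSH_NUM_EVENTS (38) are reserved for
+ * events, so ssh_rqid_next_valid(0) skips over them and returns 39, whereas
+ * ssh_rqid_next_valid(39) simply returns 40.
+ */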
+
+/**
+ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
+ * @rqid: The request ID to convert.
+ */
+static inline u16 ssh_rqid_to_event(u16 rqid)
+{
+ return rqid - 1u;
+}
+
+/**
+ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
+ * @rqid: The request ID to check.
+ */
+static inline bool ssh_rqid_is_event(u16 rqid)
+{
+ return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
+}
+
+/**
+ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
+ * @tc: The target category to convert.
+ */
+static inline u16 ssh_tc_to_rqid(u8 tc)
+{
+ return tc;
+}
+
+/**
+ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
+ * @tid: The target ID to convert.
+ */
+static inline u8 ssh_tid_to_index(u8 tid)
+{
+ return tid - 1u;
+}
+
+/**
+ * ssh_tid_is_valid() - Check if target ID is valid/supported.
+ * @tid: The target ID to check.
+ */
+static inline bool ssh_tid_is_valid(u8 tid)
+{
+ return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
+}
+
+/**
+ * struct ssam_span - Reference to a buffer region.
+ * @ptr: Pointer to the buffer region.
+ * @len: Length of the buffer region.
+ *
+ * A reference to a (non-owned) buffer segment, consisting of pointer and
+ * length. Use of this struct indicates non-owned data, i.e. data of which the
+ * life-time is managed (i.e. it is allocated/freed) via another pointer.
+ */
+struct ssam_span {
+ u8 *ptr;
+ size_t len;
+};
+
+/**
+ * enum ssam_ssh_tid - Target/source IDs for Serial Hub messages.
+ * @SSAM_SSH_TID_HOST: We as the kernel Serial Hub driver.
+ * @SSAM_SSH_TID_SAM: The Surface Aggregator EC.
+ * @SSAM_SSH_TID_KIP: Keyboard and peripheral controller.
+ * @SSAM_SSH_TID_DEBUG: Debug connector.
+ * @SSAM_SSH_TID_SURFLINK: SurfLink connector.
+ */
+enum ssam_ssh_tid {
+ SSAM_SSH_TID_HOST = 0x00,
+ SSAM_SSH_TID_SAM = 0x01,
+ SSAM_SSH_TID_KIP = 0x02,
+ SSAM_SSH_TID_DEBUG = 0x03,
+ SSAM_SSH_TID_SURFLINK = 0x04,
+};
+
+/*
+ * Known SSH/EC target categories.
+ *
+ * List of currently known target category values; "Known" as in we know they
+ * exist and are valid on at least some device/model. Detailed functionality
+ * or the full category name is only known for some of these categories and
+ * is detailed in the respective comment below.
+ *
+ * These values and abbreviations have been extracted from strings inside the
+ * Windows driver.
+ */
+enum ssam_ssh_tc {
+ /* Category 0x00 is invalid for EC use. */
+ SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
+ SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
+ SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
+ SSAM_SSH_TC_PMC = 0x04,
+ SSAM_SSH_TC_FAN = 0x05,
+ SSAM_SSH_TC_PoM = 0x06,
+ SSAM_SSH_TC_DBG = 0x07,
+ SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
+ SSAM_SSH_TC_FWU = 0x09,
+ SSAM_SSH_TC_UNI = 0x0a,
+ SSAM_SSH_TC_LPC = 0x0b,
+ SSAM_SSH_TC_TCL = 0x0c,
+ SSAM_SSH_TC_SFL = 0x0d,
+ SSAM_SSH_TC_KIP = 0x0e, /* Manages detachable peripherals (Pro X/8 keyboard cover) */
+ SSAM_SSH_TC_EXT = 0x0f,
+ SSAM_SSH_TC_BLD = 0x10,
+ SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
+ SSAM_SSH_TC_SEN = 0x12,
+ SSAM_SSH_TC_SRQ = 0x13,
+ SSAM_SSH_TC_MCU = 0x14,
+ SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
+ SSAM_SSH_TC_TCH = 0x16,
+ SSAM_SSH_TC_BKL = 0x17,
+ SSAM_SSH_TC_TAM = 0x18,
+ SSAM_SSH_TC_ACC0 = 0x19,
+ SSAM_SSH_TC_UFI = 0x1a,
+ SSAM_SSH_TC_USC = 0x1b,
+ SSAM_SSH_TC_PEN = 0x1c,
+ SSAM_SSH_TC_VID = 0x1d,
+ SSAM_SSH_TC_AUD = 0x1e,
+ SSAM_SSH_TC_SMC = 0x1f,
+ SSAM_SSH_TC_KPD = 0x20,
+ SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+ SSAM_SSH_TC_SPT = 0x22,
+ SSAM_SSH_TC_SYS = 0x23,
+ SSAM_SSH_TC_ACC1 = 0x24,
+ SSAM_SSH_TC_SHB = 0x25,
+ SSAM_SSH_TC_POS = 0x26, /* For obtaining Laptop Studio screen position. */
+};
+
+
+/* -- Packet transport layer (ptl). ----------------------------------------- */
+
+/**
+ * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet.
+ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
+ * @SSH_PACKET_PRIORITY_DATA: Base priority for normal data packets.
+ * @SSH_PACKET_PRIORITY_NAK: Base priority for NAK packets.
+ * @SSH_PACKET_PRIORITY_ACK: Base priority for ACK packets.
+ */
+enum ssh_packet_base_priority {
+ SSH_PACKET_PRIORITY_FLUSH = 0, /* same as DATA to sequence flush */
+ SSH_PACKET_PRIORITY_DATA = 0,
+ SSH_PACKET_PRIORITY_NAK = 1,
+ SSH_PACKET_PRIORITY_ACK = 2,
+};
+
+/*
+ * Same as SSH_PACKET_PRIORITY() below, only with actual values.
+ */
+#define __SSH_PACKET_PRIORITY(base, try) \
+ (((base) << 4) | ((try) & 0x0f))
+
+/**
+ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
+ * number of tries.
+ * @base: The base priority as suffix of &enum ssh_packet_base_priority, e.g.
+ * ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
+ * @try: The number of tries (must be less than 16).
+ *
+ * Compute the combined packet priority. The combined priority is dominated by
+ * the base priority, whereas the number of (re-)tries decides the precedence
+ * of packets with the same base priority, giving higher priority to packets
+ * that already have more tries.
+ *
+ * Return: Returns the computed priority as value fitting inside a &u8. A
+ * higher number means a higher priority.
+ */
+#define SSH_PACKET_PRIORITY(base, try) \
+ __SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try))
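+
+/*
+ * For example: SSH_PACKET_PRIORITY(DATA, 0) == 0x00 and
+ * SSH_PACKET_PRIORITY(DATA, 2) == 0x02, while SSH_PACKET_PRIORITY(ACK, 0)
+ * == 0x20. Any ACK therefore takes precedence over any data packet,
+ * regardless of the number of tries.
+ */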
+
+/**
+ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
+ * @priority: The packet priority.
+ *
+ * Return: Returns the number of tries encoded in the specified packet
+ * priority.
+ */
+static inline u8 ssh_packet_priority_get_try(u8 priority)
+{
+ return priority & 0x0f;
+}
+
+/**
+ * ssh_packet_priority_get_base - Get base priority from packet priority.
+ * @priority: The packet priority.
+ *
+ * Return: Returns the base priority encoded in the given packet priority.
+ */
+static inline u8 ssh_packet_priority_get_base(u8 priority)
+{
+ return (priority & 0xf0) >> 4;
+}
+
+enum ssh_packet_flags {
+ /* state flags */
+ SSH_PACKET_SF_LOCKED_BIT,
+ SSH_PACKET_SF_QUEUED_BIT,
+ SSH_PACKET_SF_PENDING_BIT,
+ SSH_PACKET_SF_TRANSMITTING_BIT,
+ SSH_PACKET_SF_TRANSMITTED_BIT,
+ SSH_PACKET_SF_ACKED_BIT,
+ SSH_PACKET_SF_CANCELED_BIT,
+ SSH_PACKET_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_PACKET_TY_FLUSH_BIT,
+ SSH_PACKET_TY_SEQUENCED_BIT,
+ SSH_PACKET_TY_BLOCKING_BIT,
+
+ /* mask for state flags */
+ SSH_PACKET_FLAGS_SF_MASK =
+ BIT(SSH_PACKET_SF_LOCKED_BIT)
+ | BIT(SSH_PACKET_SF_QUEUED_BIT)
+ | BIT(SSH_PACKET_SF_PENDING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
+ | BIT(SSH_PACKET_SF_ACKED_BIT)
+ | BIT(SSH_PACKET_SF_CANCELED_BIT)
+ | BIT(SSH_PACKET_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_PACKET_FLAGS_TY_MASK =
+ BIT(SSH_PACKET_TY_FLUSH_BIT)
+ | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
+ | BIT(SSH_PACKET_TY_BLOCKING_BIT),
+};
+
+struct ssh_ptl;
+struct ssh_packet;
+
+/**
+ * struct ssh_packet_ops - Callback operations for a SSH packet.
+ * @release: Function called when the packet reference count reaches zero.
+ * This callback must be relied upon to ensure that the packet has
+ * left the transport system(s).
+ * @complete: Function called when the packet is completed, either with
+ * success or failure. In case of failure, the reason for the
+ * failure is indicated by the value of the provided status code
+ * argument. This value will be zero in case of success. Note that
+ * a call to this callback does not guarantee that the packet is
+ * not in use by the transport system any more.
+ */
+struct ssh_packet_ops {
+ void (*release)(struct ssh_packet *p);
+ void (*complete)(struct ssh_packet *p, int status);
+};
+
+/**
+ * struct ssh_packet - SSH transport packet.
+ * @ptl: Pointer to the packet transport layer. May be %NULL if the packet
+ * (or enclosing request) has not been submitted yet.
+ * @refcnt: Reference count of the packet.
+ * @priority: Priority of the packet. Must be computed via
+ * SSH_PACKET_PRIORITY(). Must only be accessed while holding the
+ * queue lock after first submission.
+ * @data: Raw message data.
+ * @data.len: Length of the raw message data.
+ * @data.ptr: Pointer to the raw message data buffer.
+ * @state: State and type flags describing current packet state (dynamic)
+ * and type (static). See &enum ssh_packet_flags for possible
+ * options.
+ * @timestamp: Timestamp specifying when the latest transmission of a
+ * currently pending packet has been started. May be %KTIME_MAX
+ * before or in-between transmission attempts. Used for the packet
+ * timeout implementation. Must only be accessed while holding the
+ * pending lock after first submission.
+ * @queue_node: The list node for the packet queue.
+ * @pending_node: The list node for the set of pending packets.
+ * @ops: Packet operations.
+ */
+struct ssh_packet {
+ struct ssh_ptl *ptl;
+ struct kref refcnt;
+
+ u8 priority;
+
+ struct {
+ size_t len;
+ u8 *ptr;
+ } data;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ struct list_head queue_node;
+ struct list_head pending_node;
+
+ const struct ssh_packet_ops *ops;
+};
+
+struct ssh_packet *ssh_packet_get(struct ssh_packet *p);
+void ssh_packet_put(struct ssh_packet *p);
+
+/**
+ * ssh_packet_set_data() - Set raw message data of packet.
+ * @p: The packet for which the message data should be set.
+ * @ptr: Pointer to the memory holding the message data.
+ * @len: Length of the message data.
+ *
+ * Sets the raw message data buffer of the packet to the provided memory. The
+ * memory is not copied. Instead, the caller is responsible for management
+ * (i.e. allocation and deallocation) of the memory. The caller must ensure
+ * that the provided memory is valid and contains a valid SSH message,
+ * starting from the time of submission of the packet until the ``release``
+ * callback has been called. During this time, the memory may not be altered
+ * in any way.
+ */
+static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
+{
+ p->data.ptr = ptr;
+ p->data.len = len;
+}
+
+
+/* -- Request transport layer (rtl). ---------------------------------------- */
+
+enum ssh_request_flags {
+ /* state flags */
+ SSH_REQUEST_SF_LOCKED_BIT,
+ SSH_REQUEST_SF_QUEUED_BIT,
+ SSH_REQUEST_SF_PENDING_BIT,
+ SSH_REQUEST_SF_TRANSMITTING_BIT,
+ SSH_REQUEST_SF_TRANSMITTED_BIT,
+ SSH_REQUEST_SF_RSPRCVD_BIT,
+ SSH_REQUEST_SF_CANCELED_BIT,
+ SSH_REQUEST_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_REQUEST_TY_FLUSH_BIT,
+ SSH_REQUEST_TY_HAS_RESPONSE_BIT,
+
+ /* mask for state flags */
+ SSH_REQUEST_FLAGS_SF_MASK =
+ BIT(SSH_REQUEST_SF_LOCKED_BIT)
+ | BIT(SSH_REQUEST_SF_QUEUED_BIT)
+ | BIT(SSH_REQUEST_SF_PENDING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
+ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
+ | BIT(SSH_REQUEST_SF_CANCELED_BIT)
+ | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_REQUEST_FLAGS_TY_MASK =
+ BIT(SSH_REQUEST_TY_FLUSH_BIT)
+ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
+};
+
+struct ssh_rtl;
+struct ssh_request;
+
+/**
+ * struct ssh_request_ops - Callback operations for a SSH request.
+ * @release: Function called when the request's reference count reaches zero.
+ * This callback must be relied upon to ensure that the request has
+ *           left the transport systems (both packet and request systems).
+ * @complete: Function called when the request is completed, either with
+ * success or failure. The command data for the request response
+ * is provided via the &struct ssh_command parameter (``cmd``),
+ * the command payload of the request response via the &struct
+ *           ssam_span parameter (``data``).
+ *
+ * If the request does not have any response or has not been
+ * completed with success, both ``cmd`` and ``data`` parameters will
+ * be NULL. If the request response does not have any command
+ * payload, the ``data`` span will be an empty (zero-length) span.
+ *
+ * In case of failure, the reason for the failure is indicated by
+ * the value of the provided status code argument (``status``). This
+ * value will be zero in case of success and a regular errno
+ * otherwise.
+ *
+ * Note that a call to this callback does not guarantee that the
+ * request is not in use by the transport systems any more.
+ */
+struct ssh_request_ops {
+ void (*release)(struct ssh_request *rqst);
+ void (*complete)(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data, int status);
+};
+
+/**
+ * struct ssh_request - SSH transport request.
+ * @packet: The underlying SSH transport packet.
+ * @node: List node for the request queue and pending set.
+ * @state: State and type flags describing current request state (dynamic)
+ * and type (static). See &enum ssh_request_flags for possible
+ * options.
+ * @timestamp: Timestamp specifying when we start waiting on the response of
+ * the request. This is set once the underlying packet has been
+ * completed and may be %KTIME_MAX before that, or when the request
+ * does not expect a response. Used for the request timeout
+ * implementation.
+ * @ops: Request operations.
+ */
+struct ssh_request {
+ struct ssh_packet packet;
+ struct list_head node;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ const struct ssh_request_ops *ops;
+};
+
+/**
+ * to_ssh_request() - Cast a SSH packet to its enclosing SSH request.
+ * @p: The packet to cast.
+ *
+ * Casts the given &struct ssh_packet to its enclosing &struct ssh_request.
+ * The caller is responsible for making sure that the packet is actually
+ * wrapped in a &struct ssh_request.
+ *
+ * Return: Returns the &struct ssh_request wrapping the provided packet.
+ */
+static inline struct ssh_request *to_ssh_request(struct ssh_packet *p)
+{
+ return container_of(p, struct ssh_request, packet);
+}
+
+/**
+ * ssh_request_get() - Increment reference count of request.
+ * @r: The request to increment the reference count of.
+ *
+ * Increments the reference count of the given request by incrementing the
+ * reference count of the underlying &struct ssh_packet, enclosed in it.
+ *
+ * See also ssh_request_put(), ssh_packet_get().
+ *
+ * Return: Returns the request provided as input.
+ */
+static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
+{
+ return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL;
+}
+
+/**
+ * ssh_request_put() - Decrement reference count of request.
+ * @r: The request to decrement the reference count of.
+ *
+ * Decrements the reference count of the given request by decrementing the
+ * reference count of the underlying &struct ssh_packet, enclosed in it. If
+ * the reference count reaches zero, the ``release`` callback specified in the
+ * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be
+ * called.
+ *
+ * See also ssh_request_get(), ssh_packet_put().
+ */
+static inline void ssh_request_put(struct ssh_request *r)
+{
+ if (r)
+ ssh_packet_put(&r->packet);
+}
+
+/**
+ * ssh_request_set_data() - Set raw message data of request.
+ * @r: The request for which the message data should be set.
+ * @ptr: Pointer to the memory holding the message data.
+ * @len: Length of the message data.
+ *
+ * Sets the raw message data buffer of the underlying packet to the specified
+ * buffer. Does not copy the actual message data, just sets the buffer pointer
+ * and length. Refer to ssh_packet_set_data() for more details.
+ */
+static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
+{
+ ssh_packet_set_data(&r->packet, ptr, len);
+}
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index cb9afad82a90..da6ebca3ff77 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -40,62 +40,6 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
-enum suspend_stat_step {
- SUSPEND_FREEZE = 1,
- SUSPEND_PREPARE,
- SUSPEND_SUSPEND,
- SUSPEND_SUSPEND_LATE,
- SUSPEND_SUSPEND_NOIRQ,
- SUSPEND_RESUME_NOIRQ,
- SUSPEND_RESUME_EARLY,
- SUSPEND_RESUME
-};
-
-struct suspend_stats {
- int success;
- int fail;
- int failed_freeze;
- int failed_prepare;
- int failed_suspend;
- int failed_suspend_late;
- int failed_suspend_noirq;
- int failed_resume;
- int failed_resume_early;
- int failed_resume_noirq;
-#define REC_FAILED_NUM 2
- int last_failed_dev;
- char failed_devs[REC_FAILED_NUM][40];
- int last_failed_errno;
- int errno[REC_FAILED_NUM];
- int last_failed_step;
- enum suspend_stat_step failed_steps[REC_FAILED_NUM];
-};
-
-extern struct suspend_stats suspend_stats;
-
-static inline void dpm_save_failed_dev(const char *name)
-{
- strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
- name,
- sizeof(suspend_stats.failed_devs[0]));
- suspend_stats.last_failed_dev++;
- suspend_stats.last_failed_dev %= REC_FAILED_NUM;
-}
-
-static inline void dpm_save_failed_errno(int err)
-{
- suspend_stats.errno[suspend_stats.last_failed_errno] = err;
- suspend_stats.last_failed_errno++;
- suspend_stats.last_failed_errno %= REC_FAILED_NUM;
-}
-
-static inline void dpm_save_failed_step(enum suspend_stat_step step)
-{
- suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
- suspend_stats.last_failed_step++;
- suspend_stats.last_failed_step %= REC_FAILED_NUM;
-}
-
/**
* struct platform_suspend_ops - Callbacks for managing platform dependent
* system sleep states.
@@ -191,6 +135,7 @@ struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
int (*prepare_late)(void);
+ void (*check)(void);
bool (*wake)(void);
void (*restore_early)(void);
void (*restore)(void);
@@ -198,6 +143,7 @@ struct platform_s2idle_ops {
};
#ifdef CONFIG_SUSPEND
+extern suspend_state_t pm_suspend_target_state;
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
@@ -333,6 +279,8 @@ extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+
static inline void pm_suspend_clear_flags(void) {}
static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
@@ -360,9 +308,6 @@ struct pbe {
struct pbe *next;
};
-/* mm/page_alloc.c */
-extern void mark_free_pages(struct zone *zone);
-
/**
* struct platform_hibernation_ops - hibernation platform support
*
@@ -430,15 +375,7 @@ struct platform_hibernation_ops {
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
-extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
-static inline void __init register_nosave_region(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 0);
-}
-static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 1);
-}
+extern void register_nosave_region(unsigned long b, unsigned long e);
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
@@ -446,6 +383,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);
+extern u32 swsusp_hardware_signature;
extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
@@ -455,9 +393,12 @@ extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);
int hibernate_quiet_exec(int (*func)(void *data), void *data);
+int hibernate_resume_nonboot_cpu_disable(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
@@ -472,10 +413,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+int arch_resume_nosmt(void);
+
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
-int is_hibernate_resume_dev(const struct inode *);
+int is_hibernate_resume_dev(dev_t dev);
#else
-static inline int is_hibernate_resume_dev(const struct inode *i) { return 0; }
+static inline int is_hibernate_resume_dev(dev_t dev) { return 0; }
#endif
/* Hibernation and suspend events */
@@ -496,6 +439,8 @@ void restore_processor_state(void);
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
extern void ksys_sync_helper(void);
+extern void pm_report_hw_sleep_time(u64 t);
+extern void pm_report_max_hw_sleep(u64 t);
#define pm_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
@@ -505,21 +450,25 @@ extern void ksys_sync_helper(void);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
-extern suspend_state_t pm_suspend_target_state;
+
+static inline bool pm_suspended_storage(void)
+{
+ return !gfp_has_io_fs(gfp_allowed_mask);
+}
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-extern void lock_system_sleep(void);
-extern void unlock_system_sleep(void);
+extern unsigned int lock_system_sleep(void);
+extern void unlock_system_sleep(unsigned int);
#else /* !CONFIG_PM_SLEEP */
@@ -533,39 +482,79 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
return 0;
}
+static inline void pm_report_hw_sleep_time(u64 t) {};
+static inline void pm_report_max_hw_sleep(u64 t) {};
+
static inline void ksys_sync_helper(void) {}
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
+static inline bool pm_suspended_storage(void) { return false; }
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
-static inline void lock_system_sleep(void) {}
-static inline void unlock_system_sleep(void) {}
+static inline unsigned int lock_system_sleep(void) { return 0; }
+static inline void unlock_system_sleep(unsigned int flags) {}
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
-extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
+extern bool pm_debug_messages_should_print(void);
+static inline int pm_dyn_debug_messages_on(void)
+{
+#ifdef CONFIG_DYNAMIC_DEBUG
+ return 1;
+#else
+ return 0;
+#endif
+}
+#ifndef pr_fmt
+#define pr_fmt(fmt) "PM: " fmt
+#endif
+#define __pm_pr_dbg(fmt, ...) \
+ do { \
+ if (pm_debug_messages_should_print()) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ else if (pm_dyn_debug_messages_on()) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+#define __pm_deferred_pr_dbg(fmt, ...) \
+ do { \
+ if (pm_debug_messages_should_print()) \
+ printk_deferred(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
#else
#define pm_print_times_enabled (false)
#define pm_debug_messages_on (false)
#include <linux/printk.h>
-#define __pm_pr_dbg(defer, fmt, ...) \
- no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#define __pm_pr_dbg(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define __pm_deferred_pr_dbg(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
+/**
+ * pm_pr_dbg - print pm sleep debug messages
+ *
+ * If pm_debug_messages_on is enabled and the system is entering/leaving
+ * suspend, print the message.
+ * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is enabled,
+ * print the message only from call sites explicitly enabled via the dynamic
+ * debug control.
+ * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is disabled,
+ * don't print the message.
+ */
#define pm_pr_dbg(fmt, ...) \
- __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
+ __pm_pr_dbg(fmt, ##__VA_ARGS__)
#define pm_deferred_pr_dbg(fmt, ...) \
- __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
+ __pm_deferred_pr_dbg(fmt, ##__VA_ARGS__)
#ifdef CONFIG_PM_AUTOSLEEP
@@ -578,4 +567,19 @@ static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */
+enum suspend_stat_step {
+ SUSPEND_WORKING = 0,
+ SUSPEND_FREEZE,
+ SUSPEND_PREPARE,
+ SUSPEND_SUSPEND,
+ SUSPEND_SUSPEND_LATE,
+ SUSPEND_SUSPEND_NOIRQ,
+ SUSPEND_RESUME_NOIRQ,
+ SUSPEND_RESUME_EARLY,
+ SUSPEND_RESUME
+};
+
+void dpm_save_failed_dev(const char *name);
+void dpm_save_failed_step(enum suspend_stat_step step);
+
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/swab.h b/include/linux/swab.h
index bcff5149861a..9b804dbb0d79 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -20,4 +20,29 @@
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
+
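+/*
+ * In-place byte-swap helpers: each function swaps every element of the
+ * given array, with @words giving the number of elements.
+ */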
+static inline void swab16_array(u16 *buf, unsigned int words)
+{
+ while (words--) {
+ swab16s(buf);
+ buf++;
+ }
+}
+
+static inline void swab32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ swab32s(buf);
+ buf++;
+ }
+}
+
+static inline void swab64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ swab64s(buf);
+ buf++;
+ }
+}
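
The new helpers byte-swap every element of a buffer in place, which is handy when a device hands back an array of fixed-endian words. A short sketch, assuming a hypothetical register buffer:

#include <linux/swab.h>

static void example_fix_endianness(u32 *regs, unsigned int nwords)
{
	/* Each 32-bit word in regs[0..nwords-1] is byte-reversed in place. */
	swab32_array(regs, nwords);
}
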
+
#endif /* _LINUX_SWAB_H */
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 6a8c22b8c2a5..d324419482a0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -146,7 +146,7 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
-extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q, int wake_flags);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
diff --git a/include/linux/swait_api.h b/include/linux/swait_api.h
new file mode 100644
index 000000000000..1eeaaaaa5ea7
--- /dev/null
+++ b/include/linux/swait_api.h
@@ -0,0 +1 @@
+#include <linux/swait.h>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 661046994db4..f53d608daa01 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -10,8 +10,10 @@
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
+#include <uapi/linux/mempolicy.h>
#include <asm/page.h>
struct notifier_block;
@@ -54,29 +56,50 @@ static inline int current_is_kswapd(void)
*/
/*
+ * PTE markers are used to persist information onto PTEs that would
+ * otherwise be none ptes. As the name "PTE" hints, they should only be
+ * applied to the leaves of pgtables.
+ */
+#define SWP_PTE_MARKER_NUM 1
+#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
+
+/*
* Unaddressable device memory support. See include/linux/hmm.h and
- * Documentation/vm/hmm.rst. Short description is we need struct pages for
+ * Documentation/mm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
* When a page is migrated from CPU to device, we set the CPU page table entry
- * to a special SWP_DEVICE_* entry.
+ * to a special SWP_DEVICE_{READ|WRITE} entry.
+ *
+ * When a page is mapped by the device for exclusive access we set the CPU page
+ * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 2
+#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
+#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
+#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
- * NUMA node memory migration support
+ * Page migration support.
+ *
+ * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
+ * indicates that the referenced (part of) an anonymous page is exclusive to
+ * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
+ * (parts of) an anonymous page that are mapped writable are exclusive to a
+ * single process.
*/
#ifdef CONFIG_MIGRATION
-#define SWP_MIGRATION_NUM 2
-#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
-#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_NUM 3
+#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
+#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
@@ -93,7 +116,8 @@ static inline int current_is_kswapd(void)
#define MAX_SWAPFILES \
((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
- SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
+ SWP_PTE_MARKER_NUM)
/*
* Magic header for a swap area. The first part of the union is
@@ -129,9 +153,28 @@ union swap_header {
* memory reclaim
*/
struct reclaim_state {
- unsigned long reclaimed_slab;
+ /* pages reclaimed outside of LRU-based reclaim */
+ unsigned long reclaimed;
+#ifdef CONFIG_LRU_GEN
+ /* per-thread mm walk data */
+ struct lru_gen_mm_walk *mm_walk;
+#endif
};
+/*
+ * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
+ * reclaim
+ * @pages: number of pages reclaimed
+ *
+ * If the current process is undergoing a reclaim operation, increment the
+ * number of reclaimed pages by @pages.
+ */
+static inline void mm_account_reclaimed_pages(unsigned long pages)
+{
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed += pages;
+}
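
Shrinkers that free pages directly, bypassing the LRU, can feed their totals into this accounting. A hedged sketch of a hypothetical shrinker scan callback, where example_free_objects() is an assumed helper:

#include <linux/shrinker.h>
#include <linux/swap.h>

static unsigned long example_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	unsigned long freed = example_free_objects(sc->nr_to_scan);

	/* Credit pages freed outside the LRU to the current reclaim pass. */
	mm_account_reclaimed_pages(freed);
	return freed;
}
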
+
#ifdef __KERNEL__
struct address_space;
@@ -141,8 +184,8 @@ struct zone;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
- * disk blocks. A list of swap extents maps the entire swapfile. (Where the
- * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
+ * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
+ * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
* from setup, they're handled identically.
*
* We always assume that blocks are of size PAGE_SIZE.
@@ -170,12 +213,11 @@ enum {
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
 	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
- SWP_FS = (1 << 8), /* swap file goes through fs */
+ SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */
SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
- SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
@@ -238,6 +280,7 @@ struct swap_cluster_list {
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
+ struct percpu_ref users; /* indicate and keep swap device valid. */
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
@@ -255,13 +298,11 @@ struct swap_info_struct {
unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
+ struct file *bdev_file; /* open handle of the bdev */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
-#ifdef CONFIG_FRONTSWAP
- unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
- atomic_t frontswap_pages; /* frontswap pages in-use counter */
-#endif
+ struct completion comp; /* seldom referenced */
spinlock_t lock; /*
* protect map scan related fields like
* swap_map, lowest_bit, highest_bit,
@@ -293,86 +334,78 @@ struct swap_info_struct {
*/
};
-#ifdef CONFIG_64BIT
-#define SWAP_RA_ORDER_CEILING 5
-#else
-/* Avoid stack overflow, because we need to save part of page table */
-#define SWAP_RA_ORDER_CEILING 3
-#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
-#endif
+static inline swp_entry_t page_swap_entry(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ swp_entry_t entry = folio->swap;
-struct vma_swap_readahead {
- unsigned short win;
- unsigned short offset;
- unsigned short nr_pte;
-#ifdef CONFIG_64BIT
- pte_t *ptes;
-#else
- pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
-#endif
-};
+ entry.val += folio_page_idx(folio, page);
+ return entry;
+}
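
For a subpage of a large swapped-out folio, the helper simply offsets the folio's base slot by the page's index within the folio. A small sanity sketch (illustrative only):

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>

static void example_check_subpage_slot(struct folio *folio, struct page *page)
{
	swp_entry_t base = folio->swap;
	swp_entry_t sub = page_swap_entry(page);

	/* The subpage's slot is the folio's first slot plus its index. */
	WARN_ON(swp_offset(sub) !=
		swp_offset(base) + folio_page_idx(folio, page));
}
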
/* linux/mm/workingset.c */
+bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
-void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
-void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
-
-/* Only track the nodes of mappings with shadow entries */
-void workingset_update_node(struct xa_node *node);
-#define mapping_set_update(xas, mapping) do { \
- if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
- xas_set_update(xas, workingset_update_node); \
-} while (0)
+void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
+void workingset_refault(struct folio *folio, void *shadow);
+void workingset_activation(struct folio *folio);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
-extern unsigned long nr_free_buffer_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
-extern void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_pages);
-extern void lru_note_cost_page(struct page *);
-extern void lru_cache_add(struct page *);
-extern void lru_add_page_tail(struct page *page, struct page *page_tail,
- struct lruvec *lruvec, struct list_head *head);
-extern void activate_page(struct page *);
-extern void mark_page_accessed(struct page *);
+void lru_note_cost(struct lruvec *lruvec, bool file,
+ unsigned int nr_io, unsigned int nr_rotated);
+void lru_note_cost_refault(struct folio *);
+void folio_add_lru(struct folio *);
+void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+void mark_page_accessed(struct page *);
+void folio_mark_accessed(struct folio *);
+
+extern atomic_t lru_disable_count;
+
+static inline bool lru_cache_disabled(void)
+{
+ return atomic_read(&lru_disable_count);
+}
+
+static inline void lru_cache_enable(void)
+{
+ atomic_dec(&lru_disable_count);
+}
+
+extern void lru_cache_disable(void);
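
Paths that must not race with per-CPU LRU batching (page isolation, for instance) bracket their critical section with the disable/enable pair. A hedged sketch:

#include <linux/swap.h>

static void example_isolate_section(void)
{
	/* Drain pending pagevecs and keep new batching disabled. */
	lru_cache_disable();
	/* ... isolate pages without LRU moves happening underneath ... */
	lru_cache_enable();	/* drops lru_disable_count */
}
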
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_file_page(struct page *page);
-extern void deactivate_page(struct page *page);
-extern void mark_page_lazyfree(struct page *page);
+void folio_deactivate(struct folio *folio);
+void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma);
-
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+
+#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
+#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- bool may_swap);
+ unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
-extern int remove_mapping(struct address_space *mapping, struct page *page);
+long remove_mapping(struct address_space *mapping, struct folio *folio);
-extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
@@ -381,63 +414,32 @@ extern int sysctl_min_slab_ratio;
#define node_reclaim_mode 0
#endif
-extern void check_move_unevictable_pages(struct pagevec *pvec);
-
-extern int kswapd_run(int nid);
-extern void kswapd_stop(int nid);
+static inline bool node_reclaim_enabled(void)
+{
+ /* Is any node_reclaim_mode bit set? */
+ return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
+}
-#ifdef CONFIG_SWAP
+void check_move_unevictable_folios(struct folio_batch *fbatch);
-#include <linux/blk_types.h> /* for bio_end_io_t */
+extern void __meminit kswapd_run(int nid);
+extern void __meminit kswapd_stop(int nid);
-/* linux/mm/page_io.c */
-extern int swap_readpage(struct page *page, bool do_poll);
-extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio);
-extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func);
-extern int swap_set_page_dirty(struct page *page);
+#ifdef CONFIG_SWAP
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
sector_t *);
-/* linux/mm/swap_state.c */
-/* One swap address space for each 64M swap space */
-#define SWAP_ADDRESS_SPACE_SHIFT 14
-#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
-extern struct address_space *swapper_spaces[];
-#define swap_address_space(entry) \
- (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
- >> SWAP_ADDRESS_SPACE_SHIFT])
-extern unsigned long total_swapcache_pages(void);
-extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *page);
-extern void *get_shadow_from_swap_cache(swp_entry_t entry);
-extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp, void **shadowp);
-extern void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry, void *shadow);
-extern void delete_from_swap_cache(struct page *);
-extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end);
-extern void free_page_and_swap_cache(struct page *);
-extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page *lookup_swap_cache(swp_entry_t entry,
- struct vm_area_struct *vma,
- unsigned long addr);
-extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool do_poll);
-extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated);
-extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
+static inline unsigned long total_swapcache_pages(void)
+{
+ return global_node_page_state(NR_SWAPCACHE);
+}
+void free_swap_cache(struct folio *folio);
+void free_page_and_swap_cache(struct page *);
+void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
@@ -456,8 +458,9 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(struct page *page);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+swp_entry_t folio_alloc_swap(struct folio *folio);
+bool folio_free_swap(struct folio *folio);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -467,41 +470,40 @@ extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t, sector_t, struct block_device **);
+int swap_type_of(dev_t device, sector_t offset);
+int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
-extern int __swp_swapcount(swp_entry_t entry);
+extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
-extern int try_to_free_swap(struct page *);
+struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
+sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
- rcu_read_unlock();
+ percpu_ref_put(&si->users);
}
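
With SWP_VALID gone, readers pin the swap device through the percpu_ref rather than relying on RCU. A sketch of the intended get/put pattern (illustrative):

#include <linux/swap.h>

static int example_entry_count(swp_entry_t entry)
{
	struct swap_info_struct *si = get_swap_device(entry);
	int count;

	if (!si)
		return -ENOENT;	/* raced with swapoff, or stale entry */
	count = __swap_count(entry);
	put_swap_device(si);	/* releases si->users */
	return count;
}
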
#else /* CONFIG_SWAP */
-
-static inline int swap_readpage(struct page *page, bool do_poll)
+static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
- return 0;
+ return NULL;
}
-static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
return NULL;
}
-#define swap_address_space(entry) (NULL)
+static inline void put_swap_device(struct swap_info_struct *si)
+{
+}
+
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
@@ -516,13 +518,13 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
-static inline void show_swap_cache_info(void)
+/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
+#define free_swap_and_cache(e) is_pfn_swap_entry(e)
+
+static inline void free_swap_cache(struct folio *folio)
{
}
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
return 0;
@@ -537,79 +539,25 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
-static inline void swap_free(swp_entry_t swp)
-{
-}
-
-static inline void put_swap_page(struct page *page, swp_entry_t swp)
-{
-}
-
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
- gfp_t gfp_mask, struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
-{
- return 0;
-}
-
-static inline struct page *lookup_swap_cache(swp_entry_t swp,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- return NULL;
-}
-
-static inline int add_to_swap(struct page *page)
+static inline int swapcache_prepare(swp_entry_t swp)
{
return 0;
}
-static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
-{
- return NULL;
-}
-
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask, void **shadowp)
-{
- return -1;
-}
-
-static inline void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry, void *shadow)
-{
-}
-
-static inline void delete_from_swap_cache(struct page *page)
+static inline void swap_free(swp_entry_t swp)
{
}
-static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}
-static inline int page_swapcount(struct page *page)
-{
- return 0;
-}
-
static inline int __swap_count(swp_entry_t entry)
{
return 0;
}
-static inline int __swp_swapcount(swp_entry_t entry)
+static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
return 0;
}
@@ -619,21 +567,24 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_map_swapcount) \
- (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
-
-static inline int try_to_free_swap(struct page *page)
-{
- return 0;
-}
-
-static inline swp_entry_t get_swap_page(struct page *page)
+static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
entry.val = 0;
return entry;
}
+static inline bool folio_free_swap(struct folio *folio)
+{
+ return false;
+}
+
+static inline int add_swap_extent(struct swap_info_struct *sis,
+ unsigned long start_page,
+ unsigned long nr_pages, sector_t start_block)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SWAP */
#ifdef CONFIG_THP_SWAP
@@ -650,41 +601,62 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
/* Cgroup2 doesn't have per-cgroup swappiness */
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return vm_swappiness;
+ return READ_ONCE(vm_swappiness);
/* root ? */
if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
- return vm_swappiness;
+ return READ_ONCE(vm_swappiness);
- return memcg->swappiness;
+ return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
- return vm_swappiness;
+ return READ_ONCE(vm_swappiness);
}
#endif
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
+void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
+static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __folio_throttle_swaprate(folio, gfp);
+}
#else
-static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
+static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif
-#ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
-extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
+void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
+int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
+ swp_entry_t entry)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_try_charge_swap(folio, entry);
+}
+
+extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_swap(entry, nr_pages);
+}
+
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
#else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}
-static inline int mem_cgroup_try_charge_swap(struct page *page,
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
return 0;
@@ -700,7 +672,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return get_nr_swap_pages();
}
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index a12dd1c3966c..ae73a87775b3 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -4,7 +4,7 @@
#include <linux/swap.h>
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
@@ -40,6 +40,6 @@ static inline void swap_cgroup_swapoff(int type)
return;
}
-#endif /* CONFIG_MEMCG_SWAP */
+#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
index e36b200c2a77..15adfb8c813a 100644
--- a/include/linux/swap_slots.h
+++ b/include/linux/swap_slots.h
@@ -23,8 +23,8 @@ struct swap_slots_cache {
void disable_swap_slots_cache_lock(void);
void reenable_swap_slots_cache_unlock(void);
-int enable_swap_slots_cache(void);
-int free_swap_slot(swp_entry_t entry);
+void enable_swap_slots_cache(void);
+void free_swap_slot(swp_entry_t entry);
extern bool swap_slot_cache_enabled;
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index e06febf62978..99e3ed469e88 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -2,15 +2,12 @@
#ifndef _LINUX_SWAPFILE_H
#define _LINUX_SWAPFILE_H
-/*
- * these were static in swapfile.c but frontswap.c needs them and we don't
- * want to expose them to the dozens of source files that include swap.h
- */
-extern spinlock_t swap_lock;
-extern struct plist_head swap_active_head;
-extern struct swap_info_struct *swap_info[];
-extern int try_to_unuse(unsigned int, bool, unsigned long);
extern unsigned long generic_max_swapfile_size(void);
-extern unsigned long max_swapfile_size(void);
+unsigned long arch_max_swapfile_size(void);
+
+/* Maximum swapfile size supported for the arch (not inclusive). */
+extern unsigned long swapfile_maximum_size;
+/* Whether swap migration entry supports storing A/D bits for the arch */
+extern bool swap_migration_ad_supported;
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index d9b7c9132c2f..48b700ba1d18 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -8,15 +8,19 @@
#ifdef CONFIG_MMU
+#ifdef CONFIG_SWAP
+#include <linux/swapfile.h>
+#endif /* CONFIG_SWAP */
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
* the low-order bits.
*
- * We arrange the `type' and `offset' fields so that `type' is at the seven
+ * We arrange the `type' and `offset' fields so that `type' is at the six
* high-order bits of the swp_entry_t and `offset' is right-aligned in the
* remaining bits. Although `type' itself needs only five bits, we allow for
- * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
+ * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
*
* swp_entry_t's are *never* stored anywhere in their arch-dependent format.
*/
@@ -24,6 +28,59 @@
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
+ * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
+ * can use the extra bits to store other information besides PFN.
+ */
+#ifdef MAX_PHYSMEM_BITS
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#else /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_BITS min_t(int, \
+ sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+ SWP_TYPE_SHIFT)
+#endif /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+
+/**
+ * Migration swap entry specific bitfield definitions. Layout:
+ *
+ * |----------+--------------------|
+ * | swp_type | swp_offset |
+ * |----------+--------+-+-+-------|
+ * | | resv |D|A| PFN |
+ * |----------+--------+-+-+-------|
+ *
+ * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
+ * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
+ *
+ * Note: A/D bits are stored in migration entries iff there are enough
+ * free bits in the arch-specific swp offset. By default we ignore A/D bits
+ * when migrating a page. Please refer to migration_entry_supports_ad()
+ * for more information. If there are more bits besides PFN and A/D bits,
+ * they should be reserved and always be zero.
+ */
+#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
+#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
+#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
+
+#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
+#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
+
+static inline bool is_pfn_swap_entry(swp_entry_t entry);
+
+/* Clear all flags but only keep swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+ if (pte_swp_exclusive(pte))
+ pte = pte_swp_clear_exclusive(pte);
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ return pte;
+}
+
+/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
@@ -52,6 +109,17 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
+/*
+ * This should only be called upon a pfn swap entry to get the PFN stored
+ * in the swap entry. Please refer to is_pfn_swap_entry() for the
+ * definition of a pfn swap entry.
+ */
+static inline unsigned long swp_offset_pfn(swp_entry_t entry)
+{
+ VM_BUG_ON(!is_pfn_swap_entry(entry));
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
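
A quick round-trip sketch of the packing, assuming CONFIG_MIGRATION so the entry qualifies as a pfn swap entry (make_readable_migration_entry() is defined further down in this header; illustrative only):

#include <linux/swapops.h>

static void example_pack_roundtrip(unsigned long pfn)
{
	swp_entry_t entry = make_readable_migration_entry(pfn);

	WARN_ON(swp_type(entry) != SWP_MIGRATION_READ);
	/* swp_offset_pfn() masks off any flag bits above SWP_PFN_BITS. */
	WARN_ON(swp_offset_pfn(entry) != (pfn & SWP_PFN_MASK));
}
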
+
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
@@ -66,10 +134,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- if (pte_swp_soft_dirty(pte))
- pte = pte_swp_clear_soft_dirty(pte);
- if (pte_swp_uffd_wp(pte))
- pte = pte_swp_clear_uffd_wp(pte);
+ pte = pte_swp_clear_flags(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
@@ -100,10 +165,14 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
}
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_DEVICE_READ, offset);
+}
+
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
- return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
- page_to_pfn(page));
+ return swp_entry(SWP_DEVICE_WRITE, offset);
}
static inline bool is_device_private_entry(swp_entry_t entry)
@@ -112,33 +181,40 @@ static inline bool is_device_private_entry(swp_entry_t entry)
return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
- *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
+ return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
- return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
+ return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
+}
+
+static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
- return swp_offset(entry);
+ return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
+ swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
- return pfn_to_page(swp_offset(entry));
+ return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
+ return swp_entry(0, 0);
}
static inline bool is_device_private_entry(swp_entry_t entry)
@@ -146,105 +222,283 @@ static inline bool is_device_private_entry(swp_entry_t entry)
return false;
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
return false;
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
- return NULL;
+ return swp_entry(0, 0);
}
-#endif /* CONFIG_DEVICE_PRIVATE */
-#ifdef CONFIG_MIGRATION
-static inline swp_entry_t make_migration_entry(struct page *page, int write)
+static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
- BUG_ON(!PageLocked(compound_head(page)));
+ return false;
+}
- return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
- page_to_pfn(page));
+static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
+{
+ return false;
}
+#endif /* CONFIG_DEVICE_PRIVATE */
+#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
+ swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline int is_write_migration_entry(swp_entry_t entry)
+static inline int is_writable_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline int is_readable_migration_entry(swp_entry_t entry)
{
- return swp_offset(entry);
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
- struct page *p = pfn_to_page(swp_offset(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(!PageLocked(compound_head(p)));
- return p;
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
+}
+
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_MIGRATION_READ, offset);
+}
+
+static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}
-static inline void make_migration_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+ return swp_entry(SWP_MIGRATION_WRITE, offset);
+}
+
+/*
+ * Returns whether the host has a large enough swap offset field to support
+ * carrying over pgtable A/D bits for page migrations. The result is
+ * arch-specific.
+ */
+static inline bool migration_entry_supports_ad(void)
+{
+#ifdef CONFIG_SWAP
+ return swap_migration_ad_supported;
+#else /* CONFIG_SWAP */
+ return false;
+#endif /* CONFIG_SWAP */
+}
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_YOUNG);
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_DIRTY);
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
}
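
When a present PTE is replaced by a migration entry, the accessed/dirty state can ride along wherever the arch has spare offset bits. A hedged sketch of the construction:

#include <linux/pgtable.h>
#include <linux/swapops.h>

static swp_entry_t example_make_mig_entry(pte_t pte, unsigned long pfn)
{
	swp_entry_t entry = make_readable_migration_entry(pfn);

	/* These are no-ops when migration_entry_supports_ad() is false. */
	if (pte_young(pte))
		entry = make_migration_entry_young(entry);
	if (pte_dirty(pte))
		entry = make_migration_entry_dirty(entry);
	return entry;
}
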
-extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte);
-#else
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+#else /* CONFIG_MIGRATION */
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
+{
+ return swp_entry(0, 0);
+}
-#define make_migration_entry(page, write) swp_entry(0, 0)
-static inline int is_migration_entry(swp_entry_t swp)
+static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline int is_migration_entry(swp_entry_t swp)
{
- return NULL;
+ return 0;
}
-static inline void make_migration_entry_read(swp_entry_t *entryp) { }
-static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address) { }
+ unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte) { }
-static inline int is_write_migration_entry(swp_entry_t entry)
+ pte_t *pte) { }
+static inline int is_writable_migration_entry(swp_entry_t entry)
+{
+ return 0;
+}
+static inline int is_readable_migration_entry(swp_entry_t entry)
{
return 0;
}
-#endif
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
+
+typedef unsigned long pte_marker;
+
+#define PTE_MARKER_UFFD_WP BIT(0)
+/*
+ * "Poisoned" here is meant in the very general sense of "future accesses are
+ * invalid", instead of referring very specifically to hardware memory errors.
+ * This marker is meant to represent any of the various causes of this.
+ */
+#define PTE_MARKER_POISONED BIT(1)
+#define PTE_MARKER_MASK (BIT(2) - 1)
+
+static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
+{
+ return swp_entry(SWP_PTE_MARKER, marker);
+}
+
+static inline bool is_pte_marker_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_PTE_MARKER;
+}
+
+static inline pte_marker pte_marker_get(swp_entry_t entry)
+{
+ return swp_offset(entry) & PTE_MARKER_MASK;
+}
+
+static inline bool is_pte_marker(pte_t pte)
+{
+ return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
+}
+
+static inline pte_t make_pte_marker(pte_marker marker)
+{
+ return swp_entry_to_pte(make_pte_marker_entry(marker));
+}
+
+static inline swp_entry_t make_poisoned_swp_entry(void)
+{
+ return make_pte_marker_entry(PTE_MARKER_POISONED);
+}
+
+static inline int is_poisoned_swp_entry(swp_entry_t entry)
+{
+ return is_pte_marker_entry(entry) &&
+ (pte_marker_get(entry) & PTE_MARKER_POISONED);
+}
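
Putting the pieces together: a marker PTE reads as none for most purposes while still carrying its payload. An illustrative sketch using the uffd-wp marker (the WARN_ON checks are purely for illustration):

#include <linux/swapops.h>

static pte_t example_uffd_wp_marker(void)
{
	pte_t pte = make_pte_marker(PTE_MARKER_UFFD_WP);

	WARN_ON(!is_pte_marker(pte));
	/* pte_none_mostly(), below, treats this pte as effectively none. */
	return pte;
}
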
+
+/*
+ * This is a special version of pte_none() that also covers the case when
+ * the pte is a pte marker. It exists because in many cases a pte marker
+ * should be seen as a none pte; we have merely stored some information
+ * onto the none pte, so it is no longer none.
+ *
+ * It should be used when the pte is file-backed, ram-based and backing
+ * userspace pages, like shmem. It is not needed on pgtables that do not
+ * support pte markers at all, e.g. anonymous memory, kernel-only memory
+ * (including during boot), or non-ram-based generic filesystems. It is
+ * fine to use it even there, but the extra pte marker check will be pure
+ * overhead.
+ */
+static inline int pte_none_mostly(pte_t pte)
+{
+ return pte_none(pte) || is_pte_marker(pte);
+}
+
+static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset_pfn(entry));
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(is_migration_entry(entry) && !PageLocked(p));
+
+ return p;
+}
+
+static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
+{
+ struct folio *folio = pfn_folio(swp_offset_pfn(entry));
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked
+ */
+ BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
+
+ return folio;
+}
+
+/*
+ * A pfn swap entry is a special type of swap entry that always has a pfn
+ * stored in the swap offset. Such entries are used to represent
+ * unaddressable device memory and to restrict access to a page undergoing
+ * migration.
+ */
+static inline bool is_pfn_swap_entry(swp_entry_t entry)
+{
+ /* Make sure the swp offset can always store the needed fields */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ return is_migration_entry(entry) || is_device_private_entry(entry) ||
+ is_device_exclusive_entry(entry);
+}
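
Resolving a non-present PTE back to its page is then a two-step check. A hedged sketch:

#include <linux/swapops.h>

static struct page *example_entry_to_page(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (!is_pfn_swap_entry(entry))
		return NULL;	/* a genuine swap slot, no pfn stored */
	return pfn_swap_entry_to_page(entry);
}
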
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page);
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
@@ -258,6 +512,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
if (pmd_swp_soft_dirty(pmd))
pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
arch_entry = __pmd_to_swp_entry(pmd);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
@@ -272,10 +528,10 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
static inline int is_pmd_migration_entry(pmd_t pmd)
{
- return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+ return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
-#else
-static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
BUILD_BUG();
@@ -303,12 +559,10 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
-extern atomic_long_t num_poisoned_pages __read_mostly;
-
/*
* Support for hardware poisoned pages
*/
@@ -323,16 +577,6 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
-static inline void num_poisoned_pages_inc(void)
-{
- atomic_long_inc(&num_poisoned_pages);
-}
-
-static inline void num_poisoned_pages_dec(void)
-{
- atomic_long_dec(&num_poisoned_pages);
-}
-
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -344,24 +588,12 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
{
return 0;
}
-
-static inline void num_poisoned_pages_inc(void)
-{
-}
#endif
-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
- defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
}
-#else
-static inline int non_swap_entry(swp_entry_t entry)
-{
- return 0;
-}
-#endif
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 046bb94bd4d6..ea23097e351f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -2,19 +2,21 @@
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
struct device;
struct page;
struct scatterlist;
-enum swiotlb_force {
- SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
- SWIOTLB_FORCE, /* swiotlb=force */
- SWIOTLB_NO_FORCE, /* swiotlb=noforce */
-};
+#define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */
+#define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */
+#define SWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */
/*
* Maximum allowable number of contiguous slabs to map,
@@ -28,83 +30,260 @@ enum swiotlb_force {
* controllable.
*/
#define IO_TLB_SHIFT 11
+#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
-extern void swiotlb_init(int verbose);
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
- SYNC_FOR_CPU = 0,
- SYNC_FOR_DEVICE = 1,
-};
-
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t phys,
- size_t mapping_size,
- size_t alloc_size,
- enum dma_data_direction dir,
- unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+ size_t mapping_size, size_t alloc_size,
+ unsigned int alloc_aligned_mask, enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,
size_t mapping_size,
- size_t alloc_size,
enum dma_data_direction dir,
unsigned long attrs);
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
- phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target);
-
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir);
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
#ifdef CONFIG_SWIOTLB
-extern enum swiotlb_force swiotlb_force;
-extern phys_addr_t io_tlb_start, io_tlb_end;
-static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+/**
+ * struct io_tlb_pool - IO TLB memory pool descriptor
+ * @start: The start address of the swiotlb memory pool. Used to do a quick
+ * range check to see if the memory was in fact allocated by this
+ * API.
+ * @end: The end address of the swiotlb memory pool. Used to do a quick
+ * range check to see if the memory was in fact allocated by this
+ * API.
+ * @vaddr: The vaddr of the swiotlb memory pool. The swiotlb memory pool
+ * may be remapped in the memory encrypted case and store virtual
+ * address for bounce buffer operation.
+ * @nslabs: The number of IO TLB slots between @start and @end. For the
+ * default swiotlb, this can be adjusted with a boot parameter,
+ * see setup_io_tlb_npages().
+ * @late_alloc: %true if allocated using the page allocator.
+ * @nareas: Number of areas in the pool.
+ * @area_nslabs: Number of slots in each area.
+ * @areas: Array of memory area descriptors.
+ * @slots: Array of slot descriptors.
+ * @node: Member of the IO TLB memory pool list.
+ * @rcu: RCU head for swiotlb_dyn_free().
+ * @transient: %true if transient memory pool.
+ */
+struct io_tlb_pool {
+ phys_addr_t start;
+ phys_addr_t end;
+ void *vaddr;
+ unsigned long nslabs;
+ bool late_alloc;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
+ struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head node;
+ struct rcu_head rcu;
+ bool transient;
+#endif
+};
+
+/**
+ * struct io_tlb_mem - Software IO TLB allocator
+ * @defpool: Default (initial) IO TLB memory pool descriptor.
+ * @pool: IO TLB memory pool descriptor (if not dynamic).
+ * @nslabs: Total number of IO TLB slabs in all pools.
+ * @debugfs: The dentry to debugfs.
+ * @force_bounce: %true if swiotlb bouncing is forced
+ * @for_alloc: %true if the pool is used for memory allocation
+ * @can_grow: %true if more pools can be allocated dynamically.
+ * @phys_limit: Maximum allowed physical address.
+ * @lock: Lock to synchronize changes to the list.
+ * @pools: List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc: Dynamic IO TLB pool allocation work.
+ * @total_used: The total number of slots in the pool that are currently used
+ * across all areas. Used only for calculating used_hiwater in
+ * debugfs.
+ * @used_hiwater: The high water mark for total_used. Used only for reporting
+ * in debugfs.
+ * @transient_nslabs: The total number of slots in all transient pools that
+ * are currently used across all areas.
+ */
+struct io_tlb_mem {
+ struct io_tlb_pool defpool;
+ unsigned long nslabs;
+ struct dentry *debugfs;
+ bool force_bounce;
+ bool for_alloc;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ bool can_grow;
+ u64 phys_limit;
+ spinlock_t lock;
+ struct list_head pools;
+ struct work_struct dyn_alloc;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ atomic_long_t total_used;
+ atomic_long_t used_hiwater;
+ atomic_long_t transient_nslabs;
+#endif
+};
+
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
+
+#else
+
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+ phys_addr_t paddr)
{
- return paddr >= io_tlb_start && paddr < io_tlb_end;
+ return &dev->dma_io_tlb_mem->defpool;
}
+#endif
+
+/**
+ * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * @dev: Device which has mapped the buffer.
+ * @paddr: Physical address within the DMA buffer.
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
+static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
+{
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+
+ if (!mem)
+ return false;
+
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ /*
+ * All SWIOTLB buffer addresses must have been returned by
+ * swiotlb_tbl_map_single() and passed to a device driver.
+ * If a SWIOTLB address is checked on another CPU, then it was
+ * presumably loaded by the device driver from an unspecified private
+ * data structure. Make sure that this load is ordered before reading
+ * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ *
+ * This barrier pairs with smp_mb() in swiotlb_find_slots().
+ */
+ smp_rmb();
+ return READ_ONCE(dev->dma_uses_io_tlb) &&
+ swiotlb_find_pool(dev, paddr);
+#else
+ return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
+}
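
On the sync/unmap side, a caller only needs to bounce when the address really came from swiotlb. A hedged sketch of that check, using only the helpers declared here:

#include <linux/dma-direction.h>
#include <linux/swiotlb.h>

static void example_sync_for_cpu(struct device *dev, phys_addr_t paddr,
				 size_t size)
{
	/* Only addresses inside a swiotlb pool need the bounce copy. */
	if (is_swiotlb_buffer(dev, paddr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, DMA_FROM_DEVICE);
}
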
+
+static inline bool is_swiotlb_force_bounce(struct device *dev)
+{
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+
+ return mem && mem->force_bounce;
+}
+
+void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
-unsigned int swiotlb_max_segment(void);
+void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
-bool is_swiotlb_active(void);
+bool is_swiotlb_allocated(void);
+bool is_swiotlb_active(struct device *dev);
+void __init swiotlb_adjust_size(unsigned long size);
+phys_addr_t default_swiotlb_base(void);
+phys_addr_t default_swiotlb_limit(void);
#else
-#define swiotlb_force SWIOTLB_NO_FORCE
-static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
+{
+}
+
+static inline void swiotlb_dev_init(struct device *dev)
+{
+}
+
+static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
return false;
}
-static inline void swiotlb_exit(void)
+static inline bool is_swiotlb_force_bounce(struct device *dev)
{
+ return false;
}
-static inline unsigned int swiotlb_max_segment(void)
+static inline void swiotlb_exit(void)
{
- return 0;
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
return SIZE_MAX;
}
-static inline bool is_swiotlb_active(void)
+static inline bool is_swiotlb_allocated(void)
+{
+ return false;
+}
+
+static inline bool is_swiotlb_active(struct device *dev)
{
return false;
}
+
+static inline void swiotlb_adjust_size(unsigned long size)
+{
+}
+
+static inline phys_addr_t default_swiotlb_base(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t default_swiotlb_limit(void)
+{
+ return 0;
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
-extern void swiotlb_set_max_segment(unsigned int);
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+struct page *swiotlb_alloc(struct device *dev, size_t size);
+bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+ return dev->dma_io_tlb_mem->for_alloc;
+}
+#else
+static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+ return NULL;
+}
+static inline bool swiotlb_free(struct device *dev, struct page *page,
+ size_t size)
+{
+ return false;
+}
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 082f1d51957a..8d8fac1626bd 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -19,6 +19,7 @@
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
+#define SWITCHTEC_EVENT_NOT_SUPP BIT(31)
#define SWITCHTEC_DMA_MRPC_EN BIT(0)
@@ -40,6 +41,7 @@ enum {
enum switchtec_gen {
SWITCHTEC_GEN3,
SWITCHTEC_GEN4,
+ SWITCHTEC_GEN5,
};
struct mrpc_regs {
@@ -336,8 +338,6 @@ enum {
NTB_CTRL_REQ_ID_EN = 1 << 0,
NTB_CTRL_LUT_EN = 1 << 0,
-
- NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};
struct ntb_ctrl_regs {
diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h
new file mode 100644
index 000000000000..3858a6ffdd5c
--- /dev/null
+++ b/include/linux/syscall_user_dispatch.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Collabora Ltd.
+ */
+#ifndef _SYSCALL_USER_DISPATCH_H
+#define _SYSCALL_USER_DISPATCH_H
+
+#include <linux/thread_info.h>
+#include <linux/syscall_user_dispatch_types.h>
+
+#ifdef CONFIG_GENERIC_ENTRY
+
+int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector);
+
+#define clear_syscall_work_syscall_user_dispatch(tsk) \
+ clear_task_syscall_work(tsk, SYSCALL_USER_DISPATCH)
+
+int syscall_user_dispatch_get_config(struct task_struct *task, unsigned long size,
+ void __user *data);
+
+int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long size,
+ void __user *data);
+
+#else
+
+static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector)
+{
+ return -EINVAL;
+}
+
+static inline void clear_syscall_work_syscall_user_dispatch(struct task_struct *tsk)
+{
+}
+
+static inline int syscall_user_dispatch_get_config(struct task_struct *task,
+ unsigned long size, void __user *data)
+{
+ return -EINVAL;
+}
+
+static inline int syscall_user_dispatch_set_config(struct task_struct *task,
+ unsigned long size, void __user *data)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_GENERIC_ENTRY */
+
+#endif /* _SYSCALL_USER_DISPATCH_H */
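
Userspace opts in through prctl(); the kernel consults the selector byte on each syscall and raises SIGSYS when dispatch is on. A hedged userspace sketch, assuming the PR_SET_SYSCALL_USER_DISPATCH constants from <linux/prctl.h>:

#include <linux/prctl.h>
#include <stdint.h>
#include <sys/prctl.h>

/* One selector byte per thread; flipped between ALLOW and BLOCK. */
static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

static int example_enable_sud(uintptr_t start, size_t len)
{
	/* Syscalls issued from [start, start + len) bypass dispatch. */
	return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		     start, len, &selector);
}
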
diff --git a/include/linux/syscall_user_dispatch_types.h b/include/linux/syscall_user_dispatch_types.h
new file mode 100644
index 000000000000..3be36b06c7d7
--- /dev/null
+++ b/include/linux/syscall_user_dispatch_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SYSCALL_USER_DISPATCH_TYPES_H
+#define _SYSCALL_USER_DISPATCH_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_ENTRY
+
+struct syscall_user_dispatch {
+ char __user *selector;
+ unsigned long offset;
+ unsigned long len;
+ bool on_dispatch;
+};
+
+#else
+
+struct syscall_user_dispatch {};
+
+#endif
+
+#endif /* _SYSCALL_USER_DISPATCH_TYPES_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 75ac7f8ae93c..e619ac10cd23 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -58,6 +58,7 @@ struct mq_attr;
struct compat_stat;
struct old_timeval32;
struct robust_list_head;
+struct futex_waitv;
struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
@@ -68,6 +69,14 @@ union bpf_attr;
struct io_uring_params;
struct clone_args;
struct open_how;
+struct mount_attr;
+struct landlock_ruleset_attr;
+struct lsm_ctx;
+enum landlock_rule_type;
+struct cachestat_range;
+struct cachestat;
+struct statmount;
+struct mnt_id_req;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -119,6 +128,7 @@ struct open_how;
#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
#define __SC_CAST(t, a) (__force t) a
+#define __SC_TYPE(t, a) t
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -144,7 +154,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct trace_event_call __used \
- __attribute__((section("_ftrace_events"))) \
+ __section("_ftrace_events") \
*__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
@@ -160,7 +170,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct trace_event_call __used \
- __attribute__((section("_ftrace_events"))) \
+ __section("_ftrace_events") \
*__event_exit_##sname = &event_exit_##sname;
#define SYSCALL_METADATA(sname, nb, ...) \
@@ -184,7 +194,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
}; \
static struct syscall_metadata __used \
- __attribute__((section("__syscalls_metadata"))) \
+ __section("__syscalls_metadata") \
*__p_syscall_meta_##sname = &__syscall_meta_##sname;
static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
@@ -251,25 +261,31 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
-/*
- * Called before coming back to user-mode. Returning to user-mode with an
- * address limit different than USER_DS can allow to overwrite kernel memory.
- */
-static inline void addr_limit_user_check(void)
-{
-#ifdef TIF_FSCHECK
- if (!test_thread_flag(TIF_FSCHECK))
- return;
+/* For split 64-bit arguments on 32-bit architectures */
+#ifdef __LITTLE_ENDIAN
+#define SC_ARG64(name) u32, name##_lo, u32, name##_hi
+#else
+#define SC_ARG64(name) u32, name##_hi, u32, name##_lo
#endif
-
- if (CHECK_DATA_CORRUPTION(uaccess_kernel(),
- "Invalid address limit on user-mode return"))
- force_sig(SIGKILL);
-
-#ifdef TIF_FSCHECK
- clear_thread_flag(TIF_FSCHECK);
+#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
+
+#ifdef CONFIG_COMPAT
+#define SYSCALL32_DEFINE0 COMPAT_SYSCALL_DEFINE0
+#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#else
+#define SYSCALL32_DEFINE0 SYSCALL_DEFINE0
+#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6
#endif
-}
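/*
 * Illustrative only: defining a compat syscall whose 64-bit offset is
 * split across two 32-bit registers. example_pread and ksys_example_pread
 * are hypothetical; the point is the SC_ARG64()/SC_VAL64() pairing with
 * the endian-dependent lo/hi ordering defined above.
 */
SYSCALL32_DEFINE4(example_pread, int, fd, SC_ARG64(offset), u32, len)
{
	return ksys_example_pread(fd, SC_VAL64(loff_t, offset), len);
}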
/*
* These syscall function prototypes are kept in the same order as
@@ -317,11 +333,9 @@ asmlinkage long sys_io_uring_setup(u32 entries,
struct io_uring_params __user *p);
asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
u32 min_complete, u32 flags,
- const sigset_t __user *sig, size_t sigsz);
+ const void __user *argp, size_t argsz);
asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
void __user *arg, unsigned int nr_args);
-
-/* fs/xattr.c */
asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
@@ -344,17 +358,8 @@ asmlinkage long sys_removexattr(const char __user *path,
asmlinkage long sys_lremovexattr(const char __user *path,
const char __user *name);
asmlinkage long sys_fremovexattr(int fd, const char __user *name);
-
-/* fs/dcache.c */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-
-/* fs/cookies.c */
-asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
-
-/* fs/eventfd.c */
asmlinkage long sys_eventfd2(unsigned int count, int flags);
-
-/* fs/eventpoll.c */
asmlinkage long sys_epoll_create1(int flags);
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
struct epoll_event __user *event);
@@ -362,8 +367,11 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
int maxevents, int timeout,
const sigset_t __user *sigmask,
size_t sigsetsize);
-
-/* fs/fcntl.c */
+asmlinkage long sys_epoll_pwait2(int epfd, struct epoll_event __user *events,
+ int maxevents,
+ const struct __kernel_timespec __user *timeout,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
asmlinkage long sys_dup(unsigned int fildes);
asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
@@ -371,25 +379,15 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
asmlinkage long sys_fcntl64(unsigned int fd,
unsigned int cmd, unsigned long arg);
#endif
-
-/* fs/inotify_user.c */
asmlinkage long sys_inotify_init1(int flags);
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
u32 mask);
asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
-
-/* fs/ioctl.c */
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
unsigned long arg);
-
-/* fs/ioprio.c */
asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
asmlinkage long sys_ioprio_get(int which, int who);
-
-/* fs/locks.c */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd);
-
-/* fs/namei.c */
asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
unsigned dev);
asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
@@ -400,18 +398,12 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
int newdfd, const char __user *newname, int flags);
asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
int newdfd, const char __user * newname);
-
-/* fs/namespace.c */
asmlinkage long sys_umount(char __user *name, int flags);
asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
char __user *type, unsigned long flags,
void __user *data);
asmlinkage long sys_pivot_root(const char __user *new_root,
const char __user *put_old);
-
-/* fs/nfsctl.c */
-
-/* fs/open.c */
asmlinkage long sys_statfs(const char __user * path,
struct statfs __user *buf);
asmlinkage long sys_statfs64(const char __user *path, size_t sz,
@@ -419,6 +411,12 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
+asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
+ struct statmount __user *buf, size_t bufsize,
+ unsigned int flags);
+asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
+ u64 __user *mnt_ids, size_t nr_mnt_ids,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
@@ -433,8 +431,10 @@ asmlinkage long sys_chdir(const char __user *filename);
asmlinkage long sys_fchdir(unsigned int fd);
asmlinkage long sys_chroot(const char __user *filename);
asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
-asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
+asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
umode_t mode);
+asmlinkage long sys_fchmodat2(int dfd, const char __user *filename,
+ umode_t mode, unsigned int flags);
asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
gid_t group, int flag);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
@@ -446,20 +446,14 @@ asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
unsigned int flags);
asmlinkage long sys_vhangup(void);
-
-/* fs/pipe.c */
asmlinkage long sys_pipe2(int __user *fildes, int flags);
-
-/* fs/quota.c */
asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr);
-
-/* fs/readdir.c */
+asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id,
+ void __user *addr);
asmlinkage long sys_getdents64(unsigned int fd,
struct linux_dirent64 __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
unsigned long offset_low, loff_t __user *result,
unsigned int whence);
@@ -482,12 +476,8 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
-
-/* fs/sendfile.c */
asmlinkage long sys_sendfile64(int out_fd, int in_fd,
loff_t __user *offset, size_t count);
-
-/* fs/select.c */
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
fd_set __user *, struct __kernel_timespec __user *,
void __user *);
@@ -500,19 +490,13 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int,
struct old_timespec32 __user *, const sigset_t __user *,
size_t);
-
-/* fs/signalfd.c */
asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
-
-/* fs/splice.c */
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
unsigned long nr_segs, unsigned int flags);
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
size_t len, unsigned int flags);
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
-
-/* fs/stat.c */
asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
int bufsiz);
asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
@@ -523,8 +507,6 @@ asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
struct stat64 __user *statbuf, int flag);
#endif
-
-/* fs/sync.c */
asmlinkage long sys_sync(void);
asmlinkage long sys_fsync(unsigned int fd);
asmlinkage long sys_fdatasync(unsigned int fd);
@@ -532,8 +514,6 @@ asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
loff_t offset, loff_t nbytes);
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
unsigned int flags);
-
-/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
const struct __kernel_itimerspec __user *utmr,
@@ -544,75 +524,65 @@ asmlinkage long sys_timerfd_gettime32(int ufd,
asmlinkage long sys_timerfd_settime32(int ufd, int flags,
const struct old_itimerspec32 __user *utmr,
struct old_itimerspec32 __user *otmr);
-
-/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
struct __kernel_timespec __user *utimes,
int flags);
asmlinkage long sys_utimensat_time32(unsigned int dfd,
const char __user *filename,
struct old_timespec32 __user *t, int flags);
-
-/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
-
-/* kernel/capability.c */
asmlinkage long sys_capget(cap_user_header_t header,
cap_user_data_t dataptr);
asmlinkage long sys_capset(cap_user_header_t header,
const cap_user_data_t data);
-
-/* kernel/exec_domain.c */
asmlinkage long sys_personality(unsigned int personality);
-
-/* kernel/exit.c */
asmlinkage long sys_exit(int error_code);
asmlinkage long sys_exit_group(int error_code);
asmlinkage long sys_waitid(int which, pid_t pid,
struct siginfo __user *infop,
int options, struct rusage __user *ru);
-
-/* kernel/fork.c */
asmlinkage long sys_set_tid_address(int __user *tidptr);
asmlinkage long sys_unshare(unsigned long unshare_flags);
-
-/* kernel/futex.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct __kernel_timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
+ const struct __kernel_timespec __user *utime,
+ u32 __user *uaddr2, u32 val3);
asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val,
- struct old_timespec32 __user *utime, u32 __user *uaddr2,
- u32 val3);
+ const struct old_timespec32 __user *utime,
+ u32 __user *uaddr2, u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
size_t __user *len_ptr);
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
-/* kernel/hrtimer.c */
+asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
+ unsigned int nr_futexes, unsigned int flags,
+ struct __kernel_timespec __user *timeout, clockid_t clockid);
+
+asmlinkage long sys_futex_wake(void __user *uaddr, unsigned long mask, int nr, unsigned int flags);
+
+asmlinkage long sys_futex_wait(void __user *uaddr, unsigned long val, unsigned long mask,
+ unsigned int flags, struct __kernel_timespec __user *timespec,
+ clockid_t clockid);
+
+asmlinkage long sys_futex_requeue(struct futex_waitv __user *waiters,
+ unsigned int flags, int nr_wake, int nr_requeue);
+
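/*
 * Userspace sketch for sys_futex_waitv(): wait until either of two futex
 * words changes. struct futex_waitv and FUTEX_32 come from the uapi
 * <linux/futex.h>; __NR_futex_waitv is 449 on most recent ABIs. Error
 * handling is elided.
 */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long example_wait_two(uint32_t *a, uint32_t *b)
{
	struct futex_waitv waiters[2] = {
		{ .val = *a, .uaddr = (uintptr_t)a, .flags = FUTEX_32 },
		{ .val = *b, .uaddr = (uintptr_t)b, .flags = FUTEX_32 },
	};

	/* Returns the array index of the woken entry, or -1 on error. */
	return syscall(__NR_futex_waitv, waiters, 2, 0, NULL, CLOCK_MONOTONIC);
}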
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
-
-/* kernel/itimer.c */
asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value);
asmlinkage long sys_setitimer(int which,
struct __kernel_old_itimerval __user *value,
struct __kernel_old_itimerval __user *ovalue);
-
-/* kernel/kexec.c */
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
struct kexec_segment __user *segments,
unsigned long flags);
-
-/* kernel/module.c */
asmlinkage long sys_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
asmlinkage long sys_delete_module(const char __user *name_user,
unsigned int flags);
-
-/* kernel/posix-timers.c */
asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
@@ -646,15 +616,9 @@ asmlinkage long sys_clock_getres_time32(clockid_t which_clock,
asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags,
struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
-
-/* kernel/printk.c */
asmlinkage long sys_syslog(int type, char __user *buf, int len);
-
-/* kernel/ptrace.c */
asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
unsigned long data);
-/* kernel/sched/core.c */
-
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
@@ -673,8 +637,6 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid,
struct __kernel_timespec __user *interval);
asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid,
struct old_timespec32 __user *interval);
-
-/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
asmlinkage long sys_kill(pid_t pid, int sig);
asmlinkage long sys_tkill(pid_t pid, int sig);
@@ -700,8 +662,6 @@ asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese,
const struct old_timespec32 __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
-
-/* kernel/sys.c */
asmlinkage long sys_setpriority(int which, int who, int niceval);
asmlinkage long sys_getpriority(int which, int who);
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
@@ -735,16 +695,12 @@ asmlinkage long sys_umask(int mask);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
-
-/* kernel/time.c */
asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
-
-/* kernel/timer.c */
asmlinkage long sys_getpid(void);
asmlinkage long sys_getppid(void);
asmlinkage long sys_getuid(void);
@@ -753,8 +709,6 @@ asmlinkage long sys_getgid(void);
asmlinkage long sys_getegid(void);
asmlinkage long sys_gettid(void);
asmlinkage long sys_sysinfo(struct sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
asmlinkage long sys_mq_unlink(const char __user *name);
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct __kernel_timespec __user *abs_timeout);
@@ -769,8 +723,6 @@ asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes,
const char __user *u_msg_ptr,
unsigned int msg_len, unsigned int msg_prio,
const struct old_timespec32 __user *u_abs_timeout);
-
-/* ipc/msg.c */
asmlinkage long sys_msgget(key_t key, int msgflg);
asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
@@ -778,8 +730,6 @@ asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp,
size_t msgsz, long msgtyp, int msgflg);
asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
size_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long sys_semget(key_t key, int nsems, int semflg);
asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
@@ -791,15 +741,11 @@ asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops,
const struct old_timespec32 __user *timeout);
asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
unsigned nsops);
-
-/* ipc/shm.c */
asmlinkage long sys_shmget(key_t key, size_t size, int flag);
asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
asmlinkage long sys_shmdt(char __user *shmaddr);
-
-/* net/socket.c */
asmlinkage long sys_socket(int, int, int);
asmlinkage long sys_socketpair(int, int, int, int __user *);
asmlinkage long sys_bind(int, struct sockaddr __user *, int);
@@ -819,18 +765,12 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
asmlinkage long sys_shutdown(int, int);
asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-
-/* mm/filemap.c */
asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
-
-/* mm/nommu.c, also with MMU */
asmlinkage long sys_brk(unsigned long brk);
asmlinkage long sys_munmap(unsigned long addr, size_t len);
asmlinkage long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr);
-
-/* security/keys/keyctl.c */
asmlinkage long sys_add_key(const char __user *_type,
const char __user *_description,
const void __user *_payload,
@@ -842,8 +782,6 @@ asmlinkage long sys_request_key(const char __user *_type,
key_serial_t destringid);
asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-
-/* arch/example/kernel/sys_example.c */
#ifdef CONFIG_CLONE_BACKWARDS
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
int __user *);
@@ -862,11 +800,9 @@ asmlinkage long sys_clone3(struct clone_args __user *uargs, size_t size);
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp);
-
-/* mm/fadvise.c */
asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags);
asmlinkage long sys_swapoff(const char __user *specialfile);
asmlinkage long sys_mprotect(unsigned long start, size_t len,
@@ -879,6 +815,9 @@ asmlinkage long sys_munlockall(void);
asmlinkage long sys_mincore(unsigned long start, size_t len,
unsigned char __user * vec);
asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior);
+asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec,
+ size_t vlen, int behavior, unsigned int flags);
+asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags);
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
unsigned long prot, unsigned long pgoff,
unsigned long flags);
@@ -901,7 +840,6 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
const int __user *nodes,
int __user *status,
int flags);
-
asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
siginfo_t __user *uinfo);
asmlinkage long sys_perf_event_open(
@@ -914,7 +852,6 @@ asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct old_timespec32 __user *timeout);
-
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
@@ -974,7 +911,7 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
asmlinkage long sys_userfaultfd(int flags);
-asmlinkage long sys_membarrier(int cmd, int flags);
+asmlinkage long sys_membarrier(int cmd, unsigned int flags, int cpu_id);
asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
@@ -997,6 +934,9 @@ asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path,
int to_dfd, const char __user *to_path,
unsigned int ms_flags);
+asmlinkage long sys_mount_setattr(int dfd, const char __user *path,
+ unsigned int flags,
+ struct mount_attr __user *uattr, size_t usize);
asmlinkage long sys_fsopen(const char __user *fs_name, unsigned int flags);
asmlinkage long sys_fsconfig(int fs_fd, unsigned int cmd, const char __user *key,
const void __user *value, int aux);
@@ -1006,12 +946,30 @@ asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
siginfo_t __user *info,
unsigned int flags);
asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags);
+asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr __user *attr,
+ size_t size, __u32 flags);
+asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type,
+ const void __user *rule_attr, __u32 flags);
+asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags);
+asmlinkage long sys_memfd_secret(unsigned int flags);
+asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len,
+ unsigned long home_node,
+ unsigned long flags);
+asmlinkage long sys_cachestat(unsigned int fd,
+ struct cachestat_range __user *cstat_range,
+ struct cachestat __user *cstat, unsigned int flags);
+asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ u32 *size, u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ u32 size, u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags);
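/*
 * Userspace sketch for the Landlock syscalls declared above: build a
 * ruleset that handles (and, with no rules added, denies) file writes,
 * then restrict the calling thread. Constants are from <linux/landlock.h>;
 * raw syscall(2) is used because libc has no wrappers.
 */
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_landlock_restrict(void)
{
	struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	int ruleset_fd;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &attr,
			     sizeof(attr), 0);
	if (ruleset_fd < 0)
		return -1;

	/* Required before self-restriction without CAP_SYS_ADMIN. */
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	return syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
}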
/*
* Architecture-specific system calls
*/
-/* arch/x86/kernel/ioport.c */
+/* x86 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
/* pciconfig: alpha, arm, arm64, ia64, sparc */
@@ -1113,18 +1071,17 @@ asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
asmlinkage long sys_vfork(void);
asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
asmlinkage long sys_send(int, void __user *, size_t, unsigned);
-asmlinkage long sys_bdflush(int func, long data);
asmlinkage long sys_oldumount(char __user *name);
asmlinkage long sys_uselib(const char __user *library);
asmlinkage long sys_sysfs(int option,
unsigned long arg1, unsigned long arg2);
asmlinkage long sys_fork(void);
-/* obsolete: kernel/time/time.c */
+/* obsolete */
asmlinkage long sys_stime(__kernel_old_time_t __user *tptr);
asmlinkage long sys_stime32(old_time32_t __user *tptr);
-/* obsolete: kernel/signal.c */
+/* obsolete */
asmlinkage long sys_sigpending(old_sigset_t __user *uset);
asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
old_sigset_t __user *oset);
@@ -1144,19 +1101,19 @@ asmlinkage long sys_sgetmask(void);
asmlinkage long sys_ssetmask(int newmask);
asmlinkage long sys_signal(int sig, __sighandler_t handler);
-/* obsolete: kernel/sched/core.c */
+/* obsolete */
asmlinkage long sys_nice(int increment);
-/* obsolete: kernel/kexec_file.c */
+/* obsolete */
asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
unsigned long cmdline_len,
const char __user *cmdline_ptr,
unsigned long flags);
-/* obsolete: kernel/exit.c */
+/* obsolete */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
-/* obsolete: kernel/uid16.c */
+/* obsolete */
#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
@@ -1183,10 +1140,10 @@ asmlinkage long sys_getgid16(void);
asmlinkage long sys_getegid16(void);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long sys_socketcall(int call, unsigned long __user *args);
-/* obsolete: fs/stat.c */
+/* obsolete */
asmlinkage long sys_stat(const char __user *filename,
struct __old_kernel_stat __user *statbuf);
asmlinkage long sys_lstat(const char __user *filename,
@@ -1196,13 +1153,13 @@ asmlinkage long sys_fstat(unsigned int fd,
asmlinkage long sys_readlink(const char __user *path,
char __user *buf, int bufsiz);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long sys_old_select(struct sel_arg_struct __user *arg);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
-/* obsolete: kernel/sys.c */
+/* obsolete */
asmlinkage long sys_gethostname(char __user *name, int len);
asmlinkage long sys_uname(struct old_utsname __user *);
asmlinkage long sys_olduname(struct oldold_utsname __user *);
@@ -1210,11 +1167,11 @@ asmlinkage long sys_olduname(struct oldold_utsname __user *);
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
#endif
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
unsigned long third, void __user *ptr, long fifth);
-/* obsolete: mm/ */
+/* obsolete */
asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
@@ -1229,6 +1186,7 @@ asmlinkage long sys_ni_syscall(void);
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+asmlinkage long sys_ni_posix_timers(void);
/*
* Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly.
@@ -1293,18 +1251,6 @@ static inline long ksys_ftruncate(unsigned int fd, loff_t length)
return do_sys_ftruncate(fd, length, 1);
}
-extern int __close_fd(struct files_struct *files, unsigned int fd);
-
-/*
- * In contrast to sys_close(), this stub does not check whether the syscall
- * should or should not be restarted, but returns the raw error codes from
- * __close_fd().
- */
-static inline int ksys_close(unsigned int fd)
-{
- return __close_fd(current->files, fd);
-}
-
extern long do_sys_truncate(const char __user *pathname, loff_t length);
static inline long ksys_truncate(const char __user *pathname, loff_t length)
@@ -1340,6 +1286,9 @@ long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct old_timespec32 __user *timeout);
+long __do_semtimedop(int semid, struct sembuf *tsems, unsigned int nsops,
+ const struct timespec64 *timeout,
+ struct ipc_namespace *ns);
int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
int __user *optlen);
diff --git a/include/linux/syscalls_api.h b/include/linux/syscalls_api.h
new file mode 100644
index 000000000000..23e012b04db4
--- /dev/null
+++ b/include/linux/syscalls_api.h
@@ -0,0 +1 @@
+#include <linux/syscalls.h>
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 51298a4f4623..ee7d33b89e9e 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -38,22 +38,45 @@ struct ctl_table_header;
struct ctl_dir;
/* Keep the same order as in fs/proc/proc_sysctl.c */
-#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
-#define SYSCTL_ONE ((void *)&sysctl_vals[1])
-#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2])
+#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
+#define SYSCTL_ONE ((void *)&sysctl_vals[1])
+#define SYSCTL_TWO ((void *)&sysctl_vals[2])
+#define SYSCTL_THREE ((void *)&sysctl_vals[3])
+#define SYSCTL_FOUR ((void *)&sysctl_vals[4])
+#define SYSCTL_ONE_HUNDRED ((void *)&sysctl_vals[5])
+#define SYSCTL_TWO_HUNDRED ((void *)&sysctl_vals[6])
+#define SYSCTL_ONE_THOUSAND ((void *)&sysctl_vals[7])
+#define SYSCTL_THREE_THOUSAND ((void *)&sysctl_vals[8])
+#define SYSCTL_INT_MAX ((void *)&sysctl_vals[9])
+
+/* needed by proc_dointvec_minmax for the [fs_]overflow UID and GID sysctls */
+#define SYSCTL_MAXOLDUID ((void *)&sysctl_vals[10])
+#define SYSCTL_NEG_ONE ((void *)&sysctl_vals[11])
extern const int sysctl_vals[];
+#define SYSCTL_LONG_ZERO ((void *)&sysctl_long_vals[0])
+#define SYSCTL_LONG_ONE ((void *)&sysctl_long_vals[1])
+#define SYSCTL_LONG_MAX ((void *)&sysctl_long_vals[2])
+
+extern const unsigned long sysctl_long_vals[];
+
typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dobool(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
+int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
loff_t *);
int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *,
@@ -66,7 +89,7 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
/*
- * Register a set of sysctl names by calling register_sysctl_table
+ * Register a set of sysctl names by calling register_sysctl
* with an initialised array of struct ctl_table's. An entry with
* NULL procname terminates the table. table->de will be
* set up by the registration and need not be initialised in advance.
@@ -114,7 +137,17 @@ struct ctl_table {
void *data;
int maxlen;
umode_t mode;
- struct ctl_table *child; /* Deprecated */
+ /**
+ * enum type - Enumeration to differentiate between ctl target types
+ * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations
+ * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently
+ * empty directory target to serve
+ * as mount point.
+ */
+ enum {
+ SYSCTL_TABLE_TYPE_DEFAULT,
+ SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY
+ } type;
proc_handler *proc_handler; /* Callback for text formatting */
struct ctl_table_poll *poll;
void *extra1;
@@ -126,12 +159,22 @@ struct ctl_node {
struct ctl_table_header *header;
};
-/* struct ctl_table_header is used to maintain dynamic lists of
- struct ctl_table trees. */
+/**
+ * struct ctl_table_header - maintains dynamic lists of struct ctl_table trees
+ * @ctl_table: pointer to the first element in ctl_table array
+ * @ctl_table_size: number of elements pointed to by @ctl_table
+ * @used: The entry will never be touched when equal to 0.
+ * @count: Incremented every time something is added to @inodes and
+ * decremented every time something is removed from @inodes
+ * @nreg: When @nreg drops to 0 the ctl_table_header will be unregistered.
+ * @rcu: Delays the freeing of the inode. Introduced with "unfuck proc_sysctl ->d_compare()"
+ *
+ */
struct ctl_table_header {
union {
struct {
struct ctl_table *ctl_table;
+ int ctl_table_size;
int used;
int count;
int nreg;
@@ -167,10 +210,8 @@ struct ctl_table_root {
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
-/* struct ctl_path describes where in the hierarchy a table is added */
-struct ctl_path {
- const char *procname;
-};
+#define register_sysctl(path, table) \
+ register_sysctl_sz(path, table, ARRAY_SIZE(table))
#ifdef CONFIG_SYSCTL
@@ -183,43 +224,46 @@ extern void retire_sysctl_set(struct ctl_table_set *set);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
- const char *path, struct ctl_table *table);
-struct ctl_table_header *__register_sysctl_paths(
- struct ctl_table_set *set,
- const struct ctl_path *path, struct ctl_table *table);
-struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table);
-struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
-struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
- struct ctl_table *table);
-
+ const char *path, struct ctl_table *table, size_t table_size);
+struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table,
+ size_t table_size);
void unregister_sysctl_table(struct ctl_table_header * table);
-extern int sysctl_init(void);
+extern int sysctl_init_bases(void);
+extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ const char *table_name, size_t table_size);
+#define register_sysctl_init(path, table) \
+ __register_sysctl_init(path, table, #table, ARRAY_SIZE(table))
+extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+
void do_sysctl_args(void);
+bool sysctl_is_alias(char *param);
+int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data);
extern int pwrsw_enabled;
extern int unaligned_enabled;
extern int unaligned_dump_stack;
extern int no_unaligned_warning;
-extern struct ctl_table sysctl_mount_point[];
-extern struct ctl_table random_table[];
-extern struct ctl_table firmware_config_table[];
-extern struct ctl_table epoll_table[];
-
#else /* CONFIG_SYSCTL */
-static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
+
+static inline void register_sysctl_init(const char *path, struct ctl_table *table)
{
- return NULL;
}
-static inline struct ctl_table_header *register_sysctl_paths(
- const struct ctl_path *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_sysctl_mount_point(const char *path)
{
return NULL;
}
-static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_sysctl_sz(const char *path,
+ struct ctl_table *table,
+ size_t table_size)
{
return NULL;
}
@@ -237,6 +281,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
static inline void do_sysctl_args(void)
{
}
+
+static inline bool sysctl_is_alias(char *param)
+{
+ return false;
+}
#endif /* CONFIG_SYSCTL */
int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
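/*
 * A registration sketch against the reworked API: register_sysctl() is now
 * a macro over register_sysctl_sz(), and built-in users register at boot
 * via register_sysctl_init(). my_knob and my_table are hypothetical.
 */
static int my_knob;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
	{ }	/* sentinel */
};

static int __init example_sysctl_init(void)
{
	/* Appears as /proc/sys/kernel/my_knob */
	register_sysctl_init("kernel", my_table);
	return 0;
}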
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
new file mode 100644
index 000000000000..c9cb657dad08
--- /dev/null
+++ b/include/linux/sysfb.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_SYSFB_H
+#define _LINUX_SYSFB_H
+
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/simplefb.h>
+
+struct screen_info;
+
+enum {
+ M_I17, /* 17-Inch iMac */
+ M_I20, /* 20-Inch iMac */
+ M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
+ M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, gen 8,1 */
+ M_I24_10_1, /* 24-Inch iMac, gen 10,1 */
+ M_I27_11_1, /* 27-Inch iMac, gen 11,1 */
+ M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, gen 3,1 */
+ M_MINI_4_1, /* Mac Mini, gen 4,1 */
+ M_MB, /* MacBook */
+ M_MB_2, /* MacBook, 2nd rev. */
+ M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
+ M_MB_SR, /* MacBook, 2nd gen (Santa Rosa) */
+ M_MBA, /* MacBook Air */
+ M_MBA_3, /* MacBook Air, 3rd rev. */
+ M_MBP, /* MacBook Pro */
+ M_MBP_2, /* MacBook Pro, 2nd gen */
+ M_MBP_2_2, /* MacBook Pro, gen 2,2 */
+ M_MBP_SR, /* MacBook Pro (Santa Rosa) */
+ M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro, gen 5,1 */
+ M_MBP_5_2, /* MacBook Pro, gen 5,2 */
+ M_MBP_5_3, /* MacBook Pro, gen 5,3 */
+ M_MBP_6_1, /* MacBook Pro, gen 6,1 */
+ M_MBP_6_2, /* MacBook Pro, gen 6,2 */
+ M_MBP_7_1, /* MacBook Pro, gen 7,1 */
+ M_MBP_8_2, /* MacBook Pro, gen 8,2 */
+ M_UNKNOWN /* placeholder */
+};
+
+struct efifb_dmi_info {
+ char *optname;
+ unsigned long base;
+ int stride;
+ int width;
+ int height;
+ int flags;
+};
+
+#ifdef CONFIG_SYSFB
+
+void sysfb_disable(void);
+
+#else /* CONFIG_SYSFB */
+
+static inline void sysfb_disable(void)
+{
+}
+
+#endif /* CONFIG_SYSFB */
+
+#ifdef CONFIG_EFI
+
+extern struct efifb_dmi_info efifb_dmi_list[];
+void sysfb_apply_efi_quirks(void);
+void sysfb_set_efifb_fwnode(struct platform_device *pd);
+
+#else /* CONFIG_EFI */
+
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
+}
+
+#endif /* CONFIG_EFI */
+
+#ifdef CONFIG_SYSFB_SIMPLEFB
+
+bool sysfb_parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode);
+struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode,
+ struct device *parent);
+
+#else /* CONFIG_SYSFB_SIMPLEFB */
+
+static inline bool sysfb_parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ return false;
+}
+
+static inline struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode,
+ struct device *parent)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif /* CONFIG_SYSFB_SIMPLEFB */
+
+#endif /* _LINUX_SYSFB_H */
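/*
 * Sketch of the expected call sequence (what a sysfb setup path does):
 * translate the firmware-provided screen_info into a simplefb mode and,
 * if it parses, create the "simple-framebuffer" platform device. si is
 * assumed to be a populated struct screen_info; the NULL parent and the
 * function name are hypothetical.
 */
static struct platform_device *example_sysfb_probe(const struct screen_info *si)
{
	struct simplefb_platform_data mode;

	sysfb_apply_efi_quirks();
	if (!sysfb_parse_mode(si, &mode))
		return ERR_PTR(-EINVAL);	/* no usable linear framebuffer */
	return sysfb_create_simplefb(si, &mode, NULL);
}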
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 34e84122f635..326341c62385 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -61,22 +61,32 @@ do { \
/**
* struct attribute_group - data structure used to declare an attribute group.
* @name: Optional: Attribute group name
- * If specified, the attribute group will be created in
- * a new subdirectory with this name.
+ * If specified, the attribute group will be created in a
+ * new subdirectory with this name. Additionally when a
+ * group is named, @is_visible and @is_bin_visible may
+ * return SYSFS_GROUP_INVISIBLE to control visibility of
+ * the directory itself.
* @is_visible: Optional: Function to return permissions associated with an
- * attribute of the group. Will be called repeatedly for each
- * non-binary attribute in the group. Only read/write
+ * attribute of the group. Will be called repeatedly for
+ * each non-binary attribute in the group. Only read/write
* permissions as well as SYSFS_PREALLOC are accepted. Must
- * return 0 if an attribute is not visible. The returned value
- * will replace static permissions defined in struct attribute.
+ * return 0 if an attribute is not visible. The returned
+ * value will replace static permissions defined in struct
+ * attribute. Use SYSFS_GROUP_VISIBLE() when assigning this
+ * callback to specify separate _group_visible() and
+ * _attr_visible() handlers.
* @is_bin_visible:
* Optional: Function to return permissions associated with a
* binary attribute of the group. Will be called repeatedly
* for each binary attribute in the group. Only read/write
- * permissions as well as SYSFS_PREALLOC are accepted. Must
- * return 0 if a binary attribute is not visible. The returned
- * value will replace static permissions defined in
- * struct bin_attribute.
+ * permissions as well as SYSFS_PREALLOC (and the
+ * visibility flags for named groups) are accepted. Must
+ * return 0 if a binary attribute is not visible. The
+ * returned value will replace static permissions defined
+ * in struct bin_attribute. If @is_visible is not set, use
+ * SYSFS_GROUP_VISIBLE() when assigning this callback to
+ * specify separate _group_visible() and _attr_visible()
+ * handlers.
* @attrs: Pointer to NULL terminated list of attributes.
* @bin_attrs: Pointer to NULL terminated list of binary attributes.
* Either attrs or bin_attrs or both must be provided.
@@ -91,13 +101,121 @@ struct attribute_group {
struct bin_attribute **bin_attrs;
};
+#define SYSFS_PREALLOC 010000
+#define SYSFS_GROUP_INVISIBLE 020000
+
+/*
+ * DEFINE_SYSFS_GROUP_VISIBLE(name):
+ * A helper macro to pair with the assignment of ".is_visible =
+ * SYSFS_GROUP_VISIBLE(name)", that arranges for the directory
+ * associated with a named attribute_group to optionally be hidden.
+ * This allows for static declaration of attribute_groups, and the
+ * simplification of attribute visibility lifetime that implies,
+ * without polluting sysfs with empty attribute directories.
+ * Ex.
+ *
+ * static umode_t example_attr_visible(struct kobject *kobj,
+ * struct attribute *attr, int n)
+ * {
+ * if (example_attr_condition)
+ * return 0;
+ * else if (ro_attr_condition)
+ * return 0444;
+ * return attr->mode;
+ * }
+ *
+ * static bool example_group_visible(struct kobject *kobj)
+ * {
+ * if (example_group_condition)
+ * return false;
+ * return true;
+ * }
+ *
+ * DEFINE_SYSFS_GROUP_VISIBLE(example);
+ *
+ * static struct attribute_group example_group = {
+ * .name = "example",
+ * .is_visible = SYSFS_GROUP_VISIBLE(example),
+ * .attrs = &example_attrs,
+ * };
+ *
+ * Note that it expects <name>_attr_visible and <name>_group_visible to
+ * be defined. For cases where individual attributes do not need
+ * separate visibility consideration, only entire group visibility at
+ * once, see DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE().
+ */
+#define DEFINE_SYSFS_GROUP_VISIBLE(name) \
+ static inline umode_t sysfs_group_visible_##name( \
+ struct kobject *kobj, struct attribute *attr, int n) \
+ { \
+ if (n == 0 && !name##_group_visible(kobj)) \
+ return SYSFS_GROUP_INVISIBLE; \
+ return name##_attr_visible(kobj, attr, n); \
+ }
+
+/*
+ * DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(name):
+ * A helper macro to pair with SYSFS_GROUP_VISIBLE() that like
+ * DEFINE_SYSFS_GROUP_VISIBLE() controls group visibility, but does
+ * not require the implementation of a per-attribute visibility
+ * callback.
+ * Ex.
+ *
+ * static bool example_group_visible(struct kobject *kobj)
+ * {
+ * if (example_group_condition)
+ * return false;
+ * return true;
+ * }
+ *
+ * DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(example);
+ *
+ * static struct attribute_group example_group = {
+ * .name = "example",
+ * .is_visible = SYSFS_GROUP_VISIBLE(example),
+ * .attrs = &example_attrs,
+ * };
+ */
+#define DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(name) \
+ static inline umode_t sysfs_group_visible_##name( \
+ struct kobject *kobj, struct attribute *a, int n) \
+ { \
+ if (n == 0 && !name##_group_visible(kobj)) \
+ return SYSFS_GROUP_INVISIBLE; \
+ return a->mode; \
+ }
+
+/*
+ * Same as DEFINE_SYSFS_GROUP_VISIBLE, but for groups with only binary
+ * attributes. If an attribute_group defines both text and binary
+ * attributes, the group visibility is determined by the function
+ * specified to is_visible(), not is_bin_visible().
+ */
+#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \
+ static inline umode_t sysfs_group_visible_##name( \
+ struct kobject *kobj, struct bin_attribute *attr, int n) \
+ { \
+ if (n == 0 && !name##_group_visible(kobj)) \
+ return SYSFS_GROUP_INVISIBLE; \
+ return name##_attr_visible(kobj, attr, n); \
+ }
+
+#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \
+ static inline umode_t sysfs_group_visible_##name( \
+ struct kobject *kobj, struct bin_attribute *a, int n) \
+ { \
+ if (n == 0 && !name##_group_visible(kobj)) \
+ return SYSFS_GROUP_INVISIBLE; \
+ return a->mode; \
+ }
+
+#define SYSFS_GROUP_VISIBLE(fn) sysfs_group_visible_##fn
+
/*
* Use these macros to make defining attributes easier.
* See include/linux/device.h for examples..
*/
-#define SYSFS_PREALLOC 010000
-
#define __ATTR(_name, _mode, _show, _store) { \
.attr = {.name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
@@ -162,17 +280,27 @@ static const struct attribute_group _name##_group = { \
}; \
__ATTRIBUTE_GROUPS(_name)
+#define BIN_ATTRIBUTE_GROUPS(_name) \
+static const struct attribute_group _name##_group = { \
+ .bin_attrs = _name##_attrs, \
+}; \
+__ATTRIBUTE_GROUPS(_name)
+
struct file;
struct vm_area_struct;
+struct address_space;
struct bin_attribute {
struct attribute attr;
size_t size;
void *private;
+ struct address_space *(*f_mapping)(void);
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
+ loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
+ loff_t, int);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
};
@@ -227,6 +355,22 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size)
#define BIN_ATTR_RW(_name, _size) \
struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
+
+#define __BIN_ATTR_ADMIN_RO(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = 0400 }, \
+ .read = _name##_read, \
+ .size = _size, \
+}
+
+#define __BIN_ATTR_ADMIN_RW(_name, _size) \
+ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size)
+
+#define BIN_ATTR_ADMIN_RO(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size)
+
+#define BIN_ATTR_ADMIN_RW(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size)
+
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
@@ -329,6 +473,10 @@ int sysfs_groups_change_owner(struct kobject *kobj,
int sysfs_group_change_owner(struct kobject *kobj,
const struct attribute_group *groups, kuid_t kuid,
kgid_t kgid);
+__printf(2, 3)
+int sysfs_emit(char *buf, const char *fmt, ...);
+__printf(3, 4)
+int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
#else /* CONFIG_SYSFS */
@@ -576,6 +724,17 @@ static inline int sysfs_group_change_owner(struct kobject *kobj,
return 0;
}
+__printf(2, 3)
+static inline int sysfs_emit(char *buf, const char *fmt, ...)
+{
+ return 0;
+}
+
+__printf(3, 4)
+static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
+{
+ return 0;
+}
#endif /* CONFIG_SYSFS */
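/*
 * Typical consumer of the sysfs_emit() helpers declared above: a show()
 * callback that cannot overrun the PAGE_SIZE sysfs buffer and needs no
 * manual snprintf() bookkeeping. foo and foo_value are hypothetical.
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", foo_value);
}
static DEVICE_ATTR_RO(foo);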
static inline int __must_check sysfs_create_file(struct kobject *kobj,
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 86af908e2663..955f80e34d4f 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -8,6 +8,8 @@
#ifndef _LINUX_SYSLOG_H
#define _LINUX_SYSLOG_H
+#include <linux/wait.h>
+
/* Close the log. Currently a NOP. */
#define SYSLOG_ACTION_CLOSE 0
/* Open the log. Currently a NOP. */
@@ -35,5 +37,6 @@
#define SYSLOG_FROM_PROC 1
int do_syslog(int type, char __user *buf, int count, int source);
+extern wait_queue_head_t log_wait;
#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 3a582ec7a2f1..bdca467ebb77 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,7 +30,7 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (* const handler)(int);
+ void (* const handler)(u8);
const char * const help_msg;
const char * const action_msg;
const int enable_mask;
@@ -43,10 +43,10 @@ struct sysrq_key_op {
* are available -- else NULL's).
*/
-void handle_sysrq(int key);
-void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, const struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+void handle_sysrq(u8 key);
+void __handle_sysrq(u8 key, bool check_mask);
+int register_sysrq_key(u8 key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op);
extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
@@ -54,20 +54,20 @@ int sysrq_mask(void);
#else
-static inline void handle_sysrq(int key)
+static inline void handle_sysrq(u8 key)
{
}
-static inline void __handle_sysrq(int key, bool check_mask)
+static inline void __handle_sysrq(u8 key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int register_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
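/*
 * Registration sketch for the retyped interface above: handlers now take
 * the key as u8. The names and the 'y' key below are hypothetical.
 */
static void sysrq_handle_example(u8 key)
{
	pr_info("sysrq: example handler fired for '%c'\n", key);
}

static const struct sysrq_key_op sysrq_example_op = {
	.handler	= sysrq_handle_example,
	.help_msg	= "example(y)",
	.action_msg	= "Example action",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};

/* In init code: register_sysrq_key('y', &sysrq_example_op); */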
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 96305a64a5a7..248f4ac95642 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -3,7 +3,7 @@
#define _LINUX_T10_PI_H
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
/*
* A T10 PI-capable target device can be formatted with different
@@ -53,4 +53,33 @@ extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
+struct crc64_pi_tuple {
+ __be64 guard_tag;
+ __be16 app_tag;
+ __u8 ref_tag[6];
+};
+
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+ return n & ((1ull << 48) - 1);
+}
+
+static inline u64 ext_pi_ref_tag(struct request *rq)
+{
+ unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (rq->q->integrity.interval_exp)
+ shift = rq->q->integrity.interval_exp;
+#endif
+ return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
+}
+
+extern const struct blk_integrity_profile ext_pi_type1_crc64;
+extern const struct blk_integrity_profile ext_pi_type3_crc64;
+
#endif
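/*
 * Worked example for the helpers above, assuming 4096-byte logical blocks
 * and no protection-interval override: shift is ilog2(4096) == 12, so the
 * 64-bit PI reference tag is the starting logical block number truncated
 * to 48 bits, wrapping at 2^48 blocks.
 */
static inline u64 example_crc64_ref_tag(sector_t start_sector)
{
	return lower_48_bits(start_sector >> (12 - SECTOR_SHIFT));
}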
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 0fb93aafa478..795ef5a68429 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -13,10 +13,23 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
twork->func = func;
}
-#define TWA_RESUME 1
-#define TWA_SIGNAL 2
-int task_work_add(struct task_struct *task, struct callback_head *twork, int);
+enum task_work_notify_mode {
+ TWA_NONE,
+ TWA_RESUME,
+ TWA_SIGNAL,
+ TWA_SIGNAL_NO_IPI,
+};
+static inline bool task_work_pending(struct task_struct *task)
+{
+ return READ_ONCE(task->task_works);
+}
+
+int task_work_add(struct task_struct *task, struct callback_head *twork,
+ enum task_work_notify_mode mode);
+
+struct callback_head *task_work_cancel_match(struct task_struct *task,
+ bool (*match)(struct callback_head *, void *data), void *data);
struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
void task_work_run(void);
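/*
 * Usage sketch for the typed notify modes above: queue a callback to run
 * in the target task's context. TWA_SIGNAL also interrupts a sleeping
 * task, while TWA_RESUME defers to the next return to user space. Names
 * are hypothetical.
 */
static void example_twork_fn(struct callback_head *head)
{
	pr_info("task_work: running in task context\n");
}

static struct callback_head example_twork;

static int example_queue(struct task_struct *task)
{
	init_task_work(&example_twork, example_twork_fn);
	return task_work_add(task, &example_twork, TWA_SIGNAL);
}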
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index 5146d2574e85..d2279160ef39 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -126,7 +126,6 @@ extern void tboot_probe(void);
extern void tboot_shutdown(u32 shutdown_type);
extern struct acpi_table_header *tboot_get_dmar_table(
struct acpi_table_header *dmar_tbl);
-extern int tboot_force_iommu(void);
#else
@@ -136,7 +135,6 @@ extern int tboot_force_iommu(void);
#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
do { } while (0)
#define tboot_get_dmar_table(dmar_tbl) (dmar_tbl)
-#define tboot_force_iommu() 0
#endif /* !CONFIG_INTEL_TXT */
diff --git a/include/linux/tc.h b/include/linux/tc.h
index a60639f37963..1638660abf5e 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -120,7 +120,7 @@ static inline unsigned long tc_get_speed(struct tc_bus *tbus)
#ifdef CONFIG_TC
-extern struct bus_type tc_bus_type;
+extern const struct bus_type tc_bus_type;
extern int tc_register_driver(struct tc_driver *tdrv);
extern void tc_unregister_driver(struct tc_driver *tdrv);
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
index b0d36a9934cc..5cf6f6f82aa7 100644
--- a/include/linux/tca6416_keypad.h
+++ b/include/linux/tca6416_keypad.h
@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
unsigned int rep:1; /* enable input subsystem auto repeat */
uint16_t pinmask;
uint16_t invert;
- int irq_is_gpio;
 int use_polling; /* use polling if interrupt is not connected */
};
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14b62d7df942..55399ee2a57e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -46,6 +46,36 @@ static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
return inner_tcp_hdr(skb)->doff * 4;
}
+/**
+ * skb_tcp_all_headers - Returns size of all headers for a TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb)) {
+ * int hlen = skb_tcp_all_headers(skb);
+ * ...
+ */
+static inline int skb_tcp_all_headers(const struct sk_buff *skb)
+{
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
+/**
+ * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb) && skb->encapsulation) {
+ * int hlen = skb_inner_tcp_all_headers(skb);
+ * ...
+ */
+static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
+{
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
return (tcp_hdr(skb)->doff - 5) * 4;
@@ -92,6 +122,8 @@ struct tcp_options_received {
smc_ok : 1, /* SMC seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
+ u8 saw_unknown:1, /* Received unknown option */
+ unused:7;
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
@@ -120,6 +152,7 @@ struct tcp_request_sock {
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
+ bool req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
bool drop_req;
#endif
@@ -132,6 +165,12 @@ struct tcp_request_sock {
* FastOpen it's the seq#
* after data-in-SYN.
*/
+ u8 syn_tos;
+#ifdef CONFIG_TCP_AO
+ u8 ao_keyid;
+ u8 ao_rcv_next;
+ bool used_tcp_ao;
+#endif
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -139,24 +178,135 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
return (struct tcp_request_sock *)req;
}
+static inline bool tcp_rsk_used_ao(const struct request_sock *req)
+{
+#ifndef CONFIG_TCP_AO
+ return false;
+#else
+ return tcp_rsk(req)->used_tcp_ao;
+#endif
+}
+
+#define TCP_RMEM_TO_WIN_SCALE 8
+
struct tcp_sock {
+	/* The cacheline layout is documented in
+ * Documentation/networking/net_cachelines/tcp_sock.rst.
+ * Please update the document when adding new fields.
+ */
+
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
- u16 tcp_header_len; /* Bytes of tcp header to send */
+
+ /* TX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_tx);
+ /* timestamp of last sent data packet (for restart window) */
+ u32 max_window; /* Maximal window ever seen from peer */
+ u32 rcv_ssthresh; /* Current window clamp */
+ u32 reordering; /* Packet reordering metric. */
+ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
+ /* from STCP, retrans queue hinting */
+ struct sk_buff *lost_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+ __cacheline_group_end(tcp_sock_read_tx);
+
+ /* TXRX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_txrx);
+ u32 tsoffset; /* timestamp offset */
+ u32 snd_wnd; /* The window we expect to receive */
+ u32 mss_cache; /* Cached effective mss, not including SACKS */
+ u32 snd_cwnd; /* Sending congestion window */
+ u32 prr_out; /* Total number of pkts sent during Recovery. */
+ u32 lost_out; /* Lost packets */
+ u32 sacked_out; /* SACK'd packets */
+ u16 tcp_header_len; /* Bytes of tcp header to send */
+ u8 scaling_ratio; /* see tcp_win_from_space() */
+ u8 chrono_type : 2, /* current chronograph type */
+ repair : 1,
+ tcp_usec_ts : 1, /* TSval values in usec */
+ is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
+ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ __cacheline_group_end(tcp_sock_read_txrx);
+
+ /* RX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_rx);
+ u32 copied_seq; /* Head of yet unread data */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
+ u32 snd_wl1; /* Sequence for window update */
+ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+ u32 rttvar_us; /* smoothed mdev_max */
+ u32 retrans_out; /* Retransmitted packets out */
+ u16 advmss; /* Advertised MSS */
+ u16 urg_data; /* Saved octet of OOB data and control flags */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ struct minmax rtt_min;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ u32 snd_ssthresh; /* Slow start size threshold */
+ __cacheline_group_end(tcp_sock_read_rx);
+
+ /* TX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_tx) ____cacheline_aligned;
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
+ * total number of data bytes sent.
+ */
+ u32 snd_sml; /* Last byte of the most recently transmitted small packet */
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
+ u32 pushed_seq; /* Last pushed seq, required to talk to windows */
+ u32 lsndtime;
+	u32	mdev_us;	/* mean deviation */
+ u32 rtt_seq; /* sequence number to update rttvar */
+ u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+ u64 tcp_mstamp; /* most recent packet received/sent */
+ struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
+ struct sk_buff *highest_sack; /* skb just after the highest
+ * skb with SACKed bit set
+ * (validity guaranteed only if
+ * sacked_out > 0)
+ */
+ u8 ecn_flags; /* ECN status bits. */
+ __cacheline_group_end(tcp_sock_write_tx);
+ /* TXRX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_txrx);
/*
* Header prediction flags
* 0x5?10 << 16 + snd_wnd in net byte order
*/
__be32 pred_flags;
-
+ u32 rcv_nxt; /* What we want to receive next */
+ u32 snd_nxt; /* Next sequence we send */
+ u32 snd_una; /* First byte we want an ack for */
+ u32 window_clamp; /* Maximal window to advertise */
+ u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ u32 packets_out; /* Packets which are "in flight" */
+ u32 snd_up; /* Urgent pointer */
+ u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 delivered_ce; /* Like the above but only ECE marked packets */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ u32 rcv_wnd; /* Current receiver window */
/*
- * RFC793 variables by their proper names. This means you can
- * read the code and the spec side by side (and laugh ...)
- * See RFC793 and RFC1122. The RFC writes these in capitals.
+ * Options received (usually on last packet, some only on SYN packets).
*/
- u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ struct tcp_options_received rx_opt;
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
+ __cacheline_group_end(tcp_sock_write_txrx);
+
+ /* RX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_rx) __aligned(8);
+ u64 bytes_received;
+ /* RFC4898 tcpEStatsAppHCThruOctetsReceived
* sum(delta(rcv_nxt)), or how many bytes
* were acked.
*/
@@ -166,45 +316,42 @@ struct tcp_sock {
u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
* total number of data segments in.
*/
- u32 rcv_nxt; /* What we want to receive next */
- u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
- u32 snd_nxt; /* Next sequence we send */
- u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
- * The total number of segments sent.
- */
- u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
- * total number of data segments sent.
- */
- u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
- * total number of data bytes sent.
- */
+ u32 max_packets_out; /* max packets_out in last window */
+ u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
+ u32 rcv_rtt_last_tsecr;
+ u64 first_tx_mstamp; /* start of window send phase */
+ u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ struct {
+ u32 rtt_us;
+ u32 seq;
+ u64 time;
+ } rcv_rtt_est;
+/* Receiver queue space */
+ struct {
+ u32 space;
+ u32 seq;
+ u64 time;
+ } rcvq_space;
+ __cacheline_group_end(tcp_sock_write_rx);
+ /* End of Hot Path */
+
+/*
+ * RFC793 variables by their proper names. This means you can
+ * read the code and the spec side by side (and laugh ...)
+ * See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
* total number of DSACK blocks received
*/
- u32 snd_una; /* First byte we want an ack for */
- u32 snd_sml; /* Last byte of the most recently transmitted small packet */
- u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
- u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
- u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
-
- u32 tsoffset; /* timestamp offset */
-
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
-
- u32 snd_wl1; /* Sequence for window update */
- u32 snd_wnd; /* The window we expect to receive */
- u32 max_window; /* Maximal window ever seen from peer */
- u32 mss_cache; /* Cached effective mss, not including SACKS */
-
- u32 window_clamp; /* Maximal window to advertise */
- u32 rcv_ssthresh; /* Current window clamp */
/* Information of the most recently (s)acked skb */
struct tcp_rack {
@@ -218,70 +365,35 @@ struct tcp_sock {
dsack_seen:1, /* Whether DSACK seen after last adj */
advanced:1; /* mstamp advanced since last lost marking */
} rack;
- u16 advmss; /* Advertised MSS */
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
unused:5;
- u32 chrono_start; /* Start time in jiffies of a TCP chrono */
- u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
- u8 chrono_type:2, /* current chronograph type */
- rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ u8 thin_lto : 1,/* Use linear timeouts for thin streams */
+ recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
- is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- fastopen_client_fail:2; /* reason why fastopen failed */
- u8 nonagle : 4,/* Disable Nagle algorithm? */
- thin_lto : 1,/* Use linear timeouts for thin streams */
- recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
- repair : 1,
+ fastopen_client_fail:2, /* reason why fastopen failed */
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
- u8 syn_data:1, /* SYN includes data */
+ u8 save_syn:2, /* Save headers of SYN packet */
+ syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
- syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
- save_syn:1, /* Save headers of SYN packet */
- is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
- syn_smc:1; /* SYN includes SMC */
- u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+ syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
- u64 tcp_wstamp_ns; /* departure time for next sent data packet */
- u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
/* RTT measurement */
- u64 tcp_mstamp; /* most recent packet received/sent */
- u32 srtt_us; /* smoothed round trip time << 3 in usecs */
- u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
- u32 rttvar_us; /* smoothed mdev_max */
- u32 rtt_seq; /* sequence number to update rttvar */
- struct minmax rtt_min;
-
- u32 packets_out; /* Packets which are "in flight" */
- u32 retrans_out; /* Retransmitted packets out */
- u32 max_packets_out; /* max packets_out in last window */
- u32 max_packets_seq; /* right edge of max_packets_out flight */
- u16 urg_data; /* Saved octet of OOB data and control flags */
- u8 ecn_flags; /* ECN status bits. */
- u8 keepalive_probes; /* num of allowed keep alive probes */
- u32 reordering; /* Packet reordering metric. */
u32 reord_seen; /* number of data packet reordering events */
- u32 snd_up; /* Urgent pointer */
-
-/*
- * Options received (usually on last packet, some only on SYN packets).
- */
- struct tcp_options_received rx_opt;
/*
* Slow start and congestion control (see also Nagle, and Karn & Partridge)
*/
- u32 snd_ssthresh; /* Slow start size threshold */
- u32 snd_cwnd; /* Sending congestion window */
u32 snd_cwnd_cnt; /* Linear increase counter */
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
@@ -289,32 +401,11 @@ struct tcp_sock {
u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
- u32 prr_out; /* Total number of pkts sent during Recovery. */
- u32 delivered; /* Total data packets delivered incl. rexmits */
- u32 delivered_ce; /* Like the above but only ECE marked packets */
- u32 lost; /* Total data packets lost incl. rexmits */
- u32 app_limited; /* limited until "delivered" reaches this val */
- u64 first_tx_mstamp; /* start of window send phase */
- u64 delivered_mstamp; /* time we reached "delivered" */
- u32 rate_delivered; /* saved rate sample: packets delivered */
- u32 rate_interval_us; /* saved rate sample: time elapsed */
-
- u32 rcv_wnd; /* Current receiver window */
- u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
- u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
- u32 pushed_seq; /* Last pushed seq, required to talk to windows */
- u32 lost_out; /* Lost packets */
- u32 sacked_out; /* SACK'd packets */
+ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;
- /* from STCP, retrans queue hinting */
- struct sk_buff* lost_skb_hint;
- struct sk_buff *retransmit_skb_hint;
-
- /* OOO segments go in this rbtree. Socket lock must be held. */
- struct rb_root out_of_order_queue;
struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
@@ -323,12 +414,6 @@ struct tcp_sock {
struct tcp_sack_block recv_sack_cache[4];
- struct sk_buff *highest_sack; /* skb just after the highest
- * skb with SACKed bit set
- * (validity guaranteed only if
- * sacked_out > 0)
- */
-
int lost_cnt_hint;
u32 prior_ssthresh; /* ssthresh saved at recovery start */
@@ -343,6 +428,14 @@ struct tcp_sock {
* Total data bytes retransmitted
*/
u32 total_retrans; /* Total retransmits for entire connection */
+ u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */
+ u16 total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ u16 total_rto_recoveries; /* Total number of RTO recoveries,
+ * including any unfinished recovery.
+ */
+ u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */
u32 urg_seq; /* Seq of received urgent pointer */
unsigned int keepalive_time; /* time before keep alive takes place */
@@ -356,6 +449,12 @@ struct tcp_sock {
u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs
* values defined in uapi/linux/tcp.h
*/
+ u8 bpf_chg_cc_inprogress:1; /* In the middle of
+ * bpf_setsockopt(TCP_CONGESTION),
+					  * used to keep bpf_tcp_cc->init() from
+					  * recursing by calling
+					  * bpf_setsockopt(TCP_CONGESTION, "itself").
+ */
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
@@ -365,40 +464,35 @@ struct tcp_sock {
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
-/* Receiver side RTT estimation */
- u32 rcv_rtt_last_tsecr;
- struct {
- u32 rtt_us;
- u32 seq;
- u64 time;
- } rcv_rtt_est;
-
-/* Receiver queue space */
- struct {
- u32 space;
- u32 seq;
- u64 time;
- } rcvq_space;
-
/* TCP-specific MTU probe information. */
struct {
u32 probe_seq_start;
u32 probe_seq_end;
} mtu_probe;
+ u32 plb_rehash; /* PLB-triggered rehash attempts */
u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
* while socket was owned by user.
*/
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
+#if IS_ENABLED(CONFIG_SMC)
+ bool syn_smc; /* SYN includes SMC */
+ bool (*smc_hs_congested)(const struct sock *sk);
+#endif
-#ifdef CONFIG_TCP_MD5SIG
-/* TCP AF-Specific parts; only used by MD5 Signature support so far */
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
+/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */
const struct tcp_sock_af_ops *af_specific;
+#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info __rcu *md5sig_info;
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info __rcu *ao_info;
+#endif
+#endif
/* TCP fastopen related information */
struct tcp_fastopen_request *fastopen_req;
@@ -406,7 +500,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock __rcu *fastopen_rsk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
};
enum tsq_enum {
@@ -418,21 +512,25 @@ enum tsq_enum {
TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
* tcp_v{4|6}_mtu_reduced()
*/
+ TCP_ACK_DEFERRED, /* TX pure ack is deferred */
};
enum tsq_flags {
- TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
- TSQF_QUEUED = (1UL << TSQ_QUEUED),
- TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
- TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
- TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
- TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+ TSQF_THROTTLED = BIT(TSQ_THROTTLED),
+ TSQF_QUEUED = BIT(TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED),
+ TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED),
};
-static inline struct tcp_sock *tcp_sk(const struct sock *sk)
-{
- return (struct tcp_sock *)sk;
-}
+#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
+
+/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket.
+ * Used in context of (lockless) tcp listeners.
+ */
+#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
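container_of_const() makes tcp_sk() const-preserving: a const sock pointer yields a const tcp_sock pointer, while tcp_sk_rw() deliberately casts the const away for listener paths that must write. A contrived sketch of the two, assuming nothing beyond this header:

/* const in, const out with tcp_sk(). */
static inline u32 example_peek_rcv_nxt(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return READ_ONCE(tp->rcv_nxt);
}

/* tcp_sk_rw() is the explicit opt-out for lockless listener writers. */
static inline void example_listener_write(const struct sock *sk, u32 off)
{
	struct tcp_sock *tp = tcp_sk_rw(sk);

	WRITE_ONCE(tp->tsoffset, off);
}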
struct tcp_timewait_sock {
struct inet_timewait_sock tw_sk;
@@ -450,6 +548,9 @@ struct tcp_timewait_sock {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info __rcu *ao_info;
+#endif
};
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
@@ -468,7 +569,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
- queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
+ WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
}
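The WRITE_ONCE() above annotates a store that can race with lockless readers of max_qlen. A sketch of the assumed reader side; example_fastopen_queue_full() is illustrative only:

static bool example_fastopen_queue_full(const struct request_sock_queue *queue)
{
	int max_qlen = READ_ONCE(queue->fastopenq.max_qlen);

	return READ_ONCE(queue->fastopenq.qlen) >= max_qlen;
}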
static inline void tcp_move_syn(struct tcp_sock *tp,
@@ -484,8 +585,15 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
+static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
+{
+ return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
+ saved_syn->tcp_hdrlen;
+}
+
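A sketch of a consumer of tcp_saved_syn_len(), copying the saved headers out; the caller and buffer handling are assumptions, only the saved_syn layout is taken from this header:

static int example_copy_saved_syn(const struct tcp_sock *tp, void *buf,
				  u32 buflen)
{
	const struct saved_syn *ss = tp->saved_syn;
	u32 len;

	if (!ss)
		return -ENOENT;

	len = tcp_saved_syn_len(ss);
	if (len > buflen)
		return -EMSGSIZE;

	memcpy(buf, ss->data, len);	/* data[] follows the three lengths */
	return len;
}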
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
- const struct sk_buff *orig_skb);
+ const struct sk_buff *orig_skb,
+ const struct sk_buff *ack_skb);
static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
@@ -500,14 +608,21 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
int shiftlen);
+void __tcp_sock_set_cork(struct sock *sk, bool on);
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
+void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
-void tcp_sock_set_user_timeout(struct sock *sk, u32 val);
+int tcp_sock_set_user_timeout(struct sock *sk, int val);
+
+static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
+{
+ return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS);
+}
#endif /* _LINUX_TCP_H */
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index d074302989dd..71632e3c5f18 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2022 Linaro Limited
*/
#ifndef __TEE_DRV_H
@@ -20,13 +20,11 @@
* specific TEE driver.
*/
-#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */
-#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */
-#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */
-#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
-#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
-#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
-#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */
+#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */
+ /* in secure world */
+#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
+#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
+#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
struct device;
struct tee_device;
@@ -47,6 +45,8 @@ struct tee_shm_pool;
* and just return with an error code. It is needed for requests
* that arises from TEE based kernel drivers that should be
* non-blocking in nature.
+ * @cap_memref_null: flag indicating if the TEE Client supports shared
+ *		     memory buffers with a NULL pointer.
*/
struct tee_context {
struct tee_device *teedev;
@@ -54,6 +54,7 @@ struct tee_context {
struct kref refcount;
bool releasing;
bool supp_nowait;
+ bool cap_memref_null;
};
struct tee_param_memref {
@@ -83,9 +84,10 @@ struct tee_param {
* @release: release this open file
* @open_session: open a new session
* @close_session: close a session
+ * @system_session: declare session as a system session
* @invoke_func: invoke a trusted function
* @cancel_req: request cancel of an ongoing invoke or open
- * @supp_revc: called for supplicant to get a command
+ * @supp_recv: called for supplicant to get a command
* @supp_send: called for supplicant to send a response
* @shm_register: register shared memory buffer in TEE
* @shm_unregister: unregister shared memory buffer in TEE
@@ -99,6 +101,7 @@ struct tee_driver_ops {
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*system_session)(struct tee_context *ctx, u32 session);
int (*invoke_func)(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
@@ -191,9 +194,13 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @offset: offset of buffer in user space
* @pages: locked pages from userspace
* @num_pages: number of locked pages
- * @dmabuf: dmabuf used to for exporting to user space
+ * @refcount: reference counter
* @flags: defined by TEE_SHM_* in tee_drv.h
- * @id: unique id of a shared memory object on this device
+ * @id: unique id of a shared memory object on this device, shared
+ * with user space
+ * @sec_world_id:
+ * secure world assigned id of this shared memory object, not
+ * used by all drivers
*
* This pool is only supposed to be accessed directly from the TEE
* subsystem and from drivers that implement their own shm pool manager.
@@ -206,98 +213,46 @@ struct tee_shm {
unsigned int offset;
struct page **pages;
size_t num_pages;
- struct dma_buf *dmabuf;
+ refcount_t refcount;
u32 flags;
int id;
+ u64 sec_world_id;
};
/**
- * struct tee_shm_pool_mgr - shared memory manager
+ * struct tee_shm_pool - shared memory pool
* @ops: operations
* @private_data: private data for the shared memory manager
*/
-struct tee_shm_pool_mgr {
- const struct tee_shm_pool_mgr_ops *ops;
+struct tee_shm_pool {
+ const struct tee_shm_pool_ops *ops;
void *private_data;
};
/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * struct tee_shm_pool_ops - shared memory pool operations
* @alloc: called when allocating shared memory
* @free: called when freeing shared memory
- * @destroy_poolmgr: called when destroying the pool manager
+ * @destroy_pool: called when destroying the pool
*/
-struct tee_shm_pool_mgr_ops {
- int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
- size_t size);
- void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
- void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+struct tee_shm_pool_ops {
+ int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align);
+ void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
+ void (*destroy_pool)(struct tee_shm_pool *pool);
};
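A skeletal backend implementing the reworked tee_shm_pool_ops, assuming the usual kaddr/size fields of struct tee_shm; my_carve(), my_release() and my_destroy() stand in for a real allocator and are not part of this API:

static int example_pool_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			      size_t size, size_t align)
{
	unsigned long va = my_carve(pool->private_data, size, align);

	if (!va)
		return -ENOMEM;

	shm->kaddr = (void *)va;
	shm->size = size;
	return 0;
}

static void example_pool_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	my_release(pool->private_data, (unsigned long)shm->kaddr, shm->size);
	shm->kaddr = NULL;
}

static void example_pool_destroy(struct tee_shm_pool *pool)
{
	my_destroy(pool->private_data);
	kfree(pool);
}

static const struct tee_shm_pool_ops example_pool_ops = {
	.alloc = example_pool_alloc,
	.free = example_pool_free,
	.destroy_pool = example_pool_destroy,
};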
-/**
- * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
- * @priv_mgr: manager for driver private shared memory allocations
- * @dmabuf_mgr: manager for dma-buf shared memory allocations
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
- struct tee_shm_pool_mgr *dmabuf_mgr);
-
/*
- * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
- * memory
+ * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
* @vaddr: Virtual address of start of pool
* @paddr: Physical address of start of pool
* @size: Size in bytes of the pool
*
- * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
- */
-struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
- phys_addr_t paddr,
- size_t size,
- int min_alloc_order);
-
-/**
- * tee_shm_pool_mgr_destroy() - Free a shared memory manager
- */
-static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
-{
- poolm->ops->destroy_poolmgr(poolm);
-}
-
-/**
- * struct tee_shm_pool_mem_info - holds information needed to create a shared
- * memory pool
- * @vaddr: Virtual address of start of pool
- * @paddr: Physical address of start of pool
- * @size: Size in bytes of the pool
- */
-struct tee_shm_pool_mem_info {
- unsigned long vaddr;
- phys_addr_t paddr;
- size_t size;
-};
-
-/**
- * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
- * memory range
- * @priv_info: Information for driver private shared memory pool
- * @dmabuf_info: Information for dma-buf shared memory pool
- *
- * Start and end of pools will must be page aligned.
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
* @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
*/
-struct tee_shm_pool *
-tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
- struct tee_shm_pool_mem_info *dmabuf_info);
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr, size_t size,
+ int min_alloc_order);
/**
* tee_shm_pool_free() - Free a shared memory pool
@@ -306,7 +261,10 @@ tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
* There must be no remaining shared memory allocated from this pool when
* this function is called.
*/
-void tee_shm_pool_free(struct tee_shm_pool *pool);
+static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+ pool->ops->destroy_pool(pool);
+}
/**
* tee_get_drvdata() - Return driver_data pointer
@@ -314,42 +272,20 @@ void tee_shm_pool_free(struct tee_shm_pool *pool);
*/
void *tee_get_drvdata(struct tee_device *teedev);
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx: Context that allocates the shared memory
- * @size: Requested size of shared memory
- * @flags: Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
- * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
- * with a dma-buf handle, else driver private memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
-/**
- * tee_shm_register() - Register shared memory buffer
- * @ctx: Context that registers the shared memory
- * @addr: Address is userspace of the shared buffer
- * @length: Length of the shared buffer
- * @flags: Flags setting properties for the requested shared memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags);
+struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
+ void *addr, size_t length);
/**
- * tee_shm_is_registered() - Check if shared memory object in registered in TEE
+ * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
* @shm: Shared memory handle
- * @returns true if object is registered in TEE
+ * @returns true if object is dynamic shared memory
*/
-static inline bool tee_shm_is_registered(struct tee_shm *shm)
+static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
{
- return shm && (shm->flags & TEE_SHM_REGISTER);
+ return shm && (shm->flags & TEE_SHM_DYNAMIC);
}
/**
@@ -365,24 +301,6 @@ void tee_shm_free(struct tee_shm *shm);
void tee_shm_put(struct tee_shm *shm);
/**
- * tee_shm_va2pa() - Get physical address of a virtual address
- * @shm: Shared memory handle
- * @va: Virtual address to tranlsate
- * @pa: Returned physical address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
-
-/**
- * tee_shm_pa2va() - Get virtual address of a physical address
- * @shm: Shared memory handle
- * @pa: Physical address to tranlsate
- * @va: Returned virtual address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
-
-/**
* tee_shm_get_va() - Get virtual address of a shared memory plus an offset
* @shm: Shared memory handle
* @offs: Offset from start of this shared memory
@@ -514,6 +432,20 @@ int tee_client_open_session(struct tee_context *ctx,
int tee_client_close_session(struct tee_context *ctx, u32 session);
/**
+ * tee_client_system_session() - Declare session as a system session
+ * @ctx: TEE Context
+ * @session: Session id
+ *
+ * This function requests TEE to provision an entry context ready to use for
+ * that session only. The provisioned entry context is used for command
+ * invocation and session closure, not for command cancelling requests.
+ * TEE releases the provisioned context upon session closure.
+ *
+ * Return < 0 on error else 0 if an entry context has been provisioned.
+ */
+int tee_client_system_session(struct tee_context *ctx, u32 session);
+
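A hedged sketch of a kernel TEE client using the new call; the argument setup and TEEC return handling are simplified assumptions:

static int example_open_system_session(struct tee_context *ctx,
				       struct tee_ioctl_open_session_arg *arg)
{
	int rc = tee_client_open_session(ctx, arg, NULL);

	if (rc || arg->ret)
		return rc ? rc : -EINVAL;

	/* Best effort: not all backends implement system sessions. */
	if (tee_client_system_session(ctx, arg->session))
		pr_debug("session %u left as a normal session\n", arg->session);

	return 0;
}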
+/**
* tee_client_invoke_func() - Invoke a function in a Trusted Application
* @ctx: TEE Context
* @arg: Invoke arguments, see description of
@@ -550,7 +482,7 @@ static inline bool tee_param_is_memref(struct tee_param *param)
}
}
-extern struct bus_type tee_bus_type;
+extern const struct bus_type tee_bus_type;
/**
* struct tee_client_device - tee based device
@@ -577,4 +509,18 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of(d, struct tee_client_driver, driver)
+/**
+ * teedev_open() - Open a struct tee_device
+ * @teedev: Device to open
+ *
+ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
+ */
+struct tee_context *teedev_open(struct tee_device *teedev);
+
+/**
+ * teedev_close_context() - closes a struct tee_context
+ * @ctx: The struct tee_context to close
+ */
+void teedev_close_context(struct tee_context *ctx);
+
#endif /*__TEE_DRV_H*/
diff --git a/include/linux/tegra-icc.h b/include/linux/tegra-icc.h
new file mode 100644
index 000000000000..4b4d4bee290c
--- /dev/null
+++ b/include/linux/tegra-icc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022-2023 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef LINUX_TEGRA_ICC_H
+#define LINUX_TEGRA_ICC_H
+
+enum tegra_icc_client_type {
+ TEGRA_ICC_NONE,
+ TEGRA_ICC_NISO,
+ TEGRA_ICC_ISO_DISPLAY,
+ TEGRA_ICC_ISO_VI,
+ TEGRA_ICC_ISO_AUDIO,
+ TEGRA_ICC_ISO_VIFAL,
+};
+
+/* ICC IDs for MC clients used in BPMP */
+#define TEGRA_ICC_BPMP_DEBUG 1
+#define TEGRA_ICC_BPMP_CPU_CLUSTER0 2
+#define TEGRA_ICC_BPMP_CPU_CLUSTER1 3
+#define TEGRA_ICC_BPMP_CPU_CLUSTER2 4
+#define TEGRA_ICC_BPMP_GPU 5
+#define TEGRA_ICC_BPMP_CACTMON 6
+#define TEGRA_ICC_BPMP_DISPLAY 7
+#define TEGRA_ICC_BPMP_VI 8
+#define TEGRA_ICC_BPMP_EQOS 9
+#define TEGRA_ICC_BPMP_PCIE_0 10
+#define TEGRA_ICC_BPMP_PCIE_1 11
+#define TEGRA_ICC_BPMP_PCIE_2 12
+#define TEGRA_ICC_BPMP_PCIE_3 13
+#define TEGRA_ICC_BPMP_PCIE_4 14
+#define TEGRA_ICC_BPMP_PCIE_5 15
+#define TEGRA_ICC_BPMP_PCIE_6 16
+#define TEGRA_ICC_BPMP_PCIE_7 17
+#define TEGRA_ICC_BPMP_PCIE_8 18
+#define TEGRA_ICC_BPMP_PCIE_9 19
+#define TEGRA_ICC_BPMP_PCIE_10 20
+#define TEGRA_ICC_BPMP_DLA_0 21
+#define TEGRA_ICC_BPMP_DLA_1 22
+#define TEGRA_ICC_BPMP_SDMMC_1 23
+#define TEGRA_ICC_BPMP_SDMMC_2 24
+#define TEGRA_ICC_BPMP_SDMMC_3 25
+#define TEGRA_ICC_BPMP_SDMMC_4 26
+#define TEGRA_ICC_BPMP_NVDEC 27
+#define TEGRA_ICC_BPMP_NVENC 28
+#define TEGRA_ICC_BPMP_NVJPG_0 29
+#define TEGRA_ICC_BPMP_NVJPG_1 30
+#define TEGRA_ICC_BPMP_OFAA 31
+#define TEGRA_ICC_BPMP_XUSB_HOST 32
+#define TEGRA_ICC_BPMP_XUSB_DEV 33
+#define TEGRA_ICC_BPMP_TSEC 34
+#define TEGRA_ICC_BPMP_VIC 35
+#define TEGRA_ICC_BPMP_APE 36
+#define TEGRA_ICC_BPMP_APEDMA 37
+#define TEGRA_ICC_BPMP_SE 38
+#define TEGRA_ICC_BPMP_ISP 39
+#define TEGRA_ICC_BPMP_HDA 40
+#define TEGRA_ICC_BPMP_VIFAL 41
+#define TEGRA_ICC_BPMP_VI2FAL 42
+#define TEGRA_ICC_BPMP_VI2 43
+#define TEGRA_ICC_BPMP_RCE 44
+#define TEGRA_ICC_BPMP_PVA 45
+
+#endif /* LINUX_TEGRA_ICC_H */
diff --git a/include/linux/termios_internal.h b/include/linux/termios_internal.h
new file mode 100644
index 000000000000..d77f29e5e2b7
--- /dev/null
+++ b/include/linux/termios_internal.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TERMIOS_CONV_H
+#define _LINUX_TERMIOS_CONV_H
+
+#include <linux/uaccess.h>
+#include <asm/termios.h>
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^O werase=^W lnext=^V
+ eol2=\0
+*/
+
+#ifdef VDSUSP
+#define INIT_C_CC_VDSUSP_EXTRA [VDSUSP] = 'Y'-0x40,
+#else
+#define INIT_C_CC_VDSUSP_EXTRA
+#endif
+
+#define INIT_C_CC { \
+ [VINTR] = 'C'-0x40, \
+ [VQUIT] = '\\'-0x40, \
+ [VERASE] = '\177', \
+ [VKILL] = 'U'-0x40, \
+ [VEOF] = 'D'-0x40, \
+ [VSTART] = 'Q'-0x40, \
+ [VSTOP] = 'S'-0x40, \
+ [VSUSP] = 'Z'-0x40, \
+ [VREPRINT] = 'R'-0x40, \
+ [VDISCARD] = 'O'-0x40, \
+ [VWERASE] = 'W'-0x40, \
+ [VLNEXT] = 'V'-0x40, \
+ INIT_C_CC_VDSUSP_EXTRA \
+ [VMIN] = 1 }
+
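The 'X'-0x40 arithmetic above is the usual caret encoding: subtracting 0x40 maps an upper-case letter (or '\\') to its control code. Illustrative compile-time checks, assuming <linux/build_bug.h> for static_assert():

static_assert('C' - 0x40 == 0x03);	/* VINTR:  ^C (ETX) */
static_assert('Z' - 0x40 == 0x1a);	/* VSUSP:  ^Z (SUB) */
static_assert('\\' - 0x40 == 0x1c);	/* VQUIT:  ^\ (FS)  */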
+int user_termio_to_kernel_termios(struct ktermios *, struct termio __user *);
+int kernel_termios_to_user_termio(struct termio __user *, struct ktermios *);
+#ifdef TCGETS2
+int user_termios_to_kernel_termios(struct ktermios *, struct termios2 __user *);
+int kernel_termios_to_user_termios(struct termios2 __user *, struct ktermios *);
+int user_termios_to_kernel_termios_1(struct ktermios *, struct termios __user *);
+int kernel_termios_to_user_termios_1(struct termios __user *, struct ktermios *);
+#else /* TCGETS2 */
+int user_termios_to_kernel_termios(struct ktermios *, struct termios __user *);
+int kernel_termios_to_user_termios(struct termios __user *, struct ktermios *);
+#endif /* TCGETS2 */
+
+#endif /* _LINUX_TERMIOS_CONV_H */
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 13770cfe33ad..6673e4d4ac2e 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -23,7 +23,7 @@ struct ts_config;
struct ts_state
{
unsigned int offset;
- char cb[40];
+ char cb[48];
};
/**
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 42ef807e5d84..c33f50177f51 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -17,9 +17,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/thermal.h>
-#define THERMAL_TRIPS_NONE -1
-#define THERMAL_MAX_TRIPS 12
-
/* invalid cooling state */
#define THERMAL_CSTATE_INVALID -1UL
@@ -35,14 +32,13 @@
struct thermal_zone_device;
struct thermal_cooling_device;
struct thermal_instance;
+struct thermal_debugfs;
struct thermal_attr;
enum thermal_trend {
THERMAL_TREND_STABLE, /* temperature is stable */
THERMAL_TREND_RAISING, /* temperature is rising */
THERMAL_TREND_DROPPING, /* temperature is dropping */
- THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */
- THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
/* Thermal notification reason */
@@ -55,8 +51,36 @@ enum thermal_notify_event {
THERMAL_DEVICE_UP, /* Thermal device is up after a down event */
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
+ THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
+	THERMAL_TZ_BIND_CDEV, /* Cooling dev is bound to the thermal zone */
+	THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbound from the thermal zone */
+ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @threshold: trip crossing notification threshold in millicelsius
+ * @type: trip point type
+ * @priv: pointer to driver data associated with this trip
+ * @flags: flags representing binary properties of the trip
+ */
+struct thermal_trip {
+ int temperature;
+ int hysteresis;
+ int threshold;
+ enum thermal_trip_type type;
+ u8 flags;
+ void *priv;
};
+#define THERMAL_TRIP_FLAG_RW_TEMP BIT(0)
+#define THERMAL_TRIP_FLAG_RW_HYST BIT(1)
+
+#define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \
+ THERMAL_TRIP_FLAG_RW_HYST)
+
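With trip points now plain data plus flags, a driver can describe its trip table up front. An illustrative example with made-up temperatures:

static const struct thermal_trip example_trips[] = {
	{
		.temperature = 85000,	/* 85 C, millicelsius */
		.hysteresis = 2000,
		.type = THERMAL_TRIP_PASSIVE,
		.flags = THERMAL_TRIP_FLAG_RW_TEMP,	/* temp writable via sysfs */
	},
	{
		.temperature = 105000,	/* 105 C, read-only critical trip */
		.type = THERMAL_TRIP_CRITICAL,
	},
};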
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -66,35 +90,28 @@ struct thermal_zone_device_ops {
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*get_trip_type) (struct thermal_zone_device *, int,
- enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
int (*set_trip_temp) (struct thermal_zone_device *, int, int);
- int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
- int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, int,
- enum thermal_trend *);
- int (*notify) (struct thermal_zone_device *, int,
- enum thermal_trip_type);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
+ void (*hot)(struct thermal_zone_device *);
+ void (*critical)(struct thermal_zone_device *);
};
struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
- int (*get_requested_power)(struct thermal_cooling_device *,
- struct thermal_zone_device *, u32 *);
- int (*state2power)(struct thermal_cooling_device *,
- struct thermal_zone_device *, unsigned long, u32 *);
- int (*power2state)(struct thermal_cooling_device *,
- struct thermal_zone_device *, u32, unsigned long *);
+ int (*get_requested_power)(struct thermal_cooling_device *, u32 *);
+ int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *);
+ int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *);
};
struct thermal_cooling_device {
int id;
- char type[THERMAL_NAME_LENGTH];
+ const char *type;
+ unsigned long max_state;
struct device device;
struct device_node *np;
void *devdata;
@@ -104,6 +121,9 @@ struct thermal_cooling_device {
struct mutex lock; /* protect thermal_instances list */
struct list_head thermal_instances;
struct list_head node;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
};
/**
@@ -111,16 +131,16 @@ struct thermal_cooling_device {
* @id: unique id number for each thermal zone
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
+ * @removal: removal completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @mode: current mode of this thermal zone
* @devdata: private pointer for device private data
- * @trips: number of trip points the thermal zone supports
- * @trips_disabled; bitmap for disabled trips
- * @passive_delay: number of milliseconds to wait between polls when
+ * @num_trips: number of trip points the thermal zone supports
+ * @passive_delay_jiffies: number of jiffies to wait between polls when
* performing passive cooling.
- * @polling_delay: number of milliseconds to wait between polls when
+ * @polling_delay_jiffies: number of jiffies to wait between polls when
* checking whether trip points have been crossed (0 for
* interrupt driven systems)
* @temperature: current temperature. This is only for core code,
@@ -133,9 +153,6 @@ struct thermal_cooling_device {
 *			trip point.
 * @prev_high_trip: the above current temperature if you've crossed a
 *			passive trip point.
- * @forced_passive: If > 0, temperature at which to switch on all ACPI
- * processor cooling devices. Currently only used by the
- * step-wise governor.
* @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
@@ -148,30 +165,31 @@ struct thermal_cooling_device {
* @node: node in thermal_tz_list (in thermal_core.c)
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
+ * @suspended: thermal zone suspend indicator
+ * @trips: array of struct thermal_trip objects
*/
struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct completion removal;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
struct thermal_attr *trip_hyst_attrs;
enum thermal_device_mode mode;
void *devdata;
- int trips;
- unsigned long trips_disabled; /* bitmap for disabled trips */
- int passive_delay;
- int polling_delay;
+ int num_trips;
+ unsigned long passive_delay_jiffies;
+ unsigned long polling_delay_jiffies;
int temperature;
int last_temperature;
int emul_temperature;
int passive;
int prev_low_trip;
int prev_high_trip;
- unsigned int forced_passive;
atomic_t need_update;
- struct thermal_zone_device_ops *ops;
+ struct thermal_zone_device_ops ops;
struct thermal_zone_params *tzp;
struct thermal_governor *governor;
void *governor_data;
@@ -181,6 +199,11 @@ struct thermal_zone_device {
struct list_head node;
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
+ bool suspended;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
+ struct thermal_trip trips[] __counted_by(num_trips);
};
/**
@@ -193,54 +216,24 @@ struct thermal_zone_device {
* thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
+ * @update_tz: callback called when thermal zone internals have changed, e.g.
+ * thermal cooling instance was added/removed
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
- char name[THERMAL_NAME_LENGTH];
+ const char *name;
int (*bind_to_tz)(struct thermal_zone_device *tz);
void (*unbind_from_tz)(struct thermal_zone_device *tz);
- int (*throttle)(struct thermal_zone_device *tz, int trip);
+ int (*throttle)(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+ void (*update_tz)(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
struct list_head governor_list;
};
-/* Structure that holds binding parameters for a zone */
-struct thermal_bind_params {
- struct thermal_cooling_device *cdev;
-
- /*
- * This is a measure of 'how effectively these devices can
- * cool 'this' thermal zone. It shall be determined by
- * platform characterization. This value is relative to the
- * rest of the weights so a cooling device whose weight is
- * double that of another cooling device is twice as
- * effective. See Documentation/driver-api/thermal/sysfs-api.rst for more
- * information.
- */
- int weight;
-
- /*
- * This is a bit mask that gives the binding relation between this
- * thermal zone and cdev, for a particular trip point.
- * See Documentation/driver-api/thermal/sysfs-api.rst for more information.
- */
- int trip_mask;
-
- /*
- * This is an array of cooling state limits. Must have exactly
- * 2 * thermal_zone.number_of_trip_points. It is an array consisting
- * of tuples <lower-state upper-state> of state limits. Each trip
- * will be associated with one state limit tuple when binding.
- * A NULL pointer means <THERMAL_NO_LIMITS THERMAL_NO_LIMITS>
- * on all trips.
- */
- unsigned long *binding_limits;
- int (*match) (struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev);
-};
-
/* Structure to define Thermal Zone parameters */
struct thermal_zone_params {
- char governor_name[THERMAL_NAME_LENGTH];
+ const char *governor_name;
/*
* a boolean to indicate if the thermal to hwmon sysfs interface
@@ -249,9 +242,6 @@ struct thermal_zone_params {
*/
bool no_hwmon;
- int num_tbps; /* Number of tbp entries */
- struct thermal_bind_params *tbp;
-
/*
* Sustainable power (heat) that this thermal zone can dissipate in
* mW
@@ -291,91 +281,78 @@ struct thermal_zone_params {
int offset;
};
-/**
- * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones
- *
- * Mandatory:
- * @get_temp: a pointer to a function that reads the sensor temperature.
- *
- * Optional:
- * @get_trend: a pointer to a function that reads the sensor temperature trend.
- * @set_trips: a pointer to a function that sets a temperature window. When
- * this window is left the driver must inform the thermal core via
- * thermal_zone_device_update.
- * @set_emul_temp: a pointer to a function that sets sensor emulated
- * temperature.
- * @set_trip_temp: a pointer to a function that sets the trip temperature on
- * hardware.
- */
-struct thermal_zone_of_device_ops {
- int (*get_temp)(void *, int *);
- int (*get_trend)(void *, int, enum thermal_trend *);
- int (*set_trips)(void *, int, int);
- int (*set_emul_temp)(void *, int);
- int (*set_trip_temp)(void *, int, int);
-};
-
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
-int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id);
-struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops);
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz);
-struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops);
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz);
-#else
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_device_ops *ops);
-static inline int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id)
-{
- return -ENOENT;
-}
-static inline struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops)
-{
- return ERR_PTR(-ENODEV);
-}
+void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz);
-static inline
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz)
-{
-}
+#else
-static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops)
+static inline
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENOTSUPP);
}
-static inline
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz)
+static inline void devm_thermal_of_zone_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
{
}
-
#endif
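The replacement API takes the regular thermal_zone_device_ops directly and stores the sensor data behind thermal_zone_device_priv(). A minimal registration sketch; struct my_sensor and my_sensor_read_mc() are hypothetical:

static int my_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct my_sensor *s = thermal_zone_device_priv(tz);

	*temp = my_sensor_read_mc(s);	/* millicelsius */
	return 0;
}

static const struct thermal_zone_device_ops my_tz_ops = {
	.get_temp = my_get_temp,
};

static int my_probe_thermal(struct device *dev, struct my_sensor *s)
{
	struct thermal_zone_device *tz;

	tz = devm_thermal_of_zone_register(dev, 0, s, &my_tz_ops);
	return PTR_ERR_OR_ZERO(tz);
}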
-#ifdef CONFIG_THERMAL
-struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
- void *, struct thermal_zone_device_ops *,
- struct thermal_zone_params *, int, int);
-void thermal_zone_device_unregister(struct thermal_zone_device *);
+int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip);
+int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip);
+int for_each_thermal_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data);
+int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data);
+int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp);
+
+int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
+#ifdef CONFIG_THERMAL
+struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ const struct thermal_trip *trips,
+ int num_trips, void *devdata,
+ const struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay);
+
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ const struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp);
+
+void thermal_zone_device_unregister(struct thermal_zone_device *tz);
+
+void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
+const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
+int thermal_zone_device_id(struct thermal_zone_device *tzd);
+struct device *thermal_zone_device(struct thermal_zone_device *tzd);
+
+int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower,
+ unsigned int weight);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *,
unsigned long, unsigned long,
unsigned int);
+int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
@@ -391,33 +368,44 @@ devm_thermal_of_cooling_device_register(struct device *dev,
struct device_node *np,
char *type, void *devdata,
const struct thermal_cooling_device_ops *ops);
+void thermal_cooling_device_update(struct thermal_cooling_device *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
-void thermal_cdev_update(struct thermal_cooling_device *);
-void thermal_notify_framework(struct thermal_zone_device *, int);
int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
+void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *thermal_zone_device_register(
- const char *type, int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ const struct thermal_trip *trips,
+ int num_trips, void *devdata,
+ const struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
{ return ERR_PTR(-ENODEV); }
-static inline void thermal_zone_device_unregister(
- struct thermal_zone_device *tz)
+
+static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+							const struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
+{ return ERR_PTR(-ENODEV); }
+
+static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+
static inline struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
+thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{ return ERR_PTR(-ENODEV); }
static inline struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
- char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+ const char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
{ return ERR_PTR(-ENODEV); }
static inline struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
@@ -443,11 +431,20 @@ static inline int thermal_zone_get_offset(
struct thermal_zone_device *tz)
{ return -ENODEV; }
-static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
-{ }
-static inline void thermal_notify_framework(struct thermal_zone_device *tz,
- int trip)
-{ }
+static inline void *thermal_zone_device_priv(struct thermal_zone_device *tz)
+{
+ return NULL;
+}
+
+static inline const char *thermal_zone_device_type(struct thermal_zone_device *tzd)
+{
+ return NULL;
+}
+
+static inline int thermal_zone_device_id(struct thermal_zone_device *tzd)
+{
+ return -ENODEV;
+}
static inline int thermal_zone_device_enable(struct thermal_zone_device *tz)
{ return -ENODEV; }
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e93e249a4e9b..9ea0b28068f4 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,8 +9,10 @@
#define _LINUX_THREAD_INFO_H
#include <linux/types.h>
+#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
+#include <linux/errno.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -35,10 +37,42 @@ enum {
GOOD_STACK,
};
+#ifdef CONFIG_GENERIC_ENTRY
+enum syscall_work_bit {
+ SYSCALL_WORK_BIT_SECCOMP,
+ SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
+ SYSCALL_WORK_BIT_SYSCALL_TRACE,
+ SYSCALL_WORK_BIT_SYSCALL_EMU,
+ SYSCALL_WORK_BIT_SYSCALL_AUDIT,
+ SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
+ SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
+};
+
+#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
+#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
+#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
+#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
+#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
+#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
+#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#endif
+
#include <asm/thread_info.h>
#ifdef __KERNEL__
+#ifndef arch_set_restart_data
+#define arch_set_restart_data(restart) do { } while (0)
+#endif
+
+static inline long set_restart_fn(struct restart_block *restart,
+ long (*fn)(struct restart_block *))
+{
+ restart->fn = fn;
+ arch_set_restart_data(restart);
+ return -ERESTART_RESTARTBLOCK;
+}
+
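set_restart_fn() bundles the restart_block setup with the arch hook and hands back the -ERESTART_RESTARTBLOCK the caller should propagate. A sketch of the usual pattern; my_wait_event() and the expires stash are assumptions:

static long example_wait_restart(struct restart_block *restart)
{
	return my_wait_event(restart->nanosleep.expires);
}

static long example_interruptible_wait(u64 expires)
{
	long ret = my_wait_event(expires);

	if (ret == -ERESTARTSYS) {
		struct restart_block *restart = &current->restart_block;

		/* Stash state, then arm the restart function. */
		restart->nanosleep.expires = expires;
		ret = set_restart_fn(restart, example_wait_restart);
	}
	return ret;
}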
#ifndef THREAD_ALIGN
#define THREAD_ALIGN THREAD_SIZE
#endif
@@ -84,6 +118,15 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
return test_bit(flag, (unsigned long *)&ti->flags);
}
+/*
+ * This may be used in noinstr code, and needs to be __always_inline to prevent
+ * inadvertent instrumentation.
+ */
+static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
+{
+ return READ_ONCE(ti->flags);
+}
+
#define set_thread_flag(flag) \
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
@@ -96,8 +139,61 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
+#define read_thread_flags() \
+ read_ti_thread_flags(current_thread_info())
+
+#define read_task_thread_flags(t) \
+ read_ti_thread_flags(task_thread_info(t))
+
+#ifdef CONFIG_GENERIC_ENTRY
+#define set_syscall_work(fl) \
+ set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+#define test_syscall_work(fl) \
+ test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+#define clear_syscall_work(fl) \
+ clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#define set_task_syscall_work(t, fl) \
+ set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+#define test_task_syscall_work(t, fl) \
+ test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+#define clear_task_syscall_work(t, fl) \
+ clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+
+#else /* CONFIG_GENERIC_ENTRY */
+
+#define set_syscall_work(fl) \
+ set_ti_thread_flag(current_thread_info(), TIF_##fl)
+#define test_syscall_work(fl) \
+ test_ti_thread_flag(current_thread_info(), TIF_##fl)
+#define clear_syscall_work(fl) \
+ clear_ti_thread_flag(current_thread_info(), TIF_##fl)
+
+#define set_task_syscall_work(t, fl) \
+ set_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#define test_task_syscall_work(t, fl) \
+ test_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#define clear_task_syscall_work(t, fl) \
+ clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#endif /* !CONFIG_GENERIC_ENTRY */
+
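
With CONFIG_GENERIC_ENTRY the SYSCALL_WORK_* flags live in thread_info::syscall_work instead of the TIF_* flag word, but call sites stay identical under both configurations because the macros paste either the SYSCALL_WORK_BIT_ or the TIF_ prefix onto the flag name. A hedged sketch of both sides of the API:

/* e.g. a ptracer enabling syscall tracing for a child: expands to
 * set_bit(SYSCALL_WORK_BIT_SYSCALL_TRACE, ...) with CONFIG_GENERIC_ENTRY,
 * or set_ti_thread_flag(..., TIF_SYSCALL_TRACE) otherwise.
 */
set_task_syscall_work(child, SYSCALL_TRACE);

/* e.g. the syscall entry path checking for seccomp work: */
if (test_syscall_work(SECCOMP))
        do_seccomp();   /* hypothetical handler */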
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+
+static __always_inline bool tif_need_resched(void)
+{
+ return arch_test_bit(TIF_NEED_RESCHED,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline bool tif_need_resched(void)
+{
+ return test_bit(TIF_NEED_RESCHED,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
@@ -129,15 +225,18 @@ __bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);
+void __copy_overflow(int size, unsigned long count);
+
static inline void copy_overflow(int size, unsigned long count)
{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+ if (IS_ENABLED(CONFIG_BUG))
+ __copy_overflow(size, count);
}
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
- int sz = __compiletime_object_size(addr);
+ int sz = __builtin_object_size(addr, 0);
if (unlikely(sz >= 0 && sz < bytes)) {
if (!__builtin_constant_p(bytes))
copy_overflow(sz, bytes);
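
check_copy_size() now asks the compiler directly via __builtin_object_size(ptr, 0), which evaluates to the object's size when it is known at compile time and to (size_t)-1 otherwise. A standalone illustration, compilable with GCC or Clang:

#include <stdio.h>

int main(void)
{
        char buf[8];
        char *p = buf;

        /* Known at compile time: prints 8. */
        printf("%zu\n", __builtin_object_size(buf, 0));

        /* Through a pointer the answer depends on what the optimizer can
         * track: typically (size_t)-1 at -O0, 8 at -O2. */
        printf("%zu\n", __builtin_object_size(p, 0));
        return 0;
}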
@@ -157,6 +256,11 @@ check_copy_size(const void *addr, size_t bytes, bool is_source)
static inline void arch_setup_new_exec(void) { }
#endif
+void arch_task_cache_init(void); /* for CONFIG_SH */
+void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 18d5a74bcc3d..c34173e6c5f1 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -38,7 +38,7 @@
* Define a minimum number of pids per cpu. Heuristically based
* on original pid max of 32k for 32 cpus. Also, increase the
* minimum settable value for pid_max on the running system based
- * on similar defaults. See kernel/pid.c:pidmap_init() for details.
+ * on similar defaults. See kernel/pid.c:pid_idr_init() for details.
*/
#define PIDS_PER_CPU_DEFAULT 1024
#define PIDS_PER_CPU_MIN 8
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 5db2b11ab085..4338ea9ac4fd 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -45,6 +45,8 @@ enum tb_cfg_pkg_type {
* @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
* Thunderbolt dock (and Display Port). All PCIe
* links downstream of the dock are removed.
+ * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when
+ *			 PCIe tunneling is disabled from the BIOS.
REPLACE_MARKER
*/
enum tb_security_level {
TB_SECURITY_NONE,
@@ -52,6 +54,7 @@ enum tb_security_level {
TB_SECURITY_SECURE,
TB_SECURITY_DPONLY,
TB_SECURITY_USBONLY,
+ TB_SECURITY_NOPCIE,
};
/**
@@ -83,9 +86,9 @@ struct tb {
unsigned long privdata[];
};
-extern struct bus_type tb_bus_type;
-extern struct device_type tb_service_type;
-extern struct device_type tb_xdomain_type;
+extern const struct bus_type tb_bus_type;
+extern const struct device_type tb_service_type;
+extern const struct device_type tb_xdomain_type;
#define TB_LINKS_PER_PHY_PORT 2
@@ -143,6 +146,7 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block,
size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
size_t block_len);
+struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
@@ -168,6 +172,20 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
/**
+ * enum tb_link_width - Thunderbolt/USB4 link width
+ * @TB_LINK_WIDTH_SINGLE: Single lane link
+ * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
+ * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
+ * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
+ */
+enum tb_link_width {
+ TB_LINK_WIDTH_SINGLE = BIT(0),
+ TB_LINK_WIDTH_DUAL = BIT(1),
+ TB_LINK_WIDTH_ASYM_TX = BIT(2),
+ TB_LINK_WIDTH_ASYM_RX = BIT(3),
+};
+
+/**
* struct tb_xdomain - Cross-domain (XDomain) connection
* @dev: XDomain device
* @tb: Pointer to the domain
@@ -176,30 +194,34 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
 * @route: Route string with which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
+ * @local_max_hopid: Maximum input HopID of this host
+ * @remote_max_hopid: Maximum input HopID of the remote host
* @lock: Lock to serialize access to the following fields of this structure
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the downstream facing link
+ * @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
- * @resume: The XDomain is being resumed
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
* queried first
- * @transmit_path: HopID which the remote end expects us to transmit
- * @transmit_ring: Local ring (hop) where outgoing packets are pushed
- * @receive_path: HopID which we expect the remote end to transmit
- * @receive_ring: Local ring (hop) where incoming packets arrive
* @service_ids: Used to generate IDs for the services
- * @properties: Properties exported by the remote domain
- * @property_block_gen: Generation of @properties
- * @properties_lock: Lock protecting @properties.
- * @get_uuid_work: Work used to retrieve @remote_uuid
- * @uuid_retries: Number of times left @remote_uuid is requested before
- * giving up
- * @get_properties_work: Work used to get remote domain properties
- * @properties_retries: Number of times left to read properties
+ * @in_hopids: Input HopIDs for DMA tunneling
+ * @out_hopids: Output HopIDs for DMA tunneling
+ * @local_property_block: Local block of properties
+ * @local_property_block_gen: Generation of @local_property_block
+ * @local_property_block_len: Length of the @local_property_block in dwords
+ * @remote_properties: Properties exported by the remote domain
+ * @remote_property_block_gen: Generation of @remote_properties
+ * @state: Next XDomain discovery state to run
+ * @state_work: Work used to run the next state
+ * @state_retries: Number of retries remaining for the state
* @properties_changed_work: Work used to notify the remote domain that
* our properties have changed
* @properties_changed_retries: Number of times left to send properties
* changed notification
+ * @bonding_possible: True if lane bonding is possible on local side
+ * @target_link_width: Target link width from the remote host
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain the remote domain is connected at (ICM only)
*
@@ -220,33 +242,53 @@ struct tb_xdomain {
u64 route;
u16 vendor;
u16 device;
+ unsigned int local_max_hopid;
+ unsigned int remote_max_hopid;
struct mutex lock;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ enum tb_link_width link_width;
+ bool link_usb4;
bool is_unplugged;
- bool resume;
bool needs_uuid;
- u16 transmit_path;
- u16 transmit_ring;
- u16 receive_path;
- u16 receive_ring;
struct ida service_ids;
- struct tb_property_dir *properties;
- u32 property_block_gen;
- struct delayed_work get_uuid_work;
- int uuid_retries;
- struct delayed_work get_properties_work;
- int properties_retries;
+ struct ida in_hopids;
+ struct ida out_hopids;
+ u32 *local_property_block;
+ u32 local_property_block_gen;
+ u32 local_property_block_len;
+ struct tb_property_dir *remote_properties;
+ u32 remote_property_block_gen;
+ int state;
+ struct delayed_work state_work;
+ int state_retries;
struct delayed_work properties_changed_work;
int properties_changed_retries;
+ bool bonding_possible;
+ u8 target_link_width;
u8 link;
u8 depth;
};
-int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
- u16 transmit_ring, u16 receive_path,
- u16 receive_ring);
-int tb_xdomain_disable_paths(struct tb_xdomain *xd);
+int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
+void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
+int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
+void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
+int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
+void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
+ int transmit_ring, int receive_path,
+ int receive_ring);
+int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
+ int transmit_ring, int receive_path,
+ int receive_ring);
+
+static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
+{
+ return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
+}
+
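The DMA path API now makes the service driver allocate HopIDs explicitly instead of reading fixed u16 pairs out of the XDomain. A hedged sketch of the expected sequence (error handling omitted; the rings and the local/remote HopID pairing are assumed to follow the service protocol, as a network-style driver would):

int in_hopid, out_hopid;

in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);   /* any free HopID */
out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);

/* ...negotiate the HopIDs with the remote end, then: */
tb_xdomain_enable_paths(xd, out_hopid, tx_ring->hop,
                        in_hopid, rx_ring->hop);

/* teardown */
tb_xdomain_disable_all_paths(xd);
tb_xdomain_release_in_hopid(xd, in_hopid);
tb_xdomain_release_out_hopid(xd, out_hopid);
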
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
@@ -344,6 +386,9 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
* @prtcvers: Protocol version from the properties directory
* @prtcrevs: Protocol software revision from the properties directory
* @prtcstns: Protocol settings mask from the properties directory
+ * @debugfs_dir: Pointer to the service debugfs directory. Always created
+ * when debugfs is enabled. Can be used by service drivers to
+ * add their own entries under the service.
*
 * Each domain exposes a set of services it supports as a collection of
* properties. For each service there will be one corresponding
@@ -357,6 +402,7 @@ struct tb_service {
u32 prtcvers;
u32 prtcrevs;
u32 prtcstns;
+ struct dentry *debugfs_dir;
};
static inline struct tb_service *tb_service_get(struct tb_service *svc)
@@ -436,9 +482,11 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
* @msix_ida: Used to allocate MSI-X vectors for rings
* @going_away: The host controller device is about to disappear so when
* this flag is set, avoid touching the hardware anymore.
+ * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
* @interrupt_work: Work scheduled to handle ring interrupt when no
* MSI-X is used.
* @hop_count: Number of rings (end point hops) supported by NHI.
+ * @quirks: NHI specific quirks if any
*/
struct tb_nhi {
spinlock_t lock;
@@ -449,8 +497,10 @@ struct tb_nhi {
struct tb_ring **rx_rings;
struct ida msix_ida;
bool going_away;
+ bool iommu_dma_protection;
struct work_struct interrupt_work;
u32 hop_count;
+ unsigned long quirks;
};
/**
@@ -471,6 +521,8 @@ struct tb_nhi {
* @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
* @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
* @flags: Ring specific flags
+ * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
+ * RX ring. For TX ring this should be set to %0.
* @sof_mask: Bit mask used to detect start of frame PDF
* @eof_mask: Bit mask used to detect end of frame PDF
* @start_poll: Called when ring interrupt is triggered to start
@@ -494,6 +546,7 @@ struct tb_ring {
int irq;
u8 vector;
unsigned int flags;
+ int e2e_tx_hop;
u16 sof_mask;
u16 eof_mask;
void (*start_poll)(void *data);
@@ -504,6 +557,8 @@ struct tb_ring {
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME BIT(1)
+/* Enable end-to-end flow control */
+#define RING_FLAG_E2E BIT(2)
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
@@ -552,7 +607,8 @@ struct ring_frame {
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags, u16 sof_mask, u16 eof_mask,
+ unsigned int flags, int e2e_tx_hop,
+ u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
index 2fc854155c27..441b2988e66a 100644
--- a/include/linux/ti-emif-sram.h
+++ b/include/linux/ti-emif-sram.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI AM33XX EMIF Routines
*
* Copyright (C) 2016-2017 Texas Instruments Inc.
* Dave Gerlach
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_TI_EMIF_H
#define __LINUX_TI_EMIF_H
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 44a7f9169ac6..10642d4844f0 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -271,7 +271,7 @@ long st_kim_stop(void *);
void st_kim_complete(void *);
void kim_st_list_protocols(struct st_data_s *, void *);
-void st_kim_recv(void *, const unsigned char *, long);
+void st_kim_recv(void *disc_data, const u8 *data, size_t count);
/*
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7340613c7eff..4924a33700b7 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,8 @@
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
+#include <linux/static_key.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
@@ -18,16 +20,22 @@ extern void __init tick_init(void);
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
-extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
-static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
+extern int tick_cpu_dying(unsigned int cpu);
+extern void tick_assert_timekeeping_handover(void);
+#else
+#define tick_cpu_dying NULL
+static inline void tick_assert_timekeeping_handover(void) { }
+#endif
+
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
@@ -62,18 +70,14 @@ enum tick_broadcast_state {
TICK_BROADCAST_ENTER,
};
+extern struct static_key_false arch_needs_tick_broadcast;
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
-extern void tick_offline_cpu(unsigned int cpu);
-#else
-static inline void tick_offline_cpu(unsigned int cpu) { }
-#endif
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
@@ -139,14 +143,6 @@ extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-
-static inline void tick_nohz_idle_stop_tick_protected(void)
-{
- local_irq_disable();
- tick_nohz_idle_stop_tick();
- local_irq_enable();
-}
-
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
@@ -169,13 +165,18 @@ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-
-static inline void tick_nohz_idle_stop_tick_protected(void) { }
#endif /* !CONFIG_NO_HZ_COMMON */
+/*
+ * Mask of CPUs that are nohz_full.
+ *
+ * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu()
+ * check.
+ */
+extern cpumask_var_t tick_nohz_full_mask;
+
#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
-extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
@@ -185,13 +186,17 @@ static inline bool tick_nohz_full_enabled(void)
return tick_nohz_full_running;
}
-static inline bool tick_nohz_full_cpu(int cpu)
-{
- if (!tick_nohz_full_enabled())
- return false;
-
- return cpumask_test_cpu(cpu, tick_nohz_full_mask);
-}
+/*
+ * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
+ * the cpu expression (typically smp_processor_id()) _after_ the static
+ * key.
+ */
+#define tick_nohz_full_cpu(_cpu) ({ \
+ bool __ret = false; \
+ if (tick_nohz_full_enabled()) \
+ __ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \
+ __ret; \
+})
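
Turning tick_nohz_full_cpu() into a statement-expression macro matters because an inline function would evaluate its argument before the body runs; with the macro, the cpu expression is only evaluated once the static key confirms nohz_full is enabled at all. The tick_nohz_user_enter_prepare() helper added later in this patch is exactly that pattern:

if (tick_nohz_full_cpu(smp_processor_id()))     /* smp_processor_id() runs
                                                 * only past the static key */
        rcu_nocb_flush_deferred_wakeup();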
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
@@ -207,10 +212,11 @@ extern void tick_nohz_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit);
-extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
+extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
+extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
/*
* The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
@@ -252,11 +258,11 @@ static inline void tick_dep_clear_task(struct task_struct *tsk,
if (tick_nohz_full_enabled())
tick_nohz_dep_clear_task(tsk, bit);
}
-static inline void tick_dep_set_signal(struct signal_struct *signal,
+static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
if (tick_nohz_full_enabled())
- tick_nohz_dep_set_signal(signal, bit);
+ tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit)
@@ -275,6 +281,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
@@ -284,7 +291,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
-static inline void tick_dep_set_signal(struct signal_struct *signal,
+static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit) { }
@@ -300,4 +307,10 @@ static inline void tick_nohz_task_switch(void)
__tick_nohz_task_switch();
}
+static inline void tick_nohz_user_enter_prepare(void)
+{
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ rcu_nocb_flush_deferred_wakeup();
+}
+
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index b142cb5f5a53..16cf4522d6f3 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -21,19 +21,6 @@ extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec);
-/* Some architectures do not supply their own clocksource.
- * This is mainly the case in architectures that get their
- * inter-tick times by reading the counter on their interval
- * timer. Since these timers wrap every tick, they're not really
- * useful as clocksources. Wrapping them to act like one is possible
- * but not very efficient. So we provide a callout these arches
- * can implement for use with the jiffies clocksource to provide
- * finer then tick granular time.
- */
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
-extern u32 (*arch_gettimeoffset)(void);
-#endif
-
#ifdef CONFIG_POSIX_TIMERS
extern void clear_itimer(void);
#else
diff --git a/include/linux/time64.h b/include/linux/time64.h
index c9dcb3e5781f..f1bcea8c124a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -20,12 +20,17 @@ struct itimerspec64 {
struct timespec64 it_value;
};
+/* Parameters used to convert the timespec values: */
+#define PSEC_PER_NSEC 1000L
+
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define TIME64_MIN (-TIME64_MAX - 1)
#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_MIN (-KTIME_MAX - 1)
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
/*
* Limits for settimeofday():
@@ -124,6 +129,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
*/
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
+ /* Prevent multiplication overflow / underflow */
+ if (ts->tv_sec >= KTIME_SEC_MAX)
+ return KTIME_MAX;
+
+ if (ts->tv_sec <= KTIME_SEC_MIN)
+ return KTIME_MIN;
+
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
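
The clamp guards the s64 multiplication: KTIME_MAX is 2^63 - 1 ns, so KTIME_SEC_MAX works out to 9223372036 seconds (about 292 years), and any tv_sec at or beyond that bound would overflow the multiply. A worked sketch with the constants spelled out:

static s64 saturating_to_ns(s64 sec, long nsec)
{
        const s64 sec_max = 9223372036854775807LL / 1000000000LL;
                                        /* KTIME_SEC_MAX = 9223372036 */
        if (sec >= sec_max)
                return 9223372036854775807LL;           /* KTIME_MAX */
        if (sec <= -sec_max)            /* KTIME_SEC_MIN truncates to the
                                         * same magnitude */
                return -9223372036854775807LL - 1;      /* KTIME_MIN */
        return sec * 1000000000LL + nsec;
}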
@@ -133,7 +145,7 @@ static inline s64 timespec64_to_ns(const struct timespec64 *ts)
*
* Returns the timespec64 representation of the nsec parameter.
*/
-extern struct timespec64 ns_to_timespec64(const s64 nsec);
+extern struct timespec64 ns_to_timespec64(s64 nsec);
/**
* timespec64_add_ns - Adds nanoseconds to a timespec64
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 5b6031385db0..876e31b4461d 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -4,21 +4,22 @@
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
+#include <linux/time64.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
+struct vm_area_struct;
+
struct timens_offsets {
struct timespec64 monotonic;
struct timespec64 boottime;
};
struct time_namespace {
- struct kref kref;
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct ns_common ns;
@@ -37,20 +38,21 @@ extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
struct time_namespace *copy_time_ns(unsigned long flags,
struct user_namespace *user_ns,
struct time_namespace *old_ns);
-void free_time_ns(struct kref *kref);
-int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
-struct vdso_data *arch_get_vdso_data(void *vvar_page);
+void free_time_ns(struct time_namespace *ns);
+void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
+struct page *find_timens_vvar_page(struct vm_area_struct *vma);
static inline void put_time_ns(struct time_namespace *ns)
{
- kref_put(&ns->kref, free_time_ns);
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_time_ns(ns);
}
void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m);
@@ -77,6 +79,20 @@ static inline void timens_add_boottime(struct timespec64 *ts)
*ts = timespec64_add(*ts, ns_offsets->boottime);
}
+static inline u64 timens_add_boottime_ns(u64 nsec)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ return nsec + timespec64_to_ns(&ns_offsets->boottime);
+}
+
+static inline void timens_sub_boottime(struct timespec64 *ts)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ *ts = timespec64_sub(*ts, ns_offsets->boottime);
+}
+
ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
struct timens_offsets *offsets);
@@ -122,18 +138,33 @@ struct time_namespace *copy_time_ns(unsigned long flags,
return old_ns;
}
-static inline int timens_on_fork(struct nsproxy *nsproxy,
+static inline void timens_on_fork(struct nsproxy *nsproxy,
struct task_struct *tsk)
{
- return 0;
+ return;
+}
+
+static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
}
static inline void timens_add_monotonic(struct timespec64 *ts) { }
static inline void timens_add_boottime(struct timespec64 *ts) { }
+
+static inline u64 timens_add_boottime_ns(u64 nsec)
+{
+ return nsec;
+}
+
+static inline void timens_sub_boottime(struct timespec64 *ts) { }
+
static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
{
return tim;
}
#endif
+struct vdso_data *arch_get_vdso_data(void *vvar_page);
+
#endif /* _LINUX_TIMENS_H */
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index 754b74a2167f..c6540ceea143 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -124,7 +124,7 @@ extern u64 timecounter_read(struct timecounter *tc);
* This allows conversion of cycle counter values which were generated
* in the past.
*/
-extern u64 timecounter_cyc2time(struct timecounter *tc,
+extern u64 timecounter_cyc2time(const struct timecounter *tc,
u64 cycle_tstamp);
#endif
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index d5471d6fa778..7e50cbd97f86 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -3,6 +3,8 @@
#define _LINUX_TIMEKEEPING_H
#include <linux/errno.h>
+#include <linux/clocksource_ids.h>
+#include <linux/ktime.h>
/* Included from linux/ktime.h */
@@ -10,8 +12,7 @@ void timekeeping_init(void);
extern int timekeeping_suspended;
/* Architecture timer tick functions: */
-extern void update_process_times(int user);
-extern void xtime_update(unsigned long ticks);
+extern void legacy_timer_tick(unsigned long ticks);
/*
* Get and set timeofday
@@ -177,6 +178,7 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
+extern u64 ktime_get_tai_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
/*
@@ -222,6 +224,18 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
+/*
+ * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
+ * @mono: Monotonic timestamp
+ * @boot: Boottime timestamp
+ * @real: Realtime timestamp
+ */
+struct ktime_timestamps {
+ u64 mono;
+ u64 boot;
+ u64 real;
+};
+
/**
* struct system_time_snapshot - simultaneous raw/real time capture with
* counter value
@@ -232,11 +246,12 @@ extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
* @cs_was_changed_seq: The sequence number of clocksource change events
*/
struct system_time_snapshot {
- u64 cycles;
- ktime_t real;
- ktime_t raw;
- unsigned int clock_was_set_seq;
- u8 cs_was_changed_seq;
+ u64 cycles;
+ ktime_t real;
+ ktime_t raw;
+ enum clocksource_ids cs_id;
+ unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
};
/**
@@ -253,15 +268,17 @@ struct system_device_crosststamp {
};
/**
- * struct system_counterval_t - system counter value with the pointer to the
+ * struct system_counterval_t - system counter value with the ID of the
* corresponding clocksource
* @cycles: System counter value
- * @cs: Clocksource corresponding to system counter value. Used by
- * timekeeping code to verify comparibility of two cycle values
+ * @cs_id: Clocksource ID corresponding to system counter value. Used by
+ * timekeeping code to verify comparability of two cycle values.
+ * The default ID, CSID_GENERIC, does not identify a specific
+ * clocksource.
*/
struct system_counterval_t {
u64 cycles;
- struct clocksource *cs;
+ enum clocksource_ids cs_id;
};
/*
@@ -280,6 +297,9 @@ extern int get_device_system_crosststamp(
*/
extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
+/* NMI safe mono/boot/realtime timestamps */
+extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap);
+
/*
* Persistent clock related interfaces
*/
@@ -288,6 +308,8 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
extern int update_persistent_clock64(struct timespec64 now);
+#endif
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644
index 266017fc9ee9..000000000000
--- a/include/linux/timekeeping32.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
- return ktime_get_real_seconds();
-}
-
-#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 07910ae5ddd9..14a633ba61d6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -7,21 +7,7 @@
#include <linux/stddef.h>
#include <linux/debugobjects.h>
#include <linux/stringify.h>
-
-struct timer_list {
- /*
- * All fields that change during normal runtime grouped to the
- * same cacheline
- */
- struct hlist_node entry;
- unsigned long expires;
- void (*function)(struct timer_list *);
- u32 flags;
-
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
+#include <linux/timer_types.h>
#ifdef CONFIG_LOCKDEP
/*
@@ -50,16 +36,10 @@ struct timer_list {
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*
- * @TIMER_PINNED: A pinned timer will not be affected by any timer
- * placement heuristics (like, NOHZ) and will always expire on the CPU
- * on which the timer was enqueued.
- *
- * Note: Because enqueuing of timers can migrate the timer from one
- * CPU to another, pinned timers are not guaranteed to stay on the
- * initialy selected CPU. They move to the CPU on which the enqueue
- * function is invoked via mod_timer() or add_timer(). If the timer
- * should be placed on a particular CPU, then add_timer_on() has to be
- * used.
+ * @TIMER_PINNED: A pinned timer will always expire on the CPU on which the
+ * timer was enqueued. When a particular CPU is required, add_timer_on()
+ * has to be used. Enqueue via mod_timer() and add_timer() is always done
+ * on the local CPU.
*/
#define TIMER_CPUMASK 0x0003FFFF
#define TIMER_MIGRATING 0x00040000
@@ -67,6 +47,7 @@ struct timer_list {
#define TIMER_DEFERRABLE 0x00080000
#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
+#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
#define TIMER_ARRAYSHIFT 22
#define TIMER_ARRAYMASK 0xFFC00000
@@ -76,8 +57,7 @@ struct timer_list {
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.flags = (_flags), \
- __TIMER_LOCKDEP_MAP_INITIALIZER( \
- __FILE__ ":" __stringify(__LINE__)) \
+ __TIMER_LOCKDEP_MAP_INITIALIZER(FILE_LINE) \
}
#define DEFINE_TIMER(_name, _function) \
@@ -168,7 +148,6 @@ static inline int timer_pending(const struct timer_list * timer)
}
extern void add_timer_on(struct timer_list *timer, int cpu);
-extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int timer_reduce(struct timer_list *timer, unsigned long expires);
@@ -180,30 +159,45 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
extern void add_timer(struct timer_list *timer);
+extern void add_timer_local(struct timer_list *timer);
+extern void add_timer_global(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
+extern int timer_delete_sync(struct timer_list *timer);
+extern int timer_delete(struct timer_list *timer);
+extern int timer_shutdown_sync(struct timer_list *timer);
+extern int timer_shutdown(struct timer_list *timer);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
- extern int del_timer_sync(struct timer_list *timer);
-#else
-# define del_timer_sync(t) del_timer(t)
-#endif
+/**
+ * del_timer_sync - Delete a pending timer and wait for a running callback
+ * @timer: The timer to be deleted
+ *
+ * See timer_delete_sync() for detailed explanation.
+ *
+ * Do not use in new code. Use timer_delete_sync() instead.
+ */
+static inline int del_timer_sync(struct timer_list *timer)
+{
+ return timer_delete_sync(timer);
+}
-#define del_singleshot_timer_sync(t) del_timer_sync(t)
+/**
+ * del_timer - Delete a pending timer
+ * @timer: The timer to be deleted
+ *
+ * See timer_delete() for detailed explanation.
+ *
+ * Do not use in new code. Use timer_delete() instead.
+ */
+static inline int del_timer(struct timer_list *timer)
+{
+ return timer_delete(timer);
+}
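
del_timer() and del_timer_sync() survive only as wrappers around the timer_delete*() names; the shutdown variants additionally prevent the timer from ever being re-armed, which is what teardown paths want just before freeing the enclosing object. A hedged migration sketch (priv and poll_timer are illustrative):

/* old */
del_timer_sync(&priv->poll_timer);

/* new, same semantics */
timer_delete_sync(&priv->poll_timer);

/* teardown: also rejects future mod_timer()/add_timer() on this timer */
timer_shutdown_sync(&priv->poll_timer);
kfree(priv);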
extern void init_timers(void);
-extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-struct ctl_table;
-
-extern unsigned int sysctl_timer_migration;
-int timer_migration_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-#endif
-
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
diff --git a/include/linux/timer_types.h b/include/linux/timer_types.h
new file mode 100644
index 000000000000..fae5a388f914
--- /dev/null
+++ b/include/linux/timer_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMER_TYPES_H
+#define _LINUX_TIMER_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/types.h>
+
+struct timer_list {
+ /*
+ * All fields that change during normal runtime grouped to the
+ * same cacheline
+ */
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(struct timer_list *);
+ u32 flags;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#endif /* _LINUX_TIMER_TYPES_H */
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index 93884086f392..62973f7d4610 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -3,18 +3,7 @@
#define _LINUX_TIMERQUEUE_H
#include <linux/rbtree.h>
-#include <linux/ktime.h>
-
-
-struct timerqueue_node {
- struct rb_node node;
- ktime_t expires;
-};
-
-struct timerqueue_head {
- struct rb_root_cached rb_root;
-};
-
+#include <linux/timerqueue_types.h>
extern bool timerqueue_add(struct timerqueue_head *head,
struct timerqueue_node *node);
@@ -35,7 +24,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
struct rb_node *leftmost = rb_first_cached(&head->rb_root);
- return rb_entry(leftmost, struct timerqueue_node, node);
+ return rb_entry_safe(leftmost, struct timerqueue_node, node);
}
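
rb_entry_safe() propagates a NULL leftmost pointer instead of computing a bogus container address from it, so timerqueue_getnext() now returns NULL for an empty queue and callers can test the result directly:

struct timerqueue_node *next = timerqueue_getnext(&head);

if (next)
        program_next_event(next->expires);      /* hypothetical consumer */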
static inline void timerqueue_init(struct timerqueue_node *node)
diff --git a/include/linux/timerqueue_types.h b/include/linux/timerqueue_types.h
new file mode 100644
index 000000000000..dc298d0923e3
--- /dev/null
+++ b/include/linux/timerqueue_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMERQUEUE_TYPES_H
+#define _LINUX_TIMERQUEUE_TYPES_H
+
+#include <linux/rbtree_types.h>
+#include <linux/types.h>
+
+struct timerqueue_node {
+ struct rb_node node;
+ ktime_t expires;
+};
+
+struct timerqueue_head {
+ struct rb_root_cached rb_root;
+};
+
+#endif /* _LINUX_TIMERQUEUE_TYPES_H */
diff --git a/include/linux/timex.h b/include/linux/timex.h
index ce0859763670..3871b06bd302 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -62,6 +62,8 @@
#include <linux/types.h>
#include <linux/param.h>
+unsigned long random_get_entropy_fallback(void);
+
#include <asm/timex.h>
#ifndef random_get_entropy
@@ -74,8 +76,14 @@
*
* By default we use get_cycles() for this purpose, but individual
* architectures may override this in their asm/timex.h header file.
+ * If a given arch does not have get_cycles(), then we fall back to
+ * using random_get_entropy_fallback().
*/
-#define random_get_entropy() get_cycles()
+#ifdef get_cycles
+#define random_get_entropy() ((unsigned long)get_cycles())
+#else
+#define random_get_entropy() random_get_entropy_fallback()
+#endif
#endif
/*
@@ -133,7 +141,7 @@
/*
* kernel variables
- * Note: maximum error = NTP synch distance = dispersion + delay / 2;
+ * Note: maximum error = NTP sync distance = dispersion + delay / 2;
* estimated error = NTP dispersion.
*/
extern unsigned long tick_usec; /* USER_HZ period (usec) */
@@ -157,7 +165,6 @@ extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *
extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
-void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 498dbcedb451..3c13240077b8 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -21,7 +21,12 @@ struct tnum {
struct tnum tnum_const(u64 value);
/* A completely unknown value */
extern const struct tnum tnum_unknown;
-/* A value that's unknown except that @min <= value <= @max */
+/* An unknown value that is a superset of @min <= value <= @max.
+ *
+ * Could include values outside the range of [@min, @max].
+ * For example tnum_range(0, 2) is represented by {0, 1, 2, *3*},
+ * rather than the intended set of {0, 1, 2}.
+ */
struct tnum tnum_range(u64 min, u64 max);
/* Arithmetic and logical ops */
@@ -73,7 +78,18 @@ static inline bool tnum_is_unknown(struct tnum a)
*/
bool tnum_is_aligned(struct tnum a, u64 size);
-/* Returns true if @b represents a subset of @a. */
+/* Returns true if @b represents a subset of @a.
+ *
+ * Note that using tnum_range() as @a requires extra caution, as tnum_in()
+ * may return true unexpectedly due to the tnum's limited ability to
+ * represent tight ranges, e.g.
+ *
+ * tnum_in(tnum_range(0, 2), tnum_const(3)) == true
+ *
+ * As a rule of thumb, if @a is explicitly coded rather than coming from
+ * reg->var_off, it should be in the form of tnum_const(), tnum_range(0, 2**n - 1),
+ * or tnum_range(2**n, 2**(n+1) - 1).
+ */
bool tnum_in(struct tnum a, struct tnum b);
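
The imprecision follows from the representation: a tnum is a (value, mask) pair in which mask bits are unknown, so a range gets widened to the smallest mask-shaped superset. Working through tnum_range(0, 2) under the usual construction (a sketch, not the verbatim implementation):

u64 chi = 0 ^ 2;                /* differing bits between min and max: 0b10 */
u8 bits = fls64(chi);           /* position of highest differing bit: 2 */
u64 delta = (1ULL << bits) - 1; /* unknown-bit mask: 0b11 */
/* result: value = 0 & ~delta = 0, mask = 3, which matches every x
 * with (x & ~3) == 0, i.e. {0, 1, 2, 3} -- 3 sneaks in. */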
/* Formatting functions. These have snprintf-like semantics: they will write
@@ -90,6 +106,10 @@ int tnum_sbin(char *str, size_t size, struct tnum a);
struct tnum tnum_subreg(struct tnum a);
/* Returns the tnum with the lower 32-bit subreg cleared */
struct tnum tnum_clear_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg in *reg* set to the lower
+ * 32-bit subreg in *subreg*
+ */
+struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg);
/* Returns the tnum with the lower 32-bit subreg set to value */
struct tnum tnum_const_subreg(struct tnum a, u32 value);
/* Returns true if 32-bit subreg @a is a known constant */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 608fa4aadf0e..52f5850730b3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -48,6 +48,7 @@ int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
#define REMOTE_DISTANCE 20
+#define DISTANCE_BITS 8
#ifndef node_distance
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
@@ -179,26 +180,60 @@ static inline int cpu_to_mem(int cpu)
#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
+#if defined(topology_die_id) && defined(topology_die_cpumask)
+#define TOPOLOGY_DIE_SYSFS
+#endif
+#if defined(topology_cluster_id) && defined(topology_cluster_cpumask)
+#define TOPOLOGY_CLUSTER_SYSFS
+#endif
+#if defined(topology_book_id) && defined(topology_book_cpumask)
+#define TOPOLOGY_BOOK_SYSFS
+#endif
+#if defined(topology_drawer_id) && defined(topology_drawer_cpumask)
+#define TOPOLOGY_DRAWER_SYSFS
+#endif
+
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_die_id
#define topology_die_id(cpu) ((void)(cpu), -1)
#endif
+#ifndef topology_cluster_id
+#define topology_cluster_id(cpu) ((void)(cpu), -1)
+#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
+#ifndef topology_book_id
+#define topology_book_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_drawer_id
+#define topology_drawer_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_ppin
+#define topology_ppin(cpu) ((void)(cpu), 0ull)
+#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_cluster_cpumask
+#define topology_cluster_cpumask(cpu) cpumask_of(cpu)
+#endif
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_book_cpumask
+#define topology_book_cpumask(cpu) cpumask_of(cpu)
+#endif
+#ifndef topology_drawer_cpumask
+#define topology_drawer_cpumask(cpu) cpumask_of(cpu)
+#endif
-#ifdef CONFIG_SCHED_SMT
+#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
return topology_sibling_cpumask(cpu);
@@ -210,5 +245,38 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
return cpumask_of_node(cpu_to_node(cpu));
}
+#ifdef CONFIG_NUMA
+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
+extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
+#else
+static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+{
+ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
+}
+
+static inline const struct cpumask *
+sched_numa_hop_mask(unsigned int node, unsigned int hops)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_NUMA */
+
+/**
+ * for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
+ * from a given node.
+ * @mask: the iteration variable.
+ * @node: the NUMA node to start the search from.
+ *
+ * Requires rcu_read_lock() to be held.
+ *
+ * Yields cpu_online_mask for @node == NUMA_NO_NODE.
+ */
+#define for_each_numa_hop_mask(mask, node) \
+ for (unsigned int __hops = 0; \
+ mask = (node != NUMA_NO_NODE || __hops) ? \
+ sched_numa_hop_mask(node, __hops) : \
+ cpu_online_mask, \
+ !IS_ERR_OR_NULL(mask); \
+ __hops++)
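
A hedged usage sketch: each iteration yields a cpumask that is a superset of the previous hop's mask, so a caller collecting CPUs nearest-first skips the ones it has already seen:

const struct cpumask *mask, *prev = NULL;
unsigned int cpu;

rcu_read_lock();
for_each_numa_hop_mask(mask, node) {
        for_each_cpu(cpu, mask) {
                if (prev && cpumask_test_cpu(cpu, prev))
                        continue;       /* handled at a nearer hop */
                /* cpu sits at the current NUMA distance from node */
        }
        prev = mask;
}
rcu_read_unlock();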
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 7f65bd1dd307..1541454da03e 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -21,6 +21,7 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/hrtimer.h>
/* Definitions for a non-string torture-test module parameter. */
#define torture_param(type, name, init, msg) \
@@ -32,11 +33,30 @@
#define TOROUT_STRING(s) \
pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
#define VERBOSE_TOROUT_STRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
-#define VERBOSE_TOROUT_ERRSTRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
+do { \
+ if (verbose) { \
+ verbose_torout_sleep(); \
+ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \
+ } \
+} while (0)
+#define TOROUT_ERRSTRING(s) \
+ pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s)
+void verbose_torout_sleep(void);
+
+#define torture_init_error(firsterr) \
+({ \
+ int ___firsterr = (firsterr); \
+ \
+ WARN_ONCE(!IS_MODULE(CONFIG_RCU_TORTURE_TEST) && ___firsterr < 0, "Torture-test initialization failed with error code %d\n", ___firsterr); \
+ ___firsterr < 0; \
+})
/* Definitions for online/offline exerciser. */
+#ifdef CONFIG_HOTPLUG_CPU
+int torture_num_online_cpus(void);
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+static inline int torture_num_online_cpus(void) { return 1; }
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
typedef void torture_ofl_func(void);
bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
unsigned long *sum_offl, int *min_onl, int *max_onl);
@@ -61,6 +81,14 @@ static inline void torture_random_init(struct torture_random_state *trsp)
trsp->trs_count = 0;
}
+/* Definitions for high-resolution-timer sleeps. */
+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
+ struct torture_random_state *trsp);
+int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp);
+
/* Task shuffler, which causes CPUs to occasionally go idle. */
void torture_shuffle_task_register(struct task_struct *tp);
int torture_shuffle_init(long shuffint);
@@ -82,19 +110,27 @@ bool torture_must_stop(void);
bool torture_must_stop_irq(void);
void torture_kthread_stopping(char *title);
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
- char *f, struct task_struct **tp);
+ char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp));
void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_create_kthread(n, arg, tp) \
_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
- "Failed to create " #n, &(tp))
+ "Failed to create " #n, &(tp), NULL)
+#define torture_create_kthread_cb(n, arg, tp, cbf) \
+ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+ "Failed to create " #n, &(tp), cbf)
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+/* Scheduler-related definitions. */
#ifdef CONFIG_PREEMPTION
-#define torture_preempt_schedule() preempt_schedule()
+#define torture_preempt_schedule() __preempt_schedule()
#else
#define torture_preempt_schedule() do { } while (0)
#endif
+#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 8f4ff39f51e7..4ee9d13749ad 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -31,6 +31,7 @@ struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+/* if you add a new hash to this, increment TPM_MAX_HASHES below */
enum tpm_algorithms {
TPM_ALG_ERROR = 0x0000,
TPM_ALG_SHA1 = 0x0004,
@@ -42,6 +43,12 @@ enum tpm_algorithms {
TPM_ALG_SM3_256 = 0x0012,
};
+/*
+ * maximum number of hashing algorithms a TPM can have. This is
+ * basically a count of every hash in tpm_algorithms above
+ */
+#define TPM_MAX_HASHES 5
+
struct tpm_digest {
u16 alg_id;
u8 digest[TPM_MAX_DIGEST_SIZE];
@@ -146,7 +153,7 @@ struct tpm_chip {
struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
- const struct attribute_group *groups[3];
+ const struct attribute_group *groups[3 + TPM_MAX_HASHES];
unsigned int groups_cnt;
u32 nr_allocated_banks;
@@ -200,6 +207,7 @@ enum tpm2_return_codes {
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_FAILURE = 0x0101,
TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_UPGRADE = 0x012D,
TPM2_RC_COMMAND_CODE = 0x0143,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
@@ -257,19 +265,25 @@ enum tpm2_startup_types {
enum tpm2_cc_attrs {
TPM2_CC_ATTR_CHANDLES = 25,
TPM2_CC_ATTR_RHANDLE = 28,
+ TPM2_CC_ATTR_VENDOR = 29,
};
#define TPM_VID_INTEL 0x8086
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
+#define TPM_VID_ATML 0x1114
enum tpm_chip_flags {
- TPM_CHIP_FLAG_TPM2 = BIT(1),
- TPM_CHIP_FLAG_IRQ = BIT(2),
- TPM_CHIP_FLAG_VIRTUAL = BIT(3),
- TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
- TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
+ TPM_CHIP_FLAG_BOOTSTRAPPED = BIT(0),
+ TPM_CHIP_FLAG_TPM2 = BIT(1),
+ TPM_CHIP_FLAG_IRQ = BIT(2),
+ TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+ TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
+ TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
+ TPM_CHIP_FLAG_SUSPENDED = BIT(8),
+ TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
@@ -298,6 +312,8 @@ struct tpm_buf {
};
enum tpm2_object_attributes {
+ TPM2_OA_FIXED_TPM = BIT(1),
+ TPM2_OA_FIXED_PARENT = BIT(4),
TPM2_OA_USER_WITH_AUTH = BIT(6),
};
@@ -389,6 +405,14 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
tpm_buf_append(buf, (u8 *) &value2, 4);
}
+/*
+ * Check if TPM device is in the firmware upgrade mode.
+ */
+static inline bool tpm_is_firmware_upgrade(struct tpm_chip *chip)
+{
+ return chip->flags & TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+}
+
static inline u32 tpm2_rc_value(u32 rc)
{
return (rc & BIT(7)) ? rc & 0xff : rc;
@@ -397,6 +421,10 @@ static inline u32 tpm2_rc_value(u32 rc)
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern __must_check int tpm_try_get_ops(struct tpm_chip *chip);
+extern void tpm_put_ops(struct tpm_chip *chip);
+extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc);
extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest);
extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
@@ -410,7 +438,6 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-
static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx,
struct tpm_digest *digest)
{
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 739ba9a03ec1..7d68a5cc5881 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -157,7 +157,7 @@ struct tcg_algorithm_info {
* Return: size of the event on success, 0 on failure
*/
-static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header,
bool do_mapping)
{
@@ -198,8 +198,8 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
* The loop below will unmap these fields if the log is larger than
* one page, so save them here for reference:
*/
- count = READ_ONCE(event->count);
- event_type = READ_ONCE(event->event_type);
+ count = event->count;
+ event_type = event->event_type;
/* Verify that it's the log header */
if (event_header->pcr_idx != 0 ||
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 36d255d66f88..fdcd76b7be83 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_TRACE_H
#define _LINUX_TRACE_H
-#ifdef CONFIG_TRACING
+#define TRACE_EXPORT_FUNCTION BIT(0)
+#define TRACE_EXPORT_EVENT BIT(1)
+#define TRACE_EXPORT_MARKER BIT(2)
+
/*
* The trace export - an export of Ftrace output. The trace_export
* can process traces and export them to a registered destination as
@@ -15,24 +18,80 @@
* next - pointer to the next trace_export
 * write - copy traces which have been dealt with by ->commit() to
* the destination
+ * flags - which ftrace outputs are to be exported
*/
struct trace_export {
struct trace_export __rcu *next;
void (*write)(struct trace_export *, const void *, unsigned int);
+ int flags;
};
+struct trace_array;
+
+#ifdef CONFIG_TRACING
+
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
-struct trace_array;
+/**
+ * trace_array_puts - write a constant string into the trace buffer.
+ * @tr: The trace array to write to
+ * @str: The constant string to write
+ */
+#define trace_array_puts(tr, str) \
+ ({ \
+ str ? __trace_array_puts(tr, _THIS_IP_, str, strlen(str)) : -1; \
+ })
+int __trace_array_puts(struct trace_array *tr, unsigned long ip,
+ const char *str, int size);
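
The macro folds strlen() of the constant at the call site and short-circuits to -1 for a NULL string without entering the tracer. A hedged usage sketch ("myinstance" is hypothetical; NULL for the new @systems argument is assumed here to mean no restriction):

struct trace_array *tr = trace_array_get_by_name("myinstance", NULL);

if (tr)
        trace_array_puts(tr, "device probe started\n");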
void trace_printk_init_buffers(void);
+__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
- const char *fmt, ...);
+ const char *fmt, ...);
int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
-struct trace_array *trace_array_get_by_name(const char *name);
+struct trace_array *trace_array_get_by_name(const char *name, const char *systems);
int trace_array_destroy(struct trace_array *tr);
+
+/* For osnoise tracer */
+int osnoise_arch_register(void);
+void osnoise_arch_unregister(void);
+void osnoise_trace_irq_entry(int id);
+void osnoise_trace_irq_exit(int id, const char *desc);
+
+#else /* CONFIG_TRACING */
+static inline int register_ftrace_export(struct trace_export *export)
+{
+ return -EINVAL;
+}
+static inline int unregister_ftrace_export(struct trace_export *export)
+{
+ return 0;
+}
+static inline void trace_printk_init_buffers(void)
+{
+}
+static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
+ const char *fmt, ...)
+{
+ return 0;
+}
+static inline int trace_array_init_printk(struct trace_array *tr)
+{
+ return -EINVAL;
+}
+static inline void trace_array_put(struct trace_array *tr)
+{
+}
+static inline struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
+{
+ return NULL;
+}
+static inline int trace_array_destroy(struct trace_array *tr)
+{
+ return 0;
+}
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5c6943354049..6f9bdfb09d1d 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -15,6 +15,10 @@ struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
+union bpf_attr;
+
+/* Used for event string fields when they are NULL */
+#define EVENT_NULL_STR "(null)"
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
@@ -55,6 +59,19 @@ struct trace_event;
int trace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *event);
+extern __printf(2, 3)
+void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
+
+/* Used to find the offset and length of dynamic fields in trace events */
+struct trace_dynamic_info {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ u16 len;
+ u16 offset;
+#else
+ u16 offset;
+ u16 len;
+#endif
+} __packed;
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -87,12 +104,18 @@ struct trace_iterator {
unsigned long iter_flags;
void *temp; /* temp holder */
unsigned int temp_size;
+ char *fmt; /* modified format holder */
+ unsigned int fmt_size;
+ atomic_t wait_index;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
cpumask_var_t started;
+ /* Set when the file is closed to prevent new waiters */
+ bool closed;
+
/* it's true when current open file is snapshot */
bool snapshot;
@@ -130,7 +153,6 @@ struct trace_event_functions {
struct trace_event {
struct hlist_node node;
- struct list_head list;
int type;
struct trace_event_functions *funcs;
};
@@ -148,17 +170,76 @@ enum print_line_t {
enum print_line_t trace_handle_return(struct trace_seq *s);
-void tracing_generic_entry_update(struct trace_entry *entry,
- unsigned short type,
- unsigned long flags,
- int pc);
+static inline void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned short type,
+ unsigned int trace_ctx)
+{
+ entry->preempt_count = trace_ctx & 0xff;
+ entry->pid = current->pid;
+ entry->type = type;
+ entry->flags = trace_ctx >> 16;
+}
+
+unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
+
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
+ TRACE_FLAG_BH_OFF = 0x80,
+};
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ unsigned int irq_status = irqs_disabled_flags(irqflags) ?
+ TRACE_FLAG_IRQS_OFF : 0;
+ return tracing_gen_ctx_irq_test(irq_status);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+ unsigned long irqflags;
+
+ local_save_flags(irqflags);
+ return tracing_gen_ctx_flags(irqflags);
+}
+#else
+
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+#endif
+
+static inline unsigned int tracing_gen_ctx_dec(void)
+{
+ unsigned int trace_ctx;
+
+ trace_ctx = tracing_gen_ctx();
+	/*
+	 * Subtract one from the preemption counter if preemption is enabled,
+	 * see trace_event_buffer_reserve() for details.
+	 */
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ trace_ctx--;
+ return trace_ctx;
+}
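
A worked example of the packing used by tracing_generic_entry_update() above (the helper is hypothetical, shown only to make the layout concrete): the low byte of trace_ctx carries the preemption count and bits 16 and up carry the TRACE_FLAG_* word.

	static void dump_trace_ctx(unsigned int trace_ctx)
	{
		unsigned char preempt = trace_ctx & 0xff;
		unsigned int flags = trace_ctx >> 16;

		/* e.g. trace_ctx == 0x00080001: preempt_count 1, in hardirq */
		pr_info("preempt=%u hardirq=%d softirq=%d irqs_off=%d\n",
			preempt,
			!!(flags & TRACE_FLAG_HARDIRQ),
			!!(flags & TRACE_FLAG_SOFTIRQ),
			!!(flags & TRACE_FLAG_IRQS_OFF));
	}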
+
struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
#define TRACE_RECORD_CMDLINE BIT(0)
#define TRACE_RECORD_TGID BIT(1)
@@ -170,7 +251,8 @@ void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);
-int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
+ __printf(3, 4);
struct event_filter;
@@ -205,6 +287,7 @@ struct trace_event_fields {
const int align;
const int is_signed;
const int filter_type;
+ const int len;
};
int (*define_fields)(struct trace_event_call *);
};
@@ -232,8 +315,7 @@ struct trace_event_buffer {
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
struct pt_regs *regs;
};
@@ -249,8 +331,12 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
+ TRACE_EVENT_FL_DYNAMIC_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
+ TRACE_EVENT_FL_EPROBE_BIT,
+ TRACE_EVENT_FL_FPROBE_BIT,
+ TRACE_EVENT_FL_CUSTOM_BIT,
};
/*
@@ -260,8 +346,14 @@ enum {
* NO_SET_FILTER - Set when filter has error and is to be ignored
* IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
* TRACEPOINT - Event is a tracepoint
+ * DYNAMIC - Event is a dynamic event (created at run time)
* KPROBE - Event is a kprobe
* UPROBE - Event is a uprobe
+ * EPROBE - Event is an event probe
+ *  FPROBE        - Event is a function probe
+ *  CUSTOM        - Event is a custom event (to be attached to an existing tracepoint)
+ *                   This is set when the custom event has not been attached
+ *                   to a tracepoint yet; it is cleared once it is attached.
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -269,8 +361,12 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
+ TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
+ TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
+ TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
+ TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
};
#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
@@ -286,17 +382,17 @@ struct trace_event_call {
struct trace_event event;
char *print_fmt;
struct event_filter *filter;
- void *mod;
- void *data;
/*
- * bit 0: filter_active
- * bit 1: allow trace by non root (cap any)
- * bit 2: failed to apply filter
- * bit 3: trace internal event (do not enable)
- * bit 4: Event was enabled by module
- * bit 5: use call filter rather than file filter
- * bit 6: Event is a tracepoint
+ * Static events can disappear with modules,
+	 * whereas dynamic ones need their own ref count.
*/
+ union {
+ void *module;
+ atomic_t refcnt;
+ };
+ void *data;
+
+ /* See the TRACE_EVENT_FL_* flags above */
int flags; /* static flags of different events */
#ifdef CONFIG_PERF_EVENTS
@@ -309,6 +405,42 @@ struct trace_event_call {
#endif
};
+#ifdef CONFIG_DYNAMIC_EVENTS
+bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
+void trace_event_dyn_put_ref(struct trace_event_call *call);
+bool trace_event_dyn_busy(struct trace_event_call *call);
+#else
+static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
+{
+ /* Without DYNAMIC_EVENTS configured, nothing should be calling this */
+ return false;
+}
+static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
+{
+}
+static inline bool trace_event_dyn_busy(struct trace_event_call *call)
+{
+	/* Nothing should call this without DYNAMIC_EVENTS configured. */
+ return true;
+}
+#endif
+
+static inline bool trace_event_try_get_ref(struct trace_event_call *call)
+{
+ if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ return trace_event_dyn_try_get_ref(call);
+ else
+ return try_module_get(call->module);
+}
+
+static inline void trace_event_put_ref(struct trace_event_call *call)
+{
+ if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ trace_event_dyn_put_ref(call);
+ else
+ module_put(call->module);
+}
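
The expected usage pattern for these helpers, as a sketch (use_event() is hypothetical): pin the event before dereferencing it, since a static event can vanish with its module and a dynamic one with its ref count.

	static int use_event(struct trace_event_call *call)
	{
		if (!trace_event_try_get_ref(call))
			return -ENODEV;	/* event is being removed */

		/* ... safe to use @call here ... */

		trace_event_put_ref(call);
		return 0;
	}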
+
#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
@@ -336,7 +468,9 @@ static inline bool bpf_prog_array_valid(struct trace_event_call *call)
static inline const char *
trace_event_name(struct trace_event_call *call)
{
- if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
+ if (call->flags & TRACE_EVENT_FL_CUSTOM)
+ return call->name;
+ else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : NULL;
else
return call->name;
@@ -350,7 +484,6 @@ trace_get_fields(struct trace_event_call *event_call)
return event_call->class->get_fields(event_call);
}
-struct trace_array;
struct trace_subsystem_dir;
enum {
@@ -365,6 +498,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
+ EVENT_FILE_FL_FREED_BIT,
};
extern struct trace_event_file *trace_get_event_file(const char *instance,
@@ -503,6 +637,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
* TRIGGER_COND - When set, one or more triggers has an associated filter
* PID_FILTER - When set, the event is filtered based on pid
* WAS_ENABLED - Set when enabled to know to clear trace on module removal
+ * FREED - File descriptor is freed, all fields should be considered invalid
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -516,13 +651,14 @@ enum {
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
+ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
};
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
- struct dentry *dir;
+ struct eventfs_inode *ei;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
@@ -544,6 +680,7 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
+ atomic_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
@@ -569,9 +706,9 @@ struct trace_event_file {
} \
early_initcall(trace_init_perf_perm_##name);
-#define PERF_MAX_TRACE_SIZE 2048
+#define PERF_MAX_TRACE_SIZE 8192
-#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
+#define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */
enum event_trigger_type {
ETT_NONE = (0),
@@ -581,12 +718,14 @@ enum event_trigger_type {
ETT_EVENT_ENABLE = (1 << 3),
ETT_EVENT_HIST = (1 << 4),
ETT_HIST_ENABLE = (1 << 5),
+ ETT_EVENT_EPROBE = (1 << 6),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern enum event_trigger_type
-event_triggers_call(struct trace_event_file *file, void *rec,
+event_triggers_call(struct trace_event_file *file,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
@@ -594,6 +733,8 @@ event_triggers_post_call(struct trace_event_file *file,
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file);
+
/**
* trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
@@ -603,25 +744,25 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
* triggers that require testing the fields, it will return true,
* otherwise false.
*/
-static inline bool
+static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- event_triggers_call(file, NULL, NULL);
- if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- return true;
- if (eflags & EVENT_FILE_FL_PID_FILTER)
- return trace_event_ignore_this_pid(file);
- }
- return false;
+ if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
+ EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
+ return false;
+
+ return __trace_trigger_soft_disabled(file);
}
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
-int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
+int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
@@ -630,7 +771,10 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
- u64 *probe_offset, u64 *probe_addr);
+ u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed);
+int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -638,7 +782,7 @@ static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *c
}
static inline int
-perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
+perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
return -EOPNOTSUPP;
}
@@ -668,7 +812,17 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
- u64 *probe_addr)
+ u64 *probe_addr, unsigned long *missed)
+{
+ return -EOPNOTSUPP;
+}
+static inline int
+bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+static inline int
+bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
return -EOPNOTSUPP;
}
@@ -678,10 +832,13 @@ enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
FILTER_DYN_STRING,
+ FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_CPUMASK,
FILTER_COMM,
FILTER_CPU,
+ FILTER_STACKTRACE,
};
extern int trace_event_raw_init(struct trace_event_call *call);
@@ -692,8 +849,6 @@ extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
-#define is_signed_type(type) (((type)(-1)) < (type)1)
-
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
@@ -709,7 +864,7 @@ do { \
tracing_record_cmdline(current); \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_bprintk(ip, trace_printk_fmt, ##args); \
@@ -733,6 +888,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
u32 *fd_type, const char **symbol,
u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
@@ -741,7 +897,8 @@ extern int perf_uprobe_init(struct perf_event *event,
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,
- u64 *probe_offset, bool perf_type_tracepoint);
+ u64 *probe_offset, u64 *probe_addr,
+ bool perf_type_tracepoint);
#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
@@ -749,6 +906,9 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
+void perf_event_free_bpf_prog(struct perf_event *event);
+
void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
@@ -791,4 +951,37 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
#endif
+#define TRACE_EVENT_STR_MAX 512
+
+/*
+ * gcc warns that you cannot use a va_list in an inlined
+ * function, but it lets us make it into a macro :-/
+ */
+#define __trace_event_vstr_len(fmt, va) \
+({ \
+ va_list __ap; \
+ int __ret; \
+ \
+ va_copy(__ap, *(va)); \
+ __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \
+ va_end(__ap); \
+ \
+ min(__ret, TRACE_EVENT_STR_MAX); \
+})
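
A sketch of how the macro is meant to be called from a varargs function (my_vstr_len() is illustrative): pass the address of the caller's va_list so the macro can va_copy() it without consuming it.

	static int my_vstr_len(const char *fmt, ...)
	{
		va_list ap;
		int len;

		va_start(ap, fmt);
		len = __trace_event_vstr_len(fmt, &ap);	/* includes the NUL */
		va_end(ap);

		return len;
	}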
+
#endif /* _LINUX_TRACE_EVENT_H */
+
+/*
+ * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
+ * This is due to the way trace custom events work. If a file includes two
+ * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
+ * will override the TRACE_CUSTOM_EVENT and break the second include.
+ */
+
+#ifndef TRACE_CUSTOM_EVENT
+
+#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
+
+#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
new file mode 100644
index 000000000000..d48cd92d2364
--- /dev/null
+++ b/include/linux/trace_recursion.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TRACE_RECURSION_H
+#define _LINUX_TRACE_RECURSION_H
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_TRACING
+
+/* Only current can touch trace_recursion */
+
+/*
+ * For function tracing recursion:
+ * The order of these bits is important.
+ *
+ * When function tracing occurs, the following steps are taken:
+ *  If the arch does not support a ftrace feature:
+ * call internal function (uses INTERNAL bits) which calls...
+ * The function callback, which can use the FTRACE bits to
+ * check for recursion.
+ */
+enum {
+ /* Function recursion bits */
+ TRACE_FTRACE_BIT,
+ TRACE_FTRACE_NMI_BIT,
+ TRACE_FTRACE_IRQ_BIT,
+ TRACE_FTRACE_SIRQ_BIT,
+ TRACE_FTRACE_TRANSITION_BIT,
+
+ /* Internal use recursion bits */
+ TRACE_INTERNAL_BIT,
+ TRACE_INTERNAL_NMI_BIT,
+ TRACE_INTERNAL_IRQ_BIT,
+ TRACE_INTERNAL_SIRQ_BIT,
+ TRACE_INTERNAL_TRANSITION_BIT,
+
+ TRACE_BRANCH_BIT,
+/*
+ * Abuse of the trace_recursion.
+ * We need a way to maintain state if we are tracing the function
+ * graph in irq context, because we may want to trace a function that
+ * was called in irq context while irq tracing is off. Since this
+ * can only be modified by current, we can reuse trace_recursion.
+ */
+ TRACE_IRQ_BIT,
+
+ /* Set if the function is in the set_graph_function file */
+ TRACE_GRAPH_BIT,
+
+ /*
+ * In the very unlikely case that an interrupt came in
+	 * at the start of graph tracing, and we want to trace
+ * the function in that interrupt, the depth can be greater
+ * than zero, because of the preempted start of a previous
+ * trace. In an even more unlikely case, depth could be 2
+ * if a softirq interrupted the start of graph tracing,
+ * followed by an interrupt preempting a start of graph
+ * tracing in the softirq, and depth can even be 3
+ * if an NMI came in at the start of an interrupt function
+ * that preempted a softirq start of a function that
+ * preempted normal context!!!! Luckily, it can't be
+ * greater than 3, so the next two bits are a mask
+	 * of what the depth is when we set TRACE_GRAPH_BIT.
+ */
+
+ TRACE_GRAPH_DEPTH_START_BIT,
+ TRACE_GRAPH_DEPTH_END_BIT,
+
+ /*
+ * To implement set_graph_notrace, if this bit is set, we ignore
+ * function graph tracing of called functions, until the return
+ * function is called to clear it.
+ */
+ TRACE_GRAPH_NOTRACE_BIT,
+
+ /* Used to prevent recursion recording from recursing. */
+ TRACE_RECORD_RECURSION_BIT,
+};
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
+
+#define trace_recursion_depth() \
+ (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
+#define trace_recursion_set_depth(depth) \
+ do { \
+ current->trace_recursion &= \
+ ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
+ current->trace_recursion |= \
+ ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
+ } while (0)
+
+#define TRACE_CONTEXT_BITS 4
+
+#define TRACE_FTRACE_START TRACE_FTRACE_BIT
+
+#define TRACE_LIST_START TRACE_INTERNAL_BIT
+
+#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
+
+/*
+ * Used for setting context
+ * NMI = 0
+ * IRQ = 1
+ * SOFTIRQ = 2
+ * NORMAL = 3
+ */
+enum {
+ TRACE_CTX_NMI,
+ TRACE_CTX_IRQ,
+ TRACE_CTX_SOFTIRQ,
+ TRACE_CTX_NORMAL,
+ TRACE_CTX_TRANSITION,
+};
+
+static __always_inline int trace_get_context_bit(void)
+{
+ unsigned char bit = interrupt_context_level();
+
+ return TRACE_CTX_NORMAL - bit;
+}
+
+#ifdef CONFIG_FTRACE_RECORD_RECURSION
+extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
+# define do_ftrace_record_recursion(ip, pip) \
+ do { \
+ if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
+ trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
+ ftrace_record_recursion(ip, pip); \
+ trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
+ } \
+ } while (0)
+#else
+# define do_ftrace_record_recursion(ip, pip) do { } while (0)
+#endif
+
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+# define trace_warn_on_no_rcu(ip) \
+ ({ \
+ bool __ret = !rcu_is_watching(); \
+ if (__ret && !trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
+ trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
+ WARN_ONCE(true, "RCU not on for: %pS\n", (void *)ip); \
+ trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
+ } \
+ __ret; \
+ })
+#else
+# define trace_warn_on_no_rcu(ip) false
+#endif
+
+/*
+ * Preemption is guaranteed to be disabled when the returned bit is >= 0.
+ */
+static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
+ int start)
+{
+ unsigned int val = READ_ONCE(current->trace_recursion);
+ int bit;
+
+ if (trace_warn_on_no_rcu(ip))
+ return -1;
+
+ bit = trace_get_context_bit() + start;
+ if (unlikely(val & (1 << bit))) {
+ /*
+ * If an interrupt occurs during a trace, and another trace
+ * happens in that interrupt but before the preempt_count is
+ * updated to reflect the new interrupt context, then this
+ * will think a recursion occurred, and the event will be dropped.
+ * Let a single instance happen via the TRANSITION_BIT to
+ * not drop those events.
+ */
+ bit = TRACE_CTX_TRANSITION + start;
+ if (val & (1 << bit)) {
+ do_ftrace_record_recursion(ip, pip);
+ return -1;
+ }
+ }
+
+ val |= 1 << bit;
+ current->trace_recursion = val;
+ barrier();
+
+ preempt_disable_notrace();
+
+ return bit;
+}
+
+/*
+ * Preemption will be enabled (if it was previously enabled).
+ */
+static __always_inline void trace_clear_recursion(int bit)
+{
+ preempt_enable_notrace();
+ barrier();
+ trace_recursion_clear(bit);
+}
+
+/**
+ * ftrace_test_recursion_trylock - tests for recursion in same context
+ *
+ * Use this for ftrace callbacks. This will detect if the function
+ * tracing recursed in the same context (normal vs interrupt).
+ *
+ * Returns: -1 if a recursion happened.
+ * >= 0 if no recursion.
+ */
+static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
+ unsigned long parent_ip)
+{
+ return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
+}
+
+/**
+ * ftrace_test_recursion_unlock - called when function callback is complete
+ * @bit: The return of a successful ftrace_test_recursion_trylock()
+ *
+ * This is used at the end of a ftrace callback.
+ */
+static __always_inline void ftrace_test_recursion_unlock(int bit)
+{
+ trace_clear_recursion(bit);
+}
+
+#endif /* CONFIG_TRACING */
+#endif /* _LINUX_TRACE_RECURSION_H */
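
Putting the trylock/unlock pair together, a minimal ftrace callback might look like the sketch below (my_callback is hypothetical, and the signature assumes the current ftrace_func_t prototype):

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		int bit;

		bit = ftrace_test_recursion_trylock(ip, parent_ip);
		if (bit < 0)
			return;		/* recursed in this context, drop it */

		/* do work; preemption is disabled while the bit is held */

		ftrace_test_recursion_unlock(bit);
	}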
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6c30508fca19..1ef95c0287f0 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -8,20 +8,31 @@
/*
* Trace sequences are used to allow a function to call several other functions
- * to create a string of data to use (up to a max of PAGE_SIZE).
+ * to create a string of data to use.
+ *
+ * Make the trace seq 8K, which is typically PAGE_SIZE * 2 on
+ * most architectures. TRACE_SEQ_BUFFER_SIZE (TRACE_SEQ_SIZE minus
+ * the other fields of struct trace_seq) is the maximum size that
+ * the output of a trace event may be.
*/
+#define TRACE_SEQ_SIZE 8192
+#define TRACE_SEQ_BUFFER_SIZE (TRACE_SEQ_SIZE - \
+ (sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int)))
+
struct trace_seq {
- unsigned char buffer[PAGE_SIZE];
+ char buffer[TRACE_SEQ_BUFFER_SIZE];
struct seq_buf seq;
+ size_t readpos;
int full;
};
static inline void
trace_seq_init(struct trace_seq *s)
{
- seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
+ seq_buf_init(&s->seq, s->buffer, TRACE_SEQ_BUFFER_SIZE);
s->full = 0;
+ s->readpos = 0;
}
/**
@@ -51,7 +62,7 @@ static inline int trace_seq_used(struct trace_seq *s)
* that is about to be written to and then return the result
* of that write.
*/
-static inline unsigned char *
+static inline char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
return s->buffer + seq_buf_used(&s->seq);
@@ -95,9 +106,11 @@ extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
+char *trace_seq_acquire(struct trace_seq *s, unsigned int len);
#else /* CONFIG_TRACING */
-static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+static inline __printf(2, 3)
+void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
}
static inline void
@@ -138,6 +151,10 @@ static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
{
return 0;
}
+static inline char *trace_seq_acquire(struct trace_seq *s, unsigned int len)
+{
+ return NULL;
+}
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_SEQ_H */
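
For scale: with TRACE_SEQ_SIZE at 8192, TRACE_SEQ_BUFFER_SIZE works out to 8192 minus sizeof(struct seq_buf), a size_t and an int — roughly 8156 bytes on a typical 64-bit build, assuming a 24-byte seq_buf. A minimal usage sketch (fill_seq() is illustrative):

	static void fill_seq(struct trace_seq *s)
	{
		trace_seq_init(s);
		trace_seq_printf(s, "cpu=%d\n", raw_smp_processor_id());
		/* trace_seq_used(s) now reports how much of the buffer is used */
	}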
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 99912445974c..7a5fe17b6bf9 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -21,6 +21,72 @@ struct file_operations;
#ifdef CONFIG_TRACING
+struct eventfs_file;
+
+/**
+ * eventfs_callback - A callback function to create dynamic files in eventfs
+ * @name: The name of the file that is to be created
+ * @mode: return the file mode for the file (RW access, etc)
+ * @data: data to pass to the created file ops
+ * @fops: the file operations of the created file
+ *
+ * The eventfs files are dynamically created. The struct eventfs_entry array
+ * passed to eventfs_create_dir() or eventfs_create_events_dir() is used
+ * to create the files within those directories. When a lookup
+ * or access to a file within the directory is made, the struct eventfs_entry
+ * array is used to find the callback() with the matching name that is being
+ * referenced (for lookups, the entire array is iterated and each callback
+ * is called).
+ *
+ * The callback will be called with @name for the name of the file to create.
+ * The callback can return less than 1 to indicate that no file should be
+ * created.
+ *
+ * If a file is to be created, then @mode should be populated with the file
+ * mode (permissions) with which the file is created. This is
+ * used to set the created inode's i_mode field.
+ *
+ * The @data should be set to the data passed to the other file operations
+ * (read, write, etc). Note, @data will also point to the data passed in
+ * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback
+ * can replace the data if it chooses to. Otherwise, the original data
+ * will be used for the file operation functions.
+ *
+ * The @fops should be set to the file operations that will be used to create
+ * the inode.
+ *
+ * NB. This callback is called while holding internal locks of the eventfs
+ * system. The callback must not call any code that might also call into
+ * the tracefs or eventfs system or it will risk creating a deadlock.
+ */
+typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops);
+
+/**
+ * struct eventfs_entry - dynamically created eventfs file call back handler
+ * @name: The name of the dynamic file in an eventfs directory
+ * @callback: The callback to get the fops of the file when it is created
+ *
+ * See the eventfs_callback() typedef for how to set up @callback.
+ */
+struct eventfs_entry {
+ const char *name;
+ eventfs_callback callback;
+};
+
+struct eventfs_inode;
+
+struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
+
+struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
+
+void eventfs_remove_events_dir(struct eventfs_inode *ei);
+void eventfs_remove_dir(struct eventfs_inode *ei);
+
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
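
To make the callback contract concrete, a hypothetical entry table and callback (the names and file operations are illustrative only):

	static int my_events_callback(const char *name, umode_t *mode, void **data,
				      const struct file_operations **fops)
	{
		if (strcmp(name, "enable") != 0)
			return 0;		/* not ours, create no file */

		*mode = 0644;
		*fops = &my_enable_fops;	/* hypothetical fops */
		return 1;
	}

	static const struct eventfs_entry my_entries[] = {
		{ .name = "enable", .callback = my_events_callback },
	};

	/*
	 * ei = eventfs_create_dir("my_dir", parent_ei, my_entries,
	 *			   ARRAY_SIZE(my_entries), my_data);
	 */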
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
deleted file mode 100644
index 36fb3bbed6b2..000000000000
--- a/include/linux/tracehook.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Tracing hooks
- *
- * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
- *
- * This file defines hook entry points called by core code where
- * user tracing/debugging support might need to do something. These
- * entry points are called tracehook_*(). Each hook declared below
- * has a detailed kerneldoc comment giving the context (locking et
- * al) from which it is called, and the meaning of its return value.
- *
- * Each function here typically has only one call site, so it is ok
- * to have some nontrivial tracehook_*() inlines. In all cases, the
- * fast path when no tracing is enabled should be very short.
- *
- * The purpose of this file and the tracehook_* layer is to consolidate
- * the interface that the kernel core and arch code uses to enable any
- * user debugging or tracing facility (such as ptrace). The interfaces
- * here are carefully documented so that maintainers of core and arch
- * code do not need to think about the implementation details of the
- * tracing facilities. Likewise, maintainers of the tracing code do not
- * need to understand all the calling core or arch code in detail, just
- * documented circumstances of each call, such as locking conditions.
- *
- * If the calling core code changes so that locking is different, then
- * it is ok to change the interface documented here. The maintainer of
- * core code changing should notify the maintainers of the tracing code
- * that they need to work out the change.
- *
- * Some tracehook_*() inlines take arguments that the current tracing
- * implementations might not necessarily use. These function signatures
- * are chosen to pass in all the information that is on hand in the
- * caller and might conceivably be relevant to a tracer, so that the
- * core code won't have to be updated when tracing adds more features.
- * If a call site changes so that some of those parameters are no longer
- * already on hand without extra work, then the tracehook_* interface
- * can change so there is no make-work burden on the core code. The
- * maintainer of core code changing should notify the maintainers of the
- * tracing code that they need to work out the change.
- */
-
-#ifndef _LINUX_TRACEHOOK_H
-#define _LINUX_TRACEHOOK_H 1
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/security.h>
-#include <linux/task_work.h>
-#include <linux/memcontrol.h>
-#include <linux/blk-cgroup.h>
-struct linux_binprm;
-
-/*
- * ptrace report for syscall entry and exit looks identical.
- */
-static inline int ptrace_report_syscall(struct pt_regs *regs,
- unsigned long message)
-{
- int ptrace = current->ptrace;
-
- if (!(ptrace & PT_PTRACED))
- return 0;
-
- current->ptrace_message = message;
- ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-
- current->ptrace_message = 0;
- return fatal_signal_pending(current);
-}
-
-/**
- * tracehook_report_syscall_entry - task is about to attempt a system call
- * @regs: user register state of current task
- *
- * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
- * when the current task has just entered the kernel for a system call.
- * Full user register state is available here. Changing the values
- * in @regs can affect the system call number and arguments to be tried.
- * It is safe to block here, preventing the system call from beginning.
- *
- * Returns zero normally, or nonzero if the calling arch code should abort
- * the system call. That must prevent normal entry so no system call is
- * made. If @task ever returns to user mode after this, its register state
- * is unspecified, but should be something harmless like an %ENOSYS error
- * return. It should preserve enough information so that syscall_rollback()
- * can work (see asm-generic/syscall.h).
- *
- * Called without locks, just after entering kernel mode.
- */
-static inline __must_check int tracehook_report_syscall_entry(
- struct pt_regs *regs)
-{
- return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY);
-}
-
-/**
- * tracehook_report_syscall_exit - task has just finished a system call
- * @regs: user register state of current task
- * @step: nonzero if simulating single-step or block-step
- *
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just finished an attempted system call. Full
- * user register state is available here. It is safe to block here,
- * preventing signals from being processed.
- *
- * If @step is nonzero, this report is also in lieu of the normal
- * trap that would follow the system call instruction because
- * user_enable_block_step() or user_enable_single_step() was used.
- * In this case, %TIF_SYSCALL_TRACE might not be set.
- *
- * Called without locks, just before checking for pending signals.
- */
-static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
-{
- if (step)
- user_single_step_report(regs);
- else
- ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT);
-}
-
-/**
- * tracehook_signal_handler - signal handler setup is complete
- * @stepping: nonzero if debugger single-step or block-step in use
- *
- * Called by the arch code after a signal handler has been set up.
- * Register and stack state reflects the user handler about to run.
- * Signal mask changes have already been made.
- *
- * Called without locks, shortly before returning to user mode
- * (or handling more signals).
- */
-static inline void tracehook_signal_handler(int stepping)
-{
- if (stepping)
- ptrace_notify(SIGTRAP);
-}
-
-/**
- * set_notify_resume - cause tracehook_notify_resume() to be called
- * @task: task that will call tracehook_notify_resume()
- *
- * Calling this arranges that @task will call tracehook_notify_resume()
- * before returning to user mode. If it's already running in user mode,
- * it will enter the kernel and call tracehook_notify_resume() soon.
- * If it's blocked, it will not be woken.
- */
-static inline void set_notify_resume(struct task_struct *task)
-{
-#ifdef TIF_NOTIFY_RESUME
- if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
- kick_process(task);
-#endif
-}
-
-/**
- * tracehook_notify_resume - report when about to return to user mode
- * @regs: user-mode registers of @current task
- *
- * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
- * about to return to user mode, and the user state in @regs can be
- * inspected or adjusted. The caller in arch code has cleared
- * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
- * asynchronously, this will be called again before we return to
- * user mode.
- *
- * Called without locks.
- */
-static inline void tracehook_notify_resume(struct pt_regs *regs)
-{
- /*
- * The caller just cleared TIF_NOTIFY_RESUME. This barrier
- * pairs with task_work_add()->set_notify_resume() after
- * hlist_add_head(task->task_works);
- */
- smp_mb__after_atomic();
- if (unlikely(current->task_works))
- task_work_run();
-
-#ifdef CONFIG_KEYS_REQUEST_CACHE
- if (unlikely(current->cached_requested_key)) {
- key_put(current->cached_requested_key);
- current->cached_requested_key = NULL;
- }
-#endif
-
- mem_cgroup_handle_over_high();
- blkcg_maybe_throttle_current();
-}
-
-#endif /* <linux/tracehook.h> */
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index b29950a19205..4dc4955f0fbf 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -11,6 +11,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
+struct static_call_key;
+
struct trace_print_flags {
unsigned long mask;
const char *name;
@@ -30,6 +32,10 @@ struct tracepoint_func {
struct tracepoint {
const char *name; /* Tracepoint name */
struct static_key key;
+ struct static_call_key *static_call_key;
+ void *static_call_tramp;
+ void *iterator;
+ void *probestub;
int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
@@ -48,4 +54,38 @@ struct bpf_raw_event_map {
u32 writable_size;
} __aligned(32);
+/*
+ * If a tracepoint needs to be called from a header file, it is not
+ * recommended to call it directly, as tracepoints in header files
+ * may cause side-effects and bloat the kernel. Instead, use
+ * tracepoint_enabled() to test if the tracepoint is enabled, then if
+ * it is, call a wrapper function defined in a C file that will then
+ * call the tracepoint.
+ *
+ * For "trace_foo_bar()", you would need to create a wrapper function
+ * in a C file to call trace_foo_bar():
+ * void do_trace_foo_bar(args) { trace_foo_bar(args); }
+ * Then in the header file, declare the tracepoint:
+ * DECLARE_TRACEPOINT(foo_bar);
+ * And call your wrapper:
+ * static inline void some_inlined_function() {
+ * [..]
+ * if (tracepoint_enabled(foo_bar))
+ * do_trace_foo_bar(args);
+ * [..]
+ * }
+ *
+ * Note: tracepoint_enabled(foo_bar) is equivalent to trace_foo_bar_enabled()
+ * but is safe to have in headers, where trace_foo_bar_enabled() is not.
+ */
+#define DECLARE_TRACEPOINT(tp) \
+ extern struct tracepoint __tracepoint_##tp
+
+#ifdef CONFIG_TRACEPOINTS
+# define tracepoint_enabled(tp) \
+ static_key_false(&(__tracepoint_##tp).key)
+#else
+# define tracepoint_enabled(tracepoint) false
+#endif
+
#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 598fec9f9dbf..689b6d71590e 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -19,6 +19,7 @@
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint-defs.h>
+#include <linux/static_call.h>
struct module;
struct tracepoint;
@@ -40,7 +41,17 @@ extern int
tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
int prio);
extern int
+tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
+ int prio);
+extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
+static inline int
+tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
+ void *data)
+{
+ return tracepoint_probe_register_prio_may_exist(tp, probe, data,
+ TRACEPOINT_DEFAULT_PRIO);
+}
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
void *priv);
@@ -92,7 +103,9 @@ extern int syscall_regfunc(void);
extern void syscall_unregfunc(void);
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+#ifndef PARAMS
#define PARAMS(args...) args
+#endif
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
@@ -116,7 +129,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __TRACEPOINT_ENTRY(name) \
static tracepoint_ptr_t __tracepoint_ptr_##name __used \
- __section(__tracepoints_ptrs) = &__tracepoint_##name
+ __section("__tracepoints_ptrs") = &__tracepoint_##name
#endif
#endif /* _LINUX_TRACEPOINT_H */
@@ -138,7 +151,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
/*
 * Individual subsystems may have a separate configuration to
* enable their tracepoints. By default, this file will create
- * the tracepoints if CONFIG_TRACEPOINT is defined. If a subsystem
+ * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
* wants to be able to disable its tracepoints from being created
* it can define NOTRACE before including the tracepoint headers.
*/
@@ -148,27 +161,47 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#ifdef TRACEPOINTS_ENABLED
+#ifdef CONFIG_HAVE_STATIC_CALL
+#define __DO_TRACE_CALL(name, args) \
+ do { \
+ struct tracepoint_func *it_func_ptr; \
+ void *__data; \
+ it_func_ptr = \
+ rcu_dereference_raw((&__tracepoint_##name)->funcs); \
+ if (it_func_ptr) { \
+ __data = (it_func_ptr)->data; \
+ static_call(tp_func_##name)(__data, args); \
+ } \
+ } while (0)
+#else
+#define __DO_TRACE_CALL(name, args) __traceiter_##name(NULL, args)
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
+/*
+ * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle
+ * code that disallow any/all tracing/instrumentation when RCU isn't watching.
+ */
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#define RCUIDLE_COND(rcuidle) (rcuidle)
+#else
+/* srcu can't be used from NMI */
+#define RCUIDLE_COND(rcuidle) (rcuidle && in_nmi())
+#endif
+
/*
* it_func[0] is never NULL because there is at least one element in the array
* when the array itself is non NULL.
- *
- * Note, the proto and args passed in includes "__data" as the first parameter.
- * The reason for this is to handle the "void" prototype. If a tracepoint
- * has a "void" prototype, then it is invalid to declare a function
- * as "(void *, void)".
*/
-#define __DO_TRACE(tp, proto, args, cond, rcuidle) \
+#define __DO_TRACE(name, args, cond, rcuidle) \
do { \
- struct tracepoint_func *it_func_ptr; \
- void *it_func; \
- void *__data; \
int __maybe_unused __idx = 0; \
\
if (!(cond)) \
return; \
\
- /* srcu can't be used from NMI */ \
- WARN_ON_ONCE(rcuidle && in_nmi()); \
+ if (WARN_ONCE(RCUIDLE_COND(rcuidle), \
+ "Bad RCU usage for tracepoint")) \
+ return; \
\
/* keep srcu and sched-rcu usage consistent */ \
preempt_disable_notrace(); \
@@ -179,21 +212,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
*/ \
if (rcuidle) { \
__idx = srcu_read_lock_notrace(&tracepoint_srcu);\
- rcu_irq_enter_irqson(); \
+ ct_irq_enter_irqson(); \
} \
\
- it_func_ptr = rcu_dereference_raw((tp)->funcs); \
- \
- if (it_func_ptr) { \
- do { \
- it_func = (it_func_ptr)->func; \
- __data = (it_func_ptr)->data; \
- ((void(*)(proto))(it_func))(args); \
- } while ((++it_func_ptr)->func); \
- } \
+ __DO_TRACE_CALL(name, TP_ARGS(args)); \
\
if (rcuidle) { \
- rcu_irq_exit_irqson(); \
+ ct_irq_exit_irqson(); \
srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \
\
@@ -201,17 +226,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} while (0)
#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond) \
static inline void trace_##name##_rcuidle(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
- __DO_TRACE(&__tracepoint_##name, \
- TP_PROTO(data_proto), \
- TP_ARGS(data_args), \
+ __DO_TRACE(name, \
+ TP_ARGS(args), \
TP_CONDITION(cond), 1); \
}
#else
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
+#define __DECLARE_TRACE_RCU(name, proto, args, cond)
#endif
/*
@@ -219,30 +243,29 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*
- * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on. However,
- * don't check if the condition is false, due to interaction with idle
- * instrumentation. This lets us find RCU issues triggered with tracepoints
- * even when this tracepoint is off. This code has no purpose other than
- * poking RCU a bit.
+ * When lockdep is enabled, we make sure to always test if RCU is
+ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
+ * require RCU to be active, and it should always warn at the tracepoint
+ * site if it is not watching, as it will need to be active when the
+ * tracepoint is enabled.
*/
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+ extern int __traceiter_##name(data_proto); \
+ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
- __DO_TRACE(&__tracepoint_##name, \
- TP_PROTO(data_proto), \
- TP_ARGS(data_args), \
+ __DO_TRACE(name, \
+ TP_ARGS(args), \
TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
- rcu_read_lock_sched_notrace(); \
- rcu_dereference_sched(__tracepoint_##name.funcs);\
- rcu_read_unlock_sched_notrace(); \
+ WARN_ONCE(!rcu_is_watching(), \
+ "RCU not watching for tracepoint"); \
} \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
- PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
+ PARAMS(cond)) \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
{ \
@@ -277,24 +300,60 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* structures, so we create an array of pointers that will be used for iteration
* on the tracepoints.
*/
-#define DEFINE_TRACE_FN(name, reg, unreg) \
- static const char __tpstrtab_##name[] \
- __section(__tracepoints_strings) = #name; \
- struct tracepoint __tracepoint_##name __used \
- __section(__tracepoints) = \
- { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
- __TRACEPOINT_ENTRY(name);
+#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \
+ static const char __tpstrtab_##_name[] \
+ __section("__tracepoints_strings") = #_name; \
+ extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
+ int __traceiter_##_name(void *__data, proto); \
+ void __probestub_##_name(void *__data, proto); \
+ struct tracepoint __tracepoint_##_name __used \
+ __section("__tracepoints") = { \
+ .name = __tpstrtab_##_name, \
+ .key = STATIC_KEY_INIT_FALSE, \
+ .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
+ .static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
+ .iterator = &__traceiter_##_name, \
+ .probestub = &__probestub_##_name, \
+ .regfunc = _reg, \
+ .unregfunc = _unreg, \
+ .funcs = NULL }; \
+ __TRACEPOINT_ENTRY(_name); \
+ int __traceiter_##_name(void *__data, proto) \
+ { \
+ struct tracepoint_func *it_func_ptr; \
+ void *it_func; \
+ \
+ it_func_ptr = \
+ rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
+ if (it_func_ptr) { \
+ do { \
+ it_func = READ_ONCE((it_func_ptr)->func); \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(void *, proto))(it_func))(__data, args); \
+ } while ((++it_func_ptr)->func); \
+ } \
+ return 0; \
+ } \
+ void __probestub_##_name(void *__data, proto) \
+ { \
+ } \
+ DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
-#define DEFINE_TRACE(name) \
- DEFINE_TRACE_FN(name, NULL, NULL);
+#define DEFINE_TRACE(name, proto, args) \
+ DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args));
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
- EXPORT_SYMBOL_GPL(__tracepoint_##name)
+ EXPORT_SYMBOL_GPL(__tracepoint_##name); \
+ EXPORT_SYMBOL_GPL(__traceiter_##name); \
+ EXPORT_STATIC_CALL_GPL(tp_func_##name)
#define EXPORT_TRACEPOINT_SYMBOL(name) \
- EXPORT_SYMBOL(__tracepoint_##name)
+ EXPORT_SYMBOL(__tracepoint_##name); \
+ EXPORT_SYMBOL(__traceiter_##name); \
+ EXPORT_STATIC_CALL(tp_func_##name)
+
#else /* !TRACEPOINTS_ENABLED */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
static inline void trace_##name(proto) \
{ } \
static inline void trace_##name##_rcuidle(proto) \
@@ -320,8 +379,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
return false; \
}
-#define DEFINE_TRACE_FN(name, reg, unreg)
-#define DEFINE_TRACE(name)
+#define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
+#define DEFINE_TRACE(name, proto, args)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
@@ -360,7 +419,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static const char *___tp_str __tracepoint_string = str; \
___tp_str; \
})
-#define __tracepoint_string __used __section(__tracepoint_str)
+#define __tracepoint_string __used __section("__tracepoint_str")
#else
/*
* tracepoint_string() is used to save the string address for userspace
@@ -374,14 +433,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define DECLARE_TRACE(name, proto, args) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()), \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ PARAMS(void *__data, proto))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ PARAMS(void *__data, proto))
#define TRACE_EVENT_FLAGS(event, flag)
@@ -429,11 +486,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* * This is how the trace record is structured and will
* * be saved into the ring buffer. These are the fields
* * that will be exposed to user-space in
- * * /sys/kernel/debug/tracing/events/<*>/format.
+ * * /sys/kernel/tracing/events/<*>/format.
* *
* * The declared 'local variable' is called '__entry'
* *
- * * __field(pid_t, prev_prid) is equivalent to a standard declariton:
+ * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
* *
* * pid_t prev_pid;
* *
@@ -489,7 +546,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* tracepoint callback (this is used by programmatic plugins and
* can also by used by generic instrumentation like SystemTap), and
* it is also used to expose a structured trace record in
- * /sys/kernel/debug/tracing/events/.
+ * /sys/kernel/tracing/events/.
*
* A set of (un)registration functions can be passed to the variant
* TRACE_EVENT_FN to perform any (un)registration work.
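
With the signature change above, the declaration and the definition now both carry the prototype. A sketch of the updated pairing (the tracepoint name is illustrative):

	/* in a header: */
	DECLARE_TRACE(my_event, TP_PROTO(int cpu), TP_ARGS(cpu));

	/*
	 * in exactly one C file, so the __traceiter_my_event iterator
	 * and its static call can be emitted:
	 */
	DEFINE_TRACE(my_event, TP_PROTO(int cpu), TP_ARGS(cpu));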
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index 63076fb835e3..2efc271a96fa 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -70,8 +70,14 @@ void transport_destroy_device(struct device *);
static inline int
transport_register_device(struct device *dev)
{
+ int ret;
+
transport_setup_device(dev);
- return transport_add_device(dev);
+ ret = transport_add_device(dev);
+ if (ret)
+ transport_destroy_device(dev);
+
+ return ret;
}
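
With this change the helper cleans up after itself on failure, so a caller's error path stays simple (a sketch; the surrounding driver structure is hypothetical):

	int err = transport_register_device(&foo->dev);

	if (err)
		return err;	/* transport_setup_device() already undone */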
static inline void
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
new file mode 100644
index 000000000000..de8324a2223c
--- /dev/null
+++ b/include/linux/tsm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TSM_H
+#define __TSM_H
+
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define TSM_INBLOB_MAX 64
+#define TSM_OUTBLOB_MAX SZ_32K
+
+/*
+ * Privilege level is a nested permission concept to allow confidential
+ * guests to partition address space; four levels are supported.
+ */
+#define TSM_PRIVLEVEL_MAX 3
+
+/**
+ * struct tsm_desc - option descriptor for generating tsm report blobs
+ * @privlevel: optional privilege level to associate with @outblob
+ * @inblob_len: sizeof @inblob
+ * @inblob: arbitrary input data
+ */
+struct tsm_desc {
+ unsigned int privlevel;
+ size_t inblob_len;
+ u8 inblob[TSM_INBLOB_MAX];
+};
+
+/**
+ * struct tsm_report - track state of report generation relative to options
+ * @desc: input parameters to @report_new()
+ * @outblob_len: sizeof(@outblob)
+ * @outblob: generated evidence to provide to the attestation agent
+ * @auxblob_len: sizeof(@auxblob)
+ * @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ */
+struct tsm_report {
+ struct tsm_desc desc;
+ size_t outblob_len;
+ u8 *outblob;
+ size_t auxblob_len;
+ u8 *auxblob;
+};
+
+/**
+ * struct tsm_ops - attributes and operations for tsm instances
+ * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
+ * @privlevel_floor: convey base privlevel for nested scenarios
+ * @report_new: Populate @report with the report blob and auxblob
+ * (optional), return 0 on successful population, or -errno otherwise
+ *
+ * Implementation-specific ops; only one is expected to be registered at
+ * a time, i.e. only one of "sev-guest", "tdx-guest", etc.
+ */
+struct tsm_ops {
+ const char *name;
+ const unsigned int privlevel_floor;
+ int (*report_new)(struct tsm_report *report, void *data);
+};
+
+extern const struct config_item_type tsm_report_default_type;
+
+/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
+extern const struct config_item_type tsm_report_extra_type;
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+ const struct config_item_type *type);
+int tsm_unregister(const struct tsm_ops *ops);
+#endif /* __TSM_H */
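
A registration sketch for a hypothetical provider (my_report_new and the name are illustrative; a real provider such as an SEV or TDX guest driver would fill the blob from hardware):

	static int my_report_new(struct tsm_report *report, void *data)
	{
		u8 *blob = kvzalloc(TSM_OUTBLOB_MAX, GFP_KERNEL);

		if (!blob)
			return -ENOMEM;
		/* derive evidence from report->desc.inblob / privlevel ... */
		report->outblob = blob;
		report->outblob_len = 64;	/* illustrative */
		return 0;
	}

	static const struct tsm_ops my_ops = {
		.name		 = "my-guest",
		.privlevel_floor = 0,
		.report_new	 = my_report_new,
	};

	/* at probe: tsm_register(&my_ops, NULL, &tsm_report_default_type); */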
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a99e9b8e4e31..2b2e6f0a54d6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -8,39 +8,15 @@
#include <linux/workqueue.h>
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
+#include <linux/tty_port.h>
#include <linux/mutex.h>
#include <linux/tty_flags.h>
-#include <linux/seq_file.h>
#include <uapi/linux/tty.h>
#include <linux/rwsem.h>
#include <linux/llist.h>
/*
- * Lock subclasses for tty locks
- *
- * TTY_LOCK_NORMAL is for normal ttys and master ptys.
- * TTY_LOCK_SLAVE is for slave ptys only.
- *
- * Lock subclasses are necessary for handling nested locking with pty pairs.
- * tty locks which use nested locking:
- *
- * legacy_mutex - Nested tty locks are necessary for releasing pty pairs.
- * The stable lock order is master pty first, then slave pty.
- * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem.
- * Subclassing this lock enables the slave pty to hold its
- * termios_rwsem when claiming the master tty_buffer lock.
- * tty_buffer lock - slave ptys can claim nested buffer lock when handling
- * signal chars. The stable lock order is slave pty, then
- * master.
- */
-
-enum {
- TTY_LOCK_NORMAL = 0,
- TTY_LOCK_SLAVE,
-};
-
-/*
* (Note: the *_driver.minor_start values 1, 64, 128, 192 are
* hardcoded at present.)
*/
@@ -55,54 +31,6 @@ enum {
*/
#define __DISABLED_CHAR '\0'
-struct tty_buffer {
- union {
- struct tty_buffer *next;
- struct llist_node free;
- };
- int used;
- int size;
- int commit;
- int read;
- int flags;
- /* Data points here */
- unsigned long data[];
-};
-
-/* Values for .flags field of tty_buffer */
-#define TTYB_NORMAL 1 /* buffer has no flags buffer */
-
-static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
-{
- return ((unsigned char *)b->data) + ofs;
-}
-
-static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
-{
- return (char *)char_buf_ptr(b, ofs) + b->size;
-}
-
-struct tty_bufhead {
- struct tty_buffer *head; /* Queue head */
- struct work_struct work;
- struct mutex lock;
- atomic_t priority;
- struct tty_buffer sentinel;
- struct llist_head free; /* Free queue head */
- atomic_t mem_used; /* In-use buffers excluding free list */
- int mem_limit;
- struct tty_buffer *tail; /* Active buffer */
-};
-/*
- * When a break, frame error, or parity error happens, these codes are
- * stuffed into the flags buffer.
- */
-#define TTY_NORMAL 0
-#define TTY_BREAK 1
-#define TTY_FRAME 2
-#define TTY_PARITY 3
-#define TTY_OVERRUN 4
-
#define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR])
#define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT])
#define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE])
@@ -188,136 +116,120 @@ struct tty_bufhead {
struct device;
struct signal_struct;
+struct tty_operations;
-/*
- * Port level information. Each device keeps its own port level information
- * so provide a common structure for those ports wanting to use common support
- * routines.
+/**
+ * struct tty_struct - state associated with a tty while open
*
- * The tty port has a different lifetime to the tty so must be kept apart.
- * In addition be careful as tty -> port mappings are valid for the life
- * of the tty object but in many cases port -> tty mappings are valid only
- * until a hangup so don't use the wrong path.
- */
-
-struct tty_port;
-
-struct tty_port_operations {
- /* Return 1 if the carrier is raised */
- int (*carrier_raised)(struct tty_port *port);
- /* Control the DTR line */
- void (*dtr_rts)(struct tty_port *port, int raise);
- /* Called when the last close completes or a hangup finishes
- IFF the port was initialized. Do not use to free resources. Called
- under the port mutex to serialize against activate/shutdowns */
- void (*shutdown)(struct tty_port *port);
- /* Called under the port mutex from tty_port_open, serialized using
- the port mutex */
- /* FIXME: long term getting the tty argument *out* of this would be
- good for consoles */
- int (*activate)(struct tty_port *port, struct tty_struct *tty);
- /* Called on the final put of a port */
- void (*destruct)(struct tty_port *port);
-};
-
-struct tty_port_client_operations {
- int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
- void (*write_wakeup)(struct tty_port *port);
-};
-
-extern const struct tty_port_client_operations tty_port_default_client_ops;
-
-struct tty_port {
- struct tty_bufhead buf; /* Locked internally */
- struct tty_struct *tty; /* Back pointer */
- struct tty_struct *itty; /* internal back ptr */
- const struct tty_port_operations *ops; /* Port operations */
- const struct tty_port_client_operations *client_ops; /* Port client operations */
- spinlock_t lock; /* Lock protecting tty field */
- int blocked_open; /* Waiting to open */
- int count; /* Usage count */
- wait_queue_head_t open_wait; /* Open waiters */
- wait_queue_head_t delta_msr_wait; /* Modem status change */
- unsigned long flags; /* User TTY flags ASYNC_ */
- unsigned long iflags; /* Internal flags TTY_PORT_ */
- unsigned char console:1, /* port is a console */
- low_latency:1; /* optional: tune for latency */
- struct mutex mutex; /* Locking */
- struct mutex buf_mutex; /* Buffer alloc lock */
- unsigned char *xmit_buf; /* Optional buffer */
- unsigned int close_delay; /* Close port delay */
- unsigned int closing_wait; /* Delay for output */
- int drain_delay; /* Set to zero if no pure time
- based drain is needed else
- set to size of fifo */
- struct kref kref; /* Ref counter */
- void *client_data;
-};
-
-/* tty_port::iflags bits -- use atomic bit ops */
-#define TTY_PORT_INITIALIZED 0 /* device is initialized */
-#define TTY_PORT_SUSPENDED 1 /* device is suspended */
-#define TTY_PORT_ACTIVE 2 /* device is open */
-
-/*
- * uart drivers: use the uart_port::status field and the UPSTAT_* defines
- * for s/w-based flow control steering and carrier detection status
- */
-#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
-#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
-#define TTY_PORT_KOPENED 5 /* device exclusively opened by
- kernel */
-
-/*
- * Where all of the state associated with a tty is kept while the tty
- * is open. Since the termios state should be kept even if the tty
- * has been closed --- for things like the baud rate, etc --- it is
- * not stored here, but rather a pointer to the real state is stored
- * here. Possible the winsize structure should have the same
- * treatment, but (1) the default 80x24 is usually right and (2) it's
- * most often used by a windowing system, which will set the correct
- * size each time the window is created or resized anyway.
- * - TYT, 9/14/92
+ * @kref: reference counting by tty_kref_get() and tty_kref_put(), reaching zero
+ * frees the structure
+ * @dev: class device or %NULL (e.g. ptys, serdev)
+ * @driver: &struct tty_driver operating this tty
+ * @ops: &struct tty_operations of @driver for this tty (open, close, etc.)
+ * @index: index of this tty (e.g. to construct @name like tty12)
+ * @ldisc_sem: protects line discipline changes (@ldisc) -- lock tty not pty
+ * @ldisc: the current line discipline for this tty (n_tty by default)
+ * @atomic_write_lock: protects against concurrent writers, i.e. locks
+ * @write_cnt, @write_buf and similar
+ * @legacy_mutex: leftover from history (BKL -> BTM -> @legacy_mutex),
+ * protecting several operations on this tty
+ * @throttle_mutex: protects against concurrent tty_throttle_safe() and
+ * tty_unthrottle_safe() (but not tty_unthrottle())
+ * @termios_rwsem: protects @termios and @termios_locked
+ * @winsize_mutex: protects @winsize
+ * @termios: termios for the current tty, copied from/to @driver.termios
+ * @termios_locked: locked termios (by %TIOCGLCKTRMIOS and %TIOCSLCKTRMIOS
+ * ioctls)
+ * @name: name of the tty constructed by tty_line_name() (e.g. ttyS3)
+ * @flags: bitwise OR of %TTY_THROTTLED, %TTY_IO_ERROR, ...
+ * @count: count of open processes, reaching zero cancels all the work for
+ * this tty and drops a @kref too (but does not free this tty)
+ * @winsize: size of the terminal "window" (cf. @winsize_mutex)
+ * @flow: flow settings grouped together, see also @flow.unused
+ * @flow.lock: lock for @flow members
+ * @flow.stopped: tty stopped/started by stop_tty()/start_tty()
+ * @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has
+ * precedence over @flow.stopped)
+ * @flow.unused: alignment for Alpha, so that no members other than @flow.* are
+ * modified by the same 64b word store. The @flow's __aligned is
+ * there for the very same reason.
+ * @ctrl: control settings grouped together, see also @ctrl.unused
+ * @ctrl.lock: lock for @ctrl members
+ * @ctrl.pgrp: process group of this tty (setpgrp(2))
+ * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
+ * @ctrl.lock and @legacy_mutex, readers must use at least one of
+ * them.
+ * @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants)
+ * @ctrl.packet: packet mode enabled
+ * @ctrl.unused: alignment for Alpha, see @flow.unused for explanation
+ * @hw_stopped: not controlled by the tty layer, under @driver's control for CTS
+ * handling
+ * @receive_room: bytes permitted to feed to @ldisc without any being lost
+ * @flow_change: controls behavior of throttling, see tty_throttle_safe() and
+ * tty_unthrottle_safe()
+ * @link: link to another pty (master -> slave and vice versa)
+ * @fasync: state for %O_ASYNC (for %SIGIO); managed by fasync_helper()
+ * @write_wait: concurrent writers are waiting in this queue until they are
+ * allowed to write
+ * @read_wait: readers wait for data in this queue
+ * @hangup_work: normally a work to perform a hangup (do_tty_hangup()); while
+ * freeing the tty, it is reused to run release_one_tty()
+ * @disc_data: pointer to @ldisc's private data (e.g. to &struct n_tty_data)
+ * @driver_data: pointer to @driver's private data (e.g. &struct uart_state)
+ * @files_lock: protects @tty_files list
+ * @tty_files: list of (re)openers of this tty (i.e. linked &struct
+ * tty_file_private)
+ * @closing: when set during close, n_tty processes only START & STOP chars
+ * @write_buf: temporary buffer used during tty_write() to copy user data to
+ * @write_cnt: count of bytes written in tty_write() to @write_buf
+ * @SAK_work: if the tty has a pending do_SAK, it is queued here
+ * @port: persistent storage for this device (i.e. &struct tty_port)
+ *
+ * All of the state associated with a tty while the tty is open. Persistent
+ * storage for tty devices is referenced here as @port and is documented in
+ * &struct tty_port.
*/
-
-struct tty_operations;
-
struct tty_struct {
- int magic;
struct kref kref;
+ int index;
struct device *dev;
struct tty_driver *driver;
+ struct tty_port *port;
const struct tty_operations *ops;
- int index;
- /* Protects ldisc changes: Lock tty not pty */
- struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
+ struct ld_semaphore ldisc_sem;
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
struct mutex throttle_mutex;
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
- spinlock_t ctrl_lock;
- spinlock_t flow_lock;
- /* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
- struct termiox *termiox; /* May be NULL for unsupported */
char name[64];
- struct pid *pgrp; /* Protected by ctrl lock */
- struct pid *session;
unsigned long flags;
int count;
- struct winsize winsize; /* winsize_mutex */
- unsigned long stopped:1, /* flow_lock */
- flow_stopped:1,
- unused:BITS_PER_LONG - 2;
- int hw_stopped;
- unsigned long ctrl_status:8, /* ctrl_lock */
- packet:1,
- unused_ctrl:BITS_PER_LONG - 9;
- unsigned int receive_room; /* Bytes free for queue */
+ unsigned int receive_room;
+ struct winsize winsize;
+
+ struct {
+ spinlock_t lock;
+ bool stopped;
+ bool tco_stopped;
+ unsigned long unused[0];
+ } __aligned(sizeof(unsigned long)) flow;
+
+ struct {
+ struct pid *pgrp;
+ struct pid *session;
+ spinlock_t lock;
+ unsigned char pktstatus;
+ bool packet;
+ unsigned long unused[0];
+ } __aligned(sizeof(unsigned long)) ctrl;
+
+ bool hw_stopped;
+ bool closing;
int flow_change;
struct tty_struct *link;
@@ -327,17 +239,14 @@ struct tty_struct {
struct work_struct hangup_work;
void *disc_data;
void *driver_data;
- spinlock_t files_lock; /* protects tty_files list */
+ spinlock_t files_lock;
+ int write_cnt;
+ u8 *write_buf;
+
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
-
- int closing;
- unsigned char *write_buf;
- int write_cnt;
- /* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
- struct tty_port *port;
} __randomize_layout;
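The grouped @flow state replaces the old loose bitfields guarded by flow_lock. A short sketch of the locking pattern the kernel-doc above prescribes; this mirrors what stop_tty() does internally, minus the call into the driver, and the function name is invented:

static void sketch_mark_stopped(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->flow.lock, flags);
	tty->flow.stopped = true;	/* cf. @flow.stopped above */
	spin_unlock_irqrestore(&tty->flow.lock, flags);
}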
/* Each of a tty's open files has private_data pointing to tty_file_private */
@@ -347,44 +256,72 @@ struct tty_file_private {
struct list_head list;
};
-/* tty magic number */
-#define TTY_MAGIC 0x5401
-
-/*
- * These bits are used in the flags field of the tty structure.
+/**
+ * DOC: TTY Struct Flags
+ *
+ * These bits are used in the :c:member:`tty_struct.flags` field.
*
* So that interrupts won't be able to mess up the queues,
* copy_to_cooked must be atomic with respect to itself, as must
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
+ *
+ * TTY_THROTTLED
+ * Driver input is throttled. The ldisc should call
+ * :c:member:`tty_driver.unthrottle()` in order to resume reception when
+ * it is ready to process more data (at threshold min).
+ *
+ * TTY_IO_ERROR
+ * If set, causes all subsequent userspace read/write calls on the tty to
+ * fail, returning -%EIO. (There may also be no ldisc attached.)
+ *
+ * TTY_OTHER_CLOSED
+ * Device is a pty and the other side has closed.
+ *
+ * TTY_EXCLUSIVE
+ * Exclusive open mode (a single opener).
+ *
+ * TTY_DO_WRITE_WAKEUP
+ * If set, causes the driver to call the
+ * :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume
+ * transmission when it can accept more data to transmit.
+ *
+ * TTY_LDISC_OPEN
+ * Indicates that a line discipline is open. For debugging purposes only.
+ *
+ * TTY_PTY_LOCK
+ * A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic.
+ *
+ * TTY_NO_WRITE_SPLIT
+ * Prevent driver from splitting up writes into smaller chunks (preserve
+ * write boundaries to driver).
+ *
+ * TTY_HUPPED
+ * The TTY was hung up. This is set post :c:member:`tty_driver.hangup()`.
+ *
+ * TTY_HUPPING
+ * The TTY is in the process of hanging up to abort potential readers.
+ *
+ * TTY_LDISC_CHANGING
+ * Line discipline for this TTY is being changed. I/O should not block
+ * when this is set. Use tty_io_nonblock() to check.
+ *
+ * TTY_LDISC_HALTED
+ * Line discipline for this TTY was stopped. No work should be queued to
+ * this ldisc.
*/
-#define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */
-#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
-#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
-#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
-#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_LDISC_OPEN 11 /* Line discipline is open */
-#define TTY_PTY_LOCK 16 /* pty private */
-#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
-#define TTY_HUPPED 18 /* Post driver->hangup() */
-#define TTY_HUPPING 19 /* Hangup in progress */
-#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */
-#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
-
-/* Values for tty->flow_change */
-#define TTY_THROTTLE_SAFE 1
-#define TTY_UNTHROTTLE_SAFE 2
-
-static inline void __tty_set_flow_change(struct tty_struct *tty, int val)
-{
- tty->flow_change = val;
-}
-
-static inline void tty_set_flow_change(struct tty_struct *tty, int val)
-{
- tty->flow_change = val;
- smp_mb();
-}
+#define TTY_THROTTLED 0
+#define TTY_IO_ERROR 1
+#define TTY_OTHER_CLOSED 2
+#define TTY_EXCLUSIVE 3
+#define TTY_DO_WRITE_WAKEUP 5
+#define TTY_LDISC_OPEN 11
+#define TTY_PTY_LOCK 16
+#define TTY_NO_WRITE_SPLIT 17
+#define TTY_HUPPED 18
+#define TTY_HUPPING 19
+#define TTY_LDISC_CHANGING 20
+#define TTY_LDISC_HALTED 22
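The DOC block above insists on atomic bit operations for @flags. A sketch of the access pattern; the helper names are invented for illustration, only test_bit()/set_bit() on &tty->flags come from the text:

static inline bool sketch_tty_failed(struct tty_struct *tty)
{
	return test_bit(TTY_IO_ERROR, &tty->flags);
}

static inline void sketch_mark_hupped(struct tty_struct *tty)
{
	set_bit(TTY_HUPPED, &tty->flags);
}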
static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
{
@@ -403,21 +340,20 @@ static inline bool tty_throttled(struct tty_struct *tty)
}
#ifdef CONFIG_TTY
-extern void tty_kref_put(struct tty_struct *tty);
-extern struct pid *tty_get_pgrp(struct tty_struct *tty);
-extern void tty_vhangup_self(void);
-extern void disassociate_ctty(int priv);
-extern dev_t tty_devnum(struct tty_struct *tty);
-extern void proc_clear_tty(struct task_struct *p);
-extern struct tty_struct *get_current_tty(void);
+void tty_kref_put(struct tty_struct *tty);
+struct pid *tty_get_pgrp(struct tty_struct *tty);
+void tty_vhangup_self(void);
+void disassociate_ctty(int priv);
+dev_t tty_devnum(struct tty_struct *tty);
+void proc_clear_tty(struct task_struct *p);
+struct tty_struct *get_current_tty(void);
/* tty_io.c */
-extern int __init tty_init(void);
-extern const char *tty_name(const struct tty_struct *tty);
-extern struct tty_struct *tty_kopen(dev_t device);
-extern void tty_kclose(struct tty_struct *tty);
-extern int tty_dev_name_to_number(const char *name, dev_t *number);
-extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
-extern void tty_ldisc_unlock(struct tty_struct *tty);
+int __init tty_init(void);
+const char *tty_name(const struct tty_struct *tty);
+struct tty_struct *tty_kopen_exclusive(dev_t device);
+struct tty_struct *tty_kopen_shared(dev_t device);
+void tty_kclose(struct tty_struct *tty);
+int tty_dev_name_to_number(const char *name, dev_t *number);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
@@ -438,7 +374,7 @@ static inline int __init tty_init(void)
{ return 0; }
static inline const char *tty_name(const struct tty_struct *tty)
{ return "(none)"; }
-static inline struct tty_struct *tty_kopen(dev_t device)
+static inline struct tty_struct *tty_kopen_exclusive(dev_t device)
{ return ERR_PTR(-ENODEV); }
static inline void tty_kclose(struct tty_struct *tty)
{ }
@@ -448,19 +384,19 @@ static inline int tty_dev_name_to_number(const char *name, dev_t *number)
extern struct ktermios tty_std_termios;
-extern int vcs_init(void);
+int vcs_init(void);
-extern struct class *tty_class;
+extern const struct class tty_class;
/**
- * tty_kref_get - get a tty reference
- * @tty: tty device
+ * tty_kref_get - get a tty reference
+ * @tty: tty device
*
- * Return a new reference to a tty object. The caller must hold
- * sufficient locks/counts to ensure that their existing reference cannot
- * go away
+ * Returns: a new reference to a tty object
+ *
+ * Locking: The caller must hold sufficient locks/counts to ensure that their
+ * existing reference cannot go away.
*/
-
static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
@@ -468,279 +404,83 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
return tty;
}
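In practice the helper turns a borrowed pointer into an owned reference. A sketch of the borrow/extend/release pattern (the function name is hypothetical):

static void sketch_use_tty(struct tty_struct *borrowed)
{
	struct tty_struct *tty = tty_kref_get(borrowed);	/* NULL-safe */

	if (!tty)
		return;
	/* ... tty cannot be freed while we hold this reference ... */
	tty_kref_put(tty);
}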
-extern const char *tty_driver_name(const struct tty_struct *tty);
-extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
-extern int __tty_check_change(struct tty_struct *tty, int sig);
-extern int tty_check_change(struct tty_struct *tty);
-extern void __stop_tty(struct tty_struct *tty);
-extern void stop_tty(struct tty_struct *tty);
-extern void __start_tty(struct tty_struct *tty);
-extern void start_tty(struct tty_struct *tty);
-extern int tty_register_driver(struct tty_driver *driver);
-extern int tty_unregister_driver(struct tty_driver *driver);
-extern struct device *tty_register_device(struct tty_driver *driver,
- unsigned index, struct device *dev);
-extern struct device *tty_register_device_attr(struct tty_driver *driver,
- unsigned index, struct device *device,
- void *drvdata,
- const struct attribute_group **attr_grp);
-extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
-extern void tty_write_message(struct tty_struct *tty, char *msg);
-extern int tty_send_xchar(struct tty_struct *tty, char ch);
-extern int tty_put_char(struct tty_struct *tty, unsigned char c);
-extern int tty_chars_in_buffer(struct tty_struct *tty);
-extern int tty_write_room(struct tty_struct *tty);
-extern void tty_driver_flush_buffer(struct tty_struct *tty);
-extern void tty_throttle(struct tty_struct *tty);
-extern void tty_unthrottle(struct tty_struct *tty);
-extern int tty_throttle_safe(struct tty_struct *tty);
-extern int tty_unthrottle_safe(struct tty_struct *tty);
-extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
-extern int is_current_pgrp_orphaned(void);
-extern void tty_hangup(struct tty_struct *tty);
-extern void tty_vhangup(struct tty_struct *tty);
-extern void tty_vhangup_session(struct tty_struct *tty);
-extern int tty_hung_up_p(struct file *filp);
-extern void do_SAK(struct tty_struct *tty);
-extern void __do_SAK(struct tty_struct *tty);
-extern void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
-extern int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
-extern void session_clear_tty(struct pid *session);
-extern void no_tty(void);
-extern void tty_buffer_free_all(struct tty_port *port);
-extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
-extern void tty_buffer_init(struct tty_port *port);
-extern void tty_buffer_set_lock_subclass(struct tty_port *port);
-extern bool tty_buffer_restart_work(struct tty_port *port);
-extern bool tty_buffer_cancel_work(struct tty_port *port);
-extern void tty_buffer_flush_work(struct tty_port *port);
-extern speed_t tty_termios_baud_rate(struct ktermios *termios);
-extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
-extern void tty_termios_encode_baud_rate(struct ktermios *termios,
- speed_t ibaud, speed_t obaud);
-extern void tty_encode_baud_rate(struct tty_struct *tty,
- speed_t ibaud, speed_t obaud);
+const char *tty_driver_name(const struct tty_struct *tty);
+void tty_wait_until_sent(struct tty_struct *tty, long timeout);
+void stop_tty(struct tty_struct *tty);
+void start_tty(struct tty_struct *tty);
+void tty_write_message(struct tty_struct *tty, char *msg);
+int tty_send_xchar(struct tty_struct *tty, u8 ch);
+int tty_put_char(struct tty_struct *tty, u8 c);
+unsigned int tty_chars_in_buffer(struct tty_struct *tty);
+unsigned int tty_write_room(struct tty_struct *tty);
+void tty_driver_flush_buffer(struct tty_struct *tty);
+void tty_unthrottle(struct tty_struct *tty);
+bool tty_throttle_safe(struct tty_struct *tty);
+bool tty_unthrottle_safe(struct tty_struct *tty);
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
+int tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
+int tty_get_tiocm(struct tty_struct *tty);
+int is_current_pgrp_orphaned(void);
+void tty_hangup(struct tty_struct *tty);
+void tty_vhangup(struct tty_struct *tty);
+int tty_hung_up_p(struct file *filp);
+void do_SAK(struct tty_struct *tty);
+void __do_SAK(struct tty_struct *tty);
+void no_tty(void);
+speed_t tty_termios_baud_rate(const struct ktermios *termios);
+void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud,
+ speed_t obaud);
+void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
+ speed_t obaud);
/**
- * tty_get_baud_rate - get tty bit rates
- * @tty: tty to query
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
*
- * Returns the baud rate as an integer for this terminal. The
- * termios lock must be held by the caller and the terminal bit
- * flags may be updated.
+ * Returns: the baud rate as an integer for this terminal
*
- * Locking: none
+ * Locking: The termios lock must be held by the caller.
*/
-static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
+static inline speed_t tty_get_baud_rate(const struct tty_struct *tty)
{
return tty_termios_baud_rate(&tty->termios);
}
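tty_termios_encode_baud_rate() is the usual way for a driver's set_termios() hook to report the rate the hardware actually achieved. A sketch, with the hardware programming reduced to a comment (the function name is invented; the real hook would live in a &struct tty_operations):

static void sketch_set_termios(struct tty_struct *tty,
			       const struct ktermios *old)
{
	speed_t baud = tty_get_baud_rate(tty);	/* termios lock held here */

	/* ... program the UART; it may round baud to what it supports ... */
	tty_termios_encode_baud_rate(&tty->termios, baud, baud);
}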
-extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
-extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
-extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
-
-extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
-extern void tty_ldisc_deref(struct tty_ldisc *);
-extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
-extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
-extern int tty_ldisc_reinit(struct tty_struct *tty, int disc);
-extern const struct seq_operations tty_ldiscs_seq_ops;
-
-extern void tty_wakeup(struct tty_struct *tty);
-extern void tty_ldisc_flush(struct tty_struct *tty);
-
-extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-extern long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty,
- struct file *file, unsigned int cmd, unsigned long arg);
-extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
-extern void tty_default_fops(struct file_operations *fops);
-extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
-extern int tty_alloc_file(struct file *file);
-extern void tty_add_file(struct tty_struct *tty, struct file *file);
-extern void tty_free_file(struct file *file);
-extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
-extern void tty_release_struct(struct tty_struct *tty, int idx);
-extern int tty_release(struct inode *inode, struct file *filp);
-extern void tty_init_termios(struct tty_struct *tty);
-extern void tty_save_termios(struct tty_struct *tty);
-extern int tty_standard_install(struct tty_driver *driver,
- struct tty_struct *tty);
-
-extern struct mutex tty_mutex;
-
-#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
-
-extern void tty_port_init(struct tty_port *port);
-extern void tty_port_link_device(struct tty_port *port,
- struct tty_driver *driver, unsigned index);
-extern struct device *tty_port_register_device(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device);
-extern struct device *tty_port_register_device_attr(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
- const struct attribute_group **attr_grp);
-extern struct device *tty_port_register_device_serdev(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device);
-extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
- const struct attribute_group **attr_grp);
-extern void tty_port_unregister_device(struct tty_port *port,
- struct tty_driver *driver, unsigned index);
-extern int tty_port_alloc_xmit_buf(struct tty_port *port);
-extern void tty_port_free_xmit_buf(struct tty_port *port);
-extern void tty_port_destroy(struct tty_port *port);
-extern void tty_port_put(struct tty_port *port);
-
-static inline struct tty_port *tty_port_get(struct tty_port *port)
-{
- if (port && kref_get_unless_zero(&port->kref))
- return port;
- return NULL;
-}
-
-/* If the cts flow control is enabled, return true. */
-static inline bool tty_port_cts_enabled(struct tty_port *port)
-{
- return test_bit(TTY_PORT_CTS_FLOW, &port->iflags);
-}
-
-static inline void tty_port_set_cts_flow(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_CTS_FLOW, &port->iflags);
- else
- clear_bit(TTY_PORT_CTS_FLOW, &port->iflags);
-}
-
-static inline bool tty_port_active(struct tty_port *port)
-{
- return test_bit(TTY_PORT_ACTIVE, &port->iflags);
-}
-
-static inline void tty_port_set_active(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_ACTIVE, &port->iflags);
- else
- clear_bit(TTY_PORT_ACTIVE, &port->iflags);
-}
+unsigned char tty_get_char_size(unsigned int cflag);
+unsigned char tty_get_frame_size(unsigned int cflag);
-static inline bool tty_port_check_carrier(struct tty_port *port)
-{
- return test_bit(TTY_PORT_CHECK_CD, &port->iflags);
-}
+void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old);
+bool tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
+int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
-static inline void tty_port_set_check_carrier(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_CHECK_CD, &port->iflags);
- else
- clear_bit(TTY_PORT_CHECK_CD, &port->iflags);
-}
+void tty_wakeup(struct tty_struct *tty);
-static inline bool tty_port_suspended(struct tty_port *port)
-{
- return test_bit(TTY_PORT_SUSPENDED, &port->iflags);
-}
-
-static inline void tty_port_set_suspended(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_SUSPENDED, &port->iflags);
- else
- clear_bit(TTY_PORT_SUSPENDED, &port->iflags);
-}
-
-static inline bool tty_port_initialized(struct tty_port *port)
-{
- return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
-}
-
-static inline void tty_port_set_initialized(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_INITIALIZED, &port->iflags);
- else
- clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
-}
-
-static inline bool tty_port_kopened(struct tty_port *port)
-{
- return test_bit(TTY_PORT_KOPENED, &port->iflags);
-}
-
-static inline void tty_port_set_kopened(struct tty_port *port, bool val)
-{
- if (val)
- set_bit(TTY_PORT_KOPENED, &port->iflags);
- else
- clear_bit(TTY_PORT_KOPENED, &port->iflags);
-}
-
-extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
-extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
-extern int tty_port_carrier_raised(struct tty_port *port);
-extern void tty_port_raise_dtr_rts(struct tty_port *port);
-extern void tty_port_lower_dtr_rts(struct tty_port *port);
-extern void tty_port_hangup(struct tty_port *port);
-extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
-extern void tty_port_tty_wakeup(struct tty_port *port);
-extern int tty_port_block_til_ready(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-extern int tty_port_close_start(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
-extern void tty_port_close(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-extern int tty_port_install(struct tty_port *port, struct tty_driver *driver,
- struct tty_struct *tty);
-extern int tty_port_open(struct tty_port *port,
- struct tty_struct *tty, struct file *filp);
-static inline int tty_port_users(struct tty_port *port)
-{
- return port->count + port->blocked_open;
-}
+int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
+void tty_release_struct(struct tty_struct *tty, int idx);
+void tty_init_termios(struct tty_struct *tty);
+void tty_save_termios(struct tty_struct *tty);
+int tty_standard_install(struct tty_driver *driver,
+ struct tty_struct *tty);
-extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
-extern int tty_unregister_ldisc(int disc);
-extern int tty_set_ldisc(struct tty_struct *tty, int disc);
-extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
-extern void tty_ldisc_release(struct tty_struct *tty);
-extern int __must_check tty_ldisc_init(struct tty_struct *tty);
-extern void tty_ldisc_deinit(struct tty_struct *tty);
-extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- char *f, int count);
+extern struct mutex tty_mutex;
/* n_tty.c */
-extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
#ifdef CONFIG_TTY
-extern void __init n_tty_init(void);
+void __init n_tty_init(void);
#else
static inline void n_tty_init(void) { }
#endif
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size);
-extern void tty_audit_exit(void);
-extern void tty_audit_fork(struct signal_struct *sig);
-extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
-extern int tty_audit_push(void);
+void tty_audit_exit(void);
+void tty_audit_fork(struct signal_struct *sig);
+int tty_audit_push(void);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size)
-{
-}
-static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
-{
-}
static inline void tty_audit_exit(void)
{
}
@@ -754,44 +494,23 @@ static inline int tty_audit_push(void)
#endif
/* tty_ioctl.c */
-extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
+int n_tty_ioctl_helper(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
/* vt.c */
-extern int vt_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-extern long vt_compat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+long vt_compat_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
/* tty_mutex.c */
/* functions for preparation of BKL removal */
-extern void tty_lock(struct tty_struct *tty);
-extern int tty_lock_interruptible(struct tty_struct *tty);
-extern void tty_unlock(struct tty_struct *tty);
-extern void tty_lock_slave(struct tty_struct *tty);
-extern void tty_unlock_slave(struct tty_struct *tty);
-extern void tty_set_lock_subclass(struct tty_struct *tty);
-
-#ifdef CONFIG_PROC_FS
-extern void proc_tty_register_driver(struct tty_driver *);
-extern void proc_tty_unregister_driver(struct tty_driver *);
-#else
-static inline void proc_tty_register_driver(struct tty_driver *d) {}
-static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
-#endif
-
-#define tty_msg(fn, tty, f, ...) \
- fn("%s %s: " f, tty_driver_name(tty), tty_name(tty), ##__VA_ARGS__)
-
-#define tty_debug(tty, f, ...) tty_msg(pr_debug, tty, f, ##__VA_ARGS__)
-#define tty_info(tty, f, ...) tty_msg(pr_info, tty, f, ##__VA_ARGS__)
-#define tty_notice(tty, f, ...) tty_msg(pr_notice, tty, f, ##__VA_ARGS__)
-#define tty_warn(tty, f, ...) tty_msg(pr_warn, tty, f, ##__VA_ARGS__)
-#define tty_err(tty, f, ...) tty_msg(pr_err, tty, f, ##__VA_ARGS__)
-
-#define tty_info_ratelimited(tty, f, ...) \
- tty_msg(pr_info_ratelimited, tty, f, ##__VA_ARGS__)
+void tty_lock(struct tty_struct *tty);
+int tty_lock_interruptible(struct tty_struct *tty);
+void tty_unlock(struct tty_struct *tty);
+void tty_lock_slave(struct tty_struct *tty);
+void tty_unlock_slave(struct tty_struct *tty);
+void tty_set_lock_subclass(struct tty_struct *tty);
#endif
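The tty lock wrappers above guard the open/close/hangup state machine. A hypothetical helper needing that exclusion would simply bracket its work:

static void sketch_with_tty_locked(struct tty_struct *tty)
{
	tty_lock(tty);
	/* ... state protected by @legacy_mutex may be touched here ... */
	tty_unlock(tty);
}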
diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h
new file mode 100644
index 000000000000..31125e3be3c5
--- /dev/null
+++ b/include/linux/tty_buffer.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TTY_BUFFER_H
+#define _LINUX_TTY_BUFFER_H
+
+#include <linux/atomic.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct tty_buffer {
+ union {
+ struct tty_buffer *next;
+ struct llist_node free;
+ };
+ unsigned int used;
+ unsigned int size;
+ unsigned int commit;
+ unsigned int lookahead; /* Lazy update on recv, can become less than "read" */
+ unsigned int read;
+ bool flags;
+ /* Data points here */
+ u8 data[] __aligned(sizeof(unsigned long));
+};
+
+static inline u8 *char_buf_ptr(struct tty_buffer *b, unsigned int ofs)
+{
+ return b->data + ofs;
+}
+
+static inline u8 *flag_buf_ptr(struct tty_buffer *b, unsigned int ofs)
+{
+ return char_buf_ptr(b, ofs) + b->size;
+}
+
+struct tty_bufhead {
+ struct tty_buffer *head; /* Queue head */
+ struct work_struct work;
+ struct mutex lock;
+ atomic_t priority;
+ struct tty_buffer sentinel;
+ struct llist_head free; /* Free queue head */
+ atomic_t mem_used; /* In-use buffers excluding free list */
+ int mem_limit;
+ struct tty_buffer *tail; /* Active buffer */
+};
+
+/*
+ * When a break, frame error, or parity error happens, these codes are
+ * stuffed into the flags buffer.
+ */
+#define TTY_NORMAL 0
+#define TTY_BREAK 1
+#define TTY_FRAME 2
+#define TTY_PARITY 3
+#define TTY_OVERRUN 4
+
+#endif
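char_buf_ptr() and flag_buf_ptr() encode the buffer layout: @size data bytes followed, only when @flags is set, by @size flag bytes. A sketch of a consumer walking the committed part of a buffer (the function name is invented):

static void sketch_walk_buffer(struct tty_buffer *b)
{
	unsigned int i;

	for (i = b->read; i < b->commit; i++) {
		u8 ch = *char_buf_ptr(b, i);
		u8 fl = b->flags ? *flag_buf_ptr(b, i) : TTY_NORMAL;

		(void)ch;	/* the data byte */
		(void)fl;	/* TTY_NORMAL, TTY_BREAK, TTY_FRAME, ... */
	}
}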
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 358446247ccd..7372124fbf90 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -2,255 +2,350 @@
#ifndef _LINUX_TTY_DRIVER_H
#define _LINUX_TTY_DRIVER_H
-/*
- * This structure defines the interface between the low-level tty
- * driver and the tty routines. The following routines can be
- * defined; unless noted otherwise, they are optional, and can be
- * filled in with a null pointer.
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/termios.h>
+#include <linux/seq_file.h>
+
+struct tty_struct;
+struct tty_driver;
+struct serial_icounter_struct;
+struct serial_struct;
+
+/**
+ * struct tty_operations -- interface between driver and tty
+ *
+ * @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *,
+ * int idx)``
*
- * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
+ * Return the tty device corresponding to @idx, %NULL if there is not
+ * one currently in use and an %ERR_PTR value on error. Called under
+ * %tty_mutex (for now!)
*
- * Return the tty device corresponding to idx, NULL if there is not
- * one currently in use and an ERR_PTR value on error. Called under
- * tty_mutex (for now!)
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @install: ``int ()(struct tty_driver *self, struct tty_struct *tty)``
*
- * int (*install)(struct tty_driver *self, struct tty_struct *tty)
+ * Install a new @tty into the @self's internal tables. Used in
+ * conjunction with @lookup and @remove methods.
*
- * Install a new tty into the tty driver internal tables. Used in
- * conjunction with lookup and remove methods.
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @remove: ``void ()(struct tty_driver *self, struct tty_struct *tty)``
*
- * void (*remove)(struct tty_driver *self, struct tty_struct *tty)
+ * Remove a closed @tty from the @self's internal tables. Used in
+ * conjunction with the @lookup and @install methods.
*
- * Remove a closed tty from the tty driver internal tables. Used in
- * conjunction with lookup and remove methods.
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @open: ``int ()(struct tty_struct *tty, struct file *)``
*
- * int (*open)(struct tty_struct * tty, struct file * filp);
+ * This routine is called when a particular @tty device is opened. This
+ * routine is mandatory; if this routine is not filled in, the attempted
+ * open will fail with %ENODEV.
*
- * This routine is called when a particular tty device is opened.
- * This routine is mandatory; if this routine is not filled in,
- * the attempted open will fail with ENODEV.
+ * Required method. Called with tty lock held. May sleep.
*
- * Required method. Called with tty lock held.
+ * @close: ``void ()(struct tty_struct *tty, struct file *)``
*
- * void (*close)(struct tty_struct * tty, struct file * filp);
+ * This routine is called when a particular @tty device is closed. At the
+ * point of return from this call the driver must make no further ldisc
+ * calls of any kind.
*
- * This routine is called when a particular tty device is closed.
- * Note: called even if the corresponding open() failed.
+ * Remark: called even if the corresponding @open() failed.
*
- * Required method. Called with tty lock held.
+ * Required method. Called with tty lock held. May sleep.
*
- * void (*shutdown)(struct tty_struct * tty);
+ * @shutdown: ``void ()(struct tty_struct *tty)``
*
- * This routine is called under the tty lock when a particular tty device
- * is closed for the last time. It executes before the tty resources
- * are freed so may execute while another function holds a tty kref.
+ * This routine is called under the tty lock when a particular @tty device
+ * is closed for the last time. It executes before the @tty resources
+ * are freed so may execute while another function holds a @tty kref.
*
- * void (*cleanup)(struct tty_struct * tty);
+ * @cleanup: ``void ()(struct tty_struct *tty)``
*
- * This routine is called asynchronously when a particular tty device
+ * This routine is called asynchronously when a particular @tty device
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
+ * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)``
*
- * int (*write)(struct tty_struct * tty,
- * const unsigned char *buf, int count);
- *
- * This routine is called by the kernel to write a series of
- * characters to the tty device. The characters may come from
- * user space or kernel space. This routine will return the
+ * This routine is called by the kernel to write a series (@count) of
+ * characters (@buf) to the @tty device. The characters may come from
+ * user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*
- * Optional: Required for writable devices.
+ * May occur in parallel in special cases. Because this includes panic
+ * paths, drivers generally shouldn't try to do clever locking here.
*
- * int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ * Optional: Required for writable devices. May not sleep.
*
- * This routine is called by the kernel to write a single
- * character to the tty device. If the kernel uses this routine,
- * it must call the flush_chars() routine (if defined) when it is
- * done stuffing characters into the driver. If there is no room
- * in the queue, the character is ignored.
+ * @put_char: ``int ()(struct tty_struct *tty, u8 ch)``
*
- * Optional: Kernel will use the write method if not provided.
+ * This routine is called by the kernel to write a single character @ch to
+ * the @tty device. If the kernel uses this routine, it must call the
+ * @flush_chars() routine (if defined) when it is done stuffing characters
+ * into the driver. If there is no room in the queue, the character is
+ * ignored.
*
- * Note: Do not call this function directly, call tty_put_char
+ * Optional: Kernel will use the @write method if not provided. Do not
+ * call this function directly, call tty_put_char().
*
- * void (*flush_chars)(struct tty_struct *tty);
+ * @flush_chars: ``void ()(struct tty_struct *tty)``
*
- * This routine is called by the kernel after it has written a
- * series of characters to the tty device using put_char().
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using @put_char().
*
- * Optional:
+ * Optional. Do not call this function directly, call
+ * tty_driver_flush_chars().
*
- * Note: Do not call this function directly, call tty_driver_flush_chars
- *
- * int (*write_room)(struct tty_struct *tty);
+ * @write_room: ``unsigned int ()(struct tty_struct *tty)``
*
- * This routine returns the numbers of characters the tty driver
- * will accept for queuing to be written. This number is subject
- * to change as output buffers get emptied, or if the output flow
+ * This routine returns the number of characters the @tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
 * control takes effect.
*
- * Required if write method is provided else not needed.
+ * The ldisc is responsible for being intelligent about multi-threading of
+ * write_room/write calls.
+ *
+ * Required if the @write method is provided, else not needed. Do not call
+ * this function directly, call tty_write_room().
+ *
+ * @chars_in_buffer: ``unsigned int ()(struct tty_struct *tty)``
+ *
+ * This routine returns the number of characters in the device private
+ * output queue. Used in tty_wait_until_sent() and for poll()
+ * implementation.
+ *
+ * Optional: if not provided, it is assumed there is no queue on the
+ * device. Do not call this function directly, call tty_chars_in_buffer().
+ *
+ * @ioctl: ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
*
- * Note: Do not call this function directly, call tty_write_room
- *
- * int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+ * This routine allows the @tty driver to implement device-specific
+ * ioctls. If the ioctl number passed in @cmd is not recognized by the
+ * driver, it should return %ENOIOCTLCMD.
*
- * This routine allows the tty driver to implement
- * device-specific ioctls. If the ioctl number passed in cmd
- * is not recognized by the driver, it should return ENOIOCTLCMD.
+ * Optional.
*
- * Optional
+ * @compat_ioctl: ``long ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
*
- * long (*compat_ioctl)(struct tty_struct *tty,,
- * unsigned int cmd, unsigned long arg);
+ * Implement ioctl processing for a 32-bit process on a 64-bit system.
*
- * implement ioctl processing for 32 bit process on 64 bit system
+ * Optional.
*
- * Optional
- *
- * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ * @set_termios: ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
- * This routine allows the tty driver to be notified when
- * device's termios settings have changed.
+ * This routine allows the @tty driver to be notified when device's
+ * termios settings have changed. New settings are in @tty->termios.
+ * Previous settings are passed in the @old argument.
*
- * Optional: Called under the termios lock
+ * The API is defined such that the driver should return the actual modes
+ * selected. This means that the driver is responsible for modifying any
+ * bits in @tty->termios it cannot fulfill to indicate the actual modes
+ * being used.
*
+ * Optional. Called under the @tty->termios_rwsem. May sleep.
*
- * void (*set_ldisc)(struct tty_struct *tty);
+ * @set_ldisc: ``void ()(struct tty_struct *tty)``
*
- * This routine allows the tty driver to be notified when the
- * device's termios settings have changed.
+ * This routine allows the @tty driver to be notified when the device's
+ * line discipline is being changed. At the point this is done the
+ * discipline is not yet usable.
*
- * Optional: Called under BKL (currently)
- *
- * void (*throttle)(struct tty_struct * tty);
+ * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
*
- * This routine notifies the tty driver that input buffers for
- * the line discipline are close to full, and it should somehow
- * signal that no more characters should be sent to the tty.
+ * @throttle: ``void ()(struct tty_struct *tty)``
*
- * Optional: Always invoke via tty_throttle(), called under the
- * termios lock.
- *
- * void (*unthrottle)(struct tty_struct * tty);
+ * This routine notifies the @tty driver that input buffers for the line
+ * discipline are close to full, and it should somehow signal that no more
+ * characters should be sent to the @tty.
*
- * This routine notifies the tty drivers that it should signals
- * that characters can now be sent to the tty without fear of
- * overrunning the input buffers of the line disciplines.
- *
- * Optional: Always invoke via tty_unthrottle(), called under the
- * termios lock.
+ * Serialization including with @unthrottle() is the job of the ldisc
+ * layer.
*
- * void (*stop)(struct tty_struct *tty);
+ * Optional: Always invoke via tty_throttle_safe(). Called under the
+ * @tty->termios_rwsem.
*
- * This routine notifies the tty driver that it should stop
- * outputting characters to the tty device.
+ * @unthrottle: ``void ()(struct tty_struct *tty)``
*
- * Called with ->flow_lock held. Serialized with start() method.
+ * This routine notifies the @tty driver that it should signal that
+ * characters can now be sent to the @tty without fear of overrunning the
+ * input buffers of the line disciplines.
*
- * Optional:
+ * Optional. Always invoke via tty_unthrottle(). Called under the
+ * @tty->termios_rwsem.
*
- * Note: Call stop_tty not this method.
- *
- * void (*start)(struct tty_struct *tty);
+ * @stop: ``void ()(struct tty_struct *tty)``
*
- * This routine notifies the tty driver that it resume sending
+ * This routine notifies the @tty driver that it should stop outputting
* characters to the tty device.
*
- * Called with ->flow_lock held. Serialized with stop() method.
+ * Called with @tty->flow.lock held. Serialized with @start() method.
*
- * Optional:
+ * Optional. Always invoke via stop_tty().
*
- * Note: Call start_tty not this method.
- *
- * void (*hangup)(struct tty_struct *tty);
+ * @start: ``void ()(struct tty_struct *tty)``
*
- * This routine notifies the tty driver that it should hang up the
- * tty device.
+ * This routine notifies the @tty driver that it should resume sending
+ * characters to the @tty device.
*
- * Optional:
+ * Called with @tty->flow.lock held. Serialized with the @stop() method.
*
- * Called with tty lock held.
+ * Optional. Always invoke via start_tty().
*
- * int (*break_ctl)(struct tty_struct *tty, int state);
+ * @hangup: ``void ()(struct tty_struct *tty)``
*
- * This optional routine requests the tty driver to turn on or
- * off BREAK status on the RS-232 port. If state is -1,
- * then the BREAK status should be turned on; if state is 0, then
- * BREAK should be turned off.
+ * This routine notifies the @tty driver that it should hang up the @tty
+ * device.
*
- * If this routine is implemented, the high-level tty driver will
- * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK,
- * TIOCCBRK.
+ * Optional. Called with tty lock held.
*
- * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface
- * will also be called with actual times and the hardware is expected
- * to do the delay work itself. 0 and -1 are still used for on/off.
+ * @break_ctl: ``int ()(struct tty_struct *tty, int state)``
*
- * Optional: Required for TCSBRK/BRKP/etc handling.
+ * This optional routine requests the @tty driver to turn on or off BREAK
+ * status on the RS-232 port. If @state is -1, then the BREAK status
+ * should be turned on; if @state is 0, then BREAK should be turned off.
*
- * void (*wait_until_sent)(struct tty_struct *tty, int timeout);
- *
- * This routine waits until the device has written out all of the
- * characters in its transmitter FIFO.
+ * If this routine is implemented, the high-level tty driver will handle
+ * the following ioctls: %TCSBRK, %TCSBRKP, %TIOCSBRK, %TIOCCBRK.
*
- * Optional: If not provided the device is assumed to have no FIFO
+ * If the driver sets %TTY_DRIVER_HARDWARE_BREAK in tty_alloc_driver(),
+ * then the interface will also be called with actual times and the
+ * hardware is expected to do the delay work itself. 0 and -1 are still
+ * used for on/off.
*
- * Note: Usually correct to call tty_wait_until_sent
+ * Optional: Required for %TCSBRK/%TCSBRKP/etc. handling. May sleep.
*
- * void (*send_xchar)(struct tty_struct *tty, char ch);
+ * @flush_buffer: ``void ()(struct tty_struct *tty)``
*
- * This routine is used to send a high-priority XON/XOFF
- * character to the device.
+ * This routine discards device private output buffer. Invoked on close,
+ * hangup, to implement %TCOFLUSH ioctl and similar.
*
- * Optional: If not provided then the write method is called under
- * the atomic write lock to keep it serialized with the ldisc.
+ * Optional: if not provided, it is assumed there is no queue on the
+ * device. Do not call this function directly, call
+ * tty_driver_flush_buffer().
*
- * int (*resize)(struct tty_struct *tty, struct winsize *ws)
+ * @wait_until_sent: ``void ()(struct tty_struct *tty, int timeout)``
*
- * Called when a termios request is issued which changes the
- * requested terminal geometry.
+ * This routine waits until the device has written out all of the
+ * characters in its transmitter FIFO, or until @timeout (in jiffies) is
+ * reached.
+ *
+ * Optional: If not provided, the device is assumed to have no FIFO.
+ * Usually correct to invoke via tty_wait_until_sent(). May sleep.
+ *
+ * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)``
+ *
+ * This routine is used to send a high-priority XON/XOFF character (@ch)
+ * to the @tty device.
+ *
+ * Optional: If not provided, then the @write method is called under
+ * the @tty->atomic_write_lock to keep it serialized with the ldisc.
+ *
+ * @tiocmget: ``int ()(struct tty_struct *tty)``
+ *
+ * This routine is used to obtain the modem status bits from the @tty
+ * driver.
+ *
+ * Optional: If not provided, then %ENOTTY is returned from the %TIOCMGET
+ * ioctl. Do not call this function directly, call tty_tiocmget().
+ *
+ * @tiocmset: ``int ()(struct tty_struct *tty,
+ * unsigned int set, unsigned int clear)``
+ *
+ * This routine is used to set the modem status bits to the @tty driver.
+ * First, @clear bits should be cleared, then @set bits set.
+ *
+ * Optional: If not provided, then %ENOTTY is returned from the %TIOCMSET
+ * ioctl. Do not call this function directly, call tty_tiocmset().
+ *
+ * @resize: ``int ()(struct tty_struct *tty, struct winsize *ws)``
+ *
+ * Called when a termios request is issued which changes the requested
+ * terminal geometry to @ws.
*
* Optional: the default action is to update the termios structure
* without error. This is usually the correct behaviour. Drivers should
- * not force errors here if they are not resizable objects (eg a serial
+ * not force errors here if they are not resizable objects (e.g. a serial
* line). See tty_do_resize() if you need to wrap the standard method
- * in your own logic - the usual case.
+ * in your own logic -- the usual case.
+ *
+ * @get_icount: ``int ()(struct tty_struct *tty,
+ * struct serial_icounter_struct *icount)``
+ *
+ * Called when the @tty device receives a %TIOCGICOUNT ioctl. Passed a
+ * kernel structure @icount to complete.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ *
+ * @get_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)``
+ *
+ * Called when the @tty device receives a %TIOCGSERIAL ioctl. Passed a
+ * kernel structure @p (&struct serial_struct) to complete.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ * Do not call this function directly, call tty_tiocgserial().
*
- * void (*set_termiox)(struct tty_struct *tty, struct termiox *new);
+ * @set_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)``
*
- * Called when the device receives a termiox based ioctl. Passes down
- * the requested data from user space. This method will not be invoked
- * unless the tty also has a valid tty->termiox pointer.
+ * Called when the @tty device receives a %TIOCSSERIAL ioctl. Passed a
+ * kernel structure @p (&struct serial_struct) to set the values from.
*
- * Optional: Called under the termios lock
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ * Do not call this function directly, call tty_tiocsserial().
*
- * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
+ * @show_fdinfo: ``void ()(struct tty_struct *tty, struct seq_file *m)``
*
- * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
- * structure to complete. This method is optional and will only be called
- * if provided (otherwise EINVAL will be returned).
+ * Called when the @tty device file descriptor receives a fdinfo request
+ * from VFS (to show in /proc/<pid>/fdinfo/). @m should be filled with
+ * information.
+ *
+ * Optional: called only if provided, otherwise nothing is written to @m.
+ * Do not call this function directly, call tty_show_fdinfo().
+ *
+ * @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)``
+ *
+ * kgdboc support (Documentation/dev-tools/kgdb.rst). This routine is
+ * called to initialize the HW for later use by calling @poll_get_char or
+ * @poll_put_char.
+ *
+ * Optional: called only if provided, otherwise skipped as a non-polling
+ * driver.
+ *
+ * @poll_get_char: ``int ()(struct tty_driver *driver, int line)``
+ *
+ * kgdboc support (see @poll_init). @driver should read a character from a
+ * tty identified by @line and return it.
+ *
+ * Optional: called only if @poll_init provided.
+ *
+ * @poll_put_char: ``void ()(struct tty_driver *driver, int line, char ch)``
+ *
+ * kgdboc support (see @poll_init). @driver should write character @ch to
+ * a tty identified by @line.
+ *
+ * Optional: called only if @poll_init provided.
+ *
+ * @proc_show: ``int ()(struct seq_file *m, void *driver)``
+ *
+ * Driver @driver (cast to &struct tty_driver) can show additional info in
+ * /proc/tty/driver/<driver_name>. It is enough to fill the information
+ * into @m.
+ *
+ * Optional: called only if provided, otherwise no /proc entry created.
+ *
+ * This structure defines the interface between the low-level tty driver and
+ * the tty routines. Unless noted otherwise, the routines above are
+ * optional and can be filled in with a %NULL pointer.
*/
-
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/cdev.h>
-#include <linux/termios.h>
-#include <linux/seq_file.h>
-
-struct tty_struct;
-struct tty_driver;
-struct serial_icounter_struct;
-struct serial_struct;
-
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
struct file *filp, int idx);
@@ -260,17 +355,16 @@ struct tty_operations {
void (*close)(struct tty_struct * tty, struct file * filp);
void (*shutdown)(struct tty_struct *tty);
void (*cleanup)(struct tty_struct *tty);
- int (*write)(struct tty_struct * tty,
- const unsigned char *buf, int count);
- int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ ssize_t (*write)(struct tty_struct *tty, const u8 *buf, size_t count);
+ int (*put_char)(struct tty_struct *tty, u8 ch);
void (*flush_chars)(struct tty_struct *tty);
- int (*write_room)(struct tty_struct *tty);
- int (*chars_in_buffer)(struct tty_struct *tty);
+ unsigned int (*write_room)(struct tty_struct *tty);
+ unsigned int (*chars_in_buffer)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
long (*compat_ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
- void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ void (*set_termios)(struct tty_struct *tty, const struct ktermios *old);
void (*throttle)(struct tty_struct * tty);
void (*unthrottle)(struct tty_struct * tty);
void (*stop)(struct tty_struct *tty);
@@ -280,12 +374,11 @@ struct tty_operations {
void (*flush_buffer)(struct tty_struct *tty);
void (*set_ldisc)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, int timeout);
- void (*send_xchar)(struct tty_struct *tty, char ch);
+ void (*send_xchar)(struct tty_struct *tty, u8 ch);
int (*tiocmget)(struct tty_struct *tty);
int (*tiocmset)(struct tty_struct *tty,
unsigned int set, unsigned int clear);
int (*resize)(struct tty_struct *tty, struct winsize *ws);
- int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
@@ -296,26 +389,62 @@ struct tty_operations {
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
- int (*proc_show)(struct seq_file *, void *);
+ int (*proc_show)(struct seq_file *m, void *driver);
} __randomize_layout;
+/**
+ * struct tty_driver -- driver for TTY devices
+ *
+ * @kref: reference counting. Reaching zero frees all the internals and the
+ * driver.
+ * @cdevs: allocated/registered character /dev devices
+ * @owner: module owning this driver. A driver in use cannot be rmmod'ed.
+ * Automatically set by tty_alloc_driver().
+ * @driver_name: name of the driver used in /proc/tty
+ * @name: used for constructing /dev node name
+ * @name_base: used as a number base for constructing /dev node name
+ * @major: major /dev device number (zero for autoassignment)
+ * @minor_start: the first minor /dev device number
+ * @num: number of devices allocated
+ * @type: type of tty driver (%TTY_DRIVER_TYPE_)
+ * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_)
+ * @init_termios: termios to set to each tty initially (e.g. %tty_std_termios)
+ * @flags: tty driver flags (%TTY_DRIVER_)
+ * @proc_entry: proc fs entry, used internally
+ * @other: driver of the linked tty; only used for the PTY driver
+ * @ttys: array of active &struct tty_struct, set by tty_standard_install()
+ * @ports: array of &struct tty_port; can be set during initialization by
+ * tty_port_link_device() and similar
+ * @termios: storage for termios at each TTY close for the next open
+ * @driver_state: pointer to driver's arbitrary data
+ * @ops: driver hooks for TTYs. Set them using tty_set_operations(). Use &struct
+ * tty_port helpers in them as much as possible.
+ * @tty_drivers: used internally to link tty_drivers together
+ *
+ * The usual handling of &struct tty_driver is to allocate it by
+ * tty_alloc_driver(), set up all the necessary members, and register it by
+ * tty_register_driver(). Finally, the driver is torn down by calling
+ * tty_unregister_driver() followed by tty_driver_kref_put().
+ *
+ * The fields required to be set before calling tty_register_driver() include
+ * @driver_name, @name, @type, @subtype, @init_termios, and @ops.
+ */
struct tty_driver {
- int magic; /* magic number for this structure */
- struct kref kref; /* Reference management */
+ struct kref kref;
struct cdev **cdevs;
struct module *owner;
const char *driver_name;
const char *name;
- int name_base; /* offset of printed name */
- int major; /* major device number */
- int minor_start; /* start of minor device number */
- unsigned int num; /* number of devices allocated */
- short type; /* type of tty driver */
- short subtype; /* subtype of tty driver */
- struct ktermios init_termios; /* Initial termios */
- unsigned long flags; /* tty driver flags */
- struct proc_dir_entry *proc_entry; /* /proc fs entry */
- struct tty_driver *other; /* only used for the PTY driver */
+ int name_base;
+ int major;
+ int minor_start;
+ unsigned int num;
+ short type;
+ short subtype;
+ struct ktermios init_termios;
+ unsigned long flags;
+ struct proc_dir_entry *proc_entry;
+ struct tty_driver *other;
/*
* Pointer to the tty data structures
@@ -335,83 +464,75 @@ struct tty_driver {
extern struct list_head tty_drivers;
-extern struct tty_driver *__tty_alloc_driver(unsigned int lines,
- struct module *owner, unsigned long flags);
-extern void put_tty_driver(struct tty_driver *driver);
-extern void tty_set_operations(struct tty_driver *driver,
- const struct tty_operations *op);
-extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
+struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
+ unsigned long flags);
+struct tty_driver *tty_find_polling_driver(char *name, int *line);
-extern void tty_driver_kref_put(struct tty_driver *driver);
+void tty_driver_kref_put(struct tty_driver *driver);
/* Use TTY_DRIVER_* flags below */
#define tty_alloc_driver(lines, flags) \
__tty_alloc_driver(lines, THIS_MODULE, flags)
-/*
- * DEPRECATED Do not use this in new code, use tty_alloc_driver instead.
- * (And change the return value checks.)
- */
-static inline struct tty_driver *alloc_tty_driver(unsigned int lines)
-{
- struct tty_driver *ret = tty_alloc_driver(lines, 0);
- if (IS_ERR(ret))
- return NULL;
- return ret;
-}
-
static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
{
kref_get(&d->kref);
return d;
}
-/* tty driver magic number */
-#define TTY_DRIVER_MAGIC 0x5402
+static inline void tty_set_operations(struct tty_driver *driver,
+ const struct tty_operations *op)
+{
+ driver->ops = op;
+}
-/*
- * tty driver flags
- *
- * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
- * termios setting when the last process has closed the device.
- * Used for PTY's, in particular.
- *
- * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
- * guarantee never not to set any special character handling
- * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
- * !INPCK)). That is, if there is no reason for the driver to
- * send notifications of parity and break characters up to the
- * line driver, it won't do so. This allows the line driver to
- * optimize for this case if this flag is set. (Note that there
- * is also a promise, if the above case is true, not to signal
- * overruns, either.)
- *
- * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need
- * to be registered with a call to tty_register_device() when the
- * device is found in the system and unregistered with a call to
- * tty_unregister_device() so the devices will be show up
- * properly in sysfs. If not set, driver->num entries will be
- * created by the tty core in sysfs when tty_register_driver() is
- * called. This is to be used by drivers that have tty devices
- * that can appear and disappear while the main tty driver is
- * registered with the tty core.
- *
- * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead
- * use dynamic memory keyed through the devpts filesystem. This
- * is only applicable to the pty driver.
- *
- * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass
- * the requested timeout to the caller instead of using a simple
- * on/off interface.
- *
- * TTY_DRIVER_DYNAMIC_ALLOC -- do not allocate structures which are
- * needed per line for this driver as it would waste memory.
- * The driver will take care.
- *
- * TTY_DRIVER_UNNUMBERED_NODE -- do not create numbered /dev nodes. In
- * other words create /dev/ttyprintk and not /dev/ttyprintk0.
- * Applicable only when a driver for a single tty device is
- * being allocated.
+/**
+ * DOC: TTY Driver Flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS
+ * Requests the tty layer to reset the termios setting when the last
+ * process has closed the device. Used for PTYs, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW
+ * Indicates that the driver will guarantee not to set any special
+ * character handling flags if this is set for the tty:
+ *
+ * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
+ *
+ * That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the line
+ * driver, it won't do so. This allows the line driver to optimize for
+ * this case if this flag is set. (Note that there is also a promise, if
+ * the above case is true, not to signal overruns, either.)
+ *
+ * TTY_DRIVER_DYNAMIC_DEV
+ * The individual tty devices need to be registered with a call to
+ * tty_register_device() when the device is found in the system and
+ * unregistered with a call to tty_unregister_device() so the devices will
+ * be show up properly in sysfs. If not set, all &tty_driver.num entries
+ * will be created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices that can
+ * appear and disappear while the main tty driver is registered with the
+ * tty core.
+ *
+ * TTY_DRIVER_DEVPTS_MEM
+ * Don't use the standard arrays (&tty_driver.ttys and
+ * &tty_driver.termios), instead use dynamic memory keyed through the
+ * devpts filesystem. This is only applicable to the PTY driver.
+ *
+ * TTY_DRIVER_HARDWARE_BREAK
+ * Hardware handles break signals. Pass the requested timeout to the
+ * &tty_operations.break_ctl instead of using a simple on/off interface.
+ *
+ * TTY_DRIVER_DYNAMIC_ALLOC
+ * Do not allocate structures which are needed per line for this driver
+ * (&tty_driver.ports) as it would waste memory. The driver takes care
+ * of them itself. This is only applicable to the PTY driver.
+ *
+ * TTY_DRIVER_UNNUMBERED_NODE
+ * Do not create numbered ``/dev`` nodes. For example, create
+ * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
+ * driver for a single tty device is being allocated.
*/
#define TTY_DRIVER_INSTALLED 0x0001
#define TTY_DRIVER_RESET_TERMIOS 0x0002
@@ -443,4 +564,21 @@ static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
/* serial subtype definitions */
#define SERIAL_TYPE_NORMAL 1
+int tty_register_driver(struct tty_driver *driver);
+void tty_unregister_driver(struct tty_driver *driver);
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+ struct device *dev);
+struct device *tty_register_device_attr(struct tty_driver *driver,
+ unsigned index, struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+void tty_unregister_device(struct tty_driver *driver, unsigned index);
+
+#ifdef CONFIG_PROC_FS
+void proc_tty_register_driver(struct tty_driver *);
+void proc_tty_unregister_driver(struct tty_driver *);
+#else
+static inline void proc_tty_register_driver(struct tty_driver *d) {}
+static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
+#endif
+
#endif /* #ifdef _LINUX_TTY_DRIVER_H */
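As a concrete sketch of the allocate/register lifecycle documented above: the
my_tty names, MY_TTY_MINORS count and the elided hooks below are illustrative
assumptions, not part of this header.

    /* Minimal sketch of the tty_driver lifecycle; my_tty_ops, MY_TTY_MINORS
     * and the "ttyMY" node name are hypothetical. */
    static const struct tty_operations my_tty_ops = {
            /* .open, .close, .write, ... hypothetical hooks elided */
    };

    static struct tty_driver *my_driver;

    static int __init my_tty_init(void)
    {
            int ret;

            my_driver = tty_alloc_driver(MY_TTY_MINORS,
                            TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
            if (IS_ERR(my_driver))
                    return PTR_ERR(my_driver);

            my_driver->driver_name = "my_tty";
            my_driver->name = "ttyMY";
            my_driver->type = TTY_DRIVER_TYPE_SERIAL;
            my_driver->subtype = SERIAL_TYPE_NORMAL;
            my_driver->init_termios = tty_std_termios;
            tty_set_operations(my_driver, &my_tty_ops);

            ret = tty_register_driver(my_driver);
            if (ret) {
                    tty_driver_kref_put(my_driver);
                    return ret;
            }
            return 0;
    }

    static void __exit my_tty_exit(void)
    {
            tty_unregister_driver(my_driver);
            tty_driver_kref_put(my_driver);
    }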
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 767f62086bd9..af4fce98f64e 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -2,42 +2,91 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
-extern int tty_buffer_set_limit(struct tty_port *port, int limit);
-extern int tty_buffer_space_avail(struct tty_port *port);
-extern int tty_buffer_request_room(struct tty_port *port, size_t size);
-extern int tty_insert_flip_string_flags(struct tty_port *port,
- const unsigned char *chars, const char *flags, size_t size);
-extern int tty_insert_flip_string_fixed_flag(struct tty_port *port,
- const unsigned char *chars, char flag, size_t size);
-extern int tty_prepare_flip_string(struct tty_port *port,
- unsigned char **chars, size_t size);
-extern void tty_flip_buffer_push(struct tty_port *port);
-void tty_schedule_flip(struct tty_port *port);
-int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
-
-static inline int tty_insert_flip_char(struct tty_port *port,
- unsigned char ch, char flag)
+#include <linux/tty_buffer.h>
+#include <linux/tty_port.h>
+
+struct tty_ldisc;
+
+int tty_buffer_set_limit(struct tty_port *port, int limit);
+unsigned int tty_buffer_space_avail(struct tty_port *port);
+int tty_buffer_request_room(struct tty_port *port, size_t size);
+size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
+ const u8 *flags, bool mutable_flags,
+ size_t size);
+size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size);
+void tty_flip_buffer_push(struct tty_port *port);
+
+/**
+ * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters passed are
+ * marked with the supplied flag.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_fixed_flag(struct tty_port *port,
+ const u8 *chars, u8 flag,
+ size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, &flag, false, size);
+}
+
+/**
+ * tty_insert_flip_string_flags - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character the flags
+ * array indicates the status of the character.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_flags(struct tty_port *port,
+ const u8 *chars,
+ const u8 *flags, size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, flags, true, size);
+}
+
+/**
+ * tty_insert_flip_char - add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte @ch to the tty buffering, with an optional flag.
+ */
+static inline size_t tty_insert_flip_char(struct tty_port *port, u8 ch, u8 flag)
{
struct tty_buffer *tb = port->buf.tail;
int change;
- change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL);
+ change = !tb->flags && (flag != TTY_NORMAL);
if (!change && tb->used < tb->size) {
- if (~tb->flags & TTYB_NORMAL)
+ if (tb->flags)
*flag_buf_ptr(tb, tb->used) = flag;
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return __tty_insert_flip_char(port, ch, flag);
+ return __tty_insert_flip_string_flags(port, &ch, &flag, false, 1);
}
-static inline int tty_insert_flip_string(struct tty_port *port,
- const unsigned char *chars, size_t size)
+static inline size_t tty_insert_flip_string(struct tty_port *port,
+ const u8 *chars, size_t size)
{
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
-extern void tty_buffer_lock_exclusive(struct tty_port *port);
-extern void tty_buffer_unlock_exclusive(struct tty_port *port);
+size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
+ size_t count);
+
+void tty_buffer_lock_exclusive(struct tty_port *port);
+void tty_buffer_unlock_exclusive(struct tty_port *port);
#endif /* _LINUX_TTY_FLIP_H */
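The flip-buffer helpers above are typically paired in a driver's receive path:
queue the bytes first, then push them to the line discipline. A hedged sketch
(my_rx and its data source are hypothetical):

    /* Sketch: queue received bytes and kick them up to the ldisc. */
    static void my_rx(struct tty_port *port, const u8 *data, size_t len)
    {
            size_t copied;

            /* All bytes are queued with the TTY_NORMAL flag. */
            copied = tty_insert_flip_string(port, data, len);
            if (copied < len)
                    pr_warn("my_tty: dropped %zu bytes\n", len - copied);

            /* Make the queued data visible to the line discipline. */
            tty_flip_buffer_push(port);
    }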
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index b1e6043e9917..af01e89074b2 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -2,130 +2,14 @@
#ifndef _LINUX_TTY_LDISC_H
#define _LINUX_TTY_LDISC_H
-/*
- * This structure defines the interface between the tty line discipline
- * implementation and the tty routines. The following routines can be
- * defined; unless noted otherwise, they are optional, and can be
- * filled in with a null pointer.
- *
- * int (*open)(struct tty_struct *);
- *
- * This function is called when the line discipline is associated
- * with the tty. The line discipline can use this as an
- * opportunity to initialize any state needed by the ldisc routines.
- *
- * void (*close)(struct tty_struct *);
- *
- * This function is called when the line discipline is being
- * shutdown, either because the tty is being closed or because
- * the tty is being changed to use a new line discipline
- *
- * void (*flush_buffer)(struct tty_struct *tty);
- *
- * This function instructs the line discipline to clear its
- * buffers of any input characters it may have queued to be
- * delivered to the user mode process.
- *
- * ssize_t (*read)(struct tty_struct * tty, struct file * file,
- * unsigned char * buf, size_t nr);
- *
- * This function is called when the user requests to read from
- * the tty. The line discipline will return whatever characters
- * it has buffered up for the user. If this function is not
- * defined, the user will receive an EIO error.
- *
- * ssize_t (*write)(struct tty_struct * tty, struct file * file,
- * const unsigned char * buf, size_t nr);
- *
- * This function is called when the user requests to write to the
- * tty. The line discipline will deliver the characters to the
- * low-level tty device for transmission, optionally performing
- * some processing on the characters first. If this function is
- * not defined, the user will receive an EIO error.
- *
- * int (*ioctl)(struct tty_struct * tty, struct file * file,
- * unsigned int cmd, unsigned long arg);
- *
- * This function is called when the user requests an ioctl which
- * is not handled by the tty layer or the low-level tty driver.
- * It is intended for ioctls which affect line discpline
- * operation. Note that the search order for ioctls is (1) tty
- * layer, (2) tty low-level driver, (3) line discpline. So a
- * low-level driver can "grab" an ioctl request before the line
- * discpline has a chance to see it.
- *
- * int (*compat_ioctl)(struct tty_struct * tty, struct file * file,
- * unsigned int cmd, unsigned long arg);
- *
- * Process ioctl calls from 32-bit process on 64-bit system
- *
- * NOTE: only ioctls that are neither "pointer to compatible
- * structure" nor tty-generic. Something private that takes
- * an integer or a pointer to wordsize-sensitive structure
- * belongs here, but most of ldiscs will happily leave
- * it NULL.
- *
- * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
- *
- * This function notifies the line discpline that a change has
- * been made to the termios structure.
- *
- * int (*poll)(struct tty_struct * tty, struct file * file,
- * poll_table *wait);
- *
- * This function is called when a user attempts to select/poll on a
- * tty device. It is solely the responsibility of the line
- * discipline to handle poll requests.
- *
- * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
- * char *fp, int count);
- *
- * This function is called by the low-level tty driver to send
- * characters received by the hardware to the line discpline for
- * processing. <cp> is a pointer to the buffer of input
- * character received by the device. <fp> is a pointer to a
- * pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc. <fp> may be NULL to indicate
- * all data received is TTY_NORMAL.
- *
- * void (*write_wakeup)(struct tty_struct *);
- *
- * This function is called by the low-level tty driver to signal
- * that line discpline should try to send more characters to the
- * low-level driver for transmission. If the line discpline does
- * not have any more data to send, it can just return. If the line
- * discipline does have some data to send, please arise a tasklet
- * or workqueue to do the real data transfer. Do not send data in
- * this hook, it may leads to a deadlock.
- *
- * int (*hangup)(struct tty_struct *)
- *
- * Called on a hangup. Tells the discipline that it should
- * cease I/O to the tty driver. Can sleep. The driver should
- * seek to perform this action quickly but should wait until
- * any pending driver I/O is completed.
- *
- * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
- *
- * Tells the discipline that the DCD pin has changed its status.
- * Used exclusively by the N_PPS (Pulse-Per-Second) line discipline.
- *
- * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
- * char *fp, int count);
- *
- * This function is called by the low-level tty driver to send
- * characters received by the hardware to the line discpline for
- * processing. <cp> is a pointer to the buffer of input
- * character received by the device. <fp> is a pointer to a
- * pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc. <fp> may be NULL to indicate
- * all data received is TTY_NORMAL.
- * If assigned, prefer this function for automatic flow control.
- */
+struct tty_struct;
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/seq_file.h>
/*
* the semaphore definition
@@ -141,7 +25,7 @@ struct ld_semaphore {
#endif
};
-extern void __init_ldsem(struct ld_semaphore *sem, const char *name,
+void __init_ldsem(struct ld_semaphore *sem, const char *name,
struct lock_class_key *key);
#define init_ldsem(sem) \
@@ -152,18 +36,18 @@ do { \
} while (0)
-extern int ldsem_down_read(struct ld_semaphore *sem, long timeout);
-extern int ldsem_down_read_trylock(struct ld_semaphore *sem);
-extern int ldsem_down_write(struct ld_semaphore *sem, long timeout);
-extern int ldsem_down_write_trylock(struct ld_semaphore *sem);
-extern void ldsem_up_read(struct ld_semaphore *sem);
-extern void ldsem_up_write(struct ld_semaphore *sem);
+int ldsem_down_read(struct ld_semaphore *sem, long timeout);
+int ldsem_down_read_trylock(struct ld_semaphore *sem);
+int ldsem_down_write(struct ld_semaphore *sem, long timeout);
+int ldsem_down_write_trylock(struct ld_semaphore *sem);
+void ldsem_up_read(struct ld_semaphore *sem);
+void ldsem_up_write(struct ld_semaphore *sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
- long timeout);
-extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
- long timeout);
+int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
+int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
#else
# define ldsem_down_read_nested(sem, subclass, timeout) \
ldsem_down_read(sem, timeout)
@@ -171,45 +55,215 @@ extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
ldsem_down_write(sem, timeout)
#endif
-
+/**
+ * struct tty_ldisc_ops - ldisc operations
+ *
+ * @name: name of this ldisc rendered in /proc/tty/ldiscs
+ * @num: ``N_*`` number (%N_TTY, %N_HDLC, ...) reserved to this ldisc
+ *
+ * @open: [TTY] ``int ()(struct tty_struct *tty)``
+ *
+ * This function is called when the line discipline is associated with the
+ * @tty. No other call into the line discipline for this tty will occur
+ * until it completes successfully. It should initialize any state needed
+ * by the ldisc, and set @tty->receive_room to the maximum amount of data
+ * the line discipline is willing to accept from the driver with a single
+ * call to @receive_buf(). Returning an error will prevent the ldisc from
+ * being attached.
+ *
+ * Optional. Can sleep.
+ *
+ * @close: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * This function is called when the line discipline is being shutdown,
+ * either because the @tty is being closed or because the @tty is being
+ * changed to use a new line discipline. At the point of execution no
+ * further users will enter the ldisc code for this tty.
+ *
+ * Optional. Can sleep.
+ *
+ * @flush_buffer: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * This function instructs the line discipline to clear its buffers of any
+ * input characters it may have queued to be delivered to the user mode
+ * process. It may be called at any point between open and close.
+ *
+ * Optional.
+ *
+ * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file, u8 *buf,
+ * size_t nr)``
+ *
+ * This function is called when the user requests to read from the @tty.
+ * The line discipline will return whatever characters it has buffered up
+ * for the user. If this function is not defined, the user will receive
+ * an %EIO error. Multiple read calls may occur in parallel and the ldisc
+ * must deal with serialization issues.
+ *
+ * Optional: %EIO unless provided. Can sleep.
+ *
+ * @write: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
+ * const u8 *buf, size_t nr)``
+ *
+ * This function is called when the user requests to write to the @tty.
+ * The line discipline will deliver the characters to the low-level tty
+ * device for transmission, optionally performing some processing on the
+ * characters first. If this function is not defined, the user will
+ * receive an %EIO error.
+ *
+ * Optional: %EIO unless provided. Can sleep.
+ *
+ * @ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * This function is called when the user requests an ioctl which is not
+ * handled by the tty layer or the low-level tty driver. It is intended
+ * for ioctls which affect line discpline operation. Note that the search
+ * order for ioctls is (1) tty layer, (2) tty low-level driver, (3) line
+ * discpline. So a low-level driver can "grab" an ioctl request before
+ * the line discpline has a chance to see it.
+ *
+ * Optional.
+ *
+ * @compat_ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Process ioctl calls from 32-bit process on 64-bit system.
+ *
+ * Note that this handles only ioctls that are neither "pointer to
+ * compatible structure" nor tty-generic. Something private that takes an
+ * integer or a pointer to a wordsize-sensitive structure belongs here,
+ * but most ldiscs will happily leave it %NULL.
+ *
+ * Optional.
+ *
+ * @set_termios: [TTY] ``void ()(struct tty_struct *tty, const struct ktermios *old)``
+ *
+ * This function notifies the line discipline that a change has been made
+ * to the termios structure.
+ *
+ * Optional.
+ *
+ * @poll: [TTY] ``int ()(struct tty_struct *tty, struct file *file,
+ * struct poll_table_struct *wait)``
+ *
+ * This function is called when a user attempts to select/poll on a @tty
+ * device. It is solely the responsibility of the line discipline to
+ * handle poll requests.
+ *
+ * Optional.
+ *
+ * @hangup: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * Called on a hangup. Tells the discipline that it should cease I/O to
+ * the tty driver. The driver should seek to perform this action quickly
+ * but should wait until any pending driver I/O is completed. No further
+ * calls into the ldisc code will occur.
+ *
+ * Optional. Can sleep.
+ *
+ * @receive_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
+ *
+ * This function is called by the low-level tty driver to send characters
+ * received by the hardware to the line discipline for processing. @cp is
+ * a pointer to the buffer of input characters received by the device. @fp
+ * is a pointer to an array of flag bytes which indicate whether a
+ * character was received with a parity error, etc. @fp may be %NULL to
+ * indicate all data received is %TTY_NORMAL.
+ *
+ * Optional.
+ *
+ * @write_wakeup: [DRV] ``void ()(struct tty_struct *tty)``
+ *
+ * This function is called by the low-level tty driver to signal that line
+ * discipline should try to send more characters to the low-level driver
+ * for transmission. If the line discipline does not have any more data
+ * to send, it can just return. If the line discipline does have some
+ * data to send, please schedule a tasklet or workqueue to do the real
+ * data transfer.
+ * Do not send data in this hook, it may lead to a deadlock.
+ *
+ * Optional.
+ *
+ * @dcd_change: [DRV] ``void ()(struct tty_struct *tty, bool active)``
+ *
+ * Tells the discipline that the DCD pin has changed its status. Used
+ * exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
+ *
+ * Optional.
+ *
+ * @receive_buf2: [DRV] ``ssize_t ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
+ *
+ * This function is called by the low-level tty driver to send characters
+ * received by the hardware to the line discipline for processing. @cp is a
+ * pointer to the buffer of input characters received by the device. @fp
+ * is a pointer to an array of flag bytes which indicate whether a
+ * character was received with a parity error, etc. @fp may be %NULL to
+ * indicate all data received is %TTY_NORMAL. If assigned, prefer this
+ * function for automatic flow control.
+ *
+ * Optional.
+ *
+ * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
+ *
+ * This function is called by the low-level tty driver for characters
+ * not eaten by ->receive_buf() or ->receive_buf2(). It is useful for
+ * processing high-priority characters such as software flow-control
+ * characters that could otherwise get stuck in the intermediate
+ * buffer until the tty has room to receive them. The ldisc must be
+ * able to handle a later ->receive_buf() or ->receive_buf2() call for
+ * the same characters (e.g. by skipping the actions for high-priority
+ * characters already handled by ->lookahead_buf()).
+ *
+ * Optional.
+ *
+ * @owner: module containing this ldisc (for reference counting)
+ *
+ * This structure defines the interface between the tty line discipline
+ * implementation and the tty routines. The above routines can be defined.
+ * Unless noted otherwise, they are optional, and can be filled in with a %NULL
+ * pointer.
+ *
+ * Hooks marked [TTY] are invoked from the TTY core, the [DRV] ones from the
+ * tty_driver side.
+ */
struct tty_ldisc_ops {
- int magic;
char *name;
int num;
- int flags;
/*
* The following routines are called from above.
*/
- int (*open)(struct tty_struct *);
- void (*close)(struct tty_struct *);
+ int (*open)(struct tty_struct *tty);
+ void (*close)(struct tty_struct *tty);
void (*flush_buffer)(struct tty_struct *tty);
- ssize_t (*read)(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr);
+ ssize_t (*read)(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t nr, void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr);
- int (*ioctl)(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
- int (*compat_ioctl)(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
- void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- __poll_t (*poll)(struct tty_struct *, struct file *,
- struct poll_table_struct *);
- int (*hangup)(struct tty_struct *tty);
+ const u8 *buf, size_t nr);
+ int (*ioctl)(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+ int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, const struct ktermios *old);
+ __poll_t (*poll)(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *wait);
+ void (*hangup)(struct tty_struct *tty);
/*
* The following routines are called from below.
*/
- void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
- char *fp, int count);
- void (*write_wakeup)(struct tty_struct *);
- void (*dcd_change)(struct tty_struct *, unsigned int);
- int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
- char *fp, int count);
+ void (*receive_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
+ void (*write_wakeup)(struct tty_struct *tty);
+ void (*dcd_change)(struct tty_struct *tty, bool active);
+ size_t (*receive_buf2)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
+ void (*lookahead_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
struct module *owner;
-
- int refcount;
};
struct tty_ldisc {
@@ -217,11 +271,19 @@ struct tty_ldisc {
struct tty_struct *tty;
};
-#define TTY_LDISC_MAGIC 0x5403
-
-#define LDISC_FLAG_DEFINED 0x00000001
-
#define MODULE_ALIAS_LDISC(ldisc) \
MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
+extern const struct seq_operations tty_ldiscs_seq_ops;
+
+struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
+void tty_ldisc_deref(struct tty_ldisc *);
+struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
+
+void tty_ldisc_flush(struct tty_struct *tty);
+
+int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc);
+void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc);
+int tty_set_ldisc(struct tty_struct *tty, int disc);
+
#endif /* _LINUX_TTY_LDISC_H */
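To tie the ops table above to the new registration functions, here is a
minimal hedged sketch; N_MYLDISC and the my_ldisc_* hooks are hypothetical:

    static struct tty_ldisc_ops my_ldisc_ops = {
            .owner          = THIS_MODULE,
            .num            = N_MYLDISC,            /* reserved N_* number */
            .name           = "my_ldisc",
            .open           = my_ldisc_open,        /* [TTY] hook */
            .close          = my_ldisc_close,       /* [TTY] hook */
            .receive_buf2   = my_ldisc_receive_buf2,/* [DRV] hook */
    };

    static int __init my_ldisc_init(void)
    {
            return tty_register_ldisc(&my_ldisc_ops);
    }

    static void __exit my_ldisc_exit(void)
    {
            tty_unregister_ldisc(&my_ldisc_ops);
    }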
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
new file mode 100644
index 000000000000..1b861f2100b6
--- /dev/null
+++ b/include/linux/tty_port.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TTY_PORT_H
+#define _LINUX_TTY_PORT_H
+
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/tty_buffer.h>
+#include <linux/wait.h>
+
+struct attribute_group;
+struct tty_driver;
+struct tty_port;
+struct tty_struct;
+
+/**
+ * struct tty_port_operations -- operations on tty_port
+ * @carrier_raised: return true if the carrier is raised on @port
+ * @dtr_rts: raise the DTR line if @active is true, otherwise lower DTR
+ * @shutdown: called when the last close completes or a hangup finishes IFF the
+ * port was initialized. Do not use to free resources. Turn off the device
+ * only. Called under the port mutex to serialize against @activate and
+ * @shutdown.
+ * @activate: called under the port mutex from tty_port_open(), serialized using
+ * the port mutex. Supposed to turn on the device.
+ *
+ * FIXME: long term getting the tty argument *out* of this would be good
+ * for consoles.
+ *
+ * @destruct: called on the final put of a port. Free resources, possibly incl.
+ * the port itself.
+ */
+struct tty_port_operations {
+ bool (*carrier_raised)(struct tty_port *port);
+ void (*dtr_rts)(struct tty_port *port, bool active);
+ void (*shutdown)(struct tty_port *port);
+ int (*activate)(struct tty_port *port, struct tty_struct *tty);
+ void (*destruct)(struct tty_port *port);
+};
+
+struct tty_port_client_operations {
+ size_t (*receive_buf)(struct tty_port *port, const u8 *cp, const u8 *fp,
+ size_t count);
+ void (*lookahead_buf)(struct tty_port *port, const u8 *cp,
+ const u8 *fp, size_t count);
+ void (*write_wakeup)(struct tty_port *port);
+};
+
+extern const struct tty_port_client_operations tty_port_default_client_ops;
+
+/**
+ * struct tty_port -- port level information
+ *
+ * @buf: buffer for this port, locked internally
+ * @tty: back pointer to &struct tty_struct, valid only if the tty is open. Use
+ * tty_port_tty_get() to obtain it (and tty_kref_put() to release).
+ * @itty: internal back pointer to &struct tty_struct. Avoid this. It should be
+ * eliminated in the long term.
+ * @ops: tty port operations (like activate, shutdown), see &struct
+ * tty_port_operations
+ * @client_ops: tty port client operations (like receive_buf, write_wakeup).
+ * By default, tty_port_default_client_ops is used.
+ * @lock: lock protecting @tty
+ * @blocked_open: # of procs waiting for open in tty_port_block_til_ready()
+ * @count: usage count
+ * @open_wait: open waiters queue (waiting e.g. for a carrier)
+ * @delta_msr_wait: modem status change queue (waiting for MSR changes)
+ * @flags: user TTY flags (%ASYNC_)
+ * @iflags: internal flags (%TTY_PORT_)
+ * @console: when set, the port is a console
+ * @mutex: locking, for open, shutdown and other port operations
+ * @buf_mutex: @xmit_buf alloc lock
+ * @xmit_buf: optional xmit buffer used by some drivers
+ * @xmit_fifo: optional xmit buffer used by some drivers
+ * @close_delay: delay in jiffies to wait when closing the port
+ * @closing_wait: delay in jiffies for output to be sent before closing
+ * @drain_delay: set to zero if no pure time based drain is needed, else set
+ * to the size of the fifo
+ * @kref: references counter. Reaching zero calls @ops->destruct() if non-%NULL
+ * or frees the port otherwise.
+ * @client_data: pointer to private data, for @client_ops
+ *
+ * Each device keeps its own port level information. &struct tty_port was
+ * introduced as a common structure for such information. As every TTY device
+ * shall have a backing tty_port structure, every driver can use these members.
+ *
+ * The tty port has a different lifetime from the tty, so the two must be
+ * kept apart. In addition, be careful: tty -> port mappings are valid for
+ * the life of the tty object, but in many cases port -> tty mappings are
+ * valid only until a hangup, so don't use the wrong path.
+ *
+ * A tty port shall be initialized by tty_port_init() and shut down either by
+ * tty_port_destroy() (refcounting not used) or tty_port_put() (refcounting).
+ *
+ * There are a lot of helpers around &struct tty_port too. To name the most
+ * significant ones: tty_port_open(), tty_port_close() (or
+ * tty_port_close_start() and tty_port_close_end() separately if need be), and
+ * tty_port_hangup(). These call @ops->activate() and @ops->shutdown() as
+ * needed.
+ */
+struct tty_port {
+ struct tty_bufhead buf;
+ struct tty_struct *tty;
+ struct tty_struct *itty;
+ const struct tty_port_operations *ops;
+ const struct tty_port_client_operations *client_ops;
+ spinlock_t lock;
+ int blocked_open;
+ int count;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t delta_msr_wait;
+ unsigned long flags;
+ unsigned long iflags;
+ unsigned char console:1;
+ struct mutex mutex;
+ struct mutex buf_mutex;
+ u8 *xmit_buf;
+ DECLARE_KFIFO_PTR(xmit_fifo, u8);
+ unsigned int close_delay;
+ unsigned int closing_wait;
+ int drain_delay;
+ struct kref kref;
+ void *client_data;
+};
+
+/* tty_port::iflags bits -- use atomic bit ops */
+#define TTY_PORT_INITIALIZED 0 /* device is initialized */
+#define TTY_PORT_SUSPENDED 1 /* device is suspended */
+#define TTY_PORT_ACTIVE 2 /* device is open */
+
+/*
+ * uart drivers: use the uart_port::status field and the UPSTAT_* defines
+ * for s/w-based flow control steering and carrier detection status
+ */
+#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
+#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
+#define TTY_PORT_KOPENED 5 /* device exclusively opened by
+ kernel */
+
+void tty_port_init(struct tty_port *port);
+void tty_port_link_device(struct tty_port *port, struct tty_driver *driver,
+ unsigned index);
+struct device *tty_port_register_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device);
+struct device *tty_port_register_device_attr(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+struct device *tty_port_register_device_serdev(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *host, struct device *parent);
+struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *host, struct device *parent, void *drvdata,
+ const struct attribute_group **attr_grp);
+void tty_port_unregister_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index);
+int tty_port_alloc_xmit_buf(struct tty_port *port);
+void tty_port_free_xmit_buf(struct tty_port *port);
+void tty_port_destroy(struct tty_port *port);
+void tty_port_put(struct tty_port *port);
+
+static inline struct tty_port *tty_port_get(struct tty_port *port)
+{
+ if (port && kref_get_unless_zero(&port->kref))
+ return port;
+ return NULL;
+}
+
+/* Return true if CTS flow control is enabled. */
+static inline bool tty_port_cts_enabled(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_CTS_FLOW, &port->iflags);
+}
+
+static inline void tty_port_set_cts_flow(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val);
+}
+
+static inline bool tty_port_active(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_ACTIVE, &port->iflags);
+}
+
+static inline void tty_port_set_active(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_ACTIVE, &port->iflags, val);
+}
+
+static inline bool tty_port_check_carrier(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_CHECK_CD, &port->iflags);
+}
+
+static inline void tty_port_set_check_carrier(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val);
+}
+
+static inline bool tty_port_suspended(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_SUSPENDED, &port->iflags);
+}
+
+static inline void tty_port_set_suspended(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val);
+}
+
+static inline bool tty_port_initialized(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
+}
+
+static inline void tty_port_set_initialized(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val);
+}
+
+static inline bool tty_port_kopened(const struct tty_port *port)
+{
+ return test_bit(TTY_PORT_KOPENED, &port->iflags);
+}
+
+static inline void tty_port_set_kopened(struct tty_port *port, bool val)
+{
+ assign_bit(TTY_PORT_KOPENED, &port->iflags, val);
+}
+
+struct tty_struct *tty_port_tty_get(struct tty_port *port);
+void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
+bool tty_port_carrier_raised(struct tty_port *port);
+void tty_port_raise_dtr_rts(struct tty_port *port);
+void tty_port_lower_dtr_rts(struct tty_port *port);
+void tty_port_hangup(struct tty_port *port);
+void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
+void tty_port_tty_wakeup(struct tty_port *port);
+int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+int tty_port_close_start(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
+void tty_port_close(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+int tty_port_install(struct tty_port *port, struct tty_driver *driver,
+ struct tty_struct *tty);
+int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp);
+
+static inline int tty_port_users(struct tty_port *port)
+{
+ return port->count + port->blocked_open;
+}
+
+#endif
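As a sketch of the tty_port_init()/tty_port_link_device() setup described in
the struct documentation above; my_port_ops and its hooks are hypothetical:

    static const struct tty_port_operations my_port_ops = {
            .activate = my_port_activate,   /* turn the device on */
            .shutdown = my_port_shutdown,   /* turn the device off */
    };

    static void my_setup_port(struct tty_port *port, struct tty_driver *drv,
                              unsigned int index)
    {
            tty_port_init(port);
            port->ops = &my_port_ops;
            /* Let the tty core find this port for drv's minor 'index'. */
            tty_port_link_device(port, drv, index);
    }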
diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h
index 20d310331eb5..46b15e2aaefb 100644
--- a/include/linux/typecheck.h
+++ b/include/linux/typecheck.h
@@ -22,4 +22,13 @@
(void)__tmp; \
})
+/*
+ * Check at compile time that something is a pointer type.
+ */
+#define typecheck_pointer(x) \
+({ typeof(x) __dummy; \
+ (void)sizeof(*__dummy); \
+ 1; \
+})
+
#endif /* TYPECHECK_H_INCLUDED */
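A quick illustration of what the new macro accepts and rejects at compile
time (the demo function is hypothetical):

    static inline void typecheck_pointer_demo(void)
    {
            int *p = NULL;

            typecheck_pointer(p);   /* compiles: *p has a size */
            /* typecheck_pointer(5) would fail to build: an int cannot be
             * dereferenced, so sizeof(*__dummy) is ill-formed. */
    }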
diff --git a/include/linux/types.h b/include/linux/types.h
index a147977602b5..2bc8766ba20c 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -10,11 +10,16 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
+#ifdef __SIZEOF_INT128__
+typedef __s128 s128;
+typedef __u128 u128;
+#endif
+
typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
-typedef __kernel_ino_t ino_t;
+typedef __kernel_ulong_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
typedef u32 nlink_t;
@@ -35,6 +40,7 @@ typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
+typedef long intptr_t;
#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
@@ -114,6 +120,9 @@ typedef s64 int64_t;
#define aligned_be64 __aligned_be64
#define aligned_le64 __aligned_le64
+/* Nanosecond scalar representation for kernel time values */
+typedef s64 ktime_t;
+
/**
* The type used for indexing onto a disc or disc partition.
*
@@ -175,6 +184,12 @@ typedef struct {
} atomic64_t;
#endif
+typedef struct {
+ atomic_t refcnt;
+} rcuref_t;
+
+#define RCUREF_INIT(i) { .refcnt = ATOMIC_INIT(i - 1) }
+
struct list_head {
struct list_head *next, *prev;
};
@@ -189,7 +204,11 @@ struct hlist_node {
struct ustat {
__kernel_daddr_t f_tfree;
- __kernel_ino_t f_tinode;
+#ifdef CONFIG_ARCH_32BIT_USTAT_F_TINODE
+ unsigned int f_tinode;
+#else
+ unsigned long f_tinode;
+#endif
char f_fname[6];
char f_fpack[6];
};
@@ -222,6 +241,7 @@ struct callback_head {
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+typedef void (*swap_r_func_t)(void *a, void *b, int size, const void *priv);
typedef void (*swap_func_t)(void *a, void *b, int size);
typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index c6abb79501b3..ffe48e69b3f3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -8,7 +8,7 @@
*
* Key points :
*
- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+ * - Use a seqcount on 32-bit.
* - The whole thing is a no-op on 64-bit architectures.
*
* Usage constraints:
@@ -20,7 +20,8 @@
* writer and also spin forever.
*
* 3) Write side must use the _irqsave() variant if other writers, or a reader,
- * can be invoked from an IRQ context.
+ * can be invoked from an IRQ context. On 64bit systems this variant does not
+ * disable interrupts.
*
* 4) If reader fetches several counters, there is no guarantee the whole values
* are consistent w.r.t. each other (remember point #2: seqcounts are not
@@ -29,11 +30,6 @@
* 5) Readers are allowed to sleep or be preempted/interrupted: they perform
* pure reads.
*
- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
- * might be updated from a hardirq or softirq context (remember point #1:
- * seqcounts are not used for UP kernels). 32-bit UP stat readers could read
- * corrupted 64-bit values otherwise.
- *
* Usage :
*
* Stats producer (writer) should use following template granted it already got
@@ -66,7 +62,7 @@
#include <linux/seqlock.h>
struct u64_stats_sync {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32
seqcount_t seq;
#endif
};
@@ -83,6 +79,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return local64_read(&p->v);
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ local64_set(&p->v, val);
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
local64_add(val, &p->v);
@@ -93,7 +94,22 @@ static inline void u64_stats_inc(u64_stats_t *p)
local64_inc(&p->v);
}
-#else
+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
+static inline void __u64_stats_irqrestore(unsigned long flags) { }
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return 0;
+}
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+ return false;
+}
+
+#else /* 64 bit */
typedef struct {
u64 v;
@@ -104,6 +120,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return p->v;
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ p->v = val;
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
p->v += val;
@@ -113,107 +134,82 @@ static inline void u64_stats_inc(u64_stats_t *p)
{
p->v++;
}
-#endif
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
seqcount_init(&syncp->seq);
-#endif
}
-static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ preempt_disable_nested();
write_seqcount_begin(&syncp->seq);
-#endif
}
-static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_end(&syncp->seq);
-#endif
+ preempt_enable_nested();
}
-static inline unsigned long
-u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+static inline unsigned long __u64_stats_irqsave(void)
{
- unsigned long flags = 0;
+ unsigned long flags;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
local_irq_save(flags);
- write_seqcount_begin(&syncp->seq);
-#endif
return flags;
}
-static inline void
-u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
- unsigned long flags)
+static inline void __u64_stats_irqrestore(unsigned long flags)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- write_seqcount_end(&syncp->seq);
local_irq_restore(flags);
-#endif
}
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq);
-#else
- return 0;
-#endif
}
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- preempt_disable();
-#endif
- return __u64_stats_fetch_begin(syncp);
+ return read_seqcount_retry(&syncp->seq, start);
}
+#endif /* !64 bit */
-static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- return read_seqcount_retry(&syncp->seq, start);
-#else
- return false;
-#endif
+ __u64_stats_update_begin(syncp);
}
-static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- preempt_enable();
-#endif
- return __u64_stats_fetch_retry(syncp, start);
+ __u64_stats_update_end(syncp);
}
-/*
- * In case irq handlers can update u64 counters, readers can use following helpers
- * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable irqs.
- * - 64bit have no problem atomically reading u64 values, irq safe.
- */
-static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+ unsigned long flags = __u64_stats_irqsave();
+
+ __u64_stats_update_begin(syncp);
+ return flags;
+}
+
+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+{
+ __u64_stats_update_end(syncp);
+ __u64_stats_irqrestore(flags);
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- local_irq_disable();
-#endif
return __u64_stats_fetch_begin(syncp);
}
-static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
{
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
- local_irq_enable();
-#endif
return __u64_stats_fetch_retry(syncp, start);
}
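The writer/reader templates referred to in the header comment pair up as
follows; struct my_stats and the two wrappers are hypothetical examples:

    struct my_stats {
            u64_stats_t             bytes;
            struct u64_stats_sync   syncp;
    };

    static void my_stats_add_bytes(struct my_stats *s, unsigned long n)
    {
            u64_stats_update_begin(&s->syncp);
            u64_stats_add(&s->bytes, n);
            u64_stats_update_end(&s->syncp);
    }

    static u64 my_stats_read_bytes(const struct my_stats *s)
    {
            unsigned int start;
            u64 bytes;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    bytes = u64_stats_read(&s->bytes);
            } while (u64_stats_fetch_retry(&s->syncp, start));

            return bytes;
    }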
diff --git a/include/linux/u64_stats_sync_api.h b/include/linux/u64_stats_sync_api.h
new file mode 100644
index 000000000000..c72ca63da44b
--- /dev/null
+++ b/include/linux/u64_stats_sync_api.h
@@ -0,0 +1 @@
+#include <linux/u64_stats_sync.h>
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 454c2f6672d7..e290c0269944 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -8,6 +8,7 @@
#define UACCE_NAME "uacce"
#define UACCE_MAX_REGION 2
#define UACCE_MAX_NAME_SIZE 64
+#define UACCE_MAX_ERR_THRESHOLD 65535
struct uacce_queue;
struct uacce_device;
@@ -30,6 +31,9 @@ struct uacce_qfile_region {
* @is_q_updated: check whether the task is finished
* @mmap: mmap addresses of queue to user space
* @ioctl: ioctl for user space users of the queue
+ * @get_isolate_state: get the device state after setting the isolate strategy
+ * @isolate_err_threshold_write: store the isolate error threshold to the device
+ * @isolate_err_threshold_read: read the isolate error threshold value from the device
*/
struct uacce_ops {
int (*get_available_instances)(struct uacce_device *uacce);
@@ -43,6 +47,9 @@ struct uacce_ops {
struct uacce_qfile_region *qfr);
long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
unsigned long arg);
+ enum uacce_dev_state (*get_isolate_state)(struct uacce_device *uacce);
+ int (*isolate_err_threshold_write)(struct uacce_device *uacce, u32 num);
+ u32 (*isolate_err_threshold_read)(struct uacce_device *uacce);
};
/**
@@ -57,6 +64,11 @@ struct uacce_interface {
const struct uacce_ops *ops;
};
+enum uacce_dev_state {
+ UACCE_DEV_NORMAL,
+ UACCE_DEV_ISOLATE,
+};
+
enum uacce_q_state {
UACCE_Q_ZOMBIE = 0,
UACCE_Q_INIT,
@@ -70,9 +82,11 @@ enum uacce_q_state {
* @wait: wait queue head
* @list: index into uacce queues list
* @qfrs: pointer of qfr regions
+ * @mutex: protects queue state
* @state: queue state machine
* @pasid: pasid associated to the mm
* @handle: iommu_sva handle returned by iommu_sva_bind_device()
+ * @mapping: user space mapping of the queue
*/
struct uacce_queue {
struct uacce_device *uacce;
@@ -80,9 +94,11 @@ struct uacce_queue {
wait_queue_head_t wait;
struct list_head list;
struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
+ struct mutex mutex;
enum uacce_q_state state;
- int pasid;
+ u32 pasid;
struct iommu_sva *handle;
+ struct address_space *mapping;
};
/**
@@ -97,10 +113,9 @@ struct uacce_queue {
* @dev_id: id of the uacce device
* @cdev: cdev of the uacce
* @dev: dev of the uacce
+ * @mutex: protects uacce operation
* @priv: private pointer of the uacce
* @queues: list of queues
- * @queues_lock: lock for queues list
- * @inode: core vfs
*/
struct uacce_device {
const char *algs;
@@ -113,10 +128,9 @@ struct uacce_device {
u32 dev_id;
struct cdev *cdev;
struct device dev;
+ struct mutex mutex;
void *priv;
struct list_head queues;
- struct mutex queues_lock;
- struct inode *inode;
};
#if IS_ENABLED(CONFIG_UACCE)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 94b285411659..3064314f4832 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,29 +2,35 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
+#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <asm/uaccess.h>
/*
- * Force the uaccess routines to be wired up for actual userspace access,
- * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone
- * using force_uaccess_end below.
+ * Architectures that support memory tagging (assigning tags to memory regions,
+ * embedding these tags into addresses that point to these memory regions, and
+ * checking that the memory and the pointer tags match on memory accesses)
+ * redefine this macro to strip tags from pointers.
+ *
+ * Passing down the mm_struct allows untagging rules to be defined on a
+ * per-process basis.
+ *
+ * It's defined as a no-op for architectures that don't support memory tagging.
*/
-static inline mm_segment_t force_uaccess_begin(void)
-{
- mm_segment_t fs = get_fs();
-
- set_fs(USER_DS);
- return fs;
-}
+#ifndef untagged_addr
+#define untagged_addr(addr) (addr)
+#endif
-static inline void force_uaccess_end(mm_segment_t oldfs)
-{
- set_fs(oldfs);
-}
+#ifndef untagged_addr_remote
+#define untagged_addr_remote(mm, addr) ({ \
+ mmap_assert_locked(mm); \
+ untagged_addr(addr); \
+})
+#endif
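For illustration only: an architecture whose user pointers carry a tag in the
top byte (in the style of arm64's top-byte-ignore) could override the hook
roughly as below. The real arm64 version also applies per-mm untagging rules,
so treat this strictly as a simplified sketch, not a drop-in implementation:

    /* Hypothetical arch override: drop a top-byte tag by sign-extending
     * from bit 55 (sign_extend64() is from linux/bitops.h), which leaves
     * kernel addresses intact. */
    #define untagged_addr(addr) \
            ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))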
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
@@ -74,18 +80,28 @@ static inline void force_uaccess_end(mm_segment_t oldfs)
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- instrument_copy_from_user(to, from, n);
+ unsigned long res;
+
+ instrument_copy_from_user_before(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res;
+
might_fault();
- instrument_copy_from_user(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
+ if (should_fail_usercopy())
+ return n;
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
/**
@@ -104,6 +120,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
+ if (should_fail_usercopy())
+ return n;
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
@@ -113,6 +131,8 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
@@ -124,9 +144,10 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(from, n))) {
- instrument_copy_from_user(to, from, n);
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -142,6 +163,8 @@ static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
@@ -156,7 +179,7 @@ _copy_to_user(void __user *, const void *, unsigned long);
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(check_copy_size(to, n, false)))
+ if (check_copy_size(to, n, false))
n = _copy_from_user(to, from, n);
return n;
}
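The memset() in _copy_from_user() above is what gives copy_from_user() its zero-fill-on-fault guarantee. A minimal caller sketch relying on it — the struct and function names are illustrative, not from this patch:

        struct my_args {                        /* hypothetical uapi struct */
                __u32 flags;
                __u64 addr;
        };

        static int my_ioctl(void __user *uarg)
        {
                struct my_args args;

                if (copy_from_user(&args, uarg, sizeof(args)))
                        return -EFAULT; /* nonzero return = bytes left uncopied */
                /* args is fully initialized either way: any uncopied tail is zeroed */
                return 0;
        }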
@@ -164,18 +187,21 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (likely(check_copy_size(from, n, true)))
+ if (check_copy_size(from, n, true))
n = _copy_to_user(to, from, n);
return n;
}
-#ifdef CONFIG_COMPAT
-static __always_inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
+
+#ifndef copy_mc_to_kernel
+/*
+ * Without arch opt-in this generic copy_mc_to_kernel() will not handle
+ * #MC (or arch equivalent) during a source read.
+ */
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
- might_fault();
- if (access_ok(to, n) && access_ok(from, n))
- n = raw_copy_in_user(to, from, n);
- return n;
+ memcpy(dst, src, cnt);
+ return 0;
}
#endif
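A hedged usage sketch: callers that want machine-check-safe reads can use copy_mc_to_kernel() unconditionally, since without CONFIG_ARCH_HAS_COPY_MC the fallback above degrades to a plain memcpy() and always reports zero bytes remaining. The function name is illustrative:

        static int my_read_pmem(void *dst, const void *src, size_t len)
        {
                /* rem is the number of bytes not copied after a poison/#MC event */
                unsigned long rem = copy_mc_to_kernel(dst, src, len);

                return rem ? -EIO : 0;
        }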
@@ -236,6 +262,28 @@ static inline bool pagefault_disabled(void)
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/**
+ * probe_subpage_writeable: probe the user range for write faults at sub-page
+ * granularity (e.g. arm64 MTE)
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns 0 on success, the number of bytes not probed on fault.
+ *
+ * It is expected that the caller has already checked the write permission of
+ * each page in the range, either via put_user() or GUP. The architecture
+ * port can implement a more efficient get_user() probing if the same
+ * sub-page faults are triggered by either a read or a write.
+ */
+static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
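An illustrative caller, assuming the pages were already write-checked via GUP as the kernel-doc above requires — the helper name is hypothetical:

        static bool my_user_range_writable(char __user *uaddr, size_t size)
        {
                /* zero means every sub-page granule in the range accepted a write */
                return probe_subpage_writeable(uaddr, size) == 0;
        }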
+
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline __must_check unsigned long
@@ -303,6 +351,10 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
size_t size = min(ksize, usize);
size_t rest = max(ksize, usize) - size;
+ /* Double check if ksize is larger than a known object size. */
+ if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
+ return -E2BIG;
+
/* Deal with trailing bytes. */
if (usize < ksize) {
memset(dst + size, 0, rest);
@@ -333,6 +385,25 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
+#ifndef __get_kernel_nofault
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+ type __user *p = (type __force __user *)(src); \
+ type data; \
+ if (__get_user(data, p)) \
+ goto label; \
+ *(type *)dst = data; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+ type __user *p = (type __force __user *)(dst); \
+ type data = *(type *)src; \
+ if (__put_user(data, p)) \
+ goto label; \
+} while (0)
+#endif
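Most code reaches these fallbacks through the get_kernel_nofault() wrapper documented below; a minimal sketch, with an illustrative caller name:

        static long my_peek_kernel_long(const unsigned long *addr)
        {
                unsigned long val;

                if (get_kernel_nofault(val, addr))
                        return -EFAULT;         /* read faulted; nothing was copied */
                return val;
        }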
+
/**
* get_kernel_nofault(): safely attempt to read from a location
* @val: read into this variable
@@ -352,6 +423,7 @@ long strnlen_user_nofault(const void __user *unsafe_addr, long count);
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
+#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
@@ -365,8 +437,6 @@ static inline void user_access_restore(unsigned long flags) { }
#endif
#ifdef CONFIG_HARDENED_USERCOPY
-void usercopy_warn(const char *name, const char *detail, bool to_user,
- unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
unsigned long len);
diff --git a/include/linux/ubsan.h b/include/linux/ubsan.h
new file mode 100644
index 000000000000..bff7445498de
--- /dev/null
+++ b/include/linux/ubsan.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UBSAN_H
+#define _LINUX_UBSAN_H
+
+#ifdef CONFIG_UBSAN_TRAP
+const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type);
+#endif
+
+#endif
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
deleted file mode 100644
index 0968ef458447..000000000000
--- a/include/linux/ucb1400.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Register definitions and functions for:
- * Philips UCB1400 driver
- *
- * Based on ucb1400_ts:
- * Author: Nicolas Pitre
- * Created: September 25, 2006
- * Copyright: MontaVista Software, Inc.
- *
- * Spliting done by: Marek Vasut <marek.vasut@gmail.com>
- * If something doesn't work and it worked before spliting, e-mail me,
- * dont bother Nicolas please ;-)
- *
- * This code is heavily based on ucb1x00-*.c copyrighted by Russell King
- * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has
- * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
- */
-
-#ifndef _LINUX__UCB1400_H
-#define _LINUX__UCB1400_H
-
-#include <sound/ac97_codec.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-/*
- * UCB1400 AC-link registers
- */
-
-#define UCB_IO_DATA 0x5a
-#define UCB_IO_DIR 0x5c
-#define UCB_IE_RIS 0x5e
-#define UCB_IE_FAL 0x60
-#define UCB_IE_STATUS 0x62
-#define UCB_IE_CLEAR 0x62
-#define UCB_IE_ADC (1 << 11)
-#define UCB_IE_TSPX (1 << 12)
-
-#define UCB_TS_CR 0x64
-#define UCB_TS_CR_TSMX_POW (1 << 0)
-#define UCB_TS_CR_TSPX_POW (1 << 1)
-#define UCB_TS_CR_TSMY_POW (1 << 2)
-#define UCB_TS_CR_TSPY_POW (1 << 3)
-#define UCB_TS_CR_TSMX_GND (1 << 4)
-#define UCB_TS_CR_TSPX_GND (1 << 5)
-#define UCB_TS_CR_TSMY_GND (1 << 6)
-#define UCB_TS_CR_TSPY_GND (1 << 7)
-#define UCB_TS_CR_MODE_INT (0 << 8)
-#define UCB_TS_CR_MODE_PRES (1 << 8)
-#define UCB_TS_CR_MODE_POS (2 << 8)
-#define UCB_TS_CR_BIAS_ENA (1 << 11)
-#define UCB_TS_CR_TSPX_LOW (1 << 12)
-#define UCB_TS_CR_TSMX_LOW (1 << 13)
-
-#define UCB_ADC_CR 0x66
-#define UCB_ADC_SYNC_ENA (1 << 0)
-#define UCB_ADC_VREFBYP_CON (1 << 1)
-#define UCB_ADC_INP_TSPX (0 << 2)
-#define UCB_ADC_INP_TSMX (1 << 2)
-#define UCB_ADC_INP_TSPY (2 << 2)
-#define UCB_ADC_INP_TSMY (3 << 2)
-#define UCB_ADC_INP_AD0 (4 << 2)
-#define UCB_ADC_INP_AD1 (5 << 2)
-#define UCB_ADC_INP_AD2 (6 << 2)
-#define UCB_ADC_INP_AD3 (7 << 2)
-#define UCB_ADC_EXT_REF (1 << 5)
-#define UCB_ADC_START (1 << 7)
-#define UCB_ADC_ENA (1 << 15)
-
-#define UCB_ADC_DATA 0x68
-#define UCB_ADC_DAT_VALID (1 << 15)
-
-#define UCB_FCSR 0x6c
-#define UCB_FCSR_AVE (1 << 12)
-
-#define UCB_ADC_DAT_MASK 0x3ff
-
-#define UCB_ID 0x7e
-#define UCB_ID_1400 0x4304
-
-struct ucb1400_gpio {
- struct gpio_chip gc;
- struct snd_ac97 *ac97;
- int gpio_offset;
- int (*gpio_setup)(struct device *dev, int ngpio);
- int (*gpio_teardown)(struct device *dev, int ngpio);
-};
-
-struct ucb1400_ts {
- struct input_dev *ts_idev;
- int id;
- int irq;
- struct snd_ac97 *ac97;
- wait_queue_head_t ts_wait;
- bool stopped;
-};
-
-struct ucb1400 {
- struct platform_device *ucb1400_ts;
- struct platform_device *ucb1400_gpio;
-};
-
-struct ucb1400_pdata {
- int irq;
- int gpio_offset;
- int (*gpio_setup)(struct device *dev, int ngpio);
- int (*gpio_teardown)(struct device *dev, int ngpio);
-};
-
-static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg)
-{
- return ac97->bus->ops->read(ac97, reg);
-}
-
-static inline void ucb1400_reg_write(struct snd_ac97 *ac97, u16 reg, u16 val)
-{
- ac97->bus->ops->write(ac97, reg, val);
-}
-
-static inline u16 ucb1400_gpio_get_value(struct snd_ac97 *ac97, u16 gpio)
-{
- return ucb1400_reg_read(ac97, UCB_IO_DATA) & (1 << gpio);
-}
-
-static inline void ucb1400_gpio_set_value(struct snd_ac97 *ac97, u16 gpio,
- u16 val)
-{
- ucb1400_reg_write(ac97, UCB_IO_DATA, val ?
- ucb1400_reg_read(ac97, UCB_IO_DATA) | (1 << gpio) :
- ucb1400_reg_read(ac97, UCB_IO_DATA) & ~(1 << gpio));
-}
-
-static inline u16 ucb1400_gpio_get_direction(struct snd_ac97 *ac97, u16 gpio)
-{
- return ucb1400_reg_read(ac97, UCB_IO_DIR) & (1 << gpio);
-}
-
-static inline void ucb1400_gpio_set_direction(struct snd_ac97 *ac97, u16 gpio,
- u16 dir)
-{
- ucb1400_reg_write(ac97, UCB_IO_DIR, dir ?
- ucb1400_reg_read(ac97, UCB_IO_DIR) | (1 << gpio) :
- ucb1400_reg_read(ac97, UCB_IO_DIR) & ~(1 << gpio));
-}
-
-static inline void ucb1400_adc_enable(struct snd_ac97 *ac97)
-{
- ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA);
-}
-
-static inline void ucb1400_adc_disable(struct snd_ac97 *ac97)
-{
- ucb1400_reg_write(ac97, UCB_ADC_CR, 0);
-}
-
-
-unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
- int adcsync);
-
-#endif
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cf3ada3e820e..c499ae809c7d 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -10,6 +10,7 @@ typedef u16 ucs2_char_t;
unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
unsigned long ucs2_utf8size(const ucs2_char_t *src);
diff --git a/include/linux/udp.h b/include/linux/udp.h
index aa84597bdc33..3748e82b627b 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -23,35 +23,39 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
return (struct udphdr *)skb_transport_header(skb);
}
-static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
-{
- return (struct udphdr *)skb_inner_transport_header(skb);
-}
-
+#define UDP_HTABLE_SIZE_MIN_PERNET 128
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
+#define UDP_HTABLE_SIZE_MAX 65536
static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
return (num + net_hash_mix(net)) & mask;
}
+enum {
+ UDP_FLAGS_CORK, /* Cork is required */
+ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
+ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
+ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
+ UDP_FLAGS_ACCEPT_FRAGLIST,
+ UDP_FLAGS_ACCEPT_L4,
+ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
+ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
+ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
#define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
+
+ unsigned long udp_flags;
+
int pending; /* Any pending frames ? */
- unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- encap_enabled:1, /* This socket enabled encap
- * processing; UDP tunnels and
- * different encapsulation layer set
- * this
- */
- gro_enabled:1; /* Can accept GRO packets */
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -63,16 +67,12 @@ struct udp_sock {
*/
__u16 pcslen;
__u16 pcrlen;
-/* indicator bits used by pcflag: */
-#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
-#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
-#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
- __u8 unused[3];
/*
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
@@ -89,33 +89,54 @@ struct udp_sock {
/* This field is dirtied by udp_recvmsg() */
int forward_deficit;
+
+ /* This field follows the rcvbuf value, and is touched by udp_recvmsg */
+ int forward_threshold;
+
+ /* Cache-friendly copy of sk->sk_peek_off >= 0 */
+ bool peeking_with_offset;
};
+#define udp_test_bit(nr, sk) \
+ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_set_bit(nr, sk) \
+ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_test_and_set_bit(nr, sk) \
+ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_clear_bit(nr, sk) \
+ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_assign_bit(nr, sk, val) \
+ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
+
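A hedged sketch of the accessor pattern these macros enable, replacing the old packed-bitfield writes — the function name is illustrative:

        static void my_udp_enable_gro(struct sock *sk)
        {
                /* expands to set_bit(UDP_FLAGS_GRO_ENABLED, ...), so the update
                 * is atomic and safe against concurrent flag changes, unlike the
                 * non-atomic bitfield stores it replaces */
                udp_set_bit(GRO_ENABLED, sk);
        }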
#define UDP_MAX_SEGMENTS (1 << 6UL)
-static inline struct udp_sock *udp_sk(const struct sock *sk)
+#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
+
+static inline int udp_set_peek_off(struct sock *sk, int val)
{
- return (struct udp_sock *)sk;
+ sk_set_peek_off(sk, val);
+ WRITE_ONCE(udp_sk(sk)->peeking_with_offset, val >= 0);
+ return 0;
}
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_tx = val;
+ udp_assign_bit(NO_CHECK6_TX, sk, val);
}
static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_rx = val;
+ udp_assign_bit(NO_CHECK6_RX, sk, val);
}
-static inline bool udp_get_no_check6_tx(struct sock *sk)
+static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_tx;
+ return udp_test_bit(NO_CHECK6_TX, sk);
}
-static inline bool udp_get_no_check6_rx(struct sock *sk)
+static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_rx;
+ return udp_test_bit(NO_CHECK6_RX, sk);
}
static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
@@ -131,8 +152,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
- return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
- skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+ if (!skb_is_gso(skb))
+ return false;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+ !udp_test_bit(ACCEPT_L4, sk))
+ return true;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
+ !udp_test_bit(ACCEPT_FRAGLIST, sk))
+ return true;
+
+ return false;
+}
+
+static inline void udp_allow_gso(struct sock *sk)
+{
+ udp_set_bit(ACCEPT_L4, sk);
+ udp_set_bit(ACCEPT_FRAGLIST, sk);
}
#define udp_portaddr_for_each_entry(__sk, list) \
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index b0542cd11aeb..f85ec5613721 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -12,20 +12,12 @@
* to detect when we overlook these differences.
*
*/
-#include <linux/types.h>
+#include <linux/uidgid_types.h>
#include <linux/highuid.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
-
-typedef struct {
- uid_t val;
-} kuid_t;
-
-
-typedef struct {
- gid_t val;
-} kgid_t;
+struct uid_gid_map;
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
@@ -138,6 +130,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return from_kgid(ns, gid) != (gid_t) -1;
}
+u32 map_id_down(struct uid_gid_map *map, u32 id);
+u32 map_id_up(struct uid_gid_map *map, u32 id);
+
#else
static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
@@ -186,6 +181,15 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return gid_valid(gid);
}
+static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
+
+static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
#endif /* CONFIG_USER_NS */
#endif /* _LINUX_UIDGID_H */
diff --git a/include/linux/uidgid_types.h b/include/linux/uidgid_types.h
new file mode 100644
index 000000000000..b35ac4955a33
--- /dev/null
+++ b/include/linux/uidgid_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UIDGID_TYPES_H
+#define _LINUX_UIDGID_TYPES_H
+
+#include <linux/types.h>
+
+typedef struct {
+ uid_t val;
+} kuid_t;
+
+typedef struct {
+ gid_t val;
+} kgid_t;
+
+#endif /* _LINUX_UIDGID_TYPES_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 3835a8a8e9ea..00cebe2b70de 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -7,10 +7,12 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
+#include <linux/mm_types.h>
#include <uapi/linux/uio.h>
struct page;
-struct pipe_inode_info;
+
+typedef unsigned int __bitwise iov_iter_extraction_t;
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
@@ -19,40 +21,89 @@ struct kvec {
enum iter_type {
/* iter types */
- ITER_IOVEC = 4,
- ITER_KVEC = 8,
- ITER_BVEC = 16,
- ITER_PIPE = 32,
- ITER_DISCARD = 64,
+ ITER_UBUF,
+ ITER_IOVEC,
+ ITER_BVEC,
+ ITER_KVEC,
+ ITER_XARRAY,
+ ITER_DISCARD,
+};
+
+#define ITER_SOURCE 1 // == WRITE
+#define ITER_DEST 0 // == READ
+
+struct iov_iter_state {
+ size_t iov_offset;
+ size_t count;
+ unsigned long nr_segs;
};
struct iov_iter {
+ u8 iter_type;
+ bool nofault;
+ bool data_source;
+ size_t iov_offset;
/*
- * Bit 0 is the read/write bit, set if we're writing.
- * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
- * the caller isn't expecting to drop a page reference when done.
+ * Hack alert: overlay ubuf_iovec with iovec + count, so
+ * that the members resolve correctly regardless of the type
+ * of iterator used. This means that you can use:
+ *
+ * &iter->__ubuf_iovec or iter->__iov
+ *
+ * interchangeably for the user_backed cases, hence simplifying
+ * some of the cases that need to deal with both.
*/
- unsigned int type;
- size_t iov_offset;
- size_t count;
union {
- const struct iovec *iov;
- const struct kvec *kvec;
- const struct bio_vec *bvec;
- struct pipe_inode_info *pipe;
+ /*
+ * This really should be a const, but we cannot do that without
+ * also modifying any of the zero-filling iter init functions.
+ * Leave it non-const for now, but it should be treated as such.
+ */
+ struct iovec __ubuf_iovec;
+ struct {
+ union {
+ /* use iter_iov() to get the current vec */
+ const struct iovec *__iov;
+ const struct kvec *kvec;
+ const struct bio_vec *bvec;
+ struct xarray *xarray;
+ void __user *ubuf;
+ };
+ size_t count;
+ };
};
union {
unsigned long nr_segs;
- struct {
- unsigned int head;
- unsigned int start_head;
- };
+ loff_t xarray_start;
};
};
+static inline const struct iovec *iter_iov(const struct iov_iter *iter)
+{
+ if (iter->iter_type == ITER_UBUF)
+ return (const struct iovec *) &iter->__ubuf_iovec;
+ return iter->__iov;
+}
+
+#define iter_iov_addr(iter) (iter_iov(iter)->iov_base + (iter)->iov_offset)
+#define iter_iov_len(iter) (iter_iov(iter)->iov_len - (iter)->iov_offset)
+
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
- return i->type & ~(READ | WRITE);
+ return i->iter_type;
+}
+
+static inline void iov_iter_save_state(struct iov_iter *iter,
+ struct iov_iter_state *state)
+{
+ state->iov_offset = iter->iov_offset;
+ state->count = iter->count;
+ state->nr_segs = iter->nr_segs;
+}
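A sketch of the save/restore pairing (iov_iter_restore() is declared later in this header). The retry flow mirrors what io_uring-style callers do, but the function itself is illustrative:

        static ssize_t my_write_with_retry(struct kiocb *iocb, struct iov_iter *from)
        {
                struct iov_iter_state state;
                ssize_t ret;

                iov_iter_save_state(from, &state);
                ret = generic_file_write_iter(iocb, from); /* may consume segments */
                if (ret == -EAGAIN)
                        iov_iter_restore(from, &state);    /* rewind before retrying */
                return ret;
        }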
+
+static inline bool iter_is_ubuf(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_UBUF;
}
static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -70,19 +121,24 @@ static inline bool iov_iter_is_bvec(const struct iov_iter *i)
return iov_iter_type(i) == ITER_BVEC;
}
-static inline bool iov_iter_is_pipe(const struct iov_iter *i)
+static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
- return iov_iter_type(i) == ITER_PIPE;
+ return iov_iter_type(i) == ITER_DISCARD;
}
-static inline bool iov_iter_is_discard(const struct iov_iter *i)
+static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
- return iov_iter_type(i) == ITER_DISCARD;
+ return iov_iter_type(i) == ITER_XARRAY;
}
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
- return i->type & (READ | WRITE);
+ return i->data_source ? WRITE : READ;
+}
+
+static inline bool user_backed_iter(const struct iov_iter *i)
+{
+ return iter_is_ubuf(i) || iter_is_iovec(i);
}
/*
@@ -102,20 +158,12 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
return ret;
}
-static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
-{
- return (struct iovec) {
- .iov_base = iter->iov->iov_base + iter->iov_offset,
- .iov_len = min(iter->count,
- iter->iov->iov_len - iter->iov_offset),
- };
-}
-
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -124,59 +172,71 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_page_to_iter(&folio->page, offset, bytes, i);
+}
+
+static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
+ size_t offset, size_t bytes, struct iov_iter *i)
+{
+ return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
+}
+
+size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
+ size_t bytes, struct iov_iter *i);
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, true))
return _copy_to_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full(addr, bytes, i);
+ size_t copied = copy_from_iter(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter_nocache(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full_nocache(addr, bytes, i);
+ size_t copied = copy_from_iter_nocache(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
- * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+ * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
@@ -185,31 +245,15 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
-#define _copy_to_iter_mcsafe _copy_to_iter
+#define _copy_mc_to_iter _copy_to_iter
#endif
-static __always_inline __must_check
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
- return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static __always_inline __must_check
-size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
- return _copy_to_iter_mcsafe(addr, bytes, i);
-}
-
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
@@ -218,14 +262,15 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec
unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
-void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
- size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
-ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
+ loff_t start, size_t count);
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
@@ -260,28 +305,80 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i);
-
-ssize_t import_iovec(int type, const struct iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i);
-
-#ifdef CONFIG_COMPAT
-struct compat_iovec;
-ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i);
-#endif
-int import_single_range(int type, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i);
+static inline int
+iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
+{
+ size_t shorted = 0;
+ int npages;
+
+ if (iov_iter_count(i) > max_bytes) {
+ shorted = iov_iter_count(i) - max_bytes;
+ iov_iter_truncate(i, max_bytes);
+ }
+ npages = iov_iter_npages(i, maxpages);
+ if (shorted)
+ iov_iter_reexpand(i, iov_iter_count(i) + shorted);
+
+ return npages;
+}
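An illustrative caller that sizes a page array for at most 1 MiB of the iterator without permanently truncating it; BIO_MAX_VECS and SZ_1M are assumed from the block layer and linux/sizes.h:

        static int my_max_pages(struct iov_iter *iter)
        {
                /* the iterator's count is restored before returning */
                return iov_iter_npages_cap(iter, BIO_MAX_VECS, SZ_1M);
        }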
+
+struct iovec *iovec_from_user(const struct iovec __user *uvector,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_iov, bool compat);
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i);
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i, bool compat);
+int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
+
+static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
+ void __user *buf, size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter) {
+ .iter_type = ITER_UBUF,
+ .data_source = direction,
+ .ubuf = buf,
+ .count = count,
+ .nr_segs = 1
+ };
+}
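A hedged sketch of the single-buffer fast path this enables for read(2)-style entry points — names are illustrative:

        static ssize_t my_read(struct file *file, char __user *buf, size_t len,
                               loff_t *ppos)
        {
                struct iov_iter iter;
                struct kiocb kiocb;
                ssize_t ret;

                init_sync_kiocb(&kiocb, file);
                kiocb.ki_pos = *ppos;
                iov_iter_ubuf(&iter, ITER_DEST, buf, len); /* ITER_DEST == READ */
                ret = file->f_op->read_iter(&kiocb, &iter);
                if (ret > 0)
                        *ppos = kiocb.ki_pos;
                return ret;
        }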
+/* Flags for iov_iter_get/extract_pages*() */
+/* Allow P2PDMA on the extracted pages */
+#define ITER_ALLOW_P2PDMA ((__force iov_iter_extraction_t)0x01)
+
+ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
+ size_t maxsize, unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0);
+
+/**
+ * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
+ * @iter: The iterator
+ *
+ * Examine the iterator and indicate, by returning true or false, whether
+ * pages extracted from the iterator will be retained by the extraction
+ * function.
+ *
+ * %true indicates that the pages will have a pin placed in them that the
+ * caller must unpin. This must be done for DMA/async DIO to force fork()
+ * to copy the page for the child (the parent must retain the original
+ * page).
+ *
+ * %false indicates that no measures are taken and that it's up to the caller
+ * to retain the pages.
+ */
+static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
+{
+ return user_backed_iter(iter);
+}
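An illustrative cleanup helper showing how callers of iov_iter_extract_pages() honour this predicate:

        static void my_put_extracted_pages(struct iov_iter *iter,
                                           struct page **pages, unsigned int npages)
        {
                if (iov_iter_extract_will_pin(iter))
                        unpin_user_pages(pages, npages);
                /* otherwise no reference was taken; the caller keeps them alive */
        }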
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context);
+struct sg_table;
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags);
#endif
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 54bf6b118401..18238dc8bfd3 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -28,19 +28,26 @@ struct uio_map;
* logical, virtual, or physical & phys_addr_t
* should always be large enough to handle any of
* the address types)
+ * @dma_addr: DMA handle set by dma_alloc_coherent, used with
+ * UIO_MEM_DMA_COHERENT only (@addr should be the
+ * void * returned from the same dma_alloc_coherent call)
* @offs: offset of device memory within the page
* @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
* @internal_addr: ioremap-ped version of addr, for driver internal use
+ * @dma_device: device struct that was passed to dma_alloc_coherent,
+ * used with UIO_MEM_DMA_COHERENT only
* @map: for use by the UIO core only.
*/
struct uio_mem {
const char *name;
phys_addr_t addr;
+ dma_addr_t dma_addr;
unsigned long offs;
resource_size_t size;
int memtype;
void __iomem *internal_addr;
+ struct device *dma_device;
struct uio_map *map;
};
@@ -117,6 +124,14 @@ extern int __must_check
struct uio_info *info);
/* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * uio_register_device - register a new userspace IO device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
#define uio_register_device(parent, info) \
__uio_register_device(THIS_MODULE, parent, info)
@@ -129,6 +144,14 @@ extern int __must_check
struct uio_info *info);
/* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * devm_uio_register_device - Resource managed uio_register_device()
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
#define devm_uio_register_device(parent, info) \
__devm_uio_register_device(THIS_MODULE, parent, info)
@@ -142,6 +165,12 @@ extern int __must_check
#define UIO_MEM_LOGICAL 2
#define UIO_MEM_VIRTUAL 3
#define UIO_MEM_IOVA 4
+/*
+ * UIO_MEM_DMA_COHERENT exists for legacy drivers that had been getting by with
+ * improperly mapping DMA coherent allocations through the other modes.
+ * Do not use in new drivers.
+ */
+#define UIO_MEM_DMA_COHERENT 5
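A hedged sketch of how a legacy driver would populate such a region, per the dma_addr/dma_device kernel-doc earlier in this patch — the driver code is illustrative:

        static int my_uio_add_coherent_mem(struct uio_info *info, struct device *dev)
        {
                dma_addr_t dma;
                void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);

                if (!vaddr)
                        return -ENOMEM;

                info->mem[0].memtype    = UIO_MEM_DMA_COHERENT;
                info->mem[0].addr       = (uintptr_t)vaddr; /* the void * from above */
                info->mem[0].dma_addr   = dma;
                info->mem[0].size       = PAGE_SIZE;
                info->mem[0].dma_device = dev;
                return 0;
        }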
/* defines for uio_port->porttype */
#define UIO_PORT_NONE 0
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
index c7a1810373e3..a8cb617a3028 100644
--- a/include/linux/ulpi/driver.h
+++ b/include/linux/ulpi/driver.h
@@ -15,9 +15,9 @@ struct ulpi_ops;
* @dev: device interface
*/
struct ulpi {
+ struct device dev;
struct ulpi_device_id id;
const struct ulpi_ops *ops;
- struct device dev;
};
#define to_ulpi_dev(d) container_of(d, struct ulpi, dev)
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 244aff638220..daa6a7048c11 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -11,10 +11,11 @@
struct cred;
struct file;
-#define UMH_NO_WAIT 0 /* don't wait at all */
-#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
-#define UMH_WAIT_PROC 2 /* wait for the process to complete */
-#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
+#define UMH_NO_WAIT 0x00 /* don't wait at all */
+#define UMH_WAIT_EXEC 0x01 /* wait for the exec, but not the process */
+#define UMH_WAIT_PROC 0x02 /* wait for the process to complete */
+#define UMH_KILLABLE 0x04 /* wait for EXEC/PROC killable */
+#define UMH_FREEZABLE 0x08 /* wait for EXEC/PROC freezable */
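A sketch of a common invocation combining the flag values above — the helper path is hypothetical:

        static int my_run_helper(void)
        {
                char *argv[] = { "/sbin/my-helper", NULL };     /* hypothetical path */
                char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

                /* wait for the process to finish, but let a fatal signal
                 * interrupt the wait */
                return call_usermodehelper(argv[0], argv, envp,
                                           UMH_WAIT_PROC | UMH_KILLABLE);
        }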
struct subprocess_info {
struct work_struct work;
@@ -41,8 +42,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp,
extern int
call_usermodehelper_exec(struct subprocess_info *info, int wait);
-extern struct ctl_table usermodehelper_table[];
-
enum umh_disable_depth {
UMH_ENABLED = 0,
UMH_FREEZING,
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
deleted file mode 100644
index 167aa849c0ce..000000000000
--- a/include/linux/unaligned/access_ok.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
-#define _LINUX_UNALIGNED_ACCESS_OK_H
-
-#include <linux/kernel.h>
-#include <asm/byteorder.h>
-
-static __always_inline u16 get_unaligned_le16(const void *p)
-{
- return le16_to_cpup((__le16 *)p);
-}
-
-static __always_inline u32 get_unaligned_le32(const void *p)
-{
- return le32_to_cpup((__le32 *)p);
-}
-
-static __always_inline u64 get_unaligned_le64(const void *p)
-{
- return le64_to_cpup((__le64 *)p);
-}
-
-static __always_inline u16 get_unaligned_be16(const void *p)
-{
- return be16_to_cpup((__be16 *)p);
-}
-
-static __always_inline u32 get_unaligned_be32(const void *p)
-{
- return be32_to_cpup((__be32 *)p);
-}
-
-static __always_inline u64 get_unaligned_be64(const void *p)
-{
- return be64_to_cpup((__be64 *)p);
-}
-
-static __always_inline void put_unaligned_le16(u16 val, void *p)
-{
- *((__le16 *)p) = cpu_to_le16(val);
-}
-
-static __always_inline void put_unaligned_le32(u32 val, void *p)
-{
- *((__le32 *)p) = cpu_to_le32(val);
-}
-
-static __always_inline void put_unaligned_le64(u64 val, void *p)
-{
- *((__le64 *)p) = cpu_to_le64(val);
-}
-
-static __always_inline void put_unaligned_be16(u16 val, void *p)
-{
- *((__be16 *)p) = cpu_to_be16(val);
-}
-
-static __always_inline void put_unaligned_be32(u32 val, void *p)
-{
- *((__be32 *)p) = cpu_to_be32(val);
-}
-
-static __always_inline void put_unaligned_be64(u64 val, void *p)
-{
- *((__be64 *)p) = cpu_to_be64(val);
-}
-
-#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
deleted file mode 100644
index c43ff5918c8a..000000000000
--- a/include/linux/unaligned/be_byteshift.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
-#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
-
-#include <linux/types.h>
-
-static inline u16 __get_unaligned_be16(const u8 *p)
-{
- return p[0] << 8 | p[1];
-}
-
-static inline u32 __get_unaligned_be32(const u8 *p)
-{
- return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
-}
-
-static inline u64 __get_unaligned_be64(const u8 *p)
-{
- return (u64)__get_unaligned_be32(p) << 32 |
- __get_unaligned_be32(p + 4);
-}
-
-static inline void __put_unaligned_be16(u16 val, u8 *p)
-{
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void __put_unaligned_be32(u32 val, u8 *p)
-{
- __put_unaligned_be16(val >> 16, p);
- __put_unaligned_be16(val, p + 2);
-}
-
-static inline void __put_unaligned_be64(u64 val, u8 *p)
-{
- __put_unaligned_be32(val >> 32, p);
- __put_unaligned_be32(val, p + 4);
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_be16(p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_be32(p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_be64(p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_be16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_be32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_be64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h
deleted file mode 100644
index 7164214a4ba1..000000000000
--- a/include/linux/unaligned/be_memmove.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
-#define _LINUX_UNALIGNED_BE_MEMMOVE_H
-
-#include <linux/unaligned/memmove.h>
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_memmove16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_memmove32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_memmove64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_memmove16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_memmove32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_memmove64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h
deleted file mode 100644
index 15ea503a13fc..000000000000
--- a/include/linux/unaligned/be_struct.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
-#define _LINUX_UNALIGNED_BE_STRUCT_H
-
-#include <linux/unaligned/packed_struct.h>
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_cpu16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_cpu32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_cpu64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_cpu16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_cpu32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_cpu64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
deleted file mode 100644
index 303289492859..000000000000
--- a/include/linux/unaligned/generic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_GENERIC_H
-#define _LINUX_UNALIGNED_GENERIC_H
-
-#include <linux/types.h>
-
-/*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
- */
-extern void __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
- __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
- __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
- __bad_unaligned_access_size())))); \
- }))
-
-#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
- __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
- __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
- __bad_unaligned_access_size())))); \
- }))
-
-#define __put_unaligned_le(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_le16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_le32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_le64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-#define __put_unaligned_be(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_be16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_be32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_be64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-static inline u32 __get_unaligned_be24(const u8 *p)
-{
- return p[0] << 16 | p[1] << 8 | p[2];
-}
-
-static inline u32 get_unaligned_be24(const void *p)
-{
- return __get_unaligned_be24(p);
-}
-
-static inline u32 __get_unaligned_le24(const u8 *p)
-{
- return p[0] | p[1] << 8 | p[2] << 16;
-}
-
-static inline u32 get_unaligned_le24(const void *p)
-{
- return __get_unaligned_le24(p);
-}
-
-static inline void __put_unaligned_be24(const u32 val, u8 *p)
-{
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void put_unaligned_be24(const u32 val, void *p)
-{
- __put_unaligned_be24(val, p);
-}
-
-static inline void __put_unaligned_le24(const u32 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
-}
-
-static inline void put_unaligned_le24(const u32 val, void *p)
-{
- __put_unaligned_le24(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_GENERIC_H */
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
deleted file mode 100644
index 2248dcb0df76..000000000000
--- a/include/linux/unaligned/le_byteshift.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
-#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
-
-#include <linux/types.h>
-
-static inline u16 __get_unaligned_le16(const u8 *p)
-{
- return p[0] | p[1] << 8;
-}
-
-static inline u32 __get_unaligned_le32(const u8 *p)
-{
- return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
-}
-
-static inline u64 __get_unaligned_le64(const u8 *p)
-{
- return (u64)__get_unaligned_le32(p + 4) << 32 |
- __get_unaligned_le32(p);
-}
-
-static inline void __put_unaligned_le16(u16 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
-}
-
-static inline void __put_unaligned_le32(u32 val, u8 *p)
-{
- __put_unaligned_le16(val >> 16, p + 2);
- __put_unaligned_le16(val, p);
-}
-
-static inline void __put_unaligned_le64(u64 val, u8 *p)
-{
- __put_unaligned_le32(val >> 32, p + 4);
- __put_unaligned_le32(val, p);
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_le16(p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_le32(p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_le64(p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_le16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_le32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_le64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h
deleted file mode 100644
index 9202e864d026..000000000000
--- a/include/linux/unaligned/le_memmove.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
-#define _LINUX_UNALIGNED_LE_MEMMOVE_H
-
-#include <linux/unaligned/memmove.h>
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_memmove16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_memmove32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_memmove64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_memmove16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_memmove32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_memmove64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h
deleted file mode 100644
index 9977987883a6..000000000000
--- a/include/linux/unaligned/le_struct.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
-#define _LINUX_UNALIGNED_LE_STRUCT_H
-
-#include <linux/unaligned/packed_struct.h>
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_cpu16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_cpu32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_cpu64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_cpu16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_cpu32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_cpu64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h
deleted file mode 100644
index ac71b53bc6dc..000000000000
--- a/include/linux/unaligned/memmove.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_MEMMOVE_H
-#define _LINUX_UNALIGNED_MEMMOVE_H
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-static inline u16 __get_unaligned_memmove16(const void *p)
-{
- u16 tmp;
- memmove(&tmp, p, 2);
- return tmp;
-}
-
-static inline u32 __get_unaligned_memmove32(const void *p)
-{
- u32 tmp;
- memmove(&tmp, p, 4);
- return tmp;
-}
-
-static inline u64 __get_unaligned_memmove64(const void *p)
-{
- u64 tmp;
- memmove(&tmp, p, 8);
- return tmp;
-}
-
-static inline void __put_unaligned_memmove16(u16 val, void *p)
-{
- memmove(p, &val, 2);
-}
-
-static inline void __put_unaligned_memmove32(u32 val, void *p)
-{
- memmove(p, &val, 4);
-}
-
-static inline void __put_unaligned_memmove64(u64 val, void *p)
-{
- memmove(p, &val, 8);
-}
-
-#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index c0d817de4df2..f4c8eaf4d012 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
#define _LINUX_UNALIGNED_PACKED_STRUCT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
struct __una_u16 { u16 x; } __packed;
struct __una_u32 { u32 x; } __packed;
diff --git a/include/linux/unicode.h b/include/linux/unicode.h
index 990aa97d8049..4d39e6e11a95 100644
--- a/include/linux/unicode.h
+++ b/include/linux/unicode.h
@@ -5,9 +5,52 @@
#include <linux/init.h>
#include <linux/dcache.h>
+struct utf8data;
+struct utf8data_table;
+
+#define UNICODE_MAJ_SHIFT 16
+#define UNICODE_MIN_SHIFT 8
+
+#define UNICODE_AGE(MAJ, MIN, REV) \
+ (((unsigned int)(MAJ) << UNICODE_MAJ_SHIFT) | \
+ ((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \
+ ((unsigned int)(REV)))
+
+static inline u8 unicode_major(unsigned int age)
+{
+ return (age >> UNICODE_MAJ_SHIFT) & 0xff;
+}
+
+static inline u8 unicode_minor(unsigned int age)
+{
+ return (age >> UNICODE_MIN_SHIFT) & 0xff;
+}
+
+static inline u8 unicode_rev(unsigned int age)
+{
+ return age & 0xff;
+}
+
+/*
+ * Two normalization forms are supported:
+ * 1) NFDI
+ * - Apply unicode normalization form NFD.
+ * - Remove any Default_Ignorable_Code_Point.
+ * 2) NFDICF
+ * - Apply unicode normalization form NFD.
+ * - Remove any Default_Ignorable_Code_Point.
+ * - Apply a full casefold (C + F).
+ */
+enum utf8_normalization {
+ UTF8_NFDI = 0,
+ UTF8_NFDICF,
+ UTF8_NMAX,
+};
+
struct unicode_map {
- const char *charset;
- int version;
+ unsigned int version;
+ const struct utf8data *ntab[UTF8_NMAX];
+ const struct utf8data_table *tables;
};
int utf8_validate(const struct unicode_map *um, const struct qstr *str);
@@ -27,7 +70,10 @@ int utf8_normalize(const struct unicode_map *um, const struct qstr *str,
int utf8_casefold(const struct unicode_map *um, const struct qstr *str,
unsigned char *dest, size_t dlen);
-struct unicode_map *utf8_load(const char *version);
+int utf8_casefold_hash(const struct unicode_map *um, const void *salt,
+ struct qstr *str);
+
+struct unicode_map *utf8_load(unsigned int version);
void utf8_unload(struct unicode_map *um);
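A minimal sketch of the new version-encoded load, assuming CONFIG_UNICODE, the sb->s_encoding field, and built-in Unicode 12.1.0 tables — error handling is illustrative:

        static int my_enable_casefold(struct super_block *sb)
        {
                struct unicode_map *um = utf8_load(UNICODE_AGE(12, 1, 0));

                if (IS_ERR(um))
                        return PTR_ERR(um);     /* unknown or unbuilt table version */
                sb->s_encoding = um;            /* released later via utf8_unload() */
                return 0;
        }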
#endif /* _LINUX_UNICODE_H */
diff --git a/include/linux/units.h b/include/linux/units.h
index aaf716364ec3..00e15de33eca 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -2,7 +2,42 @@
#ifndef _LINUX_UNITS_H
#define _LINUX_UNITS_H
-#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/math.h>
+
+/* Metric prefixes in accordance with Système international (d'unités) */
+#define PETA 1000000000000000ULL
+#define TERA 1000000000000ULL
+#define GIGA 1000000000UL
+#define MEGA 1000000UL
+#define KILO 1000UL
+#define HECTO 100UL
+#define DECA 10UL
+#define DECI 10UL
+#define CENTI 100UL
+#define MILLI 1000UL
+#define MICRO 1000000UL
+#define NANO 1000000000UL
+#define PICO 1000000000000ULL
+#define FEMTO 1000000000000000ULL
+
+#define NANOHZ_PER_HZ 1000000000UL
+#define MICROHZ_PER_HZ 1000000UL
+#define MILLIHZ_PER_HZ 1000UL
+
+#define HZ_PER_KHZ 1000UL
+#define HZ_PER_MHZ 1000000UL
+
+#define KHZ_PER_MHZ 1000UL
+#define KHZ_PER_GHZ 1000000UL
+
+#define MILLIWATT_PER_WATT 1000UL
+#define MICROWATT_PER_MILLIWATT 1000UL
+#define MICROWATT_PER_WATT 1000000UL
+
+#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE)
+#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE)
+#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE)
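Illustrative arithmetic with the new constants — the helper name is hypothetical:

        static u64 my_mbit_rate_to_bytes_per_sec(u32 mbit_per_sec)
        {
                /* 1 Mbit/s = MEGA / BITS_PER_BYTE = 125000 bytes/s */
                return (u64)mbit_per_sec * BYTES_PER_MBIT;
        }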
#define ABSOLUTE_ZERO_MILLICELSIUS -273150
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 20c555db4621..9e52179872a5 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -25,7 +25,6 @@
struct usb_device;
struct usb_driver;
-struct wusb_dev;
/*-------------------------------------------------------------------------*/
@@ -170,6 +169,12 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out);
}
+enum usb_wireless_status {
+ USB_WIRELESS_STATUS_NA = 0,
+ USB_WIRELESS_STATUS_DISCONNECTED,
+ USB_WIRELESS_STATUS_CONNECTED,
+};
+
/**
* struct usb_interface - what usb device drivers talk to
* @altsetting: array of interface structures, one for each alternate
@@ -197,6 +202,10 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
* following a reset or suspend operation it doesn't support.
* @authorized: This allows to (de)authorize individual interfaces instead
* a whole device in contrast to the device authorization.
+ * @wireless_status: if the USB device uses a receiver/emitter combo, whether
+ * the emitter is connected.
+ * @wireless_status_work: Used for scheduling wireless status changes
+ * from atomic context.
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
@@ -253,18 +262,32 @@ struct usb_interface {
unsigned needs_binding:1; /* needs delayed unbind/rebind */
unsigned resetting_device:1; /* true: bandwidth alloc after reset */
unsigned authorized:1; /* used for interface authorization */
+ enum usb_wireless_status wireless_status;
+ struct work_struct wireless_status_work;
struct device dev; /* interface specific device info */
struct device *usb_dev;
struct work_struct reset_ws; /* for resets in atomic context */
};
-#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
+
+#define to_usb_interface(__dev) container_of_const(__dev, struct usb_interface, dev)
static inline void *usb_get_intfdata(struct usb_interface *intf)
{
return dev_get_drvdata(&intf->dev);
}
+/**
+ * usb_set_intfdata() - associate driver-specific data with an interface
+ * @intf: USB interface
+ * @data: driver data
+ *
+ * Drivers can use this function in their probe() callbacks to associate
+ * driver-specific data with an interface.
+ *
+ * Note that there is generally no need to clear the driver-data pointer even
+ * if some drivers do so for historical or implementation-specific reasons.
+ */
static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
{
dev_set_drvdata(&intf->dev, data);
@@ -279,6 +302,11 @@ void usb_put_intf(struct usb_interface *intf);
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)
+bool usb_check_bulk_endpoints(
+ const struct usb_interface *intf, const u8 *ep_addrs);
+bool usb_check_int_endpoints(
+ const struct usb_interface *intf, const u8 *ep_addrs);
+
/*
* USB Resume Timer: Every Host controller driver should drive the resume
* signalling on the bus for the amount of time defined by this macro.
@@ -396,7 +424,6 @@ struct usb_host_config {
struct usb_host_bos {
struct usb_bos_descriptor *desc;
- /* wireless cap descriptor is handled by wusb */
struct usb_ext_cap_descriptor *ext_cap;
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
@@ -473,12 +500,6 @@ struct usb_dev_state;
struct usb_tt;
-enum usb_device_removable {
- USB_DEVICE_REMOVABLE_UNKNOWN = 0,
- USB_DEVICE_REMOVABLE,
- USB_DEVICE_FIXED,
-};
-
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -560,6 +581,7 @@ struct usb3_lpm_parameters {
* @speed: device speed: high/full/low (or error)
* @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support
* @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support
+ * @ssp_rate: SuperSpeed Plus phy signaling rate and lane count
* @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
* @ttport: device port on that tt hub
* @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
@@ -580,6 +602,7 @@ struct usb3_lpm_parameters {
* @devaddr: device address, XHCI: assigned by HW, others: same as devnum
* @can_submit: URBs may be submitted
* @persist_enabled: USB_PERSIST enabled for this device
+ * @reset_in_progress: the device is being reset
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
* (user space) policy determines if we authorize this device to be
@@ -587,8 +610,8 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
- * @wusb: device is Wireless USB
* @lpm_capable: device supports LPM
+ * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
* @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
* @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
@@ -608,10 +631,7 @@ struct usb3_lpm_parameters {
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
- * @wusb_dev: if this is a Wireless USB device, link to the WUSB
- * specific data for the device.
* @slot_id: Slot ID assigned by xHCI
- * @removable: Device can be physically removed from this port
 * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
* @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
* @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
@@ -636,6 +656,7 @@ struct usb_device {
enum usb_device_speed speed;
unsigned int rx_lanes;
unsigned int tx_lanes;
+ enum usb_ssp_rate ssp_rate;
struct usb_tt *tt;
int ttport;
@@ -665,11 +686,12 @@ struct usb_device {
unsigned can_submit:1;
unsigned persist_enabled:1;
+ unsigned reset_in_progress:1;
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
- unsigned wusb:1;
unsigned lpm_capable:1;
+ unsigned lpm_devinit_allow:1;
unsigned usb2_hw_lpm_capable:1;
unsigned usb2_hw_lpm_besl_capable:1;
unsigned usb2_hw_lpm_enabled:1;
@@ -692,16 +714,13 @@ struct usb_device {
unsigned long active_duration;
-#ifdef CONFIG_PM
unsigned long connect_time;
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned port_is_suspended:1;
-#endif
- struct wusb_dev *wusb_dev;
+
int slot_id;
- enum usb_device_removable removable;
struct usb2_lpm_parameters l1_params;
struct usb3_lpm_parameters u1_params;
struct usb3_lpm_parameters u2_params;
@@ -710,12 +729,22 @@ struct usb_device {
u16 hub_delay;
unsigned use_generic_driver:1;
};
-#define to_usb_device(d) container_of(d, struct usb_device, dev)
-static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf)
+#define to_usb_device(__dev) container_of_const(__dev, struct usb_device, dev)
+
+static inline struct usb_device *__intf_to_usbdev(struct usb_interface *intf)
{
return to_usb_device(intf->dev.parent);
}
+static inline const struct usb_device *__intf_to_usbdev_const(const struct usb_interface *intf)
+{
+ return to_usb_device((const struct device *)intf->dev.parent);
+}
+
+#define interface_to_usbdev(intf) \
+ _Generic((intf), \
+ const struct usb_interface *: __intf_to_usbdev_const, \
+ struct usb_interface *: __intf_to_usbdev)(intf)
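The _Generic() dispatch preserves const-correctness: a const interface pointer yields a const device pointer, so read-only helpers need no casts. A small sketch under that assumption (my_product() is hypothetical):

    static const char *my_product(const struct usb_interface *intf)
    {
    	/* const in, const out -- no cast required */
    	const struct usb_device *udev = interface_to_usbdev(intf);

    	return udev->product;
    }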
extern struct usb_device *usb_get_dev(struct usb_device *dev);
extern void usb_put_dev(struct usb_device *dev);
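usb_get_dev()/usb_put_dev() are the usual refcounting pair; a common pattern (sketch only, priv is assumed) pins the device while private data holds a pointer to it:

    /* in probe():      */ priv->udev = usb_get_dev(interface_to_usbdev(intf));
    /* in disconnect(): */ usb_put_dev(priv->udev);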
@@ -746,15 +775,20 @@ extern int usb_lock_device_for_reset(struct usb_device *udev,
extern int usb_reset_device(struct usb_device *dev);
extern void usb_queue_reset_device(struct usb_interface *dev);
+extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
+
#ifdef CONFIG_ACPI
extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable);
extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
+extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
#else
static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable) { return 0; }
static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{ return true; }
+static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+ { return 0; }
#endif
/* USB autosuspend and autoresume */
@@ -839,7 +873,7 @@ extern int usb_free_streams(struct usb_interface *interface,
/* used these for multi-interface device registration */
extern int usb_driver_claim_interface(struct usb_driver *driver,
- struct usb_interface *iface, void *priv);
+ struct usb_interface *iface, void *data);
/**
* usb_interface_claimed - returns true iff an interface is claimed
@@ -861,6 +895,10 @@ static inline int usb_interface_claimed(struct usb_interface *iface)
extern void usb_driver_release_interface(struct usb_driver *driver,
struct usb_interface *iface);
+
+int usb_set_wireless_status(struct usb_interface *iface,
+ enum usb_wireless_status status);
+
const struct usb_device_id *usb_match_id(struct usb_interface *interface,
const struct usb_device_id *id);
extern int usb_match_one_id(struct usb_interface *interface,
@@ -1106,16 +1144,6 @@ extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
/**
- * struct usbdrv_wrap - wrapper for driver-model structure
- * @driver: The driver-model core driver structure.
- * @for_devices: Non-zero for device drivers, 0 for interface drivers.
- */
-struct usbdrv_wrap {
- struct device_driver driver;
- int for_devices;
-};
-
-/**
* struct usb_driver - identifies USB interface driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
@@ -1155,7 +1183,7 @@ struct usbdrv_wrap {
* is bound to the driver.
* @dynids: used internally to hold the list of dynamically added device
* ids for this driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
* @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
* added to this driver by preventing the sysfs file from being created.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1203,13 +1231,13 @@ struct usb_driver {
const struct attribute_group **dev_groups;
struct usb_dynids dynids;
- struct usbdrv_wrap drvwrap;
+ struct device_driver driver;
unsigned int no_dynamic_id:1;
unsigned int supports_autosuspend:1;
unsigned int disable_hub_initiated_lpm:1;
unsigned int soft_unbind:1;
};
-#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
+#define to_usb_driver(d) container_of(d, struct usb_driver, driver)
/**
* struct usb_device_driver - identifies USB device driver to usbcore
@@ -1225,9 +1253,12 @@ struct usb_driver {
* module is being unloaded.
* @suspend: Called when the device is going to be suspended by the system.
* @resume: Called when the device is being resumed by the system.
+ * @choose_configuration: If non-NULL, called instead of the default
+ * usb_choose_configuration(). If this returns an error then we'll go
+ * on to call the normal usb_choose_configuration().
* @dev_groups: Attributes attached to the device that will be created once it
* is bound to the driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
* @id_table: used with @match() to select better matching driver at
* probe() time.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1236,7 +1267,7 @@ struct usb_driver {
* resume and suspend functions will be called in addition to the driver's
* own, so this part of the setup does not need to be replicated.
*
- * USB drivers must provide all the fields listed above except drvwrap,
+ * USB drivers must provide all the fields listed above except driver,
* match, and id_table.
*/
struct usb_device_driver {
@@ -1248,16 +1279,17 @@ struct usb_device_driver {
int (*suspend) (struct usb_device *udev, pm_message_t message);
int (*resume) (struct usb_device *udev, pm_message_t message);
+
+ int (*choose_configuration) (struct usb_device *udev);
+
const struct attribute_group **dev_groups;
- struct usbdrv_wrap drvwrap;
+ struct device_driver driver;
const struct usb_device_id *id_table;
unsigned int supports_autosuspend:1;
unsigned int generic_subclass:1;
};
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
- drvwrap.driver)
-
-extern struct bus_type usb_bus_type;
+ driver)
/**
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number
@@ -1273,7 +1305,7 @@ extern struct bus_type usb_bus_type;
*/
struct usb_class_driver {
char *name;
- char *(*devnode)(struct device *dev, umode_t *mode);
+ char *(*devnode)(const struct device *dev, umode_t *mode);
const struct file_operations *fops;
int minor_base;
};
@@ -1474,7 +1506,7 @@ typedef void (*usb_complete_t)(struct urb *);
*
* Note that transfer_buffer must still be set if the controller
* does not support DMA (as indicated by hcd_uses_dma()) and when talking
- * to root hub. If you have to trasfer between highmem zone and the device
+ * to root hub. If you have to transfer between highmem zone and the device
* on such controller, create a bounce buffer or bail out with an error.
* If transfer_buffer cannot be set (is in highmem) and the controller is DMA
* capable, assign NULL to it, so that usbmon knows not to use the value.
@@ -1604,14 +1636,25 @@ struct urb {
* @urb: pointer to the urb to initialize.
* @dev: pointer to the struct usb_device for this urb.
* @pipe: the endpoint pipe
- * @setup_packet: pointer to the setup_packet buffer
- * @transfer_buffer: pointer to the transfer buffer
+ * @setup_packet: pointer to the setup_packet buffer. The buffer must be
+ * suitable for DMA.
+ * @transfer_buffer: pointer to the transfer buffer. The buffer must be
+ * suitable for DMA.
* @buffer_length: length of the transfer buffer
* @complete_fn: pointer to the usb_complete_t function
* @context: what to set the urb context to.
*
* Initializes a control urb with the proper information needed to submit
* it to a device.
+ *
+ * The transfer buffer and the setup_packet buffer will most likely be filled
+ * or read via DMA. The simplest way to get a buffer that can be DMAed to is
+ * allocating it via kmalloc() or equivalent, even for very small buffers.
+ * If the buffers are embedded in a bigger structure, there is a risk that
+ * the buffer itself, the previous fields and/or the next fields are corrupted
+ * due to cache incoherencies; or slowed down if they are evicted from the
+ * cache. For more information, check &struct urb.
*/
static inline void usb_fill_control_urb(struct urb *urb,
struct usb_device *dev,
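Tying the DMA note above to code, a hedged sketch where both buffers come from the heap; urb, udev, my_complete and my_ctx are assumed to exist, and the vendor request is made up:

    struct usb_ctrlrequest *setup = kmalloc(sizeof(*setup), GFP_KERNEL);
    u8 *buf = kmalloc(64, GFP_KERNEL);	/* never on the stack or in a struct */

    if (setup && buf) {
    	setup->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
    	setup->bRequest = 0x01;		/* hypothetical vendor request */
    	setup->wValue = 0;
    	setup->wIndex = 0;
    	setup->wLength = cpu_to_le16(64);
    	usb_fill_control_urb(urb, udev, usb_rcvctrlpipe(udev, 0),
    			     (unsigned char *)setup, buf, 64,
    			     my_complete, my_ctx);
    }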
@@ -1636,13 +1679,17 @@ static inline void usb_fill_control_urb(struct urb *urb,
* @urb: pointer to the urb to initialize.
* @dev: pointer to the struct usb_device for this urb.
* @pipe: the endpoint pipe
- * @transfer_buffer: pointer to the transfer buffer
+ * @transfer_buffer: pointer to the transfer buffer. The buffer must be
+ * suitable for DMA.
* @buffer_length: length of the transfer buffer
* @complete_fn: pointer to the usb_complete_t function
* @context: what to set the urb context to.
*
* Initializes a bulk urb with the proper information needed to submit it
* to a device.
+ *
+ * Refer to usb_fill_control_urb() for a description of the requirements for
+ * transfer_buffer.
*/
static inline void usb_fill_bulk_urb(struct urb *urb,
struct usb_device *dev,
@@ -1665,7 +1712,8 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* @urb: pointer to the urb to initialize.
* @dev: pointer to the struct usb_device for this urb.
* @pipe: the endpoint pipe
- * @transfer_buffer: pointer to the transfer buffer
+ * @transfer_buffer: pointer to the transfer buffer. The buffer must be
+ * suitable for DMA.
* @buffer_length: length of the transfer buffer
* @complete_fn: pointer to the usb_complete_t function
* @context: what to set the urb context to.
@@ -1675,15 +1723,13 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
 * Initializes an interrupt urb with the proper information needed to submit
* it to a device.
*
+ * Refer to usb_fill_control_urb() for a description of the requirements for
+ * transfer_buffer.
+ *
* Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic
* encoding of the endpoint interval, and express polling intervals in
* microframes (eight per millisecond) rather than in frames (one per
* millisecond).
- *
- * Wireless USB also uses the logarithmic encoding, but specifies it in units of
- * 128us instead of 125us. For Wireless USB devices, the interval is passed
- * through to the host controller, rather than being translated into microframe
- * units.
*/
static inline void usb_fill_int_urb(struct urb *urb,
struct usb_device *dev,
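A worked example of the logarithmic encoding mentioned above: on a high-speed endpoint, bInterval = 4 means 2^(4-1) = 8 microframes, i.e. a 1 ms polling period. Sketch (urb, udev, ep, buf, len, my_complete and my_ctx are assumed):

    usb_fill_int_urb(urb, udev,
    		 usb_rcvintpipe(udev, ep->desc.bEndpointAddress),
    		 buf, len, my_complete, my_ctx,
    		 ep->desc.bInterval);	/* passed through, log2-encoded */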
@@ -1764,6 +1810,7 @@ static inline int usb_urb_dir_out(struct urb *urb)
return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
}
+int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe);
int usb_urb_ep_type_check(const struct urb *urb);
void *usb_alloc_coherent(struct usb_device *dev, size_t size,
@@ -1771,22 +1818,6 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size,
void usb_free_coherent(struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
-#if 0
-struct urb *usb_buffer_map(struct urb *urb);
-void usb_buffer_dmasync(struct urb *urb);
-void usb_buffer_unmap(struct urb *urb);
-#endif
-
-struct scatterlist;
-int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int nents);
-#if 0
-void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-#endif
-void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
@@ -1801,6 +1832,14 @@ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
int timeout);
/* wrappers around usb_control_msg() for the most common standard requests */
+int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index,
+ const void *data, __u16 size, int timeout,
+ gfp_t memflags);
+int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout,
+ gfp_t memflags);
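Unlike raw usb_control_msg(), these wrappers copy data through an internal heap buffer (so on-stack data is fine) and return 0 or a negative errno, treating a short transfer as an error. A sketch with a made-up vendor request:

    u8 val;
    int ret;

    ret = usb_control_msg_recv(udev, 0, MY_REQ_GET_STATE,	/* hypothetical */
    			   USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
    			   0, 0, &val, sizeof(val),
    			   USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
    if (ret)
    	return ret;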
extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
unsigned char descindex, void *buf, int size);
extern int usb_get_status(struct usb_device *dev,
@@ -1821,6 +1860,7 @@ static inline int usb_get_ptm_status(struct usb_device *dev, void *data)
extern int usb_string(struct usb_device *dev, int index,
char *buf, size_t size);
+extern char *usb_cache_string(struct usb_device *udev, int index);
/* wrappers that also update important state inside usbcore */
extern int usb_clear_halt(struct usb_device *dev, int pipe);
@@ -1965,21 +2005,10 @@ usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
return eps[usb_pipeendpoint(pipe)];
}
-/*-------------------------------------------------------------------------*/
-
-static inline __u16
-usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+static inline u16 usb_maxpacket(struct usb_device *udev, int pipe)
{
- struct usb_host_endpoint *ep;
- unsigned epnum = usb_pipeendpoint(pipe);
+ struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe);
- if (is_out) {
- WARN_ON(usb_pipein(pipe));
- ep = udev->ep_out[epnum];
- } else {
- WARN_ON(usb_pipeout(pipe));
- ep = udev->ep_in[epnum];
- }
if (!ep)
return 0;
@@ -1987,8 +2016,6 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
return usb_endpoint_maxp(&ep->desc);
}
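Direction now comes from the pipe itself, so callers simply drop the old is_out argument, e.g.:

    maxp = usb_maxpacket(udev, usb_rcvbulkpipe(udev, 0x81));
    /* previously: usb_maxpacket(udev, pipe, usb_pipeout(pipe)) */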
-/* ----------------------------------------------------------------------- */
-
/* translate USB error codes to codes user space understands */
static inline int usb_translate_errors(int error_code)
{
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index ead8c9a47c6a..6e5555610010 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -2,9 +2,6 @@
/*
* Copyright (c) 2010 Daniel Mack <daniel@caiaq.de>
*
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
* This file holds USB constants and structures defined
* by the USB Device Class Definition for Audio Devices in version 2.0.
* Comments below reference relevant sections of the documents contained
@@ -85,7 +82,7 @@ struct uac_clock_source_descriptor {
#define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3
#define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2)
-/* 4.7.2.2 Clock Source Descriptor */
+/* 4.7.2.2 Clock Selector Descriptor */
struct uac_clock_selector_descriptor {
__u8 bLength;
@@ -94,7 +91,7 @@ struct uac_clock_selector_descriptor {
__u8 bClockID;
__u8 bNrInPins;
__u8 baCSourceID[];
- /* bmControls and iClockSource omitted */
+ /* bmControls and iClockSelector omitted */
} __attribute__((packed));
/* 4.7.2.3 Clock Multiplier Descriptor */
@@ -156,6 +153,20 @@ struct uac2_feature_unit_descriptor {
__u8 bmaControls[]; /* variable length */
} __attribute__((packed));
+#define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4)
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \
+struct uac2_feature_unit_descriptor_##ch { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bUnitID; \
+ __u8 bSourceID; \
+ __le32 bmaControls[ch + 1]; \
+ __u8 iFeature; \
+} __packed
+
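A usage sketch for the new macro; the field values are illustrative, with UAC_FEATURE_UNIT and USB_DT_CS_INTERFACE taken from the audio class headers:

    DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(2);	/* 2-channel variant */

    static struct uac2_feature_unit_descriptor_2 my_fu_desc = {
    	.bLength	    = UAC2_DT_FEATURE_UNIT_SIZE(2),	/* 6 + 3*4 = 18 */
    	.bDescriptorType    = USB_DT_CS_INTERFACE,
    	.bDescriptorSubtype = UAC_FEATURE_UNIT,
    };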
/* 4.7.2.10 Effect Unit Descriptor */
struct uac2_effect_unit_descriptor {
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index 170acd500ea1..0747b24a1a7c 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -6,9 +6,6 @@
* Developed for Thumtronics by Grey Innovation
* Ben Williamson <ben.williamson@greyinnovation.com>
*
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
* This file holds USB constants and structures defined
* by the USB Device Class Definition for Audio Devices.
* Comments below reference relevant sections of that document:
diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h
index 2fc39e3b7281..45e0757e58f3 100644
--- a/include/linux/usb/c67x00.h
+++ b/include/linux/usb/c67x00.h
@@ -3,21 +3,6 @@
* usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip
*
* Copyright (C) 2006-2008 Barco N.V.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _LINUX_USB_C67X00_H
diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h
index 9b895f93d8de..85417f00a89a 100644
--- a/include/linux/usb/cdc-wdm.h
+++ b/include/linux/usb/cdc-wdm.h
@@ -3,20 +3,17 @@
* USB CDC Device Management subdriver
*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_CDC_WDM_H
#define __LINUX_USB_CDC_WDM_H
+#include <linux/wwan.h>
#include <uapi/linux/usb/cdc-wdm.h>
extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep,
- int bufsize,
+ int bufsize, enum wwan_port_type type,
int (*manage_power)(struct usb_interface *, int));
#endif /* __LINUX_USB_CDC_WDM_H */
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index 35d784cf32a4..0af0db51bc89 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -3,10 +3,6 @@
* USB CDC common helpers
*
* Copyright (c) 2015 Oliver Neukum <oneukum@suse.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_CDC_H
#define __LINUX_USB_CDC_H
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 0ce4377545f8..2d207cb4837d 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -53,8 +53,8 @@
#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20
/* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
-#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_TX 65536 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_RX 65536 /* bytes */
/* Initial NTB length */
#define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */
@@ -98,6 +98,8 @@ struct cdc_ncm_ctx {
struct hrtimer tx_timer;
struct tasklet_struct bh;
+ struct usbnet *dev;
+
const struct usb_cdc_ncm_desc *func_desc;
const struct usb_cdc_mbim_desc *mbim_desc;
const struct usb_cdc_mbim_extended_desc *mbim_extended_desc;
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 604c6c514a50..c93b410b314a 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB host or as a USB device. That means the host and
* device side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
@@ -33,65 +30,28 @@
#ifndef __LINUX_USB_CH9_H
#define __LINUX_USB_CH9_H
-#include <linux/device.h>
#include <uapi/linux/usb/ch9.h>
-/**
- * usb_ep_type_string() - Returns human readable-name of the endpoint type.
- * @ep_type: The endpoint type to return human-readable name for. If it's not
- * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT},
- * usually got by usb_endpoint_type(), the string 'unknown' will be returned.
- */
-extern const char *usb_ep_type_string(int ep_type);
+/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */
-/**
- * usb_speed_string() - Returns human readable-name of the speed.
- * @speed: The speed to return human-readable name for. If it's not
- * any of the speeds defined in usb_device_speed enum, string for
- * USB_SPEED_UNKNOWN will be returned.
- */
-extern const char *usb_speed_string(enum usb_device_speed speed);
+enum usb_ssp_rate {
+ USB_SSP_GEN_UNKNOWN = 0,
+ USB_SSP_GEN_2x1,
+ USB_SSP_GEN_1x2,
+ USB_SSP_GEN_2x2,
+};
-/**
- * usb_get_maximum_speed - Get maximum requested speed for a given USB
- * controller.
- * @dev: Pointer to the given USB controller device
- *
- * The function gets the maximum speed string from property "maximum-speed",
- * and returns the corresponding enum usb_device_speed.
- */
-extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
+struct device;
-/**
- * usb_state_string - Returns human readable name for the state.
- * @state: The state to return a human-readable name for. If it's not
- * any of the states devices in usb_device_state_string enum,
- * the string UNKNOWN will be returned.
- */
+extern const char *usb_ep_type_string(int ep_type);
+extern const char *usb_speed_string(enum usb_device_speed speed);
+extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
+extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev);
extern const char *usb_state_string(enum usb_device_state state);
+unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd,
+ enum usb_device_speed speed);
#ifdef CONFIG_TRACING
-/**
- * usb_decode_ctrl - Returns human readable representation of control request.
- * @str: buffer to return a human-readable representation of control request.
- * This buffer should have about 200 bytes.
- * @size: size of str buffer.
- * @bRequestType: matches the USB bmRequestType field
- * @bRequest: matches the USB bRequest field
- * @wValue: matches the USB wValue field (CPU byte order)
- * @wIndex: matches the USB wIndex field (CPU byte order)
- * @wLength: matches the USB wLength field (CPU byte order)
- *
- * Function returns decoded, formatted and human-readable description of
- * control request packet.
- *
- * The usage scenario for this is for tracepoints, so function as a return
- * use the same value as in parameters. This approach allows to use this
- * function in TP_printk
- *
- * Important: wValue, wIndex, wLength parameters before invoking this function
- * should be processed by le16_to_cpu macro.
- */
extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
__u8 bRequest, __u16 wValue, __u16 wIndex,
__u16 wLength);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 025b41687ce9..5a7f96684ea2 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -62,6 +62,9 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
+#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
+#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
+#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
@@ -88,6 +91,12 @@ struct ci_hdrc_platform_data {
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_host;
struct pinctrl_state *pins_device;
+
+ /* platform-specific hooks */
+ int (*hub_control)(struct ci_hdrc *ci, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength,
+ bool *done, unsigned long *flags);
+ void (*enter_lpm)(struct ci_hdrc *ci, bool enable);
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2040696d75b6..af3cd2aae4bc 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -3,20 +3,6 @@
* composite.h -- framework for usb gadgets which are composite devices
*
* Copyright (C) 2006-2008 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __LINUX_USB_COMPOSITE_H
@@ -39,6 +25,7 @@
#include <linux/version.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/webusb.h>
#include <linux/log2.h>
#include <linux/configfs.h>
@@ -48,6 +35,14 @@
* are ready. The control transfer will then be kept from completing till
 * all the function drivers that requested USB_GADGET_DELAYED_STATUS
* invoke usb_composite_setup_continue().
+ *
+ * NOTE: USB_GADGET_DELAYED_STATUS must not be used in UDC drivers: they
+ * must delay completing the status stage for 0-length control transfers
+ * regardless of whether USB_GADGET_DELAYED_STATUS is returned from
+ * the gadget driver's setup() callback.
+ * Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS,
+ * which is a bug. These drivers must be fixed and USB_GADGET_DELAYED_STATUS
+ * must be contained within the composite framework.
*/
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
@@ -163,6 +158,9 @@ struct usb_os_desc_table {
* GetStatus() request when the recipient is Interface.
* @func_suspend: callback to be called when
 * SetFeature(FUNCTION_SUSPEND) is received
+ * @func_suspended: Indicates whether the function is in function suspend state.
+ * @func_wakeup_armed: Indicates whether the function is armed by the host for
+ * wakeup signaling.
*
* A single USB function uses one or more interfaces, and should in most
* cases support operation at both full and high speeds. Each function is
@@ -233,6 +231,8 @@ struct usb_function {
int (*get_status)(struct usb_function *);
int (*func_suspend)(struct usb_function *,
u8 suspend_opt);
+ bool func_suspended;
+ bool func_wakeup_armed;
/* private: */
/* internals */
struct list_head list;
@@ -254,6 +254,7 @@ int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f,
int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
+int usb_func_wakeup(struct usb_function *func);
#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */
@@ -271,7 +272,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
* @bConfigurationValue: Copied into configuration descriptor.
* @iConfiguration: Copied into configuration descriptor.
* @bmAttributes: Copied into configuration descriptor.
- * @MaxPower: Power consumtion in mA. Used to compute bMaxPower in the
+ * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the
* configuration descriptor after considering the bus speed.
* @cdev: assigned by @usb_add_config() before calling @bind(); this is
* the device associated with this configuration.
@@ -426,6 +427,8 @@ extern int composite_dev_prepare(struct usb_composite_driver *composite,
extern int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
struct usb_ep *ep0);
void composite_dev_cleanup(struct usb_composite_dev *cdev);
+void check_remote_wakeup_config(struct usb_gadget *g,
+ struct usb_configuration *c);
static inline struct usb_composite_driver *to_cdriver(
struct usb_gadget_driver *gdrv)
@@ -437,7 +440,7 @@ static inline struct usb_composite_driver *to_cdriver(
#define OS_STRING_IDX 0xEE
/**
- * struct usb_composite_device - represents one composite usb gadget
+ * struct usb_composite_dev - represents one composite usb gadget
* @gadget: read-only, abstracts the gadget's usb peripheral controller
* @req: used for control responses; buffer is pre-allocated
* @os_desc_req: used for OS descriptors responses; buffer is pre-allocated
@@ -445,35 +448,16 @@ static inline struct usb_composite_driver *to_cdriver(
* @qw_sign: qwSignature part of the OS string
* @b_vendor_code: bMS_VendorCode part of the OS string
* @use_os_string: false by default, interested gadgets set it
+ * @bcd_webusb_version: 0x0100 by default, WebUSB specification version
+ * @b_webusb_vendor_code: 0x0 by default, vendor code for WebUSB
+ * @landing_page: empty by default, landing page to announce in WebUSB
+ * @use_webusb: false by default, interested gadgets set it
* @os_desc_config: the configuration to be used with OS descriptors
* @setup_pending: true when setup request is queued but not completed
* @os_desc_pending: true when os_desc request is queued but not completed
*
* One of these devices is allocated and initialized before the
* associated device driver's bind() is called.
- *
- * OPEN ISSUE: it appears that some WUSB devices will need to be
- * built by combining a normal (wired) gadget with a wireless one.
- * This revision of the gadget framework should probably try to make
- * sure doing that won't hurt too much.
- *
- * One notion for how to handle Wireless USB devices involves:
- *
- * (a) a second gadget here, discovery mechanism TBD, but likely
- * needing separate "register/unregister WUSB gadget" calls;
- * (b) updates to usb_gadget to include flags "is it wireless",
- * "is it wired", plus (presumably in a wrapper structure)
- * bandgroup and PHY info;
- * (c) presumably a wireless_ep wrapping a usb_ep, and reporting
- * wireless-specific parameters like maxburst and maxsequence;
- * (d) configurations that are specific to wireless links;
- * (e) function drivers that understand wireless configs and will
- * support wireless for (additional) function instances;
- * (f) a function to support association setup (like CBAF), not
- * necessarily requiring a wireless adapter;
- * (g) composite device setup that can create one or more wireless
- * configs, including appropriate association setup support;
- * (h) more, TBD.
*/
struct usb_composite_dev {
struct usb_gadget *gadget;
@@ -488,6 +472,12 @@ struct usb_composite_dev {
struct usb_configuration *os_desc_config;
unsigned int use_os_string:1;
+ /* WebUSB */
+ u16 bcd_webusb_version;
+ u8 b_webusb_vendor_code;
+ char landing_page[WEBUSB_URL_RAW_MAX_LENGTH];
+ unsigned int use_webusb:1;
+
/* private: */
/* internals */
unsigned int suspended:1;
@@ -497,6 +487,7 @@ struct usb_composite_dev {
struct usb_composite_driver *driver;
u8 next_string_id;
char *def_manufacturer;
+ struct usb_string *usb_strings;
/* the gadget driver won't enable the data pullup
* while the deactivation count is nonzero.
@@ -525,6 +516,8 @@ extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
extern void composite_disconnect(struct usb_gadget *gadget);
+extern void composite_reset(struct usb_gadget *gadget);
+
extern int composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl);
extern void composite_suspend(struct usb_gadget *gadget);
@@ -573,8 +566,8 @@ static inline u16 get_default_bcdDevice(void)
{
u16 bcdDevice;
- bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8;
- bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff));
+ bcdDevice = bin2bcd(LINUX_VERSION_MAJOR) << 8;
+ bcdDevice |= bin2bcd(LINUX_VERSION_PATCHLEVEL);
return bcdDevice;
}
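Worked example: on a v6.1 kernel this yields bin2bcd(6) << 8 | bin2bcd(1) = 0x0601. Using the MAJOR/PATCHLEVEL macros directly, rather than shifting LINUX_VERSION_CODE, keeps the result sensible even when the sublevel exceeds 255.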
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index 78e006355557..fbabadd3b372 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_USB_EHCI_DEF_H
@@ -45,6 +31,7 @@ struct ehci_caps {
#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+#define HCS_N_PORTS_MAX 15 /* N_PORTS valid 0x1-0xF */
u32 hcc_params; /* HCCPARAMS - offset 0x8 */
/* EHCI 1.1 addendum */
@@ -126,8 +113,9 @@ struct ehci_regs {
u32 configured_flag;
#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
- /* PORTSC: offset 0x44 */
- u32 port_status[0]; /* up to N_PORTS */
+ union {
+ /* PORTSC: offset 0x44 */
+ u32 port_status[HCS_N_PORTS_MAX]; /* up to N_PORTS */
/* EHCI 1.1 addendum */
#define PORTSC_SUSPEND_STS_ACK 0
#define PORTSC_SUSPEND_STS_NYET 1
@@ -164,28 +152,35 @@ struct ehci_regs {
#define PORT_CSC (1<<1) /* connect status change */
#define PORT_CONNECT (1<<0) /* device connected */
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-
- u32 reserved3[9];
-
- /* USBMODE: offset 0x68 */
- u32 usbmode; /* USB Device mode */
+ struct {
+ u32 reserved3[9];
+ /* USBMODE: offset 0x68 */
+ u32 usbmode; /* USB Device mode */
+ };
#define USBMODE_SDIS (1<<3) /* Stream disable */
#define USBMODE_BE (1<<2) /* BE/LE endianness select */
#define USBMODE_CM_HC (3<<0) /* host controller mode */
#define USBMODE_CM_IDLE (0<<0) /* idle state */
-
- u32 reserved4[6];
+ };
/* Moorestown has some non-standard registers, partially due to the fact that
* its EHCI controller has both TT and LPM support. HOSTPCx are extensions to
* PORTSCx
*/
- /* HOSTPC: offset 0x84 */
- u32 hostpc[0]; /* HOSTPC extension */
+ union {
+ struct {
+ u32 reserved4;
+ /* HOSTPC: offset 0x84 */
+ u32 hostpc[HCS_N_PORTS_MAX];
#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
#define HOSTPC_PSPD (3<<25) /* Port speed detection */
+ };
+
+ /* Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x80 */
+ u32 brcm_insnreg[4];
+ };
- u32 reserved5[17];
+ u32 reserved5[2];
/* USBMODE_EX: offset 0xc8 */
u32 usbmode_ex; /* USB Device mode extension */
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index dd742afdc03f..0f1b166f5aa0 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __USB_CORE_EHCI_PDRIVER_H
@@ -50,6 +36,7 @@ struct usb_ehci_pdata {
unsigned no_io_watchdog:1;
unsigned reset_on_resume:1;
unsigned dma_mask_64:1;
+ unsigned spurious_oc:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);
diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h
index 7581e488c237..d56bfedeb079 100644
--- a/include/linux/usb/g_hid.h
+++ b/include/linux/usb/g_hid.h
@@ -3,20 +3,6 @@
* g_hid.h -- Header file for USB HID gadget driver
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_G_HID_H
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 52ce1f6b8f83..56dda8e1562d 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -10,13 +10,12 @@
*
* (C) Copyright 2002-2004 by David Brownell
* All Rights Reserved.
- *
- * This software is licensed under the GNU GPL version 2.
*/
#ifndef __LINUX_USB_GADGET_H
#define __LINUX_USB_GADGET_H
+#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -53,6 +52,7 @@ struct usb_ep;
* @short_not_ok: When reading data, makes short packets be
* treated as errors (queue stops advancing till cleanup).
* @dma_mapped: Indicates if request has been mapped to DMA (internal)
+ * @sg_was_mapped: Set if the scatterlist has been mapped before the request
* @complete: Function called when request completes, so this request and
* its buffer may be re-used. The function will always be called with
* interrupts disabled, and it must not sleep.
@@ -112,6 +112,7 @@ struct usb_request {
unsigned zero:1;
unsigned short_not_ok:1;
unsigned dma_mapped:1;
+ unsigned sg_was_mapped:1;
void (*complete)(struct usb_ep *ep,
struct usb_request *req);
@@ -197,7 +198,7 @@ struct usb_ep_caps {
* @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
- * @caps:The structure describing types and directions supported by endoint.
+ * @caps:The structure describing types and directions supported by endpoint.
* @enabled: The current endpoint enabled/disabled state.
* @claimed: True if this endpoint is claimed by a function.
* @maxpacket:The maximum packet size used on this endpoint. The initial
@@ -311,6 +312,8 @@ struct usb_udc;
struct usb_gadget_ops {
int (*get_frame)(struct usb_gadget *);
int (*wakeup)(struct usb_gadget *);
+ int (*func_wakeup)(struct usb_gadget *gadget, int intf_id);
+ int (*set_remote_wakeup)(struct usb_gadget *, int set);
int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered);
int (*vbus_session) (struct usb_gadget *, int is_active);
int (*vbus_draw) (struct usb_gadget *, unsigned mA);
@@ -323,9 +326,13 @@ struct usb_gadget_ops {
struct usb_gadget_driver *);
int (*udc_stop)(struct usb_gadget *);
void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed);
+ void (*udc_set_ssp_rate)(struct usb_gadget *gadget,
+ enum usb_ssp_rate rate);
+ void (*udc_async_callbacks)(struct usb_gadget *gadget, bool enable);
struct usb_ep *(*match_ep)(struct usb_gadget *,
struct usb_endpoint_descriptor *,
struct usb_ss_ep_comp_descriptor *);
+ int (*check_config)(struct usb_gadget *gadget);
};
/**
@@ -339,6 +346,10 @@ struct usb_gadget_ops {
* @speed: Speed of current connection to USB host.
* @max_speed: Maximal speed the UDC can handle. UDC must support this
* and all slower speeds.
+ * @ssp_rate: Current connected SuperSpeed Plus signaling rate and lane count.
+ * @max_ssp_rate: Maximum SuperSpeed Plus signaling rate and lane count the UDC
+ * can handle. The UDC must support this and all slower speeds and lower
+ * number of lanes.
* @state: the state we are now (attached, suspended, configured, etc)
* @name: Identifies the controller hardware type. Used in diagnostics
* and sometimes configuration.
@@ -377,7 +388,10 @@ struct usb_gadget_ops {
* @connected: True if gadget is connected.
* @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
* indicates that it supports LPM as per the LPM ECN & errata.
+ * @wakeup_capable: True if gadget is capable of sending remote wakeup.
+ * @wakeup_armed: True if gadget is armed by the host for remote wakeup.
* @irq: the interrupt number for device controller.
+ * @id_number: a unique ID number for ensuring that gadget names are distinct
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -406,6 +420,11 @@ struct usb_gadget {
struct list_head ep_list; /* of usb_ep */
enum usb_device_speed speed;
enum usb_device_speed max_speed;
+
+ /* USB SuperSpeed Plus only */
+ enum usb_ssp_rate ssp_rate;
+ enum usb_ssp_rate max_ssp_rate;
+
enum usb_device_state state;
const char *name;
struct device dev;
@@ -432,10 +451,14 @@ struct usb_gadget {
unsigned deactivated:1;
unsigned connected:1;
unsigned lpm_capable:1;
+ unsigned wakeup_capable:1;
+ unsigned wakeup_armed:1;
int irq;
+ int id_number;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
+/* Interface to the device model */
static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
{ dev_set_drvdata(&gadget->dev, data); }
static inline void *get_gadget_data(struct usb_gadget *gadget)
@@ -444,6 +467,26 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
{
return container_of(dev, struct usb_gadget, dev);
}
+static inline struct usb_gadget *usb_get_gadget(struct usb_gadget *gadget)
+{
+ get_device(&gadget->dev);
+ return gadget;
+}
+static inline void usb_put_gadget(struct usb_gadget *gadget)
+{
+ put_device(&gadget->dev);
+}
+extern void usb_initialize_gadget(struct device *parent,
+ struct usb_gadget *gadget, void (*release)(struct device *dev));
+extern int usb_add_gadget(struct usb_gadget *gadget);
+extern void usb_del_gadget(struct usb_gadget *gadget);
+
+/* Legacy device-model interface */
+extern int usb_add_gadget_udc_release(struct device *parent,
+ struct usb_gadget *gadget, void (*release)(struct device *dev));
+extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
+extern void usb_del_gadget_udc(struct usb_gadget *gadget);
+extern char *usb_get_gadget_udc_name(void);
/* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */
#define gadget_for_each_ep(tmp, gadget) \
@@ -458,7 +501,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
*/
static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
{
- int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff;
+ int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc);
return round_up(len, max_packet_size);
}
@@ -566,6 +609,7 @@ static inline int gadget_is_otg(struct usb_gadget *g)
#if IS_ENABLED(CONFIG_USB_GADGET)
int usb_gadget_frame_number(struct usb_gadget *gadget);
int usb_gadget_wakeup(struct usb_gadget *gadget);
+int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set);
int usb_gadget_set_selfpowered(struct usb_gadget *gadget);
int usb_gadget_clear_selfpowered(struct usb_gadget *gadget);
int usb_gadget_vbus_connect(struct usb_gadget *gadget);
@@ -575,11 +619,14 @@ int usb_gadget_connect(struct usb_gadget *gadget);
int usb_gadget_disconnect(struct usb_gadget *gadget);
int usb_gadget_deactivate(struct usb_gadget *gadget);
int usb_gadget_activate(struct usb_gadget *gadget);
+int usb_gadget_check_config(struct usb_gadget *gadget);
#else
static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
{ return 0; }
+static inline int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
+{ return 0; }
static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
@@ -598,6 +645,8 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_activate(struct usb_gadget *gadget)
{ return 0; }
+static inline int usb_gadget_check_config(struct usb_gadget *gadget)
+{ return 0; }
#endif /* CONFIG_USB_GADGET */
/*-------------------------------------------------------------------------*/
@@ -627,9 +676,9 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
* @driver: Driver model state for this driver.
* @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
* this driver will be bound to any available UDC.
- * @pending: UDC core private data used for deferred probe of this driver.
- * @match_existing_only: If udc is not found, return an error and don't add this
- * gadget driver to list of pending driver
+ * @match_existing_only: If udc is not found, return an error and fail
+ * the driver registration
+ * @is_bound: Allow a driver to be bound to only one gadget
*
* Devices are disabled till a gadget driver successfully bind()s, which
* means the driver will handle setup() requests needed to enumerate (and
@@ -664,6 +713,15 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
* get_interface. Setting a configuration (or interface) is where
* endpoints should be activated or (config 0) shut down.
*
+ * The gadget driver's setup() callback does not have to queue a response to
+ * ep0 within the setup() call, the driver can do it after setup() returns.
+ * The UDC driver must wait until such a response is queued before proceeding
+ * with the data/status stages of the control transfer.
+ *
+ * NOTE: Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS
+ * being returned from the setup() callback, which is a bug. See the comment
+ * next to USB_GADGET_DELAYED_STATUS for details.
+ *
* (Note that only the default control endpoint is supported. Neither
* hosts nor devices generally support control traffic except to ep0.)
*
@@ -692,8 +750,8 @@ struct usb_gadget_driver {
struct device_driver driver;
char *udc_name;
- struct list_head pending;
unsigned match_existing_only:1;
+ bool is_bound:1;
};
@@ -703,22 +761,30 @@ struct usb_gadget_driver {
/* driver modules register and unregister, as usual.
* these calls must be made in a context that can sleep.
*
- * these will usually be implemented directly by the hardware-dependent
- * usb bus interface driver, which will only support a single driver.
+ * A gadget driver can be bound to only one gadget at a time.
*/
/**
- * usb_gadget_probe_driver - probe a gadget driver
+ * usb_gadget_register_driver_owner - register a gadget driver
* @driver: the driver being registered
+ * @owner: the driver module
+ * @mod_name: the driver module's build name
* Context: can sleep
*
* Call this in your gadget driver's module initialization function,
- * to tell the underlying usb controller driver about your driver.
+ * to tell the underlying UDC controller driver about your driver.
* The @bind() function will be called to bind it to a gadget before this
* registration call returns. It's expected that the @bind() function will
* be in init sections.
+ *
+ * Use the macro defined below instead of calling this directly.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ struct module *owner, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define usb_gadget_register_driver(driver) \
+ usb_gadget_register_driver_owner(driver, THIS_MODULE, KBUILD_MODNAME)
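Typical registration then looks like this (my_driver and the init/exit names are hypothetical):

    static int __init my_init(void)
    {
    	return usb_gadget_register_driver(&my_driver);	/* THIS_MODULE implied */
    }
    module_init(my_init);

    static void __exit my_exit(void)
    {
    	usb_gadget_unregister_driver(&my_driver);
    }
    module_exit(my_exit);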
/**
* usb_gadget_unregister_driver - unregister a gadget driver
@@ -735,12 +801,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
*/
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver);
-extern int usb_add_gadget_udc_release(struct device *parent,
- struct usb_gadget *gadget, void (*release)(struct device *dev));
-extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
-extern void usb_del_gadget_udc(struct usb_gadget *gadget);
-extern char *usb_get_gadget_udc_name(void);
-
/*-------------------------------------------------------------------------*/
/* utility to simplify dealing with string descriptors */
@@ -782,6 +842,16 @@ int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *bu
/* check if the given language identifier is valid */
bool usb_validate_langid(u16 langid);
+struct gadget_string {
+ struct config_item item;
+ struct list_head list;
+ char string[USB_MAX_STRING_LEN];
+ struct usb_string usb_string;
+};
+
+#define to_gadget_string(str_item)\
+container_of(str_item, struct gadget_string, item)
+
/*-------------------------------------------------------------------------*/
/* utility to simplify managing config descriptors */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 3dbb42c637c1..ac95e7c89df5 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __USB_CORE_HCD_H
@@ -59,16 +45,17 @@
* USB Host Controller Driver (usb_hcd) framework
*
* Since "struct usb_bus" is so thin, you can't share much code in it.
- * This framework is a layer over that, and should be more sharable.
+ * This framework is a layer over that, and should be more shareable.
*/
/*-------------------------------------------------------------------------*/
struct giveback_urb_bh {
bool running;
+ bool high_prio;
spinlock_t lock;
struct list_head head;
- struct tasklet_struct bh;
+ struct work_struct bh;
struct usb_host_endpoint *completing_ep;
};
@@ -124,6 +111,7 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -134,6 +122,7 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
@@ -165,7 +154,6 @@ struct usb_hcd {
/* The next flag is a stopgap, to be removed when all the HCDs
* support the new root-hub polling mechanism. */
unsigned uses_new_polling:1;
- unsigned wireless:1; /* Wireless USB HCD */
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
@@ -260,7 +248,6 @@ struct hc_driver {
#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */
#define HCD_USB11 0x0010 /* USB 1.1 */
#define HCD_USB2 0x0020 /* USB 2.0 */
-#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_USB31 0x0050 /* USB 3.1 */
#define HCD_USB32 0x0060 /* USB 3.2 */
@@ -278,7 +265,10 @@ struct hc_driver {
int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup);
/* called after entering D0 (etc), before resuming the hub */
- int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
+ int (*pci_resume)(struct usb_hcd *hcd, pm_message_t state);
+
+	/* called just before hibernation's final D3 state; lets the host power off parts */
+ int (*pci_poweroff_late)(struct usb_hcd *hcd, bool do_wakeup);
/* cleanly make HCD stop writing memory and doing I/O */
void (*stop) (struct usb_hcd *hcd);
@@ -299,7 +289,7 @@ struct hc_driver {
* (optional) these hooks allow an HCD to override the default DMA
* mapping and unmapping routines. In general, they shouldn't be
* necessary unless the host controller has special DMA requirements,
- * such as alignment contraints. If these are not specified, the
+ * such as alignment constraints. If these are not specified, the
* general usb_hcd_(un)?map_urb_for_dma functions will be used instead
* (and it may be a good idea to call these functions in your HCD
* implementation)
@@ -382,8 +372,9 @@ struct hc_driver {
* or bandwidth constraints.
*/
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Set the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev,
+ unsigned int timeout_ms);
/* prepares the hardware to send commands to the device */
int (*enable_device)(struct usb_hcd *, struct usb_device *udev);
/* Notifies the HCD after a hub descriptor is fetched.
@@ -409,7 +400,10 @@ struct hc_driver {
int (*find_raw_port_number)(struct usb_hcd *, int);
/* Call for power on/off the port if necessary */
int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
-
+ /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+ int (*submit_single_step_set_feature)(struct usb_hcd *,
+ struct urb *, int);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -474,21 +468,43 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
+#ifdef CONFIG_USB_HCD_TEST_MODE
+extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port);
+#else
+static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ return 0;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
#ifdef CONFIG_USB_PCI
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id,
const struct hc_driver *driver);
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+#ifdef CONFIG_USB_PCI_AMD
extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
-#ifdef CONFIG_PM
-extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ if (!usb_hcd_amd_remote_wakeup_quirk(dev))
+ return false;
+ if (driver->flags & (HCD_USB11 | HCD_USB3))
+ return true;
+ return false;
+}
+#else /* CONFIG_USB_PCI_AMD */
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ return false;
+}
#endif
+extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_USB_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
@@ -501,6 +517,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
void hcd_buffer_free(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
+void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
+ size_t size, gfp_t mem_flags, dma_addr_t *dma);
+void hcd_buffer_free_pages(struct usb_hcd *hcd,
+ size_t size, void *addr, dma_addr_t dma);
+
/* generic bus glue, needed for host controllers that don't use PCI */
extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
@@ -734,10 +755,6 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
/* random stuff */
-#define RUN_CONTEXT (in_irq() ? "in_irq" \
- : (in_interrupt() ? "in_interrupt" : "can sleep"))
-
-
/* This rwsem is for use only by the hub driver and ehci-hcd.
* Nobody else should touch it.
*/
diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h
index 974befa72ac0..5e759b2cf551 100644
--- a/include/linux/usb/input.h
+++ b/include/linux/usb/input.h
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __LINUX_USB_INPUT_H
diff --git a/include/linux/usb/isp1301.h b/include/linux/usb/isp1301.h
index dedb3b2473e8..fa986b926a12 100644
--- a/include/linux/usb/isp1301.h
+++ b/include/linux/usb/isp1301.h
@@ -3,16 +3,6 @@
* NXP ISP1301 USB transceiver driver
*
* Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_USB_ISP1301_H
diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h
deleted file mode 100644
index b75ded28db81..000000000000
--- a/include/linux/usb/isp1760.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * board initialization should put one of these into dev->platform_data
- * and place the isp1760 onto platform_bus named "isp1760-hcd".
- */
-
-#ifndef __LINUX_USB_ISP1760_H
-#define __LINUX_USB_ISP1760_H
-
-struct isp1760_platform_data {
- unsigned is_isp1761:1; /* Chip is ISP1761 */
- unsigned bus_width_16:1; /* 16/32-bit data bus width */
- unsigned port1_otg:1; /* Port 1 supports OTG */
- unsigned analog_oc:1; /* Analog overcurrent */
- unsigned dack_polarity_high:1; /* DACK active high */
- unsigned dreq_polarity_high:1; /* DREQ active high */
-};
-
-#endif /* __LINUX_USB_ISP1760_H */
diff --git a/include/linux/usb/ljca.h b/include/linux/usb/ljca.h
new file mode 100644
index 000000000000..47661feda96c
--- /dev/null
+++ b/include/linux/usb/ljca.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_USB_LJCA_H_
+#define _LINUX_USB_LJCA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define LJCA_MAX_GPIO_NUM 64
+
+#define auxiliary_dev_to_ljca_client(auxiliary_dev) \
+ container_of(auxiliary_dev, struct ljca_client, auxdev)
+
+struct ljca_adapter;
+
+/**
+ * typedef ljca_event_cb_t - event callback function signature
+ *
+ * @context: the execution context of whoever registered this callback
+ * @cmd: the command from device for this event
+ * @evt_data: the event data payload
+ * @len: the event data payload length
+ *
+ * The callback function is called in interrupt context and the data payload is
+ * only valid during the call. If the user needs to access the data later, it
+ * must copy it.
+ */
+typedef void (*ljca_event_cb_t)(void *context, u8 cmd, const void *evt_data, int len);
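Given that constraint, a client driver's callback would usually copy the payload out and defer real work, roughly like this sketch (all example_* names are hypothetical):

	struct example_client_state {
		u8 buf[64];
		int len;
		struct work_struct work;	/* handler does the slow processing */
	};

	static void example_ljca_event_cb(void *context, u8 cmd,
					  const void *evt_data, int len)
	{
		struct example_client_state *st = context;

		if (len < 0)
			return;
		if (len > (int)sizeof(st->buf))
			len = sizeof(st->buf);
		memcpy(st->buf, evt_data, len);	/* payload valid only during the call */
		st->len = len;
		schedule_work(&st->work);	/* continue outside interrupt context */
	}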
+
+/**
+ * struct ljca_client - represents a ljca client device
+ *
+ * @type: ljca client type
+ * @id: ljca client id within the same client type
+ * @link: node in the list of clients on the same ljca adapter
+ * @auxdev: auxiliary device object
+ * @adapter: ljca adapter the ljca client sits on
+ * @context: the execution context of the event callback
+ * @event_cb: callback registered by the ljca client driver to get
+ * notified of pending asynchronous rx data from the firmware
+ * @event_cb_lock: spinlock to protect the event callback
+ */
+struct ljca_client {
+ u8 type;
+ u8 id;
+ struct list_head link;
+ struct auxiliary_device auxdev;
+ struct ljca_adapter *adapter;
+
+ void *context;
+ ljca_event_cb_t event_cb;
+ /* lock to protect event_cb */
+ spinlock_t event_cb_lock;
+};
+
+/**
+ * struct ljca_gpio_info - ljca gpio client device info
+ *
+ * @num: ljca gpio client device pin number
+ * @valid_pin_map: ljca gpio client device valid pin mapping
+ */
+struct ljca_gpio_info {
+ unsigned int num;
+ DECLARE_BITMAP(valid_pin_map, LJCA_MAX_GPIO_NUM);
+};
+
+/**
+ * struct ljca_i2c_info - ljca i2c client device info
+ *
+ * @id: ljca i2c client device identification number
+ * @capacity: ljca i2c client device capacity
+ * @intr_pin: ljca i2c client device interrupt pin number if exists
+ */
+struct ljca_i2c_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+};
+
+/**
+ * struct ljca_spi_info - ljca spi client device info
+ *
+ * @id: ljca spi client device identification number
+ * @capacity: ljca spi client device capacity
+ */
+struct ljca_spi_info {
+ u8 id;
+ u8 capacity;
+};
+
+/**
+ * ljca_register_event_cb - register a callback function to receive events
+ *
+ * @client: ljca client device
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ *
+ * Return: 0 in case of success, negative value in case of error
+ */
+int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context);
+
+/**
+ * ljca_unregister_event_cb - unregister the callback function for an event
+ *
+ * @client: ljca client device
+ */
+void ljca_unregister_event_cb(struct ljca_client *client);
+
+/**
+ * ljca_transfer - issue a LJCA command and wait for a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device; it should
+ * be 0 when obuf is NULL
+ * @ibuf: any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len: must be initialized to the input buffer size
+ *
+ * Return: the actual length of response data for success, negative value for errors
+ */
+int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len, u8 *ibuf, u8 ibuf_len);
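A minimal usage sketch, assuming a made-up command value 0x01 and a device that answers with up to 8 bytes:

	static int example_query(struct ljca_client *client)
	{
		u8 resp[8];
		int ret;

		ret = ljca_transfer(client, 0x01 /* hypothetical cmd */, NULL, 0,
				    resp, sizeof(resp));
		if (ret < 0)
			return ret;		/* transfer failed */

		/* on success, ret holds the actual response length */
		return 0;
	}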
+
+/**
+ * ljca_transfer_noack - issue a LJCA command without a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device
+ *
+ * Return: 0 for success, negative value for errors
+ */
+int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len);
+
+#endif
diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h
index 2dfe68183495..5f04de2b47fd 100644
--- a/include/linux/usb/m66592.h
+++ b/include/linux/usb/m66592.h
@@ -3,20 +3,6 @@
* M66592 driver platform data
*
* Copyright (C) 2009 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_USB_M66592_H
diff --git a/include/linux/usb/midi-v2.h b/include/linux/usb/midi-v2.h
new file mode 100644
index 000000000000..16f09d959a2d
--- /dev/null
+++ b/include/linux/usb/midi-v2.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/usb/midi-v2.h> -- USB MIDI 2.0 definitions.
+ */
+
+#ifndef __LINUX_USB_MIDI_V2_H
+#define __LINUX_USB_MIDI_V2_H
+
+#include <linux/types.h>
+#include <linux/usb/midi.h>
+
+/* A.1 MS Class-Specific Interface Descriptor Types */
+#define USB_DT_CS_GR_TRM_BLOCK 0x26
+
+/* A.1 MS Class-Specific Interface Descriptor Subtypes */
+/* same as MIDI 1.0 */
+
+/* A.2 MS Class-Specific Endpoint Descriptor Subtypes */
+#define USB_MS_GENERAL_2_0 0x02
+
+/* A.3 MS Class-Specific Group Terminal Block Descriptor Subtypes */
+#define USB_MS_GR_TRM_BLOCK_UNDEFINED 0x00
+#define USB_MS_GR_TRM_BLOCK_HEADER 0x01
+#define USB_MS_GR_TRM_BLOCK 0x02
+
+/* A.4 MS Interface Header MIDIStreaming Class Revision */
+#define USB_MS_REV_MIDI_1_0 0x0100
+#define USB_MS_REV_MIDI_2_0 0x0200
+
+/* A.5 MS MIDI IN and OUT Jack Types */
+/* same as MIDI 1.0 */
+
+/* A.6 Group Terminal Block Types */
+#define USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL 0x00
+#define USB_MS_GR_TRM_BLOCK_TYPE_INPUT_ONLY 0x01
+#define USB_MS_GR_TRM_BLOCK_TYPE_OUTPUT_ONLY 0x02
+
+/* A.7 Group Terminal Default MIDI Protocol */
+#define USB_MS_MIDI_PROTO_UNKNOWN 0x00 /* Unknown (Use MIDI-CI) */
+#define USB_MS_MIDI_PROTO_1_0_64 0x01 /* MIDI 1.0, UMP up to 64bits */
+#define USB_MS_MIDI_PROTO_1_0_64_JRTS 0x02 /* MIDI 1.0, UMP up to 64bits, Jitter Reduction Timestamps */
+#define USB_MS_MIDI_PROTO_1_0_128 0x03 /* MIDI 1.0, UMP up to 128bits */
+#define USB_MS_MIDI_PROTO_1_0_128_JRTS 0x04 /* MIDI 1.0, UMP up to 128bits, Jitter Reduction Timestamps */
+#define USB_MS_MIDI_PROTO_2_0 0x11 /* MIDI 2.0 */
+#define USB_MS_MIDI_PROTO_2_0_JRTS 0x12 /* MIDI 2.0, Jitter Reduction Timestamps */
+
+/* 5.2.2.1 Class-Specific MS Interface Header Descriptor */
+/* Same as MIDI 1.0, use struct usb_ms_header_descriptor */
+
+/* 5.3.2 Class-Specific MIDI Streaming Data Endpoint Descriptor */
+struct usb_ms20_endpoint_descriptor {
+ __u8 bLength; /* 4+n */
+ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */
+ __u8 bDescriptorSubtype; /* USB_MS_GENERAL_2_0 */
+ __u8 bNumGrpTrmBlock; /* Number of Group Terminal Blocks: n */
+ __u8 baAssoGrpTrmBlkID[]; /* ID of the Group Terminal Blocks [n] */
+} __packed;
+
+#define USB_DT_MS20_ENDPOINT_SIZE(n) (4 + (n))
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(n) \
+struct usb_ms20_endpoint_descriptor_##n { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bNumGrpTrmBlock; \
+ __u8 baAssoGrpTrmBlkID[n]; \
+} __packed
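A sketch of the intended use (the variable name and block IDs are illustrative): declare a sized variant once, then initialize it like any other descriptor. USB_DT_CS_ENDPOINT comes from <linux/usb/ch9.h>.

	DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(2);

	static struct usb_ms20_endpoint_descriptor_2 example_ms20_ep_desc = {
		.bLength		= USB_DT_MS20_ENDPOINT_SIZE(2),
		.bDescriptorType	= USB_DT_CS_ENDPOINT,
		.bDescriptorSubtype	= USB_MS_GENERAL_2_0,
		.bNumGrpTrmBlock	= 2,
		.baAssoGrpTrmBlkID	= { 0x01, 0x02 },	/* blocks this EP serves */
	};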
+
+/* 5.4.1 Class-Specific Group Terminal Block Header Descriptor */
+struct usb_ms20_gr_trm_block_header_descriptor {
+ __u8 bLength; /* 5 */
+ __u8 bDescriptorType; /* USB_DT_CS_GR_TRM_BLOCK */
+ __u8 bDescriptorSubtype; /* USB_MS_GR_TRM_BLOCK_HEADER */
+ __le16 wTotalLength; /* Total number of bytes */
+} __packed;
+
+/* 5.4.2.1 Group Terminal Block Descriptor */
+struct usb_ms20_gr_trm_block_descriptor {
+ __u8 bLength; /* 13 */
+ __u8 bDescriptorType; /* USB_DT_CS_GR_TRM_BLOCK */
+ __u8 bDescriptorSubtype; /* USB_MS_GR_TRM_BLOCK */
+ __u8 bGrpTrmBlkID; /* ID of this Group Terminal Block */
+ __u8 bGrpTrmBlkType; /* Group Terminal Block Type */
+ __u8 nGroupTrm; /* The first member Group Terminal in this block */
+ __u8 nNumGroupTrm; /* Number of member Group Terminals spanned */
+ __u8 iBlockItem; /* String ID of Block item */
+ __u8 bMIDIProtocol; /* Default MIDI protocol */
+ __le16 wMaxInputBandwidth; /* Max input bandwidth capability in 4kB/s */
+ __le16 wMaxOutputBandwidth; /* Max output bandwidth capability in 4kB/s */
+} __packed;
+
+#endif /* __LINUX_USB_MIDI_V2_H */
diff --git a/include/linux/usb/musb-ux500.h b/include/linux/usb/musb-ux500.h
index c4b7ad9850ca..d60dcfc56b5a 100644
--- a/include/linux/usb/musb-ux500.h
+++ b/include/linux/usb/musb-ux500.h
@@ -1,16 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 ST-Ericsson AB
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MUSB_UX500_H__
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index fc6c77918481..3963e55e88a3 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -99,9 +99,6 @@ struct musb_hdrc_platform_data {
/* (HOST or OTG) program PHY for external Vbus */
unsigned extvbus:1;
- /* Power the device on or off */
- int (*set_power)(int state);
-
/* MUSB configuration-specific details */
const struct musb_hdrc_config *config;
@@ -135,16 +132,4 @@ static inline int musb_mailbox(enum musb_vbus_id_status status)
#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */
#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */
-#ifdef CONFIG_ARCH_OMAP2
-
-extern int __init tusb6010_setup_interface(
- struct musb_hdrc_platform_data *data,
- unsigned ps_refclk, unsigned waitpin,
- unsigned async_cs, unsigned sync_cs,
- unsigned irq, unsigned dmachan);
-
-extern int tusb6010_platform_retime(unsigned is_refclk);
-
-#endif /* OMAP2 */
-
#endif /* __LINUX_USB_MUSB_H */
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h
index 08b85caecfaf..f29fe6a1f415 100644
--- a/include/linux/usb/net2280.h
+++ b/include/linux/usb/net2280.h
@@ -5,20 +5,6 @@
*
* Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
* Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_NET2280_H
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index dba55ccb9b53..de42f14bd280 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -1,13 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
* OF helpers for usb devices.
- *
- * This file is released under the GPLv2
*/
#ifndef __LINUX_USB_OF_H
#define __LINUX_USB_OF_H
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
@@ -19,6 +18,7 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
bool of_usb_host_tpl_support(struct device_node *np);
int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
+enum usb_port_connect_type usb_of_get_connect_type(struct usb_device *hub, int port1);
struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1);
bool usb_of_has_combined_node(struct usb_device *udev);
struct device_node *usb_of_get_interface_node(struct usb_device *udev,
@@ -39,6 +39,11 @@ static inline int of_usb_update_otg_caps(struct device_node *np,
{
return 0;
}
+static inline enum usb_port_connect_type
+usb_of_get_connect_type(const struct usb_device *hub, int port1)
+{
+ return USB_PORT_CONNECT_TYPE_UNKNOWN;
+}
static inline struct device_node *
usb_of_get_device_node(struct usb_device *hub, int port1)
{
diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h
index 7eb16cf587ee..2447c78b1766 100644
--- a/include/linux/usb/ohci_pdriver.h
+++ b/include/linux/usb/ohci_pdriver.h
@@ -1,20 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __USB_CORE_OHCI_PDRIVER_H
diff --git a/include/linux/usb/onboard_hub.h b/include/linux/usb/onboard_hub.h
new file mode 100644
index 000000000000..d9373230556e
--- /dev/null
+++ b/include/linux/usb/onboard_hub.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_USB_ONBOARD_HUB_H
+#define __LINUX_USB_ONBOARD_HUB_H
+
+struct usb_device;
+struct list_head;
+
+#if IS_ENABLED(CONFIG_USB_ONBOARD_HUB)
+void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list);
+void onboard_hub_destroy_pdevs(struct list_head *pdev_list);
+#else
+static inline void onboard_hub_create_pdevs(struct usb_device *parent_hub,
+ struct list_head *pdev_list) {}
+static inline void onboard_hub_destroy_pdevs(struct list_head *pdev_list) {}
+#endif
+
+#endif /* __LINUX_USB_ONBOARD_HUB_H */
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index e78eb577d0fa..6135d076c53d 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -1,19 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
-/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
+/*
+ * Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
*/
#ifndef __LINUX_USB_OTG_FSM_H
@@ -98,7 +85,7 @@ enum otg_fsm_timer {
* @b_bus_req: TRUE during the time that the Application running on the
* B-device wants to use the bus
*
- * Auxilary inputs (OTG v1.3 only. Obsolete now.)
+ * Auxiliary inputs (OTG v1.3 only. Obsolete now.)
* @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD
* @b_bus_suspend: TRUE when the A-device detects that the B-device has put
* the bus into suspend
@@ -153,7 +140,7 @@ struct otg_fsm {
int a_bus_req;
int b_bus_req;
- /* Auxilary inputs */
+ /* Auxiliary inputs */
int a_sess_vld;
int b_bus_resume;
int b_bus_suspend;
@@ -177,7 +164,7 @@ struct otg_fsm {
int a_bus_req_inf;
int a_clr_err_inf;
int b_bus_req_inf;
- /* Auxilary informative variables */
+ /* Auxiliary informative variables */
int a_suspend_req_inf;
/* Timeout indicator for timers */
@@ -196,6 +183,7 @@ struct otg_fsm {
struct mutex lock;
u8 *host_req_flag;
struct delayed_work hnp_polling_work;
+ bool hnp_work_inited;
bool state_changed;
};
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 69f1b6328532..6475f880be37 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -125,8 +125,9 @@ enum usb_dr_mode {
* @dev: Pointer to the given device
*
* The function gets phy interface string from property 'dr_mode',
- * and returns the correspondig enum usb_dr_mode
+ * and returns the corresponding enum usb_dr_mode
*/
extern enum usb_dr_mode usb_get_dr_mode(struct device *dev);
+extern enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev);
#endif /* __LINUX_USB_OTG_H */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index b6c233e79bd4..d50098fb16b5 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -219,14 +219,18 @@ enum pd_pdo_type {
#define PDO_CURR_MASK 0x3ff
#define PDO_PWR_MASK 0x3ff
-#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */
-#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */
-#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */
-#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
-#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
-#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
-#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
-#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
+#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */
+#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */
+#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */
+#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
+#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
+#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
+#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */
+#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
+#define PDO_FIXED_FRS_CURR_SHIFT 23
+#define PDO_FIXED_PEAK_CURR_SHIFT 20
+#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT)
#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT)
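As a quick illustration (EXAMPLE_SRC_PDO is a made-up name; PDO_FIXED() is the helper defined elsewhere in this header), a 5 V / 3 A fixed source PDO advertising dual-role power and USB communication composes as:

	#define EXAMPLE_SRC_PDO \
		PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_USB_COMM)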
@@ -454,15 +458,17 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
#define PD_T_SEND_SOURCE_CAP 150 /* 100 - 200 ms */
#define PD_T_SENDER_RESPONSE 60 /* 24 - 30 ms, relaxed */
+#define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */
#define PD_T_SOURCE_ACTIVITY 45
#define PD_T_SINK_ACTIVITY 135
-#define PD_T_SINK_WAIT_CAP 240
+#define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */
#define PD_T_PS_TRANSITION 500
#define PD_T_SRC_TRANSITION 35
#define PD_T_DRP_SNK 40
#define PD_T_DRP_SRC 30
#define PD_T_PS_SOURCE_OFF 920
#define PD_T_PS_SOURCE_ON 480
+#define PD_T_PS_SOURCE_ON_PRS 450 /* 390 - 480ms */
#define PD_T_PS_HARD_RESET 30
#define PD_T_SRC_RECOVER 760
#define PD_T_SRC_RECOVER_MAX 1000
@@ -471,17 +477,62 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_VCONN_SOURCE_ON 100
#define PD_T_SINK_REQUEST 100 /* 100 ms minimum */
#define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */
-#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
-#define PD_T_NEWSRC 250 /* Maximum of 275ms */
+#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
+#define PD_T_NEWSRC 250 /* Maximum of 275ms */
+#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */
+#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */
+#define PD_T_SINK_TX 16 /* 16 - 20 ms */
+#define PD_T_CHUNK_NOT_SUPP 42 /* 40 - 50 ms */
+#define PD_T_VCONN_STABLE 50
#define PD_T_DRP_TRY 100 /* 75 - 150 ms */
#define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
#define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */
#define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */
+#define PD_T_TRY_CC_DEBOUNCE 15 /* 10 - 20 ms */
#define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
#define PD_N_HARD_RESET_COUNT 2
-#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */
+#define PD_P_SNK_STDBY_MW 2500 /* 2500 mW */
+
+#if IS_ENABLED(CONFIG_TYPEC)
+
+struct usb_power_delivery;
+
+/**
+ * struct usb_power_delivery_desc - USB Power Delivery Descriptor
+ * @revision: USB Power Delivery Specification Revision
+ * @version: USB Power Delivery Specification Version - optional
+ */
+struct usb_power_delivery_desc {
+ u16 revision;
+ u16 version;
+};
+
+/**
+ * struct usb_power_delivery_capabilities_desc - Description of USB Power Delivery Capabilities Message
+ * @pdo: The Power Data Objects in the Capability Message
+ * @role: Power role of the capabilities
+ */
+struct usb_power_delivery_capabilities_desc {
+ u32 pdo[PDO_MAX_OBJECTS];
+ enum typec_role role;
+};
+
+struct usb_power_delivery_capabilities *
+usb_power_delivery_register_capabilities(struct usb_power_delivery *pd,
+ struct usb_power_delivery_capabilities_desc *desc);
+void usb_power_delivery_unregister_capabilities(struct usb_power_delivery_capabilities *cap);
+
+struct usb_power_delivery *usb_power_delivery_register(struct device *parent,
+ struct usb_power_delivery_desc *desc);
+void usb_power_delivery_unregister(struct usb_power_delivery *pd);
+
+int usb_power_delivery_link_device(struct usb_power_delivery *pd, struct device *dev);
+void usb_power_delivery_unlink_device(struct usb_power_delivery *pd, struct device *dev);
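A registration sketch under stated assumptions (example_register_pd is hypothetical; TYPEC_SOURCE comes from <linux/usb/typec.h> and PDO_FIXED() from this header; error handling for the capabilities call is elided):

	static struct usb_power_delivery *example_register_pd(struct device *port_dev)
	{
		struct usb_power_delivery_desc desc = { .revision = 0x0300 }; /* PD 3.0 */
		struct usb_power_delivery_capabilities_desc caps = {
			.pdo  = { PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM) },
			.role = TYPEC_SOURCE,
		};
		struct usb_power_delivery *pd;

		pd = usb_power_delivery_register(port_dev, &desc);
		if (IS_ERR(pd))
			return pd;

		usb_power_delivery_register_capabilities(pd, &caps);
		return pd;
	}

The capabilities can later be torn down with usb_power_delivery_unregister_capabilities() and the parent object with usb_power_delivery_unregister().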
+
+#endif /* CONFIG_TYPEC */
+
#endif /* __LINUX_USB_PD_H */
diff --git a/include/linux/usb/pd_bdo.h b/include/linux/usb/pd_bdo.h
index 033fe3e17141..7c25b88d79f9 100644
--- a/include/linux/usb/pd_bdo.h
+++ b/include/linux/usb/pd_bdo.h
@@ -15,7 +15,7 @@
#define BDO_MODE_CARRIER2 (5 << 28)
#define BDO_MODE_CARRIER3 (6 << 28)
#define BDO_MODE_EYE (7 << 28)
-#define BDO_MODE_TESTDATA (8 << 28)
+#define BDO_MODE_TESTDATA (8U << 28)
#define BDO_MODE_MASK(mode) ((mode) & 0xf0000000)
diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
index 0eb83ce19597..b517ebc8f0ff 100644
--- a/include/linux/usb/pd_ext_sdb.h
+++ b/include/linux/usb/pd_ext_sdb.h
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
#define USB_PD_EXT_SDB_EVENT_OVP BIT(3)
#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4)
-#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \
- USB_PD_EXT_SDB_EVENT_OTP | \
- USB_PD_EXT_SDB_EVENT_OVP)
-
#endif /* __LINUX_USB_PD_EXT_SDB_H */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 68bdc4e2f5a9..5c48e8a81403 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -7,6 +7,7 @@
#define __LINUX_USB_PD_VDO_H
#include "pd.h"
+#include <linux/bitfield.h>
/*
* VDO : Vendor Defined Message Object
@@ -21,22 +22,24 @@
* ----------
* <31:16> :: SVID
* <15> :: VDM type ( 1b == structured, 0b == unstructured )
- * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently)
+ * <14:13> :: Structured VDM version
* <12:11> :: reserved
* <10:8> :: object position (1-7 valid ... used for enter/exit mode only)
* <7:6> :: command type (SVDM only?)
* <5> :: reserved (SVDM), command type (UVDM)
* <4:0> :: command
*/
-#define VDO(vid, type, custom) \
+#define VDO(vid, type, ver, custom) \
(((vid) << 16) | \
((type) << 15) | \
+ ((ver) << 13) | \
((custom) & 0x7FFF))
#define VDO_SVDM_TYPE (1 << 15)
#define VDO_SVDM_VERS(x) ((x) << 13)
#define VDO_OPOS(x) ((x) << 8)
#define VDO_CMDT(x) ((x) << 6)
+#define VDO_SVDM_VERS_MASK VDO_SVDM_VERS(0x3)
#define VDO_OPOS_MASK VDO_OPOS(0x7)
#define VDO_CMDT_MASK VDO_CMDT(0x3)
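For orientation (EXAMPLE_DISCOVER_IDENT_HDR is a made-up name; CMDT_INIT and CMD_DISCOVER_IDENT are defined further down in this header), a structured Discover Identity request header for SVDM version 2.0 composes as:

	#define EXAMPLE_DISCOVER_IDENT_HDR				\
		VDO(0xff00 /* PD SID */, 1 /* structured VDM */,	\
		    1 /* SVDM version 2.0 */,				\
		    VDO_CMDT(CMDT_INIT) | CMD_DISCOVER_IDENT)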
@@ -74,6 +77,7 @@
#define PD_VDO_VID(vdo) ((vdo) >> 16)
#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
+#define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3)
#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
@@ -83,12 +87,15 @@
*
* Request is simply properly formatted SVDM header
*
- * Response is 4 data objects:
+ * Response is 4 data objects for Power Delivery 2.0 and Passive Cables for
+ * Power Delivery 3.0. Active Cables in Power Delivery 3.0 have 5 data objects.
* [0] :: SVDM header
 * [1] :: Identity header
* [2] :: Cert Stat VDO
* [3] :: (Product | Cable) VDO
+ * [4] :: Cable VDO 1
* [4] :: AMA VDO
+ * [5] :: Cable VDO 2
*
*/
#define VDO_INDEX_HDR 0
@@ -97,31 +104,59 @@
#define VDO_INDEX_CABLE 3
#define VDO_INDEX_PRODUCT 3
#define VDO_INDEX_AMA 4
+#define VDO_INDEX_CABLE_1 4
+#define VDO_INDEX_CABLE_2 5
/*
* SVDM Identity Header
* --------------------
* <31> :: data capable as a USB host
* <30> :: data capable as a USB device
- * <29:27> :: product type
+ * <29:27> :: product type (UFP / Cable / VPD)
* <26> :: modal operation supported (1b == yes)
- * <25:16> :: Reserved, Shall be set to zero
+ * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
#define IDH_PTYPE_UNDEF 0
+
+/* SOP Product Type (UFP) */
+#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PSD 3
+#define IDH_PTYPE_AMA 5
+
+/* SOP' Product Type (Cable Plug / VPD) */
+#define IDH_PTYPE_NOT_CABLE 0
#define IDH_PTYPE_PCABLE 3
#define IDH_PTYPE_ACABLE 4
-#define IDH_PTYPE_AMA 5
+#define IDH_PTYPE_VPD 6
-#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
- ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
- | (is_modal) << 26 | ((vid) & 0xffff))
+/* SOP Product Type (DFP) */
+#define IDH_PTYPE_NOT_DFP 0
+#define IDH_PTYPE_DFP_HUB 1
+#define IDH_PTYPE_DFP_HOST 2
+#define IDH_PTYPE_DFP_PB 3
+
+/* ID Header Mask */
+#define IDH_DFP_MASK GENMASK(25, 23)
+#define IDH_CONN_MASK GENMASK(22, 21)
+
+#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \
+ | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \
+ | ((vid) & 0xffff))
#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
+#define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7)
+#define PD_IDH_CONN_TYPE(vdo) (((vdo) >> 21) & 0x3)
+#define PD_IDH_HOST_SUPP(vdo) ((vdo) & (1 << 31))
/*
* Cert Stat VDO
@@ -129,6 +164,7 @@
* <31:0> : USB-IF assigned XID for this cable
*/
#define PD_CSTAT_XID(vdo) (vdo)
+#define VDO_CERT(xid) ((xid) & 0xffffffff)
/*
* Product VDO
@@ -140,77 +176,272 @@
#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
/*
- * UFP VDO1
+ * UFP VDO (PD Revision 3.0+ only)
* --------
* <31:29> :: UFP VDO version
* <28> :: Reserved
* <27:24> :: Device capability
- * <23:6> :: Reserved
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:11> :: Reserved
+ * <10:8> :: Vconn power (AMA only)
+ * <7> :: Vconn required (AMA only, 0b == no, 1b == yes)
+ * <6> :: Vbus required (AMA only, 0b == yes, 1b == no)
* <5:3> :: Alternate modes
* <2:0> :: USB highest speed
*/
-#define PD_VDO1_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24)
+#define PD_VDO_UFP_DEVCAP(vdo) FIELD_GET(GENMASK(27, 24), vdo)
+/* UFP VDO Version */
+#define UFP_VDO_VER1_2 2
+
+/* Device Capability */
#define DEV_USB2_CAPABLE BIT(0)
#define DEV_USB2_BILLBOARD BIT(1)
#define DEV_USB3_CAPABLE BIT(2)
#define DEV_USB4_CAPABLE BIT(3)
+/* Connector Type */
+#define UFP_RECEPTACLE 2
+#define UFP_CAPTIVE 3
+
+/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+
+/* Vconn Required (AMA only) */
+#define AMA_VCONN_NOT_REQ 0
+#define AMA_VCONN_REQ 1
+
+/* Vbus Required (AMA only) */
+#define AMA_VBUS_REQ 0
+#define AMA_VBUS_NOT_REQ 1
+
+/* Alternate Modes */
+#define UFP_ALTMODE_NOT_SUPP 0
+#define UFP_ALTMODE_TBT3 BIT(0)
+#define UFP_ALTMODE_RECFG BIT(1)
+#define UFP_ALTMODE_NO_RECFG BIT(2)
+
+/* USB Highest Speed */
+#define UFP_USB2_ONLY 0
+#define UFP_USB32_GEN1 1
+#define UFP_USB32_4_GEN2 2
+#define UFP_USB4_GEN3 3
+
+#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd) \
+ (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \
+ | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \
+ | ((spd) & 0x7))
+
/*
- * DFP VDO
+ * DFP VDO (PD Revision 3.0+ only)
* --------
* <31:29> :: DFP VDO version
* <28:27> :: Reserved
* <26:24> :: Host capability
- * <23:5> :: Reserved
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:5> :: Reserved
* <4:0> :: Port number
*/
-#define PD_VDO_DFP_HOSTCAP(vdo) (((vdo) & GENMASK(26, 24)) >> 24)
+#define PD_VDO_DFP_HOSTCAP(vdo) FIELD_GET(GENMASK(26, 24), vdo)
+#define DFP_VDO_VER1_1 1
#define HOST_USB2_CAPABLE BIT(0)
#define HOST_USB3_CAPABLE BIT(1)
#define HOST_USB4_CAPABLE BIT(2)
+#define DFP_RECEPTACLE 2
+#define DFP_CAPTIVE 3
+
+#define VDO_DFP(ver, cap, conn, pnum) \
+ (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \
+ | ((pnum) & 0x1f))
/*
- * Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
* <23:20> :: Reserved, Shall be set to zero
- * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
- * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
+ * <17> :: Reserved, Shall be set to zero
* <16:13> :: cable latency (0001 == <10ns(~1m length))
* <12:11> :: cable termination type (11b == both ends active VCONN req)
* <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
* <9> :: SSTX2 Directionality support
* <8> :: SSRX1 Directionality support
* <7> :: SSRX2 Directionality support
- * <6:5> :: Vbus current handling capability
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
* <4> :: Vbus through cable (0b == no, 1b == yes)
* <3> :: SOP" controller present? (0b == no, 1b == yes)
* <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8:7> :: Reserved, Shall be set to zero
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4:3> :: Reserved, Shall be set to zero
+ * <2:0> :: USB highest speed
+ *
+ * Active Cable VDO 1 (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Connector type (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8> :: SBU supported (0b == supported, 1b == not supported)
+ * <7> :: SBU type (0b == passive, 1b == active)
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB highest speed
*/
+/* Cable VDO Version */
+#define CABLE_VDO_VER1_0 0
+#define CABLE_VDO_VER1_3 3
+
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
#define CABLE_ATYPE 0
#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
-#define CABLE_PLUG 0
-#define CABLE_RECEPTACLE 1
-#define CABLE_CURR_1A5 0
+#define CABLE_CAPTIVE 3
+
+/* Cable Latency */
+#define CABLE_LATENCY_1M 1
+#define CABLE_LATENCY_2M 2
+#define CABLE_LATENCY_3M 3
+#define CABLE_LATENCY_4M 4
+#define CABLE_LATENCY_5M 5
+#define CABLE_LATENCY_6M 6
+#define CABLE_LATENCY_7M 7
+#define CABLE_LATENCY_7M_PLUS 8
+
+/* Cable Termination Type */
+#define PCABLE_VCONN_NOT_REQ 0
+#define PCABLE_VCONN_REQ 1
+#define ACABLE_ONE_END 2
+#define ACABLE_BOTH_END 3
+
+/* Maximum Vbus Voltage */
+#define CABLE_MAX_VBUS_20V 0
+#define CABLE_MAX_VBUS_30V 1
+#define CABLE_MAX_VBUS_40V 2
+#define CABLE_MAX_VBUS_50V 3
+
+/* Active Cable SBU Supported/Type */
+#define ACABLE_SBU_SUPP 0
+#define ACABLE_SBU_NOT_SUPP 1
+#define ACABLE_SBU_PASSIVE 0
+#define ACABLE_SBU_ACTIVE 1
+
+/* Vbus Current Handling Capability */
+#define CABLE_CURR_DEF 0
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
#define CABLE_USBSS_U2_ONLY 0
#define CABLE_USBSS_U31_GEN1 1
#define CABLE_USBSS_U31_GEN2 2
-#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\
- vps, sopp, usbss) \
- (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
- | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \
- | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
- | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
- | ((usbss) & 0x7))
+
+/* USB Highest Speed */
+#define CABLE_USB2_ONLY 0
+#define CABLE_USB32_GEN1 1
+#define CABLE_USB32_4_GEN2 2
+#define CABLE_USB4_GEN3 3
+
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
+#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
+#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
+ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+
+#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7)
+#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
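As a decoding sketch (the helper name is hypothetical), the current-handling field in bits <6:5> can be pulled out the same way these accessors do it:

	static inline bool example_cable_vbus_3a_capable(u32 vdo)
	{
		u32 cur = (vdo >> 5) & 0x3;	/* Vbus current handling field */

		return cur == CABLE_CURR_3A || cur == CABLE_CURR_5A;
	}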
/*
- * AMA VDO
+ * Active Cable VDO 2
+ * ---------
+ * <31:24> :: Maximum operating temperature
+ * <23:16> :: Shutdown temperature
+ * <15> :: Reserved, Shall be set to zero
+ * <14:12> :: U3/CLd power
+ * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S)
+ * <10> :: Physical connection (0b == copper, 1b == optical)
+ * <9> :: Active element (0b == redriver, 1b == retimer)
+ * <8> :: USB4 supported (0b == yes, 1b == no)
+ * <7:6> :: USB2 hub hops consumed
+ * <5> :: USB2 supported (0b == yes, 1b == no)
+ * <4> :: USB3.2 supported (0b == yes, 1b == no)
+ * <3> :: USB lanes supported (0b == one lane, 1b == two lanes)
+ * <2> :: Optically isolated active cable (0b == no, 1b == yes)
+ * <1> :: Reserved, Shall be set to zero
+ * <0> :: USB gen (0b == gen1, 1b == gen2+)
+ */
+/* U3/CLd Power */
+#define ACAB2_U3_CLD_10MW_PLUS 0
+#define ACAB2_U3_CLD_10MW 1
+#define ACAB2_U3_CLD_5MW 2
+#define ACAB2_U3_CLD_1MW 3
+#define ACAB2_U3_CLD_500UW 4
+#define ACAB2_U3_CLD_200UW 5
+#define ACAB2_U3_CLD_50UW 6
+
+/* Other Active Cable VDO 2 Fields */
+#define ACAB2_U3U0_DIRECT 0
+#define ACAB2_U3U0_U3S 1
+#define ACAB2_PHY_COPPER 0
+#define ACAB2_PHY_OPTICAL 1
+#define ACAB2_REDRIVER 0
+#define ACAB2_RETIMER 1
+#define ACAB2_USB4_SUPP 0
+#define ACAB2_USB4_NOT_SUPP 1
+#define ACAB2_USB2_SUPP 0
+#define ACAB2_USB2_NOT_SUPP 1
+#define ACAB2_USB32_SUPP 0
+#define ACAB2_USB32_NOT_SUPP 1
+#define ACAB2_LANES_ONE 0
+#define ACAB2_LANES_TWO 1
+#define ACAB2_OPT_ISO_NO 0
+#define ACAB2_OPT_ISO_YES 1
+#define ACAB2_GEN_1 0
+#define ACAB2_GEN_2_PLUS 1
+
+#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \
+ (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \
+ | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8 \
+ | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
+ | (iso) << 2 | (gen))
+
+/*
+ * AMA VDO (PD Rev2.0)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -233,19 +464,41 @@
#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
-#define AMA_VCONN_PWR_1W 0
-#define AMA_VCONN_PWR_1W5 1
-#define AMA_VCONN_PWR_2W 2
-#define AMA_VCONN_PWR_3W 3
-#define AMA_VCONN_PWR_4W 4
-#define AMA_VCONN_PWR_5W 5
-#define AMA_VCONN_PWR_6W 6
#define AMA_USBSS_U2_ONLY 0
#define AMA_USBSS_U31_GEN1 1
#define AMA_USBSS_U31_GEN2 2
#define AMA_USBSS_BBONLY 3
/*
+ * VPD VDO
+ * ---------
+ * <31:28> :: HW version
+ * <27:24> :: FW version
+ * <23:21> :: VDO version
+ * <20:17> :: Reserved, Shall be set to zero
+ * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <14> :: Charge through current support (0b == 3A, 1b == 5A)
+ * <13> :: Reserved, Shall be set to zero
+ * <12:7> :: Vbus impedance
+ * <6:1> :: Ground impedance
+ * <0> :: Charge through support (0b == no, 1b == yes)
+ */
+#define VPD_VDO_VER1_0 0
+#define VPD_MAX_VBUS_20V 0
+#define VPD_MAX_VBUS_30V 1
+#define VPD_MAX_VBUS_40V 2
+#define VPD_MAX_VBUS_50V 3
+#define VPDCT_CURR_3A 0
+#define VPDCT_CURR_5A 1
+#define VPDCT_NOT_SUPP 0
+#define VPDCT_SUPP 1
+
+#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \
+ | ((gi) & 0x3f) << 1 | (ct))
+
+/*
* SVDM Discover SVIDs request -> response
*
* Request is properly formatted VDM Header with discover SVIDs command.
diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h
index 263196f05015..862aaeca2319 100644
--- a/include/linux/usb/phy_companion.h
+++ b/include/linux/usb/phy_companion.h
@@ -3,18 +3,8 @@
* phy-companion.h -- phy companion to indicate the comparator part of PHY
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __DRIVERS_PHY_COMPANION_H
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 5e4c497f54d6..59409c1fc3de 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -32,7 +32,7 @@
#define USB_QUIRK_DELAY_INIT BIT(6)
/*
- * For high speed and super speed interupt endpoints, the USB 2.0 and
+ * For high speed and super speed interrupt endpoints, the USB 2.0 and
* USB 3.0 spec require the interval in microframes
* (1 microframe = 125 microseconds) to be calculated as
* interval = 2 ^ (bInterval-1).
@@ -72,4 +72,7 @@
/* device has endpoints that should be ignored */
#define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
+/* short SET_ADDRESS request timeout */
+#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
new file mode 100644
index 000000000000..33a4c146dc19
--- /dev/null
+++ b/include/linux/usb/r8152.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Realtek Semiconductor Corp. All rights reserved.
+ */
+
+#ifndef __LINUX_R8152_H
+#define __LINUX_R8152_H
+
+#define RTL8152_REQT_READ 0xc0
+#define RTL8152_REQT_WRITE 0x40
+#define RTL8152_REQ_GET_REGS 0x05
+#define RTL8152_REQ_SET_REGS 0x05
+
+#define BYTE_EN_DWORD 0xff
+#define BYTE_EN_WORD 0x33
+#define BYTE_EN_BYTE 0x11
+#define BYTE_EN_SIX_BYTES 0x3f
+#define BYTE_EN_START_MASK 0x0f
+#define BYTE_EN_END_MASK 0xf0
+
+#define MCU_TYPE_PLA 0x0100
+#define MCU_TYPE_USB 0x0000
+
+/* Define these values to match your device */
+#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_MICROSOFT 0x045e
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
+#define VENDOR_ID_NVIDIA 0x0955
+#define VENDOR_ID_TPLINK 0x2357
+#define VENDOR_ID_DLINK 0x2001
+#define VENDOR_ID_ASUS 0x0b05
+
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+extern u8 rtl8152_get_version(struct usb_interface *intf);
+#endif
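Assuming the r8152 driver's RTL_VER_UNKNOWN is 0 (true at the time of writing, but an assumption here), a sibling driver bound to the same hardware could use the export roughly like this hypothetical helper:

	#if IS_REACHABLE(CONFIG_USB_RTL8152)
	static bool example_is_rtl8152_family(struct usb_interface *intf)
	{
		return rtl8152_get_version(intf) != 0;	/* 0 == unknown device */
	}
	#endif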
+
+#endif /* __LINUX_R8152_H */
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
index c0753d026bbf..f0fa7ddadbaa 100644
--- a/include/linux/usb/r8a66597.h
+++ b/include/linux/usb/r8a66597.h
@@ -5,20 +5,6 @@
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_USB_R8A66597_H
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d418c55523a7..372898d9eeb0 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -5,16 +5,6 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_H
#define RENESAS_USB_H
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 809bccd08455..489cfb1d00f6 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -2,20 +2,6 @@
/*
* Host Side support for RNDIS Networking Links
* Copyright (C) 2005 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_RNDIS_HOST_H
@@ -197,6 +183,7 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
/* Flags for driver_info::data */
#define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */
+#define RNDIS_DRIVER_DATA_DST_MAC_FIXUP 2 /* device ignores configured MAC address */
extern void rndis_status(struct usbnet *dev, struct urb *urb);
extern int
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index 0164fed31b06..b5deafd91f67 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -65,6 +65,7 @@ void usb_role_switch_unregister(struct usb_role_switch *sw);
void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data);
void *usb_role_switch_get_drvdata(struct usb_role_switch *sw);
+const char *usb_role_string(enum usb_role role);
#else
static inline int usb_role_switch_set_role(struct usb_role_switch *sw,
enum usb_role role)
@@ -91,6 +92,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
static inline struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc)
{
@@ -109,6 +116,11 @@ static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw)
return NULL;
}
+static inline const char *usb_role_string(enum usb_role role)
+{
+ return "unknown";
+}
+
#endif
#endif /* __LINUX_USB_ROLE_H */
diff --git a/include/linux/usb/rzv2m_usb3drd.h b/include/linux/usb/rzv2m_usb3drd.h
new file mode 100644
index 000000000000..4cc848de229f
--- /dev/null
+++ b/include/linux/usb/rzv2m_usb3drd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RZV2M_USB3DRD_H
+#define __RZV2M_USB3DRD_H
+
+#include <linux/types.h>
+
+struct rzv2m_usb3drd {
+ void __iomem *reg;
+ int drd_irq;
+ struct device *dev;
+ struct reset_control *drd_rstc;
+};
+
+#if IS_ENABLED(CONFIG_USB_RZV2M_USB3DRD)
+void rzv2m_usb3drd_reset(struct device *dev, bool host);
+#else
+static inline void rzv2m_usb3drd_reset(struct device *dev, bool host) { }
+#endif
+
+#endif /* __RZV2M_USB3DRD_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 8e67eff9039f..1a0a4dc87980 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -4,11 +4,6 @@
*
* Copyright (C) 1999 - 2012
* Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
*/
#ifndef __LINUX_USB_SERIAL_H
@@ -62,7 +57,6 @@
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
* port.
* @flags: usb serial port flags
- * @write_wait: a wait_queue_head_t used by the port.
* @work: work queue entry for the line discipline waking up.
* @dev: pointer to the serial device
*
@@ -108,7 +102,6 @@ struct usb_serial_port {
int tx_bytes;
unsigned long flags;
- wait_queue_head_t write_wait;
struct work_struct work;
unsigned long sysrq; /* sysrq timeout */
struct device dev;
@@ -132,6 +125,8 @@ static inline void usb_set_serial_port_data(struct usb_serial_port *port,
* @dev: pointer to the struct usb_device for this device
* @type: pointer to the struct usb_serial_driver for this device
* @interface: pointer to the struct usb_interface for this device
+ * @sibling: pointer to the struct usb_interface of any sibling interface
+ * @suspend_count: number of suspended (sibling) interfaces
* @num_ports: the number of ports this device has
* @num_interrupt_in: number of interrupt in endpoints we have
* @num_interrupt_out: number of interrupt out endpoints we have
@@ -147,8 +142,9 @@ struct usb_serial {
struct usb_device *dev;
struct usb_serial_driver *type;
struct usb_interface *interface;
+ struct usb_interface *sibling;
+ unsigned int suspend_count;
unsigned char disconnected:1;
- unsigned char suspending:1;
unsigned char attached:1;
unsigned char minors_reserved:1;
unsigned char num_ports;
@@ -262,7 +258,7 @@ struct usb_serial_driver {
void (*release)(struct usb_serial *serial);
int (*port_probe)(struct usb_serial_port *port);
- int (*port_remove)(struct usb_serial_port *port);
+ void (*port_remove)(struct usb_serial_port *port);
int (*suspend)(struct usb_serial *serial, pm_message_t message);
int (*resume)(struct usb_serial *serial);
@@ -275,15 +271,15 @@ struct usb_serial_driver {
int (*write)(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
/* Called only by the tty layer */
- int (*write_room)(struct tty_struct *tty);
+ unsigned int (*write_room)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
- int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
+ void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
- void (*set_termios)(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
- void (*break_ctl)(struct tty_struct *tty, int break_state);
- int (*chars_in_buffer)(struct tty_struct *tty);
+ void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port,
+ const struct ktermios *old);
+ int (*break_ctl)(struct tty_struct *tty, int break_state);
+ unsigned int (*chars_in_buffer)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, long timeout);
bool (*tx_empty)(struct usb_serial_port *port);
void (*throttle)(struct tty_struct *tty);
@@ -337,14 +333,17 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
/* Functions needed by other parts of the usbserial core */
struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
void usb_serial_put(struct usb_serial *serial);
+
+int usb_serial_claim_interface(struct usb_serial *serial, struct usb_interface *intf);
+
int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port);
int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags);
int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
void usb_serial_generic_close(struct usb_serial_port *port);
int usb_serial_generic_resume(struct usb_serial *serial);
-int usb_serial_generic_write_room(struct tty_struct *tty);
-int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+unsigned int usb_serial_generic_write_room(struct tty_struct *tty);
+unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout);
void usb_serial_generic_read_bulk_callback(struct urb *urb);
void usb_serial_generic_write_bulk_callback(struct urb *urb);
@@ -379,7 +378,7 @@ void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
int usb_serial_bus_register(struct usb_serial_driver *device);
void usb_serial_bus_deregister(struct usb_serial_driver *device);
-extern struct bus_type usb_serial_bus_type;
+extern const struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;
static inline void usb_serial_debug_data(struct device *dev,
@@ -391,7 +390,7 @@ static inline void usb_serial_debug_data(struct device *dev,
}
/*
- * Macro for reporting errors in write path to avoid inifinite loop
+ * Macro for reporting errors in write path to avoid infinite loop
* when port is used as a console.
*/
#define dev_err_console(usport, fmt, ...) \
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index e0240f864548..2827ce72e502 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -9,8 +9,6 @@
*
* This file contains definitions taken from the
* USB Mass Storage Class Specification Overview
- *
- * Distributed under the terms of the GNU GPL, version two.
*/
/* Storage subclass codes */
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
new file mode 100644
index 000000000000..47a86b8a4a50
--- /dev/null
+++ b/include/linux/usb/tcpci.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * USB Type-C Port Controller Interface.
+ */
+
+#ifndef __LINUX_USB_TCPCI_H
+#define __LINUX_USB_TCPCI_H
+
+#include <linux/usb/typec.h>
+#include <linux/usb/tcpm.h>
+
+#define TCPC_VENDOR_ID 0x0
+#define TCPC_PRODUCT_ID 0x2
+#define TCPC_BCD_DEV 0x4
+#define TCPC_TC_REV 0x6
+#define TCPC_PD_REV 0x8
+#define TCPC_PD_INT_REV 0xa
+
+#define TCPC_ALERT 0x10
+#define TCPC_ALERT_EXTND BIT(14)
+#define TCPC_ALERT_EXTENDED_STATUS BIT(13)
+#define TCPC_ALERT_VBUS_DISCNCT BIT(11)
+#define TCPC_ALERT_RX_BUF_OVF BIT(10)
+#define TCPC_ALERT_FAULT BIT(9)
+#define TCPC_ALERT_V_ALARM_LO BIT(8)
+#define TCPC_ALERT_V_ALARM_HI BIT(7)
+#define TCPC_ALERT_TX_SUCCESS BIT(6)
+#define TCPC_ALERT_TX_DISCARDED BIT(5)
+#define TCPC_ALERT_TX_FAILED BIT(4)
+#define TCPC_ALERT_RX_HARD_RST BIT(3)
+#define TCPC_ALERT_RX_STATUS BIT(2)
+#define TCPC_ALERT_POWER_STATUS BIT(1)
+#define TCPC_ALERT_CC_STATUS BIT(0)
+
+#define TCPC_ALERT_MASK 0x12
+#define TCPC_POWER_STATUS_MASK 0x14
+
+#define TCPC_FAULT_STATUS_MASK 0x15
+#define TCPC_FAULT_STATUS_MASK_VCONN_OC BIT(1)
+
+#define TCPC_EXTENDED_STATUS_MASK 0x16
+#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0)
+
+#define TCPC_ALERT_EXTENDED_MASK 0x17
+#define TCPC_SINK_FAST_ROLE_SWAP BIT(0)
+
+#define TCPC_CONFIG_STD_OUTPUT 0x18
+
+#define TCPC_TCPC_CTRL 0x19
+#define TCPC_TCPC_CTRL_ORIENTATION BIT(0)
+#define PLUG_ORNT_CC1 0
+#define PLUG_ORNT_CC2 1
+#define TCPC_TCPC_CTRL_BIST_TM BIT(1)
+#define TCPC_TCPC_CTRL_EN_LK4CONN_ALRT BIT(6)
+
+#define TCPC_EXTENDED_STATUS 0x20
+#define TCPC_EXTENDED_STATUS_VSAFE0V BIT(0)
+
+#define TCPC_ROLE_CTRL 0x1a
+#define TCPC_ROLE_CTRL_DRP BIT(6)
+#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
+#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
+#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
+#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
+#define TCPC_ROLE_CTRL_CC2_SHIFT 2
+#define TCPC_ROLE_CTRL_CC2_MASK 0x3
+#define TCPC_ROLE_CTRL_CC1_SHIFT 0
+#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC_RA 0x0
+#define TCPC_ROLE_CTRL_CC_RP 0x1
+#define TCPC_ROLE_CTRL_CC_RD 0x2
+#define TCPC_ROLE_CTRL_CC_OPEN 0x3
+
+#define TCPC_FAULT_CTRL 0x1b
+
+#define TCPC_POWER_CTRL 0x1c
+#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0)
+#define TCPC_POWER_CTRL_BLEED_DISCHARGE BIT(3)
+#define TCPC_POWER_CTRL_AUTO_DISCHARGE BIT(4)
+#define TCPC_DIS_VOLT_ALRM BIT(5)
+#define TCPC_POWER_CTRL_VBUS_VOLT_MON BIT(6)
+#define TCPC_FAST_ROLE_SWAP_EN BIT(7)
+
+#define TCPC_CC_STATUS 0x1d
+#define TCPC_CC_STATUS_TOGGLING BIT(5)
+#define TCPC_CC_STATUS_TERM BIT(4)
+#define TCPC_CC_STATUS_TERM_RP 0
+#define TCPC_CC_STATUS_TERM_RD 1
+#define TCPC_CC_STATE_SRC_OPEN 0
+#define TCPC_CC_STATUS_CC2_SHIFT 2
+#define TCPC_CC_STATUS_CC2_MASK 0x3
+#define TCPC_CC_STATUS_CC1_SHIFT 0
+#define TCPC_CC_STATUS_CC1_MASK 0x3
+
+#define TCPC_POWER_STATUS 0x1e
+#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
+#define TCPC_POWER_STATUS_UNINIT BIT(6)
+#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
+#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
+#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
+#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
+#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
+
+#define TCPC_FAULT_STATUS 0x1f
+#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
+#define TCPC_FAULT_STATUS_VCONN_OC BIT(1)
+
+#define TCPC_ALERT_EXTENDED 0x21
+
+#define TCPC_COMMAND 0x23
+#define TCPC_CMD_WAKE_I2C 0x11
+#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22
+#define TCPC_CMD_ENABLE_VBUS_DETECT 0x33
+#define TCPC_CMD_DISABLE_SINK_VBUS 0x44
+#define TCPC_CMD_SINK_VBUS 0x55
+#define TCPC_CMD_DISABLE_SRC_VBUS 0x66
+#define TCPC_CMD_SRC_VBUS_DEFAULT 0x77
+#define TCPC_CMD_SRC_VBUS_HIGH 0x88
+#define TCPC_CMD_LOOK4CONNECTION 0x99
+#define TCPC_CMD_RXONEMORE 0xAA
+#define TCPC_CMD_I2C_IDLE 0xFF
+
+#define TCPC_DEV_CAP_1 0x24
+#define TCPC_DEV_CAP_2 0x26
+#define TCPC_STD_INPUT_CAP 0x28
+#define TCPC_STD_OUTPUT_CAP 0x29
+
+#define TCPC_MSG_HDR_INFO 0x2e
+#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
+#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
+#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
+
+#define TCPC_RX_DETECT 0x2f
+#define TCPC_RX_DETECT_HARD_RESET BIT(5)
+#define TCPC_RX_DETECT_SOP BIT(0)
+#define TCPC_RX_DETECT_SOP1 BIT(1)
+#define TCPC_RX_DETECT_SOP2 BIT(2)
+#define TCPC_RX_DETECT_DBG1 BIT(3)
+#define TCPC_RX_DETECT_DBG2 BIT(4)
+
+#define TCPC_RX_BYTE_CNT 0x30
+#define TCPC_RX_BUF_FRAME_TYPE 0x31
+#define TCPC_RX_BUF_FRAME_TYPE_SOP 0
+#define TCPC_RX_BUF_FRAME_TYPE_SOP1 1
+#define TCPC_RX_HDR 0x32
+#define TCPC_RX_DATA 0x34 /* through 0x4f */
+
+#define TCPC_TRANSMIT 0x50
+#define TCPC_TRANSMIT_RETRY_SHIFT 4
+#define TCPC_TRANSMIT_RETRY_MASK 0x3
+#define TCPC_TRANSMIT_TYPE_SHIFT 0
+#define TCPC_TRANSMIT_TYPE_MASK 0x7
+
+#define TCPC_TX_BYTE_CNT 0x51
+#define TCPC_TX_HDR 0x52
+#define TCPC_TX_DATA 0x54 /* through 0x6f */
+
+#define TCPC_VBUS_VOLTAGE 0x70
+#define TCPC_VBUS_VOLTAGE_MASK 0x3ff
+#define TCPC_VBUS_VOLTAGE_LSB_MV 25
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH 0x72
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV 25
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX 0x3ff
+#define TCPC_VBUS_STOP_DISCHARGE_THRESH 0x74
+#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76
+#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78
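+
+/*
+ * Illustrative note (not from the patch itself): the 10-bit voltage and
+ * threshold fields above count in 25 mV units, so a raw reading converts as
+ *
+ *	mv = (raw & TCPC_VBUS_VOLTAGE_MASK) * TCPC_VBUS_VOLTAGE_LSB_MV;
+ */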
+
+/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible through I2C_WRITE_BYTE_COUNT */
+#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31
+
+#define tcpc_presenting_rd(reg, cc) \
+ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
+ (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
+ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
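+
+/*
+ * For example (illustrative): with TCPC_ROLE_CTRL_DRP clear and the CC1
+ * field set to TCPC_ROLE_CTRL_CC_RD, tcpc_presenting_rd(reg, CC1) evaluates
+ * true; it is always false while DRP toggling is enabled.
+ */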
+
+struct tcpci;
+
+/*
+ * @TX_BUF_BYTE_x_hidden:
+ * Optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT.
+ * @frs_sourcing_vbus:
+ * Optional; Callback to perform chip specific operations when FRS
+ * is sourcing vbus.
+ * @auto_discharge_disconnect:
+ * Optional; Enables TCPC to autonomously discharge vbus on disconnect.
+ * @vbus_vsafe0v:
+ * Optional; Set when TCPC can detect whether vbus is at VSAFE0V.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates if port
+ * partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
+ * @check_contaminant:
+ * Optional; The callback is invoked when chip level drivers indicate
+ * that the USB port needs to be checked for contaminant presence.
+ * Chip level drivers are expected to check for contaminant and call
+ * tcpm_clean_port when the port is clean to put the port back into
+ * toggling state.
+ * @cable_comm_capable:
+ * Optional; Set when the TCPC can communicate with cable plugs over SOP'.
+ * @attempt_vconn_swap_discovery:
+ * Optional; The callback is called by the TCPM when the result of
+ * a Discover Identity request indicates that the port partner is
+ * a receptacle capable of modal operation. Chip level TCPCI drivers
+ * can implement their own policy to determine if and when a Vconn
+ * swap following Discover Identity on SOP' occurs.
+ * Return true when the TCPM is allowed to request a Vconn swap
+ * after Discover Identity on SOP.
+ */
+struct tcpci_data {
+ struct regmap *regmap;
+ unsigned char TX_BUF_BYTE_x_hidden:1;
+ unsigned char auto_discharge_disconnect:1;
+ unsigned char vbus_vsafe0v:1;
+ unsigned char cable_comm_capable:1;
+
+ int (*init)(struct tcpci *tcpci, struct tcpci_data *data);
+ int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data,
+ bool enable);
+ int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data,
+ enum typec_cc_status cc);
+ int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
+ void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
+ void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data,
+ bool capable);
+ void (*check_contaminant)(struct tcpci *tcpci, struct tcpci_data *data);
+ bool (*attempt_vconn_swap_discovery)(struct tcpci *tcpci, struct tcpci_data *data);
+};
+
+struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
+void tcpci_unregister_port(struct tcpci *tcpci);
+irqreturn_t tcpci_irq(struct tcpci *tcpci);
+
+struct tcpm_port;
+struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci);
+
+static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
+{
+ switch (cc) {
+ case 0x1:
+ return sink ? TYPEC_CC_RP_DEF : TYPEC_CC_RA;
+ case 0x2:
+ return sink ? TYPEC_CC_RP_1_5 : TYPEC_CC_RD;
+ case 0x3:
+ if (sink)
+ return TYPEC_CC_RP_3_0;
+ fallthrough;
+ case 0x0:
+ default:
+ return TYPEC_CC_OPEN;
+ }
+}
+#endif /* __LINUX_USB_TCPCI_H */
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 89f58760cf48..061da9546a81 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -19,6 +19,10 @@ enum typec_cc_status {
TYPEC_CC_RP_3_0,
};
+/* Collision Avoidance */
+#define SINK_TX_NG TYPEC_CC_RP_1_5
+#define SINK_TX_OK TYPEC_CC_RP_3_0
+
enum typec_cc_polarity {
TYPEC_POLARITY_CC1,
TYPEC_POLARITY_CC2,
@@ -62,6 +66,8 @@ enum tcpm_transmit_type {
* For example, some tcpcs may include BC1.2 charger detection
* and use that in this case.
* @set_cc: Called to set value of CC pins
+ * @apply_rc: Optional; Needed to move TCPCI based chipset to APPLY_RC state
+ * as stated by the TCPCI specification.
* @get_cc: Called to read current CC pin values
* @set_polarity:
* Called to set polarity
@@ -78,8 +84,52 @@ enum tcpm_transmit_type {
* automatically if a connection is established.
* @try_role: Optional; called to set a preferred role
* @pd_transmit:Called to transmit PD message
- * @mux: Pointer to multiplexer data
* @set_bist_data: Turn on/off bist data mode for compliance testing
+ * @enable_frs:
+ * Optional; Called to enable/disable PD 3.0 fast role swap.
+ * Enabling frs is accessory dependent as not all PD3.0
+ * accessories support fast role swap.
+ * @frs_sourcing_vbus:
+ * Optional; Called to notify that vbus is now being sourced.
+ * Low level drivers can perform chip specific operations, if any.
+ * @enable_auto_vbus_discharge:
+ * Optional; TCPCI spec based TCPC implementations can optionally
+ * support hardware that autonomously discharges vbus upon disconnect
+ * as sink or source. TCPM signals the TCPC to enable the mechanism upon
+ * entering the connected state and to disable it upon disconnect.
+ * @set_auto_vbus_discharge_threshold:
+ * Mandatory when enable_auto_vbus_discharge is implemented. TCPM
+ * calls this function to allow lower level drivers to program the
+ * vbus threshold voltage below which the vbus discharge circuit
+ * will be turned on. requested_vbus_voltage is set to 0 when vbus
+ * is expected to go away by design, e.g. during PR_SWAP or
+ * HARD_RESET.
+ * @is_vbus_vsafe0v:
+ * Optional; TCPCI spec based TCPC implementations are expected to
+ * detect VSAFE0V voltage level at vbus. When detection of VSAFE0V
+ * is supported by TCPC, set this callback for TCPM to query
+ * whether vbus is at VSAFE0V when needed.
+ * Returns true when vbus is at VSAFE0V, false otherwise.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates if port
+ * partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
+ * @check_contaminant:
+ * Optional; The callback is called when CC pins report open status
+ * at the end of the debounce period or when the port is still
+ * toggling. Chip level drivers are expected to check for contaminant
+ * and call tcpm_clean_port when the port is clean.
+ * @cable_comm_capable:
+ * Optional; Returns whether cable communication over SOP' is supported
+ * by the TCPC.
+ * @attempt_vconn_swap_discovery:
+ * Optional; The callback is called by the TCPM when the result of
+ * a Discover Identity request indicates that the port partner is
+ * a receptacle capable of modal operation. Chip level TCPCI drivers
+ * can implement their own policy to determine if and when a Vconn
+ * swap following Discover Identity on SOP' occurs.
+ * Return true when the TCPM is allowed to request a Vconn swap
+ * after Discover Identity on SOP.
*/
struct tcpc_dev {
struct fwnode_handle *fwnode;
@@ -88,10 +138,14 @@ struct tcpc_dev {
int (*get_vbus)(struct tcpc_dev *dev);
int (*get_current_limit)(struct tcpc_dev *dev);
int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
+ int (*apply_rc)(struct tcpc_dev *dev, enum typec_cc_status cc,
+ enum typec_cc_polarity polarity);
int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
enum typec_cc_status *cc2);
int (*set_polarity)(struct tcpc_dev *dev,
enum typec_cc_polarity polarity);
+ int (*set_orientation)(struct tcpc_dev *dev,
+ enum typec_orientation orientation);
int (*set_vconn)(struct tcpc_dev *dev, bool on);
int (*set_vbus)(struct tcpc_dev *dev, bool on, bool charge);
int (*set_current_limit)(struct tcpc_dev *dev, u32 max_ma, u32 mv);
@@ -103,8 +157,18 @@ struct tcpc_dev {
enum typec_cc_status cc);
int (*try_role)(struct tcpc_dev *dev, int role);
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
- const struct pd_message *msg);
+ const struct pd_message *msg, unsigned int negotiated_rev);
int (*set_bist_data)(struct tcpc_dev *dev, bool on);
+ int (*enable_frs)(struct tcpc_dev *dev, bool enable);
+ void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+ int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+ int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+ bool pps_active, u32 requested_vbus_voltage);
+ bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
+ void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
+ void (*check_contaminant)(struct tcpc_dev *dev);
+ bool (*cable_comm_capable)(struct tcpc_dev *dev);
+ bool (*attempt_vconn_swap_discovery)(struct tcpc_dev *dev);
};
struct tcpm_port;
@@ -114,11 +178,17 @@ void tcpm_unregister_port(struct tcpm_port *port);
void tcpm_vbus_change(struct tcpm_port *port);
void tcpm_cc_change(struct tcpm_port *port);
+void tcpm_sink_frs(struct tcpm_port *port);
+void tcpm_sourcing_vbus(struct tcpm_port *port);
void tcpm_pd_receive(struct tcpm_port *port,
- const struct pd_message *msg);
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type);
void tcpm_pd_transmit_complete(struct tcpm_port *port,
enum tcpm_transmit_status status);
void tcpm_pd_hard_reset(struct tcpm_port *port);
void tcpm_tcpc_reset(struct tcpm_port *port);
+void tcpm_port_clean(struct tcpm_port *port);
+bool tcpm_port_is_toggling(struct tcpm_port *port);
+void tcpm_port_error_recovery(struct tcpm_port *port);
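+
+/*
+ * Usage sketch (illustrative only): a low level driver fills in the struct
+ * tcpc_dev callbacks and calls tcpm_register_port(dev, &tcpc); it then
+ * reports events with tcpm_vbus_change(), tcpm_cc_change() and
+ * tcpm_pd_receive(), and tears the port down with tcpm_unregister_port().
+ */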
#endif /* __LINUX_USB_TCPM_H */
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index c29d1b4c9381..46e73584b6e6 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -1,16 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __TEGRA_USB_PHY_H
@@ -18,6 +8,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/usb/otg.h>
@@ -30,6 +21,7 @@
* enter host mode
* requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level
* and hsdiscon_level should be set for adequate signal quality
+ * requires_pmc_ao_power_up: true if USB AO is powered down by default
*/
struct tegra_phy_soc_config {
@@ -37,6 +29,7 @@ struct tegra_phy_soc_config {
bool has_hostpc;
bool requires_usbmode_setup;
bool requires_extra_tuning_parameters;
+ bool requires_pmc_ao_power_up;
};
struct tegra_utmip_config {
@@ -62,6 +55,7 @@ enum tegra_usb_phy_port_speed {
struct tegra_xtal_freq;
struct tegra_usb_phy {
+ int irq;
int instance;
const struct tegra_xtal_freq *freq;
void __iomem *regs;
@@ -70,6 +64,7 @@ struct tegra_usb_phy {
struct clk *pll_u;
struct clk *pad_clk;
struct regulator *vbus;
+ struct regmap *pmc_regmap;
enum usb_dr_mode mode;
void *config;
const struct tegra_phy_soc_config *soc_config;
@@ -79,6 +74,8 @@ struct tegra_usb_phy {
bool is_ulpi_phy;
struct gpio_desc *reset_gpio;
struct reset_control *pad_rst;
+ bool wakeup_enabled;
+ bool pad_wakeup;
bool powered_on;
};
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 9cb1bec94b71..b35b427561ab 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -17,10 +17,15 @@ struct typec_partner;
struct typec_cable;
struct typec_plug;
struct typec_port;
+struct typec_altmode_ops;
+struct typec_cable_ops;
struct fwnode_handle;
struct device;
+struct usb_power_delivery;
+struct usb_power_delivery_desc;
+
enum typec_port_type {
TYPEC_PORT_SRC,
TYPEC_PORT_SNK,
@@ -51,6 +56,16 @@ enum typec_role {
TYPEC_SOURCE,
};
+static inline int is_sink(enum typec_role role)
+{
+ return role == TYPEC_SINK;
+}
+
+static inline int is_source(enum typec_role role)
+{
+ return role == TYPEC_SOURCE;
+}
+
enum typec_pwr_opmode {
TYPEC_PWR_MODE_USB,
TYPEC_PWR_MODE_1_5A,
@@ -126,15 +141,26 @@ struct typec_altmode_desc {
enum typec_port_data roles;
};
+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision);
+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes);
struct typec_altmode
*typec_partner_register_altmode(struct typec_partner *partner,
const struct typec_altmode_desc *desc);
+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes);
struct typec_altmode
*typec_plug_register_altmode(struct typec_plug *plug,
const struct typec_altmode_desc *desc);
struct typec_altmode
*typec_port_register_altmode(struct typec_port *port,
const struct typec_altmode_desc *desc);
+
+void typec_port_register_altmodes(struct typec_port *port,
+ const struct typec_altmode_ops *ops, void *drvdata,
+ struct typec_altmode **altmodes, size_t n);
+
+void typec_port_register_cable_ops(struct typec_altmode **altmodes, int max_altmodes,
+ const struct typec_cable_ops *ops);
+
void typec_unregister_altmode(struct typec_altmode *altmode);
struct typec_port *typec_altmode2port(struct typec_altmode *alt);
@@ -162,6 +188,7 @@ struct typec_plug_desc {
* @type: The plug type from USB PD Cable VDO
* @active: Is the cable active or passive
* @identity: Result of Discover Identity command
+ * @pd_revision: USB Power Delivery Specification revision if supported
*
* Represents USB Type-C Cable attached to USB Type-C port.
*/
@@ -169,6 +196,8 @@ struct typec_cable_desc {
enum typec_plug_type type;
unsigned int active:1;
struct usb_pd_identity *identity;
+ u16 pd_revision; /* 0300H = "3.0" */
+
};
/*
@@ -176,15 +205,27 @@ struct typec_cable_desc {
* @usb_pd: USB Power Delivery support
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
+ * @pd_revision: USB Power Delivery Specification Revision if supported
+ * @attach: Notification about attached USB device
+ * @deattach: Notification about removed USB device
*
* Details about a partner that is attached to USB Type-C port. If @identity
* member exists when partner is registered, a directory named "identity" is
* created to sysfs for the partner device.
+ *
+ * @pd_revision is based on the setting of the "Specification Revision" field
+ * in the message header on the initial "Source Capabilities" message received
+ * from the partner, or a "Request" message received from the partner, depending
+ * on whether our port is a Sink or a Source.
*/
struct typec_partner_desc {
unsigned int usb_pd:1;
enum typec_accessory accessory;
struct usb_pd_identity *identity;
+ u16 pd_revision; /* 0300H = "3.0" */
+
+ void (*attach)(struct typec_partner *partner, struct device *dev);
+ void (*deattach)(struct typec_partner *partner, struct device *dev);
};
/**
@@ -194,6 +235,8 @@ struct typec_partner_desc {
* @pr_set: Set Power Role
* @vconn_set: Source VCONN
* @port_type_set: Set port type
+ * @pd_get: Get available USB Power Delivery Capabilities.
+ * @pd_set: Set USB Power Delivery Capabilities.
*/
struct typec_operations {
int (*try_role)(struct typec_port *port, int role);
@@ -202,6 +245,14 @@ struct typec_operations {
int (*vconn_set)(struct typec_port *port, enum typec_role role);
int (*port_type_set)(struct typec_port *port,
enum typec_port_type type);
+ struct usb_power_delivery **(*pd_get)(struct typec_port *port);
+ int (*pd_set)(struct typec_port *port, struct usb_power_delivery *pd);
+};
+
+enum usb_pd_svdm_ver {
+ SVDM_VER_1_0 = 0,
+ SVDM_VER_2_0 = 1,
+ SVDM_VER_MAX = SVDM_VER_2_0,
};
/*
@@ -210,10 +261,12 @@ struct typec_operations {
* @data: Supported data role of the port
* @revision: USB Type-C Specification release. Binary coded decimal
* @pd_revision: USB Power Delivery Specification revision if supported
+ * @svdm_version: USB PD Structured VDM version if supported
* @prefer_role: Initial role preference (DRP ports).
* @accessory: Supported Accessory Modes
* @fwnode: Optional fwnode of the port
* @driver_data: Private pointer for driver specific info
+ * @pd: Optional USB Power Delivery Support
* @ops: Port operations vector
*
* Static capabilities of a single USB Type-C port.
@@ -223,6 +276,7 @@ struct typec_capability {
enum typec_port_data data;
u16 revision; /* 0120H = "1.2" */
u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
unsigned int orientation_aware:1;
@@ -230,6 +284,8 @@ struct typec_capability {
struct fwnode_handle *fwnode;
void *driver_data;
+ struct usb_power_delivery *pd;
+
const struct typec_operations *ops;
};
@@ -268,8 +324,59 @@ int typec_set_mode(struct typec_port *port, int mode);
void *typec_get_drvdata(struct typec_port *port);
+int typec_get_fw_cap(struct typec_capability *cap,
+ struct fwnode_handle *fwnode);
+
+int typec_find_pwr_opmode(const char *name);
int typec_find_orientation(const char *name);
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
+
+void typec_partner_set_svdm_version(struct typec_partner *partner,
+ enum usb_pd_svdm_ver svdm_version);
+int typec_get_negotiated_svdm_version(struct typec_port *port);
+
+int typec_get_cable_svdm_version(struct typec_port *port);
+void typec_cable_set_svdm_version(struct typec_cable *cable, enum usb_pd_svdm_ver svdm_version);
+
+struct usb_power_delivery *typec_partner_usb_power_delivery_register(struct typec_partner *partner,
+ struct usb_power_delivery_desc *desc);
+
+int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_delivery *pd);
+int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
+ struct usb_power_delivery *pd);
+
+/**
+ * struct typec_connector - Representation of Type-C port for external drivers
+ * @attach: notification about device attachment
+ * @deattach: notification about device removal
+ *
+ * Drivers that control the USB and other ports (DisplayPorts, etc.) connected
+ * to the Type-C connectors can use these callbacks to inform the Type-C
+ * connector class about connections and disconnections. That information can
+ * then be used by the typec-port drivers to power on or off parts that are
+ * not needed - for example, in USB mode, if a USB2 device is enumerated, the
+ * USB3 components (retimers, PHYs, etc.) do not need to be powered on.
+ *
+ * The attached (enumerated) devices will be linked with the typec-partner device.
+ */
+struct typec_connector {
+ void (*attach)(struct typec_connector *con, struct device *dev);
+ void (*deattach)(struct typec_connector *con, struct device *dev);
+};
+
+static inline void typec_attach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->attach)
+ con->attach(con, dev);
+}
+
+static inline void typec_deattach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->deattach)
+ con->deattach(con, dev);
+}
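+
+/*
+ * Usage sketch (illustrative only): a driver that was handed a struct
+ * typec_connector, e.g. through platform glue, calls
+ * typec_attach(con, &udev->dev) when a device is enumerated and
+ * typec_deattach(con, &udev->dev) when it goes away; both helpers accept
+ * a NULL connector, so callers need no extra checks.
+ */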
+
#endif /* __LINUX_USB_TYPEC_H */
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index a4b65eaa0f62..b3c0866ea70f 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -20,6 +20,7 @@ struct typec_altmode_ops;
* @active: Tells has the mode been entered or not
* @desc: Optional human readable description of the mode
* @ops: Operations vector from the driver
+ * @cable_ops: Cable operations vector from the driver.
*/
struct typec_altmode {
struct device dev;
@@ -30,6 +31,7 @@ struct typec_altmode {
char *desc;
const struct typec_altmode_ops *ops;
+ const struct typec_cable_ops *cable_ops;
};
#define to_typec_altmode(d) container_of(d, struct typec_altmode, dev)
@@ -67,7 +69,7 @@ struct typec_altmode_ops {
int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
int typec_altmode_exit(struct typec_altmode *altmode);
-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
const u32 header, const u32 *vdo, int count);
int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
@@ -75,6 +77,34 @@ int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
const struct typec_altmode *
typec_altmode_get_partner(struct typec_altmode *altmode);
+/**
+ * struct typec_cable_ops - Cable alternate mode operations vector
+ * @enter: Operations to be executed with Enter Mode Command
+ * @exit: Operations to be executed with Exit Mode Command
+ * @vdm: Callback for SVID specific commands
+ */
+struct typec_cable_ops {
+ int (*enter)(struct typec_altmode *altmode, enum typec_plug_index sop, u32 *vdo);
+ int (*exit)(struct typec_altmode *altmode, enum typec_plug_index sop);
+ int (*vdm)(struct typec_altmode *altmode, enum typec_plug_index sop,
+ const u32 hdr, const u32 *vdo, int cnt);
+};
+
+int typec_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop, u32 *vdo);
+int typec_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop);
+int typec_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
+ const u32 header, const u32 *vdo, int count);
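+
+/*
+ * Illustrative note: an alternate mode driver that also drives the cable
+ * plugs addresses SOP'/SOP'' through these variants, e.g.
+ * typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL), instead of the
+ * partner-directed typec_altmode_enter().
+ */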
+
+/**
+ * typec_altmode_get_cable_svdm_version - Get negotiated SVDM version for cable plug
+ * @altmode: Handle to the alternate mode
+ */
+static inline int
+typec_altmode_get_cable_svdm_version(struct typec_altmode *altmode)
+{
+ return typec_get_cable_svdm_version(typec_altmode2port(altmode));
+}
+
/*
* These are the connector states (USB, Safe and Alt Mode) defined in USB Type-C
* Specification. SVID specific connector states are expected to follow and
@@ -124,7 +154,7 @@ struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
/**
* typec_altmode_get_orientation - Get cable plug orientation
- * altmode: Handle to the alternate mode
+ * @altmode: Handle to the alternate mode
*/
static inline enum typec_orientation
typec_altmode_get_orientation(struct typec_altmode *altmode)
@@ -133,6 +163,16 @@ typec_altmode_get_orientation(struct typec_altmode *altmode)
}
/**
+ * typec_altmode_get_svdm_version - Get negotiated SVDM version
+ * @altmode: Handle to the alternate mode
+ */
+static inline int
+typec_altmode_get_svdm_version(struct typec_altmode *altmode)
+{
+ return typec_get_negotiated_svdm_version(typec_altmode2port(altmode));
+}
+
+/**
* struct typec_altmode_driver - USB Type-C alternate mode device driver
* @id_table: Null terminated array of SVIDs
* @probe: Callback for device binding
@@ -152,10 +192,26 @@ struct typec_altmode_driver {
#define to_altmode_driver(d) container_of(d, struct typec_altmode_driver, \
driver)
+/**
+ * typec_altmode_register_driver - registers a USB Type-C alternate mode
+ * device driver
+ * @drv: pointer to struct typec_altmode_driver
+ *
+ * These drivers will be bound to the partner alternate mode devices. They will
+ * handle all SVID specific communication.
+ */
#define typec_altmode_register_driver(drv) \
__typec_altmode_register_driver(drv, THIS_MODULE)
int __typec_altmode_register_driver(struct typec_altmode_driver *drv,
struct module *module);
+/**
+ * typec_altmode_unregister_driver - unregisters a USB Type-C alternate mode
+ * device driver
+ * @drv: pointer to struct typec_altmode_driver
+ *
+ * These drivers will be bound to the partner alternate mode devices. They will
+ * handle all SVID specific communication.
+ */
void typec_altmode_unregister_driver(struct typec_altmode_driver *drv);
#define module_typec_altmode_driver(__typec_altmode_driver) \
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index fc4c7edb2e8a..f2da264d9c14 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -3,6 +3,7 @@
#define __USB_TYPEC_DP_H
#include <linux/usb/typec_altmode.h>
+#include <linux/bitfield.h>
#define USB_TYPEC_DP_SID 0xff01
/* USB IF has not assigned a Standard ID (SID) for VirtualLink,
@@ -67,12 +68,26 @@ enum {
#define DP_CAP_UFP_D 1
#define DP_CAP_DFP_D 2
#define DP_CAP_DFP_D_AND_UFP_D 3
-#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */
-#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */
+#define DP_CAP_DP_SIGNALLING(_cap_) FIELD_GET(GENMASK(5, 2), _cap_)
+#define DP_CAP_SIGNALLING_HBR3 1
+#define DP_CAP_SIGNALLING_UHBR10 2
+#define DP_CAP_SIGNALLING_UHBR20 3
#define DP_CAP_RECEPTACLE BIT(6)
#define DP_CAP_USB BIT(7)
-#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
-#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
+#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) FIELD_GET(GENMASK(15, 8), _cap_)
+#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) FIELD_GET(GENMASK(23, 16), _cap_)
+/* Get pin assignment taking plug & receptacle into consideration */
+#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_UHBR_13_5_SUPPORT BIT(26)
+#define DP_CAP_CABLE_TYPE(_cap_) FIELD_GET(GENMASK(29, 28), _cap_)
+#define DP_CAP_CABLE_TYPE_PASSIVE 0
+#define DP_CAP_CABLE_TYPE_RE_TIMER 1
+#define DP_CAP_CABLE_TYPE_RE_DRIVER 2
+#define DP_CAP_CABLE_TYPE_OPTICAL 3
+#define DP_CAP_DPAM_VERSION BIT(30)
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
@@ -92,13 +107,24 @@ enum {
#define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3)
#define DP_CONF_UFP_U_AS_DFP_D BIT(0)
#define DP_CONF_UFP_U_AS_UFP_D BIT(1)
-#define DP_CONF_SIGNALING_DP BIT(2)
-#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */
+#define DP_CONF_SIGNALLING_MASK GENMASK(5, 2)
+#define DP_CONF_SIGNALLING_SHIFT 2
+#define DP_CONF_SIGNALLING_HBR3 1
+#define DP_CONF_SIGNALLING_UHBR10 2
+#define DP_CONF_SIGNALLING_UHBR20 3
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
-/* Helper for setting/getting the pin assignement value to the configuration */
+/* Helper for setting/getting the pin assignment value to the configuration */
#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
-#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
+#define DP_CONF_GET_PIN_ASSIGN(_conf_) FIELD_GET(GENMASK(15, 8), _conf_)
+#define DP_CONF_UHBR13_5_SUPPORT BIT(26)
+#define DP_CONF_CABLE_TYPE_MASK GENMASK(29, 28)
+#define DP_CONF_CABLE_TYPE_SHIFT 28
+#define DP_CONF_CABLE_TYPE_PASSIVE 0
+#define DP_CONF_CABLE_TYPE_RE_TIMER 1
+#define DP_CONF_CABLE_TYPE_RE_DRIVER 2
+#define DP_CONF_CABLE_TYPE_OPTICAL 3
+#define DP_CONF_DPAM_VERSION BIT(30)
#endif /* __USB_TYPEC_DP_H */
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index a9d9957933dc..2489a7857d8e 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -8,11 +8,13 @@
struct device;
struct typec_mux;
+struct typec_mux_dev;
struct typec_switch;
+struct typec_switch_dev;
struct typec_altmode;
struct fwnode_handle;
-typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
+typedef int (*typec_switch_set_fn_t)(struct typec_switch_dev *sw,
enum typec_orientation orientation);
struct typec_switch_desc {
@@ -32,13 +34,13 @@ static inline struct typec_switch *typec_switch_get(struct device *dev)
return fwnode_typec_switch_get(dev_fwnode(dev));
}
-struct typec_switch *
+struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc);
-void typec_switch_unregister(struct typec_switch *sw);
+void typec_switch_unregister(struct typec_switch_dev *sw);
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data);
-void *typec_switch_get_drvdata(struct typec_switch *sw);
+void typec_switch_set_drvdata(struct typec_switch_dev *sw, void *data);
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw);
struct typec_mux_state {
struct typec_altmode *alt;
@@ -46,7 +48,7 @@ struct typec_mux_state {
void *data;
};
-typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux,
+typedef int (*typec_mux_set_fn_t)(struct typec_mux_dev *mux,
struct typec_mux_state *state);
struct typec_mux_desc {
@@ -56,22 +58,51 @@ struct typec_mux_desc {
void *drvdata;
};
-struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
- const struct typec_altmode_desc *desc);
+#if IS_ENABLED(CONFIG_TYPEC)
+
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode);
void typec_mux_put(struct typec_mux *mux);
int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state);
-static inline struct typec_mux *
-typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
+struct typec_mux_dev *
+typec_mux_register(struct device *parent, const struct typec_mux_desc *desc);
+void typec_mux_unregister(struct typec_mux_dev *mux);
+
+void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data);
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux);
+
+#else
+
+static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode)
{
- return fwnode_typec_mux_get(dev_fwnode(dev), desc);
+ return NULL;
}
-struct typec_mux *
-typec_mux_register(struct device *parent, const struct typec_mux_desc *desc);
-void typec_mux_unregister(struct typec_mux *mux);
+static inline void typec_mux_put(struct typec_mux *mux) {}
+
+static inline int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+{
+ return 0;
+}
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data);
-void *typec_mux_get_drvdata(struct typec_mux *mux);
+static inline struct typec_mux_dev *
+typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline void typec_mux_unregister(struct typec_mux_dev *mux) {}
+
+static inline void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data) {}
+static inline void *typec_mux_get_drvdata(struct typec_mux_dev *mux)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+#endif /* CONFIG_TYPEC */
+
+static inline struct typec_mux *typec_mux_get(struct device *dev)
+{
+ return fwnode_typec_mux_get(dev_fwnode(dev));
+}
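+
+/*
+ * Usage sketch (illustrative only): consumers resolve their mux with
+ * typec_mux_get(dev) (or fwnode_typec_mux_get()), program it with
+ * typec_mux_set(mux, &state) and drop it with typec_mux_put(); when
+ * CONFIG_TYPEC is disabled the consumer calls degrade to no-ops.
+ */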
#endif /* __USB_TYPEC_MUX */
diff --git a/include/linux/usb/typec_retimer.h b/include/linux/usb/typec_retimer.h
new file mode 100644
index 000000000000..5e036b3360e2
--- /dev/null
+++ b/include/linux/usb/typec_retimer.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_TYPEC_RETIMER
+#define __USB_TYPEC_RETIMER
+
+#include <linux/property.h>
+#include <linux/usb/typec.h>
+
+struct device;
+struct typec_retimer;
+struct typec_altmode;
+struct fwnode_handle;
+
+struct typec_retimer_state {
+ struct typec_altmode *alt;
+ unsigned long mode;
+ void *data;
+};
+
+typedef int (*typec_retimer_set_fn_t)(struct typec_retimer *retimer,
+ struct typec_retimer_state *state);
+
+struct typec_retimer_desc {
+ struct fwnode_handle *fwnode;
+ typec_retimer_set_fn_t set;
+ const char *name;
+ void *drvdata;
+};
+
+struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode);
+void typec_retimer_put(struct typec_retimer *retimer);
+int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state);
+
+static inline struct typec_retimer *typec_retimer_get(struct device *dev)
+{
+ return fwnode_typec_retimer_get(dev_fwnode(dev));
+}
+
+struct typec_retimer *
+typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc);
+void typec_retimer_unregister(struct typec_retimer *retimer);
+
+void *typec_retimer_get_drvdata(struct typec_retimer *retimer);
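+
+/*
+ * Usage sketch (illustrative only): a retimer driver registers itself with
+ * typec_retimer_register(parent, &desc) and is driven through the
+ * typec_retimer_set_fn_t callback in its descriptor; consumers look the
+ * retimer up with typec_retimer_get(dev) and apply new states with
+ * typec_retimer_set().
+ */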
+
+#endif /* __USB_TYPEC_RETIMER */
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 47c2d501ddce..fa97d7e00f5c 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -3,6 +3,7 @@
#define __USB_TYPEC_TBT_H
#include <linux/usb/typec_altmode.h>
+#include <linux/bitfield.h>
#define USB_TYPEC_VENDOR_INTEL 0x8087
/* Alias for convenience */
@@ -25,7 +26,7 @@ struct typec_thunderbolt_data {
/* TBT3 Device Discover Mode VDO bits */
#define TBT_MODE BIT(0)
-#define TBT_ADAPTER(_vdo_) (((_vdo_) & BIT(16)) >> 16)
+#define TBT_ADAPTER(_vdo_) FIELD_GET(BIT(16), _vdo_)
#define TBT_ADAPTER_LEGACY 0
#define TBT_ADAPTER_TBT3 1
#define TBT_INTEL_SPECIFIC_B0 BIT(26)
@@ -35,16 +36,21 @@ struct typec_thunderbolt_data {
#define TBT_SET_ADAPTER(a) (((a) & 1) << 16)
/* TBT3 Cable Discover Mode VDO bits */
-#define TBT_CABLE_SPEED(_vdo_) (((_vdo_) & GENMASK(18, 16)) >> 16)
+#define TBT_CABLE_SPEED(_vdo_) FIELD_GET(GENMASK(18, 16), _vdo_)
#define TBT_CABLE_USB3_GEN1 1
#define TBT_CABLE_USB3_PASSIVE 2
#define TBT_CABLE_10_AND_20GBPS 3
-#define TBT_CABLE_ROUNDED BIT(19)
+#define TBT_CABLE_ROUNDED_SUPPORT(_vdo_) FIELD_GET(GENMASK(20, 19), _vdo_)
+
+#define TBT_GEN3_NON_ROUNDED 0
+#define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
+#define TBT_CABLE_ACTIVE_PASSIVE BIT(25)
#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
+#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19)
/* TBT3 Device Enter Mode VDO bits */
#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s)
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 36c2982780ad..5050f502c1ed 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -3,10 +3,6 @@
 * ulpi.h -- ULPI defines and function prototypes
*
* Copyright (C) 2010 Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * version 2 of that License.
*/
#ifndef __LINUX_USB_ULPI_H
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h
index 20020c1336d5..70a7e3cdb3c9 100644
--- a/include/linux/usb/usb338x.h
+++ b/include/linux/usb/usb338x.h
@@ -6,17 +6,6 @@
* Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
* Copyright (C) 2003 David Brownell
* Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_USB_USB338X_H
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 2e4f7721fc4e..9f08a584d707 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -4,25 +4,17 @@
*
* Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net>
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_USBNET_H
#define __LINUX_USB_USBNET_H
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
/* interface from usbnet core to each USB networking link we handle */
struct usbnet {
/* housekeeping */
@@ -53,6 +45,9 @@ struct usbnet {
u32 hard_mtu; /* count any extra framing */
size_t rx_urb_size; /* size for rx urbs */
struct mii_if_info mii;
+ long rx_speed; /* If MII not used */
+ long tx_speed; /* If MII not used */
+# define SPEED_UNSET -1
/* various kinds of pending driver work */
struct sk_buff_head rxq;
@@ -65,8 +60,6 @@ struct usbnet {
struct usb_anchor deferred;
struct tasklet_struct bh;
- struct pcpu_sw_netstats __percpu *stats64;
-
struct work_struct kevent;
unsigned long flags;
# define EVENT_TX_HALT 0
@@ -213,6 +206,7 @@ extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_status(struct usbnet *, struct urb *);
+extern int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
@@ -267,10 +261,12 @@ extern void usbnet_pause_rx(struct usbnet *);
extern void usbnet_resume_rx(struct usbnet *);
extern void usbnet_purge_paused_rxq(struct usbnet *);
-extern int usbnet_get_link_ksettings(struct net_device *net,
+extern int usbnet_get_link_ksettings_mii(struct net_device *net,
struct ethtool_link_ksettings *cmd);
-extern int usbnet_set_link_ksettings(struct net_device *net,
+extern int usbnet_set_link_ksettings_mii(struct net_device *net,
const struct ethtool_link_ksettings *cmd);
+extern int usbnet_get_link_ksettings_internal(struct net_device *net,
+ struct ethtool_link_ksettings *cmd);
extern u32 usbnet_get_link(struct net_device *net);
extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
@@ -285,7 +281,5 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
extern void usbnet_update_max_qlen(struct usbnet *dev);
-extern void usbnet_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
new file mode 100644
index 000000000000..88d96095bcb1
--- /dev/null
+++ b/include/linux/usb/uvc.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * v4l2 uvc internal API header
+ *
+ * Some commonly needed functions for uvc drivers
+ */
+
+#ifndef __LINUX_V4L2_UVC_H
+#define __LINUX_V4L2_UVC_H
+
+/* ------------------------------------------------------------------------
+ * GUIDs
+ */
+#define UVC_GUID_UVC_CAMERA \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
+#define UVC_GUID_UVC_OUTPUT \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}
+#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+#define UVC_GUID_UVC_PROCESSING \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}
+#define UVC_GUID_UVC_SELECTOR \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
+#define UVC_GUID_EXT_GPIO_CONTROLLER \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+
+#define UVC_GUID_FORMAT_MJPEG \
+ { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2 \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2_ISIGHT \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_NV12 \
+ { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YV12 \
+ { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_I420 \
+ { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_UYVY \
+ { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y800 \
+ { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8 \
+ { 'Y', '8', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y10 \
+ { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12 \
+ { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BY8 \
+ { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BA81 \
+ { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GBRG \
+ { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GRBG \
+ { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGGB \
+ { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BG16 \
+ { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GB16 \
+ { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RG16 \
+ { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GR16 \
+ { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGBP \
+ { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BGR3 \
+ { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+#define UVC_GUID_FORMAT_BGR4 \
+ { 0x7e, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+#define UVC_GUID_FORMAT_M420 \
+ { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_H264 \
+ { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_H265 \
+ { 'H', '2', '6', '5', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_INVZ \
+ { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \
+ 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b}
+#define UVC_GUID_FORMAT_INZI \
+ { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \
+ 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a}
+#define UVC_GUID_FORMAT_INVI \
+ { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
+ 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
+#define UVC_GUID_FORMAT_CNF4 \
+ { 'C', ' ', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_D3DFMT_L8 \
+ {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
+ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_HEVC \
+ { 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+struct uvc_format_desc {
+ u8 guid[16];
+ u32 fcc;
+};
+
+const struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16]);
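+
+/*
+ * Usage sketch (illustrative only): given the 16-byte GUID from a UVC
+ * format descriptor, a driver maps it to a V4L2 fourcc with
+ *
+ *	const struct uvc_format_desc *fmt = uvc_format_by_guid(guid);
+ *
+ *	if (fmt)
+ *		pixelformat = fmt->fcc;
+ */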
+
+#endif /* __LINUX_V4L2_UVC_H */
diff --git a/include/linux/usb/webusb.h b/include/linux/usb/webusb.h
new file mode 100644
index 000000000000..fe43020b4a48
--- /dev/null
+++ b/include/linux/usb/webusb.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * WebUSB descriptors and constants
+ *
+ * Copyright (C) 2023 Jó Ágila Bitsch <jgilab@gmail.com>
+ */
+
+#ifndef __LINUX_USB_WEBUSB_H
+#define __LINUX_USB_WEBUSB_H
+
+#include "uapi/linux/usb/ch9.h"
+
+/*
+ * Little Endian PlatformCapablityUUID for WebUSB
+ * 3408b638-09a9-47a0-8bfd-a0768815b665
+ * to identify Platform Device Capability descriptors as referring to WebUSB.
+ */
+#define WEBUSB_UUID \
+ GUID_INIT(0x3408b638, 0x09a9, 0x47a0, 0x8b, 0xfd, 0xa0, 0x76, 0x88, 0x15, 0xb6, 0x65)
+
+/*
+ * WebUSB Platform Capability data
+ *
+ * A device announces support for the
+ * WebUSB command set by including the following Platform Descriptor Data in its
+ * Binary Object Store associated with the WebUSB_UUID above.
+ * See: https://wicg.github.io/webusb/#webusb-platform-capability-descriptor
+ */
+struct usb_webusb_cap_data {
+ __le16 bcdVersion;
+#define WEBUSB_VERSION_1_00 cpu_to_le16(0x0100) /* currently only version 1.00 is defined */
+ u8 bVendorCode;
+ u8 iLandingPage;
+#define WEBUSB_LANDING_PAGE_NOT_PRESENT 0
+#define WEBUSB_LANDING_PAGE_PRESENT 1 /* we chose the fixed index 1 for the URL descriptor */
+} __packed;
+
+#define USB_WEBUSB_CAP_DATA_SIZE 4
+
+/*
+ * Get URL Request
+ *
+ * The request to fetch a URL is defined in https://wicg.github.io/webusb/#get-url as:
+ * bmRequestType: (USB_DIR_IN | USB_TYPE_VENDOR) = 11000000B
+ * bRequest: bVendorCode
+ * wValue: iLandingPage
+ * wIndex: GET_URL = 2
+ * wLength: Descriptor Length (typically U8_MAX = 255)
+ * Data: URL Descriptor
+ */
+#define WEBUSB_GET_URL 2
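+
+/*
+ * Host-side sketch (illustrative only, not part of this header): fetching
+ * the landing page URL with the core helper usb_control_msg() could look
+ * like
+ *
+ *	usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), bVendorCode,
+ *			USB_DIR_IN | USB_TYPE_VENDOR, iLandingPage,
+ *			WEBUSB_GET_URL, buf, U8_MAX, USB_CTRL_GET_TIMEOUT);
+ */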
+
+/*
+ * This descriptor contains a single URL and is returned by the Get URL request.
+ *
+ * See: https://wicg.github.io/webusb/#url-descriptor
+ */
+struct webusb_url_descriptor {
+ u8 bLength;
+#define WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH 3
+ u8 bDescriptorType;
+#define WEBUSB_URL_DESCRIPTOR_TYPE 3
+ u8 bScheme;
+#define WEBUSB_URL_SCHEME_HTTP 0
+#define WEBUSB_URL_SCHEME_HTTPS 1
+#define WEBUSB_URL_SCHEME_NONE 255
+ u8 URL[U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH];
+} __packed;
+
+/*
+ * Buffer size to hold the longest URL that can be in a URL descriptor
+ *
+ * The descriptor can be U8_MAX bytes long.
+ * WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH bytes are used for a header.
+ * Since the longest prefix that might be stripped is "https://", we accommodate an additional
+ * 8 bytes.
+ */
+#define WEBUSB_URL_RAW_MAX_LENGTH (U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + 8)
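+/* i.e. 255 - 3 + 8 = 260 bytes */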
+
+#endif /* __LINUX_USB_WEBUSB_H */
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
index 0a37f1283bf0..171fd74b1cfc 100644
--- a/include/linux/usb/xhci-dbgp.h
+++ b/include/linux/usb/xhci-dbgp.h
@@ -5,17 +5,13 @@
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_XHCI_DBGP_H
#define __LINUX_XHCI_DBGP_H
#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
-int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_parse_parameter(char *s, int keep_early);
int __init early_xdbc_setup_hardware(void);
void __init early_xdbc_register_console(void);
#else
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 4a19ac3f24d0..712363c7a2e8 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -84,6 +84,10 @@
/* Cannot handle REPORT_LUNS */ \
US_FLAG(ALWAYS_SYNC, 0x20000000) \
/* lies about caching, so always sync */ \
+ US_FLAG(NO_SAME, 0x40000000) \
+ /* Cannot handle WRITE_SAME */ \
+ US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \
+ /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_events.h b/include/linux/user_events.h
new file mode 100644
index 000000000000..8afa8c3a0973
--- /dev/null
+++ b/include/linux/user_events.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Microsoft Corporation.
+ *
+ * Authors:
+ * Beau Belgrave <beaub@linux.microsoft.com>
+ */
+
+#ifndef _LINUX_USER_EVENTS_H
+#define _LINUX_USER_EVENTS_H
+
+#include <linux/list.h>
+#include <linux/refcount.h>
+#include <linux/mm_types.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/user_events.h>
+
+#ifdef CONFIG_USER_EVENTS
+struct user_event_mm {
+ struct list_head mms_link;
+ struct list_head enablers;
+ struct mm_struct *mm;
+ /* Used for one-shot lists, protected by event_mutex */
+ struct user_event_mm *next;
+ refcount_t refcnt;
+ refcount_t tasks;
+ struct rcu_work put_rwork;
+};
+
+extern void user_event_mm_dup(struct task_struct *t,
+ struct user_event_mm *old_mm);
+
+extern void user_event_mm_remove(struct task_struct *t);
+
+static inline void user_events_fork(struct task_struct *t,
+ unsigned long clone_flags)
+{
+ struct user_event_mm *old_mm;
+
+ if (!t || !current->user_event_mm)
+ return;
+
+ old_mm = current->user_event_mm;
+
+ if (clone_flags & CLONE_VM) {
+ t->user_event_mm = old_mm;
+ refcount_inc(&old_mm->tasks);
+ return;
+ }
+
+ user_event_mm_dup(t, old_mm);
+}
+
+static inline void user_events_execve(struct task_struct *t)
+{
+ if (!t || !t->user_event_mm)
+ return;
+
+ user_event_mm_remove(t);
+}
+
+static inline void user_events_exit(struct task_struct *t)
+{
+ if (!t || !t->user_event_mm)
+ return;
+
+ user_event_mm_remove(t);
+}
+#else
+static inline void user_events_fork(struct task_struct *t,
+ unsigned long clone_flags)
+{
+}
+
+static inline void user_events_execve(struct task_struct *t)
+{
+}
+
+static inline void user_events_exit(struct task_struct *t)
+{
+}
+#endif /* CONFIG_USER_EVENTS */
+
+#endif /* _LINUX_USER_EVENTS_H */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6ef1c7109fc4..6030a8235617 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -50,20 +50,38 @@ enum ucount_type {
UCOUNT_INOTIFY_INSTANCES,
UCOUNT_INOTIFY_WATCHES,
#endif
+#ifdef CONFIG_FANOTIFY
+ UCOUNT_FANOTIFY_GROUPS,
+ UCOUNT_FANOTIFY_MARKS,
+#endif
UCOUNT_COUNTS,
};
+enum rlimit_type {
+ UCOUNT_RLIMIT_NPROC,
+ UCOUNT_RLIMIT_MSGQUEUE,
+ UCOUNT_RLIMIT_SIGPENDING,
+ UCOUNT_RLIMIT_MEMLOCK,
+ UCOUNT_RLIMIT_COUNTS,
+};
+
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc;
+#endif
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
struct uid_gid_map projid_map;
- atomic_t count;
struct user_namespace *parent;
int level;
kuid_t owner;
kgid_t group;
struct ns_common ns;
unsigned long flags;
+	/* parent_could_setfcap: true if the creator of this ns had CAP_SETFCAP
+	 * in its effective capability set at the child ns creation time. */
+ bool parent_could_setfcap;
#ifdef CONFIG_KEYS
/* List of joinable keyrings in this namespace. Modification access of
@@ -86,30 +104,62 @@ struct user_namespace {
struct ctl_table_header *sysctls;
#endif
struct ucounts *ucounts;
- int ucount_max[UCOUNT_COUNTS];
+ long ucount_max[UCOUNT_COUNTS];
+ long rlimit_max[UCOUNT_RLIMIT_COUNTS];
+
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+ struct binfmt_misc *binfmt_misc;
+#endif
} __randomize_layout;
struct ucounts {
struct hlist_node node;
struct user_namespace *ns;
kuid_t uid;
- int count;
- atomic_t ucount[UCOUNT_COUNTS];
+ atomic_t count;
+ atomic_long_t ucount[UCOUNT_COUNTS];
+ atomic_long_t rlimit[UCOUNT_RLIMIT_COUNTS];
};
extern struct user_namespace init_user_ns;
+extern struct ucounts init_ucounts;
bool setup_userns_sysctls(struct user_namespace *ns);
void retire_userns_sysctls(struct user_namespace *ns);
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
+struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
+struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
+void put_ucounts(struct ucounts *ucounts);
+
+static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type type)
+{
+ return atomic_long_read(&ucounts->rlimit[type]);
+}
+
+long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
+
+static inline long get_userns_rlimit_max(struct user_namespace *ns, enum rlimit_type type)
+{
+ return READ_ONCE(ns->rlimit_max[type]);
+}
+
+static inline void set_userns_rlimit_max(struct user_namespace *ns,
+ enum rlimit_type type, unsigned long max)
+{
+ ns->rlimit_max[type] = max <= LONG_MAX ? max : LONG_MAX;
+}
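
A hedged sketch of the charge/uncharge pattern these helpers enable; the function is illustrative and the choice of UCOUNT_RLIMIT_NPROC is an assumption, not code from this patch:

	/* Charge one unit against a ucounts rlimit, rolling back if the
	 * configured maximum would be exceeded. */
	static int charge_one(struct ucounts *ucounts, unsigned long max)
	{
		inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_NPROC, 1);
		if (is_rlimit_overlimit(ucounts, UCOUNT_RLIMIT_NPROC, max)) {
			dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_NPROC, 1);
			return -EAGAIN;
		}
		return 0;
	}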
#ifdef CONFIG_USER_NS
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
@@ -119,7 +169,7 @@ extern void __put_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
__put_user_ns(ns);
}
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index a8e5f3ea9bb2..05d59f74fc88 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -15,7 +15,13 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
+#include <linux/hugetlb_inline.h>
+
+/* The set of all possible UFFD-related VM flags. */
+#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -30,20 +36,114 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
-extern int sysctl_unprivileged_userfaultfd;
+/*
+ * Start with fault_pending_wqh and fault_wqh so they're more likely
+ * to be in the same cacheline.
+ *
+ * Locking order:
+ * fd_wqh.lock
+ * fault_pending_wqh.lock
+ * fault_wqh.lock
+ * event_wqh.lock
+ *
+ * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
+ * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
+ * also taken in IRQ context.
+ */
+struct userfaultfd_ctx {
+ /* waitqueue head for the pending (i.e. not read) userfaults */
+ wait_queue_head_t fault_pending_wqh;
+ /* waitqueue head for the userfaults */
+ wait_queue_head_t fault_wqh;
+ /* waitqueue head for the pseudo fd to wakeup poll/read */
+ wait_queue_head_t fd_wqh;
+ /* waitqueue head for events */
+ wait_queue_head_t event_wqh;
+ /* a refile sequence protected by fault_pending_wqh lock */
+ seqcount_spinlock_t refile_seq;
+ /* pseudo fd refcounting */
+ refcount_t refcount;
+ /* userfaultfd syscall flags */
+ unsigned int flags;
+ /* features requested from the userspace */
+ unsigned int features;
+ /* released */
+ bool released;
+ /*
+ * Prevents userfaultfd operations (fill/move/wp) from happening while
+	 * a non-cooperative event is taking place. Increments are done
+	 * in write-mode, whereas userfaultfd operations, which include
+	 * reading mmap_changing, are done under read-mode.
+ */
+ struct rw_semaphore map_changing_lock;
+ /* memory mappings are changing because of non-cooperative event */
+ atomic_t mmap_changing;
+	/* mm with one or more vmas attached to this userfaultfd_ctx */
+ struct mm_struct *mm;
+};
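
A short sketch of the locking discipline documented above, assuming a ctx pointer to the structure just defined (the body is elided):

	unsigned long flags;

	spin_lock_irqsave(&ctx->fd_wqh.lock, flags);
	spin_lock(&ctx->fault_pending_wqh.lock);
	/* ... refile or wake pending userfaults ... */
	spin_unlock(&ctx->fault_pending_wqh.lock);
	spin_unlock_irqrestore(&ctx->fd_wqh.lock, flags);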
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
-extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
- unsigned long src_start, unsigned long len,
- bool *mmap_changing, __u64 mode);
-extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
- unsigned long dst_start,
- unsigned long len,
- bool *mmap_changing);
-extern int mwriteprotect_range(struct mm_struct *dst_mm,
- unsigned long start, unsigned long len,
- bool enable_wp, bool *mmap_changing);
+/* A combined operation mode + behavior flags. */
+typedef unsigned int __bitwise uffd_flags_t;
+
+/* Mutually exclusive modes of operation. */
+enum mfill_atomic_mode {
+ MFILL_ATOMIC_COPY,
+ MFILL_ATOMIC_ZEROPAGE,
+ MFILL_ATOMIC_CONTINUE,
+ MFILL_ATOMIC_POISON,
+ NR_MFILL_ATOMIC_MODES,
+};
+
+#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
+#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
+#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
+#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))
+
+static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
+{
+ return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
+}
+
+static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
+{
+ flags &= ~MFILL_ATOMIC_MODE_MASK;
+ return flags | ((__force uffd_flags_t) mode);
+}
+
+/* Flags controlling behavior. These behavior changes are mode-independent. */
+#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
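
Putting the mode/flag encoding together, a caller-side sketch (variable names are illustrative):

	uffd_flags_t flags = 0;

	flags = uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY);
	flags |= MFILL_ATOMIC_WP;	/* copy, then leave pages write-protected */

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
		; /* dispatch to the copy path */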
+
+extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, struct page *page,
+ bool newly_allocated, uffd_flags_t flags);
+
+extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+ unsigned long src_start, unsigned long len,
+ uffd_flags_t flags);
+extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
+ unsigned long dst_start,
+ unsigned long len);
+extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+ unsigned long len, uffd_flags_t flags);
+extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
+ unsigned long len, uffd_flags_t flags);
+extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
+ unsigned long len, bool enable_wp);
+extern long uffd_wp_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long len, bool enable_wp);
+
+/* move_pages */
+void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
+void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
+ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+ unsigned long src_start, unsigned long len, __u64 flags);
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
@@ -52,6 +152,34 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}
+/*
+ * Never enable huge pmd sharing on some uffd registered vmas:
+ *
+ * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
+ *
+ * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
+ * VMAs which share huge pmds. (If you have two mappings to the same
+ * underlying pages, and fault in the non-UFFD-registered one with a write,
+ * with huge pmd sharing this would *also* setup the second UFFD-registered
+ * mapping, and we'd not get minor faults.)
+ */
+static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
+}
+
+/*
+ * Don't do fault around for either WP or MINOR registered uffd ranges. For
+ * a MINOR registered range, fault around would be a disaster: ptes could be
+ * installed without notifications. For WP it should mostly be fine as long
+ * as fault around checks for pte_none() before the installation; however,
+ * to be safe we forbid it anyway.
+ */
+static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
+}
+
static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_UFFD_MISSING;
@@ -62,6 +190,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma)
return vma->vm_flags & VM_UFFD_WP;
}
+static inline bool userfaultfd_minor(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_MINOR;
+}
+
static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
pte_t pte)
{
@@ -76,7 +209,39 @@ static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
- return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+ return vma->vm_flags & __VM_UFFD_FLAGS;
+}
+
+static inline bool vma_can_userfault(struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ bool wp_async)
+{
+ vm_flags &= __VM_UFFD_FLAGS;
+
+ if ((vm_flags & VM_UFFD_MINOR) &&
+ (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+ return false;
+
+ /*
+	 * If wp async is enabled and WP is the only mode enabled, allow any
+ * memory type.
+ */
+ if (wp_async && (vm_flags == VM_UFFD_WP))
+ return true;
+
+#ifndef CONFIG_PTE_MARKER_UFFD_WP
+ /*
+	 * If the user requested uffd-wp but pte markers for uffd-wp are
+	 * not enabled, then only anonymous memory is supported, not
+	 * shmem or hugetlbfs.
+ */
+ if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
+ return false;
+#endif
+
+ /* By default, allow any of anon|shmem|hugetlb */
+ return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
+ vma_is_shmem(vma);
}
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
@@ -93,10 +258,11 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long end);
extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf);
+ unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
+extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
+extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
#else /* CONFIG_USERFAULTFD */
@@ -107,6 +273,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
}
+static inline long uffd_wp_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long len,
+ bool enable_wp)
+{
+ return false;
+}
+
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
{
@@ -123,6 +296,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma)
return false;
}
+static inline bool userfaultfd_minor(struct vm_area_struct *vma)
+{
+ return false;
+}
+
static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
pte_t pte)
{
@@ -182,6 +360,83 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
{
}
+static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
+{
+ return false;
+}
+
#endif /* CONFIG_USERFAULTFD */
+static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
+{
+ /* Only wr-protect mode uses pte markers */
+ if (!userfaultfd_wp(vma))
+ return false;
+
+	/* File-based uffd-wp always needs markers */
+ if (!vma_is_anonymous(vma))
+ return true;
+
+ /*
+	 * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED is
+	 * enabled (to apply markers on zero pages).
+ */
+ return userfaultfd_wp_unpopulated(vma);
+}
+
+static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
+{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ return is_pte_marker_entry(entry) &&
+ (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
+#else
+ return false;
+#endif
+}
+
+static inline bool pte_marker_uffd_wp(pte_t pte)
+{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ swp_entry_t entry;
+
+ if (!is_swap_pte(pte))
+ return false;
+
+ entry = pte_to_swp_entry(pte);
+
+ return pte_marker_entry_uffd_wp(entry);
+#else
+ return false;
+#endif
+}
+
+/*
+ * Returns true if this is a swap pte that was uffd-wp wr-protected in
+ * either form (a pte marker or a normal swap pte), false otherwise.
+ */
+static inline bool pte_swp_uffd_wp_any(pte_t pte)
+{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ if (!is_swap_pte(pte))
+ return false;
+
+ if (pte_swp_uffd_wp(pte))
+ return true;
+
+ if (pte_marker_uffd_wp(pte))
+ return true;
+#endif
+ return false;
+}
+
#endif /* _LINUX_USERFAULTFD_K_H */
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
index 073a9e0ec07d..ad970416260d 100644
--- a/include/linux/usermode_driver.h
+++ b/include/linux/usermode_driver.h
@@ -14,5 +14,6 @@ struct umd_info {
int umd_load_blob(struct umd_info *info, const void *data, size_t len);
int umd_unload_blob(struct umd_info *info);
int fork_usermode_driver(struct umd_info *info);
+void umd_cleanup_helper(struct umd_info *info);
#endif /* __LINUX_USERMODE_DRIVER_H__ */
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 72299f261b25..6bb460c3e818 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_HELPER_MACROS_H_
#define _LINUX_HELPER_MACROS_H_
+#include <linux/math.h>
+
#define __find_closest(x, a, as, op) \
({ \
typeof(as) __fc_i, __fc_as = (as) - 1; \
@@ -38,4 +40,16 @@
*/
#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+/**
+ * is_insidevar - check whether @ptr points inside the memory range of @var.
+ * @ptr: the pointer to a memory address.
+ * @var: the variable whose address and size identify the memory range.
+ *
+ * Evaluates to true if the address in @ptr lies within the memory
+ * range allocated to @var.
+ */
+#define is_insidevar(ptr, var) \
+ ((uintptr_t)(ptr) >= (uintptr_t)(var) && \
+ (uintptr_t)(ptr) < (uintptr_t)(var) + sizeof(var))
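
For example (illustrative):

	char buf[64];
	char *p = buf + 10;

	bool inside  = is_insidevar(p, buf);        /* true: p lies within buf */
	bool outside = is_insidevar(buf + 64, buf); /* false: one past the end */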
+
#endif
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 44429d9142ca..bf7613ba412b 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -4,13 +4,13 @@
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
#include <uapi/linux/utsname.h>
enum uts_proc {
+ UTS_PROC_ARCH,
UTS_PROC_OSTYPE,
UTS_PROC_OSRELEASE,
UTS_PROC_VERSION,
@@ -22,7 +22,6 @@ struct user_namespace;
extern struct user_namespace init_user_ns;
struct uts_namespace {
- struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -33,16 +32,17 @@ extern struct uts_namespace init_uts_ns;
#ifdef CONFIG_UTS_NS
static inline void get_uts_ns(struct uts_namespace *ns)
{
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
}
extern struct uts_namespace *copy_utsname(unsigned long flags,
struct user_namespace *user_ns, struct uts_namespace *old_ns);
-extern void free_uts_ns(struct kref *kref);
+extern void free_uts_ns(struct uts_namespace *ns);
static inline void put_uts_ns(struct uts_namespace *ns)
{
- kref_put(&ns->kref, free_uts_ns);
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_uts_ns(ns);
}
void uts_ns_init(void);
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 8cdc0d3567cd..43d4a79b273d 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -8,15 +8,25 @@
#ifndef _LINUX_UUID_H_
#define _LINUX_UUID_H_
-#include <uapi/linux/uuid.h>
#include <linux/string.h>
#define UUID_SIZE 16
typedef struct {
__u8 b[UUID_SIZE];
+} guid_t;
+
+typedef struct {
+ __u8 b[UUID_SIZE];
} uuid_t;
+#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((guid_t) \
+{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_t) \
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
@@ -97,10 +107,4 @@ extern const u8 uuid_index[16];
int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
-/* backwards compatibility, don't use in new code */
-static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
-{
- return memcmp(&u1, &u2, sizeof(guid_t));
-}
-
#endif
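
The practical difference between GUID_INIT and UUID_INIT above is byte order: the same first field is stored little-endian in a guid_t and big-endian in a uuid_t. An illustrative sketch:

	static const guid_t g = GUID_INIT(0x12345678, 0x9abc, 0xdef0,
					  0, 1, 2, 3, 4, 5, 6, 7);
	static const uuid_t u = UUID_INIT(0x12345678, 0x9abc, 0xdef0,
					  0, 1, 2, 3, 4, 5, 6, 7);
	/* g.b[0] == 0x78 (little-endian), u.b[0] == 0x12 (big-endian) */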
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index eae0bfd87d91..7977ca03ac7a 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -6,19 +6,28 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_blk.h>
+#include <linux/if_ether.h>
/**
- * vDPA callback definition.
+ * struct vdpa_callback - vDPA callback definition.
* @callback: interrupt callback function
* @private: the data passed to the callback function
+ * @trigger: the eventfd for the callback (Optional).
+ *            When it is set, the vDPA driver must guarantee that
+ *            signaling it is functionally equivalent to triggering
+ *            the callback. The vDPA parent can then signal it directly
+ *            instead of triggering the callback.
*/
struct vdpa_callback {
irqreturn_t (*callback)(void *data);
void *private;
+ struct eventfd_ctx *trigger;
};
/**
- * vDPA notification area
+ * struct vdpa_notification_area - vDPA notification area
* @addr: base address of the notification area
* @size: size of the notification area
*/
@@ -28,32 +37,101 @@ struct vdpa_notification_area {
};
/**
- * vDPA vq_state definition
+ * struct vdpa_vq_state_split - vDPA split virtqueue state
* @avail_index: available index
*/
-struct vdpa_vq_state {
+struct vdpa_vq_state_split {
u16 avail_index;
};
/**
- * vDPA device - representation of a vDPA device
+ * struct vdpa_vq_state_packed - vDPA packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vdpa_vq_state_packed {
+ u16 last_avail_counter:1;
+ u16 last_avail_idx:15;
+ u16 last_used_counter:1;
+ u16 last_used_idx:15;
+};
+
+struct vdpa_vq_state {
+ union {
+ struct vdpa_vq_state_split split;
+ struct vdpa_vq_state_packed packed;
+ };
+};
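
A hedged sketch of snapshotting and restoring virtqueue state through this union; error handling is elided and the surrounding driver code is assumed:

	struct vdpa_vq_state state;

	vdev->config->get_vq_state(vdev, idx, &state);
	/* ... suspend, migrate, or reset the device ... */
	vdev->config->set_vq_state(vdev, idx, &state);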
+
+struct vdpa_mgmt_dev;
+
+/**
+ * struct vdpa_device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @config: the configuration ops for this device.
+ * @cf_lock: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
+ * @ngroups: the number of virtqueue groups
+ * @nas: the number of address spaces
+ * @use_va: indicate whether virtual address must be used by this device
+ * @nvqs: maximum number of supported virtqueues
+ * @mdev: management device pointer; the caller must set this up when registering
+ *        the device, as part of the dev_add() mgmtdev ops callback, before
+ *        invoking _vdpa_register_device().
*/
struct vdpa_device {
struct device dev;
struct device *dma_dev;
+ const char *driver_override;
const struct vdpa_config_ops *config;
+ struct rw_semaphore cf_lock; /* Protects get/set config */
unsigned int index;
bool features_valid;
- int nvqs;
+ bool use_va;
+ u32 nvqs;
+ struct vdpa_mgmt_dev *mdev;
+ unsigned int ngroups;
+ unsigned int nas;
+};
+
+/**
+ * struct vdpa_iova_range - the IOVA range supported by the device
+ * @first: start of the IOVA range
+ * @last: end of the IOVA range
+ */
+struct vdpa_iova_range {
+ u64 first;
+ u64 last;
+};
+
+struct vdpa_dev_set_config {
+ u64 device_features;
+ struct {
+ u8 mac[ETH_ALEN];
+ u16 mtu;
+ u16 max_vq_pairs;
+ } net;
+ u64 mask;
+};
+
+/**
+ * struct vdpa_map_file - file area for device memory mapping
+ * @file: vma->vm_file for the mapping
+ * @offset: mapping offset in the vm_file
+ */
+struct vdpa_map_file {
+ struct file *file;
+ u64 offset;
};
/**
- * vDPA_config_ops - operations for configuring a vDPA device.
+ * struct vdpa_config_ops - operations for configuring a vDPA device.
* Note: vDPA device drivers are required to implement all of the
* operations unless it is mentioned to be optional in the following
* list.
@@ -72,6 +150,14 @@ struct vdpa_device {
* @kick_vq: Kick the virtqueue
* @vdev: vdpa device
* @idx: virtqueue index
+ * @kick_vq_with_data: Kick the virtqueue and supply extra data
+ * (only if VIRTIO_F_NOTIFICATION_DATA is negotiated)
+ * @vdev: vdpa device
+ * @data for split virtqueue:
+ * 16 bits vqn and 16 bits next available index.
+ * @data for packed virtqueue:
+ * 16 bits vqn, 15 least significant bits of
+ * next available index and 1 bit next_wrap.
* @set_vq_cb: Set the interrupt callback function for
* a virtqueue
* @vdev: vdpa device
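
A sketch of packing the split-virtqueue payload described above, assuming (per the listed order) that vqn occupies the low 16 bits; the variable names are illustrative:

	u32 data = (u32)vqn | ((u32)next_avail_idx << 16);

	vdev->config->kick_vq_with_data(vdev, data);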
@@ -94,34 +180,68 @@ struct vdpa_device {
* @vdev: vdpa device
* @idx: virtqueue index
* @state: pointer to returned state (last_avail_idx)
- * @get_vq_notification: Get the notification area for a virtqueue
+ * @get_vendor_vq_stats: Get the vendor statistics of a device.
* @vdev: vdpa device
* @idx: virtqueue index
- * Returns the notifcation area
+ * @msg: socket buffer holding stats message
+ * @extack: extack for reporting error messages
+ * Returns integer: success (0) or error (< 0)
+ * @get_vq_notification: Get the notification area for a virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns the notification area
* @get_vq_irq: Get the irq number of a virtqueue (optional,
 *				but must be implemented if vq irq offloading is required)
* @vdev: vdpa device
* @idx: virtqueue index
* Returns int: irq number of a virtqueue,
* negative number if no irq assigned.
+ * @get_vq_size: Get the size of a specific virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Return u16: the size of the virtqueue
* @get_vq_align: Get the virtqueue align requirement
* for the device
* @vdev: vdpa device
 *				Returns virtqueue align requirement
- * @get_features: Get virtio features supported by the device
+ * @get_vq_group: Get the group id for a specific
+ * virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for this virtqueue
+ * @get_vq_desc_group: Get the group id for the descriptor table of
+ * a specific virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for the descriptor table
+ * portion of this virtqueue. Could be different
+ * than the one from @get_vq_group, in which case
+ * the access to the descriptor table can be
+ * confined to a separate asid, isolating from
+ * the virtqueue's buffer address access.
+ * @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
 *				Returns the virtio features supported by the
* device
- * @set_features: Set virtio features supported by the driver
+ * @get_backend_features: Get parent-specific backend features (optional)
+ * Returns the vdpa features supported by the
+ * device.
+ * @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
 *				@features: features supported by the driver
* Returns integer: success (0) or error (< 0)
+ * @get_driver_features: Get the virtio driver features in action
+ * @vdev: vdpa device
+ * Returns the virtio features accepted
* @set_config_cb: Set the config interrupt callback
* @vdev: vdpa device
* @cb: virtio-vdev interrupt callback structure
* @get_vq_num_max: Get the max size of virtqueue
* @vdev: vdpa device
* Returns u16: max size of virtqueue
+ * @get_vq_num_min: Get the min size of virtqueue (optional)
+ * @vdev: vdpa device
+ * Returns u16: min size of virtqueue
* @get_device_id: Get virtio device id
* @vdev: vdpa device
* Returns u32: virtio device id
@@ -134,6 +254,30 @@ struct vdpa_device {
* @set_status: Set the device status
* @vdev: vdpa device
* @status: virtio device status
+ * @reset: Reset device
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
+ * @compat_reset: Reset device with compatibility quirks to
+ *				accommodate older userspace. Only needed by a
+ *				parent driver that used to have bogus reset
+ *				behaviour and has to maintain such behaviour
+ *				for compatibility with older userspace. A
+ *				historically compliant driver only has to
+ *				implement .reset; a historically non-compliant
+ *				driver should implement both.
+ * @vdev: vdpa device
+ * @flags: compatibility quirks for reset
+ * Returns integer: success (0) or error (< 0)
+ * @suspend: Suspend the device (optional)
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
+ * @resume: Resume the device (optional)
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
+ * @get_config_size:		Get the size of the configuration space,
+ *				including fields that are conditional on
+ *				feature bits.
+ * @vdev: vdpa device
+ * Returns size_t: configuration size
* @get_config: Read from device specific configuration space
* @vdev: vdpa device
* @offset: offset from the beginning of
@@ -151,10 +295,30 @@ struct vdpa_device {
* @get_generation: Get device config generation (optional)
* @vdev: vdpa device
* Returns u32: device generation
+ * @get_iova_range: Get supported iova range (optional)
+ * @vdev: vdpa device
+ * Returns the iova range supported by
+ * the device.
+ * @set_vq_affinity: Set the affinity of virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * @cpu_mask: the affinity mask
+ * Returns integer: success (0) or error (< 0)
+ * @get_vq_affinity: Get the affinity of virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns the affinity mask
+ * @set_group_asid: Set address space identifier for a
+ * virtqueue group (optional)
+ * @vdev: vdpa device
+ * @group: virtqueue group
+ * @asid: address space id for this group
+ * Returns integer: success (0) or error (< 0)
* @set_map: Set device memory mapping (optional)
 *				Needed for devices that use device
* specific DMA translation (on-chip IOMMU)
* @vdev: vdpa device
+ * @asid: address space identifier
* @iotlb: vhost memory mapping to be
* used by the vDPA
* Returns integer: success (0) or error (< 0)
@@ -163,6 +327,7 @@ struct vdpa_device {
* specific DMA translation (on-chip IOMMU)
* and preferring incremental map.
* @vdev: vdpa device
+ * @asid: address space identifier
* @iova: iova to be mapped
* @size: size of the area
* @pa: physical address for the map
@@ -174,9 +339,32 @@ struct vdpa_device {
* specific DMA translation (on-chip IOMMU)
* and preferring incremental unmap.
* @vdev: vdpa device
+ * @asid: address space identifier
* @iova: iova to be unmapped
* @size: size of the area
* Returns integer: success (0) or error (< 0)
+ * @reset_map: Reset device memory mapping to the default
+ * state (optional)
+ * Needed for devices that are using device
+ * specific DMA translation and prefer mapping
+ * to be decoupled from the virtio life cycle,
+ * i.e. device .reset op does not reset mapping
+ * @vdev: vdpa device
+ * @asid: address space identifier
+ * Returns integer: success (0) or error (< 0)
+ * @get_vq_dma_dev: Get the dma device for a specific
+ * virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ *				Returns a pointer to struct device on success, or NULL on error
+ * @bind_mm: Bind the device to a specific address space
+ * so the vDPA framework can use VA when this
+ * callback is implemented. (optional)
+ * @vdev: vdpa device
+ * @mm: address space to bind
+ * @unbind_mm: Unbind the device from the address space
+ * bound using the bind_mm callback. (optional)
+ * @vdev: vdpa device
* @free: Free resources that belongs to vDPA (optional)
* @vdev: vdpa device
*/
@@ -187,6 +375,7 @@ struct vdpa_config_ops {
u64 device_area);
void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
+ void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data);
void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
struct vdpa_callback *cb);
void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
@@ -195,33 +384,61 @@ struct vdpa_config_ops {
const struct vdpa_vq_state *state);
int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
struct vdpa_vq_state *state);
+ int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack);
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
- int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
+ int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
+ u16 (*get_vq_size)(struct vdpa_device *vdev, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
- u64 (*get_features)(struct vdpa_device *vdev);
- int (*set_features)(struct vdpa_device *vdev, u64 features);
+ u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
+ u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
+ u64 (*get_device_features)(struct vdpa_device *vdev);
+ u64 (*get_backend_features)(const struct vdpa_device *vdev);
+ int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
+ u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
struct vdpa_callback *cb);
u16 (*get_vq_num_max)(struct vdpa_device *vdev);
+ u16 (*get_vq_num_min)(struct vdpa_device *vdev);
u32 (*get_device_id)(struct vdpa_device *vdev);
u32 (*get_vendor_id)(struct vdpa_device *vdev);
u8 (*get_status)(struct vdpa_device *vdev);
void (*set_status)(struct vdpa_device *vdev, u8 status);
+ int (*reset)(struct vdpa_device *vdev);
+ int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
+#define VDPA_RESET_F_CLEAN_MAP 1
+ int (*suspend)(struct vdpa_device *vdev);
+ int (*resume)(struct vdpa_device *vdev);
+ size_t (*get_config_size)(struct vdpa_device *vdev);
void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
const void *buf, unsigned int len);
u32 (*get_generation)(struct vdpa_device *vdev);
+ struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
+ int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx,
+ const struct cpumask *cpu_mask);
+ const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
+ u16 idx);
/* DMA ops */
- int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
- int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
- u64 pa, u32 perm);
- int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
+ int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb);
+ int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
+ int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
+ u64 iova, u64 size);
+ int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
+ int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
+ unsigned int asid);
+ struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
+ int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
+ void (*unbind_mm)(struct vdpa_device *vdev);
/* Free device resources */
void (*free)(struct vdpa_device *vdev);
@@ -229,22 +446,41 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- int nvqs,
- size_t size);
+ unsigned int ngroups, unsigned int nas,
+ size_t size, const char *name,
+ bool use_va);
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
- container_of(__vdpa_alloc_device( \
- parent, config, nvqs, \
- sizeof(dev_struct) + \
+/**
+ * vdpa_alloc_device - allocate and initialize a vDPA device
+ *
+ * @dev_struct: the type of the parent structure
+ * @member: the name of struct vdpa_device within the @dev_struct
+ * @parent: the parent device
+ * @config: the bus operations that are supported by this device
+ * @ngroups: the number of virtqueue groups supported by this device
+ * @nas: the number of address spaces
+ * @name: name of the vdpa device
+ * @use_va: indicate whether virtual address must be used by this device
+ *
+ * Return allocated data structure or ERR_PTR upon error
+ */
+#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
+ name, use_va) \
+ container_of((__vdpa_alloc_device( \
+ parent, config, ngroups, nas, \
+ (sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- dev_struct, member))), \
+ dev_struct, member))), name, use_va)), \
dev_struct, member)
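
A hypothetical parent-driver allocation using the macro above; the struct, ops, and argument values are assumptions for illustration:

	struct my_vdpa {
		struct vdpa_device vdev;
		/* driver-private state ... */
	};

	struct my_vdpa *m = vdpa_alloc_device(struct my_vdpa, vdev, parent,
					      &my_config_ops, 1, 1, "vdpa0",
					      false);
	/* m is ERR_PTR() encoded on failure; check with IS_ERR(). */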
-int vdpa_register_device(struct vdpa_device *vdev);
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
+void _vdpa_unregister_device(struct vdpa_device *vdev);
+
/**
- * vdpa_driver - operations for a vDPA driver
+ * struct vdpa_driver - operations for a vDPA driver
* @driver: underlying device driver
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function to call when a device is removed.
@@ -289,35 +525,92 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
return vdev->dma_dev;
}
-static inline void vdpa_reset(struct vdpa_device *vdev)
+static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
{
- const struct vdpa_config_ops *ops = vdev->config;
+ const struct vdpa_config_ops *ops = vdev->config;
+ int ret;
+ down_write(&vdev->cf_lock);
vdev->features_valid = false;
- ops->set_status(vdev, 0);
+ if (ops->compat_reset && flags)
+ ret = ops->compat_reset(vdev, flags);
+ else
+ ret = ops->reset(vdev);
+ up_write(&vdev->cf_lock);
+ return ret;
}
-static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
{
- const struct vdpa_config_ops *ops = vdev->config;
+ const struct vdpa_config_ops *ops = vdev->config;
+ int ret;
vdev->features_valid = true;
- return ops->set_features(vdev, features);
-}
+ ret = ops->set_driver_features(vdev, features);
+ return ret;
+}
-static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
- void *buf, unsigned int len)
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
- const struct vdpa_config_ops *ops = vdev->config;
-
- /*
- * Config accesses aren't supposed to trigger before features are set.
- * If it does happen we assume a legacy guest.
- */
- if (!vdev->features_valid)
- vdpa_set_features(vdev, 0);
- ops->get_config(vdev, offset, buf, len);
+ int ret;
+
+ down_write(&vdev->cf_lock);
+ ret = vdpa_set_features_unlocked(vdev, features);
+ up_write(&vdev->cf_lock);
+
+ return ret;
}
+void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
+ void *buf, unsigned int len);
+void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
+ const void *buf, unsigned int length);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status);
+
+/**
+ * struct vdpa_mgmtdev_ops - vdpa management device ops
+ * @dev_add: Add a vdpa device using alloc and register
+ * @mdev: parent device to use for device addition
+ * @name: name of the new vdpa device
+ * @config: config attributes to apply to the device under creation
+ *	      The driver needs to add the new device using
+ *	      _vdpa_register_device() after fully initializing the vdpa
+ *	      device, and must return 0 on success or an appropriate error code.
+ * @dev_del: Remove a vdpa device using unregister
+ * @mdev: parent device to use for device removal
+ * @dev: vdpa device to remove
+ *	      The driver needs to remove the specified device by calling
+ * _vdpa_unregister_device().
+ */
+struct vdpa_mgmtdev_ops {
+ int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config);
+ void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+};
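
A minimal wiring sketch (all names here are hypothetical):

	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
		.dev_add = my_dev_add,	/* allocate, init, then _vdpa_register_device() */
		.dev_del = my_dev_del,	/* tear down via _vdpa_unregister_device() */
	};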
+
+/**
+ * struct vdpa_mgmt_dev - vdpa management device
+ * @device: Management parent device
+ * @ops: operations supported by management device
+ * @id_table: Pointer to device id table of supported ids
+ * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
+ *                     the management device supports during the dev_add callback
+ * @list: list entry
+ * @supported_features: features supported by device
+ * @max_supported_vqs: maximum number of virtqueues supported by device
+ */
+struct vdpa_mgmt_dev {
+ struct device *device;
+ const struct vdpa_mgmtdev_ops *ops;
+ struct virtio_device_id *id_table;
+ u64 config_attr_mask;
+ struct list_head list;
+ u64 supported_features;
+ u32 max_supported_vqs;
+};
+
+int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
+void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
+
#endif /* _LINUX_VDPA_H */
diff --git a/include/linux/verification.h b/include/linux/verification.h
index 911ab7c2b1ab..cb2d47f28091 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -8,6 +8,9 @@
#ifndef _LINUX_VERIFICATION_H
#define _LINUX_VERIFICATION_H
+#include <linux/errno.h>
+#include <linux/types.h>
+
/*
* Indicate that both builtin trusted keys and secondary trusted keys
* should be used.
@@ -15,6 +18,14 @@
#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL)
+static inline int system_keyring_id_check(u64 id)
+{
+ if (id > (unsigned long)VERIFY_USE_PLATFORM_KEYRING)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* The use to which an asymmetric key is being put.
*/
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 1eaaa93c37bf..a54046bf37e5 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -15,7 +15,7 @@
#else
#define MODULE_VERMAGIC_SMP ""
#endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_BUILD
#define MODULE_VERMAGIC_PREEMPT "preempt "
#elif defined(CONFIG_PREEMPT_RT)
#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
@@ -32,11 +32,11 @@
#else
#define MODULE_VERMAGIC_MODVERSIONS ""
#endif
-#ifdef RANDSTRUCT_PLUGIN
-#include <generated/randomize_layout_hash.h>
-#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#define MODULE_RANDSTRUCT "RANDSTRUCT_" RANDSTRUCT_HASHED_SEED
#else
-#define MODULE_RANDSTRUCT_PLUGIN
+#define MODULE_RANDSTRUCT
#endif
#define VERMAGIC_STRING \
@@ -44,6 +44,6 @@
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
MODULE_ARCH_VERMAGIC \
- MODULE_RANDSTRUCT_PLUGIN
+ MODULE_RANDSTRUCT
#endif /* _LINUX_VERMAGIC_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 38d3c6a8dc7e..8b1a29820409 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -13,13 +13,85 @@
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
+#include <linux/cdev.h>
#include <uapi/linux/vfio.h>
+#include <linux/iova_bitmap.h>
+
+struct kvm;
+struct iommufd_ctx;
+struct iommufd_device;
+struct iommufd_access;
+
+/*
+ * VFIO devices can be placed in a set; this allows all devices to share this
+ * structure, and the VFIO core will provide a lock that is held around
+ * open_device()/close_device() for all devices in the set.
+ */
+struct vfio_device_set {
+ void *set_id;
+ struct mutex lock;
+ struct list_head device_list;
+ unsigned int device_count;
+};
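
A hedged sketch of how a multi-function driver might opt into a shared set from its init callback, keying the set (as an assumption) on the parent device:

	static int my_vfio_init(struct vfio_device *vdev)
	{
		/* Devices registered with the same set_id share one
		 * open_device()/close_device() lock. */
		return vfio_assign_device_set(vdev, vdev->dev->parent);
	}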
+
+struct vfio_device {
+ struct device *dev;
+ const struct vfio_device_ops *ops;
+ /*
+ * mig_ops/log_ops is a static property of the vfio_device which must
+ * be set prior to registering the vfio_device.
+ */
+ const struct vfio_migration_ops *mig_ops;
+ const struct vfio_log_ops *log_ops;
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
+ struct vfio_group *group;
+ struct list_head group_next;
+ struct list_head iommu_entry;
+#endif
+ struct vfio_device_set *dev_set;
+ struct list_head dev_set_list;
+ unsigned int migration_flags;
+ struct kvm *kvm;
+
+ /* Members below here are private, not for driver use */
+ unsigned int index;
+	struct device device;	/* device.kref covers object life cycle */
+#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
+ struct cdev cdev;
+#endif
+	refcount_t refcount;	/* user count on registered device */
+ unsigned int open_count;
+ struct completion comp;
+ struct iommufd_access *iommufd_access;
+ void (*put_kvm)(struct kvm *kvm);
+#if IS_ENABLED(CONFIG_IOMMUFD)
+ struct iommufd_device *iommufd_device;
+ u8 iommufd_attached:1;
+#endif
+ u8 cdev_opened:1;
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * debug_root is a static property of the vfio_device
+ * which must be set prior to registering the vfio_device.
+ */
+ struct dentry *debug_root;
+#endif
+};
/**
* struct vfio_device_ops - VFIO bus driver device callbacks
*
- * @open: Called when userspace creates new file descriptor for device
- * @release: Called when userspace releases file descriptor for device
+ * @name: Name of the device driver.
+ * @init: initialize private fields in device structure
+ * @release: Reclaim private fields in device structure
+ * @bind_iommufd: Called when binding the device to an iommufd
+ * @unbind_iommufd: Opposite of bind_iommufd
+ * @attach_ioas: Called when attaching device to an IOAS/HWPT managed by the
+ * bound iommufd. Undo in unbind_iommufd if @detach_ioas is not
+ * called.
+ * @detach_ioas: Opposite of attach_ioas
+ * @open_device: Called when the first file descriptor is opened for this device
+ * @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
* @write: Perform write(2) on device file descriptor
* @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
@@ -29,125 +101,228 @@
* @match: Optional device name match callback (return: 0 for no-match, >0 for
* match, -errno for abort (ex. match with insufficient or incorrect
* additional args)
+ * @dma_unmap: Called when userspace unmaps IOVA from the container
+ * this device is attached to.
+ * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
*/
struct vfio_device_ops {
char *name;
- int (*open)(void *device_data);
- void (*release)(void *device_data);
- ssize_t (*read)(void *device_data, char __user *buf,
+ int (*init)(struct vfio_device *vdev);
+ void (*release)(struct vfio_device *vdev);
+ int (*bind_iommufd)(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx, u32 *out_device_id);
+ void (*unbind_iommufd)(struct vfio_device *vdev);
+ int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
+ void (*detach_ioas)(struct vfio_device *vdev);
+ int (*open_device)(struct vfio_device *vdev);
+ void (*close_device)(struct vfio_device *vdev);
+ ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos);
- ssize_t (*write)(void *device_data, const char __user *buf,
+ ssize_t (*write)(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *size);
- long (*ioctl)(void *device_data, unsigned int cmd,
+ long (*ioctl)(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg);
- int (*mmap)(void *device_data, struct vm_area_struct *vma);
- void (*request)(void *device_data, unsigned int count);
- int (*match)(void *device_data, char *buf);
+ int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
+ void (*request)(struct vfio_device *vdev, unsigned int count);
+ int (*match)(struct vfio_device *vdev, char *buf);
+ void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
+ int (*device_feature)(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz);
};
-extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
-extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
+#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev);
+int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx);
+int vfio_iommufd_physical_bind(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx, u32 *out_device_id);
+void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
+int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
+void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev);
+int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx, u32 *out_device_id);
+void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
+int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
+void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev);
+#else
+static inline struct iommufd_ctx *
+vfio_iommufd_device_ictx(struct vfio_device *vdev)
+{
+ return NULL;
+}
-extern int vfio_add_group_dev(struct device *dev,
- const struct vfio_device_ops *ops,
- void *device_data);
+static inline int
+vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+{
+ return VFIO_PCI_DEVID_NOT_OWNED;
+}
-extern void *vfio_del_group_dev(struct device *dev);
-extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
-extern void vfio_device_put(struct vfio_device *device);
-extern void *vfio_device_data(struct vfio_device *device);
+#define vfio_iommufd_physical_bind \
+ ((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
+ u32 *out_device_id)) NULL)
+#define vfio_iommufd_physical_unbind \
+ ((void (*)(struct vfio_device *vdev)) NULL)
+#define vfio_iommufd_physical_attach_ioas \
+ ((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
+#define vfio_iommufd_physical_detach_ioas \
+ ((void (*)(struct vfio_device *vdev)) NULL)
+#define vfio_iommufd_emulated_bind \
+ ((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
+ u32 *out_device_id)) NULL)
+#define vfio_iommufd_emulated_unbind \
+ ((void (*)(struct vfio_device *vdev)) NULL)
+#define vfio_iommufd_emulated_attach_ioas \
+ ((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
+#define vfio_iommufd_emulated_detach_ioas \
+ ((void (*)(struct vfio_device *vdev)) NULL)
+#endif
+
+static inline bool vfio_device_cdev_opened(struct vfio_device *device)
+{
+ return device->cdev_opened;
+}
/**
- * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
+ * struct vfio_migration_ops - VFIO bus device driver migration callbacks
+ *
+ * @migration_set_state: Optional callback to change the migration state for
+ * devices that support migration. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
+ * The returned FD is used for data transfer according to the FSM
+ *	definition. The driver is responsible for ensuring that the FD
+ *	reaches end of stream or error whenever the migration FSM leaves a
+ *	data transfer state or before close_device() returns.
+ * @migration_get_state: Optional callback to get the migration state for
+ * devices that support migration. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
+ * @migration_get_data_size: Optional callback to get the estimated data
+ * length that will be required to complete stop copy. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
*/
-struct vfio_iommu_driver_ops {
- char *name;
- struct module *owner;
- void *(*open)(unsigned long arg);
- void (*release)(void *iommu_data);
- ssize_t (*read)(void *iommu_data, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(void *iommu_data, const char __user *buf,
- size_t count, loff_t *size);
- long (*ioctl)(void *iommu_data, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(void *iommu_data, struct vm_area_struct *vma);
- int (*attach_group)(void *iommu_data,
- struct iommu_group *group);
- void (*detach_group)(void *iommu_data,
- struct iommu_group *group);
- int (*pin_pages)(void *iommu_data,
- struct iommu_group *group,
- unsigned long *user_pfn,
- int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
- int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
- void *data, size_t count, bool write);
+struct vfio_migration_ops {
+ struct file *(*migration_set_state)(
+ struct vfio_device *device,
+ enum vfio_device_mig_state new_state);
+ int (*migration_get_state)(struct vfio_device *device,
+ enum vfio_device_mig_state *curr_state);
+ int (*migration_get_data_size)(struct vfio_device *device,
+ unsigned long *stop_copy_length);
};
-extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
-
-extern void vfio_unregister_iommu_driver(
- const struct vfio_iommu_driver_ops *ops);
+/**
+ * struct vfio_log_ops - VFIO bus device driver logging callbacks
+ *
+ * @log_start: Optional callback to ask the device to start DMA logging.
+ * @log_stop: Optional callback to ask the device to stop DMA logging.
+ * @log_read_and_clear: Optional callback to ask the device to read
+ *  and clear the dirty DMAs in a given range.
+ *
+ * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set
+ * of features does not track logging state relative to the device,
+ * therefore the device implementation of vfio_log_ops must handle
+ * arbitrary user requests. This includes rejecting subsequent calls
+ * to log_start without an intervening log_stop, as well as graceful
+ * handling of log_stop and log_read_and_clear from invalid states.
+ */
+struct vfio_log_ops {
+ int (*log_start)(struct vfio_device *device,
+ struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
+ int (*log_stop)(struct vfio_device *device);
+ int (*log_read_and_clear)(struct vfio_device *device,
+ unsigned long iova, unsigned long length,
+ struct iova_bitmap *dirty);
+};
-/*
- * External user API
+/**
+ * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
+ * @flags: Arg from the device_feature op
+ * @argsz: Arg from the device_feature op
+ * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver
+ * supports
+ * @minsz: Minimum data size the driver accepts
+ *
+ * For use in a driver's device_feature op. Checks that the inputs to the
+ * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if
+ * the driver should execute the get or set; otherwise the returned
+ * value should be passed back to the caller.
*/
-extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
-extern void vfio_group_put_external_user(struct vfio_group *group);
-extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device
- *dev);
-extern bool vfio_external_group_match_file(struct vfio_group *group,
- struct file *filep);
-extern int vfio_external_user_iommu_id(struct vfio_group *group);
-extern long vfio_external_check_extension(struct vfio_group *group,
- unsigned long arg);
+static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
+ size_t minsz)
+{
+ if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) &
+ ~supported_ops)
+ return -EINVAL;
+ if (flags & VFIO_DEVICE_FEATURE_PROBE)
+ return 0;
+ /* Without PROBE one of GET or SET must be requested */
+ if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)))
+ return -EINVAL;
+ if (argsz < minsz)
+ return -EINVAL;
+ return 1;
+}
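
A sketch of a driver's device_feature op built on vfio_check_feature(); the single-u64 GET payload is a hypothetical example:

	static int my_device_feature(struct vfio_device *device, u32 flags,
				     void __user *arg, size_t argsz)
	{
		u64 val = 0;
		int ret;

		ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
					 sizeof(val));
		if (ret != 1)
			return ret;

		/* ... fill val from device state ... */
		return copy_to_user(arg, &val, sizeof(val)) ? -EFAULT : 0;
	}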
-#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
+struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ const struct vfio_device_ops *ops);
+#define vfio_alloc_device(dev_struct, member, dev, ops) \
+ container_of(_vfio_alloc_device(sizeof(struct dev_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct dev_struct, member)), \
+ dev, ops), \
+ struct dev_struct, member)
-extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn);
-extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
- int npage);
+static inline void vfio_put_device(struct vfio_device *device)
+{
+ put_device(&device->device);
+}
-extern int vfio_group_pin_pages(struct vfio_group *group,
- unsigned long *user_iova_pfn, int npage,
- int prot, unsigned long *phys_pfn);
-extern int vfio_group_unpin_pages(struct vfio_group *group,
- unsigned long *user_iova_pfn, int npage);
+int vfio_register_group_dev(struct vfio_device *device);
+int vfio_register_emulated_iommu_dev(struct vfio_device *device);
+void vfio_unregister_group_dev(struct vfio_device *device);
-extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
- void *data, size_t len, bool write);
+int vfio_assign_device_set(struct vfio_device *device, void *set_id);
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
+struct vfio_device *
+vfio_find_device_in_devset(struct vfio_device_set *dev_set,
+ struct device *dev);
-/* each type has independent events */
-enum vfio_notify_type {
- VFIO_IOMMU_NOTIFY = 0,
- VFIO_GROUP_NOTIFY = 1,
-};
+int vfio_mig_get_next_state(struct vfio_device *device,
+ enum vfio_device_mig_state cur_fsm,
+ enum vfio_device_mig_state new_fsm,
+ enum vfio_device_mig_state *next_fsm);
-/* events for VFIO_IOMMU_NOTIFY */
-#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0)
+void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes);
+
+/*
+ * External user API
+ */
+struct iommu_group *vfio_file_iommu_group(struct file *file);
+
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
+bool vfio_file_is_group(struct file *file);
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
+#else
+static inline bool vfio_file_is_group(struct file *file)
+{
+ return false;
+}
-/* events for VFIO_GROUP_NOTIFY */
-#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0)
+static inline bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
+{
+ return false;
+}
+#endif
+bool vfio_file_is_valid(struct file *file);
+bool vfio_file_enforced_coherent(struct file *file);
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
-extern int vfio_register_notifier(struct device *dev,
- enum vfio_notify_type type,
- unsigned long *required_events,
- struct notifier_block *nb);
-extern int vfio_unregister_notifier(struct device *dev,
- enum vfio_notify_type type,
- struct notifier_block *nb);
+#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
-struct kvm;
-extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages);
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
+ void *data, size_t len, bool write);
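
A rough sketch of the reworked API (function and buffer names are illustrative; IOMMU_READ comes from linux/iommu.h): pinning now keys off the vfio_device and a user IOVA rather than a struct device plus PFN arrays, and vfio_dma_rw() offers a copy path that does not hold a pin.

static int my_copy_from_user_iova(struct vfio_device *device,
				  dma_addr_t iova, void *buf, size_t len)
{
	struct page *page;
	int ret;

	ret = vfio_pin_pages(device, iova, 1, IOMMU_READ, &page);
	if (ret != 1)		/* returns pages pinned or -errno */
		return ret < 0 ? ret : -EFAULT;

	/* vfio_dma_rw() could also be used on its own, pin-free. */
	ret = vfio_dma_rw(device, iova, buf, len, false);

	vfio_unpin_pages(device, iova, 1);
	return ret;
}
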
/*
* Sub-module helpers
@@ -156,41 +331,17 @@ struct vfio_info_cap {
struct vfio_info_cap_header *buf;
size_t size;
};
-extern struct vfio_info_cap_header *vfio_info_cap_add(
- struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
-extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
-
-extern int vfio_info_add_capability(struct vfio_info_cap *caps,
- struct vfio_info_cap_header *cap,
- size_t size);
-
-extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
- int num_irqs, int max_irq_type,
- size_t *data_size);
-
-struct pci_dev;
-#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
-extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
-extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
-extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
- unsigned int cmd,
- unsigned long arg);
-#else
-static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
-{
-}
+struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
+ size_t size, u16 id,
+ u16 version);
+void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
-static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev)
-{
-}
+int vfio_info_add_capability(struct vfio_info_cap *caps,
+ struct vfio_info_cap_header *cap, size_t size);
-static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
- unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-#endif /* CONFIG_VFIO_SPAPR_EEH */
+int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
+ int num_irqs, int max_irq_type,
+ size_t *data_size);
/*
* IRQfd - generic
@@ -205,13 +356,14 @@ struct virqfd {
wait_queue_entry_t wait;
poll_table pt;
struct work_struct shutdown;
+ struct work_struct flush_inject;
struct virqfd **pvirqfd;
};
-extern int vfio_virqfd_enable(void *opaque,
- int (*handler)(void *, void *),
- void (*thread)(void *, void *),
- void *data, struct virqfd **pvirqfd, int fd);
-extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
+ void (*thread)(void *, void *), void *data,
+ struct virqfd **pvirqfd, int fd);
+void vfio_virqfd_disable(struct virqfd **pvirqfd);
+void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
#endif /* VFIO_H */
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
new file mode 100644
index 000000000000..a2c8b8bba711
--- /dev/null
+++ b/include/linux/vfio_pci_core.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/vfio.h>
+#include <linux/irqbypass.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/notifier.h>
+
+#ifndef VFIO_PCI_CORE_H
+#define VFIO_PCI_CORE_H
+
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+
+struct vfio_pci_core_device;
+struct vfio_pci_region;
+
+struct vfio_pci_regops {
+ ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region);
+ int (*mmap)(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region,
+ struct vm_area_struct *vma);
+ int (*add_capability)(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region,
+ struct vfio_info_cap *caps);
+};
+
+struct vfio_pci_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_pci_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
+struct vfio_pci_core_device {
+ struct vfio_device vdev;
+ struct pci_dev *pdev;
+ void __iomem *barmap[PCI_STD_NUM_BARS];
+ bool bar_mmap_supported[PCI_STD_NUM_BARS];
+ u8 *pci_config_map;
+ u8 *vconfig;
+ struct perm_bits *msi_perm;
+ spinlock_t irqlock;
+ struct mutex igate;
+ struct xarray ctx;
+ int irq_type;
+ int num_regions;
+ struct vfio_pci_region *region;
+ u8 msi_qmax;
+ u8 msix_bar;
+ u16 msix_size;
+ u32 msix_offset;
+ u32 rbar[7];
+ bool has_dyn_msix:1;
+ bool pci_2_3:1;
+ bool virq_disabled:1;
+ bool reset_works:1;
+ bool extended_caps:1;
+ bool bardirty:1;
+ bool has_vga:1;
+ bool needs_reset:1;
+ bool nointx:1;
+ bool needs_pm_restore:1;
+ bool pm_intx_masked:1;
+ bool pm_runtime_engaged:1;
+ struct pci_saved_state *pci_saved_state;
+ struct pci_saved_state *pm_save;
+ int ioeventfds_nr;
+ struct eventfd_ctx *err_trigger;
+ struct eventfd_ctx *req_trigger;
+ struct eventfd_ctx *pm_wake_eventfd_ctx;
+ struct list_head dummy_resources_list;
+ struct mutex ioeventfds_lock;
+ struct list_head ioeventfds_list;
+ struct vfio_pci_vf_token *vf_token;
+ struct list_head sriov_pfs_item;
+ struct vfio_pci_core_device *sriov_pf_core_dev;
+ struct notifier_block nb;
+ struct mutex vma_lock;
+ struct list_head vma_list;
+ struct rw_semaphore memory_lock;
+};
+
+/* Exported for use by vfio-pci variant drivers */
+int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data);
+void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
+ bool is_disable_idle_d3);
+void vfio_pci_core_close_device(struct vfio_device *core_vdev);
+int vfio_pci_core_init_dev(struct vfio_device *core_vdev);
+void vfio_pci_core_release_dev(struct vfio_device *core_vdev);
+int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
+void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
+extern const struct pci_error_handlers vfio_pci_core_err_handlers;
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+ int nr_virtfn);
+long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg);
+int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz);
+ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos);
+int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
+void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
+int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
+int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
+void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
+void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
+int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
+pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+ size_t x_end, bool iswrite);
+bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
+ loff_t reg_start, size_t reg_cnt,
+ loff_t *buf_offset,
+ size_t *intersect_count,
+ size_t *register_offset);
+#define VFIO_IOWRITE_DECLARATION(size) \
+int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size val, void __iomem *io);
+
+VFIO_IOWRITE_DECLARATION(8)
+VFIO_IOWRITE_DECLARATION(16)
+VFIO_IOWRITE_DECLARATION(32)
+#ifdef iowrite64
+VFIO_IOWRITE_DECLARATION(64)
+#endif
+
+#define VFIO_IOREAD_DECLARATION(size) \
+int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size *val, void __iomem *io);
+
+VFIO_IOREAD_DECLARATION(8)
+VFIO_IOREAD_DECLARATION(16)
+VFIO_IOREAD_DECLARATION(32)
+
+#endif /* VFIO_PCI_CORE_H */
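
To show how these exports compose, a skeleton of a vfio-pci variant driver (all my_* names are hypothetical; the ops wiring assumes the vfio_device_ops members introduced alongside vfio_alloc_device()): core init/release handle lifetime, while open_device pairs enable with finish_enable.

static int my_open_device(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;
	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static const struct vfio_device_ops my_pci_ops = {
	.name		= "my-vfio-pci",
	.init		= vfio_pci_core_init_dev,
	.release	= vfio_pci_core_release_dev,
	.open_device	= my_open_device,
	.close_device	= vfio_pci_core_close_device,
	.ioctl		= vfio_pci_core_ioctl,
	.device_feature	= vfio_pci_core_ioctl_feature,
	.read		= vfio_pci_core_read,
	.write		= vfio_pci_core_write,
	.mmap		= vfio_pci_core_mmap,
	.request	= vfio_pci_core_request,
	.match		= vfio_pci_core_match,
};

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_core_device *vdev;
	int ret;

	vdev = vfio_alloc_device(vfio_pci_core_device, vdev,
				 &pdev->dev, &my_pci_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	dev_set_drvdata(&pdev->dev, vdev);
	ret = vfio_pci_core_register_device(vdev);
	if (ret)
		vfio_put_device(&vdev->vdev);
	return ret;
}
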
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 977caf96c8d2..97129a1bbb7d 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+
/*
* The VGA arbiter manages VGA space routing and VGA resource decode to
* allow multiple VGA devices to be used in a system in a safe way.
@@ -5,27 +7,6 @@
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef LINUX_VGA_H
@@ -33,6 +14,8 @@
#include <video/vga.h>
+struct pci_dev;
+
/* Legacy VGA regions */
#define VGA_RSRC_NONE 0x00
#define VGA_RSRC_LEGACY_IO 0x01
@@ -42,42 +25,45 @@
#define VGA_RSRC_NORMAL_IO 0x04
#define VGA_RSRC_NORMAL_MEM 0x08
-/* Passing that instead of a pci_dev to use the system "default"
- * device, that is the one used by vgacon. Archs will probably
- * have to provide their own vga_default_device();
- */
-#define VGA_DEFAULT_DEVICE (NULL)
-
-struct pci_dev;
-
-/* For use by clients */
-
-/**
- * vga_set_legacy_decoding
- *
- * @pdev: pci device of the VGA card
- * @decodes: bit mask of what legacy regions the card decodes
- *
- * Indicates to the arbiter if the card decodes legacy VGA IOs,
- * legacy VGA Memory, both, or none. All cards default to both,
- * the card driver (fbdev for example) should tell the arbiter
- * if it has disabled legacy decoding, so the card can be left
- * out of the arbitration process (and can be safe to take
- * interrupts at any time.
- */
-#if defined(CONFIG_VGA_ARB)
-extern void vga_set_legacy_decoding(struct pci_dev *pdev,
- unsigned int decodes);
-#else
+#ifdef CONFIG_VGA_ARB
+void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes);
+int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+struct pci_dev *vga_default_device(void);
+void vga_set_default_device(struct pci_dev *pdev);
+int vga_remove_vgacon(struct pci_dev *pdev);
+int vga_client_register(struct pci_dev *pdev,
+ unsigned int (*set_decode)(struct pci_dev *pdev, bool state));
+#else /* CONFIG_VGA_ARB */
static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
- unsigned int decodes) { };
-#endif
-
-#if defined(CONFIG_VGA_ARB)
-extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
-#else
-static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
-#endif
+ unsigned int decodes)
+{
+}
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc,
+ int interruptible)
+{
+ return 0;
+}
+static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc)
+{
+}
+static inline struct pci_dev *vga_default_device(void)
+{
+ return NULL;
+}
+static inline void vga_set_default_device(struct pci_dev *pdev)
+{
+}
+static inline int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline int vga_client_register(struct pci_dev *pdev,
+ unsigned int (*set_decode)(struct pci_dev *pdev, bool state))
+{
+ return 0;
+}
+#endif /* CONFIG_VGA_ARB */
/**
* vga_get_interruptible
@@ -91,7 +77,7 @@ static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interrupt
static inline int vga_get_interruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
- return vga_get(pdev, rsrc, 1);
+ return vga_get(pdev, rsrc, 1);
}
/**
@@ -106,49 +92,12 @@ static inline int vga_get_interruptible(struct pci_dev *pdev,
static inline int vga_get_uninterruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
- return vga_get(pdev, rsrc, 0);
+ return vga_get(pdev, rsrc, 0);
}
-#if defined(CONFIG_VGA_ARB)
-extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
-#else
-#define vga_put(pdev, rsrc)
-#endif
-
-
-#ifdef CONFIG_VGA_ARB
-extern struct pci_dev *vga_default_device(void);
-extern void vga_set_default_device(struct pci_dev *pdev);
-extern int vga_remove_vgacon(struct pci_dev *pdev);
-#else
-static inline struct pci_dev *vga_default_device(void) { return NULL; };
-static inline void vga_set_default_device(struct pci_dev *pdev) { };
-static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; };
-#endif
-
-/*
- * Architectures should define this if they have several
- * independent PCI domains that can afford concurrent VGA
- * decoding
- */
-#ifndef __ARCH_HAS_VGA_CONFLICT
-static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
+static inline void vga_client_unregister(struct pci_dev *pdev)
{
- return 1;
-}
-#endif
-
-#if defined(CONFIG_VGA_ARB)
-int vga_client_register(struct pci_dev *pdev, void *cookie,
- void (*irq_set_state)(void *cookie, bool state),
- unsigned int (*set_vga_decode)(void *cookie, bool state));
-#else
-static inline int vga_client_register(struct pci_dev *pdev, void *cookie,
- void (*irq_set_state)(void *cookie, bool state),
- unsigned int (*set_vga_decode)(void *cookie, bool state))
-{
- return 0;
+ vga_client_register(pdev, NULL);
}
-#endif
#endif /* LINUX_VGA_H */
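
A sketch of the consolidated client API after this cleanup (the GPU callback name is hypothetical; the signature is the one declared above): one registration call takes the set_decode hook directly, and unregistering is now just registering a NULL hook via the new inline.

static unsigned int my_gpu_set_decode(struct pci_dev *pdev, bool state)
{
	/* Report which legacy ranges the device still decodes. */
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* probe:  vga_client_register(pdev, my_gpu_set_decode);
 * remove: vga_client_unregister(pdev);
 */
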
diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h
index 6b09b786a762..e79a40838998 100644
--- a/include/linux/vhost_iotlb.h
+++ b/include/linux/vhost_iotlb.h
@@ -17,6 +17,7 @@ struct vhost_iotlb_map {
u32 perm;
u32 flags_padding;
u64 __subtree_last;
+ void *opaque;
};
#define VHOST_IOTLB_FLAG_RETIRE 0x1
@@ -29,10 +30,14 @@ struct vhost_iotlb {
unsigned int flags;
};
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last,
+ u64 addr, unsigned int perm, void *opaque);
int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
u64 addr, unsigned int perm);
void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags);
struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags);
void vhost_iotlb_free(struct vhost_iotlb *iotlb);
void vhost_iotlb_reset(struct vhost_iotlb *iotlb);
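
A sketch of the _ctx variant in use (the wrapper name is illustrative; VHOST_ACCESS_RW comes from uapi/linux/vhost_types.h): the new opaque cookie is stored per mapping and is later reachable through vhost_iotlb_map->opaque.

static int my_add_mapping(struct vhost_iotlb *iotlb, u64 start, u64 last,
			  u64 addr, void *cookie)
{
	/* Same as vhost_iotlb_add_range(), plus a per-map opaque pointer. */
	return vhost_iotlb_add_range_ctx(iotlb, start, last, addr,
					 VHOST_ACCESS_RW, cookie);
}
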
diff --git a/include/linux/via-core.h b/include/linux/via-core.h
index 9e802deedb2d..8737599b9148 100644
--- a/include/linux/via-core.h
+++ b/include/linux/via-core.h
@@ -47,7 +47,6 @@ struct via_port_cfg {
/*
* Allow subdevs to register suspend/resume hooks.
*/
-#ifdef CONFIG_PM
struct viafb_pm_hooks {
struct list_head list;
int (*suspend)(void *private);
@@ -57,7 +56,6 @@ struct viafb_pm_hooks {
void viafb_pm_register(struct viafb_pm_hooks *hooks);
void viafb_pm_unregister(struct viafb_pm_hooks *hooks);
-#endif /* CONFIG_PM */
/*
* This is the global viafb "device" containing stuff needed by
diff --git a/include/linux/via-gpio.h b/include/linux/via-gpio.h
deleted file mode 100644
index ac34668fd442..000000000000
--- a/include/linux/via-gpio.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Support for viafb GPIO ports.
- *
- * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
- */
-
-#ifndef __VIA_GPIO_H__
-#define __VIA_GPIO_H__
-
-extern int viafb_gpio_lookup(const char *name);
-extern int viafb_gpio_init(void);
-extern void viafb_gpio_exit(void);
-#endif
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index a493eac08393..b0201747a263 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,9 +9,10 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
/**
- * virtqueue - a queue to register buffers for sending or receiving.
+ * struct virtqueue - a queue to register buffers for sending or receiving.
* @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
@@ -19,6 +20,8 @@
* @priv: a pointer for the virtqueue implementation to use.
* @index: the zero-based ordinal number for this queue.
* @num_free: number of elements we expect to be able to fit.
+ * @num_max: the maximum number of elements supported by the device.
+ * @reset: vq is in reset state or not.
*
* A note on @num_free: with indirect buffers, each buffer needs one
* element in the queue, otherwise a buffer will need one element per
@@ -31,6 +34,8 @@ struct virtqueue {
struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
+ unsigned int num_max;
+ bool reset;
void *priv;
};
@@ -57,6 +62,8 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
+struct device *virtqueue_dma_dev(struct virtqueue *vq);
+
bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -74,28 +81,44 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+int virtqueue_set_dma_premapped(struct virtqueue *_vq);
+
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
void *virtqueue_detach_unused_buf(struct virtqueue *vq);
-unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+unsigned int virtqueue_get_vring_size(const struct virtqueue *vq);
+
+bool virtqueue_is_broken(const struct virtqueue *vq);
-bool virtqueue_is_broken(struct virtqueue *vq);
+const struct vring *virtqueue_get_vring(const struct virtqueue *vq);
+dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *vq);
+dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
+dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
-const struct vring *virtqueue_get_vring(struct virtqueue *vq);
-dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
-dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
-dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
+int virtqueue_resize(struct virtqueue *vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf));
+int virtqueue_reset(struct virtqueue *vq,
+ void (*recycle)(struct virtqueue *vq, void *buf));
+
+struct virtio_admin_cmd {
+ __le16 opcode;
+ __le16 group_type;
+ __le64 group_member_id;
+ struct scatterlist *data_sg;
+ struct scatterlist *result_sg;
+};
/**
- * virtio_device - representation of a device using virtio
+ * struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
* @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
* @config_enabled: configuration change reporting enabled
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
+ * @vqs_list_lock: protects @vqs.
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
@@ -110,6 +133,7 @@ struct virtio_device {
bool config_enabled;
bool config_change_pending;
spinlock_t config_lock;
+ spinlock_t vqs_list_lock;
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;
@@ -119,39 +143,41 @@ struct virtio_device {
void *priv;
};
-static inline struct virtio_device *dev_to_virtio(struct device *_dev)
-{
- return container_of(_dev, struct virtio_device, dev);
-}
+#define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev)
void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
+bool is_virtio_device(struct device *dev);
void virtio_break_device(struct virtio_device *dev);
+void __virtio_unbreak_device(struct virtio_device *dev);
+
+void __virtqueue_break(struct virtqueue *_vq);
+void __virtqueue_unbreak(struct virtqueue *_vq);
void virtio_config_changed(struct virtio_device *dev);
-void virtio_config_disable(struct virtio_device *dev);
-void virtio_config_enable(struct virtio_device *dev);
-int virtio_finalize_features(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+void virtio_reset_device(struct virtio_device *dev);
-size_t virtio_max_dma_size(struct virtio_device *vdev);
+size_t virtio_max_dma_size(const struct virtio_device *vdev);
#define virtio_device_for_each_vq(vdev, vq) \
list_for_each_entry(vq, &vdev->vqs, list)
/**
- * virtio_driver - operations for a virtio I/O driver
+ * struct virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
* @feature_table_size: number of entries in the feature table array.
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
+ * @validate: the function to call to validate features and config space.
+ * Returns 0 or -errno.
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @scan: optional function to call after successful probe; intended
* for virtio-scsi to invoke a scan.
@@ -173,10 +199,8 @@ struct virtio_driver {
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
void (*config_changed)(struct virtio_device *dev);
-#ifdef CONFIG_PM
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
-#endif
};
static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
@@ -195,4 +219,19 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
#endif /* _LINUX_VIRTIO_H */
diff --git a/include/linux/virtio_anchor.h b/include/linux/virtio_anchor.h
new file mode 100644
index 000000000000..432e6c00b3ca
--- /dev/null
+++ b/include/linux/virtio_anchor.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_ANCHOR_H
+#define _LINUX_VIRTIO_ANCHOR_H
+
+#ifdef CONFIG_VIRTIO_ANCHOR
+struct virtio_device;
+
+bool virtio_require_restricted_mem_acc(struct virtio_device *dev);
+extern bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev);
+
+static inline void virtio_set_mem_acc_cb(bool (*func)(struct virtio_device *))
+{
+ virtio_check_mem_acc_cb = func;
+}
+#else
+#define virtio_set_mem_acc_cb(func) do { } while (0)
+#endif
+
+#endif /* _LINUX_VIRTIO_ANCHOR_H */
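
As a usage sketch (the init hook is hypothetical): a confidential-guest platform can install the provided predicate early in boot so every virtio device is forced through restricted memory access checks.

static int __init my_guest_virtio_init(void)
{
	/* Compiles to a no-op when CONFIG_VIRTIO_ANCHOR is disabled. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
	return 0;
}
core_initcall(my_guest_virtio_init);
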
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 8fe857e27ef3..da9b271b54db 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -11,8 +11,15 @@
struct irq_affinity;
+struct virtio_shm_region {
+ u64 addr;
+ u64 len;
+};
+
+typedef void vq_callback_t(struct virtqueue *);
+
/**
- * virtio_config_ops - operations for configuring a virtio device
+ * struct virtio_config_ops - operations for configuring a virtio device
* Note: Do not assume that a transport implements all of the operations
* getting/setting a value as a simple read/write! Generally speaking,
* any of @get/@set, @get_status/@set_status, or @get_features/
@@ -52,13 +59,20 @@ struct irq_affinity;
* include a NULL entry for vqs unused by driver
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
+ * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
+ * The function guarantees that all memory operations on the
+ * queue before it are visible to the vring_interrupt() that is
+ * called after it.
+ * vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 64 feature bits (all we currently need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
- * This gives the final feature bits for the device: it can change
+ * This sends the driver feature bits to the device: it can change
* the dev->feature bits if it wants.
+ * Note that despite the name this can be called any number of
+ * times.
* Returns 0 on success or error status
* @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
@@ -66,8 +80,22 @@ struct irq_affinity;
* the caller can then copy.
* @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
+ * @get_shm_region: get a shared memory region based on the index.
+ * @disable_vq_and_reset: reset a queue individually (optional).
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * disable_vq_and_reset will guarantee that the callbacks are disabled and
+ * synchronized.
+ * Apart from the callback, the caller must guarantee that the vring is
+ * not accessed through any other virtqueue functions.
+ * @enable_vq_after_reset: enable a reset queue
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * If disable_vq_and_reset is set, then enable_vq_after_reset must also be
+ * set.
+ * @create_avq: create admin virtqueue resource.
+ * @destroy_avq: destroy admin virtqueue resource.
*/
-typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
@@ -82,6 +110,7 @@ struct virtio_config_ops {
const char * const names[], const bool *ctx,
struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
+ void (*synchronize_cbs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
@@ -89,6 +118,12 @@ struct virtio_config_ops {
const struct cpumask *cpu_mask);
const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
int index);
+ bool (*get_shm_region)(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id);
+ int (*disable_vq_and_reset)(struct virtqueue *vq);
+ int (*enable_vq_after_reset)(struct virtqueue *vq);
+ int (*create_avq)(struct virtio_device *vdev);
+ void (*destroy_avq)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */
@@ -209,8 +244,27 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
}
/**
+ * virtio_synchronize_cbs - synchronize with virtqueue callbacks
+ * @dev: the virtio device
+ */
+static inline
+void virtio_synchronize_cbs(struct virtio_device *dev)
+{
+ if (dev->config->synchronize_cbs) {
+ dev->config->synchronize_cbs(dev);
+ } else {
+ /*
+ * A best effort fallback to synchronize with
+ * interrupts, preemption and softirq disabled
+ * regions. See comment above synchronize_rcu().
+ */
+ synchronize_rcu();
+ }
+}
+
+/**
* virtio_device_ready - enable vq use in probe function
- * @vdev: the device
+ * @dev: the virtio device
*
* Driver must call this to use vqs in the probe function.
*
@@ -221,7 +275,29 @@ void virtio_device_ready(struct virtio_device *dev)
{
unsigned status = dev->config->get_status(dev);
- BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ /*
+ * The virtio_synchronize_cbs() makes sure vring_interrupt()
+ * will see the driver specific setup if it sees vq->broken
+ * as false (even if the notifications come before DRIVER_OK).
+ */
+ virtio_synchronize_cbs(dev);
+ __virtio_unbreak_device(dev);
+#endif
+ /*
+ * The transport should ensure the visibility of vq->broken
+ * before setting DRIVER_OK. See the comments for the transport
+ * specific set_status() method.
+ *
+ * A well behaved device will only notify a virtqueue after
+ * DRIVER_OK; this means the device should "see" the coherent
+ * memory write that set vq->broken as false which is done by
+ * the driver when it sees DRIVER_OK, then the following
+ * driver's vring_interrupt() will see vq->broken as false so
+ * we won't lose any notification.
+ */
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
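
A sketch of the probe ordering these comments imply (driver and queue names are illustrative): RX buffers may be pre-added, but the device must not be kicked until virtio_device_ready() has run.

static int my_virtio_probe(struct virtio_device *vdev)
{
	struct virtqueue *rx_vq;

	/* ... negotiate features, find vqs, pre-fill rx buffers
	 * without kicking the device ... */
	rx_vq = NULL;	/* placeholder for the queue found above */

	virtio_device_ready(vdev);	/* DRIVER_OK; vqs unbroken */

	if (rx_vq)
		virtqueue_kick(rx_vq);	/* notifications are safe now */
	return 0;
}
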
@@ -236,7 +312,7 @@ const char *virtio_bus_name(struct virtio_device *vdev)
/**
* virtqueue_set_affinity - setting affinity for a virtqueue
* @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
*
* Note that this function is best-effort: the affinity hint may not be set
* due to config support, irq type and sharing.
@@ -251,6 +327,15 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
return 0;
}
+static inline
+bool virtio_get_shm_region(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id)
+{
+ if (!vdev->config->get_shm_region)
+ return false;
+ return vdev->config->get_shm_region(vdev, region, id);
+}
+
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
@@ -540,4 +625,5 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
virtio_cread_le((vdev), structname, member, ptr); \
_r; \
})
+
#endif /* _LINUX_VIRTIO_CONFIG_H */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
deleted file mode 100644
index d2e2785af602..000000000000
--- a/include/linux/virtio_console.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers:
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of IBM nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
- * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
- */
-#ifndef _LINUX_VIRTIO_CONSOLE_H
-#define _LINUX_VIRTIO_CONSOLE_H
-
-#include <uapi/linux/virtio_console.h>
-
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
-#endif /* _LINUX_VIRTIO_CONSOLE_H */
diff --git a/include/linux/virtio_dma_buf.h b/include/linux/virtio_dma_buf.h
new file mode 100644
index 000000000000..a2fdf217ac62
--- /dev/null
+++ b/include/linux/virtio_dma_buf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dma-bufs for virtio exported objects
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#ifndef _LINUX_VIRTIO_DMA_BUF_H
+#define _LINUX_VIRTIO_DMA_BUF_H
+
+#include <linux/dma-buf.h>
+#include <linux/uuid.h>
+#include <linux/virtio.h>
+
+/**
+ * struct virtio_dma_buf_ops - operations possible on exported object dma-buf
+ * @ops: the base dma_buf_ops. ops.attach MUST be virtio_dma_buf_attach.
+ * @device_attach: [optional] callback invoked by virtio_dma_buf_attach during
+ * all attach operations.
+ * @get_uuid: [required] callback to get the uuid of the exported object.
+ */
+struct virtio_dma_buf_ops {
+ struct dma_buf_ops ops;
+ int (*device_attach)(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+ int (*get_uuid)(struct dma_buf *dma_buf, uuid_t *uuid);
+};
+
+int virtio_dma_buf_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+
+struct dma_buf *virtio_dma_buf_export
+ (const struct dma_buf_export_info *exp_info);
+bool is_virtio_dma_buf(struct dma_buf *dma_buf);
+int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid);
+
+#endif /* _LINUX_VIRTIO_DMA_BUF_H */
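
A minimal exporter sketch (my_* names are hypothetical; the non-attach dma_buf_ops are elided): the one hard rule from the kerneldoc above is that ops.attach must be virtio_dma_buf_attach so the optional device_attach hook gets invoked.

static int my_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
{
	/* Fill in the UUID the host assigned to the exported object. */
	uuid_copy(uuid, &uuid_null);	/* placeholder value */
	return 0;
}

static const struct virtio_dma_buf_ops my_dma_buf_ops = {
	.ops = {
		.attach = virtio_dma_buf_attach,  /* required */
		/* .map_dma_buf, .unmap_dma_buf, .release, ... */
	},
	.get_uuid = my_get_uuid,
};
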
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index e8a924eeea3d..4dfa9b69ca8d 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,16 +3,38 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
#include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+ switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ return protocol == cpu_to_be16(ETH_P_IP);
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ return protocol == cpu_to_be16(ETH_P_IPV6);
+ case VIRTIO_NET_HDR_GSO_UDP:
+ case VIRTIO_NET_HDR_GSO_UDP_L4:
+ return protocol == cpu_to_be16(ETH_P_IP) ||
+ protocol == cpu_to_be16(ETH_P_IPV6);
+ default:
+ return false;
+ }
+}
+
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
const struct virtio_net_hdr *hdr)
{
+ if (skb->protocol)
+ return 0;
+
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_UDP:
+ case VIRTIO_NET_HDR_GSO_UDP_L4:
skb->protocol = cpu_to_be16(ETH_P_IP);
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
@@ -29,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
{
+ unsigned int nh_min_len = sizeof(struct iphdr);
unsigned int gso_type = 0;
unsigned int thlen = 0;
unsigned int p_off = 0;
@@ -45,12 +68,18 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
gso_type = SKB_GSO_TCPV6;
ip_proto = IPPROTO_TCP;
thlen = sizeof(struct tcphdr);
+ nh_min_len = sizeof(struct ipv6hdr);
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
ip_proto = IPPROTO_UDP;
thlen = sizeof(struct udphdr);
break;
+ case VIRTIO_NET_HDR_GSO_UDP_L4:
+ gso_type = SKB_GSO_UDP_L4;
+ ip_proto = IPPROTO_UDP;
+ thlen = sizeof(struct udphdr);
+ break;
default:
return -EINVAL;
}
@@ -62,15 +91,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
+ skb_reset_mac_header(skb);
+
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
- u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+ if (!pskb_may_pull(skb, needed))
+ return -EINVAL;
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
- p_off = skb_transport_offset(skb) + thlen;
- if (p_off > skb_headlen(skb))
+ nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb));
+ p_off = nh_min_len + thlen;
+ if (!pskb_may_pull(skb, p_off))
return -EINVAL;
} else {
/* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -79,8 +115,16 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (gso_type && skb->network_header) {
struct flow_keys_basic keys;
- if (!skb->protocol)
- virtio_net_hdr_set_proto(skb, hdr);
+ if (!skb->protocol) {
+ __be16 protocol = dev_parse_header_protocol(skb);
+
+ if (!protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+ else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+ return -EINVAL;
+ else
+ skb->protocol = protocol;
+ }
retry:
if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
NULL, 0, 0, 0,
@@ -95,24 +139,46 @@ retry:
}
p_off = keys.control.thoff + thlen;
- if (p_off > skb_headlen(skb) ||
+ if (!pskb_may_pull(skb, p_off) ||
keys.basic.ip_proto != ip_proto)
return -EINVAL;
skb_set_transport_header(skb, keys.control.thoff);
} else if (gso_type) {
- p_off = thlen;
- if (p_off > skb_headlen(skb))
+ p_off = nh_min_len + thlen;
+ if (!pskb_may_pull(skb, p_off))
return -EINVAL;
}
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+ unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ switch (gso_type & ~SKB_GSO_TCP_ECN) {
+ case SKB_GSO_UDP:
+ /* UFO may not include transport header in gso_size. */
+ nh_off -= thlen;
+ break;
+ case SKB_GSO_UDP_L4:
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+ if (skb->csum_offset != offsetof(struct udphdr, check))
+ return -EINVAL;
+ if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+ return -EINVAL;
+ if (gso_type != SKB_GSO_UDP_L4)
+ return -EINVAL;
+ break;
+ }
+
+ /* Kernel has a special handling for GSO_BY_FRAGS. */
+ if (gso_size == GSO_BY_FRAGS)
+ return -EINVAL;
+
/* Too small packets are not really GSO ones. */
- if (skb->len - p_off > gso_size) {
+ if (skb->len - nh_off > gso_size) {
shinfo->gso_size = gso_size;
shinfo->gso_type = gso_type;
@@ -145,6 +211,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP_L4)
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP_L4;
else
return -EINVAL;
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h
new file mode 100644
index 000000000000..f4a100a0fe2e
--- /dev/null
+++ b/include/linux/virtio_pci_admin.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_ADMIN_H
+#define _LINUX_VIRTIO_PCI_ADMIN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev);
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+ u8 req_bar_flags, u8 *bar,
+ u64 *bar_offset);
+#endif
+
+#endif /* _LINUX_VIRTIO_PCI_ADMIN_H */
diff --git a/include/linux/virtio_pci_legacy.h b/include/linux/virtio_pci_legacy.h
new file mode 100644
index 000000000000..a8dc757d0367
--- /dev/null
+++ b/include/linux/virtio_pci_legacy.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_LEGACY_H
+#define _LINUX_VIRTIO_PCI_LEGACY_H
+
+#include "linux/mod_devicetable.h"
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_legacy_device {
+ struct pci_dev *pci_dev;
+
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+ /* The IO mapping for the PCI config space (legacy mode only) */
+ void __iomem *ioaddr;
+
+ struct virtio_device_id id;
+};
+
+u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev);
+u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev,
+ u32 features);
+u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev,
+ u8 status);
+u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
+ u16 idx, u16 vector);
+u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev,
+ u16 vector);
+void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
+ u16 index, u32 queue_pfn);
+bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
+ u16 idx);
+u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
+ u16 idx);
+int vp_legacy_probe(struct virtio_pci_legacy_device *ldev);
+void vp_legacy_remove(struct virtio_pci_legacy_device *ldev);
+
+#endif
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
new file mode 100644
index 000000000000..c0b1b1ca1163
--- /dev/null
+++ b/include/linux/virtio_pci_modern.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_MODERN_H
+#define _LINUX_VIRTIO_PCI_MODERN_H
+
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+/**
+ * struct virtio_pci_modern_device - info for modern PCI virtio
+ * @pci_dev: Ptr to the PCI device struct
+ * @common: Position of the common capability in the PCI config
+ * @device: Device-specific data (non-legacy mode)
+ * @notify_base: Base of vq notifications (non-legacy mode)
+ * @notify_pa: Physical base of vq notifications
+ * @isr: Where to read and clear interrupt
+ * @notify_len: So we can sanity-check accesses
+ * @device_len: So we can sanity-check accesses
+ * @notify_map_cap: Capability for when we need to map notifications per-vq
+ * @notify_offset_multiplier: Multiply queue_notify_off by this value
+ * (non-legacy mode).
+ * @modern_bars: Bitmask of BARs
+ * @id: Device and vendor id
+ * @device_id_check: Callback defined before vp_modern_probe() to be used to
+ * verify the PCI device is a vendor's expected device rather
+ * than the standard virtio PCI device
+ * Returns the found device id or a negative errno
+ * @dma_mask: Optional mask instead of the traditional DMA_BIT_MASK(64),
+ * for vendor devices with DMA space address limitations
+ */
+struct virtio_pci_modern_device {
+ struct pci_dev *pci_dev;
+
+ struct virtio_pci_common_cfg __iomem *common;
+ void __iomem *device;
+ void __iomem *notify_base;
+ resource_size_t notify_pa;
+ u8 __iomem *isr;
+
+ size_t notify_len;
+ size_t device_len;
+ size_t common_len;
+
+ int notify_map_cap;
+
+ u32 notify_offset_multiplier;
+ int modern_bars;
+ struct virtio_device_id id;
+
+ int (*device_id_check)(struct pci_dev *pdev);
+ u64 dma_mask;
+};
+
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(const u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16(const __le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static inline void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo,
+ __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
+u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features);
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status);
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 vector);
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector);
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr);
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx, bool enable);
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 size);
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
+void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
+ u16 index, resource_size_t *pa);
+int vp_modern_probe(struct virtio_pci_modern_device *mdev);
+void vp_modern_remove(struct virtio_pci_modern_device *mdev);
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev);
+#endif
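
To illustrate the typed accessors (helper name hypothetical; queue_select and queue_size are __le16 fields of the standard struct virtio_pci_common_cfg): each wrapper's pointer type pins the access width to the field width, which is the spec rule quoted above.

static u16 my_read_queue_size(struct virtio_pci_modern_device *mdev, u16 idx)
{
	/* Both fields are 16-bit, so 16-bit accesses are mandatory. */
	vp_iowrite16(idx, &mdev->common->queue_select);
	return vp_ioread16(&mdev->common->queue_size);
}
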
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index b485b13fa50b..9b33df741b63 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -58,6 +58,7 @@ do { \
struct virtio_device;
struct virtqueue;
+struct device;
/*
* Creates a virtqueue and allocates the descriptor ring. If
@@ -76,15 +77,21 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name);
-/* Creates a virtqueue with a custom layout. */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool ctx,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name);
+/*
+ * Creates a virtqueue and allocates the descriptor ring with per
+ * virtqueue DMA device.
+ */
+struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool ctx,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ struct device *dma_dev);
/*
* Creates a virtqueue with a standard layout but a caller-allocated
@@ -111,4 +118,6 @@ void vring_del_virtqueue(struct virtqueue *vq);
void vring_transport_features(struct virtio_device *vdev);
irqreturn_t vring_interrupt(int irq, void *_vq);
+
+u32 vring_notification_data(struct virtqueue *_vq);
#endif /* _LINUX_VIRTIO_RING_H */
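
A sketch of the new DMA-device-aware constructor (queue parameters are illustrative): it behaves like vring_create_virtqueue() except that the ring memory is allocated and mapped against dma_dev instead of the transport's parent device.

static struct virtqueue *my_create_vq(struct virtio_device *vdev,
				      struct device *dma_dev,
				      bool (*notify)(struct virtqueue *),
				      void (*cb)(struct virtqueue *))
{
	return vring_create_virtqueue_dma(0, 256, SMP_CACHE_BYTES, vdev,
					  true,	 /* weak_barriers */
					  true,	 /* may_reduce_num */
					  false, /* ctx */
					  notify, cb, "my-vq", dma_dev);
}
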
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index dc636b727179..c82089dee0c8 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,6 +7,110 @@
#include <net/sock.h>
#include <net/af_vsock.h>
+#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))
+
+struct virtio_vsock_skb_cb {
+ bool reply;
+ bool tap_delivered;
+ u32 offset;
+};
+
+#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
+
+static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
+{
+ return (struct virtio_vsock_hdr *)skb->head;
+}
+
+static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
+{
+ return VIRTIO_VSOCK_SKB_CB(skb)->reply;
+}
+
+static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
+}
+
+static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
+{
+ return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
+}
+
+static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
+}
+
+static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
+}
+
+static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
+{
+ u32 len;
+
+ len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+
+ if (len > 0)
+ skb_put(skb, len);
+}
+
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+ struct sk_buff *skb;
+
+ if (size < VIRTIO_VSOCK_SKB_HEADROOM)
+ return NULL;
+
+ skb = alloc_skb(size, mask);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+ return skb;
+}
+
+static inline void
+virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_head(list, skb);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline void
+virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_tail(list, skb);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ spin_lock_bh(&list->lock);
+ skb = __skb_dequeue(list);
+ spin_unlock_bh(&list->lock);
+
+ return skb;
+}
+
+static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_purge(list);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
+{
+ return (size_t)(skb_end_pointer(skb) - skb->head);
+}
+
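
A sketch of the skb-based receive path these helpers enable (function name and length handling are illustrative): allocate with headroom for the packet header, let the device fill it in, then size and queue the skb under the list lock.

static int my_vsock_rx_one(struct sk_buff_head *queue, u32 payload_len)
{
	struct sk_buff *skb;

	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + payload_len,
				     GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* ... device fills the header and payload at skb->head ... */

	virtio_vsock_skb_rx_put(skb);		 /* skb_put() by hdr->len */
	virtio_vsock_skb_queue_tail(queue, skb); /* takes list->lock */
	return 0;
}
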
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -35,20 +139,8 @@ struct virtio_vsock_sock {
u32 last_fwd_cnt;
u32 rx_bytes;
u32 buf_alloc;
- struct list_head rx_queue;
-};
-
-struct virtio_vsock_pkt {
- struct virtio_vsock_hdr hdr;
- struct list_head list;
- /* socket refcnt not held, only use for cancellation */
- struct vsock_sock *vsk;
- void *buf;
- u32 buf_len;
- u32 len;
- u32 off;
- bool reply;
- bool tap_delivered;
+ struct sk_buff_head rx_queue;
+ u32 msg_count;
};
struct virtio_vsock_pkt_info {
@@ -67,7 +159,16 @@ struct virtio_transport {
struct vsock_transport transport;
/* Takes ownership of the packet */
- int (*send_pkt)(struct virtio_vsock_pkt *pkt);
+ int (*send_pkt)(struct sk_buff *skb);
+
+ /* Used in MSG_ZEROCOPY mode. Checks whether the provided data
+ * (number of buffers) can be transmitted in zerocopy mode. If a
+ * transport does not implement this callback, it needs no extra
+ * checks and can perform zerocopy transmission by default.
+ */
+ bool (*can_msgzerocopy)(int bufs_num);
};
ssize_t
@@ -80,8 +181,17 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len, int flags);
+int
+virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len);
+ssize_t
+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
+u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
@@ -139,11 +249,12 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
void virtio_transport_recv_pkt(struct virtio_transport *t,
- struct virtio_vsock_pkt *pkt);
-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+ struct sk_buff *skb);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
-
+void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
+int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
+int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
+int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h
deleted file mode 100644
index 0d8bd6769b13..000000000000
--- a/include/linux/visorbus.h
+++ /dev/null
@@ -1,344 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This header file is to be included by other kernel mode components that
- * implement a particular kind of visor_device. Each of these other kernel
- * mode components is called a visor device driver. Refer to visortemplate
- * for a minimal sample visor device driver.
- *
- * There should be nothing in this file that is private to the visorbus
- * bus implementation itself.
- */
-
-#ifndef __VISORBUS_H__
-#define __VISORBUS_H__
-
-#include <linux/device.h>
-
-#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
-
-/*
- * enum channel_serverstate
- * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
- * @CHANNELSRV_READY: Channel has been initialized by server.
- */
-enum channel_serverstate {
- CHANNELSRV_UNINITIALIZED = 0,
- CHANNELSRV_READY = 1
-};
-
-/*
- * enum channel_clientstate
- * @CHANNELCLI_DETACHED:
- * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
- * unless given TBD* explicit request
- * (should actually be < DETACHED).
- * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
- * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
- * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
- * @CHANNELCLI_OWNED: "No worries" state - client can access channel
- * anytime.
- */
-enum channel_clientstate {
- CHANNELCLI_DETACHED = 0,
- CHANNELCLI_DISABLED = 1,
- CHANNELCLI_ATTACHING = 2,
- CHANNELCLI_ATTACHED = 3,
- CHANNELCLI_BUSY = 4,
- CHANNELCLI_OWNED = 5
-};
-
-/*
- * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
- * a guest can look at the FeatureFlags in the io channel, and configure the
- * driver to use interrupts or not based on this setting. All feature bits for
- * all channels should be defined here. The io channel feature bits are defined
- * below.
- */
-#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
-#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
-#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
-#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
-#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
-
-/*
- * struct channel_header - Common Channel Header
- * @signature: Signature.
- * @legacy_state: DEPRECATED - being replaced by.
- * @header_size: sizeof(struct channel_header).
- * @size: Total size of this channel in bytes.
- * @features: Flags to modify behavior.
- * @chtype: Channel type: data, bus, control, etc..
- * @partition_handle: ID of guest partition.
- * @handle: Device number of this channel in client.
- * @ch_space_offset: Offset in bytes to channel specific area.
- * @version_id: Struct channel_header Version ID.
- * @partition_index: Index of guest partition.
- * @zone_guid:		GUID of the channel's zone.
- * @cli_str_offset: Offset from channel header to null-terminated
- * ClientString (0 if ClientString not present).
- * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
- * channel.
- * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- * ServerStateUp, ServerStateDown, etc).
- * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
- * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
- * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- * ServerStateUp, ServerStateDown, etc).
- * @srv_state: CHANNEL_SERVERSTATE.
- * @cli_error_boot: Bits to indicate err states for boot clients, so err
- * messages can be throttled.
- * @cli_error_os: Bits to indicate err states for OS clients, so err
- * messages can be throttled.
- * @filler: Pad out to 128 byte cacheline.
- * @recover_channel: Please add all new single-byte values below here.
- */
-struct channel_header {
- u64 signature;
- u32 legacy_state;
- /* SrvState, CliStateBoot, and CliStateOS below */
- u32 header_size;
- u64 size;
- u64 features;
- guid_t chtype;
- u64 partition_handle;
- u64 handle;
- u64 ch_space_offset;
- u32 version_id;
- u32 partition_index;
- guid_t zone_guid;
- u32 cli_str_offset;
- u32 cli_state_boot;
- u32 cmd_state_cli;
- u32 cli_state_os;
- u32 ch_characteristic;
- u32 cmd_state_srv;
- u32 srv_state;
- u8 cli_error_boot;
- u8 cli_error_os;
- u8 filler[1];
- u8 recover_channel;
-} __packed;
-
-#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
-
-/*
- * struct signal_queue_header - Subheader for the Signal Type variation of the
- * Common Channel.
- * @version: SIGNAL_QUEUE_HEADER Version ID.
- * @chtype: Queue type: storage, network.
- * @size: Total size of this queue in bytes.
- * @sig_base_offset: Offset to signal queue area.
- * @features: Flags to modify behavior.
- * @num_sent: Total # of signals placed in this queue.
- * @num_overflows: Total # of inserts failed due to full queue.
- * @signal_size: Total size of a signal for this queue.
- * @max_slots: Max # of slots in queue, 1 slot is always empty.
- * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
- * @head: Queue head signal #.
- * @num_received: Total # of signals removed from this queue.
- * @tail: Queue tail signal.
- * @reserved1: Reserved field.
- * @reserved2: Reserved field.
- * @client_queue:
- * @num_irq_received: Total # of Interrupts received. This is incremented by the
- * ISR in the guest windows driver.
- * @num_empty: Number of times that visor_signal_remove is called and
- * returned Empty Status.
- * @errorflags: Error bits set during SignalReinit to denote trouble with
- * client's fields.
- * @filler: Pad out to 64 byte cacheline.
- */
-struct signal_queue_header {
- /* 1st cache line */
- u32 version;
- u32 chtype;
- u64 size;
- u64 sig_base_offset;
- u64 features;
- u64 num_sent;
- u64 num_overflows;
- u32 signal_size;
- u32 max_slots;
- u32 max_signals;
- u32 head;
- /* 2nd cache line */
- u64 num_received;
- u32 tail;
- u32 reserved1;
- u64 reserved2;
- u64 client_queue;
- u64 num_irq_received;
- u64 num_empty;
- u32 errorflags;
- u8 filler[12];
-} __packed;
-
-/* VISORCHANNEL Guids */
-/* {414815ed-c58c-11da-95a9-00e08161165f} */
-#define VISOR_VHBA_CHANNEL_GUID \
- GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VHBA_CHANNEL_GUID_STR \
- "414815ed-c58c-11da-95a9-00e08161165f"
-struct visorchipset_state {
- u32 created:1;
- u32 attached:1;
- u32 configured:1;
- u32 running:1;
- /* Remaining bits in this 32-bit word are reserved. */
-};
-
-/**
- * struct visor_device - A device type for things "plugged" into the visorbus
- * bus
- * @visorchannel: Points to the channel that the device is
- * associated with.
- * @channel_type_guid: Identifies the channel type to the bus driver.
- * @device: Device struct meant for use by the bus driver
- * only.
- * @list_all: Used by the bus driver to enumerate devices.
- * @timer: Timer fired periodically to do interrupt-type
- * activity.
- * @being_removed: Indicates that the device is being removed from
- * the bus. Private bus driver use only.
- * @visordriver_callback_lock: Used by the bus driver to lock when adding and
- * removing devices.
- * @pausing:			Indicates that a change towards a paused state
- * is in progress. Only modified by the bus driver.
- * @resuming: Indicates that a change towards a running state
- * is in progress. Only modified by the bus driver.
- * @chipset_bus_no: Private field used by the bus driver.
- * @chipset_dev_no:		Private field used by the bus driver.
- * @state: Used to indicate the current state of the
- * device.
- * @inst: Unique GUID for this instance of the device.
- * @name: Name of the device.
- * @pending_msg_hdr: For private use by bus driver to respond to
- * hypervisor requests.
- * @vbus_hdr_info: A pointer to header info. Private use by bus
- * driver.
- * @partition_guid:		Indicates the client partition ID. This should be the
- * same across all visor_devices in the current
- * guest. Private use by bus driver only.
- */
-struct visor_device {
- struct visorchannel *visorchannel;
- guid_t channel_type_guid;
- /* These fields are for private use by the bus driver only. */
- struct device device;
- struct list_head list_all;
- struct timer_list timer;
- bool timer_active;
- bool being_removed;
- struct mutex visordriver_callback_lock; /* synchronize probe/remove */
- bool pausing;
- bool resuming;
- u32 chipset_bus_no;
- u32 chipset_dev_no;
- struct visorchipset_state state;
- guid_t inst;
- u8 *name;
- struct controlvm_message_header *pending_msg_hdr;
- void *vbus_hdr_info;
- guid_t partition_guid;
- struct dentry *debugfs_dir;
- struct dentry *debugfs_bus_info;
-};
-
-#define to_visor_device(x) container_of(x, struct visor_device, device)
-
-typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
- int status);
-
-/*
- * This struct describes a specific visor channel by providing its GUID, name,
- * minimum size, and version.
- */
-struct visor_channeltype_descriptor {
- const guid_t guid;
- const char *name;
- u64 min_bytes;
- u32 version;
-};
-
-/**
- * struct visor_driver - Information provided by each visor driver when it
- * registers with the visorbus driver
- * @name: Name of the visor driver.
- * @owner: The module owner.
- * @channel_types: Types of channels handled by this driver, ending with
- * a zero GUID. Our specialized BUS.match() method knows
- * about this list, and uses it to determine whether this
- * driver will in fact handle a new device that it has
- * detected.
- * @probe: Called when a new device comes online, by our probe()
- * function specified by driver.probe() (triggered
- * ultimately by some call to driver_register(),
- * bus_add_driver(), or driver_attach()).
- * @remove: Called when a new device is removed, by our remove()
- * function specified by driver.remove() (triggered
- * ultimately by some call to device_release_driver()).
- * @channel_interrupt:	Called periodically, whenever there is a possibility
- * that "something interesting" may have happened to the
- * channel.
- * @pause: Called to initiate a change of the device's state. If
- *			the return value is < 0, there was an error and the
- * state transition will NOT occur. If the return value
- * is >= 0, then the state transition was INITIATED
- * successfully, and complete_func() will be called (or
- * was just called) with the final status when either the
- * state transition fails or completes successfully.
- * @resume:		Behaves similarly to @pause.
- * @driver: Private reference to the device driver. For use by bus
- * driver only.
- */
-struct visor_driver {
- const char *name;
- struct module *owner;
- struct visor_channeltype_descriptor *channel_types;
- int (*probe)(struct visor_device *dev);
- void (*remove)(struct visor_device *dev);
- void (*channel_interrupt)(struct visor_device *dev);
- int (*pause)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
- int (*resume)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-
- /* These fields are for private use by the bus driver only. */
- struct device_driver driver;
-};
-
-#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
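For context, here is a hypothetical skeleton of a driver registering against this interface — the shape the kernel-doc above describes: a zero-GUID-terminated channel_types table that the bus's match() walks, plus probe/remove callbacks. All my_ names (and the version value) are illustrative, not from the original header:

	static struct visor_channeltype_descriptor my_channel_types[] = {
		{ VISOR_VHBA_CHANNEL_GUID, "my_vhba",
		  sizeof(struct channel_header), 0 },
		{}	/* zero GUID terminates the table */
	};

	static int my_probe(struct visor_device *dev)
	{
		/* the channel has already been matched and validated here */
		return 0;
	}

	static void my_remove(struct visor_device *dev)
	{
	}

	static struct visor_driver my_driver = {
		.name		= "my_driver",
		.owner		= THIS_MODULE,
		.channel_types	= my_channel_types,
		.probe		= my_probe,
		.remove		= my_remove,
	};

	/* module init/exit would then call:
	 *	visorbus_register_visor_driver(&my_driver);
	 *	visorbus_unregister_visor_driver(&my_driver);
	 */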
-
-int visor_check_channel(struct channel_header *ch, struct device *dev,
- const guid_t *expected_uuid, char *chname,
- u64 expected_min_bytes, u32 expected_version,
- u64 expected_signature);
-
-int visorbus_register_visor_driver(struct visor_driver *drv);
-void visorbus_unregister_visor_driver(struct visor_driver *drv);
-int visorbus_read_channel(struct visor_device *dev,
- unsigned long offset, void *dest,
- unsigned long nbytes);
-int visorbus_write_channel(struct visor_device *dev,
- unsigned long offset, void *src,
- unsigned long nbytes);
-int visorbus_enable_channel_interrupts(struct visor_device *dev);
-void visorbus_disable_channel_interrupts(struct visor_device *dev);
-
-int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
- void *msg);
-int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
- void *msg);
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
-const guid_t *visorchannel_get_guid(struct visorchannel *channel);
-
-#define BUS_ROOT_DEVICE UINT_MAX
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from);
-#endif
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
deleted file mode 100644
index e9c0cd36c48a..000000000000
--- a/include/linux/vlynq.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#ifndef __VLYNQ_H__
-#define __VLYNQ_H__
-
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct module;
-
-#define VLYNQ_NUM_IRQS 32
-
-struct vlynq_mapping {
- u32 size;
- u32 offset;
-};
-
-enum vlynq_divisor {
- vlynq_div_auto = 0,
- vlynq_ldiv1,
- vlynq_ldiv2,
- vlynq_ldiv3,
- vlynq_ldiv4,
- vlynq_ldiv5,
- vlynq_ldiv6,
- vlynq_ldiv7,
- vlynq_ldiv8,
- vlynq_rdiv1,
- vlynq_rdiv2,
- vlynq_rdiv3,
- vlynq_rdiv4,
- vlynq_rdiv5,
- vlynq_rdiv6,
- vlynq_rdiv7,
- vlynq_rdiv8,
- vlynq_div_external
-};
-
-struct vlynq_device_id {
- u32 id;
- enum vlynq_divisor divisor;
- unsigned long driver_data;
-};
-
-struct vlynq_regs;
-struct vlynq_device {
- u32 id, dev_id;
- int local_irq;
- int remote_irq;
- enum vlynq_divisor divisor;
- u32 regs_start, regs_end;
- u32 mem_start, mem_end;
- u32 irq_start, irq_end;
- int irq;
- int enabled;
- struct vlynq_regs *local;
- struct vlynq_regs *remote;
- struct device dev;
-};
-
-struct vlynq_driver {
- char *name;
- struct vlynq_device_id *id_table;
- int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
- void (*remove)(struct vlynq_device *dev);
- struct device_driver driver;
-};
-
-struct plat_vlynq_ops {
- int (*on)(struct vlynq_device *dev);
- void (*off)(struct vlynq_device *dev);
-};
-
-static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
-{
- return container_of(drv, struct vlynq_driver, driver);
-}
-
-static inline struct vlynq_device *to_vlynq_device(struct device *device)
-{
- return container_of(device, struct vlynq_device, dev);
-}
-
-extern struct bus_type vlynq_bus_type;
-
-extern int __vlynq_register_driver(struct vlynq_driver *driver,
- struct module *owner);
-
-static inline int vlynq_register_driver(struct vlynq_driver *driver)
-{
- return __vlynq_register_driver(driver, THIS_MODULE);
-}
-
-static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
-{
- return dev_get_drvdata(&dev->dev);
-}
-
-static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
-{
- dev_set_drvdata(&dev->dev, data);
-}
-
-static inline u32 vlynq_mem_start(struct vlynq_device *dev)
-{
- return dev->mem_start;
-}
-
-static inline u32 vlynq_mem_end(struct vlynq_device *dev)
-{
- return dev->mem_end;
-}
-
-static inline u32 vlynq_mem_len(struct vlynq_device *dev)
-{
- return dev->mem_end - dev->mem_start + 1;
-}
-
-static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
-{
- int irq = dev->irq_start + virq;
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- return irq;
-}
-
-static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
-{
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- return irq - dev->irq_start;
-}
-
-extern void vlynq_unregister_driver(struct vlynq_driver *driver);
-extern int vlynq_enable_device(struct vlynq_device *dev);
-extern void vlynq_disable_device(struct vlynq_device *dev);
-extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping);
-extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping);
-extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
-extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
-
-#endif /* __VLYNQ_H__ */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 18e75974d4e3..747943bc8cc2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -20,12 +20,19 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+ HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
- FOR_ALL_ZONES(PGALLOC),
- FOR_ALL_ZONES(ALLOCSTALL),
- FOR_ALL_ZONES(PGSCAN_SKIP),
+ FOR_ALL_ZONES(PGALLOC)
+ FOR_ALL_ZONES(ALLOCSTALL)
+ FOR_ALL_ZONES(PGSCAN_SKIP)
PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
@@ -33,8 +40,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGREUSE,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
+ PGSTEAL_KHUGEPAGED,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
+ PGSCAN_KHUGEPAGED,
PGSCAN_DIRECT_THROTTLE,
PGSCAN_ANON,
PGSCAN_FILE,
@@ -71,6 +80,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
+#ifdef CONFIG_CMA
+ CMA_ALLOC_SUCCESS,
+ CMA_ALLOC_FAIL,
+#endif
UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
@@ -92,6 +105,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
+ THP_SCAN_EXCEED_NONE_PTE,
+ THP_SCAN_EXCEED_SWAP_PTE,
+ THP_SCAN_EXCEED_SHARED_PTE,
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
THP_SPLIT_PUD,
#endif
@@ -113,13 +129,30 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
#endif /* CONFIG_DEBUG_TLBFLUSH */
-#ifdef CONFIG_DEBUG_VM_VMACACHE
- VMACACHE_FIND_CALLS,
- VMACACHE_FIND_HITS,
-#endif
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
+#ifdef CONFIG_KSM
+ KSM_SWPIN_COPY,
+#endif
+#endif
+#ifdef CONFIG_KSM
+ COW_KSM,
+#endif
+#ifdef CONFIG_ZSWAP
+ ZSWPIN,
+ ZSWPOUT,
+ ZSWPWB,
+#endif
+#ifdef CONFIG_X86
+ DIRECT_MAP_LEVEL2_SPLIT,
+ DIRECT_MAP_LEVEL3_SPLIT,
+#endif
+#ifdef CONFIG_PER_VMA_LOCK_STATS
+ VMA_LOCK_SUCCESS,
+ VMA_LOCK_ABORT,
+ VMA_LOCK_RETRY,
+ VMA_LOCK_MISS,
#endif
NR_VM_EVENT_ITEMS
};
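The comma shuffle in the FOR_ALL_ZONES() hunk above is deliberate: DEVICE_ZONE(xx) expands either to xx##_DEVICE, (with a trailing comma) or to nothing, so the comma has to live inside the per-zone helper macros and the use sites must drop theirs. Assuming the DMA/DMA32/HIGHMEM zones are configured alongside CONFIG_ZONE_DEVICE, FOR_ALL_ZONES(PGALLOC) now expands to roughly:

	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH,
	PGALLOC_MOVABLE, PGALLOC_DEVICE,

which keeps the enum well-formed whichever optional zones are compiled out.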
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
deleted file mode 100644
index 6fce268a4588..000000000000
--- a/include/linux/vmacache.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_VMACACHE_H
-#define __LINUX_VMACACHE_H
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-static inline void vmacache_flush(struct task_struct *tsk)
-{
- memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
-}
-
-extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
-extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
- unsigned long addr);
-
-#ifndef CONFIG_MMU
-extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
-#endif
-
-static inline void vmacache_invalidate(struct mm_struct *mm)
-{
- mm->vmacache_seqnum++;
-}
-
-#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 0221f852a7e1..98ea90e90439 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -14,6 +14,7 @@
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
struct notifier_block; /* in notifier.h */
+struct iov_iter; /* in uio.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
@@ -22,31 +23,25 @@ struct notifier_block; /* in notifier.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
-#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_NO_GUARD 0x00000040 /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
+#define VM_ALLOW_HUGE_VMAP 0x00000400 /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
-/*
- * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
- *
- * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
- * shadow memory has been mapped. It's used to handle allocation errors so that
- * we don't try to poison shadow on free if it was never allocated.
- *
- * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
- * determine which allocations need the module shadow freed.
- */
-
-/*
- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
- * vfree_atomic().
- */
-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+#define VM_DEFER_KMEMLEAK 0x00000800 /* defer kmemleak object creation */
+#else
+#define VM_DEFER_KMEMLEAK 0
+#endif
+#define VM_SPARSE 0x00001000 /* sparse vm_area. not all pages are present. */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
* Maximum alignment for ioremap() regions.
- * Can be overriden by arch-specific value.
+ * Can be overridden by arch-specific value.
*/
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
@@ -58,6 +53,9 @@ struct vm_struct {
unsigned long size;
unsigned long flags;
struct page **pages;
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
+ unsigned int page_order;
+#endif
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
@@ -71,19 +69,62 @@ struct vmap_area {
struct list_head list; /* address sorted list */
/*
- * The following three variables can be packed, because
- * a vmap_area object is always one of the three states:
- * 1) in "free" tree (root is vmap_area_root)
- * 2) in "busy" tree (root is free_vmap_area_root)
- * 3) in purge list (head is vmap_purge_list)
+ * The following two variables can be packed, because
+ * a vmap_area object can be either:
+ * 1) in "free" tree (root is free_vmap_area_root)
+ * 2) or "busy" tree (root is vmap_area_root)
*/
union {
unsigned long subtree_max_size; /* in "free" tree */
struct vm_struct *vm; /* in "busy" tree */
- struct llist_node purge_list; /* in purge list */
};
+ unsigned long flags; /* mark type of vm_map_ram area */
};
+/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
+#ifndef arch_vmap_p4d_supported
+static inline bool arch_vmap_p4d_supported(pgprot_t prot)
+{
+ return false;
+}
+#endif
+
+#ifndef arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return false;
+}
+#endif
+
+#ifndef arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return false;
+}
+#endif
+
+#ifndef arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ return PAGE_SIZE;
+}
+#endif
+
+#ifndef arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ return PAGE_SHIFT;
+}
+#endif
+
+#ifndef arch_vmap_pgprot_tagged
+static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
+{
+ return prot;
+}
+#endif
+
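Architectures that select HAVE_ARCH_HUGE_VMAP opt in by defining the hook name as a macro before this header is included, typically in their asm/vmalloc.h; because the generic versions above are guarded by #ifndef, the arch definition takes precedence. A hypothetical sketch for an arch whose PMD level can map huge vmalloc areas unconditionally:

	/* in a hypothetical <asm/vmalloc.h> */
	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
	{
		/* e.g. PMD-sized huge mappings are always usable here */
		return true;
	}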
/*
* Highlevel APIs for driver use
*/
@@ -92,35 +133,38 @@ extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
#ifdef CONFIG_MMU
-extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
-static inline void vmalloc_init(void)
-{
-}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
-extern void *vmalloc(unsigned long size);
-extern void *vzalloc(unsigned long size);
-extern void *vmalloc_user(unsigned long size);
-extern void *vmalloc_node(unsigned long size, int node);
-extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_32(unsigned long size);
-extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
+extern void *vmalloc(unsigned long size) __alloc_size(1);
+extern void *vzalloc(unsigned long size) __alloc_size(1);
+extern void *vmalloc_user(unsigned long size) __alloc_size(1);
+extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vmalloc_32(unsigned long size) __alloc_size(1);
+extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
+ const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
- int node, const void *caller);
+ int node, const void *caller) __alloc_size(1);
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+
+extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
+extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
@@ -167,16 +211,33 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
const void *caller);
+void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
+struct vmap_area *find_vmap_area(unsigned long addr);
+
+static inline bool is_vm_area_hugepages(const void *addr)
+{
+ /*
+	 * This may not tell with 100% certainty whether the area is mapped
+	 * with > PAGE_SIZE page table entries: if for some reason the
+	 * architecture indicates larger sizes are available but decides not
+	 * to use them, nothing prevents that. This only indicates the size
+	 * of the physical pages allocated in the vmalloc layer.
+ */
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
+ return find_vm_area(addr)->page_order > 0;
+#else
+ return false;
+#endif
+}
#ifdef CONFIG_MMU
-extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
- pgprot_t prot, struct page **pages);
-int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
- struct page **pages);
-extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
-extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+int vm_area_map_pages(struct vm_struct *area, unsigned long start,
+ unsigned long end, struct page **pages);
+void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
+ unsigned long end);
+void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
struct vm_struct *vm = find_vm_area(addr);
@@ -184,36 +245,19 @@ static inline void set_vm_flush_reset_perms(void *addr)
if (vm)
vm->flags |= VM_FLUSH_RESET_PERMS;
}
+
#else
-static inline int
-map_kernel_range_noflush(unsigned long start, unsigned long size,
- pgprot_t prot, struct page **pages)
-{
- return size >> PAGE_SHIFT;
-}
-#define map_kernel_range map_kernel_range_noflush
-static inline void
-unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-}
-#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
-/* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
-extern void free_vm_area(struct vm_struct *area);
-
-/* for /dev/kmem */
-extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
+/* for /proc/kcore */
+extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
/*
- * Internals. Dont't use..
+ * Internals. Don't use..
*/
-extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
@@ -249,4 +293,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
+#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
+bool vmalloc_dump_obj(void *object);
+#else
+static inline bool vmalloc_dump_obj(void *object) { return false; }
+#endif
+
#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vmcore_info.h b/include/linux/vmcore_info.h
new file mode 100644
index 000000000000..e1dec1a6a749
--- /dev/null
+++ b/include/linux/vmcore_info.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VMCORE_INFO_H
+#define LINUX_VMCORE_INFO_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+
+#define CRASH_CORE_NOTE_NAME "CORE"
+#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
+#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
+#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+
+/*
+ * The per-cpu notes area is a list of notes terminated by a "NULL"
+ * note header. For kdump, the code in vmcore.c runs in the context
+ * of the second kernel to combine them into one note.
+ */
+#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ CRASH_CORE_NOTE_NAME_BYTES + \
+ CRASH_CORE_NOTE_DESC_BYTES)
+
+#define VMCOREINFO_BYTES PAGE_SIZE
+#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
+#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ VMCOREINFO_NOTE_NAME_BYTES + \
+ VMCOREINFO_BYTES)
+
+typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+/* Per cpu memory for storing cpu states in case of system crash. */
+extern note_buf_t __percpu *crash_notes;
+
+void crash_update_vmcoreinfo_safecopy(void *ptr);
+void crash_save_vmcoreinfo(void);
+void arch_crash_save_vmcoreinfo(void);
+__printf(1, 2)
+void vmcoreinfo_append_str(const char *fmt, ...);
+phys_addr_t paddr_vmcoreinfo_note(void);
+
+#define VMCOREINFO_OSRELEASE(value) \
+ vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_BUILD_ID() \
+ ({ \
+ static_assert(sizeof(vmlinux_build_id) == 20); \
+ vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \
+ })
+
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
+#define VMCOREINFO_SYMBOL(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SYMBOL_ARRAY(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
+#define VMCOREINFO_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(name))
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(struct name))
+#define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_TYPE_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(name, field))
+#define VMCOREINFO_LENGTH(name, value) \
+ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
+#define VMCOREINFO_NUMBER(name) \
+ vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
+#define VMCOREINFO_CONFIG(name) \
+ vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+
+extern unsigned char *vmcoreinfo_data;
+extern size_t vmcoreinfo_size;
+extern u32 *vmcoreinfo_note;
+
+Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
+ void *data, size_t data_len);
+void final_note(Elf_Word *buf);
+#endif /* LINUX_VMCORE_INFO_H */
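A hypothetical arch_crash_save_vmcoreinfo() sketch showing how the macros above emit key=value lines into the VMCOREINFO note; the recorded symbols here are illustrative, not a required set:

	void arch_crash_save_vmcoreinfo(void)
	{
		VMCOREINFO_NUMBER(PAGE_SHIFT);		/* NUMBER(PAGE_SHIFT)=12 on 4K-page configs */
		VMCOREINFO_SYMBOL(vmcoreinfo_data);	/* SYMBOL(vmcoreinfo_data)=<address> */
		VMCOREINFO_STRUCT_SIZE(page);		/* SIZE(page)=<bytes> */
		VMCOREINFO_OFFSET(page, flags);		/* OFFSET(page.flags)=<bytes> */
	}

Post-crash tools then parse these strings out of the note to navigate the crashed kernel's data structures.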
diff --git a/include/linux/vme.h b/include/linux/vme.h
deleted file mode 100644
index 7e82bf500f01..000000000000
--- a/include/linux/vme.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VME_H_
-#define _VME_H_
-
-/* Resource Type */
-enum vme_resource_type {
- VME_MASTER,
- VME_SLAVE,
- VME_DMA,
- VME_LM
-};
-
-/* VME Address Spaces */
-#define VME_A16 0x1
-#define VME_A24 0x2
-#define VME_A32 0x4
-#define VME_A64 0x8
-#define VME_CRCSR 0x10
-#define VME_USER1 0x20
-#define VME_USER2 0x40
-#define VME_USER3 0x80
-#define VME_USER4 0x100
-
-#define VME_A16_MAX 0x10000ULL
-#define VME_A24_MAX 0x1000000ULL
-#define VME_A32_MAX 0x100000000ULL
-#define VME_A64_MAX 0x10000000000000000ULL
-#define VME_CRCSR_MAX 0x1000000ULL
-
-
-/* VME Cycle Types */
-#define VME_SCT 0x1
-#define VME_BLT 0x2
-#define VME_MBLT 0x4
-#define VME_2eVME 0x8
-#define VME_2eSST 0x10
-#define VME_2eSSTB 0x20
-
-#define VME_2eSST160 0x100
-#define VME_2eSST267 0x200
-#define VME_2eSST320 0x400
-
-#define VME_SUPER 0x1000
-#define VME_USER 0x2000
-#define VME_PROG 0x4000
-#define VME_DATA 0x8000
-
-/* VME Data Widths */
-#define VME_D8 0x1
-#define VME_D16 0x2
-#define VME_D32 0x4
-#define VME_D64 0x8
-
-/* Arbitration Scheduling Modes */
-#define VME_R_ROBIN_MODE 0x1
-#define VME_PRIORITY_MODE 0x2
-
-#define VME_DMA_PATTERN (1<<0)
-#define VME_DMA_PCI (1<<1)
-#define VME_DMA_VME (1<<2)
-
-#define VME_DMA_PATTERN_BYTE (1<<0)
-#define VME_DMA_PATTERN_WORD (1<<1)
-#define VME_DMA_PATTERN_INCREMENT (1<<2)
-
-#define VME_DMA_VME_TO_MEM (1<<0)
-#define VME_DMA_MEM_TO_VME (1<<1)
-#define VME_DMA_VME_TO_VME (1<<2)
-#define VME_DMA_MEM_TO_MEM (1<<3)
-#define VME_DMA_PATTERN_TO_VME (1<<4)
-#define VME_DMA_PATTERN_TO_MEM (1<<5)
-
-struct vme_dma_attr {
- u32 type;
- void *private;
-};
-
-struct vme_resource {
- enum vme_resource_type type;
- struct list_head *entry;
-};
-
-extern struct bus_type vme_bus_type;
-
-/* Number of VME interrupt vectors */
-#define VME_NUM_STATUSID 256
-
-/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
-#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
-#define VME_MAX_SLOTS 32
-
-#define VME_SLOT_CURRENT -1
-#define VME_SLOT_ALL -2
-
-/**
- * struct vme_dev - Structure representing a VME device
- * @num: The device number
- * @bridge: Pointer to the bridge device this device is on
- * @dev: Internal device structure
- * @drv_list: List of devices (per driver)
- * @bridge_list: List of devices (per bridge)
- */
-struct vme_dev {
- int num;
- struct vme_bridge *bridge;
- struct device dev;
- struct list_head drv_list;
- struct list_head bridge_list;
-};
-
-/**
- * struct vme_driver - Structure representing a VME driver
- * @name: Driver name, should be unique among VME drivers and usually the same
- * as the module name.
- * @match: Callback used to determine whether probe should be run.
- * @probe: Callback for device binding, called when new device is detected.
- * @remove: Callback, called on device removal.
- * @driver: Underlying generic device driver structure.
- * @devices: List of VME devices (struct vme_dev) associated with this driver.
- */
-struct vme_driver {
- const char *name;
- int (*match)(struct vme_dev *);
- int (*probe)(struct vme_dev *);
- int (*remove)(struct vme_dev *);
- struct device_driver driver;
- struct list_head devices;
-};
-
-void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
-void vme_free_consistent(struct vme_resource *, size_t, void *,
- dma_addr_t);
-
-size_t vme_get_size(struct vme_resource *);
-int vme_check_window(u32 aspace, unsigned long long vme_base,
- unsigned long long size);
-
-struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
-int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, u32, u32);
-int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, u32 *, u32 *);
-void vme_slave_free(struct vme_resource *);
-
-struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
-int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, u32, u32, u32);
-int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, u32 *, u32 *, u32 *);
-ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
-ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
-unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
- unsigned int, loff_t);
-int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
-void vme_master_free(struct vme_resource *);
-
-struct vme_resource *vme_dma_request(struct vme_dev *, u32);
-struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
-struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
-struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
-struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
-void vme_dma_free_attribute(struct vme_dma_attr *);
-int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
-int vme_dma_list_exec(struct vme_dma_list *);
-int vme_dma_list_free(struct vme_dma_list *);
-int vme_dma_free(struct vme_resource *);
-
-int vme_irq_request(struct vme_dev *, int, int,
- void (*callback)(int, int, void *), void *);
-void vme_irq_free(struct vme_dev *, int, int);
-int vme_irq_generate(struct vme_dev *, int, int);
-
-struct vme_resource *vme_lm_request(struct vme_dev *);
-int vme_lm_count(struct vme_resource *);
-int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
-int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
-int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *);
-int vme_lm_detach(struct vme_resource *, int);
-void vme_lm_free(struct vme_resource *);
-
-int vme_slot_num(struct vme_dev *);
-int vme_bus_num(struct vme_dev *);
-
-int vme_register_driver(struct vme_driver *, unsigned int);
-void vme_unregister_driver(struct vme_driver *);
-
-
-#endif /* _VME_H_ */
-
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 6d28bc433c1c..6a2f51ebbfd3 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -37,7 +37,7 @@ extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
extern void vmpressure_init(struct vmpressure *vmpr);
extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
-extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
+extern struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr);
extern int vmpressure_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd,
const char *args);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 91220ace31da..343906a98d6e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -28,7 +28,7 @@ struct reclaim_stat {
unsigned nr_writeback;
unsigned nr_immediate;
unsigned nr_pageout;
- unsigned nr_activate[2];
+ unsigned nr_activate[ANON_AND_FILE];
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
unsigned nr_lazyfree_fail;
@@ -125,10 +125,10 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
-#ifdef CONFIG_DEBUG_VM_VMACACHE
-#define count_vm_vmacache_event(x) count_vm_event(x)
+#ifdef CONFIG_PER_VMA_LOCK_STATS
+#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
-#define count_vm_vmacache_event(x) do {} while (0)
+#define count_vm_vma_lock_event(x) do {} while (0)
#endif
#define __count_zid_vm_events(item, zid, delta) \
@@ -138,34 +138,27 @@ static inline void vm_events_fold_cpu(int cpu)
* Zone and node-based page accounting with per cpu differentials.
*/
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
-extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
+extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#ifdef CONFIG_NUMA
-static inline void zone_numa_state_add(long x, struct zone *zone,
- enum numa_stat_item item)
+static inline void zone_numa_event_add(long x, struct zone *zone,
+ enum numa_stat_item item)
{
- atomic_long_add(x, &zone->vm_numa_stat[item]);
- atomic_long_add(x, &vm_numa_stat[item]);
+ atomic_long_add(x, &zone->vm_numa_event[item]);
+ atomic_long_add(x, &vm_numa_event[item]);
}
-static inline unsigned long global_numa_state(enum numa_stat_item item)
+static inline unsigned long zone_numa_event_state(struct zone *zone,
+ enum numa_stat_item item)
{
- long x = atomic_long_read(&vm_numa_stat[item]);
-
- return x;
+ return atomic_long_read(&zone->vm_numa_event[item]);
}
-static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
- enum numa_stat_item item)
+static inline unsigned long
+global_numa_event_state(enum numa_stat_item item)
{
- long x = atomic_long_read(&zone->vm_numa_stat[item]);
- int cpu;
-
- for_each_online_cpu(cpu)
- x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
-
- return x;
+ return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
@@ -236,7 +229,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
- x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+ x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
if (x < 0)
x = 0;
@@ -245,18 +238,38 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
}
#ifdef CONFIG_NUMA
-extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
+/* See __count_vm_event comment on why raw_cpu_inc is used. */
+static inline void
+__count_numa_event(struct zone *zone, enum numa_stat_item item)
+{
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+ raw_cpu_inc(pzstats->vm_numa_event[item]);
+}
+
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+ raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
+
extern unsigned long sum_zone_node_page_state(int node,
enum zone_stat_item item);
-extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
+extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
enum node_stat_item item);
+extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
+static inline void fold_vm_numa_events(void)
+{
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
@@ -291,7 +304,7 @@ struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
loff_t *ppos);
-void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
@@ -312,6 +325,17 @@ static inline void __mod_zone_page_state(struct zone *zone,
static inline void __mod_node_page_state(struct pglist_data *pgdat,
enum node_stat_item item, int delta)
{
+ if (vmstat_item_in_bytes(item)) {
+ /*
+ * Only cgroups use subpage accounting right now; at
+ * the global level, these items still change in
+ * multiples of whole pages. Store them as pages
+ * internally to keep the per-cpu counters compact.
+ */
+ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+ delta >>= PAGE_SHIFT;
+ }
+
node_page_state_add(delta, pgdat, item);
}
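As the comment in the hunk above notes, byte-counted items still move in whole pages at this level, so a caller updating one of the _B-suffixed node stats passes a page-multiple byte delta. A sketch (assuming NR_SLAB_RECLAIMABLE_B as the byte-counted item and a pgdat already in hand):

	/* account one freshly allocated slab page; delta given in bytes */
	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);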
@@ -388,9 +412,81 @@ static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
static inline void drain_zonestat(struct zone *zone,
- struct per_cpu_pageset *pset) { }
+ struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */
+static inline void __zone_stat_mod_folio(struct folio *folio,
+ enum zone_stat_item item, long nr)
+{
+ __mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void __zone_stat_add_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __zone_stat_sub_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void zone_stat_mod_folio(struct folio *folio,
+ enum zone_stat_item item, long nr)
+{
+ mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void zone_stat_add_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void zone_stat_sub_folio(struct folio *folio,
+ enum zone_stat_item item)
+{
+ mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void __node_stat_mod_folio(struct folio *folio,
+ enum node_stat_item item, long nr)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void __node_stat_add_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __node_stat_sub_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ __mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void node_stat_mod_folio(struct folio *folio,
+ enum node_stat_item item, long nr)
+{
+ mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void node_stat_add_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void node_stat_sub_folio(struct folio *folio,
+ enum node_stat_item item)
+{
+ mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
int migratetype)
{
@@ -417,7 +513,7 @@ static inline const char *numa_stat_name(enum numa_stat_item item)
static inline const char *node_stat_name(enum node_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
item];
}
@@ -429,7 +525,7 @@ static inline const char *lru_list_name(enum lru_list lru)
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
item];
}
@@ -438,11 +534,102 @@ static inline const char *writeback_stat_name(enum writeback_stat_item item)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
NR_VM_WRITEBACK_STAT_ITEMS +
item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+#ifdef CONFIG_MEMCG
+
+void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ int val);
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_lruvec_state(lruvec, idx, val);
+ local_irq_restore(flags);
+}
+
+void __lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val);
+
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __lruvec_stat_mod_folio(folio, idx, val);
+ local_irq_restore(flags);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ lruvec_stat_mod_folio(page_folio(page), idx, val);
+}
+
+#else
+
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(folio_pgdat(folio), idx, val);
+}
+
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(folio_pgdat(folio), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+#endif /* CONFIG_MEMCG */
+
+static inline void __lruvec_stat_add_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void __lruvec_stat_sub_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
+
+static inline void lruvec_stat_add_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void lruvec_stat_sub_folio(struct folio *folio,
+ enum node_stat_item idx)
+{
+ lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index be0afe6f379b..6fb663b36f72 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -12,15 +12,20 @@
#include <linux/bits.h>
/* Register offsets. */
-#define VMCI_STATUS_ADDR 0x00
-#define VMCI_CONTROL_ADDR 0x04
-#define VMCI_ICR_ADDR 0x08
-#define VMCI_IMR_ADDR 0x0c
-#define VMCI_DATA_OUT_ADDR 0x10
-#define VMCI_DATA_IN_ADDR 0x14
-#define VMCI_CAPS_ADDR 0x18
-#define VMCI_RESULT_LOW_ADDR 0x1c
-#define VMCI_RESULT_HIGH_ADDR 0x20
+#define VMCI_STATUS_ADDR 0x00
+#define VMCI_CONTROL_ADDR 0x04
+#define VMCI_ICR_ADDR 0x08
+#define VMCI_IMR_ADDR 0x0c
+#define VMCI_DATA_OUT_ADDR 0x10
+#define VMCI_DATA_IN_ADDR 0x14
+#define VMCI_CAPS_ADDR 0x18
+#define VMCI_RESULT_LOW_ADDR 0x1c
+#define VMCI_RESULT_HIGH_ADDR 0x20
+#define VMCI_DATA_OUT_LOW_ADDR 0x24
+#define VMCI_DATA_OUT_HIGH_ADDR 0x28
+#define VMCI_DATA_IN_LOW_ADDR 0x2c
+#define VMCI_DATA_IN_HIGH_ADDR 0x30
+#define VMCI_GUEST_PAGE_SHIFT 0x34
/* Max number of devices. */
#define VMCI_MAX_DEVICES 1
@@ -39,17 +44,27 @@
#define VMCI_CAPS_DATAGRAM BIT(2)
#define VMCI_CAPS_NOTIFICATIONS BIT(3)
#define VMCI_CAPS_PPN64 BIT(4)
+#define VMCI_CAPS_DMA_DATAGRAM BIT(5)
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM BIT(0)
#define VMCI_ICR_NOTIFICATION BIT(1)
+#define VMCI_ICR_DMA_DATAGRAM BIT(2)
/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM BIT(0)
#define VMCI_IMR_NOTIFICATION BIT(1)
+#define VMCI_IMR_DMA_DATAGRAM BIT(2)
-/* Maximum MSI/MSI-X interrupt vectors in the device. */
-#define VMCI_MAX_INTRS 2
+/*
+ * Maximum MSI/MSI-X interrupt vectors in the device.
+ * If VMCI_CAPS_DMA_DATAGRAM is supported by the device,
+ * VMCI_MAX_INTRS_DMA_DATAGRAM vectors are available,
+ * otherwise only VMCI_MAX_INTRS_NOTIFICATION.
+ */
+#define VMCI_MAX_INTRS_NOTIFICATION 2
+#define VMCI_MAX_INTRS_DMA_DATAGRAM 3
+#define VMCI_MAX_INTRS VMCI_MAX_INTRS_DMA_DATAGRAM
/*
* Supported interrupt vectors. There is one for each ICR value above,
@@ -58,6 +73,7 @@
enum {
VMCI_INTR_DATAGRAM = 0,
VMCI_INTR_NOTIFICATION = 1,
+ VMCI_INTR_DMA_DATAGRAM = 2,
};
/*
@@ -66,7 +82,7 @@ enum {
* consists of at least two pages, the memory limit also dictates the
* number of queue pairs a guest can create.
*/
-#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024))
#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
/*
@@ -80,7 +96,53 @@ enum {
* too much kernel memory (especially on vmkernel). We limit a queuepair to
* 32 KB, or 16 KB per queue for symmetrical pairs.
*/
-#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))
+
+/*
+ * The version of the VMCI device that supports MMIO access to registers
+ * requests 256KB for BAR1 whereas the version of VMCI that supports
+ * MSI/MSI-X only requests 8KB. The layout of the larger 256KB region is:
+ * - the first 128KB are used for MSI/MSI-X.
+ * - the following 64KB are used for MMIO register access.
+ * - the remaining 64KB are unused.
+ */
+#define VMCI_WITH_MMIO_ACCESS_BAR_SIZE ((size_t)(256 * 1024))
+#define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024))
+#define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024))
+
+/*
+ * For VMCI devices supporting the VMCI_CAPS_DMA_DATAGRAM capability, the
+ * sending and receiving of datagrams can be performed using DMA to/from
+ * a driver allocated buffer.
+ * Sending and receiving will be handled as follows:
+ * - when sending datagrams, the driver initializes the buffer where the
+ * data part will refer to the outgoing VMCI datagram, sets the busy flag
+ * to 1 and writes the address of the buffer to VMCI_DATA_OUT_HIGH_ADDR
+ * and VMCI_DATA_OUT_LOW_ADDR. Writing to VMCI_DATA_OUT_LOW_ADDR triggers
+ * the device processing of the buffer. When the device has processed the
+ * buffer, it will write the result value to the buffer and then clear the
+ * busy flag.
+ * - when receiving datagrams, the driver initializes the buffer where the
+ * data part will describe the receive buffer, clears the busy flag and
+ * writes the address of the buffer to VMCI_DATA_IN_HIGH_ADDR and
+ * VMCI_DATA_IN_LOW_ADDR. Writing to VMCI_DATA_IN_LOW_ADDR triggers the
+ * device processing of the buffer. The device will copy as many available
+ *   datagrams into the buffer as possible, and then set the busy flag.
+ * When the busy flag is set, the driver will process the datagrams in the
+ * buffer.
+ */
+struct vmci_data_in_out_header {
+ uint32_t busy;
+ uint32_t opcode;
+ uint32_t size;
+ uint32_t rsvd;
+ uint64_t result;
+};
+
+struct vmci_sg_elem {
+ uint64_t addr;
+ uint64_t size;
+};
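A hypothetical send-side sketch following the sequence documented in the comment above. The register offsets and header struct are the ones defined in this file; the MMIO base, the buffer layout beyond the header, and the my_ name are assumptions:

	static void my_vmci_dma_send(void __iomem *iobase,
				     struct vmci_data_in_out_header *hdr,
				     dma_addr_t buf_pa)
	{
		hdr->busy = 1;	/* driver hands the buffer to the device */

		writel(upper_32_bits(buf_pa), iobase + VMCI_DATA_OUT_HIGH_ADDR);
		/* the low-address write is what triggers device processing */
		writel(lower_32_bits(buf_pa), iobase + VMCI_DATA_OUT_LOW_ADDR);

		/* the device writes hdr->result, then clears hdr->busy */
	}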
/*
* We have a fixed set of resource IDs available in the VMX.
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 59bd50f99291..c3a8117dabe8 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -14,6 +14,7 @@
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
@@ -31,6 +32,9 @@ struct vringh {
/* Can we get away with weak barriers? */
bool weak_barriers;
+ /* Use user's VA */
+ bool use_va;
+
/* Last available index we saw (ie. where we're up to). */
u16 last_avail_idx;
@@ -46,10 +50,16 @@ struct vringh {
/* IOTLB for this vring */
struct vhost_iotlb *iotlb;
+ /* spinlock to synchronize IOTLB accesses */
+ spinlock_t *iotlb_lock;
+
/* The function to call to notify the guest about added buffers */
void (*notify)(struct vringh *);
};
+struct virtio_device;
+typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
+
/**
* struct vringh_config_ops - ops for creating a host vring from a virtio driver
* @find_vrhs: find the host vrings and instantiate them
@@ -61,8 +71,6 @@ struct vringh {
* Returns 0 on success or error status
* @del_vrhs: free the host vrings found by find_vrhs().
*/
-struct virtio_device;
-typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
struct vringh *vrhs[], vrh_callback_t *callbacks[]);
@@ -77,6 +85,12 @@ struct vringh_range {
/**
* struct vringh_iov - iovec mangler.
+ * @iov: array of iovecs to operate on
+ * @consumed: number of bytes consumed within iov[i]
+ * @i: index of current iovec
+ * @used: number of iovecs present in @iov
+ * @max_num: maximum number of iovecs;
+ *	      corresponds to the memory allocated for @iov
*
* Mangles iovec in place, and restores it.
* Remaining data is iov + i, of used - i elements.
@@ -88,7 +102,13 @@ struct vringh_iov {
};
/**
- * struct vringh_iov - kvec mangler.
+ * struct vringh_kiov - kvec mangler.
+ * @iov: array of iovecs to operate on
+ * @consumed: number of bytes consumed within iov[i]
+ * @i: index of current iovec
+ * @used: number of iovecs present in @iov
+ * @max_num: maximum number of iovecs;
+ *	      corresponds to the memory allocated for @iov
*
* Mangles kvec in place, and restores it.
* Remaining data is iov + i, of used - i elements.
@@ -196,6 +216,19 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
kiov->iov = NULL;
}
+static inline size_t vringh_kiov_length(struct vringh_kiov *kiov)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = kiov->i; i < kiov->used; i++)
+ len += kiov->iov[i].iov_len;
+
+ return len;
+}
+
+void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len);
+
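A small usage sketch for the two helpers just added: vringh_kiov_length() bounds-checks what the guest made readable, and vringh_kiov_advance() positions the kiov past a header before the payload is consumed. hdr_len, riov and the error value are illustrative:

	if (vringh_kiov_length(riov) < hdr_len)
		return -EINVAL;	/* descriptor chain too short for the header */

	/* position riov at the payload that follows the header */
	vringh_kiov_advance(riov, hdr_len);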
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
@@ -258,7 +291,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
-void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb);
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+ spinlock_t *iotlb_lock);
int vringh_init_iotlb(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
@@ -266,6 +300,12 @@ int vringh_init_iotlb(struct vringh *vrh, u64 features,
struct vring_avail *avail,
struct vring_used *used);
+int vringh_init_iotlb_va(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used);
+
int vringh_getdesc_iotlb(struct vringh *vrh,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 848db1b1569f..919d999a8c1d 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -16,7 +16,7 @@
#include <linux/string.h>
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
#include <asm/vga.h>
#endif
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 349e39c3ab60..d008c3d0a9bb 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -16,18 +16,6 @@
#include <linux/consolemap.h>
#include <linux/notifier.h>
-/*
- * Presently, a lot of graphics programs do not restore the contents of
- * the higher font pages. Defining this flag will avoid use of them, but
- * will lose support for PIO_FONTRESET. Note that many font operations are
- * not likely to work with these programs anyway; they need to be
- * fixed. The linux/Documentation directory includes a code snippet
- * to save and restore the text font.
- */
-#ifdef CONFIG_VGA_CONSOLE
-#define BROKEN_GRAPHICS_PROGRAMS 1
-#endif
-
void kd_mksound(unsigned int hz, unsigned int ticks);
int kbd_rate(struct kbd_repeat *rep);
@@ -37,12 +25,12 @@ extern int fg_console, last_console, want_console;
int vc_allocate(unsigned int console);
int vc_cons_allocated(unsigned int console);
-int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
+int __vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines,
+ bool from_user);
struct vc_data *vc_deallocate(unsigned int console);
void reset_palette(struct vc_data *vc);
void do_blank_screen(int entering_gfx);
void do_unblank_screen(int leaving_gfx);
-void unblank_screen(void);
void poke_blanked_console(void);
int con_font_op(struct vc_data *vc, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
@@ -55,6 +43,12 @@ void redraw_screen(struct vc_data *vc, int is_switch);
#define update_screen(x) redraw_screen(x, 0)
#define switch_screen(x) redraw_screen(x, 1)
+static inline int vc_resize(struct vc_data *vc, unsigned int cols,
+ unsigned int lines)
+{
+ return __vc_resize(vc, cols, lines, false);
+}
+
struct tty_struct;
int tioclinux(struct tty_struct *tty, unsigned long arg);
@@ -160,29 +154,25 @@ void hide_boot_cursor(bool hide);
/* keyboard provided interfaces */
int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm);
-int vt_do_kdskbmode(int console, unsigned int arg);
-int vt_do_kdskbmeta(int console, unsigned int arg);
+int vt_do_kdskbmode(unsigned int console, unsigned int arg);
+int vt_do_kdskbmeta(unsigned int console, unsigned int arg);
int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
int perm);
int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
- int console);
+ unsigned int console);
int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm);
-int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm);
-int vt_do_kdgkbmode(int console);
-int vt_do_kdgkbmeta(int console);
-void vt_reset_unicode(int console);
+int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm);
+int vt_do_kdgkbmode(unsigned int console);
+int vt_do_kdgkbmeta(unsigned int console);
+void vt_reset_unicode(unsigned int console);
int vt_get_shift_state(void);
-void vt_reset_keyboard(int console);
-int vt_get_leds(int console, int flag);
-int vt_get_kbd_mode_bit(int console, int bit);
-void vt_set_kbd_mode_bit(int console, int bit);
-void vt_clr_kbd_mode_bit(int console, int bit);
-void vt_set_led_state(int console, int leds);
-void vt_set_led_state(int console, int leds);
-void vt_kbd_con_start(int console);
-void vt_kbd_con_stop(int console);
-
-void vc_scrolldelta_helper(struct vc_data *c, int lines,
- unsigned int rolled_over, void *_base, unsigned int size);
+void vt_reset_keyboard(unsigned int console);
+int vt_get_leds(unsigned int console, int flag);
+int vt_get_kbd_mode_bit(unsigned int console, int bit);
+void vt_set_kbd_mode_bit(unsigned int console, int bit);
+void vt_clr_kbd_mode_bit(unsigned int console, int bit);
+void vt_set_led_state(unsigned int console, int leds);
+void vt_kbd_con_start(unsigned int console);
+void vt_kbd_con_stop(unsigned int console);
#endif /* _VT_KERN_H */
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 2cdeca062db3..3684487d01e1 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -3,12 +3,46 @@
#define _LINUX_KERNEL_VTIME_H
#include <linux/context_tracking_state.h>
+#include <linux/sched.h>
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif
+/*
+ * Common vtime APIs
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_account_kernel(struct task_struct *tsk);
+extern void vtime_account_idle(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_user_enter(struct task_struct *tsk);
+extern void vtime_user_exit(struct task_struct *tsk);
+extern void vtime_guest_enter(struct task_struct *tsk);
+extern void vtime_guest_exit(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void vtime_user_enter(struct task_struct *tsk) { }
+static inline void vtime_user_exit(struct task_struct *tsk) { }
+static inline void vtime_guest_enter(struct task_struct *tsk) { }
+static inline void vtime_guest_exit(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
+#endif
-struct task_struct;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
+extern void vtime_account_softirq(struct task_struct *tsk);
+extern void vtime_account_hardirq(struct task_struct *tsk);
+extern void vtime_flush(struct task_struct *tsk);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
+static inline void vtime_account_softirq(struct task_struct *tsk) { }
+static inline void vtime_account_hardirq(struct task_struct *tsk) { }
+static inline void vtime_flush(struct task_struct *tsk) { }
+#endif
/*
* vtime_accounting_enabled_this_cpu() definitions/declarations
@@ -18,6 +52,18 @@ struct task_struct;
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);
+static __always_inline void vtime_account_guest_enter(void)
+{
+ vtime_account_kernel(current);
+ current->flags |= PF_VCPU;
+}
+
+static __always_inline void vtime_account_guest_exit(void)
+{
+ vtime_account_kernel(current);
+ current->flags &= ~PF_VCPU;
+}
+
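
Taken together, these helpers bracket guest execution: enter accounts the
elapsed kernel time and marks the task with PF_VCPU, exit does the reverse.
A minimal sketch of a hypervisor vcpu-run path using them (the run step is
illustrative, not a real API):

#include <linux/vtime.h>

static void example_vcpu_run(void)
{
	vtime_account_guest_enter();	/* account kernel time, set PF_VCPU */
	/* ... enter guest mode and run until the next VM exit ... */
	vtime_account_guest_exit();	/* account guest time, clear PF_VCPU */
}
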
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
@@ -49,70 +95,68 @@ static inline void vtime_task_switch(struct task_struct *prev)
vtime_task_switch_generic(prev);
}
+static __always_inline void vtime_account_guest_enter(void)
+{
+ if (vtime_accounting_enabled_this_cpu())
+ vtime_guest_enter(current);
+ else
+ current->flags |= PF_VCPU;
+}
+
+static __always_inline void vtime_account_guest_exit(void)
+{
+ if (vtime_accounting_enabled_this_cpu())
+ vtime_guest_exit(current);
+ else
+ current->flags &= ~PF_VCPU;
+}
+
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline bool vtime_accounting_enabled_cpu(int cpu) {return false; }
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }
-#endif
-
-/*
- * Common vtime APIs
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void vtime_account_kernel(struct task_struct *tsk);
-extern void vtime_account_idle(struct task_struct *tsk);
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline void vtime_account_kernel(struct task_struct *tsk) { }
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_user_enter(struct task_struct *tsk);
-extern void vtime_user_exit(struct task_struct *tsk);
-extern void vtime_guest_enter(struct task_struct *tsk);
-extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk, int cpu);
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void vtime_user_enter(struct task_struct *tsk) { }
-static inline void vtime_user_exit(struct task_struct *tsk) { }
-static inline void vtime_guest_enter(struct task_struct *tsk) { }
-static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
-#endif
+static __always_inline void vtime_account_guest_enter(void)
+{
+ current->flags |= PF_VCPU;
+}
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
+static __always_inline void vtime_account_guest_exit(void)
{
- /* On hard|softirq exit we always account to hard|softirq cputime */
- vtime_account_kernel(tsk);
+ current->flags &= ~PF_VCPU;
}
-extern void vtime_flush(struct task_struct *tsk);
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
-static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
-static inline void vtime_flush(struct task_struct *tsk) { }
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-extern void irqtime_account_irq(struct task_struct *tsk);
+extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
-static inline void irqtime_account_irq(struct task_struct *tsk) { }
+static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif
-static inline void account_irq_enter_time(struct task_struct *tsk)
+static inline void account_softirq_enter(struct task_struct *tsk)
+{
+ vtime_account_irq(tsk, SOFTIRQ_OFFSET);
+ irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
+}
+
+static inline void account_softirq_exit(struct task_struct *tsk)
+{
+ vtime_account_softirq(tsk);
+ irqtime_account_irq(tsk, 0);
+}
+
+static inline void account_hardirq_enter(struct task_struct *tsk)
{
- vtime_account_irq_enter(tsk);
- irqtime_account_irq(tsk);
+ vtime_account_irq(tsk, HARDIRQ_OFFSET);
+ irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}
-static inline void account_irq_exit_time(struct task_struct *tsk)
+static inline void account_hardirq_exit(struct task_struct *tsk)
{
- vtime_account_irq_exit(tsk);
- irqtime_account_irq(tsk);
+ vtime_account_hardirq(tsk);
+ irqtime_account_irq(tsk, 0);
}
#endif /* _LINUX_KERNEL_VTIME_H */
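
The enter helpers charge the time accrued by the interrupted context before
IRQ work starts; the exit helpers charge the IRQ time itself. A hedged sketch
of how the pair brackets softirq processing (the dispatch loop is illustrative;
the real call sites live in the kernel's softirq entry code):

#include <linux/vtime.h>

static void example_do_softirq(void)
{
	account_softirq_enter(current);	/* charge the interrupted context */
	/* ... execute the pending softirq actions ... */
	account_softirq_exit(current);	/* charge the softirq time itself */
}
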
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
deleted file mode 100644
index 3495fd0dc790..000000000000
--- a/include/linux/w1-gpio.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * w1-gpio interface to platform code
- *
- * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
- */
-#ifndef _LINUX_W1_GPIO_H
-#define _LINUX_W1_GPIO_H
-
-struct gpio_desc;
-
-/**
- * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- */
-struct w1_gpio_platform_data {
- struct gpio_desc *gpiod;
- struct gpio_desc *pullup_gpiod;
- void (*enable_external_pullup)(int enable);
- unsigned int pullup_duration;
-};
-
-#endif /* _LINUX_W1_GPIO_H */
diff --git a/include/linux/w1.h b/include/linux/w1.h
index cebf3464bc03..9a2a0ef39018 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -269,7 +269,7 @@ struct w1_family {
struct list_head family_entry;
u8 fid;
- struct w1_family_ops *fops;
+ const struct w1_family_ops *fops;
const struct of_device_id *of_match_table;
@@ -280,7 +280,7 @@ int w1_register_family(struct w1_family *family);
void w1_unregister_family(struct w1_family *family);
/**
- * module_w1_driver() - Helper macro for registering a 1-Wire families
+ * module_w1_family() - Helper macro for registering a 1-Wire family
* @__w1_family: w1_family struct
*
* Helper macro for 1-Wire families which do not do anything special in module
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 898c890fc153..8aa3372f21a0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <asm/current.h>
-#include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;
@@ -19,8 +18,9 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
-#define WQ_FLAG_BOOKMARK 0x04
-#define WQ_FLAG_CUSTOM 0x08
+#define WQ_FLAG_CUSTOM 0x04
+#define WQ_FLAG_DONE 0x08
+#define WQ_FLAG_PRIORITY 0x10
/*
* A single wait-queue entry structure:
@@ -54,7 +54,7 @@ struct task_struct;
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .head = { &(name).head, &(name).head } }
+ .head = LIST_HEAD_INIT(name.head) }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
@@ -163,11 +163,20 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- list_add(&wq_entry->entry, &wq_head->head);
+ struct list_head *head = &wq_head->head;
+ struct wait_queue_entry *wq;
+
+ list_for_each_entry(wq, &wq_head->head, entry) {
+ if (!(wq->flags & WQ_FLAG_PRIORITY))
+ break;
+ head = &wq->entry;
+ }
+ list_add(&wq_entry->entry, head);
}
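
Per the insertion loop above, entries flagged WQ_FLAG_PRIORITY are kept at the
head of the list and therefore see wake-ups first. A minimal sketch of
registering such a waiter (the waitqueue name is illustrative, and this assumes
add_wait_queue_priority() sets WQ_FLAG_PRIORITY internally):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_register_priority_waiter(struct wait_queue_entry *wq_entry)
{
	init_waitqueue_entry(wq_entry, current);	/* wake this task */
	add_wait_queue_priority(&example_wq, wq_entry);
}
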
/*
@@ -198,14 +207,14 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
list_del(&wq_entry->entry);
}
-void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
-void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
- unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -225,6 +234,8 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
+#define wake_up_poll_on_current_cpu(x, m) \
+ __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
@@ -234,6 +245,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define wake_up_interruptible_sync_poll_locked(x, m) \
__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ /*
+ * For performance reasons, we don't always take the queue lock here.
+ * Therefore, we might race with someone removing the last entry from
+ * the queue, and proceed while they still hold the queue lock.
+ * However, rcu_read_lock() is required to be held in such cases, so we
+ * can safely proceed with an RCU-delayed free.
+ */
+ if (waitqueue_active(wq_head))
+ __wake_up_pollfree(wq_head);
+}
+
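
A hedged sketch of the teardown pattern the comment describes, using an
illustrative object whose waitqueue dies with a task rather than a file:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct example_obj {
	struct wait_queue_head wqh;
	struct rcu_head rcu;
};

static void example_obj_rcu_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_obj, rcu));
}

static void example_obj_destroy(struct example_obj *obj)
{
	wake_up_pollfree(&obj->wqh);		/* detach non-blocking pollers */
	call_rcu(&obj->rcu, example_obj_rcu_free);	/* RCU-delay the free */
}
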
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
@@ -244,7 +280,7 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define ___wait_is_interruptible(state) \
(!__builtin_constant_p(state) || \
- state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+ (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
@@ -324,8 +360,8 @@ do { \
} while (0)
#define __wait_event_freezable(wq_head, condition) \
- ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
- freezable_schedule())
+ ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \
+ 0, 0, schedule())
/**
* wait_event_freezable - sleep (or freeze) until a condition gets true
@@ -383,8 +419,8 @@ do { \
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
- __ret = freezable_schedule_timeout(__ret))
+ (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \
+ __ret = schedule_timeout(__ret))
/*
* like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
@@ -507,10 +543,11 @@ do { \
\
hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
HRTIMER_MODE_REL); \
- if ((timeout) != KTIME_MAX) \
- hrtimer_start_range_ns(&__t.timer, timeout, \
- current->timer_slack_ns, \
- HRTIMER_MODE_REL); \
+ if ((timeout) != KTIME_MAX) { \
+ hrtimer_set_expires_range_ns(&__t.timer, timeout, \
+ current->timer_slack_ns); \
+ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
+ } \
\
__ret = ___wait_event(wq_head, condition, state, 0, 0, \
if (!__t.task) { \
@@ -604,8 +641,8 @@ do { \
#define __wait_event_freezable_exclusive(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
- freezable_schedule())
+ ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
+ schedule())
#define wait_event_freezable_exclusive(wq, condition) \
({ \
@@ -894,6 +931,34 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_state(wq, condition, state) \
+ ___wait_event(wq, condition, state, 0, 0, schedule())
+
+/**
+ * wait_event_state - sleep until a condition gets true
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @state: state to sleep in
+ *
+ * The process is put to sleep (@state) until the @condition evaluates to true
+ * or a signal is received (when allowed by @state). The @condition is checked
+ * each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a signal
+ * (when allowed by @state) and 0 if @condition evaluated to true.
+ */
+#define wait_event_state(wq_head, condition, state) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_state(wq_head, condition, state); \
+ __ret; \
+})
+
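
For instance (a sketch; the waitqueue and condition are illustrative), a
killable and freezable wait becomes a one-liner with the parameterized state:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static int example_wait_until_ready(bool *ready)
{
	return wait_event_state(example_wq, READ_ONCE(*ready),
				TASK_KILLABLE | TASK_FREEZABLE);
}
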
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
@@ -1125,7 +1190,7 @@ do { \
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
@@ -1149,6 +1214,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
(wait)->flags = 0; \
} while (0)
-bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
+typedef int (*task_call_f)(struct task_struct *p, void *arg);
+extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
#endif /* _LINUX_WAIT_H */
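
A sketch of the replacement interface: task_call_func() invokes the callback
against a stabilized task and returns the callback's value (the callback and
its argument below are illustrative):

#include <linux/sched.h>
#include <linux/wait.h>

static int example_read_task_flags(struct task_struct *p, void *arg)
{
	*(unsigned int *)arg = p->flags;	/* runs with @p held stable */
	return 0;
}

static int example_sample_flags(struct task_struct *p, unsigned int *flags)
{
	return task_call_func(p, example_read_task_flags, flags);
}
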
diff --git a/include/linux/wait_api.h b/include/linux/wait_api.h
new file mode 100644
index 000000000000..4e930548935a
--- /dev/null
+++ b/include/linux/wait_api.h
@@ -0,0 +1 @@
+#include <linux/wait.h>
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 7dec36aecbd9..7725b7579b78 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -71,7 +71,7 @@ static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait,
@@ -96,7 +96,7 @@ static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
unsigned long timeout)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit,
bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
index 5e08db2adc31..429c7b6afead 100644
--- a/include/linux/watch_queue.h
+++ b/include/linux/watch_queue.h
@@ -4,7 +4,7 @@
* Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * See Documentation/watch_queue.rst
+ * See Documentation/core-api/watch_queue.rst
*/
#ifndef _LINUX_WATCH_QUEUE_H
@@ -28,16 +28,17 @@ struct watch_type_filter {
struct watch_filter {
union {
struct rcu_head rcu;
- unsigned long type_filter[2]; /* Bitmask of accepted types */
+ /* Bitmask of accepted types */
+ DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
};
u32 nr_filters; /* Number of filters */
- struct watch_type_filter filters[];
+ struct watch_type_filter filters[] __counted_by(nr_filters);
};
struct watch_queue {
struct rcu_head rcu;
struct watch_filter __rcu *filter;
- struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */
+ struct pipe_inode_info *pipe; /* Pipe we use as a buffer, NULL if queue closed */
struct hlist_head watches; /* Contributory watches */
struct page **notes; /* Preallocated notifications */
unsigned long *notes_bitmap; /* Allocation bitmap for notes */
@@ -45,7 +46,6 @@ struct watch_queue {
spinlock_t lock;
unsigned int nr_notes; /* Number of notes */
unsigned int nr_pages; /* Number of pages in notes[] */
- bool defunct; /* T when queues closed */
};
/*
@@ -122,6 +122,12 @@ static inline void remove_watch_list(struct watch_list *wlist, u64 id)
*/
#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT)
+#else
+static inline int watch_queue_init(struct pipe_inode_info *pipe)
+{
+ return -ENOPKG;
+}
+
#endif
#endif /* _LINUX_WATCH_QUEUE_H */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 9b19e6bb68b5..99660197a36c 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -107,6 +107,7 @@ struct watchdog_device {
unsigned int max_hw_heartbeat_ms;
struct notifier_block reboot_nb;
struct notifier_block restart_nb;
+ struct notifier_block pm_nb;
void *driver_data;
struct watchdog_core_data *wd_data;
unsigned long status;
@@ -116,6 +117,7 @@ struct watchdog_device {
#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */
#define WDOG_HW_RUNNING 3 /* True if HW watchdog running */
#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */
+#define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */
struct list_head deferred;
};
@@ -156,6 +158,12 @@ static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd)
set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status);
}
+/* Use the following function to stop the wdog ping worker when suspending */
+static inline void watchdog_stop_ping_on_suspend(struct watchdog_device *wdd)
+{
+ set_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status);
+}
+
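
A sketch of a watchdog driver opting in (names are illustrative; the helper
simply sets a status bit, typically before the device is registered):

#include <linux/watchdog.h>

static int example_wdt_probe(struct watchdog_device *wdd)
{
	/* Stop the kernel's keepalive worker across suspend/resume. */
	watchdog_stop_ping_on_suspend(wdd);
	return watchdog_register_device(wdd);
}
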
/* Use the following function to check if a timeout value is invalid */
static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
{
@@ -209,6 +217,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
unsigned int timeout_parm, struct device *dev);
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+int watchdog_dev_suspend(struct watchdog_device *wdd);
+int watchdog_dev_resume(struct watchdog_device *wdd);
int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int);
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
deleted file mode 100644
index cdae052bcdcd..000000000000
--- a/include/linux/wimax/debug.h
+++ /dev/null
@@ -1,491 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Collection of tools to manage debug operations.
- *
- * Copyright (C) 2005-2007 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Don't #include this file directly, read on!
- *
- * EXECUTING DEBUGGING ACTIONS OR NOT
- *
- * The main thing this framework provides is decission power to take a
- * debug action (like printing a message) if the current debug level
- * allows it.
- *
- * The decission power is at two levels: at compile-time (what does
- * not make it is compiled out) and at run-time. The run-time
- * selection is done per-submodule (as they are declared by the user
- * of the framework).
- *
- * A call to d_test(L) (L being the target debug level) returns true
- * if the action should be taken because the current debug levels
- * allow it (both compile and run time).
- *
- * It follows that a call to d_test() that can be determined to be
- * always false at compile time will get the code depending on it
- * compiled out by optimization.
- *
- * DEBUG LEVELS
- *
- * It is up to the caller to define how much a debugging level is.
- *
- * Convention sets 0 as "no debug" (so an action marked as debug level 0
- * will always be taken). The increasing debug levels are used for
- * increased verbosity.
- *
- * USAGE
- *
- * Group the code in modules and submodules inside each module [which
- * in most cases maps to Linux modules and .c files that compose
- * those].
- *
- * For each module, there is:
- *
- * - a MODULENAME (single word, legal C identifier)
- *
- * - a debug-levels.h header file that declares the list of
- * submodules and that is included by all .c files that use
- * the debugging tools. The file name can be anything.
- *
- * - some (optional) .c code to manipulate the runtime debug levels
- * through debugfs.
- *
- * The debug-levels.h file would look like:
- *
- * #ifndef __debug_levels__h__
- * #define __debug_levels__h__
- *
- * #define D_MODULENAME modulename
- * #define D_MASTER 10
- *
- * #include <linux/wimax/debug.h>
- *
- * enum d_module {
- * D_SUBMODULE_DECLARE(submodule_1),
- * D_SUBMODULE_DECLARE(submodule_2),
- * ...
- * D_SUBMODULE_DECLARE(submodule_N)
- * };
- *
- * #endif
- *
- * D_MASTER is the maximum compile-time debug level; any debug actions
- * above this will be out. D_MODULENAME is the module name (legal C
- * identifier), which has to be unique for each module (to avoid
- * namespace collisions during linkage). Note those #defines need to
- * be done before #including debug.h
- *
- * We declare N different submodules whose debug level can be
- * independently controlled during runtime.
- *
- * In a .c file of the module (and only in one of them), define the
- * following code:
- *
- * struct d_level D_LEVEL[] = {
- * D_SUBMODULE_DEFINE(submodule_1),
- * D_SUBMODULE_DEFINE(submodule_2),
- * ...
- * D_SUBMODULE_DEFINE(submodule_N),
- * };
- * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
- *
- * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
- * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
- * #defined also in this file.
- *
- * To manipulate from user space the levels, create a debugfs dentry
- * and then register each submodule with:
- *
- * d_level_register_debugfs("PREFIX_", submodule_X, parent);
- *
- * Where PREFIX_ is a name of your chosing. This will create debugfs
- * file with a single numeric value that can be use to tweak it. To
- * remove the entires, just use debugfs_remove_recursive() on 'parent'.
- *
- * NOTE: remember that even if this will show attached to some
- * particular instance of a device, the settings are *global*.
- *
- * On each submodule (for example, .c files), the debug infrastructure
- * should be included like this:
- *
- * #define D_SUBMODULE submodule_x // matches one in debug-levels.h
- * #include "debug-levels.h"
- *
- * after #including all your include files.
- *
- * Now you can use the d_*() macros below [d_test(), d_fnstart(),
- * d_fnend(), d_printf(), d_dump()].
- *
- * If their debug level is greater than D_MASTER, they will be
- * compiled out.
- *
- * If their debug level is lower or equal than D_MASTER but greater
- * than the current debug level of their submodule, they'll be
- * ignored.
- *
- * Otherwise, the action will be performed.
- */
-#ifndef __debug__h__
-#define __debug__h__
-
-#include <linux/types.h>
-#include <linux/slab.h>
-
-struct device;
-
-/* Backend stuff */
-
-/*
- * Debug backend: generate a message header from a 'struct device'
- *
- * @head: buffer where to place the header
- * @head_size: length of @head
- * @dev: pointer to device used to generate a header from. If NULL,
- * an empty ("") header is generated.
- */
-static inline
-void __d_head(char *head, size_t head_size,
- struct device *dev)
-{
- if (dev == NULL)
- head[0] = 0;
- else if ((unsigned long)dev < 4096) {
- printk(KERN_ERR "E: Corrupt dev %p\n", dev);
- WARN_ON(1);
- } else
- snprintf(head, head_size, "%s %s: ",
- dev_driver_string(dev), dev_name(dev));
-}
-
-
-/*
- * Debug backend: log some message if debugging is enabled
- *
- * @l: intended debug level
- * @tag: tag to prefix the message with
- * @dev: 'struct device' associated to this message
- * @f: printf-like format and arguments
- *
- * Note this is optimized out if it doesn't pass the compile-time
- * check; however, it is *always* compiled. This is useful to make
- * sure the printf-like formats and variables are always checked and
- * they don't get bit rot if you have all the debugging disabled.
- */
-#define _d_printf(l, tag, dev, f, a...) \
-do { \
- char head[64]; \
- if (!d_test(l)) \
- break; \
- __d_head(head, sizeof(head), dev); \
- printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
-} while (0)
-
-
-/*
- * CPP syntactic sugar to generate A_B like symbol names when one of
- * the arguments is a preprocessor #define.
- */
-#define __D_PASTE__(varname, modulename) varname##_##modulename
-#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
-#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
-
-
-/*
- * Store a submodule's runtime debug level and name
- */
-struct d_level {
- u8 level;
- const char *name;
-};
-
-
-/*
- * List of available submodules and their debug levels
- *
- * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
- * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
- * convenience.
- *
- * This array and the size are defined on some .c file that is part of
- * the current module.
- */
-#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
-#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
-
-extern struct d_level D_LEVEL[];
-extern size_t D_LEVEL_SIZE;
-
-
-/*
- * Frontend stuff
- *
- *
- * Stuff you need to declare prior to using the actual "debug" actions
- * (defined below).
- */
-
-#ifndef D_MODULENAME
-#error D_MODULENAME is not defined in your debug-levels.h file
-/**
- * D_MODULE - Name of the current module
- *
- * #define in your module's debug-levels.h, making sure it is
- * unique. This has to be a legal C identifier.
- */
-#define D_MODULENAME undefined_modulename
-#endif
-
-
-#ifndef D_MASTER
-#warning D_MASTER not defined, but debug.h included! [see docs]
-/**
- * D_MASTER - Compile time maximum debug level
- *
- * #define in your debug-levels.h file to the maximum debug level the
- * runtime code will be allowed to have. This allows you to provide a
- * main knob.
- *
- * Anything above that level will be optimized out of the compile.
- *
- * Defaults to zero (no debug code compiled in).
- *
- * Maximum one definition per module (at the debug-levels.h file).
- */
-#define D_MASTER 0
-#endif
-
-#ifndef D_SUBMODULE
-#error D_SUBMODULE not defined, but debug.h included! [see docs]
-/**
- * D_SUBMODULE - Name of the current submodule
- *
- * #define in your submodule .c file before #including debug-levels.h
- * to the name of the current submodule as previously declared and
- * defined with D_SUBMODULE_DECLARE() (in your module's
- * debug-levels.h) and D_SUBMODULE_DEFINE().
- *
- * This is used to provide runtime-control over the debug levels.
- *
- * Maximum one per .c file! Can be shared among different .c files
- * (meaning they belong to the same submodule categorization).
- */
-#define D_SUBMODULE undefined_module
-#endif
-
-
-/**
- * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- * valid C identifier ([a-zA-Z0-9_]).
- *
- * Declare in the module's debug-levels.h header file as:
- *
- * enum d_module {
- * D_SUBMODULE_DECLARE(submodule_1),
- * D_SUBMODULE_DECLARE(submodule_2),
- * D_SUBMODULE_DECLARE(submodule_3),
- * };
- *
- * Some corresponding .c file needs to have a matching
- * D_SUBMODULE_DEFINE().
- */
-#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
-
-
-/**
- * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- * valid C identifier ([a-zA-Z0-9_]).
- *
- * Use once per module (in some .c file) as:
- *
- * static
- * struct d_level d_level_SUBMODULENAME[] = {
- * D_SUBMODULE_DEFINE(submodule_1),
- * D_SUBMODULE_DEFINE(submodule_2),
- * D_SUBMODULE_DEFINE(submodule_3),
- * };
- * size_t d_level_size_SUBDMODULENAME = ARRAY_SIZE(d_level_SUBDMODULENAME);
- *
- * Matching D_SUBMODULE_DECLARE()s have to be present in a
- * debug-levels.h header file.
- */
-#define D_SUBMODULE_DEFINE(_name) \
-[__D_SUBMODULE_##_name] = { \
- .level = 0, \
- .name = #_name \
-}
-
-
-
-/* The actual "debug" operations */
-
-
-/**
- * d_test - Returns true if debugging should be enabled
- *
- * @l: intended debug level (unsigned)
- *
- * If the master debug switch is enabled and the current settings are
- * higher or equal to the requested level, then debugging
- * output/actions should be enabled.
- *
- * NOTE:
- *
- * This needs to be coded so that it can be evaluated in compile
- * time; this is why the ugly BUG_ON() is placed in there, so the
- * D_MASTER evaluation compiles all out if it is compile-time false.
- */
-#define d_test(l) \
-({ \
- unsigned __l = l; /* type enforcer */ \
- (D_MASTER) >= __l \
- && ({ \
- BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
- D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \
- }); \
-})
-
-
-/**
- * d_fnstart - log message at function start if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
-
-
-/**
- * d_fnend - log message at function end if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
-
-
-/**
- * d_printf - log message if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
-
-
-/**
- * d_dump - log buffer hex dump if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_dump(l, dev, ptr, size) \
-do { \
- char head[64]; \
- if (!d_test(l)) \
- break; \
- __d_head(head, sizeof(head), dev); \
- print_hex_dump(KERN_ERR, head, 0, 16, 1, \
- ((void *) ptr), (size), 0); \
-} while (0)
-
-
-/**
- * Export a submodule's debug level over debugfs as PREFIXSUBMODULE
- *
- * @prefix: string to prefix the name with
- * @submodule: name of submodule (not a string, just the name)
- * @dentry: debugfs parent dentry
- *
- * For removing, just use debugfs_remove_recursive() on the parent.
- */
-#define d_level_register_debugfs(prefix, name, parent) \
-({ \
- debugfs_create_u8( \
- prefix #name, 0600, parent, \
- &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \
-})
-
-
-static inline
-void d_submodule_set(struct d_level *d_level, size_t d_level_size,
- const char *submodule, u8 level, const char *tag)
-{
- struct d_level *itr, *top;
- int index = -1;
-
- for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
- index++;
- if (itr->name == NULL) {
- printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
- tag, itr, index);
- continue;
- }
- if (!strcmp(itr->name, submodule)) {
- itr->level = level;
- return;
- }
- }
- printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
-}
-
-
-/**
- * d_parse_params - Parse a string with debug parameters from the
- * command line
- *
- * @d_level: level structure (D_LEVEL)
- * @d_level_size: number of items in the level structure
- * (D_LEVEL_SIZE).
- * @_params: string with the parameters; this is a space (not tab!)
- * separated list of NAME:VALUE, where value is the debug level
- * and NAME is the name of the submodule.
- * @tag: string for error messages (example: MODULE.ARGNAME).
- */
-static inline
-void d_parse_params(struct d_level *d_level, size_t d_level_size,
- const char *_params, const char *tag)
-{
- char submodule[130], *params, *params_orig, *token, *colon;
- unsigned level, tokens;
-
- if (_params == NULL)
- return;
- params_orig = kstrdup(_params, GFP_KERNEL);
- params = params_orig;
- while (1) {
- token = strsep(&params, " ");
- if (token == NULL)
- break;
- if (*token == '\0') /* eat joint spaces */
- continue;
- /* kernel's sscanf %s eats until whitespace, so we
- * replace : by \n so it doesn't get eaten later by
- * strsep */
- colon = strchr(token, ':');
- if (colon != NULL)
- *colon = '\n';
- tokens = sscanf(token, "%s\n%u", submodule, &level);
- if (colon != NULL)
- *colon = ':'; /* set back, for error messages */
- if (tokens == 2)
- d_submodule_set(d_level, d_level_size,
- submodule, level, tag);
- else
- printk(KERN_ERR "%s: can't parse '%s' as a "
- "SUBMODULE:LEVEL (%d tokens)\n",
- tag, token, tokens);
- }
- kfree(params_orig);
-}
-
-#endif /* #ifndef __debug__h__ */
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
index 4ca2842d2842..6a5bb052fcc2 100644
--- a/include/linux/win_minmax.h
+++ b/include/linux/win_minmax.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
- * lib/minmax.c: windowed min/max tracker by Kathleen Nichols.
+/*
+ * win_minmax.h: windowed min/max tracker by Kathleen Nichols.
*
*/
#ifndef MINMAX_H
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 2d1b54556eff..e6e34d74dda0 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -26,7 +26,15 @@ struct compat_iw_point {
struct __compat_iw_event {
__u16 len; /* Real length of this stuff */
__u16 cmd; /* Wireless IOCTL */
- compat_caddr_t pointer;
+
+ union {
+ compat_caddr_t pointer;
+
+ /* we need ptr_bytes to make memcpy() run-time destination
+ * buffer bounds checking happy, nothing special
+ */
+ DECLARE_FLEX_ARRAY(__u8, ptr_bytes);
+ };
};
#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer)
#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length)
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
index 3f496967b538..5e1b26f988e2 100644
--- a/include/linux/wkup_m3_ipc.h
+++ b/include/linux/wkup_m3_ipc.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Wakeup M3 for AMx3 SoCs Power Management Routines
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach <d-gerlach@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_WKUP_M3_IPC_H
@@ -33,7 +25,13 @@ struct wkup_m3_ipc {
int mem_type;
unsigned long resume_addr;
+ int vtt_conf;
+ int isolation_conf;
int state;
+ u32 halt;
+
+ unsigned long volt_scale_offsets;
+ const char *sd_fw_name;
struct completion sync_complete;
struct mbox_client mbox_client;
@@ -41,6 +39,7 @@ struct wkup_m3_ipc {
struct wkup_m3_ipc_ops *ops;
int is_rtc_only;
+ struct dentry *dbg_path;
};
struct wkup_m3_wakeup_src {
@@ -48,6 +47,12 @@ struct wkup_m3_wakeup_src {
char src[10];
};
+struct wkup_m3_scale_data_header {
+ u16 magic;
+ u8 sleep_offset;
+ u8 wake_offset;
+} __packed;
+
struct wkup_m3_ipc_ops {
void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
deleted file mode 100644
index 03d61f1d23ab..000000000000
--- a/include/linux/wl12xx.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * This file is part of wl12xx
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- */
-
-#ifndef _LINUX_WL12XX_H
-#define _LINUX_WL12XX_H
-
-#include <linux/err.h>
-
-struct wl1251_platform_data {
- int power_gpio;
- /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
- int irq;
- bool use_eeprom;
-};
-
-#ifdef CONFIG_WILINK_PLATFORM_DATA
-
-int wl1251_set_platform_data(const struct wl1251_platform_data *data);
-
-struct wl1251_platform_data *wl1251_get_platform_data(void);
-
-#else
-
-static inline
-int wl1251_set_platform_data(const struct wl1251_platform_data *data)
-{
- return -ENOSYS;
-}
-
-static inline
-struct wl1251_platform_data *wl1251_get_platform_data(void)
-{
- return ERR_PTR(-ENODATA);
-}
-
-#endif
-
-#endif
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 58e082dadc68..332d2b0f29b9 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -254,9 +254,6 @@ struct wm97xx_mach_ops {
int (*acc_startup) (struct wm97xx *);
void (*acc_shutdown) (struct wm97xx *);
- /* interrupt mask control - required for accelerated operation */
- void (*irq_enable) (struct wm97xx *, int enable);
-
/* GPIO pin used for accelerated operation */
int irq_gpio;
@@ -281,7 +278,6 @@ struct wm97xx {
unsigned long ts_reader_min_interval; /* Minimum interval */
unsigned int pen_irq; /* Pen IRQ number in use */
struct workqueue_struct *ts_workq;
- struct work_struct pen_event_work;
u16 acc_slot; /* AC97 slot used for acc touch data */
u16 acc_rate; /* acc touch data rate */
unsigned pen_is_down:1; /* Pen is down */
@@ -294,7 +290,6 @@ struct wm97xx {
struct wm97xx_batt_pdata {
int batt_aux;
int temp_aux;
- int charge_gpio;
int min_voltage;
int max_voltage;
int batt_div;
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 8ef7e7faea1e..63cca3b58d6d 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -11,43 +11,83 @@
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
-#include <uapi/linux/wmi.h>
+/**
+ * struct wmi_device - WMI device structure
+ * @dev: Device associated with this WMI device
+ * @setable: True for devices implementing the Set Control Method
+ *
+ * This represents WMI devices discovered by the WMI driver core.
+ */
struct wmi_device {
struct device dev;
-
- /* True for data blocks implementing the Set Control Method */
bool setable;
};
-/* evaluate the ACPI method associated with this device */
+/**
+ * to_wmi_device() - Helper macro to cast a device to a wmi_device
+ * @device: device struct
+ *
+ * Cast a struct device to a struct wmi_device.
+ */
+#define to_wmi_device(device) container_of(device, struct wmi_device, dev)
+
extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
u8 instance, u32 method_id,
const struct acpi_buffer *in,
struct acpi_buffer *out);
-/* Caller must kfree the result. */
extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
u8 instance);
-extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
+acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in);
+
+u8 wmidev_instance_count(struct wmi_device *wdev);
+/**
+ * struct wmi_driver - WMI driver structure
+ * @driver: Driver model structure
+ * @id_table: List of WMI GUIDs supported by this driver
+ * @no_notify_data: Driver supports WMI events which provide no event data
+ * @no_singleton: Driver can be instantiated multiple times
+ * @probe: Callback for device binding
+ * @remove: Callback for device unbinding
+ * @notify: Callback for receiving WMI events
+ *
+ * This represents WMI drivers which handle WMI devices.
+ */
struct wmi_driver {
struct device_driver driver;
const struct wmi_device_id *id_table;
+ bool no_notify_data;
+ bool no_singleton;
int (*probe)(struct wmi_device *wdev, const void *context);
- int (*remove)(struct wmi_device *wdev);
+ void (*remove)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
- long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
- struct wmi_ioctl_buffer *arg);
};
extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
struct module *owner);
extern void wmi_driver_unregister(struct wmi_driver *driver);
+
+/**
+ * wmi_driver_register() - Helper macro to register a WMI driver
+ * @driver: wmi_driver struct
+ *
+ * Helper macro for registering a WMI driver. It automatically passes
+ * THIS_MODULE to the underlying function.
+ */
#define wmi_driver_register(driver) __wmi_driver_register((driver), THIS_MODULE)
+/**
+ * module_wmi_driver() - Helper macro to register/unregister a WMI driver
+ * @__wmi_driver: wmi_driver struct
+ *
+ * Helper macro for WMI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
#define module_wmi_driver(__wmi_driver) \
module_driver(__wmi_driver, wmi_driver_register, \
wmi_driver_unregister)
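
Putting the documented pieces together, a minimal driver might look like this
sketch (the GUID, names, and callback bodies are illustrative):

#include <linux/module.h>
#include <linux/wmi.h>

static const struct wmi_device_id example_id_table[] = {
	{ "01234567-89AB-CDEF-0123-456789ABCDEF", NULL },
	{ }
};

static int example_probe(struct wmi_device *wdev, const void *context)
{
	return 0;		/* bind unconditionally in this sketch */
}

static void example_remove(struct wmi_device *wdev)
{
	/* nothing to tear down in this sketch */
}

static struct wmi_driver example_driver = {
	.driver = { .name = "example-wmi" },
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};
module_wmi_driver(example_driver);
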
diff --git a/include/linux/wordpart.h b/include/linux/wordpart.h
new file mode 100644
index 000000000000..f6f8f83b15b0
--- /dev/null
+++ b/include/linux/wordpart.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_WORDPART_H
+#define _LINUX_WORDPART_H
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
+
+/**
+ * upper_16_bits - return bits 16-31 of a number
+ * @n: the number we're accessing
+ */
+#define upper_16_bits(n) ((u16)((n) >> 16))
+
+/**
+ * lower_16_bits - return bits 0-15 of a number
+ * @n: the number we're accessing
+ */
+#define lower_16_bits(n) ((u16)((n) & 0xffff))
+
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+#endif // _LINUX_WORDPART_H
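
A quick sketch of the helpers in use, e.g. splitting a 64-bit address into a
pair of 32-bit register writes (values are illustrative):

#include <linux/types.h>
#include <linux/wordpart.h>

static void example_wordpart(void)
{
	u64 addr = 0x123456789abcdef0ULL;
	u32 hi = upper_32_bits(addr);		/* 0x12345678 */
	u32 lo = lower_32_bits(addr);		/* 0x9abcdef0 */
	unsigned long pat = REPEAT_BYTE(0xab);	/* 0xabababababababab on 64-bit */

	(void)hi; (void)lo; (void)pat;		/* sketch only */
}
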
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 26de0cae2a0a..158784dd189a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -14,12 +14,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
-
-struct workqueue_struct;
-
-struct work_struct;
-typedef void (*work_func_t)(struct work_struct *work);
-void delayed_work_timer_fn(struct timer_list *t);
+#include <linux/workqueue_types.h>
/*
* The first word is the work queue pointer and the flags rolled into
@@ -27,22 +22,56 @@ void delayed_work_timer_fn(struct timer_list *t);
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
-enum {
+enum work_bits {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
- WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
- WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
- WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
+ WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */
+ WORK_STRUCT_PWQ_BIT, /* data points to pwq */
+ WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
- WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
- WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
-#else
- WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
+ WORK_STRUCT_STATIC_BIT, /* static initializer (debugobjects) */
#endif
+ WORK_STRUCT_FLAG_BITS,
+ /* color for workqueue flushing */
+ WORK_STRUCT_COLOR_SHIFT = WORK_STRUCT_FLAG_BITS,
WORK_STRUCT_COLOR_BITS = 4,
+ /*
+ * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
+ * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
+ * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
+ *
+ * MSB
+ * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
+ *                    4 bits        4 or 5 bits
+ */
+ WORK_STRUCT_PWQ_SHIFT = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,
+
+ /*
+ * data contains off-queue information when !WORK_STRUCT_PWQ.
+ *
+ * MSB
+ * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ]
+ *                   1 bit        4 or 5 bits
+ */
+ WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS,
+ WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT,
+ WORK_OFFQ_FLAG_END,
+ WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,
+
+ /*
+ * When a work item is off queue, the high bits encode off-queue flags
+ * and the last pool it was on. Cap pool ID to 31 bits and use the
+ * highest number to indicate that no pool is associated.
+ */
+ WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
+ WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
+ WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
+};
+
+enum work_flags {
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
- WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
+ WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -50,47 +79,14 @@ enum {
#else
WORK_STRUCT_STATIC = 0,
#endif
+};
- /*
- * The last color is no color used for works which don't
- * participate in workqueue flushing.
- */
- WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
- WORK_NO_COLOR = WORK_NR_COLORS,
+enum wq_misc_consts {
+ WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS),
/* not bound to any CPU, prefer the local CPU */
WORK_CPU_UNBOUND = NR_CPUS,
- /*
- * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
- * This makes pwqs aligned to 256 bytes and allows 15 workqueue
- * flush colors.
- */
- WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
- WORK_STRUCT_COLOR_BITS,
-
- /* data contains off-queue information when !WORK_STRUCT_PWQ */
- WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
-
- __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
- WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
-
- /*
- * When a work item is off queue, its high bits point to the last
- * pool it was on. Cap at 31 bits and use the highest number to
- * indicate that no pool is associated.
- */
- WORK_OFFQ_FLAG_BITS = 1,
- WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
- WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
- WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
- WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
-
- /* convenience constants */
- WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
- WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
- WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
-
/* bit mask for work_busy() return values */
WORK_BUSY_PENDING = 1 << 0,
WORK_BUSY_RUNNING = 1 << 1,
@@ -99,14 +95,11 @@ enum {
WORKER_DESC_LEN = 24,
};
-struct work_struct {
- atomic_long_t data;
- struct list_head entry;
- work_func_t func;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
+/* Convenience constants - of type 'unsigned long', not 'enum'! */
+#define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT)
+#define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1)
+#define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
+#define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
@@ -129,6 +122,17 @@ struct rcu_work {
struct workqueue_struct *wq;
};
+enum wq_affn_scope {
+ WQ_AFFN_DFL, /* use system default */
+ WQ_AFFN_CPU, /* one pod per CPU */
+ WQ_AFFN_SMT, /* one pod per SMT */
+ WQ_AFFN_CACHE, /* one pod per LLC */
+ WQ_AFFN_NUMA, /* one pod per NUMA node */
+ WQ_AFFN_SYSTEM, /* one pod across the whole system */
+
+ WQ_AFFN_NR_TYPES,
+};
+
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
@@ -142,17 +146,58 @@ struct workqueue_attrs {
/**
* @cpumask: allowed CPUs
+ *
+ * Work items in this workqueue are affine to these CPUs and not allowed
+ * to execute on other CPUs. A pool serving a workqueue must have the
+ * same @cpumask.
*/
cpumask_var_t cpumask;
/**
- * @no_numa: disable NUMA affinity
+ * @__pod_cpumask: internal attribute used to create per-pod pools
+ *
+ * Internal use only.
*
- * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
- * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
- * doesn't participate in pool hash calculations or equality comparisons.
+ * Per-pod unbound worker pools are used to improve locality. Always a
+ * subset of ->cpumask. A workqueue can be associated with multiple
+ * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
+ * of a pool's @__pod_cpumask is strict depends on @affn_strict.
+ */
+ cpumask_var_t __pod_cpumask;
+
+ /**
+ * @affn_strict: affinity scope is strict
+ *
+ * If clear, workqueue will make a best-effort attempt at starting the
+ * worker inside @__pod_cpumask but the scheduler is free to migrate it
+ * outside.
+ *
+ * If set, workers are only allowed to run inside @__pod_cpumask.
+ */
+ bool affn_strict;
+
+ /*
+ * Below fields aren't properties of a worker_pool. They only modify how
+ * :c:func:`apply_workqueue_attrs` select pools and thus don't
+ * participate in pool hash calculations or equality comparisons.
*/
- bool no_numa;
+
+ /**
+ * @affn_scope: unbound CPU affinity scope
+ *
+ * CPU pods are used to improve execution locality of unbound work
+ * items. There are multiple pod types, one for each wq_affn_scope, and
+ * every CPU in the system belongs to one pod in every pod type. CPUs
+ * that belong to the same pod share the worker pool. For example,
+ * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
+ * pool for each NUMA node.
+ */
+ enum wq_affn_scope affn_scope;
+
+ /**
+ * @ordered: work items must be executed one by one in queueing order
+ */
+ bool ordered;
};
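
As a hedged sketch (error handling trimmed; this assumes the alloc/apply/free
attribute helpers exported elsewhere in this header), selecting strict
per-NUMA-node pools for an unbound workqueue could look like:

#include <linux/workqueue.h>

static int example_make_numa_strict(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
	int ret;

	if (!attrs)
		return -ENOMEM;
	attrs->affn_scope = WQ_AFFN_NUMA;	/* one worker pool per NUMA node */
	attrs->affn_strict = true;		/* keep workers inside their pod */
	ret = apply_workqueue_attrs(wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}
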
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -226,18 +271,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* to generate better code.
*/
#ifdef CONFIG_LOCKDEP
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
- static struct lock_class_key __key; \
- \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
#else
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
@@ -246,12 +289,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
} while (0)
#endif
+#define __INIT_WORK(_work, _func, _onstack) \
+ do { \
+ static __maybe_unused struct lock_class_key __key; \
+ \
+ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
+ } while (0)
+
#define INIT_WORK(_work, _func) \
__INIT_WORK((_work), (_func), 0)
#define INIT_WORK_ONSTACK(_work, _func) \
__INIT_WORK((_work), (_func), 1)
+#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
+ __INIT_WORK_KEY((_work), (_func), 1, _key)
+
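
Usage sketch (hypothetical driver context, not from this diff) showing the
unchanged INIT_WORK() entry point together with schedule_work(), which is
declared further down in this header:

struct example_ctx {
	struct work_struct work;
	int pending_events;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	ctx->pending_events = 0;	/* process and clear */
}

static void example_setup(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->work, example_work_fn);
	schedule_work(&ctx->work);
}
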
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
@@ -305,13 +358,14 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* Workqueue flags and constants. For details, please refer to
* Documentation/core-api/workqueue.rst.
*/
-enum {
+enum wq_flags {
+ WQ_BH = 1 << 0, /* execute in bottom half (softirq) context */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
- WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
+ WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */
/*
* Per-cpu workqueues are generally preferred because they tend to
@@ -324,7 +378,7 @@ enum {
* to execute and tries to keep idle cores idle to conserve power;
* however, for example, a per-cpu work item scheduled from an
* interrupt handler on an idle CPU will force the scheduler to
- * excute the work item on that CPU breaking the idleness, which in
+ * execute the work item on that CPU breaking the idleness, which in
* turn may lead to more scheduling choices which are sub-optimal
* in terms of power consumption.
*
@@ -340,19 +394,27 @@ enum {
*/
WQ_POWER_EFFICIENT = 1 << 7,
+ __WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
- __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
+ /* BH wq only allows the following flags */
+ __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI,
+};
+
+enum wq_consts {
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
- WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
-};
-/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
-#define WQ_UNBOUND_MAX_ACTIVE \
- max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
+ /*
+ * Per-node default cap on min_active. Unless explicitly set, min_active
+ * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
+ * workqueue_struct->min_active definition.
+ */
+ WQ_DFL_MIN_ACTIVE = 8,
+};
/*
* System-wide workqueues which are always present.
@@ -381,6 +443,9 @@ enum {
* they are same as their non-power-efficient counterparts - e.g.
* system_power_efficient_wq is identical to system_wq if
* 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
+ *
+ * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
+ * are executed in the queueing CPU's BH context in the queueing order.
*/
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
@@ -389,6 +454,11 @@ extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
+extern struct workqueue_struct *system_bh_wq;
+extern struct workqueue_struct *system_bh_highpri_wq;
+
+void workqueue_softirq_action(bool highpri);
+void workqueue_softirq_dead(unsigned int cpu);
/**
* alloc_workqueue - allocate a workqueue
@@ -397,22 +467,43 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* @max_active: max in-flight work items, 0 for default
* remaining args: args for @fmt
*
- * Allocate a workqueue with the specified parameters. For detailed
- * information on WQ_* flags, please refer to
+ * For a per-cpu workqueue, @max_active limits the number of in-flight work
+ * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
+ * executing at most one work item for the workqueue.
+ *
+ * For unbound workqueues, @max_active limits the number of in-flight work items
+ * for the whole system. e.g. @max_active of 16 indicates that there can be
+ * at most 16 work items executing for the workqueue in the whole system.
+ *
+ * As sharing the same active counter for an unbound workqueue across multiple
+ * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
+ * according to the proportion of the number of online CPUs and enforced
+ * independently.
+ *
+ * Depending on online CPU distribution, a node may end up with per-node
+ * max_active which is significantly lower than @max_active, which can lead to
+ * deadlocks if the per-node concurrency limit is lower than the maximum number
+ * of interdependent work items for the workqueue.
+ *
+ * To guarantee forward progress regardless of online CPU distribution, the
+ * concurrency limit on every node is guaranteed to be equal to or greater than
+ * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
+ * that the sum of per-node max_active's may be larger than @max_active.
+ *
+ * For detailed information on %WQ_* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-struct workqueue_struct *alloc_workqueue(const char *fmt,
- unsigned int flags,
- int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
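
A minimal, hypothetical call matching the semantics described above: an
unbound workqueue capped at 16 in-flight work items system-wide, whose
per-node limits never drop below min(16, WQ_DFL_MIN_ACTIVE) = 8. The name
and flag choice are illustrative only.

static struct workqueue_struct *example_wq_create(void)
{
	return alloc_workqueue("example_wq", WQ_UNBOUND | WQ_FREEZABLE, 16);
}
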
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
@@ -422,8 +513,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
- __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
@@ -433,13 +523,16 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
#define create_singlethread_workqueue(name) \
alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
+#define from_work(var, callback_work, work_fieldname) \
+ container_of(callback_work, typeof(*var), work_fieldname)
+
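
Hypothetical use of the new from_work() helper: recover the containing
object from the work_struct passed to a callback. The type and field names
are made up for the sketch.

struct example_dev {
	struct work_struct reset_work;
	/* ... other fields ... */
};

static void example_reset_fn(struct work_struct *work)
{
	struct example_dev *dev = from_work(dev, work, reset_work);

	/* operate on dev here */
}
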
extern void destroy_workqueue(struct workqueue_struct *wq);
struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
@@ -451,7 +544,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
@@ -459,6 +552,7 @@ extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
+extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
@@ -469,13 +563,17 @@ extern bool flush_rcu_work(struct rcu_work *rwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
+extern void workqueue_set_min_active(struct workqueue_struct *wq,
+ int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_freezable_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
/**
@@ -568,34 +666,44 @@ static inline bool schedule_work(struct work_struct *work)
return queue_work(system_wq, work);
}
-/**
- * flush_scheduled_work - ensure that any scheduled work has run to completion.
- *
- * Forces execution of the kernel-global workqueue and blocks until its
- * completion.
- *
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
- *
- * One of the work items currently on the workqueue needs to acquire
- * a lock held by your code or its caller.
- *
- * Your code is running in the context of a work routine.
- *
- * They will be detected by lockdep when they occur, but the first might not
- * occur very often. It depends on what work items are on the workqueue and
- * what locks they need, which you have no control over.
+/*
+ * Detect attempts to flush system-wide workqueues at compile time when
+ * possible. Warn about attempts to flush system-wide workqueues at runtime.
*
- * In most situations flushing the entire workqueue is overkill; you merely
- * need to know that a particular work item isn't queued and isn't running.
- * In such cases you should use cancel_delayed_work_sync() or
- * cancel_work_sync() instead.
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
*/
-static inline void flush_scheduled_work(void)
-{
- flush_workqueue(system_wq);
-}
+extern void __warn_flushing_systemwide_wq(void)
+ __compiletime_warning("Please avoid flushing system-wide workqueues.");
+
+/* Please stop using this function; it will be removed in the near future. */
+#define flush_scheduled_work() \
+({ \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(system_wq); \
+})
+
+#define flush_workqueue(wq) \
+({ \
+ struct workqueue_struct *_wq = (wq); \
+ \
+ if ((__builtin_constant_p(_wq == system_wq) && \
+ _wq == system_wq) || \
+ (__builtin_constant_p(_wq == system_highpri_wq) && \
+ _wq == system_highpri_wq) || \
+ (__builtin_constant_p(_wq == system_long_wq) && \
+ _wq == system_long_wq) || \
+ (__builtin_constant_p(_wq == system_unbound_wq) && \
+ _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_wq) && \
+ _wq == system_freezable_wq) || \
+ (__builtin_constant_p(_wq == system_power_efficient_wq) && \
+ _wq == system_power_efficient_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+ _wq == system_freezable_power_efficient_wq)) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(_wq); \
+})
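
Sketch of the conversion the warning pushes callers toward (names are
hypothetical): give the driver its own workqueue so that flushing waits only
on its own work items rather than everything queued system-wide.

static struct workqueue_struct *example_wq;	/* created in module init */

static void example_teardown(void)
{
	/* fine: not one of the system-wide workqueues checked above */
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}
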
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@@ -636,8 +744,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
return fn(arg);
}
#else
-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu_key(int cpu, long (*fn)(void *),
+ void *arg, struct lock_class_key *key);
+/*
+ * A new key is defined for each caller to make sure the work
+ * associated with the function doesn't share its locking class.
+ */
+#define work_on_cpu(_cpu, _fn, _arg) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
+})
+
+long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
+ void *arg, struct lock_class_key *key);
+
+/*
+ * A new key is defined for each caller to make sure the work
+ * associated with the function doesn't share its locking class.
+ */
+#define work_on_cpu_safe(_cpu, _fn, _arg) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
+})
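
Hypothetical caller of work_on_cpu(): run a function synchronously on a given
CPU and return its result. raw_smp_processor_id() from <linux/smp.h> is used
only to show that the function really runs on the target CPU.

static long example_on_target_cpu(void *arg)
{
	/* executes on the CPU passed to work_on_cpu() */
	return raw_smp_processor_id();
}

static long example_query(int cpu)
{
	return work_on_cpu(cpu, example_on_target_cpu, NULL);
}
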
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
@@ -667,5 +799,6 @@ int workqueue_offline_cpu(unsigned int cpu);
void __init workqueue_init_early(void);
void __init workqueue_init(void);
+void __init workqueue_init_topology(void);
#endif
diff --git a/include/linux/workqueue_api.h b/include/linux/workqueue_api.h
new file mode 100644
index 000000000000..77debb5d2760
--- /dev/null
+++ b/include/linux/workqueue_api.h
@@ -0,0 +1 @@
+#include <linux/workqueue.h>
diff --git a/include/linux/workqueue_types.h b/include/linux/workqueue_types.h
new file mode 100644
index 000000000000..4c38824f3ab4
--- /dev/null
+++ b/include/linux/workqueue_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_WORKQUEUE_TYPES_H
+#define _LINUX_WORKQUEUE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/timer_types.h>
+#include <linux/types.h>
+
+struct workqueue_struct;
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(struct timer_list *t);
+
+struct work_struct {
+ atomic_long_t data;
+ struct list_head entry;
+ work_func_t func;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#endif /* _LINUX_WORKQUEUE_TYPES_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 8e5c5bb16e2d..9845cb62e40b 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -11,27 +11,19 @@
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
-#include <linux/blk-cgroup.h>
+#include <linux/pagevec.h>
struct bio;
DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
- * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
- *
- * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
- *
- * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
- * time) for the dirty pages to drop, unless written enough pages.
- *
* The global dirty threshold is normally equal to the global dirty limit,
* except when the system suddenly allocates a lot of anonymous memory and
* knocks down the global dirty threshold quickly, in which case the global
* dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
*/
#define DIRTY_SCOPE 8
-#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
struct backing_dev_info;
@@ -49,6 +41,7 @@ enum writeback_sync_modes {
* in a manner such that unspecified fields are set to zero.
*/
struct writeback_control {
+ /* public fields that can be set and/or consumed by the caller: */
long nr_to_write; /* Write this many pages, and decrement
this for each page written */
long pages_skipped; /* Pages which were not written */
@@ -69,6 +62,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+ unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
/*
* When writeback IOs are bounced through async layers, only the
@@ -78,7 +72,17 @@ struct writeback_control {
*/
unsigned no_cgroup_owner:1;
- unsigned punt_to_cgroup:1; /* cgrp punting, see __REQ_CGROUP_PUNT */
+	/* To enable batching of swap writes to non-block-device backends,
+	 * "plug" can be set to point to a 'struct swap_iocb *'. When all swap
+	 * writes have been submitted, if the swap_iocb is not NULL,
+	 * swap_write_unplug() should be called.
+ */
+ struct swap_iocb **swap_plug;
+
+ /* internal fields used by the ->writepages implementation: */
+ struct folio_batch fbatch;
+ pgoff_t index;
+ int saved_err;
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb; /* wb this writeback is issued under */
@@ -94,12 +98,9 @@ struct writeback_control {
#endif
};
-static inline int wbc_to_write_flags(struct writeback_control *wbc)
+static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)
{
- int flags = 0;
-
- if (wbc->punt_to_cgroup)
- flags = REQ_CGROUP_PUNT;
+ blk_opf_t flags = 0;
if (wbc->sync_mode == WB_SYNC_ALL)
flags |= REQ_SYNC;
@@ -109,15 +110,12 @@ static inline int wbc_to_write_flags(struct writeback_control *wbc)
return flags;
}
-static inline struct cgroup_subsys_state *
-wbc_blkcg_css(struct writeback_control *wbc)
-{
#ifdef CONFIG_CGROUP_WRITEBACK
- if (wbc->wb)
- return wbc->wb->blkcg_css;
-#endif
- return blkcg_root_css;
-}
+#define wbc_blkcg_css(wbc) \
+ ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
+#else
+#define wbc_blkcg_css(wbc) (blkcg_root_css)
+#endif /* CONFIG_CGROUP_WRITEBACK */
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
@@ -202,7 +200,6 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
@@ -211,30 +208,31 @@ static inline void wait_on_inode(struct inode *inode)
#include <linux/cgroup.h>
#include <linux/bio.h>
-void __inode_attach_wb(struct inode *inode, struct page *page);
+void __inode_attach_wb(struct inode *inode, struct folio *folio);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
struct inode *inode)
__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
size_t bytes);
-int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages,
+int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
+bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
* inode_attach_wb - associate an inode with its wb
* @inode: inode of interest
- * @page: page being dirtied (may be NULL)
+ * @folio: folio being dirtied (may be NULL)
*
* If @inode doesn't have its wb, associate it with the wb matching the
- * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
+ * memcg of @folio or, if @folio is NULL, %current. May be called w/ or w/o
* @inode->i_lock.
*/
-static inline void inode_attach_wb(struct inode *inode, struct page *page)
+static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
if (!inode->i_wb)
- __inode_attach_wb(inode, page);
+ __inode_attach_wb(inode, folio);
}
/**
@@ -293,7 +291,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline void inode_attach_wb(struct inode *inode, struct page *page)
+static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
}
@@ -335,14 +333,9 @@ static inline void cgroup_writeback_umount(void)
/*
* mm/page-writeback.c
*/
-#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
-void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(struct timer_list *t);
-#else
-static inline void laptop_sync_completion(void) { }
-#endif
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -352,44 +345,34 @@ void wb_domain_exit(struct wb_domain *dom);
extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
-extern int dirty_background_ratio;
-extern unsigned long dirty_background_bytes;
-extern int vm_dirty_ratio;
-extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
-extern int vm_highmem_is_dirtyable;
-extern int block_dump;
extern int laptop_mode;
-int dirty_background_ratio_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int dirty_background_bytes_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int dirty_ratio_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int dirty_bytes_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
-void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
+void wb_update_bandwidth(struct bdi_writeback *wb);
+
+/* Invoke balance dirty pages in async mode. */
+#define BDP_ASYNC 0x0001
+
void balance_dirty_pages_ratelimited(struct address_space *mapping);
+int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
+ unsigned int flags);
+
bool wb_over_bg_thresh(struct bdi_writeback *wb);
-typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
+struct folio *writeback_iter(struct address_space *mapping,
+ struct writeback_control *wbc, struct folio *folio, int *error);
+
+typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
void *data);
-int generic_writepages(struct address_space *mapping,
- struct writeback_control *wbc);
-void tag_pages_for_writeback(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
@@ -398,7 +381,9 @@ void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-void account_page_redirty(struct page *page);
+bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
+bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
+bool redirty_page_for_writepage(struct writeback_control *, struct page *);
void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 850424e5d030..bb763085479a 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -18,6 +18,22 @@
#define __LINUX_WW_MUTEX_H
#include <linux/mutex.h>
+#include <linux/rtmutex.h>
+
+#if defined(CONFIG_DEBUG_MUTEXES) || \
+ (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
+#define DEBUG_WW_MUTEXES
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+#define WW_MUTEX_BASE mutex
+#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k)
+#define ww_mutex_base_is_locked(b) mutex_is_locked((b))
+#else
+#define WW_MUTEX_BASE rt_mutex
+#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k)
+#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex)
+#endif
struct ww_class {
atomic_long_t stamp;
@@ -28,16 +44,24 @@ struct ww_class {
unsigned int is_wait_die;
};
+struct ww_mutex {
+ struct WW_MUTEX_BASE base;
+ struct ww_acquire_ctx *ctx;
+#ifdef DEBUG_WW_MUTEXES
+ struct ww_class *ww_class;
+#endif
+};
+
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
unsigned short wounded;
unsigned short is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
unsigned int done_acquire;
struct ww_class *ww_class;
- struct ww_mutex *contending_lock;
+ void *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -48,48 +72,35 @@ struct ww_acquire_ctx {
#endif
};
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
- , .ww_class = class
-#else
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
-#endif
-
#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
, .mutex_name = #ww_class "_mutex" \
, .is_wait_die = _is_wait_die }
-#define __WW_MUTEX_INITIALIZER(lockname, class) \
- { .base = __MUTEX_INITIALIZER(lockname.base) \
- __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
-
#define DEFINE_WD_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
#define DEFINE_WW_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
-#define DEFINE_WW_MUTEX(mutexname, ww_class) \
- struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
-
/**
* ww_mutex_init - initialize the w/w mutex
* @lock: the mutex to be initialized
* @ww_class: the w/w class the mutex should belong to
*
* Initialize the w/w mutex to unlocked state and associate it with the given
- * class.
+ * class. A static initializer macro for w/w mutexes is not provided; this
+ * function is the only way to properly initialize a w/w mutex.
*
* It is not allowed to initialize an already locked mutex.
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
{
- __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+ ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
lock->ww_class = ww_class;
#endif
}
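
A condensed sketch of the classic wait/wound acquire loop these helpers
support (names hypothetical; swap() and WARN_ON() come from the usual kernel
headers; see Documentation/locking/ww-mutex-design.rst for the canonical
form):

static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			      struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, class);

	ret = ww_mutex_lock(a, &ctx);	/* first lock of a fresh ctx: 0 */
	WARN_ON(ret);

	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/* an older ctx owns @b: back off, sleep on @b, retry */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);		/* the lock we now hold becomes @a */
	}

	ww_acquire_done(&ctx);
	/* ... critical section; then unlock both and ww_acquire_fini(&ctx) */
}
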
@@ -126,7 +137,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
ctx->acquired = 0;
ctx->wounded = false;
ctx->is_wait_die = ww_class->is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = NULL;
@@ -156,7 +167,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
@@ -173,9 +184,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
@@ -281,7 +293,7 @@ static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
ret = ww_mutex_lock(lock, ctx);
@@ -317,7 +329,7 @@ static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
return ww_mutex_lock_interruptible(lock, ctx);
@@ -325,17 +337,8 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
extern void ww_mutex_unlock(struct ww_mutex *lock);
-/**
- * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
- * @lock: mutex to lock
- *
- * Trylocks a mutex without acquire context, so no deadlock detection is
- * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
- */
-static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
-{
- return mutex_trylock(&lock->base);
-}
+extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx);
/**
* ww_mutex_destroy - mark a w/w mutex unusable
@@ -347,7 +350,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
+#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
+#endif
}
/**
@@ -358,7 +363,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock)
*/
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
- return mutex_is_locked(&lock->base);
+ return ww_mutex_base_is_locked(&lock->base);
}
#endif
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
new file mode 100644
index 000000000000..170fdee6339c
--- /dev/null
+++ b/include/linux/wwan.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+
+#ifndef __WWAN_H
+#define __WWAN_H
+
+#include <linux/poll.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+/**
+ * enum wwan_port_type - WWAN port types
+ * @WWAN_PORT_AT: AT commands
+ * @WWAN_PORT_MBIM: Mobile Broadband Interface Model control
+ * @WWAN_PORT_QMI: Qcom modem/MSM interface for modem control
+ * @WWAN_PORT_QCDM: Qcom Modem diagnostic interface
+ * @WWAN_PORT_FIREHOSE: XML based command protocol
+ * @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems
+ * @WWAN_PORT_FASTBOOT: Fastboot protocol control
+ *
+ * @WWAN_PORT_MAX: Highest supported port type
+ * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
+ * @__WWAN_PORT_MAX: Internal use
+ */
+enum wwan_port_type {
+ WWAN_PORT_AT,
+ WWAN_PORT_MBIM,
+ WWAN_PORT_QMI,
+ WWAN_PORT_QCDM,
+ WWAN_PORT_FIREHOSE,
+ WWAN_PORT_XMMRPC,
+ WWAN_PORT_FASTBOOT,
+
+ /* Add new port types above this line */
+
+ __WWAN_PORT_MAX,
+ WWAN_PORT_MAX = __WWAN_PORT_MAX - 1,
+ WWAN_PORT_UNKNOWN,
+};
+
+struct device;
+struct file;
+struct netlink_ext_ack;
+struct sk_buff;
+struct wwan_port;
+
+/**
+ * struct wwan_port_ops - The WWAN port operations
+ * @start: The routine for starting the WWAN port device.
+ * @stop: The routine for stopping the WWAN port device.
+ * @tx: Non-blocking routine that sends WWAN port protocol data to the device.
+ * @tx_blocking: Optional blocking routine that sends WWAN port protocol data
+ * to the device.
+ * @tx_poll: Optional routine that sets additional TX poll flags.
+ *
+ * The wwan_port_ops structure contains a list of low-level operations
+ * that control a WWAN port device. All functions are mandatory unless specified.
+ */
+struct wwan_port_ops {
+ int (*start)(struct wwan_port *port);
+ void (*stop)(struct wwan_port *port);
+ int (*tx)(struct wwan_port *port, struct sk_buff *skb);
+
+ /* Optional operations */
+ int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb);
+ __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp,
+ poll_table *wait);
+};
+
+/**
+ * struct wwan_port_caps - The WWAN port capabilities
+ * @frag_len: WWAN port TX fragments length
+ * @headroom_len: WWAN port TX fragments reserved headroom length
+ */
+struct wwan_port_caps {
+ size_t frag_len;
+ unsigned int headroom_len;
+};
+
+/**
+ * wwan_create_port - Add a new WWAN port
+ * @parent: Device to use as parent and shared by all WWAN ports
+ * @type: WWAN port type
+ * @ops: WWAN port operations
+ * @caps: WWAN port capabilities
+ * @drvdata: Pointer to caller driver data
+ *
+ * Allocate and register a new WWAN port. The port will be automatically exposed
+ * to userspace as a character device and attached to the right virtual WWAN
+ * device, based on the parent pointer. The parent pointer is the device shared
+ * by all components of the same WWAN modem (e.g. USB dev, PCI dev, MHI
+ * controller...).
+ *
+ * drvdata will be placed in the WWAN port device driver data and can be
+ * retrieved with wwan_port_get_drvdata().
+ *
+ * This function must be balanced with a call to wwan_remove_port().
+ *
+ * Returns a valid pointer to wwan_port on success or an ERR_PTR() on failure
+ */
+struct wwan_port *wwan_create_port(struct device *parent,
+ enum wwan_port_type type,
+ const struct wwan_port_ops *ops,
+ struct wwan_port_caps *caps,
+ void *drvdata);
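
Hypothetical driver sketch of the API above: expose an AT command port with
stub callbacks. All names are made up; <linux/err.h> and <linux/skbuff.h>
are assumed for IS_ERR()/PTR_ERR() and dev_kfree_skb().

/* stub callbacks; a real driver would talk to its hardware here */
static int example_at_start(struct wwan_port *port) { return 0; }
static void example_at_stop(struct wwan_port *port) { }
static int example_at_tx(struct wwan_port *port, struct sk_buff *skb)
{
	dev_kfree_skb(skb);		/* stub: drop the frame */
	return 0;
}

static const struct wwan_port_ops example_at_ops = {
	.start = example_at_start,
	.stop  = example_at_stop,
	.tx    = example_at_tx,
};

static int example_expose_at_port(struct device *parent, void *drvdata)
{
	struct wwan_port_caps caps = { .frag_len = 4096 };	/* illustrative */
	struct wwan_port *port;

	port = wwan_create_port(parent, WWAN_PORT_AT, &example_at_ops,
				&caps, drvdata);
	if (IS_ERR(port))
		return PTR_ERR(port);

	/* stash @port; call wwan_remove_port(port) on teardown */
	return 0;
}
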
+
+/**
+ * wwan_remove_port - Remove a WWAN port
+ * @port: WWAN port to remove
+ *
+ * Remove a previously created port.
+ */
+void wwan_remove_port(struct wwan_port *port);
+
+/**
+ * wwan_port_rx - Receive data from the WWAN port
+ * @port: WWAN port for which data is received
+ * @skb: Pointer to the rx buffer
+ *
+ * A port driver calls this function upon data reception (MBIM, AT...).
+ */
+void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb);
+
+/**
+ * wwan_port_txoff - Stop TX on WWAN port
+ * @port: WWAN port for which TX must be stopped
+ *
+ * Used for TX flow control, a port driver calls this function to indicate TX
+ * is temporarily unavailable (e.g. due to ring buffer fullness).
+ */
+void wwan_port_txoff(struct wwan_port *port);
+
+
+/**
+ * wwan_port_txon - Restart TX on WWAN port
+ * @port: WWAN port for which TX must be restarted
+ *
+ * Used for TX flow control, a port driver calls this function to indicate TX
+ * is available again.
+ */
+void wwan_port_txon(struct wwan_port *port);
+
+/**
+ * wwan_port_get_drvdata - Retrieve driver data from a WWAN port
+ * @port: Related WWAN port
+ */
+void *wwan_port_get_drvdata(struct wwan_port *port);
+
+/**
+ * struct wwan_netdev_priv - WWAN core network device private data
+ * @link_id: WWAN device data link id
+ * @drv_priv: driver private data area, size is determined in &wwan_ops
+ */
+struct wwan_netdev_priv {
+ u32 link_id;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+static inline void *wwan_netdev_drvpriv(struct net_device *dev)
+{
+ return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv;
+}
+
+/*
+ * Used to indicate that the WWAN core should not create a default network
+ * link.
+ */
+#define WWAN_NO_DEFAULT_LINK U32_MAX
+
+/**
+ * struct wwan_ops - WWAN device ops
+ * @priv_size: size of private netdev data area
+ * @setup: set up a new netdev
+ * @newlink: register the new netdev
+ * @dellink: remove the given netdev
+ */
+struct wwan_ops {
+ unsigned int priv_size;
+ void (*setup)(struct net_device *dev);
+ int (*newlink)(void *ctxt, struct net_device *dev,
+ u32 if_id, struct netlink_ext_ack *extack);
+ void (*dellink)(void *ctxt, struct net_device *dev,
+ struct list_head *head);
+};
+
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+ void *ctxt, u32 def_link_id);
+
+void wwan_unregister_ops(struct device *parent);
+
+#ifdef CONFIG_WWAN_DEBUGFS
+struct dentry *wwan_get_debugfs_dir(struct device *parent);
+void wwan_put_debugfs_dir(struct dentry *dir);
+#else
+static inline struct dentry *wwan_get_debugfs_dir(struct device *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void wwan_put_debugfs_dir(struct dentry *dir) {}
+#endif
+
+#endif /* __WWAN_H */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index b4d70e7568b2..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -9,12 +9,14 @@
* See Documentation/core-api/xarray.rst for how to use the XArray.
*/
+#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -229,9 +231,10 @@ static inline int xa_err(void *entry)
*
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
* * xa_limit_32b - [0 - UINT_MAX]
* * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_16b - [0 - USHRT_MAX]
*/
struct xa_limit {
u32 max;
@@ -242,6 +245,7 @@ struct xa_limit {
#define xa_limit_32b XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0 ((__force xa_mark_t)0U)
@@ -583,6 +587,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock_bh(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_bh(xa);
@@ -609,6 +614,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock_irq(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_irq(xa);
@@ -684,6 +690,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock(xa);
@@ -711,6 +718,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock_bh(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock_bh(xa);
@@ -738,6 +746,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
{
void *curr;
+ might_alloc(gfp);
xa_lock_irq(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock_irq(xa);
@@ -767,6 +776,7 @@ static inline int __must_check xa_insert(struct xarray *xa,
{
int err;
+ might_alloc(gfp);
xa_lock(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock(xa);
@@ -796,6 +806,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa,
{
int err;
+ might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock_bh(xa);
@@ -825,6 +836,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
{
int err;
+ might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock_irq(xa);
@@ -844,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -854,6 +869,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
{
int err;
+ might_alloc(gfp);
xa_lock(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock(xa);
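
Usage sketch (hypothetical, not part of this diff): an ID allocator built on
xa_alloc(). DEFINE_XARRAY_ALLOC() sets XA_FLAGS_ALLOC, satisfying the
requirement noted above.

static DEFINE_XARRAY_ALLOC(example_ids);

static int example_register(void *object, u32 *out_id)
{
	/* IDs are allocated from [0, INT_MAX] */
	return xa_alloc(&example_ids, out_id, object, xa_limit_31b,
			GFP_KERNEL);
}
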
@@ -873,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -883,6 +902,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
{
int err;
+ might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_bh(xa);
@@ -902,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -912,6 +935,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
{
int err;
+ might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_irq(xa);
@@ -934,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -945,6 +972,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
{
int err;
+ might_alloc(gfp);
xa_lock(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock(xa);
@@ -967,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -978,6 +1009,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
{
int err;
+ might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_bh(xa);
@@ -1000,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1011,6 +1046,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
{
int err;
+ might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_irq(xa);
@@ -1286,6 +1322,8 @@ static inline bool xa_is_advanced(const void *entry)
*/
typedef void (*xa_update_node_t)(struct xa_node *node);
+void xa_delete_node(struct xa_node *, xa_update_node_t);
+
/*
* The xa_state is opaque to its users. It contains various different pieces
* of state involved in the current operation on the XArray. It should be
@@ -1313,6 +1351,7 @@ struct xa_state {
struct xa_node *xa_node;
struct xa_node *xa_alloc;
xa_update_node_t xa_update;
+ struct list_lru *xa_lru;
};
/*
@@ -1332,7 +1371,8 @@ struct xa_state {
.xa_pad = 0, \
.xa_node = XAS_RESTART, \
.xa_alloc = NULL, \
- .xa_update = NULL \
+ .xa_update = NULL, \
+ .xa_lru = NULL, \
}
/**
@@ -1501,10 +1541,33 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);
bool xas_nomem(struct xa_state *, gfp_t);
+void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);
void xas_create_range(struct xa_state *);
+#ifdef CONFIG_XARRAY_MULTI
+int xa_get_order(struct xarray *, unsigned long index);
+void xas_split(struct xa_state *, void *entry, unsigned int order);
+void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+#else
+static inline int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ return 0;
+}
+
+static inline void xas_split(struct xa_state *xas, void *entry,
+ unsigned int order)
+{
+ xas_store(xas, entry);
+}
+
+static inline void xas_split_alloc(struct xa_state *xas, void *entry,
+ unsigned int order, gfp_t gfp)
+{
+}
+#endif
+
/**
* xas_reload() - Refetch an entry from the xarray.
* @xas: XArray operation state.
@@ -1522,10 +1585,21 @@ void xas_create_range(struct xa_state *);
static inline void *xas_reload(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
-
- if (node)
- return xa_entry(xas->xa, node, xas->xa_offset);
- return xa_head(xas->xa);
+ void *entry;
+ char offset;
+
+ if (!node)
+ return xa_head(xas->xa);
+ if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
+ offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
+ entry = xa_entry(xas->xa, node, offset);
+ if (!xa_is_sibling(entry))
+ return entry;
+ offset = xa_to_sibling(entry);
+ } else {
+ offset = xas->xa_offset;
+ }
+ return xa_entry(xas->xa, node, offset);
}
/**
@@ -1544,6 +1618,24 @@ static inline void xas_set(struct xa_state *xas, unsigned long index)
}
/**
+ * xas_advance() - Skip over sibling entries.
+ * @xas: XArray operation state.
+ * @index: Index of last sibling entry.
+ *
+ * Move the operation state to refer to the last sibling entry.
+ * This is useful for loops that normally want to see sibling
+ * entries but sometimes want to skip them. Use xas_set() if you
+ * want to move to an index which is not part of this entry.
+ */
+static inline void xas_advance(struct xa_state *xas, unsigned long index)
+{
+ unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
+
+ xas->xa_index = index;
+ xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
+}
+
+/**
* xas_set_order() - Set up XArray operation state for a multislot entry.
* @xas: XArray operation state.
* @index: Target of the operation.
@@ -1569,13 +1661,19 @@ static inline void xas_set_order(struct xa_state *xas, unsigned long index,
* @update: Function to call when updating a node.
*
* The XArray can notify a caller after it has updated an xa_node.
- * This is advanced functionality and is only needed by the page cache.
+ * This is advanced functionality and is only needed by the page
+ * cache and swap cache.
*/
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
xas->xa_update = update;
}
+static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
+{
+ xas->xa_lru = lru;
+}
+
/**
* xas_next_entry() - Advance iterator to next present entry.
* @xas: XArray operation state.
@@ -1714,13 +1812,12 @@ enum {
* @xas: XArray operation state.
* @entry: Entry retrieved from the array.
*
- * The loop body will be executed for each entry in the XArray that lies
- * within the range specified by @xas. If the loop completes successfully,
- * any entries that lie in this range will be replaced by @entry. The caller
- * may break out of the loop; if they do so, the contents of the XArray will
- * be unchanged. The operation may fail due to an out of memory condition.
- * The caller may also call xa_set_err() to exit the loop while setting an
- * error to record the reason.
+ * The loop body will be executed for each entry in the XArray that
+ * lies within the range specified by @xas. If the loop terminates
+ * normally, @entry will be %NULL. The user may break out of the loop,
+ * which will leave @entry set to the conflicting entry. The caller
+ * may also call xa_set_err() to exit the loop while setting an error
+ * to record the reason.
*/
#define xas_for_each_conflict(xas, entry) \
while ((entry = xas_find_conflict(xas)))
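
A hypothetical caller following the pattern described above: walk conflicting
entries, bail out on an unexpected one, and store only when the loop ends
normally (i.e. @entry is NULL).

static void *example_replace_if(struct xarray *xa, unsigned long index,
				void *expected, void *newent)
{
	XA_STATE(xas, xa, index);
	void *curr;

	xas_lock(&xas);
	xas_for_each_conflict(&xas, curr) {
		if (curr != expected)
			break;		/* conflicting entry: leave array as-is */
	}
	if (!curr)
		xas_store(&xas, newent);	/* loop ended normally */
	xas_unlock(&xas);
	return curr;
}
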
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 10b4dc2709f0..d20051865800 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -16,11 +16,18 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
struct inode;
struct dentry;
+static inline bool is_posix_acl_xattr(const char *name)
+{
+ return (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+ (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0);
+}
+
/*
* struct xattr_handler: When @name is set, match attributes with exactly that
* name. When @prefix is set instead, match attributes with that prefix and
@@ -34,11 +41,28 @@ struct xattr_handler {
int (*get)(const struct xattr_handler *, struct dentry *dentry,
struct inode *inode, const char *name, void *buffer,
size_t size);
- int (*set)(const struct xattr_handler *, struct dentry *dentry,
+ int (*set)(const struct xattr_handler *,
+ struct mnt_idmap *idmap, struct dentry *dentry,
struct inode *inode, const char *name, const void *buffer,
size_t size, int flags);
};
+/**
+ * xattr_handler_can_list - check whether xattr can be listed
+ * @handler: handler for this type of xattr
+ * @dentry: dentry whose inode xattr to list
+ *
+ * Determine whether the xattr associated with @dentry can be listed given
+ * @handler.
+ *
+ * Return: true if xattr can be listed, false if not.
+ */
+static inline bool xattr_handler_can_list(const struct xattr_handler *handler,
+ struct dentry *dentry)
+{
+ return handler && (!handler->list || handler->list(dentry));
+}
+
const char *xattr_full_name(const struct xattr_handler *, const char *);
struct xattr {
@@ -48,21 +72,29 @@ struct xattr {
};
ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
-ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ssize_t vfs_getxattr(struct mnt_idmap *, struct dentry *, const char *,
+ void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
-int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
-int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
-int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
-int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
-int __vfs_removexattr(struct dentry *, const char *);
-int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
-int vfs_removexattr(struct dentry *, const char *);
+int __vfs_setxattr(struct mnt_idmap *, struct dentry *, struct inode *,
+ const char *, const void *, size_t, int);
+int __vfs_setxattr_noperm(struct mnt_idmap *, struct dentry *,
+ const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct mnt_idmap *, struct dentry *,
+ const char *, const void *, size_t, int,
+ struct inode **);
+int vfs_setxattr(struct mnt_idmap *, struct dentry *, const char *,
+ const void *, size_t, int);
+int __vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *);
+int __vfs_removexattr_locked(struct mnt_idmap *, struct dentry *,
+ const char *, struct inode **);
+int vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
- char **xattr_value, size_t size, gfp_t flags);
+int vfs_getxattr_alloc(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
-int xattr_supported_namespace(struct inode *inode, const char *prefix);
+int xattr_supports_user_prefix(struct inode *inode);
static inline const char *xattr_prefix(const struct xattr_handler *handler)
{
@@ -70,48 +102,31 @@ static inline const char *xattr_prefix(const struct xattr_handler *handler)
}
struct simple_xattrs {
- struct list_head head;
- spinlock_t lock;
+ struct rb_root rb_root;
+ rwlock_t lock;
};
struct simple_xattr {
- struct list_head list;
+ struct rb_node rb_node;
char *name;
size_t size;
char value[];
};
-/*
- * initialize the simple_xattrs structure
- */
-static inline void simple_xattrs_init(struct simple_xattrs *xattrs)
-{
- INIT_LIST_HEAD(&xattrs->head);
- spin_lock_init(&xattrs->lock);
-}
-
-/*
- * free all the xattrs
- */
-static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
-{
- struct simple_xattr *xattr, *node;
-
- list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
- kfree(xattr->name);
- kvfree(xattr);
- }
-}
-
+void simple_xattrs_init(struct simple_xattrs *xattrs);
+void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
+size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
+void simple_xattr_free(struct simple_xattr *xattr);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
-int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags,
- ssize_t *removed_size);
-ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer,
- size_t size);
-void simple_xattr_list_add(struct simple_xattrs *xattrs,
- struct simple_xattr *new_xattr);
+struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
+ const char *name, const void *value,
+ size_t size, int flags);
+ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ char *buffer, size_t size);
+void simple_xattr_add(struct simple_xattrs *xattrs,
+ struct simple_xattr *new_xattr);
+int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name);
#endif /* _LINUX_XATTR_H */
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 9884c8440188..7285ca5d56e9 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -234,6 +234,112 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
XZ_EXTERN void xz_dec_end(struct xz_dec *s);
/*
+ * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
+ * See xz_dec_microlzma_alloc() below for details.
+ *
+ * These functions aren't used or available in preboot code and thus aren't
+ * marked with XZ_EXTERN. This avoids warnings about static functions that
+ * are never defined.
+ */
+/**
+ * struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
+ */
+struct xz_dec_microlzma;
+
+/**
+ * xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
+ * @mode:      XZ_SINGLE or XZ_PREALLOC
+ * @dict_size: LZMA dictionary size. This must be at least 4 KiB and
+ *             at most 3 GiB.
+ *
+ * In contrast to xz_dec_init(), this function only allocates the memory
+ * and remembers the dictionary size. xz_dec_microlzma_reset() must be used
+ * before calling xz_dec_microlzma_run().
+ *
+ * The amount of allocated memory is a little less than 30 KiB with XZ_SINGLE.
+ * With XZ_PREALLOC also a dictionary buffer of dict_size bytes is allocated.
+ *
+ * On success, xz_dec_microlzma_alloc() returns a pointer to
+ * struct xz_dec_microlzma. If memory allocation fails or
+ * dict_size is invalid, NULL is returned.
+ *
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too.
+ */
+extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
+
+/**
+ * xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
+ * @s:          Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size:  Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
+ * than the real uncompressed size of the input stream can
+ * be specified if uncomp_size_is_exact is set to false.
+ * uncomp_size can never be set to a value larger than the
+ * expected real uncompressed size because it would eventually
+ * result in XZ_DATA_ERROR.
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
+ * requiring stdbool.h. This should normally be set to true.
+ * When this is set to false, error detection is weaker.
+ */
+extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
+ uint32_t comp_size, uint32_t uncomp_size,
+ int uncomp_size_is_exact);
+
+/**
+ * xz_dec_microlzma_run() - Run the MicroLZMA decoder
+ * @s:         Decoder state initialized using xz_dec_microlzma_reset()
+ * @b: Input and output buffers
+ *
+ * This works similarly to xz_dec_run() with a few important differences.
+ * Only the differences are documented here.
+ *
+ * The only possible return values are XZ_OK, XZ_STREAM_END, and
+ * XZ_DATA_ERROR. This function cannot return XZ_BUF_ERROR: if no progress
+ * is possible due to lack of input data or output space, this function will
+ * keep returning XZ_OK. Thus, the calling code must be written so that it
+ * will eventually provide input and output space matching (or exceeding)
+ * comp_size and uncomp_size arguments given to xz_dec_microlzma_reset().
+ * If the caller cannot do this (for example, if the input file is truncated
+ * or otherwise corrupt), the caller must detect this error by itself to
+ * avoid an infinite loop.
+ *
+ * If the compressed data seems to be corrupt, XZ_DATA_ERROR is returned.
+ * This can also happen when an incorrect dictionary size, uncompressed
+ * size, or compressed size has been specified.
+ *
+ * With XZ_PREALLOC only: As an extra feature, b->out may be NULL to skip over
+ * uncompressed data. This way the caller doesn't need to provide a temporary
+ * output buffer for the bytes that will be ignored.
+ *
+ * With XZ_SINGLE only: In contrast to xz_dec_run(), the return value XZ_OK
+ * is also possible and thus XZ_SINGLE is actually a limited multi-call mode.
+ * After XZ_OK the bytes decoded so far may be read from the output buffer.
+ * It is possible to continue decoding but the variables b->out and b->out_pos
+ * MUST NOT be changed by the caller. Increasing the value of b->out_size is
+ * allowed to make more output space available; one doesn't need to provide
+ * space for the whole uncompressed data on the first call. The input buffer
+ * may be changed normally like with XZ_PREALLOC. This way input data can be
+ * provided from non-contiguous memory.
+ */
+extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
+ struct xz_buf *b);
+
+/**
+ * xz_dec_microlzma_end() - Free the memory allocated for the decoder state
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc().
+ * If s is NULL, this function does nothing.
+ */
+extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+
+/*
* Standalone build (userspace build or in-kernel build for boot time use)
* needs a CRC32 implementation. For normal in-kernel use, kernel's own
* CRC32 module is used instead, and users of this module don't need to
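To make the multi-call contract above concrete, here is a minimal usage sketch (illustrative only, not part of the patch; the 64 KiB dictionary size is an arbitrary valid value and error handling is abbreviated):

#include <linux/errno.h>
#include <linux/xz.h>

/* Decompress one MicroLZMA stream whose exact sizes are known up front. */
static int microlzma_decompress(const u8 *in, size_t in_size,
				u8 *out, size_t out_size)
{
	struct xz_dec_microlzma *s;
	struct xz_buf b = {
		.in = in, .in_pos = 0, .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	enum xz_ret ret;

	/* Any dict_size in [4 KiB, 3 GiB] is accepted; 64 KiB is arbitrary. */
	s = xz_dec_microlzma_alloc(XZ_PREALLOC, 64 * 1024);
	if (s == NULL)
		return -ENOMEM;

	/* The sizes are exact here, so pass 1 for stronger error detection. */
	xz_dec_microlzma_reset(s, in_size, out_size, 1);

	/*
	 * With the full input and output supplied, one call must reach
	 * XZ_STREAM_END; XZ_OK would mean we broke the promise made via
	 * comp_size/uncomp_size, and XZ_DATA_ERROR means corrupt input.
	 */
	ret = xz_dec_microlzma_run(s, &b);
	xz_dec_microlzma_end(s);

	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}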
diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h
deleted file mode 100644
index eaba53ff387c..000000000000
--- a/include/linux/z2_battery.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_Z2_BATTERY_H
-#define _LINUX_Z2_BATTERY_H
-
-struct z2_battery_info {
- int batt_I2C_bus;
- int batt_I2C_addr;
- int batt_I2C_reg;
- int charge_gpio;
- int min_voltage;
- int max_voltage;
- int batt_div;
- int batt_mult;
- int batt_tech;
- char *batt_name;
-};
-
-#endif
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
deleted file mode 100644
index b1eaf6e31735..000000000000
--- a/include/linux/zbud.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ZBUD_H_
-#define _ZBUD_H_
-
-#include <linux/types.h>
-
-struct zbud_pool;
-
-struct zbud_ops {
- int (*evict)(struct zbud_pool *pool, unsigned long handle);
-};
-
-struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
-void zbud_destroy_pool(struct zbud_pool *pool);
-int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
-void zbud_free(struct zbud_pool *pool, unsigned long handle);
-int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
-void *zbud_map(struct zbud_pool *pool, unsigned long handle);
-void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
-u64 zbud_get_pool_size(struct zbud_pool *pool);
-
-#endif /* _ZBUD_H_ */
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index e2e4de188d84..db7416ed6057 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -29,7 +29,6 @@
struct zorro_dev {
struct ExpansionRom rom;
zorro_id id;
- struct zorro_driver *driver; /* which driver has allocated this device */
struct device dev; /* Generic device interface */
u16 slotaddr;
u16 slotsize;
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 51bf43076165..3296438eec06 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -14,10 +14,6 @@
struct zpool;
-struct zpool_ops {
- int (*evict)(struct zpool *pool, unsigned long handle);
-};
-
/*
* Control how a handle is mapped. It will be ignored if the
* implementation does not support it. Its use is optional.
@@ -39,8 +35,7 @@ enum zpool_mapmode {
bool zpool_has_pool(char *type);
-struct zpool *zpool_create_pool(const char *type, const char *name,
- gfp_t gfp, const struct zpool_ops *ops);
+struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
const char *zpool_get_type(struct zpool *pool);
@@ -53,9 +48,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
void zpool_free(struct zpool *pool, unsigned long handle);
-int zpool_shrink(struct zpool *pool, unsigned int pages,
- unsigned int *reclaimed);
-
void *zpool_map_handle(struct zpool *pool, unsigned long handle,
enum zpool_mapmode mm);
@@ -72,7 +64,7 @@ u64 zpool_get_total_size(struct zpool *pool);
* @destroy: destroy a pool.
* @malloc: allocate mem from a pool.
* @free: free mem from a pool.
- * @shrink: shrink the pool.
+ * @sleep_mapped: whether zpool driver can sleep during map.
* @map: map a handle.
* @unmap: unmap a handle.
* @total_size: get total size of a pool.
@@ -86,10 +78,7 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(const char *name,
- gfp_t gfp,
- const struct zpool_ops *ops,
- struct zpool *zpool);
+ void *(*create)(const char *name, gfp_t gfp);
void (*destroy)(void *pool);
bool malloc_support_movable;
@@ -97,9 +86,7 @@ struct zpool_driver {
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);
- int (*shrink)(void *pool, unsigned int pages,
- unsigned int *reclaimed);
-
+ bool sleep_mapped;
void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm);
void (*unmap)(void *pool, unsigned long handle);
@@ -111,6 +98,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
-bool zpool_evictable(struct zpool *pool);
+bool zpool_can_sleep_mapped(struct zpool *pool);
#endif
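With eviction gone from the API, creating and using a pool now looks like this hedged sketch (illustrative only; the "zsmalloc" type string, object size, and error codes are arbitrary examples):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zpool.h>

static int zpool_example(void)
{
	struct zpool *pool;
	unsigned long handle;
	void *mem;
	int err;

	/* No struct zpool_ops argument any more. */
	pool = zpool_create_pool("zsmalloc", "example", GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	err = zpool_malloc(pool, 128, GFP_KERNEL, &handle);
	if (err)
		goto out;

	/* Callers that might sleep while mapped must check this first. */
	if (zpool_can_sleep_mapped(pool)) {
		mem = zpool_map_handle(pool, handle, ZPOOL_MM_RW);
		/* ... use mem ... */
		zpool_unmap_handle(pool, handle);
	}

	zpool_free(pool, handle);
out:
	zpool_destroy_pool(pool);
	return err;
}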
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 0fdbf653b173..a48cd0ffe57d 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -20,7 +20,6 @@
* zsmalloc mapping modes
*
* NOTE: These only make a difference when a mapped object spans pages.
- * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
*/
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */
@@ -36,7 +35,7 @@ enum zs_mapmode {
struct zs_pool_stats {
/* How many pages were migrated (freed) */
- unsigned long pages_compacted;
+ atomic_long_t pages_compacted;
};
struct zs_pool;
@@ -56,5 +55,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
+unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
+
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
#endif
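Because pages_compacted is now an atomic_long_t, readers must use atomic_long_read(); a minimal sketch (illustrative):

#include <linux/atomic.h>
#include <linux/zsmalloc.h>

static unsigned long pages_compacted_example(struct zs_pool *pool)
{
	struct zs_pool_stats stats;

	zs_pool_stats(pool, &stats);

	/* A plain read of the old unsigned long field no longer works. */
	return atomic_long_read(&stats.pages_compacted);
}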
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 249575e2485f..113408eef6ec 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -1,138 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd) and
+ * the GPLv2 (found in the COPYING file in the root directory of
+ * https://github.com/facebook/zstd). You may select, at your option, one of the
+ * above-listed licenses.
*/
-#ifndef ZSTD_H
-#define ZSTD_H
+#ifndef LINUX_ZSTD_H
+#define LINUX_ZSTD_H
-/* ====== Dependency ======*/
-#include <linux/types.h> /* size_t */
+/**
+ * This is a kernel-style API that wraps the upstream zstd API, which cannot be
+ * used directly because the symbols aren't exported. It exposes the minimal
+ * functionality which is currently required by users of zstd in the kernel.
+ * Expose extra functions from lib/zstd/zstd.h as needed.
+ */
+/* ====== Dependency ====== */
+#include <linux/types.h>
+#include <linux/zstd_errors.h>
+#include <linux/zstd_lib.h>
-/*-*****************************************************************************
- * Introduction
+/* ====== Helper Functions ====== */
+/**
+ * zstd_compress_bound() - maximum compressed size in worst case scenario
+ * @src_size: The size of the data to compress.
*
- * zstd, short for Zstandard, is a fast lossless compression algorithm,
- * targeting real-time compression scenarios at zlib-level and better
- * compression ratios. The zstd compression library provides in-memory
- * compression and decompression functions. The library supports compression
- * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
- * ultra, should be used with caution, as they require more memory.
- * Compression can be done in:
- * - a single step, reusing a context (described as Explicit memory management)
- * - unbounded multiple steps (described as Streaming compression)
- * The compression ratio achievable on small data can be highly improved using
- * compression with a dictionary in:
- * - a single step (described as Simple dictionary API)
- * - a single step, reusing a dictionary (described as Fast dictionary API)
- ******************************************************************************/
-
-/*====== Helper functions ======*/
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t zstd_compress_bound(size_t src_size);
/**
- * enum ZSTD_ErrorCode - zstd error codes
+ * zstd_is_error() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
*
- * Functions that return size_t can be checked for errors using ZSTD_isError()
- * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ * Return: Non-zero iff the code is an error.
+ */
+unsigned int zstd_is_error(size_t code);
+
+/**
+ * enum zstd_error_code - zstd error codes
*/
-typedef enum {
- ZSTD_error_no_error,
- ZSTD_error_GENERIC,
- ZSTD_error_prefix_unknown,
- ZSTD_error_version_unsupported,
- ZSTD_error_parameter_unknown,
- ZSTD_error_frameParameter_unsupported,
- ZSTD_error_frameParameter_unsupportedBy32bits,
- ZSTD_error_frameParameter_windowTooLarge,
- ZSTD_error_compressionParameter_unsupported,
- ZSTD_error_init_missing,
- ZSTD_error_memory_allocation,
- ZSTD_error_stage_wrong,
- ZSTD_error_dstSize_tooSmall,
- ZSTD_error_srcSize_wrong,
- ZSTD_error_corruption_detected,
- ZSTD_error_checksum_wrong,
- ZSTD_error_tableLog_tooLarge,
- ZSTD_error_maxSymbolValue_tooLarge,
- ZSTD_error_maxSymbolValue_tooSmall,
- ZSTD_error_dictionary_corrupted,
- ZSTD_error_dictionary_wrong,
- ZSTD_error_dictionaryCreation_failed,
- ZSTD_error_maxCode
-} ZSTD_ErrorCode;
+typedef ZSTD_ErrorCode zstd_error_code;
/**
- * ZSTD_maxCLevel() - maximum compression level available
+ * zstd_get_error_code() - translates an error function result to an error code
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: Maximum compression level available.
+ * Return: A unique error code for this error.
*/
-int ZSTD_maxCLevel(void);
+zstd_error_code zstd_get_error_code(size_t code);
+
/**
- * ZSTD_compressBound() - maximum compressed size in worst case scenario
- * @srcSize: The size of the data to compress.
+ * zstd_get_error_name() - translates an error function result to a string
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: The maximum compressed size in the worst case scenario.
+ * Return: An error string corresponding to the error code.
*/
-size_t ZSTD_compressBound(size_t srcSize);
+const char *zstd_get_error_name(size_t code);
+
/**
- * ZSTD_isError() - tells if a size_t function result is an error code
- * @code: The function result to check for error.
+ * zstd_min_clevel() - minimum allowed compression level
*
- * Return: Non-zero iff the code is an error.
+ * Return: The minimum allowed compression level.
*/
-static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
-{
- return code > (size_t)-ZSTD_error_maxCode;
-}
+int zstd_min_clevel(void);
+
/**
- * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
- * @functionResult: The result of a function for which ZSTD_isError() is true.
+ * zstd_max_clevel() - maximum allowed compression level
*
- * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0
- * if the functionResult isn't an error.
+ * Return: The maximum allowed compression level.
*/
-static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
- size_t functionResult)
-{
- if (!ZSTD_isError(functionResult))
- return (ZSTD_ErrorCode)0;
- return (ZSTD_ErrorCode)(0 - functionResult);
-}
+int zstd_max_clevel(void);
+
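The helpers above support the usual pattern where a size_t result doubles as an error code; a hedged sketch (illustrative; the log text and the -EINVAL mapping are arbitrary):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/zstd.h>

/* Map a zstd size_t result to 0 on success or a kernel error code. */
static int zstd_check_result(size_t code)
{
	if (zstd_is_error(code)) {
		pr_err("zstd: %s\n", zstd_get_error_name(code));
		return -EINVAL;
	}
	return 0;	/* code is a valid size and safe to use */
}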
+/* ====== Parameter Selection ====== */
/**
- * enum ZSTD_strategy - zstd compression search strategy
+ * enum zstd_strategy - zstd compression search strategy
*
- * From faster to stronger.
+ * From faster to stronger. See zstd_lib.h.
*/
-typedef enum {
- ZSTD_fast,
- ZSTD_dfast,
- ZSTD_greedy,
- ZSTD_lazy,
- ZSTD_lazy2,
- ZSTD_btlazy2,
- ZSTD_btopt,
- ZSTD_btopt2
-} ZSTD_strategy;
+typedef ZSTD_strategy zstd_strategy;
/**
- * struct ZSTD_compressionParameters - zstd compression parameters
+ * struct zstd_compression_parameters - zstd compression parameters
* @windowLog: Log of the largest match distance. Larger means more
* compression, and more memory needed during decompression.
- * @chainLog: Fully searched segment. Larger means more compression, slower,
- * and more memory (useless for fast).
+ * @chainLog: Fully searched segment. Larger means more compression,
+ * slower, and more memory (useless for fast).
* @hashLog: Dispatch table. Larger means more compression,
* slower, and more memory.
* @searchLog: Number of searches. Larger means more compression and slower.
@@ -141,1017 +100,348 @@ typedef enum {
* @targetLength: Acceptable match size for optimal parser (only). Larger means
* more compression, and slower.
* @strategy: The zstd compression strategy.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned int windowLog;
- unsigned int chainLog;
- unsigned int hashLog;
- unsigned int searchLog;
- unsigned int searchLength;
- unsigned int targetLength;
- ZSTD_strategy strategy;
-} ZSTD_compressionParameters;
+typedef ZSTD_compressionParameters zstd_compression_parameters;
/**
- * struct ZSTD_frameParameters - zstd frame parameters
- * @contentSizeFlag: Controls whether content size will be present in the frame
- * header (when known).
- * @checksumFlag: Controls whether a 32-bit checksum is generated at the end
- * of the frame for error detection.
- * @noDictIDFlag: Controls whether dictID will be saved into the frame header
- * when using dictionary compression.
+ * struct zstd_frame_parameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the
+ * frame header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the
+ * end of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame
+ * header when using dictionary compression.
*
- * The default value is all fields set to 0.
+ * The default value is all fields set to 0. See zstd_lib.h.
*/
-typedef struct {
- unsigned int contentSizeFlag;
- unsigned int checksumFlag;
- unsigned int noDictIDFlag;
-} ZSTD_frameParameters;
+typedef ZSTD_frameParameters zstd_frame_parameters;
/**
- * struct ZSTD_parameters - zstd parameters
+ * struct zstd_parameters - zstd parameters
* @cParams: The compression parameters.
* @fParams: The frame parameters.
*/
-typedef struct {
- ZSTD_compressionParameters cParams;
- ZSTD_frameParameters fParams;
-} ZSTD_parameters;
+typedef ZSTD_parameters zstd_parameters;
/**
- * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
+ * zstd_get_params() - returns zstd_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
*
- * Return: The selected ZSTD_compressionParameters.
+ * Return: The selected zstd_parameters.
*/
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size);
-/**
- * ZSTD_getParams() - returns ZSTD_parameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
- *
- * The same as ZSTD_getCParams() except also selects the default frame
- * parameters (all zero).
- *
- * Return: The selected ZSTD_parameters.
- */
-ZSTD_parameters ZSTD_getParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+/* ====== Single-pass Compression ====== */
-/*-*************************************
- * Explicit memory management
- **************************************/
+typedef ZSTD_CCtx zstd_cctx;
/**
- * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
+ * @parameters: The compression parameters to be used.
*
* If multiple compression parameters might be used, the caller must call
- * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
+ * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
* size.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCCtx().
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cctx().
*/
-size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
/**
- * struct ZSTD_CCtx - the zstd compression context
- *
- * When compressing many times it is recommended to allocate a context just once
- * and reuse it for each successive compression operation.
- */
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;
-/**
- * ZSTD_initCCtx() - initialize a zstd compression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A compression context emplaced into workspace.
- */
-ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_compressCCtx() - compress src into dst
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, ZSTD_parameters params);
-
-/**
- * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
- *
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDCtx().
- */
-size_t ZSTD_DCtxWorkspaceBound(void);
-
-/**
- * struct ZSTD_DCtx - the zstd decompression context
- *
- * When decompressing many times it is recommended to allocate a context just
- * once and reuse it for each successive decompression operation.
- */
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-/**
- * ZSTD_initDCtx() - initialize a zstd decompression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A decompression context emplaced into workspace.
- */
-ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-/*-************************
- * Simple dictionary API
- **************************/
-
-/**
- * ZSTD_compress_usingDict() - compress src into dst using a dictionary
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @dict: The dictionary to use for compression.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a predefined dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params);
-
-/**
- * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @dict: The dictionary to use for decompression. The same dictionary
- * must've been used to compress the data.
- * @dictSize: The size of the dictionary.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize);
-
-/*-**************************
- * Fast dictionary API
- ***************************/
-
-/**
- * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
- * @cParams: The compression parameters to be used for compression.
+ * zstd_init_cctx() - initialize a zstd compression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCDict().
- */
-size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CDict - a digested dictionary to be used for compression
+ * Return: A zstd compression context or NULL on error.
*/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_initCDict() - initialize a digested dictionary for compression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_CDict so it must outlive the returned ZSTD_CDict.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- * @workspace: The workspace. It must outlive the returned ZSTD_CDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_CDictWorkspaceBound(params.cParams).
+ * zstd_compress_cctx() - compress src into dst with the initialized parameters
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * zstd_compress_bound(src_size) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @parameters: The compression parameters to be used.
*
- * When compressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_CDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_CDict.
- *
- * Return: The digested dictionary emplaced into workspace.
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
- ZSTD_parameters params, void *workspace, size_t workspaceSize);
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters);
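Putting the pieces together, a hedged single-pass compression sketch (illustrative only; level 3 and kvmalloc() are arbitrary choices, not a recommendation):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/zstd.h>

static ssize_t zstd_compress_example(void *dst, size_t dst_capacity,
				     const void *src, size_t src_size)
{
	zstd_parameters params = zstd_get_params(3, src_size);
	size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_cctx *cctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;

	cctx = zstd_init_cctx(wksp, wksp_size);
	if (!cctx) {
		kvfree(wksp);
		return -EINVAL;
	}

	ret = zstd_compress_cctx(cctx, dst, dst_capacity, src, src_size,
				 &params);
	kvfree(wksp);

	return zstd_is_error(ret) ? -EINVAL : ret;
}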
-/**
- * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(cParams) where
- * cParams are the compression parameters used to initialize the
- * cdict.
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @cdict: The digested dictionary to use for compression.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a digested dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const ZSTD_CDict *cdict);
+/* ====== Single-pass Decompression ====== */
+typedef ZSTD_DCtx zstd_dctx;
/**
- * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
+ * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDDict().
- */
-size_t ZSTD_DDictWorkspaceBound(void);
-
-/**
- * struct ZSTD_DDict - a digested dictionary to be used for decompression
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_dctx().
*/
-typedef struct ZSTD_DDict_s ZSTD_DDict;
+size_t zstd_dctx_workspace_bound(void);
/**
- * ZSTD_initDDict() - initialize a digested dictionary for decompression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_DDict so it must outlive the returned ZSTD_DDict.
- * @dictSize: The size of the dictionary.
- * @workspace: The workspace. It must outlive the returned ZSTD_DDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_DDictWorkspaceBound().
- *
- * When decompressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_DDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_DDict.
+ * zstd_init_dctx() - initialize a zstd decompression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: The digested dictionary emplaced into workspace.
+ * Return: A zstd decompression context or NULL on error.
*/
-ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
- void *workspace, size_t workspaceSize);
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @ddict: The digested dictionary to use for decompression. The same
- * dictionary must've been used to compress the data.
+ * zstd_decompress_dctx() - decompress zstd compressed src into dst
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
*
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
- size_t dstCapacity, const void *src, size_t srcSize,
- const ZSTD_DDict *ddict);
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size);
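The decompression side mirrors it; a hedged sketch (illustrative; assumes the caller already knows an upper bound for the decompressed size, otherwise the streaming API below is the better fit):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/zstd.h>

static ssize_t zstd_decompress_example(void *dst, size_t dst_capacity,
				       const void *src, size_t src_size)
{
	size_t wksp_size = zstd_dctx_workspace_bound();
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_dctx *dctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;

	dctx = zstd_init_dctx(wksp, wksp_size);
	if (!dctx) {
		kvfree(wksp);
		return -EINVAL;
	}

	ret = zstd_decompress_dctx(dctx, dst, dst_capacity, src, src_size);
	kvfree(wksp);

	return zstd_is_error(ret) ? -EINVAL : ret;
}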
-
-/*-**************************
- * Streaming
- ***************************/
+/* ====== Streaming Buffers ====== */
/**
- * struct ZSTD_inBuffer - input buffer for streaming
+ * struct zstd_in_buffer - input buffer for streaming
* @src: Start of the input buffer.
* @size: Size of the input buffer.
* @pos: Position where reading stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_inBuffer_s {
- const void *src;
- size_t size;
- size_t pos;
-} ZSTD_inBuffer;
+typedef ZSTD_inBuffer zstd_in_buffer;
/**
- * struct ZSTD_outBuffer - output buffer for streaming
+ * struct zstd_out_buffer - output buffer for streaming
* @dst: Start of the output buffer.
* @size: Size of the output buffer.
* @pos: Position where writing stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_outBuffer_s {
- void *dst;
- size_t size;
- size_t pos;
-} ZSTD_outBuffer;
+typedef ZSTD_outBuffer zstd_out_buffer;
+/* ====== Streaming Compression ====== */
-
-/*-*****************************************************************************
- * Streaming compression - HowTo
- *
- * A ZSTD_CStream object is required to track streaming operation.
- * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
- * ZSTD_CStream objects can be reused multiple times on consecutive compression
- * operations. It is recommended to re-use ZSTD_CStream in situations where many
- * streaming operations will be achieved consecutively. Use one separate
- * ZSTD_CStream per thread for parallel execution.
- *
- * Use ZSTD_compressStream() repetitively to consume input stream.
- * The function will automatically update both `pos` fields.
- * Note that it may not consume the entire input, in which case `pos < size`,
- * and it's up to the caller to present again remaining data.
- * It returns a hint for the preferred number of bytes to use as an input for
- * the next function call.
- *
- * At any moment, it's possible to flush whatever data remains within internal
- * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
- * still be some content left within the internal buffer if `output->size` is
- * too small. It returns the number of bytes left in the internal buffer and
- * must be called until it returns 0.
- *
- * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
- * write frame epilogue. The epilogue is required for decoders to consider a
- * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
- * the full content if `output->size` is too small. In which case, call again
- * ZSTD_endStream() to complete the flush. It returns the number of bytes left
- * in the internal buffer and must be called until it returns 0.
- ******************************************************************************/
+typedef ZSTD_CStream zstd_cstream;
/**
- * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
+ * @cparams: The compression parameters to be used for compression.
*
* Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
- */
-size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CStream - the zstd streaming compression context
- */
-typedef struct ZSTD_CStream_s ZSTD_CStream;
-
-/*===== ZSTD_CStream management functions =====*/
-/**
- * ZSTD_initCStream() - initialize a zstd streaming compression context
- * @params: The zstd compression parameters.
- * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
- * pass the source size (zero means empty source). Otherwise,
- * the caller may optionally pass the source size, or zero if
- * unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming compression context.
+ * zstd_init_cstream().
*/
-ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
/**
- * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
- * @cdict: The digested dictionary to use for compression.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound()
- * with the cParams used to initialize the cdict to determine
- * how large the workspace must be.
+ * zstd_init_cstream() - initialize a zstd streaming compression context
+ * @parameters: The zstd parameters to use for compression.
+ * @pledged_src_size: If parameters->fParams.contentSizeFlag == 1 then the caller
+ * must pass the source size (zero means empty source).
+ * Otherwise, the caller may optionally pass the source
+ * size, or zero if unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_cstream_workspace_bound(&parameters->cParams) to
+ * determine how large the workspace must be.
*
- * Return: The zstd streaming compression context.
+ * Return: The zstd streaming compression context or NULL on error.
*/
-ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
-/*===== Streaming compression functions =====*/
/**
- * ZSTD_resetCStream() - reset the context using parameters from creation
- * @zcs: The zstd streaming compression context to reset.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ * zstd_reset_cstream() - reset the context using parameters from creation
+ * @cstream: The zstd streaming compression context to reset.
+ * @pledged_src_size: Optionally the source size, or zero if unknown.
*
* Resets the context using the parameters from creation. Skips dictionary
- * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
+ * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
* content size is always written into the frame header.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size);
+
/**
- * ZSTD_compressStream() - streaming compress some of input into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- * @input: Source buffer. `input->pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input->pos < input->size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_compress_stream() - streaming compress some of input into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data
+ * was read. Note that it may not consume the entire input, in which
+ * case `input->pos < input->size`, and it's up to the caller to
+ * present remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
*
- * Return: A hint for the number of bytes to use as the input for the next
- * function call or an error, which can be checked using
- * ZSTD_isError().
+ * Return: A hint for the number of bytes to use as the input for the next
+ * function call or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
+
/**
- * ZSTD_flushStream() - flush internal buffers into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
+ * zstd_flush_stream() - flush internal buffers into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * ZSTD_flushStream() must be called until it returns 0, meaning all the data
- * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
+ * zstd_flush_stream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since zstd_flush_stream() causes a block to be ended,
* calling it too often will degrade the compression ratio.
*
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
-/**
- * ZSTD_endStream() - flush internal buffers into output and end the frame
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- *
- * ZSTD_endStream() must be called until it returns 0, meaning all the data has
- * been flushed and the frame epilogue has been written.
- *
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
- */
-size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
/**
- * ZSTD_CStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_CStreamInSize(void);
-/**
- * ZSTD_CStreamOutSize() - recommended size for the output buffer
+ * zstd_end_stream() - flush internal buffers into output and end the frame
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete compressed block.
+ * zstd_end_stream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
*
- * Return: The recommended size for the output buffer.
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_CStreamOutSize(void);
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
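The compress/flush/end contract above amounts to the following hedged loop (illustrative; assumes cstream was set up with zstd_init_cstream() and that output has room for everything flushed, so both loops always make progress):

#include <linux/errno.h>
#include <linux/zstd.h>

static int zstd_cstream_example(zstd_cstream *cstream,
				zstd_out_buffer *output,
				zstd_in_buffer *input)
{
	size_t ret;

	/* Feed input until it is fully consumed. */
	while (input->pos < input->size) {
		ret = zstd_compress_stream(cstream, output, input);
		if (zstd_is_error(ret))
			return -EINVAL;
	}

	/* End the frame; must be repeated until it returns 0. */
	do {
		ret = zstd_end_stream(cstream, output);
		if (zstd_is_error(ret))
			return -EINVAL;
	} while (ret != 0);

	return 0;
}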
+/* ====== Streaming Decompression ====== */
-
-/*-*****************************************************************************
- * Streaming decompression - HowTo
- *
- * A ZSTD_DStream object is required to track streaming operations.
- * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
- * ZSTD_DStream objects can be re-used multiple times.
- *
- * Use ZSTD_decompressStream() repetitively to consume your input.
- * The function will update both `pos` fields.
- * If `input->pos < input->size`, some input has not been consumed.
- * It's up to the caller to present again remaining data.
- * If `output->pos < output->size`, decoder has flushed everything it could.
- * Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise it returns a suggested next input size that will never load more
- * than the current frame.
- ******************************************************************************/
+typedef ZSTD_DStream zstd_dstream;
/**
- * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
- * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
+ * @max_window_size: The maximum window size allowed for compressed frames.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
+ * Return: A lower bound on the size of the workspace that is passed
+ * to zstd_init_dstream().
*/
-size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
+size_t zstd_dstream_workspace_bound(size_t max_window_size);
/**
- * struct ZSTD_DStream - the zstd streaming decompression context
- */
-typedef struct ZSTD_DStream_s ZSTD_DStream;
-/*===== ZSTD_DStream management functions =====*/
-/**
- * ZSTD_initDStream() - initialize a zstd streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming decompression context.
- */
-ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
- size_t workspaceSize);
-/**
- * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @ddict: The digested dictionary to use for decompression.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
+ * zstd_init_dstream() - initialize a zstd streaming decompression context
+ * @max_window_size: The maximum window size allowed for compressed frames.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_dstream_workspace_bound(max_window_size) to
+ * determine how large the workspace must be.
*
- * Return: The zstd streaming decompression context.
+ * Return: The zstd streaming decompression context or NULL on error.
*/
-ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
- const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size);
-/*===== Streaming decompression functions =====*/
/**
- * ZSTD_resetDStream() - reset the context using parameters from creation
- * @zds: The zstd streaming decompression context to reset.
+ * zstd_reset_dstream() - reset the context using parameters from creation
+ * @dstream: The zstd streaming decompression context to reset.
*
* Resets the context using the parameters from creation. Skips dictionary
* loading, since it can be reused.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_resetDStream(ZSTD_DStream *zds);
+size_t zstd_reset_dstream(zstd_dstream *dstream);
+
/**
- * ZSTD_decompressStream() - streaming decompress some of input into output
- * @zds: The zstd streaming decompression context.
- * @output: Destination buffer. `output.pos` is updated to indicate how much
- * decompressed data was written.
- * @input: Source buffer. `input.pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input.pos < input.size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_decompress_stream() - streaming decompress some of input into output
+ * @dstream: The zstd streaming decompression context.
+ * @output: Destination buffer. `output.pos` is updated to indicate how much
+ * decompressed data was written.
+ * @input: Source buffer. `input.pos` is updated to indicate how much data was
+ * read. Note that it may not consume the entire input, in which case
+ * `input.pos < input.size`, and it's up to the caller to present
+ * remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
- * ZSTD_decompressStream() will not consume the last byte of the frame until
+ * zstd_decompress_stream() will not consume the last byte of the frame until
* the entire frame is flushed.
*
- * Return: Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise returns a hint for the number of bytes to use as the input
- * for the next function call or an error, which can be checked using
- * ZSTD_isError(). The size hint will never load more than the frame.
+ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise returns a hint for the number of bytes to use as the
+ * input for the next function call or an error, which can be checked
+ * using zstd_is_error(). The size hint will never load more than the
+ * frame.
*/
-size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
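And the matching hedged decompression loop (illustrative; assumes the whole frame is present in input and output is large enough, since a truncated input would otherwise loop forever):

#include <linux/errno.h>
#include <linux/zstd.h>

static int zstd_dstream_example(zstd_dstream *dstream,
				zstd_out_buffer *output,
				zstd_in_buffer *input)
{
	size_t ret;

	do {
		ret = zstd_decompress_stream(dstream, output, input);
		if (zstd_is_error(ret))
			return -EINVAL;
	} while (ret != 0);	/* 0 iff the frame is decoded and flushed */

	return 0;
}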
-/**
- * ZSTD_DStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_DStreamInSize(void);
-/**
- * ZSTD_DStreamOutSize() - recommended size for the output buffer
- *
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete decompressed block.
- *
- * Return: The recommended size for the output buffer.
- */
-size_t ZSTD_DStreamOutSize(void);
-
-
-/* --- Constants ---*/
-#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
-
-#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
-#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
-
-#define ZSTD_WINDOWLOG_MAX_32 27
-#define ZSTD_WINDOWLOG_MAX_64 27
-#define ZSTD_WINDOWLOG_MAX \
- ((unsigned int)(sizeof(size_t) == 4 \
- ? ZSTD_WINDOWLOG_MAX_32 \
- : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN 10
-#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
-#define ZSTD_HASHLOG_MIN 6
-#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
-#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
-#define ZSTD_HASHLOG3_MAX 17
-#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN 1
-/* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_SEARCHLENGTH_MAX 7
-/* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_SEARCHLENGTH_MIN 3
-#define ZSTD_TARGETLENGTH_MIN 4
-#define ZSTD_TARGETLENGTH_MAX 999
-
-/* for static allocation */
-#define ZSTD_FRAMEHEADERSIZE_MAX 18
-#define ZSTD_FRAMEHEADERSIZE_MIN 6
-static const size_t ZSTD_frameHeaderSize_prefix = 5;
-static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
-static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
-/* magic number + skippable frame length */
-static const size_t ZSTD_skippableHeaderSize = 8;
-
-
-/*-*************************************
- * Compressed size functions
- **************************************/
-
-/**
- * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
- * @src: Source buffer. It should point to the start of a zstd encoded frame
- * or a skippable frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * size of the frame.
- *
- * Return: The compressed size of the frame pointed to by `src` or an error,
- * which can be check with ZSTD_isError().
- * Suitable to pass to ZSTD_decompress() or similar functions.
- */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Decompressed size functions
- **************************************/
-/**
- * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
- * @src: It should point to the start of a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The frame content size stored in the frame header if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
- * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
- */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+/* ====== Frame Inspection Functions ====== */
/**
- * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
- * @src: It should point to the start of a series of zstd encoded and/or
- * skippable frames.
- * @srcSize: The exact size of the series of frames.
+ * zstd_find_frame_compressed_size() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded
+ * frame or a skippable frame.
+ * @src_size: The size of the source buffer. It must be at least as large as the
+ * size of the frame.
*
- * If any zstd encoded frame in the series doesn't have the frame content size
- * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
- * set when using ZSTD_compress(). The decompressed size can be very large.
- * If the source is untrusted, the decompressed size could be wrong or
- * intentionally modified. Always ensure the result fits within the
- * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
- * frames, and so it must traverse the input to read each frame header. This is
- * efficient as most of the data is skipped, however it does mean that all frame
- * data must be present and valid.
- *
- * Return: Decompressed size of all the data contained in the frames if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
- * `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
- */
-unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Advanced compression functions
- **************************************/
-/**
- * ZSTD_checkCParams() - ensure parameter values remain within authorized range
- * @cParams: The zstd compression parameters.
- *
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ * which can be checked with zstd_is_error().
+ * Suitable to pass to zstd_decompress_dctx() or similar functions.
*/
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
/**
- * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
- * @srcSize: Optionally the estimated source size, or zero if unknown.
- * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
- *
- * Return: The optimized parameters.
- */
-ZSTD_compressionParameters ZSTD_adjustCParams(
- ZSTD_compressionParameters cParams, unsigned long long srcSize,
- size_t dictSize);
-
-/*--- Advanced decompression functions ---*/
-
-/**
- * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
- * @buffer: The source buffer to check.
- * @size: The size of the source buffer, must be at least 4 bytes.
- *
- * Return: True iff the buffer starts with a zstd or skippable frame identifier.
- */
-unsigned int ZSTD_isFrame(const void *buffer, size_t size);
-
-/**
- * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
- * @dict: The dictionary buffer.
- * @dictSize: The size of the dictionary buffer.
- *
- * Return: The dictionary id stored within the dictionary or 0 if the
- * dictionary is not a zstd dictionary. If it returns 0 the
- * dictionary can still be loaded as a content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
-
-/**
- * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
- * @ddict: The ddict to find the id of.
- *
- * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
- * a zstd dictionary. If it returns 0 `ddict` will be loaded as a
- * content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
-
-/**
- * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
- * @src: Source buffer. It must be a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The dictionary id required to decompress the frame stored within
- * `src` or 0 if the dictionary id could not be decoded. It can return
- * 0 if the frame does not require a dictionary, the dictionary id
- * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
- * is too small.
- */
-unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
-
-/**
- * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
- * @frameContentSize: The frame content size, or 0 if not present.
+ * struct zstd_frame_params - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
+ * present.
* @windowSize: The window size, or 0 if the frame is a skippable frame.
+ * @blockSizeMax: The maximum block size.
+ * @frameType: The frame type (zstd or skippable).
+ * @headerSize: The size of the frame header.
* @dictID: The dictionary id, or 0 if not present.
* @checksumFlag: Whether a checksum was used.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned long long frameContentSize;
- unsigned int windowSize;
- unsigned int dictID;
- unsigned int checksumFlag;
-} ZSTD_frameParams;
+typedef ZSTD_frameHeader zstd_frame_header;
/**
- * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
- * @fparamsPtr: On success the frame parameters are written here.
- * @src: The source buffer. It must point to a zstd or skippable frame.
- * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is
- * always large enough to succeed.
+ * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
+ * @params: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @src_size: The size of the source buffer.
*
- * Return: 0 on success. If more data is required it returns how many bytes
- * must be provided to make forward progress. Otherwise it returns
- * an error, which can be checked using ZSTD_isError().
+ * Return: 0 on success. If more data is required it returns how many bytes
+ * must be provided to make forward progress. Otherwise it returns
+ * an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
- size_t srcSize);
-
-/*-*****************************************************************************
- * Buffer-less and synchronous inner streaming functions
- *
- * This is an advanced API, giving full control over buffer management, for
- * users which need direct control over memory.
- * But it's also a complex one, with many restrictions (documented below).
- * Prefer using normal streaming API for an easier experience
- ******************************************************************************/
-
-/*-*****************************************************************************
- * Buffer-less streaming compression (synchronous mode)
- *
- * A ZSTD_CCtx object is required to track streaming operations.
- * Use ZSTD_initCCtx() to initialize a context.
- * ZSTD_CCtx object can be re-used multiple times within successive compression
- * operations.
- *
- * Start by initializing a context.
- * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
- * compression,
- * or ZSTD_compressBegin_advanced(), for finer parameter control.
- * It's also possible to duplicate a reference context which has already been
- * initialized, using ZSTD_copyCCtx()
- *
- * Then, consume your input using ZSTD_compressContinue().
- * There are some important considerations to keep in mind when using this
- * advanced function :
- * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
- * buffer only.
- * - Interface is synchronous : input is consumed entirely and produce 1+
- * (or more) compressed blocks.
- * - Caller must ensure there is enough space in `dst` to store compressed data
- * under worst case scenario. Worst case evaluation is provided by
- * ZSTD_compressBound().
- * ZSTD_compressContinue() doesn't guarantee recover after a failed
- * compression.
- * - ZSTD_compressContinue() presumes prior input ***is still accessible and
- * unmodified*** (up to maximum distance size, see WindowLog).
- * It remembers all previous contiguous blocks, plus one separated memory
- * segment (which can itself consists of multiple contiguous blocks)
- * - ZSTD_compressContinue() detects that prior input has been overwritten when
- * `src` buffer overlaps. In which case, it will "discard" the relevant memory
- * section from its history.
- *
- * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
- * and optional checksum. It's possible to use srcSize==0, in which case, it
- * will write a final empty block to end the frame. Without last block mark,
- * frames will be considered unfinished (corrupted) by decoders.
- *
- * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
- * frame.
- ******************************************************************************/
-
-/*===== Buffer-less streaming compression functions =====*/
-size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, int compressionLevel);
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, ZSTD_parameters params,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-
-
-/*-*****************************************************************************
- * Buffer-less streaming decompression (synchronous mode)
- *
- * A ZSTD_DCtx object is required to track streaming operations.
- * Use ZSTD_initDCtx() to initialize a context.
- * A ZSTD_DCtx object can be re-used multiple times.
- *
- * First typical operation is to retrieve frame parameters, using
- * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
- * important information to correctly decode the frame, such as the minimum
- * rolling buffer size to allocate to decompress data (`windowSize`), and the
- * dictionary ID used.
- * Note: content size is optional, it may not be present. 0 means unknown.
- * Note that these values could be wrong, either because of data malformation,
- * or because an attacker is spoofing deliberate false information. As a
- * consequence, check that values remain within valid application range,
- * especially `windowSize`, before allocation. Each application can set its own
- * limit, depending on local restrictions. For extended interoperability, it is
- * recommended to support at least 8 MB.
- * Frame parameters are extracted from the beginning of the compressed frame.
- * Data fragment must be large enough to ensure successful decoding, typically
- * `ZSTD_frameHeaderSize_max` bytes.
- * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
- * >0: `srcSize` is too small, provide at least this many bytes.
- * errorCode, which can be tested using ZSTD_isError().
- *
- * Start decompression, with ZSTD_decompressBegin() or
- * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
- * context, using ZSTD_copyDCtx().
- *
- * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
- * alternatively.
- * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
- * to ZSTD_decompressContinue().
- * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
- * fail.
- *
- * The result of ZSTD_decompressContinue() is the number of bytes regenerated
- * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
- * error; it just means ZSTD_decompressContinue() has decoded some metadata
- * item. It can also be an error code, which can be tested with ZSTD_isError().
- *
- * ZSTD_decompressContinue() needs previous data blocks during decompression, up
- * to `windowSize`. They should preferably be located contiguously, prior to
- * current block. Alternatively, a round buffer of sufficient size is also
- * possible. Sufficient size is determined by frame parameters.
- * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
- * follow each other, make sure that either the compressor breaks contiguity at
- * the same place, or that previous contiguous segment is large enough to
- * properly handle maximum back-reference.
- *
- * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
- * Context can then be reset to start a new decompression.
- *
- * Note: it's possible to know if next input to present is a header or a block,
- * using ZSTD_nextInputType(). This information is not required to properly
- * decode a frame.
- *
- * == Special case: skippable frames ==
- *
- * Skippable frames allow integration of user-defined data into a flow of
- * concatenated frames. Skippable frames will be ignored (skipped) by a
- * decompressor. The format of skippable frames is as follows:
- * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
- * 0x184D2A50 to 0x184D2A5F
- * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
- * c) Frame Content - any content (User Data) of length equal to Frame Size
- * For skippable frames ZSTD_decompressContinue() always returns 0.
- * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
- * what means that a frame is skippable.
- * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
- * actually be a zstd encoded frame with no content. For purposes of
- * decompression, it is valid in both cases to skip the frame using
- * ZSTD_findFrameCompressedSize() to find its size in bytes.
- * It also returns frame size as fparamsPtr->frameContentSize.
- ******************************************************************************/
-
-/*===== Buffer-less streaming decompression functions =====*/
-size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
- size_t dictSize);
-void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-typedef enum {
- ZSTDnit_frameHeader,
- ZSTDnit_blockHeader,
- ZSTDnit_block,
- ZSTDnit_lastBlock,
- ZSTDnit_checksum,
- ZSTDnit_skippableFrame
-} ZSTD_nextInputType_e;
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
-
-/*-*****************************************************************************
- * Block functions
- *
- * Block functions produce and decode raw zstd blocks, without frame metadata.
- * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
- * very small blocks (< 100 bytes). User will have to take in charge required
- * information to regenerate data, such as compressed and content sizes.
- *
- * A few rules to respect:
- * - Compressing and decompressing require a context structure
- * + Use ZSTD_initCCtx() and ZSTD_initDCtx()
- * - It is necessary to init context before starting
- * + compression : ZSTD_compressBegin()
- * + decompression : ZSTD_decompressBegin()
- * + variants _usingDict() are also allowed
- * + copyCCtx() and copyDCtx() work too
- * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
- * + If you need to compress more, cut data into multiple blocks
- * + Consider using the regular ZSTD_compress() instead, as frame metadata
- * costs become negligible when source size is large.
- * - When a block is considered not compressible enough, ZSTD_compressBlock()
- * result will be zero. In which case, nothing is produced into `dst`.
- * + User must test for such outcome and deal directly with uncompressed data
- * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
- * + In case of multiple successive blocks, decoder must be informed of
- * uncompressed block existence to follow proper history. Use
- * ZSTD_insertBlock() in such a case.
- ******************************************************************************/
-
-/* Define for static allocation */
-#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
-/*===== Raw zstd block functions =====*/
-size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
-size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
- size_t blockSize);
+size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
+ size_t src_size);
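
[Editorial sketch, not part of this patch: validating a header before allocating, following the "support at least 8 MB" interoperability advice from the removed comment. The function name and limit are illustrative.]

    static int zstd_check_frame(const void *src, size_t src_size)
    {
    	zstd_frame_header params;
    	size_t ret = zstd_get_frame_header(&params, src, src_size);

    	if (ret != 0)		/* >0: need more input; otherwise an error */
    		return -EINVAL;
    	/* Never trust windowSize from untrusted data; cap it before allocating. */
    	if (params.windowSize > 8 * 1024 * 1024)
    		return -E2BIG;
    	return 0;
    }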
-#endif /* ZSTD_H */
+#endif /* LINUX_ZSTD_H */
diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
new file mode 100644
index 000000000000..58b6dd45a969
--- /dev/null
+++ b/include/linux/zstd_errors.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+
+/*===== dependency =====*/
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#define ZSTDERRORLIB_VISIBILITY
+#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error codes _values_ are pinned down since v1.3.1 only.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only.
+ * dynamic linking is not yet officially supported.
+ * note 2 : Prefer relying on the enum rather than on its value whenever possible
+ * This is the only supported way to use the error list < v1.3.1
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+ ZSTD_error_workSpace_tooSmall = 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
+ ZSTD_error_dstBuffer_null = 74,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
+ ZSTD_error_dstBuffer_wrong = 104,
+ ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+ convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+ which can be used to compare with enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
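
[Editorial sketch, not part of the header: the supported pattern is to gate on ZSTD_isError() first, then map the result onto the enum above. The grow-and-retry policy is hypothetical.]

    size_t const ret = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
    if (ZSTD_isError(ret)
        && ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall) {
        /* grow dst, e.g. to ZSTD_compressBound(srcSize), and retry */
    }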
+
+
+
+#endif /* ZSTD_ERRORS_H_398273423 */
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
new file mode 100644
index 000000000000..79d55465d5c1
--- /dev/null
+++ b/include/linux/zstd_lib.h
@@ -0,0 +1,2551 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ====== Dependency ======*/
+#include <linux/limits.h> /* INT_MAX */
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDLIB_API : control library symbols visibility ===== */
+#ifndef ZSTDLIB_VISIBLE
+# if (__GNUC__ >= 4) && !defined(__MINGW32__)
+# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default")))
+# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
+# else
+# define ZSTDLIB_VISIBLE
+# define ZSTDLIB_HIDDEN
+# endif
+#endif
+#define ZSTDLIB_API ZSTDLIB_VISIBLE
+
+
+/* *****************************************************************************
+ Introduction
+
+ zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+ real-time compression scenarios at zlib-level and better compression ratios.
+ The zstd compression library provides in-memory compression and decompression
+ functions.
+
+ The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+ which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+ caution, as they require more memory. The library also offers negative
+ compression levels, which extend the range of speed vs. ratio preferences.
+ The lower the level, the faster the speed (at the cost of compression).
+
+ Compression can be done in:
+ - a single step (described as Simple API)
+ - a single step, reusing a context (described as Explicit context)
+ - unbounded multiple steps (described as Streaming compression)
+
+ The compression ratio achievable on small data can be highly improved using
+ a dictionary. Dictionary compression can be performed in:
+ - a single step (described as Simple dictionary API)
+ - a single step, reusing a dictionary (described as Bulk-processing
+ dictionary API)
+
+ Advanced experimental functions can be accessed using
+ `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+ Advanced experimental APIs should never be used with a dynamically-linked
+ library. They are not "stable"; their definitions or signatures may change in
+ the future. Only static linking is allowed.
+*******************************************************************************/
+
+/*------ Version ------*/
+#define ZSTD_VERSION_MAJOR 1
+#define ZSTD_VERSION_MINOR 5
+#define ZSTD_VERSION_RELEASE 2
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+/*! ZSTD_versionNumber() :
+ * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+
+/*! ZSTD_versionString() :
+ * Return runtime library version, like "1.4.5". Requires v1.3.0+. */
+ZSTDLIB_API const char* ZSTD_versionString(void);
+
+/* *************************************
+ * Default constant
+ ***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+# define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/* *************************************
+ * Constants
+ ***************************************/
+
+/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
+
+
+/* *************************************
+* Simple API
+***************************************/
+/*! ZSTD_compress() :
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress() :
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * If the user cannot determine a maximum upper bound, it's better to use streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
+
+/*! ZSTD_getFrameContentSize() : requires v1.3.0+
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
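
[Editorial sketch of the three-way return handling the notes above describe; MY_APP_LIMIT stands for whatever bound the application authorizes (note 5) and is hypothetical.]

    unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR) {
        /* not a valid zstd frame: reject */
    } else if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) {
        /* size not stored in the header: fall back to streaming (note 2) */
    } else if (rSize > MY_APP_LIMIT) {
        /* header claims more than the application allows: reject (note 5) */
    }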
+
+/*! ZSTD_getDecompressedSize() :
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+
+/*====== Helper functions ======*/
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
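
[Editorial sketch of the one-shot pattern these helpers support, assuming `dst` was allocated with ZSTD_compressBound() bytes and <stdio.h> is available.]

    size_t const bound = ZSTD_compressBound(srcSize);  /* worst-case dst size */
    size_t const cSize = ZSTD_compress(dst, bound, src, srcSize,
                                       ZSTD_defaultCLevel());
    if (ZSTD_isError(cSize))
        fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));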
+
+
+/* *************************************
+* Explicit context
+***************************************/
+/*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once,
+ * and re-use it for each successive compression operation.
+ * This will make the workload friendlier to the system's memory.
+ * Note : re-using context is just a speed / resource optimization.
+ * It doesn't change the compression ratio, which remains identical.
+ * Note 2 : In multi-threaded environments,
+ * use one different context per thread for parallel execution.
+ */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+
+/*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ * Important : in order to behave similarly to `ZSTD_compress()`,
+ * this function compresses at requested compression level,
+ * __ignoring any other parameter__ .
+ * If any advanced parameter was set using the advanced API,
+ * they will all be reset. Only `compressionLevel` remains.
+ */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ * and re-use it for each successive decompression operation.
+ * This will make the workload friendlier to the system's memory.
+ * Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */
+
+/*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(),
+ * requires an allocated ZSTD_DCtx.
+ * Compatible with sticky parameters.
+ */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
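
[Editorial sketch of the re-use pattern described above: one context, many one-shot calls. The dst/src arrays and counts are hypothetical.]

    ZSTD_DCtx *const dctx = ZSTD_createDCtx();
    size_t i;

    for (i = 0; i < nbBuffers; i++) {
        size_t const r = ZSTD_decompressDCtx(dctx, dst[i], dstCapacity[i],
                                             src[i], srcSize[i]);
        if (ZSTD_isError(r))
            break;          /* each one-shot call starts a fresh frame */
    }
    ZSTD_freeDCtx(dctx);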
+
+
+/* *******************************************
+* Advanced compression API (Requires v1.4.0+)
+**********************************************/
+
+/* API design :
+ * Parameters are pushed one by one into an existing context,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
+ * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+ * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ *
+ * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ * This API supersedes all other "advanced" API entry points in the experimental section.
+ * In the future, we expect to remove, from the experimental section, API entry points which are redundant with this API.
+ */
+
+
+/* Compression strategies, listed from fastest to strongest */
+typedef enum { ZSTD_fast=1,
+ ZSTD_dfast=2,
+ ZSTD_greedy=3,
+ ZSTD_lazy=4,
+ ZSTD_lazy2=5,
+ ZSTD_btlazy2=6,
+ ZSTD_btopt=7,
+ ZSTD_btultra=8,
+ ZSTD_btultra2=9
+ /* note : new strategies _might_ be added in the future.
+ Only the order (from fast to strong) is guaranteed */
+} ZSTD_strategy;
+
+typedef enum {
+
+ /* compression parameters
+ * Note: When compressing with a ZSTD_CDict these parameters are superseded
+ * by the parameters used to construct the ZSTD_CDict.
+ * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
+ ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
+ * Note that exact compression parameters are dynamically determined,
+ * depending on both compression level and srcSize (when known).
+ * Default level is ZSTD_CLEVEL_DEFAULT==3.
+ * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+ * Note 1 : it's possible to pass a negative compression level.
+ * Note 2 : setting a level does not automatically set all other compression parameters
+ * to default. Setting this will however eventually dynamically impact the compression
+ * parameters which have not been manually set. The manually set
+ * ones will 'stick'. */
+ /* Advanced compression parameters :
+ * It's possible to pin down compression parameters to some specific values.
+ * In which case, these values are no longer dynamically selected by the compressor */
+ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
+ * This will set a memory budget for streaming decompression,
+ * with larger values requiring more memory
+ * and typically compressing more.
+ * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+ * Special: value 0 means "use default windowLog".
+ * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
+ * requires explicitly allowing such size at streaming decompression stage. */
+ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
+ * Resulting memory usage is (1 << (hashLog+2)).
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+ * Larger tables improve compression ratio of strategies <= dFast,
+ * and improve speed of strategies > dFast.
+ * Special: value 0 means "use default hashLog". */
+ ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2.
+ * Resulting memory usage is (1 << (chainLog+2)).
+ * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+ * Larger tables result in better and slower compression.
+ * This parameter is useless for "fast" strategy.
+ * It's still useful when using "dfast" strategy,
+ * in which case it defines a secondary probe table.
+ * Special: value 0 means "use default chainLog". */
+ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
+ * More attempts result in better and slower compression.
+ * This parameter is useless for "fast" and "dFast" strategies.
+ * Special: value 0 means "use default searchLog". */
+ ZSTD_c_minMatch=105, /* Minimum size of searched matches.
+ * Note that Zstandard can still find matches of smaller size,
+ * it just tweaks its search algorithm to look for this size and larger.
+ * Larger values increase compression and decompression speed, but decrease ratio.
+ * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
+ * Note that currently, for all strategies < btopt, the effective minimum is 4,
+ * and for all strategies > fast, the effective maximum is 6.
+ * Special: value 0 means "use default minMatchLength". */
+ ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
+ * For strategies btopt, btultra & btultra2:
+ * Length of Match considered "good enough" to stop search.
+ * Larger values make compression stronger, and slower.
+ * For strategy fast:
+ * Distance between match sampling.
+ * Larger values make compression faster, and weaker.
+ * Special: value 0 means "use default targetLength". */
+ ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition.
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
+ /* LDM mode parameters */
+ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+ * for large inputs, by finding large matches at long distance.
+ * It increases memory usage and window size.
+ * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
+ * except when expressly set to a different value.
+ * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
+ * compression strategy >= ZSTD_btopt (== compression level 16+) */
+ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio,
+ * but decrease compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * default: windowlog - 7.
+ * Special: value 0 means "automatically determine hashlog". */
+ ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher.
+ * Larger/too small values usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+ * Special: value 0 means "use default value" (default: 64). */
+ ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values improve collision resolution but decrease compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+ * Special: value 0 means "use default value" (default: 3). */
+ ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
+ * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+ * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+ * Larger values improve compression speed.
+ * Deviating far from default value will likely result in a compression ratio decrease.
+ * Special: value 0 means "automatically determine hashRateLog". */
+
+ /* frame parameters */
+ ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+ * Content size must be known at the beginning of compression.
+ * This is automatically the case when using ZSTD_compress2(),
+ * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
+ ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
+ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+ /* multi-threading parameters */
+ /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+ * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
+ * In a situation where it's unknown if the linked library supports multi-threading or not,
+ * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
+ */
+ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
+ * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
+ * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
+ * while compression is performed in parallel, within worker thread(s).
+ * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
+ * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
+ * More workers improve speed, but also increase memory usage.
+ * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
+ * compression is performed inside Caller's thread, and all invocations are blocking */
+ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
+ * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
+ * 0 means default, which is dynamically determined based on compression parameters.
+ * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is larger.
+ * The minimum size is automatically and transparently enforced. */
+ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
+ * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
+ * It helps preserve compression ratio, while each job is compressed in parallel.
+ * This value is enforced only when nbWorkers >= 1.
+ * Larger values increase compression ratio, but decrease speed.
+ * Possible values range from 0 to 9 :
+ * - 0 means "default" : value will be determined by the library, depending on strategy
+ * - 1 means "no overlap"
+ * - 9 means "full overlap", using a full window size.
+ * Each intermediate rank increases/decreases load size by a factor 2 :
+ * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default
+ * default value varies between 6 and 9, depending on strategy */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_c_rsyncable
+ * ZSTD_c_format
+ * ZSTD_c_forceMaxWindow
+ * ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
+ * ZSTD_c_targetCBlockSize
+ * ZSTD_c_srcSizeHint
+ * ZSTD_c_enableDedicatedDictSearch
+ * ZSTD_c_stableInBuffer
+ * ZSTD_c_stableOutBuffer
+ * ZSTD_c_blockDelimiters
+ * ZSTD_c_validateSequences
+ * ZSTD_c_useBlockSplitter
+ * ZSTD_c_useRowMatchFinder
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly;
+ * also, the enums values themselves are unstable and can still change.
+ */
+ ZSTD_c_experimentalParam1=500,
+ ZSTD_c_experimentalParam2=10,
+ ZSTD_c_experimentalParam3=1000,
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
+ ZSTD_c_experimentalParam6=1003,
+ ZSTD_c_experimentalParam7=1004,
+ ZSTD_c_experimentalParam8=1005,
+ ZSTD_c_experimentalParam9=1006,
+ ZSTD_c_experimentalParam10=1007,
+ ZSTD_c_experimentalParam11=1008,
+ ZSTD_c_experimentalParam12=1009,
+ ZSTD_c_experimentalParam13=1010,
+ ZSTD_c_experimentalParam14=1011,
+ ZSTD_c_experimentalParam15=1012
+} ZSTD_cParameter;
+
+typedef struct {
+ size_t error;
+ int lowerBound;
+ int upperBound;
+} ZSTD_bounds;
+
+/*! ZSTD_cParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - lower and upper bounds, both inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
+
+/*! ZSTD_CCtx_setParameter() :
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is generally only possible during frame initialization (before starting compression).
+ * Exception : when using multi-threading mode (nbWorkers >= 1),
+ * the following parameters can be updated _during_ compression (within same frame):
+ * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ * new parameters will be active for next job only (after a flush()).
+ * @return : an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ * Total input data size to be compressed as a single frame.
+ * Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
+ * This value will also be verified at end of frame, and will trigger an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
+ * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
+ * Note 2 : pledgedSrcSize is only valid once, for the next frame.
+ * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note 3 : Whenever all input data is provided and consumed in a single round,
+ * for example with ZSTD_compress2(),
+ * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
+ * this value is automatically overridden by srcSize instead.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+typedef enum {
+ ZSTD_reset_session_only = 1,
+ ZSTD_reset_parameters = 2,
+ ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ * There are 2 different things that can be reset, independently or jointly :
+ * - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ * Useful after an error, or to interrupt any ongoing compression.
+ * Any internal data not yet flushed is cancelled.
+ * Compression parameters and dictionary remain unchanged.
+ * They will be used to compress next frame.
+ * Resetting session never fails.
+ * - The parameters : changes all parameters back to "default".
+ * This removes any reference to any dictionary too.
+ * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
+ * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
+ * - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+
+/*! ZSTD_compress2() :
+ * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * ZSTD_compress2() always starts a new frame.
+ * Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - The function is always blocking, returns when compression is completed.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
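
[Editorial sketch of the sticky-parameter flow: push parameters with ZSTD_CCtx_set*(), then compress. Setter return codes are elided for brevity, though each can be tested with ZSTD_isError().]

    ZSTD_CCtx *const cctx = ZSTD_createCCtx();

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    /* Both parameters stay set for this frame and any later frame on cctx. */
    size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) {
        /* handle error */
    }
    ZSTD_freeCCtx(cctx);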
+
+
+/* *********************************************
+* Advanced decompression API (Requires v1.4.0+)
+************************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ * Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+ ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
+ * the streaming API will refuse to allocate memory buffer
+ * in order to protect the host from unreasonable memory requirements.
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * Special: value 0 means "use default maximum windowLog". */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_d_format
+ * ZSTD_d_stableOutBuffer
+ * ZSTD_d_forceIgnoreChecksum
+ * ZSTD_d_refMultipleDDicts
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly
+ */
+ ZSTD_d_experimentalParam1=1000,
+ ZSTD_d_experimentalParam2=1001,
+ ZSTD_d_experimentalParam3=1002,
+ ZSTD_d_experimentalParam4=1003
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ * Set one decompression parameter, selected by enum ZSTD_dParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to a clean state.
+ * Session and parameters can be reset jointly or separately.
+ * Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
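
[Editorial sketch tying the two calls together: cap the window budget, decompress, then reset session and parameters. The surrounding decompression calls are elided.]

    ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27); /* refuse > 128 MiB windows */
    /* ... ZSTD_decompressStream() / ZSTD_decompressDCtx() calls here ... */
    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); /* back to defaults */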
+
+
+/* **************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /*< start of input buffer */
+ size_t size; /*< size of input buffer */
+ size_t pos; /*< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /*< start of output buffer */
+ size_t size; /*< size of output buffer */
+ size_t pos; /*< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+* It is recommended to re-use a ZSTD_CStream, since it plays nicer with the system's memory by re-using already allocated memory.
+*
+* For parallel execution, use one separate ZSTD_CStream per thread.
+*
+* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+* Parameters are sticky : when starting a new compression on the same context,
+* it will re-use the same sticky parameters as previous compression session.
+* When in doubt, it's recommended to fully initialize the context before usage.
+* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+* set more specific parameters, the pledged source size, or load a dictionary.
+*
+* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+* consume input stream. The function will automatically update both `pos`
+* fields within `input` and `output`.
+* Note that the function may not consume the entire input, for example, because
+* the output buffer is already full, in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+* If not, the caller must make some room to receive more compressed data,
+* and then present again remaining input data.
+* note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+* but doesn't guarantee maximal forward progress. This is especially relevant
+* when compressing with multiple threads. The call won't block if it can
+* consume some input, but if it can't it will wait for some, but not all,
+* output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+* or an error code, which can be tested using ZSTD_isError().
+*
+* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+* operation.
+* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+* flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+* start a new frame.
+* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /*< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+ ZSTD_e_flush=1, /* flush any data provided so far,
+ * it creates (at least) one new block, that can be decoded immediately on reception;
+ * frame will continue: any future data can still reference previously compressed data, improving compression.
+ * note : multithreaded compression will block to flush as much output as possible. */
+ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
+ * note that frame is only closed after compressed data is fully flushed (return value == 0).
+ * After that point, any additional data starts a new frame.
+ * note : each frame is independent (does not reference any content from previous frame).
+ * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() : Requires v1.4.0+
+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - endOp must be a valid directive
+ * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
+ * and then immediately returns, just indicating that there is some data remaining to be flushed.
+ * The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
+ * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
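
[Editorial sketch of the per-chunk loop the HowTo above describes, modeled on zstd's streaming examples; `cctx` is assumed initialized, and `chunk`, `outBuf` and `lastChunk` come from the caller.]

    ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
    ZSTD_inBuffer input = { chunk, chunkSize, 0 };
    int finished;
    do {
        ZSTD_outBuffer output = { outBuf, outBufSize, 0 };
        size_t const remaining = ZSTD_compressStream2(cctx, &output,
                                                      &input, mode);
        if (ZSTD_isError(remaining))
            break;
        /* write output.pos bytes from outBuf to the destination here */
        finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
    } while (!finished);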
+
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossing such interface to an absolute minimum.
+ * It's not rare that more time ends up being spent crossing the interface than on compression itself.
+ * In such cases, prefer using buffers as large as practical,
+ * for both input and output, to reduce the number of round trips.
+ */
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /*< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block. */
+
+
+/* *****************************************************************************
+ * The following is a legacy streaming API, available since v1.0+ .
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
+ * Streaming in combination with advanced parameters and dictionary compression
+ * can only be used through the new API.
+ ******************************************************************************/
+
+/*!
+ * Equivalent to:
+ *
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum number of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
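+
+/* Illustrative sketch : the same work expressed with the legacy entry points above.
+ * `zcs`, `inBuf`/`inSize` and `fin`/`fout` are caller-provided; error checks omitted. */
+#if 0
+size_t const outSize = ZSTD_CStreamOutSize();
+void* const outBuf = malloc(outSize);
+size_t readSize;
+ZSTD_initCStream(zcs, 3);                            /* compression level 3 */
+while ((readSize = fread(inBuf, 1, inSize, fin)) != 0) {
+    ZSTD_inBuffer input = { inBuf, readSize, 0 };
+    while (input.pos < input.size) {
+        ZSTD_outBuffer output = { outBuf, outSize, 0 };
+        ZSTD_compressStream(zcs, &output, &input);   /* @return hints at the next read size */
+        fwrite(outBuf, 1, output.pos, fout);
+    }
+}
+for (;;) {                                           /* write the frame epilogue */
+    ZSTD_outBuffer output = { outBuf, outSize, 0 };
+    size_t const remaining = ZSTD_endStream(zcs, &output);
+    fwrite(outBuf, 1, output.pos, fout);
+    if (remaining == 0) break;                       /* 0 == frame fully flushed */
+}
+free(outBuf);
+#endif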
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+* Alternatively, use advanced API to set specific properties.
+*
+* Use ZSTD_decompressStream() repetitively to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+* It's up to the caller to present again remaining data.
+* The function tries to flush all data decoded immediately, respecting output buffer size.
+* If `output.pos < output.size`, decoder has flushed everything it could.
+* But if `output.pos == output.size`, there might be some data left within internal buffers.
+* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* or an error code, which can be tested using ZSTD_isError(),
+* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+* the return value is a suggested next input size (just a hint for better latency)
+* that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guaranteed to be large enough to flush at least one complete block in all circumstances. */
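+
+/* Illustrative sketch : the decompression loop described in the HowTo above.
+ * `zds` and `fin`/`fout` are caller-provided; error handling omitted for brevity. */
+#if 0
+size_t const inSize  = ZSTD_DStreamInSize();
+size_t const outSize = ZSTD_DStreamOutSize();
+void* const inBuf  = malloc(inSize);
+void* const outBuf = malloc(outSize);
+size_t readSize;
+ZSTD_initDStream(zds);
+while ((readSize = fread(inBuf, 1, inSize, fin)) != 0) {
+    ZSTD_inBuffer input = { inBuf, readSize, 0 };
+    while (input.pos < input.size) {                 /* present remaining input again */
+        ZSTD_outBuffer output = { outBuf, outSize, 0 };
+        size_t const ret = ZSTD_decompressStream(zds, &output, &input);
+        if (ZSTD_isError(ret)) break;                /* handle error */
+        fwrite(outBuf, 1, output.pos, fout);         /* ret == 0 : frame complete and flushed */
+    }
+}
+free(inBuf); free(outBuf);
+#endif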
+
+
+/* ************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression at an explicit compression level using a Dictionary.
+ * A dictionary can be any arbitrary data segment (also called a prefix),
+ * or a buffer with specified information (see zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a known Dictionary.
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
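+/* Illustrative sketch : a round-trip with the same raw dictionary on both sides.
+ * `cctx`, `dctx`, `dictBuf`/`dictLen` and the I/O buffers are caller-provided names. */
+#if 0
+size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
+                                             src, srcSize,
+                                             dictBuf, dictLen, 3);
+size_t const dSize = ZSTD_decompress_usingDict(dctx, out, outCapacity,
+                                               dst, cSize,
+                                               dictBuf, dictLen);   /* must be the same dictionary */
+#endif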
+
+/* *********************************
+ * Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages or blocks using the same dictionary,
+ * it's recommended to digest the dictionary only once, since it's a costly operation.
+ * ZSTD_createCDict() will create a state from digesting a dictionary.
+ * The resulting state can be used for future compression operations with very limited startup cost.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
+ * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
+ * in which case the only thing that it transports is the @compressionLevel.
+ * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
+ * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict().
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times.
+ * Note : compression level is _decided at dictionary creation time_,
+ * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
+
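+/* Illustrative sketch : digest the dictionary once, then reuse it across many messages.
+ * The array names (`dsts`, `srcs`, `csizes`, ...) are caller-provided. */
+#if 0
+ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictLen, 3);    /* costly, done once */
+for (int i = 0; i < nbMessages; i++) {
+    csizes[i] = ZSTD_compress_usingCDict(cctx, dsts[i], dstCaps[i],
+                                         srcs[i], srcSizes[i], cdict);
+}
+ZSTD_freeCDict(cdict);
+#endif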
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict()
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
+
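+/* Illustrative sketch : the decompression-side counterpart, with a digested DDict. */
+#if 0
+ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictLen);       /* digest once */
+size_t const dSize = ZSTD_decompress_usingDDict(dctx, out, outCapacity,
+                                                src, srcSize, ddict);
+ZSTD_freeDDict(ddict);
+#endif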
+
+/* ******************************
+ * Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() : Requires v1.4.0+
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getDictID_fromDDict() : Requires v1.4.0+
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() : Requires v1.4.0+
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary to be decoded (most common case).
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ * To identify the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
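+
+/* Illustrative sketch : use the frame's dictID to pick the right dictionary.
+ * `lookup_ddict_by_id()` is a hypothetical application-side table lookup. */
+#if 0
+unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
+if (dictID == 0) {
+    /* no dictID available : most likely, no dictionary is required */
+} else {
+    const ZSTD_DDict* const ddict = lookup_ddict_by_id(dictID);     /* hypothetical helper */
+    /* then decompress with ZSTD_decompress_usingDDict() or ZSTD_DCtx_refDDict() */
+}
+#endif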
+
+
+/* *****************************************************************************
+ * Advanced dictionary and prefix API (Requires v1.4.0+)
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
+ * only reset when the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
+ * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 2 : Loading a dictionary involves building tables.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Tables are dependent on compression parameters, and for this reason,
+ * compression parameters can no longer be changed after loading a dictionary.
+ * Note 3 :`dict` content will be copied internally.
+ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
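+
+/* Illustrative sketch : set parameters first, then load the dictionary
+ * (per Note 2 above, parameters can no longer be changed afterwards). */
+#if 0
+ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictLen);          /* dict content is copied */
+size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+/* the dictionary is sticky : further frames on this cctx keep using it */
+#endif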
+
+/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
+ * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ * The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+
+ * Reference a prefix (single-usage dictionary) for next compressed frame.
+ * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ * Decompression will need same prefix to properly regenerate data.
+ * Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ * Its content must remain unmodified during compression.
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_c_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
+ * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
+
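+/* Illustrative sketch : delta-compress `newVersion` against `oldVersion` (hypothetical
+ * buffers); per Note 2 above, the window must be able to reach back into the prefix. */
+#if 0
+ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);        /* large enough for the whole source */
+ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);            /* applies to the next frame only */
+size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, newVersion, newSize);
+#endif
+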
+/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * The dictionary remains valid for all future frames, until explicitly invalidated.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ * It's recommended to "load once, use many times", to amortize the cost.
+ * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
+ * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
+ * how dictionary content is loaded and interpreted.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_refDDict() : Requires v1.4.0+
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ *
+ * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
+ * will store the DDict references in a table, and the DDict used for decompression
+ * will be determined at decompression time, as per the dict ID in the frame.
+ * The memory for the table is allocated on the first call to refDDict, and can be
+ * freed with ZSTD_freeDCtx().
+ *
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special: referencing a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+/*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+
+ * Reference a prefix (single-usage dictionary) to decompress next frame.
+ * This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ * and must use the same prefix as the one used during compression.
+ * Prefix is **only used once**. Reference is discarded at end of frame.
+ * End of frame is reached when ZSTD_decompressStream() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ * Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
+ * Prefix buffer must remain unmodified up to the end of frame,
+ * reached when ZSTD_decompressStream() returns 0.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
+ * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+ * A full dictionary is more costly, as it requires building tables.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize);
+
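+/* Illustrative sketch : the decompression side must reference the exact same prefix.
+ * `oldVersion`, `compressed`/`cSize` and `out` are caller-provided names. */
+#if 0
+ZSTD_DCtx_refPrefix(dctx, oldVersion, oldSize);            /* same prefix as used at compression */
+size_t const dSize = ZSTD_decompressDCtx(dctx, out, outCapacity, compressed, cSize);
+/* the prefix reference is dropped once the frame is fully decoded */
+#endif
+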
+/* === Memory management === */
+
+/*! ZSTD_sizeof_*() : Requires v1.4.0+
+ * These functions give the _current_ memory usage of selected object.
+ * Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif /* ZSTD_H_235446 */
+
+
+/* **************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/* This can be overridden externally to hide static symbols. */
+#ifndef ZSTDLIB_STATIC_API
+#define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE
+#endif
+
+/* Deprecation warnings :
+ * Should these warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
+ * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
+ */
+#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
+# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */
+#else
+# if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
+# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message)))
+# elif (__GNUC__ >= 3)
+# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated))
+# else
+# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
+# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API
+# endif
+#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
+
+/* **************************************************************************************
+ * experimental API (static linking only)
+ ****************************************************************************************
+ * The following symbols and constants
+ * are not planned to join "stable API" status in the near future.
+ * They can still change in future versions.
+ * Some of them are planned to remain in the static_only section indefinitely.
+ * Some of them might be removed in the future (especially when redundant with existing stable functions)
+ * ***************************************************************************************/
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
+#define ZSTD_SKIPPABLEHEADERSIZE 8
+
+/* compression parameter bounds */
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
+#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX_32 29
+#define ZSTD_CHAINLOG_MAX_64 30
+#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_STRATEGY_MIN ZSTD_fast
+#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+
+
+#define ZSTD_OVERLAPLOG_MIN 0
+#define ZSTD_OVERLAPLOG_MAX 9
+
+#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame
+ * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
+ * to preserve host's memory from unreasonable requirements.
+ * This limit can be overridden using ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, value).
+ * The limit does not apply to one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated. */
+
+
+/* LDM parameter bounds */
+#define ZSTD_LDM_HASHLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_LDM_HASHLOG_MAX ZSTD_HASHLOG_MAX
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MIN 1
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+#define ZSTD_LDM_HASHRATELOG_MIN 0
+#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_SRCSIZEHINT_MIN 0
+#define ZSTD_SRCSIZEHINT_MAX INT_MAX
+
+
+/* --- Advanced types --- */
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef struct {
+ unsigned int offset; /* The offset of the match. (NOT the same as the offset code)
+ * If offset == 0 and matchLength == 0, this sequence represents the last
+ * literals in the block of litLength size.
+ */
+
+ unsigned int litLength; /* Literal length of the sequence. */
+ unsigned int matchLength; /* Match length of the sequence. */
+
+ /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
+ * In this case, we will treat the sequence as a marker for a block boundary.
+ */
+
+ unsigned int rep; /* Represents which repeat offset is represented by the field 'offset'.
+ * Ranges from [0, 3].
+ *
+ * Repeat offsets are essentially previous offsets from previous sequences sorted in
+ * recency order. For more detail, see doc/zstd_compression_format.md
+ *
+ * If rep == 0, then 'offset' does not contain a repeat offset.
+ * If rep > 0:
+ * If litLength != 0:
+ * rep == 1 --> offset == repeat_offset_1
+ * rep == 2 --> offset == repeat_offset_2
+ * rep == 3 --> offset == repeat_offset_3
+ * If litLength == 0:
+ * rep == 1 --> offset == repeat_offset_2
+ * rep == 2 --> offset == repeat_offset_3
+ * rep == 3 --> offset == repeat_offset_1 - 1
+ *
+ * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
+ * 'rep', but repeat offsets do not necessarily need to be calculated from an external
+ * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * use this 'rep' field at all (as of now).
+ */
+} ZSTD_Sequence;
+
+typedef struct {
+ unsigned windowLog; /*< largest match distance : larger == more compression, more memory needed during decompression */
+ unsigned chainLog; /*< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+ unsigned hashLog; /*< dispatch table : larger == faster, more memory */
+ unsigned searchLog; /*< nb of searches : larger == more compression, slower */
+ unsigned minMatch; /*< match length searched : larger == faster decompression, sometimes less compression */
+ unsigned targetLength; /*< acceptable match size for optimal parser (only) : larger == more compression, slower */
+ ZSTD_strategy strategy; /*< see ZSTD_strategy definition above */
+} ZSTD_compressionParameters;
+
+typedef struct {
+ int contentSizeFlag; /*< 1: content size will be in frame header (when known) */
+ int checksumFlag; /*< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
+ int noDictIDFlag; /*< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef enum {
+ ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+ ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+ ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /*< Copy dictionary content internally */
+ ZSTD_dlm_byRef = 1 /*< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+typedef enum {
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
+ * Useful to save 4 bytes per generated frame.
+ * Decoder cannot automatically recognise this format, and requires this instruction. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
+ ZSTD_d_validateChecksum = 0,
+ ZSTD_d_ignoreChecksum = 1
+} ZSTD_forceIgnoreChecksum_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_refMultipleDDicts */
+ ZSTD_rmd_refSingleDDict = 0,
+ ZSTD_rmd_refMultipleDDicts = 1
+} ZSTD_refMultipleDDicts_e;
+
+typedef enum {
+ /* Note: this enum and the behavior it controls are effectively internal
+ * implementation details of the compressor. They are expected to continue
+ * to evolve and should be considered only in the context of extremely
+ * advanced performance tuning.
+ *
+ * Zstd currently supports the use of a CDict in three ways:
+ *
+ * - The contents of the CDict can be copied into the working context. This
+ * means that the compression can search both the dictionary and input
+ * while operating on a single set of internal tables. This makes
+ * the compression faster per-byte of input. However, the initial copy of
+ * the CDict's tables incurs a fixed cost at the beginning of the
+ * compression. For small compressions (< 8 KB), that copy can dominate
+ * the cost of the compression.
+ *
+ * - The CDict's tables can be used in-place. In this model, compression is
+ * slower per input byte, because the compressor has to search two sets of
+ * tables. However, this model incurs no start-up cost (as long as the
+ * working context's tables can be reused). For small inputs, this can be
+ * faster than copying the CDict's tables.
+ *
+ * - The CDict's tables are not used at all, and instead we use the working
+ * context alone to reload the dictionary and use params based on the source
+ * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+ * This method is effective when the dictionary sizes are very small relative
+ * to the input size, and the input size is fairly large to begin with.
+ *
+ * Zstd has a simple internal heuristic that selects which strategy to use
+ * at the beginning of a compression. However, if experimentation shows that
+ * Zstd is making poor choices, it is possible to override that choice with
+ * this enum.
+ */
+ ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
+ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
+ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
+ ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
+} ZSTD_dictAttachPref_e;
+
+typedef enum {
+ ZSTD_lcm_auto = 0, /*< Automatically determine the compression mode based on the compression level.
+ * Negative compression levels will be uncompressed, and positive compression
+ * levels will be compressed. */
+ ZSTD_lcm_huffman = 1, /*< Always attempt Huffman compression. Uncompressed literals will still be
+ * emitted if Huffman compression is not profitable. */
+ ZSTD_lcm_uncompressed = 2 /*< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
+typedef enum {
+ /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
+ * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
+ * or ZSTD_ps_disable allows the feature to be force-enabled or force-disabled.
+ */
+ ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */
+ ZSTD_ps_enable = 1, /* Force-enable the feature */
+ ZSTD_ps_disable = 2 /* Do not use the feature */
+} ZSTD_paramSwitch_e;
+
+/* *************************************
+* Frame size functions
+***************************************/
+
+/*! ZSTD_findDecompressedSize() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast, as most of the data is skipped;
+ * however, it does mean that all frame data must be present and valid. */
+ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_decompressBound() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - upper-bound for the decompressed size of all data in all successive frames
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ * upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
+
+/*! ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+typedef enum {
+ ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
+} ZSTD_sequenceFormat_e;
+
+/*! ZSTD_generateSequences() :
+ * Generate sequences using ZSTD_compress2, given a source buffer.
+ *
+ * Each block will end with a dummy sequence
+ * with offset == 0, matchLength == 0, and litLength == length of last literals.
+ * litLength may be == 0, and if so, then the sequence (of: 0, ml: 0, ll: 0)
+ * simply acts as a block delimiter.
+ *
+ * zc can be used to insert custom compression params.
+ * This function invokes ZSTD_compress2().
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
+ * @return : number of sequences generated
+ */
+
+ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize);
+
+/*! ZSTD_mergeBlockDelimiters() :
+ * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
+ * by merging them into the literals of the next sequence.
+ *
+ * As such, the final generated result has no explicit representation of block boundaries,
+ * and the final last literals segment is not represented in the sequences.
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
+ * @return : number of sequences left after merging
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
+
+/*! ZSTD_compressSequences() :
+ * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
+ * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * The entire source is compressed into a single frame.
+ *
+ * The compression behavior changes based on cctx params. In particular:
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
+ * the block size derived from the cctx, and sequences may be split. This is the default setting.
+ *
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ *
+ * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, then if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements), the function will bail out and return an error.
+ *
+ * In addition to the two adjustable experimental params, there are other important cctx params.
+ * - ZSTD_c_minMatch MUST be less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
+ * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
+ * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
+ * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
+ *
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
+ * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history.
+ * @return : final compressed size or a ZSTD error.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
+
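+/* Illustrative sketch : a generate -> merge -> compress pipeline. `zc`/`cctx` and `outSeqs` are
+ * caller-provided; ZSTD_c_blockDelimiters and ZSTD_c_validateSequences are the experimental
+ * cctx parameters referenced above (defined elsewhere in this header). */
+#if 0
+size_t nbSeqs = ZSTD_generateSequences(zc, outSeqs, outSeqsCapacity, src, srcSize);
+nbSeqs = ZSTD_mergeBlockDelimiters(outSeqs, nbSeqs);       /* strip block delimiters */
+ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
+ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1); /* reject invalid sequences */
+size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
+                                            outSeqs, nbSeqs, src, srcSize);
+#endif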
+
+/*! ZSTD_writeSkippableFrame() :
+ * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
+ * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used :
+ * the magic number written will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, if the source size is not representable
+ * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant);
+
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
+ * in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
+ const void* src, size_t srcSize);
+
+/*! ZSTD_isSkippableFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ */
+ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
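+
+/* Illustrative sketch : embed application metadata in a skippable frame, then read it back.
+ * `meta`/`metaSize` and the buffers are caller-provided. */
+#if 0
+size_t const written = ZSTD_writeSkippableFrame(dst, dstCapacity, meta, metaSize, 5);
+if (!ZSTD_isError(written) && ZSTD_isSkippableFrame(dst, written)) {
+    unsigned variant;                                      /* receives 5 on success */
+    size_t const metaRead = ZSTD_readSkippableFrame(out, outCapacity, &variant, dst, written);
+}
+#endif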
+
+
+
+/* *************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
+ *
+ * ZSTD_estimateCCtxSize() will provide a memory budget large enough
+ * for any compression level up to selected one.
+ * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
+ * does not include space for a window buffer.
+ * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ * The estimate will assume the input may be arbitrarily large,
+ * which is the worst case.
+ *
+ * When srcSize can be bound by a known and rather "small" value,
+ * this fact can be used to provide a tighter estimation
+ * because the CCtx compression context will need less memory.
+ * This tighter estimation can be provided by more advanced functions
+ * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
+ * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
+ *
+ * Note 2 : only single-threaded compression is supported.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is the worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Only single-threaded compression is supported; this function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ * ZSTD_DStream memory budget depends on window Size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize,
+ * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
+ * an internal ?Dict will be created, whose additional size is not estimated here.
+ * In this case, get the total size by adding ZSTD_estimate?DictSize() */
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-bytes aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */
+
+ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
+ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
+
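+/* Illustrative sketch : combine ZSTD_estimateCCtxSize() with ZSTD_initStaticCCtx() for a
+ * malloc-free, fixed-budget compressor (single-shot only, per the notes above). */
+#if 0
+static char workspace[1 << 20] __attribute__((aligned(8))); /* caller-managed arena */
+ZSTD_CCtx* cctx = NULL;
+if (ZSTD_estimateCCtxSize(3) <= sizeof(workspace))          /* budget for levels <= 3 */
+    cctx = ZSTD_initStaticCCtx(workspace, sizeof(workspace));
+if (cctx != NULL) {
+    size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3);
+}
+/* no ZSTD_freeCCtx() : the workspace is released by its owner */
+#endif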
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static
+__attribute__((__unused__))
+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */
+
+ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
+
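+/* Illustrative sketch : route all internal allocations through custom functions.
+ * The wrappers below simply forward to stdlib; a real application might use an arena via `opaque`. */
+#if 0
+static void* my_alloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
+static void  my_free (void* opaque, void* address) { (void)opaque; free(address); }
+ZSTD_customMem const mem = { my_alloc, my_free, NULL /* opaque */ };
+ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(mem);
+#endif
+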
+/*! Thread pool :
+ * These prototypes make it possible to share a thread pool among multiple compression contexts.
+ * This can limit resources for applications with multiple threads where each one uses
+ * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+ * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+ * Note that the pool must remain valid for as long as it is being used.
+ * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+ * to use an internal thread pool).
+ * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+ */
+typedef struct POOL_ctx_s ZSTD_threadPool;
+ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
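+
+/* Illustrative sketch : two contexts (`cctx1`/`cctx2`, caller-provided) drawing workers
+ * from one shared pool. */
+#if 0
+ZSTD_threadPool* const pool = ZSTD_createThreadPool(8);    /* 8 shared worker threads */
+ZSTD_CCtx_setParameter(cctx1, ZSTD_c_nbWorkers, 4);
+ZSTD_CCtx_setParameter(cctx2, ZSTD_c_nbWorkers, 4);
+ZSTD_CCtx_refThreadPool(cctx1, pool);
+ZSTD_CCtx_refThreadPool(cctx2, pool);
+/* ... compress on both contexts; the pool must stay alive throughout ... */
+ZSTD_freeThreadPool(pool);
+#endif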
+
+
+/*
+ * This API is temporary and is expected to change or disappear in the future!
+ */
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
+
+
+/* *************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
+ * Dictionary content is just referenced, not duplicated.
+ * As a consequence, `dictBuffer` **must** outlive CDict,
+ * and its content must remain unmodified throughout the lifetime of CDict.
+ * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+ * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+ * Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
+ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ * optimize params for a given `srcSize` and `dictSize`.
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ * cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ * This function never fails (wide contract) */
+ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+ * Note : this function is now DEPRECATED.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+ * This prototype will generate compilation warnings. */
+ZSTD_DEPRECATED("use ZSTD_compress2")
+size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+ * Note : this function is now DEPRECATED.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+ * This prototype will generate compilation warnings. */
+ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams);
+
+
+/*! ZSTD_CCtx_loadDictionary_byReference() :
+ * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
+ * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_loadDictionary_advanced() :
+ * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?) */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_refPrefix_advanced() :
+ * Same as ZSTD_CCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/* === experimental parameters === */
+/* these parameters can be used with ZSTD_CCtx_setParameter()
+ * they are not guaranteed to remain supported in the future */
+
+ /* Enables rsyncable mode,
+ * which makes compressed files more rsync friendly
+ * by adding periodic synchronization points to the compressed data.
+ * The target average block size is ZSTD_c_jobSize / 2.
+ * It's possible to modify the job size to increase or decrease
+ * the granularity of the synchronization point.
+ * Once the jobSize is smaller than the window size,
+ * it will result in compression ratio degradation.
+ * NOTE 1: rsyncable mode only works when multithreading is enabled.
+ * NOTE 2: rsyncable performs poorly in combination with long range mode,
+ * since it will decrease the effectiveness of synchronization points,
+ * though mileage may vary.
+ * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
+ * If the selected compression level is already running significantly slower,
+ * the overall speed won't be significantly impacted.
+ */
+ #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
+
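+/* Illustrative sketch : rsyncable mode only takes effect with multithreading enabled. */
+#if 0
+ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);         /* NOTE 1 : multithreading required */
+ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
+#endif
+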
+/* Select a compression format.
+ * The value must be of type ZSTD_format_e.
+ * See ZSTD_format_e enum definition for details */
+#define ZSTD_c_format ZSTD_c_experimentalParam2
+
+/* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
+
+/* Controls whether the contents of a CDict
+ * are used in place, or copied into the working context.
+ * Accepts values from the ZSTD_dictAttachPref_e enum.
+ * See the comments on that enum for an explanation of the feature. */
+#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+
+/* Controlled with ZSTD_paramSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never compress literals.
+ * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
+ * may still be emitted if huffman is not beneficial to use.)
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * literals compression based on the compression parameters - specifically,
+ * negative compression levels do not use literal compression.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
+/* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+ * but compression ratio may regress significantly if guess considerably underestimates */
+#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+
+/* Controls whether the new and experimental "dedicated dictionary search
+ * structure" can be used. This feature is still rough around the edges, be
+ * prepared for surprising behavior!
+ *
+ * How to use it:
+ *
+ * When using a CDict, whether to use this feature or not is controlled at
+ * CDict creation, and it must be set in a CCtxParams set passed into that
+ * construction (via ZSTD_createCDict_advanced2()). A compression will then
+ * use the feature or not based on how the CDict was constructed; the value of
+ * this param, set in the CCtx, will have no effect.
+ *
+ * However, when a dictionary buffer is passed into a CCtx, such as via
+ * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
+ * whether the CDict that is created internally can use the feature or not.
+ *
+ * What it does:
+ *
+ * Normally, the internal data structures of the CDict are analogous to what
+ * would be stored in a CCtx after compressing the contents of a dictionary.
+ * To an approximation, a compression using a dictionary can then use those
+ * data structures to simply continue what is effectively a streaming
+ * compression where the simulated compression of the dictionary left off.
+ * Which is to say, the search structures in the CDict are normally the same
+ * format as in the CCtx.
+ *
+ * It is possible to do better, since the CDict is not like a CCtx: the search
+ * structures are written once during CDict creation, and then are only read
+ * after that, while the search structures in the CCtx are both read and
+ * written as the compression goes along. This means we can choose a search
+ * structure for the dictionary that is read-optimized.
+ *
+ * This feature enables the use of that different structure.
+ *
+ * Note that some of the members of the ZSTD_compressionParameters struct have
+ * different semantics and constraints in the dedicated search structure. It is
+ * highly recommended that you simply set a compression level in the CCtxParams
+ * you pass into the CDict creation call, and avoid messing with the cParams
+ * directly.
+ *
+ * Effects:
+ *
+ * This will only have any effect when the selected ZSTD_strategy
+ * implementation supports this feature. Currently, that's limited to
+ * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
+ *
+ * Note that this means that the CDict tables can no longer be copied into the
+ * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
+ * usable. The dictionary can only be attached or reloaded.
+ *
+ * In general, you should expect compression to be faster--sometimes very much
+ * so--and CDict creation to be slightly slower. Eventually, we will probably
+ * make this mode the default.
+ */
+#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
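+
+/* Illustrative sketch (not part of the header): enabling the dedicated
+ * dictionary search structure at CDict creation time, as described above.
+ * The names dictBuf/dictSize are hypothetical, and the exact
+ * ZSTD_createCDict_advanced2() signature should be checked against zstd.h.
+ *
+ *   ZSTD_customMem const cmem = { NULL, NULL, NULL };  // default allocator
+ *   ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_init(p, 3);  // set a level; avoid touching cParams directly
+ *   ZSTD_CCtxParams_setParameter(p, ZSTD_c_enableDedicatedDictSearch, 1);
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(
+ *           dictBuf, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, p, cmem);
+ *   ZSTD_freeCCtxParams(p);
+ *   // later: ZSTD_CCtx_refCDict(cctx, cdict); note ZSTD_dictForceCopy is not usable here
+ */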
+
+/* ZSTD_c_stableInBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the compressor, and
+ * compression will fail if it ever changes. This means the only flush
+ * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
+ * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
+ * MUST not be modified during compression or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an input window buffer,
+ * because the user guarantees it can reference the ZSTD_inBuffer until
+ * the frame is complete. But, it will still allocate an output buffer
+ * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
+ * avoid the memcpy() from the input buffer to the input window buffer.
+ *
+ * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
+ * That means this flag cannot be used with ZSTD_compressStream().
+ *
+ * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, compression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * matches. Normally zstd maintains its own window buffer for this purpose,
+ * but passing this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
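+
+/* Illustrative sketch (not part of the header): compressing a fully in-memory
+ * source with a stable input buffer. The names srcBuf/srcSize/dstBuf/dstCap
+ * are hypothetical; the ZSTD_inBuffer must not move between calls.
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
+ *   ZSTD_inBuffer  in  = { srcBuf, srcSize, 0 };  // only pos may change, and only by zstd
+ *   ZSTD_outBuffer out = { dstBuf, dstCap, 0 };
+ *   size_t ret;
+ *   do {
+ *       ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);  // only ZSTD_e_end is legal
+ *       if (ZSTD_isError(ret)) break;  // handle error
+ *   } while (ret != 0);  // ret == 0 means the frame is complete
+ */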
+
+/* ZSTD_c_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_outBuffer will not be resized between
+ * calls. Specifically: (out.size - out.pos) will never grow. This gives the
+ * compressor the freedom to say: If the compressed data doesn't fit in the
+ * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
+ * always write compressed data directly into the output buffer, instead of
+ * compressing into an internal buffer and copying to the output buffer.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer. It will still allocate the
+ * input window buffer (see ZSTD_c_stableInBuffer).
+ *
+ * Zstd will check that (out.size - out.pos) never grows and return an error
+ * if it does. While not strictly necessary, this should prevent surprises.
+ */
+#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
+
+/* ZSTD_c_blockDelimiters
+ * Default is 0 == ZSTD_sf_noBlockDelimiters.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ *
+ * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
+ * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
+ * See the definition of ZSTD_Sequence for more specifics.
+ */
+#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
+
+/* ZSTD_c_validateSequences
+ * Default is 0 == disabled. Set to 1 to enable sequence validation.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * during function execution.
+ *
+ * Without validation, providing a sequence that does not conform to the zstd spec will cause
+ * undefined behavior, and may produce a corrupted block.
+ *
+ * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) then the function will bail out and
+ * return an error.
+ */
+#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
+
+/* ZSTD_c_useBlockSplitter
+ * Controlled with ZSTD_paramSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never use block splitter.
+ * Set to ZSTD_ps_enable to always use block splitter.
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * block splitting based on the compression parameters.
+ */
+#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13
+
+/* ZSTD_c_useRowMatchFinder
+ * Controlled with ZSTD_paramSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never use row-based matchfinder.
+ * Set to ZSTD_ps_enable to force usage of row-based matchfinder.
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * the row-based matchfinder based on support for SIMD instructions and the window log.
+ * Note that this only pertains to compression strategies: greedy, lazy, and lazy2.
+ */
+#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14
+
+/* ZSTD_c_deterministicRefPrefix
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Zstd produces different results for prefix compression when the prefix is
+ * directly adjacent to the data about to be compressed vs. when it isn't.
+ * This is because zstd detects that the two buffers are contiguous and it can
+ * use a more efficient match finding algorithm. However, this produces different
+ * results than when the two buffers are non-contiguous. This flag forces zstd
+ * to always load the prefix in non-contiguous mode, even if it happens to be
+ * adjacent to the data, to guarantee determinism.
+ *
+ * If you really care about determinism when using a dictionary or prefix,
+ * like when doing delta compression, you should select this option. It comes
+ * at a speed penalty of about ~2.5% if the dictionary and data happened to be
+ * contiguous, and is free if they weren't contiguous. We don't expect that
+ * intentionally making the dictionary and data contiguous will be worth the
+ * cost to memcpy() the data.
+ */
+#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
+
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested compression parameter value, selected by enum ZSTD_cParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+
+
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent frames.
+ * - ZSTD_compressStream2() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
+ * for static allocation of CCtx for single-threaded compression.
+ */
+ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
+
+/*! ZSTD_CCtxParams_reset() :
+ * Reset params to default values.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using
+ * ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : a code representing success or failure (which can be tested with
+ * ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtxParams_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ * This can be done even after compression has started.
+ * If nbWorkers==0, this will have no impact until a new compression is started.
+ * If nbWorkers>=1, new parameters will be picked up at the next job,
+ * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
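+
+/* Illustrative sketch (not part of the header): the "quick howto" above as
+ * code. The parameter values are arbitrary examples.
+ *
+ *   ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 19);
+ *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
+ *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  // applies to all later frames
+ *   // ... drive compression with ZSTD_compressStream2() as usual ...
+ *   ZSTD_freeCCtxParams(params);
+ */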
+
+/*! ZSTD_compressStream2_simpleArgs() :
+ * Same as ZSTD_compressStream2(),
+ * but using only integral types as arguments.
+ * This variant might be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp);
+
+
+/* *************************************
+* Advanced decompression functions
+***************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives the DDict:
+ * it must remain read accessible throughout the lifetime of the DDict */
+ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_byReference() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but references `dict` content instead of copying it into `dctx`.
+ * This saves memory if `dict` remains around.
+ * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_advanced() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but gives direct control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?). */
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_refPrefix_advanced() :
+ * Same as ZSTD_DCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ * Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
+ * This protects a decoder context from reserving too much memory for itself (potential attack scenario).
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+
+/*! ZSTD_DCtx_getParameter() :
+ * Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
+
+/* ZSTD_d_format
+ * experimental parameter,
+ * allowing selection between ZSTD_format_e input compression formats
+ */
+#define ZSTD_d_format ZSTD_d_experimentalParam1
+/* ZSTD_d_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the decompressor, and
+ * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
+ * MUST be large enough to fit the entire decompressed frame. This will be
+ * checked when the frame content size is known. The data in the ZSTD_outBuffer
+ * in the range [dst, dst + pos) MUST not be modified during decompression
+ * or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer, but it will still allocate
+ * an input buffer large enough to fit any compressed block. This will also
+ * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
+ * If you need to avoid the input buffer allocation use the buffer-less
+ * streaming API.
+ *
+ * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, decompression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
+ * not be modified during decompression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
+ * matches. Normally zstd maintains its own buffer for this purpose, but passing
+ * this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
+
+/* ZSTD_d_forceIgnoreChecksum
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor to skip checksum validation during decompression, regardless
+ * of whether checksumming was specified during compression. This offers a
+ * slight performance benefit, and may be useful for debugging.
+ * Param has values of type ZSTD_forceIgnoreChecksum_e.
+ */
+#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
+
+/* ZSTD_d_refMultipleDDicts
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * If enabled and dctx is allocated on the heap, then additional memory will be allocated
+ * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
+ * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
+ * store all references. At decompression time, the appropriate dictID is selected
+ * from the set of DDicts based on the dictID in the frame.
+ *
+ * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
+ *
+ * Param has values of type ZSTD_refMultipleDDicts_e
+ *
+ * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict(), will trigger memory
+ * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
+ * Memory is allocated as per ZSTD_DCtx::customMem.
+ *
+ * Although this function allocates memory for the table, the user is still responsible for
+ * memory management of the underlying ZSTD_DDict* themselves.
+ */
+#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
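+
+/* Illustrative sketch (not part of the header): keeping several DDicts
+ * referenced at once and letting each frame's dictID pick the right one.
+ * ZSTD_rmd_refMultipleDDicts is assumed to be the enabling enum value.
+ *
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
+ *   ZSTD_DCtx_refDDict(dctx, ddict1);  // references now accumulate instead of replacing
+ *   ZSTD_DCtx_refDDict(dctx, ddict2);
+ *   // decompression selects ddict1 or ddict2 based on the dictID in each frame
+ */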
+
+
+/*! ZSTD_DCtx_setFormat() :
+ * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
+ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+/*! ZSTD_decompressStream_simpleArgs() :
+ * Same as ZSTD_decompressStream(),
+ * but using only integral types as arguments.
+ * This can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/* ******************************************************************
+* Advanced streaming functions
+* Warning : most of these functions are now redundant with the Advanced API.
+* Once Advanced API reaches "stable" status,
+* redundant functions will be deprecated, and then at some point removed.
+********************************************************************/
+
+/*===== Advanced Streaming compression functions =====*/
+
+/*! ZSTD_initCStream_srcSize() :
+ * This function is DEPRECATED, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ * This prototype will generate compilation warnings.
+ */
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+ int compressionLevel,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingDict() :
+ * This function is DEPRECATED, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * This creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ * This prototype will generate compilation warnings.
+ */
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_initCStream_advanced() :
+ * This function is DEPRECATED, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd parameter and leave the rest as-is.
+ * for ((param, value) : params) {
+ * ZSTD_CCtx_setParameter(zcs, param, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+ * pledgedSrcSize must be correct.
+ * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * This prototype will generate compilation warnings.
+ */
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingCDict() :
+ * This function is DEPRECATED, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive the compression session.
+ * This prototype will generate compilation warnings.
+ */
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+
+/*! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is DEPRECATED, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
+ * for ((fParam, value) : fParams) {
+ * ZSTD_CCtx_setParameter(zcs, fParam, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ * This prototype will generate compilation warnings.
+ */
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_resetCStream() :
+ * This function is DEPRECATED, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but
+ * ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be
+ * explicitly specified.
+ *
+ * Start a new frame, using the same parameters as the previous frame.
+ * This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in place.
+ * Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ * This prototype will generate compilation warnings.
+ */
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+typedef struct {
+ unsigned long long ingested; /* nb input bytes read and buffered */
+ unsigned long long consumed; /* nb input bytes actually compressed */
+ unsigned long long produced; /* nb of compressed bytes generated and buffered */
+ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+ unsigned currentJobID; /* MT only : latest started job nb */
+ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression() :
+ * Tells how much data has been ingested (read from input),
+ * consumed (input actually compressed) and produced (output) for the current frame.
+ * Note : (ingested - consumed) is the amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
+ */
+ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
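+
+/* Illustrative sketch (not part of the header): polling progression, e.g. from
+ * a monitoring thread, while another thread drives ZSTD_compressStream2().
+ *
+ *   ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *   unsigned long long const buffered = fp.ingested - fp.consumed;  // not yet compressed
+ *   printf("job %u, %u active workers, %llu bytes buffered\n",
+ *          fp.currentJobID, fp.nbActiveWorkers, buffered);
+ */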
+
+/*! ZSTD_toFlushNow() :
+ * Tell how many bytes are ready to be flushed immediately.
+ * Useful for multithreading scenarios (nbWorkers >= 1).
+ * Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ * and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ * if @return == 0, it means either :
+ * + there is no active job (could be checked with ZSTD_frameProgression()), or
+ * + oldest job is still actively compressing data,
+ * but everything it has produced has also been flushed so far,
+ * therefore flush speed is limited by production speed of oldest job
+ * irrespective of the speed of concurrent (and newer) jobs.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+
+
+/*===== Advanced Streaming decompression functions =====*/
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive the decompression session.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+
+/* *******************************************************************
+* Buffer-less and synchronous inner streaming functions
+*
+* This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/*
+ Buffer-less streaming compression (synchronous mode)
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
+ ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+ Start by initializing a context.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
+ It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+
+ Then, consume your input using ZSTD_compressContinue().
+ There are some important considerations to keep in mind when using this advanced function :
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+ Worst case evaluation is provided by ZSTD_compressBound().
+ ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+ - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+ It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks).
+ - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps it.
+ In which case, it will "discard" the relevant memory section from its history.
+
+ Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+ It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*===== Buffer-less streaming compression functions =====*/
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
+ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
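+
+/* Illustrative sketch (not part of the header): one frame compressed chunk by
+ * chunk with the buffer-less API. The names src/srcSize/dst/fout and the CHUNK
+ * size are hypothetical; dst must hold ZSTD_compressBound(CHUNK) bytes, and the
+ * whole src stays valid in memory, satisfying the window requirement.
+ *
+ *   size_t const dstCap = ZSTD_compressBound(CHUNK);
+ *   ZSTD_compressBegin(cctx, 3);
+ *   for (size_t off = 0; off < srcSize; off += CHUNK) {
+ *       size_t const n = (srcSize - off < CHUNK) ? srcSize - off : CHUNK;
+ *       size_t const c = ZSTD_compressContinue(cctx, dst, dstCap, (const char*)src + off, n);
+ *       if (ZSTD_isError(c)) return c;  // no recovery is guaranteed after a failure
+ *       fwrite(dst, 1, c, fout);
+ *   }
+ *   size_t const last = ZSTD_compressEnd(cctx, dst, dstCap, NULL, 0);  // last block (+ checksum)
+ *   fwrite(dst, 1, last, fout);
+ */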
+
+/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
+ZSTD_DEPRECATED("use advanced API to access custom parameters")
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("use advanced API to access custom parameters")
+size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+/*
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+ Data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+ or an error code, which can be tested using ZSTD_isError().
+
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+ ZSTD_decompressContinue() is very sensitive to contiguity :
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+ whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At which point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are set up, start decompression with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+ Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ It can also be an error code, which can be tested with ZSTD_isError().
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+ Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
+ This information is not required to properly decode a frame.
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+ Skippable frames will be ignored (skipped) by the decompressor.
+ The format of skippable frames is as follows :
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
+
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTD_frameHeader;
+
+/*! ZSTD_getFrameHeader() :
+ * Decode the Frame Header, or indicate that a larger `srcSize` is required.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
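+
+/* Illustrative sketch (not part of the header): a simple decode loop over a
+ * whole frame held in memory. The names src/srcSize/out/outCap and
+ * MY_WINDOW_LIMIT are hypothetical; out must be large enough for the frame.
+ *
+ *   ZSTD_frameHeader zfh;
+ *   if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return 0;  // need more input, or error
+ *   if (zfh.windowSize > MY_WINDOW_LIMIT) return 0;  // don't trust header values blindly
+ *   ZSTD_decompressBegin(dctx);
+ *   const char* ip = (const char*)src;
+ *   char* op = (char*)out;
+ *   size_t need;
+ *   while ((need = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *       size_t const got = ZSTD_decompressContinue(dctx, op, outCap - (size_t)(op - (char*)out), ip, need);
+ *       if (ZSTD_isError(got)) return got;  // handle error
+ *       ip += need;  op += got;  // got may be 0 for metadata items
+ *   }
+ */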
+
+/* misc */
+ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+
+
+
+/* ============================ */
+/* Block level API */
+/* ============================ */
+
+/*!
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ But users will have to manage the metadata needed to regenerate the data themselves, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+ - It is necessary to init context before starting
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
+ ===> In which case, nothing is produced into `dst` !
+ + User __must__ test for such outcome and deal directly with uncompressed data
+ + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
+ Doing so would mess up the statistics history, leading to potential data corruption.
+ + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ + In case of multiple successive blocks, should some of them be uncompressed,
+ the decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
+*/
+
+/*===== Raw zstd block functions =====*/
+ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
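+
+/* Illustrative sketch (not part of the header): handling the incompressible
+ * (zero) return of ZSTD_compressBlock(), per the rules above. The helpers
+ * store_raw()/store_compressed() and the names src/blkSize/dst/dstCap are
+ * hypothetical.
+ *
+ *   // compression side
+ *   size_t const c = ZSTD_compressBlock(cctx, dst, dstCap, src, blkSize);
+ *   if (ZSTD_isError(c)) return c;
+ *   if (c == 0) store_raw(src, blkSize);   // nothing was written into dst
+ *   else        store_compressed(dst, c);
+ *
+ *   // decompression side: raw blocks must be declared via ZSTD_insertBlock()
+ *   if (blockWasRaw) { memcpy(op, blk, blkSize); ZSTD_insertBlock(dctx, op, blkSize); }
+ *   else             ZSTD_decompressBlock(dctx, op, opCap, blk, blkSize);
+ */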
+
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
new file mode 100644
index 000000000000..341aea490070
--- /dev/null
+++ b/include/linux/zswap.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ZSWAP_H
+#define _LINUX_ZSWAP_H
+
+#include <linux/types.h>
+#include <linux/mm_types.h>
+
+struct lruvec;
+
+extern u64 zswap_pool_total_size;
+extern atomic_t zswap_stored_pages;
+
+#ifdef CONFIG_ZSWAP
+
+struct zswap_lruvec_state {
+ /*
+ * Number of pages in zswap that should be protected from the shrinker.
+ * This number is an estimate of the following counts:
+ *
+ * a) Recent page faults.
+ * b) Recent insertion to the zswap LRU. This includes new zswap stores,
+ * as well as recent zswap LRU rotations.
+ *
+ * These pages are likely to be warm, and might incur IO if they are written
+ * to swap.
+ */
+ atomic_long_t nr_zswap_protected;
+};
+
+bool zswap_store(struct folio *folio);
+bool zswap_load(struct folio *folio);
+void zswap_invalidate(swp_entry_t swp);
+int zswap_swapon(int type, unsigned long nr_pages);
+void zswap_swapoff(int type);
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
+void zswap_lruvec_state_init(struct lruvec *lruvec);
+void zswap_folio_swapin(struct folio *folio);
+bool is_zswap_enabled(void);
+#else
+
+struct zswap_lruvec_state {};
+
+static inline bool zswap_store(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool zswap_load(struct folio *folio)
+{
+ return false;
+}
+
+static inline void zswap_invalidate(swp_entry_t swp) {}
+static inline int zswap_swapon(int type, unsigned long nr_pages)
+{
+ return 0;
+}
+static inline void zswap_swapoff(int type) {}
+static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
+static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
+
+static inline bool is_zswap_enabled(void)
+{
+ return false;
+}
+
+#endif
+
+#endif /* _LINUX_ZSWAP_H */
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 143568d64b20..8ce066c035cf 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -338,7 +338,7 @@ do { \
FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \
break; \
} \
- /* FALLTHRU */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
@@ -662,12 +662,14 @@ do { \
if (X##_e < 0) \
{ \
FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ fallthrough; \
case FP_CLS_ZERO: \
r = 0; \
} \
else if (X##_e >= rsize - (rsigned > 0 || X##_s) \
|| (!rsigned && X##_s)) \
{ /* overflow */ \
+ fallthrough; \
case FP_CLS_NAN: \
case FP_CLS_INF: \
if (rsigned == 2) \
@@ -767,6 +769,7 @@ do { \
if (X##_e >= rsize - (rsigned > 0 || X##_s) \
|| (!rsigned && X##_s)) \
{ /* overflow */ \
+ fallthrough; \
case FP_CLS_NAN: \
case FP_CLS_INF: \
if (!rsigned) \
diff --git a/include/media/cec.h b/include/media/cec.h
index c48b5f2e4b50..10c9cf6058b7 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -26,13 +26,17 @@
* @dev: cec device
* @cdev: cec character device
* @minor: device node minor number
+ * @lock: lock to serialize open/release and registration
* @registered: the device was correctly registered
* @unregistered: the device was unregistered
- * @fhs_lock: lock to control access to the filehandle list
+ * @lock_fhs: lock to control access to @fhs
* @fhs: the list of open filehandles (cec_fh)
*
* This structure represents a cec-related device node.
*
+ * To add or remove filehandles from @fhs the @lock must be taken first,
+ * followed by @lock_fhs. It is safe to access @fhs if either lock is held.
+ *
* The @parent is a physical device. It must be set by core or device drivers
* before registering the node.
*/
@@ -43,10 +47,13 @@ struct cec_devnode {
/* device info */
int minor;
+ /* serialize open/release and registration */
+ struct mutex lock;
bool registered;
bool unregistered;
+ /* protect access to fhs */
+ struct mutex lock_fhs;
struct list_head fhs;
- struct mutex lock;
};
struct cec_adapter;
@@ -106,21 +113,25 @@ struct cec_fh {
#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400)
struct cec_adap_ops {
- /* Low-level callbacks */
+ /* Low-level callbacks, called with adap->lock held */
int (*adap_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+ void (*adap_unconfigured)(struct cec_adapter *adap);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
+ void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
+ const struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
void (*adap_free)(struct cec_adapter *adap);
- /* Error injection callbacks */
+ /* Error injection callbacks, called without adap->lock held */
int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
- /* High-level CEC message callback */
+ /* High-level CEC message callback, called without adap->lock held */
+ void (*configured)(struct cec_adapter *adap);
int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
@@ -156,6 +167,13 @@ struct cec_adap_ops {
* @wait_queue: queue of transmits waiting for a reply
* @transmitting: CEC messages currently being transmitted
* @transmit_in_progress: true if a transmit is in progress
+ * @transmit_in_progress_aborted: true if a transmit in progress is to be
+ * aborted. This happens if the logical address is
+ * invalidated while the transmit is ongoing. In that
+ * case the transmit will finish, but will not be
+ * retransmitted, and will be marked as ABORTED.
+ * @xfer_timeout_ms: the transfer timeout in ms.
+ * If 0, then timeout after 2100 ms.
* @kthread_config: kthread used to configure a CEC adapter
* @config_completion: used to signal completion of the config kthread
* @kthread: main CEC processing thread
@@ -168,7 +186,9 @@ struct cec_adap_ops {
* @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high
* in order to transmit or receive CEC messages. This is usually a HW
* limitation.
+ * @is_enabled: the CEC adapter is enabled
* @is_configuring: the CEC adapter is configuring (i.e. claiming LAs)
+ * @must_reconfigure: while configuring, the PA changed, so reclaim LAs
* @is_configured: the CEC adapter is configured (i.e. has claimed LAs)
* @cec_pin_is_high: if true then the CEC pin is high. Only used with the
* CEC pin framework.
@@ -187,12 +207,23 @@ struct cec_adap_ops {
* passthrough mode.
* @log_addrs: current logical addresses
* @conn_info: current connector info
- * @tx_timeouts: number of transmit timeouts
+ * @tx_timeout_cnt: count the number of Timed Out transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_low_drive_cnt: count the number of Low Drive transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_error_cnt: count the number of Error transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_arb_lost_cnt: count the number of Arb Lost transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the
+ * adapter was enabled. Used to avoid flooding the kernel
+ * log if this happens a lot.
+ * @tx_error_log_cnt: number of logged Error transmits since the adapter was
+ * enabled. Used to avoid flooding the kernel log if this
+ * happens a lot.
* @notifier: CEC notifier
* @pin: CEC pin status struct
* @cec_dir: debugfs cec directory
- * @status_file: debugfs cec status file
- * @error_inj_file: debugfs cec error injection file
* @sequence: transmit sequence counter
* @input_phys: remote control input_phys name
*
@@ -210,6 +241,8 @@ struct cec_adapter {
struct list_head wait_queue;
struct cec_data *transmitting;
bool transmit_in_progress;
+ bool transmit_in_progress_aborted;
+ unsigned int xfer_timeout_ms;
struct task_struct *kthread_config;
struct completion config_completion;
@@ -224,7 +257,9 @@ struct cec_adapter {
u16 phys_addr;
bool needs_hpd;
+ bool is_enabled;
bool is_configuring;
+ bool must_reconfigure;
bool is_configured;
bool cec_pin_is_high;
bool adap_controls_phys_addr;
@@ -238,7 +273,12 @@ struct cec_adapter {
struct cec_log_addrs log_addrs;
struct cec_connector_info conn_info;
- u32 tx_timeouts;
+ u32 tx_timeout_cnt;
+ u32 tx_low_drive_cnt;
+ u32 tx_error_cnt;
+ u32 tx_arb_lost_cnt;
+ u32 tx_low_drive_log_cnt;
+ u32 tx_error_log_cnt;
#ifdef CONFIG_CEC_NOTIFIER
struct cec_notifier *notifier;
@@ -248,12 +288,10 @@ struct cec_adapter {
#endif
struct dentry *cec_dir;
- struct dentry *status_file;
- struct dentry *error_inj_file;
u32 sequence;
- char input_phys[32];
+ char input_phys[40];
};
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
diff --git a/include/media/davinci/ccdc_types.h b/include/media/davinci/ccdc_types.h
deleted file mode 100644
index 971984dc1ce4..000000000000
--- a/include/media/davinci/ccdc_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- **************************************************************************/
-#ifndef _CCDC_TYPES_H
-#define _CCDC_TYPES_H
-enum ccdc_pixfmt {
- CCDC_PIXFMT_RAW,
- CCDC_PIXFMT_YCBCR_16BIT,
- CCDC_PIXFMT_YCBCR_8BIT
-};
-
-enum ccdc_frmfmt {
- CCDC_FRMFMT_PROGRESSIVE,
- CCDC_FRMFMT_INTERLACED
-};
-
-/* PIXEL ORDER IN MEMORY from LSB to MSB */
-/* only applicable for 8-bit input mode */
-enum ccdc_pixorder {
- CCDC_PIXORDER_YCBYCR,
- CCDC_PIXORDER_CBYCRY,
-};
-
-enum ccdc_buftype {
- CCDC_BUFTYPE_FLD_INTERLEAVED,
- CCDC_BUFTYPE_FLD_SEPARATED
-};
-#endif
diff --git a/include/media/davinci/dm355_ccdc.h b/include/media/davinci/dm355_ccdc.h
deleted file mode 100644
index 1f3d00aa46d1..000000000000
--- a/include/media/davinci/dm355_ccdc.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- */
-#ifndef _DM355_CCDC_H
-#define _DM355_CCDC_H
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* enum for No of pixel per line to be avg. in Black Clamping */
-enum ccdc_sample_length {
- CCDC_SAMPLE_1PIXELS,
- CCDC_SAMPLE_2PIXELS,
- CCDC_SAMPLE_4PIXELS,
- CCDC_SAMPLE_8PIXELS,
- CCDC_SAMPLE_16PIXELS
-};
-
-/* enum for No of lines in Black Clamping */
-enum ccdc_sample_line {
- CCDC_SAMPLE_1LINES,
- CCDC_SAMPLE_2LINES,
- CCDC_SAMPLE_4LINES,
- CCDC_SAMPLE_8LINES,
- CCDC_SAMPLE_16LINES
-};
-
-/* enum for Alaw gamma width */
-enum ccdc_gamma_width {
- CCDC_GAMMA_BITS_13_4,
- CCDC_GAMMA_BITS_12_3,
- CCDC_GAMMA_BITS_11_2,
- CCDC_GAMMA_BITS_10_1,
- CCDC_GAMMA_BITS_09_0
-};
-
-enum ccdc_colpats {
- CCDC_RED,
- CCDC_GREEN_RED,
- CCDC_GREEN_BLUE,
- CCDC_BLUE
-};
-
-struct ccdc_col_pat {
- enum ccdc_colpats olop;
- enum ccdc_colpats olep;
- enum ccdc_colpats elop;
- enum ccdc_colpats elep;
-};
-
-enum ccdc_datasft {
- CCDC_DATA_NO_SHIFT,
- CCDC_DATA_SHIFT_1BIT,
- CCDC_DATA_SHIFT_2BIT,
- CCDC_DATA_SHIFT_3BIT,
- CCDC_DATA_SHIFT_4BIT,
- CCDC_DATA_SHIFT_5BIT,
- CCDC_DATA_SHIFT_6BIT
-};
-
-enum ccdc_data_size {
- CCDC_DATA_16BITS,
- CCDC_DATA_15BITS,
- CCDC_DATA_14BITS,
- CCDC_DATA_13BITS,
- CCDC_DATA_12BITS,
- CCDC_DATA_11BITS,
- CCDC_DATA_10BITS,
- CCDC_DATA_8BITS
-};
-enum ccdc_mfilt1 {
- CCDC_NO_MEDIAN_FILTER1,
- CCDC_AVERAGE_FILTER1,
- CCDC_MEDIAN_FILTER1
-};
-
-enum ccdc_mfilt2 {
- CCDC_NO_MEDIAN_FILTER2,
- CCDC_AVERAGE_FILTER2,
- CCDC_MEDIAN_FILTER2
-};
-
-/* structure for ALaw */
-struct ccdc_a_law {
- /* Enable/disable A-Law */
- unsigned char enable;
- /* Gamma Width Input */
- enum ccdc_gamma_width gamma_wd;
-};
-
-/* structure for Black Clamping */
-struct ccdc_black_clamp {
- /* only if bClampEnable is TRUE */
- unsigned char b_clamp_enable;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_length sample_pixel;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_line sample_ln;
- /* only if bClampEnable is TRUE */
- unsigned short start_pixel;
- /* only if bClampEnable is FALSE */
- unsigned short sgain;
- unsigned short dc_sub;
-};
-
-/* structure for Black Level Compensation */
-struct ccdc_black_compensation {
- /* Constant value to subtract from Red component */
- unsigned char r;
- /* Constant value to subtract from Gr component */
- unsigned char gr;
- /* Constant value to subtract from Blue component */
- unsigned char b;
- /* Constant value to subtract from Gb component */
- unsigned char gb;
-};
-
-struct ccdc_float {
- int integer;
- unsigned int decimal;
-};
-
-#define CCDC_CSC_COEFF_TABLE_SIZE 16
-/* structure for color space converter */
-struct ccdc_csc {
- unsigned char enable;
- /*
- * S8Q5. Use 2 decimal precision, user values range from -3.00 to 3.99.
- * example - to use 1.03, set integer part as 1, and decimal part as 3
- * to use -1.03, set integer part as -1 and decimal part as 3
- */
- struct ccdc_float coeff[CCDC_CSC_COEFF_TABLE_SIZE];
-};
-
-/* Structures for Vertical Defect Correction*/
-enum ccdc_vdf_csl {
- CCDC_VDF_NORMAL,
- CCDC_VDF_HORZ_INTERPOL_SAT,
- CCDC_VDF_HORZ_INTERPOL
-};
-
-enum ccdc_vdf_cuda {
- CCDC_VDF_WHOLE_LINE_CORRECT,
- CCDC_VDF_UPPER_DISABLE
-};
-
-enum ccdc_dfc_mwr {
- CCDC_DFC_MWR_WRITE_COMPLETE,
- CCDC_DFC_WRITE_REG
-};
-
-enum ccdc_dfc_mrd {
- CCDC_DFC_READ_COMPLETE,
- CCDC_DFC_READ_REG
-};
-
-enum ccdc_dfc_ma_rst {
- CCDC_DFC_INCR_ADDR,
- CCDC_DFC_CLR_ADDR
-};
-
-enum ccdc_dfc_mclr {
- CCDC_DFC_CLEAR_COMPLETE,
- CCDC_DFC_CLEAR
-};
-
-struct ccdc_dft_corr_ctl {
- enum ccdc_vdf_csl vdfcsl;
- enum ccdc_vdf_cuda vdfcuda;
- unsigned int vdflsft;
-};
-
-struct ccdc_dft_corr_mem_ctl {
- enum ccdc_dfc_mwr dfcmwr;
- enum ccdc_dfc_mrd dfcmrd;
- enum ccdc_dfc_ma_rst dfcmarst;
- enum ccdc_dfc_mclr dfcmclr;
-};
-
-#define CCDC_DFT_TABLE_SIZE 16
-/*
- * Main Structure for vertical defect correction. Vertical defect
- * correction can correct up to 16 defects if defects less than 16
- * then pad the rest with 0
- */
-struct ccdc_vertical_dft {
- unsigned char ver_dft_en;
- unsigned char gen_dft_en;
- unsigned int saturation_ctl;
- struct ccdc_dft_corr_ctl dft_corr_ctl;
- struct ccdc_dft_corr_mem_ctl dft_corr_mem_ctl;
- int table_size;
- unsigned int dft_corr_horz[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_vert[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub1[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub2[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub3[CCDC_DFT_TABLE_SIZE];
-};
-
-struct ccdc_data_offset {
- unsigned char horz_offset;
- unsigned char vert_offset;
-};
-
-/*
- * Structure for CCDC configuration parameters for raw capture mode passed
- * by application
- */
-struct ccdc_config_params_raw {
- /* data shift to be applied before storing */
- enum ccdc_datasft datasft;
- /* data size value from 8 to 16 bits */
- enum ccdc_data_size data_sz;
- /* median filter for sdram */
- enum ccdc_mfilt1 mfilt1;
- enum ccdc_mfilt2 mfilt2;
- /* low pass filter enable/disable */
- unsigned char lpf_enable;
- /* Threshold of median filter */
- int med_filt_thres;
- /*
- * horz and vertical data offset. Applicable for defect correction
- * and lsc
- */
- struct ccdc_data_offset data_offset;
- /* Structure for Optional A-Law */
- struct ccdc_a_law alaw;
- /* Structure for Optical Black Clamp */
- struct ccdc_black_clamp blk_clamp;
- /* Structure for Black Compensation */
- struct ccdc_black_compensation blk_comp;
- /* structure for vertical Defect Correction Module Configuration */
- struct ccdc_vertical_dft vertical_dft;
- /* structure for color space converter Module Configuration */
- struct ccdc_csc csc;
- /* color patterns for Bayer capture */
- struct ccdc_col_pat col_pat_field0;
- struct ccdc_col_pat col_pat_field1;
-};
-
-#ifdef __KERNEL__
-#include <linux/io.h>
-
-#define CCDC_WIN_PAL {0, 0, 720, 576}
-#define CCDC_WIN_VGA {0, 0, 640, 480}
-
-struct ccdc_params_ycbcr {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* enable BT.656 embedded sync mode */
- int bt656_enable;
- /* cb:y:cr:y or y:cb:y:cr in memory */
- enum ccdc_pixorder pix_order;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
-};
-
-/* Gain applied to Raw Bayer data */
-struct ccdc_gain {
- unsigned short r_ye;
- unsigned short gr_cy;
- unsigned short gb_g;
- unsigned short b_mg;
-};
-
-/* Structure for CCDC configuration parameters for raw capture mode */
-struct ccdc_params_raw {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
- /* Gain values */
- struct ccdc_gain gain;
- /* offset */
- unsigned int ccdc_offset;
- /* horizontal flip enable */
- unsigned char horz_flip_enable;
- /*
- * enable to store the image in inverse order in memory
- * (bottom to top)
- */
- unsigned char image_invert_enable;
- /* Configurable part of raw data */
- struct ccdc_config_params_raw config_params;
-};
-
-#endif
-#endif /* DM355_CCDC_H */
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
deleted file mode 100644
index c20dba3d76d6..000000000000
--- a/include/media/davinci/dm644x_ccdc.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- */
-#ifndef _DM644X_CCDC_H
-#define _DM644X_CCDC_H
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* enum for number of pixels per line to be averaged in Black Clamping */
-enum ccdc_sample_length {
- CCDC_SAMPLE_1PIXELS,
- CCDC_SAMPLE_2PIXELS,
- CCDC_SAMPLE_4PIXELS,
- CCDC_SAMPLE_8PIXELS,
- CCDC_SAMPLE_16PIXELS
-};
-
-/* enum for number of lines in Black Clamping */
-enum ccdc_sample_line {
- CCDC_SAMPLE_1LINES,
- CCDC_SAMPLE_2LINES,
- CCDC_SAMPLE_4LINES,
- CCDC_SAMPLE_8LINES,
- CCDC_SAMPLE_16LINES
-};
-
-/* enum for Alaw gamma width */
-enum ccdc_gamma_width {
- CCDC_GAMMA_BITS_15_6, /* use bits 15-6 for gamma */
- CCDC_GAMMA_BITS_14_5,
- CCDC_GAMMA_BITS_13_4,
- CCDC_GAMMA_BITS_12_3,
- CCDC_GAMMA_BITS_11_2,
- CCDC_GAMMA_BITS_10_1,
- CCDC_GAMMA_BITS_09_0 /* use bits 9-0 for gamma */
-};
-
-/* returns the highest bit used for the gamma */
-static inline u8 ccdc_gamma_width_max_bit(enum ccdc_gamma_width width)
-{
- return 15 - width;
-}
-
-enum ccdc_data_size {
- CCDC_DATA_16BITS,
- CCDC_DATA_15BITS,
- CCDC_DATA_14BITS,
- CCDC_DATA_13BITS,
- CCDC_DATA_12BITS,
- CCDC_DATA_11BITS,
- CCDC_DATA_10BITS,
- CCDC_DATA_8BITS
-};
-
-/* returns the highest bit used for this data size */
-static inline u8 ccdc_data_size_max_bit(enum ccdc_data_size sz)
-{
- return sz == CCDC_DATA_8BITS ? 7 : 15 - sz;
-}
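Both helpers map an enum value to the most significant bit it occupies (each enum counts down from bit 15). A sketch of the kind of sanity check a driver could build on them follows; whether the driver enforces this exact rule is an assumption.

/* Sketch: an A-Law gamma window must not use bits above the
 * configured data size (policy assumed for illustration;
 * -EINVAL requires linux/errno.h). */
static int ccdc_check_alaw_width(enum ccdc_gamma_width wd,
				 enum ccdc_data_size sz)
{
	if (ccdc_gamma_width_max_bit(wd) > ccdc_data_size_max_bit(sz))
		return -EINVAL;	/* e.g. bits 15-6 with 10-bit data */
	return 0;
}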
-
-/* structure for ALaw */
-struct ccdc_a_law {
- /* Enable/disable A-Law */
- unsigned char enable;
- /* Gamma Width Input */
- enum ccdc_gamma_width gamma_wd;
-};
-
-/* structure for Black Clamping */
-struct ccdc_black_clamp {
- unsigned char enable;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_length sample_pixel;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_line sample_ln;
- /* only if bClampEnable is TRUE */
- unsigned short start_pixel;
- /* only if bClampEnable is TRUE */
- unsigned short sgain;
- /* only if bClampEnable is FALSE */
- unsigned short dc_sub;
-};
-
-/* structure for Black Level Compensation */
-struct ccdc_black_compensation {
- /* Constant value to subtract from Red component */
- char r;
- /* Constant value to subtract from Gr component */
- char gr;
- /* Constant value to subtract from Blue component */
- char b;
- /* Constant value to subtract from Gb component */
- char gb;
-};
-
-/* Structure for CCDC configuration parameters for raw capture mode passed
- * by application
- */
-struct ccdc_config_params_raw {
- /* data size value from 8 to 16 bits */
- enum ccdc_data_size data_sz;
- /* Structure for Optional A-Law */
- struct ccdc_a_law alaw;
- /* Structure for Optical Black Clamp */
- struct ccdc_black_clamp blk_clamp;
- /* Structure for Black Compensation */
- struct ccdc_black_compensation blk_comp;
-};
-
-
-#ifdef __KERNEL__
-#include <linux/io.h>
-/* Define to enable/disable video port */
-#define FP_NUM_BYTES 4
-/* Define for extra pixel/line and extra lines/frame */
-#define NUM_EXTRAPIXELS 8
-#define NUM_EXTRALINES 8
-
-/* settings for commonly used video formats */
-#define CCDC_WIN_PAL {0, 0, 720, 576}
-/* ntsc square pixel */
-#define CCDC_WIN_VGA {0, 0, (640 + NUM_EXTRAPIXELS), (480 + NUM_EXTRALINES)}
-
-/* Structure for CCDC configuration parameters for raw capture mode */
-struct ccdc_params_raw {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
- /*
- * enable to store the image in inverse
- * order in memory(bottom to top)
- */
- unsigned char image_invert_enable;
- /* configurable parameters */
- struct ccdc_config_params_raw config_params;
-};
-
-struct ccdc_params_ycbcr {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* enable BT.656 embedded sync mode */
- int bt656_enable;
- /* cb:y:cr:y or y:cb:y:cr in memory */
- enum ccdc_pixorder pix_order;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
-};
-#endif
-#endif /* _DM644X_CCDC_H */
diff --git a/include/media/davinci/isif.h b/include/media/davinci/isif.h
deleted file mode 100644
index e66589c4022d..000000000000
--- a/include/media/davinci/isif.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * isif header file
- */
-#ifndef _ISIF_H
-#define _ISIF_H
-
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* isif float type S8Q8/U8Q8 */
-struct isif_float_8 {
- /* 8 bit integer part */
- __u8 integer;
- /* 8 bit decimal part */
- __u8 decimal;
-};
-
-/* isif float type U16Q16/S16Q16 */
-struct isif_float_16 {
- /* 16 bit integer part */
- __u16 integer;
- /* 16 bit decimal part */
- __u16 decimal;
-};
-
-/************************************************************************
- * Vertical Defect Correction parameters
- ***********************************************************************/
-/* Defect Correction (DFC) table entry */
-struct isif_vdfc_entry {
- /* vertical position of defect */
- __u16 pos_vert;
- /* horizontal position of defect */
- __u16 pos_horz;
- /*
- * Defect level of Vertical line defect position. This is subtracted
- * from the data at the defect position
- */
- __u8 level_at_pos;
- /*
- * Defect level of the pixels above the vertical line defect.
- * This is subtracted from the data
- */
- __u8 level_up_pixels;
- /*
- * Defect level of the pixels below the vertical line defect.
- * This is subtracted from the data
- */
- __u8 level_low_pixels;
-};
-
-#define ISIF_VDFC_TABLE_SIZE 8
-struct isif_dfc {
- /* enable vertical defect correction */
- __u8 en;
- /* Defect level subtraction. Just fed through if saturating */
-#define ISIF_VDFC_NORMAL 0
- /*
- * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
- * if data saturating
- */
-#define ISIF_VDFC_HORZ_INTERPOL_IF_SAT 1
- /* Horizontal interpolation (((i-2)+(i+2))/2) */
-#define ISIF_VDFC_HORZ_INTERPOL 2
- /* one of the vertical defect correction modes above */
- __u8 corr_mode;
- /* 0 - whole line corrected, 1 - pixels above the defect not corrected */
- __u8 corr_whole_line;
-#define ISIF_VDFC_NO_SHIFT 0
-#define ISIF_VDFC_SHIFT_1 1
-#define ISIF_VDFC_SHIFT_2 2
-#define ISIF_VDFC_SHIFT_3 3
-#define ISIF_VDFC_SHIFT_4 4
- /*
- * defect level shift value. level_at_pos, level_upper_pos,
- * and level_lower_pos can be shifted up by this value. Choose
- * one of the values above
- */
- __u8 def_level_shift;
- /* defect saturation level */
- __u16 def_sat_level;
- /* number of vertical defects. Max is ISIF_VDFC_TABLE_SIZE */
- __u16 num_vdefects;
- /* VDFC table ptr */
- struct isif_vdfc_entry table[ISIF_VDFC_TABLE_SIZE];
-};
-
-struct isif_horz_bclamp {
-
- /* Horizontal clamp disabled. Only vertical clamp value is subtracted */
-#define ISIF_HORZ_BC_DISABLE 0
- /*
- * Horizontal clamp value is calculated and subtracted from image data
- * along with vertical clamp value
- */
-#define ISIF_HORZ_BC_CLAMP_CALC_ENABLED 1
- /*
- * Horizontal clamp value calculated from previous image is subtracted
- * from image data along with vertical clamp value.
- */
-#define ISIF_HORZ_BC_CLAMP_NOT_UPDATED 2
- /* horizontal clamp mode. One of the values above */
- __u8 mode;
- /*
- * pixel value limit enable.
- * 0 - limit disabled
- * 1 - pixel value limited to 1023
- */
- __u8 clamp_pix_limit;
- /* Select Most left window for bc calculation */
-#define ISIF_SEL_MOST_LEFT_WIN 0
- /* Select Most right window for bc calculation */
-#define ISIF_SEL_MOST_RIGHT_WIN 1
- /* Select most left or right window for clamp val calculation */
- __u8 base_win_sel_calc;
- /* Window count per color for calculation. range 1-32 */
- __u8 win_count_calc;
- /* Window start position - horizontal for calculation. 0 - 8191 */
- __u16 win_start_h_calc;
- /* Window start position - vertical for calculation 0 - 8191 */
- __u16 win_start_v_calc;
-#define ISIF_HORZ_BC_SZ_H_2PIXELS 0
-#define ISIF_HORZ_BC_SZ_H_4PIXELS 1
-#define ISIF_HORZ_BC_SZ_H_8PIXELS 2
-#define ISIF_HORZ_BC_SZ_H_16PIXELS 3
- /* Width of the sample window in pixels for calculation */
- __u8 win_h_sz_calc;
-#define ISIF_HORZ_BC_SZ_V_32PIXELS 0
-#define ISIF_HORZ_BC_SZ_V_64PIXELS 1
-#define ISIF_HORZ_BC_SZ_V_128PIXELS 2
-#define ISIF_HORZ_BC_SZ_V_256PIXELS 3
- /* Height of the sample window in pixels for calculation */
- __u8 win_v_sz_calc;
-};
-
-/************************************************************************
- * Black Clamp parameters
- ***********************************************************************/
-struct isif_vert_bclamp {
- /* Reset value used is the clamp value calculated */
-#define ISIF_VERT_BC_USE_HORZ_CLAMP_VAL 0
- /* Reset value used is reset_clamp_val configured */
-#define ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL 1
- /* No update, previous image value is used */
-#define ISIF_VERT_BC_NO_UPDATE 2
- /*
- * Reset value selector for vertical clamp calculation. Use one of
- * the above values
- */
- __u8 reset_val_sel;
- /* U8Q8. Line average coefficient used in vertical clamp calculation */
- __u8 line_ave_coef;
- /* Height of the optical black region for calculation */
- __u16 ob_v_sz_calc;
- /* Optical black region start position - horizontal. 0 - 8191 */
- __u16 ob_start_h;
- /* Optical black region start position - vertical 0 - 8191 */
- __u16 ob_start_v;
-};
-
-struct isif_black_clamp {
- /*
- * This offset value is added irrespective of the clamp enable status.
- * S13
- */
- __u16 dc_offset;
- /*
- * Enable black/digital clamp value to be subtracted from the image data
- */
- __u8 en;
- /*
- * black clamp mode. same/separate clamp for 4 colors
- * 0 - disable - same clamp value for all colors
- * 1 - clamp value calculated separately for all colors
- */
- __u8 bc_mode_color;
- /* Vertical start position for bc subtraction */
- __u16 vert_start_sub;
- /* Black clamp for horizontal direction */
- struct isif_horz_bclamp horz;
- /* Black clamp for vertical direction */
- struct isif_vert_bclamp vert;
-};
-
-/*************************************************************************
-** Color Space Conversion (CSC)
-*************************************************************************/
-#define ISIF_CSC_NUM_COEFF 16
-struct isif_color_space_conv {
- /* Enable color space conversion */
- __u8 en;
- /*
- * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
- * so forth
- */
- struct isif_float_8 coeff[ISIF_CSC_NUM_COEFF];
-};
-
-
-/*************************************************************************
-** Black Compensation parameters
-*************************************************************************/
-struct isif_black_comp {
- /* Comp for Red */
- __s8 r_comp;
- /* Comp for Gr */
- __s8 gr_comp;
- /* Comp for Blue */
- __s8 b_comp;
- /* Comp for Gb */
- __s8 gb_comp;
-};
-
-/*************************************************************************
-** Gain parameters
-*************************************************************************/
-struct isif_gain {
- /* Gain for Red or ye */
- struct isif_float_16 r_ye;
- /* Gain for Gr or cy */
- struct isif_float_16 gr_cy;
- /* Gain for Gb or g */
- struct isif_float_16 gb_g;
- /* Gain for Blue or mg */
- struct isif_float_16 b_mg;
-};
-
-#define ISIF_LINEAR_TAB_SIZE 192
-/*************************************************************************
-** Linearization parameters
-*************************************************************************/
-struct isif_linearize {
- /* Enable or Disable linearization of data */
- __u8 en;
- /* Shift value applied */
- __u8 corr_shft;
- /* scale factor applied U11Q10 */
- struct isif_float_16 scale_fact;
- /* Size of the linear table */
- __u16 table[ISIF_LINEAR_TAB_SIZE];
-};
-
-/* Color patterns */
-#define ISIF_RED 0
-#define ISIF_GREEN_RED 1
-#define ISIF_GREEN_BLUE 2
-#define ISIF_BLUE 3
-struct isif_col_pat {
- __u8 olop;
- __u8 olep;
- __u8 elop;
- __u8 elep;
-};
-
-/*************************************************************************
-** Data formatter parameters
-*************************************************************************/
-struct isif_fmtplen {
- /*
- * number of program entries for SET0, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen0;
- /*
- * number of program entries for SET1, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen1;
- /*
- * number of program entries for SET2, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen2;
- /*
- * number of program entries for SET3, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen3;
-};
-
-struct isif_fmt_cfg {
-#define ISIF_SPLIT 0
-#define ISIF_COMBINE 1
- /* Split or combine or line alternate */
- __u8 fmtmode;
- /* enable or disable line alternating mode */
- __u8 ln_alter_en;
-#define ISIF_1LINE 0
-#define ISIF_2LINES 1
-#define ISIF_3LINES 2
-#define ISIF_4LINES 3
- /* Split/combine line number */
- __u8 lnum;
- /* Address increment Range 1 - 16 */
- __u8 addrinc;
-};
-
-struct isif_fmt_addr_ptr {
- /* Initial address */
- __u32 init_addr;
- /* output line number */
-#define ISIF_1STLINE 0
-#define ISIF_2NDLINE 1
-#define ISIF_3RDLINE 2
-#define ISIF_4THLINE 3
- __u8 out_line;
-};
-
-struct isif_fmtpgm_ap {
- /* program address pointer */
- __u8 pgm_aptr;
- /* program address increment or decrement */
- __u8 pgmupdt;
-};
-
-struct isif_data_formatter {
- /* Enable/Disable data formatter */
- __u8 en;
- /* data formatter configuration */
- struct isif_fmt_cfg cfg;
- /* Formatter program entries length */
- struct isif_fmtplen plen;
- /* first pixel in a line fed to formatter */
- __u16 fmtrlen;
- /* HD interval for output line. Only valid when split line */
- __u16 fmthcnt;
- /* formatter address pointers */
- struct isif_fmt_addr_ptr fmtaddr_ptr[16];
- /* program enable/disable */
- __u8 pgm_en[32];
- /* program address pointers */
- struct isif_fmtpgm_ap fmtpgm_ap[32];
-};
-
-struct isif_df_csc {
- /* Color Space Conversion configuration, 0 - csc, 1 - df */
- __u8 df_or_csc;
- /* csc configuration valid if df_or_csc is 0 */
- struct isif_color_space_conv csc;
- /* data formatter configuration valid if df_or_csc is 1 */
- struct isif_data_formatter df;
- /* start pixel in a line at the input */
- __u32 start_pix;
- /* number of pixels in input line */
- __u32 num_pixels;
- /* start line at the input */
- __u32 start_line;
- /* number of lines at the input */
- __u32 num_lines;
-};
-
-struct isif_gain_offsets_adj {
- /* Gain adjustment per color */
- struct isif_gain gain;
- /* Offset adjustment */
- __u16 offset;
- /* Enable or Disable Gain adjustment for SDRAM data */
- __u8 gain_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- __u8 gain_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- __u8 gain_h3a_en;
- /* Enable or Disable Gain adjustment for SDRAM data */
- __u8 offset_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- __u8 offset_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- __u8 offset_h3a_en;
-};
-
-struct isif_cul {
- /* Horizontal Cull pattern for odd lines */
- __u8 hcpat_odd;
- /* Horizontal Cull pattern for even lines */
- __u8 hcpat_even;
- /* Vertical Cull pattern */
- __u8 vcpat;
- /* Enable or disable lpf. Apply when cull is enabled */
- __u8 en_lpf;
-};
-
-struct isif_compress {
-#define ISIF_ALAW 0
-#define ISIF_DPCM 1
-#define ISIF_NO_COMPRESSION 2
- /* Compression Algorithm used */
- __u8 alg;
- /* Choose Predictor1 for DPCM compression */
-#define ISIF_DPCM_PRED1 0
- /* Choose Predictor2 for DPCM compression */
-#define ISIF_DPCM_PRED2 1
- /* Predictor for DPCM compression */
- __u8 pred;
-};
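For instance, requesting DPCM compression with the first predictor combines the two pairs of macros above:

/* Sketch: DPCM compression with predictor 1 */
struct isif_compress comp = {
	.alg  = ISIF_DPCM,
	.pred = ISIF_DPCM_PRED1,
};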
-
-/* all parameters in this struct are provided by userland */
-struct isif_config_params_raw {
- /* Linearization parameters for image sensor data input */
- struct isif_linearize linearize;
- /* Data formatter or CSC */
- struct isif_df_csc df_csc;
- /* Defect Pixel Correction (DFC) configuration */
- struct isif_dfc dfc;
- /* Black/Digital Clamp configuration */
- struct isif_black_clamp bclamp;
- /* Gain, offset adjustments */
- struct isif_gain_offsets_adj gain_offset;
- /* Culling */
- struct isif_cul culling;
- /* A-Law and DPCM compression options */
- struct isif_compress compress;
- /* horizontal offset for Gain/LSC/DFC */
- __u16 horz_offset;
- /* vertical offset for Gain/LSC/DFC */
- __u16 vert_offset;
- /* color pattern for field 0 */
- struct isif_col_pat col_pat_field0;
- /* color pattern for field 1 */
- struct isif_col_pat col_pat_field1;
-#define ISIF_NO_SHIFT 0
-#define ISIF_1BIT_SHIFT 1
-#define ISIF_2BIT_SHIFT 2
-#define ISIF_3BIT_SHIFT 3
-#define ISIF_4BIT_SHIFT 4
-#define ISIF_5BIT_SHIFT 5
-#define ISIF_6BIT_SHIFT 6
- /* Data shift applied before storing to SDRAM */
- __u8 data_shift;
- /* enable input test pattern generation */
- __u8 test_pat_gen;
-};
-
-#ifdef __KERNEL__
-struct isif_ycbcr_config {
- /* isif pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* isif frame format */
- enum ccdc_frmfmt frm_fmt;
- /* ISIF crop window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* isif pix order. Only used for ycbcr capture */
- enum ccdc_pixorder pix_order;
- /* isif buffer type. Only used for ycbcr capture */
- enum ccdc_buftype buf_type;
-};
-
-/* MSB of image data connected to sensor port */
-enum isif_data_msb {
- ISIF_BIT_MSB_15,
- ISIF_BIT_MSB_14,
- ISIF_BIT_MSB_13,
- ISIF_BIT_MSB_12,
- ISIF_BIT_MSB_11,
- ISIF_BIT_MSB_10,
- ISIF_BIT_MSB_9,
- ISIF_BIT_MSB_8,
- ISIF_BIT_MSB_7
-};
-
-enum isif_cfa_pattern {
- ISIF_CFA_PAT_MOSAIC,
- ISIF_CFA_PAT_STRIPE
-};
-
-struct isif_params_raw {
- /* isif pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* isif frame format */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* buffer type. Applicable for interlaced mode */
- enum ccdc_buftype buf_type;
- /* Gain values */
- struct isif_gain gain;
- /* cfa pattern */
- enum isif_cfa_pattern cfa_pat;
- /* Data MSB position */
- enum isif_data_msb data_msb;
- /* Enable horizontal flip */
- unsigned char horz_flip_en;
- /* Enable image invert vertically */
- unsigned char image_invert_en;
-
- /* all the userland-defined parameters */
- struct isif_config_params_raw config_params;
-};
-
-enum isif_data_pack {
- ISIF_PACK_16BIT,
- ISIF_PACK_12BIT,
- ISIF_PACK_8BIT
-};
-
-#define ISIF_WIN_NTSC {0, 0, 720, 480}
-#define ISIF_WIN_VGA {0, 0, 640, 480}
-
-#endif
-#endif
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
deleted file mode 100644
index e74a93475d21..000000000000
--- a/include/media/davinci/vpbe.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 Texas Instruments Inc
- */
-#ifndef _VPBE_H
-#define _VPBE_H
-
-#include <linux/videodev2.h>
-#include <linux/i2c.h>
-
-#include <media/v4l2-dev.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
-#include <media/davinci/vpbe_osd.h>
-#include <media/davinci/vpbe_venc.h>
-#include <media/davinci/vpbe_types.h>
-
-/* OSD configuration info */
-struct osd_config_info {
- char module_name[32];
-};
-
-struct vpbe_output {
- struct v4l2_output output;
- /*
- * If output capabilities include dv_timings, list supported timings
- * below
- */
- char *subdev_name;
- /*
- * default_mode identifies the default timings set at the venc or
- * external encoder.
- */
- char *default_mode;
- /*
- * Fields below are used for supporting multiple modes. For example,
- * LCD panel might support different modes and they are listed here.
- * Similarly for supporting external encoders, lcd controller port
- * requires a set of non-standard timing values to be listed here for
- * each supported mode since venc is used in non-standard timing mode
- * for interfacing with external encoder similar to configuring lcd
- * panel timings
- */
- unsigned int num_modes;
- struct vpbe_enc_mode_info *modes;
- /*
- * Bus configuration goes here for external encoders. Some encoders
- * may require multiple interface types for each of the output. For
- * example, SD modes would use YCC8 whereas HD modes would use YCC16.
- * Not sure if this is needed on a per mode basis instead of per
- * output basis. If per mode is needed, we may have to move this to
- * mode_info structure
- */
- u32 if_params;
-};
-
-/* encoder configuration info */
-struct encoder_config_info {
- char module_name[32];
- /* Is this an i2c device? */
- unsigned int is_i2c:1;
- /* i2c subdevice board info */
- struct i2c_board_info board_info;
-};
-
-/* amplifier configuration info */
-struct amp_config_info {
- char module_name[32];
- /* Is this an i2c device? */
- unsigned int is_i2c:1;
- /* i2c subdevice board info */
- struct i2c_board_info board_info;
-};
-
-/* structure for defining vpbe display subsystem components */
-struct vpbe_config {
- char module_name[32];
- /* i2c bus adapter no */
- int i2c_adapter_id;
- struct osd_config_info osd;
- struct encoder_config_info venc;
- /* external encoder information goes here */
- int num_ext_encoders;
- struct encoder_config_info *ext_encoders;
- /* amplifier information goes here */
- struct amp_config_info *amp;
- unsigned int num_outputs;
- /* Order is venc outputs followed by LCD and then external encoders */
- struct vpbe_output *outputs;
-};
-
-struct vpbe_device;
-
-struct vpbe_device_ops {
- /* Enumerate the outputs */
- int (*enum_outputs)(struct vpbe_device *vpbe_dev,
- struct v4l2_output *output);
-
- /* Set output to the given index */
- int (*set_output)(struct vpbe_device *vpbe_dev,
- int index);
-
- /* Get current output */
- unsigned int (*get_output)(struct vpbe_device *vpbe_dev);
-
- /* Set DV preset at current output */
- int (*s_dv_timings)(struct vpbe_device *vpbe_dev,
- struct v4l2_dv_timings *dv_timings);
-
- /* Get DV presets supported at the output */
- int (*g_dv_timings)(struct vpbe_device *vpbe_dev,
- struct v4l2_dv_timings *dv_timings);
-
- /* Enumerate the DV Presets supported at the output */
- int (*enum_dv_timings)(struct vpbe_device *vpbe_dev,
- struct v4l2_enum_dv_timings *timings_info);
-
- /* Set std at the output */
- int (*s_std)(struct vpbe_device *vpbe_dev, v4l2_std_id std_id);
-
- /* Get the current std at the output */
- int (*g_std)(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id);
-
- /* initialize the device */
- int (*initialize)(struct device *dev, struct vpbe_device *vpbe_dev);
-
- /* De-initialize the device */
- void (*deinitialize)(struct device *dev, struct vpbe_device *vpbe_dev);
-
- /* Get the current mode info */
- int (*get_mode_info)(struct vpbe_device *vpbe_dev,
- struct vpbe_enc_mode_info*);
-
- /*
- * Set the current mode in the encoder. Alternate way of setting
- * standard or DV preset or custom timings in the encoder
- */
- int (*set_mode)(struct vpbe_device *vpbe_dev,
- struct vpbe_enc_mode_info*);
- /* Power management operations */
- int (*suspend)(struct vpbe_device *vpbe_dev);
- int (*resume)(struct vpbe_device *vpbe_dev);
-};
-
-/* struct for vpbe device */
-struct vpbe_device {
- /* V4l2 device */
- struct v4l2_device v4l2_dev;
- /* vpbe display controller cfg */
- struct vpbe_config *cfg;
- /* parent device */
- struct device *pdev;
- /* external encoder v4l2 sub devices */
- struct v4l2_subdev **encoders;
- /* current encoder index */
- int current_sd_index;
- /* external amplifier v4l2 subdevice */
- struct v4l2_subdev *amp;
- struct mutex lock;
- /* device initialized */
- int initialized;
- /* vpbe dac clock */
- struct clk *dac_clk;
- /* osd_device pointer */
- struct osd_state *osd_device;
- /* venc device pointer */
- struct venc_platform_data *venc_device;
- /*
- * Fields below are accessed by users of vpbe_device, unlike the
- * fields above
- */
-
- /* current output */
- int current_out_index;
- /* lock used by caller to do atomic operation on vpbe device */
- /* current timings set in the controller */
- struct vpbe_enc_mode_info current_timings;
- /* venc sub device */
- struct v4l2_subdev *venc;
- /* device operations below */
- struct vpbe_device_ops ops;
-};
-
-#endif
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
deleted file mode 100644
index 6d2a93740130..000000000000
--- a/include/media/davinci/vpbe_display.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
- */
-#ifndef VPBE_DISPLAY_H
-#define VPBE_DISPLAY_H
-
-/* Header files */
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-fh.h>
-#include <media/videobuf2-v4l2.h>
-#include <media/videobuf2-dma-contig.h>
-#include <media/davinci/vpbe_types.h>
-#include <media/davinci/vpbe_osd.h>
-#include <media/davinci/vpbe.h>
-
-#define VPBE_DISPLAY_MAX_DEVICES 2
-
-enum vpbe_display_device_id {
- VPBE_DISPLAY_DEVICE_0,
- VPBE_DISPLAY_DEVICE_1
-};
-
-#define VPBE_DISPLAY_DRV_NAME "vpbe-display"
-
-#define VPBE_DISPLAY_MAJOR_RELEASE 1
-#define VPBE_DISPLAY_MINOR_RELEASE 0
-#define VPBE_DISPLAY_BUILD 1
-#define VPBE_DISPLAY_VERSION_CODE ((VPBE_DISPLAY_MAJOR_RELEASE << 16) | \
- (VPBE_DISPLAY_MINOR_RELEASE << 8) | \
- VPBE_DISPLAY_BUILD)
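The version code packs the release numbers into one integer (major in bits 16 and up, minor in bits 8-15, build in bits 0-7), so release 1.0 build 1 encodes as 0x010001 and can be unpacked with shifts:

/* Unpacking the version code; 1.0 build 1 == 0x010001 */
unsigned int major = VPBE_DISPLAY_VERSION_CODE >> 16;
unsigned int minor = (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff;
unsigned int build = VPBE_DISPLAY_VERSION_CODE & 0xff;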
-
-#define VPBE_DISPLAY_VALID_FIELD(field) ((V4L2_FIELD_NONE == field) || \
- (V4L2_FIELD_ANY == field) || (V4L2_FIELD_INTERLACED == field))
-
-/* Exp ratio numerator and denominator constants */
-#define VPBE_DISPLAY_H_EXP_RATIO_N 9
-#define VPBE_DISPLAY_H_EXP_RATIO_D 8
-#define VPBE_DISPLAY_V_EXP_RATIO_N 6
-#define VPBE_DISPLAY_V_EXP_RATIO_D 5
-
-/* Zoom multiplication factor */
-#define VPBE_DISPLAY_ZOOM_4X 4
-#define VPBE_DISPLAY_ZOOM_2X 2
-
-/* Structures */
-struct display_layer_info {
- int enable;
- /* Layer ID used by Display Manager */
- enum osd_layer id;
- struct osd_layer_config config;
- enum osd_zoom_factor h_zoom;
- enum osd_zoom_factor v_zoom;
- enum osd_h_exp_ratio h_exp;
- enum osd_v_exp_ratio v_exp;
-};
-
-struct vpbe_disp_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
-};
-
-/* vpbe display object structure */
-struct vpbe_layer {
- /* Pointer to the vpbe_display */
- struct vpbe_display *disp_dev;
- /* Pointer to the current v4l2_buffer */
- struct vpbe_disp_buffer *cur_frm;
- /* Pointer to the next v4l2_buffer */
- struct vpbe_disp_buffer *next_frm;
- /* videobuf specific parameters
- * Buffer queue used in video-buf
- */
- struct vb2_queue buffer_queue;
- /* Queue of filled frames */
- struct list_head dma_queue;
- /* Used in video-buf */
- spinlock_t irqlock;
- /* V4l2 specific parameters */
- /* Identifies video device for this layer */
- struct video_device video_dev;
- /* Used to store pixel format */
- struct v4l2_pix_format pix_fmt;
- enum v4l2_field buf_field;
- /* Video layer configuration params */
- struct display_layer_info layer_info;
- /* vpbe specific parameters
- * enable window for display
- */
- unsigned char window_enable;
- /* number of open instances of the layer */
- unsigned int usrs;
- /* Indicates id of the field which is being displayed */
- unsigned int field_id;
- /* Identifies device object */
- enum vpbe_display_device_id device_id;
- /* mutex used by the v4l2 core to serialize ioctl ops */
- struct mutex opslock;
- u8 layer_first_int;
-};
-
-/* vpbe device structure */
-struct vpbe_display {
- /* layer specific parameters */
- /* lock for isr updates to buf layers*/
- spinlock_t dma_queue_lock;
- /* C-Plane offset from start of y-plane */
- unsigned int cbcr_ofst;
- struct vpbe_layer *dev[VPBE_DISPLAY_MAX_DEVICES];
- struct vpbe_device *vpbe_dev;
- struct osd_state *osd_device;
-};
-
-struct buf_config_params {
- unsigned char min_numbuffers;
- unsigned char numbuffers[VPBE_DISPLAY_MAX_DEVICES];
- unsigned int min_bufsize[VPBE_DISPLAY_MAX_DEVICES];
- unsigned int layer_bufsize[VPBE_DISPLAY_MAX_DEVICES];
-};
-
-#endif /* VPBE_DISPLAY_H */
diff --git a/include/media/davinci/vpbe_osd.h b/include/media/davinci/vpbe_osd.h
deleted file mode 100644
index e1b1c76aa50f..000000000000
--- a/include/media/davinci/vpbe_osd.h
+++ /dev/null
@@ -1,382 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2007-2009 Texas Instruments Inc
- * Copyright (C) 2007 MontaVista Software, Inc.
- *
- * Andy Lowe (alowe@mvista.com), MontaVista Software
- * - Initial version
- * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
- * - ported to sub device interface
- */
-#ifndef _OSD_H
-#define _OSD_H
-
-#include <media/davinci/vpbe_types.h>
-
-#define DM644X_VPBE_OSD_SUBDEV_NAME "dm644x,vpbe-osd"
-#define DM365_VPBE_OSD_SUBDEV_NAME "dm365,vpbe-osd"
-#define DM355_VPBE_OSD_SUBDEV_NAME "dm355,vpbe-osd"
-
-/**
- * enum osd_layer
- * @WIN_OSD0: On-Screen Display Window 0
- * @WIN_VID0: Video Window 0
- * @WIN_OSD1: On-Screen Display Window 1
- * @WIN_VID1: Video Window 1
- *
- * Description:
- * An enumeration of the osd display layers.
- */
-enum osd_layer {
- WIN_OSD0,
- WIN_VID0,
- WIN_OSD1,
- WIN_VID1,
-};
-
-/**
- * enum osd_win_layer
- * @OSDWIN_OSD0: On-Screen Display Window 0
- * @OSDWIN_OSD1: On-Screen Display Window 1
- *
- * Description:
- * An enumeration of the OSD Window layers.
- */
-enum osd_win_layer {
- OSDWIN_OSD0,
- OSDWIN_OSD1,
-};
-
-/**
- * enum osd_pix_format
- * @PIXFMT_1BPP: 1-bit-per-pixel bitmap
- * @PIXFMT_2BPP: 2-bits-per-pixel bitmap
- * @PIXFMT_4BPP: 4-bits-per-pixel bitmap
- * @PIXFMT_8BPP: 8-bits-per-pixel bitmap
- * @PIXFMT_RGB565: 16-bits-per-pixel RGB565
- * @PIXFMT_YCbCrI: YUV 4:2:2
- * @PIXFMT_RGB888: 24-bits-per-pixel RGB888
- * @PIXFMT_YCrCbI: YUV 4:2:2 with chroma swap
- * @PIXFMT_NV12: YUV 4:2:0 planar
- * @PIXFMT_OSD_ATTR: OSD Attribute Window pixel format (4bpp)
- *
- * Description:
- * An enumeration of the DaVinci pixel formats.
- */
-enum osd_pix_format {
- PIXFMT_1BPP = 0,
- PIXFMT_2BPP,
- PIXFMT_4BPP,
- PIXFMT_8BPP,
- PIXFMT_RGB565,
- PIXFMT_YCBCRI,
- PIXFMT_RGB888,
- PIXFMT_YCRCBI,
- PIXFMT_NV12,
- PIXFMT_OSD_ATTR,
-};
-
-/**
- * enum osd_h_exp_ratio
- * @H_EXP_OFF: no expansion (1/1)
- * @H_EXP_9_OVER_8: 9/8 expansion ratio
- * @H_EXP_3_OVER_2: 3/2 expansion ratio
- *
- * Description:
- * An enumeration of the available horizontal expansion ratios.
- */
-enum osd_h_exp_ratio {
- H_EXP_OFF,
- H_EXP_9_OVER_8,
- H_EXP_3_OVER_2,
-};
-
-/**
- * enum osd_v_exp_ratio
- * @V_EXP_OFF: no expansion (1/1)
- * @V_EXP_6_OVER_5: 6/5 expansion ratio
- *
- * Description:
- * An enumeration of the available vertical expansion ratios.
- */
-enum osd_v_exp_ratio {
- V_EXP_OFF,
- V_EXP_6_OVER_5,
-};
-
-/**
- * enum osd_zoom_factor
- * @ZOOM_X1: no zoom (x1)
- * @ZOOM_X2: x2 zoom
- * @ZOOM_X4: x4 zoom
- *
- * Description:
- * An enumeration of the available zoom factors.
- */
-enum osd_zoom_factor {
- ZOOM_X1,
- ZOOM_X2,
- ZOOM_X4,
-};
-
-/**
- * enum osd_clut
- * @ROM_CLUT: ROM CLUT
- * @RAM_CLUT: RAM CLUT
- *
- * Description:
- * An enumeration of the available Color Lookup Tables (CLUTs).
- */
-enum osd_clut {
- ROM_CLUT,
- RAM_CLUT,
-};
-
-/**
- * enum osd_rom_clut
- * @ROM_CLUT0: Macintosh CLUT
- * @ROM_CLUT1: CLUT from DM270 and prior devices
- *
- * Description:
- * An enumeration of the ROM Color Lookup Table (CLUT) options.
- */
-enum osd_rom_clut {
- ROM_CLUT0,
- ROM_CLUT1,
-};
-
-/**
- * enum osd_blending_factor
- * @OSD_0_VID_8: OSD pixels are fully transparent
- * @OSD_1_VID_7: OSD pixels contribute 1/8, video pixels contribute 7/8
- * @OSD_2_VID_6: OSD pixels contribute 2/8, video pixels contribute 6/8
- * @OSD_3_VID_5: OSD pixels contribute 3/8, video pixels contribute 5/8
- * @OSD_4_VID_4: OSD pixels contribute 4/8, video pixels contribute 4/8
- * @OSD_5_VID_3: OSD pixels contribute 5/8, video pixels contribute 3/8
- * @OSD_6_VID_2: OSD pixels contribute 6/8, video pixels contribute 2/8
- * @OSD_8_VID_0: OSD pixels are fully opaque
- *
- * Description:
- * An enumeration of the DaVinci pixel blending factor options.
- */
-enum osd_blending_factor {
- OSD_0_VID_8,
- OSD_1_VID_7,
- OSD_2_VID_6,
- OSD_3_VID_5,
- OSD_4_VID_4,
- OSD_5_VID_3,
- OSD_6_VID_2,
- OSD_8_VID_0,
-};
-
-/**
- * enum osd_blink_interval
- * @BLINK_X1: blink interval is 1 vertical refresh cycle
- * @BLINK_X2: blink interval is 2 vertical refresh cycles
- * @BLINK_X3: blink interval is 3 vertical refresh cycles
- * @BLINK_X4: blink interval is 4 vertical refresh cycles
- *
- * Description:
- * An enumeration of the DaVinci pixel blinking interval options.
- */
-enum osd_blink_interval {
- BLINK_X1,
- BLINK_X2,
- BLINK_X3,
- BLINK_X4,
-};
-
-/**
- * enum osd_cursor_h_width
- * @H_WIDTH_1: horizontal line width is 1 pixel
- * @H_WIDTH_4: horizontal line width is 4 pixels
- * @H_WIDTH_8: horizontal line width is 8 pixels
- * @H_WIDTH_12: horizontal line width is 12 pixels
- * @H_WIDTH_16: horizontal line width is 16 pixels
- * @H_WIDTH_20: horizontal line width is 20 pixels
- * @H_WIDTH_24: horizontal line width is 24 pixels
- * @H_WIDTH_28: horizontal line width is 28 pixels
- */
-enum osd_cursor_h_width {
- H_WIDTH_1,
- H_WIDTH_4,
- H_WIDTH_8,
- H_WIDTH_12,
- H_WIDTH_16,
- H_WIDTH_20,
- H_WIDTH_24,
- H_WIDTH_28,
-};
-
-/**
- * enum osd_cursor_v_width
- * @V_WIDTH_1: vertical line width is 1 line
- * @V_WIDTH_2: vertical line width is 2 lines
- * @V_WIDTH_4: vertical line width is 4 lines
- * @V_WIDTH_6: vertical line width is 6 lines
- * @V_WIDTH_8: vertical line width is 8 lines
- * @V_WIDTH_10: vertical line width is 10 lines
- * @V_WIDTH_12: vertical line width is 12 lines
- * @V_WIDTH_14: vertical line width is 14 lines
- */
-enum osd_cursor_v_width {
- V_WIDTH_1,
- V_WIDTH_2,
- V_WIDTH_4,
- V_WIDTH_6,
- V_WIDTH_8,
- V_WIDTH_10,
- V_WIDTH_12,
- V_WIDTH_14,
-};
-
-/**
- * struct osd_cursor_config
- * @xsize: horizontal size in pixels
- * @ysize: vertical size in lines
- * @xpos: horizontal offset in pixels from the left edge of the display
- * @ypos: vertical offset in lines from the top of the display
- * @interlaced: Non-zero if the display is interlaced, or zero otherwise
- * @h_width: horizontal line width
- * @v_width: vertical line width
- * @clut: the CLUT selector (ROM or RAM) for the cursor color
- * @clut_index: an index into the CLUT for the cursor color
- *
- * Description:
- * A structure describing the configuration parameters of the hardware
- * rectangular cursor.
- */
-struct osd_cursor_config {
- unsigned xsize;
- unsigned ysize;
- unsigned xpos;
- unsigned ypos;
- int interlaced;
- enum osd_cursor_h_width h_width;
- enum osd_cursor_v_width v_width;
- enum osd_clut clut;
- unsigned char clut_index;
-};
-
-/**
- * struct osd_layer_config
- * @pixfmt: pixel format
- * @line_length: offset in bytes between start of each line in memory
- * @xsize: number of horizontal pixels displayed per line
- * @ysize: number of lines displayed
- * @xpos: horizontal offset in pixels from the left edge of the display
- * @ypos: vertical offset in lines from the top of the display
- * @interlaced: Non-zero if the display is interlaced, or zero otherwise
- *
- * Description:
- * A structure describing the configuration parameters of an On-Screen Display
- * (OSD) or video layer related to how the image is stored in memory.
- * @line_length must be a multiple of the cache line size (32 bytes).
- */
-struct osd_layer_config {
- enum osd_pix_format pixfmt;
- unsigned line_length;
- unsigned xsize;
- unsigned ysize;
- unsigned xpos;
- unsigned ypos;
- int interlaced;
-};
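Because line_length must be a 32-byte multiple, a caller usually derives it from xsize and the bytes-per-pixel of the chosen format and rounds up. The sketch below assumes a 16-bpp RGB565 layer and uses the kernel's ALIGN() macro (from linux/kernel.h in kernels of this era); the helper name is illustrative.

/* Sketch: derive a valid line_length for a 16-bpp RGB565 layer */
static void osd_cfg_rgb565(struct osd_layer_config *cfg,
			   unsigned xsize, unsigned ysize)
{
	cfg->pixfmt = PIXFMT_RGB565;
	cfg->xsize = xsize;
	cfg->ysize = ysize;
	/* 2 bytes per pixel; round up to the 32-byte cache line */
	cfg->line_length = ALIGN(xsize * 2, 32);	/* 700 px -> 1408 */
}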
-
-/* parameters that apply on a per-window (OSD or video) basis */
-struct osd_window_state {
- int is_allocated;
- int is_enabled;
- unsigned long fb_base_phys;
- enum osd_zoom_factor h_zoom;
- enum osd_zoom_factor v_zoom;
- struct osd_layer_config lconfig;
-};
-
-/* parameters that apply on a per-OSD-window basis */
-struct osd_osdwin_state {
- enum osd_clut clut;
- enum osd_blending_factor blend;
- int colorkey_blending;
- unsigned colorkey;
- int rec601_attenuation;
- /* index is pixel value */
- unsigned char palette_map[16];
-};
-
-/* hardware rectangular cursor parameters */
-struct osd_cursor_state {
- int is_enabled;
- struct osd_cursor_config config;
-};
-
-struct osd_state;
-
-struct vpbe_osd_ops {
- int (*initialize)(struct osd_state *sd);
- int (*request_layer)(struct osd_state *sd, enum osd_layer layer);
- void (*release_layer)(struct osd_state *sd, enum osd_layer layer);
- int (*enable_layer)(struct osd_state *sd, enum osd_layer layer,
- int otherwin);
- void (*disable_layer)(struct osd_state *sd, enum osd_layer layer);
- int (*set_layer_config)(struct osd_state *sd, enum osd_layer layer,
- struct osd_layer_config *lconfig);
- void (*get_layer_config)(struct osd_state *sd, enum osd_layer layer,
- struct osd_layer_config *lconfig);
- void (*start_layer)(struct osd_state *sd, enum osd_layer layer,
- unsigned long fb_base_phys,
- unsigned long cbcr_ofst);
- void (*set_left_margin)(struct osd_state *sd, u32 val);
- void (*set_top_margin)(struct osd_state *sd, u32 val);
- void (*set_interpolation_filter)(struct osd_state *sd, int filter);
- int (*set_vid_expansion)(struct osd_state *sd,
- enum osd_h_exp_ratio h_exp,
- enum osd_v_exp_ratio v_exp);
- void (*get_vid_expansion)(struct osd_state *sd,
- enum osd_h_exp_ratio *h_exp,
- enum osd_v_exp_ratio *v_exp);
- void (*set_zoom)(struct osd_state *sd, enum osd_layer layer,
- enum osd_zoom_factor h_zoom,
- enum osd_zoom_factor v_zoom);
-};
-
-struct osd_state {
- enum vpbe_version vpbe_type;
- spinlock_t lock;
- struct device *dev;
- dma_addr_t osd_base_phys;
- void __iomem *osd_base;
- unsigned long osd_size;
- /* 1-->the isr will toggle the VID0 ping-pong buffer */
- int pingpong;
- int interpolation_filter;
- int field_inversion;
- enum osd_h_exp_ratio osd_h_exp;
- enum osd_v_exp_ratio osd_v_exp;
- enum osd_h_exp_ratio vid_h_exp;
- enum osd_v_exp_ratio vid_v_exp;
- enum osd_clut backg_clut;
- unsigned backg_clut_index;
- enum osd_rom_clut rom_clut;
- int is_blinking;
- /* attribute window blinking enabled */
- enum osd_blink_interval blink;
- /* YCbCrI or YCrCbI */
- enum osd_pix_format yc_pixfmt;
- /* columns are Y, Cb, Cr */
- unsigned char clut_ram[256][3];
- struct osd_cursor_state cursor;
- /* OSD0, VID0, OSD1, VID1 */
- struct osd_window_state win[4];
- /* OSD0, OSD1 */
- struct osd_osdwin_state osdwin[2];
- /* OSD device Operations */
- struct vpbe_osd_ops ops;
-};
-
-struct osd_platform_data {
- int field_inv_wa_enable;
-};
-
-#endif
diff --git a/include/media/davinci/vpbe_types.h b/include/media/davinci/vpbe_types.h
deleted file mode 100644
index 6015cda235cc..000000000000
--- a/include/media/davinci/vpbe_types.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 Texas Instruments Inc
- */
-#ifndef _VPBE_TYPES_H
-#define _VPBE_TYPES_H
-
-enum vpbe_version {
- VPBE_VERSION_1 = 1,
- VPBE_VERSION_2,
- VPBE_VERSION_3,
-};
-
-/* vpbe_enc_timings_type - Timing types used in vpbe device */
-enum vpbe_enc_timings_type {
- VPBE_ENC_STD = 0x1,
- VPBE_ENC_DV_TIMINGS = 0x4,
- /* Used when set timings through FB device interface */
- VPBE_ENC_TIMINGS_INVALID = 0x8,
-};
-
-/*
- * struct vpbe_enc_mode_info
- * @name: ptr to name string of the standard, "NTSC", "PAL" etc
- * @std: standard or non-standard mode. 1 - standard, 0 - nonstandard
- * @interlaced: 1 - interlaced, 0 - non interlaced/progressive
- * @xres: x or horizontal resolution of the display
- * @yres: y or vertical resolution of the display
- * @fps: frame per second
- * @left_margin: left margin of the display
- * @right_margin: right margin of the display
- * @upper_margin: upper margin of the display
- * @lower_margin: lower margin of the display
- * @hsync_len: h-sync length
- * @vsync_len: v-sync length
- * @flags: bit field: bit usage is documented below
- *
- * Description:
- * Structure holding timing and resolution information of a standard.
- * The vpbe device maintains a table of these entries and uses it to set
- * the required non-standard timings in the venc when the lcd controller
- * output is connected to an external encoder. The encoder may provide a
- * g_dv_timings() API to override these values as needed.
- *
- * Notes
- * ------
- * if_type should be used only by encoder manager and encoder.
- * flags usage
- * b0 (LSB) - hsync polarity, 0 - negative, 1 - positive
- * b1 - vsync polarity, 0 - negative, 1 - positive
- * b2 - field id polarity, 0 - negative, 1 - positive
- */
-struct vpbe_enc_mode_info {
- unsigned char *name;
- enum vpbe_enc_timings_type timings_type;
- v4l2_std_id std_id;
- struct v4l2_dv_timings dv_timings;
- unsigned int interlaced;
- unsigned int xres;
- unsigned int yres;
- struct v4l2_fract aspect;
- struct v4l2_fract fps;
- unsigned int left_margin;
- unsigned int right_margin;
- unsigned int upper_margin;
- unsigned int lower_margin;
- unsigned int hsync_len;
- unsigned int vsync_len;
- unsigned int flags;
-};
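Given the flag bit assignments documented in the notes above, polarity tests reduce to simple masks. The macro names below are illustrative, not part of this header; BIT() comes from linux/bits.h.

/* Illustrative masks for the flags field (bit layout per the notes) */
#define VPBE_ENC_HSYNC_POS	BIT(0)	/* b0: hsync polarity */
#define VPBE_ENC_VSYNC_POS	BIT(1)	/* b1: vsync polarity */
#define VPBE_ENC_FID_POS	BIT(2)	/* b2: field id polarity */

static inline int mode_hsync_positive(const struct vpbe_enc_mode_info *m)
{
	return !!(m->flags & VPBE_ENC_HSYNC_POS);
}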
-
-#endif
diff --git a/include/media/davinci/vpbe_venc.h b/include/media/davinci/vpbe_venc.h
deleted file mode 100644
index 93cf6a5fb49d..000000000000
--- a/include/media/davinci/vpbe_venc.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 Texas Instruments Inc
- */
-#ifndef _VPBE_VENC_H
-#define _VPBE_VENC_H
-
-#include <media/v4l2-subdev.h>
-#include <media/davinci/vpbe_types.h>
-
-#define DM644X_VPBE_VENC_SUBDEV_NAME "dm644x,vpbe-venc"
-#define DM365_VPBE_VENC_SUBDEV_NAME "dm365,vpbe-venc"
-#define DM355_VPBE_VENC_SUBDEV_NAME "dm355,vpbe-venc"
-
-/* venc events */
-#define VENC_END_OF_FRAME BIT(0)
-#define VENC_FIRST_FIELD BIT(1)
-#define VENC_SECOND_FIELD BIT(2)
-
-struct venc_platform_data {
- int (*setup_pinmux)(u32 if_type, int field);
- int (*setup_clock)(enum vpbe_enc_timings_type type,
- unsigned int pixclock);
- int (*setup_if_config)(u32 pixcode);
- /* Number of LCD outputs supported */
- int num_lcd_outputs;
- struct vpbe_if_params *lcd_if_params;
-};
-
-enum venc_ioctls {
- VENC_GET_FLD = 1,
-};
-
-/* exported functions */
-struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
- const char *venc_name);
-#endif
diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h
deleted file mode 100644
index 4ad53031e2f7..000000000000
--- a/include/media/davinci/vpfe_capture.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- */
-
-#ifndef _VPFE_CAPTURE_H
-#define _VPFE_CAPTURE_H
-
-#ifdef __KERNEL__
-
-/* Header files */
-#include <media/v4l2-dev.h>
-#include <linux/videodev2.h>
-#include <linux/clk.h>
-#include <linux/i2c.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
-#include <media/videobuf-dma-contig.h>
-#include <media/davinci/vpfe_types.h>
-
-#define VPFE_CAPTURE_NUM_DECODERS 5
-
-/* Macros */
-#define VPFE_MAJOR_RELEASE 0
-#define VPFE_MINOR_RELEASE 0
-#define VPFE_BUILD 1
-#define VPFE_CAPTURE_VERSION_CODE ((VPFE_MAJOR_RELEASE << 16) | \
- (VPFE_MINOR_RELEASE << 8) | \
- VPFE_BUILD)
-
-#define CAPTURE_DRV_NAME "vpfe-capture"
-
-struct vpfe_pixel_format {
- u32 pixelformat;
- /* bytes per pixel */
- int bpp;
-};
-
-struct vpfe_std_info {
- int active_pixels;
- int active_lines;
- /* current frame format */
- int frame_format;
-};
-
-struct vpfe_route {
- u32 input;
- u32 output;
-};
-
-struct vpfe_subdev_info {
- /* Sub device name */
- char name[32];
- /* Sub device group id */
- int grp_id;
- /* Number of inputs supported */
- int num_inputs;
- /* inputs available at the sub device */
- struct v4l2_input *inputs;
- /* Sub dev routing information for each input */
- struct vpfe_route *routes;
- /* check if sub dev supports routing */
- int can_route;
- /* ccdc bus/interface configuration */
- struct vpfe_hw_if_param ccdc_if_params;
- /* i2c subdevice board info */
- struct i2c_board_info board_info;
-};
-
-struct vpfe_config {
- /* Number of sub devices connected to vpfe */
- int num_subdevs;
- /* i2c bus adapter no */
- int i2c_adapter_id;
- /* information about each subdev */
- struct vpfe_subdev_info *sub_devs;
- /* evm card info */
- char *card_name;
- /* ccdc name */
- char *ccdc;
- /* vpfe clock */
- struct clk *vpssclk;
- struct clk *slaveclk;
- /* Function for Clearing the interrupt */
- void (*clr_intr)(int vdint);
-};
-
-struct vpfe_device {
- /* V4l2 specific parameters */
- /* Identifies video device for this channel */
- struct video_device video_dev;
- /* sub devices */
- struct v4l2_subdev **sd;
- /* vpfe cfg */
- struct vpfe_config *cfg;
- /* V4l2 device */
- struct v4l2_device v4l2_dev;
- /* parent device */
- struct device *pdev;
- /* number of open instances of the channel */
- u32 usrs;
- /* Indicates id of the field which is being displayed */
- u32 field_id;
- /* flag to indicate whether decoder is initialized */
- u8 initialized;
- /* current interface type */
- struct vpfe_hw_if_param vpfe_if_params;
- /* ptr to currently selected sub device */
- struct vpfe_subdev_info *current_subdev;
- /* current input at the sub device */
- int current_input;
- /* Keeps track of the information about the standard */
- struct vpfe_std_info std_info;
- /* std index into std table */
- int std_index;
- /* CCDC IRQs used when CCDC/ISIF output to SDRAM */
- unsigned int ccdc_irq0;
- unsigned int ccdc_irq1;
- /* number of buffers in fbuffers */
- u32 numbuffers;
- /* List of buffer pointers for storing frames */
- u8 *fbuffers[VIDEO_MAX_FRAME];
- /* Pointer to the current v4l2_buffer */
- struct videobuf_buffer *cur_frm;
- /* Pointer to the next v4l2_buffer */
- struct videobuf_buffer *next_frm;
- /*
- * This field keeps track of the type of buffer exchange mechanism
- * the user has selected
- */
- enum v4l2_memory memory;
- /* Used to store pixel format */
- struct v4l2_format fmt;
- /*
- * used when IMP is chained to store the crop window which
- * is different from the image window
- */
- struct v4l2_rect crop;
- /* Buffer queue used in video-buf */
- struct videobuf_queue buffer_queue;
- /* Queue of filled frames */
- struct list_head dma_queue;
- /* Used in video-buf */
- spinlock_t irqlock;
- /* IRQ lock for DMA queue */
- spinlock_t dma_queue_lock;
- /* lock used to access this structure */
- struct mutex lock;
- /* number of users performing IO */
- u32 io_usrs;
- /* Indicates whether streaming started */
- u8 started;
- /*
- * offset where second field starts from the starting of the
- * buffer for field separated YCbCr formats
- */
- u32 field_off;
-};
-
-/* File handle structure */
-struct vpfe_fh {
- struct v4l2_fh fh;
- struct vpfe_device *vpfe_dev;
- /* Indicates whether this file handle is doing IO */
- u8 io_allowed;
-};
-
-struct vpfe_config_params {
- u8 min_numbuffers;
- u8 numbuffers;
- u32 min_bufsize;
- u32 device_bufsize;
-};
-
-#endif /* End of __KERNEL__ */
-#endif /* _VPFE_CAPTURE_H */
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index 8439e46fb993..6cce1f09c721 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -48,8 +48,6 @@ struct vpif_display_config {
int i2c_adapter_id;
struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS];
const char *card_name;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
};
struct vpif_input {
@@ -74,7 +72,7 @@ struct vpif_capture_config {
int i2c_adapter_id;
const char *card_name;
- struct v4l2_async_subdev *asd[VPIF_CAPTURE_MAX_CHANNELS];
+ struct v4l2_async_connection *asd[VPIF_CAPTURE_MAX_CHANNELS];
int asd_sizes[VPIF_CAPTURE_MAX_CHANNELS];
};
#endif /* _VPIF_TYPES_H */
diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h
deleted file mode 100644
index 315fa77e238c..000000000000
--- a/include/media/davinci/vpss.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 Texas Instruments Inc
- *
- * vpss - video processing subsystem module header file.
- *
- * Include this header file if a driver needs to configure vpss system
- * module. It exports a set of library functions for video drivers to
- * configure vpss system module functions such as clock enable/disable,
- * vpss interrupt mux to arm, and other common vpss system module
- * functions.
- */
-#ifndef _VPSS_H
-#define _VPSS_H
-
-/* selector for ccdc input selection on DM355 */
-enum vpss_ccdc_source_sel {
- VPSS_CCDCIN,
- VPSS_HSSIIN,
- VPSS_PGLPBK, /* for DM365 only */
- VPSS_CCDCPG /* for DM365 only */
-};
-
-struct vpss_sync_pol {
- unsigned int ccdpg_hdpol:1;
- unsigned int ccdpg_vdpol:1;
-};
-
-struct vpss_pg_frame_size {
- short hlpfr;
- short pplen;
-};
-
-/* Used for enable/disable VPSS Clock */
-enum vpss_clock_sel {
- /* DM355/DM365 */
- VPSS_CCDC_CLOCK,
- VPSS_IPIPE_CLOCK,
- VPSS_H3A_CLOCK,
- VPSS_CFALD_CLOCK,
- /*
- * When using VPSS_VENC_CLOCK_SEL in the vpss_enable_clock() API,
- * the following applies:
- * en = 0 selects ENC_CLK
- * en = 1 selects ENC_CLK/2
- */
- VPSS_VENC_CLOCK_SEL,
- VPSS_VPBE_CLOCK,
- /* DM365 only clocks */
- VPSS_IPIPEIF_CLOCK,
- VPSS_RSZ_CLOCK,
- VPSS_BL_CLOCK,
- /*
- * When using VPSS_PCLK_INTERNAL in the vpss_enable_clock() API,
- * the following applies:
- * en = 0 disables internal PCLK
- * en = 1 enables internal PCLK
- */
- VPSS_PCLK_INTERNAL,
- /*
- * When using VPSS_PSYNC_CLOCK_SEL in the vpss_enable_clock() API,
- * the following applies:
- * en = 0 enables MMR clock
- * en = 1 enables VPSS clock
- */
- VPSS_PSYNC_CLOCK_SEL,
- VPSS_LDC_CLOCK_SEL,
- VPSS_OSD_CLOCK_SEL,
- VPSS_FDIF_CLOCK,
- VPSS_LDC_CLOCK
-};
-
-/* select input to ccdc on dm355 */
-int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel);
-/* enable/disable a vpss clock, 0 - success, -1 - failure */
-int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en);
-/* set sync polarity, only for DM365 */
-void dm365_vpss_set_sync_pol(struct vpss_sync_pol);
-/* set the PG_FRAME_SIZE register, only for DM365 */
-void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size);
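A short sketch of how a capture driver might chain these calls on DM355, following the documented return convention (0 on success) and the en semantics from the enum above; the function name is hypothetical.

/* Sketch: select the CCDC input and enable its clocks on DM355 */
static int vpss_setup_ccdc_path(void)
{
	if (vpss_select_ccdc_source(VPSS_CCDCIN))
		return -1;
	if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1))	/* 1 = enable */
		return -1;
	/* en = 1 selects ENC_CLK/2, per the vpss_clock_sel comment */
	return vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
}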
-
-/* wbl reset for dm644x */
-enum vpss_wbl_sel {
- VPSS_PCR_AEW_WBL_0 = 16,
- VPSS_PCR_AF_WBL_0,
- VPSS_PCR_RSZ4_WBL_0,
- VPSS_PCR_RSZ3_WBL_0,
- VPSS_PCR_RSZ2_WBL_0,
- VPSS_PCR_RSZ1_WBL_0,
- VPSS_PCR_PREV_WBL_0,
- VPSS_PCR_CCDC_WBL_O,
-};
-/* clear wbl overflow flag for DM6446 */
-int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel);
-
-/* set sync polarity */
-void vpss_set_sync_pol(struct vpss_sync_pol sync);
-/* set the PG_FRAME_SIZE register */
-void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size);
-/*
- * vpss_dma_complete_interrupt - check and clear the DMA-complete interrupt
- *
- * The following return values are used:
- * 0 - interrupt occurred and cleared
- * 1 - interrupt not occurred
- * 2 - interrupt status not available
- */
-int vpss_dma_complete_interrupt(void);
-
-#endif
diff --git a/include/media/dmxdev.h b/include/media/dmxdev.h
index baafa3b8aca4..63219a699370 100644
--- a/include/media/dmxdev.h
+++ b/include/media/dmxdev.h
@@ -21,7 +21,6 @@
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/wait.h>
diff --git a/include/media/drv-intf/s3c_camif.h b/include/media/drv-intf/s3c_camif.h
index d1200b40f53a..f746851a5ce6 100644
--- a/include/media/drv-intf/s3c_camif.h
+++ b/include/media/drv-intf/s3c_camif.h
@@ -35,8 +35,4 @@ struct s3c_camif_plat_data {
int (*gpio_put)(void);
};
-/* Platform default helper functions */
-int s3c_camif_gpio_get(void);
-int s3c_camif_gpio_put(void);
-
#endif /* MEDIA_S3C_CAMIF_ */
diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h
index 635805fb35e8..55c7d70b9feb 100644
--- a/include/media/drv-intf/saa7146_vv.h
+++ b/include/media/drv-intf/saa7146_vv.h
@@ -6,7 +6,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/drv-intf/saa7146.h>
-#include <media/videobuf-dma-sg.h>
+#include <media/videobuf2-dma-sg.h>
#define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */
#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
@@ -57,10 +57,10 @@ struct saa7146_standard
/* buffer for one video/vbi frame */
struct saa7146_buf {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
/* saa7146 specific */
- struct v4l2_pix_format *fmt;
int (*activate)(struct saa7146_dev *dev,
struct saa7146_buf *buf,
struct saa7146_buf *next);
@@ -74,58 +74,23 @@ struct saa7146_dmaqueue {
struct saa7146_buf *curr;
struct list_head queue;
struct timer_list timeout;
+ struct vb2_queue q;
};
-struct saa7146_overlay {
- struct saa7146_fh *fh;
- struct v4l2_window win;
- struct v4l2_clip clips[16];
- int nclips;
-};
-
-/* per open data */
-struct saa7146_fh {
- /* Must be the first field! */
- struct v4l2_fh fh;
- struct saa7146_dev *dev;
-
- /* video capture */
- struct videobuf_queue video_q;
-
- /* vbi capture */
- struct videobuf_queue vbi_q;
-
- unsigned int resources; /* resource management for device open */
-};
-
-#define STATUS_OVERLAY 0x01
-#define STATUS_CAPTURE 0x02
-
struct saa7146_vv
{
/* vbi capture */
struct saa7146_dmaqueue vbi_dmaq;
struct v4l2_vbi_format vbi_fmt;
struct timer_list vbi_read_timeout;
- struct file *vbi_read_timeout_file;
/* vbi workaround interrupt queue */
wait_queue_head_t vbi_wq;
- int vbi_fieldcount;
- struct saa7146_fh *vbi_streaming;
-
- int video_status;
- struct saa7146_fh *video_fh;
-
- /* video overlay */
- struct saa7146_overlay ov;
- struct v4l2_framebuffer ov_fb;
- struct saa7146_format *ov_fmt;
- struct saa7146_fh *ov_suspend;
/* video capture */
struct saa7146_dmaqueue video_dmaq;
struct v4l2_pix_format video_fmt;
enum v4l2_field last_field;
+ u32 seqnr;
/* common: fixme? shouldn't this be in saa7146_fh?
(this leads to a more complicated question: shall the driver
@@ -140,9 +105,7 @@ struct saa7146_vv
int current_hps_source;
int current_hps_sync;
- struct saa7146_dma d_clipping; /* pointer to clipping memory */
-
- unsigned int resources; /* resource management for device */
+ unsigned int resources; /* resource management for device */
};
/* flags */
@@ -172,10 +135,7 @@ struct saa7146_ext_vv
struct saa7146_use_ops {
void (*init)(struct saa7146_dev *, struct saa7146_vv *);
- int(*open)(struct saa7146_dev *, struct file *);
- void (*release)(struct saa7146_dev *, struct file *);
void (*irq_done)(struct saa7146_dev *, unsigned long status);
- ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
};
/* from saa7146_fops.c */
@@ -185,16 +145,11 @@ void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,
void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
void saa7146_buffer_timeout(struct timer_list *t);
-void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
- struct saa7146_buf *buf);
int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv);
int saa7146_vv_release(struct saa7146_dev* dev);
/* from saa7146_hlp.c */
-int saa7146_enable_overlay(struct saa7146_fh *fh);
-void saa7146_disable_overlay(struct saa7146_fh *fh);
-
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next);
void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) ;
void saa7146_set_hps_source_and_sync(struct saa7146_dev *saa, int source, int sync);
@@ -204,17 +159,17 @@ void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops;
extern const struct saa7146_use_ops saa7146_video_uops;
-int saa7146_start_preview(struct saa7146_fh *fh);
-int saa7146_stop_preview(struct saa7146_fh *fh);
+extern const struct vb2_ops video_qops;
long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
int saa7146_s_ctrl(struct v4l2_ctrl *ctrl);
/* from saa7146_vbi.c */
extern const struct saa7146_use_ops saa7146_vbi_uops;
+extern const struct vb2_ops vbi_qops;
/* resource management functions */
-int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit);
-void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits);
+int saa7146_res_get(struct saa7146_dev *dev, unsigned int bit);
+void saa7146_res_free(struct saa7146_dev *dev, unsigned int bits);
#define RESOURCE_DMA1_HPS 0x1
#define RESOURCE_DMA2_CLP 0x2
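
The saa7146 hunks above are a textbook videobuf-to-videobuf2 conversion: saa7146_buf now embeds a struct vb2_v4l2_buffer as its first member plus a list_head for driver-side queuing, the per-open saa7146_fh and overlay state disappear, and the resource helpers take the device instead of a file handle. A minimal sketch of the buffer-recovery pattern this layout enables (buf_init is a hypothetical callback name, not part of this patch):

#include <linux/list.h>
#include <media/videobuf2-v4l2.h>

/* Sketch only: recover the driver buffer from a generic vb2 buffer.
 * The container_of() chain is valid because vb2_v4l2_buffer must be
 * the first member of saa7146_buf, as the header comment requires. */
static int buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);

	INIT_LIST_HEAD(&buf->list);	/* later linked into a saa7146_dmaqueue */
	return 0;
}
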
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index d37cb74b769c..1b7d10f3d4aa 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -10,81 +10,88 @@
#ifndef _DVB_USB_IDS_H_
#define _DVB_USB_IDS_H_
+#include <linux/usb.h>
+
+#define DVB_USB_DEV(pid, vid) \
+ [vid] = { USB_DEVICE(USB_VID_ ## pid, USB_PID_ ## vid) }
+
+#define DVB_USB_DEV_VER(pid, vid, lo, hi) \
+ [vid] = { USB_DEVICE_VER(USB_VID_ ## pid, USB_PID_ ## vid, lo, hi) }
+
/* Vendor IDs */
-#define USB_VID_ADSTECH 0x06e1
-#define USB_VID_AFATECH 0x15a4
+
+#define USB_VID_774 0x7a69
+#define USB_VID_ADSTECH 0x06e1
+#define USB_VID_AFATECH 0x15a4
#define USB_VID_ALCOR_MICRO 0x058f
#define USB_VID_ALINK 0x05e3
+#define USB_VID_AME 0x06be
#define USB_VID_AMT 0x1c73
#define USB_VID_ANCHOR 0x0547
-#define USB_VID_ANSONIC 0x10b9
+#define USB_VID_ANSONIC 0x10b9
#define USB_VID_ANUBIS_ELECTRONIC 0x10fd
#define USB_VID_ASUS 0x0b05
#define USB_VID_AVERMEDIA 0x07ca
+#define USB_VID_AZUREWAVE 0x13d3
#define USB_VID_COMPRO 0x185b
#define USB_VID_COMPRO_UNK 0x145f
#define USB_VID_CONEXANT 0x0572
-#define USB_VID_CYPRESS 0x04b4
-#define USB_VID_DEXATEK 0x1d19
+#define USB_VID_CYPRESS 0x04b4
+#define USB_VID_DEXATEK 0x1d19
#define USB_VID_DIBCOM 0x10b8
#define USB_VID_DPOSH 0x1498
#define USB_VID_DVICO 0x0fe9
#define USB_VID_E3C 0x18b4
#define USB_VID_ELGATO 0x0fd9
#define USB_VID_EMPIA 0xeb1a
+#define USB_VID_EVOLUTEPC 0x1e59
#define USB_VID_GENPIX 0x09c0
+#define USB_VID_GIGABYTE 0x1044
+#define USB_VID_GOTVIEW 0x1fe1
#define USB_VID_GRANDTEC 0x5032
#define USB_VID_GTEK 0x1f4d
-#define USB_VID_HANFTEK 0x15f4
+#define USB_VID_HAMA 0x147f
+#define USB_VID_HANFTEK 0x15f4
#define USB_VID_HAUPPAUGE 0x2040
+#define USB_VID_HUMAX_COEX 0x10b9
#define USB_VID_HYPER_PALTEK 0x1025
#define USB_VID_INTEL 0x8086
-#define USB_VID_ITETECH 0x048d
+#define USB_VID_ITETECH 0x048d
#define USB_VID_KWORLD 0xeb2a
#define USB_VID_KWORLD_2 0x1b80
#define USB_VID_KYE 0x0458
-#define USB_VID_LEADTEK 0x0413
+#define USB_VID_LEADTEK 0x0413
#define USB_VID_LITEON 0x04ca
#define USB_VID_MEDION 0x1660
+#define USB_VID_MICROSOFT 0x045e
#define USB_VID_MIGLIA 0x18f3
#define USB_VID_MSI 0x0db0
#define USB_VID_MSI_2 0x1462
#define USB_VID_OPERA1 0x695c
-#define USB_VID_PINNACLE 0x2304
#define USB_VID_PCTV 0x2013
+#define USB_VID_PINNACLE 0x2304
#define USB_VID_PIXELVIEW 0x1554
-#define USB_VID_REALTEK 0x0bda
+#define USB_VID_PROF_1 0x3011
+#define USB_VID_PROF_2 0x3034
+#define USB_VID_REALTEK 0x0bda
+#define USB_VID_SONY 0x1415
+#define USB_VID_TECHNISAT 0x14f7
#define USB_VID_TECHNOTREND 0x0b48
+#define USB_VID_TELESTAR 0x10b9
#define USB_VID_TERRATEC 0x0ccd
#define USB_VID_TERRATEC_2 0x153b
-#define USB_VID_TELESTAR 0x10b9
-#define USB_VID_VISIONPLUS 0x13d3
-#define USB_VID_SONY 0x1415
-#define USB_PID_TEVII_S421 0xd421
-#define USB_PID_TEVII_S480_1 0xd481
-#define USB_PID_TEVII_S480_2 0xd482
-#define USB_PID_TEVII_S630 0xd630
-#define USB_PID_TEVII_S632 0xd632
-#define USB_PID_TEVII_S650 0xd650
-#define USB_PID_TEVII_S660 0xd660
-#define USB_PID_TEVII_S662 0xd662
-#define USB_VID_TWINHAN 0x1822
+#define USB_VID_TEVII 0x9022
+#define USB_VID_TWINHAN 0x1822
#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
-#define USB_VID_UNIWILL 0x1584
+#define USB_VID_UNIWILL 0x1584
+#define USB_VID_VISIONPLUS 0x13d3
#define USB_VID_WIDEVIEW 0x14aa
-#define USB_VID_GIGABYTE 0x1044
-#define USB_VID_YUAN 0x1164
#define USB_VID_XTENSIONS 0x1ae7
+#define USB_VID_YUAN 0x1164
#define USB_VID_ZYDAS 0x0ace
-#define USB_VID_HUMAX_COEX 0x10b9
-#define USB_VID_774 0x7a69
-#define USB_VID_EVOLUTEPC 0x1e59
-#define USB_VID_AZUREWAVE 0x13d3
-#define USB_VID_TECHNISAT 0x14f7
-#define USB_VID_HAMA 0x147f
-#define USB_VID_MICROSOFT 0x045e
/* Product IDs */
+
#define USB_PID_ADSTECH_USB2_COLD 0xa333
#define USB_PID_ADSTECH_USB2_WARM 0xa334
#define USB_PID_AFATECH_AF9005 0x9020
@@ -95,339 +102,370 @@
#define USB_PID_AFATECH_AF9035_1002 0x1002
#define USB_PID_AFATECH_AF9035_1003 0x1003
#define USB_PID_AFATECH_AF9035_9035 0x9035
-#define USB_PID_TREKSTOR_DVBT 0x901b
-#define USB_PID_TREKSTOR_TERRES_2_0 0xC803
#define USB_PID_ALINK_DTU 0xf170
+#define USB_PID_AME_DTV5100 0xa232
+#define USB_PID_ANCHOR_NEBULA_DIGITV 0x0201
#define USB_PID_ANSONIC_DVBT_USB 0x6000
+#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514
+#define USB_PID_ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513
+#define USB_PID_ANUBIS_MSI_DIGI_VOX_MINI_II 0x1513
#define USB_PID_ANYSEE 0x861f
-#define USB_PID_AZUREWAVE_AD_TU700 0x3237
-#define USB_PID_AZUREWAVE_6007 0x0ccd
-#define USB_PID_AVERMEDIA_DVBT_USB_COLD 0x0001
-#define USB_PID_AVERMEDIA_DVBT_USB_WARM 0x0002
+#define USB_PID_ASUS_U3000 0x171f
+#define USB_PID_ASUS_U3000H 0x1736
+#define USB_PID_ASUS_U3100 0x173f
+#define USB_PID_ASUS_U3100MINI_PLUS 0x1779
+#define USB_PID_AVERMEDIA_1867 0x1867
+#define USB_PID_AVERMEDIA_A309 0xa309
+#define USB_PID_AVERMEDIA_A310 0xa310
+#define USB_PID_AVERMEDIA_A805 0xa805
+#define USB_PID_AVERMEDIA_A815M 0x815a
+#define USB_PID_AVERMEDIA_A835 0xa835
+#define USB_PID_AVERMEDIA_A835B_1835 0x1835
+#define USB_PID_AVERMEDIA_A835B_2835 0x2835
+#define USB_PID_AVERMEDIA_A835B_3835 0x3835
+#define USB_PID_AVERMEDIA_A835B_4835 0x4835
+#define USB_PID_AVERMEDIA_A850 0x850a
+#define USB_PID_AVERMEDIA_A850T 0x850b
+#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_B835 0xb835
#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800
#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801
+#define USB_PID_AVERMEDIA_EXPRESS 0xb568
+#define USB_PID_AVERMEDIA_H335 0x0335
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039
+#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228
+#define USB_PID_AVERMEDIA_TD110 0xa110
+#define USB_PID_AVERMEDIA_TD310 0x1871
+#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
+#define USB_PID_AVERMEDIA_VOLAR 0xa807
+#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
+#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868
+#define USB_PID_AVERMEDIA_VOLAR_X 0xa815
+#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150
+#define USB_PID_AZUREWAVE_6007 0x0ccd
+#define USB_PID_AZUREWAVE_AD_TU700 0x3237
+#define USB_PID_AZUREWAVE_AZ6027 0x3275
+#define USB_PID_AZUREWAVE_TWINHAN_VP7049 0x3219
#define USB_PID_COMPRO_DVBU2000_COLD 0xd000
-#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c
#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
+#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80
#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
#define USB_PID_CONEXANT_D680_DMB 0x86d6
-#define USB_PID_CREATIX_CTX1921 0x1921
+#define USB_PID_CPYTO_REDI_PC50A 0xa803
+#define USB_PID_CTVDIGDUAL_V2 0xe410
+#define USB_PID_CYPRESS_DW2101 0x2101
+#define USB_PID_CYPRESS_DW2102 0x2102
+#define USB_PID_CYPRESS_DW2104 0x2104
+#define USB_PID_CYPRESS_DW3101 0x3101
+#define USB_PID_CYPRESS_OPERA1_COLD 0x2830
#define USB_PID_DELOCK_USB2_DVBT 0xb803
+#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8
#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9
#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6
#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7
-#define USB_PID_DIBCOM_STK7700P 0x1e14
+#define USB_PID_DIBCOM_NIM7090 0x1bb2
+#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
+#define USB_PID_DIBCOM_NIM9090M 0x2383
+#define USB_PID_DIBCOM_NIM9090MD 0x2384
+#define USB_PID_DIBCOM_STK7070P 0x1ebc
+#define USB_PID_DIBCOM_STK7070PD 0x1ebe
+#define USB_PID_DIBCOM_STK7700D 0x1ef0
+#define USB_PID_DIBCOM_STK7700P 0x1e14
#define USB_PID_DIBCOM_STK7700P_PC 0x1e78
-#define USB_PID_DIBCOM_STK7700D 0x1ef0
#define USB_PID_DIBCOM_STK7700_U7000 0x7001
-#define USB_PID_DIBCOM_STK7070P 0x1ebc
-#define USB_PID_DIBCOM_STK7070PD 0x1ebe
-#define USB_PID_DIBCOM_STK807XP 0x1f90
+#define USB_PID_DIBCOM_STK7770P 0x1e80
+#define USB_PID_DIBCOM_STK807XP 0x1f90
#define USB_PID_DIBCOM_STK807XPVR 0x1f98
-#define USB_PID_DIBCOM_STK8096GP 0x1fa0
-#define USB_PID_DIBCOM_STK8096PVR 0x1faa
-#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
-#define USB_PID_DIBCOM_TFE8096P 0x1f9C
-#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
-#define USB_PID_DIBCOM_STK7770P 0x1e80
-#define USB_PID_DIBCOM_NIM7090 0x1bb2
+#define USB_PID_DIBCOM_STK8096GP 0x1fa0
+#define USB_PID_DIBCOM_STK8096PVR 0x1faa
#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4
-#define USB_PID_DIBCOM_TFE7790P 0x1e6e
-#define USB_PID_DIBCOM_NIM9090M 0x2383
-#define USB_PID_DIBCOM_NIM9090MD 0x2384
+#define USB_PID_DIBCOM_TFE7790P 0x1e6e
+#define USB_PID_DIBCOM_TFE8096P 0x1f9C
+#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
+#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
#define USB_PID_DPOSH_M9206_COLD 0x9206
#define USB_PID_DPOSH_M9206_WARM 0xa090
+#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50
+#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
+#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
+#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
+#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78
+#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98
+#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70
+#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71
+#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
+#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
+#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
+#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
+#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
#define USB_PID_E3C_EC168 0x1689
#define USB_PID_E3C_EC168_2 0xfffa
#define USB_PID_E3C_EC168_3 0xfffb
#define USB_PID_E3C_EC168_4 0x1001
#define USB_PID_E3C_EC168_5 0x1002
+#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
+#define USB_PID_ELGATO_EYETV_DTT 0x0021
+#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
+#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
+#define USB_PID_ELGATO_EYETV_SAT 0x002a
+#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
+#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
+#define USB_PID_EMPIA_DIGIVOX_MINI_SL_COLD 0xe360
+#define USB_PID_EMPIA_DIGIVOX_MINI_SL_WARM 0xe361
+#define USB_PID_EMPIA_VSTREAM_COLD 0x17de
+#define USB_PID_EMPIA_VSTREAM_WARM 0x17df
+#define USB_PID_EVOLUTEPC_TVWAY_PLUS 0x0002
+#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#define USB_PID_FREECOM_DVBT 0x0160
#define USB_PID_FREECOM_DVBT_2 0x0161
-#define USB_PID_UNIWILL_STK7700P 0x6003
+#define USB_PID_FRIIO_WHITE 0x0001
+#define USB_PID_GENIATECH_SU3000 0x3000
+#define USB_PID_GENIATECH_T220 0xd220
+#define USB_PID_GENIATECH_X3M_SPC1400HD 0x3100
#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
+#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
+#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
+#define USB_PID_GENPIX_8PSK_REV_2 0x0202
+#define USB_PID_GENPIX_SKYWALKER_1 0x0203
+#define USB_PID_GENPIX_SKYWALKER_2 0x0206
+#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
+#define USB_PID_GIGABYTE_U7000 0x7001
+#define USB_PID_GIGABYTE_U8000 0x7002
+#define USB_PID_GOTVIEW_SAT_HD 0x5456
+#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
+#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
-#define USB_PID_GOTVIEW_SAT_HD 0x5456
+#define USB_PID_GRANDTEC_MOD3000_COLD 0x0bb8
+#define USB_PID_GRANDTEC_MOD3000_WARM 0x0bb9
+#define USB_PID_HAMA_DVBT_HYBRID 0x2758
+#define USB_PID_HANFTEK_UMT_010_COLD 0x0001
+#define USB_PID_HANFTEK_UMT_010_WARM 0x0015
+#define USB_PID_HAUPPAUGE_MAX_S2 0xd900
+#define USB_PID_HAUPPAUGE_MYTV_T 0x7080
+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
+#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
+#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
+#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
+#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200
+#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210
+#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_COLD 0x9300
+#define USB_PID_HAUPPAUGE_WINTV_NOVA_T_USB2_WARM 0x9301
+#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_COLD 0x5000
+#define USB_PID_HUMAX_DVB_T_STICK_HIGH_SPEED_WARM 0x5001
#define USB_PID_INTEL_CE9500 0x9500
#define USB_PID_ITETECH_IT9135 0x9135
#define USB_PID_ITETECH_IT9135_9005 0x9005
#define USB_PID_ITETECH_IT9135_9006 0x9006
#define USB_PID_ITETECH_IT9303 0x9306
-#define USB_PID_KWORLD_399U 0xe399
-#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_395U 0xe396
#define USB_PID_KWORLD_395U_2 0xe39b
#define USB_PID_KWORLD_395U_3 0xe395
#define USB_PID_KWORLD_395U_4 0xe39a
+#define USB_PID_KWORLD_399U 0xe399
+#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_MC810 0xc810
-#define USB_PID_KWORLD_PC160_2T 0xc160
+#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_PC160_T 0xc161
#define USB_PID_KWORLD_UB383_T 0xe383
#define USB_PID_KWORLD_UB499_2T_T09 0xe409
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
-#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
+#define USB_PID_KYE_DVB_T_COLD 0x701e
+#define USB_PID_KYE_DVB_T_WARM 0x701f
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_COLD 0x6025
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_H 0x60f6
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P 0x6f00
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
+#define USB_PID_LEADTEK_WINFAST_DTV_DONGLE_WARM 0x6026
+#define USB_PID_LITEON_DVB_T_COLD 0xf000
+#define USB_PID_LITEON_DVB_T_WARM 0xf001
+#define USB_PID_MEDION_CREATIX_CTX1921 0x1921
+#define USB_PID_MEDION_MD95700 0x0932
+#define USB_PID_MICROSOFT_XBOX_ONE_TUNER 0x02d5
+#define USB_PID_MIGLIA_WT220U_ZAP250_COLD 0x0220
+#define USB_PID_MSI_DIGIVOX_DUO 0x8801
+#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
+#define USB_PID_MSI_MEGASKY580 0x5580
+#define USB_PID_MSI_MEGASKY580_55801 0x5581
+#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_MYGICA_T230 0xc688
+#define USB_PID_MYGICA_T230A 0x689a
+#define USB_PID_MYGICA_T230C 0xc689
+#define USB_PID_MYGICA_T230C2 0xc68a
+#define USB_PID_MYGICA_T230C2_LITE 0xc69a
+#define USB_PID_MYGICA_T230C_LITE 0xc699
+#define USB_PID_NOXON_DAB_STICK 0x00b3
+#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
+#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
+#define USB_PID_OPERA1_WARM 0x3829
+#define USB_PID_PCTV_2002E 0x025c
+#define USB_PID_PCTV_2002E_SE 0x025d
+#define USB_PID_PCTV_200E 0x020e
+#define USB_PID_PCTV_78E 0x025a
+#define USB_PID_PCTV_79E 0x0262
+#define USB_PID_PCTV_DIBCOM_STK8096PVR 0x1faa
+#define USB_PID_PCTV_PINNACLE_PCTV282E 0x0248
+#define USB_PID_PCTV_PINNACLE_PCTV73ESE 0x0245
+#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
+#define USB_PID_PINNACLE_PCTV2000E 0x022c
+#define USB_PID_PINNACLE_PCTV282E 0x0248
+#define USB_PID_PINNACLE_PCTV340E 0x023d
+#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
+#define USB_PID_PINNACLE_PCTV71E 0x022b
+#define USB_PID_PINNACLE_PCTV72E 0x0236
+#define USB_PID_PINNACLE_PCTV73A 0x0243
+#define USB_PID_PINNACLE_PCTV73E 0x0237
+#define USB_PID_PINNACLE_PCTV73ESE 0x0245
+#define USB_PID_PINNACLE_PCTV74E 0x0246
+#define USB_PID_PINNACLE_PCTV801E 0x023a
+#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
+#define USB_PID_PINNACLE_PCTV_400E 0x020f
+#define USB_PID_PINNACLE_PCTV_450E 0x0222
+#define USB_PID_PINNACLE_PCTV_452E 0x021f
+#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229
+#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
+#define USB_PID_PIXELVIEW_SBTVD 0x5010
#define USB_PID_PROF_1100 0xb012
-#define USB_PID_TERRATEC_CINERGY_S 0x0064
-#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
-#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
-#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
-#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
-#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
-#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
-#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2
-#define USB_PID_TWINHAN_VP7041_COLD 0x3201
-#define USB_PID_TWINHAN_VP7041_WARM 0x3202
-#define USB_PID_TWINHAN_VP7020_COLD 0x3203
-#define USB_PID_TWINHAN_VP7020_WARM 0x3204
-#define USB_PID_TWINHAN_VP7045_COLD 0x3205
-#define USB_PID_TWINHAN_VP7045_WARM 0x3206
-#define USB_PID_TWINHAN_VP7021_COLD 0x3207
-#define USB_PID_TWINHAN_VP7021_WARM 0x3208
-#define USB_PID_TWINHAN_VP7049 0x3219
-#define USB_PID_TINYTWIN 0x3226
-#define USB_PID_TINYTWIN_2 0xe402
-#define USB_PID_TINYTWIN_3 0x9016
-#define USB_PID_DNTV_TINYUSB2_COLD 0x3223
-#define USB_PID_DNTV_TINYUSB2_WARM 0x3224
-#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
-#define USB_PID_ULTIMA_TVBOX_WARM 0x8106
-#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107
-#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108
-#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235
-#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109
-#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a
-#define USB_PID_ARTEC_T14_COLD 0x810b
-#define USB_PID_ARTEC_T14_WARM 0x810c
-#define USB_PID_ARTEC_T14BR 0x810f
-#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613
-#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002
-#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e
-#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f
-#define USB_PID_HANFTEK_UMT_010_COLD 0x0001
-#define USB_PID_HANFTEK_UMT_010_WARM 0x0015
-#define USB_PID_DTT200U_COLD 0x0201
-#define USB_PID_DTT200U_WARM 0x0301
-#define USB_PID_WT220U_ZAP250_COLD 0x0220
-#define USB_PID_WT220U_COLD 0x0222
-#define USB_PID_WT220U_WARM 0x0221
-#define USB_PID_WT220U_FC_COLD 0x0225
-#define USB_PID_WT220U_FC_WARM 0x0226
-#define USB_PID_WT220U_ZL0353_COLD 0x022a
-#define USB_PID_WT220U_ZL0353_WARM 0x022b
-#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
-#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
-#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
-#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
-#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
-#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
-#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
-#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
-#define USB_PID_HAUPPAUGE_MYTV_T 0x7080
-#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
-#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
-#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200
-#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210
-#define USB_PID_AVERMEDIA_EXPRESS 0xb568
-#define USB_PID_AVERMEDIA_VOLAR 0xa807
-#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
-#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868
-#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228
-#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039
-#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039
-#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039
-#define USB_PID_AVERMEDIA_VOLAR_X 0xa815
-#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150
-#define USB_PID_AVERMEDIA_A309 0xa309
-#define USB_PID_AVERMEDIA_A310 0xa310
-#define USB_PID_AVERMEDIA_A850 0x850a
-#define USB_PID_AVERMEDIA_A850T 0x850b
-#define USB_PID_AVERMEDIA_A805 0xa805
-#define USB_PID_AVERMEDIA_A815M 0x815a
-#define USB_PID_AVERMEDIA_A835 0xa835
-#define USB_PID_AVERMEDIA_B835 0xb835
-#define USB_PID_AVERMEDIA_A835B_1835 0x1835
-#define USB_PID_AVERMEDIA_A835B_2835 0x2835
-#define USB_PID_AVERMEDIA_A835B_3835 0x3835
-#define USB_PID_AVERMEDIA_A835B_4835 0x4835
-#define USB_PID_AVERMEDIA_1867 0x1867
-#define USB_PID_AVERMEDIA_A867 0xa867
-#define USB_PID_AVERMEDIA_H335 0x0335
-#define USB_PID_AVERMEDIA_TD110 0xa110
-#define USB_PID_AVERMEDIA_TD310 0x1871
-#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
-#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
-#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
-#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
-#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
-#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
+#define USB_PID_PROF_7500 0x7500
+#define USB_PID_PROLECTRIX_DV107669 0xd803
+#define USB_PID_REALTEK_RTL2831U 0x2831
+#define USB_PID_REALTEK_RTL2832U 0x2832
+#define USB_PID_SIGMATEK_DVB_110 0x6610
+#define USB_PID_SONY_PLAYTV 0x0003
+#define USB_PID_SVEON_STV20 0xe39d
+#define USB_PID_SVEON_STV20_RTL2832U 0xd39d
+#define USB_PID_SVEON_STV21 0xd3b0
+#define USB_PID_SVEON_STV22 0xe401
+#define USB_PID_SVEON_STV22_IT9137 0xe411
+#define USB_PID_SVEON_STV27 0xd3af
+#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
+#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
+#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
+#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
+#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
-#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
+#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
+#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
+#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
+#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017
+#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TELESTAR_STARSTICK_2 0x8000
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
-#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060
-#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
-#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
-#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
+#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
+#define USB_PID_TERRATEC_CINERGY_S 0x0064
+#define USB_PID_TERRATEC_CINERGY_S2_1 0x1181
+#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182
+#define USB_PID_TERRATEC_CINERGY_S2_BOX 0x0105
#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8
#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0
#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102
#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105
-#define USB_PID_TERRATEC_CINERGY_S2_1 0x1181
-#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182
+#define USB_PID_TERRATEC_CINERGY_T2 0x0038
+#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2
+#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
+#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
+#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
+#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
+#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
+#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
+#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
+#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
#define USB_PID_TERRATEC_H7 0x10b4
#define USB_PID_TERRATEC_H7_2 0x10a3
#define USB_PID_TERRATEC_H7_3 0x10a5
#define USB_PID_TERRATEC_T1 0x10ae
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
-#define USB_PID_NOXON_DAB_STICK 0x00b3
-#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
-#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
-#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
-#define USB_PID_PINNACLE_PCTV2000E 0x022c
-#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
-#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229
-#define USB_PID_PINNACLE_PCTV71E 0x022b
-#define USB_PID_PINNACLE_PCTV72E 0x0236
-#define USB_PID_PINNACLE_PCTV73E 0x0237
-#define USB_PID_PINNACLE_PCTV310E 0x3211
-#define USB_PID_PINNACLE_PCTV801E 0x023a
-#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
-#define USB_PID_PINNACLE_PCTV340E 0x023d
-#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
-#define USB_PID_PINNACLE_PCTV73A 0x0243
-#define USB_PID_PINNACLE_PCTV73ESE 0x0245
-#define USB_PID_PINNACLE_PCTV74E 0x0246
-#define USB_PID_PINNACLE_PCTV282E 0x0248
-#define USB_PID_PIXELVIEW_SBTVD 0x5010
-#define USB_PID_PCTV_200E 0x020e
-#define USB_PID_PCTV_400E 0x020f
-#define USB_PID_PCTV_450E 0x0222
-#define USB_PID_PCTV_452E 0x021f
-#define USB_PID_PCTV_78E 0x025a
-#define USB_PID_PCTV_79E 0x0262
-#define USB_PID_REALTEK_RTL2831U 0x2831
-#define USB_PID_REALTEK_RTL2832U 0x2832
-#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
-#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
-#define USB_PID_NEBULA_DIGITV 0x0201
-#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
-#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
-#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
-#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
-#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
-#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
-#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
-#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50
-#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
-#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
-#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
-#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78
-#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98
-#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70
-#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71
-#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
-#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
-#define USB_PID_MEDION_MD95700 0x0932
-#define USB_PID_MSI_MEGASKY580 0x5580
-#define USB_PID_MSI_MEGASKY580_55801 0x5581
-#define USB_PID_KYE_DVB_T_COLD 0x701e
-#define USB_PID_KYE_DVB_T_WARM 0x701f
-#define USB_PID_LITEON_DVB_T_COLD 0xf000
-#define USB_PID_LITEON_DVB_T_WARM 0xf001
-#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360
-#define USB_PID_DIGIVOX_MINI_SL_WARM 0xe361
-#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
-#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
+#define USB_PID_TEVII_S421 0xd421
+#define USB_PID_TEVII_S480_1 0xd481
+#define USB_PID_TEVII_S480_2 0xd482
+#define USB_PID_TEVII_S482_1 0xd483
+#define USB_PID_TEVII_S482_2 0xd484
+#define USB_PID_TEVII_S630 0xd630
+#define USB_PID_TEVII_S632 0xd632
+#define USB_PID_TEVII_S650 0xd650
+#define USB_PID_TEVII_S660 0xd660
+#define USB_PID_TEVII_S662 0xd662
+#define USB_PID_TINYTWIN 0x3226
+#define USB_PID_TINYTWIN_2 0xe402
+#define USB_PID_TINYTWIN_3 0x9016
+#define USB_PID_TREKSTOR_DVBT 0x901b
+#define USB_PID_TREKSTOR_TERRES_2_0 0xC803
+#define USB_PID_TURBOX_DTT_2000 0xd3a4
+#define USB_PID_TWINHAN_VP7021_WARM 0x3208
+#define USB_PID_TWINHAN_VP7041_COLD 0x3201
+#define USB_PID_TWINHAN_VP7041_WARM 0x3202
+#define USB_PID_ULTIMA_ARTEC_T14BR 0x810f
+#define USB_PID_ULTIMA_ARTEC_T14_COLD 0x810b
+#define USB_PID_ULTIMA_ARTEC_T14_WARM 0x810c
+#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107
+#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108
+#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235
+#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
+#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109
+#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613
+#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002
+#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a
+#define USB_PID_ULTIMA_TVBOX_WARM 0x8106
+#define USB_PID_UNIWILL_STK7700P 0x6003
+#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e
+#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f
+#define USB_PID_VISIONPLUS_PINNACLE_PCTV310E 0x3211
+#define USB_PID_VISIONPLUS_TINYUSB2_COLD 0x3223
+#define USB_PID_VISIONPLUS_TINYUSB2_WARM 0x3224
+#define USB_PID_VISIONPLUS_VP7020_COLD 0x3203
+#define USB_PID_VISIONPLUS_VP7020_WARM 0x3204
+#define USB_PID_VISIONPLUS_VP7021_COLD 0x3207
+#define USB_PID_VISIONPLUS_VP7041_COLD 0x3201
+#define USB_PID_VISIONPLUS_VP7041_WARM 0x3202
+#define USB_PID_VISIONPLUS_VP7045_COLD 0x3205
+#define USB_PID_VISIONPLUS_VP7045_WARM 0x3206
+#define USB_PID_WIDEVIEW_DTT200U_COLD 0x0201
+#define USB_PID_WIDEVIEW_DTT200U_WARM 0x0301
+#define USB_PID_WIDEVIEW_DVBT_USB_COLD 0x0001
+#define USB_PID_WIDEVIEW_DVBT_USB_WARM 0x0002
+#define USB_PID_WIDEVIEW_WT220U_COLD 0x0222
+#define USB_PID_WIDEVIEW_WT220U_FC_COLD 0x0225
+#define USB_PID_WIDEVIEW_WT220U_FC_WARM 0x0226
+#define USB_PID_WIDEVIEW_WT220U_WARM 0x0221
+#define USB_PID_WIDEVIEW_WT220U_ZAP250_COLD 0x0220
+#define USB_PID_WIDEVIEW_WT220U_ZL0353_COLD 0x022a
+#define USB_PID_WIDEVIEW_WT220U_ZL0353_WARM 0x022b
#define USB_PID_WINFAST_DTV2000DS 0x6a04
#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12
-#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
-#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
-#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
-#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
-#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
-#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
+#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f
-#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
-#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
-#define USB_PID_GENPIX_8PSK_REV_2 0x0202
-#define USB_PID_GENPIX_SKYWALKER_1 0x0203
-#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
-#define USB_PID_GENPIX_SKYWALKER_2 0x0206
-#define USB_PID_SIGMATEK_DVB_110 0x6610
-#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
-#define USB_PID_MSI_DIGIVOX_DUO 0x8801
-#define USB_PID_OPERA1_COLD 0x2830
-#define USB_PID_OPERA1_WARM 0x3829
-#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514
-#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513
-#define USB_PID_GIGABYTE_U7000 0x7001
-#define USB_PID_GIGABYTE_U8000 0x7002
-#define USB_PID_ASUS_U3000 0x171f
-#define USB_PID_ASUS_U3000H 0x1736
-#define USB_PID_ASUS_U3100 0x173f
-#define USB_PID_ASUS_U3100MINI_PLUS 0x1779
+#define USB_PID_WINTV_SOLOHD 0x0264
+#define USB_PID_WINTV_SOLOHD_2 0x8268
+#define USB_PID_XTENSIONS_XD_380 0x0381
#define USB_PID_YUAN_EC372S 0x1edc
-#define USB_PID_YUAN_STK7700PH 0x1f08
-#define USB_PID_YUAN_PD378S 0x2edc
#define USB_PID_YUAN_MC770 0x0871
+#define USB_PID_YUAN_PD378S 0x2edc
#define USB_PID_YUAN_STK7700D 0x1efc
-#define USB_PID_YUAN_STK7700D_2 0x1e8c
-#define USB_PID_DW2102 0x2102
-#define USB_PID_DW2104 0x2104
-#define USB_PID_DW3101 0x3101
-#define USB_PID_XTENSIONS_XD_380 0x0381
-#define USB_PID_TELESTAR_STARSTICK_2 0x8000
-#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
-#define USB_PID_SONY_PLAYTV 0x0003
-#define USB_PID_MYGICA_D689 0xd811
-#define USB_PID_MYGICA_T230 0xc688
-#define USB_PID_MYGICA_T230C 0xc689
-#define USB_PID_MYGICA_T230C2 0xc68a
-#define USB_PID_MYGICA_T230C_LITE 0xc699
-#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
-#define USB_PID_ELGATO_EYETV_DTT 0x0021
-#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
-#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
-#define USB_PID_ELGATO_EYETV_SAT 0x002a
-#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
-#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
-#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
-#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM 0x5001
-#define USB_PID_FRIIO_WHITE 0x0001
-#define USB_PID_TVWAY_PLUS 0x0002
-#define USB_PID_SVEON_STV20 0xe39d
-#define USB_PID_SVEON_STV20_RTL2832U 0xd39d
-#define USB_PID_SVEON_STV21 0xd3b0
-#define USB_PID_SVEON_STV22 0xe401
-#define USB_PID_SVEON_STV22_IT9137 0xe411
-#define USB_PID_AZUREWAVE_AZ6027 0x3275
-#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
-#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
-#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
-#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
-#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
-#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
-#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
-#define USB_PID_CPYTO_REDI_PC50A 0xa803
-#define USB_PID_CTVDIGDUAL_V2 0xe410
-#define USB_PID_PCTV_2002E 0x025c
-#define USB_PID_PCTV_2002E_SE 0x025d
-#define USB_PID_SVEON_STV27 0xd3af
-#define USB_PID_TURBOX_DTT_2000 0xd3a4
-#define USB_PID_WINTV_SOLOHD 0x0264
-#define USB_PID_WINTV_SOLOHD_2 0x8268
-#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
-#define USB_PID_HAMA_DVBT_HYBRID 0x2758
-#define USB_PID_XBOX_ONE_TUNER 0x02d5
-#define USB_PID_PROLECTRIX_DV107669 0xd803
+#define USB_PID_YUAN_STK7700D_2 0x1e8c
+#define USB_PID_YUAN_STK7700PH 0x1f08
+
#endif
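
Beyond alphabetizing the ID lists, this hunk introduces DVB_USB_DEV()/DVB_USB_DEV_VER(), which expand to designated-initializer entries of a usb_device_id table indexed by the product-ID symbol. Note that the macro's parameter names are misleadingly swapped: the first argument gets the USB_VID_ prefix and the second the USB_PID_ prefix. A hedged sketch of a driver-side table (the enum constant is invented for illustration):

#include <linux/module.h>
#include <linux/usb.h>
#include <media/dvb-usb-ids.h>

/* Sketch only: a converted driver defines an enum naming its table
 * slots; the same identifier doubles as the USB_PID_* suffix. */
enum {
	MEDION_MD95700,	/* hypothetical slot; expands USB_PID_MEDION_MD95700 */
};

static const struct usb_device_id example_id_table[] = {
	DVB_USB_DEV(MEDION, MEDION_MD95700),
	{ /* terminating entry */ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);
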
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 0d76fa4551b3..e7c44870f20d 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -364,6 +364,10 @@ struct dvb_frontend_internal_info {
* allocated by the driver.
* @init: callback function used to initialize the tuner device.
* @sleep: callback function used to put the tuner to sleep.
+ * @suspend: callback function used to inform that the Kernel will
+ * suspend.
+ * @resume: callback function used to inform that the Kernel is
+ * resuming from suspend.
* @write: callback function used by some demod legacy drivers to
* allow other drivers to write data into their registers.
* Should not be used on new drivers.
@@ -443,6 +447,8 @@ struct dvb_frontend_ops {
int (*init)(struct dvb_frontend* fe);
int (*sleep)(struct dvb_frontend* fe);
+ int (*suspend)(struct dvb_frontend *fe);
+ int (*resume)(struct dvb_frontend *fe);
int (*write)(struct dvb_frontend* fe, const u8 buf[], int len);
@@ -755,7 +761,8 @@ void dvb_frontend_detach(struct dvb_frontend *fe);
* &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise,
* it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available.
*
- * It will also call &dvb_frontend_ops.sleep\(\) to put the demod to suspend.
+ * It will also call &dvb_frontend_ops.suspend\(\) to suspend the demod,
+ * if available. Otherwise it will call &dvb_frontend_ops.sleep\(\).
*
* The drivers should also call dvb_frontend_suspend\(\) as part of their
* handler for the &device_driver.suspend\(\).
@@ -769,7 +776,9 @@ int dvb_frontend_suspend(struct dvb_frontend *fe);
*
* This function resumes the usual operation of the tuner after resume.
*
- * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init\(\).
+ * In order to resume the frontend, it calls the demod
+ * &dvb_frontend_ops.resume\(\) if available. Otherwise it calls demod
+ * &dvb_frontend_ops.init\(\).
*
* If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it.
* Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available.
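
The new suspend/resume demod callbacks slot in beside init/sleep; per the updated kerneldoc above, dvb_frontend_suspend() and dvb_frontend_resume() prefer them and fall back to sleep()/init() when they are NULL. A hedged sketch of a driver opting in (mydemod_* are invented names):

/* Sketch only: a demod driver wiring the new power-management ops. */
static int mydemod_suspend(struct dvb_frontend *fe)
{
	return 0;	/* e.g. enter a deeper power-down than sleep() */
}

static int mydemod_resume(struct dvb_frontend *fe)
{
	return 0;	/* e.g. restore state without a full init() */
}

static const struct dvb_frontend_ops mydemod_ops = {
	.suspend = mydemod_suspend,
	.resume  = mydemod_resume,
	/* .init, .sleep, and the remaining ops elided */
};
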
diff --git a/include/media/dvb_net.h b/include/media/dvb_net.h
index 5e31d37f25fa..4a921ea96091 100644
--- a/include/media/dvb_net.h
+++ b/include/media/dvb_net.h
@@ -19,13 +19,11 @@
#define _DVB_NET_H_
#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
#include <media/dvbdev.h>
+struct net_device;
+
#define DVB_NET_DEVICES_MAX 10
#ifdef CONFIG_DVB_NET
@@ -41,6 +39,9 @@
* @exit: flag to indicate when the device is being removed.
* @demux: pointer to &struct dmx_demux.
* @ioctl_mutex: protect access to this struct.
+ * @remove_mutex: mutex that protects against a race between the
+ * callback invoked when the hardware is disconnected
+ * and the file_operations of dvb_net.
*
* Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
* devices.
@@ -53,6 +54,7 @@ struct dvb_net {
unsigned int exit:1;
struct dmx_demux *demux;
struct mutex ioctl_mutex;
+ struct mutex remove_mutex;
};
/**
diff --git a/include/media/dvb_ringbuffer.h b/include/media/dvb_ringbuffer.h
index 8ed6bcc3a56e..029c8b615e49 100644
--- a/include/media/dvb_ringbuffer.h
+++ b/include/media/dvb_ringbuffer.h
@@ -214,7 +214,7 @@ extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
* @buf: Buffer to write.
* @len: Length of buffer (currently limited to 65535 bytes max).
*
- * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EVINAL.
+ * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
*/
extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf,
size_t len);
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index e547cbeee431..e5a00d126612 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -87,7 +87,11 @@ struct dvb_frontend;
* @device: pointer to struct device
* @module: pointer to struct module
* @mfe_shared: indicates mutually exclusive frontends.
- * Use of this flag is currently deprecated.
+ * 1 = legacy exclusion behavior: blocking any open() call
+ * 2 = enhanced exclusion behavior, emulating the standard
+ * behavior of busy frontends: allowing read-only sharing
+ * and otherwise returning immediately with -EBUSY when any
+ * of the frontends is already opened with write access.
* @mfe_dvbdev: Frontend device in use, in the case of MFE
* @mfe_lock: Lock to prevent using the other frontends when MFE is
* used.
@@ -126,6 +130,7 @@ struct dvb_adapter {
* struct dvb_device - represents a DVB device node
*
* @list_head: List head with all DVB devices
+ * @ref: reference count for this device
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
* @type: type of the device, as defined by &enum dvb_device_type.
@@ -156,6 +161,7 @@ struct dvb_adapter {
*/
struct dvb_device {
struct list_head list_head;
+ struct kref ref;
const struct file_operations *fops;
struct dvb_adapter *adapter;
enum dvb_device_type type;
@@ -188,6 +194,35 @@ struct dvb_device {
};
/**
+ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
+ *
+ * @fops: Dynamically allocated fops for ->owner registration
+ * @type: type of dvb_device
+ * @template: dvb_device used for registration
+ * @list_head: list_head for dvbdevfops_list
+ */
+struct dvbdevfops_node {
+ struct file_operations *fops;
+ enum dvb_device_type type;
+ const struct dvb_device *template;
+ struct list_head list_head;
+};
+
+/**
+ * dvb_device_get - Increase dvb_device reference
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+struct dvb_device *dvb_device_get(struct dvb_device *dvbdev);
+
+/**
+ * dvb_device_put - Decrease dvb_device reference
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_device_put(struct dvb_device *dvbdev);
+
+/**
* dvb_register_adapter - Registers a new DVB adapter
*
* @adap: pointer to struct dvb_adapter
@@ -231,29 +266,17 @@ int dvb_register_device(struct dvb_adapter *adap,
/**
* dvb_remove_device - Remove a registered DVB device
*
- * This does not free memory. To do that, call dvb_free_device().
- *
* @dvbdev: pointer to struct dvb_device
+ *
+ * This does not free memory. dvb_free_device() will do that when
+ * the reference count drops to zero.
*/
void dvb_remove_device(struct dvb_device *dvbdev);
-/**
- * dvb_free_device - Free memory occupied by a DVB device.
- *
- * Call dvb_unregister_device() before calling this function.
- *
- * @dvbdev: pointer to struct dvb_device
- */
-void dvb_free_device(struct dvb_device *dvbdev);
/**
* dvb_unregister_device - Unregisters a DVB device
*
- * This is a combination of dvb_remove_device() and dvb_free_device().
- * Using this function is usually a mistake, and is often an indicator
- * for a use-after-free bug (when a userspace process keeps a file
- * handle to a detached device).
- *
* @dvbdev: pointer to struct dvb_device
*/
void dvb_unregister_device(struct dvb_device *dvbdev);
@@ -321,7 +344,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
int dvb_generic_open(struct inode *inode, struct file *file);
/**
- * dvb_generic_close - Digital TV close function, used by DVB devices
+ * dvb_generic_release - Digital TV close function, used by DVB devices
*
* @inode: pointer to &struct inode.
* @file: pointer to &struct file.
@@ -421,7 +444,7 @@ void dvb_module_release(struct i2c_client *client);
* dvb_attach - attaches a DVB frontend into the DVB core.
*
* @FUNCTION: function on a frontend module to be called.
- * @ARGS...: @FUNCTION arguments.
+ * @ARGS: @FUNCTION arguments.
*
* This ancillary function loads a frontend module in runtime and runs
* the @FUNCTION function there, with @ARGS.
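
Together with struct dvb_device gaining a kref, the API above replaces the old remove-plus-free pairing with get/put lifetime management: dvb_remove_device() only unregisters the node, and the memory is released when the last reference is dropped. A hedged sketch of the intended calling pattern (not a verbatim excerpt from dvb-core):

/* Sketch only: hold a reference across use, drop it when done. */
struct dvb_device *ref = dvb_device_get(dvbdev);

/* ... ref stays valid here even if the hardware is unplugged ... */

dvb_device_put(ref);	/* the last put frees the device */
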
diff --git a/include/media/frame_vector.h b/include/media/frame_vector.h
new file mode 100644
index 000000000000..541c71a2c7be
--- /dev/null
+++ b/include/media/frame_vector.h
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _MEDIA_FRAME_VECTOR_H
+#define _MEDIA_FRAME_VECTOR_H
+
+/* Container for pinned pfns / pages in frame_vector.c */
+struct frame_vector {
+ unsigned int nr_allocated; /* Number of frames we have space for */
+ unsigned int nr_frames; /* Number of frames stored in ptrs array */
+ bool got_ref; /* Did we pin pages by getting page ref? */
+ bool is_pfns; /* Does array contain pages or pfns? */
+ void *ptrs[]; /* Array of pinned pfns / pages. Use
+ * frame_vector_pages() or frame_vector_pfns()
+ * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+ bool write, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+ return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+ if (vec->is_pfns) {
+ int err = frame_vector_to_pages(vec);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+ return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+ if (!vec->is_pfns)
+ frame_vector_to_pfns(vec);
+ return (unsigned long *)(vec->ptrs);
+}
+
+#endif /* _MEDIA_FRAME_VECTOR_H */
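
The new header carries the frame-vector helpers relocated into the media tree; the typical lifecycle is create, pin, view as pages or PFNs, unpin, destroy. A hedged usage sketch (pin_user_range is an invented wrapper, not part of this patch):

#include <linux/err.h>
#include <media/frame_vector.h>

/* Sketch only: pin a user address range, view it as pages, unpin. */
static int pin_user_range(unsigned long start, unsigned int nr_frames)
{
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(nr_frames);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(start, nr_frames, true, vec);	/* pin for write */
	if (ret < 0)
		goto out_destroy;

	pages = frame_vector_pages(vec);	/* converts pfns to pages if needed */
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put;
	}

	/* ... map or access frame_vector_count(vec) pinned pages ... */
	ret = 0;
out_put:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}
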
diff --git a/include/media/fwht-ctrls.h b/include/media/fwht-ctrls.h
deleted file mode 100644
index 615027410e47..000000000000
--- a/include/media/fwht-ctrls.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the FWHT state controls for use with stateless FWHT
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _FWHT_CTRLS_H_
-#define _FWHT_CTRLS_H_
-
-#define V4L2_CTRL_TYPE_FWHT_PARAMS 0x0105
-
-#define V4L2_CID_MPEG_VIDEO_FWHT_PARAMS (V4L2_CID_MPEG_BASE + 292)
-
-struct v4l2_ctrl_fwht_params {
- __u64 backward_ref_ts;
- __u32 version;
- __u32 width;
- __u32 height;
- __u32 flags;
- __u32 colorspace;
- __u32 xfer_func;
- __u32 ycbcr_enc;
- __u32 quantization;
-};
-
-
-#endif
diff --git a/include/media/h264-ctrls.h b/include/media/h264-ctrls.h
deleted file mode 100644
index 080fd1293c42..000000000000
--- a/include/media/h264-ctrls.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the H.264 state controls for use with stateless H.264
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _H264_CTRLS_H_
-#define _H264_CTRLS_H_
-
-#include <linux/videodev2.h>
-
-/*
- * Maximum DPB size, as specified by section 'A.3.1 Level limits
- * common to the Baseline, Main, and Extended profiles'.
- */
-#define V4L2_H264_NUM_DPB_ENTRIES 16
-
-/* Our pixel format isn't stable at the moment */
-#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
-
-/*
- * This is put insanely high to avoid conflicting with controls that
- * would be added during the phase where those controls are not
- * stable. It should be fixed eventually.
- */
-#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+1000)
-#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+1001)
-#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+1002)
-#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (V4L2_CID_MPEG_BASE+1003)
-#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS (V4L2_CID_MPEG_BASE+1004)
-#define V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE (V4L2_CID_MPEG_BASE+1005)
-#define V4L2_CID_MPEG_VIDEO_H264_START_CODE (V4L2_CID_MPEG_BASE+1006)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_H264_SPS 0x0110
-#define V4L2_CTRL_TYPE_H264_PPS 0x0111
-#define V4L2_CTRL_TYPE_H264_SCALING_MATRIX 0x0112
-#define V4L2_CTRL_TYPE_H264_SLICE_PARAMS 0x0113
-#define V4L2_CTRL_TYPE_H264_DECODE_PARAMS 0x0114
-
-enum v4l2_mpeg_video_h264_decode_mode {
- V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED,
- V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
-};
-
-enum v4l2_mpeg_video_h264_start_code {
- V4L2_MPEG_VIDEO_H264_START_CODE_NONE,
- V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
-};
-
-#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
-#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
-#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
-#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
-#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
-#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
-
-#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
-#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
-#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
-#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
-#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
-#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
-#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
-
-struct v4l2_ctrl_h264_sps {
- __u8 profile_idc;
- __u8 constraint_set_flags;
- __u8 level_idc;
- __u8 seq_parameter_set_id;
- __u8 chroma_format_idc;
- __u8 bit_depth_luma_minus8;
- __u8 bit_depth_chroma_minus8;
- __u8 log2_max_frame_num_minus4;
- __u8 pic_order_cnt_type;
- __u8 log2_max_pic_order_cnt_lsb_minus4;
- __u8 max_num_ref_frames;
- __u8 num_ref_frames_in_pic_order_cnt_cycle;
- __s32 offset_for_ref_frame[255];
- __s32 offset_for_non_ref_pic;
- __s32 offset_for_top_to_bottom_field;
- __u16 pic_width_in_mbs_minus1;
- __u16 pic_height_in_map_units_minus1;
- __u32 flags;
-};
-
-#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
-#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
-#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
-#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
-#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
-#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
-#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
-#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT 0x0080
-
-struct v4l2_ctrl_h264_pps {
- __u8 pic_parameter_set_id;
- __u8 seq_parameter_set_id;
- __u8 num_slice_groups_minus1;
- __u8 num_ref_idx_l0_default_active_minus1;
- __u8 num_ref_idx_l1_default_active_minus1;
- __u8 weighted_bipred_idc;
- __s8 pic_init_qp_minus26;
- __s8 pic_init_qs_minus26;
- __s8 chroma_qp_index_offset;
- __s8 second_chroma_qp_index_offset;
- __u16 flags;
-};
-
-struct v4l2_ctrl_h264_scaling_matrix {
- __u8 scaling_list_4x4[6][16];
- __u8 scaling_list_8x8[6][64];
-};
-
-struct v4l2_h264_weight_factors {
- __s16 luma_weight[32];
- __s16 luma_offset[32];
- __s16 chroma_weight[32][2];
- __s16 chroma_offset[32][2];
-};
-
-struct v4l2_h264_pred_weight_table {
- __u16 luma_log2_weight_denom;
- __u16 chroma_log2_weight_denom;
- struct v4l2_h264_weight_factors weight_factors[2];
-};
-
-#define V4L2_H264_SLICE_TYPE_P 0
-#define V4L2_H264_SLICE_TYPE_B 1
-#define V4L2_H264_SLICE_TYPE_I 2
-#define V4L2_H264_SLICE_TYPE_SP 3
-#define V4L2_H264_SLICE_TYPE_SI 4
-
-#define V4L2_H264_SLICE_FLAG_FIELD_PIC 0x01
-#define V4L2_H264_SLICE_FLAG_BOTTOM_FIELD 0x02
-#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x04
-#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x08
-
-struct v4l2_ctrl_h264_slice_params {
- /* Size in bytes, including header */
- __u32 size;
-
- /* Offset in bytes to the start of slice in the OUTPUT buffer. */
- __u32 start_byte_offset;
-
- /* Offset in bits to slice_data() from the beginning of this slice. */
- __u32 header_bit_size;
-
- __u16 first_mb_in_slice;
- __u8 slice_type;
- __u8 pic_parameter_set_id;
- __u8 colour_plane_id;
- __u8 redundant_pic_cnt;
- __u16 frame_num;
- __u16 idr_pic_id;
- __u16 pic_order_cnt_lsb;
- __s32 delta_pic_order_cnt_bottom;
- __s32 delta_pic_order_cnt0;
- __s32 delta_pic_order_cnt1;
-
- struct v4l2_h264_pred_weight_table pred_weight_table;
- /* Size in bits of dec_ref_pic_marking() syntax element. */
- __u32 dec_ref_pic_marking_bit_size;
- /* Size in bits of pic order count syntax. */
- __u32 pic_order_cnt_bit_size;
-
- __u8 cabac_init_idc;
- __s8 slice_qp_delta;
- __s8 slice_qs_delta;
- __u8 disable_deblocking_filter_idc;
- __s8 slice_alpha_c0_offset_div2;
- __s8 slice_beta_offset_div2;
- __u8 num_ref_idx_l0_active_minus1;
- __u8 num_ref_idx_l1_active_minus1;
- __u32 slice_group_change_cycle;
-
- /*
- * Entries on each list are indices into
- * v4l2_ctrl_h264_decode_params.dpb[].
- */
- __u8 ref_pic_list0[32];
- __u8 ref_pic_list1[32];
-
- __u32 flags;
-};
-
-#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01
-#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02
-#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04
-#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08
-#define V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD 0x10
-
-struct v4l2_h264_dpb_entry {
- __u64 reference_ts;
- __u16 frame_num;
- __u16 pic_num;
- /* Note that field is indicated by v4l2_buffer.field */
- __s32 top_field_order_cnt;
- __s32 bottom_field_order_cnt;
- __u32 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
-};
-
-#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
-
-struct v4l2_ctrl_h264_decode_params {
- struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES];
- __u16 num_slices;
- __u16 nal_ref_idc;
- __s32 top_field_order_cnt;
- __s32 bottom_field_order_cnt;
- __u32 flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */
-};
-
-#endif
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
deleted file mode 100644
index 1009cf0891cc..000000000000
--- a/include/media/hevc-ctrls.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the HEVC state controls for use with stateless HEVC
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _HEVC_CTRLS_H_
-#define _HEVC_CTRLS_H_
-
-#include <linux/videodev2.h>
-
-/* The pixel format isn't stable at the moment and will likely be renamed. */
-#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
-
-#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008)
-#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009)
-#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010)
-#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015)
-#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
-#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
-#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
-
-enum v4l2_mpeg_video_hevc_decode_mode {
- V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
- V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
-};
-
-enum v4l2_mpeg_video_hevc_start_code {
- V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
- V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
-};
-
-#define V4L2_HEVC_SLICE_TYPE_B 0
-#define V4L2_HEVC_SLICE_TYPE_P 1
-#define V4L2_HEVC_SLICE_TYPE_I 2
-
-#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
-#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
-#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
-#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
-#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
-#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
-#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
-#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
-#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
-
-/* The controls are not stable at the moment and will likely be reworked. */
-struct v4l2_ctrl_hevc_sps {
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Sequence parameter set */
- __u16 pic_width_in_luma_samples;
- __u16 pic_height_in_luma_samples;
- __u8 bit_depth_luma_minus8;
- __u8 bit_depth_chroma_minus8;
- __u8 log2_max_pic_order_cnt_lsb_minus4;
- __u8 sps_max_dec_pic_buffering_minus1;
- __u8 sps_max_num_reorder_pics;
- __u8 sps_max_latency_increase_plus1;
- __u8 log2_min_luma_coding_block_size_minus3;
- __u8 log2_diff_max_min_luma_coding_block_size;
- __u8 log2_min_luma_transform_block_size_minus2;
- __u8 log2_diff_max_min_luma_transform_block_size;
- __u8 max_transform_hierarchy_depth_inter;
- __u8 max_transform_hierarchy_depth_intra;
- __u8 pcm_sample_bit_depth_luma_minus1;
- __u8 pcm_sample_bit_depth_chroma_minus1;
- __u8 log2_min_pcm_luma_coding_block_size_minus3;
- __u8 log2_diff_max_min_pcm_luma_coding_block_size;
- __u8 num_short_term_ref_pic_sets;
- __u8 num_long_term_ref_pics_sps;
- __u8 chroma_format_idc;
-
- __u8 padding;
-
- __u64 flags;
-};
-
-#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
-#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
-#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
-#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
-#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
-#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
-#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
-#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
-#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
-#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
-#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
-#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
-#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
-#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
-#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
-#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
-#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
-#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
-#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
-
-struct v4l2_ctrl_hevc_pps {
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
- __u8 num_extra_slice_header_bits;
- __s8 init_qp_minus26;
- __u8 diff_cu_qp_delta_depth;
- __s8 pps_cb_qp_offset;
- __s8 pps_cr_qp_offset;
- __u8 num_tile_columns_minus1;
- __u8 num_tile_rows_minus1;
- __u8 column_width_minus1[20];
- __u8 row_height_minus1[22];
- __s8 pps_beta_offset_div2;
- __s8 pps_tc_offset_div2;
- __u8 log2_parallel_merge_level_minus2;
-
- __u8 padding[4];
- __u64 flags;
-};
-
-#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01
-#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02
-#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03
-
-#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
-
-struct v4l2_hevc_dpb_entry {
- __u64 timestamp;
- __u8 rps;
- __u8 field_pic;
- __u16 pic_order_cnt[2];
- __u8 padding[2];
-};
-
-struct v4l2_hevc_pred_weight_table {
- __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
- __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
-
- __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
- __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
-
- __u8 padding[6];
-
- __u8 luma_log2_weight_denom;
- __s8 delta_chroma_log2_weight_denom;
-};
-
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
-#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
-
-struct v4l2_ctrl_hevc_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
- __u8 nal_unit_type;
- __u8 nuh_temporal_id_plus1;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- __u8 slice_type;
- __u8 colour_plane_id;
- __u16 slice_pic_order_cnt;
- __u8 num_ref_idx_l0_active_minus1;
- __u8 num_ref_idx_l1_active_minus1;
- __u8 collocated_ref_idx;
- __u8 five_minus_max_num_merge_cand;
- __s8 slice_qp_delta;
- __s8 slice_cb_qp_offset;
- __s8 slice_cr_qp_offset;
- __s8 slice_act_y_qp_offset;
- __s8 slice_act_cb_qp_offset;
- __s8 slice_act_cr_qp_offset;
- __s8 slice_beta_offset_div2;
- __s8 slice_tc_offset_div2;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
- __u8 pic_struct;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- __u8 num_active_dpb_entries;
- __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
-
- __u8 num_rps_poc_st_curr_before;
- __u8 num_rps_poc_st_curr_after;
- __u8 num_rps_poc_lt_curr;
-
- __u8 padding;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
- struct v4l2_hevc_pred_weight_table pred_weight_table;
-
- __u64 flags;
-};
-
-#endif
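
While these stateless HEVC controls lived in this private header they were consumed through the standard extended-control ioctls. A minimal userspace-style sketch of selecting slice-based decoding with the IDs defined above (the decoder file descriptor fd and the visibility of the private control IDs to the caller are assumptions):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: pick slice-based decoding on a stateless HEVC decoder fd. */
static int set_hevc_decode_mode(int fd)
{
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
		.value = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
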
diff --git a/include/media/i2c/ad9389b.h b/include/media/i2c/ad9389b.h
deleted file mode 100644
index 30f9ea9a1273..000000000000
--- a/include/media/i2c/ad9389b.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Analog Devices AD9389B/AD9889B video encoder driver header
- *
- * Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- */
-
-#ifndef AD9389B_H
-#define AD9389B_H
-
-enum ad9389b_tmds_pll_gear {
- AD9389B_TMDS_PLL_GEAR_AUTOMATIC,
- AD9389B_TMDS_PLL_GEAR_SEMI_AUTOMATIC,
-};
-
-/* Platform dependent definitions */
-struct ad9389b_platform_data {
- enum ad9389b_tmds_pll_gear tmds_pll_gear;
- /* Differential Data/Clock Output Drive Strength (reg. 0xa2/0xa3) */
- u8 diff_data_drive_strength;
- u8 diff_clk_drive_strength;
-};
-
-/* notify events */
-#define AD9389B_MONITOR_DETECT 0
-#define AD9389B_EDID_DETECT 1
-
-struct ad9389b_monitor_detect {
- int present;
-};
-
-struct ad9389b_edid_detect {
- int present;
- int segment;
-};
-
-#endif
diff --git a/include/media/i2c/adv7343.h b/include/media/i2c/adv7343.h
index e4142b1ef8cd..d35d3e925795 100644
--- a/include/media/i2c/adv7343.h
+++ b/include/media/i2c/adv7343.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADV7343 header file
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ADV7343_H
@@ -21,7 +13,7 @@
#define ADV7343_SVIDEO_ID (2)
/**
- * adv7343_power_mode - power mode configuration.
+ * struct adv7343_power_mode - power mode configuration.
 * @sleep_mode: when enabled, the current consumption is reduced to the microampere
* level. All DACs and the internal PLL circuit are disabled.
* Registers can be read from and written in sleep mode.
diff --git a/include/media/i2c/adv7393.h b/include/media/i2c/adv7393.h
index b28edf351842..c73b36321d06 100644
--- a/include/media/i2c/adv7393.h
+++ b/include/media/i2c/adv7393.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADV7393 header file
*
@@ -7,15 +8,6 @@
* Based on ADV7343 driver,
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ADV7393_H
diff --git a/include/media/i2c/ds90ub9xx.h b/include/media/i2c/ds90ub9xx.h
new file mode 100644
index 000000000000..0245198469ec
--- /dev/null
+++ b/include/media/i2c/ds90ub9xx.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEDIA_I2C_DS90UB9XX_H__
+#define __MEDIA_I2C_DS90UB9XX_H__
+
+#include <linux/types.h>
+
+struct i2c_atr;
+
+/**
+ * struct ds90ub9xx_platform_data - platform data for FPD-Link Serializers.
+ * @port: Deserializer RX port for this Serializer
+ * @atr: I2C ATR
+ * @bc_rate: back-channel clock rate
+ */
+struct ds90ub9xx_platform_data {
+ u32 port;
+ struct i2c_atr *atr;
+ unsigned long bc_rate;
+};
+
+#endif /* __MEDIA_I2C_DS90UB9XX_H__ */
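
A sketch of how a deserializer driver might pass this platform data when instantiating a remote serializer as an I2C client; the device name, address, and back-channel rate here are illustrative assumptions:

#include <linux/i2c.h>
#include <media/i2c/ds90ub9xx.h>

static struct ds90ub9xx_platform_data ser_pdata = {
	.port = 0,		/* deserializer RX port the serializer sits on */
	.bc_rate = 1843200,	/* assumed back-channel clock rate in Hz */
};

static const struct i2c_board_info ser_info = {
	I2C_BOARD_INFO("ds90ub913", 0x44),	/* hypothetical name/address */
	.platform_data = &ser_pdata,
};

/* ser_pdata.atr would be filled in at runtime with the I2C ATR handle
 * created by the deserializer driver before instantiating the client. */
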
diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h
index 9f47d6a48cff..0b58f8b9e7a4 100644
--- a/include/media/i2c/ir-kbd-i2c.h
+++ b/include/media/i2c/ir-kbd-i2c.h
@@ -35,6 +35,7 @@ enum ir_kbd_get_key_fn {
IR_KBD_GET_KEY_PIXELVIEW,
IR_KBD_GET_KEY_HAUP,
IR_KBD_GET_KEY_KNC1,
+ IR_KBD_GET_KEY_GENIATECH,
IR_KBD_GET_KEY_FUSIONHDTV,
IR_KBD_GET_KEY_HAUP_XVR,
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS,
diff --git a/include/media/i2c/m5mols.h b/include/media/i2c/m5mols.h
deleted file mode 100644
index 9cec5a09e125..000000000000
--- a/include/media/i2c/m5mols.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Driver header for M-5MOLS 8M Pixel camera sensor with ISP
- *
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- *
- * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
- */
-
-#ifndef MEDIA_M5MOLS_H
-#define MEDIA_M5MOLS_H
-
-/**
- * struct m5mols_platform_data - platform data for M-5MOLS driver
- * @gpio_reset: GPIO driving the reset pin of M-5MOLS
- * @reset_polarity: active state for gpio_reset pin, 0 or 1
- * @set_power: an additional callback to the board setup code
- * to be called after enabling and before disabling
- * the sensor's supply regulators
- */
-struct m5mols_platform_data {
- int gpio_reset;
- u8 reset_polarity;
- int (*set_power)(struct device *dev, int on);
-};
-
-#endif /* MEDIA_M5MOLS_H */
diff --git a/include/media/i2c/mt9m032.h b/include/media/i2c/mt9m032.h
deleted file mode 100644
index 1bd58757717a..000000000000
--- a/include/media/i2c/mt9m032.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for MT9M032 CMOS Image Sensor from Micron
- *
- * Copyright (C) 2010-2011 Lund Engineering
- * Contact: Gil Lund <gwlund@lundeng.com>
- * Author: Martin Hostettler <martin@neutronstar.dyndns.org>
- */
-
-#ifndef MT9M032_H
-#define MT9M032_H
-
-#define MT9M032_NAME "mt9m032"
-#define MT9M032_I2C_ADDR (0xb8 >> 1)
-
-struct mt9m032_platform_data {
- u32 ext_clock;
- u32 pix_clock;
- bool invert_pixclock;
-
-};
-#endif /* MT9M032_H */
diff --git a/include/media/i2c/mt9p031.h b/include/media/i2c/mt9p031.h
index 7c29c53aa988..f933cd0be8e5 100644
--- a/include/media/i2c/mt9p031.h
+++ b/include/media/i2c/mt9p031.h
@@ -10,6 +10,7 @@ struct v4l2_subdev;
* @target_freq: Pixel clock frequency
*/
struct mt9p031_platform_data {
+ unsigned int pixclk_pol:1;
int ext_freq;
int target_freq;
};
diff --git a/include/media/i2c/mt9t001.h b/include/media/i2c/mt9t001.h
deleted file mode 100644
index 4b1090554270..000000000000
--- a/include/media/i2c/mt9t001.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MEDIA_MT9T001_H
-#define _MEDIA_MT9T001_H
-
-struct mt9t001_platform_data {
- unsigned int clk_pol:1;
- unsigned int ext_clk;
-};
-
-#endif
diff --git a/include/media/i2c/mt9t112.h b/include/media/i2c/mt9t112.h
index cc80d5cc2104..825b4a169da8 100644
--- a/include/media/i2c/mt9t112.h
+++ b/include/media/i2c/mt9t112.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/* mt9t112 Camera
*
* Copyright (C) 2009 Renesas Solutions Corp.
@@ -14,7 +14,7 @@ struct mt9t112_pll_divider {
};
/**
- * mt9t112_platform_data - mt9t112 driver interface
+ * struct mt9t112_platform_data - mt9t112 driver interface
* @flags: Sensor media bus configuration.
* @divider: Sensor PLL configuration
*/
diff --git a/include/media/i2c/noon010pc30.h b/include/media/i2c/noon010pc30.h
deleted file mode 100644
index a035d2d9a564..000000000000
--- a/include/media/i2c/noon010pc30.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Driver header for NOON010PC30L camera sensor chip.
- *
- * Copyright (c) 2010 Samsung Electronics, Co. Ltd
- * Contact: Sylwester Nawrocki <s.nawrocki@samsung.com>
- */
-
-#ifndef NOON010PC30_H
-#define NOON010PC30_H
-
-/**
- * @clk_rate: the clock frequency in Hz
- * @gpio_nreset: GPIO driving nRESET pin
- * @gpio_nstby: GPIO driving nSTBY pin
- */
-
-struct noon010pc30_platform_data {
- unsigned long clk_rate;
- int gpio_nreset;
- int gpio_nstby;
-};
-
-#endif /* NOON010PC30_H */
diff --git a/include/media/i2c/ov2659.h b/include/media/i2c/ov2659.h
index 4216adc1ede2..c9ea318a8fc3 100644
--- a/include/media/i2c/ov2659.h
+++ b/include/media/i2c/ov2659.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Omnivision OV2659 CMOS Image Sensor driver
*
@@ -5,19 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef OV2659_H
diff --git a/include/media/i2c/ov772x.h b/include/media/i2c/ov772x.h
index a1702d420087..26f363ea4001 100644
--- a/include/media/i2c/ov772x.h
+++ b/include/media/i2c/ov772x.h
@@ -46,7 +46,7 @@ struct ov772x_edge_ctrl {
}
/**
- * ov772x_camera_info - ov772x driver interface structure
+ * struct ov772x_camera_info - ov772x driver interface structure
* @flags: Sensor configuration flags
* @edgectrl: Sensor edge control
*/
diff --git a/include/media/i2c/ov9650.h b/include/media/i2c/ov9650.h
deleted file mode 100644
index 3ec7e06955b4..000000000000
--- a/include/media/i2c/ov9650.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * OV9650/OV9652 camera sensors driver
- *
- * Copyright (C) 2013 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
- */
-#ifndef OV9650_H_
-#define OV9650_H_
-
-/**
- * struct ov9650_platform_data - ov9650 driver platform data
- * @mclk_frequency: the sensor's master clock frequency in Hz
- * @gpio_pwdn: number of a GPIO connected to OV965X PWDN pin
- * @gpio_reset: number of a GPIO connected to OV965X RESET pin
- *
- * If any of @gpio_pwdn or @gpio_reset are unused then they should be
- * set to a negative value. @mclk_frequency must always be specified.
- */
-struct ov9650_platform_data {
- unsigned long mclk_frequency;
- int gpio_pwdn;
- int gpio_reset;
-};
-#endif /* OV9650_H_ */
diff --git a/include/media/i2c/s5c73m3.h b/include/media/i2c/s5c73m3.h
deleted file mode 100644
index ccb9e5448762..000000000000
--- a/include/media/i2c/s5c73m3.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Samsung LSI S5C73M3 8M pixel camera driver
- *
- * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
- * Sylwester Nawrocki <s.nawrocki@samsung.com>
- * Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- */
-#ifndef MEDIA_S5C73M3__
-#define MEDIA_S5C73M3__
-
-#include <linux/videodev2.h>
-#include <media/v4l2-mediabus.h>
-
-/**
- * struct s5c73m3_gpio - data structure describing a GPIO
- * @gpio: GPIO number
- * @level: indicates active state of the @gpio
- */
-struct s5c73m3_gpio {
- int gpio;
- int level;
-};
-
-/**
- * struct s5c73m3_platform_data - s5c73m3 driver platform data
- * @mclk_frequency: sensor's master clock frequency in Hz
- * @gpio_reset: GPIO driving RESET pin
- * @gpio_stby: GPIO driving STBY pin
- * @nlanes: maximum number of MIPI-CSI lanes used
- * @horiz_flip: default horizontal image flip value, non zero to enable
- * @vert_flip: default vertical image flip value, non zero to enable
- */
-
-struct s5c73m3_platform_data {
- unsigned long mclk_frequency;
-
- struct s5c73m3_gpio gpio_reset;
- struct s5c73m3_gpio gpio_stby;
-
- enum v4l2_mbus_type bus_type;
- u8 nlanes;
- u8 horiz_flip;
- u8 vert_flip;
-};
-
-#endif /* MEDIA_S5C73M3__ */
diff --git a/include/media/i2c/s5k4ecgx.h b/include/media/i2c/s5k4ecgx.h
deleted file mode 100644
index fccb7be8ed8f..000000000000
--- a/include/media/i2c/s5k4ecgx.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * S5K4ECGX image sensor header file
- *
- * Copyright (C) 2012, Linaro
- * Copyright (C) 2012, Samsung Electronics Co., Ltd.
- */
-
-#ifndef S5K4ECGX_H
-#define S5K4ECGX_H
-
-/**
- * struct s5k4ecgx_gpio - data structure describing a GPIO
- * @gpio : GPIO number
- * @level: indicates active state of the @gpio
- */
-struct s5k4ecgx_gpio {
- int gpio;
- int level;
-};
-
-/**
- * struct s5k4ecgx_platform_data - s5k4ecgx driver platform data
- * @gpio_reset: GPIO driving RESET pin
- * @gpio_stby : GPIO driving STBY pin
- */
-
-struct s5k4ecgx_platform_data {
- struct s5k4ecgx_gpio gpio_reset;
- struct s5k4ecgx_gpio gpio_stby;
-};
-
-#endif /* S5K4ECGX_H */
diff --git a/include/media/i2c/s5k6aa.h b/include/media/i2c/s5k6aa.h
deleted file mode 100644
index fd78e85e8b78..000000000000
--- a/include/media/i2c/s5k6aa.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * S5K6AAFX camera sensor driver header
- *
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#ifndef S5K6AA_H
-#define S5K6AA_H
-
-#include <media/v4l2-mediabus.h>
-
-/**
- * struct s5k6aa_gpio - data structure describing a GPIO
- * @gpio: GPIO number
- * @level: indicates active state of the @gpio
- */
-struct s5k6aa_gpio {
- int gpio;
- int level;
-};
-
-/**
- * struct s5k6aa_platform_data - s5k6aa driver platform data
- * @set_power: an additional callback to the board code, called
- * after enabling the regulators and before switching
- * the sensor off
- * @mclk_frequency: sensor's master clock frequency in Hz
- * @gpio_reset: GPIO driving RESET pin
- * @gpio_stby: GPIO driving STBY pin
- * @nlanes: maximum number of MIPI-CSI lanes used
- * @horiz_flip: default horizontal image flip value, non zero to enable
- * @vert_flip: default vertical image flip value, non zero to enable
- */
-
-struct s5k6aa_platform_data {
- int (*set_power)(int enable);
- unsigned long mclk_frequency;
- struct s5k6aa_gpio gpio_reset;
- struct s5k6aa_gpio gpio_stby;
- enum v4l2_mbus_type bus_type;
- u8 nlanes;
- u8 horiz_flip;
- u8 vert_flip;
-};
-
-#endif /* S5K6AA_H */
diff --git a/include/media/i2c/sr030pc30.h b/include/media/i2c/sr030pc30.h
deleted file mode 100644
index 84c602d681fa..000000000000
--- a/include/media/i2c/sr030pc30.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Driver header for SR030PC30 camera sensor
- *
- * Copyright (c) 2010 Samsung Electronics, Co. Ltd
- * Contact: Sylwester Nawrocki <s.nawrocki@samsung.com>
- */
-
-#ifndef SR030PC30_H
-#define SR030PC30_H
-
-struct sr030pc30_platform_data {
- unsigned long clk_rate; /* master clock frequency in Hz */
- int (*set_power)(struct device *dev, int on);
-};
-
-#endif /* SR030PC30_H */
diff --git a/include/media/i2c/ths7303.h b/include/media/i2c/ths7303.h
index 95492d12786d..fee2818c558d 100644
--- a/include/media/i2c/ths7303.h
+++ b/include/media/i2c/ths7303.h
@@ -10,8 +10,8 @@
* Martin Bugge <marbugge@cisco.com>
*/
-#ifndef THS7353_H
-#define THS7353_H
+#ifndef THS7303_H
+#define THS7303_H
/**
* struct ths7303_platform_data - Platform dependent data
diff --git a/include/media/i2c/tvp514x.h b/include/media/i2c/tvp514x.h
index 0c1bb04bdbcb..837efff0a6a0 100644
--- a/include/media/i2c/tvp514x.h
+++ b/include/media/i2c/tvp514x.h
@@ -29,10 +29,7 @@
#define PAL_NUM_ACTIVE_PIXELS (720)
#define PAL_NUM_ACTIVE_LINES (576)
-/**
- * enum tvp514x_input - enum for different decoder input pin
- * configuration.
- */
+/* enum for different decoder input pin configuration */
enum tvp514x_input {
/*
* CVBS input selection
@@ -69,11 +66,7 @@ enum tvp514x_input {
INPUT_INVALID
};
-/**
- * enum tvp514x_output - enum for output format
- * supported.
- *
- */
+/* enum for output format supported. */
enum tvp514x_output {
OUTPUT_10BIT_422_EMBEDDED_SYNC = 0,
OUTPUT_20BIT_422_SEPERATE_SYNC,
diff --git a/include/media/i2c/tw9910.h b/include/media/i2c/tw9910.h
index 92d31bd1afe6..77da94f909e3 100644
--- a/include/media/i2c/tw9910.h
+++ b/include/media/i2c/tw9910.h
@@ -13,9 +13,7 @@
#ifndef __TW9910_H__
#define __TW9910_H__
-/**
- * tw9910_mpout_pin - MPOUT (multi-purpose output) pin functions
- */
+/* MPOUT (multi-purpose output) pin functions */
enum tw9910_mpout_pin {
TW9910_MPO_VLOSS,
TW9910_MPO_HLOCK,
@@ -28,10 +26,10 @@ enum tw9910_mpout_pin {
};
/**
- * tw9910_video_info - tw9910 driver interface structure
+ * struct tw9910_video_info - tw9910 driver interface structure
* @buswidth: Parallel data bus width (8 or 16).
* @mpout: Selected function of MPOUT (multi-purpose output) pin.
- * See &enum tw9910_mpout_pin
+ * See enum tw9910_mpout_pin
*/
struct tw9910_video_info {
unsigned long buswidth;
diff --git a/include/media/i2c/wm8775.h b/include/media/i2c/wm8775.h
index 83675817639e..6ccdeb3817ab 100644
--- a/include/media/i2c/wm8775.h
+++ b/include/media/i2c/wm8775.h
@@ -23,7 +23,7 @@
struct wm8775_platform_data {
/*
- * FIXME: Instead, we should parametrize the params
+ * FIXME: Instead, we should parameterize the params
* that need different settings between ivtv, pvrusb2, and Nova-S
*/
bool is_nova_s;
diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
new file mode 100644
index 000000000000..783bda6d5cc3
--- /dev/null
+++ b/include/media/ipu-bridge.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+#ifndef __IPU_BRIDGE_H
+#define __IPU_BRIDGE_H
+
+#include <linux/property.h>
+#include <linux/types.h>
+#include <media/v4l2-fwnode.h>
+
+#define IPU_HID "INT343E"
+#define IPU_MAX_LANES 4
+#define IPU_MAX_PORTS 4
+#define MAX_NUM_LINK_FREQS 3
+
+/* Values are educated guesses as we don't have a spec */
+#define IPU_SENSOR_ROTATION_NORMAL 0
+#define IPU_SENSOR_ROTATION_INVERTED 1
+
+#define IPU_SENSOR_CONFIG(_HID, _NR, ...) \
+ (const struct ipu_sensor_config) { \
+ .hid = _HID, \
+ .nr_link_freqs = _NR, \
+ .link_freqs = { __VA_ARGS__ } \
+ }
+
+#define NODE_SENSOR(_HID, _PROPS) \
+ (const struct software_node) { \
+ .name = _HID, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_PORT(_PORT, _SENSOR_NODE) \
+ (const struct software_node) { \
+ .name = _PORT, \
+ .parent = _SENSOR_NODE, \
+ }
+
+#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \
+ (const struct software_node) { \
+ .name = _EP, \
+ .parent = _PORT, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_VCM(_TYPE) \
+ (const struct software_node) { \
+ .name = _TYPE, \
+ }
+
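
A sketch of how the helper macros above are meant to be used when declaring supported sensors; the ACPI HIDs and link frequencies are made-up placeholders:

static const struct ipu_sensor_config ipu_supported_sensors[] = {
	/* Hypothetical sensor with a single link frequency */
	IPU_SENSOR_CONFIG("ABCD1234", 1, 419200000),
	/* Hypothetical sensor with two link frequencies */
	IPU_SENSOR_CONFIG("ABCD5678", 2, 176000000, 352000000),
};
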
+enum ipu_sensor_swnodes {
+ SWNODE_SENSOR_HID,
+ SWNODE_SENSOR_PORT,
+ SWNODE_SENSOR_ENDPOINT,
+ SWNODE_IPU_PORT,
+ SWNODE_IPU_ENDPOINT,
+ /* below are optional / maybe empty */
+ SWNODE_IVSC_HID,
+ SWNODE_IVSC_SENSOR_PORT,
+ SWNODE_IVSC_SENSOR_ENDPOINT,
+ SWNODE_IVSC_IPU_PORT,
+ SWNODE_IVSC_IPU_ENDPOINT,
+ SWNODE_VCM,
+ SWNODE_COUNT
+};
+
+/* Data representation as it is in ACPI SSDB buffer */
+struct ipu_sensor_ssdb {
+ u8 version;
+ u8 sku;
+ u8 guid_csi2[16];
+ u8 devfunction;
+ u8 bus;
+ u32 dphylinkenfuses;
+ u32 clockdiv;
+ u8 link;
+ u8 lanes;
+ u32 csiparams[10];
+ u32 maxlanespeed;
+ u8 sensorcalibfileidx;
+ u8 sensorcalibfileidxInMBZ[3];
+ u8 romtype;
+ u8 vcmtype;
+ u8 platforminfo;
+ u8 platformsubinfo;
+ u8 flash;
+ u8 privacyled;
+ u8 degree;
+ u8 mipilinkdefined;
+ u32 mclkspeed;
+ u8 controllogicid;
+ u8 reserved1[3];
+ u8 mclkport;
+ u8 reserved2[13];
+} __packed;
+
+struct ipu_property_names {
+ char clock_frequency[16];
+ char rotation[9];
+ char orientation[12];
+ char bus_type[9];
+ char data_lanes[11];
+ char remote_endpoint[16];
+ char link_frequencies[17];
+};
+
+struct ipu_node_names {
+ char port[7];
+ char ivsc_sensor_port[7];
+ char ivsc_ipu_port[7];
+ char endpoint[11];
+ char remote_port[9];
+ char vcm[16];
+};
+
+struct ipu_sensor_config {
+ const char *hid;
+ const u8 nr_link_freqs;
+ const u64 link_freqs[MAX_NUM_LINK_FREQS];
+};
+
+struct ipu_sensor {
+ /* append ssdb.link(u8) in "-%u" format as suffix of HID */
+ char name[ACPI_ID_LEN + 4];
+ struct acpi_device *adev;
+
+ struct device *csi_dev;
+ struct acpi_device *ivsc_adev;
+ char ivsc_name[ACPI_ID_LEN + 4];
+
+ /* SWNODE_COUNT + 1 for terminating NULL */
+ const struct software_node *group[SWNODE_COUNT + 1];
+ struct software_node swnodes[SWNODE_COUNT];
+ struct ipu_node_names node_names;
+
+ u8 link;
+ u8 lanes;
+ u32 mclkspeed;
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+ const char *vcm_type;
+
+ struct ipu_property_names prop_names;
+ struct property_entry ep_properties[5];
+ struct property_entry dev_properties[5];
+ struct property_entry ipu_properties[3];
+ struct property_entry ivsc_properties[1];
+ struct property_entry ivsc_sensor_ep_properties[4];
+ struct property_entry ivsc_ipu_ep_properties[4];
+
+ struct software_node_ref_args local_ref[1];
+ struct software_node_ref_args remote_ref[1];
+ struct software_node_ref_args vcm_ref[1];
+ struct software_node_ref_args ivsc_sensor_ref[1];
+ struct software_node_ref_args ivsc_ipu_ref[1];
+};
+
+typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev,
+ struct ipu_sensor *sensor);
+
+struct ipu_bridge {
+ struct device *dev;
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode;
+ char ipu_node_name[ACPI_ID_LEN];
+ struct software_node ipu_hid_node;
+ u32 data_lanes[4];
+ unsigned int n_sensors;
+ struct ipu_sensor sensors[IPU_MAX_PORTS];
+};
+
+#if IS_ENABLED(CONFIG_IPU_BRIDGE)
+int ipu_bridge_init(struct device *dev,
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode);
+int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor);
+int ipu_bridge_instantiate_vcm(struct device *sensor);
+#else
+/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */
+#define ipu_bridge_init(dev, parse_sensor_fwnode) (0)
+static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; }
+#endif
+
+#endif
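
Because the stub returns 0 when CONFIG_IPU_BRIDGE is disabled, a CSI-2 receiver driver can call the bridge unconditionally from its probe path. A sketch, with my_probe as a hypothetical caller:

static int my_probe(struct device *dev)
{
	int ret;

	/* Build software-node connections for ACPI-described sensors,
	 * parsing each sensor's SSDB buffer. */
	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (ret)
		return ret;

	/* ... continue with normal receiver setup ... */
	return 0;
}
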
diff --git a/include/media/jpeg.h b/include/media/jpeg.h
new file mode 100644
index 000000000000..a01e142e99a7
--- /dev/null
+++ b/include/media/jpeg.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _MEDIA_JPEG_H_
+#define _MEDIA_JPEG_H_
+
+/* JPEG markers */
+#define JPEG_MARKER_TEM 0x01
+#define JPEG_MARKER_SOF0 0xc0
+#define JPEG_MARKER_DHT 0xc4
+#define JPEG_MARKER_RST 0xd0
+#define JPEG_MARKER_SOI 0xd8
+#define JPEG_MARKER_EOI 0xd9
+#define JPEG_MARKER_SOS 0xda
+#define JPEG_MARKER_DQT 0xdb
+#define JPEG_MARKER_DRI 0xdd
+#define JPEG_MARKER_DHP 0xde
+#define JPEG_MARKER_APP0 0xe0
+#define JPEG_MARKER_COM 0xfe
+
+#endif /* _MEDIA_JPEG_H_ */
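
Markers are two-byte 0xff-prefixed codes in the bitstream, so a parser can locate them with a simple scan. A minimal sketch that returns the offset of the first occurrence of a marker (no handling of 0xff00 byte stuffing or the RSTn marker range):

#include <linux/types.h>
#include <media/jpeg.h>

/* Sketch: find a JPEG marker in a buffer, -1 if absent. */
static long jpeg_find_marker(const u8 *buf, size_t len, u8 marker)
{
	size_t i;

	for (i = 0; i + 1 < len; i++) {
		if (buf[i] == 0xff && buf[i + 1] == marker)
			return i;
	}

	return -1;
}

/* e.g. off = jpeg_find_marker(data, size, JPEG_MARKER_SOF0); */
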
diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
index b35ea6062596..2ab54d426c64 100644
--- a/include/media/media-dev-allocator.h
+++ b/include/media/media-dev-allocator.h
@@ -19,7 +19,7 @@
struct usb_device;
-#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
+#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB)
/**
 * media_device_usb_allocate() - Allocate and return struct &media_device
*
diff --git a/include/media/media-device.h b/include/media/media-device.h
index 1345e6da688a..2c146d0b2b1c 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -13,12 +13,13 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
struct ida;
-struct device;
struct media_device;
/**
@@ -181,8 +182,7 @@ struct media_device {
atomic_t request_id;
};
-/* We don't need to include pci.h or usb.h here */
-struct pci_dev;
+/* We don't need to include usb.h here */
struct usb_device;
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -192,21 +192,6 @@ struct usb_device;
#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1
/**
- * media_entity_enum_init - Initialise an entity enumeration
- *
- * @ent_enum: Entity enumeration to be initialised
- * @mdev: The related media device
- *
- * Return: zero on success or a negative error code.
- */
-static inline __must_check int media_entity_enum_init(
- struct media_entity_enum *ent_enum, struct media_device *mdev)
-{
- return __media_entity_enum_init(ent_enum,
- mdev->entity_internal_idx_max + 1);
-}
-
-/**
* media_device_init() - Initializes a media device element
*
* @mdev: pointer to struct &media_device
@@ -219,6 +204,15 @@ static inline __must_check int media_entity_enum_init(
* So drivers need to first initialize the media device, register any entity
* within the media device, create pad to pad links and then finally register
* the media device by calling media_device_register() as a final step.
+ *
+ * The caller is responsible for initializing the media device before
+ * registration. The following fields must be set:
+ *
+ * - dev must point to the parent device
+ * - model must be filled with the device model name
+ *
+ * The bus_info field is set by media_device_init() for PCI and platform devices
+ * if the field begins with '\0'.
*/
void media_device_init(struct media_device *mdev);
@@ -243,28 +237,25 @@ void media_device_cleanup(struct media_device *mdev);
* The caller is responsible for initializing the &media_device structure
* before registration. The following fields of &media_device must be set:
*
- * - &media_entity.dev must point to the parent device (usually a &pci_dev,
- * &usb_interface or &platform_device instance).
- *
- * - &media_entity.model must be filled with the device model name as a
+ * - &media_device.model must be filled with the device model name as a
* NUL-terminated UTF-8 string. The device/model revision must not be
* stored in this field.
*
* The following fields are optional:
*
- * - &media_entity.serial is a unique serial number stored as a
+ * - &media_device.serial is a unique serial number stored as a
* NUL-terminated ASCII string. The field is big enough to store a GUID
* in text form. If the hardware doesn't provide a unique serial number
* this field must be left empty.
*
- * - &media_entity.bus_info represents the location of the device in the
+ * - &media_device.bus_info represents the location of the device in the
* system as a NUL-terminated ASCII string. For PCI/PCIe devices
- * &media_entity.bus_info must be set to "PCI:" (or "PCIe:") followed by
+ * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by
* the value of pci_name(). For USB devices,the usb_make_path() function
* must be used. This field is used by applications to distinguish between
* otherwise identical devices that don't provide a serial number.
*
- * - &media_entity.hw_revision is the hardware device revision in a
+ * - &media_device.hw_revision is the hardware device revision in a
* driver-specific format. When possible the revision should be formatted
* with the KERNEL_VERSION() macro.
*
@@ -373,7 +364,7 @@ void media_device_unregister_entity(struct media_entity *entity);
* media_entity_notify callbacks are invoked.
*/
-int __must_check media_device_register_entity_notify(struct media_device *mdev,
+void media_device_register_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr);
/**
@@ -453,11 +444,10 @@ static inline int media_device_register_entity(struct media_device *mdev,
static inline void media_device_unregister_entity(struct media_entity *entity)
{
}
-static inline int media_device_register_entity_notify(
+static inline void media_device_register_entity_notify(
struct media_device *mdev,
struct media_entity_notify *nptr)
{
- return 0;
}
static inline void media_device_unregister_entity_notify(
struct media_device *mdev,
@@ -496,4 +486,27 @@ static inline void __media_device_usb_init(struct media_device *mdev,
#define media_device_usb_init(mdev, udev, name) \
__media_device_usb_init(mdev, udev, name, KBUILD_MODNAME)
+/**
+ * media_set_bus_info() - Set bus_info field
+ *
+ * @bus_info: Variable where to write the bus info (char array)
+ * @bus_info_size: Length of the bus_info
+ * @dev: Related struct device
+ *
+ * Sets bus information based on &dev. This is currently done for PCI and
+ * platform devices. dev is required to be non-NULL for this to happen.
+ *
+ * This function is not meant to be called from drivers.
+ */
+static inline void
+media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev)
+{
+ if (!dev)
+ strscpy(bus_info, "no bus info", bus_info_size);
+ else if (dev_is_platform(dev))
+ snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev));
+ else if (dev_is_pci(dev))
+ snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev));
+}
+
#endif
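
Putting the documented ordering together (set dev and model, init, register entities and create links, then register), a sketch of a typical probe sequence; the my_ names are hypothetical:

static int my_media_probe(struct my_priv *priv, struct device *dev)
{
	struct media_device *mdev = &priv->mdev;
	int ret;

	mdev->dev = dev;	/* must be set before media_device_init() */
	strscpy(mdev->model, "My Capture Device", sizeof(mdev->model));

	media_device_init(mdev);	/* fills bus_info for PCI/platform devs */

	/* ... register entities and create pad-to-pad links here ... */

	ret = media_device_register(mdev);
	if (ret)
		media_device_cleanup(mdev);

	return ret;
}
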
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index cbdfcb79d0d0..0393b23129eb 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -13,10 +13,12 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+#include <linux/container_of.h>
#include <linux/fwnode.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/media.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
/* Enums used internally at the media controller to represent graphs */
@@ -98,12 +100,54 @@ struct media_graph {
/**
* struct media_pipeline - Media pipeline related information
*
- * @streaming_count: Streaming start count - streaming stop count
- * @graph: Media graph walk during pipeline start / stop
+ * @allocated: Media pipeline allocated and freed by the framework
+ * @mdev: The media device the pipeline is part of
+ * @pads: List of media_pipeline_pad
+ * @start_count: Media pipeline start - stop count
*/
struct media_pipeline {
- int streaming_count;
- struct media_graph graph;
+ bool allocated;
+ struct media_device *mdev;
+ struct list_head pads;
+ int start_count;
+};
+
+/**
+ * struct media_pipeline_pad - A pad part of a media pipeline
+ *
+ * @list: Entry in the media_pad pads list
+ * @pipe: The media_pipeline that the pad is part of
+ * @pad: The media pad
+ *
+ * This structure associates a pad with a media pipeline. Instances of
+ * media_pipeline_pad are created by media_pipeline_start() when it builds the
+ * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop()
+ * removes the entries from the list and deletes them.
+ */
+struct media_pipeline_pad {
+ struct list_head list;
+ struct media_pipeline *pipe;
+ struct media_pad *pad;
+};
+
+/**
+ * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad
+ *
+ * @cursor: The current element
+ */
+struct media_pipeline_pad_iter {
+ struct list_head *cursor;
+};
+
+/**
+ * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity
+ *
+ * @ent_enum: The entity enumeration tracker
+ * @cursor: The current element
+ */
+struct media_pipeline_entity_iter {
+ struct media_entity_enum ent_enum;
+ struct list_head *cursor;
};
/**
@@ -155,7 +199,7 @@ struct media_link {
* uniquely identified by the pad number.
* @PAD_SIGNAL_ANALOG:
* The pad contains an analog signal. It can be Radio Frequency,
- * Intermediate Frequency, a baseband signal or sub-cariers.
+ * Intermediate Frequency, a baseband signal or sub-carriers.
* Tuner inputs, IF-PLL demodulators, composite and s-video signals
* should use it.
* @PAD_SIGNAL_DV:
@@ -181,17 +225,27 @@ enum media_pad_signal_type {
* @graph_obj: Embedded structure containing the media object common data
* @entity: Entity this pad belongs to
* @index: Pad index in the entity pads array, numbered from 0 to n
+ * @num_links: Number of links connected to this pad
* @sig_type: Type of the signal inside a media pad
* @flags: Pad flags, as defined in
* :ref:`include/uapi/linux/media.h <media_header>`
* (seek for ``MEDIA_PAD_FL_*``)
+ * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to
+ * access this field.
*/
struct media_pad {
struct media_gobj graph_obj; /* must be first field in struct */
struct media_entity *entity;
u16 index;
+ u16 num_links;
enum media_pad_signal_type sig_type;
unsigned long flags;
+
+ /*
+ * The fields below are private, and should only be accessed via
+ * appropriate functions.
+ */
+ struct media_pipeline *pipe;
};
/**
@@ -205,6 +259,16 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_pad_interdep: Return whether two pads of the entity are
+ * interdependent. If two pads are interdependent they are
+ * part of the same pipeline and enabling one of the pads
+ * means that the other pad will become "locked" and
+ * doesn't allow configuration changes. pad0 and pad1 are
+ * guaranteed not to both be sinks or both be sources. Never call
+ * the .has_pad_interdep() operation directly, always use
+ * media_entity_has_pad_interdep().
+ * Optional: If the operation isn't implemented all pads
+ * will be considered as interdependent.
*
* .. note::
*
@@ -218,6 +282,8 @@ struct media_entity_operations {
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
/**
@@ -267,25 +333,18 @@ enum media_entity_type {
* @pads: Pads array with the size defined by @num_pads.
* @links: List of data links.
* @ops: Entity operations.
- * @stream_count: Stream count for the entity.
* @use_count: Use count for the entity.
- * @pipe: Pipeline this entity belongs to.
* @info: Union with devnode information. Kept just for backward
* compatibility.
* @info.dev: Contains device major and minor info.
* @info.dev.major: device node major, if the device is a devnode.
* @info.dev.minor: device node minor, if the device is a devnode.
- * @major: Devnode major number (zero if not applicable). Kept just
- * for backward compatibility.
- * @minor: Devnode minor number (zero if not applicable). Kept just
- * for backward compatibility.
*
* .. note::
*
- * @stream_count and @use_count reference counts must never be
- * negative, but are signed integers on purpose: a simple ``WARN_ON(<0)``
- * check can be used to detect reference count bugs that would make them
- * negative.
+ * The @use_count reference count must never be negative, but is a signed
+ * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect
+ * reference count bugs that would make it negative.
*/
struct media_entity {
struct media_gobj graph_obj; /* must be first field in struct */
@@ -304,11 +363,8 @@ struct media_entity {
const struct media_entity_operations *ops;
- int stream_count;
int use_count;
- struct media_pipeline *pipe;
-
union {
struct {
u32 major;
@@ -318,6 +374,18 @@ struct media_entity {
};
/**
+ * media_entity_for_each_pad - Iterate on all pads in an entity
+ * @entity: The entity the pads belong to
+ * @iter: The iterator pad
+ *
+ * Iterate on all pads in a media entity.
+ */
+#define media_entity_for_each_pad(entity, iter) \
+ for (iter = (entity)->pads; \
+ iter < &(entity)->pads[(entity)->num_pads]; \
+ ++iter)
+
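
A sketch of the new pad iterator, counting an entity's sink pads:

static unsigned int my_count_sink_pads(struct media_entity *entity)
{
	struct media_pad *pad;
	unsigned int count = 0;

	media_entity_for_each_pad(entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SINK)
			count++;
	}

	return count;
}
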
+/**
* struct media_interface - A media interface graph object.
*
* @graph_obj: embedded graph object
@@ -428,15 +496,15 @@ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
}
/**
- * __media_entity_enum_init - Initialise an entity enumeration
+ * media_entity_enum_init - Initialise an entity enumeration
*
* @ent_enum: Entity enumeration to be initialised
- * @idx_max: Maximum number of entities in the enumeration
+ * @mdev: The related media device
*
- * Return: Returns zero on success or a negative error code.
+ * Return: zero on success or a negative error code.
*/
-__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
- int idx_max);
+__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
+ struct media_device *mdev);
/**
* media_entity_enum_cleanup - Release resources of an entity enumeration
@@ -656,6 +724,10 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
*
* This function must be called during the cleanup phase after unregistering
* the entity (currently, it does nothing).
+ *
+ * Calling media_entity_cleanup() on a media_entity whose memory has been
+ * zeroed but that has not been initialized with media_entity_pad_init() is
+ * valid and is a no-op.
*/
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
static inline void media_entity_cleanup(struct media_entity *entity) {}
@@ -667,7 +739,7 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
* media_get_pad_index() - retrieves a pad index from an entity
*
* @entity: entity where the pads belong
- * @is_sink: true if the pad is a sink, false if it is a source
+ * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types
 * @sig_type: type of signal of the pad to be searched for
*
* This helper function finds the first pad index inside an entity that
@@ -678,7 +750,7 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
* On success, return the pad number. If the pad was not found or the media
* entity is a NULL pointer, return -EINVAL.
*/
-int media_get_pad_index(struct media_entity *entity, bool is_sink,
+int media_get_pad_index(struct media_entity *entity, u32 pad_type,
enum media_pad_signal_type sig_type);
/**
@@ -846,7 +918,7 @@ struct media_link *media_entity_find_link(struct media_pad *source,
struct media_pad *sink);
/**
- * media_entity_remote_pad - Find the pad at the remote end of a link
+ * media_pad_remote_pad_first - Find the first pad at the remote end of a link
* @pad: Pad at the local end of the link
*
* Search for a remote pad connected to the given pad by iterating over all
@@ -855,7 +927,135 @@ struct media_link *media_entity_find_link(struct media_pad *source,
* Return: returns a pointer to the pad at the remote end of the first found
* enabled link, or %NULL if no enabled link has been found.
*/
-struct media_pad *media_entity_remote_pad(const struct media_pad *pad);
+struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad);
+
+/**
+ * media_pad_remote_pad_unique - Find a remote pad connected to a pad
+ * @pad: The pad
+ *
+ * Search for and return a remote pad connected to @pad through an enabled
+ * link. If multiple (or no) remote pads are found, an error is returned.
+ *
+ * The uniqueness constraint makes this helper function suitable for entities
+ * that support a single active source at a time on a given pad.
+ *
+ * Return: A pointer to the remote pad, or one of the following error pointers
+ * if an error occurs:
+ *
+ * * -ENOTUNIQ - Multiple links are enabled
+ * * -ENOLINK - No connected pad found
+ */
+struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad);
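+
Since the helper returns ERR_PTR() values rather than NULL on failure, callers are expected to check with IS_ERR(); a sketch:

static int my_get_remote(struct media_pad *sink_pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_unique(sink_pad);
	if (IS_ERR(remote))
		return PTR_ERR(remote);	/* -ENOTUNIQ or -ENOLINK */

	/* ... use remote->entity ... */
	return 0;
}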
+
+/**
+ * media_entity_remote_pad_unique - Find a remote pad connected to an entity
+ * @entity: The entity
+ * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE)
+ *
+ * Search for and return a remote pad of @type connected to @entity through an
+ * enabled link. If multiple (or no) remote pads match these criteria, an error
+ * is returned.
+ *
+ * The uniqueness constraint makes this helper function suitable for entities
+ * that support a single active source or sink at a time.
+ *
+ * Return: A pointer to the remote pad, or one of the following error pointers
+ * if an error occurs:
+ *
+ * * -ENOTUNIQ - Multiple links are enabled
+ * * -ENOLINK - No connected pad found
+ */
+struct media_pad *
+media_entity_remote_pad_unique(const struct media_entity *entity,
+ unsigned int type);
+
+/**
+ * media_entity_remote_source_pad_unique - Find a remote source pad connected to
+ * an entity
+ * @entity: The entity
+ *
+ * Search for and return a remote source pad connected to @entity through an
+ * enabled link. If multiple (or no) remote pads match these criteria, an error
+ * is returned.
+ *
+ * The uniqueness constraint makes this helper function suitable for entities
+ * that support a single active source at a time.
+ *
+ * Return: A pointer to the remote pad, or one of the following error pointers
+ * if an error occurs:
+ *
+ * * -ENOTUNIQ - Multiple links are enabled
+ * * -ENOLINK - No connected pad found
+ */
+static inline struct media_pad *
+media_entity_remote_source_pad_unique(const struct media_entity *entity)
+{
+ return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE);
+}
+
+/**
+ * media_pad_is_streaming - Test if a pad is part of a streaming pipeline
+ * @pad: The pad
+ *
+ * Return: True if the pad is part of a pipeline started with the
+ * media_pipeline_start() function, false otherwise.
+ */
+static inline bool media_pad_is_streaming(const struct media_pad *pad)
+{
+ return pad->pipe;
+}
+
+/**
+ * media_entity_is_streaming - Test if an entity is part of a streaming pipeline
+ * @entity: The entity
+ *
+ * Return: True if the entity is part of a pipeline started with the
+ * media_pipeline_start() function, false otherwise.
+ */
+static inline bool media_entity_is_streaming(const struct media_entity *entity)
+{
+ struct media_pad *pad;
+
+ media_entity_for_each_pad(entity, pad) {
+ if (media_pad_is_streaming(pad))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * media_entity_pipeline - Get the media pipeline an entity is part of
+ * @entity: The entity
+ *
+ * DEPRECATED: use media_pad_pipeline() instead.
+ *
+ * This function returns the media pipeline that an entity has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * In general, entities can be part of multiple pipelines, when carrying
+ * multiple streams (either on different pads, or on the same pad using
+ * multiplexed streams). This function is to be used only for entities that
+ * do not support multiple pipelines.
+ *
+ * Return: The media_pipeline the entity is part of, or NULL if the entity is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_entity_pipeline(struct media_entity *entity);
+
+/**
+ * media_pad_pipeline - Get the media pipeline a pad is part of
+ * @pad: The pad
+ *
+ * This function returns the media pipeline that a pad has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the pad is part of, or NULL if the pad is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_pad_pipeline(struct media_pad *pad);
/**
* media_entity_get_fwnode_pad - Get pad number from fwnode
@@ -877,7 +1077,7 @@ struct media_pad *media_entity_remote_pad(const struct media_pad *pad);
* Return: returns the pad number on success or a negative error code.
*/
int media_entity_get_fwnode_pad(struct media_entity *entity,
- struct fwnode_handle *fwnode,
+ const struct fwnode_handle *fwnode,
unsigned long direction_flags);
/**
@@ -885,6 +1085,13 @@ int media_entity_get_fwnode_pad(struct media_entity *entity,
*
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Pointer to the &media_device that contains the object
+ *
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
+ *
+ * The caller is required to hold the media_device graph_mutex during the graph
+ * walk until the graph state is released.
+ *
+ * Returns zero on success or a negative error code otherwise.
*/
__must_check int media_graph_walk_init(
struct media_graph *graph, struct media_device *mdev);
@@ -893,6 +1100,8 @@ __must_check int media_graph_walk_init(
* media_graph_walk_cleanup - Release resources used by graph walk.
*
* @graph: Media graph structure that will be used to walk the graph
+ *
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
*/
void media_graph_walk_cleanup(struct media_graph *graph);
@@ -903,6 +1112,8 @@ void media_graph_walk_cleanup(struct media_graph *graph);
* @graph: Media graph structure that will be used to walk the graph
* @entity: Starting entity
*
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
+ *
* Before using this function, media_graph_walk_init() must be
* used to allocate resources used for walking the graph. This
* function initializes the graph traversal structure to walk the
@@ -918,6 +1129,8 @@ void media_graph_walk_start(struct media_graph *graph,
* media_graph_walk_next - Get the next entity in the graph
* @graph: Media graph structure
*
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
+ *
* Perform a depth-first traversal of the given media entities graph.
*
* The graph structure must have been previously initialized with a call to
@@ -930,53 +1143,136 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
/**
* media_pipeline_start - Mark a pipeline as streaming
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
- * Mark all entities connected to a given entity through enabled links, either
+ * Mark all pads connected to a given pad through enabled links, either
* directly or indirectly, as streaming. The given pipeline object is assigned
- * to every entity in the pipeline and stored in the media_entity pipe field.
+ * to every pad in the pipeline and stored in the media_pad pipe field.
*
* Calls to this function can be nested, in which case the same number of
* media_pipeline_stop() calls will be required to stop streaming. The
* pipeline pointer must be identical for all nested calls to
* media_pipeline_start().
*/
-__must_check int media_pipeline_start(struct media_entity *entity,
+__must_check int media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe);
/**
* __media_pipeline_start - Mark a pipeline as streaming
*
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
 * .. note:: This is the non-locking version of media_pipeline_start()
*/
-__must_check int __media_pipeline_start(struct media_entity *entity,
+__must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe);
/**
* media_pipeline_stop - Mark a pipeline as not streaming
- * @entity: Starting entity
+ * @pad: Starting pad
*
- * Mark all entities connected to a given entity through enabled links, either
- * directly or indirectly, as not streaming. The media_entity pipe field is
+ * Mark all pads connected to a given pad through enabled links, either
+ * directly or indirectly, as not streaming. The media_pad pipe field is
* reset to %NULL.
*
* If multiple calls to media_pipeline_start() have been made, the same
* number of calls to this function are required to mark the pipeline as not
* streaming.
*/
-void media_pipeline_stop(struct media_entity *entity);
+void media_pipeline_stop(struct media_pad *pad);
/**
* __media_pipeline_stop - Mark a pipeline as not streaming
*
- * @entity: Starting entity
+ * @pad: Starting pad
*
* .. note:: This is the non-locking version of media_pipeline_stop()
*/
-void __media_pipeline_stop(struct media_entity *entity);
+void __media_pipeline_stop(struct media_pad *pad);
+
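
With the API now pad-based, a driver's stream-on path passes its video node's pad instead of the entity. A sketch, assuming a hypothetical driver context holding a video_device and a media_pipeline:

static int my_start_streaming(struct my_ctx *ctx)
{
	struct media_pad *pad = &ctx->vdev.entity.pads[0];
	int ret;

	ret = media_pipeline_start(pad, &ctx->pipe);
	if (ret)
		return ret;

	ret = my_hw_start(ctx);		/* hypothetical hardware start */
	if (ret)
		media_pipeline_stop(pad);	/* balance the start call */

	return ret;
}
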
+struct media_pad *
+__media_pipeline_pad_iter_next(struct media_pipeline *pipe,
+ struct media_pipeline_pad_iter *iter,
+ struct media_pad *pad);
+
+/**
+ * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline
+ * @pipe: The pipeline
+ * @iter: The iterator (struct media_pipeline_pad_iter)
+ * @pad: The iterator pad
+ *
+ * Iterate on all pads in a media pipeline. This is only valid after the
+ * pipeline has been built with media_pipeline_start() and before it gets
+ * destroyed with media_pipeline_stop().
+ */
+#define media_pipeline_for_each_pad(pipe, iter, pad) \
+ for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \
+ pad != NULL; \
+ pad = __media_pipeline_pad_iter_next((pipe), iter, pad))
+
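
A sketch of the pad iterator, logging every pad in a started pipeline:

static void my_dump_pipeline(struct media_pipeline *pipe)
{
	struct media_pipeline_pad_iter iter;
	struct media_pad *pad;

	media_pipeline_for_each_pad(pipe, &iter, pad)
		pr_info("pad %u of entity %s\n",
			pad->index, pad->entity->name);
}
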
+/**
+ * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator
+ * @pipe: The pipeline
+ * @iter: The iterator
+ *
+ * This function must be called to initialize the iterator before using it in a
+ * media_pipeline_for_each_entity() loop. The iterator must be destroyed by a
+ * call to media_pipeline_entity_iter_cleanup() after the loop (including in code
+ * paths that break from the loop).
+ *
+ * The same iterator can be used in multiple consecutive loops without being
+ * destroyed and reinitialized.
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int media_pipeline_entity_iter_init(struct media_pipeline *pipe,
+ struct media_pipeline_entity_iter *iter);
+
+/**
+ * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator
+ * @iter: The iterator
+ *
+ * This function must be called to destroy iterators initialized with
+ * media_pipeline_entity_iter_init().
+ */
+void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter);
+
+struct media_entity *
+__media_pipeline_entity_iter_next(struct media_pipeline *pipe,
+ struct media_pipeline_entity_iter *iter,
+ struct media_entity *entity);
+
+/**
+ * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline
+ * @pipe: The pipeline
+ * @iter: The iterator (struct media_pipeline_entity_iter)
+ * @entity: The iterator entity
+ *
+ * Iterate on all entities in a media pipeline. This is only valid after the
+ * pipeline has been built with media_pipeline_start() and before it gets
+ * destroyed with media_pipeline_stop(). The iterator must be initialized with
+ * media_pipeline_entity_iter_init() before iteration, and destroyed with
+ * media_pipeline_entity_iter_cleanup() after (including in code paths that
+ * break from the loop).
+ */
+#define media_pipeline_for_each_entity(pipe, iter, entity) \
+ for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \
+ entity != NULL; \
+ entity = __media_pipeline_entity_iter_next((pipe), iter, entity))
+
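
A sketch of the entity iterator with the mandatory init/cleanup pairing, including cleanup on a loop break:

static int my_check_entities(struct media_pipeline *pipe)
{
	struct media_pipeline_entity_iter iter;
	struct media_entity *entity;
	int ret;

	ret = media_pipeline_entity_iter_init(pipe, &iter);
	if (ret)
		return ret;

	media_pipeline_for_each_entity(pipe, &iter, entity) {
		if (!entity->ops) {	/* illustrative per-entity check */
			ret = -EINVAL;
			break;
		}
	}

	media_pipeline_entity_iter_cleanup(&iter);
	return ret;
}
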
+/**
+ * media_pipeline_alloc_start - Mark a pipeline as streaming
+ * @pad: Starting pad
+ *
+ * media_pipeline_alloc_start() is similar to media_pipeline_start(), but instead
+ * of operating on a caller-provided pipeline it reuses the pipeline the pad is
+ * already part of, or allocates a new one if the pad has none.
+ *
+ * Calls to media_pipeline_alloc_start() must be matched with
+ * media_pipeline_stop().
+ */
+__must_check int media_pipeline_alloc_start(struct media_pad *pad);
/**
* media_devnode_create() - creates and initializes a device node interface
@@ -1011,7 +1307,6 @@ __must_check media_devnode_create(struct media_device *mdev,
* removed.
*/
void media_devnode_remove(struct media_intf_devnode *devnode);
-struct media_link *
/**
* media_create_intf_link() - creates a link between an entity and an interface
@@ -1042,6 +1337,7 @@ struct media_link *
* the interface and media_device_register_entity() should be called for the
* interface that will be part of the link.
*/
+struct media_link *
__must_check media_create_intf_link(struct media_entity *entity,
struct media_interface *intf,
u32 flags);
@@ -1102,4 +1398,53 @@ void media_remove_intf_links(struct media_interface *intf);
(((entity)->ops && (entity)->ops->operation) ? \
(entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD)
+/**
+ * media_create_ancillary_link() - create an ancillary link between two
+ * instances of &media_entity
+ *
+ * @primary: pointer to the primary &media_entity
+ * @ancillary: pointer to the ancillary &media_entity
+ *
+ * Create an ancillary link between two entities, indicating that they
+ * represent two connected pieces of hardware that form a single logical unit.
+ * A typical example is a camera lens controller being linked to the sensor that
+ * it is supporting.
+ *
+ * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for
+ * the new link.
+ */
+struct media_link *
+media_create_ancillary_link(struct media_entity *primary,
+ struct media_entity *ancillary);
+
+/**
+ * __media_entity_next_link() - Iterate through a &media_entity's links
+ *
+ * @entity: pointer to the &media_entity
+ * @link: pointer to a &media_link to hold the iterated values
+ * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags
+ *
+ * Return the next link against an entity matching a specific link type. This
+ * allows iteration through an entity's links whilst guaranteeing all of the
+ * returned links are of the given type.
+ */
+struct media_link *__media_entity_next_link(struct media_entity *entity,
+ struct media_link *link,
+ unsigned long link_type);
+
+/**
+ * for_each_media_entity_data_link() - Iterate through an entity's data links
+ *
+ * @entity: pointer to the &media_entity
+ * @link: pointer to a &media_link to hold the iterated values
+ *
+ * Iterate over a &media_entity's data links
+ */
+#define for_each_media_entity_data_link(entity, link) \
+ for (link = __media_entity_next_link(entity, NULL, \
+ MEDIA_LNK_FL_DATA_LINK); \
+ link; \
+ link = __media_entity_next_link(entity, link, \
+ MEDIA_LNK_FL_DATA_LINK))
+
#endif
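[Editor's note: for illustration only, counting an entity's enabled data links with the new iterator; "entity" is assumed to be a registered &media_entity.]

    struct media_link *link;
    unsigned int n_enabled = 0;

    for_each_media_entity_data_link(entity, link) {
            if (link->flags & MEDIA_LNK_FL_ENABLED)
                    n_enabled++;
    }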
diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h
new file mode 100644
index 000000000000..40fc0264250d
--- /dev/null
+++ b/include/media/mipi-csi2.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MIPI CSI-2 Data Types
+ *
+ * Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef _MEDIA_MIPI_CSI2_H
+#define _MEDIA_MIPI_CSI2_H
+
+/* Short packet data types */
+#define MIPI_CSI2_DT_FS 0x00
+#define MIPI_CSI2_DT_FE 0x01
+#define MIPI_CSI2_DT_LS 0x02
+#define MIPI_CSI2_DT_LE 0x03
+#define MIPI_CSI2_DT_GENERIC_SHORT(n) (0x08 + (n)) /* 0..7 */
+
+/* Long packet data types */
+#define MIPI_CSI2_DT_NULL 0x10
+#define MIPI_CSI2_DT_BLANKING 0x11
+#define MIPI_CSI2_DT_EMBEDDED_8B 0x12
+#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */
+#define MIPI_CSI2_DT_YUV420_8B 0x18
+#define MIPI_CSI2_DT_YUV420_10B 0x19
+#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a
+#define MIPI_CSI2_DT_YUV420_8B_CS 0x1c
+#define MIPI_CSI2_DT_YUV420_10B_CS 0x1d
+#define MIPI_CSI2_DT_YUV422_8B 0x1e
+#define MIPI_CSI2_DT_YUV422_10B 0x1f
+#define MIPI_CSI2_DT_RGB444 0x20
+#define MIPI_CSI2_DT_RGB555 0x21
+#define MIPI_CSI2_DT_RGB565 0x22
+#define MIPI_CSI2_DT_RGB666 0x23
+#define MIPI_CSI2_DT_RGB888 0x24
+#define MIPI_CSI2_DT_RAW28 0x26
+#define MIPI_CSI2_DT_RAW24 0x27
+#define MIPI_CSI2_DT_RAW6 0x28
+#define MIPI_CSI2_DT_RAW7 0x29
+#define MIPI_CSI2_DT_RAW8 0x2a
+#define MIPI_CSI2_DT_RAW10 0x2b
+#define MIPI_CSI2_DT_RAW12 0x2c
+#define MIPI_CSI2_DT_RAW14 0x2d
+#define MIPI_CSI2_DT_RAW16 0x2e
+#define MIPI_CSI2_DT_RAW20 0x2f
+#define MIPI_CSI2_DT_USER_DEFINED(n) (0x30 + (n)) /* 0..7 */
+
+#endif /* _MEDIA_MIPI_CSI2_H */
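[Editor's note: a small sketch of how a receiver driver might consume these constants; the helper below is illustrative and not part of the header. It relies on the RAW data types occupying the contiguous range 0x26-0x2f above.]

    static bool mipi_csi2_dt_is_raw(u8 dt)
    {
            return dt >= MIPI_CSI2_DT_RAW28 && dt <= MIPI_CSI2_DT_RAW20;
    }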
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
deleted file mode 100644
index 6601455b3d5e..000000000000
--- a/include/media/mpeg2-ctrls.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the MPEG2 state controls for use with stateless MPEG-2
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _MPEG2_CTRLS_H_
-#define _MPEG2_CTRLS_H_
-
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
-#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
-
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
-
-struct v4l2_mpeg2_sequence {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
- __u16 horizontal_size;
- __u16 vertical_size;
- __u32 vbv_buffer_size;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
- __u16 profile_and_level_indication;
- __u8 progressive_sequence;
- __u8 chroma_format;
-};
-
-struct v4l2_mpeg2_picture {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
- __u8 picture_coding_type;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
- __u8 f_code[2][2];
- __u8 intra_dc_precision;
- __u8 picture_structure;
- __u8 top_field_first;
- __u8 frame_pred_frame_dct;
- __u8 concealment_motion_vectors;
- __u8 q_scale_type;
- __u8 intra_vlc_format;
- __u8 alternate_scan;
- __u8 repeat_first_field;
- __u16 progressive_frame;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
- __u64 backward_ref_ts;
- __u64 forward_ref_ts;
-
- struct v4l2_mpeg2_sequence sequence;
- struct v4l2_mpeg2_picture picture;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
- __u32 quantiser_scale_code;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
- __u8 load_intra_quantiser_matrix;
- __u8 load_non_intra_quantiser_matrix;
- __u8 load_chroma_intra_quantiser_matrix;
- __u8 load_chroma_non_intra_quantiser_matrix;
-
- __u8 intra_quantiser_matrix[64];
- __u8 non_intra_quantiser_matrix[64];
- __u8 chroma_intra_quantiser_matrix[64];
- __u8 chroma_non_intra_quantiser_matrix[64];
-};
-
-#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index d3f85df64bb2..803349599c27 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -59,7 +59,6 @@ enum rc_filter_type {
* @rc: rcdev for this lirc chardev
* @carrier_low: when setting the carrier range, first the low end must be
* set with an ioctl and then the high end with another ioctl
- * @send_timeout_reports: report timeouts in lirc raw IR.
* @rawir: queue for incoming raw IR
* @scancodes: queue for incoming decoded scancodes
* @wait_poll: poll struct for lirc device
@@ -72,7 +71,6 @@ struct lirc_fh {
struct list_head list;
struct rc_dev *rc;
int carrier_low;
- bool send_timeout_reports;
DECLARE_KFIFO_PTR(rawir, unsigned int);
DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode);
wait_queue_head_t wait_poll;
@@ -128,13 +126,11 @@ struct lirc_fh {
* @timeout: optional time after which device stops sending data
* @min_timeout: minimum timeout supported by device
* @max_timeout: maximum timeout supported by device
- * @rx_resolution : resolution (in ns) of input sampler
- * @tx_resolution: resolution (in ns) of output sampler
+ * @rx_resolution : resolution (in us) of input sampler
+ * @tx_resolution: resolution (in us) of output sampler
* @lirc_dev: lirc device
* @lirc_cdev: lirc char cdev
- * @gap_start: time when gap starts
- * @gap_duration: duration of initial gap
- * @gap: true if we're in a gap
+ * @gap_start: start time for gap after timeout if non-zero
* @lirc_fh_lock: protects lirc_fh list
* @lirc_fh: list of open files
* @registered: set to true by rc_register_device(), false by
@@ -151,13 +147,13 @@ struct lirc_fh {
* @tx_ir: transmit IR
* @s_idle: enable/disable hardware idle mode, upon which,
* device doesn't interrupt host until it sees IR pulses
- * @s_learning_mode: enable wide band receiver used for learning
+ * @s_wideband_receiver: enable wide band receiver used for learning
* @s_carrier_report: enable carrier reports
* @s_filter: set the scancode filter
* @s_wakeup_filter: set the wakeup scancode filter. If the mask is zero
* then wakeup should be disabled. wakeup_protocol will be set to
* a valid protocol if mask is nonzero.
- * @s_timeout: set hardware timeout in ns
+ * @s_timeout: set hardware timeout in us
*/
struct rc_dev {
struct device dev;
@@ -203,8 +199,6 @@ struct rc_dev {
struct device lirc_dev;
struct cdev lirc_cdev;
ktime_t gap_start;
- u64 gap_duration;
- bool gap;
spinlock_t lirc_fh_lock;
struct list_head lirc_fh;
#endif
@@ -218,7 +212,7 @@ struct rc_dev {
int (*s_rx_carrier_range)(struct rc_dev *dev, u32 min, u32 max);
int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n);
void (*s_idle)(struct rc_dev *dev, bool enable);
- int (*s_learning_mode)(struct rc_dev *dev, int enable);
+ int (*s_wideband_receiver)(struct rc_dev *dev, int enable);
int (*s_carrier_report) (struct rc_dev *dev, int enable);
int (*s_filter)(struct rc_dev *dev,
struct rc_scancode_filter *filter);
@@ -304,16 +298,16 @@ struct ir_raw_event {
u8 duty_cycle;
unsigned pulse:1;
- unsigned reset:1;
+ unsigned overflow:1;
unsigned timeout:1;
unsigned carrier_report:1;
};
-#define IR_DEFAULT_TIMEOUT MS_TO_NS(125)
-#define IR_MAX_DURATION 500000000 /* 500 ms */
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
-#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
+#define IR_MAX_DURATION MS_TO_US(500)
+#define IR_DEFAULT_TIMEOUT MS_TO_US(125)
+#define IR_MAX_TIMEOUT LIRC_VALUE_MASK
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -327,9 +321,9 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max);
int ir_raw_encode_carrier(enum rc_proto protocol);
-static inline void ir_raw_event_reset(struct rc_dev *dev)
+static inline void ir_raw_event_overflow(struct rc_dev *dev)
{
- ir_raw_event_store(dev, &((struct ir_raw_event) { .reset = true }));
+ ir_raw_event_store(dev, &((struct ir_raw_event) { .overflow = true }));
dev->idle = true;
ir_raw_event_handle(dev);
}
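[Editor's note: a hypothetical driver fragment showing the renamed helper in context. On a hardware FIFO overrun the driver now reports an overflow event rather than the removed reset event; the status check is made up.]

    static irqreturn_t foo_ir_isr(int irq, void *data)
    {
            struct rc_dev *rc = data;

            if (foo_hw_fifo_overrun(rc))    /* hypothetical status check */
                    ir_raw_event_overflow(rc);

            return IRQ_HANDLED;
    }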
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 7dbb91c601a7..4676545ffd8f 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -175,6 +175,13 @@ struct rc_map_list {
struct rc_map map;
};
+#ifdef CONFIG_MEDIA_CEC_RC
+/*
+ * rc_map_list from rc-cec.c
+ */
+extern struct rc_map_list cec_map;
+#endif
+
/* Routines from rc-map.c */
/**
@@ -218,12 +225,14 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_AVERTV_303 "rc-avertv-303"
#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700"
#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1"
+#define RC_MAP_BEELINK_MXIII "rc-beelink-mxiii"
#define RC_MAP_BEHOLD "rc-behold"
#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old"
#define RC_MAP_CEC "rc-cec"
#define RC_MAP_CINERGY "rc-cinergy"
#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
+#define RC_MAP_CT_90405 "rc-ct-90405"
#define RC_MAP_D680_DMB "rc-d680-dmb"
#define RC_MAP_DELOCK_61959 "rc-delock-61959"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
@@ -233,6 +242,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
+#define RC_MAP_DREAMBOX "rc-dreambox"
#define RC_MAP_DTT200U "rc-dtt200u"
#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_DVICO_MCE "rc-dvico-mce"
@@ -263,15 +273,19 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_IT913X_V2 "rc-it913x-v2"
#define RC_MAP_KAIOMY "rc-kaiomy"
#define RC_MAP_KHADAS "rc-khadas"
+#define RC_MAP_KHAMSIN "rc-khamsin"
#define RC_MAP_KWORLD_315U "rc-kworld-315u"
#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u"
#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog"
#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051"
#define RC_MAP_LME2510 "rc-lme2510"
#define RC_MAP_MANLI "rc-manli"
+#define RC_MAP_MECOOL_KII_PRO "rc-mecool-kii-pro"
+#define RC_MAP_MECOOL_KIII_PRO "rc-mecool-kiii-pro"
#define RC_MAP_MEDION_X10 "rc-medion-x10"
#define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer"
#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x"
+#define RC_MAP_MINIX_NEO "rc-minix-neo"
#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
@@ -282,6 +296,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_NPGTECH "rc-npgtech"
#define RC_MAP_ODROID "rc-odroid"
#define RC_MAP_PCTV_SEDNA "rc-pctv-sedna"
+#define RC_MAP_PINE64 "rc-pine64"
#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color"
#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey"
#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd"
@@ -300,7 +315,6 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_SU3000 "rc-su3000"
-#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini"
#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
@@ -329,6 +343,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
#define RC_MAP_X96MAX "rc-x96max"
+#define RC_MAP_XBOX_360 "rc-xbox-360"
#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
diff --git a/include/media/tpg/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h
index 0b0ddb87380e..a55088921d1d 100644
--- a/include/media/tpg/v4l2-tpg.h
+++ b/include/media/tpg/v4l2-tpg.h
@@ -210,6 +210,7 @@ struct tpg_data {
bool show_square;
bool insert_sav;
bool insert_eav;
+ bool insert_hdmi_video_guard_band;
/* Test pattern movement */
enum tpg_move_mode mv_hor_mode;
@@ -325,6 +326,7 @@ static inline void tpg_s_saturation(struct tpg_data *tpg,
static inline void tpg_s_hue(struct tpg_data *tpg,
s16 hue)
{
+ hue = clamp_t(s16, hue, -128, 128);
if (tpg->hue == hue)
return;
tpg->hue = hue;
@@ -590,6 +592,21 @@ static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
tpg->insert_eav = insert_eav;
}
+/*
+ * This inserts 4 pixels of the RGB color 0xab55ab at the left hand side of the
+ * image. This is only done for 3 or 4 byte RGB pixel formats. This pixel value
+ * equals the Video Guard Band value as defined by HDMI (see section 5.2.2.1
+ * in the HDMI 1.3 Specification) that preceeds the first actual pixel. If the
+ * HDMI receiver doesn't handle this correctly, then it might keep skipping
+ * these Video Guard Band patterns and end up with a shorter video line. So this
+ * is a nice pattern to test with.
+ */
+static inline void tpg_s_insert_hdmi_video_guard_band(struct tpg_data *tpg,
+ bool insert_hdmi_video_guard_band)
+{
+ tpg->insert_hdmi_video_guard_band = insert_hdmi_video_guard_band;
+}
+
void tpg_update_mv_step(struct tpg_data *tpg);
static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
diff --git a/include/media/tuner.h b/include/media/tuner.h
index ff85d7227f91..a7796e0a3659 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -132,6 +132,7 @@
#define TUNER_SONY_BTF_PG472Z 89 /* PAL+SECAM */
#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */
#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */
+#define TUNER_SI2157 92
/* tv card specific */
#define TDA9887_PRESENT (1<<0)
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index b56eaee82aa5..f37c9b15ffdb 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -5,7 +5,7 @@
* eeproms.
*/
-#include <linux/if_ether.h>
+#include <uapi/linux/if_ether.h>
/**
* enum tveeprom_audio_processor - Specifies the type of audio processor
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 8319284c93cb..9bd326d31181 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
+struct dentry;
struct device;
struct device_node;
struct v4l2_device;
@@ -21,97 +22,85 @@ struct v4l2_async_notifier;
* enum v4l2_async_match_type - type of asynchronous subdevice logic to be used
* in order to identify a match
*
- * @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct
- * v4l2_async_subdev.match ops
- * @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name
- * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
- * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
+ * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address
+ * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node
*
- * This enum is used by the asyncrhronous sub-device logic to define the
+ * This enum is used by the asynchronous connection logic to define the
* algorithm that will be used to match an asynchronous device.
*/
enum v4l2_async_match_type {
- V4L2_ASYNC_MATCH_CUSTOM,
- V4L2_ASYNC_MATCH_DEVNAME,
- V4L2_ASYNC_MATCH_I2C,
- V4L2_ASYNC_MATCH_FWNODE,
+ V4L2_ASYNC_MATCH_TYPE_I2C,
+ V4L2_ASYNC_MATCH_TYPE_FWNODE,
};
/**
- * struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- *
- * @match_type: type of match that will be used
- * @match: union of per-bus type matching data sets
- * @match.fwnode:
- * pointer to &struct fwnode_handle to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
- * @match.device_name:
- * string containing the device name to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_DEVNAME.
- * @match.i2c: embedded struct with I2C parameters to be matched.
+ * struct v4l2_async_match_desc - async connection match information
+ *
+ * @type: type of match that will be used
+ * @fwnode: pointer to &struct fwnode_handle to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE.
+ * @i2c: embedded struct with I2C parameters to be matched.
* Both @match.i2c.adapter_id and @match.i2c.address
* should be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.adapter_id:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.adapter_id:
* I2C adapter ID to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.address:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.address:
* I2C address to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.custom:
- * Driver-specific match criteria.
- * Used if @match_type is %V4L2_ASYNC_MATCH_CUSTOM.
- * @match.custom.match:
- * Driver-specific match function to be used if
- * %V4L2_ASYNC_MATCH_CUSTOM.
- * @match.custom.priv:
- * Driver-specific private struct with match parameters
- * to be used if %V4L2_ASYNC_MATCH_CUSTOM.
- * @asd_list: used to add struct v4l2_async_subdev objects to the
- * master notifier @asd_list
- * @list: used to link struct v4l2_async_subdev objects, waiting to be
- * probed, to a notifier->waiting list
- *
- * When this struct is used as a member in a driver specific struct,
- * the driver specific struct shall contain the &struct
- * v4l2_async_subdev as its first member.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
*/
-struct v4l2_async_subdev {
- enum v4l2_async_match_type match_type;
+struct v4l2_async_match_desc {
+ enum v4l2_async_match_type type;
union {
struct fwnode_handle *fwnode;
- const char *device_name;
struct {
int adapter_id;
unsigned short address;
} i2c;
- struct {
- bool (*match)(struct device *dev,
- struct v4l2_async_subdev *sd);
- void *priv;
- } custom;
- } match;
+ };
+};
- /* v4l2-async core private: not to be used by drivers */
- struct list_head list;
- struct list_head asd_list;
+/**
+ * struct v4l2_async_connection - sub-device connection descriptor, as known to
+ * a bridge
+ *
+ * @match: struct of match type and per-bus type matching data sets
+ * @notifier: the async notifier the connection is related to
+ * @asc_entry: used to add struct v4l2_async_connection objects to the
+ * notifier @waiting_list or @done_list
+ * @asc_subdev_entry: entry in struct v4l2_subdev.asc_list list
+ * @sd: the related sub-device
+ *
+ * When this struct is used as a member in a driver specific struct, the driver
+ * specific struct shall contain the &struct v4l2_async_connection as its first
+ * member.
+ */
+struct v4l2_async_connection {
+ struct v4l2_async_match_desc match;
+ struct v4l2_async_notifier *notifier;
+ struct list_head asc_entry;
+ struct list_head asc_subdev_entry;
+ struct v4l2_subdev *sd;
};
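[Editor's note: the embedding rule documented above in concrete form, with a hypothetical driver type.]

    struct sensor_async_conn {
            struct v4l2_async_connection asc;       /* must be first */
            unsigned int port;                      /* driver-private data */
    };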
/**
* struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations
- * @bound: a subdevice driver has successfully probed one of the subdevices
- * @complete: All subdevices have been probed successfully. The complete
+ * @bound: a sub-device has been bound by the given connection
+ * @complete: All connections have been bound successfully. The complete
* callback is only executed for the root notifier.
* @unbind: a subdevice is leaving
+ * @destroy: the asc is about to be freed
*/
struct v4l2_async_notifier_operations {
int (*bound)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
int (*complete)(struct v4l2_async_notifier *notifier);
void (*unbind)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
+ void (*destroy)(struct v4l2_async_connection *asc);
};
/**
@@ -121,174 +110,199 @@ struct v4l2_async_notifier_operations {
* @v4l2_dev: v4l2_device of the root notifier, NULL otherwise
* @sd: sub-device that registered the notifier, NULL otherwise
* @parent: parent notifier
- * @asd_list: master list of struct v4l2_async_subdev
- * @waiting: list of struct v4l2_async_subdev, waiting for their drivers
- * @done: list of struct v4l2_subdev, already probed
- * @list: member in a global list of notifiers
+ * @waiting_list: list of struct v4l2_async_connection, waiting for their
+ * drivers
+ * @done_list: list of struct v4l2_subdev, already probed
+ * @notifier_entry: member in a global list of notifiers
*/
struct v4l2_async_notifier {
const struct v4l2_async_notifier_operations *ops;
struct v4l2_device *v4l2_dev;
struct v4l2_subdev *sd;
struct v4l2_async_notifier *parent;
- struct list_head asd_list;
- struct list_head waiting;
- struct list_head done;
- struct list_head list;
+ struct list_head waiting_list;
+ struct list_head done_list;
+ struct list_head notifier_entry;
};
/**
- * v4l2_async_notifier_init - Initialize a notifier.
+ * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list
+ *
+ * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of
+ * &struct v4l2_subdev
+ * @endpoint: Endpoint fwnode against which to match the sub-device
+ */
+struct v4l2_async_subdev_endpoint {
+ struct list_head async_subdev_endpoint_entry;
+ struct fwnode_handle *endpoint;
+};
+
+/**
+ * v4l2_async_debug_init - Initialize debugging tools.
+ *
+ * @debugfs_dir: pointer to the parent debugfs &struct dentry
+ */
+void v4l2_async_debug_init(struct dentry *debugfs_dir);
+
+/**
+ * v4l2_async_nf_init - Initialize a notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
+ * @v4l2_dev: pointer to &struct v4l2_device
*
- * This function initializes the notifier @asd_list. It must be called
- * before the first call to @v4l2_async_notifier_add_subdev.
+ * This function initializes the notifier @asc_entry. It must be called
+ * before adding a subdevice to a notifier, using one of:
+ * v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode() or
+ * v4l2_async_nf_add_i2c().
*/
-void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev);
/**
- * v4l2_async_notifier_add_subdev - Add an async subdev to the
- * notifier's master asd list.
+ * v4l2_async_subdev_nf_init - Initialize a sub-device notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @asd: pointer to &struct v4l2_async_subdev
+ * @sd: pointer to &struct v4l2_subdev
*
- * Call this function before registering a notifier to link the
- * provided asd to the notifiers master @asd_list.
+ * This function initializes the notifier @asc_list. It must be called
+ * before adding a subdevice to a notifier, using one of:
+ * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or
+ * v4l2_async_nf_add_i2c().
*/
-int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd);
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd);
+struct v4l2_async_connection *
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asc_struct_size);
/**
- * v4l2_async_notifier_add_fwnode_subdev - Allocate and add a fwnode async
- * subdev to the notifier's master asd_list.
+ * v4l2_async_nf_add_fwnode - Allocate and add a fwnode async
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @fwnode: fwnode handle of the sub-device to be matched
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- *
- * Allocate a fwnode-matched asd of size asd_struct_size, and add it to the
- * notifiers @asd_list. The function also gets a reference of the fwnode which
+ * @fwnode: fwnode handle of the sub-device to be matched, pointer to
+ * &struct fwnode_handle
+ * @type: Type of the driver's async sub-device or connection struct. The
+ * &struct v4l2_async_connection shall be the first member of the
+ * driver's async struct, i.e. both begin at the same memory address.
+ *
+ * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the
+ * notifiers @asc_list. The function also gets a reference of the fwnode which
* is released later at notifier cleanup time.
*/
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size);
+#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \
+ ((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type)))
+struct v4l2_async_connection *
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asc_struct_size);
/**
- * v4l2_async_notifier_add_fwnode_remote_subdev - Allocate and add a fwnode
+ * v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode
* remote async subdev to the
- * notifier's master asd_list.
+ * notifier's master asc_list.
*
- * @notif: pointer to &struct v4l2_async_notifier
- * @endpoint: local endpoint pointing to the remote sub-device to be matched
- * @asd: Async sub-device struct allocated by the caller. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @notifier: pointer to &struct v4l2_async_notifier
+ * @ep: local endpoint pointing to the remote connection to be matched,
+ * pointer to &struct fwnode_handle
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
* Gets the remote endpoint of a given local endpoint, set it up for fwnode
- * matching and adds the async sub-device to the notifier's @asd_list. The
+ * matching and adds the async connection to the notifier's @asc_list. The
* function also gets a reference of the fwnode which is released later at
* notifier cleanup time.
*
- * This is just like @v4l2_async_notifier_add_fwnode_subdev, but with the
- * exception that the fwnode refers to a local endpoint, not the remote one, and
- * the function relies on the caller to allocate the async sub-device struct.
+ * This is just like v4l2_async_nf_add_fwnode(), but with the
+ * exception that the fwnode refers to a local endpoint, not the remote one.
*/
-int
-v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- struct v4l2_async_subdev *asd);
+#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \
+ ((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type)))
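[Editor's note: a hedged bridge-driver sketch putting the renamed API together; "priv", "ep", the ops table and the sensor_async_conn type from the note above are assumptions.]

    struct sensor_async_conn *asc;
    int ret;

    v4l2_async_nf_init(&priv->notifier, &priv->v4l2_dev);

    asc = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
                                          struct sensor_async_conn);
    if (IS_ERR(asc)) {
            v4l2_async_nf_cleanup(&priv->notifier);
            return PTR_ERR(asc);
    }

    priv->notifier.ops = &foo_notifier_ops;     /* hypothetical ops */
    ret = v4l2_async_nf_register(&priv->notifier);
    if (ret)
            v4l2_async_nf_cleanup(&priv->notifier);
    return ret;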
+struct v4l2_async_connection *
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier,
+ int adapter_id, unsigned short address,
+ unsigned int asc_struct_size);
/**
- * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async
- * subdev to the notifier's master asd_list.
+ * v4l2_async_nf_add_i2c - Allocate and add an i2c async
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @adapter_id: I2C adapter ID to be matched
- * @address: I2C address of sub-device to be matched
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- *
- * Same as above but for I2C matched sub-devices.
+ * @adapter: I2C adapter ID to be matched
+ * @address: I2C address of connection to be matched
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
+ *
+ * Same as v4l2_async_nf_add_fwnode() but for I2C matched
+ * connections.
*/
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size);
+#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \
+ ((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \
+ sizeof(type)))
/**
- * v4l2_async_notifier_add_devname_subdev - Allocate and add a device-name
- * async subdev to the notifier's master asd_list.
+ * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device
+ * matching list
*
- * @notifier: pointer to &struct v4l2_async_notifier
- * @device_name: device name string to be matched
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- *
- * Same as above but for device-name matched sub-devices.
+ * @sd: the sub-device
+ * @fwnode: the endpoint fwnode to match
+ *
+ * Add a fwnode to the async sub-device's matching list. This allows registering
+ * multiple async sub-devices from a single device.
+ *
+ * Note that v4l2_subdev_cleanup() must be called as part of the sub-device's
+ * cleanup if endpoints have been added to the sub-device's fwnode matching
+ * list.
+ *
+ * Returns an error on failure, 0 on success.
*/
-struct v4l2_async_subdev *
-v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
- const char *device_name,
- unsigned int asd_struct_size);
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode);
/**
- * v4l2_async_notifier_register - registers a subdevice asynchronous notifier
+ * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection
+ * for a sub-device
+ * @sd: the sub-device
*
- * @v4l2_dev: pointer to &struct v4l2_device
- * @notifier: pointer to &struct v4l2_async_notifier
+ * Return an async connection for a sub-device, when there is a single
+ * one only.
*/
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier);
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd);
/**
- * v4l2_async_subdev_notifier_register - registers a subdevice asynchronous
- * notifier for a sub-device
+ * v4l2_async_nf_register - registers a subdevice asynchronous notifier
*
- * @sd: pointer to &struct v4l2_subdev
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_unregister - unregisters a subdevice
+ * v4l2_async_nf_unregister - unregisters a subdevice
* asynchronous notifier
*
* @notifier: pointer to &struct v4l2_async_notifier
*/
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_cleanup - clean up notifier resources
+ * v4l2_async_nf_cleanup - clean up notifier resources
* @notifier: the notifier the resources of which are to be cleaned up
*
* Release memory resources related to a notifier, including the async
- * sub-devices allocated for the purposes of the notifier but not the notifier
+ * connections allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
- * notifier after calling
- * @v4l2_async_notifier_add_subdev,
- * @v4l2_async_notifier_parse_fwnode_endpoints or
- * @v4l2_fwnode_reference_parse_sensor_common.
+ * notifier after calling v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c().
*
- * There is no harm from calling v4l2_async_notifier_cleanup in other
+ * There is no harm from calling v4l2_async_nf_cleanup() in other
* cases as long as its memory has been zeroed after it has been
* allocated.
*/
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier);
/**
* v4l2_async_register_subdev - registers a sub-device to the asynchronous
@@ -299,16 +313,16 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
int v4l2_async_register_subdev(struct v4l2_subdev *sd);
/**
- * v4l2_async_register_subdev_sensor_common - registers a sensor sub-device to
- * the asynchronous sub-device
- * framework and parse set up common
- * sensor related devices
+ * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the
+ *	asynchronous sub-device framework and
+ *	parses and sets up common
+ *	sensor-related devices
*
* @sd: pointer to struct &v4l2_subdev
*
* This function is just like v4l2_async_register_subdev() with the exception
* that calling it will also parse firmware interfaces for remote references
- * using v4l2_async_notifier_parse_fwnode_sensor_common() and registers the
+ * using v4l2_async_nf_parse_fwnode_sensor() and registers the
* async sub-devices. The sub-device is similarly unregistered by calling
* v4l2_async_unregister_subdev().
*
@@ -318,7 +332,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd);
* to register it.
*/
int __must_check
-v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd);
+v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd);
/**
* v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous
diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
new file mode 100644
index 000000000000..4e96e90ee636
--- /dev/null
+++ b/include/media/v4l2-cci.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+#ifndef _V4L2_CCI_H
+#define _V4L2_CCI_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct i2c_client;
+struct regmap;
+
+/**
+ * struct cci_reg_sequence - An individual write from a sequence of CCI writes
+ *
+ * @reg: Register address, use CCI_REG#() macros to encode reg width
+ * @val: Register value
+ *
+ * Register/value pairs for sequences of writes.
+ */
+struct cci_reg_sequence {
+ u32 reg;
+ u64 val;
+};
+
+/*
+ * Macros to define register address with the register width encoded
+ * into the higher bits.
+ */
+#define CCI_REG_ADDR_MASK GENMASK(15, 0)
+#define CCI_REG_WIDTH_SHIFT 16
+#define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+/*
+ * Private CCI register flags, for the use of drivers.
+ */
+#define CCI_REG_PRIVATE_SHIFT 28U
+#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT)
+
+#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
+#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
+#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x)
+#define CCI_REG_LE BIT(20)
+
+#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x))
+
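[Editor's note: as an illustration, a hypothetical sensor driver would encode its register map with these macros as follows.]

    #define FOO_REG_CHIP_ID         CCI_REG16(0x0000)   /* 16-bit read */
    #define FOO_REG_MODE_SELECT     CCI_REG8(0x0100)    /* 8-bit */
    #define FOO_REG_EXPOSURE        CCI_REG24(0x0202)   /* 24-bit */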
+/**
+ * cci_read() - Read a value from a single CCI register
+ *
+ * @map: Register map to read from
+ * @reg: Register address to read, use CCI_REG#() macros to encode reg width
+ * @val: Pointer to store read value
+ * @err: Optional pointer to store errors, if a previous error is set
+ * then the read will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err);
+
+/**
+ * cci_write() - Write a value to a single CCI register
+ *
+ * @map: Register map to write to
+ * @reg: Register address to write, use CCI_REG#() macros to encode reg width
+ * @val: Value to be written
+ * @err: Optional pointer to store errors, if a previous error is set
+ * then the write will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err);
+
+/**
+ * cci_update_bits() - Perform a read/modify/write cycle on
+ * a single CCI register
+ *
+ * @map: Register map to update
+ * @reg: Register address to update, use CCI_REG#() macros to encode reg width
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @err: Optional pointer to store errors, if a previous error is set
+ * then the update will be skipped
+ *
+ * Note this uses read-modify-write to update the bits; atomicity with regard
+ * to other cci_*() register access functions is NOT guaranteed.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err);
+
+/**
+ * cci_multi_reg_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register-address, -value pairs to be
+ * written, register-addresses use CCI_REG#() macros to encode reg width
+ * @num_regs: Number of registers to write
+ * @err: Optional pointer to store errors, if a previous error is set
+ * then the write will be skipped
+ *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
+ * Use of the CCI_REG#() macros to encode reg width is mandatory.
+ *
+ * For raw lists of register-address, -value pairs with only 8 bit
+ * wide writes regmap_multi_reg_write() can be used instead.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err);
+
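[Editor's note: a minimal sketch of the error-chaining pattern enabled by the optional @err argument, reusing the hypothetical FOO_* registers from the note above. Once an error is recorded, later accesses are skipped, so one check at the end suffices.]

    u64 chip_id;
    int ret = 0;

    cci_read(map, FOO_REG_CHIP_ID, &chip_id, &ret);
    cci_write(map, FOO_REG_MODE_SELECT, 0x01, &ret);
    cci_update_bits(map, FOO_REG_MODE_SELECT, BIT(7), BIT(7), &ret);
    if (ret)        /* first failure; subsequent accesses were skipped */
            return ret;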
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+/**
+ * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register
+ * access functions
+ *
+ * @client: i2c_client to create the regmap for
+ * @reg_addr_bits: register address width to use (8 or 16)
+ *
+ * Note the memory for the created regmap is devm() managed, tied to the client.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits);
+#endif
+
+#endif
diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h
deleted file mode 100644
index d9d21a43a834..000000000000
--- a/include/media/v4l2-clk.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * V4L2 clock service
- *
- * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * ATTENTION: This is a temporary API and it shall be replaced by the generic
- * clock API, when the latter becomes widely available.
- */
-
-#ifndef MEDIA_V4L2_CLK_H
-#define MEDIA_V4L2_CLK_H
-
-#include <linux/atomic.h>
-#include <linux/export.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-
-struct module;
-struct device;
-
-struct clk;
-struct v4l2_clk {
- struct list_head list;
- const struct v4l2_clk_ops *ops;
- const char *dev_id;
- int enable;
- struct mutex lock; /* Protect the enable count */
- atomic_t use_count;
- struct clk *clk;
- void *priv;
-};
-
-struct v4l2_clk_ops {
- struct module *owner;
- int (*enable)(struct v4l2_clk *clk);
- void (*disable)(struct v4l2_clk *clk);
- unsigned long (*get_rate)(struct v4l2_clk *clk);
- int (*set_rate)(struct v4l2_clk *clk, unsigned long);
-};
-
-struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
- const char *dev_name,
- void *priv);
-void v4l2_clk_unregister(struct v4l2_clk *clk);
-struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id);
-void v4l2_clk_put(struct v4l2_clk *clk);
-int v4l2_clk_enable(struct v4l2_clk *clk);
-void v4l2_clk_disable(struct v4l2_clk *clk);
-unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk);
-int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate);
-
-struct module;
-
-struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- unsigned long rate, struct module *owner);
-void v4l2_clk_unregister_fixed(struct v4l2_clk *clk);
-
-static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id,
- unsigned long rate)
-{
- return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE);
-}
-
-#define V4L2_CLK_NAME_SIZE 64
-
-#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \
- "%d-%04x", adap, client)
-
-#define v4l2_clk_name_of(name, size, node) snprintf(name, size, \
- "of-%pOF", node)
-
-#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 150ee16ebd81..63ad36f04f72 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
*
* @sd: pointer to &struct v4l2_subdev
* @client: pointer to struct i2c_client
- * @devname: the name of the device; if NULL, the I²C device's name will be used
+ * @devname: the name of the device; if NULL, the I²C device driver's name
+ * will be used
* @postfix: sub-device specific string to put right after the I²C device name;
* may be NULL
*/
@@ -277,13 +278,13 @@ static inline void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd)
*
*
* @v4l2_dev: pointer to &struct v4l2_device.
- * @master: pointer to struct spi_master.
+ * @ctlr: pointer to struct spi_controller.
* @info: pointer to struct spi_board_info.
*
* returns a &struct v4l2_subdev pointer.
*/
struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
- struct spi_master *master, struct spi_board_info *info);
+ struct spi_controller *ctlr, struct spi_board_info *info);
/**
* v4l2_spi_subdev_init - Initialize a v4l2_subdev with data from an
@@ -307,7 +308,7 @@ void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd);
static inline struct v4l2_subdev *
v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
- struct spi_master *master, struct spi_board_info *info)
+ struct spi_controller *ctlr, struct spi_board_info *info)
{
return NULL;
}
@@ -424,7 +425,7 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
/**
* v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
- * calling the g_frame_interval op of the given subdev. It only works
+ * calling the get_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
* function name.
*
@@ -437,7 +438,7 @@ int v4l2_g_parm_cap(struct video_device *vdev,
/**
* v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by
- * calling the s_frame_interval op of the given subdev. It only works
+ * calling the set_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
* function name.
*
@@ -479,6 +480,7 @@ enum v4l2_pixel_encoding {
* @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
* @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
* @bpp: Array of per-plane bytes per pixel
+ * @bpp_div: Array of per-plane bytes per pixel divisors to support fractional pixel sizes.
* @hdiv: Horizontal chroma subsampling factor
* @vdiv: Vertical chroma subsampling factor
* @block_w: Per-plane macroblock pixel width (optional)
@@ -490,6 +492,7 @@ struct v4l2_format_info {
u8 mem_planes;
u8 comp_planes;
u8 bpp[4];
+ u8 bpp_div[4];
u8 hdiv;
u8 vdiv;
u8 block_w[4];
@@ -519,6 +522,57 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
u32 width, u32 height);
+/**
+ * v4l2_get_link_freq - Get link rate from transmitter
+ *
+ * @handler: The transmitter's control handler
+ * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
+ * D-PHY, samples per clock on parallel. 0 otherwise.
+ * @div: The divisor between pixel rate and link frequency. Number of data lanes
+ * times two on D-PHY, 1 on parallel. 0 otherwise.
+ *
+ * This function is intended for obtaining the link frequency from the
+ * transmitter sub-devices. It returns the link rate, either from the
+ * V4L2_CID_LINK_FREQ control implemented by the transmitter, or value
+ * calculated based on the V4L2_CID_PIXEL_RATE implemented by the transmitter.
+ *
+ * Return:
+ * * >0: Link frequency
+ * * %-ENOENT: Link frequency or pixel rate control not found
+ * * %-EINVAL: Invalid link frequency value
+ */
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
+ unsigned int div);
+
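[Editor's note: a hedged example making the @mul/@div convention concrete for a CSI-2 D-PHY link carrying RAW10 over four data lanes; "sensor_sd" is assumed.]

    s64 freq;

    freq = v4l2_get_link_freq(sensor_sd->ctrl_handler,
                              10,       /* bits per pixel (RAW10) */
                              2 * 4);   /* 2 * number of data lanes */
    if (freq < 0)
            return freq;    /* -ENOENT or -EINVAL, see above */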
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold);
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator);
+
+/**
+ * v4l2_link_freq_to_bitmap - Figure out platform-supported link frequencies
+ * @dev: The struct device
+ * @fw_link_freqs: Array of link frequencies from firmware
+ * @num_of_fw_link_freqs: Number of entries in @fw_link_freqs
+ * @driver_link_freqs: Array of link frequencies supported by the driver
+ * @num_of_driver_link_freqs: Number of entries in @driver_link_freqs
+ * @bitmap: Bitmap of driver-supported link frequencies found in @fw_link_freqs
+ *
+ * This function checks which driver-supported link frequencies are enabled in
+ * system firmware and sets the corresponding bits in @bitmap (after first
+ * zeroing it).
+ *
+ * Return:
+ * * %0: Success
+ * * %-ENOENT: No match found between driver-supported link frequencies and
+ * those available in firmware.
+ * * %-ENODATA: No link frequencies were specified in firmware.
+ */
+int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
+ unsigned int num_of_fw_link_freqs,
+ const s64 *driver_link_freqs,
+ unsigned int num_of_driver_link_freqs,
+ unsigned long *bitmap);
+
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
{
/*
@@ -539,4 +593,33 @@ static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf,
buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
+static inline bool v4l2_is_colorspace_valid(__u32 colorspace)
+{
+ return colorspace > V4L2_COLORSPACE_DEFAULT &&
+ colorspace < V4L2_COLORSPACE_LAST;
+}
+
+static inline bool v4l2_is_xfer_func_valid(__u32 xfer_func)
+{
+ return xfer_func > V4L2_XFER_FUNC_DEFAULT &&
+ xfer_func < V4L2_XFER_FUNC_LAST;
+}
+
+static inline bool v4l2_is_ycbcr_enc_valid(__u8 ycbcr_enc)
+{
+ return ycbcr_enc > V4L2_YCBCR_ENC_DEFAULT &&
+ ycbcr_enc < V4L2_YCBCR_ENC_LAST;
+}
+
+static inline bool v4l2_is_hsv_enc_valid(__u8 hsv_enc)
+{
+ return hsv_enc == V4L2_HSV_ENC_180 || hsv_enc == V4L2_HSV_ENC_256;
+}
+
+static inline bool v4l2_is_quant_valid(__u8 quantization)
+{
+ return quantization == V4L2_QUANTIZATION_FULL_RANGE ||
+ quantization == V4L2_QUANTIZATION_LIM_RANGE;
+}
+
#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index f40e2cbb21d3..59679a42b3e7 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -13,16 +13,6 @@
#include <linux/videodev2.h>
#include <media/media-request.h>
-/*
- * Include the stateless codec compound control definitions.
- * This will move to the public headers once this API is fully stable.
- */
-#include <media/mpeg2-ctrls.h>
-#include <media/fwht-ctrls.h>
-#include <media/h264-ctrls.h>
-#include <media/vp8-ctrls.h>
-#include <media/hevc-ctrls.h>
-
/* forward references */
struct file;
struct poll_table_struct;
@@ -43,19 +33,29 @@ struct video_device;
* @p_u16: Pointer to a 16-bit unsigned value.
* @p_u32: Pointer to a 32-bit unsigned value.
* @p_char: Pointer to a string.
- * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure.
- * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure.
+ * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure.
+ * @p_mpeg2_picture: Pointer to a MPEG2 picture structure.
+ * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure.
* @p_fwht_params: Pointer to a FWHT stateless parameters structure.
* @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps.
* @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps.
* @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix.
* @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params.
* @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params.
- * @p_vp8_frame_header: Pointer to a VP8 frame header structure.
+ * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights.
+ * @p_vp8_frame: Pointer to a VP8 frame params structure.
+ * @p_vp9_compressed_hdr_probs: Pointer to a VP9 frame compressed header probs structure.
+ * @p_vp9_frame: Pointer to a VP9 frame params structure.
* @p_hevc_sps: Pointer to an HEVC sequence parameter set structure.
* @p_hevc_pps: Pointer to an HEVC picture parameter set structure.
* @p_hevc_slice_params: Pointer to an HEVC slice parameters structure.
+ * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure.
+ * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure.
* @p_area: Pointer to an area.
+ * @p_av1_sequence: Pointer to an AV1 sequence structure.
+ * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure.
+ * @p_av1_frame: Pointer to an AV1 frame structure.
+ * @p_av1_film_grain: Pointer to an AV1 film grain structure.
* @p: Pointer to a compound value.
* @p_const: Pointer to a constant compound value.
*/
@@ -66,19 +66,29 @@ union v4l2_ctrl_ptr {
u16 *p_u16;
u32 *p_u32;
char *p_char;
- struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
- struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization;
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
- struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
struct v4l2_ctrl_hevc_sps *p_hevc_sps;
struct v4l2_ctrl_hevc_pps *p_hevc_pps;
struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs;
+ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
+ struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll;
+ struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_area *p_area;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
void *p;
const void *p_const;
};
@@ -119,21 +129,19 @@ struct v4l2_ctrl_ops {
* struct v4l2_ctrl_type_ops - The control type operations that the driver
* has to provide.
*
- * @equal: return true if both values are equal.
- * @init: initialize the value.
+ * @equal: return true if all ctrl->elems array elements are equal.
+ * @init: initialize the value for array elements from @from_idx to ctrl->elems.
* @log: log the value.
- * @validate: validate the value. Return 0 on success and a negative value
- * otherwise.
+ * @validate: validate the value for ctrl->new_elems array elements.
+ * Return 0 on success and a negative value otherwise.
*/
struct v4l2_ctrl_type_ops {
- bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr1,
- union v4l2_ctrl_ptr ptr2);
- void (*init)(const struct v4l2_ctrl *ctrl, u32 idx,
+ bool (*equal)(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+ void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx,
union v4l2_ctrl_ptr ptr);
void (*log)(const struct v4l2_ctrl *ctrl);
- int (*validate)(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr);
+ int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
};
/**
@@ -177,6 +185,8 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct
* v4l2_ext_control uses field p to point to the data.
* @is_array: If set, then this control contains an N-dimensional array.
+ * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array.
+ * If this is set, then @is_array is also set.
* @has_volatiles: If set, then one or more members of the cluster are volatile.
* Drivers should never touch this flag.
* @call_notify: If set, then call the handler's notify function whenever the
@@ -197,6 +207,9 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* @step: The control's step value for non-menu controls.
* @elems: The number of elements in the N-dimensional array.
* @elem_size: The size in bytes of the control.
+ * @new_elems: The number of elements in p_new. This is the same as @elems,
+ * except for dynamic arrays. In that case it is in the range of
+ * 1 to @p_array_alloc_elems.
* @dims: The size of each dimension.
* @nr_of_dims:The number of dimensions in @dims.
* @menu_skip_mask: The control's skip mask for menu controls. This makes it
@@ -215,15 +228,20 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* :math:`ceil(\frac{maximum - minimum}{step}) + 1`.
* Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU.
* @flags: The control's flags.
- * @cur: Structure to store the current value.
- * @cur.val: The control's current value, if the @type is represented via
- * a u32 integer (see &enum v4l2_ctrl_type).
- * @val: The control's new s32 value.
* @priv: The control's private pointer. For use by the driver. It is
* untouched by the control framework. Note that this pointer is
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
+ * @p_array: Pointer to the allocated array. Only valid if @is_array is true.
+ * @p_array_alloc_elems: The number of elements in the allocated
+ * array for both the cur and new values. So @p_array is actually
+ * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if
+ * @is_array is true.
+ * @cur: Structure to store the current value.
+ * @cur.val: The control's current value, if the @type is represented via
+ * a u32 integer (see &enum v4l2_ctrl_type).
+ * @val: The control's new s32 value.
* @p_def: The control's default value represented via a union which
* provides a standard way of accessing control types
* through a pointer (for compound controls only).
@@ -252,6 +270,7 @@ struct v4l2_ctrl {
unsigned int is_string:1;
unsigned int is_ptr:1;
unsigned int is_array:1;
+ unsigned int is_dyn_array:1;
unsigned int has_volatiles:1;
unsigned int call_notify:1;
unsigned int manual_mode_value:8;
@@ -264,6 +283,7 @@ struct v4l2_ctrl {
s64 minimum, maximum, default_value;
u32 elems;
u32 elem_size;
+ u32 new_elems;
u32 dims[V4L2_CTRL_MAX_DIMS];
u32 nr_of_dims;
union {
@@ -276,6 +296,8 @@ struct v4l2_ctrl {
};
unsigned long flags;
void *priv;
+ void *p_array;
+ u32 p_array_alloc_elems;
s32 val;
struct {
s32 val;
@@ -301,12 +323,24 @@ struct v4l2_ctrl {
* the control has been applied. This prevents applying controls
* from a cluster with multiple controls twice (when the first
* control of a cluster is applied, they all are).
- * @req: If set, this refers to another request that sets this control.
+ * @p_req_valid: If set, then p_req contains the control value for the request.
+ * @p_req_array_enomem: If set, then p_req is invalid since allocating space for
+ * an array failed. Attempting to read this value shall
+ * result in ENOMEM. Only valid if ctrl->is_array is true.
+ * @p_req_array_alloc_elems: The number of elements allocated for the
+ * array. Only valid if @p_req_valid and ctrl->is_array are
+ * true.
+ * @p_req_elems: The number of elements in @p_req. This is the same as
+ * ctrl->elems, except for dynamic arrays. In that case it is in
+ * the range of 1 to @p_req_array_alloc_elems. Only valid if
+ * @p_req_valid is true.
* @p_req: If the control handler containing this control reference
* is bound to a media request, then this points to the
- * value of the control that should be applied when the request
+ * value of the control that must be applied when the request
* is executed, or to the value of the control at the time
- * that the request was completed.
+ * that the request was completed. If @p_req_valid is false,
+ * then this control was never set for this request and the
+ * control will not be updated when this request is applied.
*
* Each control handler has a list of these refs. The list_head is used to
* keep a sorted-by-control-ID list of all controls, while the next pointer
@@ -319,7 +353,10 @@ struct v4l2_ctrl_ref {
struct v4l2_ctrl_helper *helper;
bool from_other_dev;
bool req_done;
- struct v4l2_ctrl_ref *req;
+ bool p_req_valid;
+ bool p_req_array_enomem;
+ u32 p_req_array_alloc_elems;
+ u32 p_req_elems;
union v4l2_ctrl_ptr p_req;
};
@@ -346,7 +383,7 @@ struct v4l2_ctrl_ref {
* @error: The error code of the first failed control addition.
* @request_is_queued: True if the request was queued.
* @requests: List to keep track of open control handler request objects.
- * For the parent control handler (@req_obj.req == NULL) this
+ * For the parent control handler (@req_obj.ops == NULL) this
* is the list header. When the parent control handler is
* removed, it has to unbind and put all these requests since
* they refer to the parent.
@@ -927,6 +964,59 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
}
/**
+ * __v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions()
+ *
+ * @ctrl: The control to update.
+ * @dims: The control's new dimensions.
+ *
+ * Update the dimensions of an array control on the fly. The elements of the
+ * array are reset to their default value, even if the dimensions are
+ * unchanged.
+ *
+ * An error is returned if @dims is invalid for this control.
+ *
+ * The caller is responsible for acquiring the control handler mutex on behalf
+ * of __v4l2_ctrl_modify_dimensions().
+ *
+ * Note: calling this function when the same control is used in pending requests
+ * is untested. It should work (a request with the wrong size of the control
+ * will drop that control silently), but it will be very confusing.
+ */
+int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS]);
+
+/**
+ * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control.
+ *
+ * @ctrl: The control to update.
+ * @dims: The control's new dimensions.
+ *
+ * Update the dimensions of an array control on the fly. The elements of the
+ * array are reset to their default value, even if the dimensions are
+ * unchanged.
+ *
+ * An error is returned if @dims is invalid for this control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ *
+ * Note: calling this function when the same control is used in pending requests
+ * is untested. It should work (a request with the wrong size of the control
+ * will drop that control silently), but it will be very confusing.
+ */
+static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS])
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_modify_dimensions(ctrl, dims);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
+
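For illustration, a minimal driver-side sketch of resizing an array control
with the locked variant (the ctrl pointer and the 4x4 geometry are
hypothetical; the remaining entries of dims stay zero-initialized):

	u32 dims[V4L2_CTRL_MAX_DIMS] = { 4, 4 };
	int ret;

	ret = v4l2_ctrl_modify_dimensions(ctrl, dims);
	if (ret)
		return ret;	/* dims was invalid for this control */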
+/**
* v4l2_ctrl_notify() - Function to set a notify callback for a control.
*
* @ctrl: The control.
@@ -1261,7 +1351,7 @@ void v4l2_ctrl_request_complete(struct media_request *req,
* @parent: The parent control handler ('priv' in media_request_object_find())
*
* This function finds the control handler in the request. It may return
- * NULL if not found. When done, you must call v4l2_ctrl_request_put_hdl()
+ * NULL if not found. When done, you must call v4l2_ctrl_request_hdl_put()
* with the returned handler pointer.
*
* If the request is not in state VALIDATING or QUEUED, then this function
@@ -1290,7 +1380,7 @@ static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl)
}
/**
- * v4l2_ctrl_request_ctrl_find() - Find a control with the given ID.
+ * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID.
*
* @hdl: The control handler from the request.
* @id: The ID of the control to find.
@@ -1454,4 +1544,48 @@ int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd);
int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ctrl_ops,
const struct v4l2_fwnode_device_properties *p);
+
+/**
+ * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @ptr1: A v4l2 control value.
+ * @ptr2: A v4l2 control value.
+ *
+ * Return: true if values are equal, otherwise false.
+ */
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+
+/**
+ * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @from_idx: Starting element index.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ union v4l2_ctrl_ptr ptr);
+
+/**
+ * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl);
+
+/**
+ * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
+
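These exported defaults let a driver supply its own &struct v4l2_ctrl_type_ops
while falling back on the stock behaviour for the callbacks it does not
override. A sketch (my_validate and my_type_ops are hypothetical):

	static int my_validate(const struct v4l2_ctrl *ctrl,
			       union v4l2_ctrl_ptr ptr)
	{
		/* driver-specific checks could precede the default ones */
		return v4l2_ctrl_type_op_validate(ctrl, ptr);
	}

	static const struct v4l2_ctrl_type_ops my_type_ops = {
		.equal		= v4l2_ctrl_type_op_equal,
		.init		= v4l2_ctrl_type_op_init,
		.log		= v4l2_ctrl_type_op_log,
		.validate	= my_validate,
	};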
#endif
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index ad2d41952442..d82dfdbf6e58 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -43,8 +43,8 @@ enum vfl_devnode_type {
};
/**
- * enum vfl_direction - Identifies if a &struct video_device corresponds
- * to a receiver, a transmitter or a mem-to-mem device.
+ * enum vfl_devnode_direction - Identifies if a &struct video_device
+ * corresponds to a receiver, a transmitter or a mem-to-mem device.
*
* @VFL_DIR_RX: device is a receiver.
* @VFL_DIR_TX: device is a transmitter.
@@ -260,8 +260,7 @@ struct v4l2_file_operations {
* Only set @dev_parent if that can't be deduced from @v4l2_dev.
*/
-struct video_device
-{
+struct video_device {
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_entity entity;
struct media_intf_devnode *intf_devnode;
@@ -285,7 +284,7 @@ struct video_device
struct v4l2_prio_state *prio;
/* device info */
- char name[32];
+ char name[64];
enum vfl_devnode_type vfl_type;
enum vfl_devnode_direction vfl_dir;
int minor;
@@ -540,4 +539,106 @@ static inline int video_is_registered(struct video_device *vdev)
return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
}
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+/**
+ * video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as streaming. The given pipeline object is
+ * assigned to every pad in the pipeline and stored in the media_pad pipe
+ * field.
+ *
+ * Calls to this function can be nested, in which case the same number of
+ * video_device_pipeline_stop() calls will be required to stop streaming. The
+ * pipeline pointer must be identical for all nested calls to
+ * video_device_pipeline_start().
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_start().
+ */
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * __video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_start()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_start().
+ */
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe);
+
+/**
+ * video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as not streaming. The media_pad pipe field
+ * is reset to %NULL.
+ *
+ * If multiple calls to media_pipeline_start() have been made, the same
+ * number of calls to this function are required to mark the pipeline as not
+ * streaming.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_stop().
+ */
+void video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * __video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * .. note:: This is the non-locking version of media_pipeline_stop()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_stop().
+ */
+void __video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * video_device_pipeline_alloc_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ *
+ * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start()
+ * but instead of working on a given pipeline the function will use an
+ * existing pipeline if the video device is already part of a pipeline, or
+ * allocate a new pipeline.
+ *
+ * Calls to video_device_pipeline_alloc_start() must be matched with
+ * video_device_pipeline_stop().
+ */
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev);
+
+/**
+ * video_device_pipeline - Get the media pipeline a video device is part of
+ * @vdev: The video device
+ *
+ * This function returns the media pipeline that a video device has been
+ * associated with when constructing the pipeline with
+ * video_device_pipeline_start(). The pointer remains valid until
+ * video_device_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the video device is part of, or NULL if the video
+ * device is not part of any pipeline.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_entity_pipeline().
+ */
+struct media_pipeline *video_device_pipeline(struct video_device *vdev);
+
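A sketch of how the start/stop pair is expected to be used from a vb2
start_streaming handler on a single-pad video device (vdev and the error
path are hypothetical):

	struct media_pipeline pipe;
	int ret;

	ret = video_device_pipeline_start(vdev, &pipe);
	if (ret)
		return ret;

	/* ... program the hardware; on the failure path, undo with: */
	video_device_pipeline_stop(vdev);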
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
#endif /* _V4L2_DEV_H */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 64ec4de948e9..f6f111fae33c 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -13,8 +13,6 @@
#include <media/v4l2-subdev.h>
#include <media/v4l2-dev.h>
-#define V4L2_DEVICE_NAME_SIZE (20 + 16)
-
struct v4l2_ctrl_handler;
/**
@@ -49,7 +47,7 @@ struct v4l2_device {
struct media_device *mdev;
struct list_head subdevs;
spinlock_t lock;
- char name[V4L2_DEVICE_NAME_SIZE];
+ char name[36];
void (*notify)(struct v4l2_subdev *sd,
unsigned int notification, void *arg);
struct v4l2_ctrl_handler *ctrl_handler;
@@ -174,7 +172,7 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
/**
- * __v4l2_device_register_ro_subdev_nodes - Registers device nodes for
+ * __v4l2_device_register_subdev_nodes - Registers device nodes for
* all subdevs of the v4l2 device that are marked with the
* %V4L2_SUBDEV_FL_HAS_DEVNODE flag.
*
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 2cc0cabc124f..8fa963326bf6 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -224,7 +224,7 @@ static inline bool can_reduce_fps(struct v4l2_bt_timings *bt)
}
/**
- * struct v4l2_hdmi_rx_colorimetry - describes the HDMI colorimetry information
+ * struct v4l2_hdmi_colorimetry - describes the HDMI colorimetry information
* @colorspace: enum v4l2_colorspace, the colorspace
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @quantization: enum v4l2_quantization, colorspace quantization
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 3f0281d06ec7..3a0e2588361c 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -78,7 +78,7 @@ struct v4l2_subscribed_event {
unsigned int elems;
unsigned int first;
unsigned int in_use;
- struct v4l2_kevent events[];
+ struct v4l2_kevent events[] __counted_by(elems);
};
/**
@@ -101,7 +101,7 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
*
* .. note::
* The driver's only responsibility is to fill in the type and the data
- * fields.The other fields will be filled in by V4L2.
+ * fields. The other fields will be filled in by V4L2.
*/
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
@@ -116,11 +116,20 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
*
* .. note::
* The driver's only responsibility is to fill in the type and the data
- * fields.The other fields will be filled in by V4L2.
+ * fields. The other fields will be filled in by V4L2.
*/
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev);
/**
+ * v4l2_event_wake_all - Wake all filehandles.
+ *
+ * Used when unregistering a video device.
+ *
+ * @vdev: pointer to &struct video_device
+ */
+void v4l2_event_wake_all(struct video_device *vdev);
+
+/**
* v4l2_event_pending - Check if an event is available
*
* @fh: pointer to &struct v4l2_fh
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index c47b70636e42..f7c57c776589 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -21,72 +21,19 @@
#include <media/v4l2-mediabus.h>
-struct fwnode_handle;
-struct v4l2_async_notifier;
-struct v4l2_async_subdev;
-
-#define V4L2_FWNODE_CSI2_MAX_DATA_LANES 4
-
-/**
- * struct v4l2_fwnode_bus_mipi_csi2 - MIPI CSI-2 bus data structure
- * @flags: media bus (V4L2_MBUS_*) flags
- * @data_lanes: an array of physical data lane indexes
- * @clock_lane: physical lane index of the clock lane
- * @num_data_lanes: number of data lanes
- * @lane_polarities: polarity of the lanes. The order is the same of
- * the physical lanes.
- */
-struct v4l2_fwnode_bus_mipi_csi2 {
- unsigned int flags;
- unsigned char data_lanes[V4L2_FWNODE_CSI2_MAX_DATA_LANES];
- unsigned char clock_lane;
- unsigned short num_data_lanes;
- bool lane_polarities[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES];
-};
-
-/**
- * struct v4l2_fwnode_bus_parallel - parallel data bus data structure
- * @flags: media bus (V4L2_MBUS_*) flags
- * @bus_width: bus width in bits
- * @data_shift: data shift in bits
- */
-struct v4l2_fwnode_bus_parallel {
- unsigned int flags;
- unsigned char bus_width;
- unsigned char data_shift;
-};
-
-/**
- * struct v4l2_fwnode_bus_mipi_csi1 - CSI-1/CCP2 data bus structure
- * @clock_inv: polarity of clock/strobe signal
- * false - not inverted, true - inverted
- * @strobe: false - data/clock, true - data/strobe
- * @lane_polarity: the polarities of the clock (index 0) and data lanes
- * index (1)
- * @data_lane: the number of the data lane
- * @clock_lane: the number of the clock lane
- */
-struct v4l2_fwnode_bus_mipi_csi1 {
- unsigned char clock_inv:1;
- unsigned char strobe:1;
- bool lane_polarity[2];
- unsigned char data_lane;
- unsigned char clock_lane;
-};
-
/**
* struct v4l2_fwnode_endpoint - the endpoint data structure
* @base: fwnode endpoint of the v4l2_fwnode
* @bus_type: bus type
- * @bus: union with bus configuration data structure
- * @bus.parallel: embedded &struct v4l2_fwnode_bus_parallel.
+ * @bus: bus configuration data structure
+ * @bus.parallel: embedded &struct v4l2_mbus_config_parallel.
* Used if the bus is parallel.
- * @bus.mipi_csi1: embedded &struct v4l2_fwnode_bus_mipi_csi1.
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1.
* Used if the bus is MIPI Alliance's Camera Serial
* Interface version 1 (MIPI CSI1) or Standard
* Mobile Imaging Architecture's Compact Camera Port 2
* (SMIA CCP2).
- * @bus.mipi_csi2: embedded &struct v4l2_fwnode_bus_mipi_csi2.
+ * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2.
* Used if the bus is MIPI Alliance's Camera Serial
* Interface version 2 (MIPI CSI2).
* @link_frequencies: array of supported link frequencies
@@ -94,15 +41,11 @@ struct v4l2_fwnode_bus_mipi_csi1 {
*/
struct v4l2_fwnode_endpoint {
struct fwnode_endpoint base;
- /*
- * Fields below this line will be zeroed by
- * v4l2_fwnode_endpoint_parse()
- */
enum v4l2_mbus_type bus_type;
- union {
- struct v4l2_fwnode_bus_parallel parallel;
- struct v4l2_fwnode_bus_mipi_csi1 mipi_csi1;
- struct v4l2_fwnode_bus_mipi_csi2 mipi_csi2;
+ struct {
+ struct v4l2_mbus_config_parallel parallel;
+ struct v4l2_mbus_config_mipi_csi1 mipi_csi1;
+ struct v4l2_mbus_config_mipi_csi2 mipi_csi2;
} bus;
u64 *link_frequencies;
unsigned int nr_of_link_frequencies;
@@ -214,29 +157,64 @@ struct v4l2_fwnode_connector {
};
/**
+ * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties
+ * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property
+ * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer
+ * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus
+ * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus
+ * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer
+ * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus
+ * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type
+ * @V4L2_FWNODE_BUS_TYPE_DPI: Video Parallel Interface bus
+ * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types
+ */
+enum v4l2_fwnode_bus_type {
+ V4L2_FWNODE_BUS_TYPE_GUESS = 0,
+ V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
+ V4L2_FWNODE_BUS_TYPE_CSI1,
+ V4L2_FWNODE_BUS_TYPE_CCP2,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+ V4L2_FWNODE_BUS_TYPE_PARALLEL,
+ V4L2_FWNODE_BUS_TYPE_BT656,
+ V4L2_FWNODE_BUS_TYPE_DPI,
+ NR_OF_V4L2_FWNODE_BUS_TYPE
+};
+
+/**
* v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
* @fwnode: pointer to the endpoint's fwnode handle
* @vep: pointer to the V4L2 fwnode data structure
*
* This function parses the V4L2 fwnode endpoint specific parameters from the
- * firmware. The caller is responsible for assigning @vep.bus_type to a valid
- * media bus type. The caller may also set the default configuration for the
- * endpoint --- a configuration that shall be in line with the DT binding
- * documentation. Should a device support multiple bus types, the caller may
- * call this function once the correct type is found --- with a default
- * configuration valid for that type.
- *
- * As a compatibility means guessing the bus type is also supported by setting
- * @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
- * configuration in this case as the defaults are specific to a given bus type.
- * This functionality is deprecated and should not be used in new drivers and it
- * is only supported for CSI-2 D-PHY, parallel and Bt.656 buses.
+ * firmware. There are two ways to use this function, either by letting it
+ * obtain the type of the bus (by setting the @vep.bus_type field to
+ * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum
+ * v4l2_mbus_type types.
+ *
+ * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type"
+ * property to determine the type when it is available. The caller is
+ * responsible for validating the contents of @vep.bus_type field after the call
+ * returns.
+ *
+ * As a deprecated functionality to support older DT bindings without "bus-type"
+ * property for devices that support multiple types, if the "bus-type" property
+ * does not exist, the function will attempt to guess the type based on the
+ * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW
+ * DRIVERS OR BINDINGS.
+ *
+ * It is also possible to set @vep.bus_type corresponding to an actual bus. In
+ * this case the function will only attempt to parse properties related to this
+ * bus, and it will return an error if the value of the "bus-type" property
+ * corresponds to a different bus.
+ *
+ * The caller is required to initialise all fields of @vep, either with
+ * explicit values, or by zeroing them.
*
* The function does not change the V4L2 fwnode endpoint state if it fails.
*
- * NOTE: This function does not parse properties the size of which is variable
- * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
- * new drivers instead.
+ * NOTE: This function does not parse "link-frequencies" property as its size is
+ * not known in advance. Please use v4l2_fwnode_endpoint_alloc_parse() if you
+ * need properties of variable size.
*
* Return: %0 on success or a negative error code on failure:
* %-ENOMEM on memory allocation failure
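A sketch of a parse call that requires a CSI-2 D-PHY endpoint (the fwnode
handle ep comes from the caller and is hypothetical here):

	struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
	int ret;

	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
	if (ret)
		return ret;
	/* vep.bus.mipi_csi2 now holds the parsed D-PHY configuration */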
@@ -262,18 +240,29 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
* @vep: pointer to the V4L2 fwnode data structure
*
* This function parses the V4L2 fwnode endpoint specific parameters from the
- * firmware. The caller is responsible for assigning @vep.bus_type to a valid
- * media bus type. The caller may also set the default configuration for the
- * endpoint --- a configuration that shall be in line with the DT binding
- * documentation. Should a device support multiple bus types, the caller may
- * call this function once the correct type is found --- with a default
- * configuration valid for that type.
- *
- * As a compatibility means guessing the bus type is also supported by setting
- * @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
- * configuration in this case as the defaults are specific to a given bus type.
- * This functionality is deprecated and should not be used in new drivers and it
- * is only supported for CSI-2 D-PHY, parallel and Bt.656 buses.
+ * firmware. There are two ways to use this function, either by letting it
+ * obtain the type of the bus (by setting the @vep.bus_type field to
+ * V4L2_MBUS_UNKNOWN) or specifying the bus type explicitly to one of the &enum
+ * v4l2_mbus_type types.
+ *
+ * When @vep.bus_type is V4L2_MBUS_UNKNOWN, the function will use the "bus-type"
+ * property to determine the type when it is available. The caller is
+ * responsible for validating the contents of @vep.bus_type field after the call
+ * returns.
+ *
+ * As a deprecated functionality to support older DT bindings without "bus-type"
+ * property for devices that support multiple types, if the "bus-type" property
+ * does not exist, the function will attempt to guess the type based on the
+ * endpoint properties available. NEVER RELY ON GUESSING THE BUS TYPE IN NEW
+ * DRIVERS OR BINDINGS.
+ *
+ * It is also possible to set @vep.bus_type corresponding to an actual bus. In
+ * this case the function will only attempt to parse properties related to this
+ * bus, and it will return an error if the value of the "bus-type" property
+ * corresponds to a different bus.
+ *
+ * The caller is required to initialise all fields of @vep, either with
+ * explicit values, or by zeroing them.
*
* The function does not change the V4L2 fwnode endpoint state if it fails.
*
@@ -400,142 +389,6 @@ int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
int v4l2_fwnode_device_parse(struct device *dev,
struct v4l2_fwnode_device_properties *props);
-/**
- * typedef parse_endpoint_func - Driver's callback function to be called on
- * each V4L2 fwnode endpoint.
- *
- * @dev: pointer to &struct device
- * @vep: pointer to &struct v4l2_fwnode_endpoint
- * @asd: pointer to &struct v4l2_async_subdev
- *
- * Return:
- * * %0 on success
- * * %-ENOTCONN if the endpoint is to be skipped but this
- * should not be considered as an error
- * * %-EINVAL if the endpoint configuration is invalid
- */
-typedef int (*parse_endpoint_func)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd);
-
-/**
- * v4l2_async_notifier_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
- * device node
- * @dev: the device the endpoints of which are to be parsed
- * @notifier: notifier for @dev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * Parse the fwnode endpoints of the @dev device and populate the async sub-
- * devices list in the notifier. The @parse_endpoint callback function is
- * called for each endpoint with the corresponding async sub-device pointer to
- * let the caller initialize the driver-specific part of the async sub-device
- * structure.
- *
- * The notifier memory shall be zeroed before this function is called on the
- * notifier.
- *
- * This function may not be called on a registered notifier and may be called on
- * a notifier only once.
- *
- * The &struct v4l2_fwnode_endpoint passed to the callback function
- * @parse_endpoint is released once the function is finished. If there is a need
- * to retain that configuration, the user needs to allocate memory for it.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_cleanup() after it has been unregistered and the async
- * sub-devices are no longer in use, even if the function returned an error.
- *
- * Return: %0 on success, including when no async sub-devices are found
- * %-ENOMEM if memory allocation failed
- * %-EINVAL if graph or endpoint parsing failed
- * Other error codes as returned by @parse_endpoint
- */
-int
-v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
-
-/**
- * v4l2_async_notifier_parse_fwnode_endpoints_by_port - Parse V4L2 fwnode
- * endpoints of a port in a
- * device node
- * @dev: the device the endpoints of which are to be parsed
- * @notifier: notifier for @dev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @port: port number where endpoints are to be parsed
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * This function is just like v4l2_async_notifier_parse_fwnode_endpoints() with
- * the exception that it only parses endpoints in a given port. This is useful
- * on devices that have both sinks and sources: the async sub-devices connected
- * to sources have already been configured by another driver (on capture
- * devices). In this case the driver must know which ports to parse.
- *
- * Parse the fwnode endpoints of the @dev device on a given @port and populate
- * the async sub-devices list of the notifier. The @parse_endpoint callback
- * function is called for each endpoint with the corresponding async sub-device
- * pointer to let the caller initialize the driver-specific part of the async
- * sub-device structure.
- *
- * The notifier memory shall be zeroed before this function is called on the
- * notifier the first time.
- *
- * This function may not be called on a registered notifier and may be called on
- * a notifier only once per port.
- *
- * The &struct v4l2_fwnode_endpoint passed to the callback function
- * @parse_endpoint is released once the function is finished. If there is a need
- * to retain that configuration, the user needs to allocate memory for it.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_cleanup() after it has been unregistered and the async
- * sub-devices are no longer in use, even if the function returned an error.
- *
- * Return: %0 on success, including when no async sub-devices are found
- * %-ENOMEM if memory allocation failed
- * %-EINVAL if graph or endpoint parsing failed
- * Other error codes as returned by @parse_endpoint
- */
-int
-v4l2_async_notifier_parse_fwnode_endpoints_by_port(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- unsigned int port,
- parse_endpoint_func parse_endpoint);
-
-/**
- * v4l2_fwnode_reference_parse_sensor_common - parse common references on
- * sensors for async sub-devices
- * @dev: the device node the properties of which are parsed for references
- * @notifier: the async notifier where the async subdevs will be added
- *
- * Parse common sensor properties for remote devices related to the
- * sensor and set up async sub-devices for them.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_release() after it has been unregistered and the async
- * sub-devices are no longer in use, even in the case the function returned an
- * error.
- *
- * Return: 0 on success
- * -ENOMEM if memory allocation failed
- * -EINVAL if property parsing failed
- */
-int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev,
- struct v4l2_async_notifier *notifier);
-
/* Helper macros to access the connector links. */
/** v4l2_connector_last_link - Helper macro to get the first
diff --git a/include/media/v4l2-h264.h b/include/media/v4l2-h264.h
index bc9ebb560ccf..0d9eaa956123 100644
--- a/include/media/v4l2-h264.h
+++ b/include/media/v4l2-h264.h
@@ -10,17 +10,18 @@
#ifndef _MEDIA_V4L2_H264_H
#define _MEDIA_V4L2_H264_H
-#include <media/h264-ctrls.h>
+#include <media/v4l2-ctrls.h>
/**
* struct v4l2_h264_reflist_builder - Reference list builder object
*
- * @refs.pic_order_count: reference picture order count
+ * @refs.top_field_order_cnt: top field order count
+ * @refs.bottom_field_order_cnt: bottom field order count
* @refs.frame_num: reference frame number
- * @refs.pic_num: reference picture number
* @refs.longterm: set to true for a long term reference
* @refs: array of references
* @cur_pic_order_count: picture order count of the frame being decoded
+ * @cur_pic_fields: fields present in the frame being decoded
* @unordered_reflist: unordered list of references. Will be used to generate
* ordered P/B0/B1 lists
* @num_valid: number of valid references in the refs array
@@ -31,20 +32,22 @@
*/
struct v4l2_h264_reflist_builder {
struct {
- s32 pic_order_count;
+ s32 top_field_order_cnt;
+ s32 bottom_field_order_cnt;
int frame_num;
- u16 pic_num;
u16 longterm : 1;
} refs[V4L2_H264_NUM_DPB_ENTRIES];
+
s32 cur_pic_order_count;
- u8 unordered_reflist[V4L2_H264_NUM_DPB_ENTRIES];
+ u8 cur_pic_fields;
+
+ struct v4l2_h264_reference unordered_reflist[V4L2_H264_REF_LIST_LEN];
u8 num_valid;
};
void
v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
const struct v4l2_ctrl_h264_decode_params *dec_params,
- const struct v4l2_ctrl_h264_slice_params *slice_params,
const struct v4l2_ctrl_h264_sps *sps,
const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]);
@@ -52,10 +55,10 @@ v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
* v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
*
* @builder: reference list builder context
- * @b0_reflist: 16-bytes array used to store the B0 reference list. Each entry
- * is an index in the DPB
- * @b1_reflist: 16-bytes array used to store the B1 reference list. Each entry
- * is an index in the DPB
+ * @b0_reflist: 32-entry array used to store the B0 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ * @b1_reflist: 32-entry array used to store the B1 reference list. Each entry
+ * is a v4l2_h264_reference structure
*
 * This function builds the B0/B1 reference lists. This procedure is described
* in section '8.2.4 Decoding process for reference picture lists construction'
@@ -64,14 +67,15 @@ v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
*/
void
v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
- u8 *b0_reflist, u8 *b1_reflist);
+ struct v4l2_h264_reference *b0_reflist,
+ struct v4l2_h264_reference *b1_reflist);
/**
- * v4l2_h264_build_b_ref_lists() - Build the P reference list
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
*
* @builder: reference list builder context
- * @p_reflist: 16-bytes array used to store the P reference list. Each entry
- * is an index in the DPB
+ * @reflist: 32-entry array used to store the P reference list. Each entry
+ * is a v4l2_h264_reference structure
*
 * This function builds the P reference list. This procedure is described in
* section '8.2.4 Decoding process for reference picture lists construction'
@@ -80,6 +84,6 @@ v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
*/
void
v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
- u8 *reflist);
+ struct v4l2_h264_reference *reflist);
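With the new signatures, a stateless H.264 decoder would build a P reference
list roughly as follows (dec_params, sps and dpb come from the control
framework and are hypothetical here):

	struct v4l2_h264_reflist_builder b;
	struct v4l2_h264_reference reflist[V4L2_H264_REF_LIST_LEN];

	v4l2_h264_init_reflist_builder(&b, dec_params, sps, dpb);
	v4l2_h264_build_p_ref_list(&b, reflist);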
#endif /* _MEDIA_V4L2_H264_H */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 86878fba332b..edb733f21604 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -686,6 +686,16 @@ long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
unsigned long arg);
#endif
+unsigned int v4l2_compat_translate_cmd(unsigned int cmd);
+int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd);
+int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd);
+int v4l2_compat_get_array_args(struct file *file, void *mbuf,
+ void __user *user_ptr, size_t array_size,
+ unsigned int cmd, void *arg);
+int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr,
+ void *mbuf, size_t array_size,
+ unsigned int cmd, void *arg);
+
/**
* typedef v4l2_kioctl - Typedef used to pass an ioctl handler.
*
diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h
index ddba2a56c321..2dba843ce3bd 100644
--- a/include/media/v4l2-jpeg.h
+++ b/include/media/v4l2-jpeg.h
@@ -88,10 +88,30 @@ struct v4l2_jpeg_scan_header {
};
/**
+ * enum v4l2_jpeg_app14_tf - APP14 transform flag
+ * According to Rec. ITU-T T.872 (06/2012) 6.5.3, the APP14 segment carries
+ * color-encoding information in a transform flag, which may take the
+ * values 0, 1 or 2, interpreted as follows:
+ * @V4L2_JPEG_APP14_TF_CMYK_RGB: CMYK for images encoded with four components
+ * RGB for images encoded with three components
+ * @V4L2_JPEG_APP14_TF_YCBCR: an image encoded with three components using YCbCr
+ * @V4L2_JPEG_APP14_TF_YCCK: an image encoded with four components using YCCK
+ * @V4L2_JPEG_APP14_TF_UNKNOWN: indicates the APP14 segment is not present
+ */
+enum v4l2_jpeg_app14_tf {
+ V4L2_JPEG_APP14_TF_CMYK_RGB = 0,
+ V4L2_JPEG_APP14_TF_YCBCR = 1,
+ V4L2_JPEG_APP14_TF_YCCK = 2,
+ V4L2_JPEG_APP14_TF_UNKNOWN = -1,
+};
+
+/**
* struct v4l2_jpeg_header - parsed JPEG header
* @sof: pointer to frame header and size
* @sos: pointer to scan header and size
+ * @num_dht: number of entries in @dht
* @dht: pointers to huffman tables and sizes
+ * @num_dqt: number of entries in @dqt
* @dqt: pointers to quantization tables and sizes
* @frame: parsed frame header
* @scan: pointer to parsed scan header, optional
@@ -100,6 +120,7 @@ struct v4l2_jpeg_scan_header {
* order, optional
* @restart_interval: number of MCU per restart interval, Ri
* @ecs_offset: buffer offset in bytes to the entropy coded segment
+ * @app14_tf: transform flag from app14 data
*
* When this structure is passed to v4l2_jpeg_parse_header, the optional scan,
* quantization_tables, and huffman_tables pointers must be initialized to NULL
@@ -119,6 +140,7 @@ struct v4l2_jpeg_header {
struct v4l2_jpeg_reference *huffman_tables;
u16 restart_interval;
size_t ecs_offset;
+ enum v4l2_jpeg_app14_tf app14_tf;
};
int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out);
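A sketch of consuming the new field after parsing (buf/len and the helper
set_up_ycck_conversion are hypothetical):

	struct v4l2_jpeg_header hdr = { };
	int ret;

	ret = v4l2_jpeg_parse_header(buf, len, &hdr);
	if (!ret && hdr.app14_tf == V4L2_JPEG_APP14_TF_YCCK)
		set_up_ycck_conversion();	/* four-component YCCK input */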
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index bdaa5f2f8ca2..ed0a44b6eada 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -87,16 +87,22 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q);
/**
* v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a
- * source subdev to a sink subdev pad.
+ * source subdev to a sink pad.
*
* @src_sd: pointer to a source subdev
- * @sink: pointer to a subdev sink pad
+ * @sink: pointer to a sink pad
+ * @flags: the link flags
*
* This function searches for fwnode endpoint connections from a source
* subdevice to a single sink pad, and if suitable connections are found,
* translates them into media links to that pad. The function can be
- * called by the sink subdevice, in its v4l2-async notifier subdev bound
- * callback, to create links from a bound source subdevice.
+ * called by the sink, in its v4l2-async notifier bound callback, to create
+ * links from a bound source subdevice.
+ *
+ * The @flags argument specifies the link flags. The caller shall ensure that
+ * the flags are valid regardless of the number of links that may be created.
+ * For instance, setting the MEDIA_LNK_FL_ENABLED flag will cause all created
+ * links to be enabled, which isn't valid if more than one link is created.
*
* .. note::
*
@@ -107,7 +113,7 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q);
* Return 0 on success or a negative error code on failure.
*/
int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
- struct media_pad *sink);
+ struct media_pad *sink, u32 flags);
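A sketch of the updated call from a notifier bound callback, creating the
links disabled (flags = 0) so that more than one link may be created
(my_dev and its sink_pad are hypothetical):

	ret = v4l2_create_fwnode_links_to_pad(src_sd, &my_dev->sink_pad, 0);
	if (ret)
		return ret;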
/**
* v4l2_create_fwnode_links - Create fwnode-based links from a source
@@ -137,6 +143,9 @@ int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
* v4l2_pipeline_pm_get - Increase the use count of a pipeline
* @entity: The root entity of a pipeline
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* Update the use count of all entities in the pipeline and power entities on.
*
* This function is intended to be called in video node open. It uses
@@ -151,6 +160,9 @@ int v4l2_pipeline_pm_get(struct media_entity *entity);
* v4l2_pipeline_pm_put - Decrease the use count of a pipeline
* @entity: The root entity of a pipeline
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* Update the use count of all entities in the pipeline and power entities off.
*
* This function is intended to be called in video node release. It uses
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 45f88f0248c4..5bce6e423e94 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -11,9 +11,31 @@
#include <linux/v4l2-mediabus.h>
#include <linux/bitops.h>
+/*
+ * How to use the V4L2_MBUS_* flags:
+ * Flags are defined for each of the possible states and values of a media
+ * bus configuration parameter. One and only one bit of each group of flags
+ * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config
+ * operation to ensure that no conflicting settings are specified when
+ * reporting the media bus configuration. For example, it is invalid to set or
+ * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the
+ * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag
+ * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set.
+ *
+ * TODO: replace the existing V4L2_MBUS_* flags with structures of fields
+ * to avoid conflicting settings.
+ *
+ * For example:
+ * #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2)
+ * #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3)
+ * will be replaced by a field whose value reports the intended active state of
+ * the signal:
+ * unsigned int v4l2_mbus_hsync_active : 1;
+ */
+
/* Parallel flags */
/*
- * Can the client run in master or in slave mode. By "Master mode" an operation
+ * The client runs in master or in slave mode. By "Master mode" an operation
* mode is meant, when the client (e.g., a camera sensor) is producing
* horizontal and vertical synchronisation. In "Slave mode" the host is
* providing these signals to the slave.
@@ -32,41 +54,71 @@
#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5)
#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6)
#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7)
-#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(8)
-#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(9)
+#define V4L2_MBUS_PCLK_SAMPLE_DUALEDGE BIT(8)
+#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(9)
+#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(10)
/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */
-#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(10)
+#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(11)
/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */
-#define V4L2_MBUS_FIELD_EVEN_LOW BIT(11)
+#define V4L2_MBUS_FIELD_EVEN_LOW BIT(12)
/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
-#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
-#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
-#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(14)
-#define V4L2_MBUS_DATA_ENABLE_LOW BIT(15)
+#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(13)
+#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(14)
+#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(15)
+#define V4L2_MBUS_DATA_ENABLE_LOW BIT(16)
/* Serial flags */
-/* How many lanes the client can use */
-#define V4L2_MBUS_CSI2_1_LANE BIT(0)
-#define V4L2_MBUS_CSI2_2_LANE BIT(1)
-#define V4L2_MBUS_CSI2_3_LANE BIT(2)
-#define V4L2_MBUS_CSI2_4_LANE BIT(3)
-/* On which channels it can send video data */
-#define V4L2_MBUS_CSI2_CHANNEL_0 BIT(4)
-#define V4L2_MBUS_CSI2_CHANNEL_1 BIT(5)
-#define V4L2_MBUS_CSI2_CHANNEL_2 BIT(6)
-#define V4L2_MBUS_CSI2_CHANNEL_3 BIT(7)
-/* Does it support only continuous or also non-continuous clock mode */
-#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8)
-#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9)
-
-#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | \
- V4L2_MBUS_CSI2_2_LANE | \
- V4L2_MBUS_CSI2_3_LANE | \
- V4L2_MBUS_CSI2_4_LANE)
-#define V4L2_MBUS_CSI2_CHANNELS (V4L2_MBUS_CSI2_CHANNEL_0 | \
- V4L2_MBUS_CSI2_CHANNEL_1 | \
- V4L2_MBUS_CSI2_CHANNEL_2 | \
- V4L2_MBUS_CSI2_CHANNEL_3)
+/* Clock non-continuous mode support. */
+#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0)
+
+#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8
+
+/**
+ * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration
+ * @flags: media bus (V4L2_MBUS_*) flags
+ * @data_lanes: an array of physical data lane indexes
+ * @clock_lane: physical lane index of the clock lane
+ * @num_data_lanes: number of data lanes
+ * @lane_polarities: polarity of the lanes. The order is the same of
+ * the physical lanes.
+ */
+struct v4l2_mbus_config_mipi_csi2 {
+ unsigned int flags;
+ unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES];
+ unsigned char clock_lane;
+ unsigned char num_data_lanes;
+ bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
+};
+
+/**
+ * struct v4l2_mbus_config_parallel - parallel data bus configuration
+ * @flags: media bus (V4L2_MBUS_*) flags
+ * @bus_width: bus width in bits
+ * @data_shift: data shift in bits
+ */
+struct v4l2_mbus_config_parallel {
+ unsigned int flags;
+ unsigned char bus_width;
+ unsigned char data_shift;
+};
+
+/**
+ * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration
+ * @clock_inv: polarity of clock/strobe signal
+ * false - not inverted, true - inverted
+ * @strobe: false - data/clock, true - data/strobe
+ * @lane_polarity: the polarities of the clock (index 0) and data lanes
+ * index (1)
+ * @data_lane: the number of the data lane
+ * @clock_lane: the number of the clock lane
+ */
+struct v4l2_mbus_config_mipi_csi1 {
+ unsigned char clock_inv:1;
+ unsigned char strobe:1;
+ bool lane_polarity[2];
+ unsigned char data_lane;
+ unsigned char clock_lane;
+};
/**
* enum v4l2_mbus_type - media bus type
@@ -78,6 +130,8 @@
* @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2)
* @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY
* @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY
+ * @V4L2_MBUS_DPI: MIPI VIDEO DPI interface
+ * @V4L2_MBUS_INVALID: invalid bus type (keep as last)
*/
enum v4l2_mbus_type {
V4L2_MBUS_UNKNOWN,
@@ -87,16 +141,32 @@ enum v4l2_mbus_type {
V4L2_MBUS_CCP2,
V4L2_MBUS_CSI2_DPHY,
V4L2_MBUS_CSI2_CPHY,
+ V4L2_MBUS_DPI,
+ V4L2_MBUS_INVALID,
};
/**
* struct v4l2_mbus_config - media bus configuration
- * @type: in: interface type
- * @flags: in / out: configuration flags, depending on @type
+ * @type: interface type
+ * @bus: bus configuration data structure
+ * @bus.parallel: embedded &struct v4l2_mbus_config_parallel.
+ * Used if the bus is parallel or BT.656.
+ * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1.
+ * Used if the bus is MIPI Alliance's Camera Serial
+ * Interface version 1 (MIPI CSI1) or Standard
+ * Mobile Imaging Architecture's Compact Camera Port 2
+ * (SMIA CCP2).
+ * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2.
+ * Used if the bus is MIPI Alliance's Camera Serial
+ * Interface version 2 (MIPI CSI2).
*/
struct v4l2_mbus_config {
enum v4l2_mbus_type type;
- unsigned int flags;
+ union {
+ struct v4l2_mbus_config_parallel parallel;
+ struct v4l2_mbus_config_mipi_csi1 mipi_csi1;
+ struct v4l2_mbus_config_mipi_csi2 mipi_csi2;
+ } bus;
};
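A sketch of a subdev get_mbus_config handler filling the new bus-specific
union instead of the removed flags field (the 2-lane D-PHY values are
hypothetical):

	static int my_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				      struct v4l2_mbus_config *cfg)
	{
		cfg->type = V4L2_MBUS_CSI2_DPHY;
		cfg->bus.mipi_csi2.flags = 0;
		cfg->bus.mipi_csi2.num_data_lanes = 2;
		return 0;
	}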
/**
@@ -120,7 +190,7 @@ v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
}
/**
- * v4l2_fill_pix_format - Ancillary routine that fills a &struct
+ * v4l2_fill_mbus_format - Ancillary routine that fills a &struct
* v4l2_mbus_framefmt from a &struct v4l2_pix_format and a
* data format code.
*
@@ -143,7 +213,7 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
}
/**
- * v4l2_fill_pix_format - Ancillary routine that fills a &struct
+ * v4l2_fill_pix_format_mplane - Ancillary routine that fills a &struct
* v4l2_pix_format_mplane fields from a media bus structure.
*
* @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled
@@ -163,7 +233,7 @@ v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt,
}
/**
- * v4l2_fill_pix_format - Ancillary routine that fills a &struct
+ * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct
* v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane.
*
* @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 98753f00df7e..7f1af1f7f912 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -84,6 +84,12 @@ struct v4l2_m2m_queue_ctx {
* @last_src_buf: indicate the last source buffer for draining
* @next_buf_last: next capture queud buffer will be tagged as last
* @has_stopped: indicate the device has been stopped
+ * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
+ * queue is not streaming. This allows firmware to
+ * analyze the bitstream header which arrives on the
+ * OUTPUT queue. The driver must implement the job_ready
+ * callback correctly to make sure that the requirements
+ * for actual decoding are met.
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -106,6 +112,7 @@ struct v4l2_m2m_ctx {
struct vb2_v4l2_buffer *last_src_buf;
bool next_buf_last;
bool has_stopped;
+ bool ignore_cap_streaming;
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
@@ -305,6 +312,28 @@ void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf);
/**
+ * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
+ * to finish
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Called by a driver in the suspend hook. Stop new jobs from being run, and
+ * wait for current running job to finish.
+ */
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);
+
+/**
+ * v4l2_m2m_resume() - resume job running and try to run a queued job
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Called by a driver in the resume hook. This reverts the operation of
+ * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
+ * there is any.
+ */
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
+
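A sketch of the intended pairing in a driver's dev_pm_ops (my_m2m_priv and
the drvdata layout are hypothetical):

	static int my_pm_suspend(struct device *dev)
	{
		struct my_m2m_priv *priv = dev_get_drvdata(dev);

		v4l2_m2m_suspend(priv->m2m_dev);
		return 0;
	}

	static int my_pm_resume(struct device *dev)
	{
		struct my_m2m_priv *priv = dev_get_drvdata(dev);

		v4l2_m2m_resume(priv->m2m_dev);
		return 0;
	}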
+/**
* v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
*
* @file: pointer to struct &file
@@ -464,15 +493,20 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* @vma: pointer to struct &vm_area_struct
*
* Call from driver's mmap() function. Will handle mmap() for both queues
- * seamlessly for videobuffer, which will receive normal per-queue offsets and
- * proper videobuf queue pointers. The differentiation is made outside videobuf
- * by adding a predefined offset to buffers from one of the queues and
- * subtracting it before passing it back to videobuf. Only drivers (and
+ * seamlessly for the video buffer, which will receive normal per-queue offsets
+ * and proper vb2 queue pointers. The differentiation is made outside
+ * vb2 by adding a predefined offset to buffers from one of the queues
+ * and subtracting it before passing it back to vb2. Only drivers (and
* thus applications) receive modified offsets.
*/
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma);
+#ifndef CONFIG_MMU
+unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+#endif
/**
* v4l2_m2m_init() - initialize per-driver m2m data
*
@@ -517,7 +551,7 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @drv_priv: driver's instance private data
* @queue_init: a callback for queue type-specific initialization function
- * to be used for initializing videobuf_queues
+ * to be used for initializing vb2_queues
*
* Usually called from driver's ``open()`` function.
*/
@@ -552,7 +586,7 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
* @vbuf: pointer to struct &vb2_v4l2_buffer
*
- * Call from videobuf_queue_ops->ops->buf_queue, videobuf_queue_ops callback.
+ * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback.
*/
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf);
@@ -566,7 +600,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->out_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
@@ -578,7 +619,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->cap_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
@@ -620,7 +668,7 @@ v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
- * v4l2_m2m_last_src_buf() - return last destination buffer from the list of
+ * v4l2_m2m_last_src_buf() - return last source buffer from the list of
* ready buffers
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index d4e3b44cf14c..a9e6b8146279 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -38,6 +38,7 @@ struct v4l2_subdev;
struct v4l2_subdev_fh;
struct tuner_setup;
struct v4l2_mbus_frame_desc;
+struct led_classdev;
/**
* struct v4l2_decode_vbi_line - used to decode_vbi_line
@@ -162,6 +163,9 @@ struct v4l2_subdev_io_pin_config {
* @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
* a direction argument if needed.
*
+ * @command: called by in-kernel drivers in order to call functions internal
+ *	to subdev drivers that have a separate callback.
+ *
* @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
* used to provide support for private ioctls used on the driver.
*
@@ -173,7 +177,10 @@ struct v4l2_subdev_io_pin_config {
* @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code.
*
* @s_power: puts subdevice in power saving mode (on == 0) or normal operation
- * mode (on == 1).
+ * mode (on == 1). DEPRECATED. See
+ * Documentation/driver-api/media/camera-sensor.rst . pre_streamon and
+ * post_streamoff callbacks can be used for e.g. setting the bus to LP-11
+ * mode before s_stream is called.
*
* @interrupt_service_routine: Called by the bridge chip's interrupt service
* handler, when an interrupt status has be raised due to this subdev,
@@ -193,6 +200,7 @@ struct v4l2_subdev_core_ops {
int (*load_fw)(struct v4l2_subdev *sd);
int (*reset)(struct v4l2_subdev *sd, u32 val);
int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
+ long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd,
@@ -309,7 +317,18 @@ struct v4l2_subdev_audio_ops {
};
/**
- * enum v4l2_mbus_frame_desc_entry - media bus frame description flags
+ * struct v4l2_mbus_frame_desc_entry_csi2
+ *
+ * @vc: CSI-2 virtual channel
+ * @dt: CSI-2 data type ID
+ */
+struct v4l2_mbus_frame_desc_entry_csi2 {
+ u8 vc;
+ u8 dt;
+};
+
+/**
+ * enum v4l2_mbus_frame_desc_flags - media bus frame description flags
*
* @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX:
* Indicates that &struct v4l2_mbus_frame_desc_entry->length field
@@ -327,30 +346,71 @@ enum v4l2_mbus_frame_desc_flags {
* struct v4l2_mbus_frame_desc_entry - media bus frame description structure
*
* @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags.
+ * @stream: stream in routing configuration
* @pixelcode: media bus pixel code, valid if @flags
* %FRAME_DESC_FL_BLOB is not set.
* @length: number of octets per frame, valid if @flags
* %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set.
+ * @bus: Bus-specific frame descriptor parameters
+ * @bus.csi2: CSI-2-specific bus configuration
*/
struct v4l2_mbus_frame_desc_entry {
enum v4l2_mbus_frame_desc_flags flags;
+ u32 stream;
u32 pixelcode;
u32 length;
+ union {
+ struct v4l2_mbus_frame_desc_entry_csi2 csi2;
+ } bus;
};
-#define V4L2_FRAME_DESC_ENTRY_MAX 4
+ /*
+ * If this number is too small, it should be dropped altogether and the
+ * API switched to a dynamic number of frame descriptor entries.
+ */
+#define V4L2_FRAME_DESC_ENTRY_MAX 8
+
+/**
+ * enum v4l2_mbus_frame_desc_type - media bus frame description type
+ *
+ * @V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED:
+ * Undefined frame desc type. Drivers should not use this, it is
+ * for backwards compatibility.
+ * @V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL:
+ * Parallel media bus.
+ * @V4L2_MBUS_FRAME_DESC_TYPE_CSI2:
+ * CSI-2 media bus. Frame desc parameters must be set in
+ * &struct v4l2_mbus_frame_desc_entry->csi2.
+ */
+enum v4l2_mbus_frame_desc_type {
+ V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED = 0,
+ V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL,
+ V4L2_MBUS_FRAME_DESC_TYPE_CSI2,
+};
/**
* struct v4l2_mbus_frame_desc - media bus data frame description
+ * @type: type of the bus (enum v4l2_mbus_frame_desc_type)
* @entry: frame descriptors array
* @num_entries: number of entries in @entry array
*/
struct v4l2_mbus_frame_desc {
+ enum v4l2_mbus_frame_desc_type type;
struct v4l2_mbus_frame_desc_entry entry[V4L2_FRAME_DESC_ENTRY_MAX];
unsigned short num_entries;
};
/**
+ * enum v4l2_subdev_pre_streamon_flags - Flags for pre_streamon subdev core op
+ *
+ * @V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP: Set the transmitter to either LP-11
+ * or LP-111 mode before call to s_stream().
+ */
+enum v4l2_subdev_pre_streamon_flags {
+ V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP = BIT(0),
+};
+
+/**
* struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened
* in video mode.
*
@@ -381,19 +441,17 @@ struct v4l2_mbus_frame_desc {
* OUTPUT device. This is ignored by video capture devices.
*
* @g_input_status: get input status. Same as the status field in the
- * &struct &v4l2_input
+ * &struct v4l2_input
*
- * @s_stream: used to notify the driver that a video stream will start or has
- * stopped.
+ * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the
+ * sub-device. Failure on stop will remove any resources acquired in
+ * streaming start, while the error code is still returned by the driver.
+ * The caller shall track the subdev state, and shall not start or stop an
+ * already started or stopped subdev. Also see call_s_stream wrapper in
+ * v4l2-subdev.c.
*
* @g_pixelaspect: callback to return the pixelaspect ratio.
*
- * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
- * ioctl handler code.
- *
- * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
- * ioctl handler code.
- *
* @s_dv_timings: Set custom dv timings in the sub device. This is used
* when sub device is capable of setting detailed timing information
* in the hardware to generate/detect the video signal.
@@ -402,15 +460,22 @@ struct v4l2_mbus_frame_desc {
*
* @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code.
*
- * @g_mbus_config: get supported mediabus configurations
- *
- * @s_mbus_config: set a certain mediabus configuration. This operation is added
- * for compatibility with soc-camera drivers and should not be used by new
- * software.
- *
* @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
* can adjust @size to a lower value and must not write more data to the
* buffer starting at @data than the original value of @size.
+ *
+ * @pre_streamon: May be called before streaming is actually started, to help
+ * initialize the bus. Current usage is to set a CSI-2 transmitter to
+ * LP-11 or LP-111 mode before streaming. See &enum
+ * v4l2_subdev_pre_streamon_flags.
+ *
+ * pre_streamon shall return an error if it cannot perform the operation as
+ * indicated by the flags argument. In particular, -EACCES indicates lack
+ * of support for the operation. The caller shall call post_streamoff for
+ * each successful call of pre_streamon.
+ *
+ * @post_streamoff: Called after streaming is stopped, if and only if
+ * pre_streamon was called earlier.
*/
struct v4l2_subdev_video_ops {
int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -425,22 +490,16 @@ struct v4l2_subdev_video_ops {
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
- int (*g_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval);
- int (*s_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval);
int (*s_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*query_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
- int (*g_mbus_config)(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg);
- int (*s_mbus_config)(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg);
int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf,
unsigned int *size);
+ int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags);
+ int (*post_streamoff)(struct v4l2_subdev *sd);
};
/**
@@ -619,24 +678,93 @@ struct v4l2_subdev_ir_ops {
/**
* struct v4l2_subdev_pad_config - Used for storing subdev pad information.
*
- * @try_fmt: &struct v4l2_mbus_framefmt
- * @try_crop: &struct v4l2_rect to be used for crop
- * @try_compose: &struct v4l2_rect to be used for compose
+ * @format: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ */
+struct v4l2_subdev_pad_config {
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
+};
+
+/**
+ * struct v4l2_subdev_stream_config - Used for storing stream configuration.
+ *
+ * @pad: pad number
+ * @stream: stream number
+ * @enabled: has the stream been enabled with v4l2_subdev_enable_stream()
+ * @fmt: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ *
+ * This structure stores configuration for a stream.
+ */
+struct v4l2_subdev_stream_config {
+ u32 pad;
+ u32 stream;
+ bool enabled;
+
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
+};
+
+/**
+ * struct v4l2_subdev_stream_configs - A collection of stream configs.
+ *
+ * @num_configs: number of entries in @configs.
+ * @configs: an array of &struct v4l2_subdev_stream_config.
+ */
+struct v4l2_subdev_stream_configs {
+ u32 num_configs;
+ struct v4l2_subdev_stream_config *configs;
+};
+
+/**
+ * struct v4l2_subdev_krouting - subdev routing table
+ *
+ * @num_routes: number of routes
+ * @routes: &struct v4l2_subdev_route
+ *
+ * This structure contains the routing table for a subdev.
+ */
+struct v4l2_subdev_krouting {
+ unsigned int num_routes;
+ struct v4l2_subdev_route *routes;
+};
+
+/**
+ * struct v4l2_subdev_state - Used for storing subdev state information.
+ *
+ * @_lock: default for 'lock'
+ * @lock: mutex for the state. May be replaced by the user.
+ * @sd: the sub-device which the state is related to
+ * @pads: &struct v4l2_subdev_pad_config array
+ * @routing: routing table for the subdev
+ * @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS)
*
* This structure only needs to be passed to the pad op if the 'which' field
* of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
* %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL.
*/
-struct v4l2_subdev_pad_config {
- struct v4l2_mbus_framefmt try_fmt;
- struct v4l2_rect try_crop;
- struct v4l2_rect try_compose;
+struct v4l2_subdev_state {
+ /* lock for the struct v4l2_subdev_state fields */
+ struct mutex _lock;
+ struct mutex *lock;
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev_pad_config *pads;
+ struct v4l2_subdev_krouting routing;
+ struct v4l2_subdev_stream_configs stream_configs;
};
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
- * @init_cfg: initialize the pad config to default values
* @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler
* code.
* @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler
@@ -653,6 +781,12 @@ struct v4l2_subdev_pad_config {
*
* @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code.
*
+ * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
+ * ioctl handler code.
+ *
+ * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
+ * ioctl handler code.
+ *
* @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code.
*
* @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code.
@@ -670,31 +804,64 @@ struct v4l2_subdev_pad_config {
*
* @set_frame_desc: set the low level media bus frame parameters, @fd array
* may be adjusted by the subdev driver to device capabilities.
+ *
+ * @get_mbus_config: get the media bus configuration of a remote sub-device.
+ * The media bus configuration is usually retrieved from the
+ * firmware interface at sub-device probe time, immediately
+ * applied to the hardware and eventually adjusted by the
+ * driver. Remote sub-devices (usually video receivers) shall
+ * use this operation to query the transmitting end bus
+ * configuration in order to adjust their own one accordingly.
+ * Callers should make sure they get the most up-to-date as
+ * possible configuration from the remote end, likely calling
+ * this operation as close as possible to stream on time. The
+ * operation shall fail if the pad index it has been called on
+ * is not valid or in case of unrecoverable failures.
+ *
+ * @set_routing: Enable or disable data connection routes described in the
+ * subdevice routing table. Subdevs that implement this operation
+ * must set the V4L2_SUBDEV_FL_STREAMS flag.
+ *
+ * @enable_streams: Enable the streams defined in streams_mask on the given
+ * source pad. Subdevs that implement this operation must use the active
+ * state management provided by the subdev core (enabled through a call to
+ * v4l2_subdev_init_finalize() at initialization time). Do not call
+ * directly, use v4l2_subdev_enable_streams() instead.
+ *
+ * @disable_streams: Disable the streams defined in streams_mask on the given
+ * source pad. Subdevs that implement this operation must use the active
+ * state management provided by the subdev core (enabled through a call to
+ * v4l2_subdev_init_finalize() at initialization time). Do not call
+ * directly, use v4l2_subdev_disable_streams() instead.
*/
struct v4l2_subdev_pad_ops {
- int (*init_cfg)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code);
int (*enum_frame_size)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse);
int (*enum_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie);
int (*get_fmt)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
int (*set_fmt)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
int (*get_selection)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
int (*set_selection)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
+ int (*get_frame_interval)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *interval);
+ int (*set_frame_interval)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *interval);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*dv_timings_cap)(struct v4l2_subdev *sd,
@@ -710,6 +877,18 @@ struct v4l2_subdev_pad_ops {
struct v4l2_mbus_frame_desc *fd);
int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_frame_desc *fd);
+ int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config);
+ int (*set_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *route);
+ int (*enable_streams)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask);
+ int (*disable_streams)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask);
};
/**
@@ -738,6 +917,8 @@ struct v4l2_subdev_ops {
/**
* struct v4l2_subdev_internal_ops - V4L2 subdev internal ops
*
+ * @init_state: initialize the subdev state to default values
+ *
* @registered: called when this subdev is registered. When called the v4l2_dev
* field is set to the correct v4l2_device.
*
@@ -763,6 +944,8 @@ struct v4l2_subdev_ops {
* these ops.
*/
struct v4l2_subdev_internal_ops {
+ int (*init_state)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state);
int (*registered)(struct v4l2_subdev *sd);
void (*unregistered)(struct v4l2_subdev *sd);
int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
@@ -770,8 +953,6 @@ struct v4l2_subdev_internal_ops {
void (*release)(struct v4l2_subdev *sd);
};
-#define V4L2_SUBDEV_NAME_SIZE 32
-
/* Set this flag if this subdev is a i2c device. */
#define V4L2_SUBDEV_FL_IS_I2C (1U << 0)
/* Set this flag if this subdev is a spi device. */
@@ -784,6 +965,17 @@ struct v4l2_subdev_internal_ops {
* should set this flag.
*/
#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
+/*
+ * Set this flag if this subdev supports multiplexed streams. This means
+ * that the driver supports routing and handles the stream parameter in its
+ * v4l2_subdev_pad_ops handlers. More specifically, this means:
+ *
+ * - Centrally managed subdev active state is enabled
+ * - Legacy pad config is _not_ supported (state->pads is NULL)
+ * - Routing ioctls are available
+ * - Multiple streams per pad are supported
+ */
+#define V4L2_SUBDEV_FL_STREAMS (1U << 4)
struct regulator_bulk_data;
@@ -831,13 +1023,26 @@ struct v4l2_subdev_platform_data {
* @dev: pointer to the physical device, if any
* @fwnode: The fwnode_handle of the subdev, usually the same as
* either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL).
- * @async_list: Links this subdev to a global subdev_list or @notifier->done
- * list.
- * @asd: Pointer to respective &struct v4l2_async_subdev.
- * @notifier: Pointer to the managing notifier.
+ * @async_list: Links this subdev to a global subdev_list or
+ * @notifier->done_list list.
+ * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of
+ * &struct v4l2_async_subdev_endpoint.
* @subdev_notifier: A sub-device notifier implicitly registered for the sub-
- * device using v4l2_device_register_sensor_subdev().
+ * device using v4l2_async_register_subdev_sensor().
+ * @asc_list: Async connection list, of &struct
+ * v4l2_async_connection.subdev_entry.
* @pdata: common part of subdevice platform data
+ * @state_lock: A pointer to a lock used for all the subdev's states, set by the
+ * driver. This is optional. If NULL, each state instance will get
+ * a lock of its own.
+ * @privacy_led: Optional pointer to a LED classdev for the privacy LED for sensors.
+ * @active_state: Active state for the subdev (NULL for subdevs tracking the
+ * state internally). Initialized by calling
+ * v4l2_subdev_init_finalize().
+ * @enabled_streams: Bitmask of enabled streams used by
+ * v4l2_subdev_enable_streams() and
+ * v4l2_subdev_disable_streams() helper functions for fallback
+ * cases.
*
* Each instance of a subdev driver should create this struct, either
* stand-alone or embedded in a larger struct.
@@ -857,7 +1062,7 @@ struct v4l2_subdev {
const struct v4l2_subdev_ops *ops;
const struct v4l2_subdev_internal_ops *internal_ops;
struct v4l2_ctrl_handler *ctrl_handler;
- char name[V4L2_SUBDEV_NAME_SIZE];
+ char name[52];
u32 grp_id;
void *dev_priv;
void *host_priv;
@@ -865,10 +1070,27 @@ struct v4l2_subdev {
struct device *dev;
struct fwnode_handle *fwnode;
struct list_head async_list;
- struct v4l2_async_subdev *asd;
- struct v4l2_async_notifier *notifier;
+ struct list_head async_subdev_endpoint_list;
struct v4l2_async_notifier *subdev_notifier;
+ struct list_head asc_list;
struct v4l2_subdev_platform_data *pdata;
+ struct mutex *state_lock;
+
+ /*
+ * The fields below are private, and should only be accessed via
+ * appropriate functions.
+ */
+
+ struct led_classdev *privacy_led;
+
+ /*
+ * TODO: active_state should most likely be changed from a pointer to an
+ * embedded field. For the time being it's kept as a pointer to more
+ * easily catch uses of active_state in the cases where the driver
+ * doesn't support it.
+ */
+ struct v4l2_subdev_state *active_state;
+ u64 enabled_streams;
};
@@ -900,14 +1122,16 @@ struct v4l2_subdev {
* struct v4l2_subdev_fh - Used for storing subdev information per file handle
*
* @vfh: pointer to &struct v4l2_fh
- * @pad: pointer to &struct v4l2_subdev_pad_config
+ * @state: pointer to &struct v4l2_subdev_state
* @owner: module pointer to the owner of this file handle
+ * @client_caps: bitmask of ``V4L2_SUBDEV_CLIENT_CAP_*``
*/
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
struct module *owner;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct v4l2_subdev_pad_config *pad;
+ struct v4l2_subdev_state *state;
+ u64 client_caps;
#endif
};
@@ -920,64 +1144,6 @@ struct v4l2_subdev_fh {
#define to_v4l2_subdev_fh(fh) \
container_of(fh, struct v4l2_subdev_fh, vfh)
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-
-/**
- * v4l2_subdev_get_try_format - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_fmt
- *
- * @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
- */
-static inline struct v4l2_mbus_framefmt *
-v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
-{
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &cfg[pad].try_fmt;
-}
-
-/**
- * v4l2_subdev_get_try_crop - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_crop
- *
- * @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
- */
-static inline struct v4l2_rect *
-v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
-{
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &cfg[pad].try_crop;
-}
-
-/**
- * v4l2_subdev_get_try_compose - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_compose
- *
- * @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
- */
-static inline struct v4l2_rect *
-v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
-{
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &cfg[pad].try_compose;
-}
-
-#endif
-
extern const struct v4l2_file_operations v4l2_subdev_fops;
/**
@@ -1075,23 +1241,549 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
int v4l2_subdev_link_validate(struct media_link *link);
/**
- * v4l2_subdev_alloc_pad_config - Allocates memory for pad config
+ * v4l2_subdev_has_pad_interdep - MC has_pad_interdep implementation for subdevs
+ *
+ * @entity: pointer to &struct media_entity
+ * @pad0: pad number for the first pad
+ * @pad1: pad number for the second pad
+ *
+ * This function is an implementation of the
+ * media_entity_operations.has_pad_interdep operation for subdevs that
+ * implement the multiplexed streams API (as indicated by the
+ * V4L2_SUBDEV_FL_STREAMS subdev flag).
*
- * @sd: pointer to struct v4l2_subdev
+ * It considers two pads interdependent if there is an active route between pad0
+ * and pad1.
*/
-struct
-v4l2_subdev_pad_config *v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd);
+bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
+ unsigned int pad0, unsigned int pad1);
/**
- * v4l2_subdev_free_pad_config - Frees memory allocated by
- * v4l2_subdev_alloc_pad_config().
+ * __v4l2_subdev_state_alloc - allocate v4l2_subdev_state
*
- * @cfg: pointer to &struct v4l2_subdev_pad_config
+ * @sd: pointer to &struct v4l2_subdev for which the state is being allocated.
+ * @lock_name: name of the state lock
+ * @key: lock_class_key for the lock
+ *
+ * Must call __v4l2_subdev_state_free() when state is no longer needed.
+ *
+ * Not to be called directly by the drivers.
+ */
+struct v4l2_subdev_state *__v4l2_subdev_state_alloc(struct v4l2_subdev *sd,
+ const char *lock_name,
+ struct lock_class_key *key);
+
+/**
+ * __v4l2_subdev_state_free - free a v4l2_subdev_state
+ *
+ * @state: v4l2_subdev_state to be freed.
+ *
+ * Not to be called directly by the drivers.
+ */
+void __v4l2_subdev_state_free(struct v4l2_subdev_state *state);
+
+/**
+ * v4l2_subdev_init_finalize() - Finalizes the initialization of the subdevice
+ * @sd: The subdev
+ *
+ * This function finalizes the initialization of the subdev, including
+ * allocation of the active state for the subdev.
+ *
+ * This function must be called by the subdev drivers that use the centralized
+ * active state, after the subdev struct has been initialized and
+ * media_entity_pads_init() has been called, but before registering the
+ * subdev.
+ *
+ * The user must call v4l2_subdev_cleanup() when the subdev is being removed.
*/
-void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
+#define v4l2_subdev_init_finalize(sd) \
+ ({ \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":sd->active_state->lock"; \
+ __v4l2_subdev_init_finalize(sd, name, &__key); \
+ })
+
+int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
+ struct lock_class_key *key);
+
+/**
+ * v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice
+ * @sd: The subdevice
+ *
+ * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of
+ * its release if resources have been associated with it using
+ * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize().
+ */
+void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
+
+/*
+ * A macro to generate the macro or function name for sub-devices state access
+ * wrapper macros below.
+ */
+#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \
+ __v4l2_subdev_state_get_ ## NAME ## ARG
+
+/**
+ * v4l2_subdev_state_get_format() - Get pointer to a stream format
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
+ *
+ * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad +
+ * stream in the subdev state.
+ *
+ * For stream-unaware drivers the format for the corresponding pad is returned.
+ * If the pad does not exist, NULL is returned.
+ */
+/*
+ * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with
+ * two or three arguments. The purpose of the __v4l2_subdev_state_get_format()
+ * macro below is to come up with the name of the function or macro to call,
+ * using the last two arguments (_stream and _pad). The selected function or
+ * macro is then called using the arguments specified by the caller. A similar
+ * arrangement is used for v4l2_subdev_state_get_crop() and
+ * v4l2_subdev_state_get_compose() below.
+ */
+#define v4l2_subdev_state_get_format(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_format_pad(state, pad) \
+ __v4l2_subdev_state_get_format(state, pad, 0)
+struct v4l2_mbus_framefmt *
+__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
+
+/**
+ * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
+ *
+ * This returns a pointer to crop rectangle for the given pad + stream in the
+ * subdev state.
+ *
+ * For stream-unaware drivers the crop rectangle for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
+ */
+#define v4l2_subdev_state_get_crop(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_crop_pad(state, pad) \
+ __v4l2_subdev_state_get_crop(state, pad, 0)
+struct v4l2_rect *
+__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
+ u32 stream);
+
+/**
+ * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
+ *
+ * This returns a pointer to compose rectangle for the given pad + stream in the
+ * subdev state.
+ *
+ * For stream-unaware drivers the compose rectangle for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
+ */
+#define v4l2_subdev_state_get_compose(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_compose_pad(state, pad) \
+ __v4l2_subdev_state_get_compose(state, pad, 0)
+struct v4l2_rect *
+__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
+
+/**
+ * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
+ *
+ * This returns a pointer to the frame interval for the given pad + stream in
+ * the subdev state.
+ *
+ * For stream-unaware drivers the frame interval for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
+ */
+#define v4l2_subdev_state_get_interval(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_interval_pad(state, pad) \
+ __v4l2_subdev_state_get_interval(state, pad, 0)
+struct v4l2_fract *
+__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+/**
+ * v4l2_subdev_get_fmt() - Fill format based on state
+ * @sd: subdevice
+ * @state: subdevice state
+ * @format: pointer to &struct v4l2_subdev_format
+ *
+ * Fill @format->format field based on the information in the @format struct.
+ *
+ * This function can be used by the subdev drivers which support active state to
+ * implement v4l2_subdev_pad_ops.get_fmt if the subdev driver does not need to
+ * do anything special in their get_fmt op.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format);
+
+/**
+ * v4l2_subdev_get_frame_interval() - Fill frame interval based on state
+ * @sd: subdevice
+ * @state: subdevice state
+ * @fi: pointer to &struct v4l2_subdev_frame_interval
+ *
+ * Fill @fi->interval field based on the information in the @fi struct.
+ *
+ * This function can be used by the subdev drivers which support active state to
+ * implement v4l2_subdev_pad_ops.get_frame_interval if the subdev driver does
+ * not need to do anything special in their get_frame_interval op.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi);
+
+/**
+ * v4l2_subdev_set_routing() - Set given routing to subdev state
+ * @sd: The subdevice
+ * @state: The subdevice state
+ * @routing: Routing that will be copied to subdev state
+ *
+ * This will release old routing table (if any) from the state, allocate
+ * enough space for the given routing, and copy the routing.
+ *
+ * This can be used from the subdev driver's set_routing op, after validating
+ * the routing.
+ */
+int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing);
+
+struct v4l2_subdev_route *
+__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
+ struct v4l2_subdev_route *route);
+
+/**
+ * for_each_active_route - iterate on all active routes of a routing table
+ * @routing: The routing table
+ * @route: The route iterator
+ */
+#define for_each_active_route(routing, route) \
+ for ((route) = NULL; \
+ ((route) = __v4l2_subdev_next_active_route((routing), (route)));)
+
+/**
+ * v4l2_subdev_set_routing_with_fmt() - Set given routing and format to subdev
+ * state
+ * @sd: The subdevice
+ * @state: The subdevice state
+ * @routing: Routing that will be copied to subdev state
+ * @fmt: Format used to initialize all the streams
+ *
+ * This is the same as v4l2_subdev_set_routing(), but additionally initializes
+ * all the streams using the given format.
+ */
+int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing,
+ const struct v4l2_mbus_framefmt *fmt);
+
+/**
+ * v4l2_subdev_routing_find_opposite_end() - Find the opposite stream
+ * @routing: routing used to find the opposite side
+ * @pad: pad id
+ * @stream: stream id
+ * @other_pad: pointer used to return the opposite pad
+ * @other_stream: pointer used to return the opposite stream
+ *
+ * This function uses the routing table to find the pad + stream which is
+ * opposite the given pad + stream.
+ *
+ * @other_pad and/or @other_stream can be NULL if the caller does not need the
+ * value.
+ *
+ * Returns 0 on success, or -EINVAL if no matching route is found.
+ */
+int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
+ u32 pad, u32 stream, u32 *other_pad,
+ u32 *other_stream);
+
+/**
+ * v4l2_subdev_state_get_opposite_stream_format() - Get pointer to opposite
+ * stream format
+ * @state: subdevice state
+ * @pad: pad id
+ * @stream: stream id
+ *
+ * This returns a pointer to &struct v4l2_mbus_framefmt for the pad + stream
+ * that is opposite the given pad + stream in the subdev state.
+ *
+ * If the state does not contain the given pad + stream, NULL is returned.
+ */
+struct v4l2_mbus_framefmt *
+v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
+ u32 pad, u32 stream);
+
+/**
+ * v4l2_subdev_state_xlate_streams() - Translate streams from one pad to another
+ *
+ * @state: Subdevice state
+ * @pad0: The first pad
+ * @pad1: The second pad
+ * @streams: Streams bitmask on the first pad
+ *
+ * Streams on sink pads of a subdev are routed to source pads as expressed in
+ * the subdev state routing table. Stream numbers don't necessarily match on
+ * the sink and source side of a route. This function translates stream numbers
+ * on @pad0, expressed as a bitmask in @streams, to the corresponding streams
+ * on @pad1 using the routing table from the @state. It returns the stream mask
+ * on @pad1, and updates @streams with the streams that have been found in the
+ * routing table.
+ *
+ * @pad0 and @pad1 must be a sink and a source, in any order.
+ *
+ * Return: The bitmask of streams of @pad1 that are routed to @streams on @pad0.
+ */
+u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
+ u32 pad0, u32 pad1, u64 *streams);
+
+/**
+ * enum v4l2_subdev_routing_restriction - Subdevice internal routing restrictions
+ *
+ * @V4L2_SUBDEV_ROUTING_NO_1_TO_N:
+ * an input stream shall not be routed to multiple output streams (stream
+ * duplication)
+ * @V4L2_SUBDEV_ROUTING_NO_N_TO_1:
+ * multiple input streams shall not be routed to the same output stream
+ * (stream merging)
+ * @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX:
+ * all streams from a sink pad must be routed to a single source pad
+ * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX:
+ * all streams on a source pad must originate from a single sink pad
+ * @V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING:
+ * source pads shall not contain multiplexed streams
+ * @V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING:
+ * sink pads shall not contain multiplexed streams
+ * @V4L2_SUBDEV_ROUTING_ONLY_1_TO_1:
+ * only non-overlapping 1-to-1 stream routing is allowed (a combination of
+ * @V4L2_SUBDEV_ROUTING_NO_1_TO_N and @V4L2_SUBDEV_ROUTING_NO_N_TO_1)
+ * @V4L2_SUBDEV_ROUTING_NO_STREAM_MIX:
+ * all streams from a sink pad must be routed to a single source pad, and
+ * that source pad shall not get routes from any other sink pad
+ * (a combination of @V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX and
+ * @V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX)
+ * @V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING:
+ * no multiplexed streams allowed on either source or sink sides.
+ */
+enum v4l2_subdev_routing_restriction {
+ V4L2_SUBDEV_ROUTING_NO_1_TO_N = BIT(0),
+ V4L2_SUBDEV_ROUTING_NO_N_TO_1 = BIT(1),
+ V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX = BIT(2),
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX = BIT(3),
+ V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING = BIT(4),
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING = BIT(5),
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 =
+ V4L2_SUBDEV_ROUTING_NO_1_TO_N |
+ V4L2_SUBDEV_ROUTING_NO_N_TO_1,
+ V4L2_SUBDEV_ROUTING_NO_STREAM_MIX =
+ V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX |
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX,
+ V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING =
+ V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING |
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING,
+};
+
+/**
+ * v4l2_subdev_routing_validate() - Verify that routes comply with driver
+ * constraints
+ * @sd: The subdevice
+ * @routing: Routing to verify
+ * @disallow: Restrictions on routes
+ *
+ * This verifies that the given routing complies with the @disallow constraints.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
+ const struct v4l2_subdev_krouting *routing,
+ enum v4l2_subdev_routing_restriction disallow);
+
+/**
+ * v4l2_subdev_enable_streams() - Enable streams on a pad
+ * @sd: The subdevice
+ * @pad: The pad
+ * @streams_mask: Bitmask of streams to enable
+ *
+ * This function enables streams on a source @pad of a subdevice. The pad is
+ * identified by its index, while the streams are identified by the
+ * @streams_mask bitmask. This allows enabling multiple streams on a pad at
+ * once.
+ *
+ * Enabling a stream that is already enabled isn't allowed. If @streams_mask
+ * contains an already enabled stream, this function returns -EALREADY without
+ * performing any operation.
+ *
+ * Per-stream enable is only available for subdevs that implement the
+ * .enable_streams() and .disable_streams() operations. For other subdevs, this
+ * function implements a best-effort compatibility by calling the .s_stream()
+ * operation, limited to subdevs that have a single source pad.
+ *
+ * Return:
+ * * 0: Success
+ * * -EALREADY: One of the streams in streams_mask is already enabled
+ * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad
+ * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is
+ * impossible because the subdev has multiple source pads
+ */
+int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask);
+
+/**
+ * v4l2_subdev_disable_streams() - Disable streams on a pad
+ * @sd: The subdevice
+ * @pad: The pad
+ * @streams_mask: Bitmask of streams to disable
+ *
+ * This function disables streams on a source @pad of a subdevice. The pad is
+ * identified by its index, while the streams are identified by the
+ * @streams_mask bitmask. This allows disabling multiple streams on a pad at
+ * once.
+ *
+ * Disabling a stream that is not enabled isn't allowed. If @streams_mask
+ * contains a disabled stream, this function returns -EALREADY without
+ * performing any operation.
+ *
+ * Per-stream disable is only available for subdevs that implement the
+ * .enable_streams() and .disable_streams() operations. For other subdevs, this
+ * function implements a best-effort compatibility by calling the .s_stream()
+ * operation, limited to subdevs that have a single source pad.
+ *
+ * Return:
+ * * 0: Success
+ * * -EALREADY: One of the streams in streams_mask is not enabled
+ * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad
+ * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is
+ * impossible because the subdev has multiple source pads
+ */
+int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask);
+
+/**
+ * v4l2_subdev_s_stream_helper() - Helper to implement the subdev s_stream
+ * operation using enable_streams and disable_streams
+ * @sd: The subdevice
+ * @enable: Enable or disable streaming
+ *
+ * Subdevice drivers that implement the streams-aware
+ * &v4l2_subdev_pad_ops.enable_streams and &v4l2_subdev_pad_ops.disable_streams
+ * operations can use this helper to implement the legacy
+ * &v4l2_subdev_video_ops.s_stream operation.
+ *
+ * This helper can only be used by subdevs that have a single source pad.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable);
+
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
#endif /* CONFIG_MEDIA_CONTROLLER */
/**
+ * v4l2_subdev_lock_state() - Locks the subdev state
+ * @state: The subdevice state
+ *
+ * Locks the given subdev state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
+{
+ mutex_lock(state->lock);
+}
+
+/**
+ * v4l2_subdev_unlock_state() - Unlocks the subdev state
+ * @state: The subdevice state
+ *
+ * Unlocks the given subdev state.
+ */
+static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
+{
+ mutex_unlock(state->lock);
+}
+
+/**
+ * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state
+ * is unlocked and returns it
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls
+ * lockdep_assert_not_held() to issue a warning if the state is locked.
+ *
+ * This function is to be used e.g. when getting the active state for the sole
+ * purpose of passing it forward, without accessing the state fields.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_not_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state
+ * is locked and returns it
+ *
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls lockdep_assert_held()
+ * to issue a warning if the state is not locked.
+ *
+ * This function is to be used when the caller knows that the active state is
+ * already locked.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev
+ * state for the subdevice
+ * @sd: The subdevice
+ *
+ * Returns the locked active state for the subdevice, or NULL if the subdev
+ * does not support active state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ v4l2_subdev_lock_state(sd->active_state);
+ return sd->active_state;
+}
+
+/**
* v4l2_subdev_init - initializes the sub-device struct
*
* @sd: pointer to the &struct v4l2_subdev to be initialized
@@ -1133,6 +1825,70 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
})
/**
+ * v4l2_subdev_call_state_active - call an operation of a v4l2_subdev which
+ * takes state as a parameter, passing the
+ * subdev its active state.
+ *
+ * @sd: pointer to the &struct v4l2_subdev
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of callback functions.
+ * @f: callback function to be called.
+ * The callback functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args: arguments for @f.
+ *
+ * This is similar to v4l2_subdev_call(), except that this version can only be
+ * used for ops that take a subdev state as a parameter. The macro will get the
+ * active state, lock it before calling the op and unlock it after the call.
+ */
+#define v4l2_subdev_call_state_active(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ struct v4l2_subdev_state *state; \
+ state = v4l2_subdev_get_unlocked_active_state(sd); \
+ if (state) \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ if (state) \
+ v4l2_subdev_unlock_state(state); \
+ __result; \
+ })
+
+/**
+ * v4l2_subdev_call_state_try - call an operation of a v4l2_subdev which
+ * takes state as a parameter, passing the
+ * subdev a newly allocated try state.
+ *
+ * @sd: pointer to the &struct v4l2_subdev
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of callback functions.
+ * @f: callback function to be called.
+ * The callback functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args: arguments for @f.
+ *
+ * This is similar to v4l2_subdev_call_state_active(), except that as this
+ * version allocates a new state, this is only usable for
+ * V4L2_SUBDEV_FORMAT_TRY use cases.
+ *
+ * Note: only legacy non-MC drivers may need this macro.
+ */
+#define v4l2_subdev_call_state_try(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":state->lock"; \
+ struct v4l2_subdev_state *state = \
+ __v4l2_subdev_state_alloc(sd, name, &__key); \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ v4l2_subdev_unlock_state(state); \
+ __v4l2_subdev_state_free(state); \
+ __result; \
+ })
+
+/**
* v4l2_subdev_has_op - Checks if a subdev defines a certain operation.
*
* @sd: pointer to the &struct v4l2_subdev
@@ -1156,4 +1912,4 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
const struct v4l2_event *ev);
-#endif
+#endif /* _V4L2_SUBDEV_H */
diff --git a/include/media/v4l2-vp9.h b/include/media/v4l2-vp9.h
new file mode 100644
index 000000000000..05478ad6d4ab
--- /dev/null
+++ b/include/media/v4l2-vp9.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper functions for vp9 codecs.
+ *
+ * Copyright (c) 2021 Collabora, Ltd.
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#ifndef _MEDIA_V4L2_VP9_H
+#define _MEDIA_V4L2_VP9_H
+
+#include <media/v4l2-ctrls.h>
+
+/**
+ * struct v4l2_vp9_frame_mv_context - motion vector-related probabilities
+ *
+ * @joint: motion vector joint probabilities.
+ * @sign: motion vector sign probabilities.
+ * @classes: motion vector class probabilities.
+ * @class0_bit: motion vector class0 bit probabilities.
+ * @bits: motion vector bits probabilities.
+ * @class0_fr: motion vector class0 fractional bit probabilities.
+ * @fr: motion vector fractional bit probabilities.
+ * @class0_hp: motion vector class0 high precision fractional bit probabilities.
+ * @hp: motion vector high precision fractional bit probabilities.
+ *
+ * A member of v4l2_vp9_frame_context.
+ */
+struct v4l2_vp9_frame_mv_context {
+ u8 joint[3];
+ u8 sign[2];
+ u8 classes[2][10];
+ u8 class0_bit[2];
+ u8 bits[2][10];
+ u8 class0_fr[2][2][3];
+ u8 fr[2][3];
+ u8 class0_hp[2];
+ u8 hp[2];
+};
+
+/**
+ * struct v4l2_vp9_frame_context - frame probabilities, including motion-vector related
+ *
+ * @tx8: TX 8x8 probabilities.
+ * @tx16: TX 16x16 probabilities.
+ * @tx32: TX 32x32 probabilities.
+ * @coef: coefficient probabilities.
+ * @skip: skip probabilities.
+ * @inter_mode: inter mode probabilities.
+ * @interp_filter: interpolation filter probabilities.
+ * @is_inter: is inter-block probabilities.
+ * @comp_mode: compound prediction mode probabilities.
+ * @single_ref: single ref probabilities.
+ * @comp_ref: compound ref probabilities.
+ * @y_mode: Y prediction mode probabilities.
+ * @uv_mode: UV prediction mode probabilities.
+ * @partition: partition probabilities.
+ * @mv: motion vector probabilities.
+ *
+ * Drivers which need to keep track of frame context(s) can use this struct.
+ * The members correspond to probability tables, which are specified only implicitly in the
+ * vp9 spec. Section 10.5 "Default probability tables" lists all the types of tables
+ * involved; the actual tables are of the same kind, and whenever the spec mandates a
+ * reset they are overwritten with values from the default tables.
+ */
+struct v4l2_vp9_frame_context {
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 coef[4][2][2][6][6][3];
+ u8 skip[3];
+ u8 inter_mode[7][3];
+ u8 interp_filter[4][2];
+ u8 is_inter[4];
+ u8 comp_mode[5];
+ u8 single_ref[5][2];
+ u8 comp_ref[5];
+ u8 y_mode[4][9];
+ u8 uv_mode[10][9];
+ u8 partition[16][3];
+
+ struct v4l2_vp9_frame_mv_context mv;
+};
+
+/**
+ * struct v4l2_vp9_frame_symbol_counts - pointers to arrays of symbol counts
+ *
+ * @partition: partition counts.
+ * @skip: skip counts.
+ * @intra_inter: is inter-block counts.
+ * @tx32p: TX32 counts.
+ * @tx16p: TX16 counts.
+ * @tx8p: TX8 counts.
+ * @y_mode: Y prediction mode counts.
+ * @uv_mode: UV prediction mode counts.
+ * @comp: compound prediction mode counts.
+ * @comp_ref: compound ref counts.
+ * @single_ref: single ref counts.
+ * @mv_mode: inter mode counts.
+ * @filter: interpolation filter counts.
+ * @mv_joint: motion vector joint counts.
+ * @sign: motion vector sign counts.
+ * @classes: motion vector class counts.
+ * @class0: motion vector class0 bit counts.
+ * @bits: motion vector bits counts.
+ * @class0_fp: motion vector class0 fractional bit counts.
+ * @fp: motion vector fractional bit counts.
+ * @class0_hp: motion vector class0 high precision fractional bit counts.
+ * @hp: motion vector high precision fractional bit counts.
+ * @coeff: coefficient counts.
+ * @eob: eob counts.
+ *
+ * The fields correspond to what is specified in section 8.3 "Clear counts process" of the spec.
+ * Different pieces of hardware can report the counts in different order, so we cannot rely on
+ * simply overlaying a struct on a relevant block of memory. Instead we provide pointers to
+ * arrays, an array of pointers to arrays in the case of @coeff, and an array of
+ * pointers in the case of @eob.
+ */
+struct v4l2_vp9_frame_symbol_counts {
+ u32 (*partition)[16][4];
+ u32 (*skip)[3][2];
+ u32 (*intra_inter)[4][2];
+ u32 (*tx32p)[2][4];
+ u32 (*tx16p)[2][4];
+ u32 (*tx8p)[2][2];
+ u32 (*y_mode)[4][10];
+ u32 (*uv_mode)[10][10];
+ u32 (*comp)[5][2];
+ u32 (*comp_ref)[5][2];
+ u32 (*single_ref)[5][2][2];
+ u32 (*mv_mode)[7][4];
+ u32 (*filter)[4][3];
+ u32 (*mv_joint)[4];
+ u32 (*sign)[2][2];
+ u32 (*classes)[2][11];
+ u32 (*class0)[2][2];
+ u32 (*bits)[2][10][2];
+ u32 (*class0_fp)[2][2][4];
+ u32 (*fp)[2][4];
+ u32 (*class0_hp)[2][2];
+ u32 (*hp)[2][2];
+ u32 (*coeff[4][2][2][6][6])[3];
+ u32 *eob[4][2][2][6][6][2];
+};
+
+extern const u8 v4l2_vp9_kf_y_mode_prob[10][10][9]; /* Section 10.4 of the spec */
+extern const u8 v4l2_vp9_kf_partition_probs[16][3]; /* Section 10.4 of the spec */
+extern const u8 v4l2_vp9_kf_uv_mode_prob[10][9]; /* Section 10.4 of the spec */
+extern const struct v4l2_vp9_frame_context v4l2_vp9_default_probs; /* Section 10.5 of the spec */
+
+/**
+ * v4l2_vp9_fw_update_probs() - Perform forward update of vp9 probabilities
+ *
+ * @probs: current probabilities values
+ * @deltas: delta values from compressed header
+ * @dec_params: vp9 frame decoding parameters
+ *
+ * This function performs forward updates of probabilities for the vp9 boolean decoder.
+ * The frame header can contain a directive to update the probabilities (deltas); if so,
+ * the deltas are provided in the header, too. Userspace parses those and passes the
+ * resulting deltas struct to the kernel.
+ */
+void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params);
+
+/**
+ * v4l2_vp9_reset_frame_ctx() - Reset appropriate frame context
+ *
+ * @dec_params: vp9 frame decoding parameters
+ * @frame_context: array of the 4 frame contexts
+ *
+ * This function resets appropriate frame contexts, based on what's in dec_params.
+ *
+ * Returns the frame context index after the update, which might be reset to zero if
+ * mandated by the spec.
+ */
+u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct v4l2_vp9_frame_context *frame_context);
+
+/**
+ * v4l2_vp9_adapt_coef_probs() - Perform backward update of vp9 coefficients probabilities
+ *
+ * @probs: current probabilities values
+ * @counts: values of symbol counts after the current frame has been decoded
+ * @use_128: flag to request that 128 is used as update factor if true, otherwise 112 is used
+ * @frame_is_intra: flag indicating that FrameIsIntra is true
+ *
+ * This function performs backward updates of coefficients probabilities for the vp9 boolean
+ * decoder. After a frame has been decoded the counts of how many times a given symbol has
+ * occurred are known and are used to update the probability of each symbol.
+ */
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ bool use_128,
+ bool frame_is_intra);
+
+/**
+ * v4l2_vp9_adapt_noncoef_probs() - Perform backward update of vp9 non-coefficients probabilities
+ *
+ * @probs: current probabilities values
+ * @counts: values of symbol counts after the current frame has been decoded
+ * @reference_mode: specifies the type of inter prediction to be used. See
+ * &v4l2_vp9_reference_mode for more details
+ * @interpolation_filter: specifies the filter selection used for performing inter prediction.
+ * See &v4l2_vp9_interpolation_filter for more details
+ * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details
+ * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags
+ *
+ * This function performs backward updates of non-coefficients probabilities for the vp9 boolean
+ * decoder. After a frame has been decoded the counts of how many times a given symbol has
+ * occurred are known and are used to update the probability of each symbol.
+ */
+void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ u8 reference_mode, u8 interpolation_filter, u8 tx_mode,
+ u32 flags);
+
+/**
+ * v4l2_vp9_seg_feat_enabled() - Check if a segmentation feature is enabled
+ *
+ * @feature_enabled: array of 8-bit flags (for all segments)
+ * @feature: id of the feature to check
+ * @segid: id of the segment to look up
+ *
+ * This function returns true if a given feature is active in a given segment.
+ */
+bool
+v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled,
+ unsigned int feature,
+ unsigned int segid);
+
+#endif /* _MEDIA_V4L2_VP9_H */
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
deleted file mode 100644
index 2e01b2e9a1c0..000000000000
--- a/include/media/videobuf-core.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#ifndef _VIDEOBUF_CORE_H
-#define _VIDEOBUF_CORE_H
-
-#include <linux/poll.h>
-#include <linux/videodev2.h>
-
-#define UNSET (-1U)
-
-
-struct videobuf_buffer;
-struct videobuf_queue;
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage video4linux buffers.
- *
- * struct videobuf_buffer holds the data structures used by the helper
- * functions, additionally some commonly used fields for v4l buffers
- * (width, height, lists, waitqueue) are in there. That struct should
- * be used as first element in the drivers buffer struct.
- *
- * about the mmap helpers (videobuf_mmap_*):
- *
- * The mmaper function allows to map any subset of contiguous buffers.
- * This includes one mmap() call for all buffers (which the original
- * video4linux API uses) as well as one mmap() for every single buffer
- * (which v4l2 uses).
- *
- * If there is a valid mapping for a buffer, buffer->baddr/bsize holds
- * userspace address + size which can be fed into the
- * videobuf_dma_init_user function listed above.
- *
- */
-
-struct videobuf_mapping {
- unsigned int count;
- struct videobuf_queue *q;
-};
-
-enum videobuf_state {
- VIDEOBUF_NEEDS_INIT = 0,
- VIDEOBUF_PREPARED = 1,
- VIDEOBUF_QUEUED = 2,
- VIDEOBUF_ACTIVE = 3,
- VIDEOBUF_DONE = 4,
- VIDEOBUF_ERROR = 5,
- VIDEOBUF_IDLE = 6,
-};
-
-struct videobuf_buffer {
- unsigned int i;
- u32 magic;
-
- /* info about the buffer */
- unsigned int width;
- unsigned int height;
- unsigned int bytesperline; /* use only if != 0 */
- unsigned long size;
- enum v4l2_field field;
- enum videobuf_state state;
- struct list_head stream; /* QBUF/DQBUF list */
-
- /* touched by irq handler */
- struct list_head queue;
- wait_queue_head_t done;
- unsigned int field_count;
- u64 ts;
-
- /* Memory type */
- enum v4l2_memory memory;
-
- /* buffer size */
- size_t bsize;
-
- /* buffer offset (mmap + overlay) */
- size_t boff;
-
- /* buffer addr (userland ptr!) */
- unsigned long baddr;
-
- /* for mmap'ed buffers */
- struct videobuf_mapping *map;
-
- /* Private pointer to allow specific methods to store their data */
- int privsize;
- void *priv;
-};
-
-struct videobuf_queue_ops {
- int (*buf_setup)(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size);
- int (*buf_prepare)(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field);
- void (*buf_queue)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
- void (*buf_release)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
-};
-
-#define MAGIC_QTYPE_OPS 0x12261003
-
-/* Helper operations - device type dependent */
-struct videobuf_qtype_ops {
- u32 magic;
-
- struct videobuf_buffer *(*alloc_vb)(size_t size);
- void *(*vaddr) (struct videobuf_buffer *buf);
- int (*iolock) (struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
- int (*sync) (struct videobuf_queue *q,
- struct videobuf_buffer *buf);
- int (*mmap_mapper) (struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma);
-};
-
-struct videobuf_queue {
- struct mutex vb_lock;
- struct mutex *ext_lock;
- spinlock_t *irqlock;
- struct device *dev;
-
- wait_queue_head_t wait; /* wait if queue is empty */
-
- enum v4l2_buf_type type;
- unsigned int msize;
- enum v4l2_field field;
- enum v4l2_field last; /* for field=V4L2_FIELD_ALTERNATE */
- struct videobuf_buffer *bufs[VIDEO_MAX_FRAME];
- const struct videobuf_queue_ops *ops;
- struct videobuf_qtype_ops *int_ops;
-
- unsigned int streaming:1;
- unsigned int reading:1;
-
- /* capture via mmap() + ioctl(QBUF/DQBUF) */
- struct list_head stream;
-
- /* capture via read() */
- unsigned int read_off;
- struct videobuf_buffer *read_buf;
-
- /* driver private data */
- void *priv_data;
-};
-
-static inline void videobuf_queue_lock(struct videobuf_queue *q)
-{
- if (!q->ext_lock)
- mutex_lock(&q->vb_lock);
-}
-
-static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-{
- if (!q->ext_lock)
- mutex_unlock(&q->vb_lock);
-}
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
- int non_blocking, int intr);
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q);
-
-/* Used on videobuf-dvb */
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct videobuf_qtype_ops *int_ops,
- struct mutex *ext_lock);
-int videobuf_queue_is_busy(struct videobuf_queue *q);
-void videobuf_queue_cancel(struct videobuf_queue *q);
-
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q);
-int videobuf_reqbufs(struct videobuf_queue *q,
- struct v4l2_requestbuffers *req);
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
-int videobuf_qbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b);
-int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking);
-int videobuf_streamon(struct videobuf_queue *q);
-int videobuf_streamoff(struct videobuf_queue *q);
-
-void videobuf_stop(struct videobuf_queue *q);
-
-int videobuf_read_start(struct videobuf_queue *q);
-void videobuf_read_stop(struct videobuf_queue *q);
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int vbihack, int nonblocking);
-ssize_t videobuf_read_one(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int nonblocking);
-__poll_t videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory);
-int __videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory);
-int videobuf_mmap_free(struct videobuf_queue *q);
-int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma);
-
-#endif
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
deleted file mode 100644
index 525883b2c53e..000000000000
--- a/include/media/videobuf-dma-contig.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions are for hardware that lacks scatter-gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- */
-#ifndef _VIDEOBUF_DMA_CONTIG_H
-#define _VIDEOBUF_DMA_CONTIG_H
-
-#include <linux/dma-mapping.h>
-#include <media/videobuf-core.h>
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
-void videobuf_dma_contig_free(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-#endif /* _VIDEOBUF_DMA_CONTIG_H */
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
deleted file mode 100644
index 34450f7ad510..000000000000
--- a/include/media/videobuf-dma-sg.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware to be able to scatter-gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-#ifndef _VIDEOBUF_DMA_SG_H
-#define _VIDEOBUF_DMA_SG_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage buffers (both userland
- * and kernel) for DMA.
- *
- * videobuf_dma_init_*()
- * creates a buffer. The userland version takes a userspace
- * pointer + length. The kernel version just wants the size and
- * also allocates the memory itself using vmalloc_32().
- *
- * videobuf_dma_*()
- * see Documentation/core-api/dma-api-howto.rst; these functions do
- * basically the same. The map function also builds a
- * scatterlist for the buffer (and unmap frees it ...)
- *
- * videobuf_dma_free()
- * no comment ...
- *
- */
-
-struct videobuf_dmabuf {
- u32 magic;
-
- /* for userland buffer */
- int offset;
- size_t size;
- struct page **pages;
-
- /* for kernel buffers */
- void *vaddr;
- struct page **vaddr_pages;
- dma_addr_t *dma_addr;
- struct device *dev;
-
- /* for overlay buffers (pci-pci dma) */
- dma_addr_t bus_addr;
-
- /* common */
- struct scatterlist *sglist;
- int sglen;
- int nr_pages;
- int direction;
-};
-
-struct videobuf_dma_sg_memory {
- u32 magic;
-
- /* for mmap'ed buffers */
- struct videobuf_dmabuf dma;
-};
-
-/*
- * Scatter-gather DMA buffer API.
- *
- * These functions provide a simple way to create a page list and a
- * scatter-gather list from a kernel, userspace or physical address and map
- * the memory for DMA operations.
- *
- * Despite the name, this is totally unrelated to videobuf, except that
- * videobuf-dma-sg uses the same API internally.
- */
-int videobuf_dma_free(struct videobuf_dmabuf *dma);
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
-
-void *videobuf_sg_alloc(size_t size);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-#endif /* _VIDEOBUF_DMA_SG_H */
-
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
deleted file mode 100644
index e930dbb9d7f4..000000000000
--- a/include/media/videobuf-vmalloc.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for vmalloc capture buffers
- *
- * The functions expect the hardware to be able to scatter-gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- */
-#ifndef _VIDEOBUF_VMALLOC_H
-#define _VIDEOBUF_VMALLOC_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_vmalloc_memory {
- u32 magic;
-
- void *vaddr;
-
- /* remap_vmalloc_range seems to need to run
-  * after mmap() in some cases */
- struct vm_area_struct *vma;
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf);
-
-#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 52ef92049073..8b86996b2719 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -18,6 +18,7 @@
#include <linux/dma-buf.h>
#include <linux/bitops.h>
#include <media/media-request.h>
+#include <media/frame_vector.h>
#define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8)
@@ -45,6 +46,7 @@ enum vb2_memory {
struct vb2_fileio_data;
struct vb2_threadio_data;
+struct vb2_buffer;
/**
* struct vb2_mem_ops - memory handling/memory allocator operations.
@@ -52,10 +54,8 @@ struct vb2_threadio_data;
* return ERR_PTR() on failure or a pointer to allocator private,
* per-buffer data on success; the returned private structure
* will then be passed as @buf_priv argument to other ops in this
- * structure. Additional gfp_flags to use when allocating the
- * are also passed to this operation. These flags are from the
- * gfp_flags field of vb2_queue. The size argument to this function
- * shall be *page aligned*.
+ * structure. The size argument to this function shall be
+ * *page aligned*.
* @put: inform the allocator that the buffer will no longer be used;
* usually will result in the allocator freeing the buffer (if
* no other users of this buffer are present); the @buf_priv
@@ -65,13 +65,17 @@ struct vb2_threadio_data;
* DMABUF memory types.
* @get_userptr: acquire userspace memory for a hardware operation; used for
* USERPTR memory types; vaddr is the address passed to the
- * videobuf layer when queuing a video buffer of USERPTR type;
+ * videobuf2 layer when queuing a video buffer of USERPTR type;
* should return an allocator private per-buffer structure
* associated with the buffer on success, ERR_PTR() on failure;
* the returned private structure will then be passed as @buf_priv
* argument to other ops in this structure.
* @put_userptr: inform the allocator that a USERPTR buffer will no longer
* be used.
+ * @prepare: called every time the buffer is passed from userspace to the
+ * driver, useful for cache synchronisation, optional.
+ * @finish: called every time the buffer is passed back from the driver
+ * to the userspace, also optional.
* @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation;
* used for DMABUF memory types; dev is the alloc device
* dbuf is the shared dma_buf; returns ERR_PTR() on failure;
@@ -86,10 +90,6 @@ struct vb2_threadio_data;
* dmabuf.
* @unmap_dmabuf: releases access control to the dmabuf - allocator is notified
* that this driver is done using the dmabuf for now.
- * @prepare: called every time the buffer is passed from userspace to the
- * driver, useful for cache synchronisation, optional.
- * @finish: called every time the buffer is passed back from the driver
- * to the userspace, also optional.
* @vaddr: return a kernel virtual address to a given memory buffer
* associated with the passed private structure or NULL if no
* such mapping exists.
@@ -97,7 +97,7 @@ struct vb2_threadio_data;
* associated with the passed private structure or NULL if not
* available.
* @num_users: return the current number of users of a memory buffer;
- * return 1 if the videobuf layer (or actually the driver using
+ * return 1 if the videobuf2 layer (or actually the driver using
* it) is the only user.
* @mmap: setup a userspace mapping for a given memory buffer under
* the provided virtual memory region.
@@ -116,31 +116,33 @@ struct vb2_threadio_data;
* map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(struct device *dev, unsigned long attrs,
- unsigned long size,
- enum dma_data_direction dma_dir,
- gfp_t gfp_flags);
+ void *(*alloc)(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size);
void (*put)(void *buf_priv);
- struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
-
- void *(*get_userptr)(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir);
+ struct dma_buf *(*get_dmabuf)(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags);
+
+ void *(*get_userptr)(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long vaddr,
+ unsigned long size);
void (*put_userptr)(void *buf_priv);
void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);
- void *(*attach_dmabuf)(struct device *dev,
+ void *(*attach_dmabuf)(struct vb2_buffer *vb,
+ struct device *dev,
struct dma_buf *dbuf,
- unsigned long size,
- enum dma_data_direction dma_dir);
+ unsigned long size);
void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv);
- void *(*vaddr)(void *buf_priv);
- void *(*cookie)(void *buf_priv);
+ void *(*vaddr)(struct vb2_buffer *vb, void *buf_priv);
+ void *(*cookie)(struct vb2_buffer *vb, void *buf_priv);
unsigned int (*num_users)(void *buf_priv);
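The reworked hooks above drop the per-call dma_dir, gfp_flags and attrs parameters; a minimal sketch of an allocator following the new ->alloc() signature, assuming (as the in-tree allocators now do) that this context is derived from vb->vb2_queue. All demo_* names are hypothetical.

	struct demo_buf_priv {
		void *vaddr;
		enum dma_data_direction dma_dir;
	};

	static void *demo_alloc(struct vb2_buffer *vb, struct device *dev,
				unsigned long size)
	{
		struct demo_buf_priv *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return ERR_PTR(-ENOMEM);

		/* the queue context replaces the removed parameters; a real
		 * allocator would also honour vb->vb2_queue->gfp_flags here */
		buf->dma_dir = vb->vb2_queue->dma_dir;
		buf->vaddr = vmalloc_user(size);	/* size is page aligned */
		if (!buf->vaddr) {
			kfree(buf);
			return ERR_PTR(-ENOMEM);
		}
		return buf;	/* handed back as buf_priv to the other ops */
	}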
@@ -153,9 +155,11 @@ struct vb2_mem_ops {
* @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
* @bytesused: number of bytes occupied by data in the plane (payload).
- * @length: size of this plane (NOT the payload) in bytes.
+ * @length: size of this plane (NOT the payload) in bytes. The maximum
+ * valid size is UINT_MAX - PAGE_SIZE.
* @min_length: minimum required size of this plane (NOT the payload) in bytes.
- * @length is always greater or equal to @min_length.
+ * @length is always greater than or equal to @min_length, and like
+ * @length, it is limited to UINT_MAX - PAGE_SIZE.
* @m: Union with memtype-specific data.
* @m.offset: when memory in the associated struct vb2_buffer is
* %VB2_MEMORY_MMAP, equals the offset from the start of
@@ -206,11 +210,11 @@ enum vb2_io_modes {
* enum vb2_buffer_state - current video buffer state.
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
* @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request.
- * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
- * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
+ * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf2.
+ * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf2, but not in driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation.
- * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
+ * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf2, but
* not yet dequeued to userspace.
* @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer
* has ended with an error, which will be reported
@@ -263,22 +267,22 @@ struct vb2_buffer {
* after the 'buf_finish' op is called.
* copied_timestamp: the timestamp of this capture buffer was copied
* from an output buffer.
- * need_cache_sync_on_prepare: when set buffer's ->prepare() function
- * performs cache sync/invalidation.
- * need_cache_sync_on_finish: when set buffer's ->finish() function
- * performs cache sync/invalidation.
+ * skip_cache_sync_on_prepare: when set buffer's ->prepare() function
+ * skips cache sync/invalidation.
+ * skip_cache_sync_on_finish: when set buffer's ->finish() function
+ * skips cache sync/invalidation.
+ * planes: per-plane information; do not change
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
* to be dequeued to userspace
- * vb2_plane: per-plane information; do not change
*/
enum vb2_buffer_state state;
unsigned int synced:1;
unsigned int prepared:1;
unsigned int copied_timestamp:1;
- unsigned int need_cache_sync_on_prepare:1;
- unsigned int need_cache_sync_on_finish:1;
+ unsigned int skip_cache_sync_on_prepare:1;
+ unsigned int skip_cache_sync_on_finish:1;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -382,6 +386,12 @@ struct vb2_buffer {
* the buffer contents will be ignored anyway.
* @buf_cleanup: called once before the buffer is freed; drivers may
* perform any additional cleanup; optional.
+ * @prepare_streaming: called once to prepare for 'streaming' state; this is
+ * where validation can be done to verify everything is
+ * okay and streaming resources can be claimed. It is
+ * called when the VIDIOC_STREAMON ioctl is called. The
+ * actual streaming starts when @start_streaming is called.
+ * Optional.
* @start_streaming: called once to enter 'streaming' state; the driver may
* receive buffers with @buf_queue callback
* before @start_streaming is called; the driver gets the
@@ -392,7 +402,7 @@ struct vb2_buffer {
* by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
* If you need a minimum number of buffers before you can
* start streaming, then set
- * &vb2_queue->min_buffers_needed. If that is non-zero
+ * &vb2_queue->min_queued_buffers. If that is non-zero
* then @start_streaming won't be called until at least
* that many buffers have been queued up by userspace.
* @stop_streaming: called when 'streaming' state must be disabled; driver
@@ -401,6 +411,10 @@ struct vb2_buffer {
* callback by calling vb2_buffer_done() with either
* %VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR; may use
* vb2_wait_for_all_buffers() function
+ * @unprepare_streaming:called as counterpart to @prepare_streaming; any claimed
+ * streaming resources can be released here. It is
+ * called when the VIDIOC_STREAMOFF ioctl is called or
+ * when the streaming filehandle is closed. Optional.
* @buf_queue: passes buffer vb to the driver; driver may start
* hardware operation on this buffer; driver should give
* the buffer back by calling vb2_buffer_done() function;
@@ -428,8 +442,10 @@ struct vb2_ops {
void (*buf_finish)(struct vb2_buffer *vb);
void (*buf_cleanup)(struct vb2_buffer *vb);
+ int (*prepare_streaming)(struct vb2_queue *q);
int (*start_streaming)(struct vb2_queue *q, unsigned int count);
void (*stop_streaming)(struct vb2_queue *q);
+ void (*unprepare_streaming)(struct vb2_queue *q);
void (*buf_queue)(struct vb2_buffer *vb);
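A sketch of the new callback pair, assuming a hypothetical demo_dev that claims a clock: resources are taken at VIDIOC_STREAMON time and released again at VIDIOC_STREAMOFF or file close, while DMA itself still starts in @start_streaming.

	static int demo_prepare_streaming(struct vb2_queue *q)
	{
		struct demo_dev *dev = vb2_get_drv_priv(q);

		/* claim streaming resources; no DMA is started yet */
		return clk_prepare_enable(dev->clk);
	}

	static void demo_unprepare_streaming(struct vb2_queue *q)
	{
		struct demo_dev *dev = vb2_get_drv_priv(q);

		clk_disable_unprepare(dev->clk);
	}

	static const struct vb2_ops demo_qops = {
		.queue_setup		= demo_queue_setup,
		.buf_queue		= demo_buf_queue,
		.prepare_streaming	= demo_prepare_streaming,
		.start_streaming	= demo_start_streaming,
		.stop_streaming		= demo_stop_streaming,
		.unprepare_streaming	= demo_unprepare_streaming,
	};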
@@ -462,13 +478,12 @@ struct vb2_buf_ops {
};
/**
- * struct vb2_queue - a videobuf queue.
+ * struct vb2_queue - a videobuf2 queue.
*
* @type: private buffer type whose content is defined by the vb2-core
* caller. For example, for V4L2, it should match
* the types defined on &enum v4l2_buf_type.
* @io_modes: supported io methods (see &enum vb2_io_modes).
- * @alloc_devs: &struct device memory type/allocator-specific per-plane device
* @dev: device to use for the default allocation context if the driver
* doesn't fill in the @alloc_devs array.
* @dma_attrs: DMA attributes to use for the DMA.
@@ -500,6 +515,8 @@ struct vb2_buf_ops {
* @allow_cache_hints: when set user-space can pass cache management hints in
 * order to skip cache flush/invalidation on ->prepare() and/or
* ->finish().
+ * @non_coherent_mem: when set, the queue will attempt to allocate buffers
+ * using non-coherent memory.
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -528,18 +545,25 @@ struct vb2_buf_ops {
* @gfp_flags: additional gfp flags used when allocating the buffers.
* Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32
* to force the buffer allocation to a specific memory zone.
- * @min_buffers_needed: the minimum number of buffers needed before
+ * @min_queued_buffers: the minimum number of queued buffers needed before
* @start_streaming can be called. Used when a DMA engine
* cannot be started unless at least this number of buffers
* have been queued into the driver.
+ * VIDIOC_REQBUFS will ensure at least @min_queued_buffers
+ * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not
+ * modify the requested buffer count.
+ * @alloc_devs: &struct device memory type/allocator-specific per-plane device
*/
/*
* Private elements (won't appear at the uAPI book):
* @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @dma_dir: DMA mapping direction.
- * @bufs: videobuf buffer structures
+ * @bufs: videobuf2 buffer structures
* @num_buffers: number of allocated/used buffers
+ * @max_num_buffers: upper limit of number of allocated/used buffers.
+ * If set to 0, the v4l2 core will change it to VB2_MAX_FRAME
+ * for backward compatibility.
* @queued_list: list of buffers currently queued from userspace
* @queued_count: number of buffers queued and ready for streaming.
* @owned_by_drv_count: number of buffers owned by the driver
@@ -553,6 +577,9 @@ struct vb2_buf_ops {
* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
* buffers. Only set for capture queues if qbuf has not yet been
* called since poll() needs to return %EPOLLERR in that situation.
+ * @waiting_in_dqbuf: set by the core for the duration of a blocking DQBUF, when
+ * it has to wait for a buffer to become available with vb2_queue->lock
+ * released. Used to prevent other threads from destroying the queue.
* @is_multiplanar: set if buffer type is multiplanar
* @is_output: set if buffer type is output
* @copy_timestamp: set if vb2-core should set timestamps
@@ -579,6 +606,7 @@ struct vb2_queue {
unsigned int uses_qbuf:1;
unsigned int uses_requests:1;
unsigned int allow_cache_hints:1;
+ unsigned int non_coherent_mem:1;
struct mutex *lock;
void *owner;
@@ -592,7 +620,7 @@ struct vb2_queue {
unsigned int buf_struct_size;
u32 timestamp_flags;
gfp_t gfp_flags;
- u32 min_buffers_needed;
+ u32 min_queued_buffers;
struct device *alloc_devs[VB2_MAX_PLANES];
@@ -600,8 +628,9 @@ struct vb2_queue {
struct mutex mmap_lock;
unsigned int memory;
enum dma_data_direction dma_dir;
- struct vb2_buffer *bufs[VB2_MAX_FRAME];
+ struct vb2_buffer **bufs;
unsigned int num_buffers;
+ unsigned int max_num_buffers;
struct list_head queued_list;
unsigned int queued_count;
@@ -634,8 +663,10 @@ struct vb2_queue {
u32 cnt_queue_setup;
u32 cnt_wait_prepare;
u32 cnt_wait_finish;
+ u32 cnt_prepare_streaming;
u32 cnt_start_streaming;
u32 cnt_stop_streaming;
+ u32 cnt_unprepare_streaming;
#endif
};
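For context on the renamed field, a hedged sketch of queue initialization (demo_* names hypothetical); here the DMA engine cannot start until two buffers are queued, so REQBUFS will also allocate at least two.

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct demo_buffer);
	q->ops = &demo_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_queued_buffers = 2;	/* was min_buffers_needed */
	q->lock = &dev->mutex;
	ret = vb2_queue_init(q);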
@@ -676,7 +707,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer
+ * vb2_buffer_done() - inform videobuf2 that an operation on a buffer
* is finished.
* @vb: pointer to &struct vb2_buffer to be used.
* @state: state of the buffer, as defined by &enum vb2_buffer_state.
@@ -726,7 +757,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q);
/**
* vb2_core_querybuf() - query video buffer information.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer.
+ * @vb: pointer to &struct vb2_buffer.
* @pb: buffer struct passed from userspace.
*
* Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called
@@ -738,14 +769,14 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q);
*
* Return: returns zero on success; an error code otherwise.
*/
-void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
+void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
/**
* vb2_core_reqbufs() - Initiate streaming.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
* @flags: auxiliary queue/buffer management flags. Currently, the only
- * used flag is %V4L2_FLAG_MEMORY_NON_CONSISTENT.
+ * used flag is %V4L2_MEMORY_FLAG_NON_COHERENT.
* @count: requested buffer count.
*
* Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called
@@ -770,7 +801,7 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int flags, unsigned int *count);
+ unsigned int flags, unsigned int *count);
/**
* vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
@@ -802,7 +833,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
* to the kernel.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer.
+ * @vb: pointer to &struct vb2_buffer.
* @pb: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
*
@@ -818,13 +849,13 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
/**
* vb2_core_qbuf() - Queue a buffer from userspace
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer
+ * @vb: pointer to &struct vb2_buffer.
* @pb: buffer structure passed from userspace to
* v4l2_ioctl_ops->vidioc_qbuf handler in driver
* @req: pointer to &struct media_request, may be NULL.
@@ -846,7 +877,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
struct media_request *req);
/**
@@ -910,7 +941,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
* @fd: pointer to the file descriptor associated with DMABUF
* (set by driver).
* @type: buffer type.
- * @index: id number of the buffer.
+ * @vb: pointer to &struct vb2_buffer.
* @plane: index of the plane to be exported, 0 for single plane queues
* @flags: file flags for newly created file, as defined at
* include/uapi/asm-generic/fcntl.h.
@@ -924,7 +955,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
- unsigned int index, unsigned int plane, unsigned int flags);
+ struct vb2_buffer *vb, unsigned int plane, unsigned int flags);
/**
* vb2_core_queue_init() - initialize a videobuf2 queue
@@ -1046,7 +1077,7 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
/**
- * vb2_read() - implements write() syscall logic.
+ * vb2_write() - implements write() syscall logic.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @data: pointed to target userspace buffer
* @count: number of bytes to write
@@ -1119,6 +1150,15 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
}
/**
+ * vb2_get_num_buffers() - get the number of buffers in a queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ */
+static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q)
+{
+ return q->num_buffers;
+}
+
+/**
* vb2_is_busy() - return busy status of the queue.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
*
@@ -1126,7 +1166,7 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
*/
static inline bool vb2_is_busy(struct vb2_queue *q)
{
- return (q->num_buffers > 0);
+ return vb2_get_num_buffers(q) > 0;
}
/**
@@ -1148,8 +1188,15 @@ static inline void *vb2_get_drv_priv(struct vb2_queue *q)
static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no, unsigned long size)
{
- if (plane_no < vb->num_planes)
+ /*
+ * size must never be larger than the buffer length, so
+ * warn and clamp to the buffer length if that's the case.
+ */
+ if (plane_no < vb->num_planes) {
+ if (WARN_ON_ONCE(size > vb->planes[plane_no].length))
+ size = vb->planes[plane_no].length;
vb->planes[plane_no].bytesused = size;
+ }
}
/**
@@ -1211,6 +1258,12 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
unsigned int index)
{
+ if (!q->bufs)
+ return NULL;
+
+ if (index >= q->max_num_buffers)
+ return NULL;
+
if (index < q->num_buffers)
return q->bufs[index];
return NULL;
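With @bufs now a dynamically sized array, callers should no longer index q->bufs directly; a sketch of defensive iteration with the two helpers, where vb2_get_buffer() returns NULL for out-of-range or unallocated slots (error-handling path, demo name hypothetical):

	static void demo_return_all_buffers(struct vb2_queue *q,
					    enum vb2_buffer_state state)
	{
		unsigned int i;

		for (i = 0; i < vb2_get_num_buffers(q); i++) {
			struct vb2_buffer *vb = vb2_get_buffer(q, i);

			if (!vb)
				continue;
			vb2_buffer_done(vb, state);
		}
	}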
diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h
index 8605366ec87c..2d577b945637 100644
--- a/include/media/videobuf2-dvb.h
+++ b/include/media/videobuf2-dvb.h
@@ -24,7 +24,7 @@ struct vb2_dvb {
struct dvb_frontend *frontend;
struct vb2_queue dvbq;
- /* video-buf-dvb state info */
+ /* vb2-dvb state info */
struct mutex lock;
int nfeeds;
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index cd4a46331531..4b5b84f93538 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -34,7 +34,8 @@ struct vb2_vmarea_handler {
extern const struct vm_operations_struct vb2_common_vm_ops;
struct frame_vector *vb2_create_framevec(unsigned long start,
- unsigned long length);
+ unsigned long length,
+ bool write);
void vb2_destroy_framevec(struct frame_vector *vec);
#endif
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index b7b5a9cb5a28..5a845887850b 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -23,6 +23,8 @@
#error VB2_MAX_PLANES != VIDEO_MAX_PLANES
#endif
+struct video_device;
+
/**
* struct vb2_v4l2_buffer - video buffer information for v4l2.
*
@@ -61,20 +63,14 @@ struct vb2_v4l2_buffer {
container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
/**
- * vb2_find_timestamp() - Find buffer with given timestamp in the queue
+ * vb2_find_buffer() - Find a buffer with given timestamp
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @timestamp: the timestamp to find.
- * @start_idx: the start index (usually 0) in the buffer array to start
- * searching from. Note that there may be multiple buffers
- * with the same timestamp value, so you can restart the search
- * by setting @start_idx to the previously found index + 1.
*
- * Returns the buffer index of the buffer with the given @timestamp, or
- * -1 if no buffer with @timestamp was found.
+ * Returns the buffer with the given @timestamp, or NULL if not found.
*/
-int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
- unsigned int start_idx);
+struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp);
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
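A sketch of the migration for a codec driver that looks up a capture buffer by timestamp (hypothetical names): the old helper returned an index to be looked up in q->bufs, the new one hands back the buffer itself.

	/* before: */
	int idx = vb2_find_timestamp(cap_q, ts, 0);

	if (idx < 0)
		return -EINVAL;
	ref = to_vb2_v4l2_buffer(cap_q->bufs[idx]);

	/* after: */
	struct vb2_buffer *vb = vb2_find_buffer(cap_q, ts);

	if (!vb)
		return -EINVAL;
	ref = to_vb2_v4l2_buffer(vb);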
@@ -260,6 +256,22 @@ int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name);
void vb2_queue_release(struct vb2_queue *q);
/**
+ * vb2_queue_change_type() - change the type of an inactive vb2_queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*)
+ *
+ * This function changes the type of the vb2_queue. This is only possible
+ * if the queue is not busy (i.e. no buffers have been allocated).
+ *
+ * vb2_queue_change_type() can be used to support multiple buffer types using
+ * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and
+ * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type()
+ * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus
+ * "lock" the buffer type until the buffers have been released.
+ */
+int vb2_queue_change_type(struct vb2_queue *q, unsigned int type);
+
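A sketch of the usage described above, assuming a hypothetical demo_dev whose single queue serves multiple buffer types:

	static int demo_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
	{
		struct demo_dev *dev = video_drvdata(file);
		int ret;

		/* fails if buffers are still allocated for the old type */
		ret = vb2_queue_change_type(&dev->queue, rb->type);
		if (ret)
			return ret;

		return vb2_ioctl_reqbufs(file, priv, rb);
	}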
+/**
* vb2_poll() - implements poll userspace operation
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @file: file argument passed to the poll file operation handler
@@ -284,10 +296,29 @@ __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
* The following functions are not part of the vb2 core API, but are simple
* helper functions that you can use in your struct v4l2_file_operations,
* struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock
- * or video_device->lock is set, and they will set and test vb2_queue->owner
- * to check if the calling filehandle is permitted to do the queuing operation.
+ * or video_device->lock is set, and they will set and test the queue owner
+ * (vb2_queue->owner) to check if the calling filehandle is permitted to do the
+ * queuing operation.
*/
+/**
+ * vb2_queue_is_busy() - check if the queue is busy
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @file: file through which the vb2 queue access is performed
+ *
+ * The queue is considered busy if it has an owner and the owner is not the
+ * @file.
+ *
+ * Queue ownership is acquired and checked by some of the v4l2_ioctl_ops helpers
+ * below. Drivers can also use this function directly when they need to
+ * open-code ioctl handlers, for instance to add additional checks between the
+ * queue ownership test and the call to the corresponding vb2 operation.
+ */
+static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file)
+{
+ return q->owner && q->owner != file->private_data;
+}
+
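For the open-coded case mentioned above, a sketch of an ioctl handler that inserts a driver-specific check between the ownership test and the vb2 call (demo_* names hypothetical):

	static int demo_expbuf(struct file *file, void *priv,
			       struct v4l2_exportbuffer *eb)
	{
		struct demo_dev *dev = video_drvdata(file);

		if (vb2_queue_is_busy(&dev->queue, file))
			return -EBUSY;

		if (dev->firmware_loading)	/* driver-specific check */
			return -EBUSY;

		return vb2_expbuf(&dev->queue, eb);
	}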
/* struct v4l2_ioctl_ops helpers */
int vb2_ioctl_reqbufs(struct file *file, void *priv,
@@ -320,6 +351,21 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
#endif
/**
+ * vb2_video_unregister_device - unregister the video device and release queue
+ *
+ * @vdev: pointer to &struct video_device
+ *
+ * If the driver uses vb2_fop_release()/_vb2_fop_release(), then it should use
+ * vb2_video_unregister_device() instead of video_unregister_device().
+ *
+ * This function will call video_unregister_device() and then release the
+ * vb2_queue if streaming is in progress. This stops streaming and
+ * simplifies the unbind sequence, since after this call all subdevs
+ * will have stopped streaming as well.
+ */
+void vb2_video_unregister_device(struct video_device *vdev);
+
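In a driver's unbind path this replaces video_unregister_device() one for one; a sketch assuming a hypothetical platform driver:

	static void demo_remove(struct platform_device *pdev)
	{
		struct demo_dev *dev = platform_get_drvdata(pdev);

		/* stops streaming and releases the queue, so all subdevs
		 * are already idle for the rest of the unbind */
		vb2_video_unregister_device(&dev->vdev);
		v4l2_device_unregister(&dev->v4l2_dev);
	}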
+/**
* vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
*
* @vq: pointer to &struct vb2_queue
diff --git a/include/media/vp8-ctrls.h b/include/media/vp8-ctrls.h
deleted file mode 100644
index 53cba826e482..000000000000
--- a/include/media/vp8-ctrls.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the VP8 state controls for use with stateless VP8
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _VP8_CTRLS_H_
-#define _VP8_CTRLS_H_
-
-#include <linux/types.h>
-
-#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F')
-
-#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER (V4L2_CID_MPEG_BASE + 2000)
-#define V4L2_CTRL_TYPE_VP8_FRAME_HEADER 0x301
-
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED 0x01
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP 0x02
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA 0x04
-#define V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE 0x08
-
-struct v4l2_vp8_segment_header {
- __s8 quant_update[4];
- __s8 lf_update[4];
- __u8 segment_probs[3];
- __u8 padding;
- __u32 flags;
-};
-
-#define V4L2_VP8_LF_HEADER_ADJ_ENABLE 0x01
-#define V4L2_VP8_LF_HEADER_DELTA_UPDATE 0x02
-#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04
-struct v4l2_vp8_loopfilter_header {
- __s8 ref_frm_delta[4];
- __s8 mb_mode_delta[4];
- __u8 sharpness_level;
- __u8 level;
- __u16 padding;
- __u32 flags;
-};
-
-struct v4l2_vp8_quantization_header {
- __u8 y_ac_qi;
- __s8 y_dc_delta;
- __s8 y2_dc_delta;
- __s8 y2_ac_delta;
- __s8 uv_dc_delta;
- __s8 uv_ac_delta;
- __u16 padding;
-};
-
-struct v4l2_vp8_entropy_header {
- __u8 coeff_probs[4][8][3][11];
- __u8 y_mode_probs[4];
- __u8 uv_mode_probs[3];
- __u8 mv_probs[2][19];
- __u8 padding[3];
-};
-
-struct v4l2_vp8_entropy_coder_state {
- __u8 range;
- __u8 value;
- __u8 bit_count;
- __u8 padding;
-};
-
-#define V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME 0x01
-#define V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL 0x02
-#define V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME 0x04
-#define V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF 0x08
-#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN 0x10
-#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT 0x20
-
-#define VP8_FRAME_IS_KEY_FRAME(hdr) \
- (!!((hdr)->flags & V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME))
-
-struct v4l2_ctrl_vp8_frame_header {
- struct v4l2_vp8_segment_header segment_header;
- struct v4l2_vp8_loopfilter_header lf_header;
- struct v4l2_vp8_quantization_header quant_header;
- struct v4l2_vp8_entropy_header entropy_header;
- struct v4l2_vp8_entropy_coder_state coder_state;
-
- __u16 width;
- __u16 height;
-
- __u8 horizontal_scale;
- __u8 vertical_scale;
-
- __u8 version;
- __u8 prob_skip_false;
- __u8 prob_intra;
- __u8 prob_last;
- __u8 prob_gf;
- __u8 num_dct_parts;
-
- __u32 first_part_size;
- __u32 first_part_header_bits;
- __u32 dct_part_sizes[8];
-
- __u64 last_frame_ts;
- __u64 golden_frame_ts;
- __u64 alt_frame_ts;
-
- __u64 flags;
-};
-
-#endif
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index cc1b0d42ce95..48f4a5023d81 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -51,6 +51,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
* @dst: destination rectangle on the display (integer coordinates)
* @alpha: alpha value (0: fully transparent, 255: fully opaque)
* @zpos: Z position of the plane (from 0 to number of planes minus 1)
+ * @premult: true for premultiplied alpha
*/
struct vsp1_du_atomic_config {
u32 pixelformat;
@@ -60,6 +61,7 @@ struct vsp1_du_atomic_config {
struct v4l2_rect dst;
unsigned int alpha;
unsigned int zpos;
+ bool premult;
};
/**
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index 9ad136682c47..b8fa30fd6b50 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -10,6 +10,7 @@
#ifndef __RENESAS_RPC_IF_H
#define __RENESAS_RPC_IF_H
+#include <linux/pm_runtime.h>
#include <linux/types.h>
enum rpcif_data_dir {
@@ -18,7 +19,7 @@ enum rpcif_data_dir {
RPCIF_DATA_OUT,
};
-struct rpcif_op {
+struct rpcif_op {
struct {
u8 buswidth;
u8 opcode;
@@ -56,32 +57,23 @@ struct rpcif_op {
} data;
};
-struct rpcif {
+enum rpcif_type {
+ RPCIF_RCAR_GEN3,
+ RPCIF_RCAR_GEN4,
+ RPCIF_RZ_G2L,
+};
+
+struct rpcif {
struct device *dev;
void __iomem *dirmap;
- struct regmap *regmap;
- struct reset_control *rstc;
size_t size;
- enum rpcif_data_dir dir;
- u8 bus_size;
- void *buffer;
- u32 xferlen;
- u32 smcr;
- u32 smadr;
- u32 command; /* DRCMR or SMCMR */
- u32 option; /* DROPR or SMOPR */
- u32 enable; /* DRENR or SMENR */
- u32 dummy; /* DRDMCR or SMDMCR */
- u32 ddr; /* DRDRENR or SMDRENR */
};
-int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
-void rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
-void rpcif_enable_rpm(struct rpcif *rpc);
-void rpcif_disable_rpm(struct rpcif *rpc);
-void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
+int rpcif_hw_init(struct device *dev, bool hyperflash);
+void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
size_t *len);
-int rpcif_manual_xfer(struct rpcif *rpc);
-ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf);
+int rpcif_manual_xfer(struct device *dev);
+ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf);
#endif // __RENESAS_RPC_IF_H
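The helpers now take the underlying device instead of the struct rpcif handle, and rpcif_hw_init() can fail; a sketch of a consumer updated for the new signatures, assuming rpcif_sw_init() fills rpc.dev with the controller device (op, offs and len are placeholders):

	struct rpcif rpc;
	int ret;

	ret = rpcif_sw_init(&rpc, dev);
	if (ret)
		return ret;

	ret = rpcif_hw_init(rpc.dev, false);	/* now returns an error code */
	if (ret)
		return ret;

	rpcif_prepare(rpc.dev, &op, &offs, &len);
	ret = rpcif_manual_xfer(rpc.dev);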
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 0410412de16b..d8044299d654 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -30,7 +30,7 @@ unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
/*
* Context lifetime overview:
*
- * An AFU context may be inited and then started and stoppped multiple times
+ * An AFU context may be inited and then started and stopped multiple times
 * before it's released, i.e.:
* - cxl_dev_context_init()
* - cxl_start_context()
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
index 357ef1aadbc0..3ed736da02c8 100644
--- a/include/misc/ocxl.h
+++ b/include/misc/ocxl.h
@@ -447,7 +447,7 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle);
* defined
*/
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
- u64 amr, struct mm_struct *mm,
+ u64 amr, u16 bdf, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data);
@@ -460,14 +460,8 @@ int ocxl_link_remove_pe(void *link_handle, int pasid);
* Allocate an AFU interrupt associated to the link.
*
* 'hw_irq' is the hardware interrupt number
- * 'obj_handle' is the 64-bit object handle to be passed to the AFU to
- * trigger the interrupt.
- * On P9, 'obj_handle' is an address, which, if written, triggers the
- * interrupt. It is an MMIO address which needs to be remapped (one
- * page).
- */
-int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
- u64 *obj_handle);
+ */
+int ocxl_link_irq_alloc(void *link_handle, int *hw_irq);
/*
* Free a previously allocated AFU interrupt
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 03614de86942..60cad0d200a4 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/9p.h
- *
* 9P protocol definitions.
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
@@ -32,18 +30,20 @@
*/
enum p9_debug_flags {
- P9_DEBUG_ERROR = (1<<0),
- P9_DEBUG_9P = (1<<2),
+ P9_DEBUG_ERROR = (1<<0),
+ P9_DEBUG_9P = (1<<2),
P9_DEBUG_VFS = (1<<3),
P9_DEBUG_CONV = (1<<4),
P9_DEBUG_MUX = (1<<5),
P9_DEBUG_TRANS = (1<<6),
- P9_DEBUG_SLABS = (1<<7),
+ P9_DEBUG_SLABS = (1<<7),
P9_DEBUG_FCALL = (1<<8),
P9_DEBUG_FID = (1<<9),
P9_DEBUG_PKT = (1<<10),
P9_DEBUG_FSC = (1<<11),
P9_DEBUG_VPKT = (1<<12),
+ P9_DEBUG_CACHE = (1<<13),
+ P9_DEBUG_MMAP = (1<<14),
};
#ifdef CONFIG_NET_9P_DEBUG
@@ -215,6 +215,10 @@ enum p9_open_mode_t {
P9_ORCLOSE = 0x40,
P9_OAPPEND = 0x80,
P9_OEXCL = 0x1000,
+ P9L_MODE_MASK = 0x1FFF, /* only bits under this mask go to the server */
+ P9L_DIRECT = 0x2000, /* cache disabled */
+ P9L_NOWRITECACHE = 0x4000, /* no write caching */
+ P9L_LOOSE = 0x8000, /* loose cache */
};
/**
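A sketch of how the new client-side bits are meant to be used, assuming a computed open mode p9_omode: bits above P9L_MODE_MASK only steer local caching and must be masked off before the open request reaches the server.

	bool direct = p9_omode & P9L_DIRECT;	/* local cache policy only */

	ret = p9_client_open(fid, p9_omode & P9L_MODE_MASK);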
@@ -317,8 +321,8 @@ enum p9_qid_t {
};
/* 9P Magic Numbers */
-#define P9_NOTAG (u16)(~0)
-#define P9_NOFID (u32)(~0)
+#define P9_NOTAG ((u16)(~0))
+#define P9_NOFID ((u32)(~0))
#define P9_MAXWELEM 16
/* Minimal header size: size[4] type[1] tag[2] */
@@ -333,6 +337,9 @@ enum p9_qid_t {
/* size of header for zero copy read/write */
#define P9_ZC_HDR_SZ 4096
+/* maximum length of an error string */
+#define P9_ERRMAX 128
+
/**
* struct p9_qid - file system entity information
* @type: 8-bit type &p9_qid_t
@@ -530,6 +537,7 @@ struct p9_rstatfs {
* @offset: used by marshalling routines to track current position in buffer
* @capacity: used by marshalling routines to track total malloc'd capacity
* @sdata: payload
+ * @zc: whether zero-copy is used
*
* &p9_fcall represents the structure for all 9P RPC
 * transactions. Requests are packaged into fcalls, and responses
@@ -548,11 +556,10 @@ struct p9_fcall {
struct kmem_cache *cache;
u8 *sdata;
+ bool zc;
};
int p9_errstr2errno(char *errstr, int len);
int p9_error_init(void);
-int p9_trans_fd_init(void);
-void p9_trans_fd_exit(void);
#endif /* NET_9P_H */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index dd5b5bd781a4..78ebcf782ce5 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/client.h
- *
* 9P Client Definitions
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
@@ -13,6 +11,7 @@
#include <linux/utsname.h>
#include <linux/idr.h>
+#include <linux/tracepoint-defs.h>
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
@@ -23,7 +22,7 @@
* @p9_proto_2000L: 9P2000.L extension
*/
-enum p9_proto_versions{
+enum p9_proto_versions {
p9_proto_legacy,
p9_proto_2000u,
p9_proto_2000L,
@@ -78,7 +77,7 @@ enum p9_req_status_t {
struct p9_req_t {
int status;
int t_err;
- struct kref refcount;
+ refcount_t refcount;
wait_queue_head_t wq;
struct p9_fcall tc;
struct p9_fcall rc;
@@ -140,10 +139,16 @@ struct p9_client {
*
* TODO: This needs lots of explanation.
*/
+enum fid_source {
+ FID_FROM_OTHER,
+ FID_FROM_INODE,
+ FID_FROM_DENTRY,
+};
struct p9_fid {
struct p9_client *clnt;
u32 fid;
+ refcount_t count;
int mode;
struct p9_qid qid;
u32 iounit;
@@ -152,6 +157,7 @@ struct p9_fid {
void *rdir;
struct hlist_node dlist; /* list of all fids attached to a dentry */
+ struct hlist_node ilist;
};
/**
@@ -212,36 +218,80 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
u64 request_mask);
int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode,
- dev_t rdev, kgid_t gid, struct p9_qid *);
+ dev_t rdev, kgid_t gid, struct p9_qid *qid);
int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
- kgid_t gid, struct p9_qid *);
+ kgid_t gid, struct p9_qid *qid);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
void p9_fcall_fini(struct p9_fcall *fc);
-struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
+struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag);
static inline void p9_req_get(struct p9_req_t *r)
{
- kref_get(&r->refcount);
+ refcount_inc(&r->refcount);
}
static inline int p9_req_try_get(struct p9_req_t *r)
{
- return kref_get_unless_zero(&r->refcount);
+ return refcount_inc_not_zero(&r->refcount);
}
-int p9_req_put(struct p9_req_t *r);
+int p9_req_put(struct p9_client *c, struct p9_req_t *r);
+
+/* We cannot have the real tracepoints in header files,
+ * use a wrapper function */
+DECLARE_TRACEPOINT(9p_fid_ref);
+void do_trace_9p_fid_get(struct p9_fid *fid);
+void do_trace_9p_fid_put(struct p9_fid *fid);
+
+/* fid reference counting helpers:
+ * - fids used for any length of time should always be referenced through
+ * p9_fid_get(), and released with p9_fid_put()
+ * - v9fs_fid_lookup() or similar will automatically call get for you
+ * and also require a put
+ * - the *_fid_add() helpers will stash the fid in the inode,
+ * at which point it is the responsibility of evict_inode()
+ * to call the put
+ * - the last put will automatically send a clunk to the server
+ */
+static inline struct p9_fid *p9_fid_get(struct p9_fid *fid)
+{
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_get(fid);
+
+ refcount_inc(&fid->count);
+
+ return fid;
+}
+
+static inline int p9_fid_put(struct p9_fid *fid)
+{
+ if (!fid || IS_ERR(fid))
+ return 0;
+
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_put(fid);
+
+ if (!refcount_dec_and_test(&fid->count))
+ return 0;
+
+ return p9_client_clunk(fid);
+}
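Following the rules above, a sketch of a typical call site; v9fs_fid_lookup() is the fs/9p helper that returns an already-referenced fid:

	struct p9_fid *fid;
	int ret;

	fid = v9fs_fid_lookup(dentry);	/* reference taken for us */
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	ret = p9_client_fsync(fid, 0);

	p9_fid_put(fid);	/* the last put clunks the fid on the server */
	return ret;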
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
-int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
-void p9stat_free(struct p9_wstat *);
+int p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type,
+ int16_t *tag, int rewind);
+int p9stat_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_wstat *st);
+void p9stat_free(struct p9_wstat *stbuf);
int p9_is_proto_dotu(struct p9_client *clnt);
int p9_is_proto_dotl(struct p9_client *clnt);
-struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *);
-int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int);
+struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ const char *attr_name, u64 *attr_size);
+int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
+ u64 attr_size, int flags);
int p9_client_readlink(struct p9_fid *fid, char **target);
int p9_client_init(void);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 3eb4261b2958..766ec07c9599 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/net/9p/transport.h
- *
* Transport Definition
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
@@ -11,6 +9,8 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
+#include <linux/module.h>
+
#define P9_DEF_MIN_RESVPORT (665U)
#define P9_DEF_MAX_RESVPORT (1023U)
@@ -19,6 +19,10 @@
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
+ * @pooled_rbuffers: currently only set for RDMA transport which pulls the
+ * response buffers from a shared pool, and accordingly
+ * we're less flexible when choosing the response message
+ * size in this case
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
* @close: member function to discard a connection on this transport
@@ -38,21 +42,28 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
+ bool pooled_rbuffers;
int def; /* this transport should be default */
struct module *owner;
- int (*create)(struct p9_client *, const char *, char *);
- void (*close) (struct p9_client *);
- int (*request) (struct p9_client *, struct p9_req_t *req);
- int (*cancel) (struct p9_client *, struct p9_req_t *req);
- int (*cancelled)(struct p9_client *, struct p9_req_t *req);
- int (*zc_request)(struct p9_client *, struct p9_req_t *,
- struct iov_iter *, struct iov_iter *, int , int, int);
- int (*show_options)(struct seq_file *, struct p9_client *);
+ int (*create)(struct p9_client *client,
+ const char *devname, char *args);
+ void (*close)(struct p9_client *client);
+ int (*request)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancel)(struct p9_client *client, struct p9_req_t *req);
+ int (*cancelled)(struct p9_client *client, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *client, struct p9_req_t *req,
+ struct iov_iter *uidata, struct iov_iter *uodata,
+ int inlen, int outlen, int in_hdr_len);
+ int (*show_options)(struct seq_file *m, struct p9_client *client);
};
void v9fs_register_trans(struct p9_trans_module *m);
void v9fs_unregister_trans(struct p9_trans_module *m);
-struct p9_trans_module *v9fs_get_trans_by_name(char *s);
+struct p9_trans_module *v9fs_get_trans_by_name(const char *s);
struct p9_trans_module *v9fs_get_default_trans(void);
void v9fs_put_trans(struct p9_trans_module *m);
+
+#define MODULE_ALIAS_9P(transport) \
+ MODULE_ALIAS("9p-" transport)
+
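A transport module declares one alias per transport name it registers, so that e.g. mount -o trans=tcp can autoload it; the fd transport, for instance, would carry something like:

	MODULE_ALIAS_9P("tcp");
	MODULE_ALIAS_9P("unix");
	MODULE_ALIAS_9P("fd");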
#endif /* NET_9P_TRANSPORT_H */
diff --git a/include/net/Space.h b/include/net/Space.h
index 9cce0d80d37a..ef42629f4258 100644
--- a/include/net/Space.h
+++ b/include/net/Space.h
@@ -3,28 +3,10 @@
* ethernet adaptor have the name "eth[0123...]".
*/
-struct net_device *hp100_probe(int unit);
struct net_device *ultra_probe(int unit);
struct net_device *wd_probe(int unit);
struct net_device *ne_probe(int unit);
-struct net_device *fmv18x_probe(int unit);
-struct net_device *i82596_probe(int unit);
-struct net_device *ni65_probe(int unit);
-struct net_device *sonic_probe(int unit);
struct net_device *smc_init(int unit);
-struct net_device *atarilance_probe(int unit);
-struct net_device *sun3lance_probe(int unit);
-struct net_device *sun3_82586_probe(int unit);
-struct net_device *apne_probe(int unit);
struct net_device *cs89x0_probe(int unit);
-struct net_device *mvme147lance_probe(int unit);
struct net_device *tc515_probe(int unit);
struct net_device *lance_probe(int unit);
-struct net_device *cops_probe(int unit);
-struct net_device *ltpc_probe(void);
-
-/* Fibre Channel adapters */
-int iph5526_probe(struct net_device *dev);
-
-/* SBNI adapters */
-int sbni_probe(int unit);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index cb382a89ea58..77ee0c657e2c 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -7,6 +7,7 @@
*/
#include <linux/refcount.h>
+#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -30,20 +31,21 @@ struct tc_action {
atomic_t tcfa_bindcnt;
int tcfa_action;
struct tcf_t tcfa_tm;
- struct gnet_stats_basic_packed tcfa_bstats;
- struct gnet_stats_basic_packed tcfa_bstats_hw;
+ struct gnet_stats_basic_sync tcfa_bstats;
+ struct gnet_stats_basic_sync tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct tc_cookie __rcu *act_cookie;
+ struct tc_cookie __rcu *user_cookie;
struct tcf_chain __rcu *goto_chain;
u32 tcfa_flags;
u8 hw_stats;
u8 used_hw_stats;
bool used_hw_stats_valid;
+ u32 in_hw_count;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -58,6 +60,15 @@ struct tc_action {
#define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \
TCA_ACT_HW_STATS_DELAYED)
+/* Reserve 16 bits for user-space. See TCA_ACT_FLAGS_NO_PERCPU_STATS. */
+#define TCA_ACT_FLAGS_USER_BITS 16
+#define TCA_ACT_FLAGS_USER_MASK 0xffff
+#define TCA_ACT_FLAGS_POLICE (1U << TCA_ACT_FLAGS_USER_BITS)
+#define TCA_ACT_FLAGS_BIND (1U << (TCA_ACT_FLAGS_USER_BITS + 1))
+#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
+#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
+#define TCA_ACT_FLAGS_AT_INGRESS (1U << (TCA_ACT_FLAGS_USER_BITS + 4))
+
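A sketch of how an action's init callback separates the two halves of the flags word under this layout (demo names hypothetical): the low 16 bits arrive from userspace, everything above them is kernel-internal.

	static int demo_act_init(struct net *net, struct nlattr *nla,
				 struct nlattr *est, struct tc_action **a,
				 struct tcf_proto *tp, u32 flags,
				 struct netlink_ext_ack *extack)
	{
		u32 user_flags = flags & TCA_ACT_FLAGS_USER_MASK;
		bool bind = flags & TCA_ACT_FLAGS_BIND;

		/* bind steers the idr refcounting; user_flags (e.g.
		 * TCA_ACT_FLAGS_NO_PERCPU_STATS) would be forwarded to
		 * tcf_idr_create_from_flags() */
		return 0;
	}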
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
@@ -80,10 +91,15 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
dtm->expires = jiffies_to_clock_t(stm->expires);
}
-#ifdef CONFIG_NET_CLS_ACT
+static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
+{
+ if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
+ return FLOW_ACTION_HW_STATS_DONT_CARE;
+ else if (!hw_stats)
+ return FLOW_ACTION_HW_STATS_DISABLED;
-#define ACT_P_CREATED 1
-#define ACT_P_DELETED 1
+ return hw_stats;
+}
typedef void (*tc_action_priv_destructor)(void *priv);
@@ -91,6 +107,7 @@ struct tc_action_ops {
struct list_head head;
char kind[IFNAMSIZ];
enum tca_id id; /* identifier should match kind */
+ unsigned int net_id;
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
@@ -99,8 +116,8 @@ struct tc_action_ops {
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
- struct nlattr *est, struct tc_action **act, int ovr,
- int bind, bool rtnl_held, struct tcf_proto *tp,
+ struct nlattr *est, struct tc_action **act,
+ struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
@@ -113,8 +130,17 @@ struct tc_action_ops {
struct psample_group *
(*get_psample_group)(const struct tc_action *a,
tc_action_priv_destructor *destructor);
+ int (*offload_act_setup)(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind,
+ struct netlink_ext_ack *extack);
};
+#ifdef CONFIG_NET_CLS_ACT
+
+#define ACT_P_BOUND 0
+#define ACT_P_CREATED 1
+#define ACT_P_DELETED 1
+
struct tc_action_net {
struct tcf_idrinfo *idrinfo;
const struct tc_action_ops *ops;
@@ -166,33 +192,30 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int bind,
u32 flags);
-void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
-
+void tcf_idr_insert_many(struct tc_action *actions[], int init_res[]);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-
-static inline int tcf_idr_release(struct tc_action *a, bool bind)
-{
- return __tcf_idr_release(a, bind, false);
-}
+int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
+#define NET_ACT_ALIAS_PREFIX "net-act-"
+#define MODULE_ALIAS_NET_ACT(kind) MODULE_ALIAS(NET_ACT_ALIAS_PREFIX kind)
int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
- struct nlattr *est, char *name, int ovr, int bind,
- struct tc_action *actions[], size_t *attr_size,
- bool rtnl_held, struct netlink_ext_ack *extack);
+ struct nlattr *est,
+ struct tc_action *actions[], int init_res[], size_t *attr_size,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
+struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
+ struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind,
- bool rtnl_held,
- struct netlink_ext_ack *extack);
+ struct tc_action_ops *a_o, int *init_res,
+ u32 flags, struct netlink_ext_ack *extack);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref, bool terse);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
@@ -202,7 +225,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
{
if (likely(a->cpu_bstats)) {
- bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
return;
}
spin_lock(&a->tcfa_lock);
@@ -236,11 +259,28 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
+int tcf_action_update_hw_stats(struct tc_action *action);
+int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+ void *cb_priv, bool add);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
struct tcf_chain **handle,
struct netlink_ext_ack *newchain);
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
struct tcf_chain *newchain);
+
+#ifdef CONFIG_INET
+DECLARE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
+#endif
+
+int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+
+#else /* !CONFIG_NET_CLS_ACT */
+
+static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+ void *cb_priv, bool add)
+{
+ return 0;
+}
+
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 18f783dcd55f..9d06eb945509 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -6,8 +6,11 @@
#define RTR_SOLICITATION_INTERVAL (4*HZ)
#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
-#define TEMP_VALID_LIFETIME (7*86400)
-#define TEMP_PREFERRED_LIFETIME (86400)
+#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
+
+#define TEMP_VALID_LIFETIME (7*86400) /* 1 week */
+#define TEMP_PREFERRED_LIFETIME (86400) /* 24 hours */
+#define REGEN_MIN_ADVANCE (2) /* 2 seconds */
#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
@@ -29,17 +32,22 @@ struct prefix_info {
__u8 length;
__u8 prefix_len;
+ union __packed {
+ __u8 flags;
+ struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
- __u8 onlink : 1,
+ __u8 onlink : 1,
autoconf : 1,
reserved : 6;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 6,
autoconf : 1,
onlink : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
+ };
+ };
__be32 valid;
__be32 prefered;
__be32 reserved2;
@@ -47,6 +55,9 @@ struct prefix_info {
struct in6_addr prefix;
};
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
+
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
@@ -62,6 +73,8 @@ struct ifa6_config {
const struct in6_addr *pfx;
unsigned int plen;
+ u8 ifa_proto;
+
const struct in6_addr *peer_pfx;
u32 rt_priority;
@@ -107,8 +120,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
@@ -221,7 +232,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void __ipv6_sock_mc_close(struct sock *sk);
void ipv6_sock_mc_close(struct sock *sk);
-bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
const struct in6_addr *src_addr);
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
@@ -233,7 +244,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-int ipv6_mc_check_icmpv6(struct sk_buff *skb);
int ipv6_mc_check_mld(struct sk_buff *skb);
void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
@@ -404,7 +414,10 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
{
const struct inet6_dev *idev = __in6_dev_get(dev);
- return !!idev->cnf.ignore_routes_with_linkdown;
+ if (unlikely(!idev))
+ return true;
+
+ return !!READ_ONCE(idev->cnf.ignore_routes_with_linkdown);
}
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
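The __packed union added to struct prefix_info (together with the new static_assert) keeps the PIO at its RFC 4861 wire size of 32 bytes while letting callers read the flags octet whole or bit by bit. A sketch of both access paths, assuming a parsed struct prefix_info *pinfo; handle_slaac_prefix() is a hypothetical consumer:

    /* Whole-byte access to the raw flags octet ... */
    u8 raw = pinfo->flags;
    /* ... or per-bit access through the anonymous bitfield struct. */
    if (pinfo->onlink && pinfo->autoconf)
            handle_slaac_prefix(pinfo);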
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index f6abcc0bbd6e..0754c463224a 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -15,6 +15,8 @@ struct key;
struct sock;
struct socket;
struct rxrpc_call;
+struct rxrpc_peer;
+enum rxrpc_abort_reason;
enum rxrpc_interruptibility {
RXRPC_INTERRUPTIBLE, /* Call is interruptible */
@@ -39,39 +41,45 @@ typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
void rxrpc_kernel_new_call_notification(struct socket *,
rxrpc_notify_new_call_t,
rxrpc_discard_new_call_t);
-struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
- struct sockaddr_rxrpc *,
- struct key *,
- unsigned long,
- s64,
- gfp_t,
- rxrpc_notify_rx_t,
- bool,
- enum rxrpc_interruptibility,
- unsigned int);
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ struct rxrpc_peer *peer,
+ struct key *key,
+ unsigned long user_call_ID,
+ s64 tx_total_len,
+ u32 hard_timeout,
+ gfp_t gfp,
+ rxrpc_notify_rx_t notify_rx,
+ u16 service_id,
+ bool upgrade,
+ enum rxrpc_interruptibility interruptibility,
+ unsigned int debug_id);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
- struct iov_iter *, bool, u32 *, u16 *);
+ struct iov_iter *, size_t *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
- u32, int, const char *);
-void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
-void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
- struct sockaddr_rxrpc *);
-bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
+ u32, int, enum rxrpc_abort_reason);
+void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call);
+void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call);
+struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
+ struct sockaddr_rxrpc *srx, gfp_t gfp);
+void rxrpc_kernel_put_peer(struct rxrpc_peer *peer);
+struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
+struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
+const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
+const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
+unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
- ktime_t *);
-bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
unsigned long);
int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val);
+int rxrpc_sock_set_security_keyring(struct sock *, struct key *);
#endif /* _NET_RXRPC_H */
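rxrpc_kernel_begin_call() now takes a pre-looked-up struct rxrpc_peer instead of a raw sockaddr, and call teardown is split into shutdown and put steps. A sketch of the lifecycle implied by the new prototypes; the argument values are illustrative and the surrounding declarations (srx, key, notify_rx, service_id) are assumed:

    struct rxrpc_peer *peer;
    struct rxrpc_call *call;

    peer = rxrpc_kernel_lookup_peer(sock, &srx, GFP_KERNEL);
    if (!peer)  /* NULL on allocation failure is assumed */
            return -ENOMEM;
    call = rxrpc_kernel_begin_call(sock, peer, key, 0 /* user_call_ID */,
                                   -1 /* tx_total_len: unknown */,
                                   0 /* hard_timeout: none */, GFP_KERNEL,
                                   notify_rx, service_id, false /* upgrade */,
                                   RXRPC_INTERRUPTIBLE, 0 /* debug_id */);
    /* ... exchange data via rxrpc_kernel_send_data()/recv_data() ... */
    rxrpc_kernel_shutdown_call(sock, call);
    rxrpc_kernel_put_call(sock, call);
    rxrpc_kernel_put_peer(peer);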
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index f42fdddecd41..627ea8e2d915 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -8,25 +8,32 @@
#include <linux/refcount.h>
#include <net/sock.h>
+#if IS_ENABLED(CONFIG_UNIX)
+struct unix_sock *unix_get_socket(struct file *filp);
+#else
+static inline struct unix_sock *unix_get_socket(struct file *filp)
+{
+ return NULL;
+}
+#endif
+
+extern spinlock_t unix_gc_lock;
+extern unsigned int unix_tot_inflight;
+
void unix_inflight(struct user_struct *user, struct file *fp);
void unix_notinflight(struct user_struct *user, struct file *fp);
-void unix_destruct_scm(struct sk_buff *skb);
void unix_gc(void);
-void wait_for_unix_gc(void);
-struct sock *unix_get_socket(struct file *filp);
+void wait_for_unix_gc(struct scm_fp_list *fpl);
+
struct sock *unix_peer_get(struct sock *sk);
-#define UNIX_HASH_SIZE 256
+#define UNIX_HASH_MOD (256 - 1)
+#define UNIX_HASH_SIZE (256 * 2)
#define UNIX_HASH_BITS 8
-extern unsigned int unix_tot_inflight;
-extern spinlock_t unix_table_lock;
-extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
-
struct unix_address {
refcount_t refcnt;
int len;
- unsigned int hash;
struct sockaddr_un name[];
};
@@ -47,12 +54,6 @@ struct scm_stat {
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
-#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-#define unix_state_lock_nested(s) \
- spin_lock_nested(&unix_sk(s)->lock, \
- SINGLE_DEPTH_NESTING)
-
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
@@ -62,7 +63,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
- atomic_long_t inflight;
+ unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
@@ -70,11 +71,26 @@ struct unix_sock {
struct socket_wq peer_wq;
wait_queue_entry_t peer_wake;
struct scm_stat scm_stat;
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ struct sk_buff *oob_skb;
+#endif
};
-static inline struct unix_sock *unix_sk(const struct sock *sk)
+#define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
+#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
+enum unix_socket_lock_class {
+ U_LOCK_NORMAL,
+ U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
+ U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+};
+
+static inline void unix_state_lock_nested(struct sock *sk,
+ enum unix_socket_lock_class subclass)
{
- return (struct unix_sock *)sk;
+ spin_lock_nested(&unix_sk(sk)->lock, subclass);
}
#define peer_wait peer_wq.wait
@@ -82,6 +98,10 @@ static inline struct unix_sock *unix_sk(const struct sock *sk)
long unix_inq_len(struct sock *sk);
long unix_outq_len(struct sock *sk);
+int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
+ int flags);
+int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
+ int flags);
#ifdef CONFIG_SYSCTL
int unix_sysctl_register(struct net *net);
void unix_sysctl_unregister(struct net *net);
@@ -89,4 +109,16 @@ void unix_sysctl_unregister(struct net *net);
static inline int unix_sysctl_register(struct net *net) { return 0; }
static inline void unix_sysctl_unregister(struct net *net) {}
#endif
+
+#ifdef CONFIG_BPF_SYSCALL
+extern struct proto unix_dgram_proto;
+extern struct proto unix_stream_proto;
+
+int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void __init unix_bpf_build_proto(void);
+#else
+static inline void __init unix_bpf_build_proto(void)
+{}
+#endif
#endif
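unix_state_lock_nested() now takes an explicit lock class, so call sites holding two unix socket locks at once can tell lockdep which acquisition is the inner one. A sketch of the double-lock pattern the U_LOCK_SECOND class is meant for (the ordering policy between sk1 and sk2 is assumed):

    /* Lock two unix sockets; the second acquisition is annotated with
     * U_LOCK_SECOND so lockdep does not flag the nesting. */
    unix_state_lock(sk1);
    unix_state_lock_nested(sk2, U_LOCK_SECOND);
    /* ... update both sockets' state ... */
    unix_state_unlock(sk2);
    unix_state_unlock(sk1);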
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index b1c717286993..535701efc1e5 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
+#include <net/sock.h>
#include <uapi/linux/vm_sockets.h>
#include "vsock_addr.h"
@@ -74,9 +75,11 @@ struct vsock_sock {
void *trans;
};
+s64 vsock_connectible_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
struct sock *vsock_create_connected(struct sock *parent);
+void vsock_data_ready(struct sock *sk);
/**** TRANSPORT ****/
@@ -135,6 +138,14 @@ struct vsock_transport {
bool (*stream_is_active)(struct vsock_sock *);
bool (*stream_allow)(u32 cid, u32 port);
+ /* SEQ_PACKET. */
+ ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ int flags);
+ int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len);
+ bool (*seqpacket_allow)(u32 remote_cid);
+ u32 (*seqpacket_has_data)(struct vsock_sock *vsk);
+
/* Notification. */
int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
@@ -156,12 +167,19 @@ struct vsock_transport {
struct vsock_transport_send_notify_data *);
/* sk_lock held by the caller */
void (*notify_buffer_size)(struct vsock_sock *, u64 *);
+ int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
/* Addressing. */
u32 (*get_local_cid)(void);
+
+ /* Read a single skb */
+ int (*read_skb)(struct vsock_sock *, skb_read_actor_t);
+
+ /* Zero-copy. */
+ bool (*msgzerocopy_allow)(void);
};
/**** CORE ****/
@@ -186,7 +204,6 @@ static inline bool __vsock_in_connected_table(struct vsock_sock *vsk)
return !list_empty(&vsk->connected_table);
}
-void vsock_release_pending(struct sock *pending);
void vsock_add_pending(struct sock *listener, struct sock *pending);
void vsock_remove_pending(struct sock *listener, struct sock *pending);
void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
@@ -197,7 +214,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
void vsock_remove_sock(struct vsock_sock *vsk);
-void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+void vsock_for_each_connected_socket(struct vsock_transport *transport,
+ void (*fn)(struct sock *sk));
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
bool vsock_find_cid(unsigned int cid);
@@ -209,9 +227,25 @@ struct vsock_tap {
struct list_head list;
};
-int vsock_init_tap(void);
int vsock_add_tap(struct vsock_tap *vt);
int vsock_remove_tap(struct vsock_tap *vt);
void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
-
+int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
+
+#ifdef CONFIG_BPF_SYSCALL
+extern struct proto vsock_proto;
+int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void __init vsock_bpf_build_proto(void);
+#else
+static inline void __init vsock_bpf_build_proto(void)
+{}
+#endif
+
+static inline bool vsock_msgzerocopy_allow(const struct vsock_transport *t)
+{
+ return t->msgzerocopy_allow && t->msgzerocopy_allow();
+}
#endif /* __AF_VSOCK_H__ */
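The vsock_msgzerocopy_allow() wrapper above treats a transport without the new msgzerocopy_allow() hook as "not supported". A sketch of how a send path might gate MSG_ZEROCOPY on it; the vsk->transport member is assumed from the full struct vsock_sock definition:

    /* Reject MSG_ZEROCOPY early when the bound transport cannot do it. */
    if ((msg->msg_flags & MSG_ZEROCOPY) &&
        !vsock_msgzerocopy_allow(vsk->transport))
            return -EOPNOTSUPP;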
diff --git a/include/net/amt.h b/include/net/amt.h
new file mode 100644
index 000000000000..c881bc8b673b
--- /dev/null
+++ b/include/net/amt.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _NET_AMT_H_
+#define _NET_AMT_H_
+
+#include <linux/siphash.h>
+#include <linux/jhash.h>
+#include <linux/netdevice.h>
+#include <net/gro_cells.h>
+#include <net/rtnetlink.h>
+
+enum amt_msg_type {
+ AMT_MSG_DISCOVERY = 1,
+ AMT_MSG_ADVERTISEMENT,
+ AMT_MSG_REQUEST,
+ AMT_MSG_MEMBERSHIP_QUERY,
+ AMT_MSG_MEMBERSHIP_UPDATE,
+ AMT_MSG_MULTICAST_DATA,
+ AMT_MSG_TEARDOWN,
+ __AMT_MSG_MAX,
+};
+
+#define AMT_MSG_MAX (__AMT_MSG_MAX - 1)
+
+enum amt_ops {
+ /* A*B */
+ AMT_OPS_INT,
+ /* A+B */
+ AMT_OPS_UNI,
+ /* A-B */
+ AMT_OPS_SUB,
+ /* B-A */
+ AMT_OPS_SUB_REV,
+ __AMT_OPS_MAX,
+};
+
+#define AMT_OPS_MAX (__AMT_OPS_MAX - 1)
+
+enum amt_filter {
+ AMT_FILTER_FWD,
+ AMT_FILTER_D_FWD,
+ AMT_FILTER_FWD_NEW,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_FILTER_ALL,
+ AMT_FILTER_NONE_NEW,
+ AMT_FILTER_BOTH,
+ AMT_FILTER_BOTH_NEW,
+ __AMT_FILTER_MAX,
+};
+
+#define AMT_FILTER_MAX (__AMT_FILTER_MAX - 1)
+
+enum amt_act {
+ AMT_ACT_GMI,
+ AMT_ACT_GMI_ZERO,
+ AMT_ACT_GT,
+ AMT_ACT_STATUS_FWD_NEW,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ AMT_ACT_STATUS_NONE_NEW,
+ __AMT_ACT_MAX,
+};
+
+#define AMT_ACT_MAX (__AMT_ACT_MAX - 1)
+
+enum amt_status {
+ AMT_STATUS_INIT,
+ AMT_STATUS_SENT_DISCOVERY,
+ AMT_STATUS_RECEIVED_DISCOVERY,
+ AMT_STATUS_SENT_ADVERTISEMENT,
+ AMT_STATUS_RECEIVED_ADVERTISEMENT,
+ AMT_STATUS_SENT_REQUEST,
+ AMT_STATUS_RECEIVED_REQUEST,
+ AMT_STATUS_SENT_QUERY,
+ AMT_STATUS_RECEIVED_QUERY,
+ AMT_STATUS_SENT_UPDATE,
+ AMT_STATUS_RECEIVED_UPDATE,
+ __AMT_STATUS_MAX,
+};
+
+#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+
+/* Gateway events only */
+enum amt_event {
+ AMT_EVENT_NONE,
+ AMT_EVENT_RECEIVE,
+ AMT_EVENT_SEND_DISCOVERY,
+ AMT_EVENT_SEND_REQUEST,
+ __AMT_EVENT_MAX,
+};
+
+struct amt_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 type:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 version:4,
+ type:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_header_discovery {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_advertisement {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+ __be32 ip4;
+} __packed;
+
+struct amt_header_request {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved1:7,
+ p:1,
+ reserved2:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ p:1,
+ reserved1:7,
+ reserved2:16;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_query {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:6,
+ l:1,
+ g:1,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ g:1,
+ l:1,
+ reserved:6,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_update {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:8,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ reserved:8,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_mcast_data {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:4,
+ version:4,
+ reserved:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 version:4,
+ type:4,
+ reserved:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_advertisement advertisement;
+ struct amt_header_request request;
+ struct amt_header_membership_query query;
+ struct amt_header_membership_update update;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_gw_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_request request;
+ struct amt_header_membership_update update;
+ };
+} __packed;
+
+struct amt_relay_headers {
+ union {
+ struct amt_header_advertisement advertisement;
+ struct amt_header_membership_query query;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_skb_cb {
+ struct amt_tunnel_list *tunnel;
+};
+
+struct amt_tunnel_list {
+ struct list_head list;
+ /* Protect all resources under an amt_tunnel_list */
+ spinlock_t lock;
+ struct amt_dev *amt;
+ u32 nr_groups;
+ u32 nr_sources;
+ enum amt_status status;
+ struct delayed_work gc_wq;
+ __be16 source_port;
+ __be32 ip4;
+ __be32 nonce;
+ siphash_key_t key;
+ u64 mac:48,
+ reserved:16;
+ struct rcu_head rcu;
+ struct hlist_head groups[];
+};
+
+union amt_addr {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+};
+
+/* RFC 3810
+ *
+ * When the router is in EXCLUDE mode, the router state is represented
+ * by the notation EXCLUDE (X,Y), where X is called the "Requested List"
+ * and Y is called the "Exclude List". All sources, except those from
+ * the Exclude List, will be forwarded by the router
+ */
+enum amt_source_status {
+ AMT_SOURCE_STATUS_NONE,
+ /* Node of Requested List */
+ AMT_SOURCE_STATUS_FWD,
+ /* Node of Exclude List */
+ AMT_SOURCE_STATUS_D_FWD,
+};
+
+/* protected by gnode->lock */
+struct amt_source_node {
+ struct hlist_node node;
+ struct amt_group_node *gnode;
+ struct delayed_work source_timer;
+ union amt_addr source_addr;
+ enum amt_source_status status;
+#define AMT_SOURCE_OLD 0
+#define AMT_SOURCE_NEW 1
+ u8 flags;
+ struct rcu_head rcu;
+};
+
+/* Protected by amt_tunnel_list->lock */
+struct amt_group_node {
+ struct amt_dev *amt;
+ union amt_addr group_addr;
+ union amt_addr host_addr;
+ bool v6;
+ u8 filter_mode;
+ u32 nr_sources;
+ struct amt_tunnel_list *tunnel_list;
+ struct hlist_node node;
+ struct delayed_work group_timer;
+ struct rcu_head rcu;
+ struct hlist_head sources[];
+};
+
+#define AMT_MAX_EVENTS 16
+struct amt_events {
+ enum amt_event event;
+ struct sk_buff *skb;
+};
+
+struct amt_dev {
+ struct net_device *dev;
+ struct net_device *stream_dev;
+ struct net *net;
+ /* Global lock for amt device */
+ spinlock_t lock;
+ /* Used only in relay mode */
+ struct list_head tunnel_list;
+ struct gro_cells gro_cells;
+
+ /* Protected by RTNL */
+ struct delayed_work discovery_wq;
+ /* Protected by RTNL */
+ struct delayed_work req_wq;
+ /* Protected by RTNL */
+ struct delayed_work secret_wq;
+ struct work_struct event_wq;
+ /* AMT status */
+ enum amt_status status;
+ /* Generated key */
+ siphash_key_t key;
+ struct socket __rcu *sock;
+ u32 max_groups;
+ u32 max_sources;
+ u32 hash_buckets;
+ u32 hash_seed;
+ /* Default 128 */
+ u32 max_tunnels;
+ /* Default 128 */
+ u32 nr_tunnels;
+ /* Gateway or Relay mode */
+ u32 mode;
+ /* Default 2268 */
+ __be16 relay_port;
+ /* Default 2268 */
+ __be16 gw_port;
+ /* Outer local ip */
+ __be32 local_ip;
+ /* Outer remote ip */
+ __be32 remote_ip;
+ /* Outer discovery ip */
+ __be32 discovery_ip;
+ /* Only used in gateway mode */
+ __be32 nonce;
+ /* Gateway sent request and received query */
+ bool ready4;
+ bool ready6;
+ u8 req_cnt;
+ u8 qi;
+ u64 qrv;
+ u64 qri;
+ /* Used only in gateway mode */
+ u64 mac:48,
+ reserved:16;
+ /* AMT gateway side message handler queue */
+ struct amt_events events[AMT_MAX_EVENTS];
+ u8 event_idx;
+ u8 nr_events;
+};
+
+#define AMT_TOS 0xc0
+#define AMT_IPHDR_OPTS 4
+#define AMT_IP6HDR_OPTS 8
+#define AMT_GC_INTERVAL (30 * 1000)
+#define AMT_MAX_GROUP 32
+#define AMT_MAX_SOURCE 128
+#define AMT_HSIZE_SHIFT 8
+#define AMT_HSIZE (1 << AMT_HSIZE_SHIFT)
+
+#define AMT_DISCOVERY_TIMEOUT 5000
+#define AMT_INIT_REQ_TIMEOUT 1
+#define AMT_INIT_QUERY_INTERVAL 125
+#define AMT_MAX_REQ_TIMEOUT 120
+#define AMT_MAX_REQ_COUNT 3
+#define AMT_SECRET_TIMEOUT 60000
+#define IANA_AMT_UDP_PORT 2268
+#define AMT_MAX_TUNNELS 128
+#define AMT_MAX_REQS 128
+#define AMT_GW_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_gw_headers))
+#define AMT_RELAY_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_relay_headers))
+
+static inline bool netif_is_amt(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "amt");
+}
+
+static inline u64 amt_gmi(const struct amt_dev *amt)
+{
+ return ((amt->qrv * amt->qi) + amt->qri) * 1000;
+}
+
+#endif /* _NET_AMT_H_ */
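amt_gmi() computes the group membership interval in milliseconds from the robustness variable (qrv), query interval (qi) and query response interval (qri), following the IGMPv3/MLDv2 formula GMI = (QRV * QI) + QRI. A worked example with assumed RFC-default values:

    /* Assuming qrv = 2, qi = 125 (seconds) and qri = 10 (seconds):
     *   ((2 * 125) + 10) * 1000 = 260000 ms, i.e. 260 seconds. */
    u64 gmi = amt_gmi(amt);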
diff --git a/include/net/arp.h b/include/net/arp.h
index 4950191f6b2b..e8747e0713c7 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -38,11 +38,11 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
{
struct neighbour *n;
- rcu_read_lock_bh();
+ rcu_read_lock();
n = __ipv4_neigh_lookup_noref(dev, key);
if (n && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return n;
}
@@ -51,16 +51,10 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
{
struct neighbour *n;
- rcu_read_lock_bh();
+ rcu_read_lock();
n = __ipv4_neigh_lookup_noref(dev, key);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
- rcu_read_unlock_bh();
+ neigh_confirm(n);
+ rcu_read_unlock();
}
void arp_init(void);
@@ -71,6 +65,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
const unsigned char *src_hw, const unsigned char *th);
int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
void arp_ifdown(struct net_device *dev);
+int arp_invalidate(struct net_device *dev, __be32 ip, bool force);
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
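With the switch from rcu_read_lock_bh() to rcu_read_lock(), __ipv4_neigh_lookup() still hands back a referenced neighbour that the caller must release. A sketch of a balanced caller; use_neighbour() is a hypothetical consumer:

    struct neighbour *n = __ipv4_neigh_lookup(dev, ip);
    if (n) {
            /* n carries a reference from refcount_inc_not_zero() ... */
            use_neighbour(n);
            neigh_release(n);   /* ... which must be dropped here */
    }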
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 8b7eb46ad72d..0d939e5aee4e 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -187,18 +187,12 @@ typedef struct {
typedef struct ax25_route {
struct ax25_route *next;
- refcount_t refcount;
ax25_address callsign;
struct net_device *dev;
ax25_digi *digipeat;
char ip_mode;
} ax25_route;
-static inline void ax25_hold_route(ax25_route *ax25_rt)
-{
- refcount_inc(&ax25_rt->refcount);
-}
-
void __ax25_put_route(ax25_route *ax25_rt);
extern rwlock_t ax25_route_lock;
@@ -213,12 +207,6 @@ static inline void ax25_route_lock_unuse(void)
read_unlock(&ax25_route_lock);
}
-static inline void ax25_put_route(ax25_route *ax25_rt)
-{
- if (refcount_dec_and_test(&ax25_rt->refcount))
- __ax25_put_route(ax25_rt);
-}
-
typedef struct {
char slave; /* slave_mode? */
struct timer_list slave_timer; /* timeout timer */
@@ -229,13 +217,18 @@ struct ctl_table;
typedef struct ax25_dev {
struct ax25_dev *next;
+
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct net_device *forward;
struct ctl_table_header *sysheader;
int values[AX25_MAX_VALUES];
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_dama_info dama;
#endif
+ refcount_t refcount;
+ bool device_up;
} ax25_dev;
typedef struct ax25_cb {
@@ -243,6 +236,7 @@ typedef struct ax25_cb {
ax25_address source_addr, dest_addr;
ax25_digi *digipeat;
ax25_dev *ax25_dev;
+ netdevice_tracker dev_tracker;
unsigned char iamdigi;
unsigned char state, modulus, pidincl;
unsigned short vs, vr, va;
@@ -266,10 +260,7 @@ struct ax25_sock {
struct ax25_cb *cb;
};
-static inline struct ax25_sock *ax25_sk(const struct sock *sk)
-{
- return (struct ax25_sock *) sk;
-}
+#define ax25_sk(ptr) container_of_const(ptr, struct ax25_sock, sk)
static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
{
@@ -290,6 +281,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
}
}
+static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+{
+ refcount_inc(&ax25_dev->refcount);
+}
+
+static inline void ax25_dev_put(ax25_dev *ax25_dev)
+{
+ if (refcount_dec_and_test(&ax25_dev->refcount)) {
+ kfree(ax25_dev);
+ }
+}
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
@@ -304,7 +306,7 @@ extern spinlock_t ax25_list_lock;
void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *,
struct net_device *);
void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
void ax25_destroy_socket(ax25_cb *);
@@ -384,10 +386,11 @@ struct ax25_linkfail {
void ax25_linkfail_register(struct ax25_linkfail *lf);
void ax25_linkfail_release(struct ax25_linkfail *lf);
-int __must_check ax25_listen_register(ax25_address *, struct net_device *);
-void ax25_listen_release(ax25_address *, struct net_device *);
+int __must_check ax25_listen_register(const ax25_address *,
+ struct net_device *);
+void ax25_listen_release(const ax25_address *, struct net_device *);
int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-int ax25_listen_mine(ax25_address *, struct net_device *);
+int ax25_listen_mine(const ax25_address *, struct net_device *);
void ax25_link_failed(ax25_cb *, int);
int ax25_protocol_is_registered(unsigned int);
@@ -401,8 +404,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
- ax25_digi *, struct net_device *);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *,
+ ax25_address *, ax25_digi *, struct net_device *);
void ax25_output(ax25_cb *, int, struct sk_buff *);
void ax25_kick(ax25_cb *);
void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
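ax25_dev now carries its own refcount, replacing the per-route one removed above; ax25_dev_hold() and ax25_dev_put() must be paired, with the final put freeing the structure. A minimal sketch of the intended usage:

    ax25_dev_hold(ax25_dev);   /* take a reference while a cb points at it */
    /* ... use ax25_dev ... */
    ax25_dev_put(ax25_dev);    /* drop it; the last put kfree()s the device */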
diff --git a/include/net/ax88796.h b/include/net/ax88796.h
index aa52b2e8ff7b..303100f08ab8 100644
--- a/include/net/ax88796.h
+++ b/include/net/ax88796.h
@@ -8,6 +8,8 @@
#ifndef __NET_AX88796_PLAT_H
#define __NET_AX88796_PLAT_H
+#include <linux/types.h>
+
struct sk_buff;
struct net_device;
struct platform_device;
@@ -32,10 +34,13 @@ struct ax_plat_data {
const unsigned char *buf, int star_page);
void (*block_input)(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
- /* returns nonzero if a pending interrupt request might by caused by
- * the ax88786. Handles all interrupts if set to NULL
+ /* returns nonzero if a pending interrupt request might be caused by
+ * the ax88796. Handles all interrupts if set to NULL
*/
int (*check_irq)(struct platform_device *pdev);
};
+/* exported from ax88796.c for xsurf100.c */
+extern void ax_NS8390_reinit(struct net_device *dev);
+
#endif /* __NET_AX88796_PLAT_H */
diff --git a/include/net/bareudp.h b/include/net/bareudp.h
index dc65a0d71d9b..17610c8d6361 100644
--- a/include/net/bareudp.h
+++ b/include/net/bareudp.h
@@ -3,21 +3,10 @@
#ifndef __NET_BAREUDP_H
#define __NET_BAREUDP_H
+#include <linux/netdevice.h>
#include <linux/types.h>
-#include <linux/skbuff.h>
#include <net/rtnetlink.h>
-struct bareudp_conf {
- __be16 ethertype;
- __be16 port;
- u16 sport_min;
- bool multi_proto_mode;
-};
-
-struct net_device *bareudp_dev_create(struct net *net, const char *name,
- u8 name_assign_type,
- struct bareudp_conf *info);
-
static inline bool netif_is_bareudp(const struct net_device *dev)
{
return dev->rtnl_link_ops &&
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 9125effbf448..9fe95a22abeb 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright 2023 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -55,6 +56,8 @@
#define BTPROTO_CMTP 5
#define BTPROTO_HIDP 6
#define BTPROTO_AVDTP 7
+#define BTPROTO_ISO 8
+#define BTPROTO_LAST BTPROTO_ISO
#define SOL_HCI 0
#define SOL_L2CAP 6
@@ -149,10 +152,95 @@ struct bt_voice {
#define BT_MODE_LE_FLOWCTL 0x03
#define BT_MODE_EXT_FLOWCTL 0x04
-#define BT_PKT_STATUS 16
+#define BT_PKT_STATUS 16
#define BT_SCM_PKT_STATUS 0x03
+#define BT_ISO_QOS 17
+
+#define BT_ISO_QOS_CIG_UNSET 0xff
+#define BT_ISO_QOS_CIS_UNSET 0xff
+
+#define BT_ISO_QOS_BIG_UNSET 0xff
+#define BT_ISO_QOS_BIS_UNSET 0xff
+
+#define BT_ISO_SYNC_TIMEOUT 0x07d0 /* 20 secs */
+
+struct bt_iso_io_qos {
+ __u32 interval;
+ __u16 latency;
+ __u16 sdu;
+ __u8 phy;
+ __u8 rtn;
+};
+
+struct bt_iso_ucast_qos {
+ __u8 cig;
+ __u8 cis;
+ __u8 sca;
+ __u8 packing;
+ __u8 framing;
+ struct bt_iso_io_qos in;
+ struct bt_iso_io_qos out;
+};
+
+struct bt_iso_bcast_qos {
+ __u8 big;
+ __u8 bis;
+ __u8 sync_factor;
+ __u8 packing;
+ __u8 framing;
+ struct bt_iso_io_qos in;
+ struct bt_iso_io_qos out;
+ __u8 encryption;
+ __u8 bcode[16];
+ __u8 options;
+ __u16 skip;
+ __u16 sync_timeout;
+ __u8 sync_cte_type;
+ __u8 mse;
+ __u16 timeout;
+};
+
+struct bt_iso_qos {
+ union {
+ struct bt_iso_ucast_qos ucast;
+ struct bt_iso_bcast_qos bcast;
+ };
+};
+
+#define BT_ISO_PHY_1M 0x01
+#define BT_ISO_PHY_2M 0x02
+#define BT_ISO_PHY_CODED 0x04
+#define BT_ISO_PHY_ANY (BT_ISO_PHY_1M | BT_ISO_PHY_2M | \
+ BT_ISO_PHY_CODED)
+
+#define BT_CODEC 19
+
+struct bt_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct bt_codec {
+ __u8 id;
+ __u16 cid;
+ __u16 vid;
+ __u8 data_path;
+ __u8 num_caps;
+} __packed;
+
+struct bt_codecs {
+ __u8 num_codecs;
+ struct bt_codec codecs[];
+} __packed;
+
+#define BT_CODEC_CVSD 0x02
+#define BT_CODEC_TRANSPARENT 0x03
+#define BT_CODEC_MSBC 0x05
+
+#define BT_ISO_BASE 20
+
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -180,19 +268,21 @@ void bt_err_ratelimited(const char *fmt, ...);
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
#endif
+#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null")
+
#define bt_dev_info(hdev, fmt, ...) \
- BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_warn(hdev, fmt, ...) \
- BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_err(hdev, fmt, ...) \
- BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_dbg(hdev, fmt, ...) \
- BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_warn_ratelimited(hdev, fmt, ...) \
- bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
#define bt_dev_err_ratelimited(hdev, fmt, ...) \
- bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
/* Connection and socket states */
enum {
@@ -298,6 +388,7 @@ struct bt_sock {
enum {
BT_SK_DEFER_SETUP,
BT_SK_SUSPEND,
+ BT_SK_PKT_STATUS
};
struct bt_sock_list {
@@ -312,6 +403,8 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
+ struct proto *prot, int proto, gfp_t prio, int kern);
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -319,7 +412,7 @@ int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
-int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags);
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
void bt_accept_unlink(struct sock *sk);
@@ -342,10 +435,6 @@ struct l2cap_ctrl {
struct l2cap_chan *chan;
};
-struct sco_ctrl {
- u8 pkt_status;
-};
-
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
@@ -356,6 +445,7 @@ typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
#define HCI_REQ_SKB BIT(1)
struct hci_ctrl {
+ struct sock *sk;
u16 opcode;
u8 req_flags;
u8 req_event;
@@ -365,22 +455,32 @@ struct hci_ctrl {
};
};
+struct mgmt_ctrl {
+ struct hci_dev *hdev;
+ u16 opcode;
+};
+
struct bt_skb_cb {
u8 pkt_type;
u8 force_active;
u16 expect;
u8 incoming:1;
+ u8 pkt_status:2;
union {
struct l2cap_ctrl l2cap;
- struct sco_ctrl sco;
struct hci_ctrl hci;
+ struct mgmt_ctrl mgmt;
+ struct scm_creds creds;
};
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
+#define hci_skb_pkt_status(skb) bt_cb((skb))->pkt_status
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
+#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
+#define hci_skb_sk(skb) bt_cb((skb))->hci.sk
static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
@@ -420,7 +520,73 @@ out:
return NULL;
}
+/* Shall not be called with lock_sock held */
+static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb;
+ size_t size = min_t(size_t, len, mtu);
+ int err;
+
+ skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(err);
+
+ skb_reserve(skb, headroom);
+ skb_tailroom_reserve(skb, mtu, tailroom);
+
+ if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
+ kfree_skb(skb);
+ return ERR_PTR(-EFAULT);
+ }
+
+ skb->priority = READ_ONCE(sk->sk_priority);
+
+ return skb;
+}
+
+/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
+ * according to the MTU.
+ */
+static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb, **frag;
+
+ skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(skb))
+ return skb;
+
+ len -= skb->len;
+ if (!len)
+ return skb;
+
+ /* Add remaining data over MTU as continuation fragments */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(tmp))
+ return skb;
+
+ len -= tmp->len;
+
+ *frag = tmp;
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+}
+
int bt_to_errno(u16 code);
+__u8 bt_status(int err);
void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
@@ -458,8 +624,30 @@ static inline void sco_exit(void)
}
#endif
+#if IS_ENABLED(CONFIG_BT_LE)
+int iso_init(void);
+int iso_exit(void);
+bool iso_enabled(void);
+#else
+static inline int iso_init(void)
+{
+ return 0;
+}
+
+static inline int iso_exit(void)
+{
+ return 0;
+}
+
+static inline bool iso_enabled(void)
+{
+ return false;
+}
+#endif
+
int mgmt_init(void);
void mgmt_exit(void);
+void mgmt_cleanup(struct sock *sk);
void bt_sock_reclassify_lock(struct sock *sk, int proto);
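Of the two helpers added above, bt_skb_sendmsg() copies at most one MTU's worth of msg data into a fresh skb, while bt_skb_sendmmsg() chains any remainder as MTU-sized continuation fragments on frag_list. A sketch of a protocol sendmsg path using them; the headroom/tailroom values are illustrative only:

    struct sk_buff *skb;

    skb = bt_skb_sendmmsg(sk, msg, len, mtu, 4 /* headroom */, 0);
    if (IS_ERR(skb))
            return PTR_ERR(skb);
    /* skb now holds min(len, mtu) bytes, with any remainder queued
     * as continuation fragments on skb_shinfo(skb)->frag_list. */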
diff --git a/include/net/bluetooth/coredump.h b/include/net/bluetooth/coredump.h
new file mode 100644
index 000000000000..72f51b587a04
--- /dev/null
+++ b/include/net/bluetooth/coredump.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Google Corporation
+ */
+
+#ifndef __COREDUMP_H
+#define __COREDUMP_H
+
+#define DEVCOREDUMP_TIMEOUT msecs_to_jiffies(10000) /* 10 sec */
+
+typedef void (*coredump_t)(struct hci_dev *hdev);
+typedef void (*dmp_hdr_t)(struct hci_dev *hdev, struct sk_buff *skb);
+typedef void (*notify_change_t)(struct hci_dev *hdev, int state);
+
+/* struct hci_devcoredump - Devcoredump state
+ *
+ * @supported: Indicates if FW dump collection is supported by driver
+ * @state: Current state of dump collection
+ * @timeout: Indicates a timeout for collecting the devcoredump
+ *
+ * @alloc_size: Total size of the dump
+ * @head: Start of the dump
+ * @tail: Pointer to current end of dump
+ * @end: head + alloc_size for easy comparisons
+ *
+ * @dump_q: Dump queue for state machine to process
+ * @dump_rx: Devcoredump state machine work
+ * @dump_timeout: Devcoredump timeout work
+ *
+ * @coredump: Called from the driver's .coredump() function.
+ * @dmp_hdr: Create a dump header to identify controller/fw/driver info
+ * @notify_change: Notify driver when devcoredump state has changed
+ */
+struct hci_devcoredump {
+ bool supported;
+
+ enum devcoredump_state {
+ HCI_DEVCOREDUMP_IDLE,
+ HCI_DEVCOREDUMP_ACTIVE,
+ HCI_DEVCOREDUMP_DONE,
+ HCI_DEVCOREDUMP_ABORT,
+ HCI_DEVCOREDUMP_TIMEOUT,
+ } state;
+
+ unsigned long timeout;
+
+ size_t alloc_size;
+ char *head;
+ char *tail;
+ char *end;
+
+ struct sk_buff_head dump_q;
+ struct work_struct dump_rx;
+ struct delayed_work dump_timeout;
+
+ coredump_t coredump;
+ dmp_hdr_t dmp_hdr;
+ notify_change_t notify_change;
+};
+
+#ifdef CONFIG_DEV_COREDUMP
+
+void hci_devcd_reset(struct hci_dev *hdev);
+void hci_devcd_rx(struct work_struct *work);
+void hci_devcd_timeout(struct work_struct *work);
+
+int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
+ dmp_hdr_t dmp_hdr, notify_change_t notify_change);
+int hci_devcd_init(struct hci_dev *hdev, u32 dump_size);
+int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb);
+int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len);
+int hci_devcd_complete(struct hci_dev *hdev);
+int hci_devcd_abort(struct hci_dev *hdev);
+
+#else
+
+static inline void hci_devcd_reset(struct hci_dev *hdev) {}
+static inline void hci_devcd_rx(struct work_struct *work) {}
+static inline void hci_devcd_timeout(struct work_struct *work) {}
+
+static inline int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
+ dmp_hdr_t dmp_hdr,
+ notify_change_t notify_change)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hci_devcd_append_pattern(struct hci_dev *hdev,
+ u8 pattern, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hci_devcd_complete(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hci_devcd_abort(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* __COREDUMP_H */
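A driver opts into this devcoredump machinery once at setup time by registering its three callbacks. A sketch, with hypothetical foo_* driver hooks:

    /* foo_coredump/foo_dmp_hdr/foo_notify_change are hypothetical driver
     * functions matching the coredump_t/dmp_hdr_t/notify_change_t typedefs. */
    err = hci_devcd_register(hdev, foo_coredump, foo_dmp_hdr,
                             foo_notify_change);
    if (err)   /* -EOPNOTSUPP without CONFIG_DEV_COREDUMP, per the stubs */
            bt_dev_warn(hdev, "devcoredump not available");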
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index c8e67042a3b1..8701ca5f31ee 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright 2023 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -36,7 +37,7 @@
#define HCI_MAX_AMP_ASSOC_SIZE 672
-#define HCI_MAX_CSB_DATA_SIZE 252
+#define HCI_MAX_CPB_DATA_SIZE 252
/* HCI dev events */
#define HCI_DEV_REG 1
@@ -238,6 +239,105 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
+
+ /*
+ * When this quirk is set, then the hci_suspend_notifier is not
+ * registered. This is intended for devices which drop completely
+ * from the bus on system-suspend and which will show up as a new
+ * HCI after resume.
+ */
+ HCI_QUIRK_NO_SUSPEND_NOTIFIER,
+
+ /*
+ * When this quirk is set, LE tx power is not queried on startup
+ * and the min/max tx power values default to HCI_TX_POWER_INVALID.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
+
+ /* When this quirk is set, HCI_OP_SET_EVENT_FLT requests with
+ * HCI_FLT_CLEAR_ALL are ignored and event filtering is
+ * completely avoided. A subset of the CSR controller
+ * clones struggle with this and instantly lock up.
+ *
+ * Note that devices using this must (separately) disable
+ * runtime suspend, because event filtering takes place there.
+ */
+ HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL,
+
+ /*
+ * When this quirk is set, disables the use of
+ * HCI_OP_ENHANCED_SETUP_SYNC_CONN command to setup SCO connections.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN,
+
+ /*
+ * When this quirk is set, the HCI_OP_LE_SET_EXT_SCAN_ENABLE command is
+ * disabled. This is required for some Broadcom controllers which
+ * erroneously claim to support extended scanning.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_EXT_SCAN,
+
+ /*
+ * When this quirk is set, the HCI_OP_GET_MWS_TRANSPORT_CONFIG command is
+ * disabled. This is required for some Broadcom controllers which
+ * erroneously claim to support MWS Transport Layer Configuration.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG,
+
+ /* When this quirk is set, max_page for local extended features
+ * is set to 1, even if controller reports higher number. Some
+ * controllers (e.g. RTL8723CS) report more pages, but they
+ * don't actually support features declared there.
+ */
+ HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
+
+ /*
+ * When this quirk is set, the HCI_OP_LE_SET_RPA_TIMEOUT command is
+ * skipped during initialization. This is required for the Actions
+ * Semiconductor ATS2851 based controllers, which erroneously claim
+ * to support it.
+ */
+ HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+
+ /* When this quirk is set, MSFT extension monitor tracking by
+ * address filter is supported. Since the number of devices each
+ * pattern can track is limited, this feature supports tracking
+ * multiple devices concurrently if the controller supports multiple
+ * address filters.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER,
+
+ /*
+ * When this quirk is set, LE Coded PHY shall not be used. This is
+ * required for some Intel controllers which erroneously claim to
+ * support it but it causes problems with extended scanning.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_LE_CODED,
+
+ /*
+ * When this quirk is set, the HCI_OP_READ_ENC_KEY_SIZE command is
+ * skipped during an HCI_EV_ENCRYPT_CHANGE event. This is required
+ * for Actions Semiconductor ATS2851 based controllers, which erroneously
+ * claim to support it.
+ */
+ HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
};
/* HCI device flags */
@@ -279,6 +379,8 @@ enum {
enum {
HCI_SETUP,
HCI_CONFIG,
+ HCI_DEBUGFS_CREATED,
+ HCI_POWERING_DOWN,
HCI_AUTO_OFF,
HCI_RFKILLED,
HCI_MGMT,
@@ -291,6 +393,7 @@ enum {
HCI_USER_CHANNEL,
HCI_EXT_CONFIGURED,
HCI_LE_ADV,
+ HCI_LE_PER_ADV,
HCI_LE_SCAN,
HCI_SSP_ENABLED,
HCI_SC_ENABLED,
@@ -299,7 +402,6 @@ enum {
HCI_LIMITED_PRIVACY,
HCI_RPA_EXPIRED,
HCI_RPA_RESOLVING,
- HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
HCI_ADVERTISING_CONNECTABLE,
@@ -312,6 +414,8 @@ enum {
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
HCI_WIDEBAND_SPEECH_ENABLED,
+ HCI_EVENT_FILTER_CONFIGURED,
+ HCI_PA_SYNC,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
@@ -321,6 +425,14 @@ enum {
HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
+ HCI_QUALITY_REPORT,
+ HCI_OFFLOAD_CODECS_ENABLED,
+ HCI_LE_SIMULTANEOUS_ROLES,
+ HCI_CMD_DRAIN_WORKQUEUE,
+
+ HCI_MESH_EXPERIMENTAL,
+ HCI_MESH,
+ HCI_MESH_SENDING,
__HCI_NUM_FLAGS,
};
@@ -330,9 +442,10 @@ enum {
#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
-#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
+#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
@@ -455,6 +568,7 @@ enum {
#define LMP_EXT_INQ 0x01
#define LMP_SIMUL_LE_BR 0x02
#define LMP_SIMPLE_PAIR 0x08
+#define LMP_ERR_DATA_REPORTING 0x20
#define LMP_NO_FLUSH 0x40
#define LMP_LSTO 0x01
@@ -462,10 +576,10 @@ enum {
#define LMP_EXTFEATURES 0x80
/* Extended LMP features */
-#define LMP_CSB_MASTER 0x01
-#define LMP_CSB_SLAVE 0x02
-#define LMP_SYNC_TRAIN 0x04
-#define LMP_SYNC_SCAN 0x08
+#define LMP_CPB_CENTRAL 0x01
+#define LMP_CPB_PERIPHERAL 0x02
+#define LMP_SYNC_TRAIN 0x04
+#define LMP_SYNC_SCAN 0x08
#define LMP_SC 0x01
#define LMP_PING 0x02
@@ -479,7 +593,7 @@ enum {
/* LE features */
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
-#define HCI_LE_SLAVE_FEATURES 0x08
+#define HCI_LE_PERIPHERAL_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_LL_PRIVACY 0x40
@@ -487,9 +601,12 @@ enum {
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_EXT_ADV 0x10
+#define HCI_LE_PERIODIC_ADV 0x20
#define HCI_LE_CHAN_SEL_ALG2 0x40
-#define HCI_LE_CIS_MASTER 0x10
-#define HCI_LE_CIS_SLAVE 0x20
+#define HCI_LE_CIS_CENTRAL 0x10
+#define HCI_LE_CIS_PERIPHERAL 0x20
+#define HCI_LE_ISO_BROADCASTER 0x40
+#define HCI_LE_ISO_SYNC_RECEIVER 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -544,16 +661,20 @@ enum {
#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_COMMAND_DISALLOWED 0x0c
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_INVALID_PARAMETERS 0x12
#define HCI_ERROR_REMOTE_USER_TERM 0x13
#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+#define HCI_ERROR_CANCELLED_BY_HOST 0x44
/* Flow control modes */
#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
@@ -563,6 +684,8 @@ enum {
#define HCI_TX_POWER_INVALID 127
#define HCI_RSSI_INVALID 127
+#define HCI_SYNC_HANDLE_INVALID 0xffff
+
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
@@ -582,6 +705,7 @@ enum {
#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
#define EIR_DEVICE_ID 0x10 /* device ID */
#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_SERVICE_DATA 0x16 /* Service Data */
#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
#define EIR_LE_ROLE 0x1C /* LE role */
#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
@@ -861,23 +985,57 @@ struct hci_cp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
+#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+struct hci_coding_format {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+} __packed;
+
+struct hci_cp_enhanced_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ struct hci_coding_format tx_coding_format;
+ struct hci_coding_format rx_coding_format;
+ __le16 tx_codec_frame_size;
+ __le16 rx_codec_frame_size;
+ __le32 in_bandwidth;
+ __le32 out_bandwidth;
+ struct hci_coding_format in_coding_format;
+ struct hci_coding_format out_coding_format;
+ __le16 in_coded_data_size;
+ __le16 out_coded_data_size;
+ __u8 in_pcm_data_format;
+ __u8 out_pcm_data_format;
+ __u8 in_pcm_sample_payload_msb_pos;
+ __u8 out_pcm_sample_payload_msb_pos;
+ __u8 in_data_path;
+ __u8 out_data_path;
+ __u8 in_transport_unit_size;
+ __u8 out_transport_unit_size;
+ __le16 max_latency;
+ __le16 pkt_type;
+ __u8 retrans_effort;
+} __packed;
+
struct hci_rp_logical_link_cancel {
__u8 status;
__u8 phy_handle;
__u8 flow_spec_id;
} __packed;
-#define HCI_OP_SET_CSB 0x0441
-struct hci_cp_set_csb {
+#define HCI_OP_SET_CPB 0x0441
+struct hci_cp_set_cpb {
__u8 enable;
__u8 lt_addr;
__u8 lpo_allowed;
__le16 packet_type;
__le16 interval_min;
__le16 interval_max;
- __le16 csb_sv_tout;
+ __le16 cpb_sv_tout;
} __packed;
-struct hci_rp_set_csb {
+struct hci_rp_set_cpb {
__u8 status;
__u8 lt_addr;
__le16 interval;
@@ -1000,8 +1158,8 @@ struct hci_cp_read_stored_link_key {
} __packed;
struct hci_rp_read_stored_link_key {
__u8 status;
- __u8 max_keys;
- __u8 num_keys;
+ __le16 max_keys;
+ __le16 num_keys;
} __packed;
#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
@@ -1011,7 +1169,7 @@ struct hci_cp_delete_stored_link_key {
} __packed;
struct hci_rp_delete_stored_link_key {
__u8 status;
- __u8 num_keys;
+ __le16 num_keys;
} __packed;
#define HCI_MAX_NAME_LENGTH 248
@@ -1174,14 +1332,14 @@ struct hci_rp_delete_reserved_lt_addr {
__u8 lt_addr;
} __packed;
-#define HCI_OP_SET_CSB_DATA 0x0c76
-struct hci_cp_set_csb_data {
+#define HCI_OP_SET_CPB_DATA 0x0c76
+struct hci_cp_set_cpb_data {
__u8 lt_addr;
__u8 fragment;
__u8 data_length;
- __u8 data[HCI_MAX_CSB_DATA_SIZE];
+ __u8 data[HCI_MAX_CPB_DATA_SIZE];
} __packed;
-struct hci_rp_set_csb_data {
+struct hci_rp_set_cpb_data {
__u8 status;
__u8 lt_addr;
} __packed;
@@ -1240,6 +1398,14 @@ struct hci_rp_read_local_oob_ext_data {
__u8 rand256[16];
} __packed;
+#define HCI_CONFIGURE_DATA_PATH 0x0c83
+struct hci_op_configure_data_path {
+ __u8 direction;
+ __u8 data_path_id;
+ __u8 vnd_len;
+ __u8 vnd_data[];
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -1297,6 +1463,28 @@ struct hci_rp_read_data_block_size {
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
+struct hci_std_codecs {
+ __u8 num;
+ __u8 codec[];
+} __packed;
+
+struct hci_vnd_codec {
+ /* company id */
+ __le16 cid;
+ /* vendor codec id */
+ __le16 vid;
+} __packed;
+
+struct hci_vnd_codecs {
+ __u8 num;
+ struct hci_vnd_codec codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs {
+ __u8 status;
+ struct hci_std_codecs std_codecs;
+ struct hci_vnd_codecs vnd_codecs;
+} __packed;
#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c
struct hci_rp_read_local_pairing_opts {
@@ -1305,6 +1493,53 @@ struct hci_rp_read_local_pairing_opts {
__u8 max_key_size;
} __packed;
+#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d
+struct hci_std_codec_v2 {
+ __u8 id;
+ __u8 transport;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ __u8 num;
+ struct hci_std_codec_v2 codec[];
+} __packed;
+
+struct hci_vnd_codec_v2 {
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+} __packed;
+
+struct hci_vnd_codecs_v2 {
+ __u8 num;
+ struct hci_vnd_codec_v2 codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs_v2 {
+ __u8 status;
+ struct hci_std_codecs_v2 std_codecs;
+ struct hci_vnd_codecs_v2 vendor_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e
+struct hci_op_read_local_codec_caps {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+ __u8 direction;
+} __packed;
+
+struct hci_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct hci_rp_read_local_codec_caps {
+ __u8 status;
+ __u8 num_caps;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
__u8 status;
@@ -1495,7 +1730,7 @@ struct hci_cp_le_set_scan_enable {
} __packed;
#define HCI_LE_USE_PEER_ADDR 0x00
-#define HCI_LE_USE_WHITELIST 0x01
+#define HCI_LE_USE_ACCEPT_LIST 0x01
#define HCI_OP_LE_CREATE_CONN 0x200d
struct hci_cp_le_create_conn {
@@ -1515,22 +1750,22 @@ struct hci_cp_le_create_conn {
#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
-#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
-struct hci_rp_le_read_white_list_size {
+#define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f
+struct hci_rp_le_read_accept_list_size {
__u8 status;
__u8 size;
} __packed;
-#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010
+#define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010
-#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011
-struct hci_cp_le_add_to_white_list {
+#define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011
+struct hci_cp_le_add_to_accept_list {
__u8 bdaddr_type;
bdaddr_t bdaddr;
} __packed;
-#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012
-struct hci_cp_le_del_from_white_list {
+#define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012
+struct hci_cp_le_del_from_accept_list {
__u8 bdaddr_type;
bdaddr_t bdaddr;
} __packed;
@@ -1718,6 +1953,22 @@ struct hci_cp_le_ext_conn_param {
__le16 max_ce_len;
} __packed;
+#define HCI_OP_LE_PA_CREATE_SYNC 0x2044
+struct hci_cp_le_pa_create_sync {
+ __u8 options;
+ __u8 sid;
+ __u8 addr_type;
+ bdaddr_t addr;
+ __le16 skip;
+ __le16 sync_timeout;
+ __u8 sync_cte_type;
+} __packed;
+
+#define HCI_OP_LE_PA_TERM_SYNC 0x2046
+struct hci_cp_le_pa_term_sync {
+ __le16 handle;
+} __packed;
+
#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
struct hci_rp_le_read_num_supported_adv_sets {
__u8 status;
@@ -1752,26 +2003,21 @@ struct hci_rp_le_set_ext_adv_params {
__u8 tx_power;
} __packed;
-#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
-struct hci_cp_le_set_ext_adv_enable {
- __u8 enable;
- __u8 num_of_sets;
- __u8 data[];
-} __packed;
-
struct hci_cp_ext_adv_set {
__u8 handle;
__le16 duration;
__u8 max_events;
} __packed;
+#define HCI_MAX_EXT_AD_LENGTH 251
+
#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
struct hci_cp_le_set_ext_adv_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[HCI_MAX_AD_LENGTH];
+ __u8 data[];
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
@@ -1780,7 +2026,39 @@ struct hci_cp_le_set_ext_scan_rsp_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[HCI_MAX_AD_LENGTH];
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
+struct hci_cp_le_set_ext_adv_enable {
+ __u8 enable;
+ __u8 num_of_sets;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_PER_ADV_PARAMS 0x203e
+struct hci_cp_le_set_per_adv_params {
+ __u8 handle;
+ __le16 min_interval;
+ __le16 max_interval;
+ __le16 periodic_properties;
+} __packed;
+
+#define HCI_MAX_PER_AD_LENGTH 252
+#define HCI_MAX_PER_AD_TOT_LEN 1650
+
+#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f
+struct hci_cp_le_set_per_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+#define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040
+struct hci_cp_le_set_per_adv_enable {
+ __u8 enable;
+ __u8 handle;
} __packed;
#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
@@ -1797,6 +2075,23 @@ struct hci_cp_le_set_adv_set_rand_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_LE_READ_TRANSMIT_POWER 0x204b
+struct hci_rp_le_read_transmit_power {
+ __u8 status;
+ __s8 min_le_tx_power;
+ __s8 max_le_tx_power;
+} __packed;
+
+#define HCI_NETWORK_PRIVACY 0x00
+#define HCI_DEVICE_PRIVACY 0x01
+
+#define HCI_OP_LE_SET_PRIVACY_MODE 0x204e
+struct hci_cp_le_set_privacy_mode {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 mode;
+} __packed;
+
#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
struct hci_rp_le_read_buffer_size_v2 {
__u8 status;
@@ -1822,23 +2117,23 @@ struct hci_rp_le_read_iso_tx_sync {
#define HCI_OP_LE_SET_CIG_PARAMS 0x2062
struct hci_cis_params {
__u8 cis_id;
- __le16 m_sdu;
- __le16 s_sdu;
- __u8 m_phy;
- __u8 s_phy;
- __u8 m_rtn;
- __u8 s_rtn;
+ __le16 c_sdu;
+ __le16 p_sdu;
+ __u8 c_phy;
+ __u8 p_phy;
+ __u8 c_rtn;
+ __u8 p_rtn;
} __packed;
struct hci_cp_le_set_cig_params {
__u8 cig_id;
- __u8 m_interval[3];
- __u8 s_interval[3];
+ __u8 c_interval[3];
+ __u8 p_interval[3];
__u8 sca;
__u8 packing;
__u8 framing;
- __le16 m_latency;
- __le16 s_latency;
+ __le16 c_latency;
+ __le16 p_latency;
__u8 num_cis;
struct hci_cis_params cis[];
} __packed;
@@ -1877,7 +2172,78 @@ struct hci_cp_le_reject_cis {
__u8 reason;
} __packed;
+#define HCI_OP_LE_CREATE_BIG 0x2068
+struct hci_bis {
+ __u8 sdu_interval[3];
+ __le16 sdu;
+ __le16 latency;
+ __u8 rtn;
+ __u8 phy;
+ __u8 packing;
+ __u8 framing;
+ __u8 encryption;
+ __u8 bcode[16];
+} __packed;
+
+struct hci_cp_le_create_big {
+ __u8 handle;
+ __u8 adv_handle;
+ __u8 num_bis;
+ struct hci_bis bis;
+} __packed;
+
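
A hedged sketch (not part of the patch) of the Create BIG command, which embeds a single struct hci_bis parameter block; adv_handle and hdev are assumed, and the QoS numbers are illustrative only. hci_cpu_to_le24() is the helper defined near the end of this header:

        struct hci_cp_le_create_big cp;

        memset(&cp, 0, sizeof(cp));
        cp.handle = 0x00;                            /* BIG handle */
        cp.adv_handle = adv_handle;                  /* periodic adv set */
        cp.num_bis = 0x01;
        hci_cpu_to_le24(10000, cp.bis.sdu_interval); /* 10 ms in usec */
        cp.bis.sdu = cpu_to_le16(40);
        cp.bis.latency = cpu_to_le16(10);
        cp.bis.rtn = 0x02;
        cp.bis.phy = 0x02;                           /* LE 2M */
        hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
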
+#define HCI_OP_LE_TERM_BIG 0x206a
+struct hci_cp_le_term_big {
+ __u8 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_LE_BIG_CREATE_SYNC 0x206b
+struct hci_cp_le_big_create_sync {
+ __u8 handle;
+ __le16 sync_handle;
+ __u8 encryption;
+ __u8 bcode[16];
+ __u8 mse;
+ __le16 timeout;
+ __u8 num_bis;
+ __u8 bis[];
+} __packed;
+
+#define HCI_OP_LE_BIG_TERM_SYNC 0x206c
+struct hci_cp_le_big_term_sync {
+ __u8 handle;
+} __packed;
+
+#define HCI_OP_LE_SETUP_ISO_PATH 0x206e
+struct hci_cp_le_setup_iso_path {
+ __le16 handle;
+ __u8 direction;
+ __u8 path;
+ __u8 codec;
+ __le16 codec_cid;
+ __le16 codec_vid;
+ __u8 delay[3];
+ __u8 codec_cfg_len;
+ __u8 codec_cfg[];
+} __packed;
+
+struct hci_rp_le_setup_iso_path {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
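
A sketch (not part of the patch) of routing an established CIS over the HCI transport with a transparent codec; handle and hdev are assumed, and codec_cfg_len left at zero sends no codec configuration:

        struct hci_cp_le_setup_iso_path cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.handle = cpu_to_le16(handle); /* CIS connection handle */
        cmd.direction = 0x00;             /* input: host to controller */
        cmd.path = 0x00;                  /* HCI data path */
        cmd.codec = 0x03;                 /* transparent coding format */
        hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), &cmd);
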
+#define HCI_OP_LE_SET_HOST_FEATURE 0x2074
+struct hci_cp_le_set_host_feature {
+ __u8 bit_number;
+ __u8 bit_value;
+} __packed;
+
/* ---- HCI Events ---- */
+struct hci_ev_status {
+ __u8 status;
+} __packed;
+
#define HCI_EV_INQUIRY_COMPLETE 0x01
#define HCI_EV_INQUIRY_RESULT 0x02
@@ -1890,6 +2256,11 @@ struct inquiry_info {
__le16 clock_offset;
} __packed;
+struct hci_ev_inquiry_result {
+ __u8 num;
+ struct inquiry_info info[];
+};
+
#define HCI_EV_CONN_COMPLETE 0x03
struct hci_ev_conn_complete {
__u8 status;
@@ -2001,7 +2372,7 @@ struct hci_comp_pkts_info {
} __packed;
struct hci_ev_num_comp_pkts {
- __u8 num_hndl;
+ __u8 num;
struct hci_comp_pkts_info handles[];
} __packed;
@@ -2051,7 +2422,7 @@ struct hci_ev_pscan_rep_mode {
} __packed;
#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
-struct inquiry_info_with_rssi {
+struct inquiry_info_rssi {
bdaddr_t bdaddr;
__u8 pscan_rep_mode;
__u8 pscan_period_mode;
@@ -2059,7 +2430,7 @@ struct inquiry_info_with_rssi {
__le16 clock_offset;
__s8 rssi;
} __packed;
-struct inquiry_info_with_rssi_and_pscan_mode {
+struct inquiry_info_rssi_pscan {
bdaddr_t bdaddr;
__u8 pscan_rep_mode;
__u8 pscan_period_mode;
@@ -2068,6 +2439,10 @@ struct inquiry_info_with_rssi_and_pscan_mode {
__le16 clock_offset;
__s8 rssi;
} __packed;
+struct hci_ev_inquiry_result_rssi {
+ __u8 num;
+ __u8 data[];
+} __packed;
#define HCI_EV_REMOTE_EXT_FEATURES 0x23
struct hci_ev_remote_ext_features {
@@ -2122,6 +2497,11 @@ struct extended_inquiry_info {
__u8 data[240];
} __packed;
+struct hci_ev_ext_inquiry_result {
+ __u8 num;
+ struct extended_inquiry_info info[];
+} __packed;
+
#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
struct hci_ev_key_refresh_complete {
__u8 status;
@@ -2243,7 +2623,7 @@ struct hci_ev_sync_train_complete {
__u8 status;
} __packed;
-#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+#define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54
#define HCI_EV_LE_CONN_COMPLETE 0x01
struct hci_ev_le_conn_complete {
@@ -2281,6 +2661,7 @@ struct hci_ev_le_conn_complete {
#define LE_EXT_ADV_DIRECT_IND 0x0004
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
+#define LE_EXT_ADV_EVT_TYPE_MASK 0x007f
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
@@ -2289,13 +2670,18 @@ struct hci_ev_le_conn_complete {
#define HCI_EV_LE_ADVERTISING_REPORT 0x02
struct hci_ev_le_advertising_info {
- __u8 evt_type;
+ __u8 type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 length;
__u8 data[];
} __packed;
+struct hci_ev_le_advertising_report {
+ __u8 num;
+ struct hci_ev_le_advertising_info info[];
+} __packed;
+
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
struct hci_ev_le_conn_update_complete {
__u8 status;
@@ -2339,7 +2725,7 @@ struct hci_ev_le_data_len_change {
#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
struct hci_ev_le_direct_adv_info {
- __u8 evt_type;
+ __u8 type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 direct_addr_type;
@@ -2347,6 +2733,11 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+struct hci_ev_le_direct_adv_report {
+ __u8 num;
+ struct hci_ev_le_direct_adv_info info[];
+} __packed;
+
#define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
struct hci_ev_le_phy_update_complete {
__u8 status;
@@ -2356,8 +2747,8 @@ struct hci_ev_le_phy_update_complete {
} __packed;
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
-struct hci_ev_le_ext_adv_report {
- __le16 evt_type;
+struct hci_ev_le_ext_adv_info {
+ __le16 type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 primary_phy;
@@ -2365,11 +2756,28 @@ struct hci_ev_le_ext_adv_report {
__u8 sid;
__u8 tx_power;
__s8 rssi;
- __le16 interval;
- __u8 direct_addr_type;
+ __le16 interval;
+ __u8 direct_addr_type;
bdaddr_t direct_addr;
- __u8 length;
- __u8 data[];
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+struct hci_ev_le_ext_adv_report {
+ __u8 num;
+ struct hci_ev_le_ext_adv_info info[];
+} __packed;
+
+#define HCI_EV_LE_PA_SYNC_ESTABLISHED 0x0e
+struct hci_ev_le_pa_sync_established {
+ __u8 status;
+ __le16 handle;
+ __u8 sid;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 phy;
+ __le16 interval;
+ __u8 clock_accuracy;
} __packed;
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
@@ -2387,6 +2795,21 @@ struct hci_ev_le_enh_conn_complete {
__u8 clk_accurancy;
} __packed;
+#define HCI_EV_LE_PER_ADV_REPORT 0x0f
+struct hci_ev_le_per_adv_report {
+ __le16 sync_handle;
+ __u8 tx_power;
+ __u8 rssi;
+ __u8 cte_type;
+ __u8 data_status;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
+#define LE_PA_DATA_COMPLETE 0x00
+#define LE_PA_DATA_MORE_TO_COME 0x01
+#define LE_PA_DATA_TRUNCATED 0x02
+
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
@@ -2401,17 +2824,17 @@ struct hci_evt_le_cis_established {
__le16 handle;
__u8 cig_sync_delay[3];
__u8 cis_sync_delay[3];
- __u8 m_latency[3];
- __u8 s_latency[3];
- __u8 m_phy;
- __u8 s_phy;
+ __u8 c_latency[3];
+ __u8 p_latency[3];
+ __u8 c_phy;
+ __u8 p_phy;
__u8 nse;
- __u8 m_bn;
- __u8 s_bn;
- __u8 m_ft;
- __u8 s_ft;
- __le16 m_mtu;
- __le16 s_mtu;
+ __u8 c_bn;
+ __u8 p_bn;
+ __u8 c_ft;
+ __u8 p_ft;
+ __le16 c_mtu;
+ __le16 p_mtu;
__le16 interval;
} __packed;
@@ -2423,6 +2846,55 @@ struct hci_evt_le_cis_req {
__u8 cis_id;
} __packed;
+#define HCI_EVT_LE_CREATE_BIG_COMPLETE 0x1b
+struct hci_evt_le_create_big_complete {
+ __u8 status;
+ __u8 handle;
+ __u8 sync_delay[3];
+ __u8 transport_delay[3];
+ __u8 phy;
+ __u8 nse;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __le16 interval;
+ __u8 num_bis;
+ __le16 bis_handle[];
+} __packed;
+
+#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
+struct hci_evt_le_big_sync_estabilished {
+ __u8 status;
+ __u8 handle;
+ __u8 latency[3];
+ __u8 nse;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __le16 interval;
+ __u8 num_bis;
+ __le16 bis[];
+} __packed;
+
+#define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22
+struct hci_evt_le_big_info_adv_report {
+ __le16 sync_handle;
+ __u8 num_bis;
+ __u8 nse;
+ __le16 iso_interval;
+ __u8 bn;
+ __u8 pto;
+ __u8 irc;
+ __le16 max_pdu;
+ __u8 sdu_interval[3];
+ __le16 max_sdu;
+ __u8 phy;
+ __u8 framing;
+ __u8 encryption;
+} __packed;
+
#define HCI_EV_VENDOR 0xff
/* Internal events generated by Bluetooth stack */
@@ -2532,6 +3004,9 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
+/* codec transport types */
+#define HCI_TRANSPORT_SCO_ESCO 0x01
+
/* le24 support */
static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
{
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8caac20556b4..56fb42df44a3 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -30,11 +31,16 @@
#include <linux/rculist.h>
#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
+#include <net/bluetooth/coredump.h>
/* HCI priority */
#define HCI_PRIO_MAX 7
+/* HCI maximum id value */
+#define HCI_MAX_ID 10000
+
/* HCI Core structures */
struct inquiry_data {
bdaddr_t bdaddr;
@@ -77,7 +83,7 @@ struct discovery_state {
u8 last_adv_addr_type;
s8 last_adv_rssi;
u32 last_adv_flags;
- u8 last_adv_data[HCI_MAX_AD_LENGTH];
+ u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH];
u8 last_adv_data_len;
bool report_invalid_rssi;
bool result_filtering;
@@ -87,6 +93,7 @@ struct discovery_state {
u8 (*uuids)[16];
unsigned long scan_start;
unsigned long scan_duration;
+ unsigned long name_resolve_timeout;
};
#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -105,6 +112,8 @@ enum suspend_tasks {
SUSPEND_POWERING_DOWN,
SUSPEND_PREPARE_NOTIFIER,
+
+ SUSPEND_SET_ADV_FILTER,
__SUSPEND_NUM_TASKS
};
@@ -119,8 +128,9 @@ struct hci_conn_hash {
unsigned int acl_num;
unsigned int amp_num;
unsigned int sco_num;
+ unsigned int iso_num;
unsigned int le_num;
- unsigned int le_num_slave;
+ unsigned int le_num_peripheral;
};
struct bdaddr_list {
@@ -129,6 +139,17 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct codec_list {
+ struct list_head list;
+ u8 id;
+ __u16 cid;
+ __u16 vid;
+ u8 transport;
+ u8 num_caps;
+ u32 len;
+ struct hci_codec_caps caps[];
+};
+
struct bdaddr_list_with_irk {
struct list_head list;
bdaddr_t bdaddr;
@@ -137,23 +158,20 @@ struct bdaddr_list_with_irk {
u8 local_irk[16];
};
+/* Bitmask of connection flags */
+enum hci_conn_flags {
+ HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
+ HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
+};
+typedef u8 hci_conn_flags_t;
+
struct bdaddr_list_with_flags {
struct list_head list;
bdaddr_t bdaddr;
u8 bdaddr_type;
- u32 current_flags;
-};
-
-enum hci_conn_flags {
- HCI_CONN_FLAG_REMOTE_WAKEUP,
- HCI_CONN_FLAG_MAX
+ hci_conn_flags_t flags;
};
-#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr))
-
-/* Make sure number of flags doesn't exceed sizeof(current_flags) */
-static_assert(HCI_CONN_FLAG_MAX < 32);
-
struct bt_uuid {
struct list_head list;
u8 uuid[16];
@@ -171,6 +189,7 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[16];
};
@@ -180,6 +199,7 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -194,6 +214,7 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
+ u8 link_type;
u8 val[16];
};
@@ -201,6 +222,8 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -219,17 +242,26 @@ struct oob_data {
struct adv_info {
struct list_head list;
- bool pending;
+ bool enabled;
+ bool pending;
+ bool periodic;
+ __u8 mesh;
__u8 instance;
__u32 flags;
__u16 timeout;
__u16 remaining_time;
__u16 duration;
__u16 adv_data_len;
- __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
+ bool adv_data_changed;
__u16 scan_rsp_len;
- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
+ bool scan_rsp_changed;
+ __u16 per_adv_data_len;
+ __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
__s8 tx_power;
+ __u32 min_interval;
+ __u32 max_interval;
bdaddr_t random_addr;
bool rpa_expired;
struct delayed_work rpa_expired_cb;
@@ -238,26 +270,65 @@ struct adv_info {
#define HCI_MAX_ADV_INSTANCES 5
#define HCI_DEFAULT_ADV_DURATION 2
+#define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F
+
+#define DATA_CMP(_d1, _l1, _d2, _l2) \
+ (_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2)
+
+#define ADV_DATA_CMP(_adv, _data, _len) \
+ DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len)
+
+#define SCAN_RSP_CMP(_adv, _data, _len) \
+ DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len)
+
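DATA_CMP() orders by length first and only memcmp()s payloads of equal length, so a zero result means an exact match. A minimal sketch (not part of the patch); adv_data_unchanged() is a hypothetical helper:

        static bool adv_data_unchanged(struct adv_info *adv, const u8 *data,
                                       u16 len)
        {
                /* 0 only when both length and contents are identical */
                return ADV_DATA_CMP(adv, data, len) == 0;
        }
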
+struct monitored_device {
+ struct list_head list;
+
+ bdaddr_t bdaddr;
+ __u8 addr_type;
+ __u16 handle;
+ bool notified;
+};
+
struct adv_pattern {
struct list_head list;
__u8 ad_type;
__u8 offset;
__u8 length;
- __u8 value[HCI_MAX_AD_LENGTH];
+ __u8 value[HCI_MAX_EXT_AD_LENGTH];
+};
+
+struct adv_rssi_thresholds {
+ __s8 low_threshold;
+ __s8 high_threshold;
+ __u16 low_threshold_timeout;
+ __u16 high_threshold_timeout;
+ __u8 sampling_period;
};
struct adv_monitor {
struct list_head patterns;
- bool active;
+ struct adv_rssi_thresholds rssi;
__u16 handle;
+
+ enum {
+ ADV_MONITOR_STATE_NOT_REGISTERED,
+ ADV_MONITOR_STATE_REGISTERED,
+ ADV_MONITOR_STATE_OFFLOADED
+ } state;
};
#define HCI_MIN_ADV_MONITOR_HANDLE 1
-#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
+#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
+#define HCI_ADV_MONITOR_EXT_NONE 1
+#define HCI_ADV_MONITOR_EXT_MSFT 2
#define HCI_MAX_SHORT_NAME_LENGTH 10
+#define HCI_CONN_HANDLE_MAX 0x0eff
+#define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX)
+
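Handles above HCI_CONN_HANDLE_MAX (0x0eff) cannot appear on the wire, so they double as "not yet assigned" markers. A hypothetical guard (not part of the patch):

        if (HCI_CONN_HANDLE_UNSET(conn->handle))
                return -EBADFD; /* controller has not reported a handle yet */
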
/* Min encryption key size to match with SMP */
#define HCI_MIN_ENC_KEY_SIZE 7
@@ -284,7 +355,9 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
- char name[8];
+ struct ida unset_handle_ida;
+
+ const char *name;
unsigned long flags;
__u16 id;
__u8 bus;
@@ -305,10 +378,12 @@ struct hci_dev {
__u8 max_page;
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
- __u8 le_white_list_size;
+ __u8 le_accept_list_size;
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
__u8 le_states[8];
+ __u8 mesh_ad_types[16];
+ __u8 mesh_send_ref;
__u8 commands[64];
__u8 hci_ver;
__u16 hci_rev;
@@ -317,8 +392,8 @@ struct hci_dev {
__u16 lmp_subver;
__u16 voice_setting;
__u8 num_iac;
- __u8 stored_max_keys;
- __u8 stored_num_keys;
+ __u16 stored_max_keys;
+ __u16 stored_num_keys;
__u8 io_capability;
__s8 inq_tx_power;
__u8 err_data_reporting;
@@ -361,6 +436,9 @@ struct hci_dev {
__u8 ssp_debug_mode;
__u8 hw_error_code;
__u32 clock;
+ __u16 advmon_allowlist_duration;
+ __u16 advmon_no_filter_duration;
+ __u8 enable_advmon_interleave_scan;
__u16 devid_source;
__u16 devid_vendor;
@@ -377,6 +455,8 @@ struct hci_dev {
__u16 def_page_timeout;
__u16 def_multi_adv_rotation_duration;
__u16 def_le_autoconnect_timeout;
+ __s8 min_le_tx_power;
+ __s8 max_le_tx_power;
__u16 pkt_type;
__u16 esco_type;
@@ -410,13 +490,16 @@ struct hci_dev {
unsigned int acl_cnt;
unsigned int sco_cnt;
unsigned int le_cnt;
+ unsigned int iso_cnt;
unsigned int acl_mtu;
unsigned int sco_mtu;
unsigned int le_mtu;
+ unsigned int iso_mtu;
unsigned int acl_pkts;
unsigned int sco_pkts;
unsigned int le_pkts;
+ unsigned int iso_pkts;
__u16 block_len;
__u16 block_mtu;
@@ -436,6 +519,12 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
struct work_struct error_reset;
+ struct work_struct cmd_sync_work;
+ struct list_head cmd_sync_work_list;
+ struct mutex cmd_sync_work_lock;
+ struct mutex unregister_lock;
+ struct work_struct cmd_sync_cancel_work;
+ struct work_struct reenable_adv_work;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -443,30 +532,27 @@ struct hci_dev {
struct delayed_work service_cache;
struct delayed_work cmd_timer;
+ struct delayed_work ncmd_timer;
struct work_struct rx_work;
struct work_struct cmd_work;
struct work_struct tx_work;
- struct work_struct discov_update;
- struct work_struct bg_scan_update;
- struct work_struct scan_update;
- struct work_struct connectable_update;
- struct work_struct discoverable_update;
struct delayed_work le_scan_disable;
- struct delayed_work le_scan_restart;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
struct sk_buff *sent_cmd;
+ struct sk_buff *recv_event;
struct mutex req_lock;
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
struct sk_buff *req_skb;
+ struct sk_buff *req_rsp;
void *smp_data;
void *smp_bredr_data;
@@ -479,31 +565,32 @@ struct hci_dev {
bool advertising_paused;
struct notifier_block suspend_notifier;
- struct work_struct suspend_prepare;
enum suspended_state suspend_state_next;
enum suspended_state suspend_state;
bool scanning_paused;
bool suspended;
-
- wait_queue_head_t suspend_wait_q;
- DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
+ u8 wake_reason;
+ bdaddr_t wake_addr;
+ u8 wake_addr_type;
struct hci_conn_hash conn_hash;
+ struct list_head mesh_pending;
struct list_head mgmt_pending;
- struct list_head blacklist;
- struct list_head whitelist;
+ struct list_head reject_list;
+ struct list_head accept_list;
struct list_head uuids;
struct list_head link_keys;
struct list_head long_term_keys;
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
- struct list_head le_white_list;
+ struct list_head le_accept_list;
struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
struct list_head blocked_keys;
+ struct list_head local_codecs;
struct hci_dev_stats stat;
@@ -513,17 +600,22 @@ struct hci_dev {
const char *fw_info;
struct dentry *debugfs;
+ struct hci_devcoredump dump;
+
struct device dev;
struct rfkill *rfkill;
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+ hci_conn_flags_t conn_flags;
__s8 adv_tx_power;
- __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 adv_data_len;
- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
__u8 scan_rsp_data_len;
+ __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ __u8 per_adv_data_len;
struct list_head adv_instances;
unsigned int adv_instance_cnt;
@@ -539,6 +631,19 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+ struct delayed_work mesh_send_done;
+
+ enum {
+ INTERLEAVE_SCAN_NONE,
+ INTERLEAVE_SCAN_NO_FILTER,
+ INTERLEAVE_SCAN_ALLOWLIST
+ } interleave_scan_state;
+
+ struct delayed_work interleave_scan;
+
+ struct list_head monitored_devices;
+ bool advmon_pend_notify;
+
#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
#endif
@@ -546,6 +651,12 @@ struct hci_dev {
#if IS_ENABLED(CONFIG_BT_MSFTEXT)
__u16 msft_opcode;
void *msft_data;
+ bool msft_curve_validity;
+#endif
+
+#if IS_ENABLED(CONFIG_BT_AOSPEXT)
+ bool aosp_capable;
+ bool aosp_quality_report;
#endif
int (*open)(struct hci_dev *hdev);
@@ -560,7 +671,13 @@ struct hci_dev {
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
void (*cmd_timeout)(struct hci_dev *hdev);
- bool (*prevent_wake)(struct hci_dev *hdev);
+ void (*reset)(struct hci_dev *hdev);
+ bool (*wakeup)(struct hci_dev *hdev);
+ int (*set_quality_report)(struct hci_dev *hdev, bool enable);
+ int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
+ int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -569,6 +686,7 @@ enum conn_reasons {
CONN_REASON_PAIR_DEVICE,
CONN_REASON_L2CAP_CHAN,
CONN_REASON_SCO_CONNECT,
+ CONN_REASON_ISO_CONNECT,
};
struct hci_conn {
@@ -584,7 +702,9 @@ struct hci_conn {
__u8 init_addr_type;
bdaddr_t resp_addr;
__u8 resp_addr_type;
+ __u8 adv_instance;
__u16 handle;
+ __u16 sync_handle;
__u16 state;
__u8 mode;
__u8 type;
@@ -613,16 +733,21 @@ struct hci_conn {
__u16 le_conn_interval;
__u16 le_conn_latency;
__u16 le_supv_timeout;
- __u8 le_adv_data[HCI_MAX_AD_LENGTH];
+ __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 le_adv_data_len;
+ __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
+ __u16 le_per_adv_data_len;
+ __u16 le_per_adv_data_offset;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
__s8 tx_power;
__s8 max_tx_power;
+ struct bt_iso_qos iso_qos;
unsigned long flags;
enum conn_reasons conn_reason;
+ __u8 abort_reason;
__u32 clock;
__u16 clock_accuracy;
@@ -642,7 +767,6 @@ struct hci_conn {
struct delayed_work auto_accept_work;
struct delayed_work idle_work;
struct delayed_work le_conn_timeout;
- struct work_struct le_scan_cleanup;
struct device dev;
struct dentry *debugfs;
@@ -650,13 +774,25 @@ struct hci_conn {
struct hci_dev *hdev;
void *l2cap_data;
void *sco_data;
+ void *iso_data;
struct amp_mgr *amp_mgr;
- struct hci_conn *link;
+ struct list_head link_list;
+ struct hci_conn *parent;
+ struct hci_link *link;
+
+ struct bt_codec codec;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
+
+ void (*cleanup)(struct hci_conn *conn);
+};
+
+struct hci_link {
+ struct list_head list;
+ struct hci_conn *conn;
};
struct hci_chan {
@@ -666,6 +802,7 @@ struct hci_chan {
struct sk_buff_head data_q;
unsigned int sent;
__u8 state;
+ bool amp;
};
struct hci_conn_params {
@@ -691,7 +828,9 @@ struct hci_conn_params {
struct hci_conn *conn;
bool explicit_connect;
- u32 current_flags;
+ /* Accessed without hdev->lock: */
+ hci_conn_flags_t flags;
+ u8 privacy_mode;
};
extern struct list_head hci_dev_list;
@@ -713,8 +852,15 @@ extern struct mutex hci_cb_list_lock;
hci_dev_clear_flag(hdev, HCI_LE_ADV); \
hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
} while (0)
+#define hci_dev_le_state_simultaneous(hdev) \
+ (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
+ (hdev->le_states[4] & 0x08) && /* Central */ \
+ (hdev->le_states[4] & 0x40) && /* Peripheral */ \
+ (hdev->le_states[3] & 0x10)) /* Simultaneous */
+
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
@@ -735,6 +881,21 @@ static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
}
#endif
+#if IS_ENABLED(CONFIG_BT_LE)
+int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+#else
+static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
+ u16 flags)
+{
+}
+#endif
+
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
@@ -797,7 +958,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
- HCI_CONN_REAUTH_PEND,
HCI_CONN_ENCRYPT_PEND,
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
@@ -815,10 +975,18 @@ enum {
HCI_CONN_STK_ENCRYPT,
HCI_CONN_AUTH_INITIATOR,
HCI_CONN_DROP,
+ HCI_CONN_CANCEL,
HCI_CONN_PARAM_REMOVAL_PEND,
HCI_CONN_NEW_LINK_KEY,
HCI_CONN_SCANNING,
HCI_CONN_AUTH_FAILURE,
+ HCI_CONN_PER_ADV,
+ HCI_CONN_BIG_CREATED,
+ HCI_CONN_CREATE_CIS,
+ HCI_CONN_BIG_SYNC,
+ HCI_CONN_BIG_SYNC_FAILED,
+ HCI_CONN_PA_SYNC,
+ HCI_CONN_PA_SYNC_FAILED,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -838,7 +1006,7 @@ static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_add_rcu(&c->list, &h->list);
+ list_add_tail_rcu(&c->list, &h->list);
switch (c->type) {
case ACL_LINK:
h->acl_num++;
@@ -849,12 +1017,15 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case LE_LINK:
h->le_num++;
if (c->role == HCI_ROLE_SLAVE)
- h->le_num_slave++;
+ h->le_num_peripheral++;
break;
case SCO_LINK:
case ESCO_LINK:
h->sco_num++;
break;
+ case ISO_LINK:
+ h->iso_num++;
+ break;
}
}
@@ -875,12 +1046,15 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case LE_LINK:
h->le_num--;
if (c->role == HCI_ROLE_SLAVE)
- h->le_num_slave--;
+ h->le_num_peripheral--;
break;
case SCO_LINK:
case ESCO_LINK:
h->sco_num--;
break;
+ case ISO_LINK:
+ h->iso_num--;
+ break;
}
}
@@ -897,6 +1071,8 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
case SCO_LINK:
case ESCO_LINK:
return h->sco_num;
+ case ISO_LINK:
+ return h->iso_num;
default:
return 0;
}
@@ -906,7 +1082,25 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
struct hci_conn_hash *c = &hdev->conn_hash;
- return c->acl_num + c->amp_num + c->sco_num + c->le_num;
+ return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
+}
+
+static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c == conn) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
}
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
@@ -929,6 +1123,54 @@ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
return type;
}
+static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
+ bdaddr_t *ba, __u8 bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
+ continue;
+
+ if (c->iso_qos.bcast.bis == bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 big, __u8 bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PER_ADV, &c->flags))
+ continue;
+
+ if (c->iso_qos.bcast.big == big &&
+ c->iso_qos.bcast.bis == bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
@@ -992,6 +1234,157 @@ static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 ba_type,
+ __u8 cig,
+ __u8 id)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
+ continue;
+
+ /* Match CIG ID if set */
+ if (cig != c->iso_qos.ucast.cig)
+ continue;
+
+ /* Match CIS ID if set */
+ if (id != c->iso_qos.ucast.cis)
+ continue;
+
+ /* Match destination address if set */
+ if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
+ continue;
+
+ if (handle == c->iso_qos.ucast.cig) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK ||
+ c->state != state)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ continue;
+
+ if (c->iso_qos.bcast.big == big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ continue;
+
+ if (c->sync_handle == sync_handle) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
__u8 type, __u16 state)
{
@@ -1012,6 +1405,47 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
return NULL;
}
+typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data);
+static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
+ hci_conn_func_t func, __u8 type,
+ __u16 state, void *data)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ if (!func)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state)
+ func(c, data);
+ }
+
+ rcu_read_unlock();
+}
+
+static inline void hci_conn_hash_list_flag(struct hci_dev *hdev,
+ hci_conn_func_t func, __u8 type,
+ __u8 flag, void *data)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ if (!func)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && test_bit(flag, &c->flags))
+ func(c, data);
+ }
+
+ rcu_read_unlock();
+}
+
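A sketch (not part of the patch) of driving the new iterators; count_iso() is a hypothetical callback counting connected ISO links:

        static void count_iso(struct hci_conn *conn, void *data)
        {
                (*(unsigned int *)data)++;
        }

        /* then, with hdev in scope: */
        unsigned int n = 0;

        hci_conn_hash_list_state(hdev, count_iso, ISO_LINK, BT_CONNECTED, &n);
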
static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1032,15 +1466,40 @@ static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
return NULL;
}
+/* Returns true if an LE connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
+bool hci_iso_setup_path(struct hci_conn *conn);
+int hci_le_create_cis_pending(struct hci_dev *hdev);
+int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role);
-int hci_conn_del(struct hci_conn *conn);
+ u8 role, u16 handle);
+struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ bdaddr_t *dst, u8 role);
+void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
-void hci_conn_check_pending(struct hci_dev *hdev);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
@@ -1052,13 +1511,30 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role, bdaddr_t *direct_rpa);
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role);
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason);
+ enum conn_reasons conn_reason, u16 timeout);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting);
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout);
+struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ struct bt_iso_qos *qos,
+ __u8 base_len, __u8 *base);
+struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos,
+ __u8 data_len, __u8 *data);
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
+int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos,
+ __u16 sync_handle, __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1067,7 +1543,8 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+void hci_conn_failed(struct hci_conn *conn, u8 status);
+u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
@@ -1101,12 +1578,14 @@ static inline void hci_conn_put(struct hci_conn *conn)
put_device(&conn->dev);
}
-static inline void hci_conn_hold(struct hci_conn *conn)
+static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn)
{
BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
atomic_inc(&conn->refcnt);
cancel_delayed_work(&conn->disc_work);
+
+ return conn;
}
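
Because hci_conn_hold() now returns the connection, a reference can be taken inline during assignment. A sketch (not part of the patch):

        struct hci_conn *ref = hci_conn_hold(conn); /* takes a reference */

        /* ... use ref ... */

        hci_conn_drop(ref); /* balance the reference when done */
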
static inline void hci_conn_drop(struct hci_conn *conn)
@@ -1178,13 +1657,27 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
dev_set_drvdata(&hdev->dev, data);
}
+static inline void *hci_get_priv(struct hci_dev *hdev)
+{
+ return (char *)hdev + sizeof(*hdev);
+}
+
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
-struct hci_dev *hci_alloc_dev(void);
+struct hci_dev *hci_alloc_dev_priv(int sizeof_priv);
+
+static inline struct hci_dev *hci_alloc_dev(void)
+{
+ return hci_alloc_dev_priv(0);
+}
+
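A sketch (not part of the patch) of the driver private-data pattern these helpers enable; struct my_drv_data and irq are hypothetical:

        struct my_drv_data { int irq; };

        struct hci_dev *hdev = hci_alloc_dev_priv(sizeof(struct my_drv_data));
        struct my_drv_data *priv;

        if (!hdev)
                return -ENOMEM;

        priv = hci_get_priv(hdev); /* memory placed directly after *hdev */
        priv->irq = irq;
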
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
+void hci_release_dev(struct hci_dev *hdev);
+int hci_register_suspend_notifier(struct hci_dev *hdev);
+int hci_unregister_suspend_notifier(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
@@ -1200,6 +1693,22 @@ static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode)
#endif
}
+static inline void hci_set_aosp_capable(struct hci_dev *hdev)
+{
+#if IS_ENABLED(CONFIG_BT_AOSPEXT)
+ hdev->aosp_capable = true;
+#endif
+}
+
+static inline void hci_devcd_setup(struct hci_dev *hdev)
+{
+#ifdef CONFIG_DEV_COREDUMP
+ INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx);
+ INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout);
+ skb_queue_head_init(&hdev->dump.dump_q);
+#endif
+}
+
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1239,7 +1748,11 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);
+void hci_conn_params_free(struct hci_conn_params *param);
+void hci_pend_le_list_del_init(struct hci_conn_params *param);
+void hci_pend_le_list_add(struct hci_conn_params *param,
+ struct list_head *list);
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
@@ -1284,18 +1797,30 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_adv_instances_clear(struct hci_dev *hdev);
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
-int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
+ u32 flags, u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data,
+ u16 timeout, u16 duration, s8 tx_power,
+ u32 min_interval, u32 max_interval,
+ u8 mesh_handle);
+struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
+ u32 flags, u8 data_len, u8 *data,
+ u32 min_interval, u32 max_interval);
+int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
u16 adv_data_len, u8 *adv_data,
- u16 scan_rsp_len, u8 *scan_rsp_data,
- u16 timeout, u16 duration);
+ u16 scan_rsp_len, u8 *scan_rsp_data);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
void hci_adv_monitors_clear(struct hci_dev *hdev);
-void hci_free_adv_monitor(struct adv_monitor *monitor);
+void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
-int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle);
+int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle);
+int hci_remove_all_adv_monitor(struct hci_dev *hdev);
bool hci_is_adv_monitoring(struct hci_dev *hdev);
+int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1305,6 +1830,7 @@ void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
+#define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent)
/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
@@ -1318,6 +1844,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
+#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
@@ -1332,8 +1859,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
-#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
-#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE)
+#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
+#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
@@ -1349,28 +1876,82 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
!hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
+#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \
+ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
+#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
+ !adv->rpa_expired)
#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
+#define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M))
+
#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
+#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \
+ !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \
+ &(dev)->quirks))
+
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+
/* Use LL Privacy based address resolution if supported */
-#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
+
+#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
+ (hdev->commands[39] & 0x04))
+
+/* Use enhanced synchronous connection if command is supported and its quirk
+ * has not been set.
+ */
+#define enhanced_sync_conn_capable(dev) \
+ (((dev)->commands[29] & 0x08) && \
+ !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks))
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
- ((dev)->commands[37] & 0x40))
+ ((dev)->commands[37] & 0x40) && \
+ !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
+
/* Use ext create connection if command is supported */
#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+/* Maximum advertising length */
+#define max_adv_len(dev) \
+ (ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)
+
+/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
+ *
+ * C24: Mandatory if the LE Controller supports Connection State and either
+ * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
+ */
+#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
+ ext_adv_capable(dev))
+
+/* Periodic advertising support */
+#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
+
+/* CIS Central/Peripheral and BIS support */
+#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
+#define cis_capable(dev) \
+ (cis_central_capable(dev) || cis_peripheral_capable(dev))
+#define cis_central_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
+#define cis_peripheral_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
+#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
+
+#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
+ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1385,6 +1966,9 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
case ESCO_LINK:
return sco_connect_ind(hdev, bdaddr, flags);
+ case ISO_LINK:
+ return iso_connect_ind(hdev, bdaddr, flags);
+
default:
BT_ERR("unknown link type %d", type);
return -EINVAL;
@@ -1529,43 +2113,6 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
mutex_unlock(&hci_cb_list_lock);
}
-static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
- size_t *data_len)
-{
- size_t parsed = 0;
-
- if (eir_len < 2)
- return NULL;
-
- while (parsed < eir_len - 1) {
- u8 field_len = eir[0];
-
- if (field_len == 0)
- break;
-
- parsed += field_len + 1;
-
- if (parsed > eir_len)
- break;
-
- if (eir[1] != type) {
- eir += field_len + 1;
- continue;
- }
-
- /* Zero length data */
- if (field_len == 1)
- return NULL;
-
- if (data_len)
- *data_len = field_len - 1;
-
- return &eir[2];
- }
-
- return NULL;
-}
-
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
if (addr_type != ADDR_LE_DEV_RANDOM)
@@ -1622,10 +2169,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout);
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u8 event, u32 timeout);
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
@@ -1633,11 +2176,10 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
+void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
-
-struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout);
+void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);
u32 hci_conn_get_phy(struct hci_conn *conn);
@@ -1694,8 +2236,15 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
-#define DISCOV_LE_FAST_ADV_INT_MIN 100 /* msec */
-#define DISCOV_LE_FAST_ADV_INT_MAX 150 /* msec */
+#define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
+#define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */
+#define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */
+#define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */
+#define INTERVAL_TO_MS(x) (((x) * 10) / 0x10)
+
+#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */
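
INTERVAL_TO_MS() converts 0.625 ms advertising-interval units to milliseconds; a worked instance (not part of the patch):

        unsigned int ms = INTERVAL_TO_MS(DISCOV_LE_FAST_ADV_INT_MIN);
        /* (0x00A0 * 10) / 0x10 = 1600 / 16 = 100 ms */
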
void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
@@ -1707,7 +2256,7 @@ void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
- u32 flags, u8 *name, u8 name_len);
+ u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 reason,
bool mgmt_connected);
@@ -1738,7 +2287,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
-void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
@@ -1746,10 +2294,14 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
- u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
+ u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
+ u64 instant);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_suspending(struct hci_dev *hdev, u8 state);
+void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
+ u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
@@ -1760,15 +2312,17 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
-void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
-void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
+void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
+void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
+ bdaddr_t *bdaddr, u8 addr_type);
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
@@ -1781,4 +2335,9 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003
+#define LOCAL_CODEC_ACL_MASK BIT(0)
+#define LOCAL_CODEC_SCO_MASK BIT(1)
+
+#define TRANSPORT_TYPE_MAX 0x04
+
#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 2d5fcda1bcd0..082f89531b88 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -56,7 +56,7 @@ struct hci_mon_new_index {
__u8 type;
__u8 bus;
bdaddr_t bdaddr;
- char name[8];
+ char name[8] __nonstring;
} __packed;
#define HCI_MON_NEW_INDEX_SIZE 16
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
new file mode 100644
index 000000000000..6a9d063e9f47
--- /dev/null
+++ b/include/net/bluetooth/hci_sync.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
+#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
+
+typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
+typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
+ int err);
+
+struct hci_cmd_sync_work_entry {
+ struct list_head list;
+ hci_cmd_sync_work_func_t func;
+ void *data;
+ hci_cmd_sync_work_destroy_t destroy;
+};
+
+struct adv_info;
+/* Functions with the sync suffix shall not be called with hdev->lock held,
+ * as they wait for the command to complete and in the meantime an event
+ * could be received which could attempt to acquire hdev->lock, causing a
+ * deadlock.
+ */
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
+struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+
+void hci_cmd_sync_init(struct hci_dev *hdev);
+void hci_cmd_sync_clear(struct hci_dev *hdev);
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
+
+int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry);
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func, void *data,
+ hci_cmd_sync_work_destroy_t destroy);
+
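A sketch (not part of the patch) of queueing deferred work on the cmd_sync machinery, using UINT_PTR()/PTR_UINT() to stash a handle in the opaque data pointer; my_pa_term_sync() and my_pa_term_queue() are hypothetical:

        static int my_pa_term_sync(struct hci_dev *hdev, void *data)
        {
                u16 handle = PTR_UINT(data);

                /* runs from cmd_sync context; safe to call *_sync() helpers */
                return hci_le_pa_terminate_sync(hdev, handle);
        }

        static int my_pa_term_queue(struct hci_dev *hdev, u16 handle)
        {
                return hci_cmd_sync_queue(hdev, my_pa_term_sync,
                                          UINT_PTR(handle), NULL);
        }
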
+int hci_update_eir_sync(struct hci_dev *hdev);
+int hci_update_class_sync(struct hci_dev *hdev);
+int hci_update_name_sync(struct hci_dev *hdev);
+int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
+
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr);
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+ bool rpa, u8 *own_addr_type);
+
+int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data(struct hci_dev *hdev, u8 instance);
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ bool force);
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance);
+int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_advertising_sync(struct hci_dev *hdev);
+int hci_enable_advertising(struct hci_dev *hdev);
+
+int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
+ u8 *data, u32 flags, u16 min_interval,
+ u16 max_interval, u16 sync_interval);
+
+int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance);
+
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_disable_advertising_sync(struct hci_dev *hdev);
+int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_update_passive_scan_sync(struct hci_dev *hdev);
+int hci_update_passive_scan(struct hci_dev *hdev);
+int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle);
+int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type);
+int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val);
+int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp);
+
+int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable);
+int hci_update_scan_sync(struct hci_dev *hdev);
+int hci_update_scan(struct hci_dev *hdev);
+
+int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
+int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk);
+int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance);
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
+ struct sock *sk);
+
+int hci_reset_sync(struct hci_dev *hdev);
+int hci_dev_open_sync(struct hci_dev *hdev);
+int hci_dev_close_sync(struct hci_dev *hdev);
+
+int hci_powered_update_sync(struct hci_dev *hdev);
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val);
+
+int hci_update_discoverable_sync(struct hci_dev *hdev);
+int hci_update_discoverable(struct hci_dev *hdev);
+
+int hci_update_connectable_sync(struct hci_dev *hdev);
+
+int hci_start_discovery_sync(struct hci_dev *hdev);
+int hci_stop_discovery_sync(struct hci_dev *hdev);
+
+int hci_suspend_sync(struct hci_dev *hdev);
+int hci_resume_sync(struct hci_dev *hdev);
+
+struct hci_conn;
+
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
+
+int hci_le_create_cis_sync(struct hci_dev *hdev);
+
+int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
+
+int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
+
+int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
+
+int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/bluetooth/iso.h b/include/net/bluetooth/iso.h
new file mode 100644
index 000000000000..3f4fe8b78e1b
--- /dev/null
+++ b/include/net/bluetooth/iso.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef __ISO_H
+#define __ISO_H
+
+/* ISO defaults */
+#define ISO_DEFAULT_MTU 251
+#define ISO_MAX_NUM_BIS 0x1f
+
+/* ISO socket broadcast address */
+struct sockaddr_iso_bc {
+ bdaddr_t bc_bdaddr;
+ __u8 bc_bdaddr_type;
+ __u8 bc_sid;
+ __u8 bc_num_bis;
+ __u8 bc_bis[ISO_MAX_NUM_BIS];
+};
+
+/* ISO socket address */
+struct sockaddr_iso {
+ sa_family_t iso_family;
+ bdaddr_t iso_bdaddr;
+ __u8 iso_bdaddr_type;
+ struct sockaddr_iso_bc iso_bc[];
+};
+
+#endif /* __ISO_H */
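
A userspace usage sketch for the structures above: fill a struct sockaddr_iso and bind a socket to it. The BTPROTO_ISO constant, the BlueZ helpers (bacpy(), BDADDR_ANY, BDADDR_LE_PUBLIC), and the <bluetooth/iso.h> install path are assumptions here, not something this header establishes:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* bdaddr_t, bacpy(), BTPROTO_ISO (assumed) */
#include <bluetooth/iso.h>		/* struct sockaddr_iso (assumed install path) */

/* Open an ISO socket and bind it to the wildcard LE public address. */
static int iso_bind_any(void)
{
	struct sockaddr_iso addr;
	int sk;

	sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_ISO);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.iso_family = AF_BLUETOOTH;
	bacpy(&addr.iso_bdaddr, BDADDR_ANY);
	addr.iso_bdaddr_type = BDADDR_LE_PUBLIC;

	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	return sk;
}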
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 8f1e6a7a2df8..a4278aa618ab 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -59,8 +59,6 @@
#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
-#define L2CAP_A2MP_DEFAULT_MTU 670
-
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
@@ -109,12 +107,6 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
-#define L2CAP_CREATE_CHAN_REQ 0x0c
-#define L2CAP_CREATE_CHAN_RSP 0x0d
-#define L2CAP_MOVE_CHAN_REQ 0x0e
-#define L2CAP_MOVE_CHAN_RSP 0x0f
-#define L2CAP_MOVE_CHAN_CFM 0x10
-#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
#define L2CAP_LE_CONN_REQ 0x14
@@ -144,7 +136,6 @@ struct l2cap_conninfo {
/* L2CAP fixed channels */
#define L2CAP_FC_SIG_BREDR 0x02
#define L2CAP_FC_CONNLESS 0x04
-#define L2CAP_FC_A2MP 0x08
#define L2CAP_FC_ATT 0x10
#define L2CAP_FC_SIG_LE 0x20
#define L2CAP_FC_SMP_LE 0x40
@@ -207,6 +198,7 @@ struct l2cap_hdr {
__le16 len;
__le16 cid;
} __packed;
+#define L2CAP_LEN_SIZE 2
#define L2CAP_HDR_SIZE 4
#define L2CAP_ENH_HDR_SIZE 6
#define L2CAP_EXT_HDR_SIZE 8
@@ -266,7 +258,6 @@ struct l2cap_conn_rsp {
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
-#define L2CAP_CID_A2MP 0x0003
#define L2CAP_CID_ATT 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
@@ -281,7 +272,6 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
-#define L2CAP_CR_BAD_AMP 0x0005
#define L2CAP_CR_INVALID_SCID 0x0006
#define L2CAP_CR_SCID_IN_USE 0x0007
@@ -403,29 +393,6 @@ struct l2cap_info_rsp {
__u8 data[];
} __packed;
-struct l2cap_create_chan_req {
- __le16 psm;
- __le16 scid;
- __u8 amp_id;
-} __packed;
-
-struct l2cap_create_chan_rsp {
- __le16 dcid;
- __le16 scid;
- __le16 result;
- __le16 status;
-} __packed;
-
-struct l2cap_move_chan_req {
- __le16 icid;
- __u8 dest_amp_id;
-} __packed;
-
-struct l2cap_move_chan_rsp {
- __le16 icid;
- __le16 result;
-} __packed;
-
#define L2CAP_MR_SUCCESS 0x0000
#define L2CAP_MR_PEND 0x0001
#define L2CAP_MR_BAD_ID 0x0002
@@ -493,6 +460,7 @@ struct l2cap_le_credits {
#define L2CAP_ECRED_MIN_MTU 64
#define L2CAP_ECRED_MIN_MPS 64
+#define L2CAP_ECRED_MAX_CID 5
struct l2cap_ecred_conn_req {
__le16 psm;
@@ -537,8 +505,6 @@ struct l2cap_seq_list {
struct l2cap_chan {
struct l2cap_conn *conn;
- struct hci_conn *hs_hcon;
- struct hci_chan *hs_hchan;
struct kref kref;
atomic_t nesting;
@@ -589,12 +555,6 @@ struct l2cap_chan {
unsigned long conn_state;
unsigned long flags;
- __u8 remote_amp_id;
- __u8 local_amp_id;
- __u8 move_id;
- __u8 move_state;
- __u8 move_role;
-
__u16 next_tx_seq;
__u16 expected_ack_seq;
__u16 expected_tx_seq;
@@ -665,6 +625,8 @@ struct l2cap_ops {
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
+ int (*filter) (struct l2cap_chan *chan,
+ struct sk_buff *skb);
};
struct l2cap_conn {
@@ -690,7 +652,7 @@ struct l2cap_conn {
struct sk_buff_head pending_rx;
struct work_struct pending_rx_work;
- struct work_struct id_addr_update_work;
+ struct delayed_work id_addr_timer;
__u8 disc_reason;
@@ -843,6 +805,7 @@ enum {
};
void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
void l2cap_chan_put(struct l2cap_chan *c);
static inline void l2cap_chan_lock(struct l2cap_chan *chan)
@@ -976,7 +939,7 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type);
+ bdaddr_t *dst, u8 dst_type, u16 timeout);
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
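
The l2cap_chan_connect() change above threads an explicit timeout through from the caller. A minimal sketch of the call under the new signature; the PSM and timeout values are illustrative and the timeout unit is an assumption, since this hunk does not define it:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>

/* Hypothetical caller: connect a freshly created channel over BR/EDR.
 * PSM 0x1001 (dynamic range) and the timeout of 10000 are illustrative.
 */
static int example_l2cap_connect(bdaddr_t *dst)
{
	struct l2cap_chan *chan;
	int err;

	chan = l2cap_chan_create();
	if (!chan)
		return -ENOMEM;

	err = l2cap_chan_connect(chan, cpu_to_le16(0x1001), 0,
				 dst, BDADDR_BREDR, 10000);
	if (err)
		l2cap_chan_put(chan);

	return err;
}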
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index beae5c3980f0..d382679efd2b 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -91,24 +91,28 @@ struct mgmt_rp_read_index_list {
#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1)
-#define MGMT_SETTING_POWERED 0x00000001
-#define MGMT_SETTING_CONNECTABLE 0x00000002
-#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
-#define MGMT_SETTING_DISCOVERABLE 0x00000008
-#define MGMT_SETTING_BONDABLE 0x00000010
-#define MGMT_SETTING_LINK_SECURITY 0x00000020
-#define MGMT_SETTING_SSP 0x00000040
-#define MGMT_SETTING_BREDR 0x00000080
-#define MGMT_SETTING_HS 0x00000100
-#define MGMT_SETTING_LE 0x00000200
-#define MGMT_SETTING_ADVERTISING 0x00000400
-#define MGMT_SETTING_SECURE_CONN 0x00000800
-#define MGMT_SETTING_DEBUG_KEYS 0x00001000
-#define MGMT_SETTING_PRIVACY 0x00002000
-#define MGMT_SETTING_CONFIGURATION 0x00004000
-#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
-#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
-#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000
+#define MGMT_SETTING_POWERED BIT(0)
+#define MGMT_SETTING_CONNECTABLE BIT(1)
+#define MGMT_SETTING_FAST_CONNECTABLE BIT(2)
+#define MGMT_SETTING_DISCOVERABLE BIT(3)
+#define MGMT_SETTING_BONDABLE BIT(4)
+#define MGMT_SETTING_LINK_SECURITY BIT(5)
+#define MGMT_SETTING_SSP BIT(6)
+#define MGMT_SETTING_BREDR BIT(7)
+#define MGMT_SETTING_HS BIT(8)
+#define MGMT_SETTING_LE BIT(9)
+#define MGMT_SETTING_ADVERTISING BIT(10)
+#define MGMT_SETTING_SECURE_CONN BIT(11)
+#define MGMT_SETTING_DEBUG_KEYS BIT(12)
+#define MGMT_SETTING_PRIVACY BIT(13)
+#define MGMT_SETTING_CONFIGURATION BIT(14)
+#define MGMT_SETTING_STATIC_ADDRESS BIT(15)
+#define MGMT_SETTING_PHY_CONFIGURATION BIT(16)
+#define MGMT_SETTING_WIDEBAND_SPEECH BIT(17)
+#define MGMT_SETTING_CIS_CENTRAL BIT(18)
+#define MGMT_SETTING_CIS_PERIPHERAL BIT(19)
+#define MGMT_SETTING_ISO_BROADCASTER BIT(20)
+#define MGMT_SETTING_ISO_SYNC_RECEIVER BIT(21)
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -202,7 +206,7 @@ struct mgmt_cp_load_link_keys {
struct mgmt_ltk_info {
struct mgmt_addr_info addr;
__u8 type;
- __u8 master;
+ __u8 initiator;
__u8 enc_size;
__le16 ediv;
__le64 rand;
@@ -572,6 +576,13 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+#define MGMT_ADV_FLAG_CAN_SET_TX_POWER BIT(10)
+#define MGMT_ADV_FLAG_HW_OFFLOAD BIT(11)
+#define MGMT_ADV_PARAM_DURATION BIT(12)
+#define MGMT_ADV_PARAM_TIMEOUT BIT(13)
+#define MGMT_ADV_PARAM_INTERVALS BIT(14)
+#define MGMT_ADV_PARAM_TX_POWER BIT(15)
+#define MGMT_ADV_PARAM_SCAN_RSP BIT(16)
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
MGMT_ADV_FLAG_SEC_CODED)
@@ -619,28 +630,28 @@ struct mgmt_cp_set_appearance {
#define MGMT_SET_APPEARANCE_SIZE 2
#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
-struct mgmt_rp_get_phy_confguration {
+struct mgmt_rp_get_phy_configuration {
__le32 supported_phys;
__le32 configurable_phys;
__le32 selected_phys;
} __packed;
#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
-#define MGMT_PHY_BR_1M_1SLOT 0x00000001
-#define MGMT_PHY_BR_1M_3SLOT 0x00000002
-#define MGMT_PHY_BR_1M_5SLOT 0x00000004
-#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
-#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
-#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
-#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
-#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
-#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
-#define MGMT_PHY_LE_1M_TX 0x00000200
-#define MGMT_PHY_LE_1M_RX 0x00000400
-#define MGMT_PHY_LE_2M_TX 0x00000800
-#define MGMT_PHY_LE_2M_RX 0x00001000
-#define MGMT_PHY_LE_CODED_TX 0x00002000
-#define MGMT_PHY_LE_CODED_RX 0x00004000
+#define MGMT_PHY_BR_1M_1SLOT BIT(0)
+#define MGMT_PHY_BR_1M_3SLOT BIT(1)
+#define MGMT_PHY_BR_1M_5SLOT BIT(2)
+#define MGMT_PHY_EDR_2M_1SLOT BIT(3)
+#define MGMT_PHY_EDR_2M_3SLOT BIT(4)
+#define MGMT_PHY_EDR_2M_5SLOT BIT(5)
+#define MGMT_PHY_EDR_3M_1SLOT BIT(6)
+#define MGMT_PHY_EDR_3M_3SLOT BIT(7)
+#define MGMT_PHY_EDR_3M_5SLOT BIT(8)
+#define MGMT_PHY_LE_1M_TX BIT(9)
+#define MGMT_PHY_LE_1M_RX BIT(10)
+#define MGMT_PHY_LE_2M_TX BIT(11)
+#define MGMT_PHY_LE_2M_RX BIT(12)
+#define MGMT_PHY_LE_CODED_TX BIT(13)
+#define MGMT_PHY_LE_CODED_RX BIT(14)
#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
@@ -656,7 +667,7 @@ struct mgmt_rp_get_phy_confguration {
MGMT_PHY_LE_CODED_RX)
#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
-struct mgmt_cp_set_phy_confguration {
+struct mgmt_cp_set_phy_configuration {
__le32 selected_phys;
} __packed;
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
@@ -680,11 +691,16 @@ struct mgmt_cp_set_blocked_keys {
#define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047
-#define MGMT_OP_READ_SECURITY_INFO 0x0048
-#define MGMT_READ_SECURITY_INFO_SIZE 0
-struct mgmt_rp_read_security_info {
- __le16 sec_len;
- __u8 sec[];
+#define MGMT_CAP_SEC_FLAGS 0x01
+#define MGMT_CAP_MAX_ENC_KEY_SIZE 0x02
+#define MGMT_CAP_SMP_MAX_ENC_KEY_SIZE 0x03
+#define MGMT_CAP_LE_TX_PWR 0x04
+
+#define MGMT_OP_READ_CONTROLLER_CAP 0x0048
+#define MGMT_READ_CONTROLLER_CAP_SIZE 0
+struct mgmt_rp_read_controller_cap {
+ __le16 cap_len;
+ __u8 cap[];
} __packed;
#define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049
@@ -780,6 +796,88 @@ struct mgmt_rp_remove_adv_monitor {
__le16 monitor_handle;
} __packed;
+#define MGMT_OP_ADD_EXT_ADV_PARAMS 0x0054
+struct mgmt_cp_add_ext_adv_params {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __le32 min_interval;
+ __le32 max_interval;
+ __s8 tx_power;
+} __packed;
+#define MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE 18
+struct mgmt_rp_add_ext_adv_params {
+ __u8 instance;
+ __s8 tx_power;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+} __packed;
+
+#define MGMT_OP_ADD_EXT_ADV_DATA 0x0055
+struct mgmt_cp_add_ext_adv_data {
+ __u8 instance;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[];
+} __packed;
+#define MGMT_ADD_EXT_ADV_DATA_SIZE 3
+struct mgmt_rp_add_ext_adv_data {
+ __u8 instance;
+} __packed;
+
+struct mgmt_adv_rssi_thresholds {
+ __s8 high_threshold;
+ __le16 high_threshold_timeout;
+ __s8 low_threshold;
+ __le16 low_threshold_timeout;
+ __u8 sampling_period;
+} __packed;
+
+#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056
+struct mgmt_cp_add_adv_patterns_monitor_rssi {
+ struct mgmt_adv_rssi_thresholds rssi;
+ __u8 pattern_count;
+ struct mgmt_adv_pattern patterns[];
+} __packed;
+#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8
+#define MGMT_OP_SET_MESH_RECEIVER 0x0057
+struct mgmt_cp_set_mesh {
+ __u8 enable;
+ __le16 window;
+ __le16 period;
+ __u8 num_ad_types;
+ __u8 ad_types[];
+} __packed;
+#define MGMT_SET_MESH_RECEIVER_SIZE 6
+
+#define MGMT_OP_MESH_READ_FEATURES 0x0058
+#define MGMT_MESH_READ_FEATURES_SIZE 0
+#define MESH_HANDLES_MAX 3
+struct mgmt_rp_mesh_read_features {
+ __le16 index;
+ __u8 max_handles;
+ __u8 used_handles;
+ __u8 handles[MESH_HANDLES_MAX];
+} __packed;
+
+#define MGMT_OP_MESH_SEND 0x0059
+struct mgmt_cp_mesh_send {
+ struct mgmt_addr_info addr;
+ __le64 instant;
+ __le16 delay;
+ __u8 cnt;
+ __u8 adv_data_len;
+ __u8 adv_data[];
+} __packed;
+#define MGMT_MESH_SEND_SIZE 19
+
+#define MGMT_OP_MESH_SEND_CANCEL 0x005A
+struct mgmt_cp_mesh_send_cancel {
+ __u8 handle;
+} __packed;
+#define MGMT_MESH_SEND_CANCEL_SIZE 1
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -840,6 +938,7 @@ struct mgmt_ev_device_connected {
#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
#define MGMT_DEV_DISCONN_REMOTE 0x03
#define MGMT_DEV_DISCONN_AUTH_FAILURE 0x04
+#define MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND 0x05
#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
struct mgmt_ev_device_disconnected {
@@ -877,9 +976,12 @@ struct mgmt_ev_auth_failed {
__u8 status;
} __packed;
-#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
-#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
-#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
+#define MGMT_DEV_FOUND_CONFIRM_NAME BIT(0)
+#define MGMT_DEV_FOUND_LEGACY_PAIRING BIT(1)
+#define MGMT_DEV_FOUND_NOT_CONNECTABLE BIT(2)
+#define MGMT_DEV_FOUND_INITIATED_CONN BIT(3)
+#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED BIT(4)
+#define MGMT_DEV_FOUND_SCAN_RSP BIT(5)
#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
@@ -1028,3 +1130,50 @@ struct mgmt_ev_adv_monitor_added {
struct mgmt_ev_adv_monitor_removed {
__le16 monitor_handle;
} __packed;
+
+#define MGMT_EV_CONTROLLER_SUSPEND 0x002d
+struct mgmt_ev_controller_suspend {
+ __u8 suspend_state;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_RESUME 0x002e
+struct mgmt_ev_controller_resume {
+ __u8 wake_reason;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_WAKE_REASON_NON_BT_WAKE 0x0
+#define MGMT_WAKE_REASON_UNEXPECTED 0x1
+#define MGMT_WAKE_REASON_REMOTE_WAKE 0x2
+
+#define MGMT_EV_ADV_MONITOR_DEVICE_FOUND 0x002f
+struct mgmt_ev_adv_monitor_device_found {
+ __le16 monitor_handle;
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_DEVICE_LOST 0x0030
+struct mgmt_ev_adv_monitor_device_lost {
+ __le16 monitor_handle;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_MESH_DEVICE_FOUND 0x0031
+struct mgmt_ev_mesh_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le64 instant;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+#define MGMT_EV_MESH_PACKET_CMPLT 0x0032
+struct mgmt_ev_mesh_pkt_cmplt {
+ __u8 handle;
+} __packed;
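
The BIT() rewrites above are purely cosmetic: MGMT_SETTING_LE is still 0x00000200, MGMT_PHY_BR_1M_1SLOT is still 0x00000001, and so on, so wire-format compatibility is unaffected. A small sketch of decoding a little-endian settings word as carried in mgmt replies (the helper name is hypothetical):

#include <linux/printk.h>
#include <net/bluetooth/mgmt.h>

/* Hypothetical decode helper: the masks are numerically unchanged, so a
 * current_settings value from a mgmt reply decodes exactly as before.
 */
static void example_decode_settings(__le32 le_settings)
{
	u32 settings = le32_to_cpu(le_settings);

	if (settings & MGMT_SETTING_LE)			/* BIT(9)  == 0x00000200 */
		pr_info("LE enabled\n");
	if (settings & MGMT_SETTING_ADVERTISING)	/* BIT(10) == 0x00000400 */
		pr_info("advertising enabled\n");
}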
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1aa2e14b6c94..f40ddb4264fc 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -46,6 +46,4 @@ struct sco_conninfo {
__u8 dev_class[3];
};
-#define SCO_CMSG_PKT_STATUS 0x01
-
#endif /* __SCO_H */
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index c8696a230b7d..9ce5ac2bfbad 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -15,8 +15,6 @@
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
-
#define AD_LACP_SLOW 0
#define AD_LACP_FAST 1
@@ -56,6 +54,8 @@ typedef enum {
AD_MUX_DETACHED, /* mux machine */
AD_MUX_WAITING, /* mux machine */
AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING, /* mux machine */
+ AD_MUX_DISTRIBUTING, /* mux machine */
AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
@@ -262,7 +262,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
struct bond_3ad_stats stats;
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+ atomic_t agg_select_timer; /* Timer to select aggregator after all adapters' handshakes */
u16 aggregator_identifier;
};
@@ -290,7 +290,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
}
/* ========== AD Exported functions to the main bonding code ========== */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
+void bond_3ad_initialize(struct bonding *bond);
void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
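
Making agg_select_timer an atomic_t lets the periodic 802.3ad machinery count it down without holding the mode lock against paths that re-arm it. A sketch of the intended countdown pattern, assuming the timer is armed elsewhere with atomic_set() (the helper name is hypothetical):

#include <linux/atomic.h>
#include <net/bond_3ad.h>

/* Hypothetical tick helper: advance the armed countdown by one and report
 * whether this tick crossed zero; atomic_cmpxchg() keeps a concurrent
 * re-arm from being lost to the decrement.
 */
static bool example_agg_timer_advance(struct ad_bond_info *ad_info)
{
	int val = atomic_read(&ad_info->agg_select_timer);

	if (!val)
		return false;	/* not armed */

	if (atomic_cmpxchg(&ad_info->agg_select_timer, val, val - 1) != val)
		return false;	/* raced with a re-arm, try next tick */

	return val - 1 == 0;
}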
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index f6af76c87a6c..9dc082b2d543 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -126,7 +126,7 @@ struct tlb_slave_info {
struct alb_bond_info {
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
u32 unbalanced_load;
- int tx_rebalance_counter;
+ atomic_t tx_rebalance_counter;
int lp_counter;
/* -------- rlb parameters -------- */
int rlb_enabled;
@@ -156,8 +156,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
struct sk_buff *skb);
struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 9d382f2f0bc5..473a0147769e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -7,6 +7,14 @@
#ifndef _NET_BOND_OPTIONS_H
#define _NET_BOND_OPTIONS_H
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+struct netlink_ext_ack;
+struct nlattr;
+
#define BOND_OPT_MAX_NAMELEN 32
#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
#define BOND_MODE_ALL_EX(x) (~(x))
@@ -64,19 +72,31 @@ enum {
BOND_OPT_AD_USER_PORT_KEY,
BOND_OPT_NUM_PEER_NOTIF_ALIAS,
BOND_OPT_PEER_NOTIF_DELAY,
+ BOND_OPT_LACP_ACTIVE,
+ BOND_OPT_MISSED_MAX,
+ BOND_OPT_NS_TARGETS,
+ BOND_OPT_PRIO,
+ BOND_OPT_COUPLED_CONTROL,
BOND_OPT_LAST
};
/* This structure is used for storing option values and for passing option
* values when changing an option. The logic when used as an arg is as follows:
- * - if string != NULL -> parse it, if the opt is RAW type then return it, else
- * return the parse result
- * - if string == NULL -> parse value
+ * - if value != ULLONG_MAX -> parse value
+ * - if string != NULL -> parse string
+ * - if the opt is RAW data and its length is less than maxlen,
+ * copy the data to extra storage
*/
+
+#define BOND_OPT_EXTRA_MAXLEN 16
struct bond_opt_value {
char *string;
u64 value;
u32 flags;
+ union {
+ char extra[BOND_OPT_EXTRA_MAXLEN];
+ struct net_device *slave_dev;
+ };
};
struct bonding;
@@ -100,7 +120,8 @@ struct bond_option {
};
int __bond_opt_set(struct bonding *bond, unsigned int option,
- struct bond_opt_value *val);
+ struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack);
int __bond_opt_set_notify(struct bonding *bond, unsigned int option,
struct bond_opt_value *val);
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
@@ -116,18 +137,29 @@ const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
* When value is ULLONG_MAX then string will be used.
*/
static inline void __bond_opt_init(struct bond_opt_value *optval,
- char *string, u64 value)
+ char *string, u64 value,
+ void *extra, size_t extra_len)
{
memset(optval, 0, sizeof(*optval));
optval->value = ULLONG_MAX;
- if (value == ULLONG_MAX)
- optval->string = string;
- else
+ if (value != ULLONG_MAX)
optval->value = value;
+ else if (string)
+ optval->string = string;
+
+ if (extra && extra_len <= BOND_OPT_EXTRA_MAXLEN)
+ memcpy(optval->extra, extra, extra_len);
}
-#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
-#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value, NULL, 0)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX, NULL, 0)
+#define bond_opt_initextra(optval, extra, extra_len) \
+ __bond_opt_init(optval, NULL, ULLONG_MAX, extra, extra_len)
+#define bond_opt_slave_initval(optval, slave_dev, value) \
+ __bond_opt_init(optval, NULL, value, slave_dev, sizeof(struct net_device *))
void bond_option_arp_ip_targets_clear(struct bonding *bond);
+#if IS_ENABLED(CONFIG_IPV6)
+void bond_option_ns_ip6_targets_clear(struct bonding *bond);
+#endif
#endif /* _NET_BOND_OPTIONS_H */
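
A usage sketch for the initializers above: set a numeric option through the extended __bond_opt_set(). BOND_OPT_MIIMON comes from the full option enum (only partially shown here), and passing NULL for the netlink attribute and extack mirrors how non-netlink callers would behave; treat both points, and the RTNL requirement, as assumptions:

#include <net/bond_options.h>

/* Hypothetical: set miimon = 100 outside of a netlink request; there is
 * no bad attribute or extack to report against, so both are NULL.
 * Assumes RTNL is held by the caller.
 */
static int example_set_miimon(struct bonding *bond)
{
	struct bond_opt_value newval;

	bond_opt_initval(&newval, 100);
	return __bond_opt_set(bond, BOND_OPT_MIIMON, &newval, NULL, NULL);
}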
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 7d132cc1e584..b61fb1aa3a56 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
*
@@ -7,9 +8,6 @@
* BUT, I'm the one who modified it for ethernet, so:
* (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
*
- * This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
- *
*/
#ifndef _NET_BONDING_H
@@ -29,8 +27,11 @@
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#include <net/bond_options.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
#define BOND_MAX_ARP_TARGETS 16
+#define BOND_MAX_NS_TARGETS BOND_MAX_ARP_TARGETS
#define BOND_DEFAULT_MIIMON 100
@@ -86,10 +87,8 @@
#define bond_for_each_slave_rcu(bond, pos, iter) \
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
-#ifdef CONFIG_XFRM_OFFLOAD
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
-#endif /* CONFIG_XFRM_OFFLOAD */
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -121,6 +120,7 @@ struct bond_params {
int xmit_policy;
int miimon;
u8 num_peer_notif;
+ u8 missed_max;
int arp_interval;
int arp_validate;
int arp_all_targets;
@@ -129,6 +129,7 @@ struct bond_params {
int updelay;
int downdelay;
int peer_notif_delay;
+ int lacp_active;
int lacp_fast;
unsigned int min_links;
int ad_select;
@@ -144,22 +145,22 @@ struct bond_params {
struct reciprocal_value reciprocal_packets_per_slave;
u16 ad_actor_sys_prio;
u16 ad_user_port_key;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+#endif
+ int coupled_control;
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
};
-struct bond_parm_tbl {
- char *modename;
- int mode;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- /* all three in jiffies */
+ /* all four in jiffies */
unsigned long last_link_up;
+ unsigned long last_tx;
unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
@@ -167,6 +168,7 @@ struct slave {
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */
+ rx_disabled:1, /* indicates whether slave's Rx is disabled */
should_notify:1, /* indicates whether the state changed */
should_notify_link:1; /* indicates whether the link changed */
u8 duplex;
@@ -175,6 +177,7 @@ struct slave {
u32 speed;
u16 queue_id;
u8 perm_hwaddr[MAX_ADDR_LEN];
+ int prio;
struct ad_slave_info *ad_info;
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -185,6 +188,11 @@ struct slave {
struct rtnl_link_stats64 slave_stats;
};
+static inline struct slave *to_slave(struct kobject *kobj)
+{
+ return container_of(kobj, struct slave, kobj);
+}
+
struct bond_up_slave {
unsigned int count;
struct rcu_head rcu;
@@ -196,6 +204,11 @@ struct bond_up_slave {
*/
#define BOND_LINK_NOCHANGE -1
+struct bond_ipsec {
+ struct list_head list;
+ struct xfrm_state *xs;
+};
+
/*
* Here are the locking policies for the two bonding locks:
* Get rcu_read_lock when reading or RTNL when writing slave list.
@@ -208,6 +221,7 @@ struct bonding {
struct bond_up_slave __rcu *usable_slaves;
struct bond_up_slave __rcu *all_slaves;
bool force_primary;
+ bool notifier_ctx;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
@@ -220,14 +234,14 @@ struct bonding {
*/
spinlock_t mode_lock;
spinlock_t stats_lock;
- u8 send_peer_notif;
+ u32 send_peer_notif;
u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- u32 rr_tx_counter;
+ u32 __percpu *rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
@@ -244,8 +258,11 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
#ifdef CONFIG_XFRM_OFFLOAD
- struct xfrm_state *xs;
+ struct list_head ipsec_list;
+ /* protecting ipsec_list */
+ spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
+ struct bpf_prog *xdp_prog;
};
#define bond_slave_get_rcu(dev) \
@@ -262,7 +279,7 @@ struct bond_vlan_tag {
unsigned short vlan_id;
};
-/**
+/*
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
* Caller must hold bond lock for read
@@ -334,7 +351,7 @@ static inline bool bond_uses_primary(struct bonding *bond)
static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
{
- struct slave *slave = rcu_dereference(bond->curr_active_slave);
+ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}
@@ -487,6 +504,15 @@ static inline int bond_is_ip_target_ok(__be32 addr)
return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
+{
+ return !ipv6_addr_any(addr) &&
+ !ipv6_addr_loopback(addr) &&
+ !ipv6_addr_is_multicast(addr);
+}
+#endif
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -512,6 +538,16 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->last_rx;
}
+static inline void slave_update_last_tx(struct slave *slave)
+{
+ WRITE_ONCE(slave->last_tx, jiffies);
+}
+
+static inline unsigned long slave_last_tx(struct slave *slave)
+{
+ return READ_ONCE(slave->last_tx);
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
@@ -534,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave,
bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 1;
+}
+
+static inline void bond_set_slave_tx_disabled_flags(struct slave *slave,
+ bool notify)
+{
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
}
static inline void bond_set_slave_active_flags(struct slave *slave,
@@ -541,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave,
{
bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 0;
+}
+
+static inline void bond_set_slave_rx_enabled_flags(struct slave *slave,
+ bool notify)
+{
+ slave->rx_disabled = 0;
}
static inline bool bond_is_slave_inactive(struct slave *slave)
@@ -548,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+static inline bool bond_is_slave_rx_disabled(struct slave *slave)
+{
+ return slave->rx_disabled;
+}
+
static inline void bond_propose_link_state(struct slave *slave, int state)
{
slave->link_new_state = state;
@@ -616,7 +673,7 @@ struct bond_net {
struct class_attribute class_attr_bonding_masters;
};
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
@@ -624,6 +681,7 @@ void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
+void bond_xdp_set_features(struct net_device *bond_dev);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
@@ -687,37 +745,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
}
/* Caller must hold rcu_read_lock() for read */
-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
- const u8 *mac)
+static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
struct slave *tmp;
bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return tmp;
-
- return NULL;
-}
-
-/* Caller must hold rcu_read_lock() for read */
-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
-{
- struct list_head *iter;
- struct slave *tmp;
- struct netdev_hw_addr *ha;
-
- bond_for_each_slave_rcu(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return true;
-
- if (netdev_uc_empty(bond->dev))
- return false;
-
- netdev_for_each_uc_addr(ha, bond->dev)
- if (ether_addr_equal_64bits(mac, ha->addr))
- return true;
-
return false;
}
@@ -737,22 +772,40 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
return -1;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
+{
+ struct in6_addr mcaddr;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if ((ipv6_addr_equal(&targets[i], ip)) ||
+ (ipv6_addr_equal(&mcaddr, ip)))
+ return i;
+ else if (ipv6_addr_any(&targets[i]))
+ break;
+ }
+
+ return -1;
+}
+#endif
+
/* exported from bond_main.c */
extern unsigned int bond_net_id;
-extern const struct bond_parm_tbl bond_lacp_tbl[];
-extern const struct bond_parm_tbl xmit_hashtype_tbl[];
-extern const struct bond_parm_tbl arp_validate_tbl[];
-extern const struct bond_parm_tbl arp_all_targets_tbl[];
-extern const struct bond_parm_tbl fail_over_mac_tbl[];
-extern const struct bond_parm_tbl pri_reselect_tbl[];
-extern struct bond_parm_tbl ad_select_tbl[];
/* exported from bond_netlink.c */
extern struct rtnl_link_ops bond_link_ops;
+/* exported from bond_sysfs_slave.c */
+extern const struct sysfs_ops slave_sysfs_ops;
+
+/* exported from bond_3ad.c */
+extern const u8 lacpdu_mcast_addr[];
+
static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
return NET_XMIT_DROP;
}
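
slave_update_last_tx() and slave_last_tx() above pair WRITE_ONCE() with READ_ONCE() because the timestamp is written from the transmit path and read from the monitor without a common lock. A sketch of the read side, in the style of an ARP-monitor staleness check (the helper and the interval choice are hypothetical):

#include <linux/jiffies.h>
#include <net/bonding.h>

/* Hypothetical: has this slave transmitted within the last arp_interval?
 * slave_last_tx() reads the timestamp locklessly via READ_ONCE().
 */
static bool example_slave_tx_recent(struct bonding *bond, struct slave *slave)
{
	unsigned long last = slave_last_tx(slave);

	return time_in_range(jiffies, last,
			     last + msecs_to_jiffies(bond->params.arp_interval));
}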
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 5036c94c0503..2926f1f00d65 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -3,17 +3,30 @@
#ifndef _BPF_SK_STORAGE_H
#define _BPF_SK_STORAGE_H
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_local_storage.h>
+
struct sock;
void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
+struct bpf_local_storage_elem;
struct bpf_sk_storage_diag;
struct sk_buff;
struct nlattr;
-struct sock;
#ifdef CONFIG_BPF_SYSCALL
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index b001fa91c14e..9b09acac538e 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -16,6 +16,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>
+#include <net/xdp.h>
/* 0 - Reserved to indicate value not set
* 1..NR_CPUS - Reserved for sender_cpu
@@ -23,6 +24,8 @@
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+#define BUSY_POLL_BUDGET 8
+
#ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct;
@@ -31,19 +34,23 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
static inline bool net_busy_loop_on(void)
{
- return sysctl_net_busy_poll;
+ return READ_ONCE(sysctl_net_busy_poll);
}
static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return sk->sk_ll_usec && !signal_pending(current);
+ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}
bool sk_busy_loop_end(void *p, unsigned long start_time);
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
- void *loop_end_arg);
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+
+void napi_busy_loop_rcu(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget);
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
@@ -105,7 +112,9 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
if (napi_id >= MIN_NAPI_ID)
- napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
+ napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
+ READ_ONCE(sk->sk_prefer_busy_poll),
+ READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
@@ -126,18 +135,47 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+ sk_rx_queue_update(sk, skb);
+}
+
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
sk_rx_queue_set(sk, skb);
}
+static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!READ_ONCE(sk->sk_napi_id))
+ WRITE_ONCE(sk->sk_napi_id, napi_id);
+#endif
+}
+
/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!READ_ONCE(sk->sk_napi_id))
- WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ __sk_mark_napi_id_once(sk, skb->napi_id);
+#endif
+}
+
+static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
+ const struct xdp_buff *xdp)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}
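
sk_mark_napi_id_once_xdp() above records the NAPI id of the receive queue an xdp_buff arrived on, but only if the socket has none yet, so busy polling can later target that NAPI instance. A sketch of where an XDP-fed receive path might call it (everything except the declared helper is hypothetical):

#include <net/busy_poll.h>
#include <net/xdp.h>

/* Hypothetical receive step: remember the originating NAPI instance the
 * first time a buffer reaches this socket, then hand the frame on.
 */
static void example_rx_one(struct sock *sk, const struct xdp_buff *xdp)
{
	sk_mark_napi_id_once_xdp(sk, xdp);
	/* ... copy or attach the frame to the socket's queue ... */
}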
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 48ecca8530ff..b655d8666f55 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
* The link_support layer is used to add any Link Layer specific
* framing.
*/
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
struct cflayer *link_support, int head_room,
struct cflayer **layer, int (**rcv_func)(
struct sk_buff *, struct net_device *,
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
deleted file mode 100644
index 552cf68d28d2..000000000000
--- a/include/net/caif/caif_hsi.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / daniel.martensson@stericsson.com
- * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
- */
-
-#ifndef CAIF_HSI_H_
-#define CAIF_HSI_H_
-
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_device.h>
-#include <linux/atomic.h>
-
-/*
- * Maximum number of CAIF frames that can reside in the same HSI frame.
- */
-#define CFHSI_MAX_PKTS 15
-
-/*
- * Maximum number of bytes used for the frame that can be embedded in the
- * HSI descriptor.
- */
-#define CFHSI_MAX_EMB_FRM_SZ 96
-
-/*
- * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFHSI_DBG_PREFILL 0
-
-/* Structure describing a HSI packet descriptor. */
-#pragma pack(1) /* Byte alignment. */
-struct cfhsi_desc {
- u8 header;
- u8 offset;
- u16 cffrm_len[CFHSI_MAX_PKTS];
- u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
-};
-#pragma pack() /* Default alignment. */
-
-/* Size of the complete HSI packet descriptor. */
-#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
-
-/*
- * Size of the complete HSI packet descriptor excluding the optional embedded
- * CAIF frame.
- */
-#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
-
-/*
- * Maximum bytes transferred in one transfer.
- */
-#define CFHSI_MAX_CAIF_FRAME_SZ 4096
-
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
-
-/* Size of the complete HSI TX buffer. */
-#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Size of the complete HSI RX buffer. */
-#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Bitmasks for the HSI descriptor. */
-#define CFHSI_PIGGY_DESC (0x01 << 7)
-
-#define CFHSI_TX_STATE_IDLE 0
-#define CFHSI_TX_STATE_XFER 1
-
-#define CFHSI_RX_STATE_DESC 0
-#define CFHSI_RX_STATE_PAYLOAD 1
-
-/* Bitmasks for power management. */
-#define CFHSI_WAKE_UP 0
-#define CFHSI_WAKE_UP_ACK 1
-#define CFHSI_WAKE_DOWN_ACK 2
-#define CFHSI_AWAKE 3
-#define CFHSI_WAKELOCK_HELD 4
-#define CFHSI_SHUTDOWN 5
-#define CFHSI_FLUSH_FIFO 6
-
-#ifndef CFHSI_INACTIVITY_TOUT
-#define CFHSI_INACTIVITY_TOUT (1 * HZ)
-#endif /* CFHSI_INACTIVITY_TOUT */
-
-#ifndef CFHSI_WAKE_TOUT
-#define CFHSI_WAKE_TOUT (3 * HZ)
-#endif /* CFHSI_WAKE_TOUT */
-
-#ifndef CFHSI_MAX_RX_RETRIES
-#define CFHSI_MAX_RX_RETRIES (10 * HZ)
-#endif
-
-/* Structure implemented by the CAIF HSI driver. */
-struct cfhsi_cb_ops {
- void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
-};
-
-/* Structure implemented by HSI device. */
-struct cfhsi_ops {
- int (*cfhsi_up) (struct cfhsi_ops *dev);
- int (*cfhsi_down) (struct cfhsi_ops *dev);
- int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
- int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
- int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
- int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
- int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
- struct cfhsi_cb_ops *cb_ops;
-};
-
-/* Structure holds status of received CAIF frames processing */
-struct cfhsi_rx_state {
- int state;
- int nfrms;
- int pld_len;
- int retries;
- bool piggy_desc;
-};
-
-/* Priority mapping */
-enum {
- CFHSI_PRIO_CTL = 0,
- CFHSI_PRIO_VI,
- CFHSI_PRIO_VO,
- CFHSI_PRIO_BEBK,
- CFHSI_PRIO_LAST,
-};
-
-struct cfhsi_config {
- u32 inactivity_timeout;
- u32 aggregation_timeout;
- u32 head_align;
- u32 tail_align;
- u32 q_high_mark;
- u32 q_low_mark;
-};
-
-/* Structure implemented by CAIF HSI drivers. */
-struct cfhsi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead[CFHSI_PRIO_LAST];
- struct cfhsi_cb_ops cb_ops;
- struct cfhsi_ops *ops;
- int tx_state;
- struct cfhsi_rx_state rx_state;
- struct cfhsi_config cfg;
- int rx_len;
- u8 *rx_ptr;
- u8 *tx_buf;
- u8 *rx_buf;
- u8 *rx_flip_buf;
- spinlock_t lock;
- int flow_off_sent;
- struct list_head list;
- struct work_struct wake_up_work;
- struct work_struct wake_down_work;
- struct work_struct out_of_sync_work;
- struct workqueue_struct *wq;
- wait_queue_head_t wake_up_wait;
- wait_queue_head_t wake_down_wait;
- wait_queue_head_t flush_fifo_wait;
- struct timer_list inactivity_timer;
- struct timer_list rx_slowpath_timer;
-
- /* TX aggregation */
- int aggregation_len;
- struct timer_list aggregation_timer;
-
- unsigned long bits;
-};
-extern struct platform_driver cfhsi_driver;
-
-/**
- * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
- * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
- * taking the HSI wakeline down, in milliseconds.
- * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
- * enum ifla_caif_hsi is used to specify the configuration attributes.
- */
-enum ifla_caif_hsi {
- __IFLA_CAIF_HSI_UNSPEC,
- __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- __IFLA_CAIF_HSI_HEAD_ALIGN,
- __IFLA_CAIF_HSI_TAIL_ALIGN,
- __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- __IFLA_CAIF_HSI_QLOW_WATERMARK,
- __IFLA_CAIF_HSI_MAX
-};
-
-struct cfhsi_ops *cfhsi_get_ops(void);
-
-#endif /* CAIF_HSI_H_ */
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
deleted file mode 100644
index a0bf4cbce71b..000000000000
--- a/include/net/caif/caif_spi.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
- */
-
-#ifndef CAIF_SPI_H_
-#define CAIF_SPI_H_
-
-#include <net/caif/caif_device.h>
-
-#define SPI_CMD_WR 0x00
-#define SPI_CMD_RD 0x01
-#define SPI_CMD_EOT 0x02
-#define SPI_CMD_IND 0x04
-
-#define SPI_DMA_BUF_LEN 8192
-
-#define WL_SZ 2 /* 16 bits. */
-#define SPI_CMD_SZ 4 /* 32 bits. */
-#define SPI_IND_SZ 4 /* 32 bits. */
-
-#define SPI_XFER 0
-#define SPI_SS_ON 1
-#define SPI_SS_OFF 2
-#define SPI_TERMINATE 3
-
-/* Minimum time between different levels is 50 microseconds. */
-#define MIN_TRANSITION_TIME_USEC 50
-
-/* Defines for calculating duration of SPI transfers for a particular
- * number of bytes.
- */
-#define SPI_MASTER_CLK_MHZ 13
-#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
-
-/* Normally this should be aligned on the modem in order to benefit from full
- * duplex transfers. However a size of 8188 provokes errors when running with
- * the modem. These errors occur when packet sizes approaches 4 kB of data.
- */
-#define CAIF_MAX_SPI_FRAME 4092
-
-/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
- * This number should correspond with the modem setting. The application side
- * CAIF accepts any number of embedded downlink CAIF frames.
- */
-#define CAIF_MAX_SPI_PKTS 9
-
-/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFSPI_DBG_PREFILL 0
-
-/* Structure describing a SPI transfer. */
-struct cfspi_xfer {
- u16 tx_dma_len;
- u16 rx_dma_len;
- void *va_tx[2];
- dma_addr_t pa_tx[2];
- void *va_rx;
- dma_addr_t pa_rx;
-};
-
-/* Structure implemented by the SPI interface. */
-struct cfspi_ifc {
- void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
- void (*xfer_done_cb) (struct cfspi_ifc *ifc);
- void *priv;
-};
-
-/* Structure implemented by SPI clients. */
-struct cfspi_dev {
- int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
- void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
- struct cfspi_ifc *ifc;
- char *name;
- u32 clk_mhz;
- void *priv;
-};
-
-/* Enumeration describing the CAIF SPI state. */
-enum cfspi_state {
- CFSPI_STATE_WAITING = 0,
- CFSPI_STATE_AWAKE,
- CFSPI_STATE_FETCH_PKT,
- CFSPI_STATE_GET_NEXT,
- CFSPI_STATE_INIT_XFER,
- CFSPI_STATE_WAIT_ACTIVE,
- CFSPI_STATE_SIG_ACTIVE,
- CFSPI_STATE_WAIT_XFER_DONE,
- CFSPI_STATE_XFER_DONE,
- CFSPI_STATE_WAIT_INACTIVE,
- CFSPI_STATE_SIG_INACTIVE,
- CFSPI_STATE_DELIVER_PKT,
- CFSPI_STATE_MAX,
-};
-
-/* Structure implemented by SPI physical interfaces. */
-struct cfspi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead;
- struct sk_buff_head chead;
- u16 cmd;
- u16 tx_cpck_len;
- u16 tx_npck_len;
- u16 rx_cpck_len;
- u16 rx_npck_len;
- struct cfspi_ifc ifc;
- struct cfspi_xfer xfer;
- struct cfspi_dev *dev;
- unsigned long state;
- struct work_struct work;
- struct workqueue_struct *wq;
- struct list_head list;
- int flow_off_sent;
- u32 qd_low_mark;
- u32 qd_high_mark;
- struct completion comp;
- wait_queue_head_t wait;
- spinlock_t lock;
- bool flow_stop;
- bool slave;
- bool slave_talked;
-#ifdef CONFIG_DEBUG_FS
- enum cfspi_state dbg_state;
- u16 pcmd;
- u16 tx_ppck_len;
- u16 rx_ppck_len;
- struct dentry *dbgfs_dir;
- struct dentry *dbgfs_state;
- struct dentry *dbgfs_frame;
-#endif /* CONFIG_DEBUG_FS */
-};
-
-extern int spi_frm_align;
-extern int spi_up_head_align;
-extern int spi_up_tail_align;
-extern int spi_down_head_align;
-extern int spi_down_tail_align;
-extern struct platform_driver cfspi_spi_driver;
-
-void cfspi_dbg_state(struct cfspi *cfspi, int state);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_spi_remove(struct platform_device *pdev);
-int cfspi_spi_probe(struct platform_device *pdev);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-void cfspi_xfer(struct work_struct *work);
-
-#endif /* CAIF_SPI_H_ */
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 2aa5e91d8457..8819ff4db35a 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
* @fcs: Specify if checksum is used in CAIF Framing Layer.
* @head_room: Head space needed by link specific protocol.
*/
-void
+int
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index 14a55e03bb3c..67cce8757175 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -9,4 +9,5 @@
#include <net/caif/caif_layer.h>
struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
#endif
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index bd5440977f7f..5ee7b322e18b 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -33,9 +33,6 @@ struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
int mtu_size);
struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
-void cfsrvl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
- int phyid);
-
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
void cfsrvl_init(struct cfsrvl *service,
diff --git a/include/net/cfg80211-wext.h b/include/net/cfg80211-wext.h
index ad77caf2ffde..0ee36d97e068 100644
--- a/include/net/cfg80211-wext.h
+++ b/include/net/cfg80211-wext.h
@@ -19,34 +19,34 @@
*/
int cfg80211_wext_giwname(struct net_device *dev,
struct iw_request_info *info,
- char *name, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
- u32 *mode, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
- u32 *mode, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_siwscan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
int cfg80211_wext_giwscan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_giwrange(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_siwrts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rts, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_giwrts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rts, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_siwfrag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *frag, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_giwfrag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *frag, char *extra);
+ union iwreq_data *wrqu, char *extra);
int cfg80211_wext_giwretry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *retry, char *extra);
+ union iwreq_data *wrqu, char *extra);
#endif /* __NET_CFG80211_WEXT_H */
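
The prototypes above were unified to take union iwreq_data, matching the generic iw_handler calling convention; each handler then picks the union member it actually uses (wrqu->name, wrqu->mode, wrqu->rts, and so on). An illustrative handler body under that convention, not the in-tree implementation:

#include <linux/wireless.h>
#include <linux/netdevice.h>

/* Illustrative only: a giwrts-style handler now reaches its parameter
 * through the union instead of a dedicated struct iw_param argument.
 */
static int example_giwrts(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	wrqu->rts.value = 2347;		/* illustrative RTS threshold */
	wrqu->rts.fixed = 1;
	wrqu->rts.disabled = 0;

	return 0;
}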
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index d9e6b9fbd95b..2e2be4fd2bb6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,9 +7,11 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
+#include <linux/ethtool.h>
+#include <uapi/linux/rfkill.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/list.h>
@@ -20,6 +22,7 @@
#include <linux/if_ether.h>
#include <linux/ieee80211.h>
#include <linux/net.h>
+#include <linux/rfkill.h>
#include <net/regulatory.h>
/**
@@ -49,7 +52,7 @@
* such wiphy can have zero, one, or many virtual interfaces associated with
* it, which need to be identified as such by pointing the network interface's
* @ieee80211_ptr pointer to a &struct wireless_dev which further describes
- * the wireless part of the interface, normally this struct is embedded in the
+ * the wireless part of the interface. Normally this struct is embedded in the
* network interface's private data area. Drivers can optionally allow creating
* or destroying virtual interfaces on the fly, but without at least one
* interface, or the ability to create some, the wireless device isn't useful.
@@ -73,6 +76,8 @@ struct wiphy;
* @IEEE80211_CHAN_DISABLED: This channel is disabled.
* @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
* sending probe requests or beaconing.
+ * @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this
+ * channel.
* @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
* is not permitted.
@@ -96,12 +101,35 @@ struct wiphy;
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
- *
+ * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
+ * this flag indicates that a 320 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
+ * @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
+ * @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP
+ * not permitted using this channel
+ * @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP
+ * not permitted using this channel
+ * @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
+ * mode even in the presence of other (regulatory) restrictions,
+ * including when it is otherwise disabled.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
IEEE80211_CHAN_NO_IR = 1<<1,
- /* hole at 1<<2 */
+ IEEE80211_CHAN_PSD = 1<<2,
IEEE80211_CHAN_RADAR = 1<<3,
IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
@@ -113,6 +141,17 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
IEEE80211_CHAN_NO_HE = 1<<13,
+ IEEE80211_CHAN_1MHZ = 1<<14,
+ IEEE80211_CHAN_2MHZ = 1<<15,
+ IEEE80211_CHAN_4MHZ = 1<<16,
+ IEEE80211_CHAN_8MHZ = 1<<17,
+ IEEE80211_CHAN_16MHZ = 1<<18,
+ IEEE80211_CHAN_NO_320MHZ = 1<<19,
+ IEEE80211_CHAN_NO_EHT = 1<<20,
+ IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
+ IEEE80211_CHAN_CAN_MONITOR = 1<<24,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -146,6 +185,7 @@ enum ieee80211_channel_flags {
* on this channel.
* @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
* @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
+ * @psd: power spectral density (in dBm)
*/
struct ieee80211_channel {
enum nl80211_band band;
@@ -162,6 +202,7 @@ struct ieee80211_channel {
enum nl80211_dfs_state dfs_state;
unsigned long dfs_state_entered;
unsigned int dfs_cac_ms;
+ s8 psd;
};
/**
@@ -238,7 +279,7 @@ enum ieee80211_privacy {
* are only for driver use when pointers to this structure are
* passed around.
*
- * @flags: rate-specific flags
+ * @flags: rate-specific flags from &enum ieee80211_rate_flags
* @bitrate: bitrate in units of 100 Kbps
* @hw_value: driver/hardware value for this rate
* @hw_value_short: driver/hardware value for this rate when
@@ -254,13 +295,23 @@ struct ieee80211_rate {
* struct ieee80211_he_obss_pd - AP settings for spatial reuse
*
* @enable: is the feature enabled.
+ * @sr_ctrl: The SR Control field of SRP element.
+ * @non_srg_max_offset: non-SRG maximum tx power offset
* @min_offset: minimal tx power offset an associated station shall use
* @max_offset: maximum tx power offset an associated station shall use
+ * @bss_color_bitmap: bitmap that indicates the BSS color values used by
+ * members of the SRG
+ * @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
+ * used by members of the SRG
*/
struct ieee80211_he_obss_pd {
bool enable;
+ u8 sr_ctrl;
+ u8 non_srg_max_offset;
u8 min_offset;
u8 max_offset;
+ u8 bss_color_bitmap[8];
+ u8 partial_bssid_bitmap[8];
};
/**
@@ -277,19 +328,6 @@ struct cfg80211_he_bss_color {
};
/**
- * struct ieee80211_he_bss_color - AP settings for BSS coloring
- *
- * @color: the current color.
- * @disabled: is the feature disabled.
- * @partial: define the AID equation.
- */
-struct ieee80211_he_bss_color {
- u8 color;
- bool disabled;
- bool partial;
-};
-
-/**
* struct ieee80211_sta_ht_cap - STA's HT capabilities
*
* This structure describes most essential parameters needed
@@ -346,7 +384,63 @@ struct ieee80211_sta_he_cap {
};
/**
- * struct ieee80211_sband_iftype_data
+ * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS
+ *
+ * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS
+ * and NSS Set field"
+ *
+ * @only_20mhz: MCS/NSS support for 20 MHz-only STA.
+ * @bw: MCS/NSS support for 80, 160 and 320 MHz
+ * @bw._80: MCS/NSS support for BW <= 80 MHz
+ * @bw._160: MCS/NSS support for BW = 160 MHz
+ * @bw._320: MCS/NSS support for BW = 320 MHz
+ */
+struct ieee80211_eht_mcs_nss_supp {
+ union {
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
+ struct {
+ struct ieee80211_eht_mcs_nss_supp_bw _80;
+ struct ieee80211_eht_mcs_nss_supp_bw _160;
+ struct ieee80211_eht_mcs_nss_supp_bw _320;
+ } __packed bw;
+ } __packed;
+} __packed;
+
+#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32
+
+/**
+ * struct ieee80211_sta_eht_cap - STA's EHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11be EHT capabilities for a STA.
+ *
+ * @has_eht: true iff EHT data is valid.
+ * @eht_cap_elem: Fixed portion of the eht capabilities element.
+ * @eht_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @eht_ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_eht_cap {
+ bool has_eht;
+ struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
+ u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
+};
+
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
+#ifdef __CHECKER__
+/*
+ * This is used to mark the sband->iftype_data pointer which is supposed
+ * to be an array with special access semantics (per iftype), but a lot
+ * of code got it wrong in the past, so with this marking sparse will be
+ * noisy when the pointer is used directly.
+ */
+# define __iftd __attribute__((noderef, address_space(__iftype_data)))
+#else
+# define __iftd
+#endif /* __CHECKER__ */
+
+/**
+ * struct ieee80211_sband_iftype_data - sband data per interface type
*
* This structure encapsulates sband data that is relevant for the
* interface types defined in @types_mask. Each type in the
@@ -356,11 +450,20 @@ struct ieee80211_sta_he_cap {
* @he_cap: holds the HE capabilities
* @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
* 6 GHz band channel (and 0 may be a valid value).
+ * @eht_cap: STA's EHT capabilities
+ * @vendor_elems: vendor element(s) to advertise
+ * @vendor_elems.data: vendor element(s) data
+ * @vendor_elems.len: vendor element(s) length
*/
struct ieee80211_sband_iftype_data {
u16 types_mask;
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+ struct {
+ const u8 *data;
+ unsigned int len;
+ } vendor_elems;
};
/**
@@ -423,7 +526,7 @@ struct ieee80211_edmg {
* This structure describes most essential parameters needed
* to describe 802.11ah S1G capabilities for a STA.
*
- * @s1g_supported: is STA an S1G STA
+ * @s1g: is STA an S1G STA
* @cap: S1G capabilities information
* @nss_mcs: Supported NSS MCS set
*/
@@ -449,7 +552,9 @@ struct ieee80211_sta_s1g_cap {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @s1g_cap: S1G capabilities in this band
* @edmg_cap: EDMG capabilities in this band
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -467,10 +572,48 @@ struct ieee80211_supported_band {
struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_edmg edmg_cap;
u16 n_iftype_data;
- const struct ieee80211_sband_iftype_data *iftype_data;
+ const struct ieee80211_sband_iftype_data __iftd *iftype_data;
};
/**
+ * _ieee80211_set_sband_iftype_data - set sband iftype data array
+ * @sband: the sband to initialize
+ * @iftd: the iftype data array pointer
+ * @n_iftd: the length of the iftype data array
+ *
+ * Set the sband iftype data array; use this where the length cannot
+ * be derived from the ARRAY_SIZE() of the argument, but prefer
+ * ieee80211_set_sband_iftype_data() where it can be used.
+ */
+static inline void
+_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *sband,
+ const struct ieee80211_sband_iftype_data *iftd,
+ u16 n_iftd)
+{
+ sband->iftype_data = (const void __iftd __force *)iftd;
+ sband->n_iftype_data = n_iftd;
+}
+
+/**
+ * ieee80211_set_sband_iftype_data - set sband iftype data array
+ * @sband: the sband to initialize
+ * @iftd: the iftype data array
+ */
+#define ieee80211_set_sband_iftype_data(sband, iftd) \
+ _ieee80211_set_sband_iftype_data(sband, iftd, ARRAY_SIZE(iftd))
+
+/**
+ * for_each_sband_iftype_data - iterate sband iftype data entries
+ * @sband: the sband whose iftype_data array to iterate
+ * @i: iterator counter
+ * @iftd: iftype data pointer to set
+ */
+#define for_each_sband_iftype_data(sband, i, iftd) \
+ for (i = 0, iftd = (const void __force *)&(sband)->iftype_data[i]; \
+ i < (sband)->n_iftype_data; \
+ i++, iftd = (const void __force *)&(sband)->iftype_data[i])
+
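For illustration (editor's sketch, not part of the patch): how the new setter and iterator are intended to be used together in a driver. The my_* names are hypothetical.

static const struct ieee80211_sband_iftype_data my_iftd[] = {
	{
		.types_mask = BIT(NL80211_IFTYPE_STATION) |
			      BIT(NL80211_IFTYPE_AP),
		/* .he_cap, .eht_cap etc. would be filled in by the driver */
	},
};

static void my_init_sband(struct ieee80211_supported_band *sband)
{
	const struct ieee80211_sband_iftype_data *iftd;
	int i;

	/* length is derived via ARRAY_SIZE() inside the wrapper macro */
	ieee80211_set_sband_iftype_data(sband, my_iftd);

	/* walk the entries without dereferencing the __iftd-marked pointer */
	for_each_sband_iftype_data(sband, i, iftd)
		pr_debug("iftype_data[%d]: types_mask=0x%x\n",
			 i, iftd->types_mask);
}
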
+/**
* ieee80211_get_sband_iftype_data - return sband data for a given iftype
* @sband: the sband to search for the STA on
* @iftype: enum nl80211_iftype
@@ -481,15 +624,16 @@ static inline const struct ieee80211_sband_iftype_data *
ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
u8 iftype)
{
+ const struct ieee80211_sband_iftype_data *data;
int i;
if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
return NULL;
- for (i = 0; i < sband->n_iftype_data; i++) {
- const struct ieee80211_sband_iftype_data *data =
- &sband->iftype_data[i];
+ if (iftype == NL80211_IFTYPE_AP_VLAN)
+ iftype = NL80211_IFTYPE_AP;
+ for_each_sband_iftype_data(sband, i, data) {
if (data->types_mask & BIT(iftype))
return data;
}
@@ -518,18 +662,6 @@ ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
}
/**
- * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
- * @sband: the sband to search for the STA on
- *
- * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found
- */
-static inline const struct ieee80211_sta_he_cap *
-ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
-{
- return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION);
-}
-
-/**
* ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
* @sband: the sband to search for the STA on
* @iftype: the iftype to search for
@@ -550,6 +682,26 @@ ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
}
/**
+ * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (data && data->eht_cap.has_eht)
+ return &data->eht_cap;
+
+ return NULL;
+}
+
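A minimal usage sketch (hypothetical my_sband_has_eht_sta helper; not part of the patch):

static bool my_sband_has_eht_sta(const struct ieee80211_supported_band *sband)
{
	const struct ieee80211_sta_eht_cap *eht_cap =
		ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_STATION);

	/* a non-NULL return already implies eht_cap->has_eht */
	return eht_cap != NULL;
}
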
+/**
* wiphy_read_of_freq_limits - read frequency limits from device tree
*
* @wiphy: the wireless device to get extra limits for
@@ -660,6 +812,9 @@ struct key_params {
* chan will define the primary channel and all other
* parameters are ignored.
* @freq1_offset: offset from @center_freq1, in kHz
+ * @punctured: mask of the punctured 20 MHz subchannels, with
+ * bits turned on being disabled (punctured); numbered
+ * from lower to higher frequency (like in the spec)
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -668,6 +823,7 @@ struct cfg80211_chan_def {
u32 center_freq2;
struct ieee80211_edmg edmg;
u16 freq1_offset;
+ u16 punctured;
};
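As a worked example of the @punctured numbering (illustrative values only; a real chandef would also need @chan set):

struct cfg80211_chan_def def = {
	.width = NL80211_CHAN_WIDTH_80,
	.center_freq1 = 5210,	/* 80 MHz block covering 5170-5250 MHz */
	.punctured = BIT(1),	/* bit 0 = lowest 20 MHz, so this disables 5190-5210 MHz */
};
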
/*
@@ -678,7 +834,10 @@ struct cfg80211_bitrate_mask {
u32 legacy;
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ u16 he_mcs[NL80211_HE_NSS_MAX];
enum nl80211_txrate_gi gi;
+ enum nl80211_he_gi he_gi;
+ enum nl80211_he_ltf he_ltf;
} control[NUM_NL80211_BANDS];
};
@@ -721,7 +880,35 @@ struct cfg80211_tid_cfg {
struct cfg80211_tid_config {
const u8 *peer;
u32 n_tid_conf;
- struct cfg80211_tid_cfg tid_conf[];
+ struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf);
+};
+
+/**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+};
+
+/**
+ * struct cfg80211_set_hw_timestamp - enable/disable HW timestamping
+ * @macaddr: peer MAC address. NULL to enable/disable HW timestamping for all
+ * addresses.
+ * @enable: if set, enable HW timestamping for the specified MAC address.
+ * Otherwise disable HW timestamping for the specified MAC address.
+ */
+struct cfg80211_set_hw_timestamp {
+ const u8 *macaddr;
+ bool enable;
};
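A two-line sketch of the %NULL-means-all convention (hypothetical usage, not part of the patch):

struct cfg80211_set_hw_timestamp hwts = {
	.macaddr = NULL,	/* NULL: apply to all addresses */
	.enable = true,
};
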
/**
@@ -777,7 +964,8 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
chandef1->width == chandef2->width &&
chandef1->center_freq1 == chandef2->center_freq1 &&
chandef1->freq1_offset == chandef2->freq1_offset &&
- chandef1->center_freq2 == chandef2->center_freq2);
+ chandef1->center_freq2 == chandef2->center_freq2 &&
+ chandef1->punctured == chandef2->punctured);
}
/**
@@ -806,6 +994,15 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
const struct cfg80211_chan_def *chandef2);
/**
+ * nl80211_chan_width_to_mhz - get the channel width in MHz
+ * @chan_width: the channel width from &enum nl80211_chan_width
+ *
+ * Return: channel width in MHz if the chan_width from &enum nl80211_chan_width
+ * is valid. -1 otherwise.
+ */
+int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width);
+
+/**
* cfg80211_chandef_valid - check if a channel definition is valid
* @chandef: the channel definition to check
* Return: %true if the channel definition is valid. %false otherwise.
@@ -836,19 +1033,65 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
enum nl80211_iftype iftype);
/**
- * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and whether we
+ * can/need to start CAC on such a channel
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ *
+ * Return: true if all channels are available and at least
+ * one channel requires CAC (NL80211_DFS_USABLE)
+ */
+bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given
+ * channel definition
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ *
+ * Returns: DFS CAC time (in ms) which applies for this channel definition
+ */
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_chandef_primary - calculate primary 40/80/160 MHz freq
+ * @chandef: chandef to calculate for
+ * @primary_chan_width: primary channel width to calculate center for
+ * @punctured: punctured sub-channel bitmap, will be recalculated
+ * according to the new bandwidth, can be %NULL
+ *
+ * Returns: the primary 40/80/160 MHz channel center frequency, or -1
+ * for errors, updating the punctured bitmap
+ */
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured);
+
+/**
+ * nl80211_send_chandef - send the channel definition
+ * @msg: the msg to send the channel definition in
+ * @chandef: the channel definition to send
+ *
+ * Returns: 0 if the channel definition was sent to msg, < 0 on error
+ */
+int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef);
+
+/**
+ * ieee80211_chanwidth_rate_flags - return rate flags for channel width
+ * @width: the channel width of the channel
*
* In some channel types, not all rates may be used - for example CCK
* rates may not be used in 5/10 MHz channels.
*
- * @chandef: channel definition for the channel
- *
- * Returns: rate flags which apply for this channel
+ * Returns: rate flags which apply for this channel width
*/
static inline enum ieee80211_rate_flags
-ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+ieee80211_chanwidth_rate_flags(enum nl80211_chan_width width)
{
- switch (chandef->width) {
+ switch (width) {
case NL80211_CHAN_WIDTH_5:
return IEEE80211_RATE_SUPPORTS_5MHZ;
case NL80211_CHAN_WIDTH_10:
@@ -860,6 +1103,20 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
}
/**
+ * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ * @chandef: channel definition for the channel
+ *
+ * See ieee80211_chanwidth_rate_flags().
+ *
+ * Returns: rate flags which apply for this channel
+ */
+static inline enum ieee80211_rate_flags
+ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+{
+ return ieee80211_chanwidth_rate_flags(chandef->width);
+}
+
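Sketch of the typical filtering pattern built on this helper (hypothetical my_rate_allowed, not part of the patch):

static bool my_rate_allowed(const struct ieee80211_rate *rate,
			    enum nl80211_chan_width width)
{
	u32 required = ieee80211_chanwidth_rate_flags(width);

	/* every flag demanded by the width must be set on the rate */
	return (rate->flags & required) == required;
}
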
+/**
* ieee80211_chandef_max_power - maximum transmission power for the chandef
*
* In some regulations, the transmit power may depend on the configured channel
@@ -887,6 +1144,17 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
}
/**
+ * cfg80211_any_usable_channels - check for usable channels
+ * @wiphy: the wiphy to check for
+ * @band_mask: which bands to check on
+ * @prohibited_flags: which channels to not consider usable,
+ * %IEEE80211_CHAN_DISABLED is always taken into account
+ */
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+ unsigned long band_mask,
+ u32 prohibited_flags);
+
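For example (a sketch, assuming a wiphy pointer is at hand): check whether any 6 GHz channel is usable without radar detection or no-IR restrictions:

bool usable = cfg80211_any_usable_channels(wiphy, BIT(NL80211_BAND_6GHZ),
					   IEEE80211_CHAN_RADAR |
					   IEEE80211_CHAN_NO_IR);
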
+/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -948,7 +1216,7 @@ struct survey_info {
s8 noise;
};
-#define CFG80211_MAX_WEP_KEYS 4
+#define CFG80211_MAX_NUM_AKM_SUITES 10
/**
* struct cfg80211_crypto_settings - Crypto settings
@@ -971,13 +1239,25 @@ struct survey_info {
* port frames over NL80211 instead of the network interface.
* @control_port_no_preauth: disables pre-auth rx over the nl80211 control
* port for mac80211
- * @wep_keys: static WEP keys, if not NULL points to an array of
- * CFG80211_MAX_WEP_KEYS WEP keys
- * @wep_tx_key: key index (0..3) of the default TX static WEP key
* @psk: PSK (for devices supporting 4-way-handshake offload)
* @sae_pwd: password for SAE authentication (for devices supporting SAE
* offload)
* @sae_pwd_len: length of SAE password (for devices supporting SAE offload)
+ * @sae_pwe: The mechanisms allowed for SAE PWE derivation:
+ *
+ * NL80211_SAE_PWE_UNSPECIFIED
+ * Not-specified, used to indicate userspace did not specify any
+ * preference. The driver should follow its internal policy in
+ * such a scenario.
+ *
+ * NL80211_SAE_PWE_HUNT_AND_PECK
+ * Allow hunting-and-pecking loop only
+ *
+ * NL80211_SAE_PWE_HASH_TO_ELEMENT
+ * Allow hash-to-element only
+ *
+ * NL80211_SAE_PWE_BOTH
+ * Allow either hunting-and-pecking loop or hash-to-element
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@@ -985,21 +1265,68 @@ struct cfg80211_crypto_settings {
int n_ciphers_pairwise;
u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
int n_akm_suites;
- u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
+ u32 akm_suites[CFG80211_MAX_NUM_AKM_SUITES];
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
bool control_port_over_nl80211;
bool control_port_no_preauth;
- struct key_params *wep_keys;
- int wep_tx_key;
const u8 *psk;
const u8 *sae_pwd;
u8 sae_pwd_len;
+ enum nl80211_sae_pwe_mechanism sae_pwe;
+};
+
+/**
+ * struct cfg80211_mbssid_config - AP settings for multiple BSSID
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multiple BSSID group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+};
+
+/**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[] __counted_by(cnt);
+};
+
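Allocation sketch for the flexible array (hypothetical helper; note that with __counted_by() the counter should be assigned before the array is accessed):

static struct cfg80211_mbssid_elems *my_alloc_mbssid(const u8 *data, size_t len)
{
	struct cfg80211_mbssid_elems *elems;

	elems = kzalloc(struct_size(elems, elem, 1), GFP_KERNEL);
	if (!elems)
		return NULL;

	elems->cnt = 1;		/* set before touching elem[] (__counted_by) */
	elems->elem[0].data = data;
	elems->elem[0].len = len;
	return elems;
}
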
+/**
+ * struct cfg80211_rnr_elems - Reduced neighbor report (RNR) elements
+ *
+ * @cnt: Number of elements in array %elem.
+ *
+ * @elem: Array of RNR element(s) to be added into Beacon frames.
+ * @elem.data: Data for RNR elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_rnr_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[] __counted_by(cnt);
};
/**
* struct cfg80211_beacon_data - beacon data
+ * @link_id: the link ID for the AP MLD link sending this beacon
* @head: head portion of beacon (before TIM IE)
* or %NULL if not changed
* @tail: tail portion of beacon (after TIM IE)
@@ -1016,6 +1343,8 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
+ * @rnr_ies: reduced neighbor report elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@ -1024,8 +1353,13 @@ struct cfg80211_crypto_settings {
* Token (measurement type 11)
* @lci_len: LCI data length
* @civicloc_len: Civic location data length
+ * @he_bss_color: BSS Color settings
+ * @he_bss_color_valid: indicates whether the bss color
+ * attribute is present in the beacon data or not.
*/
struct cfg80211_beacon_data {
+ unsigned int link_id;
+
const u8 *head, *tail;
const u8 *beacon_ies;
const u8 *proberesp_ies;
@@ -1033,6 +1367,8 @@ struct cfg80211_beacon_data {
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
+ struct cfg80211_rnr_elems *rnr_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@ -1042,6 +1378,8 @@ struct cfg80211_beacon_data {
size_t probe_resp_len;
size_t lci_len;
size_t civicloc_len;
+ struct cfg80211_he_bss_color he_bss_color;
+ bool he_bss_color_valid;
};
struct mac_address {
@@ -1061,18 +1399,44 @@ struct cfg80211_acl_data {
int n_acl_entries;
/* Keep it last */
- struct mac_address mac_addrs[];
+ struct mac_address mac_addrs[] __counted_by(n_acl_entries);
};
/**
- * enum cfg80211_ap_settings_flags - AP settings flags
+ * struct cfg80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
*
- * Used by cfg80211_ap_settings
+ * @update: Set to true if the feature configuration should be updated.
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ * @tmpl_len: Template length
+ * @tmpl: Template data for FILS discovery frame including the action
+ * frame headers.
+ */
+struct cfg80211_fils_discovery {
+ bool update;
+ u32 min_interval;
+ u32 max_interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
+ * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
+ * response parameters in 6 GHz.
*
- * @AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external authentication
+ * @update: Set to true if the feature configuration should be updated.
+ * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
+ * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
+ * scanning
+ * @tmpl_len: Template length
+ * @tmpl: Template data for probe response
*/
-enum cfg80211_ap_settings_flags {
- AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = BIT(0),
+struct cfg80211_unsol_bcast_probe_resp {
+ bool update;
+ u32 interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
};
/**
@@ -1103,14 +1467,19 @@ enum cfg80211_ap_settings_flags {
* @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
* @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
* @he_cap: HE capabilities (or %NULL if HE isn't enabled)
+ * @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled)
+ * @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @twt_responder: Enable Target Wake Time (TWT) responder support
* @he_required: stations must support HE
- * @flags: flags, as defined in enum cfg80211_ap_settings_flags
+ * @sae_h2e_required: stations must support direct H2E technique in SAE
+ * @flags: flags, as defined in &enum nl80211_ap_settings_flags
* @he_obss_pd: OBSS Packet Detection settings
- * @he_bss_color: BSS Color settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1136,11 +1505,31 @@ struct cfg80211_ap_settings {
const struct ieee80211_vht_cap *vht_cap;
const struct ieee80211_he_cap_elem *he_cap;
const struct ieee80211_he_operation *he_oper;
- bool ht_required, vht_required, he_required;
+ const struct ieee80211_eht_cap_elem *eht_cap;
+ const struct ieee80211_eht_operation *eht_oper;
+ bool ht_required, vht_required, he_required, sae_h2e_required;
bool twt_responder;
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
- struct cfg80211_he_bss_color he_bss_color;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
+};
+
+
+/**
+ * struct cfg80211_ap_update - AP configuration update
+ *
+ * Subset of &struct cfg80211_ap_settings, for updating a running AP.
+ *
+ * @beacon: beacon data
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ */
+struct cfg80211_ap_update {
+ struct cfg80211_beacon_data beacon;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
};
/**
@@ -1158,6 +1547,8 @@ struct cfg80211_ap_settings {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
+ * @link_id: defines the link on which channel switch is expected during
+ * MLO. 0 in case of non-MLO.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1170,9 +1561,29 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
+ u8 link_id;
};
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+/**
+ * struct cfg80211_color_change_settings - color change settings
+ *
+ * Used for bss color change
+ *
+ * @beacon_color_change: beacon data while performing the color countdown
+ * @counter_offset_beacon: offsets of the counters within the beacon (tail)
+ * @counter_offset_presp: offsets of the counters within the probe response
+ * @beacon_next: beacon data to be used after the color change
+ * @count: number of beacons until the color change
+ * @color: the color used after the change
+ */
+struct cfg80211_color_change_settings {
+ struct cfg80211_beacon_data beacon_color_change;
+ u16 counter_offset_beacon;
+ u16 counter_offset_presp;
+ struct cfg80211_beacon_data beacon_next;
+ u8 count;
+ u8 color;
+};
/**
* struct iface_combination_params - input parameters for interface combinations
@@ -1211,7 +1622,6 @@ enum station_parameters_apply_mask {
STATION_PARAM_APPLY_UAPSD = BIT(0),
STATION_PARAM_APPLY_CAPABILITY = BIT(1),
STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
- STATION_PARAM_APPLY_STA_TXPOWER = BIT(3),
};
/**
@@ -1235,14 +1645,81 @@ struct sta_txpwr {
};
/**
- * struct station_parameters - station parameters
+ * struct link_station_parameters - link station parameters
*
- * Used to change and create a new station.
+ * Used to change and create a new link station.
*
- * @vlan: vlan interface station should belong to
+ * @mld_mac: MAC address of the station
+ * @link_id: the link id (-1 for non-MLD station)
+ * @link_mac: MAC address of the link
* @supported_rates: supported rates in IEEE 802.11 format
* (or NULL for no change)
* @supported_rates_len: number of supported rates
+ * @ht_capa: HT capabilities of station
+ * @vht_capa: VHT capabilities of station
+ * @opmode_notif: operating mode field from Operating Mode Notification
+ * @opmode_notif_used: information if operating mode field is used
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
+ * @txpwr: transmit power for an associated station
+ * @txpwr_set: txpwr field is set
+ * @he_6ghz_capa: HE 6 GHz Band capabilities of station
+ * @eht_capa: EHT capabilities of station
+ * @eht_capa_len: the length of the EHT capabilities
+ */
+struct link_station_parameters {
+ const u8 *mld_mac;
+ int link_id;
+ const u8 *link_mac;
+ const u8 *supported_rates;
+ u8 supported_rates_len;
+ const struct ieee80211_ht_cap *ht_capa;
+ const struct ieee80211_vht_cap *vht_capa;
+ u8 opmode_notif;
+ bool opmode_notif_used;
+ const struct ieee80211_he_cap_elem *he_capa;
+ u8 he_capa_len;
+ struct sta_txpwr txpwr;
+ bool txpwr_set;
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ const struct ieee80211_eht_cap_elem *eht_capa;
+ u8 eht_capa_len;
+};
+
+/**
+ * struct link_station_del_parameters - link station deletion parameters
+ *
+ * Used to delete a link station entry (or all stations).
+ *
+ * @mld_mac: MAC address of the station
+ * @link_id: the link id
+ */
+struct link_station_del_parameters {
+ const u8 *mld_mac;
+ u32 link_id;
+};
+
+/**
+ * struct cfg80211_ttlm_params - TID to link mapping parameters
+ *
+ * Used for setting a TID to link mapping.
+ *
+ * @dlink: Downlink TID to link mapping, as defined in section 9.4.2.314
+ * (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
+ * @ulink: Uplink TID to link mapping, as defined in section 9.4.2.314
+ * (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
+ */
+struct cfg80211_ttlm_params {
+ u16 dlink[8];
+ u16 ulink[8];
+};
+
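A sketch of filling the mapping (hypothetical helper): each dlink[tid]/ulink[tid] entry is a bitmap of link IDs that TID may use:

static void my_fill_ttlm(struct cfg80211_ttlm_params *ttlm)
{
	int tid;

	/* map all TIDs to link 0; TIDs 6/7 may also use link 1 */
	for (tid = 0; tid < 8; tid++) {
		ttlm->dlink[tid] = BIT(0);
		ttlm->ulink[tid] = BIT(0);
	}
	ttlm->dlink[6] |= BIT(1);
	ttlm->dlink[7] |= BIT(1);
}
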
+/**
+ * struct station_parameters - station parameters
+ *
+ * Used to change and create a new station.
+ *
+ * @vlan: vlan interface station should belong to
* @sta_flags_mask: station flags that changed
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @sta_flags_set: station flags values
@@ -1253,8 +1730,6 @@ struct sta_txpwr {
* @peer_aid: mesh peer AID or zero for no change
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
- * @ht_capa: HT capabilities of station
- * @vht_capa: VHT capabilities of station
* @uapsd_queues: bitmap of queues configured for uapsd. same format
* as the AC bitmap in the QoS info field
* @max_sp: max Service Period. same format as the MAX_SP in the
@@ -1271,17 +1746,11 @@ struct sta_txpwr {
* @supported_channels_len: number of supported channels
* @supported_oper_classes: supported oper classes in IEEE 802.11 format
* @supported_oper_classes_len: number of supported operating classes
- * @opmode_notif: operating mode field from Operating Mode Notification
- * @opmode_notif_used: information if operating mode field is used
* @support_p2p_ps: information if station supports P2P PS mechanism
- * @he_capa: HE capabilities of station
- * @he_capa_len: the length of the HE capabilities
* @airtime_weight: airtime scheduler weight for this station
- * @txpwr: transmit power for an associated station
- * @he_6ghz_capa: HE 6 GHz Band capabilities of station
+ * @link_sta_params: link-related parameters.
*/
struct station_parameters {
- const u8 *supported_rates;
struct net_device *vlan;
u32 sta_flags_mask, sta_flags_set;
u32 sta_modify_mask;
@@ -1289,11 +1758,8 @@ struct station_parameters {
u16 aid;
u16 vlan_id;
u16 peer_aid;
- u8 supported_rates_len;
u8 plink_action;
u8 plink_state;
- const struct ieee80211_ht_cap *ht_capa;
- const struct ieee80211_vht_cap *vht_capa;
u8 uapsd_queues;
u8 max_sp;
enum nl80211_mesh_power_mode local_pm;
@@ -1304,14 +1770,9 @@ struct station_parameters {
u8 supported_channels_len;
const u8 *supported_oper_classes;
u8 supported_oper_classes_len;
- u8 opmode_notif;
- bool opmode_notif_used;
int support_p2p_ps;
- const struct ieee80211_he_cap_elem *he_capa;
- u8 he_capa_len;
u16 airtime_weight;
- struct sta_txpwr txpwr;
- const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ struct link_station_parameters link_sta_params;
};
/**
@@ -1323,11 +1784,15 @@ struct station_parameters {
* @subtype: Management frame subtype to use for indicating removal
* (10 = Disassociation, 12 = Deauthentication)
* @reason_code: Reason code for the Disassociation/Deauthentication frame
+ * @link_id: Link ID indicating a link that stations to be flushed must be
+ * using; valid only for MLO, but can also be -1 for MLO to really
+ * remove all stations.
*/
struct station_del_parameters {
const u8 *mac;
u8 subtype;
u16 reason_code;
+ int link_id;
};
/**
@@ -1377,7 +1842,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
enum cfg80211_station_type statype);
/**
- * enum station_info_rate_flags - bitrate info flags
+ * enum rate_info_flags - bitrate info flags
*
* Used by the driver to indicate the specific rate transmission
* type for 802.11n transmissions.
@@ -1388,6 +1853,9 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_DMG: 60GHz MCS
* @RATE_INFO_FLAGS_HE_MCS: HE MCS information
* @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
+ * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
+ * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
+ * @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
@@ -1396,6 +1864,9 @@ enum rate_info_flags {
RATE_INFO_FLAGS_DMG = BIT(3),
RATE_INFO_FLAGS_HE_MCS = BIT(4),
RATE_INFO_FLAGS_EDMG = BIT(5),
+ RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6),
+ RATE_INFO_FLAGS_EHT_MCS = BIT(7),
+ RATE_INFO_FLAGS_S1G_MCS = BIT(8),
};
/**
@@ -1410,6 +1881,13 @@ enum rate_info_flags {
* @RATE_INFO_BW_80: 80 MHz bandwidth
* @RATE_INFO_BW_160: 160 MHz bandwidth
* @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
+ * @RATE_INFO_BW_320: 320 MHz bandwidth
+ * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
+ * @RATE_INFO_BW_1: 1 MHz bandwidth
+ * @RATE_INFO_BW_2: 2 MHz bandwidth
+ * @RATE_INFO_BW_4: 4 MHz bandwidth
+ * @RATE_INFO_BW_8: 8 MHz bandwidth
+ * @RATE_INFO_BW_16: 16 MHz bandwidth
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
@@ -1419,6 +1897,13 @@ enum rate_info_bw {
RATE_INFO_BW_80,
RATE_INFO_BW_160,
RATE_INFO_BW_HE_RU,
+ RATE_INFO_BW_320,
+ RATE_INFO_BW_EHT_RU,
+ RATE_INFO_BW_1,
+ RATE_INFO_BW_2,
+ RATE_INFO_BW_4,
+ RATE_INFO_BW_8,
+ RATE_INFO_BW_16,
};
/**
@@ -1427,8 +1912,8 @@ enum rate_info_bw {
* Information about a receiving or transmitting bitrate
*
* @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes an HT/VHT/HE rate
* @legacy: bitrate in 100kbit/s for 802.11abg
+ * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate
* @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
* @he_gi: HE guard interval (from &enum nl80211_he_gi)
@@ -1436,21 +1921,26 @@ enum rate_info_bw {
* @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
* only valid if bw is %RATE_INFO_BW_HE_RU)
* @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
+ * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi)
+ * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_EHT_RU)
*/
struct rate_info {
- u8 flags;
- u8 mcs;
+ u16 flags;
u16 legacy;
+ u8 mcs;
u8 nss;
u8 bw;
u8 he_gi;
u8 he_dcm;
u8 he_ru_alloc;
u8 n_bonded_ch;
+ u8 eht_gi;
+ u8 eht_ru_alloc;
};
/**
- * enum station_info_rate_flags - bitrate info flags
+ * enum bss_param_flags - BSS parameter flags
*
* Used by the driver to indicate which BSS parameters
* (e.g. CTS protection, short preamble, short slot time) are in effect.
@@ -1599,6 +2089,24 @@ struct cfg80211_tid_stats {
* received packet with an FCS error matches the peer MAC address.
* @airtime_link_metric: mesh airtime link metric.
* @connected_to_as: true if mesh STA has a path to authentication server
+ * @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled
+ * by the driver. Drivers use this only in cfg80211_new_sta() calls when the
+ * AP MLD's MLME/SME is offloaded to the driver. Drivers won't fill this
+ * information in cfg80211_del_sta_sinfo(), get_station() and
+ * dump_station() callbacks.
+ * @assoc_link_id: Indicates the MLO link ID of the AP with which the station
+ * completed (re)association. This information is filled for both MLO
+ * and non-MLO STA connections when the AP is affiliated with an MLD.
+ * @mld_addr: For MLO STA connection, filled with MLD address of the station.
+ * For non-MLO STA connection, filled with all zeros.
+ * @assoc_resp_ies: IEs from (Re)Association Response.
+ * This is used only when in AP mode with drivers that do not use user
+ * space MLME/SME implementation. The information is provided only for the
+ * cfg80211_new_sta() calls to notify user space of the IEs. Drivers won't
+ * fill this information in cfg80211_del_sta_sinfo(), get_station() and
+ * dump_station() callbacks. User space needs this information to determine
+ * the accepted and rejected affiliated links of the connected station.
+ * @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
*/
struct station_info {
u64 filled;
@@ -1658,6 +2166,60 @@ struct station_info {
u32 airtime_link_metric;
u8 connected_to_as;
+
+ bool mlo_params_valid;
+ u8 assoc_link_id;
+ u8 mld_addr[ETH_ALEN] __aligned(2);
+ const u8 *assoc_resp_ies;
+ size_t assoc_resp_ies_len;
+};
+
+/**
+ * struct cfg80211_sar_sub_specs - sub specs limit
+ * @power: power limitation in 0.25 dBm units
+ * @freq_range_index: index of the frequency range the power limitation applies to
+ */
+struct cfg80211_sar_sub_specs {
+ s32 power;
+ u32 freq_range_index;
+};
+
+/**
+ * struct cfg80211_sar_specs - sar limit specs
+ * @type: type of SAR limit, e.g. power in 0.25 dBm units (&enum nl80211_sar_type)
+ * @num_sub_specs: number of sar sub specs
+ * @sub_specs: memory to hold the sar sub specs
+ */
+struct cfg80211_sar_specs {
+ enum nl80211_sar_type type;
+ u32 num_sub_specs;
+ struct cfg80211_sar_sub_specs sub_specs[];
+};
+
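Allocation sketch (hypothetical helper; the index values refer to the wiphy's advertised &struct cfg80211_sar_capa freq_ranges):

static struct cfg80211_sar_specs *my_build_sar(void)
{
	struct cfg80211_sar_specs *sar;

	sar = kzalloc(struct_size(sar, sub_specs, 2), GFP_KERNEL);
	if (!sar)
		return NULL;

	sar->type = NL80211_SAR_TYPE_POWER;
	sar->num_sub_specs = 2;
	sar->sub_specs[0].freq_range_index = 0;
	sar->sub_specs[0].power = 44;	/* 11 dBm in 0.25 dBm units */
	sar->sub_specs[1].freq_range_index = 1;
	sar->sub_specs[1].power = 40;	/* 10 dBm */
	return sar;
}
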
+
+/**
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
+ * @start_freq: start range edge frequency
+ * @end_freq: end range edge frequency
+ */
+struct cfg80211_sar_freq_ranges {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+/**
+ * struct cfg80211_sar_capa - sar limit capability
+ * @type: type of SAR limit, e.g. power in 0.25 dBm units (&enum nl80211_sar_type)
+ * @num_freq_ranges: number of frequency ranges
+ * @freq_ranges: memory to hold the freq ranges.
+ *
+ * Note: the WLAN driver may append new ranges or split an existing
+ * range into smaller ones and then append them.
+ */
+struct cfg80211_sar_capa {
+ enum nl80211_sar_type type;
+ u32 num_freq_ranges;
+ const struct cfg80211_sar_freq_ranges *freq_ranges;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -1744,7 +2306,7 @@ enum mpath_info_flags {
* @sn: target sequence number
* @metric: metric (cost) of this mesh path
* @exptime: expiration time for the mesh path from now, in msecs
- * @flags: mesh path flags
+ * @flags: mesh path flags from &enum mesh_path_flags
* @discovery_timeout: total mesh path discovery timeout, in msecs
* @discovery_retries: mesh path discovery retries
* @generation: generation number for nl80211 dumps.
@@ -1774,6 +2336,7 @@ struct mpath_info {
*
* Used to change BSS parameters (mainly for AP mode).
*
+ * @link_id: link_id or -1 for non-MLD
* @use_cts_prot: Whether to use CTS protection
* (0 = no, 1 = yes, -1 = do not change)
* @use_short_preamble: Whether the use of short preambles is allowed
@@ -1784,12 +2347,14 @@ struct mpath_info {
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
+ * (0 = no, 1 = yes, -1 = do not change)
* @ht_opmode: HT Operation mode
* (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
* @p2p_opp_ps: P2P opportunistic PS (-1 = no change)
*/
struct bss_parameters {
+ int link_id;
int use_cts_prot;
int use_short_preamble;
int use_short_slot_time;
@@ -1869,6 +2434,9 @@ struct bss_parameters {
* @plink_timeout: If no tx activity is seen from a STA we've established
* peering with for longer than this time (in seconds), then remove it
* from the STA's list of peers. Default is 30 minutes.
+ * @dot11MeshConnectedToAuthServer: if set to true then this mesh STA
+ * will advertise that it is connected to an authentication server
+ * in the mesh formation field.
* @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
* connected to a mesh gate in mesh formation info. If false, the
* value in mesh formation is determined by the presence of root paths
@@ -1929,7 +2497,7 @@ struct mesh_config {
* @user_mpm: userspace handles all MPM functions
* @dtim_period: DTIM period to use
* @beacon_interval: beacon interval to use
- * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a]
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
* @beacon_rate: bitrate to be used for beacons
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
@@ -1981,6 +2549,7 @@ struct ocb_setup {
* @cwmax: Maximum contention window [a value of the form 2^n-1 in the range
* 1..32767]
* @aifs: Arbitration interframe space [0..255]
+ * @link_id: link_id or -1 for non-MLD
*/
struct ieee80211_txq_params {
enum nl80211_ac ac;
@@ -1988,6 +2557,7 @@ struct ieee80211_txq_params {
u16 cwmin;
u16 cwmax;
u8 aifs;
+ int link_id;
};
/**
@@ -2039,13 +2609,35 @@ struct cfg80211_scan_info {
};
/**
+ * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only
+ *
+ * @short_ssid: short ssid to scan for
+ * @bssid: bssid to scan for
+ * @channel_idx: idx of the channel in the channel array in the scan request
+ * which the above info is relevant to
+ * @unsolicited_probe: the AP transmits an unsolicited probe response every 20 TU
+ * @short_ssid_valid: @short_ssid is valid and can be used
+ * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
+ * 20 TUs before starting to send probe requests.
+ * @psd_20: The AP's 20 MHz PSD value.
+ */
+struct cfg80211_scan_6ghz_params {
+ u32 short_ssid;
+ u32 channel_idx;
+ u8 bssid[ETH_ALEN];
+ bool unsolicited_probe;
+ bool short_ssid_valid;
+ bool psc_no_listen;
+ s8 psd_20;
+};
+
+/**
* struct cfg80211_scan_request - scan request description
*
* @ssids: SSIDs to scan for (active scan only)
* @n_ssids: number of SSIDs
* @channels: channels to scan on.
* @n_channels: total number of channels to scan
- * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @duration: how long to listen on each channel, in TUs. If
@@ -2053,7 +2645,7 @@ struct cfg80211_scan_info {
* the actual dwell time may be shorter.
* @duration_mandatory: if set, the scan duration must be as specified by the
* %duration field.
- * @flags: bit field of flags controlling operation
+ * @flags: control flags from &enum nl80211_scan_flags
* @rates: bitmap of rates to advertise for each band
* @wiphy: the wiphy this was for
* @scan_start: time (in jiffies) when the scan started
@@ -2065,13 +2657,18 @@ struct cfg80211_scan_info {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @scan_6ghz: relevant for split scan request only,
+ * true if this is the second scan request
+ * @n_6ghz_params: number of 6 GHz params
+ * @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
+ * @tsf_report_link_id: for MLO, indicates the link ID of the BSS that should be
+ * used for TSF reporting. Can be set to -1 to indicate no preference.
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
- enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
u16 duration;
@@ -2092,9 +2689,13 @@ struct cfg80211_scan_request {
struct cfg80211_scan_info info;
bool notified;
bool no_cck;
+ bool scan_6ghz;
+ u32 n_6ghz_params;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ s8 tsf_report_link_id;
/* keep last */
- struct ieee80211_channel *channels[];
+ struct ieee80211_channel *channels[] __counted_by(n_channels);
};
static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
@@ -2116,19 +2717,11 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
* @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
- * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
- * for filtering out scan results received. Drivers advertize this support
- * of band specific rssi based filtering through the feature capability
- * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
- * specific rssi thresholds take precedence over rssi_thold, if specified.
- * If not specified for any band, it will be assigned with rssi_thold of
- * corresponding matchset.
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
s32 rssi_thold;
- s32 per_band_rssi_thold[NUM_NL80211_BANDS];
};
/**
@@ -2163,14 +2756,13 @@ struct cfg80211_bss_select_adjust {
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
- * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
- * @flags: bit field of flags controlling operation
+ * @flags: control flags from &enum nl80211_scan_flags
* @match_sets: sets of parameters to be matched for a scan result
* entry to be considered valid and to be passed to the host
* (others are filtered out).
- * If ommited, all results are passed.
+ * If omitted, all results are passed.
* @n_match_sets: number of match sets
* @report_results: indicates that results were reported for this request
* @wiphy: the wiphy this was for
@@ -2204,14 +2796,13 @@ struct cfg80211_bss_select_adjust {
* to the specified band while deciding whether a better BSS is reported
* using @relative_rssi. If delta is a negative number, the BSSs that
* belong to the specified band will be penalized by delta dB in relative
- * comparisions.
+ * comparisons.
*/
struct cfg80211_sched_scan_request {
u64 reqid;
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
- enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
u32 flags;
@@ -2259,7 +2850,6 @@ enum cfg80211_signal_type {
/**
* struct cfg80211_inform_bss - BSS inform data
* @chan: channel the frame was received on
- * @scan_width: scan width that was used
* @signal: signal strength value, according to the wiphy's
* signal type
* @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
@@ -2275,16 +2865,28 @@ enum cfg80211_signal_type {
* the BSS that requested the scan in which the beacon/probe was received.
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @restrict_use: restrict usage; if not set, assume @use_for is
+ * %NL80211_BSS_USE_FOR_NORMAL.
+ * @use_for: bitmap of possible usage for this BSS, see
+ * &enum nl80211_bss_use_for
+ * @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
+ * if @restrict_use is set and @use_for is zero (empty); may be 0 for
+ * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
+ * @drv_data: Data to be passed through to @inform_bss
*/
struct cfg80211_inform_bss {
struct ieee80211_channel *chan;
- enum nl80211_bss_scan_width scan_width;
s32 signal;
u64 boottime_ns;
u64 parent_tsf;
u8 parent_bssid[ETH_ALEN] __aligned(2);
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+
+ u8 restrict_use:1, use_for:7;
+ u8 cannot_use_reasons;
+
+ void *drv_data;
};
/**
@@ -2310,7 +2912,6 @@ struct cfg80211_bss_ies {
* for use in scan results and similar.
*
* @channel: channel this BSS is on
- * @scan_width: width of the control channel
* @bssid: BSSID of the BSS
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
@@ -2323,6 +2924,8 @@ struct cfg80211_bss_ies {
* own the beacon_ies, but they're just pointers to the ones from the
* @hidden_beacon_bss struct)
* @proberesp_ies: the information elements from the last Probe Response frame
+ * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame,
+ * cannot rely on it having valid data
* @hidden_beacon_bss: in case this BSS struct represents a probe response from
* a BSS that hides the SSID in its beacon, this points to the BSS struct
* that holds the beacon data. @beacon_ies is still valid, of course, and
@@ -2336,11 +2939,15 @@ struct cfg80211_bss_ies {
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @bssid_index: index in the multiple BSS set
* @max_bssid_indicator: max number of members in the BSS set
+ * @use_for: bitmap of possible usage for this BSS, see
+ * &enum nl80211_bss_use_for
+ * @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
+ * if @restrict_use is set and @use_for is zero (empty); may be 0 for
+ * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
struct ieee80211_channel *channel;
- enum nl80211_bss_scan_width scan_width;
const struct cfg80211_bss_ies __rcu *ies;
const struct cfg80211_bss_ies __rcu *beacon_ies;
@@ -2359,9 +2966,14 @@ struct cfg80211_bss {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 proberesp_ecsa_stuck:1;
+
u8 bssid_index;
u8 max_bssid_indicator;
+ u8 use_for;
+ u8 cannot_use_reasons;
+
u8 priv[] __aligned(sizeof(void *));
};
@@ -2387,7 +2999,7 @@ const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
*/
static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
{
- return (void *)ieee80211_bss_get_elem(bss, id);
+ return (const void *)ieee80211_bss_get_elem(bss, id);
}
@@ -2410,6 +3022,12 @@ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
* Authentication algorithm number, i.e., starting at the Authentication
* transaction sequence number field.
* @auth_data_len: Length of auth_data buffer in octets
+ * @link_id: if >= 0, indicates authentication should be done as an MLD,
+ * the interface address is included as the MLD address and the
+ * necessary link (with the given link_id) will be created (and
+ * given an MLD address) by the driver
+ * @ap_mld_addr: AP MLD address in case of authentication request with
+ * an AP MLD, valid iff @link_id >= 0
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
@@ -2417,9 +3035,31 @@ struct cfg80211_auth_request {
size_t ie_len;
enum nl80211_auth_type auth_type;
const u8 *key;
- u8 key_len, key_idx;
+ u8 key_len;
+ s8 key_idx;
const u8 *auth_data;
size_t auth_data_len;
+ s8 link_id;
+ const u8 *ap_mld_addr;
+};
+
+/**
+ * struct cfg80211_assoc_link - per-link information for MLO association
+ * @bss: the BSS pointer, see also &struct cfg80211_assoc_request::bss;
+ * if this is %NULL for a link, that link is not requested
+ * @elems: extra elements for the per-STA profile for this link
+ * @elems_len: length of the elements
+ * @disabled: If set, this link should be included during association etc., but
+ * it should not be used until enabled by the AP MLD.
+ * @error: per-link error code, must be <= 0. If there is an error, then the
+ * operation as a whole must fail.
+ */
+struct cfg80211_assoc_link {
+ struct cfg80211_bss *bss;
+ const u8 *elems;
+ size_t elems_len;
+ bool disabled;
+ int error;
};
/**
@@ -2432,12 +3072,22 @@ struct cfg80211_auth_request {
* authentication capability. Drivers can offload authentication to
* userspace if this flag is set. Only applicable for cfg80211_connect()
* request (connect callback).
+ * @ASSOC_REQ_DISABLE_HE: Disable HE
+ * @ASSOC_REQ_DISABLE_EHT: Disable EHT
+ * @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
+ * Drivers shall disable MLO features for the current association if this
+ * flag is not set.
+ * @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
ASSOC_REQ_DISABLE_VHT = BIT(1),
ASSOC_REQ_USE_RRM = BIT(2),
CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
+ ASSOC_REQ_DISABLE_HE = BIT(4),
+ ASSOC_REQ_DISABLE_EHT = BIT(5),
+ CONNECT_REQ_MLO_SUPPORT = BIT(6),
+ ASSOC_REQ_SPP_AMSDU = BIT(7),
};
/**
@@ -2449,6 +3099,8 @@ enum cfg80211_assoc_req_flags {
* given a reference that it must give back to cfg80211_send_rx_assoc()
* or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
* association requests while already associating must be rejected.
+ * This also applies to the @links.bss parameter, which is used instead
+ * of this one (it is %NULL) for MLO associations.
* @ie: Extra IEs to add to (Re)Association Request frame or %NULL
* @ie_len: Length of ie buffer in octets
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
@@ -2471,6 +3123,13 @@ enum cfg80211_assoc_req_flags {
* @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
* Request/Response frame or %NULL if FILS is not used. This field starts
* with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
+ * @s1g_capa: S1G capability override
+ * @s1g_capa_mask: S1G capability override mask
+ * @links: per-link information for MLO connections
+ * @link_id: >= 0 for MLO connections, where links are given, and indicates
+ * the link on which the association request should be sent
+ * @ap_mld_addr: AP MLD address in case of MLO association request,
+ * valid iff @link_id >= 0
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -2485,6 +3144,10 @@ struct cfg80211_assoc_request {
const u8 *fils_kek;
size_t fils_kek_len;
const u8 *fils_nonces;
+ struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
+ struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
+ const u8 *ap_mld_addr;
+ s8 link_id;
};
/**
@@ -2493,7 +3156,7 @@ struct cfg80211_assoc_request {
* This structure provides information needed to complete IEEE 802.11
* deauthentication.
*
- * @bssid: the BSSID of the BSS to deauthenticate from
+ * @bssid: the BSSID or AP MLD address to deauthenticate from
* @ie: Extra IEs to add to Deauthentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the deauthentication
@@ -2514,7 +3177,7 @@ struct cfg80211_deauth_request {
* This structure provides information needed to complete IEEE 802.11
* disassociation.
*
- * @bss: the BSS to disassociate from
+ * @ap_addr: the BSSID or AP MLD address to disassociate from
* @ie: Extra IEs to add to Disassociation frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the disassociation
@@ -2522,7 +3185,7 @@ struct cfg80211_deauth_request {
* Disassociation frame is to be transmitted.
*/
struct cfg80211_disassoc_request {
- struct cfg80211_bss *bss;
+ const u8 *ap_addr;
const u8 *ie;
size_t ie_len;
u16 reason_code;
@@ -2590,8 +3253,8 @@ struct cfg80211_ibss_params {
*
* @behaviour: requested BSS selection behaviour.
* @param: parameters for the requested behaviour.
- * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
- * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ * @param.band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @param.adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
*/
struct cfg80211_bss_selection {
enum nl80211_bss_select_attr behaviour;
@@ -2949,12 +3612,15 @@ struct cfg80211_wowlan_nd_info {
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
* @net_detect: if not %NULL, woke up because of net detect
+ * @unprot_deauth_disassoc: woke up due to unprotected deauth or
+ * disassoc frame (in MFP).
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release, packet_80211,
- tcp_match, tcp_connlost, tcp_nomoretokens;
+ tcp_match, tcp_connlost, tcp_nomoretokens,
+ unprot_deauth_disassoc;
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
@@ -2967,7 +3633,7 @@ struct cfg80211_wowlan_wakeup {
* @kck: key confirmation key (@kck_len bytes)
* @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
* @kek_len: length of kek
- * @kck_len length of kck
+ * @kck_len: length of kck
* @akm: akm (oui, id)
*/
struct cfg80211_gtk_rekey_data {
@@ -2997,7 +3663,7 @@ struct cfg80211_update_ft_ies_params {
* This structure provides information needed to transmit a mgmt frame
*
* @chan: channel to use
- * @offchan: indicates wether off channel operation is required
+ * @offchan: indicates whether off channel operation is required
* @wait: duration for ROC
* @buf: buffer to transmit
* @len: buffer length
@@ -3005,6 +3671,9 @@ struct cfg80211_update_ft_ies_params {
* @dont_wait_for_ack: tells the low level not to wait for an ack
* @n_csa_offsets: length of csa_offsets array
* @csa_offsets: array of all the csa offsets in the frame
+ * @link_id: for MLO, the link ID to transmit on, -1 if not given; note
+ * that the link ID isn't validated (much), it's in range but the
+ * link might not exist (or be used by the receiver STA)
*/
struct cfg80211_mgmt_tx_params {
struct ieee80211_channel *chan;
@@ -3016,6 +3685,7 @@ struct cfg80211_mgmt_tx_params {
bool dont_wait_for_ack;
int n_csa_offsets;
const u16 *csa_offsets;
+ int link_id;
};
/**
@@ -3111,7 +3781,7 @@ struct cfg80211_nan_func_filter {
* @publish_bcast: if true, the solicited publish should be broadcasted
* @subscribe_active: if true, the subscribe is active
* @followup_id: the instance ID for follow up
- * @followup_reqid: the requestor instance ID for follow up
+ * @followup_reqid: the requester instance ID for follow up
* @followup_dest: MAC address of the recipient of the follow up
* @ttl: time to live counter in DW.
* @serv_spec_info: Service Specific Info
@@ -3192,6 +3862,17 @@ struct cfg80211_pmk_conf {
* the real status code for failures. Used only for the authentication
* response command interface (user space to driver).
* @pmkid: The identifier to refer a PMKSA.
+ * @mld_addr: MLD address of the peer. Used by the authentication request event
+ * interface. Driver indicates this to enable MLO during the authentication
+ * offload to user space. Driver shall look at %NL80211_ATTR_MLO_SUPPORT
+ * flag capability in NL80211_CMD_CONNECT to know whether the user space
+ * supports enabling MLO during the authentication offload.
+ * User space should use the address of the interface (on which the
+ * authentication request event was reported) as the self MLD address. User space
+ * and driver should use MLD addresses in RA, TA and BSSID fields of
+ * authentication frames sent or received via cfg80211. The driver
+ * translates the MLD addresses to/from link addresses based on the link
+ * chosen for the authentication.
*/
struct cfg80211_external_auth_params {
enum nl80211_external_auth_action action;
@@ -3200,6 +3881,7 @@ struct cfg80211_external_auth_params {
unsigned int key_mgmt_suite;
u16 status;
const u8 *pmkid;
+ u8 mld_addr[ETH_ALEN] __aligned(2);
};
/**
@@ -3329,6 +4011,7 @@ struct cfg80211_pmsr_ftm_result {
* @type: type of the measurement reported, note that we only support reporting
* one type at a time, but you can report multiple results separately and
* they're all aggregated for userspace.
+ * @ftm: FTM result
*/
struct cfg80211_pmsr_result {
u64 host_time, ap_tsf;
@@ -3364,6 +4047,11 @@ struct cfg80211_pmsr_result {
* @non_trigger_based: use non trigger based ranging for the measurement
* If neither @trigger_based nor @non_trigger_based is set,
* EDCA based ranging will be used.
+ * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
+ * @trigger_based or @non_trigger_based is set.
+ * @bss_color: the bss color of the responder. Optional. Set to zero to
+ * indicate the driver should set the BSS color. Only valid if
+ * @non_trigger_based or @trigger_based is set.
*
* See also nl80211 for the respective attribute documentation.
*/
@@ -3375,11 +4063,13 @@ struct cfg80211_pmsr_ftm_request_peer {
request_lci:1,
request_civicloc:1,
trigger_based:1,
- non_trigger_based:1;
+ non_trigger_based:1,
+ lmr_feedback:1;
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
u8 ftmr_retries;
+ u8 bss_color;
};
/**
@@ -3425,7 +4115,7 @@ struct cfg80211_pmsr_request {
struct list_head list;
- struct cfg80211_pmsr_request_peer peers[];
+ struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers);
};
/**
@@ -3446,12 +4136,22 @@ struct cfg80211_pmsr_request {
* the IEs of the remote peer in the event from the host driver and
* the constructed IEs by the user space in the request interface.
* @ie_len: Length of IEs in octets.
+ * @assoc_link_id: MLO link ID of the AP with which (re)association was
+ * requested by the peer. This will be filled by the driver for both MLO and
+ * non-MLO station connections when the AP is affiliated with an MLD. For
+ * non-MLD AP mode, it will be -1. Used only with the OWE update event
+ * (driver to user space).
+ * @peer_mld_addr: For an MLO connection, the MLD address of the peer. For a
+ * non-MLO connection, it will be all zeros. This is applicable only when
+ * @assoc_link_id is not -1, i.e., the AP is affiliated with an MLD. Used
+ * only with the OWE update event (driver to user space).
*/
struct cfg80211_update_owe_info {
u8 peer[ETH_ALEN] __aligned(2);
u16 status;
const u8 *ie;
size_t ie_len;
+ int assoc_link_id;
+ u8 peer_mld_addr[ETH_ALEN] __aligned(2);
};
/**
@@ -3460,7 +4160,7 @@ struct cfg80211_update_owe_info {
* for the entire device
* @interface_stypes: bitmap of management frame subtypes registered
* for the given interface
- * @global_mcast_rx: mcast RX is needed globally for these subtypes
+ * @global_mcast_stypes: mcast RX is needed globally for these subtypes
* @interface_mcast_stypes: mcast RX is needed on this interface
* for these subtypes
*/
@@ -3478,9 +4178,10 @@ struct mgmt_frame_regs {
* All callbacks except where otherwise noted should return 0
* on success or a negative error code.
*
- * All operations are currently invoked under rtnl for consistency with the
- * wireless extensions but this is subject to reevaluation as soon as this
- * code is used more widely and we have a first user without wext.
+ * All operations are invoked with the wiphy mutex held. The RTNL may be
+ * held in addition (due to wireless extensions) but this cannot be relied
+ * upon except where documented below. Note that due to lock ordering, the
+ * RTNL also cannot be acquired in any handlers.
*
* @suspend: wiphy device needs to be suspended. The variable @wow will
* be %NULL or contain the enabled Wake-on-Wireless triggers that are
@@ -3495,29 +4196,48 @@ struct mgmt_frame_regs {
* the new netdev in the wiphy's network namespace! Returns the struct
* wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
* also set the address member in the wdev.
+ * This additionally holds the RTNL to be able to do netdev changes.
*
* @del_virtual_intf: remove the virtual interface
+ * This additionally holds the RTNL to be able to do netdev changes.
*
* @change_virtual_intf: change type/configuration of virtual interface,
* keep the struct wireless_dev's iftype updated.
+ * This additionally holds the RTNL to be able to do netdev changes.
+ *
+ * @add_intf_link: Add a new MLO link to the given interface. Note that
+ * the wdev->links[] data structure has been updated, so the new link
+ * address is available.
+ * @del_intf_link: Remove an MLO link from the given interface.
*
* @add_key: add a key with the given parameters. @mac_addr will be %NULL
- * when adding a group key.
+ * when adding a group key. @link_id will be -1 for non-MLO connection.
+ * For MLO connection, @link_id will be >= 0 for group key and -1 for
+ * pairwise key, @mac_addr will be peer's MLD address for MLO pairwise key.
*
* @get_key: get information about the key with the given parameters.
* @mac_addr will be %NULL when requesting information for a group
* key. All pointers given to the @callback function need not be valid
* after it returns. This function should return an error if it is
* not possible to retrieve the key, -ENOENT if it doesn't exist.
+ * @link_id will be -1 for non-MLO connection. For MLO connection,
+ * @link_id will be >= 0 for group key and -1 for pairwise key, @mac_addr
+ * will be peer's MLD address for MLO pairwise key.
*
* @del_key: remove a key given the @mac_addr (%NULL for a group key)
- * and @key_index, return -ENOENT if the key doesn't exist.
+ * and @key_index, return -ENOENT if the key doesn't exist. @link_id will
+ * be -1 for non-MLO connection. For MLO connection, @link_id will be >= 0
+ * for group key and -1 for pairwise key, @mac_addr will be peer's MLD
+ * address for MLO pairwise key.
*
- * @set_default_key: set the default key on an interface
+ * @set_default_key: set the default key on an interface. @link_id will be >= 0
+ * for MLO connection and -1 for non-MLO connection.
*
- * @set_default_mgmt_key: set the default management frame key on an interface
+ * @set_default_mgmt_key: set the default management frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
- * @set_default_beacon_key: set the default Beacon frame key on an interface
+ * @set_default_beacon_key: set the default Beacon frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
* @set_rekey_data: give the data necessary for GTK rekeying to the driver
*
@@ -3556,6 +4276,13 @@ struct mgmt_frame_regs {
*
* @change_bss: Modify parameters for a given BSS.
*
+ * @inform_bss: Called by cfg80211 while being informed about new BSS data
+ * for every BSS found within the reported data or frame. This is called
+ * from within the cfg80211 inform_bss handlers while holding the bss_lock.
+ * The data parameter is passed through from drv_data inside
+ * struct cfg80211_inform_bss.
+ * The new IE data for the BSS is explicitly passed.
+ *
* @set_txq_params: Set TX queue parameters
*
* @libertas_set_mesh_channel: Only for backward compatibility for libertas,
@@ -3637,8 +4364,6 @@ struct mgmt_frame_regs {
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
- * @set_wds_peer: set the WDS peer for a WDS interface
- *
* @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting
* functions to adjust rfkill hw state
*
@@ -3822,6 +4547,29 @@ struct mgmt_frame_regs {
* This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer, for the
* given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
+ *
+ * @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
+ *
+ * @set_radar_background: Configure a dedicated offchannel chain available on
+ * some hardware for radar/CAC detection. This chain can't be used to
+ * transmit or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection avoids the CAC downtime of switching
+ * to a different channel while CAC is performed on the selected radar
+ * channel.
+ * The caller is expected to set the chandef pointer to NULL in order to
+ * disable background CAC/radar detection.
+ * @add_link_station: Add a link to a station.
+ * @mod_link_station: Modify a link of a station.
+ * @del_link_station: Remove a link of a station.
+ *
+ * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
+ * @set_ttlm: set the TID to link mapping.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3840,30 +4588,40 @@ struct cfg80211_ops {
enum nl80211_iftype type,
struct vif_params *params);
+ int (*add_intf_link)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id);
+ void (*del_intf_link)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int link_id);
+
int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
int (*get_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*));
int (*del_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
int (*set_default_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index, bool unicast, bool multicast);
int (*set_default_mgmt_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index);
int (*set_default_beacon_key)(struct wiphy *wiphy,
struct net_device *netdev,
+ int link_id,
u8 key_index);
int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings);
int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_beacon_data *info);
- int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev);
+ struct cfg80211_ap_update *info);
+ int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id);
int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
@@ -3913,6 +4671,9 @@ struct cfg80211_ops {
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
+ void (*inform_bss)(struct wiphy *wiphy, struct cfg80211_bss *bss,
+ const struct cfg80211_bss_ies *ies, void *data);
+
int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_txq_params *params);
@@ -3959,9 +4720,6 @@ struct cfg80211_ops {
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
int *dbm);
- int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
- const u8 *addr);
-
void (*rfkill_poll)(struct wiphy *wiphy);
#ifdef CONFIG_NL80211_TESTMODE
@@ -3974,6 +4732,7 @@ struct cfg80211_ops {
int (*set_bitrate_mask)(struct wiphy *wiphy,
struct net_device *dev,
+ unsigned int link_id,
const u8 *peer,
const struct cfg80211_bitrate_mask *mask);
@@ -4034,9 +4793,10 @@ struct cfg80211_ops {
struct cfg80211_gtk_rekey_data *data);
int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, u32 peer_capability,
- bool initiator, const u8 *buf, size_t len);
+ const u8 *peer, int link_id,
+ u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator,
+ const u8 *buf, size_t len);
int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper);
@@ -4049,6 +4809,7 @@ struct cfg80211_ops {
int (*get_channel)(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef);
int (*start_p2p_device)(struct wiphy *wiphy,
@@ -4085,6 +4846,7 @@ struct cfg80211_ops {
struct cfg80211_qos_map *qos_map);
int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef);
int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
@@ -4131,7 +4893,7 @@ struct cfg80211_ops {
struct net_device *dev,
const u8 *buf, size_t len,
const u8 *dest, const __be16 proto,
- const bool noencrypt,
+ const bool noencrypt, int link_id,
u64 *cookie);
int (*get_ftm_responder_stats)(struct wiphy *wiphy,
@@ -4150,6 +4912,25 @@ struct cfg80211_ops {
struct cfg80211_tid_config *tid_conf);
int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, u8 tids);
+ int (*set_sar_specs)(struct wiphy *wiphy,
+ struct cfg80211_sar_specs *sar);
+ int (*color_change)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
+ int (*set_radar_background)(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef);
+ int (*add_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params);
+ int (*mod_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params);
+ int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_del_parameters *params);
+ int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_set_hw_timestamp *hwts);
+ int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params);
};
/*
@@ -4160,6 +4941,8 @@ struct cfg80211_ops {
/**
* enum wiphy_flags - wiphy capability flags
*
+ * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
+ * into two: the first for the legacy bands and the second for 6 GHz.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -4196,14 +4979,23 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
- * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
- * before connection.
* @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger kek and kck keys
+ * @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs,
+ * in order not to have them reachable in normal drivers until we have
+ * complete feature/interface combinations/etc. advertisement. No driver
+ * should set this flag for now.
+ * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
+ * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
+ * NL80211_REGDOM_SET_BY_DRIVER.
+ * @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if driver
+ * set this flag to update channels on beacon hints.
+ * @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link
+ * of an NSTR mobile AP MLD.
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
- /* use hole at 1 */
- /* use hole at 2 */
+ WIPHY_FLAG_SUPPORTS_MLO = BIT(1),
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
@@ -4211,8 +5003,8 @@ enum wiphy_flags {
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
WIPHY_FLAG_MESH_AUTH = BIT(10),
- /* use hole at 11 */
- /* use hole at 12 */
+ WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
+ WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
@@ -4224,7 +5016,8 @@ enum wiphy_flags {
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
- WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
+ WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24),
+ WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = BIT(25),
};
/**
@@ -4251,7 +5044,7 @@ struct ieee80211_iface_limit {
*
* struct ieee80211_iface_limit limits1[] = {
* { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, },
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
* };
* struct ieee80211_iface_combination combination1 = {
* .limits = limits1,
@@ -4523,19 +5316,32 @@ struct wiphy_vendor_command {
* 802.11-2012 8.4.2.29 for the defined fields.
* @extended_capabilities_mask: mask of the valid values
* @extended_capabilities_len: length of the extended capabilities
+ * @eml_capabilities: EML capabilities (for MLO)
+ * @mld_capa_and_ops: MLD capabilities and operations (for MLO)
*/
struct wiphy_iftype_ext_capab {
enum nl80211_iftype iftype;
const u8 *extended_capabilities;
const u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
+ u16 eml_capabilities;
+ u16 mld_capa_and_ops;
};
/**
+ * cfg80211_get_iftype_ext_capa - lookup interface type extended capability
+ * @wiphy: the wiphy to look up from
+ * @type: the interface type to look up
+ */
+const struct wiphy_iftype_ext_capab *
+cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type);
+
+/**
* struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
* @max_peers: maximum number of peers in a single measurement
* @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
* @randomize_mac_addr: can randomize MAC address for measurement
+ * @ftm: FTM measurement data
* @ftm.supported: FTM measurement is supported
* @ftm.asap: ASAP-mode is supported
* @ftm.non_asap: non-ASAP-mode is supported
@@ -4586,8 +5392,11 @@ struct wiphy_iftype_akm_suites {
int n_akm_suites;
};
+#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
+
/**
* struct wiphy - wireless hardware description
+ * @mtx: mutex for the data (structures) of this device
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
* the reg_notifier's request can be passed as NULL
@@ -4778,8 +5587,33 @@ struct wiphy_iftype_akm_suites {
* @max_data_retry_count: maximum supported per TID retry count for
* configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
+ * @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
+ * @max_num_akm_suites: maximum number of AKM suites allowed for
+ * configuration through %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and
+ * %NL80211_CMD_START_AP. Set to NL80211_MAX_NR_AKM_SUITES if not set by
+ * the driver. If set by the driver, the minimum allowed value is
+ * NL80211_MAX_NR_AKM_SUITES, in order to avoid compatibility issues with
+ * legacy userspace, and the maximum allowed value is
+ * CFG80211_MAX_NUM_AKM_SUITES.
+ *
+ * @hw_timestamp_max_peers: maximum number of peers that the driver supports
+ * enabling HW timestamping for concurrently. Setting this field to a
+ * non-zero value indicates that the driver supports HW timestamping.
+ * A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver
+ * supports enabling HW timestamping for all peers (i.e. no need to
+ * specify a MAC address).
*/
struct wiphy {
+ struct mutex mtx;
+
/* assign these fields before you register the wiphy */
u8 perm_addr[ETH_ALEN];
@@ -4916,6 +5750,16 @@ struct wiphy {
u8 max_data_retry_count;
+ const struct cfg80211_sar_capa *sar_capa;
+
+ struct rfkill *rfkill;
+
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+ u16 max_num_akm_suites;
+
+ u16 hw_timestamp_max_peers;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -5030,6 +5874,37 @@ static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops,
*/
int wiphy_register(struct wiphy *wiphy);
+/* this is a define for better error reporting (file/line) */
+#define lockdep_assert_wiphy(wiphy) lockdep_assert_held(&(wiphy)->mtx)
+
+/**
+ * rcu_dereference_wiphy - rcu_dereference with debug checking
+ * @wiphy: the wiphy to check the locking on
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check that the caller either holds
+ * rcu_read_lock() or the wiphy mutex. Note: Please prefer
+ * wiphy_dereference() or rcu_dereference().
+ */
+#define rcu_dereference_wiphy(wiphy, p) \
+ rcu_dereference_check(p, lockdep_is_held(&wiphy->mtx))
+
+/**
+ * wiphy_dereference - fetch RCU pointer when updates are prevented by wiphy mtx
+ * @wiphy: the wiphy to check the locking on
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * READ_ONCE(), because caller holds the wiphy mutex used for updates.
+ */
+#define wiphy_dereference(wiphy, p) \
+ rcu_dereference_protected(p, lockdep_is_held(&wiphy->mtx))
+
+/**
+ * get_wiphy_regdom - get custom regdomain for the given wiphy
+ * @wiphy: the wiphy to get the regdomain from
+ */
+const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy);
+
/**
* wiphy_unregister - deregister a wiphy from cfg80211
*
@@ -5055,13 +5930,153 @@ struct cfg80211_cached_keys;
struct cfg80211_cqm_config;
/**
+ * wiphy_lock - lock the wiphy
+ * @wiphy: the wiphy to lock
+ *
+ * This is needed around registering and unregistering netdevs that
+ * aren't created through cfg80211 calls, since that requires locking
+ * in cfg80211 when the notifier is called, but the notifier cannot
+ * differentiate which way it's called.
+ *
+ * It can also be used by drivers for their own purposes.
+ *
+ * When cfg80211 ops are called, the wiphy is already locked.
+ *
+ * Note that this makes sure that no workers that have been queued
+ * with wiphy_work_queue() are running.
+ */
+static inline void wiphy_lock(struct wiphy *wiphy)
+ __acquires(&wiphy->mtx)
+{
+ mutex_lock(&wiphy->mtx);
+ __acquire(&wiphy->mtx);
+}
+
+/**
+ * wiphy_unlock - unlock the wiphy again
+ * @wiphy: the wiphy to unlock
+ */
+static inline void wiphy_unlock(struct wiphy *wiphy)
+ __releases(&wiphy->mtx)
+{
+ __release(&wiphy->mtx);
+ mutex_unlock(&wiphy->mtx);
+}
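
To illustrate the locking rules above, a driver registering a pre-created
netdev might wrap the registration as in the following sketch (hypothetical
driver code; the RTNL is taken first to respect the lock ordering noted
earlier):

	int err;

	rtnl_lock();			/* RTNL is the outer lock */
	wiphy_lock(wiphy);		/* the wiphy mutex nests inside it */
	err = register_netdevice(netdev);
	wiphy_unlock(wiphy);
	rtnl_unlock();
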
+
+struct wiphy_work;
+typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
+
+struct wiphy_work {
+ struct list_head entry;
+ wiphy_work_func_t func;
+};
+
+static inline void wiphy_work_init(struct wiphy_work *work,
+ wiphy_work_func_t func)
+{
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
+
+/**
+ * wiphy_work_queue - queue work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @work: the work item
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
+ * use just cancel_work() instead of cancel_work_sync(); it must,
+ * however, be called from a section protected by wiphy_lock().
+ */
+void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_cancel - cancel previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_flush - flush previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to flush, this can be %NULL to flush all work
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
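
A minimal usage sketch for the wiphy_work API above (the drv_* names are
hypothetical):

	static void drv_reconfig_work(struct wiphy *wiphy,
				      struct wiphy_work *work)
	{
		/* runs with the wiphy mutex held, as documented above */
	}

	/* setup, e.g. when allocating driver state */
	wiphy_work_init(&drv->reconfig_work, drv_reconfig_work);

	/* queue from any context */
	wiphy_work_queue(wiphy, &drv->reconfig_work);

	/* teardown: cancel must run under the wiphy mutex */
	wiphy_lock(wiphy);
	wiphy_work_cancel(wiphy, &drv->reconfig_work);
	wiphy_unlock(wiphy);
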
+
+struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+ struct timer_list timer;
+};
+
+void wiphy_delayed_work_timer(struct timer_list *t);
+
+static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
+ wiphy_work_func_t func)
+{
+ timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0);
+ wiphy_work_init(&dwork->work, func);
+}
+
+/**
+ * wiphy_delayed_work_queue - queue delayed work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @dwork: the delayable worker
+ * @delay: number of jiffies to wait before queueing
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_delayed_work_cancel()
+ * can use just cancel_work() instead of cancel_work_sync(); it must,
+ * however, be called from a section protected by wiphy_lock().
+ */
+void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork,
+ unsigned long delay);
+
+/**
+ * wiphy_delayed_work_cancel - cancel previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
+/**
+ * wiphy_delayed_work_flush - flush previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to flush
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_flush(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
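
The delayed variant follows the same pattern; a sketch with hypothetical
names:

	/* setup */
	wiphy_delayed_work_init(&drv->roc_done_work, drv_roc_done_work);

	/* arm: runs drv_roc_done_work() under the wiphy mutex after the delay */
	wiphy_delayed_work_queue(wiphy, &drv->roc_done_work,
				 msecs_to_jiffies(duration_ms));
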
+
+/**
* struct wireless_dev - wireless device state
*
* For netdevs, this structure must be allocated by the driver
* that uses the ieee80211_ptr field in struct net_device (this
* is intentional so it can be allocated along with the netdev.)
* It need not be registered then as netdev registration will
- * be intercepted by cfg80211 to see the new wireless device.
+ * be intercepted by cfg80211 to see the new wireless device.
+ * However, drivers must lock the wiphy before registering or
+ * unregistering netdevs if they pre-create any netdevs (in ops
+ * called from cfg80211, the wiphy is already locked).
*
* For non-netdev uses, it must also be allocated by the driver
* in response to the cfg80211 callbacks that require it, as
@@ -5070,20 +6085,15 @@ struct cfg80211_cqm_config;
*
* @wiphy: pointer to hardware description
* @iftype: interface type
+ * @registered: is this wdev already registered with cfg80211
+ * @registering: indicates we're doing registration under wiphy lock
+ * for the notifier
* @list: (private) Used to collect the interfaces
* @netdev: (private) Used to reference back to the netdev, may be %NULL
* @identifier: (private) Identifier used in nl80211 to identify this
* wireless device if it has no netdev
- * @current_bss: (private) Used by the internal configuration code
- * @chandef: (private) Used by the internal configuration code to track
- * the user-set channel definition.
- * @preset_chandef: (private) Used by the internal configuration code to
- * track the channel to be used for AP later
- * @bssid: (private) Used by the internal configuration code
- * @ssid: (private) Used by the internal configuration code
- * @ssid_len: (private) Used by the internal configuration code
- * @mesh_id_len: (private) Used by the internal configuration code
- * @mesh_id_up_len: (private) Used by the internal configuration code
+ * @u: union containing data specific to @iftype
+ * @connected: indicates if connected or not (STA mode)
* @wext: (private) Used by the internal wireless extensions compat code
* @wext.ibss: (private) IBSS data part of wext handling
* @wext.connect: (private) connection handling data
@@ -5101,13 +6111,8 @@ struct cfg80211_cqm_config;
* netdev and may otherwise be used by the driver read-only; it will be
* updated by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
- * @mtx: mutex used to lock data in this struct, may be used by drivers
- * and some API functions require it held
- * @beacon_interval: beacon interval used on this device for transmitting
- * beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
@@ -5124,18 +6129,20 @@ struct cfg80211_cqm_config;
* @conn_owner_nlportid: (private) connection owner socket port ID
* @disconnect_wk: (private) auto-disconnect work
* @disconnect_bssid: (private) the BSSID to use for auto-disconnect
- * @ibss_fixed: (private) IBSS is using fixed BSSID
- * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
* @cqm_config: (private) nl80211 RSSI monitor state
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
* @pmsr_free_wk: (private) peer measurements cleanup work
* @unprot_beacon_reported: (private) timestamp of last
* unprotected beacon report
+ * @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr,
+ * @ap and @client for each link
+ * @valid_links: bitmap describing what elements of @links are valid
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -5148,18 +6155,13 @@ struct wireless_dev {
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
u8 mgmt_registrations_need_update:1;
- struct mutex mtx;
-
- bool use_4addr, is_running;
+ bool use_4addr, is_running, registered, registering;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
/* currently used for IBSS and SME - might be rearranged later */
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len, mesh_id_len, mesh_id_up_len;
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
enum ieee80211_bss_type conn_bss_type;
@@ -5171,23 +6173,17 @@ struct wireless_dev {
struct list_head event_list;
spinlock_t event_lock;
- struct cfg80211_internal_bss *current_bss; /* associated / joined */
- struct cfg80211_chan_def preset_chandef;
- struct cfg80211_chan_def chandef;
-
- bool ibss_fixed;
- bool ibss_dfs_possible;
+ u8 connected:1;
bool ps;
int ps_timeout;
- int beacon_interval;
-
u32 ap_unexpected_nlportid;
u32 owner_nlportid;
bool nl_owner_dead;
+ /* FIXME: need to rework radar detection for MLO */
bool cac_started;
unsigned long cac_start_time;
unsigned int cac_time_ms;
@@ -5208,16 +6204,61 @@ struct wireless_dev {
} wext;
#endif
- struct cfg80211_cqm_config *cqm_config;
+ struct wiphy_work cqm_rssi_work;
+ struct cfg80211_cqm_config __rcu *cqm_config;
struct list_head pmsr_list;
spinlock_t pmsr_lock;
struct work_struct pmsr_free_wk;
unsigned long unprot_beacon_reported;
-};
-static inline u8 *wdev_address(struct wireless_dev *wdev)
+ union {
+ struct {
+ u8 connected_addr[ETH_ALEN] __aligned(2);
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } client;
+ struct {
+ int beacon_interval;
+ struct cfg80211_chan_def preset_chandef;
+ struct cfg80211_chan_def chandef;
+ u8 id[IEEE80211_MAX_MESH_ID_LEN];
+ u8 id_len, id_up_len;
+ } mesh;
+ struct {
+ struct cfg80211_chan_def preset_chandef;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } ap;
+ struct {
+ struct cfg80211_internal_bss *current_bss;
+ struct cfg80211_chan_def chandef;
+ int beacon_interval;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ } ibss;
+ struct {
+ struct cfg80211_chan_def chandef;
+ } ocb;
+ } u;
+
+ struct {
+ u8 addr[ETH_ALEN] __aligned(2);
+ union {
+ struct {
+ unsigned int beacon_interval;
+ struct cfg80211_chan_def chandef;
+ } ap;
+ struct {
+ struct cfg80211_internal_bss *current_bss;
+ } client;
+ };
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 valid_links;
+};
+
+static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@ -5244,6 +6285,32 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
}
/**
+ * wdev_chandef - return chandef pointer from wireless_dev
+ * @wdev: the wdev
+ * @link_id: the link ID for MLO
+ *
+ * Return: The chandef depending on the mode, or %NULL.
+ */
+struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
+ unsigned int link_id);
+
+static inline void WARN_INVALID_LINK_ID(struct wireless_dev *wdev,
+ unsigned int link_id)
+{
+ WARN_ON(link_id && !wdev->valid_links);
+ WARN_ON(wdev->valid_links &&
+ !(wdev->valid_links & BIT(link_id)));
+}
+
+#define for_each_valid_link(link_info, link_id) \
+ for (link_id = 0; \
+ link_id < ((link_info)->valid_links ? \
+ ARRAY_SIZE((link_info)->links) : 1); \
+ link_id++) \
+ if (!(link_info)->valid_links || \
+ ((link_info)->valid_links & BIT(link_id)))
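
For a non-MLO wdev the loop body runs exactly once with link_id == 0; for
an MLO wdev it runs for each bit set in @valid_links. A small sketch:

	unsigned int link_id;

	for_each_valid_link(wdev, link_id) {
		pr_debug("link %u uses address %pM\n",
			 link_id, wdev->links[link_id].addr);
	}
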
+
+/**
* DOC: Utility functions
*
* cfg80211 offers a number of utility functions that can be useful.
@@ -5276,6 +6343,16 @@ ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
}
/**
+ * ieee80211_s1g_channel_width - get allowed channel width from @chan
+ *
+ * Only allowed for band NL80211_BAND_S1GHZ
+ * @chan: channel
+ * Return: The allowed channel width for this center_freq
+ */
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
+
+/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
@@ -5363,20 +6440,18 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
* which is, for this function, given as a bitmap of indices of
* rates in the band's bitrate table.
*/
-struct ieee80211_rate *
+const struct ieee80211_rate *
ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
u32 basic_rates, int bitrate);
/**
* ieee80211_mandatory_rates - get mandatory rates for a given band
* @sband: the band to look for rates in
- * @scan_width: width of the control channel
*
* This function returns a bitmap of the mandatory rates for the given
* band, bits are set according to the rate position in the bitrates array.
*/
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
- enum nl80211_bss_scan_width scan_width);
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
/*
* Radiotap parsing functions -- for controlled injection support
@@ -5509,11 +6584,12 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
* @addr: the device MAC address
* @iftype: the virtual interface type
* @data_offset: offset of payload after the 802.11 header
+ * @is_amsdu: true if the payload after the 802.11 header is an A-MSDU
* Return: 0 on success. Non-zero on error.
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset);
+ u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -5525,10 +6601,26 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
+ * ieee80211_is_valid_amsdu - check if subframe lengths of an A-MSDU are valid
+ *
+ * This is used to detect non-standard A-MSDU frames, e.g. the ones generated
+ * by ath10k and ath11k, where the subframe length includes the length of the
+ * mesh control field.
+ *
+ * @skb: The input A-MSDU frame without any headers.
+ * @mesh_hdr: the type of mesh header to test
+ * 0: non-mesh A-MSDU length field
+ * 1: big-endian mesh A-MSDU length field
+ * 2: little-endian mesh A-MSDU length field
+ * Returns: true if subframe header lengths are valid for the @mesh_hdr mode
+ */
+bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr);
+
+/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
* Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
@@ -5543,11 +6635,36 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
* @check_da: DA to check in the inner ethernet header, or NULL
* @check_sa: SA to check in the inner ethernet header, or NULL
+ * @mesh_control: see mesh_hdr in ieee80211_is_valid_amsdu
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- const u8 *check_da, const u8 *check_sa);
+ const u8 *check_da, const u8 *check_sa,
+ u8 mesh_control);
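
A sketch of how the two A-MSDU helpers might be combined on RX (the
surrounding code is hypothetical; mesh_hdr selects the length-field
convention as documented above):

	struct sk_buff_head frames;

	__skb_queue_head_init(&frames);

	/* validate the subframe lengths before deaggregating */
	if (ieee80211_is_valid_amsdu(skb, mesh_hdr))
		ieee80211_amsdu_to_8023s(skb, &frames, wdev_address(wdev),
					 NL80211_IFTYPE_STATION,
					 0 /* extra_headroom */,
					 NULL, NULL, mesh_hdr);
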
+
+/**
+ * ieee80211_get_8023_tunnel_proto - get RFC1042 or bridge tunnel encap protocol
+ *
+ * Check for RFC1042 or bridge tunnel header and fetch the encapsulated
+ * protocol.
+ *
+ * @hdr: pointer to the MSDU payload
+ * @proto: destination pointer to store the protocol
+ * Return: true if encapsulation was found
+ */
+bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto);
+
+/**
+ * ieee80211_strip_8023_mesh_hdr - strip mesh header from converted 802.3 frames
+ *
+ * Strip the mesh header, which was left in by ieee80211_data_to_8023 as part
+ * of the MSDU data. Also move any source/destination addresses from the mesh
+ * header to the ethernet header (if present).
+ *
+ * @skb: The 802.3 frame with embedded mesh header
+ */
+int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
@@ -5621,9 +6738,9 @@ cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
(!match_len && match_offset)))
return NULL;
- return (void *)cfg80211_find_elem_match(eid, ies, len,
- match, match_len,
- match_offset ?
+ return (const void *)cfg80211_find_elem_match(eid, ies, len,
+ match, match_len,
+ match_offset ?
match_offset - 2 : 0);
}
@@ -5675,7 +6792,7 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
* @ies: data consisting of IEs
* @len: length of data
*
- * Return: %NULL if the etended element could not be found or if
+ * Return: %NULL if the extended element could not be found or if
* the element is invalid (claims to be longer than the given
* data) or if the byte array doesn't match; otherwise return the
* requested element struct.
@@ -5750,10 +6867,64 @@ static inline const u8 *
cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, unsigned int len)
{
- return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+ return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
}
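
For example, looking up the WMM vendor element in an IE buffer (a sketch;
the OUI constants come from linux/ieee80211.h, and drv_parse_wmm() is
hypothetical):

	const u8 *ie;

	ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
				     WLAN_OUI_TYPE_MICROSOFT_WMM,
				     ies, ies_len);
	if (ie)
		/* ie points at the element header; it spans ie[1] + 2 bytes */
		drv_parse_wmm(ie + 2, ie[1]);
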
/**
+ * enum cfg80211_rnr_iter_ret - reduced neighbor report iteration state
+ * @RNR_ITER_CONTINUE: continue iterating with the next entry
+ * @RNR_ITER_BREAK: break iteration and return success
+ * @RNR_ITER_ERROR: break iteration and return error
+ */
+enum cfg80211_rnr_iter_ret {
+ RNR_ITER_CONTINUE,
+ RNR_ITER_BREAK,
+ RNR_ITER_ERROR,
+};
+
+/**
+ * cfg80211_iter_rnr - iterate reduced neighbor report entries
+ * @elems: the frame elements to iterate RNR elements and then
+ * their entries in
+ * @elems_len: length of the elements
+ * @iter: iteration function, see also &enum cfg80211_rnr_iter_ret
+ * for the return value
+ * @iter_data: additional data passed to the iteration function
+ * Return: %true on success (after successfully iterating all entries
+ * or if the iteration function returned %RNR_ITER_BREAK),
+ * %false on error (iteration function returned %RNR_ITER_ERROR
+ * or elements were malformed.)
+ */
+bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data);
+
+/**
+ * cfg80211_defragment_element - Defrag the given element data into a buffer
+ *
+ * @elem: the element to defragment
+ * @ies: elements where @elem is contained
+ * @ieslen: length of @ies
+ * @data: buffer to store element data, or %NULL to just determine size
+ * @data_len: length of @data, or 0
+ * @frag_id: the element ID of fragments
+ *
+ * Return: length of @data, or -EINVAL on error
+ *
+ * Copy out all data from an element that may be fragmented into @data, while
+ * skipping all headers.
+ *
+ * The function uses memmove() internally. It is acceptable to defragment an
+ * element in-place.
+ */
+ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+ size_t ieslen, u8 *data, size_t data_len,
+ u8 frag_id);
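
The %NULL data mode enables a two-pass pattern: query the defragmented size
first, then copy into an allocated buffer. A sketch using the standard
fragment element ID:

	ssize_t len;
	u8 *data;

	/* first pass: only determine the size */
	len = cfg80211_defragment_element(elem, ies, ieslen,
					  NULL, 0, WLAN_EID_FRAGMENT);
	if (len < 0)
		return len;

	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* second pass: copy out the defragmented data */
	len = cfg80211_defragment_element(elem, ies, ieslen,
					  data, len, WLAN_EID_FRAGMENT);
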
+
+/**
* cfg80211_send_layer2_update - send layer 2 update frame
*
* @dev: network device
@@ -5800,7 +6971,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
* regulatory_set_wiphy_regd - set regdom info for self managed drivers
* @wiphy: the wireless device we want to process the regulatory domain on
- * @rd: the regulatory domain informatoin to use for this wiphy
+ * @rd: the regulatory domain information to use for this wiphy
*
* Set the regulatory domain information for self-managed wiphys, only they
* may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
@@ -5812,18 +6983,18 @@ int regulatory_set_wiphy_regd(struct wiphy *wiphy,
struct ieee80211_regdomain *rd);
/**
- * regulatory_set_wiphy_regd_sync_rtnl - set regdom for self-managed drivers
+ * regulatory_set_wiphy_regd_sync - set regdom for self-managed drivers
* @wiphy: the wireless device we want to process the regulatory domain on
* @rd: the regulatory domain information to use for this wiphy
*
- * This functions requires the RTNL to be held and applies the new regdomain
- * synchronously to this wiphy. For more details see
- * regulatory_set_wiphy_regd().
+ * This function requires the RTNL and the wiphy mutex to be held and
+ * applies the new regdomain synchronously to this wiphy. For more details
+ * see regulatory_set_wiphy_regd().
*
* Return: 0 on success; -EINVAL or -EPERM on failure
*/
-int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
- struct ieee80211_regdomain *rd);
+int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
@@ -5891,7 +7062,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
* Regulatory self-managed driver can use it to proactively
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
- * @freq: the freqency(in MHz) to be queried.
+ * @freq: the frequency (in MHz) to be queried.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query
@@ -5941,7 +7112,7 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid);
void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
/**
- * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ * cfg80211_sched_scan_stopped_locked - notify that the scheduled scan has stopped
*
* @wiphy: the wiphy on which the scheduled scan stopped
* @reqid: identifier for the related scheduled scan request
@@ -5949,9 +7120,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
* The driver can call this function to inform cfg80211 that the
* scheduled scan had to be stopped, for whatever reason. The driver
* is then called back via the sched_scan_stop operation when done.
- * This function should be called with rtnl locked.
+ * This function should be called with the wiphy mutex held.
*/
-void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid);
+void cfg80211_sched_scan_stopped_locked(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
@@ -5974,22 +7145,6 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
gfp_t gfp);
static inline struct cfg80211_bss * __must_check
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
-{
- struct cfg80211_inform_bss data = {
- .chan = rx_channel,
- .scan_width = scan_width,
- .signal = signal,
- };
-
- return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
-}
-
-static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
struct ieee80211_mgmt *mgmt, size_t len,
@@ -5997,7 +7152,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
{
struct cfg80211_inform_bss data = {
.chan = rx_channel,
- .scan_width = NL80211_BSS_CHAN_WIDTH_20,
.signal = signal,
};
@@ -6053,14 +7207,44 @@ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
* from a beacon or probe response
* @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
* @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ * @CFG80211_BSS_FTYPE_S1G_BEACON: data comes from an S1G beacon
*/
enum cfg80211_bss_frame_type {
CFG80211_BSS_FTYPE_UNKNOWN,
CFG80211_BSS_FTYPE_BEACON,
CFG80211_BSS_FTYPE_PRESP,
+ CFG80211_BSS_FTYPE_S1G_BEACON,
};
/**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
+/**
+ * cfg80211_ssid_eq - compare two SSIDs
+ * @a: first SSID
+ * @b: second SSID
+ *
+ * Return: %true if SSIDs are equal, %false otherwise.
+ */
+static inline bool
+cfg80211_ssid_eq(struct cfg80211_ssid *a, struct cfg80211_ssid *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+ if (a->ssid_len != b->ssid_len)
+ return false;
+ return memcmp(a->ssid, b->ssid, a->ssid_len) ? false : true;
+}
+
+/**
* cfg80211_inform_bss_data - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
@@ -6089,26 +7273,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
gfp_t gfp);
static inline struct cfg80211_bss * __must_check
-cfg80211_inform_bss_width(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- enum cfg80211_bss_frame_type ftype,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
-{
- struct cfg80211_inform_bss data = {
- .chan = rx_channel,
- .scan_width = scan_width,
- .signal = signal,
- };
-
- return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
- capability, beacon_interval, ie, ielen,
- gfp);
-}
-
-static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum cfg80211_bss_frame_type ftype,
@@ -6118,7 +7282,6 @@ cfg80211_inform_bss(struct wiphy *wiphy,
{
struct cfg80211_inform_bss data = {
.chan = rx_channel,
- .scan_width = NL80211_BSS_CHAN_WIDTH_20,
.signal = signal,
};
@@ -6128,6 +7291,25 @@ cfg80211_inform_bss(struct wiphy *wiphy,
}
/**
+ * __cfg80211_get_bss - get a BSS reference
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @channel: the channel to search on (or %NULL)
+ * @bssid: the desired BSSID (or %NULL)
+ * @ssid: the desired SSID (or %NULL)
+ * @ssid_len: length of the SSID (or 0)
+ * @bss_type: type of BSS, see &enum ieee80211_bss_type
+ * @privacy: privacy filter, see &enum ieee80211_privacy
+ * @use_for: indicates which use is intended
+ */
+struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
+ const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy,
+ u32 use_for);
+
+/**
* cfg80211_get_bss - get a BSS reference
* @wiphy: the wiphy this BSS struct belongs to
* @channel: the channel to search on (or %NULL)
@@ -6136,13 +7318,20 @@ cfg80211_inform_bss(struct wiphy *wiphy,
* @ssid_len: length of the SSID (or 0)
* @bss_type: type of BSS, see &enum ieee80211_bss_type
* @privacy: privacy filter, see &enum ieee80211_privacy
+ *
+ * This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL.
*/
-struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid,
- const u8 *ssid, size_t ssid_len,
- enum ieee80211_bss_type bss_type,
- enum ieee80211_privacy privacy);
+static inline struct cfg80211_bss *
+cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel,
+ const u8 *bssid, const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy)
+{
+ return __cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len,
+ bss_type, privacy,
+ NL80211_BSS_USE_FOR_NORMAL);
+}
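
Callers own the reference returned by these lookups and must release it
with cfg80211_put_bss(); a typical pattern (sketch):

	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			       IEEE80211_BSS_TYPE_ESS,
			       IEEE80211_PRIVACY_ANY);
	if (bss) {
		/* ... inspect bss ... */
		cfg80211_put_bss(wiphy, bss);	/* drop the reference */
	}
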
+
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -6203,19 +7392,6 @@ void cfg80211_bss_iter(struct wiphy *wiphy,
void *data),
void *iter_data);
-static inline enum nl80211_bss_scan_width
-cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
-{
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_5:
- return NL80211_BSS_CHAN_WIDTH_5;
- case NL80211_CHAN_WIDTH_10:
- return NL80211_BSS_CHAN_WIDTH_10;
- default:
- return NL80211_BSS_CHAN_WIDTH_20;
- }
-}
-
/**
* cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
* @dev: network device
@@ -6248,16 +7424,39 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
/**
- * cfg80211_rx_assoc_resp - notification of processed association response
- * @dev: network device
- * @bss: the BSS that association was requested with, ownership of the pointer
- * moves to cfg80211 in this call
+ * struct cfg80211_rx_assoc_resp_data - association response data
* @buf: (Re)Association Response frame (header + body)
* @len: length of the frame data
* @uapsd_queues: bitmap of queues configured for uapsd. Same format
* as the AC bitmap in the QoS info field
* @req_ies: information elements from the (Re)Association Request frame
* @req_ies_len: length of req_ies data
+ * @ap_mld_addr: AP MLD address (in case of MLO)
+ * @links: per-link information indexed by link ID, use links[0] for
+ * non-MLO connections
+ * @links.bss: the BSS that association was requested with, ownership of the
+ * pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
+ * @links.status: Set this (along with a BSS pointer) for links that
+ * were rejected by the AP.
+ */
+struct cfg80211_rx_assoc_resp_data {
+ const u8 *buf;
+ size_t len;
+ const u8 *req_ies;
+ size_t req_ies_len;
+ int uapsd_queues;
+ const u8 *ap_mld_addr;
+ struct {
+ u8 addr[ETH_ALEN] __aligned(2);
+ struct cfg80211_bss *bss;
+ u16 status;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
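
A driver reporting a successful non-MLO association might fill the struct
roughly as follows (a sketch; a @uapsd_queues value of -1 is assumed to
mean no U-APSD information, mirroring existing callers):

	struct cfg80211_rx_assoc_resp_data resp = {
		.buf = mgmt_buf,
		.len = mgmt_len,
		.uapsd_queues = -1,	/* assumption: no U-APSD info */
	};

	/* non-MLO: use entry 0; ownership of bss passes to cfg80211 */
	resp.links[0].bss = bss;
	cfg80211_rx_assoc_resp(dev, &resp);
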
+
+/**
+ * cfg80211_rx_assoc_resp - notification of processed association response
+ * @dev: network device
+ * @data: association response data, &struct cfg80211_rx_assoc_resp_data
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -6265,43 +7464,47 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
- struct cfg80211_bss *bss,
- const u8 *buf, size_t len,
- int uapsd_queues,
- const u8 *req_ies, size_t req_ies_len);
+ const struct cfg80211_rx_assoc_resp_data *data);
/**
- * cfg80211_assoc_timeout - notification of timed out association
- * @dev: network device
- * @bss: The BSS entry with which association timed out.
- *
- * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ * struct cfg80211_assoc_failure - association failure data
+ * @ap_mld_addr: AP MLD address, or %NULL
+ * @bss: list of BSSes, must use entry 0 for non-MLO connections
+ * (@ap_mld_addr is %NULL)
+ * @timeout: indicates the association failed due to timeout, otherwise
+ * the association was abandoned for a reason reported through some
+ * other API (e.g. deauth RX)
*/
-void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
+struct cfg80211_assoc_failure {
+ const u8 *ap_mld_addr;
+ struct cfg80211_bss *bss[IEEE80211_MLD_MAX_NUM_LINKS];
+ bool timeout;
+};
/**
- * cfg80211_abandon_assoc - notify cfg80211 of abandoned association attempt
+ * cfg80211_assoc_failure - notification of association failure
* @dev: network device
- * @bss: The BSS entry with which association was abandoned.
+ * @data: data describing the association failure
*
- * Call this whenever - for reasons reported through other API, like deauth RX,
- * an association attempt was abandoned.
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
-void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss);
+void cfg80211_assoc_failure(struct net_device *dev,
+ struct cfg80211_assoc_failure *data);
/**
* cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
* @dev: network device
* @buf: 802.11 frame (header + body)
* @len: length of the frame data
+ * @reconnect: immediate reconnect is desired (include the nl80211 attribute)
*
* This function is called whenever deauthentication has been processed in
* station mode. This includes both received deauthentication frames and
* locally generated ones. This function may sleep. The caller must hold the
* corresponding wdev's mutex.
*/
-void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len,
+ bool reconnect);
/**
* cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
@@ -6356,12 +7559,14 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
struct ieee80211_channel *channel, gfp_t gfp);
/**
- * cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
+ * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer
+ * candidate
*
* @dev: network device
* @macaddr: the MAC address of the new candidate
* @ie: information elements advertised by the peer candidate
* @ie_len: length of the information elements buffer
+ * @sig_dbm: signal level in dBm
* @gfp: allocation flags
*
* This function notifies cfg80211 that the mesh peer candidate has been
@@ -6378,7 +7583,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev,
* RFkill integration in cfg80211 is almost invisible to drivers,
* as cfg80211 automatically registers an rfkill instance for each
* wireless device it knows about. Soft kill is also translated
- * into disconnecting and turning all interfaces off, drivers are
+ * into disconnecting and turning all interfaces off. Drivers are
* expected to turn off the device when all interfaces are down.
*
* However, devices may have a hard RFkill line, in which case they
@@ -6387,11 +7592,19 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev,
*/
/**
- * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state
+ * wiphy_rfkill_set_hw_state_reason - notify cfg80211 about hw block state
* @wiphy: the wiphy
* @blocked: block status
+ * @reason: one of reasons in &enum rfkill_hard_block_reasons
*/
-void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked);
+void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
+ enum rfkill_hard_block_reasons reason);
+
+static inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
+{
+ wiphy_rfkill_set_hw_state_reason(wiphy, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
/**
* wiphy_rfkill_start_polling - start polling rfkill
@@ -6403,7 +7616,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy);
* wiphy_rfkill_stop_polling - stop polling rfkill
* @wiphy: the wiphy
*/
-void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
+static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
+{
+ rfkill_pause_polling(wiphy->rfkill);
+}
/**
* DOC: Vendor commands
@@ -6415,7 +7631,7 @@ void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
* the configuration mechanism.
*
* A driver supporting vendor commands must register them as an array
- * in struct wiphy, with handlers for each one, each command has an
+ * in struct wiphy, with handlers for each one. Each command has an
* OUI and sub command ID to identify it.
*
* Note that this feature should not be (ab)used to implement protocol
@@ -6485,7 +7701,7 @@ cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
/**
- * cfg80211_vendor_cmd_get_sender
+ * cfg80211_vendor_cmd_get_sender - get the current sender netlink ID
* @wiphy: the wiphy
*
* Return the current netlink port ID in a vendor command handler.
@@ -6579,7 +7795,7 @@ static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
* interact with driver-specific tools to aid, for instance,
* factory programming.
*
- * This chapter describes how drivers interact with it, for more
+ * This chapter describes how drivers interact with it. For more
* information see the nl80211 book's chapter on it.
*/
@@ -6715,13 +7931,6 @@ struct cfg80211_fils_resp_params {
* indicate that this is a failure, but without a status code.
* @timeout_reason is used to report the reason for the timeout in that
* case.
- * @bssid: The BSSID of the AP (may be %NULL)
- * @bss: Entry of bss to which STA got connected to, can be obtained through
- * cfg80211_get_bss() (may be %NULL). But it is recommended to store the
- * bss from the connect_request and hold a reference to it and return
- * through this param to avoid a warning if the bss is expired during the
- * connection, esp. for those drivers implementing connect op.
- * Only one parameter among @bssid and @bss needs to be specified.
* @req_ie: Association request IEs (may be %NULL)
* @req_ie_len: Association request IEs length
* @resp_ie: Association response IEs (may be %NULL)
@@ -6733,17 +7942,45 @@ struct cfg80211_fils_resp_params {
* not known. This value is used only if @status < 0 to indicate that the
* failure is due to a timeout and not due to explicit rejection by the AP.
* This value is ignored in other cases (@status >= 0).
+ * @valid_links: For MLO connection, BIT mask of the valid link ids. Otherwise
+ * zero.
+ * @ap_mld_addr: For MLO connection, MLD address of the AP. Otherwise %NULL.
+ * @links: For MLO connection, contains link info for the valid links indicated
+ * using @valid_links. For non-MLO connection, links[0] contains the
+ * connected AP info.
+ * @links.addr: For MLO connection, MAC address of the STA link. Otherwise
+ * %NULL.
+ * @links.bssid: For MLO connection, MAC address of the AP link. For non-MLO
+ * connection, links[0].bssid points to the BSSID of the AP (may be %NULL).
+ * @links.bss: For MLO connection, entry of bss to which STA link is connected.
+ * For non-MLO connection, links[0].bss points to entry of bss to which STA
+ * is connected. It can be obtained through cfg80211_get_bss() (may be
+ * %NULL). It is recommended to store the bss from the connect_request and
+ * hold a reference to it and return through this param to avoid a warning
+ * if the bss is expired during the connection, esp. for those drivers
+ * implementing the connect op. Only one of @links.bssid and @links.bss
+ * needs to be specified.
+ * @links.status: per-link status code, to report a status code that's not
+ * %WLAN_STATUS_SUCCESS for a given link, it must also be in the
+ * @valid_links bitmap and may have a BSS pointer (which is then released)
*/
struct cfg80211_connect_resp_params {
int status;
- const u8 *bssid;
- struct cfg80211_bss *bss;
const u8 *req_ie;
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
struct cfg80211_fils_resp_params fils;
enum nl80211_timeout_reason timeout_reason;
+
+ const u8 *ap_mld_addr;
+ u16 valid_links;
+ struct {
+ const u8 *addr;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ u16 status;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
};
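
A minimal sketch of filling the reworked struct for a non-MLO association result and handing it to cfg80211_connect_done(); dev, bss and the IE buffer are placeholders, and the driver is assumed to have held a reference to the bss since the connect request:

static void drv_report_connect_ok(struct net_device *dev,
				  struct cfg80211_bss *bss,
				  const u8 *req_ie, size_t req_ie_len)
{
	struct cfg80211_connect_resp_params params = {};

	params.status = WLAN_STATUS_SUCCESS;
	params.links[0].bss = bss;	/* reference held since connect */
	params.req_ie = req_ie;
	params.req_ie_len = req_ie_len;
	/* valid_links stays 0 for a non-MLO connection */

	cfg80211_connect_done(dev, &params, GFP_KERNEL);
}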
/**
@@ -6813,8 +8050,8 @@ cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
memset(&params, 0, sizeof(params));
params.status = status;
- params.bssid = bssid;
- params.bss = bss;
+ params.links[0].bssid = bssid;
+ params.links[0].bss = bss;
params.req_ie = req_ie;
params.req_ie_len = req_ie_len;
params.resp_ie = resp_ie;
@@ -6885,24 +8122,40 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
/**
* struct cfg80211_roam_info - driver initiated roaming information
*
- * @channel: the channel of the new AP
- * @bss: entry of bss to which STA got roamed (may be %NULL if %bssid is set)
- * @bssid: the BSSID of the new AP (may be %NULL if %bss is set)
 * @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
* @fils: FILS related roaming information.
+ * @valid_links: For MLO roaming, BIT mask of the new valid links is set.
+ * Otherwise zero.
+ * @ap_mld_addr: For MLO roaming, MLD address of the new AP. Otherwise %NULL.
+ * @links: For MLO roaming, contains new link info for the valid links set in
+ * @valid_links. For non-MLO roaming, links[0] contains the new AP info.
+ * @links.addr: For MLO roaming, MAC address of the STA link. Otherwise %NULL.
+ * @links.bssid: For MLO roaming, MAC address of the new AP link. For non-MLO
+ * roaming, links[0].bssid points to the BSSID of the new AP. May be
+ * %NULL if %links.bss is set.
+ * @links.channel: the channel of the new AP.
+ * @links.bss: For MLO roaming, entry of new bss to which STA link got
+ * roamed. For non-MLO roaming, links[0].bss points to entry of bss to
+ * which STA got roamed (may be %NULL if %links.bssid is set)
*/
struct cfg80211_roam_info {
- struct ieee80211_channel *channel;
- struct cfg80211_bss *bss;
- const u8 *bssid;
const u8 *req_ie;
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
struct cfg80211_fils_resp_params fils;
+
+ const u8 *ap_mld_addr;
+ u16 valid_links;
+ struct {
+ const u8 *addr;
+ const u8 *bssid;
+ struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
};
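
Likewise, a hedged sketch of reporting a driver-initiated roam with the new links[] layout; this is the non-MLO case, so only links[0] is filled, and new_bss/chan are assumed valid and referenced:

static void drv_report_roam(struct net_device *dev,
			    struct cfg80211_bss *new_bss,
			    struct ieee80211_channel *chan)
{
	struct cfg80211_roam_info info = {};

	info.links[0].bss = new_bss;
	info.links[0].channel = chan;

	cfg80211_roamed(dev, &info, GFP_KERNEL);
}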
/**
@@ -6930,7 +8183,10 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
* cfg80211_port_authorized - notify cfg80211 of successful security association
*
* @dev: network device
- * @bssid: the BSSID of the AP
+ * @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address
+ * in case of AP/P2P GO
+ * @td_bitmap: transition disable policy
+ * @td_bitmap_len: Length of transition disable policy
* @gfp: allocation flags
*
* This function should be called by a driver that supports 4 way handshake
@@ -6939,9 +8195,12 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
* should be preceded with a call to cfg80211_connect_result(),
* cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to
* indicate the 802.11 association.
+ * This function can also be called by AP/P2P GO driver that supports
+ * authentication offload. In this case the @peer_addr passed is that of the
+ * associated STA/GC.
*/
-void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
- gfp_t gfp);
+void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr,
+			      const u8 *td_bitmap, u8 td_bitmap_len, gfp_t gfp);
/**
* cfg80211_disconnected - notify cfg80211 that connection was dropped
@@ -7029,7 +8288,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
/**
* cfg80211_del_sta_sinfo - notify userspace about deletion of a station
* @dev: the netdev
- * @mac_addr: the station's address
+ * @mac_addr: the station's address. For MLD station, MLD address is used.
* @sinfo: the station information/statistics
* @gfp: allocation flags
*/
@@ -7040,7 +8299,7 @@ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
* cfg80211_del_sta - notify userspace about deletion of a station
*
* @dev: the netdev
- * @mac_addr: the station's address
+ * @mac_addr: the station's address. For MLD station, MLD address is used.
* @gfp: allocation flags
*/
static inline void cfg80211_del_sta(struct net_device *dev,
@@ -7069,6 +8328,48 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
gfp_t gfp);
/**
+ * struct cfg80211_rx_info - received management frame info
+ *
+ * @freq: Frequency on which the frame was received in kHz
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
+ * @have_link_id: indicates the frame was received on a link of
+ * an MLD, i.e. the @link_id field is valid
+ * @link_id: the ID of the link the frame was received on
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @flags: flags, as defined in &enum nl80211_rxmgmt_flags
+ * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds
+ * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds
+ */
+struct cfg80211_rx_info {
+ int freq;
+ int sig_dbm;
+ bool have_link_id;
+ u8 link_id;
+ const u8 *buf;
+ size_t len;
+ u32 flags;
+ u64 rx_tstamp;
+ u64 ack_tstamp;
+};
+
+/**
+ * cfg80211_rx_mgmt_ext - management frame notification with extended info
+ * @wdev: wireless device receiving the frame
+ * @info: RX info as defined in struct cfg80211_rx_info
+ *
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
+ * For action frames, that makes it responsible for rejecting unrecognized
+ * action frames; %false otherwise, in which case for action frames the
+ * driver is responsible for rejecting the frame.
+ */
+bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev,
+ struct cfg80211_rx_info *info);
+
+/**
* cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame
* @wdev: wireless device receiving the frame
 * @freq: Frequency on which the frame was received in kHz
@@ -7085,8 +8386,20 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* action frames; %false otherwise, in which case for action frames the
* driver is responsible for rejecting the frame.
*/
-bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
- const u8 *buf, size_t len, u32 flags);
+static inline bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq,
+ int sig_dbm, const u8 *buf, size_t len,
+ u32 flags)
+{
+ struct cfg80211_rx_info info = {
+ .freq = freq,
+ .sig_dbm = sig_dbm,
+ .buf = buf,
+ .len = len,
+ .flags = flags
+ };
+
+ return cfg80211_rx_mgmt_ext(wdev, &info);
+}
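
A sketch of what the extended call makes possible beyond the wrapper above: tagging the frame with the MLD link it arrived on (link 1 is an arbitrary assumption here):

static bool drv_rx_action_frame(struct wireless_dev *wdev, int freq_khz,
				const u8 *buf, size_t len)
{
	struct cfg80211_rx_info info = {
		.freq = freq_khz,
		.buf = buf,
		.len = len,
		.have_link_id = true,	/* frame came in on an MLD link */
		.link_id = 1,
	};

	return cfg80211_rx_mgmt_ext(wdev, &info);
}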
/**
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
@@ -7109,11 +8422,50 @@ static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq,
int sig_dbm, const u8 *buf, size_t len,
u32 flags)
{
- return cfg80211_rx_mgmt_khz(wdev, MHZ_TO_KHZ(freq), sig_dbm, buf, len,
- flags);
+ struct cfg80211_rx_info info = {
+ .freq = MHZ_TO_KHZ(freq),
+ .sig_dbm = sig_dbm,
+ .buf = buf,
+ .len = len,
+ .flags = flags
+ };
+
+ return cfg80211_rx_mgmt_ext(wdev, &info);
}
/**
+ * struct cfg80211_tx_status - TX status for management frame information
+ *
+ * @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
+ * @tx_tstamp: hardware TX timestamp in nanoseconds
+ * @ack_tstamp: hardware ack RX timestamp in nanoseconds
+ * @buf: Management frame (header + body)
+ * @len: length of the frame data
+ * @ack: Whether frame was acknowledged
+ */
+struct cfg80211_tx_status {
+ u64 cookie;
+ u64 tx_tstamp;
+ u64 ack_tstamp;
+ const u8 *buf;
+ size_t len;
+ bool ack;
+};
+
+/**
+ * cfg80211_mgmt_tx_status_ext - TX status notification with extended info
+ * @wdev: wireless device that transmitted the frame
+ * @status: TX status data
+ * @gfp: context flags
+ *
+ * This function is called whenever a management frame was requested to be
+ * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
+ * transmission attempt with extended info.
+ */
+void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev,
+ struct cfg80211_tx_status *status, gfp_t gfp);
+
+/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
 * @wdev: wireless device that transmitted the frame
* @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
@@ -7126,8 +8478,19 @@ static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq,
* transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
* transmission attempt.
*/
-void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
- const u8 *buf, size_t len, bool ack, gfp_t gfp);
+static inline void cfg80211_mgmt_tx_status(struct wireless_dev *wdev,
+ u64 cookie, const u8 *buf,
+ size_t len, bool ack, gfp_t gfp)
+{
+ struct cfg80211_tx_status status = {
+ .cookie = cookie,
+ .buf = buf,
+ .len = len,
+ .ack = ack
+ };
+
+ cfg80211_mgmt_tx_status_ext(wdev, &status, gfp);
+}
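
A sketch of the extended TX-status path with hardware timestamps, which the inline wrapper above leaves at zero; the nanosecond values are assumed to come from the device:

static void drv_report_tx_status(struct wireless_dev *wdev, u64 cookie,
				 const u8 *buf, size_t len, bool ack,
				 u64 tx_ns, u64 ack_ns)
{
	struct cfg80211_tx_status status = {
		.cookie = cookie,
		.buf = buf,
		.len = len,
		.ack = ack,
		.tx_tstamp = tx_ns,
		.ack_tstamp = ack_ns,
	};

	cfg80211_mgmt_tx_status_ext(wdev, &status, GFP_KERNEL);
}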
/**
* cfg80211_control_port_tx_status - notification of TX status for control
@@ -7156,6 +8519,7 @@ void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie,
* responsible for any cleanup. The caller must also ensure that
* skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
*
* This function is used to inform userspace about a received control port
* frame. It should only be used if userspace indicated it wants to receive
@@ -7166,8 +8530,8 @@ void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie,
*
* Return: %true if the frame was passed to userspace
*/
-bool cfg80211_rx_control_port(struct net_device *dev,
- struct sk_buff *skb, bool unencrypted);
+bool cfg80211_rx_control_port(struct net_device *dev, struct sk_buff *skb,
+ bool unencrypted, int link_id);
/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
@@ -7220,15 +8584,33 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
/**
- * cfg80211_radar_event - radar detection event
+ * __cfg80211_radar_event - radar detection event
* @wiphy: the wiphy
* @chandef: chandef for the current channel
+ * @offchan: the radar has been detected on the offchannel chain
* @gfp: context flags
*
 * This function is called when a radar is detected on the current channel.
*/
-void cfg80211_radar_event(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef, gfp_t gfp);
+void __cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ bool offchan, gfp_t gfp);
+
+static inline void
+cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ gfp_t gfp)
+{
+ __cfg80211_radar_event(wiphy, chandef, false, gfp);
+}
+
+static inline void
+cfg80211_background_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ gfp_t gfp)
+{
+ __cfg80211_radar_event(wiphy, chandef, true, gfp);
+}
/**
* cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event
@@ -7259,6 +8641,14 @@ void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
enum nl80211_radar_event event, gfp_t gfp);
+/**
+ * cfg80211_background_cac_abort - Channel Availability Check offchan abort event
+ * @wiphy: the wiphy
+ *
+ * This function is called by the driver when a Channel Availability Check
+ * (CAC) is aborted by an offchannel dedicated chain.
+ */
+void cfg80211_background_cac_abort(struct wiphy *wiphy);
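
A sketch of how a driver with a dedicated offchannel chain might use the two background-CAC notifications; the event source and its boolean shape are assumptions:

static void drv_background_chain_event(struct wiphy *wiphy,
				       struct cfg80211_chan_def *chandef,
				       bool radar_detected)
{
	if (radar_detected)
		cfg80211_background_radar_event(wiphy, chandef, GFP_KERNEL);
	else
		/* chain stopped for another reason: CAC is aborted */
		cfg80211_background_cac_abort(wiphy);
}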
/**
* cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
@@ -7385,7 +8775,7 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
* also checks if IR-relaxation conditions apply, to allow beaconing under
* more permissive conditions.
*
- * Requires the RTNL to be held.
+ * Requires the wiphy mutex to be held.
*/
bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
@@ -7395,18 +8785,22 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
* @dev: the device which switched channels
* @chandef: the new channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
*
- * Caller must acquire wdev_lock, therefore must only be called from sleepable
+ * Caller must hold wiphy mutex, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
- struct cfg80211_chan_def *chandef);
+ struct cfg80211_chan_def *chandef,
+ unsigned int link_id);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
* @dev: the device on which the channel switch started
* @chandef: the future channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
+ * @quiet: whether or not immediate quiet was requested by the AP
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -7414,7 +8808,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
*/
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u8 count);
+ unsigned int link_id, u8 count,
+ bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -7428,6 +8823,19 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
enum nl80211_band *band);
/**
+ * ieee80211_operating_class_to_chandef - convert operating class to chandef
+ *
+ * @operating_class: the operating class to convert
+ * @chan: the ieee80211_channel to convert
+ * @chandef: a pointer to the resulting chandef
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef);
+
+/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
*
* @chandef: the chandef to convert
@@ -7482,20 +8890,49 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate);
* cfg80211_unregister_wdev - remove the given wdev
* @wdev: struct wireless_dev to remove
*
- * Call this function only for wdevs that have no netdev assigned,
- * e.g. P2P Devices. It removes the device from the list so that
- * it can no longer be used. It is necessary to call this function
- * even when cfg80211 requests the removal of the interface by
- * calling the del_virtual_intf() callback. The function must also
- * be called when the driver wishes to unregister the wdev, e.g.
- * when the device is unbound from the driver.
+ * This function removes the device so it can no longer be used. It is necessary
+ * to call this function even when cfg80211 requests the removal of the device
+ * by calling the del_virtual_intf() callback. The function must also be called
+ * when the driver wishes to unregister the wdev, e.g. when the hardware device
+ * is unbound from the driver.
*
- * Requires the RTNL to be held.
+ * Requires the RTNL and wiphy mutex to be held.
*/
void cfg80211_unregister_wdev(struct wireless_dev *wdev);
/**
- * struct cfg80211_ft_event - FT Information Elements
+ * cfg80211_register_netdevice - register the given netdev
+ * @dev: the netdev to register
+ *
+ * Note: In contexts coming from cfg80211 callbacks, you must call this rather
+ * than register_netdevice(); register_netdev() is impossible as the RTNL is
+ * held. Otherwise, both register_netdevice() and register_netdev() are usable
+ * instead as well.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+int cfg80211_register_netdevice(struct net_device *dev);
+
+/**
+ * cfg80211_unregister_netdevice - unregister the given netdev
+ * @dev: the netdev to unregister
+ *
+ * Note: In contexts coming from cfg80211 callbacks, you must call this rather
+ * than unregister_netdevice(); unregister_netdev() is impossible as the RTNL
+ * is held. Otherwise, both unregister_netdevice() and unregister_netdev() are
+ * usable instead as well.
+ *
+ * Requires the RTNL and wiphy mutex to be held.
+ */
+static inline void cfg80211_unregister_netdevice(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_CFG80211)
+ cfg80211_unregister_wdev(dev->ieee80211_ptr);
+#endif
+}
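
A minimal sketch of the registration side inside a cfg80211 callback, where the RTNL and wiphy mutex are already held; error unwinding and wdev setup are elided and the names are assumptions:

static int drv_add_virtual_intf_netdev(struct net_device *dev)
{
	/* dev->ieee80211_ptr is assumed to have been wired up by the
	 * driver before this point. */
	return cfg80211_register_netdevice(dev);
}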
+
+/**
+ * struct cfg80211_ft_event_params - FT Information Elements
* @ies: FT IEs
* @ies_len: length of the FT IE in bytes
* @target_ap: target AP's MAC address
@@ -7602,6 +9039,18 @@ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
}
/**
+ * ieee80211_fragment_element - fragment the last element in skb
+ * @skb: The sk_buff that the element was added to
+ * @len_pos: Pointer to length of the element to fragment
+ * @frag_id: The element ID to use for fragments
+ *
+ * This function fragments all data after @len_pos, adding fragmentation
+ * elements with the given ID as appropriate. The SKB will grow in size
+ * accordingly.
+ */
+void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id);
+
+/**
* cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
* @wdev: the wireless device reporting the wakeup
* @wakeup: the wakeup report
@@ -7848,6 +9297,18 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
bool is_4addr, u8 check_swif);
+/**
+ * cfg80211_assoc_comeback - notification of association that was
+ * temporarily rejected with a comeback
+ * @netdev: network device
+ * @ap_addr: AP (MLD) address that rejected the association
+ * @timeout: timeout interval value in TUs.
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_assoc_comeback(struct net_device *netdev,
+ const u8 *ap_addr, u32 timeout);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
@@ -7868,6 +9329,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
dev_notice(&(wiphy)->dev, format, ##args)
#define wiphy_info(wiphy, format, args...) \
dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_info_once(wiphy, format, args...) \
+ dev_info_once(&(wiphy)->dev, format, ##args)
#define wiphy_err_ratelimited(wiphy, format, args...) \
dev_err_ratelimited(&(wiphy)->dev, format, ##args)
@@ -7915,4 +9378,135 @@ void cfg80211_update_owe_info_event(struct net_device *netdev,
*/
void cfg80211_bss_flush(struct wiphy *wiphy);
+/**
+ * cfg80211_bss_color_notify - notify about bss color event
+ * @dev: network device
+ * @cmd: the actual event we want to notify
+ * @count: the number of TBTTs until the color change happens
+ * @color_bitmap: representations of the colors that the local BSS is aware of
+ */
+int cfg80211_bss_color_notify(struct net_device *dev,
+ enum nl80211_commands cmd, u8 count,
+ u64 color_bitmap);
+
+/**
+ * cfg80211_obss_color_collision_notify - notify about bss color collision
+ * @dev: network device
+ * @color_bitmap: representations of the colors that the local BSS is aware of
+ */
+static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
+ u64 color_bitmap)
+{
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION,
+ 0, color_bitmap);
+}
+
+/**
+ * cfg80211_color_change_started_notify - notify color change start
+ * @dev: the device on which the color is switched
+ * @count: the number of TBTTs until the color change happens
+ *
+ * Inform the userspace about the color change that has started.
+ */
+static inline int cfg80211_color_change_started_notify(struct net_device *dev,
+ u8 count)
+{
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED,
+ count, 0);
+}
+
+/**
+ * cfg80211_color_change_aborted_notify - notify color change abort
+ * @dev: the device on which the color is switched
+ *
+ * Inform the userspace about the color change that has been aborted.
+ */
+static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
+{
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED,
+ 0, 0);
+}
+
+/**
+ * cfg80211_color_change_notify - notify color change completion
+ * @dev: the device on which the color was switched
+ *
+ * Inform the userspace about the color change that has completed.
+ */
+static inline int cfg80211_color_change_notify(struct net_device *dev)
+{
+ return cfg80211_bss_color_notify(dev,
+ NL80211_CMD_COLOR_CHANGE_COMPLETED,
+ 0, 0);
+}
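
Taken together, a hedged sketch of a BSS color change sequence as a driver might drive it; the success flag stands in for an assumed device result:

static void drv_color_change(struct net_device *dev, u8 count, bool success)
{
	cfg80211_color_change_started_notify(dev, count);

	/* ... hardware performs the switch after @count TBTTs ... */

	if (success)
		cfg80211_color_change_notify(dev);
	else
		cfg80211_color_change_aborted_notify(dev);
}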
+
+/**
+ * cfg80211_links_removed - Notify about removed STA MLD setup links.
+ * @dev: network device.
+ * @link_mask: BIT mask of removed STA MLD setup link IDs.
+ *
+ * Inform cfg80211 and the userspace about removed STA MLD setup links due to
+ * AP MLD removing the corresponding affiliated APs with Multi-Link
+ * reconfiguration. Note that it's not valid to remove all links; in that
+ * case, disconnect instead.
+ * Also note that the wdev mutex must be held.
+ */
+void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
+
+/**
+ * cfg80211_schedule_channels_check - schedule regulatory check if needed
+ * @wdev: the wireless device to check
+ *
+ * In case the device supports NO_IR or DFS relaxations, schedule regulatory
+ * channels check, as previous concurrent operation conditions may not
+ * hold anymore.
+ */
+void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
+
+#ifdef CONFIG_CFG80211_DEBUGFS
+/**
+ * wiphy_locked_debugfs_read - do a locked read in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being read
+ * @buf: the buffer to fill and then read from
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy to
+ * @count: read count
+ * @ppos: read position
+ * @handler: the read handler to call (under wiphy lock)
+ * @data: additional data to pass to the read handler
+ */
+ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data),
+ void *data);
+
+/**
+ * wiphy_locked_debugfs_write - do a locked write in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being written to
+ * @buf: the buffer to copy the user data to
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy from
+ * @count: write count
+ * @handler: the write handler to call (under wiphy lock)
+ * @data: additional data to pass to the write handler
+ */
+ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ const char __user *userbuf, size_t count,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data),
+ void *data);
+#endif
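
A sketch of a debugfs read op built on the locked helper; the handler runs under the wiphy lock, and the file layout and reported value are assumptions:

#ifdef CONFIG_CFG80211_DEBUGFS
static ssize_t drv_dbg_handler(struct wiphy *wiphy, struct file *file,
			       char *buf, size_t bufsize, void *data)
{
	/* runs with the wiphy lock held */
	return scnprintf(buf, bufsize, "example: %d\n", 42);
}

static ssize_t drv_dbg_read(struct file *file, char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;	/* assumed setup */
	char buf[128];

	return wiphy_locked_debugfs_read(wiphy, file, buf, sizeof(buf),
					 userbuf, count, ppos,
					 drv_dbg_handler, NULL);
}
#endif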
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 6ed07844eb24..76d2cd2e2b30 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -11,13 +11,16 @@
#include <linux/ieee802154.h>
#include <linux/netdevice.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/bug.h>
#include <net/nl802154.h>
struct wpan_phy;
struct wpan_phy_cca;
+struct cfg802154_scan_request;
+struct cfg802154_beacon_request;
+struct ieee802154_addr;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
struct ieee802154_llsec_device_key;
@@ -67,6 +70,20 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool ackreq);
+ int (*trigger_scan)(struct wpan_phy *wpan_phy,
+ struct cfg802154_scan_request *request);
+ int (*abort_scan)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*send_beacons)(struct wpan_phy *wpan_phy,
+ struct cfg802154_beacon_request *request);
+ int (*stop_beacons)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*associate)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *coord);
+ int (*disassociate)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
void (*get_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
@@ -160,17 +177,24 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
}
/**
- * @WPAN_PHY_FLAG_TRANSMIT_POWER: Indicates that transceiver will support
+ * enum wpan_phy_flags - WPAN PHY state flags
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support
* transmit power setting.
* @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed
* level setting.
* @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode
* setting.
+ * @WPAN_PHY_FLAG_STATE_QUEUE_STOPPED: Indicates that the transmit queue was
+ * temporarily stopped.
+ * @WPAN_PHY_FLAG_DATAGRAMS_ONLY: Indicates that transceiver is only able to
+ * send/receive datagrams.
*/
enum wpan_phy_flags {
WPAN_PHY_FLAG_TXPOWER = BIT(1),
WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
WPAN_PHY_FLAG_CCA_MODE = BIT(3),
+ WPAN_PHY_FLAG_STATE_QUEUE_STOPPED = BIT(4),
+ WPAN_PHY_FLAG_DATAGRAMS_ONLY = BIT(5),
};
struct wpan_phy {
@@ -182,7 +206,7 @@ struct wpan_phy {
*/
const void *privid;
- u32 flags;
+ unsigned long flags;
/*
* This is a PIB according to 802.15.4-2011.
@@ -203,8 +227,8 @@ struct wpan_phy {
/* PHY depended MAC PIB values */
- /* 802.15.4 acronym: Tdsym in usec */
- u8 symbol_duration;
+ /* 802.15.4 acronym: Tdsym in nsec */
+ u32 symbol_duration;
/* lifs and sifs periods timing */
u16 lifs_period;
u16 sifs_period;
@@ -214,6 +238,17 @@ struct wpan_phy {
/* the network namespace this phy lives in currently */
possible_net_t _net;
+ /* Transmission monitoring and control */
+ spinlock_t queue_lock;
+ atomic_t ongoing_txs;
+ atomic_t hold_txs;
+ wait_queue_head_t sync_txq;
+
+ /* Current filtering level on reception.
+ * Only allowed to be changed if phy is not operational.
+ */
+ enum ieee802154_filtering_level filtering;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -227,6 +262,27 @@ static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net)
write_pnet(&wpan_phy->_net, net);
}
+static inline bool ieee802154_chan_is_valid(struct wpan_phy *phy,
+ u8 page, u8 channel)
+{
+ if (page > IEEE802154_MAX_PAGE ||
+ channel > IEEE802154_MAX_CHANNEL ||
+ !(phy->supported.channels[page] & BIT(channel)))
+ return false;
+
+ return true;
+}
+
+/**
+ * struct ieee802154_addr - IEEE802.15.4 device address
+ * @mode: Address mode from frame header. Can be one of:
+ * - @IEEE802154_ADDR_NONE
+ * - @IEEE802154_ADDR_SHORT
+ * - @IEEE802154_ADDR_LONG
+ * @pan_id: The PAN ID this address belongs to
+ * @short_addr: address if @mode is @IEEE802154_ADDR_SHORT
+ * @extended_addr: address if @mode is @IEEE802154_ADDR_LONG
+ */
struct ieee802154_addr {
u8 mode;
__le16 pan_id;
@@ -236,6 +292,94 @@ struct ieee802154_addr {
};
};
+/**
+ * struct ieee802154_coord_desc - Coordinator descriptor
+ * @addr: PAN ID and coordinator address
+ * @page: page this coordinator is using
+ * @channel: channel this coordinator is using
+ * @superframe_spec: SuperFrame specification as received
+ * @link_quality: link quality indicator at which the beacon was received
+ * @gts_permit: the coordinator accepts GTS requests
+ */
+struct ieee802154_coord_desc {
+ struct ieee802154_addr addr;
+ u8 page;
+ u8 channel;
+ u16 superframe_spec;
+ u8 link_quality;
+ bool gts_permit;
+};
+
+/**
+ * struct ieee802154_pan_device - PAN device information
+ * @pan_id: the PAN ID of this device
+ * @mode: the preferred mode to reach the device
+ * @short_addr: the short address of this device
+ * @extended_addr: the extended address of this device
+ * @node: the list node
+ */
+struct ieee802154_pan_device {
+ __le16 pan_id;
+ u8 mode;
+ __le16 short_addr;
+ __le64 extended_addr;
+ struct list_head node;
+};
+
+/**
+ * struct cfg802154_scan_request - Scan request
+ *
+ * @type: type of scan to be performed
+ * @page: page on which to perform the scan
+ * @channels: channels in the @page to be scanned
+ * @duration: time spent on each channel, calculated with:
+ * aBaseSuperframeDuration * (2 ^ duration + 1)
+ * @wpan_dev: the wpan device on which to perform the scan
+ * @wpan_phy: the wpan phy on which to perform the scan
+ */
+struct cfg802154_scan_request {
+ enum nl802154_scan_types type;
+ u8 page;
+ u32 channels;
+ u8 duration;
+ struct wpan_dev *wpan_dev;
+ struct wpan_phy *wpan_phy;
+};
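
A hedged sketch of building and validating an active-scan request for channel 11 on page 0; the core normally does this itself, NL802154_SCAN_ACTIVE comes from nl802154, and duration 4 gives aBaseSuperframeDuration * (2^4 + 1) per channel:

static int sketch_build_scan(struct wpan_phy *phy, struct wpan_dev *wpan_dev,
			     struct cfg802154_scan_request *req)
{
	if (!ieee802154_chan_is_valid(phy, 0, 11))
		return -EINVAL;

	req->type = NL802154_SCAN_ACTIVE;
	req->page = 0;
	req->channels = BIT(11);
	req->duration = 4;
	req->wpan_phy = phy;
	req->wpan_dev = wpan_dev;

	/* the request would then be passed to the trigger_scan() op */
	return 0;
}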
+
+/**
+ * struct cfg802154_beacon_request - Beacon request descriptor
+ *
+ * @interval: interval n between transmissions, as a multiple of the
+ *            superframe duration: aBaseSuperframeDuration * (2^n). If the
+ *            interval order is greater than or equal to 15, beacons won't be
+ *            passively sent out at a fixed rate; instead the device is
+ *            informed that it should answer beacon requests as part of
+ *            active scan procedures.
+ * @wpan_dev: the concerned wpan device
+ * @wpan_phy: the wpan phy this was for
+ */
+struct cfg802154_beacon_request {
+ u8 interval;
+ struct wpan_dev *wpan_dev;
+ struct wpan_phy *wpan_phy;
+};
+
+/**
+ * struct cfg802154_mac_pkt - MAC packet descriptor (beacon/command)
+ * @node: MAC packets to process list member
+ * @skb: the received sk_buff
+ * @sdata: the interface on which @skb was received
+ * @page: page configuration when @skb was received
+ * @channel: channel configuration when @skb was received
+ */
+struct cfg802154_mac_pkt {
+ struct list_head node;
+ struct sk_buff *skb;
+ struct ieee802154_sub_if_data *sdata;
+ u8 page;
+ u8 channel;
+};
+
struct ieee802154_llsec_key_id {
u8 mode;
u8 id;
@@ -257,6 +401,7 @@ struct ieee802154_llsec_key {
struct ieee802154_llsec_key_entry {
struct list_head list;
+ struct rcu_head rcu;
struct ieee802154_llsec_key_id id;
struct ieee802154_llsec_key *key;
@@ -355,14 +500,20 @@ struct wpan_dev {
bool lbt;
- bool promiscuous_mode;
-
/* fallback for acknowledgment bit setting */
bool ackreq;
+
+ /* Associations */
+ struct mutex association_lock;
+ struct ieee802154_pan_device *parent;
+ struct list_head children;
+ unsigned int max_associations;
+ unsigned int nchildren;
};
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
static inline int
wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
const struct ieee802154_addr *daddr,
@@ -373,6 +524,7 @@ wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
}
+#endif
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
@@ -405,4 +557,49 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
return dev_name(&phy->dev);
}
+void ieee802154_configure_durations(struct wpan_phy *phy,
+ unsigned int page, unsigned int channel);
+
+/**
+ * cfg802154_device_is_associated - Checks whether we are associated with any device
+ * @wpan_dev: the wpan device
+ * @return: true if we are associated
+ */
+bool cfg802154_device_is_associated(struct wpan_dev *wpan_dev);
+
+/**
+ * cfg802154_device_is_parent - Checks if a device is our coordinator
+ * @wpan_dev: the wpan device
+ * @target: the expected parent
+ * @return: true if @target is our coordinator
+ */
+bool cfg802154_device_is_parent(struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
+
+/**
+ * cfg802154_device_is_child - Checks whether a device is associated with us
+ * @wpan_dev: the wpan device
+ * @target: the expected child
+ * @return: the PAN device
+ */
+struct ieee802154_pan_device *
+cfg802154_device_is_child(struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
+
+/**
+ * cfg802154_set_max_associations - Limit the number of future associations
+ * @wpan_dev: the wpan device
+ * @max: the maximum number of devices we allow to associate
+ * @return: the old maximum value
+ */
+unsigned int cfg802154_set_max_associations(struct wpan_dev *wpan_dev,
+ unsigned int max);
+
+/**
+ * cfg802154_get_free_short_addr - Get a free address among the known devices
+ * @wpan_dev: the wpan device
+ * @return: a random short address expected to be unused on our PAN
+ */
+__le16 cfg802154_get_free_short_addr(struct wpan_dev *wpan_dev);
+
#endif /* __NET_CFG802154_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 46754ba9d7b7..1338cb92c8e7 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -18,37 +18,45 @@
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
-#include <linux/uaccess.h>
#include <asm/checksum.h>
+#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
+#include <linux/uaccess.h>
+#endif
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
if (copy_from_user(dst, src, len))
- *err_ptr = -EFAULT;
- return csum_partial(dst, len, sum);
+ return 0;
+ return csum_partial(dst, len, ~0U);
}
#endif
#ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
-(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
+static __always_inline __wsum csum_and_copy_to_user
+(const void *src, void __user *dst, int len)
{
- sum = csum_partial(src, len, sum);
+ __wsum sum = csum_partial(src, len, ~0U);
if (copy_to_user(dst, src, len) == 0)
return sum;
- if (len)
- *err_ptr = -EFAULT;
+ return 0;
+}
+#endif
- return (__force __wsum)-1; /* invalid checksum */
+#ifndef _HAVE_ARCH_CSUM_AND_COPY
+static __always_inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, 0);
}
#endif
#ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = (__force u32)csum;
res += (__force u32)addend;
@@ -56,12 +64,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
}
#endif
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
u16 res = (__force u16)csum;
@@ -69,53 +77,58 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
return (__force __sum16)(res + (res < (__force u16)addend));
}
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
return csum16_add(csum, ~addend);
}
-static inline __wsum
-csum_block_add(__wsum csum, __wsum csum2, int offset)
+#ifndef HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
- u32 sum = (__force u32)csum2;
-
/* rotate sum to align it with a 16b boundary */
if (offset & 1)
- sum = ror32(sum, 8);
+ return (__force __wsum)ror32((__force u32)sum, 8);
+ return sum;
+}
+#endif
- return csum_add(csum, (__force __wsum)sum);
+static __always_inline __wsum
+csum_block_add(__wsum csum, __wsum csum2, int offset)
+{
+ return csum_add(csum, csum_shift(csum2, offset));
}
-static inline __wsum
+static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
return csum_block_add(csum, csum2, offset);
}
-static inline __wsum
+static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
}
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
{
return (__force __wsum)n;
}
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
return csum_partial(buff, len, sum);
}
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
@@ -128,11 +141,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
* m : old value of a 16bit field
* m' : new value of a 16bit field
*/
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
+static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
+{
+ *csum = csum_add(csum_sub(*csum, old), new);
+}
+
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
@@ -142,16 +160,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
__wsum diff, bool pseudohdr);
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
- __be16 from, __be16 to,
- bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+ __be16 from, __be16 to, bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
}
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
- int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
{
__sum16 *psum = (__sum16 *)(ptr + offset);
__wsum delta;
@@ -167,9 +185,13 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
return delta;
}
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}
+static __always_inline __wsum wsum_negate(__wsum val)
+{
+ return (__force __wsum)-((__force u32)val);
+}
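
As an illustration of the block helpers above, a small sketch folding a checksum computed over two fragments; csum_shift() performs the odd-offset rotation inside csum_block_add(), and the buffers and lengths are assumptions:

static __sum16 sketch_csum_two_parts(const void *a, int alen,
				     const void *b, int blen)
{
	__wsum sum = csum_partial(a, alen, 0);

	/* second fragment starts at byte offset alen */
	sum = csum_block_add(sum, csum_partial(b, blen, 0), alen);

	return csum_fold(sum);
}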
#endif
diff --git a/include/net/codel.h b/include/net/codel.h
index a6e428f80135..aa80f744826c 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -44,8 +44,6 @@
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
-#include <net/pkt_sched.h>
-#include <net/inet_ecn.h>
/* Controlling Queue Delay (CoDel) algorithm
* =========================================
@@ -102,6 +100,9 @@ static inline u32 codel_time_to_us(codel_time_t val)
* @interval: width of moving time window
* @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
+ * @ce_threshold_selector: apply ce_threshold to packets matching this value
+ * in the diffserv/ECN byte of the IP header
+ * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison
*/
struct codel_params {
codel_time_t target;
@@ -109,6 +110,8 @@ struct codel_params {
codel_time_t interval;
u32 mtu;
bool ecn;
+ u8 ce_threshold_selector;
+ u8 ce_threshold_mask;
};
/**
@@ -142,8 +145,8 @@ struct codel_vars {
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
* @drop_len: bytes of dropped packets in dequeue()
- * ecn_mark: number of packets we ECN marked instead of dropping
- * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
+ * @ecn_mark: number of packets we ECN marked instead of dropping
+ * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index d289b91dcd65..78a27ac73070 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -49,11 +49,15 @@
* Implemented on linux by Dave Taht and Eric Dumazet
*/
+#include <net/inet_ecn.h>
+
static void codel_params_init(struct codel_params *params)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ce_threshold_mask = 0;
+ params->ce_threshold_selector = 0;
params->ecn = false;
}
@@ -246,9 +250,19 @@ static struct sk_buff *codel_dequeue(void *ctx,
vars->rec_inv_sqrt);
}
end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
+ bool set_ce = true;
+
+ if (params->ce_threshold_mask) {
+ int dsfield = skb_get_dsfield(skb);
+
+ set_ce = (dsfield >= 0 &&
+ (((u8)dsfield & params->ce_threshold_mask) ==
+ params->ce_threshold_selector));
+ }
+ if (set_ce && INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ }
return skb;
}
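
A sketch of the intended use of the new selector/mask pair: CE-mark only ECT(1) traffic above a low threshold, the L4S-style configuration; the threshold value is illustrative:

static void sketch_codel_l4s_params(struct codel_params *params)
{
	params->ce_threshold = MS2TIME(1);		/* 1 ms, illustrative */
	params->ce_threshold_selector = INET_ECN_ECT_1;	/* match ECT(1) */
	params->ce_threshold_mask = INET_ECN_MASK;	/* ECN bits only */
}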
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
index 098630f83a55..7d3d9219f4fe 100644
--- a/include/net/codel_qdisc.h
+++ b/include/net/codel_qdisc.h
@@ -49,6 +49,9 @@
* Implemented on linux by Dave Taht and Eric Dumazet
*/
+#include <net/codel.h>
+#include <net/pkt_sched.h>
+
/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
struct codel_skb_cb {
codel_time_t enqueue_time;
diff --git a/include/net/compat.h b/include/net/compat.h
index 745db0d605b6..84c163f40f38 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -5,8 +5,6 @@
struct sock;
-#if defined(CONFIG_COMPAT)
-
#include <linux/compat.h>
struct compat_msghdr {
@@ -48,17 +46,8 @@ struct compat_rtentry {
unsigned short rt_irtt; /* Initial RTT */
};
-#else /* defined(CONFIG_COMPAT) */
-/*
- * To avoid compiler warnings:
- */
-#define compat_msghdr msghdr
-#define compat_mmsghdr mmsghdr
-#endif /* defined(CONFIG_COMPAT) */
-
-int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg,
- struct sockaddr __user **save_addr, compat_uptr_t *ptr,
- compat_size_t *len);
+int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *msg,
+ struct sockaddr __user **save_addr);
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
int put_cmsg_compat(struct msghdr*, int, int, int, void *);
@@ -81,13 +70,26 @@ struct compat_group_source_req {
} __packed;
struct compat_group_filter {
- __u32 gf_interface;
- struct __kernel_sockaddr_storage gf_group
- __aligned(4);
- __u32 gf_fmode;
- __u32 gf_numsrc;
- struct __kernel_sockaddr_storage gf_slist[1]
- __aligned(4);
+ union {
+ struct {
+ __u32 gf_interface_aux;
+ struct __kernel_sockaddr_storage gf_group_aux
+ __aligned(4);
+ __u32 gf_fmode_aux;
+ __u32 gf_numsrc_aux;
+ struct __kernel_sockaddr_storage gf_slist[1]
+ __aligned(4);
+ } __packed;
+ struct {
+ __u32 gf_interface;
+ struct __kernel_sockaddr_storage gf_group
+ __aligned(4);
+ __u32 gf_fmode;
+ __u32 gf_numsrc;
+ struct __kernel_sockaddr_storage gf_slist_flex[]
+ __aligned(4);
+ } __packed;
+ };
} __packed;
#endif /* NET_COMPAT_H */
diff --git a/include/net/datalink.h b/include/net/datalink.h
index a9663229b913..6c529a40e00d 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -2,6 +2,13 @@
#ifndef _NET_INET_DATALINK_H_
#define _NET_INET_DATALINK_H_
+#include <linux/list.h>
+
+struct llc_sap;
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
struct datalink_proto {
unsigned char type[8];
@@ -12,10 +19,8 @@ struct datalink_proto {
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
int (*request)(struct datalink_proto *, struct sk_buff *,
- unsigned char *);
+ const unsigned char *);
struct list_head node;
};
-struct datalink_proto *make_EII_client(void);
-void destroy_EII_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
index 43e34131a53f..02700262f71a 100644
--- a/include/net/dcbevent.h
+++ b/include/net/dcbevent.h
@@ -8,6 +8,8 @@
#ifndef _DCB_EVENT_H
#define _DCB_EVENT_H
+struct notifier_block;
+
enum dcbevent_notif_type {
DCB_APP_EVENT = 1,
};
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index e4ad58c4062c..42207fc44660 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -10,6 +10,8 @@
#include <linux/dcbnl.h>
+struct net_device;
+
struct dcb_app_type {
int ifindex;
struct dcb_app app;
@@ -17,18 +19,32 @@ struct dcb_app_type {
u8 dcbx;
};
+u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app);
+int dcb_setrewr(struct net_device *dev, struct dcb_app *app);
+int dcb_delrewr(struct net_device *dev, struct dcb_app *app);
+
int dcb_setapp(struct net_device *, struct dcb_app *);
u8 dcb_getapp(struct net_device *, struct dcb_app *);
int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+struct dcb_rewr_prio_pcp_map {
+ u16 map[IEEE_8021QAZ_MAX_TCS];
+};
+
+void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
+ struct dcb_rewr_prio_pcp_map *p_map);
+
struct dcb_ieee_app_prio_map {
u64 map[IEEE_8021QAZ_MAX_TCS];
};
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
struct dcb_ieee_app_prio_map *p_map);
+void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
struct dcb_ieee_app_dscp_map {
u8 map[64];
};
@@ -107,6 +123,14 @@ struct dcbnl_rtnl_ops {
/* buffer settings */
int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *);
int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *);
+
+ /* apptrust */
+ int (*dcbnl_setapptrust)(struct net_device *, u8 *, int);
+ int (*dcbnl_getapptrust)(struct net_device *, u8 *, int *);
+
+ /* rewrite */
+ int (*dcbnl_setrewr)(struct net_device *dev, struct dcb_app *app);
+ int (*dcbnl_delrewr)(struct net_device *dev, struct dcb_app *app);
};
#endif /* __NET_DCBNL_H__ */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 8f3c8a443238..9ac394bdfbe4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -19,35 +19,10 @@
#include <net/flow_offload.h>
#include <uapi/linux/devlink.h>
#include <linux/xarray.h>
+#include <linux/firmware.h>
-struct devlink_ops;
-
-struct devlink {
- struct list_head list;
- struct list_head port_list;
- struct list_head sb_list;
- struct list_head dpipe_table_list;
- struct list_head resource_list;
- struct list_head param_list;
- struct list_head region_list;
- struct list_head reporter_list;
- struct mutex reporters_lock; /* protects reporter_list */
- struct devlink_dpipe_headers *dpipe_headers;
- struct list_head trap_list;
- struct list_head trap_group_list;
- struct list_head trap_policer_list;
- const struct devlink_ops *ops;
- struct xarray snapshot_ids;
- struct device *dev;
- possible_net_t _net;
- struct mutex lock; /* Serializes access to devlink instance specific objects such as
- * port, sb, dpipe, resource, params, region, traps and more.
- */
- u8 reload_failed:1,
- reload_enabled:1,
- registered:1;
- char priv[0] __aligned(NETDEV_ALIGN);
-};
+struct devlink;
+struct devlink_linecard;
struct devlink_port_phys_attrs {
u32 port_number; /* Same value as "split group".
@@ -57,13 +32,44 @@ struct devlink_port_phys_attrs {
u32 split_subport_number; /* If the port is split, this is the number of subport. */
};
+/**
+ * struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
struct devlink_port_pci_pf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u8 external:1;
};
+/**
+ * struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @vf: Associated PCI VF for of the PCI PF for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
struct devlink_port_pci_vf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
- u16 vf; /* Associated PCI VF for of the PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u16 vf;
+ u8 external:1;
+};
+
+/**
+ * struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes
+ * @controller: Associated controller number
+ * @sf: Associated PCI SF for of the PCI PF for this port.
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
+struct devlink_port_pci_sf_attrs {
+ u32 controller;
+ u32 sf;
+ u16 pf;
+ u8 external:1;
};
/**
@@ -73,6 +79,10 @@ struct devlink_port_pci_vf_attrs {
* @splittable: indicates if the port can be split.
* @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
+ * @phys: physical port attributes
+ * @pci_pf: PCI PF port attributes
+ * @pci_vf: PCI VF port attributes
+ * @pci_sf: PCI SF port attributes
*/
struct devlink_port_attrs {
u8 split:1,
@@ -84,27 +94,108 @@ struct devlink_port_attrs {
struct devlink_port_phys_attrs phys;
struct devlink_port_pci_pf_attrs pci_pf;
struct devlink_port_pci_vf_attrs pci_vf;
+ struct devlink_port_pci_sf_attrs pci_sf;
+ };
+};
+
+struct devlink_rate {
+ struct list_head list;
+ enum devlink_rate_type type;
+ struct devlink *devlink;
+ void *priv;
+ u64 tx_share;
+ u64 tx_max;
+
+ struct devlink_rate *parent;
+ union {
+ struct devlink_port *devlink_port;
+ struct {
+ char *name;
+ refcount_t refcnt;
+ };
};
+
+ u32 tx_priority;
+ u32 tx_weight;
};
struct devlink_port {
struct list_head list;
- struct list_head param_list;
+ struct list_head region_list;
struct devlink *devlink;
+ const struct devlink_port_ops *ops;
unsigned int index;
- bool registered;
- spinlock_t type_lock; /* Protects type and type_dev
- * pointer consistency.
+ spinlock_t type_lock; /* Protects type and type_eth/ib
+ * structures consistency.
*/
enum devlink_port_type type;
enum devlink_port_type desired_type;
- void *type_dev;
+ union {
+ struct {
+ struct net_device *netdev;
+ int ifindex;
+ char ifname[IFNAMSIZ];
+ } type_eth;
+ struct {
+ struct ib_device *ibdev;
+ } type_ib;
+ };
struct devlink_port_attrs attrs;
u8 attrs_set:1,
- switch_port:1;
+ switch_port:1,
+ registered:1,
+ initialized:1;
struct delayed_work type_warn_dw;
struct list_head reporter_list;
- struct mutex reporters_lock; /* Protects reporter_list */
+
+ struct devlink_rate *devlink_rate;
+ struct devlink_linecard *linecard;
+ u32 rel_index;
+};
+
+struct devlink_port_new_attrs {
+ enum devlink_port_flavour flavour;
+ unsigned int port_index;
+ u32 controller;
+ u32 sfnum;
+ u16 pfnum;
+ u8 port_index_valid:1,
+ controller_valid:1,
+ sfnum_valid:1;
+};
+
+/**
+ * struct devlink_linecard_ops - Linecard operations
+ * @provision: callback to provision the linecard slot with certain
+ * type of linecard. As a result of this operation,
+ * driver is expected to eventually (could be after
+ * the function call returns) call one of:
+ * devlink_linecard_provision_set()
+ * devlink_linecard_provision_fail()
+ * @unprovision: callback to unprovision the linecard slot. As a result
+ * of this operation, driver is expected to eventually
+ * (could be after the function call returns) call
+ * devlink_linecard_provision_clear()
+ * devlink_linecard_provision_fail()
+ * @same_provision: callback to ask the driver if linecard is already
+ * provisioned in the same way user asks this linecard to be
+ * provisioned.
+ * @types_count: callback to get number of supported types
+ * @types_get: callback to get next type in list
+ */
+struct devlink_linecard_ops {
+ int (*provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv,
+ struct netlink_ext_ack *extack);
+ int (*unprovision)(struct devlink_linecard *linecard, void *priv,
+ struct netlink_ext_ack *extack);
+ bool (*same_provision)(struct devlink_linecard *linecard, void *priv,
+ const char *type, const void *type_priv);
+ unsigned int (*types_count)(struct devlink_linecard *linecard,
+ void *priv);
+ void (*types_get)(struct devlink_linecard *linecard,
+ void *priv, unsigned int index, const char **type,
+ const void **type_priv);
};
struct devlink_sb_pool_info {
@@ -131,7 +222,7 @@ struct devlink_dpipe_field {
/**
* struct devlink_dpipe_header - dpipe header object
* @name: header name
- * @id: index, global/local detrmined by global bit
+ * @id: index, global/local determined by global bit
* @fields: fields
* @fields_count: number of fields
* @global: indicates if header is shared like most protocol header
@@ -151,7 +242,7 @@ struct devlink_dpipe_header {
* @header_index: header index (packets can have several headers of same
* type like in case of tunnels)
* @header: header
- * @fieled_id: field index
+ * @field_id: field index
*/
struct devlink_dpipe_match {
enum devlink_dpipe_match_type type;
@@ -166,7 +257,7 @@ struct devlink_dpipe_match {
* @header_index: header index (packets can have several headers of same
* type like in case of tunnels)
* @header: header
- * @fieled_id: field index
+ * @field_id: field index
*/
struct devlink_dpipe_action {
enum devlink_dpipe_action_type type;
@@ -202,7 +293,7 @@ struct devlink_dpipe_value {
* struct devlink_dpipe_entry - table entry object
* @index: index of the entry in the table
* @match_values: match values
- * @matche_values_count: count of matches tuples
+ * @match_values_count: count of matches tuples
* @action_values: actions values
* @action_values_count: count of actions values
* @counter: value of counter
@@ -252,7 +343,9 @@ struct devlink_dpipe_table_ops;
*/
struct devlink_dpipe_table {
void *priv;
+ /* private: */
struct list_head list;
+ /* public: */
const char *name;
bool counters_enabled;
bool counter_control_extern;
@@ -265,13 +358,13 @@ struct devlink_dpipe_table {
/**
* struct devlink_dpipe_table_ops - dpipe_table ops
- * @actions_dump - dumps all tables actions
- * @matches_dump - dumps all tables matches
- * @entries_dump - dumps all active entries in the table
- * @counters_set_update - when changing the counter status hardware sync
+ * @actions_dump: dumps all tables actions
+ * @matches_dump: dumps all tables matches
+ * @entries_dump: dumps all active entries in the table
+ * @counters_set_update: when changing the counter status hardware sync
* maybe needed to allocate/free counter related
* resources
- * @size_get - get size
+ * @size_get: get size
*/
struct devlink_dpipe_table_ops {
int (*actions_dump)(void *priv, struct sk_buff *skb);
@@ -284,8 +377,8 @@ struct devlink_dpipe_table_ops {
/**
* struct devlink_dpipe_headers - dpipe headers
- * @headers - header array can be shared (global bit) or driver specific
- * @headers_count - count of headers
+ * @headers: header array can be shared (global bit) or driver specific
+ * @headers_count: count of headers
*/
struct devlink_dpipe_headers {
struct devlink_dpipe_header **headers;
@@ -297,7 +390,7 @@ struct devlink_dpipe_headers {
* @size_min: minimum size which can be set
* @size_max: maximum size which can be set
* @size_granularity: size granularity
- * @size_unit: resource's basic unit
+ * @unit: resource's basic unit
*/
struct devlink_resource_size_params {
u64 size_min;
@@ -320,35 +413,10 @@ devlink_resource_size_params_init(struct devlink_resource_size_params *size_para
typedef u64 devlink_resource_occ_get_t(void *priv);
-/**
- * struct devlink_resource - devlink resource
- * @name: name of the resource
- * @id: id, per devlink instance
- * @size: size of the resource
- * @size_new: updated size of the resource, reload is needed
- * @size_valid: valid in case the total size of the resource is valid
- * including its children
- * @parent: parent resource
- * @size_params: size parameters
- * @list: parent list
- * @resource_list: list of child resources
- */
-struct devlink_resource {
- const char *name;
- u64 id;
- u64 size;
- u64 size_new;
- bool size_valid;
- struct devlink_resource *parent;
- struct devlink_resource_size_params size_params;
- struct list_head list;
- struct list_head resource_list;
- devlink_resource_occ_get_t *occ_get;
- void *occ_get_priv;
-};
-
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+#define DEVLINK_RESOURCE_GENERIC_NAME_PORTS "physical_ports"
+
#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8,
@@ -372,7 +440,27 @@ struct devlink_param_gset_ctx {
};
/**
+ * struct devlink_flash_notify - devlink dev flash notify data
+ * @status_msg: current status string
+ * @component: firmware component being updated
+ * @done: amount of work completed of total amount
+ * @total: amount of work expected to be done
+ * @timeout: expected max timeout in seconds
+ *
+ * These values are given to userland to display the current
+ * activity of a firmware update in progress.
+ */
+struct devlink_flash_notify {
+ const char *status_msg;
+ const char *component;
+ unsigned long done;
+ unsigned long total;
+ unsigned long timeout;
+};
+
+/**
* struct devlink_param - devlink configuration parameter data
+ * @id: devlink parameter id number
* @name: name of the parameter
* @generic: indicates if the parameter is generic or driver specific
* @type: parameter type
@@ -406,7 +494,10 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
- bool published;
+ union devlink_param_value driverinit_value_new; /* Not reachable
+ * until reload.
+ */
+ bool driverinit_value_new_valid;
};
enum devlink_param_generic_id {
@@ -420,6 +511,13 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -457,6 +555,27 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME "enable_remote_dev_reset"
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME "enable_eth"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME "enable_rdma"
+#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME "enable_vnet"
+#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME "enable_iwarp"
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME "io_eq_size"
+#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
+#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -510,6 +629,26 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
/* Firmware bundle identifier */
#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+/* Bootloader */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER "fw.bootloader"
+
+/**
+ * struct devlink_flash_update_params - Flash Update parameters
+ * @fw: pointer to the firmware data to update from
+ * @component: the flash component to update
+ * @overwrite_mask: which types of flash update are supported (may be %0)
+ *
+ * With the exception of fw, drivers must opt in to parameters by
+ * setting the appropriate bit in the supported_flash_update_params field in
+ * their devlink_ops structure.
+ */
+struct devlink_flash_update_params {
+ const struct firmware *fw;
+ const char *component;
+ u32 overwrite_mask;
+};
+
+#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(0)
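
To illustrate the opt-in model, a hedged sketch of a .flash_update implementation; foo_priv and foo_write_flash() are invented stand-ins:

#include <linux/firmware.h>
#include <net/devlink.h>

static int foo_flash_update(struct devlink *devlink,
			    struct devlink_flash_update_params *params,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	/* the core has already requested the firmware; params->fw is valid */
	devlink_flash_update_status_notify(devlink, "Flashing",
					   params->component, 0,
					   params->fw->size);
	return foo_write_flash(priv, params->fw->data, params->fw->size);
}

static const struct devlink_ops foo_flash_devlink_ops = {
	/* set only if the driver actually honors params->overwrite_mask */
	.supported_flash_update_params =
		DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.flash_update = foo_flash_update,
};
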
struct devlink_region;
struct devlink_info_req;
@@ -522,12 +661,52 @@ struct devlink_info_req;
* the data variable must be updated to point to the snapshot data.
* The function will be called while the devlink instance lock is
* held.
+ * @read: callback to directly read a portion of the region. On success,
+ * the data pointer will be updated with the contents of the
+ * requested portion of the region. The function will be called
+ * while the devlink instance lock is held.
+ * @priv: Pointer to driver private data for the region operation
*/
struct devlink_region_ops {
const char *name;
void (*destructor)(const void *data);
- int (*snapshot)(struct devlink *devlink, struct netlink_ext_ack *extack,
+ int (*snapshot)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data);
+ int (*read)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data);
+ void *priv;
+};
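
A minimal sketch of a region with an on-demand snapshot; the region name, the size, and the foo_read_registers() helper are hypothetical:

#include <linux/slab.h>
#include <net/devlink.h>

#define FOO_REGION_SIZE 4096	/* hypothetical register-file size */

static int foo_region_snapshot(struct devlink *devlink,
			       const struct devlink_region_ops *ops,
			       struct netlink_ext_ack *extack, u8 **data)
{
	u8 *buf = kzalloc(FOO_REGION_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	foo_read_registers(devlink_priv(devlink), buf, FOO_REGION_SIZE);
	*data = buf;	/* core takes ownership; freed via ->destructor */
	return 0;
}

static const struct devlink_region_ops foo_region_ops = {
	.name = "foo-registers",
	.destructor = kfree,
	.snapshot = foo_region_snapshot,
};

The region itself would be created with devlink_region_create(devlink, &foo_region_ops, 8, FOO_REGION_SIZE), declared later in this header.
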
+
+/**
+ * struct devlink_port_region_ops - Region operations for a port
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @read: callback to directly read a portion of the region. On success,
+ * the data pointer will be updated with the contents of the
+ * requested portion of the region. The function will be called
+ * while the devlink instance lock is held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_port_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ int (*read)(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data);
+ void *priv;
};
struct devlink_fmsg;
@@ -546,6 +725,7 @@ enum devlink_health_reporter_state {
* @dump: callback to dump an object
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
+ * @test: callback to trigger a test event
*/
struct devlink_health_reporter_ops {
@@ -558,6 +738,28 @@ struct devlink_health_reporter_ops {
int (*diagnose)(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack);
+ int (*test)(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_trap_metadata - Packet trap metadata.
+ * @trap_name: Trap name.
+ * @trap_group_name: Trap group name.
+ * @input_dev: Input netdevice.
+ * @dev_tracker: refcount tracker for @input_dev.
+ * @fa_cookie: Flow action user cookie.
+ * @trap_type: Trap type.
+ */
+struct devlink_trap_metadata {
+ const char *trap_name;
+ const char *trap_group_name;
+
+ struct net_device *input_dev;
+ netdevice_tracker dev_tracker;
+
+ const struct flow_action_cookie *fa_cookie;
+ enum devlink_trap_type trap_type;
};
/**
@@ -704,6 +906,26 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE,
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP,
DEVLINK_TRAP_GENERIC_ID_EARLY_DROP,
+ DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ARP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GRE_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_UDP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_TCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ESP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP,
+ DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_EAPOL,
+ DEVLINK_TRAP_GENERIC_ID_LOCKED_PORT,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -739,6 +961,8 @@ enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE,
DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_EAPOL,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -894,6 +1118,46 @@ enum devlink_trap_group_generic_id {
"flow_action_trap"
#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \
"early_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_VXLAN_PARSING \
+ "vxlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_LLC_SNAP_PARSING \
+ "llc_snap_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_VLAN_PARSING \
+ "vlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_PPPOE_PPP_PARSING \
+ "pppoe_ppp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_MPLS_PARSING \
+ "mpls_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_PARSING \
+ "arp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_1_PARSING \
+ "ip_1_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_N_PARSING \
+ "ip_n_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GRE_PARSING \
+ "gre_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_UDP_PARSING \
+ "udp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_TCP_PARSING \
+ "tcp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IPSEC_PARSING \
+ "ipsec_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_SCTP_PARSING \
+ "sctp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_DCCP_PARSING \
+ "dccp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GTP_PARSING \
+ "gtp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ESP_PARSING \
+ "esp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_NEXTHOP \
+ "blackhole_nexthop"
+#define DEVLINK_TRAP_GENERIC_NAME_DMAC_FILTER \
+ "dmac_filter"
+#define DEVLINK_TRAP_GENERIC_NAME_EAPOL \
+ "eapol"
+#define DEVLINK_TRAP_GENERIC_NAME_LOCKED_PORT \
+ "locked_port"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -945,6 +1209,10 @@ enum devlink_trap_group_generic_id {
"acl_sample"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_TRAP \
"acl_trap"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PARSER_ERROR_DROPS \
+ "parser_error_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EAPOL \
+ "eapol"
#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \
_metadata_cap) \
@@ -990,17 +1258,27 @@ enum devlink_trap_group_generic_id {
.min_burst = _min_burst, \
}
+enum {
+ /* device supports reload operations */
+ DEVLINK_F_RELOAD = 1UL << 0,
+};
+
struct devlink_ops {
+ /**
+ * @supported_flash_update_params:
+ * mask of parameters supported by the driver's .flash_update
+ * implementation.
+ */
+ u32 supported_flash_update_params;
+ unsigned long reload_actions;
+ unsigned long reload_limits;
int (*reload_down)(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack);
- int (*reload_up)(struct devlink *devlink,
+ int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack);
- int (*port_type_set)(struct devlink_port *devlink_port,
- enum devlink_port_type port_type);
- int (*port_split)(struct devlink *devlink, unsigned int port_index,
- unsigned int count, struct netlink_ext_ack *extack);
- int (*port_unsplit)(struct devlink *devlink, unsigned int port_index,
- struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -1051,8 +1329,15 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
- int (*flash_update)(struct devlink *devlink, const char *file_name,
- const char *component,
+ /**
+ * @flash_update: Device flash update function
+ *
+ * Used to perform a flash update for the device. The set of
+ * parameters supported by the driver should be set in
+ * supported_flash_update_params.
+ */
+ int (*flash_update)(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
/**
* @trap_init: Trap initialization function.
@@ -1098,6 +1383,26 @@ struct devlink_ops {
const struct devlink_trap_policer *policer,
struct netlink_ext_ack *extack);
/**
+ * @trap_group_action_set: Trap group action set function.
+ *
+ * If this callback is populated, it will take precedence over looping
+ * over all traps in a group and calling .trap_action_set().
+ */
+ int (*trap_group_action_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_drop_counter_get: Trap drop counter get function.
+ *
+ * Should be used by device drivers to report the number of packets
+ * that have been dropped and cannot be passed to the devlink
+ * subsystem by the underlying device.
+ */
+ int (*trap_drop_counter_get)(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
+ /**
* @trap_policer_init: Trap policer initialization function.
*
* Should be used by device drivers to initialize the trap policer in
@@ -1130,96 +1435,312 @@ struct devlink_ops {
const struct devlink_trap_policer *policer,
u64 *p_drops);
/**
- * @port_function_hw_addr_get: Port function's hardware address get function.
+ * port_new() - Add a new port function of a specified flavor
+ * @devlink: Devlink instance
+ * @attrs: attributes of the new port
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to store new devlink port pointer
+ *
+ * Devlink core will call this device driver function upon user request
+ * to create a new port function of a specified flavor and optional
+ * attributes.
*
- * Should be used by device drivers to report the hardware address of a function managed
- * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
- * function handling for a particular port.
+ * Notes:
+ * - On success, drivers must register a port with devlink core
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+ int (*port_new)(struct devlink *devlink,
+ const struct devlink_port_new_attrs *attrs,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port);
+
+ /**
+ * Rate control callbacks.
+ */
+ int (*rate_leaf_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tx_priority_set)(struct devlink_rate *devlink_rate, void *priv,
+ u32 tx_priority, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tx_weight_set)(struct devlink_rate *devlink_rate, void *priv,
+ u32 tx_weight, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_priority_set)(struct devlink_rate *devlink_rate, void *priv,
+ u32 tx_priority, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_weight_set)(struct devlink_rate *devlink_rate, void *priv,
+ u32 tx_weight, struct netlink_ext_ack *extack);
+ int (*rate_node_new)(struct devlink_rate *rate_node, void **priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_del)(struct devlink_rate *rate_node, void *priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_leaf_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ /**
+ * selftest_check() - queries if selftest is supported
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
*
- * Note: @extack can be NULL when port notifier queries the port function.
+ * Return: true if test is supported by the driver
*/
- int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port,
- u8 *hw_addr, int *hw_addr_len,
- struct netlink_ext_ack *extack);
+ bool (*selftest_check)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
/**
- * @port_function_hw_addr_set: Port function's hardware address set function.
+ * selftest_run() - Runs a selftest
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
*
- * Should be used by device drivers to set the hardware address of a function managed
- * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
- * function handling for a particular port.
+ * Return: status of the test
*/
- int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack);
+ enum devlink_selftest_status
+ (*selftest_run)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
};
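
Taken together with the DEVLINK_F_RELOAD flag above, a hedged sketch of the reworked reload pair; foo_down()/foo_up() are invented helpers, and the driver is assumed to support only driver re-initialization (the DEVLINK_RELOAD_ACTION_DRIVER_REINIT value comes from the uapi header):

#include <net/devlink.h>

static int foo_reload_down(struct devlink *devlink, bool netns_change,
			   enum devlink_reload_action action,
			   enum devlink_reload_limit limit,
			   struct netlink_ext_ack *extack)
{
	foo_down(devlink_priv(devlink));	/* hypothetical teardown */
	return 0;
}

static int foo_reload_up(struct devlink *devlink,
			 enum devlink_reload_action action,
			 enum devlink_reload_limit limit,
			 u32 *actions_performed,
			 struct netlink_ext_ack *extack)
{
	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
	return foo_up(devlink_priv(devlink));	/* hypothetical re-init */
}

static const struct devlink_ops foo_devlink_ops = {
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
	.reload_down = foo_reload_down,
	.reload_up = foo_reload_up,
};
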
-static inline void *devlink_priv(struct devlink *devlink)
-{
- BUG_ON(!devlink);
- return &devlink->priv;
-}
+void *devlink_priv(struct devlink *devlink);
+struct devlink *priv_to_devlink(void *priv);
+struct device *devlink_to_dev(const struct devlink *devlink);
+
+/* Devlink instance explicit locking */
+void devl_lock(struct devlink *devlink);
+int devl_trylock(struct devlink *devlink);
+void devl_unlock(struct devlink *devlink);
+void devl_assert_locked(struct devlink *devlink);
+bool devl_lock_is_held(struct devlink *devlink);
+
+struct ib_device;
-static inline struct devlink *priv_to_devlink(void *priv)
+struct net *devlink_net(const struct devlink *devlink);
+/* This call is intended for software devices that can create
+ * devlink instances in namespaces other than init_net.
+ *
+ * Drivers that operate on real HW must use devlink_alloc() instead.
+ */
+struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+ size_t priv_size, struct net *net,
+ struct device *dev);
+static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
+ size_t priv_size,
+ struct device *dev)
{
- BUG_ON(!priv);
- return container_of(priv, struct devlink, priv);
+ return devlink_alloc_ns(ops, priv_size, &init_net, dev);
}
-static inline struct devlink_port *
-netdev_to_devlink_port(struct net_device *dev)
+int devl_register(struct devlink *devlink);
+void devl_unregister(struct devlink *devlink);
+void devlink_register(struct devlink *devlink);
+void devlink_unregister(struct devlink *devlink);
+void devlink_free(struct devlink *devlink);
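
A probe/remove lifecycle sketch using the calls above, reusing the foo_devlink_ops sketched earlier; struct foo_priv and the surrounding bus glue are invented:

#include <net/devlink.h>

struct foo_priv {
	int dummy;	/* driver state lives inside the devlink instance */
};

static struct devlink *foo_probe(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv),
				dev);
	if (!devlink)
		return NULL;
	/* ... register ports, params, resources, regions ... */
	devlink_register(devlink);	/* publish to userspace last */
	return devlink;
}

static void foo_remove(struct devlink *devlink)
{
	devlink_unregister(devlink);	/* unpublish first */
	/* ... unregister ports, params, etc. ... */
	devlink_free(devlink);
}
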
+
+/**
+ * struct devlink_port_ops - Port operations
+ * @port_split: Callback used to split the port into multiple ones.
+ * @port_unsplit: Callback used to unsplit the port group back into
+ * a single port.
+ * @port_type_set: Callback used to set a type of a port.
+ * @port_del: Callback used to delete the selected port along with the related function.
+ * Devlink core calls this upon user request to delete
+ * a port previously created by devlink_ops->port_new().
+ * @port_fn_hw_addr_get: Callback used to get port function's hardware address.
+ * Should be used by device drivers to report
+ * the hardware address of a function managed
+ * by the devlink port.
+ * @port_fn_hw_addr_set: Callback used to set port function's hardware address.
+ * Should be used by device drivers to set the hardware
+ * address of a function managed by the devlink port.
+ * @port_fn_roce_get: Callback used to get port function's RoCE capability.
+ * Should be used by device drivers to report
+ * the current state of RoCE capability of a function
+ * managed by the devlink port.
+ * @port_fn_roce_set: Callback used to set port function's RoCE capability.
+ * Should be used by device drivers to enable/disable
+ * RoCE capability of a function managed
+ * by the devlink port.
+ * @port_fn_migratable_get: Callback used to get port function's migratable
+ * capability. Should be used by device drivers
+ * to report the current state of migratable capability
+ * of a function managed by the devlink port.
+ * @port_fn_migratable_set: Callback used to set port function's migratable
+ * capability. Should be used by device drivers
+ * to enable/disable migratable capability of
+ * a function managed by the devlink port.
+ * @port_fn_state_get: Callback used to get port function's state.
+ * Should be used by device drivers to report
+ * the current admin and operational state of a
+ * function managed by the devlink port.
+ * @port_fn_state_set: Callback used to set port function's state.
+ * Should be used by device drivers to set
+ * the admin state of a function managed
+ * by the devlink port.
+ * @port_fn_ipsec_crypto_get: Callback used to get port function's ipsec_crypto
+ * capability. Should be used by device drivers
+ * to report the current state of ipsec_crypto
+ * capability of a function managed by the devlink
+ * port.
+ * @port_fn_ipsec_crypto_set: Callback used to set port function's ipsec_crypto
+ * capability. Should be used by device drivers to
+ * enable/disable ipsec_crypto capability of a
+ * function managed by the devlink port.
+ * @port_fn_ipsec_packet_get: Callback used to get port function's ipsec_packet
+ * capability. Should be used by device drivers
+ * to report the current state of ipsec_packet
+ * capability of a function managed by the devlink
+ * port.
+ * @port_fn_ipsec_packet_set: Callback used to set port function's ipsec_packet
+ * capability. Should be used by device drivers to
+ * enable/disable ipsec_packet capability of a
+ * function managed by the devlink port.
+ *
+ * Note: A driver should return -EOPNOTSUPP if it doesn't support
+ * port function (@port_fn_*) handling for a particular port.
+ */
+struct devlink_port_ops {
+ int (*port_split)(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack);
+ int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+ int (*port_type_set)(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type);
+ int (*port_del)(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_hw_addr_get)(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_hw_addr_set)(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_roce_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_roce_set)(struct devlink_port *devlink_port,
+ bool enable, struct netlink_ext_ack *extack);
+ int (*port_fn_migratable_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_migratable_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_state_get)(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_state_set)(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_crypto_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_crypto_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_packet_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_packet_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+};
+
+void devlink_port_init(struct devlink *devlink,
+ struct devlink_port *devlink_port);
+void devlink_port_fini(struct devlink_port *devlink_port);
+
+int devl_port_register_with_ops(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index,
+ const struct devlink_port_ops *ops);
+
+static inline int devl_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
{
- if (dev->netdev_ops->ndo_get_devlink_port)
- return dev->netdev_ops->ndo_get_devlink_port(dev);
- return NULL;
+ return devl_port_register_with_ops(devlink, devlink_port,
+ port_index, NULL);
}
-static inline struct devlink *netdev_to_devlink(struct net_device *dev)
-{
- struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
+int devlink_port_register_with_ops(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index,
+ const struct devlink_port_ops *ops);
- if (devlink_port)
- return devlink_port->devlink;
- return NULL;
+static inline int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
+{
+ return devlink_port_register_with_ops(devlink, devlink_port,
+ port_index, NULL);
}
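
A short sketch of registering a physical port with per-port ops; the split stub, the port number, and the index are illustrative only:

#include <linux/netlink.h>
#include <net/devlink.h>

static int foo_port_split(struct devlink *devlink, struct devlink_port *port,
			  unsigned int count, struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "Split is not supported on this port");
	return -EOPNOTSUPP;	/* placeholder: no split support yet */
}

static const struct devlink_port_ops foo_port_ops = {
	.port_split = foo_port_split,
};

static int foo_port_add(struct devlink *devlink, struct devlink_port *port)
{
	struct devlink_port_attrs attrs = {};

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = 1;	/* hypothetical front-panel port 1 */
	devlink_port_attrs_set(port, &attrs);
	return devlink_port_register_with_ops(devlink, port, 0,
					      &foo_port_ops);
}
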
-struct ib_device;
-
-struct net *devlink_net(const struct devlink *devlink);
-void devlink_net_set(struct devlink *devlink, struct net *net);
-struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
-int devlink_register(struct devlink *devlink, struct device *dev);
-void devlink_unregister(struct devlink *devlink);
-void devlink_reload_enable(struct devlink *devlink);
-void devlink_reload_disable(struct devlink *devlink);
-void devlink_free(struct devlink *devlink);
-int devlink_port_register(struct devlink *devlink,
- struct devlink_port *devlink_port,
- unsigned int port_index);
+void devl_port_unregister(struct devlink_port *devlink_port);
void devlink_port_unregister(struct devlink_port *devlink_port);
-void devlink_port_type_eth_set(struct devlink_port *devlink_port,
- struct net_device *netdev);
+void devlink_port_type_eth_set(struct devlink_port *devlink_port);
void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev);
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
struct devlink_port_attrs *devlink_port_attrs);
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf);
-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
- u16 pf, u16 vf);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external);
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external);
+void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
+ u32 controller, u16 pf, u32 sf,
+ bool external);
+int devl_port_fn_devlink_set(struct devlink_port *devlink_port,
+ struct devlink *fn_devlink);
+struct devlink_rate *
+devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name,
+ struct devlink_rate *parent);
+int
+devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv,
+ struct devlink_rate *parent);
+void devl_rate_leaf_destroy(struct devlink_port *devlink_port);
+void devl_rate_nodes_destroy(struct devlink *devlink);
+void devlink_port_linecard_set(struct devlink_port *devlink_port,
+ struct devlink_linecard *linecard);
+struct devlink_linecard *
+devl_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv);
+void devl_linecard_destroy(struct devlink_linecard *linecard);
+void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+ const char *type);
+void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
+void devlink_linecard_provision_fail(struct devlink_linecard *linecard);
+void devlink_linecard_activate(struct devlink_linecard *linecard);
+void devlink_linecard_deactivate(struct devlink_linecard *linecard);
+int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink);
+int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
u16 egress_tc_count);
+void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
-int devlink_dpipe_table_register(struct devlink *devlink,
- const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
- void *priv, bool counter_control_extern);
-void devlink_dpipe_table_unregister(struct devlink *devlink,
- const char *table_name);
-int devlink_dpipe_headers_register(struct devlink *devlink,
- struct devlink_dpipe_headers *dpipe_headers);
-void devlink_dpipe_headers_unregister(struct devlink *devlink);
+int devl_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, bool counter_control_extern);
+void devl_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name);
+void devl_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *dpipe_headers);
+void devl_dpipe_headers_unregister(struct devlink *devlink);
bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
const char *table_name);
int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx);
@@ -1235,60 +1756,69 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
+int devl_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params);
-void devlink_resources_unregister(struct devlink *devlink,
- struct devlink_resource *resource);
-int devlink_resource_size_get(struct devlink *devlink,
- u64 resource_id,
- u64 *p_resource_size);
-int devlink_dpipe_table_resource_set(struct devlink *devlink,
- const char *table_name, u64 resource_id,
- u64 resource_units);
+void devl_resources_unregister(struct devlink *devlink);
+void devlink_resources_unregister(struct devlink *devlink);
+int devl_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size);
+int devl_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units);
+void devl_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
void devlink_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv);
+void devl_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id);
+
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
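
A sketch of registering a single top-level resource; the "fdb" name, the sizes, and the FOO_RES_ID_FDB id are invented:

#include <net/devlink.h>

#define FOO_RES_ID_FDB 1	/* driver-chosen resource id */

static int foo_resources_register(struct devlink *devlink)
{
	struct devlink_resource_size_params size_params;

	devlink_resource_size_params_init(&size_params, 1024, 65536, 1024,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	/* the devl_* variant expects the instance lock to be held */
	return devl_resource_register(devlink, "fdb", 32768, FOO_RES_ID_FDB,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}
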
+int devl_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
int devlink_params_register(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
+void devl_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
-void devlink_params_publish(struct devlink *devlink);
-void devlink_params_unpublish(struct devlink *devlink);
-int devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
-void devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val);
-int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val);
-int
-devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value *init_val);
-int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value init_val);
-void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
-void devlink_port_param_value_changed(struct devlink_port *devlink_port,
- u32 param_id);
-void devlink_param_value_str_fill(union devlink_param_value *dst_val,
- const char *src);
+int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *val);
+void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devl_param_value_changed(struct devlink *devlink, u32 param_id);
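
A sketch of registering a generic driverinit parameter and seeding its default with the new devl_* API; the cmode choice and the default value are illustrative:

#include <net/devlink.h>

static const struct devlink_param foo_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ETH,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL),
};

static int foo_params_register(struct devlink *devlink)
{
	union devlink_param_value value;
	int err;

	err = devl_params_register(devlink, foo_params,
				   ARRAY_SIZE(foo_params));
	if (err)
		return err;

	value.vbool = true;	/* default: Ethernet function enabled */
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
					value);
	return 0;
}
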
+struct devlink_region *devl_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots,
+ u64 region_size);
struct devlink_region *
devlink_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void devl_region_destroy(struct devlink_region *region);
void devlink_region_destroy(struct devlink_region *region);
int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
@@ -1296,69 +1826,90 @@ int devlink_region_snapshot_create(struct devlink_region *region,
u8 *data, u32 snapshot_id);
int devlink_info_serial_number_put(struct devlink_info_req *req,
const char *sn);
-int devlink_info_driver_name_put(struct devlink_info_req *req,
- const char *name);
int devlink_info_board_serial_number_put(struct devlink_info_req *req,
const char *bsn);
+
+enum devlink_info_version_type {
+ DEVLINK_INFO_VERSION_TYPE_NONE,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT, /* May be used as flash update
+ * component by name.
+ */
+};
+
int devlink_info_version_fixed_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
int devlink_info_version_stored_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
+int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
int devlink_info_version_running_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
-
-int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
-int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
-
-int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name);
-int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
-
-int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name);
-int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
-int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name);
-int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
-
-int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value);
-int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
-int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
-int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
-int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len);
-
-int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
- bool value);
-int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u8 value);
-int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u32 value);
-int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u64 value);
-int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const char *value);
-int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u32 value_len);
+int devlink_info_version_running_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
+
+void devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
+void devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
+
+void devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name);
+void devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
+
+void devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+void devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
+void devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+void devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
+
+void devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
+void devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
+void devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len);
+
+void devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value);
+void devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value);
+void devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value);
+void devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value);
+void devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value);
+void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u32 value_len);
struct devlink_health_reporter *
-devlink_health_reporter_create(struct devlink *devlink,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+devl_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
struct devlink_health_reporter *
devlink_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
u64 graceful_period, void *priv);
+struct devlink_health_reporter *
+devl_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
void
-devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+devl_health_reporter_destroy(struct devlink_health_reporter *reporter);
void
-devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
void *
devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
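
A compact sketch tying a reporter to the (now void-returning) fmsg helpers above; struct foo_priv and its fields are invented:

#include <net/devlink.h>

struct foo_priv {
	u32 tx_timeouts;
	bool irq_healthy;
};

static int foo_reporter_diagnose(struct devlink_health_reporter *reporter,
				 struct devlink_fmsg *fmsg,
				 struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_health_reporter_priv(reporter);

	/* fmsg helpers only collect data; errors surface when the
	 * message is handed back to the core
	 */
	devlink_fmsg_u32_pair_put(fmsg, "tx_timeouts", priv->tx_timeouts);
	devlink_fmsg_bool_pair_put(fmsg, "irq_healthy", priv->irq_healthy);
	return 0;
}

static const struct devlink_health_reporter_ops foo_reporter_ops = {
	.name = "foo_tx",
	.diagnose = foo_reporter_diagnose,
};

The reporter would be created with devlink_health_reporter_create(devlink, &foo_reporter_ops, 0, priv), with a graceful_period of 0.
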
@@ -1370,19 +1921,32 @@ devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
+int devl_nested_devlink_set(struct devlink *devlink,
+ struct devlink *nested_devlink);
bool devlink_is_reload_failed(const struct devlink *devlink);
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed);
-void devlink_flash_update_begin_notify(struct devlink *devlink);
-void devlink_flash_update_end_notify(struct devlink *devlink);
void devlink_flash_update_status_notify(struct devlink *devlink,
const char *status_msg,
const char *component,
unsigned long done,
unsigned long total);
-
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout);
+
+int devl_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv);
int devlink_traps_register(struct devlink *devlink,
const struct devlink_trap *traps,
size_t traps_count, void *priv);
+void devl_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count);
void devlink_traps_unregister(struct devlink *devlink,
const struct devlink_trap *traps,
size_t traps_count);
@@ -1390,40 +1954,61 @@ void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
void *trap_ctx, struct devlink_port *in_devlink_port,
const struct flow_action_cookie *fa_cookie);
void *devlink_trap_ctx_priv(void *trap_ctx);
+int devl_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
int devlink_trap_groups_register(struct devlink *devlink,
const struct devlink_trap_group *groups,
size_t groups_count);
+void devl_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
void devlink_trap_groups_unregister(struct devlink *devlink,
const struct devlink_trap_group *groups,
size_t groups_count);
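
A sketch of registering one generic trap in a generic group using the DEVLINK_TRAP_GENERIC()/DEVLINK_TRAP_GROUP_GENERIC() helpers from this header; the policer id 0 (none) and the trap choice are illustrative:

#include <net/devlink.h>

static const struct devlink_trap_group foo_trap_groups[] = {
	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};

static const struct devlink_trap foo_traps[] = {
	DEVLINK_TRAP_GENERIC(DROP, DROP, DMAC_FILTER,
			     DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS, 0),
};

static int foo_traps_init(struct devlink *devlink, void *priv)
{
	int err;

	err = devl_trap_groups_register(devlink, foo_trap_groups,
					ARRAY_SIZE(foo_trap_groups));
	if (err)
		return err;
	return devl_traps_register(devlink, foo_traps,
				   ARRAY_SIZE(foo_traps), priv);
}

Trapped packets would then be handed to the core from the datapath via devlink_trap_report().
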
int
-devlink_trap_policers_register(struct devlink *devlink,
- const struct devlink_trap_policer *policers,
- size_t policers_count);
+devl_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
void
-devlink_trap_policers_unregister(struct devlink *devlink,
- const struct devlink_trap_policer *policers,
- size_t policers_count);
+devl_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
#if IS_ENABLED(CONFIG_NET_DEVLINK)
-void devlink_compat_running_version(struct net_device *dev,
+struct devlink *__must_check devlink_try_get(struct devlink *devlink);
+void devlink_put(struct devlink *devlink);
+
+void devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len);
-int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
+int devlink_compat_flash_update(struct devlink *devlink, const char *file_name);
int devlink_compat_phys_port_name_get(struct net_device *dev,
char *name, size_t len);
int devlink_compat_switch_id_get(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port);
+size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port);
+
#else
+static inline struct devlink *devlink_try_get(struct devlink *devlink)
+{
+ return NULL;
+}
+
+static inline void devlink_put(struct devlink *devlink)
+{
+}
+
static inline void
-devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
+devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len)
{
}
static inline int
-devlink_compat_flash_update(struct net_device *dev, const char *file_name)
+devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
{
return -EOPNOTSUPP;
}
@@ -1442,6 +2027,17 @@ devlink_compat_switch_id_get(struct net_device *dev,
return -EOPNOTSUPP;
}
+static inline int
+devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port)
+{
+ return 0;
+}
+
+static inline size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port)
+{
+ return 0;
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
diff --git a/include/net/dn.h b/include/net/dn.h
deleted file mode 100644
index 56ab0726c641..000000000000
--- a/include/net/dn.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_H
-#define _NET_DN_H
-
-#include <linux/dn.h>
-#include <net/sock.h>
-#include <net/flow.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-struct dn_scp /* Session Control Port */
-{
- unsigned char state;
-#define DN_O 1 /* Open */
-#define DN_CR 2 /* Connect Receive */
-#define DN_DR 3 /* Disconnect Reject */
-#define DN_DRC 4 /* Discon. Rej. Complete*/
-#define DN_CC 5 /* Connect Confirm */
-#define DN_CI 6 /* Connect Initiate */
-#define DN_NR 7 /* No resources */
-#define DN_NC 8 /* No communication */
-#define DN_CD 9 /* Connect Delivery */
-#define DN_RJ 10 /* Rejected */
-#define DN_RUN 11 /* Running */
-#define DN_DI 12 /* Disconnect Initiate */
-#define DN_DIC 13 /* Disconnect Complete */
-#define DN_DN 14 /* Disconnect Notificat */
-#define DN_CL 15 /* Closed */
-#define DN_CN 16 /* Closed Notification */
-
- __le16 addrloc;
- __le16 addrrem;
- __u16 numdat;
- __u16 numoth;
- __u16 numoth_rcv;
- __u16 numdat_rcv;
- __u16 ackxmt_dat;
- __u16 ackxmt_oth;
- __u16 ackrcv_dat;
- __u16 ackrcv_oth;
- __u8 flowrem_sw;
- __u8 flowloc_sw;
-#define DN_SEND 2
-#define DN_DONTSEND 1
-#define DN_NOCHANGE 0
- __u16 flowrem_dat;
- __u16 flowrem_oth;
- __u16 flowloc_dat;
- __u16 flowloc_oth;
- __u8 services_rem;
- __u8 services_loc;
- __u8 info_rem;
- __u8 info_loc;
-
- __u16 segsize_rem;
- __u16 segsize_loc;
-
- __u8 nonagle;
- __u8 multi_ireq;
- __u8 accept_mode;
- unsigned long seg_total; /* Running total of current segment */
-
- struct optdata_dn conndata_in;
- struct optdata_dn conndata_out;
- struct optdata_dn discdata_in;
- struct optdata_dn discdata_out;
- struct accessdata_dn accessdata;
-
- struct sockaddr_dn addr; /* Local address */
- struct sockaddr_dn peer; /* Remote address */
-
- /*
- * In this case the RTT estimation is not specified in the
- * docs, nor is any back off algorithm. Here we follow well
- * known tcp algorithms with a few small variations.
- *
- * snd_window: Max number of packets we send before we wait for
- * an ack to come back. This will become part of a
- * more complicated scheme when we support flow
- * control.
- *
- * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling
- * average.
- * nsp_rttvar: Round-Trip-Time-Varience (x4) in jiffies. This is the
- * varience of the smoothed average (but calculated in
- * a simpler way than for normal statistical varience
- * calculations).
- *
- * nsp_rxtshift: Backoff counter. Value is zero normally, each time
- * a packet is lost is increases by one until an ack
- * is received. Its used to index an array of backoff
- * multipliers.
- */
-#define NSP_MIN_WINDOW 1
-#define NSP_MAX_WINDOW (0x07fe)
- unsigned long max_window;
- unsigned long snd_window;
-#define NSP_INITIAL_SRTT (HZ)
- unsigned long nsp_srtt;
-#define NSP_INITIAL_RTTVAR (HZ*3)
- unsigned long nsp_rttvar;
-#define NSP_MAXRXTSHIFT 12
- unsigned long nsp_rxtshift;
-
- /*
- * Output queues, one for data, one for otherdata/linkservice
- */
- struct sk_buff_head data_xmit_queue;
- struct sk_buff_head other_xmit_queue;
-
- /*
- * Input queue for other data
- */
- struct sk_buff_head other_receive_queue;
- int other_report;
-
- /*
- * Stuff to do with the slow timer
- */
- unsigned long stamp; /* time of last transmit */
- unsigned long persist;
- int (*persist_fxn)(struct sock *sk);
- unsigned long keepalive;
- void (*keepalive_fxn)(struct sock *sk);
-
-};
-
-static inline struct dn_scp *DN_SK(struct sock *sk)
-{
- return (struct dn_scp *)(sk + 1);
-}
-
-/*
- * src,dst : Source and Destination DECnet addresses
- * hops : Number of hops through the network
- * dst_port, src_port : NSP port numbers
- * services, info : Useful data extracted from conninit messages
- * rt_flags : Routing flags byte
- * nsp_flags : NSP layer flags byte
- * segsize : Size of segment
- * segnum : Number, for data, otherdata and linkservice
- * xmit_count : Number of times we've transmitted this skb
- * stamp : Time stamp of most recent transmission, used in RTT calculations
- * iif: Input interface number
- *
- * As a general policy, this structure keeps all addresses in network
- * byte order, and all else in host byte order. Thus dst, src, dst_port
- * and src_port are in network order. All else is in host order.
- *
- */
-#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
-struct dn_skb_cb {
- __le16 dst;
- __le16 src;
- __u16 hops;
- __le16 dst_port;
- __le16 src_port;
- __u8 services;
- __u8 info;
- __u8 rt_flags;
- __u8 nsp_flags;
- __u16 segsize;
- __u16 segnum;
- __u16 xmit_count;
- unsigned long stamp;
- int iif;
-};
-
-static inline __le16 dn_eth2dn(unsigned char *ethaddr)
-{
- return get_unaligned((__le16 *)(ethaddr + 4));
-}
-
-static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr)
-{
- return *(__le16 *)saddr->sdn_nodeaddr;
-}
-
-static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
-{
- __u16 a = le16_to_cpu(addr);
- ethaddr[0] = 0xAA;
- ethaddr[1] = 0x00;
- ethaddr[2] = 0x04;
- ethaddr[3] = 0x00;
- ethaddr[4] = (__u8)(a & 0xff);
- ethaddr[5] = (__u8)(a >> 8);
-}
-
-static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
-{
- fld->fld_sport = scp->addrloc;
- fld->fld_dport = scp->addrrem;
-}
-
-unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
-#define DN_MENUVER_ACC 0x01
-#define DN_MENUVER_USR 0x02
-#define DN_MENUVER_PRX 0x04
-#define DN_MENUVER_UIC 0x08
-
-struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-struct sock *dn_find_by_skb(struct sk_buff *skb);
-#define DN_ASCBUF_LEN 9
-char *dn_addr2asc(__u16, char *);
-int dn_destroy_timer(struct sock *sk);
-
-int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
- unsigned char type);
-int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
- unsigned char *type);
-
-void dn_start_slow_timer(struct sock *sk);
-void dn_stop_slow_timer(struct sock *sk);
-
-extern __le16 decnet_address;
-extern int decnet_debug_level;
-extern int decnet_time_wait;
-extern int decnet_dn_count;
-extern int decnet_di_count;
-extern int decnet_dr_count;
-extern int decnet_no_fc_max_cwnd;
-
-extern long sysctl_decnet_mem[3];
-extern int sysctl_decnet_wmem[3];
-extern int sysctl_decnet_rmem[3];
-
-#endif /* _NET_DN_H */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
deleted file mode 100644
index 595b4f6c1eb1..000000000000
--- a/include/net/dn_dev.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_DEV_H
-#define _NET_DN_DEV_H
-
-
-struct dn_dev;
-
-struct dn_ifaddr {
- struct dn_ifaddr __rcu *ifa_next;
- struct dn_dev *ifa_dev;
- __le16 ifa_local;
- __le16 ifa_address;
- __u32 ifa_flags;
- __u8 ifa_scope;
- char ifa_label[IFNAMSIZ];
- struct rcu_head rcu;
-};
-
-#define DN_DEV_S_RU 0 /* Run - working normally */
-#define DN_DEV_S_CR 1 /* Circuit Rejected */
-#define DN_DEV_S_DS 2 /* Data Link Start */
-#define DN_DEV_S_RI 3 /* Routing Layer Initialize */
-#define DN_DEV_S_RV 4 /* Routing Layer Verify */
-#define DN_DEV_S_RC 5 /* Routing Layer Complete */
-#define DN_DEV_S_OF 6 /* Off */
-#define DN_DEV_S_HA 7 /* Halt */
-
-
-/*
- * The dn_dev_parms structure contains the set of parameters
- * for each device (hence inclusion in the dn_dev structure)
- * and an array is used to store the default types of supported
- * device (in dn_dev.c).
- *
- * The type field matches the ARPHRD_ constants and is used in
- * searching the list for supported devices when new devices
- * come up.
- *
- * The mode field is used to find out if a device is broadcast,
- * multipoint, or pointopoint. Please note that DECnet thinks
- * different ways about devices to the rest of the kernel
- * so the normal IFF_xxx flags are invalid here. For devices
- * which can be any combination of the previously mentioned
- * attributes, you can set this on a per device basis by
- * installing an up() routine.
- *
- * The device state field, defines the initial state in which the
- * device will come up. In the dn_dev structure, it is the actual
- * state.
- *
- * Things have changed here. I've killed timer1 since it's a user space
- * issue for a user space routing deamon to sort out. The kernel does
- * not need to be bothered with it.
- *
- * Timers:
- * t2 - Rate limit timer, min time between routing and hello messages
- * t3 - Hello timer, send hello messages when it expires
- *
- * Callbacks:
- * up() - Called to initialize device, return value can veto use of
- * device with DECnet.
- * down() - Called to turn device off when it goes down
- * timer3() - Called once for each ifaddr when timer 3 goes off
- *
- * sysctl - Hook for sysctl things
- *
- */
-struct dn_dev_parms {
- int type; /* ARPHRD_xxx */
- int mode; /* Broadcast, Unicast, Mulitpoint */
-#define DN_DEV_BCAST 1
-#define DN_DEV_UCAST 2
-#define DN_DEV_MPOINT 4
- int state; /* Initial state */
- int forwarding; /* 0=EndNode, 1=L1Router, 2=L2Router */
- unsigned long t2; /* Default value of t2 */
- unsigned long t3; /* Default value of t3 */
- int priority; /* Priority to be a router */
- char *name; /* Name for sysctl */
- int (*up)(struct net_device *);
- void (*down)(struct net_device *);
- void (*timer3)(struct net_device *, struct dn_ifaddr *ifa);
- void *sysctl;
-};
-
-
-struct dn_dev {
- struct dn_ifaddr __rcu *ifa_list;
- struct net_device *dev;
- struct dn_dev_parms parms;
- char use_long;
- struct timer_list timer;
- unsigned long t3;
- struct neigh_parms *neigh_parms;
- __u8 addr[ETH_ALEN];
- struct neighbour *router; /* Default router on circuit */
- struct neighbour *peer; /* Peer on pointopoint links */
- unsigned long uptime; /* Time device went up in jiffies */
-};
-
-struct dn_short_packet {
- __u8 msgflg;
- __le16 dstnode;
- __le16 srcnode;
- __u8 forward;
-} __packed;
-
-struct dn_long_packet {
- __u8 msgflg;
- __u8 d_area;
- __u8 d_subarea;
- __u8 d_id[6];
- __u8 s_area;
- __u8 s_subarea;
- __u8 s_id[6];
- __u8 nl2;
- __u8 visit_ct;
- __u8 s_class;
- __u8 pt;
-} __packed;
-
-/*------------------------- DRP - Routing messages ---------------------*/
-
-struct endnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 area;
- __u8 seed[8];
- __u8 neighbor[6];
- __le16 timer;
- __u8 mpd;
- __u8 datalen;
- __u8 data[2];
-} __packed;
-
-struct rtnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 priority;
- __u8 area;
- __le16 timer;
- __u8 mpd;
-} __packed;
-
-
-void dn_dev_init(void);
-void dn_dev_cleanup(void);
-
-int dn_dev_ioctl(unsigned int cmd, void __user *arg);
-
-void dn_dev_devices_off(void);
-void dn_dev_devices_on(void);
-
-void dn_dev_init_pkt(struct sk_buff *skb);
-void dn_dev_veri_pkt(struct sk_buff *skb);
-void dn_dev_hello(struct sk_buff *skb);
-
-void dn_dev_up(struct net_device *);
-void dn_dev_down(struct net_device *);
-
-int dn_dev_set_default(struct net_device *dev, int force);
-struct net_device *dn_dev_get_default(void);
-int dn_dev_bind_default(__le16 *addr);
-
-int register_dnaddr_notifier(struct notifier_block *nb);
-int unregister_dnaddr_notifier(struct notifier_block *nb);
-
-static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
-{
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
- int res = 0;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL) {
- printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
- goto out;
- }
-
- for (ifa = rcu_dereference(dn_db->ifa_list);
- ifa != NULL;
- ifa = rcu_dereference(ifa->ifa_next))
- if ((addr ^ ifa->ifa_local) == 0) {
- res = 1;
- break;
- }
-out:
- rcu_read_unlock();
- return res;
-}
-
-#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
deleted file mode 100644
index ccc6e9df178b..000000000000
--- a/include/net/dn_fib.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_FIB_H
-#define _NET_DN_FIB_H
-
-#include <linux/netlink.h>
-#include <linux/refcount.h>
-
-extern const struct nla_policy rtm_dn_policy[];
-
-struct dn_fib_res {
- struct fib_rule *r;
- struct dn_fib_info *fi;
- unsigned char prefixlen;
- unsigned char nh_sel;
- unsigned char type;
- unsigned char scope;
-};
-
-struct dn_fib_nh {
- struct net_device *nh_dev;
- unsigned int nh_flags;
- unsigned char nh_scope;
- int nh_weight;
- int nh_power;
- int nh_oif;
- __le16 nh_gw;
-};
-
-struct dn_fib_info {
- struct dn_fib_info *fib_next;
- struct dn_fib_info *fib_prev;
- int fib_treeref;
- refcount_t fib_clntref;
- int fib_dead;
- unsigned int fib_flags;
- int fib_protocol;
- __le16 fib_prefsrc;
- __u32 fib_priority;
- __u32 fib_metrics[RTAX_MAX];
- int fib_nhs;
- int fib_power;
- struct dn_fib_nh fib_nh[0];
-#define dn_fib_dev fib_nh[0].nh_dev
-};
-
-
-#define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
-#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
-
-#define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
-#define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
-#define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
-#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
-
-typedef struct {
- __le16 datum;
-} dn_fib_key_t;
-
-typedef struct {
- __le16 datum;
-} dn_fib_hash_t;
-
-typedef struct {
- __u16 datum;
-} dn_fib_idx_t;
-
-struct dn_fib_node {
- struct dn_fib_node *fn_next;
- struct dn_fib_info *fn_info;
-#define DN_FIB_INFO(f) ((f)->fn_info)
- dn_fib_key_t fn_key;
- u8 fn_type;
- u8 fn_scope;
- u8 fn_state;
-};
-
-
-struct dn_fib_table {
- struct hlist_node hlist;
- u32 n;
-
- int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
- struct dn_fib_res *res);
- int (*flush)(struct dn_fib_table *t);
- int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
-
- unsigned char data[];
-};
-
-#ifdef CONFIG_DECNET_ROUTER
-/*
- * dn_fib.c
- */
-void dn_fib_init(void);
-void dn_fib_cleanup(void);
-
-int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
- struct nlattr *attrs[],
- const struct nlmsghdr *nlh, int *errp);
-int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct flowidn *fld, struct dn_fib_res *res);
-void dn_fib_release_info(struct dn_fib_info *fi);
-void dn_fib_flush(void);
-void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
-
-/*
- * dn_tables.c
- */
-struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-struct dn_fib_table *dn_fib_empty_table(void);
-void dn_fib_table_init(void);
-void dn_fib_table_cleanup(void);
-
-/*
- * dn_rules.c
- */
-void dn_fib_rules_init(void);
-void dn_fib_rules_cleanup(void);
-unsigned int dnet_addr_type(__le16 addr);
-int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
-
-int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
-void dn_fib_free_info(struct dn_fib_info *fi);
-
-static inline void dn_fib_info_put(struct dn_fib_info *fi)
-{
- if (refcount_dec_and_test(&fi->fib_clntref))
- dn_fib_free_info(fi);
-}
-
-static inline void dn_fib_res_put(struct dn_fib_res *res)
-{
- if (res->fi)
- dn_fib_info_put(res->fi);
- if (res->r)
- fib_rule_put(res->r);
-}
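[Editor's note: the lookup/put helpers above follow a take/release pattern; a hedged usage sketch (the flowidn setup is omitted and names are illustrative):

	struct dn_fib_res res;

	if (dn_fib_lookup(&fld, &res) == 0) {
		/* res now holds references: fi via fib_clntref, r via the rule */
		struct net_device *dev = DN_FIB_RES_DEV(res);

		/* ... route the packet via dev, DN_FIB_RES_GW(res), ... */
		dn_fib_res_put(&res);	/* drop both references */
	}
]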
-
-#else /* Endnode */
-
-#define dn_fib_init() do { } while(0)
-#define dn_fib_cleanup() do { } while(0)
-
-#define dn_fib_lookup(fl, res) (-ESRCH)
-#define dn_fib_info_put(fi) do { } while(0)
-#define dn_fib_select_multipath(fl, res) do { } while(0)
-#define dn_fib_rules_policy(saddr,res,flags) (0)
-#define dn_fib_res_put(res) do { } while(0)
-
-#endif /* CONFIG_DECNET_ROUTER */
-
-static inline __le16 dnet_make_mask(int n)
-{
- if (n)
- return cpu_to_le16(~((1 << (16 - n)) - 1));
- return cpu_to_le16(0);
-}
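[Editor's note: a worked example of the mask arithmetic above (illustrative, not part of the original header):

	/* dnet_make_mask(10):
	 *   16 - 10            = 6 host bits
	 *   (1 << 6) - 1       = 0x003f
	 *   ~0x003f (16 bits)  = 0xffc0
	 * so the call returns cpu_to_le16(0xffc0); n == 0 yields an
	 * all-zero mask.
	 */
]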
-
-#endif /* _NET_DN_FIB_H */
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
deleted file mode 100644
index 2e3e7793973a..000000000000
--- a/include/net/dn_neigh.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_NEIGH_H
-#define _NET_DN_NEIGH_H
-
-/*
- * The positions of the first two fields of
- * this structure are critical - SJW
- */
-struct dn_neigh {
- struct neighbour n;
- __le16 addr;
- unsigned long flags;
-#define DN_NDFLAG_R1 0x0001 /* Router L1 */
-#define DN_NDFLAG_R2 0x0002 /* Router L2 */
-#define DN_NDFLAG_P3 0x0004 /* Phase III Node */
- unsigned long blksize;
- __u8 priority;
-};
-
-void dn_neigh_init(void);
-void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
-int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-
-extern struct neigh_table dn_neigh_table;
-
-#endif /* _NET_DN_NEIGH_H */
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
deleted file mode 100644
index f83932b864a9..000000000000
--- a/include/net/dn_nsp.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_NSP_H
-#define _NET_DN_NSP_H
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-/* dn_nsp.c function prototypes */
-
-void dn_nsp_send_data_ack(struct sock *sk);
-void dn_nsp_send_oth_ack(struct sock *sk);
-void dn_send_conn_ack(struct sock *sk);
-void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, gfp_t gfp);
-void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
- unsigned short reason);
-void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-void dn_nsp_output(struct sock *sk);
-int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *q, unsigned short acknum);
-void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
- int oob);
-unsigned long dn_nsp_persist(struct sock *sk);
-int dn_nsp_xmit_timeout(struct sock *sk);
-
-int dn_nsp_rx(struct sk_buff *);
-int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
- long timeo, int *err);
-
-#define NSP_REASON_OK 0 /* No error */
-#define NSP_REASON_NR 1 /* No resources */
-#define NSP_REASON_UN 2 /* Unrecognised node name */
-#define NSP_REASON_SD 3 /* Node shutting down */
-#define NSP_REASON_ID 4 /* Invalid destination end user */
-#define NSP_REASON_ER 5 /* End user lacks resources */
-#define NSP_REASON_OB 6 /* Object too busy */
-#define NSP_REASON_US 7 /* Unspecified error */
-#define NSP_REASON_TP 8 /* Third-Party abort */
-#define NSP_REASON_EA 9 /* End user has aborted the link */
-#define NSP_REASON_IF 10 /* Invalid node name format */
-#define NSP_REASON_LS 11 /* Local node shutdown */
-#define NSP_REASON_LL 32 /* Node lacks logical-link resources */
-#define NSP_REASON_LE 33 /* End user lacks logical-link resources */
-#define NSP_REASON_UR 34 /* Unacceptable RQSTRID or PASSWORD field */
-#define NSP_REASON_UA 36 /* Unacceptable ACCOUNT field */
-#define NSP_REASON_TM 38 /* End user timed out logical link */
-#define NSP_REASON_NU 39 /* Node unreachable */
-#define NSP_REASON_NL 41 /* No-link message */
-#define NSP_REASON_DC 42 /* Disconnect confirm */
-#define NSP_REASON_IO 43 /* Image data field overflow */
-
-#define NSP_DISCINIT 0x38
-#define NSP_DISCCONF 0x48
-
-/*------------------------- NSP - messages ------------------------------*/
-/* Data Messages */
-/*---------------*/
-
-/* Data Messages (data segment/interrupt/link service) */
-
-struct nsp_data_seg_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
-} __packed;
-
-struct nsp_data_opt_msg {
- __le16 acknum;
- __le16 segnum;
- __le16 lsflgs;
-} __packed;
-
-struct nsp_data_opt_msg1 {
- __le16 acknum;
- __le16 segnum;
-} __packed;
-
-
-/* Acknowledgment Message (data/other data) */
-struct nsp_data_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 acknum;
-} __packed;
-
-/* Connect Acknowledgment Message */
-struct nsp_conn_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
-} __packed;
-
-
-/* Connect Initiate/Retransmit Initiate/Connect Confirm */
-struct nsp_conn_init_msg {
- __u8 msgflg;
-#define NSP_CI 0x18 /* Connect Initiate */
-#define NSP_RCI 0x68 /* Retrans. Conn Init */
- __le16 dstaddr;
- __le16 srcaddr;
- __u8 services;
-#define NSP_FC_NONE 0x00 /* Flow Control None */
-#define NSP_FC_SRC 0x04 /* Seg Req. Count */
-#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
-#define NSP_FC_MASK 0x0c /* FC type mask */
- __u8 info;
- __le16 segsize;
-} __packed;
-
-/* Disconnect Initiate/Disconnect Confirm */
-struct nsp_disconn_init_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 reason;
-} __packed;
-
-
-
-struct srcobj_fmt {
- __u8 format;
- __u8 task;
- __le16 grpcode;
- __le16 usrcode;
- __u8 dlen;
-} __packed;
-
-/*
- * A collection of functions for manipulating the sequence
- * numbers used in NSP. Similar in operation to the functions
- * of the same name in TCP.
- */
-static __inline__ int dn_before(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq1 - seq2) & 0x0fff) > 2048;
-}
-
-
-static __inline__ int dn_after(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq2 - seq1) & 0x0fff) > 2048;
-}
-
-static __inline__ int dn_equal(__u16 seq1, __u16 seq2)
-{
- return ((seq1 ^ seq2) & 0x0fff) == 0;
-}
-
-static __inline__ int dn_before_or_equal(__u16 seq1, __u16 seq2)
-{
- return (dn_before(seq1, seq2) || dn_equal(seq1, seq2));
-}
-
-static __inline__ void seq_add(__u16 *seq, __u16 off)
-{
- (*seq) += off;
- (*seq) &= 0x0fff;
-}
-
-static __inline__ int seq_next(__u16 seq1, __u16 seq2)
-{
- return dn_equal(seq1 + 1, seq2);
-}
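[Editor's note: NSP sequence numbers are 12 bits, so the comparisons above work modulo 4096 with a half-range of 2048; two worked cases (illustrative only):

	/* dn_before(4095, 2): (4095 - 2) & 0xfff = 4093 > 2048 -> true,
	 *   i.e. 4095 is "before" 2 once the counter wraps.
	 * dn_before(2, 4095): (2 - 4095) & 0xfff = 3, not > 2048 -> false.
	 */
]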
-
-/*
- * Can we delay the ack?
- */
-static __inline__ int sendack(__u16 seq)
-{
- return (int)((seq & 0x1000) ? 0 : 1);
-}
-
-/*
- * Is the socket congested?
- */
-static __inline__ int dn_congested(struct sock *sk)
-{
- return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
-}
-
-#define DN_MAX_NSP_DATA_HEADER (11)
-
-#endif /* _NET_DN_NSP_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
deleted file mode 100644
index 6f1e94ac0bdf..000000000000
--- a/include/net/dn_route.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_ROUTE_H
-#define _NET_DN_ROUTE_H
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
- struct sock *sk, int flags);
-int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-void dn_rt_cache_flush(int delay);
-int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-
-/* Masks for flags field */
-#define DN_RT_F_PID 0x07 /* Mask for packet type */
-#define DN_RT_F_PF 0x80 /* Padding Follows */
-#define DN_RT_F_VER 0x40 /* Version =0 discard packet if ==1 */
-#define DN_RT_F_IE 0x20 /* Intra Ethernet, Reserved in short pkt */
-#define DN_RT_F_RTS 0x10 /* Packet is being returned to sender */
-#define DN_RT_F_RQR 0x08 /* Return packet to sender upon non-delivery */
-
-/* Mask for types of routing packets */
-#define DN_RT_PKT_MSK 0x06
-/* Types of routing packets */
-#define DN_RT_PKT_SHORT 0x02 /* Short routing packet */
-#define DN_RT_PKT_LONG 0x06 /* Long routing packet */
-
-/* Mask for control/routing selection */
-#define DN_RT_PKT_CNTL 0x01 /* Set to 1 if a control packet */
-/* Types of control packets */
-#define DN_RT_CNTL_MSK 0x0f /* Mask for control packets */
-#define DN_RT_PKT_INIT 0x01 /* Initialisation packet */
-#define DN_RT_PKT_VERI 0x03 /* Verification Message */
-#define DN_RT_PKT_HELO 0x05 /* Hello and Test Message */
-#define DN_RT_PKT_L1RT 0x07 /* Level 1 Routing Message */
-#define DN_RT_PKT_L2RT 0x09 /* Level 2 Routing Message */
-#define DN_RT_PKT_ERTH 0x0b /* Ethernet Router Hello */
-#define DN_RT_PKT_EEDH 0x0d /* Ethernet EndNode Hello */
-
-/* Values for info field in hello message */
-#define DN_RT_INFO_TYPE 0x03 /* Type mask */
-#define DN_RT_INFO_L1RT 0x02 /* L1 Router */
-#define DN_RT_INFO_L2RT 0x01 /* L2 Router */
-#define DN_RT_INFO_ENDN 0x03 /* EndNode */
-#define DN_RT_INFO_VERI 0x04 /* Verification Reqd. */
-#define DN_RT_INFO_RJCT 0x08 /* Reject Flag, Reserved */
-#define DN_RT_INFO_VFLD 0x10 /* Verification Failed, Reserved */
-#define DN_RT_INFO_NOML 0x20 /* No Multicast traffic accepted */
-#define DN_RT_INFO_BLKR 0x40 /* Blocking Requested */
-
-/*
- * The fl structure is what we used to look up the route.
- * The rt_saddr & rt_daddr entries are the same as key.saddr & key.daddr
- * except for local input routes, where the rt_saddr = fl.fld_dst and
- * rt_daddr = fl.fld_src to allow the route to be used for returning
- * packets to the originating host.
- */
-struct dn_route {
- struct dst_entry dst;
- struct dn_route __rcu *dn_next;
-
- struct neighbour *n;
-
- struct flowidn fld;
-
- __le16 rt_saddr;
- __le16 rt_daddr;
- __le16 rt_gateway;
- __le16 rt_local_src; /* Source used for forwarding packets */
- __le16 rt_src_map;
- __le16 rt_dst_map;
-
- unsigned int rt_flags;
- unsigned int rt_type;
-};
-
-static inline bool dn_is_input_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif != 0;
-}
-
-static inline bool dn_is_output_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif == 0;
-}
-
-void dn_route_init(void);
-void dn_route_cleanup(void);
-
-#include <net/sock.h>
-#include <linux/if_arp.h>
-
-static inline void dn_rt_send(struct sk_buff *skb)
-{
- dev_queue_xmit(skb);
-}
-
-static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src)
-{
- struct net_device *dev = skb->dev;
-
- if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
- dst = NULL;
-
- if (dev_hard_header(skb, dev, ETH_P_DNA_RT, dst, src, skb->len) >= 0)
- dn_rt_send(skb);
- else
- kfree_skb(skb);
-}
-
-#endif /* _NET_DN_ROUTE_H */
diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h
deleted file mode 100644
index 3f5b6ddb3179..000000000000
--- a/include/net/drop_monitor.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef _NET_DROP_MONITOR_H_
-#define _NET_DROP_MONITOR_H_
-
-#include <linux/ktime.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <net/flow_offload.h>
-
-/**
- * struct net_dm_hw_metadata - Hardware-supplied packet metadata.
- * @trap_group_name: Hardware trap group name.
- * @trap_name: Hardware trap name.
- * @input_dev: Input netdevice.
- * @fa_cookie: Flow action user cookie.
- */
-struct net_dm_hw_metadata {
- const char *trap_group_name;
- const char *trap_name;
- struct net_device *input_dev;
- const struct flow_action_cookie *fa_cookie;
-};
-
-#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR)
-void net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata);
-#else
-static inline void
-net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
-{
-}
-#endif
-
-#endif /* _NET_DROP_MONITOR_H_ */
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
new file mode 100644
index 000000000000..9707ab54fdd5
--- /dev/null
+++ b/include/net/dropreason-core.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_DROPREASON_CORE_H
+#define _LINUX_DROPREASON_CORE_H
+
+#define DEFINE_DROP_REASON(FN, FNe) \
+ FN(NOT_SPECIFIED) \
+ FN(NO_SOCKET) \
+ FN(PKT_TOO_SMALL) \
+ FN(TCP_CSUM) \
+ FN(SOCKET_FILTER) \
+ FN(UDP_CSUM) \
+ FN(NETFILTER_DROP) \
+ FN(OTHERHOST) \
+ FN(IP_CSUM) \
+ FN(IP_INHDR) \
+ FN(IP_RPFILTER) \
+ FN(UNICAST_IN_L2_MULTICAST) \
+ FN(XFRM_POLICY) \
+ FN(IP_NOPROTO) \
+ FN(SOCKET_RCVBUFF) \
+ FN(PROTO_MEM) \
+ FN(TCP_AUTH_HDR) \
+ FN(TCP_MD5NOTFOUND) \
+ FN(TCP_MD5UNEXPECTED) \
+ FN(TCP_MD5FAILURE) \
+ FN(TCP_AONOTFOUND) \
+ FN(TCP_AOUNEXPECTED) \
+ FN(TCP_AOKEYNOTFOUND) \
+ FN(TCP_AOFAILURE) \
+ FN(SOCKET_BACKLOG) \
+ FN(TCP_FLAGS) \
+ FN(TCP_ABORT_ON_DATA) \
+ FN(TCP_ZEROWINDOW) \
+ FN(TCP_OLD_DATA) \
+ FN(TCP_OVERWINDOW) \
+ FN(TCP_OFOMERGE) \
+ FN(TCP_RFC7323_PAWS) \
+ FN(TCP_OLD_SEQUENCE) \
+ FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_INVALID_ACK_SEQUENCE) \
+ FN(TCP_RESET) \
+ FN(TCP_INVALID_SYN) \
+ FN(TCP_CLOSE) \
+ FN(TCP_FASTOPEN) \
+ FN(TCP_OLD_ACK) \
+ FN(TCP_TOO_OLD_ACK) \
+ FN(TCP_ACK_UNSENT_DATA) \
+ FN(TCP_OFO_QUEUE_PRUNE) \
+ FN(TCP_OFO_DROP) \
+ FN(IP_OUTNOROUTES) \
+ FN(BPF_CGROUP_EGRESS) \
+ FN(IPV6DISABLED) \
+ FN(NEIGH_CREATEFAIL) \
+ FN(NEIGH_FAILED) \
+ FN(NEIGH_QUEUEFULL) \
+ FN(NEIGH_DEAD) \
+ FN(TC_EGRESS) \
+ FN(SECURITY_HOOK) \
+ FN(QDISC_DROP) \
+ FN(CPU_BACKLOG) \
+ FN(XDP) \
+ FN(TC_INGRESS) \
+ FN(UNHANDLED_PROTO) \
+ FN(SKB_CSUM) \
+ FN(SKB_GSO_SEG) \
+ FN(SKB_UCOPY_FAULT) \
+ FN(DEV_HDR) \
+ FN(DEV_READY) \
+ FN(FULL_RING) \
+ FN(NOMEM) \
+ FN(HDR_TRUNC) \
+ FN(TAP_FILTER) \
+ FN(TAP_TXFILTER) \
+ FN(ICMP_CSUM) \
+ FN(INVALID_PROTO) \
+ FN(IP_INADDRERRORS) \
+ FN(IP_INNOROUTES) \
+ FN(PKT_TOO_BIG) \
+ FN(DUP_FRAG) \
+ FN(FRAG_REASM_TIMEOUT) \
+ FN(FRAG_TOO_FAR) \
+ FN(TCP_MINTTL) \
+ FN(IPV6_BAD_EXTHDR) \
+ FN(IPV6_NDISC_FRAG) \
+ FN(IPV6_NDISC_HOP_LIMIT) \
+ FN(IPV6_NDISC_BAD_CODE) \
+ FN(IPV6_NDISC_BAD_OPTIONS) \
+ FN(IPV6_NDISC_NS_OTHERHOST) \
+ FN(QUEUE_PURGE) \
+ FN(TC_COOKIE_ERROR) \
+ FN(PACKET_SOCK_ERROR) \
+ FN(TC_CHAIN_NOTFOUND) \
+ FN(TC_RECLASSIFY_LOOP) \
+ FNe(MAX)
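[Editor's note: DEFINE_DROP_REASON() is an X-macro: callers pass FN/FNe expansions to generate both the enum below and, in C files, matching string tables. A sketch of the conventional string-table expansion (the array name is illustrative):

	#define FN(reason)  [SKB_DROP_REASON_##reason] = #reason,
	#define FNe(reason) [SKB_DROP_REASON_##reason] = #reason

	static const char * const drop_reasons_sketch[] = {
		DEFINE_DROP_REASON(FN, FNe)
	};

	#undef FN
	#undef FNe
]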
+
+/**
+ * enum skb_drop_reason - the reasons for skb drops
+ *
+ * The reason for an skb drop, as used in kfree_skb_reason().
+ */
+enum skb_drop_reason {
+ /**
+ * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case)
+ */
+ SKB_NOT_DROPPED_YET = 0,
+ /** @SKB_CONSUMED: packet has been consumed */
+ SKB_CONSUMED,
+ /** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
+ SKB_DROP_REASON_NOT_SPECIFIED,
+ /**
+ * @SKB_DROP_REASON_NO_SOCKET: no valid socket that can be used.
+ * The reason can be one of three cases:
+ * 1) no established/listening socket found during the lookup
+ * 2) no valid request socket during the 3WHS
+ * 3) no valid child socket during the 3WHS
+ */
+ SKB_DROP_REASON_NO_SOCKET,
+ /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
+ SKB_DROP_REASON_PKT_TOO_SMALL,
+ /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */
+ SKB_DROP_REASON_TCP_CSUM,
+ /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
+ SKB_DROP_REASON_SOCKET_FILTER,
+ /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */
+ SKB_DROP_REASON_UDP_CSUM,
+ /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */
+ SKB_DROP_REASON_NETFILTER_DROP,
+ /**
+ * @SKB_DROP_REASON_OTHERHOST: packet doesn't belong to the current host
+ * (interface is in promisc mode)
+ */
+ SKB_DROP_REASON_OTHERHOST,
+ /** @SKB_DROP_REASON_IP_CSUM: IP checksum error */
+ SKB_DROP_REASON_IP_CSUM,
+ /**
+ * @SKB_DROP_REASON_IP_INHDR: there is something wrong with the IP header (see
+ * IPSTATS_MIB_INHDRERRORS)
+ */
+ SKB_DROP_REASON_IP_INHDR,
+ /**
+ * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validation failed. See the
+ * documentation for rp_filter in ip-sysctl.rst for more information.
+ */
+ SKB_DROP_REASON_IP_RPFILTER,
+ /**
+ * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: the L2 destination address is
+ * multicast, but the L3 destination is unicast.
+ */
+ SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST,
+ /** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */
+ SKB_DROP_REASON_XFRM_POLICY,
+ /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */
+ SKB_DROP_REASON_IP_NOPROTO,
+ /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buffer is full */
+ SKB_DROP_REASON_SOCKET_RCVBUFF,
+ /**
+ * @SKB_DROP_REASON_PROTO_MEM: protocol memory limitation, such as a UDP
+ * packet dropped because udp_memory_allocated was exceeded.
+ */
+ SKB_DROP_REASON_PROTO_MEM,
+ /**
+ * @SKB_DROP_REASON_TCP_AUTH_HDR: TCP-MD5 or TCP-AO hashes appear
+ * twice or are set incorrectly.
+ */
+ SKB_DROP_REASON_TCP_AUTH_HDR,
+ /**
+ * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash when one was expected,
+ * corresponding to LINUX_MIB_TCPMD5NOTFOUND
+ */
+ SKB_DROP_REASON_TCP_MD5NOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: an MD5 hash was present when none
+ * was expected, corresponding to LINUX_MIB_TCPMD5UNEXPECTED
+ */
+ SKB_DROP_REASON_TCP_MD5UNEXPECTED,
+ /**
+ * @SKB_DROP_REASON_TCP_MD5FAILURE: the MD5 hash is wrong, corresponding
+ * to LINUX_MIB_TCPMD5FAILURE
+ */
+ SKB_DROP_REASON_TCP_MD5FAILURE,
+ /**
+ * @SKB_DROP_REASON_TCP_AONOTFOUND: no TCP-AO hash and one was expected,
+ * corresponding to LINUX_MIB_TCPAOREQUIRED
+ */
+ SKB_DROP_REASON_TCP_AONOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_AOUNEXPECTED: TCP-AO hash is present and it
+ * was not expected, corresponding to LINUX_MIB_TCPAOKEYNOTFOUND
+ */
+ SKB_DROP_REASON_TCP_AOUNEXPECTED,
+ /**
+ * @SKB_DROP_REASON_TCP_AOKEYNOTFOUND: TCP-AO key is unknown,
+ * corresponding to LINUX_MIB_TCPAOKEYNOTFOUND
+ */
+ SKB_DROP_REASON_TCP_AOKEYNOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_AOFAILURE: TCP-AO hash is wrong,
+ * corresponding to LINUX_MIB_TCPAOBAD
+ */
+ SKB_DROP_REASON_TCP_AOFAILURE,
+ /**
+ * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog (
+ * see LINUX_MIB_TCPBACKLOGDROP)
+ */
+ SKB_DROP_REASON_SOCKET_BACKLOG,
+ /** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
+ SKB_DROP_REASON_TCP_FLAGS,
+ /**
+ * @SKB_DROP_REASON_TCP_ABORT_ON_DATA: abort on data, corresponding to
+ * LINUX_MIB_TCPABORTONDATA
+ */
+ SKB_DROP_REASON_TCP_ABORT_ON_DATA,
+ /**
+ * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
+ * see LINUX_MIB_TCPZEROWINDOWDROP
+ */
+ SKB_DROP_REASON_TCP_ZEROWINDOW,
+ /**
+ * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received was already
+ * received before (a spurious retransmit may have happened), see
+ * LINUX_MIB_DELAYEDACKLOST
+ */
+ SKB_DROP_REASON_TCP_OLD_DATA,
+ /**
+ * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window;
+ * the sequence number of the first byte exceeds the right edge of
+ * the receive window
+ */
+ SKB_DROP_REASON_TCP_OVERWINDOW,
+ /**
+ * @SKB_DROP_REASON_TCP_OFOMERGE: the data of skb is already in the ofo
+ * queue, corresponding to LINUX_MIB_TCPOFOMERGE
+ */
+ SKB_DROP_REASON_TCP_OFOMERGE,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
+ * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
+ */
+ SKB_DROP_REASON_TCP_RFC7323_PAWS,
+ /** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
+ SKB_DROP_REASON_TCP_OLD_SEQUENCE,
+ /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
+ SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
+ * field because ack sequence is not in the window between snd_una
+ * and snd_nxt
+ */
+ SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE,
+ /** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
+ SKB_DROP_REASON_TCP_RESET,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_SYN: Incoming packet has unexpected
+ * SYN flag
+ */
+ SKB_DROP_REASON_TCP_INVALID_SYN,
+ /** @SKB_DROP_REASON_TCP_CLOSE: TCP socket in CLOSE state */
+ SKB_DROP_REASON_TCP_CLOSE,
+ /** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by FASTOPEN request socket */
+ SKB_DROP_REASON_TCP_FASTOPEN,
+ /** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */
+ SKB_DROP_REASON_TCP_OLD_ACK,
+ /** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */
+ SKB_DROP_REASON_TCP_TOO_OLD_ACK,
+ /**
+ * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't
+ * sent yet
+ */
+ SKB_DROP_REASON_TCP_ACK_UNSENT_DATA,
+ /** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from TCP OFO queue */
+ SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE,
+ /** @SKB_DROP_REASON_TCP_OFO_DROP: data already in receive queue */
+ SKB_DROP_REASON_TCP_OFO_DROP,
+ /** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */
+ SKB_DROP_REASON_IP_OUTNOROUTES,
+ /**
+ * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by BPF_PROG_TYPE_CGROUP_SKB
+ * eBPF program
+ */
+ SKB_DROP_REASON_BPF_CGROUP_EGRESS,
+ /** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */
+ SKB_DROP_REASON_IPV6DISABLED,
+ /** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create neigh entry */
+ SKB_DROP_REASON_NEIGH_CREATEFAIL,
+ /** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry in failed state */
+ SKB_DROP_REASON_NEIGH_FAILED,
+ /** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for neigh entry is full */
+ SKB_DROP_REASON_NEIGH_QUEUEFULL,
+ /** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
+ SKB_DROP_REASON_NEIGH_DEAD,
+ /** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
+ SKB_DROP_REASON_TC_EGRESS,
+ /** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
+ SKB_DROP_REASON_SECURITY_HOOK,
+ /**
+ * @SKB_DROP_REASON_QDISC_DROP: dropped by the qdisc on packet output
+ * (failed to enqueue to the current qdisc)
+ */
+ SKB_DROP_REASON_QDISC_DROP,
+ /**
+ * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per-CPU
+ * backlog queue. This can be caused by the backlog queue being full (see
+ * netdev_max_backlog in net.rst) or by the RPS flow limit
+ */
+ SKB_DROP_REASON_CPU_BACKLOG,
+ /** @SKB_DROP_REASON_XDP: dropped by XDP in input path */
+ SKB_DROP_REASON_XDP,
+ /** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */
+ SKB_DROP_REASON_TC_INGRESS,
+ /** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */
+ SKB_DROP_REASON_UNHANDLED_PROTO,
+ /** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */
+ SKB_DROP_REASON_SKB_CSUM,
+ /** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */
+ SKB_DROP_REASON_SKB_GSO_SEG,
+ /**
+ * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space,
+ * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx()
+ */
+ SKB_DROP_REASON_SKB_UCOPY_FAULT,
+ /** @SKB_DROP_REASON_DEV_HDR: device driver specific header/metadata is invalid */
+ SKB_DROP_REASON_DEV_HDR,
+ /**
+ * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv because
+ * one of its data structures is not up/ready/initialized,
+ * e.g., IFF_UP is not set, or the driver-specific tun->tfiles[txq]
+ * is not initialized
+ */
+ SKB_DROP_REASON_DEV_READY,
+ /** @SKB_DROP_REASON_FULL_RING: ring buffer is full */
+ SKB_DROP_REASON_FULL_RING,
+ /** @SKB_DROP_REASON_NOMEM: error due to OOM */
+ SKB_DROP_REASON_NOMEM,
+ /**
+ * @SKB_DROP_REASON_HDR_TRUNC: failed to truncate/extract the header from
+ * the networking data, e.g., failed to pull the protocol header from
+ * frags via pskb_may_pull()
+ */
+ SKB_DROP_REASON_HDR_TRUNC,
+ /**
+ * @SKB_DROP_REASON_TAP_FILTER: dropped by (ebpf) filter directly attached
+ * to tun/tap, e.g., via TUNSETFILTEREBPF
+ */
+ SKB_DROP_REASON_TAP_FILTER,
+ /**
+ * @SKB_DROP_REASON_TAP_TXFILTER: dropped by tx filter implemented at
+ * tun/tap, e.g., check_filter()
+ */
+ SKB_DROP_REASON_TAP_TXFILTER,
+ /** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */
+ SKB_DROP_REASON_ICMP_CSUM,
+ /**
+ * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 2211,
+ * such as a broadcast ICMP_TIMESTAMP
+ */
+ SKB_DROP_REASON_INVALID_PROTO,
+ /**
+ * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to
+ * IPSTATS_MIB_INADDRERRORS
+ */
+ SKB_DROP_REASON_IP_INADDRERRORS,
+ /**
+ * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to
+ * IPSTATS_MIB_INADDRERRORS
+ */
+ SKB_DROP_REASON_IP_INNOROUTES,
+ /**
+ * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceeds the
+ * MTU)
+ */
+ SKB_DROP_REASON_PKT_TOO_BIG,
+ /** @SKB_DROP_REASON_DUP_FRAG: duplicate fragment */
+ SKB_DROP_REASON_DUP_FRAG,
+ /** @SKB_DROP_REASON_FRAG_REASM_TIMEOUT: fragment reassembly timeout */
+ SKB_DROP_REASON_FRAG_REASM_TIMEOUT,
+ /**
+ * @SKB_DROP_REASON_FRAG_TOO_FAR: ipv4 fragment too far.
+ * (/proc/sys/net/ipv4/ipfrag_max_dist)
+ */
+ SKB_DROP_REASON_FRAG_TOO_FAR,
+ /**
+ * @SKB_DROP_REASON_TCP_MINTTL: ipv4 ttl or ipv6 hoplimit below
+ * the threshold (IP_MINTTL or IPV6_MINHOPCOUNT).
+ */
+ SKB_DROP_REASON_TCP_MINTTL,
+ /** @SKB_DROP_REASON_IPV6_BAD_EXTHDR: Bad IPv6 extension header. */
+ SKB_DROP_REASON_IPV6_BAD_EXTHDR,
+ /** @SKB_DROP_REASON_IPV6_NDISC_FRAG: invalid frag (suppress_frag_ndisc). */
+ SKB_DROP_REASON_IPV6_NDISC_FRAG,
+ /** @SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT: invalid hop limit. */
+ SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT,
+ /** @SKB_DROP_REASON_IPV6_NDISC_BAD_CODE: invalid NDISC icmp6 code. */
+ SKB_DROP_REASON_IPV6_NDISC_BAD_CODE,
+ /** @SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS: invalid NDISC options. */
+ SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS,
+ /**
+ * @SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST: NEIGHBOUR SOLICITATION
+ * for another host.
+ */
+ SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST,
+ /** @SKB_DROP_REASON_QUEUE_PURGE: bulk free. */
+ SKB_DROP_REASON_QUEUE_PURGE,
+ /**
+ * @SKB_DROP_REASON_TC_COOKIE_ERROR: An error occurred whilst
+ * processing a tc ext cookie.
+ */
+ SKB_DROP_REASON_TC_COOKIE_ERROR,
+ /**
+ * @SKB_DROP_REASON_PACKET_SOCK_ERROR: generic packet socket errors
+ * after its filter matches an incoming packet.
+ */
+ SKB_DROP_REASON_PACKET_SOCK_ERROR,
+ /** @SKB_DROP_REASON_TC_CHAIN_NOTFOUND: tc chain lookup failed. */
+ SKB_DROP_REASON_TC_CHAIN_NOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TC_RECLASSIFY_LOOP: tc exceeded max reclassify loop
+ * iterations.
+ */
+ SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
+ /**
+ * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
+ * shouldn't be used as a real 'reason' - only for tracing code gen
+ */
+ SKB_DROP_REASON_MAX,
+
+ /**
+ * @SKB_DROP_REASON_SUBSYS_MASK: subsystem mask in drop reasons,
+ * see &enum skb_drop_reason_subsys
+ */
+ SKB_DROP_REASON_SUBSYS_MASK = 0xffff0000,
+};
+
+#define SKB_DROP_REASON_SUBSYS_SHIFT 16
+
+#define SKB_DR_INIT(name, reason) \
+ enum skb_drop_reason name = SKB_DROP_REASON_##reason
+#define SKB_DR(name) \
+ SKB_DR_INIT(name, NOT_SPECIFIED)
+#define SKB_DR_SET(name, reason) \
+ (name = SKB_DROP_REASON_##reason)
+#define SKB_DR_OR(name, reason) \
+ do { \
+ if (name == SKB_DROP_REASON_NOT_SPECIFIED || \
+ name == SKB_NOT_DROPPED_YET) \
+ SKB_DR_SET(name, reason); \
+ } while (0)
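[Editor's note: a minimal usage sketch of these helpers (the function name, the pull size and the includes are assumptions; kfree_skb_reason() consumes the recorded reason):

	static int example_rcv(struct sk_buff *skb)
	{
		SKB_DR(reason);		/* declares reason = NOT_SPECIFIED */

		if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		return NET_RX_SUCCESS;
	drop:
		kfree_skb_reason(skb, reason);
		return NET_RX_DROP;
	}
]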
+
+#endif
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
new file mode 100644
index 000000000000..56cb7be92244
--- /dev/null
+++ b/include/net/dropreason.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_DROPREASON_H
+#define _LINUX_DROPREASON_H
+#include <net/dropreason-core.h>
+
+/**
+ * enum skb_drop_reason_subsys - subsystem tag for (extended) drop reasons
+ */
+enum skb_drop_reason_subsys {
+ /** @SKB_DROP_REASON_SUBSYS_CORE: core drop reasons defined above */
+ SKB_DROP_REASON_SUBSYS_CORE,
+
+ /**
+ * @SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE: mac80211 drop reasons
+ * for unusable frames, see net/mac80211/drop.h
+ */
+ SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
+
+ /**
+ * @SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR: mac80211 drop reasons
+ * for frames still going to monitor, see net/mac80211/drop.h
+ */
+ SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
+
+ /**
+ * @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons,
+ * see net/openvswitch/drop.h
+ */
+ SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
+
+ /** @SKB_DROP_REASON_SUBSYS_NUM: number of subsystems defined */
+ SKB_DROP_REASON_SUBSYS_NUM
+};
+
+struct drop_reason_list {
+ const char * const *reasons;
+ size_t n_reasons;
+};
+
+/* Note: due to dynamic registrations, access must be under RCU */
+extern const struct drop_reason_list __rcu *
+drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM];
+
+void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
+ const struct drop_reason_list *list);
+void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys);
+
+#endif
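[Editor's note: a hedged sketch of a subsystem registering its reason strings through this API (all names below are illustrative; real users generate the table from their own reason enum):

	static const char * const example_reasons[] = {
		"EXAMPLE_FIRST",
		"EXAMPLE_SECOND",
	};

	static const struct drop_reason_list example_reason_list = {
		.reasons   = example_reasons,
		.n_reasons = ARRAY_SIZE(example_reasons),
	};

	static int __init example_init(void)
	{
		drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
					     &example_reason_list);
		return 0;
	}

	static void __exit example_exit(void)
	{
		drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_OPENVSWITCH);
	}
]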
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 75c8fac82017..7c0da9effe4e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -22,6 +22,7 @@
#include <net/devlink.h>
#include <net/switchdev.h>
+struct dsa_8021q_context;
struct tc_action;
struct phy_device;
struct fixed_phy_status;
@@ -45,10 +46,21 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_OCELOT_VALUE 15
#define DSA_TAG_PROTO_AR9331_VALUE 16
#define DSA_TAG_PROTO_RTL4_A_VALUE 17
+#define DSA_TAG_PROTO_HELLCREEK_VALUE 18
+#define DSA_TAG_PROTO_XRS700X_VALUE 19
+#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
+#define DSA_TAG_PROTO_SEVILLE_VALUE 21
+#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
+#define DSA_TAG_PROTO_SJA1110_VALUE 23
+#define DSA_TAG_PROTO_RTL8_4_VALUE 24
+#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
+#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
+#define DSA_TAG_PROTO_LAN937X_VALUE 27
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
+ DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
@@ -65,58 +77,52 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
+ DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
+ DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
+ DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
+ DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
+ DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
+ DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
+ DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
+ DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
+ DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
};
-struct packet_type;
struct dsa_switch;
struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
- struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt);
- int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
- int *offset);
- /* Used to determine which traffic should match the DSA filter in
- * eth_type_trans, and which, if any, should bypass it and be processed
- * as regular on the master net device.
- */
- bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
- unsigned int overhead;
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
+ void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
+ int *offset);
+ int (*connect)(struct dsa_switch *ds);
+ void (*disconnect)(struct dsa_switch *ds);
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
const char *name;
enum dsa_tag_protocol proto;
+ /* Some tagging protocols either mangle or shift the destination MAC
+ * address, in which case the DSA conduit would drop packets on ingress
+ * if what it understands out of the destination MAC address is not in
+ * its RX filter.
+ */
+ bool promisc_on_conduit;
};
-/* This structure defines the control interfaces that are overlayed by the
- * DSA layer on top of the DSA CPU/management net_device instance. This is
- * used by the core net_device layer while calling various net_device_ops
- * function pointers.
- */
-struct dsa_netdevice_ops {
- int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr,
- int cmd);
-};
-
-#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
-#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
- MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
-
-struct dsa_skb_cb {
- struct sk_buff *clone;
-};
-
-struct __dsa_skb_cb {
- struct dsa_skb_cb cb;
- u8 priv[48 - sizeof(struct dsa_skb_cb)];
+struct dsa_lag {
+ struct net_device *dev;
+ unsigned int id;
+ struct mutex fdb_lock;
+ struct list_head fdbs;
+ refcount_t refcount;
};
-#define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
-
-#define DSA_SKB_CB_PRIV(skb) \
- ((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv))
-
struct dsa_switch_tree {
struct list_head list;
+ /* List of switch ports */
+ struct list_head ports;
+
/* Notifier chain for switch-wide events */
struct raw_notifier_head nh;
@@ -126,6 +132,19 @@ struct dsa_switch_tree {
/* Number of switches attached to this tree */
struct kref refcount;
+ /* Maps offloaded LAG netdevs to a zero-based linear ID for
+ * drivers that need it.
+ */
+ struct dsa_lag **lags;
+
+ /* Tagging protocol operations */
+ const struct dsa_device_ops *tag_ops;
+
+ /* Default tagging protocol preferred by the switches in this
+ * tree.
+ */
+ enum dsa_tag_protocol default_proto;
+
/* Has this tree been applied to the hardware? */
bool setup;
@@ -135,13 +154,51 @@ struct dsa_switch_tree {
*/
struct dsa_platform_data *pd;
- /* List of switch ports */
- struct list_head ports;
-
/* List of DSA links composing the routing table */
struct list_head rtable;
+
+ /* Length of "lags" array */
+ unsigned int lags_len;
+
+ /* Track the largest switch index within a tree */
+ unsigned int last_switch;
};
+/* LAG IDs are one-based, the dst->lags array is zero-based */
+#define dsa_lags_foreach_id(_id, _dst) \
+ for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++) \
+ if ((_dst)->lags[(_id) - 1])
+
+#define dsa_lag_foreach_port(_dp, _dst, _lag) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_offloads_lag((_dp), (_lag)))
+
+#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
+
+static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
+ unsigned int id)
+{
+ /* DSA LAG IDs are one-based, dst->lags is zero-based */
+ return dst->lags[id - 1];
+}
+
+static inline int dsa_lag_id(struct dsa_switch_tree *dst,
+ struct net_device *lag_dev)
+{
+ unsigned int id;
+
+ dsa_lags_foreach_id(id, dst) {
+ struct dsa_lag *lag = dsa_lag_by_id(dst, id);
+
+ if (lag->dev == lag_dev)
+ return lag->id;
+ }
+
+ return -ENODEV;
+}
+
/* TC matchall action types */
enum dsa_port_mall_action_type {
DSA_PORT_MALL_MIRROR,
@@ -171,24 +228,34 @@ struct dsa_mall_tc_entry {
};
};
+struct dsa_bridge {
+ struct net_device *dev;
+ unsigned int num;
+ bool tx_fwd_offload;
+ refcount_t refcount;
+};
struct dsa_port {
- /* A CPU port is physically connected to a master device.
- * A user port exposed to userspace has a slave device.
+ /* A CPU port is physically connected to a conduit device. A user port
+ * exposes a network device to user-space, called 'user' here.
*/
union {
- struct net_device *master;
- struct net_device *slave;
+ struct net_device *conduit;
+ struct net_device *user;
};
- /* CPU port tagging operations used by master or slave devices */
+ /* Copy of the tagging protocol operations, for quicker access
+ * in the data path. Valid only for the CPU ports.
+ */
const struct dsa_device_ops *tag_ops;
- /* Copies for faster access in master receive hot path */
+ /* Copies for faster access in conduit receive hot path */
struct dsa_switch_tree *dst;
- struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt);
- bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
+
+ struct dsa_switch *ds;
+
+ unsigned int index;
enum {
DSA_PORT_TYPE_UNUSED = 0,
@@ -197,39 +264,67 @@ struct dsa_port {
DSA_PORT_TYPE_USER,
} type;
- struct dsa_switch *ds;
- unsigned int index;
const char *name;
struct dsa_port *cpu_dp;
- const char *mac;
+ u8 mac[ETH_ALEN];
+
+ u8 stp_state;
+
+ /* Warning: the following bit fields are not atomic, and updating them
+ * can only be done from code paths where concurrency is not possible
+ * (probe time or under rtnl_lock).
+ */
+ u8 vlan_filtering:1;
+
+ /* Managed by DSA on user ports and by drivers on CPU and DSA ports */
+ u8 learning:1;
+
+ u8 lag_tx_enabled:1;
+
+ /* conduit state bits, valid only on CPU ports */
+ u8 conduit_admin_up:1;
+ u8 conduit_oper_up:1;
+
+ /* Valid only on user ports */
+ u8 cpu_port_in_lag:1;
+
+ u8 setup:1;
+
struct device_node *dn;
unsigned int ageing_time;
- bool vlan_filtering;
- u8 stp_state;
- struct net_device *bridge_dev;
+
+ struct dsa_bridge *bridge;
struct devlink_port devlink_port;
struct phylink *pl;
struct phylink_config pl_config;
+ struct dsa_lag *lag;
+ struct net_device *hsr_dev;
struct list_head list;
/*
- * Give the switch driver somewhere to hang its per-port private data
- * structures (accessible from the tagger).
- */
- void *priv;
-
- /*
- * Original copy of the master netdev ethtool_ops
+ * Original copy of the conduit netdev ethtool_ops
*/
const struct ethtool_ops *orig_ethtool_ops;
- /*
- * Original copy of the master netdev net_device_ops
+ /* List of MAC addresses that must be forwarded on this port.
+ * These are only valid on CPU ports and DSA links.
*/
- const struct dsa_netdevice_ops *netdev_ops;
+ struct mutex addr_lists_lock;
+ struct list_head fdbs;
+ struct list_head mdbs;
- bool setup;
+ struct mutex vlans_lock;
+ union {
+ /* List of VLANs that CPU and DSA ports are members of.
+ * Access to this is serialized by the sleepable @vlans_lock.
+ */
+ struct list_head vlans;
+ /* List of VLANs that user ports are members of.
+ * Access to this is serialized by netif_addr_lock_bh().
+ */
+ struct list_head user_vlans;
+ };
};
/* TODO: ideally DSA ports would have a single dp->link_dp member,
@@ -243,9 +338,37 @@ struct dsa_link {
struct list_head list;
};
-struct dsa_switch {
- bool setup;
+enum dsa_db_type {
+ DSA_DB_PORT,
+ DSA_DB_LAG,
+ DSA_DB_BRIDGE,
+};
+
+struct dsa_db {
+ enum dsa_db_type type;
+
+ union {
+ const struct dsa_port *dp;
+ struct dsa_lag lag;
+ struct dsa_bridge bridge;
+ };
+};
+
+struct dsa_mac_addr {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+ struct dsa_db db;
+};
+
+struct dsa_vlan {
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+};
+struct dsa_switch {
struct device *dev;
/*
@@ -254,6 +377,59 @@ struct dsa_switch {
struct dsa_switch_tree *dst;
unsigned int index;
+ /* Warning: the following bit fields are not atomic, and updating them
+ * can only be done from code paths where concurrency is not possible
+ * (probe time or under rtnl_lock).
+ */
+ u32 setup:1;
+
+ /* Disallow bridge core from requesting different VLAN awareness
+ * settings on ports if not hardware-supported
+ */
+ u32 vlan_filtering_is_global:1;
+
+ /* Keep VLAN filtering enabled on ports not offloading any upper */
+ u32 needs_standalone_vlan_filtering:1;
+
+ /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
+ * that have vlan_filtering=0. All drivers should ideally set this (and
+ * then the option would get removed), but it is unknown whether this
+ * would break things or not.
+ */
+ u32 configure_vlan_while_not_filtering:1;
+
+ /* If the switch driver always programs the CPU port as egress tagged
+ * despite the VLAN configuration indicating otherwise, then setting
+ * @untag_bridge_pvid will force the DSA receive path to pop the
+ * bridge's default_pvid VLAN tagged frames to offer a consistent
+ * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
+ * device.
+ */
+ u32 untag_bridge_pvid:1;
+
+ /* Let DSA manage the FDB entries towards the
+ * CPU, based on the software bridge database.
+ */
+ u32 assisted_learning_on_cpu_port:1;
+
+ /* In case vlan_filtering_is_global is set, the VLAN awareness state
+ * should be retrieved from here and not from the per-port settings.
+ */
+ u32 vlan_filtering:1;
+
+ /* For switches that only have the MRU configurable. To ensure the
+ * configured MTU is not exceeded, normalization of MRU on all bridged
+ * interfaces is needed.
+ */
+ u32 mtu_enforcement_ingress:1;
+
+ /* Drivers that isolate the FDBs of multiple bridges must set this
+ * to true to receive the bridge as an argument in .port_fdb_{add,del}
+ * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
+ * passed as zero.
+ */
+ u32 fdb_isolation:1;
+
/* Listener for switch fabric events */
struct notifier_block nb;
@@ -263,6 +439,8 @@ struct dsa_switch {
*/
void *priv;
+ void *tagger_data;
+
/*
* Configuration data for this switch.
*/
@@ -274,50 +452,40 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
- * Slave mii_bus and devices for the individual ports.
+ * User mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
- struct mii_bus *slave_mii_bus;
+ struct mii_bus *user_mii_bus;
/* Ageing Time limits in msecs */
unsigned int ageing_time_min;
unsigned int ageing_time_max;
+ /* Storage for drivers using tag_8021q */
+ struct dsa_8021q_context *tag_8021q_ctx;
+
/* devlink used to represent this switch device */
struct devlink *devlink;
/* Number of switch port queues */
unsigned int num_tx_queues;
- /* Disallow bridge core from requesting different VLAN awareness
- * settings on ports if not hardware-supported
- */
- bool vlan_filtering_is_global;
-
- /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
- * that have vlan_filtering=0. All drivers should ideally set this (and
- * then the option would get removed), but it is unknown whether this
- * would break things or not.
- */
- bool configure_vlan_while_not_filtering;
-
- /* In case vlan_filtering_is_global is set, the VLAN awareness state
- * should be retrieved from here and not from the per-port settings.
- */
- bool vlan_filtering;
-
- /* MAC PCS does not provide link state change interrupt, and requires
- * polling. Flag passed on to PHYLINK.
+ /* Drivers that benefit from having an ID associated with each
+ * offloaded LAG should set this to the maximum number of
+ * supported IDs. DSA will then maintain a mapping of _at
+ * least_ this many IDs, accessible to drivers via
+ * dsa_lag_id().
*/
- bool pcs_poll;
+ unsigned int num_lag_ids;
- /* For switches that only have the MRU configurable. To ensure the
- * configured MTU is not exceeded, normalization of MRU on all bridged
- * interfaces is needed.
+ /* Drivers that support bridge forwarding offload or FDB isolation
+ * should set this to the maximum number of bridges spanning the same
+ * switch tree (or all trees, in the case of cross-tree bridging
+ * support) that can be offloaded.
*/
- bool mtu_enforcement_ingress;
+ unsigned int max_num_bridges;
- size_t num_ports;
+ unsigned int num_ports;
};
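[Editor's note: drivers typically advertise these capability fields before registering the switch; a hedged probe-time sketch (values are illustrative, not from any real driver):

	static void example_probe_setup(struct dsa_switch *ds)
	{
		ds->num_lag_ids     = 4;	/* hw supports 4 LAG groups */
		ds->max_num_bridges = 8;	/* FDB partitions available */
		ds->fdb_isolation   = true;	/* per-bridge FDB isolation */
		ds->num_ports       = 7;
	}
]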
static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
@@ -332,6 +500,32 @@ static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
return NULL;
}
+static inline bool dsa_port_is_dsa(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_port_is_cpu(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_CPU;
+}
+
+static inline bool dsa_port_is_user(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_USER;
+}
+
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
+static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp)
+{
+ return dsa_port_is_cpu(dp) && dp->conduit_admin_up &&
+ dp->conduit_oper_up;
+}
+
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
@@ -352,14 +546,64 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}
+#define dsa_tree_for_each_user_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
+ list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_cpu_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_cpu((_dp)))
+
+#define dsa_switch_for_each_port(_dp, _ds) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_available_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (!dsa_port_is_unused((_dp)))
+
+#define dsa_switch_for_each_user_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_cpu_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
+#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
+ dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
+ struct dsa_port *dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_user_port(dp, ds)
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
+static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp;
u32 mask = 0;
- int p;
- for (p = 0; p < ds->num_ports; p++)
- if (dsa_is_user_port(ds, p))
- mask |= BIT(p);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ mask |= BIT(cpu_dp->index);
return mask;
}
@@ -399,6 +643,50 @@ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
+/* Return true if this is the local port used to reach the CPU port */
+static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
+{
+ if (dsa_is_unused_port(ds, port))
+ return false;
+
+ return port == dsa_upstream_port(ds, port);
+}
+
+/* Return true if this is a DSA port leading away from the CPU */
+static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
+{
+ return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
+}
+
+/* Return the local port used to reach the CPU port */
+static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_available_port(dp, ds) {
+ return dsa_upstream_port(ds, dp->index);
+ }
+
+ return ds->num_ports;
+}
+
+/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
+ * that the routing port from @downstream_ds to @upstream_ds is also the port
+ * which @downstream_ds uses to reach its dedicated CPU.
+ */
+static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
+ struct dsa_switch *downstream_ds)
+{
+ int routing_port;
+
+ if (upstream_ds == downstream_ds)
+ return true;
+
+ routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
+
+ return dsa_is_upstream_port(downstream_ds, routing_port);
+}
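[Editor's note: a concrete reading of the helper above, on an illustrative daisy chain conduit <-> sw0 <-> sw1 <-> sw2:

	/* dsa_switch_is_upstream_of(sw0, sw2) is true: sw2's routing port
	 * towards sw0 is the same port sw2 uses to reach its dedicated CPU.
	 * dsa_switch_is_upstream_of(sw2, sw0) is false: sw0 reaches sw2
	 * through a downstream-facing DSA port.
	 */
]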
+
static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
{
const struct dsa_switch *ds = dp->ds;
@@ -409,15 +697,157 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
return dp->vlan_filtering;
}
+static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->id : 0;
+}
+
+static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->dev : NULL;
+}
+
+static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
+ const struct dsa_lag *lag)
+{
+ return dsa_port_lag_dev_get(dp) == lag->dev;
+}
+
+static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp)
+{
+ if (dp->cpu_port_in_lag)
+ return dsa_port_lag_dev_get(dp->cpu_dp);
+
+ return dp->cpu_dp->conduit;
+}
+
+static inline
+struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
+{
+ if (!dp->bridge)
+ return NULL;
+
+ if (dp->lag)
+ return dp->lag->dev;
+ else if (dp->hsr_dev)
+ return dp->hsr_dev;
+
+ return dp->user;
+}
+
+static inline struct net_device *
+dsa_port_bridge_dev_get(const struct dsa_port *dp)
+{
+ return dp->bridge ? dp->bridge->dev : NULL;
+}
+
+static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
+{
+ return dp->bridge ? dp->bridge->num : 0;
+}
+
+static inline bool dsa_port_bridge_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ struct net_device *br_a = dsa_port_bridge_dev_get(a);
+ struct net_device *br_b = dsa_port_bridge_dev_get(b);
+
+ /* Standalone ports are not in the same bridge as one another */
+ return (!br_a || !br_b) ? false : (br_a == br_b);
+}
+
+static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
+ const struct net_device *dev)
+{
+ return dsa_port_to_bridge_port(dp) == dev;
+}
+
+static inline bool
+dsa_port_offloads_bridge_dev(struct dsa_port *dp,
+ const struct net_device *bridge_dev)
+{
+ /* True for DSA ports connected to a bridge when the event was
+ * emitted for that bridge.
+ */
+ return dsa_port_bridge_dev_get(dp) == bridge_dev;
+}
+
+static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
+ const struct dsa_bridge *bridge)
+{
+ return dsa_port_bridge_dev_get(dp) == bridge->dev;
+}
+
+/* Returns true if any port of this tree offloads the given net_device */
+static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
+ const struct net_device *dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_offloads_bridge_port(dp, dev))
+ return true;
+
+ return false;
+}
+
+/* Returns true if any port of this tree offloads the given bridge */
+static inline bool
+dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
+ const struct net_device *bridge_dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
+ return true;
+
+ return false;
+}
+
+static inline bool dsa_port_tree_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ return a->ds->dst == b->ds->dst;
+}
+
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
+ /*
+ * Tagging protocol helpers called for the CPU ports and DSA links.
+ * @get_tag_protocol retrieves the initial tagging protocol and is
+ * mandatory. Switches which can operate using multiple tagging
+ * protocols should implement @change_tag_protocol and report in
+ * @get_tag_protocol the tagger in current use.
+ */
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mprot);
+ int (*change_tag_protocol)(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto);
+ /*
+ * Method for switch drivers to connect to the tagging protocol driver
+ * in current use. The switch driver can provide handlers for certain
+ * types of packets for switch management.
+ */
+ int (*connect_tag_protocol)(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto);
+ int (*port_change_conduit)(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack);
+
+ /* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
+
+ /* Per-port initialization and destruction methods. Mandatory if the
+ * driver registers devlink port regions, optional otherwise.
+ */
+ int (*port_setup)(struct dsa_switch *ds, int port);
+ void (*port_teardown)(struct dsa_switch *ds, int port);
+
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
@@ -438,15 +868,20 @@ struct dsa_switch_ops {
/*
* PHYLINK integration
*/
- void (*phylink_validate)(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
- int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface);
+ int (*phylink_mac_prepare)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
void (*phylink_mac_config)(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state);
- void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
+ int (*phylink_mac_finish)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -459,7 +894,7 @@ struct dsa_switch_ops {
void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
/*
- * ethtool hardware statistics.
+ * Port statistics counters.
*/
void (*get_strings)(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data);
@@ -468,6 +903,21 @@ struct dsa_switch_ops {
int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
+ void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_stats64)(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s);
+ void (*get_pause_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats);
+ void (*self_test)(struct dsa_switch *ds, int port,
+ struct ethtool_test *etest, u64 *data);
/*
* ethtool Wake-on-LAN
@@ -484,6 +934,29 @@ struct dsa_switch_ops {
struct ethtool_ts_info *ts);
/*
+ * ethtool MAC merge layer
+ */
+ int (*get_mm)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state);
+ int (*set_mm)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats);
+
+ /*
+ * DCB ops
+ */
+ int (*port_get_default_prio)(struct dsa_switch *ds, int port);
+ int (*port_set_default_prio)(struct dsa_switch *ds, int port,
+ u8 prio);
+ int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
+ int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio);
+ int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio);
+
+ /*
* Suspend and resume
*/
int (*suspend)(struct dsa_switch *ds);
@@ -496,13 +969,31 @@ struct dsa_switch_ops {
struct phy_device *phy);
void (*port_disable)(struct dsa_switch *ds, int port);
+
+ /*
+ * Notification for MAC address changes on user ports. Drivers can
+ * currently only veto operations. They should not use the method to
+ * program the hardware, since the operation is not rolled back in case
+ * of other errors.
+ */
+ int (*port_set_mac_address)(struct dsa_switch *ds, int port,
+ const unsigned char *addr);
+
+ /*
+ * Compatibility between device trees defining multiple CPU ports and
+ * drivers which cannot simply use, by default, the numerically smallest
+ * CPU port of a switch for their local ports. This can return NULL,
+ * meaning "don't know/don't care".
+ */
+ struct dsa_port *(*preferred_default_local_cpu_port)(struct dsa_switch *ds);
+
/*
* Port's MAC EEE settings
*/
int (*set_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
int (*get_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
@@ -519,49 +1010,77 @@ struct dsa_switch_ops {
struct ethtool_regs *regs, void *p);
/*
+ * Upper device tracking.
+ */
+ int (*port_prechangeupper)(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info);
+
+ /*
* Bridge integration
*/
int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
int (*port_bridge_join)(struct dsa_switch *ds, int port,
- struct net_device *bridge);
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
void (*port_bridge_leave)(struct dsa_switch *ds, int port,
- struct net_device *bridge);
+ struct dsa_bridge bridge);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ int (*port_mst_state_set)(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *state);
void (*port_fast_age)(struct dsa_switch *ds, int port);
- int (*port_egress_floods)(struct dsa_switch *ds, int port,
- bool unicast, bool multicast);
+ int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
+ int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+ int (*port_bridge_flags)(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+ void (*port_set_host_flood)(struct dsa_switch *ds, int port,
+ bool uc, bool mc);
/*
* VLAN support
*/
int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
- bool vlan_filtering);
- int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
- void (*port_vlan_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+ int (*port_vlan_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
+ int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti);
+
/*
* Forwarding database
*/
int (*port_fdb_add)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int (*port_fdb_del)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
+ int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+ int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
/*
* Multicast database
*/
- int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
- void (*port_mdb_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ int (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int (*port_mdb_del)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
/*
* RXNFC
*/
@@ -581,7 +1100,7 @@ struct dsa_switch_ops {
struct flow_cls_offload *cls, bool ingress);
int (*port_mirror_add)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress);
+ bool ingress, struct netlink_ext_ack *extack);
void (*port_mirror_del)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int (*port_policer_add)(struct dsa_switch *ds, int port,
@@ -595,10 +1114,19 @@ struct dsa_switch_ops {
*/
int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
int sw_index, int port,
- struct net_device *br);
+ struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack);
void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
int sw_index, int port,
- struct net_device *br);
+ struct dsa_bridge bridge);
+ int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
+ int port);
+ int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+ int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag);
/*
* PTP functionality
@@ -607,16 +1135,53 @@ struct dsa_switch_ops {
struct ifreq *ifr);
int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
struct ifreq *ifr);
- bool (*port_txtstamp)(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type);
+ void (*port_txtstamp)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /* Devlink parameters */
+ /* Devlink parameters, etc */
int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
+ int (*devlink_info_get)(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_pool_get)(struct dsa_switch *ds,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
+ unsigned int sb_index);
+ int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
+ unsigned int sb_index);
+ int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
/*
* MTU change functionality. Switches can also adjust their MRU through
@@ -627,6 +1192,52 @@ struct dsa_switch_ops {
int (*port_change_mtu)(struct dsa_switch *ds, int port,
int new_mtu);
int (*port_max_mtu)(struct dsa_switch *ds, int port);
+
+ /*
+ * LAG integration
+ */
+ int (*port_lag_change)(struct dsa_switch *ds, int port);
+ int (*port_lag_join)(struct dsa_switch *ds, int port,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+ int (*port_lag_leave)(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
+ /*
+ * HSR integration
+ */
+ int (*port_hsr_join)(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack);
+ int (*port_hsr_leave)(struct dsa_switch *ds, int port,
+ struct net_device *hsr);
+
+ /*
+ * MRP integration
+ */
+ int (*port_mrp_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp);
+ int (*port_mrp_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp);
+ int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+ int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+
+ /*
+ * tag_8021q operations
+ */
+ int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags);
+ int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+
+ /*
+ * DSA conduit tracking operations
+ */
+ void (*conduit_state_change)(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational);
};
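As a rough illustration of the ops table above, a minimal driver only needs the mandatory @get_tag_protocol plus whatever it supports; all other members may stay NULL. A sketch (driver names and tagger choice are hypothetical):

static enum dsa_tag_protocol example_get_tag_protocol(struct dsa_switch *ds,
						      int port,
						      enum dsa_tag_protocol mprot)
{
	return DSA_TAG_PROTO_NONE;	/* placeholder tagger choice */
}

static int example_setup(struct dsa_switch *ds)
{
	return 0;	/* switch-wide initialization would go here */
}

static const struct dsa_switch_ops example_switch_ops = {
	.get_tag_protocol	= example_get_tag_protocol,
	.setup			= example_setup,
};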
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
@@ -658,18 +1269,55 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void dsa_devlink_region_destroy(struct devlink_region *region);
+
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
struct dsa_devlink_priv {
struct dsa_switch *ds;
};
+static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
+{
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline
+struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
+{
+ struct devlink *dl = port->devlink;
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline int dsa_devlink_port_to_port(struct devlink_port *port)
+{
+ return port->index;
+}
+
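A sketch of recovering DSA context from devlink objects, e.g. inside a hypothetical port region callback (signature simplified, not an actual devlink hook):

static int example_port_snapshot(struct devlink_port *dlp)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	dev_dbg(ds->dev, "snapshot of port %d requested\n", port);
	return 0;
}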
struct dsa_switch_driver {
struct list_head list;
const struct dsa_switch_ops *ops;
};
-struct net_device *dsa_dev_to_net_device(struct device *dev);
+bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
@@ -680,54 +1328,37 @@ static inline bool netdev_uses_dsa(const struct net_device *dev)
return false;
}
-static inline bool dsa_can_decode(const struct sk_buff *skb,
- struct net_device *dev)
+/* All DSA tags that push the EtherType to the right (basically all except tail
+ * tags, which don't break dissection) can be treated the same from the
+ * perspective of the flow dissector.
+ *
+ * We need to return:
+ * - offset: the (B - A) difference between:
+ * A. the position of the real EtherType and
+ * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
+ * after the normal EtherType was supposed to be)
+ * The offset in bytes is exactly equal to the tagger overhead (and half of
+ * that, in __be16 shorts).
+ *
+ * - proto: the value of the real EtherType.
+ */
+static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
+ __be16 *proto, int *offset)
{
#if IS_ENABLED(CONFIG_NET_DSA)
- return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev);
-#endif
- return false;
-}
-
-#if IS_ENABLED(CONFIG_NET_DSA)
-static inline int __dsa_netdevice_ops_check(struct net_device *dev)
-{
- int err = -EOPNOTSUPP;
-
- if (!dev->dsa_ptr)
- return err;
+ const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
+ int tag_len = ops->needed_headroom;
- if (!dev->dsa_ptr->netdev_ops)
- return err;
-
- return 0;
-}
-
-static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- const struct dsa_netdevice_ops *ops;
- int err;
-
- err = __dsa_netdevice_ops_check(dev);
- if (err)
- return err;
-
- ops = dev->dsa_ptr->netdev_ops;
-
- return ops->ndo_do_ioctl(dev, ifr, cmd);
-}
-#else
-static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- return -EOPNOTSUPP;
-}
+ *offset = tag_len;
+ *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
#endif
+}
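Taggers whose header sits before the EtherType can typically delegate their flow dissection hook to the generic helper; a sketch (wiring hypothetical):

static void example_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				     int *offset)
{
	/* For a tagger with needed_headroom == 8, this yields offset = 8
	 * and reads the real EtherType from the 4th __be16 of skb->data.
	 */
	dsa_tag_generic_flow_dissect(skb, proto, offset);
}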
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
@@ -742,127 +1373,16 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
}
#endif /* CONFIG_PM_SLEEP */
-enum dsa_notifier_type {
- DSA_PORT_REGISTER,
- DSA_PORT_UNREGISTER,
-};
-
-struct dsa_notifier_info {
- struct net_device *dev;
-};
-
-struct dsa_notifier_register_info {
- struct dsa_notifier_info info; /* must be first */
- struct net_device *master;
- unsigned int port_number;
- unsigned int switch_number;
-};
-
-static inline struct net_device *
-dsa_notifier_info_to_dev(const struct dsa_notifier_info *info)
-{
- return info->dev;
-}
-
#if IS_ENABLED(CONFIG_NET_DSA)
-int register_dsa_notifier(struct notifier_block *nb);
-int unregister_dsa_notifier(struct notifier_block *nb);
-int call_dsa_notifiers(unsigned long val, struct net_device *dev,
- struct dsa_notifier_info *info);
+bool dsa_user_dev_check(const struct net_device *dev);
#else
-static inline int register_dsa_notifier(struct notifier_block *nb)
+static inline bool dsa_user_dev_check(const struct net_device *dev)
{
- return 0;
-}
-
-static inline int unregister_dsa_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
- struct dsa_notifier_info *info)
-{
- return NOTIFY_DONE;
+ return false;
}
#endif
-/* Broadcom tag specific helpers to insert and extract queue/port number */
-#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
-#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
-#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
-
-
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
-int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
-int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
-int dsa_port_get_phy_sset_count(struct dsa_port *dp);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
-struct dsa_tag_driver {
- const struct dsa_device_ops *ops;
- struct list_head list;
- struct module *owner;
-};
-
-void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
- unsigned int count,
- struct module *owner);
-void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
- unsigned int count);
-
-#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
-static int __init dsa_tag_driver_module_init(void) \
-{ \
- dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
- THIS_MODULE); \
- return 0; \
-} \
-module_init(dsa_tag_driver_module_init); \
- \
-static void __exit dsa_tag_driver_module_exit(void) \
-{ \
- dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
-} \
-module_exit(dsa_tag_driver_module_exit)
-
-/**
- * module_dsa_tag_drivers() - Helper macro for registering DSA tag
- * drivers
- * @__ops_array: Array of tag driver strucutres
- *
- * Helper macro for DSA tag drivers which do not do anything special
- * in module init/exit. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit().
- */
-#define module_dsa_tag_drivers(__ops_array) \
-dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
-
-#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
-
-/* Create a static structure we can build a linked list of dsa_tag
- * drivers
- */
-#define DSA_TAG_DRIVER(__ops) \
-static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
- .ops = &__ops, \
-}
-
-/**
- * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
- * driver
- * @__ops: Single tag driver structures
- *
- * Helper macro for DSA tag drivers which do not do anything special
- * in module init/exit. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit().
- */
-#define module_dsa_tag_driver(__ops) \
-DSA_TAG_DRIVER(__ops); \
- \
-static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
- &DSA_TAG_DRIVER_NAME(__ops) \
-}; \
-module_dsa_tag_drivers(dsa_tag_driver_array)
#endif
-
diff --git a/include/net/dsa_stubs.h b/include/net/dsa_stubs.h
new file mode 100644
index 000000000000..6f384897f287
--- /dev/null
+++ b/include/net/dsa_stubs.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/net/dsa_stubs.h - Stubs for the Distributed Switch Architecture framework
+ */
+
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <net/dsa.h>
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+
+extern const struct dsa_stubs *dsa_stubs;
+
+struct dsa_stubs {
+ int (*conduit_hwtstamp_validate)(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+};
+
+static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ /* rtnl_lock() is a sufficient guarantee, because as long as
+ * netdev_uses_dsa() returns true, the dsa_core module is still
+ * registered, and so dsa_unregister_stubs() couldn't have run.
+ * For netdev_uses_dsa() to start returning false, dsa_conduit_teardown()
+ * must first have executed, and that requires rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ return dsa_stubs->conduit_hwtstamp_validate(dev, config, extack);
+}
+
+#else
+
+static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+#endif
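A sketch of the intended call site: core timestamping code validates a configuration before applying it, under rtnl_lock as the stub requires (caller name hypothetical):

static int example_set_hwtstamp(struct net_device *dev,
				struct kernel_hwtstamp_config *cfg,
				struct netlink_ext_ack *extack)
{
	int err;

	/* No-op returning 0 unless dev is a DSA conduit */
	err = dsa_conduit_hwtstamp_validate(dev, cfg, extack);
	if (err)
		return err;

	/* ... apply cfg to the device ... */
	return 0;
}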
diff --git a/include/net/dst.h b/include/net/dst.h
index 6ae2e625050d..0aa331bd2fdb 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -16,8 +16,10 @@
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>
+#include <linux/rcuref.h>
#include <net/neighbour.h>
#include <asm/processor.h>
+#include <linux/indirect_call_wrapper.h>
struct sk_buff;
@@ -60,21 +62,35 @@ struct dst_entry {
unsigned short trailer_len; /* space to reserve at tail */
/*
- * __refcnt wants to be on a different cache line from
+ * __rcuref wants to be on a different cache line from
* input/output/ops or performance tanks badly
*/
#ifdef CONFIG_64BIT
- atomic_t __refcnt; /* 64-bit offset 64 */
+ rcuref_t __rcuref; /* 64-bit offset 64 */
#endif
int __use;
unsigned long lastuse;
- struct lwtunnel_state *lwtstate;
struct rcu_head rcu_head;
short error;
short __pad;
__u32 tclassid;
#ifndef CONFIG_64BIT
- atomic_t __refcnt; /* 32-bit offset 64 */
+ struct lwtunnel_state *lwtstate;
+ rcuref_t __rcuref; /* 32-bit offset 64 */
+#endif
+ netdevice_tracker dev_tracker;
+
+ /*
+ * Used by rtable and rt6_info. Moves lwtstate into the next cache
+ * line on 64bit so that lwtstate does not cause false sharing with
+ * __rcuref when the latter is contended. This also puts the
+ * frequently accessed members of rtable and rt6_info out of the
+ * __rcuref cache line.
+ */
+ struct list_head rt_uncached;
+ struct uncached_list *rt_uncached_list;
+#ifdef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
#endif
};
@@ -193,9 +209,11 @@ dst_feature(const struct dst_entry *dst, u32 feature)
return dst_metric(dst, RTAX_FEATURES) & feature;
}
+INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *));
+INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *));
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- return dst->ops->mtu(dst);
+ return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
@@ -204,27 +222,20 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
return msecs_to_jiffies(dst_metric(dst, metric));
}
-static inline u32
-dst_allfrag(const struct dst_entry *dst)
-{
- int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
- return ret;
-}
-
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
- return dst_metric(dst, RTAX_LOCK) & (1<<metric);
+ return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}
static inline void dst_hold(struct dst_entry *dst)
{
/*
* If your kernel compilation stops here, please check
- * the placement of __refcnt in struct dst_entry
+ * the placement of __rcuref in struct dst_entry
*/
- BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
- WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
+ BUILD_BUG_ON(offsetof(struct dst_entry, __rcuref) & 63);
+ WARN_ON(!rcuref_get(&dst->__rcuref));
}
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
@@ -235,12 +246,6 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
}
}
-static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
-{
- dst_hold(dst);
- dst_use_noref(dst, time);
-}
-
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
@@ -274,6 +279,7 @@ static inline void skb_dst_drop(struct sk_buff *skb)
static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
+ nskb->slow_gro |= !!refdst;
nskb->_skb_refdst = refdst;
if (!(nskb->_skb_refdst & SKB_DST_NOREF))
dst_clone(skb_dst(nskb));
@@ -293,7 +299,7 @@ static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb
*/
static inline bool dst_hold_safe(struct dst_entry *dst)
{
- return atomic_inc_not_zero(&dst->__refcnt);
+ return rcuref_get(&dst->__rcuref);
}
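With the conversion to rcuref, the usual lookup pattern is unchanged; a sketch, assuming the dst was found under RCU protection:

static struct dst_entry *example_dst_get(struct dst_entry *dst)
{
	/* rcuref_get() fails once the count has dropped to zero, so a
	 * dying dst is never resurrected.
	 */
	if (dst && !dst_hold_safe(dst))
		return NULL;

	return dst;
}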
/**
@@ -313,6 +319,7 @@ static inline bool skb_dst_force(struct sk_buff *skb)
dst = NULL;
skb->_skb_refdst = (unsigned long)dst;
+ skb->slow_gro |= !!dst;
}
return skb->_skb_refdst != 0UL;
@@ -356,9 +363,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{
- /* TODO : stats should be SMP safe */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ DEV_STATS_INC(dev, rx_packets);
+ DEV_STATS_ADD(dev, rx_bytes, skb->len);
__skb_tunnel_rx(skb, dev, net);
}
@@ -379,12 +385,11 @@ static inline int dst_discard(struct sk_buff *skb)
{
return dst_discard_out(&init_net, skb->sk, skb);
}
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
- struct net_device *dev, int initial_ref, int initial_obsolete,
+ struct net_device *dev, int initial_obsolete,
unsigned short flags);
-struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);
static inline void dst_confirm(struct dst_entry *dst)
@@ -400,14 +405,12 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
struct sk_buff *skb)
{
- struct neighbour *n = NULL;
+ struct neighbour *n;
- /* The packets from tunnel devices (eg bareudp) may have only
- * metadata in the dst pointer of skb. Hence a pointer check of
- * neigh_lookup is needed.
- */
- if (dst->ops->neigh_lookup)
- n = dst->ops->neigh_lookup(dst, skb, NULL);
+ if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
+ return NULL;
+
+ n = dst->ops->neigh_lookup(dst, skb, NULL);
return IS_ERR(n) ? NULL : n;
}
@@ -437,22 +440,36 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
dst->expires = expires;
}
+INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
+ struct sk_buff *));
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- return skb_dst(skb)->output(net, sk, skb);
+ return INDIRECT_CALL_INET(skb_dst(skb)->output,
+ ip6_output, ip_output,
+ net, sk, skb);
}
+INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
- return skb_dst(skb)->input(skb);
+ return INDIRECT_CALL_INET(skb_dst(skb)->input,
+ ip6_input, ip_local_deliver, skb);
}
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
+ u32));
+INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+ u32));
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
if (dst->obsolete)
- dst = dst->ops->check(dst, cookie);
+ dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
+ ipv4_dst_check, dst, cookie);
return dst;
}
@@ -535,4 +552,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu, bool confirm_neigh);
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
+
#endif /* _NET_DST_H */
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index 67634675e919..df6622a5fe98 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
}
/**
+ * dst_cache_reset_now - invalidate the cache contents immediately
+ * @dst_cache: the cache
+ *
+ * The caller must be sure there are no concurrent users, as this frees
+ * all cached dst entries immediately, rather than waiting for the next
+ * per-cpu usage like dst_cache_reset does. Most callers should use the
+ * faster, lazily-freeing dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
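A sketch of the intended split, assuming a tunnel teardown path with exclusive access to the cache:

static void example_tunnel_teardown(struct dst_cache *cache)
{
	/* Safe here: no concurrent users remain at teardown time */
	dst_cache_reset_now(cache);
	dst_cache_destroy(cache);
}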
+/**
* dst_cache_init - initialize the cache, allocating the required storage
* @dst_cache: the cache
* @gfp: allocation flags
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 56cb3c38569a..1b7fae4c6b24 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -4,11 +4,14 @@
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
+#include <net/macsec.h>
#include <net/dst.h>
enum metadata_type {
METADATA_IP_TUNNEL,
METADATA_HW_PORT_MUX,
+ METADATA_MACSEC,
+ METADATA_XFRM,
};
struct hw_port_info {
@@ -16,12 +19,24 @@ struct hw_port_info {
u32 port_id;
};
+struct macsec_info {
+ sci_t sci;
+};
+
+struct xfrm_md_info {
+ u32 if_id;
+ int link;
+ struct dst_entry *dst_orig;
+};
+
struct metadata_dst {
struct dst_entry dst;
enum metadata_type type;
union {
struct ip_tunnel_info tun_info;
struct hw_port_info port_info;
+ struct macsec_info macsec_info;
+ struct xfrm_md_info xfrm_info;
} u;
};
@@ -45,12 +60,35 @@ skb_tunnel_info(const struct sk_buff *skb)
return &md_dst->u.tun_info;
dst = skb_dst(skb);
- if (dst && dst->lwtstate)
+ if (dst && dst->lwtstate &&
+ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
+ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
return lwt_tun_info(dst->lwtstate);
return NULL;
}
+static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
+{
+ return (struct xfrm_md_info *)lwt->data;
+}
+
+static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst && md_dst->type == METADATA_XFRM)
+ return &md_dst->u.xfrm_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate &&
+ dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
+ return lwt_xfrm_info(dst->lwtstate);
+
+ return NULL;
+}
+
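A sketch of a consumer, assuming a driver that steers output by xfrm interface id:

static u32 example_skb_xfrm_if_id(const struct sk_buff *skb)
{
	struct xfrm_md_info *info = skb_xfrm_md_info(skb);

	/* Covers both METADATA_XFRM dsts and LWTUNNEL_ENCAP_XFRM states */
	return info ? info->if_id : 0;
}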
static inline bool skb_valid_dst(const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -80,6 +118,12 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) +
a->u.tun_info.options_len);
+ case METADATA_MACSEC:
+ return memcmp(&a->u.macsec_info, &b->u.macsec_info,
+ sizeof(a->u.macsec_info));
+ case METADATA_XFRM:
+ return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
+ sizeof(a->u.xfrm_info));
default:
return 1;
}
@@ -121,8 +165,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+ /* Unclone the dst cache if there is one */
+ if (new_md->u.tun_info.dst_cache.cache) {
+ int ret;
+
+ ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+ if (ret) {
+ metadata_dst_free(new_md);
+ return ERR_PTR(ret);
+ }
+ }
+#endif
+
skb_dst_drop(skb);
- dst_hold(&new_md->dst);
skb_dst_set(skb, &new_md->dst);
return new_md;
}
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 88ff7bb2bb9b..6d1c8541183d 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -16,14 +16,14 @@ struct dst_ops {
unsigned short family;
unsigned int gc_thresh;
- int (*gc)(struct dst_ops *ops);
+ void (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*mtu)(const struct dst_entry *);
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
- struct net_device *dev, int how);
+ struct net_device *dev);
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
diff --git a/include/net/eee.h b/include/net/eee.h
new file mode 100644
index 000000000000..84837aba3cd9
--- /dev/null
+++ b/include/net/eee.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _EEE_H
+#define _EEE_H
+
+#include <linux/types.h>
+
+struct eee_config {
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_enabled;
+};
+
+static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
+{
+ /* eee_enabled is the master on/off */
+ if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
+ return false;
+
+ return true;
+}
+
+static inline void eeecfg_to_eee(struct ethtool_keee *eee,
+ const struct eee_config *eeecfg)
+{
+ eee->tx_lpi_timer = eeecfg->tx_lpi_timer;
+ eee->tx_lpi_enabled = eeecfg->tx_lpi_enabled;
+ eee->eee_enabled = eeecfg->eee_enabled;
+}
+
+static inline void eee_to_eeecfg(struct eee_config *eeecfg,
+ const struct ethtool_keee *eee)
+{
+ eeecfg->tx_lpi_timer = eee->tx_lpi_timer;
+ eeecfg->tx_lpi_enabled = eee->tx_lpi_enabled;
+ eeecfg->eee_enabled = eee->eee_enabled;
+}
+
+#endif
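A sketch of the intended usage, assuming a MAC driver that stores an eee_config and reacts to an ethtool set_eee request:

static bool example_set_eee(struct eee_config *cfg,
			    const struct ethtool_keee *eee)
{
	eee_to_eeecfg(cfg, eee);

	/* Tell the caller whether the MAC may now enter LPI when idle */
	return eeecfg_mac_can_tx_lpi(cfg);
}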
diff --git a/include/net/erspan.h b/include/net/erspan.h
index 0d9e86bd9893..6cb4cbd6a48f 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -58,6 +58,9 @@
* GRE proto ERSPAN type I/II = 0x88BE, type III = 0x22EB
*/
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
#include <uapi/linux/erspan.h>
#define ERSPAN_VERSION 0x1 /* ERSPAN type II */
diff --git a/include/net/esp.h b/include/net/esp.h
index 9c5637d41d95..322950727dd0 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -5,6 +5,7 @@
#include <linux/skbuff.h>
struct ip_esp_hdr;
+struct xfrm_state;
static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
{
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 78519ed42ab4..73810f3ca492 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -10,6 +10,9 @@
#ifndef LINUX_NET_ETHOC_H
#define LINUX_NET_ETHOC_H 1
+#include <linux/if.h>
+#include <linux/types.h>
+
struct ethoc_platform_data {
u8 hwaddr[IFHWADDRLEN];
s8 phy_id;
diff --git a/include/net/failover.h b/include/net/failover.h
index bb15438f39c7..f2b42b4b9cd6 100644
--- a/include/net/failover.h
+++ b/include/net/failover.h
@@ -25,6 +25,7 @@ struct failover_ops {
struct failover {
struct list_head list;
struct net_device __rcu *failover_dev;
+ netdevice_tracker dev_tracker;
struct failover_ops __rcu *ops;
};
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 4b10676c69d1..d17855c52ef9 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -69,7 +69,7 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
- bool (*suppress)(struct fib_rule *,
+ bool (*suppress)(struct fib_rule *, int,
struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
@@ -91,7 +91,6 @@ struct fib_rules_ops {
void (*flush_cache)(struct fib_rules_ops *ops);
int nlgroup;
- const struct nla_policy *policy;
struct list_head rules_list;
struct module *owner;
struct net *fro_net;
@@ -103,26 +102,6 @@ struct fib_rule_notifier_info {
struct fib_rule *rule;
};
-#define FRA_GENERIC_POLICY \
- [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 }, \
- [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
- [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
- [FRA_PRIORITY] = { .type = NLA_U32 }, \
- [FRA_FWMARK] = { .type = NLA_U32 }, \
- [FRA_TUN_ID] = { .type = NLA_U64 }, \
- [FRA_FWMASK] = { .type = NLA_U32 }, \
- [FRA_TABLE] = { .type = NLA_U32 }, \
- [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
- [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
- [FRA_GOTO] = { .type = NLA_U32 }, \
- [FRA_L3MDEV] = { .type = NLA_U8 }, \
- [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }, \
- [FRA_PROTOCOL] = { .type = NLA_U8 }, \
- [FRA_IP_PROTO] = { .type = NLA_U8 }, \
- [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }, \
- [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
-
-
static inline void fib_rule_get(struct fib_rule *rule)
{
refcount_inc(&rule->refcnt);
@@ -193,8 +172,7 @@ void fib_rules_unregister(struct fib_rules_ops *);
int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
-int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
- u32 flags);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
@@ -218,7 +196,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
struct fib_lookup_arg *arg));
INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg));
INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg));
#endif
diff --git a/include/net/firewire.h b/include/net/firewire.h
index 299e5df38552..8fbff8d77865 100644
--- a/include/net/firewire.h
+++ b/include/net/firewire.h
@@ -2,6 +2,8 @@
#ifndef _NET_FIREWIRE_H
#define _NET_FIREWIRE_H
+#include <linux/types.h>
+
/* Pseudo L2 address */
#define FWNET_ALEN 16
union fwnet_hwaddr {
@@ -11,8 +13,7 @@ union fwnet_hwaddr {
__be64 uniq_id; /* EUI-64 */
u8 max_rec; /* max packet size */
u8 sspd; /* max speed */
- __be16 fifo_hi; /* hi 16bits of FIFO addr */
- __be32 fifo_lo; /* lo 32bits of FIFO addr */
+ u8 fifo[6]; /* FIFO addr */
} __packed uc;
};
diff --git a/include/net/flow.h b/include/net/flow.h
index 929d3ca614d0..335bbc52171c 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -8,12 +8,13 @@
#ifndef _NET_FLOW_H
#define _NET_FLOW_H
-#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
-#include <net/flow_dissector.h>
+#include <linux/container_of.h>
#include <linux/uidgid.h>
+struct flow_keys;
+
/*
* ifindex generation is per-net namespace, and loopback is
* always the 1st device in ns (see net_dev_init), thus any
@@ -29,6 +30,7 @@ struct flowi_tunnel {
struct flowi_common {
int flowic_oif;
int flowic_iif;
+ int flowic_l3mdev;
__u32 flowic_mark;
__u8 flowic_tos;
__u8 flowic_scope;
@@ -36,11 +38,10 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
-#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
kuid_t flowic_uid;
- struct flowi_tunnel flowic_tun_key;
__u32 flowic_multipath_hash;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
@@ -54,12 +55,6 @@ union flowi_uli {
__u8 code;
} icmpt;
- struct {
- __le16 dport;
- __le16 sport;
- } dnports;
-
- __be32 spi;
__be32 gre_key;
struct {
@@ -71,6 +66,7 @@ struct flowi4 {
struct flowi_common __fl_common;
#define flowi4_oif __fl_common.flowic_oif
#define flowi4_iif __fl_common.flowic_iif
+#define flowi4_l3mdev __fl_common.flowic_l3mdev
#define flowi4_mark __fl_common.flowic_mark
#define flowi4_tos __fl_common.flowic_tos
#define flowi4_scope __fl_common.flowic_scope
@@ -90,7 +86,6 @@ struct flowi4 {
#define fl4_dport uli.ports.dport
#define fl4_icmp_type uli.icmpt.type
#define fl4_icmp_code uli.icmpt.code
-#define fl4_ipsec_spi uli.spi
#define fl4_mh_type uli.mht.type
#define fl4_gre_key uli.gre_key
} __attribute__((__aligned__(BITS_PER_LONG/8)));
@@ -104,6 +99,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
{
fl4->flowi4_oif = oif;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ fl4->flowi4_l3mdev = 0;
fl4->flowi4_mark = mark;
fl4->flowi4_tos = tos;
fl4->flowi4_scope = scope;
@@ -116,14 +112,14 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_multipath_hash = 0;
}
/* Reset some input parameters after previous lookup */
-static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
+static inline void flowi4_update_output(struct flowi4 *fl4, int oif,
__be32 daddr, __be32 saddr)
{
fl4->flowi4_oif = oif;
- fl4->flowi4_tos = tos;
fl4->daddr = daddr;
fl4->saddr = saddr;
}
@@ -133,6 +129,7 @@ struct flowi6 {
struct flowi_common __fl_common;
#define flowi6_oif __fl_common.flowic_oif
#define flowi6_iif __fl_common.flowic_iif
+#define flowi6_l3mdev __fl_common.flowic_l3mdev
#define flowi6_mark __fl_common.flowic_mark
#define flowi6_scope __fl_common.flowic_scope
#define flowi6_proto __fl_common.flowic_proto
@@ -149,36 +146,20 @@ struct flowi6 {
#define fl6_dport uli.ports.dport
#define fl6_icmp_type uli.icmpt.type
#define fl6_icmp_code uli.icmpt.code
-#define fl6_ipsec_spi uli.spi
#define fl6_mh_type uli.mht.type
#define fl6_gre_key uli.gre_key
__u32 mp_hash;
} __attribute__((__aligned__(BITS_PER_LONG/8)));
-struct flowidn {
- struct flowi_common __fl_common;
-#define flowidn_oif __fl_common.flowic_oif
-#define flowidn_iif __fl_common.flowic_iif
-#define flowidn_mark __fl_common.flowic_mark
-#define flowidn_scope __fl_common.flowic_scope
-#define flowidn_proto __fl_common.flowic_proto
-#define flowidn_flags __fl_common.flowic_flags
- __le16 daddr;
- __le16 saddr;
- union flowi_uli uli;
-#define fld_sport uli.ports.sport
-#define fld_dport uli.ports.dport
-} __attribute__((__aligned__(BITS_PER_LONG/8)));
-
struct flowi {
union {
struct flowi_common __fl_common;
struct flowi4 ip4;
struct flowi6 ip6;
- struct flowidn dn;
} u;
#define flowi_oif u.__fl_common.flowic_oif
#define flowi_iif u.__fl_common.flowic_iif
+#define flowi_l3mdev u.__fl_common.flowic_l3mdev
#define flowi_mark u.__fl_common.flowic_mark
#define flowi_tos u.__fl_common.flowic_tos
#define flowi_scope u.__fl_common.flowic_scope
@@ -194,14 +175,19 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
return container_of(fl4, struct flowi, u.ip4);
}
+static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
+{
+ return &(fl4->__fl_common);
+}
+
static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
{
return container_of(fl6, struct flowi, u.ip6);
}
-static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
{
- return container_of(fldn, struct flowi, u.dn);
+ return &(fl6->__fl_common);
}
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index cc10b10dc3a1..1a7131d6cb0e 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -14,7 +14,9 @@ struct sk_buff;
/**
* struct flow_dissector_key_control:
- * @thoff: Transport header offset
+ * @thoff: Transport header offset
+ * @addr_type: Type of key. One of FLOW_DISSECTOR_KEY_*
+ * @flags: Key flags. Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAG|ENCAPSULATION)
*/
struct flow_dissector_key_control {
u16 thoff;
@@ -36,8 +38,9 @@ enum flow_dissect_ret {
/**
* struct flow_dissector_key_basic:
- * @n_proto: Network header protocol (eg. IPv4/IPv6)
+ * @n_proto: Network header protocol (eg. IPv4/IPv6)
* @ip_proto: Transport header protocol (eg. TCP/UDP)
+ * @padding: Unused
*/
struct flow_dissector_key_basic {
__be16 n_proto;
@@ -59,6 +62,8 @@ struct flow_dissector_key_vlan {
__be16 vlan_tci;
};
__be16 vlan_tpid;
+ __be16 vlan_eth_type;
+ u16 padding;
};
struct flow_dissector_mpls_lse {
@@ -133,6 +138,7 @@ struct flow_dissector_key_tipc {
* struct flow_dissector_key_addrs:
* @v4addrs: IPv4 addresses
* @v6addrs: IPv6 addresses
+ * @tipckey: TIPC key
*/
struct flow_dissector_key_addrs {
union {
@@ -143,14 +149,12 @@ struct flow_dissector_key_addrs {
};
/**
- * flow_dissector_key_arp:
- * @ports: Operation, source and target addresses for an ARP header
- * for Ethernet hardware addresses and IPv4 protocol addresses
- * sip: Sender IP address
- * tip: Target IP address
- * op: Operation
- * sha: Sender hardware address
- * tpa: Target hardware address
+ * struct flow_dissector_key_arp:
+ * @sip: Sender IP address
+ * @tip: Target IP address
+ * @op: Operation
+ * @sha: Sender hardware address
+ * @tha: Target hardware address
*/
struct flow_dissector_key_arp {
__u32 sip;
@@ -161,10 +165,10 @@ struct flow_dissector_key_arp {
};
/**
- * flow_dissector_key_tp_ports:
- * @ports: port numbers of Transport header
- * src: source port number
- * dst: destination port number
+ * struct flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * @src: source port number
+ * @dst: destination port number
*/
struct flow_dissector_key_ports {
union {
@@ -177,10 +181,26 @@ struct flow_dissector_key_ports {
};
/**
- * flow_dissector_key_icmp:
- * type: ICMP type
- * code: ICMP code
- * id: session identifier
+ * struct flow_dissector_key_ports_range:
+ * @tp: port number from packet
+ * @tp_min: min port number in range
+ * @tp_max: max port number in range
+ */
+struct flow_dissector_key_ports_range {
+ union {
+ struct flow_dissector_key_ports tp;
+ struct {
+ struct flow_dissector_key_ports tp_min;
+ struct flow_dissector_key_ports tp_max;
+ };
+ };
+};
+
+/**
+ * struct flow_dissector_key_icmp:
+ * @type: ICMP type
+ * @code: ICMP code
+ * @id: Session identifier
*/
struct flow_dissector_key_icmp {
struct {
@@ -223,10 +243,12 @@ struct flow_dissector_key_ip {
* struct flow_dissector_key_meta:
* @ingress_ifindex: ingress ifindex
* @ingress_iftype: ingress interface type
+ * @l2_miss: packet did not match an L2 entry during forwarding
*/
struct flow_dissector_key_meta {
int ingress_ifindex;
u16 ingress_iftype;
+ u8 l2_miss;
};
/**
@@ -251,6 +273,62 @@ struct flow_dissector_key_hash {
u32 hash;
};
+/**
+ * struct flow_dissector_key_num_of_vlans:
+ * @num_of_vlans: num_of_vlans value
+ */
+struct flow_dissector_key_num_of_vlans {
+ u8 num_of_vlans;
+};
+
+/**
+ * struct flow_dissector_key_pppoe:
+ * @session_id: pppoe session id
+ * @ppp_proto: ppp protocol
+ * @type: pppoe eth type
+ */
+struct flow_dissector_key_pppoe {
+ __be16 session_id;
+ __be16 ppp_proto;
+ __be16 type;
+};
+
+/**
+ * struct flow_dissector_key_l2tpv3:
+ * @session_id: identifier for a l2tp session
+ */
+struct flow_dissector_key_l2tpv3 {
+ __be32 session_id;
+};
+
+/**
+ * struct flow_dissector_key_ipsec:
+ * @spi: identifier for a ipsec connection
+ */
+struct flow_dissector_key_ipsec {
+ __be32 spi;
+};
+
+/**
+ * struct flow_dissector_key_cfm:
+ * @mdl_ver: maintenance domain level (mdl) and cfm protocol version
+ * @opcode: code specifying a type of cfm protocol packet
+ *
+ * See 802.1ag, ITU-T G.8013/Y.1731
+ * 1 2
+ * |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mdl | version | opcode |
+ * +-----+---------+-+-+-+-+-+-+-+-+
+ */
+struct flow_dissector_key_cfm {
+ u8 mdl_ver;
+ u8 opcode;
+};
+
+#define FLOW_DIS_CFM_MDL_MASK GENMASK(7, 5)
+#define FLOW_DIS_CFM_MDL_MAX 7
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -280,6 +358,11 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */
FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */
FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
+ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */
+ FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */
+ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */
+ FLOW_DISSECTOR_KEY_CFM, /* struct flow_dissector_key_cfm */
+ FLOW_DISSECTOR_KEY_IPSEC, /* struct flow_dissector_key_ipsec */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -287,6 +370,7 @@ enum flow_dissector_key_id {
#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1)
#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2)
+#define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3)
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
@@ -295,7 +379,8 @@ struct flow_dissector_key {
};
struct flow_dissector {
- unsigned int used_keys; /* each bit repesents presence of one key id */
+ unsigned long long used_keys;
+ /* each bit represents presence of one key id */
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};
@@ -350,12 +435,12 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
u32 flow_hash_from_keys(struct flow_keys *keys);
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp,
- void *data, int thoff, int hlen);
+ const void *data, int thoff, int hlen);
static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
{
- return flow_dissector->used_keys & (1 << key_id);
+ return flow_dissector->used_keys & (1ULL << key_id);
}
static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
@@ -368,8 +453,8 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
struct bpf_flow_dissector {
struct bpf_flow_keys *flow_keys;
const struct sk_buff *skb;
- void *data;
- void *data_end;
+ const void *data;
+ const void *data_end;
};
static inline void
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 123b1e9ea304..314087a5e181 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -32,6 +32,10 @@ struct flow_match_vlan {
struct flow_dissector_key_vlan *key, *mask;
};
+struct flow_match_arp {
+ struct flow_dissector_key_arp *key, *mask;
+};
+
struct flow_match_ipv4_addrs {
struct flow_dissector_key_ipv4_addrs *key, *mask;
};
@@ -48,6 +52,10 @@ struct flow_match_ports {
struct flow_dissector_key_ports *key, *mask;
};
+struct flow_match_ports_range {
+ struct flow_dissector_key_ports_range *key, *mask;
+};
+
struct flow_match_icmp {
struct flow_dissector_key_icmp *key, *mask;
};
@@ -56,6 +64,10 @@ struct flow_match_tcp {
struct flow_dissector_key_tcp *key, *mask;
};
+struct flow_match_ipsec {
+ struct flow_dissector_key_ipsec *key, *mask;
+};
+
struct flow_match_mpls {
struct flow_dissector_key_mpls *key, *mask;
};
@@ -72,6 +84,14 @@ struct flow_match_ct {
struct flow_dissector_key_ct *key, *mask;
};
+struct flow_match_pppoe {
+ struct flow_dissector_key_pppoe *key, *mask;
+};
+
+struct flow_match_l2tpv3 {
+ struct flow_dissector_key_l2tpv3 *key, *mask;
+};
+
struct flow_rule;
void flow_rule_match_meta(const struct flow_rule *rule,
@@ -86,6 +106,8 @@ void flow_rule_match_vlan(const struct flow_rule *rule,
struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
struct flow_match_vlan *out);
+void flow_rule_match_arp(const struct flow_rule *rule,
+ struct flow_match_arp *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
@@ -94,8 +116,12 @@ void flow_rule_match_ip(const struct flow_rule *rule,
struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
struct flow_match_ports *out);
+void flow_rule_match_ports_range(const struct flow_rule *rule,
+ struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out);
+void flow_rule_match_ipsec(const struct flow_rule *rule,
+ struct flow_match_ipsec *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
@@ -116,6 +142,10 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
struct flow_match_ct *out);
+void flow_rule_match_pppoe(const struct flow_rule *rule,
+ struct flow_match_pppoe *out);
+void flow_rule_match_l2tpv3(const struct flow_rule *rule,
+ struct flow_match_l2tpv3 *out);
enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
@@ -137,6 +167,7 @@ enum flow_action_id {
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
FLOW_ACTION_PRIORITY,
+ FLOW_ACTION_RX_QUEUE_MAPPING,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
@@ -147,6 +178,12 @@ enum flow_action_id {
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
FLOW_ACTION_GATE,
+ FLOW_ACTION_PPPOE_PUSH,
+ FLOW_ACTION_JUMP,
+ FLOW_ACTION_PIPE,
+ FLOW_ACTION_VLAN_PUSH_ETH,
+ FLOW_ACTION_VLAN_POP_ETH,
+ FLOW_ACTION_CONTINUE,
NUM_FLOW_ACTIONS,
};
@@ -196,6 +233,9 @@ void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
struct flow_action_entry {
enum flow_action_id id;
+ u32 hw_index;
+ unsigned long cookie;
+ u64 miss_cookie;
enum flow_action_hw_stats hw_stats;
action_destr destructor;
void *destructor_priv;
@@ -207,6 +247,10 @@ struct flow_action_entry {
__be16 proto;
u8 prio;
} vlan;
+ struct { /* FLOW_ACTION_VLAN_PUSH_ETH */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+ } vlan_push_eth;
struct { /* FLOW_ACTION_MANGLE */
/* FLOW_ACTION_ADD */
enum flow_action_mangle_base htype;
@@ -218,6 +262,7 @@ struct flow_action_entry {
u32 csum_flags; /* FLOW_ACTION_CSUM */
u32 mark; /* FLOW_ACTION_MARK */
u16 ptype; /* FLOW_ACTION_PTYPE */
+ u16 rx_queue; /* FLOW_ACTION_RX_QUEUE_MAPPING */
u32 priority; /* FLOW_ACTION_PRIORITY */
struct { /* FLOW_ACTION_QUEUE */
u32 ctx;
@@ -231,10 +276,18 @@ struct flow_action_entry {
bool truncate;
} sample;
struct { /* FLOW_ACTION_POLICE */
- u32 index;
u32 burst;
u64 rate_bytes_ps;
+ u64 peakrate_bytes_ps;
+ u32 avrate;
+ u16 overhead;
+ u64 burst_pkt;
+ u64 rate_pkt_ps;
u32 mtu;
+ struct {
+ enum flow_action_id act_id;
+ u32 extval;
+ } exceed, notexceed;
} police;
struct { /* FLOW_ACTION_CT */
int action;
@@ -245,6 +298,7 @@ struct flow_action_entry {
unsigned long cookie;
u32 mark;
u32 labels[4];
+ bool orig_dir;
} ct_metadata;
struct { /* FLOW_ACTION_MPLS_PUSH */
u32 label;
@@ -263,7 +317,6 @@ struct flow_action_entry {
u8 ttl;
} mpls_mangle;
struct {
- u32 index;
s32 prio;
u64 basetime;
u64 cycletime;
@@ -271,13 +324,16 @@ struct flow_action_entry {
u32 num_entries;
struct action_gate_entry *entries;
} gate;
+ struct { /* FLOW_ACTION_PPPOE_PUSH */
+ u16 sid;
+ } pppoe;
};
- struct flow_action_cookie *cookie; /* user defined action cookie */
+ struct flow_action_cookie *user_cookie; /* user defined action cookie */
};
struct flow_action {
unsigned int num_entries;
- struct flow_action_entry entries[];
+ struct flow_action_entry entries[] __counted_by(num_entries);
};
static inline bool flow_action_has_entries(const struct flow_action *action)
@@ -286,7 +342,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
}
/**
- * flow_action_has_one_action() - check if exactly one action is present
+ * flow_offload_has_one_action() - check if exactly one action is present
* @action: tc filter flow offload action
*
* Returns true if exactly one action is present.
@@ -296,6 +352,12 @@ static inline bool flow_offload_has_one_action(const struct flow_action *action)
return action->num_entries == 1;
}
+static inline bool flow_action_is_last_entry(const struct flow_action *action,
+ const struct flow_action_entry *entry)
+{
+ return entry == &action->entries[action->num_entries - 1];
+}
+
#define flow_action_for_each(__i, __act, __actions) \
for (__i = 0, __act = &(__actions)->entries[0]; \
__i < (__actions)->num_entries; \
@@ -444,6 +506,7 @@ struct flow_block_offload {
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
struct Qdisc *sch;
+ struct list_head *cb_list_head;
};
enum tc_setup_type;
@@ -538,12 +601,31 @@ struct flow_cls_common_offload {
struct flow_cls_offload {
struct flow_cls_common_offload common;
enum flow_cls_command command;
+ bool use_act_stats;
unsigned long cookie;
struct flow_rule *rule;
struct flow_stats stats;
u32 classid;
};
+enum offload_act_command {
+ FLOW_ACT_REPLACE,
+ FLOW_ACT_DESTROY,
+ FLOW_ACT_STATS,
+};
+
+struct flow_offload_action {
+ struct netlink_ext_ack *extack; /* NULL during FLOW_ACT_STATS processing */
+ enum offload_act_command command;
+ enum flow_action_id id;
+ u32 index;
+ unsigned long cookie;
+ struct flow_stats stats;
+ struct flow_action action;
+};
+
+struct flow_offload_action *offload_action_alloc(unsigned int num_actions);
+
static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
@@ -567,5 +649,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb));
+bool flow_indr_dev_exists(void);
#endif /* _NET_FLOW_OFFLOAD_H */
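
A driver consuming the extended action list above typically walks it with flow_action_for_each() and uses the new flow_action_is_last_entry() to validate terminal verdicts. A minimal sketch, assuming a hypothetical driver parse routine (not part of this header):

static int my_parse_actions(const struct flow_action *actions,
			    struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			/* a terminal verdict should close the action list */
			if (!flow_action_is_last_entry(actions, act))
				return -EOPNOTSUPP;
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			/* act->pppoe.sid carries the PPPoE session ID */
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported offload action");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}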
diff --git a/include/net/fou.h b/include/net/fou.h
index 80f56e275b08..824eb4b231fd 100644
--- a/include/net/fou.h
+++ b/include/net/fou.h
@@ -17,4 +17,6 @@ int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, __be16 *sport, int type);
+int register_fou_bpf(void);
+
#endif
diff --git a/include/net/fq.h b/include/net/fq.h
index e39f3f8d5f8a..99fbe4127b95 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -7,6 +7,10 @@
#ifndef __NET_SCHED_FQ_H
#define __NET_SCHED_FQ_H
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
struct fq_tin;
/**
@@ -19,8 +23,6 @@ struct fq_tin;
* @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
* (deficit round robin) based round robin queuing similar to the one
* found in net/sched/sch_fq_codel.c
- * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
- * fat flows and efficient head-dropping if packet limit is reached
* @queue: sk_buff queue to hold packets
* @backlog: number of bytes pending in the queue. The number of packets can be
* found in @queue.qlen
@@ -29,7 +31,6 @@ struct fq_tin;
struct fq_flow {
struct fq_tin *tin;
struct list_head flowchain;
- struct list_head backlogchain;
struct sk_buff_head queue;
u32 backlog;
int deficit;
@@ -47,6 +48,8 @@ struct fq_flow {
struct fq_tin {
struct list_head new_flows;
struct list_head old_flows;
+ struct list_head tin_list;
+ struct fq_flow default_flow;
u32 backlog_bytes;
u32 backlog_packets;
u32 overlimit;
@@ -59,14 +62,14 @@ struct fq_tin {
/**
* struct fq - main container for fair queuing purposes
*
- * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
- * head-dropping when @backlog reaches @limit
* @limit: max number of packets that can be queued across all flows
* @backlog: number of packets queued across all flows
*/
struct fq {
struct fq_flow *flows;
- struct list_head backlogs;
+ unsigned long *flows_bitmap;
+
+ struct list_head tin_backlog;
spinlock_t lock;
u32 flows_cnt;
u32 limit;
@@ -95,9 +98,4 @@ typedef bool fq_skb_filter_t(struct fq *,
struct sk_buff *,
void *);
-typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
- struct fq_tin *,
- int idx,
- struct sk_buff *);
-
#endif
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index e73d74d2fabf..9467e33dfb36 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -11,35 +11,37 @@
/* functions that are embedded into the includer */
-static void fq_adjust_removal(struct fq *fq,
- struct fq_flow *flow,
- struct sk_buff *skb)
+
+static void
+__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
+ unsigned int bytes, unsigned int truesize)
{
struct fq_tin *tin = flow->tin;
+ int idx;
- tin->backlog_bytes -= skb->len;
- tin->backlog_packets--;
- flow->backlog -= skb->len;
- fq->backlog--;
- fq->memory_usage -= skb->truesize;
-}
+ tin->backlog_bytes -= bytes;
+ tin->backlog_packets -= packets;
+ flow->backlog -= bytes;
+ fq->backlog -= packets;
+ fq->memory_usage -= truesize;
-static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
-{
- struct fq_flow *i;
+ if (flow->backlog)
+ return;
- if (flow->backlog == 0) {
- list_del_init(&flow->backlogchain);
- } else {
- i = flow;
+ if (flow == &tin->default_flow) {
+ list_del_init(&tin->tin_list);
+ return;
+ }
- list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
- if (i->backlog < flow->backlog)
- break;
+ idx = flow - fq->flows;
+ __clear_bit(idx, fq->flows_bitmap);
+}
- list_move_tail(&flow->backlogchain,
- &i->backlogchain);
- }
+static void fq_adjust_removal(struct fq *fq,
+ struct fq_flow *flow,
+ struct sk_buff *skb)
+{
+ __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
}
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
@@ -54,11 +56,37 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
return NULL;
fq_adjust_removal(fq, flow, skb);
- fq_rejigger_backlog(fq, flow);
return skb;
}
+static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
+ fq_skb_free_t free_func)
+{
+ unsigned int packets = 0, bytes = 0, truesize = 0;
+ struct fq_tin *tin = flow->tin;
+ struct sk_buff *skb;
+ int pending;
+
+ lockdep_assert_held(&fq->lock);
+
+ pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
+ do {
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ break;
+
+ packets++;
+ bytes += skb->len;
+ truesize += skb->truesize;
+ free_func(fq, tin, flow, skb);
+ } while (packets < pending);
+
+ __fq_adjust_removal(fq, flow, packets, bytes, truesize);
+
+ return packets;
+}
+
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
struct fq_tin *tin,
fq_tin_dequeue_t dequeue_func)
@@ -115,8 +143,7 @@ static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
static struct fq_flow *fq_flow_classify(struct fq *fq,
struct fq_tin *tin, u32 idx,
- struct sk_buff *skb,
- fq_flow_get_default_t get_default_func)
+ struct sk_buff *skb)
{
struct fq_flow *flow;
@@ -124,7 +151,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
flow = &fq->flows[idx];
if (flow->tin && flow->tin != tin) {
- flow = get_default_func(fq, tin, idx, skb);
+ flow = &tin->default_flow;
tin->collisions++;
fq->collisions++;
}
@@ -135,45 +162,68 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
return flow;
}
-static void fq_recalc_backlog(struct fq *fq,
- struct fq_tin *tin,
- struct fq_flow *flow)
+static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
{
- struct fq_flow *i;
+ struct fq_tin *tin;
+ struct fq_flow *flow = NULL;
+ u32 len = 0;
+ int i;
- if (list_empty(&flow->backlogchain))
- list_add_tail(&flow->backlogchain, &fq->backlogs);
+ for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
+ struct fq_flow *cur = &fq->flows[i];
+ unsigned int cur_len;
- i = flow;
- list_for_each_entry_continue_reverse(i, &fq->backlogs,
- backlogchain)
- if (i->backlog > flow->backlog)
- break;
+ cur_len = cur->backlog;
+ if (cur_len <= len)
+ continue;
+
+ flow = cur;
+ len = cur_len;
+ }
+
+ list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
+ unsigned int cur_len = tin->default_flow.backlog;
+
+ if (cur_len <= len)
+ continue;
+
+ flow = &tin->default_flow;
+ len = cur_len;
+ }
- list_move(&flow->backlogchain, &i->backlogchain);
+ return flow;
}
static void fq_tin_enqueue(struct fq *fq,
struct fq_tin *tin, u32 idx,
struct sk_buff *skb,
- fq_skb_free_t free_func,
- fq_flow_get_default_t get_default_func)
+ fq_skb_free_t free_func)
{
struct fq_flow *flow;
+ struct sk_buff *next;
bool oom;
lockdep_assert_held(&fq->lock);
- flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);
+ flow = fq_flow_classify(fq, tin, idx, skb);
- flow->tin = tin;
- flow->backlog += skb->len;
- tin->backlog_bytes += skb->len;
- tin->backlog_packets++;
- fq->memory_usage += skb->truesize;
- fq->backlog++;
+ if (!flow->backlog) {
+ if (flow != &tin->default_flow)
+ __set_bit(idx, fq->flows_bitmap);
+ else if (list_empty(&tin->tin_list))
+ list_add(&tin->tin_list, &fq->tin_backlog);
+ }
- fq_recalc_backlog(fq, tin, flow);
+ flow->tin = tin;
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
+ flow->backlog += skb->len;
+ tin->backlog_bytes += skb->len;
+ tin->backlog_packets++;
+ fq->memory_usage += skb->truesize;
+ fq->backlog++;
+ __skb_queue_tail(&flow->queue, skb);
+ }
if (list_empty(&flow->flowchain)) {
flow->deficit = fq->quantum;
@@ -181,21 +231,15 @@ static void fq_tin_enqueue(struct fq *fq,
&tin->new_flows);
}
- __skb_queue_tail(&flow->queue, skb);
oom = (fq->memory_usage > fq->memory_limit);
while (fq->backlog > fq->limit || oom) {
- flow = list_first_entry_or_null(&fq->backlogs,
- struct fq_flow,
- backlogchain);
+ flow = fq_find_fattest_flow(fq);
if (!flow)
return;
- skb = fq_flow_dequeue(fq, flow);
- if (!skb)
+ if (!fq_flow_drop(fq, flow, free_func))
return;
- free_func(fq, flow->tin, flow, skb);
-
flow->tin->overlimit++;
fq->overlimit++;
if (oom) {
@@ -224,8 +268,6 @@ static void fq_flow_filter(struct fq *fq,
fq_adjust_removal(fq, flow, skb);
free_func(fq, tin, flow, skb);
}
-
- fq_rejigger_backlog(fq, flow);
}
static void fq_tin_filter(struct fq *fq,
@@ -248,16 +290,18 @@ static void fq_flow_reset(struct fq *fq,
struct fq_flow *flow,
fq_skb_free_t free_func)
{
+ struct fq_tin *tin = flow->tin;
struct sk_buff *skb;
while ((skb = fq_flow_dequeue(fq, flow)))
- free_func(fq, flow->tin, flow, skb);
+ free_func(fq, tin, flow, skb);
- if (!list_empty(&flow->flowchain))
+ if (!list_empty(&flow->flowchain)) {
list_del_init(&flow->flowchain);
-
- if (!list_empty(&flow->backlogchain))
- list_del_init(&flow->backlogchain);
+ if (list_empty(&tin->new_flows) &&
+ list_empty(&tin->old_flows))
+ list_del_init(&tin->tin_list);
+ }
flow->tin = NULL;
@@ -283,6 +327,7 @@ static void fq_tin_reset(struct fq *fq,
fq_flow_reset(fq, flow, free_func);
}
+ WARN_ON_ONCE(!list_empty(&tin->tin_list));
WARN_ON_ONCE(tin->backlog_bytes);
WARN_ON_ONCE(tin->backlog_packets);
}
@@ -290,7 +335,6 @@ static void fq_tin_reset(struct fq *fq,
static void fq_flow_init(struct fq_flow *flow)
{
INIT_LIST_HEAD(&flow->flowchain);
- INIT_LIST_HEAD(&flow->backlogchain);
__skb_queue_head_init(&flow->queue);
}
@@ -298,6 +342,8 @@ static void fq_tin_init(struct fq_tin *tin)
{
INIT_LIST_HEAD(&tin->new_flows);
INIT_LIST_HEAD(&tin->old_flows);
+ INIT_LIST_HEAD(&tin->tin_list);
+ fq_flow_init(&tin->default_flow);
}
static int fq_init(struct fq *fq, int flows_cnt)
@@ -305,8 +351,8 @@ static int fq_init(struct fq *fq, int flows_cnt)
int i;
memset(fq, 0, sizeof(fq[0]));
- INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
+ INIT_LIST_HEAD(&fq->tin_backlog);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
fq->quantum = 300;
fq->limit = 8192;
@@ -316,6 +362,13 @@ static int fq_init(struct fq *fq, int flows_cnt)
if (!fq->flows)
return -ENOMEM;
+ fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
+ if (!fq->flows_bitmap) {
+ kvfree(fq->flows);
+ fq->flows = NULL;
+ return -ENOMEM;
+ }
+
for (i = 0; i < fq->flows_cnt; i++)
fq_flow_init(&fq->flows[i]);
@@ -332,6 +385,9 @@ static void fq_reset(struct fq *fq,
kvfree(fq->flows);
fq->flows = NULL;
+
+ bitmap_free(fq->flows_bitmap);
+ fq->flows_bitmap = NULL;
}
#endif
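
For reference, a hedged sketch of how an includer drives the reworked enqueue path; my_free() is an illustrative fq_skb_free_t, and fq->lock must be held across fq_tin_enqueue() as asserted above:

static void my_free(struct fq *fq, struct fq_tin *tin,
		    struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static void my_enqueue(struct fq *fq, struct fq_tin *tin,
		       struct sk_buff *skb)
{
	u32 idx = fq_flow_idx(fq, skb);

	spin_lock_bh(&fq->lock);
	fq_tin_enqueue(fq, tin, idx, skb, my_free);
	spin_unlock_bh(&fq->lock);
}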
diff --git a/include/net/garp.h b/include/net/garp.h
index 4d9a0c6a2e5f..59a07b171def 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -2,6 +2,8 @@
#ifndef _NET_GARP_H
#define _NET_GARP_H
+#include <linux/if_ether.h>
+#include <linux/types.h>
#include <net/stp.h>
#define GARP_PROTOCOL_ID 0x1
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 1424e02cef90..7aa2b8e1fb29 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,14 +7,17 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u64 packets;
-};
-
-struct gnet_stats_basic_cpu {
- struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur in parallel with writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+ u64_stats_t bytes;
+ u64_stats_t packets;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -34,6 +37,7 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr);
@@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-int gnet_stats_copy_basic_hw(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu_q,
- const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
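
Writers of the new gnet_stats_basic_sync update it under the embedded u64_stats_sync; a minimal sketch of the write side, mirroring the _bstats_update() helper in net/sch_generic.h:

static void my_bstats_update(struct gnet_stats_basic_sync *bstats,
			     u64 bytes, u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}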
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 6e5f1e1aa822..9ece6e5a3ea8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -8,20 +8,26 @@
#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+/* Binding to multicast group requires %CAP_NET_ADMIN */
+#define GENL_MCAST_CAP_NET_ADMIN BIT(0)
+/* Binding to multicast group requires %CAP_SYS_ADMIN */
+#define GENL_MCAST_CAP_SYS_ADMIN BIT(1)
+
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
+ * @flags: GENL_MCAST_* flags
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
+ u8 flags;
};
-struct genl_ops;
+struct genl_split_ops;
struct genl_info;
/**
* struct genl_family - generic netlink family
- * @id: protocol family identifier (private)
* @hdrsize: length of user specific header in bytes
* @name: name of family
* @version: protocol version
@@ -35,43 +41,79 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @bind: called when family multicast group is added to a netlink socket
+ * @unbind: called when family multicast group is removed from a netlink socket
+ * @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
- * @mcgrp_offset: starting number of multicast group IDs in this family
- * (private)
+ * @resv_start_op: first operation for which reserved fields of the header
+ * can be validated and policies are required (see below);
+ * new families should leave this field at zero
* @ops: the operations supported by this family
* @n_ops: number of operations supported by this family
+ * @small_ops: the small-struct operations supported by this family
+ * @n_small_ops: number of small-struct operations supported by this family
+ * @split_ops: the split do/dump form of operation definition
+ * @n_split_ops: number of entries in @split_ops; note that with split do/dump
+ * ops the number of entries is not the same as number of commands
+ * @sock_priv_size: the size of per-socket private memory
+ * @sock_priv_init: the per-socket private memory initializer
+ * @sock_priv_destroy: the per-socket private memory destructor
+ *
+ * Attribute policies (the combination of @policy and @maxattr fields)
+ * can be attached at the family level or at the operation level.
+ * If both are present the per-operation policy takes precedence.
+ * For operations before @resv_start_op, a missing policy means that the core
+ * will perform no attribute parsing or validation. For newer operations,
+ * if no policy is provided the core will reject all TLV attributes.
*/
struct genl_family {
- int id; /* private */
unsigned int hdrsize;
char name[GENL_NAMSIZ];
unsigned int version;
unsigned int maxattr;
- bool netnsok;
- bool parallel_ops;
+ u8 netnsok:1;
+ u8 parallel_ops:1;
+ u8 n_ops;
+ u8 n_small_ops;
+ u8 n_split_ops;
+ u8 n_mcgrps;
+ u8 resv_start_op;
const struct nla_policy *policy;
- int (*pre_doit)(const struct genl_ops *ops,
+ int (*pre_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
- void (*post_doit)(const struct genl_ops *ops,
+ void (*post_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*bind)(int mcgrp);
+ void (*unbind)(int mcgrp);
const struct genl_ops * ops;
+ const struct genl_small_ops *small_ops;
+ const struct genl_split_ops *split_ops;
const struct genl_multicast_group *mcgrps;
- unsigned int n_ops;
- unsigned int n_mcgrps;
- unsigned int mcgrp_offset; /* private */
struct module *module;
+
+ size_t sock_priv_size;
+ void (*sock_priv_init)(void *priv);
+ void (*sock_priv_destroy)(void *priv);
+
+/* private: internal use only */
+ /* protocol family identifier */
+ int id;
+ /* starting number of multicast group IDs in this family */
+ unsigned int mcgrp_offset;
+ /* list of per-socket privs */
+ struct xarray *sock_privs;
};
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
* @snd_portid: netlink portid of sender
+ * @family: generic netlink family
* @nlhdr: netlink message header
* @genlhdr: generic netlink message header
- * @userhdr: user specific header
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
@@ -80,16 +122,16 @@ struct genl_family {
struct genl_info {
u32 snd_seq;
u32 snd_portid;
- struct nlmsghdr * nlhdr;
+ const struct genl_family *family;
+ const struct nlmsghdr * nlhdr;
struct genlmsghdr * genlhdr;
- void * userhdr;
struct nlattr ** attrs;
possible_net_t _net;
void * user_ptr[2];
struct netlink_ext_ack *extack;
};
-static inline struct net *genl_info_net(struct genl_info *info)
+static inline struct net *genl_info_net(const struct genl_info *info)
{
return read_pnet(&info->_net);
}
@@ -99,15 +141,22 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
write_pnet(&info->_net, net);
}
+static inline void *genl_info_userhdr(const struct genl_info *info)
+{
+ return (u8 *)info->genlhdr + GENL_HDRLEN;
+}
+
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
-static inline int genl_err_attr(struct genl_info *info, int err,
- const struct nlattr *attr)
-{
- info->extack->bad_attr = attr;
+#define GENL_SET_ERR_MSG_FMT(info, msg, args...) \
+ NL_SET_ERR_MSG_FMT((info)->extack, msg, ##args)
- return err;
-}
+/* Report that a root attribute is missing */
+#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
+ const struct genl_info *__info = (info); \
+ \
+ NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
+})
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
@@ -116,28 +165,34 @@ enum genl_validate_flags {
};
/**
- * struct genl_info - info that is available during dumpit op call
- * @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
- * @attrs: netlink attributes
+ * struct genl_small_ops - generic netlink operations (small version)
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @validate: validation flags from enum genl_validate_flags
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ *
+ * This is a cut-down version of struct genl_ops for users who don't need
+ * most of the ancillary infrastructure and want to save space.
*/
-struct genl_dumpit_info {
- const struct genl_family *family;
- const struct genl_ops *ops;
- struct nlattr **attrs;
+struct genl_small_ops {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
};
-static inline const struct genl_dumpit_info *
-genl_dumpit_info(struct netlink_callback *cb)
-{
- return cb->data;
-}
-
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
- * @flags: flags
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @maxattr: maximum number of attributes supported
+ * @policy: netlink policy (takes precedence over family policy)
+ * @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @start: start callback for dumps
* @dumpit: callback for dumpers
@@ -150,12 +205,117 @@ struct genl_ops {
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
+ const struct nla_policy *policy;
+ unsigned int maxattr;
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
+};
+
+/**
+ * struct genl_split_ops - generic netlink operations (do/dump split version)
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @validate: validation flags from enum genl_validate_flags
+ * @policy: netlink policy (takes precedence over family policy)
+ * @maxattr: maximum number of attributes supported
+ *
+ * Do callbacks:
+ * @pre_doit: called before an operation's @doit callback, it may
+ * do additional, common, filtering and return an error
+ * @doit: standard command callback
+ * @post_doit: called after an operation's @doit callback, it may
+ * undo operations done by pre_doit, for example release locks
+ *
+ * Dump callbacks:
+ * @start: start callback for dumps
+ * @dumpit: callback for dumpers
+ * @done: completion callback for dumps
+ *
+ * Do callbacks can be used if %GENL_CMD_CAP_DO is set in @flags.
+ * Dump callbacks can be used if %GENL_CMD_CAP_DUMP is set in @flags.
+ * Exactly one of those flags must be set.
+ */
+struct genl_split_ops {
+ union {
+ struct {
+ int (*pre_doit)(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info);
+ int (*doit)(struct sk_buff *skb,
+ struct genl_info *info);
+ void (*post_doit)(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info);
+ };
+ struct {
+ int (*start)(struct netlink_callback *cb);
+ int (*dumpit)(struct sk_buff *skb,
+ struct netlink_callback *cb);
+ int (*done)(struct netlink_callback *cb);
+ };
+ };
+ const struct nla_policy *policy;
+ unsigned int maxattr;
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
+/**
+ * struct genl_dumpit_info - info that is available during dumpit op call
+ * @op: generic netlink ops - for internal genl code usage
+ * @attrs: netlink attributes
+ * @info: struct genl_info describing the request
+ */
+struct genl_dumpit_info {
+ struct genl_split_ops op;
+ struct genl_info info;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
+static inline const struct genl_info *
+genl_info_dump(struct netlink_callback *cb)
+{
+ return &genl_dumpit_info(cb)->info;
+}
+
+/**
+ * genl_info_init_ntf() - initialize genl_info for notifications
+ * @info: genl_info struct to set up
+ * @family: pointer to the genetlink family
+ * @cmd: command to be used in the notification
+ *
+ * Initialize a locally declared struct genl_info to pass to various APIs.
+ * Intended to be used when creating notifications.
+ */
+static inline void
+genl_info_init_ntf(struct genl_info *info, const struct genl_family *family,
+ u8 cmd)
+{
+ struct genlmsghdr *hdr = (void *) &info->user_ptr[0];
+
+ memset(info, 0, sizeof(*info));
+ info->family = family;
+ info->genlhdr = hdr;
+ hdr->cmd = cmd;
+}
+
+static inline bool genl_info_is_ntf(const struct genl_info *info)
+{
+ return !info->nlhdr;
+}
+
+void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk);
+void *genl_sk_priv_get(struct genl_family *family, struct sock *sk);
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
@@ -164,6 +324,32 @@ void genl_notify(const struct genl_family *family, struct sk_buff *skb,
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
const struct genl_family *family, int flags, u8 cmd);
+static inline void *
+__genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags)
+{
+ return genlmsg_put(skb, info->snd_portid, info->snd_seq, info->family,
+ flags, info->genlhdr->cmd);
+}
+
+/**
+ * genlmsg_iput - start genetlink message based on genl_info
+ * @skb: skb in which message header will be placed
+ * @info: genl_info as provided to do/dump handlers
+ *
+ * Convenience wrapper which starts a genetlink message based on
+ * information in the user request. @info should be either the struct passed
+ * by genetlink core to do/dump handlers (when constructing replies to
+ * such requests) or a struct initialized by genl_info_init_ntf()
+ * when constructing notifications.
+ *
+ * Returns pointer to new genetlink header.
+ */
+static inline void *
+genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
+{
+ return __genlmsg_iput(skb, info, 0);
+}
+
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
@@ -270,6 +456,35 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
}
/**
+ * genlmsg_multicast_netns_filtered - multicast a netlink message
+ * to a specific netns with a filter function
+ * @family: the generic netlink family
+ * @net: the net namespace
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+ * @flags: allocation flags
+ * @filter: filter function
+ * @filter_data: filter function private data
+ *
+ * Return: 0 on success, negative error code for failure.
+ */
+static inline int
+genlmsg_multicast_netns_filtered(const struct genl_family *family,
+ struct net *net, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags,
+ netlink_filter_fn filter,
+ void *filter_data)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group,
+ flags, filter, filter_data);
+}
+
+/**
* genlmsg_multicast_netns - multicast a netlink message to a specific netns
* @family: the generic netlink family
* @net: the net namespace
@@ -282,10 +497,8 @@ static inline int genlmsg_multicast_netns(const struct genl_family *family,
struct net *net, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
- if (WARN_ON_ONCE(group >= family->n_mcgrps))
- return -EINVAL;
- group = family->mcgrp_offset + group;
- return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
+ return genlmsg_multicast_netns_filtered(family, net, skb, portid,
+ group, flags, NULL, NULL);
}
/**
@@ -320,6 +533,7 @@ int genlmsg_multicast_allns(const struct genl_family *family,
/**
* genlmsg_unicast - unicast a netlink message
+ * @net: network namespace to look up @portid in
* @skb: netlink message as socket buffer
* @portid: netlink portid of the destination socket
*/
@@ -339,7 +553,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
}
/**
- * gennlmsg_data - head of message payload
+ * genlmsg_data - head of message payload
* @gnlh: genetlink message header
*/
static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
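
The genl_info_init_ntf()/genlmsg_iput() pair documented above composes as follows; a sketch in which my_family and MY_CMD_NTF are hypothetical placeholders:

/* hypothetical family and command, for illustration only */
static struct genl_family my_family;
#define MY_CMD_NTF 1

static struct sk_buff *my_build_ntf(void)
{
	struct genl_info info;
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	genl_info_init_ntf(&info, &my_family, MY_CMD_NTF);
	hdr = genlmsg_iput(skb, &info);
	if (!hdr) {
		nlmsg_free(skb);
		return NULL;
	}
	/* ... put attributes here ... */
	genlmsg_end(skb, hdr);
	return skb;
}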
diff --git a/include/net/geneve.h b/include/net/geneve.h
index bced0b1d9fe4..5c96827a487e 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -59,7 +59,7 @@ struct genevehdr {
__be16 proto_type;
u8 vni[3];
u8 rsvd2;
- struct geneve_opt options[];
+ u8 options[];
};
static inline bool netif_is_geneve(const struct net_device *dev)
diff --git a/include/net/gre.h b/include/net/gre.h
index b60f212c16c6..4e209708b754 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -106,17 +106,6 @@ static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
return flags;
}
-static inline __sum16 gre_checksum(struct sk_buff *skb)
-{
- __wsum csum;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- csum = lco_csum(skb);
- else
- csum = skb_checksum(skb, 0, skb->len, 0);
- return csum_fold(csum);
-}
-
static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
__be16 flags, __be16 proto,
__be32 key, __be32 seq)
@@ -146,7 +135,13 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
*ptr = 0;
- *(__sum16 *)ptr = gre_checksum(skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ *(__sum16 *)ptr = csum_fold(lco_csum(skb));
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = sizeof(*greh);
+ }
}
}
}
diff --git a/include/net/gro.h b/include/net/gro.h
new file mode 100644
index 000000000000..50f1e403dbbb
--- /dev/null
+++ b/include/net/gro.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_GRO_H
+#define _NET_GRO_H
+
+#include <linux/indirect_call_wrapper.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/skbuff.h>
+#include <net/udp.h>
+#include <net/hotdata.h>
+
+struct napi_gro_cb {
+ union {
+ struct {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
+
+ /* Length of frag0. */
+ unsigned int frag0_len;
+ };
+
+ struct {
+ /* used in skb_gro_receive() slow path */
+ struct sk_buff *last;
+
+ /* jiffies when first packet was created/queued */
+ unsigned long age;
+ };
+ };
+
+ /* This indicates where we are processing relative to skb->data. */
+ int data_offset;
+
+ /* This is non-zero if the packet cannot be merged with the new skb. */
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
+
+ /* Number of segments aggregated. */
+ u16 count;
+
+ /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
+ u16 proto;
+
+/* Used in napi_gro_cb::free */
+#define NAPI_GRO_FREE 1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+ /* portion of the cb set to zero at every gro iteration */
+ struct_group(zeroed,
+
+ /* Start offset for remote checksum offload */
+ u16 gro_remcsum_start;
+
+ /* This is non-zero if the packet may be of the same flow. */
+ u8 same_flow:1;
+
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Free the skb? */
+ u8 free:2;
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
+
+ /* Used in GRE, set in fou/gue_gro_receive */
+ u8 is_fou:1;
+
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;
+ );
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+};
+
+#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+ return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+ NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void *skb_gro_header_fast(const struct sk_buff *skb,
+ unsigned int offset)
+{
+ return NAPI_GRO_CB(skb)->frag0 + offset;
+}
+
+static inline bool skb_gro_may_pull(const struct sk_buff *skb,
+ unsigned int hlen)
+{
+ return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
+}
+
+static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ if (!pskb_may_pull(skb, hlen))
+ return NULL;
+
+ return skb->data + offset;
+}
+
+static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ void *ptr;
+
+ ptr = skb_gro_header_fast(skb, offset);
+ if (!skb_gro_may_pull(skb, hlen))
+ ptr = skb_gro_header_slow(skb, hlen, offset);
+ return ptr;
+}
+
+static inline void *skb_gro_network_header(const struct sk_buff *skb)
+{
+ if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
+ return skb_gro_header_fast(skb, skb_network_offset(skb));
+
+ return skb_network_header(skb);
+}
+
+static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid)
+ NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
+ wsum_negate(NAPI_GRO_CB(skb)->csum)));
+}
+
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
+}
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+ skb_checksum_start_offset(skb) <
+ skb_gro_offset(skb)) &&
+ !skb_at_gro_remcsum_start(skb) &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+ /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+ * verified a new top level checksum or an encapsulated one
+ * during GRO. This saves work if we fall back to the normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (!__ret) \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
+struct gro_remcsum {
+ int offset;
+ __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+ grc->offset = 0;
+ grc->delta = 0;
+}
+
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
+{
+ __wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ if (!nopartial) {
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
+
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ grc->offset = off + hdrlen + offset;
+ grc->delta = delta;
+
+ return ptr;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+ struct gro_remcsum *grc)
+{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
+ if (!grc->delta)
+ return;
+
+ ptr = skb_gro_header(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
+}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS)
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+}
+#endif
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+
+#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
+})
+
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, struct sock *sk);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header(skb, hlen, off);
+
+ return uh;
+}
+
+static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static inline void gro_normal_list(struct napi_struct *napi)
+{
+ if (!napi->rx_count)
+ return;
+ netif_receive_skb_list_internal(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is exceeded,
+ * pass the whole batch up to the stack.
+ */
+static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+{
+ list_add_tail(&skb->list, &napi->rx_list);
+ napi->rx_count += segs;
+ if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
+ gro_normal_list(napi);
+}
+
+/* This function is an alternative to the 'inet_iif' and 'inet_sdif'
+ * functions, for use when we cannot rely on the fields of IPCB.
+ *
+ * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
+ * The caller must hold the RCU read lock.
+ */
+static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
+{
+ *iif = inet_iif(skb) ?: skb->dev->ifindex;
+ *sdif = 0;
+
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (netif_is_l3_slave(skb->dev)) {
+ struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
+
+ *sdif = *iif;
+ *iif = master ? master->ifindex : 0;
+ }
+#endif
+}
+
+/* This function is an alternative to the 'inet6_iif' and 'inet6_sdif'
+ * functions, for use when we cannot rely on the fields of IP6CB.
+ *
+ * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
+ * The caller must hold the RCU read lock.
+ */
+static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
+{
+ /* using skb->dev->ifindex because skb_dst(skb) is not initialized */
+ *iif = skb->dev->ifindex;
+ *sdif = 0;
+
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (netif_is_l3_slave(skb->dev)) {
+ struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
+
+ *sdif = *iif;
+ *iif = master ? master->ifindex : 0;
+ }
+#endif
+}
+
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
+
+#endif /* _NET_GRO_H */
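
Putting the accessors together, a hedged sketch of a protocol gro_receive callback; struct myhdr is a stand-in for a real protocol header and the inner handler is illustrative:

struct myhdr {
	__be16 len;
	__be16 proto;
};

static struct sk_buff *my_gro_receive(struct list_head *head,
				      struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct myhdr);
	struct myhdr *mh;

	mh = skb_gro_header(skb, hlen, off);
	if (unlikely(!mh)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	skb_gro_pull(skb, sizeof(*mh));
	/* recursion-limited dispatch to the inner handler */
	return call_gro_receive(inet_gro_receive, head, skb);
}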
diff --git a/include/net/gso.h b/include/net/gso.h
new file mode 100644
index 000000000000..29975440cad5
--- /dev/null
+++ b/include/net/gso.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_GSO_H
+#define _NET_GSO_H
+
+#include <linux/skbuff.h>
+
+/* Keeps track of mac header offset relative to skb->head.
+ * It is useful for TSO of tunneling protocols, e.g. GRE.
+ * For a non-tunnel skb it points to skb_mac_header() and for
+ * a tunnel skb it points to the outer mac header.
+ * Also tracks the level of encapsulation of the network headers.
+ */
+struct skb_gso_cb {
+ union {
+ int mac_offset;
+ int data_offset;
+ };
+ int encap_level;
+ __wsum csum;
+ __u16 csum_start;
+};
+#define SKB_GSO_CB_OFFSET 32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
+
+static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+{
+ return (skb_mac_header(inner_skb) - inner_skb->head) -
+ SKB_GSO_CB(inner_skb)->mac_offset;
+}
+
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+ int new_headroom, headroom;
+ int ret;
+
+ headroom = skb_headroom(skb);
+ ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ new_headroom = skb_headroom(skb);
+ SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+ return 0;
+}
+
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+ /* Do not update partial checksums if remote checksum is enabled. */
+ if (skb->remcsum_offload)
+ return;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
+/* Compute the checksum for a gso segment. First compute the checksum value
+ * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
+ * then add in skb->csum (checksum from csum_start to end of packet).
+ * skb->csum and csum_start are then updated to reflect the checksum of the
+ * resultant packet starting from the transport header; the resultant checksum
+ * is in the res argument (i.e. normally zero or the ~ of the checksum of a
+ * pseudo header).
+ */
+static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
+{
+ unsigned char *csum_start = skb_transport_header(skb);
+ int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+ __wsum partial = SKB_GSO_CB(skb)->csum;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
+
+ return csum_fold(csum_partial(csum_start, plen, partial));
+}
+
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, bool tx_path);
+
+static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ return __skb_gso_segment(skb, features, true);
+}
+
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, __be16 type);
+
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
+
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
+
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
+#endif /* _NET_GSO_H */
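
A minimal sketch of the usual consumer pattern for skb_gso_segment(); my_xmit() is a placeholder transmit routine:

static int my_xmit(struct sk_buff *skb);	/* hypothetical */

static int my_segment_and_xmit(struct sk_buff *skb,
			       netdev_features_t features)
{
	struct sk_buff *segs, *seg, *next;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return my_xmit(skb);	/* no segmentation was needed */
	consume_skb(skb);

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		my_xmit(seg);
	}
	return 0;
}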
diff --git a/include/net/gtp.h b/include/net/gtp.h
index 0e16ebb2a82d..2a503f035d18 100644
--- a/include/net/gtp.h
+++ b/include/net/gtp.h
@@ -2,13 +2,22 @@
#ifndef _GTP_H_
#define _GTP_H_
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/rtnetlink.h>
+
/* General GTP protocol related definitions. */
#define GTP0_PORT 3386
#define GTP1U_PORT 2152
+/* GTP messages types */
+#define GTP_ECHO_REQ 1 /* Echo Request */
+#define GTP_ECHO_RSP 2 /* Echo Response */
#define GTP_TPDU 255
+#define GTPIE_RECOVERY 14
+
struct gtp0_header { /* According to GSM TS 09.60. */
__u8 flags;
__u8 type;
@@ -27,6 +36,43 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */
__be32 tid;
} __attribute__ ((packed));
+struct gtp1_header_long { /* According to 3GPP TS 29.060. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be32 tid;
+ __be16 seq;
+ __u8 npdu;
+ __u8 next;
+} __packed;
+
+/* GTP Information Element */
+struct gtp_ie {
+ __u8 tag;
+ __u8 val;
+} __packed;
+
+struct gtp0_packet {
+ struct gtp0_header gtp0_h;
+ struct gtp_ie ie;
+} __packed;
+
+struct gtp1u_packet {
+ struct gtp1_header_long gtp1u_h;
+ struct gtp_ie ie;
+} __packed;
+
+struct gtp_pdu_session_info { /* According to 3GPP TS 38.415. */
+ u8 pdu_type;
+ u8 qfi;
+};
+
+static inline bool netif_is_gtp(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gtp");
+}
+
#define GTP1_F_NPDU 0x01
#define GTP1_F_SEQ 0x02
#define GTP1_F_EXTHDR 0x04
diff --git a/include/net/gue.h b/include/net/gue.h
index e42402f180b7..dfca298bec9c 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -30,6 +30,9 @@
* may refer to options placed after this field.
*/
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
struct guehdr {
union {
struct {
diff --git a/include/net/handshake.h b/include/net/handshake.h
new file mode 100644
index 000000000000..8ebd4f9ed26e
--- /dev/null
+++ b/include/net/handshake.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generic netlink HANDSHAKE service.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2023, Oracle and/or its affiliates.
+ */
+
+#ifndef _NET_HANDSHAKE_H
+#define _NET_HANDSHAKE_H
+
+enum {
+ TLS_NO_KEYRING = 0,
+ TLS_NO_PEERID = 0,
+ TLS_NO_CERT = 0,
+ TLS_NO_PRIVKEY = 0,
+};
+
+typedef void (*tls_done_func_t)(void *data, int status,
+ key_serial_t peerid);
+
+struct tls_handshake_args {
+ struct socket *ta_sock;
+ tls_done_func_t ta_done;
+ void *ta_data;
+ const char *ta_peername;
+ unsigned int ta_timeout_ms;
+ key_serial_t ta_keyring;
+ key_serial_t ta_my_cert;
+ key_serial_t ta_my_privkey;
+ unsigned int ta_num_peerids;
+ key_serial_t ta_my_peerids[5];
+};
+
+int tls_client_hello_anon(const struct tls_handshake_args *args, gfp_t flags);
+int tls_client_hello_x509(const struct tls_handshake_args *args, gfp_t flags);
+int tls_client_hello_psk(const struct tls_handshake_args *args, gfp_t flags);
+int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags);
+int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags);
+
+bool tls_handshake_cancel(struct sock *sk);
+void tls_handshake_close(struct socket *sock);
+
+u8 tls_get_record_type(const struct sock *sk, const struct cmsghdr *msg);
+void tls_alert_recv(const struct sock *sk, const struct msghdr *msg,
+ u8 *level, u8 *description);
+
+#endif /* _NET_HANDSHAKE_H */
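
A sketch of requesting an x509 TLS handshake for a kernel socket with the API above; my_done() is a placeholder completion callback:

static void my_done(void *data, int status, key_serial_t peerid)
{
	/* status is 0 or a negative errno; peerid identifies the peer */
}

static int my_start_tls(struct socket *sock)
{
	struct tls_handshake_args args = {
		.ta_sock	= sock,
		.ta_done	= my_done,
		.ta_keyring	= TLS_NO_KEYRING,
		.ta_my_cert	= TLS_NO_CERT,
		.ta_my_privkey	= TLS_NO_PRIVKEY,
	};

	return tls_client_hello_x509(&args, GFP_KERNEL);
}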
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
new file mode 100644
index 000000000000..003667a1efd6
--- /dev/null
+++ b/include/net/hotdata.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_HOTDATA_H
+#define _NET_HOTDATA_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <net/protocol.h>
+
+/* Read-mostly data used in network fast paths. */
+struct net_hotdata {
+#if IS_ENABLED(CONFIG_INET)
+ struct packet_offload ip_packet_offload;
+ struct net_offload tcpv4_offload;
+ struct net_protocol tcp_protocol;
+ struct net_offload udpv4_offload;
+ struct net_protocol udp_protocol;
+ struct packet_offload ipv6_packet_offload;
+ struct net_offload tcpv6_offload;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol tcpv6_protocol;
+ struct inet6_protocol udpv6_protocol;
+#endif
+ struct net_offload udpv6_offload;
+#endif
+ struct list_head offload_base;
+ struct list_head ptype_all;
+ struct kmem_cache *skbuff_cache;
+ struct kmem_cache *skbuff_fclone_cache;
+ struct kmem_cache *skb_small_head_cache;
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+ u32 rps_cpu_mask;
+#endif
+ int gro_normal_batch;
+ int netdev_budget;
+ int netdev_budget_usecs;
+ int tstamp_prequeue;
+ int max_backlog;
+ int dev_tx_weight;
+ int dev_rx_weight;
+};
+
+#define inet_ehash_secret net_hotdata.tcp_protocol.secret
+#define udp_ehash_secret net_hotdata.udp_protocol.secret
+#define inet6_ehash_secret net_hotdata.tcpv6_protocol.secret
+#define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
+#define udp6_ehash_secret net_hotdata.udpv6_protocol.secret
+#define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
+
+extern struct net_hotdata net_hotdata;
+
+#endif /* _NET_HOTDATA_H */
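
Runtime-tunable net_hotdata fields are read with READ_ONCE() on the fast path; a one-line sketch matching how gro_normal_one() in net/gro.h consumes gro_normal_batch:

static bool my_gro_batch_full(int rx_count)
{
	return rx_count >= READ_ONCE(net_hotdata.gro_normal_batch);
}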
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index c81444611a22..aa495decec35 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -2,6 +2,8 @@
#ifndef _HWBM_H
#define _HWBM_H
+#include <linux/mutex.h>
+
struct hwbm_pool {
/* Capacity of the pool */
int size;
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 9ac2d2672a93..caddf4a59ad1 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -46,12 +46,17 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
#if IS_ENABLED(CONFIG_NF_NAT)
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
#else
-#define icmp_ndo_send icmp_send
+static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ struct ip_options opts = { 0 };
+ __icmp_send(skb_in, type, code, info, &opts);
+}
#endif
int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
void icmp_out_count(struct net *net, unsigned char type);
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr);
#endif /* _ICMP_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 19c00d100096..925bac726a92 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
- * Copyright (c) 2018-2019 Intel Corporation
+ * Copyright (c) 2018-2019, 2021-2022 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -21,7 +21,7 @@
#include <asm/unaligned.h>
/**
- * struct ieee82011_radiotap_header - base radiotap header
+ * struct ieee80211_radiotap_header - base radiotap header
*/
struct ieee80211_radiotap_header {
/**
@@ -43,6 +43,11 @@ struct ieee80211_radiotap_header {
* @it_present: (first) present word
*/
__le32 it_present;
+
+ /**
+ * @it_optional: all remaining presence bitmaps
+ */
+ __le32 it_optional[];
} __packed;
/* version is always 0 */
@@ -77,11 +82,14 @@ enum ieee80211_radiotap_presence {
IEEE80211_RADIOTAP_HE_MU = 24,
IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26,
IEEE80211_RADIOTAP_LSIG = 27,
+ IEEE80211_RADIOTAP_TLV = 28,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
- IEEE80211_RADIOTAP_EXT = 31
+ IEEE80211_RADIOTAP_EXT = 31,
+ IEEE80211_RADIOTAP_EHT_USIG = 33,
+ IEEE80211_RADIOTAP_EHT = 34,
};
/* for IEEE80211_RADIOTAP_FLAGS */
@@ -118,6 +126,7 @@ enum ieee80211_radiotap_tx_flags {
IEEE80211_RADIOTAP_F_TX_RTS = 0x0004,
IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008,
IEEE80211_RADIOTAP_F_TX_NOSEQNO = 0x0010,
+ IEEE80211_RADIOTAP_F_TX_ORDER = 0x0020,
};
/* for IEEE80211_RADIOTAP_MCS "have" flags */
@@ -354,12 +363,229 @@ enum ieee80211_radiotap_zero_len_psdu_type {
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff,
};
+struct ieee80211_radiotap_tlv {
+ __le16 type;
+ __le16 len;
+ u8 data[];
+} __packed;
+
+/**
+ * struct ieee80211_radiotap_vendor_content - radiotap vendor data content
+ * @oui: radiotap vendor namespace OUI
+ * @oui_subtype: radiotap vendor sub namespace
+ * @vendor_type: radiotap vendor type
+ * @reserved: should always be set to zero (to avoid leaking memory)
+ * @data: the actual vendor namespace data
+ */
+struct ieee80211_radiotap_vendor_content {
+ u8 oui[3];
+ u8 oui_subtype;
+ __le16 vendor_type;
+ __le16 reserved;
+ u8 data[];
+} __packed;
+
+/**
+ * struct ieee80211_radiotap_vendor_tlv - vendor radiotap data information
+ * @type: should always be set to IEEE80211_RADIOTAP_VENDOR_NAMESPACE
+ * @len: length of data
+ * @content: vendor content, see @ieee80211_radiotap_vendor_content
+ */
+struct ieee80211_radiotap_vendor_tlv {
+ __le16 type; /* IEEE80211_RADIOTAP_VENDOR_NAMESPACE */
+ __le16 len;
+ struct ieee80211_radiotap_vendor_content content;
+};
+
+/* ieee80211_radiotap_eht_usig - content of U-SIG TLV (type 33)
+ * see www.radiotap.org/fields/U-SIG.html for details
+ */
+struct ieee80211_radiotap_eht_usig {
+ __le32 common;
+ __le32 value;
+ __le32 mask;
+} __packed;
+
+/* ieee80211_radiotap_eht - content of EHT TLV (type 34)
+ * see www.radiotap.org/fields/EHT.html for details
+ */
+struct ieee80211_radiotap_eht {
+ __le32 known;
+ __le32 data[9];
+ __le32 user_info[];
+} __packed;
+
+/* Known field for EHT TLV
+ * The suffix indicates what the field applies to, as follows:
+ * O - OFDMA (including TB), M - MU-MIMO, S - EHT sounding.
+ */
+enum ieee80211_radiotap_eht_known {
+ IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE = 0x00000002,
+ IEEE80211_RADIOTAP_EHT_KNOWN_GI = 0x00000004,
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF = 0x00000010,
+ IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM = 0x00000020,
+ IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM = 0x00000040,
+ IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM = 0x00000080,
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O = 0x00000100,
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S = 0x00000200,
+ IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 = 0x00002000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1 = 0x00004000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_CRC2_O = 0x00008000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_TAIL2_O = 0x00010000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S = 0x00020000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S = 0x00040000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_ENCODING_BLOCK_CRC_M = 0x00100000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_ENCODING_BLOCK_TAIL_M = 0x00200000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM = 0x00400000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM = 0x00800000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT = 0x01000000,
+ IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80 = 0x02000000,
+};
+
+enum ieee80211_radiotap_eht_data {
+ /* Data 0 */
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE = 0x00000078,
+ IEEE80211_RADIOTAP_EHT_DATA0_GI = 0x00000180,
+ IEEE80211_RADIOTAP_EHT_DATA0_LTF = 0x00000600,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF = 0x00003800,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM = 0x00004000,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM = 0x00018000,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM = 0x00020000,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S = 0x000c0000,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O = 0x003c0000,
+ IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O = 0x03c00000,
+ IEEE80211_RADIOTAP_EHT_DATA0_TAIL1_O = 0xfc000000,
+ /* Data 1 */
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE = 0x0000001f,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX = 0x00001fe0,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1 = 0x003fe000,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN = 0x00400000,
+ IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80 = 0xc0000000,
+ /* Data 2 */
+ IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_1 = 0x000001ff,
+ IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_1_KNOWN = 0x00000200,
+ IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2 = 0x0007fc00,
+ IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_2 = 0x1ff00000,
+ IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_2_KNOWN = 0x20000000,
+ /* Data 3 */
+ IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1 = 0x000001ff,
+ IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN = 0x00000200,
+ IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_2_2_1 = 0x0007fc00,
+ IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_2_2_1_KNOWN = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2 = 0x1ff00000,
+ IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN = 0x20000000,
+ /* Data 4 */
+ IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_2 = 0x000001ff,
+ IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_2_KNOWN = 0x00000200,
+ IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3 = 0x0007fc00,
+ IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_3 = 0x1ff00000,
+ IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_3_KNOWN = 0x20000000,
+ /* Data 5 */
+ IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4 = 0x000001ff,
+ IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN = 0x00000200,
+ IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_2_2_4 = 0x0007fc00,
+ IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_2_2_4_KNOWN = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5 = 0x1ff00000,
+ IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN = 0x20000000,
+ /* Data 6 */
+ IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_5 = 0x000001ff,
+ IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_5_KNOWN = 0x00000200,
+ IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6 = 0x0007fc00,
+ IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_6 = 0x1ff00000,
+ IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_6_KNOWN = 0x20000000,
+ /* Data 7 */
+ IEEE80211_RADIOTAP_EHT_DATA7_CRC2_O = 0x0000000f,
+ IEEE80211_RADIOTAP_EHT_DATA7_TAIL_2_O = 0x000003f0,
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S = 0x0000f000,
+ IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S = 0x00010000,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS = 0x000e0000,
+ IEEE80211_RADIOTAP_EHT_DATA7_USER_ENCODING_BLOCK_CRC = 0x00f00000,
+ IEEE80211_RADIOTAP_EHT_DATA7_USER_ENCODING_BLOCK_TAIL = 0x3f000000,
+ /* Data 8 */
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160 = 0x00000001,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0 = 0x00000002,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1 = 0x000001fc,
+};
+
+enum ieee80211_radiotap_eht_user_info {
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN = 0x00000001,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN = 0x00000002,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN = 0x00000004,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O = 0x00000010,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O = 0x00000020,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M = 0x00000040,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER = 0x00000080,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID = 0x0007ff00,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING = 0x00080000,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS = 0x00f00000,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O = 0x0f000000,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O = 0x20000000,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M = 0x3f000000,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_RESERVED_c0000000 = 0xc0000000,
+};
+
+enum ieee80211_radiotap_eht_usig_common {
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN = 0x00000001,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN = 0x00000002,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN = 0x00000004,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN = 0x00000008,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN = 0x00000010,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC = 0x00000020,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED = 0x00000040,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK = 0x00000080,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER = 0x00007000,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW = 0x00038000,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ = 0,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ = 1,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ = 2,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ = 3,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1 = 4,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_2 = 5,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL = 0x00040000,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR = 0x01f80000,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP = 0xfe000000,
+};
+
+enum ieee80211_radiotap_eht_usig_mu {
+ /* MU-USIG-1 */
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD = 0x0000001f,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE = 0x00000020,
+ /* MU-USIG-2 */
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE = 0x000000c0,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE = 0x00000100,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO = 0x00003e00,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE = 0x00004000,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS = 0x00018000,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS = 0x003e0000,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC = 0x03c00000,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL = 0xfc000000,
+};
+
+enum ieee80211_radiotap_eht_usig_tb {
+ /* TB-USIG-1 */
+ IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD = 0x0000001f,
+
+ /* TB-USIG-2 */
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE = 0x000000c0,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE = 0x00000100,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1 = 0x00001e00,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2 = 0x0001e000,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD = 0x003e0000,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC = 0x03c00000,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL = 0xfc000000,
+};
+
/**
* ieee80211_get_radiotap_len - get radiotap header length
+ * @data: pointer to the header
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
- struct ieee80211_radiotap_header *hdr = (void *)data;
+ const struct ieee80211_radiotap_header *hdr = (const void *)data;
return get_unaligned_le16(&hdr->it_len);
}
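
ieee80211_get_radiotap_len() above reduces to reading the little-endian it_len halfword at byte offset 2 of a possibly unaligned buffer. A standalone sketch of the same read, assembling the bytes by hand instead of calling get_unaligned_le16() so the demo stays endian-independent:

#include <stdint.h>
#include <stdio.h>

static uint16_t radiotap_len(const unsigned char *data)
{
	/* it_len lives at bytes 2..3 and is little-endian on the wire */
	return (uint16_t)(data[2] | ((uint16_t)data[3] << 8));
}

int main(void)
{
	/* it_version 0, it_pad 0, it_len = 8, it_present = 0 */
	const unsigned char hdr[8] = { 0, 0, 0x08, 0x00, 0, 0, 0, 0 };

	printf("%u\n", radiotap_len(hdr));	/* prints 8 */
	return 0;
}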
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index d0d188c3294b..4de858f9929e 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -15,6 +15,22 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+ offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -22,6 +38,46 @@
#include <net/cfg802154.h>
+struct ieee802154_beacon_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 beacon_order:4,
+ superframe_order:4,
+ final_cap_slot:4,
+ battery_life_ext:1,
+ reserved0:1,
+ pan_coordinator:1,
+ assoc_permit:1;
+ u8 gts_count:3,
+ gts_reserved:4,
+ gts_permit:1;
+ u8 pend_short_addr_count:3,
+ reserved1:1,
+ pend_ext_addr_count:3,
+ reserved2:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 assoc_permit:1,
+ pan_coordinator:1,
+ reserved0:1,
+ battery_life_ext:1,
+ final_cap_slot:4,
+ superframe_order:4,
+ beacon_order:4;
+ u8 gts_permit:1,
+ gts_reserved:4,
+ gts_count:3;
+ u8 reserved2:1,
+ pend_ext_addr_count:3,
+ reserved1:1,
+ pend_short_addr_count:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct ieee802154_mac_cmd_pl {
+ u8 cmd_id;
+} __packed;
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -69,6 +125,63 @@ struct ieee802154_hdr_fc {
#endif
};
+struct ieee802154_assoc_req_pl {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved1:1,
+ device_type:1,
+ power_source:1,
+ rx_on_when_idle:1,
+ assoc_type:1,
+ reserved2:1,
+ security_cap:1,
+ alloc_addr:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 alloc_addr:1,
+ security_cap:1,
+ reserved2:1,
+ assoc_type:1,
+ rx_on_when_idle:1,
+ power_source:1,
+ device_type:1,
+ reserved1:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct ieee802154_assoc_resp_pl {
+ __le16 short_addr;
+ u8 status;
+} __packed;
+
+enum ieee802154_frame_version {
+ IEEE802154_2003_STD,
+ IEEE802154_2006_STD,
+ IEEE802154_STD,
+ IEEE802154_RESERVED_STD,
+ IEEE802154_MULTIPURPOSE_STD = IEEE802154_2003_STD,
+};
+
+enum ieee802154_addressing_mode {
+ IEEE802154_NO_ADDRESSING,
+ IEEE802154_RESERVED,
+ IEEE802154_SHORT_ADDRESSING,
+ IEEE802154_EXTENDED_ADDRESSING,
+};
+
+enum ieee802154_association_status {
+ IEEE802154_ASSOCIATION_SUCCESSFUL = 0x00,
+ IEEE802154_PAN_AT_CAPACITY = 0x01,
+ IEEE802154_PAN_ACCESS_DENIED = 0x02,
+ IEEE802154_HOPPING_SEQUENCE_OFFSET_DUP = 0x03,
+ IEEE802154_FAST_ASSOCIATION_SUCCESSFUL = 0x80,
+};
+
+enum ieee802154_disassociation_reason {
+ IEEE802154_COORD_WISHES_DEVICE_TO_LEAVE = 0x1,
+ IEEE802154_DEVICE_WISHES_TO_LEAVE = 0x2,
+};
+
struct ieee802154_hdr {
struct ieee802154_hdr_fc fc;
u8 seq;
@@ -77,6 +190,39 @@ struct ieee802154_hdr {
struct ieee802154_sechdr sec;
};
+struct ieee802154_beacon_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_beacon_hdr mac_pl;
+};
+
+struct ieee802154_mac_cmd_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+};
+
+struct ieee802154_beacon_req_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+};
+
+struct ieee802154_association_req_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ struct ieee802154_assoc_req_pl assoc_req_pl;
+};
+
+struct ieee802154_association_resp_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ struct ieee802154_assoc_resp_pl assoc_resp_pl;
+};
+
+struct ieee802154_disassociation_notif_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ u8 disassoc_pl;
+};
+
/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
* the contents of hdr will be, and the actual value of those bits in
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
@@ -102,6 +248,14 @@ int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
*/
int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+/* pushes/pulls various frame types into/from an skb */
+int ieee802154_beacon_push(struct sk_buff *skb,
+ struct ieee802154_beacon_frame *beacon);
+int ieee802154_mac_cmd_push(struct sk_buff *skb, void *frame,
+ const void *pl, unsigned int pl_len);
+int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb,
+ struct ieee802154_mac_cmd_pl *mac_pl);
+
int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
static inline int
@@ -165,6 +319,33 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
memcpy(raw, &temp, IEEE802154_ADDR_LEN);
}
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+ struct ieee802154_addr_sa *sa;
+ int ret = 0;
+
+ sa = &daddr->addr;
+ if (len < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ switch (sa->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (len < IEEE802154_NAMELEN_SHORT)
+ ret = -EINVAL;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (len < IEEE802154_NAMELEN_LONG)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
const struct ieee802154_addr_sa *sa)
{
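
The IEEE802154_REQUIRED_SIZE()/ieee802154_sockaddr_check_size() pair above verifies that a caller-supplied sockaddr is long enough for the address type it claims, so a short-address bind may legitimately pass fewer bytes than an extended-address one. A self-contained sketch of the offsetof + sizeof arithmetic; struct demo_addr is a simplified stand-in, not the real uapi layout:

#include <stddef.h>
#include <stdio.h>

#define REQUIRED_SIZE(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

struct demo_addr {
	int addr_type;
	unsigned short short_addr;
	unsigned char hwaddr[8];
};

int main(void)
{
	printf("min=%zu short=%zu long=%zu\n",
	       REQUIRED_SIZE(struct demo_addr, addr_type),
	       REQUIRED_SIZE(struct demo_addr, short_addr),
	       REQUIRED_SIZE(struct demo_addr, hwaddr));
	return 0;
}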
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 8bf5906073bc..238ad3349456 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -22,10 +22,6 @@
#define IF_RS_SENT 0x10
#define IF_READY 0x80000000
-/* prefix flags */
-#define IF_PREFIX_ONLINK 0x01
-#define IF_PREFIX_AUTOCONF 0x02
-
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
@@ -64,6 +60,14 @@ struct inet6_ifaddr {
struct hlist_node addr_lst;
struct list_head if_list;
+ /*
+ * Used to safely traverse idev->addr_list in process context
+ * if the idev->lock needed to protect idev->addr_list cannot be held.
+ * In that case, add the items to this list temporarily and iterate
+ * without holding idev->lock.
+ * See addrconf_ifdown and dev_forward_change.
+ */
+ struct list_head if_list_aux;
struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
@@ -71,6 +75,8 @@ struct inet6_ifaddr {
bool tokenized;
+ u8 ifa_proto;
+
struct rcu_head rcu;
struct in6_addr peer_addr;
};
@@ -78,12 +84,10 @@ struct inet6_ifaddr {
struct ip6_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
- struct in6_addr sl_addr[];
+ struct rcu_head rcu;
+ struct in6_addr sl_addr[] __counted_by(sl_max);
};
-#define IP6_SFLSIZE(count) (sizeof(struct ip6_sf_socklist) + \
- (count) * sizeof(struct in6_addr))
-
#define IP6_SFBLOCK 10 /* allocate this many at once */
struct ipv6_mc_socklist {
@@ -91,18 +95,18 @@ struct ipv6_mc_socklist {
int ifindex;
unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
struct ipv6_mc_socklist __rcu *next;
- rwlock_t sflock;
- struct ip6_sf_socklist *sflist;
+ struct ip6_sf_socklist __rcu *sflist;
struct rcu_head rcu;
};
struct ip6_sf_list {
- struct ip6_sf_list *sf_next;
+ struct ip6_sf_list __rcu *sf_next;
struct in6_addr sf_addr;
unsigned long sf_count[2]; /* include/exclude counts */
unsigned char sf_gsresp; /* include in g & s response? */
unsigned char sf_oldin; /* change state */
unsigned char sf_crcount; /* retrans. left to send */
+ struct rcu_head rcu;
};
#define MAF_TIMER_RUNNING 0x01
@@ -114,19 +118,19 @@ struct ip6_sf_list {
struct ifmcaddr6 {
struct in6_addr mca_addr;
struct inet6_dev *idev;
- struct ifmcaddr6 *next;
- struct ip6_sf_list *mca_sources;
- struct ip6_sf_list *mca_tomb;
+ struct ifmcaddr6 __rcu *next;
+ struct ip6_sf_list __rcu *mca_sources;
+ struct ip6_sf_list __rcu *mca_tomb;
unsigned int mca_sfmode;
unsigned char mca_crcount;
unsigned long mca_sfcount[2];
- struct timer_list mca_timer;
+ struct delayed_work mca_work;
unsigned int mca_flags;
int mca_users;
refcount_t mca_refcnt;
- spinlock_t mca_lock;
unsigned long mca_cstamp;
unsigned long mca_tstamp;
+ struct rcu_head rcu;
};
/* Anycast stuff */
@@ -140,7 +144,7 @@ struct ipv6_ac_socklist {
struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
- struct ifacaddr6 *aca_next;
+ struct ifacaddr6 __rcu *aca_next;
struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
@@ -162,12 +166,12 @@ struct ipv6_devstat {
struct inet6_dev {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct list_head addr_list;
- struct ifmcaddr6 *mc_list;
- struct ifmcaddr6 *mc_tomb;
- spinlock_t mc_lock;
+ struct ifmcaddr6 __rcu *mc_list;
+ struct ifmcaddr6 __rcu *mc_tomb;
unsigned char mc_qrv; /* Query Robustness Variable */
unsigned char mc_gq_running;
@@ -179,11 +183,20 @@ struct inet6_dev {
unsigned long mc_qri; /* Query Response Interval */
unsigned long mc_maxdelay;
- struct timer_list mc_gq_timer; /* general query timer */
- struct timer_list mc_ifc_timer; /* interface change timer */
- struct timer_list mc_dad_timer; /* dad complete mc timer */
+ struct delayed_work mc_gq_work; /* general query work */
+ struct delayed_work mc_ifc_work; /* interface change work */
+ struct delayed_work mc_dad_work; /* dad complete mc work */
+ struct delayed_work mc_query_work; /* mld query work */
+ struct delayed_work mc_report_work; /* mld report work */
+
+ struct sk_buff_head mc_query_queue; /* mld query queue */
+ struct sk_buff_head mc_report_queue; /* mld report queue */
- struct ifacaddr6 *ac_list;
+ spinlock_t mc_query_lock; /* mld query queue lock */
+ spinlock_t mc_report_lock; /* mld report queue lock */
+ struct mutex mc_lock; /* mld global lock */
+
+ struct ifacaddr6 __rcu *ac_list;
rwlock_t lock;
refcount_t refcnt;
__u32 if_flags;
@@ -204,6 +217,8 @@ struct inet6_dev {
unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
struct rcu_head rcu;
+
+ unsigned int ra_mtu;
};
static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf)
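
The __counted_by(sl_max) annotation on sl_addr[] above hands the compiler and the kernel's bounds-checking machinery a runtime bound for the flexible array, which is also why the open-coded IP6_SFLSIZE() macro could be dropped. A userspace sketch of the allocation arithmetic it documents, with plain C types and without the attribute (which needs a recent compiler):

#include <stdlib.h>

struct sf_demo {
	unsigned int sl_max;	/* the bound __counted_by() refers to */
	unsigned int sl_count;
	unsigned int sl_addr[];	/* flexible array, sl_max entries */
};

static struct sf_demo *sf_alloc(unsigned int max)
{
	/* the same arithmetic IP6_SFLSIZE() used to spell out by hand */
	struct sf_demo *p = calloc(1, sizeof(*p) +
				      (size_t)max * sizeof(p->sl_addr[0]));

	if (p)
		p->sl_max = max;
	return p;
}

int main(void)
{
	struct sf_demo *p = sf_alloc(10);

	if (!p)
		return 1;
	free(p);
	return 0;
}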
diff --git a/include/net/ila.h b/include/net/ila.h
deleted file mode 100644
index f98dcd5791b0..000000000000
--- a/include/net/ila.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * ILA kernel interface
- *
- * Copyright (c) 2015 Tom Herbert <tom@herbertland.com>
- */
-
-#ifndef _NET_ILA_H
-#define _NET_ILA_H
-
-int ila_xlat_outgoing(struct sk_buff *skb);
-int ila_xlat_incoming(struct sk_buff *skb);
-
-#endif /* _NET_ILA_H */
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 7392f959a405..025bd8d3c769 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -11,6 +11,8 @@
#include <linux/types.h>
+struct flowi;
+struct flowi6;
struct request_sock;
struct sk_buff;
struct sock;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 81b965953036..533a7337865a 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -48,6 +48,22 @@ struct sock *__inet6_lookup_established(struct net *net,
const u16 hnum, const int dif,
const int sdif);
+typedef u32 (inet6_ehashfn_t)(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport);
+
+inet6_ehashfn_t inet6_ehashfn;
+
+INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
+
+struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned short hnum,
+ inet6_ehashfn_t *ehashfn);
+
struct sock *inet6_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -57,6 +73,15 @@ struct sock *inet6_lookup_listener(struct net *net,
const unsigned short hnum,
const int dif, const int sdif);
+struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+ int protocol,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum, const int dif,
+ inet6_ehashfn_t *ehashfn);
+
static inline struct sock *__inet6_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -78,6 +103,46 @@ static inline struct sock *__inet6_lookup(struct net *net,
daddr, hnum, dif, sdif);
}
+static inline
+struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr, const __be16 sport,
+ const struct in6_addr *daddr, const __be16 dport,
+ bool *refcounted, inet6_ehashfn_t *ehashfn)
+{
+ struct sock *sk, *reuse_sk;
+ bool prefetched;
+
+ sk = skb_steal_sock(skb, refcounted, &prefetched);
+ if (!sk)
+ return NULL;
+
+ if (!prefetched || !sk_fullsock(sk))
+ return sk;
+
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_state != TCP_LISTEN)
+ return sk;
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
+ if (sk->sk_state != TCP_CLOSE)
+ return sk;
+ } else {
+ return sk;
+ }
+
+ reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff,
+ saddr, sport, daddr, ntohs(dport),
+ ehashfn);
+ if (!reuse_sk)
+ return sk;
+
+ /* We've chosen a new reuseport sock which is never refcounted. This
+ * implies that sk also isn't refcounted.
+ */
+ WARN_ON_ONCE(*refcounted);
+
+ return reuse_sk;
+}
+
static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be16 sport,
@@ -85,14 +150,20 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
int iif, int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb, refcounted);
-
+ struct net *net = dev_net(skb_dst(skb)->dev);
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct sock *sk;
+
+ sk = inet6_steal_sock(net, skb, doff, &ip6h->saddr, sport, &ip6h->daddr, dport,
+ refcounted, inet6_ehashfn);
+ if (IS_ERR(sk))
+ return NULL;
if (sk)
return sk;
- return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
- doff, &ipv6_hdr(skb)->saddr, sport,
- &ipv6_hdr(skb)->daddr, ntohs(dport),
+ return __inet6_lookup(net, hashinfo, skb,
+ doff, &ip6h->saddr, sport,
+ &ip6h->daddr, ntohs(dport),
iif, sdif, refcounted);
}
@@ -103,15 +174,24 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
const int dif);
int inet6_hash(struct sock *sk);
-#endif /* IS_ENABLED(CONFIG_IPV6) */
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
+static inline bool inet6_match(struct net *net, const struct sock *sk,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ const __portpair ports,
+ const int dif, const int sdif)
+{
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_family != AF_INET6 ||
+ sk->sk_portpair != ports ||
+ !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return false;
+
+ /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+ sdif);
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index cb2818862919..f50a644d87a9 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -3,6 +3,10 @@
#define _INET_COMMON_H
#include <linux/indirect_call_wrapper.h>
+#include <linux/net.h>
+#include <linux/netdev_features.h>
+#include <linux/types.h>
+#include <net/sock.h>
extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;
@@ -12,6 +16,8 @@ extern const struct proto_ops inet_dgram_ops;
*/
struct msghdr;
+struct net;
+struct page;
struct sock;
struct sockaddr;
struct socket;
@@ -25,22 +31,27 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern);
+void __inet_accept(struct socket *sock, struct socket *newsock,
+ struct sock *newsk);
int inet_send_prepare(struct sock *sk);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
-ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
+void inet_splice_eof(struct socket *sock);
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
+int __inet_listen_sk(struct sock *sk, int backlog);
void inet_sock_destruct(struct sock *sk);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
/* Don't allocate port at this moment, defer to connect. */
#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0)
/* Grab and release socket lock. */
#define BIND_WITH_LOCK (1 << 1)
/* Called from BPF program. */
#define BIND_FROM_BPF (1 << 2)
+/* Skip CAP_NET_BIND_SERVICE check. */
+#define BIND_NO_CAP_NET_BIND_SERVICE (1 << 3)
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
u32 flags);
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index aa8893c68c50..9ab4bf704e86 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -25,6 +25,7 @@
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
+struct inet_bind2_bucket;
struct tcp_congestion_ops;
/*
@@ -43,7 +44,6 @@ struct inet_connection_sock_af_ops {
struct request_sock *req_unhash,
bool *own_req);
u16 net_header_len;
- u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
@@ -57,6 +57,7 @@ struct inet_connection_sock_af_ops {
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
+ * @icsk_bind2_hash: Bind node in the bhash2 table
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
@@ -66,7 +67,6 @@ struct inet_connection_sock_af_ops {
* @icsk_ulp_ops Pluggable ULP control hook
* @icsk_ulp_data ULP private data
* @icsk_clean_acked Clean acked data hook
- * @icsk_listen_portaddr_node hash to the portaddr listener hashtable
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -76,25 +76,30 @@ struct inet_connection_sock_af_ops {
* @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
* @icsk_ack: Delayed ACK control data
* @icsk_mtup: MTU probing control data
+ * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout: TCP_USER_TIMEOUT value
*/
struct inet_connection_sock {
/* inet_sock has to be the first member! */
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
+ struct inet_bind2_bucket *icsk_bind2_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
+ __u32 icsk_rto_min;
+ __u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void __rcu *icsk_ulp_data;
void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
- struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state:6,
+ __u8 icsk_ca_state:5,
+ icsk_ca_initialized:1,
icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
@@ -107,35 +112,38 @@ struct inet_connection_sock {
__u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
- __u8 blocked; /* Delayed ACK was blocked by socket lock */
- __u32 ato; /* Predicted tick of soft clock */
+ __u8 retry; /* Number of attempts */
+ #define ATO_BITS 8
+ __u32 ato:ATO_BITS, /* Predicted tick of soft clock */
+ lrcv_flowlabel:20, /* last received ipv6 flowlabel */
+ unused:4;
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
__u16 last_seg_size; /* Size of last incoming segment */
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
} icsk_ack;
struct {
- int enabled;
-
/* Range of MTUs to search */
int search_high;
int search_low;
/* Information on the current probe. */
- int probe_size;
+ u32 probe_size:31,
+ /* Is the MTUP feature enabled for this connection? */
+ enabled:1;
u32 probe_timestamp;
} icsk_mtup;
+ u32 icsk_probes_tstamp;
u32 icsk_user_timeout;
u64 icsk_ca_priv[104 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
+#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
#define ICSK_TIME_DACK 2 /* Delayed ack timer */
#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
-#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */
#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */
@@ -158,7 +166,8 @@ enum inet_csk_ack_state_t {
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
ICSK_ACK_PUSHED2 = 8,
- ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
+ ICSK_ACK_NOW = 16, /* Send the next ACK immediately (once) */
+ ICSK_ACK_NOMEM = 32,
};
void inet_csk_init_xmit_timers(struct sock *sk,
@@ -195,7 +204,8 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
@@ -220,8 +230,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
}
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
- what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE ||
- what == ICSK_TIME_REO_TIMEOUT) {
+ what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
icsk->icsk_pending = what;
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
@@ -277,14 +286,22 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
+static inline unsigned long
+reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
+{
+ u64 timeout = (u64)req->timeout << req->num_timeout;
+
+ return (unsigned long)min_t(u64, timeout, max_timeout);
+}
+
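
reqsk_timeout() above implements exponential backoff by shifting: the base timeout doubles per retransmission and is clamped to max_timeout. A quick standalone check of the arithmetic with made-up jiffies values:

#include <stdint.h>
#include <stdio.h>

static unsigned long reqsk_timeout_demo(uint64_t base, int num_timeout,
					unsigned long max_timeout)
{
	uint64_t timeout = base << num_timeout;

	return (unsigned long)(timeout < max_timeout ? timeout : max_timeout);
}

int main(void)
{
	/* base 1000, cap 60000: prints 1000 2000 4000 8000 16000 32000 60000 */
	for (int i = 0; i <= 6; i++)
		printf("%lu\n", reqsk_timeout_demo(1000, i, 60000));
	return 0;
}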
static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
/* The below has to be done to allow calling inet_csk_destroy_sock */
sock_set_flag(sk, SOCK_DEAD);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
}
void inet_csk_destroy_sock(struct sock *sk);
@@ -299,7 +316,7 @@ static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
(EPOLLIN | EPOLLRDNORM) : 0;
}
-int inet_csk_listen_start(struct sock *sk, int backlog);
+int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
@@ -310,11 +327,10 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
-#define TCP_PINGPONG_THRESH 3
-
static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
- inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
+ inet_csk(sk)->icsk_ack.pingpong =
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);
}
static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
@@ -324,7 +340,8 @@ static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
- return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+ return inet_csk(sk)->icsk_ack.pingpong >=
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);
}
static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
@@ -335,9 +352,17 @@ static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
icsk->icsk_ack.pingpong++;
}
-static inline bool inet_csk_has_ulp(struct sock *sk)
+static inline bool inet_csk_has_ulp(const struct sock *sk)
+{
+ return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
+}
+
+static inline void inet_init_csk_locks(struct sock *sk)
{
- return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+ spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
}
#endif /* _INET_CONNECTION_SOCK_H */
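
ICSK_CA_PRIV_SIZE above is now derived from the member itself via sizeof_field(), so the constant cannot drift from the array it describes. A tiny standalone check of that pattern; sizeof_field_demo is a local stand-in for the kernel macro:

#include <stdio.h>

#define sizeof_field_demo(type, member) sizeof(((type *)0)->member)

struct demo {
	unsigned long long ca_priv[104 / sizeof(unsigned long long)];
};

int main(void)
{
	printf("%zu\n", sizeof_field_demo(struct demo, ca_priv)); /* 104 */
	return 0;
}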
diff --git a/include/net/inet_dscp.h b/include/net/inet_dscp.h
new file mode 100644
index 000000000000..72f250dffada
--- /dev/null
+++ b/include/net/inet_dscp.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * inet_dscp.h: helpers for handling differentiated services codepoints (DSCP)
+ *
+ * DSCP is defined in RFC 2474:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +---+---+---+---+---+---+---+---+
+ * | DSCP | CU |
+ * +---+---+---+---+---+---+---+---+
+ *
+ * DSCP: differentiated services codepoint
+ * CU: currently unused
+ *
+ * The whole DSCP + CU bits form the DS field.
+ * The DS field is also commonly called TOS or Traffic Class (for IPv6).
+ *
+ * Note: the CU bits are now used for Explicit Congestion Notification
+ * (RFC 3168).
+ */
+
+#ifndef _INET_DSCP_H
+#define _INET_DSCP_H
+
+#include <linux/types.h>
+
+/* Special type for storing DSCP values.
+ *
+ * A dscp_t variable stores a DS field with the CU (ECN) bits cleared.
+ * Using dscp_t makes it possible to strictly separate DSCP and ECN bits, avoiding
+ * bugs where ECN bits are erroneously taken into account during FIB lookups
+ * or policy routing.
+ *
+ * Note: to get the real DSCP value contained in a dscp_t variable one would
+ * have to do a bit shift after calling inet_dscp_to_dsfield(). We could have
+ * a helper for that, but there are currently no users.
+ */
+typedef u8 __bitwise dscp_t;
+
+#define INET_DSCP_MASK 0xfc
+
+static inline dscp_t inet_dsfield_to_dscp(__u8 dsfield)
+{
+ return (__force dscp_t)(dsfield & INET_DSCP_MASK);
+}
+
+static inline __u8 inet_dscp_to_dsfield(dscp_t dscp)
+{
+ return (__force __u8)dscp;
+}
+
+static inline bool inet_validate_dscp(__u8 val)
+{
+ return !(val & ~INET_DSCP_MASK);
+}
+
+#endif /* _INET_DSCP_H */
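
A quick demo of the helpers above, with __bitwise/__force reduced to a plain u8 for a userspace build: the ECN bits are cleared on the way in, so lookups keyed on dscp_t cannot be perturbed by ECN marking.

#include <stdio.h>

typedef unsigned char u8;
typedef u8 dscp_t;	/* sparse annotations dropped for the demo */

#define INET_DSCP_MASK 0xfc

static dscp_t inet_dsfield_to_dscp(u8 dsfield)
{
	return (dscp_t)(dsfield & INET_DSCP_MASK);
}

int main(void)
{
	u8 tos = 0xb9;	/* DSCP 46 (EF) shifted left by 2, plus ECN bits 01 */

	printf("0x%02x\n", inet_dsfield_to_dscp(tos));	/* 0xb8: ECN gone */
	return 0;
}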
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index e1eaf1780288..ea32393464a2 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -8,6 +8,7 @@
#include <net/inet_sock.h>
#include <net/dsfield.h>
+#include <net/checksum.h>
enum {
INET_ECN_NOT_ECT = 0,
@@ -75,8 +76,8 @@ static inline void INET_ECN_dontxmit(struct sock *sk)
static inline int IP_ECN_set_ce(struct iphdr *iph)
{
- u32 check = (__force u32)iph->check;
u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
+ __be16 check_add;
/*
* After the last operation we have (in binary):
@@ -93,23 +94,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
* INET_ECN_ECT_1 => check += htons(0xFFFD)
* INET_ECN_ECT_0 => check += htons(0xFFFE)
*/
- check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);
+ check_add = (__force __be16)((__force u16)htons(0xFFFB) +
+ (__force u16)htons(ecn));
- iph->check = (__force __sum16)(check + (check>=0xFFFF));
+ iph->check = csum16_add(iph->check, check_add);
iph->tos |= INET_ECN_CE;
return 1;
}
static inline int IP_ECN_set_ect1(struct iphdr *iph)
{
- u32 check = (__force u32)iph->check;
-
if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
return 0;
- check += (__force u16)htons(0x100);
-
- iph->check = (__force __sum16)(check + (check>=0xFFFF));
+ iph->check = csum16_add(iph->check, htons(0x1));
iph->tos ^= INET_ECN_MASK;
return 1;
}
@@ -190,6 +188,23 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0;
}
+static inline int skb_get_dsfield(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ break;
+ return ipv4_get_dsfield(ip_hdr(skb));
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ break;
+ return ipv6_get_dsfield(ipv6_hdr(skb));
+ }
+
+ return -1;
+}
+
static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
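
The csum16_add() conversion above replaces the open-coded end-around carry with the shared helper; both perform the RFC 1071 one's-complement addition that the big comment in IP_ECN_set_ce() relies on. A standalone check of the fold arithmetic, using host-order values while the real checksum field is big-endian:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum16_add_demo(uint16_t csum, uint16_t addend)
{
	uint32_t res = (uint32_t)csum + addend;

	return (uint16_t)(res + (res >> 16));	/* fold the carry back in */
}

int main(void)
{
	/* ECT(0) -> CE: per the comment above, check += htons(0xFFFE),
	 * i.e. subtract one in one's-complement arithmetic */
	printf("0x%04x\n", csum16_add_demo(0x1234, 0xFFFE)); /* 0x1233 */
	return 0;
}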
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index bac79e817776..153960663ce4 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -4,6 +4,10 @@
#include <linux/rhashtable-types.h>
#include <linux/completion.h>
+#include <linux/in6.h>
+#include <linux/rbtree_types.h>
+#include <linux/refcount.h>
+#include <net/dropreason-core.h>
/* Per netns frag queues directory */
struct fqdir {
@@ -21,21 +25,24 @@ struct fqdir {
/* Keep atomic mem on separate cachelines in structs that include it */
atomic_long_t mem ____cacheline_aligned_in_smp;
struct work_struct destroy_work;
+ struct llist_node free_list;
};
/**
- * fragment queue flags
+ * enum: fragment queue flags
*
* @INET_FRAG_FIRST_IN: first fragment has arrived
* @INET_FRAG_LAST_IN: final fragment has arrived
* @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
* @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
+ * @INET_FRAG_DROP: if skbs must be dropped (instead of being consumed)
*/
enum {
INET_FRAG_FIRST_IN = BIT(0),
INET_FRAG_LAST_IN = BIT(1),
INET_FRAG_COMPLETE = BIT(2),
INET_FRAG_HASH_DEAD = BIT(3),
+ INET_FRAG_DROP = BIT(4),
};
struct frag_v4_compare_key {
@@ -69,6 +76,7 @@ struct frag_v6_compare_key {
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
+ * @mono_delivery_time: stamp has a mono delivery time (EDT)
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @fqdir: pointer to struct fqdir
@@ -89,6 +97,7 @@ struct inet_frag_queue {
ktime_t stamp;
int len;
int meat;
+ u8 mono_delivery_time;
__u8 flags;
u16 max_size;
struct fqdir *fqdir;
@@ -116,8 +125,15 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
- fqdir->high_thresh = 0; /* prevent creation of new frags */
- fqdir->dead = true;
+ /* Prevent creation of new frags.
+ * Pairs with READ_ONCE() in inet_frag_find().
+ */
+ WRITE_ONCE(fqdir->high_thresh, 0);
+
+ /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+ * and ip6frag_expire_frag_queue().
+ */
+ WRITE_ONCE(fqdir->dead, true);
}
void fqdir_exit(struct fqdir *fqdir);
@@ -126,7 +142,8 @@ void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
/* Free all skbs in the queue; return the sum of their truesizes. */
-unsigned int inet_frag_rbtree_purge(struct rb_root *root);
+unsigned int inet_frag_rbtree_purge(struct rb_root *root,
+ enum skb_drop_reason reason);
static inline void inet_frag_put(struct inet_frag_queue *q)
{
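
fqdir_pre_exit() above switches to WRITE_ONCE() because readers such as inet_frag_find() test these fields locklessly; the paired READ_ONCE()/WRITE_ONCE() accesses keep the compiler from tearing or caching them across the concurrent shutdown check. A compact userspace sketch of the pairing; the *_DEMO macros are simplified stand-ins for the kernel ones:

#include <stdbool.h>

#define WRITE_ONCE_DEMO(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE_DEMO(x)       (*(volatile const __typeof__(x) *)&(x))

struct fqdir_demo {
	long high_thresh;
	bool dead;
};

static void pre_exit(struct fqdir_demo *fqdir)
{
	WRITE_ONCE_DEMO(fqdir->high_thresh, 0);	/* no new frag queues */
	WRITE_ONCE_DEMO(fqdir->dead, true);
}

static bool may_create(const struct fqdir_demo *fqdir, long mem)
{
	/* reader side, e.g. inet_frag_find() */
	return mem <= READ_ONCE_DEMO(fqdir->high_thresh);
}

int main(void)
{
	struct fqdir_demo d = { .high_thresh = 4096, .dead = false };

	pre_exit(&d);
	return may_create(&d, 1);	/* 0: shutdown forbids new queues */
}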
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 92560974ea67..7f1b38458743 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -23,6 +23,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
+#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
@@ -87,10 +88,33 @@ struct inet_bind_bucket {
unsigned short fast_sk_family;
bool fast_ipv6_only;
struct hlist_node node;
+ struct hlist_head bhash2;
+};
+
+struct inet_bind2_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ unsigned short port;
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned short addr_type;
+ struct in6_addr v6_rcv_saddr;
+#define rcv_saddr v6_rcv_saddr.s6_addr32[3]
+#else
+ __be32 rcv_saddr;
+#endif
+ /* Node in the bhash2 inet_bind_hashbucket chain */
+ struct hlist_node node;
+ struct hlist_node bhash_node;
+ /* List of sockets hashed to this bucket */
struct hlist_head owners;
};
-static inline struct net *ib_net(struct inet_bind_bucket *ib)
+static inline struct net *ib_net(const struct inet_bind_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
{
return read_pnet(&ib->ib_net);
}
@@ -111,11 +135,7 @@ struct inet_bind_hashbucket {
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
- unsigned int count;
- union {
- struct hlist_head head;
- struct hlist_nulls_head nulls_head;
- };
+ struct hlist_nulls_head nulls_head;
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
@@ -137,31 +157,32 @@ struct inet_hashinfo {
* TCP hash as well as the others for fast bind/connect.
*/
struct kmem_cache *bind_bucket_cachep;
+ /* This bind table is hashed by local port */
struct inet_bind_hashbucket *bhash;
+ struct kmem_cache *bind2_bucket_cachep;
+ /* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
+ * or sk->sk_v6_rcv_saddr (ipv6). This 2nd bind table is used
+ * primarily for expediting bind conflict resolution.
+ */
+ struct inet_bind_hashbucket *bhash2;
unsigned int bhash_size;
/* The 2nd listener table hashed by local port and address */
unsigned int lhash2_mask;
struct inet_listen_hashbucket *lhash2;
- /* All the above members are written once at bootup and
- * never written again _or_ are predominantly read-access.
- *
- * Now align to a new cache line as all the following members
- * might be often dirty.
- */
- /* All sockets in TCP_LISTEN state will be in listening_hash.
- * This is the only table where wildcard'd TCP sockets can
- * exist. listening_hash is only hashed by local port number.
- * If lhash2 is initialized, the same socket will also be hashed
- * to lhash2 by port and address.
- */
- struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
- ____cacheline_aligned_in_smp;
-};
+ bool pernet;
+} ____cacheline_aligned_in_smp;
-#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
- hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
+static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IP_DCCP)
+ return sk->sk_prot->h.hashinfo ? :
+ sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#else
+ return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#endif
+}
static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
@@ -197,16 +218,9 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
hashinfo->ehash_locks = NULL;
}
-static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
- int dif, int sdif)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
- bound_dev_if, dif, sdif);
-#else
- return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
-#endif
-}
+struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
+ unsigned int ehash_entries);
+void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);
struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
@@ -215,40 +229,77 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
+bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ struct inet_bind_bucket *tb,
+ const struct sock *sk);
+
+void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind2_bucket *tb);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
+ const struct net *net,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev, const struct sock *sk);
+
static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
-void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
-
-/* These can have wildcards, don't try too hard. */
-static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
+static inline struct inet_bind_hashbucket *
+inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
+ const struct net *net, unsigned short port)
{
- return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
-}
+ u32 hash;
-static inline int inet_sk_listen_hashfn(const struct sock *sk)
-{
- return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
+ else
+#endif
+ hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
+ return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
+struct inet_bind_hashbucket *
+inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);
+
+/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
+ * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
+ * rcv_saddr field should already have been updated when this is called.
+ */
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
+void inet_bhash2_reset_saddr(struct sock *sk);
+
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2, unsigned short port);
+
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);
void inet_put_port(struct sock *sk);
-void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
-bool inet_ehash_insert(struct sock *sk, struct sock *osk);
-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
+bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
+ bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
@@ -288,7 +339,6 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
-#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
@@ -300,24 +350,20 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__force __u64)(__be32)(__daddr)) << 32) | \
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_addrpair == (__cookie)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
-#else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
- const int __name __deprecated __attribute__((unused))
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_daddr == (__saddr)) && \
- ((__sk)->sk_rcv_saddr == (__daddr)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
-#endif /* 64-bit arch */
+static inline bool inet_match(struct net *net, const struct sock *sk,
+ const __addrpair cookie, const __portpair ports,
+ int dif, int sdif)
+{
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_portpair != ports ||
+ sk->sk_addrpair != cookie)
+ return false;
+
+ /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+ sdif);
+}
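
INET_ADDR_COOKIE() above packs both IPv4 addresses into a single __addrpair so inet_match() can compare them with one 64-bit test (the 32-bit fallback was dropped together with the INET_MATCH() macro). A sketch of the packing; little-endian layout shown, while the real macro swaps the halves on big-endian:

#include <stdint.h>
#include <stdio.h>

static uint64_t addr_cookie(uint32_t saddr, uint32_t daddr)
{
	return ((uint64_t)daddr << 32) | saddr;
}

int main(void)
{
	uint64_t want = addr_cookie(0x0100007f, 0x0200007f);
	uint64_t have = addr_cookie(0x0100007f, 0x0200007f);

	printf("%d\n", want == have);	/* one compare instead of two */
	return 0;
}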
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
@@ -328,6 +374,27 @@ struct sock *__inet_lookup_established(struct net *net,
const __be32 daddr, const u16 hnum,
const int dif, const int sdif);
+typedef u32 (inet_ehashfn_t)(const struct net *net,
+ const __be32 laddr, const __u16 lport,
+ const __be32 faddr, const __be16 fport);
+
+inet_ehashfn_t inet_ehashfn;
+
+INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
+
+struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned short hnum,
+ inet_ehashfn_t *ehashfn);
+
+struct sock *inet_lookup_run_sk_lookup(struct net *net,
+ int protocol,
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, u16 hnum, const int dif,
+ inet_ehashfn_t *ehashfn);
+
static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
@@ -377,6 +444,46 @@ static inline struct sock *inet_lookup(struct net *net,
return sk;
}
+static inline
+struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ bool *refcounted, inet_ehashfn_t *ehashfn)
+{
+ struct sock *sk, *reuse_sk;
+ bool prefetched;
+
+ sk = skb_steal_sock(skb, refcounted, &prefetched);
+ if (!sk)
+ return NULL;
+
+ if (!prefetched || !sk_fullsock(sk))
+ return sk;
+
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_state != TCP_LISTEN)
+ return sk;
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
+ if (sk->sk_state != TCP_CLOSE)
+ return sk;
+ } else {
+ return sk;
+ }
+
+ reuse_sk = inet_lookup_reuseport(net, sk, skb, doff,
+ saddr, sport, daddr, ntohs(dport),
+ ehashfn);
+ if (!reuse_sk)
+ return sk;
+
+ /* We've chosen a new reuseport sock which is never refcounted. This
+ * implies that sk also isn't refcounted.
+ */
+ WARN_ON_ONCE(*refcounted);
+
+ return reuse_sk;
+}
+
static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
int doff,
@@ -385,22 +492,23 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb, refcounted);
+ struct net *net = dev_net(skb_dst(skb)->dev);
const struct iphdr *iph = ip_hdr(skb);
+ struct sock *sk;
+ sk = inet_steal_sock(net, skb, doff, iph->saddr, sport, iph->daddr, dport,
+ refcounted, inet_ehashfn);
+ if (IS_ERR(sk))
+ return NULL;
if (sk)
return sk;
- return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ return __inet_lookup(net, hashinfo, skb,
doff, iph->saddr, sport,
iph->daddr, dport, inet_iif(skb), sdif,
refcounted);
}
-u32 inet6_ehashfn(const struct net *net,
- const struct in6_addr *laddr, const u16 lport,
- const struct in6_addr *faddr, const __be16 fport);
-
static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
sk->sk_daddr = addr; /* alias of inet_daddr */
@@ -418,7 +526,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
}
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk, u32 port_offset,
+ struct sock *sk, u64 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
struct inet_timewait_sock **));
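
The bhash2 table above is keyed by (local port, local address) rather than port alone, so bind-conflict checks only walk sockets that could actually conflict. A toy sketch of the bucket selection; the mixing function is a stand-in, not the kernel's ipv4_portaddr_hash():

#include <stdint.h>
#include <stdio.h>

#define BHASH_SIZE 64	/* a power of two, like bhash_size */

static unsigned int portaddr_bucket(uint32_t saddr, uint16_t port)
{
	uint32_t h = saddr * 2654435761u ^ port;	/* stand-in hash mix */

	return h & (BHASH_SIZE - 1);
}

int main(void)
{
	/* same port, different local addresses: typically different chains */
	printf("%u %u\n", portaddr_bucket(0x7f000001, 8080),
			  portaddr_bucket(0x7f000002, 8080));
	return 0;
}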
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a3702d1d4875..f9ddd47dc4f8 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -107,23 +107,26 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
- if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
+ u32 mark = READ_ONCE(sk->sk_mark);
+
+ if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
return skb->mark;
- return sk->sk_mark;
+ return mark;
}
static inline int inet_request_bound_dev_if(const struct sock *sk,
struct sk_buff *skb)
{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
#ifdef CONFIG_NET_L3_MASTER_DEV
struct net *net = sock_net(sk);
- if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept)
+ if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
#endif
- return sk->sk_bound_dev_if;
+ return bound_dev_if;
}
static inline int inet_sk_bound_l3mdev(const struct sock *sk)
@@ -131,7 +134,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk)
#ifdef CONFIG_NET_L3_MASTER_DEV
struct net *net = sock_net(sk);
- if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
return l3mdev_master_ifindex_by_index(net,
sk->sk_bound_dev_if);
#endif
@@ -147,6 +150,17 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
return bound_dev_if == dif || bound_dev_if == sdif;
}
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
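A user-space sketch of the comparison in inet_bound_dev_eq() as used by the helper above: a bound socket accepts a packet when its ifindex matches either the receiving device (dif) or the L3 slave device (sdif). The unbound-socket case handled earlier in the real function is not modeled, and the names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool bound_dev_eq(int bound_dev_if, int dif, int sdif)
{
	return bound_dev_if == dif || bound_dev_if == sdif;
}

int main(void)
{
	/* VRF setup: packet arrives on slave ifindex 7 (sdif), master is 3 (dif) */
	printf("%d\n", bound_dev_eq(7, 3, 7)); /* 1: bound to the slave */
	printf("%d\n", bound_dev_eq(3, 3, 7)); /* 1: bound to the master */
	printf("%d\n", bound_dev_eq(5, 3, 7)); /* 0: bound elsewhere */
	return 0;
}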
struct inet_cork {
unsigned int flags;
__be32 addr;
@@ -180,13 +194,13 @@ struct rtable;
* @inet_rcv_saddr - Bound local IPv4 addr
* @inet_dport - Destination port
* @inet_num - Local port
+ * @inet_flags - various atomic flags
* @inet_saddr - Sending source
* @uc_ttl - Unicast TTL
* @inet_sport - Source port
* @inet_id - ID counter for DF pkts
* @tos - TOS
* @mc_ttl - Multicasting TTL
- * @is_icsk - is this an inet_connection_sock?
* @uc_index - Unicast outgoing device index
* @mc_index - Multicast device index
* @mc_list - Group array
@@ -204,54 +218,95 @@ struct inet_sock {
#define inet_dport sk.__sk_common.skc_dport
#define inet_num sk.__sk_common.skc_num
+ unsigned long inet_flags;
__be32 inet_saddr;
__s16 uc_ttl;
- __u16 cmsg_flags;
__be16 inet_sport;
- __u16 inet_id;
-
struct ip_options_rcu __rcu *inet_opt;
- int rx_dst_ifindex;
+ atomic_t inet_id;
+
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
- __u8 recverr:1,
- is_icsk:1,
- freebind:1,
- hdrincl:1,
- mc_loop:1,
- transparent:1,
- mc_all:1,
- nodefrag:1;
- __u8 bind_address_no_port:1,
- recverr_rfc4884:1,
- defer_connect:1; /* Indicates that fastopen_connect is set
- * and cookie exists so we defer connect
- * until first data frame is written
- */
__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
int mc_index;
__be32 mc_addr;
+ u32 local_port_range; /* high << 16 | low */
+
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
-#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+
+enum {
+ INET_FLAGS_PKTINFO = 0,
+ INET_FLAGS_TTL = 1,
+ INET_FLAGS_TOS = 2,
+ INET_FLAGS_RECVOPTS = 3,
+ INET_FLAGS_RETOPTS = 4,
+ INET_FLAGS_PASSSEC = 5,
+ INET_FLAGS_ORIGDSTADDR = 6,
+ INET_FLAGS_CHECKSUM = 7,
+ INET_FLAGS_RECVFRAGSIZE = 8,
+
+ INET_FLAGS_RECVERR = 9,
+ INET_FLAGS_RECVERR_RFC4884 = 10,
+ INET_FLAGS_FREEBIND = 11,
+ INET_FLAGS_HDRINCL = 12,
+ INET_FLAGS_MC_LOOP = 13,
+ INET_FLAGS_MC_ALL = 14,
+ INET_FLAGS_TRANSPARENT = 15,
+ INET_FLAGS_IS_ICSK = 16,
+ INET_FLAGS_NODEFRAG = 17,
+ INET_FLAGS_BIND_ADDRESS_NO_PORT = 18,
+ INET_FLAGS_DEFER_CONNECT = 19,
+ INET_FLAGS_MC6_LOOP = 20,
+ INET_FLAGS_RECVERR6_RFC4884 = 21,
+ INET_FLAGS_MC6_ALL = 22,
+ INET_FLAGS_AUTOFLOWLABEL_SET = 23,
+ INET_FLAGS_AUTOFLOWLABEL = 24,
+ INET_FLAGS_DONTFRAG = 25,
+ INET_FLAGS_RECVERR6 = 26,
+ INET_FLAGS_REPFLOW = 27,
+ INET_FLAGS_RTALERT_ISOLATE = 28,
+ INET_FLAGS_SNDFLOW = 29,
+ INET_FLAGS_RTALERT = 30,
+};
/* cmsg flags for inet */
-#define IP_CMSG_PKTINFO BIT(0)
-#define IP_CMSG_TTL BIT(1)
-#define IP_CMSG_TOS BIT(2)
-#define IP_CMSG_RECVOPTS BIT(3)
-#define IP_CMSG_RETOPTS BIT(4)
-#define IP_CMSG_PASSSEC BIT(5)
-#define IP_CMSG_ORIGDSTADDR BIT(6)
-#define IP_CMSG_CHECKSUM BIT(7)
-#define IP_CMSG_RECVFRAGSIZE BIT(8)
+#define IP_CMSG_PKTINFO BIT(INET_FLAGS_PKTINFO)
+#define IP_CMSG_TTL BIT(INET_FLAGS_TTL)
+#define IP_CMSG_TOS BIT(INET_FLAGS_TOS)
+#define IP_CMSG_RECVOPTS BIT(INET_FLAGS_RECVOPTS)
+#define IP_CMSG_RETOPTS BIT(INET_FLAGS_RETOPTS)
+#define IP_CMSG_PASSSEC BIT(INET_FLAGS_PASSSEC)
+#define IP_CMSG_ORIGDSTADDR BIT(INET_FLAGS_ORIGDSTADDR)
+#define IP_CMSG_CHECKSUM BIT(INET_FLAGS_CHECKSUM)
+#define IP_CMSG_RECVFRAGSIZE BIT(INET_FLAGS_RECVFRAGSIZE)
+
+#define IP_CMSG_ALL (IP_CMSG_PKTINFO | IP_CMSG_TTL | \
+ IP_CMSG_TOS | IP_CMSG_RECVOPTS | \
+ IP_CMSG_RETOPTS | IP_CMSG_PASSSEC | \
+ IP_CMSG_ORIGDSTADDR | IP_CMSG_CHECKSUM | \
+ IP_CMSG_RECVFRAGSIZE)
+
+static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
+{
+ return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL;
+}
+
+#define inet_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
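A standalone sketch of the token-pasting pattern behind inet_test_bit()/inet_set_bit(): the short flag name passed to the macro is expanded to the full INET_FLAGS_* enumerator at compile time. This model is non-atomic and uses hypothetical names; the kernel versions use the atomic bitops on inet_flags:

#include <stdio.h>

enum { FLAGS_HDRINCL = 12, FLAGS_TRANSPARENT = 15 };

struct fake_sk { unsigned long flags; };

#define sk_set_bit(nr, sk)	((sk)->flags |= 1UL << FLAGS_##nr)
#define sk_test_bit(nr, sk)	(!!((sk)->flags & (1UL << FLAGS_##nr)))

int main(void)
{
	struct fake_sk sk = { 0 };

	sk_set_bit(TRANSPARENT, &sk);	/* expands to FLAGS_TRANSPARENT */
	printf("transparent=%d hdrincl=%d\n",
	       sk_test_bit(TRANSPARENT, &sk), sk_test_bit(HDRINCL, &sk)); /* 1 0 */
	return 0;
}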
/**
* sk_to_full_sk - Access to a full socket
@@ -284,10 +339,7 @@ static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
return sk_to_full_sk(skb->sk);
}
-static inline struct inet_sock *inet_sk(const struct sock *sk)
-{
- return (struct inet_sock *)sk;
-}
+#define inet_sk(ptr) container_of_const(ptr, struct inet_sock, sk)
static inline void __inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from,
@@ -296,13 +348,6 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
-#if !(IS_ENABLED(CONFIG_IPV6))
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- __inet_sk_copy_descendant(sk_to, sk_from, sizeof(struct inet_sock));
-}
-#endif
int inet_sk_rebuild_header(struct sock *sk);
@@ -351,7 +396,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
__u8 flags = 0;
- if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
+ if (inet_test_bit(TRANSPARENT, sk) || inet_test_bit(HDRINCL, sk))
flags |= FLOWI_FLAG_ANYSRC;
return flags;
}
@@ -376,8 +421,21 @@ static inline bool inet_get_convert_csum(struct sock *sk)
static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
- return net->ipv4.sysctl_ip_nonlocal_bind ||
- inet->freebind || inet->transparent;
+ return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
+ test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
+ test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
+}
+
+static inline bool inet_addr_valid_or_nonlocal(struct net *net,
+ struct inet_sock *inet,
+ __be32 addr,
+ int addr_type)
+{
+ return inet_can_nonlocal_bind(net, inet) ||
+ addr == htonl(INADDR_ANY) ||
+ addr_type == RTN_LOCAL ||
+ addr_type == RTN_MULTICAST ||
+ addr_type == RTN_BROADCAST;
}
#endif /* _INET_SOCK_H */
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index dfd919b3119e..f28da08a37b4 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -65,15 +65,16 @@ struct inet_timewait_sock {
/* these three are in inet_sock */
__be16 tw_sport;
/* And these are ours. */
- unsigned int tw_kill : 1,
- tw_transparent : 1,
+ unsigned int tw_transparent : 1,
tw_flowlabel : 20,
+ tw_usec_ts : 1,
tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
+ struct inet_bind2_bucket *tw_tb2;
};
#define tw_tclass tw_tos
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
new file mode 100644
index 000000000000..2cbbee6e806a
--- /dev/null
+++ b/include/net/ioam6.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM implementation
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _NET_IOAM6_H
+#define _NET_IOAM6_H
+
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
+#include <linux/rhashtable-types.h>
+
+struct ioam6_namespace {
+ struct rhash_head head;
+ struct rcu_head rcu;
+
+ struct ioam6_schema __rcu *schema;
+
+ __be16 id;
+ __be32 data;
+ __be64 data_wide;
+};
+
+struct ioam6_schema {
+ struct rhash_head head;
+ struct rcu_head rcu;
+
+ struct ioam6_namespace __rcu *ns;
+
+ u32 id;
+ int len;
+ __be32 hdr;
+
+ u8 data[];
+};
+
+struct ioam6_pernet_data {
+ struct mutex lock;
+ struct rhashtable namespaces;
+ struct rhashtable schemas;
+};
+
+static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ return net->ipv6.ioam6_data;
+#else
+ return NULL;
+#endif
+}
+
+struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id);
+void ioam6_fill_trace_data(struct sk_buff *skb,
+ struct ioam6_namespace *ns,
+ struct ioam6_trace_hdr *trace,
+ bool is_input);
+
+int ioam6_init(void);
+void ioam6_exit(void);
+
+int ioam6_iptunnel_init(void);
+void ioam6_iptunnel_exit(void);
+
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len);
+
+#endif /* _NET_IOAM6_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index b09c48d862cc..25cb688bdc62 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -24,6 +24,7 @@
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
+#include <linux/static_key.h>
#include <net/inet_sock.h>
#include <net/route.h>
@@ -31,6 +32,7 @@
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
+#include <net/lwtunnel.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -54,6 +56,8 @@ struct inet_skb_parm {
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
+#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)
u16 frag_max_size;
};
@@ -73,6 +77,7 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
+ __u8 protocol;
__u8 ttl;
__s16 tos;
char priority;
@@ -89,17 +94,18 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
{
ipcm_init(ipcm);
- ipcm->sockc.mark = inet->sk.sk_mark;
- ipcm->sockc.tsflags = inet->sk.sk_tsflags;
- ipcm->oif = inet->sk.sk_bound_dev_if;
+ ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
+ ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
+ ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
+ ipcm->protocol = inet->inet_num;
}
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
/* return enslaved device index if relevant */
-static inline int inet_sdif(struct sk_buff *skb)
+static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
@@ -151,7 +157,7 @@ int igmp_mc_init(void);
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr,
- struct ip_options_rcu *opt);
+ struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
@@ -217,8 +223,6 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4,
unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb);
-ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
- int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork);
@@ -239,14 +243,22 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
-static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+/* Get the route scope that should be used when sending a packet. */
+static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
+ const struct ipcm_cookie *ipc,
+ const struct msghdr *msg)
{
- return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+ if (sock_flag(&inet->sk, SOCK_LOCALROUTE) ||
+ msg->msg_flags & MSG_DONTROUTE ||
+ (ipc->opt && ipc->opt->opt.is_strictroute))
+ return RT_SCOPE_LINK;
+
+ return RT_SCOPE_UNIVERSE;
}
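A small user-space model of the scope decision above: any of the three conditions (SOCK_LOCALROUTE on the socket, MSG_DONTROUTE on the message, or a strict source route in the IP options) restricts the route lookup to link scope. Names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum scope { SCOPE_UNIVERSE, SCOPE_LINK };

static enum scope sendmsg_scope(bool localroute, bool dontroute, bool strictroute)
{
	if (localroute || dontroute || strictroute)
		return SCOPE_LINK;
	return SCOPE_UNIVERSE;
}

int main(void)
{
	printf("%d\n", sendmsg_scope(false, false, false)); /* 0: universe */
	printf("%d\n", sendmsg_scope(false, true, false));  /* 1: MSG_DONTROUTE */
	return 0;
}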
-static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
- return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+ return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
}
/* datagram.c */
@@ -277,7 +289,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
- unsigned int len, u64 transmit_time);
+ unsigned int len, u64 transmit_time, u32 txhash);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
@@ -290,7 +302,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
@@ -333,7 +349,14 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-void inet_get_local_port_range(struct net *net, int *low, int *high);
+static inline void inet_get_local_port_range(const struct net *net, int *low, int *high)
+{
+ u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
+
+ *low = range & 0xffff;
+ *high = range >> 16;
+}
+bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
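A runnable demonstration of the "high << 16 | low" packing used by inet_get_local_port_range() above: both bounds fit in one u32, so a single READ_ONCE() yields a consistent pair without locking. The port values are examples only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int low = 32768, high = 60999;
	uint32_t range = ((uint32_t)high << 16) | (uint32_t)low;

	printf("packed=0x%08x low=%u high=%u\n",
	       range, range & 0xffff, range >> 16);
	/* packed=0xee478000 low=32768 high=60999 */
	return 0;
}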
#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
@@ -350,7 +373,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
- return port < net->ipv4.sysctl_ip_prot_sock;
+ return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}
#else
@@ -377,7 +400,7 @@ void ipfrag_init(void);
void ip_static_sysctl_init(void);
#define IP4_REPLY_MARK(net, mark) \
- ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+ (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
static inline bool ip_is_fragment(const struct iphdr *iph)
{
@@ -417,44 +440,70 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
- inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc != IP_PMTUDISC_INTERFACE &&
+ pmtudisc != IP_PMTUDISC_OMIT;
}
static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
+ return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE;
}
static inline bool ip_sk_ignore_df(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
- inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT;
}
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
+ const struct rtable *rt = container_of(dst, struct rtable, dst);
struct net *net = dev_net(dst->dev);
+ unsigned int mtu;
- if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+ if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
- !forwarding)
- return dst_mtu(dst);
+ !forwarding) {
+ mtu = rt->rt_pmtu;
+ if (mtu && time_before(jiffies, rt->dst.expires))
+ goto out;
+ }
- return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ /* 'forwarding = true' case should always honour route mtu */
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ goto out;
+
+ mtu = READ_ONCE(dst->dev->mtu);
+
+ if (unlikely(ip_mtu_locked(dst))) {
+ if (rt->rt_uses_gateway && mtu > 576)
+ mtu = 576;
+ }
+
+out:
+ mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
+
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
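A worked model of the final clamp in ip_dst_mtu_maybe_forward() above: whichever source the MTU came from (cached PMTU, route metric, or device), it is limited to IP_MAX_MTU and then reduced by the lightweight-tunnel headroom. The numbers are illustrative:

#include <stdio.h>

#define IP_MAX_MTU 65535U

static unsigned int effective_mtu(unsigned int mtu, unsigned int lwt_headroom)
{
	if (mtu > IP_MAX_MTU)
		mtu = IP_MAX_MTU;
	return mtu - lwt_headroom;
}

int main(void)
{
	printf("%u\n", effective_mtu(1500, 0)); /* plain ethernet: 1500 */
	printf("%u\n", effective_mtu(1500, 8)); /* illustrative encap headroom: 1492 */
	return 0;
}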
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
const struct sk_buff *skb)
{
+ unsigned int mtu;
+
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}
- return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
@@ -488,7 +537,6 @@ void ip_dst_metrics_put(struct dst_entry *dst)
kfree(p);
}
-u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
@@ -496,19 +544,29 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
{
struct iphdr *iph = ip_hdr(skb);
- if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
- /* This is only to work around buggy Windows95/2000
- * VJ compression implementations. If the ID field
- * does not change, they drop every other packet in
- * a TCP stream using header compression.
+ /* There have been many attacks based on IPID; use the private
+ * generator as much as we can.
+ */
+ if (sk && inet_sk(sk)->inet_daddr) {
+ int val;
+
+ /* avoid atomic operations for TCP,
+ * as we hold the socket lock at this point.
*/
- if (sk && inet_sk(sk)->inet_daddr) {
- iph->id = htons(inet_sk(sk)->inet_id);
- inet_sk(sk)->inet_id += segs;
+ if (sk_is_tcp(sk)) {
+ sock_owned_by_me(sk);
+ val = atomic_read(&inet_sk(sk)->inet_id);
+ atomic_set(&inet_sk(sk)->inet_id, val + segs);
} else {
- iph->id = 0;
+ val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
}
+ iph->id = htons(val);
+ return;
+ }
+ if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
+ iph->id = 0;
} else {
+ /* Unfortunately we need the big hammer to get a suitable IPID */
__ip_select_ident(net, iph, segs);
}
}
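A user-space illustration of why the per-socket counter above can simply keep incrementing: only the low 16 bits are written to iph->id via htons(), so wrap-around is harmless. Non-atomic model with example numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int id = 65530;		/* per-socket counter */
	int segs = 10;		/* segments in this super-packet */
	int val = id;		/* value used for this packet */

	id += segs;		/* advance for the next packet */
	printf("id field = %u, next = %u\n",
	       (uint16_t)val, (uint16_t)id); /* 65530, 4: 16-bit wrap is fine */
	return 0;
}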
@@ -535,18 +593,10 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
offsetof(typeof(flow->addrs), v4addrs.src) +
sizeof(flow->addrs.v4addrs.src));
- memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
-static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
-{
- const struct iphdr *iph = skb_gro_network_header(skb);
-
- return csum_tcpudp_nofold(iph->saddr, iph->daddr,
- skb_gro_len(skb), proto, 0);
-}
-
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
@@ -692,7 +742,7 @@ int ip_forward(struct sk_buff *skb);
*/
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
- __be32 daddr, struct rtable *rt, int is_frag);
+ __be32 daddr, struct rtable *rt);
int __ip_options_echo(struct net *net, struct ip_options *dopt,
struct sk_buff *skb, const struct ip_options *sopt);
@@ -717,13 +767,18 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
* Functions provided by ip_sockglue.c
*/
-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
+DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
+int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
@@ -761,5 +816,6 @@ int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
+void __ip_sock_set_tos(struct sock *sk, int val);
#endif /* _IP_H */
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index b3f4eaa88672..c8a96b888277 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -43,14 +43,6 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0));
}
-static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
-{
- const struct ipv6hdr *iph = skb_gro_network_header(skb);
-
- return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
- skb_gro_len(skb), proto, 0));
-}
-
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
@@ -65,15 +57,9 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
{
struct tcphdr *th = tcp_hdr(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
- } else {
- th->check = tcp_v6_check(skb->len, saddr, daddr,
- csum_partial(th, th->doff << 2,
- skb->csum));
- }
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
}
static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index ac5ff3c3afb1..323c94f1845b 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -20,6 +20,7 @@
#include <net/inetpeer.h>
#include <net/fib_notifier.h>
#include <linux/indirect_call_wrapper.h>
+#include <uapi/linux/bpf.h>
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
@@ -29,12 +30,6 @@
#define RT6_DEBUG 2
-#if RT6_DEBUG >= 3
-#define RT6_TRACE(x...) pr_debug(x)
-#else
-#define RT6_TRACE(x...) do { ; } while (0)
-#endif
-
struct rt6_info;
struct fib6_info;
@@ -178,6 +173,9 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
+
+ struct hlist_node gc_link;
+
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -189,13 +187,16 @@ struct fib6_info {
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
fib6_destroying:1,
- offload:1,
- trap:1,
- unused:2;
+ unused:4;
struct rcu_head rcu;
struct nexthop *nh;
@@ -213,9 +214,6 @@ struct rt6_info {
struct inet6_dev *rt6i_idev;
u32 rt6i_flags;
- struct list_head rt6i_uncached;
- struct uncached_list *rt6i_uncached_list;
-
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
};
@@ -246,12 +244,18 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
+/* The callers should hold f6i->fib6_table->tb6_lock if the route has
+ * ever been added to a table.
+ */
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~RTF_EXPIRES;
f6i->expires = 0;
}
+/* The callers should hold f6i->fib6_table->tb6_lock if the route has
+ * ever been added to a table.
+ */
static inline void fib6_set_expires(struct fib6_info *f6i,
unsigned long expires)
{
@@ -266,7 +270,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
return false;
}
-/* Function to safely get fn->sernum for passed in rt
+/* Function to safely get fn->fn_sernum for passed in rt
* and store result in passed in cookie.
* Return true if we can get cookie safely
* Return false if not
@@ -280,8 +284,8 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
fn = rcu_dereference(f6i->fib6_node);
if (fn) {
- *cookie = fn->fn_sernum;
- /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+ *cookie = READ_ONCE(fn->fn_sernum);
+ /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
smp_rmb();
status = true;
}
@@ -332,15 +336,10 @@ static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
static inline void fib6_info_release(struct fib6_info *f6i)
{
- if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
+ if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) {
+ DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link));
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
-}
-
-static inline void fib6_info_hw_flags_set(struct fib6_info *f6i, bool offload,
- bool trap)
-{
- f6i->offload = offload;
- f6i->trap = trap;
+ }
}
enum fib6_walk_state {
@@ -372,9 +371,8 @@ struct rt6_statistics {
__u32 fib_rt_cache; /* cached rt entries in exception table */
__u32 fib_discarded_routes; /* total number of routes delete */
- /* The following stats are not protected by any lock */
+ /* The following stat is not protected by any lock */
atomic_t fib_rt_alloc; /* total number of routes alloced */
- atomic_t fib_rt_uncache; /* rt entries in uncached list */
};
#define RTN_TL_ROOT 0x0001
@@ -395,6 +393,7 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
+ struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -476,13 +475,10 @@ void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
rcu_read_lock();
from = rcu_dereference(rt->from);
- if (from) {
+ if (from)
*addr = from->fib6_prefsrc.addr;
- } else {
- struct in6_addr in6_zero = {};
-
- *addr = in6_zero;
- }
+ else
+ *addr = in6addr_any;
rcu_read_unlock();
}
@@ -491,6 +487,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
@@ -513,6 +510,38 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
+/* Add the route to the gc list if it is not already there.
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_add_gc_list(struct fib6_info *f6i)
+{
+ /* If fib6_node is NULL, the f6i is not in (or has been removed
+ * from) the table.
+ *
+ * There is a window between looking up the f6i in the table and
+ * calling this function during which the tb6_lock is not held.
+ * This check makes sure the f6i is not added to the gc list when
+ * it is not in the table.
+ */
+ if (!rcu_dereference_protected(f6i->fib6_node,
+ lockdep_is_held(&f6i->fib6_table->tb6_lock)))
+ return;
+
+ if (hlist_unhashed(&f6i->gc_link))
+ hlist_add_head(&f6i->gc_link, &f6i->fib6_table->tb6_gc_hlist);
+}
+
+/* Remove the route from the gc list if it is on the list.
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_remove_gc_list(struct fib6_info *f6i)
+{
+ if (!hlist_unhashed(&f6i->gc_link))
+ hlist_del_init(&f6i->gc_link);
+}
+
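A minimal model of the idempotent pattern above: hlist_unhashed() serves as the membership test, so add and remove are safe to call unconditionally under tb6_lock. A boolean stands in for the hlist linkage; the names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct entry { bool hashed; };

static void gc_add(struct entry *e, int *len)
{
	if (!e->hashed) { e->hashed = true; (*len)++; }
}

static void gc_remove(struct entry *e, int *len)
{
	if (e->hashed) { e->hashed = false; (*len)--; }
}

int main(void)
{
	struct entry e = { false };
	int len = 0;

	gc_add(&e, &len);
	gc_add(&e, &len);	/* second add is a no-op */
	printf("len=%d\n", len); /* 1 */
	gc_remove(&e, &len);
	gc_remove(&e, &len);	/* second remove is a no-op */
	printf("len=%d\n", len); /* 0 */
	return 0;
}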
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
@@ -545,6 +574,8 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
{
return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric));
}
+void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
+ bool offload, bool trap, bool offload_failed);
#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
struct bpf_iter__ipv6_route {
@@ -611,7 +642,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
if (!net->ipv6.fib6_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl6->fl6_sport = flkeys->ports.src;
fl6->fl6_dport = flkeys->ports.dst;
fl6->flowi6_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2a5277758379..a30c6aa9e5cf 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -2,6 +2,16 @@
#ifndef _NET_IP6_ROUTE_H
#define _NET_IP6_ROUTE_H
+#include <net/addrconf.h>
+#include <net/flow.h>
+#include <net/ip6_fib.h>
+#include <net/sock.h>
+#include <net/lwtunnel.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/route.h>
+#include <net/nexthop.h>
+
struct route_info {
__u8 type;
__u8 length;
@@ -19,16 +29,6 @@ struct route_info {
__u8 prefix[]; /* 0,8 or 16 */
};
-#include <net/addrconf.h>
-#include <net/flow.h>
-#include <net/ip6_fib.h>
-#include <net/sock.h>
-#include <net/lwtunnel.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/route.h>
-#include <net/nexthop.h>
-
#define RT6_LOOKUP_F_IFACE 0x00000001
#define RT6_LOOKUP_F_REACHABLE 0x00000002
#define RT6_LOOKUP_F_HAS_SADDR 0x00000004
@@ -53,13 +53,12 @@ struct route_info {
*/
static inline int rt6_srcprefs2flags(unsigned int srcprefs)
{
- /* No need to bitmask because srcprefs have only 3 bits. */
- return srcprefs << 3;
+ return (srcprefs & IPV6_PREFER_SRC_MASK) << 3;
}
static inline unsigned int rt6_flags2srcprefs(int flags)
{
- return (flags >> 3) & 7;
+ return (flags >> 3) & IPV6_PREFER_SRC_MASK;
}
static inline bool rt6_need_strict(const struct in6_addr *daddr)
@@ -84,10 +83,6 @@ struct dst_entry *ip6_route_input_lookup(struct net *net,
struct flowi6 *fl6,
const struct sk_buff *skb, int flags);
-struct dst_entry *ip6_route_output_flags_noref(struct net *net,
- const struct sock *sk,
- struct flowi6 *fl6, int flags);
-
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
@@ -104,7 +99,7 @@ static inline struct dst_entry *ip6_route_output(struct net *net,
static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags)
{
if (!(flags & RT6_LOOKUP_F_DST_NOREF) ||
- !list_empty(&rt->rt6i_uncached))
+ !list_empty(&rt->dst.rt_uncached))
ip6_rt_put(rt);
}
@@ -160,7 +155,7 @@ void fib6_force_start_gc(struct net *net);
struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev,
const struct in6_addr *addr, bool anycast,
- gfp_t gfp_flags);
+ gfp_t gfp_flags, struct netlink_ext_ack *extack);
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
int flags);
@@ -174,7 +169,9 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
struct net_device *dev);
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
- struct net_device *dev, unsigned int pref);
+ struct net_device *dev, unsigned int pref,
+ u32 defrtr_usr_metric,
+ int lifetime);
void rt6_purge_dflt_routers(struct net *net);
@@ -262,25 +259,36 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
{
- struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
+ const struct dst_entry *dst = skb_dst(skb);
+ unsigned int mtu;
- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+ if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
+ mtu = READ_ONCE(dst->dev->mtu);
+ mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
+ } else {
+ mtu = dst_mtu(dst);
+ }
+ return mtu;
}
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
- inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc);
+
+ return pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+ pmtudisc != IPV6_PMTUDISC_OMIT;
}
static inline bool ip6_sk_ignore_df(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
- inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc);
+
+ return pmtudisc < IPV6_PMTUDISC_DO ||
+ pmtudisc == IPV6_PMTUDISC_OMIT;
}
static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
@@ -308,25 +316,27 @@ static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *
!lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws);
}
-static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ bool forwarding)
{
struct inet6_dev *idev;
unsigned int mtu;
- if (dst_metric_locked(dst, RTAX_MTU)) {
+ if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) {
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
- return mtu;
+ goto out;
}
mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
- return mtu;
+out:
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 028eaea1c854..74b369bddf49 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -46,6 +46,7 @@ struct __ip6_tnl_parm {
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
+ netdevice_tracker dev_tracker;
struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
@@ -57,7 +58,7 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int hlen; /* tun_hlen + encap_hlen */
int tun_hlen; /* Precalculated header length */
int encap_hlen; /* Encap header length (FOU,GUE) */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 2ec062aaa978..9b2f69ba5e49 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -17,6 +17,7 @@
#include <linux/rcupdate.h>
#include <net/fib_notifier.h>
#include <net/fib_rules.h>
+#include <net/inet_dscp.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
@@ -24,7 +25,7 @@
struct fib_config {
u8 fc_dst_len;
- u8 fc_tos;
+ dscp_t fc_dscp;
u8 fc_protocol;
u8 fc_scope;
u8 fc_type;
@@ -79,6 +80,7 @@ struct fnhe_hash_bucket {
struct fib_nh_common {
struct net_device *nhc_dev;
+ netdevice_tracker nhc_dev_tracker;
int nhc_oif;
unsigned char nhc_scope;
u8 nhc_family;
@@ -111,6 +113,7 @@ struct fib_nh {
int nh_saddr_genid;
#define fib_nh_family nh_common.nhc_family
#define fib_nh_dev nh_common.nhc_dev
+#define fib_nh_dev_tracker nh_common.nhc_dev_tracker
#define fib_nh_oif nh_common.nhc_oif
#define fib_nh_flags nh_common.nhc_flags
#define fib_nh_lws nh_common.nhc_lwtstate
@@ -133,7 +136,7 @@ struct fib_info {
struct hlist_node fib_lhash;
struct list_head nh_list;
struct net *fib_net;
- int fib_treeref;
+ refcount_t fib_treeref;
refcount_t fib_clntref;
unsigned int fib_flags;
unsigned char fib_dead;
@@ -151,9 +154,10 @@ struct fib_info {
int fib_nhs;
bool fib_nh_is_v6;
bool nh_updated;
+ bool pfsrc_removed;
struct nexthop *nh;
struct rcu_head rcu;
- struct fib_nh fib_nh[];
+ struct fib_nh fib_nh[] __counted_by(fib_nhs);
};
@@ -209,11 +213,12 @@ struct fib_rt_info {
u32 tb_id;
__be32 dst;
int dst_len;
- u8 tos;
+ dscp_t dscp;
u8 type;
u8 offload:1,
trap:1,
- unused:6;
+ offload_failed:1,
+ unused:5;
};
struct fib_entry_notifier_info {
@@ -221,7 +226,7 @@ struct fib_entry_notifier_info {
u32 dst;
int dst_len;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
u32 tb_id;
};
@@ -259,6 +264,7 @@ struct fib_dump_filter {
bool filter_set;
bool dump_routes;
bool dump_exceptions;
+ bool rtnl_held;
unsigned char protocol;
unsigned char rt_type;
unsigned int flags;
@@ -414,7 +420,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
if (!net->ipv4.fib_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl4->fl4_sport = flkeys->ports.src;
fl4->fl4_dport = flkeys->ports.dst;
fl4->flowi4_proto = flkeys->basic.ip_proto;
@@ -437,7 +446,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
- return net->ipv4.fib_num_tclassid_users;
+ return atomic_read(&net->ipv4.fib_num_tclassid_users);
}
#else
static inline int fib_num_tclassid_users(struct net *net)
@@ -465,6 +474,49 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
+/* Fields used for sysctl_fib_multipath_hash_fields.
+ * Common to IPv4 and IPv6.
+ *
+ * Add new fields at the end. This is user API.
+ */
+#define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0)
+#define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1)
+#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2)
+#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3)
+#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4)
+#define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11)
+
+#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
+
+#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys);
@@ -553,5 +605,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
- int nh_weight, u8 rt_family);
+ int nh_weight, u8 rt_family, u32 nh_tclassid);
#endif /* _NET_FIB_H */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 02ccd32542d0..5cd64bb2104d 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -52,8 +52,17 @@ struct ip_tunnel_key {
u8 tos; /* TOS for IPv4, TC for IPv6 */
u8 ttl; /* TTL for IPv4, HL for IPv6 */
__be32 label; /* Flow Label for IPv6 */
+ u32 nhid;
__be16 tp_src;
__be16 tp_dst;
+ __u8 flow_flags;
+};
+
+struct ip_tunnel_encap {
+ u16 type;
+ u16 flags;
+ __be16 sport;
+ __be16 dport;
};
/* Flags for ip_tunnel_info mode. */
@@ -66,8 +75,15 @@ struct ip_tunnel_key {
GENMASK((sizeof_field(struct ip_tunnel_info, \
options_len) * BITS_PER_BYTE) - 1, 0)
+#define ip_tunnel_info_opts(info) \
+ _Generic(info, \
+ const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
+ struct ip_tunnel_info * : ((void *)((info) + 1))\
+ )
+
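A standalone sketch of the _Generic trick above: the accessor yields a const-qualified pointer for a const argument and a mutable pointer otherwise, something a single inline function cannot express in C. Simplified, hypothetical types:

#include <stdio.h>

struct info { int len; };

#define info_opts(p)					\
	_Generic((p),					\
		const struct info * : (const void *)((p) + 1),	\
		struct info *       : (void *)((p) + 1))

int main(void)
{
	struct info buf[2] = { { 4 }, { 0 } };
	struct info *w = buf;
	const struct info *r = buf;

	void *opts = info_opts(w);		/* mutable options area */
	const void *ropts = info_opts(r);	/* const-qualified view */
	/* void *bad = info_opts(r); would warn: discards const */

	printf("same address: %d\n", opts == ropts); /* 1 */
	return 0;
}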
struct ip_tunnel_info {
struct ip_tunnel_key key;
+ struct ip_tunnel_encap encap;
#ifdef CONFIG_DST_CACHE
struct dst_cache dst_cache;
#endif
@@ -85,13 +101,6 @@ struct ip_tunnel_6rd_parm {
};
#endif
-struct ip_tunnel_encap {
- u16 type;
- u16 flags;
- __be16 sport;
- __be16 dport;
-};
-
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
@@ -104,7 +113,10 @@ struct metadata_dst;
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
+
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct net *net; /* netns for packet i/o */
unsigned long err_time; /* Time when the last ICMP error
@@ -113,7 +125,7 @@ struct ip_tunnel {
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
- u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
/* These four fields used only by ERSPAN */
@@ -240,11 +252,19 @@ static inline __be32 tunnel_id_to_key32(__be64 tun_id)
static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
int proto,
__be32 daddr, __be32 saddr,
- __be32 key, __u8 tos, int oif,
- __u32 mark, __u32 tun_inner_hash)
+ __be32 key, __u8 tos,
+ struct net *net, int oif,
+ __u32 mark, __u32 tun_inner_hash,
+ __u8 flow_flags)
{
memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = oif;
+
+ if (oif) {
+ fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+ /* Legacy VRF/l3mdev use case */
+ fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
+ }
+
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->flowi4_tos = tos;
@@ -252,6 +272,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->fl4_gre_key = key;
fl4->flowi4_mark = mark;
fl4->flowi4_multipath_hash = tun_inner_hash;
+ fl4->flowi4_flags = flow_flags;
}
int ip_tunnel_init(struct net_device *dev);
@@ -263,24 +284,25 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
- struct rtnl_link_ops *ops);
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
-int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
-void ip_tunnel_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
__be32 remote, __be32 local,
__be32 key);
+void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
@@ -290,6 +312,12 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
+bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *encap);
+
+void ip_tunnel_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms);
+
extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
@@ -353,22 +381,23 @@ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
return hlen;
}
-static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+static inline int ip_tunnel_encap(struct sk_buff *skb,
+ struct ip_tunnel_encap *e,
u8 *protocol, struct flowi4 *fl4)
{
const struct ip_tunnel_encap_ops *ops;
int ret = -EINVAL;
- if (t->encap.type == TUNNEL_ENCAP_NONE)
+ if (e->type == TUNNEL_ENCAP_NONE)
return 0;
- if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
return -EINVAL;
rcu_read_lock();
- ops = rcu_dereference(iptun_encaps[t->encap.type]);
+ ops = rcu_dereference(iptun_encaps[e->type]);
if (likely(ops && ops->build_header))
- ret = ops->build_header(skb, &t->encap, protocol, fl4);
+ ret = ops->build_header(skb, e, protocol, fl4);
rcu_read_unlock();
return ret;
@@ -378,20 +407,35 @@ static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
{
- if (skb->protocol == htons(ETH_P_IP))
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
return iph->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
return ipv6_get_dsfield((const struct ipv6hdr *)iph);
else
return 0;
}
+static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IPV6))
+ return ip6_flowlabel((const struct ipv6hdr *)iph);
+ else
+ return 0;
+}
+
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
const struct sk_buff *skb)
{
- if (skb->protocol == htons(ETH_P_IP))
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
return iph->ttl;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
return ((const struct ipv6hdr *)iph)->hop_limit;
else
return 0;
@@ -447,25 +491,19 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += pkt_len;
- tstats->tx_packets++;
+ u64_stats_add(&tstats->tx_bytes, pkt_len);
+ u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
- } else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ return;
}
-}
-static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
-{
- return info + 1;
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
+ } else {
+ DEV_STATS_INC(dev, tx_dropped);
+ }
}
static inline void ip_tunnel_info_opts_get(void *to,
@@ -478,9 +516,11 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
__be16 flags)
{
- memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
- info->key.tun_flags |= flags;
+ if (len > 0) {
+ memcpy(ip_tunnel_info_opts(info), from, len);
+ info->key.tun_flags |= flags;
+ }
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -526,7 +566,6 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
__be16 flags)
{
info->options_len = 0;
- info->key.tun_flags |= flags;
}
#endif /* CONFIG_INET */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9a59a33787cb..ff406ef4fd4a 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -25,13 +25,11 @@
#include <linux/ip.h>
#include <linux/ipv6.h> /* for struct ipv6hdr */
#include <net/ipv6.h>
-#if IS_ENABLED(CONFIG_IP_VS_IPV6)
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/net_namespace.h> /* Netw namespace */
+#include <linux/sched/isolation.h>
#define IP_VS_HDR_INVERSE 1
#define IP_VS_HDR_ICMP 2
@@ -45,6 +43,8 @@ static inline struct netns_ipvs *net_ipvs(struct net* net)
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
+extern struct mutex __ip_vs_mutex;
+
struct ip_vs_iphdr {
int hdr_flags; /* ipvs flags */
__u32 off; /* Where IP or IPv4 header starts */
@@ -265,26 +265,6 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
pr_err(msg, ##__VA_ARGS__); \
} while (0)
-#ifdef CONFIG_IP_VS_DEBUG
-#define EnterFunction(level) \
- do { \
- if (level <= ip_vs_get_debug_level()) \
- printk(KERN_DEBUG \
- pr_fmt("Enter: %s, %s line %i\n"), \
- __func__, __FILE__, __LINE__); \
- } while (0)
-#define LeaveFunction(level) \
- do { \
- if (level <= ip_vs_get_debug_level()) \
- printk(KERN_DEBUG \
- pr_fmt("Leave: %s, %s line %i\n"), \
- __func__, __FILE__, __LINE__); \
- } while (0)
-#else
-#define EnterFunction(level) do {} while (0)
-#define LeaveFunction(level) do {} while (0)
-#endif
-
/* The port number of FTP service (in network order). */
#define FTPPORT cpu_to_be16(21)
#define FTPDATA cpu_to_be16(20)
@@ -354,11 +334,11 @@ struct ip_vs_seq {
/* counters per cpu */
struct ip_vs_counters {
- __u64 conns; /* connections scheduled */
- __u64 inpkts; /* incoming packets */
- __u64 outpkts; /* outgoing packets */
- __u64 inbytes; /* incoming bytes */
- __u64 outbytes; /* outgoing bytes */
+ u64_stats_t conns; /* connections scheduled */
+ u64_stats_t inpkts; /* incoming packets */
+ u64_stats_t outpkts; /* outgoing packets */
+ u64_stats_t inbytes; /* incoming bytes */
+ u64_stats_t outbytes; /* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
@@ -366,9 +346,12 @@ struct ip_vs_cpu_stats {
struct u64_stats_sync syncp;
};
+/* Default nice for estimator kthreads */
+#define IPVS_EST_NICE 0
+
/* IPVS statistics objects */
struct ip_vs_estimator {
- struct list_head list;
+ struct hlist_node list;
u64 last_inbytes;
u64 last_outbytes;
@@ -381,6 +364,10 @@ struct ip_vs_estimator {
u64 outpps;
u64 inbps;
u64 outbps;
+
+ s32 ktid:16, /* kthread ID, -1=temp list */
+ ktrow:8, /* row/tick ID for kthread */
+ ktcid:8; /* chain ID for kthread tick */
};
/*
@@ -408,6 +395,77 @@ struct ip_vs_stats {
struct ip_vs_kstats kstats0; /* reset values */
};
+struct ip_vs_stats_rcu {
+ struct ip_vs_stats s;
+ struct rcu_head rcu_head;
+};
+
+int ip_vs_stats_init_alloc(struct ip_vs_stats *s);
+struct ip_vs_stats *ip_vs_stats_alloc(void);
+void ip_vs_stats_release(struct ip_vs_stats *stats);
+void ip_vs_stats_free(struct ip_vs_stats *stats);
+
+/* Process estimators in multiple timer ticks (20/50/100, see ktrow) */
+#define IPVS_EST_NTICKS 50
+/* The 2-second estimation period is divided into ticks (tick length in jiffies) */
+#define IPVS_EST_TICK ((2 * HZ) / IPVS_EST_NTICKS)
+
+/* Limit of CPU load per kthread (8 for 12.5%), as a ratio of CPU capacity (1/C).
+ * A value of 4 or above ensures kthreads can take on work without exceeding
+ * the CPU capacity under different circumstances.
+ */
+#define IPVS_EST_LOAD_DIVISOR 8
+
+/* Kthreads should not be given enough work to push CPU load above 50% */
+#define IPVS_EST_CPU_KTHREADS (IPVS_EST_LOAD_DIVISOR / 2)
+
+/* Desired number of chains per timer tick (chain load factor in 100us units),
+ * 48=4.8ms of 40ms tick (12% CPU usage):
+ * 2 sec * 1000 ms in sec * 10 (100us in ms) / 8 (12.5%) / 50
+ */
+#define IPVS_EST_CHAIN_FACTOR \
+ ALIGN_DOWN(2 * 1000 * 10 / IPVS_EST_LOAD_DIVISOR / IPVS_EST_NTICKS, 8)
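A worked evaluation of IPVS_EST_CHAIN_FACTOR with the constants defined above (IPVS_EST_LOAD_DIVISOR = 8, IPVS_EST_NTICKS = 50): 2 * 1000 * 10 = 20000 hundred-microsecond units per 2 s period, / 8 = 2500 for the 12.5% budget, / 50 ticks = 50, and ALIGN_DOWN(50, 8) = 48, i.e. 4.8 ms of work per 40 ms tick:

#include <stdio.h>

/* simplified model of the kernel's ALIGN_DOWN for positive values */
#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

int main(void)
{
	int factor = ALIGN_DOWN(2 * 1000 * 10 / 8 / 50, 8);

	printf("chain factor = %d\n", factor); /* 48 */
	return 0;
}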
+
+/* Compile-time number of chains per tick.
+ * The conditions should match those in cond_resched_rcu().
+ */
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+#define IPVS_EST_TICK_CHAINS IPVS_EST_CHAIN_FACTOR
+#else
+#define IPVS_EST_TICK_CHAINS 1
+#endif
+
+#if IPVS_EST_NTICKS > 127
+#error Too many timer ticks for ktrow
+#endif
+
+/* Multiple chains processed in same tick */
+struct ip_vs_est_tick_data {
+ struct rcu_head rcu_head;
+ struct hlist_head chains[IPVS_EST_TICK_CHAINS];
+ DECLARE_BITMAP(present, IPVS_EST_TICK_CHAINS);
+ DECLARE_BITMAP(full, IPVS_EST_TICK_CHAINS);
+ int chain_len[IPVS_EST_TICK_CHAINS];
+};
+
+/* Context for estimation kthread */
+struct ip_vs_est_kt_data {
+ struct netns_ipvs *ipvs;
+ struct task_struct *task; /* task if running */
+ struct ip_vs_est_tick_data __rcu *ticks[IPVS_EST_NTICKS];
+ DECLARE_BITMAP(avail, IPVS_EST_NTICKS); /* tick has space for ests */
+ unsigned long est_timer; /* estimation timer (jiffies) */
+ struct ip_vs_stats *calc_stats; /* Used for calculation */
+ int tick_len[IPVS_EST_NTICKS]; /* est count */
+ int id; /* ktid per netns */
+ int chain_max; /* max ests per tick chain */
+ int tick_max; /* max ests per tick */
+ int est_count; /* attached ests to kthread */
+ int est_max_count; /* max ests per kthread */
+ int add_row; /* row for new ests */
+ int est_row; /* estimated row */
+};
+
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
@@ -526,7 +584,7 @@ struct ip_vs_conn {
spinlock_t lock; /* lock for state transition */
volatile __u16 state; /* state info */
volatile __u16 old_state; /* old state, to be used for
- * state transition triggerd
+ * state transition triggered
* synchronization
*/
__u32 fwmark; /* Fire wall mark from skb */
@@ -552,8 +610,10 @@ struct ip_vs_conn {
*/
struct ip_vs_app *app; /* bound ip_vs_app object */
void *app_data; /* Application private data */
- struct ip_vs_seq in_seq; /* incoming seq. struct */
- struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ struct_group(sync_conn_opt,
+ struct ip_vs_seq in_seq; /* incoming seq. struct */
+ struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ );
const struct ip_vs_pe *pe;
char *pe_data;
@@ -575,7 +635,7 @@ struct ip_vs_service_user_kern {
u16 protocol;
union nf_inet_addr addr; /* virtual ip address */
__be16 port;
- u32 fwmark; /* firwall mark of service */
+ u32 fwmark; /* firewall mark of service */
/* virtual service options */
char *sched_name;
@@ -691,6 +751,7 @@ struct ip_vs_dest {
union nf_inet_addr vaddr; /* virtual IP address */
__u32 vfwmark; /* firewall mark of service */
+ struct rcu_head rcu_head;
struct list_head t_list; /* in dest_trash */
unsigned int in_rs_table:1; /* we are in rs_table */
};
@@ -872,7 +933,7 @@ struct netns_ipvs {
atomic_t conn_count; /* connection counter */
/* ip_vs_ctl */
- struct ip_vs_stats tot_stats; /* Statistics & est. */
+ struct ip_vs_stats_rcu *tot_stats; /* Statistics & est. */
int num_services; /* no of virtual services */
int num_services6; /* IPv6 virtual services */
@@ -934,6 +995,13 @@ struct netns_ipvs {
int sysctl_conn_reuse_mode;
int sysctl_schedule_icmp;
int sysctl_ignore_tunneled;
+ int sysctl_run_estimation;
+#ifdef CONFIG_SYSCTL
+ cpumask_var_t sysctl_est_cpulist; /* kthread cpumask */
+ int est_cpulist_valid; /* cpulist set */
+ int sysctl_est_nice; /* kthread nice */
+ int est_stopped; /* stop tasks */
+#endif
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -944,9 +1012,17 @@ struct netns_ipvs {
struct ctl_table_header *lblcr_ctl_header;
struct ctl_table *lblcr_ctl_table;
/* ip_vs_est */
- struct list_head est_list; /* estimator list */
- spinlock_t est_lock;
- struct timer_list est_timer; /* Estimation timer */
+ struct delayed_work est_reload_work;/* Reload kthread tasks */
+ struct mutex est_mutex; /* protect kthread tasks */
+ struct hlist_head est_temp_list; /* Ests during calc phase */
+ struct ip_vs_est_kt_data **est_kt_arr; /* Array of kthread data ptrs */
+ unsigned long est_max_threads;/* Hard limit of kthreads */
+ int est_calc_phase; /* Calculation phase */
+ int est_chain_max; /* Calculated chain_max */
+ int est_kt_count; /* Allocated ptrs */
+ int est_add_ktid; /* ktid where to add ests */
+ atomic_t est_genid; /* kthreads reload genid */
+ atomic_t est_genid_done; /* applied genid */
/* ip_vs_sync */
spinlock_t sync_lock;
struct ipvs_master_sync_state *ms;
@@ -960,7 +1036,7 @@ struct netns_ipvs {
struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */
/* net name space ptr */
struct net *net; /* Needed by timer routines */
- /* Number of heterogeneous destinations, needed becaus heterogeneous
+ /* Number of heterogeneous destinations, needed because heterogeneous
* are not supported when synchronization is enabled.
*/
unsigned int mixed_address_family_dests;
@@ -1074,6 +1150,24 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return ipvs->sysctl_cache_bypass;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_run_estimation;
+}
+
+static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
+{
+ if (ipvs->est_cpulist_valid)
+ return ipvs->sysctl_est_cpulist;
+ else
+ return housekeeping_cpumask(HK_TYPE_KTHREAD);
+}
+
+static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_est_nice;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1166,6 +1260,21 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
+{
+ return housekeeping_cpumask(HK_TYPE_KTHREAD);
+}
+
+static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
+{
+ return IPVS_EST_NICE;
+}
+
#endif
/* IPVS core functions
@@ -1467,10 +1576,41 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state);
void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);
/* IPVS rate estimator prototypes (from ip_vs_est.c) */
-void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
+int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
+void ip_vs_est_reload_start(struct netns_ipvs *ipvs);
+int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
+ struct ip_vs_est_kt_data *kd);
+void ip_vs_est_kthread_stop(struct ip_vs_est_kt_data *kd);
+
+static inline void ip_vs_est_stopped_recalc(struct netns_ipvs *ipvs)
+{
+#ifdef CONFIG_SYSCTL
+ /* Stop tasks while cpulist is empty or if disabled with flag */
+ ipvs->est_stopped = !sysctl_run_estimation(ipvs) ||
+ (ipvs->est_cpulist_valid &&
+ cpumask_empty(sysctl_est_cpulist(ipvs)));
+#endif
+}
+
+static inline bool ip_vs_est_stopped(struct netns_ipvs *ipvs)
+{
+#ifdef CONFIG_SYSCTL
+ return ipvs->est_stopped;
+#else
+ return false;
+#endif
+}
+
+static inline int ip_vs_est_max_threads(struct netns_ipvs *ipvs)
+{
+ unsigned int limit = IPVS_EST_CPU_KTHREADS *
+ cpumask_weight(sysctl_est_cpulist(ipvs));
+
+ return max(1U, limit);
+}
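ip_vs_est_max_threads() clamps to at least one kthread even when the allowed cpumask is empty. A runnable userspace model of that arithmetic (the IPVS_EST_CPU_KTHREADS value below is an assumed stand-in):

#include <stdio.h>

/* Stand-in value for illustration; the real constant lives in ip_vs.h. */
#define IPVS_EST_CPU_KTHREADS 2

static unsigned int est_max_threads(unsigned int allowed_cpus)
{
	unsigned int limit = IPVS_EST_CPU_KTHREADS * allowed_cpus;

	/* An empty allowed-CPU mask would yield 0; clamp to 1 kthread. */
	return limit > 1 ? limit : 1;
}

int main(void)
{
	printf("%u\n", est_max_threads(0));	/* 1 */
	printf("%u\n", est_max_threads(8));	/* 16 */
	return 0;
}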
/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1715,4 +1855,15 @@ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
atomic_read(&dest->inactconns);
}
+#ifdef CONFIG_IP_VS_PROTO_TCP
+INDIRECT_CALLABLE_DECLARE(int
+ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+INDIRECT_CALLABLE_DECLARE(int
+ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
#endif /* _NET_IP_VS_H */
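The INDIRECT_CALLABLE_DECLARE() lines above make the TCP/UDP SNAT handlers visible so call sites can use the indirect-call wrappers. A hedged sketch of such a dispatch (assuming pp, skb, cp and iph are in scope; not necessarily the exact ip_vs_core.c call site):

ret = INDIRECT_CALL_2(pp->snat_handler, tcp_snat_handler,
		      udp_snat_handler, skb, pp, cp, iph);

On retpoline-mitigated builds this compiles into direct-call tests for the two common targets before falling back to the indirect call.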
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index fee6fc451597..8660a2a6d1fc 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -2,11 +2,13 @@
#ifndef _NET_IPCOMP_H
#define _NET_IPCOMP_H
+#include <linux/skbuff.h>
#include <linux/types.h>
#define IPCOMP_SCRATCH_SIZE 65400
struct crypto_comp;
+struct ip_comp_hdr;
struct ipcomp_data {
u16 threshold;
@@ -20,7 +22,7 @@ struct xfrm_state;
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
void ipcomp_destroy(struct xfrm_state *x);
-int ipcomp_init_state(struct xfrm_state *x);
+int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb)
{
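Threading a netlink_ext_ack into ipcomp_init_state() lets setup failures carry a human-readable reason back to the netlink caller instead of a bare errno. A hypothetical error-path sketch (function name and message text are illustrative, not the real net/xfrm/ipcomp.c code):

static int example_init_state(struct xfrm_state *x,
			      struct netlink_ext_ack *extack)
{
	if (!x->calg) {
		NL_SET_ERR_MSG(extack, "Missing required compression algorithm");
		return -EINVAL;
	}
	return 0;
}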
diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h
index e3534299bd2a..8276897d0c2e 100644
--- a/include/net/ipconfig.h
+++ b/include/net/ipconfig.h
@@ -7,6 +7,8 @@
/* The following are initdata: */
+#include <linux/types.h>
+
extern int ic_proto_enabled; /* Protocols enabled (see IC_xxx) */
extern int ic_set_manually; /* IPconfig parameters set manually */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index bd1f396cc9c7..88a8e554f7a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -15,12 +15,14 @@
#include <linux/refcount.h>
#include <linux/jump_label_ratelimit.h>
#include <net/if_inet6.h>
-#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
+#include <net/inet_dscp.h>
#include <net/snmp.h>
#include <net/netns/hash.h>
+struct ip_tunnel_info;
+
#define SIN6_LEN_RFC2133 24
#define IPV6_MAXPLEN 65535
@@ -30,6 +32,7 @@
*/
#define NEXTHDR_HOP 0 /* Hop-by-hop option header. */
+#define NEXTHDR_IPV4 4 /* IPv4 in IPv6 */
#define NEXTHDR_TCP 6 /* TCP segment. */
#define NEXTHDR_UDP 17 /* UDP message. */
#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
@@ -148,6 +151,17 @@ struct frag_hdr {
__be32 identification;
};
+/*
+ * Jumbo payload option, as described in RFC 2675, section 2.
+ */
+struct hop_jumbo_hdr {
+ u8 nexthdr;
+ u8 hdrlen;
+ u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */
+ u8 tlv_len; /* 4 */
+ __be32 jumbo_payload_len;
+};
+
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
@@ -344,9 +358,9 @@ struct ipcm6_cookie {
struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
+ __u16 gso_size;
__s8 dontfrag;
struct ipv6_txoptions *opt;
- __u16 gso_size;
};
static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
@@ -359,12 +373,12 @@ static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
}
static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
- const struct ipv6_pinfo *np)
+ const struct sock *sk)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
- .tclass = np->tclass,
- .dontfrag = np->dontfrag,
+ .tclass = inet6_sk(sk)->tclass,
+ .dontfrag = inet6_test_bit(DONTFRAG, sk),
};
}
@@ -390,17 +404,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
kfree_rcu(opt, rcu);
}
+#if IS_ENABLED(CONFIG_IPV6)
struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
return NULL;
}
+#endif
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
@@ -411,7 +428,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
-bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
+bool ip6_autoflowlabel(struct net *net, const struct sock *sk);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
@@ -419,7 +436,8 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
atomic_dec(&fl->users);
}
-void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ u8 code, __be32 info);
void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *thdr, int len);
@@ -434,21 +452,97 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
struct ipv6_opt_hdr *newopt);
-struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
- struct ipv6_txoptions *opt);
+struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space,
+ struct ipv6_txoptions *opt);
+
+static inline struct ipv6_txoptions *
+ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt)
+{
+ if (!opt)
+ return NULL;
+ return __ipv6_fixup_options(opt_space, opt);
+}
bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt);
-static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+/* This helper is specialized for BIG TCP needs.
+ * It assumes the hop_jumbo_hdr will immediately follow the IPV6 header.
+ * It assumes headers are already in skb->head.
+ * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there.
+ */
+static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
+{
+ const struct hop_jumbo_hdr *jhdr;
+ const struct ipv6hdr *nhdr;
+
+ if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return 0;
+
+ if (skb_network_offset(skb) +
+ sizeof(struct ipv6hdr) +
+ sizeof(struct hop_jumbo_hdr) > skb_headlen(skb))
+ return 0;
+
+ nhdr = ipv6_hdr(skb);
+
+ if (nhdr->nexthdr != NEXTHDR_HOP)
+ return 0;
+
+ jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1);
+ if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
+ jhdr->nexthdr != IPPROTO_TCP)
+ return 0;
+ return jhdr->nexthdr;
+}
+
+/* Return 0 if HBH header is successfully removed
+ * Or if HBH removal is unnecessary (packet is not big TCP)
+ * Return error to indicate dropping the packet
+ */
+static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
{
+ const int hophdr_len = sizeof(struct hop_jumbo_hdr);
+ int nexthdr = ipv6_has_hopopt_jumbo(skb);
+ struct ipv6hdr *h6;
+
+ if (!nexthdr)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return -1;
+
+ /* Remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][L4 Header]
+ */
+ memmove(skb_mac_header(skb) + hophdr_len, skb_mac_header(skb),
+ skb_network_header(skb) - skb_mac_header(skb) +
+ sizeof(struct ipv6hdr));
+
+ __skb_pull(skb, hophdr_len);
+ skb->network_header += hophdr_len;
+ skb->mac_header += hophdr_len;
+
+ h6 = ipv6_hdr(skb);
+ h6->nexthdr = nexthdr;
+
+ return 0;
+}
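The memmove above shifts the MAC and IPv6 headers forward over the 8-byte option rather than moving the (much larger) payload. A runnable toy model of that shift, with header sizes hard-coded for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Toy layout mirroring the comment in the inline above:
	 * [14B mac][40B ipv6][8B HBH jumbo option][4B payload]. */
	char pkt[14 + 40 + 8 + 4];
	const int hophdr_len = 8;	/* sizeof(struct hop_jumbo_hdr) */

	memset(pkt, 0, sizeof(pkt));
	memcpy(pkt + 14 + 40 + 8, "data", 4);

	/* Shift mac+ipv6 forward over the HBH option, then treat
	 * pkt + 8 as the new start of frame, as the kernel helper
	 * does with skb_mac_header()/__skb_pull(). */
	memmove(pkt + hophdr_len, pkt, 14 + 40);

	printf("%.4s\n", pkt + hophdr_len + 14 + 40);	/* "data" */
	return 0;
}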
+
+static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
+{
+ s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);
+
/* If forwarding is enabled, RA are not accepted unless the special
* hybrid mode (accept_ra=2) is enabled.
*/
- return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
- idev->cnf.accept_ra;
+ return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 :
+ accept_ra;
}
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
@@ -660,12 +754,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
/* more secured version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
- u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
-
- return jhash_3words(v,
- (__force u32)a->s6_addr32[2],
- (__force u32)a->s6_addr32[3],
- initval);
+ return jhash2((__force const u32 *)a->s6_addr32,
+ ARRAY_SIZE(a->s6_addr32), initval);
}
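The old fold fed only v = w0 ^ w1 plus the last two words to jhash_3words(), so any two addresses whose first two words XOR to the same value collided for every initval; jhash2() over all four words does not. A small runnable demonstration of such a collision:

#include <stdio.h>

int main(void)
{
	/* Distinct first words, identical XOR fold. */
	unsigned int a[4] = { 0x20010db8, 0x00000001, 0, 1 };
	unsigned int b[4] = { 0x20010db9, 0x00000000, 0, 1 };

	printf("fold(a)=%08x fold(b)=%08x\n", a[0] ^ a[1], b[0] ^ b[1]);
	return 0;
}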
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -821,9 +911,9 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
int hlimit;
if (ipv6_addr_is_multicast(&fl6->daddr))
- hlimit = np->mcast_hops;
+ hlimit = READ_ONCE(np->mcast_hops);
else
- hlimit = np->hop_limit;
+ hlimit = READ_ONCE(np->hop_limit);
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
return hlimit;
@@ -839,7 +929,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
offsetof(typeof(flow->addrs), v6addrs.src) +
sizeof(flow->addrs.v6addrs.src));
- memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
@@ -849,7 +939,8 @@ static inline bool ipv6_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv6.sysctl.ip_nonlocal_bind ||
- inet->freebind || inet->transparent;
+ test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
+ test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
}
/* Sysctl settings for net ipv6.auto_flowlabels */
@@ -925,11 +1016,19 @@ static inline int ip6_multipath_hash_policy(const struct net *net)
{
return net->ipv6.sysctl.multipath_hash_policy;
}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return net->ipv6.sysctl.multipath_hash_fields;
+}
#else
static inline int ip6_multipath_hash_policy(const struct net *net)
{
return 0;
}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return 0;
+}
#endif
/*
@@ -956,6 +1055,11 @@ static inline u8 ip6_tclass(__be32 flowinfo)
return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}
+static inline dscp_t ip6_dscp(__be32 flowinfo)
+{
+ return inet_dsfield_to_dscp(ip6_tclass(flowinfo));
+}
+
static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
@@ -992,7 +1096,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
int ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
+ void *from, size_t length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags);
@@ -1008,8 +1112,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
struct sk_buff *ip6_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
- struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ void *from, size_t length, int transhdrlen,
+ struct ipcm6_cookie *ipc6,
struct rt6_info *rt, unsigned int flags,
struct inet_cork_full *cork);
@@ -1026,12 +1130,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
-struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
- struct net_device *dev,
- struct net *net, struct socket *sock,
- struct in6_addr *saddr,
- const struct ip_tunnel_info *info,
- u8 protocol, bool use_cache);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
@@ -1083,9 +1181,14 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
/*
* socket options (ipv6_sockglue.c)
*/
+DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount);
+int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
@@ -1106,8 +1209,11 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+void inet6_cleanup_sock(struct sock *sk);
+void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
@@ -1135,7 +1241,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
struct sockaddr_storage *list);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
- struct sockaddr_storage __user *p);
+ sockptr_t optval, size_t ss_offset);
#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
@@ -1162,7 +1268,9 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#ifdef CONFIG_SYSCTL
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+size_t ipv6_icmp_sysctl_table_size(void);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+size_t ipv6_route_sysctl_table_size(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif
@@ -1186,15 +1294,16 @@ static inline int ip6_sock_set_v6only(struct sock *sk)
static inline void ip6_sock_set_recverr(struct sock *sk)
{
- lock_sock(sk);
- inet6_sk(sk)->recverr = true;
- release_sock(sk);
+ inet6_set_bit(RECVERR6, sk);
}
-static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+#define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \
+ IPV6_PREFER_SRC_COA)
+
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
+ unsigned int prefmask = ~IPV6_PREFER_SRC_MASK;
unsigned int pref = 0;
- unsigned int prefmask = ~0;
/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
switch (val & (IPV6_PREFER_SRC_PUBLIC |
@@ -1244,20 +1353,11 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return -EINVAL;
}
- inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref;
+ WRITE_ONCE(inet6_sk(sk)->srcprefs,
+ (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref);
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
-{
- int ret;
-
- lock_sock(sk);
- ret = __ip6_sock_set_addr_preferences(sk, val);
- release_sock(sk);
- return ret;
-}
-
static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
{
lock_sock(sk);
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index a21e8b1381a1..7321ffe3a108 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
+#include <linux/icmpv6.h>
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
@@ -67,13 +68,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
struct sk_buff *head;
rcu_read_lock();
- if (fq->q.fqdir->dead)
+ /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
+ if (READ_ONCE(fq->q.fqdir->dead))
goto out_rcu_unlock;
spin_lock(&fq->q.lock);
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
+ fq->q.flags |= INET_FRAG_DROP;
inet_frag_kill(&fq->q);
dev = dev_get_by_index_rcu(net, fq->iif);
@@ -99,7 +102,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
spin_unlock(&fq->q.lock);
icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
- kfree_skb(head);
+ kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
goto out_rcu_unlock;
out:
@@ -108,5 +111,35 @@ out_rcu_unlock:
rcu_read_unlock();
inet_frag_put(&fq->q);
}
+
+/* Check if the upper layer header is truncated in the first fragment. */
+static inline bool
+ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
+{
+ u8 nexthdr = *nexthdrp;
+ __be16 frag_off;
+ int offset;
+
+ offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
+ if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
+ return false;
+ switch (nexthdr) {
+ case NEXTHDR_TCP:
+ offset += sizeof(struct tcphdr);
+ break;
+ case NEXTHDR_UDP:
+ offset += sizeof(struct udphdr);
+ break;
+ case NEXTHDR_ICMP:
+ offset += sizeof(struct icmp6hdr);
+ break;
+ default:
+ offset += 1;
+ }
+ if (offset > skb->len)
+ return true;
+ return false;
+}
+
#endif
#endif
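A runnable, simplified model of the truncation test added above (the kernel version derives the transport-header offset via ipv6_skip_exthdr(); the numbers below are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* A valid first fragment must carry the complete transport header. */
static bool thdr_truncated(int thdr_offset, int thdr_size, int frag_len)
{
	return thdr_offset + thdr_size > frag_len;
}

int main(void)
{
	printf("%d\n", thdr_truncated(40, 20, 48));	/* 1: TCP header cut short */
	printf("%d\n", thdr_truncated(40, 20, 60));	/* 0: header fits */
	return 0;
}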
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index d7a7f7c81e7b..485c39a89866 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -47,6 +47,7 @@ struct ipv6_stub {
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
@@ -59,10 +60,18 @@ struct ipv6_stub {
#if IS_ENABLED(CONFIG_XFRM)
void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu);
int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb);
int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
#endif
struct neigh_table *nd_tbl;
+
+ int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+ struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
@@ -75,6 +84,15 @@ struct ipv6_bpf_stub {
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
+ int (*ipv6_setsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+ int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
+ int (*ipv6_dev_get_saddr)(struct net *net,
+ const struct net_device *dst_dev,
+ const struct in6_addr *daddr,
+ unsigned int prefs,
+ struct in6_addr *saddr);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
diff --git a/include/net/ipx.h b/include/net/ipx.h
deleted file mode 100644
index 9d1342807b59..000000000000
--- a/include/net/ipx.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_INET_IPX_H_
-#define _NET_INET_IPX_H_
-/*
- * The following information is in its entirety obtained from:
- *
- * Novell 'IPX Router Specification' Version 1.10
- * Part No. 107-000029-001
- *
- * Which is available from ftp.novell.com
- */
-
-#include <linux/netdevice.h>
-#include <net/datalink.h>
-#include <linux/ipx.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/refcount.h>
-
-struct ipx_address {
- __be32 net;
- __u8 node[IPX_NODE_LEN];
- __be16 sock;
-};
-
-#define ipx_broadcast_node "\377\377\377\377\377\377"
-#define ipx_this_node "\0\0\0\0\0\0"
-
-#define IPX_MAX_PPROP_HOPS 8
-
-struct ipxhdr {
- __be16 ipx_checksum __packed;
-#define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF)
- __be16 ipx_pktsize __packed;
- __u8 ipx_tctrl;
- __u8 ipx_type;
-#define IPX_TYPE_UNKNOWN 0x00
-#define IPX_TYPE_RIP 0x01 /* may also be 0 */
-#define IPX_TYPE_SAP 0x04 /* may also be 0 */
-#define IPX_TYPE_SPX 0x05 /* SPX protocol */
-#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
-#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */
- struct ipx_address ipx_dest __packed;
- struct ipx_address ipx_source __packed;
-};
-
-/* From af_ipx.c */
-extern int sysctl_ipx_pprop_broadcasting;
-
-struct ipx_interface {
- /* IPX address */
- __be32 if_netnum;
- unsigned char if_node[IPX_NODE_LEN];
- refcount_t refcnt;
-
- /* physical device info */
- struct net_device *if_dev;
- struct datalink_proto *if_dlink;
- __be16 if_dlink_type;
-
- /* socket support */
- unsigned short if_sknum;
- struct hlist_head if_sklist;
- spinlock_t if_sklist_lock;
-
- /* administrative overhead */
- int if_ipx_offset;
- unsigned char if_internal;
- unsigned char if_primary;
-
- struct list_head node; /* node in ipx_interfaces list */
-};
-
-struct ipx_route {
- __be32 ir_net;
- struct ipx_interface *ir_intrfc;
- unsigned char ir_routed;
- unsigned char ir_router_node[IPX_NODE_LEN];
- struct list_head node; /* node in ipx_routes list */
- refcount_t refcnt;
-};
-
-struct ipx_cb {
- u8 ipx_tctrl;
- __be32 ipx_dest_net;
- __be32 ipx_source_net;
- struct {
- __be32 netnum;
- int index;
- } last_hop;
-};
-
-#include <net/sock.h>
-
-struct ipx_sock {
- /* struct sock has to be the first member of ipx_sock */
- struct sock sk;
- struct ipx_address dest_addr;
- struct ipx_interface *intrfc;
- __be16 port;
-#ifdef CONFIG_IPX_INTERN
- unsigned char node[IPX_NODE_LEN];
-#endif
- unsigned short type;
- /*
- * To handle special ncp connection-handling sockets for mars_nwe,
- * the connection number must be stored in the socket.
- */
- unsigned short ipx_ncp_conn;
-};
-
-static inline struct ipx_sock *ipx_sk(struct sock *sk)
-{
- return (struct ipx_sock *)sk;
-}
-
-#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
-
-#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
-#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
-
-extern struct list_head ipx_routes;
-extern rwlock_t ipx_routes_lock;
-
-extern struct list_head ipx_interfaces;
-struct ipx_interface *ipx_interfaces_head(void);
-extern spinlock_t ipx_interfaces_lock;
-
-extern struct ipx_interface *ipx_primary_net;
-
-int ipx_proc_init(void);
-void ipx_proc_exit(void);
-
-const char *ipx_frame_name(__be16);
-const char *ipx_device_name(struct ipx_interface *intrfc);
-
-static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
-{
- refcount_inc(&intrfc->refcnt);
-}
-
-void ipxitf_down(struct ipx_interface *intrfc);
-struct ipx_interface *ipxitf_find_using_net(__be32 net);
-int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
-__be16 ipx_cksum(struct ipxhdr *packet, int length);
-int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
- unsigned char *node);
-void ipxrtr_del_routes(struct ipx_interface *intrfc);
-int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct msghdr *msg, size_t len, int noblock);
-int ipxrtr_route_skb(struct sk_buff *skb);
-struct ipx_route *ipxrtr_lookup(__be32 net);
-int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
-
-static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
-{
- if (refcount_dec_and_test(&intrfc->refcnt))
- ipxitf_down(intrfc);
-}
-
-static __inline__ void ipxrtr_hold(struct ipx_route *rt)
-{
- refcount_inc(&rt->refcnt);
-}
-
-static __inline__ void ipxrtr_put(struct ipx_route *rt)
-{
- if (refcount_dec_and_test(&rt->refcnt))
- kfree(rt);
-}
-#endif /* _NET_INET_IPX_H_ */
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 9259ce2b22f3..df85d19fbf84 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -112,10 +112,12 @@ enum iucv_tx_notify {
struct iucv_sock {
struct sock sk;
- char src_user_id[8];
- char src_name[8];
- char dst_user_id[8];
- char dst_name[8];
+ struct_group(init,
+ char src_user_id[8];
+ char src_name[8];
+ char dst_user_id[8];
+ char dst_name[8];
+ );
struct list_head accept_q;
spinlock_t accept_q_lock;
struct sock *parent;
@@ -128,11 +130,12 @@ struct iucv_sock {
u8 flags;
u16 msglimit;
u16 msglimit_peer;
+ atomic_t skbs_in_xmit;
atomic_t msg_sent;
atomic_t msg_recv;
atomic_t pendings;
int transport;
- void (*sk_txnotify)(struct sk_buff *skb,
+ void (*sk_txnotify)(struct sock *sk,
enum iucv_tx_notify n);
};
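struct_group() overlays a named struct on the same members, so the four ID fields can be addressed as one unit (e.g. a single memset) without tripping FORTIFY_SOURCE bounds checks. A userspace rendition of the expansion, simplified from linux/stddef.h (the real macro also forwards attributes):

#include <stdio.h>
#include <string.h>

#define struct_group(NAME, ...) \
	union { \
		struct { __VA_ARGS__ }; \
		struct { __VA_ARGS__ } NAME; \
	}

struct iucv_ids {
	struct_group(init,
		char src_user_id[8];
		char src_name[8];
	);
};

int main(void)
{
	struct iucv_ids ids;

	memcpy(ids.src_user_id, "USERID01", 8);
	memset(&ids.init, 0, sizeof(ids.init));	/* wipes the whole group */
	printf("%d\n", ids.src_name[0]);	/* 0 */
	return 0;
}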
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index f9e88401d7da..5cd7871127c9 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/slab.h>
+#include <asm/dma-types.h>
#include <asm/debug.h>
/*
@@ -76,11 +77,11 @@
* and iucv_message_reply if IUCV_IPBUFLST or IUCV_IPANSLST are used.
*/
struct iucv_array {
- u32 address;
+ dma32_t address;
u32 length;
} __attribute__ ((aligned (8)));
-extern struct bus_type iucv_bus;
+extern const struct bus_type iucv_bus;
extern struct device *iucv_root;
/*
@@ -489,7 +490,7 @@ struct iucv_interface {
int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
int (*iucv_register)(struct iucv_handler *handler, int smp);
void (*iucv_unregister)(struct iucv_handler *handler, int smp);
- struct bus_type *bus;
+ const struct bus_type *bus;
struct device *root;
};
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index d2ea5863eedc..b2cf243ebe44 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -426,17 +426,10 @@ struct iw_public_data {
/**************************** PROTOTYPES ****************************/
/*
- * Functions part of the Wireless Extensions (defined in net/core/wireless.c).
- * Those may be called only within the kernel.
+ * Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c).
+ * Those may be called by driver modules.
*/
-/* First : function strictly used inside the kernel */
-
-/* Handle /proc/net/wireless, called in net/code/dev.c */
-int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
-
-/* Second : functions that may be called by driver modules */
-
/* Send a single event to user space */
void wireless_send_event(struct net_device *dev, unsigned int cmd,
union iwreq_data *wrqu, const char *extra);
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 2d704f8f4905..90279e5e09a5 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -47,9 +47,9 @@ struct kcm_stats {
struct kcm_tx_msg {
unsigned int sent;
- unsigned int fragidx;
unsigned int frag_offset;
unsigned int msg_flags;
+ bool started_tx;
struct sk_buff *frag_skb;
struct sk_buff *last_skb;
};
diff --git a/include/net/lapb.h b/include/net/lapb.h
index ccc3d1f020b0..124ee122f2c8 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -92,6 +92,7 @@ struct lapb_cb {
unsigned short n2, n2count;
unsigned short t1, t2;
struct timer_list t1timer, t2timer;
+ bool t1timer_running, t2timer_running;
/* Internal control information */
struct sk_buff_head write_queue;
@@ -103,6 +104,7 @@ struct lapb_cb {
struct lapb_frame frmr_data;
unsigned char frmr_type;
+ spinlock_t lock;
refcount_t refcnt;
};
diff --git a/include/net/llc.h b/include/net/llc.h
index df282d9b4017..e250dca03963 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -72,7 +72,9 @@ struct llc_sap {
static inline
struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
{
- return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES];
+ u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS);
+
+ return &sap->sk_dev_hash[bucket];
}
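Hashing the ifindex instead of taking it modulo the table size avoids pathological clustering when interface indexes share a stride. A runnable model using the kernel's multiplicative hash constant (assuming LLC_SK_DEV_HASH_BITS is 6, i.e. 64 buckets):

#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

/* Minimal model of the kernel's hash_32(). */
static unsigned int hash_32(unsigned int val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	/* ifindexes striding by 64 all land in bucket 1 under "% 64"
	 * but spread out under the multiplicative hash. */
	for (unsigned int ifindex = 1; ifindex <= 257; ifindex += 64)
		printf("ifindex %3u: mod %2u hash %2u\n",
		       ifindex, ifindex % 64, hash_32(ifindex, 6));
	return 0;
}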
static inline
@@ -133,7 +135,7 @@ static inline void llc_sap_put(struct llc_sap *sap)
struct llc_sap *llc_sap_find(unsigned char sap_value);
int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+ const unsigned char *dmac, unsigned char dsap);
void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index e766300b3e99..7620a9196922 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -16,6 +16,13 @@
* Connection state transition actions
* (Fb = F bit; Pb = P bit; Xb = X bit)
*/
+
+#include <linux/types.h>
+
+struct sk_buff;
+struct sock;
+struct timer_list;
+
#define LLC_CONN_AC_CLR_REMOTE_BUSY 1
#define LLC_CONN_AC_CONN_IND 2
#define LLC_CONN_AC_CONN_CONFIRM 3
@@ -168,7 +175,6 @@ int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
-int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
void llc_conn_busy_tmr_cb(struct timer_list *t);
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 3948cf111dd0..241889955157 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -158,7 +158,6 @@ int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
/* NOT_USED functions and their variations */
int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 48f3f891b2f9..53823d61d8b6 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -11,6 +11,10 @@
*
* See the GNU General Public License for more details.
*/
+
+#include <net/llc_c_ac.h>
+#include <net/llc_c_ev.h>
+
/* Connection component state management */
/* connection states */
#define LLC_CONN_OUT_OF_SVC 0 /* prior to allocation */
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index ea985aa7a6c5..374411b3066c 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -38,6 +38,7 @@ struct llc_sock {
struct llc_addr laddr; /* lsap/mac pair */
struct llc_addr daddr; /* dsap/mac pair */
struct net_device *dev; /* device to send to remote */
+ netdevice_tracker dev_tracker;
u32 copied_seq; /* head of yet unread data */
u8 retry_count; /* number of retries */
u8 ack_must_be_send;
@@ -110,7 +111,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
- struct llc_addr *laddr);
+ struct llc_addr *laddr, const struct net *net);
void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index 8d5c543cd620..c72570a21a4f 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,7 +62,8 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,
+ u8 dsap);
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index c0f0a13ed818..1d55ba7c45be 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -15,9 +15,11 @@
#include <linux/if_ether.h>
/* Lengths of frame formats */
-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
-#define LLC_PDU_LEN_S 4
-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
/* Known SAP addresses */
#define LLC_GLOBAL_SAP 0xFF
#define LLC_NULL_SAP 0x00 /* not network-layer visible */
@@ -50,9 +52,10 @@
#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
#define LLC_PDU_TYPE_MASK 0x03
-#define LLC_PDU_TYPE_I 0 /* first bit */
-#define LLC_PDU_TYPE_S 1 /* first two bits */
-#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
#define LLC_PDU_TYPE_IS_I(pdu) \
((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
u8 ssap, u8 dsap, u8 cr)
{
- const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+ int hlen = 4; /* default value for I and S types */
struct llc_pdu_un *pdu;
+ switch (type) {
+ case LLC_PDU_TYPE_U:
+ hlen = 3;
+ break;
+ case LLC_PDU_TYPE_U_XID:
+ hlen = 6;
+ break;
+ }
+
skb_push(skb, hlen);
skb_reset_network_header(skb);
pdu = llc_pdu_un_hdr(skb);
@@ -250,21 +262,19 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
*/
static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
}
/**
* llc_pdu_decode_da - extracts dest address of input frame
* @skb: input skb that destination address must be extracted from it
- * @sa: pointer to destination address (6 byte array).
+ * @da: pointer to destination address (6 byte array).
*
* This function extracts destination address(MAC) of input frame.
*/
static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
}
/**
@@ -309,7 +319,7 @@ static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
/**
* llc_pdu_init_as_test_cmd - sets PDU as TEST
- * @skb - Address of the skb to build
+ * @skb: Address of the skb to build
*
* Sets a PDU as TEST
*/
@@ -357,6 +367,8 @@ struct llc_xid_info {
/**
* llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
* @skb: input skb that header must be set into it.
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
*
* This function sets third,fourth,fifth and sixth bytes of LLC header as
* a XID PDU.
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
xid_info->type = svcs_supported;
xid_info->rw = rx_window << 1; /* size of receive window */
- skb_put(skb, sizeof(struct llc_xid_info));
+
+ /* no need to push/put since llc_pdu_header_init() has already
+ * pushed 3 + 3 bytes
+ */
}
/**
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
index a61b98c108ee..f71790305bc9 100644
--- a/include/net/llc_s_ac.h
+++ b/include/net/llc_s_ac.h
@@ -11,6 +11,10 @@
*
* See the GNU General Public License for more details.
*/
+
+struct llc_sap;
+struct sk_buff;
+
/* SAP component actions */
#define SAP_ACT_UNITDATA_IND 1
#define SAP_ACT_SEND_UI 2
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
index 84db3a59ed28..fb7df1d70af3 100644
--- a/include/net/llc_s_ev.h
+++ b/include/net/llc_s_ev.h
@@ -13,6 +13,7 @@
*/
#include <linux/skbuff.h>
+#include <net/llc.h>
/* Defines SAP component events */
/* Types of events (possible values in 'ev->type') */
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index c4359e203013..ed5b2fa40d32 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -12,6 +12,12 @@
* See the GNU General Public License for more details.
*/
+#include <linux/types.h>
+#include <net/llc_s_ac.h>
+#include <net/llc_s_ev.h>
+
+struct llc_sap_state_trans;
+
#define LLC_NR_SAP_STATES 2 /* size of state table */
/* structures and types */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 05cfd6ff6528..53bd2d02a4f0 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -16,9 +16,12 @@
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
+ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
+ */
enum {
LWTUNNEL_XMIT_DONE,
- LWTUNNEL_XMIT_CONTINUE,
+ LWTUNNEL_XMIT_CONTINUE = 0x100,
};
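With the old value of 1, LWTUNNEL_XMIT_CONTINUE collided with NET_XMIT_DROP, so a drop reported from the device layer could be misread as "continue with regular output". A runnable check of the value spaces (constants copied from linux/netdevice.h for illustration):

#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP    0x01
#define NETDEV_TX_BUSY   0x10

enum { LWTUNNEL_XMIT_DONE, LWTUNNEL_XMIT_CONTINUE = 0x100 };

int main(void)
{
	printf("DROP == old CONTINUE(1)? %d\n", NET_XMIT_DROP == 1);
	printf("DROP == new CONTINUE?    %d\n",
	       NET_XMIT_DROP == LWTUNNEL_XMIT_CONTINUE);
	printf("BUSY below new CONTINUE? %d\n",
	       NETDEV_TX_BUSY < LWTUNNEL_XMIT_CONTINUE);
	return 0;
}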
@@ -51,6 +54,9 @@ struct lwtunnel_encap_ops {
};
#ifdef CONFIG_LWTUNNEL
+
+DECLARE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled);
+
void lwtstate_free(struct lwtunnel_state *lws);
static inline struct lwtunnel_state *
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 66e2bfd165e8..353488ab94a2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#ifndef MAC80211_H
@@ -18,6 +18,7 @@
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
+#include <linux/lockdep.h>
#include <net/cfg80211.h>
#include <net/codel.h>
#include <net/ieee80211_radiotap.h>
@@ -78,7 +79,7 @@
* helpers for sanity checking. Drivers must ensure all work added onto the
* mac80211 workqueue should be cancelled on the driver stop() callback.
*
- * mac80211 will flushed the workqueue upon interface removal and during
+ * mac80211 will flush the workqueue upon interface removal and during
* suspend.
*
* All work performed on the mac80211 workqueue must not acquire the RTNL lock.
@@ -88,15 +89,13 @@
/**
* DOC: mac80211 software tx queueing
*
- * mac80211 provides an optional intermediate queueing implementation designed
- * to allow the driver to keep hardware queues short and provide some fairness
- * between different stations/interfaces.
- * In this model, the driver pulls data frames from the mac80211 queue instead
- * of letting mac80211 push them via drv_tx().
- * Other frames (e.g. control or management) are still pushed using drv_tx().
+ * mac80211 uses an intermediate queueing implementation, designed to allow the
+ * driver to keep hardware queues short and to provide some fairness between
+ * different stations/interfaces.
*
- * Drivers indicate that they use this model by implementing the .wake_tx_queue
- * driver operation.
+ * Drivers must provide the .wake_tx_queue driver operation by either
+ * linking it to ieee80211_handle_wake_tx_queue() or implementing a custom
+ * handler.
*
* Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with
* another per-sta for non-data/non-mgmt and bufferable management frames, and
@@ -105,9 +104,12 @@
* The driver is expected to initialize its private per-queue data for stations
* and interfaces in the .add_interface and .sta_add ops.
*
- * The driver can't access the queue directly. To dequeue a frame from a
- * txq, it calls ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a
- * queue, it calls the .wake_tx_queue driver op.
+ * The driver can't access the internal TX queues (iTXQs) directly.
+ * Whenever mac80211 adds a new frame to a queue, it calls the .wake_tx_queue
+ * driver op.
+ * Drivers implementing a custom .wake_tx_queue op can get them by calling
+ * ieee80211_tx_dequeue(). Drivers using ieee80211_handle_wake_tx_queue() will
+ * simply get the individual frames pushed via the .tx driver operation.
*
* Drivers can optionally delegate responsibility for scheduling queues to
* mac80211, to take advantage of airtime fairness accounting. In this case, to
@@ -125,6 +127,22 @@
* via the usual ieee80211_tx_dequeue).
*/
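A hedged sketch of the pull model described above, for a driver implementing a custom .wake_tx_queue (the hardware hand-off is elided):

static void drv_wake_tx_queue(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* Pull frames until the iTXQ runs dry (or the HW ring fills). */
	while ((skb = ieee80211_tx_dequeue(hw, txq))) {
		/* hand skb to the device's TX ring here */
	}
}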
+/**
+ * DOC: HW timestamping
+ *
+ * Timing Measurement and Fine Timing Measurement require accurate timestamps
+ * of the action frames TX/RX and their respective acks.
+ *
+ * To report hardware timestamps for Timing Measurement or Fine Timing
+ * Measurement frame RX, the low level driver should set the SKB's hwtstamp
+ * field to the frame RX timestamp and report the ack TX timestamp in the
+ * ieee80211_rx_status struct.
+ *
+ * Similarly, to report hardware timestamps for Timing Measurement or Fine
+ * Timing Measurement frame TX, the driver should set the SKB's hwtstamp field
+ * to the frame TX timestamp and report the ack RX timestamp in the
+ * ieee80211_tx_status struct.
+ */
struct device;
/**
@@ -196,6 +214,10 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
* @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
+ * bandwidth) OFDMA settings need to be changed
+ * @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
+ * was changed.
*/
enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
@@ -203,6 +225,19 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
+ IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
+};
+
+/**
+ * struct ieee80211_chan_req - A channel "request"
+ * @oper: channel definition to use for operation
+ * @ap: the channel definition of the AP, if any
+ * (otherwise the chan member is %NULL)
+ */
+struct ieee80211_chan_req {
+ struct cfg80211_chan_def oper;
+ struct cfg80211_chan_def ap;
};
/**
@@ -213,6 +248,8 @@ enum ieee80211_chanctx_change {
*
* @def: the channel definition
* @min_def: the minimum channel definition currently required.
+ * @ap: the channel definition the AP actually is operating as,
+ * for use with (wider bandwidth) OFDMA
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -225,6 +262,7 @@ enum ieee80211_chanctx_change {
struct ieee80211_chanctx_conf {
struct cfg80211_chan_def def;
struct cfg80211_chan_def min_def;
+ struct cfg80211_chan_def ap;
u8 rx_chains_static, rx_chains_dynamic;
@@ -261,11 +299,13 @@ enum ieee80211_chanctx_switch_mode {
* done.
*
* @vif: the vif that should be switched from old_ctx to new_ctx
+ * @link_conf: the link conf that's switching
* @old_ctx: the old context to which the vif was assigned
* @new_ctx: the new context to which the vif must be assigned
*/
struct ieee80211_vif_chanctx_switch {
struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *old_ctx;
struct ieee80211_chanctx_conf *new_ctx;
};
@@ -273,8 +313,8 @@ struct ieee80211_vif_chanctx_switch {
/**
* enum ieee80211_bss_change - BSS change notification flags
*
- * These flags are used with the bss_info_changed() callback
- * to indicate which BSS parameter changed.
+ * These flags are used with the bss_info_changed(), link_info_changed()
+ * and vif_cfg_changed() callbacks to indicate which parameter(s) changed.
*
* @BSS_CHANGED_ASSOC: association status changed (associated/disassociated),
* also implies a change in the AID.
@@ -317,7 +357,11 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
* @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
- *
+ * @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
+ * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
+ * status changed.
+ * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
+ * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -350,6 +394,10 @@ enum ieee80211_bss_change {
BSS_CHANGED_TWT = 1<<27,
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
+ BSS_CHANGED_FILS_DISCOVERY = 1<<30,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+ BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
+ BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -448,9 +496,9 @@ struct ieee80211_ba_event {
/**
* struct ieee80211_event - event to be sent to the driver
* @type: The event itself. See &enum ieee80211_event_type.
- * @rssi: relevant if &type is %RSSI_EVENT
- * @mlme: relevant if &type is %AUTH_EVENT
- * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u.rssi: relevant if &type is %RSSI_EVENT
+ * @u.mlme: relevant if &type is %AUTH_EVENT
+ * @u.ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
* @u:union holding the fields above
*/
struct ieee80211_event {
@@ -491,16 +539,32 @@ struct ieee80211_ftm_responder_params {
};
/**
+ * struct ieee80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ */
+struct ieee80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
* to that BSS) that can change during the lifetime of the BSS.
*
+ * @vif: reference to owning VIF
+ * @bss: the cfg80211 bss descriptor. Valid only for a station, and only
+ * when associated. Note: This contains information which is not
+ * necessarily authenticated. For example, information coming from probe
+ * responses.
+ * @addr: (link) address used locally
+ * @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
- * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
* @uora_exists: is the UORA element advertised by AP
- * @ack_enabled: indicates support to receive a multi-TID that solicits either
- * ACK, BACK or both
* @uora_ocw_range: UORA element's OCW Range field
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @he_support: does this BSS support HE
@@ -509,11 +573,7 @@ struct ieee80211_ftm_responder_params {
* @twt_responder: does this BSS support TWT requester (relevant for managed
* mode only, set if the AP advertises TWT responder role)
* @twt_protected: does this BSS support protected TWT frames
- * @assoc: association status
- * @ibss_joined: indicates whether this station is part of an IBSS
- * or not
- * @ibss_creator: indicates if a new IBSS network is being created
- * @aid: association ID number, valid only when @assoc is true
+ * @twt_broadcast: does this BSS support broadcast TWT
* @use_cts_prot: use CTS protection
* @use_short_preamble: use 802.11b short preamble
* @use_short_slot: use short slot time (only relevant for ERP)
@@ -534,6 +594,8 @@ struct ieee80211_ftm_responder_params {
* IMPORTANT: These three sync_* parameters would possibly be out of sync
* by the time the driver will use them. The synchronized view is currently
* guaranteed only in certain callbacks.
+ * Note also that this is not used with MLD associations, mac80211 doesn't
+ * know how to track beacons for all of the links for this.
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
@@ -543,7 +605,7 @@ struct ieee80211_ftm_responder_params {
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
* @bssid: The BSSID for this BSS
* @enable_beacon: whether beaconing should be enabled or not
- * @chandef: Channel definition for this BSS -- the hardware might be
+ * @chanreq: Channel request for this BSS -- the hardware might be
* configured a higher bandwidth than this BSS uses, for example.
* @mu_group: VHT MU-MIMO group membership data
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
@@ -559,21 +621,7 @@ struct ieee80211_ftm_responder_params {
* threshold event and can't be enabled simultaneously with it.
* @cqm_rssi_high: Connection quality monitor RSSI upper threshold.
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
- * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
- * may filter ARP queries targeted for other addresses than listed here.
- * The driver must allow ARP queries targeted for all address listed here
- * to pass through. An empty list implies no ARP queries need to pass.
- * @arp_addr_cnt: Number of addresses currently on the list. Note that this
- * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
- * array size), it's up to the driver what to do in that case.
* @qos: This is a QoS-enabled BSS.
- * @idle: This interface is idle. There's also a global idle flag in the
- * hardware config which may be more appropriate depending on what
- * your driver/device needs to do.
- * @ps: power-save mode (STA only). This flag is NOT affected by
- * offchannel/dynamic_ps operations.
- * @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
- * @ssid_len: Length of SSID given in @ssid.
* @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
* @txpower: TX power in dBm. INT_MIN means not configured.
* @txpower_type: TX power adjustment used to control per packet Transmit
@@ -604,26 +652,72 @@ struct ieee80211_ftm_responder_params {
* nontransmitted BSSIDs
* @profile_periodicity: the least number of beacon frames need to be received
* in order to discover all the nontransmitted BSSIDs in the set.
- * @he_oper: HE operation information of the AP we are connected to
+ * @he_oper: HE operation information of the BSS (AP/Mesh) or of the AP we are
+ * connected to (STA)
* @he_obss_pd: OBSS Packet Detection parameters.
* @he_bss_color: BSS coloring settings, if BSS supports HE
+ * @fils_discovery: FILS discovery configuration
+ * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response
+ * interval.
+ * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
+ * to driver when rate control is offloaded to firmware.
+ * @power_type: power type of BSS for 6 GHz
+ * @tx_pwr_env: transmit power envelope array of BSS.
+ * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @pwr_reduction: power constraint of BSS.
+ * @eht_support: does this BSS support EHT
+ * @csa_active: marks whether a channel switch is going on.
+ * @mu_mimo_owner: indicates interface owns MU-MIMO capability
+ * @chanctx_conf: The channel context this interface is assigned to, or %NULL
+ * when it is not assigned. This pointer is RCU-protected due to the TX
+ * path needing to access it; even though the netdev carrier will always
+ * be off when it is %NULL there can still be races and packets could be
+ * processed after it switches back to %NULL.
+ * @color_change_active: marks whether a color change is ongoing.
+ * @color_change_color: the bss color that will be used after the change.
+ * @ht_ldpc: in AP mode, indicates interface has HT LDPC capability.
+ * @vht_ldpc: in AP mode, indicates interface has VHT LDPC capability.
+ * @he_ldpc: in AP mode, indicates interface has HE LDPC capability.
+ * @vht_su_beamformer: in AP mode, does this BSS support operation as an VHT SU
+ * beamformer
+ * @vht_su_beamformee: in AP mode, does this BSS support operation as an VHT SU
+ * beamformee
+ * @vht_mu_beamformer: in AP mode, does this BSS support operation as an VHT MU
+ * beamformer
+ * @vht_mu_beamformee: in AP mode, does this BSS support operation as an VHT MU
+ * beamformee
+ * @he_su_beamformer: in AP-mode, does this BSS support operation as an HE SU
+ * beamformer
+ * @he_su_beamformee: in AP-mode, does this BSS support operation as an HE SU
+ * beamformee
+ * @he_mu_beamformer: in AP-mode, does this BSS support operation as an HE MU
+ * beamformer
+ * @he_full_ul_mumimo: does this BSS support the reception (AP) or transmission
+ * (non-AP STA) of an HE TB PPDU on an RU that spans the entire PPDU
+ * bandwidth
+ * @eht_su_beamformer: in AP-mode, does this BSS enable operation as an EHT SU
+ * beamformer
+ * @eht_su_beamformee: in AP-mode, does this BSS enable operation as an EHT SU
+ * beamformee
+ * @eht_mu_beamformer: in AP-mode, does this BSS enable operation as an EHT MU
+ * beamformer
*/
struct ieee80211_bss_conf {
+ struct ieee80211_vif *vif;
+ struct cfg80211_bss *bss;
+
const u8 *bssid;
+ unsigned int link_id;
+ u8 addr[ETH_ALEN] __aligned(2);
u8 htc_trig_based_pkt_ext;
- bool multi_sta_back_32bit;
bool uora_exists;
- bool ack_enabled;
u8 uora_ocw_range;
u16 frame_time_rts_th;
bool he_support;
bool twt_requester;
bool twt_responder;
bool twt_protected;
- /* association related data */
- bool assoc, ibss_joined;
- bool ibss_creator;
- u16 aid;
+ bool twt_broadcast;
/* erp related data */
bool use_cts_prot;
bool use_short_preamble;
@@ -643,15 +737,9 @@ struct ieee80211_bss_conf {
u32 cqm_rssi_hyst;
s32 cqm_rssi_low;
s32 cqm_rssi_high;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
struct ieee80211_mu_group_data mu_group;
- __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
- int arp_addr_cnt;
bool qos;
- bool idle;
- bool ps;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
bool hidden_ssid;
int txpower;
enum nl80211_tx_power_setting txpower_type;
@@ -674,6 +762,37 @@ struct ieee80211_bss_conf {
} he_oper;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_fils_discovery fils_discovery;
+ u32 unsol_bcast_probe_resp_interval;
+ struct cfg80211_bitrate_mask beacon_tx_rate;
+ enum ieee80211_ap_reg_power power_type;
+ struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
+ u8 tx_pwr_env_num;
+ u8 pwr_reduction;
+ bool eht_support;
+
+ bool csa_active;
+
+ bool mu_mimo_owner;
+ struct ieee80211_chanctx_conf __rcu *chanctx_conf;
+
+ bool color_change_active;
+ u8 color_change_color;
+
+ bool ht_ldpc;
+ bool vht_ldpc;
+ bool he_ldpc;
+ bool vht_su_beamformer;
+ bool vht_su_beamformee;
+ bool vht_mu_beamformer;
+ bool vht_mu_beamformee;
+ bool he_su_beamformer;
+ bool he_su_beamformee;
+ bool he_mu_beamformer;
+ bool he_full_ul_mumimo;
+ bool eht_su_beamformer;
+ bool eht_su_beamformee;
+ bool eht_mu_beamformer;
};
/**
@@ -720,9 +839,8 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
* that a frame can be transmitted while the queues are stopped for
* off-channel operation.
- * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
- * used to indicate that a pending frame requires TX processing before
- * it can be sent out.
+ * @IEEE80211_TX_CTL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ * (header conversion)
* @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211,
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
@@ -791,7 +909,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
- IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
@@ -812,6 +930,8 @@ enum mac80211_tx_info_flags {
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+#define IEEE80211_TX_RC_S1G_MCS IEEE80211_TX_RC_VHT_MCS
+
/**
* enum mac80211_tx_control_flags - flags to describe transmit control
*
@@ -823,10 +943,22 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
* @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
* @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
- * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation
- * (header conversion)
+ * @IEEE80211_TX_INTCFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
* @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
* has already been assigned to this frame.
+ * @IEEE80211_TX_CTRL_DONT_REORDER: This frame should not be reordered
+ * relative to other frames that have this flag set, independent
+ * of their QoS TID or other priority field values.
+ * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
+ * for sequence number assignment
+ * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
+ * frame should be transmitted on the specific link. This really is
+ * only relevant for frames that do not have data present, and is
+ * also not used for 802.3 format frames. Note that even if the frame
+ * is on a specific link, address translation might still apply if
+ * it's intended for an MLD.
*
* These flags are used in tx_info->control.flags.
*/
@@ -837,8 +969,27 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
- IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6),
+ IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6),
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
+ IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
+ IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
+ IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
+};
+
+#define IEEE80211_LINK_UNSPECIFIED 0xf
+#define IEEE80211_TX_CTRL_MLO_LINK_UNSPEC \
+ u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, \
+ IEEE80211_TX_CTRL_MLO_LINK)
+
+/**
+ * enum mac80211_tx_status_flags - flags to describe transmit status
+ *
+ * @IEEE80211_TX_STATUS_ACK_SIGNAL_VALID: ACK signal is valid
+ *
+ * These flags are used in tx_info->status.flags.
+ */
+enum mac80211_tx_status_flags {
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = BIT(0),
};
/*
@@ -948,6 +1099,11 @@ struct ieee80211_tx_rate {
#define IEEE80211_MAX_TX_RETRY 31
+static inline bool ieee80211_rate_valid(struct ieee80211_tx_rate *rate)
+{
+ return rate->idx >= 0 && rate->count > 0;
+}
+
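A driver's TX-status path might use this helper to terminate its walk over the MRR table; a minimal sketch, with drv_count_tx_attempts being a hypothetical name:

static void drv_count_tx_attempts(struct ieee80211_tx_info *info)
{
	int i;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		struct ieee80211_tx_rate *rate = &info->status.rates[i];

		/* the first invalid entry terminates the rate table */
		if (!ieee80211_rate_valid(rate))
			break;

		/* rate->idx and rate->count describe one MRR stage */
		pr_debug("stage %d: idx %d, tries %d\n",
			 i, rate->idx, rate->count);
	}
}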
static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
u8 mcs, u8 nss)
{
@@ -977,9 +1133,13 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* (3) TX status information - driver tells mac80211 what happened
*
* @flags: transmit info flags, defined above
- * @band: the band to transmit on (use for checking for races)
+ * @band: the band to transmit on (use e.g. for checking for races),
+ * not valid if the interface is an MLD since we won't know which
+ * link the frame will be transmitted on
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
- * @ack_frame_id: internal frame ID for TX status, used internally
+ * @status_data: internal data for TX status handling, assigned privately,
+ * see also &enum ieee80211_status_data for the internal documentation
+ * @status_data_idr: indicates that @status_data is an IDR-allocated ID for an ack frame
* @tx_time_est: TX time estimate in units of 4us, used internally
* @control: union part for control data
* @control.rates: TX rates array to try
@@ -1002,26 +1162,23 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @status.ampdu_ack_len: AMPDU ack length
* @status.ampdu_len: AMPDU length
* @status.antenna: (legacy, kept only for iwlegacy)
- * @status.tx_time: airtime consumed for transmission
- * @status.is_valid_ack_signal: ACK signal is valid
+ * @status.tx_time: airtime consumed for transmission; note this is only
+ * used for WMM AC, not for airtime fairness
+ * @status.flags: status flags, see &enum mac80211_tx_status_flags
* @status.status_driver_data: driver use area
* @ack: union part for pure ACK data
* @ack.cookie: cookie for the ACK
* @driver_data: array of driver_data pointers
- * @ampdu_ack_len: number of acked aggregated frames.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ampdu_len: number of aggregated frames.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ack_signal: signal strength of the ACK frame
*/
struct ieee80211_tx_info {
/* common information */
u32 flags;
u32 band:3,
- ack_frame_id:13,
+ status_data_idr:1,
+ status_data:13,
hw_queue:4,
tx_time_est:10;
- /* 2 free bits */
+ /* 1 free bit */
union {
struct {
@@ -1035,7 +1192,11 @@ struct ieee80211_tx_info {
u8 use_cts_prot:1;
u8 short_preamble:1;
u8 skip_table:1;
- /* 2 bytes free */
+
+ /* for injection only (bitmap) */
+ u8 antennas:2;
+
+ /* 14 bits free */
};
/* only needed before rate control */
unsigned long jiffies;
@@ -1055,9 +1216,11 @@ struct ieee80211_tx_info {
u8 ampdu_ack_len;
u8 ampdu_len;
u8 antenna;
+ u8 pad;
u16 tx_time;
- bool is_valid_ack_signal;
- void *status_driver_data[19 / sizeof(void *)];
+ u8 flags;
+ u8 pad2;
+ void *status_driver_data[16 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -1088,19 +1251,46 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
return info->tx_time_est << 2;
}
+/**
+ * struct ieee80211_rate_status - MRR stage for the status path
+ *
+ * This struct is used in struct ieee80211_tx_status to provide drivers a
+ * dynamic way to report the rates and power levels used per packet.
+ *
+ * @rate_idx: The rate that was actually used.
+ * @try_count: How often the rate was tried.
+ * @tx_power_idx: An index into the ieee80211_hw->tx_power_levels list of the
+ * corresponding wifi hardware. The index shall point to the power level
+ * that was used when sending the packet.
+ */
+struct ieee80211_rate_status {
+ struct rate_info rate_idx;
+ u8 try_count;
+ u8 tx_power_idx;
+};
+
/**
* struct ieee80211_tx_status - extended tx status info for rate control
*
* @sta: Station that the packet was transmitted for
* @info: Basic tx status information
* @skb: Packet skb (can be NULL if not provided by the driver)
- * @rate: The TX rate that was used when sending the packet
+ * @rates: MRR stages that were used when sending the packet
+ * @n_rates: Number of MRR stages (count of instances for @rates)
+ * @free_list: list where processed skbs are stored to be freed by the driver
+ * @ack_hwtstamp: Hardware timestamp of the received ack in nanoseconds.
+ * Only needed for Timing measurement and Fine timing measurement action
+ * frames. Only reported by devices that have timestamping enabled.
*/
struct ieee80211_tx_status {
struct ieee80211_sta *sta;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- struct rate_info *rate;
+ struct ieee80211_rate_status *rates;
+ ktime_t ack_hwtstamp;
+ u8 n_rates;
+
+ struct list_head *free_list;
};
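A hedged sketch of reporting one MRR stage through this extended structure via ieee80211_tx_status_ext(); drv_report_tx is a hypothetical name, and tx_power_idx is only meaningful if the driver registered hw->tx_power_levels:

static void drv_report_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ieee80211_sta *sta)
{
	struct ieee80211_rate_status rs = {
		.rate_idx = { .legacy = 60 },	/* 6 Mbit/s in 100 kbit/s units */
		.try_count = 2,
		.tx_power_idx = 0,	/* index into hw->tx_power_levels */
	};
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = IEEE80211_SKB_CB(skb),
		.skb = skb,
		.rates = &rs,
		.n_rates = 1,
	};

	ieee80211_tx_status_ext(hw, &status);
}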
/**
@@ -1143,9 +1333,9 @@ static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb
* in the TX status but the rate control information (it does clear
* the count since you need to fill that in anyway).
*
- * NOTE: You can only use this function if you do NOT use
- * info->driver_data! Use info->rate_driver_data
- * instead if you need only the less space that allows.
+ * NOTE: While the rates array is kept intact, this will wipe all of the
+ * driver_data fields in info, so it's up to the driver to restore
+ * any fields it needs after calling this helper.
*/
static inline void
ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
@@ -1160,12 +1350,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
/* clear the rate counts */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++)
info->status.rates[i].count = 0;
-
- BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
- memset(&info->status.ampdu_ack_len, 0,
- sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
+ memset_after(&info->status, 0, rates);
}
@@ -1191,6 +1376,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the frame.
 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on
* the frame.
+ * @RX_FLAG_MACTIME: The timestamp passed in the RX status (@mactime
+ * field) is valid if this field is non-zero, and the position
+ * where the timestamp was sampled depends on the value.
* @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the first symbol of the MPDU
* was received. This is useful in monitor mode and for proper IBSS
@@ -1200,6 +1388,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* (including FCS) was received.
* @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the SYNC preamble was received.
+ * @RX_FLAG_MACTIME_IS_RTAP_TS64: The timestamp passed in the RX status @mactime
+ * is only for use in the radiotap timestamp header, not otherwise a valid
+ * @mactime value. Note this is a separate flag so that we continue to see
+ * %RX_FLAG_MACTIME as unset. Also note that in this case the timestamp is
+ * reported to be 64 bits wide, not just 32.
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
@@ -1231,9 +1424,12 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* subframes share the same sequence number. Reported subframes can be
* either regular MSDU or singly A-MSDUs. Subframes must not be
* interleaved with other frames.
- * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
- * radiotap data in the skb->data (before the frame) as described by
- * the &struct ieee80211_vendor_radiotap.
+ * @RX_FLAG_RADIOTAP_TLV_AT_END: This frame contains radiotap TLVs in the
+ * skb->data (before the 802.11 header).
+ * If used, the SKB's mac_header pointer must be set to point
+ * to the 802.11 header after the TLVs, and any padding added after TLV
+ * data to align to 4 must be cleared by the driver putting the TLVs
+ * in the skb.
* @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
* This is used for AMSDU subframes which can have the same PN as
* the first subframe.
@@ -1261,16 +1457,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the "0-length PSDU" field included there. The value for it is
* in &struct ieee80211_rx_status. Note that if this value isn't
* known the frame shouldn't be reported.
+ * @RX_FLAG_8023: the frame has an 802.3 header (decap offload performed by
+ * hardware or driver)
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
RX_FLAG_DECRYPTED = BIT(1),
- RX_FLAG_MACTIME_PLCP_START = BIT(2),
+ RX_FLAG_ONLY_MONITOR = BIT(2),
RX_FLAG_MMIC_STRIPPED = BIT(3),
RX_FLAG_IV_STRIPPED = BIT(4),
RX_FLAG_FAILED_FCS_CRC = BIT(5),
RX_FLAG_FAILED_PLCP_CRC = BIT(6),
- RX_FLAG_MACTIME_START = BIT(7),
+ RX_FLAG_MACTIME_IS_RTAP_TS64 = BIT(7),
RX_FLAG_NO_SIGNAL_VAL = BIT(8),
RX_FLAG_AMPDU_DETAILS = BIT(9),
RX_FLAG_PN_VALIDATED = BIT(10),
@@ -1279,11 +1477,13 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_IS_LAST = BIT(13),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
- RX_FLAG_MACTIME_END = BIT(16),
- RX_FLAG_ONLY_MONITOR = BIT(17),
+ RX_FLAG_MACTIME = BIT(16) | BIT(17),
+ RX_FLAG_MACTIME_PLCP_START = 1 << 16,
+ RX_FLAG_MACTIME_START = 2 << 16,
+ RX_FLAG_MACTIME_END = 3 << 16,
RX_FLAG_SKIP_MONITOR = BIT(18),
RX_FLAG_AMSDU_MORE = BIT(19),
- RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(20),
+ RX_FLAG_RADIOTAP_TLV_AT_END = BIT(20),
RX_FLAG_MIC_STRIPPED = BIT(21),
RX_FLAG_ALLOW_SAME_PN = BIT(22),
RX_FLAG_ICV_STRIPPED = BIT(23),
@@ -1293,6 +1493,7 @@ enum mac80211_rx_flags {
RX_FLAG_RADIOTAP_HE_MU = BIT(27),
RX_FLAG_RADIOTAP_LSIG = BIT(28),
RX_FLAG_NO_PSDU = BIT(29),
+ RX_FLAG_8023 = BIT(30),
};
/**
@@ -1324,6 +1525,7 @@ enum mac80211_rx_encoding {
RX_ENC_HT,
RX_ENC_VHT,
RX_ENC_HE,
+ RX_ENC_EHT,
};
/**
@@ -1337,6 +1539,9 @@ enum mac80211_rx_encoding {
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
* @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is
* needed only for beacons and probe responses that update the scan cache.
+ * @ack_tx_hwtstamp: Hardware timestamp for the ack TX in nanoseconds. Only
+ * needed for Timing measurement and Fine timing measurement action frames.
+ * Only reported by devices that have timestamping enabled.
* @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
* it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
@@ -1354,7 +1559,7 @@ enum mac80211_rx_encoding {
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
- * @nss: number of streams (VHT and HE only)
+ * @nss: number of streams (VHT, HE and EHT only)
* @flag: %RX_FLAG_\*
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
@@ -1362,22 +1567,42 @@ enum mac80211_rx_encoding {
* @he_ru: HE RU, from &enum nl80211_he_ru_alloc
* @he_gi: HE GI, from &enum nl80211_he_gi
* @he_dcm: HE DCM value
+ * @eht: EHT specific rate information
+ * @eht.ru: EHT RU, from &enum nl80211_eht_ru_alloc
+ * @eht.gi: EHT GI, from &enum nl80211_eht_gi
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
* @zero_length_psdu_type: radiotap type of the 0-length PSDU
+ * @link_valid: if the link which is identified by @link_id is valid. This flag
+ * is set only when the connection is MLO.
+ * @link_id: ID of the link used to receive the packet. This is used along with
+ * @link_valid.
*/
struct ieee80211_rx_status {
u64 mactime;
- u64 boottime_ns;
+ union {
+ u64 boottime_ns;
+ ktime_t ack_tx_hwtstamp;
+ };
u32 device_timestamp;
u32 ampdu_reference;
u32 flag;
u16 freq: 13, freq_offset: 1;
u8 enc_flags;
- u8 encoding:2, bw:3, he_ru:3;
- u8 he_gi:2, he_dcm:1;
+ u8 encoding:3, bw:4;
+ union {
+ struct {
+ u8 he_ru:3;
+ u8 he_gi:2;
+ u8 he_dcm:1;
+ };
+ struct {
+ u8 ru:4;
+ u8 gi:2;
+ } eht;
+ };
u8 rate_idx;
u8 nss;
u8 rx_flags;
@@ -1388,6 +1613,7 @@ struct ieee80211_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
u8 zero_length_psdu_type;
+ u8 link_valid:1, link_id:4;
};
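Because the @mactime sampling position is now a two-bit field inside @flag, the RX path sets exactly one of the position values rather than independent bits. A hedged sketch for an EHT frame received on an MLO link (drv_fill_rx_status is a hypothetical name):

static void drv_fill_rx_status(struct ieee80211_rx_status *status,
			       u64 tsf_at_first_symbol)
{
	memset(status, 0, sizeof(*status));

	status->mactime = tsf_at_first_symbol;
	/* RX_FLAG_MACTIME is the 2-bit mask; _START is one position value */
	status->flag |= RX_FLAG_MACTIME_START;

	status->encoding = RX_ENC_EHT;
	status->nss = 2;
	status->eht.ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
	status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;

	/* for an MLO connection, identify the receiving link */
	status->link_valid = 1;
	status->link_id = 0;
}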
static inline u32
@@ -1398,39 +1624,6 @@ ieee80211_rx_status_to_khz(struct ieee80211_rx_status *rx_status)
}
/**
- * struct ieee80211_vendor_radiotap - vendor radiotap data information
- * @present: presence bitmap for this vendor namespace
- * (this could be extended in the future if any vendor needs more
- * bits, the radiotap spec does allow for that)
- * @align: radiotap vendor namespace alignment. This defines the needed
- * alignment for the @data field below, not for the vendor namespace
- * description itself (which has a fixed 2-byte alignment)
- * Must be a power of two, and be set to at least 1!
- * @oui: radiotap vendor namespace OUI
- * @subns: radiotap vendor sub namespace
- * @len: radiotap vendor sub namespace skip length, if alignment is done
- * then that's added to this, i.e. this is only the length of the
- * @data field.
- * @pad: number of bytes of padding after the @data, this exists so that
- * the skb data alignment can be preserved even if the data has odd
- * length
- * @data: the actual vendor namespace data
- *
- * This struct, including the vendor data, goes into the skb->data before
- * the 802.11 header. It's split up in mac80211 using the align/oui/subns
- * data.
- */
-struct ieee80211_vendor_radiotap {
- u32 present;
- u8 align;
- u8 oui[3];
- u8 subns;
- u8 pad;
- u16 len;
- u8 data[];
-} __packed;
-
-/**
* enum ieee80211_conf_flags - configuration flags
*
* Flags to define PHY configuration options
@@ -1570,8 +1763,9 @@ struct ieee80211_conf {
* @chandef: the new channel to switch to
* @count: the number of TBTT's until the channel switch event
* @delay: maximum delay between the time the AP transmitted the last beacon in
- * current channel and the expected time of the first beacon in the new
- * channel, expressed in TU.
+ * the current channel and the expected time of the first beacon in the new
+ * channel, expressed in TU.
+ * @link_id: the link ID of the link doing the channel switch, 0 for non-MLO
*/
struct ieee80211_channel_switch {
u64 timestamp;
@@ -1579,6 +1773,7 @@ struct ieee80211_channel_switch {
bool block_tx;
struct cfg80211_chan_def chandef;
u8 count;
+ u8 link_id;
u32 delay;
};
@@ -1598,12 +1793,116 @@ struct ieee80211_channel_switch {
* @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes
* and send P2P_PS notification to the driver if NOA changed, even
 * if this is not a pure P2P vif.
+ * @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is
+ * enabled for the interface.
+ * @IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW: Ignore wider bandwidth OFDMA
+ * operation on this interface and request a channel context without
+ * the AP definition. Use this e.g. because the device is able to
+ * handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
+ IEEE80211_VIF_EML_ACTIVE = BIT(4),
+ IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
+};
+
+
+/**
+ * enum ieee80211_offload_flags - virtual interface offload flags
+ *
+ * @IEEE80211_OFFLOAD_ENCAP_ENABLED: tx encapsulation offload is enabled
+ * The driver supports sending frames passed as 802.3 frames by mac80211.
+ * It must also support sending 802.11 packets for the same interface.
+ * @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload
+ * @IEEE80211_OFFLOAD_DECAP_ENABLED: rx encapsulation offload is enabled
+ * The driver supports passing received 802.11 frames as 802.3 frames to
+ * mac80211.
+ */
+
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0),
+ IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1),
+ IEEE80211_OFFLOAD_DECAP_ENABLED = BIT(2),
+};
+
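mac80211 initializes these flags before the callbacks named in the ieee80211_vif documentation below, and the driver masks out what it cannot support at that moment. A minimal sketch of an update_vif_offload implementation, assuming (purely for illustration) hardware that cannot combine encapsulation offload with 4-address mode:

static void drv_update_vif_offload(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR)
		vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
}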
+/**
+ * struct ieee80211_vif_cfg - interface configuration
+ * @assoc: association status
+ * @ibss_joined: indicates whether this station is part of an IBSS or not
+ * @ibss_creator: indicates if a new IBSS network is being created
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ * offchannel/dynamic_ps operations.
+ * @aid: association ID number, valid only when @assoc is true
+ * @eml_cap: EML capabilities as described in P802.11be_D4.1 Figure 9-1001j.
+ * @eml_med_sync_delay: Medium Synchronization delay as described in
+ * P802.11be_D4.1 Figure 9-1001i.
+ * @mld_capa_op: MLD Capabilities and Operations per P802.11be_D4.1
+ * Figure 9-1001k
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ * hardware may filter ARP queries targeted for addresses other than those
+ * listed here. The driver must allow ARP queries targeted for all addresses
+ * listed here to pass through. An empty list implies no ARP queries need
+ * to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list. Note that this
+ * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
+ * array size); it's up to the driver what to do in that case.
+ * @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
+ * @ssid_len: Length of SSID given in @ssid.
+ * @s1g: BSS is S1G BSS (affects Association Request format).
+ * @idle: This interface is idle. There's also a global idle flag in the
+ * hardware config which may be more appropriate depending on what
+ * your driver/device needs to do.
+ * @ap_addr: AP MLD address, or BSSID for non-MLO connections
+ * (station mode only)
+ */
+struct ieee80211_vif_cfg {
+ /* association related data */
+ bool assoc, ibss_joined;
+ bool ibss_creator;
+ bool ps;
+ u16 aid;
+ u16 eml_cap;
+ u16 eml_med_sync_delay;
+ u16 mld_capa_op;
+
+ __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ int arp_addr_cnt;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ bool s1g;
+ bool idle;
+ u8 ap_addr[ETH_ALEN] __aligned(2);
+};
+
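Fields that previously lived in &struct ieee80211_bss_conf (association data, the ARP list, SSID, idle) are now reached through vif->cfg. A minimal sketch, with drv_assoc_changed being a hypothetical name:

static void drv_assoc_changed(struct ieee80211_vif *vif)
{
	if (vif->cfg.assoc)
		pr_debug("associated, aid %u, AP/MLD addr %pM\n",
			 vif->cfg.aid, vif->cfg.ap_addr);
}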
+#define IEEE80211_TTLM_NUM_TIDS 8
+
+/**
+ * struct ieee80211_neg_ttlm - negotiated TID to link map info
+ *
+ * @downlink: bitmap of active links per TID for downlink, or 0 if mapping for
+ * this TID is not included.
+ * @uplink: bitmap of active links per TID for uplink, or 0 if mapping for this
+ * TID is not included.
+ * @valid: info is valid or not.
+ */
+struct ieee80211_neg_ttlm {
+ u16 downlink[IEEE80211_TTLM_NUM_TIDS];
+ u16 uplink[IEEE80211_TTLM_NUM_TIDS];
+ bool valid;
+};
+
+/**
+ * enum ieee80211_neg_ttlm_res - return value for negotiated TTLM handling
+ * @NEG_TTLM_RES_ACCEPT: accept the request
+ * @NEG_TTLM_RES_REJECT: reject the request
+ * @NEG_TTLM_RES_SUGGEST_PREFERRED: reject and suggest a new mapping
+ */
+enum ieee80211_neg_ttlm_res {
+ NEG_TTLM_RES_ACCEPT,
+ NEG_TTLM_RES_REJECT,
+ NEG_TTLM_RES_SUGGEST_PREFERRED
};
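A hedged sketch of a driver-side check returning these values; the proposed map is assumed to arrive as a &struct ieee80211_neg_ttlm pointer, and both the drv_ name and the acceptance policy are made up for illustration:

static enum ieee80211_neg_ttlm_res
drv_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct ieee80211_neg_ttlm *ttlm)
{
	int tid;

	/* example policy: only accept maps with identical DL and UL */
	for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++)
		if (ttlm->downlink[tid] != ttlm->uplink[tid])
			return NEG_TTLM_RES_REJECT;

	return NEG_TTLM_RES_ACCEPT;
}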
/**
@@ -1613,26 +1912,40 @@ enum ieee80211_vif_flags {
* use during the life of a virtual interface.
*
* @type: type of this virtual interface
+ * @cfg: vif configuration, see &struct ieee80211_vif_cfg
* @bss_conf: BSS configuration for this interface, either our own
* or the BSS we're associated to
+ * @link_conf: in case of MLD, the per-link BSS configuration,
+ * indexed by link ID
+ * @valid_links: bitmap of valid links, or 0 for non-MLO.
+ * @active_links: The bitmap of active links, or 0 for non-MLO.
+ * The driver shouldn't change this directly, but use the
+ * API calls meant for that purpose.
+ * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
+ * Must be a subset of valid_links.
+ * @suspended_links: subset of dormant_links representing links that are
+ * suspended, or 0 for non-MLO.
+ * @neg_ttlm: negotiated TID to link mapping info,
+ * see &struct ieee80211_neg_ttlm.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
- * @csa_active: marks whether a channel switch is going on. Internally it is
- * write-protected by sdata_lock and local->mtx so holding either is fine
- * for read access.
- * @mu_mimo_owner: indicates interface owns MU-MIMO capability
+ * @netdev_features: tx netdev features supported by the hardware for this
+ * vif. mac80211 initializes this to hw->netdev_features, and the driver
+ * can mask out specific tx features. mac80211 will handle software fixup
+ * for masked offloads (GSO, CSUM)
* @driver_flags: flags/capabilities the driver has for this interface,
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
* at runtime, mac80211 will never touch this field
+ * @offload_flags: hardware offload capabilities/flags for this interface.
+ * These are initialized by mac80211 before calling .add_interface,
+ * .change_interface or .update_vif_offload and updated by the driver
+ * within these ops, based on supported features or runtime change
+ * restrictions.
* @hw_queue: hardware queue for each AC
* @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
- * @chanctx_conf: The channel context this interface is assigned to, or %NULL
- * when it is not assigned. This pointer is RCU-protected due to the TX
- * path needing to access it; even though the netdev carrier will always
- * be off when it is %NULL there can still be races and packets could be
- * processed after it switches back to %NULL.
* @debugfs_dir: debugfs dentry, can be used by drivers to create own per
* interface debug files. Note that it will be NULL for the virtual
* monitor interface (if that is requested.)
@@ -1642,26 +1955,29 @@ enum ieee80211_vif_flags {
* for this interface.
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void \*).
- * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
- * protected by fq->lock.
+ * @txq: the multicast data TX queue
+ * @offload_flags: 802.3 -> 802.11 encapsulation offload flags, see
+ * &enum ieee80211_offload_flags.
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
+ struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
+ struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 valid_links, active_links, dormant_links, suspended_links;
+ struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
- bool csa_active;
- bool mu_mimo_owner;
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
struct ieee80211_txq *txq;
- struct ieee80211_chanctx_conf __rcu *chanctx_conf;
-
+ netdev_features_t netdev_features;
u32 driver_flags;
+ u32 offload_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
@@ -1670,12 +1986,54 @@ struct ieee80211_vif {
bool probe_req_reg;
bool rx_mcast_action_reg;
- bool txqs_stopped[IEEE80211_NUM_ACS];
+ struct ieee80211_vif *mbssid_tx_vif;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
+/**
+ * ieee80211_vif_usable_links - Return the usable links for the vif
+ * @vif: the vif for which the usable links are requested
+ * Return: the usable link bitmap
+ */
+static inline u16 ieee80211_vif_usable_links(const struct ieee80211_vif *vif)
+{
+ return vif->valid_links & ~vif->dormant_links;
+}
+
+/**
+ * ieee80211_vif_is_mld - Returns true iff the vif is an MLD one
+ * @vif: the vif
+ * Return: %true if the vif is an MLD, %false otherwise.
+ */
+static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif)
+{
+ /* valid_links != 0 indicates this vif is an MLD */
+ return vif->valid_links != 0;
+}
+
+/**
+ * ieee80211_vif_link_active - check if a given link is active
+ * @vif: the vif
+ * @link_id: the link ID to check
+ * Return: %true if the vif is an MLD and the link is active, or if
+ * the vif is not an MLD and the link ID is 0; %false otherwise.
+ */
+static inline bool ieee80211_vif_link_active(const struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif))
+ return link_id == 0;
+ return vif->active_links & BIT(link_id);
+}
+
+#define for_each_vif_active_link(vif, link, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ (link = link_conf_dereference_check(vif, link_id)))
+
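Putting the helpers together, a driver can walk the active links of an interface. A minimal sketch (drv_dump_links is a hypothetical name), assuming locking that satisfies the dereference check inside the macro:

static void drv_dump_links(struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link;
	unsigned int link_id;

	for_each_vif_active_link(vif, link, link_id)
		pr_debug("link %u: addr %pM\n", link_id, link->addr);
}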
static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
{
#ifdef CONFIG_MAC80211_MESH
@@ -1703,13 +2061,23 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* This can be used by mac80211 drivers with direct cfg80211 APIs
 * (like the vendor commands) that need to get the wdev for a vif.
- *
- * Note that this function may return %NULL if the given wdev isn't
- * associated with a vif that the driver knows about (e.g. monitor
- * or AP_VLAN interfaces.)
+ * This can also be useful to get the netdev associated with a vif.
*/
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
+static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
+{
+ return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->wiphy->mtx);
+}
+
+#define link_conf_dereference_protected(vif, link_id) \
+ rcu_dereference_protected((vif)->link_conf[link_id], \
+ lockdep_vif_wiphy_mutex_held(vif))
+
+#define link_conf_dereference_check(vif, link_id) \
+ rcu_dereference_check((vif)->link_conf[link_id], \
+ lockdep_vif_wiphy_mutex_held(vif))
+
/**
* enum ieee80211_key_flags - key flags
*
@@ -1754,6 +2122,8 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver
* for a AES_CMAC key to indicate that it requires sequence number
* generation only
+ * @IEEE80211_KEY_FLAG_SPP_AMSDU: SPP A-MSDUs can be used with this key
+ * (set by mac80211 from the sta->spp_amsdu flag)
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1767,6 +2137,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9),
IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10),
+ IEEE80211_KEY_FLAG_SPP_AMSDU = BIT(11),
};
/**
@@ -1791,6 +2162,7 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
+ * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
*/
struct ieee80211_key_conf {
atomic64_t tx_pn;
@@ -1800,6 +2172,7 @@ struct ieee80211_key_conf {
u8 hw_key_idx;
s8 keyidx;
u16 flags;
+ s8 link_id;
u8 keylen;
u8 key[];
};
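For MLO, the new @link_id distinguishes per-link group keys from MLD-wide pairwise keys. A hedged sketch of reading it in a set_key() implementation (drv_set_key is a hypothetical name):

static int drv_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	if (key->link_id < 0)
		pr_debug("non-MLO or pairwise key, keyidx %d\n", key->keyidx);
	else
		pr_debug("group key for link %d\n", key->link_id);

	return 0;	/* 0 means the key is now in use in hardware */
}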
@@ -1849,36 +2222,6 @@ struct ieee80211_key_seq {
};
/**
- * struct ieee80211_cipher_scheme - cipher scheme
- *
- * This structure contains a cipher scheme information defining
- * the secure packet crypto handling.
- *
- * @cipher: a cipher suite selector
- * @iftype: a cipher iftype bit mask indicating an allowed cipher usage
- * @hdr_len: a length of a security header used the cipher
- * @pn_len: a length of a packet number in the security header
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: a bit shift needed to get key_idx
- * key_idx value calculation:
- * (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift
- * @mic_len: a mic length in bytes
- */
-struct ieee80211_cipher_scheme {
- u32 cipher;
- u16 iftype;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
-};
-
-/**
* enum set_key_cmd - key command
*
* Used with the set_key() callback in &struct ieee80211_ops, this
@@ -1917,6 +2260,7 @@ enum ieee80211_sta_state {
* @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz
* @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz
* (including 80+80 MHz)
+ * @IEEE80211_STA_RX_BW_320: station can receive up to 320 MHz
*
* Implementation note: 20 must be zero to be initialized
* correctly, the values must be sorted.
@@ -1926,6 +2270,7 @@ enum ieee80211_sta_rx_bandwidth {
IEEE80211_STA_RX_BW_40,
IEEE80211_STA_RX_BW_80,
IEEE80211_STA_RX_BW_160,
+ IEEE80211_STA_RX_BW_320,
};
/**
@@ -1965,6 +2310,81 @@ struct ieee80211_sta_txpwr {
};
/**
+ * struct ieee80211_sta_aggregates - info that is aggregated from active links
+ *
+ * Used for any per-link data that needs to be aggregated and updated in the
+ * main &struct ieee80211_sta when updated or the active links change.
+ *
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes.
+ * This field is always valid for packets with a VHT preamble.
+ * For packets with a HT preamble, additional limits apply:
+ *
+ * * If the skb is transmitted as part of a BA agreement, the
+ * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
+ * size is min(max_amsdu_len, 7935) bytes.
+ *
+ * Both additional HT limits must be enforced by the low level
+ * driver. This is defined by the spec (IEEE 802.11-2012 section
+ * 8.3.2.2 NOTE 2).
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
+ * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ */
+struct ieee80211_sta_aggregates {
+ u16 max_amsdu_len;
+
+ u16 max_rc_amsdu_len;
+ u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
+};
+
+/**
+ * struct ieee80211_link_sta - station link-specific info
+ * All link-specific info for a STA link, for a non-MLD STA (single entry)
+ * or an MLD STA (multiple entries), is stored here.
+ *
+ * @sta: reference to owning STA
+ * @addr: MAC address of the link STA. For a non-MLO STA this is the same as
+ * the addr in ieee80211_sta. For an MLO link STA this addr can be the same
+ * as or different from the addr in ieee80211_sta (the MLD STA addr)
+ * @link_id: the link ID for this link STA (0 for deflink)
+ * @smps_mode: current SMPS mode (off, static or dynamic)
+ * @supp_rates: Bitmap of supported rates
+ * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
+ * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
+ * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
+ * @eht_cap: EHT capabilities of this STA
+ * @agg: per-link data for multi-link aggregation
+ * @bandwidth: current bandwidth the station can receive with
+ * @rx_nss: in HT/VHT, the maximum number of spatial streams the
+ * station can receive at the moment, changed by operating mode
+ * notifications and capabilities. The value is only valid after
+ * the station moves to associated state.
+ * @txpwr: the station tx power configuration
+ *
+ */
+struct ieee80211_link_sta {
+ struct ieee80211_sta *sta;
+
+ u8 addr[ETH_ALEN];
+ u8 link_id;
+ enum ieee80211_smps_mode smps_mode;
+
+ u32 supp_rates[NUM_NL80211_BANDS];
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+
+ struct ieee80211_sta_aggregates agg;
+
+ u8 rx_nss;
+ enum ieee80211_sta_rx_bandwidth bandwidth;
+ struct ieee80211_sta_txpwr txpwr;
+};
+
+/**
* struct ieee80211_sta - station table entry
*
* A station table entry represents a station we are possibly
@@ -1973,14 +2393,11 @@ struct ieee80211_sta_txpwr {
* either be protected by rcu_read_lock() explicitly or implicitly,
* or you must take good care to not use such a pointer after a
* call to your sta_remove callback that removed it.
+ * This also represents the MLD STA in case of MLO association
+ * and holds pointers to the various link STAs
*
* @addr: MAC address
* @aid: AID we assigned to the station if we're an AP
- * @supp_rates: Bitmap of supported rates (per band)
- * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
- * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @he_cap: HE capabilities of this STA
- * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
* @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
* that this station is allowed to transmit to us.
* Can be modified by driver.
@@ -1992,75 +2409,87 @@ struct ieee80211_sta_txpwr {
* if wme is supported. The bits order is like in
* IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
* @max_sp: max Service Period. Only valid if wme is supported.
- * @bandwidth: current bandwidth the station can receive with
- * @rx_nss: in HT/VHT, the maximum number of spatial streams the
- * station can receive at the moment, changed by operating mode
- * notifications and capabilities. The value is only valid after
- * the station moves to associated state.
- * @smps_mode: current SMPS mode (off, static or dynamic)
* @rates: rate control selection table
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
* @mfp: indicates whether the STA uses management frame protection or not.
+ * @mlo: indicates whether the STA is an MLO station.
* @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
* A-MSDU. Taken from the Extended Capabilities element. 0 means
* unlimited.
+ * @cur: currently valid data as aggregated from the active links.
+ * For a non-MLO STA it will point to the deflink data. For an MLO STA,
+ * ieee80211_sta_recalc_aggregates() must be called to update it.
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
- * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
- * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
- * @txpwr: the station tx power configuration
- * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
- * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
+ * @txq: per-TID data TX queues; note that the last entry (%IEEE80211_NUM_TIDS)
+ * is used for non-data frames
+ * @deflink: This holds the default link STA information. For a non-MLO STA
+ * all link-specific STA information is accessed through @deflink, or
+ * through link[0], which points to @deflink. For an MLO STA, the first
+ * link STA added will point to @deflink.
+ * @link: references to the link STA entries. For a non-MLO STA, all entries
+ * except the first (link[0]) are NULL by default, and link information
+ * is accessed via @deflink or link[0]. For an MLO STA, the first link
+ * STA added points to @deflink, and the remaining entries are allocated
+ * and assigned to link[link_id], where link_id is the ID assigned by
+ * the AP.
+ * @valid_links: bitmap of valid links, or 0 for non-MLO
+ * @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
- u32 supp_rates[NUM_NL80211_BANDS];
u8 addr[ETH_ALEN];
u16 aid;
- struct ieee80211_sta_ht_cap ht_cap;
- struct ieee80211_sta_vht_cap vht_cap;
- struct ieee80211_sta_he_cap he_cap;
- struct ieee80211_he_6ghz_capa he_6ghz_capa;
u16 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
- u8 rx_nss;
- enum ieee80211_sta_rx_bandwidth bandwidth;
- enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
bool mfp;
+ bool mlo;
+ bool spp_amsdu;
u8 max_amsdu_subframes;
- /**
- * @max_amsdu_len:
- * indicates the maximal length of an A-MSDU in bytes.
- * This field is always valid for packets with a VHT preamble.
- * For packets with a HT preamble, additional limits apply:
- *
- * * If the skb is transmitted as part of a BA agreement, the
- * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- * * If the skb is not part of a BA agreement, the A-MSDU maximal
- * size is min(max_amsdu_len, 7935) bytes.
- *
- * Both additional HT limits must be enforced by the low level
- * driver. This is defined by the spec (IEEE 802.11-2012 section
- * 8.3.2.2 NOTE 2).
- */
- u16 max_amsdu_len;
+ struct ieee80211_sta_aggregates *cur;
+
bool support_p2p_ps;
- u16 max_rc_amsdu_len;
- u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
- struct ieee80211_sta_txpwr txpwr;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
+ u16 valid_links;
+ struct ieee80211_link_sta deflink;
+ struct ieee80211_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
+#ifdef CONFIG_LOCKDEP
+bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta);
+#else
+static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta)
+{
+ return true;
+}
+#endif
+
+#define link_sta_dereference_protected(sta, link_id) \
+ rcu_dereference_protected((sta)->link[link_id], \
+ lockdep_sta_mutex_held(sta))
+
+#define link_sta_dereference_check(sta, link_id) \
+ rcu_dereference_check((sta)->link[link_id], \
+ lockdep_sta_mutex_held(sta))
+
+#define for_each_sta_active_link(vif, sta, link_sta, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ ((link_sta) = link_sta_dereference_check(sta, link_id)))
+
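Analogous to the vif link iteration, a driver can program per-link station state. A minimal sketch (drv_show_sta_links is a hypothetical name), assuming locking that satisfies the dereference check:

static void drv_show_sta_links(struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;

	for_each_sta_active_link(vif, sta, link_sta, link_id)
		pr_debug("link %u: addr %pM, rx_nss %u\n",
			 link_id, link_sta->addr, link_sta->rx_nss);
}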
/**
* enum sta_notify_cmd - sta notify command
*
@@ -2328,6 +2757,32 @@ struct ieee80211_txq {
* aggregating MPDUs with the same keyid, allowing mac80211 to keep Tx
* A-MPDU sessions active while rekeying with Extended Key ID.
*
+ * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation
+ * offload
+ *
+ * @IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD: Hardware supports rx decapsulation
+ * offload
+ *
+ * @IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP: Hardware supports concurrent rx
+ * decapsulation offload and passing raw 802.11 frames for monitor iface.
+ * If this is supported, the driver must pass both 802.3 frames for real
+ * usage and 802.11 frames with %RX_FLAG_ONLY_MONITOR set for monitor to
+ * the stack.
+ *
+ * @IEEE80211_HW_DETECTS_COLOR_COLLISION: HW/driver has support for BSS color
+ * collision detection and doesn't need it in software.
+ *
+ * @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting
+ * multicast frames on all links, mac80211 should not do that.
+ *
+ * @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
+ * and connecting with a lower bandwidth instead
+ *
+ * @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so
+ * no need to stop queues. This really should be set by a driver that
+ * implements MLO, so operation can continue on other links when one
+ * link is switching.
+ *
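The link ID thus occupies the top four bits of control.flags and is manipulated with the bitfield helpers from linux/bitfield.h. A minimal sketch (drv_set_tx_link is a hypothetical name):

static void drv_set_tx_link(struct ieee80211_tx_info *info, int link_id)
{
	info->control.flags &= ~IEEE80211_TX_CTRL_MLO_LINK;
	if (link_id < 0)
		info->control.flags |= IEEE80211_TX_CTRL_MLO_LINK_UNSPEC;
	else
		info->control.flags |= u32_encode_bits(link_id,
						       IEEE80211_TX_CTRL_MLO_LINK);
}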
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2380,6 +2835,13 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_MULTI_BSSID,
IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP,
+ IEEE80211_HW_DETECTS_COLOR_COLLISION,
+ IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
+ IEEE80211_HW_DISALLOW_PUNCTURING,
+ IEEE80211_HW_HANDLES_QUIET_CSA,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2468,8 +2930,6 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
- * @radiotap_he: HE radiotap validity flags
- *
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* @units_pos member is set to a non-negative value then the timestamp
* field will be added and populated from the &struct ieee80211_rx_status
@@ -2495,9 +2955,6 @@ enum ieee80211_hw_flags {
* deliver to a WMM STA during any Service Period triggered by the WMM STA.
* Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values.
*
- * @n_cipher_schemes: a size of an array of cipher schemes definitions.
- * @cipher_schemes: a pointer to an array of cipher scheme definitions
- * supported by HW.
* @max_nan_de_entries: maximum number of NAN DE functions supported by the
* device.
*
@@ -2509,6 +2966,12 @@ enum ieee80211_hw_flags {
* refilling deficit of each TXQ.
*
 * @max_mtu: the maximum MTU that can be set.
+ *
+ * @tx_power_levels: a list of power levels supported by the wifi hardware.
+ * The power levels can be specified either as integers or fractions.
+ * The power level at idx 0 shall be the maximum positive power level.
+ *
+ * @max_txpwr_levels_idx: the maximum valid idx of the 'tx_power_levels' list.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2541,12 +3004,12 @@ struct ieee80211_hw {
netdev_features_t netdev_features;
u8 uapsd_queues;
u8 uapsd_max_sp_len;
- u8 n_cipher_schemes;
- const struct ieee80211_cipher_scheme *cipher_schemes;
u8 max_nan_de_entries;
u8 tx_sk_pacing_shift;
u8 weight_multiplier;
u32 max_mtu;
+ const s8 *tx_power_levels;
+ u8 max_txpwr_levels_idx;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -2696,7 +3159,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* The set_key() call for the %SET_KEY command should return 0 if
* the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be
* added; if you return 0 then hw_key_idx must be assigned to the
- * hardware key index, you are free to use the full u8 range.
+ * hardware key index. You are free to use the full u8 range.
*
* Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
* set, mac80211 will not automatically fall back to software crypto if
@@ -2706,7 +3169,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* When the cmd is %DISABLE_KEY then it must succeed.
*
* Note that it is permissible to not decrypt a frame even if a key
- * for it has been uploaded to hardware, the stack will not make any
+ * for it has been uploaded to hardware. The stack will not make any
* decision based on whether a key has been uploaded or not but rather
* based on the receive flags.
*
@@ -2721,7 +3184,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* The update_tkip_key() call updates the driver with the new phase 1 key.
* This happens every time the iv16 wraps around (every 65536 packets). The
* set_key() call will happen only once for each key (unless the AP did
- * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
+ * rekeying); it will not include a valid phase 1 key. The valid phase 1 key is
* provided by update_tkip_key only. The trigger that makes mac80211 call this
* handler is software decryption with wrap around of iv16.
*
@@ -2732,13 +3195,13 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
* when they are able to replace in-use PTK keys according to the following
* requirements:
- * 1) They do not hand over frames decrypted with the old key to
- mac80211 once the call to set_key() with command %DISABLE_KEY has been
- completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+ once the call to set_key() with command %DISABLE_KEY has been completed,
2) either drop or continue to use the old key for any outgoing frames queued
at the time of the key deletion (including re-transmits),
3) never send out a frame queued prior to the set_key() %SET_KEY command
- encrypted with the new key and
+ encrypted with the new key when also needing
+ @IEEE80211_KEY_FLAG_GENERATE_IV and
4) never send out a frame unencrypted when it should be encrypted.
Mac80211 will not queue any new frames for a deleted key to the driver.
*/
@@ -2748,7 +3211,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
*
* mac80211 has support for various powersave implementations.
*
- * First, it can support hardware that handles all powersaving by itself,
+ * First, it can support hardware that handles all powersaving by itself;
* such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware
* flag. In that case, it will be told about the desired powersave mode
* with the %IEEE80211_CONF_PS flag depending on the association status.
@@ -2773,12 +3236,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still
* required to pass up beacons. The hardware is still required to handle
 * waking up for multicast traffic; if it cannot, the driver must handle that
- * as best as it can, mac80211 is too slow to do that.
+ * as best as it can; mac80211 is too slow to do that.
*
* Dynamic powersave is an extension to normal powersave in which the
* hardware stays awake for a user-specified period of time after sending a
* frame so that reply frames need not be buffered and therefore delayed to
- * the next wakeup. It's compromise of getting good enough latency when
+ * the next wakeup. It's a compromise of getting good enough latency when
 * there's data traffic and still saving significant power in idle
* periods.
*
@@ -2843,7 +3306,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Note that change, for the sake of simplification, also includes information
* elements appearing or disappearing from the beacon.
*
- * Some hardware supports an "ignore list" instead, just make sure nothing
+ * Some hardware supports an "ignore list" instead. Just make sure nothing
* that was requested is on the ignore list, and include commonly changing
* information element IDs in the ignore list, for example 11 (BSS load) and
* the various vendor-assigned IEs with unknown contents (128, 129, 133-136,
@@ -2854,7 +3317,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* In addition to these capabilities, hardware should support notifying the
* host of changes in the beacon RSSI. This is relevant to implement roaming
* when no traffic is flowing (when traffic is flowing we see the RSSI of
- * the received data packets). This can consist in notifying the host when
+ * the received data packets). This can consist of notifying the host when
* the RSSI changes significantly or when it drops below or rises above
* configurable thresholds. In the future these thresholds will also be
* configured by mac80211 (which gets them from userspace) to implement
@@ -3001,8 +3464,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* period starts for any reason, @release_buffered_frames is called
* with the number of frames to be released and which TIDs they are
* to come from. In this case, the driver is responsible for setting
- * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
- * to help the @more_data parameter is passed to tell the driver if
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames.
+ * To help, the @more_data parameter is passed to tell the driver if
* there is more data on other TIDs -- the TIDs to release frames
* from are ignored since mac80211 doesn't know how many frames the
* buffers for those TIDs contain.
@@ -3051,7 +3514,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Additionally, the driver has to then use these HW queue IDs for the queue
* management functions (ieee80211_stop_queue() et al.)
*
- * The driver is free to set up the queue mappings as needed, multiple virtual
+ * The driver is free to set up the queue mappings as needed; multiple virtual
* interfaces may map to the same hardware queues if needed. The setup has to
* happen during add_interface or change_interface callbacks. For example, a
* driver supporting station+station and station+AP modes might decide to have
@@ -3252,7 +3715,7 @@ enum ieee80211_roc_type {
};
/**
- * enum ieee80211_reconfig_complete_type - reconfig type
+ * enum ieee80211_reconfig_type - reconfig type
*
* This enum is used by the reconfig_complete() callback to indicate what
* reconfiguration type was completed.
@@ -3268,6 +3731,24 @@ enum ieee80211_reconfig_type {
};
/**
+ * struct ieee80211_prep_tx_info - prepare TX information
+ * @duration: if non-zero, hint about the required duration,
+ * only used with the mgd_prepare_tx() method.
+ * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc)
+ * @success: whether the frame exchange was successful, only
+ * used with the mgd_complete_tx() method, and then only
+ * valid for auth and (re)assoc.
+ * @link_id: the link id on which the frame will be TX'ed.
+ * Only used with the mgd_prepare_tx() method.
+ */
+struct ieee80211_prep_tx_info {
+ u16 duration;
+ u16 subtype;
+ u8 success:1;
+ int link_id;
+};
+
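A hedged sketch of consuming this struct in a driver's mgd_prepare_tx handler; the (hw, vif, info) prototype is assumed from the description below, and drv_mgd_prepare_tx is a hypothetical name:

static void drv_mgd_prepare_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_prep_tx_info *info)
{
	/* a non-zero duration is only a hint for the frame exchange */
	if (info->duration)
		pr_debug("duration hint %u, subtype %u, link %d\n",
			 info->duration, info->subtype, info->link_id);
}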
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -3357,6 +3838,22 @@ enum ieee80211_reconfig_type {
* for association indication. The @changed parameter indicates which
* of the bss parameters has changed when a call is made. The callback
* can sleep.
+ * Note: this callback is called if @vif_cfg_changed or @link_info_changed
+ * are not implemented.
+ *
+ * @vif_cfg_changed: Handler for configuration requests related to interface
+ * (MLD) parameters from &struct ieee80211_vif_cfg that vary during the
+ * lifetime of the interface (e.g. assoc status, IP addresses, etc.)
+ * The @changed parameter indicates which value changed.
+ * The callback can sleep.
+ *
+ * @link_info_changed: Handler for configuration requests related to link
+ * parameters from &struct ieee80211_bss_conf that are related to an
+ * individual link. e.g. legacy/HT/VHT/... rate information.
+ * The @changed parameter indicates which value changed, and the @link_id
+ * parameter indicates the link ID. Note that the @link_id will be 0 for
+ * non-MLO connections.
+ * The callback can sleep.
*
* @prepare_multicast: Prepare for multicast filter configuration.
* This callback is optional, and its return value is passed
@@ -3472,11 +3969,28 @@ enum ieee80211_reconfig_type {
* the station. See @sta_pre_rcu_remove if needed.
* This callback can sleep.
*
+ * @vif_add_debugfs: Drivers can use this callback to add a debugfs vif
+ * directory with its files. This callback should be within a
+ * CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep.
+ *
+ * @link_add_debugfs: Drivers can use this callback to add debugfs files
+ * when a link is added to a mac80211 vif. This callback should be within
+ * a CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep.
+ * For non-MLO the callback will be called once for the default bss_conf
+ * with the vif's directory rather than a separate subdirectory.
+ *
* @sta_add_debugfs: Drivers can use this callback to add debugfs files
* when a station is added to mac80211's station list. This callback
* should be within a CONFIG_MAC80211_DEBUGFS conditional. This
* callback can sleep.
*
+ * @link_sta_add_debugfs: Drivers can use this callback to add debugfs files
+ * when a link is added to a mac80211 station. This callback
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
+ * callback can sleep.
+ * For non-MLO the callback will be called once for the deflink with the
+ * station's directory rather than a separate subdirectory.
+ *
* @sta_notify: Notifies low level driver about power state transition of an
* associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
* in AP mode, this callback will not be called when the flag
@@ -3580,6 +4094,10 @@ enum ieee80211_reconfig_type {
* Note that vif can be NULL.
* The callback can sleep.
*
+ * @flush_sta: Flush or drop all pending frames from the hardware queue(s) for
+ * the given station, as it's about to be removed.
+ * The callback can sleep.
+ *
* @channel_switch: Drivers that need (or want) to offload the channel
* switch operation for CSAs received from the AP may implement this
* callback. They must then call ieee80211_chswitch_done() to indicate
@@ -3659,11 +4177,15 @@ enum ieee80211_reconfig_type {
* This callback must be atomic.
*
* @get_et_sset_count: Ethtool API to get string-set count.
+ * Note that the wiphy mutex is not held for this callback since it's
+ * expected to return a static value.
*
* @get_et_stats: Ethtool API to get a set of u64 stats.
*
* @get_et_strings: Ethtool API to get a set of strings to describe stats
* and perhaps other supported types of ethtool data-sets.
+ * Note that the wiphy mutex is not held for this callback since it's
+ * expected to return a static value.
*
* @mgd_prepare_tx: Prepare for transmitting a management frame for association
* before associated. In multi-channel scenarios, a virtual interface is
@@ -3679,9 +4201,13 @@ enum ieee80211_reconfig_type {
* frame in case that no beacon was heard from the AP/P2P GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
- * If duration is greater than zero, mac80211 hints to the driver the
- * duration for which the operation is requested.
+ * Additional information is passed in the &struct ieee80211_prep_tx_info
+ * data. If duration there is greater than zero, mac80211 hints to the
+ * driver the duration for which the operation is requested.
* The callback is optional and can (should!) sleep.
+ * @mgd_complete_tx: Notify the driver that the response frame for a previously
+ * transmitted frame announced with @mgd_prepare_tx was received; the data
+ * is filled similarly to @mgd_prepare_tx, though the duration is not used.
*
* @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
* a TDLS discovery-request, we expect a reply to arrive on the AP's
@@ -3736,7 +4262,7 @@ enum ieee80211_reconfig_type {
* decremented, and when they reach 1 the driver must call
* ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
* get the csa counter decremented by mac80211, but must check if it is
- * 1 using ieee80211_csa_is_complete() after the beacon has been
+ * 1 using ieee80211_beacon_cntdwn_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
* If the CSA count starts as zero or 1, this function will not be called,
* since there won't be any time to beacon before the switch anyway.
@@ -3748,7 +4274,7 @@ enum ieee80211_reconfig_type {
* after a channel switch procedure is completed, allowing the
* driver to go back to a normal configuration.
* @abort_channel_switch: This is an optional callback that is called
- * when channel switch procedure was completed, allowing the
+ * when the channel switch procedure was aborted, allowing the
* driver to go back to a normal configuration.
* @channel_switch_rx_beacon: This is an optional callback that is called
* when channel switch procedure is in progress and additional beacon with
@@ -3814,6 +4340,55 @@ enum ieee80211_reconfig_type {
* @set_tid_config: Apply TID specific configurations. This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer.
* This callback may sleep.
+ * @update_vif_offload: Update virtual interface offload flags
+ * This callback may sleep.
+ * @sta_set_4addr: Called to notify the driver when a station starts/stops using
+ * 4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
+ * @sta_set_decap_offload: Called to notify the driver when a station is allowed
+ * to use rx decapsulation offload
+ * @add_twt_setup: Update hw with TWT agreement parameters received from the peer.
+ * This callback allows the hw to check if requested parameters
+ * are supported and if there is enough room for a new agreement.
+ * The hw is expected to set the agreement result in the req_type field
+ * of the twt structure.
+ * @twt_teardown_request: Update the hw with TWT teardown request received
+ * from the peer.
+ * @set_radar_background: Configure dedicated offchannel chain available for
+ * radar/CAC detection on some hw. This chain can't be used to transmit
+ * or receive frames and it is bound to a running wdev.
+ * Background radar/CAC detection avoids the CAC downtime of
+ * switching to a different channel, since CAC detection on the
+ * selected radar channel runs on the dedicated chain.
+ * The caller is expected to set chandef pointer to NULL in order to
+ * disable background CAC/radar detection.
+ * @net_fill_forward_path: Called from .ndo_fill_forward_path in order to
+ * resolve a path for hardware flow offloading
+ * @can_activate_links: Checks if a specific active_links bitmap is
+ * supported by the driver.
+ * @change_vif_links: Change the valid links on an interface. Note that when
+ * removing a link, the old link information (link_conf pointer) is still
+ * valid, but it may disappear immediately after the function returns.
+ * The old or new links bitmaps may be 0 if going from/to a non-MLO
+ * situation.
+ * The @old array contains pointers to the old bss_conf structures
+ * that were already removed, in case they're needed.
+ * This callback can sleep.
+ * @change_sta_links: Change the valid links of a station, similar to
+ * @change_vif_links. This callback can sleep.
+ * Note that a sta can also be inserted or removed with valid links,
+ * i.e. passed to @sta_add/@sta_state with sta->valid_links not zero.
+ * In fact, a station cannot change between having valid_links and not
+ * having them.
+ * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames. This is
+ * not restored at HW reset by mac80211 so drivers need to take care of
+ * that.
+ * @net_setup_tc: Called from .ndo_setup_tc in order to prepare hardware
+ * flow offloading for flows originating from the vif.
+ * Note that the driver must not assume that the vif driver_data is valid
+ * at this point, since the callback can be called during netdev teardown.
+ * @can_neg_ttlm: for a managed interface, requests the driver to determine
+ * if the requested TID-to-link mapping can be accepted or not.
+ * If it's not accepted the driver may suggest a preferred mapping and
+ * modify the @ttlm parameter with the suggested TID-to-link mapping.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3837,10 +4412,19 @@ struct ieee80211_ops {
void (*bss_info_changed)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed);
+ u64 changed);
+ void (*vif_cfg_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed);
+ void (*link_info_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed);
- int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
- void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+ void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
u64 (*prepare_multicast)(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
@@ -3894,10 +4478,20 @@ struct ieee80211_ops {
int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
#ifdef CONFIG_MAC80211_DEBUGFS
+ void (*vif_add_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*link_add_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir);
void (*sta_add_debugfs)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
+ void (*link_sta_add_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
@@ -3923,7 +4517,8 @@ struct ieee80211_ops {
struct ieee80211_sta *sta,
struct station_info *sinfo);
int (*conf_tx)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params);
u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -3980,6 +4575,8 @@ struct ieee80211_ops {
#endif
void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
+ void (*flush_sta)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void (*channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
@@ -4025,10 +4622,14 @@ struct ieee80211_ops {
void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 duration);
+ struct ieee80211_prep_tx_info *info);
+ void (*mgd_complete_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
int (*add_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx);
@@ -4039,9 +4640,11 @@ struct ieee80211_ops {
u32 changed);
int (*assign_vif_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
void (*unassign_vif_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
int (*switch_vif_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
@@ -4064,9 +4667,11 @@ struct ieee80211_ops {
struct ieee80211_channel_switch *ch_switch);
int (*post_channel_switch)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void (*abort_channel_switch)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
@@ -4125,6 +4730,49 @@ struct ieee80211_ops {
int (*reset_tid_config)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 tids);
+ void (*update_vif_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
+ int (*set_sar_specs)(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
+ void (*sta_set_decap_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
+ void (*add_twt_setup)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt);
+ void (*twt_teardown_request)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 flowid);
+ int (*set_radar_background)(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef);
+ int (*net_fill_forward_path)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ bool (*can_activate_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links);
+ int (*change_vif_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]);
+ int (*change_sta_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
+ int (*set_hw_timestamp)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_set_hw_timestamp *hwts);
+ int (*net_setup_tc)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct net_device *dev,
+ enum tc_setup_type type,
+ void *type_data);
+ enum ieee80211_neg_ttlm_res
+ (*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *ttlm);
};
/**
@@ -4373,7 +5021,7 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
* mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* This function must be called with BHs disabled and RCU read lock
*
@@ -4398,7 +5046,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
* mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* This function must be called with BHs disabled.
*
@@ -4423,7 +5071,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
* mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* In process context use instead ieee80211_rx_ni().
*
@@ -4443,7 +5091,7 @@ static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
*
* Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
* be mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
@@ -4458,7 +5106,7 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
*
* Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
* not be mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
@@ -4634,7 +5282,7 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info);
/**
- * ieee80211_tx_status - transmit status callback
+ * ieee80211_tx_status_skb - transmit status callback
*
* Call this function for all transmitted frames after they have been
* transmitted. It is permissible to not call this function for
@@ -4649,13 +5297,13 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
*/
-void ieee80211_tx_status(struct ieee80211_hw *hw,
- struct sk_buff *skb);
+void ieee80211_tx_status_skb(struct ieee80211_hw *hw,
+ struct sk_buff *skb);
/**
* ieee80211_tx_status_ext - extended transmit status callback
*
- * This function can be used as a replacement for ieee80211_tx_status
+ * This function can be used as a replacement for ieee80211_tx_status_skb()
* in drivers that may want to provide extra information that does not
* fit into &struct ieee80211_tx_info.
*
@@ -4672,7 +5320,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
/**
* ieee80211_tx_status_noskb - transmit status callback without skb
*
- * This function can be used as a replacement for ieee80211_tx_status
+ * This function can be used as a replacement for ieee80211_tx_status_skb()
* in drivers that cannot reliably map tx status information back to
* specific skbs.
*
@@ -4700,9 +5348,9 @@ static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
/**
* ieee80211_tx_status_ni - transmit status callback (in process context)
*
- * Like ieee80211_tx_status() but can be called in process context.
+ * Like ieee80211_tx_status_skb() but can be called in process context.
*
- * Calls to this function, ieee80211_tx_status() and
+ * Calls to this function, ieee80211_tx_status_skb() and
* ieee80211_tx_status_irqsafe() may not be mixed
* for a single hardware.
*
@@ -4713,17 +5361,17 @@ static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
local_bh_disable();
- ieee80211_tx_status(hw, skb);
+ ieee80211_tx_status_skb(hw, skb);
local_bh_enable();
}
/**
* ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
*
- * Like ieee80211_tx_status() but can be called in IRQ context
+ * Like ieee80211_tx_status_skb() but can be called in IRQ context
* (internally defers to a tasklet.)
*
- * Calls to this function, ieee80211_tx_status() and
+ * Calls to this function, ieee80211_tx_status_skb() and
* ieee80211_tx_status_ni() may not be mixed for a single hardware.
*
* @hw: the hardware the frame was transmitted by
@@ -4733,26 +5381,6 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
- * ieee80211_tx_status_8023 - transmit status callback for 802.3 frame format
- *
- * Call this function for all transmitted data frames after their transmit
- * completion. This callback should only be called for data frames which
- * are using driver's (or hardware's) offload capability of encap/decap
- * 802.11 frames.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other and all
- * calls in the same tx status family.
- *
- * @hw: the hardware the frame was transmitted by
- * @vif: the interface for which the frame was transmitted
- * @skb: the frame that was transmitted, owned by mac80211 after this call
- */
-void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct sk_buff *skb);
-
-/**
* ieee80211_report_low_ack - report non-responding station
*
* When operating in AP-mode, call this function to report a non-responding
@@ -4763,21 +5391,23 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
*/
void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
-#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
+#define IEEE80211_MAX_CNTDWN_COUNTERS_NUM 2
/**
* struct ieee80211_mutable_offsets - mutable beacon offsets
* @tim_offset: position of TIM element
* @tim_length: size of TIM element
- * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
- * to CSA counters. This array can contain zero values which
+ * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets
+ * to countdown counters. This array can contain zero values which
* should be ignored.
+ * @mbssid_off: position of the multiple bssid element
*/
struct ieee80211_mutable_offsets {
u16 tim_offset;
u16 tim_length;
- u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
+ u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
+ u16 mbssid_off;
};
/**
@@ -4786,6 +5416,8 @@ struct ieee80211_mutable_offsets {
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @offs: &struct ieee80211_mutable_offsets pointer to struct that will
* receive the offsets that may be updated by the driver.
+ * @link_id: the link id to which the beacon belongs (or 0 for an AP STA
+ * that is not associated with an AP MLD).
*
* If the driver implements beaconing modes, it must use this function to
* obtain the beacon template.
@@ -4802,7 +5434,76 @@ struct ieee80211_mutable_offsets {
struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_mutable_offsets *offs);
+ struct ieee80211_mutable_offsets *offs,
+ unsigned int link_id);
+
+/**
+ * ieee80211_beacon_get_template_ema_index - EMA beacon template generation
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @offs: &struct ieee80211_mutable_offsets pointer to struct that will
+ * receive the offsets that may be updated by the driver.
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP).
+ * @ema_index: index of the beacon in the EMA set.
+ *
+ * This function follows the same rules as ieee80211_beacon_get_template()
+ * but returns a beacon template which includes multiple BSSID element at the
+ * requested index.
+ *
+ * Return: The beacon template. %NULL indicates the end of EMA templates.
+ */
+struct sk_buff *
+ieee80211_beacon_get_template_ema_index(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_mutable_offsets *offs,
+ unsigned int link_id, u8 ema_index);
+
+/**
+ * struct ieee80211_ema_beacons - List of EMA beacons
+ * @cnt: count of EMA beacons.
+ *
+ * @bcn: array of EMA beacons.
+ * @bcn.skb: the skb containing this specific beacon
+ * @bcn.offs: &struct ieee80211_mutable_offsets pointer to struct that will
+ * receive the offsets that may be updated by the driver.
+ */
+struct ieee80211_ema_beacons {
+ u8 cnt;
+ struct {
+ struct sk_buff *skb;
+ struct ieee80211_mutable_offsets offs;
+ } bcn[];
+};
+
+/**
+ * ieee80211_beacon_get_template_ema_list - EMA beacon template generation
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ *
+ * This function follows the same rules as ieee80211_beacon_get_template()
+ * but allocates and returns a pointer to list of all beacon templates required
+ * to cover all profiles in the multiple BSSID set. Each template includes only
+ * one multiple BSSID element.
+ *
+ * Driver must call ieee80211_beacon_free_ema_list() to free the memory.
+ *
+ * Return: EMA beacon templates of type struct ieee80211_ema_beacons *.
+ * %NULL on error.
+ */
+struct ieee80211_ema_beacons *
+ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
+
+/**
+ * ieee80211_beacon_free_ema_list - free an EMA beacon template list
+ * @ema_beacons: list of EMA beacons, of type &struct ieee80211_ema_beacons.
+ *
+ * This function will free a list previously acquired by calling
+ * ieee80211_beacon_get_template_ema_list().
+ */
+void ieee80211_beacon_free_ema_list(struct ieee80211_ema_beacons *ema_beacons);
/**
* ieee80211_beacon_get_tim - beacon generation function
@@ -4813,6 +5514,8 @@ ieee80211_beacon_get_template(struct ieee80211_hw *hw,
* @tim_length: pointer to variable that will receive the TIM IE length,
* (including the ID and length bytes!).
* Set to 0 if invalid (in non-AP modes).
+ * @link_id: the link id to which the beacon belongs (or 0 for an AP STA
+ * that is not associated with an AP MLD).
*
* If the driver implements beaconing modes, it must use this function to
* obtain the beacon frame.
@@ -4828,68 +5531,86 @@ ieee80211_beacon_get_template(struct ieee80211_hw *hw,
*/
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 *tim_offset, u16 *tim_length);
+ u16 *tim_offset, u16 *tim_length,
+ unsigned int link_id);
/**
* ieee80211_beacon_get - beacon generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: the link id to which the beacon belongs (or 0 for an AP STA
+ * that is not associated with an AP MLD).
*
* See ieee80211_beacon_get_tim().
*
* Return: See ieee80211_beacon_get_tim().
*/
static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
{
- return ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
+ return ieee80211_beacon_get_tim(hw, vif, NULL, NULL, link_id);
}
/**
- * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
+ * ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
- * The csa counter should be updated after each beacon transmission.
+ * The beacon counter should be updated after each beacon transmission.
* This function is called implicitly when
* ieee80211_beacon_get/ieee80211_beacon_get_tim are called, however if the
* beacon frames are generated by the device, the driver should call this
- * function after each beacon transmission to sync mac80211's csa counters.
+ * function after each beacon transmission to sync mac80211's beacon countdown.
*
- * Return: new csa counter value
+ * Return: new countdown value
*/
-u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
- * ieee80211_csa_set_counter - request mac80211 to set csa counter
+ * ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @counter: the new value for the counter
*
- * The csa counter can be changed by the device, this API should be
+ * The beacon countdown can be changed by the device, this API should be
+ * used by the device driver to update the countdown counter in mac80211.
*
- * It should never be used together with ieee80211_csa_update_counter(),
+ * It should never be used together with ieee80211_beacon_update_cntdwn(),
* as it will cause a race condition around the counter value.
*/
-void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
+void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* After a channel switch announcement was scheduled and the counter in this
* announcement hits 1, this function must be called by the driver to
* notify mac80211 that the channel can be changed.
*/
-void ieee80211_csa_finish(struct ieee80211_vif *vif);
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id);
/**
- * ieee80211_csa_is_complete - find out if counters reached 1
+ * ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
- * This function returns whether the channel switch counters reached zero.
+ * This function returns whether the countdown reached 1.
*/
-bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id);
+/**
+ * ieee80211_color_change_finish - notify mac80211 about color change
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a color change announcement was scheduled and the counter in this
+ * announcement hits 1, this function must be called by the driver to
+ * notify mac80211 that the color can be changed.
+ */
+void ieee80211_color_change_finish(struct ieee80211_vif *vif);
/**
* ieee80211_proberesp_get - retrieve a Probe Response template
@@ -4927,6 +5648,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
* ieee80211_nullfunc_get - retrieve a nullfunc template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: If the vif is an MLD, get a frame with the link addresses
+ * for the given link ID. For a link_id < 0 you get a frame with
+ * MLD addresses, however useful that might be.
* @qos_ok: QoS NDP is acceptable to the caller, this should be set
* if at all possible
*
@@ -4944,7 +5668,7 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
*/
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- bool qos_ok);
+ int link_id, bool qos_ok);
/**
* ieee80211_probereq_get - retrieve a Probe Request template
@@ -5200,12 +5924,11 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_remove_key - remove the given key
* @keyconf: the parameter passed with the set key
*
+ * Context: Must be called with the wiphy mutex held.
+ *
* Remove the given key. If the key was uploaded to the hardware at the
* time this function is called, it is not deleted in the hardware but
* instead assumed to have been removed already.
- *
- * Note that due to locking considerations this function can (currently)
- * only be called during key iteration (ieee80211_iter_keys().)
*/
void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
@@ -5213,6 +5936,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
* @vif: the virtual interface to add the key on
* @keyconf: new key data
+ * @link_id: the link id of the key or -1 for non-MLO
*
* When GTK rekeying was done while the system was suspended, (a) new
* key(s) will be available. These will be needed by mac80211 for proper
@@ -5240,7 +5964,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
*/
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf);
+ struct ieee80211_key_conf *keyconf,
+ int link_id);
/**
* ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
@@ -5253,11 +5978,31 @@ void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
/**
+ * ieee80211_key_mic_failure - increment MIC failure counter for the key
+ *
+ * Note: this is really only safe if no other RX function is called
+ * at the same time.
+ *
+ * @keyconf: the key in question
+ */
+void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_key_replay - increment replay counter for the key
+ *
+ * Note: this is really only safe if no other RX function is called
+ * at the same time.
+ *
+ * @keyconf: the key in question
+ */
+void ieee80211_key_replay(struct ieee80211_key_conf *keyconf);
+
+/**
* ieee80211_wake_queue - wake specific queue
* @hw: pointer as obtained from ieee80211_alloc_hw().
* @queue: queue number (counted from zero).
*
- * Drivers should use this function instead of netif_wake_queue.
+ * Drivers must use this function instead of netif_wake_queue.
*/
void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
@@ -5266,7 +6011,7 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
* @hw: pointer as obtained from ieee80211_alloc_hw().
* @queue: queue number (counted from zero).
*
- * Drivers should use this function instead of netif_stop_queue.
+ * Drivers must use this function instead of netif_stop_queue.
*/
void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
@@ -5275,7 +6020,7 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
* @hw: pointer as obtained from ieee80211_alloc_hw().
* @queue: queue number (counted from zero).
*
- * Drivers should use this function instead of netif_stop_queue.
+ * Drivers must use this function instead of netif_queue_stopped.
*
* Return: %true if the queue is stopped. %false otherwise.
*/
@@ -5286,7 +6031,7 @@ int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
* ieee80211_stop_queues - stop all queues
* @hw: pointer as obtained from ieee80211_alloc_hw().
*
- * Drivers should use this function instead of netif_stop_queue.
+ * Drivers must use this function instead of netif_tx_stop_all_queues.
*/
void ieee80211_stop_queues(struct ieee80211_hw *hw);
@@ -5294,7 +6039,7 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw);
* ieee80211_wake_queues - wake all queues
* @hw: pointer as obtained from ieee80211_alloc_hw().
*
- * Drivers should use this function instead of netif_wake_queue.
+ * Drivers must use this function instead of netif_tx_wake_all_queues.
*/
void ieee80211_wake_queues(struct ieee80211_hw *hw);
@@ -5344,11 +6089,15 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
+ * @IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER: Skip any interfaces where SDATA
+ * is not in the driver. This may, for instance, avoid crashes during
+ * firmware recovery.
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2),
};
/**
@@ -5417,23 +6166,23 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
void *data);
/**
- * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
+ * ieee80211_iterate_active_interfaces_mtx - iterate active interfaces
*
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
* @iterator: the iterator function to call, cannot sleep
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
- u32 iter_flags,
- void (*iterator)(void *data,
+void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data,
u8 *mac,
struct ieee80211_vif *vif),
- void *data);
+ void *data);
/**
* ieee80211_iterate_stations_atomic - iterate stations
@@ -5477,6 +6226,20 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
unsigned long delay);
/**
+ * ieee80211_refresh_tx_agg_session_timer - Refresh a tx agg session timer.
+ * @sta: the station for which to start a BA session
+ * @tid: the TID to BA on.
+ *
+ * This function allows the low-level driver to refresh the tx agg session
+ * timer to maintain the BA session; the session itself is still managed by
+ * mac80211.
+ *
+ * Note: must be called in an RCU critical section.
+ */
+void ieee80211_refresh_tx_agg_session_timer(struct ieee80211_sta *sta,
+ u16 tid);
+
+/**
* ieee80211_start_tx_ba_session - Start a tx Block Ack session.
* @sta: the station for which to start a BA session
* @tid: the TID to BA on.
@@ -5571,6 +6334,22 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
const u8 *localaddr);
/**
+ * ieee80211_find_sta_by_link_addrs - find STA by link addresses
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @addr: remote station's link address
+ * @localaddr: local link address, use %NULL for any (but avoid that)
+ * @link_id: pointer to obtain the link ID if the STA is found,
+ * may be %NULL if the link ID is not needed
+ *
+ * Obtain the STA by link address; must be called under RCU protection.
+ */
+struct ieee80211_sta *
+ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw,
+ const u8 *addr,
+ const u8 *localaddr,
+ unsigned int *link_id);
+
+/**
* ieee80211_sta_block_awake - block station from waking up
* @hw: the hardware
* @pubsta: the station
@@ -5646,9 +6425,22 @@ void ieee80211_sta_eosp(struct ieee80211_sta *pubsta);
void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
+ * ieee80211_sta_recalc_aggregates - recalculate aggregate data after a change
+ * @pubsta: the station
+ *
+ * Call this function after changing per-link aggregate data as referenced in
+ * &struct ieee80211_sta_aggregates by accessing the agg field of
+ * &struct ieee80211_link_sta.
+ *
+ * With non-MLO, the data in deflink is referenced directly. In that case
+ * there is no need to call this function.
+ */
+void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta);
+
+/**
* ieee80211_sta_register_airtime - register airtime usage for a sta/tid
*
- * Register airtime usage for a given sta on a given tid. The driver can call
+ * Register airtime usage for a given sta on a given tid. The driver must call
* this function to notify mac80211 that a station used a certain amount of
* airtime. This information will be used by the TXQ scheduler to schedule
* stations in a way that ensures airtime fairness.
@@ -5692,12 +6484,12 @@ ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
* @iter: iterator function that will be called for each key
* @iter_data: custom data to pass to the iterator function
*
+ * Context: Must be called with wiphy mutex held; can sleep.
+ *
* This function can be used to iterate all the keys known to
* mac80211, even those that weren't previously programmed into
* the device. This is intended for use in WoWLAN if the device
- * needs reprogramming of the keys during suspend. Note that due
- * to locking reasons, it is also only safe to call this at few
- * spots since it must hold the RTNL and be able to sleep.
+ * needs reprogramming of the keys during suspend.
*
* The order in which the keys are iterated matches the order
* in which they were originally installed and handed to the
@@ -5807,6 +6599,17 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
void ieee80211_connection_loss(struct ieee80211_vif *vif);
/**
+ * ieee80211_disconnect - request disconnection
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @reconnect: immediate reconnect is desired
+ *
+ * Request disconnection from the current network and, if @reconnect is set,
+ * send a hint to the higher layers that immediate reconnect is desired.
+ */
+void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect);
+
+/**
* ieee80211_resume_disconnect - disconnect from AP after resume
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -5830,6 +6633,16 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif);
void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
/**
+ * ieee80211_hw_restart_disconnect - disconnect from AP after
+ * hardware restart
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Instructs mac80211 to disconnect from the AP after
+ * hardware restart.
+ */
+void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif);
+
+/**
* ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
* rssi threshold triggered
*
@@ -5866,22 +6679,38 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw);
* ieee80211_chswitch_done - Complete channel switch process
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @success: make the channel switch successful or not
+ * @link_id: the link_id on which the switch was done. Ignored if success is
+ * false.
*
* Complete the channel switch post-process: set the new operational channel
* and wake up the suspended queues.
*/
-void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
+void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
+ unsigned int link_id);
+
+/**
+ * ieee80211_channel_switch_disconnect - disconnect due to channel switch error
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @block_tx: if %true, do not send deauth frame.
+ *
+ * Instruct mac80211 to disconnect due to a channel switch error. The channel
+ * switch can request that tx be blocked, in which case we need to make sure
+ * no deauth frame is sent.
+ */
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
+ bool block_tx);
/**
* ieee80211_request_smps - request SM PS transition
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: link ID for MLO, or 0
* @smps_mode: new SM PS mode
*
* This allows the driver to request an SM PS transition in managed
* mode. This is useful when the driver has more information than
* the stack about possible interference, for example from Bluetooth.
*/
-void ieee80211_request_smps(struct ieee80211_vif *vif,
+void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
enum ieee80211_smps_mode smps_mode);
/**
@@ -5926,6 +6755,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
* marks frames marked in the bitmap as having been filtered. Afterwards, it
* checks if any frames in the window starting from @ssn can now be released
* (in case they were only waiting for frames that were filtered.)
+ * (Only works correctly if @max_rx_aggregation_subframes <= 64 frames.)
*/
void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
u16 ssn, u64 filtered,
@@ -6057,6 +6887,11 @@ enum rate_control_capabilities {
* otherwise the NSS difference doesn't bother us.
*/
RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+ /**
+ * @RATE_CTRL_CAPA_AMPDU_TRIGGER:
+ * mac80211 should start A-MPDU sessions on tx
+ */
+ RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1),
};
struct rate_control_ops {
@@ -6097,7 +6932,7 @@ static inline int rate_supported(struct ieee80211_sta *sta,
enum nl80211_band band,
int index)
{
- return (sta == NULL || sta->supp_rates[band] & BIT(index));
+ return (sta == NULL || sta->deflink.supp_rates[band] & BIT(index));
}
static inline s8
@@ -6205,9 +7040,52 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
}
/**
+ * ieee80211_get_he_iftype_cap_vif - return HE capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
+ * ieee80211_get_he_6ghz_capa_vif - return HE 6 GHz capabilities
+ * @sband: the sband to search for the STA on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: the 6 GHz capabilities
+ */
+static inline __le16
+ieee80211_get_he_6ghz_capa_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_he_6ghz_capa(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
+ * ieee80211_get_eht_iftype_cap_vif - return EHT capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_eht_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
* ieee80211_update_mu_groups - set the VHT MU-MIMO group data
*
* @vif: the specified virtual interface
+ * @link_id: the link ID for MLO, otherwise 0
* @membership: 64 bits array - a bit is set if station is member of the group
* @position: 2 bits per group id indicating the position in the group
*
@@ -6216,7 +7094,7 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
* matching GroupId management frame.
* Calls to this function need to be serialized with RX path.
*/
-void ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id,
const u8 *membership, const u8 *position);
void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
@@ -6264,7 +7142,13 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
int band, struct ieee80211_sta **sta);
/**
- * Sanity-check and parse the radiotap header of injected frames
+ * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
+ * of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
* @skb: packet injected by userspace
* @dev: the &struct device of this 802.11 device
*/
@@ -6319,7 +7203,7 @@ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
/**
- * ieee80211_tdls_oper - request userspace to perform a TDLS operation
+ * ieee80211_tdls_oper_request - request userspace to perform a TDLS operation
* @vif: virtual interface
* @peer: the peer's destination address
* @oper: the requested TDLS operation
@@ -6416,6 +7300,18 @@ static inline struct sk_buff *ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw,
}
/**
+ * ieee80211_handle_wake_tx_queue - mac80211 handler for wake_tx_queue callback
+ *
+ * @hw: pointer as passed to the wake_tx_queue() callback
+ * @txq: pointer as passed to the wake_tx_queue() callback
+ *
+ * Drivers can use this function as the mandatory mac80211 wake_tx_queue
+ * callback in struct ieee80211_ops; they should not call it directly.
+ */
+void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+
+/**
* ieee80211_next_txq - get next tx queue to pull packets from
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
@@ -6486,7 +7382,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
*
* This function is used to check whether given txq is allowed to transmit by
* the airtime scheduler, and can be used by drivers to access the airtime
- * fairness accounting without going using the scheduling order enfored by
+ * fairness accounting without using the scheduling order enforced by
* next_txq().
*
* Returns %true if the airtime scheduler thinks the TXQ should be allowed to
@@ -6594,4 +7490,113 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
*/
bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
+/**
+ * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: FILS discovery template. %NULL on error.
+ */
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
+ * probe response template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: Unsolicited broadcast probe response template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_obss_color_collision_notify - notify userland about a BSS color
+ * collision.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @color_bitmap: a 64-bit bitmap representing the colors that the local BSS is
+ * aware of.
+ */
+void
+ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ u64 color_bitmap);
+
+/**
+ * ieee80211_is_tx_data - check if frame is a data frame
+ *
+ * The function is used to check if a frame is a data frame. Frames with
+ * hardware encapsulation enabled are data frames.
+ *
+ * @skb: the frame to be transmitted.
+ */
+static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+
+ return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control);
+}
+
+/**
+ * ieee80211_set_active_links - set active links in client mode
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * Context: Must be called with wiphy mutex held; may sleep; calls
+ * back into the driver.
+ *
+ * This changes the active links on an interface. The interface
+ * must be in client mode (in AP mode, all links are always active),
+ * and @active_links must be a subset of the vif's valid_links.
+ *
+ * If a link is switched off and another is switched on at the same
+ * time (e.g. active_links going from 0x1 to 0x10) then you will get
+ * a sequence of calls like
+ *
+ * - change_vif_links(0x11)
+ * - unassign_vif_chanctx(link_id=0)
+ * - change_sta_links(0x11) for each affected STA (the AP)
+ * (TDLS connections on now inactive links should be torn down)
+ * - remove group keys on the old link (link_id 0)
+ * - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
+ * - change_sta_links(0x10) for each affected STA (the AP)
+ * - assign_vif_chanctx(link_id=4)
+ * - change_vif_links(0x10)
+ */
+int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
+
+/**
+ * ieee80211_set_active_links_async - asynchronously set active links
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * See ieee80211_set_active_links() for more information, the only
+ * difference here is that the link change is triggered async and
+ * can be called in any context, but the link switch will only be
+ * completed after it returns.
+ */
+void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
+ u16 active_links);
+
+/* for older drivers - let's not document these ... */
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index d524ffb9eb25..4a3a9de9da73 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -111,9 +111,6 @@ struct ieee802154_hw {
* promiscuous mode setting.
*
* @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that receiver omits FCS.
- *
- * @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that receiver will not filter
- * frames with bad checksum.
*/
enum ieee802154_hw_flags {
IEEE802154_HW_TX_OMIT_CKSUM = BIT(0),
@@ -123,7 +120,6 @@ enum ieee802154_hw_flags {
IEEE802154_HW_AFILT = BIT(4),
IEEE802154_HW_PROMISCUOUS = BIT(5),
IEEE802154_HW_RX_OMIT_CKSUM = BIT(6),
- IEEE802154_HW_RX_DROP_BAD_CKSUM = BIT(7),
};
/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
@@ -460,30 +456,34 @@ void ieee802154_unregister_hw(struct ieee802154_hw *hw);
*/
void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
+
/**
- * ieee802154_wake_queue - wake ieee802154 queue
- * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * ieee802154_xmit_complete - frame transmission complete
*
- * Drivers should use this function instead of netif_wake_queue.
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @ifs_handling: indicate interframe space handling
*/
-void ieee802154_wake_queue(struct ieee802154_hw *hw);
+void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+ bool ifs_handling);
/**
- * ieee802154_stop_queue - stop ieee802154 queue
- * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * ieee802154_xmit_error - offloaded frame transmission failed
*
- * Drivers should use this function instead of netif_stop_queue.
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @reason: error code
*/
-void ieee802154_stop_queue(struct ieee802154_hw *hw);
+void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
+ int reason);
/**
- * ieee802154_xmit_complete - frame transmission complete
+ * ieee802154_xmit_hw_error - frame could not be offloaded to the transmitter
+ * because of a hardware error (bus error, timeout, etc.)
*
* @hw: pointer as obtained from ieee802154_alloc_hw().
* @skb: buffer for transmission
- * @ifs_handling: indicate interframe space handling
*/
-void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
- bool ifs_handling);
+void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb);
#endif /* NET_MAC802154_H */
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 52874cdfe226..dbd22180cc5c 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -8,18 +8,34 @@
#define _NET_MACSEC_H_
#include <linux/u64_stats_sync.h>
+#include <linux/if_vlan.h>
#include <uapi/linux/if_link.h>
#include <uapi/linux/if_macsec.h>
#define MACSEC_DEFAULT_PN_LEN 4
#define MACSEC_XPN_PN_LEN 8
-#define MACSEC_SALT_LEN 12
#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+#define MACSEC_SCI_LEN 8
+#define MACSEC_PORT_ES (htons(0x0001))
+
+#define MACSEC_TCI_VERSION 0x80
+#define MACSEC_TCI_ES 0x40 /* end station */
+#define MACSEC_TCI_SC 0x20 /* SCI present */
+#define MACSEC_TCI_SCB 0x10 /* epon */
+#define MACSEC_TCI_E 0x08 /* encryption */
+#define MACSEC_TCI_C 0x04 /* changed text */
+#define MACSEC_AN_MASK 0x03 /* association number */
+#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
+
+#define MACSEC_DEFAULT_ICV_LEN 16
+
typedef u64 __bitwise sci_t;
typedef u32 __bitwise ssci_t;
+struct metadata_dst;
+
typedef union salt {
struct {
u32 ssci;
@@ -183,6 +199,7 @@ struct macsec_tx_sa {
* @scb: single copy broadcast flag
* @sa: array of secure associations
* @stats: stats for this TXSC
+ * @md_dst: MACsec offload metadata dst
*/
struct macsec_tx_sc {
bool active;
@@ -193,6 +210,7 @@ struct macsec_tx_sc {
bool scb;
struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
struct pcpu_tx_sc_stats __percpu *stats;
+ struct metadata_dst *md_dst;
};
/**
@@ -229,6 +247,23 @@ struct macsec_secy {
/**
* struct macsec_context - MACsec context for hardware offloading
+ * @netdev: a valid pointer to a struct net_device if @offload ==
+ * MACSEC_OFFLOAD_MAC
+ * @phydev: a valid pointer to a struct phy_device if @offload ==
+ * MACSEC_OFFLOAD_PHY
+ * @offload: MACsec offload status
+ * @secy: pointer to a MACsec SecY
+ * @rx_sc: pointer to a RX SC
+ * @update_pn: when updating the SA, update the next PN
+ * @assoc_num: association number of the target SA
+ * @key: key of the target SA
+ * @rx_sa: pointer to an RX SA if an RX SA is added/updated/removed
+ * @tx_sa: pointer to a TX SA if a TX SA is added/updated/removed
+ * @tx_sc_stats: pointer to TX SC stats structure
+ * @tx_sa_stats: pointer to TX SA stats structure
+ * @rx_sc_stats: pointer to RX SC stats structure
+ * @rx_sa_stats: pointer to RX SA stats structure
+ * @dev_stats: pointer to dev stats structure
*/
struct macsec_context {
union {
@@ -240,8 +275,9 @@ struct macsec_context {
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct {
+ bool update_pn;
unsigned char assoc_num;
- u8 key[MACSEC_KEYID_LEN];
+ u8 key[MACSEC_MAX_KEY_LEN];
union {
struct macsec_rx_sa *rx_sa;
struct macsec_tx_sa *tx_sa;
@@ -254,12 +290,37 @@ struct macsec_context {
struct macsec_rx_sa_stats *rx_sa_stats;
struct macsec_dev_stats *dev_stats;
} stats;
-
- u8 prepare:1;
};
/**
* struct macsec_ops - MACsec offloading operations
+ * @mdo_dev_open: called when the MACsec interface transitions to the up state
+ * @mdo_dev_stop: called when the MACsec interface transitions to the down
+ * state
+ * @mdo_add_secy: called when a new SecY is added
+ * @mdo_upd_secy: called when the SecY flags are changed or the MAC address of
+ * the MACsec interface is changed
+ * @mdo_del_secy: called when the hw offload is disabled or the MACsec
+ * interface is removed
+ * @mdo_add_rxsc: called when a new RX SC is added
+ * @mdo_upd_rxsc: called when a certain RX SC is updated
+ * @mdo_del_rxsc: called when a certain RX SC is removed
+ * @mdo_add_rxsa: called when a new RX SA is added
+ * @mdo_upd_rxsa: called when a certain RX SA is updated
+ * @mdo_del_rxsa: called when a certain RX SA is removed
+ * @mdo_add_txsa: called when a new TX SA is added
+ * @mdo_upd_txsa: called when a certain TX SA is updated
+ * @mdo_del_txsa: called when a certain TX SA is removed
+ * @mdo_get_dev_stats: called when dev stats are read
+ * @mdo_get_tx_sc_stats: called when TX SC stats are read
+ * @mdo_get_tx_sa_stats: called when TX SA stats are read
+ * @mdo_get_rx_sc_stats: called when RX SC stats are read
+ * @mdo_get_rx_sa_stats: called when RX SA stats are read
+ * @mdo_insert_tx_tag: called to insert the TX tag
+ * @needed_headroom: number of bytes reserved at the beginning of the sk_buff
+ * for the TX tag
+ * @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
+ * TX tag
*/
struct macsec_ops {
/* Device wide */
@@ -286,8 +347,36 @@ struct macsec_ops {
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+ /* Offload tag */
+ int (*mdo_insert_tx_tag)(struct phy_device *phydev,
+ struct sk_buff *skb);
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+static inline bool macsec_send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+struct net_device *macsec_get_real_dev(const struct net_device *dev);
+bool macsec_netdev_is_offloaded(struct net_device *dev);
+
+static inline void *macsec_netdev_priv(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (is_vlan_dev(dev))
+ return netdev_priv(vlan_dev_priv(dev)->real_dev);
+#endif
+ return netdev_priv(dev);
+}
+
+static inline u64 sci_to_cpu(sci_t sci)
+{
+ return be64_to_cpu((__force __be64)sci);
+}
#endif /* _NET_MACSEC_H_ */
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
new file mode 100644
index 000000000000..27684135bb4d
--- /dev/null
+++ b/include/net/mana/gdma.h
@@ -0,0 +1,871 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _GDMA_H
+#define _GDMA_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+
+#include "shm_channel.h"
+
+#define GDMA_STATUS_MORE_ENTRIES 0x00000105
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+enum gdma_request_type {
+ GDMA_VERIFY_VF_DRIVER_VERSION = 1,
+ GDMA_QUERY_MAX_RESOURCES = 2,
+ GDMA_LIST_DEVICES = 3,
+ GDMA_REGISTER_DEVICE = 4,
+ GDMA_DEREGISTER_DEVICE = 5,
+ GDMA_GENERATE_TEST_EQE = 10,
+ GDMA_CREATE_QUEUE = 12,
+ GDMA_DISABLE_QUEUE = 13,
+ GDMA_ALLOCATE_RESOURCE_RANGE = 22,
+ GDMA_DESTROY_RESOURCE_RANGE = 24,
+ GDMA_CREATE_DMA_REGION = 25,
+ GDMA_DMA_REGION_ADD_PAGES = 26,
+ GDMA_DESTROY_DMA_REGION = 27,
+ GDMA_CREATE_PD = 29,
+ GDMA_DESTROY_PD = 30,
+ GDMA_CREATE_MR = 31,
+ GDMA_DESTROY_MR = 32,
+ GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
+};
+
+#define GDMA_RESOURCE_DOORBELL_PAGE 27
+
+enum gdma_queue_type {
+ GDMA_INVALID_QUEUE,
+ GDMA_SQ,
+ GDMA_RQ,
+ GDMA_CQ,
+ GDMA_EQ,
+};
+
+enum gdma_work_request_flags {
+ GDMA_WR_NONE = 0,
+ GDMA_WR_OOB_IN_SGL = BIT(0),
+ GDMA_WR_PAD_BY_SGE0 = BIT(1),
+};
+
+enum gdma_eqe_type {
+ GDMA_EQE_COMPLETION = 3,
+ GDMA_EQE_TEST_EVENT = 64,
+ GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
+ GDMA_EQE_HWC_INIT_DATA = 130,
+ GDMA_EQE_HWC_INIT_DONE = 131,
+ GDMA_EQE_HWC_SOC_RECONFIG = 132,
+ GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
+};
+
+enum {
+ GDMA_DEVICE_NONE = 0,
+ GDMA_DEVICE_HWC = 1,
+ GDMA_DEVICE_MANA = 2,
+ GDMA_DEVICE_MANA_IB = 3,
+};
+
+struct gdma_resource {
+ /* Protect the bitmap */
+ spinlock_t lock;
+
+ /* The bitmap size in bits. */
+ u32 size;
+
+ /* The bitmap tracks the resources. */
+ unsigned long *map;
+};
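
For illustration only (not part of this header), a caller could claim one id from such a bitmap under the lock; a minimal sketch, assuming the generic kernel bitmap helpers are available:

	static int example_claim_res_id(struct gdma_resource *r)
	{
		unsigned long flags, id;

		spin_lock_irqsave(&r->lock, flags);
		id = find_first_zero_bit(r->map, r->size);
		if (id < r->size)
			__set_bit(id, r->map);
		spin_unlock_irqrestore(&r->lock, flags);

		/* id == r->size means the bitmap was full */
		return id < r->size ? (int)id : -ENOSPC;
	}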
+
+union gdma_doorbell_entry {
+ u64 as_uint64;
+
+ struct {
+ u64 id : 24;
+ u64 reserved : 8;
+ u64 tail_ptr : 31;
+ u64 arm : 1;
+ } cq;
+
+ struct {
+ u64 id : 24;
+ u64 wqe_cnt : 8;
+ u64 tail_ptr : 32;
+ } rq;
+
+ struct {
+ u64 id : 24;
+ u64 reserved : 8;
+ u64 tail_ptr : 32;
+ } sq;
+
+ struct {
+ u64 id : 16;
+ u64 reserved : 16;
+ u64 tail_ptr : 31;
+ u64 arm : 1;
+ } eq;
+}; /* HW DATA */
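
As a sketch, composing the 64-bit CQ doorbell value from these bitfields might look as follows; where the value is written (the doorbell page address) is device-specific and omitted here:

	static u64 example_cq_doorbell_val(u32 cq_id, u32 new_head, bool arm)
	{
		union gdma_doorbell_entry e = {};

		e.cq.id = cq_id;
		e.cq.tail_ptr = new_head;	/* truncated to 31 bits */
		e.cq.arm = arm;

		return e.as_uint64;
	}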
+
+struct gdma_msg_hdr {
+ u32 hdr_type;
+ u32 msg_type;
+ u16 msg_version;
+ u16 hwc_msg_id;
+ u32 msg_size;
+}; /* HW DATA */
+
+struct gdma_dev_id {
+ union {
+ struct {
+ u16 type;
+ u16 instance;
+ };
+
+ u32 as_uint32;
+ };
+}; /* HW DATA */
+
+struct gdma_req_hdr {
+ struct gdma_msg_hdr req;
+ struct gdma_msg_hdr resp; /* The expected response */
+ struct gdma_dev_id dev_id;
+ u32 activity_id;
+}; /* HW DATA */
+
+struct gdma_resp_hdr {
+ struct gdma_msg_hdr response;
+ struct gdma_dev_id dev_id;
+ u32 activity_id;
+ u32 status;
+ u32 reserved;
+}; /* HW DATA */
+
+struct gdma_general_req {
+ struct gdma_req_hdr hdr;
+}; /* HW DATA */
+
+#define GDMA_MESSAGE_V1 1
+#define GDMA_MESSAGE_V2 2
+#define GDMA_MESSAGE_V3 3
+
+struct gdma_general_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+#define GDMA_STANDARD_HEADER_TYPE 0
+
+static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
+ u32 req_size, u32 resp_size)
+{
+ hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
+ hdr->req.msg_type = code;
+ hdr->req.msg_version = GDMA_MESSAGE_V1;
+ hdr->req.msg_size = req_size;
+
+ hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
+ hdr->resp.msg_type = code;
+ hdr->resp.msg_version = GDMA_MESSAGE_V1;
+ hdr->resp.msg_size = resp_size;
+}
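
A hedged usage sketch, pairing this helper with the request/response structs in this header (gdma_query_max_resources_resp is defined further below) to prepare a GDMA_QUERY_MAX_RESOURCES exchange:

	static void example_prep_query_max_resources(struct gdma_general_req *req,
						     struct gdma_query_max_resources_resp *resp)
	{
		mana_gd_init_req_hdr(&req->hdr, GDMA_QUERY_MAX_RESOURCES,
				     sizeof(*req), sizeof(*resp));
		/* req/resp are then exchanged, e.g. via mana_gd_send_request() */
	}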
+
+/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
+struct gdma_sge {
+ u64 address;
+ u32 mem_key;
+ u32 size;
+}; /* HW DATA */
+
+struct gdma_wqe_request {
+ struct gdma_sge *sgl;
+ u32 num_sge;
+
+ u32 inline_oob_size;
+ const void *inline_oob_data;
+
+ u32 flags;
+ u32 client_data_unit;
+};
+
+enum gdma_page_type {
+ GDMA_PAGE_TYPE_4K,
+};
+
+#define GDMA_INVALID_DMA_REGION 0
+
+struct gdma_mem_info {
+ struct device *dev;
+
+ dma_addr_t dma_handle;
+ void *virt_addr;
+ u64 length;
+
+ /* Allocated by the PF driver */
+ u64 dma_region_handle;
+};
+
+#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
+
+struct gdma_dev {
+ struct gdma_context *gdma_context;
+
+ struct gdma_dev_id dev_id;
+
+ u32 pdid;
+ u32 doorbell;
+ u32 gpa_mkey;
+
+ /* GDMA driver specific pointer */
+ void *driver_data;
+
+ struct auxiliary_device *adev;
+};
+
+#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+
+#define GDMA_CQE_SIZE 64
+#define GDMA_EQE_SIZE 16
+#define GDMA_MAX_SQE_SIZE 512
+#define GDMA_MAX_RQE_SIZE 256
+
+#define GDMA_COMP_DATA_SIZE 0x3C
+
+#define GDMA_EVENT_DATA_SIZE 0xC
+
+/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
+#define GDMA_WQE_BU_SIZE 32
+
+#define INVALID_PDID UINT_MAX
+#define INVALID_DOORBELL UINT_MAX
+#define INVALID_MEM_KEY UINT_MAX
+#define INVALID_QUEUE_ID UINT_MAX
+#define INVALID_PCI_MSIX_INDEX UINT_MAX
+
+struct gdma_comp {
+ u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
+ u32 wq_num;
+ bool is_sq;
+};
+
+struct gdma_event {
+ u32 details[GDMA_EVENT_DATA_SIZE / 4];
+ u8 type;
+};
+
+struct gdma_queue;
+
+struct mana_eq {
+ struct gdma_queue *eq;
+};
+
+typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
+ struct gdma_event *e);
+
+typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
+
+/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
+ * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
+ * driver increases the 'head' in BUs rather than in bytes, and notifies
+ * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
+ * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
+ *
+ * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
+ * processed, the driver increases the 'tail' to indicate that WQEs have
+ * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
+ *
+ * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
+ * that the EQ/CQ is big enough so they can't overflow, and the driver uses
+ * the owner bits mechanism to detect if the queue has become empty.
+ */
+struct gdma_queue {
+ struct gdma_dev *gdma_dev;
+
+ enum gdma_queue_type type;
+ u32 id;
+
+ struct gdma_mem_info mem_info;
+
+ void *queue_mem_ptr;
+ u32 queue_size;
+
+ bool monitor_avl_buf;
+
+ u32 head;
+ u32 tail;
+ struct list_head entry;
+
+ /* Extra fields specific to EQ/CQ. */
+ union {
+ struct {
+ bool disable_needed;
+
+ gdma_eq_callback *callback;
+ void *context;
+
+ unsigned int msix_index;
+
+ u32 log2_throttle_limit;
+ } eq;
+
+ struct {
+ gdma_cq_callback *callback;
+ void *context;
+
+ struct gdma_queue *parent; /* For CQ/EQ relationship */
+ } cq;
+ };
+};
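
To make the head/tail comment above concrete, an illustrative producer-side sketch of advancing 'head' after posting a WQE of wqe_size bytes (wqe_size being a multiple of GDMA_WQE_BU_SIZE):

	static void example_advance_sq_head(struct gdma_queue *wq, u32 wqe_size)
	{
		/* 'head' counts 32-byte Basic Units, not bytes */
		wq->head += wqe_size / GDMA_WQE_BU_SIZE;
	}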
+
+struct gdma_queue_spec {
+ enum gdma_queue_type type;
+ bool monitor_avl_buf;
+ unsigned int queue_size;
+
+ /* Extra fields specific to EQ/CQ. */
+ union {
+ struct {
+ gdma_eq_callback *callback;
+ void *context;
+
+ unsigned long log2_throttle_limit;
+ unsigned int msix_index;
+ } eq;
+
+ struct {
+ gdma_cq_callback *callback;
+ void *context;
+
+ struct gdma_queue *parent_eq;
+
+ } cq;
+ };
+};
+
+#define MANA_IRQ_NAME_SZ 32
+
+struct gdma_irq_context {
+ void (*handler)(void *arg);
+ /* Protect the eq_list */
+ spinlock_t lock;
+ struct list_head eq_list;
+ char name[MANA_IRQ_NAME_SZ];
+};
+
+struct gdma_context {
+ struct device *dev;
+
+ /* Per-vPort max number of queues */
+ unsigned int max_num_queues;
+ unsigned int max_num_msix;
+ unsigned int num_msix_usable;
+ struct gdma_irq_context *irq_contexts;
+
+ /* L2 MTU */
+ u16 adapter_mtu;
+
+ /* This maps a CQ index to the queue structure. */
+ unsigned int max_num_cqs;
+ struct gdma_queue **cq_table;
+
+ /* Protect eq_test_event and test_event_eq_id */
+ struct mutex eq_test_event_mutex;
+ struct completion eq_test_event;
+ u32 test_event_eq_id;
+
+ bool is_pf;
+ phys_addr_t bar0_pa;
+ void __iomem *bar0_va;
+ void __iomem *shm_base;
+ void __iomem *db_page_base;
+ phys_addr_t phys_db_page_base;
+ u32 db_page_size;
+ int numa_node;
+
+	/* Shared memory channel (used to bootstrap HWC) */
+ struct shm_channel shm_channel;
+
+ /* Hardware communication channel (HWC) */
+ struct gdma_dev hwc;
+
+ /* Azure network adapter */
+ struct gdma_dev mana;
+
+ /* Azure RDMA adapter */
+ struct gdma_dev mana_ib;
+};
+
+#define MAX_NUM_GDMA_DEVICES 4
+
+static inline bool mana_gd_is_mana(struct gdma_dev *gd)
+{
+ return gd->dev_id.type == GDMA_DEVICE_MANA;
+}
+
+static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
+{
+ return gd->dev_id.type == GDMA_DEVICE_HWC;
+}
+
+u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
+u32 mana_gd_wq_avail_space(struct gdma_queue *wq);
+
+int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);
+
+int mana_gd_create_hwc_queue(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+int mana_gd_create_mana_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
+
+int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
+
+void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
+
+struct gdma_wqe {
+ u32 reserved :24;
+ u32 last_vbytes :8;
+
+ union {
+ u32 flags;
+
+ struct {
+ u32 num_sge :8;
+ u32 inline_oob_size_div4:3;
+ u32 client_oob_in_sgl :1;
+ u32 reserved1 :4;
+ u32 client_data_unit :14;
+ u32 reserved2 :2;
+ };
+ };
+}; /* HW DATA */
+
+#define INLINE_OOB_SMALL_SIZE 8
+#define INLINE_OOB_LARGE_SIZE 24
+
+#define MAX_TX_WQE_SIZE 512
+#define MAX_RX_WQE_SIZE 256
+
+#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
+ sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
+ sizeof(struct gdma_sge))
+
+#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
+ sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
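
With the 16-byte struct gdma_sge, these evaluate to (512 - 16 - 8) / 16 = 30 SGL entries per TX WQE and (256 - 16) / 16 = 15 per RX WQE.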
+
+struct gdma_cqe {
+ u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
+
+ union {
+ u32 as_uint32;
+
+ struct {
+ u32 wq_num : 24;
+ u32 is_sq : 1;
+ u32 reserved : 4;
+ u32 owner_bits : 3;
+ };
+ } cqe_info;
+}; /* HW DATA */
+
+#define GDMA_CQE_OWNER_BITS 3
+
+#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
+
+#define SET_ARM_BIT 1
+
+#define GDMA_EQE_OWNER_BITS 3
+
+union gdma_eqe_info {
+ u32 as_uint32;
+
+ struct {
+ u32 type : 8;
+ u32 reserved1 : 8;
+ u32 client_id : 2;
+ u32 reserved2 : 11;
+ u32 owner_bits : 3;
+ };
+}; /* HW DATA */
+
+#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
+#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
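
A sketch of the owner-bit emptiness test that the gdma_queue comment alludes to, assuming 'head' starts at INITIALIZED_OWNER_BIT(log2_num_entries) and is incremented once per processed EQE:

	static bool example_eq_is_empty(u32 head, u32 num_entries,
					u32 eqe_owner_bits)
	{
		/* owner bits from the previous pass mean no new entry yet */
		u32 old_bits = (head / num_entries - 1) & GDMA_EQE_OWNER_MASK;

		return eqe_owner_bits == old_bits;
	}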
+
+struct gdma_eqe {
+ u32 details[GDMA_EVENT_DATA_SIZE / 4];
+ u32 eqe_info;
+}; /* HW DATA */
+
+#define GDMA_REG_DB_PAGE_OFFSET 8
+#define GDMA_REG_DB_PAGE_SIZE 0x10
+#define GDMA_REG_SHM_OFFSET 0x18
+
+#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
+#define GDMA_PF_REG_SHM_OFF 0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108
+
+#define MANA_PF_DEVICE_ID 0x00B9
+#define MANA_VF_DEVICE_ID 0x00BA
+
+struct gdma_posted_wqe_info {
+ u32 wqe_size_in_bu;
+};
+
+/* GDMA_GENERATE_TEST_EQE */
+struct gdma_generate_test_event_req {
+ struct gdma_req_hdr hdr;
+ u32 queue_index;
+}; /* HW DATA */
+
+/* GDMA_VERIFY_VF_DRIVER_VERSION */
+enum {
+ GDMA_PROTOCOL_V1 = 1,
+ GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
+ GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
+};
+
+#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
+
+/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
+ * so the driver is able to reliably support features like busy_poll.
+ */
+#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+
+#define GDMA_DRV_CAP_FLAGS1 \
+ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
+
+#define GDMA_DRV_CAP_FLAGS2 0
+
+#define GDMA_DRV_CAP_FLAGS3 0
+
+#define GDMA_DRV_CAP_FLAGS4 0
+
+struct gdma_verify_ver_req {
+ struct gdma_req_hdr hdr;
+
+ /* Mandatory fields required for protocol establishment */
+ u64 protocol_ver_min;
+ u64 protocol_ver_max;
+
+ /* Gdma Driver Capability Flags */
+ u64 gd_drv_cap_flags1;
+ u64 gd_drv_cap_flags2;
+ u64 gd_drv_cap_flags3;
+ u64 gd_drv_cap_flags4;
+
+ /* Advisory fields */
+ u64 drv_ver;
+ u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
+ u32 reserved;
+ u32 os_ver_major;
+ u32 os_ver_minor;
+ u32 os_ver_build;
+ u32 os_ver_platform;
+ u64 reserved_2;
+ u8 os_ver_str1[128];
+ u8 os_ver_str2[128];
+ u8 os_ver_str3[128];
+ u8 os_ver_str4[128];
+}; /* HW DATA */
+
+struct gdma_verify_ver_resp {
+ struct gdma_resp_hdr hdr;
+ u64 gdma_protocol_ver;
+ u64 pf_cap_flags1;
+ u64 pf_cap_flags2;
+ u64 pf_cap_flags3;
+ u64 pf_cap_flags4;
+}; /* HW DATA */
+
+/* GDMA_QUERY_MAX_RESOURCES */
+struct gdma_query_max_resources_resp {
+ struct gdma_resp_hdr hdr;
+ u32 status;
+ u32 max_sq;
+ u32 max_rq;
+ u32 max_cq;
+ u32 max_eq;
+ u32 max_db;
+ u32 max_mst;
+ u32 max_cq_mod_ctx;
+ u32 max_mod_cq;
+ u32 max_msix;
+}; /* HW DATA */
+
+/* GDMA_LIST_DEVICES */
+struct gdma_list_devices_resp {
+ struct gdma_resp_hdr hdr;
+ u32 num_of_devs;
+ u32 reserved;
+ struct gdma_dev_id devs[64];
+}; /* HW DATA */
+
+/* GDMA_REGISTER_DEVICE */
+struct gdma_register_device_resp {
+ struct gdma_resp_hdr hdr;
+ u32 pdid;
+ u32 gpa_mkey;
+ u32 db_id;
+}; /* HW DATA */
+
+struct gdma_allocate_resource_range_req {
+ struct gdma_req_hdr hdr;
+ u32 resource_type;
+ u32 num_resources;
+ u32 alignment;
+ u32 allocated_resources;
+};
+
+struct gdma_allocate_resource_range_resp {
+ struct gdma_resp_hdr hdr;
+ u32 allocated_resources;
+};
+
+struct gdma_destroy_resource_range_req {
+ struct gdma_req_hdr hdr;
+ u32 resource_type;
+ u32 num_resources;
+ u32 allocated_resources;
+};
+
+/* GDMA_CREATE_QUEUE */
+struct gdma_create_queue_req {
+ struct gdma_req_hdr hdr;
+ u32 type;
+ u32 reserved1;
+ u32 pdid;
+	u32 doorbell_id;
+ u64 gdma_region;
+ u32 reserved2;
+ u32 queue_size;
+ u32 log2_throttle_limit;
+ u32 eq_pci_msix_index;
+ u32 cq_mod_ctx_id;
+ u32 cq_parent_eq_id;
+ u8 rq_drop_on_overrun;
+ u8 rq_err_on_wqe_overflow;
+ u8 rq_chain_rec_wqes;
+ u8 sq_hw_db;
+ u32 reserved3;
+}; /* HW DATA */
+
+struct gdma_create_queue_resp {
+ struct gdma_resp_hdr hdr;
+ u32 queue_index;
+}; /* HW DATA */
+
+/* GDMA_DISABLE_QUEUE */
+struct gdma_disable_queue_req {
+ struct gdma_req_hdr hdr;
+ u32 type;
+ u32 queue_index;
+ u32 alloc_res_id_on_creation;
+}; /* HW DATA */
+
+/* GDMA_QUERY_HWC_TIMEOUT */
+struct gdma_query_hwc_timeout_req {
+ struct gdma_req_hdr hdr;
+ u32 timeout_ms;
+ u32 reserved;
+};
+
+struct gdma_query_hwc_timeout_resp {
+ struct gdma_resp_hdr hdr;
+ u32 timeout_ms;
+ u32 reserved;
+};
+
+enum atb_page_size {
+ ATB_PAGE_SIZE_4K,
+ ATB_PAGE_SIZE_8K,
+ ATB_PAGE_SIZE_16K,
+ ATB_PAGE_SIZE_32K,
+ ATB_PAGE_SIZE_64K,
+ ATB_PAGE_SIZE_128K,
+ ATB_PAGE_SIZE_256K,
+ ATB_PAGE_SIZE_512K,
+ ATB_PAGE_SIZE_1M,
+ ATB_PAGE_SIZE_2M,
+ ATB_PAGE_SIZE_MAX,
+};
+
+enum gdma_mr_access_flags {
+ GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
+ GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
+ GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
+ GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
+ GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
+};
+
+/* GDMA_CREATE_DMA_REGION */
+struct gdma_create_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+ /* The total size of the DMA region */
+ u64 length;
+
+ /* The offset in the first page */
+ u32 offset_in_page;
+
+ /* enum gdma_page_type */
+ u32 gdma_page_type;
+
+ /* The total number of pages */
+ u32 page_count;
+
+ /* If page_addr_list_len is smaller than page_count,
+ * the remaining page addresses will be added via the
+ * message GDMA_DMA_REGION_ADD_PAGES.
+ */
+ u32 page_addr_list_len;
+ u64 page_addr_list[];
+}; /* HW DATA */
+
+struct gdma_create_dma_region_resp {
+ struct gdma_resp_hdr hdr;
+ u64 dma_region_handle;
+}; /* HW DATA */
+
+/* GDMA_DMA_REGION_ADD_PAGES */
+struct gdma_dma_region_add_pages_req {
+ struct gdma_req_hdr hdr;
+
+ u64 dma_region_handle;
+
+ u32 page_addr_list_len;
+ u32 reserved3;
+
+ u64 page_addr_list[];
+}; /* HW DATA */
+
+/* GDMA_DESTROY_DMA_REGION */
+struct gdma_destroy_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+ u64 dma_region_handle;
+}; /* HW DATA */
+
+enum gdma_pd_flags {
+ GDMA_PD_FLAG_INVALID = 0,
+};
+
+struct gdma_create_pd_req {
+ struct gdma_req_hdr hdr;
+ enum gdma_pd_flags flags;
+ u32 reserved;
+};/* HW DATA */
+
+struct gdma_create_pd_resp {
+ struct gdma_resp_hdr hdr;
+ u64 pd_handle;
+ u32 pd_id;
+ u32 reserved;
+};/* HW DATA */
+
+struct gdma_destroy_pd_req {
+ struct gdma_req_hdr hdr;
+ u64 pd_handle;
+};/* HW DATA */
+
+struct gdma_destroy_pd_resp {
+ struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
+enum gdma_mr_type {
+ /* Guest Virtual Address - MRs of this type allow access
+ * to memory mapped by PTEs associated with this MR using a virtual
+ * address that is set up in the MST
+ */
+ GDMA_MR_TYPE_GVA = 2,
+};
+
+struct gdma_create_mr_params {
+ u64 pd_handle;
+ enum gdma_mr_type mr_type;
+ union {
+ struct {
+ u64 dma_region_handle;
+ u64 virtual_address;
+ enum gdma_mr_access_flags access_flags;
+ } gva;
+ };
+};
+
+struct gdma_create_mr_request {
+ struct gdma_req_hdr hdr;
+ u64 pd_handle;
+ enum gdma_mr_type mr_type;
+ u32 reserved_1;
+
+ union {
+ struct {
+ u64 dma_region_handle;
+ u64 virtual_address;
+ enum gdma_mr_access_flags access_flags;
+ } gva;
+
+ };
+ u32 reserved_2;
+};/* HW DATA */
+
+struct gdma_create_mr_response {
+ struct gdma_resp_hdr hdr;
+ u64 mr_handle;
+ u32 lkey;
+ u32 rkey;
+};/* HW DATA */
+
+struct gdma_destroy_mr_request {
+ struct gdma_req_hdr hdr;
+ u64 mr_handle;
+};/* HW DATA */
+
+struct gdma_destroy_mr_response {
+ struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
+int mana_gd_verify_vf_version(struct pci_dev *pdev);
+
+int mana_gd_register_device(struct gdma_dev *gd);
+int mana_gd_deregister_device(struct gdma_dev *gd);
+
+int mana_gd_post_work_request(struct gdma_queue *wq,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info);
+
+int mana_gd_post_and_ring(struct gdma_queue *queue,
+ const struct gdma_wqe_request *wqe,
+ struct gdma_posted_wqe_info *wqe_info);
+
+int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
+void mana_gd_free_res_map(struct gdma_resource *r);
+
+void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
+ struct gdma_queue *queue);
+
+int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ struct gdma_mem_info *gmi);
+
+void mana_gd_free_memory(struct gdma_mem_info *gmi);
+
+int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
+ u32 resp_len, void *resp);
+
+int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
+
+#endif /* _GDMA_H */
diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h
new file mode 100644
index 000000000000..158b125692c2
--- /dev/null
+++ b/include/net/mana/hw_channel.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _HW_CHANNEL_H
+#define _HW_CHANNEL_H
+
+#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4
+
+#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000
+#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000
+
+#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1
+
+#define HWC_INIT_DATA_CQID 1
+#define HWC_INIT_DATA_RQID 2
+#define HWC_INIT_DATA_SQID 3
+#define HWC_INIT_DATA_QUEUE_DEPTH 4
+#define HWC_INIT_DATA_MAX_REQUEST 5
+#define HWC_INIT_DATA_MAX_RESPONSE 6
+#define HWC_INIT_DATA_MAX_NUM_CQS 7
+#define HWC_INIT_DATA_PDID 8
+#define HWC_INIT_DATA_GPA_MKEY 9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
+
+#define HWC_DATA_CFG_HWC_TIMEOUT 1
+
+#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+union hwc_init_eq_id_db {
+ u32 as_uint32;
+
+ struct {
+ u32 eq_id : 16;
+ u32 doorbell : 16;
+ };
+}; /* HW DATA */
+
+union hwc_init_type_data {
+ u32 as_uint32;
+
+ struct {
+ u32 value : 24;
+ u32 type : 8;
+ };
+}; /* HW DATA */
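
For illustration, decoding one 24-bit value/8-bit type pair from an init EQE payload could look like this sketch; the HWC_INIT_DATA_* codes above name the types:

	static void example_parse_init_datum(u32 raw, u16 *queue_depth)
	{
		union hwc_init_type_data datum = { .as_uint32 = raw };

		if (datum.type == HWC_INIT_DATA_QUEUE_DEPTH)
			*queue_depth = datum.value;
	}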
+
+struct hwc_rx_oob {
+ u32 type : 6;
+ u32 eom : 1;
+ u32 som : 1;
+ u32 vendor_err : 8;
+ u32 reserved1 : 16;
+
+ u32 src_virt_wq : 24;
+ u32 src_vfid : 8;
+
+ u32 reserved2;
+
+ union {
+ u32 wqe_addr_low;
+ u32 wqe_offset;
+ };
+
+ u32 wqe_addr_high;
+
+ u32 client_data_unit : 14;
+ u32 reserved3 : 18;
+
+ u32 tx_oob_data_size;
+
+ u32 chunk_offset : 21;
+ u32 reserved4 : 11;
+}; /* HW DATA */
+
+struct hwc_tx_oob {
+ u32 reserved1;
+
+ u32 reserved2;
+
+ u32 vrq_id : 24;
+ u32 dest_vfid : 8;
+
+ u32 vrcq_id : 24;
+ u32 reserved3 : 8;
+
+ u32 vscq_id : 24;
+ u32 loopback : 1;
+ u32 lso_override: 1;
+ u32 dest_pf : 1;
+ u32 reserved4 : 5;
+
+ u32 vsq_id : 24;
+ u32 reserved5 : 8;
+}; /* HW DATA */
+
+struct hwc_work_request {
+ void *buf_va;
+ void *buf_sge_addr;
+ u32 buf_len;
+ u32 msg_size;
+
+ struct gdma_wqe_request wqe_req;
+ struct hwc_tx_oob tx_oob;
+
+ struct gdma_sge sge;
+};
+
+/* hwc_dma_buf represents the array of in-flight WQEs.
+ * mem_info, also known as the GDMA mapped memory, is partitioned and
+ * used by the in-flight WQEs.
+ * The number of WQEs is determined by the number of in-flight messages.
+ */
+struct hwc_dma_buf {
+ struct gdma_mem_info mem_info;
+
+ u32 gpa_mkey;
+
+ u32 num_reqs;
+ struct hwc_work_request reqs[] __counted_by(num_reqs);
+};
+
+typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
+ const struct hwc_rx_oob *rx_oob);
+
+typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
+ const struct hwc_rx_oob *rx_oob);
+
+struct hwc_cq {
+ struct hw_channel_context *hwc;
+
+ struct gdma_queue *gdma_cq;
+ struct gdma_queue *gdma_eq;
+ struct gdma_comp *comp_buf;
+ u16 queue_depth;
+
+ hwc_rx_event_handler_t *rx_event_handler;
+ void *rx_event_ctx;
+
+ hwc_tx_event_handler_t *tx_event_handler;
+ void *tx_event_ctx;
+};
+
+struct hwc_wq {
+ struct hw_channel_context *hwc;
+
+ struct gdma_queue *gdma_wq;
+ struct hwc_dma_buf *msg_buf;
+ u16 queue_depth;
+
+ struct hwc_cq *hwc_cq;
+};
+
+struct hwc_caller_ctx {
+ struct completion comp_event;
+ void *output_buf;
+ u32 output_buflen;
+
+ u32 error; /* Linux error code */
+ u32 status_code;
+};
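
A sketch of how this context pairs a request with its response: the sender blocks on comp_event while the RX handler fills output_buf, sets error/status_code, and completes it. The error mapping below is illustrative, not the driver's actual policy:

	static int example_wait_for_resp(struct hwc_caller_ctx *ctx, u32 timeout_ms)
	{
		if (!wait_for_completion_timeout(&ctx->comp_event,
						 msecs_to_jiffies(timeout_ms)))
			return -ETIMEDOUT;

		return ctx->error ? -EPROTO : 0;
	}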
+
+struct hw_channel_context {
+ struct gdma_dev *gdma_dev;
+ struct device *dev;
+
+ u16 num_inflight_msg;
+ u32 max_req_msg_size;
+
+ u16 hwc_init_q_depth_max;
+ u32 hwc_init_max_req_msg_size;
+ u32 hwc_init_max_resp_msg_size;
+
+ struct completion hwc_init_eqe_comp;
+
+ struct hwc_wq *rxq;
+ struct hwc_wq *txq;
+ struct hwc_cq *cq;
+
+ struct semaphore sema;
+ struct gdma_resource inflight_msg_res;
+
+ u32 pf_dest_vrq_id;
+ u32 pf_dest_vrcq_id;
+ u32 hwc_timeout;
+
+ struct hwc_caller_ctx *caller_ctx;
+};
+
+int mana_hwc_create_channel(struct gdma_context *gc);
+void mana_hwc_destroy_channel(struct gdma_context *gc);
+
+int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ const void *req, u32 resp_len, void *resp);
+
+#endif /* _HW_CHANNEL_H */
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
new file mode 100644
index 000000000000..76147feb0d10
--- /dev/null
+++ b/include/net/mana/mana.h
@@ -0,0 +1,799 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _MANA_H
+#define _MANA_H
+
+#include <net/xdp.h>
+
+#include "gdma.h"
+#include "hw_channel.h"
+
+/* Microsoft Azure Network Adapter (MANA)'s definitions
+ *
+ * Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+/* MANA protocol version */
+#define MANA_MAJOR_VERSION 0
+#define MANA_MINOR_VERSION 1
+#define MANA_MICRO_VERSION 1
+
+typedef u64 mana_handle_t;
+#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
+
+enum TRI_STATE {
+ TRI_STATE_UNKNOWN = -1,
+ TRI_STATE_FALSE = 0,
+ TRI_STATE_TRUE = 1
+};
+
+/* The number of entries in the hardware indirection table must be a power of 2 */
+#define MANA_INDIRECT_TABLE_SIZE 64
+#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
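
Because the table size is a power of two, a queue can be selected with a mask rather than a modulo; a minimal sketch:

	static u32 example_pick_rxq(const u32 *indir_table, u32 pkt_hash)
	{
		return indir_table[pkt_hash & MANA_INDIRECT_TABLE_MASK];
	}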
+
+/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
+#define MANA_HASH_KEY_SIZE 40
+
+#define COMP_ENTRY_SIZE 64
+
+#define RX_BUFFERS_PER_QUEUE 512
+#define MANA_RX_DATA_ALIGN 64
+
+#define MAX_SEND_BUFFERS_PER_QUEUE 256
+
+#define EQ_SIZE (8 * PAGE_SIZE)
+#define LOG2_EQ_THROTTLE 3
+
+#define MAX_PORTS_IN_MANA_DEV 256
+
+/* Update this count whenever the respective structures are changed */
+#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_TX_COUNT 11
+
+struct mana_stats_rx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ struct u64_stats_sync syncp;
+};
+
+struct mana_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 short_pkt_fmt;
+ u64 long_pkt_fmt;
+ u64 csum_partial;
+ u64 mana_map_err;
+ struct u64_stats_sync syncp;
+};
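
The syncp member exists so the 64-bit counters can be read consistently on 32-bit machines; a datapath update would be bracketed like this (sketch):

	static void example_count_tx(struct mana_stats_tx *st, u64 bytes)
	{
		u64_stats_update_begin(&st->syncp);
		st->packets++;
		st->bytes += bytes;
		u64_stats_update_end(&st->syncp);
	}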
+
+struct mana_txq {
+ struct gdma_queue *gdma_sq;
+
+ union {
+ u32 gdma_txq_id;
+ struct {
+ u32 reserved1 : 10;
+ u32 vsq_frame : 14;
+ u32 reserved2 : 8;
+ };
+ };
+
+ u16 vp_offset;
+
+ struct net_device *ndev;
+
+ /* The SKBs are sent to the HW and we are waiting for the CQEs. */
+ struct sk_buff_head pending_skbs;
+ struct netdev_queue *net_txq;
+
+ atomic_t pending_sends;
+
+ struct mana_stats_tx stats;
+};
+
+/* skb data and frags dma mappings */
+struct mana_skb_head {
+	/* GSO pkts may have 2 SGEs for the linear part */
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
+
+ u32 size[MAX_SKB_FRAGS + 2];
+};
+
+#define MANA_HEADROOM sizeof(struct mana_skb_head)
+
+enum mana_tx_pkt_format {
+ MANA_SHORT_PKT_FMT = 0,
+ MANA_LONG_PKT_FMT = 1,
+};
+
+struct mana_tx_short_oob {
+ u32 pkt_fmt : 2;
+ u32 is_outer_ipv4 : 1;
+ u32 is_outer_ipv6 : 1;
+ u32 comp_iphdr_csum : 1;
+ u32 comp_tcp_csum : 1;
+ u32 comp_udp_csum : 1;
+	u32 suppress_txcqe_gen	: 1;
+ u32 vcq_num : 24;
+
+ u32 trans_off : 10; /* Transport header offset */
+ u32 vsq_frame : 14;
+ u32 short_vp_offset : 8;
+}; /* HW DATA */
+
+struct mana_tx_long_oob {
+ u32 is_encap : 1;
+ u32 inner_is_ipv6 : 1;
+ u32 inner_tcp_opt : 1;
+ u32 inject_vlan_pri_tag : 1;
+ u32 reserved1 : 12;
+ u32 pcp : 3; /* 802.1Q */
+ u32 dei : 1; /* 802.1Q */
+ u32 vlan_id : 12; /* 802.1Q */
+
+ u32 inner_frame_offset : 10;
+ u32 inner_ip_rel_offset : 6;
+ u32 long_vp_offset : 12;
+ u32 reserved2 : 4;
+
+ u32 reserved3;
+ u32 reserved4;
+}; /* HW DATA */
+
+struct mana_tx_oob {
+ struct mana_tx_short_oob s_oob;
+ struct mana_tx_long_oob l_oob;
+}; /* HW DATA */
+
+enum mana_cq_type {
+ MANA_CQ_TYPE_RX,
+ MANA_CQ_TYPE_TX,
+};
+
+enum mana_cqe_type {
+ CQE_INVALID = 0,
+ CQE_RX_OKAY = 1,
+ CQE_RX_COALESCED_4 = 2,
+ CQE_RX_OBJECT_FENCE = 3,
+ CQE_RX_TRUNCATED = 4,
+
+ CQE_TX_OKAY = 32,
+ CQE_TX_SA_DROP = 33,
+ CQE_TX_MTU_DROP = 34,
+ CQE_TX_INVALID_OOB = 35,
+ CQE_TX_INVALID_ETH_TYPE = 36,
+ CQE_TX_HDR_PROCESSING_ERROR = 37,
+ CQE_TX_VF_DISABLED = 38,
+ CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
+ CQE_TX_VPORT_DISABLED = 40,
+ CQE_TX_VLAN_TAGGING_VIOLATION = 41,
+};
+
+#define MANA_CQE_COMPLETION 1
+
+struct mana_cqe_header {
+ u32 cqe_type : 6;
+ u32 client_type : 2;
+ u32 vendor_err : 24;
+}; /* HW DATA */
+
+/* NDIS HASH Types */
+#define NDIS_HASH_IPV4 BIT(0)
+#define NDIS_HASH_TCP_IPV4 BIT(1)
+#define NDIS_HASH_UDP_IPV4 BIT(2)
+#define NDIS_HASH_IPV6 BIT(3)
+#define NDIS_HASH_TCP_IPV6 BIT(4)
+#define NDIS_HASH_UDP_IPV6 BIT(5)
+#define NDIS_HASH_IPV6_EX BIT(6)
+#define NDIS_HASH_TCP_IPV6_EX BIT(7)
+#define NDIS_HASH_UDP_IPV6_EX BIT(8)
+
+#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
+#define MANA_HASH_L4 \
+ (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
+ NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
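
A sketch of how an RX path might translate the completion's rx_hashtype bits into an skb hash using these groupings (skb_set_hash() is the standard kernel helper):

	static void example_set_rx_hash(struct sk_buff *skb, u32 hashtype,
					u32 pkt_hash)
	{
		if (hashtype & MANA_HASH_L4)
			skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L4);
		else if (hashtype & MANA_HASH_L3)
			skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L3);
	}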
+
+struct mana_rxcomp_perpkt_info {
+ u32 pkt_len : 16;
+ u32 reserved1 : 16;
+ u32 reserved2;
+ u32 pkt_hash;
+}; /* HW DATA */
+
+#define MANA_RXCOMP_OOB_NUM_PPI 4
+
+/* Receive completion OOB */
+struct mana_rxcomp_oob {
+ struct mana_cqe_header cqe_hdr;
+
+ u32 rx_vlan_id : 12;
+ u32 rx_vlantag_present : 1;
+ u32 rx_outer_iphdr_csum_succeed : 1;
+ u32 rx_outer_iphdr_csum_fail : 1;
+ u32 reserved1 : 1;
+ u32 rx_hashtype : 9;
+ u32 rx_iphdr_csum_succeed : 1;
+ u32 rx_iphdr_csum_fail : 1;
+ u32 rx_tcp_csum_succeed : 1;
+ u32 rx_tcp_csum_fail : 1;
+ u32 rx_udp_csum_succeed : 1;
+ u32 rx_udp_csum_fail : 1;
+ u32 reserved2 : 1;
+
+ struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
+
+ u32 rx_wqe_offset;
+}; /* HW DATA */
+
+struct mana_tx_comp_oob {
+ struct mana_cqe_header cqe_hdr;
+
+ u32 tx_data_offset;
+
+ u32 tx_sgl_offset : 5;
+ u32 tx_wqe_offset : 27;
+
+ u32 reserved[12];
+}; /* HW DATA */
+
+struct mana_rxq;
+
+#define CQE_POLLING_BUFFER 512
+
+struct mana_cq {
+ struct gdma_queue *gdma_cq;
+
+	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
+ u32 gdma_id;
+
+ /* Type of the CQ: TX or RX */
+ enum mana_cq_type type;
+
+ /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
+	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
+ */
+ struct mana_rxq *rxq;
+
+ /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
+	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
+ */
+ struct mana_txq *txq;
+
+	/* Buffer which the CQ handler can copy the CQEs into. */
+ struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];
+
+ /* NAPI data */
+ struct napi_struct napi;
+ int work_done;
+ int budget;
+};
+
+struct mana_recv_buf_oob {
+ /* A valid GDMA work request representing the data buffer. */
+ struct gdma_wqe_request wqe_req;
+
+ void *buf_va;
+ bool from_pool; /* allocated from a page pool */
+
+	/* SGL of the buffer to be sent as part of the work request. */
+ u32 num_sge;
+ struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
+
+ /* Required to store the result of mana_gd_post_work_request.
+ * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
+ * work queue when the WQE is consumed.
+ */
+ struct gdma_posted_wqe_info wqe_inf;
+};
+
+#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
+ + ETH_HLEN)
+
+#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
+
+struct mana_rxq {
+ struct gdma_queue *gdma_rq;
+ /* Cache the gdma receive queue id */
+ u32 gdma_id;
+
+ /* Index of RQ in the vPort, not gdma receive queue id */
+ u32 rxq_idx;
+
+ u32 datasize;
+ u32 alloc_size;
+ u32 headroom;
+
+ mana_handle_t rxobj;
+
+ struct mana_cq rx_cq;
+
+ struct completion fence_event;
+
+ struct net_device *ndev;
+
+ /* Total number of receive buffers to be allocated */
+ u32 num_rx_buf;
+
+ u32 buf_index;
+
+ struct mana_stats_rx stats;
+
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+ void *xdp_save_va; /* for reusing */
+ bool xdp_flush;
+ int xdp_rc; /* XDP redirect return code */
+
+ struct page_pool *page_pool;
+
+ /* MUST BE THE LAST MEMBER:
+ * Each receive buffer has an associated mana_recv_buf_oob.
+ */
+ struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
+};
+
+struct mana_tx_qp {
+ struct mana_txq txq;
+
+ struct mana_cq tx_cq;
+
+ mana_handle_t tx_object;
+};
+
+struct mana_ethtool_stats {
+ u64 stop_queue;
+ u64 wake_queue;
+ u64 hc_rx_discards_no_wqe;
+ u64 hc_rx_err_vport_disabled;
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ u64 hc_tx_err_gf_disabled;
+ u64 hc_tx_err_vport_disabled;
+ u64 hc_tx_err_inval_vportoffset_pkt;
+ u64 hc_tx_err_vlan_enforcement;
+ u64 hc_tx_err_eth_type_enforcement;
+ u64 hc_tx_err_sa_enforcement;
+ u64 hc_tx_err_sqpdid_enforcement;
+ u64 hc_tx_err_cqpdid_enforcement;
+ u64 hc_tx_err_mtu_violation;
+ u64 hc_tx_err_inval_oob;
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ u64 hc_tx_err_gdma;
+ u64 tx_cqe_err;
+ u64 tx_cqe_unknown_type;
+ u64 rx_coalesced_err;
+ u64 rx_cqe_unknown_type;
+};
+
+struct mana_context {
+ struct gdma_dev *gdma_dev;
+
+ u16 num_ports;
+
+ struct mana_eq *eqs;
+
+ struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
+};
+
+struct mana_port_context {
+ struct mana_context *ac;
+ struct net_device *ndev;
+
+ u8 mac_addr[ETH_ALEN];
+
+ enum TRI_STATE rss_state;
+
+ mana_handle_t default_rxobj;
+ bool tx_shortform_allowed;
+ u16 tx_vp_offset;
+
+ struct mana_tx_qp *tx_qp;
+
+ /* Indirection Table for RX & TX. The values are queue indexes */
+ u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+
+ /* Indirection table containing RxObject Handles */
+ mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+
+ /* Hash key used by the NIC */
+ u8 hashkey[MANA_HASH_KEY_SIZE];
+
+	/* This points to an array of num_queues RQ pointers. */
+ struct mana_rxq **rxqs;
+
+ /* pre-allocated rx buffer array */
+ void **rxbufs_pre;
+ dma_addr_t *das_pre;
+ int rxbpre_total;
+ u32 rxbpre_datasize;
+ u32 rxbpre_alloc_size;
+ u32 rxbpre_headroom;
+
+ struct bpf_prog *bpf_prog;
+
+ /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
+ unsigned int max_queues;
+ unsigned int num_queues;
+
+ mana_handle_t port_handle;
+ mana_handle_t pf_filter_handle;
+
+ /* Mutex for sharing access to vport_use_count */
+ struct mutex vport_mutex;
+ int vport_use_count;
+
+ u16 port_idx;
+
+ bool port_is_up;
+ bool port_st_save; /* Saved port state */
+
+ struct mana_ethtool_stats eth_stats;
+};
+
+netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
+ bool update_hash, bool update_tab);
+
+int mana_alloc_queues(struct net_device *ndev);
+int mana_attach(struct net_device *ndev);
+int mana_detach(struct net_device *ndev, bool from_close);
+
+int mana_probe(struct gdma_dev *gd, bool resuming);
+void mana_remove(struct gdma_dev *gd, bool suspending);
+
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags);
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len);
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+void mana_query_gf_stats(struct mana_port_context *apc);
+
+extern const struct ethtool_ops mana_ethtool_ops;
+
+/* A CQ can be created without being associated with any EQ */
+#define GDMA_CQ_NO_EQ 0xffff
+
+struct mana_obj_spec {
+ u32 queue_index;
+ u64 gdma_region;
+ u32 queue_size;
+ u32 attached_eq;
+ u32 modr_ctx_id;
+};
+
+enum mana_command_code {
+ MANA_QUERY_DEV_CONFIG = 0x20001,
+ MANA_QUERY_GF_STAT = 0x20002,
+ MANA_CONFIG_VPORT_TX = 0x20003,
+ MANA_CREATE_WQ_OBJ = 0x20004,
+ MANA_DESTROY_WQ_OBJ = 0x20005,
+ MANA_FENCE_RQ = 0x20006,
+ MANA_CONFIG_VPORT_RX = 0x20007,
+ MANA_QUERY_VPORT_CONFIG = 0x20008,
+
+ /* Privileged commands for the PF mode */
+ MANA_REGISTER_FILTER = 0x28000,
+ MANA_DEREGISTER_FILTER = 0x28001,
+ MANA_REGISTER_HW_PORT = 0x28003,
+ MANA_DEREGISTER_HW_PORT = 0x28004,
+};
+
+/* Query Device Configuration */
+struct mana_query_device_cfg_req {
+ struct gdma_req_hdr hdr;
+
+ /* MANA Nic Driver Capability flags */
+ u64 mn_drv_cap_flags1;
+ u64 mn_drv_cap_flags2;
+ u64 mn_drv_cap_flags3;
+ u64 mn_drv_cap_flags4;
+
+ u32 proto_major_ver;
+ u32 proto_minor_ver;
+ u32 proto_micro_ver;
+
+ u32 reserved;
+}; /* HW DATA */
+
+struct mana_query_device_cfg_resp {
+ struct gdma_resp_hdr hdr;
+
+ u64 pf_cap_flags1;
+ u64 pf_cap_flags2;
+ u64 pf_cap_flags3;
+ u64 pf_cap_flags4;
+
+ u16 max_num_vports;
+ u16 reserved;
+ u32 max_num_eqs;
+
+ /* response v2: */
+ u16 adapter_mtu;
+ u16 reserved2;
+ u32 reserved3;
+}; /* HW DATA */
+
+/* Query vPort Configuration */
+struct mana_query_vport_cfg_req {
+ struct gdma_req_hdr hdr;
+ u32 vport_index;
+}; /* HW DATA */
+
+struct mana_query_vport_cfg_resp {
+ struct gdma_resp_hdr hdr;
+ u32 max_num_sq;
+ u32 max_num_rq;
+ u32 num_indirection_ent;
+ u32 reserved1;
+ u8 mac_addr[6];
+ u8 reserved2[2];
+ mana_handle_t vport;
+}; /* HW DATA */
+
+/* Configure vPort */
+struct mana_config_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u32 pdid;
+ u32 doorbell_pageid;
+}; /* HW DATA */
+
+struct mana_config_vport_resp {
+ struct gdma_resp_hdr hdr;
+ u16 tx_vport_offset;
+ u8 short_form_allowed;
+ u8 reserved;
+}; /* HW DATA */
+
+/* Create WQ Object */
+struct mana_create_wqobj_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u32 wq_type;
+ u32 reserved;
+ u64 wq_gdma_region;
+ u64 cq_gdma_region;
+ u32 wq_size;
+ u32 cq_size;
+ u32 cq_moderation_ctx_id;
+ u32 cq_parent_qid;
+}; /* HW DATA */
+
+struct mana_create_wqobj_resp {
+ struct gdma_resp_hdr hdr;
+ u32 wq_id;
+ u32 cq_id;
+ mana_handle_t wq_obj;
+}; /* HW DATA */
+
+/* Destroy WQ Object */
+struct mana_destroy_wqobj_req {
+ struct gdma_req_hdr hdr;
+ u32 wq_type;
+ u32 reserved;
+ mana_handle_t wq_obj_handle;
+}; /* HW DATA */
+
+struct mana_destroy_wqobj_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Fence RQ */
+struct mana_fence_rq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t wq_obj_handle;
+}; /* HW DATA */
+
+struct mana_fence_rq_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Query GF stats */
+struct mana_query_gf_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_gf_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+ /* rx errors/discards */
+ u64 rx_discards_nowqe;
+ u64 rx_err_vport_disabled;
+ /* rx bytes/packets */
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ /* tx errors */
+ u64 tx_err_gf_disabled;
+ u64 tx_err_vport_disabled;
+ u64 tx_err_inval_vport_offset_pkt;
+ u64 tx_err_vlan_enforcement;
+ u64 tx_err_ethtype_enforcement;
+ u64 tx_err_SA_enforcement;
+ u64 tx_err_SQPDID_enforcement;
+ u64 tx_err_CQPDID_enforcement;
+ u64 tx_err_mtu_violation;
+ u64 tx_err_inval_oob;
+ /* tx bytes/packets */
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ /* tx error */
+ u64 tx_err_gdma;
+}; /* HW DATA */
+
+/* Configure vPort Rx Steering */
+struct mana_cfg_rx_steer_req_v2 {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u16 num_indir_entries;
+ u16 indir_tab_offset;
+ u32 rx_enable;
+ u32 rss_enable;
+ u8 update_default_rxobj;
+ u8 update_hashkey;
+ u8 update_indir_tab;
+ u8 reserved;
+ mana_handle_t default_rxobj;
+ u8 hashkey[MANA_HASH_KEY_SIZE];
+ u8 cqe_coalescing_enable;
+ u8 reserved2[7];
+}; /* HW DATA */
+
+struct mana_cfg_rx_steer_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ u16 attached_gfid;
+ u8 is_pf_default_vport;
+ u8 reserved1;
+ u8 allow_all_ether_types;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u8 mac_addr[6];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+ u16 reserved5;
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Requested GF stats Flags */
+/* Rx discards/Errors */
+#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
+#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
+/* Rx bytes/pkts */
+#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
+#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
+#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
+#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
+#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
+#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
+#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
+/* Tx errors */
+#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
+#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
+#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
+ 0x0000000000000800
+#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
+#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
+ 0x0000000000002000
+#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
+#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
+#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
+#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
+#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
+/* Tx bytes/pkts */
+#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
+#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
+#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
+#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
+#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
+#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
+#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
+/* Tx error */
+#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
+
+#define MANA_MAX_NUM_QUEUES 64
+
+#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
+
+struct mana_tx_package {
+ struct gdma_wqe_request wqe_req;
+ struct gdma_sge sgl_array[5];
+ struct gdma_sge *sgl_ptr;
+
+ struct mana_tx_oob tx_oob;
+
+ struct gdma_posted_wqe_info wqe_info;
+};
+
+int mana_create_wq_obj(struct mana_port_context *apc,
+ mana_handle_t vport,
+ u32 wq_type, struct mana_obj_spec *wq_spec,
+ struct mana_obj_spec *cq_spec,
+ mana_handle_t *wq_obj);
+
+void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
+ mana_handle_t wq_obj);
+
+int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
+ u32 doorbell_pg_id);
+void mana_uncfg_vport(struct mana_port_context *apc);
+#endif /* _MANA_H */
diff --git a/include/net/mana/mana_auxiliary.h b/include/net/mana/mana_auxiliary.h
new file mode 100644
index 000000000000..373d59756846
--- /dev/null
+++ b/include/net/mana/mana_auxiliary.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022, Microsoft Corporation. */
+
+#include "mana.h"
+#include <linux/auxiliary_bus.h>
+
+struct mana_adev {
+ struct auxiliary_device adev;
+ struct gdma_dev *mdev;
+};
diff --git a/include/net/mana/shm_channel.h b/include/net/mana/shm_channel.h
new file mode 100644
index 000000000000..5199b41497ff
--- /dev/null
+++ b/include/net/mana/shm_channel.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _SHM_CHANNEL_H
+#define _SHM_CHANNEL_H
+
+struct shm_channel {
+ struct device *dev;
+ void __iomem *base;
+};
+
+void mana_smc_init(struct shm_channel *sc, struct device *dev,
+ void __iomem *base);
+
+int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ u64 cq_addr, u64 rq_addr, u64 sq_addr,
+ u32 eq_msix_index);
+
+int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf);
+
+#endif /* _SHM_CHANNEL_H */
diff --git a/include/net/mctp.h b/include/net/mctp.h
new file mode 100644
index 000000000000..7b17c52e8ce2
--- /dev/null
+++ b/include/net/mctp.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Management Component Transport Protocol (MCTP)
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __NET_MCTP_H
+#define __NET_MCTP_H
+
+#include <linux/bits.h>
+#include <linux/mctp.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+/* MCTP packet definitions */
+struct mctp_hdr {
+ u8 ver;
+ u8 dest;
+ u8 src;
+ u8 flags_seq_tag;
+};
+
+#define MCTP_VER_MIN 1
+#define MCTP_VER_MAX 1
+
+/* Definitions for flags_seq_tag field */
+#define MCTP_HDR_FLAG_SOM BIT(7)
+#define MCTP_HDR_FLAG_EOM BIT(6)
+#define MCTP_HDR_FLAG_TO BIT(3)
+#define MCTP_HDR_FLAGS GENMASK(5, 3)
+#define MCTP_HDR_SEQ_SHIFT 4
+#define MCTP_HDR_SEQ_MASK GENMASK(1, 0)
+#define MCTP_HDR_TAG_SHIFT 0
+#define MCTP_HDR_TAG_MASK GENMASK(2, 0)
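
A sketch of extracting the sequence number and tag from flags_seq_tag using the shift/mask pairs above:

	static u8 example_mctp_seq(const struct mctp_hdr *hdr)
	{
		return (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
	}

	static u8 example_mctp_tag(const struct mctp_hdr *hdr)
	{
		return (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
	}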
+
+#define MCTP_INITIAL_DEFAULT_NET 1
+
+static inline bool mctp_address_unicast(mctp_eid_t eid)
+{
+ return eid >= 8 && eid < 255;
+}
+
+static inline bool mctp_address_broadcast(mctp_eid_t eid)
+{
+ return eid == 255;
+}
+
+static inline bool mctp_address_null(mctp_eid_t eid)
+{
+ return eid == 0;
+}
+
+static inline bool mctp_address_matches(mctp_eid_t match, mctp_eid_t eid)
+{
+ return match == eid || match == MCTP_ADDR_ANY;
+}
+
+static inline struct mctp_hdr *mctp_hdr(struct sk_buff *skb)
+{
+ return (struct mctp_hdr *)skb_network_header(skb);
+}
+
+/* socket implementation */
+struct mctp_sock {
+ struct sock sk;
+
+ /* bind() params */
+ unsigned int bind_net;
+ mctp_eid_t bind_addr;
+ __u8 bind_type;
+
+ /* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
+ bool addr_ext;
+
+	/* list of mctp_sk_key, for incoming tag lookup; updates protected
+ * by sk->net->keys_lock
+ */
+ struct hlist_head keys;
+
+ /* mechanism for expiring allocated keys; will release an allocated
+ * tag, and any netdev state for a request/response pairing
+ */
+ struct timer_list key_expiry;
+};
+
+/* Key for matching incoming packets to sockets or reassembly contexts.
+ * Packets are matched on (peer EID, local EID, tag).
+ *
+ * Lifetime / locking requirements:
+ *
+ * - individual key data (ie, the struct itself) is protected by key->lock;
+ * changes must be made with that lock held.
+ *
+ * - the lookup fields: peer_addr, local_addr and tag are set before the
+ * key is added to lookup lists, and never updated.
+ *
+ * - A ref to the key must be held (through key->refs) if a pointer to the
+ * key is to be accessed after key->lock is released.
+ *
+ * - a mctp_sk_key contains a reference to a struct sock; this is valid
+ * for the life of the key. On sock destruction (through unhash), the key is
+ * removed from lists (see below), and marked invalid.
+ *
+ * - these mctp_sk_keys appear on two lists:
+ * 1) the struct mctp_sock->keys list
+ * 2) the struct netns_mctp->keys list
+ *
+ *   presence on these lists requires a (single) refcount to be held; both
+ * lists are updated as a single operation.
+ *
+ * Updates and lookups in either list are performed under the
+ * netns_mctp->keys lock. Lookup functions will need to lock the key and
+ * take a reference before unlocking the keys_lock. Consequently, the list's
+ * keys_lock *cannot* be acquired with the individual key->lock held.
+ *
+ * - a key may have a sk_buff attached as part of an in-progress message
+ * reassembly (->reasm_head). The reasm data is protected by the individual
+ * key->lock.
+ *
+ * - there are two destruction paths for a mctp_sk_key:
+ *
+ * - through socket unhash (see mctp_sk_unhash). This performs the list
+ * removal under keys_lock.
+ *
+ * - where a key is established to receive a reply message: after receiving
+ * the (complete) reply, or during reassembly errors. Here, we clean up
+ * the reassembly context (marking reasm_dead, to prevent another from
+ * starting), and remove the socket from the netns & socket lists.
+ *
+ * - through an expiry timeout, on a per-socket timer
+ */
+struct mctp_sk_key {
+ unsigned int net;
+ mctp_eid_t peer_addr;
+ mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */
+ __u8 tag; /* incoming tag match; invert TO for local */
+
+ /* we hold a ref to sk when set */
+ struct sock *sk;
+
+ /* routing lookup list */
+ struct hlist_node hlist;
+
+ /* per-socket list */
+ struct hlist_node sklist;
+
+ /* lock protects against concurrent updates to the reassembly and
+ * expiry data below.
+ */
+ spinlock_t lock;
+
+ /* Keys are referenced during the output path, which may sleep */
+ refcount_t refs;
+
+ /* incoming fragment reassembly context */
+ struct sk_buff *reasm_head;
+ struct sk_buff **reasm_tailp;
+ bool reasm_dead;
+ u8 last_seq;
+
+ /* key validity */
+ bool valid;
+
+ /* expiry timeout; valid (above) cleared on expiry */
+ unsigned long expiry;
+
+ /* free to use for device flow state tracking. Initialised to
+ * zero on initial key creation
+ */
+ unsigned long dev_flow_state;
+ struct mctp_dev *dev;
+
+ /* a tag allocated with SIOCMCTPALLOCTAG ioctl will not expire
+ * automatically on timeout or response, instead SIOCMCTPDROPTAG
+ * is used.
+ */
+ bool manual_alloc;
+};
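
Following the locking rules documented above, a lookup skeleton (illustrative; the keys list and its lock live in per-netns state not shown in this header) locks the key and takes a reference before dropping the list lock:

	static struct mctp_sk_key *example_lookup(struct hlist_head *keys,
						  spinlock_t *keys_lock,
						  mctp_eid_t peer, u8 tag)
	{
		struct mctp_sk_key *key, *ret = NULL;

		spin_lock(keys_lock);
		hlist_for_each_entry(key, keys, hlist) {
			if (key->peer_addr != peer || key->tag != tag)
				continue;
			spin_lock(&key->lock);
			if (key->valid) {
				refcount_inc(&key->refs);
				ret = key;
			}
			spin_unlock(&key->lock);
			break;
		}
		spin_unlock(keys_lock);

		return ret;
	}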
+
+struct mctp_skb_cb {
+ unsigned int magic;
+ unsigned int net;
+ int ifindex; /* extended/direct addressing if set */
+ mctp_eid_t src;
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
+};
+
+/* skb control-block accessors with a little extra debugging for initial
+ * development.
+ *
+ * TODO: remove checks & mctp_skb_cb->magic; replace callers of __mctp_cb
+ * with mctp_cb().
+ *
+ * __mctp_cb() is only for the initial ingress code; we should see ->magic set
+ * at all times after this.
+ */
+static inline struct mctp_skb_cb *__mctp_cb(struct sk_buff *skb)
+{
+ struct mctp_skb_cb *cb = (void *)skb->cb;
+
+ cb->magic = 0x4d435450;
+ return cb;
+}
+
+static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
+{
+ struct mctp_skb_cb *cb = (void *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
+ WARN_ON(cb->magic != 0x4d435450);
+ return (void *)(skb->cb);
+}
+
+/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
+ * indicating the flow to the device driver.
+ */
+struct mctp_flow {
+ struct mctp_sk_key *key;
+};
+
+/* Route definition.
+ *
+ * These are held in the pernet->mctp.routes list, with RCU protection for
+ * removed routes. We hold a reference to the netdev; routes need to be
+ * dropped on NETDEV_UNREGISTER events.
+ *
+ * Updates to the route table are performed under rtnl; all reads under RCU,
+ * so routes cannot be referenced over an RCU grace period. Specifically: a
+ * caller cannot block between mctp_route_lookup and mctp_route_release()
+ */
+struct mctp_route {
+ mctp_eid_t min, max;
+
+ unsigned char type;
+ unsigned int mtu;
+ struct mctp_dev *dev;
+ int (*output)(struct mctp_route *route,
+ struct sk_buff *skb);
+
+ struct list_head list;
+ refcount_t refs;
+ struct rcu_head rcu;
+};
+
+/* route interfaces */
+struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr);
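
A sketch of the RCU contract described in the route comment: nothing may block between lookup and release. mctp_route_release() is named in that comment but not declared in this header, so it is assumed here:

	static int example_route_mtu(struct net *net, unsigned int dnet,
				     mctp_eid_t daddr, unsigned int *mtu)
	{
		struct mctp_route *rt;

		rt = mctp_route_lookup(net, dnet, daddr);
		if (!rt)
			return -EHOSTUNREACH;

		*mtu = rt->mtu;
		mctp_route_release(rt);	/* assumed release helper */

		return 0;
	}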
+
+/* always takes ownership of skb */
+int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+
+void mctp_key_unref(struct mctp_sk_key *key);
+struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
+ bool manual, u8 *tagp);
+
+/* routing <--> device interface */
+unsigned int mctp_default_net(struct net *net);
+int mctp_default_net_set(struct net *net, unsigned int index);
+int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr);
+int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr);
+void mctp_route_remove_dev(struct mctp_dev *mdev);
+
+/* neighbour definitions */
+enum mctp_neigh_source {
+ MCTP_NEIGH_STATIC,
+ MCTP_NEIGH_DISCOVER,
+};
+
+struct mctp_neigh {
+ struct mctp_dev *dev;
+ mctp_eid_t eid;
+ enum mctp_neigh_source source;
+
+ unsigned char ha[MAX_ADDR_LEN];
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+int mctp_neigh_init(void);
+void mctp_neigh_exit(void);
+
+// ret_hwaddr may be NULL, otherwise must have space for MAX_ADDR_LEN
+int mctp_neigh_lookup(struct mctp_dev *dev, mctp_eid_t eid,
+ void *ret_hwaddr);
+void mctp_neigh_remove_dev(struct mctp_dev *mdev);
+
+int mctp_routes_init(void);
+void mctp_routes_exit(void);
+
+void mctp_device_init(void);
+void mctp_device_exit(void);
+
+#endif /* __NET_MCTP_H */
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
new file mode 100644
index 000000000000..5c0d04b5c12c
--- /dev/null
+++ b/include/net/mctpdevice.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Management Component Transport Protocol (MCTP) - device
+ * definitions.
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __NET_MCTPDEVICE_H
+#define __NET_MCTPDEVICE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/refcount.h>
+
+struct mctp_sk_key;
+
+struct mctp_dev {
+ struct net_device *dev;
+
+ refcount_t refs;
+
+ unsigned int net;
+
+ const struct mctp_netdev_ops *ops;
+
+ /* Only modified under RTNL. Reads have addrs_lock held */
+ u8 *addrs;
+ size_t num_addrs;
+ spinlock_t addrs_lock;
+
+ struct rcu_head rcu;
+};
+
+struct mctp_netdev_ops {
+ void (*release_flow)(struct mctp_dev *dev,
+ struct mctp_sk_key *key);
+};
+
+#define MCTP_INITIAL_DEFAULT_NET 1
+
+struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
+struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
+
+int mctp_register_netdev(struct net_device *dev,
+ const struct mctp_netdev_ops *ops);
+void mctp_unregister_netdev(struct net_device *dev);
+
+void mctp_dev_hold(struct mctp_dev *mdev);
+void mctp_dev_put(struct mctp_dev *mdev);
+
+void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+
+#endif /* __NET_MCTPDEVICE_H */
diff --git a/include/net/mld.h b/include/net/mld.h
index 496bddb59942..c07359808493 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -92,6 +92,9 @@ struct mld2_query {
#define MLD_EXP_MIN_LIMIT 32768UL
#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
+#define MLD_MAX_QUEUE 8
+#define MLD_MAX_SKBS 32
+
static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
{
/* RFC3810, 5.1.3. Maximum Response Code */
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 9deb3a3735da..0c71c27979fb 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -6,6 +6,9 @@
#ifndef _NET_MPLS_IPTUNNEL_H
#define _NET_MPLS_IPTUNNEL_H 1
+#include <linux/types.h>
+#include <net/lwtunnel.h>
+
struct mpls_iptunnel_encap {
u8 labels;
u8 ttl_propagate;
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 3525d2822abe..fb996124b3d5 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -12,6 +12,8 @@
#include <linux/tcp.h>
#include <linux/types.h>
+struct mptcp_info;
+struct mptcp_sock;
struct seq_file;
/* MPTCP sk_buff extension data */
@@ -23,43 +25,99 @@ struct mptcp_ext {
u64 data_seq;
u32 subflow_seq;
u16 data_len;
+ __sum16 csum;
u8 use_map:1,
dsn64:1,
data_fin:1,
use_ack:1,
ack64:1,
mpc_map:1,
- __unused:2;
- /* one byte hole */
+ frozen:1,
+ reset_transient:1;
+ u8 reset_reason:4,
+ csum_reqd:1,
+ infinite_map:1;
};
-struct mptcp_out_options {
-#if IS_ENABLED(CONFIG_MPTCP)
- u16 suboptions;
- u64 sndr_key;
- u64 rcvr_key;
+#define MPTCPOPT_HMAC_LEN 20
+#define MPTCP_RM_IDS_MAX 8
+
+struct mptcp_rm_list {
+ u8 ids[MPTCP_RM_IDS_MAX];
+ u8 nr;
+};
+
+struct mptcp_addr_info {
+ u8 id;
+ sa_family_t family;
+ __be16 port;
union {
- struct in_addr addr;
+ struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- struct in6_addr addr6;
+ struct in6_addr addr6;
#endif
};
- u8 addr_id;
- u64 ahmac;
- u8 rm_id;
+};
+
+struct mptcp_out_options {
+#if IS_ENABLED(CONFIG_MPTCP)
+ u16 suboptions;
+ struct mptcp_rm_list rm_list;
u8 join_id;
u8 backup;
- u32 nonce;
- u64 thmac;
- u32 token;
- u8 hmac[20];
- struct mptcp_ext ext_copy;
+ u8 reset_reason:4,
+ reset_transient:1,
+ csum_reqd:1,
+ allow_join_id0:1;
+ union {
+ struct {
+ u64 sndr_key;
+ u64 rcvr_key;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ __sum16 csum;
+ };
+ struct {
+ struct mptcp_addr_info addr;
+ u64 ahmac;
+ };
+ struct {
+ struct mptcp_ext ext_copy;
+ u64 fail_seq;
+ };
+ struct {
+ u32 nonce;
+ u32 token;
+ u64 thmac;
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ };
+ };
#endif
};
-#ifdef CONFIG_MPTCP
-extern struct request_sock_ops mptcp_subflow_request_sock_ops;
+#define MPTCP_SCHED_NAME_MAX 16
+#define MPTCP_SUBFLOWS_MAX 8
+
+struct mptcp_sched_data {
+ bool reinject;
+ u8 subflows;
+ struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
+};
+
+struct mptcp_sched_ops {
+ int (*get_subflow)(struct mptcp_sock *msk,
+ struct mptcp_sched_data *data);
+
+ char name[MPTCP_SCHED_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+
+ void (*init)(struct mptcp_sock *msk);
+ void (*release)(struct mptcp_sock *msk);
+} ____cacheline_aligned_in_smp;
+#ifdef CONFIG_MPTCP
void mptcp_init(void);
static inline bool sk_is_mptcp(const struct sock *sk)
@@ -85,10 +143,12 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
+
+void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
+ struct mptcp_out_options *opts);
-void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
/* move the skb extension ownership, with the assumption that 'to' is
* newly allocated
@@ -107,6 +167,19 @@ static inline void mptcp_skb_ext_move(struct sk_buff *to,
from->active_extensions = 0;
}
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ struct mptcp_ext *from_ext;
+
+ from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
+ if (!from_ext)
+ return;
+
+ from_ext->frozen = 1;
+ skb_ext_copy(to, from);
+}
+
static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
const struct mptcp_ext *from_ext)
{
@@ -134,6 +207,19 @@ void mptcp_seq_show(struct seq_file *seq);
int mptcp_subflow_init_cookie_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb);
+struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener);
+
+__be32 mptcp_get_reset_option(const struct sk_buff *skb);
+
+static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
+{
+ if (skb_ext_exist(skb, SKB_EXT_MPTCP))
+ return mptcp_get_reset_option(skb);
+
+ return htonl(0u);
+}
#else
static inline void mptcp_init(void)
@@ -155,12 +241,6 @@ static inline bool rsk_drop_req(const struct request_sock *req)
return false;
}
-static inline void mptcp_parse_option(const struct sk_buff *skb,
- const unsigned char *ptr, int opsize,
- struct tcp_options_received *opt_rx)
-{
-}
-
static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
unsigned int *size,
struct mptcp_out_options *opts)
@@ -184,10 +264,10 @@ static inline bool mptcp_established_options(struct sock *sk,
return false;
}
-static inline void mptcp_incoming_options(struct sock *sk,
- struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+static inline bool mptcp_incoming_options(struct sock *sk,
+ struct sk_buff *skb)
{
+ return true;
}
static inline void mptcp_skb_ext_move(struct sk_buff *to,
@@ -195,6 +275,11 @@ static inline void mptcp_skb_ext_move(struct sk_buff *to,
{
}
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+}
+
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
@@ -210,6 +295,15 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
{
return 0; /* TCP fallback */
}
+
+static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener)
+{
+ return NULL;
+}
+
+static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -220,4 +314,14 @@ static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif
+#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
+struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
+#else
+static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
+#endif
+
+#if !IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_sock { };
+#endif
+
#endif /* __NET_MPTCP_H */
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 1c308c034e1a..b28915ffea28 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -2,6 +2,10 @@
#ifndef _NET_MRP_H
#define _NET_MRP_H
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
#define MRP_END_MARK 0x0
struct mrp_pdu_hdr {
@@ -120,6 +124,7 @@ struct mrp_applicant {
struct sk_buff *pdu;
struct rb_root mad;
struct rcu_head rcu;
+ bool active;
};
struct mrp_port {
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index fbefe80361ee..08a50d9acb0a 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -2,6 +2,8 @@
#ifndef __NET_NCSI_H
#define __NET_NCSI_H
+#include <linux/types.h>
+
/*
* The NCSI device states seen from external. More NCSI device states are
* only visible internally (in net/ncsi/internal.h). When the NCSI device
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 38e4094960ce..9bbdf6eaa942 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -137,7 +137,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
u8 *opt, int opt_len,
struct ndisc_options *ndopts);
-void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad);
#define NDISC_OPS_REDIRECT_DATA_SPACE 2
@@ -395,11 +395,11 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
{
struct neighbour *n;
- rcu_read_lock_bh();
+ rcu_read_lock();
n = __ipv6_neigh_lookup_noref(dev, pkey);
if (n && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return n;
}
@@ -409,16 +409,10 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
{
struct neighbour *n;
- rcu_read_lock_bh();
+ rcu_read_lock();
n = __ipv6_neigh_lookup_noref(dev, pkey);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
- rcu_read_unlock_bh();
+ neigh_confirm(n);
+ rcu_read_unlock();
}
static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
@@ -426,16 +420,10 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
{
struct neighbour *n;
- rcu_read_lock_bh();
+ rcu_read_lock();
n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
- rcu_read_unlock_bh();
+ neigh_confirm(n);
+ rcu_read_unlock();
}
/* uses ipv6_stub and is meant for use outside of IPv6 core */
@@ -457,12 +445,17 @@ int ndisc_late_init(void);
void ndisc_late_cleanup(void);
void ndisc_cleanup(void);
-int ndisc_rcv(struct sk_buff *skb);
+enum skb_drop_reason ndisc_rcv(struct sk_buff *skb);
+struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit,
+ const struct in6_addr *saddr, u64 nonce);
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
const struct in6_addr *daddr, const struct in6_addr *saddr,
u64 nonce);
+void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
@@ -487,17 +480,14 @@ int igmp6_late_init(void);
void igmp6_cleanup(void);
void igmp6_late_cleanup(void);
-int igmp6_event_query(struct sk_buff *skb);
+void igmp6_event_query(struct sk_buff *skb);
-int igmp6_event_report(struct sk_buff *skb);
+void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen);
#endif
void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 81ee17594c32..0d28172193fa 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -48,6 +48,7 @@ enum {
NEIGH_VAR_RETRANS_TIME,
NEIGH_VAR_BASE_REACHABLE_TIME,
NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_INTERVAL_PROBE_TIME_MS,
NEIGH_VAR_GC_STALETIME,
NEIGH_VAR_QUEUE_LEN_BYTES,
NEIGH_VAR_PROXY_QLEN,
@@ -70,6 +71,7 @@ enum {
struct neigh_parms {
possible_net_t net;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
struct neigh_table *tbl;
@@ -81,6 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
+ u32 qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
@@ -144,20 +147,22 @@ struct neighbour {
struct timer_list timer;
unsigned long used;
atomic_t probes;
- __u8 flags;
- __u8 nud_state;
- __u8 type;
- __u8 dead;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
u8 protocol;
+ u32 flags;
seqlock_t ha_lock;
unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct list_head gc_list;
+ struct list_head managed_list;
struct rcu_head rcu;
struct net_device *dev;
- u8 primary_key[0];
+ netdevice_tracker dev_tracker;
+ u8 primary_key[];
} __randomize_layout;
struct neigh_ops {
@@ -172,9 +177,10 @@ struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
- u8 flags;
+ netdevice_tracker dev_tracker;
+ u32 flags;
u8 protocol;
- u8 key[];
+ u32 key[];
};
/*
@@ -204,6 +210,7 @@ struct neigh_table {
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
void (*proxy_redo)(struct sk_buff *skb);
+ int (*is_multicast)(const void *pkey);
bool (*allow_add)(const struct net_device *dev,
struct netlink_ext_ack *extack);
char *id;
@@ -215,11 +222,13 @@ struct neigh_table {
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
+ struct delayed_work managed_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
atomic_t gc_entries;
struct list_head gc_list;
+ struct list_head managed_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -249,20 +258,24 @@ static inline void *neighbour_priv(const struct neighbour *n)
}
/* flags for neigh_update() */
-#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
-#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
-#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
-#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000
-#define NEIGH_UPDATE_F_ISROUTER 0x40000000
-#define NEIGH_UPDATE_F_ADMIN 0x80000000
+#define NEIGH_UPDATE_F_OVERRIDE BIT(0)
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1)
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2)
+#define NEIGH_UPDATE_F_USE BIT(3)
+#define NEIGH_UPDATE_F_MANAGED BIT(4)
+#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
+#define NEIGH_UPDATE_F_ISROUTER BIT(6)
+#define NEIGH_UPDATE_F_ADMIN BIT(7)
+
+/* In-kernel representation for NDA_FLAGS_EXT flags: */
+#define NTF_OLD_MASK 0xff
+#define NTF_EXT_SHIFT 8
+#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+
+#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
extern const struct nla_policy nda_policy[];
-static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
-{
- return *(const u16 *)n->primary_key == *(const u16 *)pkey;
-}
-
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
@@ -286,14 +299,14 @@ static inline struct neighbour *___neigh_lookup_noref(
const void *pkey,
struct net_device *dev)
{
- struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
+ struct neigh_hash_table *nht = rcu_dereference(tbl->nht);
struct neighbour *n;
u32 hash_val;
hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ for (n = rcu_dereference(nht->hash_buckets[hash_val]);
n != NULL;
- n = rcu_dereference_bh(n->next)) {
+ n = rcu_dereference(n->next)) {
if (n->dev == dev && key_eq(n, pkey))
return n;
}
@@ -308,12 +321,21 @@ static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}
+static inline void neigh_confirm(struct neighbour *n)
+{
+ if (n) {
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ }
+}
+
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev);
-struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
- const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
@@ -323,7 +345,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
-int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ const bool immediate_ok);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
@@ -371,8 +394,6 @@ void neigh_for_each(struct neigh_table *tbl,
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *));
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
-void pneigh_for_each(struct neigh_table *tbl,
- void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct seq_net_private p;
@@ -433,17 +454,24 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
#define neigh_hold(n) refcount_inc(&(n)->refcnt)
-static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
+ struct sk_buff *skb,
+ const bool immediate_ok)
{
unsigned long now = jiffies;
-
+
if (READ_ONCE(neigh->used) != now)
WRITE_ONCE(neigh->used, now);
- if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
- return __neigh_event_send(neigh, skb);
+ if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
+ return __neigh_event_send(neigh, skb, immediate_ok);
return 0;
}
+static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ return neigh_event_send_probe(neigh, skb, true);
+}
+
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
@@ -503,10 +531,15 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
{
const struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
+ /* n->nud_state and hh->hh_len could be changed under us.
+ * neigh_hh_output() is taking care of the race later.
+ */
+ if (!skip_cache &&
+ (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
+ READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- else
- return n->output(n, skb);
+
+ return READ_ONCE(n->output)(n, skb);
}
static inline struct neighbour *
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
new file mode 100644
index 000000000000..1e74684cbbdb
--- /dev/null
+++ b/include/net/net_debug.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NET_DEBUG_H
+#define _LINUX_NET_DEBUG_H
+
+#include <linux/bug.h>
+#include <linux/kern_levels.h>
+
+struct net_device;
+
+__printf(3, 4) __cold
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
+__printf(2, 3) __cold
+void netdev_emerg(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_alert(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_crit(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_err(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_warn(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_notice(const struct net_device *dev, const char *format, ...);
+__printf(2, 3) __cold
+void netdev_info(const struct net_device *dev, const char *format, ...);
+
+#define netdev_level_once(level, dev, fmt, ...) \
+do { \
+ static bool __section(".data.once") __print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define netdev_emerg_once(dev, fmt, ...) \
+ netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
+#define netdev_alert_once(dev, fmt, ...) \
+ netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
+#define netdev_crit_once(dev, fmt, ...) \
+ netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
+#define netdev_err_once(dev, fmt, ...) \
+ netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
+#define netdev_warn_once(dev, fmt, ...) \
+ netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
+#define netdev_notice_once(dev, fmt, ...) \
+ netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
+#define netdev_info_once(dev, fmt, ...) \
+ netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_netdev_dbg(__dev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#else
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netdev_vdbg netdev_dbg
+#else
+
+#define netdev_vdbg(dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/* netif printk helpers, similar to netdev_printk */
+
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define netif_dbg(priv, type, netdev, format, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ dynamic_netdev_dbg(netdev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
+#else
+#define netif_dbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
+ do { \
+ if (cond) \
+ netif_dbg(priv, type, netdev, fmt, ##args); \
+ else \
+ netif_ ## level(priv, type, netdev, fmt, ##args); \
+ } while (0)
+
+#if defined(VERBOSE_DEBUG)
+#define netif_vdbg netif_dbg
+#else
+#define netif_vdbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+
+#if defined(CONFIG_DEBUG_NET)
+#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#else
+#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#endif
+
+#endif /* _LINUX_NET_DEBUG_H */
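To illustrate how these helpers compose, a short sketch of a driver using the netif_msg_*() gated variants together with the one-shot macros; struct foo_priv and its msg_enable bitmap are hypothetical:

/* Hypothetical driver glue for the net_debug.h helpers. */
struct foo_priv {
        u32 msg_enable;         /* bitmap read by netif_msg_<type>() */
};

static void foo_link_change(struct net_device *dev, bool up)
{
        struct foo_priv *priv = netdev_priv(dev);

        if (up)
                netif_info(priv, link, dev, "link is up\n");
        else
                netif_warn(priv, link, dev, "link is down\n");

        /* emitted at most once per boot, via the .data.once flag */
        netdev_warn_once(dev, "link flap detected\n");

        /* compiled away unless CONFIG_DEBUG_NET is enabled */
        DEBUG_NET_WARN_ON_ONCE(!netif_running(dev));
}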
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2ee5901bec7a..20c34bd7a077 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -22,22 +22,27 @@
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
-#include <net/netns/dccp.h>
#include <net/netns/netfilter.h>
-#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+#include <net/netns/flow_table.h>
+#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
+#include <net/netns/smc.h>
#include <net/netns/bpf.h>
+#include <net/netns/mctp.h>
+#include <net/net_trackers.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
+#include <linux/xarray.h>
struct user_namespace;
struct proc_dir_entry;
@@ -60,15 +65,10 @@ struct net {
refcount_t passive; /* To decide when the network
* namespace should be freed.
*/
- refcount_t count; /* To decided when the network
- * namespace should be shut down.
- */
spinlock_t rules_mod_lock;
- unsigned int dev_unreg_count;
-
unsigned int dev_base_seq; /* protected by rtnl_mutex */
- int ifindex;
+ u32 ifindex;
spinlock_t nsid_lock;
atomic_t fnhe_genid;
@@ -90,7 +90,10 @@ struct net {
struct idr netns_ids;
struct ns_common ns;
-
+ struct ref_tracker_dir refcnt_tracker;
+ struct ref_tracker_dir notrefcnt_tracker; /* tracker for objects not
+ * refcounted against netns
+ */
struct list_head dev_base_head;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
@@ -106,6 +109,7 @@ struct net {
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
+ struct xarray dev_by_index;
struct raw_notifier_head netdev_chain;
/* Note that @hash_mix can be read millions of times per second,
@@ -121,7 +125,9 @@ struct net {
struct netns_core core;
struct netns_mib mib;
struct netns_packet packet;
+#if IS_ENABLED(CONFIG_UNIX)
struct netns_unix unx;
+#endif
struct netns_nexthop nexthop;
struct netns_ipv4 ipv4;
#if IS_ENABLED(CONFIG_IPV6)
@@ -133,29 +139,16 @@ struct net {
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
struct netns_sctp sctp;
#endif
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
- struct netns_dccp dccp;
-#endif
#ifdef CONFIG_NETFILTER
struct netns_nf nf;
- struct netns_xt xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct netns_ct ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
struct netns_nftables nft;
#endif
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- struct netns_nf_frag nf_frag;
- struct ctl_table_header *nf_frag_frags_hdr;
-#endif
- struct sock *nfnl;
- struct sock *nfnl_stash;
-#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
- struct list_head nfnl_acct_list;
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
- struct list_head nfct_timeout_list;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ struct netns_ft ft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
@@ -171,7 +164,7 @@ struct net {
struct netns_xfrm xfrm;
#endif
- atomic64_t net_cookie; /* written once */
+ u64 net_cookie; /* written once */
#if IS_ENABLED(CONFIG_IP_VS)
struct netns_ipvs *ipvs;
@@ -185,10 +178,16 @@ struct net {
#ifdef CONFIG_XDP_SOCKETS
struct netns_xdp xdp;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct netns_mctp mctp;
+#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
struct sock *crypto_nlsk;
#endif
struct sock *diag_nlsk;
+#if IS_ENABLED(CONFIG_SMC)
+ struct netns_smc smc;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
@@ -203,6 +202,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -222,15 +224,22 @@ static inline void net_ns_get_ownership(const struct net *net,
}
static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_NET_NS */
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
-
-u64 net_gen_cookie(struct net *net);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
@@ -243,9 +252,10 @@ void ipx_unregister_sysctl(void);
#ifdef CONFIG_NET_NS
void __put_net(struct net *net);
+/* Try using get_net_track() instead */
static inline struct net *get_net(struct net *net)
{
- refcount_inc(&net->count);
+ refcount_inc(&net->ns.count);
return net;
}
@@ -256,14 +266,15 @@ static inline struct net *maybe_get_net(struct net *net)
* exists. If the reference count is zero this
* function fails and returns NULL.
*/
- if (!refcount_inc_not_zero(&net->count))
+ if (!refcount_inc_not_zero(&net->ns.count))
net = NULL;
return net;
}
+/* Try using put_net_track() instead */
static inline void put_net(struct net *net)
{
- if (refcount_dec_and_test(&net->count))
+ if (refcount_dec_and_test(&net->ns.count))
__put_net(net);
}
@@ -275,7 +286,7 @@ int net_eq(const struct net *net1, const struct net *net2)
static inline int check_net(const struct net *net)
{
- return refcount_read(&net->count) != 0;
+ return refcount_read(&net->ns.count) != 0;
}
void net_drop_ns(void *);
@@ -311,23 +322,74 @@ static inline int check_net(const struct net *net)
#endif
+static inline void __netns_tracker_alloc(struct net *net,
+ netns_tracker *tracker,
+ bool refcounted,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+ ref_tracker_alloc(refcounted ? &net->refcnt_tracker :
+ &net->notrefcnt_tracker,
+ tracker, gfp);
+#endif
+}
+
+static inline void netns_tracker_alloc(struct net *net, netns_tracker *tracker,
+ gfp_t gfp)
+{
+ __netns_tracker_alloc(net, tracker, true, gfp);
+}
+
+static inline void __netns_tracker_free(struct net *net,
+ netns_tracker *tracker,
+ bool refcounted)
+{
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+ ref_tracker_free(refcounted ? &net->refcnt_tracker :
+ &net->notrefcnt_tracker, tracker);
+#endif
+}
+
+static inline struct net *get_net_track(struct net *net,
+ netns_tracker *tracker, gfp_t gfp)
+{
+ get_net(net);
+ netns_tracker_alloc(net, tracker, gfp);
+ return net;
+}
+
+static inline void put_net_track(struct net *net, netns_tracker *tracker)
+{
+ __netns_tracker_free(net, tracker, true);
+ put_net(net);
+}
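A minimal sketch of the intended pairing, with a hypothetical object that keeps a long-lived reference to a namespace:

/* Hypothetical holder of a tracked netns reference. With
 * CONFIG_NET_NS_REFCNT_TRACKER enabled, a leaked reference can be
 * attributed to this allocation site.
 */
struct foo {
        struct net      *net;
        netns_tracker   ns_tracker;
};

static void foo_init(struct foo *f, struct net *net)
{
        f->net = get_net_track(net, &f->ns_tracker, GFP_KERNEL);
}

static void foo_destroy(struct foo *f)
{
        put_net_track(f->net, &f->ns_tracker);
        f->net = NULL;
}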
+
typedef struct {
#ifdef CONFIG_NET_NS
- struct net *net;
+ struct net __rcu *net;
#endif
} possible_net_t;
static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
- pnet->net = net;
+ rcu_assign_pointer(pnet->net, net);
#endif
}
static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
- return pnet->net;
+ return rcu_dereference_protected(pnet->net, true);
+#else
+ return &init_net;
+#endif
+}
+
+static inline struct net *read_pnet_rcu(possible_net_t *pnet)
+{
+#ifdef CONFIG_NET_NS
+ return rcu_dereference(pnet->net);
#else
return &init_net;
#endif
@@ -386,6 +448,9 @@ struct pernet_operations {
void (*pre_exit)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
+ /* Following method is called with RTNL held. */
+ void (*exit_batch_rtnl)(struct list_head *net_exit_list,
+ struct list_head *dev_kill_list);
unsigned int *id;
size_t size;
};
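A sketch of how a subsystem might use the new hook; the foo_* names are hypothetical. exit_batch_rtnl() runs with RTNL already held, so devices can be queued on dev_kill_list without re-taking the lock:

/* Illustrative only: "foo" is a hypothetical subsystem. */
static void foo_exit_batch_rtnl(struct list_head *net_exit_list,
                                struct list_head *dev_kill_list)
{
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list) {
                /* e.g. unregister_netdevice_queue(dev, dev_kill_list)
                 * for each device this subsystem created in @net.
                 */
        }
}

static struct pernet_operations foo_net_ops = {
        .exit_batch_rtnl = foo_exit_batch_rtnl,
};

/* registered as usual, e.g. via register_pernet_subsys(&foo_net_ops) */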
@@ -415,17 +480,18 @@ int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
struct ctl_table;
-struct ctl_table_header;
+#define register_net_sysctl(net, path, table) \
+ register_net_sysctl_sz(net, path, table, ARRAY_SIZE(table))
#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
-struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
- struct ctl_table *table);
+struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path,
+ struct ctl_table *table, size_t table_size);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
-static inline struct ctl_table_header *register_net_sysctl(struct net *net,
- const char *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_net_sysctl_sz(struct net *net,
+ const char *path, struct ctl_table *table, size_t table_size)
{
return NULL;
}
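Usage sketch for the sized registration, with a hypothetical one-entry table; the register_net_sysctl() macro above supplies ARRAY_SIZE(table) itself, so callers cannot pass a stale size:

static int foo_enabled;

static struct ctl_table foo_sysctl_table[] = {
        {
                .procname       = "foo_enabled",
                .data           = &foo_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }     /* sentinel */
};

static int foo_sysctl_init(struct net *net)
{
        struct ctl_table_header *hdr;

        /* expands to register_net_sysctl_sz(net, "net/foo",
         * foo_sysctl_table, ARRAY_SIZE(foo_sysctl_table))
         */
        hdr = register_net_sysctl(net, "net/foo", foo_sysctl_table);
        return hdr ? 0 : -ENOMEM;
}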
@@ -483,4 +549,10 @@ static inline void fnhe_genid_bump(struct net *net)
atomic_inc(&net->fnhe_genid);
}
+#ifdef CONFIG_NET
+void net_ns_init(void);
+#else
+static inline void net_ns_init(void) {}
+#endif
+
#endif /* __NET_NET_NAMESPACE_H */
diff --git a/include/net/net_trackers.h b/include/net/net_trackers.h
new file mode 100644
index 000000000000..d94c76cf15a9
--- /dev/null
+++ b/include/net/net_trackers.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NET_TRACKERS_H
+#define __NET_NET_TRACKERS_H
+#include <linux/ref_tracker.h>
+
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+typedef struct ref_tracker *netdevice_tracker;
+#else
+typedef struct {} netdevice_tracker;
+#endif
+
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+typedef struct ref_tracker *netns_tracker;
+#else
+typedef struct {} netns_tracker;
+#endif
+
+#endif /* __NET_NET_TRACKERS_H */
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
new file mode 100644
index 000000000000..1ec408585373
--- /dev/null
+++ b/include/net/netdev_queues.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NET_QUEUES_H
+#define _LINUX_NET_QUEUES_H
+
+#include <linux/netdevice.h>
+
+/* See the netdev.yaml spec for definition of each statistic */
+struct netdev_queue_stats_rx {
+ u64 bytes;
+ u64 packets;
+ u64 alloc_fail;
+};
+
+struct netdev_queue_stats_tx {
+ u64 bytes;
+ u64 packets;
+};
+
+/**
+ * struct netdev_stat_ops - netdev ops for fine grained stats
+ * @get_queue_stats_rx: get stats for a given Rx queue
+ * @get_queue_stats_tx: get stats for a given Tx queue
+ * @get_base_stats: get base stats (not belonging to any live instance)
+ *
+ * Query stats for a given object. The values of the statistics are undefined
+ * on entry (specifically they are *not* zero-initialized). Drivers should
+ * assign values only to the statistics they collect. Statistics which are not
+ * collected must be left undefined.
+ *
+ * Queue objects are not necessarily persistent, and only currently active
+ * queues are queried by the per-queue callbacks. This means that per-queue
+ * statistics will not generally add up to the total number of events for
+ * the device. The @get_base_stats callback allows filling in the delta
+ * between events for currently live queues and overall device history.
+ * When the statistics for the entire device are queried, first @get_base_stats
+ * is issued to collect the delta, and then a series of per-queue callbacks.
+ * Only statistics which are set in @get_base_stats will be reported
+ * at the device level, meaning that unlike in queue callbacks, setting
+ * a statistic to zero in @get_base_stats is a legitimate thing to do.
+ * This is because @get_base_stats has a second function of designating which
+ * statistics are in fact correct for the entire device (e.g. when history
+ * for some of the events is not maintained, and reliable "total" cannot
+ * be provided).
+ *
+ * Device drivers can assume that when collecting total device stats,
+ * the @get_base_stats and subsequent per-queue calls are performed
+ * "atomically" (without releasing the rtnl_lock).
+ *
+ * Device drivers are encouraged to reset the per-queue statistics when
+ * number of queues change. This is because the primary use case for
+ * per-queue statistics is currently to detect traffic imbalance.
+ */
+struct netdev_stat_ops {
+ void (*get_queue_stats_rx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *stats);
+ void (*get_queue_stats_tx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *stats);
+ void (*get_base_stats)(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx);
+};
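A sketch of a driver filling these in; the foo_* ring bookkeeping is hypothetical. Note how the per-queue callbacks assign only the counters the device maintains, while get_base_stats may legitimately write zeros:

/* Illustrative only: hypothetical per-ring counters. */
struct foo_ring_stats {
        u64 packets;
        u64 bytes;
        u64 alloc_fail;
};

struct foo_priv {
        struct foo_ring_stats *rx;      /* one per Rx ring */
        struct foo_ring_stats *tx;      /* one per Tx ring */
};

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
                                   struct netdev_queue_stats_rx *stats)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* fill only what the device tracks; the rest stays undefined */
        stats->packets = priv->rx[idx].packets;
        stats->bytes = priv->rx[idx].bytes;
        stats->alloc_fail = priv->rx[idx].alloc_fail;
}

static void foo_get_queue_stats_tx(struct net_device *dev, int idx,
                                   struct netdev_queue_stats_tx *stats)
{
        struct foo_priv *priv = netdev_priv(dev);

        stats->packets = priv->tx[idx].packets;
        stats->bytes = priv->tx[idx].bytes;
}

static void foo_get_base_stats(struct net_device *dev,
                               struct netdev_queue_stats_rx *rx,
                               struct netdev_queue_stats_tx *tx)
{
        /* no history kept for freed queues; zero is a valid answer here */
        rx->packets = 0;
        rx->bytes = 0;
        tx->packets = 0;
        tx->bytes = 0;
}

static const struct netdev_stat_ops foo_stat_ops = {
        .get_queue_stats_rx     = foo_get_queue_stats_rx,
        .get_queue_stats_tx     = foo_get_queue_stats_tx,
        .get_base_stats         = foo_get_base_stats,
};

During probe the driver would point the net_device at this table (dev->stat_ops in current mainline; treat the field name as an assumption here).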
+
+/**
+ * DOC: Lockless queue stopping / waking helpers.
+ *
+ * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
+ * macros are designed to safely implement stopping
+ * and waking netdev queues without full lock protection.
+ *
+ * We assume that there can be no concurrent stop attempts and no concurrent
+ * wake attempts. The try-stop should happen from the xmit handler,
+ * while wake up should be triggered from NAPI poll context.
+ * The two may run concurrently (single producer, single consumer).
+ *
+ * The try-stop side is expected to run from the xmit handler and therefore
+ * it does not reschedule Tx (netif_tx_start_queue() instead of
+ * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
+ * handler may lead to the xmit queue being enabled but not run.
+ * The waking side does not have similar context restrictions.
+ *
+ * The macros guarantee that rings will not remain stopped if there's
+ * space available, but they do *not* prevent false wake ups when
+ * the ring is full! Drivers should check for ring full at the start
+ * for the xmit handler.
+ *
+ * All descriptor ring indexes (and other relevant shared state) must
+ * be updated before invoking the macros.
+ */
+
+#define netif_txq_try_stop(txq, get_desc, start_thrs) \
+ ({ \
+ int _res; \
+ \
+ netif_tx_stop_queue(txq); \
+ /* Producer index and stop bit must be visible \
+ * to consumer before we recheck. \
+ * Pairs with a barrier in __netif_txq_completed_wake(). \
+ */ \
+ smp_mb__after_atomic(); \
+ \
+ /* We need to check again in case another \
+ * CPU has just made room available. \
+ */ \
+ _res = 0; \
+ if (unlikely(get_desc >= start_thrs)) { \
+ netif_tx_start_queue(txq); \
+ _res = -1; \
+ } \
+ _res; \
+ }) \
+
+/**
+ * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
+ * @txq: struct netdev_queue to stop/start
+ * @get_desc: get current number of free descriptors (see requirements below!)
+ * @stop_thrs: minimal number of available descriptors for queue to be left
+ * enabled
+ * @start_thrs: minimal number of descriptors to re-enable the queue, can be
+ * equal to @stop_thrs or higher to avoid frequent waking
+ *
+ * All arguments may be evaluated multiple times; beware of side effects.
+ * @get_desc must be a formula or a function call; it must always
+ * return up-to-date information when evaluated!
+ * Expected to be used from ndo_start_xmit; see the comment at the top of the file.
+ *
+ * Returns:
+ * 0 if the queue was stopped
+ * 1 if the queue was left enabled
+ * -1 if the queue was re-enabled (raced with waking)
+ */
+#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs) \
+ ({ \
+ int _res; \
+ \
+ _res = 1; \
+ if (unlikely(get_desc < stop_thrs)) \
+ _res = netif_txq_try_stop(txq, get_desc, start_thrs); \
+ _res; \
+ }) \
+
+/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
+ * @bytes != 0, regardless of kernel config.
+ */
+static inline void
+netdev_txq_completed_mb(struct netdev_queue *dev_queue,
+ unsigned int pkts, unsigned int bytes)
+{
+ if (IS_ENABLED(CONFIG_BQL))
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+ else if (bytes)
+ smp_mb();
+}
+
+/**
+ * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
+ * @txq: struct netdev_queue to stop/start
+ * @pkts: number of packets completed
+ * @bytes: number of bytes completed
+ * @get_desc: get current number of free descriptors (see requirements below!)
+ * @start_thrs: minimal number of descriptors to re-enable the queue
+ * @down_cond: down condition, predicate indicating that the queue should
+ * not be woken up even if descriptors are available
+ *
+ * All arguments may be evaluated multiple times.
+ * @get_desc must be a formula or a function call; it must always
+ * return up-to-date information when evaluated!
+ * Reports completed pkts/bytes to BQL.
+ *
+ * Returns:
+ * 0 if the queue was woken up
+ * 1 if the queue was already enabled (or disabled but @down_cond is true)
+ * -1 if the queue was left unchanged (@start_thrs not reached)
+ */
+#define __netif_txq_completed_wake(txq, pkts, bytes, \
+ get_desc, start_thrs, down_cond) \
+ ({ \
+ int _res; \
+ \
+ /* Report to BQL and piggy back on its barrier. \
+ * Barrier makes sure that anybody stopping the queue \
+ * after this point sees the new consumer index. \
+ * Pairs with barrier in netif_txq_try_stop(). \
+ */ \
+ netdev_txq_completed_mb(txq, pkts, bytes); \
+ \
+ _res = -1; \
+ if (pkts && likely(get_desc >= start_thrs)) { \
+ _res = 1; \
+ if (unlikely(netif_tx_queue_stopped(txq)) && \
+ !(down_cond)) { \
+ netif_tx_wake_queue(txq); \
+ _res = 0; \
+ } \
+ } \
+ _res; \
+ })
+
+#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
+ __netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)
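Tying the two halves together, a sketch of a single-queue driver; foo_tx_avail() and foo_post_descriptors() are hypothetical and must observe the ordering rules described in the DOC block above (indexes updated before the macros run):

/* Illustrative only: single-queue driver, hypothetical foo_* helpers. */
#define FOO_STOP_THRS   (MAX_SKB_FRAGS + 1)     /* worst-case descriptors */
#define FOO_START_THRS  (2 * FOO_STOP_THRS)     /* hysteresis on wake */

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        /* the check for a full ring belongs at the start of the handler */
        if (unlikely(foo_tx_avail(priv) < FOO_STOP_THRS))
                return NETDEV_TX_BUSY;

        foo_post_descriptors(priv, skb);        /* updates producer index */

        /* stop the ring if the next packet might not fit */
        netif_txq_maybe_stop(txq, foo_tx_avail(priv),
                             FOO_STOP_THRS, FOO_START_THRS);
        return NETDEV_TX_OK;
}

static void foo_tx_poll(struct foo_priv *priv, unsigned int pkts,
                        unsigned int bytes)
{
        struct netdev_queue *txq = netdev_get_tx_queue(priv->dev, 0);

        /* reports to BQL and wakes the queue if space was freed */
        netif_txq_completed_wake(txq, pkts, bytes,
                                 foo_tx_avail(priv), FOO_START_THRS);
}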
+
+/* subqueue variants follow */
+
+#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs) \
+ ({ \
+ struct netdev_queue *txq; \
+ \
+ txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_try_stop(txq, get_desc, start_thrs); \
+ })
+
+#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
+ ({ \
+ struct netdev_queue *txq; \
+ \
+ txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
+ })
+
+#define netif_subqueue_completed_wake(dev, idx, pkts, bytes, \
+ get_desc, start_thrs) \
+ ({ \
+ struct netdev_queue *txq; \
+ \
+ txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_completed_wake(txq, pkts, bytes, \
+ get_desc, start_thrs); \
+ })
+
+#endif
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
new file mode 100644
index 000000000000..aa1716fb0e53
--- /dev/null
+++ b/include/net/netdev_rx_queue.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NETDEV_RX_QUEUE_H
+#define _LINUX_NETDEV_RX_QUEUE_H
+
+#include <linux/kobject.h>
+#include <linux/netdevice.h>
+#include <linux/sysfs.h>
+#include <net/xdp.h>
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+ struct xdp_rxq_info xdp_rxq;
+#ifdef CONFIG_RPS
+ struct rps_map __rcu *rps_map;
+ struct rps_dev_flow_table __rcu *rps_flow_table;
+#endif
+ struct kobject kobj;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
+#ifdef CONFIG_XDP_SOCKETS
+ struct xsk_buff_pool *pool;
+#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
+} ____cacheline_aligned_in_smp;
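Since ->napi is documented above as RTNL-protected, a driver touching it outside its usual rtnl-held control paths has to take the lock itself; a minimal sketch with hypothetical foo_ naming (mainline later grew a dedicated helper for this, not shown in this hunk):

static void foo_set_rx_queue_napi(struct net_device *dev, unsigned int rxq,
                                  struct napi_struct *napi)
{
        rtnl_lock();
        __netif_get_rx_queue(dev, rxq)->napi = napi;
        rtnl_unlock();
}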
+
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
+ ssize_t (*store)(struct netdev_rx_queue *queue,
+ const char *buf, size_t len);
+};
+
+static inline struct netdev_rx_queue *
+__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
+{
+ return dev->_rx + rxq;
+}
+
+#ifdef CONFIG_SYSFS
+static inline unsigned int
+get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int index = queue - dev->_rx;
+
+ BUG_ON(index >= dev->num_rx_queues);
+ return index;
+}
+#endif
+#endif
diff --git a/include/net/netevent.h b/include/net/netevent.h
index 4107016c3bb4..1be3757a8b7f 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -14,6 +14,7 @@
struct dst_entry;
struct neighbour;
+struct notifier_block;
struct netevent_redirect {
struct dst_entry *old;
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
index bcbd724cc048..7fda9ce9f694 100644
--- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -3,6 +3,7 @@
#define _NF_DEFRAG_IPV4_H
struct net;
-int nf_defrag_ipv4_enable(struct net *);
+int nf_defrag_ipv4_enable(struct net *net);
+void nf_defrag_ipv4_disable(struct net *net);
#endif /* _NF_DEFRAG_IPV4_H */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 40e0e0623f46..c653fcb88354 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -8,8 +8,8 @@
#include <net/netfilter/nf_reject.h>
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
-void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
-
+void nf_send_reset(struct net *net, struct sock *, struct sk_buff *oldskb,
+ int hook);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
@@ -18,4 +18,14 @@ struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
+struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+
#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 7b3c873f8839..e95483192d1b 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,7 +4,4 @@
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
-#include <linux/sysctl.h>
-extern struct ctl_table nf_ct_ipv6_sysctl_table[];
-
#endif /* _NF_CONNTRACK_IPV6_H*/
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 6d31cd041143..ceadf8ba25a4 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -5,7 +5,8 @@
#include <linux/skbuff.h>
#include <linux/types.h>
-int nf_defrag_ipv6_enable(struct net *);
+int nf_defrag_ipv6_enable(struct net *net);
+void nf_defrag_ipv6_disable(struct net *net);
int nf_ct_frag6_init(void);
void nf_ct_frag6_cleanup(void);
@@ -13,4 +14,9 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
struct inet_frags_ctl;
+struct nft_ct_frag6_pernet {
+ struct ctl_table_header *nf_frag_frags_hdr;
+ struct fqdir *fqdir;
+};
+
#endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 4a3ef9ebdf6f..d729344ba644 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -7,9 +7,8 @@
void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
unsigned int hooknum);
-
-void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
-
+void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook);
const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook);
@@ -20,4 +19,13 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
+struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+
#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_bpf_link.h b/include/net/netfilter/nf_bpf_link.h
new file mode 100644
index 000000000000..6c984b0ea838
--- /dev/null
+++ b/include/net/netfilter/nf_bpf_link.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+struct bpf_nf_ctx {
+ const struct nf_hook_state *state;
+ struct sk_buff *skb;
+};
+
+#if IS_ENABLED(CONFIG_NETFILTER_BPF_LINK)
+int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+#else
+static inline int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 439379ca9ffa..cba3ccf03fcc 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -43,10 +43,27 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
+struct nf_conntrack_net_ecache {
+ struct delayed_work dwork;
+ spinlock_t dying_lock;
+ struct hlist_nulls_head dying_list;
+};
+
struct nf_conntrack_net {
+ /* only used when new connection is allocated: */
+ atomic_t count;
+ unsigned int expect_count;
+
+ /* only used from work queues, configuration plane, and so on: */
unsigned int users4;
unsigned int users6;
unsigned int users_bridge;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ struct nf_conntrack_net_ecache ecache;
+#endif
};
#include <linux/types.h>
@@ -62,6 +79,8 @@ struct nf_conn {
* Hint, SKB address this struct and refcnt via skb->_nfct and
* helpers nf_conntrack_get() and nf_conntrack_put().
* Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
+ * except that the latter uses internal indirection and does not
+ * result in a conntrack module dependency.
* beware nf_ct_get() is different and don't inc refcnt.
*/
struct nf_conntrack ct_general;
@@ -80,7 +99,6 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- u16 cpu;
possible_net_t ct_net;
#if IS_ENABLED(CONFIG_NF_NAT)
@@ -108,6 +126,12 @@ struct nf_conn {
};
static inline struct nf_conn *
+nf_ct_to_nf_conn(const struct nf_conntrack *nfct)
+{
+ return container_of(nfct, struct nf_conn, ct_general);
+}
+
+static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
return container_of(hash, struct nf_conn,
@@ -136,10 +160,6 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct)
return read_pnet(&ct->ct_net);
}
-/* Alter reply tuple (maybe alter helper). */
-void nf_conntrack_alter_reply(struct nf_conn *ct,
- const struct nf_conntrack_tuple *newreply);
-
/* Is this tuple taken? (ignoring any belonging to the given
conntrack). */
int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
@@ -155,17 +175,17 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
return (struct nf_conn *)(nfct & NFCT_PTRMASK);
}
+void nf_ct_destroy(struct nf_conntrack *nfct);
+
+void nf_conntrack_tcp_set_closing(struct nf_conn *ct);
+
/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
- WARN_ON(!ct);
- nf_conntrack_put(&ct->ct_general);
+ if (ct && refcount_dec_and_test(&ct->ct_general.use))
+ nf_ct_destroy(&ct->ct_general);
}
-/* Protocol module loading */
-int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-void nf_ct_l3proto_module_put(unsigned short l3proto);
-
/* load module; enable/disable conntrack in this namespace */
int nf_ct_netns_get(struct net *net, u8 nfproto);
void nf_ct_netns_put(struct net *net, u8 nfproto);
@@ -214,13 +234,16 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
return nf_ct_delete(ct, 0, 0);
}
-/* Set all unconfirmed conntrack as dying */
-void nf_ct_unconfirmed_destroy(struct net *);
+struct nf_ct_iter_data {
+ struct net *net;
+ void *data;
+ u32 portid;
+ int report;
+};
/* Iterate over all conntracks: if iter returns true, it's deleted. */
-void nf_ct_iterate_cleanup_net(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report);
+void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
+ const struct nf_ct_iter_data *iter_data);
/* also set unconfirmed conntracks as dying. Only use in module exit path. */
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
@@ -257,19 +280,29 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+static inline void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply)
+{
+ /* Must be unconfirmed, so not in hash table yet */
+ if (WARN_ON(nf_ct_is_confirmed(ct)))
+ return;
+
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+}
+
#define nfct_time_stamp ((u32)(jiffies))
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
- return timeout > 0 ? timeout : 0;
+ return max(timeout, 0);
}
static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
- return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
}
/* use after obtaining a reference count */
@@ -288,7 +321,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
static inline void nf_ct_offload_timeout(struct nf_conn *ct)
{
if (nf_ct_expires(ct) < NF_CT_DAY / 2)
- ct->timeout = nfct_time_stamp + NF_CT_DAY;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
}
struct kernel_param;
@@ -324,6 +357,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
void nf_ct_tmpl_free(struct nf_conn *tmpl);
u32 nf_ct_get_id(const struct nf_conn *ct);
+u32 nf_conntrack_count(const struct net *net);
static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
@@ -331,6 +365,17 @@ nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
skb_set_nfct(skb, (unsigned long)ct | info);
}
+extern unsigned int nf_conntrack_net_id;
+
+static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
+{
+ return net_generic(net, nf_conntrack_net_id);
+}
+
+int nf_ct_skb_network_trim(struct sk_buff *skb, int family);
+int nf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ u16 zone, u8 family, u8 *proto, u16 *mru);
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 7f44a771530e..a120685cac93 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -78,7 +78,4 @@ static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
void nf_conntrack_acct_pernet_init(struct net *net);
-int nf_conntrack_acct_init(void);
-void nf_conntrack_acct_fini(void);
-
#endif /* _NF_CONNTRACK_ACCT_H */
diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
new file mode 100644
index 000000000000..e5f2f0b73a9a
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_ACT_CT_H
+#define _NF_CONNTRACK_ACT_CT_H
+
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_act_ct_ext {
+ int ifindex[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ return nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+#else
+ return NULL;
+#endif
+}
+
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct_ext;
+
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (dev_net(skb->dev) == &init_net && act_ct_ext)
+ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+
+ if (act_ct)
+ return act_ct;
+
+ act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ return act_ct;
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _NF_CONNTRACK_ACT_CT_H */
diff --git a/include/net/netfilter/nf_conntrack_bpf.h b/include/net/netfilter/nf_conntrack_bpf.h
new file mode 100644
index 000000000000..2d0da478c8e0
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_bpf.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NF_CONNTRACK_BPF_H
+#define _NF_CONNTRACK_BPF_H
+
+#include <linux/kconfig.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct nf_conn___init {
+ struct nf_conn ct;
+};
+
+#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_conntrack_bpf(void);
+extern void cleanup_nf_conntrack_bpf(void);
+
+#else
+
+static inline int register_nf_conntrack_bpf(void)
+{
+ return 0;
+}
+
+static inline void cleanup_nf_conntrack_bpf(void)
+{
+}
+
+#endif
+
+#if (IS_BUILTIN(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_nat_bpf(void);
+
+#else
+
+static inline int register_nf_nat_bpf(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _NF_CONNTRACK_BPF_H */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 09f2efea0b97..3384859a8921 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
void nf_conntrack_proto_pernet_init(struct net *net);
-void nf_conntrack_proto_pernet_fini(struct net *net);
int nf_conntrack_proto_init(void);
void nf_conntrack_proto_fini(void);
@@ -59,16 +58,20 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
int ret = NF_ACCEPT;
if (ct) {
- if (!nf_ct_is_confirmed(ct))
+ if (!nf_ct_is_confirmed(ct)) {
ret = __nf_conntrack_confirm(skb);
- if (likely(ret == NF_ACCEPT))
+
+ if (ret == NF_ACCEPT)
+ ct = (struct nf_conn *)skb_nfct(skb);
+ }
+
+ if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
nf_ct_deliver_cached_events(ct);
}
return ret;
}
-unsigned int nf_confirm(struct sk_buff *skb, unsigned int protoff,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo);
+unsigned int nf_confirm(void *priv, struct sk_buff *skb, const struct nf_hook_state *state);
void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l4proto *proto);
@@ -80,4 +83,21 @@ void nf_conntrack_lock(spinlock_t *lock);
extern spinlock_t nf_conntrack_expect_lock;
+/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */
+
+static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
+{
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+
+ if (nf_ct_is_confirmed(ct))
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
+ else
+ ct->timeout = (u32)timeout;
+}
+
+int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
+void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off);
+int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status);
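As a usage note for the shared helpers above: timeouts arrive from ctnetlink in seconds but are stored as jiffies. A minimal sketch of a caller (hypothetical wrapper name):

static void foo_apply_cta_timeout(struct nf_conn *ct, u32 timeout_secs)
{
        /* __nf_ct_set_timeout() expects jiffies; it clamps at INT_MAX
         * and uses WRITE_ONCE() once the entry is confirmed.
         */
        __nf_ct_set_timeout(ct, (u64)timeout_secs * HZ);
}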
+
#endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 9645b47fa7e4..e227d997fc71 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -10,6 +10,7 @@ struct nf_conncount_data;
struct nf_conncount_list {
spinlock_t list_lock;
+ u32 last_gc; /* jiffies at most recent gc */
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
};
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index eb81f9195e28..0c1dac318e02 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -14,17 +14,15 @@
#include <net/netfilter/nf_conntrack_extend.h>
enum nf_ct_ecache_state {
- NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
};
struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
- u16 missed; /* missed events */
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
- enum nf_ct_ecache_state state:8;/* ecache state */
+ u32 missed; /* missed events */
u32 portid; /* netlink portid of destroyer */
};
@@ -38,28 +36,12 @@ nf_ct_ecache_find(const struct nf_conn *ct)
#endif
}
-static inline struct nf_conntrack_ecache *
-nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+static inline bool nf_ct_ecache_exist(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
- struct nf_conntrack_ecache *e;
-
- if (!ctmask && !expmask && net->ct.sysctl_events) {
- ctmask = ~0;
- expmask = ~0;
- }
- if (!ctmask && !expmask)
- return NULL;
-
- e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
- if (e) {
- e->ctmask = ctmask;
- e->expmask = expmask;
- }
- return e;
+ return nf_ct_ext_exist(ct, NF_CT_EXT_ECACHE);
#else
- return NULL;
+ return false;
#endif
}
@@ -72,19 +54,26 @@ struct nf_ct_event {
int report;
};
+struct nf_exp_event {
+ struct nf_conntrack_expect *exp;
+ u32 portid;
+ int report;
+};
+
struct nf_ct_event_notifier {
- int (*fcn)(unsigned int events, struct nf_ct_event *item);
+ int (*ct_event)(unsigned int events, const struct nf_ct_event *item);
+ int (*exp_event)(unsigned int events, const struct nf_exp_event *item);
};
-int nf_conntrack_register_notifier(struct net *net,
- struct nf_ct_event_notifier *nb);
-void nf_conntrack_unregister_notifier(struct net *net,
- struct nf_ct_event_notifier *nb);
+void nf_conntrack_register_notifier(struct net *net,
+ const struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net);
void nf_ct_deliver_cached_events(struct nf_conn *ct);
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
u32 portid, int report);
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp);
#else
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct)
@@ -99,6 +88,10 @@ static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
return 0;
}
+static inline bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+ return false;
+}
#endif
static inline void
@@ -124,59 +117,38 @@ nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- const struct net *net = nf_ct_net(ct);
-
- if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
- return 0;
-
- return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
-#else
- return 0;
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#endif
+ return 0;
}
static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- const struct net *net = nf_ct_net(ct);
-
- if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
- return 0;
-
- return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
-#else
- return 0;
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#endif
+ return 0;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
-
-struct nf_exp_event {
- struct nf_conntrack_expect *exp;
- u32 portid;
- int report;
-};
-
-struct nf_exp_event_notifier {
- int (*fcn)(unsigned int events, struct nf_exp_event *item);
-};
-
-int nf_ct_expect_register_notifier(struct net *net,
- struct nf_exp_event_notifier *nb);
-void nf_ct_expect_unregister_notifier(struct net *net,
- struct nf_exp_event_notifier *nb);
-
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
u32 portid, int report);
+void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state);
+
void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
-int nf_conntrack_ecache_init(void);
-void nf_conntrack_ecache_fini(void);
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net);
+static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
+{
+ return net->ct.ecache_dwork_pending;
+}
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
@@ -186,43 +158,18 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
{
}
-static inline void nf_conntrack_ecache_pernet_init(struct net *net)
+static inline void nf_conntrack_ecache_work(struct net *net,
+ enum nf_ct_ecache_state s)
{
}
-static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
+static inline void nf_conntrack_ecache_pernet_init(struct net *net)
{
}
-static inline int nf_conntrack_ecache_init(void)
-{
- return 0;
-}
-
-static inline void nf_conntrack_ecache_fini(void)
+static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
{
}
-
+static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net) { return false; }
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
-
-static inline void nf_conntrack_ecache_delayed_work(struct net *net)
-{
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- if (!delayed_work_pending(&net->ct.ecache_dwork)) {
- schedule_delayed_work(&net->ct.ecache_dwork, HZ);
- net->ct.ecache_dwork_pending = true;
- }
-#endif
-}
-
-static inline void nf_conntrack_ecache_work(struct net *net)
-{
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- if (net->ct.ecache_dwork_pending) {
- net->ct.ecache_dwork_pending = false;
- mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
- }
-#endif
-}
-
#endif /*_NF_CONNTRACK_ECACHE_H*/
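With the ct and expect notifiers merged into one struct and the UNKNOWN state gone, the event fast path now asks a cheaper per-conntrack question: does this entry carry an ecache extension at all? A restatement of the guard the inline helpers above implement, using only functions declared in this header:

	/* Sketch: delivery is skipped per-conntrack via the extension
	 * check rather than by inspecting a per-netns RCU notifier pointer.
	 */
	static int example_report_new(struct nf_conn *ct)
	{
		if (!nf_ct_ecache_exist(ct))	/* no extension, no listeners */
			return 0;
		return nf_conntrack_eventmask_report(1 << IPCT_NEW, ct, 0, 0);
	}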
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 0855b60fba17..165e7a03b8e9 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -26,6 +26,15 @@ struct nf_conntrack_expect {
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_mask mask;
+ /* Usage count. */
+ refcount_t use;
+
+ /* Flags */
+ unsigned int flags;
+
+ /* Expectation class */
+ unsigned int class;
+
/* Function to call after setup and insertion */
void (*expectfn)(struct nf_conn *new,
struct nf_conntrack_expect *this);
@@ -39,15 +48,6 @@ struct nf_conntrack_expect {
/* Timer function; deletes the expectation. */
struct timer_list timeout;
- /* Usage count. */
- refcount_t use;
-
- /* Flags */
- unsigned int flags;
-
- /* Expectation class */
- unsigned int class;
-
#if IS_ENABLED(CONFIG_NF_NAT)
union nf_inet_addr saved_addr;
/* This is the original per-proto part, used to map the
@@ -100,7 +100,7 @@ nf_ct_expect_find_get(struct net *net,
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
const struct nf_conntrack_zone *zone,
- const struct nf_conntrack_tuple *tuple);
+ const struct nf_conntrack_tuple *tuple, bool unlink);
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
u32 portid, int report);
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index e1e588387103..0b247248b032 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -28,24 +28,18 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
NF_CT_EXT_SYNPROXY,
#endif
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ NF_CT_EXT_ACT_CT,
+#endif
NF_CT_EXT_NUM,
};
-#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
-#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
-#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
-#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
-#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
-#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
-#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
-#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
-#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
-
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
u8 offset[NF_CT_EXT_NUM];
u8 len;
- char data[];
+ unsigned int gen_id;
+ char data[] __aligned(8);
};
static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
@@ -58,33 +52,28 @@ static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}
-static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);
+
+static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
- if (!nf_ct_ext_exist(ct, id))
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, id))
return NULL;
+ if (unlikely(ext->gen_id))
+ return __nf_ct_ext_find(ext, id);
+
return (void *)ct->ext + ct->ext->offset[id];
}
-#define nf_ct_ext_find(ext, id) \
- ((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
-
-/* Destroy all relationships */
-void nf_ct_ext_destroy(struct nf_conn *ct);
/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
-struct nf_ct_ext_type {
- /* Destroys relationships (can be NULL). */
- void (*destroy)(struct nf_conn *ct);
-
- enum nf_ct_ext_id id;
-
- /* Length and min alignment. */
- u8 len;
- u8 align;
-};
+/* ext genid. if ext->gen_id != nf_conntrack_ext_genid, the extensions
+ * cannot be used anymore unless the conntrack has the CONFIRMED bit set.
+ */
+extern atomic_t nf_conntrack_ext_genid;
+void nf_ct_ext_bump_genid(void);
-int nf_ct_extend_register(const struct nf_ct_ext_type *type);
-void nf_ct_extend_unregister(const struct nf_ct_ext_type *type);
#endif /* _NF_CONNTRACK_EXTEND_H */
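The generation id turns extension teardown into lazy invalidation: bumping nf_conntrack_ext_genid marks extension areas stale without walking every conntrack. A hedged sketch of the kind of check a lookup performs; the exact rule lives in the out-of-line __nf_ct_ext_find(), so treat this as an approximation:

	/* Sketch only: an extension area stamped with an old generation is
	 * treated as absent unless the conntrack was already confirmed.
	 */
	static bool ext_still_valid(const struct nf_conn *ct)
	{
		unsigned int genid = atomic_read(&nf_conntrack_ext_genid);

		return ct->ext->gen_id == 0 ||		/* fast path: never bumped */
		       ct->ext->gen_id == genid ||
		       nf_ct_is_confirmed(ct);
	}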
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 37f0fbefb060..de2f956abf34 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -115,6 +115,11 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);
+int nf_ct_helper(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, u16 proto);
+int nf_ct_add_helper(struct nf_conn *ct, const char *name, u8 family,
+ u8 proto, bool nat, struct nf_conntrack_helper **hp);
+
void nf_ct_helper_destroy(struct nf_conn *ct);
static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
@@ -131,8 +136,6 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
return (void *)help->data;
}
-void nf_conntrack_helper_pernet_init(struct net *net);
-
int nf_conntrack_helper_init(void);
void nf_conntrack_helper_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 88186b95b3c2..1f47bef51722 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -32,7 +32,7 @@ struct nf_conntrack_l4proto {
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
- struct nf_conn *ct);
+ struct nf_conn *ct, bool destroy);
/* convert nfnetlink attributes to protoinfo */
int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -159,22 +159,26 @@ unsigned int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-__printf(3, 4) __cold
+__printf(4, 5) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...);
-__printf(5, 6) __cold
+__printf(4, 5) __cold
void nf_l4proto_log_invalid(const struct sk_buff *skb,
- struct net *net,
- u16 pf, u8 protonum,
+ const struct nf_hook_state *state,
+ u8 protonum,
const char *fmt, ...);
#else
-static inline __printf(5, 6) __cold
-void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
- u16 pf, u8 protonum, const char *fmt, ...) {}
-static inline __printf(3, 4) __cold
+static inline __printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ u8 protonum,
+ const char *fmt, ...) {}
+static inline __printf(4, 5) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
@@ -203,6 +207,20 @@ static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.icmpv6;
}
+
+/* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */
+static inline void nf_ct_set_tcp_be_liberal(struct nf_conn *ct)
+{
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+}
+
+/* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */
+static inline bool nf_conntrack_tcp_established(const struct nf_conn *ct)
+{
+ return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
+ test_bit(IPS_ASSURED_BIT, &ct->status);
+}
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
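Both new inlines dereference union members that only exist for TCP trackers, hence the repeated caller contract in their comments. A minimal sketch of compliant usage:

	/* Sketch: guard on the L4 protocol before touching ct->proto.tcp. */
	static bool example_flow_is_offloadable(const struct nf_conn *ct)
	{
		if (nf_ct_protonum(ct) != IPPROTO_TCP)
			return true;	/* non-TCP: no TCP state to check */

		return nf_conntrack_tcp_established(ct);
	}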
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index ba916411c4e1..6903f72bcc15 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -17,10 +17,18 @@ struct nf_conn_labels {
unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
};
+/* Can't use nf_ct_ext_find(); the flow dissector cannot use symbols
+ * exported by the nf_conntrack module.
+ */
static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
- return nf_ct_ext_find(ct, NF_CT_EXT_LABELS);
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
+ return NULL;
+
+ return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
#else
return NULL;
#endif
@@ -31,7 +39,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
#ifdef CONFIG_NF_CONNTRACK_LABELS
struct net *net = nf_ct_net(ct);
- if (net->ct.labels_used == 0)
+ if (atomic_read(&net->ct.labels_used) == 0)
return NULL;
return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
@@ -44,13 +52,9 @@ int nf_connlabels_replace(struct nf_conn *ct,
const u32 *data, const u32 *mask, unsigned int words);
#ifdef CONFIG_NF_CONNTRACK_LABELS
-int nf_conntrack_labels_init(void);
-void nf_conntrack_labels_fini(void);
int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
-static inline int nf_conntrack_labels_init(void) { return 0; }
-static inline void nf_conntrack_labels_fini(void) {}
static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
index 0a10b50537ae..883c414b768e 100644
--- a/include/net/netfilter/nf_conntrack_seqadj.h
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -42,7 +42,4 @@ int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, unsigned int protoff);
s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
-int nf_conntrack_seqadj_init(void);
-void nf_conntrack_seqadj_fini(void);
-
#endif /* _NF_CONNTRACK_SEQADJ_H */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 659b0ea25b4d..9fdaba911de6 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -17,14 +17,6 @@ struct nf_ct_timeout {
char data[];
};
-struct ctnl_timeout {
- struct list_head head;
- struct rcu_head rcu_head;
- refcount_t refcnt;
- char name[CTNL_TIMEOUT_NAME_MAX];
- struct nf_ct_timeout timeout;
-};
-
struct nf_conn_timeout {
struct nf_ct_timeout __rcu *timeout;
};
@@ -89,23 +81,11 @@ static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-int nf_conntrack_timeout_init(void);
-void nf_conntrack_timeout_fini(void);
void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num,
const char *timeout_name);
void nf_ct_destroy_timeout(struct nf_conn *ct);
#else
-static inline int nf_conntrack_timeout_init(void)
-{
- return 0;
-}
-
-static inline void nf_conntrack_timeout_fini(void)
-{
- return;
-}
-
static inline int nf_ct_set_timeout(struct net *net, struct nf_conn *ct,
u8 l3num, u8 l4num,
const char *timeout_name)
@@ -120,8 +100,12 @@ static inline void nf_ct_destroy_timeout(struct nf_conn *ct)
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
-extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
+struct nf_ct_timeout_hooks {
+ struct nf_ct_timeout *(*timeout_find_get)(struct net *net, const char *name);
+ void (*timeout_put)(struct nf_ct_timeout *timeout);
+};
+
+extern const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook;
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
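Folding the two bare function-pointer hooks into one RCU-published ops structure means a reader observes either both callbacks or neither, never a half-registered pair. A sketch of the consumer side, using only the names declared above:

	/* Sketch: both callbacks come from a single rcu_dereference(), so
	 * timeout_find_get() and timeout_put() always belong to the same
	 * registration.
	 */
	static struct nf_ct_timeout *example_timeout_get(struct net *net,
							 const char *name)
	{
		const struct nf_ct_timeout_hooks *h;
		struct nf_ct_timeout *t = NULL;

		rcu_read_lock();
		h = rcu_dereference(nf_ct_timeout_hook);
		if (h)
			t = h->timeout_find_get(net, name);
		rcu_read_unlock();
		return t;
	}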
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index 820ea34b6029..57138d974a9f 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -40,21 +40,8 @@ struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
void nf_conntrack_tstamp_pernet_init(struct net *net);
-
-int nf_conntrack_tstamp_init(void);
-void nf_conntrack_tstamp_fini(void);
#else
static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {}
-
-static inline int nf_conntrack_tstamp_init(void)
-{
- return 0;
-}
-
-static inline void nf_conntrack_tstamp_fini(void)
-{
- return;
-}
#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
#endif /* _NF_CONNTRACK_TSTAMP_H */
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 9334371c94e2..f7dd950ff250 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -67,6 +67,9 @@ struct nf_conntrack_tuple {
/* The protocol. */
u_int8_t protonum;
+ /* The direction must be ignored for the tuplehash */
+ struct { } __nfct_hash_offsetend;
+
/* The direction (for tuplehash) */
u_int8_t dir;
} dst;
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 16e8b2f8d006..a763dd327c6e 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -10,6 +10,8 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/flow_offload.h>
#include <net/dst.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
struct nf_flowtable;
struct nf_flow_rule;
@@ -21,6 +23,8 @@ struct nf_flow_key {
struct flow_dissector_key_control control;
struct flow_dissector_key_control enc_control;
struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
@@ -49,14 +53,17 @@ struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ bool (*gc)(const struct flow_offload *flow);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);
int (*action)(struct net *net,
- const struct flow_offload *flow,
+ struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
+ void (*get)(struct nf_flowtable *ft);
+ void (*put)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
@@ -67,12 +74,13 @@ enum nf_flowtable_flags {
};
struct nf_flowtable {
- struct list_head list;
- struct rhashtable rhashtable;
- int priority;
+ unsigned int flags; /* readonly in datapath */
+ int priority; /* control path (padding hole) */
+ struct rhashtable rhashtable; /* datapath, read-mostly members come first */
+
+ struct list_head list; /* slowpath parts */
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
- unsigned int flags;
struct flow_block flow_block;
struct rw_semaphore flow_block_lock; /* Guards flow_block */
possible_net_t net;
@@ -86,8 +94,18 @@ static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
enum flow_offload_tuple_dir {
FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
- FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
};
+#define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX
+
+enum flow_offload_xmit_type {
+ FLOW_OFFLOAD_XMIT_UNSPEC = 0,
+ FLOW_OFFLOAD_XMIT_NEIGH,
+ FLOW_OFFLOAD_XMIT_XFRM,
+ FLOW_OFFLOAD_XMIT_DIRECT,
+ FLOW_OFFLOAD_XMIT_TC,
+};
+
+#define NF_FLOW_TABLE_ENCAP_MAX 2
struct flow_offload_tuple {
union {
@@ -107,11 +125,34 @@ struct flow_offload_tuple {
u8 l3proto;
u8 l4proto;
- u8 dir;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
- u16 mtu;
+ /* All members above are keys for lookups, see flow_offload_hash(). */
+ struct { } __hash;
- struct dst_entry *dst_cache;
+ u8 dir:2,
+ xmit_type:3,
+ encap_num:2,
+ in_vlan_ingress:2;
+ u16 mtu;
+ union {
+ struct {
+ struct dst_entry *dst_cache;
+ u32 dst_cookie;
+ };
+ struct {
+ u32 ifidx;
+ u32 hw_ifidx;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ } out;
+ struct {
+ u32 iifidx;
+ } tc;
+ };
};
struct flow_offload_tuple_rhash {
@@ -126,8 +167,9 @@ enum nf_flow_flags {
NF_FLOW_HW,
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
- NF_FLOW_HW_REFRESH,
NF_FLOW_HW_PENDING,
+ NF_FLOW_HW_BIDIRECTIONAL,
+ NF_FLOW_HW_ESTABLISHED,
};
enum flow_offload_type {
@@ -147,6 +189,8 @@ struct flow_offload {
#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp (u32)jiffies
+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
+
static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
return (__s32)(timeout - nf_flowtable_time_stamp);
@@ -154,7 +198,23 @@ static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
struct nf_flow_route {
struct {
- struct dst_entry *dst;
+ struct dst_entry *dst;
+ struct {
+ u32 ifindex;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+ u8 num_encaps:2,
+ ingress_vlans:2;
+ } in;
+ struct {
+ u32 ifindex;
+ u32 hw_ifindex;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ } out;
+ enum flow_offload_xmit_type xmit_type;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
@@ -183,6 +243,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
}
list_add_tail(&block_cb->list, &block->cb_list);
+ up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->get)
+ flow_table->type->get(flow_table);
+ return 0;
unlock:
up_write(&flow_table->flow_block_lock);
@@ -205,17 +270,21 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
WARN_ON(true);
}
up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->put)
+ flow_table->type->put(flow_table);
}
-int flow_offload_route_init(struct flow_offload *flow,
- const struct nf_flow_route *route);
+void flow_offload_route_init(struct flow_offload *flow,
+ struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
- struct flow_offload *flow);
+ struct flow_offload *flow, bool force);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);
@@ -225,12 +294,12 @@ void nf_flow_table_free(struct nf_flowtable *flow_table);
void flow_offload_teardown(struct flow_offload *flow);
-int nf_flow_snat_port(const struct flow_offload *flow,
- struct sk_buff *skb, unsigned int thoff,
- u8 protocol, enum flow_offload_tuple_dir dir);
-int nf_flow_dnat_port(const struct flow_offload *flow,
- struct sk_buff *skb, unsigned int thoff,
- u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_snat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_dnat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
struct flow_ports {
__be16 source, dest;
@@ -252,17 +321,56 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
+
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd);
-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
-int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
+static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+{
+ __be16 proto;
+
+ proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+ switch (proto) {
+ case htons(PPP_IP):
+ return htons(ETH_P_IP);
+ case htons(PPP_IPV6):
+ return htons(ETH_P_IPV6);
+ }
+
+ return 0;
+}
+
+#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
+ this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \
+ this_cpu_dec((net)->ft.stat->count)
+
+#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
+int nf_flow_table_init_proc(struct net *net);
+void nf_flow_table_fini_proc(struct net *net);
+#else
+static inline int nf_flow_table_init_proc(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_flow_table_fini_proc(struct net *net)
+{
+}
+#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
+
#endif /* _NF_FLOW_TABLE_H */
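The new get/put callbacks pair with block callback registration earlier in this header: nf_flow_table_offload_add_cb() takes a reference after dropping flow_block_lock, and the del_cb path releases it. A hedged sketch of a flowtable type wiring these to a simple reference count; every nft_example_* name here is invented for illustration:

	/* Sketch only: a hypothetical flowtable type pinning itself while
	 * offload block callbacks are registered.
	 */
	static refcount_t nft_example_ft_refs = REFCOUNT_INIT(1);

	static void nft_example_ft_get(struct nf_flowtable *ft)
	{
		refcount_inc(&nft_example_ft_refs);
	}

	static void nft_example_ft_put(struct nf_flowtable *ft)
	{
		if (refcount_dec_and_test(&nft_example_ft_refs))
			nft_example_ft_release(ft);	/* hypothetical teardown */
	}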
diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h
new file mode 100644
index 000000000000..52e27920f829
--- /dev/null
+++ b/include/net/netfilter/nf_hooks_lwtunnel.h
@@ -0,0 +1,7 @@
+#include <linux/sysctl.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SYSCTL
+int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+#endif
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 0d3920896d50..e55eedc84ed7 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -68,7 +68,6 @@ void nf_log_unbind_pf(struct net *net, u_int8_t pf);
int nf_logger_find_get(int pf, enum nf_log_type type);
void nf_logger_put(int pf, enum nf_log_type type);
-void nf_logger_request_module(int pf, enum nf_log_type type);
#define MODULE_ALIAS_NF_LOGGER(family, type) \
MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
@@ -99,27 +98,4 @@ struct nf_log_buf;
struct nf_log_buf *nf_log_buf_open(void);
__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...);
void nf_log_buf_close(struct nf_log_buf *m);
-
-/* common logging functions */
-int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
- u8 proto, int fragment, unsigned int offset);
-int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
- u8 proto, int fragment, unsigned int offset,
- unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
- struct sock *sk);
-void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
- unsigned int hooknum, const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo,
- const char *prefix);
-void nf_log_l2packet(struct net *net, u_int8_t pf,
- __be16 protocol,
- unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo, const char *prefix);
-
#endif /* _NF_LOG_H */
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 0d412dd63707..9877f064548a 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -104,9 +104,11 @@ unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
-int nf_xfrm_me_harder(struct net *n, struct sk_buff *s, unsigned int family);
+int nf_ct_nat(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int *action,
+ const struct nf_nat_range2 *range, bool commit);
-static inline int nf_nat_initialized(struct nf_conn *ct,
+static inline int nf_nat_initialized(const struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
if (manip == NF_NAT_MANIP_SRC)
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index efae84646353..44c421b9be85 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -38,4 +38,5 @@ bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
* to port ct->master->saved_proto. */
void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
+u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port);
#endif
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
index 2418653a66db..279380de904c 100644
--- a/include/net/netfilter/nf_nat_redirect.h
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -6,8 +6,7 @@
#include <uapi/linux/netfilter/nf_nat.h>
unsigned int
-nf_nat_redirect_ipv4(struct sk_buff *skb,
- const struct nf_nat_ipv4_multi_range_compat *mr,
+nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum);
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index e770bba00066..4aeffddb7586 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -33,17 +33,16 @@ struct nf_queue_handler {
void (*nf_hook_drop)(struct net *net);
};
-void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(struct net *net);
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_register_queue_handler(const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(void);
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
static inline void init_hashrandom(u32 *jhash_initval)
{
while (*jhash_initval == 0)
- *jhash_initval = prandom_u32();
+ *jhash_initval = get_random_u32();
}
static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
index 9051c3a0c8e7..7c669792fb9c 100644
--- a/include/net/netfilter/nf_reject.h
+++ b/include/net/netfilter/nf_reject.h
@@ -5,12 +5,28 @@
#include <linux/types.h>
#include <uapi/linux/in.h>
-static inline bool nf_reject_verify_csum(__u8 proto)
+static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
+ __u8 proto)
{
/* Skip protocols that don't use 16-bit one's complement checksum
* of the entire payload.
*/
switch (proto) {
+ /* Protocols with optional checksums. */
+ case IPPROTO_UDP: {
+ const struct udphdr *udp_hdr;
+ struct udphdr _udp_hdr;
+
+ udp_hdr = skb_header_pointer(skb, dataoff,
+ sizeof(_udp_hdr),
+ &_udp_hdr);
+ if (!udp_hdr || udp_hdr->check)
+ return true;
+
+ return false;
+ }
+ case IPPROTO_GRE:
+
/* Protocols with other integrity checks. */
case IPPROTO_AH:
case IPPROTO_ESP:
@@ -19,9 +35,6 @@ static inline bool nf_reject_verify_csum(__u8 proto)
/* Protocols with partial checksums. */
case IPPROTO_UDPLITE:
case IPPROTO_DCCP:
-
- /* Protocols with optional checksums. */
- case IPPROTO_GRE:
return false;
}
return true;
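The UDP case encodes the rule that a zero checksum field means the sender computed no checksum (legal over IPv4), so the reject path must not fail such packets on verification; GRE meanwhile moves from the old "optional" group into the fall-through that returns false. A compact restatement of the UDP decision:

	/* Sketch: verify only when a checksum is actually present; an
	 * unreadable header is verified (and rejected) conservatively.
	 */
	static bool udp_csum_should_be_verified(const struct udphdr *uh)
	{
		return !uh || uh->check != 0;
	}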
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 224d194ad29d..e27c28b612e4 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -13,42 +13,63 @@
#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
#include <net/flow_offload.h>
+#include <net/netns/generic.h>
+
+#define NFT_MAX_HOOKS (NF_INET_INGRESS + 1)
struct module;
#define NFT_JUMP_STACK_SIZE 16
+enum {
+ NFT_PKTINFO_L4PROTO = (1 << 0),
+ NFT_PKTINFO_INNER = (1 << 1),
+ NFT_PKTINFO_INNER_FULL = (1 << 2),
+};
+
struct nft_pktinfo {
struct sk_buff *skb;
- bool tprot_set;
+ const struct nf_hook_state *state;
+ u8 flags;
u8 tprot;
- /* for x_tables compatibility */
- struct xt_action_param xt;
+ u16 fragoff;
+ u16 thoff;
+ u16 inneroff;
};
+static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->sk;
+}
+
+static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt)
+{
+ return pkt->thoff;
+}
+
static inline struct net *nft_net(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->net;
+ return pkt->state->net;
}
static inline unsigned int nft_hook(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->hook;
+ return pkt->state->hook;
}
static inline u8 nft_pf(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->pf;
+ return pkt->state->pf;
}
static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->in;
+ return pkt->state->in;
}
static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->out;
+ return pkt->state->out;
}
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
@@ -56,16 +77,15 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->xt.state = state;
+ pkt->state = state;
}
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
{
- pkt->tprot_set = false;
+ pkt->flags = 0;
pkt->tprot = 0;
- pkt->xt.thoff = 0;
- pkt->xt.fragoff = 0;
+ pkt->thoff = 0;
+ pkt->fragoff = 0;
}
/**
@@ -86,6 +106,8 @@ struct nft_data {
};
} __attribute__((aligned(__alignof__(u64))));
+#define NFT_REG32_NUM 20
+
/**
* struct nft_regs - nf_tables register set
*
@@ -96,11 +118,22 @@ struct nft_data {
*/
struct nft_regs {
union {
- u32 data[20];
+ u32 data[NFT_REG32_NUM];
struct nft_verdict verdict;
};
};
+struct nft_regs_track {
+ struct {
+ const struct nft_expr *selector;
+ const struct nft_expr *bitwise;
+ u8 num_reg;
+ } regs[NFT_REG32_NUM];
+
+ const struct nft_expr *cur;
+ const struct nft_expr *last;
+};
+
/* Store/load an u8, u16 or u64 integer to/from the u32 data register.
*
* Note, when using concatenations, register allocation happens at 32-bit
@@ -125,14 +158,29 @@ static inline void nft_reg_store16(u32 *dreg, u16 val)
*(u16 *)dreg = val;
}
+static inline void nft_reg_store_be16(u32 *dreg, __be16 val)
+{
+ nft_reg_store16(dreg, (__force __u16)val);
+}
+
static inline u16 nft_reg_load16(const u32 *sreg)
{
return *(u16 *)sreg;
}
-static inline void nft_reg_store64(u32 *dreg, u64 val)
+static inline __be16 nft_reg_load_be16(const u32 *sreg)
+{
+ return (__force __be16)nft_reg_load16(sreg);
+}
+
+static inline __be32 nft_reg_load_be32(const u32 *sreg)
+{
+ return *(__force __be32 *)sreg;
+}
+
+static inline void nft_reg_store64(u64 *dreg, u64 val)
{
- put_unaligned(val, (u64 *)dreg);
+ put_unaligned(val, dreg);
}
static inline u64 nft_reg_load64(const u32 *sreg)
@@ -148,13 +196,6 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
memcpy(dst, src, len);
}
-static inline void nft_data_debug(const struct nft_data *data)
-{
- pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
- data->data[0], data->data[1],
- data->data[2], data->data[3]);
-}
-
/**
* struct nft_ctx - nf_tables rule/set context
*
@@ -164,6 +205,7 @@ static inline void nft_data_debug(const struct nft_data *data)
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
+ * @flags: modifiers to new request
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
@@ -181,13 +223,18 @@ struct nft_ctx {
bool report;
};
+enum nft_data_desc_flags {
+ NFT_DATA_DESC_SETELEM = (1 << 0),
+};
+
struct nft_data_desc {
enum nft_data_types type;
+ unsigned int size;
unsigned int len;
+ unsigned int flags;
};
-int nft_data_init(const struct nft_ctx *ctx,
- struct nft_data *data, unsigned int size,
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
void nft_data_release(const struct nft_data *data, enum nft_data_types type);
@@ -205,14 +252,13 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
}
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
-unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
-int nft_validate_register_load(enum nft_registers reg, unsigned int len);
-int nft_validate_register_store(const struct nft_ctx *ctx,
- enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type, unsigned int len);
+int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_store(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *dreg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
/**
* struct nft_userdata - user defined data associated with an object
@@ -229,11 +275,15 @@ struct nft_userdata {
unsigned char data[];
};
+/* placeholder structure for opaque set element backend representation. */
+struct nft_elem_priv { };
+
/**
* struct nft_set_elem - generic representation of set elements
*
* @key: element key
* @key_end: closing element key
+ * @data: element data
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -249,9 +299,14 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} data;
- void *priv;
+ struct nft_elem_priv *priv;
};
+static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
+{
+ return (void *)priv;
+}
+
struct nft_set;
struct nft_set_iter {
u8 genmask;
@@ -261,23 +316,35 @@ struct nft_set_iter {
int (*fn)(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
- struct nft_set_elem *elem);
+ struct nft_elem_priv *elem_priv);
};
/**
* struct nft_set_desc - description of set elements
*
+ * @ktype: key type
* @klen: key length
+ * @dtype: data type
* @dlen: data length
+ * @objtype: object type
* @size: number of set elements
+ * @policy: set policy
+ * @gc_int: garbage collector interval
+ * @timeout: element timeout
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
* @expr: set must support for expressions
*/
struct nft_set_desc {
+ u32 ktype;
unsigned int klen;
+ u32 dtype;
unsigned int dlen;
+ u32 objtype;
unsigned int size;
+ u32 policy;
+ u32 gc_int;
+ u64 timeout;
u8 field_len[NFT_REG32_COUNT];
u8 field_count;
bool expr;
@@ -286,9 +353,9 @@ struct nft_set_desc {
/**
* enum nft_set_class - performance class
*
- * @NFT_LOOKUP_O_1: constant, O(1)
- * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- * @NFT_LOOKUP_O_N: linear, O(N)
+ * @NFT_SET_CLASS_O_1: constant, O(1)
+ * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ * @NFT_SET_CLASS_O_N: linear, O(N)
*/
enum nft_set_class {
NFT_SET_CLASS_O_1,
@@ -310,8 +377,39 @@ struct nft_set_estimate {
enum nft_set_class space;
};
+#define NFT_EXPR_MAXATTR 16
+#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
+ ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ * struct nft_expr - nf_tables expression
+ *
+ * @ops: expression ops
+ * @data: expression private data
+ */
+struct nft_expr {
+ const struct nft_expr_ops *ops;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+ return (void *)expr->data;
+}
+
+struct nft_expr_info;
+
+int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
+ struct nft_expr_info *info);
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr, bool reset);
+bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+
struct nft_set_ext;
-struct nft_expr;
/**
* struct nft_set_ops - nf_tables set operations
@@ -326,9 +424,13 @@ struct nft_expr;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @commit: commit set elements
+ * @abort: abort set elements
* @privsize: function to return size of set private data
+ * @estimate: estimate the required memory size and the lookup complexity class
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
+ * @gc_init: initialize garbage collection
* @elemsize: element private size
*
* Operations lookup, update and delete have simpler interfaces, are faster
@@ -342,7 +444,8 @@ struct nft_set_ops {
const struct nft_set_ext **ext);
bool (*update)(struct nft_set *set,
const u32 *key,
- void *(*new)(struct nft_set *,
+ struct nft_elem_priv *
+ (*new)(struct nft_set *,
const struct nft_expr *,
struct nft_regs *),
const struct nft_expr *expr,
@@ -354,27 +457,28 @@ struct nft_set_ops {
int (*insert)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem,
- struct nft_set_ext **ext);
+ struct nft_elem_priv **priv);
void (*activate)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
- void * (*deactivate)(const struct net *net,
+ struct nft_elem_priv *elem_priv);
+ struct nft_elem_priv * (*deactivate)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem);
- bool (*flush)(const struct net *net,
+ void (*flush)(const struct net *net,
const struct nft_set *set,
- void *priv);
+ struct nft_elem_priv *priv);
void (*remove)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
+ struct nft_elem_priv *elem_priv);
void (*walk)(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_iter *iter);
- void * (*get)(const struct net *net,
+ struct nft_elem_priv * (*get)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem,
unsigned int flags);
-
+ void (*commit)(struct nft_set *set);
+ void (*abort)(const struct nft_set *set);
u64 (*privsize)(const struct nlattr * const nla[],
const struct nft_set_desc *desc);
bool (*estimate)(const struct nft_set_desc *desc,
@@ -383,7 +487,8 @@ struct nft_set_ops {
int (*init)(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const nla[]);
- void (*destroy)(const struct nft_set *set);
+ void (*destroy)(const struct nft_ctx *ctx,
+ const struct nft_set *set);
void (*gc_init)(const struct nft_set *set);
unsigned int elemsize;
@@ -401,11 +506,28 @@ struct nft_set_type {
};
#define to_set_type(o) container_of(o, struct nft_set_type, ops)
+struct nft_set_elem_expr {
+ u8 size;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+#define nft_setelem_expr_at(__elem_expr, __offset) \
+ ((struct nft_expr *)&__elem_expr->data[__offset])
+
+#define nft_setelem_expr_foreach(__expr, __elem_expr, __size) \
+ for (__expr = nft_setelem_expr_at(__elem_expr, 0), __size = 0; \
+ __size < (__elem_expr)->size; \
+ __size += (__expr)->ops->size, __expr = ((void *)(__expr)) + (__expr)->ops->size)
+
+#define NFT_SET_EXPR_MAX 2
+
/**
* struct nft_set - nf_tables set instance
*
* @list: table set list node
* @bindings: list of set bindings
+ * @refs: internal refcounting for async set destruction
* @table: table this set belongs to
* @net: netnamespace this set belongs to
* @name: name of the set
@@ -424,17 +546,22 @@ struct nft_set_type {
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
- * @expr: stateful expression
+ * @pending_update: list of set elements with pending updates
* @ops: set ops
* @flags: set flags
+ * @dead: set will be freed, never cleared
* @genmask: generation mask
* @klen: key length
* @dlen: data length
+ * @num_exprs: number of expressions
+ * @exprs: stateful expressions
+ * @catchall_list: list of catch-all set elements
* @data: private set data
*/
struct nft_set {
struct list_head list;
struct list_head bindings;
+ refcount_t refs;
struct nft_table *table;
possible_net_t net;
char *name;
@@ -453,13 +580,17 @@ struct nft_set {
u16 policy;
u16 udlen;
unsigned char *udata;
- struct nft_expr *expr;
+ struct list_head pending_update;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
- u16 flags:14,
+ u16 flags:13,
+ dead:1,
genmask:2;
u8 klen;
u8 dlen;
+ u8 num_exprs;
+ struct nft_expr *exprs[NFT_SET_EXPR_MAX];
+ struct list_head catchall_list;
unsigned char data[]
__attribute__((aligned(__alignof__(u64))));
};
@@ -474,6 +605,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+{
+ return refcount_read(&s->refs) != 1;
+}
+
static inline struct nft_set *nft_set_container_of(const void *priv)
{
return (void *)priv - offsetof(struct nft_set, data);
@@ -485,9 +621,14 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
const struct nlattr *nla_set_id,
u8 genmask);
+struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
+ const struct nft_set *set);
+
static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
{
- return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
+ u32 gc_int = READ_ONCE(set->gc_int);
+
+ return gc_int ? msecs_to_jiffies(gc_int) : HZ;
}
/**
@@ -507,6 +648,7 @@ struct nft_set_binding {
};
enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase);
@@ -524,7 +666,7 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* @NFT_SET_EXT_TIMEOUT: element timeout
* @NFT_SET_EXT_EXPIRATION: element expiration time
* @NFT_SET_EXT_USERDATA: user data associated with the element
- * @NFT_SET_EXT_EXPR: expression assiociated with the element
+ * @NFT_SET_EXT_EXPRESSIONS: expressions associated with the element
* @NFT_SET_EXT_OBJREF: stateful object reference associated with element
* @NFT_SET_EXT_NUM: number of extension types
*/
@@ -536,7 +678,7 @@ enum nft_set_extensions {
NFT_SET_EXT_TIMEOUT,
NFT_SET_EXT_EXPIRATION,
NFT_SET_EXT_USERDATA,
- NFT_SET_EXT_EXPR,
+ NFT_SET_EXT_EXPRESSIONS,
NFT_SET_EXT_OBJREF,
NFT_SET_EXT_NUM
};
@@ -559,10 +701,12 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
*
* @len: length of extension area
* @offset: offsets of individual extension types
+ * @ext_len: length of the expected extension (used for sanity checks)
*/
struct nft_set_ext_tmpl {
u16 len;
u8 offset[NFT_SET_EXT_NUM];
+ u8 ext_len[NFT_SET_EXT_NUM];
};
/**
@@ -584,18 +728,23 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
tmpl->len = sizeof(struct nft_set_ext);
}
-static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
- unsigned int len)
+static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+ unsigned int len)
{
tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
- BUG_ON(tmpl->len > U8_MAX);
+ if (tmpl->len > U8_MAX)
+ return -EINVAL;
+
tmpl->offset[id] = tmpl->len;
- tmpl->len += nft_set_ext_types[id].len + len;
+ tmpl->ext_len[id] = nft_set_ext_types[id].len + len;
+ tmpl->len += tmpl->ext_len[id];
+
+ return 0;
}
-static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
{
- nft_set_ext_add_length(tmpl, id, 0);
+ return nft_set_ext_add_length(tmpl, id, 0);
}
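Turning the BUG_ON() into an -EINVAL return pushes oversized-template handling back to the control plane, and @ext_len records each extension's expected size for later sanity checks. A sketch of the resulting call pattern; the choice of extensions here is arbitrary:

	/* Sketch: template setup must now check for overflow instead of
	 * relying on a BUG_ON() crash.
	 */
	static int example_build_tmpl(struct nft_set_ext_tmpl *tmpl, u32 udlen)
	{
		int err;

		nft_set_ext_prepare(tmpl);
		err = nft_set_ext_add(tmpl, NFT_SET_EXT_TIMEOUT);
		if (err < 0)
			return err;	/* was a BUG_ON() before this change */
		return nft_set_ext_add_length(tmpl, NFT_SET_EXT_USERDATA, udlen);
	}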
static inline void nft_set_ext_init(struct nft_set_ext *ext,
@@ -654,21 +803,27 @@ static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext
return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
}
-static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
+static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
{
- return nft_set_ext(ext, NFT_SET_EXT_EXPR);
+ return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
}
-static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
+ u64 tstamp)
{
return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
+ time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return __nft_set_elem_expired(ext, get_jiffies_64());
}
static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
- void *elem)
+ const struct nft_elem_priv *elem_priv)
{
- return elem + set->ops->elemsize;
+ return (void *)elem_priv + set->ops->elemsize;
}
static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
@@ -680,68 +835,19 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nlattr *attr);
-void *nft_set_elem_init(const struct nft_set *set,
- const struct nft_set_ext_tmpl *tmpl,
- const u32 *key, const u32 *key_end, const u32 *data,
- u64 timeout, u64 expiration, gfp_t gfp);
-void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *key_end,
+ const u32 *data,
+ u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_expr *expr_array[]);
+void nft_set_elem_destroy(const struct nft_set *set,
+ const struct nft_elem_priv *elem_priv,
bool destroy_expr);
-
-/**
- * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
- *
- * @rcu: rcu head
- * @set: set the elements belong to
- * @cnt: count of elements
- */
-struct nft_set_gc_batch_head {
- struct rcu_head rcu;
- const struct nft_set *set;
- unsigned int cnt;
-};
-
-#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \
- sizeof(struct nft_set_gc_batch_head)) / \
- sizeof(void *))
-
-/**
- * struct nft_set_gc_batch - nf_tables set garbage collection batch
- *
- * @head: GC batch head
- * @elems: garbage collection elements
- */
-struct nft_set_gc_batch {
- struct nft_set_gc_batch_head head;
- void *elems[NFT_SET_GC_BATCH_SIZE];
-};
-
-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
- gfp_t gfp);
-void nft_set_gc_batch_release(struct rcu_head *rcu);
-
-static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
-{
- if (gcb != NULL)
- call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
-}
-
-static inline struct nft_set_gc_batch *
-nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
- gfp_t gfp)
-{
- if (gcb != NULL) {
- if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
- return gcb;
- nft_set_gc_batch_complete(gcb);
- }
- return nft_set_gc_batch_alloc(set, gfp);
-}
-
-static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
- void *elem)
-{
- gcb->elems[gcb->head.cnt++] = elem;
-}
+void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_elem_priv *elem_priv);
struct nft_expr_ops;
/**
@@ -750,6 +856,7 @@ struct nft_expr_ops;
* @select_ops: function to select nft_expr_ops
* @release_ops: release nft_expr_ops
* @ops: default ops, used when no select_ops functions is present
+ * @inner_ops: inner ops, used for inner packet operation
* @list: used internally
* @name: Identifier
* @owner: module reference
@@ -763,6 +870,7 @@ struct nft_expr_type {
const struct nlattr * const tb[]);
void (*release_ops)(const struct nft_expr_ops *ops);
const struct nft_expr_ops *ops;
+ const struct nft_expr_ops *inner_ops;
struct list_head list;
const char *name;
struct module *owner;
@@ -777,6 +885,7 @@ struct nft_expr_type {
enum nft_trans_phase {
NFT_TRANS_PREPARE,
+ NFT_TRANS_PREPARE_ERROR,
NFT_TRANS_ABORT,
NFT_TRANS_COMMIT,
NFT_TRANS_RELEASE
@@ -789,17 +898,24 @@ struct nft_offload_ctx;
* struct nft_expr_ops - nf_tables expression operations
*
* @eval: Expression evaluation function
+ * @clone: Expression clone function
* @size: full expression size, including private data size
* @init: initialization function
* @activate: activate expression in the next generation
* @deactivate: deactivate expression in next generation
* @destroy: destruction function, called after synchronize_rcu
+ * @destroy_clone: destruction clone function
* @dump: function to dump parameters
- * @type: expression type
* @validate: validate expression, called during loop detection
+ * @reduce: reduce expression
+ * @gc: garbage collection expression
+ * @offload: hardware offload expression
+ * @offload_action: function to report whether the expression needs its own
+ * slot in the flow offload action array
+ * @offload_stats: function to synchronize hardware stats by updating the counter expression
+ * @type: expression type
* @data: extra data to attach to this expression operation
*/
-struct nft_expr;
struct nft_expr_ops {
void (*eval)(const struct nft_expr *expr,
struct nft_regs *regs,
@@ -821,46 +937,25 @@ struct nft_expr_ops {
void (*destroy_clone)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
int (*dump)(struct sk_buff *skb,
- const struct nft_expr *expr);
+ const struct nft_expr *expr,
+ bool reset);
int (*validate)(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
+ bool (*reduce)(struct nft_regs_track *track,
+ const struct nft_expr *expr);
bool (*gc)(struct net *net,
const struct nft_expr *expr);
int (*offload)(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr);
- u32 offload_flags;
+ bool (*offload_action)(const struct nft_expr *expr);
+ void (*offload_stats)(struct nft_expr *expr,
+ const struct flow_stats *stats);
const struct nft_expr_type *type;
void *data;
};
-#define NFT_EXPR_MAXATTR 16
-#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
- ALIGN(size, __alignof__(struct nft_expr)))
-
-/**
- * struct nft_expr - nf_tables expression
- *
- * @ops: expression ops
- * @data: expression private data
- */
-struct nft_expr {
- const struct nft_expr_ops *ops;
- unsigned char data[]
- __attribute__((aligned(__alignof__(u64))));
-};
-
-static inline void *nft_expr_priv(const struct nft_expr *expr)
-{
- return (void *)expr->data;
-}
-
-int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
-void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
-int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
- const struct nft_expr *expr);
-
/**
* struct nft_rule - nf_tables rule
*
@@ -896,22 +991,37 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
return (struct nft_expr *)&rule->data[rule->dlen];
}
+static inline bool nft_expr_more(const struct nft_rule *rule,
+ const struct nft_expr *expr)
+{
+ return expr != nft_expr_last(rule) && expr->ops;
+}
+
static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
{
return (void *)&rule->data[rule->dlen];
}
-void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule);
+void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule);
+void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule,
+ enum nft_trans_phase phase);
+void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule);
static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
+ struct nft_set_elem_expr *elem_expr;
struct nft_expr *expr;
-
- if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
- expr = nft_set_ext_expr(ext);
- expr->ops->eval(expr, regs, pkt);
+ u32 size;
+
+ if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS)) {
+ elem_expr = nft_set_ext_expr(ext);
+ nft_setelem_expr_foreach(expr, elem_expr, size) {
+ expr->ops->eval(expr, regs, pkt);
+ if (regs->verdict.code == NFT_BREAK)
+ return;
+ }
}
}
@@ -927,21 +1037,54 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
#define NFT_CHAIN_POLICY_UNSET U8_MAX
+struct nft_rule_dp {
+ u64 is_last:1,
+ dlen:12,
+ handle:42; /* for tracing */
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+struct nft_rule_dp_last {
+ struct nft_rule_dp end; /* end of nft_rule_blob marker */
+ struct rcu_head h; /* call_rcu head */
+ struct nft_rule_blob *blob; /* ptr to free via call_rcu */
+ const struct nft_chain *chain; /* for nftables tracing */
+};
+
+static inline const struct nft_rule_dp *nft_rule_next(const struct nft_rule_dp *rule)
+{
+ return (void *)rule + sizeof(*rule) + rule->dlen;
+}
+
+struct nft_rule_blob {
+ unsigned long size;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_rule_dp))));
+};
+
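/*
 * Illustrative sketch (editorial addition, not part of the patch): the
 * data path walks the flat blob rule by rule until it reaches the
 * is_last marker; the nft_rule_dp_last trailer carrying the RCU
 * bookkeeping sits directly behind that marker.
 */
static inline void example_walk_blob(const struct nft_rule_blob *blob)
{
	const struct nft_rule_dp *rule = (const struct nft_rule_dp *)blob->data;

	while (!rule->is_last) {
		/* rule->data holds dlen bytes of expressions */
		rule = nft_rule_next(rule);
	}
}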
/**
* struct nft_chain - nf_tables chain
*
+ * @blob_gen_0: rule blob pointer to the current generation
+ * @blob_gen_1: rule blob pointer to the future generation
* @rules: list of rules in the chain
* @list: used internally
* @rhlhead: used internally
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @flags: bitmask of enum nft_chain_flags
+ * @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ * @bound: true if the chain has been bound to a rule
+ * @genmask: generation mask
* @name: name of the chain
+ * @udlen: user data length
+ * @udata: user data in the chain
+ * @blob_next: rule blob pointer for the next generation (commit phase only)
*/
struct nft_chain {
- struct nft_rule *__rcu *rules_gen_0;
- struct nft_rule *__rcu *rules_gen_1;
+ struct nft_rule_blob __rcu *blob_gen_0;
+ struct nft_rule_blob __rcu *blob_gen_1;
struct list_head rules;
struct list_head list;
struct rhlist_head rhlhead;
@@ -952,12 +1095,20 @@ struct nft_chain {
bound:1,
genmask:2;
char *name;
+ u16 udlen;
+ u8 *udata;
/* Only used during control plane commit phase: */
- struct nft_rule **rules_next;
+ struct nft_rule_blob *blob_next;
};
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
+int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nft_set_iter *iter,
+ struct nft_elem_priv *elem_priv);
+int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
+int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
+void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
enum nft_chain_types {
NFT_CHAIN_T_DEFAULT = 0,
@@ -984,7 +1135,7 @@ struct nft_chain_type {
int family;
struct module *owner;
unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
+ nf_hookfn *hooks[NFT_MAX_HOOKS];
int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
};
@@ -994,11 +1145,17 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
int nft_chain_validate_hooks(const struct nft_chain *chain,
unsigned int hook_flags);
+static inline bool nft_chain_binding(const struct nft_chain *chain)
+{
+ return chain->flags & NFT_CHAIN_BINDING;
+}
+
static inline bool nft_chain_is_bound(struct nft_chain *chain)
{
return (chain->flags & NFT_CHAIN_BINDING) && chain->bound;
}
+int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
void nft_chain_del(struct nft_chain *chain);
void nf_tables_chain_destroy(struct nft_ctx *ctx);
@@ -1010,7 +1167,6 @@ struct nft_stats {
struct nft_hook {
struct list_head list;
- bool inactive;
struct nf_hook_ops ops;
struct rcu_head rcu;
};
@@ -1022,6 +1178,7 @@ struct nft_hook {
* @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
+ * @flags: indicates whether the base chain is disabled
* @stats: per-cpu chain stats
* @chain: the chain
* @flow_block: flow block (for hardware offload)
@@ -1051,6 +1208,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
+static inline bool nft_use_inc(u32 *use)
+{
+ if (*use == UINT_MAX)
+ return false;
+
+ (*use)++;
+
+ return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+ WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+ WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore nft_use_dec
+
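/*
 * Illustrative sketch (editorial addition, not part of the patch): how
 * the saturating helpers above are typically paired; -EMFILE mirrors
 * what the nf_tables control plane returns when a counter saturates.
 */
static inline int example_ref_chain(struct nft_chain *chain)
{
	if (!nft_use_inc(&chain->use))
		return -EMFILE;	/* would overflow UINT_MAX */

	/* ... if a later step fails, undo with:
	 *	nft_use_dec_restore(&chain->use);
	 */
	return 0;
}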
/**
* struct nft_table - nf_tables table
*
@@ -1063,10 +1243,14 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* @hgenerator: handle generator state
* @handle: table handle
* @use: number of chain references to this table
+ * @family: address family
* @flags: table flag (see enum nft_table_flags)
* @genmask: generation mask
- * @afinfo: address family info
+ * @nlpid: netlink port ID
* @name: name of the table
+ * @udlen: length of the user data
+ * @udata: user data
+ * @validate_state: internal, set when transaction adds jumps
*/
struct nft_table {
struct list_head list;
@@ -1081,9 +1265,30 @@ struct nft_table {
u16 family:6,
flags:8,
genmask:2;
+ u32 nlpid;
char *name;
+ u16 udlen;
+ u8 *udata;
+ u8 validate_state;
};
+static inline bool nft_table_has_owner(const struct nft_table *table)
+{
+ return table->flags & NFT_TABLE_F_OWNER;
+}
+
+static inline bool nft_table_is_orphan(const struct nft_table *table)
+{
+ return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
+ NFT_TABLE_F_PERSIST;
+}
+
+static inline bool nft_base_chain_netdev(int family, u32 hooknum)
+{
+ return family == NFPROTO_NETDEV ||
+ (family == NFPROTO_INET && hooknum == NF_INET_INGRESS);
+}
+
void nft_register_chain_type(const struct nft_chain_type *);
void nft_unregister_chain_type(const struct nft_chain_type *);
@@ -1108,11 +1313,13 @@ struct nft_object_hash_key {
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
- * @key: keys that identify this object
* @rhlhead: nft_objname_ht node
+ * @key: keys that identify this object
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
+ * @udlen: length of user data
+ * @udata: user data
* @ops: object operations
* @data: object data, layout depends on type
*/
@@ -1120,9 +1327,11 @@ struct nft_object {
struct list_head list;
struct rhlist_head rhlhead;
struct nft_object_hash_key key;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
+ u16 udlen;
+ u8 *udata;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
@@ -1143,7 +1352,7 @@ struct nft_object *nft_obj_lookup(const struct net *net,
void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
- int event, int family, int report, gfp_t gfp);
+ int event, u16 flags, int family, int report, gfp_t gfp);
/**
* struct nft_object_type - stateful object type
@@ -1154,6 +1363,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
* @type: stateful object numeric type
* @owner: module owner
* @maxattr: maximum netlink attribute
+ * @family: address family for AF-specific object types
* @policy: netlink attribute policy
*/
struct nft_object_type {
@@ -1163,6 +1373,7 @@ struct nft_object_type {
struct list_head list;
u32 type;
unsigned int maxattr;
+ u8 family;
struct module *owner;
const struct nla_policy *policy;
};
@@ -1176,6 +1387,7 @@ struct nft_object_type {
* @destroy: release existing stateful object
* @dump: netlink dump stateful object
* @update: update stateful object
+ * @type: pointer to object type
*/
struct nft_object_ops {
void (*eval)(struct nft_object *obj,
@@ -1211,9 +1423,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
- * @dev_name: array of device names
+ * @hook_list: list of hooks, one per net_device the flowtable is bound to
* @data: rhashtable and garbage collector
- * @ops: array of hooks
*/
struct nft_flowtable {
struct list_head list;
@@ -1221,8 +1432,8 @@ struct nft_flowtable {
char *name;
int hooknum;
int ops_len;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
/* runtime data below here */
struct list_head hook_list ____cacheline_aligned;
@@ -1243,31 +1454,29 @@ void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
/**
* struct nft_traceinfo - nft tracing information and state
*
- * @pkt: pktinfo currently processed
- * @basechain: base chain currently processed
- * @chain: chain currently processed
- * @rule: rule that was evaluated
- * @verdict: verdict given by rule
+ * @trace: true if the remaining struct members are initialised
+ * @nf_trace: copy of skb->nf_trace before rule evaluation
* @type: event type (enum nft_trace_types)
+ * @skbid: hash of skb to be used as trace id
* @packet_dumped: packet headers sent in a previous traceinfo message
- * @trace: other struct members are initialised
+ * @basechain: base chain currently processed
*/
struct nft_traceinfo {
- const struct nft_pktinfo *pkt;
- const struct nft_base_chain *basechain;
- const struct nft_chain *chain;
- const struct nft_rule *rule;
- const struct nft_verdict *verdict;
- enum nft_trace_types type;
- bool packet_dumped;
bool trace;
+ bool nf_trace;
+ bool packet_dumped;
+ enum nft_trace_types type:8;
+ u32 skbid;
+ const struct nft_base_chain *basechain;
};
void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
- const struct nft_verdict *verdict,
const struct nft_chain *basechain);
-void nft_trace_notify(struct nft_traceinfo *info);
+void nft_trace_notify(const struct nft_pktinfo *pkt,
+ const struct nft_verdict *verdict,
+ const struct nft_rule_dp *rule,
+ struct nft_traceinfo *info);
#define MODULE_ALIAS_NFT_CHAIN(family, name) \
MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
@@ -1358,45 +1567,37 @@ static inline void nft_set_elem_change_active(const struct net *net,
#endif /* IS_ENABLED(CONFIG_NF_TABLES) */
-/*
- * We use a free bit in the genmask field to indicate the element
- * is busy, meaning it is currently being processed either by
- * the netlink API or GC.
- *
- * Even though the genmask is only a single byte wide, this works
- * because the extension structure is fully constant once initialized,
- * so there are no non-atomic write accesses unless it is already
- * marked busy.
- */
-#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+#define NFT_SET_ELEM_DEAD_MASK (1 << 2)
#if defined(__LITTLE_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT 2
+#define NFT_SET_ELEM_DEAD_BIT 2
#elif defined(__BIG_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
#else
#error
#endif
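/*
 * genmask is the first byte of struct nft_set_ext (enforced by the
 * BUILD_BUG_ON below). Bit 2 of that byte is bit 2 of the containing
 * long on little endian; on big endian the first byte occupies the most
 * significant bits of the word, hence BITS_PER_LONG - BITS_PER_BYTE + 2.
 */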
-static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+static inline void nft_set_elem_dead(struct nft_set_ext *ext)
{
unsigned long *word = (unsigned long *)ext;
BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
- return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+ set_bit(NFT_SET_ELEM_DEAD_BIT, word);
}
-static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
{
unsigned long *word = (unsigned long *)ext;
- clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_bit(NFT_SET_ELEM_DEAD_BIT, word);
}
/**
* struct nft_trans - nf_tables object update in transaction
*
* @list: used internally
+ * @binding_list: list of objects with possible bindings
* @msg_type: message type
* @put_net: ctx->net needs to be put
* @ctx: transaction context
@@ -1404,6 +1605,7 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
*/
struct nft_trans {
struct list_head list;
+ struct list_head binding_list;
int msg_type;
bool put_net;
struct nft_ctx ctx;
@@ -1414,6 +1616,7 @@ struct nft_trans_rule {
struct nft_rule *rule;
struct nft_flow_rule *flow;
u32 rule_id;
+ bool bound;
};
#define nft_trans_rule(trans) \
@@ -1422,11 +1625,17 @@ struct nft_trans_rule {
(((struct nft_trans_rule *)trans->data)->flow)
#define nft_trans_rule_id(trans) \
(((struct nft_trans_rule *)trans->data)->rule_id)
+#define nft_trans_rule_bound(trans) \
+ (((struct nft_trans_rule *)trans->data)->bound)
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
+ u32 gc_int;
+ u64 timeout;
+ bool update;
bool bound;
+ u32 size;
};
#define nft_trans_set(trans) \
@@ -1435,15 +1644,29 @@ struct nft_trans_set {
(((struct nft_trans_set *)trans->data)->set_id)
#define nft_trans_set_bound(trans) \
(((struct nft_trans_set *)trans->data)->bound)
+#define nft_trans_set_update(trans) \
+ (((struct nft_trans_set *)trans->data)->update)
+#define nft_trans_set_timeout(trans) \
+ (((struct nft_trans_set *)trans->data)->timeout)
+#define nft_trans_set_gc_int(trans) \
+ (((struct nft_trans_set *)trans->data)->gc_int)
+#define nft_trans_set_size(trans) \
+ (((struct nft_trans_set *)trans->data)->size)
struct nft_trans_chain {
+ struct nft_chain *chain;
bool update;
char *name;
struct nft_stats __percpu *stats;
u8 policy;
+ bool bound;
u32 chain_id;
+ struct nft_base_chain *basechain;
+ struct list_head hook_list;
};
+#define nft_trans_chain(trans) \
+ (((struct nft_trans_chain *)trans->data)->chain)
#define nft_trans_chain_update(trans) \
(((struct nft_trans_chain *)trans->data)->update)
#define nft_trans_chain_name(trans) \
@@ -1452,29 +1675,32 @@ struct nft_trans_chain {
(((struct nft_trans_chain *)trans->data)->stats)
#define nft_trans_chain_policy(trans) \
(((struct nft_trans_chain *)trans->data)->policy)
+#define nft_trans_chain_bound(trans) \
+ (((struct nft_trans_chain *)trans->data)->bound)
#define nft_trans_chain_id(trans) \
(((struct nft_trans_chain *)trans->data)->chain_id)
+#define nft_trans_basechain(trans) \
+ (((struct nft_trans_chain *)trans->data)->basechain)
+#define nft_trans_chain_hooks(trans) \
+ (((struct nft_trans_chain *)trans->data)->hook_list)
struct nft_trans_table {
bool update;
- bool enable;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans) \
- (((struct nft_trans_table *)trans->data)->enable)
struct nft_trans_elem {
struct nft_set *set;
- struct nft_set_elem elem;
+ struct nft_elem_priv *elem_priv;
bool bound;
};
#define nft_trans_elem_set(trans) \
(((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem(trans) \
- (((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_priv(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem_priv)
#define nft_trans_elem_set_bound(trans) \
(((struct nft_trans_elem *)trans->data)->bound)
@@ -1495,6 +1721,7 @@ struct nft_trans_flowtable {
struct nft_flowtable *flowtable;
bool update;
struct list_head hook_list;
+ u32 flags;
};
#define nft_trans_flowtable(trans) \
@@ -1503,6 +1730,41 @@ struct nft_trans_flowtable {
(((struct nft_trans_flowtable *)trans->data)->update)
#define nft_trans_flowtable_hooks(trans) \
(((struct nft_trans_flowtable *)trans->data)->hook_list)
+#define nft_trans_flowtable_flags(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flags)
+
+#define NFT_TRANS_GC_BATCHCOUNT 256
+
+struct nft_trans_gc {
+ struct list_head list;
+ struct net *net;
+ struct nft_set *set;
+ u32 seq;
+ u16 count;
+ struct nft_elem_priv *priv[NFT_TRANS_GC_BATCHCOUNT];
+ struct rcu_head rcu;
+};
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_destroy(struct nft_trans_gc *trans);
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc);
+
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp);
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
+
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
+
+void nft_setelem_data_deactivate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_elem_priv *elem_priv);
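/*
 * Illustrative sketch (editorial addition, not part of the patch): the
 * batched GC API above as an async worker would drive it; set walking,
 * error handling and locking elided.
 */
static inline void example_gc_set(struct nft_set *set, unsigned int gc_seq)
{
	struct nft_trans_gc *gc;

	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
	if (!gc)
		return;

	/* for each expired/dead element found while walking the set:
	 *
	 *	gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
	 *	if (!gc)
	 *		return;
	 *	nft_trans_gc_elem_add(gc, elem_priv);
	 */

	nft_trans_gc_queue_async_done(gc);
}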
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);
@@ -1511,4 +1773,61 @@ void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
void nf_tables_trans_destroy_flush_work(void);
+
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
+__be64 nf_jiffies64_to_msecs(u64 input);
+
+#ifdef CONFIG_MODULES
+__printf(2, 3) int nft_request_module(struct net *net, const char *fmt, ...);
+#else
+static inline int nft_request_module(struct net *net, const char *fmt, ...) { return -ENOENT; }
+#endif
+
+struct nftables_pernet {
+ struct list_head tables;
+ struct list_head commit_list;
+ struct list_head binding_list;
+ struct list_head module_list;
+ struct list_head notify_list;
+ struct mutex commit_mutex;
+ u64 table_handle;
+ u64 tstamp;
+ unsigned int base_seq;
+ unsigned int gc_seq;
+ u8 validate_state;
+};
+
+extern unsigned int nf_tables_net_id;
+
+static inline struct nftables_pernet *nft_pernet(const struct net *net)
+{
+ return net_generic(net, nf_tables_net_id);
+}
+
+static inline u64 nft_net_tstamp(const struct net *net)
+{
+ return nft_pernet(net)->tstamp;
+}
+
+#define __NFT_REDUCE_READONLY 1UL
+#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
+
+static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
+{
+ return expr->ops->reduce == NFT_REDUCE_READONLY;
+}
+
+void nft_reg_track_update(struct nft_regs_track *track,
+ const struct nft_expr *expr, u8 dreg, u8 len);
+void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
+void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg);
+
+static inline bool nft_reg_track_cmp(struct nft_regs_track *track,
+ const struct nft_expr *expr, u8 dreg)
+{
+ return track->regs[dreg].selector &&
+ track->regs[dreg].selector->ops == expr->ops &&
+ track->regs[dreg].num_reg == 0;
+}
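/*
 * Illustrative sketch (editorial addition, not part of the patch): the
 * shape of a ->reduce() callback built on the tracking helpers above,
 * following the pattern of nft_meta_get_reduce(); dreg and the length 4
 * stand in for values a real callback reads from its private data.
 */
static inline bool example_reduce(struct nft_regs_track *track,
				  const struct nft_expr *expr)
{
	u8 dreg = 0;	/* hypothetical: taken from the expression's priv */

	if (!nft_reg_track_cmp(track, expr, dreg)) {
		/* different selector: record this one, keep the expr */
		nft_reg_track_update(track, expr, dreg, 4);
		return false;
	}
	/* same selector already loaded dreg: caller may elide the expr */
	return true;
}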
+
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 78516de14d31..ff27cb2e1662 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -3,9 +3,11 @@
#define _NET_NF_TABLES_CORE_H
#include <net/netfilter/nf_tables.h>
+#include <linux/indirect_call_wrapper.h>
extern struct nft_expr_type nft_imm_type;
extern struct nft_expr_type nft_cmp_type;
+extern struct nft_expr_type nft_counter_type;
extern struct nft_expr_type nft_lookup_type;
extern struct nft_expr_type nft_bitwise_type;
extern struct nft_expr_type nft_byteorder_type;
@@ -15,57 +17,71 @@ extern struct nft_expr_type nft_range_type;
extern struct nft_expr_type nft_meta_type;
extern struct nft_expr_type nft_rt_type;
extern struct nft_expr_type nft_exthdr_type;
+extern struct nft_expr_type nft_last_type;
+extern struct nft_expr_type nft_objref_type;
+extern struct nft_expr_type nft_inner_type;
#ifdef CONFIG_NETWORK_SECMARK
extern struct nft_object_type nft_secmark_obj_type;
#endif
+extern struct nft_object_type nft_counter_obj_type;
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
+struct nft_bitwise_fast_expr {
+ u32 mask;
+ u32 xor;
+ u8 sreg;
+ u8 dreg;
+};
+
struct nft_cmp_fast_expr {
u32 data;
- enum nft_registers sreg:8;
+ u32 mask;
+ u8 sreg;
u8 len;
+ bool inv;
+};
+
+struct nft_cmp16_fast_expr {
+ struct nft_data data;
+ struct nft_data mask;
+ u8 sreg;
+ u8 len;
+ bool inv;
};
struct nft_immediate_expr {
struct nft_data data;
- enum nft_registers dreg:8;
+ u8 dreg;
u8 dlen;
};
-/* Calculate the mask for the nft_cmp_fast expression. On big endian the
- * mask needs to include the *upper* bytes when interpreting that data as
- * something smaller than the full u32, therefore a cpu_to_le32 is done.
- */
-static inline u32 nft_cmp_fast_mask(unsigned int len)
-{
- return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
- data) * BITS_PER_BYTE - len));
-}
-
extern const struct nft_expr_ops nft_cmp_fast_ops;
+extern const struct nft_expr_ops nft_cmp16_fast_ops;
-struct nft_payload {
- enum nft_payload_bases base:8;
- u8 offset;
+struct nft_ct {
+ enum nft_ct_keys key:8;
+ enum ip_conntrack_dir dir:8;
u8 len;
- enum nft_registers dreg:8;
+ union {
+ u8 dreg;
+ u8 sreg;
+ };
};
-struct nft_payload_set {
+struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
- enum nft_registers sreg:8;
- u8 csum_type;
- u8 csum_offset;
- u8 csum_flags;
+ u8 dreg;
};
extern const struct nft_expr_ops nft_payload_fast_ops;
+extern const struct nft_expr_ops nft_bitwise_fast_ops;
+
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
@@ -77,6 +93,38 @@ extern const struct nft_set_type nft_set_bitmap_type;
extern const struct nft_set_type nft_set_pipapo_type;
extern const struct nft_set_type nft_set_pipapo_avx2_type;
+#ifdef CONFIG_MITIGATION_RETPOLINE
+bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup_fast(const struct net *net,
+ const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+#else
+static inline bool
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ return set->ops->lookup(net, set, key, ext);
+}
+#endif
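/*
 * Illustrative sketch (editorial addition, not part of the patch): with
 * retpolines enabled, the out-of-line nft_set_do_lookup() can replace
 * the indirect set->ops->lookup call with direct calls to the known
 * backends, roughly along these lines:
 */
#if 0	/* sketch only */
bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	if (set->ops == &nft_set_rhash_type.ops)
		return nft_rhash_lookup(net, set, key, ext);
	if (set->ops == &nft_set_bitmap_type.ops)
		return nft_bitmap_lookup(net, set, key, ext);
	/* ... remaining backends ..., otherwise fall back: */
	return set->ops->lookup(net, set, key, ext);
}
#endif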
+
+/* called from nft_pipapo_avx2.c */
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+/* called from nft_set_pipapo.c */
+bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+
+void nft_counter_init_seqcount(void);
+
struct nft_expr;
struct nft_regs;
struct nft_pktinfo;
@@ -100,4 +148,36 @@ void nft_dynset_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_rt_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_ct_get_fast_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+
+enum {
+ NFT_PAYLOAD_CTX_INNER_TUN = (1 << 0),
+ NFT_PAYLOAD_CTX_INNER_LL = (1 << 1),
+ NFT_PAYLOAD_CTX_INNER_NH = (1 << 2),
+ NFT_PAYLOAD_CTX_INNER_TH = (1 << 3),
+};
+
+struct nft_inner_tun_ctx {
+ u16 type;
+ u16 inner_tunoff;
+ u16 inner_lloff;
+ u16 inner_nhoff;
+ u16 inner_thoff;
+ __be16 llproto;
+ u8 l4proto;
+ u8 flags;
+};
+
+int nft_payload_inner_offset(const struct nft_pktinfo *pkt);
+void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *ctx);
+
+void nft_objref_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_objref_map_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ed7b511f0a59..60a7d0ce3080 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,52 +5,86 @@
#include <net/netfilter/nf_tables.h>
#include <net/ip.h>
-static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
{
struct iphdr *ip;
ip = ip_hdr(pkt->skb);
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = ip->protocol;
- pkt->xt.thoff = ip_hdrlen(pkt->skb);
- pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+ pkt->thoff = ip_hdrlen(pkt->skb);
+ pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
-static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
struct iphdr *iph, _iph;
u32 len, thoff;
- iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
- &_iph);
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*iph), &_iph);
if (!iph)
return -1;
if (iph->ihl < 5 || iph->version != 4)
return -1;
- len = ntohs(iph->tot_len);
- thoff = iph->ihl * 4;
- if (skb->len < len)
+ len = iph_totlen(pkt->skb, iph);
+ thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
+ if (pkt->skb->len < len)
return -1;
else if (len < thoff)
return -1;
+ else if (thoff < sizeof(*iph))
+ return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
}
-static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+{
+ if (__nft_set_pktinfo_ipv4_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
+}
+
+static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
- nft_set_pktinfo_unspec(pkt, skb);
+ struct iphdr *iph;
+ u32 len, thoff;
+
+ if (!pskb_may_pull(pkt->skb, sizeof(*iph)))
+ return -1;
+
+ iph = ip_hdr(pkt->skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ len = iph_totlen(pkt->skb, iph);
+ thoff = iph->ihl * 4;
+ if (pkt->skb->len < len) {
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ } else if (len < thoff) {
+ goto inhdr_error;
+ } else if (thoff < sizeof(*iph)) {
+ return -1;
+ }
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = iph->protocol;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+
+inhdr_error:
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
+ return -1;
}
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d0f1c537b017..467d59b9e533 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -6,27 +6,25 @@
#include <net/ipv6.h>
#include <net/netfilter/nf_tables.h>
-static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
{
unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
unsigned short frag_off;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
- if (protohdr < 0) {
- nft_set_pktinfo_unspec(pkt, skb);
+ if (protohdr < 0 || thoff > U16_MAX) {
+ nft_set_pktinfo_unspec(pkt);
return;
}
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
}
-static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
@@ -36,8 +34,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
int protohdr;
u32 pkt_len;
- ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
- &_ip6h);
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*ip6h), &_ip6h);
if (!ip6h)
return -1;
@@ -45,17 +43,17 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
return -1;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > skb->len)
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
return -1;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
- if (protohdr < 0)
+ if (protohdr < 0 || thoff > U16_MAX)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
return 0;
#else
@@ -63,11 +61,55 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
#endif
}
-static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
- nft_set_pktinfo_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv6_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
+}
+
+static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
+ unsigned short frag_off;
+ unsigned int thoff = 0;
+ struct inet6_dev *idev;
+ struct ipv6hdr *ip6h;
+ int protohdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(pkt->skb, sizeof(*ip6h)))
+ return -1;
+
+ ip6h = ipv6_hdr(pkt->skb);
+ if (ip6h->version != 6)
+ goto inhdr_error;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len) {
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ }
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0 || thoff > U16_MAX)
+ goto inhdr_error;
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = protohdr;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
+
+ return 0;
+
+inhdr_error:
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INHDRERRORS);
+ return -1;
+#else
+ return -1;
+#endif
}
#endif
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index ea7d1d78b92d..3568b6a2f5f0 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -4,11 +4,16 @@
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
+enum nft_offload_reg_flags {
+ NFT_OFFLOAD_F_NETWORK2HOST = (1 << 0),
+};
+
struct nft_offload_reg {
u32 key;
u32 len;
u32 base_offset;
u32 offset;
+ u32 flags;
struct nft_data data;
struct nft_data mask;
};
@@ -37,6 +42,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
struct nft_flow_key {
struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_control control;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
@@ -44,6 +50,7 @@ struct nft_flow_key {
struct flow_dissector_key_ports tp;
struct flow_dissector_key_ip ip;
struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_eth_addrs eth_addrs;
struct flow_dissector_key_meta meta;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
@@ -60,23 +67,32 @@ struct nft_flow_rule {
struct flow_rule *rule;
};
-#define NFT_OFFLOAD_F_ACTION (1 << 0)
+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+ enum flow_dissector_key_id addr_type);
struct nft_rule;
struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
+int nft_flow_rule_stats(const struct nft_chain *chain, const struct nft_rule *rule);
void nft_flow_rule_destroy(struct nft_flow_rule *flow);
int nft_flow_rule_offload_commit(struct net *net);
-#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
(__reg)->base_offset = \
offsetof(struct nft_flow_key, __base); \
(__reg)->offset = \
offsetof(struct nft_flow_key, __base.__field); \
(__reg)->len = __len; \
(__reg)->key = __key; \
+ (__reg)->flags = __flags;
+
+#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
+ NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
+
+#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \
+ NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
memset(&(__reg)->mask, 0xff, (__reg)->len);
-int nft_chain_offload_priority(struct nft_base_chain *basechain);
+bool nft_chain_offload_support(const struct nft_base_chain *basechain);
int nft_offload_init(void);
void nft_offload_exit(void);
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 82d0e41b76f2..faa108b1ba67 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
return false;
}
+static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
+{
+ local_bh_disable();
+ inet_twsk_deschedule_put(tw);
+ local_bh_enable();
+}
+
/* assign a socket to the skb -- consumes sk */
static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 628b6fa579cd..167640b843ef 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -5,7 +5,7 @@
#include <net/netfilter/nf_tables.h>
struct nft_fib {
- enum nft_registers dreg:8;
+ u8 dreg;
u8 result;
u32 flags;
};
@@ -18,7 +18,7 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
-int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
+int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
@@ -37,4 +37,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
void nft_fib_store_result(void *reg, const struct nft_fib *priv,
const struct net_device *dev);
+
+bool nft_fib_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr);
#endif
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index 07e2fd507963..ba1238f12a48 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -6,9 +6,10 @@
struct nft_meta {
enum nft_meta_keys key:8;
+ u8 len;
union {
- enum nft_registers dreg:8;
- enum nft_registers sreg:8;
+ u8 dreg;
+ u8 sreg;
};
};
@@ -23,10 +24,10 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[]);
int nft_meta_get_dump(struct sk_buff *skb,
- const struct nft_expr *expr);
+ const struct nft_expr *expr, bool reset);
int nft_meta_set_dump(struct sk_buff *skb,
- const struct nft_expr *expr);
+ const struct nft_expr *expr, bool reset);
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
@@ -43,4 +44,12 @@ int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
+bool nft_meta_get_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr);
+
+struct nft_inner_tun_ctx;
+void nft_meta_inner_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *tun_ctx);
+
#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 56b123a42220..6d9ba62efd75 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -22,7 +22,8 @@ int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[]);
-int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
+int nft_reject_dump(struct sk_buff *skb,
+ const struct nft_expr *expr, bool reset);
int nft_reject_icmp_code(u8 code);
int nft_reject_icmpv6_code(u8 code);
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 832ab69efda5..4c3809e141f4 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -6,7 +6,7 @@
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
spinlock_t lock;
diff --git a/include/net/netkit.h b/include/net/netkit.h
new file mode 100644
index 000000000000..9ec0163739f4
--- /dev/null
+++ b/include/net/netkit.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __NET_NETKIT_H
+#define __NET_NETKIT_H
+
+#include <linux/bpf.h>
+
+#ifdef CONFIG_NETKIT
+int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
+int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
+#else
+static inline int netkit_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int netkit_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int netkit_prog_detach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int netkit_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline struct net_device *netkit_peer_dev(struct net_device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_NETKIT */
+#endif /* __NET_NETKIT_H */
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 43ae50337685..f3ab0b8a4b18 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -145,15 +145,14 @@ struct netlbl_lsm_cache {
* processing.
*
*/
-#define NETLBL_CATMAP_MAPTYPE u64
#define NETLBL_CATMAP_MAPCNT 4
-#define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+#define NETLBL_CATMAP_MAPSIZE (sizeof(u64) * 8)
#define NETLBL_CATMAP_SIZE (NETLBL_CATMAP_MAPSIZE * \
NETLBL_CATMAP_MAPCNT)
-#define NETLBL_CATMAP_BIT (NETLBL_CATMAP_MAPTYPE)0x01
+#define NETLBL_CATMAP_BIT ((u64)0x01)
struct netlbl_lsm_catmap {
u32 startbit;
- NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+ u64 bitmap[NETLBL_CATMAP_MAPCNT];
struct netlbl_lsm_catmap *next;
};
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 8e0eb2c9c528..c19ff921b661 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -128,6 +128,8 @@
* nla_len(nla) length of attribute payload
*
* Attribute Payload Access for Basic Types:
+ * nla_get_uint(nla) get payload for a uint attribute
+ * nla_get_sint(nla) get payload for a sint attribute
* nla_get_u8(nla) get payload for a u8 attribute
* nla_get_u16(nla) get payload for a u16 attribute
* nla_get_u32(nla) get payload for a u32 attribute
@@ -142,7 +144,7 @@
* Attribute Misc:
* nla_memcpy(dest, nla, count) copy attribute into memory
* nla_memcmp(nla, data, size) compare attribute with memory area
- * nla_strlcpy(dst, nla, size) copy attribute to a sized string
+ * nla_strscpy(dst, nla, size) copy attribute to a sized string
* nla_strcmp(nla, str) compare attribute with string
*
* Attribute Parsing:
@@ -181,8 +183,10 @@ enum {
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
- NLA_EXACT_LEN,
- NLA_MIN_LEN,
+ NLA_BE16,
+ NLA_BE32,
+ NLA_SINT,
+ NLA_UINT,
__NLA_TYPE_MAX,
};
@@ -199,11 +203,12 @@ struct netlink_range_validation_signed {
enum nla_policy_validation {
NLA_VALIDATE_NONE,
NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_RANGE_WARN_TOO_LONG,
NLA_VALIDATE_MIN,
NLA_VALIDATE_MAX,
+ NLA_VALIDATE_MASK,
NLA_VALIDATE_RANGE_PTR,
NLA_VALIDATE_FUNCTION,
- NLA_VALIDATE_WARN_TOO_LONG,
};
/**
@@ -222,26 +227,23 @@ enum nla_policy_validation {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_MIN_LEN Minimum length of attribute payload
+ * (but see also below with the validation type)
* NLA_NESTED,
* NLA_NESTED_ARRAY Length verification is done by checking len of
* nested header (or empty); len field is used if
* nested_policy is also used, for the max attr
* number in the nested policy.
+ * NLA_SINT, NLA_UINT,
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
* NLA_S32, NLA_S64,
+ * NLA_BE16, NLA_BE32,
* NLA_MSECS Leaving the length field zero will verify the
* given type fits, using it verifies minimum length
* just like "All other"
* NLA_BITFIELD32 Unused
* NLA_REJECT Unused
- * NLA_EXACT_LEN Attribute should have exactly this length, otherwise
- * it is rejected or warned about, the latter happening
- * if and only if the `validation_type' is set to
- * NLA_VALIDATE_WARN_TOO_LONG.
- * NLA_MIN_LEN Minimum length of attribute payload
* All other Minimum length of attribute payload
*
* Meaning of validation union:
@@ -263,10 +265,14 @@ enum nla_policy_validation {
* while an array has the nested attributes at another
* level down and the attribute types directly in the
* nesting don't matter.
+ * NLA_UINT,
* NLA_U8,
* NLA_U16,
* NLA_U32,
* NLA_U64,
+ * NLA_BE16,
+ * NLA_BE32,
+ * NLA_SINT,
* NLA_S8,
* NLA_S16,
* NLA_S32,
@@ -277,9 +283,11 @@ enum nla_policy_validation {
* Note that in the interest of code simplicity and
* struct size both limits are s16, so you cannot
* enforce a range that doesn't fall within the range
- * of s16 - do that as usual in the code instead.
+ * of s16 - do that using the NLA_POLICY_FULL_RANGE()
+ * or NLA_POLICY_FULL_RANGE_SIGNED() macros instead.
* Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
* NLA_POLICY_RANGE() macros.
+ * NLA_UINT,
* NLA_U8,
* NLA_U16,
* NLA_U32,
@@ -288,6 +296,7 @@ enum nla_policy_validation {
* to a struct netlink_range_validation that indicates
* the min/max values.
* Use NLA_POLICY_FULL_RANGE().
+ * NLA_SINT,
* NLA_S8,
* NLA_S16,
* NLA_S32,
@@ -296,6 +305,11 @@ enum nla_policy_validation {
* pointer to a struct netlink_range_validation_signed
* that indicates the min/max values.
* Use NLA_POLICY_FULL_RANGE_SIGNED().
+ *
+ * NLA_BINARY If the validation type is like the ones for integers
+ * above, then the min/max length (not value like for
+ * integers) of the attribute is enforced.
+ *
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
@@ -309,7 +323,7 @@ enum nla_policy_validation {
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
- * [ATTR_BAZ] = { .type = NLA_EXACT_LEN, .len = sizeof(struct mystruct) },
+ * [ATTR_BAZ] = NLA_POLICY_EXACT_LEN(sizeof(struct mystruct)),
* [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags),
* };
*/
@@ -318,26 +332,20 @@ struct nla_policy {
u8 validation_type;
u16 len;
union {
- const u32 bitfield32_valid;
- const char *reject_message;
- const struct nla_policy *nested_policy;
- struct netlink_range_validation *range;
- struct netlink_range_validation_signed *range_signed;
- struct {
- s16 min, max;
- };
- int (*validate)(const struct nlattr *attr,
- struct netlink_ext_ack *extack);
- /* This entry is special, and used for the attribute at index 0
+ /**
+ * @strict_start_type: first attribute to validate strictly
+ *
+ * This entry is special, and used for the attribute at index 0
* only, and specifies special data about the policy, namely it
* specifies the "boundary type" where strict length validation
* starts for any attribute types >= this value, also, strict
* nesting validation starts here.
*
* Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
- * for any types >= this, so need to use NLA_MIN_LEN to get the
- * previous pure { .len = xyz } behaviour. The advantage of this
- * is that types not specified in the policy will be rejected.
+ * for any types >= this, so you need to use NLA_POLICY_MIN_LEN() to
+ * get the previous pure { .len = xyz } behaviour. The advantage
+ * of this is that types not specified in the policy will be
+ * rejected.
*
* For completely new families it should be set to 1 so that the
* validation is enforced for all attributes. For existing ones
@@ -346,15 +354,22 @@ struct nla_policy {
* was added to enforce strict validation from thereon.
*/
u16 strict_start_type;
+
+ /* private: use NLA_POLICY_*() to set */
+ const u32 bitfield32_valid;
+ const u32 mask;
+ const char *reject_message;
+ const struct nla_policy *nested_policy;
+ const struct netlink_range_validation *range;
+ const struct netlink_range_validation_signed *range_signed;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
};
};
-#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
-#define NLA_POLICY_EXACT_LEN_WARN(_len) \
- { .type = NLA_EXACT_LEN, .len = _len, \
- .validation_type = NLA_VALIDATE_WARN_TOO_LONG, }
-#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_MIN_LEN, .len = _len }
-
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
@@ -369,20 +384,28 @@ struct nla_policy {
#define NLA_POLICY_BITFIELD32(valid) \
{ .type = NLA_BITFIELD32, .bitfield32_valid = valid }
+#define __NLA_IS_UINT_TYPE(tp) \
+ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \
+ tp == NLA_U64 || tp == NLA_UINT || \
+ tp == NLA_BE16 || tp == NLA_BE32)
+#define __NLA_IS_SINT_TYPE(tp) \
+ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64 || \
+ tp == NLA_SINT)
+
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_U8 || tp == NLA_U16 || \
- tp == NLA_U32 || tp == NLA_U64 || \
- tp == NLA_MSECS) + tp)
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_UINT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_SINT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_S16 || \
- tp == NLA_S32 || tp == NLA_S64) + tp)
-#define NLA_ENSURE_INT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
- tp == NLA_S16 || tp == NLA_U16 || \
- tp == NLA_S32 || tp == NLA_U32 || \
- tp == NLA_S64 || tp == NLA_U64 || \
- tp == NLA_MSECS) + tp)
+ (__NLA_ENSURE(__NLA_IS_SINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ __NLA_IS_SINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
(__NLA_ENSURE(tp != NLA_BITFIELD32 && \
tp != NLA_REJECT && \
@@ -390,14 +413,14 @@ struct nla_policy {
tp != NLA_NESTED_ARRAY) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE, \
.min = _min, \
.max = _max \
}
#define NLA_POLICY_FULL_RANGE(tp, _range) { \
- .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .type = NLA_ENSURE_UINT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE_PTR, \
.range = _range, \
}
@@ -409,17 +432,23 @@ struct nla_policy {
}
#define NLA_POLICY_MIN(tp, _min) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MIN, \
.min = _min, \
}
#define NLA_POLICY_MAX(tp, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
}
+#define NLA_POLICY_MASK(tp, _mask) { \
+ .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MASK, \
+ .mask = _mask, \
+}
+
#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
.type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
.validation_type = NLA_VALIDATE_FUNCTION, \
@@ -427,6 +456,15 @@ struct nla_policy {
.len = __VA_ARGS__ + 0, \
}
+#define NLA_POLICY_EXACT_LEN(_len) NLA_POLICY_RANGE(NLA_BINARY, _len, _len)
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { \
+ .type = NLA_BINARY, \
+ .validation_type = NLA_VALIDATE_RANGE_WARN_TOO_LONG, \
+ .min = _len, \
+ .max = _len \
+}
+#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+
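/*
 * Illustrative sketch (editorial addition, not part of the patch): a
 * policy combining the helpers above; the EX_ATTR_* names are
 * hypothetical.
 */
#if 0	/* sketch only */
static const struct nla_policy example_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_PORT]	= NLA_POLICY_RANGE(NLA_U16, 1, 65535),
	[EX_ATTR_FLAGS]	= NLA_POLICY_MASK(NLA_U32, 0x0003),
	[EX_ATTR_ADDR]	= NLA_POLICY_EXACT_LEN(16),
	[EX_ATTR_NAME]	= NLA_POLICY_MIN_LEN(1),
};
#endif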
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
@@ -491,7 +529,7 @@ int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
-size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize);
char *nla_strdup(const struct nlattr *nla, gfp_t flags);
int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
@@ -726,6 +764,7 @@ static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse()
@@ -745,6 +784,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated()
@@ -764,6 +804,7 @@ static inline int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated_strict()
@@ -799,7 +840,6 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
@@ -870,7 +910,18 @@ static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
*/
static inline int nlmsg_report(const struct nlmsghdr *nlh)
{
- return !!(nlh->nlmsg_flags & NLM_F_ECHO);
+ return nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;
+}
+
+/**
+ * nlmsg_seq - return the sequence number of a netlink message
+ * @nlh: netlink message header
+ *
+ * Returns the message's sequence number, or 0 if @nlh is NULL
+ */
+static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
+{
+ return nlh ? nlh->nlmsg_seq : 0;
}
/**
@@ -906,6 +957,27 @@ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 se
}
/**
+ * nlmsg_append - Add more data to a nlmsg in a skb
+ * @skb: socket buffer to store message in
+ * @size: length of message payload
+ *
+ * Append data to an existing nlmsg, used when constructing a message
+ * with multiple fixed-format headers (which is rare).
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the extra payload.
+ */
+static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
+{
+ if (unlikely(skb_tailroom(skb) < NLMSG_ALIGN(size)))
+ return NULL;
+
+ if (NLMSG_ALIGN(size) - size)
+ memset(skb_tail_pointer(skb) + size, 0,
+ NLMSG_ALIGN(size) - size);
+ return __skb_put(skb, NLMSG_ALIGN(size));
+}
+
+/**
* nlmsg_put_answer - Add a new callback based netlink message to an skb
* @skb: socket buffer to store message in
* @cb: netlink callback
@@ -939,6 +1011,20 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
}
/**
+ * nlmsg_new_large - Allocate a new netlink message with non-contiguous
+ * physical memory
+ * @payload: size of the message payload
+ *
+ * The allocated skb cannot carry frag pages in shinfo->frags[]:
+ * netlink_skb_destructor() sets skb->head to NULL, which bypasses
+ * most of the handling in skb_release_data()
+ */
+static inline struct sk_buff *nlmsg_new_large(size_t payload)
+{
+ return netlink_alloc_large_skb(nlmsg_total_size(payload), 0);
+}
+
+/**
* nlmsg_end - Finalize a netlink message
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
@@ -1001,21 +1087,29 @@ static inline void nlmsg_free(struct sk_buff *skb)
}
/**
- * nlmsg_multicast - multicast a netlink message
+ * nlmsg_multicast_filtered - multicast a netlink message with filter function
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: multicast group id
* @flags: allocation flags
+ * @filter: filter function
+ * @filter_data: filter function private data
+ *
+ * Return: 0 on success, negative error code for failure.
*/
-static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
- u32 portid, unsigned int group, gfp_t flags)
+static inline int nlmsg_multicast_filtered(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group,
+ gfp_t flags,
+ netlink_filter_fn filter,
+ void *filter_data)
{
int err;
NETLINK_CB(skb).dst_group = group;
- err = netlink_broadcast(sk, skb, portid, group, flags);
+ err = netlink_broadcast_filtered(sk, skb, portid, group, flags,
+ filter, filter_data);
if (err > 0)
err = 0;
@@ -1023,6 +1117,21 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
}
/**
+ * nlmsg_multicast - multicast a netlink message
+ * @sk: netlink socket to spread messages to
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: multicast group id
+ * @flags: allocation flags
+ */
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags)
+{
+ return nlmsg_multicast_filtered(sk, skb, portid, group, flags,
+ NULL, NULL);
+}
+
+/**
* nlmsg_unicast - unicast a netlink message
* @sk: netlink socket to spread message to
* @skb: netlink message as socket buffer
@@ -1128,7 +1237,7 @@ static inline void *nla_data(const struct nlattr *nla)
* nla_len - length of payload
* @nla: netlink attribute
*/
-static inline int nla_len(const struct nlattr *nla)
+static inline u16 nla_len(const struct nlattr *nla)
{
return nla->nla_len - NLA_HDRLEN;
}
@@ -1297,6 +1406,22 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
}
/**
+ * nla_put_uint - Add a variable-size unsigned int to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_uint(struct sk_buff *skb, int attrtype, u64 value)
+{
+ u64 tmp64 = value;
+ u32 tmp32 = value;
+
+ if (tmp64 == tmp32)
+ return nla_put_u32(skb, attrtype, tmp32);
+ return nla_put(skb, attrtype, sizeof(u64), &tmp64);
+}
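/*
 * Illustrative sketch (editorial addition, not part of the patch):
 * NLA_UINT round-trip. The attribute is emitted as 4 bytes when the
 * value fits in a u32 and as 8 bytes otherwise; nla_get_uint() below
 * picks the width from the attribute length, and the NLA_SINT helpers
 * follow the same scheme. EX_ATTR_COUNTER is hypothetical.
 */
#if 0	/* sketch only */
	if (nla_put_uint(skb, EX_ATTR_COUNTER, counter))
		goto nla_put_failure;
	/* receiver side: counter = nla_get_uint(tb[EX_ATTR_COUNTER]); */
#endif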
+
+/**
* nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
@@ -1451,6 +1576,22 @@ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
}
/**
+ * nla_put_sint - Add a variable-size signed int to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_sint(struct sk_buff *skb, int attrtype, s64 value)
+{
+ s64 tmp64 = value;
+ s32 tmp32 = value;
+
+ if (tmp64 == tmp32)
+ return nla_put_s32(skb, attrtype, tmp32);
+ return nla_put(skb, attrtype, sizeof(s64), &tmp64);
+}
+
+/**
* nla_put_string - Add a string netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
@@ -1607,6 +1748,17 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
}
/**
+ * nla_get_uint - return payload of uint attribute
+ * @nla: uint netlink attribute
+ */
+static inline u64 nla_get_uint(const struct nlattr *nla)
+{
+ if (nla_len(nla) == sizeof(u32))
+ return nla_get_u32(nla);
+ return nla_get_u64(nla);
+}
+
+/**
* nla_get_be64 - return payload of __be64 attribute
* @nla: __be64 netlink attribute
*/
@@ -1669,6 +1821,17 @@ static inline s64 nla_get_s64(const struct nlattr *nla)
}
/**
+ * nla_get_sint - return payload of sint attribute
+ * @nla: sint netlink attribute
+ */
+static inline s64 nla_get_sint(const struct nlattr *nla)
+{
+ if (nla_len(nla) == sizeof(s32))
+ return nla_get_s32(nla);
+ return nla_get_s64(nla);
+}
+
+/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
@@ -1931,10 +2094,21 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range);
-int netlink_policy_dump_start(const struct nla_policy *policy,
- unsigned int maxtype,
- unsigned long *state);
-bool netlink_policy_dump_loop(unsigned long *state);
-int netlink_policy_dump_write(struct sk_buff *skb, unsigned long state);
+struct netlink_policy_dump_state;
+
+int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
+int netlink_policy_dump_write(struct sk_buff *skb,
+ struct netlink_policy_dump_state *state);
+int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
+int netlink_policy_dump_write_attr(struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr);
+void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
#endif
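
For illustration, a minimal sketch (not part of the patch) of how the new variable-size integer helpers pair up across put and get; TEST_ATTR_VALUE is a hypothetical attribute type:

/* Hypothetical sender/receiver pair for a variable-size uint attribute.
 * nla_put_uint() emits a u32 payload when the value fits and a u64
 * otherwise; nla_get_uint() keys off nla_len() to decode either size.
 */
static int emit_counter(struct sk_buff *skb, u64 counter)
{
	return nla_put_uint(skb, TEST_ATTR_VALUE, counter);
}

static u64 read_counter(struct nlattr *tb[])
{
	return nla_get_uint(tb[TEST_ATTR_VALUE]);
}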
diff --git a/include/net/netmem.h b/include/net/netmem.h
new file mode 100644
index 000000000000..d8b810245c1d
--- /dev/null
+++ b/include/net/netmem.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Network memory
+ *
+ * Author: Mina Almasry <almasrymina@google.com>
+ */
+
+#ifndef _NET_NETMEM_H
+#define _NET_NETMEM_H
+
+/**
+ * typedef netmem_ref - a nonexistent type marking a reference to generic
+ * network memory.
+ *
+ * A netmem_ref currently is always a reference to a struct page. This
+ * abstraction is introduced so support for new memory types can be added.
+ *
+ * Use the supplied helpers to obtain the underlying memory pointer and fields.
+ */
+typedef unsigned long __bitwise netmem_ref;
+
+/* This conversion fails (returns NULL) if the netmem_ref is not struct page
+ * backed.
+ *
+ * Currently struct page is the only possible netmem, and this helper never
+ * fails.
+ */
+static inline struct page *netmem_to_page(netmem_ref netmem)
+{
+ return (__force struct page *)netmem;
+}
+
+/* Converting from page to netmem is always safe, because a page can always be
+ * a netmem.
+ */
+static inline netmem_ref page_to_netmem(struct page *page)
+{
+ return (__force netmem_ref)page;
+}
+
+#endif /* _NET_NETMEM_H */
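
A short usage sketch for the new abstraction (not from the patch): callers treat netmem_ref as opaque and only cross the boundary through the two helpers, so page-specific code stays in one place.

/* Hypothetical fragment-release path; today every netmem_ref is
 * page-backed, so netmem_to_page() cannot fail.
 */
static void frag_release(netmem_ref netmem)
{
	put_page(netmem_to_page(netmem));
}

static netmem_ref frag_wrap(struct page *page)
{
	return page_to_netmem(page);
}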
diff --git a/include/net/netns/bpf.h b/include/net/netns/bpf.h
index 0ca6a1b87185..2c01a278d1eb 100644
--- a/include/net/netns/bpf.h
+++ b/include/net/netns/bpf.h
@@ -6,11 +6,18 @@
#ifndef __NETNS_BPF_H__
#define __NETNS_BPF_H__
-#include <linux/bpf-netns.h>
+#include <linux/list.h>
struct bpf_prog;
struct bpf_prog_array;
+enum netns_bpf_attach_type {
+ NETNS_BPF_INVALID = -1,
+ NETNS_BPF_FLOW_DISSECTOR = 0,
+ NETNS_BPF_SK_LOOKUP,
+ MAX_NETNS_BPF_ATTACH_TYPE
+};
+
struct netns_bpf {
/* Array of programs to run compiled from progs or links */
struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index b6ab7d1530d7..48b79f7e6236 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -7,6 +7,7 @@
#define __NETNS_CAN_H__
#include <linux/spinlock.h>
+#include <linux/timer.h>
struct can_dev_rcv_lists;
struct can_pkg_stats;
@@ -15,7 +16,6 @@ struct can_rcv_lists_stats;
struct netns_can {
#if IS_ENABLED(CONFIG_PROC_FS)
struct proc_dir_entry *proc_dir;
- struct proc_dir_entry *pde_version;
struct proc_dir_entry *pde_stats;
struct proc_dir_entry *pde_reset_stats;
struct proc_dir_entry *pde_rcvlist_all;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 806454e767bf..bae914815aa3 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -24,9 +24,13 @@ struct nf_generic_net {
struct nf_tcp_net {
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
- int tcp_loose;
- int tcp_be_liberal;
- int tcp_max_retrans;
+ u8 tcp_loose;
+ u8 tcp_be_liberal;
+ u8 tcp_max_retrans;
+ u8 tcp_ignore_invalid_rst;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+#endif
};
enum udp_conntrack {
@@ -37,6 +41,9 @@ enum udp_conntrack {
struct nf_udp_net {
unsigned int timeouts[UDP_CT_MAX];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+#endif
};
struct nf_icmp_net {
@@ -45,7 +52,7 @@ struct nf_icmp_net {
#ifdef CONFIG_NF_CT_PROTO_DCCP
struct nf_dccp_net {
- int dccp_loose;
+ u8 dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
};
#endif
@@ -86,37 +93,21 @@ struct nf_ip_net {
#endif
};
-struct ct_pcpu {
- spinlock_t lock;
- struct hlist_nulls_head unconfirmed;
- struct hlist_nulls_head dying;
-};
-
struct netns_ct {
- atomic_t count;
- unsigned int expect_count;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct delayed_work ecache_dwork;
bool ecache_dwork_pending;
#endif
- bool auto_assign_helper_warned;
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *sysctl_header;
-#endif
- unsigned int sysctl_log_invalid; /* Log invalid packets */
- int sysctl_events;
- int sysctl_acct;
- int sysctl_auto_assign_helper;
- int sysctl_tstamp;
- int sysctl_checksum;
+ u8 sysctl_log_invalid; /* Log invalid packets */
+ u8 sysctl_events;
+ u8 sysctl_acct;
+ u8 sysctl_tstamp;
+ u8 sysctl_checksum;
- struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
- struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
- unsigned int labels_used;
+ atomic_t labels_used;
#endif
};
#endif
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 36c2d998a43c..78214f1b43a2 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -2,19 +2,27 @@
#ifndef __NETNS_CORE_H__
#define __NETNS_CORE_H__
+#include <linux/types.h>
+
struct ctl_table_header;
struct prot_inuse;
+struct cpumask;
struct netns_core {
/* core sysctls */
struct ctl_table_header *sysctl_hdr;
int sysctl_somaxconn;
+ int sysctl_optmem_max;
+ u8 sysctl_txrehash;
#ifdef CONFIG_PROC_FS
- int __percpu *sock_inuse;
struct prot_inuse __percpu *prot_inuse;
#endif
+
+#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
+ struct cpumask *rps_default_mask;
+#endif
};
#endif
diff --git a/include/net/netns/dccp.h b/include/net/netns/dccp.h
deleted file mode 100644
index cdbc4f5b8390..000000000000
--- a/include/net/netns/dccp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NETNS_DCCP_H__
-#define __NETNS_DCCP_H__
-
-struct sock;
-
-struct netns_dccp {
- struct sock *v4_ctl_sk;
- struct sock *v6_ctl_sk;
-};
-
-#endif
diff --git a/include/net/netns/flow_table.h b/include/net/netns/flow_table.h
new file mode 100644
index 000000000000..1c5fc657e267
--- /dev/null
+++ b/include/net/netns/flow_table.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_FLOW_TABLE_H
+#define __NETNS_FLOW_TABLE_H
+
+struct nf_flow_table_stat {
+ unsigned int count_wq_add;
+ unsigned int count_wq_del;
+ unsigned int count_wq_stats;
+};
+
+struct netns_ft {
+ struct nf_flow_table_stat __percpu *stat;
+};
+#endif
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 8a1ab47c3fb3..00c399edeed1 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -8,6 +8,7 @@
#include <linux/bug.h>
#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
/*
* Generic net pointers are to be used by modules to put some private
@@ -32,7 +33,7 @@ struct net_generic {
struct rcu_head rcu;
} s;
- void *ptr[0];
+ DECLARE_FLEX_ARRAY(void *, ptr);
};
};
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 9e36738c1fe1..c356c458b340 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -9,9 +9,9 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
#include <linux/rcupdate.h>
+#include <linux/seqlock.h>
#include <linux/siphash.h>
-struct tcpm_hash_bucket;
struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
@@ -19,8 +19,7 @@ struct hlist_head;
struct fib_table;
struct sock;
struct local_ports {
- seqlock_t lock;
- int range[2];
+ u32 range; /* high << 16 | low */
bool warned;
};
@@ -32,8 +31,9 @@ struct ping_group_range {
struct inet_hashinfo;
struct inet_timewait_death_row {
- atomic_t tw_count;
+ refcount_t tw_refcount;
+	/* Padding to avoid false sharing; tw_refcount is often written */
struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
int sysctl_max_tw_buckets;
};
@@ -41,6 +41,41 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
struct netns_ipv4 {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX readonly hotpath cache lines */
+ __cacheline_group_begin(netns_ipv4_read_tx);
+ u8 sysctl_tcp_early_retrans;
+ u8 sysctl_tcp_tso_win_divisor;
+ u8 sysctl_tcp_tso_rtt_log;
+ u8 sysctl_tcp_autocorking;
+ int sysctl_tcp_min_snd_mss;
+ unsigned int sysctl_tcp_notsent_lowat;
+ int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_min_rtt_wlen;
+ int sysctl_tcp_wmem[3];
+ u8 sysctl_ip_fwd_use_pmtu;
+ __cacheline_group_end(netns_ipv4_read_tx);
+
+ /* TXRX readonly hotpath cache lines */
+ __cacheline_group_begin(netns_ipv4_read_txrx);
+ u8 sysctl_tcp_moderate_rcvbuf;
+ __cacheline_group_end(netns_ipv4_read_txrx);
+
+ /* RX readonly hotpath cache line */
+ __cacheline_group_begin(netns_ipv4_read_rx);
+ u8 sysctl_ip_early_demux;
+ u8 sysctl_tcp_early_demux;
+ int sysctl_tcp_reordering;
+ int sysctl_tcp_rmem[3];
+ __cacheline_group_end(netns_ipv4_read_rx);
+
+ struct inet_timewait_death_row tcp_death_row;
+ struct udp_table *udp_table;
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
@@ -54,152 +89,148 @@ struct netns_ipv4 {
struct mutex ra_mutex;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
- bool fib_has_custom_rules;
- unsigned int fib_rules_require_fldissect;
struct fib_table __rcu *fib_main;
struct fib_table __rcu *fib_default;
+ unsigned int fib_rules_require_fldissect;
+ bool fib_has_custom_rules;
#endif
bool fib_has_custom_local_routes;
+ bool fib_offload_disabled;
+ u8 sysctl_tcp_shrink_window;
#ifdef CONFIG_IP_ROUTE_CLASSID
- int fib_num_tclassid_users;
+ atomic_t fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
- bool fib_offload_disabled;
struct sock *fibnl;
- struct sock * __percpu *icmp_sk;
struct sock *mc_autojoin_sk;
struct inet_peer_base *peers;
- struct sock * __percpu *tcp_sk;
struct fqdir *fqdir;
-#ifdef CONFIG_NETFILTER
- struct xt_table *iptable_filter;
- struct xt_table *iptable_mangle;
- struct xt_table *iptable_raw;
- struct xt_table *arptable_filter;
-#ifdef CONFIG_SECURITY
- struct xt_table *iptable_security;
-#endif
- struct xt_table *nat_table;
-#endif
- int sysctl_icmp_echo_ignore_all;
- int sysctl_icmp_echo_ignore_broadcasts;
- int sysctl_icmp_ignore_bogus_error_responses;
+ u8 sysctl_icmp_echo_ignore_all;
+ u8 sysctl_icmp_echo_enable_probe;
+ u8 sysctl_icmp_echo_ignore_broadcasts;
+ u8 sysctl_icmp_ignore_bogus_error_responses;
+ u8 sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
- int sysctl_icmp_errors_use_inbound_ifaddr;
+
+ u32 ip_rt_min_pmtu;
+ int ip_rt_mtu_expires;
+ int ip_rt_min_advmss;
struct local_ports ip_local_ports;
- int sysctl_tcp_ecn;
- int sysctl_tcp_ecn_fallback;
+ u8 sysctl_tcp_ecn;
+ u8 sysctl_tcp_ecn_fallback;
- int sysctl_ip_default_ttl;
- int sysctl_ip_no_pmtu_disc;
- int sysctl_ip_fwd_use_pmtu;
- int sysctl_ip_fwd_update_priority;
- int sysctl_ip_nonlocal_bind;
- int sysctl_ip_autobind_reuse;
+ u8 sysctl_ip_default_ttl;
+ u8 sysctl_ip_no_pmtu_disc;
+ u8 sysctl_ip_fwd_update_priority;
+ u8 sysctl_ip_nonlocal_bind;
+ u8 sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
- int sysctl_ip_dynaddr;
- int sysctl_ip_early_demux;
+ u8 sysctl_ip_dynaddr;
#ifdef CONFIG_NET_L3_MASTER_DEV
- int sysctl_raw_l3mdev_accept;
+ u8 sysctl_raw_l3mdev_accept;
#endif
- int sysctl_tcp_early_demux;
- int sysctl_udp_early_demux;
+ u8 sysctl_udp_early_demux;
- int sysctl_nexthop_compat_mode;
+ u8 sysctl_nexthop_compat_mode;
- int sysctl_fwmark_reflect;
- int sysctl_tcp_fwmark_accept;
+ u8 sysctl_fwmark_reflect;
+ u8 sysctl_tcp_fwmark_accept;
#ifdef CONFIG_NET_L3_MASTER_DEV
- int sysctl_tcp_l3mdev_accept;
+ u8 sysctl_tcp_l3mdev_accept;
#endif
- int sysctl_tcp_mtu_probing;
+ u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
- int sysctl_tcp_min_snd_mss;
int sysctl_tcp_probe_threshold;
u32 sysctl_tcp_probe_interval;
int sysctl_tcp_keepalive_time;
- int sysctl_tcp_keepalive_probes;
int sysctl_tcp_keepalive_intvl;
+ u8 sysctl_tcp_keepalive_probes;
- int sysctl_tcp_syn_retries;
- int sysctl_tcp_synack_retries;
- int sysctl_tcp_syncookies;
- int sysctl_tcp_reordering;
- int sysctl_tcp_retries1;
- int sysctl_tcp_retries2;
- int sysctl_tcp_orphan_retries;
+ u8 sysctl_tcp_syn_retries;
+ u8 sysctl_tcp_synack_retries;
+ u8 sysctl_tcp_syncookies;
+ u8 sysctl_tcp_migrate_req;
+ u8 sysctl_tcp_comp_sack_nr;
+ u8 sysctl_tcp_backlog_ack_defer;
+ u8 sysctl_tcp_pingpong_thresh;
+
+ u8 sysctl_tcp_retries1;
+ u8 sysctl_tcp_retries2;
+ u8 sysctl_tcp_orphan_retries;
+ u8 sysctl_tcp_tw_reuse;
int sysctl_tcp_fin_timeout;
- unsigned int sysctl_tcp_notsent_lowat;
- int sysctl_tcp_tw_reuse;
- int sysctl_tcp_sack;
- int sysctl_tcp_window_scaling;
- int sysctl_tcp_timestamps;
- int sysctl_tcp_early_retrans;
- int sysctl_tcp_recovery;
- int sysctl_tcp_thin_linear_timeouts;
- int sysctl_tcp_slow_start_after_idle;
- int sysctl_tcp_retrans_collapse;
- int sysctl_tcp_stdurg;
- int sysctl_tcp_rfc1337;
- int sysctl_tcp_abort_on_overflow;
- int sysctl_tcp_fack;
+ u8 sysctl_tcp_sack;
+ u8 sysctl_tcp_window_scaling;
+ u8 sysctl_tcp_timestamps;
+ u8 sysctl_tcp_recovery;
+ u8 sysctl_tcp_thin_linear_timeouts;
+ u8 sysctl_tcp_slow_start_after_idle;
+ u8 sysctl_tcp_retrans_collapse;
+ u8 sysctl_tcp_stdurg;
+ u8 sysctl_tcp_rfc1337;
+ u8 sysctl_tcp_abort_on_overflow;
+ u8 sysctl_tcp_fack; /* obsolete */
int sysctl_tcp_max_reordering;
- int sysctl_tcp_dsack;
- int sysctl_tcp_app_win;
- int sysctl_tcp_adv_win_scale;
- int sysctl_tcp_frto;
- int sysctl_tcp_nometrics_save;
- int sysctl_tcp_no_ssthresh_metrics_save;
- int sysctl_tcp_moderate_rcvbuf;
- int sysctl_tcp_tso_win_divisor;
- int sysctl_tcp_workaround_signed_windows;
- int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_adv_win_scale; /* obsolete */
+ u8 sysctl_tcp_dsack;
+ u8 sysctl_tcp_app_win;
+ u8 sysctl_tcp_frto;
+ u8 sysctl_tcp_nometrics_save;
+ u8 sysctl_tcp_no_ssthresh_metrics_save;
+ u8 sysctl_tcp_workaround_signed_windows;
int sysctl_tcp_challenge_ack_limit;
- int sysctl_tcp_min_tso_segs;
- int sysctl_tcp_min_rtt_wlen;
- int sysctl_tcp_autocorking;
+ u8 sysctl_tcp_min_tso_segs;
+ u8 sysctl_tcp_reflect_tos;
int sysctl_tcp_invalid_ratelimit;
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
- int sysctl_tcp_wmem[3];
- int sysctl_tcp_rmem[3];
- int sysctl_tcp_comp_sack_nr;
+ unsigned int sysctl_tcp_child_ehash_entries;
unsigned long sysctl_tcp_comp_sack_delay_ns;
unsigned long sysctl_tcp_comp_sack_slack_ns;
- struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
int sysctl_tcp_fastopen;
const struct tcp_congestion_ops __rcu *tcp_congestion_control;
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
- spinlock_t tcp_fastopen_ctx_lock;
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
+ u32 tcp_challenge_timestamp;
+ u32 tcp_challenge_count;
+ u8 sysctl_tcp_plb_enabled;
+ u8 sysctl_tcp_plb_idle_rehash_rounds;
+ u8 sysctl_tcp_plb_rehash_rounds;
+ u8 sysctl_tcp_plb_suspend_rto_sec;
+ int sysctl_tcp_plb_cong_thresh;
int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;
+ u8 sysctl_fib_notify_on_flag_change;
+ u8 sysctl_tcp_syn_linear_timeouts;
+
#ifdef CONFIG_NET_L3_MASTER_DEV
- int sysctl_udp_l3mdev_accept;
+ u8 sysctl_udp_l3mdev_accept;
#endif
+ u8 sysctl_igmp_llm_reports;
int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf;
- int sysctl_igmp_llm_reports;
int sysctl_igmp_qrv;
struct ping_group_range ping_group_range;
atomic_t dev_addr_genid;
+ unsigned int sysctl_udp_child_hash_entries;
+
#ifdef CONFIG_SYSCTL
unsigned long *sysctl_local_reserved_ports;
int sysctl_ip_prot_sock;
@@ -214,8 +245,9 @@ struct netns_ipv4 {
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- int sysctl_fib_multipath_use_neigh;
- int sysctl_fib_multipath_hash_policy;
+ u32 sysctl_fib_multipath_hash_fields;
+ u8 sysctl_fib_multipath_use_neigh;
+ u8 sysctl_fib_multipath_hash_policy;
#endif
struct fib_notifier_ops *notifier_ops;
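
The packed ip_local_ports encoding above drops the old seqlock: both 16-bit bounds live in one u32, so a reader needs only a single READ_ONCE(). A sketch of the decode, under that stated layout:

/* Decode the "high << 16 | low" packing noted in the struct comment.
 * One atomic-width load replaces the former seqlock round trip.
 */
static void get_port_range(const struct local_ports *lp, int *low, int *high)
{
	u32 range = READ_ONCE(lp->range);

	*low  = range & 0xffff;
	*high = range >> 16;
}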
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 5ec054473d81..5f2cfd84570a 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -20,7 +20,6 @@ struct netns_sysctl_ipv6 {
struct ctl_table_header *frags_hdr;
struct ctl_table_header *xfrm6_hdr;
#endif
- int bindv6only;
int flush_delay;
int ip6_rt_max_size;
int ip6_rt_gc_min_interval;
@@ -29,45 +28,45 @@ struct netns_sysctl_ipv6 {
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
- int multipath_hash_policy;
- int flowlabel_consistency;
- int auto_flowlabels;
+ u32 multipath_hash_fields;
+ u8 multipath_hash_policy;
+ u8 bindv6only;
+ u8 flowlabel_consistency;
+ u8 auto_flowlabels;
int icmpv6_time;
- int icmpv6_echo_ignore_all;
- int icmpv6_echo_ignore_multicast;
- int icmpv6_echo_ignore_anycast;
+ u8 icmpv6_echo_ignore_all;
+ u8 icmpv6_echo_ignore_multicast;
+ u8 icmpv6_echo_ignore_anycast;
DECLARE_BITMAP(icmpv6_ratemask, ICMPV6_MSG_MAX + 1);
unsigned long *icmpv6_ratemask_ptr;
- int anycast_src_echo_reply;
- int ip_nonlocal_bind;
- int fwmark_reflect;
+ u8 anycast_src_echo_reply;
+ u8 ip_nonlocal_bind;
+ u8 fwmark_reflect;
+ u8 flowlabel_state_ranges;
int idgen_retries;
int idgen_delay;
- int flowlabel_state_ranges;
int flowlabel_reflect;
int max_dst_opts_cnt;
int max_hbh_opts_cnt;
int max_dst_opts_len;
int max_hbh_opts_len;
int seg6_flowlabel;
- bool skip_notify_on_dev_down;
+ u32 ioam6_id;
+ u64 ioam6_id_wide;
+ u8 skip_notify_on_dev_down;
+ u8 fib_notify_on_flag_change;
+ u8 icmpv6_error_anycast_as_unicast;
};
struct netns_ipv6 {
+	/* Keep ip6_dst_ops at the beginning of netns_ipv6 */
+ struct dst_ops ip6_dst_ops;
+
struct netns_sysctl_ipv6 sysctl;
struct ipv6_devconf *devconf_all;
struct ipv6_devconf *devconf_dflt;
struct inet_peer_base *peers;
struct fqdir *fqdir;
-#ifdef CONFIG_NETFILTER
- struct xt_table *ip6table_filter;
- struct xt_table *ip6table_mangle;
- struct xt_table *ip6table_raw;
-#ifdef CONFIG_SECURITY
- struct xt_table *ip6table_security;
-#endif
- struct xt_table *ip6table_nat;
-#endif
struct fib6_info *fib6_null_entry;
struct rt6_info *ip6_null_entry;
struct rt6_statistics *rt6_stats;
@@ -75,14 +74,14 @@ struct netns_ipv6 {
struct hlist_head *fib_table_hash;
struct fib6_table *fib6_main_tbl;
struct list_head fib6_walkers;
- struct dst_ops ip6_dst_ops;
rwlock_t fib6_walker_lock;
spinlock_t fib6_gc_lock;
- unsigned int ip6_rt_gc_expire;
- unsigned long ip6_rt_last_gc;
+ atomic_t ip6_rt_gc_expire;
+ unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
#ifdef CONFIG_IPV6_SUBTREES
unsigned int fib6_routes_require_src;
#endif
@@ -91,11 +90,15 @@ struct netns_ipv6 {
struct fib6_table *fib6_local_tbl;
struct fib_rules_ops *fib6_rules_ops;
#endif
- struct sock * __percpu *icmp_sk;
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
struct sock *mc_autojoin_sk;
+
+ struct hlist_head *inet6_addr_lst;
+ spinlock_t addrconf_hash_lock;
+ struct delayed_work addr_chk_work;
+
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
struct mr_table *mrt6;
@@ -115,6 +118,7 @@ struct netns_ipv6 {
spinlock_t lock;
u32 seq;
} ip6addrlbl_table;
+ struct ioam6_pernet_data *ioam6_data;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h
new file mode 100644
index 000000000000..1db8f9aaddb4
--- /dev/null
+++ b/include/net/netns/mctp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MCTP per-net structures
+ */
+
+#ifndef __NETNS_MCTP_H__
+#define __NETNS_MCTP_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct netns_mctp {
+ /* Only updated under RTNL, entries freed via RCU */
+ struct list_head routes;
+
+ /* Bound sockets: list of sockets bound by type.
+ * This list is updated from non-atomic contexts (under bind_lock),
+ * and read (under rcu) in packet rx
+ */
+ struct mutex bind_lock;
+ struct hlist_head binds;
+
+ /* tag allocations. This list is read and updated from atomic contexts,
+	 * but elements are free()ed after an RCU grace period
+ */
+ spinlock_t keys_lock;
+ struct hlist_head keys;
+
+ /* MCTP network */
+ unsigned int default_net;
+
+ /* neighbour table */
+ struct mutex neigh_lock;
+ struct list_head neighbours;
+};
+
+#endif /* __NETNS_MCTP_H__ */
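
The comments in netns_mctp spell out the locking scheme; a hedged sketch of a writer under that scheme follows (struct mctp_sock and its bind_node member are placeholders for illustration, not part of the patch):

/* Writers serialize on bind_lock; packet-rx readers walk the binds
 * list under RCU, so insertion must use the RCU list primitive.
 */
static void mctp_bind_add(struct netns_mctp *ns, struct mctp_sock *msk)
{
	mutex_lock(&ns->bind_lock);
	hlist_add_head_rcu(&msk->bind_node, &ns->binds);
	mutex_unlock(&ns->bind_lock);
}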
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 59b2c3a3db42..7e373664b1e7 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -5,22 +5,19 @@
#include <net/snmp.h>
struct netns_mib {
- DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
+#endif
+
+ DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
- DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
- DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
- DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
- DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
+ DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
#if IS_ENABLED(CONFIG_IPV6)
- struct proc_dir_entry *proc_net_devsnmp6;
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
- DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
- DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
- DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
- DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
#endif
+
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
#endif
@@ -30,6 +27,19 @@ struct netns_mib {
#ifdef CONFIG_MPTCP
DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics);
#endif
+
+ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
+#endif
+
+ DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
+#if IS_ENABLED(CONFIG_IPV6)
+ DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
+ struct proc_dir_entry *proc_net_devsnmp6;
+#endif
};
#endif
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index a7bdcfbb0b28..19ad2574b267 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -6,6 +6,8 @@
#ifndef __NETNS_MPLS_H__
#define __NETNS_MPLS_H__
+#include <linux/types.h>
+
struct mpls_route;
struct ctl_table_header;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index ca043342c0eb..02bbdc577f8e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -12,7 +12,6 @@ struct netns_nf {
#if defined CONFIG_PROC_FS
struct proc_dir_entry *proc_netfilter;
#endif
- const struct nf_queue_handler __rcu *queue_handler;
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
@@ -25,14 +24,11 @@ struct netns_nf {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
#endif
-#if IS_ENABLED(CONFIG_DECNET)
- struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
-#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
- bool defrag_ipv4;
+ unsigned int defrag_ipv4_users;
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- bool defrag_ipv6;
+ unsigned int defrag_ipv6_users;
#endif
};
#endif
diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h
index 1937476c94a0..434239b37014 100644
--- a/include/net/netns/nexthop.h
+++ b/include/net/netns/nexthop.h
@@ -6,6 +6,7 @@
#ifndef __NETNS_NEXTHOP_H__
#define __NETNS_NEXTHOP_H__
+#include <linux/notifier.h>
#include <linux/rbtree.h>
struct netns_nexthop {
@@ -14,6 +15,6 @@ struct netns_nexthop {
unsigned int seq; /* protected by rtnl_mutex */
u32 last_id_allocated;
- struct atomic_notifier_head notifier_chain;
+ struct blocking_notifier_head notifier_chain;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 6c0806bd8d1e..cc8060c017d5 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -2,17 +2,8 @@
#ifndef _NETNS_NFTABLES_H_
#define _NETNS_NFTABLES_H_
-#include <linux/list.h>
-
struct netns_nftables {
- struct list_head tables;
- struct list_head commit_list;
- struct list_head module_list;
- struct list_head notify_list;
- struct mutex commit_mutex;
- unsigned int base_seq;
u8 gencursor;
- u8 validate_state;
};
#endif
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index d8d02e4188d1..7eff3d981b89 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -2,6 +2,9 @@
#ifndef __NETNS_SCTP_H__
#define __NETNS_SCTP_H__
+#include <linux/timer.h>
+#include <net/snmp.h>
+
struct sock;
struct proc_dir_entry;
struct sctp_mib;
@@ -22,6 +25,14 @@ struct netns_sctp {
*/
struct sock *ctl_sock;
+ /* UDP tunneling listening sock. */
+ struct sock *udp4_sock;
+ struct sock *udp6_sock;
+ /* UDP tunneling listening port. */
+ int udp_port;
+ /* UDP tunneling remote encap port. */
+ int encap_port;
+
/* This is the global local address list.
* We actively maintain this complete list of addresses on
* the system by catching address add/delete events.
@@ -76,6 +87,9 @@ struct netns_sctp {
/* HB.interval - 30 seconds */
unsigned int hb_interval;
+ /* The interval for PLPMTUD probe timer */
+ unsigned int probe_interval;
+
/* Association.Max.Retrans - 10 attempts
* Path.Max.Retrans - 5 attempts (per destination address)
* Max.Init.Retransmits - 8 attempts
@@ -161,6 +175,10 @@ struct netns_sctp {
/* Threshold for autoclose timeout, in seconds. */
unsigned long max_autoclose;
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ int l3mdev_accept;
+#endif
};
#endif /* __NETNS_SCTP_H__ */
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
new file mode 100644
index 000000000000..fc752a50f91b
--- /dev/null
+++ b/include/net/netns/smc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_SMC_H__
+#define __NETNS_SMC_H__
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+
+struct smc_stats_rsn;
+struct smc_stats;
+struct netns_smc {
+ /* per cpu counters for SMC */
+ struct smc_stats __percpu *smc_stats;
+ /* protect fback_rsn */
+ struct mutex mutex_fback_rsn;
+ struct smc_stats_rsn *fback_rsn;
+
+ bool limit_smc_hs; /* constraint on handshake */
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *smc_hdr;
+#endif
+ unsigned int sysctl_autocorking_size;
+ unsigned int sysctl_smcr_buf_type;
+ int sysctl_smcr_testlink_time;
+ int sysctl_wmem;
+ int sysctl_rmem;
+ int sysctl_max_links_per_lgr;
+ int sysctl_max_conns_per_lgr;
+};
+#endif
diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h
index 91a3d7e39198..9859d134d5a8 100644
--- a/include/net/netns/unix.h
+++ b/include/net/netns/unix.h
@@ -5,8 +5,16 @@
#ifndef __NETNS_UNIX_H__
#define __NETNS_UNIX_H__
+#include <linux/spinlock.h>
+
+struct unix_table {
+ spinlock_t *locks;
+ struct hlist_head *buckets;
+};
+
struct ctl_table_header;
struct netns_unix {
+ struct unix_table table;
int sysctl_max_dgram_qlen;
struct ctl_table_header *ctl;
};
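
Given the new split of locks and buckets, a hedged sketch of a per-bucket insert (the hash value is computed by the caller; the helper name is a placeholder):

/* Take only the lock for this hash bucket rather than one global
 * table lock, so unrelated binds no longer contend.
 */
static void unix_table_insert(struct netns_unix *nu, struct sock *sk,
			      unsigned int hash)
{
	spin_lock(&nu->table.locks[hash]);
	sk_add_node(sk, &nu->table.buckets[hash]);
	spin_unlock(&nu->table.locks[hash]);
}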
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
deleted file mode 100644
index 9bc5a12fdbb0..000000000000
--- a/include/net/netns/x_tables.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NETNS_X_TABLES_H
-#define __NETNS_X_TABLES_H
-
-#include <linux/list.h>
-#include <linux/netfilter_defs.h>
-
-struct ebt_table;
-
-struct netns_xt {
- struct list_head tables[NFPROTO_NUMPROTO];
- bool notrack_deprecated_warning;
- bool clusterip_deprecated_warning;
-#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
- defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
- struct ebt_table *broute_table;
- struct ebt_table *frame_filter;
- struct ebt_table *frame_nat;
-#endif
-};
-#endif
diff --git a/include/net/netns/xdp.h b/include/net/netns/xdp.h
index e5734261ba0a..21a4f25a187a 100644
--- a/include/net/netns/xdp.h
+++ b/include/net/netns/xdp.h
@@ -2,8 +2,8 @@
#ifndef __NETNS_XDP_H__
#define __NETNS_XDP_H__
-#include <linux/rculist.h>
#include <linux/mutex.h>
+#include <linux/types.h>
struct netns_xdp {
struct mutex lock;
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 59f45b1e9dac..423b52eca908 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -42,6 +42,7 @@ struct netns_xfrm {
struct hlist_head __rcu *state_bydst;
struct hlist_head __rcu *state_bysrc;
struct hlist_head __rcu *state_byspi;
+ struct hlist_head __rcu *state_byseq;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
@@ -49,6 +50,7 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
+ unsigned int idx_generator;
struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
@@ -64,6 +66,9 @@ struct netns_xfrm {
u32 sysctl_aevent_rseqth;
int sysctl_larval_drop;
u32 sysctl_acq_expires;
+
+ u8 policy_default[XFRM_POLICY_MAX];
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_hdr;
#endif
@@ -72,7 +77,10 @@ struct netns_xfrm {
#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
- spinlock_t xfrm_state_lock;
+ spinlock_t xfrm_state_lock;
+ seqcount_spinlock_t xfrm_state_hash_generation;
+ seqcount_spinlock_t xfrm_policy_hash_generation;
+
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
};
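
The two seqcounts added above pair with the existing spinlocks so lookups can run locklessly and retry if a hash resize intervenes. A hedged read-side sketch, where lookup_one() stands in for the real hash walk:

static struct xfrm_state *lookup_one(struct netns_xfrm *xfrm);	/* placeholder */

/* Standard seqcount read loop against xfrm_state_hash_generation. */
static struct xfrm_state *find_state(struct netns_xfrm *xfrm)
{
	struct xfrm_state *x;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&xfrm->xfrm_state_hash_generation);
		x = lookup_one(xfrm);
	} while (read_seqcount_retry(&xfrm->xfrm_state_hash_generation, seq));

	return x;
}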
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 80f15b1c1a48..f0565a5987d1 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -14,6 +14,7 @@
#include <net/sock.h>
#include <linux/refcount.h>
#include <linux/seq_file.h>
+#include <net/ax25.h>
#define NR_NETWORK_LEN 15
#define NR_TRANSPORT_LEN 5
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3a4f9e3b91a5..7ca315ad500e 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -40,6 +40,14 @@ struct nh_config {
struct nlattr *nh_grp;
u16 nh_grp_type;
+ u16 nh_grp_res_num_buckets;
+ unsigned long nh_grp_res_idle_timer;
+ unsigned long nh_grp_res_unbalanced_timer;
+ bool nh_grp_res_has_num_buckets;
+ bool nh_grp_res_has_idle_timer;
+ bool nh_grp_res_has_unbalanced_timer;
+
+ bool nh_hw_stats;
struct nlattr *nh_encap;
u16 nh_encap_type;
@@ -63,22 +71,72 @@ struct nh_info {
};
};
+struct nh_res_bucket {
+ struct nh_grp_entry __rcu *nh_entry;
+ atomic_long_t used_time;
+ unsigned long migrated_time;
+ bool occupied;
+ u8 nh_flags;
+};
+
+struct nh_res_table {
+ struct net *net;
+ u32 nhg_id;
+ struct delayed_work upkeep_dw;
+
+ /* List of NHGEs that have too few buckets ("uw" for underweight).
+ * Reclaimed buckets will be given to entries in this list.
+ */
+ struct list_head uw_nh_entries;
+ unsigned long unbalanced_since;
+
+ u32 idle_timer;
+ u32 unbalanced_timer;
+
+ u16 num_nh_buckets;
+ struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets);
+};
+
+struct nh_grp_entry_stats {
+ u64_stats_t packets;
+ struct u64_stats_sync syncp;
+};
+
struct nh_grp_entry {
struct nexthop *nh;
+ struct nh_grp_entry_stats __percpu *stats;
u8 weight;
- atomic_t upper_bound;
+
+ union {
+ struct {
+ atomic_t upper_bound;
+ } hthr;
+ struct {
+ /* Member on uw_nh_entries. */
+ struct list_head uw_nh_entry;
+
+ u16 count_buckets;
+ u16 wants_buckets;
+ } res;
+ };
struct list_head nh_list;
struct nexthop *nh_parent; /* nexthop of group with this entry */
+ u64 packets_hw;
};
struct nh_group {
struct nh_group *spare; /* spare group for removals */
u16 num_nh;
- bool mpath;
+ bool is_multipath;
+ bool hash_threshold;
+ bool resilient;
bool fdb_nh;
bool has_v4;
- struct nh_grp_entry nh_entries[];
+ bool hw_stats;
+
+ struct nh_res_table __rcu *res_table;
+ struct nh_grp_entry nh_entries[] __counted_by(num_nh);
};
struct nexthop {
@@ -105,15 +163,97 @@ struct nexthop {
};
enum nexthop_event_type {
- NEXTHOP_EVENT_ADD,
- NEXTHOP_EVENT_DEL
+ NEXTHOP_EVENT_DEL,
+ NEXTHOP_EVENT_REPLACE,
+ NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
+ NEXTHOP_EVENT_BUCKET_REPLACE,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
+};
+
+enum nh_notifier_info_type {
+ NH_NOTIFIER_INFO_TYPE_SINGLE,
+ NH_NOTIFIER_INFO_TYPE_GRP,
+ NH_NOTIFIER_INFO_TYPE_RES_TABLE,
+ NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
+ NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS,
+};
+
+struct nh_notifier_single_info {
+ struct net_device *dev;
+ u8 gw_family;
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ };
+ u32 id;
+ u8 is_reject:1,
+ is_fdb:1,
+ has_encap:1;
+};
+
+struct nh_notifier_grp_entry_info {
+ u8 weight;
+ struct nh_notifier_single_info nh;
+};
+
+struct nh_notifier_grp_info {
+ u16 num_nh;
+ bool is_fdb;
+ bool hw_stats;
+ struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh);
+};
+
+struct nh_notifier_res_bucket_info {
+ u16 bucket_index;
+ unsigned int idle_timer_ms;
+ bool force;
+ struct nh_notifier_single_info old_nh;
+ struct nh_notifier_single_info new_nh;
+};
+
+struct nh_notifier_res_table_info {
+ u16 num_nh_buckets;
+ bool hw_stats;
+ struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets);
+};
+
+struct nh_notifier_grp_hw_stats_entry_info {
+ u32 id;
+ u64 packets;
+};
+
+struct nh_notifier_grp_hw_stats_info {
+ u16 num_nh;
+ bool hw_stats_used;
+ struct nh_notifier_grp_hw_stats_entry_info stats[] __counted_by(num_nh);
+};
+
+struct nh_notifier_info {
+ struct net *net;
+ struct netlink_ext_ack *extack;
+ u32 id;
+ enum nh_notifier_info_type type;
+ union {
+ struct nh_notifier_single_info *nh;
+ struct nh_notifier_grp_info *nh_grp;
+ struct nh_notifier_res_table_info *nh_res_table;
+ struct nh_notifier_res_bucket_info *nh_res_bucket;
+ struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats;
+ };
};
-int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
- enum nexthop_event_type event_type,
- struct nexthop *nh);
-int register_nexthop_notifier(struct net *net, struct notifier_block *nb);
+int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
+void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
+void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
+ bool offload, bool trap);
+void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
+ unsigned long *activity);
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets);
/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
@@ -168,7 +308,7 @@ static inline bool nexthop_is_multipath(const struct nexthop *nh)
struct nh_group *nh_grp;
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
- return nh_grp->mpath;
+ return nh_grp->is_multipath;
}
return false;
}
@@ -183,7 +323,7 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
struct nh_group *nh_grp;
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
- if (nh_grp->mpath)
+ if (nh_grp->is_multipath)
rc = nh_grp->num_nh;
}
@@ -206,7 +346,7 @@ static inline
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
u8 rt_family)
{
- struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
int i;
for (i = 0; i < nhg->num_nh; i++) {
@@ -215,7 +355,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;
- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
return -EMSGSIZE;
}
@@ -264,7 +404,7 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
struct nh_group *nh_grp;
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
- if (nh_grp->mpath) {
+ if (nh_grp->is_multipath) {
nh = nexthop_mpath_select(nh_grp, nhsel);
if (!nh)
return NULL;
@@ -366,6 +506,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
struct netlink_ext_ack *extack);
+/* Caller should either hold rcu_read_lock(), or RTNL. */
static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
{
struct nh_info *nhi;
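
The notifier API above now takes an extack at registration time so errors from the initial replay of existing nexthops can be reported. A hedged caller sketch, with my_nh_event() as a placeholder callback:

static int my_nh_event(struct notifier_block *nb, unsigned long event,
		       void *info);	/* placeholder handler */

static struct notifier_block my_nh_nb = {
	.notifier_call = my_nh_event,
};

static int my_driver_init(struct net *net, struct netlink_ext_ack *extack)
{
	/* Existing nexthops are replayed to the new listener as
	 * NEXTHOP_EVENT_REPLACE notifications during registration.
	 */
	return register_nexthop_notifier(net, &my_nh_nb, extack);
}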
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
index 963db96bcbbb..bb3e8fdc0692 100644
--- a/include/net/nfc/digital.h
+++ b/include/net/nfc/digital.h
@@ -191,7 +191,7 @@ struct digital_poll_tech {
struct nfc_digital_dev {
struct nfc_dev *nfc_dev;
- struct nfc_digital_ops *ops;
+ const struct nfc_digital_ops *ops;
u32 protocols;
@@ -236,7 +236,7 @@ struct nfc_digital_dev {
void (*skb_add_crc)(struct sk_buff *skb);
};
-struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+struct nfc_digital_dev *nfc_digital_allocate_device(const struct nfc_digital_ops *ops,
__u32 supported_protocols,
__u32 driver_capabilities,
int tx_headroom,
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index b35f37a57686..756c11084f65 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -118,7 +118,7 @@ struct nfc_hci_dev {
struct sk_buff_head msg_rx_queue;
- struct nfc_hci_ops *ops;
+ const struct nfc_hci_ops *ops;
struct nfc_llc *llc;
@@ -151,7 +151,7 @@ struct nfc_hci_dev {
};
/* hci device allocation */
-struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
+struct nfc_hci_dev *nfc_hci_allocate_device(const struct nfc_hci_ops *ops,
struct nfc_hci_init_data *init_data,
unsigned long quirks,
u32 protocols,
@@ -168,7 +168,7 @@ void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
- struct nfc_vendor_cmd *cmds,
+ const struct nfc_vendor_cmd *cmds,
int n_cmds)
{
return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 0550e0380b8d..e82f55f543bb 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -25,6 +25,8 @@
#define NCI_MAX_PARAM_LEN 251
#define NCI_MAX_PAYLOAD_SIZE 255
#define NCI_MAX_PACKET_SIZE 258
+#define NCI_MAX_LARGE_PARAMS_NCI_v2 15
+#define NCI_VER_2_MASK 0x20
/* NCI Status Codes */
#define NCI_STATUS_OK 0x00
@@ -131,6 +133,9 @@
#define NCI_LF_CON_BITR_F_212 0x02
#define NCI_LF_CON_BITR_F_424 0x04
+/* NCI 2.x Feature Enable Bit */
+#define NCI_FEATURE_DISABLE 0x00
+
/* NCI Reset types */
#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
#define NCI_RESET_TYPE_RESET_CONFIG 0x01
@@ -220,6 +225,11 @@ struct nci_core_reset_cmd {
} __packed;
#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
+/* To support NCI 2.x */
+struct nci_core_init_v2_cmd {
+ u8 feature1;
+ u8 feature2;
+};
#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
struct set_config_param {
@@ -334,6 +344,20 @@ struct nci_core_init_rsp_2 {
__le32 manufact_specific_info;
} __packed;
+/* To support NCI ver 2.x */
+struct nci_core_init_rsp_nci_ver2 {
+ u8 status;
+ __le32 nfcc_features;
+ u8 max_logical_connections;
+ __le16 max_routing_table_size;
+ u8 max_ctrl_pkt_payload_len;
+ u8 max_data_pkt_hci_payload_len;
+ u8 number_of_hci_credit;
+ __le16 max_nfc_v_frame_size;
+ u8 num_supported_rf_interfaces;
+ u8 supported_rf_interfaces[];
+} __packed;
+
#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
struct nci_core_set_config_rsp {
__u8 status;
@@ -372,6 +396,16 @@ struct nci_nfcee_discover_rsp {
/* --------------------------- */
/* ---- NCI Notifications ---- */
/* --------------------------- */
+#define NCI_OP_CORE_RESET_NTF nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_ntf {
+ u8 reset_trigger;
+ u8 config_status;
+ u8 nci_ver;
+ u8 manufact_id;
+ u8 manufacturer_specific_len;
+ __le32 manufact_specific_info;
+} __packed;
+
#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06)
struct conn_credit_entry {
__u8 conn_id;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 43c9c5d2bedb..ea8595651c38 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -30,6 +30,7 @@ enum nci_flag {
NCI_UP,
NCI_DATA_EXCHANGE,
NCI_DATA_EXCHANGE_TO,
+ NCI_UNREG,
};
/* NCI device states */
@@ -82,10 +83,10 @@ struct nci_ops {
void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
struct sk_buff *skb);
- struct nci_driver_ops *prop_ops;
+ const struct nci_driver_ops *prop_ops;
size_t n_prop_ops;
- struct nci_driver_ops *core_ops;
+ const struct nci_driver_ops *core_ops;
size_t n_core_ops;
};
@@ -194,7 +195,7 @@ struct nci_hci_dev {
/* NCI Core structures */
struct nci_dev {
struct nfc_dev *nfc_dev;
- struct nci_ops *ops;
+ const struct nci_ops *ops;
struct nci_hci_dev *hci_dev;
int tx_headroom;
@@ -267,7 +268,7 @@ struct nci_dev {
};
/* ----- NCI Devices ----- */
-struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+struct nci_dev *nci_allocate_device(const struct nci_ops *ops,
__u32 supported_protocols,
int tx_headroom,
int tx_tailroom);
@@ -276,28 +277,31 @@ int nci_register_device(struct nci_dev *ndev);
void nci_unregister_device(struct nci_dev *ndev);
int nci_request(struct nci_dev *ndev,
void (*req)(struct nci_dev *ndev,
- unsigned long opt),
- unsigned long opt, __u32 timeout);
-int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
-int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload);
+ const void *opt),
+ const void *opt, __u32 timeout);
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len,
+ const __u8 *payload);
+int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len,
+ const __u8 *payload);
int nci_core_reset(struct nci_dev *ndev);
int nci_core_init(struct nci_dev *ndev);
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb);
-int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
+int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, const __u8 *val);
int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode);
int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
u8 number_destination_params,
size_t params_len,
- struct core_conn_create_dest_spec_params *params);
+ const struct core_conn_create_dest_spec_params *params);
int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
-int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+int nci_nfcc_loopback(struct nci_dev *ndev, const void *data, size_t data_len,
struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
@@ -342,7 +346,7 @@ static inline void *nci_get_drvdata(struct nci_dev *ndev)
}
static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
- struct nfc_vendor_cmd *cmds,
+ const struct nfc_vendor_cmd *cmds,
int n_cmds)
{
return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
@@ -359,7 +363,7 @@ int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
struct sk_buff *skb);
void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
-int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, const void *payload);
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id);
void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
@@ -377,7 +381,7 @@ void nci_req_complete(struct nci_dev *ndev, int result);
struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
int conn_id);
int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
- struct dest_spec_params *params);
+ const struct dest_spec_params *params);
/* ----- NCI status code ----- */
int nci_to_errno(__u8 code);
@@ -430,8 +434,6 @@ struct nci_uart_ops {
int (*open)(struct nci_uart *nci_uart);
void (*close)(struct nci_uart *nci_uart);
int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
- int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags,
- int count);
int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
void (*tx_start)(struct nci_uart *nci_uart);
void (*tx_done)(struct nci_uart *nci_uart);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 2cd3a261bcbc..3d07abacf08b 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -188,17 +188,17 @@ struct nfc_dev {
struct rfkill *rfkill;
- struct nfc_vendor_cmd *vendor_cmds;
+ const struct nfc_vendor_cmd *vendor_cmds;
int n_vendor_cmds;
- struct nfc_ops *ops;
+ const struct nfc_ops *ops;
struct genl_info *cur_cmd_info;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
-extern struct class nfc_class;
+extern const struct class nfc_class;
-struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
u32 supported_protocols,
int tx_headroom,
int tx_tailroom);
@@ -245,7 +245,7 @@ static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
*
* @dev: The nfc device
*/
-static inline void *nfc_get_drvdata(struct nfc_dev *dev)
+static inline void *nfc_get_drvdata(const struct nfc_dev *dev)
{
return dev_get_drvdata(&dev->dev);
}
@@ -255,7 +255,7 @@ static inline void *nfc_get_drvdata(struct nfc_dev *dev)
*
* @dev: The nfc device whose name to return
*/
-static inline const char *nfc_device_name(struct nfc_dev *dev)
+static inline const char *nfc_device_name(const struct nfc_dev *dev)
{
return dev_name(&dev->dev);
}
@@ -266,7 +266,7 @@ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
int nfc_set_remote_general_bytes(struct nfc_dev *dev,
- u8 *gt, u8 gt_len);
+ const u8 *gt, u8 gt_len);
u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
@@ -280,7 +280,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
- u8 *gb, size_t gb_len);
+ const u8 *gb, size_t gb_len);
int nfc_tm_deactivated(struct nfc_dev *dev);
int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
@@ -297,7 +297,7 @@ void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
u8 payload_type, u8 direction);
static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
- struct nfc_vendor_cmd *cmds,
+ const struct nfc_vendor_cmd *cmds,
int n_cmds)
{
if (dev->vendor_cmds || dev->n_vendor_cmds)
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index ddcee128f5d9..4c752f799957 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -19,6 +19,8 @@
*
*/
+#include <linux/types.h>
+
#define NL802154_GENL_NAME "nl802154"
enum nl802154_commands {
@@ -56,9 +58,6 @@ enum nl802154_commands {
NL802154_CMD_SET_WPAN_PHY_NETNS,
- /* add new commands above here */
-
-#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
NL802154_CMD_SET_SEC_PARAMS,
NL802154_CMD_GET_SEC_KEY, /* can dump */
NL802154_CMD_NEW_SEC_KEY,
@@ -72,7 +71,19 @@ enum nl802154_commands {
NL802154_CMD_GET_SEC_LEVEL, /* can dump */
NL802154_CMD_NEW_SEC_LEVEL,
NL802154_CMD_DEL_SEC_LEVEL,
-#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
+ NL802154_CMD_SCAN_EVENT,
+ NL802154_CMD_TRIGGER_SCAN,
+ NL802154_CMD_ABORT_SCAN,
+ NL802154_CMD_SCAN_DONE,
+ NL802154_CMD_SEND_BEACONS,
+ NL802154_CMD_STOP_BEACONS,
+ NL802154_CMD_ASSOCIATE,
+ NL802154_CMD_DISASSOCIATE,
+ NL802154_CMD_SET_MAX_ASSOCIATIONS,
+ NL802154_CMD_LIST_ASSOCIATIONS,
+
+ /* add new commands above here */
/* used to define NL802154_CMD_MAX below */
__NL802154_CMD_AFTER_LAST,
@@ -131,6 +142,18 @@ enum nl802154_attrs {
NL802154_ATTR_PID,
NL802154_ATTR_NETNS_FD,
+ NL802154_ATTR_COORDINATOR,
+ NL802154_ATTR_SCAN_TYPE,
+ NL802154_ATTR_SCAN_FLAGS,
+ NL802154_ATTR_SCAN_CHANNELS,
+ NL802154_ATTR_SCAN_PREAMBLE_CODES,
+ NL802154_ATTR_SCAN_MEAN_PRF,
+ NL802154_ATTR_SCAN_DURATION,
+ NL802154_ATTR_SCAN_DONE_REASON,
+ NL802154_ATTR_BEACON_INTERVAL,
+ NL802154_ATTR_MAX_ASSOCIATIONS,
+ NL802154_ATTR_PEER,
+
/* add attributes here, update the policy in nl802154.c */
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
@@ -150,10 +173,9 @@ enum nl802154_attrs {
};
enum nl802154_iftype {
- /* for backwards compatibility TODO */
- NL802154_IFTYPE_UNSPEC = -1,
+ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
- NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_NODE = 0,
NL802154_IFTYPE_MONITOR,
NL802154_IFTYPE_COORD,
@@ -218,6 +240,93 @@ enum nl802154_wpan_phy_capability_attr {
};
/**
+ * enum nl802154_coord - Netlink attributes for a coord
+ *
+ * @__NL802154_COORD_INVALID: invalid
+ * @NL802154_COORD_PANID: PANID of the coordinator (2 bytes)
+ * @NL802154_COORD_ADDR: coordinator address (8 bytes or 2 bytes)
+ * @NL802154_COORD_CHANNEL: channel number, related to @NL802154_COORD_PAGE (u8)
+ * @NL802154_COORD_PAGE: channel page, related to @NL802154_COORD_CHANNEL (u8)
+ * @NL802154_COORD_PREAMBLE_CODE: Preamble code used when the beacon was received,
+ * this is PHY dependent and optional (u8)
+ * @NL802154_COORD_MEAN_PRF: Mean PRF used when the beacon was received,
+ * this is PHY dependent and optional (u8)
+ * @NL802154_COORD_SUPERFRAME_SPEC: superframe specification of the PAN (u16)
+ * @NL802154_COORD_LINK_QUALITY: signal quality of beacon in unspecified units,
+ * scaled to 0..255 (u8)
+ * @NL802154_COORD_GTS_PERMIT: set to true if GTS is permitted on this PAN
+ * @NL802154_COORD_PAYLOAD_DATA: binary data containing the raw data from the
+ *	frame payload (only if beacon or probe response had data)
+ * @NL802154_COORD_PAD: attribute used for padding for 64-bit alignment
+ * @NL802154_COORD_MAX: highest coordinator attribute
+ */
+enum nl802154_coord {
+ __NL802154_COORD_INVALID,
+ NL802154_COORD_PANID,
+ NL802154_COORD_ADDR,
+ NL802154_COORD_CHANNEL,
+ NL802154_COORD_PAGE,
+ NL802154_COORD_PREAMBLE_CODE,
+ NL802154_COORD_MEAN_PRF,
+ NL802154_COORD_SUPERFRAME_SPEC,
+ NL802154_COORD_LINK_QUALITY,
+ NL802154_COORD_GTS_PERMIT,
+ NL802154_COORD_PAYLOAD_DATA,
+ NL802154_COORD_PAD,
+
+ /* keep last */
+ NL802154_COORD_MAX,
+};
+
+/**
+ * enum nl802154_scan_types - Scan types
+ *
+ * @__NL802154_SCAN_INVALID: scan type number 0 is reserved
+ * @NL802154_SCAN_ED: An ED scan allows a device to obtain a measure of the peak
+ * energy in each requested channel
+ * @NL802154_SCAN_ACTIVE: Locate any coordinator transmitting Beacon frames using
+ * a Beacon Request command
+ * @NL802154_SCAN_PASSIVE: Locate any coordinator transmitting Beacon frames
+ * @NL802154_SCAN_ORPHAN: Relocate coordinator following a loss of synchronisation
+ * @NL802154_SCAN_ENHANCED_ACTIVE: Same as Active using Enhanced Beacon Request
+ * command instead of Beacon Request command
+ * @NL802154_SCAN_RIT_PASSIVE: Passive scan for RIT Data Request command frames
+ * instead of Beacon frames
+ * @NL802154_SCAN_ATTR_MAX: Maximum SCAN attribute number
+ */
+enum nl802154_scan_types {
+ __NL802154_SCAN_INVALID,
+ NL802154_SCAN_ED,
+ NL802154_SCAN_ACTIVE,
+ NL802154_SCAN_PASSIVE,
+ NL802154_SCAN_ORPHAN,
+ NL802154_SCAN_ENHANCED_ACTIVE,
+ NL802154_SCAN_RIT_PASSIVE,
+
+ /* keep last */
+ NL802154_SCAN_ATTR_MAX,
+};
+
+/**
+ * enum nl802154_scan_done_reasons - End of scan reasons
+ *
+ * @__NL802154_SCAN_DONE_REASON_INVALID: scan done reason number 0 is reserved.
+ * @NL802154_SCAN_DONE_REASON_FINISHED: The scan just finished naturally after
+ * going through all the requested and possible (complex) channels.
+ * @NL802154_SCAN_DONE_REASON_ABORTED: The scan was aborted upon user request.
+ * @NL802154_SCAN_DONE_REASON_MAX: Maximum scan done reason attribute number.
+ */
+enum nl802154_scan_done_reasons {
+ __NL802154_SCAN_DONE_REASON_INVALID,
+ NL802154_SCAN_DONE_REASON_FINISHED,
+ NL802154_SCAN_DONE_REASON_ABORTED,
+
+ /* keep last */
+ NL802154_SCAN_DONE_REASON_MAX,
+};
+
+/**
* enum nl802154_cca_modes - cca modes
*
* @__NL802154_CCA_INVALID: cca mode number 0 is reserved
@@ -282,8 +391,6 @@ enum nl802154_supported_bool_states {
NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
};
-#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
-
enum nl802154_dev_addr_modes {
NL802154_DEV_ADDR_NONE,
__NL802154_DEV_ADDR_INVALID,
@@ -303,12 +410,26 @@ enum nl802154_dev_addr_attrs {
NL802154_DEV_ADDR_ATTR_SHORT,
NL802154_DEV_ADDR_ATTR_EXTENDED,
NL802154_DEV_ADDR_ATTR_PAD,
+ NL802154_DEV_ADDR_ATTR_PEER_TYPE,
/* keep last */
__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
};
+enum nl802154_peer_type {
+ NL802154_PEER_TYPE_UNSPEC,
+
+ NL802154_PEER_TYPE_PARENT,
+ NL802154_PEER_TYPE_CHILD,
+
+ /* keep last */
+ __NL802154_PEER_TYPE_AFTER_LAST,
+ NL802154_PEER_TYPE_MAX = __NL802154_PEER_TYPE_AFTER_LAST - 1
+};
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+
enum nl802154_key_id_modes {
NL802154_KEY_ID_MODE_IMPLICIT,
NL802154_KEY_ID_MODE_INDEX,
diff --git a/include/net/nsh.h b/include/net/nsh.h
index 350b1ad11c7f..16a751093896 100644
--- a/include/net/nsh.h
+++ b/include/net/nsh.h
@@ -192,7 +192,7 @@
/**
* struct nsh_md1_ctx - Keeps track of NSH context data
- * @nshc<1-4>: NSH Contexts.
+ * @context: NSH Contexts.
*/
struct nsh_md1_ctx {
__be32 context[4];
diff --git a/include/net/p8022.h b/include/net/p8022.h
index c2bacc66bfbc..a29e224ac498 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -1,6 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_P8022_H
#define _NET_P8022_H
+
+struct net_device;
+struct packet_type;
+struct sk_buff;
+
struct datalink_proto *
register_8022_client(unsigned char type,
int (*func)(struct sk_buff *skb,
@@ -8,7 +13,4 @@ register_8022_client(unsigned char type,
struct packet_type *pt,
struct net_device *orig_dev));
void unregister_8022_client(struct datalink_proto *proto);
-
-struct datalink_proto *make_8023_client(void);
-void destroy_8023_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
deleted file mode 100644
index 81d7773f96cd..000000000000
--- a/include/net/page_pool.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * page_pool.h
- * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
- * Copyright (C) 2016 Red Hat, Inc.
- */
-
-/**
- * DOC: page_pool allocator
- *
- * This page_pool allocator is optimized for the XDP mode that
- * uses one-frame-per-page, but has fallbacks that act like the
- * regular page allocator APIs.
- *
- * Basic use involves replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call. Drivers should likely use
- * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
- *
- * API keeps track of in-flight pages, in order to let API users know
- * when it is safe to deallocate a page_pool object. Thus, API users
- * must make sure to call page_pool_release_page() when a page is
- * "leaving" the page_pool, or call page_pool_put_page() where
- * appropriate, to maintain correct accounting.
- *
- * API user must only call page_pool_put_page() once on a page, as it
- * will either recycle the page, or in case of elevated refcnt, it
- * will release the DMA mapping and in-flight state accounting. We
- * hope to lift this requirement in the future.
- */
-#ifndef _NET_PAGE_POOL_H
-#define _NET_PAGE_POOL_H
-
-#include <linux/mm.h> /* Needed by ptr_ring */
-#include <linux/ptr_ring.h>
-#include <linux/dma-direction.h>
-
-#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
- * map/unmap
- */
-#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
- * from page_pool will be
- * DMA-synced-for-device according to
- * the length provided by the device
- * driver.
- * Please note DMA-sync-for-CPU is still
- * device driver responsibility
- */
-#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
-
-/*
- * Fast allocation side cache array/stack
- *
- * The cache size and refill watermark is related to the network
- * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
- * ring is usually refilled and the max consumed elements will be 64,
- * thus a natural max size of objects needed in the cache.
- *
- * Keeping room for more objects, is due to XDP_DROP use-case. As
- * XDP_DROP allows the opportunity to recycle objects directly into
- * this array, as it shares the same softirq/NAPI protection. If
- * cache is already full (or partly full) then the XDP_DROP recycles
- * would have to take a slower code path.
- */
-#define PP_ALLOC_CACHE_SIZE 128
-#define PP_ALLOC_CACHE_REFILL 64
-struct pp_alloc_cache {
- u32 count;
- void *cache[PP_ALLOC_CACHE_SIZE];
-};
-
-struct page_pool_params {
- unsigned int flags;
- unsigned int order;
- unsigned int pool_size;
- int nid; /* Numa node id to allocate from pages from */
- struct device *dev; /* device, for DMA pre-mapping purposes */
- enum dma_data_direction dma_dir; /* DMA mapping direction */
- unsigned int max_len; /* max DMA sync memory size */
- unsigned int offset; /* DMA addr offset */
-};
-
-struct page_pool {
- struct page_pool_params p;
-
- struct delayed_work release_dw;
- void (*disconnect)(void *);
- unsigned long defer_start;
- unsigned long defer_warn;
-
- u32 pages_state_hold_cnt;
-
- /*
- * Data structure for allocation side
- *
- * Drivers allocation side usually already perform some kind
- * of resource protection. Piggyback on this protection, and
- * require driver to protect allocation side.
- *
- * For NIC drivers this means, allocate a page_pool per
- * RX-queue. As the RX-queue is already protected by
- * Softirq/BH scheduling and napi_schedule. NAPI schedule
- * guarantee that a single napi_struct will only be scheduled
- * on a single CPU (see napi_schedule).
- */
- struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
-
- /* Data structure for storing recycled pages.
- *
- * Returning/freeing pages is more complicated synchronization
- * wise, because free's can happen on remote CPUs, with no
- * association with allocation resource.
- *
- * Use ptr_ring, as it separates consumer and producer
- * effeciently, it a way that doesn't bounce cache-lines.
- *
- * TODO: Implement bulk return pages into this structure.
- */
- struct ptr_ring ring;
-
- atomic_t pages_state_release_cnt;
-
- /* A page_pool is strictly tied to a single RX-queue being
- * protected by NAPI, due to above pp_alloc_cache. This
- * refcnt serves purpose is to simplify drivers error handling.
- */
- refcount_t user_cnt;
-
- u64 destroy_cnt;
-};
-
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
-
-static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
-{
- gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
-
- return page_pool_alloc_pages(pool, gfp);
-}
-
-/* get the stored dma direction. A driver might decide to treat this locally and
- * avoid the extra cache line from page_pool to determine the direction
- */
-static
-inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
-{
- return pool->p.dma_dir;
-}
-
-struct page_pool *page_pool_create(const struct page_pool_params *params);
-
-#ifdef CONFIG_PAGE_POOL
-void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
-void page_pool_release_page(struct page_pool *pool, struct page *page);
-#else
-static inline void page_pool_destroy(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_use_xdp_mem(struct page_pool *pool,
- void (*disconnect)(void *))
-{
-}
-static inline void page_pool_release_page(struct page_pool *pool,
- struct page *page)
-{
-}
-#endif
-
-void page_pool_put_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct);
-
-/* Same as above but will try to sync the entire area pool->max_len */
-static inline void page_pool_put_full_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
-{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
-#ifdef CONFIG_PAGE_POOL
- page_pool_put_page(pool, page, -1, allow_direct);
-#endif
-}
-
-/* Same as above but the caller must guarantee safe context. e.g NAPI */
-static inline void page_pool_recycle_direct(struct page_pool *pool,
- struct page *page)
-{
- page_pool_put_full_page(pool, page, true);
-}
-
-static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
-{
- return page->dma_addr;
-}
-
-static inline bool is_page_pool_compiled_in(void)
-{
-#ifdef CONFIG_PAGE_POOL
- return true;
-#else
- return false;
-#endif
-}
-
-static inline bool page_pool_put(struct page_pool *pool)
-{
- return refcount_dec_and_test(&pool->user_cnt);
-}
-
-/* Caller must provide appropriate safe context, e.g. NAPI. */
-void page_pool_update_nid(struct page_pool *pool, int new_nid);
-static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
-{
- if (unlikely(pool->p.nid != new_nid))
- page_pool_update_nid(pool, new_nid);
-}
-#endif /* _NET_PAGE_POOL_H */
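For drivers still in flight, the removal is mechanical: the old umbrella include is replaced by the split headers. A sketch, assuming the driver only needs the inline helpers (helpers.h pulls in types.h for the struct definitions):

/* Before the split: */
#include <net/page_pool.h>

/* After the split: */
#include <net/page_pool/helpers.h>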
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
new file mode 100644
index 000000000000..1d397c1a0043
--- /dev/null
+++ b/include/net/page_pool/helpers.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * page_pool/helpers.h
+ * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
+ * Copyright (C) 2016 Red Hat, Inc.
+ */
+
+/**
+ * DOC: page_pool allocator
+ *
+ * The page_pool allocator is optimized for recycling the pages or page
+ * fragments used by skb packets and xdp frames.
+ *
+ * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
+ * which allocates memory with or without page splitting depending on the
+ * requested memory size.
+ *
+ * If the driver knows that it always requires full pages or its allocations are
+ * always smaller than half a page, it can use one of the more specific API
+ * calls:
+ *
+ * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
+ * driver knows that the memory it needs is always bigger than half of the page
+ * allocated from the page pool. There is no cache line dirtying for
+ * 'struct page' when a page is recycled back to the page pool.
+ *
+ * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
+ * driver knows that the memory it needs is always smaller than or equal to
+ * half of the page allocated from the page pool. Page splitting enables memory
+ * saving and thus avoids TLB/cache misses for data access, but there is also
+ * some cost to implementing page splitting, mainly some cache line
+ * dirtying/bouncing for 'struct page' and atomic operations on
+ * page->pp_ref_count.
+ *
+ * The API keeps track of in-flight pages in order to let API users know when
+ * it is safe to free a page_pool object. To that end, API users must call
+ * page_pool_put_page() or page_pool_free_va() on every page, or attach the
+ * page to a page_pool-aware object like an skb marked with
+ * skb_mark_for_recycle().
+ *
+ * page_pool_put_page() may be called multiple times on the same page if a page
+ * is split into multiple fragments. For the last fragment, it will either
+ * recycle the page, or in case of page->_refcount > 1, it will release the DMA
+ * mapping and in-flight state accounting.
+ *
+ * dma_sync_single_range_for_device() is only called for the last fragment when
+ * page_pool is created with PP_FLAG_DMA_SYNC_DEV flag, so it depends on the
+ * last freed fragment to do the sync_for_device operation for all fragments in
+ * the same page when a page is split. The API user must set up pool->p.max_len
+ * and pool->p.offset correctly and ensure that page_pool_put_page() is called
+ * with dma_sync_size being -1 for the fragment API.
+ */
+#ifndef _NET_PAGE_POOL_HELPERS_H
+#define _NET_PAGE_POOL_HELPERS_H
+
+#include <net/page_pool/types.h>
+
+#ifdef CONFIG_PAGE_POOL_STATS
+/* Deprecated driver-facing API, use netlink instead */
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
+bool page_pool_get_stats(const struct page_pool *pool,
+ struct page_pool_stats *stats);
+#else
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ return data;
+}
+#endif
+
+/**
+ * page_pool_dev_alloc_pages() - allocate a page.
+ * @pool: pool from which to allocate
+ *
+ * Get a page from the page allocator or page_pool caches.
+ */
+static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_pages(pool, gfp);
+}
+
+/**
+ * page_pool_dev_alloc_frag() - allocate a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset to the allocated page
+ * @size: requested size
+ *
+ * Get a page fragment from the page allocator or page_pool caches.
+ *
+ * Return:
+ * The allocated page fragment, otherwise NULL.
+ */
+static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_frag(pool, offset, size, gfp);
+}
+
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
+{
+ unsigned int max_size = PAGE_SIZE << pool->p.order;
+ struct page *page;
+
+ if ((*size << 1) > max_size) {
+ *size = max_size;
+ *offset = 0;
+ return page_pool_alloc_pages(pool, gfp);
+ }
+
+ page = page_pool_alloc_frag(pool, offset, *size, gfp);
+ if (unlikely(!page))
+ return NULL;
+
+ /* There is very likely not enough space for another fragment, so append
+ * the remaining size to the current fragment to avoid truesize
+ * underestimate problem.
+ */
+ if (pool->frag_offset + *size > max_size) {
+ *size = max_size - *offset;
+ pool->frag_offset = max_size;
+ }
+
+ return page;
+}
+
+/**
+ * page_pool_dev_alloc() - allocate a page or a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset to the allocated page
+ * @size: in as the requested size, out as the allocated size
+ *
+ * Get a page or a page fragment from the page allocator or page_pool caches
+ * depending on the requested size, in order to allocate memory with the least
+ * memory utilization and performance penalty.
+ *
+ * Return:
+ * The allocated page or page fragment, otherwise NULL.
+ */
+static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc(pool, offset, size, gfp);
+}
+
+static inline void *page_pool_alloc_va(struct page_pool *pool,
+ unsigned int *size, gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
+ page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
+ if (unlikely(!page))
+ return NULL;
+
+ return page_address(page) + offset;
+}
+
+/**
+ * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its
+ * va.
+ * @pool: pool from which to allocate
+ * @size: in as the requested size, out as the allocated size
+ *
+ * This is just a thin wrapper around the page_pool_alloc() API, and
+ * it returns the va of the allocated page or page fragment.
+ *
+ * Return:
+ * The va of the allocated page or page fragment, otherwise NULL.
+ */
+static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
+ unsigned int *size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_va(pool, size, gfp);
+}
+
+/**
+ * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
+ * @pool: pool from which page was allocated
+ *
+ * Get the stored dma direction. A driver might decide to store this locally
+ * and avoid the extra cache line from page_pool to determine the direction.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+ return pool->p.dma_dir;
+}
+
+/**
+ * page_pool_fragment_page() - split a fresh page into fragments
+ * @page: page to split
+ * @nr: references to set
+ *
+ * pp_ref_count represents the number of outstanding references to the page,
+ * which will be freed using page_pool APIs (rather than page allocator APIs
+ * like put_page()). Such references are usually held by page_pool-aware
+ * objects like skbs marked for page pool recycling.
+ *
+ * This helper allows the caller to take (set) multiple references to a
+ * freshly allocated page. The page must be freshly allocated (have a
+ * pp_ref_count of 1). This is commonly done by drivers and
+ * "fragment allocators" to save atomic operations - either when they know
+ * upfront how many references they will need; or to take MAX references and
+ * return the unused ones with a single atomic dec(), instead of performing
+ * multiple atomic inc() operations.
+ */
+static inline void page_pool_fragment_page(struct page *page, long nr)
+{
+ atomic_long_set(&page->pp_ref_count, nr);
+}
+
+static inline long page_pool_unref_page(struct page *page, long nr)
+{
+ long ret;
+
+ /* If nr == pp_ref_count then we have cleared all remaining
+ * references to the page:
+ * 1. 'nr == 1': no need to actually overwrite it.
+ * 2. 'nr != 1': overwrite it with one, which is the rare case
+ * for pp_ref_count draining.
+ *
+ * The main advantage of doing this is that not only do we avoid an
+ * atomic update, as an atomic_read is generally a much cheaper
+ * operation than an atomic update, especially when dealing with a
+ * page that may be referenced by only 2 or 3 users; we also unify
+ * the pp_ref_count handling by ensuring all pages start partitioned
+ * into only 1 piece, and only overwrite it when the page is
+ * partitioned into more than one piece.
+ */
+ if (atomic_long_read(&page->pp_ref_count) == nr) {
+ /* As we have ensured nr is always one for constant case using
+ * the BUILD_BUG_ON(), only need to handle the non-constant case
+ * here for pp_ref_count draining, which is a rare case.
+ */
+ BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
+ if (!__builtin_constant_p(nr))
+ atomic_long_set(&page->pp_ref_count, 1);
+
+ return 0;
+ }
+
+ ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ WARN_ON(ret < 0);
+
+ /* We are the last user here too; reset pp_ref_count back to 1 to
+ * ensure all pages have been partitioned into 1 piece initially.
+ * Currently, this should only be the rare case where the last two
+ * fragment users call page_pool_unref_page().
+ */
+ if (unlikely(!ret))
+ atomic_long_set(&page->pp_ref_count, 1);
+
+ return ret;
+}
+
+static inline void page_pool_ref_page(struct page *page)
+{
+ atomic_long_inc(&page->pp_ref_count);
+}
+
+static inline bool page_pool_is_last_ref(struct page *page)
+{
+ /* If page_pool_unref_page() returns 0, we were the last user */
+ return page_pool_unref_page(page, 1) == 0;
+}
+
+/**
+ * page_pool_put_page() - release a reference to a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ * @dma_sync_size: how much of the page may have been touched by the device
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * The outcome of this depends on the page refcnt. If the driver bumps
+ * the refcnt > 1 this will unmap the page. If the page refcnt is 1
+ * the allocator owns the page and will try to recycle it in one of the pool
+ * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
+ * using dma_sync_single_range_for_device().
+ */
+static inline void page_pool_put_page(struct page_pool *pool,
+ struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct)
+{
+ /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+ * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ if (!page_pool_is_last_ref(page))
+ return;
+
+ page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
+#endif
+}
+
+/**
+ * page_pool_put_full_page() - release a reference on a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * Similar to page_pool_put_page(), but will DMA sync the entire memory area
+ * as configured in &page_pool_params.max_len.
+ */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
+{
+ page_pool_put_page(pool, page, -1, allow_direct);
+}
+
+/**
+ * page_pool_recycle_direct() - release a reference on a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ *
+ * Similar to page_pool_put_full_page(), but the caller must guarantee safe
+ * context (e.g. NAPI), since it will recycle the page directly into the pool
+ * fast cache.
+ */
+static inline void page_pool_recycle_direct(struct page_pool *pool,
+ struct page *page)
+{
+ page_pool_put_full_page(pool, page, true);
+}
+
+#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
+ (sizeof(dma_addr_t) > sizeof(unsigned long))
+
+/**
+ * page_pool_free_va() - free a va into the page_pool
+ * @pool: pool from which va was allocated
+ * @va: va to be freed
+ * @allow_direct: freed by the consumer, allow lockless caching
+ *
+ * Free a va allocated from page_pool_alloc_va().
+ */
+static inline void page_pool_free_va(struct page_pool *pool, void *va,
+ bool allow_direct)
+{
+ page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
+}
+
+/**
+ * page_pool_get_dma_addr() - Retrieve the stored DMA address.
+ * @page: page allocated from a page pool
+ *
+ * Fetch the DMA address of the page. The page pool to which the page belongs
+ * must have been created with PP_FLAG_DMA_MAP.
+ */
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+ dma_addr_t ret = page->dma_addr;
+
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
+
+ return ret;
+}
+
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+ page->dma_addr = addr >> PAGE_SHIFT;
+
+ /* We assume page alignment to shave off the bottom bits;
+ * if this "compression" doesn't work, we need to drop the mapping.
+ */
+ return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
+ }
+
+ page->dma_addr = addr;
+ return false;
+}
+
+static inline bool page_pool_put(struct page_pool *pool)
+{
+ return refcount_dec_and_test(&pool->user_cnt);
+}
+
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
+
+#endif /* _NET_PAGE_POOL_HELPERS_H */
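Taken together, the helpers cover a typical driver RX path. A rough sketch, with the descriptor/ring plumbing left as hypothetical placeholders and assuming the pool was created with PP_FLAG_DMA_MAP:

#include <net/page_pool/helpers.h>

/* Sketch: refill one RX buffer from the pool. */
static int my_rx_refill(struct page_pool *pool, dma_addr_t *dma)
{
	unsigned int offset;
	unsigned int size = 2048;	/* hypothetical buffer size */
	struct page *page;

	page = page_pool_dev_alloc(pool, &offset, &size);
	if (!page)
		return -ENOMEM;

	*dma = page_pool_get_dma_addr(page) + offset;
	return 0;
}

/* Sketch: drop a received buffer from NAPI context. */
static void my_rx_drop(struct page_pool *pool, struct page *page)
{
	/* Safe context, so the page may go straight to the fast cache. */
	page_pool_recycle_direct(pool, page);
}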
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
new file mode 100644
index 000000000000..5e43a08d3231
--- /dev/null
+++ b/include/net/page_pool/types.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NET_PAGE_POOL_TYPES_H
+#define _NET_PAGE_POOL_TYPES_H
+
+#include <linux/dma-direction.h>
+#include <linux/ptr_ring.h>
+#include <linux/types.h>
+
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+ * device driver responsibility
+ */
+#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
+ PP_FLAG_SYSTEM_POOL)
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark are related to the network
+ * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
+ * ring is usually refilled and the max consumed elements will be 64,
+ * thus a natural max size of objects needed in the cache.
+ *
+ * Keeping room for more objects is due to the XDP_DROP use-case:
+ * XDP_DROP allows the opportunity to recycle objects directly into
+ * this array, as it shares the same softirq/NAPI protection. If the
+ * cache is already full (or partly full), the XDP_DROP recycles
+ * would have to take a slower code path.
+ */
+#define PP_ALLOC_CACHE_SIZE 128
+#define PP_ALLOC_CACHE_REFILL 64
+struct pp_alloc_cache {
+ u32 count;
+ struct page *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+/**
+ * struct page_pool_params - page pool parameters
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
+ * @order: 2^order pages on allocation
+ * @pool_size: size of the ptr_ring
+ * @nid: NUMA node id to allocate pages from
+ * @dev: device, for DMA pre-mapping purposes
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
+ * @napi: NAPI which is the sole consumer of pages, otherwise NULL
+ * @dma_dir: DMA mapping direction
+ * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
+ * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ */
+struct page_pool_params {
+ struct_group_tagged(page_pool_params_fast, fast,
+ unsigned int flags;
+ unsigned int order;
+ unsigned int pool_size;
+ int nid;
+ struct device *dev;
+ struct napi_struct *napi;
+ enum dma_data_direction dma_dir;
+ unsigned int max_len;
+ unsigned int offset;
+ );
+ struct_group_tagged(page_pool_params_slow, slow,
+ struct net_device *netdev;
+/* private: used by test code only */
+ void (*init_callback)(struct page *page, void *arg);
+ void *init_arg;
+ );
+};
+
+#ifdef CONFIG_PAGE_POOL_STATS
+/**
+ * struct page_pool_alloc_stats - allocation statistics
+ * @fast: successful fast path allocations
+ * @slow: slow path order-0 allocations
+ * @slow_high_order: slow path high order allocations
+ * @empty: ptr ring is empty, so a slow path allocation was forced
+ * @refill: an allocation which triggered a refill of the cache
+ * @waive: pages obtained from the ptr ring that cannot be added to
+ * the cache due to a NUMA mismatch
+ */
+struct page_pool_alloc_stats {
+ u64 fast;
+ u64 slow;
+ u64 slow_high_order;
+ u64 empty;
+ u64 refill;
+ u64 waive;
+};
+
+/**
+ * struct page_pool_recycle_stats - recycling (freeing) statistics
+ * @cached: recycling placed page in the page pool cache
+ * @cache_full: page pool cache was full
+ * @ring: page placed into the ptr ring
+ * @ring_full: page released from page pool because the ptr ring was full
+ * @released_refcnt: page released (and not recycled) because refcnt > 1
+ */
+struct page_pool_recycle_stats {
+ u64 cached;
+ u64 cache_full;
+ u64 ring;
+ u64 ring_full;
+ u64 released_refcnt;
+};
+
+/**
+ * struct page_pool_stats - combined page pool use statistics
+ * @alloc_stats: see struct page_pool_alloc_stats
+ * @recycle_stats: see struct page_pool_recycle_stats
+ *
+ * Wrapper struct for combining page pool stats with different storage
+ * requirements.
+ */
+struct page_pool_stats {
+ struct page_pool_alloc_stats alloc_stats;
+ struct page_pool_recycle_stats recycle_stats;
+};
+#endif
+
+struct page_pool {
+ struct page_pool_params_fast p;
+
+ int cpuid;
+ bool has_init_callback;
+
+ long frag_users;
+ struct page *frag_page;
+ unsigned int frag_offset;
+ u32 pages_state_hold_cnt;
+
+ struct delayed_work release_dw;
+ void (*disconnect)(void *pool);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* these stats are incremented while in softirq context */
+ struct page_pool_alloc_stats alloc_stats;
+#endif
+ u32 xdp_mem_id;
+
+ /*
+ * Data structure for allocation side
+ *
+ * The driver's allocation side usually already performs some kind
+ * of resource protection. Piggyback on this protection, and
+ * require the driver to protect the allocation side.
+ *
+ * For NIC drivers this means allocating a page_pool per
+ * RX-queue. The RX-queue is already protected by
+ * softirq/BH scheduling, and napi_schedule() guarantees
+ * that a single napi_struct will only be scheduled
+ * on a single CPU (see napi_schedule).
+ */
+ struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+ /* Data structure for storing recycled pages.
+ *
+ * Returning/freeing pages is more complicated synchronization-
+ * wise, because frees can happen on remote CPUs, with no
+ * association with the allocation resource.
+ *
+ * Use ptr_ring, as it separates consumer and producer
+ * efficiently, in a way that doesn't bounce cache lines.
+ *
+ * TODO: Implement bulk return pages into this structure.
+ */
+ struct ptr_ring ring;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* recycle stats are per-cpu to avoid locking */
+ struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
+ atomic_t pages_state_release_cnt;
+
+ /* A page_pool is strictly tied to a single RX-queue being
+ * protected by NAPI, due to the above pp_alloc_cache. This
+ * refcnt exists to simplify drivers' error handling.
+ */
+ refcount_t user_cnt;
+
+ u64 destroy_cnt;
+
+ /* Slow/Control-path information follows */
+ struct page_pool_params_slow slow;
+ /* User-facing fields, protected by page_pools_lock */
+ struct {
+ struct hlist_node list;
+ u64 detach_time;
+ u32 napi_id;
+ u32 id;
+ } user;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+ unsigned int size, gfp_t gfp);
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
+ int cpuid);
+
+struct xdp_mem_info;
+
+#ifdef CONFIG_PAGE_POOL
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+ struct xdp_mem_info *mem);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count);
+#else
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *),
+ struct xdp_mem_info *mem)
+{
+}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count)
+{
+}
+#endif
+
+void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct);
+
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+ return true;
+#else
+ return false;
+#endif
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+
+#endif /* _NET_PAGE_POOL_TYPES_H */
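Creating a pool from these types is mostly a matter of filling struct page_pool_params; a sketch with hypothetical sizing for an RX ring:

#include <net/page_pool/types.h>

static struct page_pool *my_create_pool(struct device *dev, int nid)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,		/* hypothetical: ~ring size */
		.nid		= nid,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* sync whole page for RX */
		.offset		= 0,
	};

	/* Returns an ERR_PTR() on failure. */
	return page_pool_create(&pp);
}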
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index 27b1ab5e4e6d..645dddf5ce77 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -10,6 +10,9 @@
#ifndef NET_PHONET_PEP_H
#define NET_PHONET_PEP_H
+#include <linux/skbuff.h>
+#include <net/phonet/phonet.h>
+
struct pep_sock {
struct pn_sock pn_sk;
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index a27bdc6cfeab..cf5ecae4a2fc 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -10,6 +10,10 @@
#ifndef AF_PHONET_H
#define AF_PHONET_H
+#include <linux/phonet.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
/*
* The lower layers may not require more space, ever. Make sure it's
* enough.
@@ -105,4 +109,25 @@ void phonet_sysctl_exit(void);
int isi_register(void);
void isi_unregister(void);
+static inline bool sk_is_phonet(struct sock *sk)
+{
+ return sk->sk_family == PF_PHONET;
+}
+
+static inline int phonet_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ int karg;
+
+ switch (cmd) {
+ case SIOCPNADDRESOURCE:
+ case SIOCPNDELRESOURCE:
+ if (get_user(karg, (int __user *)arg))
+ return -EFAULT;
+
+ return sk->sk_prot->ioctl(sk, cmd, &karg);
+ }
+ /* A positive return value means that the ioctl was not processed */
+ return 1;
+}
#endif
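phonet_sk_ioctl() lets a generic socket ioctl path hand the two Phonet resource requests to the protocol before falling back; roughly, a caller would look like this sketch (the surrounding handler is hypothetical):

static int my_sock_ioctl(struct sock *sk, unsigned int cmd,
			 void __user *arg)
{
	if (sk_is_phonet(sk)) {
		int rc = phonet_sk_ioctl(sk, cmd, arg);

		/* A positive return value means "not consumed, keep going". */
		if (rc <= 0)
			return rc;
	}
	return -ENOIOCTLCMD;
}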
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 05b49d4d2b11..e9dc8dca5817 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -10,6 +10,11 @@
#ifndef PN_DEV_H
#define PN_DEV_H
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct net;
+
struct phonet_device_list {
struct list_head list;
struct mutex lock;
diff --git a/include/net/pie.h b/include/net/pie.h
index 3fe2361e03b4..01cbc66825a4 100644
--- a/include/net/pie.h
+++ b/include/net/pie.h
@@ -17,7 +17,7 @@
/**
* struct pie_params - contains pie parameters
* @target: target delay in pschedtime
- * @tudpate: interval at which drop probability is calculated
+ * @tupdate: interval at which drop probability is calculated
* @limit: total number of packets that can be in the queue
* @alpha: parameter to control drop probability
* @beta: parameter to control drop probability
diff --git a/include/net/ping.h b/include/net/ping.h
index 2fe78874318c..bc7779262e60 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -16,14 +16,7 @@
#define PING_HTABLE_SIZE 64
#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
-#define ping_portaddr_for_each_entry(__sk, node, list) \
- hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
-
-/*
- * gid_t is either uint or ushort. We want to pass it to
- * proc_dointvec_minmax(), so it must not be larger than MAX_INT
- */
-#define GID_T_MAX (((gid_t)~0U) >> 1)
+#define GID_T_MAX (((gid_t)~0U) - 1)
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
struct pingv6_ops {
@@ -71,12 +64,12 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
-int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-bool ping_rcv(struct sk_buff *skb);
+enum skb_drop_reason ping_rcv(struct sk_buff *skb);
#ifdef CONFIG_PROC_FS
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index d4d461236351..a4ee43f493bb 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -23,7 +23,9 @@ struct tcf_walker {
};
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
-int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+#define NET_CLS_ALIAS_PREFIX "net-cls-"
+#define MODULE_ALIAS_NET_CLS(kind) MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)
struct tcf_block_ext_info {
enum flow_block_binder_type binder_type;
@@ -48,7 +50,7 @@ void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
- struct tcf_proto *tp, bool rtnl_held);
+ struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -59,6 +61,8 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei);
+int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
+ int police, struct tcf_proto *tp, u32 handle, bool used_action_miss);
static inline bool tcf_block_shared(struct tcf_block *block)
{
@@ -76,12 +80,23 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return block->q;
}
-int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode);
-int tcf_classify_ingress(struct sk_buff *skb,
- const struct tcf_block *ingress_block,
- const struct tcf_proto *tp, struct tcf_result *res,
- bool compat_mode);
+int tcf_classify(struct sk_buff *skb,
+ const struct tcf_block *block,
+ const struct tcf_proto *tp, struct tcf_result *res,
+ bool compat_mode);
+
+static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
+ struct tcf_walker *arg,
+ void *filter)
+{
+ if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
#else
static inline bool tcf_block_shared(struct tcf_block *block)
@@ -125,33 +140,14 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return NULL;
}
-static inline
-int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
- void *cb_priv)
-{
- return 0;
-}
-
-static inline
-void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
- void *cb_priv)
-{
-}
-
-static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+static inline int tcf_classify(struct sk_buff *skb,
+ const struct tcf_block *block,
+ const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
return TC_ACT_UNSPEC;
}
-static inline int tcf_classify_ingress(struct sk_buff *skb,
- const struct tcf_block *ingress_block,
- const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode)
-{
- return TC_ACT_UNSPEC;
-}
-
#endif
static inline unsigned long
@@ -205,12 +201,26 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
__tcf_unbind_filter(q, r);
}
+static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
+ void *q, struct tcf_result *res,
+ unsigned long base)
+{
+ if (res->classid == classid) {
+ if (cl)
+ __tcf_bind_filter(q, res, base);
+ else
+ __tcf_unbind_filter(q, res);
+ }
+}
+
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
int nr_actions;
struct tc_action **actions;
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
+ struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
@@ -222,18 +232,11 @@ struct tcf_exts {
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
int action, int police)
{
-#ifdef CONFIG_NET_CLS_ACT
- exts->type = 0;
- exts->nr_actions = 0;
- exts->net = net;
- exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
- GFP_KERNEL);
- if (!exts->actions)
- return -ENOMEM;
+#ifdef CONFIG_NET_CLS
+ return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
+#else
+ return -EOPNOTSUPP;
#endif
- exts->action = action;
- exts->police = police;
- return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
@@ -244,6 +247,8 @@ static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
exts->net = maybe_get_net(exts->net);
+ if (exts->net)
+ netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
return exts->net != NULL;
#else
return true;
@@ -254,7 +259,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
if (exts->net)
- put_net(exts->net);
+ put_net_track(exts->net, &exts->ns_tracker);
#endif
}
@@ -266,26 +271,38 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
+#define tcf_act_for_each_action(i, a, actions) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
+
+static inline bool tc_act_in_hw(struct tc_action *act)
+{
+ return !!act->in_hw_count;
+}
+
static inline void
-tcf_exts_stats_update(const struct tcf_exts *exts,
- u64 bytes, u64 packets, u64 drops, u64 lastuse,
- u8 used_hw_stats, bool used_hw_stats_valid)
+tcf_exts_hw_stats_update(const struct tcf_exts *exts,
+ struct flow_stats *stats,
+ bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
int i;
- preempt_disable();
-
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- tcf_action_stats_update(a, bytes, packets, drops,
- lastuse, true);
- a->used_hw_stats = used_hw_stats;
- a->used_hw_stats_valid = used_hw_stats_valid;
- }
+ if (use_act_stats || tc_act_in_hw(a)) {
+ if (!tcf_action_update_hw_stats(a))
+ continue;
+ }
+
+ preempt_disable();
+ tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
+ stats->lastused, true);
+ preempt_enable();
- preempt_enable();
+ a->used_hw_stats = stats->used_hw_stats;
+ a->used_hw_stats_valid = stats->used_hw_stats_valid;
+ }
#endif
}
@@ -325,10 +342,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
return TC_ACT_OK;
}
+static inline int
+tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
+ struct tcf_result *res)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return tcf_action_exec(skb, exts->actions + act_index,
+ exts->nr_actions - act_index, res);
+#else
+ return TC_ACT_OK;
+#endif
+}
+
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts, bool ovr, bool rtnl_held,
+ struct tcf_exts *exts, u32 flags,
struct netlink_ext_ack *extack);
+int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ struct nlattr *rate_tlv, struct tcf_exts *exts,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -337,6 +369,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
* struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
*/
struct tcf_pkt_info {
unsigned char * ptr;
@@ -355,6 +390,7 @@ struct tcf_ematch_ops;
* @ops: the operations lookup table of the corresponding ematch module
* @datalen: length of the ematch specific configuration data
* @data: ematch specific data
+ * @net: the network namespace
*/
struct tcf_ematch {
struct tcf_ematch_ops * ops;
@@ -512,7 +548,7 @@ tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
char indev[IFNAMSIZ];
struct net_device *dev;
- if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
+ if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
"Interface name too long");
return -EINVAL;
@@ -536,9 +572,14 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
return ifindex == skb->skb_iif;
}
-int tc_setup_flow_action(struct flow_action *flow_action,
- const struct tcf_exts *exts);
-void tc_cleanup_flow_action(struct flow_action *flow_action);
+int tc_setup_offload_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts,
+ struct netlink_ext_ack *extack);
+void tc_cleanup_offload_action(struct flow_action *flow_action);
+int tc_setup_action(struct flow_action *flow_action,
+ struct tc_action *actions[],
+ u32 miss_cookie_base,
+ struct netlink_ext_ack *extack);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop, bool rtnl_held);
@@ -709,6 +750,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->extack = extack;
}
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+{
+ struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+
+ if (tc_skb_ext)
+ memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
+ return tc_skb_ext;
+}
+#endif
+
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
@@ -720,6 +772,7 @@ struct tc_cls_matchall_offload {
enum tc_matchall_command command;
struct flow_rule *rule;
struct flow_stats stats;
+ bool use_act_stats;
unsigned long cookie;
};
@@ -738,16 +791,6 @@ struct tc_cls_bpf_offload {
bool exts_integrated;
};
-struct tc_mqprio_qopt_offload {
- /* struct tc_mqprio_qopt must always be the first element */
- struct tc_mqprio_qopt qopt;
- u16 mode;
- u16 shaper;
- u32 flags;
- u64 min_rate[TC_QOPT_MAX_QUEUE];
- u64 max_rate[TC_QOPT_MAX_QUEUE];
-};
-
/* This structure holds cookie structure that is passed from user
* to the kernel for actions and classifiers
*/
@@ -758,7 +801,7 @@ struct tc_cookie {
};
struct tc_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_basic_sync *bstats;
struct gnet_stats_queue *qstats;
};
@@ -783,6 +826,43 @@ struct tc_mq_qopt_offload {
};
};
+enum tc_htb_command {
+ /* Root */
+ TC_HTB_CREATE, /* Initialize HTB offload. */
+ TC_HTB_DESTROY, /* Destroy HTB offload. */
+
+ /* Classes */
+ /* Allocate qid and create leaf. */
+ TC_HTB_LEAF_ALLOC_QUEUE,
+ /* Convert leaf to inner, preserve and return qid, create new leaf. */
+ TC_HTB_LEAF_TO_INNER,
+ /* Delete leaf, while siblings remain. */
+ TC_HTB_LEAF_DEL,
+ /* Delete leaf, convert parent to leaf, preserving qid. */
+ TC_HTB_LEAF_DEL_LAST,
+ /* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
+ TC_HTB_LEAF_DEL_LAST_FORCE,
+ /* Modify parameters of a node. */
+ TC_HTB_NODE_MODIFY,
+
+ /* Class qdisc */
+ TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
+};
+
+struct tc_htb_qopt_offload {
+ struct netlink_ext_ack *extack;
+ enum tc_htb_command command;
+ u32 parent_classid;
+ u16 classid;
+ u16 qid;
+ u32 quantum;
+ u64 rate;
+ u64 ceil;
+ u8 prio;
+};
+
+#define TC_HTB_CLASSID_ROOT U32_MAX
+
enum tc_red_command {
TC_RED_REPLACE,
TC_RED_DESTROY,
@@ -843,7 +923,7 @@ struct tc_gred_qopt_offload_params {
};
struct tc_gred_qopt_offload_stats {
- struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_basic_sync bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs];
};
@@ -935,6 +1015,7 @@ enum tc_tbf_command {
TC_TBF_REPLACE,
TC_TBF_DESTROY,
TC_TBF_STATS,
+ TC_TBF_GRAFT,
};
struct tc_tbf_qopt_offload_replace_params {
@@ -950,6 +1031,7 @@ struct tc_tbf_qopt_offload {
union {
struct tc_tbf_qopt_offload_replace_params replace_params;
struct tc_qopt_offload_stats stats;
+ u32 child_handle;
};
};
@@ -968,4 +1050,15 @@ struct tc_fifo_qopt_offload {
};
};
+#ifdef CONFIG_NET_CLS_ACT
+DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
+void tc_skb_ext_tc_enable(void);
+void tc_skb_ext_tc_disable(void);
+#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
+#else /* CONFIG_NET_CLS_ACT */
+static inline void tc_skb_ext_tc_enable(void) { }
+static inline void tc_skb_ext_tc_disable(void) { }
+#define tc_skb_ext_tc_enabled() false
+#endif
+
#endif
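tc_cls_stats_dump() factors out the skip/count bookkeeping that every classifier's ->walk() used to open-code; a sketch of a walk built on it, with a hypothetical per-classifier filter list:

struct my_cls_head {			/* hypothetical per-tp state */
	struct list_head filters;
};

struct my_filter {			/* hypothetical filter node */
	struct list_head list;
};

static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct my_cls_head *head = rtnl_dereference(tp->root);
	struct my_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (!tc_cls_stats_dump(tp, arg, f))
			return;		/* walker asked us to stop */
}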
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ac8c890a2657..d7b7b6cd4aa1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -11,6 +11,7 @@
#include <uapi/linux/pkt_sched.h>
#define DEFAULT_TX_QUEUE_LEN 1000
+#define STAB_SIZE_LOG_MAX 30
struct qdisc_walker {
int stop;
@@ -19,12 +20,14 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-#define QDISC_ALIGNTO 64
-#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
+#define qdisc_priv(q) \
+ _Generic(q, \
+ const struct Qdisc * : (const void *)&q->privdata, \
+ struct Qdisc * : (void *)&q->privdata)
-static inline void *qdisc_priv(struct Qdisc *q)
+static inline struct Qdisc *qdisc_from_priv(void *priv)
{
- return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
+ return container_of(priv, struct Qdisc, privdata);
}
/*
@@ -60,14 +63,7 @@ static inline psched_time_t psched_get_time(void)
return PSCHED_NS2TICKS(ktime_get_ns());
}
-static inline psched_tdiff_t
-psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
-{
- return min(tv1 - tv2, bound);
-}
-
struct qdisc_watchdog {
- u64 last_expires;
struct hrtimer timer;
struct Qdisc *qdisc;
};
@@ -103,7 +99,9 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
int register_qdisc(struct Qdisc_ops *qops);
-int unregister_qdisc(struct Qdisc_ops *qops);
+void unregister_qdisc(struct Qdisc_ops *qops);
+#define NET_SCH_ALIAS_PREFIX "net-sch-"
+#define MODULE_ALIAS_NET_SCH(id) MODULE_ALIAS(NET_SCH_ALIAS_PREFIX id)
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
@@ -126,22 +124,19 @@ void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
- /* NOLOCK qdisc must check 'state' under the qdisc seqlock
- * to avoid racing with dev_qdisc_reset()
- */
- if (!(q->flags & TCQ_F_NOLOCK) ||
- likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
- __qdisc_run(q);
+ __qdisc_run(q);
qdisc_run_end(q);
}
}
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
static inline unsigned int psched_mtu(const struct net_device *dev)
{
- return dev->mtu + dev->hard_header_len;
+ return READ_ONCE(dev->mtu) + dev->hard_header_len;
}
static inline struct net *qdisc_net(struct Qdisc *q)
@@ -149,6 +144,11 @@ static inline struct net *qdisc_net(struct Qdisc *q)
return dev_net(q->dev_queue->dev);
}
+struct tc_query_caps_base {
+ enum tc_setup_type type;
+ void *caps;
+};
+
struct tc_cbs_qopt_offload {
u8 enable;
s32 queue;
@@ -163,6 +163,58 @@ struct tc_etf_qopt_offload {
s32 queue;
};
+struct tc_mqprio_caps {
+ bool validate_queue_counts:1;
+};
+
+struct tc_mqprio_qopt_offload {
+ /* struct tc_mqprio_qopt must always be the first element */
+ struct tc_mqprio_qopt qopt;
+ struct netlink_ext_ack *extack;
+ u16 mode;
+ u16 shaper;
+ u32 flags;
+ u64 min_rate[TC_QOPT_MAX_QUEUE];
+ u64 max_rate[TC_QOPT_MAX_QUEUE];
+ unsigned long preemptible_tcs;
+};
+
+struct tc_taprio_caps {
+ bool supports_queue_max_sdu:1;
+ bool gate_mask_per_txq:1;
+ /* Device expects lower TXQ numbers to have higher priority over higher
+ * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
+ * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
+ */
+ bool broken_mqprio:1;
+};
+
+enum tc_taprio_qopt_cmd {
+ TAPRIO_CMD_REPLACE,
+ TAPRIO_CMD_DESTROY,
+ TAPRIO_CMD_STATS,
+ TAPRIO_CMD_QUEUE_STATS,
+};
+
+/**
+ * struct tc_taprio_qopt_stats - IEEE 802.1Qbv statistics
+ * @window_drops: Frames that were dropped because they were too large to be
+ * transmitted in any of the allotted time windows (open gates) for their
+ * traffic class.
+ * @tx_overruns: Frames still being transmitted by the MAC after the
+ * transmission gate associated with their traffic class has closed.
+ * Equivalent to `12.29.1.1.2 TransmissionOverrun` from 802.1Q-2018.
+ */
+struct tc_taprio_qopt_stats {
+ u64 window_drops;
+ u64 tx_overruns;
+};
+
+struct tc_taprio_qopt_queue_stats {
+ int queue;
+ struct tc_taprio_qopt_stats stats;
+};
+
struct tc_taprio_sched_entry {
u8 command; /* TC_TAPRIO_CMD_* */
@@ -172,18 +224,70 @@ struct tc_taprio_sched_entry {
};
struct tc_taprio_qopt_offload {
- u8 enable;
- ktime_t base_time;
- u64 cycle_time;
- u64 cycle_time_extension;
-
- size_t num_entries;
- struct tc_taprio_sched_entry entries[];
+ enum tc_taprio_qopt_cmd cmd;
+
+ union {
+ /* TAPRIO_CMD_STATS */
+ struct tc_taprio_qopt_stats stats;
+ /* TAPRIO_CMD_QUEUE_STATS */
+ struct tc_taprio_qopt_queue_stats queue_stats;
+ /* TAPRIO_CMD_REPLACE */
+ struct {
+ struct tc_mqprio_qopt_offload mqprio;
+ struct netlink_ext_ack *extack;
+ ktime_t base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+ u32 max_sdu[TC_MAX_QUEUE];
+
+ size_t num_entries;
+ struct tc_taprio_sched_entry entries[];
+ };
+ };
};
+#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)
+
/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
*offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
+#else
+
+/* Reference counting */
+static inline struct tc_taprio_qopt_offload *
+taprio_offload_get(struct tc_taprio_qopt_offload *offload)
+{
+ return NULL;
+}
+
+static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
+{
+}
+
+#endif
+
+/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
+ * not mistaken for a software timestamp, because this will otherwise prevent
+ * the dispatch of hardware timestamps to the socket.
+ */
+static inline void skb_txtime_consumed(struct sk_buff *skb)
+{
+ skb->tstamp = ktime_set(0, 0);
+}
+
+static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
+ unsigned long cl,
+ struct qdisc_walker *arg)
+{
+ if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
+
#endif
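tc_qdisc_stats_dump() plays the same role for class walks on the qdisc side; a sketch for a qdisc exposing one class per band (the band count is hypothetical; class ids conventionally start at 1):

static void my_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	unsigned long band;

	if (arg->stop)
		return;

	for (band = 0; band < 3; band++) {	/* hypothetical band count */
		if (!tc_qdisc_stats_dump(sch, band + 1, arg))
			break;
	}
}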
diff --git a/include/net/pptp.h b/include/net/pptp.h
index 383e25ca53a7..e63176bdd4c8 100644
--- a/include/net/pptp.h
+++ b/include/net/pptp.h
@@ -2,6 +2,9 @@
#ifndef _NET_PPTP_H
#define _NET_PPTP_H
+#include <linux/types.h>
+#include <net/gre.h>
+
#define PPP_LCP_ECHOREQ 0x09
#define PPP_LCP_ECHOREP 0x0A
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 2b778e1d2d8f..b2499f88f8f8 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -35,26 +35,22 @@
/* This is used to register protocols. */
struct net_protocol {
- int (*early_demux)(struct sk_buff *skb);
- int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */
int (*err_handler)(struct sk_buff *skb, u32 info);
unsigned int no_policy:1,
- netns_ok:1,
/* does the protocol do more stringent
* icmp tag validation than simple
* socket lookup?
*/
icmp_strict_tag_validation:1;
+ u32 secret;
};
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
- void (*early_demux)(struct sk_buff *skb);
- void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */
@@ -64,6 +60,7 @@ struct inet6_protocol {
__be32 info);
unsigned int flags; /* INET6_PROTO_xxx */
+ u32 secret;
};
#define INET6_PROTO_NOPOLICY 0x1
@@ -73,6 +70,7 @@ struct inet6_protocol {
struct net_offload {
struct offload_callbacks callbacks;
unsigned int flags; /* Flags used by IPv6 for now */
+ u32 secret;
};
/* This should be set for any extension header which is compatible with GSO. */
#define INET6_PROTO_GSO_EXTHDR 0x1
diff --git a/include/net/psample.h b/include/net/psample.h
index 68ae16bb0a4a..0509d2d6be67 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -14,22 +14,35 @@ struct psample_group {
struct rcu_head rcu;
};
+struct psample_metadata {
+ u32 trunc_size;
+ int in_ifindex;
+ int out_ifindex;
+ u16 out_tc;
+ u64 out_tc_occ; /* bytes */
+ u64 latency; /* nanoseconds */
+ u8 out_tc_valid:1,
+ out_tc_occ_valid:1,
+ latency_valid:1,
+ unused:5;
+};
+
struct psample_group *psample_group_get(struct net *net, u32 group_num);
void psample_group_take(struct psample_group *group);
void psample_group_put(struct psample_group *group);
+struct sk_buff;
+
#if IS_ENABLED(CONFIG_PSAMPLE)
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 trunc_size, int in_ifindex, int out_ifindex,
- u32 sample_rate);
+ u32 sample_rate, const struct psample_metadata *md);
#else
static inline void psample_sample_packet(struct psample_group *group,
- struct sk_buff *skb, u32 trunc_size,
- int in_ifindex, int out_ifindex,
- u32 sample_rate)
+ struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md)
{
}
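Callers now bundle the ancillary data into struct psample_metadata instead of growing the argument list; a sketch, with hypothetical truncation and plumbing:

#include <net/psample.h>

static void my_sample(struct psample_group *group, struct sk_buff *skb,
		      int in_ifindex, int out_ifindex, u32 rate)
{
	struct psample_metadata md = {
		.trunc_size	= 128,		/* hypothetical truncation */
		.in_ifindex	= in_ifindex,
		.out_ifindex	= out_ifindex,
		/* out_tc/out_tc_occ/latency left invalid (flag bits are 0) */
	};

	psample_sample_packet(group, skb, rate, &md);
}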
diff --git a/include/net/psnap.h b/include/net/psnap.h
index 7cb0c8ab4171..88802b0754ad 100644
--- a/include/net/psnap.h
+++ b/include/net/psnap.h
@@ -2,6 +2,11 @@
#ifndef _NET_PSNAP_H
#define _NET_PSNAP_H
+struct datalink_proto;
+struct sk_buff;
+struct packet_type;
+struct net_device;
+
struct datalink_proto *
register_snap_client(const unsigned char *desc,
int (*rcvfunc)(struct sk_buff *, struct net_device *,
diff --git a/include/net/raw.h b/include/net/raw.h
index 8ad8df594853..32a61481a253 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -15,14 +15,15 @@
#include <net/inet_sock.h>
#include <net/protocol.h>
+#include <net/netns/hash.h>
+#include <linux/hash.h>
#include <linux/icmp.h>
extern struct proto raw_prot;
extern struct raw_hashinfo raw_v4_hashinfo;
-struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
- unsigned short num, __be32 raddr,
- __be32 laddr, int dif, int sdif);
+bool raw_v4_match(struct net *net, const struct sock *sk, unsigned short num,
+ __be32 raddr, __be32 laddr, int dif, int sdif);
int raw_abort(struct sock *sk, int err);
void raw_icmp_error(struct sk_buff *, int, u32);
@@ -30,13 +31,29 @@ int raw_local_deliver(struct sk_buff *, int);
int raw_rcv(struct sock *, struct sk_buff *);
-#define RAW_HTABLE_SIZE MAX_INET_PROTOS
+#define RAW_HTABLE_LOG 8
+#define RAW_HTABLE_SIZE (1U << RAW_HTABLE_LOG)
struct raw_hashinfo {
- rwlock_t lock;
- struct hlist_head ht[RAW_HTABLE_SIZE];
+ spinlock_t lock;
+
+ struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
};
+static inline u32 raw_hashfunc(const struct net *net, u32 proto)
+{
+ return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
+}
+
+static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
+{
+ int i;
+
+ spin_lock_init(&hashinfo->lock);
+ for (i = 0; i < RAW_HTABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&hashinfo->ht[i]);
+}
+
#ifdef CONFIG_PROC_FS
int raw_proc_init(void);
void raw_proc_exit(void);
@@ -66,16 +83,13 @@ struct raw_sock {
u32 ipmr_table;
};
-static inline struct raw_sock *raw_sk(const struct sock *sk)
-{
- return (struct raw_sock *)sk;
-}
+#define raw_sk(ptr) container_of_const(ptr, struct raw_sock, inet.sk)
static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+ return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept),
bound_dev_if, dif, sdif);
#else
return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
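With the table grown to 256 buckets and hashed over (netns, protocol), a lookup walks one bucket and filters with raw_v4_match(); roughly like this sketch, which elides the RCU protection the real receive path uses:

#include <net/raw.h>
#include <net/sock.h>

/* Sketch: find one matching raw socket for an incoming packet. */
static struct sock *my_raw_lookup(struct net *net, struct raw_hashinfo *h,
				  unsigned short proto, __be32 raddr,
				  __be32 laddr, int dif, int sdif)
{
	struct hlist_head *head = &h->ht[raw_hashfunc(net, proto)];
	struct sock *sk;

	sk_for_each(sk, head)
		if (raw_v4_match(net, sk, proto, raddr, laddr, dif, sdif))
			return sk;
	return NULL;
}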
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 53d86b6055e8..82810cbe3798 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -3,11 +3,12 @@
#define _NET_RAWV6_H
#include <net/protocol.h>
+#include <net/raw.h>
extern struct raw_hashinfo raw_v6_hashinfo;
-struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
- unsigned short num, const struct in6_addr *loc_addr,
- const struct in6_addr *rmt_addr, int dif, int sdif);
+bool raw_v6_match(struct net *net, const struct sock *sk, unsigned short num,
+ const struct in6_addr *loc_addr,
+ const struct in6_addr *rmt_addr, int dif, int sdif);
int raw_abort(struct sock *sk, int err);
diff --git a/include/net/red.h b/include/net/red.h
index fc455445f4b2..425364de0df7 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -122,7 +122,6 @@ struct red_stats {
u32 forced_drop; /* Forced drops, qavg > max_thresh */
u32 forced_mark; /* Forced marks, qavg > max_thresh */
u32 pdrop; /* Drops due to queue limits */
- u32 other; /* Drops due to drop() calls */
};
struct red_parms {
@@ -168,14 +167,24 @@ static inline void red_set_vars(struct red_vars *v)
v->qcount = -1;
}
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+ u8 Scell_log, u8 *stab)
{
- if (fls(qth_min) + Wlog > 32)
+ if (fls(qth_min) + Wlog >= 32)
return false;
- if (fls(qth_max) + Wlog > 32)
+ if (fls(qth_max) + Wlog >= 32)
+ return false;
+ if (Scell_log >= 32)
return false;
if (qth_max < qth_min)
return false;
+ if (stab) {
+ int i;
+
+ for (i = 0; i < RED_STAB_SIZE; i++)
+ if (stab[i] >= 32)
+ return false;
+ }
return true;
}
@@ -285,7 +294,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
int shift;
/*
- * The problem: ideally, average length queue recalcultion should
+ * The problem: ideally, average length queue recalculation should
* be done over constant clock intervals. This is too expensive, so
* that the calculation is driven by outgoing packets.
* When the queue is idle we have to model this clock by hand.
@@ -354,7 +363,7 @@ static inline unsigned long red_calc_qavg(const struct red_parms *p,
static inline u32 red_random(const struct red_parms *p)
{
- return reciprocal_divide(prandom_u32(), p->max_P_reciprocal);
+ return reciprocal_divide(get_random_u32(), p->max_P_reciprocal);
}
static inline int red_mark_probability(const struct red_parms *p,
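
Each of the new bounds in red_check_params() guards a shift: qth_min/qth_max are compared against qavg in Wlog-scaled fixed point, and Scell_log plus every byte of the stab table are later used directly as shift counts, where a count of 32 or more on a 32-bit value is undefined behavior in C; the tightened fls() test additionally keeps headroom for the scaled threshold arithmetic. A standalone sketch of the validate-before-shift idea (names and the userspace framing are illustrative):

/* Illustrative: why shift amounts are validated up front. Shifting a
 * 32-bit value by >= 32 is undefined behavior, so red_check_params()
 * rejects Scell_log and stab[] cells of 32 or more before they are
 * ever used as shift counts.
 */
#include <stdint.h>

static uint32_t scaled_threshold_or_zero(uint32_t qth, uint8_t wlog)
{
	/* mirrors the fls(qth) + wlog >= 32 rejection */
	if (qth && 32 - __builtin_clz(qth) + wlog >= 32)
		return 0;
	return qth << wlog;	/* safe: result stays well inside u32 */
}
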
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 47f06f6f5a67..ebf9e028d1ef 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -1,3 +1,4 @@
+
#ifndef __NET_REGULATORY_H
#define __NET_REGULATORY_H
/*
@@ -19,6 +20,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/ieee80211.h>
+#include <linux/nl80211.h>
#include <linux/rcupdate.h>
/**
@@ -137,17 +140,6 @@ struct regulatory_request {
* otherwise initiating radiation is not allowed. This will enable the
* relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
* option
- * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
- * all interfaces on this wiphy reside on allowed channels. If this flag
- * is not set, upon a regdomain change, the interfaces are given a grace
- * period (currently 60 seconds) to disconnect or move to an allowed
- * channel. Interfaces on forbidden channels are forcibly disconnected.
- * Currently these types of interfaces are supported for enforcement:
- * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
- * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
- * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
- * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
- * includes any modes unsupported for enforcement checking.
* @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
* regdom management. These devices will ignore all regdom changes not
* originating from their own wiphy.
@@ -174,7 +166,7 @@ enum ieee80211_regulatory_flags {
REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
- REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
+ /* reuse bit 6 next time */
REGULATORY_WIPHY_SELF_MANAGED = BIT(7),
};
@@ -221,6 +213,7 @@ struct ieee80211_reg_rule {
u32 flags;
u32 dfs_cac_ms;
bool has_wmm;
+ s8 psd;
};
struct ieee80211_regdomain {
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index b2eb8b4ba697..004e651e6067 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -41,6 +41,13 @@ struct request_sock_ops {
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
+struct saved_syn {
+ u32 mac_hdrlen;
+ u32 network_hdrlen;
+ u32 tcp_hdrlen;
+ u8 data[];
+};
+
/* struct request_sock - mini sock to represent a connection request
*/
struct request_sock {
@@ -54,15 +61,20 @@ struct request_sock {
struct request_sock *dl_next;
u16 mss;
u8 num_retrans; /* number of retransmits */
- u8 syncookie:1; /* syncookie: encode tcpopts in timestamp */
+ u8 syncookie:1; /* True if
+ * 1) tcp options need to be encoded in
+ * the TS of SYN+ACK
+ * 2) the ACK is validated by a BPF kfunc.
+ */

u8 num_timeout:7; /* number of timeouts */
u32 ts_recent;
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
u32 secid;
u32 peer_secid;
+ u32 timeout;
};
static inline struct request_sock *inet_reqsk(const struct sock *sk)
@@ -75,6 +87,45 @@ static inline struct sock *req_to_sk(struct request_sock *req)
return (struct sock *)req;
}
+/**
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
+ * @prefetched: is set to true if the socket was assigned from bpf
+ */
+static inline struct sock *skb_steal_sock(struct sk_buff *skb,
+ bool *refcounted, bool *prefetched)
+{
+ struct sock *sk = skb->sk;
+
+ if (!sk) {
+ *prefetched = false;
+ *refcounted = false;
+ return NULL;
+ }
+
+ *prefetched = skb_sk_is_prefetched(skb);
+ if (*prefetched) {
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ struct request_sock *req = inet_reqsk(sk);
+
+ *refcounted = false;
+ sk = req->rsk_listener;
+ req->rsk_listener = NULL;
+ return sk;
+ }
+#endif
+ *refcounted = sk_is_refcounted(sk);
+ } else {
+ *refcounted = true;
+ }
+
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ return sk;
+}
+
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
@@ -97,6 +148,8 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
sk_node_init(&req_to_sk(req)->sk_node);
sk_tx_queue_clear(req_to_sk(req));
req->saved_syn = NULL;
+ req->syncookie = 0;
+ req->timeout = 0;
req->num_timeout = 0;
req->num_retrans = 0;
req->sk = NULL;
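
saved_syn now records the three header lengths ahead of a flexible data[] array holding the copied headers. A hedged sketch of sizing and filling one record (the gfp flags, helper name, and the assumption that the captured headers are contiguous from the MAC header onward are illustrative; the in-tree TCP code copies from the network header unless BPF asked for the MAC header too):

/* Sketch: allocating and filling a saved_syn record. */
static struct saved_syn *saved_syn_sketch(const struct sk_buff *skb,
					  u32 mac_len, u32 net_len,
					  u32 tcp_len, gfp_t gfp)
{
	u32 len = mac_len + net_len + tcp_len;
	struct saved_syn *s = kmalloc(struct_size(s, data, len), gfp);

	if (!s)
		return NULL;
	s->mac_hdrlen = mac_len;
	s->network_hdrlen = net_len;
	s->tcp_hdrlen = tcp_len;
	memcpy(s->data, skb_mac_header(skb), len);
	return s;
}
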
diff --git a/include/net/rose.h b/include/net/rose.h
index cf517d306a28..23267b4efcfa 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -9,6 +9,7 @@
#define _ROSE_H
#include <linux/rose.h>
+#include <net/ax25.h>
#include <net/sock.h>
#define ROSE_ADDR_LEN 5
@@ -131,7 +132,8 @@ struct rose_sock {
ax25_address source_digis[ROSE_MAX_DIGIS];
ax25_address dest_digis[ROSE_MAX_DIGIS];
struct rose_neigh *neighbour;
- struct net_device *device;
+ struct net_device *device;
+ netdevice_tracker dev_tracker;
unsigned int lci, rand;
unsigned char state, condition, qbitincl, defer;
unsigned char cause, diagnostic;
@@ -162,8 +164,8 @@ extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-int rosecmp(rose_address *, rose_address *);
-int rosecmpm(rose_address *, rose_address *, unsigned short);
+int rosecmp(const rose_address *, const rose_address *);
+int rosecmpm(const rose_address *, const rose_address *, unsigned short);
char *rose2asc(char *buf, const rose_address *);
struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
void rose_kill_by_neigh(struct rose_neigh *);
@@ -205,8 +207,8 @@ extern const struct seq_operations rose_node_seqops;
extern struct seq_operations rose_route_seqops;
void rose_add_loopback_neigh(void);
-int __must_check rose_add_loopback_node(rose_address *);
-void rose_del_loopback_node(rose_address *);
+int __must_check rose_add_loopback_node(const rose_address *);
+void rose_del_loopback_node(const rose_address *);
void rose_rt_device_down(struct net_device *);
void rose_link_device_down(struct net_device *);
struct net_device *rose_dev_first(void);
diff --git a/include/net/route.h b/include/net/route.h
index ff021cab657e..d4a0147942f1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -35,14 +35,22 @@
#include <linux/cache.h>
#include <linux/security.h>
-/* IPv4 datagram length is stored into 16bit field (tot_len) */
-#define IP_MAX_MTU 0xFFFFU
-
#define RTO_ONLINK 0x01
-#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
-#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
+static inline __u8 ip_sock_rt_scope(const struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_LOCALROUTE))
+ return RT_SCOPE_LINK;
+
+ return RT_SCOPE_UNIVERSE;
+}
+
+static inline __u8 ip_sock_rt_tos(const struct sock *sk)
+{
+ return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
+}
+struct ip_tunnel_info;
struct fib_nh;
struct fib_info;
struct uncached_list;
@@ -67,9 +75,6 @@ struct rtable {
/* Miscellaneous cached information */
u32 rt_mtu_locked:1,
rt_pmtu:31;
-
- struct list_head rt_uncached;
- struct uncached_list *rt_uncached_list;
};
static inline bool rt_is_input_route(const struct rtable *rt)
@@ -128,12 +133,6 @@ static inline struct rtable *__ip_route_output_key(struct net *net,
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
const struct sock *sk);
-struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
- struct net_device *dev,
- struct net *net, __be32 *saddr,
- const struct ip_tunnel_info *info,
- u8 protocol, bool use_cache);
-
struct dst_entry *ipv4_blackhole_route(struct net *net,
struct dst_entry *dst_orig);
@@ -155,17 +154,17 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
}
static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4,
- struct sock *sk,
+ const struct sock *sk,
__be32 daddr, __be32 saddr,
__be16 dport, __be16 sport,
__u8 proto, __u8 tos, int oif)
{
- flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
- RT_SCOPE_UNIVERSE, proto,
- sk ? inet_sk_flowi_flags(sk) : 0,
+ flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
+ sk ? ip_sock_rt_scope(sk) : RT_SCOPE_UNIVERSE,
+ proto, sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
@@ -187,10 +186,6 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
-int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin,
- struct fib_result *res);
-
int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin,
const struct sk_buff *hint);
@@ -230,8 +225,7 @@ void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
- unsigned int flags, u16 type,
- bool nopolicy, bool noxfrm);
+ unsigned int flags, u16 type, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
struct in_ifaddr;
@@ -288,57 +282,54 @@ static inline char rt_tos2priority(u8 tos)
* ip_route_newports() calls.
*/
-static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
- u32 tos, int oif, u8 protocol,
+static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
__u8 flow_flags = 0;
- if (inet_sk(sk)->transparent)
+ if (inet_test_bit(TRANSPARENT, sk))
flow_flags |= FLOWI_FLAG_ANYSRC;
- flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
- protocol, flow_flags, dst, src, dport, sport,
- sk->sk_uid);
+ flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
+ ip_sock_rt_scope(sk), protocol, flow_flags, dst,
+ src, dport, sport, sk->sk_uid);
}
-static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
- __be32 dst, __be32 src, u32 tos,
- int oif, u8 protocol,
+static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
+ __be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
struct net *net = sock_net(sk);
struct rtable *rt;
- ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
- sport, dport, sk);
+ ip_route_connect_init(fl4, dst, src, oif, protocol, sport, dport, sk);
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
return rt;
ip_rt_put(rt);
- flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
+ flowi4_update_output(fl4, oif, fl4->daddr, fl4->saddr);
}
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
__be16 orig_sport, __be16 orig_dport,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
if (sport != orig_sport || dport != orig_dport) {
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
ip_rt_put(rt);
- flowi4_update_output(fl4, sk->sk_bound_dev_if,
- RT_CONN_FLAGS(sk), fl4->daddr,
+ flowi4_update_output(fl4, sk->sk_bound_dev_if, fl4->daddr,
fl4->saddr);
- security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(sock_net(sk), fl4, sk);
}
return rt;
@@ -360,7 +351,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
struct net *net = dev_net(dst->dev);
if (hoplimit == 0)
- hoplimit = net->ipv4.sysctl_ip_default_ttl;
+ hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
return hoplimit;
}
@@ -369,7 +360,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
{
struct neighbour *neigh;
- neigh = __ipv4_neigh_lookup_noref(dev, daddr);
+ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
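
With the tos parameter gone from ip_route_connect(), connect-time callers let the helper derive TOS and scope from the socket via ip_sock_rt_tos()/ip_sock_rt_scope(). A minimal sketch of an updated caller (protocol and ports are placeholders):

/* Sketch of a caller after the signature change. */
static int connect_route_sketch(struct sock *sk, __be32 daddr, __be32 saddr,
				__be16 sport, __be16 dport)
{
	struct flowi4 fl4;
	struct rtable *rt;

	rt = ip_route_connect(&fl4, daddr, saddr, sk->sk_bound_dev_if,
			      IPPROTO_TCP, sport, dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	ip_rt_put(rt);
	return 0;
}
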
diff --git a/include/net/rpl.h b/include/net/rpl.h
index 308ef0a05cae..74734191c458 100644
--- a/include/net/rpl.h
+++ b/include/net/rpl.h
@@ -23,12 +23,6 @@ static inline int rpl_init(void)
static inline void rpl_exit(void) {}
#endif
-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
-
-size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
- unsigned char cmpre);
-
void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
const struct ipv6_rpl_sr_hdr *inhdr,
const struct in6_addr *daddr, unsigned char n);
diff --git a/include/net/rps.h b/include/net/rps.h
new file mode 100644
index 000000000000..7660243e905b
--- /dev/null
+++ b/include/net/rps.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_RPS_H
+#define _NET_RPS_H
+
+#include <linux/types.h>
+#include <linux/static_key.h>
+#include <net/sock.h>
+#include <net/hotdata.h>
+
+#ifdef CONFIG_RPS
+
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
+
+/*
+ * This structure holds an RPS map which can be of variable length. The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+ unsigned int len;
+ struct rcu_head rcu;
+ u16 cpus[];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
+ */
+struct rps_dev_flow {
+ u16 cpu;
+ u16 filter;
+ unsigned int last_qtail;
+};
+#define RPS_NO_FILTER 0xffff
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct rcu_head rcu;
+ struct rps_dev_flow flows[];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+ ((_num) * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32bit value. Upper part is the high-order bits
+ * of flow hash, lower part is CPU number.
+ * rps_cpu_mask is used to partition the space, depending on number of
+ * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32-6=26 bits for the hash.
+ */
+struct rps_sock_flow_table {
+ u32 mask;
+
+ u32 ents[] ____cacheline_aligned_in_smp;
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~net_hotdata.rps_cpu_mask;
+
+ /* We only give a hint, preemption can change CPU under us */
+ val |= raw_smp_processor_id();
+
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in get_rps_cpu().
+ */
+ if (READ_ONCE(table->ents[index]) != val)
+ WRITE_ONCE(table->ents[index], val);
+}
+
+#endif /* CONFIG_RPS */
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ if (!hash)
+ return;
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
+ if (sock_flow_table)
+ rps_record_sock_flow(sock_flow_table, hash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (static_branch_unlikely(&rfs_needed)) {
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
+ }
+#endif
+}
+
+#endif /* _NET_RPS_H */
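
A worked example of the ents[] encoding described above: with 64 possible CPUs, rps_cpu_mask is 0x3f, so the low 6 bits of an entry carry the CPU and the high 26 bits carry the flow hash. Standalone sketch with hypothetical values:

/* Worked example of the rps_sock_flow_table entry layout
 * (userspace-style sketch; values are hypothetical).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpu_mask = 0x3f;	/* roundup_pow_of_two(64) - 1 */
	uint32_t hash = 0xdeadbeef;
	uint32_t cpu = 5;
	uint32_t ent = (hash & ~cpu_mask) | cpu;

	/* prints: ent=0xdeadbec5 cpu=5 hash_bits=0xdeadbec0 */
	printf("ent=0x%08x cpu=%u hash_bits=0x%08x\n",
	       ent, ent & cpu_mask, ent & ~cpu_mask);
	return 0;
}
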
diff --git a/include/net/rsi_91x.h b/include/net/rsi_91x.h
index 040f07b47f1f..b2283762e446 100644
--- a/include/net/rsi_91x.h
+++ b/include/net/rsi_91x.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2017 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e2091bb2b3a8..3bfb80bad173 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -10,9 +10,24 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
- RTNL_FLAG_DOIT_UNLOCKED = 1,
+ RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+ RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
+ RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
};
+enum rtnl_kinds {
+ RTNL_KIND_NEW,
+ RTNL_KIND_DEL,
+ RTNL_KIND_GET,
+ RTNL_KIND_SET
+};
+#define RTNL_KIND_MASK 0x3
+
+static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
+{
+ return msgtype & RTNL_KIND_MASK;
+}
+
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_register_module(struct module *owner, int protocol, int msgtype,
@@ -33,9 +48,13 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*
* @list: Used internally
* @kind: Identifier
+ * @netns_refund: Physical device, move to init_net on netns exit
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
+ * @alloc: netdev allocation function, may be %NULL; if set, it is
+ * used in place of alloc_netdev_mqs(), in which case @priv_size
+ * and @setup are unused. Returns a netdev or ERR_PTR().
* @priv_size: sizeof net_device private space
* @setup: net_device setup function
* @newlink: Function for configuring and registering a new device
@@ -62,8 +81,14 @@ struct rtnl_link_ops {
const char *kind;
size_t priv_size;
+ struct net_device *(*alloc)(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues);
void (*setup)(struct net_device *dev);
+ bool netns_refund;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
@@ -143,10 +168,11 @@ struct rtnl_af_ops {
u32 ext_filter_mask);
int (*validate_link_af)(const struct net_device *dev,
- const struct nlattr *attr);
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
int (*set_link_af)(struct net_device *dev,
- const struct nlattr *attr);
-
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
int (*fill_stats_af)(struct sk_buff *skb,
const struct net_device *dev);
size_t (*get_stats_af_size)(const struct net_device *dev);
@@ -161,11 +187,12 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
const struct rtnl_link_ops *ops,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
-int rtnl_delete_link(struct net_device *dev);
-int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
+ u32 portid, const struct nlmsghdr *nlh);
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
- struct netlink_ext_ack *exterr);
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+ struct netlink_ext_ack *exterr);
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
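
rtnl_msgtype_kind() works because the RTM_* message types are allocated in NEW/DEL/GET/SET quads: RTM_NEWLINK is 16, RTM_DELLINK 17, RTM_GETLINK 18, RTM_SETLINK 19, so the low two bits are exactly the operation kind. A trivial sketch of a consumer (the helper name is illustrative):

/* e.g. RTM_DELLINK (17) & RTNL_KIND_MASK == 1 == RTNL_KIND_DEL */
static bool rtnl_is_bulk_capable_del(int msgtype, unsigned int flags)
{
	return rtnl_msgtype_kind(msgtype) == RTNL_KIND_DEL &&
	       (flags & RTNL_FLAG_BULK_DEL_SUPPORTED);
}
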
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d60e7c39d60c..cefe0c4bdae3 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -19,6 +19,7 @@
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
+#include <linux/xarray.h>
struct Qdisc_ops;
struct qdisc_walker;
@@ -36,8 +37,23 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_MISSED,
+ __QDISC_STATE_DRAINING,
};
+enum qdisc_state2_t {
+ /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+ * Use qdisc_run_begin/end() or qdisc_is_running() instead.
+ */
+ __QDISC_STATE2_RUNNING,
+};
+
+#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
+#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
+
+#define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \
+ QDISC_STATE_DRAINING)
+
struct qdisc_size_table {
struct rcu_head rcu;
struct list_head list;
@@ -89,9 +105,9 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct net_rate_estimator __rcu *rate_est;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- int padded;
+ int pad;
refcount_t refcnt;
/*
@@ -99,19 +115,20 @@ struct Qdisc {
*/
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
- struct gnet_stats_basic_packed bstats;
- seqcount_t running;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
unsigned long state;
+ unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
- /* for NOLOCK qdisc, true if there are no enqueued skbs */
- bool empty;
struct rcu_head rcu;
+ netdevice_tracker dev_tracker;
+ /* private data */
+ long privdata[] ____cacheline_aligned;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -121,6 +138,13 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
refcount_inc(&qdisc->refcnt);
}
+static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return true;
+ return refcount_dec_if_one(&qdisc->refcnt);
+}
+
/* Intended to be used by unlocked users, when concurrent qdisc release is
* possible.
*/
@@ -134,11 +158,20 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
return NULL;
}
+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+ * root_lock section, or provide their own memory barriers ordering it
+ * against the qdisc_run_begin/end() atomic bit operations.
+ */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
- return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+ return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+}
+
+static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
@@ -149,32 +182,53 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
- return READ_ONCE(qdisc->empty);
+ return nolock_qdisc_is_empty(qdisc);
return !READ_ONCE(qdisc->q.qlen);
}
+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+ * the qdisc root lock acquired.
+ */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
- if (!spin_trylock(&qdisc->seqlock))
+ if (spin_trylock(&qdisc->seqlock))
+ return true;
+
+ /* No need to insist if the MISSED flag was already set.
+ * Note that test_and_set_bit() also gives us memory ordering
+ * guarantees wrt potential earlier enqueue() and below
+ * spin_trylock(), both of which are necessary to prevent races
+ */
+ if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
return false;
- WRITE_ONCE(qdisc->empty, false);
- } else if (qdisc_is_running(qdisc)) {
- return false;
+
+ /* Try to take the lock again to make sure that we will either
+ * grab it or the CPU that still has it will see MISSED set
+ * when testing it in qdisc_run_end()
+ */
+ return spin_trylock(&qdisc->seqlock);
}
- /* Variant of write_seqcount_begin() telling lockdep a trylock
- * was attempted.
- */
- raw_write_seqcount_begin(&qdisc->running);
- seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
- return true;
+ return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- write_seqcount_end(&qdisc->running);
- if (qdisc->flags & TCQ_F_NOLOCK)
+ if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
+
+ /* spin_unlock() only has store-release semantic. The unlock
+ * and test_bit() ordering is a store-load ordering, so a full
+ * memory barrier is needed here.
+ */
+ smp_mb();
+
+ if (unlikely(test_bit(__QDISC_STATE_MISSED,
+ &qdisc->state)))
+ __netif_schedule(qdisc);
+ } else {
+ __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+ }
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -184,12 +238,7 @@ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
-#ifdef CONFIG_BQL
- /* Non-BQL migrated drivers will return 0, too. */
- return dql_avail(&txq->dql);
-#else
- return 0;
-#endif
+ return netdev_queue_dql_avail(txq);
}
struct Qdisc_class_ops {
@@ -207,7 +256,8 @@ struct Qdisc_class_ops {
int (*change)(struct Qdisc *, u32, u32,
struct nlattr **, unsigned long *,
struct netlink_ext_ack *);
- int (*delete)(struct Qdisc *, unsigned long);
+ int (*delete)(struct Qdisc *, unsigned long,
+ struct netlink_ext_ack *);
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
@@ -254,6 +304,8 @@ struct Qdisc_ops {
struct netlink_ext_ack *extack);
void (*attach)(struct Qdisc *sch);
int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
+ void (*change_real_num_tx)(struct Qdisc *sch,
+ unsigned int new_real_tx);
int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
@@ -268,7 +320,6 @@ struct Qdisc_ops {
struct module *owner;
};
-
struct tcf_result {
union {
struct {
@@ -276,12 +327,6 @@ struct tcf_result {
u32 classid;
};
const struct tcf_proto *goto_tp;
-
- /* used in the skb_tc_reinsert function */
- struct {
- bool ingress;
- struct gnet_stats_queue *qstats;
- };
};
};
@@ -303,7 +348,7 @@ struct tcf_proto_ops {
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool, bool,
+ void **, u32,
struct netlink_ext_ack *);
int (*delete)(struct tcf_proto *tp, void *arg,
bool *last, bool rtnl_held,
@@ -325,6 +370,12 @@ struct tcf_proto_ops {
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
+ void (*tmplt_reoffload)(struct tcf_chain *chain,
+ bool add,
+ flow_setup_cb_t *cb,
+ void *cb_priv);
+ struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
+ u32 handle);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
@@ -384,7 +435,6 @@ struct qdisc_skb_cb {
};
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
- u16 mru;
};
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
@@ -406,6 +456,7 @@ struct tcf_chain {
};
struct tcf_block {
+ struct xarray ports; /* datapath accessible */
/* Lock protects tcf_block and lifetime-management data of chains
* attached to the block (refcnt, action_refcnt, explicitly_created).
*/
@@ -432,7 +483,8 @@ struct tcf_block {
struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};
-#ifdef CONFIG_PROVE_LOCKING
+struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);
+
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
return lockdep_is_held(&chain->filter_chain_lock);
@@ -442,17 +494,6 @@ static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
return lockdep_is_held(&tp->lock);
}
-#else
-static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
-{
- return true;
-}
-
-static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
-{
- return true;
-}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
#define tcf_chain_dereference(p, chain) \
rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
@@ -468,11 +509,6 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
- return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
@@ -517,26 +553,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
- return qdisc->dev_queue->qdisc_sleeping;
-}
-
-/* The qdisc root lock is a mechanism by which to top level
- * of a qdisc tree can be locked from any qdisc node in the
- * forest. This allows changing the configuration of some
- * aspect of the qdisc tree while blocking out asynchronous
- * qdisc access in the packet processing paths.
- *
- * It is only legal to do this when the root will not change
- * on us. Otherwise we'll potentially lock the wrong qdisc
- * root. This is enforced by holding the RTNL semaphore, which
- * all users of this lock accessor must do.
- */
-static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root(qdisc);
-
- ASSERT_RTNL();
- return qdisc_lock(root);
+ return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}
static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
@@ -547,32 +564,31 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
- ASSERT_RTNL();
- return &root->running;
-}
-
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}
-static inline void sch_tree_lock(const struct Qdisc *q)
+static inline void sch_tree_lock(struct Qdisc *q)
{
- spin_lock_bh(qdisc_root_sleeping_lock(q));
+ if (q->flags & TCQ_F_MQROOT)
+ spin_lock_bh(qdisc_lock(q));
+ else
+ spin_lock_bh(qdisc_root_sleeping_lock(q));
}
-static inline void sch_tree_unlock(const struct Qdisc *q)
+static inline void sch_tree_unlock(struct Qdisc *q)
{
- spin_unlock_bh(qdisc_root_sleeping_lock(q));
+ if (q->flags & TCQ_F_MQROOT)
+ spin_unlock_bh(qdisc_lock(q));
+ else
+ spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
+extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
@@ -585,6 +601,7 @@ get_default_qdisc_ops(const struct net_device *dev, int ntx)
struct Qdisc_class_common {
u32 classid;
+ unsigned int filter_cnt;
struct hlist_node hnode;
};
@@ -619,6 +636,31 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
return NULL;
}
+static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
+{
+ return cl->filter_cnt > 0;
+}
+
+static inline void qdisc_class_get(struct Qdisc_class_common *cl)
+{
+ unsigned int res;
+
+ if (check_add_overflow(cl->filter_cnt, 1, &res))
+ WARN(1, "Qdisc class overflow");
+
+ cl->filter_cnt = res;
+}
+
+static inline void qdisc_class_put(struct Qdisc_class_common *cl)
+{
+ unsigned int res;
+
+ if (check_sub_overflow(cl->filter_cnt, 1, &res))
+ WARN(1, "Qdisc class underflow");
+
+ cl->filter_cnt = res;
+}
+
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
@@ -635,6 +677,8 @@ void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
int dev_qdisc_change_tx_queue_len(struct net_device *dev);
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+ unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
@@ -643,6 +687,7 @@ void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
@@ -670,6 +715,9 @@ qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
{
}
#endif
+void qdisc_offload_query_caps(struct net_device *dev,
+ enum tc_setup_type type,
+ void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
@@ -683,7 +731,7 @@ int skb_do_redirect(struct sk_buff *);
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_XGRESS
return skb->tc_at_ingress;
#else
return false;
@@ -742,7 +790,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
+
+ if (rcu_access_pointer(txq->qdisc) !=
+ rcu_access_pointer(txq->qdisc_sleeping))
return true;
}
return false;
@@ -796,14 +846,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free);
}
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets)
{
- bstats->bytes += bytes;
- bstats->packets += packets;
+ u64_stats_update_begin(&bstats->syncp);
+ u64_stats_add(&bstats->bytes, bytes);
+ u64_stats_add(&bstats->packets, packets);
+ u64_stats_update_end(&bstats->syncp);
}
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
_bstats_update(bstats,
@@ -811,26 +863,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- __u64 bytes, __u32 packets)
-{
- u64_stats_update_begin(&bstats->syncp);
- _bstats_update(&bstats->bstats, bytes, packets);
- u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- const struct sk_buff *skb)
-{
- u64_stats_update_begin(&bstats->syncp);
- bstats_update(&bstats->bstats, skb);
- u64_stats_update_end(&bstats->syncp);
-}
-
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}
static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -919,10 +955,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
__u32 *backlog)
{
struct gnet_stats_queue qstats = { 0 };
- __u32 len = qdisc_qlen_sum(sch);
- __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
- *qlen = qstats.qlen;
+ gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+ *qlen = qstats.qlen + qdisc_qlen(sch);
*backlog = qstats.backlog;
}
@@ -943,13 +978,6 @@ static inline void qdisc_purge_queue(struct Qdisc *sch)
qdisc_tree_reduce_backlog(sch, qlen, backlog);
}
-static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
-{
- qh->head = NULL;
- qh->tail = NULL;
- qh->qlen = 0;
-}
-
static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
struct qdisc_skb_head *qh)
{
@@ -1011,6 +1039,37 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
return skb;
}
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+ u32 drop_reason;
+
+ u16 zone; /* Only valid if post_ct = true */
+ u16 mru;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
+static inline enum skb_drop_reason
+tcf_get_drop_reason(const struct sk_buff *skb)
+{
+ return tc_skb_cb(skb)->drop_reason;
+}
+
+static inline void tcf_set_drop_reason(const struct sk_buff *skb,
+ enum skb_drop_reason reason)
+{
+ tc_skb_cb(skb)->drop_reason = reason;
+}
+
/* Instead of calling kfree_skb() while root qdisc lock is held,
* queue the skb for future freeing at end of __dev_xmit_skb()
*/
@@ -1047,12 +1106,6 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
return 0;
}
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;
@@ -1146,7 +1199,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
__qdisc_reset_queue(&sch->q);
- sch->qstats.backlog = 0;
}
static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -1158,7 +1210,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
old = *pold;
*pold = new;
if (old != NULL)
- qdisc_tree_flush_backlog(old);
+ qdisc_purge_queue(old);
sch_tree_unlock(sch);
return old;
@@ -1197,24 +1249,11 @@ static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
-/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
- long it will take to send a packet given its size.
- */
-static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
-{
- int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
- if (slot < 0)
- slot = 0;
- slot >>= rtab->rate.cell_log;
- if (slot > 255)
- return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
- return rtab->data[slot];
-}
-
struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u16 mpu;
u8 linklayer;
u8 shift;
};
@@ -1224,6 +1263,9 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
{
len += r->overhead;
+ if (len < r->mpu)
+ len = r->mpu;
+
if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
@@ -1246,24 +1288,39 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
res->overhead = r->overhead;
+ res->mpu = r->mpu;
res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
+struct psched_pktrate {
+ u64 rate_pkts_ps; /* packets per second */
+ u32 mult;
+ u8 shift;
+};
+
+static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
+ unsigned int pkt_num)
+{
+ return ((u64)pkt_num * r->mult) >> r->shift;
+}
+
+void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
+
/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
* The fast path only needs to access filter list and to update stats
*/
struct mini_Qdisc {
struct tcf_proto *filter_list;
struct tcf_block *block;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct rcu_head rcu;
+ unsigned long rcu_state;
};
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
@@ -1284,9 +1341,15 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block);
-static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+
+int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
{
- return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
+ while (test_bit(__QDISC_STATE_SCHED, &q->state))
+ msleep(1);
}
#endif
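
The NOLOCK qdisc_run_begin()/qdisc_run_end() pair above encodes a trylock-plus-MISSED handshake: a contending sender publishes MISSED and retries the lock once, while the exiting owner unlocks, issues a full barrier, and reschedules if MISSED is visible. A toy userspace model of just that ordering, with pthreads and C11 atomics standing in for the kernel primitives (a sketch, not the kernel code; the spinlock must be set up with pthread_spin_init() before first use, and clearing MISSED on the dequeue path is omitted):

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

#define MISSED 1u

static pthread_spinlock_t seqlock;
static atomic_uint state;	/* bit 0 plays __QDISC_STATE_MISSED */

static bool run_begin(void)
{
	if (pthread_spin_trylock(&seqlock) == 0)
		return true;
	/* Lost the race: publish MISSED, then retry once so either we
	 * take the lock or the owner is guaranteed to observe MISSED
	 * when it exits.
	 */
	if (atomic_fetch_or(&state, MISSED) & MISSED)
		return false;
	return pthread_spin_trylock(&seqlock) == 0;
}

static void run_end(void (*reschedule)(void))
{
	pthread_spin_unlock(&seqlock);
	/* store-load fence, the smp_mb() analogue */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&state) & MISSED)
		reschedule();
}
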
diff --git a/include/net/scm.h b/include/net/scm.h
index 1ce365f4c256..92276a2c5543 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -5,10 +5,12 @@
#include <linux/limits.h>
#include <linux/net.h>
#include <linux/cred.h>
+#include <linux/file.h>
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/sched/signal.h>
+#include <net/compat.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
@@ -23,6 +25,7 @@ struct scm_creds {
struct scm_fp_list {
short count;
+ short count_unix;
short max;
struct user_struct *user;
struct file *fp[SCM_MAX_FD];
@@ -105,19 +108,67 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
}
}
}
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return test_bit(SOCK_PASSSEC, &sock->flags);
+}
#else
static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
{ }
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return false;
+}
#endif /* CONFIG_SECURITY_NETWORK */
-static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
+static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
+{
+ struct file *pidfd_file = NULL;
+ int len, pidfd;
+
+ /* put_cmsg() doesn't return an error if CMSG is truncated,
+ * that's why we need to open-code these checks here.
+ */
+ if (msg->msg_flags & MSG_CMSG_COMPAT)
+ len = sizeof(struct compat_cmsghdr) + sizeof(int);
+ else
+ len = sizeof(struct cmsghdr) + sizeof(int);
+
+ if (msg->msg_controllen < len) {
+ msg->msg_flags |= MSG_CTRUNC;
+ return;
+ }
+
+ if (!scm->pid)
+ return;
+
+ pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file);
+
+ if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) {
+ if (pidfd_file) {
+ put_unused_fd(pidfd);
+ fput(pidfd_file);
+ }
+
+ return;
+ }
+
+ if (pidfd_file)
+ fd_install(pidfd, pidfd_file);
+}
+
+static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) ||
+ test_bit(SOCK_PASSPIDFD, &sock->flags) ||
+ scm->fp || scm_has_secdata(sock))
msg->msg_flags |= MSG_CTRUNC;
scm_destroy(scm);
- return;
+ return false;
}
if (test_bit(SOCK_PASSCRED, &sock->flags)) {
@@ -130,16 +181,42 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
}
- scm_destroy_cred(scm);
-
scm_passec(sock, msg, scm);
- if (!scm->fp)
+ if (scm->fp)
+ scm_detach_fds(msg, scm);
+
+ return true;
+}
+
+static inline void scm_recv(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
+{
+ if (!__scm_recv_common(sock, msg, scm, flags))
return;
-
- scm_detach_fds(msg, scm);
+
+ scm_destroy_cred(scm);
}
+static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
+{
+ if (!__scm_recv_common(sock, msg, scm, flags))
+ return;
+
+ if (test_bit(SOCK_PASSPIDFD, &sock->flags))
+ scm_pidfd_recv(msg, scm);
+
+ scm_destroy_cred(scm);
+}
+
+static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
+ unsigned int flags)
+{
+ if (!ufd)
+ return -EFAULT;
+ return receive_fd(f, ufd, flags);
+}
#endif /* __LINUX_NET_SCM_H */
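
The open-coded length test in scm_pidfd_recv() exists because put_cmsg() silently truncates; userspace must supply at least a one-int control buffer to actually receive the descriptor. A hedged userspace counterpart (socket setup elided; SO_PASSPIDFD must already be enabled, and SCM_PIDFD may need a local definition on older libcs):

#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

#ifndef SCM_PIDFD
#define SCM_PIDFD 0x04
#endif

static int recv_pidfd(int sk)
{
	char data, cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *c;
	int pidfd = -1;

	if (recvmsg(sk, &msg, 0) < 0)
		return -1;
	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_PIDFD)
			memcpy(&pidfd, CMSG_DATA(c), sizeof(pidfd));
	return pidfd;	/* -1 if absent or truncated (MSG_CTRUNC) */
}
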
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 5a9bb09f32b6..f514a0aa849e 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -24,7 +24,7 @@
#define __sctp_checksum_h__
#include <linux/types.h>
-#include <net/sctp/sctp.h>
+#include <linux/sctp.h>
#include <linux/crc32c.h>
#include <linux/crc32.h>
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index e8df72e1627a..2058fabffbf6 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -59,6 +59,7 @@ enum sctp_verb {
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timer. */
SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
+ SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer. */
SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */
SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */
SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
@@ -68,7 +69,6 @@ enum sctp_verb {
SCTP_CMD_ASSOC_FAILED, /* Handle association failure. */
SCTP_CMD_DISCARD_PACKET, /* Discard the whole packet. */
SCTP_CMD_GEN_SHUTDOWN, /* Generate a SHUTDOWN chunk. */
- SCTP_CMD_UPDATE_ASSOC, /* Update association information. */
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 122d9e2d8dfd..5859e0a16a58 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -77,6 +77,7 @@ enum sctp_event_timeout {
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
SCTP_EVENT_TIMEOUT_HEARTBEAT,
SCTP_EVENT_TIMEOUT_RECONF,
+ SCTP_EVENT_TIMEOUT_PROBE,
SCTP_EVENT_TIMEOUT_SACK,
SCTP_EVENT_TIMEOUT_AUTOCLOSE,
};
@@ -200,6 +201,23 @@ enum sctp_sock_state {
SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
};
+enum sctp_plpmtud_state {
+ SCTP_PL_DISABLED,
+ SCTP_PL_BASE,
+ SCTP_PL_SEARCH,
+ SCTP_PL_COMPLETE,
+ SCTP_PL_ERROR,
+};
+
+#define SCTP_BASE_PLPMTU 1200
+#define SCTP_MAX_PLPMTU 9000
+#define SCTP_MIN_PLPMTU 512
+
+#define SCTP_MAX_PROBES 3
+
+#define SCTP_PL_BIG_STEP 32
+#define SCTP_PL_MIN_STEP 4
+
/* These functions map various types to printable names. */
const char *sctp_cname(const union sctp_subtype id); /* chunk types */
const char *sctp_oname(const union sctp_subtype id); /* other events */
@@ -286,6 +304,8 @@ enum { SCTP_MAX_GABS = 16 };
* functions simpler to write.
*/
+#define SCTP_DEFAULT_UDP_PORT 9899 /* default UDP tunneling port */
+
/* These are the values for pf exposure, UNUSED is to keep compatible with old
* applications by default.
*/
@@ -340,8 +360,7 @@ enum {
#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
* Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
* addresses.
*/
@@ -349,7 +368,6 @@ enum {
((htonl(INADDR_BROADCAST) == a) || \
ipv4_is_multicast(a) || \
ipv4_is_zeronet(a) || \
- ipv4_is_test_198(a) || \
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
@@ -422,4 +440,6 @@ enum {
*/
#define SCTP_AUTH_RANDOM_LENGTH 32
+#define SCTP_PROBE_TIMER_MIN 5000
+
#endif /* __sctp_constants_h__ */
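
The PLPMTUD constants pin the probe window: the search starts at SCTP_BASE_PLPMTU (1200) and may climb toward SCTP_MAX_PLPMTU (9000), stepping by SCTP_PL_BIG_STEP while probes are acknowledged, with each size retried up to SCTP_MAX_PROBES times before the state machine reacts. An illustrative, simplified step function, not the full SCTP_PL_SEARCH logic in net/sctp/transport.c:

/* Simplified probe-size walk; the real search also shrinks the step
 * near the ceiling and handles the error states.
 */
static int next_probe_size(int cur, bool acked)
{
	if (!acked)
		return cur;	/* re-probe, up to SCTP_MAX_PROBES times */
	if (cur + SCTP_PL_BIG_STEP > SCTP_MAX_PLPMTU)
		return SCTP_MAX_PLPMTU;
	return cur + SCTP_PL_BIG_STEP;
}
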
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 4fc747b778eb..a2310fa995f6 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -67,11 +67,6 @@
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
-/* Round an int up to the next multiple of 4. */
-#define SCTP_PAD4(s) (((s)+3)&~3)
-/* Truncate to the previous multiple of 4. */
-#define SCTP_TRUNC4(s) ((s)&~3)
-
/*
* Function declarations.
*/
@@ -84,6 +79,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr,
struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
int sctp_register_pf(struct sctp_pf *, sa_family_t);
void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_udp_sock_start(struct net *net);
+void sctp_udp_sock_stop(struct net *net);
/*
* sctp/socket.c
@@ -101,21 +98,20 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
-struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *);
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter, int pos);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
- struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
- const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p);
+ const union sctp_addr *paddr, void *p, int dif);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
struct sctp_info *info);
@@ -143,6 +139,8 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
struct sctp_transport **);
void sctp_err_finish(struct sock *, struct sctp_transport *);
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb);
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
@@ -150,8 +148,6 @@ void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_association *asoc,
struct sctp_transport *t);
-void sctp_backlog_migrate(struct sctp_association *assoc,
- struct sock *oldsk, struct sock *newsk);
int sctp_transport_hashtable_init(void);
void sctp_transport_hashtable_destroy(void);
int sctp_hash_transport(struct sctp_transport *t);
@@ -159,10 +155,12 @@ void sctp_unhash_transport(struct sctp_transport *t);
struct sctp_transport *sctp_addrs_lookup_transport(
struct net *net,
const union sctp_addr *laddr,
- const union sctp_addr *paddr);
+ const union sctp_addr *paddr,
+ int dif, int sdif);
struct sctp_transport *sctp_epaddr_lookup_transport(
const struct sctp_endpoint *ep,
const union sctp_addr *paddr);
+bool sctp_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif);
/*
* sctp/proc.c
@@ -425,11 +423,11 @@ static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
* the chunk length to indicate when to stop. Make sure
* there is room for a param header too.
*/
-#define sctp_walk_params(pos, chunk, member)\
-_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+#define sctp_walk_params(pos, chunk)\
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length))
-#define _sctp_walk_params(pos, chunk, end, member)\
-for (pos.v = chunk->member;\
+#define _sctp_walk_params(pos, chunk, end)\
+for (pos.v = (u8 *)(chunk + 1);\
(pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
(void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
@@ -452,8 +450,8 @@ for (err = (struct sctp_errhdr *)((void *)chunk_hdr + \
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
#define _sctp_walk_fwdtsn(pos, chunk, end)\
-for (pos = chunk->subh.fwdtsn_hdr->skip;\
- (void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
+for (pos = (void *)(chunk->subh.fwdtsn_hdr + 1);\
+ (void *)pos <= (void *)(chunk->subh.fwdtsn_hdr + 1) + end - sizeof(struct sctp_fwdtsn_skip);\
pos++)
/* External references. */
@@ -506,8 +504,8 @@ static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
}
-#define sctp_for_each_hentry(epb, head) \
- hlist_for_each_entry(epb, head, node)
+#define sctp_for_each_hentry(ep, head) \
+ hlist_for_each_entry(ep, head, node)
/* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
@@ -571,15 +569,19 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
/* Calculate max payload size given a MTU, or the total overhead if
* given MTU is zero
*/
-static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
- __u32 mtu, __u32 extra)
+static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp,
+ const struct sctp_transport *t,
+ __u32 mtu, __u32 extra)
{
__u32 overhead = sizeof(struct sctphdr) + extra;
- if (sp)
+ if (sp) {
overhead += sp->pf->af->net_header_len;
- else
+ if (sp->udp_port && (!t || t->encap_port))
+ overhead += sizeof(struct udphdr);
+ } else {
overhead += sizeof(struct ipv6hdr);
+ }
if (WARN_ON_ONCE(mtu && mtu <= overhead))
mtu = overhead;
@@ -587,6 +589,12 @@ static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
return mtu ? mtu - overhead : overhead;
}
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+ __u32 mtu, __u32 extra)
+{
+ return __sctp_mtu_payload(sp, NULL, mtu, extra);
+}
+
static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
{
return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
@@ -610,6 +618,47 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
}
+static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
+{
+ return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -
+ sizeof(struct sctphdr);
+}
+
+static inline void sctp_transport_pl_reset(struct sctp_transport *t)
+{
+ if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) &&
+ (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) {
+ if (t->pl.state == SCTP_PL_DISABLED) {
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+ }
+ } else {
+ if (t->pl.state != SCTP_PL_DISABLED) {
+ if (del_timer(&t->probe_timer))
+ sctp_transport_put(t);
+ t->pl.state = SCTP_PL_DISABLED;
+ }
+ }
+}
+
+static inline void sctp_transport_pl_update(struct sctp_transport *t)
+{
+ if (t->pl.state == SCTP_PL_DISABLED)
+ return;
+
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+}
+
+static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
+{
+ return t->pl.state != SCTP_PL_DISABLED;
+}
+
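A minimal sketch of how these helpers are meant to compose (the caller below is hypothetical, not part of this patch):

/* Hypothetical call site: re-arm or restart probing on a path event. */
static void example_pl_path_event(struct sctp_transport *t, bool route_changed)
{
	if (route_changed)
		sctp_transport_pl_update(t);	/* back to SCTP_PL_BASE, re-probe */
	else
		sctp_transport_pl_reset(t);	/* arm/disarm from current flags */
}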
static inline bool sctp_newsk_ready(const struct sock *sk)
{
return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5c491a3bc27e..64c42bd56bb2 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -151,11 +151,11 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
/* Prototypes for timeout event state functions. */
sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
sctp_state_fn_t sctp_sf_send_reconf;
+sctp_state_fn_t sctp_sf_send_probe;
sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;
/* Prototypes for utility support functions. */
-__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const struct sctp_sm_table_entry *sctp_sm_lookup_event(
struct net *net,
enum sctp_event_type event_type,
@@ -165,8 +165,6 @@ int sctp_chunk_iif(const struct sctp_chunk *);
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
struct sctp_chunk *,
gfp_t gfp);
-__u32 sctp_generate_verification_tag(void);
-void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
/* Prototypes for chunk-building functions. */
struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
@@ -221,12 +219,17 @@ struct sctp_chunk *sctp_make_violation_paramlen(
struct sctp_chunk *sctp_make_violation_max_retrans(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_new_encap_port(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
- const struct sctp_transport *transport);
+ const struct sctp_transport *transport,
+ __u32 probe_size);
struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
const void *payload,
const size_t paylen);
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len);
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
__be16 cause_code, const void *payload,
@@ -307,6 +310,7 @@ int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
void sctp_generate_t3_rtx_event(struct timer_list *t);
void sctp_generate_heartbeat_event(struct timer_list *t);
void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_probe_event(struct timer_list *t);
void sctp_generate_proto_unreach_event(struct timer_list *t);
void sctp_ootb_pkt_free(struct sctp_packet *packet);
@@ -377,10 +381,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
* Verification Tag value does not match the receiver's own
* tag value, the receiver shall silently discard the packet...
*/
- if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
- return 1;
+ if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+ return 0;
- return 0;
+ chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+ return 1;
}
/* Check VTAG of the packet matches the sender's own tag and the T bit is
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 01a70b27e026..572d73fdcd5e 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -26,8 +26,8 @@ struct sctp_sched_ops {
int (*init)(struct sctp_stream *stream);
/* Init a stream */
int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
- /* Frees the entire thing */
- void (*free)(struct sctp_stream *stream);
+ /* free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
/* Enqueue a chunk */
void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg);
@@ -58,5 +58,7 @@ void sctp_sched_ops_register(enum sctp_sched_type sched,
struct sctp_sched_ops *sched_ops);
void sctp_sched_ops_prio_init(void);
void sctp_sched_ops_rr_init(void);
+void sctp_sched_ops_fc_init(void);
+void sctp_sched_ops_wfq_init(void);
#endif /* __sctp_stream_sched_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index b33f1aefad09..f24a1bbcb3ef 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -177,6 +177,10 @@ struct sctp_sock {
* will be inherited by all new associations.
*/
__u32 hbinterval;
+ __u32 probe_interval;
+
+ __be16 udp_port;
+ __be16 encap_port;
/* This is the max_retrans value for new associations. */
__u16 pathmaxrxt;
@@ -226,20 +230,19 @@ struct sctp_sock {
data_ready_signalled:1;
atomic_t pd_mode;
+
+ /* Fields after this point will be skipped on copies, like on accept
+ * and peeloff operations
+ */
+
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
- /* These must be the last fields, as they will skipped on copies,
- * like on accept and peeloff operations
- */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
-static inline struct sctp_sock *sctp_sk(const struct sock *sk)
-{
- return (struct sctp_sock *)sk;
-}
+#define sctp_sk(ptr) container_of_const(ptr, struct sctp_sock, inet.sk)
static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
{
@@ -326,7 +329,7 @@ struct sctp_cookie {
* the association TCB is re-constructed from the cookie.
*/
__u32 raw_addr_list_len;
- struct sctp_init_chunk peer_init[];
+ /* struct sctp_init_chunk peer_init[]; */
};
@@ -380,6 +383,7 @@ struct sctp_sender_hb_info {
union sctp_addr daddr;
unsigned long sent_at;
__u64 hb_nonce;
+ __u32 probe_size;
};
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
@@ -456,7 +460,7 @@ struct sctp_af {
int saddr);
void (*from_sk) (union sctp_addr *,
struct sock *sk);
- void (*from_addr_param) (union sctp_addr *,
+ bool (*from_addr_param) (union sctp_addr *,
union sctp_addr_param *,
__be16 port, int iif);
int (*to_addr_param) (const union sctp_addr *,
@@ -470,6 +474,7 @@ struct sctp_af {
int (*available) (union sctp_addr *,
struct sctp_sock *);
int (*skb_iif) (const struct sk_buff *sk);
+ int (*skb_sdif)(const struct sk_buff *sk);
int (*is_ce) (const struct sk_buff *sk);
void (*seq_dump_addr)(struct seq_file *seq,
union sctp_addr *addr);
@@ -651,6 +656,7 @@ struct sctp_chunk {
data_accepted:1, /* At least 1 chunk accepted */
auth:1, /* IN: was auth'ed | OUT: needs auth */
has_asconf:1, /* IN: have seen an asconf before */
+ pmtu_probe:1, /* Used by PLPMTUD, can be set in a HB chunk */
tsn_missing_report:2, /* Data chunk missing counter. */
fast_retransmit:2; /* Is this chunk fast retransmitted? */
};
@@ -853,6 +859,7 @@ struct sctp_transport {
* the destination address every heartbeat interval.
*/
unsigned long hbinterval;
+ unsigned long probe_interval;
/* SACK delay timeout */
unsigned long sackdelay;
@@ -875,6 +882,8 @@ struct sctp_transport {
*/
unsigned long last_time_ecne_reduced;
+ __be16 encap_port;
+
/* This is the max_retrans value for the transport and will
* be initialized from the assocs value. This can be changed
* using the SCTP_SET_PEER_ADDR_PARAMS socket option.
@@ -927,6 +936,9 @@ struct sctp_transport {
/* Timer to handle reconf chunk rtx */
struct timer_list reconf_timer;
+ /* Timer to send a probe HB packet for PLPMTUD */
+ struct timer_list probe_timer;
+
/* Since we're using per-destination retransmission timers
* (see above), we're also using per-destination "transmitted"
* queues. This probably ought to be a private struct
@@ -969,6 +981,14 @@ struct sctp_transport {
char cacc_saw_newack;
} cacc;
+ struct {
+ __u16 pmtu;
+ __u16 probe_size;
+ __u16 probe_high;
+ __u8 probe_count;
+ __u8 state;
+ } pl; /* plpmtud related */
+
/* 64-bit random number sent with heartbeat. */
__u64 hb_nonce;
@@ -986,6 +1006,8 @@ void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_t3_rtx(struct sctp_transport *);
void sctp_transport_reset_hb_timer(struct sctp_transport *);
void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport);
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1000,6 +1022,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
+void sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
/* This is the structure we use to queue packets as they come into
@@ -1095,8 +1119,6 @@ void sctp_outq_free(struct sctp_outq*);
void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
-void sctp_outq_restart(struct sctp_outq *);
-
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
enum sctp_retransmit_reason reason);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
@@ -1115,13 +1137,14 @@ static inline void sctp_outq_cork(struct sctp_outq *q)
*/
struct sctp_input_cb {
union {
- struct inet_skb_parm h4;
+ struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
+ struct inet6_skb_parm h6;
#endif
} header;
struct sctp_chunk *chunk;
struct sctp_af *af;
+ __be16 encap_port;
};
#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
@@ -1216,10 +1239,6 @@ enum sctp_endpoint_type {
*/
struct sctp_ep_common {
- /* Fields to help us manage our entries in the hash tables. */
- struct hlist_node node;
- int hashent;
-
/* Runtime type information. What kind of endpoint is this? */
enum sctp_endpoint_type type;
@@ -1271,6 +1290,10 @@ struct sctp_endpoint {
/* Common substructure for endpoint and association. */
struct sctp_ep_common base;
+ /* Fields to help us manage our entries in the hash tables. */
+ struct hlist_node node;
+ int hashent;
+
/* Associations: A list of current associations and mappings
* to the data consumers for each association. This
* may be in the form of a hash table or other
@@ -1327,16 +1350,7 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
-
- /* Security identifiers from incoming (INIT). These are set by
- * security_sctp_assoc_request(). These will only be used by
- * SCTP TCP type sockets and peeled off connections as they
- * cause a new socket to be generated. security_sctp_sk_clone()
- * will then plug these into the new socket.
- */
-
- u32 secid;
- u32 peer_secid;
+ struct rcu_head rcu;
};
/* Recover the outer endpoint structure. */
@@ -1352,7 +1366,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
void sctp_endpoint_free(struct sctp_endpoint *);
void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
@@ -1360,10 +1374,12 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
struct sctp_transport **);
bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
const union sctp_addr *paddr);
-struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
- struct net *, const union sctp_addr *);
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+ struct net *net,
+ const union sctp_addr *laddr,
+ int dif, int sdif);
bool sctp_has_association(struct net *net, const union sctp_addr *laddr,
- const union sctp_addr *paddr);
+ const union sctp_addr *paddr, int dif, int sdif);
int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
@@ -1391,6 +1407,7 @@ struct sctp_stream_priorities {
/* The next stream in line */
struct sctp_stream_out_ext *next;
__u16 prio;
+ __u16 users;
};
struct sctp_stream_out_ext {
@@ -1407,6 +1424,11 @@ struct sctp_stream_out_ext {
struct {
struct list_head rr_list;
};
+ struct {
+ struct list_head fc_list;
+ __u32 fc_length;
+ __u16 fc_weight;
+ };
};
};
@@ -1453,6 +1475,9 @@ struct sctp_stream {
/* The next stream in line */
struct sctp_stream_out_ext *rr_next;
};
+ struct {
+ struct list_head fc_list;
+ };
};
struct sctp_stream_interleave *si;
};
@@ -1681,7 +1706,6 @@ struct sctp_association {
__u16 ecn_capable:1, /* Can peer do ECN? */
ipv4_address:1, /* Peer understands IPv4 addresses? */
ipv6_address:1, /* Peer understands IPv6 addresses? */
- hostname_address:1, /* Peer understands DNS addresses? */
asconf_capable:1, /* Does peer support ADDIP? */
prsctp_capable:1, /* Can peer do PR-SCTP? */
reconf_capable:1, /* Can peer do RE-CONFIG? */
@@ -1787,6 +1811,9 @@ struct sctp_association {
* will be inherited by all new transports.
*/
unsigned long hbinterval;
+ unsigned long probe_interval;
+
+ __be16 encap_port;
/* This is the max_retrans value for new transports in the
* association.
@@ -2073,6 +2100,16 @@ struct sctp_association {
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
+
struct rcu_head rcu;
};
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 0eaf8650e3b2..60f6641290c3 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -35,8 +35,7 @@ struct sctp_ulpq {
};
/* Prototypes. */
-struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
- struct sctp_association *);
+void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc);
void sctp_ulpq_flush(struct sctp_ulpq *ulpq);
void sctp_ulpq_free(struct sctp_ulpq *);
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index d7d2495f83c2..21e7fa2a1813 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -4,8 +4,10 @@
#include <linux/types.h>
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+struct net;
+
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 9d19c15e8545..af668f17b398 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -58,9 +58,30 @@ extern int seg6_local_init(void);
extern void seg6_local_exit(void);
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
+extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
+
+/* If the packet which invoked an ICMP error contains an SRH, return
+ * the true destination address from within the SRH; otherwise return
+ * NULL so the caller falls back to the destination address in the
+ * IP header.
+ */
+static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
+ struct inet6_skb_parm *opt)
+{
+ struct ipv6_sr_hdr *srh;
+
+ if (opt->flags & IP6SKB_SEG6) {
+ srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
+ return &srh->segments[0];
+ }
+
+ return NULL;
+}
+
#endif
diff --git a/include/net/selftests.h b/include/net/selftests.h
new file mode 100644
index 000000000000..e65e8d230d33
--- /dev/null
+++ b/include/net/selftests.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_SELFTESTS
+#define _NET_SELFTESTS
+
+#include <linux/ethtool.h>
+
+#if IS_ENABLED(CONFIG_NET_SELFTESTS)
+
+void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf);
+int net_selftest_get_count(void);
+void net_selftest_get_strings(u8 *data);
+
+#else
+
+static inline void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf)
+{
+}
+
+static inline int net_selftest_get_count(void)
+{
+ return 0;
+}
+
+static inline void net_selftest_get_strings(u8 *data)
+{
+}
+
+#endif
+#endif /* _NET_SELFTESTS */
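A sketch of how a driver could wire these hooks into its ethtool operations (the mydrv_* names are illustrative, not part of this header):

static void mydrv_self_test(struct net_device *ndev,
			    struct ethtool_test *etest, u64 *buf)
{
	net_selftest(ndev, etest, buf);
}

static int mydrv_get_sset_count(struct net_device *ndev, int sset)
{
	return sset == ETH_SS_TEST ? net_selftest_get_count() : -EOPNOTSUPP;
}

static void mydrv_get_strings(struct net_device *ndev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_TEST)
		net_selftest_get_strings(data);
}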
diff --git a/include/net/smc.h b/include/net/smc.h
index 646feb4bc75f..c9dcb30e3fd9 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -11,6 +11,14 @@
#ifndef _SMC_H
#define _SMC_H
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include "linux/ism.h"
+
+struct sock;
+
#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
struct smc_hashinfo {
@@ -37,39 +45,42 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
-#define ISM_ERROR 0xFFFF
+#define ISM_RESERVED_VLANID 0x1FFF
-struct smcd_event {
- u32 type;
- u32 code;
- u64 tok;
- u64 time;
- u64 info;
-};
+#define ISM_ERROR 0xFFFF
struct smcd_dev;
+struct ism_client;
+
+struct smcd_gid {
+ u64 gid;
+ u64 gid_ext;
+};
struct smcd_ops {
- int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
- u32 vid);
- int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 vid_valid, u32 vid);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
+ struct ism_client *client);
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
int (*set_vlan_required)(struct smcd_dev *dev);
int (*reset_vlan_required)(struct smcd_dev *dev);
- int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info);
+ int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
+ int (*supports_v2)(void);
+ void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid);
+ u16 (*get_chid)(struct smcd_dev *dev);
+ struct device* (*get_dev)(struct smcd_dev *dev);
};
struct smcd_dev {
const struct smcd_ops *ops;
- struct device dev;
void *priv;
- u64 local_gid;
struct list_head list;
spinlock_t lock;
struct smc_connection **conn;
@@ -84,11 +95,4 @@ struct smcd_dev {
u8 going_away : 1;
};
-struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
- const struct smcd_ops *ops, int max_dmbs);
-int smcd_register_dev(struct smcd_dev *smcd);
-void smcd_unregister_dev(struct smcd_dev *smcd);
-void smcd_free_dev(struct smcd_dev *smcd);
-void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
-void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
#endif /* _SMC_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index 064637d1ddf6..b5e00702acc1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -56,18 +56,19 @@
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
-#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
#include <linux/sockptr.h>
-
+#include <linux/indirect_call_wrapper.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/llist.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/l3mdev.h>
+#include <uapi/linux/socket.h>
/*
* This structure really needs to be cleaned up.
@@ -75,19 +76,6 @@
* the other protocols.
*/
-/* Define this to get the SOCK_DBG debugging facility. */
-#define SOCK_DEBUGGING
-#ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
- printk(KERN_DEBUG msg); } while (0)
-#else
-/* Validate arguments and do nothing */
-static inline __printf(2, 3)
-void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
-{
-}
-#endif
-
/* This is the per-socket lock. The spinlock provides a synchronization
* between user contexts and software interrupt processing, whereas the
* mini-semaphore synchronizes multiple users amongst themselves.
@@ -160,9 +148,6 @@ typedef __u64 __bitwise __addrpair;
* for struct sock and struct inet_timewait_sock.
*/
struct sock_common {
- /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
- * address on 64bit arches : cf INET_MATCH()
- */
union {
__addrpair skc_addrpair;
struct {
@@ -226,7 +211,7 @@ struct sock_common {
struct hlist_nulls_node skc_nulls_node;
};
unsigned short skc_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
unsigned short skc_rx_queue_mapping;
#endif
union {
@@ -246,7 +231,8 @@ struct sock_common {
/* public: */
};
-struct bpf_sk_storage;
+struct bpf_local_storage;
+struct sk_filter;
/**
* struct sock - network layer representation of sockets
@@ -258,10 +244,11 @@ struct bpf_sk_storage;
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -269,6 +256,7 @@ struct bpf_sk_storage;
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@ -276,14 +264,10 @@ struct bpf_sk_storage;
* @sk_pacing_status: Pacing status (requested, handled by sch_fq)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
- * @__sk_flags_offset: empty field used to determine location of bitfield
- * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
- * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
- * @sk_route_forced_caps: static, forced route capabilities
- * (set in tcp_init_sock())
+ * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
@@ -301,34 +285,43 @@ struct bpf_sk_storage;
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
+ * @sk_prefer_busy_poll: prefer busypolling over softirq processing
+ * @sk_busy_poll_budget: napi processing budget when busypolling
* @sk_priority: %SO_PRIORITY setting
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
+ * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
* @sk_peer_pid: &struct pid for this socket's peer
* @sk_peer_cred: %SO_PEERCRED setting
* @sk_rcvlowat: %SO_RCVLOWAT setting
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
* @sk_txhash: computed flow hash for use on transmit
+ * @sk_txrehash: enable TX hash rethink
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
- * @sk_tsflags: SO_TIMESTAMPING socket options
+ * @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_use_task_frag: allow sk_page_frag() to use current->task_frag.
+ * Sockets that can be used under memory reclaim should
+ * set this to false.
+ * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
+ * for timestamping
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
- * @sk_user_data: RPC layer private data
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
* @sk_memcg: this socket's memory cgroup association
* @sk_write_pending: a write to stream socket waits to start
+ * @sk_disconnects: number of disconnect operations performed on this sock
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
* @sk_write_space: callback to indicate there is buffer sending space available
@@ -343,6 +336,7 @@ struct bpf_sk_storage;
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
+ * @ns_tracker: tracker for netns reference
*/
struct sock {
/*
@@ -354,7 +348,7 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
#endif
@@ -384,11 +378,11 @@ struct sock {
#define sk_flags __sk_common.skc_flags
#define sk_rxhash __sk_common.skc_rxhash
- socket_lock_t sk_lock;
+ __cacheline_group_begin(sock_write_rx);
+
atomic_t sk_drops;
- int sk_rcvlowat;
+ __s32 sk_peek_off;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -406,12 +400,21 @@ struct sock {
} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
- int sk_forward_alloc;
+ __cacheline_group_end(sock_write_rx);
+
+ __cacheline_group_begin(sock_read_rx);
+ /* early demux fields */
+ struct dst_entry __rcu *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
- /* ===== mostly read cache line ===== */
unsigned int sk_napi_id;
+ u16 sk_busy_poll_budget;
+ u8 sk_prefer_busy_poll;
#endif
+ u8 sk_userlocks;
int sk_rcvbuf;
struct sk_filter __rcu *sk_filter;
@@ -421,15 +424,33 @@ struct sock {
struct socket_wq *sk_wq_raw;
/* public: */
};
+
+ void (*sk_data_ready)(struct sock *sk);
+ long sk_rcvtimeo;
+ int sk_rcvlowat;
+ __cacheline_group_end(sock_read_rx);
+
+ __cacheline_group_begin(sock_read_rxtx);
+ int sk_err;
+ struct socket *sk_socket;
+ struct mem_cgroup *sk_memcg;
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct dst_entry *sk_rx_dst;
- struct dst_entry __rcu *sk_dst_cache;
+ __cacheline_group_end(sock_read_rxtx);
+
+ __cacheline_group_begin(sock_write_rxtx);
+ socket_lock_t sk_lock;
+ u32 sk_reserved_mem;
+ int sk_forward_alloc;
+ u32 sk_tsflags;
+ __cacheline_group_end(sock_write_rxtx);
+
+ __cacheline_group_begin(sock_write_tx);
+ int sk_write_pending;
atomic_t sk_omem_alloc;
int sk_sndbuf;
- /* ===== cache line for TX ===== */
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
@@ -437,89 +458,90 @@ struct sock {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
- __s32 sk_peek_off;
- int sk_write_pending;
- __u32 sk_dst_pending_confirm;
+ u32 sk_dst_pending_confirm;
u32 sk_pacing_status; /* see enum sk_pacing */
- long sk_sndtimeo;
+ struct page_frag sk_frag;
struct timer_list sk_timer;
- __u32 sk_priority;
- __u32 sk_mark;
+
unsigned long sk_pacing_rate; /* bytes per second */
+ atomic_t sk_zckey;
+ atomic_t sk_tskey;
+ __cacheline_group_end(sock_write_tx);
+
+ __cacheline_group_begin(sock_read_tx);
unsigned long sk_max_pacing_rate;
- struct page_frag sk_frag;
+ long sk_sndtimeo;
+ u32 sk_priority;
+ u32 sk_mark;
+ struct dst_entry __rcu *sk_dst_cache;
netdev_features_t sk_route_caps;
- netdev_features_t sk_route_nocaps;
- netdev_features_t sk_route_forced_caps;
- int sk_gso_type;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
+ u16 sk_gso_type;
+ u16 sk_gso_max_segs;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
- __u32 sk_txhash;
+ u32 sk_txhash;
+ u8 sk_pacing_shift;
+ bool sk_use_task_frag;
+ __cacheline_group_end(sock_read_tx);
/*
* Because of non atomicity rules, all
* changes are protected by socket lock.
*/
- u8 sk_padding : 1,
+ u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
- sk_no_check_rx : 1,
- sk_userlocks : 4;
- u8 sk_pacing_shift;
+ sk_no_check_rx : 1;
+ u8 sk_shutdown;
u16 sk_type;
u16 sk_protocol;
- u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
- int sk_err,
- sk_err_soft;
+ int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
+ spinlock_t sk_peer_lock;
+ int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
- long sk_rcvtimeo;
+
ktime_t sk_stamp;
#if BITS_PER_LONG==32
seqlock_t sk_stamp_seq;
#endif
- u16 sk_tsflags;
- u8 sk_shutdown;
- u32 sk_tskey;
- atomic_t sk_zckey;
+ int sk_disconnects;
+ u8 sk_txrehash;
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;
- struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
struct sock_cgroup_data sk_cgrp_data;
- struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *sk);
- void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-#endif
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
- struct bpf_sk_storage __rcu *sk_bpf_storage;
+ struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
struct rcu_head sk_rcu;
+ netns_tracker ns_tracker;
};
enum sk_pacing {
@@ -528,14 +550,26 @@ enum sk_pacing {
SK_PACING_FQ = 2,
};
-/* Pointer stored in sk_user_data might not be suitable for copying
- * when cloning the socket. For instance, it can point to a reference
- * counted object. sk_user_data bottom bit is set if pointer must not
- * be copied.
+/* flag bits in sk_user_data
+ *
+ * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
+ * not be suitable for copying when cloning the socket. For instance,
+ * it can point to a reference counted object. sk_user_data bottom
+ * bit is set if pointer must not be copied.
+ *
+ * - SK_USER_DATA_BPF: Marks the sk_user_data field as
+ * managed/owned by a BPF reuseport array. This bit should be set
+ * when the socket is added to a BPF reuseport_array.
+ *
+ * - SK_USER_DATA_PSOCK: Marks the pointer stored in
+ * sk_user_data as pointing to a psock object. This bit should be
+ * set when sk_user_data is assigned a psock pointer.
*/
#define SK_USER_DATA_NOCOPY 1UL
-#define SK_USER_DATA_BPF 2UL /* Managed by BPF */
-#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
+#define SK_USER_DATA_BPF 2UL
+#define SK_USER_DATA_PSOCK 4UL
+#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
+ SK_USER_DATA_PSOCK)
/**
* sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -548,24 +582,77 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+/**
+ * __locked_read_sk_user_data_with_flags - return the sk_user_data pointer
+ * only if all bits in @flags are set in sk_user_data; otherwise
+ * return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ *
+ * The caller must be holding sk->sk_callback_lock.
+ */
+static inline void *
+__locked_read_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data =
+ (uintptr_t)rcu_dereference_check(__sk_user_data(sk),
+ lockdep_is_held(&sk->sk_callback_lock));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
+/**
+ * __rcu_dereference_sk_user_data_with_flags - return the sk_user_data pointer
+ * only if all bits in @flags are set in sk_user_data; otherwise
+ * return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ */
+static inline void *
+__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
#define rcu_dereference_sk_user_data(sk) \
+ __rcu_dereference_sk_user_data_with_flags(sk, 0)
+#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
({ \
- void *__tmp = rcu_dereference(__sk_user_data((sk))); \
- (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
-})
-#define rcu_assign_sk_user_data(sk, ptr) \
-({ \
- uintptr_t __tmp = (uintptr_t)(ptr); \
- WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
- rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
-})
-#define rcu_assign_sk_user_data_nocopy(sk, ptr) \
-({ \
- uintptr_t __tmp = (uintptr_t)(ptr); \
- WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
+ uintptr_t __tmp1 = (uintptr_t)(ptr), \
+ __tmp2 = (uintptr_t)(flags); \
+ WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
+ WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
rcu_assign_pointer(__sk_user_data((sk)), \
- __tmp | SK_USER_DATA_NOCOPY); \
+ __tmp1 | __tmp2); \
})
+#define rcu_assign_sk_user_data(sk, ptr) \
+ __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
+
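A usage sketch for the tagged-pointer helpers above (the psock variable is hypothetical; pointers stored this way must be at least 8-byte aligned so the low bits stay free for the flag bits):

/* Store a pointer tagged as a psock: */
__rcu_assign_sk_user_data_with_flags(sk, psock,
				     SK_USER_DATA_NOCOPY | SK_USER_DATA_PSOCK);

/* Under rcu_read_lock(), read it back only if the PSOCK bit is set;
 * NULL is returned when the tag is absent.
 */
psock = __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_PSOCK);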
+static inline
+struct net *sock_net(const struct sock *sk)
+{
+ return read_pnet(&sk->sk_net);
+}
+
+static inline
+void sock_net_set(struct sock *sk, struct net *net)
+{
+ write_pnet(&sk->sk_net, net);
+}
/*
* SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
@@ -580,7 +667,7 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
int sk_set_peek_off(struct sock *sk, int val);
-static inline int sk_peek_offset(struct sock *sk, int flags)
+static inline int sk_peek_offset(const struct sock *sk, int flags)
{
if (unlikely(flags & MSG_PEEK)) {
return READ_ONCE(sk->sk_peek_off);
@@ -660,11 +747,6 @@ static inline void sk_node_init(struct hlist_node *node)
node->pprev = NULL;
}
-static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
-{
- node->pprev = NULL;
-}
-
static inline void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->sk_node);
@@ -820,7 +902,7 @@ static inline void sk_add_bind_node(struct sock *sk,
({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
pos = rcu_dereference(hlist_next_rcu(pos)))
-static inline struct user_namespace *sk_user_ns(struct sock *sk)
+static inline struct user_namespace *sk_user_ns(const struct sock *sk)
{
/* Careful only use this in a context where these parameters
* can not change and must all be valid, such as recvmsg from
@@ -845,7 +927,6 @@ enum sock_flags {
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
- SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
SOCK_FASYNC, /* fasync() active */
@@ -862,11 +943,12 @@ enum sock_flags {
SOCK_TXTIME,
SOCK_XDP, /* XDP is attached */
SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
+ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
-static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
}
@@ -929,6 +1011,10 @@ static inline void sk_acceptq_added(struct sock *sk)
WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
+/* Note: If you think the test should be:
+ * return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
@@ -952,6 +1038,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
@@ -1003,12 +1095,18 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
if (sk_memalloc_socks() && skb_pfmemalloc(skb))
return __sk_backlog_rcv(sk, skb);
- return sk->sk_backlog_rcv(sk, skb);
+ return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
}
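INDIRECT_CALL_INET() here compares the sk_backlog_rcv pointer against the two listed candidates and makes a direct call on a match, so the common TCP paths avoid an indirect branch (expensive under retpolines) and fall back to a normal indirect call otherwise.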
static inline void sk_incoming_cpu_update(struct sock *sk)
@@ -1019,56 +1117,29 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
-static inline void sock_rps_record_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
- struct rps_sock_flow_table *sock_flow_table;
-
- rcu_read_lock();
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_record_sock_flow(sock_flow_table, hash);
- rcu_read_unlock();
-#endif
-}
-
-static inline void sock_rps_record_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
- if (static_branch_unlikely(&rfs_needed)) {
- /* Reading sk->sk_rxhash might incur an expensive cache line
- * miss.
- *
- * TCP_ESTABLISHED does cover almost all states where RFS
- * might be useful, and is cheaper [1] than testing :
- * IPv4: inet_sk(sk)->inet_daddr
- * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
- * OR an additional socket flag
- * [1] : sk_state and sk_prot are in the same cache line.
- */
- if (sk->sk_state == TCP_ESTABLISHED)
- sock_rps_record_flow_hash(sk->sk_rxhash);
- }
-#endif
-}
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != skb->hash))
- sk->sk_rxhash = skb->hash;
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in sock_rps_record_flow().
+ */
+ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+ WRITE_ONCE(sk->sk_rxhash, skb->hash);
#endif
}
static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
- sk->sk_rxhash = 0;
+ /* Paired with READ_ONCE() in sock_rps_record_flow() */
+ WRITE_ONCE(sk->sk_rxhash, 0);
#endif
}
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
- ({ int __rc; \
+ ({ int __rc, __dis = __sk->sk_disconnects; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
@@ -1078,7 +1149,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
- __rc = __condition; \
+ __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
__rc; \
})
@@ -1109,6 +1180,7 @@ struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;
+struct sk_psock;
/*
* caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
@@ -1140,7 +1212,7 @@ struct proto {
bool kern);
int (*ioctl)(struct sock *sk, int cmd,
- unsigned long arg);
+ int *karg);
int (*init)(struct sock *sk);
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
@@ -1158,10 +1230,8 @@ struct proto {
int (*sendmsg)(struct sock *sk, struct msghdr *msg,
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags,
- int *addr_len);
- int (*sendpage)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
+ size_t len, int flags, int *addr_len);
+ void (*splice_eof)(struct socket *sock);
int (*bind)(struct sock *sk,
struct sockaddr *addr, int addr_len);
int (*bind_add)(struct sock *sk,
@@ -1169,6 +1239,8 @@ struct proto {
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
+ bool (*bpf_bypass_getsockopt)(int level,
+ int optname);
void (*release_cb)(struct sock *sk);
@@ -1177,22 +1249,35 @@ struct proto {
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
+ void (*put_port)(struct sock *sk);
+#ifdef CONFIG_BPF_SYSCALL
+ int (*psock_update_sk_prot)(struct sock *sk,
+ struct sk_psock *psock,
+ bool restore);
+#endif
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
unsigned int inuse_idx;
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+ int (*forward_alloc_get)(const struct sock *sk);
+#endif
+
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
+ int __percpu *per_cpu_fw_alloc;
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
+ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
@@ -1209,11 +1294,12 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
+ unsigned int ipv6_pinfo_offset;
slab_flags_t slab_flags;
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1230,9 +1316,6 @@ struct proto {
char name[32];
struct list_head node;
-#ifdef SOCK_REFCNT_DEBUG
- atomic_t socks;
-#endif
int (*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;
@@ -1240,30 +1323,16 @@ int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);
-#ifdef SOCK_REFCNT_DEBUG
-static inline void sk_refcnt_debug_inc(struct sock *sk)
-{
- atomic_inc(&sk->sk_prot->socks);
-}
+INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
-static inline void sk_refcnt_debug_dec(struct sock *sk)
+static inline int sk_forward_alloc_get(const struct sock *sk)
{
- atomic_dec(&sk->sk_prot->socks);
- printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
- sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
-}
-
-static inline void sk_refcnt_debug_release(const struct sock *sk)
-{
- if (refcount_read(&sk->sk_refcnt) != 1)
- printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
- sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
+#if IS_ENABLED(CONFIG_MPTCP)
+ if (sk->sk_prot->forward_alloc_get)
+ return sk->sk_prot->forward_alloc_get(sk);
+#endif
+ return READ_ONCE(sk->sk_forward_alloc);
}
-#else /* SOCK_REFCNT_DEBUG */
-#define sk_refcnt_debug_inc(sk) do { } while (0)
-#define sk_refcnt_debug_dec(sk) do { } while (0)
-#define sk_refcnt_debug_release(sk) do { } while (0)
-#endif /* SOCK_REFCNT_DEBUG */
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
@@ -1271,7 +1340,8 @@ static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
return false;
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk, wake) : true;
+ INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
+ tcp_stream_memory_free, sk, wake) : true;
}
static inline bool sk_stream_memory_free(const struct sock *sk)
@@ -1306,6 +1376,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
return sk->sk_prot->memory_pressure != NULL;
}
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure &&
+ !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
static inline bool sk_under_memory_pressure(const struct sock *sk)
{
if (!sk->sk_prot->memory_pressure)
@@ -1315,35 +1391,65 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
- return !!*sk->sk_prot->memory_pressure;
+ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}
static inline long
-sk_memory_allocated(const struct sock *sk)
+proto_memory_allocated(const struct proto *prot)
{
- return atomic_long_read(sk->sk_prot->memory_allocated);
+ return max(0L, atomic_long_read(prot->memory_allocated));
}
static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+ return proto_memory_allocated(sk->sk_prot);
+}
+
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
+
+static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
{
- return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
}
static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
- atomic_long_sub(amt, sk->sk_prot->memory_allocated);
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
}
+#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
+
static inline void sk_sockets_allocated_dec(struct sock *sk)
{
- percpu_counter_dec(sk->sk_prot->sockets_allocated);
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
}
static inline void sk_sockets_allocated_inc(struct sock *sk)
{
- percpu_counter_inc(sk->sk_prot->sockets_allocated);
+ percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
+ SK_ALLOC_PERCPU_COUNTER_BATCH);
}
static inline u64
@@ -1358,29 +1464,42 @@ proto_sockets_allocated_sum_positive(struct proto *prot)
return percpu_counter_sum_positive(prot->sockets_allocated);
}
-static inline long
-proto_memory_allocated(struct proto *prot)
-{
- return atomic_long_read(prot->memory_allocated);
-}
-
static inline bool
proto_memory_pressure(struct proto *prot)
{
if (!prot->memory_pressure)
return false;
- return !!*prot->memory_pressure;
+ return !!READ_ONCE(*prot->memory_pressure);
}
#ifdef CONFIG_PROC_FS
-/* Called with local bh disabled */
-void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+#define PROTO_INUSE_NR 64 /* should be enough for the first time */
+struct prot_inuse {
+ int all;
+ int val[PROTO_INUSE_NR];
+};
+
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+ this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
+{
+ this_cpu_add(net->core.prot_inuse->all, val);
+}
+
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
-static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
- int inc)
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
{
}
#endif
@@ -1405,8 +1524,6 @@ static inline int __sk_prot_rehash(struct sock *sk)
#define RCV_SHUTDOWN 1
#define SEND_SHUTDOWN 2
-#define SOCK_SNDBUF_LOCK 1
-#define SOCK_RCVBUF_LOCK 2
#define SOCK_BINDADDR_LOCK 4
#define SOCK_BINDPORT_LOCK 8
@@ -1433,30 +1550,18 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);
-/* We used to have PAGE_SIZE here, but systems with 64KB pages
- * do not necessarily have 16x time more memory than 4KB ones.
- */
-#define SK_MEM_QUANTUM 4096
-#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND 0
#define SK_MEM_RECV 1
-/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
+/* sysctl_mem values are in pages */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
- long val = sk->sk_prot->sysctl_mem[index];
-
-#if PAGE_SIZE > SK_MEM_QUANTUM
- val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
-#elif PAGE_SIZE < SK_MEM_QUANTUM
- val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
-#endif
- return val;
+ return READ_ONCE(sk->sk_prot->sysctl_mem[index]);
}
static inline int sk_mem_pages(int amt)
{
- return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
+ return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
static inline bool sk_has_account(struct sock *sk)
@@ -1467,86 +1572,71 @@ static inline bool sk_has_account(struct sock *sk)
static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
+ int delta;
+
if (!sk_has_account(sk))
return true;
- return size <= sk->sk_forward_alloc ||
- __sk_mem_schedule(sk, size, SK_MEM_SEND);
+ delta = size - sk->sk_forward_alloc;
+ return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
}
static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
+ int delta;
+
if (!sk_has_account(sk))
return true;
- return size<= sk->sk_forward_alloc ||
- __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
+ delta = size - sk->sk_forward_alloc;
+ return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
skb_pfmemalloc(skb);
}
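Worked example of the delta logic above: with sk_forward_alloc == 3000 bytes and a 5000-byte request, delta == 2000, so __sk_mem_schedule() only has to reserve enough pages to cover the 2000-byte shortfall rather than the full request size.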
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= (int)PAGE_SIZE)
+ __sk_mem_reclaim(sk, reclaimable);
}
-static inline void sk_mem_reclaim_partial(struct sock *sk)
+static inline void sk_mem_reclaim_final(struct sock *sk)
{
- if (!sk_has_account(sk))
- return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
}
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc += size;
-
- /* Avoid a possible overflow.
- * TCP send queues can make this happen, if sk_mem_reclaim()
- * is not called and more than 2 GBytes are released at once.
- *
- * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
- * no need to hold that much forward allocation anyway.
- */
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
- __sk_mem_reclaim(sk, 1 << 20);
-}
-
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
-{
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
- sk_wmem_queued_add(sk, -skb->truesize);
- sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
- __kfree_skb(skb);
-}
-
-static inline void sock_release_ownership(struct sock *sk)
-{
- if (sk->sk_lock.owned) {
- sk->sk_lock.owned = 0;
-
- /* The sk_lock has mutex_unlock() semantics: */
- mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
- }
+ sk_forward_alloc_add(sk, size);
+ sk_mem_reclaim(sk);
}
/*
@@ -1568,13 +1658,11 @@ do { \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
-#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
return lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
-#endif
void lock_sock_nested(struct sock *sk, int subclass);
@@ -1583,6 +1671,7 @@ static inline void lock_sock(struct sock *sk)
lock_sock_nested(sk, 0);
}
+void __lock_sock(struct sock *sk);
void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);
@@ -1593,7 +1682,37 @@ void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-bool lock_sock_fast(struct sock *sk);
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the process
+ * won't block. Returns false if the fast path is taken:
+ *
+ * sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * Returns true if the slow path is taken:
+ *
+ * sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
/**
* unlock_sock_fast - complement of lock_sock_fast
* @sk: socket
@@ -1603,13 +1722,22 @@ bool lock_sock_fast(struct sock *sk);
* If slow mode is on, we call regular release_sock()
*/
static inline void unlock_sock_fast(struct sock *sk, bool slow)
+ __releases(&sk->sk_lock.slock)
{
- if (slow)
+ if (slow) {
release_sock(sk);
- else
+ __release(&sk->sk_lock.slock);
+ } else {
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
spin_unlock_bh(&sk->sk_lock.slock);
+ }
}
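The canonical pairing of the two helpers (a minimal sketch):

	bool slow = lock_sock_fast(sk);

	/* short critical section; must not block on the fast path */

	unlock_sock_fast(sk, slow);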
+void sockopt_lock_sock(struct sock *sk);
+void sockopt_release_sock(struct sock *sk);
+bool sockopt_ns_capable(struct user_namespace *ns, int cap);
+bool sockopt_capable(int cap);
+
/* Used by processes to "lock" a socket state, so that
* interrupts and bottom half handlers won't change it
* from under us. It essentially blocks any incoming
@@ -1642,12 +1770,22 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
return sk->sk_lock.owned;
}
+static inline void sock_release_ownership(struct sock *sk)
+{
+ DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk));
+ sk->sk_lock.owned = 0;
+
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+}
+
/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
struct sock *sk = (struct sock *)csk;
- return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+ return !sock_owned_by_user_nocheck(sk) &&
+ !spin_is_locked(&sk->sk_lock.slock);
}
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
@@ -1673,36 +1811,57 @@ void sock_pfree(struct sk_buff *skb);
#define sock_edemux sock_efree
#endif
+int sk_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
int sock_setsockopt(struct socket *sock, int level, int op,
sockptr_t optval, unsigned int optlen);
+int do_sock_setsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, int optlen);
+int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
-int sock_getsockopt(struct socket *sock, int level, int op,
- char __user *optval, int __user *optlen);
+int sk_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32);
-struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
- int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
int *errcode, int max_page_order);
+
+static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+ unsigned long size,
+ int noblock, int *errcode)
+{
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
+}
+
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
+static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
+{
+ if (sk->sk_socket)
+ clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ WRITE_ONCE(sk->sk_prot, proto);
+}
+
struct sockcm_cookie {
u64 transmit_time;
u32 mark;
- u16 tsflags;
+ u32 tsflags;
};
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
- *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags)
+ };
}
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
struct sockcm_cookie *sockc);
@@ -1724,10 +1883,6 @@ int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
-ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
/*
* Functions to fill in entries in struct proto_ops when a protocol
@@ -1746,7 +1901,12 @@ void sk_common_release(struct sock *sk);
* Default socket callbacks and setup code
*/
-/* Initialise core socket variables */
+/* Initialise core socket variables using an explicit uid. */
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
+
+/* Initialise core socket variables.
+ * Assumes struct socket *sock is embedded in a struct socket_alloc.
+ */
void sock_init_data(struct socket *sock, struct sock *sk);
/*
@@ -1798,54 +1958,81 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
/* sk_tx_queue_mapping accepts only up to a 16-bit value */
if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
return;
- sk->sk_tx_queue_mapping = tx_queue;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
}
#define NO_QUEUE_MAPPING USHRT_MAX
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_tx_queue_mapping;
+ if (sk) {
+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+ * and sk_tx_queue_set().
+ */
+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ if (val != NO_QUEUE_MAPPING)
+ return val;
+ }
return -1;
}
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+ const struct sk_buff *skb,
+ bool force_set)
{
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (force_set ||
+ unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, false);
+}
+
static inline void sk_rx_queue_clear(struct sock *sk)
{
-#ifdef CONFIG_XPS
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
-#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
+#endif
return -1;
}
-#endif
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
@@ -1886,6 +2073,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
}
kuid_t sock_i_uid(struct sock *sk);
+unsigned long __sock_i_ino(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
@@ -1895,66 +2083,74 @@ static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
static inline u32 net_tx_rndhash(void)
{
- u32 v = prandom_u32();
+ u32 v = get_random_u32();
return v ?: 1;
}
static inline void sk_set_txhash(struct sock *sk)
{
- sk->sk_txhash = net_tx_rndhash();
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
}
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
{
- if (sk->sk_txhash)
+ if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) {
sk_set_txhash(sk);
+ return true;
+ }
+ return false;
}
static inline struct dst_entry *
-__sk_dst_get(struct sock *sk)
+__sk_dst_get(const struct sock *sk)
{
return rcu_dereference_check(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
}
static inline struct dst_entry *
-sk_dst_get(struct sock *sk)
+sk_dst_get(const struct sock *sk)
{
struct dst_entry *dst;
rcu_read_lock();
dst = rcu_dereference(sk->sk_dst_cache);
- if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+ if (dst && !rcuref_get(&dst->__rcuref))
dst = NULL;
rcu_read_unlock();
return dst;
}
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
- sk_rethink_txhash(sk);
-
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
if (ndst != dst) {
rcu_assign_pointer(sk->sk_dst_cache, ndst);
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
}
+static inline void dst_negative_advice(struct sock *sk)
+{
+ sk_rethink_txhash(sk);
+ __dst_negative_advice(sk);
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = rcu_dereference_protected(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_dst_cache, dst);
@@ -1967,7 +2163,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
dst_release(old_dst);
}
@@ -1998,17 +2194,14 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
if (skb_get_dst_pending_confirm(skb)) {
struct sock *sk = skb->sk;
- unsigned long now = jiffies;
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ neigh_confirm(n);
}
}
-bool sk_mc_loop(struct sock *sk);
+bool sk_mc_loop(const struct sock *sk);
static inline bool sk_can_gso(const struct sock *sk)
{
@@ -2017,10 +2210,10 @@ static inline bool sk_can_gso(const struct sock *sk)
void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
-static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+static inline void sk_gso_disable(struct sock *sk)
{
- sk->sk_route_nocaps |= flags;
- sk->sk_route_caps &= ~flags;
+ sk->sk_gso_disabled = 1;
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
@@ -2066,9 +2259,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
if (err)
return err;
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
+ skb_len_add(skb, copy);
sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
return 0;
@@ -2167,9 +2358,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
- if (sk->sk_txhash) {
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
skb->l4_hash = 1;
- skb->hash = sk->sk_txhash;
+ skb->hash = txhash;
}
}
@@ -2192,17 +2386,59 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb_orphan(skb);
+ skb->destructor = sock_efree;
+ skb->sk = sk;
+ return true;
+ }
+ return false;
+}
+
+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+ if (skb) {
+ if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+ skb_set_owner_r(skb, sk);
+ return skb;
+ }
+ __kfree_skb(skb);
+ }
+ return NULL;
+}
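A hedged usage sketch for skb_clone_and_charge_r() (the caller below is hypothetical): the helper hands back a clone already charged to sk's receive memory, or NULL when the clone fails or rmem scheduling is refused.

	static int example_queue_clone(struct sock *sk, struct sk_buff *skb)
	{
		struct sk_buff *clone = skb_clone_and_charge_r(skb, sk);

		if (!clone)
			return -ENOMEM;	/* clone failed or rmem over limit */

		/* caller is assumed to hold the receive queue lock */
		__skb_queue_tail(&sk->sk_receive_queue, clone);
		return 0;
	}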
+
+static inline void skb_prepare_for_gro(struct sk_buff *skb)
+{
+ if (skb->destructor != sock_wfree) {
+ skb_orphan(skb);
+ return;
+ }
+ skb->slow_gro = 1;
+}
+
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
unsigned long expires);
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
+static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return sock_queue_rcv_skb_reason(sk, skb, NULL);
+}
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
@@ -2214,12 +2450,19 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
static inline int sock_error(struct sock *sk)
{
int err;
- if (likely(!sk->sk_err))
+
+ /* Avoid an atomic operation for the common case.
+ * This is racy since another cpu/thread can change sk_err under us.
+ */
+ if (likely(data_race(!sk->sk_err)))
return 0;
+
err = xchg(&sk->sk_err, 0);
return -err;
}
+void sk_error_report(struct sock *sk);
+
static inline unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
@@ -2281,31 +2524,30 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
* Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
* everything that's associated with %current.
*
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * when users disable sk_use_task_frag.
*
* Return: a per task page_frag if context allows that,
* otherwise a per socket one.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_normal_context(sk->sk_allocation))
+ if (sk->sk_use_task_frag)
return &current->task_frag;
return &sk->sk_frag;
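As an illustration, a sendmsg-style path would pair sk_page_frag() with sk_page_frag_refill() before copying user data; a hedged sketch (example_fill is hypothetical, the two helpers are declared in this header):

	static int example_fill(struct sock *sk, struct msghdr *msg,
				struct sk_buff *skb, int copy)
	{
		struct page_frag *pfrag = sk_page_frag(sk);
		int err;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;		/* memory pressure */

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page, pfrag->offset,
					       copy);
		if (!err)
			pfrag->offset += copy;	/* consume the fragment */
		return err;
	}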
@@ -2326,6 +2568,11 @@ static inline gfp_t gfp_any(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
+static inline gfp_t gfp_memcg_charge(void)
+{
+ return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
@@ -2418,9 +2665,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
-
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t kt = skb->tstamp;
/*
* generate control messages if
* - receive time stamping in software requested
@@ -2428,35 +2675,37 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* - hardware time stamps available and wanted
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
- (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sock_write_timestamp(sk, kt);
- if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+ if (sock_flag(sk, SOCK_WIFI_STATUS) && skb_wifi_acked_valid(skb))
__sock_recv_wifi_status(msg, sk, skb);
}
-void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
-static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb)
+static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
{
-#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
- (1UL << SOCK_RCVTSTAMP))
+#define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \
+ (1UL << SOCK_RCVTSTAMP) | \
+ (1UL << SOCK_RCVMARK))
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
- __sock_recv_ts_and_drops(msg, sk, skb);
+ if (sk->sk_flags & FLAGS_RECV_CMSGS ||
+ READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
+ __sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
- else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+ else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
sock_write_timestamp(sk, 0);
}
@@ -2478,7 +2727,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = sk->sk_tskey++;
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
}
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
@@ -2496,7 +2745,32 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
+static inline bool sk_is_inet(const struct sock *sk)
+{
+ int family = READ_ONCE(sk->sk_family);
+
+ return family == AF_INET || family == AF_INET6;
+}
+
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
+static inline bool sk_is_stream_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+}
+
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2508,27 +2782,9 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
-static inline
-struct net *sock_net(const struct sock *sk)
-{
- return read_pnet(&sk->sk_net);
-}
-
-static inline
-void sock_net_set(struct sock *sk, struct net *net)
-{
- write_pnet(&sk->sk_net, net);
-}
-
static inline bool
skb_sk_is_prefetched(struct sk_buff *skb)
{
@@ -2554,28 +2810,6 @@ sk_is_refcounted(struct sock *sk)
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
-/**
- * skb_steal_sock - steal a socket from an sk_buff
- * @skb: sk_buff to steal the socket from
- * @refcounted: is set to true if the socket is reference-counted
- */
-static inline struct sock *
-skb_steal_sock(struct sk_buff *skb, bool *refcounted)
-{
- if (skb->sk) {
- struct sock *sk = skb->sk;
-
- *refcounted = true;
- if (skb_sk_is_prefetched(skb))
- *refcounted = sk_is_refcounted(sk);
- skb->destructor = NULL;
- skb->sk = NULL;
- return sk;
- }
- *refcounted = false;
- return NULL;
-}
-
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
@@ -2633,29 +2867,29 @@ extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
extern int sysctl_tstamp_allow_data;
-extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
+#define SKB_FRAG_PAGE_ORDER get_order(32768)
DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have a per-netns sysctl_wmem? */
if (proto->sysctl_wmem_offset)
- return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
- return *proto->sysctl_wmem;
+ return READ_ONCE(*proto->sysctl_wmem);
}
static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have a per-netns sysctl_rmem? */
if (proto->sysctl_rmem_offset)
- return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
- return *proto->sysctl_rmem;
+ return READ_ONCE(*proto->sysctl_rmem);
}
/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
@@ -2676,13 +2910,14 @@ static inline void sk_pacing_shift_update(struct sock *sk, int val)
*/
static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
int mdif;
- if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+ if (!bound_dev_if || bound_dev_if == dif)
return true;
mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
- if (mdif && mdif == sk->sk_bound_dev_if)
+ if (mdif && mdif == bound_dev_if)
return true;
return false;
@@ -2691,6 +2926,10 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
void sock_def_readable(struct sock *sk);
int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
+void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping);
+
void sock_enable_timestamps(struct sock *sk);
void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
@@ -2703,4 +2942,17 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
+ void __user *arg, void *karg, size_t size);
+int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
#endif /* _SOCK_H */
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 505f1e18e9bf..6ec140b0a61b 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -13,8 +13,10 @@ extern spinlock_t reuseport_lock;
struct sock_reuseport {
struct rcu_head rcu;
- u16 max_socks; /* length of socks */
- u16 num_socks; /* elements in socks */
+ u16 max_socks; /* length of socks */
+ u16 num_socks; /* elements in socks */
+ u16 num_closed_socks; /* closed elements in socks */
+ u16 incoming_cpu;
/* The last synq overflow event timestamp of this
* reuse->socks[] group.
*/
@@ -31,28 +33,32 @@ extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
+void reuseport_stop_listen_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
+struct sock *reuseport_migrate_sock(struct sock *sk,
+ struct sock *migrating_sk,
+ struct sk_buff *skb);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
{
struct sock_reuseport *reuse;
bool ret = false;
rcu_read_lock();
reuse = rcu_dereference(sk->sk_reuseport_cb);
- if (reuse) {
- if (set)
- reuse->has_conns = 1;
- ret = reuse->has_conns;
- }
+ if (reuse && reuse->has_conns)
+ ret = true;
rcu_read_unlock();
return ret;
}
+void reuseport_has_conns_set(struct sock *sk);
+void reuseport_update_incoming_cpu(struct sock *sk, int val);
+
#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/stp.h b/include/net/stp.h
index 2914e6d53490..528103fce2c0 100644
--- a/include/net/stp.h
+++ b/include/net/stp.h
@@ -2,6 +2,8 @@
#ifndef _NET_STP_H
#define _NET_STP_H
+#include <linux/if_ether.h>
+
struct stp_proto {
unsigned char group_address[ETH_ALEN];
void (*rcv)(const struct stp_proto *, struct sk_buff *,
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 1d20b98493a1..41e2ce9e9e10 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -54,10 +54,35 @@ struct strp_msg {
int offset;
};
+struct _strp_msg {
+ /* Internal cb structure. struct strp_msg must be first for passing
+ * to the upper layer.
+ */
+ struct strp_msg strp;
+ int accum_len;
+};
+
+struct sk_skb_cb {
+#define SK_SKB_CB_PRIV_LEN 20
+ unsigned char data[SK_SKB_CB_PRIV_LEN];
+ /* align strp on a cache line boundary within skb->cb[] */
+ unsigned char pad[4];
+ struct _strp_msg strp;
+
+ /* strp users' data follows */
+ struct tls_msg {
+ u8 control;
+ } tls;
+ /* temp_reg is a temporary register used for bpf_convert_data_end_access
+ * when dst_reg == src_reg.
+ */
+ u64 temp_reg;
+};
+
static inline struct strp_msg *strp_msg(struct sk_buff *skb)
{
return (struct strp_msg *)((void *)skb->cb +
- offsetof(struct qdisc_skb_cb, data));
+ offsetof(struct sk_skb_cb, strp));
}
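Because sk_skb_cb layers several users' private data inside skb->cb[], it must not outgrow the 48-byte cb array. A hedged sanity check one might keep next to the definition (mainline enforces an equivalent constraint at build time):

	static_assert(sizeof(struct sk_skb_cb) <= sizeof_field(struct sk_buff, cb),
		      "sk_skb_cb must fit within skb->cb[]");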
/* Structure for an attached lower socket */
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index ff2246914301..8346b0d29542 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -16,34 +16,36 @@
#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1)
#define SWITCHDEV_F_DEFER BIT(2)
-struct switchdev_trans {
- bool ph_prepare;
-};
-
-static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
-{
- return trans && trans->ph_prepare;
-}
-
-static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
-{
- return trans && !trans->ph_prepare;
-}
-
enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_UNDEFINED,
SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ SWITCHDEV_ATTR_ID_PORT_MST_STATE,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
SWITCHDEV_ATTR_ID_PORT_MROUTER,
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
-#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
+ SWITCHDEV_ATTR_ID_BRIDGE_MST,
SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
-#endif
+ SWITCHDEV_ATTR_ID_VLAN_MSTI,
+};
+
+struct switchdev_mst_state {
+ u16 msti;
+ u8 state;
+};
+
+struct switchdev_brport_flags {
+ unsigned long val;
+ unsigned long mask;
+};
+
+struct switchdev_vlan_msti {
+ u16 vid;
+ u16 msti;
};
struct switchdev_attr {
@@ -54,15 +56,16 @@ struct switchdev_attr {
void (*complete)(struct net_device *dev, int err, void *priv);
union {
u8 stp_state; /* PORT_STP_STATE */
- unsigned long brport_flags; /* PORT_{PRE}_BRIDGE_FLAGS */
+ struct switchdev_mst_state mst_state; /* PORT_MST_STATE */
+ struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */
bool mrouter; /* PORT_MROUTER */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
+ u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */
+ bool mst; /* BRIDGE_MST */
bool mc_disabled; /* MC_DISABLED */
-#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- u8 mrp_port_state; /* MRP_PORT_STATE */
u8 mrp_port_role; /* MRP_PORT_ROLE */
-#endif
+ struct switchdev_vlan_msti vlan_msti; /* VLAN_MSTI */
} u;
};
@@ -71,7 +74,6 @@ enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_PORT_VLAN,
SWITCHDEV_OBJ_ID_PORT_MDB,
SWITCHDEV_OBJ_ID_HOST_MDB,
-#if IS_ENABLED(CONFIG_BRIDGE_MRP)
SWITCHDEV_OBJ_ID_MRP,
SWITCHDEV_OBJ_ID_RING_TEST_MRP,
SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
@@ -79,11 +81,10 @@ enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_IN_TEST_MRP,
SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
SWITCHDEV_OBJ_ID_IN_STATE_MRP,
-
-#endif
};
struct switchdev_obj {
+ struct list_head list;
struct net_device *orig_dev;
enum switchdev_obj_id id;
u32 flags;
@@ -95,8 +96,14 @@ struct switchdev_obj {
struct switchdev_obj_port_vlan {
struct switchdev_obj obj;
u16 flags;
- u16 vid_begin;
- u16 vid_end;
+ u16 vid;
+ /* If set, the notifier signifies a change of one of the following
+ * flags for a VLAN that already exists:
+ * - BRIDGE_VLAN_INFO_PVID
+ * - BRIDGE_VLAN_INFO_UNTAGGED
+ * Entries with BRIDGE_VLAN_INFO_BRENTRY unset are not notified at all.
+ */
+ bool changed;
};
#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
@@ -113,7 +120,6 @@ struct switchdev_obj_port_mdb {
container_of((OBJ), struct switchdev_obj_port_mdb, obj)
-#if IS_ENABLED(CONFIG_BRIDGE_MRP)
/* SWITCHDEV_OBJ_ID_MRP */
struct switchdev_obj_mrp {
struct switchdev_obj obj;
@@ -145,6 +151,7 @@ struct switchdev_obj_ring_role_mrp {
struct switchdev_obj obj;
u8 ring_role;
u32 ring_id;
+ u8 sw_backup;
};
#define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \
@@ -179,6 +186,7 @@ struct switchdev_obj_in_role_mrp {
u32 ring_id;
u16 in_id;
u8 in_role;
+ u8 sw_backup;
};
#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \
@@ -193,9 +201,13 @@ struct switchdev_obj_in_state_mrp {
#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
-#endif
-
-typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
+struct switchdev_brport {
+ struct net_device *dev;
+ const void *ctx;
+ struct notifier_block *atomic_nb;
+ struct notifier_block *blocking_nb;
+ bool tx_fwd_offload;
+};
enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
@@ -203,6 +215,7 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
@@ -213,35 +226,48 @@ enum switchdev_notifier_type {
SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_OFFLOADED,
+
+ SWITCHDEV_BRPORT_OFFLOADED,
+ SWITCHDEV_BRPORT_UNOFFLOADED,
+ SWITCHDEV_BRPORT_REPLAY,
};
struct switchdev_notifier_info {
struct net_device *dev;
struct netlink_ext_ack *extack;
+ const void *ctx;
};
+/* Remember to update br_switchdev_fdb_populate() when adding
+ * new members to this structure.
+ */
struct switchdev_notifier_fdb_info {
struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
u8 added_by_user:1,
+ is_local:1,
+ locked:1,
offloaded:1;
};
struct switchdev_notifier_port_obj_info {
struct switchdev_notifier_info info; /* must be first */
const struct switchdev_obj *obj;
- struct switchdev_trans *trans;
bool handled;
};
struct switchdev_notifier_port_attr_info {
struct switchdev_notifier_info info; /* must be first */
const struct switchdev_attr *attr;
- struct switchdev_trans *trans;
bool handled;
};
+struct switchdev_notifier_brport_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_brport brport;
+};
+
static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
@@ -254,11 +280,37 @@ switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
return info->extack;
}
+static inline bool
+switchdev_fdb_is_dynamically_learned(const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ return !fdb_info->added_by_user && !fdb_info->is_local;
+}
+
#ifdef CONFIG_NET_SWITCHDEV
+int switchdev_bridge_port_offload(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ bool tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
+ const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb);
+int switchdev_bridge_port_replay(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ struct netlink_ext_ack *extack);
+
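A hedged sketch of the consumer side (the wrapper is hypothetical; the argument order follows the declarations above): a switchdev driver typically makes this call from its NETDEV_CHANGEUPPER handler when a port joins a bridge.

	static int example_port_bridge_join(struct net_device *brport_dev,
					    struct net_device *dev,
					    struct notifier_block *atomic_nb,
					    struct notifier_block *blocking_nb,
					    struct netlink_ext_ack *extack)
	{
		/* ctx NULL, no tx forwarding offload in this sketch */
		return switchdev_bridge_port_offload(brport_dev, dev, NULL,
						     atomic_nb, blocking_nb,
						     false, extack);
	}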
void switchdev_deferred_process(void);
int switchdev_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr);
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack);
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+ enum switchdev_notifier_type nt,
+ const struct switchdev_obj *obj);
int switchdev_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack);
@@ -277,37 +329,76 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info,
struct netlink_ext_ack *extack);
-void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining);
+int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
+ const struct switchdev_notifier_fdb_info *fdb_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info));
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack));
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj));
+int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj));
int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans));
+ struct netlink_ext_ack *extack));
#else
+static inline int
+switchdev_bridge_port_offload(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ bool tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+switchdev_bridge_port_unoffload(struct net_device *brport_dev,
+ const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb)
+{
+}
+
static inline void switchdev_deferred_process(void)
{
}
static inline int switchdev_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -365,12 +456,36 @@ call_switchdev_blocking_notifiers(unsigned long val,
}
static inline int
+switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
+ const struct switchdev_notifier_fdb_info *fdb_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info))
+{
+ return 0;
+}
+
+static inline int
switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack))
{
return 0;
@@ -380,7 +495,19 @@ static inline int
switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
return 0;
@@ -390,9 +517,9 @@ static inline int
switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans))
+ struct netlink_ext_ack *extack))
{
return 0;
}
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index 1f4cb477bb5d..e8dd77a96748 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -4,10 +4,15 @@
#include <net/act_api.h>
-struct tcf_connmark_info {
- struct tc_action common;
+struct tcf_connmark_parms {
struct net *net;
u16 zone;
+ struct rcu_head rcu;
+};
+
+struct tcf_connmark_info {
+ struct tc_action common;
+ struct tcf_connmark_parms __rcu *parms;
};
#define to_connmark(a) ((struct tcf_connmark_info *)a)
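Splitting the zone out into an RCU-managed parms block implies an update pattern roughly like this hedged sketch (the helper name is hypothetical; readers use rcu_dereference() under rcu_read_lock()):

	static void example_update_parms(struct tcf_connmark_info *ci,
					 struct tcf_connmark_parms *nparms)
	{
		struct tcf_connmark_parms *oparms;

		spin_lock_bh(&ci->common.tcfa_lock);
		oparms = rcu_replace_pointer(ci->parms, nparms,
					     lockdep_is_held(&ci->common.tcfa_lock));
		spin_unlock_bh(&ci->common.tcfa_lock);

		if (oparms)
			kfree_rcu(oparms, rcu);	/* free after a grace period */
	}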
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index 8250d6f0a462..77f87c622a2e 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -10,6 +10,7 @@
#include <net/netfilter/nf_conntrack_labels.h>
struct tcf_ct_params {
+ struct nf_conntrack_helper *helper;
struct nf_conn *tmpl;
u16 zone;
@@ -21,6 +22,7 @@ struct tcf_ct_params {
struct nf_nat_range2 range;
bool ipv4_range;
+ bool put_labels;
u16 ct_action;
@@ -56,6 +58,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
return to_ct_params(a)->nf_ft;
}
+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
+{
+ return to_ct_params(a)->helper;
+}
+
#else
static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
@@ -63,6 +70,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
{
return NULL;
}
+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
+{
+ return NULL;
+}
#endif /* CONFIG_NF_CONNTRACK */
#if IS_ENABLED(CONFIG_NET_ACT_CT)
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index eb8f01c819e6..832efd40e023 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -59,4 +59,19 @@ static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
}
+static inline bool is_tcf_gact_continue(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_UNSPEC, false);
+}
+
+static inline bool is_tcf_gact_reclassify(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_RECLASSIFY, false);
+}
+
+static inline bool is_tcf_gact_pipe(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_PIPE, false);
+}
+
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index 8bc6be81a7ad..c8fa11ebb397 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -60,11 +60,6 @@ static inline bool is_tcf_gate(const struct tc_action *a)
return false;
}
-static inline u32 tcf_gate_index(const struct tc_action *a)
-{
- return a->tcfa_index;
-}
-
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
s32 tcfg_prio;
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
deleted file mode 100644
index 4225fcb1c6ba..000000000000
--- a/include/net/tc_act/tc_ipt.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NET_TC_IPT_H
-#define __NET_TC_IPT_H
-
-#include <net/act_api.h>
-
-struct xt_entry_target;
-
-struct tcf_ipt {
- struct tc_action common;
- u32 tcfi_hook;
- char *tcfi_tname;
- struct xt_entry_target *tcfi_t;
-};
-#define to_ipt(a) ((struct tcf_ipt *)a)
-
-#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 1cace4c69e44..75722d967bf2 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,8 +8,10 @@
struct tcf_mirred {
struct tc_action common;
int tcfm_eaction;
+ u32 tcfm_blockid;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
+ netdevice_tracker tcfm_dev_tracker;
struct list_head tcfm_list;
};
#define to_mirred(a) ((struct tcf_mirred *)a)
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index c14407160812..c869274ac529 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -5,13 +5,17 @@
#include <linux/types.h>
#include <net/act_api.h>
-struct tcf_nat {
- struct tc_action common;
-
+struct tcf_nat_parms {
__be32 old_addr;
__be32 new_addr;
__be32 mask;
u32 flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_nat {
+ struct tc_action common;
+ struct tcf_nat_parms __rcu *parms;
};
#define to_tcf_nat(a) ((struct tcf_nat *)a)
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 748cf87a4d7e..83fe39931781 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -4,21 +4,29 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_pedit.h>
+#include <linux/types.h>
struct tcf_pedit_key_ex {
enum pedit_header_type htype;
enum pedit_cmd cmd;
};
-struct tcf_pedit {
- struct tc_action common;
- unsigned char tcfp_nkeys;
- unsigned char tcfp_flags;
+struct tcf_pedit_parms {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
+ u32 tcfp_off_max_hint;
+ unsigned char tcfp_nkeys;
+ unsigned char tcfp_flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_pedit {
+ struct tc_action common;
+ struct tcf_pedit_parms __rcu *parms;
};
#define to_pedit(a) ((struct tcf_pedit *)a)
+#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))
static inline bool is_tcf_pedit(const struct tc_action *a)
{
@@ -31,37 +39,81 @@ static inline bool is_tcf_pedit(const struct tc_action *a)
static inline int tcf_pedit_nkeys(const struct tc_action *a)
{
- return to_pedit(a)->tcfp_nkeys;
+ struct tcf_pedit_parms *parms;
+ int nkeys;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ nkeys = parms->tcfp_nkeys;
+ rcu_read_unlock();
+
+ return nkeys;
}
static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
{
- if (to_pedit(a)->tcfp_keys_ex)
- return to_pedit(a)->tcfp_keys_ex[index].htype;
+ u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ struct tcf_pedit_parms *parms;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ htype = parms->tcfp_keys_ex[index].htype;
+ rcu_read_unlock();
- return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ return htype;
}
static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
{
- if (to_pedit(a)->tcfp_keys_ex)
- return to_pedit(a)->tcfp_keys_ex[index].cmd;
+ struct tcf_pedit_parms *parms;
+ u32 cmd = __PEDIT_CMD_MAX;
- return __PEDIT_CMD_MAX;
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ cmd = parms->tcfp_keys_ex[index].cmd;
+ rcu_read_unlock();
+
+ return cmd;
}
static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].mask;
+ struct tcf_pedit_parms *parms;
+ u32 mask;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ mask = parms->tcfp_keys[index].mask;
+ rcu_read_unlock();
+
+ return mask;
}
static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].val;
+ struct tcf_pedit_parms *parms;
+ u32 val;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ val = parms->tcfp_keys[index].val;
+ rcu_read_unlock();
+
+ return val;
}
static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].off;
+ struct tcf_pedit_parms *parms;
+ u32 off;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ off = parms->tcfp_keys[index].off;
+ rcu_read_unlock();
+
+ return off;
}
#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 6d1e26b709b5..283bde711a42 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -10,10 +10,13 @@ struct tcf_police_params {
s64 tcfp_burst;
u32 tcfp_mtu;
s64 tcfp_mtu_ptoks;
+ s64 tcfp_pkt_burst;
struct psched_ratecfg rate;
bool rate_present;
struct psched_ratecfg peak;
bool peak_present;
+ struct psched_pktrate ppsrate;
+ bool pps_present;
struct rcu_head rcu;
};
@@ -24,6 +27,7 @@ struct tcf_police {
spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
s64 tcfp_toks;
s64 tcfp_ptoks;
+ s64 tcfp_pkttoks;
s64 tcfp_t_c;
};
@@ -97,6 +101,54 @@ static inline u32 tcf_police_burst(const struct tc_action *act)
return burst;
}
+static inline u64 tcf_police_rate_pkt_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->ppsrate.rate_pkts_ps;
+}
+
+static inline u32 tcf_police_burst_pkt(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+ u32 burst;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+
+	/*
+	 *      "rate" pkts     "burst" nanoseconds
+	 *      ------------  *  -------------------
+	 *        1 second            2^6 ticks
+	 *  ------------------------------------------
+	 *           NSEC_PER_SEC nanoseconds
+	 *           ------------------------
+	 *                  2^6 ticks
+	 *
+	 *      "rate" pkts   "burst" nanoseconds            2^6 ticks
+	 *  = ------------ * ------------------- * --------------------------
+	 *      1 second          2^6 ticks         NSEC_PER_SEC nanoseconds
+	 *
+	 *     "rate" * "burst"
+	 *  = ------------------  pkts/nanosecond
+	 *      NSEC_PER_SEC^2
+	 *
+	 *     "rate" * "burst"
+	 *  = ------------------  pkts/second
+	 *       NSEC_PER_SEC
+	 */
+ burst = div_u64(params->tcfp_pkt_burst * params->ppsrate.rate_pkts_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
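A worked example (numbers are mine, not from the patch): with ppsrate.rate_pkts_ps = 1000 and tcfp_pkt_burst = 5,000,000 ns (a 5 ms window), the division yields 1000 * 5e6 / 1e9 = 5 packets of burst.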
+
static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
{
struct tcf_police *police = to_police(act);
@@ -107,4 +159,34 @@ static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
return params->tcfp_mtu;
}
+static inline u64 tcf_police_peakrate_bytes_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->peak.rate_bytes_ps;
+}
+
+static inline u32 tcf_police_tcfp_ewma_rate(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->tcfp_ewma_rate;
+}
+
+static inline u16 tcf_police_rate_overhead(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->rate.overhead;
+}
+
#endif /* __NET_TC_POLICE_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 00bfee70609e..9649600fb3dc 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -17,6 +17,7 @@ struct tcf_skbedit_params {
u32 mark;
u32 mask;
u16 queue_mapping;
+ u16 mapping_mod;
u16 ptype;
struct rcu_head rcu;
};
@@ -94,4 +95,45 @@ static inline u32 tcf_skbedit_priority(const struct tc_action *a)
return priority;
}
+static inline u16 tcf_skbedit_rx_queue_mapping(const struct tc_action *a)
+{
+ u16 rx_queue;
+
+ rcu_read_lock();
+ rx_queue = rcu_dereference(to_skbedit(a)->params)->queue_mapping;
+ rcu_read_unlock();
+
+ return rx_queue;
+}
+
+/* Return true iff action is queue_mapping */
+static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING);
+}
+
+/* Return true if action is on ingress traffic */
+static inline bool is_tcf_skbedit_ingress(u32 flags)
+{
+ return flags & TCA_ACT_FLAGS_AT_INGRESS;
+}
+
+static inline bool is_tcf_skbedit_tx_queue_mapping(const struct tc_action *a)
+{
+ return is_tcf_skbedit_queue_mapping(a) &&
+ !is_tcf_skbedit_ingress(a->tcfa_flags);
+}
+
+static inline bool is_tcf_skbedit_rx_queue_mapping(const struct tc_action *a)
+{
+ return is_tcf_skbedit_queue_mapping(a) &&
+ is_tcf_skbedit_ingress(a->tcfa_flags);
+}
+
+/* Return true iff action is inheritdsfield */
+static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_INHERITDSFIELD);
+}
+
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index e1057b255f69..879fe8cff581 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
return &params->tcft_enc_metadata->u.tun_info;
#else
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 4e2502408c31..904eddfc1826 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,9 +11,12 @@
struct tcf_vlan_params {
int tcfv_action;
+ unsigned char tcfv_push_dst[ETH_ALEN];
+ unsigned char tcfv_push_src[ETH_ALEN];
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
u8 tcfv_push_prio;
+ bool tcfv_push_prio_exists;
struct rcu_head rcu;
};
@@ -75,4 +78,14 @@ static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
return tcfv_push_prio;
}
+
+static inline void tcf_vlan_push_eth(unsigned char *src, unsigned char *dest,
+ const struct tc_action *a)
+{
+ rcu_read_lock();
+ memcpy(dest, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_dst, ETH_ALEN);
+ memcpy(src, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_src, ETH_ALEN);
+ rcu_read_unlock();
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
new file mode 100644
index 000000000000..ffe58a02537c
--- /dev/null
+++ b/include/net/tc_wrapper.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_WRAPPER_H
+#define __NET_TC_WRAPPER_H
+
+#include <net/pkt_cls.h>
+
+#if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
+
+#include <linux/cpufeature.h>
+#include <linux/static_key.h>
+#include <linux/indirect_call_wrapper.h>
+
+#define TC_INDIRECT_SCOPE
+
+extern struct static_key_false tc_skip_wrapper;
+
+/* TC Actions */
+#ifdef CONFIG_NET_CLS_ACT
+
+#define TC_INDIRECT_ACTION_DECLARE(fname) \
+ INDIRECT_CALLABLE_DECLARE(int fname(struct sk_buff *skb, \
+ const struct tc_action *a, \
+ struct tcf_result *res))
+
+TC_INDIRECT_ACTION_DECLARE(tcf_bpf_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_connmark_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_csum_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ct_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ctinfo_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_gact_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_gate_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ife_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ipt_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_mirred_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_mpls_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_nat_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_pedit_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_police_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_sample_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_simp_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_skbedit_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_skbmod_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_vlan_act);
+TC_INDIRECT_ACTION_DECLARE(tunnel_key_act);
+
+static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ if (static_branch_likely(&tc_skip_wrapper))
+ goto skip;
+
+#if IS_BUILTIN(CONFIG_NET_ACT_GACT)
+ if (a->ops->act == tcf_gact_act)
+ return tcf_gact_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_MIRRED)
+ if (a->ops->act == tcf_mirred_act)
+ return tcf_mirred_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_PEDIT)
+ if (a->ops->act == tcf_pedit_act)
+ return tcf_pedit_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SKBEDIT)
+ if (a->ops->act == tcf_skbedit_act)
+ return tcf_skbedit_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SKBMOD)
+ if (a->ops->act == tcf_skbmod_act)
+ return tcf_skbmod_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_POLICE)
+ if (a->ops->act == tcf_police_act)
+ return tcf_police_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_BPF)
+ if (a->ops->act == tcf_bpf_act)
+ return tcf_bpf_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CONNMARK)
+ if (a->ops->act == tcf_connmark_act)
+ return tcf_connmark_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CSUM)
+ if (a->ops->act == tcf_csum_act)
+ return tcf_csum_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CT)
+ if (a->ops->act == tcf_ct_act)
+ return tcf_ct_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CTINFO)
+ if (a->ops->act == tcf_ctinfo_act)
+ return tcf_ctinfo_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_GATE)
+ if (a->ops->act == tcf_gate_act)
+ return tcf_gate_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_MPLS)
+ if (a->ops->act == tcf_mpls_act)
+ return tcf_mpls_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_NAT)
+ if (a->ops->act == tcf_nat_act)
+ return tcf_nat_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_TUNNEL_KEY)
+ if (a->ops->act == tunnel_key_act)
+ return tunnel_key_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_VLAN)
+ if (a->ops->act == tcf_vlan_act)
+ return tcf_vlan_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_IFE)
+ if (a->ops->act == tcf_ife_act)
+ return tcf_ife_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
+ if (a->ops->act == tcf_simp_act)
+ return tcf_simp_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SAMPLE)
+ if (a->ops->act == tcf_sample_act)
+ return tcf_sample_act(skb, a, res);
+#endif
+
+skip:
+ return a->ops->act(skb, a, res);
+}
+
+#endif /* CONFIG_NET_CLS_ACT */
+
+/* TC Filters */
+#ifdef CONFIG_NET_CLS
+
+#define TC_INDIRECT_FILTER_DECLARE(fname) \
+ INDIRECT_CALLABLE_DECLARE(int fname(struct sk_buff *skb, \
+ const struct tcf_proto *tp, \
+ struct tcf_result *res))
+
+TC_INDIRECT_FILTER_DECLARE(basic_classify);
+TC_INDIRECT_FILTER_DECLARE(cls_bpf_classify);
+TC_INDIRECT_FILTER_DECLARE(cls_cgroup_classify);
+TC_INDIRECT_FILTER_DECLARE(fl_classify);
+TC_INDIRECT_FILTER_DECLARE(flow_classify);
+TC_INDIRECT_FILTER_DECLARE(fw_classify);
+TC_INDIRECT_FILTER_DECLARE(mall_classify);
+TC_INDIRECT_FILTER_DECLARE(route4_classify);
+TC_INDIRECT_FILTER_DECLARE(u32_classify);
+
+static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ if (static_branch_likely(&tc_skip_wrapper))
+ goto skip;
+
+#if IS_BUILTIN(CONFIG_NET_CLS_BPF)
+ if (tp->classify == cls_bpf_classify)
+ return cls_bpf_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_U32)
+ if (tp->classify == u32_classify)
+ return u32_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FLOWER)
+ if (tp->classify == fl_classify)
+ return fl_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FW)
+ if (tp->classify == fw_classify)
+ return fw_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_MATCHALL)
+ if (tp->classify == mall_classify)
+ return mall_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_BASIC)
+ if (tp->classify == basic_classify)
+ return basic_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
+ if (tp->classify == cls_cgroup_classify)
+ return cls_cgroup_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FLOW)
+ if (tp->classify == flow_classify)
+ return flow_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_ROUTE4)
+ if (tp->classify == route4_classify)
+ return route4_classify(skb, tp, res);
+#endif
+
+skip:
+ return tp->classify(skb, tp, res);
+}
+
+#endif /* CONFIG_NET_CLS */
+
+static inline void tc_wrapper_init(void)
+{
+#ifdef CONFIG_X86
+ if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE))
+ static_branch_enable(&tc_skip_wrapper);
+#endif
+}
+
+#else
+
+#define TC_INDIRECT_SCOPE static
+
+static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ return a->ops->act(skb, a, res);
+}
+
+static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ return tp->classify(skb, tp, res);
+}
+
+static inline void tc_wrapper_init(void)
+{
+}
+
+#endif
+
+#endif /* __NET_TC_WRAPPER_H */
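The wrappers above replace an indirect call through a->ops->act or tp->classify with a chain of pointer comparisons and direct calls whenever the matching action or classifier is built in, and tc_wrapper_init() disables the detour on CPUs that do not enable retpolines, where a plain indirect call is already cheap. Below is a minimal standalone sketch of the same dispatch pattern; the types and handler names are invented for illustration, not kernel symbols.

/* Direct-call dispatch in the style of tc_act()/tc_classify(): compare the
 * stored function pointer against known handlers and call the match
 * directly, falling back to a genuine indirect call otherwise. */
#include <stdio.h>

struct pkt { int len; };

static int handler_a(struct pkt *p) { return p->len + 1; }
static int handler_b(struct pkt *p) { return p->len + 2; }

static int dispatch(int (*fn)(struct pkt *), struct pkt *p)
{
	if (fn == handler_a)	/* direct call: no retpoline thunk */
		return handler_a(p);
	if (fn == handler_b)
		return handler_b(p);
	return fn(p);		/* slow path: indirect call */
}

int main(void)
{
	struct pkt p = { .len = 40 };
	printf("%d\n", dispatch(handler_a, &p));	/* prints 41 */
	return 0;
}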
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eab6c7510b5b..6ae35199d3b3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -37,6 +37,7 @@
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
+#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
@@ -48,7 +49,9 @@
extern struct inet_hashinfo tcp_hashinfo;
-extern struct percpu_counter tcp_orphan_count;
+DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
+int tcp_orphan_count_sum(void);
+
void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
@@ -129,6 +132,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
+static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
+
#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN ((unsigned)(HZ/25))
@@ -139,6 +144,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
* used as a fallback RTO for the
@@ -159,9 +167,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127
-#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
+/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
+ * to avoid overflows. This assumes a clock smaller than 1 MHz.
+ * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
+ */
+#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
-#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
* after this time. It should be equal
* (or greater than) TCP_TIMEWAIT_LEN
@@ -184,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
@@ -251,6 +263,8 @@ extern long sysctl_tcp_mem[3];
#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
extern atomic_long_t tcp_memory_allocated;
+DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
+
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;
@@ -288,21 +302,18 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
-void sk_forced_mem_schedule(struct sock *sk, int size);
-
-static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
+static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- struct percpu_counter *ocp = sk->sk_prot->orphan_count;
- int orphans = percpu_counter_read_positive(ocp);
-
- if (orphans << shift > sysctl_tcp_max_orphans) {
- orphans = percpu_counter_sum_positive(ocp);
- if (orphans << shift > sysctl_tcp_max_orphans)
- return true;
- }
- return false;
+ sk_wmem_queued_add(sk, -skb->truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, skb->truesize);
+ else
+ sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
+ __kfree_skb(skb);
}
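Note the asymmetry in tcp_wmem_free_skb() above: sk_wmem_queued always drops by the skb's full truesize, but a pure-zerocopy skb uncharges only its header truesize, because the payload pages are user memory that was never charged to the socket's forward allocation. A rough standalone model of that accounting, with all sizes invented:

/* Model of the zerocopy-aware uncharge in tcp_wmem_free_skb(). */
#include <stdio.h>

int main(void)
{
	int truesize = 4096 + 512;	/* payload pages + skb head (invented) */
	int head_truesize = 512;	/* stand-in for SKB_TRUESIZE(skb_end_offset()) */
	int zcopy_pure = 1;		/* payload pages belong to userspace */

	int wmem_queued_delta = -truesize;	/* queued bytes always drop fully */
	int uncharged = zcopy_pure ? head_truesize : truesize;

	printf("queued %+d, uncharged %d\n", wmem_queued_delta, uncharged);
	return 0;
}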
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
bool tcp_check_oom(struct sock *sk, int shift);
@@ -322,39 +333,41 @@ void tcp_shutdown(struct sock *sk, int how);
int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
-int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
-int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
- int flags);
-int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
-ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
+int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
+ size_t size, struct ubuf_info *uarg);
+void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
-int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+int tcp_ioctl(struct sock *sk, int cmd, int *karg);
+enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
+void tcp_twsk_purge(struct list_head *net_exit_list, int family);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
+ bool force_schedule);
-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
- const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
+ /* How many ACKs S/ACKing new data have we sent? */
+ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */
@@ -383,29 +396,40 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
-void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
+void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
-void tcp_init_transfer(struct sock *sk, int bpf_op);
+void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
+int do_tcp_getsockopt(struct sock *sk, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
+bool tcp_bpf_bypass_getsockopt(int level, int optname);
+int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
-int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
+int tcp_set_window_clamp(struct sock *sk, int val);
+void tcp_update_recv_tstamps(struct sk_buff *skb,
+ struct scm_timestamping_internal *tss);
+void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
@@ -414,7 +438,6 @@ int tcp_mmap(struct file *file, struct socket *sock,
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
-const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* BPF SKB-less helpers
@@ -423,6 +446,7 @@ u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
struct tcphdr *th, u32 *cookie);
+u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct tcphdr *th);
@@ -455,7 +479,8 @@ enum tcp_synack_type {
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -465,12 +490,30 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst, u32 tsoff);
-int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
- u32 cookie);
+ struct dst_entry *dst);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
- struct sock *sk, struct sk_buff *skb);
+ struct sock *sk, struct sk_buff *skb,
+ struct tcp_options_received *tcp_opt,
+ int mss, u32 tsoff);
+
+#if IS_ENABLED(CONFIG_BPF)
+struct bpf_tcp_req_attrs {
+ u32 rcv_tsval;
+ u32 rcv_tsecr;
+ u16 mss;
+ u8 rcv_wscale;
+ u8 snd_wscale;
+ u8 ecn_ok;
+ u8 wscale_ok;
+ u8 sack_ok;
+ u8 tstamp_ok;
+ u8 usec_ts_ok;
+ u8 reserved[3];
+};
+#endif
+
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -508,7 +551,7 @@ static inline void tcp_synq_overflow(const struct sock *sk)
last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
if (!time_between32(now, last_overflow, last_overflow + HZ))
- WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
+ WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}
/* syncookies: no recent synqueue overflow on this listening socket? */
@@ -550,18 +593,50 @@ static inline u32 tcp_cookie_time(void)
return val;
}
+/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
+static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
+{
+ if (usec_ts)
+ return div_u64(val, NSEC_PER_USEC);
+
+ return div_u64(val, NSEC_PER_MSEC);
+}
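As a quick check of the conversion above: the 64-bit nanosecond clock is divided by 1,000 for usec-resolution timestamps and by 1,000,000 for ms resolution. A standalone sketch, with the NSEC_PER_* constants written out instead of taken from kernel headers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_MSEC 1000000ULL

/* Mirrors tcp_ns_to_ts(): pick the TS option unit per flow. */
static uint64_t ns_to_ts(int usec_ts, uint64_t ns)
{
	return usec_ts ? ns / NSEC_PER_USEC : ns / NSEC_PER_MSEC;
}

int main(void)
{
	uint64_t ns = 1234567890ULL;	/* ~1.234 s of clock */
	printf("usec TS: %llu\n", (unsigned long long)ns_to_ts(1, ns)); /* 1234567 */
	printf("ms   TS: %llu\n", (unsigned long long)ns_to_ts(0, ns)); /* 1234 */
	return 0;
}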
+
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
-bool cookie_ecn_ok(const struct tcp_options_received *opt,
- const struct net *net, const struct dst_entry *dst);
+
+static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
+{
+ return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
+ dst_feature(dst, RTAX_FEATURE_ECN);
+}
+
+#if IS_ENABLED(CONFIG_BPF)
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return skb->sk;
+}
+
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
+#else
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline struct request_sock *cookie_bpf_check(struct sock *sk,
+						    struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
/* From net/ipv6/syncookies.c */
-int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
- u32 cookie);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
@@ -570,6 +645,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
@@ -588,7 +665,6 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
unsigned int mss_now, gfp_t gfp);
void tcp_send_probe0(struct sock *);
-void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
@@ -605,9 +681,10 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
-void tcp_reset(struct sock *sk);
-void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
+void tcp_check_space(struct sock *sk);
+void tcp_sack_compress_send_ack(struct sock *sk);
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
@@ -624,6 +701,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -654,6 +732,9 @@ void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
+void tcp_read_done(struct sock *sk, size_t len);
void tcp_initialize_rcv_mss(struct sock *sk);
@@ -674,6 +755,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
@@ -695,18 +780,20 @@ static inline void tcp_fast_path_check(struct sock *sk)
tcp_fast_path_on(tp);
}
+u32 tcp_delack_max(const struct sock *sk);
+
/* Compute the actual rto_min value */
-static inline u32 tcp_rto_min(struct sock *sk)
+static inline u32 tcp_rto_min(const struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
+ u32 rto_min = inet_csk(sk)->icsk_rto_min;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
return rto_min;
}
-static inline u32 tcp_rto_min_us(struct sock *sk)
+static inline u32 tcp_rto_min_us(const struct sock *sk)
{
return jiffies_to_usecs(tcp_rto_min(sk));
}
@@ -766,22 +853,31 @@ static inline u64 tcp_clock_us(void)
return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}
-/* This should only be used in contexts where tp->tcp_mstamp is up to date */
-static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+static inline u64 tcp_clock_ms(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
+}
+
+/* TCP Timestamp included in TS option (RFC 1323) can either use ms
+ * or usec resolution. Each socket carries a flag selecting one or the
+ * other resolution, because the route attribute could change at any
+ * time; each flow must stick to its initial resolution.
+ */
+static inline u32 tcp_clock_ts(bool usec_ts)
{
- return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
-/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
-static inline u32 tcp_ns_to_ts(u64 ns)
+static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
- return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}
-/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
-static inline u32 tcp_time_stamp_raw(void)
+static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
- return tcp_ns_to_ts(tcp_clock_ns());
+ if (tp->tcp_usec_ts)
+ return tp->tcp_mstamp;
+ return tcp_time_stamp_ms(tp);
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
@@ -791,17 +887,30 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
return max_t(s64, t1 - t0, 0);
}
-static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
-{
- return tcp_ns_to_ts(skb->skb_mstamp_ns);
-}
-
/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
+/* Provide skb TSval in usec or ms unit */
+static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
+{
+ if (usec_ts)
+ return tcp_skb_timestamp_us(skb);
+
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
+}
+
+static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
+{
+ return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
+}
+
+static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
+{
+ return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
+}
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
@@ -858,10 +967,11 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
+#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
/* There is space for up to 24 bytes */
- __u32 in_flight:30,/* Bytes in flight at transmit */
- is_app_limited:1, /* cwnd not fully used? */
- unused:1;
+ __u32 is_app_limited:1, /* cwnd not fully used? */
+ delivered_ce:20,
+ unused:11;
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
@@ -875,36 +985,11 @@ struct tcp_skb_cb {
struct inet6_skb_parm h6;
#endif
} header; /* For incoming skbs */
- struct {
- __u32 flags;
- struct sock *sk_redir;
- void *data_end;
- } bpf;
};
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
-{
- TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
-}
-
-static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
-{
- return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
-}
-
-static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
-{
- return TCP_SKB_CB(skb)->bpf.sk_redir;
-}
-
-static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
-{
- TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
-}
-
extern const struct inet_connection_sock_af_ops ipv4_specific;
#if IS_ENABLED(CONFIG_IPV6)
@@ -937,20 +1022,10 @@ extern const struct inet_connection_sock_af_ops ipv6_specific;
INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
+void tcp_v6_early_demux(struct sk_buff *skb);
#endif
-static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
- return true;
-#endif
- return false;
-}
-
/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
@@ -994,7 +1069,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return likely(tcp_skb_can_collapse_to(to) &&
- mptcp_skb_can_collapse(to, from));
+ mptcp_skb_can_collapse(to, from) &&
+ skb_pure_zcopy_same(to, from));
}
/* Events passed to congestion control interface */
@@ -1048,7 +1124,9 @@ struct ack_sample {
struct rate_sample {
u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
+ s32 delivered_ce; /* number of packets delivered w/ CE marks*/
long interval_us; /* time for tp->delivered to incr "delivered" */
u32 snd_interval_us; /* snd interval for delivered packets */
u32 rcv_interval_us; /* rcv interval for delivered packets */
@@ -1056,53 +1134,69 @@ struct rate_sample {
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
u32 prior_in_flight; /* in flight before this ACK */
+ u32 last_end_seq; /* end_seq of most recently ACKed packet */
bool is_app_limited; /* is sample from packet with bubble in pipe? */
bool is_retrans; /* is sample from retransmission? */
bool is_ack_delayed; /* is this (likely) a delayed ACK? */
};
struct tcp_congestion_ops {
- struct list_head list;
- u32 key;
- u32 flags;
-
- /* initialize private data (optional) */
- void (*init)(struct sock *sk);
- /* cleanup private data (optional) */
- void (*release)(struct sock *sk);
+/* fast path fields are put first to fill one cache line */
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
+
/* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
+
/* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state);
+
/* call when cwnd event occurs (optional) */
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+
/* call when ack arrives (optional) */
void (*in_ack_event)(struct sock *sk, u32 flags);
- /* new value of cwnd after loss (required) */
- u32 (*undo_cwnd)(struct sock *sk);
+
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+
/* override sysctl_tcp_min_tso_segs */
u32 (*min_tso_segs)(struct sock *sk);
- /* returns the multiplier used in tcp_sndbuf_expand (optional) */
- u32 (*sndbuf_expand)(struct sock *sk);
+
/* call when packets are delivered to update cwnd and pacing rate,
* after all the ca_state processing. (optional)
*/
void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
+
+ /* new value of cwnd after loss (required) */
+ u32 (*undo_cwnd)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+
+/* control/slow paths put last */
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
- char name[TCP_CA_NAME_MAX];
- struct module *owner;
-};
+ char name[TCP_CA_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+ u32 key;
+ u32 flags;
+
+ /* initialize private data (optional) */
+ void (*init)(struct sock *sk);
+ /* cleanup private data (optional) */
+ void (*release)(struct sock *sk);
+} ____cacheline_aligned_in_smp;
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+int tcp_update_congestion_control(struct tcp_congestion_ops *type,
+ struct tcp_congestion_ops *old_type);
+int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
@@ -1113,7 +1207,7 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin);
+ bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1141,15 +1235,6 @@ static inline bool tcp_ca_needs_ecn(const struct sock *sk)
return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}
-static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- if (icsk->icsk_ca_ops->set_state)
- icsk->icsk_ca_ops->set_state(sk, ca_state);
- icsk->icsk_ca_state = ca_state;
-}
-
static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1158,6 +1243,9 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_cong.c */
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
+
/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
@@ -1166,6 +1254,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);
+static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
+{
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
+}
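tcp_skb_sent_after() gives loss detection a strict transmit ordering: the later timestamp wins, and an exact timestamp tie is broken by the wrap-safe sequence comparison. A standalone property check, with after() open-coded as the signed 32-bit subtraction the kernel's sequence arithmetic uses:

#include <assert.h>
#include <stdint.h>

static int seq_after(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;	/* wrap-safe, as in after() */
}

static int sent_after(uint64_t t1, uint64_t t2, uint32_t seq1, uint32_t seq2)
{
	return t1 > t2 || (t1 == t2 && seq_after(seq1, seq2));
}

int main(void)
{
	assert(sent_after(100, 99, 1, 2));		/* later timestamp wins */
	assert(sent_after(100, 100, 2, 1));		/* tie: higher seq wins */
	assert(sent_after(100, 100, 5, UINT32_MAX));	/* tie across seq wrap */
	return 0;
}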
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
@@ -1209,9 +1302,20 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
#define TCP_INFINITE_SSTHRESH 0x7fffffff
+static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd;
+}
+
+static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
+{
+ WARN_ON_ONCE((int)val <= 0);
+ tp->snd_cwnd = val;
+}
+
static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
- return tp->snd_cwnd < tp->snd_ssthresh;
+ return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
@@ -1237,8 +1341,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
return tp->snd_ssthresh;
else
return max(tp->snd_ssthresh,
- ((tp->snd_cwnd >> 1) +
- (tp->snd_cwnd >> 2)));
+ ((tcp_snd_cwnd(tp) >> 1) +
+ (tcp_snd_cwnd(tp) >> 2)));
}
/* Use define here intentionally to get WARN_ON location shown at the caller */
@@ -1278,11 +1382,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ if (tp->is_cwnd_limited)
+ return true;
+
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
if (tcp_in_slow_start(tp))
- return tp->snd_cwnd < 2 * tp->max_packets_out;
+ return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
- return tp->is_cwnd_limited;
+ return false;
}
/* BBR congestion control needs pacing.
@@ -1330,7 +1437,9 @@ static inline unsigned long tcp_probe0_base(const struct sock *sk)
static inline unsigned long tcp_probe0_when(const struct sock *sk,
unsigned long max_when)
{
- u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
+ u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
+ inet_csk(sk)->icsk_backoff);
+ u64 when = (u64)tcp_probe0_base(sk) << backoff;
return (unsigned long)min_t(u64, when, max_when);
}
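The clamp above bounds icsk_backoff before it is used as a shift count: with the defaults earlier in this header (TCP_RTO_MAX = 120*HZ, TCP_RTO_MIN = HZ/5) the ratio is 600 and ilog2(600) is 9, so the shift never exceeds 10 regardless of how large a misbehaving peer drives the backoff. A standalone arithmetic check, assuming HZ is 1000:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000U
#define TCP_RTO_MAX (120 * HZ)
#define TCP_RTO_MIN (HZ / 5)

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int cap = ilog2_u32(TCP_RTO_MAX / TCP_RTO_MIN) + 1;
	uint8_t backoff = 200;	/* absurdly large icsk_backoff */
	uint8_t clamped = backoff < cap ? backoff : cap;

	printf("cap=%u clamped=%u\n", cap, clamped);	/* cap=10 clamped=10 */
	return 0;
}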
@@ -1367,7 +1476,10 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
__skb_checksum_complete(skb);
}
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason);
+
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
@@ -1387,8 +1499,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;
- if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
- ca_ops->cong_control)
+ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
+ tp->packets_out || ca_ops->cong_control)
return;
delta = tcp_jiffies32 - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
@@ -1401,13 +1513,41 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
__u32 *window_clamp, int wscale_ok,
__u8 *rcv_wscale, __u32 init_rcv_wnd);
+static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
+{
+ s64 scaled_space = (s64)space * scaling_ratio;
+
+ return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
+}
+
static inline int tcp_win_from_space(const struct sock *sk, int space)
{
- int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+ return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
+}
+
+/* inverse of __tcp_win_from_space() */
+static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
+{
+ u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
- return tcp_adv_win_scale <= 0 ?
- (space>>(-tcp_adv_win_scale)) :
- space - (space>>tcp_adv_win_scale);
+ do_div(val, scaling_ratio);
+ return val;
+}
+
+static inline int tcp_space_from_win(const struct sock *sk, int win)
+{
+ return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
+}
+
+/* Assume a conservative default of 1200 bytes of payload per 4K page.
+ * This may be adjusted later in tcp_measure_rcv_mss().
+ */
+#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
+ SKB_TRUESIZE(4096))
+
+static inline void tcp_scaling_ratio_init(struct sock *sk)
+{
+ tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
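Numerically, the default assumes about 1200 usable payload bytes per SKB_TRUESIZE(4096) bytes of receive buffer, in fixed point. The standalone sketch below reproduces the arithmetic under two stated assumptions: TCP_RMEM_TO_WIN_SCALE is 8, and the skb truesize overhead on a 4K page is roughly 480 bytes (both depend on kernel configuration):

#include <stdio.h>

#define TCP_RMEM_TO_WIN_SCALE 8			/* assumed */
#define ASSUMED_TRUESIZE (4096 + 480)		/* 4K page + skb overhead */

int main(void)
{
	int ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) / ASSUMED_TRUESIZE;
	int space = 131072;			/* 128 KiB of sk_rcvbuf */
	long long win = ((long long)space * ratio) >> TCP_RMEM_TO_WIN_SCALE;

	/* ratio ~= 67, so ~26% of the buffer (~33 KiB) becomes window */
	printf("ratio=%d win=%lld\n", ratio, win);
	return 0;
}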
/* Note: caller must be prepared to deal with negative returns */
@@ -1423,6 +1563,26 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
+{
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+}
+
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
* If 87.5 % (7/8) of the space has been consumed, we want to override
* SO_RCVLOWAT constraint, since we are receiving skbs with too small
@@ -1430,12 +1590,29 @@ static inline int tcp_full_space(const struct sock *sk)
*/
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
- int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
- int threshold = rcvbuf - (rcvbuf >> 3);
+ int rcvbuf, threshold;
+
+ if (tcp_under_memory_pressure(sk))
+ return true;
+
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ threshold = rcvbuf - (rcvbuf >> 3);
return atomic_read(&sk->sk_rmem_alloc) > threshold;
}
+static inline bool tcp_epollin_ready(const struct sock *sk, int target)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
+
+ if (avail <= 0)
+ return false;
+
+ return (avail >= target) || tcp_rmem_pressure(sk) ||
+ (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
+}
+
extern void tcp_openreq_init_rwin(struct request_sock *req,
const struct sock *sk_listener,
const struct dst_entry *dst);
@@ -1446,22 +1623,38 @@ void tcp_leave_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
+ int val;
- return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
+ * and do_tcp_setsockopt().
+ */
+ val = READ_ONCE(tp->keepalive_intvl);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}
static inline int keepalive_time_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
+ int val;
+
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
+ val = READ_ONCE(tp->keepalive_time);
- return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}
static inline int keepalive_probes(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
+ int val;
- return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
+ * and do_tcp_setsockopt().
+ */
+ val = READ_ONCE(tp->keepalive_probes);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -1474,7 +1667,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
static inline int tcp_fin_time(const struct sock *sk)
{
- int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
+ int fin_timeout = tcp_sk(sk)->linger2 ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
const int rto = inet_csk(sk)->icsk_rto;
if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1489,7 +1683,7 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
if (unlikely(!time_before32(ktime_get_seconds(),
- rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
+ rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
return true;
/*
* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1549,12 +1743,7 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
tp->retransmit_skb_hint = NULL;
}
-union tcp_md5_addr {
- struct in_addr a4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr a6;
-#endif
-};
+#define tcp_md5_addr tcp_ao_addr
/* - key database */
struct tcp_md5sig_key {
@@ -1562,6 +1751,7 @@ struct tcp_md5sig_key {
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
u8 prefixlen;
+ u8 flags;
union tcp_md5_addr addr;
int l3index; /* set if key added with L3 scope */
u8 key[TCP_MD5SIG_MAXKEYLEN];
@@ -1597,38 +1787,83 @@ union tcp_md5sum_block {
#endif
};
-/* - pool: digest algorithm, hash description and scratch buffer */
-struct tcp_md5sig_pool {
- struct ahash_request *md5_req;
- void *scratch;
+/**
+ * struct tcp_sigpool - per-CPU pool of ahash_requests
+ * @scratch: per-CPU temporary area, that can be used between
+ * tcp_sigpool_start() and tcp_sigpool_end() to perform
+ * crypto request
+ * @req: pre-allocated ahash request
+ */
+struct tcp_sigpool {
+ void *scratch;
+ struct ahash_request *req;
};
+int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
+void tcp_sigpool_get(unsigned int id);
+void tcp_sigpool_release(unsigned int id);
+int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
+ const struct sk_buff *skb,
+ unsigned int header_len);
+
+/**
+ * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
+ * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
+ * @c: returned tcp_sigpool for usage (uninitialized on failure)
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
+/**
+ * tcp_sigpool_end - enable bh and stop using tcp_sigpool
+ * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
+ */
+void tcp_sigpool_end(struct tcp_sigpool *c);
+size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
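Putting the sigpool API together: a caller allocates a pool id once with tcp_sigpool_alloc_ahash() (for example with "hmac(sha1)"), then brackets each hash computation between tcp_sigpool_start() and tcp_sigpool_end(). The fragment below is a hedged, in-tree-style sketch rather than a standalone program; the digest-extraction step via ahash_request_set_crypt()/crypto_ahash_final() follows how the existing MD5 code drives the request, not a contract of this API:

/* Hedged usage sketch of the tcp_sigpool API; error paths trimmed. */
static int example_hash_skb(unsigned int pool_id, const struct sk_buff *skb,
			    unsigned int header_len, u8 *out_digest)
{
	struct tcp_sigpool hp;
	int err;

	err = tcp_sigpool_start(pool_id, &hp);	/* disables bh, binds to CPU */
	if (err)
		return err;

	err = tcp_sigpool_hash_skb_data(&hp, skb, header_len);
	if (!err) {
		ahash_request_set_crypt(hp.req, NULL, out_digest, 0);
		err = crypto_ahash_final(hp.req);	/* write digest out */
	}

	tcp_sigpool_end(&hp);			/* re-enables bh */
	return err;
}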
/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index,
- const u8 *newkey, u8 newkeylen, gfp_t gfp);
+ int family, u8 prefixlen, int l3index, u8 flags,
+ const u8 *newkey, u8 newkeylen);
+int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
+ int family, u8 prefixlen, int l3index,
+ struct tcp_md5sig_key *key);
+
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index);
+ int family, u8 prefixlen, int l3index, u8 flags);
+void tcp_clear_md5_list(struct sock *sk);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-#include <linux/jump_label.h>
-extern struct static_key_false tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
- int family);
+ int family, bool any_l3index);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr, int family)
{
- if (!static_branch_unlikely(&tcp_md5_needed))
+ if (!static_branch_unlikely(&tcp_md5_needed.key))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
+}
+
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup_any_l3index(const struct sock *sk,
+ const union tcp_md5_addr *addr, int family)
+{
+ if (!static_branch_unlikely(&tcp_md5_needed.key))
return NULL;
- return __tcp_md5_do_lookup(sk, l3index, addr, family);
+ return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}
+enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int l3index, const __u8 *hash_location);
+
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
@@ -1637,20 +1872,30 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
{
return NULL;
}
-#define tcp_twsk_md5_key(twsk) NULL
-#endif
-bool tcp_alloc_md5sig_pool(void);
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup_any_l3index(const struct sock *sk,
+ const union tcp_md5_addr *addr, int family)
+{
+ return NULL;
+}
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
-static inline void tcp_put_md5sig_pool(void)
+static inline enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int l3index, const __u8 *hash_location)
{
- local_bh_enable();
+ return SKB_NOT_DROPPED_YET;
}
+#define tcp_twsk_md5_key(twsk) NULL
+#endif
+
+int tcp_md5_alloc_sigpool(void);
+void tcp_md5_release_sigpool(void);
+void tcp_md5_add_sigpool(void);
+extern int tcp_md5_sigpool_id;
-int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
- unsigned int header_len);
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+int tcp_md5_hash_key(struct tcp_sigpool *hp,
const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
@@ -1695,7 +1940,6 @@ struct tcp_fastopen_context {
struct rcu_head rcu;
};
-extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
@@ -1773,11 +2017,6 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
return skb_rb_last(&sk->tcp_rtx_queue);
}
-static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
-{
- return skb_peek(&sk->sk_write_queue);
-}
-
static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
@@ -1856,7 +2095,7 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
{
list_del(&skb->tcp_tsorted_anchor);
tcp_rtx_queue_unlink(skb, sk);
- sk_wmem_free_skb(sk, skb);
+ tcp_wmem_free_skb(sk, skb);
}
static inline void tcp_push_pending_frames(struct sock *sk)
@@ -1916,7 +2155,7 @@ static inline bool inet_sk_transparent(const struct sock *sk)
case TCP_NEW_SYN_RECV:
return inet_rsk(inet_reqsk(sk))->no_srccheck;
}
- return inet_sk(sk)->transparent;
+ return inet_test_bit(TRANSPARENT, sk);
}
/* Determines whether this is a thin stream (which may suffer from
@@ -1945,7 +2184,6 @@ struct tcp_iter_state {
struct seq_net_private p;
enum tcp_seq_states state;
struct sock *syn_wait_sk;
- struct tcp_seq_afinfo *bpf_seq_afinfo;
int bucket, offset, sbucket, num;
loff_t last_pos;
};
@@ -1962,29 +2200,26 @@ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff))
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
-int tcp_gro_complete(struct sk_buff *skb);
+#ifdef CONFIG_INET
+void tcp_gro_complete(struct sk_buff *skb);
+#else
+static inline void tcp_gro_complete(struct sk_buff *skb) { }
+#endif
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
-}
+ u32 val;
-/* @wake is one when sk_stream_write_space() calls us.
- * This sends EPOLLOUT only if notsent_bytes is half the limit.
- * This mimics the strategy used in sock_def_write_space().
- */
-static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- u32 notsent_bytes = READ_ONCE(tp->write_seq) -
- READ_ONCE(tp->snd_nxt);
+ val = READ_ONCE(tp->notsent_lowat);
- return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}
+bool tcp_stream_memory_free(const struct sock *sk, int wake);
+
#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
@@ -2009,6 +2244,18 @@ struct tcp_sock_af_ops {
sockptr_t optval,
int optlen);
#endif
+#ifdef CONFIG_TCP_AO
+ int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
+ struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
+ struct sock *addr_sk,
+ int sndid, int rcvid);
+ int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
+ const struct sock *sk,
+ __be32 sisn, __be32 disn, bool send);
+ int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+#endif
};
struct tcp_request_sock_ops {
@@ -2021,21 +2268,30 @@ struct tcp_request_sock_ops {
const struct sock *sk,
const struct sk_buff *skb);
#endif
- void (*init_req)(struct request_sock *req,
- const struct sock *sk_listener,
- struct sk_buff *skb);
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
+ struct request_sock *req,
+ int sndid, int rcvid);
+ int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
+ int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
+ struct request_sock *req, const struct sk_buff *skb,
+ int hash_offset, u32 sne);
+#endif
#ifdef CONFIG_SYN_COOKIES
__u32 (*cookie_init_seq)(const struct sk_buff *skb,
__u16 *mss);
#endif
- struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
- const struct request_sock *req);
+ struct dst_entry *(*route_req)(const struct sock *sk,
+ struct sk_buff *skb,
+ struct flowi *fl,
+ struct request_sock *req);
u32 (*init_seq)(const struct sk_buff *skb);
u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
};
extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
@@ -2061,6 +2317,76 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
}
#endif
+struct tcp_key {
+ union {
+ struct {
+ struct tcp_ao_key *ao_key;
+ char *traffic_key;
+ u32 sne;
+ u8 rcv_next;
+ };
+ struct tcp_md5sig_key *md5_key;
+ };
+ enum {
+ TCP_KEY_NONE = 0,
+ TCP_KEY_MD5,
+ TCP_KEY_AO,
+ } type;
+};
+
+static inline void tcp_get_current_key(const struct sock *sk,
+ struct tcp_key *out)
+{
+#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
+ const struct tcp_sock *tp = tcp_sk(sk);
+#endif
+
+#ifdef CONFIG_TCP_AO
+ if (static_branch_unlikely(&tcp_ao_needed.key)) {
+ struct tcp_ao_info *ao;
+
+ ao = rcu_dereference_protected(tp->ao_info,
+ lockdep_sock_is_held(sk));
+ if (ao) {
+ out->ao_key = READ_ONCE(ao->current_key);
+ out->type = TCP_KEY_AO;
+ return;
+ }
+ }
+#endif
+#ifdef CONFIG_TCP_MD5SIG
+ if (static_branch_unlikely(&tcp_md5_needed.key) &&
+ rcu_access_pointer(tp->md5sig_info)) {
+ out->md5_key = tp->af_specific->md5_lookup(sk, sk);
+ if (out->md5_key) {
+ out->type = TCP_KEY_MD5;
+ return;
+ }
+ }
+#endif
+ out->type = TCP_KEY_NONE;
+}
+
+static inline bool tcp_key_is_md5(const struct tcp_key *key)
+{
+#ifdef CONFIG_TCP_MD5SIG
+ if (static_branch_unlikely(&tcp_md5_needed.key) &&
+ key->type == TCP_KEY_MD5)
+ return true;
+#endif
+ return false;
+}
+
+static inline bool tcp_key_is_ao(const struct tcp_key *key)
+{
+#ifdef CONFIG_TCP_AO
+ if (static_branch_unlikely(&tcp_ao_needed.key) &&
+ key->type == TCP_KEY_AO)
+ return true;
+#endif
+ return false;
+}
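A consumer of tcp_get_current_key() typically sizes or emits TCP options according to the key type, with AO taking precedence over MD5 as encoded above. A hedged sketch; the function name is illustrative and the MD5 option size is assumed to be the usual 20-byte aligned length:

/* Illustrative consumer of tcp_get_current_key(); not a kernel function. */
static inline int example_auth_option_space(const struct sock *sk)
{
	struct tcp_key key;

	tcp_get_current_key(sk, &key);

	if (tcp_key_is_ao(&key))
		return tcp_ao_len_aligned(key.ao_key);	/* AO hdr + MAC, padded */
	if (tcp_key_is_md5(&key))
		return 20;		/* assumed TCPOLEN_MD5SIG_ALIGNED */
	return 0;			/* TCP_KEY_NONE: unsigned segment */
}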
+
int tcpv4_offload_init(void);
void tcp_v4_init(void);
@@ -2071,12 +2397,40 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
+/* tcp_plb.c */
+
+/*
+ * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
+ * expects cong_ratio, which represents the fraction of traffic that experienced
+ * congestion over a single RTT. In order to avoid floating point operations,
+ * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
+ */
+#define TCP_PLB_SCALE 8
+
+/* State for PLB (Protective Load Balancing) for a single TCP connection. */
+struct tcp_plb_state {
+ u8 consec_cong_rounds:5, /* consecutive congested rounds */
+ unused:3;
+ u32 pause_until; /* jiffies32 when PLB can resume rerouting */
+};
+
+static inline void tcp_plb_init(const struct sock *sk,
+ struct tcp_plb_state *plb)
+{
+ plb->consec_cong_rounds = 0;
+ plb->pause_until = 0;
+}
+void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
+ const int cong_ratio);
+void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
+void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
+
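For example, a caller feeding tcp_plb_update_state() would form cong_ratio in this fixed-point representation as (ce_marked << TCP_PLB_SCALE) / delivered. A standalone arithmetic check with invented counts:

#include <stdio.h>

#define TCP_PLB_SCALE 8

int main(void)
{
	unsigned int delivered = 200, delivered_ce = 50;
	/* 25% congestion maps to 0.25 * 256 = 64 in 8-bit fixed point */
	unsigned int cong_ratio = (delivered_ce << TCP_PLB_SCALE) / delivered;

	printf("cong_ratio=%u\n", cong_ratio);	/* prints 64 */
	return 0;
}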
/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
@@ -2154,9 +2508,13 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
u16 segs_in;
segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
- tp->segs_in += segs_in;
+
+ /* We update these fields while other threads might
+ * read them from tcp_get_info()
+ */
+ WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
if (skb->len > tcp_hdrlen(skb))
- tp->data_segs_in += segs_in;
+ WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}
/*
@@ -2193,7 +2551,7 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
- int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+ int (*get_info)(struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);
/* clone ulp */
void (*clone)(const struct request_sock *req, struct sock *newsk,
@@ -2214,25 +2572,49 @@ void tcp_update_ulp(struct sock *sk, struct proto *p,
__MODULE_INFO(alias, alias_userspace, name); \
__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;
-#ifdef CONFIG_BPF_STREAM_PARSER
-struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+#ifdef CONFIG_BPF_SYSCALL
+int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#endif /* CONFIG_BPF_SYSCALL */
+
+#ifdef CONFIG_INET
+void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
-static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
-#endif /* CONFIG_BPF_STREAM_PARSER */
+#endif
-#ifdef CONFIG_NET_SOCK_MSG
-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
- int flags);
-int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
- struct msghdr *msg, int len, int flags);
+int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
+ struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */
+#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
+static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+{
+}
+#endif
+
+#ifdef CONFIG_CGROUP_BPF
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+ skops->skb = skb;
+ skops->skb_data_end = skb->data + end_offset;
+}
+#else
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+}
+#endif
+
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
@@ -2305,7 +2687,7 @@ static inline u32 tcp_timeout_init(struct sock *sk)
if (timeout <= 0)
timeout = TCP_TIMEOUT_INIT;
- return timeout;
+ return min_t(int, timeout, TCP_RTO_MAX);
}
static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
@@ -2363,4 +2745,116 @@ static inline u64 tcp_transmit_time(const struct sock *sk)
return 0;
}
+static inline int tcp_parse_auth_options(const struct tcphdr *th,
+ const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
+{
+ const u8 *md5_tmp, *ao_tmp;
+ int ret;
+
+ ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
+ if (ret)
+ return ret;
+
+ if (md5_hash)
+ *md5_hash = md5_tmp;
+
+ if (aoh) {
+ if (!ao_tmp)
+ *aoh = NULL;
+ else
+ *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
+ }
+
+ return 0;
+}
+
+static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
+ int family, int l3index, bool stat_inc)
+{
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info *ao_info;
+ struct tcp_ao_key *ao_key;
+
+ if (!static_branch_unlikely(&tcp_ao_needed.key))
+ return false;
+
+ ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
+ lockdep_sock_is_held(sk));
+ if (!ao_info)
+ return false;
+
+ ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
+ if (ao_info->ao_required || ao_key) {
+ if (stat_inc) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
+ atomic64_inc(&ao_info->counters.ao_required);
+ }
+ return true;
+ }
+#endif
+ return false;
+}
+
+/* Called with rcu_read_lock() */
+static inline enum skb_drop_reason
+tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_ao_hdr *aoh;
+ const __u8 *md5_location;
+ int l3index;
+
+	/* Invalid option, or the same auth option present more than once */
+ if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
+ tcp_hash_fail("TCP segment has incorrect auth options set",
+ family, skb, "");
+ return SKB_DROP_REASON_TCP_AUTH_HDR;
+ }
+
+ if (req) {
+ if (tcp_rsk_used_ao(req) != !!aoh) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
+ tcp_hash_fail("TCP connection can't start/end using TCP-AO",
+ family, skb, "%s",
+ !aoh ? "missing AO" : "AO signed");
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ }
+ }
+
+	/* If sdif is set, the packet ingressed via a device in an L3
+	 * domain, and dif is set to the l3mdev.
+	 */
+ l3index = sdif ? dif : 0;
+
+ /* Fast path: unsigned segments */
+ if (likely(!md5_location && !aoh)) {
+ /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid
+ * for the remote peer. On TCP-AO established connection
+ * the last key is impossible to remove, so there's
+ * always at least one current_key.
+ */
+ if (tcp_ao_required(sk, saddr, family, l3index, true)) {
+ tcp_hash_fail("AO hash is required, but not found",
+ family, skb, "L3 index %d", l3index);
+ return SKB_DROP_REASON_TCP_AONOTFOUND;
+ }
+ if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+ tcp_hash_fail("MD5 Hash not found",
+ family, skb, "L3 index %d", l3index);
+ return SKB_DROP_REASON_TCP_MD5NOTFOUND;
+ }
+ return SKB_NOT_DROPPED_YET;
+ }
+
+ if (aoh)
+ return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
+
+ return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
+ l3index, md5_location);
+}
+
#endif /* _TCP_H */
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
new file mode 100644
index 000000000000..471e177362b4
--- /dev/null
+++ b/include/net/tcp_ao.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _TCP_AO_H
+#define _TCP_AO_H
+
+#define TCP_AO_KEY_ALIGN 1
+#define __tcp_ao_key_align __aligned(TCP_AO_KEY_ALIGN)
+
+union tcp_ao_addr {
+ struct in_addr a4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr a6;
+#endif
+};
+
+struct tcp_ao_hdr {
+ u8 kind;
+ u8 length;
+ u8 keyid;
+ u8 rnext_keyid;
+};
+
+struct tcp_ao_counters {
+ atomic64_t pkt_good;
+ atomic64_t pkt_bad;
+ atomic64_t key_not_found;
+ atomic64_t ao_required;
+ atomic64_t dropped_icmp;
+};
+
+struct tcp_ao_key {
+ struct hlist_node node;
+ union tcp_ao_addr addr;
+ u8 key[TCP_AO_MAXKEYLEN] __tcp_ao_key_align;
+ unsigned int tcp_sigpool_id;
+ unsigned int digest_size;
+ int l3index;
+ u8 prefixlen;
+ u8 family;
+ u8 keylen;
+ u8 keyflags;
+ u8 sndid;
+ u8 rcvid;
+ u8 maclen;
+ struct rcu_head rcu;
+ atomic64_t pkt_good;
+ atomic64_t pkt_bad;
+ u8 traffic_keys[];
+};
+
+static inline u8 *rcv_other_key(struct tcp_ao_key *key)
+{
+ return key->traffic_keys;
+}
+
+static inline u8 *snd_other_key(struct tcp_ao_key *key)
+{
+ return key->traffic_keys + key->digest_size;
+}
+
+static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
+{
+ return key->maclen;
+}
+
+/* Use tcp_ao_len_aligned() for TCP header calculations */
+static inline int tcp_ao_len(const struct tcp_ao_key *key)
+{
+ return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
+}
+
+static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
+{
+ return round_up(tcp_ao_len(key), 4);
+}
+
+static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
+{
+ return key->digest_size;
+}
+
+static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
+{
+ return sizeof(struct tcp_ao_key) + (key->digest_size << 1);
+}
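
Taken together, these helpers pin down the layout of a tcp_ao_key allocation: traffic_keys[] stores the receive-direction traffic key first and the send-direction key right after it, each digest_size bytes long, hence the digest_size << 1 above. A worked example with assumed HMAC-SHA1 sizes (digest_size = 20, maclen = 12):

/* Illustrative layout of one allocation:
 *
 *   [ struct tcp_ao_key | rcv traffic key (20 B) | snd traffic key (20 B) ]
 *
 * tcp_ao_sizeof_key()  = sizeof(struct tcp_ao_key) + 40
 * tcp_ao_len()         = 12 + sizeof(struct tcp_ao_hdr) = 16
 * tcp_ao_len_aligned() = round_up(16, 4) = 16
 */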
+
+struct tcp_ao_info {
+ /* List of tcp_ao_key's */
+ struct hlist_head head;
+	/* current_key and rnext_key aren't maintained on listen sockets.
+	 * Their purpose is to cache keys of established connections,
+	 * avoiding needless lookups. Never dereference either of them from
+	 * listen sockets.
+	 * ::current_key may change in RX to the key that was requested by
+	 * the peer, so use READ_ONCE()/WRITE_ONCE() to avoid load/store
+	 * tearing.
+	 * Do the same for ::rnext_key if you don't hold the socket lock
+	 * (it's changed only by a userspace request in setsockopt()).
+	 */
+ struct tcp_ao_key *current_key;
+ struct tcp_ao_key *rnext_key;
+ struct tcp_ao_counters counters;
+ u32 ao_required :1,
+ accept_icmps :1,
+ __unused :30;
+ __be32 lisn;
+ __be32 risn;
+	/* The Sequence Number Extension (SNE) is the upper 4 bytes of SEQ,
+	 * protecting a TCP-AO connection from replayed old TCP segments.
+	 * See RFC5925 (6.2).
+	 * To get the correct SNE, there's a helper, tcp_ao_compute_sne().
+	 * It needs a SEQ basis to know where the lower SEQ numbers are.
+	 * Relative to that basis, it can provide an incremented SNE when
+	 * SEQ rolls over, or a decremented SNE for a segment retransmitted
+	 * from before the rollover.
+	 * - for request sockets such a basis is rcv_isn/snt_isn, which is
+	 *   good enough as it's unexpected to receive 4 Gbytes on a reqsk.
+	 * - for full sockets the basis is rcv_nxt/snd_una. snd_una is
+	 *   taken instead of snd_nxt as currently it's easier to track
+	 *   in tcp_snd_una_update() than to update the SNE in every
+	 *   WRITE_ONCE(tp->snd_nxt, ...).
+	 * - for time-wait sockets the basis is tw_rcv_nxt/tw_snd_nxt.
+	 *   tw_snd_nxt is not expected to change, while tw_rcv_nxt may.
+	 */
+ u32 snd_sne;
+ u32 rcv_sne;
+ refcount_t refcnt; /* Protects twsk destruction */
+ struct rcu_head rcu;
+};
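
As a sketch of the rollover handling described in the comment above (an illustration of the RFC 5925 (6.2) rule, not necessarily the exact body of tcp_ao_compute_sne(); before() is the usual 32-bit TCP sequence comparison from net/tcp.h, and the gap between seq and the basis is assumed to be under 2^31):

static u32 sne_for_seq(u32 basis_sne, u32 basis_seq, u32 seq)
{
	u32 sne = basis_sne;

	if (before(seq, basis_seq)) {
		/* Logically earlier but numerically larger: seq is a
		 * retransmit from before the basis wrapped, so it belongs
		 * to the previous SNE epoch.
		 */
		if (seq > basis_seq)
			sne--;
	} else {
		/* Logically later but numerically smaller: seq has wrapped
		 * past the basis into the next SNE epoch.
		 */
		if (seq < basis_seq)
			sne++;
	}
	return sne;
}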
+
+#ifdef CONFIG_TCP_MD5SIG
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_md5_needed;
+#define static_branch_tcp_md5() static_branch_unlikely(&tcp_md5_needed.key)
+#else
+#define static_branch_tcp_md5() false
+#endif
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_ao_needed;
+#define static_branch_tcp_ao() static_branch_unlikely(&tcp_ao_needed.key)
+#else
+#define static_branch_tcp_ao() false
+#endif
+
+static inline bool tcp_hash_should_produce_warnings(void)
+{
+ return static_branch_tcp_md5() || static_branch_tcp_ao();
+}
+
+#define tcp_hash_fail(msg, family, skb, fmt, ...) \
+do { \
+ const struct tcphdr *th = tcp_hdr(skb); \
+ char hdr_flags[6]; \
+ char *f = hdr_flags; \
+ \
+ if (!tcp_hash_should_produce_warnings()) \
+ break; \
+ if (th->fin) \
+ *f++ = 'F'; \
+ if (th->syn) \
+ *f++ = 'S'; \
+ if (th->rst) \
+ *f++ = 'R'; \
+ if (th->psh) \
+ *f++ = 'P'; \
+ if (th->ack) \
+ *f++ = '.'; \
+ *f = 0; \
+ if ((family) == AF_INET) { \
+ net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
+ msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
+ &ip_hdr(skb)->daddr, ntohs(th->dest), \
+ hdr_flags, ##__VA_ARGS__); \
+ } else { \
+ net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
+ msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
+ &ipv6_hdr(skb)->daddr, ntohs(th->dest), \
+ hdr_flags, ##__VA_ARGS__); \
+ } \
+} while (0)
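
For illustration, a failed IPv4 lookup reported through this macro comes out roughly as (addresses and ports hypothetical):

    MD5 Hash not found for 192.0.2.1.443->192.0.2.2.38280 [S.] L3 index 0

where the bracketed string is the header-flags summary assembled above.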
+
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+struct tcp4_ao_context {
+ __be32 saddr;
+ __be32 daddr;
+ __be16 sport;
+ __be16 dport;
+ __be32 sisn;
+ __be32 disn;
+};
+
+struct tcp6_ao_context {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ __be16 sport;
+ __be16 dport;
+ __be32 sisn;
+ __be32 disn;
+};
+
+struct tcp_sigpool;
+#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
+ TCPF_CLOSE | TCPF_CLOSE_WAIT | \
+ TCPF_LAST_ACK | TCPF_CLOSING)
+
+int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_ao_key *key, struct tcphdr *th,
+ __u8 *hash_location);
+int tcp_ao_hash_skb(unsigned short int family,
+ char *ao_hash, struct tcp_ao_key *key,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
+ sockptr_t optval, int optlen);
+struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+ int sndid, int rcvid);
+int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
+ struct request_sock *req, struct sk_buff *skb,
+ int family);
+int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx,
+ unsigned int len, struct tcp_sigpool *hp);
+void tcp_ao_destroy_sock(struct sock *sk, bool twsk);
+void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp);
+bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code);
+int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen);
+int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen);
+int tcp_ao_get_repair(struct sock *sk, sockptr_t optval, sockptr_t optlen);
+int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen);
+enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
+ const struct sk_buff *skb, unsigned short int family,
+ const struct request_sock *req, int l3index,
+ const struct tcp_ao_hdr *aoh);
+u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq);
+struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_ao_addr *addr,
+ int family, int sndid, int rcvid);
+int tcp_ao_hash_hdr(unsigned short family, char *ao_hash,
+ struct tcp_ao_key *key, const u8 *tkey,
+ const union tcp_ao_addr *daddr,
+ const union tcp_ao_addr *saddr,
+ const struct tcphdr *th, u32 sne);
+int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb,
+ const struct tcp_ao_hdr *aoh, int l3index, u32 seq,
+ struct tcp_ao_key **key, char **traffic_key,
+ bool *allocated_traffic_key, u8 *keyid, u32 *sne);
+
+/* ipv4 specific functions */
+int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen);
+struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk,
+ int sndid, int rcvid);
+int tcp_v4_ao_synack_hash(char *ao_hash, struct tcp_ao_key *mkt,
+ struct request_sock *req, const struct sk_buff *skb,
+ int hash_offset, u32 sne);
+int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
+ const struct sock *sk,
+ __be32 sisn, __be32 disn, bool send);
+int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
+ struct request_sock *req);
+struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk,
+ struct request_sock *req,
+ int sndid, int rcvid);
+int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+/* ipv6 specific functions */
+int tcp_v6_ao_hash_pseudoheader(struct tcp_sigpool *hp,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr, int nbytes);
+int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
+ const struct sk_buff *skb, __be32 sisn, __be32 disn);
+int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
+ const struct sock *sk, __be32 sisn,
+ __be32 disn, bool send);
+int tcp_v6_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
+ struct request_sock *req);
+struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk,
+ struct sock *addr_sk, int sndid, int rcvid);
+struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk,
+ struct request_sock *req,
+ int sndid, int rcvid);
+int tcp_v6_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+int tcp_v6_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen);
+int tcp_v6_ao_synack_hash(char *ao_hash, struct tcp_ao_key *ao_key,
+ struct request_sock *req, const struct sk_buff *skb,
+ int hash_offset, u32 sne);
+void tcp_ao_established(struct sock *sk);
+void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb);
+void tcp_ao_connect_init(struct sock *sk);
+void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
+ struct request_sock *req, unsigned short int family);
+#else /* CONFIG_TCP_AO */
+
+static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_ao_key *key, struct tcphdr *th,
+ __u8 *hash_location)
+{
+ return 0;
+}
+
+static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
+ struct request_sock *req, unsigned short int family)
+{
+}
+
+static inline bool tcp_ao_ignore_icmp(const struct sock *sk, int family,
+ int type, int code)
+{
+ return false;
+}
+
+static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
+ const struct sk_buff *skb, unsigned short int family,
+ const struct request_sock *req, int l3index,
+ const struct tcp_ao_hdr *aoh)
+{
+ return SKB_NOT_DROPPED_YET;
+}
+
+static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk,
+ int l3index, const union tcp_ao_addr *addr,
+ int family, int sndid, int rcvid)
+{
+ return NULL;
+}
+
+static inline void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
+{
+}
+
+static inline void tcp_ao_established(struct sock *sk)
+{
+}
+
+static inline void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static inline void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw,
+ struct tcp_sock *tp)
+{
+}
+
+static inline void tcp_ao_connect_init(struct sock *sk)
+{
+}
+
+static inline int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int tcp_ao_get_repair(struct sock *sk,
+ sockptr_t optval, sockptr_t optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int tcp_ao_set_repair(struct sock *sk,
+ sockptr_t optval, unsigned int optlen)
+{
+ return -ENOPROTOOPT;
+}
+#endif
+
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
+int tcp_do_parse_auth_options(const struct tcphdr *th,
+ const u8 **md5_hash, const u8 **ao_hash);
+#else
+static inline int tcp_do_parse_auth_options(const struct tcphdr *th,
+ const u8 **md5_hash, const u8 **ao_hash)
+{
+ *md5_hash = NULL;
+ *ao_hash = NULL;
+ return 0;
+}
+#endif
+
+#endif /* _TCP_AO_H */
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index cc00118acca1..d60e8148ff4c 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -22,6 +22,7 @@ enum {
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
TCP_NEW_SYN_RECV,
+ TCP_BOUND_INACTIVE, /* Pseudo-state for inet_diag */
TCP_MAX_STATES /* Leave at the end! */
};
@@ -43,6 +44,7 @@ enum {
TCPF_LISTEN = (1 << TCP_LISTEN),
TCPF_CLOSING = (1 << TCP_CLOSING),
TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV),
+ TCPF_BOUND_INACTIVE = (1 << TCP_BOUND_INACTIVE),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/tcx.h b/include/net/tcx.h
new file mode 100644
index 000000000000..04be9377785d
--- /dev/null
+++ b/include/net/tcx.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __NET_TCX_H
+#define __NET_TCX_H
+
+#include <linux/bpf.h>
+#include <linux/bpf_mprog.h>
+
+#include <net/sch_generic.h>
+
+struct mini_Qdisc;
+
+struct tcx_entry {
+ struct mini_Qdisc __rcu *miniq;
+ struct bpf_mprog_bundle bundle;
+ bool miniq_active;
+ struct rcu_head rcu;
+};
+
+struct tcx_link {
+ struct bpf_link link;
+ struct net_device *dev;
+ u32 location;
+};
+
+static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress)
+{
+#ifdef CONFIG_NET_XGRESS
+ skb->tc_at_ingress = ingress;
+#endif
+}
+
+#ifdef CONFIG_NET_XGRESS
+static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry)
+{
+ struct bpf_mprog_bundle *bundle = entry->parent;
+
+ return container_of(bundle, struct tcx_entry, bundle);
+}
+
+static inline struct tcx_link *tcx_link(const struct bpf_link *link)
+{
+ return container_of(link, struct tcx_link, link);
+}
+
+void tcx_inc(void);
+void tcx_dec(void);
+
+static inline void tcx_entry_sync(void)
+{
+	/* bpf_mprog_entry got a/b swapped, therefore ensure that
+	 * there are no in-flight users on the old one anymore.
+	 */
+ synchronize_rcu();
+}
+
+static inline void
+tcx_entry_update(struct net_device *dev, struct bpf_mprog_entry *entry,
+ bool ingress)
+{
+ ASSERT_RTNL();
+ if (ingress)
+ rcu_assign_pointer(dev->tcx_ingress, entry);
+ else
+ rcu_assign_pointer(dev->tcx_egress, entry);
+}
+
+static inline struct bpf_mprog_entry *
+tcx_entry_fetch(struct net_device *dev, bool ingress)
+{
+ ASSERT_RTNL();
+ if (ingress)
+ return rcu_dereference_rtnl(dev->tcx_ingress);
+ else
+ return rcu_dereference_rtnl(dev->tcx_egress);
+}
+
+static inline struct bpf_mprog_entry *tcx_entry_create(void)
+{
+ struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL);
+
+ if (tcx) {
+ bpf_mprog_bundle_init(&tcx->bundle);
+ return &tcx->bundle.a;
+ }
+ return NULL;
+}
+
+static inline void tcx_entry_free(struct bpf_mprog_entry *entry)
+{
+ kfree_rcu(tcx_entry(entry), rcu);
+}
+
+static inline struct bpf_mprog_entry *
+tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created)
+{
+ struct bpf_mprog_entry *entry = tcx_entry_fetch(dev, ingress);
+
+ *created = false;
+ if (!entry) {
+ entry = tcx_entry_create();
+ if (!entry)
+ return NULL;
+ *created = true;
+ }
+ return entry;
+}
+
+static inline void tcx_skeys_inc(bool ingress)
+{
+ tcx_inc();
+ if (ingress)
+ net_inc_ingress_queue();
+ else
+ net_inc_egress_queue();
+}
+
+static inline void tcx_skeys_dec(bool ingress)
+{
+ if (ingress)
+ net_dec_ingress_queue();
+ else
+ net_dec_egress_queue();
+ tcx_dec();
+}
+
+static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
+ const bool active)
+{
+ ASSERT_RTNL();
+ tcx_entry(entry)->miniq_active = active;
+}
+
+static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
+{
+ ASSERT_RTNL();
+ return bpf_mprog_total(entry) || tcx_entry(entry)->miniq_active;
+}
+
+static inline enum tcx_action_base tcx_action_code(struct sk_buff *skb,
+ int code)
+{
+ switch (code) {
+ case TCX_PASS:
+ skb->tc_index = qdisc_skb_cb(skb)->tc_classid;
+ fallthrough;
+ case TCX_DROP:
+ case TCX_REDIRECT:
+ return code;
+ case TCX_NEXT:
+ default:
+ return TCX_NEXT;
+ }
+}
+#endif /* CONFIG_NET_XGRESS */
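
For context, a plausible consumer of tcx_action_code() walks the entry's programs until one returns a verdict other than TCX_NEXT. A minimal sketch, assuming the iteration helpers from linux/bpf_mprog.h and ignoring the mac-header push/pull the real fastpath does:

static enum tcx_action_base tcx_run_sketch(const struct bpf_mprog_entry *entry,
					   struct sk_buff *skb)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *prog;
	int ret = TCX_NEXT;

	bpf_mprog_foreach_prog(entry, fp, prog) {
		bpf_compute_data_pointers(skb);
		ret = bpf_prog_run(prog, skb);
		if (ret != TCX_NEXT)
			break;
	}
	return tcx_action_code(skb, ret);
}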
+
+#if defined(CONFIG_NET_XGRESS) && defined(CONFIG_BPF_SYSCALL)
+int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
+void tcx_uninstall(struct net_device *dev, bool ingress);
+
+int tcx_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+static inline void dev_tcx_uninstall(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ tcx_uninstall(dev, true);
+ tcx_uninstall(dev, false);
+}
+#else
+static inline int tcx_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int tcx_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int tcx_prog_detach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int tcx_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline void dev_tcx_uninstall(struct net_device *dev)
+{
+}
+#endif /* CONFIG_NET_XGRESS && CONFIG_BPF_SYSCALL */
+#endif /* __NET_TCX_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index e5dac7e74e79..340ad43971e4 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -39,7 +39,6 @@
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
-#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
@@ -50,6 +49,7 @@
#include <crypto/aead.h>
#include <uapi/linux/tls.h>
+struct tls_rec;
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
@@ -59,14 +59,15 @@
#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
-#define TLS_RECORD_TYPE_DATA 0x17
-
#define TLS_AAD_SPACE_SIZE 13
-#define MAX_IV_SIZE 16
+#define TLS_MAX_IV_SIZE 16
+#define TLS_MAX_SALT_SIZE 4
+#define TLS_TAG_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
+#define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@ -74,15 +75,7 @@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
-
-#define __TLS_INC_STATS(net, field) \
- __SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define TLS_INC_STATS(net, field) \
- SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define __TLS_DEC_STATS(net, field) \
- __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
-#define TLS_DEC_STATS(net, field) \
- SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+#define TLS_SM4_CCM_IV_B0_BYTE 2
enum {
TLS_BASE,
@@ -92,37 +85,6 @@ enum {
TLS_NUM_CONFIG,
};
-/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
- * allocated or mapped for each TLS record. After encryption, the records are
- * stores in a linked list.
- */
-struct tls_rec {
- struct list_head list;
- int tx_ready;
- int tx_flags;
-
- struct sk_msg msg_plaintext;
- struct sk_msg msg_encrypted;
-
- /* AAD | msg_plaintext.sg.data | sg_tag */
- struct scatterlist sg_aead_in[2];
- /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
- struct scatterlist sg_aead_out[2];
-
- char content_type;
- struct scatterlist sg_content_type;
-
- char aad_space[TLS_AAD_SPACE_SIZE];
- u8 iv_data[MAX_IV_SIZE];
- struct aead_request aead_req;
- u8 aead_req_ctx[];
-};
-
-struct tls_msg {
- struct strp_msg rxm;
- u8 control;
-};
-
struct tx_work {
struct delayed_work work;
struct sock *sk;
@@ -135,9 +97,6 @@ struct tls_sw_context_tx {
struct tls_rec *open_rec;
struct list_head tx_list;
atomic_t encrypt_pending;
- /* protect crypto_wait with encrypt_pending */
- spinlock_t encrypt_compl_lock;
- int async_notify;
u8 async_capable:1;
#define BIT_TX_SCHEDULED 0
@@ -145,21 +104,37 @@ struct tls_sw_context_tx {
unsigned long tx_bitmask;
};
+struct tls_strparser {
+ struct sock *sk;
+
+ u32 mark : 8;
+ u32 stopped : 1;
+ u32 copy_mode : 1;
+ u32 mixed_decrypted : 1;
+ u32 msg_ready : 1;
+
+ struct strp_msg stm;
+
+ struct sk_buff *anchor;
+ struct work_struct work;
+};
+
struct tls_sw_context_rx {
struct crypto_aead *aead_recv;
struct crypto_wait async_wait;
- struct strparser strp;
struct sk_buff_head rx_list; /* list of decrypted 'data' records */
void (*saved_data_ready)(struct sock *sk);
- struct sk_buff *recv_pkt;
- u8 control;
+ u8 reader_present;
u8 async_capable:1;
- u8 decrypted:1;
+ u8 zc_capable:1;
+ u8 reader_contended:1;
+
+ struct tls_strparser strp;
+
atomic_t decrypt_pending;
- /* protect crypto_wait with decrypt_pending*/
- spinlock_t decrypt_compl_lock;
- bool async_notify;
+ struct sk_buff_head async_hold;
+ struct wait_queue_head wq;
};
struct tls_record_info {
@@ -170,6 +145,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
+#define TLS_DRIVER_STATE_SIZE_TX 16
struct tls_offload_context_tx {
struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */
@@ -181,29 +157,37 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
- u8 driver_state[] __aligned(8);
+ struct work_struct destruct_work;
+ struct tls_context *ctx;
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
-#define TLS_DRIVER_STATE_SIZE_TX 16
+ u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8);
};
-#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
- (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
-
enum tls_context_flags {
- TLS_RX_SYNC_RUNNING = 0,
+ /* tls_device_down was called after the netdev went down, device state
+ * was released, and kTLS works in software, even though rx_conf is
+ * still TLS_HW (needed for transition).
+ */
+ TLS_RX_DEV_DEGRADED = 0,
/* Unlike RX where resync is driven entirely by the core in TX only
* the driver knows when things went out of sync, so we need the flag
* to be atomic.
*/
TLS_TX_SYNC_SCHED = 1,
+ /* tls_dev_del was called for the RX side, device state was released,
+ * but tls_ctx->netdev might still be kept, because TX-side driver
+ * resources might not be released yet. Used to prevent the second
+ * tls_dev_del call in tls_device_down if it happens simultaneously.
+ */
+ TLS_RX_DEV_CLOSED = 2,
};
struct cipher_context {
- char *iv;
- char *rec_seq;
+ char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
+ char rec_seq[TLS_MAX_REC_SEQ_SIZE];
};
union tls_crypto_context {
@@ -211,6 +195,9 @@ union tls_crypto_context {
union {
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@ -233,6 +220,8 @@ struct tls_context {
u8 tx_conf:3;
u8 rx_conf:3;
+ u8 zerocopy_sendfile:1;
+ u8 rx_no_pad:1;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
@@ -240,7 +229,7 @@ struct tls_context {
void *priv_ctx_tx;
void *priv_ctx_rx;
- struct net_device *netdev;
+ struct net_device __rcu *netdev;
/* rw cache line */
struct cipher_context tx;
@@ -249,7 +238,7 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
- bool in_tcp_sendpages;
+ bool splicing_pages;
bool pending_open_record_frags;
struct mutex tx_lock; /* protects partially_sent_* fields and
@@ -259,6 +248,7 @@ struct tls_context {
/* cache cold stuff */
struct proto *sk_proto;
+ struct sock *sk;
void (*sk_destruct)(struct sock *sk);
@@ -300,10 +290,12 @@ enum tls_offload_sync_type {
#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
struct tls_offload_resync_async {
atomic64_t req;
- u32 loglen;
+ u16 loglen;
+ u16 rcd_delta;
u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};
+#define TLS_DRIVER_STATE_SIZE_RX 8
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
@@ -327,53 +319,13 @@ struct tls_offload_context_rx {
struct tls_offload_resync_async *resync_async;
};
};
- u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
-#define TLS_DRIVER_STATE_SIZE_RX 8
+ u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8);
};
-#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
- (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
-
-struct tls_context *tls_ctx_create(struct sock *sk);
-void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
-void update_sk_prot(struct sock *sk, struct tls_context *ctx);
-
-int wait_on_pending_writer(struct sock *sk, long *timeo);
-int tls_sk_query(struct sock *sk, int optname, char __user *optval,
- int __user *optlen);
-int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
- unsigned int optlen);
-
-int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
-void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
-void tls_sw_strparser_done(struct tls_context *tls_ctx);
-int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-int tls_sw_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
-void tls_sw_release_resources_tx(struct sock *sk);
-void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
-void tls_sw_free_resources_rx(struct sock *sk);
-void tls_sw_release_resources_rx(struct sock *sk);
-void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
-int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
-ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len, unsigned int flags);
-
-int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int tls_device_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-int tls_tx_records(struct sock *sk, int flags);
-
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
@@ -387,64 +339,19 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec)
return rec->end_seq - rec->len;
}
-int tls_push_sg(struct sock *sk, struct tls_context *ctx,
- struct scatterlist *sg, u16 first_offset,
- int flags);
-int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
- int flags);
-void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
-
-static inline struct tls_msg *tls_msg(struct sk_buff *skb)
-{
- return (struct tls_msg *)strp_msg(skb);
-}
-
-static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
-{
- return !!ctx->partially_sent_record;
-}
-
-static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
-{
- return tls_ctx->pending_open_record_frags;
-}
-
-static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
-{
- struct tls_rec *rec;
-
- rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
- if (!rec)
- return false;
-
- return READ_ONCE(rec->tx_ready);
-}
-
-static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
-{
- u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
-
- switch (config) {
- case TLS_BASE:
- return TLS_CONF_BASE;
- case TLS_SW:
- return TLS_CONF_SW;
- case TLS_HW:
- return TLS_CONF_HW;
- case TLS_HW_RECORD:
- return TLS_CONF_HW_RECORD;
- }
- return 0;
-}
-
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
-static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- return sk_fullsock(sk) &&
+#ifdef CONFIG_TLS_DEVICE
+ struct sock *sk = skb->sk;
+
+ return sk && sk_fullsock(sk) &&
(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
@@ -452,25 +359,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
#endif
}
-static inline void tls_err_abort(struct sock *sk, int err)
-{
- sk->sk_err = err;
- sk->sk_error_report(sk);
-}
-
-static inline bool tls_bigint_increment(unsigned char *seq, int len)
-{
- int i;
-
- for (i = len - 1; i >= 0; i--) {
- ++seq[i];
- if (seq[i] != 0)
- break;
- }
-
- return (i == -1);
-}
-
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -481,81 +369,6 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
return (__force void *)icsk->icsk_ulp_data;
}
-static inline void tls_advance_record_sn(struct sock *sk,
- struct tls_prot_info *prot,
- struct cipher_context *ctx)
-{
- if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
- tls_err_abort(sk, EBADMSG);
-
- if (prot->version != TLS_1_3_VERSION)
- tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- prot->iv_size);
-}
-
-static inline void tls_fill_prepend(struct tls_context *ctx,
- char *buf,
- size_t plaintext_len,
- unsigned char record_type,
- int version)
-{
- struct tls_prot_info *prot = &ctx->prot_info;
- size_t pkt_len, iv_size = prot->iv_size;
-
- pkt_len = plaintext_len + prot->tag_size;
- if (version != TLS_1_3_VERSION) {
- pkt_len += iv_size;
-
- memcpy(buf + TLS_NONCE_OFFSET,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
- }
-
- /* we cover nonce explicit here as well, so buf should be of
- * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
- */
- buf[0] = version == TLS_1_3_VERSION ?
- TLS_RECORD_TYPE_DATA : record_type;
- /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
- buf[1] = TLS_1_2_VERSION_MINOR;
- buf[2] = TLS_1_2_VERSION_MAJOR;
- /* we can use IV for nonce explicit according to spec */
- buf[3] = pkt_len >> 8;
- buf[4] = pkt_len & 0xFF;
-}
-
-static inline void tls_make_aad(char *buf,
- size_t size,
- char *record_sequence,
- int record_sequence_size,
- unsigned char record_type,
- int version)
-{
- if (version != TLS_1_3_VERSION) {
- memcpy(buf, record_sequence, record_sequence_size);
- buf += 8;
- } else {
- size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
- }
-
- buf[0] = version == TLS_1_3_VERSION ?
- TLS_RECORD_TYPE_DATA : record_type;
- buf[1] = TLS_1_2_VERSION_MAJOR;
- buf[2] = TLS_1_2_VERSION_MINOR;
- buf[3] = size >> 8;
- buf[4] = size & 0xFF;
-}
-
-static inline void xor_iv_with_seq(int version, char *iv, char *seq)
-{
- int i;
-
- if (version == TLS_1_3_VERSION) {
- for (i = 0; i < 8; i++)
- iv[i + 4] ^= seq[i];
- }
-}
-
-
static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
const struct tls_context *tls_ctx)
{
@@ -592,16 +405,12 @@ static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
return !!tls_sw_ctx_rx(ctx);
}
-void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
-void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
-
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
@@ -616,7 +425,6 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
-#endif
#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
@@ -639,6 +447,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
rx_ctx->resync_async->loglen = 0;
+ rx_ctx->resync_async->rcd_delta = 0;
}
static inline void
@@ -670,35 +479,11 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
return ret;
}
-int __net_init tls_proc_init(struct net *net);
-void __net_exit tls_proc_fini(struct net *net);
-
-int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
- unsigned char *record_type);
-int decrypt_skb(struct sock *sk, struct sk_buff *skb,
- struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
-struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-
-int tls_sw_fallback_init(struct sock *sk,
- struct tls_offload_context_tx *offload_ctx,
- struct tls_crypto_info *crypto_info);
-
#ifdef CONFIG_TLS_DEVICE
-void tls_device_init(void);
-void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
-int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
-void tls_device_free_resources_tx(struct sock *sk);
-int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
-void tls_device_offload_cleanup_rx(struct sock *sk);
-void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
-int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
- struct sk_buff *skb, struct strp_msg *rxm);
static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
@@ -707,33 +492,5 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
return false;
return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
-#else
-static inline void tls_device_init(void) {}
-static inline void tls_device_cleanup(void) {}
-
-static inline int
-tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void tls_device_free_resources_tx(struct sock *sk) {}
-
-static inline int
-tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
-static inline void
-tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
-
-static inline int
-tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
- struct sk_buff *skb, struct strp_msg *rxm)
-{
- return 0;
-}
#endif
#endif /* _TLS_OFFLOAD_H */
diff --git a/include/net/tls_prot.h b/include/net/tls_prot.h
new file mode 100644
index 000000000000..68a40756440b
--- /dev/null
+++ b/include/net/tls_prot.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates.
+ *
+ * TLS Protocol definitions
+ *
+ * From https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ */
+
+#ifndef _TLS_PROT_H
+#define _TLS_PROT_H
+
+/*
+ * TLS Record protocol: ContentType
+ */
+enum {
+ TLS_RECORD_TYPE_CHANGE_CIPHER_SPEC = 20,
+ TLS_RECORD_TYPE_ALERT = 21,
+ TLS_RECORD_TYPE_HANDSHAKE = 22,
+ TLS_RECORD_TYPE_DATA = 23,
+ TLS_RECORD_TYPE_HEARTBEAT = 24,
+ TLS_RECORD_TYPE_TLS12_CID = 25,
+ TLS_RECORD_TYPE_ACK = 26,
+};
+
+/*
+ * TLS Alert protocol: AlertLevel
+ */
+enum {
+ TLS_ALERT_LEVEL_WARNING = 1,
+ TLS_ALERT_LEVEL_FATAL = 2,
+};
+
+/*
+ * TLS Alert protocol: AlertDescription
+ */
+enum {
+ TLS_ALERT_DESC_CLOSE_NOTIFY = 0,
+ TLS_ALERT_DESC_UNEXPECTED_MESSAGE = 10,
+ TLS_ALERT_DESC_BAD_RECORD_MAC = 20,
+ TLS_ALERT_DESC_RECORD_OVERFLOW = 22,
+ TLS_ALERT_DESC_HANDSHAKE_FAILURE = 40,
+ TLS_ALERT_DESC_BAD_CERTIFICATE = 42,
+ TLS_ALERT_DESC_UNSUPPORTED_CERTIFICATE = 43,
+ TLS_ALERT_DESC_CERTIFICATE_REVOKED = 44,
+ TLS_ALERT_DESC_CERTIFICATE_EXPIRED = 45,
+ TLS_ALERT_DESC_CERTIFICATE_UNKNOWN = 46,
+ TLS_ALERT_DESC_ILLEGAL_PARAMETER = 47,
+ TLS_ALERT_DESC_UNKNOWN_CA = 48,
+ TLS_ALERT_DESC_ACCESS_DENIED = 49,
+ TLS_ALERT_DESC_DECODE_ERROR = 50,
+ TLS_ALERT_DESC_DECRYPT_ERROR = 51,
+ TLS_ALERT_DESC_TOO_MANY_CIDS_REQUESTED = 52,
+ TLS_ALERT_DESC_PROTOCOL_VERSION = 70,
+ TLS_ALERT_DESC_INSUFFICIENT_SECURITY = 71,
+ TLS_ALERT_DESC_INTERNAL_ERROR = 80,
+ TLS_ALERT_DESC_INAPPROPRIATE_FALLBACK = 86,
+ TLS_ALERT_DESC_USER_CANCELED = 90,
+ TLS_ALERT_DESC_MISSING_EXTENSION = 109,
+ TLS_ALERT_DESC_UNSUPPORTED_EXTENSION = 110,
+ TLS_ALERT_DESC_UNRECOGNIZED_NAME = 112,
+ TLS_ALERT_DESC_BAD_CERTIFICATE_STATUS_RESPONSE = 113,
+ TLS_ALERT_DESC_UNKNOWN_PSK_IDENTITY = 115,
+ TLS_ALERT_DESC_CERTIFICATE_REQUIRED = 116,
+ TLS_ALERT_DESC_NO_APPLICATION_PROTOCOL = 120,
+};
+
+#endif /* _TLS_PROT_H */
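
These constants map one-to-one onto the wire format: a TLS Alert body is exactly two bytes, AlertLevel followed by AlertDescription. A minimal sketch of decoding one (the buffer handling is hypothetical):

static int tls_alert_decode(const u8 *buf, size_t len, u8 *level, u8 *desc)
{
	if (len < 2)
		return -EINVAL;
	*level = buf[0];	/* TLS_ALERT_LEVEL_WARNING or _FATAL */
	*desc = buf[1];		/* one of TLS_ALERT_DESC_* */
	return 0;
}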
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index da06613c9603..1a97e3f32029 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -3,6 +3,7 @@
#define _TRANSP_V6_H
#include <net/checksum.h>
+#include <net/sock.h>
/* IPv6 transport protocols */
extern struct proto rawv6_prot;
@@ -12,6 +13,7 @@ extern struct proto tcpv6_prot;
extern struct proto pingv6_prot;
struct flowi6;
+struct ipcm6_cookie;
/* extension headers */
int ipv6_exthdrs_init(void);
@@ -31,8 +33,6 @@ void udplitev6_exit(void);
int tcpv6_init(void);
void tcpv6_exit(void);
-int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
-
/* this does all the common and the specific ctl work */
void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
@@ -56,8 +56,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
-void inet6_destroy_sock(struct sock *sk);
-
#define IPV6_SEQ_DGRAM_HEADER \
" sl " \
"local_address " \
diff --git a/include/net/tso.h b/include/net/tso.h
index 62c98a9c60f1..e7e157ae0526 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -2,6 +2,7 @@
#ifndef _TSO_H
#define _TSO_H
+#include <linux/skbuff.h>
#include <net/ip.h>
#define TSO_HEADER_SIZE 256
@@ -16,7 +17,12 @@ struct tso_t {
u32 tcp_seq;
};
-int tso_count_descs(const struct sk_buff *skb);
+/* Calculate the worst-case buffer count */
+static inline int tso_count_descs(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
+}
+
void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
int size, bool is_last);
void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
diff --git a/include/net/tun_proto.h b/include/net/tun_proto.h
index 2ea3deba4c99..7b0de7852908 100644
--- a/include/net/tun_proto.h
+++ b/include/net/tun_proto.h
@@ -1,7 +1,8 @@
#ifndef __NET_TUN_PROTO_H
#define __NET_TUN_PROTO_H
-#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
/* One byte protocol values as defined by VXLAN-GPE and NSH. These will
* hopefully get a shared IANA registry.
diff --git a/include/net/udp.h b/include/net/udp.h
index 295d52a73598..488a6d2babcc 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
+#include <net/gso.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -95,6 +96,7 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
extern struct proto udp_prot;
extern atomic_long_t udp_memory_allocated;
+DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
@@ -164,34 +166,22 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}
-typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
__be16 dport);
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
- struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
- struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
-struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
- struct udphdr *uh, struct sock *sk);
-int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+void udp_v6_early_demux(struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
- netdev_features_t features);
+ netdev_features_t features, bool is_ipv6);
-static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+static inline void udp_lib_init_sock(struct sock *sk)
{
- struct udphdr *uh;
- unsigned int hlen, off;
+ struct udp_sock *up = udp_sk(sk);
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen))
- uh = skb_gro_header_slow(skb, hlen, off);
-
- return uh;
+ skb_queue_head_init(&up->reader_queue);
+ up->forward_threshold = sk->sk_rcvbuf >> 2;
+ set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
}
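
Note that the forwarding threshold starts at a quarter of the socket receive buffer, e.g.:

/* sk_rcvbuf = 262144 (256 KiB)  ->  forward_threshold = 262144 >> 2 = 65536 */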
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
@@ -259,7 +249,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
bound_dev_if, dif, sdif);
#else
return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
@@ -267,34 +257,32 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
}
/* net/ipv4/udp.c */
-void udp_destruct_sock(struct sock *sk);
+void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
-struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *off, int *err);
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
+ int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *err)
+ int *err)
{
int off = 0;
- return __skb_recv_udp(sk, flags, noblock, &off, err);
+ return __skb_recv_udp(sk, flags, &off, err);
}
int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
-int udp_get_port(struct sock *sk, unsigned short snum,
- int (*saddr_cmp)(const struct sock *,
- const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+void udp_splice_eof(struct socket *sock);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
-int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_ioctl(struct sock *sk, int cmd, int *karg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
@@ -313,7 +301,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif,
struct udp_table *tbl, struct sk_buff *skb);
-struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
@@ -324,8 +312,9 @@ struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
-struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
+int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
* possibly multiple cache miss on dequeue()
@@ -447,7 +436,6 @@ struct udp_seq_afinfo {
struct udp_iter_state {
struct seq_net_private p;
int bucket;
- struct udp_seq_afinfo *bpf_seq_afinfo;
};
void *udp_seq_start(struct seq_file *seq, loff_t *pos);
@@ -467,6 +455,7 @@ void udp_init(void);
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
+void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
@@ -488,8 +477,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
* CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
* packets in udp_gro_complete_segment. As does UDP GSO, verified by
* udp_send_skb. But when those packets are looped in dev_loopback_xmit
- * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
- * specific case, where PARTIAL is both correct and required.
+ * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
+ * Reset in this specific case, where PARTIAL is both correct and
+ * required.
*/
if (skb->pkt_type == PACKET_LOOPBACK)
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -511,9 +501,32 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
return segs;
}
-#ifdef CONFIG_BPF_STREAM_PARSER
+static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
+{
+ /* UDP-lite can't land here - no GRO */
+ WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);
+
+ /* UDP packets generated with UDP_SEGMENT and traversing:
+ *
+ * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
+ *
+	 * can reach a UDP socket with CHECKSUM_NONE, because
+ * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
+ * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
+ * have a valid checksum, as the GRO engine validates the UDP csum
+ * before the aggregation and nobody strips such info in between.
+ * Instead of adding another check in the tunnel fastpath, we can force
+ * a valid csum after the segmentation.
+	 * Additionally, fix up the UDP CB.
+ */
+ UDP_SKB_CB(skb)->cscov = skb->len;
+ if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
+ skb->csum_valid = 1;
+}
+
+#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
-struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
-#endif /* BPF_STREAM_PARSER */
+int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+#endif
#endif /* _UDP_H */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 94bb7a882250..d716214fe03d 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
+ struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_err_lookup_t encap_err_lookup;
+ udp_tunnel_encap_err_rcv_t encap_err_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
@@ -129,12 +133,16 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}
@@ -146,13 +154,30 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck);
void udp_tunnel_sock_release(struct socket *sock);
+struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, int oif,
+ __be32 *saddr,
+ const struct ip_tunnel_key *key,
+ __be16 sport, __be16 dport, u8 tos,
+ struct dst_cache *dst_cache);
+struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net,
+ struct socket *sock, int oif,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_key *key,
+ __be16 sport, __be16 dport, u8 dsfield,
+ struct dst_cache *dst_cache);
+
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id,
int md_size);
@@ -166,20 +191,16 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
}
#endif
-static inline void udp_tunnel_encap_enable(struct socket *sock)
+static inline void udp_tunnel_encap_enable(struct sock *sk)
{
- struct udp_sock *up = udp_sk(sock->sk);
-
- if (up->encap_enabled)
+ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
return;
- up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
- if (sock->sk->sk_family == PF_INET6)
+ if (READ_ONCE(sk->sk_family) == PF_INET6)
ipv6_stub->udpv6_encap_enable();
- else
#endif
- udp_encap_enable();
+ udp_encap_enable();
}
#define UDP_TUNNEL_NIC_MAX_TABLES 4
@@ -200,11 +221,27 @@ enum udp_tunnel_nic_info_flags {
UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
};
+struct udp_tunnel_nic;
+
+#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2)
+
+struct udp_tunnel_nic_shared {
+ struct udp_tunnel_nic *udp_tunnel_nic_info;
+
+ struct list_head devices;
+};
+
+struct udp_tunnel_nic_shared_node {
+ struct net_device *dev;
+ struct list_head list;
+};
+
/**
* struct udp_tunnel_nic_info - driver UDP tunnel offload information
* @set_port: callback for adding a new port
* @unset_port: callback for removing a port
* @sync_table: callback for syncing the entire port table at once
+ * @shared: reference to device global state (optional)
* @flags: device flags from enum udp_tunnel_nic_info_flags
* @tables: UDP port tables this device has
* @tables.n_entries: number of entries in this table
@@ -213,6 +250,12 @@ enum udp_tunnel_nic_info_flags {
* Drivers are expected to provide either @set_port and @unset_port callbacks
* or the @sync_table callback. Callbacks are invoked with rtnl lock held.
*
+ * Devices which (misguidedly) share the UDP tunnel port table across multiple
+ * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
+ * point @shared at it.
+ * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
+ * sharing a table.
+ *
* Known limitations:
* - UDP tunnel port notifications are fundamentally best-effort -
* it is likely the driver will both see skbs which use a UDP tunnel port,
@@ -234,6 +277,8 @@ struct udp_tunnel_nic_info {
/* all at once */
int (*sync_table)(struct net_device *dev, unsigned int table);
+ struct udp_tunnel_nic_shared *shared;
+
unsigned int flags;
struct udp_tunnel_nic_table_info {
@@ -299,6 +344,8 @@ udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
static inline void
udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->add_port(dev, ti);
}
@@ -306,6 +353,8 @@ udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
static inline void
udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->del_port(dev, ti);
}
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 9185e45b997f..786919d29f8d 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -6,6 +6,7 @@
#define _UDPLITE_H
#include <net/ip6_checksum.h>
+#include <net/udp.h>
/* UDP-Lite socket options */
#define UDPLITE_SEND_CSCOV 10 /* sender partial coverage (as sent) */
@@ -24,14 +25,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
}
-/* Designate sk as UDP-Lite socket */
-static inline int udplite_sk_init(struct sock *sk)
-{
- udp_init_sock(sk);
- udp_sk(sk)->pcflag = UDPLITE_BIT;
- return 0;
-}
-
/*
* Checksumming routines
*/
@@ -70,60 +63,21 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-/* Slow-path computation of checksum. Socket is locked. */
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- const struct udp_sock *up = udp_sk(skb->sk);
- int cscov = up->len;
- __wsum csum = 0;
-
- if (up->pcflag & UDPLITE_SEND_CC) {
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket.
- * The special case "up->pcslen == 0" signifies full coverage.
- */
- if (up->pcslen < up->len) {
- if (0 < up->pcslen)
- cscov = up->pcslen;
- udp_hdr(skb)->len = htons(up->pcslen);
- }
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
- }
-
- skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
-
- skb_queue_walk(&sk->sk_write_queue, skb) {
- const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
-
- csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
-
- if ((cscov -= len) <= 0)
- break;
- }
- return csum;
-}
-
/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
+ const struct sock *sk = skb->sk;
int len = skb->len - off;
- if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
- if (0 < up->pcslen)
- len = up->pcslen;
- udp_hdr(skb)->len = htons(up->pcslen);
+ if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
+ u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
+
+ if (pcslen < len) {
+ if (pcslen > 0)
+ len = pcslen;
+ udp_hdr(skb)->len = htons(pcslen);
+ }
}
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -131,6 +85,4 @@ static inline __wsum udplite_csum(struct sk_buff *skb)
}
void udplite4_register(void);
-int udplite_get_port(struct sock *sk, unsigned short snum,
- int (*scmp)(const struct sock *, const struct sock *));
#endif /* _UDPLITE_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 3a41627cbdfe..33ba6fc151cf 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -3,6 +3,7 @@
#define __NET_VXLAN_H 1
#include <linux/if_vlan.h>
+#include <linux/rhashtable-types.h>
#include <net/udp_tunnel.h>
#include <net/dst_metadata.h>
#include <net/rtnetlink.h>
@@ -10,6 +11,7 @@
#include <net/nexthop.h>
#define IANA_VXLAN_UDP_PORT 4789
+#define IANA_VXLAN_GPE_UDP_PORT 4790
/* VXLAN protocol (RFC 7348) header:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -121,6 +123,9 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
+#define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \
+ VXLAN_GBP_ID_MASK)
+
/*
* VXLAN Generic Protocol Extension (VXLAN_F_GPE):
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -205,22 +210,48 @@ struct vxlan_rdst {
};
struct vxlan_config {
- union vxlan_addr remote_ip;
- union vxlan_addr saddr;
- __be32 vni;
- int remote_ifindex;
- int mtu;
- __be16 dst_port;
- u16 port_min;
- u16 port_max;
- u8 tos;
- u8 ttl;
- __be32 label;
- u32 flags;
- unsigned long age_interval;
- unsigned int addrmax;
- bool no_share;
- enum ifla_vxlan_df df;
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ __be32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ u16 port_min;
+ u16 port_max;
+ u8 tos;
+ u8 ttl;
+ __be32 label;
+ enum ifla_vxlan_label_policy label_policy;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+ enum ifla_vxlan_df df;
+};
+
+enum {
+ VXLAN_VNI_STATS_RX,
+ VXLAN_VNI_STATS_RX_DROPS,
+ VXLAN_VNI_STATS_RX_ERRORS,
+ VXLAN_VNI_STATS_TX,
+ VXLAN_VNI_STATS_TX_DROPS,
+ VXLAN_VNI_STATS_TX_ERRORS,
+};
+
+struct vxlan_vni_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ u64 tx_errors;
+};
+
+struct vxlan_vni_stats_pcpu {
+ struct vxlan_vni_stats stats;
+ struct u64_stats_sync syncp;
};
struct vxlan_dev_node {
@@ -228,6 +259,26 @@ struct vxlan_dev_node {
struct vxlan_dev *vxlan;
};
+struct vxlan_vni_node {
+ struct rhash_head vnode;
+ struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
+#endif
+ struct list_head vlist;
+ __be32 vni;
+ union vxlan_addr remote_ip; /* default remote ip for this vni */
+ struct vxlan_vni_stats_pcpu __percpu *stats;
+
+ struct rcu_head rcu;
+};
+
+struct vxlan_vni_group {
+ struct rhashtable vni_hash;
+ struct list_head vni_list;
+ u32 num_vnis;
+};
+
/* Pseudo network device */
struct vxlan_dev {
struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
@@ -250,7 +301,13 @@ struct vxlan_dev {
struct vxlan_config cfg;
+ struct vxlan_vni_group __rcu *vnigrp;
+
struct hlist_head fdb_head[FDB_HASH_SIZE];
+
+ struct rhashtable mdb_tbl;
+ struct hlist_head mdb_list;
+ unsigned int mdb_seq;
};
#define VXLAN_F_LEARN 0x01
@@ -270,6 +327,9 @@ struct vxlan_dev {
#define VXLAN_F_GPE 0x4000
#define VXLAN_F_IPV6_LINKLOCAL 0x8000
#define VXLAN_F_TTL_INHERIT 0x10000
+#define VXLAN_F_VNIFILTER 0x20000
+#define VXLAN_F_MDB 0x40000
+#define VXLAN_F_LOCALBYPASS 0x80000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -279,7 +339,8 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
VXLAN_F_REMCSUM_NOPARTIAL | \
- VXLAN_F_COLLECT_METADATA)
+ VXLAN_F_COLLECT_METADATA | \
+ VXLAN_F_VNIFILTER)
/* Flags that can be set together with VXLAN_F_GPE. */
#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \
@@ -288,7 +349,9 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_UDP_ZERO_CSUM6_TX | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
- VXLAN_F_COLLECT_METADATA)
+ VXLAN_F_COLLECT_METADATA | \
+ VXLAN_F_VNIFILTER | \
+ VXLAN_F_LOCALBYPASS)
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -324,10 +387,15 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
return features;
}
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+static inline int vxlan_headroom(u32 flags)
+{
+ /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */
+ /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
+ return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
+ (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
+}
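
With headroom now a function of the flags, consumers are expected to recompute it whenever the configuration changes. A minimal sketch, assuming a hypothetical helper name (cfg is the vxlan_config embedded in vxlan_dev below):

/* Sketch: GPE drops the inner Ethernet header, IPv6 grows the outer one */
static void vxlan_refresh_headroom(struct net_device *dev,
				   const struct vxlan_dev *vxlan)
{
	dev->needed_headroom = vxlan_headroom(vxlan->cfg.flags);
}
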
static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
{
@@ -489,12 +557,12 @@ static inline void vxlan_flag_attr_error(int attrtype,
}
static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
- int hash,
+ u32 hash,
struct vxlan_rdst *rdst)
{
struct fib_nh_common *nhc;
- nhc = nexthop_path_fdb_result(nh, hash);
+ nhc = nexthop_path_fdb_result(nh, hash >> 1);
if (unlikely(!nhc))
return false;
@@ -512,4 +580,23 @@ static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
return true;
}
+static inline void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, const struct vxlan_metadata *md)
+{
+ struct vxlanhdr_gbp *gbp;
+
+ if (!md->gbp)
+ return;
+
+ gbp = (struct vxlanhdr_gbp *)vxh;
+ vxh->vx_flags |= VXLAN_HF_GBP;
+
+ if (md->gbp & VXLAN_GBP_DONT_LEARN)
+ gbp->dont_learn = 1;
+
+ if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
+ gbp->policy_applied = 1;
+
+ gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
+}
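
A rough sketch of where the helper slots into an encap path; deriving md->gbp from skb->mark with the VXLAN_GBP_MASK defined above is an assumed convention here, not something this hunk establishes:

static void sketch_vxlan_tx_gbp(struct sk_buff *skb, struct vxlanhdr *vxh,
				u32 flags)
{
	struct vxlan_metadata md = {};

	md.gbp = skb->mark & VXLAN_GBP_MASK;	/* policy id + flag bits */
	if (flags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, &md);	/* folds bits into vxh */
}
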
+
#endif
diff --git a/include/net/wimax.h b/include/net/wimax.h
deleted file mode 100644
index f6e31d2f47aa..000000000000
--- a/include/net/wimax.h
+++ /dev/null
@@ -1,503 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Kernel space API for accessing WiMAX devices
- *
- * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * The WiMAX stack provides an API for controlling and managing the
- * system's WiMAX devices. This API affects the control plane; the
- * data plane is accessed via the network stack (netdev).
- *
- * Parts of the WiMAX stack API and notifications are exported to
- * user space via Generic Netlink. In user space, libwimax (part of
- * the wimax-tools package) provides a shim layer for accessing those
- * calls.
- *
- * The API is standardized for all WiMAX devices and different drivers
- * implement the backend support for it. However, device-specific
- * messaging pipes are provided that can be used to issue commands and
- * receive notifications in free form.
- *
- * Currently the messaging pipes are the only means of control as it
- * is not known (due to the lack of more devices in the market) what
- * will be a good abstraction layer. Expect this to change as more
- * devices show up in the market. This API is designed to be growable in
- * order to address this problem.
- *
- * USAGE
- *
- * Embed a `struct wimax_dev` at the beginning of the device's
- * private structure, initialize and register it. For details, see
- * `struct wimax_dev`'s documentation.
- *
- * Once this is done, wimax-tools's libwimaxll can be used to
- * communicate with the driver from user space. Your user space
- * application does not have to forcibly use libwimaxll and can talk
- * the generic netlink protocol directly if desired.
- *
- * Remember this is a very low level API that will provide all of the
- * WiMAX features. Other daemons and services running in user space
- * are the expected clients of it. They offer a higher level API that
- * applications should use (an example of this is the Intel's WiMAX
- * Network Service for the i2400m).
- *
- * DESIGN
- *
- * Although not set in stone, this very basic interface is mostly
- * complete. Remember this is meant to grow as new common
- * operations are decided upon. New operations will be added to the
- * interface, intent being on keeping backwards compatibility as much
- * as possible.
- *
- * This layer implements a set of calls to control a WiMAX device,
- * exposing a frontend to the rest of the kernel and user space (via
- * generic netlink) and a backend implementation in the driver through
- * function pointers.
- *
- * WiMAX devices have a state, and a kernel-only API allows the
- * drivers to manipulate that state. State transitions are atomic, and
- * only some of them are allowed (see `enum wimax_st`).
- *
- * Most API calls will set the state automatically; in most cases
- * drivers have to only report state changes due to external
- * conditions.
- *
- * All API operations are 'atomic', serialized through a mutex in the
- * `struct wimax_dev`.
- *
- * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK
- *
- * The API is exported to user space using generic netlink (other
- * methods can be added as needed).
- *
- * There is a Generic Netlink Family named "WiMAX", where interfaces
- * supporting the WiMAX interface receive commands and broadcast their
- * signals over a multicast group named "msg".
- *
- * Mapping to the source/destination interface is done by an interface
- * index attribute.
- *
- * For user-to-kernel traffic (commands) we use a function call
- * marshalling mechanism, where a message X with attributes A, B, C
- * sent from user space to kernel space means executing the WiMAX API
- * call wimax_X(A, B, C), sending the results back as a message.
- *
- * Kernel-to-user (notifications or signals) communication is sent
- * over multicast groups. This allows multiple applications to
- * monitor them.
- *
- * Each command/signal gets assigned its own attribute policy. This
- * way the validator will verify that all the attributes in there are
- * only the ones that should be for each command/signal. Think of an
- * attribute as mapping to a type+argument name for each command/signal.
- *
- * If we had a single policy for *all* commands/signals, after running
- * the validator we'd have to check "does this attribute belong in
- * here"? for each one. It can be done manually, but it's just easier
- * to have the validator do that job with multiple policies. As well,
- * it makes it easier to later expand each command/signal signature
- * without affecting others and keeping the namespace more or less
- * sane. Not that it is too complicated, but it makes it even easier.
- *
- * No state information is maintained in the kernel for each user
- * space connection (the connection is stateless).
- *
- * TESTING FOR THE INTERFACE AND VERSIONING
- *
- * If network interface X is a WiMAX device, there will be a Generic
- * Netlink family named "WiMAX X" and the device will present a
- * "wimax" directory in it's network sysfs directory
- * (/sys/class/net/DEVICE/wimax) [used by HAL].
- *
- * The absence of any of these means the device does not support
- * this WiMAX API.
- *
- * By querying the generic netlink controller, versioning information
- * and the multicast groups available can be found. Applications using
- * the interface can either rely on that or use the generic netlink
- * controller to figure out which generic netlink commands/signals are
- * supported.
- *
- * NOTE: this versioning is a last resort to avoid hard
- * incompatibilities. It is the intention of the design of this
- * stack not to introduce backward incompatible changes.
- *
- * The version code has to fit in one byte (restrictions imposed by
- * generic netlink); we use `version / 10` for the major version and
- * `version % 10` for the minor. This gives 9 minors for each major
- * and 25 majors.
- *
- * The version change protocol is as follows:
- *
- * - Major versions: needs to be increased if an existing message/API
- * call is changed or removed. Doesn't need to be changed if a new
- * message is added.
- *
- * - Minor version: needs to be increased if new messages/API calls are
- * being added or some other consideration that doesn't impact the
- * user-kernel interface too much (like some kind of bug fix) and
- * that is kind of left up in the air to common sense.
- *
- * User space code should not try to work if the major version it was
- * compiled for differs from what the kernel offers. As well, if the
- * minor version of the kernel interface is lower than the one user
- * space is expecting (the one it was compiled for), the kernel
- * might be missing API calls; user space shall be ready to handle
- * said condition. Use the generic netlink controller operations to
- * find which ones are supported and which not.
- *
- * libwimaxll:wimaxll_open() takes care of checking versions.
- *
- * THE OPERATIONS:
- *
- * Each operation is defined in its own file (drivers/net/wimax/op-*.c)
- * for clarity. The parts needed for an operation are:
- *
- * - a function pointer in `struct wimax_dev`: optional, as the
- * operation might be implemented by the stack and not by the
- * driver.
- *
- * All function pointers are named wimax_dev->op_*(), and drivers
- * must implement them except where noted otherwise.
- *
- * - When exported to user space, a `struct nla_policy` to define the
- * attributes of the generic netlink command and a `struct genl_ops`
- * to define the operation.
- *
- * All the declarations for the operation codes (WIMAX_GNL_OP_<NAME>)
- * and generic netlink attributes (WIMAX_GNL_<NAME>_*) are declared in
- * include/linux/wimax.h; this file is intended to be cloned by user
- * space to gain access to those declarations.
- *
- * A few caveats to remember:
- *
- * - Need to define attribute numbers starting in 1; otherwise it
- * fails.
- *
- * - the `struct genl_family` requires a maximum attribute id; when
- * defining the `struct nla_policy` for each message, it has to have
- * an array size of WIMAX_GNL_ATTR_MAX+1.
- *
- * The op_*() function pointers will not be called if the wimax_dev is
- * in a state <= %WIMAX_ST_UNINITIALIZED. The exception is:
- *
- * - op_reset: can be called at any time after wimax_dev_add() has
- * been called.
- *
- * THE PIPE INTERFACE:
- *
- * This interface is kept intentionally simple. The driver can send
- * and receive free-form messages to/from user space through a
- * pipe. See drivers/net/wimax/op-msg.c for details.
- *
- * The kernel-to-user messages are sent with
- * wimax_msg(). User-to-kernel messages are delivered via
- * wimax_dev->op_msg_from_user().
- *
- * RFKILL:
- *
- * RFKILL support is built into the wimax_dev layer; the driver just
- * needs to call wimax_report_rfkill_{hw,sw}() to inform of changes in
- * the hardware or software RF kill switches. When the stack wants to
- * turn the radio off, it will call wimax_dev->op_rfkill_sw_toggle(),
- * which the driver implements.
- *
- * User space can set the software RF Kill switch by calling
- * wimax_rfkill().
- *
- * The code for now only supports devices that don't require polling;
- * if the device needs to be polled, create a self-rearming delayed
- * work struct for polling or look into adding polled support to the
- * WiMAX stack.
- *
- * When initializing the hardware (_probe), after calling
- * wimax_dev_add(), query the device for the status of its RF Kill switches
- * and feed it back to the WiMAX stack using
- * wimax_report_rfkill_{hw,sw}(). If any switch is missing, always
- * report it as ON.
- *
- * NOTE: the wimax stack uses terminology inverted from that of the
- * RFKILL subsystem:
- *
- * - ON: radio is ON, RFKILL is DISABLED or OFF.
- * - OFF: radio is OFF, RFKILL is ENABLED or ON.
- *
- * MISCELLANEOUS OPS:
- *
- * wimax_reset() can be used to reset the device to power on state; by
- * default it issues a warm reset that maintains the same device
- * node. If that is not possible, it falls back to a cold reset
- * (device reconnect). The driver implements the backend to this
- * through wimax_dev->op_reset().
- */
-
-#ifndef __NET__WIMAX_H__
-#define __NET__WIMAX_H__
-
-#include <linux/wimax.h>
-#include <net/genetlink.h>
-#include <linux/netdevice.h>
-
-struct net_device;
-struct genl_info;
-struct wimax_dev;
-
-/**
- * struct wimax_dev - Generic WiMAX device
- *
- * @net_dev: [fill] Pointer to the &struct net_device this WiMAX
- * device implements.
- *
- * @op_msg_from_user: [fill] Driver-specific operation to
- * handle a raw message from user space to the driver. The
- * driver can send messages to user space using
- * wimax_msg_to_user().
- *
- * @op_rfkill_sw_toggle: [fill] Driver-specific operation to act on
- * userspace (or any other agent) requesting the WiMAX device to
- * change the RF Kill software switch (WIMAX_RF_ON or
- * WIMAX_RF_OFF).
- * If such hardware support is not present, it is assumed the
- * radio cannot be switched off and it is always on (and the stack
- * will error out when trying to switch it off). In such case,
- * this function pointer can be left as NULL.
- *
- * @op_reset: [fill] Driver specific operation to reset the
- * device.
- * This operation should always attempt first a warm reset that
- * does not disconnect the device from the bus and return 0.
- * If that fails, it should resort to some sort of cold or bus
- * reset (even if it implies a bus disconnection and device
- * disappearance). In that case, -ENODEV should be returned to
- * indicate the device is gone.
- * This operation has to be synchronous, and return only when the
- * reset is complete. In case of having had to resort to bus/cold
- * reset implying a device disconnection, the call is allowed to
- * return immediately.
- * NOTE: wimax_dev->mutex is NOT locked when this op is being
- * called; however, wimax_dev->mutex_reset IS locked to ensure
- * serialization of calls to wimax_reset().
- * See wimax_reset()'s documentation.
- *
- * @name: [fill] A way to identify this device. We need to register a
- * name with many subsystems (rfkill, workqueue creation, etc).
- * We can't use the network device name as that
- * might change and in some instances we don't know it yet (until
- * we call register_netdev()). So we generate a unique one
- * using the driver name and device bus id, place it here and use
- * it across the board. Recommended naming:
- * DRIVERNAME-BUSNAME:BUSID (dev->bus->name, dev->bus_id).
- *
- * @id_table_node: [private] link to the list of wimax devices kept by
- * id-table.c. Protected by its own spinlock.
- *
- * @mutex: [private] Serializes all concurrent access and execution of
- * operations.
- *
- * @mutex_reset: [private] Serializes reset operations. Needs to be a
- * different mutex because as part of the reset operation, the
- * driver has to call back into the stack to do things such as
- * state change, that require wimax_dev->mutex.
- *
- * @state: [private] Current state of the WiMAX device.
- *
- * @rfkill: [private] integration into the RF-Kill infrastructure.
- *
- * @rf_sw: [private] State of the software radio switch (OFF/ON)
- *
- * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
- *
- * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
- * shows up in the debugfs root as wimax\:DEVICENAME.
- *
- * Description:
- * This structure defines a common interface to access all WiMAX
- * devices from different vendors and provides a common API as well as
- * a free-form device-specific messaging channel.
- *
- * Usage:
- * 1. Embed a &struct wimax_dev at *the beginning* the network
- * device structure so that netdev_priv() points to it.
- *
- * 2. memset() it to zero
- *
- * 3. Initialize with wimax_dev_init(). This will leave the WiMAX
- * device in the %__WIMAX_ST_NULL state.
- *
- * 4. Fill all the fields marked with [fill]; once called
- * wimax_dev_add(), those fields CANNOT be modified.
- *
- * 5. Call wimax_dev_add() *after* registering the network
- * device. This will leave the WiMAX device in the %WIMAX_ST_DOWN
- * state.
- * Protect the driver's net_device->open() against succeeding if
- * the wimax device state is lower than %WIMAX_ST_DOWN.
- *
- * 6. Select when the device is going to be turned on/initialized;
- * for example, it could be initialized on 'ifconfig up' (when the
- * netdev op 'open()' is called on the driver).
- *
- * When the device is initialized (at `ifconfig up` time, or right
- * after calling wimax_dev_add() from _probe()), make sure the
- * following steps are taken:
- *
- * a. Move the device to %WIMAX_ST_UNINITIALIZED. This is needed so
- * some API calls that shouldn't work until the device is ready
- * can be blocked.
- *
- * b. Initialize the device. Make sure to turn the SW radio switch
- * off and move the device to state %WIMAX_ST_RADIO_OFF when
- * done. When just initialized, a device should be left in RADIO
- * OFF state until user space decides to turn it on.
- *
- * c. Query the device for the state of the hardware rfkill switch
- * and call wimax_report_rfkill_hw() and wimax_report_rfkill_sw()
- * as needed. See below.
- *
- * wimax_dev_rm() undoes wimax_dev_add(); call it before unregistering
- * the network device. Once wimax_dev_add() is called, the driver can
- * get called on the wimax_dev->op_* function pointers.
- *
- * CONCURRENCY:
- *
- * The stack provides a mutex for each device that will disallow API
- * calls happening concurrently; thus, op calls into the driver
- * through the wimax_dev->op*() function pointers will always be
- * serialized and *never* concurrent.
- *
- * For locking, wimax_dev->mutex is taken; (most) operations in
- * the API have to check for wimax_dev_is_ready() to return 0 before
- * continuing (this is done internally).
- *
- * REFERENCE COUNTING:
- *
- * The WiMAX device is reference counted by the associated network
- * device. The only operation that can be used to reference the device
- * is wimax_dev_get_by_genl_info(), and the reference it acquires has
- * to be released with dev_put(wimax_dev->net_dev).
- *
- * RFKILL:
- *
- * At startup, both HW and SW radio switches are assumed to be off.
- *
- * At initialization time [after calling wimax_dev_add()], have the
- * driver query the device for the status of the software and hardware
- * RF kill switches and call wimax_report_rfkill_hw() and
- * wimax_report_rfkill_sw() to indicate their state. If any is
- * missing, just call it to indicate it is ON (radio always on).
- *
- * Whenever the driver detects a change in the state of the RF kill
- * switches, it should call wimax_report_rfkill_hw() or
- * wimax_report_rfkill_sw() to report it to the stack.
- */
-struct wimax_dev {
- struct net_device *net_dev;
- struct list_head id_table_node;
- struct mutex mutex; /* Protects all members and API calls */
- struct mutex mutex_reset;
- enum wimax_st state;
-
- int (*op_msg_from_user)(struct wimax_dev *wimax_dev,
- const char *,
- const void *, size_t,
- const struct genl_info *info);
- int (*op_rfkill_sw_toggle)(struct wimax_dev *wimax_dev,
- enum wimax_rf_state);
- int (*op_reset)(struct wimax_dev *wimax_dev);
-
- struct rfkill *rfkill;
- unsigned int rf_hw;
- unsigned int rf_sw;
- char name[32];
-
- struct dentry *debugfs_dentry;
-};
-
-
-
-/*
- * WiMAX stack public API for device drivers
- * -----------------------------------------
- *
- * These functions are not exported to user space.
- */
-void wimax_dev_init(struct wimax_dev *);
-int wimax_dev_add(struct wimax_dev *, struct net_device *);
-void wimax_dev_rm(struct wimax_dev *);
-
-static inline
-struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
-{
- return netdev_priv(net_dev);
-}
-
-static inline
-struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
-{
- return wimax_dev->net_dev->dev.parent;
-}
-
-void wimax_state_change(struct wimax_dev *, enum wimax_st);
-enum wimax_st wimax_state_get(struct wimax_dev *);
-
-/*
- * Radio Switch state reporting.
- *
- * enum wimax_rf_state is declared in linux/wimax.h so the exports
- * to user space can use it.
- */
-void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
-
-
-/*
- * Free-form messaging to/from user space
- *
- * Sending a message:
- *
- * wimax_msg(wimax_dev, pipe_name, buf, buf_size, GFP_KERNEL);
- *
- * Broken up:
- *
- * skb = wimax_msg_alloc(wimax_dev, pipe_name, buf_size, GFP_KERNEL);
- * ...fill up skb...
- * wimax_msg_send(wimax_dev, pipe_name, skb);
- *
- * Be sure not to modify skb->data in the middle (ie: don't use
- * skb_push()/skb_pull()/skb_reserve() on the skb).
- *
- * "pipe_name" is any string, that can be interpreted as the name of
- * the pipe or recipient; the interpretation of it is driver
- * specific, so the recipient can multiplex it as wished. If it is
- * NULL, it won't be used - an example is using a "diagnostics" tag to
- * send diagnostics information that a device-specific diagnostics
- * tool would be interested in.
- */
-struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
- size_t, gfp_t);
-int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
-
-const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-const void *wimax_msg_data(struct sk_buff *);
-ssize_t wimax_msg_len(struct sk_buff *);
-
-
-/*
- * WiMAX stack user space API
- * --------------------------
- *
- * This API is what gets exported to user space for general
- * operations. As well, they can be called from within the kernel
- * (with a properly referenced `struct wimax_dev`).
- *
- * Properly referenced means: the 'struct net_device' that embeds the
- * device's control structure and (as such) the 'struct wimax_dev' is
- * referenced by the caller.
- */
-int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-int wimax_reset(struct wimax_dev *);
-
-#endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/include/net/x25.h b/include/net/x25.h
index d7d6c2b4ffa7..597eb53c471e 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -177,10 +177,7 @@ struct x25_forward {
atomic_t refcnt;
};
-static inline struct x25_sock *x25_sk(const struct sock *sk)
-{
- return (struct x25_sock *)sk;
-}
+#define x25_sk(ptr) container_of_const(ptr, struct x25_sock, sk)
/* af_x25.c */
extern int sysctl_x25_restart_request_timeout;
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 3814fb631d52..e6770dd40c91 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -6,6 +6,9 @@
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__
+#include <linux/bitfield.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
#include <linux/skbuff.h> /* skb_shared_info */
/**
@@ -13,15 +16,15 @@
*
* The XDP RX-queue info (xdp_rxq_info) is associated with the driver
* level RX-ring queues. It is information that is specific to how
- * the driver have configured a given RX-ring queue.
+ * the driver has configured a given RX-ring queue.
*
- * Each xdp_buff frame received in the driver carry a (pointer)
+ * Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
* data-path read-access to RX-info for both kernel and bpf-side
* (limited subset).
*
* For now, direct access is only safe while running in NAPI/softirq
- * context. Contents is read-mostly and must not be updated during
+ * context. Contents are read-mostly and must not be updated during
* driver NAPI/softirq poll.
*
* The driver usage API is a register and unregister API.
@@ -29,9 +32,9 @@
* The struct is not directly tied to the XDP prog. A new XDP prog
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
- * naturally need to stop the RX-ring before purging and reallocating
- * memory. In that process the driver MUST call unregistor (which
- * also apply for driver shutdown and unload). The register API is
+ * naturally needs to stop the RX-ring before purging and reallocating
+ * memory. In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
*/
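
A sketch of the register/unregister contract described above, as a driver might follow it; the mydrv names are hypothetical and the page_pool memory model is just one of the supported choices:

static int mydrv_rxq_setup(struct mydrv_ring *ring, struct net_device *dev,
			   u32 queue_index, struct page_pool *pp)
{
	int err;

	/* register once per RX-ring, before any packet can arrive on it */
	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index, 0);
	if (err)
		return err;

	/* tell the core how buffers from this ring are given back */
	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, pp);
	if (err)
		xdp_rxq_info_unreg(&ring->xdp_rxq);	/* MUST unregister */
	return err;
}
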
@@ -59,12 +62,21 @@ struct xdp_rxq_info {
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
+ unsigned int napi_id;
+ u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
struct xdp_txq_info {
struct net_device *dev;
};
+enum xdp_buff_flags {
+ XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
+ XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
+ * pressure
+ */
+};
+
struct xdp_buff {
void *data;
void *data_end;
@@ -73,8 +85,54 @@ struct xdp_buff {
struct xdp_rxq_info *rxq;
struct xdp_txq_info *txq;
u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
+ u32 flags; /* supported values defined in xdp_buff_flags */
};
+static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+}
+
+static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
+}
+
+static __always_inline void
+xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
+{
+ xdp->frame_sz = frame_sz;
+ xdp->rxq = rxq;
+ xdp->flags = 0;
+}
+
+static __always_inline void
+xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
+ int headroom, int data_len, const bool meta_valid)
+{
+ unsigned char *data = hard_start + headroom;
+
+ xdp->data_hard_start = hard_start;
+ xdp->data = data;
+ xdp->data_end = data + data_len;
+ xdp->data_meta = meta_valid ? data : data + 1;
+}
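
The two initializers above split per-ring from per-packet work. A sketch of a driver RX path using them (mydrv names are hypothetical):

static void mydrv_fill_xdp(struct xdp_buff *xdp, struct mydrv_ring *ring,
			   void *hard_start, u32 pkt_len)
{
	/* per-NAPI/ring constants: frame size and the registered rxq */
	xdp_init_buff(xdp, PAGE_SIZE, &ring->xdp_rxq);
	/* per-packet layout: reserve headroom, mark metadata as valid */
	xdp_prepare_buff(xdp, hard_start, XDP_PACKET_HEADROOM, pkt_len, true);
}
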
+
/* Reserve memory area at end-of data area.
*
* This macro reserves tailroom in the XDP buffer by limiting the
@@ -91,19 +149,56 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
+static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
+{
+ unsigned int len = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
struct xdp_frame {
void *data;
u16 len;
u16 headroom;
- u32 metasize:8;
- u32 frame_sz:24;
+ u32 metasize; /* uses lower 8-bits */
/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
* while mem info is valid on remote CPU.
*/
struct xdp_mem_info mem;
struct net_device *dev_rx; /* used by cpumap */
+ u32 frame_sz;
+ u32 flags; /* supported values defined in xdp_buff_flags */
};
+static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+}
+
+#define XDP_BULK_QUEUE_SIZE 16
+struct xdp_frame_bulk {
+ int count;
+ void *xa;
+ void *q[XDP_BULK_QUEUE_SIZE];
+};
+
+static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
+{
+ /* bq->count will be zeroed when bq->xa gets updated */
+ bq->xa = NULL;
+}
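
The bulk queue amortizes the per-frame return cost on TX-completion paths. A sketch of the intended pattern (mydrv name hypothetical); the RCU read lock protects the mem-model lookup performed while flushing:

static void mydrv_tx_clean(struct xdp_frame **frames, int n)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();	/* needed for the mem allocator lookup */
	for (i = 0; i < n; i++)
		xdp_return_frame_bulk(frames[i], &bq);
	xdp_flush_frame_bulk(&bq);	/* release anything still queued */
	rcu_read_unlock();
}
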
static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
@@ -127,11 +222,31 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame)
frame->dev_rx = NULL;
}
+static inline void
+xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
+ unsigned int size, unsigned int truesize,
+ bool pfmemalloc)
+{
+ skb_shinfo(skb)->nr_frags = nr_frags;
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+ skb->pfmemalloc |= pfmemalloc;
+}
+
/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+ struct sk_buff *skb,
+ struct net_device *dev);
+struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
+ struct net_device *dev);
+int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
+struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);
static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
@@ -141,6 +256,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
xdp->data_end = frame->data + frame->len;
xdp->data_meta = frame->data - frame->metasize;
xdp->frame_sz = frame->frame_sz;
+ xdp->flags = frame->flags;
}
static inline
@@ -167,6 +283,7 @@ int xdp_update_frame_from_buff(struct xdp_buff *xdp,
xdp_frame->headroom = headroom - sizeof(*xdp_frame);
xdp_frame->metasize = metasize;
xdp_frame->frame_sz = xdp->frame_sz;
+ xdp_frame->flags = xdp->flags;
return 0;
}
@@ -191,33 +308,49 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
return xdp_frame;
}
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+ struct xdp_frame_bulk *bq);
-/* When sending xdp_frame into the network stack, then there is no
- * return point callback, which is needed to release e.g. DMA-mapping
- * resources with page_pool. Thus, have explicit function to release
- * frame resources.
- */
-void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
-static inline void xdp_release_frame(struct xdp_frame *xdpf)
+static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
{
- struct xdp_mem_info *mem = &xdpf->mem;
+ struct skb_shared_info *sinfo;
+ unsigned int len = xdpf->len;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
- /* Curr only page_pool needs this */
- if (mem->type == MEM_TYPE_PAGE_POOL)
- __xdp_release_frame(xdpf->data, mem);
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
+int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index,
+ unsigned int napi_id, u32 frag_size);
+static inline int
+xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index,
+ unsigned int napi_id)
+{
+ return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}
-int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
- struct net_device *dev, u32 queue_index);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
+int xdp_reg_mem_model(struct xdp_mem_info *mem,
+ enum xdp_mem_type type, void *allocator);
+void xdp_unreg_mem_model(struct xdp_mem_info *mem);
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
@@ -234,17 +367,157 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
return unlikely(xdp->data_meta > xdp->data);
}
+static inline bool xdp_metalen_invalid(unsigned long metalen)
+{
+ unsigned long meta_max;
+
+ meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
+ BUILD_BUG_ON(!__builtin_constant_p(meta_max));
+
+ return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
+}
+
struct xdp_attachment_info {
struct bpf_prog *prog;
u32 flags;
};
struct netdev_bpf;
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
- struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
-#define DEV_MAP_BULK_SIZE 16
+#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+
+/* Define the relationship between xdp-rx-metadata kfunc and
+ * various other entities:
+ * - xdp_rx_metadata enum
+ * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
+ * - kfunc name
+ * - xdp_metadata_ops field
+ */
+#define XDP_METADATA_KFUNC_xxx \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
+ NETDEV_XDP_RX_METADATA_TIMESTAMP, \
+ bpf_xdp_metadata_rx_timestamp, \
+ xmo_rx_timestamp) \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
+ NETDEV_XDP_RX_METADATA_HASH, \
+ bpf_xdp_metadata_rx_hash, \
+ xmo_rx_hash) \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
+ NETDEV_XDP_RX_METADATA_VLAN_TAG, \
+ bpf_xdp_metadata_rx_vlan_tag, \
+ xmo_rx_vlan_tag) \
+
+enum xdp_rx_metadata {
+#define XDP_METADATA_KFUNC(name, _, __, ___) name,
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+MAX_XDP_METADATA_KFUNC,
+};
+
+enum xdp_rss_hash_type {
+ /* First part: Individual bits for L3/L4 types */
+ XDP_RSS_L3_IPV4 = BIT(0),
+ XDP_RSS_L3_IPV6 = BIT(1),
+
+ /* The fixed (L3) IPv4 and IPv6 headers can both be followed by
+ * variable/dynamic headers, called Options in IPv4 and Extension
+ * Headers in IPv6. The HW RSS type can contain this info.
+ */
+ XDP_RSS_L3_DYNHDR = BIT(2),
+
+ /* When RSS hash covers L4 then drivers MUST set XDP_RSS_L4 bit in
+ * addition to the protocol-specific bit. This eases interaction with
+ * SKBs and avoids reserving a fixed mask for future L4 protocol bits.
+ */
+ XDP_RSS_L4 = BIT(3), /* L4 based hash, proto can be unknown */
+ XDP_RSS_L4_TCP = BIT(4),
+ XDP_RSS_L4_UDP = BIT(5),
+ XDP_RSS_L4_SCTP = BIT(6),
+ XDP_RSS_L4_IPSEC = BIT(7), /* L4-based hash includes the IPsec SPI */
+ XDP_RSS_L4_ICMP = BIT(8),
+
+ /* Second part: RSS hash type combinations used for driver HW mapping */
+ XDP_RSS_TYPE_NONE = 0,
+ XDP_RSS_TYPE_L2 = XDP_RSS_TYPE_NONE,
+
+ XDP_RSS_TYPE_L3_IPV4 = XDP_RSS_L3_IPV4,
+ XDP_RSS_TYPE_L3_IPV6 = XDP_RSS_L3_IPV6,
+ XDP_RSS_TYPE_L3_IPV4_OPT = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR,
+ XDP_RSS_TYPE_L3_IPV6_EX = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR,
+
+ XDP_RSS_TYPE_L4_ANY = XDP_RSS_L4,
+ XDP_RSS_TYPE_L4_IPV4_TCP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
+ XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
+ XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
+ XDP_RSS_TYPE_L4_IPV4_IPSEC = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+ XDP_RSS_TYPE_L4_IPV4_ICMP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
+
+ XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
+ XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
+ XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
+ XDP_RSS_TYPE_L4_IPV6_IPSEC = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+ XDP_RSS_TYPE_L4_IPV6_ICMP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
+
+ XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR,
+ XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR,
+ XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
+};
+
+struct xdp_metadata_ops {
+ int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
+ int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
+ int (*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci);
+};
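
A driver opts in by implementing whichever ops its hardware can serve and leaving the rest NULL. A sketch, with hypothetical mydrv names and descriptor layout; only the -ENODATA convention is taken from the real kfunc contract:

static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			     enum xdp_rss_hash_type *rss_type)
{
	/* the kfunc machinery hands the driver its own xdp_buff context */
	const struct mydrv_xdp_ctx *mx = (const struct mydrv_xdp_ctx *)ctx;

	if (!mx->rx_desc->hash_valid)
		return -ENODATA;	/* no hash for this frame */

	*hash = le32_to_cpu(mx->rx_desc->rss_hash);
	*rss_type = XDP_RSS_TYPE_L4_IPV4_TCP;	/* decoded from HW in reality */
	return 0;
}

static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
	.xmo_rx_hash = mydrv_xmo_rx_hash,
};
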
+
+#ifdef CONFIG_NET
+u32 bpf_xdp_metadata_kfunc_id(int id);
+bool bpf_dev_bound_kfunc_id(u32 btf_id);
+void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
+void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
+void xdp_features_clear_redirect_target(struct net_device *dev);
+#else
+static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
+static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
+
+static inline void
+xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+{
+}
+static inline void
+xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
+{
+}
+
+static inline void
+xdp_features_clear_redirect_target(struct net_device *dev)
+{
+}
+#endif
+
+static inline void xdp_clear_features_flag(struct net_device *dev)
+{
+ xdp_set_features_flag(dev, 0);
+}
+
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
+ * under local_bh_disable(), which provides the needed RCU protection
+ * for accessing map entries.
+ */
+ u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+
+ if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
+ if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
+ act = xdp_master_redirect(xdp);
+ }
+
+ return act;
+}
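
A sketch of how a driver RX loop consumes the verdict (mydrv name hypothetical, error handling abbreviated):

static u32 mydrv_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			 struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:		/* build an skb and hand it to the stack */
	case XDP_TX:		/* bounce the frame back out this device */
		return act;
	case XDP_REDIRECT:
		if (xdp_do_redirect(dev, xdp, prog) < 0)
			break;	/* a failed redirect degrades to a drop */
		return act;
	default:
		break;
	}
	return XDP_DROP;	/* XDP_ABORTED/XDP_DROP/unknown: recycle */
}
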
#endif /* __LINUX_NET_XDP_H__ */
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
index a9d5b7603b89..c9df68d5f258 100644
--- a/include/net/xdp_priv.h
+++ b/include/net/xdp_priv.h
@@ -3,6 +3,7 @@
#define __LINUX_NET_XDP_PRIV_H__
#include <linux/rhashtable.h>
+#include <net/xdp.h>
/* Private to net/core/xdp.c, but used by trace/events/xdp.h */
struct xdp_mem_allocator {
@@ -10,7 +11,6 @@ struct xdp_mem_allocator {
union {
void *allocator;
struct page_pool *page_pool;
- struct zero_copy_allocator *zc_alloc;
};
struct rhash_head node;
struct rcu_head rcu;
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index c9d87cc40c11..3cb4dc9bd70e 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H
+#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
@@ -13,60 +14,63 @@
#include <linux/mm.h>
#include <net/sock.h>
+#define XDP_UMEM_SG_FLAG (1 << 1)
+
struct net_device;
struct xsk_queue;
struct xdp_buff;
struct xdp_umem {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
- struct xsk_buff_pool *pool;
+ void *addrs;
u64 size;
u32 headroom;
u32 chunk_size;
+ u32 chunks;
+ u32 npgs;
struct user_struct *user;
refcount_t users;
- struct work_struct work;
- struct page **pgs;
- u32 npgs;
- u16 queue_id;
- u8 need_wakeup;
u8 flags;
- int id;
- struct net_device *dev;
+ u8 tx_metadata_len;
bool zc;
- spinlock_t xsk_tx_list_lock;
- struct list_head xsk_tx_list;
+ struct page **pgs;
+ int id;
+ struct list_head xsk_dma_list;
+ struct work_struct work;
};
struct xsk_map {
struct bpf_map map;
spinlock_t lock; /* Synchronize map updates */
- struct xdp_sock *xsk_map[];
+ atomic_t count;
+ struct xdp_sock __rcu *xsk_map[];
};
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
+ struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
+ bool sg;
enum {
XSK_READY = 0,
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
- struct list_head list;
- /* Mutual exclusion of NAPI TX thread and sendmsg error paths
- * in the SKB destructor callback.
+ struct list_head tx_list;
+ /* Record the number of TX descriptors sent by this xsk; once it
+ * exceeds MAX_PER_SOCKET_BUDGET, other xsks must be given a chance
+ * to send their TX descriptors, so that no xsk is starved.
*/
- spinlock_t tx_completion_lock;
+ u32 tx_budget_spent;
+
/* Protects generic receive. */
spinlock_t rx_lock;
@@ -74,9 +78,43 @@ struct xdp_sock {
u64 rx_dropped;
u64 rx_queue_full;
+ /* When __xsk_generic_xmit() must return before it sees the EOP descriptor for the current
+ * packet, the partially built skb is saved here so that packet building can resume in next
+ * call of __xsk_generic_xmit().
+ */
+ struct sk_buff *skb;
+
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
+ struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+ struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
+};
+
+/*
+ * AF_XDP TX metadata hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
+ *
+ * void (*tmo_request_timestamp)(void *priv)
+ * Called when an AF_XDP frame requests an egress timestamp.
+ *
+ * u64 (*tmo_fill_timestamp)(void *priv)
+ * Called when an AF_XDP frame that had requested an egress timestamp
+ * receives a completion. The hook needs to return the actual HW timestamp.
+ *
+ * void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv)
+ * Called when an AF_XDP frame requests HW checksum offload. csum_start
+ * indicates the position where checksumming should start;
+ * csum_offset indicates the position where the checksum should be stored.
+ *
+ */
+struct xsk_tx_metadata_ops {
+ void (*tmo_request_timestamp)(void *priv);
+ u64 (*tmo_fill_timestamp)(void *priv);
+ void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
};
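
A sketch of a driver filling the ops table; the mydrv names, descriptor fields and command flag are hypothetical, and priv is whatever cookie the driver later passes to the request/complete helpers:

static void mydrv_tmo_request_timestamp(void *priv)
{
	struct mydrv_tx_desc *desc = priv;

	desc->cmd |= MYDRV_TX_CMD_TSTAMP;	/* ask HW to latch a timestamp */
}

static u64 mydrv_tmo_fill_timestamp(void *priv)
{
	struct mydrv_tx_cdesc *cdesc = priv;

	return le64_to_cpu(cdesc->tstamp);	/* HW completion timestamp */
}

static const struct xsk_tx_metadata_ops mydrv_xsk_tx_metadata_ops = {
	.tmo_request_timestamp	= mydrv_tmo_request_timestamp,
	.tmo_fill_timestamp	= mydrv_tmo_fill_timestamp,
};
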
#ifdef CONFIG_XDP_SOCKETS
@@ -85,17 +123,73 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
+/**
+ * xsk_tx_metadata_to_compl - Save enough relevant metadata information
+ * to perform tx completion in the future.
+ * @meta: pointer to AF_XDP metadata area
+ * @compl: pointer to output struct xsk_tx_metadata_compl
+ *
+ * This function should be called by the networking device when
+ * it prepares an AF_XDP egress packet. The value of @compl should be stored
+ * and passed to xsk_tx_metadata_complete upon TX completion.
+ */
+static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
+ struct xsk_tx_metadata_compl *compl)
{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *xs;
+ if (!meta)
+ return;
+
+ if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
+ compl->tx_timestamp = &meta->completion.tx_timestamp;
+ else
+ compl->tx_timestamp = NULL;
+}
- if (key >= map->max_entries)
- return NULL;
+/**
+ * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission
+ * and call appropriate xsk_tx_metadata_ops operation.
+ * @meta: pointer to AF_XDP metadata area
+ * @ops: pointer to struct xsk_tx_metadata_ops
+ * @priv: pointer to driver-private area
+ *
+ * This function should be called by the networking device when
+ * it prepares an AF_XDP egress packet.
+ */
+static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+ if (!meta)
+ return;
+
+ if (ops->tmo_request_timestamp)
+ if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
+ ops->tmo_request_timestamp(priv);
+
+ if (ops->tmo_request_checksum)
+ if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM)
+ ops->tmo_request_checksum(meta->request.csum_start,
+ meta->request.csum_offset, priv);
+}
+
+/**
+ * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion
+ * and call appropriate xsk_tx_metadata_ops operation.
+ * @compl: pointer to completion metadata produced from xsk_tx_metadata_to_compl
+ * @ops: pointer to struct xsk_tx_metadata_ops
+ * @priv: pointer to driver-private area
+ *
+ * This function should be called by the networking device upon
+ * AF_XDP egress completion.
+ */
+static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+ if (!compl)
+ return;
- xs = READ_ONCE(m->xsk_map[key]);
- return xs;
+ *compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
}
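
Tying the three helpers together: submission evaluates the request and saves the completion cookie, which the IRQ side later consumes. A sketch with hypothetical mydrv names (xsk_buff_get_metadata() is added to xdp_sock_drv.h later in this patch):

/* TX submission side */
static void mydrv_xsk_xmit_one(struct xsk_buff_pool *pool, struct xdp_desc *d,
			       struct mydrv_tx_desc *hw,
			       struct xsk_tx_metadata_compl *compl)
{
	struct xsk_tx_metadata *meta = xsk_buff_get_metadata(pool, d->addr);

	xsk_tx_metadata_request(meta, &mydrv_xsk_tx_metadata_ops, hw);
	xsk_tx_metadata_to_compl(meta, compl);	/* stash for the IRQ side */
}

/* TX completion side (IRQ/NAPI) */
static void mydrv_xsk_complete_one(struct xsk_tx_metadata_compl *compl,
				   struct mydrv_tx_cdesc *cdesc)
{
	xsk_tx_metadata_complete(compl, &mydrv_xsk_tx_metadata_ops, cdesc);
}
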
#else
@@ -114,12 +208,32 @@ static inline void __xsk_map_flush(void)
{
}
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
+static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
+ struct xsk_tx_metadata_compl *compl)
+{
+}
+
+static inline void xsk_tx_metadata_request(struct xsk_tx_metadata *meta,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+}
+
+static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
{
- return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */
+#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
+bool xsk_map_check_flush(void);
+#else
+static inline bool xsk_map_check_flush(void)
+{
+ return false;
+}
+#endif
+
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index ccf848f7efa4..c9aec9ab6191 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -9,49 +9,77 @@
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>
+#define XDP_UMEM_MIN_CHUNK_SHIFT 11
+#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+
+struct xsk_cb_desc {
+ void *src;
+ u8 off;
+ u8 bytes;
+};
+
#ifdef CONFIG_XDP_SOCKETS
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
+void xsk_tx_release(struct xsk_buff_pool *pool);
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id);
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
-static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
- return XDP_PACKET_HEADROOM + umem->headroom;
+ return XDP_PACKET_HEADROOM + pool->headroom;
}
-static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
- return umem->chunk_size;
+ return pool->chunk_size;
}
-static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
- return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
-static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
- xp_set_rxq_info(umem->pool, rxq);
+ xp_set_rxq_info(pool, rxq);
+}
+
+static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
+ struct xsk_cb_desc *desc)
+{
+ xp_fill_cb(pool, desc);
+}
+
+static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return pool->heads[0].xdp.rxq->napi_id;
+#else
+ return 0;
+#endif
}
-static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
- xp_dma_unmap(umem->pool, attrs);
+ xp_dma_unmap(pool, attrs);
}
-static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
- unsigned long attrs)
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
{
- return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
+ struct xdp_umem *umem = pool->umem;
+
+ return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
@@ -68,117 +96,231 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return xp_get_frame_dma(xskb);
}
-static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
+{
+ return xp_alloc(pool);
+}
+
+static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+{
+ return !xp_mb_desc(desc);
+}
+
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
- return xp_alloc(umem->pool);
+ return xp_alloc_batch(pool, xdp, max);
}
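
A sketch of batched RX refill built on the helper above (mydrv names hypothetical; the batch size is borrowed from XDP_BULK_QUEUE_SIZE purely for illustration):

static void mydrv_rx_refill(struct mydrv_ring *ring, u32 budget)
{
	struct xdp_buff *bufs[XDP_BULK_QUEUE_SIZE];
	u32 i, n;

	/* may return fewer than requested; 0 means the fill ring is empty */
	n = xsk_buff_alloc_batch(ring->pool, bufs,
				 min_t(u32, budget, XDP_BULK_QUEUE_SIZE));
	for (i = 0; i < n; i++)
		mydrv_post_rx_desc(ring, bufs[i]);	/* hypothetical HW post */
}
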
-static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
- return xp_can_alloc(umem->pool, count);
+ return xp_can_alloc(pool, count);
}
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ struct list_head *xskb_list = &xskb->pool->xskb_list;
+ struct xdp_buff_xsk *pos, *tmp;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+ list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+ list_del(&pos->xskb_list_node);
+ xp_free(pos);
+ }
+
+ xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
+out:
xp_free(xskb);
}
-static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
- return xp_raw_get_dma(umem->pool, addr);
+ struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
}
-static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
- return xp_raw_get_data(umem->pool, addr);
+ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+ struct xdp_buff *ret = NULL;
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+ struct xdp_buff_xsk, xskb_list_node);
+ if (frag) {
+ list_del(&frag->xskb_list_node);
+ ret = &frag->xdp;
+ }
+
+ return ret;
+}
+
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+ list_del(&xskb->xskb_list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+ xskb_list_node);
+ return &frag->xdp;
+}
+
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
+ xdp->flags = 0;
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ return xp_raw_get_dma(pool, addr);
+}
+
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_raw_get_data(pool, addr);
+}
+
+#define XDP_TXMD_FLAGS_VALID ( \
+ XDP_TXMD_FLAGS_TIMESTAMP | \
+ XDP_TXMD_FLAGS_CHECKSUM | \
+ 0)
+
+static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+{
+ return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
+}
+
+static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+{
+ struct xsk_tx_metadata *meta;
+
+ if (!pool->tx_metadata_len)
+ return NULL;
+
+ meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
+ if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
+ return NULL; /* no way to signal the error to the user */
+
+ return meta;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ if (!pool->dma_need_sync)
+ return;
+
xp_dma_sync_for_cpu(xskb);
}
-static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
{
- xp_dma_sync_for_device(umem->pool, dma, size);
+ xp_dma_sync_for_device(pool, dma, size);
}
#else
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
- struct xdp_desc *desc)
+static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
{
return false;
}
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
+{
+ return 0;
+}
+
+static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
- u16 queue_id)
+static inline struct xsk_buff_pool *
+xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
return NULL;
}
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
+static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
return false;
}
-static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
}
-static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
+ struct xsk_cb_desc *desc)
+{
+}
+
+static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
}
-static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
- unsigned long attrs)
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
{
return 0;
}
@@ -193,12 +335,22 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return 0;
}
-static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
return NULL;
}
-static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+{
+ return false;
+}
+
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return 0;
+}
+
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return false;
}
@@ -207,21 +359,54 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
-static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+{
+ return NULL;
+}
+
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+ return NULL;
+}
+
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
{
return 0;
}
-static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
+{
+ return NULL;
+}
+
+static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+{
+ return false;
+}
+
+static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
return NULL;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}
-static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 2737d24ec244..57c743b7e4fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -51,8 +51,10 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
+#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
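
For illustration only (not part of this patch): the new macro mirrors
XFRM_INC_STATS but credits an arbitrary count in one call. A hypothetical
caller accounting for a batch of packets might write:

	/* 'net' and 'nr_pkts' assumed in scope */
	XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEINVALID, nr_pkts);
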
@@ -126,12 +128,30 @@ struct xfrm_state_walk {
struct xfrm_address_filter *filter;
};
-struct xfrm_state_offload {
+enum {
+ XFRM_DEV_OFFLOAD_IN = 1,
+ XFRM_DEV_OFFLOAD_OUT,
+ XFRM_DEV_OFFLOAD_FWD,
+};
+
+enum {
+ XFRM_DEV_OFFLOAD_UNSPECIFIED,
+ XFRM_DEV_OFFLOAD_CRYPTO,
+ XFRM_DEV_OFFLOAD_PACKET,
+};
+
+enum {
+ XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
+};
+
+struct xfrm_dev_offload {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct net_device *real_dev;
unsigned long offload_handle;
- unsigned int num_exthdrs;
- u8 flags;
+ u8 dir : 2;
+ u8 type : 2;
+ u8 flags : 2;
};
struct xfrm_mode {
@@ -145,6 +165,12 @@ enum {
XFRM_MODE_FLAG_TUNNEL = 1,
};
+enum xfrm_replay_mode {
+ XFRM_REPLAY_MODE_LEGACY,
+ XFRM_REPLAY_MODE_BMP,
+ XFRM_REPLAY_MODE_ESN,
+};
+
/* Full description of state of transformer. */
struct xfrm_state {
possible_net_t xs_net;
@@ -154,6 +180,7 @@ struct xfrm_state {
};
struct hlist_node bysrc;
struct hlist_node byspi;
+ struct hlist_node byseq;
refcount_t refcnt;
spinlock_t lock;
@@ -193,6 +220,11 @@ struct xfrm_state {
struct xfrm_algo_aead *aead;
const char *geniv;
+ /* mapping change rate limiting */
+ __be16 new_mapping_sport;
+ u32 new_mapping; /* seconds */
+ u32 mapping_maxage; /* seconds for input SA */
+
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
struct sock __rcu *encap_sk;
@@ -214,9 +246,8 @@ struct xfrm_state {
struct xfrm_replay_state preplay;
struct xfrm_replay_state_esn *preplay_esn;
- /* The functions for replay detection. */
- const struct xfrm_replay *repl;
-
+ /* replay detection mode */
+ enum xfrm_replay_mode repl_mode;
/* internal flag that only holds state for delayed aevent at the
* moment
*/
@@ -235,7 +266,7 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct hrtimer mtimer;
- struct xfrm_state_offload xso;
+ struct xfrm_dev_offload xso;
/* used to fix curlft->add_time when changing date */
long saved_tmo;
@@ -296,21 +327,15 @@ struct km_event {
struct net *net;
};
-struct xfrm_replay {
- void (*advance)(struct xfrm_state *x, __be32 net_seq);
- int (*check)(struct xfrm_state *x,
- struct sk_buff *skb,
- __be32 net_seq);
- int (*recheck)(struct xfrm_state *x,
- struct sk_buff *skb,
- __be32 net_seq);
- void (*notify)(struct xfrm_state *x, int event);
- int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+struct xfrm_if_decode_session_result {
+ struct net *net;
+ u32 if_id;
};
struct xfrm_if_cb {
- struct xfrm_if *(*decode_session)(struct sk_buff *skb,
- unsigned short family);
+ bool (*decode_session)(struct sk_buff *skb,
+ unsigned short family,
+ struct xfrm_if_decode_session_result *res);
};
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -387,7 +412,6 @@ void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
- char *description;
struct module *owner;
u8 proto;
u8 flags;
@@ -396,20 +420,19 @@ struct xfrm_type {
#define XFRM_TYPE_LOCAL_COADDR 4
#define XFRM_TYPE_REMOTE_COADDR 8
- int (*init_state)(struct xfrm_state *x);
+ int (*init_state)(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
void (*destructor)(struct xfrm_state *);
int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
int (*reject)(struct xfrm_state *, struct sk_buff *,
const struct flowi *);
- int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
};
int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
struct xfrm_type_offload {
- char *description;
struct module *owner;
u8 proto;
void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
@@ -526,6 +549,8 @@ struct xfrm_policy {
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
+
+ struct xfrm_dev_offload xdo;
};
static inline struct net *xp_net(const struct xfrm_policy *xp)
@@ -582,8 +607,8 @@ struct xfrm_mgr {
bool (*is_alive)(const struct km_event *c);
};
-int xfrm_register_km(struct xfrm_mgr *km);
-int xfrm_unregister_km(struct xfrm_mgr *km);
+void xfrm_register_km(struct xfrm_mgr *km);
+void xfrm_unregister_km(struct xfrm_mgr *km);
struct xfrm_tunnel_skb_cb {
union {
@@ -984,6 +1009,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
	u32 if_id;		/* interface identifier */
+ bool collect_md;
};
struct xfrm_if {
@@ -1009,7 +1035,7 @@ struct xfrm_offload {
#define CRYPTO_FALLBACK 8
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
-#define XFRM_ESP_NO_TRAILER 64
+/* 64 is free */
#define XFRM_DEV_RESUME 128
#define XFRM_XMIT 256
@@ -1024,11 +1050,13 @@ struct xfrm_offload {
#define CRYPTO_INVALID_PROTOCOL 128
__u8 proto;
+ __u8 inner_ipproto;
};
struct sec_path {
int len;
int olen;
+ int verified_cnt;
struct xfrm_state *xvec[XFRM_MAX_DEPTH];
struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
@@ -1083,22 +1111,75 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
}
#ifdef CONFIG_XFRM
+static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+
+ return sp->xvec[sp->len - 1];
+}
+#endif
+
+static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (!sp || !sp->olen || sp->len != sp->olen)
+ return NULL;
+
+ return &sp->ovec[sp->olen - 1];
+#else
+ return NULL;
+#endif
+}
+
+#ifdef CONFIG_XFRM
int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
unsigned short family);
+static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
+ int dir)
+{
+ if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
+ return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
+
+ return false;
+}
+
+static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
+ int dir, unsigned short family)
+{
+ if (dir != XFRM_POLICY_OUT && family == AF_INET) {
+ /* same dst may be used for traffic originating from
+ * devices with different policy settings.
+ */
+ return IPCB(skb)->flags & IPSKB_NOPOLICY;
+ }
+ return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
+}
+
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
struct sk_buff *skb,
unsigned int family, int reverse)
{
struct net *net = dev_net(skb->dev);
int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct xfrm_state *x;
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
- return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
- (skb_dst(skb)->flags & DST_NOPOLICY) ||
- __xfrm_policy_check(sk, ndir, skb, family);
+ if (xo) {
+ x = xfrm_input_state(skb);
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+ return (xo->flags & CRYPTO_DONE) &&
+ (xo->status & CRYPTO_SUCCESS);
+ }
+
+ return __xfrm_check_nopolicy(net, skb, dir) ||
+ __xfrm_check_dev_nopolicy(skb, dir, family) ||
+ __xfrm_policy_check(sk, ndir, skb, family);
}
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
@@ -1128,20 +1209,20 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
}
-int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
unsigned int family, int reverse);
-static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
unsigned int family)
{
- return __xfrm_decode_session(skb, fl, family, 0);
+ return __xfrm_decode_session(net, skb, fl, family, 0);
}
-static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
+static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
struct flowi *fl,
unsigned int family)
{
- return __xfrm_decode_session(skb, fl, family, 1);
+ return __xfrm_decode_session(net, skb, fl, family, 1);
}
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
@@ -1150,9 +1231,12 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
struct net *net = dev_net(skb->dev);
- return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
- (skb_dst(skb)->flags & DST_NOXFRM) ||
- __xfrm_route_forward(skb, family);
+ if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
+ net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
+ return true;
+
+ return (skb_dst(skb)->flags & DST_NOXFRM) ||
+ __xfrm_route_forward(skb, family);
}
static inline int xfrm4_route_forward(struct sk_buff *skb)
@@ -1169,6 +1253,8 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
+ if (!sk_fullsock(osk))
+ return 0;
sk->sk_policy[0] = NULL;
sk->sk_policy[1] = NULL;
if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
@@ -1212,7 +1298,7 @@ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *sk
{
return 1;
}
-static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
+static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
struct flowi *fl,
unsigned int family)
{
@@ -1493,6 +1579,21 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_update_stats(struct net *net);
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xdo = &x->xso;
+ struct net_device *dev = xdo->dev;
+
+ if (dev && dev->xfrmdev_ops &&
+ dev->xfrmdev_ops->xdo_dev_state_update_stats)
+ dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
+}
+#else
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
+#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
int xfrm_state_update(struct xfrm_state *x);
@@ -1542,12 +1643,15 @@ struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
+int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
+ bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
@@ -1557,7 +1661,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
#if IS_ENABLED(CONFIG_NET_PKTGEN)
@@ -1565,12 +1669,10 @@ int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
#endif
void xfrm_local_error(struct sk_buff *skb, int mtu);
-int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
int xfrm4_rcv(struct sk_buff *skb);
-int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
@@ -1581,13 +1683,11 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
}
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
struct ip6_tnl *t);
int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1605,14 +1705,15 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
-int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
- u8 **prevhdr);
#ifdef CONFIG_XFRM
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ struct sk_buff *skb);
+struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ struct sk_buff *skb);
int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
int optlen);
#else
@@ -1649,8 +1750,9 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net,
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
-int verify_spi_info(u8 proto, u32 min, u32 max);
-int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
+ struct netlink_ext_ack *extack);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
@@ -1663,14 +1765,16 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_bundles,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap);
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+ u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m,
struct xfrm_encap_tmpl *encap);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
struct xfrm_kmaddress *k, struct net *net,
- struct xfrm_encap_tmpl *encap);
+ struct xfrm_encap_tmpl *encap, u32 if_id,
+ struct netlink_ext_ack *extack);
#endif
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
@@ -1721,6 +1825,12 @@ static inline int xfrm_policy_id2dir(u32 index)
}
#ifdef CONFIG_XFRM
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
+int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+void xfrm_replay_notify(struct xfrm_state *x, int event);
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+
static inline int xfrm_aevent_is_on(struct net *net)
{
struct sock *nlsk;
@@ -1773,21 +1883,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es
static inline int xfrm_replay_clone(struct xfrm_state *x,
struct xfrm_state *orig)
{
- x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+ x->replay_esn = kmemdup(orig->replay_esn,
+ xfrm_replay_state_esn_len(orig->replay_esn),
GFP_KERNEL);
if (!x->replay_esn)
return -ENOMEM;
-
- x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
- x->replay_esn->replay_window = orig->replay_esn->replay_window;
-
- x->preplay_esn = kmemdup(x->replay_esn,
- xfrm_replay_state_esn_len(x->replay_esn),
+ x->preplay_esn = kmemdup(orig->preplay_esn,
+ xfrm_replay_state_esn_len(orig->preplay_esn),
GFP_KERNEL);
- if (!x->preplay_esn) {
- kfree(x->replay_esn);
+ if (!x->preplay_esn)
return -ENOMEM;
- }
return 0;
}
@@ -1823,29 +1929,6 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n)
}
#endif
-#ifdef CONFIG_XFRM
-static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
-{
- struct sec_path *sp = skb_sec_path(skb);
-
- return sp->xvec[sp->len - 1];
-}
-#endif
-
-static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
-{
-#ifdef CONFIG_XFRM
- struct sec_path *sp = skb_sec_path(skb);
-
- if (!sp || !sp->olen || sp->len != sp->olen)
- return NULL;
-
- return &sp->ovec[sp->olen - 1];
-#else
- return NULL;
-#endif
-}
-
void __init xfrm_dev_init(void);
#ifdef CONFIG_XFRM_OFFLOAD
@@ -1853,12 +1936,16 @@ void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
- struct xfrm_user_offload *xuo);
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack);
+int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+ struct xfrm_user_offload *xuo, u8 dir,
+ struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
@@ -1884,7 +1971,7 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
if (xso->dev)
xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
@@ -1892,14 +1979,37 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x)
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
if (dev->xfrmdev_ops->xdo_dev_state_free)
dev->xfrmdev_ops->xdo_dev_state_free(x);
xso->dev = NULL;
- dev_put(dev);
+ xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ netdev_put(dev, &xso->dev_tracker);
+ }
+}
+
+static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
+{
+ struct xfrm_dev_offload *xdo = &x->xdo;
+ struct net_device *dev = xdo->dev;
+
+ if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
+ dev->xfrmdev_ops->xdo_dev_policy_delete(x);
+}
+
+static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
+{
+ struct xfrm_dev_offload *xdo = &x->xdo;
+ struct net_device *dev = xdo->dev;
+
+ if (dev && dev->xfrmdev_ops) {
+ if (dev->xfrmdev_ops->xdo_dev_policy_free)
+ dev->xfrmdev_ops->xdo_dev_policy_free(x);
+ xdo->dev = NULL;
+ netdev_put(dev, &xdo->dev_tracker);
}
}
#else
@@ -1916,7 +2026,7 @@ static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_fea
return skb;
}
-static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
return 0;
}
@@ -1929,6 +2039,21 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
}
+static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+ struct xfrm_user_offload *xuo, u8 dir,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
+{
+}
+
+static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
+{
+}
+
static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
return false;
@@ -2000,6 +2125,39 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
return 0;
}
+extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
+extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
+
+struct xfrm_translator {
+ /* Allocate frag_list and put compat translation there */
+ int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
+
+	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
+ struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
+ int maxtype, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
+
+ /* Translate 32-bit user_policy from sockptr */
+ int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
+
+ struct module *owner;
+};
+
+#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
+extern int xfrm_register_translator(struct xfrm_translator *xtr);
+extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
+extern struct xfrm_translator *xfrm_get_translator(void);
+extern void xfrm_put_translator(struct xfrm_translator *xtr);
+#else
+static inline struct xfrm_translator *xfrm_get_translator(void)
+{
+ return NULL;
+}
+static inline void xfrm_put_translator(struct xfrm_translator *xtr)
+{
+}
+#endif
+
#if IS_ENABLED(CONFIG_IPV6)
static inline bool xfrm6_local_dontfrag(const struct sock *sk)
{
@@ -2010,9 +2168,35 @@ static inline bool xfrm6_local_dontfrag(const struct sock *sk)
proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
- return inet6_sk(sk)->dontfrag;
+ return inet6_test_bit(DONTFRAG, sk);
return false;
}
#endif
+
+#if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
+
+int register_xfrm_interface_bpf(void);
+
+#else
+
+static inline int register_xfrm_interface_bpf(void)
+{
+ return 0;
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
+int register_xfrm_state_bpf(void);
+#else
+static inline int register_xfrm_state_bpf(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _NET_XFRM_H */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 6842990e2712..99dd7376df6a 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -7,63 +7,141 @@
#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/bpf.h>
#include <net/xdp.h>
struct xsk_buff_pool;
struct xdp_rxq_info;
+struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
+struct xdp_umem;
+struct xdp_sock;
struct device;
struct page;
+#define XSK_PRIV_MAX 24
+
struct xdp_buff_xsk {
struct xdp_buff xdp;
+ u8 cb[XSK_PRIV_MAX];
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- bool unaligned;
u64 orig_addr;
struct list_head free_list_node;
+ struct list_head xskb_list_node;
+};
+
+#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
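
Editorial sketch, not from this patch: a zero-copy driver overlays its
per-buffer state on the cb[] area and lets XSK_CHECK_PRIV_TYPE() enforce
the size bound at compile time (struct and field names hypothetical):

	struct my_xdp_buff {
		struct xdp_buff xdp;	/* must stay first */
		u32 rx_ring_idx;	/* private state, carried in cb[] */
	};

	/* somewhere inside an init function: */
	XSK_CHECK_PRIV_TYPE(struct my_xdp_buff);
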
+
+struct xsk_dma_map {
+ dma_addr_t *dma_pages;
+ struct device *dev;
+ struct net_device *netdev;
+ refcount_t users;
+ struct list_head list; /* Protected by the RTNL_LOCK */
+ u32 dma_pages_cnt;
+ bool dma_need_sync;
};
struct xsk_buff_pool {
- struct xsk_queue *fq;
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
struct list_head free_list;
+ struct list_head xskb_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
+ /* For performance reasons, each buff pool has its own array of dma_pages
+ * even when they are identical.
+ */
dma_addr_t *dma_pages;
struct xdp_buff_xsk *heads;
+ struct xdp_desc *tx_descs;
u64 chunk_mask;
u64 addrs_cnt;
u32 free_list_cnt;
u32 dma_pages_cnt;
- u32 heads_cnt;
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
+ u32 chunk_shift;
u32 frame_len;
+ u8 tx_metadata_len; /* inherited from umem */
+ u8 cached_need_wakeup;
+ bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
+ bool tx_sw_csum;
void *addrs;
- struct device *dev;
+	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to
+	 * protect: the NAPI TX thread and the sendmsg error paths in the SKB
+	 * destructor callback, plus sockets sharing a single cq because they
+	 * share the same netdev and queue id.
+	 */
+ spinlock_t cq_lock;
struct xdp_buff_xsk *free_heads[];
};
+/* Masks for xdp_umem_page flags.
+ * The low 12 bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
+
/* AF_XDP core. */
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
- u32 chunk_size, u32 headroom, u64 size,
- bool unaligned);
-void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ struct xdp_umem *umem);
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+ u16 queue_id, u16 flags);
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
+ struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
-void xp_release(struct xdp_buff_xsk *xskb);
+void xp_get_pool(struct xsk_buff_pool *pool);
+bool xp_put_pool(struct xsk_buff_pool *pool);
+void xp_clear_dev(struct xsk_buff_pool *pool);
+void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ dma_addr_t *dma_pages, u64 addr)
+{
+ xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+ (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
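
A minimal sketch of how pool setup might seed each buffer with these two
helpers (loop placement hypothetical; mirrors aligned-pool initialization):

	for (i = 0; i < pool->heads_cnt; i++) {
		struct xdp_buff_xsk *xskb = &pool->heads[i];

		xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
		if (pool->dma_pages)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages,
					 i * pool->chunk_size);
	}
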
+
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
+void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
@@ -80,9 +158,6 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- if (!xskb->pool->dma_need_sync)
- return;
-
xp_dma_sync_for_cpu_slow(xskb);
}
@@ -109,11 +184,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
{
bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
- if (pool->dma_pages_cnt && cross_pg) {
- return !(pool->dma_pages[addr >> PAGE_SHIFT] &
- XSK_NEXT_PG_CONTIG_MASK);
- }
- return false;
+ if (likely(!cross_pg))
+ return false;
+
+ return pool->dma_pages &&
+ !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
+}
+
+static inline bool xp_mb_desc(struct xdp_desc *desc)
+{
+ return desc->options & XDP_PKT_CONTD;
}
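
Editorial sketch: XDP_PKT_CONTD set in desc->options means the frame
continues in the next descriptor, so a Tx drain loop might mark
end-of-packet only on the last fragment (HW-posting step elided):

	struct xdp_desc desc;

	while (xsk_tx_peek_desc(pool, &desc)) {
		bool eop = !xp_mb_desc(&desc);
		/* post 'desc' to HW; set the EOP bit when 'eop' is true */
	}
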
static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
@@ -137,4 +217,30 @@ static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
xp_unaligned_extract_offset(addr);
}
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+ if (xskb->pool->unaligned)
+ xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+ u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+ offset += xskb->pool->headroom;
+ if (!xskb->pool->unaligned)
+ return xskb->orig_addr + offset;
+ return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
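
Worked example (editorial): with a 256-byte headroom pool in unaligned
mode, a chunk at orig_addr 0x10000 whose data begins 64 bytes past
data_hard_start yields offset 256 + 64 = 320, so the handle becomes
0x10000 + (320ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT).
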
+
+static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
+{
+ return pool->tx_metadata_len > 0;
+}
+
#endif /* XSK_BUFF_POOL_H_ */
diff --git a/include/pcmcia/soc_common.h b/include/pcmcia/soc_common.h
new file mode 100644
index 000000000000..d4f18f4679df
--- /dev/null
+++ b/include/pcmcia/soc_common.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <pcmcia/ss.h>
+
+struct module;
+struct cpufreq_freqs;
+
+struct soc_pcmcia_regulator {
+ struct regulator *reg;
+ bool on;
+};
+
+struct pcmcia_state {
+ unsigned detect: 1,
+ ready: 1,
+ bvd1: 1,
+ bvd2: 1,
+ wrprot: 1,
+ vs_3v: 1,
+ vs_Xv: 1;
+};
+
+/*
+ * This structure encapsulates per-socket state which we might need to
+ * use when responding to a Card Services query of some kind.
+ */
+struct soc_pcmcia_socket {
+ struct pcmcia_socket socket;
+
+ /*
+ * Info from low level handler
+ */
+ unsigned int nr;
+ struct clk *clk;
+
+ /*
+ * Core PCMCIA state
+ */
+ const struct pcmcia_low_level *ops;
+
+ unsigned int status;
+ socket_state_t cs_state;
+
+ unsigned short spd_io[MAX_IO_WIN];
+ unsigned short spd_mem[MAX_WIN];
+ unsigned short spd_attr[MAX_WIN];
+
+ struct resource res_skt;
+ struct resource res_io;
+ struct resource res_io_io;
+ struct resource res_mem;
+ struct resource res_attr;
+
+ struct {
+ int gpio;
+ struct gpio_desc *desc;
+ unsigned int irq;
+ const char *name;
+ } stat[6];
+#define SOC_STAT_CD 0 /* Card detect */
+#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */
+#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */
+#define SOC_STAT_RDY 3 /* Ready / Interrupt */
+#define SOC_STAT_VS1 4 /* Voltage sense 1 */
+#define SOC_STAT_VS2 5 /* Voltage sense 2 */
+
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_bus_enable;
+ struct soc_pcmcia_regulator vcc;
+ struct soc_pcmcia_regulator vpp;
+
+ unsigned int irq_state;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block cpufreq_nb;
+#endif
+ struct timer_list poll_timer;
+ struct list_head node;
+ void *driver_data;
+};
+
+struct pcmcia_low_level {
+ struct module *owner;
+
+ /* first socket in system */
+ int first;
+ /* nr of sockets */
+ int nr;
+
+ int (*hw_init)(struct soc_pcmcia_socket *);
+ void (*hw_shutdown)(struct soc_pcmcia_socket *);
+
+ void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
+ int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
+
+ /*
+ * Enable card status IRQs on (re-)initialisation. This can
+ * be called at initialisation, power management event, or
+ * pcmcia event.
+ */
+ void (*socket_init)(struct soc_pcmcia_socket *);
+
+ /*
+ * Disable card status IRQs and PCMCIA bus on suspend.
+ */
+ void (*socket_suspend)(struct soc_pcmcia_socket *);
+
+ /*
+ * Hardware specific timing routines.
+ * If provided, the get_timing routine overrides the SOC default.
+ */
+ unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
+ int (*set_timing)(struct soc_pcmcia_socket *);
+ int (*show_timing)(struct soc_pcmcia_socket *, char *);
+
+#ifdef CONFIG_CPU_FREQ
+ /*
+ * CPUFREQ support.
+ */
+ int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
+#endif
+};
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 36c5c5e38c1d..c011ea236e9b 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -300,7 +300,7 @@ TRACE_EVENT(aer_event,
const u32 status,
const u8 severity,
const u8 tlp_header_valid,
- struct aer_header_log_regs *tlp),
+ struct pcie_tlp_log *tlp),
TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp),
@@ -318,10 +318,10 @@ TRACE_EVENT(aer_event,
__entry->severity = severity;
__entry->tlp_header_valid = tlp_header_valid;
if (tlp_header_valid) {
- __entry->tlp_header[0] = tlp->dw0;
- __entry->tlp_header[1] = tlp->dw1;
- __entry->tlp_header[2] = tlp->dw2;
- __entry->tlp_header[3] = tlp->dw3;
+ __entry->tlp_header[0] = tlp->dw[0];
+ __entry->tlp_header[1] = tlp->dw[1];
+ __entry->tlp_header[2] = tlp->dw[2];
+ __entry->tlp_header[3] = tlp->dw[3];
}
),
@@ -358,7 +358,6 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
EM ( MF_MSG_SLAB, "kernel slab page" ) \
EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
- EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \
EM ( MF_MSG_HUGE, "huge page" ) \
EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
@@ -372,7 +371,8 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
EM ( MF_MSG_BUDDY, "free buddy page" ) \
- EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \
+ EM ( MF_MSG_DAX, "dax page" ) \
+ EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
EMe ( MF_MSG_UNKNOWN, "unknown page" )
/*
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 83139b9ce409..f7c185ff7a11 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -75,7 +75,7 @@ struct sockaddr_ib {
*/
static inline bool ib_safe_file_access(struct file *filp)
{
- return filp->f_cred == current_cred() && !uaccess_kernel();
+ return filp->f_cred == current_cred();
}
#endif /* _RDMA_IB_H */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index b0e636ac6690..811a0f11d0db 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -7,6 +7,7 @@
#ifndef IB_ADDR_H
#define IB_ADDR_H
+#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
@@ -193,29 +194,6 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
return 0;
}
-static inline int iboe_get_rate(struct net_device *dev)
-{
- struct ethtool_link_ksettings cmd;
- int err;
-
- rtnl_lock();
- err = __ethtool_get_link_ksettings(dev, &cmd);
- rtnl_unlock();
- if (err)
- return IB_RATE_PORT_CURRENT;
-
- if (cmd.base.speed >= 40000)
- return IB_RATE_40_GBPS;
- else if (cmd.base.speed >= 30000)
- return IB_RATE_30_GBPS;
- else if (cmd.base.speed >= 20000)
- return IB_RATE_20_GBPS;
- else if (cmd.base.speed >= 10000)
- return IB_RATE_10_GBPS;
- else
- return IB_RATE_PORT_CURRENT;
-}
-
static inline int rdma_link_local_addr(struct in6_addr *addr)
{
if (addr->s6_addr32[0] == htonl(0xfe800000) &&
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 66a8f369a2fa..226ae3702d8a 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -10,7 +10,7 @@
#include <rdma/ib_verbs.h>
-int rdma_query_gid(struct ib_device *device, u8 port_num, int index,
+int rdma_query_gid(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr);
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
@@ -20,10 +20,10 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev,
const union ib_gid *gid,
enum ib_gid_type gid_type,
- u8 port,
+ u32 port,
struct net_device *ndev);
const struct ib_gid_attr *rdma_find_gid_by_filter(
- struct ib_device *device, const union ib_gid *gid, u8 port_num,
+ struct ib_device *device, const union ib_gid *gid, u32 port_num,
bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
void *),
void *context);
@@ -43,7 +43,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
* the local software cache.
*/
int ib_get_cached_pkey(struct ib_device *device_handle,
- u8 port_num,
+ u32 port_num,
int index,
u16 *pkey);
@@ -59,7 +59,7 @@ int ib_get_cached_pkey(struct ib_device *device_handle,
* the local software cache.
*/
int ib_find_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u16 pkey,
u16 *index);
@@ -75,7 +75,7 @@ int ib_find_cached_pkey(struct ib_device *device,
* the local software cache.
*/
int ib_find_exact_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u16 pkey,
u16 *index);
@@ -89,7 +89,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
* the local software cache.
*/
int ib_get_cached_lmc(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u8 *lmc);
/**
@@ -102,13 +102,16 @@ int ib_get_cached_lmc(struct ib_device *device,
* the local software cache.
*/
int ib_get_cached_port_state(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_port_state *port_active);
bool rdma_is_zero_gid(const union ib_gid *gid);
const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device,
- u8 port_num, int index);
+ u32 port_num, int index);
void rdma_put_gid_attr(const struct ib_gid_attr *attr);
void rdma_hold_gid_attr(const struct ib_gid_attr *attr);
+ssize_t rdma_query_gid_table(struct ib_device *device,
+ struct ib_uverbs_gid_entry *entries,
+ size_t max_entries);
#endif /* _IB_CACHE_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 382427add677..a2ac62b4a6cf 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -14,9 +14,6 @@
#include <rdma/ib_sa.h>
#include <rdma/rdma_cm.h>
-/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-extern struct class cm_class;
-
enum ib_cm_state {
IB_CM_IDLE,
IB_CM_LISTEN,
@@ -297,7 +294,6 @@ struct ib_cm_id {
void *context;
struct ib_device *device;
__be64 service_id;
- __be64 service_mask;
enum ib_cm_state state; /* internal CM/debug use */
enum ib_cm_lap_state lap_state; /* internal CM/debug use */
__be32 local_id;
@@ -343,13 +339,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
* and service ID resolution requests. The service ID should be specified
* network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
* assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- * range of service IDs. If set to 0, the service ID is matched
- * exactly. This parameter is ignored if %service_id is set to
- * IB_CM_ASSIGN_SERVICE_ID.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
- __be64 service_mask);
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id);
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
ib_cm_handler cm_handler,
@@ -357,6 +348,8 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
struct ib_cm_req_param {
struct sa_path_rec *primary_path;
+ struct sa_path_rec *primary_path_inbound;
+ struct sa_path_rec *primary_path_outbound;
struct sa_path_rec *alternate_path;
const struct ib_gid_attr *ppath_sgid_attr;
__be64 service_id;
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 57c1ac881d08..8ae07c0ecdf7 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -206,11 +206,6 @@ static inline u8 ib_get_lver(struct ib_header *hdr)
IB_LVER_MASK);
}
-static inline u16 ib_get_len(struct ib_header *hdr)
-{
- return (u16)(be16_to_cpu(hdr->lrh[2]));
-}
-
static inline u32 ib_get_qkey(struct ib_other_headers *ohdr)
{
return be32_to_cpu(ohdr->u.ud.deth[0]);
@@ -237,6 +232,7 @@ static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr)
#define IB_BTH_SE_SHIFT 23
#define IB_BTH_TVER_MASK 0xf
#define IB_BTH_TVER_SHIFT 16
+#define IB_BTH_OPCODE_CNP 0x81
static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr)
{
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 8dfb1ddf345a..3f1b58d8b4bf 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -276,6 +276,9 @@ enum ib_port_capability_mask2_bits {
IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
+ IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
+ IB_PORT_EXTENDED_SPEEDS2_SUP = 1 << 11,
+ IB_PORT_LINK_SPEED_XDR_SUP = 1 << 12,
};
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
@@ -668,7 +671,7 @@ struct ib_mad_reg_req {
* @registration_flags: Registration flags to set for this agent
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_qp_type qp_type,
struct ib_mad_reg_req *mad_reg_req,
u8 rmpp_version,
@@ -718,27 +721,26 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
/**
- * ib_cancel_mad - Cancels an outstanding send MAD operation.
- * @mad_agent: Specifies the registration associated with sent MAD.
- * @send_buf: Indicates the MAD to cancel.
- *
- * MADs will be returned to the user through the corresponding
- * ib_mad_send_handler.
- */
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf);
-
-/**
* ib_modify_mad - Modifies an outstanding send MAD operation.
- * @mad_agent: Specifies the registration associated with sent MAD.
* @send_buf: Indicates the MAD to modify.
* @timeout_ms: New timeout value for sent MAD.
*
* This call will reset the timeout value for a sent MAD to the specified
* value.
*/
-int ib_modify_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf, u32 timeout_ms);
+int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms);
+
+/**
+ * ib_cancel_mad - Cancels an outstanding send MAD operation.
+ * @send_buf: Indicates the MAD to cancel.
+ *
+ * MADs will be returned to the user through the corresponding
+ * ib_mad_send_handler.
+ */
+static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf)
+{
+ ib_modify_mad(send_buf, 0);
+}
/**
* ib_create_send_mad - Allocate and initialize a data buffer and work request
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index a9162f25beaf..b8c56d7dc35d 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -84,6 +84,8 @@ enum {
/* opcode 0x15 is reserved */
IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16,
IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17,
+ IB_OPCODE_FLUSH = 0x1C,
+ IB_OPCODE_ATOMIC_WRITE = 0x1D,
/* real constants follow -- see comment about above IB_OPCODE()
macro for more details */
@@ -112,6 +114,8 @@ enum {
IB_OPCODE(RC, FETCH_ADD),
IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE),
IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE),
+ IB_OPCODE(RC, FLUSH),
+ IB_OPCODE(RC, ATOMIC_WRITE),
/* UC */
IB_OPCODE(UC, SEND_FIRST),
@@ -149,6 +153,7 @@ enum {
IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
IB_OPCODE(RD, COMPARE_SWAP),
IB_OPCODE(RD, FETCH_ADD),
+ IB_OPCODE(RD, FLUSH),
/* UD */
IB_OPCODE(UD, SEND_ONLY),
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 693285e76f13..b46353fc53bf 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -186,6 +186,7 @@ struct sa_path_rec {
struct sa_path_rec_opa opa;
};
enum sa_path_rec_type rec_type;
+ u32 flags;
};
static inline enum ib_gid_type
@@ -366,20 +367,6 @@ struct ib_sa_mcmember_rec {
#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF
-struct ib_sa_service_rec {
- u64 id;
- union ib_gid gid;
- __be16 pkey;
- /* reserved */
- u32 lease;
- u8 key[16];
- u8 name[64];
- u8 data8[16];
- u16 data16[8];
- u32 data32[4];
- u64 data64[2];
-};
-
#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0)
#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1)
#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2)
@@ -423,23 +410,13 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
- u8 port_num, struct sa_path_rec *rec,
+ u32 port_num, struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
gfp_t gfp_mask,
void (*callback)(int status, struct sa_path_rec *resp,
- void *context),
+ unsigned int num_prs, void *context),
void *context, struct ib_sa_query **query);
-int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
- gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context, struct ib_sa_query **sa_query);
-
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
ib_sa_comp_mask comp_mask;
@@ -477,7 +454,8 @@ struct ib_sa_multicast {
* group, and the user must rejoin the group to continue using it.
*/
struct ib_sa_multicast *ib_sa_join_multicast(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device,
+ u32 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
int (*callback)(int status,
@@ -506,20 +484,20 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast);
* @mgid: MGID of multicast group.
* @rec: Location to copy SA multicast member record.
*/
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
union ib_gid *mgid, struct ib_sa_mcmember_rec *rec);
/**
* ib_init_ah_from_mcmember - Initialize address handle attributes based on
* an SA multicast member record.
*/
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
struct rdma_ah_attr *ah_attr);
-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *sgid_attr);
@@ -538,7 +516,7 @@ void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec);
/* Support GuidInfoRecord */
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -547,10 +525,6 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
void *context),
void *context, struct ib_sa_query **sa_query);
-bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num);
-
static inline bool sa_path_is_roce(struct sa_path_rec *rec)
{
return ((rec->rec_type == SA_PATH_REC_TYPE_ROCE_V1) ||
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index fdb8633cbaff..fc16b826b2c1 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -144,5 +144,15 @@ ib_get_smp_direction(struct ib_smp *smp)
#define IB_NOTICE_TRAP_DR_NOTICE 0x80
#define IB_NOTICE_TRAP_DR_TRUNC 0x40
-
+/**
+ * ib_init_query_mad - Initialize query MAD.
+ * @mad: MAD to initialize.
+ */
+static inline void ib_init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = IB_MGMT_BASE_VERSION;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
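
Editorial sketch of the intended use (allocation of 'in_mad' elided,
'port_num' assumed in scope):

	ib_init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port_num);
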
#endif /* IB_SMI_H */
diff --git a/include/rdma/ib_sysfs.h b/include/rdma/ib_sysfs.h
new file mode 100644
index 000000000000..3b77cfd74d9a
--- /dev/null
+++ b/include/rdma/ib_sysfs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2021 Mellanox Technologies Ltd. All rights reserved.
+ */
+#ifndef DEF_RDMA_IB_SYSFS_H
+#define DEF_RDMA_IB_SYSFS_H
+
+#include <linux/sysfs.h>
+
+struct ib_device;
+
+struct ib_port_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf);
+ ssize_t (*store)(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
+ size_t count);
+};
+
+#define IB_PORT_ATTR_RW(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = __ATTR_RW(_name)
+
+#define IB_PORT_ATTR_ADMIN_RW(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = \
+ __ATTR_RW_MODE(_name, 0600)
+
+#define IB_PORT_ATTR_RO(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = __ATTR_RO(_name)
+
+#define IB_PORT_ATTR_WO(_name) \
+ struct ib_port_attribute ib_port_attr_##_name = __ATTR_WO(_name)
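
Editorial sketch (attribute name hypothetical): a driver pairs a show
routine with one of the helpers above to expose a per-port sysfs file:

	static ssize_t hw_rev_show(struct ib_device *ibdev, u32 port_num,
				   struct ib_port_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%u\n", port_num);
	}
	static IB_PORT_ATTR_RO(hw_rev);
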
+
+struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj,
+ u32 *port_num);
+
+#endif
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 71f573a418bf..565a85044541 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2007 Cisco Systems. All rights reserved.
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
*/
#ifndef IB_UMEM_H
@@ -13,45 +14,146 @@
struct ib_ucontext;
struct ib_umem_odp;
+struct dma_buf_attach_ops;
struct ib_umem {
struct ib_device *ibdev;
struct mm_struct *owning_mm;
+ u64 iova;
size_t length;
unsigned long address;
u32 writable : 1;
u32 is_odp : 1;
- struct work_struct work;
- struct sg_table sg_head;
- int nmap;
- unsigned int sg_nents;
+ u32 is_dmabuf : 1;
+ struct sg_append_table sgt_append;
};
+struct ib_umem_dmabuf {
+ struct ib_umem umem;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct scatterlist *first_sg;
+ struct scatterlist *last_sg;
+ unsigned long first_sg_offset;
+ unsigned long last_sg_trim;
+ void *private;
+ u8 pinned : 1;
+};
+
+static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
+{
+ return container_of(umem, struct ib_umem_dmabuf, umem);
+}
+
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
return umem->address & ~PAGE_MASK;
}
+static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
+ unsigned long pgsz)
+{
+ return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
+ (pgsz - 1);
+}
+
+static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
+ unsigned long pgsz)
+{
+ return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
+ ALIGN_DOWN(umem->iova, pgsz))) /
+ pgsz;
+}
+
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
- return (ALIGN(umem->address + umem->length, PAGE_SIZE) -
- ALIGN_DOWN(umem->address, PAGE_SIZE)) >>
- PAGE_SHIFT;
+ return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+}
+
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ struct ib_umem *umem,
+ unsigned long pgsz)
+{
+ __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.nents, pgsz);
+ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ *
+ * Performs exactly ib_umem_num_dma_blocks() iterations.
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
+ for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
+ __rdma_umem_block_iter_next(biter);)
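
Editorial sketch: a typical consumer fills a HW page array, one entry per
DMA block ('pas' assumed sized by ib_umem_num_dma_blocks(umem, pgsz)):

	struct ib_block_iter biter;
	size_t i = 0;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		pas[i++] = rdma_block_iter_dma_address(&biter);
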
+
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
-int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long pgsz_bitmap,
unsigned long virt);
+/**
+ * ib_umem_find_best_pgoff - Find best HW page size
+ *
+ * @umem: umem struct
+ * @pgsz_bitmap: bitmap of HW-supported page sizes
+ * @pgoff_bitmask: Mask of bits that can be represented with an offset
+ *
+ * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
+ * an IOVA it accepts a bitmask specifying what address bits can be represented
+ * with a page offset.
+ *
+ * For instance, if the HW has multiple page sizes, requires 64-byte alignment,
+ * and can support aligned offsets up to 4032, then pgoff_bitmask would be
+ * "111111000000".
+ *
+ * If the pgoff_bitmask requires either alignment in the low bit or an
+ * unavailable page size for the high bits, this function returns 0.
+ */
+static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ u64 pgoff_bitmask)
+{
+ struct scatterlist *sg = umem->sgt_append.sgt.sgl;
+ dma_addr_t dma_addr;
+
+ dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
+ return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
+ dma_addr & pgoff_bitmask);
+}
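
Editorial sketch matching the example in the comment above: 64-byte
alignment, offsets representable up to 4032, and 4K or 2M pages supported
(error handling illustrative):

	unsigned long pgsz;

	pgsz = ib_umem_find_best_pgoff(umem, SZ_4K | SZ_2M, GENMASK(11, 6));
	if (!pgsz)
		return ERR_PTR(-EINVAL);	/* no usable page size */
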
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops);
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access);
+int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
+
#else /* CONFIG_INFINIBAND_USER_MEM */
#include <linux/err.h>
@@ -60,20 +162,46 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device,
unsigned long addr, size_t size,
int access)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
-static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
size_t length) {
- return -EINVAL;
+ return -EOPNOTSUPP;
}
-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
- unsigned long pgsz_bitmap,
- unsigned long virt) {
- return -EINVAL;
+static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ unsigned long virt)
+{
+ return 0;
}
+static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ u64 pgoff_bitmask)
+{
+ return 0;
+}
+static inline
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access,
+ struct dma_buf_attach_ops *ops)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
+ size_t size, int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
#endif /* CONFIG_INFINIBAND_USER_MEM */
-
#endif /* IB_UMEM_H */
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index d16d2c17e733..0844c1d05ac6 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -14,17 +14,13 @@ struct ib_umem_odp {
struct mmu_interval_notifier notifier;
struct pid *tgid;
+ /* An array of the pfns included in the on-demand paging umem. */
+ unsigned long *pfn_list;
+
/*
- * An array of the pages included in the on-demand paging umem.
- * Indices of pages that are currently not mapped into the device will
- * contain NULL.
- */
- struct page **page_list;
- /*
- * An array of the same size as page_list, with DMA addresses mapped
- * for pages the pages in page_list. The lower two bits designate
- * access permissions. See ODP_READ_ALLOWED_BIT and
- * ODP_WRITE_ALLOWED_BIT.
+ * An array with DMA addresses mapped for pfns in pfn_list.
+ * The lower two bits designate access permissions.
+ * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
*/
dma_addr_t *dma_list;
/*
@@ -97,9 +93,8 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
-int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
- u64 bcnt, u64 access_mask,
- unsigned long current_seq);
+int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
+ u64 bcnt, u64 access_mask, bool fault);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c0b2fa7e9b95..477bf9dd5e71 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -12,6 +12,7 @@
#ifndef IB_VERBS_H
#define IB_VERBS_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -49,6 +50,8 @@ struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;
+struct ib_port;
+struct hw_stats_device_data;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
@@ -138,10 +141,9 @@ union ib_gid {
extern union ib_gid zgid;
enum ib_gid_type {
- /* If link layer is Ethernet, this is RoCE V1 */
- IB_GID_TYPE_IB = 0,
- IB_GID_TYPE_ROCE = 0,
- IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
+ IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
+ IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
+ IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
IB_GID_TYPE_SIZE
};
@@ -152,7 +154,7 @@ struct ib_gid_attr {
union ib_gid gid;
enum ib_gid_type gid_type;
u16 index;
- u8 port_num;
+ u32 port_num;
};
enum {
@@ -180,7 +182,7 @@ rdma_node_get_transport(unsigned int node_type);
enum rdma_network_type {
RDMA_NETWORK_IB,
- RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
+ RDMA_NETWORK_ROCE_V1,
RDMA_NETWORK_IPV4,
RDMA_NETWORK_IPV6
};
@@ -190,9 +192,10 @@ static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type net
if (network_type == RDMA_NETWORK_IPV4 ||
network_type == RDMA_NETWORK_IPV6)
return IB_GID_TYPE_ROCE_UDP_ENCAP;
-
- /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
- return IB_GID_TYPE_IB;
+ else if (network_type == RDMA_NETWORK_ROCE_V1)
+ return IB_GID_TYPE_ROCE;
+ else
+ return IB_GID_TYPE_IB;
}
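With RDMA_NETWORK_ROCE_V1 no longer aliased to RDMA_NETWORK_IB, the conversion becomes a one-to-one mapping; a sketch of the resulting behavior:

	enum ib_gid_type t;

	t = ib_network_to_gid_type(RDMA_NETWORK_IB);	  /* IB_GID_TYPE_IB */
	t = ib_network_to_gid_type(RDMA_NETWORK_ROCE_V1); /* IB_GID_TYPE_ROCE */
	t = ib_network_to_gid_type(RDMA_NETWORK_IPV4);	  /* IB_GID_TYPE_ROCE_UDP_ENCAP */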
static inline enum rdma_network_type
@@ -201,6 +204,9 @@ rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
if (attr->gid_type == IB_GID_TYPE_IB)
return RDMA_NETWORK_IB;
+ if (attr->gid_type == IB_GID_TYPE_ROCE)
+ return RDMA_NETWORK_ROCE_V1;
+
if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
return RDMA_NETWORK_IPV4;
else
@@ -214,32 +220,24 @@ enum rdma_link_layer {
};
enum ib_device_cap_flags {
- IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
- IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
- IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
- IB_DEVICE_RAW_MULTI = (1 << 3),
- IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
- IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
- IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
- IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
- IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
- /* Not in use, former INIT_TYPE = (1 << 9),*/
- IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
- IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
- IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
- IB_DEVICE_SRQ_RESIZE = (1 << 13),
- IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
-
- /*
- * This device supports a per-device lkey or stag that can be
- * used without performing a memory registration for the local
- * memory. Note that ULPs should never check this flag, but
- * instead of use the local_dma_lkey flag in the ib_pd structure,
- * which will always contain a usable lkey.
- */
- IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
- /* Reserved, old SEND_W_INV = (1 << 16),*/
- IB_DEVICE_MEM_WINDOW = (1 << 17),
+ IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
+ IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
+ IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
+ IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
+ IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
+ IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
+ IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
+ IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
+ IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
+ /* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
+ IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
+ IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
+ IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
+ IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
+ IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
+
+ /* Reserved, old SEND_W_INV = 1 << 16,*/
+ IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
/*
* Devices should set IB_DEVICE_UD_IP_SUM if they support
* insertion of UDP and TCP checksum on outgoing UD IPoIB
@@ -247,9 +245,8 @@ enum ib_device_cap_flags {
* incoming messages. Setting this flag implies that the
* IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
*/
- IB_DEVICE_UD_IP_CSUM = (1 << 18),
- IB_DEVICE_UD_TSO = (1 << 19),
- IB_DEVICE_XRC = (1 << 20),
+ IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
+ IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
/*
* This device supports the IB "base memory management extension",
@@ -260,31 +257,57 @@ enum ib_device_cap_flags {
* IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
* stag.
*/
- IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
- IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
- IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
- IB_DEVICE_RC_IP_CSUM = (1 << 25),
+ IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
+ IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
+ IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
+ IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
- IB_DEVICE_RAW_IP_CSUM = (1 << 26),
- /*
- * Devices should set IB_DEVICE_CROSS_CHANNEL if they
- * support execution of WQEs that involve synchronization
- * of I/O operations with single completion queue managed
- * by hardware.
- */
- IB_DEVICE_CROSS_CHANNEL = (1 << 27),
- IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
- IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
- IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
- IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
- IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
+ IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
+ IB_DEVICE_MANAGED_FLOW_STEERING =
+ IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
- IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
- IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
+ IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
/* The device supports padding incoming writes to cacheline. */
- IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
- IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
+ IB_DEVICE_PCI_WRITE_END_PADDING =
+ IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
+ /* Placement type attributes */
+ IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
+ IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
+ IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
+};
+
+enum ib_kernel_cap_flags {
+ /*
+ * This device supports a per-device lkey or stag that can be
+ * used without performing a memory registration for the local
+ * memory. Note that ULPs should never check this flag, but
+ * instead use the local_dma_lkey flag in the ib_pd structure,
+ * which will always contain a usable lkey.
+ */
+ IBK_LOCAL_DMA_LKEY = 1 << 0,
+ /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
+ IBK_INTEGRITY_HANDOVER = 1 << 1,
+ /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
+ IBK_ON_DEMAND_PAGING = 1 << 2,
+ /* IB_MR_TYPE_SG_GAPS is supported */
+ IBK_SG_GAPS_REG = 1 << 3,
+ /* Driver supports RDMA_NLDEV_CMD_DELLINK */
+ IBK_ALLOW_USER_UNREG = 1 << 4,
+
+ /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
+ IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
+ /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
+ IBK_UD_TSO = 1 << 6,
+ /* ipoib will use the device ops:
+ * get_vf_config
+ * get_vf_guid
+ * get_vf_stats
+ * set_vf_guid
+ * set_vf_link_state
+ */
+ IBK_VIRTUAL_FUNCTION = 1 << 7,
+ /* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
+ IBK_RDMA_NETDEV_OPA = 1 << 8,
};
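After this split, in-kernel users test these bits on the new kernel_cap_flags word rather than on device_cap_flags; a minimal ULP-side sketch, assuming "dev" is an ib_device the caller already holds:

	if (!(dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
		return -EOPNOTSUPP;	/* ODP registration not available */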
enum ib_atomic_cap {
@@ -383,6 +406,7 @@ struct ib_device_attr {
int max_qp;
int max_qp_wr;
u64 device_cap_flags;
+ u64 kernel_cap_flags;
int max_send_sge;
int max_recv_sge;
int max_sge_rd;
@@ -535,21 +559,41 @@ enum ib_port_speed {
IB_SPEED_FDR10 = 8,
IB_SPEED_FDR = 16,
IB_SPEED_EDR = 32,
- IB_SPEED_HDR = 64
+ IB_SPEED_HDR = 64,
+ IB_SPEED_NDR = 128,
+ IB_SPEED_XDR = 256,
+};
+
+enum ib_stat_flag {
+ IB_STAT_FLAG_OPTIONAL = 1 << 0,
+};
+
+/**
+ * struct rdma_stat_desc
+ * @name - The name of the counter
+ * @flags - Flags of the counter; for example, IB_STAT_FLAG_OPTIONAL
+ * @priv - Driver private information; Core code should not use
+ */
+struct rdma_stat_desc {
+ const char *name;
+ unsigned int flags;
+ const void *priv;
};
/**
* struct rdma_hw_stats
* @lock - Mutex to protect parallel write access to lifespan and values
- * of counters, which are 64bits and not guaranteeed to be written
+ * of counters, which are 64bits and not guaranteed to be written
* atomically on 32bits systems.
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
* to 10 milliseconds, drivers can override the default by specifying
* their own value during their allocation routine.
- * @name - Array of pointers to static names used for the counters in
- * directory.
+ * @descs - Array of pointers to static descriptors used for the counters
+ * in directory.
+ * @is_disabled - A bitmap indicating whether each counter is currently
+ * disabled.
* @num_counters - How many hardware counters there are. If name is
* shorter than this number, a kernel oops will result. Driver authors
* are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
@@ -561,36 +605,19 @@ struct rdma_hw_stats {
struct mutex lock; /* Protect lifespan and values[] */
unsigned long timestamp;
unsigned long lifespan;
- const char * const *names;
+ const struct rdma_stat_desc *descs;
+ unsigned long *is_disabled;
int num_counters;
- u64 value[];
+ u64 value[] __counted_by(num_counters);
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
-/**
- * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
- * for drivers.
- * @names - Array of static const char *
- * @num_counters - How many elements in array
- * @lifespan - How many milliseconds between updates
- */
-static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
- const char * const *names, int num_counters,
- unsigned long lifespan)
-{
- struct rdma_hw_stats *stats;
-
- stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
- GFP_KERNEL);
- if (!stats)
- return NULL;
- stats->names = names;
- stats->num_counters = num_counters;
- stats->lifespan = msecs_to_jiffies(lifespan);
- return stats;
-}
+struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const struct rdma_stat_desc *descs, int num_counters,
+ unsigned long lifespan);
+void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
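A sketch of the driver side of the new allocator (the hyp_ names are hypothetical): a descriptor table replaces the old bare name array, and optional counters are marked via flags:

	static const struct rdma_stat_desc hyp_descs[] = {
		{ .name = "rx_packets" },
		{ .name = "rx_debug_drops", .flags = IB_STAT_FLAG_OPTIONAL },
	};

	struct rdma_hw_stats *stats =
		rdma_alloc_hw_stats_struct(hyp_descs, ARRAY_SIZE(hyp_descs),
					   RDMA_HW_STATS_DEFAULT_LIFESPAN);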
/* Define bits for the various functionality this port needs to be supported by
* the core.
@@ -669,7 +696,7 @@ struct ib_port_attr {
u8 subnet_timeout;
u8 init_type_reply;
u8 active_width;
- u8 active_speed;
+ u16 active_speed;
u8 phys_state;
u16 port_cap_flags2;
};
@@ -731,7 +758,7 @@ struct ib_event {
struct ib_qp *qp;
struct ib_srq *srq;
struct ib_wq *wq;
- u8 port_num;
+ u32 port_num;
} element;
enum ib_event_type event;
};
@@ -814,6 +841,7 @@ enum ib_rate {
IB_RATE_50_GBPS = 20,
IB_RATE_400_GBPS = 21,
IB_RATE_600_GBPS = 22,
+ IB_RATE_800_GBPS = 23,
};
/**
@@ -914,7 +942,7 @@ struct rdma_ah_attr {
struct ib_global_route grh;
u8 sl;
u8 static_rate;
- u8 port_num;
+ u32 port_num;
u8 ah_flags;
enum rdma_ah_attr_type type;
union {
@@ -952,16 +980,19 @@ enum ib_wc_status {
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
enum ib_wc_opcode {
- IB_WC_SEND,
- IB_WC_RDMA_WRITE,
- IB_WC_RDMA_READ,
- IB_WC_COMP_SWAP,
- IB_WC_FETCH_ADD,
- IB_WC_LSO,
- IB_WC_LOCAL_INV,
+ IB_WC_SEND = IB_UVERBS_WC_SEND,
+ IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
+ IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
+ IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
+ IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
+ IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
+ IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
+ IB_WC_LSO = IB_UVERBS_WC_TSO,
+ IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
IB_WC_REG_MR,
IB_WC_MASKED_COMP_SWAP,
IB_WC_MASKED_FETCH_ADD,
+ IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
/*
* Set value of IB_WC_RECV so consumers can test if a completion is a
* receive by testing (opcode & IB_WC_RECV).
@@ -1000,7 +1031,7 @@ struct ib_wc {
u16 pkey_index;
u8 sl;
u8 dlid_path_bits;
- u8 port_num; /* valid only for DR SMPs on switches */
+ u32 port_num; /* valid only for DR SMPs on switches */
u8 smac[ETH_ALEN];
u16 vlan_id;
u8 network_hdr_type;
@@ -1065,7 +1096,7 @@ struct ib_qp_cap {
/*
* Maximum number of rdma_rw_ctx structures in flight at a time.
- * ib_create_qp() will calculate the right amount of neededed WRs
+ * ib_create_qp() will calculate the right amount of needed WRs
* and MRs based on this.
*/
u32 max_rdma_ctxs;
@@ -1139,7 +1170,7 @@ enum ib_qp_create_flags {
*/
struct ib_qp_init_attr {
- /* Consumer's event_handler callback must not block */
+ /* This callback occurs in workqueue context */
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
@@ -1155,7 +1186,7 @@ struct ib_qp_init_attr {
/*
* Only needed for special QP types, or when using the RW API.
*/
- u8 port_num;
+ u32 port_num;
struct ib_rwq_ind_table *rwq_ind_tbl;
u32 source_qpn;
};
@@ -1229,6 +1260,8 @@ enum ib_qp_attr_mask {
IB_QP_RESERVED3 = (1<<23),
IB_QP_RESERVED4 = (1<<24),
IB_QP_RATE_LIMIT = (1<<25),
+
+ IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};
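IB_QP_ATTR_STANDARD_BITS collects the non-reserved attribute bits (GENMASK(20, 0)), so a driver's modify_qp can reject anything outside the standard set in a single test; a sketch:

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;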
enum ib_qp_state {
@@ -1272,11 +1305,11 @@ struct ib_qp_attr {
u8 max_rd_atomic;
u8 max_dest_rd_atomic;
u8 min_rnr_timer;
- u8 port_num;
+ u32 port_num;
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
- u8 alt_port_num;
+ u32 alt_port_num;
u8 alt_timeout;
u32 rate_limit;
struct net_device *xmit_slave;
@@ -1291,6 +1324,7 @@ enum ib_wr_opcode {
IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
+ IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
IB_WR_LSO = IB_UVERBS_WR_TSO,
IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
@@ -1299,6 +1333,8 @@ enum ib_wr_opcode {
IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+ IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
+ IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,
/* These are kernel only and can not be issued by userspace */
IB_WR_REG_MR = 0x20,
@@ -1392,7 +1428,7 @@ struct ib_ud_wr {
u32 remote_qpn;
u32 remote_qkey;
u16 pkey_index; /* valid for GSI only */
- u8 port_num; /* valid for DR SMPs on switch only */
+ u32 port_num; /* valid for DR SMPs on switch only */
};
static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
@@ -1432,10 +1468,12 @@ enum ib_access_flags {
IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
+ IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
+ IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,
IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
IB_ACCESS_SUPPORTED =
- ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
+ ((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
};
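Because IB_ACCESS_SUPPORTED is derived from the highest flag, the two new FLUSH bits land inside it automatically; a sketch of the usual validation on a registration path:

	if (access_flags & ~IB_ACCESS_SUPPORTED)
		return -EINVAL;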
/*
@@ -1463,11 +1501,8 @@ enum rdma_remove_reason {
RDMA_REMOVE_DRIVER_REMOVE,
/* uobj is being cleaned-up before being committed */
RDMA_REMOVE_ABORT,
- /*
- * uobj has been fully created, with the uobj->object set, but is being
- * cleaned up before being comitted
- */
- RDMA_REMOVE_ABORT_HWOBJ,
+ /* The driver failed to destroy the uobject and is being disconnected */
+ RDMA_REMOVE_DRIVER_FAILURE,
};
struct ib_rdmacg_object {
@@ -1479,14 +1514,6 @@ struct ib_rdmacg_object {
struct ib_ucontext {
struct ib_device *device;
struct ib_uverbs_file *ufile;
- /*
- * 'closing' can be read by the driver only during a destroy callback,
- * it is set when we are closing the file descriptor and indicates
- * that mm_sem may be locked.
- */
- bool closing;
-
- bool cleanup_retryable;
struct ib_rdmacg_object cg_obj;
/*
@@ -1612,22 +1639,31 @@ struct ib_srq {
} xrc;
};
} ext;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
enum ib_raw_packet_caps {
- /* Strip cvlan from incoming packet and report it in the matching work
+ /*
+ * Stripping cvlan from an incoming packet and reporting it in the matching work
* completion is supported.
*/
- IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
- /* Scatter FCS field of an incoming packet to host memory is supported.
+ IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
+ IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
+ /*
+ * Scattering the FCS field of an incoming packet to host memory is supported.
*/
- IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
+ IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
/* Checksum offloads are supported (for both send and receive). */
- IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
- /* When a packet is received for an RQ with no receive WQEs, the
+ IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
+ /*
+ * When a packet is received for an RQ with no receive WQEs, the
* packet processing is delayed.
*/
- IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
+ IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
};
enum ib_wq_type {
@@ -1710,7 +1746,7 @@ struct ib_qp_security;
struct ib_port_pkey {
enum port_pkey_state state;
u16 pkey_index;
- u8 port_num;
+ u32 port_num;
struct list_head qp_list;
struct list_head to_error_list;
struct ib_qp_security *sec;
@@ -1771,7 +1807,7 @@ struct ib_qp {
enum ib_qp_type qp_type;
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
- u8 port;
+ u32 port;
bool integrity_en;
/*
@@ -1863,17 +1899,6 @@ enum ib_flow_spec_type {
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
-/* Flow steering rule priority is set according to it's domain.
- * Lower domain value means higher priority.
- */
-enum ib_flow_domain {
- IB_FLOW_DOMAIN_USER,
- IB_FLOW_DOMAIN_ETHTOOL,
- IB_FLOW_DOMAIN_RFS,
- IB_FLOW_DOMAIN_NIC,
- IB_FLOW_DOMAIN_NUM /* Must be last */
-};
-
enum ib_flow_flags {
IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
@@ -1885,8 +1910,6 @@ struct ib_flow_eth_filter {
u8 src_mac[6];
__be16 ether_type;
__be16 vlan_tag;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_eth {
@@ -1899,8 +1922,6 @@ struct ib_flow_spec_eth {
struct ib_flow_ib_filter {
__be16 dlid;
__u8 sl;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_ib {
@@ -1924,8 +1945,6 @@ struct ib_flow_ipv4_filter {
u8 tos;
u8 ttl;
u8 flags;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_ipv4 {
@@ -1942,9 +1961,7 @@ struct ib_flow_ipv6_filter {
u8 next_hdr;
u8 traffic_class;
u8 hop_limit;
- /* Must be last */
- u8 real_sz[];
-};
+} __packed;
struct ib_flow_spec_ipv6 {
u32 type;
@@ -1956,8 +1973,6 @@ struct ib_flow_spec_ipv6 {
struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_tcp_udp {
@@ -1969,7 +1984,6 @@ struct ib_flow_spec_tcp_udp {
struct ib_flow_tunnel_filter {
__be32 tunnel_id;
- u8 real_sz[];
};
/* ib_flow_spec_tunnel describes the Vxlan tunnel
@@ -1985,8 +1999,6 @@ struct ib_flow_spec_tunnel {
struct ib_flow_esp_filter {
__be32 spi;
__be32 seq;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_esp {
@@ -2000,8 +2012,6 @@ struct ib_flow_gre_filter {
__be16 c_ks_res0_ver;
__be16 protocol;
__be32 key;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_gre {
@@ -2013,8 +2023,6 @@ struct ib_flow_spec_gre {
struct ib_flow_mpls_filter {
__be32 tag;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_mpls {
@@ -2078,7 +2086,7 @@ struct ib_flow_attr {
u16 priority;
u32 flags;
u8 num_of_specs;
- u8 port;
+ u32 port;
union ib_flow_spec flows[];
};
@@ -2147,7 +2155,6 @@ struct ib_flow_action {
};
struct ib_mad;
-struct ib_grh;
enum ib_process_mad_flags {
IB_MAD_IGNORE_MKEY = 1,
@@ -2183,15 +2190,18 @@ struct ib_port_data {
struct ib_port_immutable immutable;
spinlock_t pkey_list_lock;
+
+ spinlock_t netdev_lock;
+
struct list_head pkey_list;
struct ib_port_cache cache;
- spinlock_t netdev_lock;
struct net_device __rcu *netdev;
+ netdevice_tracker netdev_tracker;
struct hlist_node ndev_hash_link;
struct rdma_port_counter port_counter;
- struct rdma_hw_stats *hw_stats;
+ struct ib_port *sysfs;
};
/* rdma netdev type - specifies protocol type */
@@ -2207,7 +2217,7 @@ enum rdma_netdev_t {
struct rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
- u8 port_num;
+ u32 port_num;
int mtu;
/*
@@ -2228,6 +2238,8 @@ struct rdma_netdev {
int set_qkey, u32 qkey);
int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
union ib_gid *gid, u16 mlid);
+ /* timeout */
+ void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};
struct rdma_netdev_alloc_params {
@@ -2236,7 +2248,7 @@ struct rdma_netdev_alloc_params {
unsigned int rxqs;
void *param;
- int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
+ int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
struct net_device *netdev, void *param);
};
@@ -2271,8 +2283,13 @@ struct iw_cm_conn_param;
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
-#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
- ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
+#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
+ ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
+ gfp, false))
+
+#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
+ ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
+ GFP_KERNEL, true))
#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
@@ -2305,6 +2322,14 @@ struct ib_device_ops {
u32 uverbs_abi_ver;
unsigned int uverbs_no_driver_id_binding:1;
+ /*
+ * NOTE: New drivers should not make use of device_group; instead new
+ * device parameter should be exposed via netlink command. This
+ * mechanism exists only for existing drivers.
+ */
+ const struct attribute_group *device_group;
+ const struct attribute_group **port_groups;
+
int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
const struct ib_send_wr **bad_send_wr);
int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
@@ -2314,12 +2339,11 @@ struct ib_device_ops {
int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
- int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
int (*post_srq_recv)(struct ib_srq *srq,
const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int (*process_mad)(struct ib_device *device, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -2331,9 +2355,9 @@ struct ib_device_ops {
void (*get_dev_fw_str)(struct ib_device *device, char *str);
const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
int comp_vector);
- int (*query_port)(struct ib_device *device, u8 port_num,
+ int (*query_port)(struct ib_device *device, u32 port_num,
struct ib_port_attr *port_attr);
- int (*modify_port)(struct ib_device *device, u8 port_num,
+ int (*modify_port)(struct ib_device *device, u32 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
/**
@@ -2342,10 +2366,10 @@ struct ib_device_ops {
* structure to avoid cache line misses when accessing struct ib_device
* in fast paths.
*/
- int (*get_port_immutable)(struct ib_device *device, u8 port_num,
+ int (*get_port_immutable)(struct ib_device *device, u32 port_num,
struct ib_port_immutable *immutable);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
@@ -2354,7 +2378,8 @@ struct ib_device_ops {
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state.
*/
- struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
+ struct net_device *(*get_netdev)(struct ib_device *device,
+ u32 port_num);
/**
* rdma netdev operation
*
@@ -2362,11 +2387,11 @@ struct ib_device_ops {
* must return -EOPNOTSUPP if it doesn't support the specified type.
*/
struct net_device *(*alloc_rdma_netdev)(
- struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
+ struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
const char *name, unsigned char name_assign_type,
void (*setup)(struct net_device *));
- int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
+ int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
/**
@@ -2374,7 +2399,7 @@ struct ib_device_ops {
* link layer is either IB or iWarp. It is no-op if @port_num port
* is RoCE link layer.
*/
- int (*query_gid)(struct ib_device *device, u8 port_num, int index,
+ int (*query_gid)(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
/**
* When calling add_gid, the HW vendor's driver should add the gid
@@ -2399,7 +2424,7 @@ struct ib_device_ops {
* This function is only called when roce_gid_table is used.
*/
int (*del_gid)(const struct ib_gid_attr *attr, void **context);
- int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
+ int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
u16 *pkey);
int (*alloc_ucontext)(struct ib_ucontext *context,
struct ib_udata *udata);
@@ -2414,12 +2439,14 @@ struct ib_device_ops {
void (*mmap_free)(struct rdma_user_mmap_entry *entry);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
- void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
+ int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
struct ib_udata *udata);
+ int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
- void (*destroy_ah)(struct ib_ah *ah, u32 flags);
+ int (*destroy_ah)(struct ib_ah *ah, u32 flags);
int (*create_srq)(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
@@ -2427,10 +2454,9 @@ struct ib_device_ops {
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
- void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
- struct ib_qp *(*create_qp)(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
+ int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
+ int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
@@ -2439,15 +2465,20 @@ struct ib_device_ops {
int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
- void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
+ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
- int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_pd *pd, struct ib_udata *udata);
+ struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr, int fd,
+ int mr_access_flags,
+ struct ib_udata *udata);
+ struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
@@ -2458,51 +2489,49 @@ struct ib_device_ops {
enum ib_uverbs_advise_mr_advice advice, u32 flags,
struct ib_sge *sg_list, u32 num_sge,
struct uverbs_attr_bundle *attrs);
+
+ /*
+ * Kernel users should universally support relaxed ordering (RO), as
+ * they are designed to read data only after observing the CQE and use
+ * the DMA API correctly.
+ *
+ * Some drivers implicitly enable RO if the platform supports it.
+ */
int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
- struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
+ int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
- void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain, struct ib_udata *udata);
+ struct ib_udata *udata);
int (*destroy_flow)(struct ib_flow *flow_id);
- struct ib_flow_action *(*create_flow_action_esp)(
- struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
int (*destroy_flow_action)(struct ib_flow_action *action);
- int (*modify_flow_action_esp)(
- struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
- int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+ int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
int state);
- int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *ivf);
- int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats);
- int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
- int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+ int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
- void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+ int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask, struct ib_udata *udata);
- struct ib_rwq_ind_table *(*create_rwq_ind_table)(
- struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
+ int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *(*alloc_dm)(struct ib_device *device,
struct ib_ucontext *context,
@@ -2514,7 +2543,7 @@ struct ib_device_ops {
struct uverbs_attr_bundle *attrs);
int (*create_counters)(struct ib_counters *counters,
struct uverbs_attr_bundle *attrs);
- void (*destroy_counters)(struct ib_counters *counters);
+ int (*destroy_counters)(struct ib_counters *counters);
int (*read_counters)(struct ib_counters *counters,
struct ib_counters_read_attr *counters_read_attr,
struct uverbs_attr_bundle *attrs);
@@ -2524,13 +2553,14 @@ struct ib_device_ops {
unsigned int *meta_sg_offset);
/**
- * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
- * driver initialized data. The struct is kfree()'ed by the sysfs
- * core when the device is removed. A lifespan of -1 in the return
- * struct tells the core to set a default lifespan.
+ * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
+ * fill in the driver initialized data. The struct is kfree()'ed by
+ * the sysfs core when the device is removed. A lifespan of -1 in the
+ * return struct tells the core to set a default lifespan.
*/
- struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
- u8 port_num);
+ struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
+ struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
+ u32 port_num);
/**
* get_hw_stats - Fill in the counter value(s) in the stats struct.
* @index - The index in the value array we wish to have updated, or
@@ -2544,13 +2574,15 @@ struct ib_device_ops {
* one given in index at their option
*/
int (*get_hw_stats)(struct ib_device *device,
- struct rdma_hw_stats *stats, u8 port, int index);
- /*
- * This function is called once for each port when a ib device is
- * registered.
+ struct rdma_hw_stats *stats, u32 port, int index);
+
+ /**
+ * modify_hw_stat - Modify the counter configuration
+ * @enable: true/false when enable/disable a counter
+ * Return codes - 0 on success or error code otherwise.
*/
- int (*init_port)(struct ib_device *device, u8 port_num,
- struct kobject *port_sysfs);
+ int (*modify_hw_stat)(struct ib_device *device, u32 port,
+ unsigned int counter_index, bool enable);
/**
* Allows rdma drivers to add their own restrack attributes.
*/
@@ -2561,6 +2593,8 @@ struct ib_device_ops {
int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
+ int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
+ int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
/* Device lifecycle callbacks */
/*
@@ -2621,10 +2655,19 @@ struct ib_device_ops {
int (*query_ucontext)(struct ib_ucontext *context,
struct uverbs_attr_bundle *attrs);
+ /*
+ * Provide NUMA node. This API exists for rdmavt/hfi1 only.
+ * Everyone else relies on the Linux memory management model.
+ */
+ int (*get_numa_node)(struct ib_device *dev);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
+ DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
+ DECLARE_RDMA_OBJ_SIZE(ib_qp);
+ DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
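A sketch of how a driver wires the split stats allocators (hypothetical hyp_ names, reusing a descriptor table like the one shown earlier):

	static struct rdma_hw_stats *hyp_alloc_hw_port_stats(struct ib_device *ibdev,
							     u32 port_num)
	{
		return rdma_alloc_hw_stats_struct(hyp_descs, ARRAY_SIZE(hyp_descs),
						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
	}

	static const struct ib_device_ops hyp_dev_ops = {
		.alloc_hw_port_stats = hyp_alloc_hw_port_stats,
		/* .get_hw_stats, .modify_hw_stat, ... */
	};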
@@ -2674,14 +2717,14 @@ struct ib_device {
struct ib_core_device coredev;
};
- /* First group for device attributes,
- * Second group for driver provided attributes (optional).
- * It is NULL terminated array.
+ /* First group is for device attributes,
+ * Second group is for driver provided attributes (optional).
+ * Third group is for the hw_stats.
+ * It is a NULL terminated array.
*/
- const struct attribute_group *groups[3];
+ const struct attribute_group *groups[4];
u64 uverbs_cmd_mask;
- u64 uverbs_ex_cmd_mask;
char node_desc[IB_DEVICE_NODE_DESC_MAX];
__be64 node_guid;
@@ -2692,10 +2735,9 @@ struct ib_device {
/* CQ adaptive moderation (RDMA DIM) */
u16 use_cq_dim:1;
u8 node_type;
- u8 phys_port_cnt;
+ u32 phys_port_cnt;
struct ib_device_attr attrs;
- struct attribute_group *hw_stats_ag;
- struct rdma_hw_stats *hw_stats;
+ struct hw_stats_device_data *hw_stats_data;
#ifdef CONFIG_CGROUP_RDMA
struct rdmacg_device cg_device;
@@ -2731,6 +2773,15 @@ struct ib_device {
u32 lag_flags;
};
+static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
+ gfp_t gfp, bool is_numa_aware)
+{
+ if (is_numa_aware && dev->ops.get_numa_node)
+ return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
+
+ return kzalloc(size, gfp);
+}
+
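The helper keeps the common case unchanged: only drivers that implement get_numa_node() (rdmavt/hfi1 style) reach kzalloc_node(); a sketch of the two call sites a driver sees:

	struct ib_qp *qp = rdma_zalloc_drv_obj_numa(ibdev, ib_qp);	/* NUMA-aware */
	struct ib_pd *pd = rdma_zalloc_drv_obj(ibdev, ib_pd);		/* plain kzalloc() */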
struct ib_client_nl_info;
struct ib_client {
const char *name;
@@ -2758,7 +2809,7 @@ struct ib_client {
* netdev. */
struct net_device *(*get_net_dev_by_params)(
struct ib_device *dev,
- u8 port,
+ u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr,
@@ -2782,6 +2833,7 @@ struct ib_block_iter {
/* internal states */
struct scatterlist *__sg; /* sg holding the current aligned block */
dma_addr_t __dma_addr; /* unaligned DMA address of this block */
+ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
unsigned int __sg_nents; /* number of SG entries */
unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
unsigned int __pg_bit; /* alignment of current block */
@@ -2798,7 +2850,8 @@ void ib_dealloc_device(struct ib_device *device);
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device, const char *name);
+int ib_register_device(struct ib_device *device, const char *name,
+ struct device *dma_device);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
@@ -2870,6 +2923,15 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
size_t length, u32 min_pgoff,
u32 max_pgoff);
+static inline int
+rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 pgoff)
+{
+ return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
+ pgoff);
+}
+
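A sketch of the intended use (HYP_DB_PGOFF and the entry type are hypothetical): a driver that must expose an object at a fixed, ABI-visible page offset pins it exactly instead of letting the core pick one:

	ret = rdma_user_mmap_entry_insert_exact(ucontext, &entry->rdma_entry,
						PAGE_SIZE, HYP_DB_PGOFF);
	if (ret)
		return ret;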
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
unsigned long pgoff);
@@ -2916,46 +2978,6 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
}
/**
- * ib_is_destroy_retryable - Check whether the uobject destruction
- * is retryable.
- * @ret: The initial destruction return code
- * @why: remove reason
- * @uobj: The uobject that is destroyed
- *
- * This function is a helper function that IB layer and low-level drivers
- * can use to consider whether the destruction of the given uobject is
- * retry-able.
- * It checks the original return code, if it wasn't success the destruction
- * is retryable according to the ucontext state (i.e. cleanup_retryable) and
- * the remove reason. (i.e. why).
- * Must be called with the object locked for destroy.
- */
-static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
- struct ib_uobject *uobj)
-{
- return ret && (why == RDMA_REMOVE_DESTROY ||
- uobj->context->cleanup_retryable);
-}
-
-/**
- * ib_destroy_usecnt - Called during destruction to check the usecnt
- * @usecnt: The usecnt atomic
- * @why: remove reason
- * @uobj: The uobject that is destroyed
- *
- * Non-zero usecnts will block destruction unless destruction was triggered by
- * a ucontext cleanup.
- */
-static inline int ib_destroy_usecnt(atomic_t *usecnt,
- enum rdma_remove_reason why,
- struct ib_uobject *uobj)
-{
- if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
- return -EBUSY;
- return 0;
-}
-
-/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
* the given QP state transition.
@@ -2978,10 +3000,10 @@ void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
- u8 port_num, struct ib_port_attr *port_attr);
+ u32 port_num, struct ib_port_attr *port_attr);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* rdma_cap_ib_switch - Check if the device is IB switch
@@ -3005,7 +3027,7 @@ static inline bool rdma_cap_ib_switch(const struct ib_device *device)
*
* Return start port number
*/
-static inline u8 rdma_start_port(const struct ib_device *device)
+static inline u32 rdma_start_port(const struct ib_device *device)
{
return rdma_cap_ib_switch(device) ? 0 : 1;
}
@@ -3016,9 +3038,10 @@ static inline u8 rdma_start_port(const struct ib_device *device)
* @iter - The unsigned int to store the port number
*/
#define rdma_for_each_port(device, iter) \
- for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
- unsigned int, iter))); \
- iter <= rdma_end_port(device); (iter)++)
+ for (iter = rdma_start_port(device + \
+ BUILD_BUG_ON_ZERO(!__same_type(u32, \
+ iter))); \
+ iter <= rdma_end_port(device); iter++)
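The BUILD_BUG_ON_ZERO() now enforces a u32 iterator, matching the widened port numbers; a usage sketch:

	u32 port;

	rdma_for_each_port(ibdev, port)
		pr_debug("port %u\n", port);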
/**
* rdma_end_port - Return the last valid port number for the device
@@ -3028,7 +3051,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
*
* Return last port number
*/
-static inline u8 rdma_end_port(const struct ib_device *device)
+static inline u32 rdma_end_port(const struct ib_device *device)
{
return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
@@ -3041,55 +3064,63 @@ static inline int rdma_is_port_valid(const struct ib_device *device,
}
static inline bool rdma_is_grh_required(const struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_PORT_IB_GRH_REQUIRED;
}
-static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_ib(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_IB;
}
-static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}
-static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}
-static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE;
}
-static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_iwarp(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_IWARP;
}
-static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
+static inline bool rdma_ib_or_roce(const struct ib_device *device,
+ u32 port_num)
{
return rdma_protocol_ib(device, port_num) ||
rdma_protocol_roce(device, port_num);
}
-static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_RAW_PACKET;
}
-static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_usnic(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_USNIC;
@@ -3107,7 +3138,7 @@ static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_n
*
* Return: true if the port supports sending/receiving of MAD packets.
*/
-static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_MAD;
@@ -3132,7 +3163,7 @@ static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
*
* Return: true if the port supports OPA MAD packet formats.
*/
-static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_MAD;
@@ -3158,7 +3189,7 @@ static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
*
* Return: true if the port provides an SMI.
*/
-static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_SMI;
@@ -3179,7 +3210,7 @@ static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
* Return: true if the port supports an IB CM (this does not guarantee that
* a CM is actually running however).
*/
-static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_CM;
@@ -3197,7 +3228,7 @@ static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
* Return: true if the port supports an iWARP CM (this does not guarantee that
* a CM is actually running however).
*/
-static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IW_CM;
@@ -3218,7 +3249,7 @@ static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
* Administration interface. This does not imply that the SA service is
* running locally.
*/
-static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_SA;
@@ -3241,7 +3272,8 @@ static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
* overhead of registering/unregistering with the SM and tracking of the
* total number of queue pairs attached to the multicast group.
*/
-static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
+ u32 port_num)
{
return rdma_cap_ib_sa(device, port_num);
}
@@ -3259,7 +3291,7 @@ static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num
* Return: true if the port uses a GID address to identify devices on the
* network.
*/
-static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_AF_IB;
@@ -3281,7 +3313,7 @@ static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
* addition of a Global Route Header built from our Ethernet Address
* Handle into our header list for connectionless packets.
*/
-static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_ETH_AH;
@@ -3296,7 +3328,7 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
* Return: true if we are running on an OPA device which supports
* the extended OPA addressing.
*/
-static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
{
return (device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
@@ -3314,7 +3346,8 @@ static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
* Return the max MAD size required by the Port. Will return 0 if the port
* does not support MADs
*/
-static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
+static inline size_t rdma_max_mad_size(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.max_mad_size;
}
@@ -3333,7 +3366,7 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n
* its GIDs.
*/
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return rdma_protocol_roce(device, port_num) &&
device->ops.add_gid && device->ops.del_gid;
@@ -3352,30 +3385,6 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
}
/**
- * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
- *
- * @addr: address
- * @pgsz_bitmap: bitmap of HW supported page sizes
- */
-static inline unsigned int rdma_find_pg_bit(unsigned long addr,
- unsigned long pgsz_bitmap)
-{
- unsigned long align;
- unsigned long pgsz;
-
- align = addr & -addr;
-
- /* Find page bit such that addr is aligned to the highest supported
- * HW page size
- */
- pgsz = pgsz_bitmap & ~(-align << 1);
- if (!pgsz)
- return __ffs(pgsz_bitmap);
-
- return __fls(pgsz);
-}
-
-/**
* rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
* @device: Device
* @port_num: 1 based Port number
@@ -3398,7 +3407,7 @@ static inline bool rdma_core_cap_opa_port(struct ib_device *device,
* Return the MTU size supported by the port as an integer value. Will return
* -1 if enum value of mtu is not supported.
*/
-static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
+static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
int mtu)
{
if (rdma_core_cap_opa_port(device, port))
@@ -3415,7 +3424,7 @@ static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
*
* Return the MTU size supported by the port as an integer value.
*/
-static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
+static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
struct ib_port_attr *attr)
{
if (rdma_core_cap_opa_port(device, port))
@@ -3424,34 +3433,34 @@ static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
return ib_mtu_enum_to_int(attr->max_mtu);
}
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
int state);
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info);
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats);
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
int type);
int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey);
+ u32 port_num, u16 index, u16 *pkey);
int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify);
int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
+ u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index);
+ u32 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
- u8 port_num, u16 pkey, u16 *index);
+ u32 port_num, u16 pkey, u16 *index);
enum ib_pd_flags {
/*
@@ -3469,15 +3478,21 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
+/**
+ * ib_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ *
+ * Every PD has a local_dma_lkey which can be used as the lkey value for local
+ * memory operations.
+ */
#define ib_alloc_pd(device, flags) \
__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
-/**
- * ib_dealloc_pd_user - Deallocate kernel/user PD
- * @pd: The protection domain
- * @udata: Valid user data or NULL for kernel objects
- */
-void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
+int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
/**
* ib_dealloc_pd - Deallocate kernel PD
@@ -3487,7 +3502,9 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
*/
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
- ib_dealloc_pd_user(pd, NULL);
+ int ret = ib_dealloc_pd_user(pd, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
}
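
As a rough usage sketch of the PD API above (the example_* names are illustrative and not part of this patch), a kernel ULP pairs the allocation macro with the void kernel-side teardown:

	static struct ib_pd *example_pd;

	static int example_ulp_init(struct ib_device *device)
	{
		/* No IB_PD_UNSAFE_GLOBAL_RKEY: rely on the per-PD local_dma_lkey */
		example_pd = ib_alloc_pd(device, 0);
		if (IS_ERR(example_pd))
			return PTR_ERR(example_pd);
		return 0;
	}

	static void example_ulp_exit(void)
	{
		/* Kernel callers use the void wrapper; a failure hits the WARN_ONCE */
		ib_dealloc_pd(example_pd);
	}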
enum rdma_create_ah_flags {
@@ -3558,7 +3575,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* attributes which are initialized using ib_init_ah_attr_from_wc().
*
*/
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr);
@@ -3575,7 +3592,7 @@ int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
* in all UD QP post sends.
*/
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
- const struct ib_grh *grh, u8 port_num);
+ const struct ib_grh *grh, u32 port_num);
/**
* rdma_modify_ah - Modifies the address vector associated with an address
@@ -3615,9 +3632,11 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
*
* NOTE: for user ah use rdma_destroy_ah_user with valid udata!
*/
-static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
+static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
- return rdma_destroy_ah_user(ah, flags, NULL);
+ int ret = rdma_destroy_ah_user(ah, flags, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
}
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
@@ -3671,9 +3690,11 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
*
* NOTE: for user srq use ib_destroy_srq_user with valid udata!
*/
-static inline int ib_destroy_srq(struct ib_srq *srq)
+static inline void ib_destroy_srq(struct ib_srq *srq)
{
- return ib_destroy_srq_user(srq, NULL);
+ int ret = ib_destroy_srq_user(srq, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
}
/**
@@ -3693,8 +3714,22 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
bad_recv_wr ? : &dummy);
}
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr);
+struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller);
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specific protection
+ * domain.
+ * @pd: The protection domain associated with the QP.
+ * @init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
+ */
+static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
+}
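
A minimal sketch of using this wrapper (names and queue depths are illustrative): the caller fills struct ib_qp_init_attr and, on success, reads the actual capabilities back from the same structure:

	static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
	{
		struct ib_qp_init_attr init_attr = {
			.send_cq	= cq,
			.recv_cq	= cq,
			.qp_type	= IB_QPT_RC,
			.sq_sig_type	= IB_SIGNAL_REQ_WR,
			.cap = {
				.max_send_wr	= 64,	/* example depths only */
				.max_recv_wr	= 64,
				.max_send_sge	= 1,
				.max_recv_sge	= 1,
			},
		};

		/* On success, init_attr.cap holds the created QP's real limits */
		return ib_create_qp(pd, &init_attr);
	}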
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
@@ -3817,46 +3852,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector,
- enum ib_poll_context poll_ctx,
- const char *caller, struct ib_udata *udata);
-
-/**
- * ib_alloc_cq_user: Allocate kernel/user CQ
- * @dev: The IB device
- * @private: Private data attached to the CQE
- * @nr_cqe: Number of CQEs in the CQ
- * @comp_vector: Completion vector used for the IRQs
- * @poll_ctx: Context used for polling the CQ
- * @udata: Valid user data or NULL for kernel objects
- */
-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
- void *private, int nr_cqe,
- int comp_vector,
- enum ib_poll_context poll_ctx,
- struct ib_udata *udata)
-{
- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
- KBUILD_MODNAME, udata);
-}
-
-/**
- * ib_alloc_cq: Allocate kernel CQ
- * @dev: The IB device
- * @private: Private data attached to the CQE
- * @nr_cqe: Number of CQEs in the CQ
- * @comp_vector: Completion vector used for the IRQs
- * @poll_ctx: Context used for polling the CQ
- *
- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
- */
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+ int comp_vector, enum ib_poll_context poll_ctx,
+ const char *caller);
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
int nr_cqe, int comp_vector,
enum ib_poll_context poll_ctx)
{
- return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
- NULL);
+ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+ KBUILD_MODNAME);
}
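
For completeness, a hedged sketch of the kernel-side CQ allocation that now routes through __ib_alloc_cq() (the size and poll context are illustrative):

	static struct ib_cq *example_alloc_cq(struct ib_device *dev)
	{
		/* 128 CQEs on completion vector 0, polled from softirq context */
		return ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	}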
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
@@ -3878,26 +3882,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
KBUILD_MODNAME);
}
-/**
- * ib_free_cq_user - Free kernel/user CQ
- * @cq: The CQ to free
- * @udata: Valid user data or NULL for kernel objects
- *
- * NOTE: This function shouldn't be called on shared CQs.
- */
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
-
-/**
- * ib_free_cq - Free kernel CQ
- * @cq: The CQ to free
- *
- * NOTE: for user cq use ib_free_cq_user with valid udata!
- */
-static inline void ib_free_cq(struct ib_cq *cq)
-{
- ib_free_cq_user(cq, NULL);
-}
-
+void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
/**
@@ -3955,7 +3940,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
*/
static inline void ib_destroy_cq(struct ib_cq *cq)
{
- ib_destroy_cq_user(cq, NULL);
+ int ret = ib_destroy_cq_user(cq, NULL);
+
+ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
}
/**
@@ -4015,18 +4002,50 @@ struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
+/*
+ * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
+ * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
+ * address into the dma address.
+ */
+static inline bool ib_uses_virt_dma(struct ib_device *dev)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
+}
+
+/*
+ * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
+ */
+static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
+{
+ if (ib_uses_virt_dma(dev))
+ return false;
+
+ return dma_pci_p2pdma_supported(dev->dma_device);
+}
+
/**
- * ib_req_ncomp_notif - Request completion notification when there are
- * at least the specified number of unreaped completions on the CQ.
- * @cq: The CQ to generate an event for.
- * @wc_cnt: The number of unreaped completions that should be on the
- * CQ before an event is generated.
+ * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
+ * going through the dma_addr marshalling.
+ */
+static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
+{
+ /* virt_dma mode maps kernel virtual addresses directly into the dma addr */
+ return (void *)(uintptr_t)dma_addr;
+}
+
+/**
+ * ib_virt_dma_to_page - Convert a dma_addr to a struct page
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the struct page after going
+ * through the dma_addr marshalling.
*/
-static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
+static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
{
- return cq->device->ops.req_ncomp_notif ?
- cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
- -ENOSYS;
+ return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
}
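
A minimal sketch of the round trip these helpers enable, assuming a device for which ib_uses_virt_dma() is true (for example a software RDMA device with dma_device set to NULL) and a page-backed buffer of at least PAGE_SIZE:

	static void example_virt_dma_round_trip(struct ib_device *dev, void *buf)
	{
		/* Under virt-DMA, "mapping" just stores the KVA in the handle */
		u64 dma_addr = ib_dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);

		if (ib_uses_virt_dma(dev)) {
			void *kva = ib_virt_dma_to_ptr(dma_addr);	/* == buf */
			struct page *pg = ib_virt_dma_to_page(dma_addr);

			(void)kva;
			(void)pg;
		}
		ib_dma_unmap_single(dev, dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
	}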
/**
@@ -4036,6 +4055,8 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
*/
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
+ if (ib_uses_virt_dma(dev))
+ return 0;
return dma_mapping_error(dev->dma_device, dma_addr);
}
@@ -4050,6 +4071,8 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
+ if (ib_uses_virt_dma(dev))
+ return (uintptr_t)cpu_addr;
return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
@@ -4064,7 +4087,8 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- dma_unmap_single(dev->dma_device, addr, size, direction);
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_single(dev->dma_device, addr, size, direction);
}
/**
@@ -4081,6 +4105,8 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
size_t size,
enum dma_data_direction direction)
{
+ if (ib_uses_virt_dma(dev))
+ return (uintptr_t)(page_address(page) + offset);
return dma_map_page(dev->dma_device, page, offset, size, direction);
}
@@ -4095,7 +4121,63 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- dma_unmap_page(dev->dma_device, addr, size, direction);
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (ib_uses_virt_dma(dev))
+ return ib_dma_virt_map_sg(dev, sg, nents);
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
+}
+
+/**
+ * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The sg_table object describing the buffer
+ * @direction: The direction of the DMA
+ * @attrs: Optional DMA attributes for the map operation
+ */
+static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ int nents;
+
+ if (ib_uses_virt_dma(dev)) {
+ nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+ if (!nents)
+ return -EIO;
+ sgt->nents = nents;
+ return 0;
+ }
+ return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
+static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
}
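
A short sketch of the sg_table pair above (the helper name and the work posted in the middle are illustrative):

	static int example_map_sgtable(struct ib_device *dev, struct sg_table *sgt)
	{
		int ret;

		ret = ib_dma_map_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret)
			return ret;	/* -EIO when the virt-DMA path maps nothing */

		/* ... post send/recv work referencing sgt->sgl here ... */

		ib_dma_unmap_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, 0);
		return 0;
	}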
/**
@@ -4109,7 +4191,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- return dma_map_sg(dev->dma_device, sg, nents, direction);
+ return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}
/**
@@ -4123,24 +4205,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- dma_unmap_sg(dev->dma_device, sg, nents, direction);
-}
-
-static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long dma_attrs)
-{
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
-}
-
-static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long dma_attrs)
-{
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
}
/**
@@ -4151,6 +4216,8 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
*/
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
+ if (ib_uses_virt_dma(dev))
+ return UINT_MAX;
return dma_get_max_seg_size(dev->dma_device);
}
@@ -4166,7 +4233,8 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+ if (!ib_uses_virt_dma(dev))
+ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}
/**
@@ -4181,36 +4249,8 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-/**
- * ib_dma_alloc_coherent - Allocate memory and map it for DMA
- * @dev: The device for which the DMA address is requested
- * @size: The size of the region to allocate in bytes
- * @dma_handle: A pointer for returning the DMA address of the region
- * @flag: memory allocator flags
- */
-static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
- size_t size,
- dma_addr_t *dma_handle,
- gfp_t flag)
-{
- return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
-}
-
-/**
- * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
- * @dev: The device for which the DMA addresses were allocated
- * @size: The size of the region
- * @cpu_addr: the address returned by ib_dma_alloc_coherent()
- * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
- */
-static inline void ib_dma_free_coherent(struct ib_device *dev,
- size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+ if (!ib_uses_virt_dma(dev))
+ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
@@ -4302,8 +4342,11 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
struct inode *inode, struct ib_udata *udata);
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
-static inline int ib_check_mr_access(int flags)
+static inline int ib_check_mr_access(struct ib_device *ib_dev,
+ unsigned int flags)
{
+ u64 device_cap = ib_dev->attrs.device_cap_flags;
+
/*
* Local write permission is required if remote write or
* remote atomic permission is also requested.
@@ -4315,6 +4358,16 @@ static inline int ib_check_mr_access(int flags)
if (flags & ~IB_ACCESS_SUPPORTED)
return -EINVAL;
+ if (flags & IB_ACCESS_ON_DEMAND &&
+ !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
+ return -EOPNOTSUPP;
+
+ if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
+ !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
+ (flags & IB_ACCESS_FLUSH_PERSISTENT &&
+ !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
+ return -EOPNOTSUPP;
+
return 0;
}
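
As a hedged sketch of how a caller consumes the extended check (the wrapper name is illustrative):

	static int example_validate_mr_flags(struct ib_device *dev, unsigned int flags)
	{
		int ret;

		/*
		 * IB_ACCESS_REMOTE_WRITE alone fails with -EINVAL (local write is
		 * required), and IB_ACCESS_FLUSH_* without the matching
		 * device_cap_flags bit now fails with -EOPNOTSUPP.
		 */
		ret = ib_check_mr_access(dev, flags);
		if (ret)
			return ret;

		/* safe to go on and register an MR with these flags */
		return 0;
	}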
@@ -4370,19 +4423,14 @@ struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
enum rdma_driver_id driver_id);
-struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
+struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
-
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
-int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
- u32 wq_attr_mask);
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset, unsigned int page_size);
@@ -4410,7 +4458,8 @@ void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
+ u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
@@ -4478,12 +4527,12 @@ static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
return false;
}
-static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
+static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
{
attr->port_num = port_num;
}
-static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
+static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
return attr->port_num;
}
@@ -4581,7 +4630,7 @@ void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
* @port_num: Port number
*/
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
- u8 port_num)
+ u32 port_num)
{
if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
@@ -4596,7 +4645,7 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
/**
* ib_lid_cpu16 - Return lid in 16bit CPU encoding.
- * In the current implementation the only way to get
+ * In the current implementation the only way to
* get the 32bit lid is from other sources for OPA.
* get the 32bit lid is from other sources for OPA.
* For IB, lids will always be 16bits so cast the
* value accordingly.
@@ -4653,40 +4702,18 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
struct net_device *netdev);
/**
- * rdma_set_device_sysfs_group - Set device attributes group to have
- * driver specific sysfs entries at
- * for infiniband class.
- *
- * @device: device pointer for which attributes to be created
- * @group: Pointer to group which should be added when device
- * is registered with sysfs.
- * rdma_set_device_sysfs_group() allows existing drivers to expose one
- * group per device to have sysfs attributes.
- *
- * NOTE: New drivers should not make use of this API; instead new device
- * parameter should be exposed via netlink command. This API and mechanism
- * exist only for existing drivers.
- */
-static inline void
-rdma_set_device_sysfs_group(struct ib_device *dev,
- const struct attribute_group *group)
-{
- dev->groups[1] = group;
-}
-
-/**
* rdma_device_to_ibdev - Get ib_device pointer from device pointer
*
* @device: device pointer for which ib_device pointer to retrieve
@@ -4703,12 +4730,25 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
}
/**
+ * ibdev_to_node - return the NUMA node for a given ib_device
+ * @dev: device to get the NUMA node for.
+ */
+static inline int ibdev_to_node(struct ib_device *ibdev)
+{
+ struct device *parent = ibdev->dev.parent;
+
+ if (!parent)
+ return NUMA_NO_NODE;
+ return dev_to_node(parent);
+}
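
A one-line sketch of the intended use (assumption: the caller wants allocations local to the device's parent):

	static void *example_alloc_near_device(struct ib_device *ibdev, size_t size)
	{
		/* Degrades gracefully: NUMA_NO_NODE means "any node" */
		return kzalloc_node(size, GFP_KERNEL, ibdev_to_node(ibdev));
	}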
+
+/**
* rdma_device_to_drv_device - Helper macro to reach back to driver's
* ib_device holder structure from device pointer.
*
* NOTE: New drivers should not make use of this API; This API is only for
* existing drivers who have exposed sysfs entries using
- * rdma_set_device_sysfs_group().
+ * ops->device_group.
*/
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
@@ -4717,6 +4757,7 @@ bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
/**
@@ -4759,4 +4800,24 @@ static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
+
+/**
+ * rdma_get_udp_sport - Calculate the UDP source port based on the flow
+ * label. If flow label is not defined in GRH then
+ * calculate it based on lqpn/rqpn.
+ *
+ * @fl: flow label from GRH
+ * @lqpn: local qp number
+ * @rqpn: remote qp number
+ */
+static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
+{
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ return rdma_flow_label_to_udp_sport(fl);
+}
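
A brief sketch of the intended call pattern (the wrapper name is illustrative):

	static u16 example_roce_sport(u32 flow_label, u32 local_qpn, u32 remote_qpn)
	{
		/*
		 * With flow_label == 0 the label is derived from the QPNs, so a
		 * given QP pair always hashes to the same UDP source port inside
		 * the IB_ROCE_UDP_ENCAP_VALID_PORT_MIN..MAX range.
		 */
		return rdma_get_udp_sport(flow_label, local_qpn, remote_qpn);
	}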
+
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port);
#endif /* IB_VERBS_H */
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 91975400e1b3..2b22f153ef63 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -70,6 +70,7 @@ struct iw_cm_id {
u8 tos;
bool tos_set:1;
bool mapped:1;
+ bool afonly:1;
};
struct iw_cm_conn_param {
@@ -114,27 +115,6 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
void iw_destroy_cm_id(struct iw_cm_id *cm_id);
/**
- * iw_cm_bind_qp - Unbind the specified IW CM identifier and QP
- *
- * @cm_id: The IW CM idenfier to unbind from the QP.
- * @qp: The QP
- *
- * This is called by the provider when destroying the QP to ensure
- * that any references held by the IWCM are released. It may also
- * be called by the IWCM when destroying a CM_ID to that any
- * references held by the provider are released.
- */
-void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
-
-/**
- * iw_cm_get_qp - Return the ib_qp associated with a QPN
- *
- * @ib_device: The IB device
- * @qpn: The queue pair number
- */
-struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
-
-/**
* iw_cm_listen - Listen for incoming connection requests on the
* specified IW CM id.
*
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index cbe3c2811455..d297f084001a 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -51,7 +51,7 @@ static inline void *opa_vnic_dev_priv(const struct net_device *dev)
return oparn->dev_priv;
}
-/* opa_vnic skb meta data structrue */
+/* opa_vnic skb meta data structure */
struct opa_vnic_skb_mdata {
u8 vl;
u8 entropy;
@@ -90,8 +90,7 @@ struct opa_vnic_stats {
static inline bool rdma_cap_opa_vnic(struct ib_device *device)
{
- return !!(device->attrs.device_cap_flags &
- IB_DEVICE_RDMA_NETDEV_OPA);
+ return !!(device->attrs.kernel_cap_flags & IBK_RDMA_NETDEV_OPA);
}
#endif /* _OPA_VNIC_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index cf5da2ae49bf..8a8ab2f793ab 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -52,7 +52,17 @@ struct rdma_addr {
struct rdma_route {
struct rdma_addr addr;
struct sa_path_rec *path_rec;
- int num_paths;
+
+ /* Optional path records of primary path */
+ struct sa_path_rec *path_rec_inbound;
+ struct sa_path_rec *path_rec_outbound;
+
+ /*
+ * 0 - Neither primary nor alternate path is available
+ * 1 - Only primary path is available
+ * 2 - Both primary and alternate path are available
+ */
+ int num_pri_alt_paths;
};
struct rdma_conn_param {
@@ -107,14 +117,18 @@ struct rdma_cm_id {
struct rdma_route route;
enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
- u8 port_num;
+ u32 port_num;
+ struct work_struct net_work;
};
-struct rdma_cm_id *__rdma_create_id(struct net *net,
- rdma_cm_event_handler event_handler,
- void *context, enum rdma_ucm_port_space ps,
- enum ib_qp_type qp_type,
- const char *caller);
+struct rdma_cm_id *
+__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type, const char *caller);
+struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
+ void *context,
+ enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type);
/**
* rdma_create_id - Create an RDMA identifier.
@@ -132,9 +146,9 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
* The event handler callback serializes on the id's mutex and is
* allowed to sleep.
*/
-#define rdma_create_id(net, event_handler, context, ps, qp_type) \
- __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
- KBUILD_MODNAME)
+#define rdma_create_id(net, event_handler, context, ps, qp_type) \
+ __rdma_create_kernel_id(net, event_handler, context, ps, qp_type, \
+ KBUILD_MODNAME)
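
A minimal sketch of a kernel caller of the renamed entry point (handler body and names are illustrative):

	static int example_cm_handler(struct rdma_cm_id *id,
				      struct rdma_cm_event *event)
	{
		return 0;	/* a real handler dispatches on event->event */
	}

	static int example_create_cm_id(void)
	{
		struct rdma_cm_id *id;

		id = rdma_create_id(&init_net, example_cm_handler, NULL,
				    RDMA_PS_TCP, IB_QPT_RC);
		if (IS_ERR(id))
			return PTR_ERR(id);
		rdma_destroy_id(id);
		return 0;
	}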
/**
* rdma_destroy_id - Destroys an RDMA identifier.
@@ -224,19 +238,9 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
-/**
- * rdma_connect - Initiate an active connection request.
- * @id: Connection identifier to connect.
- * @conn_param: Connection information used for connected QPs.
- *
- * Users must have resolved a route for the rdma_cm_id to connect with
- * by having called rdma_resolve_route before calling this routine.
- *
- * This call will either connect to a remote QP or obtain remote QP
- * information for unconnected rdma_cm_id's. The actual operation is
- * based on the rdma_cm_id's port space.
- */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+int rdma_connect_locked(struct rdma_cm_id *id,
+ struct rdma_conn_param *conn_param);
int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
struct rdma_ucm_ece *ece);
@@ -250,29 +254,12 @@ int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
*/
int rdma_listen(struct rdma_cm_id *id, int backlog);
-int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
- const char *caller);
+int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
-int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
- const char *caller, struct rdma_ucm_ece *ece);
-
-/**
- * rdma_accept - Called to accept a connection request or response.
- * @id: Connection identifier associated with the request.
- * @conn_param: Information needed to establish the connection. This must be
- * provided if accepting a connection request. If accepting a connection
- * response, this parameter must be NULL.
- *
- * Typically, this routine is only called by the listener to accept a connection
- * request. It must also be called on the active side of a connection if the
- * user is performing their own QP transitions.
- *
- * In the case of error, a reject message is sent to the remote side and the
- * state of the qp associated with the id is modified to error, such that any
- * previously posted receive buffers would be flushed.
- */
-#define rdma_accept(id, conn_param) \
- __rdma_accept((id), (conn_param), KBUILD_MODNAME)
+void rdma_lock_handler(struct rdma_cm_id *id);
+void rdma_unlock_handler(struct rdma_cm_id *id);
+int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece);
/**
* rdma_notify - Notifies the RDMA CM of an asynchronous event that has
@@ -355,6 +342,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout);
+
+int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer);
/**
* rdma_get_service_id - Return the IB service ID for a specified address.
* @id: Communication identifier associated with the address.
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
index eb99856e8b30..45d5481a7846 100644
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -40,26 +40,29 @@ struct rdma_counter {
struct rdma_counter_mode mode;
struct mutex lock;
struct rdma_hw_stats *stats;
- u8 port;
+ u32 port;
};
void rdma_counter_init(struct ib_device *dev);
void rdma_counter_release(struct ib_device *dev);
-int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
- bool on, enum rdma_nl_counter_mask mask);
-int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port);
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
+ enum rdma_nl_counter_mask mask,
+ struct netlink_ext_ack *extack);
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port);
int rdma_counter_unbind_qp(struct ib_qp *qp, bool force);
int rdma_counter_query_stats(struct rdma_counter *counter);
-u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index);
-int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index);
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
-int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
u32 qp_num, u32 *counter_id);
-int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
-int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
enum rdma_nl_counter_mask *mask);
+int rdma_counter_modify(struct ib_device *dev, u32 port,
+ unsigned int index, bool enable);
#endif /* _RDMA_COUNTER_H_ */
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 2758d9df71ee..c2a79aeee113 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -30,7 +30,7 @@ enum rdma_nl_flags {
* constant as well and the compiler checks they are the same.
*/
#define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \
- static inline void __chk_##_index(void) \
+ static inline void __maybe_unused __chk_##_index(void) \
{ \
BUILD_BUG_ON(_index != _val); \
} \
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 9fd217b24916..c429d6ddb129 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -92,7 +92,7 @@ struct rvt_ibport {
/*
* The pkey table is allocated and maintained by the driver. Drivers
* need to have access to this before registering with rdmav. However
- * rdmavt will need access to it so drivers need to proviee this during
+ * rdmavt will need access to it so drivers need to provide this during
* the attach port API call.
*/
u16 *pkey_table;
@@ -230,7 +230,7 @@ struct rvt_driver_provided {
void (*do_send)(struct rvt_qp *qp);
/*
- * Returns a pointer to the undelying hardware's PCI device. This is
+ * Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
*/
@@ -245,7 +245,7 @@ struct rvt_driver_provided {
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
- * Init a struture allocated with qp_priv_alloc(). This should be
+ * Init a structure allocated with qp_priv_alloc(). This should be
* called after all qp fields have been initialized in rdmavt.
*/
int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
@@ -257,7 +257,7 @@ struct rvt_driver_provided {
void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
- * Inform the driver the particular qp in quesiton has been reset so
+ * Inform the driver the particular qp in question has been reset so
* that it can clean up anything it needs to.
*/
void (*notify_qp_reset)(struct rvt_qp *qp);
@@ -281,7 +281,7 @@ struct rvt_driver_provided {
void (*stop_send_queue)(struct rvt_qp *qp);
/*
- * Have the drivr drain any in progress operations
+ * Have the driver drain any in progress operations
*/
void (*quiesce_qp)(struct rvt_qp *qp);
@@ -309,16 +309,16 @@ struct rvt_driver_provided {
/*
* Query driver for the state of the port.
*/
- int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
+ int (*query_port_state)(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props);
/*
* Tell driver to shutdown a port
*/
- int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);
+ int (*shut_down_port)(struct rvt_dev_info *rdi, u32 port_num);
/* Tell driver to send a trap for changed port capabilities */
- void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);
+ void (*cap_mask_chg)(struct rvt_dev_info *rdi, u32 port_num);
/*
* The following functions can be safely ignored completely. Any use of
@@ -338,7 +338,7 @@ struct rvt_driver_provided {
/* Let the driver pick the next queue pair number*/
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num);
+ enum ib_qp_type type, u32 port_num);
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
@@ -445,7 +445,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
* to work by setting the name manually here.
*/
dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
- strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
+ strscpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 8275954f5ce6..2e58d5e6ac0e 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -444,7 +444,7 @@ struct rvt_qp {
/*
* This sge list MUST be last. Do not add anything below here.
*/
- struct rvt_sge r_sg_list[] /* verified SGEs */
+ struct rvt_sge *r_sg_list /* verified SGEs */
____cacheline_aligned_in_smp;
};
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 7682d1bcf789..8b7c46daeb07 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -50,6 +50,10 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_COUNTER,
/**
+ * @RDMA_RESTRACK_SRQ: Shared receive queue (SRQ)
+ */
+ RDMA_RESTRACK_SRQ,
+ /**
* @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
@@ -68,6 +72,14 @@ struct rdma_restrack_entry {
* As an example for that, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI
*/
bool valid;
+ /**
+ * @no_track: don't add this entry to restrack DB
+ *
+ * This field is used to mark an entry that doesn't need to be added to
+ * internal restrack DB and presented later to the users at the nldev
+ * query stage.
+ */
+ u8 no_track : 1;
/*
* @kref: Protect destroy of the resource
*/
@@ -106,22 +118,11 @@ struct rdma_restrack_entry {
int rdma_restrack_count(struct ib_device *dev,
enum rdma_restrack_type type);
-
-void rdma_restrack_kadd(struct rdma_restrack_entry *res);
-void rdma_restrack_uadd(struct rdma_restrack_entry *res);
-
-/**
- * rdma_restrack_del() - delete object from the reource tracking database
- * @res: resource entry
- * @type: actual type of object to operate
- */
-void rdma_restrack_del(struct rdma_restrack_entry *res);
-
/**
* rdma_is_kernel_res() - check the owner of resource
* @res: resource entry
*/
-static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+static inline bool rdma_is_kernel_res(const struct rdma_restrack_entry *res)
{
return !res->user;
}
@@ -138,14 +139,6 @@ int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
*/
int rdma_restrack_put(struct rdma_restrack_entry *res);
-/**
- * rdma_restrack_set_task() - set the task for this resource
- * @res: resource entry
- * @caller: kernel name, the current task will be used if the caller is NULL.
- */
-void rdma_restrack_set_task(struct rdma_restrack_entry *res,
- const char *caller);
-
/*
* Helper functions for rdma drivers when filling out
* nldev driver attributes.
@@ -164,4 +157,20 @@ int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
enum rdma_restrack_type type,
u32 id);
+
+/**
+ * rdma_restrack_no_track() - don't add resource to the DB
+ * @res: resource entry
+ *
+ * Every user of this API should be cross examined.
+ * Probably you don't need to use this function.
+ */
+static inline void rdma_restrack_no_track(struct rdma_restrack_entry *res)
+{
+ res->no_track = true;
+}
+static inline bool rdma_restrack_is_tracked(struct rdma_restrack_entry *res)
+{
+ return !res->no_track;
+}
#endif /* _RDMA_RESTRACK_H_ */
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index 6ad9dc836c10..d606cac48233 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -42,29 +42,29 @@ struct rdma_rw_ctx {
};
};
-int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir);
-void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
- struct scatterlist *sg, u32 sg_cnt,
- enum dma_data_direction dir);
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir);
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
enum dma_data_direction dir);
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
enum dma_data_direction dir);
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
-int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
-unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
unsigned int maxpages);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index b00270c72740..e6c0de227fad 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -24,6 +24,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_PTR_OUT,
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
+ UVERBS_ATTR_TYPE_RAW_FD,
UVERBS_ATTR_TYPE_ENUM_IN,
UVERBS_ATTR_TYPE_IDRS_ARRAY,
};
@@ -435,8 +436,10 @@ struct uapi_definition {
}, \
##__VA_ARGS__
#define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \
- UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, &UVERBS_OBJECT(_object_enum), \
- ##__VA_ARGS__)
+ UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, \
+ PTR_IF(IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS), \
+ &UVERBS_OBJECT(_object_enum)), \
+ ##__VA_ARGS__)
/*
* =======================================
@@ -521,6 +524,11 @@ struct uapi_definition {
.u.obj.access = _access, \
__VA_ARGS__ } })
+#define UVERBS_ATTR_RAW_FD(_attr_id, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id), \
+ .attr = { .type = UVERBS_ATTR_TYPE_RAW_FD, __VA_ARGS__ } })
+
#define UVERBS_ATTR_PTR_IN(_attr_id, _type, ...) \
(&(const struct uverbs_attr_def){ \
.id = _attr_id, \
@@ -621,12 +629,14 @@ struct uverbs_attr {
};
struct uverbs_attr_bundle {
- struct ib_udata driver_udata;
- struct ib_udata ucore;
- struct ib_uverbs_file *ufile;
- struct ib_ucontext *context;
- struct ib_uobject *uobject;
- DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
+ struct_group_tagged(uverbs_attr_bundle_hdr, hdr,
+ struct ib_udata driver_udata;
+ struct ib_udata ucore;
+ struct ib_uverbs_file *ufile;
+ struct ib_ucontext *context;
+ struct ib_uobject *uobject;
+ DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
+ );
struct uverbs_attr attrs[];
};
@@ -647,12 +657,15 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
* 'ucontext'.
*
*/
-#define rdma_udata_to_drv_context(udata, drv_dev_struct, member) \
- (udata ? container_of(container_of(udata, struct uverbs_attr_bundle, \
- driver_udata) \
- ->context, \
- drv_dev_struct, member) : \
- (drv_dev_struct *)NULL)
+static inline struct uverbs_attr_bundle *
+rdma_udata_to_uverbs_attr_bundle(struct ib_udata *udata)
+{
+ return container_of(udata, struct uverbs_attr_bundle, driver_udata);
+}
+
+#define rdma_udata_to_drv_context(udata, drv_dev_struct, member) \
+ (udata ? container_of(rdma_udata_to_uverbs_attr_bundle(udata)->context, \
+ drv_dev_struct, member) : (drv_dev_struct *)NULL)
#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
@@ -862,9 +875,24 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
}
-int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx, s64 lower_bound, u64 upper_bound,
- s64 *def_val);
+
+static inline __malloc void *uverbs_kcalloc(struct uverbs_attr_bundle *bundle,
+ size_t n, size_t size)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return ERR_PTR(-EOVERFLOW);
+ return uverbs_zalloc(bundle, bytes);
+}
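
A hedged sketch of a method handler using the new overflow-checked allocator (the handler name and element count are illustrative); the memory lives until the attrs bundle is destroyed:

	static int example_method_handler(struct uverbs_attr_bundle *attrs)
	{
		u32 *ids = uverbs_kcalloc(attrs, 16, sizeof(*ids));

		if (IS_ERR(ids))
			return PTR_ERR(ids);
		/* no explicit free: released together with the bundle */
		return 0;
	}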
+
+int _uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val);
+int _uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val);
int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
size_t idx, const void *from, size_t size);
#else
@@ -908,27 +936,84 @@ uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
return -EINVAL;
}
+static inline int
+_uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ return -EINVAL;
+}
+static inline int
+_uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val)
+{
+ return -EINVAL;
+}
#endif
-#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+#define uverbs_get_const_signed(_to, _attrs_bundle, _idx) \
({ \
s64 _val; \
- int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \
- type_min(typeof(*_to)), \
- type_max(typeof(*_to)), NULL); \
- (*_to) = _val; \
+ int _ret = \
+ _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*(_to))), \
+ type_max(typeof(*(_to))), NULL); \
+ (*(_to)) = _val; \
_ret; \
})
-#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+#define uverbs_get_const_unsigned(_to, _attrs_bundle, _idx) \
+ ({ \
+ u64 _val; \
+ int _ret = \
+ _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \
+ type_max(typeof(*(_to))), NULL); \
+ (*(_to)) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, _default) \
({ \
s64 _val; \
s64 _def_val = _default; \
int _ret = \
- _uverbs_get_const(&_val, _attrs_bundle, _idx, \
- type_min(typeof(*_to)), \
- type_max(typeof(*_to)), &_def_val); \
- (*_to) = _val; \
+ _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*(_to))), \
+ type_max(typeof(*(_to))), &_def_val); \
+ (*(_to)) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, _default) \
+ ({ \
+ u64 _val; \
+ u64 _def_val = _default; \
+ int _ret = \
+ _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \
+ type_max(typeof(*(_to))), &_def_val); \
+ (*(_to)) = _val; \
_ret; \
})
+
+#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+ (is_signed_type(typeof(*(_to))) ? \
+ uverbs_get_const_signed(_to, _attrs_bundle, _idx) : \
+ uverbs_get_const_unsigned(_to, _attrs_bundle, _idx)) \
+
+#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+ (is_signed_type(typeof(*(_to))) ? \
+ uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, \
+ _default) : \
+ uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, \
+ _default))
+
+static inline int
+uverbs_get_raw_fd(int *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx)
+{
+ return uverbs_get_const_signed(to, attrs_bundle, idx);
+}
+
#endif
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index f04f5126f61e..ee7873f872c3 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -20,7 +20,7 @@
/* These are static so they do not need to be qualified */
#define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id
-#define UVERBS_OBJECT_METHODS(object_id) _object_methods_##object_id
+#define UVERBS_OBJECT_METHODS(object_id) _UVERBS_NAME(_object_methods_##object_id, __LINE__)
#define DECLARE_UVERBS_NAMED_METHOD(_method_id, ...) \
static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS( \
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index 06db27e35f40..ccd11631c167 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -71,6 +71,8 @@ struct uverbs_obj_type_class {
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
void (*remove_handle)(struct ib_uobject *uobj);
+ void (*swap_uobjects)(struct ib_uobject *obj_old,
+ struct ib_uobject *obj_new);
};
struct uverbs_obj_type {
@@ -116,6 +118,9 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
bool hw_obj_valid);
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);
+void rdma_assign_uobject(struct ib_uobject *to_uobj,
+ struct ib_uobject *new_uobj,
+ struct uverbs_attr_bundle *attrs);
/*
* uverbs_uobject_get is called in order to increase the reference count on
@@ -138,8 +143,8 @@ struct uverbs_obj_fd_type {
* because the driver is removed or the FD is closed.
*/
struct uverbs_obj_type type;
- int (*destroy_object)(struct ib_uobject *uobj,
- enum rdma_remove_reason why);
+ void (*destroy_object)(struct ib_uobject *uobj,
+ enum rdma_remove_reason why);
const struct file_operations *fops;
const char *name;
int flags;
diff --git a/include/rv/automata.h b/include/rv/automata.h
new file mode 100644
index 000000000000..eb9e636809a0
--- /dev/null
+++ b/include/rv/automata.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Deterministic automata helper functions, to be used with the automata
+ * models in C generated by the dot2k tool.
+ */
+
+/*
+ * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata
+ *
+ * Define a set of helper functions for automata. The 'name' argument is used
+ * as suffix for the functions and data. These functions will handle automaton
+ * with data type 'type'.
+ */
+#define DECLARE_AUTOMATA_HELPERS(name, type) \
+ \
+/* \
+ * model_get_state_name_##name - return the (string) name of the given state \
+ */ \
+static char *model_get_state_name_##name(enum states_##name state) \
+{ \
+ if ((state < 0) || (state >= state_max_##name)) \
+ return "INVALID"; \
+ \
+ return automaton_##name.state_names[state]; \
+} \
+ \
+/* \
+ * model_get_event_name_##name - return the (string) name of the given event \
+ */ \
+static char *model_get_event_name_##name(enum events_##name event) \
+{ \
+ if ((event < 0) || (event >= event_max_##name)) \
+ return "INVALID"; \
+ \
+ return automaton_##name.event_names[event]; \
+} \
+ \
+/* \
+ * model_get_initial_state_##name - return the automaton's initial state \
+ */ \
+static inline type model_get_initial_state_##name(void) \
+{ \
+ return automaton_##name.initial_state; \
+} \
+ \
+/* \
+ * model_get_next_state_##name - process an automaton event occurrence \
+ * \
+ * Given the current state (curr_state) and the event (event), returns \
+ * the next state, or INVALID_STATE in case of error. \
+ */ \
+static inline type model_get_next_state_##name(enum states_##name curr_state, \
+ enum events_##name event) \
+{ \
+ if ((curr_state < 0) || (curr_state >= state_max_##name)) \
+ return INVALID_STATE; \
+ \
+ if ((event < 0) || (event >= event_max_##name)) \
+ return INVALID_STATE; \
+ \
+ return automaton_##name.function[curr_state][event]; \
+} \
+ \
+/* \
+ * model_is_final_state_##name - check if the given state is a final state \
+ */ \
+static inline bool model_is_final_state_##name(enum states_##name state) \
+{ \
+ if ((state < 0) || (state >= state_max_##name)) \
+ return 0; \
+ \
+ return automaton_##name.final_states[state]; \
+}
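
For orientation, the dot2k-generated model these helpers expect looks roughly like the following two-state sketch; the "toy" names and the struct layout mirror generated monitors and are illustrative, not defined by this header:

	enum states_toy { off_toy = 0, on_toy, state_max_toy };
	enum events_toy { switch_toy = 0, event_max_toy };
	#define INVALID_STATE state_max_toy

	static const struct automaton_toy {
		char *state_names[state_max_toy];
		char *event_names[event_max_toy];
		unsigned char function[state_max_toy][event_max_toy];
		unsigned char initial_state;
		bool final_states[state_max_toy];
	} automaton_toy = {
		.state_names = { "off", "on" },
		.event_names = { "switch" },
		.function = { { on_toy }, { off_toy } },	/* toggle on "switch" */
		.initial_state = off_toy,
		.final_states = { true, false },
	};

	DECLARE_AUTOMATA_HELPERS(toy, unsigned char)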
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
new file mode 100644
index 000000000000..9705b2a98e49
--- /dev/null
+++ b/include/rv/da_monitor.h
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Deterministic automata (DA) monitor functions, to be used together
+ * with automata models in C generated by the dot2k tool.
+ *
+ * The dot2k tool is available at tools/verification/dot2k/
+ *
+ * For further information, see:
+ * Documentation/trace/rv/da_monitor_synthesis.rst
+ */
+
+#include <rv/automata.h>
+#include <linux/rv.h>
+#include <linux/bug.h>
+
+#ifdef CONFIG_RV_REACTORS
+
+#define DECLARE_RV_REACTING_HELPERS(name, type) \
+static char REACT_MSG_##name[1024]; \
+ \
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{ \
+ snprintf(REACT_MSG_##name, 1024, \
+ "rv: monitor %s does not allow event %s on state %s\n", \
+ #name, \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(curr_state)); \
+ return REACT_MSG_##name; \
+} \
+ \
+static void cond_react_##name(char *msg) \
+{ \
+ if (rv_##name.react) \
+ rv_##name.react(msg); \
+} \
+ \
+static bool rv_reacting_on_##name(void) \
+{ \
+ return rv_reacting_on(); \
+}
+
+#else /* CONFIG_RV_REACTORS */
+
+#define DECLARE_RV_REACTING_HELPERS(name, type) \
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{ \
+ return NULL; \
+} \
+ \
+static void cond_react_##name(char *msg) \
+{ \
+ return; \
+} \
+ \
+static bool rv_reacting_on_##name(void) \
+{ \
+ return 0; \
+}
+#endif
+
+/*
+ * Generic helpers for all types of deterministic automata monitors.
+ */
+#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+ \
+DECLARE_RV_REACTING_HELPERS(name, type) \
+ \
+/* \
+ * da_monitor_reset_##name - reset a monitor, setting it to the initial state \
+ */ \
+static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
+{ \
+ da_mon->monitoring = 0; \
+ da_mon->curr_state = model_get_initial_state_##name(); \
+} \
+ \
+/* \
+ * da_monitor_curr_state_##name - return the current state \
+ */ \
+static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
+{ \
+ return da_mon->curr_state; \
+} \
+ \
+/* \
+ * da_monitor_set_state_##name - set the new current state \
+ */ \
+static inline void \
+da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
+{ \
+ da_mon->curr_state = state; \
+} \
+ \
+/* \
+ * da_monitor_start_##name - start monitoring \
+ * \
+ * The monitor will ignore all events until monitoring is set to true. This \
+ * function needs to be called to tell the monitor to start monitoring. \
+ */ \
+static inline void da_monitor_start_##name(struct da_monitor *da_mon) \
+{ \
+ da_mon->curr_state = model_get_initial_state_##name(); \
+ da_mon->monitoring = 1; \
+} \
+ \
+/* \
+ * da_monitoring_##name - returns true if the monitor is processing events \
+ */ \
+static inline bool da_monitoring_##name(struct da_monitor *da_mon) \
+{ \
+ return da_mon->monitoring; \
+} \
+ \
+/* \
+ * da_monitor_enabled_##name - checks if the monitor is enabled \
+ */ \
+static inline bool da_monitor_enabled_##name(void) \
+{ \
+ /* global switch */ \
+ if (unlikely(!rv_monitoring_on())) \
+ return 0; \
+ \
+ /* monitor enabled */ \
+ if (unlikely(!rv_##name.enabled)) \
+ return 0; \
+ \
+ return 1; \
+} \
+ \
+/* \
+ * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \
+ */ \
+static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \
+{ \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ /* monitor is actually monitoring */ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ return 0; \
+ \
+ return 1; \
+}
+
+/*
+ * Event handler for implicit monitors. An implicit monitor is one for which
+ * the handler does not need to specify which da_monitor to manipulate; the
+ * per_cpu and the global monitors are examples.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+ \
+static inline bool \
+da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
+{ \
+ type curr_state = da_monitor_curr_state_##name(da_mon); \
+ type next_state = model_get_next_state_##name(curr_state, event); \
+ \
+ if (next_state != INVALID_STATE) { \
+ da_monitor_set_state_##name(da_mon, next_state); \
+ \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ \
+ return true; \
+ } \
+ \
+ if (rv_reacting_on_##name()) \
+ cond_react_##name(format_react_msg_##name(curr_state, event)); \
+ \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ \
+ return false; \
+} \
+
+/*
+ * Event handler for per_task monitors.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
+ \
+static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+ enum events_##name event) \
+{ \
+ type curr_state = da_monitor_curr_state_##name(da_mon); \
+ type next_state = model_get_next_state_##name(curr_state, event); \
+ \
+ if (next_state != INVALID_STATE) { \
+ da_monitor_set_state_##name(da_mon, next_state); \
+ \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ \
+ return true; \
+ } \
+ \
+ if (rv_reacting_on_##name()) \
+ cond_react_##name(format_react_msg_##name(curr_state, event)); \
+ \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ \
+ return false; \
+}
+
+/*
+ * Functions to define, init and get a global monitor.
+ */
+#define DECLARE_DA_MON_INIT_GLOBAL(name, type) \
+ \
+/* \
+ * global monitor (a single variable) \
+ */ \
+static struct da_monitor da_mon_##name; \
+ \
+/* \
+ * da_get_monitor_##name - return the global monitor address \
+ */ \
+static struct da_monitor *da_get_monitor_##name(void) \
+{ \
+ return &da_mon_##name; \
+} \
+ \
+/* \
+ * da_monitor_reset_all_##name - reset the single monitor \
+ */ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ da_monitor_reset_##name(da_get_monitor_##name()); \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize a monitor \
+ */ \
+static inline int da_monitor_init_##name(void) \
+{ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - destroy the monitor \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ return; \
+}
+
+/*
+ * Functions to define, init and get a per-cpu monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_CPU(name, type) \
+ \
+/* \
+ * per-cpu monitor variables \
+ */ \
+static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+ \
+/* \
+ * da_get_monitor_##name - return current CPU monitor address \
+ */ \
+static struct da_monitor *da_get_monitor_##name(void) \
+{ \
+ return this_cpu_ptr(&da_mon_##name); \
+} \
+ \
+/* \
+ * da_monitor_reset_all_##name - reset all CPUs' monitor \
+ */ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ struct da_monitor *da_mon; \
+ int cpu; \
+ for_each_cpu(cpu, cpu_online_mask) { \
+ da_mon = per_cpu_ptr(&da_mon_##name, cpu); \
+ da_monitor_reset_##name(da_mon); \
+ } \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize all CPUs' monitor \
+ */ \
+static inline int da_monitor_init_##name(void) \
+{ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - destroy the monitor \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ return; \
+}
+
+/*
+ * Functions to define, init and get a per-task monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_TASK(name, type) \
+ \
+/* \
+ * The per-task monitor is stored in a vector in the task struct. This \
+ * variable stores the position in the vector reserved for this monitor. \
+ */ \
+static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
+ \
+/* \
+ * da_get_monitor_##name - return the monitor in the allocated slot for tsk \
+ */ \
+static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \
+{ \
+ return &tsk->rv[task_mon_slot_##name].da_mon; \
+} \
+ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ struct task_struct *g, *p; \
+ \
+ read_lock(&tasklist_lock); \
+ for_each_process_thread(g, p) \
+ da_monitor_reset_##name(da_get_monitor_##name(p)); \
+ read_unlock(&tasklist_lock); \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize the per-task monitor \
+ * \
+ * Try to allocate a slot in the task's vector of monitors. If a slot \
+ * is available, use it and reset every task's monitor. \
+ */ \
+static int da_monitor_init_##name(void) \
+{ \
+ int slot; \
+ \
+ slot = rv_get_task_monitor_slot(); \
+ if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \
+ return slot; \
+ \
+ task_mon_slot_##name = slot; \
+ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - release the allocated slot \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \
+ WARN_ONCE(1, "Disabling a disabled monitor: " #name); \
+ return; \
+ } \
+ rv_put_task_monitor_slot(task_mon_slot_##name); \
+ task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
+ return; \
+}
+
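Taken together, the generated init/destroy pair manages the per-task slot
lifecycle. A minimal usage sketch, assuming a hypothetical monitor "tss"
already declared with DECLARE_DA_MON_PER_TASK(tss, unsigned char); the
enable/disable function names are illustrative:

	static int enable_tss(void)
	{
		int retval;

		/* Claims a slot in task_struct->rv[] and resets every task's monitor. */
		retval = da_monitor_init_tss();
		if (retval)
			return retval;

		/* ... attach instrumentation (tracepoints) here ... */
		return 0;
	}

	static void disable_tss(void)
	{
		/* ... detach instrumentation first ... */

		/* Returns the slot; warns if the monitor was never enabled. */
		da_monitor_destroy_tss();
	}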
+/*
+ * Handle event for an implicit monitor: da_get_monitor_##name() resolves
+ * the monitor instance (global or per-cpu).
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \
+ \
+static inline void __da_handle_event_##name(struct da_monitor *da_mon, \
+ enum events_##name event) \
+{ \
+ bool retval; \
+ \
+ retval = da_event_##name(da_mon, event); \
+ if (!retval) \
+ da_monitor_reset_##name(da_mon); \
+} \
+ \
+/* \
+ * da_handle_event_##name - handle an event \
+ */ \
+static inline void da_handle_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon = da_get_monitor_##name(); \
+ bool retval; \
+ \
+ retval = da_monitor_handling_event_##name(da_mon); \
+ if (!retval) \
+ return; \
+ \
+ __da_handle_event_##name(da_mon, event); \
+} \
+ \
+/* \
+ * da_handle_start_event_##name - start monitoring or handle event \
+ * \
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring at the next event. \
+ * Thus: \
+ * \
+ * If the monitor already started, handle the event. \
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */ \
+static inline bool da_handle_start_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return false; \
+ \
+ da_mon = da_get_monitor_##name(); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) { \
+ da_monitor_start_##name(da_mon); \
+ return false; \
+ } \
+ \
+ __da_handle_event_##name(da_mon, event); \
+ \
+ return true; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handle this event. \
+ */ \
+static inline bool da_handle_start_run_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return false; \
+ \
+ da_mon = da_get_monitor_##name(); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, event); \
+ \
+ return true; \
+}
+
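The difference between the two start variants matters at the instrumentation
points. A hedged sketch for a hypothetical implicit (global or per-cpu)
monitor "pcm", where event_a_pcm denotes the return to the initial state and
event_b_pcm is an ordinary transition; all names are illustrative and the
tracepoint arguments are elided:

	static void handle_event_a(void *data)
	{
		/*
		 * The system is back at the initial state: (re)start the
		 * monitor, but skip this occurrence of the event.
		 */
		da_handle_start_event_pcm(event_a_pcm);
	}

	static void handle_event_b(void *data)
	{
		/* Silently ignored until the monitor was started by event_a. */
		da_handle_event_pcm(event_b_pcm);
	}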
+/*
+ * Handle events for the per-task monitor.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \
+ \
+static inline void \
+__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+ enum events_##name event) \
+{ \
+ bool retval; \
+ \
+ retval = da_event_##name(da_mon, tsk, event); \
+ if (!retval) \
+ da_monitor_reset_##name(da_mon); \
+} \
+ \
+/* \
+ * da_handle_event_##name - handle an event \
+ */ \
+static inline void \
+da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon = da_get_monitor_##name(tsk); \
+ bool retval; \
+ \
+ retval = da_monitor_handling_event_##name(da_mon); \
+ if (!retval) \
+ return; \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+} \
+ \
+/* \
+ * da_handle_start_event_##name - start monitoring or handle event \
+ * \
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring at the next event. \
+ * Thus: \
+ * \
+ * If the monitor already started, handle the event. \
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */ \
+static inline bool \
+da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return false; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) { \
+ da_monitor_start_##name(da_mon); \
+ return false; \
+ } \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return true; \
+}
+
+/*
+ * Entry point for the global monitor.
+ */
+#define DECLARE_DA_MON_GLOBAL(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+DECLARE_DA_MON_INIT_GLOBAL(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-cpu monitor.
+ */
+#define DECLARE_DA_MON_PER_CPU(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+DECLARE_DA_MON_INIT_PER_CPU(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-task monitor.
+ */
+#define DECLARE_DA_MON_PER_TASK(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
+DECLARE_DA_MON_INIT_PER_TASK(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type)
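A condensed sketch of how an auto-generated monitor comes together, with
every name hypothetical: dot2k emits enum events_pcm and the automaton
tables (model_get_next_state_pcm() and friends) from the .dot model, a
single entry-point line instantiates the runtime, and instrumentation then
funnels kernel events into the automaton. The type parameter only needs to
hold the largest state index, so unsigned char suffices for models with
fewer than 256 states:

	/* One line instantiates the whole runtime for monitor "pcm": */
	DECLARE_DA_MON_PER_CPU(pcm, unsigned char);

	/* Handler signature shown for the preemptirq tracepoints: */
	static void handle_preempt_disable(void *data, unsigned long ip,
					   unsigned long parent_ip)
	{
		da_handle_event_pcm(preempt_disable_pcm);
	}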
diff --git a/include/rv/instrumentation.h b/include/rv/instrumentation.h
new file mode 100644
index 000000000000..d4e7a02ede1a
--- /dev/null
+++ b/include/rv/instrumentation.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Helper functions to facilitate the instrumentation of auto-generated
+ * RV monitors created by dot2k.
+ *
+ * The dot2k tool is available at tools/verification/dot2/
+ */
+
+#include <linux/ftrace.h>
+
+/*
+ * rv_attach_trace_probe - check and attach a handler function to a tracepoint
+ */
+#define rv_attach_trace_probe(monitor, tp, rv_handler) \
+ do { \
+ check_trace_callback_type_##tp(rv_handler); \
+ WARN_ONCE(register_trace_##tp(rv_handler, NULL), \
+ "fail attaching " #monitor " " #tp "handler"); \
+ } while (0)
+
+/*
+ * rv_detach_trace_probe - detach a handler function from a tracepoint
+ */
+#define rv_detach_trace_probe(monitor, tp, rv_handler) \
+ do { \
+ unregister_trace_##tp(rv_handler, NULL); \
+ } while (0)
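Typical use from a monitor's enable/disable path, sketched with illustrative
names ("mymon", handle_sched_switch); check_trace_callback_type_##tp() turns
a handler/tracepoint prototype mismatch into a build error:

	static int enable_mymon(void)
	{
		int retval = da_monitor_init_mymon();

		if (retval)
			return retval;

		rv_attach_trace_probe("mymon", sched_switch, handle_sched_switch);
		return 0;
	}

	static void disable_mymon(void)
	{
		rv_detach_trace_probe("mymon", sched_switch, handle_sched_switch);
		da_monitor_destroy_mymon();
	}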
diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
index 9e273fed0a85..56a5d2b5a624 100644
--- a/include/scsi/fc/fc_ms.h
+++ b/include/scsi/fc/fc_ms.h
@@ -25,6 +25,12 @@
#define FC_FDMI_SUBTYPE 0x10 /* fs_ct_hdr.ct_fs_subtype */
/*
+ * Management server FDMI specifications.
+ */
+#define FDMI_V1 1 /* FDMI version 1 specifications */
+#define FDMI_V2 2 /* FDMI version 2 specifications */
+
+/*
* Management server FDMI Requests.
*/
enum fc_fdmi_req {
@@ -57,22 +63,36 @@ enum fc_fdmi_hba_attr_type {
FC_FDMI_HBA_ATTR_FIRMWAREVERSION = 0x0009,
FC_FDMI_HBA_ATTR_OSNAMEVERSION = 0x000A,
FC_FDMI_HBA_ATTR_MAXCTPAYLOAD = 0x000B,
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME = 0x000C,
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO = 0x000D,
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS = 0x000E,
+ FC_FDMI_HBA_ATTR_FABRICNAME = 0x000F,
+ FC_FDMI_HBA_ATTR_BIOSVERSION = 0x0010,
+ FC_FDMI_HBA_ATTR_BIOSSTATE = 0x0011,
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER = 0x00E0,
};
/*
* HBA Attribute Length
*/
#define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
-#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80
-#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80
-#define FC_FDMI_HBA_ATTR_MODEL_LEN 256
-#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
-#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 256
+#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
+#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
+#define FC_FDMI_HBA_ATTR_MODEL_LEN 64
+#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 64
+#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 128
#define FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN 4
+#define FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN 64
+#define FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN 4
+#define FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN 4
+#define FC_FDMI_HBA_ATTR_FABRICNAME_LEN 8
+#define FC_FDMI_HBA_ATTR_BIOSVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_BIOSSTATE_LEN 4
+#define FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN 8
/*
* Port Attribute Type
@@ -84,6 +104,16 @@ enum fc_fdmi_port_attr_type {
FC_FDMI_PORT_ATTR_MAXFRAMESIZE = 0x0004,
FC_FDMI_PORT_ATTR_OSDEVICENAME = 0x0005,
FC_FDMI_PORT_ATTR_HOSTNAME = 0x0006,
+ FC_FDMI_PORT_ATTR_NODENAME = 0x0007,
+ FC_FDMI_PORT_ATTR_PORTNAME = 0x0008,
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME = 0x0009,
+ FC_FDMI_PORT_ATTR_PORTTYPE = 0x000A,
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC = 0x000B,
+ FC_FDMI_PORT_ATTR_FABRICNAME = 0x000C,
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE = 0x000D,
+ FC_FDMI_PORT_ATTR_PORTSTATE = 0x101,
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS = 0x102,
+ FC_FDMI_PORT_ATTR_PORTID = 0x103,
};
/*
@@ -95,6 +125,17 @@ enum fc_fdmi_port_attr_type {
#define FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN 4
#define FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN 256
#define FC_FDMI_PORT_ATTR_HOSTNAME_LEN 256
+#define FC_FDMI_PORT_ATTR_NODENAME_LEN 8
+#define FC_FDMI_PORT_ATTR_PORTNAME_LEN 8
+#define FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN 256
+#define FC_FDMI_PORT_ATTR_PORTTYPE_LEN 4
+#define FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN 4
+#define FC_FDMI_PORT_ATTR_FABRICNAME_LEN 8
+#define FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN 32
+#define FC_FDMI_PORT_ATTR_PORTSTATE_LEN 4
+#define FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN 4
+#define FC_FDMI_PORT_ATTR_PORTID_LEN 4
+
/*
* HBA Attribute ID
@@ -117,7 +158,7 @@ struct fc_fdmi_port_name {
struct fc_fdmi_attr_entry {
__be16 type;
__be16 len;
- __u8 value[1];
+ __u8 value[];
} __attribute__((__packed__));
/*
@@ -125,7 +166,7 @@ struct fc_fdmi_attr_entry {
*/
struct fs_fdmi_attrs {
__be32 numattrs;
- struct fc_fdmi_attr_entry attr[1];
+ struct fc_fdmi_attr_entry attr[];
} __attribute__((__packed__));
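With value[1] and attr[1] converted to true flexible arrays, sizeof no
longer over-counts one element, and consumers walk the variable-length
entries by the on-wire length field. A minimal sketch of such a walk,
with frame-length bounds checking elided:

	static void walk_fdmi_attrs(const struct fs_fdmi_attrs *attrs)
	{
		const struct fc_fdmi_attr_entry *entry = attrs->attr;
		u32 count = be32_to_cpu(attrs->numattrs);

		while (count--) {
			/* len covers the 4-byte type/len header plus the value */
			u16 len = be16_to_cpu(entry->len);

			/* ... interpret entry->value[] based on entry->type ... */

			entry = (const void *)entry + len;
		}
	}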
/*
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
deleted file mode 100644
index c6660205d73f..000000000000
--- a/include/scsi/fc_encode.h
+++ /dev/null
@@ -1,727 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2008 Intel Corporation. All rights reserved.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#ifndef _FC_ENCODE_H_
-#define _FC_ENCODE_H_
-#include <asm/unaligned.h>
-#include <linux/utsname.h>
-
-/*
- * F_CTL values for simple requests and responses.
- */
-#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
-#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \
- FC_FC_END_SEQ | FC_FC_SEQ_INIT)
-
-struct fc_ns_rft {
- struct fc_ns_fid fid; /* port ID object */
- struct fc_ns_fts fts; /* FC4-types object */
-};
-
-struct fc_ct_req {
- struct fc_ct_hdr hdr;
- union {
- struct fc_ns_gid_ft gid;
- struct fc_ns_rn_id rn;
- struct fc_ns_rft rft;
- struct fc_ns_rff_id rff;
- struct fc_ns_fid fid;
- struct fc_ns_rsnn snn;
- struct fc_ns_rspn spn;
- struct fc_fdmi_rhba rhba;
- struct fc_fdmi_rpa rpa;
- struct fc_fdmi_dprt dprt;
- struct fc_fdmi_dhba dhba;
- } payload;
-};
-
-static inline void __fc_fill_fc_hdr(struct fc_frame_header *fh,
- enum fc_rctl r_ctl,
- u32 did, u32 sid, enum fc_fh_type type,
- u32 f_ctl, u32 parm_offset)
-{
- WARN_ON(r_ctl == 0);
- fh->fh_r_ctl = r_ctl;
- hton24(fh->fh_d_id, did);
- hton24(fh->fh_s_id, sid);
- fh->fh_type = type;
- hton24(fh->fh_f_ctl, f_ctl);
- fh->fh_cs_ctl = 0;
- fh->fh_df_ctl = 0;
- fh->fh_parm_offset = htonl(parm_offset);
-}
-
-/**
- * fill FC header fields in specified fc_frame
- */
-static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
- u32 did, u32 sid, enum fc_fh_type type,
- u32 f_ctl, u32 parm_offset)
-{
- struct fc_frame_header *fh;
-
- fh = fc_frame_header_get(fp);
- __fc_fill_fc_hdr(fh, r_ctl, did, sid, type, f_ctl, parm_offset);
-}
-
-/**
- * fc_adisc_fill() - Fill in adisc request frame
- * @lport: local port.
- * @fp: fc frame where payload will be placed.
- */
-static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_adisc *adisc;
-
- adisc = fc_frame_payload_get(fp, sizeof(*adisc));
- memset(adisc, 0, sizeof(*adisc));
- adisc->adisc_cmd = ELS_ADISC;
- put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
- put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
- hton24(adisc->adisc_port_id, lport->port_id);
-}
-
-/**
- * fc_ct_hdr_fill- fills ct header and reset ct payload
- * returns pointer to ct request.
- */
-static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
- unsigned int op, size_t req_size,
- enum fc_ct_fs_type fs_type,
- u8 subtype)
-{
- struct fc_ct_req *ct;
- size_t ct_plen;
-
- ct_plen = sizeof(struct fc_ct_hdr) + req_size;
- ct = fc_frame_payload_get(fp, ct_plen);
- memset(ct, 0, ct_plen);
- ct->hdr.ct_rev = FC_CT_REV;
- ct->hdr.ct_fs_type = fs_type;
- ct->hdr.ct_fs_subtype = subtype;
- ct->hdr.ct_cmd = htons((u16) op);
- return ct;
-}
-
-/**
- * fc_ct_ns_fill() - Fill in a name service request frame
- * @lport: local port.
- * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
- * @fp: frame to contain payload.
- * @op: CT opcode.
- * @r_ctl: pointer to FC header R_CTL.
- * @fh_type: pointer to FC-4 type.
- */
-static inline int fc_ct_ns_fill(struct fc_lport *lport,
- u32 fc_id, struct fc_frame *fp,
- unsigned int op, enum fc_rctl *r_ctl,
- enum fc_fh_type *fh_type)
-{
- struct fc_ct_req *ct;
- size_t len;
-
- switch (op) {
- case FC_NS_GPN_FT:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft),
- FC_FST_DIR, FC_NS_SUBTYPE);
- ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
- break;
-
- case FC_NS_GPN_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid),
- FC_FST_DIR, FC_NS_SUBTYPE);
- ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
- hton24(ct->payload.fid.fp_fid, fc_id);
- break;
-
- case FC_NS_RFT_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft),
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
- ct->payload.rft.fts = lport->fcts;
- break;
-
- case FC_NS_RFF_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id),
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
- ct->payload.rff.fr_type = FC_TYPE_FCP;
- if (lport->service_params & FCP_SPPF_INIT_FCN)
- ct->payload.rff.fr_feat = FCP_FEAT_INIT;
- if (lport->service_params & FCP_SPPF_TARG_FCN)
- ct->payload.rff.fr_feat |= FCP_FEAT_TARG;
- break;
-
- case FC_NS_RNN_ID:
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id),
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
- put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
- break;
-
- case FC_NS_RSPN_ID:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
- FC_FST_DIR, FC_NS_SUBTYPE);
- hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
- strncpy(ct->payload.spn.fr_name,
- fc_host_symbolic_name(lport->host), len);
- ct->payload.spn.fr_name_len = len;
- break;
-
- case FC_NS_RSNN_NN:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
- ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
- FC_FST_DIR, FC_NS_SUBTYPE);
- put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
- strncpy(ct->payload.snn.fr_name,
- fc_host_symbolic_name(lport->host), len);
- ct->payload.snn.fr_name_len = len;
- break;
-
- default:
- return -EINVAL;
- }
- *r_ctl = FC_RCTL_DD_UNSOL_CTL;
- *fh_type = FC_TYPE_CT;
- return 0;
-}
-
-/**
- * fc_ct_ms_fill() - Fill in a mgmt service request frame
- * @lport: local port.
- * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
- * @fp: frame to contain payload.
- * @op: CT opcode.
- * @r_ctl: pointer to FC header R_CTL.
- * @fh_type: pointer to FC-4 type.
- */
-static inline int fc_ct_ms_fill(struct fc_lport *lport,
- u32 fc_id, struct fc_frame *fp,
- unsigned int op, enum fc_rctl *r_ctl,
- enum fc_fh_type *fh_type)
-{
- struct fc_ct_req *ct;
- size_t len;
- struct fc_fdmi_attr_entry *entry;
- struct fs_fdmi_attrs *hba_attrs;
- int numattrs = 0;
-
- switch (op) {
- case FC_FDMI_RHBA:
- numattrs = 10;
- len = sizeof(struct fc_fdmi_rhba);
- len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
- len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
- len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
- len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
- len += FC_FDMI_HBA_ATTR_MODEL_LEN;
- len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
- len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
- len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
-
- /* HBA Identifier */
- put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
- /* Number of Ports - always 1 */
- put_unaligned_be32(1, &ct->payload.rhba.port.numport);
- /* Port Name */
- put_unaligned_be64(lport->wwpn,
- &ct->payload.rhba.port.port[0].portname);
-
- /* HBA Attributes */
- put_unaligned_be32(numattrs,
- &ct->payload.rhba.hba_attrs.numattrs);
- hba_attrs = &ct->payload.rhba.hba_attrs;
- entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
- /* NodeName*/
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- put_unaligned_be64(lport->wwnn,
- (__be64 *)&entry->value[0]);
-
- /* Manufacturer */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_NODENAME_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_manufacturer(lport->host),
- FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
-
- /* SerialNumber */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_serial_number(lport->host),
- FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
-
- /* Model */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_MODEL_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_model(lport->host),
- FC_FDMI_HBA_ATTR_MODEL_LEN);
-
- /* Model Description */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_MODEL_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_model_description(lport->host),
- FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
-
- /* Hardware Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_hardware_version(lport->host),
- FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
-
- /* Driver Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_driver_version(lport->host),
- FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
-
- /* OptionROM Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_optionrom_version(lport->host),
- FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
-
- /* Firmware Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- strncpy((char *)&entry->value,
- fc_host_firmware_version(lport->host),
- FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
-
- /* OS Name and Version */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
- put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- snprintf((char *)&entry->value,
- FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
- "%s v%s",
- init_utsname()->sysname,
- init_utsname()->release);
- break;
- case FC_FDMI_RPA:
- numattrs = 6;
- len = sizeof(struct fc_fdmi_rpa);
- len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
- len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
- len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
- len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
- len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
- len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
- len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
-
- /* Port Name */
- put_unaligned_be64(lport->wwpn,
- &ct->payload.rpa.port.portname);
-
- /* Port Attributes */
- put_unaligned_be32(numattrs,
- &ct->payload.rpa.hba_attrs.numattrs);
-
- hba_attrs = &ct->payload.rpa.hba_attrs;
- entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
-
- /* FC4 types */
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- memcpy(&entry->value, fc_host_supported_fc4s(lport->host),
- FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
-
- /* Supported Speed */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
-
- put_unaligned_be32(fc_host_supported_speeds(lport->host),
- &entry->value);
-
- /* Current Port Speed */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- put_unaligned_be32(lport->link_speed,
- &entry->value);
-
- /* Max Frame Size */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- put_unaligned_be32(fc_host_maxframe_size(lport->host),
- &entry->value);
-
- /* OS Device Name */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- /* Use the sysfs device name */
- strncpy((char *)&entry->value,
- dev_name(&lport->host->shost_gendev),
- strnlen(dev_name(&lport->host->shost_gendev),
- FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
-
- /* Host Name */
- entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
- FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN);
- len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
- len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
- put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME,
- &entry->type);
- put_unaligned_be16(len, &entry->len);
- if (strlen(fc_host_system_hostname(lport->host)))
- strncpy((char *)&entry->value,
- fc_host_system_hostname(lport->host),
- strnlen(fc_host_system_hostname(lport->host),
- FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
- else
- strncpy((char *)&entry->value,
- init_utsname()->nodename,
- FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
- break;
- case FC_FDMI_DPRT:
- len = sizeof(struct fc_fdmi_dprt);
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
- /* Port Name */
- put_unaligned_be64(lport->wwpn,
- &ct->payload.dprt.port.portname);
- break;
- case FC_FDMI_DHBA:
- len = sizeof(struct fc_fdmi_dhba);
- ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
- /* HBA Identifier */
- put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id);
- break;
- default:
- return -EINVAL;
- }
- *r_ctl = FC_RCTL_DD_UNSOL_CTL;
- *fh_type = FC_TYPE_CT;
- return 0;
-}
-
-/**
- * fc_ct_fill() - Fill in a common transport service request frame
- * @lport: local port.
- * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
- * @fp: frame to contain payload.
- * @op: CT opcode.
- * @r_ctl: pointer to FC header R_CTL.
- * @fh_type: pointer to FC-4 type.
- */
-static inline int fc_ct_fill(struct fc_lport *lport,
- u32 fc_id, struct fc_frame *fp,
- unsigned int op, enum fc_rctl *r_ctl,
- enum fc_fh_type *fh_type, u32 *did)
-{
- int rc = -EINVAL;
-
- switch (fc_id) {
- case FC_FID_MGMT_SERV:
- rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type);
- *did = FC_FID_MGMT_SERV;
- break;
- case FC_FID_DIR_SERV:
- default:
- rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type);
- *did = FC_FID_DIR_SERV;
- break;
- }
-
- return rc;
-}
-/**
- * fc_plogi_fill - Fill in plogi request frame
- */
-static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
- unsigned int op)
-{
- struct fc_els_flogi *plogi;
- struct fc_els_csp *csp;
- struct fc_els_cssp *cp;
-
- plogi = fc_frame_payload_get(fp, sizeof(*plogi));
- memset(plogi, 0, sizeof(*plogi));
- plogi->fl_cmd = (u8) op;
- put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
- put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
-
- csp = &plogi->fl_csp;
- csp->sp_hi_ver = 0x20;
- csp->sp_lo_ver = 0x20;
- csp->sp_bb_cred = htons(10); /* this gets set by gateway */
- csp->sp_bb_data = htons((u16) lport->mfs);
- cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
- cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
- csp->sp_features = htons(FC_SP_FT_CIRO);
- csp->sp_tot_seq = htons(255); /* seq. we accept */
- csp->sp_rel_off = htons(0x1f);
- csp->sp_e_d_tov = htonl(lport->e_d_tov);
-
- cp->cp_rdfs = htons((u16) lport->mfs);
- cp->cp_con_seq = htons(255);
- cp->cp_open_seq = 1;
-}
-
-/**
- * fc_flogi_fill - Fill in a flogi request frame.
- */
-static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_csp *sp;
- struct fc_els_cssp *cp;
- struct fc_els_flogi *flogi;
-
- flogi = fc_frame_payload_get(fp, sizeof(*flogi));
- memset(flogi, 0, sizeof(*flogi));
- flogi->fl_cmd = (u8) ELS_FLOGI;
- put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
- put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
- sp = &flogi->fl_csp;
- sp->sp_hi_ver = 0x20;
- sp->sp_lo_ver = 0x20;
- sp->sp_bb_cred = htons(10); /* this gets set by gateway */
- sp->sp_bb_data = htons((u16) lport->mfs);
- cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
- cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
- if (lport->does_npiv)
- sp->sp_features = htons(FC_SP_FT_NPIV);
-}
-
-/**
- * fc_fdisc_fill - Fill in a fdisc request frame.
- */
-static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_csp *sp;
- struct fc_els_cssp *cp;
- struct fc_els_flogi *fdisc;
-
- fdisc = fc_frame_payload_get(fp, sizeof(*fdisc));
- memset(fdisc, 0, sizeof(*fdisc));
- fdisc->fl_cmd = (u8) ELS_FDISC;
- put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn);
- put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn);
- sp = &fdisc->fl_csp;
- sp->sp_hi_ver = 0x20;
- sp->sp_lo_ver = 0x20;
- sp->sp_bb_cred = htons(10); /* this gets set by gateway */
- sp->sp_bb_data = htons((u16) lport->mfs);
- cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */
- cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
-}
-
-/**
- * fc_logo_fill - Fill in a logo request frame.
- */
-static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_logo *logo;
-
- logo = fc_frame_payload_get(fp, sizeof(*logo));
- memset(logo, 0, sizeof(*logo));
- logo->fl_cmd = ELS_LOGO;
- hton24(logo->fl_n_port_id, lport->port_id);
- logo->fl_n_port_wwn = htonll(lport->wwpn);
-}
-
-/**
- * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
- */
-static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_rtv *rtv;
-
- rtv = fc_frame_payload_get(fp, sizeof(*rtv));
- memset(rtv, 0, sizeof(*rtv));
- rtv->rtv_cmd = ELS_RTV;
-}
-
-/**
- * fc_rec_fill - Fill in rec request frame
- */
-static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_rec *rec;
- struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
-
- rec = fc_frame_payload_get(fp, sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
- rec->rec_cmd = ELS_REC;
- hton24(rec->rec_s_id, lport->port_id);
- rec->rec_ox_id = htons(ep->oxid);
- rec->rec_rx_id = htons(ep->rxid);
-}
-
-/**
- * fc_prli_fill - Fill in prli request frame
- */
-static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct {
- struct fc_els_prli prli;
- struct fc_els_spp spp;
- } *pp;
-
- pp = fc_frame_payload_get(fp, sizeof(*pp));
- memset(pp, 0, sizeof(*pp));
- pp->prli.prli_cmd = ELS_PRLI;
- pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
- pp->prli.prli_len = htons(sizeof(*pp));
- pp->spp.spp_type = FC_TYPE_FCP;
- pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
- pp->spp.spp_params = htonl(lport->service_params);
-}
-
-/**
- * fc_scr_fill - Fill in a scr request frame.
- */
-static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
-{
- struct fc_els_scr *scr;
-
- scr = fc_frame_payload_get(fp, sizeof(*scr));
- memset(scr, 0, sizeof(*scr));
- scr->scr_cmd = ELS_SCR;
- scr->scr_reg_func = ELS_SCRF_FULL;
-}
-
-/**
- * fc_els_fill - Fill in an ELS request frame
- */
-static inline int fc_els_fill(struct fc_lport *lport,
- u32 did,
- struct fc_frame *fp, unsigned int op,
- enum fc_rctl *r_ctl, enum fc_fh_type *fh_type)
-{
- switch (op) {
- case ELS_ADISC:
- fc_adisc_fill(lport, fp);
- break;
-
- case ELS_PLOGI:
- fc_plogi_fill(lport, fp, ELS_PLOGI);
- break;
-
- case ELS_FLOGI:
- fc_flogi_fill(lport, fp);
- break;
-
- case ELS_FDISC:
- fc_fdisc_fill(lport, fp);
- break;
-
- case ELS_LOGO:
- fc_logo_fill(lport, fp);
- break;
-
- case ELS_RTV:
- fc_rtv_fill(lport, fp);
- break;
-
- case ELS_REC:
- fc_rec_fill(lport, fp);
- break;
-
- case ELS_PRLI:
- fc_prli_fill(lport, fp);
- break;
-
- case ELS_SCR:
- fc_scr_fill(lport, fp);
- break;
-
- default:
- return -EINVAL;
- }
-
- *r_ctl = FC_RCTL_ELS_REQ;
- *fh_type = FC_TYPE_ELS;
- return 0;
-}
-#endif /* _FC_ENCODE_H_ */
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
index 41df2ba9dbaa..d544dc5057fc 100644
--- a/include/scsi/fc_frame.h
+++ b/include/scsi/fc_frame.h
@@ -246,4 +246,34 @@ static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
*/
void fc_frame_leak_check(void);
+static inline void __fc_fill_fc_hdr(struct fc_frame_header *fh,
+ enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
+{
+ WARN_ON(r_ctl == 0);
+ fh->fh_r_ctl = r_ctl;
+ hton24(fh->fh_d_id, did);
+ hton24(fh->fh_s_id, sid);
+ fh->fh_type = type;
+ hton24(fh->fh_f_ctl, f_ctl);
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = htonl(parm_offset);
+}
+
+/**
+ * fc_fill_fc_hdr() - fill FC header fields in the specified fc_frame
+ */
+static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
+{
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ __fc_fill_fc_hdr(fh, r_ctl, did, sid, type, f_ctl, parm_offset);
+}
+
#endif /* _FC_FRAME_H_ */
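These helpers moved here from the now-deleted fc_encode.h so all libfc
consumers share one definition. A typical call, sketched after the ELS
request path; frame setup is elided and the FC_FC_* flags come from
fc/fc_fs.h:

	static void example_fill_els_hdr(struct fc_lport *lport,
					 struct fc_frame *fp, u32 did)
	{
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id,
			       FC_TYPE_ELS,
			       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	}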
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index b71b5c4f418c..7b192d88f186 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -52,7 +52,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2)
}
/*
- * useful common(control and data pathes) macro
+ * useful common (control and data paths) macros
*/
#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
#define hton24(p, v) { \
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 9b87e1a1c646..eca6fd42d7f7 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -352,6 +352,15 @@ struct fc_fcp_pkt {
} ____cacheline_aligned_in_smp;
/*
+ * @fsp should be tested and set under the scsi_pkt_queue lock
+ */
+struct libfc_cmd_priv {
+ struct fc_fcp_pkt *fsp;
+ u32 resid_len;
+ u8 status;
+};
+
+/*
* Structure and function definitions for managing Fibre Channel Exchanges
* and Sequences
*
@@ -399,7 +408,7 @@ struct fc_seq {
* @sid: Source FCID
* @did: Destination FCID
* @esb_stat: ESB exchange status
- * @r_a_tov: Resouce allocation time out value (in msecs)
+ * @r_a_tov: Resource allocation time out value (in msecs)
* @seq_id: The next sequence ID to use
* @encaps: encapsulation information for lower-level driver
* @f_ctl: F_CTL flags for the sequence
@@ -668,7 +677,7 @@ enum fc_lport_event {
* @wwnn: World Wide Node Name
* @service_params: Common service parameters
* @e_d_tov: Error detection timeout value
- * @r_a_tov: Resouce allocation timeout value
+ * @r_a_tov: Resource allocation timeout value
* @rnid_gen: RNID information
* @sg_supp: Indicates if scatter gather is supported
* @seq_offload: Indicates if sequence offload is supported
@@ -841,7 +850,7 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
/**
* lport_priv() - Return the private data from a local port
- * @lport: The local port whose private data is to be retreived
+ * @lport: The local port whose private data is to be retrieved
*/
static inline void *lport_priv(const struct fc_lport *lport)
{
@@ -857,7 +866,7 @@ static inline void *lport_priv(const struct fc_lport *lport)
* Returns: libfc lport
*/
static inline struct fc_lport *
-libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
+libfc_host_alloc(const struct scsi_host_template *sht, int priv_size)
{
struct fc_lport *lport;
struct Scsi_Host *shost;
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 2568cb0627ec..8300ef1a982e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/local_lock.h>
#include <linux/random.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
@@ -249,7 +250,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
struct fc_frame *);
/* libfcoe funcs */
-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
+u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN], unsigned int scheme,
+ unsigned int port);
int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
const struct libfc_function_template *, int init_fcp);
u32 fcoe_fc_crc(struct fc_frame *fp);
@@ -326,6 +328,7 @@ struct fcoe_percpu_s {
struct sk_buff_head fcoe_rx_list;
struct page *crc_eof_page;
int crc_eof_offset;
+ local_lock_t lock;
};
/**
@@ -394,10 +397,8 @@ int fcoe_transport_attach(struct fcoe_transport *ft);
int fcoe_transport_detach(struct fcoe_transport *ft);
/* sysfs store handler for ctrl_control interface */
-ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
- const char *buf, size_t count);
-ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
- const char *buf, size_t count);
+ssize_t fcoe_ctlr_create_store(const char *buf, size_t count);
+ssize_t fcoe_ctlr_destroy_store(const char *buf, size_t count);
#endif /* _LIBFCOE_H */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index c25fb86ffae9..7282555adfd5 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -19,6 +19,7 @@
#include <linux/refcount.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_iscsi.h>
struct scsi_transport_template;
@@ -52,8 +53,10 @@ enum {
#define ISID_SIZE 6
-/* Connection suspend "bit" */
-#define ISCSI_SUSPEND_BIT 1
+/* Connection flags */
+#define ISCSI_CONN_FLAG_SUSPEND_TX 0
+#define ISCSI_CONN_FLAG_SUSPEND_RX 1
+#define ISCSI_CONN_FLAG_BOUND 2
#define ISCSI_ITT_MASK 0x1fff
#define ISCSI_TOTAL_CMDS_MAX 4096
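The single ISCSI_SUSPEND_BIT becomes distinct bit numbers in one flags word
(the unsigned long flags member added to struct iscsi_conn below), intended
for the atomic bitops. A brief sketch; the function names are illustrative:

	static void example_suspend_tx(struct iscsi_conn *conn)
	{
		/* The constants are bit numbers, not masks. */
		set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
	}

	static bool example_rx_suspended(struct iscsi_conn *conn)
	{
		return test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
	}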
@@ -142,6 +145,24 @@ static inline void* iscsi_next_hdr(struct iscsi_task *task)
return (void*)task->hdr + task->hdr_len;
}
+static inline bool iscsi_task_is_completed(struct iscsi_task *task)
+{
+ return task->state == ISCSI_TASK_COMPLETED ||
+ task->state == ISCSI_TASK_ABRT_TMF ||
+ task->state == ISCSI_TASK_ABRT_SESS_RECOV;
+}
+
+/* Private data associated with struct scsi_cmnd. */
+struct iscsi_cmd {
+ struct iscsi_task *task;
+ int age;
+};
+
+static inline struct iscsi_cmd *iscsi_cmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
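scsi_cmd_priv() returns the driver-private area the midlayer allocates next
to each scsi_cmnd, sized by the host template's cmd_size. A sketch of the
intended use, with the function name illustrative and cmd_size assumed to
be sizeof(struct iscsi_cmd):

	static int example_queuecommand(struct Scsi_Host *host,
					struct scsi_cmnd *sc)
	{
		struct iscsi_cmd *icmd = iscsi_cmd(sc);

		icmd->task = NULL;
		icmd->age = 0;
		/* ... build and queue the iSCSI task ... */
		return 0;
	}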
/* Connection's states */
enum {
ISCSI_CONN_INITIAL_STAGE,
@@ -184,19 +205,14 @@ struct iscsi_conn {
struct iscsi_task *task; /* xmit task in progress */
/* xmit */
- spinlock_t taskqueuelock; /* protects the next three lists */
+ /* items must be added/deleted under frwd lock */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
- unsigned long suspend_tx; /* suspend Tx */
- unsigned long suspend_rx; /* suspend Rx */
-
- /* abort */
- wait_queue_head_t ehwait; /* used in eh_abort() */
- struct iscsi_tm tmhdr;
- struct timer_list tmf_timer;
- int tmf_state; /* see TMF_INITIAL, etc.*/
+ /* recv */
+ struct work_struct recvwork;
+ unsigned long flags; /* ISCSI_CONN_FLAGs */
/* negotiated params */
unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
@@ -267,6 +283,12 @@ struct iscsi_session {
* and recv lock.
*/
struct mutex eh_mutex;
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+ struct timer_list tmf_timer;
+ int tmf_state; /* see TMF_INITIAL, etc.*/
+ struct iscsi_task *running_aborted_task;
/* iSCSI session-wide sequencing */
uint32_t cmdsn;
@@ -329,7 +351,7 @@ struct iscsi_session {
* cmdsn, queued_cmdsn *
* session resources: *
* - cmdpool kfifo_out , *
- * - mgmtpool, */
+ * - mgmtpool, queues */
spinlock_t back_lock; /* protects cmdsn_exp *
* cmdsn_max, *
* cmdpool kfifo_in */
@@ -361,7 +383,6 @@ struct iscsi_host {
int state;
struct workqueue_struct *workq;
- char workq_name[20];
};
/*
@@ -372,7 +393,7 @@ extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc);
-extern enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc);
+extern enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc);
/*
* iSCSI host helpers.
@@ -386,12 +407,14 @@ extern int iscsi_host_set_param(struct Scsi_Host *shost,
extern int iscsi_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+extern struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht,
int dd_data_size,
bool xmit_can_sleep);
-extern void iscsi_host_remove(struct Scsi_Host *shost);
+extern void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown);
extern void iscsi_host_free(struct Scsi_Host *shost);
extern int iscsi_target_alloc(struct scsi_target *starget);
+extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
+ uint16_t requested_cmds_max);
/*
* session management
@@ -399,6 +422,8 @@ extern int iscsi_target_alloc(struct scsi_target *starget);
extern struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
uint16_t, int, int, uint32_t, unsigned int);
+void iscsi_session_remove(struct iscsi_cls_session *cls_session);
+void iscsi_session_free(struct iscsi_cls_session *cls_session);
extern void iscsi_session_teardown(struct iscsi_cls_session *);
extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
@@ -419,6 +444,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *);
extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
int);
+extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active);
extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err);
@@ -427,8 +453,10 @@ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
extern int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
enum iscsi_param param, char *buf);
extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+extern void iscsi_suspend_rx(struct iscsi_conn *conn);
extern void iscsi_suspend_queue(struct iscsi_conn *conn);
-extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
+extern void iscsi_conn_queue_xmit(struct iscsi_conn *conn);
+extern void iscsi_conn_queue_recv(struct iscsi_conn *conn);
#define iscsi_conn_printk(prefix, _c, fmt, a...) \
iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
@@ -453,7 +481,7 @@ extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
extern void iscsi_requeue_task(struct iscsi_task *task);
extern void iscsi_put_task(struct iscsi_task *task);
extern void __iscsi_put_task(struct iscsi_task *task);
-extern void __iscsi_get_task(struct iscsi_task *task);
+extern bool iscsi_get_task(struct iscsi_task *task);
extern void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 4e2d61e8fb1e..f5257103fdb6 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -23,22 +23,12 @@
struct block_device;
-enum sas_class {
- SAS,
- EXPANDER
-};
-
enum sas_phy_role {
PHY_ROLE_NONE = 0,
PHY_ROLE_TARGET = 0x40,
PHY_ROLE_INITIATOR = 0x80,
};
-enum sas_phy_type {
- PHY_TYPE_PHYSICAL,
- PHY_TYPE_VIRTUAL
-};
-
/* The events are mnemonically described in sas_dump.c
* so when updating/adding events here, please also
* update the other file too.
@@ -145,7 +135,7 @@ struct sata_device {
struct ata_port *ap;
struct ata_host *ata_host;
- struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
+ struct smp_rps_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
@@ -258,7 +248,6 @@ struct asd_sas_port {
/* public: */
int id;
- enum sas_class class;
u8 sas_addr[SAS_ADDR_SIZE];
u8 attached_sas_addr[SAS_ADDR_SIZE];
enum sas_protocol iproto;
@@ -319,11 +308,9 @@ struct asd_sas_phy {
int enabled; /* must be set */
int id; /* must be set */
- enum sas_class class;
enum sas_protocol iproto;
enum sas_protocol tproto;
- enum sas_phy_type type;
enum sas_phy_role role;
enum sas_oob_mode oob_mode;
enum sas_linkrate linkrate;
@@ -346,16 +333,12 @@ struct asd_sas_phy {
void *lldd_phy; /* not touched by the sas_class_code */
};
-struct scsi_core {
- struct Scsi_Host *shost;
-
-};
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
SAS_HA_ATA_EH_ACTIVE,
SAS_HA_FROZEN,
+ SAS_HA_RESUMING,
};
struct sas_ha_struct {
@@ -370,12 +353,11 @@ struct sas_ha_struct {
struct mutex disco_mutex;
- struct scsi_core core;
+ struct Scsi_Host *shost;
/* public: */
char *sas_ha_name;
struct device *dev; /* should be set */
- struct module *lldd_module; /* should be set */
struct workqueue_struct *event_q;
struct workqueue_struct *disco_q;
@@ -391,10 +373,6 @@ struct sas_ha_struct {
int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
* their siblings when forming wide ports */
- /* LLDD calls these to notify the class of an event. */
- int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
- int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
-
void *lldd_ha; /* not touched by sas class code */
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
@@ -426,8 +404,6 @@ cmd_to_domain_dev(struct scsi_cmnd *cmd)
return sdev_to_domain_dev(cmd->device);
}
-void sas_hash_addr(u8 *hashed, const u8 *sas_addr);
-
/* Before calling a notify event, LLDD should use this function
* when the link is severed (possibly from its tasklet).
* The idea is that the Class only reads those, while the LLDD,
@@ -478,10 +454,16 @@ enum service_response {
};
enum exec_status {
- /* The SAM_STAT_.. codes fit in the lower 6 bits, alias some of
- * them here to silence 'case value not in enumerated type' warnings
+ /*
+ * Values 0..0x7f are used to return the SAM_STAT_* codes. To avoid
+ * 'case value not in enumerated type' compiler warnings every value
+ * returned through the exec_status enum needs an alias with the SAS_
+ * prefix here.
*/
- __SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
+ SAS_SAM_STAT_GOOD = SAM_STAT_GOOD,
+ SAS_SAM_STAT_BUSY = SAM_STAT_BUSY,
+ SAS_SAM_STAT_TASK_ABORTED = SAM_STAT_TASK_ABORTED,
+ SAS_SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
SAS_DEV_NO_RESPONSE = 0x80,
SAS_DATA_UNDERRUN,
@@ -489,7 +471,6 @@ enum exec_status {
SAS_INTERRUPTED,
SAS_QUEUE_FULL,
SAS_DEVICE_UNKNOWN,
- SAS_SG_ERR,
SAS_OPEN_REJECT,
SAS_OPEN_TO,
SAS_PROTO_RESPONSE,
@@ -542,14 +523,26 @@ struct sas_ata_task {
struct host_to_dev_fis fis;
u8 atapi_packet[16]; /* 0 if not ATAPI task */
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 dma_xfer:1; /* PIO:0 or DMA:1 */
u8 use_ncq:1;
- u8 set_affil_pol:1;
- u8 stp_affil_pol:1;
+ u8 return_fis_on_success:1;
u8 device_control_reg_update:1;
+
+ bool force_phy;
+ int force_phy_id;
+};
+
+/* LLDDs rely on these values */
+enum sas_internal_abort {
+ SAS_INTERNAL_ABORT_SINGLE = 0,
+ SAS_INTERNAL_ABORT_DEV = 1,
+};
+
+struct sas_internal_abort_task {
+ enum sas_internal_abort type;
+ unsigned int qid;
+ u16 tag;
};
struct sas_smp_task {
@@ -565,15 +558,16 @@ enum task_attribute {
};
struct sas_ssp_task {
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 LUN[8];
- u8 enable_first_burst:1;
enum task_attribute task_attr;
- u8 task_prio;
struct scsi_cmnd *cmd;
};
+struct sas_tmf_task {
+ u8 tmf;
+ u16 tag_of_task_to_be_managed;
+};
+
struct sas_task {
struct domain_device *dev;
@@ -586,6 +580,7 @@ struct sas_task {
struct sas_ata_task ata_task;
struct sas_smp_task smp_task;
struct sas_ssp_task ssp_task;
+ struct sas_internal_abort_task abort_task;
};
struct scatterlist *scatter;
@@ -599,6 +594,7 @@ struct sas_task {
void *lldd_task; /* for use by LLDDs */
void *uldd_task;
struct sas_task_slow *slow_task;
+ struct sas_tmf_task *tmf;
};
struct sas_task_slow {
@@ -614,11 +610,29 @@ struct sas_task_slow {
#define SAS_TASK_STATE_DONE 2
#define SAS_TASK_STATE_ABORTED 4
#define SAS_TASK_NEED_DEV_RESET 8
-#define SAS_TASK_AT_INITIATOR 16
-extern struct sas_task *sas_alloc_task(gfp_t flags);
-extern struct sas_task *sas_alloc_slow_task(gfp_t flags);
-extern void sas_free_task(struct sas_task *task);
+static inline bool sas_is_internal_abort(struct sas_task *task)
+{
+ return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT;
+}
+
+static inline struct request *sas_task_find_rq(struct sas_task *task)
+{
+ struct scsi_cmnd *scmd;
+
+ if (task->task_proto & SAS_PROTOCOL_STP_ALL) {
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ scmd = qc ? qc->scsicmd : NULL;
+ } else {
+ scmd = task->uldd_task;
+ }
+
+ if (!scmd)
+ return NULL;
+
+ return scsi_cmd_to_rq(scmd);
+}
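sas_task_find_rq() gives LLDDs one way back to the block layer request, and
hence its tag, whether the task carries an SSP or an STP/SATA payload. A
hedged sketch of the common pattern; EXAMPLE_RESERVED_TAG is a hypothetical
driver-defined fallback:

	static u32 example_task_tag(struct sas_task *task)
	{
		struct request *rq = sas_task_find_rq(task);

		/* Internal aborts and other libsas-internal tasks have no request. */
		if (!rq)
			return EXAMPLE_RESERVED_TAG;

		return rq->tag;
	}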
struct sas_domain_function_template {
/* The class calls these to notify the LLDD of an event. */
@@ -634,7 +648,6 @@ struct sas_domain_function_template {
/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
int (*lldd_abort_task_set)(struct domain_device *, u8 *lun);
- int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
int (*lldd_ata_check_ready)(struct domain_device *);
@@ -642,6 +655,11 @@ struct sas_domain_function_template {
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
+ /* Special TMF callbacks */
+ void (*lldd_tmf_exec_complete)(struct domain_device *dev);
+ void (*lldd_tmf_aborted)(struct sas_task *task);
+ bool (*lldd_abort_timeout)(struct sas_task *task, void *data);
+
/* Port and Adapter management */
int (*lldd_clear_nexus_port)(struct asd_sas_port *);
int (*lldd_clear_nexus_ha)(struct sas_ha_struct *);
@@ -658,37 +676,26 @@ extern int sas_register_ha(struct sas_ha_struct *);
extern int sas_unregister_ha(struct sas_ha_struct *);
extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha);
extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
-int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
+int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
+int sas_execute_internal_abort_single(struct domain_device *device,
+ u16 tag, unsigned int qid,
+ void *data);
+int sas_execute_internal_abort_dev(struct domain_device *device,
+ unsigned int qid, void *data);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
extern struct device_attribute dev_attr_phy_event_threshold;
-int sas_discover_root_expander(struct domain_device *);
-
-void sas_init_ex_attr(void);
-
-int sas_ex_revalidate_domain(struct domain_device *);
-
-void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
-void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
-int sas_discover_event(struct asd_sas_port *, enum discover_event ev);
-
-int sas_discover_sata(struct domain_device *);
-int sas_discover_end_dev(struct domain_device *);
-
-void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
-
-void sas_init_dev(struct domain_device *);
-
void sas_task_abort(struct sas_task *);
int sas_eh_abort_handler(struct scsi_cmnd *cmd);
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
@@ -706,4 +713,17 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev);
int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
+int sas_abort_task_set(struct domain_device *dev, u8 *lun);
+int sas_clear_task_set(struct domain_device *dev, u8 *lun);
+int sas_lu_reset(struct domain_device *dev, u8 *lun);
+int sas_query_task(struct sas_task *task, u16 tag);
+int sas_abort_task(struct sas_task *task, u16 tag);
+int sas_find_attached_phy_id(struct expander_device *ex_dev,
+ struct domain_device *dev);
+
+void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags);
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags);
+
#endif /* _SASLIB_H_ */
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index 4726c1bbec65..71b749bed3b0 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -95,6 +95,8 @@ enum sas_protocol {
SAS_PROTOCOL_SSP = 0x08,
SAS_PROTOCOL_ALL = 0x0E,
SAS_PROTOCOL_STP_ALL = SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA,
+ /* these are internal to libsas */
+ SAS_PROTOCOL_INTERNAL_ABORT = 0x10,
};
/* From the spec; local phys only */
@@ -191,6 +193,13 @@ enum sas_gpio_reg_type {
SAS_GPIO_REG_TX_GP = 4,
};
+/* Response frame DATAPRES field */
+enum {
+ SAS_DATAPRES_NO_DATA = 0,
+ SAS_DATAPRES_RESPONSE_DATA = 1,
+ SAS_DATAPRES_SENSE_DATA = 2,
+};
+
struct dev_to_host_fis {
u8 fis_type; /* 0x34 */
u8 flags;
@@ -323,8 +332,10 @@ struct ssp_response_iu {
__be32 sense_data_len;
__be32 response_data_len;
- u8 resp_data[0];
- u8 sense_data[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, resp_data);
+ DECLARE_FLEX_ARRAY(u8, sense_data);
+ };
} __attribute__ ((packed));
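C does not allow a bare flexible array member inside a union, so
DECLARE_FLEX_ARRAY() (from linux/stddef.h) wraps each view in an anonymous
struct; the old resp_data[0]/sense_data[] pair relied on a deprecated
zero-length-array layout accident. The big-endian definition below gets the
same treatment. A sketch of selecting the view by DATAPRES, using the
SAS_DATAPRES_* values added above:

	static void example_parse_ssp_response(const struct ssp_response_iu *iu)
	{
		switch (iu->datapres) {
		case SAS_DATAPRES_RESPONSE_DATA:
			/* be32_to_cpu(iu->response_data_len) bytes at iu->resp_data */
			break;
		case SAS_DATAPRES_SENSE_DATA:
			/* be32_to_cpu(iu->sense_data_len) bytes at iu->sense_data */
			break;
		case SAS_DATAPRES_NO_DATA:
		default:
			break;
		}
	}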
struct ssp_command_iu {
@@ -460,18 +471,6 @@ struct report_phy_sata_resp {
__be32 crc;
} __attribute__ ((packed));
-struct smp_resp {
- u8 frame_type;
- u8 function;
- u8 result;
- u8 reserved;
- union {
- struct report_general_resp rg;
- struct discover_resp disc;
- struct report_phy_sata_resp rps;
- };
-} __attribute__ ((packed));
-
#elif defined(__BIG_ENDIAN_BITFIELD)
struct sas_identify_frame {
/* Byte 0 */
@@ -554,8 +553,10 @@ struct ssp_response_iu {
__be32 sense_data_len;
__be32 response_data_len;
- u8 resp_data[0];
- u8 sense_data[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, resp_data);
+ DECLARE_FLEX_ARRAY(u8, sense_data);
+ };
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -691,20 +692,32 @@ struct report_phy_sata_resp {
__be32 crc;
} __attribute__ ((packed));
-struct smp_resp {
+#else
+#error "Bitfield order not defined!"
+#endif
+
+struct smp_rg_resp {
u8 frame_type;
u8 function;
u8 result;
u8 reserved;
- union {
- struct report_general_resp rg;
- struct discover_resp disc;
- struct report_phy_sata_resp rps;
- };
+ struct report_general_resp rg;
} __attribute__ ((packed));
-#else
-#error "Bitfield order not defined!"
-#endif
+struct smp_disc_resp {
+ u8 frame_type;
+ u8 function;
+ u8 result;
+ u8 reserved;
+ struct discover_resp disc;
+} __attribute__ ((packed));
+
+struct smp_rps_resp {
+ u8 frame_type;
+ u8 function;
+ u8 result;
+ u8 reserved;
+ struct report_phy_sata_resp rps;
+} __attribute__ ((packed));
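Splitting the catch-all smp_resp union into one struct per SMP function
makes each response exactly as large as its payload (note that
sata_device.rps_resp above now uses smp_rps_resp) and lets consumers be
type-checked. A sketch, with the function name illustrative since the
transport call that fills the response is internal to sas_expander.c:

	static void example_use_disc_resp(const struct smp_disc_resp *disc_resp)
	{
		if (disc_resp->result != SMP_RESP_FUNC_ACC)
			return;

		/* Phy details now live in a typed member, not a union arm. */
		/* ... use disc_resp->disc (struct discover_resp) ... */
	}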
#endif /* _SAS_H_ */
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 416c9c47d0e7..2f8c719840a6 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -25,16 +25,26 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
int sas_ata_init(struct domain_device *dev);
void sas_ata_task_abort(struct sas_task *task);
void sas_ata_strategy_handler(struct Scsi_Host *shost);
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q);
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q);
void sas_ata_schedule_reset(struct domain_device *dev);
void sas_ata_wait_eh(struct domain_device *dev);
void sas_probe_sata(struct asd_sas_port *port);
void sas_suspend_sata(struct asd_sas_port *port);
void sas_resume_sata(struct asd_sas_port *port);
void sas_ata_end_eh(struct ata_port *ap);
+void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset);
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id);
+int smp_ata_check_ready_type(struct ata_link *link);
+int sas_discover_sata(struct domain_device *dev);
+int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id);
#else
+static inline void sas_ata_disabled_notice(void)
+{
+ pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n");
+}
static inline int dev_is_sata(struct domain_device *dev)
{
@@ -52,8 +62,7 @@ static inline void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
}
-static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
}
@@ -85,6 +94,35 @@ static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy
static inline void sas_ata_end_eh(struct ata_port *ap)
{
}
+
+static inline void sas_ata_device_link_abort(struct domain_device *dev,
+ bool force_reset)
+{
+}
+
+static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id)
+{
+ return 0;
+}
+
+static inline int smp_ata_check_ready_type(struct ata_link *link)
+{
+ return 0;
+}
+
+static inline int sas_discover_sata(struct domain_device *dev)
+{
+ sas_ata_disabled_notice();
+ return -ENXIO;
+}
+
+static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ sas_ata_disabled_notice();
+ return -ENODEV;
+}
#endif
#endif /* _SAS_ATA_H_ */
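
When CONFIG_SCSI_SAS_ATA=N, the new stubs print a one-time notice and fail with -ENXIO/-ENODEV, so callers can simply skip SATA devices. A hypothetical caller illustrating that contract:

	/* Hypothetical discovery path: tolerate the =N stubs. */
	static int try_sata_sketch(struct domain_device *dev)
	{
		int ret = sas_discover_sata(dev);	/* -ENXIO when stubbed */

		if (ret)
			pr_debug("SATA discovery skipped: %d\n", ret);
		return ret;
	}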
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 5339baadc082..4498f845b112 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
+#include <scsi/scsi_status.h>
struct scsi_cmnd;
@@ -30,32 +31,6 @@ enum scsi_timeouts {
*/
#define SCAN_WILD_CARD ~0
-/** scsi_status_is_good - check the status return.
- *
- * @status: the status passed up from the driver (including host and
- * driver components)
- *
- * This returns true for known good conditions that may be treated as
- * command completed normally
- */
-static inline int scsi_status_is_good(int status)
-{
- /*
- * FIXME: bit0 is listed as reserved in SCSI-2, but is
- * significant in SCSI-3. For now, we follow the SCSI-2
- * behaviour and ignore reserved bits.
- */
- status &= 0xfe;
- return ((status == SAM_STAT_GOOD) ||
- (status == SAM_STAT_CONDITION_MET) ||
- /* Next two "intermediate" statuses are obsolete in SAM-4 */
- (status == SAM_STAT_INTERMEDIATE) ||
- (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
- /* FIXME: this is obsolete in SAM-3 */
- (status == SAM_STAT_COMMAND_TERMINATED));
-}
-
-
/*
* standard mode-select header prepended to all mode-select commands
*/
@@ -88,106 +63,46 @@ static inline int scsi_is_wlun(u64 lun)
return (lun & 0xff00) == SCSI_W_LUN_BASE;
}
+/**
+ * scsi_status_is_check_condition - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true if the status code is SAM_STAT_CHECK_CONDITION.
+ */
+static inline int scsi_status_is_check_condition(int status)
+{
+ if (status < 0)
+ return false;
+ status &= 0xfe;
+ return status == SAM_STAT_CHECK_CONDITION;
+}
/*
- * MESSAGE CODES
+ * Extended message codes.
*/
-
-#define COMMAND_COMPLETE 0x00
-#define EXTENDED_MESSAGE 0x01
#define EXTENDED_MODIFY_DATA_POINTER 0x00
#define EXTENDED_SDTR 0x01
#define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */
#define EXTENDED_WDTR 0x03
#define EXTENDED_PPR 0x04
#define EXTENDED_MODIFY_BIDI_DATA_PTR 0x05
-#define SAVE_POINTERS 0x02
-#define RESTORE_POINTERS 0x03
-#define DISCONNECT 0x04
-#define INITIATOR_ERROR 0x05
-#define ABORT_TASK_SET 0x06
-#define MESSAGE_REJECT 0x07
-#define NOP 0x08
-#define MSG_PARITY_ERROR 0x09
-#define LINKED_CMD_COMPLETE 0x0a
-#define LINKED_FLG_CMD_COMPLETE 0x0b
-#define TARGET_RESET 0x0c
-#define ABORT_TASK 0x0d
-#define CLEAR_TASK_SET 0x0e
-#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
-#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
-#define CLEAR_ACA 0x16
-#define LOGICAL_UNIT_RESET 0x17
-#define SIMPLE_QUEUE_TAG 0x20
-#define HEAD_OF_QUEUE_TAG 0x21
-#define ORDERED_QUEUE_TAG 0x22
-#define IGNORE_WIDE_RESIDUE 0x23
-#define ACA 0x24
-#define QAS_REQUEST 0x55
-
-/* Old SCSI2 names, don't use in new code */
-#define BUS_DEVICE_RESET TARGET_RESET
-#define ABORT ABORT_TASK_SET
-
-/*
- * Host byte codes
- */
-
-#define DID_OK 0x00 /* NO error */
-#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
-#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
-#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
-#define DID_BAD_TARGET 0x04 /* BAD target. */
-#define DID_ABORT 0x05 /* Told to abort for some other reason */
-#define DID_PARITY 0x06 /* Parity error */
-#define DID_ERROR 0x07 /* Internal error */
-#define DID_RESET 0x08 /* Reset by somebody. */
-#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
-#define DID_PASSTHROUGH 0x0a /* Force command past mid-layer */
-#define DID_SOFT_ERROR 0x0b /* The low level driver just wish a retry */
-#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */
-#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also
- * without decrementing the retry count */
-#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution
- * and the driver blocked the port to
- * recover the link. Transport class will
- * retry or fail IO */
-#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */
-#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on
- * other paths */
-#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other
- * paths might yield different results */
-#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */
-#define DID_MEDIUM_ERROR 0x13 /* Medium error */
-#define DRIVER_OK 0x00 /* Driver status */
-
-/*
- * These indicate the error that occurred, and what is available.
- */
-
-#define DRIVER_BUSY 0x01
-#define DRIVER_SOFT 0x02
-#define DRIVER_MEDIA 0x03
-#define DRIVER_ERROR 0x04
-
-#define DRIVER_INVALID 0x05
-#define DRIVER_TIMEOUT 0x06
-#define DRIVER_HARD 0x07
-#define DRIVER_SENSE 0x08
/*
* Internal return values.
*/
-
-#define NEEDS_RETRY 0x2001
-#define SUCCESS 0x2002
-#define FAILED 0x2003
-#define QUEUED 0x2004
-#define SOFT_ERROR 0x2005
-#define ADD_TO_MLQUEUE 0x2006
-#define TIMEOUT_ERROR 0x2007
-#define SCSI_RETURN_NOT_HANDLED 0x2008
-#define FAST_IO_FAIL 0x2009
+enum scsi_disposition {
+ NEEDS_RETRY = 0x2001,
+ SUCCESS = 0x2002,
+ FAILED = 0x2003,
+ QUEUED = 0x2004,
+ SOFT_ERROR = 0x2005,
+ ADD_TO_MLQUEUE = 0x2006,
+ TIMEOUT_ERROR = 0x2007,
+ SCSI_RETURN_NOT_HANDLED = 0x2008,
+ FAST_IO_FAIL = 0x2009,
+};
/*
* Midlevel queue return values.
@@ -203,14 +118,11 @@ static inline int scsi_is_wlun(u64 lun)
* These are set by:
*
* status byte = set from target device
- * msg_byte = return status from host adapter itself.
+ * msg_byte (unused)
* host_byte = set by low-level driver to indicate status.
- * driver_byte = set by mid-level.
*/
-#define status_byte(result) (((result) >> 1) & 0x7f)
-#define msg_byte(result) (((result) >> 8) & 0xff)
+#define status_byte(result) (result & 0xff)
#define host_byte(result) (((result) >> 16) & 0xff)
-#define driver_byte(result) (((result) >> 24) & 0xff)
#define sense_class(sense) (((sense) >> 4) & 0x7)
#define sense_error(sense) ((sense) & 0xf)
@@ -245,6 +157,9 @@ static inline int scsi_is_wlun(u64 lun)
#define SCSI_3 4 /* SPC */
#define SCSI_SPC_2 5
#define SCSI_SPC_3 6
+#define SCSI_SPC_4 7
+#define SCSI_SPC_5 8
+#define SCSI_SPC_6 14
/*
* INQ PERIPHERAL QUALIFIERS
@@ -274,4 +189,35 @@ static inline int scsi_is_wlun(u64 lun)
/* Used to obtain the PCI location of a device */
#define SCSI_IOCTL_GET_PCI 0x5387
+/** scsi_status_is_good - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true for known good conditions that may be treated as
+ * command completed normally
+ */
+static inline bool scsi_status_is_good(int status)
+{
+ if (status < 0)
+ return false;
+
+ if (host_byte(status) == DID_NO_CONNECT)
+ return false;
+
+ /*
+ * FIXME: bit0 is listed as reserved in SCSI-2, but is
+ * significant in SCSI-3. For now, we follow the SCSI-2
+ * behaviour and ignore reserved bits.
+ */
+ status &= 0xfe;
+ return ((status == SAM_STAT_GOOD) ||
+ (status == SAM_STAT_CONDITION_MET) ||
+ /* Next two "intermediate" statuses are obsolete in SAM-4 */
+ (status == SAM_STAT_INTERMEDIATE) ||
+ (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
+ /* FIXME: this is obsolete in SAM-3 */
+ (status == SAM_STAT_COMMAND_TERMINATED));
+}
+
#endif /* _SCSI_SCSI_H */
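
With msg_byte() and driver_byte() gone, the result word carries only the SAM status in bits 0-7 and the host byte in bits 16-23. A small sketch (values illustrative) composing and testing a result with the macros and helpers above:

	/* Sketch: build a result the way the midlayer now reads it. */
	static bool result_ok_sketch(void)
	{
		int result = (DID_OK << 16) | SAM_STAT_GOOD;

		if (scsi_status_is_check_condition(result))
			return false;		/* sense data pending */

		return scsi_status_is_good(result);	/* true here */
	}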
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
index 6b8128005af8..9b1f0f424a79 100644
--- a/include/scsi/scsi_bsg_iscsi.h
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -84,7 +84,7 @@ struct iscsi_bsg_reply {
*/
uint32_t result;
- /* If there was reply_payload, how much was recevied ? */
+ /* If there was reply_payload, how much was received? */
uint32_t reply_payload_rcv_len;
union {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index e76bac4d14c5..526def14e7fb 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,10 +10,8 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_request.h>
struct Scsi_Host;
-struct scsi_driver;
/*
* MAX_COMMAND_SIZE is:
@@ -28,9 +26,6 @@ struct scsi_driver;
* supports without specifying a cmd_len by ULD's
*/
#define MAX_COMMAND_SIZE 16
-#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
-# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
-#endif
struct scsi_data_buffer {
struct sg_table table;
@@ -55,26 +50,38 @@ struct scsi_pointer {
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
-#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
-#define SCMD_INITIALIZED (1 << 2)
-#define SCMD_LAST (1 << 3)
+#define SCMD_INITIALIZED (1 << 1)
+#define SCMD_LAST (1 << 2)
+/*
+ * libata uses SCSI EH to fetch sense data for successful commands.
+ * SCSI EH should not overwrite scmd->result when SCMD_FORCE_EH_SUCCESS is set.
+ */
+#define SCMD_FORCE_EH_SUCCESS (1 << 3)
+#define SCMD_FAIL_IF_RECOVERING (1 << 4)
/* flags preserved across unprep / reprep */
-#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
+#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED | SCMD_FAIL_IF_RECOVERING)
/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
#define SCMD_STATE_INFLIGHT 1
+enum scsi_cmnd_submitter {
+ SUBMITTED_BY_BLOCK_LAYER = 0,
+ SUBMITTED_BY_SCSI_ERROR_HANDLER = 1,
+ SUBMITTED_BY_SCSI_RESET_IOCTL = 2,
+} __packed;
+
struct scsi_cmnd {
- struct scsi_request req;
struct scsi_device *device;
- struct list_head eh_entry; /* entry for the host eh_cmd_q */
+ struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
struct delayed_work abort_work;
struct rcu_head rcu;
int eh_eflags; /* Used by error handler */
+ int budget_token;
+
/*
* This is set to jiffies as it was when the command was first
* allocated. It is used to time how long the command has
@@ -88,13 +95,12 @@ struct scsi_cmnd {
unsigned char prot_op;
unsigned char prot_type;
unsigned char prot_flags;
+ enum scsi_cmnd_submitter submitter;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
- /* These elements define the operation we are about to perform */
- unsigned char *cmnd;
-
+ unsigned char cmnd[32]; /* SCSI CDB */
/* These elements define the operation we ultimately want to perform */
struct scsi_data_buffer sdb;
@@ -108,25 +114,23 @@ struct scsi_cmnd {
(ie, between disconnect /
reconnects. Probably == sector
size */
-
- struct request *request; /* The command we are
- working on */
-
+ unsigned resid_len; /* residual count */
+ unsigned sense_len;
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
* command (auto-sense). Length must be
* SCSI_SENSE_BUFFERSIZE bytes. */
- /* Low-level done function - can be used by low-level driver to point
- * to completion function. Not used by mid/upper level code. */
- void (*scsi_done) (struct scsi_cmnd *);
+ int flags; /* Command flags */
+ unsigned long state; /* Command completion state */
+
+ unsigned int extra_len; /* length of alignment and padding */
/*
- * The following fields can be written to by the host specific code.
- * Everything else should be left alone.
+ * The fields below can be modified by the LLD but the fields above
+ * must not be modified.
*/
- struct scsi_pointer SCp; /* Scratchpad used by some host adapters */
unsigned char *host_scribble; /* The host adapter is allowed to
* call scsi_malloc and get some memory
@@ -137,13 +141,14 @@ struct scsi_cmnd {
* to be at an address < 16Mb). */
int result; /* Status code from lower level driver */
- int flags; /* Command flags */
- unsigned long state; /* Command completion state */
-
- unsigned char tag; /* SCSI-II queued command tag */
- unsigned int extra_len; /* length of alignment and padding */
};
+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+ return blk_mq_rq_from_pdu(scmd);
+}
+
/*
* Return the driver private allocation behind the command.
* Only works if cmd_size is set in the host template.
@@ -153,11 +158,8 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
return cmd + 1;
}
-/* make sure not to use it with passthrough commands */
-static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
-{
- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
-}
+void scsi_done(struct scsi_cmnd *cmd);
+void scsi_done_direct(struct scsi_cmnd *cmd);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
@@ -165,7 +167,8 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
+blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
+void scsi_free_sgtables(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
@@ -192,19 +195,19 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
- cmd->req.resid_len = resid;
+ cmd->resid_len = resid;
}
static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
- return cmd->req.resid_len;
+ return cmd->resid_len;
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
- void *buf, int buflen)
+ const void *buf, int buflen)
{
return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
buf, buflen);
@@ -217,6 +220,25 @@ static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
buf, buflen);
}
+static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
+{
+ return blk_rq_pos(scsi_cmd_to_rq(scmd));
+}
+
+static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+{
+ unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+ return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;
+}
+
+static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
+{
+ unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+ return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> SECTOR_SHIFT >> shift;
+}
+
/*
* The operations below are hints that tell the controller driver how
* to handle I/Os with DIF or similar types of protection information.
@@ -279,9 +301,11 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
return scmd->prot_type;
}
-static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
- return blk_rq_pos(scmd->request);
+ struct request *rq = blk_mq_rq_from_pdu(scmd);
+
+ return t10_pi_ref_tag(rq);
}
static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
@@ -307,9 +331,14 @@ static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
#define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
-static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
+static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
{
- cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+ cmd->result = (cmd->result & 0xffffff00) | status;
+}
+
+static inline u8 get_status_byte(struct scsi_cmnd *cmd)
+{
+ return cmd->result & 0xff;
}
static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
@@ -317,9 +346,36 @@ static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}
-static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+static inline u8 get_host_byte(struct scsi_cmnd *cmd)
+{
+ return (cmd->result >> 16) & 0xff;
+}
+
+/**
+ * scsi_msg_to_host_byte() - translate message byte
+ *
+ * Translate the SCSI parallel message byte to a matching
+ * host byte setting. A message of COMMAND_COMPLETE indicates
+ * successful command execution; any other message indicates an
+ * error. As the messages themselves only have meaning for the
+ * SCSI parallel protocol, this function translates them into a
+ * matching host byte value for SCSI EH.
+ */
+static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg)
{
- cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
+ switch (msg) {
+ case COMMAND_COMPLETE:
+ break;
+ case ABORT_TASK_SET:
+ set_host_byte(cmd, DID_ABORT);
+ break;
+ case TARGET_RESET:
+ set_host_byte(cmd, DID_RESET);
+ break;
+ default:
+ set_host_byte(cmd, DID_ERROR);
+ break;
+ }
}
static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
@@ -333,4 +389,10 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
return xfer_len;
}
+extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
+ u8 key, u8 asc, u8 ascq);
+
+struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags);
+
#endif /* _SCSI_SCSI_CMND_H */
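
scsi_get_lba() and scsi_logical_block_count() translate the request's 512-byte sector position and byte count into units of the device's logical block size. A sketch, assuming a device with 4096-byte logical blocks:

	/* Sketch: position/size of an I/O on a 4096-byte-block device. */
	static void lba_sketch(struct scsi_cmnd *scmd)
	{
		sector_t lba = scsi_get_lba(scmd);	/* 512B sectors >> 3 */
		unsigned int blocks = scsi_logical_block_count(scmd);

		scmd_dbg(scmd, "lba %llu, %u logical blocks\n",
			 (unsigned long long)lba, blocks);
	}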
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 731ac09ed231..fb58715fac86 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -7,8 +7,21 @@
#define _SCSI_COMMON_H_
#include <linux/types.h>
+#include <uapi/linux/pr.h>
#include <scsi/scsi_proto.h>
+enum scsi_pr_type {
+ SCSI_PR_WRITE_EXCLUSIVE = 0x01,
+ SCSI_PR_EXCLUSIVE_ACCESS = 0x03,
+ SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 0x05,
+ SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 0x06,
+ SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 0x07,
+ SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 0x08,
+};
+
+enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type);
+enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type);
+
static inline unsigned
scsi_varlen_cdb_length(const void *hdr)
{
@@ -25,6 +38,13 @@ scsi_command_size(const unsigned char *cmnd)
scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
}
+static inline unsigned char
+scsi_command_control(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
+}
+
/* Returns a human-readable name for the device */
extern const char *scsi_device_type(unsigned type);
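
scsi_command_control() returns the CONTROL byte, which sits at byte 1 of a variable-length CDB and in the last byte of a fixed-format CDB. A sketch reading the NACA bit (bit 2 of CONTROL per SAM):

	/* Sketch: test NACA in the CONTROL byte of any CDB format. */
	static inline bool cdb_naca_set(const unsigned char *cdb)
	{
		return scsi_command_control(cdb) & 0x04;
	}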
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index bc5909033d13..fddc767c4787 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -5,10 +5,12 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <linux/atomic.h>
+#include <linux/sbitmap.h>
+struct bsg_device;
struct device;
struct request_queue;
struct scsi_cmnd;
@@ -106,9 +108,10 @@ struct scsi_device {
struct list_head siblings; /* list of all devices on this host */
struct list_head same_target_siblings; /* just the devices sharing same target id */
- atomic_t device_busy; /* commands actually active on LLDD */
+ struct sbitmap budget_map;
atomic_t device_blocked; /* Device returned QUEUE_FULL. */
+ atomic_t restarts;
spinlock_t list_lock;
struct list_head starved_entry;
unsigned short queue_depth; /* How deep of a queue we want */
@@ -138,19 +141,48 @@ struct scsi_device {
const char * model; /* ... after scan; point to static string */
const char * rev; /* ... "nullnullnullnull" before scan */
-#define SCSI_VPD_PG_LEN 255
+#define SCSI_DEFAULT_VPD_LEN 255 /* default SCSI VPD page size (max) */
struct scsi_vpd __rcu *vpd_pg0;
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
struct scsi_vpd __rcu *vpd_pg89;
- unsigned char current_tag; /* current tag */
- struct scsi_target *sdev_target; /* used only for single_lun */
+ struct scsi_vpd __rcu *vpd_pgb0;
+ struct scsi_vpd __rcu *vpd_pgb1;
+ struct scsi_vpd __rcu *vpd_pgb2;
+
+ struct scsi_target *sdev_target;
blist_flags_t sdev_bflags; /* black/white flags as also found in
* scsi_devinfo.[hc]. For now used only to
* pass settings from slave_alloc to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+ unsigned manage_system_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for runtime device suspend and resume operations.
+ */
+ unsigned manage_runtime_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+ unsigned manage_shutdown:1;
+
+ /*
+ * If set and if the device is runtime suspended, ask the high-level
+ * device driver (sd) to force a runtime resume of the device.
+ */
+ unsigned force_runtime_start_on_system_start:1;
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
@@ -172,9 +204,11 @@ struct scsi_device {
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
+ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */
unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
unsigned no_write_same:1; /* no WRITE SAME command */
unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
+ unsigned use_16_for_sync:1; /* Use sync (16) over sync (10) */
unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
unsigned skip_vpd_pages:1; /* do not read VPD pages */
@@ -182,7 +216,6 @@ struct scsi_device {
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
unsigned no_start_on_add:1; /* do not issue start on add */
unsigned allow_restart:1; /* issue START_UNIT in error handler */
- unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
unsigned select_no_atn:1;
@@ -203,7 +236,14 @@ struct scsi_device {
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
* creation time */
+ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ unsigned silence_suspend:1; /* Do not print runtime PM related messages */
+ unsigned no_vpd_size:1; /* No VPD size reported in header */
+
+ unsigned cdl_supported:1; /* Command duration limits supported */
+ unsigned cdl_enable:1; /* Enable/disable Command duration limits */
+ unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
@@ -219,11 +259,11 @@ struct scsi_device {
atomic_t iorequest_cnt;
atomic_t iodone_cnt;
atomic_t ioerr_cnt;
+ atomic_t iotmo_cnt;
struct device sdev_gendev,
sdev_dev;
- struct execute_work ew; /* used to get process context on put */
struct work_struct requeue_work;
struct scsi_device_handler *handler;
@@ -232,6 +272,10 @@ struct scsi_device {
size_t dma_drain_len;
void *dma_drain_buf;
+ unsigned int sg_timeout;
+ unsigned int sg_reserved_size;
+
+ struct bsg_device *bsg_dev;
unsigned char access_state;
struct mutex state_mutex;
enum scsi_device_state sdev_state;
@@ -263,13 +307,15 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
__printf(3, 4) void
scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
-#define scmd_dbg(scmd, fmt, a...) \
- do { \
- if ((scmd)->request->rq_disk) \
- sdev_dbg((scmd)->device, "[%s] " fmt, \
- (scmd)->request->rq_disk->disk_name, ##a);\
- else \
- sdev_dbg((scmd)->device, fmt, ##a); \
+#define scmd_dbg(scmd, fmt, a...) \
+ do { \
+ struct request *__rq = scsi_cmd_to_rq((scmd)); \
+ \
+ if (__rq->q->disk) \
+ sdev_dbg((scmd)->device, "[%s] " fmt, \
+ __rq->q->disk->disk_name, ##a); \
+ else \
+ sdev_dbg((scmd)->device, fmt, ##a); \
} while (0)
enum scsi_target_state {
@@ -343,6 +389,8 @@ extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
extern void scsi_remove_device(struct scsi_device *);
extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
void scsi_attach_vpd(struct scsi_device *sdev);
+void scsi_cdl_check(struct scsi_device *sdev);
+int scsi_cdl_enable(struct scsi_device *sdev, bool enable);
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
extern int __must_check scsi_device_get(struct scsi_device *);
@@ -400,21 +448,21 @@ extern int scsi_track_queue_full(struct scsi_device *, int);
extern int scsi_set_medium_removal(struct scsi_device *, char);
-extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
- unsigned char *buffer, int len, int timeout,
- int retries, struct scsi_mode_data *data,
- struct scsi_sense_hdr *);
+int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+ int subpage, unsigned char *buffer, int len, int timeout,
+ int retries, struct scsi_mode_data *data,
+ struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
- int modepage, unsigned char *buffer, int len,
- int timeout, int retries,
- struct scsi_mode_data *data,
+ unsigned char *buffer, int len, int timeout,
+ int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
int buf_len);
-extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
- unsigned int len, unsigned char opcode);
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+ unsigned int len, unsigned char opcode,
+ unsigned short sa);
extern int scsi_device_set_state(struct scsi_device *sdev,
enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
@@ -430,36 +478,77 @@ extern void scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun,
enum scsi_scan_mode rescan);
extern void scsi_target_reap(struct scsi_target *);
-extern void scsi_target_block(struct device *);
+void scsi_block_targets(struct Scsi_Host *shost, struct device *dev);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
-extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, u64 flags,
- req_flags_t rq_flags, int *resid);
-/* Make sure any sense buffer is the correct size. */
-#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
- sshdr, timeout, retries, flags, rq_flags, resid) \
-({ \
- BUILD_BUG_ON((sense) != NULL && \
- sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
- __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
- sense, sshdr, timeout, retries, flags, rq_flags, \
- resid); \
-})
-static inline int scsi_execute_req(struct scsi_device *sdev,
- const unsigned char *cmd, int data_direction, void *buffer,
- unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
- int retries, int *resid)
-{
- return scsi_execute(sdev, cmd, data_direction, buffer,
- bufflen, NULL, sshdr, timeout, retries, 0, 0, resid);
-}
+
+/*
+ * scsi_execute_cmd users can set scsi_failure.result to have
+ * scsi_check_passthrough fail/retry a command. scsi_failure.result can be a
+ * specific host byte or message code, or SCMD_FAILURE_RESULT_ANY can be used
+ * to match any host or message code.
+ */
+#define SCMD_FAILURE_RESULT_ANY 0x7fffffff
+/*
+ * Set scsi_failure.result to SCMD_FAILURE_STAT_ANY to fail/retry any failure
+ * scsi_status_is_good returns false for.
+ */
+#define SCMD_FAILURE_STAT_ANY 0xff
+/*
+ * The following can be set to the scsi_failure sense, asc and ascq fields to
+ * match on any sense, ASC, or ASCQ value.
+ */
+#define SCMD_FAILURE_SENSE_ANY 0xff
+#define SCMD_FAILURE_ASC_ANY 0xff
+#define SCMD_FAILURE_ASCQ_ANY 0xff
+/* Always retry a matching failure. */
+#define SCMD_FAILURE_NO_LIMIT -1
+
+struct scsi_failure {
+ int result;
+ u8 sense;
+ u8 asc;
+ u8 ascq;
+ /*
+ * Number of times scsi_execute_cmd will retry the failure. These
+ * retries do not count toward total_allowed.
+ */
+ s8 allowed;
+ /* Number of times the failure has been retried. */
+ s8 retries;
+};
+
+struct scsi_failures {
+ /*
+ * If a scsi_failure does not have its own retry limit set up,
+ * this limit is used.
+ */
+ int total_allowed;
+ int total_retries;
+ struct scsi_failure *failure_definitions;
+};
+
+/* Optional arguments to scsi_execute_cmd */
+struct scsi_exec_args {
+ unsigned char *sense; /* sense buffer */
+ unsigned int sense_len; /* sense buffer len */
+ struct scsi_sense_hdr *sshdr; /* decoded sense header */
+ blk_mq_req_flags_t req_flags; /* BLK_MQ_REQ flags */
+ int scmd_flags; /* SCMD flags */
+ int *resid; /* residual length */
+ struct scsi_failures *failures; /* failures to retry */
+};
+
+int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+ blk_opf_t opf, void *buffer, unsigned int bufflen,
+ int timeout, int retries,
+ const struct scsi_exec_args *args);
+void scsi_failures_reset_retries(struct scsi_failures *failures);
+
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
@@ -589,6 +678,11 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
return 0;
}
+static inline int scsi_device_busy(struct scsi_device *sdev)
+{
+ return sbitmap_weight(&sdev->budget_map);
+}
+
#define MODULE_ALIAS_SCSI_DEVICE(type) \
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
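
scsi_execute_cmd() replaces the old scsi_execute()/scsi_execute_req() pair: the data direction is now a block-layer op, and optional arguments, including declarative retry rules, travel in scsi_exec_args. A sketch (timeout and retry counts illustrative) issuing TEST UNIT READY and retrying any UNIT ATTENTION up to three times:

	/* Sketch: TEST UNIT READY with declarative UA retries. */
	static int tur_sketch(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY };
		struct scsi_failure failure_defs[] = {
			{
				.sense = UNIT_ATTENTION,
				.asc = SCMD_FAILURE_ASC_ANY,
				.ascq = SCMD_FAILURE_ASCQ_ANY,
				.result = SAM_STAT_CHECK_CONDITION,
				.allowed = 3,
			},
			{}	/* terminating entry */
		};
		struct scsi_failures failures = {
			.failure_definitions = failure_defs,
		};
		const struct scsi_exec_args args = { .failures = &failures };

		return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
					10 * HZ, 5, &args);
	}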
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 3fdb322d4c4b..6b548dc2c496 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -28,10 +28,12 @@
#define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
/* override additional length field */
#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1ULL << 10))
-#define __BLIST_UNUSED_11 ((__force blist_flags_t)(1ULL << 11))
+/* ignore MEDIA CHANGE unit attention after resuming from runtime suspend */
+#define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
/* do not do automatic start on add */
#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
-#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
+/* do not ask for VPD page size first on some broken targets */
+#define BLIST_NO_VPD_SIZE ((__force blist_flags_t)(1ULL << 13))
#define __BLIST_UNUSED_14 ((__force blist_flags_t)(1ULL << 14))
#define __BLIST_UNUSED_15 ((__force blist_flags_t)(1ULL << 15))
#define __BLIST_UNUSED_16 ((__force blist_flags_t)(1ULL << 16))
@@ -73,9 +75,7 @@
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
((__force __u64)__BLIST_LAST_USED - 1ULL)))
-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
- __BLIST_UNUSED_13 | \
- __BLIST_UNUSED_14 | \
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \
__BLIST_UNUSED_15 | \
__BLIST_UNUSED_16 | \
__BLIST_UNUSED_24 | \
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 2852e470a8ed..4df943c1b90b 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Header file for SCSI device handler infrastruture.
+ * Header file for SCSI device handler infrastructure.
*
* Modified version of patches posted by Mike Christie <michaelc@cs.wisc.edu>
*
@@ -52,7 +52,8 @@ struct scsi_device_handler {
/* Filled by the hardware handler */
struct module *module;
const char *name;
- int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+ enum scsi_disposition (*check_sense)(struct scsi_device *,
+ struct scsi_sense_hdr *);
int (*attach)(struct scsi_device *);
void (*detach)(struct scsi_device *);
int (*activate)(struct scsi_device *, activate_complete, void *);
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 6dffa8555a39..4ce1988b2ba0 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -4,11 +4,10 @@
#include <linux/blk_types.h>
#include <linux/device.h>
+#include <scsi/scsi_cmnd.h>
struct module;
struct request;
-struct scsi_cmnd;
-struct scsi_device;
struct scsi_driver {
struct device_driver gendrv;
@@ -31,4 +30,10 @@ extern int scsi_register_interface(struct class_interface *);
#define scsi_unregister_interface(intf) \
class_interface_unregister(intf)
+/* make sure not to use it with passthrough commands */
+static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
+{
+ return to_scsi_driver(cmd->device->sdev_gendev.driver);
+}
+
#endif /* _SCSI_SCSI_DRIVER_H */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 6bd5ed695a5e..1ae08e81339f 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -17,7 +17,7 @@ extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
-extern int scsi_check_sense(struct scsi_cmnd *);
+extern enum scsi_disposition scsi_check_sense(struct scsi_cmnd *);
static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
{
@@ -38,10 +38,8 @@ struct scsi_eh_save {
unsigned underflow;
unsigned char cmd_len;
unsigned char prot_op;
- unsigned char *cmnd;
+ unsigned char cmnd[32];
struct scsi_data_buffer sdb;
- /* new command support */
- unsigned char eh_cmnd[BLK_MAX_CDB];
struct scatterlist sense_sgl;
};
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 46ef8cccc982..b259d42a1e1a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -16,10 +16,8 @@ struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
-struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
-struct scsi_host_cmd_pool;
struct scsi_transport_template;
@@ -29,41 +27,28 @@ struct scsi_transport_template;
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02
-struct scsi_host_template {
- struct module *module;
- const char *name;
+/**
+ * enum scsi_timeout_action - How to handle a command that timed out.
+ * @SCSI_EH_DONE: The command has already been completed.
+ * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion.
+ * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command.
+ */
+enum scsi_timeout_action {
+ SCSI_EH_DONE,
+ SCSI_EH_RESET_TIMER,
+ SCSI_EH_NOT_HANDLED,
+};
+struct scsi_host_template {
/*
- * The info function will return whatever useful information the
- * developer sees fit. If not provided, then the name field will
- * be used instead.
- *
- * Status: OPTIONAL
+ * Put the fields referenced in the I/O submission path together
+ * in the same cacheline
*/
- const char *(* info)(struct Scsi_Host *);
/*
- * Ioctl interface
- *
- * Status: OPTIONAL
- */
- int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
- void __user *arg);
-
-
-#ifdef CONFIG_COMPAT
- /*
- * Compat handler. Handle 32bit ABI.
- * When unknown ioctl is passed return -ENOIOCTLCMD.
- *
- * Status: OPTIONAL
+ * Additional per-command data allocated for the driver.
*/
- int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
- void __user *arg);
-#endif
-
- int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
- int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+ unsigned int cmd_size;
/*
* The queuecommand function is used to queue up a scsi
@@ -111,6 +96,41 @@ struct scsi_host_template {
*/
void (*commit_rqs)(struct Scsi_Host *, u16);
+ struct module *module;
+ const char *name;
+
+ /*
+ * The info function will return whatever useful information the
+ * developer sees fit. If not provided, then the name field will
+ * be used instead.
+ *
+ * Status: OPTIONAL
+ */
+ const char *(*info)(struct Scsi_Host *);
+
+ /*
+ * Ioctl interface
+ *
+ * Status: OPTIONAL
+ */
+ int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
+
+
+#ifdef CONFIG_COMPAT
+ /*
+ * Compat handler. Handle 32bit ABI.
+ * When unknown ioctl is passed return -ENOIOCTLCMD.
+ *
+ * Status: OPTIONAL
+ */
+ int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
+#endif
+
+ int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+ int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+
/*
* This is an error handling strategy routine. You don't need to
* define one of these if you don't want to - there is a default
@@ -225,6 +245,9 @@ struct scsi_host_template {
* midlayer calls this point so that the driver may deallocate
* and terminate any references to the target.
*
+ * Note: This callback is called with the host lock held and hence
+ * must not sleep.
+ *
* Status: OPTIONAL
*/
void (* target_destroy)(struct scsi_target *);
@@ -268,7 +291,17 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* map_queues)(struct Scsi_Host *shost);
+ void (* map_queues)(struct Scsi_Host *shost);
+
+ /*
+ * SCSI interface of blk_poll - poll for IO completions.
+ * Only applicable if SCSI LLD exposes multiple h/w queues.
+ *
+ * Return value: Number of completed entries found.
+ *
+ * Status: OPTIONAL
+ */
+ int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);
/*
* Check if scatterlists need to be padded for DMA draining.
@@ -313,7 +346,13 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+ enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *);
+ /*
+ * Optional routine that allows the transport to decide if a cmd
+ * is retryable. Return true if the transport is in a state in
+ * which the cmd should be retried.
+ */
+ bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);
/* This is an optional routine that allows transport to initiate
* LLD adapter or firmware reset using sysfs attribute.
@@ -334,12 +373,6 @@ struct scsi_host_template {
const char *proc_name;
/*
- * Used to store the procfs directory if a driver implements the
- * show_info method.
- */
- struct proc_dir_entry *proc_dir;
-
- /*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme. It is set to the maximum number
* of simultaneous commands a single hw queue in HBA will accept.
@@ -399,12 +432,6 @@ struct scsi_host_template {
*/
short cmd_per_lun;
- /*
- * present contains counter indicating how many boards of this
- * type were found when we did the scan.
- */
- unsigned char present;
-
/* If use block layer to manage tags, this is tag allocation policy */
int tag_alloc_policy;
@@ -419,11 +446,6 @@ struct scsi_host_template {
unsigned supported_mode:2;
/*
- * True if this host adapter uses unchecked DMA onto an ISA bus.
- */
- unsigned unchecked_isa_dma:1;
-
- /*
* True for emulated SCSI host adapters (e.g. ATAPI).
*/
unsigned emulated:1;
@@ -436,6 +458,12 @@ struct scsi_host_template {
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* True if the host uses host-wide tagspace */
+ unsigned host_tagset:1;
+
+ /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
+ unsigned queuecommand_may_block:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
@@ -451,14 +479,9 @@ struct scsi_host_template {
#define SCSI_DEFAULT_HOST_BLOCKED 7
/*
- * Pointer to the sysfs class properties for this host, NULL terminated.
+ * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
*/
- struct device_attribute **shost_attrs;
-
- /*
- * Pointer to the SCSI device properties for this host, NULL terminated.
- */
- struct device_attribute **sdev_attrs;
+ const struct attribute_group **shost_groups;
/*
* Pointer to the SCSI device attribute groups for this host,
@@ -474,15 +497,6 @@ struct scsi_host_template {
* scsi_netlink.h
*/
u64 vendor_id;
-
- /*
- * Additional per-command data allocated for the driver.
- */
- unsigned int cmd_size;
- struct scsi_host_cmd_pool *cmd_pool;
-
- /* Delay for runtime autosuspend */
- int rpm_autosuspend_delay;
};
/*
@@ -497,7 +511,7 @@ struct scsi_host_template {
unsigned long irq_flags; \
int rc; \
spin_lock_irqsave(shost->host_lock, irq_flags); \
- rc = func_name##_lck (cmd, cmd->scsi_done); \
+ rc = func_name##_lck(cmd); \
spin_unlock_irqrestore(shost->host_lock, irq_flags); \
return rc; \
}
@@ -537,14 +551,17 @@ struct Scsi_Host {
struct mutex scan_mutex;/* serialize scanning activity */
+ struct list_head eh_abort_list;
struct list_head eh_cmd_q;
struct task_struct * ehandler; /* Error recovery thread. */
struct completion * eh_action; /* Wait for specific actions on the
host. */
wait_queue_head_t host_wait;
- struct scsi_host_template *hostt;
+ const struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
+ struct kref tagset_refcnt;
+ struct completion tagset_freed;
/* Area to keep a shared tag map */
struct blk_mq_tag_set tag_set;
@@ -595,6 +612,7 @@ struct Scsi_Host {
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
unsigned int max_sectors;
+ unsigned int opt_sectors;
unsigned int max_segment_size;
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
@@ -603,11 +621,12 @@ struct Scsi_Host {
*
* Note: it is assumed that each hardware queue has a queue depth of
* can_queue. In other words, the total queue depth per host
- * is nr_hw_queues * can_queue.
+ * is nr_hw_queues * can_queue. However, when host_tagset is set,
+ * the total queue depth is can_queue.
*/
unsigned nr_hw_queues;
+ unsigned nr_maps;
unsigned active_mode:2;
- unsigned unchecked_isa_dma:1;
/*
* Host has requested that no further requests come through for the
@@ -634,6 +653,12 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* True if the host uses host-wide tagspace */
+ unsigned host_tagset:1;
+
+ /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
+ unsigned queuecommand_may_block:1;
+
/* Host responded with short (<36 bytes) INQUIRY result */
unsigned short_inquiry:1;
@@ -685,6 +710,9 @@ struct Scsi_Host {
*/
struct device *dma_dev;
+ /* Delay for runtime autosuspend */
+ int rpm_autosuspend_delay;
+
/*
* We should ensure that this is aligned, both for better performance
* and also because some compilers (m68k) don't automatically force
@@ -728,20 +756,26 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);
-extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
+extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
struct device *,
struct device *);
+#if defined(CONFIG_SCSI_PROC_FS)
+struct proc_dir_entry *
+scsi_template_proc_dir(const struct scsi_host_template *sht);
+#else
+#define scsi_template_proc_dir(sht) NULL
+#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
+extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
+extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
- int status);
+ enum scsi_host_status status);
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
struct device *dev)
@@ -770,21 +804,11 @@ extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
void scsi_host_busy_iter(struct Scsi_Host *,
- bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
+ bool (*fn)(struct scsi_cmnd *, void *), void *priv);
struct class_container;
/*
- * These two functions are used to allocate and free a pseudo device
- * which will connect to the host adapter itself rather than any
- * physical device. You must deallocate when you are done with the
- * thing. This physical pseudo-device isn't real and won't be available
- * from any high-level drivers.
- */
-extern void scsi_free_host_dev(struct scsi_device *);
-extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
-
-/*
* DIF defines the exchange of protection information between
* initiator and SBC block device.
*
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index b465799f4d2d..a207c07da9d2 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -18,7 +18,9 @@
#ifdef __KERNEL__
+struct gendisk;
struct scsi_device;
+struct sg_io_hdr;
/*
* Structures used for scsi_ioctl et al.
@@ -43,8 +45,11 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
-extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
+ void __user *arg);
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
+bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index c36860111932..07d65c1f59db 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -151,6 +151,11 @@
#define ZO_FINISH_ZONE 0x02
#define ZO_OPEN_ZONE 0x03
#define ZO_RESET_WRITE_POINTER 0x04
+/* values for PR in service action */
+#define READ_KEYS 0x00
+#define READ_RESERVATION 0x01
+#define REPORT_CAPABILITES 0x02
+#define READ_FULL_STATUS 0x03
/* values for variable length command */
#define XDREAD_32 0x03
#define XDWRITE_32 0x04
@@ -190,43 +195,25 @@ struct scsi_varlen_cdb_hdr {
* SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
* T10/1561-D Revision 4 Draft dated 7th November 2002.
*/
-#define SAM_STAT_GOOD 0x00
-#define SAM_STAT_CHECK_CONDITION 0x02
-#define SAM_STAT_CONDITION_MET 0x04
-#define SAM_STAT_BUSY 0x08
-#define SAM_STAT_INTERMEDIATE 0x10
-#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-#define SAM_STAT_TASK_SET_FULL 0x28
-#define SAM_STAT_ACA_ACTIVE 0x30
-#define SAM_STAT_TASK_ABORTED 0x40
-
-/*
- * Status codes. These are deprecated as they are shifted 1 bit right
- * from those found in the SCSI standards. This causes confusion for
- * applications that are ported to several OSes. Prefer SAM Status codes
- * above.
- */
-
-#define GOOD 0x00
-#define CHECK_CONDITION 0x01
-#define CONDITION_GOOD 0x02
-#define BUSY 0x04
-#define INTERMEDIATE_GOOD 0x08
-#define INTERMEDIATE_C_GOOD 0x0a
-#define RESERVATION_CONFLICT 0x0c
-#define COMMAND_TERMINATED 0x11
-#define QUEUE_FULL 0x14
-#define ACA_ACTIVE 0x18
-#define TASK_ABORTED 0x20
+enum sam_status {
+ SAM_STAT_GOOD = 0x00,
+ SAM_STAT_CHECK_CONDITION = 0x02,
+ SAM_STAT_CONDITION_MET = 0x04,
+ SAM_STAT_BUSY = 0x08,
+ SAM_STAT_INTERMEDIATE = 0x10,
+ SAM_STAT_INTERMEDIATE_CONDITION_MET = 0x14,
+ SAM_STAT_RESERVATION_CONFLICT = 0x18,
+ SAM_STAT_COMMAND_TERMINATED = 0x22, /* obsolete in SAM-3 */
+ SAM_STAT_TASK_SET_FULL = 0x28,
+ SAM_STAT_ACA_ACTIVE = 0x30,
+ SAM_STAT_TASK_ABORTED = 0x40,
+};
-#define STATUS_MASK 0xfe
+#define STATUS_MASK 0xfe
/*
* SENSE KEYS
*/
-
#define NO_SENSE 0x00
#define RECOVERED_ERROR 0x01
#define NOT_READY 0x02
@@ -241,7 +228,7 @@ struct scsi_varlen_cdb_hdr {
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
#define MISCOMPARE 0x0e
-
+#define COMPLETED 0x0f
/*
* DEVICE TYPES
@@ -325,7 +312,9 @@ enum zbc_zone_type {
ZBC_ZONE_TYPE_CONV = 0x1,
ZBC_ZONE_TYPE_SEQWRITE_REQ = 0x2,
ZBC_ZONE_TYPE_SEQWRITE_PREF = 0x3,
- /* 0x4 to 0xf are reserved */
+ ZBC_ZONE_TYPE_SEQ_OR_BEFORE_REQ = 0x4,
+ ZBC_ZONE_TYPE_GAP = 0x5,
+ /* 0x6 to 0xf are reserved */
};
/* Zone conditions of REPORT ZONES zone descriptors */
@@ -341,4 +330,31 @@ enum zbc_zone_cond {
ZBC_ZONE_COND_OFFLINE = 0xf,
};
+enum zbc_zone_alignment_method {
+ ZBC_CONSTANT_ZONE_LENGTH = 0x1,
+ ZBC_CONSTANT_ZONE_START_OFFSET = 0x8,
+};
+
+/* Version descriptor values for INQUIRY */
+enum scsi_version_descriptor {
+ SCSI_VERSION_DESCRIPTOR_FCP4 = 0x0a40,
+ SCSI_VERSION_DESCRIPTOR_ISCSI = 0x0960,
+ SCSI_VERSION_DESCRIPTOR_SAM5 = 0x00a0,
+ SCSI_VERSION_DESCRIPTOR_SAS3 = 0x0c60,
+ SCSI_VERSION_DESCRIPTOR_SBC3 = 0x04c0,
+ SCSI_VERSION_DESCRIPTOR_SBP3 = 0x0980,
+ SCSI_VERSION_DESCRIPTOR_SPC4 = 0x0460,
+ SCSI_VERSION_DESCRIPTOR_SRP = 0x0940
+};
+
+enum scsi_support_opcode {
+ SCSI_SUPPORT_NO_INFO = 0,
+ SCSI_SUPPORT_NOT_SUPPORTED = 1,
+ SCSI_SUPPORT_FULL = 3,
+ SCSI_SUPPORT_VENDOR = 5,
+};
+
+#define SCSI_CONTROL_MASK 0
+#define SCSI_GROUP_NUMBER_MASK 0
+
#endif /* _SCSI_PROTO_H_ */
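
The new ZBC zone types extend REPORT ZONES for ZBC-2 devices; only conventional and gap zones lack a write pointer. A sketch classifying a descriptor's zone type on that basis:

	/* Sketch: conventional and gap zones have no write pointer. */
	static bool zone_has_write_pointer(enum zbc_zone_type type)
	{
		switch (type) {
		case ZBC_ZONE_TYPE_CONV:
		case ZBC_ZONE_TYPE_GAP:
			return false;
		default:
			return true;
		}
	}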
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
deleted file mode 100644
index b06f28c74908..000000000000
--- a/include/scsi/scsi_request.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SCSI_SCSI_REQUEST_H
-#define _SCSI_SCSI_REQUEST_H
-
-#include <linux/blk-mq.h>
-
-#define BLK_MAX_CDB 16
-
-struct scsi_request {
- unsigned char __cmd[BLK_MAX_CDB];
- unsigned char *cmd;
- unsigned short cmd_len;
- int result;
- unsigned int sense_len;
- unsigned int resid_len; /* residual count */
- int retries;
- void *sense;
-};
-
-static inline struct scsi_request *scsi_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline void scsi_req_free_cmd(struct scsi_request *req)
-{
- if (req->cmd != req->__cmd)
- kfree(req->cmd);
-}
-
-void scsi_req_init(struct scsi_request *req);
-
-#endif /* _SCSI_SCSI_REQUEST_H */
diff --git a/include/scsi/scsi_status.h b/include/scsi/scsi_status.h
new file mode 100644
index 000000000000..9cb85262de64
--- /dev/null
+++ b/include/scsi/scsi_status.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SCSI_SCSI_STATUS_H
+#define _SCSI_SCSI_STATUS_H
+
+#include <linux/types.h>
+#include <scsi/scsi_proto.h>
+
+/* Message codes. */
+enum scsi_msg_byte {
+ COMMAND_COMPLETE = 0x00,
+ EXTENDED_MESSAGE = 0x01,
+ SAVE_POINTERS = 0x02,
+ RESTORE_POINTERS = 0x03,
+ DISCONNECT = 0x04,
+ INITIATOR_ERROR = 0x05,
+ ABORT_TASK_SET = 0x06,
+ MESSAGE_REJECT = 0x07,
+ NOP = 0x08,
+ MSG_PARITY_ERROR = 0x09,
+ LINKED_CMD_COMPLETE = 0x0a,
+ LINKED_FLG_CMD_COMPLETE = 0x0b,
+ TARGET_RESET = 0x0c,
+ ABORT_TASK = 0x0d,
+ CLEAR_TASK_SET = 0x0e,
+ INITIATE_RECOVERY = 0x0f, /* SCSI-II only */
+ RELEASE_RECOVERY = 0x10, /* SCSI-II only */
+ TERMINATE_IO_PROC = 0x11, /* SCSI-II only */
+ CLEAR_ACA = 0x16,
+ LOGICAL_UNIT_RESET = 0x17,
+ SIMPLE_QUEUE_TAG = 0x20,
+ HEAD_OF_QUEUE_TAG = 0x21,
+ ORDERED_QUEUE_TAG = 0x22,
+ IGNORE_WIDE_RESIDUE = 0x23,
+ ACA = 0x24,
+ QAS_REQUEST = 0x55,
+
+ /* Old SCSI2 names, don't use in new code */
+ BUS_DEVICE_RESET = TARGET_RESET,
+ ABORT = ABORT_TASK_SET,
+};
+
+/* Host byte codes. */
+enum scsi_host_status {
+ DID_OK = 0x00, /* NO error */
+ DID_NO_CONNECT = 0x01, /* Couldn't connect before timeout period */
+ DID_BUS_BUSY = 0x02, /* BUS stayed busy through time out period */
+ DID_TIME_OUT = 0x03, /* TIMED OUT for other reason */
+ DID_BAD_TARGET = 0x04, /* BAD target. */
+ DID_ABORT = 0x05, /* Told to abort for some other reason */
+ DID_PARITY = 0x06, /* Parity error */
+ DID_ERROR = 0x07, /* Internal error */
+ DID_RESET = 0x08, /* Reset by somebody. */
+ DID_BAD_INTR = 0x09, /* Got an interrupt we weren't expecting. */
+ DID_PASSTHROUGH = 0x0a, /* Force command past mid-layer */
+ DID_SOFT_ERROR = 0x0b, /* The low level driver just wants a retry */
+ DID_IMM_RETRY = 0x0c, /* Retry without decrementing retry count */
+ DID_REQUEUE = 0x0d, /* Requeue command (no immediate retry) also
+ * without decrementing the retry count */
+ DID_TRANSPORT_DISRUPTED = 0x0e, /* Transport error disrupted execution
+ * and the driver blocked the port to
+ * recover the link. Transport class will
+ * retry or fail IO */
+ DID_TRANSPORT_FAILFAST = 0x0f, /* Transport class fastfailed the io */
+ /*
+ * We used to have DID_TARGET_FAILURE, DID_NEXUS_FAILURE,
+ * DID_ALLOC_FAILURE and DID_MEDIUM_ERROR at 0x10 - 0x13. For compat
+ * with userspace apps that parse the host byte for SG IO, we leave
+ * that block of codes unused and start at 0x14 below.
+ */
+ DID_TRANSPORT_MARGINAL = 0x14, /* Transport marginal errors */
+};
+
+#endif /* _SCSI_SCSI_STATUS_H */
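
The host byte codes above land in bits 16-23 of scmd->result via set_host_byte(); scsi_msg_to_host_byte() in scsi_cmnd.h maps parallel-bus messages onto them. A sketch of an SPI-era completion path using both (scsi_done() as declared in scsi_cmnd.h):

	/* Sketch: fold a bus message into the host byte, then complete. */
	static void msg_complete_sketch(struct scsi_cmnd *cmd, u8 msg)
	{
		/* COMMAND_COMPLETE leaves result alone; others set DID_*. */
		scsi_msg_to_host_byte(cmd, msg);
		scsi_done(cmd);
	}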
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 7db2dd783834..483513c57597 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -67,6 +67,7 @@ enum fc_port_state {
FC_PORTSTATE_ERROR,
FC_PORTSTATE_LOOPBACK,
FC_PORTSTATE_DELETED,
+ FC_PORTSTATE_MARGINAL,
};
@@ -124,6 +125,7 @@ enum fc_vport_state {
#define FC_PORTSPEED_25GBIT 0x800
#define FC_PORTSPEED_64GBIT 0x1000
#define FC_PORTSPEED_128GBIT 0x2000
+#define FC_PORTSPEED_256GBIT 0x4000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
@@ -284,6 +286,36 @@ struct fc_rport_identifiers {
u32 roles;
};
+/*
+ * Fabric Performance Impact Notification Statistics
+ */
+struct fc_fpin_stats {
+ /* Delivery */
+ u64 dn;
+ u64 dn_unknown;
+ u64 dn_timeout;
+ u64 dn_unable_to_route;
+ u64 dn_device_specific;
+
+ /* Link Integrity */
+ u64 li;
+ u64 li_failure_unknown;
+ u64 li_link_failure_count;
+ u64 li_loss_of_sync_count;
+ u64 li_loss_of_signals_count;
+ u64 li_prim_seq_err_count;
+ u64 li_invalid_tx_word_count;
+ u64 li_invalid_crc_count;
+ u64 li_device_specific;
+
+ /* Congestion/Peer Congestion */
+ u64 cn;
+ u64 cn_clear;
+ u64 cn_lost_credit;
+ u64 cn_credit_stall;
+ u64 cn_oversubscription;
+ u64 cn_device_specific;
+};
/* Macro for use in defining Remote Port attributes */
#define FC_RPORT_ATTR(_name,_mode,_show,_store) \
@@ -325,6 +357,7 @@ struct fc_rport { /* aka fc_starget_attrs */
/* Dynamic Attributes */
u32 dev_loss_tmo; /* Remote Port loss timeout in seconds. */
+ struct fc_fpin_stats fpin_stats;
/* Private (Transport-managed) Attributes */
u64 node_name;
@@ -436,6 +469,9 @@ struct fc_host_statistics {
u64 fc_seq_not_found; /* seq is not found for exchange */
u64 fc_non_bls_resp; /* a non BLS response frame with
a sequence responder in new exch */
+ /* Host Congestion Signals */
+ u64 cn_sig_warn;
+ u64 cn_sig_alarm;
};
@@ -460,6 +496,7 @@ enum fc_host_event_code {
FCH_EVT_PORT_FABRIC = 0x204,
FCH_EVT_LINK_UNKNOWN = 0x500,
FCH_EVT_LINK_FPIN = 0x501,
+ FCH_EVT_LINK_FPIN_ACK = 0x502,
FCH_EVT_VENDOR_UNIQUE = 0xffff,
};
@@ -481,10 +518,11 @@ enum fc_host_event_code {
* managed by the transport w/o driver interaction.
*/
+#define FC_VENDOR_IDENTIFIER 8
#define FC_FC4_LIST_SIZE 32
#define FC_SYMBOLIC_NAME_SIZE 256
#define FC_VERSION_STRING_SIZE 64
-#define FC_SERIAL_NUMBER_SIZE 80
+#define FC_SERIAL_NUMBER_SIZE 64
struct fc_host_attrs {
/* Fixed Attributes */
@@ -496,6 +534,10 @@ struct fc_host_attrs {
u32 supported_speeds;
u32 maxframe_size;
u16 max_npiv_vports;
+ u32 max_ct_payload;
+ u32 num_ports;
+ u32 num_discovered_ports;
+ u32 bootbios_state;
char serial_number[FC_SERIAL_NUMBER_SIZE];
char manufacturer[FC_SERIAL_NUMBER_SIZE];
char model[FC_SYMBOLIC_NAME_SIZE];
@@ -504,6 +546,9 @@ struct fc_host_attrs {
char driver_version[FC_VERSION_STRING_SIZE];
char firmware_version[FC_VERSION_STRING_SIZE];
char optionrom_version[FC_VERSION_STRING_SIZE];
+ char vendor_identifier[FC_VENDOR_IDENTIFIER];
+ char bootbios_version[FC_SYMBOLIC_NAME_SIZE];
+
/* Dynamic Attributes */
u32 port_id;
@@ -515,6 +560,7 @@ struct fc_host_attrs {
char symbolic_name[FC_SYMBOLIC_NAME_SIZE];
char system_hostname[FC_SYMBOLIC_NAME_SIZE];
u32 dev_loss_tmo;
+ struct fc_fpin_stats fpin_stats;
/* Private (Transport-managed) Attributes */
enum fc_tgtid_binding_type tgtid_bind_type;
@@ -536,6 +582,9 @@ struct fc_host_attrs {
/* bsg support */
struct request_queue *rqst_q;
+
+ /* FDMI support version */
+ u8 fdmi_version;
};
#define shost_to_fc_host(x) \
@@ -615,6 +664,18 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
+#define fc_host_max_ct_payload(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->max_ct_payload)
+#define fc_host_vendor_identifier(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->vendor_identifier)
+#define fc_host_num_discovered_ports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->num_discovered_ports)
+#define fc_host_num_ports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->num_ports)
+#define fc_host_bootbios_version(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->bootbios_version)
+#define fc_host_bootbios_state(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->bootbios_state)
/* The functions by which the transport class and the driver communicate */
struct fc_function_template {
@@ -706,7 +767,6 @@ struct fc_function_template {
unsigned long disable_target_scan:1;
};
-
/**
* fc_remote_port_chkready - called to validate the remote port state
* prior to initiating io to the port.
@@ -722,6 +782,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
switch (rport->port_state) {
case FC_PORTSTATE_ONLINE:
+ case FC_PORTSTATE_MARGINAL:
if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
@@ -786,6 +847,7 @@ void fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
enum fc_host_event_code event_code, u32 event_data);
void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
u32 data_len, char *data_buf, u64 vendor_id);
+struct fc_rport *fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn);
void fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
enum fc_host_event_code event_code,
u32 data_len, char *data_buf, u64 vendor_id);
@@ -795,13 +857,15 @@ void fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
* Note: when calling fc_host_post_fc_event(), vendor_id may be
* specified as 0.
*/
-void fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf);
+void fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf,
+ u8 event_acknowledge);
struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
struct fc_vport_identifiers *);
int fc_vport_terminate(struct fc_vport *vport);
int fc_block_rport(struct fc_rport *rport);
int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
-enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd);
+enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd);
+bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd);
static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
{
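[Editor's sketch] With the FC_PORTSTATE_MARGINAL case added to fc_remote_port_chkready() above, a marginal rport still passes the readiness check, so I/O keeps flowing while the transport class decides whether to fail commands over. A hedged LLD-side sketch (my_lld_queuecommand and the hardware hand-off are hypothetical):

static int my_lld_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	int ret = fc_remote_port_chkready(rport);

	if (ret) {
		/* e.g. DID_IMM_RETRY or DID_NO_CONNECT in the host byte */
		cmd->result = ret;
		scsi_done(cmd);
		return 0;
	}
	/* ... hand the command to the hardware ... */
	return 0;
}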
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 8a26a2ffa952..fb3399e4cd29 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -82,6 +82,7 @@ struct iscsi_transport {
void (*destroy_session) (struct iscsi_cls_session *session);
struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
uint32_t cid);
+ void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active);
int (*bind_conn) (struct iscsi_cls_session *session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_eph, int is_leading);
@@ -161,7 +162,7 @@ struct iscsi_transport {
* transport registration upcalls
*/
extern struct scsi_transport_template *iscsi_register_transport(struct iscsi_transport *tt);
-extern int iscsi_unregister_transport(struct iscsi_transport *tt);
+extern void iscsi_unregister_transport(struct iscsi_transport *tt);
/*
* control plane upcalls
@@ -193,17 +194,28 @@ enum iscsi_connection_state {
ISCSI_CONN_UP = 0,
ISCSI_CONN_DOWN,
ISCSI_CONN_FAILED,
+ ISCSI_CONN_BOUND,
};
+#define ISCSI_CLS_CONN_BIT_CLEANUP 1
+
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
- struct list_head conn_list_err; /* item in connlist_err */
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
+ /*
+ * This protects the conn startup and binding/unbinding of the ep to
+ * the conn. Unbinding includes ep_disconnect and stop_conn.
+ */
struct mutex ep_mutex;
struct iscsi_endpoint *ep;
+ /* Used when accessing flags and queueing work. */
+ spinlock_t lock;
+ unsigned long flags;
+ struct work_struct cleanup_work;
+
struct device dev; /* sysfs transport/container device */
enum iscsi_connection_state state;
};
@@ -224,6 +236,14 @@ enum {
ISCSI_SESSION_FREE,
};
+enum {
+ ISCSI_SESSION_TARGET_UNBOUND,
+ ISCSI_SESSION_TARGET_ALLOCATED,
+ ISCSI_SESSION_TARGET_SCANNED,
+ ISCSI_SESSION_TARGET_UNBINDING,
+ ISCSI_SESSION_TARGET_MAX,
+};
+
#define ISCSI_MAX_TARGET -1
struct iscsi_cls_session {
@@ -241,6 +261,8 @@ struct iscsi_cls_session {
bool recovery_tmo_sysfs_override;
struct delayed_work recovery_work;
+ struct workqueue_struct *workq;
+
unsigned int target_id;
bool ida_used;
@@ -250,6 +272,7 @@ struct iscsi_cls_session {
*/
pid_t creator;
int state;
+ int target_state; /* session target bind state */
int sid; /* session id */
void *dd_data; /* LLD private data */
struct device dev; /* sysfs transport/container device */
@@ -268,7 +291,6 @@ struct iscsi_cls_session {
iscsi_dev_to_session(_stgt->dev.parent)
struct iscsi_cls_host {
- atomic_t nr_scans;
struct mutex mutex;
struct request_queue *bsg_q;
uint32_t port_speed;
@@ -284,7 +306,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
struct iscsi_endpoint {
void *dd_data; /* LLD private data */
struct device dev;
- uint64_t id;
+ int id;
struct iscsi_cls_conn *conn;
};
@@ -429,24 +451,27 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
struct iscsi_transport *t,
int dd_size,
unsigned int target_id);
+extern void iscsi_force_destroy_session(struct iscsi_cls_session *session);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
-extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+extern struct iscsi_cls_conn *iscsi_alloc_conn(struct iscsi_cls_session *sess,
int dd_size, uint32_t cid);
-extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+extern int iscsi_add_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_remove_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_put_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_get_conn(struct iscsi_cls_conn *conn);
extern void iscsi_unblock_session(struct iscsi_cls_session *session);
extern void iscsi_block_session(struct iscsi_cls_session *session);
-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+extern void iscsi_put_endpoint(struct iscsi_endpoint *ep);
extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
struct iscsi_transport *t,
uint32_t iface_type,
uint32_t iface_num, int dd_size);
extern void iscsi_destroy_iface(struct iscsi_iface *iface);
-extern struct iscsi_iface *iscsi_lookup_iface(int handle);
extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
extern int iscsi_is_session_dev(const struct device *dev);
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 05ec927a3c72..0e75b9277c8c 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -41,6 +41,7 @@ enum sas_linkrate {
SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS,
SAS_LINK_RATE_6_0_GBPS = 10,
SAS_LINK_RATE_12_0_GBPS = 11,
+ SAS_LINK_RATE_22_5_GBPS = 12,
/* These are virtual to the transport class and may never
* be signalled normally since the standard defined field
* is only 4 bits */
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index d22df12584f9..dfc78aa112ad 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -118,7 +118,7 @@ extern int srp_reconnect_rport(struct srp_rport *rport);
extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);
extern void srp_stop_rport_timers(struct srp_rport *rport);
-enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd);
+enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd);
/**
* srp_chkready() - evaluate the transport layer state before I/O
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 7327e12f3373..af31cecd9012 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -29,10 +29,6 @@
* For utility and test programs see: http://sg.danny.cz/sg/sg3_utils.html
*/
-#ifdef __KERNEL__
-extern int sg_big_buff; /* for sysctl */
-#endif
-
typedef struct sg_iovec /* same structure as used by readv() Linux system */
{ /* call. It defines one scatter-gather element. */
@@ -131,6 +127,39 @@ struct compat_sg_io_hdr {
#define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */
#define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */
+/*
+ * Obsolete DRIVER_SENSE driver byte
+ *
+ * Originally the SCSI midlayer would set the DRIVER_SENSE driver byte when
+ * a sense code was generated and a sense buffer was allocated.
+ * However, as nowadays every scsi command has a sense code allocated this
+ * distinction became moot as one could check the sense buffer directly.
+ * Consequently this byte is not set anymore from the midlayer, but SG will
+ * keep setting this byte to be compatible with previous releases.
+ */
+#define DRIVER_SENSE 0x08
+/* Obsolete driver_byte() declaration */
+#define driver_byte(result) (((result) >> 24) & 0xff)
+
+/*
+ * Original linux SCSI Status codes. They are shifted 1 bit right
+ * from those found in the SCSI standards.
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+#define ACA_ACTIVE 0x18
+#define TASK_ABORTED 0x20
+
+/* Obsolete status_byte() declaration */
+#define sg_status_byte(result) (((result) >> 1) & 0x7f)
typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */
@@ -145,7 +174,7 @@ typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */
- char orphan; /* 0 -> normal request, 1 -> from interruped SG_IO */
+ char orphan; /* 0 -> normal request, 1 -> from interrupted SG_IO */
char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */
char problem; /* 0 -> no problem detected, 1 -> error to report */
int pack_id; /* pack_id associated with request */
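[Editor's sketch] To illustrate the shifted legacy status codes and the compatibility DRIVER_SENSE bit defined above: sg_io_hdr already carries masked_status, which is the SAM status shifted right by one bit, exactly what sg_status_byte() computes from a packed result. A small userspace sketch (hdr assumed filled in by a completed SG_IO ioctl):

#include <scsi/sg.h>
#include <stdio.h>

static void report_status(const struct sg_io_hdr *hdr)
{
	/* masked_status already holds the 1-bit-shifted legacy value */
	if (hdr->masked_status == CHECK_CONDITION)
		printf("check condition, %d sense bytes\n", hdr->sb_len_wr);

	/* DRIVER_SENSE is still set by sg for backwards compatibility */
	if (hdr->driver_status & DRIVER_SENSE)
		printf("driver byte advertises sense data\n");
}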
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 177d8026e96f..dfe0984b58a9 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -107,10 +107,10 @@ struct srp_direct_buf {
* having the 20-byte structure padded to 24 bytes on 64-bit architectures.
*/
struct srp_indirect_buf {
- struct srp_direct_buf table_desc;
+ struct srp_direct_buf table_desc __packed __aligned(4);
__be32 len;
- struct srp_direct_buf desc_list[];
-} __attribute__((packed));
+ struct srp_direct_buf desc_list[] __packed __aligned(4);
+};
/* Immediate data buffer descriptor as defined in SRP2. */
struct srp_imm_buf {
@@ -175,13 +175,13 @@ struct srp_login_rsp {
u8 opcode;
u8 reserved1[3];
__be32 req_lim_delta;
- u64 tag;
+ u64 tag __packed __aligned(4);
__be32 max_it_iu_len;
__be32 max_ti_iu_len;
__be16 buf_fmt;
u8 rsp_flags;
u8 reserved2[25];
-} __attribute__((packed));
+};
struct srp_login_rej {
u8 opcode;
@@ -207,10 +207,6 @@ struct srp_t_logout {
u64 tag;
};
-/*
- * We need the packed attribute because the SRP spec only aligns the
- * 8-byte LUN field to 4 bytes.
- */
struct srp_tsk_mgmt {
u8 opcode;
u8 sol_not;
@@ -225,10 +221,6 @@ struct srp_tsk_mgmt {
u8 reserved5[8];
};
-/*
- * We need the packed attribute because the SRP spec only aligns the
- * 8-byte LUN field to 4 bytes.
- */
struct srp_cmd {
u8 opcode;
u8 sol_not;
@@ -266,7 +258,7 @@ struct srp_rsp {
u8 sol_not;
u8 reserved1[2];
__be32 req_lim_delta;
- u64 tag;
+ u64 tag __packed __aligned(4);
u8 reserved2[2];
u8 flags;
u8 status;
@@ -275,7 +267,7 @@ struct srp_rsp {
__be32 sense_data_len;
__be32 resp_data_len;
u8 data[];
-} __attribute__((packed));
+};
struct srp_cred_req {
u8 opcode;
@@ -301,13 +293,13 @@ struct srp_aer_req {
u8 sol_not;
u8 reserved[2];
__be32 req_lim_delta;
- u64 tag;
+ u64 tag __packed __aligned(4);
u32 reserved2;
struct scsi_lun lun;
__be32 sense_data_len;
u32 reserved3;
u8 sense_data[];
-} __attribute__((packed));
+};
struct srp_aer_rsp {
u8 opcode;
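[Editor's sketch] The pattern applied throughout this file moves __packed from the whole struct to the individual 8-byte members that the SRP spec only aligns to 4 bytes. A standalone illustration with a hypothetical struct demo_iu, using the underlying GCC attributes the kernel's __packed/__aligned macros expand to; both asserts hold with GCC/Clang:

#include <stddef.h>
#include <stdint.h>

struct demo_iu {
	uint8_t  opcode;
	uint8_t  reserved[3];
	uint32_t req_lim_delta;
	/* only this member is packed, so the struct keeps 4-byte alignment */
	uint64_t tag __attribute__((packed, aligned(4)));
};

_Static_assert(offsetof(struct demo_iu, tag) == 8, "tag at wire offset");
_Static_assert(_Alignof(struct demo_iu) == 4, "struct stays 4-byte aligned");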
diff --git a/include/scsi/viosrp.h b/include/scsi/viosrp.h
index c978133c83e3..6c5559d2b285 100644
--- a/include/scsi/viosrp.h
+++ b/include/scsi/viosrp.h
@@ -70,12 +70,17 @@ enum viosrp_crq_status {
};
struct viosrp_crq {
- u8 valid; /* used by RPA */
- u8 format; /* SCSI vs out-of-band */
- u8 reserved;
- u8 status; /* non-scsi failure? (e.g. DMA failure) */
- __be16 timeout; /* in seconds */
- __be16 IU_length; /* in bytes */
+ union {
+ __be64 high; /* High 64 bits */
+ struct {
+ u8 valid; /* used by RPA */
+ u8 format; /* SCSI vs out-of-band */
+ u8 reserved;
+ u8 status; /* non-scsi failure? (e.g. DMA failure) */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ };
+ };
__be64 IU_data_ptr; /* the TCE for transferring data */
};
diff --git a/include/soc/amlogic/meson_ddr_pmu.h b/include/soc/amlogic/meson_ddr_pmu.h
new file mode 100644
index 000000000000..4a33e4ab8ada
--- /dev/null
+++ b/include/soc/amlogic/meson_ddr_pmu.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __MESON_DDR_PMU_H__
+#define __MESON_DDR_PMU_H__
+
+#define MAX_CHANNEL_NUM 8
+
+enum {
+ ALL_CHAN_COUNTER_ID,
+ CHAN1_COUNTER_ID,
+ CHAN2_COUNTER_ID,
+ CHAN3_COUNTER_ID,
+ CHAN4_COUNTER_ID,
+ CHAN5_COUNTER_ID,
+ CHAN6_COUNTER_ID,
+ CHAN7_COUNTER_ID,
+ CHAN8_COUNTER_ID,
+ COUNTER_MAX_ID,
+};
+
+struct dmc_info;
+
+struct dmc_counter {
+ u64 all_cnt; /* Count of all requests entering/leaving the DDR controller */
+ union {
+ u64 all_req;
+ struct {
+ u64 all_idle_cnt;
+ u64 all_16bit_cnt;
+ };
+ };
+ u64 channel_cnt[MAX_CHANNEL_NUM]; /* Per-channel counters of the DMC bandwidth monitor */
+};
+
+struct dmc_hw_info {
+ void (*enable)(struct dmc_info *info);
+ void (*disable)(struct dmc_info *info);
+ /* Bind an axi line to a bandwidth-monitor channel */
+ void (*set_axi_filter)(struct dmc_info *info, int axi_id, int chann);
+ int (*irq_handler)(struct dmc_info *info,
+ struct dmc_counter *counter);
+ void (*get_counters)(struct dmc_info *info,
+ struct dmc_counter *counter);
+
+ int dmc_nr; /* The number of DMC controllers */
+ int chann_nr; /* The number of DMC bandwidth-monitor channels */
+ struct attribute **fmt_attr;
+ const u64 capability[2];
+};
+
+struct dmc_info {
+ const struct dmc_hw_info *hw_info;
+
+ void __iomem *ddr_reg[4];
+ unsigned long timer_value; /* Timer value in TIMER register */
+ void __iomem *pll_reg;
+ int irq_num; /* irq vector number */
+};
+
+int meson_ddr_pmu_create(struct platform_device *pdev);
+int meson_ddr_pmu_remove(struct platform_device *pdev);
+
+#endif /* __MESON_DDR_PMU_H__ */
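[Editor's sketch] struct dmc_hw_info is the per-SoC backend: each SoC supplies its callbacks plus the counter geometry. A hedged sketch of what a backend might look like; every g12_* name here is a hypothetical placeholder, not the real Amlogic implementation:

static const struct dmc_hw_info g12_dmc_info = {
	.enable		= g12_enable,		/* start the bandwidth counters */
	.disable	= g12_disable,		/* stop them */
	.set_axi_filter	= g12_set_axi_filter,	/* bind an AXI id to a channel */
	.irq_handler	= g12_irq_handler,
	.get_counters	= g12_get_counters,
	.dmc_nr		= 1,			/* one DMC controller */
	.chann_nr	= 4,			/* four monitor channels */
	.fmt_attr	= g12_fmt_attrs,
};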
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
index 7ecde3b159c8..ae99d3e855f1 100644
--- a/include/soc/arc/timers.h
+++ b/include/soc/arc/timers.h
@@ -17,8 +17,8 @@
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
/* CTRL reg bits */
-#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
-#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+#define ARC_TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define ARC_TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
#define ARC_TIMERN_MAX 0xFFFFFFFF
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
index 1d7071dc0bca..26b56a07bd1f 100644
--- a/include/soc/at91/atmel_tcb.h
+++ b/include/soc/at91/atmel_tcb.h
@@ -77,9 +77,6 @@ struct atmel_tc {
bool allocated;
};
-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-extern void atmel_tc_free(struct atmel_tc *tc);
-
/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
extern const u8 atmel_tc_divisors[5];
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
new file mode 100644
index 000000000000..5ad7ac2e3a7c
--- /dev/null
+++ b/include/soc/at91/sama7-ddr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Microchip SAMA7 UDDR Controller and DDR3 PHY Controller registers offsets
+ * and bit definitions.
+ *
+ * Copyright (C) [2020] Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#ifndef __SAMA7_DDR_H__
+#define __SAMA7_DDR_H__
+
+/* DDR3PHY */
+#define DDR3PHY_PIR (0x04) /* DDR3PHY PHY Initialization Register */
+#define DDR3PHY_PIR_DLLBYP (1 << 17) /* DLL Bypass */
+#define DDR3PHY_PIR_ITMSRST (1 << 4) /* Interface Timing Module Soft Reset */
+#define DDR3PHY_PIR_DLLLOCK (1 << 2) /* DLL Lock */
+#define DDR3PHY_PIR_DLLSRST (1 << 1) /* DLL Soft Reset */
+#define DDR3PHY_PIR_INIT (1 << 0) /* Initialization Trigger */
+
+#define DDR3PHY_PGCR (0x08) /* DDR3PHY PHY General Configuration Register */
+#define DDR3PHY_PGCR_CKDV1 (1 << 13) /* CK# Disable Value */
+#define DDR3PHY_PGCR_CKDV0 (1 << 12) /* CK Disable Value */
+
+#define DDR3PHY_PGSR (0x0C) /* DDR3PHY PHY General Status Register */
+#define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */
+
+#define DDR3PHY_ACDLLCR (0x14) /* DDR3PHY AC DLL Control Register */
+#define DDR3PHY_ACDLLCR_DLLSRST (1 << 30) /* DLL Soft Reset */
+
+#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
+#define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */
+#define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */
+#define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */
+
+#define DDR3PHY_DXCCR (0x28) /* DDR3PHY DATX8 Common Configuration Register */
+#define DDR3PHY_DXCCR_DXPDR (1 << 3) /* Data Power Down Receiver */
+
+#define DDR3PHY_DSGCR (0x2C) /* DDR3PHY DDR System General Configuration Register */
+#define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */
+
+#define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
+#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */
+#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */
+
+#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */
+
+/* UDDRC */
+#define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */
+#define UDDRC_STAT_SELFREF_TYPE_DIS (0x0 << 4) /* SDRAM is not in Self-refresh */
+#define UDDRC_STAT_SELFREF_TYPE_PHY (0x1 << 4) /* SDRAM is in Self-refresh, which was caused by PHY Master Request */
+#define UDDRC_STAT_SELFREF_TYPE_SW (0x2 << 4) /* SDRAM is in Self-refresh, which was not caused solely under Automatic Self-refresh control */
+#define UDDRC_STAT_SELFREF_TYPE_AUTO (0x3 << 4) /* SDRAM is in Self-refresh, which was caused by Automatic Self-refresh only */
+#define UDDRC_STAT_SELFREF_TYPE_MSK (0x3 << 4) /* Self-refresh type mask */
+#define UDDRC_STAT_OPMODE_INIT (0x0 << 0) /* Init */
+#define UDDRC_STAT_OPMODE_NORMAL (0x1 << 0) /* Normal */
+#define UDDRC_STAT_OPMODE_PWRDOWN (0x2 << 0) /* Power-down */
+#define UDDRC_STAT_OPMODE_SELF_REFRESH (0x3 << 0) /* Self-refresh */
+#define UDDRC_STAT_OPMODE_MSK (0x7 << 0) /* Operating mode mask */
+
+#define UDDRC_PWRCTL (0x30) /* UDDRC Low Power Control Register */
+#define UDDRC_PWRCTL_SELFREF_EN (1 << 0) /* Automatic self-refresh */
+#define UDDRC_PWRCTL_SELFREF_SW (1 << 5) /* Software self-refresh */
+
+#define UDDRC_DFIMISC (0x1B0) /* UDDRC DFI Miscellaneous Control Register */
+#define UDDRC_DFIMISC_DFI_INIT_COMPLETE_EN (1 << 0) /* PHY initialization complete enable signal */
+
+#define UDDRC_SWCTRL (0x320) /* UDDRC Software Register Programming Control Enable */
+#define UDDRC_SWCTRL_SW_DONE (1 << 0) /* Enable quasi-dynamic register programming outside reset */
+
+#define UDDRC_SWSTAT (0x324) /* UDDRC Software Register Programming Control Status */
+#define UDDRC_SWSTAT_SW_DONE_ACK (1 << 0) /* Register programming done */
+
+#define UDDRC_PSTAT (0x3FC) /* UDDRC Port Status Register */
+#define UDDRC_PSTAT_ALL_PORTS (0x1F001F) /* Read + writes outstanding transactions on all ports */
+
+#define UDDRC_PCTRL_0 (0x490) /* UDDRC Port 0 Control Register */
+#define UDDRC_PCTRL_1 (0x540) /* UDDRC Port 1 Control Register */
+#define UDDRC_PCTRL_2 (0x5F0) /* UDDRC Port 2 Control Register */
+#define UDDRC_PCTRL_3 (0x6A0) /* UDDRC Port 3 Control Register */
+#define UDDRC_PCTRL_4 (0x750) /* UDDRC Port 4 Control Register */
+
+#endif /* __SAMA7_DDR_H__ */
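[Editor's sketch] The UDDRC_PWRCTL/UDDRC_STAT definitions above suggest the software self-refresh flow: set the request bit, then poll the operating-mode field. A minimal sketch, assuming "base" maps the UDDRC register block; the real sequence likely involves more steps (port blocking via UDDRC_PCTRL_*, PHY handshakes) than shown:

static void uddrc_enter_self_refresh(void __iomem *base)
{
	u32 stat;

	/* request software-initiated self-refresh */
	writel(readl(base + UDDRC_PWRCTL) | UDDRC_PWRCTL_SELFREF_SW,
	       base + UDDRC_PWRCTL);

	/* wait until the controller reports the self-refresh opmode */
	do {
		stat = readl(base + UDDRC_STAT);
	} while ((stat & UDDRC_STAT_OPMODE_MSK) !=
		 UDDRC_STAT_OPMODE_SELF_REFRESH);
}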
diff --git a/include/soc/at91/sama7-sfrbu.h b/include/soc/at91/sama7-sfrbu.h
new file mode 100644
index 000000000000..76b740810d34
--- /dev/null
+++ b/include/soc/at91/sama7-sfrbu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Microchip SAMA7 SFRBU registers offsets and bit definitions.
+ *
+ * Copyright (C) [2020] Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#ifndef __SAMA7_SFRBU_H__
+#define __SAMA7_SFRBU_H__
+
+#ifdef CONFIG_SOC_SAMA7
+
+#define AT91_SFRBU_PSWBU (0x00) /* SFRBU Power Switch BU Control Register */
+#define AT91_SFRBU_PSWBU_PSWKEY (0x4BD20C << 8) /* Specific value mandatory to allow writing of other register bits */
+#define AT91_SFRBU_PSWBU_STATE (1 << 2) /* Power switch BU state */
+#define AT91_SFRBU_PSWBU_SOFTSWITCH (1 << 1) /* Power switch BU source selection */
+#define AT91_SFRBU_PSWBU_CTRL (1 << 0) /* Power switch BU control */
+
+#define AT91_SFRBU_25LDOCR (0x0C) /* SFRBU 2.5V LDO Control Register */
+#define AT91_SFRBU_25LDOCR_LDOANAKEY (0x3B6E18 << 8) /* Specific value mandatory to allow writing of other register bits. */
+#define AT91_SFRBU_25LDOCR_STATE (1 << 3) /* LDOANA Switch On/Off Control */
+#define AT91_SFRBU_25LDOCR_LP (1 << 2) /* LDOANA Low-Power Mode Control */
+#define AT91_SFRBU_PD_VALUE_MSK (0x3)
+#define AT91_SFRBU_25LDOCR_PD_VALUE(v) ((v) & AT91_SFRBU_PD_VALUE_MSK) /* LDOANA Pull-down value */
+
+#define AT91_FRBU_DDRPWR (0x10) /* SFRBU DDR Power Control Register */
+#define AT91_FRBU_DDRPWR_STATE (1 << 0) /* DDR Power Mode State */
+
+#endif /* CONFIG_SOC_SAMA7 */
+
+#endif /* __SAMA7_SFRBU_H__ */
+
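[Editor's sketch] The PSWKEY comment above describes a write-key convention: the key value must accompany any write that changes the other PSWBU bits, or the write is ignored. A hedged sketch, assuming "sfrbu" maps the SFRBU block and that setting SOFTSWITCH+CTRL selects the backup power source:

static void sfrbu_select_backup_power(void __iomem *sfrbu)
{
	/* key must be written together with the bits being changed */
	writel(AT91_SFRBU_PSWBU_PSWKEY | AT91_SFRBU_PSWBU_SOFTSWITCH |
	       AT91_SFRBU_PSWBU_CTRL,
	       sfrbu + AT91_SFRBU_PSWBU);
}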
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 3025aca3c358..73cac8d0287e 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -10,7 +10,6 @@
#include <linux/of_device.h>
struct rpi_firmware;
-struct pci_dev;
enum rpi_firmware_property_status {
RPI_FIRMWARE_STATUS_REQUEST = 0,
@@ -92,6 +91,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049,
RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050,
RPI_FIRMWARE_NOTIFY_XHCI_RESET = 0x00030058,
+ RPI_FIRMWARE_NOTIFY_DISPLAY_DONE = 0x00030066,
/* Dispmanx TAGS */
RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
@@ -136,13 +136,55 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001,
};
+enum rpi_firmware_clk_id {
+ RPI_FIRMWARE_EMMC_CLK_ID = 1,
+ RPI_FIRMWARE_UART_CLK_ID,
+ RPI_FIRMWARE_ARM_CLK_ID,
+ RPI_FIRMWARE_CORE_CLK_ID,
+ RPI_FIRMWARE_V3D_CLK_ID,
+ RPI_FIRMWARE_H264_CLK_ID,
+ RPI_FIRMWARE_ISP_CLK_ID,
+ RPI_FIRMWARE_SDRAM_CLK_ID,
+ RPI_FIRMWARE_PIXEL_CLK_ID,
+ RPI_FIRMWARE_PWM_CLK_ID,
+ RPI_FIRMWARE_HEVC_CLK_ID,
+ RPI_FIRMWARE_EMMC2_CLK_ID,
+ RPI_FIRMWARE_M2MC_CLK_ID,
+ RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
+ RPI_FIRMWARE_VEC_CLK_ID,
+ RPI_FIRMWARE_NUM_CLK_ID,
+};
+
+/**
+ * struct rpi_firmware_clk_rate_request - Firmware Request for a rate
+ * @id: ID of the clock being queried
+ * @rate: Rate in Hertz. Set by the firmware.
+ *
+ * Used by @RPI_FIRMWARE_GET_CLOCK_RATE, @RPI_FIRMWARE_GET_CLOCK_MEASURED,
+ * @RPI_FIRMWARE_GET_MAX_CLOCK_RATE and @RPI_FIRMWARE_GET_MIN_CLOCK_RATE.
+ */
+struct rpi_firmware_clk_rate_request {
+ __le32 id;
+ __le32 rate;
+} __packed;
+
+#define RPI_FIRMWARE_CLK_RATE_REQUEST(_id) \
+ { \
+ .id = cpu_to_le32(_id), \
+ }
+
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *data, size_t len);
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
+void rpi_firmware_put(struct rpi_firmware *fw);
+unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw,
+ unsigned int id);
+struct device_node *rpi_firmware_find_node(void);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
-int rpi_firmware_init_vl805(struct pci_dev *pdev);
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node);
#else
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
void *data, size_t len)
@@ -156,14 +198,28 @@ static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
return -ENOSYS;
}
+static inline void rpi_firmware_put(struct rpi_firmware *fw) { }
+
+static inline unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw,
+ unsigned int id)
+{
+ return UINT_MAX;
+}
+
+static inline struct device_node *rpi_firmware_find_node(void)
+{
+ return NULL;
+}
+
static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
return NULL;
}
-static inline int rpi_firmware_init_vl805(struct pci_dev *pdev)
+static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
{
- return 0;
+ return NULL;
}
#endif
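[Editor's sketch] A hedged usage sketch of the new rpi_firmware_clk_rate_request struct and its initializer macro, mirroring what rpi_firmware_clk_get_max_rate() presumably does internally; "fw" is a handle previously obtained with rpi_firmware_get() or devm_rpi_firmware_get():

static unsigned int query_arm_max_rate(struct rpi_firmware *fw)
{
	struct rpi_firmware_clk_rate_request msg =
		RPI_FIRMWARE_CLK_RATE_REQUEST(RPI_FIRMWARE_ARM_CLK_ID);
	int ret;

	ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_MAX_CLOCK_RATE,
				    &msg, sizeof(msg));
	if (ret)
		return 0;

	return le32_to_cpu(msg.rate);	/* rate in Hz, set by the firmware */
}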
diff --git a/include/soc/brcmstb/common.h b/include/soc/brcmstb/common.h
deleted file mode 100644
index e4fe76856de9..000000000000
--- a/include/soc/brcmstb/common.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2014 NVIDIA Corporation
- * Copyright © 2015 Broadcom Corporation
- */
-
-#ifndef __SOC_BRCMSTB_COMMON_H__
-#define __SOC_BRCMSTB_COMMON_H__
-
-bool soc_is_brcmstb(void);
-
-#endif /* __SOC_BRCMSTB_COMMON_H__ */
diff --git a/include/soc/canaan/k210-sysctl.h b/include/soc/canaan/k210-sysctl.h
new file mode 100644
index 000000000000..0c2b2c2dabca
--- /dev/null
+++ b/include/soc/canaan/k210-sysctl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef K210_SYSCTL_H
+#define K210_SYSCTL_H
+
+/*
+ * Kendryte K210 SoC system controller registers offsets.
+ * Taken from Kendryte SDK (kendryte-standalone-sdk).
+ */
+#define K210_SYSCTL_GIT_ID 0x00 /* Git short commit id */
+#define K210_SYSCTL_UART_BAUD 0x04 /* Default UARTHS baud rate */
+#define K210_SYSCTL_PLL0 0x08 /* PLL0 controller */
+#define K210_SYSCTL_PLL1 0x0C /* PLL1 controller */
+#define K210_SYSCTL_PLL2 0x10 /* PLL2 controller */
+#define K210_SYSCTL_PLL_LOCK 0x18 /* PLL lock tester */
+#define K210_SYSCTL_ROM_ERROR 0x1C /* AXI ROM detector */
+#define K210_SYSCTL_SEL0 0x20 /* Clock select controller 0 */
+#define K210_SYSCTL_SEL1 0x24 /* Clock select controller 1 */
+#define K210_SYSCTL_EN_CENT 0x28 /* Central clock enable */
+#define K210_SYSCTL_EN_PERI 0x2C /* Peripheral clock enable */
+#define K210_SYSCTL_SOFT_RESET 0x30 /* Soft reset ctrl */
+#define K210_SYSCTL_PERI_RESET 0x34 /* Peripheral reset controller */
+#define K210_SYSCTL_THR0 0x38 /* Clock threshold controller 0 */
+#define K210_SYSCTL_THR1 0x3C /* Clock threshold controller 1 */
+#define K210_SYSCTL_THR2 0x40 /* Clock threshold controller 2 */
+#define K210_SYSCTL_THR3 0x44 /* Clock threshold controller 3 */
+#define K210_SYSCTL_THR4 0x48 /* Clock threshold controller 4 */
+#define K210_SYSCTL_THR5 0x4C /* Clock threshold controller 5 */
+#define K210_SYSCTL_THR6 0x50 /* Clock threshold controller 6 */
+#define K210_SYSCTL_MISC 0x54 /* Miscellaneous controller */
+#define K210_SYSCTL_PERI 0x58 /* Peripheral controller */
+#define K210_SYSCTL_SPI_SLEEP 0x5C /* SPI sleep controller */
+#define K210_SYSCTL_RESET_STAT 0x60 /* Reset source status */
+#define K210_SYSCTL_DMA_SEL0 0x64 /* DMA handshake selector 0 */
+#define K210_SYSCTL_DMA_SEL1 0x68 /* DMA handshake selector 1 */
+#define K210_SYSCTL_POWER_SEL 0x6C /* IO Power Mode Select controller */
+
+void k210_clk_early_init(void __iomem *regs);
+
+#endif
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
new file mode 100644
index 000000000000..937cac52f36d
--- /dev/null
+++ b/include/soc/fsl/caam-blob.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+#ifndef __CAAM_BLOB_GEN
+#define __CAAM_BLOB_GEN
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define CAAM_BLOB_KEYMOD_LENGTH 16
+#define CAAM_BLOB_OVERHEAD (32 + 16)
+#define CAAM_BLOB_MAX_LEN 4096
+
+struct caam_blob_priv;
+
+/**
+ * struct caam_blob_info - information for CAAM blobbing
+ * @input: pointer to input buffer (must be DMAable)
+ * @input_len: length of @input buffer in bytes.
+ * @output: pointer to output buffer (must be DMAable)
+ * @output_len: length of @output buffer in bytes.
+ * @key_mod: key modifier
+ * @key_mod_len: length of @key_mod in bytes.
+ * May not exceed %CAAM_BLOB_KEYMOD_LENGTH
+ */
+struct caam_blob_info {
+ void *input;
+ size_t input_len;
+
+ void *output;
+ size_t output_len;
+
+ const void *key_mod;
+ size_t key_mod_len;
+};
+
+/**
+ * caam_blob_gen_init - initialize blob generation
+ * Return: pointer to new &struct caam_blob_priv instance on success
+ * and ``ERR_PTR(-ENODEV)`` if CAAM has no hardware blobbing support
+ * or no job ring could be allocated.
+ */
+struct caam_blob_priv *caam_blob_gen_init(void);
+
+/**
+ * caam_blob_gen_exit - free blob generation resources
+ * @priv: instance returned by caam_blob_gen_init()
+ */
+void caam_blob_gen_exit(struct caam_blob_priv *priv);
+
+/**
+ * caam_process_blob - encapsulate or decapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing key, blob and
+ * key modifier buffers.
+ * @encap: true for encapsulation, false for decapsulation
+ *
+ * Return: %0 and sets ``info->output_len`` on success and a negative
+ * error code otherwise.
+ */
+int caam_process_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info, bool encap);
+
+/**
+ * caam_encap_blob - encapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing input key,
+ * output blob and key modifier buffers.
+ *
+ * Return: %0 and sets ``info->output_len`` on success and
+ * a negative error code otherwise.
+ */
+static inline int caam_encap_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info)
+{
+ if (info->output_len < info->input_len + CAAM_BLOB_OVERHEAD)
+ return -EINVAL;
+
+ return caam_process_blob(priv, info, true);
+}
+
+/**
+ * caam_decap_blob - decapsulate blob
+ * @priv: instance returned by caam_blob_gen_init()
+ * @info: pointer to blobbing info describing output key,
+ * input blob and key modifier buffers.
+ *
+ * Return: %0 and sets ``info->output_len`` on success and
+ * a negative error code otherwise.
+ */
+static inline int caam_decap_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info)
+{
+ if (info->input_len < CAAM_BLOB_OVERHEAD ||
+ info->output_len < info->input_len - CAAM_BLOB_OVERHEAD)
+ return -EINVAL;
+
+ return caam_process_blob(priv, info, false);
+}
+
+#endif
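[Editor's sketch] Putting the caam-blob API together, a minimal kernel-side sketch of wrapping a key into a blob. Per the helpers above, the output buffer must allow CAAM_BLOB_OVERHEAD beyond the input length; buffers are assumed DMA-able and the key modifier must not exceed CAAM_BLOB_KEYMOD_LENGTH:

static int seal_key(void *key, size_t key_len, void *blob, size_t blob_len)
{
	struct caam_blob_priv *priv;
	struct caam_blob_info info = {
		.input       = key,
		.input_len   = key_len,
		.output      = blob,
		.output_len  = blob_len,	/* >= key_len + CAAM_BLOB_OVERHEAD */
		.key_mod     = "my-key-modifier",	/* hypothetical, 15 bytes */
		.key_mod_len = 15,
	};
	int ret;

	priv = caam_blob_gen_init();
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	ret = caam_encap_blob(priv, &info);	/* sets info.output_len */
	caam_blob_gen_exit(priv);
	return ret;
}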
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
index 90ae8d191f1a..bae490cac0aa 100644
--- a/include/soc/fsl/dpaa2-fd.h
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -7,7 +7,8 @@
#ifndef __FSL_DPAA2_FD_H
#define __FSL_DPAA2_FD_H
-#include <linux/kernel.h>
+#include <linux/byteorder/generic.h>
+#include <linux/types.h>
/**
* DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index c9d849924f89..4bf62de2e00e 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -44,6 +44,7 @@ struct device;
* @regs_cinh: The cache inhibited regs
* @dpio_id: The dpio index
* @qman_version: The qman version
+ * @qman_clk: The qman clock frequency in Hz
*
* Describes the attributes and features of the DPIO object.
*/
@@ -55,6 +56,7 @@ struct dpaa2_io_desc {
void __iomem *regs_cinh;
int dpio_id;
u32 qman_version;
+ u32 qman_clk;
};
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
@@ -129,4 +131,11 @@ int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
u32 *fcnt, u32 *bcnt);
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
u32 *num);
+
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff);
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff);
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce);
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d);
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes);
#endif /* __FSL_DPAA2_IO_H */
diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h
index 7614fee532f1..edd601f53f5d 100644
--- a/include/soc/fsl/qe/immap_qe.h
+++ b/include/soc/fsl/qe/immap_qe.h
@@ -13,7 +13,8 @@
#define _ASM_POWERPC_IMMAP_QE_H
#ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <asm/io.h>
#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 3feddfec9f87..af793f2a0ec4 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -27,12 +27,6 @@
#define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024
-/* Memory partitions
-*/
-#define MEM_PART_SYSTEM 0
-#define MEM_PART_SECONDARY 1
-#define MEM_PART_MURAM 2
-
/* Clocks and BRGs */
enum qe_clock {
QE_CLK_NONE = 0,
@@ -102,8 +96,9 @@ s32 cpm_muram_alloc(unsigned long size, unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
-unsigned long cpm_muram_offset(void __iomem *addr);
+unsigned long cpm_muram_offset(const void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
+void cpm_muram_free_addr(const void __iomem *addr);
#else
static inline s32 cpm_muram_alloc(unsigned long size,
unsigned long align)
@@ -126,7 +121,7 @@ static inline void __iomem *cpm_muram_addr(unsigned long offset)
return NULL;
}
-static inline unsigned long cpm_muram_offset(void __iomem *addr)
+static inline unsigned long cpm_muram_offset(const void __iomem *addr)
{
return -ENOSYS;
}
@@ -135,6 +130,9 @@ static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
{
return 0;
}
+static inline void cpm_muram_free_addr(const void __iomem *addr)
+{
+}
#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
/* QE PIO */
@@ -174,14 +172,15 @@ static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
/*
* Pin multiplexing functions.
*/
+struct device;
struct qe_pin;
#ifdef CONFIG_QE_GPIO
-extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
+extern struct qe_pin *qe_pin_request(struct device *dev, int index);
extern void qe_pin_free(struct qe_pin *qe_pin);
extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
extern void qe_pin_set_dedicated(struct qe_pin *pin);
#else
-static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
+static inline struct qe_pin *qe_pin_request(struct device *dev, int index)
{
return ERR_PTR(-ENOSYS);
}
@@ -239,42 +238,27 @@ static inline int qe_alive_during_sleep(void)
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
#define qe_muram_dma cpm_muram_dma
+#define qe_muram_free_addr cpm_muram_free_addr
-#ifdef CONFIG_PPC32
-#define qe_iowrite8(val, addr) out_8(addr, val)
-#define qe_iowrite16be(val, addr) out_be16(addr, val)
-#define qe_iowrite32be(val, addr) out_be32(addr, val)
-#define qe_ioread8(addr) in_8(addr)
-#define qe_ioread16be(addr) in_be16(addr)
-#define qe_ioread32be(addr) in_be32(addr)
-#else
-#define qe_iowrite8(val, addr) iowrite8(val, addr)
-#define qe_iowrite16be(val, addr) iowrite16be(val, addr)
-#define qe_iowrite32be(val, addr) iowrite32be(val, addr)
-#define qe_ioread8(addr) ioread8(addr)
-#define qe_ioread16be(addr) ioread16be(addr)
-#define qe_ioread32be(addr) ioread32be(addr)
-#endif
-
-#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr))
-#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define qe_clrbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
-#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr))
-#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define qe_clrbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
-#define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr))
-#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr))
+#define qe_setbits_8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define qe_clrbits_8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
#define qe_clrsetbits_be32(addr, clear, set) \
- qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr))
+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_be16(addr, clear, set) \
- qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr))
+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_8(addr, clear, set) \
- qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr))
+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
/* Structure that defines QE firmware binary files.
*
- * See Documentation/powerpc/qe_firmware.rst for a description of these
+ * See Documentation/arch/powerpc/qe_firmware.rst for a description of these
* fields.
*/
struct qe_firmware {
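[Editor's sketch] After this change the qe_* accessors reduce to plain ioread/iowrite read-modify-write sequences. Illustrative use, with regs->ctrl/regs->mode and the CTRL_EN / MODE_* masks as hypothetical names:

	qe_setbits_be32(&regs->ctrl, CTRL_EN);		/* ctrl |= CTRL_EN */
	qe_clrbits_be32(&regs->ctrl, CTRL_EN);		/* ctrl &= ~CTRL_EN */
	qe_clrsetbits_be16(&regs->mode, MODE_MSK, MODE_TDM); /* field update */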
diff --git a/include/soc/fsl/qe/qe_tdm.h b/include/soc/fsl/qe/qe_tdm.h
index b6febe225071..43ea830cfe1f 100644
--- a/include/soc/fsl/qe/qe_tdm.h
+++ b/include/soc/fsl/qe/qe_tdm.h
@@ -10,8 +10,8 @@
#ifndef _QE_TDM_H_
#define _QE_TDM_H_
-#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -19,6 +19,8 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
+struct device_node;
+
/* SI RAM entries */
#define SIR_LAST 0x0001
#define SIR_BYTE 0x0002
diff --git a/include/soc/fsl/qe/qmc.h b/include/soc/fsl/qe/qmc.h
new file mode 100644
index 000000000000..2a333fc1ea81
--- /dev/null
+++ b/include/soc/fsl/qe/qmc.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QMC management
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __SOC_FSL_QMC_H__
+#define __SOC_FSL_QMC_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device_node;
+struct device;
+struct qmc_chan;
+
+struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name);
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np);
+void qmc_chan_put(struct qmc_chan *chan);
+struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev, struct device_node *np,
+ const char *phandle_name);
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, struct device_node *np);
+
+enum qmc_mode {
+ QMC_TRANSPARENT,
+ QMC_HDLC,
+};
+
+struct qmc_chan_info {
+ enum qmc_mode mode;
+ unsigned long rx_fs_rate;
+ unsigned long rx_bit_rate;
+ u8 nb_rx_ts;
+ unsigned long tx_fs_rate;
+ unsigned long tx_bit_rate;
+ u8 nb_tx_ts;
+};
+
+int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info);
+
+struct qmc_chan_ts_info {
+ u64 rx_ts_mask_avail;
+ u64 tx_ts_mask_avail;
+ u64 rx_ts_mask;
+ u64 tx_ts_mask;
+};
+
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info);
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info);
+
+struct qmc_chan_param {
+ enum qmc_mode mode;
+ union {
+ struct {
+ u16 max_rx_buf_size;
+ u16 max_rx_frame_size;
+ bool is_crc32;
+ } hdlc;
+ struct {
+ u16 max_rx_buf_size;
+ } transp;
+ };
+};
+
+int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param);
+
+int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ void (*complete)(void *context), void *context);
+
+/* Flags (ORed together) passed to the read complete() callback's flags
+ * parameter in HDLC mode. In transparent mode no flags are set and the
+ * parameter has no meaning.
+ */
+#define QMC_RX_FLAG_HDLC_LAST BIT(11) /* Last in frame */
+#define QMC_RX_FLAG_HDLC_FIRST BIT(10) /* First in frame */
+#define QMC_RX_FLAG_HDLC_OVF BIT(5) /* Data overflow */
+#define QMC_RX_FLAG_HDLC_UNA BIT(4) /* Unaligned (i.e. bits received not a multiple of 8) */
+#define QMC_RX_FLAG_HDLC_ABORT BIT(3) /* Received an abort sequence (seven consecutive ones) */
+#define QMC_RX_FLAG_HDLC_CRC BIT(2) /* CRC error */
+
+int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ void (*complete)(void *context, size_t length,
+ unsigned int flags),
+ void *context);
+
+#define QMC_CHAN_READ (1<<0)
+#define QMC_CHAN_WRITE (1<<1)
+#define QMC_CHAN_ALL (QMC_CHAN_READ | QMC_CHAN_WRITE)
+
+int qmc_chan_start(struct qmc_chan *chan, int direction);
+int qmc_chan_stop(struct qmc_chan *chan, int direction);
+int qmc_chan_reset(struct qmc_chan *chan, int direction);
+
+#endif /* __SOC_FSL_QMC_H__ */
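[Editor's sketch] A hedged receive-path sketch tying the API together: queue a DMA buffer on an HDLC channel and inspect the completion flags defined above. "chan", "dma" and the buffer-resubmission policy are placeholders:

static void rx_done(void *context, size_t length, unsigned int flags)
{
	if (flags & (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_ABORT |
		     QMC_RX_FLAG_HDLC_CRC)) {
		/* bad frame: drop it and resubmit the buffer */
		return;
	}
	if (flags & QMC_RX_FLAG_HDLC_LAST) {
		/* "length" bytes complete the frame */
	}
}

static int start_rx(struct qmc_chan *chan, dma_addr_t dma, size_t size)
{
	int ret = qmc_chan_read_submit(chan, dma, size, rx_done, NULL);

	if (ret)
		return ret;
	return qmc_chan_start(chan, QMC_CHAN_READ);
}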
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index dc4e79468094..ad60b87a3c69 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -10,7 +10,7 @@
#ifndef __UCC_FAST_H__
#define __UCC_FAST_H__
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -146,7 +146,6 @@ struct ucc_fast_info {
resource_size_t regs;
int irq;
u32 uccm_mask;
- int bd_mem_part;
int brkpt_support;
int grant_support;
int tsa;
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
index 11a216e4e919..7548ce8a202d 100644
--- a/include/soc/fsl/qe/ucc_slow.h
+++ b/include/soc/fsl/qe/ucc_slow.h
@@ -11,7 +11,7 @@
#ifndef __UCC_SLOW_H__
#define __UCC_SLOW_H__
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index cfe00e08e85b..0d3d6beb7fdb 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -256,7 +256,7 @@ struct qm_dqrr_entry {
__be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
-} __packed;
+} __packed __aligned(64);
#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
@@ -289,7 +289,7 @@ union qm_mr_entry {
__be32 tag;
struct qm_fd fd;
u8 __reserved1[32];
- } __packed ern;
+ } __packed __aligned(64) ern;
struct {
u8 verb;
u8 fqs; /* Frame Queue Status */
@@ -689,7 +689,8 @@ enum qman_cb_dqrr_result {
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr);
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi);
/*
* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
@@ -1171,6 +1172,15 @@ int qman_delete_cgr(struct qman_cgr *cgr);
void qman_delete_cgr_safe(struct qman_cgr *cgr);
/**
+ * qman_update_cgr_safe - Modifies a congestion group object from any CPU
+ * @cgr: the 'cgr' object to modify
+ * @opts: state of the CGR settings
+ *
+ * This will select the proper CPU and modify the CGR settings.
+ */
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts);
+
+/**
* qman_query_cgr_congested - Queries CGR's congestion status
* @cgr: the 'cgr' object to query
* @result: returns 'cgr's congestion status, 1 (true) if congested
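[Editor's sketch] The qman_cb_dqrr callback gained a sched_napi parameter above, telling the handler whether it is running in a context that should defer to NAPI. A hedged sketch of the usual pattern (struct my_priv and my_dqrr_cb are hypothetical driver-side names):

struct my_priv {
	struct qman_fq fq;
	struct napi_struct napi;
};

static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
					   struct qman_fq *fq,
					   const struct qm_dqrr_entry *dqrr,
					   bool sched_napi)
{
	struct my_priv *priv = container_of(fq, struct my_priv, fq);

	if (sched_napi) {
		napi_schedule(&priv->napi);
		return qman_cb_dqrr_stop;	/* park until NAPI drains */
	}
	/* process dqrr->fd inline */
	return qman_cb_dqrr_consume;
}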
diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h
index 42d6aeb951fa..0bf610acafd0 100644
--- a/include/soc/imx/cpu.h
+++ b/include/soc/imx/cpu.h
@@ -9,6 +9,7 @@
#define MXC_CPU_MX27 27
#define MXC_CPU_MX31 31
#define MXC_CPU_MX35 35
+#define MXC_CPU_MX50 50
#define MXC_CPU_MX51 51
#define MXC_CPU_MX53 53
#define MXC_CPU_IMX6SL 0x60
diff --git a/include/soc/imx/revision.h b/include/soc/imx/revision.h
index b2a55dafaf0a..b122d2fc8881 100644
--- a/include/soc/imx/revision.h
+++ b/include/soc/imx/revision.h
@@ -22,6 +22,7 @@
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
+int mx25_revision(void);
int mx27_revision(void);
int mx31_revision(void);
int mx35_revision(void);
diff --git a/include/soc/imx/timer.h b/include/soc/imx/timer.h
deleted file mode 100644
index b888d5076b4d..000000000000
--- a/include/soc/imx/timer.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2015 Linaro Ltd.
- */
-
-#ifndef __SOC_IMX_TIMER_H__
-#define __SOC_IMX_TIMER_H__
-
-enum imx_gpt_type {
- GPT_TYPE_IMX1, /* i.MX1 */
- GPT_TYPE_IMX21, /* i.MX21/27 */
- GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
- GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
-};
-
-/*
- * This is a stop-gap solution for clock drivers like imx1/imx21 which call
- * mxc_timer_init() to initialize timer for non-DT boot. It can be removed
- * when these legacy non-DT support is converted or dropped.
- */
-void mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type);
-
-#endif /* __SOC_IMX_TIMER_H__ */
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index 5a34b87d89e3..000eb1cf68b7 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -9,37 +9,22 @@
#include <linux/bitops.h>
#include <linux/device.h>
-#ifdef CONFIG_MTK_SMI
+#if IS_ENABLED(CONFIG_MTK_SMI)
-#define MTK_LARB_NR_MAX 16
+enum iommu_atf_cmd {
+ IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm masters to enable/disable the IOMMU */
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra masters to enable the IOMMU */
+ IOMMU_ATF_CMD_MAX,
+};
#define MTK_SMI_MMU_EN(port) BIT(port)
struct mtk_smi_larb_iommu {
struct device *dev;
unsigned int mmu;
+ unsigned char bank[32];
};
-/*
- * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter.
- * It also initialize some basic setting(like iommu).
- * mtk_smi_larb_put: Disable the power domain and clocks for this local arbiter.
- * Both should be called in non-atomic context.
- *
- * Returns 0 if successful, negative on failure.
- */
-int mtk_smi_larb_get(struct device *larbdev);
-void mtk_smi_larb_put(struct device *larbdev);
-
-#else
-
-static inline int mtk_smi_larb_get(struct device *larbdev)
-{
- return 0;
-}
-
-static inline void mtk_smi_larb_put(struct device *larbdev) { }
-
#endif
#endif
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
new file mode 100644
index 000000000000..09722f83b0ca
--- /dev/null
+++ b/include/soc/microchip/mpfs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Microchip PolarFire SoC (MPFS)
+ *
+ * Copyright (c) 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#ifndef __SOC_MPFS_H__
+#define __SOC_MPFS_H__
+
+#include <linux/types.h>
+#include <linux/of_device.h>
+
+struct mpfs_sys_controller;
+
+struct mpfs_mss_msg {
+ u8 cmd_opcode;
+ u16 cmd_data_size;
+ struct mpfs_mss_response *response;
+ u8 *cmd_data;
+ u16 mbox_offset;
+ u16 resp_offset;
+};
+
+struct mpfs_mss_response {
+ u32 resp_status;
+ u32 *resp_msg;
+ u16 resp_size;
+};
+
+#if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL)
+
+int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, struct mpfs_mss_msg *msg);
+
+struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev);
+
+struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client);
+
+#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */
+
+#if IS_ENABLED(CONFIG_MCHP_CLK_MPFS)
+
+u32 mpfs_reset_read(struct device *dev);
+
+void mpfs_reset_write(struct device *dev, u32 val);
+
+#endif /* if IS_ENABLED(CONFIG_MCHP_CLK_MPFS) */
+
+#endif /* __SOC_MPFS_H__ */
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index da369b12005f..1e1b40f4e664 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -11,6 +11,8 @@
#include <linux/regmap.h>
#include <net/dsa.h>
+struct tc_mqprio_qopt_offload;
+
/* Port Group IDs (PGID) are masks of destination ports.
*
* For L2 forwarding, the switch performs 3 lookups in the PGID table for each
@@ -51,19 +53,22 @@
*/
/* Reserve some destination PGIDs at the end of the range:
+ * PGID_BLACKHOLE: used for dropping (not forwarding) frames
* PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses
* of the switch port net devices, towards the CPU port module.
* PGID_UC: the flooding destinations for unknown unicast traffic.
- * PGID_MC: the flooding destinations for broadcast and non-IP multicast
- * traffic.
+ * PGID_MC: the flooding destinations for non-IP multicast traffic.
* PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic.
* PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic.
+ * PGID_BC: the flooding destinations for broadcast traffic.
*/
-#define PGID_CPU 59
-#define PGID_UC 60
-#define PGID_MC 61
-#define PGID_MCIPV4 62
-#define PGID_MCIPV6 63
+#define PGID_BLACKHOLE 57
+#define PGID_CPU 58
+#define PGID_UC 59
+#define PGID_MC 60
+#define PGID_MCIPV4 61
+#define PGID_MCIPV6 62
+#define PGID_BC 63
#define for_each_unicast_dest_pgid(ocelot, pgid) \
for ((pgid) = 0; \
@@ -72,7 +77,7 @@
#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \
for ((pgid) = (ocelot)->num_phys_ports + 1; \
- (pgid) < PGID_CPU; \
+ (pgid) < PGID_BLACKHOLE; \
(pgid)++)
#define for_each_aggr_pgid(ocelot, pgid) \
@@ -86,21 +91,7 @@
/* Source PGIDs, one per physical port */
#define PGID_SRC 80
-#define IFH_INJ_BYPASS BIT(31)
-#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
-
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
-
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
-
-#define OCELOT_TAG_LEN 16
-#define OCELOT_SHORT_PREFIX_LEN 4
-#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_NUM_TC 8
#define OCELOT_SPEED_2500 0
#define OCELOT_SPEED_1000 1
@@ -122,9 +113,12 @@ enum ocelot_target {
QSYS,
REW,
SYS,
+ S0,
+ S1,
S2,
HSIO,
PTP,
+ FDMA,
GCB,
DEV_GMII,
TARGET_MAX,
@@ -338,13 +332,61 @@ enum ocelot_reg {
SYS_COUNT_RX_64,
SYS_COUNT_RX_65_127,
SYS_COUNT_RX_128_255,
- SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_256_511,
+ SYS_COUNT_RX_512_1023,
SYS_COUNT_RX_1024_1526,
SYS_COUNT_RX_1527_MAX,
SYS_COUNT_RX_PAUSE,
SYS_COUNT_RX_CONTROL,
SYS_COUNT_RX_LONGS,
SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_RX_RED_PRIO_0,
+ SYS_COUNT_RX_RED_PRIO_1,
+ SYS_COUNT_RX_RED_PRIO_2,
+ SYS_COUNT_RX_RED_PRIO_3,
+ SYS_COUNT_RX_RED_PRIO_4,
+ SYS_COUNT_RX_RED_PRIO_5,
+ SYS_COUNT_RX_RED_PRIO_6,
+ SYS_COUNT_RX_RED_PRIO_7,
+ SYS_COUNT_RX_YELLOW_PRIO_0,
+ SYS_COUNT_RX_YELLOW_PRIO_1,
+ SYS_COUNT_RX_YELLOW_PRIO_2,
+ SYS_COUNT_RX_YELLOW_PRIO_3,
+ SYS_COUNT_RX_YELLOW_PRIO_4,
+ SYS_COUNT_RX_YELLOW_PRIO_5,
+ SYS_COUNT_RX_YELLOW_PRIO_6,
+ SYS_COUNT_RX_YELLOW_PRIO_7,
+ SYS_COUNT_RX_GREEN_PRIO_0,
+ SYS_COUNT_RX_GREEN_PRIO_1,
+ SYS_COUNT_RX_GREEN_PRIO_2,
+ SYS_COUNT_RX_GREEN_PRIO_3,
+ SYS_COUNT_RX_GREEN_PRIO_4,
+ SYS_COUNT_RX_GREEN_PRIO_5,
+ SYS_COUNT_RX_GREEN_PRIO_6,
+ SYS_COUNT_RX_GREEN_PRIO_7,
+ SYS_COUNT_RX_ASSEMBLY_ERRS,
+ SYS_COUNT_RX_SMD_ERRS,
+ SYS_COUNT_RX_ASSEMBLY_OK,
+ SYS_COUNT_RX_MERGE_FRAGMENTS,
+ SYS_COUNT_RX_PMAC_OCTETS,
+ SYS_COUNT_RX_PMAC_UNICAST,
+ SYS_COUNT_RX_PMAC_MULTICAST,
+ SYS_COUNT_RX_PMAC_BROADCAST,
+ SYS_COUNT_RX_PMAC_SHORTS,
+ SYS_COUNT_RX_PMAC_FRAGMENTS,
+ SYS_COUNT_RX_PMAC_JABBERS,
+ SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_PMAC_SYM_ERRS,
+ SYS_COUNT_RX_PMAC_64,
+ SYS_COUNT_RX_PMAC_65_127,
+ SYS_COUNT_RX_PMAC_128_255,
+ SYS_COUNT_RX_PMAC_256_511,
+ SYS_COUNT_RX_PMAC_512_1023,
+ SYS_COUNT_RX_PMAC_1024_1526,
+ SYS_COUNT_RX_PMAC_1527_MAX,
+ SYS_COUNT_RX_PMAC_PAUSE,
+ SYS_COUNT_RX_PMAC_CONTROL,
+ SYS_COUNT_RX_PMAC_LONGS,
SYS_COUNT_TX_OCTETS,
SYS_COUNT_TX_UNICAST,
SYS_COUNT_TX_MULTICAST,
@@ -354,11 +396,64 @@ enum ocelot_reg {
SYS_COUNT_TX_PAUSE,
SYS_COUNT_TX_64,
SYS_COUNT_TX_65_127,
- SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_128_255,
+ SYS_COUNT_TX_256_511,
SYS_COUNT_TX_512_1023,
SYS_COUNT_TX_1024_1526,
SYS_COUNT_TX_1527_MAX,
- SYS_COUNT_TX_AGING,
+ SYS_COUNT_TX_YELLOW_PRIO_0,
+ SYS_COUNT_TX_YELLOW_PRIO_1,
+ SYS_COUNT_TX_YELLOW_PRIO_2,
+ SYS_COUNT_TX_YELLOW_PRIO_3,
+ SYS_COUNT_TX_YELLOW_PRIO_4,
+ SYS_COUNT_TX_YELLOW_PRIO_5,
+ SYS_COUNT_TX_YELLOW_PRIO_6,
+ SYS_COUNT_TX_YELLOW_PRIO_7,
+ SYS_COUNT_TX_GREEN_PRIO_0,
+ SYS_COUNT_TX_GREEN_PRIO_1,
+ SYS_COUNT_TX_GREEN_PRIO_2,
+ SYS_COUNT_TX_GREEN_PRIO_3,
+ SYS_COUNT_TX_GREEN_PRIO_4,
+ SYS_COUNT_TX_GREEN_PRIO_5,
+ SYS_COUNT_TX_GREEN_PRIO_6,
+ SYS_COUNT_TX_GREEN_PRIO_7,
+ SYS_COUNT_TX_AGED,
+ SYS_COUNT_TX_MM_HOLD,
+ SYS_COUNT_TX_MERGE_FRAGMENTS,
+ SYS_COUNT_TX_PMAC_OCTETS,
+ SYS_COUNT_TX_PMAC_UNICAST,
+ SYS_COUNT_TX_PMAC_MULTICAST,
+ SYS_COUNT_TX_PMAC_BROADCAST,
+ SYS_COUNT_TX_PMAC_PAUSE,
+ SYS_COUNT_TX_PMAC_64,
+ SYS_COUNT_TX_PMAC_65_127,
+ SYS_COUNT_TX_PMAC_128_255,
+ SYS_COUNT_TX_PMAC_256_511,
+ SYS_COUNT_TX_PMAC_512_1023,
+ SYS_COUNT_TX_PMAC_1024_1526,
+ SYS_COUNT_TX_PMAC_1527_MAX,
+ SYS_COUNT_DROP_LOCAL,
+ SYS_COUNT_DROP_TAIL,
+ SYS_COUNT_DROP_YELLOW_PRIO_0,
+ SYS_COUNT_DROP_YELLOW_PRIO_1,
+ SYS_COUNT_DROP_YELLOW_PRIO_2,
+ SYS_COUNT_DROP_YELLOW_PRIO_3,
+ SYS_COUNT_DROP_YELLOW_PRIO_4,
+ SYS_COUNT_DROP_YELLOW_PRIO_5,
+ SYS_COUNT_DROP_YELLOW_PRIO_6,
+ SYS_COUNT_DROP_YELLOW_PRIO_7,
+ SYS_COUNT_DROP_GREEN_PRIO_0,
+ SYS_COUNT_DROP_GREEN_PRIO_1,
+ SYS_COUNT_DROP_GREEN_PRIO_2,
+ SYS_COUNT_DROP_GREEN_PRIO_3,
+ SYS_COUNT_DROP_GREEN_PRIO_4,
+ SYS_COUNT_DROP_GREEN_PRIO_5,
+ SYS_COUNT_DROP_GREEN_PRIO_6,
+ SYS_COUNT_DROP_GREEN_PRIO_7,
+ SYS_COUNT_SF_MATCHING_FRAMES,
+ SYS_COUNT_SF_NOT_PASSING_FRAMES,
+ SYS_COUNT_SF_NOT_PASSING_SDU,
+ SYS_COUNT_SF_RED_FRAMES,
SYS_RESET_CFG,
SYS_SR_ETYPE_CFG,
SYS_VLAN_ETYPE_CFG,
@@ -381,7 +476,6 @@ enum ocelot_reg {
SYS_MMGT_FAST,
SYS_EVENTS_DIF,
SYS_EVENTS_CORE,
- SYS_CNT,
SYS_PTP_STATUS,
SYS_PTP_TXSTAMP,
SYS_PTP_NXT,
@@ -392,13 +486,6 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
- S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
- S2_CORE_MV_CFG,
- S2_CACHE_ENTRY_DAT,
- S2_CACHE_MASK_DAT,
- S2_CACHE_ACTION_DAT,
- S2_CACHE_CNT_DAT,
- S2_CACHE_TG_DAT,
PTP_PIN_CFG = PTP << TARGET_OFFSET,
PTP_PIN_TOD_SEC_MSB,
PTP_PIN_TOD_SEC_LSB,
@@ -430,6 +517,9 @@ enum ocelot_reg {
DEV_MAC_FC_MAC_LOW_CFG,
DEV_MAC_FC_MAC_HIGH_CFG,
DEV_MAC_STICKY,
+ DEV_MM_ENABLE_CONFIG,
+ DEV_MM_VERIF_CONFIG,
+ DEV_MM_STATUS,
PCS1G_CFG,
PCS1G_MODE_CFG,
PCS1G_SD_CFG,
@@ -517,6 +607,29 @@ enum ocelot_regfield {
REGFIELD_MAX
};

+enum {
+ /* VCAP_CORE_CFG */
+ VCAP_CORE_UPDATE_CTRL,
+ VCAP_CORE_MV_CFG,
+ /* VCAP_CORE_CACHE */
+ VCAP_CACHE_ENTRY_DAT,
+ VCAP_CACHE_MASK_DAT,
+ VCAP_CACHE_ACTION_DAT,
+ VCAP_CACHE_CNT_DAT,
+ VCAP_CACHE_TG_DAT,
+ /* VCAP_CONST */
+ VCAP_CONST_VCAP_VER,
+ VCAP_CONST_ENTRY_WIDTH,
+ VCAP_CONST_ENTRY_CNT,
+ VCAP_CONST_ENTRY_SWCNT,
+ VCAP_CONST_ENTRY_TG_WIDTH,
+ VCAP_CONST_ACTION_DEF_CNT,
+ VCAP_CONST_ACTION_WIDTH,
+ VCAP_CONST_CNT_WIDTH,
+ VCAP_CONST_CORE_CNT,
+ VCAP_CONST_IF_CNT,
+};
+
enum ocelot_ptp_pins {
PTP_PIN_0,
PTP_PIN_1,
@@ -525,11 +638,6 @@ enum ocelot_ptp_pins {
TOD_ACC_PIN
};

-struct ocelot_stat_layout {
- u32 offset;
- char name[ETH_GSTRING_LEN];
-};
-
enum ocelot_tag_prefix {
OCELOT_TAG_PREFIX_DISABLED = 0,
OCELOT_TAG_PREFIX_NONE,
@@ -538,63 +646,188 @@ enum ocelot_tag_prefix {
};

struct ocelot;
+struct device_node;
struct ocelot_ops {
+ struct net_device *(*port_to_netdev)(struct ocelot *ocelot, int port);
+ int (*netdev_to_port)(struct net_device *dev);
int (*reset)(struct ocelot *ocelot);
u16 (*wm_enc)(u16 value);
+ u16 (*wm_dec)(u16 value);
+ void (*wm_stat)(u32 val, u32 *inuse, u32 *maxuse);
+ void (*psfp_init)(struct ocelot *ocelot);
+ int (*psfp_filter_add)(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f);
+ int (*psfp_filter_del)(struct ocelot *ocelot, struct flow_cls_offload *f);
+ int (*psfp_stats_get)(struct ocelot *ocelot, struct flow_cls_offload *f,
+ struct flow_stats *stats);
+ void (*cut_through_fwd)(struct ocelot *ocelot);
+ void (*tas_clock_adjust)(struct ocelot *ocelot);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
+ void (*update_stats)(struct ocelot *ocelot);
+};
+
+struct ocelot_vcap_policer {
+ struct list_head pol_list;
+ u16 base;
+ u16 max;
+ u16 base2;
+ u16 max2;
};
struct ocelot_vcap_block {
struct list_head rules;
int count;
- int pol_lpr;
};

+struct ocelot_bridge_vlan {
+ u16 vid;
+ unsigned long portmask;
+ unsigned long untagged;
+ struct list_head list;
+};
+
+enum ocelot_port_tag_config {
+ /* all VLANs are egress-untagged */
+ OCELOT_PORT_TAG_DISABLED = 0,
+ /* all VLANs except the native VLAN and VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_NATIVE = 1,
+ /* all VLANs except VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK_NO_VID0 = 2,
+ /* all VLANs are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK = 3,
+};
+
+struct ocelot_psfp_list {
+ struct list_head stream_list;
+ struct list_head sfi_list;
+ struct list_head sgi_list;
+ /* Serialize access to the lists */
+ struct mutex lock;
+};
+
+enum ocelot_sb {
+ OCELOT_SB_BUF,
+ OCELOT_SB_REF,
+ OCELOT_SB_NUM,
+};
+
+enum ocelot_sb_pool {
+ OCELOT_SB_POOL_ING,
+ OCELOT_SB_POOL_EGR,
+ OCELOT_SB_POOL_NUM,
+};
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACv4,
+ ENTRYTYPE_MACv6,
+};
+
+enum ocelot_proto {
+ OCELOT_PROTO_PTP_L2 = BIT(0),
+ OCELOT_PROTO_PTP_L4 = BIT(1),
+};
+
+#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0)
+#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1)
+
+struct ocelot_lag_fdb {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ struct net_device *bond;
+ struct list_head list;
+};
+
+struct ocelot_mirror {
+ refcount_t refcount;
+ int to;
+};
+
+struct ocelot_mm_state {
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ u8 preemptible_tcs;
+ u8 active_preemptible_tcs;
+};
+
+struct ocelot_port;
+
struct ocelot_port {
struct ocelot *ocelot;
struct regmap *target;
- bool vlan_aware;
+ struct net_device *bond;
+ struct net_device *bridge;
- /* Ingress default VLAN (pvid) */
- u16 pvid;
+ struct ocelot_port *dsa_8021q_cpu;
- /* Egress default VLAN (vid) */
- u16 vid;
+ /* VLAN that untagged frames are classified to, on ingress */
+ const struct ocelot_bridge_vlan *pvid_vlan;
- u8 ptp_cmd;
+ struct tc_taprio_qopt_offload *taprio;
+
+ phy_interface_t phy_mode;
+
+ unsigned int ptp_skbs_in_flight;
struct sk_buff_head tx_skbs;
+
+ unsigned int trap_proto;
+
+ u16 mrp_ring_id;
+
+ u8 ptp_cmd;
u8 ts_id;
- phy_interface_t phy_mode;
+ u8 index;
- u8 *xmit_template;
+ u8 stp_state;
+ bool vlan_aware;
+ bool is_dsa_8021q_cpu;
+ bool learn_ena;
+
+ bool lag_tx_active;
+
+ int bridge_num;
+
+ int speed;
};

struct ocelot {
struct device *dev;
+ struct devlink *devlink;
+ struct devlink_port *devlink_ports;
const struct ocelot_ops *ops;
struct regmap *targets[TARGET_MAX];
struct regmap_field *regfields[REGFIELD_MAX];
const u32 *const *map;
- const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
+ struct list_head stats_regions;
- int shared_queue_sz;
+ u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
+ int packet_buffer_size;
+ int num_frame_refs;
int num_mact_rows;
- struct net_device *hw_bridge_dev;
- u16 bridge_mask;
- u16 bridge_fwd_mask;
-
struct ocelot_port **ports;
u8 base_mac[ETH_ALEN];
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
+ struct list_head vlans;
+ struct list_head traps;
+ struct list_head lag_fdbs;
+
+ /* Switches like VSC9959 have flooding per traffic class */
+ int num_flooding_pgids;
/* In tables like ANA:PORT and the ANA:PGID:PGID mask,
* the CPU is located after the physical ports (at the
@@ -604,34 +837,55 @@ struct ocelot {
int npi;
- enum ocelot_tag_prefix inj_prefix;
- enum ocelot_tag_prefix xtr_prefix;
+ enum ocelot_tag_prefix npi_inj_prefix;
+ enum ocelot_tag_prefix npi_xtr_prefix;
- u32 *lags;
+ unsigned long bridges;
struct list_head multicast;
+ struct list_head pgids;
- struct ocelot_vcap_block block;
+ struct list_head dummy_rules;
+ struct ocelot_vcap_block block[3];
+ struct ocelot_vcap_policer vcap_pol;
+ struct vcap_props *vcap;
+ struct ocelot_mirror *mirror;
- const struct vcap_field *vcap_is2_keys;
- const struct vcap_field *vcap_is2_actions;
- const struct vcap_props *vcap;
+ struct ocelot_psfp_list psfp;
- /* Workqueue to check statistics for overflow with its lock */
- struct mutex stats_lock;
- u64 *stats;
+ /* Workqueue to check statistics for overflow */
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+ /* Lock for serializing access to the statistics array */
+ spinlock_t stats_lock;
+ u64 *stats;
+
+ /* Lock for serializing indirect access to STAT_VIEW registers */
+ struct mutex stat_view_lock;
+ /* Lock for serializing access to the MAC table */
+ struct mutex mact_lock;
+ /* Lock for serializing forwarding domain changes, including the
+ * configuration of the Time-Aware Shaper, MAC Merge layer and
+ * cut-through forwarding, on which it depends
+ */
+ struct mutex fwd_domain_lock;
+
+ struct workqueue_struct *owq;
u8 ptp:1;
+ u8 mm_supported:1;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
- struct hwtstamp_config hwtstamp_config;
- /* Protects the PTP interface state */
- struct mutex ptp_lock;
+ unsigned int ptp_skbs_in_flight;
+ /* Protects the 2-step TX timestamp ID logic */
+ spinlock_t ts_id_lock;
/* Protects the PTP clock */
spinlock_t ptp_clock_lock;
struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
+
+ struct ocelot_mm_state *mm;
+
+ struct ocelot_fdma *fdma;
};

struct ocelot_policer {
@@ -639,83 +893,181 @@ struct ocelot_policer {
u32 burst; /* bytes */
};

-#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
-#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+#define ocelot_bulk_read(ocelot, reg, buf, count) \
+ __ocelot_bulk_read_ix(ocelot, reg, 0, buf, count)
+
+#define ocelot_read_ix(ocelot, reg, gi, ri) \
+ __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) \
+ __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) \
+ __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) \
+ __ocelot_read_ix(ocelot, reg, 0)

-#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
-#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)

-#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
-#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)

-#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
-#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
-#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_field_write(ocelot, reg, val) \
+ regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) \
+ regmap_field_read((ocelot)->regfields[(reg)], (val))
+#define ocelot_fields_write(ocelot, id, reg, val) \
+ regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_fields_read(ocelot, id, reg, val) \
+ regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+
+#define ocelot_target_read_ix(ocelot, target, reg, gi, ri) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_target_read_gix(ocelot, target, reg, gi) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi))
+#define ocelot_target_read_rix(ocelot, target, reg, ri) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_RSZ * (ri))
+#define ocelot_target_read(ocelot, target, reg) \
+ __ocelot_target_read_ix(ocelot, target, reg, 0)
+
+#define ocelot_target_write_ix(ocelot, target, val, reg, gi, ri) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_target_write_gix(ocelot, target, val, reg, gi) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_GSZ * (gi))
+#define ocelot_target_write_rix(ocelot, target, val, reg, ri) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_RSZ * (ri))
+#define ocelot_target_write(ocelot, target, val, reg) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, 0)

/* I/O */
-u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
-void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
-u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
-void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
-void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
- u32 offset);
+u32 ocelot_port_readl(struct ocelot_port *port, enum ocelot_reg reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, enum ocelot_reg reg);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask,
+ enum ocelot_reg reg);
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, enum ocelot_reg reg,
+ u32 offset, void *buf, int count);
+u32 __ocelot_read_ix(struct ocelot *ocelot, enum ocelot_reg reg, u32 offset);
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, enum ocelot_reg reg,
+ u32 offset);
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask,
+ enum ocelot_reg reg, u32 offset);
+u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 reg, u32 offset);
+void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset);
+
+/* Packet I/O */
+bool ocelot_can_inject(struct ocelot *ocelot, int grp);
+void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ u32 rew_op, struct sk_buff *skb);
+void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag);
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
+void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
+void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ u64 timestamp);

/* Hardware initialization */
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
-void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+int ocelot_reset(struct ocelot *ocelot);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
+void ocelot_deinit_port(struct ocelot *ocelot, int port);
+
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
+u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
+
+/* Watermark interface */
+u16 ocelot_wm_enc(u16 value);
+u16 ocelot_wm_dec(u16 wm);
+void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse);

/* DSA callbacks */
-void ocelot_port_enable(struct ocelot *ocelot, int port,
- struct phy_device *phy);
-void ocelot_port_disable(struct ocelot *ocelot, int port);
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
+void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
+ struct rtnl_link_stats64 *stats);
+void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
+ struct ethtool_pause_stats *pause_stats);
+void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port,
+ struct ethtool_mm_stats *stats);
+void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
-void ocelot_adjust_link(struct ocelot *ocelot, int port,
- struct phy_device *phydev);
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware);
+int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
+ struct netlink_ext_ack *extack);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port);
+int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags val);
+void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags val);
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port);
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio);
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp);
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio);
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio);
int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge);
-int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
- struct net_device *bridge);
+ struct net_device *bridge, int bridge_num,
+ struct netlink_ext_ack *extack);
+void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_mact_flush(struct ocelot *ocelot, int port);
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data);
-int ocelot_fdb_add(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid);
-int ocelot_fdb_del(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid);
+int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge);
+int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge);
+int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge);
+int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge);
+int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged, struct netlink_ext_ack *extack);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb);
+int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ struct sk_buff *skb,
+ struct sk_buff **clone);
void ocelot_get_txtstamp(struct ocelot *ocelot);
void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu);
int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol);
int ocelot_port_policer_del(struct ocelot *ocelot, int port);
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+ bool ingress, struct netlink_ext_ack *extack);
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress);
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress);
int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
@@ -723,8 +1075,134 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress);
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge);
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+ struct net_device *bond,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
+ struct net_device *bond);
+void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active);
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond);
+
+int ocelot_devlink_sb_register(struct ocelot *ocelot);
+void ocelot_devlink_sb_unregister(struct ocelot *ocelot);
+int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack);
+int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index);
+int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index);
+int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+
+int ocelot_port_configure_serdes(struct ocelot *ocelot, int port,
+ struct device_node *portnp);
+
+void ocelot_phylink_mac_config(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state);
+void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ unsigned long quirks);
+void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause,
+ unsigned long quirks);
+
+int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type *type);
+int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type,
+ int sfid, int ssid);
+
+int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
+ unsigned long to_mask);
+
+int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol);
+int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix);
+
+void ocelot_mm_irq(struct ocelot *ocelot);
+int ocelot_port_set_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+int ocelot_port_get_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_state *state);
+int ocelot_port_mqprio(struct ocelot *ocelot, int port,
+ struct tc_mqprio_qopt_offload *mqprio);
+
+#if IS_ENABLED(CONFIG_BRIDGE_MRP)
+int ocelot_mrp_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp);
+int ocelot_mrp_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp);
+int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp);
+#else
+static inline int ocelot_mrp_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ocelot_mrp_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+void ocelot_pll5_init(struct ocelot *ocelot);
#endif
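
The accessors above now take enum ocelot_reg rather than raw u32 addresses, so passing a register from the wrong target becomes a type-visible mistake. A minimal sketch of the calling convention, assuming a kernel build context: ANA_PGID_PGID stands in as a replicated register from ocelot_ana.h, and SYS_RESET_CFG_CORE_ENA is assumed to be among the regfields enumerated before REGFIELD_MAX.

#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_ana.h>

/* Read one replication of a register: ocelot_read_rix() scales the
 * index by the register's _RSZ stride before the regmap access.
 */
static u32 example_read_pgid(struct ocelot *ocelot, int pgid)
{
	return ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
}

/* Single-bit updates go through the regfield table instead of an
 * open-coded read-modify-write.
 */
static void example_core_enable(struct ocelot *ocelot)
{
	ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
}
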
diff --git a/include/soc/mscc/ocelot_ana.h b/include/soc/mscc/ocelot_ana.h
index 841c6ec22b64..67e0ae05a5ab 100644
--- a/include/soc/mscc/ocelot_ana.h
+++ b/include/soc/mscc/ocelot_ana.h
@@ -227,6 +227,11 @@
#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)

+#define SFIDACCESS_CMD_IDLE 0
+#define SFIDACCESS_CMD_READ 1
+#define SFIDACCESS_CMD_WRITE 2
+#define SFIDACCESS_CMD_INIT 3
+
#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
@@ -252,10 +257,15 @@
#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 21) & GENMASK(24, 21))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(24, 21)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(24, 21)) >> 21)
+#define ANA_SG_CONFIG_REG_3_IPV_VALID BIT(24)
+#define ANA_SG_CONFIG_REG_3_IPV_INVALID(x) (((x) << 24) & GENMASK(24, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPV(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_SG_CONFIG_REG_3_INIT_IPV_M GENMASK(23, 21)
+#define ANA_SG_CONFIG_REG_3_INIT_IPV_X(x) (((x) & GENMASK(23, 21)) >> 21)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
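
Every multi-bit field in these headers follows the same three-macro convention: FIELD(x) encodes a value into position, FIELD_M is the in-place mask, and FIELD_X(x) extracts the value back out. A standalone sketch against the relocated INIT_IPV field; GENMASK() is redefined locally only so this builds outside the kernel tree.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & ((~0u) >> (31 - (h))))

#define ANA_SG_CONFIG_REG_3_INIT_IPV(x)   (((x) << 21) & GENMASK(23, 21))
#define ANA_SG_CONFIG_REG_3_INIT_IPV_M    GENMASK(23, 21)
#define ANA_SG_CONFIG_REG_3_INIT_IPV_X(x) (((x) & GENMASK(23, 21)) >> 21)

int main(void)
{
	uint32_t reg = 0;

	/* Encode IPV 5 into bits 23:21, then read it back out. */
	reg &= ~ANA_SG_CONFIG_REG_3_INIT_IPV_M;
	reg |= ANA_SG_CONFIG_REG_3_INIT_IPV(5);
	printf("reg=0x%08x ipv=%u\n", (unsigned)reg,
	       (unsigned)ANA_SG_CONFIG_REG_3_INIT_IPV_X(reg));
	return 0;
}
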
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
index 0c6021f02fee..fcf02baa76b2 100644
--- a/include/soc/mscc/ocelot_dev.h
+++ b/include/soc/mscc/ocelot_dev.h
@@ -93,6 +93,29 @@
#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8)
+
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0)
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4)
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12))
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12)
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+
+#define DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8))
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12)
+#define DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16)
+#define DEV_MM_STAT_MM_STATUS_MM_RX_FRAME_STATUS BIT(20)
+#define DEV_MM_STAT_MM_STATUS_MM_TX_FRAME_STATUS BIT(24)
+#define DEV_MM_STAT_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28)
+
#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
#define PCS1G_CFG_PCS_ENA BIT(0)
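
The new DEV_MM_* words pair with the DEV_MM_ENABLE_CONFIG, DEV_MM_VERIF_CONFIG and DEV_MM_STATUS entries added to enum ocelot_reg earlier in this series. A sketch of programming the preemption verification time through ocelot_port_rmwl(); the time value and its unit handling are illustrative only, the real driver derives them from the link mode.

#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_dev.h>

static void example_set_verify_time(struct ocelot_port *ocelot_port, u8 ms)
{
	/* Write only the PRM_VERIFY_TIME field, preserving the rest. */
	ocelot_port_rmwl(ocelot_port,
			 DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(ms),
			 DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M,
			 DEV_MM_VERIF_CONFIG);
}
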
diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h
index 4a6b2f71b6b2..f085884b1fa2 100644
--- a/include/soc/mscc/ocelot_ptp.h
+++ b/include/soc/mscc/ocelot_ptp.h
@@ -13,6 +13,9 @@
#include <linux/ptp_clock_kernel.h>
#include <soc/mscc/ocelot.h>
+#define OCELOT_MAX_PTP_ID 63
+#define OCELOT_PTP_FIFO_SIZE 128
+
#define PTP_PIN_CFG_RSZ 0x20
#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
@@ -37,8 +40,6 @@ enum {
#define PTP_CFG_MISC_PTP_EN BIT(2)
-#define PSEC_PER_SEC 1000000000000LL
-
#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
@@ -53,6 +54,7 @@ int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
int ocelot_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
-int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info);
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info);
int ocelot_deinit_timestamp(struct ocelot *ocelot);
#endif
diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h
index a814bc2017d8..9731895be643 100644
--- a/include/soc/mscc/ocelot_qsys.h
+++ b/include/soc/mscc/ocelot_qsys.h
@@ -71,11 +71,8 @@
#define QSYS_RES_STAT_GSZ 0x8
-#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
-#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
-#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
-#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
+#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(x) ((x) & GENMASK(15, 0))
+#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT_M GENMASK(15, 0)
#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 5748373ab4d3..c601a4598b0d 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -6,17 +6,36 @@
#ifndef _OCELOT_VCAP_H_
#define _OCELOT_VCAP_H_
+#include <soc/mscc/ocelot.h>
+
+/* Cookie definitions for private VCAP filters installed by the driver.
+ * Must be unique per VCAP block.
+ */
+#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
+#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
+#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
+#define OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 1)
+#define OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 2)
+#define OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 3)
+#define OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 4)
+#define OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 5)
+
/* =================================================================
* VCAP Common
* =================================================================
*/
enum {
- /* VCAP_IS1, */
+ VCAP_ES0,
+ VCAP_IS1,
VCAP_IS2,
- /* VCAP_ES0, */
+ __VCAP_COUNT,
};
+#define OCELOT_NUM_VCAP_BLOCKS __VCAP_COUNT
+
struct vcap_props {
u16 tg_width; /* Type-group width (in bits) */
u16 sw_count; /* Sub word count */
@@ -33,6 +52,11 @@ struct vcap_props {
} action_table[2];
u16 counter_words; /* Number of counter words */
u16 counter_width; /* Counter width (in bits) */
+
+ enum ocelot_target target;
+
+ const struct vcap_field *keys;
+ const struct vcap_field *actions;
};
/* VCAP Type-Group values */
@@ -41,6 +65,61 @@ struct vcap_props {
#define VCAP_TG_HALF 2 /* Half entry */
#define VCAP_TG_QUARTER 3 /* Quarter entry */
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+
+#define VCAP_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
+#define VCAP_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
+#define VCAP_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define VCAP_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
+#define VCAP_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
+
+#define VCAP_CACHE_ENTRY_DAT_RSZ 0x4
+
+#define VCAP_CACHE_MASK_DAT_RSZ 0x4
+
+#define VCAP_CACHE_ACTION_DAT_RSZ 0x4
+
+#define VCAP_CACHE_CNT_DAT_RSZ 0x4
+
+#define VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+
+#define TCAM_BIST_CTRL_TCAM_BIST BIT(1)
+#define TCAM_BIST_CTRL_TCAM_INIT BIT(0)
+
+#define TCAM_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
+#define TCAM_BIST_CFG_TCAM_HCG_DIS BIT(7)
+#define TCAM_BIST_CFG_TCAM_CG_DIS BIT(6)
+#define TCAM_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
+#define TCAM_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
+
+#define TCAM_BIST_STAT_BIST_RT_ERR BIT(15)
+#define TCAM_BIST_STAT_BIST_PENC_ERR BIT(14)
+#define TCAM_BIST_STAT_BIST_COMP_ERR BIT(13)
+#define TCAM_BIST_STAT_BIST_ADDR_ERR BIT(12)
+#define TCAM_BIST_STAT_BIST_BL1E_ERR BIT(11)
+#define TCAM_BIST_STAT_BIST_BL1_ERR BIT(10)
+#define TCAM_BIST_STAT_BIST_BL0E_ERR BIT(9)
+#define TCAM_BIST_STAT_BIST_BL0_ERR BIT(8)
+#define TCAM_BIST_STAT_BIST_PH1_ERR BIT(7)
+#define TCAM_BIST_STAT_BIST_PH0_ERR BIT(6)
+#define TCAM_BIST_STAT_BIST_PV1_ERR BIT(5)
+#define TCAM_BIST_STAT_BIST_PV0_ERR BIT(4)
+#define TCAM_BIST_STAT_BIST_RUN BIT(3)
+#define TCAM_BIST_STAT_BIST_ERR BIT(2)
+#define TCAM_BIST_STAT_BIST_BUSY BIT(1)
+#define TCAM_BIST_STAT_TCAM_RDY BIT(0)
+
/* =================================================================
* VCAP IS2
* =================================================================
@@ -202,4 +281,451 @@ enum vcap_is2_action_field {
VCAP_IS2_ACT_HIT_CNT,
};

+/* =================================================================
+ * VCAP IS1
+ * =================================================================
+ */
+
+/* IS1 half key types */
+#define IS1_TYPE_S1_NORMAL 0
+#define IS1_TYPE_S1_5TUPLE_IP4 1
+
+/* IS1 full key types */
+#define IS1_TYPE_S1_NORMAL_IP6 0
+#define IS1_TYPE_S1_7TUPLE 1
+#define IS2_TYPE_S1_5TUPLE_IP6 2
+
+enum {
+ IS1_ACTION_TYPE_NORMAL,
+ IS1_ACTION_TYPE_MAX,
+};
+
+enum vcap_is1_half_key_field {
+ VCAP_IS1_HK_TYPE,
+ VCAP_IS1_HK_LOOKUP,
+ VCAP_IS1_HK_IGR_PORT_MASK,
+ VCAP_IS1_HK_RSV,
+ VCAP_IS1_HK_OAM_Y1731,
+ VCAP_IS1_HK_L2_MC,
+ VCAP_IS1_HK_L2_BC,
+ VCAP_IS1_HK_IP_MC,
+ VCAP_IS1_HK_VLAN_TAGGED,
+ VCAP_IS1_HK_VLAN_DBL_TAGGED,
+ VCAP_IS1_HK_TPID,
+ VCAP_IS1_HK_VID,
+ VCAP_IS1_HK_DEI,
+ VCAP_IS1_HK_PCP,
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ VCAP_IS1_HK_L2_SMAC,
+ VCAP_IS1_HK_ETYPE_LEN,
+ VCAP_IS1_HK_ETYPE,
+ VCAP_IS1_HK_IP_SNAP,
+ VCAP_IS1_HK_IP4,
+ VCAP_IS1_HK_L3_FRAGMENT,
+ VCAP_IS1_HK_L3_FRAG_OFS_GT0,
+ VCAP_IS1_HK_L3_OPTIONS,
+ VCAP_IS1_HK_L3_DSCP,
+ VCAP_IS1_HK_L3_IP4_SIP,
+ VCAP_IS1_HK_TCP_UDP,
+ VCAP_IS1_HK_TCP,
+ VCAP_IS1_HK_L4_SPORT,
+ VCAP_IS1_HK_L4_RNG,
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ VCAP_IS1_HK_IP4_INNER_TPID,
+ VCAP_IS1_HK_IP4_INNER_VID,
+ VCAP_IS1_HK_IP4_INNER_DEI,
+ VCAP_IS1_HK_IP4_INNER_PCP,
+ VCAP_IS1_HK_IP4_IP4,
+ VCAP_IS1_HK_IP4_L3_FRAGMENT,
+ VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0,
+ VCAP_IS1_HK_IP4_L3_OPTIONS,
+ VCAP_IS1_HK_IP4_L3_DSCP,
+ VCAP_IS1_HK_IP4_L3_IP4_DIP,
+ VCAP_IS1_HK_IP4_L3_IP4_SIP,
+ VCAP_IS1_HK_IP4_L3_PROTO,
+ VCAP_IS1_HK_IP4_TCP_UDP,
+ VCAP_IS1_HK_IP4_TCP,
+ VCAP_IS1_HK_IP4_L4_RNG,
+ VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE,
+};
+
+enum vcap_is1_action_field {
+ VCAP_IS1_ACT_DSCP_ENA,
+ VCAP_IS1_ACT_DSCP_VAL,
+ VCAP_IS1_ACT_QOS_ENA,
+ VCAP_IS1_ACT_QOS_VAL,
+ VCAP_IS1_ACT_DP_ENA,
+ VCAP_IS1_ACT_DP_VAL,
+ VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
+ VCAP_IS1_ACT_PAG_VAL,
+ VCAP_IS1_ACT_RSV,
+ VCAP_IS1_ACT_VID_REPLACE_ENA,
+ VCAP_IS1_ACT_VID_ADD_VAL,
+ VCAP_IS1_ACT_FID_SEL,
+ VCAP_IS1_ACT_FID_VAL,
+ VCAP_IS1_ACT_PCP_DEI_ENA,
+ VCAP_IS1_ACT_PCP_VAL,
+ VCAP_IS1_ACT_DEI_VAL,
+ VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
+ VCAP_IS1_ACT_VLAN_POP_CNT,
+ VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA,
+ VCAP_IS1_ACT_HIT_STICKY,
+};
+
+/* =================================================================
+ * VCAP ES0
+ * =================================================================
+ */
+
+enum {
+ ES0_ACTION_TYPE_NORMAL,
+ ES0_ACTION_TYPE_MAX,
+};
+
+enum vcap_es0_key_field {
+ VCAP_ES0_EGR_PORT,
+ VCAP_ES0_IGR_PORT,
+ VCAP_ES0_RSV,
+ VCAP_ES0_L2_MC,
+ VCAP_ES0_L2_BC,
+ VCAP_ES0_VID,
+ VCAP_ES0_DP,
+ VCAP_ES0_PCP,
+};
+
+enum vcap_es0_action_field {
+ VCAP_ES0_ACT_PUSH_OUTER_TAG,
+ VCAP_ES0_ACT_PUSH_INNER_TAG,
+ VCAP_ES0_ACT_TAG_A_TPID_SEL,
+ VCAP_ES0_ACT_TAG_A_VID_SEL,
+ VCAP_ES0_ACT_TAG_A_PCP_SEL,
+ VCAP_ES0_ACT_TAG_A_DEI_SEL,
+ VCAP_ES0_ACT_TAG_B_TPID_SEL,
+ VCAP_ES0_ACT_TAG_B_VID_SEL,
+ VCAP_ES0_ACT_TAG_B_PCP_SEL,
+ VCAP_ES0_ACT_TAG_B_DEI_SEL,
+ VCAP_ES0_ACT_VID_A_VAL,
+ VCAP_ES0_ACT_PCP_A_VAL,
+ VCAP_ES0_ACT_DEI_A_VAL,
+ VCAP_ES0_ACT_VID_B_VAL,
+ VCAP_ES0_ACT_PCP_B_VAL,
+ VCAP_ES0_ACT_DEI_B_VAL,
+ VCAP_ES0_ACT_RSV,
+ VCAP_ES0_ACT_HIT_STICKY,
+};
+
+struct ocelot_ipv4 {
+ u8 addr[4];
+};
+
+enum ocelot_vcap_bit {
+ OCELOT_VCAP_BIT_ANY,
+ OCELOT_VCAP_BIT_0,
+ OCELOT_VCAP_BIT_1
+};
+
+struct ocelot_vcap_u8 {
+ u8 value[1];
+ u8 mask[1];
+};
+
+struct ocelot_vcap_u16 {
+ u8 value[2];
+ u8 mask[2];
+};
+
+struct ocelot_vcap_u24 {
+ u8 value[3];
+ u8 mask[3];
+};
+
+struct ocelot_vcap_u32 {
+ u8 value[4];
+ u8 mask[4];
+};
+
+struct ocelot_vcap_u40 {
+ u8 value[5];
+ u8 mask[5];
+};
+
+struct ocelot_vcap_u48 {
+ u8 value[6];
+ u8 mask[6];
+};
+
+struct ocelot_vcap_u64 {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct ocelot_vcap_u128 {
+ u8 value[16];
+ u8 mask[16];
+};
+
+struct ocelot_vcap_vid {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_ipv4 {
+ struct ocelot_ipv4 value;
+ struct ocelot_ipv4 mask;
+};
+
+struct ocelot_vcap_udp_tcp {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_port {
+ u8 value;
+ u8 mask;
+};
+
+enum ocelot_vcap_key_type {
+ OCELOT_VCAP_KEY_ANY,
+ OCELOT_VCAP_KEY_ETYPE,
+ OCELOT_VCAP_KEY_LLC,
+ OCELOT_VCAP_KEY_SNAP,
+ OCELOT_VCAP_KEY_ARP,
+ OCELOT_VCAP_KEY_IPV4,
+ OCELOT_VCAP_KEY_IPV6
+};
+
+struct ocelot_vcap_key_vlan {
+ struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
+ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
+ enum ocelot_vcap_bit dei; /* DEI */
+ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+};
+
+struct ocelot_vcap_key_etype {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+ struct ocelot_vcap_u16 etype;
+ struct ocelot_vcap_u16 data; /* MAC data */
+};
+
+struct ocelot_vcap_key_llc {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
+ struct ocelot_vcap_u32 llc;
+};
+
+struct ocelot_vcap_key_snap {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* SNAP header: Organization Code at byte 0, Type at byte 3 */
+ struct ocelot_vcap_u40 snap;
+};
+
+struct ocelot_vcap_key_arp {
+ struct ocelot_vcap_u48 smac;
+ enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
+ enum ocelot_vcap_bit req; /* Opcode request/reply */
+ enum ocelot_vcap_bit unknown; /* Opcode unknown */
+ enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
+ enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
+
+ /**< Protocol addr. length 4, hardware length 6 */
+ enum ocelot_vcap_bit length;
+
+ enum ocelot_vcap_bit ip; /* Protocol address type IP */
+ enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
+ struct ocelot_vcap_ipv4 sip; /* Sender IP address */
+ struct ocelot_vcap_ipv4 dip; /* Target IP address */
+};
+
+struct ocelot_vcap_key_ipv4 {
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ enum ocelot_vcap_bit fragment; /* Fragment */
+ enum ocelot_vcap_bit options; /* Header options */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u8 proto; /* Protocol */
+ struct ocelot_vcap_ipv4 sip; /* Source IP address */
+ struct ocelot_vcap_ipv4 dip; /* Destination IP address */
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
+ struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+struct ocelot_vcap_key_ipv6 {
+ struct ocelot_vcap_u8 proto; /* IPv6 protocol */
+ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport;
+ struct ocelot_vcap_udp_tcp dport;
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+enum ocelot_mask_mode {
+ OCELOT_MASK_MODE_NONE,
+ OCELOT_MASK_MODE_PERMIT_DENY,
+ OCELOT_MASK_MODE_POLICY,
+ OCELOT_MASK_MODE_REDIRECT,
+};
+
+enum ocelot_es0_vid_sel {
+ OCELOT_ES0_VID_PLUS_CLASSIFIED_VID = 0,
+ OCELOT_ES0_VID = 1,
+};
+
+enum ocelot_es0_pcp_sel {
+ OCELOT_CLASSIFIED_PCP = 0,
+ OCELOT_ES0_PCP = 1,
+};
+
+enum ocelot_es0_tag {
+ OCELOT_NO_ES0_TAG,
+ OCELOT_ES0_TAG,
+ OCELOT_FORCE_PORT_TAG,
+ OCELOT_FORCE_UNTAG,
+};
+
+enum ocelot_tag_tpid_sel {
+ OCELOT_TAG_TPID_SEL_8021Q,
+ OCELOT_TAG_TPID_SEL_8021AD,
+};
+
+struct ocelot_vcap_action {
+ union {
+ /* VCAP ES0 */
+ struct {
+ enum ocelot_es0_tag push_outer_tag;
+ enum ocelot_es0_tag push_inner_tag;
+ enum ocelot_tag_tpid_sel tag_a_tpid_sel;
+ int tag_a_vid_sel;
+ int tag_a_pcp_sel;
+ u16 vid_a_val;
+ u8 pcp_a_val;
+ u8 dei_a_val;
+ enum ocelot_tag_tpid_sel tag_b_tpid_sel;
+ int tag_b_vid_sel;
+ int tag_b_pcp_sel;
+ u16 vid_b_val;
+ u8 pcp_b_val;
+ u8 dei_b_val;
+ };
+
+ /* VCAP IS1 */
+ struct {
+ bool vid_replace_ena;
+ u16 vid;
+ bool vlan_pop_cnt_ena;
+ int vlan_pop_cnt;
+ bool pcp_dei_ena;
+ u8 pcp;
+ u8 dei;
+ bool qos_ena;
+ u8 qos_val;
+ u8 pag_override_mask;
+ u8 pag_val;
+ };
+
+ /* VCAP IS2 */
+ struct {
+ bool cpu_copy_ena;
+ u8 cpu_qu_num;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool police_ena;
+ bool mirror_ena;
+ struct ocelot_policer pol;
+ u32 pol_ix;
+ };
+ };
+};
+
+struct ocelot_vcap_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+enum ocelot_vcap_filter_type {
+ OCELOT_VCAP_FILTER_DUMMY,
+ OCELOT_VCAP_FILTER_PAG,
+ OCELOT_VCAP_FILTER_OFFLOAD,
+ OCELOT_PSFP_FILTER_OFFLOAD,
+};
+
+struct ocelot_vcap_id {
+ unsigned long cookie;
+ bool tc_offload;
+};
+
+struct ocelot_vcap_filter {
+ struct list_head list;
+
+ enum ocelot_vcap_filter_type type;
+ int block_id;
+ int goto_target;
+ int lookup;
+ u8 pag;
+ u16 prio;
+ struct ocelot_vcap_id id;
+
+ struct ocelot_vcap_action action;
+ struct ocelot_vcap_stats stats;
+ /* For VCAP IS1 and IS2 */
+ bool take_ts;
+ bool is_trap;
+ unsigned long ingress_port_mask;
+ /* For VCAP ES0 */
+ struct ocelot_vcap_port ingress_port;
+ /* For VCAP IS2 mirrors and ES0 */
+ struct ocelot_vcap_port egress_port;
+
+ enum ocelot_vcap_bit dmac_mc;
+ enum ocelot_vcap_bit dmac_bc;
+ struct ocelot_vcap_key_vlan vlan;
+
+ enum ocelot_vcap_key_type key_type;
+ union {
+ /* OCELOT_VCAP_KEY_ANY: No specific fields */
+ struct ocelot_vcap_key_etype etype;
+ struct ocelot_vcap_key_llc llc;
+ struct ocelot_vcap_key_snap snap;
+ struct ocelot_vcap_key_arp arp;
+ struct ocelot_vcap_key_ipv4 ipv4;
+ struct ocelot_vcap_key_ipv6 ipv6;
+ } key;
+};
+
+int ocelot_vcap_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule,
+ struct netlink_ext_ack *extack);
+int ocelot_vcap_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter);
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+ unsigned long cookie, bool tc_offload);
+
#endif /* _OCELOT_VCAP_H_ */
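
Taken together, the structures above are enough to compose a filter offload without going through tc. A hedged sketch, assuming a kernel context and that the chosen cookie is free in the block (whether the driver accepts this exact combination depends on runtime state): trap ARP requests received on port 0 to the CPU.

#include <linux/bits.h>
#include <linux/slab.h>
#include <soc/mscc/ocelot_vcap.h>

static int example_trap_arp(struct ocelot *ocelot,
			    struct netlink_ext_ack *extack)
{
	struct ocelot_vcap_filter *filter;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
	filter->block_id = VCAP_IS2;
	filter->lookup = 0;
	filter->prio = 1;
	filter->id.cookie = 1;		/* must be unique within the block */
	filter->ingress_port_mask = BIT(0);
	filter->is_trap = true;

	filter->key_type = OCELOT_VCAP_KEY_ARP;
	filter->key.arp.req = OCELOT_VCAP_BIT_1;

	filter->action.cpu_copy_ena = true;
	filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
	filter->action.port_mask = 0;	/* CPU only, no forwarding */

	return ocelot_vcap_filter_add(ocelot, filter, extack);
}
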
diff --git a/include/soc/mscc/vsc7514_regs.h b/include/soc/mscc/vsc7514_regs.h
new file mode 100644
index 000000000000..ffe343a9c04b
--- /dev/null
+++ b/include/soc/mscc/vsc7514_regs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2021 Innovative Advantage Inc.
+ */
+
+#ifndef VSC7514_REGS_H
+#define VSC7514_REGS_H
+
+#include <soc/mscc/ocelot_vcap.h>
+
+extern struct vcap_props vsc7514_vcap_props[];
+
+extern const struct reg_field vsc7514_regfields[REGFIELD_MAX];
+
+extern const u32 *vsc7514_regmap[TARGET_MAX];
+
+#endif
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
deleted file mode 100644
index 9b1d43d671a3..000000000000
--- a/include/soc/nps/common.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_COMMON_H
-#define SOC_NPS_COMMON_H
-
-#ifdef CONFIG_SMP
-#define NPS_IPI_IRQ 5
-#endif
-
-#define NPS_HOST_REG_BASE 0xF6000000
-
-#define NPS_MSU_BLKID 0x018
-
-#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
-
-#ifndef __ASSEMBLY__
-
-/* In order to increase compilation test coverage */
-#ifdef CONFIG_ARC
-static inline void nps_ack_gic(void)
-{
- __asm__ __volatile__ (
- " .word %0\n"
- :
- : "i"(CTOP_INST_RSPI_GIC_0_R12)
- : "memory");
-}
-#else
-static inline void nps_ack_gic(void) { }
-#define write_aux_reg(r, v)
-#define read_aux_reg(r) 0
-#endif
-
-/* CPU global ID */
-struct global_id {
- union {
- struct {
-#ifdef CONFIG_EZNPS_MTM_EXT
- u32 __reserved:20, cluster:4, core:4, thread:4;
-#else
- u32 __reserved:24, cluster:4, core:4;
-#endif
- };
- u32 value;
- };
-};
-
-/*
- * Convert logical to physical CPU IDs
- *
- * The conversion swap bits 1 and 2 of cluster id (out of 4 bits)
- * Now quad of logical clusters id's are adjacent physically,
- * and not like the id's physically came with each cluster.
- * Below table is 4x4 mesh of core clusters as it layout on chip.
- * Cluster ids are in format: logical (physical)
- *
- * ----------------- ------------------
- * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)|
- *
- * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)|
- * ----------------- ------------------
- * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)|
- *
- * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)|
- * ----------------- ------------------
- * 0 1 2 3
- */
-static inline int nps_cluster_logic_to_phys(int cluster)
-{
-#ifdef __arc__
- __asm__ __volatile__(
- " mov r3,%0\n"
- " .short %1\n"
- " .word %2\n"
- " mov %0,r3\n"
- : "+r"(cluster)
- : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
- "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
- : "r3");
-#endif
-
- return cluster;
-}
-
-#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
- ({ struct global_id gid; gid.value = cpu; \
- nps_cluster_logic_to_phys(gid.cluster); })
-
-struct nps_host_reg_address {
- union {
- struct {
- u32 base:8, cl_x:4, cl_y:4,
- blkid:6, reg:8, __reserved:2;
- };
- u32 value;
- };
-};
-
-struct nps_host_reg_address_non_cl {
- union {
- struct {
- u32 base:7, blkid:11, reg:12, __reserved:2;
- };
- u32 value;
- };
-};
-
-static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
-{
- struct nps_host_reg_address_non_cl reg_address;
-
- reg_address.value = NPS_HOST_REG_BASE;
- reg_address.blkid = blkid;
- reg_address.reg = reg;
-
- return (void *)reg_address.value;
-}
-
-static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
-{
- struct nps_host_reg_address reg_address;
- u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
-
- reg_address.value = NPS_HOST_REG_BASE;
- reg_address.cl_x = (cl >> 2) & 0x3;
- reg_address.cl_y = cl & 0x3;
- reg_address.blkid = blkid;
- reg_address.reg = reg;
-
- return (void *)reg_address.value;
-}
-#endif /* __ASSEMBLY__ */
-
-#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
deleted file mode 100644
index d2f5e7e3703e..000000000000
--- a/include/soc/nps/mtm.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_MTM_H
-#define SOC_NPS_MTM_H
-
-#define CTOP_INST_HWSCHD_OFF_R3 0x3B6F00BF
-#define CTOP_INST_HWSCHD_RESTORE_R3 0x3E6F70C3
-
-static inline void hw_schd_save(unsigned int *flags)
-{
- __asm__ __volatile__(
- " .word %1\n"
- " st r3,[%0]\n"
- :
- : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
- : "r3", "memory");
-}
-
-static inline void hw_schd_restore(unsigned int flags)
-{
- __asm__ __volatile__(
- " mov r3, %0\n"
- " .word %1\n"
- :
- : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
- : "r3");
-}
-
-#endif /* SOC_NPS_MTM_H */
diff --git a/include/soc/qcom/ice.h b/include/soc/qcom/ice.h
new file mode 100644
index 000000000000..5870a94599a2
--- /dev/null
+++ b/include/soc/qcom/ice.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __QCOM_ICE_H__
+#define __QCOM_ICE_H__
+
+#include <linux/types.h>
+
+struct qcom_ice;
+
+enum qcom_ice_crypto_key_size {
+ QCOM_ICE_CRYPTO_KEY_SIZE_INVALID = 0x0,
+ QCOM_ICE_CRYPTO_KEY_SIZE_128 = 0x1,
+ QCOM_ICE_CRYPTO_KEY_SIZE_192 = 0x2,
+ QCOM_ICE_CRYPTO_KEY_SIZE_256 = 0x3,
+ QCOM_ICE_CRYPTO_KEY_SIZE_512 = 0x4,
+};
+
+enum qcom_ice_crypto_alg {
+ QCOM_ICE_CRYPTO_ALG_AES_XTS = 0x0,
+ QCOM_ICE_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
+ QCOM_ICE_CRYPTO_ALG_AES_ECB = 0x2,
+ QCOM_ICE_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
+};
+
+int qcom_ice_enable(struct qcom_ice *ice);
+int qcom_ice_resume(struct qcom_ice *ice);
+int qcom_ice_suspend(struct qcom_ice *ice);
+int qcom_ice_program_key(struct qcom_ice *ice,
+ u8 algorithm_id, u8 key_size,
+ const u8 crypto_key[], u8 data_unit_size,
+ int slot);
+int qcom_ice_evict_key(struct qcom_ice *ice, int slot);
+struct qcom_ice *of_qcom_ice_get(struct device *dev);
+#endif /* __QCOM_ICE_H__ */
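
A sketch of the consumer flow implied by this API. The error semantics of of_qcom_ice_get() (NULL when the device tree describes no ICE instance, an ERR_PTR on failure) and the 512-byte data-unit granularity are assumptions here, not guarantees of this header.

#include <linux/device.h>
#include <linux/err.h>
#include <soc/qcom/ice.h>

static int example_ice_setup(struct device *dev, const u8 *key, int slot)
{
	struct qcom_ice *ice = of_qcom_ice_get(dev);
	int err;

	if (!ice)
		return 0;		/* no ICE instance: nothing to do */
	if (IS_ERR(ice))
		return PTR_ERR(ice);

	err = qcom_ice_enable(ice);
	if (err)
		return err;

	/* AES-256-XTS key into the given slot; 4096-byte data units
	 * expressed in 512-byte granules, per the assumed convention.
	 */
	return qcom_ice_program_key(ice, QCOM_ICE_CRYPTO_ALG_AES_XTS,
				    QCOM_ICE_CRYPTO_KEY_SIZE_256,
				    key, 4096 / 512, slot);
}
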
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
new file mode 100644
index 000000000000..a62d500a6fda
--- /dev/null
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Linaro. All rights reserved.
+ * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ */
+
+#ifndef __QCOM_SPMI_PMIC_H__
+#define __QCOM_SPMI_PMIC_H__
+
+#include <linux/device.h>
+
+#define COMMON_SUBTYPE 0x00
+#define PM8941_SUBTYPE 0x01
+#define PM8841_SUBTYPE 0x02
+#define PM8019_SUBTYPE 0x03
+#define PM8226_SUBTYPE 0x04
+#define PM8110_SUBTYPE 0x05
+#define PMA8084_SUBTYPE 0x06
+#define PMI8962_SUBTYPE 0x07
+#define PMD9635_SUBTYPE 0x08
+#define PM8994_SUBTYPE 0x09
+#define PMI8994_SUBTYPE 0x0a
+#define PM8916_SUBTYPE 0x0b
+#define PM8004_SUBTYPE 0x0c
+#define PM8909_SUBTYPE 0x0d
+#define PM8028_SUBTYPE 0x0e
+#define PM8901_SUBTYPE 0x0f
+#define PM8950_SUBTYPE 0x10
+#define PMI8950_SUBTYPE 0x11
+#define PMK8001_SUBTYPE 0x12
+#define PMI8996_SUBTYPE 0x13
+#define PM8998_SUBTYPE 0x14
+#define PMI8998_SUBTYPE 0x15
+#define PM8005_SUBTYPE 0x18
+#define PM8937_SUBTYPE 0x19
+#define PM660L_SUBTYPE 0x1a
+#define PM660_SUBTYPE 0x1b
+#define PM8150_SUBTYPE 0x1e
+#define PM8150L_SUBTYPE 0x1f
+#define PM8150B_SUBTYPE 0x20
+#define PMK8002_SUBTYPE 0x21
+#define PM8009_SUBTYPE 0x24
+#define PMI632_SUBTYPE 0x25
+#define PM8150C_SUBTYPE 0x26
+#define PM6150_SUBTYPE 0x28
+#define SMB2351_SUBTYPE 0x29
+#define PM8008_SUBTYPE 0x2c
+#define PM6125_SUBTYPE 0x2d
+#define PM7250B_SUBTYPE 0x2e
+#define PMK8350_SUBTYPE 0x2f
+#define PMR735B_SUBTYPE 0x34
+#define PM6350_SUBTYPE 0x36
+#define PM4125_SUBTYPE 0x37
+
+#define PMI8998_FAB_ID_SMIC 0x11
+#define PMI8998_FAB_ID_GF 0x30
+
+#define PM660_FAB_ID_GF 0x0
+#define PM660_FAB_ID_TSMC 0x2
+#define PM660_FAB_ID_MX 0x3
+
+struct qcom_spmi_pmic {
+ unsigned int type;
+ unsigned int subtype;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int rev2;
+ unsigned int fab_id;
+ const char *name;
+};
+
+const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev);
+
+#endif /* __QCOM_SPMI_PMIC_H__ */
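
A sketch of branching on the revision data this header exposes, for example to apply a fab-specific quirk. That qcom_pmic_get() returns an ERR_PTR until the parent PMIC device is ready is an assumption here.

#include <linux/err.h>
#include <soc/qcom/qcom-spmi-pmic.h>

static bool example_is_gf_pmi8998(struct device *dev)
{
	const struct qcom_spmi_pmic *pmic = qcom_pmic_get(dev);

	if (IS_ERR(pmic))
		return false;

	return pmic->subtype == PMI8998_SUBTYPE &&
	       pmic->fab_id == PMI8998_FAB_ID_GF;
}
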
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 000000000000..5b263c685812
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ */
+
+#ifndef __SPM_H__
+#define __SPM_H__
+
+enum pm_sleep_mode {
+ PM_SLEEP_MODE_STBY,
+ PM_SLEEP_MODE_RET,
+ PM_SLEEP_MODE_SPC,
+ PM_SLEEP_MODE_PC,
+ PM_SLEEP_MODE_NR,
+};
+
+struct spm_driver_data;
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode);
+
+#endif /* __SPM_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
index 7a2a055ba6b0..3acca067c72b 100644
--- a/include/soc/qcom/tcs.h
+++ b/include/soc/qcom/tcs.h
@@ -30,7 +30,13 @@ enum rpmh_state {
*
* @addr: the address of the resource slv_id:18:16 | offset:0:15
* @data: the resource state request
- * @wait: wait for this request to be complete before sending the next
+ * @wait: ensure that this command is complete before returning.
+ * Setting "wait" here only makes sense during rpmh_write_batch() for
+ * active-only transfers; this is because:
+ * rpmh_write() - Always waits.
+ * (DEFINE_RPMH_MSG_ONSTACK will set .wait_for_compl)
+ * rpmh_write_async() - Never waits.
+ * (There's no request completion callback)
*/
struct tcs_cmd {
u32 addr;
@@ -43,6 +49,7 @@ struct tcs_cmd {
*
* @state: state for the request.
* @wait_for_compl: wait until we get a response from the h/w accelerator
+ * (same as setting cmd->wait for all commands in the request)
* @num_cmds: the number of @cmds in this request
* @cmds: an array of tcs_cmds
*/
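Editor's note: to illustrate the distinction, a hedged sketch of an active-only batch where only the first command fences the second; rpmh_write_batch() is declared in <soc/qcom/rpmh.h> and its use here is illustrative, with made-up addresses.

	static int example_batch(const struct device *dev)
	{
		struct tcs_cmd cmds[] = {
			{ .addr = 0x50000, .data = 0x1, .wait = true },	/* fence */
			{ .addr = 0x50004, .data = 0x2 },
		};
		u32 num_cmds[] = { ARRAY_SIZE(cmds) };

		return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE,
					cmds, num_cmds);
	}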
diff --git a/include/soc/rockchip/pm_domains.h b/include/soc/rockchip/pm_domains.h
new file mode 100644
index 000000000000..7dbd941fc937
--- /dev/null
+++ b/include/soc/rockchip/pm_domains.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2022, The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef __SOC_ROCKCHIP_PM_DOMAINS_H__
+#define __SOC_ROCKCHIP_PM_DOMAINS_H__
+
+#ifdef CONFIG_ROCKCHIP_PM_DOMAINS
+
+int rockchip_pmu_block(void);
+void rockchip_pmu_unblock(void);
+
+#else /* CONFIG_ROCKCHIP_PM_DOMAINS */
+
+static inline int rockchip_pmu_block(void)
+{
+ return 0;
+}
+
+static inline void rockchip_pmu_unblock(void) { }
+
+#endif /* CONFIG_ROCKCHIP_PM_DOMAINS */
+
+#endif /* __SOC_ROCKCHIP_PM_DOMAINS_H__ */
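Editor's note: presumably the pair brackets a section during which power-domain transitions must not race with direct PMU register access; a hedged sketch:

	static int example_critical_op(void)
	{
		int ret = rockchip_pmu_block();

		if (ret)
			return ret;

		/* ... access that must not race a power-domain transition ... */

		rockchip_pmu_unblock();
		return 0;
	}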
diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h
index 3eebabcb2812..39cd44cec982 100644
--- a/include/soc/rockchip/rk3399_grf.h
+++ b/include/soc/rockchip/rk3399_grf.h
@@ -11,11 +11,8 @@
/* PMU GRF Registers */
#define RK3399_PMUGRF_OS_REG2 0x308
-#define RK3399_PMUGRF_DDRTYPE_SHIFT 13
-#define RK3399_PMUGRF_DDRTYPE_MASK 7
-#define RK3399_PMUGRF_DDRTYPE_DDR3 3
-#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5
-#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6
-#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7
+#define RK3399_PMUGRF_OS_REG2_DDRTYPE GENMASK(15, 13)
+#define RK3399_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2)
+#define RK3399_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18)
#endif
diff --git a/include/soc/rockchip/rk3568_grf.h b/include/soc/rockchip/rk3568_grf.h
new file mode 100644
index 000000000000..52853efd6720
--- /dev/null
+++ b/include/soc/rockchip/rk3568_grf.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __SOC_RK3568_GRF_H
+#define __SOC_RK3568_GRF_H
+
+#define RK3568_PMUGRF_OS_REG2 0x208
+#define RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13)
+#define RK3568_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2)
+
+#define RK3568_PMUGRF_OS_REG3 0x20c
+#define RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
+#define RK3568_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
+
+#endif /* __SOC_RK3568_GRF_H */
diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h
new file mode 100644
index 000000000000..630b35a55064
--- /dev/null
+++ b/include/soc/rockchip/rk3588_grf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __SOC_RK3588_GRF_H
+#define __SOC_RK3588_GRF_H
+
+#define RK3588_PMUGRF_OS_REG2 0x208
+#define RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13)
+#define RK3588_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2)
+#define RK3588_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18)
+#define RK3588_PMUGRF_OS_REG2_CH_INFO GENMASK(29, 28)
+
+#define RK3588_PMUGRF_OS_REG3 0x20c
+#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
+#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
+
+#define RK3588_PMUGRF_OS_REG4 0x210
+#define RK3588_PMUGRF_OS_REG5 0x214
+
+#endif /* __SOC_RK3588_GRF_H */
diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h
new file mode 100644
index 000000000000..e46fd72aea8d
--- /dev/null
+++ b/include/soc/rockchip/rockchip_grf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Rockchip General Register Files definitions
+ */
+
+#ifndef __SOC_ROCKCHIP_GRF_H
+#define __SOC_ROCKCHIP_GRF_H
+
+/* Rockchip DDRTYPE defines */
+enum {
+ ROCKCHIP_DDRTYPE_DDR3 = 3,
+ ROCKCHIP_DDRTYPE_LPDDR2 = 5,
+ ROCKCHIP_DDRTYPE_LPDDR3 = 6,
+ ROCKCHIP_DDRTYPE_LPDDR4 = 7,
+ ROCKCHIP_DDRTYPE_LPDDR4X = 8,
+};
+
+#endif /* __SOC_ROCKCHIP_GRF_H */
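Editor's note: these GENMASK()-style field definitions pair naturally with FIELD_GET() from <linux/bitfield.h>; a small sketch decoding the DRAM type from a previously read RK3399 OS_REG2 value:

	#include <linux/bitfield.h>

	static bool example_is_lpddr4(u32 os_reg2)
	{
		u32 ddrtype = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, os_reg2);

		return ddrtype == ROCKCHIP_DDRTYPE_LPDDR4;
	}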
diff --git a/include/soc/sifive/sifive_ccache.h b/include/soc/sifive/sifive_ccache.h
new file mode 100644
index 000000000000..4d4ed49388a0
--- /dev/null
+++ b/include/soc/sifive/sifive_ccache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive Composable Cache Controller header file
+ */
+
+#ifndef __SOC_SIFIVE_CCACHE_H
+#define __SOC_SIFIVE_CCACHE_H
+
+extern int register_sifive_ccache_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_ccache_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_CCACHE_ERR_TYPE_CE 0
+#define SIFIVE_CCACHE_ERR_TYPE_UE 1
+
+#endif /* __SOC_SIFIVE_CCACHE_H */
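Editor's note: a sketch of a notifier consumer; that the error type arrives as the notifier action argument is an assumption based on the SIFIVE_CCACHE_ERR_TYPE_* values above.

	static int example_ccache_notify(struct notifier_block *nb,
					 unsigned long err_type, void *data)
	{
		if (err_type == SIFIVE_CCACHE_ERR_TYPE_UE)
			pr_err("ccache: uncorrectable error\n");

		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_ccache_notify,
	};

	/* register_sifive_ccache_error_notifier(&example_nb); */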
diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h
deleted file mode 100644
index 92ade10ed67e..000000000000
--- a/include/soc/sifive/sifive_l2_cache.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SiFive L2 Cache Controller header file
- *
- */
-
-#ifndef __SOC_SIFIVE_L2_CACHE_H
-#define __SOC_SIFIVE_L2_CACHE_H
-
-extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
-extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
-
-#define SIFIVE_L2_ERR_TYPE_CE 0
-#define SIFIVE_L2_ERR_TYPE_UE 1
-
-#endif /* __SOC_SIFIVE_L2_CACHE_H */
diff --git a/include/soc/starfive/reset-starfive-jh71x0.h b/include/soc/starfive/reset-starfive-jh71x0.h
new file mode 100644
index 000000000000..47b486ececc5
--- /dev/null
+++ b/include/soc/starfive/reset-starfive-jh71x0.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_STARFIVE_RESET_JH71X0_H
+#define __SOC_STARFIVE_RESET_JH71X0_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
+
+struct jh71x0_reset_adev {
+ void __iomem *base;
+ struct auxiliary_device adev;
+};
+
+#define to_jh71x0_reset_adev(_adev) \
+ container_of((_adev), struct jh71x0_reset_adev, adev)
+
+#endif
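Editor's note: a sketch of the intended consumer, an auxiliary-bus reset driver recovering the MMIO base mapped by its parent:

	static int example_probe(struct auxiliary_device *adev,
				 const struct auxiliary_device_id *id)
	{
		struct jh71x0_reset_adev *rdev = to_jh71x0_reset_adev(adev);

		/* rdev->base was mapped by the parent (clock) driver;
		 * register a reset controller on top of it here. */
		return rdev->base ? 0 : -ENODEV;
	}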
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index bff99f23860c..6b995a8f0f6d 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef ABI_BPMP_ABI_H
@@ -74,6 +74,32 @@
/**
* @ingroup MRQ_Format
+ * Request an answer from the peer.
+ * This should be set in mrq_request::flags for all requests targeted
+ * at BPMP. For requests originating in BPMP, this flag is optional except
+ * for messages targeting MCE, for which it must be set.
+ * When this flag is not set, the remote peer must not send a response
+ * back.
+ */
+#define BPMP_MAIL_DO_ACK (1U << 0U)
+
+/**
+ * @ingroup MRQ_Format
+ * Ring the sender's doorbell when responding. This should be set unless
+ * the sender wants to poll the underlying communications layer directly.
+ *
+ * An optional direction that can be specified in mrq_request::flags.
+ */
+#define BPMP_MAIL_RING_DB (1U << 1U)
+
+/**
+ * @ingroup MRQ_Format
+ * CRC present
+ */
+#define BPMP_MAIL_CRC_PRESENT (1U << 2U)
+
+/**
+ * @ingroup MRQ_Format
* @brief Header for an MRQ message
*
* Provides the MRQ number for the MRQ message: #mrq. The remainder of
@@ -85,12 +111,139 @@ struct mrq_request {
uint32_t mrq;
/**
- * @brief Flags providing follow up directions to the receiver
+ * @brief 32-bit word containing a number of fields as follows:
+ *
+ * struct {
+ * uint8_t options:4;
+ * uint8_t xid:4;
+ * uint8_t payload_length;
+ * uint16_t crc16;
+ * };
+ *
+ * **options** provides directions to the receiver and indicates CRC presence.
+ *
+ * For #BPMP_MAIL_DO_ACK and #BPMP_MAIL_RING_DB, see the documentation of the
+ * respective options.
+ * #BPMP_MAIL_CRC_PRESENT is supported on T234 and later platforms. When set,
+ * it indicates that the crc16, xid and payload_length fields are present.
+ * Some platform configurations, especially those targeting applications that
+ * require functional safety, mandate that this option be set; otherwise the
+ * request is ignored and -BPMP_EBADMSG is returned.
+ *
+ * **xid** is a transaction ID.
+ *
+ * Only used when #BPMP_MAIL_CRC_PRESENT is set.
+ *
+ * **payload_length** is the length of the message in bytes, excluding this header.
+ * See table below for minimum accepted payload lengths for each MRQ.
+ * Note: For DMCE communication, this field expresses the length as a multiple of 4 bytes
+ * rather than bytes.
+ *
+ * Only used when #BPMP_MAIL_CRC_PRESENT is set.
+ *
+ * | MRQ                  | CMD                                  | minimum payload length                      |
+ * | -------------------- | ------------------------------------ | ------------------------------------------ |
+ * | MRQ_PING | | 4 |
+ * | MRQ_THREADED_PING | | 4 |
+ * | MRQ_RESET | any | 8 |
+ * | MRQ_I2C | | 12 + cmd_i2c_xfer_request.data_size |
+ * | MRQ_CLK | CMD_CLK_GET_RATE | 4 |
+ * | MRQ_CLK | CMD_CLK_SET_RATE | 16 |
+ * | MRQ_CLK | CMD_CLK_ROUND_RATE | 16 |
+ * | MRQ_CLK | CMD_CLK_GET_PARENT | 4 |
+ * | MRQ_CLK | CMD_CLK_SET_PARENT | 8 |
+ * | MRQ_CLK | CMD_CLK_ENABLE | 4 |
+ * | MRQ_CLK | CMD_CLK_DISABLE | 4 |
+ * | MRQ_CLK | CMD_CLK_IS_ENABLED | 4 |
+ * | MRQ_CLK | CMD_CLK_GET_ALL_INFO | 4 |
+ * | MRQ_CLK | CMD_CLK_GET_MAX_CLK_ID | 4 |
+ * | MRQ_CLK | CMD_CLK_GET_FMAX_AT_VMIN | 4 |
+ * | MRQ_QUERY_ABI | | 4 |
+ * | MRQ_PG | CMD_PG_QUERY_ABI | 12 |
+ * | MRQ_PG | CMD_PG_SET_STATE | 12 |
+ * | MRQ_PG | CMD_PG_GET_STATE | 8 |
+ * | MRQ_PG | CMD_PG_GET_NAME | 8 |
+ * | MRQ_PG | CMD_PG_GET_MAX_ID | 8 |
+ * | MRQ_THERMAL | CMD_THERMAL_QUERY_ABI | 8 |
+ * | MRQ_THERMAL | CMD_THERMAL_GET_TEMP | 8 |
+ * | MRQ_THERMAL | CMD_THERMAL_SET_TRIP | 20 |
+ * | MRQ_THERMAL | CMD_THERMAL_GET_NUM_ZONES | 4 |
+ * | MRQ_THERMAL | CMD_THERMAL_GET_THERMTRIP | 8 |
+ * | MRQ_CPU_VHINT | | 8 |
+ * | MRQ_ABI_RATCHET | | 2 |
+ * | MRQ_EMC_DVFS_LATENCY | | 8 |
+ * | MRQ_EMC_DVFS_EMCHUB | | 8 |
+ * | MRQ_EMC_DISP_RFL | | 4 |
+ * | MRQ_BWMGR | CMD_BWMGR_QUERY_ABI | 8 |
+ * | MRQ_BWMGR | CMD_BWMGR_CALC_RATE | 8 + 8 * bwmgr_rate_req.num_iso_clients |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_QUERY_ABI | 8 |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_CALCULATE_LA | 16 |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_SET_LA | 16 |
+ * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_GET_MAX_BW | 8 |
+ * | MRQ_CPU_NDIV_LIMITS | | 4 |
+ * | MRQ_CPU_AUTO_CC3 | | 4 |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * | MRQ_STRAP | STRAP_SET | 12 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | MRQ_UPHY | CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * | MRQ_FMON | CMD_FMON_GEAR_CLAMP | 16 |
+ * | MRQ_FMON | CMD_FMON_GEAR_FREE | 4 |
+ * | MRQ_FMON | CMD_FMON_GEAR_GET | 4 |
+ * | MRQ_FMON | CMD_FMON_FAULT_STS_GET | 8 |
+ * | MRQ_EC | CMD_EC_STATUS_EX_GET | 12 |
+ * | MRQ_QUERY_FW_TAG | | 0 |
+ * | MRQ_DEBUG | CMD_DEBUG_OPEN_RO | 4 + length of cmd_debug_fopen_request.name |
+ * | MRQ_DEBUG | CMD_DEBUG_OPEN_WO | 4 + length of cmd_debug_fopen_request.name |
+ * | MRQ_DEBUG | CMD_DEBUG_READ | 8 |
+ * | MRQ_DEBUG | CMD_DEBUG_WRITE | 12 + cmd_debug_fwrite_request.datalen |
+ * | MRQ_DEBUG | CMD_DEBUG_CLOSE | 8 |
+ * | MRQ_TELEMETRY | | 8 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_QUERY_ABI | 8 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_SET | 20 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_GET | 16 |
+ * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_CURR_CAP | 8 |
+ * | MRQ_GEARS | | 0 |
+ * | MRQ_BWMGR_INT | CMD_BWMGR_INT_QUERY_ABI | 8 |
+ * | MRQ_BWMGR_INT | CMD_BWMGR_INT_CALC_AND_SET | 16 |
+ * | MRQ_BWMGR_INT | CMD_BWMGR_INT_CAP_SET | 8 |
+ * | MRQ_OC_STATUS | | 0 |
+ *
+ * **crc16**
+ *
+ * CRC16 using polynomial x^16 + x^14 + x^12 + x^11 + x^8 + x^5 + x^4 + x^2 + 1
+ * and initialization value 0x4657. The CRC is calculated over all bytes of the message
+ * including this header. However, the crc16 field itself is treated as 0 when
+ * calculating the CRC. Only used when #BPMP_MAIL_CRC_PRESENT is set. If
+ * #BPMP_MAIL_CRC_PRESENT is set and this field does not match the CRC as
+ * calculated by BPMP, -BPMP_EBADMSG will be returned and the request will
+ * be ignored. See code snippet below on how to calculate the CRC.
*
- * | Bit | Description |
- * |-----|--------------------------------------------|
- * | 1 | ring the sender's doorbell when responding |
- * | 0 | should be 1 |
+ * @code
+ * uint16_t calc_crc_digest(uint16_t crc, uint8_t *data, size_t size)
+ * {
+ * for (size_t i = 0; i < size; i++) {
+ * crc ^= data[i] << 8;
+ * for (size_t j = 0; j < 8; j++) {
+ * if ((crc & 0x8000) == 0x8000) {
+ * crc = (crc << 1) ^ 0xAC9A;
+ * } else {
+ * crc = (crc << 1);
+ * }
+ * }
+ * }
+ * return crc;
+ * }
+ *
+ * uint16_t calc_crc(uint8_t *data, size_t size)
+ * {
+ * return calc_crc_digest(0x4657, data, size);
+ * }
+ * @endcode
*/
uint32_t flags;
} BPMP_ABI_PACKED;
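Editor's note: a sketch of packing the flags word per the layout documented above (options in bits 3:0, xid in bits 7:4, payload_length in bits 15:8, crc16 in bits 31:16); calc_crc() refers to the snippet in the comment.

	static uint32_t example_pack_flags(uint8_t xid, uint8_t payload_len)
	{
		uint32_t flags = BPMP_MAIL_DO_ACK | BPMP_MAIL_RING_DB |
				 BPMP_MAIL_CRC_PRESENT;	/* options, bits 3:0 */

		flags |= (uint32_t)(xid & 0xfU) << 4;	/* xid, bits 7:4 */
		flags |= (uint32_t)payload_len << 8;	/* payload_length, bits 15:8 */

		/*
		 * The caller writes this value into the header, runs calc_crc()
		 * over the whole message while crc16 is still zero, then ORs
		 * the result into bits 31:16.
		 */
		return flags;
	}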
@@ -107,7 +260,35 @@ struct mrq_request {
struct mrq_response {
/** @brief Error code for the MRQ request itself */
int32_t err;
- /** @brief Reserved for future use */
+
+ /**
+ * @brief 32-bit word containing a number of fields as follows:
+ *
+ * struct {
+ * uint8_t options:4;
+ * uint8_t xid:4;
+ * uint8_t payload_length;
+ * uint16_t crc16;
+ * };
+ *
+ * **options** indicates CRC presence.
+ *
+ * #BPMP_MAIL_CRC_PRESENT is supported on T234 and later platforms and
+ * indicates the crc16 related fields are present when set.
+ *
+ * **xid** is the transaction ID as sent by the requestor.
+ *
+ * **payload_length** is the length of the message in bytes, excluding this header.
+ * Note: For DMCE communication, this field expresses the length as a multiple of 4 bytes
+ * rather than bytes.
+ *
+ * **crc16**
+ *
+ * CRC16 using polynomial x^16 + x^14 + x^12 + x^11 + x^8 + x^5 + x^4 + x^2 + 1
+ * and initialization value 0x4657. The CRC is calculated over all bytes of the message
+ * including this header. However, the crc16 field itself is treated as 0 when
+ * calculating the CRC. Only used when #BPMP_MAIL_CRC_PRESENT is set.
+ */
uint32_t flags;
} BPMP_ABI_PACKED;
@@ -131,24 +312,16 @@ struct mrq_response {
#define MRQ_PING 0U
#define MRQ_QUERY_TAG 1U
-#define MRQ_MODULE_LOAD 4U
-#define MRQ_MODULE_UNLOAD 5U
-#define MRQ_TRACE_MODIFY 7U
-#define MRQ_WRITE_TRACE 8U
#define MRQ_THREADED_PING 9U
-#define MRQ_MODULE_MAIL 11U
#define MRQ_DEBUGFS 19U
#define MRQ_RESET 20U
#define MRQ_I2C 21U
#define MRQ_CLK 22U
#define MRQ_QUERY_ABI 23U
-#define MRQ_PG_READ_STATE 25U
-#define MRQ_PG_UPDATE_STATE 26U
#define MRQ_THERMAL 27U
#define MRQ_CPU_VHINT 28U
#define MRQ_ABI_RATCHET 29U
#define MRQ_EMC_DVFS_LATENCY 31U
-#define MRQ_TRACE_ITER 64U
#define MRQ_RINGBUF_CONSOLE 65U
#define MRQ_PG 66U
#define MRQ_CPU_NDIV_LIMITS 67U
@@ -159,6 +332,40 @@ struct mrq_response {
#define MRQ_FMON 72U
#define MRQ_EC 73U
#define MRQ_DEBUG 75U
+#define MRQ_EMC_DVFS_EMCHUB 76U
+#define MRQ_BWMGR 77U
+#define MRQ_ISO_CLIENT 78U
+#define MRQ_EMC_DISP_RFL 79U
+#define MRQ_TELEMETRY 80U
+#define MRQ_PWR_LIMIT 81U
+#define MRQ_GEARS 82U
+#define MRQ_BWMGR_INT 83U
+#define MRQ_OC_STATUS 84U
+
+/** @cond DEPRECATED */
+#define MRQ_RESERVED_2 2U
+#define MRQ_RESERVED_3 3U
+#define MRQ_RESERVED_4 4U
+#define MRQ_RESERVED_5 5U
+#define MRQ_RESERVED_6 6U
+#define MRQ_RESERVED_7 7U
+#define MRQ_RESERVED_8 8U
+#define MRQ_RESERVED_10 10U
+#define MRQ_RESERVED_11 11U
+#define MRQ_RESERVED_12 12U
+#define MRQ_RESERVED_13 13U
+#define MRQ_RESERVED_14 14U
+#define MRQ_RESERVED_15 15U
+#define MRQ_RESERVED_16 16U
+#define MRQ_RESERVED_17 17U
+#define MRQ_RESERVED_18 18U
+#define MRQ_RESERVED_24 24U
+#define MRQ_RESERVED_25 25U
+#define MRQ_RESERVED_26 26U
+#define MRQ_RESERVED_30 30U
+#define MRQ_RESERVED_64 64U
+#define MRQ_RESERVED_74 74U
+/** @endcond DEPRECATED */
/** @} */
@@ -167,7 +374,7 @@ struct mrq_response {
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 75U
+#define MAX_CPU_MRQ_ID 84U
/**
* @addtogroup MRQ_Payloads
@@ -183,8 +390,11 @@ struct mrq_response {
* @defgroup ABI_info ABI Info
* @defgroup Powergating Power Gating
* @defgroup Thermal Thermal
+ * @defgroup OC_status OC status
* @defgroup Vhint CPU Voltage hint
* @defgroup EMC EMC
+ * @defgroup BWMGR BWMGR
+ * @defgroup ISO_CLIENT ISO_CLIENT
* @defgroup CPU NDIV Limits
* @defgroup RingbufConsole Ring Buffer Console
* @defgroup Strap Straps
@@ -192,8 +402,11 @@ struct mrq_response {
* @defgroup CC3 Auto-CC3
* @defgroup FMON FMON
* @defgroup EC EC
- * @defgroup Fbvolt_status Fuse Burn Voltage Status
- * @}
+ * @defgroup Telemetry Telemetry
+ * @defgroup Pwrlimit PWR_LIMIT
+ * @defgroup Gears Gears
+ * @defgroup BWMGR_INT Bandwidth Manager Integrated
+ * @} MRQ_Payloads
*/
/**
@@ -304,190 +517,6 @@ struct mrq_query_fw_tag_response {
uint8_t tag[32];
} BPMP_ABI_PACKED;
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_MODULE_LOAD
- * @brief Dynamically load a BPMP code module
- *
- * * Platforms: T210, T210B01, T186
- * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_module_load_request
- * * Response Payload: @ref mrq_module_load_response
- *
- * @note This MRQ is disabled on production systems
- *
- */
-
-/**
- * @ingroup Module
- * @brief Request with #MRQ_MODULE_LOAD
- *
- * Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically
- * load the code located at #phys_addr and having size #size
- * bytes. #phys_addr is treated as a void pointer.
- *
- * The recipient copies the code from #phys_addr to locally allocated
- * memory prior to responding to this message.
- *
- * @todo document the module header format
- *
- * The sender is responsible for ensuring that the code is mapped in
- * the recipient's address map.
- *
- */
-struct mrq_module_load_request {
- /** @brief Base address of the code to load */
- uint32_t phys_addr;
- /** @brief Size in bytes of code to load */
- uint32_t size;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Module
- * @brief Response to #MRQ_MODULE_LOAD
- *
- * @todo document mrq_response::err
- */
-struct mrq_module_load_response {
- /** @brief Handle to the loaded module */
- uint32_t base;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_MODULE_UNLOAD
- * @brief Unload a previously loaded code module
- *
- * * Platforms: T210, T210B01, T186
- * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_module_unload_request
- * * Response Payload: N/A
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Module
- * @brief Request with #MRQ_MODULE_UNLOAD
- *
- * Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded
- * module be unloaded.
- */
-struct mrq_module_unload_request {
- /** @brief Handle of the module to unload */
- uint32_t base;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_TRACE_MODIFY
- * @brief Modify the set of enabled trace events
- *
- * @deprecated
- *
- * * Platforms: All
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_trace_modify_request
- * * Response Payload: @ref mrq_trace_modify_response
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Trace
- * @brief Request with #MRQ_TRACE_MODIFY
- *
- * Used by %MRQ_TRACE_MODIFY calls to enable or disable specify trace
- * events. #set takes precedence for any bit set in both #set and
- * #clr.
- */
-struct mrq_trace_modify_request {
- /** @brief Bit mask of trace events to disable */
- uint32_t clr;
- /** @brief Bit mask of trace events to enable */
- uint32_t set;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Trace
- * @brief Response to #MRQ_TRACE_MODIFY
- *
- * Sent in repsonse to an #MRQ_TRACE_MODIFY message. #mask reflects the
- * state of which events are enabled after the recipient acted on the
- * message.
- *
- */
-struct mrq_trace_modify_response {
- /** @brief Bit mask of trace event enable states */
- uint32_t mask;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_WRITE_TRACE
- * @brief Write trace data to a buffer
- *
- * @deprecated
- *
- * * Platforms: All
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: @ref mrq_write_trace_request
- * * Response Payload: @ref mrq_write_trace_response
- *
- * mrq_response::err depends on the @ref mrq_write_trace_request field
- * values. err is -#BPMP_EINVAL if size is zero or area is NULL or
- * area is in an illegal range. A positive value for err indicates the
- * number of bytes written to area.
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Trace
- * @brief Request with #MRQ_WRITE_TRACE
- *
- * Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace
- * data from the recipient's local buffer to the output buffer. #area
- * is treated as a byte-aligned pointer in the recipient's address
- * space.
- *
- * The sender is responsible for ensuring that the output
- * buffer is mapped in the recipient's address map. The recipient is
- * responsible for protecting its own code and data from accidental
- * overwrites.
- */
-struct mrq_write_trace_request {
- /** @brief Base address of output buffer */
- uint32_t area;
- /** @brief Size in bytes of the output buffer */
- uint32_t size;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Trace
- * @brief Response to #MRQ_WRITE_TRACE
- *
- * Once this response is sent, the respondent will not access the
- * output buffer further.
- */
-struct mrq_write_trace_response {
- /**
- * @brief Flag whether more data remains in local buffer
- *
- * Value is 1 if the entire local trace buffer has been
- * drained to the outputbuffer. Value is 0 otherwise.
- */
- uint32_t eof;
-} BPMP_ABI_PACKED;
-
/** @private */
struct mrq_threaded_ping_request {
uint32_t challenge;
@@ -500,50 +529,6 @@ struct mrq_threaded_ping_response {
/**
* @ingroup MRQ_Codes
- * @def MRQ_MODULE_MAIL
- * @brief Send a message to a loadable module
- *
- * * Platforms: T210, T210B01, T186
- * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
- * * Initiators: Any
- * * Targets: BPMP
- * * Request Payload: @ref mrq_module_mail_request
- * * Response Payload: @ref mrq_module_mail_response
- *
- * @note This MRQ is disabled on production systems
- */
-
-/**
- * @ingroup Module
- * @brief Request with #MRQ_MODULE_MAIL
- */
-struct mrq_module_mail_request {
- /** @brief Handle to the previously loaded module */
- uint32_t base;
- /** @brief Module-specific mail payload
- *
- * The length of data[ ] is unknown to the BPMP core firmware
- * but it is limited to the size of an IPC message.
- */
- uint8_t data[BPMP_ABI_EMPTY_ARRAY];
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Module
- * @brief Response to #MRQ_MODULE_MAIL
- */
-struct mrq_module_mail_response {
- /** @brief Module-specific mail payload
- *
- * The length of data[ ] is unknown to the BPMP core firmware
- * but it is limited to the size of an IPC message.
- */
- uint8_t data[BPMP_ABI_EMPTY_ARRAY];
-} BPMP_ABI_PACKED;
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
@@ -686,7 +671,7 @@ struct mrq_debugfs_response {
#define DEBUGFS_S_ISDIR (1 << 9)
#define DEBUGFS_S_IRUSR (1 << 8)
#define DEBUGFS_S_IWUSR (1 << 7)
-/** @} */
+/** @} Debugfs */
/**
* @ingroup MRQ_Codes
@@ -970,7 +955,7 @@ struct mrq_reset_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} Reset */
/**
* @ingroup MRQ_Codes
@@ -1032,7 +1017,17 @@ struct serial_i2c_request {
* @brief Trigger one or more i2c transactions
*/
struct cmd_i2c_xfer_request {
- /** @brief Valid bus number from @ref bpmp_i2c_ids*/
+ /**
+ * @brief Tegra PWR_I2C bus identifier
+ *
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_t194)
+ * Must be set to 5.
+ * @endcond (bpmp_t234 || bpmp_t239 || bpmp_t194)
+ * @cond bpmp_th500
+ * Must be set to 1.
+ * @endcond bpmp_th500
+ *
+ */
uint32_t bus_id;
/** @brief Count of valid bytes in #data_buf*/
@@ -1084,7 +1079,7 @@ struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} I2C */
/**
* @ingroup MRQ_Codes
@@ -1109,6 +1104,13 @@ enum {
CMD_CLK_IS_ENABLED = 6,
CMD_CLK_ENABLE = 7,
CMD_CLK_DISABLE = 8,
+/** @cond DEPRECATED */
+ CMD_CLK_PROPERTIES = 9,
+ CMD_CLK_POSSIBLE_PARENTS = 10,
+ CMD_CLK_NUM_POSSIBLE_PARENTS = 11,
+ CMD_CLK_GET_POSSIBLE_PARENT = 12,
+ CMD_CLK_RESET_REFCOUNTS = 13,
+/** @endcond DEPRECATED */
CMD_CLK_GET_ALL_INFO = 14,
CMD_CLK_GET_MAX_CLK_ID = 15,
CMD_CLK_GET_FMAX_AT_VMIN = 16,
@@ -1119,6 +1121,21 @@ enum {
#define BPMP_CLK_HAS_SET_RATE (1U << 1U)
#define BPMP_CLK_IS_ROOT (1U << 2U)
#define BPMP_CLK_IS_VAR_ROOT (1U << 3U)
+/**
+ * @brief Protection against rate and parent changes
+ *
+ * #MRQ_CLK command #CMD_CLK_SET_RATE or #MRQ_CLK command #CMD_CLK_SET_PARENT will return
+ * -#BPMP_EACCES.
+ */
+#define BPMP_CLK_RATE_PARENT_CHANGE_DENIED (1U << 30)
+
+/**
+ * @brief Protection against state changes
+ *
+ * #MRQ_CLK command #CMD_CLK_ENABLE or #MRQ_CLK command #CMD_CLK_DISABLE will return
+ * -#BPMP_EACCES.
+ */
+#define BPMP_CLK_STATE_CHANGE_DENIED (1U << 31)
#define MRQ_CLK_NAME_MAXLEN 40U
#define MRQ_CLK_MAX_PARENTS 16U
@@ -1177,7 +1194,7 @@ struct cmd_clk_is_enabled_request {
*/
struct cmd_clk_is_enabled_response {
/**
- * @brief The state of the clock that has been succesfully
+ * @brief The state of the clock that has been successfully
* requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the
* master invoking the command earlier.
*
@@ -1210,6 +1227,46 @@ struct cmd_clk_disable_response {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/** @cond DEPRECATED */
+/** @private */
+struct cmd_clk_properties_request {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+/** @todo flags need to be spelled out here */
+struct cmd_clk_properties_response {
+ uint32_t flags;
+} BPMP_ABI_PACKED;
+
+/** @private */
+struct cmd_clk_possible_parents_request {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_possible_parents_response {
+ uint8_t num_parents;
+ uint8_t reserved[3];
+ uint32_t parent_id[MRQ_CLK_MAX_PARENTS];
+} BPMP_ABI_PACKED;
+
+/** @private */
+struct cmd_clk_num_possible_parents_request {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_num_possible_parents_response {
+ uint8_t num_parents;
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_get_possible_parent_request {
+ uint8_t parent_idx;
+} BPMP_ABI_PACKED;
+
+struct cmd_clk_get_possible_parent_response {
+ uint32_t parent_id;
+} BPMP_ABI_PACKED;
+/** @endcond DEPRECATED */
+
/** @private */
struct cmd_clk_get_all_info_request {
BPMP_ABI_EMPTY
@@ -1241,6 +1298,7 @@ struct cmd_clk_get_fmax_at_vmin_response {
int64_t rate;
} BPMP_ABI_PACKED;
+
/**
* @ingroup Clocks
* @brief Request with #MRQ_CLK
@@ -1267,6 +1325,17 @@ struct cmd_clk_get_fmax_at_vmin_response {
*
*/
+/** @cond DEPRECATED
+ *
+ * Older versions of the firmware also supported the following sub-commands:
+ * |CMD_CLK_PROPERTIES |- |
+ * |CMD_CLK_POSSIBLE_PARENTS |- |
+ * |CMD_CLK_NUM_POSSIBLE_PARENTS|- |
+ * |CMD_CLK_GET_POSSIBLE_PARENT |clk_get_possible_parent|
+ * |CMD_CLK_RESET_REFCOUNTS |- |
+ *
+ * @endcond DEPRECATED */
+
struct mrq_clk_request {
/** @brief Sub-command and clock id concatenated to 32-bit word.
* - bits[31..24] is the sub-cmd.
@@ -1288,6 +1357,15 @@ struct mrq_clk_request {
struct cmd_clk_disable_request clk_disable;
/** @private */
struct cmd_clk_is_enabled_request clk_is_enabled;
+ /** @cond DEPRECATED */
+ /** @private */
+ struct cmd_clk_properties_request clk_properties;
+ /** @private */
+ struct cmd_clk_possible_parents_request clk_possible_parents;
+ /** @private */
+ struct cmd_clk_num_possible_parents_request clk_num_possible_parents;
+ struct cmd_clk_get_possible_parent_request clk_get_possible_parent;
+ /** @endcond DEPRECATED */
/** @private */
struct cmd_clk_get_all_info_request clk_get_all_info;
/** @private */
@@ -1321,6 +1399,17 @@ struct mrq_clk_request {
*
*/
+/** @cond DEPRECATED
+ *
+ * Older versions of the firmware also supported the following sub-commands:
+ * |CMD_CLK_PROPERTIES |clk_properties |
+ * |CMD_CLK_POSSIBLE_PARENTS |clk_possible_parents |
+ * |CMD_CLK_NUM_POSSIBLE_PARENTS|clk_num_possible_parents|
+ * |CMD_CLK_GET_POSSIBLE_PARENT |clk_get_possible_parent |
+ * |CMD_CLK_RESET_REFCOUNTS |- |
+ *
+ * @endcond DEPRECATED */
+
struct mrq_clk_response {
union {
struct cmd_clk_get_rate_response clk_get_rate;
@@ -1333,13 +1422,19 @@ struct mrq_clk_response {
/** @private */
struct cmd_clk_disable_response clk_disable;
struct cmd_clk_is_enabled_response clk_is_enabled;
+ /** @cond DEPRECATED */
+ struct cmd_clk_properties_response clk_properties;
+ struct cmd_clk_possible_parents_response clk_possible_parents;
+ struct cmd_clk_num_possible_parents_response clk_num_possible_parents;
+ struct cmd_clk_get_possible_parent_response clk_get_possible_parent;
+ /** @endcond DEPRECATED */
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} Clocks */
/**
* @ingroup MRQ_Codes
@@ -1378,107 +1473,20 @@ struct mrq_query_abi_response {
} BPMP_ABI_PACKED;
/**
- * @ingroup MRQ_Codes
- * @def MRQ_PG_READ_STATE
- * @brief Read the power-gating state of a partition
*
- * * Platforms: T186
- * @cond bpmp_t186
- * * Initiators: Any
- * * Targets: BPMP
- * * Request Payload: @ref mrq_pg_read_state_request
- * * Response Payload: @ref mrq_pg_read_state_response
- */
-
-/**
- * @ingroup Powergating
- * @brief Request with #MRQ_PG_READ_STATE
- *
- * Used by MRQ_PG_READ_STATE call to read the current state of a
- * partition.
- */
-struct mrq_pg_read_state_request {
- /** @brief ID of partition */
- uint32_t partition_id;
-} BPMP_ABI_PACKED;
-
-/**
- * @ingroup Powergating
- * @brief Response to MRQ_PG_READ_STATE
- * @todo define possible errors.
- */
-struct mrq_pg_read_state_response {
- /** @brief Read as don't care */
- uint32_t sram_state;
- /** @brief State of power partition
- * * 0 : off
- * * 1 : on
- */
- uint32_t logic_state;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-/** @} */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_PG_UPDATE_STATE
- * @brief Modify the power-gating state of a partition. In contrast to
- * MRQ_PG calls, the operations that change state (on/off) of power
- * partition are reference counted.
- *
- * * Platforms: T186
- * @cond bpmp_t186
- * * Initiators: Any
- * * Targets: BPMP
- * * Request Payload: @ref mrq_pg_update_state_request
- * * Response Payload: N/A
- */
-
-/**
- * @ingroup Powergating
- * @brief Request with mrq_pg_update_state_request
- *
- * Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the
- * state of a power partition #partition_id.
- */
-struct mrq_pg_update_state_request {
- /** @brief ID of partition */
- uint32_t partition_id;
- /** @brief Secondary control of power partition
- * @details Ignored by many versions of the BPMP
- * firmware. For maximum compatibility, set the value
- * according to @ref logic_state
- * * 0x1: power ON partition (@ref logic_state == 0x3)
- * * 0x3: power OFF partition (@ref logic_state == 0x1)
- */
- uint32_t sram_state;
- /** @brief Controls state of power partition, legal values are
- * * 0x1 : power OFF partition
- * * 0x3 : power ON partition
- */
- uint32_t logic_state;
- /** @brief Change state of clocks of the power partition, legal values
- * * 0x0 : do not change clock state
- * * 0x1 : disable partition clocks (only applicable when
- * @ref logic_state == 0x1)
- * * 0x3 : enable partition clocks (only applicable when
- * @ref logic_state == 0x3)
- */
- uint32_t clock_state;
-} BPMP_ABI_PACKED;
-/** @endcond*/
-
-/**
* @ingroup MRQ_Codes
* @def MRQ_PG
* @brief Control power-gating state of a partition. In contrast to
* MRQ_PG_UPDATE_STATE, operations that change the power partition
* state are NOT reference counted
*
- * @note BPMP-FW forcefully turns off some partitions as part of SC7 entry
- * because their state cannot be adequately restored on exit. Therefore,
- * it is recommended to power off all domains via MRQ_PG prior to SC7 entry.
+ * @cond (bpmp_t194 || bpmp_t186)
+ * @note On T194 and earlier BPMP-FW forcefully turns off some partitions as
+ * part of SC7 entry because their state cannot be adequately restored on exit.
+ * Therefore, it is recommended to power off all domains via MRQ_PG prior to SC7
+ * entry.
* See @ref bpmp_pdomain_ids for further detail.
+ * @endcond (bpmp_t194 || bpmp_t186)
*
* * Platforms: T186, T194
* * Initiators: Any
@@ -1643,7 +1651,7 @@ struct mrq_pg_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
+/** @} Powergating */
/**
* @ingroup MRQ_Codes
@@ -1889,7 +1897,44 @@ union mrq_thermal_bpmp_to_host_response {
struct cmd_thermal_get_thermtrip_response get_thermtrip;
struct cmd_thermal_get_num_zones_response get_num_zones;
} BPMP_ABI_PACKED;
-/** @} */
+
+/** @} Thermal */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_OC_STATUS
+ * @brief Query over-current status
+ *
+ * * Platforms: T234
+ * @cond bpmp_t234
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_oc_status_response
+ *
+ * @addtogroup OC_status
+ * @{
+ */
+
+#define OC_STATUS_MAX_SIZE 24U
+
+/**
+ * @brief Response to #MRQ_OC_STATUS
+ *
+ * throt_en: Throttle status for each OC alarm: zero means throttling
+ * is disabled, non-zero means it is enabled.
+ * event_cnt: Total number of OC events for each OC alarm.
+ *
+ * mrq_response::err is 0 if the operation was successful and
+ * -#BPMP_ENODEV otherwise.
+ */
+struct mrq_oc_status_response {
+ uint8_t throt_en[OC_STATUS_MAX_SIZE];
+ uint32_t event_cnt[OC_STATUS_MAX_SIZE];
+} BPMP_ABI_PACKED;
+
+/** @} OC_status */
+/** @endcond bpmp_t234 */
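Editor's note: a sketch of issuing this request from the kernel side; struct tegra_bpmp_message and tegra_bpmp_transfer() come from <soc/tegra/bpmp.h> and their use here is illustrative.

	static int example_oc_status(struct tegra_bpmp *bpmp,
				     struct mrq_oc_status_response *resp)
	{
		struct tegra_bpmp_message msg = {
			.mrq = MRQ_OC_STATUS,
			.rx = { .data = resp, .size = sizeof(*resp) },
		};
		int err = tegra_bpmp_transfer(bpmp, &msg);

		return err ?: msg.rx.ret;	/* rx.ret carries mrq_response::err */
	}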
/**
* @ingroup MRQ_Codes
@@ -1948,8 +1993,9 @@ struct cpu_vhint_data {
/** reserved for future use */
uint16_t reserved[328];
} BPMP_ABI_PACKED;
-/** @endcond */
-/** @} */
+
+/** @} Vhint */
+/** @endcond bpmp_t186 */
/**
* @ingroup MRQ_Codes
@@ -2016,14 +2062,15 @@ struct mrq_abi_ratchet_response {
/** @brief BPMP's ratchet value */
uint16_t ratchet;
};
-/** @} */
+
+/** @} ABI_info */
/**
* @ingroup MRQ_Codes
* @def MRQ_EMC_DVFS_LATENCY
* @brief Query frequency dependent EMC DVFS latency
*
- * * Platforms: T186, T194
+ * * Platforms: T186, T194, T234
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -2053,7 +2100,543 @@ struct mrq_emc_dvfs_latency_response {
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
} BPMP_ABI_PACKED;
-/** @} */
+/** @} EMC */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DVFS_EMCHUB
+ * @brief Query EMC HUB frequencies
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_emc_dvfs_emchub_response
+ * @addtogroup EMC
+ * @{
+ */
+
+/**
+ * @brief Used by @ref mrq_emc_dvfs_emchub_response
+ */
+struct emc_dvfs_emchub {
+ /** @brief EMC DVFS node frequency in kHz */
+ uint32_t freq;
+ /** @brief EMC HUB frequency in kHz */
+ uint32_t hub_freq;
+} BPMP_ABI_PACKED;
+
+#define EMC_DVFS_EMCHUB_MAX_SIZE EMC_DVFS_LATENCY_MAX_SIZE
+/**
+ * @brief Response to #MRQ_EMC_DVFS_EMCHUB
+ */
+struct mrq_emc_dvfs_emchub_response {
+ /** @brief The number of valid entries in #pairs */
+ uint32_t num_pairs;
+ /** @brief EMC DVFS node <frequency, hub frequency> information */
+ struct emc_dvfs_emchub pairs[EMC_DVFS_EMCHUB_MAX_SIZE];
+} BPMP_ABI_PACKED;
+
+/** @} EMC */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DISP_RFL
+ * @brief Set EMC display RFL handshake mode of operations
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_emc_disp_rfl_request
+ * * Response Payload: N/A
+ *
+ * @addtogroup EMC
+ * @{
+ */
+
+enum mrq_emc_disp_rfl_mode {
+ /** @brief EMC display RFL handshake disabled */
+ EMC_DISP_RFL_MODE_DISABLED = 0,
+ /** @brief EMC display RFL handshake enabled */
+ EMC_DISP_RFL_MODE_ENABLED = 1,
+};
+
+/**
+ * @ingroup EMC
+ * @brief Request with #MRQ_EMC_DISP_RFL
+ *
+ * Used by the sender of an #MRQ_EMC_DISP_RFL message to
+ * request the mode of EMC display RFL handshake.
+ *
+ * mrq_response::err is
+ * * 0: RFL mode is set successfully
+ * * -#BPMP_EINVAL: invalid mode requested
+ * * -#BPMP_ENOSYS: RFL handshake is not supported
+ * * -#BPMP_EACCES: Permission denied
+ * * -#BPMP_ENODEV: if disp rfl mrq is not supported by BPMP-FW
+ */
+struct mrq_emc_disp_rfl_request {
+ /** @brief EMC display RFL mode (@ref mrq_emc_disp_rfl_mode) */
+ uint32_t mode;
+} BPMP_ABI_PACKED;
+
+/** @} EMC */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_BWMGR
+ * @brief bwmgr requests
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_bwmgr_request
+ * * Response Payload: @ref mrq_bwmgr_response
+ *
+ * @addtogroup BWMGR
+ *
+ * @{
+ */
+
+enum mrq_bwmgr_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified
+ * request type
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_BWMGR_QUERY_ABI = 0,
+
+ /**
+ * @brief Determine dram rate to satisfy iso/niso bw requests
+ *
+ * mrq_response::err is
+ * * 0: calc_rate succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOTSUP: Requested bw is not available.
+ */
+ CMD_BWMGR_CALC_RATE = 1
+};
+
+/*
+ * request data for request type CMD_BWMGR_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_bwmgr_query_abi_request {
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Used by @ref cmd_bwmgr_calc_rate_request
+ */
+struct iso_req {
+ /* @brief bwmgr client ID @ref bpmp_bwmgr_ids */
+ uint32_t id;
+ /* @brief bw in kBps requested by client */
+ uint32_t iso_bw;
+} BPMP_ABI_PACKED;
+
+#define MAX_ISO_CLIENTS 13U
+/*
+ * request data for request type CMD_BWMGR_CALC_RATE
+ */
+struct cmd_bwmgr_calc_rate_request {
+ /* @brief total bw in kBps requested by all niso clients */
+ uint32_t sum_niso_bw;
+ /* @brief The number of iso clients */
+ uint32_t num_iso_clients;
+ /* @brief iso_req <id, iso_bw> information */
+ struct iso_req isobw_reqs[MAX_ISO_CLIENTS];
+} BPMP_ABI_PACKED;
+
+/*
+ * response data for request type CMD_BWMGR_CALC_RATE
+ *
+ * iso_rate_min: min dram data clk rate in kHz to satisfy all iso bw reqs
+ * total_rate_min: min dram data clk rate in kHz to satisfy all bw reqs
+ */
+struct cmd_bwmgr_calc_rate_response {
+ uint32_t iso_rate_min;
+ uint32_t total_rate_min;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Request with #MRQ_BWMGR
+ *
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------------|
+ * |CMD_BWMGR_QUERY_ABI | cmd_bwmgr_query_abi_request |
+ * |CMD_BWMGR_CALC_RATE | cmd_bwmgr_calc_rate_request |
+ *
+ */
+struct mrq_bwmgr_request {
+ uint32_t cmd;
+ union {
+ struct cmd_bwmgr_query_abi_request query_abi;
+ struct cmd_bwmgr_calc_rate_request bwmgr_rate_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Response to MRQ_BWMGR
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------------|
+ * |CMD_BWMGR_CALC_RATE | cmd_bwmgr_calc_rate_response |
+ */
+struct mrq_bwmgr_response {
+ union {
+ struct cmd_bwmgr_calc_rate_response bwmgr_rate_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} BWMGR */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
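Editor's note: for concreteness, a sketch filling a CMD_BWMGR_CALC_RATE request for one ISO client plus aggregate NISO traffic; the client ID and bandwidth numbers are made up.

	static void example_fill_calc_rate(struct mrq_bwmgr_request *req)
	{
		req->cmd = CMD_BWMGR_CALC_RATE;
		req->bwmgr_rate_req.sum_niso_bw = 1000000;		/* kBps */
		req->bwmgr_rate_req.num_iso_clients = 1;
		req->bwmgr_rate_req.isobw_reqs[0].id = 0;		/* @ref bpmp_bwmgr_ids */
		req->bwmgr_rate_req.isobw_reqs[0].iso_bw = 500000;	/* kBps */
	}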
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_BWMGR_INT
+ * @brief bpmp-integrated bwmgr requests
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_bwmgr_int_request
+ * * Response Payload: @ref mrq_bwmgr_int_response
+ *
+ * @addtogroup BWMGR_INT
+ * @{
+ */
+
+enum mrq_bwmgr_int_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * request type
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_BWMGR_INT_QUERY_ABI = 1,
+
+ /**
+ * @brief Determine and set dram rate to satisfy iso/niso bw request
+ *
+ * mrq_response::err is
+ * * 0: request succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * set_frequency in @ref cmd_bwmgr_int_calc_and_set_response
+ * will not be set.
+ * * -#BPMP_ENOTSUP: Requested bw is not available.
+ * set_frequency in @ref cmd_bwmgr_int_calc_and_set_response
+ * will be current dram-clk rate.
+ */
+ CMD_BWMGR_INT_CALC_AND_SET = 2,
+
+ /**
+ * @brief Set a max DRAM frequency for the bandwidth-manager
+ *
+ * mrq_response::err is
+ * * 0: request succeeded.
+ * * -#BPMP_ENOTSUP: Requested cap frequency is not possible.
+ */
+ CMD_BWMGR_INT_CAP_SET = 3
+};
+
+/*
+ * request structure for request type CMD_BWMGR_INT_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_bwmgr_int_query_abi_request {
+ /* @brief request type determined by @ref mrq_bwmgr_int_cmd */
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/**
+ * @defgroup bwmgr_int_unit_type BWMGR_INT floor unit-types
+ * @addtogroup bwmgr_int_unit_type
+ * @{
+ */
+/** @brief kilobytes per second unit-type */
+#define BWMGR_INT_UNIT_KBPS 0U
+/** @brief kilohertz unit-type */
+#define BWMGR_INT_UNIT_KHZ 1U
+
+/** @} bwmgr_int_unit_type */
+
+/*
+ * request data for request type CMD_BWMGR_INT_CALC_AND_SET
+ */
+struct cmd_bwmgr_int_calc_and_set_request {
+ /* @brief bwmgr client ID @ref bpmp_bwmgr_ids */
+ uint32_t client_id;
+ /* @brief average niso bw usage in kBps requested by client. */
+ uint32_t niso_bw;
+ /*
+ * @brief average iso bw usage in kBps requested by client.
+ * Value is ignored if the client is niso (as determined by client_id).
+ */
+ uint32_t iso_bw;
+ /*
+ * @brief memory clock floor requested by client.
+ * Unit determined by floor_unit.
+ */
+ uint32_t mc_floor;
+ /*
+ * @brief toggle to determine the unit-type of floor value.
+ * See @ref bwmgr_int_unit_type definitions for unit-type mappings.
+ */
+ uint8_t floor_unit;
+} BPMP_ABI_PACKED;
+
+struct cmd_bwmgr_int_cap_set_request {
+ /* @brief requested cap frequency in Hz. */
+ uint64_t rate;
+} BPMP_ABI_PACKED;
+
+/*
+ * response data for request type CMD_BWMGR_INT_CALC_AND_SET
+ */
+struct cmd_bwmgr_int_calc_and_set_response {
+ /* @brief current set memory clock frequency in Hz */
+ uint64_t rate;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Request with #MRQ_BWMGR_INT
+ *
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------------------|
+ * |CMD_BWMGR_INT_QUERY_ABI | cmd_bwmgr_int_query_abi_request |
+ * |CMD_BWMGR_INT_CALC_AND_SET | cmd_bwmgr_int_calc_and_set_request|
+ * |CMD_BWMGR_INT_CAP_SET | cmd_bwmgr_int_cap_set_request |
+ *
+ */
+struct mrq_bwmgr_int_request {
+ uint32_t cmd;
+ union {
+ struct cmd_bwmgr_int_query_abi_request query_abi;
+ struct cmd_bwmgr_int_calc_and_set_request bwmgr_calc_set_req;
+ struct cmd_bwmgr_int_cap_set_request bwmgr_cap_set_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/*
+ * @brief Response to MRQ_BWMGR_INT
+ *
+ * |sub-command |payload |
+ * |----------------------------|---------------------------------------|
+ * |CMD_BWMGR_INT_CALC_AND_SET | cmd_bwmgr_int_calc_and_set_response |
+ */
+struct mrq_bwmgr_int_response {
+ union {
+ struct cmd_bwmgr_int_calc_and_set_response bwmgr_calc_set_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} BWMGR_INT */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_ISO_CLIENT
+ * @brief ISO client requests
+ *
+ * * Platforms: T234 onwards
+ * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_iso_client_request
+ * * Response Payload: @ref mrq_iso_client_response
+ *
+ * @addtogroup ISO_CLIENT
+ * @{
+ */
+
+enum mrq_iso_client_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified
+ * request type
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_ISO_CLIENT_QUERY_ABI = 0,
+
+ /*
+ * @brief Check for a legal LA for the ISO client. Without programming
+ * the LA MC registers, calculate and ensure that a legal LA is possible
+ * for the iso bw requested by the ISO client.
+ *
+ * mrq_response::err is
+ * * 0: check la succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_EFAULT: Legal LA is not possible for client requested iso_bw
+ */
+ CMD_ISO_CLIENT_CALCULATE_LA = 1,
+
+ /*
+ * @brief Set LA for the ISO client. Calculate and program the LA/PTSA
+ * MC registers corresponding to the client making the bw request.
+ *
+ * mrq_response::err is
+ * * 0: set la succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_EFAULT: Failed to calculate or program MC registers.
+ */
+ CMD_ISO_CLIENT_SET_LA = 2,
+
+ /*
+ * @brief Get max possible bw for iso client
+ *
+ * mrq_response::err is
+ * * 0: get_max_bw succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ */
+ CMD_ISO_CLIENT_GET_MAX_BW = 3
+};
+
+/*
+ * request data for request type CMD_ISO_CLIENT_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_iso_client_query_abi_request {
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/*
+ * request data for request type CMD_ISO_CLIENT_CALCULATE_LA
+ *
+ * id: client ID in @ref bpmp_bwmgr_ids
+ * bw: bw requested in kBps by client ID.
+ * init_bw_floor: initial dram_bw_floor in kBps passed by client ID.
+ * ISO client will perform mempool allocation and DVFS buffering based
+ * on this dram_bw_floor.
+ */
+struct cmd_iso_client_calculate_la_request {
+ uint32_t id;
+ uint32_t bw;
+ uint32_t init_bw_floor;
+} BPMP_ABI_PACKED;
+
+/*
+ * request data for request type CMD_ISO_CLIENT_SET_LA
+ *
+ * id: client ID in @ref bpmp_bwmgr_ids
+ * bw: bw requested in kBps by client ID.
+ * final_bw_floor: final dram_bw_floor in kBps.
+ * Sometimes the initial dram_bw_floor passed by ISO client may need to be
+ * updated by considering higher dram freq's. This is the final dram_bw_floor
+ * used to calculate and program MC registers.
+ */
+struct cmd_iso_client_set_la_request {
+ uint32_t id;
+ uint32_t bw;
+ uint32_t final_bw_floor;
+} BPMP_ABI_PACKED;
+
+/*
+ * request data for request type CMD_ISO_CLIENT_GET_MAX_BW
+ *
+ * id: client ID in @ref bpmp_bwmgr_ids
+ */
+struct cmd_iso_client_get_max_bw_request {
+ uint32_t id;
+} BPMP_ABI_PACKED;
+
+/*
+ * response data for request type CMD_ISO_CLIENT_CALCULATE_LA
+ *
+ * la_rate_floor: minimum dram_rate_floor in kHz at which a legal la is possible
+ * iso_client_only_rate: Minimum dram freq in kHz required to satisfy this client's
+ * iso bw request, assuming all other iso clients are inactive
+ */
+struct cmd_iso_client_calculate_la_response {
+ uint32_t la_rate_floor;
+ uint32_t iso_client_only_rate;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Used by @ref cmd_iso_client_get_max_bw_response
+ */
+struct iso_max_bw {
+ /* @brief dram frequency in kHz */
+ uint32_t freq;
+ /* @brief max possible iso-bw in kBps */
+ uint32_t iso_bw;
+} BPMP_ABI_PACKED;
+
+#define ISO_MAX_BW_MAX_SIZE 14U
+/*
+ * response data for request type CMD_ISO_CLIENT_GET_MAX_BW
+ */
+struct cmd_iso_client_get_max_bw_response {
+ /* @brief The number of valid entries in iso_max_bw pairs */
+ uint32_t num_pairs;
+ /* @brief max ISOBW <dram freq, max bw> information */
+ struct iso_max_bw pairs[ISO_MAX_BW_MAX_SIZE];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request with #MRQ_ISO_CLIENT
+ *
+ * Used by the sender of an #MRQ_ISO_CLIENT message.
+ *
+ * |sub-command |payload |
+ * |------------------------------------ |----------------------------------------|
+ * |CMD_ISO_CLIENT_QUERY_ABI |cmd_iso_client_query_abi_request |
+ * |CMD_ISO_CLIENT_CALCULATE_LA |cmd_iso_client_calculate_la_request |
+ * |CMD_ISO_CLIENT_SET_LA |cmd_iso_client_set_la_request |
+ * |CMD_ISO_CLIENT_GET_MAX_BW |cmd_iso_client_get_max_bw_request |
+ *
+ */
+
+struct mrq_iso_client_request {
+ /* Type of request. Values listed in enum mrq_iso_client_cmd */
+ uint32_t cmd;
+ union {
+ struct cmd_iso_client_query_abi_request query_abi;
+ struct cmd_iso_client_calculate_la_request calculate_la_req;
+ struct cmd_iso_client_set_la_request set_la_req;
+ struct cmd_iso_client_get_max_bw_request max_isobw_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response to MRQ_ISO_CLIENT
+ *
+ * Each sub-command supported by @ref mrq_iso_client_request may return
+ * sub-command-specific data. Some do and some do not as indicated in
+ * the following table
+ *
+ * |sub-command |payload |
+ * |---------------------------- |------------------------------------|
+ * |CMD_ISO_CLIENT_CALCULATE_LA |cmd_iso_client_calculate_la_response|
+ * |CMD_ISO_CLIENT_SET_LA |N/A |
+ * |CMD_ISO_CLIENT_GET_MAX_BW |cmd_iso_client_get_max_bw_response |
+ *
+ */
+
+struct mrq_iso_client_response {
+ union {
+ struct cmd_iso_client_calculate_la_response calculate_la_resp;
+ struct cmd_iso_client_get_max_bw_response max_isobw_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} ISO_CLIENT */
+/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
@@ -2061,7 +2644,7 @@ struct mrq_emc_dvfs_latency_response {
* @brief CPU freq. limits in ndiv
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_cpu_ndiv_limits_request
@@ -2094,15 +2677,15 @@ struct mrq_cpu_ndiv_limits_response {
uint16_t ndiv_min;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @} CPU */
+/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
* @def MRQ_CPU_AUTO_CC3
* @brief Query CPU cluster auto-CC3 configuration
*
- * * Platforms: T194 onwards
+ * * Platforms: T194
* @cond bpmp_t194
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -2140,40 +2723,8 @@ struct mrq_cpu_auto_cc3_response {
uint32_t auto_cc3_config;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_TRACE_ITER
- * @brief Manage the trace iterator
- *
- * @deprecated
- *
- * * Platforms: All
- * * Initiators: CCPLEX
- * * Targets: BPMP
- * * Request Payload: N/A
- * * Response Payload: @ref mrq_trace_iter_request
- * @addtogroup Trace
- * @{
- */
-enum {
- /** @brief (re)start the tracing now. Ignore older events */
- TRACE_ITER_INIT = 0,
- /** @brief Clobber all events in the trace buffer */
- TRACE_ITER_CLEAN = 1
-};
-
-/**
- * @brief Request with #MRQ_TRACE_ITER
- */
-struct mrq_trace_iter_request {
- /** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
- uint32_t cmd;
-} BPMP_ABI_PACKED;
-
-/** @} */
+/** @} CC3 */
+/** @endcond bpmp_t194 */
/**
* @ingroup MRQ_Codes
@@ -2351,7 +2902,8 @@ union mrq_ringbuf_console_bpmp_to_host_response {
struct cmd_ringbuf_console_write_resp write;
struct cmd_ringbuf_console_get_fifo_resp get_fifo;
} BPMP_ABI_PACKED;
-/** @} */
+
+/** @} RingbufConsole */
/**
* @ingroup MRQ_Codes
@@ -2359,7 +2911,7 @@ union mrq_ringbuf_console_bpmp_to_host_response {
* @brief Set a strap value controlled by BPMP
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_strap_request
@@ -2390,17 +2942,14 @@ enum mrq_strap_cmd {
struct mrq_strap_request {
/** @brief @ref mrq_strap_cmd */
uint32_t cmd;
- /** @brief Strap ID from @ref Strap_Ids */
+ /** @brief Strap ID from @ref Strap_Identifiers */
uint32_t id;
/** @brief Desired value for strap (if cmd is #STRAP_SET) */
uint32_t value;
} BPMP_ABI_PACKED;
-/**
- * @defgroup Strap_Ids Strap Identifiers
- * @}
- */
-/** @endcond */
+/** @} Strap */
+/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
@@ -2408,7 +2957,7 @@ struct mrq_strap_request {
* @brief Perform a UPHY operation
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_uphy_request
@@ -2423,6 +2972,9 @@ enum {
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5,
+ CMD_UPHY_DISPLAY_PORT_INIT = 6,
+ CMD_UPHY_DISPLAY_PORT_OFF = 7,
+ CMD_UPHY_XUSB_DYN_LANES_RESTORE = 8,
CMD_UPHY_MAX,
};
@@ -2445,28 +2997,41 @@ struct cmd_uphy_margin_status_response {
} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_init_request {
- /** @brief EP controller number, valid: 0, 4, 5 */
+ /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T239 valid: 0 */
uint8_t ep_controller;
} BPMP_ABI_PACKED;
struct cmd_uphy_pcie_controller_state_request {
- /** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */
+ /** @brief PCIE controller number, T194 valid: 0-4; T234 valid: 0-10; T239 valid: 0-3 */
uint8_t pcie_controller;
uint8_t enable;
} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_off_request {
- /** @brief EP controller number, valid: 0, 4, 5 */
+ /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T239 valid: 0 */
uint8_t ep_controller;
} BPMP_ABI_PACKED;
+struct cmd_uphy_display_port_init_request {
+ /** @brief DisplayPort link rate, T239 valid: 1620, 2700, 5400, 8100, 2160, 2430, 3240, 4320, 6750 */
+ uint16_t link_rate;
+ /** @brief 1: lane 0; 2: lane 1; 3: lane 0 and 1 */
+ uint16_t lanes_bitmap;
+} BPMP_ABI_PACKED;
+
+struct cmd_uphy_xusb_dyn_lanes_restore_request {
+ /** @brief 1: lane 0; 2: lane 1; 3: lane 0 and 1 */
+ uint16_t lanes_bitmap;
+} BPMP_ABI_PACKED;
+
/**
* @ingroup UPHY
* @brief Request with #MRQ_UPHY
*
- * Used by the sender of an #MRQ_UPHY message to control UPHY Lane RX margining.
- * The uphy_request is split into several sub-commands. Some sub-commands
- * require no additional data. Others have a sub-command specific payload
+ * Used by the sender of an #MRQ_UPHY message to control UPHY.
+ * The uphy_request is split into several sub-commands. CMD_UPHY_PCIE_LANE_MARGIN_STATUS
+ * requires no additional data; the others have a sub-command-specific payload. The
+ * table below shows each sub-command with its corresponding payload data.
*
* |sub-command |payload |
* |------------------------------------ |----------------------------------------|
@@ -2475,6 +3040,9 @@ struct cmd_uphy_ep_controller_pll_off_request {
* |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
* |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request |
* |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |cmd_uphy_ep_controller_pll_off_request |
+ * |CMD_UPHY_DISPLAY_PORT_INIT           |cmd_uphy_display_port_init_request      |
+ * |CMD_UPHY_DISPLAY_PORT_OFF            |                                        |
+ * |CMD_UPHY_XUSB_DYN_LANES_RESTORE |cmd_uphy_xusb_dyn_lanes_restore_request |
*
*/
@@ -2489,6 +3057,8 @@ struct mrq_uphy_request {
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
+ struct cmd_uphy_display_port_init_request display_port_init;
+ struct cmd_uphy_xusb_dyn_lanes_restore_request xusb_dyn_lanes_restore;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
@@ -2513,8 +3083,8 @@ struct mrq_uphy_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @} UPHY */
+/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
/**
* @ingroup MRQ_Codes
@@ -2522,14 +3092,16 @@ struct mrq_uphy_response {
* @brief Perform a frequency monitor configuration operations
*
* * Platforms: T194 onwards
- * @cond bpmp_t194
+ * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_fmon_request
* * Response Payload: @ref mrq_fmon_response
+ * @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
*
* @addtogroup FMON
* @{
+ * @cond (bpmp_t194 || bpmp_t234)
*/
enum {
/**
@@ -2538,6 +3110,20 @@ enum {
* The monitored clock must be running for clamp to succeed. If
* clamped, FMON configuration is preserved when clock rate
* and/or state is changed.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EACCES: FMON access error @n
+ * -#BPMP_EBADCMD: subcommand is not supported @n
+ * -#BPMP_EBADSLT: clamp FMON on cluster with auto-CC3 enabled @n
+ * -#BPMP_EBUSY: fmon is already clamped at different rate @n
+ * -#BPMP_EFAULT: self-diagnostic error @n
+ * -#BPMP_EINVAL: invalid FMON configuration @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_ENOSYS: clamp FMON on cluster clock w/ no NAFLL @n
+ * -#BPMP_ETIMEDOUT: operation timed out @n
*/
CMD_FMON_GEAR_CLAMP = 1,
/**
@@ -2545,6 +3131,13 @@ enum {
*
* Allow FMON configuration to follow monitored clock rate
* and/or state changes.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EBADCMD: subcommand is not supported @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
*/
CMD_FMON_GEAR_FREE = 2,
/**
@@ -2553,11 +3146,54 @@ enum {
*
* Inherently racy, since clamp state can be changed
* concurrently. Useful for testing.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EBADCMD: subcommand is not supported @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
*/
CMD_FMON_GEAR_GET = 3,
- CMD_FMON_NUM,
+ /**
+ * @brief Return the current status of FMON faults detected by FMON
+ * h/w or s/w since the last invocation of this command.
+ * Clears the fault status.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EBADCMD: subcommand is not supported @n
+ * -#BPMP_EINVAL: invalid fault type @n
+ * -#BPMP_ENODEV: invalid clk_id @n
+ * -#BPMP_ENOENT: no calibration data, uninitialized @n
+ * -#BPMP_ENOTSUP: avfs config not set @n
+ * -#BPMP_EOPNOTSUPP: not in production mode @n
+ */
+ CMD_FMON_FAULT_STS_GET = 4,
};
+/**
+ * @cond DEPRECATED
+ * Kept for backward compatibility
+ */
+#define CMD_FMON_NUM 4
+
+/** @endcond DEPRECATED */
+
+/**
+ * @defgroup fmon_fault_type FMON fault type
+ * @addtogroup fmon_fault_type
+ * @{
+ */
+/** @brief All detected FMON faults (h/w or s/w) */
+#define FMON_FAULT_TYPE_ALL 0U
+/** @brief FMON faults detected by h/w */
+#define FMON_FAULT_TYPE_HW 1U
+/** @brief FMON faults detected by s/w */
+#define FMON_FAULT_TYPE_SW 2U
+
+/** @} fmon_fault_type */
+
+
struct cmd_fmon_gear_clamp_request {
int32_t unused;
int64_t rate;
@@ -2587,6 +3223,14 @@ struct cmd_fmon_gear_get_response {
int64_t rate;
} BPMP_ABI_PACKED;
+struct cmd_fmon_fault_sts_get_request {
+ uint32_t fault_type; /**< @ref fmon_fault_type */
+} BPMP_ABI_PACKED;
+
+struct cmd_fmon_fault_sts_get_response {
+ uint32_t fault_sts;
+} BPMP_ABI_PACKED;
+
/**
* @ingroup FMON
* @brief Request with #MRQ_FMON
@@ -2601,9 +3245,9 @@ struct cmd_fmon_gear_get_response {
* |CMD_FMON_GEAR_CLAMP |fmon_gear_clamp |
* |CMD_FMON_GEAR_FREE |- |
* |CMD_FMON_GEAR_GET |- |
+ * |CMD_FMON_FAULT_STS_GET |fmon_fault_sts_get |
*
*/
-
struct mrq_fmon_request {
/** @brief Sub-command and clock id concatenated to 32-bit word.
* - bits[31..24] is the sub-cmd.
@@ -2618,6 +3262,7 @@ struct mrq_fmon_request {
struct cmd_fmon_gear_free_request fmon_gear_free;
/** @private */
struct cmd_fmon_gear_get_request fmon_gear_get;
+ struct cmd_fmon_fault_sts_get_request fmon_fault_sts_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
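The cmd_and_id packing described above lends itself to a small helper. The following is a sketch under the assumption, taken from the full header, that the 32-bit field is named cmd_and_id with the sub-command in bits [31:24] and the clock id in bits [23:0]:

	/* Sketch: pack sub-command and clock id (assumed bit layout). */
	static inline uint32_t fmon_cmd_and_id(uint32_t cmd, uint32_t clk_id)
	{
		return (cmd << 24) | (clk_id & 0xffffffU);
	}

	/* Fragment: query h/w-detected fault status for a given clk_id. */
	struct mrq_fmon_request req = {
		.cmd_and_id = fmon_cmd_and_id(CMD_FMON_FAULT_STS_GET, clk_id),
		.fmon_fault_sts_get = { .fault_type = FMON_FAULT_TYPE_HW },
	};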
@@ -2633,6 +3278,7 @@ struct mrq_fmon_request {
* |CMD_FMON_GEAR_CLAMP |- |
* |CMD_FMON_GEAR_FREE |- |
* |CMD_FMON_GEAR_GET |fmon_gear_get |
+ * |CMD_FMON_FAULT_STS_GET |fmon_fault_sts_get |
*
*/
@@ -2643,11 +3289,12 @@ struct mrq_fmon_response {
/** @private */
struct cmd_fmon_gear_free_response fmon_gear_free;
struct cmd_fmon_gear_get_response fmon_gear_get;
+ struct cmd_fmon_fault_sts_get_response fmon_fault_sts_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @endcond (bpmp_t194 || bpmp_t234) */
+/** @} FMON */
/**
* @ingroup MRQ_Codes
@@ -2655,7 +3302,7 @@ struct mrq_fmon_response {
* @brief Provide status information on faults reported by Error
* Collator (EC) to HSM.
*
- * * Platforms: T194 onwards
+ * * Platforms: T194
* @cond bpmp_t194
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -2664,8 +3311,10 @@ struct mrq_fmon_response {
*
* @note This MRQ ABI is under construction and subject to change
*
+ * @endcond bpmp_t194
* @addtogroup EC
* @{
+ * @cond bpmp_t194
*/
enum {
/**
@@ -2676,7 +3325,7 @@ enum {
* -#BPMP_ENODEV if target EC is not owned by BPMP @n
* -#BPMP_EACCES if target EC power domain is turned off @n
* -#BPMP_EBADCMD if subcommand is not supported
- * @endcond
+ * @endcond DEPRECATED
*/
CMD_EC_STATUS_GET = 1, /* deprecated */
@@ -2787,7 +3436,8 @@ enum ec_registers_group {
#define EC_STATUS_FLAG_LAST_ERROR 0x0002U
/** @brief EC latent error flag */
#define EC_STATUS_FLAG_LATENT_ERROR 0x0004U
-/** @} */
+
+/** @} bpmp_ec_status_flags */
/**
* @defgroup bpmp_ec_desc_flags EC Descriptor Flags
@@ -2798,7 +3448,8 @@ enum ec_registers_group {
#define EC_DESC_FLAG_RESOLVED 0x0001U
/** @brief EC descriptor failed to retrieve id flag */
#define EC_DESC_FLAG_NO_ID 0x0002U
-/** @} */
+
+/** @} bpmp_ec_desc_flags */
/**
* |error type | fmon_clk_id values |
@@ -2810,14 +3461,18 @@ struct ec_err_fmon_desc {
uint16_t desc_flags;
/** @brief FMON monitored clock id */
uint16_t fmon_clk_id;
- /** @brief Bitmask of @ref bpmp_fmon_faults_flags */
+ /**
+ * @brief Bitmask of fault flags
+ *
+ * @ref bpmp_fmon_faults_flags
+ */
uint32_t fmon_faults;
/** @brief FMON faults access error */
int32_t fmon_access_error;
} BPMP_ABI_PACKED;
/**
- * |error type | vmon_adc_id values |
+ * | error type | vmon_adc_id values |
* |---------------------------------|---------------------------|
* |@ref EC_ERR_TYPE_VOLTAGE_MONITOR |@ref bpmp_adc_ids |
*/
@@ -2826,16 +3481,16 @@ struct ec_err_vmon_desc {
uint16_t desc_flags;
/** @brief VMON rail adc id */
uint16_t vmon_adc_id;
- /** @brief Bitmask of @ref bpmp_vmon_faults_flags */
+ /** @brief Bitmask of bpmp_vmon_faults_flags */
uint32_t vmon_faults;
/** @brief VMON faults access error */
int32_t vmon_access_error;
} BPMP_ABI_PACKED;
/**
- * |error type | reg_id values |
- * |---------------------------------|---------------------------|
- * |@ref EC_ERR_TYPE_REGISTER_PARITY |@ref bpmp_ec_registers_ids |
+ * |error type | reg_id values |
+ * |---------------------------------|-----------------------|
+ * |@ref EC_ERR_TYPE_REGISTER_PARITY | bpmp_ec_registers_ids |
*/
struct ec_err_reg_parity_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
@@ -2847,10 +3502,10 @@ struct ec_err_reg_parity_desc {
} BPMP_ABI_PACKED;
/**
- * |error type | err_source_id values |
- * |--------------------------------- |--------------------------|
- * |@ref EC_ERR_TYPE_SW_CORRECTABLE | @ref bpmp_ec_ce_swd_ids |
- * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | @ref bpmp_ec_ue_swd_ids |
+ * |error type | err_source_id values |
+ * |--------------------------------- |----------------------|
+ * |@ref EC_ERR_TYPE_SW_CORRECTABLE | bpmp_ec_ce_swd_ids |
+ * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | bpmp_ec_ue_swd_ids |
*/
struct ec_err_sw_error_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
@@ -2862,15 +3517,15 @@ struct ec_err_sw_error_desc {
} BPMP_ABI_PACKED;
/**
- * |error type | err_source_id values |
- * |----------------------------------------|---------------------------|
- * |@ref EC_ERR_TYPE_PARITY_INTERNAL |@ref bpmp_ec_ipath_ids |
- * |@ref EC_ERR_TYPE_ECC_SEC_INTERNAL |@ref bpmp_ec_ipath_ids |
- * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids |
- * |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids|
- * |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
+ * |error type | err_source_id values |
+ * |----------------------------------------|------------------------|
+ * |@ref EC_ERR_TYPE_PARITY_INTERNAL | bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_ECC_SEC_INTERNAL | bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL | bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_COMPARATOR | bpmp_ec_comparator_ids|
+ * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE | bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE | bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_PARITY_SRAM | bpmp_clock_ids |
*/
struct ec_err_simple_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
@@ -2917,7 +3572,7 @@ struct cmd_ec_status_get_response {
/** @brief EC error descriptors */
union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
} BPMP_ABI_PACKED;
-/** @endcond */
+/** @endcond DEPRECATED */
struct cmd_ec_status_ex_get_response {
/** @brief Target EC id (the same id received with request). */
@@ -2955,7 +3610,7 @@ struct cmd_ec_status_ex_get_response {
* |sub-command |payload |
* |----------------------------|-----------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
- * @endcond
+ * @endcond DEPRECATED
*
* |sub-command |payload |
* |----------------------------|-----------------------|
@@ -2983,7 +3638,7 @@ struct mrq_ec_request {
* |sub-command |payload |
* |----------------------------|------------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
- * @endcond
+ * @endcond DEPRECATED
*
* |sub-command |payload |
* |----------------------------|------------------------|
@@ -2997,13 +3652,264 @@ struct mrq_ec_response {
* @cond DEPRECATED
*/
struct cmd_ec_status_get_response ec_status_get;
- /** @endcond */
+ /** @endcond DEPRECATED */
struct cmd_ec_status_ex_get_response ec_status_ex_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} */
-/** @endcond */
+/** @endcond bpmp_t194 */
+/** @} EC */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TELEMETRY
+ * @brief Get the address of a memory buffer refreshed with recently
+ * sampled telemetry data
+ *
+ * * Platforms: TH500 onwards
+ * @cond bpmp_th500
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_telemetry_response
+ * @addtogroup Telemetry
+ * @{
+ */
+
+/**
+ * @brief Response to #MRQ_TELEMETRY
+ *
+ * mrq_response::err is
+ * * 0: Telemetry data is available at returned address
+ * * -#BPMP_EACCES: MRQ master is not allowed to request buffer refresh
+ * * -#BPMP_ENAVAIL: Telemetry buffer cannot be refreshed via this MRQ channel
+ * * -#BPMP_ENOTSUP: Telemetry buffer is not supported by BPMP-FW
+ * * -#BPMP_ENODEV: Telemetry mrq is not supported by BPMP-FW
+ */
+struct mrq_telemetry_response {
+ /** @brief Physical address of telemetry data buffer */
+ uint64_t data_buf_addr; /**< see @ref bpmp_telemetry_layout */
+} BPMP_ABI_PACKED;
+
+/** @} Telemetry */
+/** @endcond bpmp_th500 */
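A sketch of a caller, for illustration only: MRQ_TELEMETRY takes no request payload, and the returned physical address would typically be mapped before use. TELEMETRY_BUF_SIZE is a hypothetical size; the actual layout is described by bpmp_telemetry_layout:

	struct mrq_telemetry_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_TELEMETRY,
		.rx = { .data = &resp, .size = sizeof(resp) },
	};

	if (!tegra_bpmp_transfer(bpmp, &msg) && !msg.rx.ret) {
		/* Map the buffer reported by the firmware (assumed usage). */
		void *buf = memremap(resp.data_buf_addr,
				     TELEMETRY_BUF_SIZE, MEMREMAP_WB);
		/* ... read samples, then memunmap(buf) ... */
	}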
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PWR_LIMIT
+ * @brief Control power limits.
+ *
+ * * Platforms: TH500 onwards
+ * @cond bpmp_th500
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pwr_limit_request
+ * * Response Payload: @ref mrq_pwr_limit_response
+ *
+ * @addtogroup Pwrlimit
+ * @{
+ */
+enum mrq_pwr_limit_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * command
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_PWR_LIMIT_QUERY_ABI = 0,
+
+ /**
+ * @brief Set power limit
+ *
+ * mrq_response::err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ * * -#BPMP_EACCES: Request is not accepted
+ */
+ CMD_PWR_LIMIT_SET = 1,
+
+ /**
+ * @brief Get power limit setting
+ *
+ * mrq_response::err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ */
+ CMD_PWR_LIMIT_GET = 2,
+
+ /**
+ * @brief Get current power cap
+ *
+ * mrq_response::err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ */
+ CMD_PWR_LIMIT_CURR_CAP = 3,
+};
+
+/**
+ * @defgroup bpmp_pwr_limit_type PWR_LIMIT TYPEs
+ * @{
+ */
+/** @brief Limit value specifies target cap */
+#define PWR_LIMIT_TYPE_TARGET_CAP 0U
+/** @brief Limit value specifies maximum possible target cap */
+#define PWR_LIMIT_TYPE_BOUND_MAX 1U
+/** @brief Limit value specifies minimum possible target cap */
+#define PWR_LIMIT_TYPE_BOUND_MIN 2U
+/** @brief Number of limit types supported by mrq interface */
+#define PWR_LIMIT_TYPE_NUM 3U
+
+/** @} bpmp_pwr_limit_type */
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_QUERY_ABI
+ */
+struct cmd_pwr_limit_query_abi_request {
+ uint32_t cmd_code; /**< @ref mrq_pwr_limit_cmd */
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_SET
+ *
+ * Set the specified limit of the specified type from the specified source.
+ * Success of the request means that the specified value is accepted as input
+ * to arbitration with other sources' settings for the same limit of the same
+ * type. A zero limit is ignored by the arbitration (i.e., it indicates "no limit set").
+ */
+struct cmd_pwr_limit_set_request {
+ uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ uint32_t limit_src; /**< @ref bpmp_pwr_limit_src */
+ uint32_t limit_type; /**< @ref bpmp_pwr_limit_type */
+ uint32_t limit_setting;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_GET
+ *
+ * Get the limit value of the specified type previously set from the
+ * specified source.
+ */
+struct cmd_pwr_limit_get_request {
+ uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ uint32_t limit_src; /**< @ref bpmp_pwr_limit_src */
+ uint32_t limit_type; /**< @ref bpmp_pwr_limit_type */
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_GET
+ */
+struct cmd_pwr_limit_get_response {
+ uint32_t limit_setting;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_CURR_CAP
+ *
+ * For the specified limit, get the current power cap aggregated from all sources.
+ */
+struct cmd_pwr_limit_curr_cap_request {
+ uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_CURR_CAP
+ */
+struct cmd_pwr_limit_curr_cap_response {
+ uint32_t curr_cap;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request with #MRQ_PWR_LIMIT
+ *
+ * |sub-command |payload |
+ * |----------------------------|---------------------------------|
+ * |CMD_PWR_LIMIT_QUERY_ABI | cmd_pwr_limit_query_abi_request |
+ * |CMD_PWR_LIMIT_SET | cmd_pwr_limit_set_request |
+ * |CMD_PWR_LIMIT_GET | cmd_pwr_limit_get_request |
+ * |CMD_PWR_LIMIT_CURR_CAP | cmd_pwr_limit_curr_cap_request |
+ */
+struct mrq_pwr_limit_request {
+ uint32_t cmd;
+ union {
+ struct cmd_pwr_limit_query_abi_request pwr_limit_query_abi_req;
+ struct cmd_pwr_limit_set_request pwr_limit_set_req;
+ struct cmd_pwr_limit_get_request pwr_limit_get_req;
+ struct cmd_pwr_limit_curr_cap_request pwr_limit_curr_cap_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response to MRQ_PWR_LIMIT
+ *
+ * |sub-command |payload |
+ * |----------------------------|---------------------------------|
+ * |CMD_PWR_LIMIT_QUERY_ABI | - |
+ * |CMD_PWR_LIMIT_SET | - |
+ * |CMD_PWR_LIMIT_GET | cmd_pwr_limit_get_response |
+ * |CMD_PWR_LIMIT_CURR_CAP | cmd_pwr_limit_curr_cap_response |
+ */
+struct mrq_pwr_limit_response {
+ union {
+ struct cmd_pwr_limit_get_response pwr_limit_get_rsp;
+ struct cmd_pwr_limit_curr_cap_response pwr_limit_curr_cap_rsp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} Pwrlimit */
+/** @endcond bpmp_th500 */
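For illustration, a sketch of a CMD_PWR_LIMIT_SET request. The limit_id and limit_src placeholders stand in for values from bpmp_pwr_limit_id and bpmp_pwr_limit_src, which are defined elsewhere in the ABI, and the milliwatts unit is an assumption:

	struct mrq_pwr_limit_request req = {
		.cmd = CMD_PWR_LIMIT_SET,
		.pwr_limit_set_req = {
			.limit_id = limit_id,
			.limit_src = limit_src,
			.limit_type = PWR_LIMIT_TYPE_TARGET_CAP,
			/* 0 would mean "no limit set" for the arbitration */
			.limit_setting = milliwatts,
		},
	};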
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_GEARS
+ * @brief Get thresholds for NDIV offset switching
+ *
+ * * Platforms: TH500 onwards
+ * @cond bpmp_th500
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_gears_response
+ * @addtogroup Gears
+ * @{
+ */
+
+/**
+ * @brief Response to #MRQ_GEARS
+ *
+ * Used by the sender of an #MRQ_GEARS message to request thresholds
+ * for NDIV offset switching.
+ *
+ * The mrq_gears_response::ncpu array defines four thresholds in units
+ * of the number of online CPUs, used for choosing between five different
+ * NDIV offset settings for CCPLEX cluster NAFLLs:
+ *
+ * 1. If number of online CPUs < ncpu[0] use offset0
+ * 2. If number of online CPUs < ncpu[1] use offset1
+ * 3. If number of online CPUs < ncpu[2] use offset2
+ * 4. If number of online CPUs < ncpu[3] use offset3
+ * 5. If number of online CPUs >= ncpu[3] disable offsetting
+ *
+ * For TH500, the mrq_gears_response::ncpu array has four valid entries.
+ *
+ * mrq_response::err is
+ * * 0: gears defined and response data valid
+ * * -#BPMP_ENODEV: MRQ is not supported by BPMP-FW
+ * * -#BPMP_EACCES: Operation not permitted for the MRQ master
+ * * -#BPMP_ENAVAIL: NDIV offsetting is disabled
+ */
+struct mrq_gears_response {
+ /** @brief number of online CPUs for each gear */
+ uint32_t ncpu[16];
+} BPMP_ABI_PACKED;
+
+/** @} Gears */
+/** @endcond bpmp_th500 */
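The threshold scheme above maps directly onto a small selection helper. This sketch mirrors the five cases and is not part of the ABI:

	/* Sketch: choose an NDIV offset gear from the MRQ_GEARS thresholds. */
	static int gears_select_offset(const uint32_t *ncpu, unsigned int online)
	{
		unsigned int i;

		for (i = 0; i < 4; i++)
			if (online < ncpu[i])
				return i;	/* use offset<i> */

		return -1;	/* online >= ncpu[3]: disable offsetting */
	}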
/**
* @addtogroup Error_Codes
@@ -3047,12 +3953,18 @@ struct mrq_ec_response {
#define BPMP_ENOSYS 38
/** @brief Invalid slot */
#define BPMP_EBADSLT 57
+/** @brief Invalid message */
+#define BPMP_EBADMSG 77
+/** @brief Operation not supported */
+#define BPMP_EOPNOTSUPP 95
+/** @brief Targeted resource not available */
+#define BPMP_ENAVAIL 119
/** @brief Not supported */
#define BPMP_ENOTSUP 134
/** @brief No such device or address */
#define BPMP_ENXIO 140
-/** @} */
+/** @} Error_Codes */
#if defined(BPMP_ABI_CHECKS)
#include "bpmp_abi_checks.h"
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index f2604e99af09..f5e4ac5b8cce 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -6,6 +6,7 @@
#ifndef __SOC_TEGRA_BPMP_H
#define __SOC_TEGRA_BPMP_H
+#include <linux/iosys-map.h>
#include <linux/mailbox_client.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
@@ -36,10 +37,22 @@ struct tegra_bpmp_mb_data {
u8 data[MSG_DATA_MIN_SZ];
} __packed;
+#define tegra_bpmp_mb_read(dst, mb, size) \
+ iosys_map_memcpy_from(dst, mb, offsetof(struct tegra_bpmp_mb_data, data), size)
+
+#define tegra_bpmp_mb_write(mb, src, size) \
+ iosys_map_memcpy_to(mb, offsetof(struct tegra_bpmp_mb_data, data), src, size)
+
+#define tegra_bpmp_mb_read_field(mb, field) \
+ iosys_map_rd_field(mb, 0, struct tegra_bpmp_mb_data, field)
+
+#define tegra_bpmp_mb_write_field(mb, field, value) \
+ iosys_map_wr_field(mb, 0, struct tegra_bpmp_mb_data, field, value)
+
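These accessors wrap the iosys_map helpers so that channel memory may live in either system or I/O memory. A sketch of how a caller might use them, assuming the code field that precedes data in struct tegra_bpmp_mb_data and abbreviating the surrounding message handling:

	/* Fill the outbound mailbox with the request ... */
	tegra_bpmp_mb_write_field(&channel->ob, code, msg->mrq);
	tegra_bpmp_mb_write(&channel->ob, msg->tx.data, msg->tx.size);

	/* ... and later read the response code and payload back. */
	ret = tegra_bpmp_mb_read_field(&channel->ib, code);
	tegra_bpmp_mb_read(msg->rx.data, &channel->ib, msg->rx.size);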
struct tegra_bpmp_channel {
struct tegra_bpmp *bpmp;
- struct tegra_bpmp_mb_data *ib;
- struct tegra_bpmp_mb_data *ob;
+ struct iosys_map ib;
+ struct iosys_map ob;
struct completion completion;
struct tegra_ivc *ivc;
unsigned int index;
@@ -89,8 +102,12 @@ struct tegra_bpmp {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_mirror;
#endif
+
+ bool suspended;
};
+#define TEGRA_BPMP_MESSAGE_RESET BIT(0)
+
struct tegra_bpmp_message {
unsigned int mrq;
@@ -104,6 +121,8 @@ struct tegra_bpmp_message {
size_t size;
int ret;
} rx;
+
+ unsigned long flags;
};
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h
index 98027a76ce3d..8ec1ac07fc85 100644
--- a/include/soc/tegra/common.h
+++ b/include/soc/tegra/common.h
@@ -6,6 +6,52 @@
#ifndef __SOC_TEGRA_COMMON_H__
#define __SOC_TEGRA_COMMON_H__
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * Tegra SoC core device OPP table configuration
+ *
+ * @init_state: pre-initialize OPP state of a device
+ */
+struct tegra_core_opp_params {
+ bool init_state;
+};
+
+#ifdef CONFIG_ARCH_TEGRA
bool soc_is_tegra(void);
+int devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params);
+#else
+static inline bool soc_is_tegra(void)
+{
+ return false;
+}
+
+static inline int
+devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params)
+{
+ return -ENODEV;
+}
+#endif
+
+static inline int
+devm_tegra_core_dev_init_opp_table_common(struct device *dev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ int err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
+ if (err != -ENODEV)
+ return err;
+
+ return 0;
+}
+
#endif /* __SOC_TEGRA_COMMON_H__ */
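A sketch of typical use from a driver's probe path; because the helper converts a missing OPP table (-ENODEV) into success, callers need no Tegra-specific fallback:

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;	/* only real failures reach here */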
diff --git a/include/soc/tegra/emc.h b/include/soc/tegra/emc.h
deleted file mode 100644
index 05199a97ccf4..000000000000
--- a/include/soc/tegra/emc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
- */
-
-#ifndef __SOC_TEGRA_EMC_H__
-#define __SOC_TEGRA_EMC_H__
-
-struct tegra_emc;
-
-int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
- unsigned long rate);
-void tegra_emc_complete_timing_change(struct tegra_emc *emc,
- unsigned long rate);
-
-#endif /* __SOC_TEGRA_EMC_H__ */
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 1097feca41ed..8f421b9f7585 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __SOC_TEGRA_FUSE_H__
#define __SOC_TEGRA_FUSE_H__
+#include <linux/types.h>
+
#define TEGRA20 0x20
#define TEGRA30 0x30
#define TEGRA114 0x35
@@ -14,6 +16,9 @@
#define TEGRA210 0x21
#define TEGRA186 0x18
#define TEGRA194 0x19
+#define TEGRA234 0x23
+#define TEGRA241 0x24
+#define TEGRA264 0x26
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
@@ -21,9 +26,6 @@
#ifndef __ASSEMBLY__
-u32 tegra_read_chipid(void);
-u8 tegra_get_chip_id(void);
-
enum tegra_revision {
TEGRA_REVISION_UNKNOWN = 0,
TEGRA_REVISION_A01,
@@ -34,6 +36,20 @@ enum tegra_revision {
TEGRA_REVISION_MAX,
};
+enum tegra_platform {
+ TEGRA_PLATFORM_SILICON = 0,
+ TEGRA_PLATFORM_QT,
+ TEGRA_PLATFORM_SYSTEM_FPGA,
+ TEGRA_PLATFORM_UNIT_FPGA,
+ TEGRA_PLATFORM_ASIM_QT,
+ TEGRA_PLATFORM_ASIM_LINSIM,
+ TEGRA_PLATFORM_DSIM_ASIM_LINSIM,
+ TEGRA_PLATFORM_VERIFICATION_SIMULATION,
+ TEGRA_PLATFORM_VDK,
+ TEGRA_PLATFORM_VSP,
+ TEGRA_PLATFORM_MAX,
+};
+
struct tegra_sku_info {
int sku_id;
int cpu_process_id;
@@ -47,13 +63,62 @@ struct tegra_sku_info {
int gpu_speedo_id;
int gpu_speedo_value;
enum tegra_revision revision;
+ enum tegra_platform platform;
};
+#ifdef CONFIG_ARCH_TEGRA
+extern struct tegra_sku_info tegra_sku_info;
u32 tegra_read_straps(void);
u32 tegra_read_ram_code(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
+u32 tegra_read_chipid(void);
+u8 tegra_get_chip_id(void);
+u8 tegra_get_platform(void);
+bool tegra_is_silicon(void);
+int tegra194_miscreg_mask_serror(void);
+#else
+static struct tegra_sku_info tegra_sku_info __maybe_unused;
-extern struct tegra_sku_info tegra_sku_info;
+static inline u32 tegra_read_straps(void)
+{
+ return 0;
+}
+
+static inline u32 tegra_read_ram_code(void)
+{
+ return 0;
+}
+
+static inline int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline u32 tegra_read_chipid(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_chip_id(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_platform(void)
+{
+ return 0;
+}
+
+static inline bool tegra_is_silicon(void)
+{
+ return false;
+}
+
+static inline int tegra194_miscreg_mask_serror(void)
+{
+ return false;
+}
+#endif
struct device *tegra_soc_device_register(void);
diff --git a/include/soc/tegra/irq.h b/include/soc/tegra/irq.h
index 8eb11a7109e4..94539551c8c1 100644
--- a/include/soc/tegra/irq.h
+++ b/include/soc/tegra/irq.h
@@ -6,8 +6,15 @@
#ifndef __SOC_TEGRA_IRQ_H
#define __SOC_TEGRA_IRQ_H
-#if defined(CONFIG_ARM)
+#include <linux/types.h>
+
+#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
bool tegra_pending_sgi(void);
+#else
+static inline bool tegra_pending_sgi(void)
+{
+ return false;
+}
#endif
#endif /* __SOC_TEGRA_IRQ_H */
diff --git a/include/soc/tegra/ivc.h b/include/soc/tegra/ivc.h
index 4aeb77cc22c5..be45d5f5adea 100644
--- a/include/soc/tegra/ivc.h
+++ b/include/soc/tegra/ivc.h
@@ -4,9 +4,11 @@
*/
#ifndef __TEGRA_IVC_H
+#define __TEGRA_IVC_H
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/iosys-map.h>
#include <linux/types.h>
struct tegra_ivc_header;
@@ -15,7 +17,7 @@ struct tegra_ivc {
struct device *peer;
struct {
- struct tegra_ivc_header *channel;
+ struct iosys_map map;
unsigned int position;
dma_addr_t phys;
} rx, tx;
@@ -36,7 +38,7 @@ struct tegra_ivc {
*
- * Returns a pointer to the frame, or an error encoded pointer.
+ * Returns 0 on success, with the frame stored in @map, or a negative
+ * error code on failure.
*/
-void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc);
+int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map);
/**
* tegra_ivc_read_advance - Advance the read queue
@@ -56,7 +58,7 @@ int tegra_ivc_read_advance(struct tegra_ivc *ivc);
*
- * Returns a pointer to the frame, or an error encoded pointer.
+ * Returns 0 on success, with the frame stored in @map, or a negative
+ * error code on failure.
*/
-void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc);
+int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map);
/**
* tegra_ivc_write_advance - Advance the write queue
@@ -91,8 +93,8 @@ void tegra_ivc_reset(struct tegra_ivc *ivc);
size_t tegra_ivc_align(size_t size);
unsigned tegra_ivc_total_queue_size(unsigned queue_size);
-int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
- dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
+ dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
unsigned int num_frames, size_t frame_size,
void (*notify)(struct tegra_ivc *ivc, void *data),
void *data);
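With this rework, IVC frames are handed out as an iosys_map rather than a raw pointer. A sketch of the updated read path, with buffer handling abbreviated:

	struct iosys_map frame;
	int err = tegra_ivc_read_get_next_frame(ivc, &frame);

	if (!err) {
		/* Copy the frame payload out and release the frame. */
		iosys_map_memcpy_from(buf, &frame, 0, len);
		tegra_ivc_read_advance(ivc);
	}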
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 1238e35653d1..6ee4c59db620 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -6,42 +6,63 @@
#ifndef __SOC_TEGRA_MC_H__
#define __SOC_TEGRA_MC_H__
+#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/irq.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
+#include <linux/tegra-icc.h>
struct clk;
struct device;
struct page;
-struct tegra_smmu_enable {
- unsigned int reg;
- unsigned int bit;
-};
-
struct tegra_mc_timing {
unsigned long rate;
u32 *emem_data;
};
-/* latency allowance */
-struct tegra_mc_la {
- unsigned int reg;
- unsigned int shift;
- unsigned int mask;
- unsigned int def;
-};
-
struct tegra_mc_client {
unsigned int id;
+ unsigned int bpmp_id;
+ enum tegra_icc_client_type type;
const char *name;
- unsigned int swgroup;
+ /*
+ * For Tegra210 and earlier, this is the SWGROUP ID used for IOVA translations in the
+ * Tegra SMMU, whereas on Tegra186 and later this is the ID used to override
+ * the ARM SMMU stream ID for IOVA translations for the given memory client.
+ */
+ union {
+ unsigned int swgroup;
+ unsigned int sid;
+ };
unsigned int fifo_size;
- struct tegra_smmu_enable smmu;
- struct tegra_mc_la la;
+ struct {
+ /* Tegra SMMU enable (Tegra210 and earlier) */
+ struct {
+ unsigned int reg;
+ unsigned int bit;
+ } smmu;
+
+ /* latency allowance */
+ struct {
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int def;
+ } la;
+
+ /* stream ID overrides (Tegra186 and later) */
+ struct {
+ unsigned int override;
+ unsigned int security;
+ } sid;
+ } regs;
};
struct tegra_smmu_swgroup {
@@ -75,7 +96,6 @@ struct tegra_smmu_soc {
struct tegra_mc;
struct tegra_smmu;
-struct gart_device;
#ifdef CONFIG_TEGRA_IOMMU_SMMU
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
@@ -95,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
}
#endif
-#ifdef CONFIG_TEGRA_IOMMU_GART
-struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
-int tegra_gart_suspend(struct gart_device *gart);
-int tegra_gart_resume(struct gart_device *gart);
-#else
-static inline struct gart_device *
-tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline int tegra_gart_suspend(struct gart_device *gart)
-{
- return -ENODEV;
-}
-
-static inline int tegra_gart_resume(struct gart_device *gart)
-{
- return -ENODEV;
-}
-#endif
-
struct tegra_mc_reset {
const char *name;
unsigned long id;
@@ -141,6 +139,35 @@ struct tegra_mc_reset_ops {
const struct tegra_mc_reset *rst);
};
+#define TEGRA_MC_ICC_TAG_DEFAULT 0
+#define TEGRA_MC_ICC_TAG_ISO BIT(0)
+
+struct tegra_mc_icc_ops {
+ int (*set)(struct icc_node *src, struct icc_node *dst);
+ int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+ struct icc_node* (*xlate)(const struct of_phandle_args *spec, void *data);
+ struct icc_node_data *(*xlate_extended)(const struct of_phandle_args *spec,
+ void *data);
+ int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
+};
+
+struct icc_node *tegra_mc_icc_xlate(const struct of_phandle_args *spec,
+ void *data);
+extern const struct tegra_mc_icc_ops tegra_mc_icc_ops;
+
+struct tegra_mc_ops {
+ /*
+ * @probe: Callback to set up SoC-specific bits of the memory controller. This is called
+ * after basic, common set up that is done by the SoC-agnostic bits.
+ */
+ int (*probe)(struct tegra_mc *mc);
+ void (*remove)(struct tegra_mc *mc);
+ int (*resume)(struct tegra_mc *mc);
+ irqreturn_t (*handle_irq)(int irq, void *data);
+ int (*probe_device)(struct tegra_mc *mc, struct device *dev);
+};
+
struct tegra_mc_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -151,22 +178,33 @@ struct tegra_mc_soc {
unsigned int num_address_bits;
unsigned int atom_size;
- u8 client_id_mask;
+ unsigned int num_carveouts;
+
+ u16 client_id_mask;
+ u8 num_channels;
const struct tegra_smmu_soc *smmu;
u32 intmask;
+ u32 ch_intmask;
+ u32 global_intstatus_channel_shift;
+ bool has_addr_hi_reg;
const struct tegra_mc_reset_ops *reset_ops;
const struct tegra_mc_reset *resets;
unsigned int num_resets;
+
+ const struct tegra_mc_icc_ops *icc_ops;
+ const struct tegra_mc_ops *ops;
};
struct tegra_mc {
+ struct tegra_bpmp *bpmp;
struct device *dev;
struct tegra_smmu *smmu;
- struct gart_device *gart;
void __iomem *regs;
+ void __iomem *bcast_ch_regs;
+ void __iomem **ch_regs;
struct clk *clk;
int irq;
@@ -175,13 +213,47 @@ struct tegra_mc {
struct tegra_mc_timing *timings;
unsigned int num_timings;
+ unsigned int num_channels;
+ bool bwmgr_mrq_supported;
struct reset_controller_dev reset;
+ struct icc_provider provider;
+
spinlock_t lock;
+
+ struct {
+ struct dentry *root;
+ } debugfs;
};
int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
+#ifdef CONFIG_TEGRA_MC
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev);
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
+int tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
+ phys_addr_t *base, u64 *size);
+#else
+static inline struct tegra_mc *
+devm_tegra_memory_controller_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int
+tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int
+tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
+ phys_addr_t *base, u64 *size)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __SOC_TEGRA_MC_H__ */
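A sketch of a consumer using the new interfaces; the carveout id is illustrative, and when CONFIG_TEGRA_MC is disabled the stubs above make the same code fail gracefully with -ENODEV:

	struct tegra_mc *mc = devm_tegra_memory_controller_get(dev);
	phys_addr_t base;
	u64 size;

	if (IS_ERR(mc))
		return PTR_ERR(mc);

	err = tegra_mc_get_carveout_info(mc, 1, &base, &size);
	if (err < 0)
		return err;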
diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h
index 08477d7bfab9..ce4d0b1bd0d6 100644
--- a/include/soc/tegra/pm.h
+++ b/include/soc/tegra/pm.h
@@ -14,9 +14,10 @@ enum tegra_suspend_mode {
TEGRA_SUSPEND_LP1, /* CPU voltage off, DRAM self-refresh */
TEGRA_SUSPEND_LP0, /* CPU + core voltage off, DRAM self-refresh */
TEGRA_MAX_SUSPEND_MODE,
+ TEGRA_SUSPEND_NOT_READY,
};
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode);
@@ -28,6 +29,7 @@ void tegra_pm_clear_cpu_in_lp2(void);
void tegra_pm_set_cpu_in_lp2(void);
int tegra_pm_enter_lp2(void);
int tegra_pm_park_secondary_cpu(unsigned long cpu);
+void tegra_pm_init_suspend(void);
#else
static inline enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode)
@@ -61,6 +63,10 @@ static inline int tegra_pm_park_secondary_cpu(unsigned long cpu)
{
return -ENOTSUPP;
}
+
+static inline void tegra_pm_init_suspend(void)
+{
+}
#endif /* CONFIG_PM_SLEEP */
#endif /* __SOC_TEGRA_PM_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 361cb64246f7..c545875d0ff1 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -118,9 +118,9 @@ enum tegra_io_pad {
TEGRA_IO_PAD_PEX_CLK_2,
TEGRA_IO_PAD_PEX_CNTRL,
TEGRA_IO_PAD_PEX_CTL2,
- TEGRA_IO_PAD_PEX_L0_RST_N,
- TEGRA_IO_PAD_PEX_L1_RST_N,
- TEGRA_IO_PAD_PEX_L5_RST_N,
+ TEGRA_IO_PAD_PEX_L0_RST,
+ TEGRA_IO_PAD_PEX_L1_RST,
+ TEGRA_IO_PAD_PEX_L5_RST,
TEGRA_IO_PAD_PWR_CTL,
TEGRA_IO_PAD_SDMMC1,
TEGRA_IO_PAD_SDMMC1_HV,
@@ -148,10 +148,6 @@ enum tegra_io_pad {
TEGRA_IO_PAD_AO_HV,
};
-/* deprecated, use TEGRA_IO_PAD_{HDMI,LVDS} instead */
-#define TEGRA_IO_RAIL_HDMI TEGRA_IO_PAD_HDMI
-#define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS
-
#ifdef CONFIG_SOC_TEGRA_PMC
int tegra_powergate_power_on(unsigned int id);
int tegra_powergate_power_off(unsigned int id);
@@ -164,13 +160,11 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
int tegra_io_pad_power_enable(enum tegra_io_pad id);
int tegra_io_pad_power_disable(enum tegra_io_pad id);
-/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */
-int tegra_io_rail_power_on(unsigned int id);
-int tegra_io_rail_power_off(unsigned int id);
-
void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
+bool tegra_pmc_core_domain_state_synced(void);
+
#else
static inline int tegra_powergate_power_on(unsigned int id)
{
@@ -209,22 +203,17 @@ static inline int tegra_io_pad_get_voltage(enum tegra_io_pad id)
return -ENOSYS;
}
-static inline int tegra_io_rail_power_on(unsigned int id)
-{
- return -ENOSYS;
-}
-
-static inline int tegra_io_rail_power_off(unsigned int id)
+static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
{
- return -ENOSYS;
}
-static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
{
}
-static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+static inline bool tegra_pmc_core_domain_state_synced(void)
{
+ return false;
}
#endif /* CONFIG_SOC_TEGRA_PMC */
diff --git a/include/soc/tegra/tegra-cbb.h b/include/soc/tegra/tegra-cbb.h
new file mode 100644
index 000000000000..e864c2ebe794
--- /dev/null
+++ b/include/soc/tegra/tegra-cbb.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#ifndef TEGRA_CBB_H
+#define TEGRA_CBB_H
+
+#include <linux/list.h>
+
+struct tegra_cbb_error {
+ const char *code;
+ const char *source;
+ const char *desc;
+};
+
+struct tegra_cbb {
+ struct device *dev;
+ const struct tegra_cbb_ops *ops;
+ struct list_head node;
+};
+
+struct tegra_cbb_ops {
+ int (*debugfs_show)(struct tegra_cbb *cbb, struct seq_file *s, void *v);
+ int (*interrupt_enable)(struct tegra_cbb *cbb);
+ void (*error_enable)(struct tegra_cbb *cbb);
+ void (*fault_enable)(struct tegra_cbb *cbb);
+ void (*stall_enable)(struct tegra_cbb *cbb);
+ void (*error_clear)(struct tegra_cbb *cbb);
+ u32 (*get_status)(struct tegra_cbb *cbb);
+};
+
+int tegra_cbb_get_irq(struct platform_device *pdev, unsigned int *nonsec_irq,
+ unsigned int *sec_irq);
+__printf(2, 3)
+void tegra_cbb_print_err(struct seq_file *file, const char *fmt, ...);
+
+void tegra_cbb_print_cache(struct seq_file *file, u32 cache);
+void tegra_cbb_print_prot(struct seq_file *file, u32 prot);
+int tegra_cbb_register(struct tegra_cbb *cbb);
+
+void tegra_cbb_fault_enable(struct tegra_cbb *cbb);
+void tegra_cbb_stall_enable(struct tegra_cbb *cbb);
+void tegra_cbb_error_clear(struct tegra_cbb *cbb);
+u32 tegra_cbb_get_status(struct tegra_cbb *cbb);
+
+#endif /* TEGRA_CBB_H */
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index 9792d25fa369..2fc641cb1982 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -63,7 +63,7 @@ struct ac97_codec_device {
struct ac97_codec_driver {
struct device_driver driver;
int (*probe)(struct ac97_codec_device *);
- int (*remove)(struct ac97_codec_device *);
+ void (*remove)(struct ac97_codec_device *dev);
void (*shutdown)(struct ac97_codec_device *);
const struct ac97_id *id_table;
};
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 49200ec26dc4..4bd3be3a3192 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -335,6 +335,9 @@ static inline int snd_ac97_update_power(struct snd_ac97 *ac97, int reg,
#ifdef CONFIG_PM
void snd_ac97_suspend(struct snd_ac97 *ac97);
void snd_ac97_resume(struct snd_ac97 *ac97);
+#else
+static inline void snd_ac97_suspend(struct snd_ac97 *ac97) {}
+static inline void snd_ac97_resume(struct snd_ac97 *ac97) {}
#endif
int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
unsigned int id_mask);
@@ -407,7 +410,7 @@ int snd_ac97_pcm_close(struct ac97_pcm *pcm);
int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime);
/* ad hoc AC97 device driver access */
-extern struct bus_type ac97_bus_type;
+extern const struct bus_type ac97_bus_type;
/* AC97 platform_data adding function */
static inline void snd_ac97_dev_add_pdata(struct snd_ac97 *ac97, void *data)
diff --git a/include/sound/acp63_chip_offset_byte.h b/include/sound/acp63_chip_offset_byte.h
new file mode 100644
index 000000000000..d746c1cb0163
--- /dev/null
+++ b/include/sound/acp63_chip_offset_byte.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 6.3 Register Documentation
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _acp_ip_OFFSET_HEADER
+#define _acp_ip_OFFSET_HEADER
+
+/* Registers from ACP_DMA block */
+#define ACP_DMA_CNTL_0 0x0000000
+#define ACP_DMA_CNTL_1 0x0000004
+#define ACP_DMA_CNTL_2 0x0000008
+#define ACP_DMA_CNTL_3 0x000000C
+#define ACP_DMA_CNTL_4 0x0000010
+#define ACP_DMA_CNTL_5 0x0000014
+#define ACP_DMA_CNTL_6 0x0000018
+#define ACP_DMA_CNTL_7 0x000001C
+#define ACP_DMA_DSCR_STRT_IDX_0 0x0000020
+#define ACP_DMA_DSCR_STRT_IDX_1 0x0000024
+#define ACP_DMA_DSCR_STRT_IDX_2 0x0000028
+#define ACP_DMA_DSCR_STRT_IDX_3 0x000002C
+#define ACP_DMA_DSCR_STRT_IDX_4 0x0000030
+#define ACP_DMA_DSCR_STRT_IDX_5 0x0000034
+#define ACP_DMA_DSCR_STRT_IDX_6 0x0000038
+#define ACP_DMA_DSCR_STRT_IDX_7 0x000003C
+#define ACP_DMA_DSCR_CNT_0 0x0000040
+#define ACP_DMA_DSCR_CNT_1 0x0000044
+#define ACP_DMA_DSCR_CNT_2 0x0000048
+#define ACP_DMA_DSCR_CNT_3 0x000004C
+#define ACP_DMA_DSCR_CNT_4 0x0000050
+#define ACP_DMA_DSCR_CNT_5 0x0000054
+#define ACP_DMA_DSCR_CNT_6 0x0000058
+#define ACP_DMA_DSCR_CNT_7 0x000005C
+#define ACP_DMA_PRIO_0 0x0000060
+#define ACP_DMA_PRIO_1 0x0000064
+#define ACP_DMA_PRIO_2 0x0000068
+#define ACP_DMA_PRIO_3 0x000006C
+#define ACP_DMA_PRIO_4 0x0000070
+#define ACP_DMA_PRIO_5 0x0000074
+#define ACP_DMA_PRIO_6 0x0000078
+#define ACP_DMA_PRIO_7 0x000007C
+#define ACP_DMA_CUR_DSCR_0 0x0000080
+#define ACP_DMA_CUR_DSCR_1 0x0000084
+#define ACP_DMA_CUR_DSCR_2 0x0000088
+#define ACP_DMA_CUR_DSCR_3 0x000008C
+#define ACP_DMA_CUR_DSCR_4 0x0000090
+#define ACP_DMA_CUR_DSCR_5 0x0000094
+#define ACP_DMA_CUR_DSCR_6 0x0000098
+#define ACP_DMA_CUR_DSCR_7 0x000009C
+#define ACP_DMA_CUR_TRANS_CNT_0 0x00000A0
+#define ACP_DMA_CUR_TRANS_CNT_1 0x00000A4
+#define ACP_DMA_CUR_TRANS_CNT_2 0x00000A8
+#define ACP_DMA_CUR_TRANS_CNT_3 0x00000AC
+#define ACP_DMA_CUR_TRANS_CNT_4 0x00000B0
+#define ACP_DMA_CUR_TRANS_CNT_5 0x00000B4
+#define ACP_DMA_CUR_TRANS_CNT_6 0x00000B8
+#define ACP_DMA_CUR_TRANS_CNT_7 0x00000BC
+#define ACP_DMA_ERR_STS_0 0x00000C0
+#define ACP_DMA_ERR_STS_1 0x00000C4
+#define ACP_DMA_ERR_STS_2 0x00000C8
+#define ACP_DMA_ERR_STS_3 0x00000CC
+#define ACP_DMA_ERR_STS_4 0x00000D0
+#define ACP_DMA_ERR_STS_5 0x00000D4
+#define ACP_DMA_ERR_STS_6 0x00000D8
+#define ACP_DMA_ERR_STS_7 0x00000DC
+#define ACP_DMA_DESC_BASE_ADDR 0x00000E0
+#define ACP_DMA_DESC_MAX_NUM_DSCR 0x00000E4
+#define ACP_DMA_CH_STS 0x00000E8
+#define ACP_DMA_CH_GROUP 0x00000EC
+#define ACP_DMA_CH_RST_STS 0x00000F0
+
+/* Registers from ACP_AXI2AXIATU block */
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x0000C00
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x0000C04
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x0000C08
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x0000C0C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x0000C10
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x0000C14
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x0000C18
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x0000C1C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x0000C20
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x0000C24
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x0000C28
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x0000C2C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x0000C30
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x0000C34
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x0000C38
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x0000C3C
+#define ACPAXI2AXI_ATU_CTRL 0x0000C40
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9 0x0000C44
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9 0x0000C48
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10 0x0000C4C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10 0x0000C50
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11 0x0000C54
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11 0x0000C58
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12 0x0000C5C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12 0x0000C60
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13 0x0000C64
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13 0x0000C68
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14 0x0000C6C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14 0x0000C70
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15 0x0000C74
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15 0x0000C78
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16 0x0000C7C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16 0x0000C80
+
+/* Registers from ACP_CLKRST block */
+#define ACP_SOFT_RESET 0x0001000
+#define ACP_CONTROL 0x0001004
+#define ACP_STATUS 0x0001008
+#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x0001010
+#define ACP_ZSC_DSP_CTRL 0x0001014
+#define ACP_ZSC_STS 0x0001018
+#define ACP_PGFSM_CONTROL 0x0001024
+#define ACP_PGFSM_STATUS 0x0001028
+#define ACP_CLKMUX_SEL 0x000102C
+
+/* Registers from ACP_AON block */
+#define ACP_PME_EN 0x0001400
+#define ACP_DEVICE_STATE 0x0001404
+#define AZ_DEVICE_STATE 0x0001408
+#define ACP_PIN_CONFIG 0x0001440
+#define ACP_PAD_PULLUP_CTRL 0x0001444
+#define ACP_PAD_PULLDOWN_CTRL 0x0001448
+#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x000144C
+#define ACP_PAD_SCHMEN_CTRL 0x0001450
+#define ACP_SW0_PAD_KEEPER_EN 0x0001454
+#define ACP_SW0_WAKE_EN 0x0001458
+#define ACP_I2S_WAKE_EN 0x000145C
+#define ACP_SW1_WAKE_EN 0x0001460
+
+#define ACP_SW0_I2S_ERROR_REASON 0x00018B4
+#define ACP_SW0_POS_TRACK_AUDIO0_TX_CTRL 0x00018B8
+#define ACP_SW0_AUDIO0_TX_DMA_POS 0x00018BC
+#define ACP_SW0_POS_TRACK_AUDIO1_TX_CTRL 0x00018C0
+#define ACP_SW0_AUDIO1_TX_DMA_POS 0x00018C4
+#define ACP_SW0_POS_TRACK_AUDIO2_TX_CTRL 0x00018C8
+#define ACP_SW0_AUDIO2_TX_DMA_POS 0x00018CC
+#define ACP_SW0_POS_TRACK_AUDIO0_RX_CTRL 0x00018D0
+#define ACP_SW0_AUDIO0_DMA_POS 0x00018D4
+#define ACP_SW0_POS_TRACK_AUDIO1_RX_CTRL 0x00018D8
+#define ACP_SW0_AUDIO1_RX_DMA_POS 0x00018DC
+#define ACP_SW0_POS_TRACK_AUDIO2_RX_CTRL 0x00018E0
+#define ACP_SW0_AUDIO2_RX_DMA_POS 0x00018E4
+#define ACP_ERROR_INTR_MASK1 0x0001974
+#define ACP_ERROR_INTR_MASK2 0x0001978
+#define ACP_ERROR_INTR_MASK3 0x000197C
+
+/* Registers from ACP_P1_MISC block */
+#define ACP_EXTERNAL_INTR_ENB 0x0001A00
+#define ACP_EXTERNAL_INTR_CNTL 0x0001A04
+#define ACP_EXTERNAL_INTR_CNTL1 0x0001A08
+#define ACP_EXTERNAL_INTR_STAT 0x0001A0C
+#define ACP_EXTERNAL_INTR_STAT1 0x0001A10
+#define ACP_ERROR_STATUS 0x0001A4C
+#define ACP_SW1_I2S_ERROR_REASON 0x0001A50
+#define ACP_SW1_POS_TRACK_AUDIO0_TX_CTRL 0x0001A6C
+#define ACP_SW1_AUDIO0_TX_DMA_POS 0x0001A70
+#define ACP_SW1_POS_TRACK_AUDIO0_RX_CTRL 0x0001A74
+#define ACP_SW1_AUDIO0_RX_DMA_POS 0x0001A78
+#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL 0x0001A7C
+#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS 0x0001A80
+#define ACP_SCRATCH_REG_BASE_ADDR 0x0001A84
+#define ACP_SW1_POS_TRACK_AUDIO1_TX_CTRL 0x0001A88
+#define ACP_SW1_AUDIO1_TX_DMA_POS 0x0001A8C
+#define ACP_SW1_POS_TRACK_AUDIO2_TX_CTRL 0x0001A90
+#define ACP_SW1_AUDIO2_TX_DMA_POS 0x0001A94
+#define ACP_SW1_POS_TRACK_AUDIO1_RX_CTRL 0x0001A98
+#define ACP_SW1_AUDIO1_RX_DMA_POS 0x0001A9C
+#define ACP_SW1_POS_TRACK_AUDIO2_RX_CTRL 0x0001AA0
+#define ACP_SW1_AUDIO2_RX_DMA_POS 0x0001AA4
+#define ACP_ERROR_INTR_MASK4 0x0001AEC
+#define ACP_ERROR_INTR_MASK5 0x0001AF0
+
+/* Registers from ACP_AUDIO_BUFFERS block */
+#define ACP_AUDIO0_RX_RINGBUFADDR 0x0002000
+#define ACP_AUDIO0_RX_RINGBUFSIZE 0x0002004
+#define ACP_AUDIO0_RX_LINKPOSITIONCNTR 0x0002008
+#define ACP_AUDIO0_RX_FIFOADDR 0x000200C
+#define ACP_AUDIO0_RX_FIFOSIZE 0x0002010
+#define ACP_AUDIO0_RX_DMA_SIZE 0x0002014
+#define ACP_AUDIO0_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
+#define ACP_AUDIO0_RX_LINEARPOSITIONCNTR_LOW 0x000201C
+#define ACP_AUDIO0_RX_INTR_WATERMARK_SIZE 0x0002020
+#define ACP_AUDIO0_TX_RINGBUFADDR 0x0002024
+#define ACP_AUDIO0_TX_RINGBUFSIZE 0x0002028
+#define ACP_AUDIO0_TX_LINKPOSITIONCNTR 0x000202C
+#define ACP_AUDIO0_TX_FIFOADDR 0x0002030
+#define ACP_AUDIO0_TX_FIFOSIZE 0x0002034
+#define ACP_AUDIO0_TX_DMA_SIZE 0x0002038
+#define ACP_AUDIO0_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
+#define ACP_AUDIO0_TX_LINEARPOSITIONCNTR_LOW 0x0002040
+#define ACP_AUDIO0_TX_INTR_WATERMARK_SIZE 0x0002044
+#define ACP_AUDIO1_RX_RINGBUFADDR 0x0002048
+#define ACP_AUDIO1_RX_RINGBUFSIZE 0x000204C
+#define ACP_AUDIO1_RX_LINKPOSITIONCNTR 0x0002050
+#define ACP_AUDIO1_RX_FIFOADDR 0x0002054
+#define ACP_AUDIO1_RX_FIFOSIZE 0x0002058
+#define ACP_AUDIO1_RX_DMA_SIZE 0x000205C
+#define ACP_AUDIO1_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
+#define ACP_AUDIO1_RX_LINEARPOSITIONCNTR_LOW 0x0002064
+#define ACP_AUDIO1_RX_INTR_WATERMARK_SIZE 0x0002068
+#define ACP_AUDIO1_TX_RINGBUFADDR 0x000206C
+#define ACP_AUDIO1_TX_RINGBUFSIZE 0x0002070
+#define ACP_AUDIO1_TX_LINKPOSITIONCNTR 0x0002074
+#define ACP_AUDIO1_TX_FIFOADDR 0x0002078
+#define ACP_AUDIO1_TX_FIFOSIZE 0x000207C
+#define ACP_AUDIO1_TX_DMA_SIZE 0x0002080
+#define ACP_AUDIO1_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
+#define ACP_AUDIO1_TX_LINEARPOSITIONCNTR_LOW 0x0002088
+#define ACP_AUDIO1_TX_INTR_WATERMARK_SIZE 0x000208C
+#define ACP_AUDIO2_RX_RINGBUFADDR 0x0002090
+#define ACP_AUDIO2_RX_RINGBUFSIZE 0x0002094
+#define ACP_AUDIO2_RX_LINKPOSITIONCNTR 0x0002098
+#define ACP_AUDIO2_RX_FIFOADDR 0x000209C
+#define ACP_AUDIO2_RX_FIFOSIZE 0x00020A0
+#define ACP_AUDIO2_RX_DMA_SIZE 0x00020A4
+#define ACP_AUDIO2_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
+#define ACP_AUDIO2_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
+#define ACP_AUDIO2_RX_INTR_WATERMARK_SIZE 0x00020B0
+#define ACP_AUDIO2_TX_RINGBUFADDR 0x00020B4
+#define ACP_AUDIO2_TX_RINGBUFSIZE 0x00020B8
+#define ACP_AUDIO2_TX_LINKPOSITIONCNTR 0x00020BC
+#define ACP_AUDIO2_TX_FIFOADDR 0x00020C0
+#define ACP_AUDIO2_TX_FIFOSIZE 0x00020C4
+#define ACP_AUDIO2_TX_DMA_SIZE 0x00020C8
+#define ACP_AUDIO2_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
+#define ACP_AUDIO2_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
+#define ACP_AUDIO2_TX_INTR_WATERMARK_SIZE 0x00020D4
+
+/* Registers from ACP_I2S_TDM block */
+#define ACP_I2STDM_IER 0x0002400
+#define ACP_I2STDM_IRER 0x0002404
+#define ACP_I2STDM_RXFRMT 0x0002408
+#define ACP_I2STDM_ITER 0x000240C
+#define ACP_I2STDM_TXFRMT 0x0002410
+#define ACP_I2STDM0_MSTRCLKGEN 0x0002414
+#define ACP_I2STDM1_MSTRCLKGEN 0x0002418
+#define ACP_I2STDM2_MSTRCLKGEN 0x000241C
+#define ACP_I2STDM_REFCLKGEN 0x0002420
+
+/* Registers from ACP_BT_TDM block */
+#define ACP_BTTDM_IER 0x0002800
+#define ACP_BTTDM_IRER 0x0002804
+#define ACP_BTTDM_RXFRMT 0x0002808
+#define ACP_BTTDM_ITER 0x000280C
+#define ACP_BTTDM_TXFRMT 0x0002810
+#define ACP_HSTDM_IER 0x0002814
+#define ACP_HSTDM_IRER 0x0002818
+#define ACP_HSTDM_RXFRMT 0x000281C
+#define ACP_HSTDM_ITER 0x0002820
+#define ACP_HSTDM_TXFRMT 0x0002824
+
+/* Registers from ACP_WOV block */
+#define ACP_WOV_PDM_ENABLE 0x0002C04
+#define ACP_WOV_PDM_DMA_ENABLE 0x0002C08
+#define ACP_WOV_RX_RINGBUFADDR 0x0002C0C
+#define ACP_WOV_RX_RINGBUFSIZE 0x0002C10
+#define ACP_WOV_RX_LINKPOSITIONCNTR 0x0002C14
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH 0x0002C18
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW 0x0002C1C
+#define ACP_WOV_RX_INTR_WATERMARK_SIZE 0x0002C20
+#define ACP_WOV_PDM_FIFO_FLUSH 0x0002C24
+#define ACP_WOV_PDM_NO_OF_CHANNELS 0x0002C28
+#define ACP_WOV_PDM_DECIMATION_FACTOR 0x0002C2C
+#define ACP_WOV_PDM_VAD_CTRL 0x0002C30
+#define ACP_WOV_WAKE 0x0002C54
+#define ACP_WOV_BUFFER_STATUS 0x0002C58
+#define ACP_WOV_MISC_CTRL 0x0002C5C
+#define ACP_WOV_CLK_CTRL 0x0002C60
+#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x0002C64
+#define ACP_WOV_ERROR_STATUS_REGISTER 0x0002C68
+#define ACP_PDM_CLKDIV 0x0002C6C
+
+/* Registers from ACP_SW0_SWCLK block */
+#define ACP_SW0_EN 0x0003000
+#define ACP_SW0_EN_STATUS 0x0003004
+#define ACP_SW0_FRAMESIZE 0x0003008
+#define ACP_SW0_SSP_COUNTER 0x000300C
+#define ACP_SW0_AUDIO0_TX_EN 0x0003010
+#define ACP_SW0_AUDIO0_TX_EN_STATUS 0x0003014
+#define ACP_SW0_AUDIO0_TX_FRAME_FORMAT 0x0003018
+#define ACP_SW0_AUDIO0_TX_SAMPLEINTERVAL 0x000301C
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP0 0x0003020
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP1 0x0003024
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP2 0x0003028
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP3 0x000302C
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP0 0x0003030
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP1 0x0003034
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP2 0x0003038
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP3 0x000303C
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP0 0x0003040
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP1 0x0003044
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP2 0x0003048
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP3 0x000304C
+#define ACP_SW0_AUDIO1_TX_EN 0x0003050
+#define ACP_SW0_AUDIO1_TX_EN_STATUS 0x0003054
+#define ACP_SW0_AUDIO1_TX_FRAME_FORMAT 0x0003058
+#define ACP_SW0_AUDIO1_TX_SAMPLEINTERVAL 0x000305C
+#define ACP_SW0_AUDIO1_TX_HCTRL 0x0003060
+#define ACP_SW0_AUDIO1_TX_OFFSET 0x0003064
+#define ACP_SW0_AUDIO1_TX_CHANNEL_ENABLE_DP0 0x0003068
+#define ACP_SW0_AUDIO2_TX_EN 0x000306C
+#define ACP_SW0_AUDIO2_TX_EN_STATUS 0x0003070
+#define ACP_SW0_AUDIO2_TX_FRAME_FORMAT 0x0003074
+#define ACP_SW0_AUDIO2_TX_SAMPLEINTERVAL 0x0003078
+#define ACP_SW0_AUDIO2_TX_HCTRL 0x000307C
+#define ACP_SW0_AUDIO2_TX_OFFSET 0x0003080
+#define ACP_SW0_AUDIO2_TX_CHANNEL_ENABLE_DP0 0x0003084
+#define ACP_SW0_AUDIO0_RX_EN 0x0003088
+#define ACP_SW0_AUDIO0_RX_EN_STATUS 0x000308C
+#define ACP_SW0_AUDIO0_RX_FRAME_FORMAT 0x0003090
+#define ACP_SW0_AUDIO0_RX_SAMPLEINTERVAL 0x0003094
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP0 0x0003098
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP1 0x000309C
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP2 0x0003100
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP3 0x0003104
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP0 0x0003108
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP1 0x000310C
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP2 0x0003110
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP3 0x0003114
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP0 0x0003118
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP1 0x000311C
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP2 0x0003120
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP3 0x0003124
+#define ACP_SW0_AUDIO1_RX_EN 0x0003128
+#define ACP_SW0_AUDIO1_RX_EN_STATUS 0x000312C
+#define ACP_SW0_AUDIO1_RX_FRAME_FORMAT 0x0003130
+#define ACP_SW0_AUDIO1_RX_SAMPLEINTERVAL 0x0003134
+#define ACP_SW0_AUDIO1_RX_HCTRL 0x0003138
+#define ACP_SW0_AUDIO1_RX_OFFSET 0x000313C
+#define ACP_SW0_AUDIO1_RX_CHANNEL_ENABLE_DP0 0x0003140
+#define ACP_SW0_AUDIO2_RX_EN 0x0003144
+#define ACP_SW0_AUDIO2_RX_EN_STATUS 0x0003148
+#define ACP_SW0_AUDIO2_RX_FRAME_FORMAT 0x000314C
+#define ACP_SW0_AUDIO2_RX_SAMPLEINTERVAL 0x0003150
+#define ACP_SW0_AUDIO2_RX_HCTRL 0x0003154
+#define ACP_SW0_AUDIO2_RX_OFFSET 0x0003158
+#define ACP_SW0_AUDIO2_RX_CHANNEL_ENABLE_DP0 0x000315C
+#define ACP_SW0_BPT_PORT_EN 0x0003160
+#define ACP_SW0_BPT_PORT_EN_STATUS 0x0003164
+#define ACP_SW0_BPT_PORT_FRAME_FORMAT 0x0003168
+#define ACP_SW0_BPT_PORT_SAMPLEINTERVAL 0x000316C
+#define ACP_SW0_BPT_PORT_HCTRL 0x0003170
+#define ACP_SW0_BPT_PORT_OFFSET 0x0003174
+#define ACP_SW0_BPT_PORT_CHANNEL_ENABLE 0x0003178
+#define ACP_SW0_BPT_PORT_FIRST_BYTE_ADDR 0x000317C
+#define ACP_SW0_CLK_RESUME_CTRL 0x0003180
+#define ACP_SW0_CLK_RESUME_DELAY_CNTR 0x0003184
+#define ACP_SW0_BUS_RESET_CTRL 0x0003188
+#define ACP_SW0_PRBS_ERR_STATUS 0x000318C
+#define ACP_SW0_IMM_CMD_UPPER_WORD 0x0003230
+#define ACP_SW0_IMM_CMD_LOWER_QWORD 0x0003234
+#define ACP_SW0_IMM_RESP_UPPER_WORD 0x0003238
+#define ACP_SW0_IMM_RESP_LOWER_QWORD 0x000323C
+#define ACP_SW0_IMM_CMD_STS 0x0003240
+#define ACP_SW0_BRA_BASE_ADDRESS 0x0003244
+#define ACP_SW0_BRA_TRANSFER_SIZE 0x0003248
+#define ACP_SW0_BRA_DMA_BUSY 0x000324C
+#define ACP_SW0_BRA_RESP 0x0003250
+#define ACP_SW0_BRA_RESP_FRAME_ADDR 0x0003254
+#define ACP_SW0_BRA_CURRENT_TRANSFER_SIZE 0x0003258
+#define ACP_SW0_STATECHANGE_STATUS_0TO7 0x000325C
+#define ACP_SW0_STATECHANGE_STATUS_8TO11 0x0003260
+#define ACP_SW0_STATECHANGE_STATUS_MASK_0TO7 0x0003264
+#define ACP_SW0_STATECHANGE_STATUS_MASK_8TO11 0x0003268
+#define ACP_SW0_CLK_FREQUENCY_CTRL 0x000326C
+#define ACP_SW0_ERROR_INTR_MASK 0x0003270
+#define ACP_SW0_PHY_TEST_MODE_DATA_OFF 0x0003274
+
+/* Registers from ACP_P1_AUDIO_BUFFERS block */
+#define ACP_P1_AUDIO0_RX_RINGBUFADDR 0x0003A00
+#define ACP_P1_AUDIO0_RX_RINGBUFSIZE 0x0003A04
+#define ACP_P1_AUDIO0_RX_LINKPOSITIONCNTR 0x0003A08
+#define ACP_P1_AUDIO0_RX_FIFOADDR 0x0003A0C
+#define ACP_P1_AUDIO0_RX_FIFOSIZE 0x0003A10
+#define ACP_P1_AUDIO0_RX_DMA_SIZE 0x0003A14
+#define ACP_P1_AUDIO0_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
+#define ACP_P1_AUDIO0_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
+#define ACP_P1_AUDIO0_RX_INTR_WATERMARK_SIZE 0x0003A20
+#define ACP_P1_AUDIO0_TX_RINGBUFADDR 0x0003A24
+#define ACP_P1_AUDIO0_TX_RINGBUFSIZE 0x0003A28
+#define ACP_P1_AUDIO0_TX_LINKPOSITIONCNTR 0x0003A2C
+#define ACP_P1_AUDIO0_TX_FIFOADDR 0x0003A30
+#define ACP_P1_AUDIO0_TX_FIFOSIZE 0x0003A34
+#define ACP_P1_AUDIO0_TX_DMA_SIZE 0x0003A38
+#define ACP_P1_AUDIO0_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
+#define ACP_P1_AUDIO0_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
+#define ACP_P1_AUDIO0_TX_INTR_WATERMARK_SIZE 0x0003A44
+#define ACP_P1_AUDIO1_RX_RINGBUFADDR 0x0003A48
+#define ACP_P1_AUDIO1_RX_RINGBUFSIZE 0x0003A4C
+#define ACP_P1_AUDIO1_RX_LINKPOSITIONCNTR 0x0003A50
+#define ACP_P1_AUDIO1_RX_FIFOADDR 0x0003A54
+#define ACP_P1_AUDIO1_RX_FIFOSIZE 0x0003A58
+#define ACP_P1_AUDIO1_RX_DMA_SIZE 0x0003A5C
+#define ACP_P1_AUDIO1_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
+#define ACP_P1_AUDIO1_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
+#define ACP_P1_AUDIO1_RX_INTR_WATERMARK_SIZE 0x0003A68
+#define ACP_P1_AUDIO1_TX_RINGBUFADDR 0x0003A6C
+#define ACP_P1_AUDIO1_TX_RINGBUFSIZE 0x0003A70
+#define ACP_P1_AUDIO1_TX_LINKPOSITIONCNTR 0x0003A74
+#define ACP_P1_AUDIO1_TX_FIFOADDR 0x0003A78
+#define ACP_P1_AUDIO1_TX_FIFOSIZE 0x0003A7C
+#define ACP_P1_AUDIO1_TX_DMA_SIZE 0x0003A80
+#define ACP_P1_AUDIO1_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
+#define ACP_P1_AUDIO1_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
+#define ACP_P1_AUDIO1_TX_INTR_WATERMARK_SIZE 0x0003A8C
+#define ACP_P1_AUDIO2_RX_RINGBUFADDR 0x0003A90
+#define ACP_P1_AUDIO2_RX_RINGBUFSIZE 0x0003A94
+#define ACP_P1_AUDIO2_RX_LINKPOSITIONCNTR 0x0003A98
+#define ACP_P1_AUDIO2_RX_FIFOADDR 0x0003A9C
+#define ACP_P1_AUDIO2_RX_FIFOSIZE 0x0003AA0
+#define ACP_P1_AUDIO2_RX_DMA_SIZE 0x0003AA4
+#define ACP_P1_AUDIO2_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
+#define ACP_P1_AUDIO2_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
+#define ACP_P1_AUDIO2_RX_INTR_WATERMARK_SIZE 0x0003AB0
+#define ACP_P1_AUDIO2_TX_RINGBUFADDR 0x0003AB4
+#define ACP_P1_AUDIO2_TX_RINGBUFSIZE 0x0003AB8
+#define ACP_P1_AUDIO2_TX_LINKPOSITIONCNTR 0x0003ABC
+#define ACP_P1_AUDIO2_TX_FIFOADDR 0x0003AC0
+#define ACP_P1_AUDIO2_TX_FIFOSIZE 0x0003AC4
+#define ACP_P1_AUDIO2_TX_DMA_SIZE 0x0003AC8
+#define ACP_P1_AUDIO2_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
+#define ACP_P1_AUDIO2_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
+#define ACP_P1_AUDIO2_TX_INTR_WATERMARK_SIZE 0x0003AD4
+
+/* Registers from ACP_SW1_SWCLK block */
+#define ACP_SW1_EN 0x0003C00
+#define ACP_SW1_EN_STATUS 0x0003C04
+#define ACP_SW1_FRAMESIZE 0x0003C08
+#define ACP_SW1_SSP_COUNTER 0x0003C0C
+#define ACP_SW1_AUDIO1_TX_EN 0x0003C50
+#define ACP_SW1_AUDIO1_TX_EN_STATUS 0x0003C54
+#define ACP_SW1_AUDIO1_TX_FRAME_FORMAT 0x0003C58
+#define ACP_SW1_AUDIO1_TX_SAMPLEINTERVAL 0x0003C5C
+#define ACP_SW1_AUDIO1_TX_HCTRL 0x0003C60
+#define ACP_SW1_AUDIO1_TX_OFFSET 0x0003C64
+#define ACP_SW1_AUDIO1_TX_CHANNEL_ENABLE_DP0 0x0003C68
+#define ACP_SW1_AUDIO1_RX_EN 0x0003D28
+#define ACP_SW1_AUDIO1_RX_EN_STATUS 0x0003D2C
+#define ACP_SW1_AUDIO1_RX_FRAME_FORMAT 0x0003D30
+#define ACP_SW1_AUDIO1_RX_SAMPLEINTERVAL 0x0003D34
+#define ACP_SW1_AUDIO1_RX_HCTRL 0x0003D38
+#define ACP_SW1_AUDIO1_RX_OFFSET 0x0003D3C
+#define ACP_SW1_AUDIO1_RX_CHANNEL_ENABLE_DP0 0x0003D40
+#define ACP_SW1_BPT_PORT_EN 0x0003D60
+#define ACP_SW1_BPT_PORT_EN_STATUS 0x0003D64
+#define ACP_SW1_BPT_PORT_FRAME_FORMAT 0x0003D68
+#define ACP_SW1_BPT_PORT_SAMPLEINTERVAL 0x0003D6C
+#define ACP_SW1_BPT_PORT_HCTRL 0x0003D70
+#define ACP_SW1_BPT_PORT_OFFSET 0x0003D74
+#define ACP_SW1_BPT_PORT_CHANNEL_ENABLE 0x0003D78
+#define ACP_SW1_BPT_PORT_FIRST_BYTE_ADDR 0x0003D7C
+#define ACP_SW1_CLK_RESUME_CTRL 0x0003D80
+#define ACP_SW1_CLK_RESUME_DELAY_CNTR 0x0003D84
+#define ACP_SW1_BUS_RESET_CTRL 0x0003D88
+#define ACP_SW1_PRBS_ERR_STATUS 0x0003D8C
+
+/* Registers from ACP_SW1_ACLK block */
+#define ACP_SW1_CORB_BASE_ADDRESS 0x0003E00
+#define ACP_SW1_CORB_WRITE_POINTER 0x0003E04
+#define ACP_SW1_CORB_READ_POINTER 0x0003E08
+#define ACP_SW1_CORB_CONTROL 0x0003E0C
+#define ACP_SW1_CORB_SIZE 0x0003E14
+#define ACP_SW1_RIRB_BASE_ADDRESS 0x0003E18
+#define ACP_SW1_RIRB_WRITE_POINTER 0x0003E1C
+#define ACP_SW1_RIRB_RESPONSE_INTERRUPT_COUNT 0x0003E20
+#define ACP_SW1_RIRB_CONTROL 0x0003E24
+#define ACP_SW1_RIRB_SIZE 0x0003E28
+#define ACP_SW1_RIRB_FIFO_MIN_THDL 0x0003E2C
+#define ACP_SW1_IMM_CMD_UPPER_WORD 0x0003E30
+#define ACP_SW1_IMM_CMD_LOWER_QWORD 0x0003E34
+#define ACP_SW1_IMM_RESP_UPPER_WORD 0x0003E38
+#define ACP_SW1_IMM_RESP_LOWER_QWORD 0x0003E3C
+#define ACP_SW1_IMM_CMD_STS 0x0003E40
+#define ACP_SW1_BRA_BASE_ADDRESS 0x0003E44
+#define ACP_SW1_BRA_TRANSFER_SIZE 0x0003E48
+#define ACP_SW1_BRA_DMA_BUSY 0x0003E4C
+#define ACP_SW1_BRA_RESP 0x0003E50
+#define ACP_SW1_BRA_RESP_FRAME_ADDR 0x0003E54
+#define ACP_SW1_BRA_CURRENT_TRANSFER_SIZE 0x0003E58
+#define ACP_SW1_STATECHANGE_STATUS_0TO7 0x0003E5C
+#define ACP_SW1_STATECHANGE_STATUS_8TO11 0x0003E60
+#define ACP_SW1_STATECHANGE_STATUS_MASK_0TO7 0x0003E64
+#define ACP_SW1_STATECHANGE_STATUS_MASK_8TO11 0x0003E68
+#define ACP_SW1_CLK_FREQUENCY_CTRL 0x0003E6C
+#define ACP_SW1_ERROR_INTR_MASK 0x0003E70
+#define ACP_SW1_PHY_TEST_MODE_DATA_OFF 0x0003E74
+
+/* Registers from ACP_SCRATCH block */
+#define ACP_SCRATCH_REG_0 0x0010000
+
+#endif
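
The block above is a flat MMIO offset map. As a compact editorial sketch (not part of the patch) of how such offsets are typically consumed, the snippet below kicks the SW1 manager enable and checks its status register; the function name, the ioremapped base pointer, and the bit-0 enable/ready semantics are all illustrative assumptions.

#include <linux/io.h>

/* Editorial sketch: write the SW1 enable and report whether the status
 * register acknowledged it. Assumes the caller has ioremapped the ACP
 * MMIO region and that bit 0 is the enable/ready bit (assumptions for
 * illustration only).
 */
static bool acp_sw1_enable(void __iomem *acp_base)
{
	writel(1, acp_base + ACP_SW1_EN);
	return readl(acp_base + ACP_SW1_EN_STATUS) & 1;
}
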
diff --git a/include/sound/ak4531_codec.h b/include/sound/ak4531_codec.h
index 9a4429970d92..64402347d7a2 100644
--- a/include/sound/ak4531_codec.h
+++ b/include/sound/ak4531_codec.h
@@ -65,6 +65,9 @@ int snd_ak4531_mixer(struct snd_card *card, struct snd_ak4531 *_ak4531,
#ifdef CONFIG_PM
void snd_ak4531_suspend(struct snd_ak4531 *ak4531);
void snd_ak4531_resume(struct snd_ak4531 *ak4531);
+#else
+static inline void snd_ak4531_suspend(struct snd_ak4531 *ak4531) {}
+static inline void snd_ak4531_resume(struct snd_ak4531 *ak4531) {}
#endif
#endif /* __SOUND_AK4531_CODEC_H */
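
The point of the new !CONFIG_PM stubs is that callers no longer need their own preprocessor guards. A minimal sketch, assuming a hypothetical caller (only the two helpers come from the header):

/* Editorial sketch: with the stubs above, the PM helpers can be called
 * unconditionally; without CONFIG_PM they compile to no-ops.
 */
static int example_card_suspend(struct snd_ak4531 *ak4531)
{
	snd_ak4531_suspend(ak4531);	/* no #ifdef CONFIG_PM needed here */
	return 0;
}
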
diff --git a/include/sound/asequencer.h b/include/sound/asequencer.h
index 18d4bc3ee0b7..ddbb6bf801bb 100644
--- a/include/sound/asequencer.h
+++ b/include/sound/asequencer.h
@@ -65,6 +65,10 @@
#define snd_seq_ev_is_abstime(ev) (snd_seq_ev_timemode_type(ev) == SNDRV_SEQ_TIME_MODE_ABS)
#define snd_seq_ev_is_reltime(ev) (snd_seq_ev_timemode_type(ev) == SNDRV_SEQ_TIME_MODE_REL)
+/* check whether the given event is a UMP event */
+#define snd_seq_ev_is_ump(ev) \
+ (IS_ENABLED(CONFIG_SND_SEQ_UMP) && ((ev)->flags & SNDRV_SEQ_EVENT_UMP))
+
/* queue sync port */
#define snd_seq_queue_sync_port(q) ((q) + 16)
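
An editorial sketch of the new snd_seq_ev_is_ump() macro in use; the dispatch helpers are hypothetical names, and thanks to IS_ENABLED() the UMP branch is compiled out entirely when CONFIG_SND_SEQ_UMP is not set:

/* Editorial sketch: branch a delivery path on UMP vs. legacy events. */
static int example_dispatch(struct snd_seq_event *ev)
{
	if (snd_seq_ev_is_ump(ev))
		return example_deliver_ump(ev);		/* hypothetical helper */
	return example_deliver_legacy(ev);		/* hypothetical helper */
}
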
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 70cbc5095e72..bcf872c17dd3 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -67,6 +67,7 @@ struct snd_compr_runtime {
* @metadata_set: metadata set flag, true when set
 * @next_track: userspace has signalled a next-track transition, true when set
* @partial_drain: undergoing partial_drain for stream, true when set
+ * @pause_in_draining: paused during draining state, true when set
* @private_data: pointer to DSP private data
* @dma_buffer: allocated buffer if any
*/
@@ -80,6 +81,7 @@ struct snd_compr_stream {
bool metadata_set;
bool next_track;
bool partial_drain;
+ bool pause_in_draining;
void *private_data;
struct snd_dma_buffer dma_buffer;
};
@@ -142,16 +144,18 @@ struct snd_compr_ops {
* @direction: Playback or capture direction
* @lock: device lock
* @device: device id
+ * @use_pause_in_draining: allow pause in draining, true when set
*/
struct snd_compr {
const char *name;
- struct device dev;
+ struct device *dev;
struct snd_compr_ops *ops;
void *private_data;
struct snd_card *card;
unsigned int direction;
struct mutex lock;
int device;
+ bool use_pause_in_draining;
#ifdef CONFIG_SND_VERBOSE_PROCFS
/* private: */
char id[64];
@@ -161,11 +165,21 @@ struct snd_compr {
};
/* compress device register APIs */
-int snd_compress_register(struct snd_compr *device);
-int snd_compress_deregister(struct snd_compr *device);
int snd_compress_new(struct snd_card *card, int device,
int type, const char *id, struct snd_compr *compr);
+/**
+ * snd_compr_use_pause_in_draining - Allow pause and resume in draining state
+ * @substream: compress substream to set
+ *
+ * Allow pause and resume in draining state.
+ * Only a HW driver that supports this transition may call this API.
+ */
+static inline void snd_compr_use_pause_in_draining(struct snd_compr_stream *substream)
+{
+ substream->device->use_pause_in_draining = true;
+}
+
/* dsp driver callback apis
* For playback: driver should call snd_compress_fragment_elapsed() to let the
* framework know that a fragment has been consumed from the ring buffer
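
Per the kernel-doc above, a driver whose DSP can pause and resume while draining opts in once, typically from its open callback. A minimal sketch; everything except the helper itself is illustrative:

/* Editorial sketch: opt in to pause/resume during draining at open time. */
static int example_compr_open(struct snd_compr_stream *stream)
{
	snd_compr_use_pause_in_draining(stream);
	return 0;
}
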
diff --git a/include/sound/control.h b/include/sound/control.h
index e128cff10dfa..9a4f4f7138da 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -23,8 +23,8 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
unsigned int __user *tlv);
/* internal flag for skipping validations */
-#ifdef CONFIG_SND_CTL_VALIDATION
-#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 27)
+#ifdef CONFIG_SND_CTL_DEBUG
+#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 24)
#define snd_ctl_skip_validation(info) \
((info)->access & SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK)
#else
@@ -32,6 +32,12 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
#define snd_ctl_skip_validation(info) true
#endif
+/* kernel only - LED bits */
+#define SNDRV_CTL_ELEM_ACCESS_LED_SHIFT 25
+#define SNDRV_CTL_ELEM_ACCESS_LED_MASK (7<<25) /* kernel three bits - LED group */
+#define SNDRV_CTL_ELEM_ACCESS_SPK_LED (1<<25) /* kernel speaker (output) LED flag */
+#define SNDRV_CTL_ELEM_ACCESS_MIC_LED (2<<25) /* kernel microphone (input) LED flag */
+
enum {
SNDRV_CTL_TLV_OP_READ = 0,
SNDRV_CTL_TLV_OP_WRITE = 1,
@@ -42,7 +48,7 @@ struct snd_kcontrol_new {
snd_ctl_elem_iface_t iface; /* interface identifier */
unsigned int device; /* device/client number */
unsigned int subdevice; /* subdevice (substream) number */
- const unsigned char *name; /* ASCII name of item */
+ const char *name; /* ASCII name of item */
unsigned int index; /* index of item */
unsigned int access; /* access rights */
unsigned int count; /* count of same elements */
@@ -103,11 +109,19 @@ struct snd_ctl_file {
int preferred_subdevice[SND_CTL_SUBDEV_ITEMS];
wait_queue_head_t change_sleep;
spinlock_t read_lock;
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
int subscribed; /* read interface is activated */
struct list_head events; /* waiting events for read */
};
+struct snd_ctl_layer_ops {
+ struct snd_ctl_layer_ops *next;
+ const char *module_name;
+ void (*lregister)(struct snd_card *card);
+ void (*ldisconnect)(struct snd_card *card);
+ void (*lnotify)(struct snd_card *card, unsigned int mask, struct snd_kcontrol *kctl, unsigned int ioff);
+};
+
#define snd_ctl_file(n) list_entry(n, struct snd_ctl_file, list)
typedef int (*snd_kctl_ioctl_func_t) (struct snd_card * card,
@@ -115,6 +129,7 @@ typedef int (*snd_kctl_ioctl_func_t) (struct snd_card * card,
unsigned int cmd, unsigned long arg);
void snd_ctl_notify(struct snd_card * card, unsigned int mask, struct snd_ctl_elem_id * id);
+void snd_ctl_notify_one(struct snd_card * card, unsigned int mask, struct snd_kcontrol * kctl, unsigned int ioff);
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new * kcontrolnew, void * private_data);
void snd_ctl_free_one(struct snd_kcontrol * kcontrol);
@@ -123,10 +138,34 @@ int snd_ctl_remove(struct snd_card * card, struct snd_kcontrol * kcontrol);
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace);
int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
-int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
- int active);
-struct snd_kcontrol *snd_ctl_find_numid(struct snd_card * card, unsigned int numid);
-struct snd_kcontrol *snd_ctl_find_id(struct snd_card * card, struct snd_ctl_elem_id *id);
+void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
+int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
+struct snd_kcontrol *snd_ctl_find_numid_locked(struct snd_card *card, unsigned int numid);
+struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid);
+struct snd_kcontrol *snd_ctl_find_id_locked(struct snd_card *card, const struct snd_ctl_elem_id *id);
+struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, const struct snd_ctl_elem_id *id);
+
+/**
+ * snd_ctl_find_id_mixer - find the control instance with the given name string
+ * @card: the card instance
+ * @name: the name string
+ *
+ * Finds the control instance with the given name and
+ * %SNDRV_CTL_ELEM_IFACE_MIXER; all other ID fields are set to zero.
+ *
+ * This is merely a wrapper around snd_ctl_find_id().
+ *
+ * Return: The pointer to the instance if found, or %NULL if not.
+ */
+static inline struct snd_kcontrol *
+snd_ctl_find_id_mixer(struct snd_card *card, const char *name)
+{
+ struct snd_ctl_elem_id id = {};
+
+ id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ strscpy(id.name, name, sizeof(id.name));
+ return snd_ctl_find_id(card, &id);
+}
int snd_ctl_create(struct snd_card *card);
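
A short editorial sketch combining the new lookup wrapper with snd_ctl_activate_id() (declared above) to deactivate a mixer control by name; the control name and error handling are illustrative:

/* Editorial sketch: find a named mixer control and deactivate it. */
static int example_deactivate(struct snd_card *card)
{
	struct snd_kcontrol *kctl;

	kctl = snd_ctl_find_id_mixer(card, "Master Playback Switch");
	if (!kctl)
		return -ENOENT;
	return snd_ctl_activate_id(card, &kctl->id, 0);
}
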
@@ -140,6 +179,10 @@ int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn);
#define snd_ctl_unregister_ioctl_compat(fcn)
#endif
+int snd_ctl_request_layer(const char *module_name);
+void snd_ctl_register_layer(struct snd_ctl_layer_ops *lops);
+void snd_ctl_disconnect_layer(struct snd_ctl_layer_ops *lops);
+
int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
@@ -219,6 +262,9 @@ snd_ctl_add_follower(struct snd_kcontrol *master, struct snd_kcontrol *follower)
return _snd_ctl_add_follower(master, follower, 0);
}
+int snd_ctl_add_followers(struct snd_card *card, struct snd_kcontrol *master,
+ const char * const *list);
+
/**
* snd_ctl_add_follower_uncached - Add a virtual follower control
* @master: vmaster element
@@ -254,6 +300,17 @@ int snd_ctl_apply_vmaster_followers(struct snd_kcontrol *kctl,
void *arg);
/*
+ * Control LED trigger layer
+ */
+#define SND_CTL_LAYER_MODULE_LED "snd-ctl-led"
+
+#if IS_MODULE(CONFIG_SND_CTL_LED)
+static inline int snd_ctl_led_request(void) { return snd_ctl_request_layer(SND_CTL_LAYER_MODULE_LED); }
+#else
+static inline int snd_ctl_led_request(void) { return 0; }
+#endif
+
+/*
* Helper functions for jack-detection controls
*/
struct snd_kcontrol *
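
An editorial sketch of a control layer built on the new snd_ctl_layer_ops and its registration API, modeled on the LED layer hook above; the callback bodies and module name are illustrative:

/* Editorial sketch: a minimal control layer. */
static void example_lregister(struct snd_card *card) {}
static void example_ldisconnect(struct snd_card *card) {}
static void example_lnotify(struct snd_card *card, unsigned int mask,
			    struct snd_kcontrol *kctl, unsigned int ioff) {}

static struct snd_ctl_layer_ops example_lops = {
	.module_name	= "snd-ctl-example",	/* hypothetical name */
	.lregister	= example_lregister,
	.ldisconnect	= example_ldisconnect,
	.lnotify	= example_lnotify,
};

/* Module init would call snd_ctl_register_layer(&example_lops);
 * module exit would call snd_ctl_disconnect_layer(&example_lops).
 */
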
diff --git a/include/sound/core.h b/include/sound/core.h
index 381a010a1bd4..dfef0c9d4b9f 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -14,6 +14,7 @@
#include <linux/pm.h> /* pm_message_t */
#include <linux/stringify.h>
#include <linux/printk.h>
+#include <linux/xarray.h>
/* number of supported soundcards */
#ifdef CONFIG_SND_DYNAMIC_MINORS
@@ -95,14 +96,19 @@ struct snd_card {
private data */
struct list_head devices; /* devices */
- struct device ctl_dev; /* control device */
+ struct device *ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
- struct rw_semaphore controls_rwsem; /* controls list lock */
+ struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
int controls_count; /* count of all controls */
- int user_ctl_count; /* count of all user controls */
+ size_t user_ctl_alloc_size; // current memory allocation by user controls.
struct list_head controls; /* all controls for this card */
struct list_head ctl_files; /* active control files */
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ struct xarray ctl_numids; /* hash table for numids */
+ struct xarray ctl_hash; /* hash table for ctl id matching */
+ bool ctl_hash_collision; /* ctl_hash collision seen? */
+#endif
struct snd_info_entry *proc_root; /* root for soundcard specific files */
struct proc_dir_entry *proc_root_link; /* number link to real id */
@@ -117,15 +123,22 @@ struct snd_card {
struct device card_dev; /* cardX object for sysfs */
const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
+ bool managed; /* managed via devres */
+ bool releasing; /* during card free process */
int sync_irq; /* assigned irq, used for PCM sync */
wait_queue_head_t remove_sleep;
size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */
struct mutex memory_mutex; /* protection for the above */
+#ifdef CONFIG_SND_DEBUG
+ struct dentry *debugfs_root; /* debugfs root for card */
+#endif
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
+ atomic_t power_ref;
wait_queue_head_t power_sleep;
+ wait_queue_head_t power_ref_sleep;
#endif
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -139,21 +152,61 @@ struct snd_card {
#ifdef CONFIG_PM
static inline unsigned int snd_power_get_state(struct snd_card *card)
{
- return card->power_state;
+ return READ_ONCE(card->power_state);
}
static inline void snd_power_change_state(struct snd_card *card, unsigned int state)
{
- card->power_state = state;
+ WRITE_ONCE(card->power_state, state);
wake_up(&card->power_sleep);
}
+/**
+ * snd_power_ref - Take the reference count for power control
+ * @card: sound card object
+ *
+ * The power_ref refcount of the card is used to block the
+ * snd_power_sync_ref() operation. This function increments the reference.
+ * The counterpart snd_power_unref() has to be called appropriately later.
+ */
+static inline void snd_power_ref(struct snd_card *card)
+{
+ atomic_inc(&card->power_ref);
+}
+
+/**
+ * snd_power_unref - Release the reference count for power control
+ * @card: sound card object
+ */
+static inline void snd_power_unref(struct snd_card *card)
+{
+ if (atomic_dec_and_test(&card->power_ref))
+ wake_up(&card->power_ref_sleep);
+}
+
+/**
+ * snd_power_sync_ref - wait until the card power_ref is freed
+ * @card: sound card object
+ *
+ * This function is used to synchronize with the pending power_ref being
+ * released.
+ */
+static inline void snd_power_sync_ref(struct snd_card *card)
+{
+ wait_event(card->power_ref_sleep, !atomic_read(&card->power_ref));
+}
+
/* init.c */
-int snd_power_wait(struct snd_card *card, unsigned int power_state);
+int snd_power_wait(struct snd_card *card);
+int snd_power_ref_and_wait(struct snd_card *card);
#else /* ! CONFIG_PM */
-static inline int snd_power_wait(struct snd_card *card, unsigned int state) { return 0; }
+static inline int snd_power_wait(struct snd_card *card) { return 0; }
+static inline void snd_power_ref(struct snd_card *card) {}
+static inline void snd_power_unref(struct snd_card *card) {}
+static inline int snd_power_ref_and_wait(struct snd_card *card) { return 0; }
+static inline void snd_power_sync_ref(struct snd_card *card) {}
#define snd_power_get_state(card) ({ (void)(card); SNDRV_CTL_POWER_D0; })
#define snd_power_change_state(card, state) do { (void)(card); } while (0)
@@ -179,11 +232,14 @@ static inline struct device *snd_card_get_device_link(struct snd_card *card)
extern int snd_major;
extern int snd_ecards_limit;
-extern struct class *sound_class;
+extern const struct class sound_class;
+#ifdef CONFIG_SND_DEBUG
+extern struct dentry *sound_debugfs_root;
+#endif
void snd_request_card(int card);
-void snd_device_initialize(struct device *dev, struct snd_card *card);
+int snd_device_alloc(struct device **dev_p, struct snd_card *card);
int snd_register_device(int type, struct snd_card *card, int dev,
const struct file_operations *f_ops,
@@ -226,11 +282,15 @@ extern int (*snd_mixer_oss_notify_callback)(struct snd_card *card, int cmd);
int snd_card_new(struct device *parent, int idx, const char *xid,
struct module *module, int extra_size,
struct snd_card **card_ret);
+int snd_devm_card_new(struct device *parent, int idx, const char *xid,
+ struct module *module, size_t extra_size,
+ struct snd_card **card_ret);
-int snd_card_disconnect(struct snd_card *card);
+void snd_card_disconnect(struct snd_card *card);
void snd_card_disconnect_sync(struct snd_card *card);
-int snd_card_free(struct snd_card *card);
-int snd_card_free_when_closed(struct snd_card *card);
+void snd_card_free(struct snd_card *card);
+void snd_card_free_when_closed(struct snd_card *card);
+int snd_card_free_on_error(struct device *dev, int ret);
void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
int snd_card_info_init(void);
@@ -276,6 +336,7 @@ int snd_device_get_state(struct snd_card *card, void *device_data);
void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode);
void snd_dma_disable(unsigned long dma);
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size);
+int snd_devm_request_dma(struct device *dev, int dma, const char *name);
#endif
/* misc.c */
@@ -332,7 +393,8 @@ void __snd_printk(unsigned int level, const char *file, int line,
#define snd_BUG() WARN(1, "BUG?\n")
/**
- * Suppress high rates of output when CONFIG_SND_DEBUG is enabled.
+ * snd_printd_ratelimit - Suppress high rates of output when
+ * CONFIG_SND_DEBUG is enabled.
*/
#define snd_printd_ratelimit() printk_ratelimit()
@@ -445,4 +507,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device,
}
#endif
+/* async signal helpers */
+struct snd_fasync;
+
+int snd_fasync_helper(int fd, struct file *file, int on,
+ struct snd_fasync **fasyncp);
+void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll);
+void snd_fasync_free(struct snd_fasync *fasync);
+
#endif /* __SOUND_CORE_H */
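
An editorial sketch of the intended pairing of the new power-reference helpers: take a reference and wait for D0 before touching the hardware, then drop it so snd_power_sync_ref() waiters can proceed. The function body is illustrative:

/* Editorial sketch: power-reference pairing around a hardware access. */
static int example_hw_op(struct snd_card *card)
{
	int err;

	err = snd_power_ref_and_wait(card);	/* take ref, wait for D0 */
	if (!err) {
		/* ... safely access the hardware here ... */
	}
	snd_power_unref(card);	/* may wake snd_power_sync_ref() waiters */
	return err;
}
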
diff --git a/include/sound/cs-amp-lib.h b/include/sound/cs-amp-lib.h
new file mode 100644
index 000000000000..f481148735e1
--- /dev/null
+++ b/include/sound/cs-amp-lib.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS_AMP_LIB_H
+#define CS_AMP_LIB_H
+
+#include <linux/efi.h>
+#include <linux/types.h>
+
+struct cs_dsp;
+
+struct cirrus_amp_cal_data {
+ u32 calTarget[2];
+ u32 calTime[2];
+ s8 calAmbient;
+ u8 calStatus;
+ u16 calR;
+} __packed;
+
+struct cirrus_amp_efi_data {
+ u32 size;
+ u32 count;
+ struct cirrus_amp_cal_data data[];
+} __packed;
+
+/**
+ * struct cirrus_amp_cal_controls - definition of firmware calibration controls
+ * @alg_id: ID of algorithm containing the controls.
+ * @mem_region: DSP memory region containing the controls.
+ * @ambient: Name of control for calAmbient value.
+ * @calr: Name of control for calR value.
+ * @status: Name of control for calStatus value.
+ * @checksum: Name of control for checksum value.
+ */
+struct cirrus_amp_cal_controls {
+ unsigned int alg_id;
+ int mem_region;
+ const char *ambient;
+ const char *calr;
+ const char *status;
+ const char *checksum;
+};
+
+int cs_amp_write_cal_coeffs(struct cs_dsp *dsp,
+ const struct cirrus_amp_cal_controls *controls,
+ const struct cirrus_amp_cal_data *data);
+int cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid, int amp_index,
+ struct cirrus_amp_cal_data *out_data);
+
+struct cs_amp_test_hooks {
+ efi_status_t (*get_efi_variable)(efi_char16_t *name,
+ efi_guid_t *guid,
+ unsigned long *size,
+ void *buf);
+
+ int (*write_cal_coeff)(struct cs_dsp *dsp,
+ const struct cirrus_amp_cal_controls *controls,
+ const char *ctl_name, u32 val);
+};
+
+extern const struct cs_amp_test_hooks * const cs_amp_test_hooks;
+
+#endif /* CS_AMP_LIB_H */
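
A minimal editorial sketch tying the two exported helpers together: fetch calibration for one amp from EFI, then write it to a running DSP. Passing 0 as target_uid (no UID filter) and amp index 0 are assumptions for illustration:

/* Editorial sketch: read EFI calibration and apply it to the DSP. */
static int example_apply_cal(struct device *dev, struct cs_dsp *dsp,
			     const struct cirrus_amp_cal_controls *controls)
{
	struct cirrus_amp_cal_data data;
	int ret;

	ret = cs_amp_get_efi_calibration_data(dev, 0, 0, &data);
	if (ret < 0)
		return ret;
	return cs_amp_write_cal_coeffs(dsp, controls, &data);
}
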
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
new file mode 100644
index 000000000000..68e053fe7340
--- /dev/null
+++ b/include/sound/cs35l41.h
@@ -0,0 +1,911 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/sound/cs35l41.h -- Platform data for CS35L41
+ *
+ * Copyright (c) 2017-2021 Cirrus Logic Inc.
+ *
+ * Author: David Rhodes <david.rhodes@cirrus.com>
+ */
+
+#ifndef __CS35L41_H
+#define __CS35L41_H
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+
+#define CS35L41_FIRSTREG 0x00000000
+#define CS35L41_LASTREG 0x03804FE8
+#define CS35L41_DEVID 0x00000000
+#define CS35L41_REVID 0x00000004
+#define CS35L41_FABID 0x00000008
+#define CS35L41_RELID 0x0000000C
+#define CS35L41_OTPID 0x00000010
+#define CS35L41_SFT_RESET 0x00000020
+#define CS35L41_TEST_KEY_CTL 0x00000040
+#define CS35L41_USER_KEY_CTL 0x00000044
+#define CS35L41_OTP_MEM0 0x00000400
+#define CS35L41_OTP_MEM31 0x0000047C
+#define CS35L41_OTP_CTRL0 0x00000500
+#define CS35L41_OTP_CTRL1 0x00000504
+#define CS35L41_OTP_CTRL3 0x00000508
+#define CS35L41_OTP_CTRL4 0x0000050C
+#define CS35L41_OTP_CTRL5 0x00000510
+#define CS35L41_OTP_CTRL6 0x00000514
+#define CS35L41_OTP_CTRL7 0x00000518
+#define CS35L41_OTP_CTRL8 0x0000051C
+#define CS35L41_PWR_CTRL1 0x00002014
+#define CS35L41_PWR_CTRL2 0x00002018
+#define CS35L41_PWR_CTRL3 0x0000201C
+#define CS35L41_CTRL_OVRRIDE 0x00002020
+#define CS35L41_AMP_OUT_MUTE 0x00002024
+#define CS35L41_PROTECT_REL_ERR_IGN 0x00002034
+#define CS35L41_GPIO_PAD_CONTROL 0x0000242C
+#define CS35L41_JTAG_CONTROL 0x00002438
+#define CS35L41_PWRMGT_CTL 0x00002900
+#define CS35L41_WAKESRC_CTL 0x00002904
+#define CS35L41_PWRMGT_STS 0x00002908
+#define CS35L41_PLL_CLK_CTRL 0x00002C04
+#define CS35L41_DSP_CLK_CTRL 0x00002C08
+#define CS35L41_GLOBAL_CLK_CTRL 0x00002C0C
+#define CS35L41_DATA_FS_SEL 0x00002C10
+#define CS35L41_TST_FS_MON0 0x00002D10
+#define CS35L41_MDSYNC_EN 0x00003400
+#define CS35L41_MDSYNC_TX_ID 0x00003408
+#define CS35L41_MDSYNC_PWR_CTRL 0x0000340C
+#define CS35L41_MDSYNC_DATA_TX 0x00003410
+#define CS35L41_MDSYNC_TX_STATUS 0x00003414
+#define CS35L41_MDSYNC_DATA_RX 0x0000341C
+#define CS35L41_MDSYNC_RX_STATUS 0x00003420
+#define CS35L41_MDSYNC_ERR_STATUS 0x00003424
+#define CS35L41_MDSYNC_SYNC_PTE2 0x00003528
+#define CS35L41_MDSYNC_SYNC_PTE3 0x0000352C
+#define CS35L41_MDSYNC_SYNC_MSM_STATUS 0x0000353C
+#define CS35L41_BSTCVRT_VCTRL1 0x00003800
+#define CS35L41_BSTCVRT_VCTRL2 0x00003804
+#define CS35L41_BSTCVRT_PEAK_CUR 0x00003808
+#define CS35L41_BSTCVRT_SFT_RAMP 0x0000380C
+#define CS35L41_BSTCVRT_COEFF 0x00003810
+#define CS35L41_BSTCVRT_SLOPE_LBST 0x00003814
+#define CS35L41_BSTCVRT_SW_FREQ 0x00003818
+#define CS35L41_BSTCVRT_DCM_CTRL 0x0000381C
+#define CS35L41_BSTCVRT_DCM_MODE_FORCE 0x00003820
+#define CS35L41_BSTCVRT_OVERVOLT_CTRL 0x00003830
+#define CS35L41_VI_VOL_POL 0x00004000
+#define CS35L41_VIMON_SPKMON_RESYNC 0x00004100
+#define CS35L41_DTEMP_WARN_THLD 0x00004220
+#define CS35L41_DTEMP_CFG 0x00004224
+#define CS35L41_DTEMP_EN 0x00004308
+#define CS35L41_VPVBST_FS_SEL 0x00004400
+#define CS35L41_SP_ENABLES 0x00004800
+#define CS35L41_SP_RATE_CTRL 0x00004804
+#define CS35L41_SP_FORMAT 0x00004808
+#define CS35L41_SP_HIZ_CTRL 0x0000480C
+#define CS35L41_SP_FRAME_TX_SLOT 0x00004810
+#define CS35L41_SP_FRAME_RX_SLOT 0x00004820
+#define CS35L41_SP_TX_WL 0x00004830
+#define CS35L41_SP_RX_WL 0x00004840
+#define CS35L41_ASP_CONTROL4 0x00004854
+#define CS35L41_DAC_PCM1_SRC 0x00004C00
+#define CS35L41_ASP_TX1_SRC 0x00004C20
+#define CS35L41_ASP_TX2_SRC 0x00004C24
+#define CS35L41_ASP_TX3_SRC 0x00004C28
+#define CS35L41_ASP_TX4_SRC 0x00004C2C
+#define CS35L41_DSP1_RX1_SRC 0x00004C40
+#define CS35L41_DSP1_RX2_SRC 0x00004C44
+#define CS35L41_DSP1_RX3_SRC 0x00004C48
+#define CS35L41_DSP1_RX4_SRC 0x00004C4C
+#define CS35L41_DSP1_RX5_SRC 0x00004C50
+#define CS35L41_DSP1_RX6_SRC 0x00004C54
+#define CS35L41_DSP1_RX7_SRC 0x00004C58
+#define CS35L41_DSP1_RX8_SRC 0x00004C5C
+#define CS35L41_NGATE1_SRC 0x00004C60
+#define CS35L41_NGATE2_SRC 0x00004C64
+#define CS35L41_AMP_DIG_VOL_CTRL 0x00006000
+#define CS35L41_VPBR_CFG 0x00006404
+#define CS35L41_VBBR_CFG 0x00006408
+#define CS35L41_VPBR_STATUS 0x0000640C
+#define CS35L41_VBBR_STATUS 0x00006410
+#define CS35L41_OVERTEMP_CFG 0x00006414
+#define CS35L41_AMP_ERR_VOL 0x00006418
+#define CS35L41_VOL_STATUS_TO_DSP 0x00006450
+#define CS35L41_CLASSH_CFG 0x00006800
+#define CS35L41_WKFET_CFG 0x00006804
+#define CS35L41_NG_CFG 0x00006808
+#define CS35L41_AMP_GAIN_CTRL 0x00006C04
+#define CS35L41_DAC_MSM_CFG 0x00007400
+#define CS35L41_IRQ1_CFG 0x00010000
+#define CS35L41_IRQ1_STATUS 0x00010004
+#define CS35L41_IRQ1_STATUS1 0x00010010
+#define CS35L41_IRQ1_STATUS2 0x00010014
+#define CS35L41_IRQ1_STATUS3 0x00010018
+#define CS35L41_IRQ1_STATUS4 0x0001001C
+#define CS35L41_IRQ1_RAW_STATUS1 0x00010090
+#define CS35L41_IRQ1_RAW_STATUS2 0x00010094
+#define CS35L41_IRQ1_RAW_STATUS3 0x00010098
+#define CS35L41_IRQ1_RAW_STATUS4 0x0001009C
+#define CS35L41_IRQ1_MASK1 0x00010110
+#define CS35L41_IRQ1_MASK2 0x00010114
+#define CS35L41_IRQ1_MASK3 0x00010118
+#define CS35L41_IRQ1_MASK4 0x0001011C
+#define CS35L41_IRQ1_FRC1 0x00010190
+#define CS35L41_IRQ1_FRC2 0x00010194
+#define CS35L41_IRQ1_FRC3 0x00010198
+#define CS35L41_IRQ1_FRC4 0x0001019C
+#define CS35L41_IRQ1_EDGE1 0x00010210
+#define CS35L41_IRQ1_EDGE4 0x0001021C
+#define CS35L41_IRQ1_POL1 0x00010290
+#define CS35L41_IRQ1_POL2 0x00010294
+#define CS35L41_IRQ1_POL3 0x00010298
+#define CS35L41_IRQ1_POL4 0x0001029C
+#define CS35L41_IRQ1_DB3 0x00010318
+#define CS35L41_IRQ2_CFG 0x00010800
+#define CS35L41_IRQ2_STATUS 0x00010804
+#define CS35L41_IRQ2_STATUS1 0x00010810
+#define CS35L41_IRQ2_STATUS2 0x00010814
+#define CS35L41_IRQ2_STATUS3 0x00010818
+#define CS35L41_IRQ2_STATUS4 0x0001081C
+#define CS35L41_IRQ2_RAW_STATUS1 0x00010890
+#define CS35L41_IRQ2_RAW_STATUS2 0x00010894
+#define CS35L41_IRQ2_RAW_STATUS3 0x00010898
+#define CS35L41_IRQ2_RAW_STATUS4 0x0001089C
+#define CS35L41_IRQ2_MASK1 0x00010910
+#define CS35L41_IRQ2_MASK2 0x00010914
+#define CS35L41_IRQ2_MASK3 0x00010918
+#define CS35L41_IRQ2_MASK4 0x0001091C
+#define CS35L41_IRQ2_FRC1 0x00010990
+#define CS35L41_IRQ2_FRC2 0x00010994
+#define CS35L41_IRQ2_FRC3 0x00010998
+#define CS35L41_IRQ2_FRC4 0x0001099C
+#define CS35L41_IRQ2_EDGE1 0x00010A10
+#define CS35L41_IRQ2_EDGE4 0x00010A1C
+#define CS35L41_IRQ2_POL1 0x00010A90
+#define CS35L41_IRQ2_POL2 0x00010A94
+#define CS35L41_IRQ2_POL3 0x00010A98
+#define CS35L41_IRQ2_POL4 0x00010A9C
+#define CS35L41_IRQ2_DB3 0x00010B18
+#define CS35L41_GPIO_STATUS1 0x00011000
+#define CS35L41_GPIO1_CTRL1 0x00011008
+#define CS35L41_GPIO2_CTRL1 0x0001100C
+#define CS35L41_MIXER_NGATE_CFG 0x00012000
+#define CS35L41_MIXER_NGATE_CH1_CFG 0x00012004
+#define CS35L41_MIXER_NGATE_CH2_CFG 0x00012008
+#define CS35L41_DSP_MBOX_1 0x00013000
+#define CS35L41_DSP_MBOX_2 0x00013004
+#define CS35L41_DSP_MBOX_3 0x00013008
+#define CS35L41_DSP_MBOX_4 0x0001300C
+#define CS35L41_DSP_MBOX_5 0x00013010
+#define CS35L41_DSP_MBOX_6 0x00013014
+#define CS35L41_DSP_MBOX_7 0x00013018
+#define CS35L41_DSP_MBOX_8 0x0001301C
+#define CS35L41_DSP_VIRT1_MBOX_1 0x00013020
+#define CS35L41_DSP_VIRT1_MBOX_2 0x00013024
+#define CS35L41_DSP_VIRT1_MBOX_3 0x00013028
+#define CS35L41_DSP_VIRT1_MBOX_4 0x0001302C
+#define CS35L41_DSP_VIRT1_MBOX_5 0x00013030
+#define CS35L41_DSP_VIRT1_MBOX_6 0x00013034
+#define CS35L41_DSP_VIRT1_MBOX_7 0x00013038
+#define CS35L41_DSP_VIRT1_MBOX_8 0x0001303C
+#define CS35L41_DSP_VIRT2_MBOX_1 0x00013040
+#define CS35L41_DSP_VIRT2_MBOX_2 0x00013044
+#define CS35L41_DSP_VIRT2_MBOX_3 0x00013048
+#define CS35L41_DSP_VIRT2_MBOX_4 0x0001304C
+#define CS35L41_DSP_VIRT2_MBOX_5 0x00013050
+#define CS35L41_DSP_VIRT2_MBOX_6 0x00013054
+#define CS35L41_DSP_VIRT2_MBOX_7 0x00013058
+#define CS35L41_DSP_VIRT2_MBOX_8 0x0001305C
+#define CS35L41_CLOCK_DETECT_1 0x00014000
+#define CS35L41_TIMER1_CONTROL 0x00015000
+#define CS35L41_TIMER1_COUNT_PRESET 0x00015004
+#define CS35L41_TIMER1_START_STOP 0x0001500C
+#define CS35L41_TIMER1_STATUS 0x00015010
+#define CS35L41_TIMER1_COUNT_READBACK 0x00015014
+#define CS35L41_TIMER1_DSP_CLK_CFG 0x00015018
+#define CS35L41_TIMER1_DSP_CLK_STATUS 0x0001501C
+#define CS35L41_TIMER2_CONTROL 0x00015100
+#define CS35L41_TIMER2_COUNT_PRESET 0x00015104
+#define CS35L41_TIMER2_START_STOP 0x0001510C
+#define CS35L41_TIMER2_STATUS 0x00015110
+#define CS35L41_TIMER2_COUNT_READBACK 0x00015114
+#define CS35L41_TIMER2_DSP_CLK_CFG 0x00015118
+#define CS35L41_TIMER2_DSP_CLK_STATUS 0x0001511C
+#define CS35L41_DFT_JTAG_CONTROL 0x00016000
+#define CS35L41_DIE_STS1 0x00017040
+#define CS35L41_DIE_STS2 0x00017044
+#define CS35L41_TEMP_CAL1 0x00017048
+#define CS35L41_TEMP_CAL2 0x0001704C
+#define CS35L41_DSP1_XMEM_PACK_0 0x02000000
+#define CS35L41_DSP1_XMEM_PACK_3068 0x02002FF0
+#define CS35L41_DSP1_XMEM_UNPACK32_0 0x02400000
+#define CS35L41_DSP1_XMEM_UNPACK32_2046 0x02401FF8
+#define CS35L41_DSP1_TIMESTAMP_COUNT 0x025C0800
+#define CS35L41_DSP1_SYS_ID 0x025E0000
+#define CS35L41_DSP1_SYS_VERSION 0x025E0004
+#define CS35L41_DSP1_SYS_CORE_ID 0x025E0008
+#define CS35L41_DSP1_SYS_AHB_ADDR 0x025E000C
+#define CS35L41_DSP1_SYS_XSRAM_SIZE 0x025E0010
+#define CS35L41_DSP1_SYS_YSRAM_SIZE 0x025E0018
+#define CS35L41_DSP1_SYS_PSRAM_SIZE 0x025E0020
+#define CS35L41_DSP1_SYS_PM_BOOT_SIZE 0x025E0028
+#define CS35L41_DSP1_SYS_FEATURES 0x025E002C
+#define CS35L41_DSP1_SYS_FIR_FILTERS 0x025E0030
+#define CS35L41_DSP1_SYS_LMS_FILTERS 0x025E0034
+#define CS35L41_DSP1_SYS_XM_BANK_SIZE 0x025E0038
+#define CS35L41_DSP1_SYS_YM_BANK_SIZE 0x025E003C
+#define CS35L41_DSP1_SYS_PM_BANK_SIZE 0x025E0040
+#define CS35L41_DSP1_AHBM_WIN0_CTRL0 0x025E2000
+#define CS35L41_DSP1_AHBM_WIN0_CTRL1 0x025E2004
+#define CS35L41_DSP1_AHBM_WIN1_CTRL0 0x025E2008
+#define CS35L41_DSP1_AHBM_WIN1_CTRL1 0x025E200C
+#define CS35L41_DSP1_AHBM_WIN2_CTRL0 0x025E2010
+#define CS35L41_DSP1_AHBM_WIN2_CTRL1 0x025E2014
+#define CS35L41_DSP1_AHBM_WIN3_CTRL0 0x025E2018
+#define CS35L41_DSP1_AHBM_WIN3_CTRL1 0x025E201C
+#define CS35L41_DSP1_AHBM_WIN4_CTRL0 0x025E2020
+#define CS35L41_DSP1_AHBM_WIN4_CTRL1 0x025E2024
+#define CS35L41_DSP1_AHBM_WIN5_CTRL0 0x025E2028
+#define CS35L41_DSP1_AHBM_WIN5_CTRL1 0x025E202C
+#define CS35L41_DSP1_AHBM_WIN6_CTRL0 0x025E2030
+#define CS35L41_DSP1_AHBM_WIN6_CTRL1 0x025E2034
+#define CS35L41_DSP1_AHBM_WIN7_CTRL0 0x025E2038
+#define CS35L41_DSP1_AHBM_WIN7_CTRL1 0x025E203C
+#define CS35L41_DSP1_AHBM_WIN_DBG_CTRL0 0x025E2040
+#define CS35L41_DSP1_AHBM_WIN_DBG_CTRL1 0x025E2044
+#define CS35L41_DSP1_XMEM_UNPACK24_0 0x02800000
+#define CS35L41_DSP1_XMEM_UNPACK24_4093 0x02803FF4
+#define CS35L41_DSP1_CTRL_BASE 0x02B80000
+#define CS35L41_DSP1_CORE_SOFT_RESET 0x02B80010
+#define CS35L41_DSP1_DEBUG 0x02B80040
+#define CS35L41_DSP1_TIMER_CTRL 0x02B80048
+#define CS35L41_DSP1_STREAM_ARB_CTRL 0x02B80050
+#define CS35L41_DSP1_RX1_RATE 0x02B80080
+#define CS35L41_DSP1_RX2_RATE 0x02B80088
+#define CS35L41_DSP1_RX3_RATE 0x02B80090
+#define CS35L41_DSP1_RX4_RATE 0x02B80098
+#define CS35L41_DSP1_RX5_RATE 0x02B800A0
+#define CS35L41_DSP1_RX6_RATE 0x02B800A8
+#define CS35L41_DSP1_RX7_RATE 0x02B800B0
+#define CS35L41_DSP1_RX8_RATE 0x02B800B8
+#define CS35L41_DSP1_TX1_RATE 0x02B80280
+#define CS35L41_DSP1_TX2_RATE 0x02B80288
+#define CS35L41_DSP1_TX3_RATE 0x02B80290
+#define CS35L41_DSP1_TX4_RATE 0x02B80298
+#define CS35L41_DSP1_TX5_RATE 0x02B802A0
+#define CS35L41_DSP1_TX6_RATE 0x02B802A8
+#define CS35L41_DSP1_TX7_RATE 0x02B802B0
+#define CS35L41_DSP1_TX8_RATE 0x02B802B8
+#define CS35L41_DSP1_NMI_CTRL1 0x02B80480
+#define CS35L41_DSP1_NMI_CTRL2 0x02B80488
+#define CS35L41_DSP1_NMI_CTRL3 0x02B80490
+#define CS35L41_DSP1_NMI_CTRL4 0x02B80498
+#define CS35L41_DSP1_NMI_CTRL5 0x02B804A0
+#define CS35L41_DSP1_NMI_CTRL6 0x02B804A8
+#define CS35L41_DSP1_NMI_CTRL7 0x02B804B0
+#define CS35L41_DSP1_NMI_CTRL8 0x02B804B8
+#define CS35L41_DSP1_RESUME_CTRL 0x02B80500
+#define CS35L41_DSP1_IRQ1_CTRL 0x02B80508
+#define CS35L41_DSP1_IRQ2_CTRL 0x02B80510
+#define CS35L41_DSP1_IRQ3_CTRL 0x02B80518
+#define CS35L41_DSP1_IRQ4_CTRL 0x02B80520
+#define CS35L41_DSP1_IRQ5_CTRL 0x02B80528
+#define CS35L41_DSP1_IRQ6_CTRL 0x02B80530
+#define CS35L41_DSP1_IRQ7_CTRL 0x02B80538
+#define CS35L41_DSP1_IRQ8_CTRL 0x02B80540
+#define CS35L41_DSP1_IRQ9_CTRL 0x02B80548
+#define CS35L41_DSP1_IRQ10_CTRL 0x02B80550
+#define CS35L41_DSP1_IRQ11_CTRL 0x02B80558
+#define CS35L41_DSP1_IRQ12_CTRL 0x02B80560
+#define CS35L41_DSP1_IRQ13_CTRL 0x02B80568
+#define CS35L41_DSP1_IRQ14_CTRL 0x02B80570
+#define CS35L41_DSP1_IRQ15_CTRL 0x02B80578
+#define CS35L41_DSP1_IRQ16_CTRL 0x02B80580
+#define CS35L41_DSP1_IRQ17_CTRL 0x02B80588
+#define CS35L41_DSP1_IRQ18_CTRL 0x02B80590
+#define CS35L41_DSP1_IRQ19_CTRL 0x02B80598
+#define CS35L41_DSP1_IRQ20_CTRL 0x02B805A0
+#define CS35L41_DSP1_IRQ21_CTRL 0x02B805A8
+#define CS35L41_DSP1_IRQ22_CTRL 0x02B805B0
+#define CS35L41_DSP1_IRQ23_CTRL 0x02B805B8
+#define CS35L41_DSP1_SCRATCH1 0x02B805C0
+#define CS35L41_DSP1_SCRATCH2 0x02B805C8
+#define CS35L41_DSP1_SCRATCH3 0x02B805D0
+#define CS35L41_DSP1_SCRATCH4 0x02B805D8
+#define CS35L41_DSP1_CCM_CORE_CTRL 0x02BC1000
+#define CS35L41_DSP1_CCM_CLK_OVERRIDE 0x02BC1008
+#define CS35L41_DSP1_XM_MSTR_EN 0x02BC2000
+#define CS35L41_DSP1_XM_CORE_PRI 0x02BC2008
+#define CS35L41_DSP1_XM_AHB_PACK_PL_PRI 0x02BC2010
+#define CS35L41_DSP1_XM_AHB_UP_PL_PRI 0x02BC2018
+#define CS35L41_DSP1_XM_ACCEL_PL0_PRI 0x02BC2020
+#define CS35L41_DSP1_XM_NPL0_PRI 0x02BC2078
+#define CS35L41_DSP1_YM_MSTR_EN 0x02BC20C0
+#define CS35L41_DSP1_YM_CORE_PRI 0x02BC20C8
+#define CS35L41_DSP1_YM_AHB_PACK_PL_PRI 0x02BC20D0
+#define CS35L41_DSP1_YM_AHB_UP_PL_PRI 0x02BC20D8
+#define CS35L41_DSP1_YM_ACCEL_PL0_PRI 0x02BC20E0
+#define CS35L41_DSP1_YM_NPL0_PRI 0x02BC2138
+#define CS35L41_DSP1_PM_MSTR_EN 0x02BC2180
+#define CS35L41_DSP1_PM_PATCH0_ADDR 0x02BC2188
+#define CS35L41_DSP1_PM_PATCH0_EN 0x02BC218C
+#define CS35L41_DSP1_PM_PATCH0_DATA_LO 0x02BC2190
+#define CS35L41_DSP1_PM_PATCH0_DATA_HI 0x02BC2194
+#define CS35L41_DSP1_PM_PATCH1_ADDR 0x02BC2198
+#define CS35L41_DSP1_PM_PATCH1_EN 0x02BC219C
+#define CS35L41_DSP1_PM_PATCH1_DATA_LO 0x02BC21A0
+#define CS35L41_DSP1_PM_PATCH1_DATA_HI 0x02BC21A4
+#define CS35L41_DSP1_PM_PATCH2_ADDR 0x02BC21A8
+#define CS35L41_DSP1_PM_PATCH2_EN 0x02BC21AC
+#define CS35L41_DSP1_PM_PATCH2_DATA_LO 0x02BC21B0
+#define CS35L41_DSP1_PM_PATCH2_DATA_HI 0x02BC21B4
+#define CS35L41_DSP1_PM_PATCH3_ADDR 0x02BC21B8
+#define CS35L41_DSP1_PM_PATCH3_EN 0x02BC21BC
+#define CS35L41_DSP1_PM_PATCH3_DATA_LO 0x02BC21C0
+#define CS35L41_DSP1_PM_PATCH3_DATA_HI 0x02BC21C4
+#define CS35L41_DSP1_PM_PATCH4_ADDR 0x02BC21C8
+#define CS35L41_DSP1_PM_PATCH4_EN 0x02BC21CC
+#define CS35L41_DSP1_PM_PATCH4_DATA_LO 0x02BC21D0
+#define CS35L41_DSP1_PM_PATCH4_DATA_HI 0x02BC21D4
+#define CS35L41_DSP1_PM_PATCH5_ADDR 0x02BC21D8
+#define CS35L41_DSP1_PM_PATCH5_EN 0x02BC21DC
+#define CS35L41_DSP1_PM_PATCH5_DATA_LO 0x02BC21E0
+#define CS35L41_DSP1_PM_PATCH5_DATA_HI 0x02BC21E4
+#define CS35L41_DSP1_PM_PATCH6_ADDR 0x02BC21E8
+#define CS35L41_DSP1_PM_PATCH6_EN 0x02BC21EC
+#define CS35L41_DSP1_PM_PATCH6_DATA_LO 0x02BC21F0
+#define CS35L41_DSP1_PM_PATCH6_DATA_HI 0x02BC21F4
+#define CS35L41_DSP1_PM_PATCH7_ADDR 0x02BC21F8
+#define CS35L41_DSP1_PM_PATCH7_EN 0x02BC21FC
+#define CS35L41_DSP1_PM_PATCH7_DATA_LO 0x02BC2200
+#define CS35L41_DSP1_PM_PATCH7_DATA_HI 0x02BC2204
+#define CS35L41_DSP1_MPU_XM_ACCESS0 0x02BC3000
+#define CS35L41_DSP1_MPU_YM_ACCESS0 0x02BC3004
+#define CS35L41_DSP1_MPU_WNDW_ACCESS0 0x02BC3008
+#define CS35L41_DSP1_MPU_XREG_ACCESS0 0x02BC300C
+#define CS35L41_DSP1_MPU_YREG_ACCESS0 0x02BC3014
+#define CS35L41_DSP1_MPU_XM_ACCESS1 0x02BC3018
+#define CS35L41_DSP1_MPU_YM_ACCESS1 0x02BC301C
+#define CS35L41_DSP1_MPU_WNDW_ACCESS1 0x02BC3020
+#define CS35L41_DSP1_MPU_XREG_ACCESS1 0x02BC3024
+#define CS35L41_DSP1_MPU_YREG_ACCESS1 0x02BC302C
+#define CS35L41_DSP1_MPU_XM_ACCESS2 0x02BC3030
+#define CS35L41_DSP1_MPU_YM_ACCESS2 0x02BC3034
+#define CS35L41_DSP1_MPU_WNDW_ACCESS2 0x02BC3038
+#define CS35L41_DSP1_MPU_XREG_ACCESS2 0x02BC303C
+#define CS35L41_DSP1_MPU_YREG_ACCESS2 0x02BC3044
+#define CS35L41_DSP1_MPU_XM_ACCESS3 0x02BC3048
+#define CS35L41_DSP1_MPU_YM_ACCESS3 0x02BC304C
+#define CS35L41_DSP1_MPU_WNDW_ACCESS3 0x02BC3050
+#define CS35L41_DSP1_MPU_XREG_ACCESS3 0x02BC3054
+#define CS35L41_DSP1_MPU_YREG_ACCESS3 0x02BC305C
+#define CS35L41_DSP1_MPU_XM_VIO_ADDR 0x02BC3100
+#define CS35L41_DSP1_MPU_XM_VIO_STATUS 0x02BC3104
+#define CS35L41_DSP1_MPU_YM_VIO_ADDR 0x02BC3108
+#define CS35L41_DSP1_MPU_YM_VIO_STATUS 0x02BC310C
+#define CS35L41_DSP1_MPU_PM_VIO_ADDR 0x02BC3110
+#define CS35L41_DSP1_MPU_PM_VIO_STATUS 0x02BC3114
+#define CS35L41_DSP1_MPU_LOCK_CONFIG 0x02BC3140
+#define CS35L41_DSP1_MPU_WDT_RST_CTRL 0x02BC3180
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG0 0x02BC5000
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG1 0x02BC5004
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG2 0x02BC5008
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG0 0x02BC5010
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG1 0x02BC5014
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG2 0x02BC5018
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG0 0x02BC5020
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG1 0x02BC5024
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG2 0x02BC5028
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG0 0x02BC5030
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG1 0x02BC5034
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG2 0x02BC5038
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG0 0x02BC5040
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG1 0x02BC5044
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG2 0x02BC5048
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG0 0x02BC5050
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG1 0x02BC5054
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG2 0x02BC5058
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG0 0x02BC5060
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG1 0x02BC5064
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG2 0x02BC5068
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG0 0x02BC5070
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG1 0x02BC5074
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG2 0x02BC5078
+#define CS35L41_DSP1_STRMARB_TX0_CFG0 0x02BC5200
+#define CS35L41_DSP1_STRMARB_TX0_CFG1 0x02BC5204
+#define CS35L41_DSP1_STRMARB_TX1_CFG0 0x02BC5208
+#define CS35L41_DSP1_STRMARB_TX1_CFG1 0x02BC520C
+#define CS35L41_DSP1_STRMARB_TX2_CFG0 0x02BC5210
+#define CS35L41_DSP1_STRMARB_TX2_CFG1 0x02BC5214
+#define CS35L41_DSP1_STRMARB_TX3_CFG0 0x02BC5218
+#define CS35L41_DSP1_STRMARB_TX3_CFG1 0x02BC521C
+#define CS35L41_DSP1_STRMARB_TX4_CFG0 0x02BC5220
+#define CS35L41_DSP1_STRMARB_TX4_CFG1 0x02BC5224
+#define CS35L41_DSP1_STRMARB_TX5_CFG0 0x02BC5228
+#define CS35L41_DSP1_STRMARB_TX5_CFG1 0x02BC522C
+#define CS35L41_DSP1_STRMARB_TX6_CFG0 0x02BC5230
+#define CS35L41_DSP1_STRMARB_TX6_CFG1 0x02BC5234
+#define CS35L41_DSP1_STRMARB_TX7_CFG0 0x02BC5238
+#define CS35L41_DSP1_STRMARB_TX7_CFG1 0x02BC523C
+#define CS35L41_DSP1_STRMARB_RX0_CFG0 0x02BC5400
+#define CS35L41_DSP1_STRMARB_RX0_CFG1 0x02BC5404
+#define CS35L41_DSP1_STRMARB_RX1_CFG0 0x02BC5408
+#define CS35L41_DSP1_STRMARB_RX1_CFG1 0x02BC540C
+#define CS35L41_DSP1_STRMARB_RX2_CFG0 0x02BC5410
+#define CS35L41_DSP1_STRMARB_RX2_CFG1 0x02BC5414
+#define CS35L41_DSP1_STRMARB_RX3_CFG0 0x02BC5418
+#define CS35L41_DSP1_STRMARB_RX3_CFG1 0x02BC541C
+#define CS35L41_DSP1_STRMARB_RX4_CFG0 0x02BC5420
+#define CS35L41_DSP1_STRMARB_RX4_CFG1 0x02BC5424
+#define CS35L41_DSP1_STRMARB_RX5_CFG0 0x02BC5428
+#define CS35L41_DSP1_STRMARB_RX5_CFG1 0x02BC542C
+#define CS35L41_DSP1_STRMARB_RX6_CFG0 0x02BC5430
+#define CS35L41_DSP1_STRMARB_RX6_CFG1 0x02BC5434
+#define CS35L41_DSP1_STRMARB_RX7_CFG0 0x02BC5438
+#define CS35L41_DSP1_STRMARB_RX7_CFG1 0x02BC543C
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG0 0x02BC5600
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG1 0x02BC5604
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG2 0x02BC5608
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG0 0x02BC5610
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG1 0x02BC5614
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG2 0x02BC5618
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG0 0x02BC5620
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG1 0x02BC5624
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG2 0x02BC5628
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG0 0x02BC5630
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG1 0x02BC5634
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG2 0x02BC5638
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG0 0x02BC5640
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG1 0x02BC5644
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG2 0x02BC5648
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG0 0x02BC5650
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG1 0x02BC5654
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG2 0x02BC5658
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG0 0x02BC5660
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG1 0x02BC5664
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG2 0x02BC5668
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG0 0x02BC5670
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG1 0x02BC5674
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG2 0x02BC5678
+#define CS35L41_DSP1_STRMARB_RESYNC_MSK 0x02BC5A00
+#define CS35L41_DSP1_STRMARB_ERR_STATUS 0x02BC5A08
+#define CS35L41_DSP1_INTPCTL_RES_STATIC 0x02BC6000
+#define CS35L41_DSP1_INTPCTL_RES_DYN 0x02BC6004
+#define CS35L41_DSP1_INTPCTL_NMI_CTRL 0x02BC6008
+#define CS35L41_DSP1_INTPCTL_IRQ_INV 0x02BC6010
+#define CS35L41_DSP1_INTPCTL_IRQ_MODE 0x02BC6014
+#define CS35L41_DSP1_INTPCTL_IRQ_EN 0x02BC6018
+#define CS35L41_DSP1_INTPCTL_IRQ_MSK 0x02BC601C
+#define CS35L41_DSP1_INTPCTL_IRQ_FLUSH 0x02BC6020
+#define CS35L41_DSP1_INTPCTL_IRQ_MSKCLR 0x02BC6024
+#define CS35L41_DSP1_INTPCTL_IRQ_FRC 0x02BC6028
+#define CS35L41_DSP1_INTPCTL_IRQ_MSKSET 0x02BC602C
+#define CS35L41_DSP1_INTPCTL_IRQ_ERR 0x02BC6030
+#define CS35L41_DSP1_INTPCTL_IRQ_PEND 0x02BC6034
+#define CS35L41_DSP1_INTPCTL_IRQ_GEN 0x02BC6038
+#define CS35L41_DSP1_INTPCTL_TESTBITS 0x02BC6040
+#define CS35L41_DSP1_WDT_CONTROL 0x02BC7000
+#define CS35L41_DSP1_WDT_STATUS 0x02BC7008
+#define CS35L41_DSP1_YMEM_PACK_0 0x02C00000
+#define CS35L41_DSP1_YMEM_PACK_1532 0x02C017F0
+#define CS35L41_DSP1_YMEM_UNPACK32_0 0x03000000
+#define CS35L41_DSP1_YMEM_UNPACK32_1022 0x03000FF8
+#define CS35L41_DSP1_YMEM_UNPACK24_0 0x03400000
+#define CS35L41_DSP1_YMEM_UNPACK24_2045 0x03401FF4
+#define CS35L41_DSP1_PMEM_0 0x03800000
+#define CS35L41_DSP1_PMEM_5114 0x03804FE8
+
+/* test registers for emulation bring-up */
+#define CS35L41_PLL_OVR 0x00003018
+#define CS35L41_BST_TEST_DUTY 0x00003900
+#define CS35L41_DIGPWM_IOCTRL 0x0000706C
+
+/* registers populated by OTP */
+#define CS35L41_OTP_TRIM_1 0x0000208c
+#define CS35L41_OTP_TRIM_2 0x00002090
+#define CS35L41_OTP_TRIM_3 0x00003010
+#define CS35L41_OTP_TRIM_4 0x0000300C
+#define CS35L41_OTP_TRIM_5 0x0000394C
+#define CS35L41_OTP_TRIM_6 0x00003950
+#define CS35L41_OTP_TRIM_7 0x00003954
+#define CS35L41_OTP_TRIM_8 0x00003958
+#define CS35L41_OTP_TRIM_9 0x0000395C
+#define CS35L41_OTP_TRIM_10 0x0000416C
+#define CS35L41_OTP_TRIM_11 0x00004160
+#define CS35L41_OTP_TRIM_12 0x00004170
+#define CS35L41_OTP_TRIM_13 0x00004360
+#define CS35L41_OTP_TRIM_14 0x00004448
+#define CS35L41_OTP_TRIM_15 0x0000444C
+#define CS35L41_OTP_TRIM_16 0x00006E30
+#define CS35L41_OTP_TRIM_17 0x00006E34
+#define CS35L41_OTP_TRIM_18 0x00006E38
+#define CS35L41_OTP_TRIM_19 0x00006E3C
+#define CS35L41_OTP_TRIM_20 0x00006E40
+#define CS35L41_OTP_TRIM_21 0x00006E44
+#define CS35L41_OTP_TRIM_22 0x00006E48
+#define CS35L41_OTP_TRIM_23 0x00006E4C
+#define CS35L41_OTP_TRIM_24 0x00006E50
+#define CS35L41_OTP_TRIM_25 0x00006E54
+#define CS35L41_OTP_TRIM_26 0x00006E58
+#define CS35L41_OTP_TRIM_27 0x00006E5C
+#define CS35L41_OTP_TRIM_28 0x00006E60
+#define CS35L41_OTP_TRIM_29 0x00006E64
+#define CS35L41_OTP_TRIM_30 0x00007418
+#define CS35L41_OTP_TRIM_31 0x0000741C
+#define CS35L41_OTP_TRIM_32 0x00007434
+#define CS35L41_OTP_TRIM_33 0x00007068
+#define CS35L41_OTP_TRIM_34 0x0000410C
+#define CS35L41_OTP_TRIM_35 0x0000400C
+#define CS35L41_OTP_TRIM_36 0x00002030
+
+#define CS35L41_MAX_CACHE_REG 36
+#define CS35L41_OTP_SIZE_WORDS 32
+
+#define CS35L41_NUM_SUPPLIES 2
+
+#define CS35L41_SCLK_MSTR_MASK 0x10
+#define CS35L41_SCLK_MSTR_SHIFT 4
+#define CS35L41_LRCLK_MSTR_MASK 0x01
+#define CS35L41_LRCLK_MSTR_SHIFT 0
+#define CS35L41_SCLK_INV_MASK 0x40
+#define CS35L41_SCLK_INV_SHIFT 6
+#define CS35L41_LRCLK_INV_MASK 0x04
+#define CS35L41_LRCLK_INV_SHIFT 2
+#define CS35L41_SCLK_FRC_MASK 0x20
+#define CS35L41_SCLK_FRC_SHIFT 5
+#define CS35L41_LRCLK_FRC_MASK 0x02
+#define CS35L41_LRCLK_FRC_SHIFT 1
+
+#define CS35L41_AMP_GAIN_PCM_MASK 0x3E0
+#define CS35L41_AMP_GAIN_ZC_MASK 0x0400
+#define CS35L41_AMP_GAIN_ZC_SHIFT 10
+
+#define CS35L41_BST_CTL_MASK 0xFF
+#define CS35L41_BST_CTL_SEL_MASK 0x03
+#define CS35L41_BST_CTL_SEL_REG 0x00
+#define CS35L41_BST_CTL_SEL_CLASSH 0x01
+#define CS35L41_BST_IPK_MASK 0x7F
+#define CS35L41_BST_IPK_SHIFT 0
+#define CS35L41_BST_LIM_MASK 0x4
+#define CS35L41_BST_LIM_SHIFT 2
+#define CS35L41_BST_K1_MASK 0x000000FF
+#define CS35L41_BST_K1_SHIFT 0
+#define CS35L41_BST_K2_MASK 0x0000FF00
+#define CS35L41_BST_K2_SHIFT 8
+#define CS35L41_BST_SLOPE_MASK 0x0000FF00
+#define CS35L41_BST_SLOPE_SHIFT 8
+#define CS35L41_BST_LBST_VAL_MASK 0x00000003
+#define CS35L41_BST_LBST_VAL_SHIFT 0
+
+#define CS35L41_TEMP_THLD_MASK 0x03
+#define CS35L41_VMON_IMON_VOL_MASK 0x07FF07FF
+#define CS35L41_PDM_MODE_MASK 0x01
+#define CS35L41_PDM_MODE_SHIFT 0
+
+#define CS35L41_CH_MEM_DEPTH_MASK 0x07
+#define CS35L41_CH_MEM_DEPTH_SHIFT 0
+#define CS35L41_CH_HDRM_CTL_MASK 0x007F0000
+#define CS35L41_CH_HDRM_CTL_SHIFT 16
+#define CS35L41_CH_REL_RATE_MASK 0xFF00
+#define CS35L41_CH_REL_RATE_SHIFT 8
+#define CS35L41_CH_WKFET_DLY_MASK 0x001C
+#define CS35L41_CH_WKFET_DLY_SHIFT 2
+#define CS35L41_CH_WKFET_THLD_MASK 0x0F00
+#define CS35L41_CH_WKFET_THLD_SHIFT 8
+
+#define CS35L41_HW_NG_SEL_MASK 0x3F00
+#define CS35L41_HW_NG_SEL_SHIFT 8
+#define CS35L41_HW_NG_DLY_MASK 0x0070
+#define CS35L41_HW_NG_DLY_SHIFT 4
+#define CS35L41_HW_NG_THLD_MASK 0x0007
+#define CS35L41_HW_NG_THLD_SHIFT 0
+
+#define CS35L41_DSP_NG_ENABLE_MASK 0x00010000
+#define CS35L41_DSP_NG_ENABLE_SHIFT 16
+#define CS35L41_DSP_NG_THLD_MASK 0x7
+#define CS35L41_DSP_NG_THLD_SHIFT 0
+#define CS35L41_DSP_NG_DELAY_MASK 0x0F00
+#define CS35L41_DSP_NG_DELAY_SHIFT 8
+
+#define CS35L41_ASP_FMT_MASK 0x0700
+#define CS35L41_ASP_FMT_SHIFT 8
+#define CS35L41_ASP_DOUT_HIZ_MASK 0x03
+#define CS35L41_ASP_DOUT_HIZ_SHIFT 0
+#define CS35L41_ASP_WIDTH_16 0x10
+#define CS35L41_ASP_WIDTH_24 0x18
+#define CS35L41_ASP_WIDTH_32 0x20
+#define CS35L41_ASP_WIDTH_TX_MASK 0xFF0000
+#define CS35L41_ASP_WIDTH_TX_SHIFT 16
+#define CS35L41_ASP_WIDTH_RX_MASK 0xFF000000
+#define CS35L41_ASP_WIDTH_RX_SHIFT 24
+#define CS35L41_ASP_RX1_SLOT_MASK 0x3F
+#define CS35L41_ASP_RX1_SLOT_SHIFT 0
+#define CS35L41_ASP_RX2_SLOT_MASK 0x3F00
+#define CS35L41_ASP_RX2_SLOT_SHIFT 8
+#define CS35L41_ASP_RX_WL_MASK 0x3F
+#define CS35L41_ASP_TX_WL_MASK 0x3F
+#define CS35L41_ASP_RX_WL_SHIFT 0
+#define CS35L41_ASP_TX_WL_SHIFT 0
+#define CS35L41_ASP_SOURCE_MASK 0x7F
+
+#define CS35L41_INPUT_SRC_ASPRX1 0x08
+#define CS35L41_INPUT_SRC_ASPRX2 0x09
+#define CS35L41_INPUT_SRC_VMON 0x18
+#define CS35L41_INPUT_SRC_IMON 0x19
+#define CS35L41_INPUT_SRC_CLASSH 0x21
+#define CS35L41_INPUT_SRC_VPMON 0x28
+#define CS35L41_INPUT_SRC_VBSTMON 0x29
+#define CS35L41_INPUT_SRC_TEMPMON 0x3A
+#define CS35L41_INPUT_SRC_RSVD 0x3B
+#define CS35L41_INPUT_DSP_TX1 0x32
+#define CS35L41_INPUT_DSP_TX2 0x33
+
+#define CS35L41_WR_PEND_STS_MASK 0x2
+
+#define CS35L41_PLL_CLK_SEL_MASK 0x07
+#define CS35L41_PLL_CLK_SEL_SHIFT 0
+#define CS35L41_PLL_CLK_EN_MASK 0x10
+#define CS35L41_PLL_CLK_EN_SHIFT 4
+#define CS35L41_PLL_OPENLOOP_MASK 0x0800
+#define CS35L41_PLL_OPENLOOP_SHIFT 11
+#define CS35L41_PLLSRC_SCLK 0
+#define CS35L41_PLLSRC_LRCLK 1
+#define CS35L41_PLLSRC_SELF 3
+#define CS35L41_PLLSRC_PDMCLK 4
+#define CS35L41_PLLSRC_MCLK 5
+#define CS35L41_PLLSRC_SWIRE 7
+#define CS35L41_REFCLK_FREQ_MASK 0x7E0
+#define CS35L41_REFCLK_FREQ_SHIFT 5
+
+#define CS35L41_GLOBAL_FS_MASK 0x1F
+#define CS35L41_GLOBAL_FS_SHIFT 0
+
+#define CS35L41_GLOBAL_EN_MASK 0x01
+#define CS35L41_GLOBAL_EN_SHIFT 0
+#define CS35L41_BST_EN_MASK 0x0030
+#define CS35L41_BST_EN_SHIFT 4
+#define CS35L41_BST_DIS_FET_OFF 0x00
+#define CS35L41_BST_EN_DEFAULT 0x2
+#define CS35L41_AMP_EN_SHIFT 0
+#define CS35L41_AMP_EN_MASK 1
+#define CS35L41_VMON_EN_MASK 0x1000
+#define CS35L41_VMON_EN_SHIFT 12
+#define CS35L41_IMON_EN_MASK 0x2000
+#define CS35L41_IMON_EN_SHIFT 13
+
+#define CS35L41_PDN_DONE_MASK 0x00800000
+#define CS35L41_PDN_DONE_SHIFT 23
+#define CS35L41_PUP_DONE_MASK 0x01000000
+#define CS35L41_PUP_DONE_SHIFT 24
+
+#define CS35L36_PUP_DONE_IRQ_UNMASK 0x5F
+#define CS35L36_PUP_DONE_IRQ_MASK 0xBF
+#define CS35L41_SYNC_EN_MASK BIT(8)
+
+#define CS35L41_AMP_SHORT_ERR 0x80000000
+#define CS35L41_BST_SHORT_ERR 0x0100
+#define CS35L41_TEMP_WARN 0x8000
+#define CS35L41_TEMP_ERR 0x00020000
+#define CS35L41_BST_OVP_ERR 0x40
+#define CS35L41_BST_DCM_UVP_ERR 0x80
+#define CS35L41_OTP_BOOT_DONE 0x02
+#define CS35L41_PLL_UNLOCK 0x10
+#define CS35L41_PLL_LOCK BIT(1)
+#define CS35L41_OTP_BOOT_ERR 0x80000000
+
+#define CS35L41_AMP_SHORT_ERR_RLS 0x02
+#define CS35L41_BST_SHORT_ERR_RLS 0x04
+#define CS35L41_BST_OVP_ERR_RLS 0x08
+#define CS35L41_BST_UVP_ERR_RLS 0x10
+#define CS35L41_TEMP_WARN_ERR_RLS 0x20
+#define CS35L41_TEMP_ERR_RLS 0x40
+
+#define CS35L41_AMP_SHORT_ERR_RLS_SHIFT 1
+#define CS35L41_BST_SHORT_ERR_RLS_SHIFT 2
+#define CS35L41_BST_OVP_ERR_RLS_SHIFT 3
+#define CS35L41_BST_UVP_ERR_RLS_SHIFT 4
+#define CS35L41_TEMP_WARN_ERR_RLS_SHIFT 5
+#define CS35L41_TEMP_ERR_RLS_SHIFT 6
+
+#define CS35L41_INT1_MASK_DEFAULT 0x7FFCFE3F
+#define CS35L41_INT1_UNMASK_PUP 0xFEFFFFFF
+#define CS35L41_INT1_UNMASK_PDN 0xFF7FFFFF
+#define CS35L41_INT3_PLL_LOCK_SHIFT 1
+#define CS35L41_INT3_PLL_LOCK_MASK BIT(CS35L41_INT3_PLL_LOCK_SHIFT)
+
+#define CS35L41_GPIO_DIR_MASK 0x80000000
+#define CS35L41_GPIO_DIR_SHIFT 31
+#define CS35L41_GPIO1_CTRL_MASK 0x00030000
+#define CS35L41_GPIO1_CTRL_SHIFT 16
+#define CS35L41_GPIO2_CTRL_MASK 0x07000000
+#define CS35L41_GPIO2_CTRL_SHIFT 24
+#define CS35L41_GPIO_LVL_SHIFT 15
+#define CS35L41_GPIO_LVL_MASK BIT(CS35L41_GPIO_LVL_SHIFT)
+#define CS35L41_GPIO_POL_MASK 0x1000
+#define CS35L41_GPIO_POL_SHIFT 12
+
+#define CS35L41_AMP_INV_PCM_SHIFT 14
+#define CS35L41_AMP_INV_PCM_MASK BIT(CS35L41_AMP_INV_PCM_SHIFT)
+#define CS35L41_AMP_PCM_VOL_SHIFT 3
+#define CS35L41_AMP_PCM_VOL_MASK (0x7FF << 3)
+#define CS35L41_AMP_PCM_VOL_MUTE 0x4CF
+
+#define CS35L41_CHIP_ID 0x35a40
+#define CS35L41R_CHIP_ID 0x35b40
+#define CS35L41_MTLREVID_MASK 0x0F
+#define CS35L41_REVID_A0 0xA0
+#define CS35L41_REVID_B0 0xB0
+#define CS35L41_REVID_B2 0xB2
+
+#define CS35L41_HALO_CORE_RESET 0x00000200
+#define CS35L41_SOFTWARE_RESET 0x5A000000
+
+#define CS35L41_FS1_WINDOW_MASK 0x000007FF
+#define CS35L41_FS2_WINDOW_MASK 0x00FFF800
+#define CS35L41_FS2_WINDOW_SHIFT 12
+
+#define CS35L41_SPI_MAX_FREQ 4000000
+#define CS35L41_REGSTRIDE 4
+
+enum cs35l41_boost_type {
+ CS35L41_INT_BOOST,
+ CS35L41_EXT_BOOST,
+ CS35L41_SHD_BOOST_ACTV,
+ CS35L41_SHD_BOOST_PASS,
+
+	// Not present in the binding documentation, so no system should use this value.
+	// It is used only in the CLSA0100 laptop.
+ CS35L41_EXT_BOOST_NO_VSPK_SWITCH,
+};
+
+enum cs35l41_clk_ids {
+ CS35L41_CLKID_SCLK = 0,
+ CS35L41_CLKID_LRCLK = 1,
+ CS35L41_CLKID_MCLK = 4,
+};
+
+enum cs35l41_gpio1_func {
+ CS35L41_GPIO1_HIZ,
+ CS35L41_GPIO1_GPIO,
+ CS35L41_GPIO1_MDSYNC,
+ CS35L41_GPIO1_MCLK,
+ CS35L41_GPIO1_PDM_CLK,
+ CS35L41_GPIO1_PDM_DATA,
+};
+
+enum cs35l41_gpio2_func {
+ CS35L41_GPIO2_HIZ,
+ CS35L41_GPIO2_GPIO,
+ CS35L41_GPIO2_INT_OPEN_DRAIN,
+ CS35L41_GPIO2_MCLK,
+ CS35L41_GPIO2_INT_PUSH_PULL_LOW,
+ CS35L41_GPIO2_INT_PUSH_PULL_HIGH,
+ CS35L41_GPIO2_PDM_CLK,
+ CS35L41_GPIO2_PDM_DATA,
+};
+
+struct cs35l41_gpio_cfg {
+ bool valid;
+ bool pol_inv;
+ bool out_en;
+ unsigned int func;
+};
+
+struct cs35l41_hw_cfg {
+ bool valid;
+ int bst_ind;
+ int bst_ipk;
+ int bst_cap;
+ int dout_hiz;
+ struct cs35l41_gpio_cfg gpio1;
+ struct cs35l41_gpio_cfg gpio2;
+ unsigned int spk_pos;
+
+ enum cs35l41_boost_type bst_type;
+};
+
+struct cs35l41_otp_packed_element_t {
+ u32 reg;
+ u8 shift;
+ u8 size;
+};
+
+struct cs35l41_otp_map_element_t {
+ u32 id;
+ u32 num_elements;
+ const struct cs35l41_otp_packed_element_t *map;
+ u32 bit_offset;
+ u32 word_offset;
+};
+
+enum cs35l41_cspl_mbox_status {
+ CSPL_MBOX_STS_ERROR = U32_MAX,
+	CSPL_MBOX_STS_ERROR2 = 0x00ffffff, // firmware does not always sign-extend the 24-bit value
+ CSPL_MBOX_STS_RUNNING = 0,
+ CSPL_MBOX_STS_PAUSED = 1,
+ CSPL_MBOX_STS_RDY_FOR_REINIT = 2,
+};
+
+enum cs35l41_cspl_mbox_cmd {
+ CSPL_MBOX_CMD_NONE = 0,
+ CSPL_MBOX_CMD_PAUSE = 1,
+ CSPL_MBOX_CMD_RESUME = 2,
+ CSPL_MBOX_CMD_REINIT = 3,
+ CSPL_MBOX_CMD_STOP_PRE_REINIT = 4,
+ CSPL_MBOX_CMD_HIBERNATE = 5,
+ CSPL_MBOX_CMD_OUT_OF_HIBERNATE = 6,
+ CSPL_MBOX_CMD_SPK_OUT_ENABLE = 7,
+ CSPL_MBOX_CMD_UNKNOWN_CMD = -1,
+ CSPL_MBOX_CMD_INVALID_SEQUENCE = -2,
+};
+
+/*
+ * IRQs
+ */
+#define CS35L41_IRQ(_irq, _name, _hand) \
+ { \
+ .irq = CS35L41_ ## _irq ## _IRQ,\
+ .name = _name, \
+ .handler = _hand, \
+ }
+
+struct cs35l41_irq {
+ int irq;
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *data);
+};
+
+#define CS35L41_REG_IRQ(_reg, _irq) \
+ [CS35L41_ ## _irq ## _IRQ] = { \
+ .reg_offset = (CS35L41_ ## _reg) - CS35L41_IRQ1_STATUS1,\
+ .mask = CS35L41_ ## _irq ## _MASK \
+ }
+
+/* (0x00010010) CS35L41_IRQ1_STATUS1 */
+#define CS35L41_BST_OVP_ERR_SHIFT 6
+#define CS35L41_BST_OVP_ERR_MASK BIT(CS35L41_BST_OVP_ERR_SHIFT)
+#define CS35L41_BST_DCM_UVP_ERR_SHIFT 7
+#define CS35L41_BST_DCM_UVP_ERR_MASK BIT(CS35L41_BST_DCM_UVP_ERR_SHIFT)
+#define CS35L41_BST_SHORT_ERR_SHIFT 8
+#define CS35L41_BST_SHORT_ERR_MASK BIT(CS35L41_BST_SHORT_ERR_SHIFT)
+#define CS35L41_TEMP_WARN_SHIFT 15
+#define CS35L41_TEMP_WARN_MASK BIT(CS35L41_TEMP_WARN_SHIFT)
+#define CS35L41_TEMP_ERR_SHIFT 17
+#define CS35L41_TEMP_ERR_MASK BIT(CS35L41_TEMP_ERR_SHIFT)
+#define CS35L41_AMP_SHORT_ERR_SHIFT 31
+#define CS35L41_AMP_SHORT_ERR_MASK BIT(CS35L41_AMP_SHORT_ERR_SHIFT)
+
+enum cs35l41_irq_list {
+ CS35L41_BST_OVP_ERR_IRQ,
+ CS35L41_BST_DCM_UVP_ERR_IRQ,
+ CS35L41_BST_SHORT_ERR_IRQ,
+ CS35L41_TEMP_WARN_IRQ,
+ CS35L41_TEMP_ERR_IRQ,
+ CS35L41_AMP_SHORT_ERR_IRQ,
+
+ CS35L41_NUM_IRQ
+};
+
+extern struct regmap_config cs35l41_regmap_i2c;
+extern struct regmap_config cs35l41_regmap_spi;
+
+int cs35l41_test_key_unlock(struct device *dev, struct regmap *regmap);
+int cs35l41_test_key_lock(struct device *dev, struct regmap *regmap);
+int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap);
+int cs35l41_register_errata_patch(struct device *dev, struct regmap *reg, unsigned int reg_revid);
+int cs35l41_set_channels(struct device *dev, struct regmap *reg,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot);
+int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg);
+void cs35l41_configure_cs_dsp(struct device *dev, struct regmap *reg, struct cs_dsp *dsp);
+int cs35l41_set_cspl_mbox_cmd(struct device *dev, struct regmap *regmap,
+ enum cs35l41_cspl_mbox_cmd cmd);
+int cs35l41_write_fs_errata(struct device *dev, struct regmap *regmap);
+int cs35l41_enter_hibernate(struct device *dev, struct regmap *regmap,
+ enum cs35l41_boost_type b_type);
+int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
+int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
+ struct cs35l41_hw_cfg *hw_cfg);
+bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
+int cs35l41_mdsync_up(struct regmap *regmap);
+int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+ int enable, struct cs_dsp *dsp);
+
+#endif /* __CS35L41_H */
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
new file mode 100644
index 000000000000..e0629699b563
--- /dev/null
+++ b/include/sound/cs35l56.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common definitions for Cirrus Logic CS35L56 smart amp
+ *
+ * Copyright (C) 2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __CS35L56_H
+#define __CS35L56_H
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <sound/cs-amp-lib.h>
+
+#define CS35L56_DEVID 0x0000000
+#define CS35L56_REVID 0x0000004
+#define CS35L56_RELID 0x000000C
+#define CS35L56_OTPID 0x0000010
+#define CS35L56_SFT_RESET 0x0000020
+#define CS35L56_GLOBAL_ENABLES 0x0002014
+#define CS35L56_BLOCK_ENABLES 0x0002018
+#define CS35L56_BLOCK_ENABLES2 0x000201C
+#define CS35L56_REFCLK_INPUT 0x0002C04
+#define CS35L56_GLOBAL_SAMPLE_RATE 0x0002C0C
+#define CS35L56_OTP_MEM_53 0x00300D4
+#define CS35L56_OTP_MEM_54 0x00300D8
+#define CS35L56_OTP_MEM_55 0x00300DC
+#define CS35L56_ASP1_ENABLES1 0x0004800
+#define CS35L56_ASP1_CONTROL1 0x0004804
+#define CS35L56_ASP1_CONTROL2 0x0004808
+#define CS35L56_ASP1_CONTROL3 0x000480C
+#define CS35L56_ASP1_FRAME_CONTROL1 0x0004810
+#define CS35L56_ASP1_FRAME_CONTROL5 0x0004820
+#define CS35L56_ASP1_DATA_CONTROL1 0x0004830
+#define CS35L56_ASP1_DATA_CONTROL5 0x0004840
+#define CS35L56_DACPCM1_INPUT 0x0004C00
+#define CS35L56_DACPCM2_INPUT 0x0004C08
+#define CS35L56_ASP1TX1_INPUT 0x0004C20
+#define CS35L56_ASP1TX2_INPUT 0x0004C24
+#define CS35L56_ASP1TX3_INPUT 0x0004C28
+#define CS35L56_ASP1TX4_INPUT 0x0004C2C
+#define CS35L56_DSP1RX1_INPUT 0x0004C40
+#define CS35L56_DSP1RX2_INPUT 0x0004C44
+#define CS35L56_SWIRE_DP3_CH1_INPUT 0x0004C70
+#define CS35L56_SWIRE_DP3_CH2_INPUT 0x0004C74
+#define CS35L56_SWIRE_DP3_CH3_INPUT 0x0004C78
+#define CS35L56_SWIRE_DP3_CH4_INPUT 0x0004C7C
+#define CS35L56_IRQ1_CFG 0x000E000
+#define CS35L56_IRQ1_STATUS 0x000E004
+#define CS35L56_IRQ1_EINT_1 0x000E010
+#define CS35L56_IRQ1_EINT_2 0x000E014
+#define CS35L56_IRQ1_EINT_4 0x000E01C
+#define CS35L56_IRQ1_EINT_8 0x000E02C
+#define CS35L56_IRQ1_EINT_18 0x000E054
+#define CS35L56_IRQ1_EINT_20 0x000E05C
+#define CS35L56_IRQ1_MASK_1 0x000E090
+#define CS35L56_IRQ1_MASK_2 0x000E094
+#define CS35L56_IRQ1_MASK_4 0x000E09C
+#define CS35L56_IRQ1_MASK_8 0x000E0AC
+#define CS35L56_IRQ1_MASK_18 0x000E0D4
+#define CS35L56_IRQ1_MASK_20 0x000E0DC
+#define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020
+#define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024
+#define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028
+#define CS35L56_DSP_VIRTUAL1_MBOX_4 0x001102C
+#define CS35L56_DSP_VIRTUAL1_MBOX_5 0x0011030
+#define CS35L56_DSP_VIRTUAL1_MBOX_6 0x0011034
+#define CS35L56_DSP_VIRTUAL1_MBOX_7 0x0011038
+#define CS35L56_DSP_VIRTUAL1_MBOX_8 0x001103C
+#define CS35L56_DSP_RESTRICT_STS1 0x00190F0
+#define CS35L56_DSP1_XMEM_PACKED_0 0x2000000
+#define CS35L56_DSP1_XMEM_PACKED_6143 0x2005FFC
+#define CS35L56_DSP1_XMEM_UNPACKED32_0 0x2400000
+#define CS35L56_DSP1_XMEM_UNPACKED32_4095 0x2403FFC
+#define CS35L56_DSP1_SYS_INFO_ID 0x25E0000
+#define CS35L56_DSP1_SYS_INFO_END 0x25E004C
+#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_0 0x25E2040
+#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1 0x25E2044
+#define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000
+#define CS35L56_DSP1_FW_VER 0x2800010
+#define CS35L56_DSP1_HALO_STATE_A1 0x2801E58
+#define CS35L56_DSP1_HALO_STATE 0x28021E0
+#define CS35L56_DSP1_PM_CUR_STATE_A1 0x2804000
+#define CS35L56_DSP1_PM_CUR_STATE 0x2804308
+#define CS35L56_DSP1_XMEM_UNPACKED24_8191 0x2807FFC
+#define CS35L56_DSP1_CORE_BASE 0x2B80000
+#define CS35L56_DSP1_SCRATCH1 0x2B805C0
+#define CS35L56_DSP1_SCRATCH2 0x2B805C8
+#define CS35L56_DSP1_SCRATCH3 0x2B805D0
+#define CS35L56_DSP1_SCRATCH4 0x2B805D8
+#define CS35L56_DSP1_YMEM_PACKED_0 0x2C00000
+#define CS35L56_DSP1_YMEM_PACKED_4604 0x2C047F0
+#define CS35L56_DSP1_YMEM_UNPACKED32_0 0x3000000
+#define CS35L56_DSP1_YMEM_UNPACKED32_3070 0x3002FF8
+#define CS35L56_DSP1_YMEM_UNPACKED24_0 0x3400000
+#define CS35L56_MAIN_RENDER_USER_MUTE 0x3400024
+#define CS35L56_MAIN_RENDER_USER_VOLUME 0x340002C
+#define CS35L56_MAIN_POSTURE_NUMBER 0x3400094
+#define CS35L56_PROTECTION_STATUS 0x34000D8
+#define CS35L56_TRANSDUCER_ACTUAL_PS 0x3400150
+#define CS35L56_DSP1_YMEM_UNPACKED24_6141 0x3405FF4
+#define CS35L56_DSP1_PMEM_0 0x3800000
+#define CS35L56_DSP1_PMEM_5114 0x3804FE8
+
+/* DEVID */
+#define CS35L56_DEVID_MASK 0x00FFFFFF
+
+/* REVID */
+#define CS35L56_AREVID_MASK 0x000000F0
+#define CS35L56_MTLREVID_MASK 0x0000000F
+#define CS35L56_REVID_B0 0x000000B0
+
+/* ASP_ENABLES1 */
+#define CS35L56_ASP_RX2_EN_SHIFT 17
+#define CS35L56_ASP_RX1_EN_SHIFT 16
+#define CS35L56_ASP_TX4_EN_SHIFT 3
+#define CS35L56_ASP_TX3_EN_SHIFT 2
+#define CS35L56_ASP_TX2_EN_SHIFT 1
+#define CS35L56_ASP_TX1_EN_SHIFT 0
+
+/* ASP_CONTROL1 */
+#define CS35L56_ASP_BCLK_FREQ_MASK 0x0000003F
+#define CS35L56_ASP_BCLK_FREQ_SHIFT 0
+
+/* ASP_CONTROL2 */
+#define CS35L56_ASP_RX_WIDTH_MASK 0xFF000000
+#define CS35L56_ASP_RX_WIDTH_SHIFT 24
+#define CS35L56_ASP_TX_WIDTH_MASK 0x00FF0000
+#define CS35L56_ASP_TX_WIDTH_SHIFT 16
+#define CS35L56_ASP_FMT_MASK 0x00000700
+#define CS35L56_ASP_FMT_SHIFT 8
+#define CS35L56_ASP_BCLK_INV_MASK 0x00000040
+#define CS35L56_ASP_FSYNC_INV_MASK 0x00000004
+
+/* ASP_CONTROL3 */
+#define CS35L56_ASP1_DOUT_HIZ_CTRL_MASK 0x00000003
+
+/* ASP_DATA_CONTROL1 */
+#define CS35L56_ASP_TX_WL_MASK 0x0000003F
+
+/* ASP_DATA_CONTROL5 */
+#define CS35L56_ASP_RX_WL_MASK 0x0000003F
+
+/* ASPTXn_INPUT */
+#define CS35L56_ASP_TXn_SRC_MASK 0x0000007F
+
+/* SWIRETX[1..7]_SRC SDWTXn INPUT */
+#define CS35L56_SWIRETXn_SRC_MASK 0x0000007F
+
+/* IRQ1_STATUS */
+#define CS35L56_IRQ1_STS_MASK 0x00000001
+
+/* IRQ1_EINT_1 */
+#define CS35L56_AMP_SHORT_ERR_EINT1_MASK 0x80000000
+
+/* IRQ1_EINT_2 */
+#define CS35L56_DSP_VIRTUAL2_MBOX_WR_EINT1_MASK 0x00200000
+
+/* IRQ1_EINT_4 */
+#define CS35L56_OTP_BOOT_DONE_MASK 0x00000002
+
+/* IRQ1_EINT_8 */
+#define CS35L56_TEMP_ERR_EINT1_MASK 0x80000000
+
+/* Mixer input sources */
+#define CS35L56_INPUT_SRC_NONE 0x00
+#define CS35L56_INPUT_SRC_ASP1RX1 0x08
+#define CS35L56_INPUT_SRC_ASP1RX2 0x09
+#define CS35L56_INPUT_SRC_VMON 0x18
+#define CS35L56_INPUT_SRC_IMON 0x19
+#define CS35L56_INPUT_SRC_ERR_VOL 0x20
+#define CS35L56_INPUT_SRC_CLASSH 0x21
+#define CS35L56_INPUT_SRC_VDDBMON 0x28
+#define CS35L56_INPUT_SRC_VBSTMON 0x29
+#define CS35L56_INPUT_SRC_DSP1TX1 0x32
+#define CS35L56_INPUT_SRC_DSP1TX2 0x33
+#define CS35L56_INPUT_SRC_DSP1TX3 0x34
+#define CS35L56_INPUT_SRC_DSP1TX4 0x35
+#define CS35L56_INPUT_SRC_DSP1TX5 0x36
+#define CS35L56_INPUT_SRC_DSP1TX6 0x37
+#define CS35L56_INPUT_SRC_DSP1TX7 0x38
+#define CS35L56_INPUT_SRC_DSP1TX8 0x39
+#define CS35L56_INPUT_SRC_TEMPMON 0x3A
+#define CS35L56_INPUT_SRC_INTERPOLATOR 0x40
+#define CS35L56_INPUT_SRC_SWIRE_DP1_CHANNEL1 0x44
+#define CS35L56_INPUT_SRC_SWIRE_DP1_CHANNEL2 0x45
+#define CS35L56_INPUT_MASK 0x7F
+
+#define CS35L56_NUM_INPUT_SRC 21
+
+/* ASP formats */
+#define CS35L56_ASP_FMT_DSP_A 0
+#define CS35L56_ASP_FMT_I2S 2
+
+/* ASP HiZ modes */
+#define CS35L56_ASP_UNUSED_HIZ_OFF_HIZ 3
+
+/* CS35L56_TRANSDUCER_ACTUAL_PS */
+#define CS35L56_PS0 0
+#define CS35L56_PS3 3
+
+/* CS35L56_DSP_RESTRICT_STS1 */
+#define CS35L56_RESTRICTED_MASK 0x7
+
+/* CS35L56_MAIN_RENDER_USER_MUTE */
+#define CS35L56_MAIN_RENDER_USER_MUTE_MASK 1
+
+/* CS35L56_MAIN_RENDER_USER_VOLUME */
+#define CS35L56_MAIN_RENDER_USER_VOLUME_MIN -400
+#define CS35L56_MAIN_RENDER_USER_VOLUME_MAX 400
+#define CS35L56_MAIN_RENDER_USER_VOLUME_MASK 0x0000FFC0
+#define CS35L56_MAIN_RENDER_USER_VOLUME_SHIFT 6
+#define CS35L56_MAIN_RENDER_USER_VOLUME_SIGNBIT 9
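
The volume is therefore a signed 10-bit field (sign at bit 9 of the field) occupying bits 15:6 of the register. A small packing sketch, assuming the caller has already clamped the value to [-400, 400]:

static inline u32 cs35l56_pack_user_volume_sketch(int vol)
{
	/* The two's-complement value lands in bits 15:6; the mask trims the sign extension */
	return ((u32)vol << CS35L56_MAIN_RENDER_USER_VOLUME_SHIFT) &
	       CS35L56_MAIN_RENDER_USER_VOLUME_MASK;
}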
+
+/* CS35L56_MAIN_POSTURE_NUMBER */
+#define CS35L56_MAIN_POSTURE_MIN 0
+#define CS35L56_MAIN_POSTURE_MAX 255
+#define CS35L56_MAIN_POSTURE_MASK CS35L56_MAIN_POSTURE_MAX
+
+/* CS35L56_PROTECTION_STATUS */
+#define CS35L56_FIRMWARE_MISSING BIT(0)
+
+/* Software Values */
+#define CS35L56_HALO_STATE_SHUTDOWN 1
+#define CS35L56_HALO_STATE_BOOT_DONE 2
+
+#define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001
+#define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002
+#define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003
+#define CS35L56_MBOX_CMD_HIBERNATE_NOW 0x02000001
+#define CS35L56_MBOX_CMD_WAKEUP 0x02000002
+#define CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE 0x02000003
+#define CS35L56_MBOX_CMD_ALLOW_AUTO_HIBERNATE 0x02000004
+#define CS35L56_MBOX_CMD_SHUTDOWN 0x02000005
+#define CS35L56_MBOX_CMD_SYSTEM_RESET 0x02000007
+
+#define CS35L56_MBOX_TIMEOUT_US 5000
+#define CS35L56_MBOX_POLL_US 250
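
These two values pair with the DSP_VIRTUAL1_MBOX registers above. A common pattern is to write the command and poll the same register until it reads back as zero; a sketch, assuming (this header does not guarantee it) that the firmware clears the register to acknowledge:

static int cs35l56_mbox_send_sketch(struct regmap *regmap, unsigned int command)
{
	unsigned int val;

	regmap_write(regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, command);
	return regmap_read_poll_timeout(regmap, CS35L56_DSP_VIRTUAL1_MBOX_1,
					val, val == 0,
					CS35L56_MBOX_POLL_US,
					CS35L56_MBOX_TIMEOUT_US);
}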
+
+#define CS35L56_PS0_POLL_US 500
+#define CS35L56_PS0_TIMEOUT_US 50000
+#define CS35L56_PS3_POLL_US 500
+#define CS35L56_PS3_TIMEOUT_US 300000
+
+#define CS35L56_CONTROL_PORT_READY_US 2200
+#define CS35L56_HALO_STATE_POLL_US 1000
+#define CS35L56_HALO_STATE_TIMEOUT_US 250000
+#define CS35L56_RESET_PULSE_MIN_US 1100
+#define CS35L56_WAKE_HOLD_TIME_US 1000
+
+#define CS35L56_SDW1_PLAYBACK_PORT 1
+#define CS35L56_SDW1_CAPTURE_PORT 3
+
+#define CS35L56_NUM_BULK_SUPPLIES 3
+#define CS35L56_NUM_DSP_REGIONS 5
+
+struct cs35l56_base {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct mutex irq_lock;
+ u8 type;
+ u8 rev;
+ bool init_done;
+ bool fw_patched;
+ bool secured;
+ bool can_hibernate;
+ bool cal_data_valid;
+ s8 cal_index;
+ struct cirrus_amp_cal_data cal_data;
+ struct gpio_desc *reset_gpio;
+};
+
+extern struct regmap_config cs35l56_regmap_i2c;
+extern struct regmap_config cs35l56_regmap_spi;
+extern struct regmap_config cs35l56_regmap_sdw;
+
+extern const struct cirrus_amp_cal_controls cs35l56_calibration_controls;
+
+extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
+extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
+
+int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
+int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
+int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
+int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
+void cs35l56_wait_control_port_ready(void);
+void cs35l56_wait_min_reset_pulse(void);
+void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq);
+irqreturn_t cs35l56_irq(int irq, void *data);
+int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base);
+int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
+int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+int cs35l56_get_calibration(struct cs35l56_base *cs35l56_base);
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+ bool *fw_missing, unsigned int *fw_version);
+int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
+int cs35l56_get_bclk_freq_id(unsigned int freq);
+void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
+
+#endif /* ifndef __CS35L56_H */
diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h
index 6ff23ab48894..5a55d135bdea 100644
--- a/include/sound/cs4271.h
+++ b/include/sound/cs4271.h
@@ -9,7 +9,6 @@
#define __CS4271_H
struct cs4271_platform_data {
- int gpio_nreset; /* GPIO driving Reset pin, if any */
bool amutec_eq_bmutec; /* flag to enable AMUTEC=BMUTEC */
/*
diff --git a/include/sound/cs42l42.h b/include/sound/cs42l42.h
new file mode 100644
index 000000000000..1bd8eee54f66
--- /dev/null
+++ b/include/sound/cs42l42.h
@@ -0,0 +1,815 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/cs42l42.h -- Platform data header for the CS42L42 ALSA SoC audio driver
+ *
+ * Copyright 2016-2022 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ */
+
+#ifndef __CS42L42_H
+#define __CS42L42_H
+
+#define CS42L42_PAGE_REGISTER 0x00 /* Page Select Register */
+#define CS42L42_WIN_START 0x00
+#define CS42L42_WIN_LEN 0x100
+#define CS42L42_RANGE_MIN 0x00
+#define CS42L42_RANGE_MAX 0x7F
+
+#define CS42L42_PAGE_10 0x1000
+#define CS42L42_PAGE_11 0x1100
+#define CS42L42_PAGE_12 0x1200
+#define CS42L42_PAGE_13 0x1300
+#define CS42L42_PAGE_15 0x1500
+#define CS42L42_PAGE_19 0x1900
+#define CS42L42_PAGE_1B 0x1B00
+#define CS42L42_PAGE_1C 0x1C00
+#define CS42L42_PAGE_1D 0x1D00
+#define CS42L42_PAGE_1F 0x1F00
+#define CS42L42_PAGE_20 0x2000
+#define CS42L42_PAGE_21 0x2100
+#define CS42L42_PAGE_23 0x2300
+#define CS42L42_PAGE_24 0x2400
+#define CS42L42_PAGE_25 0x2500
+#define CS42L42_PAGE_26 0x2600
+#define CS42L42_PAGE_27 0x2700
+#define CS42L42_PAGE_28 0x2800
+#define CS42L42_PAGE_29 0x2900
+#define CS42L42_PAGE_2A 0x2A00
+#define CS42L42_PAGE_30 0x3000
+
+#define CS42L42_CHIP_ID 0x42A42
+#define CS42L83_CHIP_ID 0x42A83
+
+/* Page 0x10 Global Registers */
+#define CS42L42_DEVID_AB (CS42L42_PAGE_10 + 0x01)
+#define CS42L42_DEVID_CD (CS42L42_PAGE_10 + 0x02)
+#define CS42L42_DEVID_E (CS42L42_PAGE_10 + 0x03)
+#define CS42L42_FABID (CS42L42_PAGE_10 + 0x04)
+#define CS42L42_REVID (CS42L42_PAGE_10 + 0x05)
+#define CS42L42_FRZ_CTL (CS42L42_PAGE_10 + 0x06)
+
+#define CS42L42_SRC_CTL (CS42L42_PAGE_10 + 0x07)
+#define CS42L42_SRC_BYPASS_DAC_SHIFT 1
+#define CS42L42_SRC_BYPASS_DAC_MASK (1 << CS42L42_SRC_BYPASS_DAC_SHIFT)
+
+#define CS42L42_MCLK_STATUS (CS42L42_PAGE_10 + 0x08)
+
+#define CS42L42_MCLK_CTL (CS42L42_PAGE_10 + 0x09)
+#define CS42L42_INTERNAL_FS_SHIFT 1
+#define CS42L42_INTERNAL_FS_MASK (1 << CS42L42_INTERNAL_FS_SHIFT)
+
+#define CS42L42_SFTRAMP_RATE (CS42L42_PAGE_10 + 0x0A)
+#define CS42L42_SLOW_START_ENABLE (CS42L42_PAGE_10 + 0x0B)
+#define CS42L42_SLOW_START_EN_MASK GENMASK(6, 4)
+#define CS42L42_SLOW_START_EN_SHIFT 4
+#define CS42L42_I2C_DEBOUNCE (CS42L42_PAGE_10 + 0x0E)
+#define CS42L42_I2C_STRETCH (CS42L42_PAGE_10 + 0x0F)
+#define CS42L42_I2C_TIMEOUT (CS42L42_PAGE_10 + 0x10)
+
+/* Page 0x11 Power and Headset Detect Registers */
+#define CS42L42_PWR_CTL1 (CS42L42_PAGE_11 + 0x01)
+#define CS42L42_ASP_DAO_PDN_SHIFT 7
+#define CS42L42_ASP_DAO_PDN_MASK (1 << CS42L42_ASP_DAO_PDN_SHIFT)
+#define CS42L42_ASP_DAI_PDN_SHIFT 6
+#define CS42L42_ASP_DAI_PDN_MASK (1 << CS42L42_ASP_DAI_PDN_SHIFT)
+#define CS42L42_MIXER_PDN_SHIFT 5
+#define CS42L42_MIXER_PDN_MASK (1 << CS42L42_MIXER_PDN_SHIFT)
+#define CS42L42_EQ_PDN_SHIFT 4
+#define CS42L42_EQ_PDN_MASK (1 << CS42L42_EQ_PDN_SHIFT)
+#define CS42L42_HP_PDN_SHIFT 3
+#define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
+#define CS42L42_ADC_PDN_SHIFT 2
+#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT)
+#define CS42L42_PDN_ALL_SHIFT 0
+#define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT)
+
+#define CS42L42_PWR_CTL2 (CS42L42_PAGE_11 + 0x02)
+#define CS42L42_ADC_SRC_PDNB_SHIFT 0
+#define CS42L42_ADC_SRC_PDNB_MASK (1 << CS42L42_ADC_SRC_PDNB_SHIFT)
+#define CS42L42_DAC_SRC_PDNB_SHIFT 1
+#define CS42L42_DAC_SRC_PDNB_MASK (1 << CS42L42_DAC_SRC_PDNB_SHIFT)
+#define CS42L42_ASP_DAI1_PDN_SHIFT 2
+#define CS42L42_ASP_DAI1_PDN_MASK (1 << CS42L42_ASP_DAI1_PDN_SHIFT)
+#define CS42L42_SRC_PDN_OVERRIDE_SHIFT 3
+#define CS42L42_SRC_PDN_OVERRIDE_MASK (1 << CS42L42_SRC_PDN_OVERRIDE_SHIFT)
+#define CS42L42_DISCHARGE_FILT_SHIFT 4
+#define CS42L42_DISCHARGE_FILT_MASK (1 << CS42L42_DISCHARGE_FILT_SHIFT)
+
+#define CS42L42_PWR_CTL3 (CS42L42_PAGE_11 + 0x03)
+#define CS42L42_RING_SENSE_PDNB_SHIFT 1
+#define CS42L42_RING_SENSE_PDNB_MASK (1 << CS42L42_RING_SENSE_PDNB_SHIFT)
+#define CS42L42_VPMON_PDNB_SHIFT 2
+#define CS42L42_VPMON_PDNB_MASK (1 << CS42L42_VPMON_PDNB_SHIFT)
+#define CS42L42_SW_CLK_STP_STAT_SEL_SHIFT 5
+#define CS42L42_SW_CLK_STP_STAT_SEL_MASK (3 << CS42L42_SW_CLK_STP_STAT_SEL_SHIFT)
+
+#define CS42L42_RSENSE_CTL1 (CS42L42_PAGE_11 + 0x04)
+#define CS42L42_RS_TRIM_R_SHIFT 0
+#define CS42L42_RS_TRIM_R_MASK (1 << CS42L42_RS_TRIM_R_SHIFT)
+#define CS42L42_RS_TRIM_T_SHIFT 1
+#define CS42L42_RS_TRIM_T_MASK (1 << CS42L42_RS_TRIM_T_SHIFT)
+#define CS42L42_HPREF_RS_SHIFT 2
+#define CS42L42_HPREF_RS_MASK (1 << CS42L42_HPREF_RS_SHIFT)
+#define CS42L42_HSBIAS_FILT_REF_RS_SHIFT 3
+#define CS42L42_HSBIAS_FILT_REF_RS_MASK (1 << CS42L42_HSBIAS_FILT_REF_RS_SHIFT)
+#define CS42L42_RING_SENSE_PU_HIZ_SHIFT 6
+#define CS42L42_RING_SENSE_PU_HIZ_MASK (1 << CS42L42_RING_SENSE_PU_HIZ_SHIFT)
+
+#define CS42L42_RSENSE_CTL2 (CS42L42_PAGE_11 + 0x05)
+#define CS42L42_TS_RS_GATE_SHIFT 7
+#define CS42L42_TS_RS_GATE_MASK (1 << CS42L42_TS_RS_GATE_SHIFT)
+
+#define CS42L42_OSC_SWITCH (CS42L42_PAGE_11 + 0x07)
+#define CS42L42_SCLK_PRESENT_SHIFT 0
+#define CS42L42_SCLK_PRESENT_MASK (1 << CS42L42_SCLK_PRESENT_SHIFT)
+
+#define CS42L42_OSC_SWITCH_STATUS (CS42L42_PAGE_11 + 0x09)
+#define CS42L42_OSC_SW_SEL_STAT_SHIFT 0
+#define CS42L42_OSC_SW_SEL_STAT_MASK (3 << CS42L42_OSC_SW_SEL_STAT_SHIFT)
+#define CS42L42_OSC_PDNB_STAT_SHIFT 2
+#define CS42L42_OSC_PDNB_STAT_MASK (1 << CS42L42_OSC_PDNB_STAT_SHIFT)
+
+#define CS42L42_RSENSE_CTL3 (CS42L42_PAGE_11 + 0x12)
+#define CS42L42_RS_RISE_DBNCE_TIME_SHIFT 0
+#define CS42L42_RS_RISE_DBNCE_TIME_MASK (7 << CS42L42_RS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_FALL_DBNCE_TIME_SHIFT 3
+#define CS42L42_RS_FALL_DBNCE_TIME_MASK (7 << CS42L42_RS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_PU_EN_SHIFT 6
+#define CS42L42_RS_PU_EN_MASK (1 << CS42L42_RS_PU_EN_SHIFT)
+#define CS42L42_RS_INV_SHIFT 7
+#define CS42L42_RS_INV_MASK (1 << CS42L42_RS_INV_SHIFT)
+
+#define CS42L42_TSENSE_CTL (CS42L42_PAGE_11 + 0x13)
+#define CS42L42_TS_RISE_DBNCE_TIME_SHIFT 0
+#define CS42L42_TS_RISE_DBNCE_TIME_MASK (7 << CS42L42_TS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_FALL_DBNCE_TIME_SHIFT 3
+#define CS42L42_TS_FALL_DBNCE_TIME_MASK (7 << CS42L42_TS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_INV_SHIFT 7
+#define CS42L42_TS_INV_MASK (1 << CS42L42_TS_INV_SHIFT)
+
+#define CS42L42_TSRS_INT_DISABLE (CS42L42_PAGE_11 + 0x14)
+#define CS42L42_D_RS_PLUG_DBNC_SHIFT 0
+#define CS42L42_D_RS_PLUG_DBNC_MASK (1 << CS42L42_D_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_RS_UNPLUG_DBNC_SHIFT 1
+#define CS42L42_D_RS_UNPLUG_DBNC_MASK (1 << CS42L42_D_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_PLUG_DBNC_SHIFT 2
+#define CS42L42_D_TS_PLUG_DBNC_MASK (1 << CS42L42_D_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_UNPLUG_DBNC_SHIFT 3
+#define CS42L42_D_TS_UNPLUG_DBNC_MASK (1 << CS42L42_D_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_TRSENSE_STATUS (CS42L42_PAGE_11 + 0x15)
+#define CS42L42_RS_PLUG_DBNC_SHIFT 0
+#define CS42L42_RS_PLUG_DBNC_MASK (1 << CS42L42_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_RS_UNPLUG_DBNC_SHIFT 1
+#define CS42L42_RS_UNPLUG_DBNC_MASK (1 << CS42L42_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_TS_PLUG_DBNC_SHIFT 2
+#define CS42L42_TS_PLUG_DBNC_MASK (1 << CS42L42_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_TS_UNPLUG_DBNC_SHIFT 3
+#define CS42L42_TS_UNPLUG_DBNC_MASK (1 << CS42L42_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_HSDET_CTL1 (CS42L42_PAGE_11 + 0x1F)
+#define CS42L42_HSDET_COMP1_LVL_SHIFT 0
+#define CS42L42_HSDET_COMP1_LVL_MASK (15 << CS42L42_HSDET_COMP1_LVL_SHIFT)
+#define CS42L42_HSDET_COMP2_LVL_SHIFT 4
+#define CS42L42_HSDET_COMP2_LVL_MASK (15 << CS42L42_HSDET_COMP2_LVL_SHIFT)
+
+#define CS42L42_HSDET_COMP1_LVL_VAL 12 /* 1.25V Comparator */
+#define CS42L42_HSDET_COMP2_LVL_VAL 2 /* 1.75V Comparator */
+#define CS42L42_HSDET_COMP1_LVL_DEFAULT 7 /* 1V Comparator */
+#define CS42L42_HSDET_COMP2_LVL_DEFAULT 7 /* 2V Comparator */
+
+#define CS42L42_HSDET_CTL2 (CS42L42_PAGE_11 + 0x20)
+#define CS42L42_HSDET_AUTO_TIME_SHIFT 0
+#define CS42L42_HSDET_AUTO_TIME_MASK (3 << CS42L42_HSDET_AUTO_TIME_SHIFT)
+#define CS42L42_HSBIAS_REF_SHIFT 3
+#define CS42L42_HSBIAS_REF_MASK (1 << CS42L42_HSBIAS_REF_SHIFT)
+#define CS42L42_HSDET_SET_SHIFT 4
+#define CS42L42_HSDET_SET_MASK (3 << CS42L42_HSDET_SET_SHIFT)
+#define CS42L42_HSDET_CTRL_SHIFT 6
+#define CS42L42_HSDET_CTRL_MASK (3 << CS42L42_HSDET_CTRL_SHIFT)
+
+#define CS42L42_HS_SWITCH_CTL (CS42L42_PAGE_11 + 0x21)
+#define CS42L42_SW_GNDHS_HS4_SHIFT 0
+#define CS42L42_SW_GNDHS_HS4_MASK (1 << CS42L42_SW_GNDHS_HS4_SHIFT)
+#define CS42L42_SW_GNDHS_HS3_SHIFT 1
+#define CS42L42_SW_GNDHS_HS3_MASK (1 << CS42L42_SW_GNDHS_HS3_SHIFT)
+#define CS42L42_SW_HSB_HS4_SHIFT 2
+#define CS42L42_SW_HSB_HS4_MASK (1 << CS42L42_SW_HSB_HS4_SHIFT)
+#define CS42L42_SW_HSB_HS3_SHIFT 3
+#define CS42L42_SW_HSB_HS3_MASK (1 << CS42L42_SW_HSB_HS3_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS4_SHIFT 4
+#define CS42L42_SW_HSB_FILT_HS4_MASK (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS3_SHIFT 5
+#define CS42L42_SW_HSB_FILT_HS3_MASK (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT)
+#define CS42L42_SW_REF_HS4_SHIFT 6
+#define CS42L42_SW_REF_HS4_MASK (1 << CS42L42_SW_REF_HS4_SHIFT)
+#define CS42L42_SW_REF_HS3_SHIFT 7
+#define CS42L42_SW_REF_HS3_MASK (1 << CS42L42_SW_REF_HS3_SHIFT)
+
+#define CS42L42_HS_DET_STATUS (CS42L42_PAGE_11 + 0x24)
+#define CS42L42_HSDET_TYPE_SHIFT 0
+#define CS42L42_HSDET_TYPE_MASK (3 << CS42L42_HSDET_TYPE_SHIFT)
+#define CS42L42_HSDET_COMP1_OUT_SHIFT 6
+#define CS42L42_HSDET_COMP1_OUT_MASK (1 << CS42L42_HSDET_COMP1_OUT_SHIFT)
+#define CS42L42_HSDET_COMP2_OUT_SHIFT 7
+#define CS42L42_HSDET_COMP2_OUT_MASK (1 << CS42L42_HSDET_COMP2_OUT_SHIFT)
+#define CS42L42_PLUG_CTIA 0
+#define CS42L42_PLUG_OMTP 1
+#define CS42L42_PLUG_HEADPHONE 2
+#define CS42L42_PLUG_INVALID 3
+
+#define CS42L42_HSDET_SW_COMP1 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_COMP2 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (0 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE1 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE2 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (0 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE3 ((1 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+#define CS42L42_HSDET_SW_TYPE4 ((0 << CS42L42_SW_GNDHS_HS4_SHIFT) | \
+ (1 << CS42L42_SW_GNDHS_HS3_SHIFT) | \
+ (1 << CS42L42_SW_HSB_HS4_SHIFT) | \
+ (0 << CS42L42_SW_HSB_HS3_SHIFT) | \
+ (0 << CS42L42_SW_HSB_FILT_HS4_SHIFT) | \
+ (1 << CS42L42_SW_HSB_FILT_HS3_SHIFT) | \
+ (0 << CS42L42_SW_REF_HS4_SHIFT) | \
+ (1 << CS42L42_SW_REF_HS3_SHIFT))
+
+#define CS42L42_HSDET_COMP_TYPE1 1
+#define CS42L42_HSDET_COMP_TYPE2 2
+#define CS42L42_HSDET_COMP_TYPE3 0
+#define CS42L42_HSDET_COMP_TYPE4 3
+
+#define CS42L42_HS_CLAMP_DISABLE (CS42L42_PAGE_11 + 0x29)
+#define CS42L42_HS_CLAMP_DISABLE_SHIFT 0
+#define CS42L42_HS_CLAMP_DISABLE_MASK (1 << CS42L42_HS_CLAMP_DISABLE_SHIFT)
+
+/* Page 0x12 Clocking Registers */
+#define CS42L42_MCLK_SRC_SEL (CS42L42_PAGE_12 + 0x01)
+#define CS42L42_MCLKDIV_SHIFT 1
+#define CS42L42_MCLKDIV_MASK (1 << CS42L42_MCLKDIV_SHIFT)
+#define CS42L42_MCLK_SRC_SEL_SHIFT 0
+#define CS42L42_MCLK_SRC_SEL_MASK (1 << CS42L42_MCLK_SRC_SEL_SHIFT)
+
+#define CS42L42_SPDIF_CLK_CFG (CS42L42_PAGE_12 + 0x02)
+#define CS42L42_FSYNC_PW_LOWER (CS42L42_PAGE_12 + 0x03)
+
+#define CS42L42_FSYNC_PW_UPPER (CS42L42_PAGE_12 + 0x04)
+#define CS42L42_FSYNC_PULSE_WIDTH_SHIFT 0
+#define CS42L42_FSYNC_PULSE_WIDTH_MASK (0xff << \
+ CS42L42_FSYNC_PULSE_WIDTH_SHIFT)
+
+#define CS42L42_FSYNC_P_LOWER (CS42L42_PAGE_12 + 0x05)
+
+#define CS42L42_FSYNC_P_UPPER (CS42L42_PAGE_12 + 0x06)
+#define CS42L42_FSYNC_PERIOD_SHIFT 0
+#define CS42L42_FSYNC_PERIOD_MASK (0xff << CS42L42_FSYNC_PERIOD_SHIFT)
+
+#define CS42L42_ASP_CLK_CFG (CS42L42_PAGE_12 + 0x07)
+#define CS42L42_ASP_SCLK_EN_SHIFT 5
+#define CS42L42_ASP_SCLK_EN_MASK (1 << CS42L42_ASP_SCLK_EN_SHIFT)
+#define CS42L42_ASP_MASTER_MODE 0x01
+#define CS42L42_ASP_SLAVE_MODE 0x00
+#define CS42L42_ASP_MODE_SHIFT 4
+#define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
+#define CS42L42_ASP_SCPOL_SHIFT 2
+#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT)
+#define CS42L42_ASP_SCPOL_NOR 3
+#define CS42L42_ASP_LCPOL_SHIFT 0
+#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT)
+#define CS42L42_ASP_LCPOL_INV 3
+
+#define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
+#define CS42L42_ASP_STP_SHIFT 4
+#define CS42L42_ASP_STP_MASK (1 << CS42L42_ASP_STP_SHIFT)
+#define CS42L42_ASP_5050_SHIFT 3
+#define CS42L42_ASP_5050_MASK (1 << CS42L42_ASP_5050_SHIFT)
+#define CS42L42_ASP_FSD_SHIFT 0
+#define CS42L42_ASP_FSD_MASK (7 << CS42L42_ASP_FSD_SHIFT)
+#define CS42L42_ASP_FSD_0_5 1
+#define CS42L42_ASP_FSD_1_0 2
+#define CS42L42_ASP_FSD_1_5 3
+#define CS42L42_ASP_FSD_2_0 4
+
+#define CS42L42_FS_RATE_EN (CS42L42_PAGE_12 + 0x09)
+#define CS42L42_FS_EN_SHIFT 0
+#define CS42L42_FS_EN_MASK (0xf << CS42L42_FS_EN_SHIFT)
+#define CS42L42_FS_EN_IASRC_96K 0x1
+#define CS42L42_FS_EN_OASRC_96K 0x2
+
+#define CS42L42_IN_ASRC_CLK (CS42L42_PAGE_12 + 0x0A)
+#define CS42L42_CLK_IASRC_SEL_SHIFT 0
+#define CS42L42_CLK_IASRC_SEL_MASK (1 << CS42L42_CLK_IASRC_SEL_SHIFT)
+#define CS42L42_CLK_IASRC_SEL_6 0
+#define CS42L42_CLK_IASRC_SEL_12 1
+
+#define CS42L42_OUT_ASRC_CLK (CS42L42_PAGE_12 + 0x0B)
+#define CS42L42_CLK_OASRC_SEL_SHIFT 0
+#define CS42L42_CLK_OASRC_SEL_MASK (1 << CS42L42_CLK_OASRC_SEL_SHIFT)
+#define CS42L42_CLK_OASRC_SEL_12 1
+
+#define CS42L42_PLL_DIV_CFG1 (CS42L42_PAGE_12 + 0x0C)
+#define CS42L42_SCLK_PREDIV_SHIFT 0
+#define CS42L42_SCLK_PREDIV_MASK (3 << CS42L42_SCLK_PREDIV_SHIFT)
+
+/* Page 0x13 Interrupt Registers */
+/* Interrupts */
+#define CS42L42_ADC_OVFL_STATUS (CS42L42_PAGE_13 + 0x01)
+#define CS42L42_MIXER_STATUS (CS42L42_PAGE_13 + 0x02)
+#define CS42L42_SRC_STATUS (CS42L42_PAGE_13 + 0x03)
+#define CS42L42_ASP_RX_STATUS (CS42L42_PAGE_13 + 0x04)
+#define CS42L42_ASP_TX_STATUS (CS42L42_PAGE_13 + 0x05)
+#define CS42L42_CODEC_STATUS (CS42L42_PAGE_13 + 0x08)
+#define CS42L42_DET_INT_STATUS1 (CS42L42_PAGE_13 + 0x09)
+#define CS42L42_DET_INT_STATUS2 (CS42L42_PAGE_13 + 0x0A)
+#define CS42L42_SRCPL_INT_STATUS (CS42L42_PAGE_13 + 0x0B)
+#define CS42L42_VPMON_STATUS (CS42L42_PAGE_13 + 0x0D)
+#define CS42L42_PLL_LOCK_STATUS (CS42L42_PAGE_13 + 0x0E)
+#define CS42L42_TSRS_PLUG_STATUS (CS42L42_PAGE_13 + 0x0F)
+/* Masks */
+#define CS42L42_ADC_OVFL_INT_MASK (CS42L42_PAGE_13 + 0x16)
+#define CS42L42_ADC_OVFL_SHIFT 0
+#define CS42L42_ADC_OVFL_MASK (1 << CS42L42_ADC_OVFL_SHIFT)
+#define CS42L42_ADC_OVFL_VAL_MASK CS42L42_ADC_OVFL_MASK
+
+#define CS42L42_MIXER_INT_MASK (CS42L42_PAGE_13 + 0x17)
+#define CS42L42_MIX_CHB_OVFL_SHIFT 0
+#define CS42L42_MIX_CHB_OVFL_MASK (1 << CS42L42_MIX_CHB_OVFL_SHIFT)
+#define CS42L42_MIX_CHA_OVFL_SHIFT 1
+#define CS42L42_MIX_CHA_OVFL_MASK (1 << CS42L42_MIX_CHA_OVFL_SHIFT)
+#define CS42L42_EQ_OVFL_SHIFT 2
+#define CS42L42_EQ_OVFL_MASK (1 << CS42L42_EQ_OVFL_SHIFT)
+#define CS42L42_EQ_BIQUAD_OVFL_SHIFT 3
+#define CS42L42_EQ_BIQUAD_OVFL_MASK (1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT)
+#define CS42L42_MIXER_VAL_MASK (CS42L42_MIX_CHB_OVFL_MASK | \
+ CS42L42_MIX_CHA_OVFL_MASK | \
+ CS42L42_EQ_OVFL_MASK | \
+ CS42L42_EQ_BIQUAD_OVFL_MASK)
+
+#define CS42L42_SRC_INT_MASK (CS42L42_PAGE_13 + 0x18)
+#define CS42L42_SRC_ILK_SHIFT 0
+#define CS42L42_SRC_ILK_MASK (1 << CS42L42_SRC_ILK_SHIFT)
+#define CS42L42_SRC_OLK_SHIFT 1
+#define CS42L42_SRC_OLK_MASK (1 << CS42L42_SRC_OLK_SHIFT)
+#define CS42L42_SRC_IUNLK_SHIFT 2
+#define CS42L42_SRC_IUNLK_MASK (1 << CS42L42_SRC_IUNLK_SHIFT)
+#define CS42L42_SRC_OUNLK_SHIFT 3
+#define CS42L42_SRC_OUNLK_MASK (1 << CS42L42_SRC_OUNLK_SHIFT)
+#define CS42L42_SRC_VAL_MASK (CS42L42_SRC_ILK_MASK | \
+ CS42L42_SRC_OLK_MASK | \
+ CS42L42_SRC_IUNLK_MASK | \
+ CS42L42_SRC_OUNLK_MASK)
+
+#define CS42L42_ASP_RX_INT_MASK (CS42L42_PAGE_13 + 0x19)
+#define CS42L42_ASPRX_NOLRCK_SHIFT 0
+#define CS42L42_ASPRX_NOLRCK_MASK (1 << CS42L42_ASPRX_NOLRCK_SHIFT)
+#define CS42L42_ASPRX_EARLY_SHIFT 1
+#define CS42L42_ASPRX_EARLY_MASK (1 << CS42L42_ASPRX_EARLY_SHIFT)
+#define CS42L42_ASPRX_LATE_SHIFT 2
+#define CS42L42_ASPRX_LATE_MASK (1 << CS42L42_ASPRX_LATE_SHIFT)
+#define CS42L42_ASPRX_ERROR_SHIFT 3
+#define CS42L42_ASPRX_ERROR_MASK (1 << CS42L42_ASPRX_ERROR_SHIFT)
+#define CS42L42_ASPRX_OVLD_SHIFT 4
+#define CS42L42_ASPRX_OVLD_MASK (1 << CS42L42_ASPRX_OVLD_SHIFT)
+#define CS42L42_ASP_RX_VAL_MASK (CS42L42_ASPRX_NOLRCK_MASK | \
+ CS42L42_ASPRX_EARLY_MASK | \
+ CS42L42_ASPRX_LATE_MASK | \
+ CS42L42_ASPRX_ERROR_MASK | \
+ CS42L42_ASPRX_OVLD_MASK)
+
+#define CS42L42_ASP_TX_INT_MASK (CS42L42_PAGE_13 + 0x1A)
+#define CS42L42_ASPTX_NOLRCK_SHIFT 0
+#define CS42L42_ASPTX_NOLRCK_MASK (1 << CS42L42_ASPTX_NOLRCK_SHIFT)
+#define CS42L42_ASPTX_EARLY_SHIFT 1
+#define CS42L42_ASPTX_EARLY_MASK (1 << CS42L42_ASPTX_EARLY_SHIFT)
+#define CS42L42_ASPTX_LATE_SHIFT 2
+#define CS42L42_ASPTX_LATE_MASK (1 << CS42L42_ASPTX_LATE_SHIFT)
+#define CS42L42_ASPTX_SMERROR_SHIFT 3
+#define CS42L42_ASPTX_SMERROR_MASK (1 << CS42L42_ASPTX_SMERROR_SHIFT)
+#define CS42L42_ASP_TX_VAL_MASK (CS42L42_ASPTX_NOLRCK_MASK | \
+ CS42L42_ASPTX_EARLY_MASK | \
+ CS42L42_ASPTX_LATE_MASK | \
+ CS42L42_ASPTX_SMERROR_MASK)
+
+#define CS42L42_CODEC_INT_MASK (CS42L42_PAGE_13 + 0x1B)
+#define CS42L42_PDN_DONE_SHIFT 0
+#define CS42L42_PDN_DONE_MASK (1 << CS42L42_PDN_DONE_SHIFT)
+#define CS42L42_HSDET_AUTO_DONE_SHIFT 1
+#define CS42L42_HSDET_AUTO_DONE_MASK (1 << CS42L42_HSDET_AUTO_DONE_SHIFT)
+#define CS42L42_CODEC_VAL_MASK (CS42L42_PDN_DONE_MASK | \
+ CS42L42_HSDET_AUTO_DONE_MASK)
+
+#define CS42L42_SRCPL_INT_MASK (CS42L42_PAGE_13 + 0x1C)
+#define CS42L42_SRCPL_ADC_LK_SHIFT 0
+#define CS42L42_SRCPL_ADC_LK_MASK (1 << CS42L42_SRCPL_ADC_LK_SHIFT)
+#define CS42L42_SRCPL_DAC_LK_SHIFT 2
+#define CS42L42_SRCPL_DAC_LK_MASK (1 << CS42L42_SRCPL_DAC_LK_SHIFT)
+#define CS42L42_SRCPL_ADC_UNLK_SHIFT 5
+#define CS42L42_SRCPL_ADC_UNLK_MASK (1 << CS42L42_SRCPL_ADC_UNLK_SHIFT)
+#define CS42L42_SRCPL_DAC_UNLK_SHIFT 6
+#define CS42L42_SRCPL_DAC_UNLK_MASK (1 << CS42L42_SRCPL_DAC_UNLK_SHIFT)
+#define CS42L42_SRCPL_VAL_MASK (CS42L42_SRCPL_ADC_LK_MASK | \
+ CS42L42_SRCPL_DAC_LK_MASK | \
+ CS42L42_SRCPL_ADC_UNLK_MASK | \
+ CS42L42_SRCPL_DAC_UNLK_MASK)
+
+#define CS42L42_VPMON_INT_MASK (CS42L42_PAGE_13 + 0x1E)
+#define CS42L42_VPMON_SHIFT 0
+#define CS42L42_VPMON_MASK (1 << CS42L42_VPMON_SHIFT)
+#define CS42L42_VPMON_VAL_MASK CS42L42_VPMON_MASK
+
+#define CS42L42_PLL_LOCK_INT_MASK (CS42L42_PAGE_13 + 0x1F)
+#define CS42L42_PLL_LOCK_SHIFT 0
+#define CS42L42_PLL_LOCK_MASK (1 << CS42L42_PLL_LOCK_SHIFT)
+#define CS42L42_PLL_LOCK_VAL_MASK CS42L42_PLL_LOCK_MASK
+
+#define CS42L42_TSRS_PLUG_INT_MASK (CS42L42_PAGE_13 + 0x20)
+#define CS42L42_RS_PLUG_SHIFT 0
+#define CS42L42_RS_PLUG_MASK (1 << CS42L42_RS_PLUG_SHIFT)
+#define CS42L42_RS_UNPLUG_SHIFT 1
+#define CS42L42_RS_UNPLUG_MASK (1 << CS42L42_RS_UNPLUG_SHIFT)
+#define CS42L42_TS_PLUG_SHIFT 2
+#define CS42L42_TS_PLUG_MASK (1 << CS42L42_TS_PLUG_SHIFT)
+#define CS42L42_TS_UNPLUG_SHIFT 3
+#define CS42L42_TS_UNPLUG_MASK (1 << CS42L42_TS_UNPLUG_SHIFT)
+#define CS42L42_TSRS_PLUG_VAL_MASK (CS42L42_RS_PLUG_MASK | \
+ CS42L42_RS_UNPLUG_MASK | \
+ CS42L42_TS_PLUG_MASK | \
+ CS42L42_TS_UNPLUG_MASK)
+#define CS42L42_TS_PLUG 3
+#define CS42L42_TS_UNPLUG 0
+#define CS42L42_TS_TRANS 1
+
+/*
+ * NOTE: PLL_START must be 0 while both ADC_PDN=1 and HP_PDN=1.
+ * Otherwise it will prevent FILT+ from charging properly.
+ */
+#define CS42L42_PLL_CTL1 (CS42L42_PAGE_15 + 0x01)
+#define CS42L42_PLL_START_SHIFT 0
+#define CS42L42_PLL_START_MASK (1 << CS42L42_PLL_START_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC0 (CS42L42_PAGE_15 + 0x02)
+#define CS42L42_PLL_DIV_FRAC_SHIFT 0
+#define CS42L42_PLL_DIV_FRAC_MASK (0xff << CS42L42_PLL_DIV_FRAC_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC1 (CS42L42_PAGE_15 + 0x03)
+#define CS42L42_PLL_DIV_FRAC2 (CS42L42_PAGE_15 + 0x04)
+
+#define CS42L42_PLL_DIV_INT (CS42L42_PAGE_15 + 0x05)
+#define CS42L42_PLL_DIV_INT_SHIFT 0
+#define CS42L42_PLL_DIV_INT_MASK (0xff << CS42L42_PLL_DIV_INT_SHIFT)
+
+#define CS42L42_PLL_CTL3 (CS42L42_PAGE_15 + 0x08)
+#define CS42L42_PLL_DIVOUT_SHIFT 0
+#define CS42L42_PLL_DIVOUT_MASK (0xff << CS42L42_PLL_DIVOUT_SHIFT)
+
+#define CS42L42_PLL_CAL_RATIO (CS42L42_PAGE_15 + 0x0A)
+#define CS42L42_PLL_CAL_RATIO_SHIFT 0
+#define CS42L42_PLL_CAL_RATIO_MASK (0xff << CS42L42_PLL_CAL_RATIO_SHIFT)
+
+#define CS42L42_PLL_CTL4 (CS42L42_PAGE_15 + 0x1B)
+#define CS42L42_PLL_MODE_SHIFT 0
+#define CS42L42_PLL_MODE_MASK (3 << CS42L42_PLL_MODE_SHIFT)
+
+/* Page 0x19 HP Load Detect Registers */
+#define CS42L42_LOAD_DET_RCSTAT (CS42L42_PAGE_19 + 0x25)
+#define CS42L42_RLA_STAT_SHIFT 0
+#define CS42L42_RLA_STAT_MASK (3 << CS42L42_RLA_STAT_SHIFT)
+#define CS42L42_RLA_STAT_15_OHM 0
+
+#define CS42L42_LOAD_DET_DONE (CS42L42_PAGE_19 + 0x26)
+#define CS42L42_HPLOAD_DET_DONE_SHIFT 0
+#define CS42L42_HPLOAD_DET_DONE_MASK (1 << CS42L42_HPLOAD_DET_DONE_SHIFT)
+
+#define CS42L42_LOAD_DET_EN (CS42L42_PAGE_19 + 0x27)
+#define CS42L42_HP_LD_EN_SHIFT 0
+#define CS42L42_HP_LD_EN_MASK (1 << CS42L42_HP_LD_EN_SHIFT)
+
+/* Page 0x1B Headset Interface Registers */
+#define CS42L42_HSBIAS_SC_AUTOCTL (CS42L42_PAGE_1B + 0x70)
+#define CS42L42_HSBIAS_SENSE_TRIP_SHIFT 0
+#define CS42L42_HSBIAS_SENSE_TRIP_MASK (7 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT)
+#define CS42L42_TIP_SENSE_EN_SHIFT 5
+#define CS42L42_TIP_SENSE_EN_MASK (1 << CS42L42_TIP_SENSE_EN_SHIFT)
+#define CS42L42_AUTO_HSBIAS_HIZ_SHIFT 6
+#define CS42L42_AUTO_HSBIAS_HIZ_MASK (1 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT)
+#define CS42L42_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L42_HSBIAS_SENSE_EN_MASK (1 << CS42L42_HSBIAS_SENSE_EN_SHIFT)
+
+#define CS42L42_WAKE_CTL (CS42L42_PAGE_1B + 0x71)
+#define CS42L42_WAKEB_CLEAR_SHIFT 0
+#define CS42L42_WAKEB_CLEAR_MASK (1 << CS42L42_WAKEB_CLEAR_SHIFT)
+#define CS42L42_WAKEB_MODE_SHIFT 5
+#define CS42L42_WAKEB_MODE_MASK (1 << CS42L42_WAKEB_MODE_SHIFT)
+#define CS42L42_M_HP_WAKE_SHIFT 6
+#define CS42L42_M_HP_WAKE_MASK (1 << CS42L42_M_HP_WAKE_SHIFT)
+#define CS42L42_M_MIC_WAKE_SHIFT 7
+#define CS42L42_M_MIC_WAKE_MASK (1 << CS42L42_M_MIC_WAKE_SHIFT)
+
+#define CS42L42_ADC_DISABLE_MUTE (CS42L42_PAGE_1B + 0x72)
+#define CS42L42_ADC_DISABLE_S0_MUTE_SHIFT 7
+#define CS42L42_ADC_DISABLE_S0_MUTE_MASK (1 << CS42L42_ADC_DISABLE_S0_MUTE_SHIFT)
+
+#define CS42L42_TIPSENSE_CTL (CS42L42_PAGE_1B + 0x73)
+#define CS42L42_TIP_SENSE_DEBOUNCE_SHIFT 0
+#define CS42L42_TIP_SENSE_DEBOUNCE_MASK (3 << CS42L42_TIP_SENSE_DEBOUNCE_SHIFT)
+#define CS42L42_TIP_SENSE_INV_SHIFT 5
+#define CS42L42_TIP_SENSE_INV_MASK (1 << CS42L42_TIP_SENSE_INV_SHIFT)
+#define CS42L42_TIP_SENSE_CTRL_SHIFT 6
+#define CS42L42_TIP_SENSE_CTRL_MASK (3 << CS42L42_TIP_SENSE_CTRL_SHIFT)
+
+/*
+ * NOTE: DETECT_MODE must be 0 while both ADC_PDN=1 and HP_PDN=1.
+ * Otherwise it will prevent FILT+ from charging properly.
+ */
+#define CS42L42_MISC_DET_CTL (CS42L42_PAGE_1B + 0x74)
+#define CS42L42_PDN_MIC_LVL_DET_SHIFT 0
+#define CS42L42_PDN_MIC_LVL_DET_MASK (1 << CS42L42_PDN_MIC_LVL_DET_SHIFT)
+#define CS42L42_HSBIAS_CTL_SHIFT 1
+#define CS42L42_HSBIAS_CTL_MASK (3 << CS42L42_HSBIAS_CTL_SHIFT)
+#define CS42L42_DETECT_MODE_SHIFT 3
+#define CS42L42_DETECT_MODE_MASK (3 << CS42L42_DETECT_MODE_SHIFT)
+
+#define CS42L42_MIC_DET_CTL1 (CS42L42_PAGE_1B + 0x75)
+#define CS42L42_HS_DET_LEVEL_SHIFT 0
+#define CS42L42_HS_DET_LEVEL_MASK (0x3F << CS42L42_HS_DET_LEVEL_SHIFT)
+#define CS42L42_EVENT_STAT_SEL_SHIFT 6
+#define CS42L42_EVENT_STAT_SEL_MASK (1 << CS42L42_EVENT_STAT_SEL_SHIFT)
+#define CS42L42_LATCH_TO_VP_SHIFT 7
+#define CS42L42_LATCH_TO_VP_MASK (1 << CS42L42_LATCH_TO_VP_SHIFT)
+
+#define CS42L42_MIC_DET_CTL2 (CS42L42_PAGE_1B + 0x76)
+#define CS42L42_DEBOUNCE_TIME_SHIFT 5
+#define CS42L42_DEBOUNCE_TIME_MASK (0x07 << CS42L42_DEBOUNCE_TIME_SHIFT)
+
+#define CS42L42_DET_STATUS1 (CS42L42_PAGE_1B + 0x77)
+#define CS42L42_HSBIAS_HIZ_MODE_SHIFT 6
+#define CS42L42_HSBIAS_HIZ_MODE_MASK (1 << CS42L42_HSBIAS_HIZ_MODE_SHIFT)
+#define CS42L42_TIP_SENSE_SHIFT 7
+#define CS42L42_TIP_SENSE_MASK (1 << CS42L42_TIP_SENSE_SHIFT)
+
+#define CS42L42_DET_STATUS2 (CS42L42_PAGE_1B + 0x78)
+#define CS42L42_SHORT_TRUE_SHIFT 0
+#define CS42L42_SHORT_TRUE_MASK (1 << CS42L42_SHORT_TRUE_SHIFT)
+#define CS42L42_HS_TRUE_SHIFT 1
+#define CS42L42_HS_TRUE_MASK (1 << CS42L42_HS_TRUE_SHIFT)
+
+#define CS42L42_DET_INT1_MASK (CS42L42_PAGE_1B + 0x79)
+#define CS42L42_TIP_SENSE_UNPLUG_SHIFT 5
+#define CS42L42_TIP_SENSE_UNPLUG_MASK (1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT)
+#define CS42L42_TIP_SENSE_PLUG_SHIFT 6
+#define CS42L42_TIP_SENSE_PLUG_MASK (1 << CS42L42_TIP_SENSE_PLUG_SHIFT)
+#define CS42L42_HSBIAS_SENSE_SHIFT 7
+#define CS42L42_HSBIAS_SENSE_MASK (1 << CS42L42_HSBIAS_SENSE_SHIFT)
+#define CS42L42_DET_INT_VAL1_MASK (CS42L42_TIP_SENSE_UNPLUG_MASK | \
+ CS42L42_TIP_SENSE_PLUG_MASK | \
+ CS42L42_HSBIAS_SENSE_MASK)
+
+#define CS42L42_DET_INT2_MASK (CS42L42_PAGE_1B + 0x7A)
+#define CS42L42_M_SHORT_DET_SHIFT 0
+#define CS42L42_M_SHORT_DET_MASK (1 << CS42L42_M_SHORT_DET_SHIFT)
+#define CS42L42_M_SHORT_RLS_SHIFT 1
+#define CS42L42_M_SHORT_RLS_MASK (1 << CS42L42_M_SHORT_RLS_SHIFT)
+#define CS42L42_M_HSBIAS_HIZ_SHIFT 2
+#define CS42L42_M_HSBIAS_HIZ_MASK (1 << CS42L42_M_HSBIAS_HIZ_SHIFT)
+#define CS42L42_M_DETECT_FT_SHIFT 6
+#define CS42L42_M_DETECT_FT_MASK (1 << CS42L42_M_DETECT_FT_SHIFT)
+#define CS42L42_M_DETECT_TF_SHIFT 7
+#define CS42L42_M_DETECT_TF_MASK (1 << CS42L42_M_DETECT_TF_SHIFT)
+#define CS42L42_DET_INT_VAL2_MASK (CS42L42_M_SHORT_DET_MASK | \
+ CS42L42_M_SHORT_RLS_MASK | \
+ CS42L42_M_HSBIAS_HIZ_MASK | \
+ CS42L42_M_DETECT_FT_MASK | \
+ CS42L42_M_DETECT_TF_MASK)
+
+/* Page 0x1C Headset Bias Registers */
+#define CS42L42_HS_BIAS_CTL (CS42L42_PAGE_1C + 0x03)
+#define CS42L42_HSBIAS_RAMP_SHIFT 0
+#define CS42L42_HSBIAS_RAMP_MASK (3 << CS42L42_HSBIAS_RAMP_SHIFT)
+#define CS42L42_HSBIAS_PD_SHIFT 4
+#define CS42L42_HSBIAS_PD_MASK (1 << CS42L42_HSBIAS_PD_SHIFT)
+#define CS42L42_HSBIAS_CAPLESS_SHIFT 7
+#define CS42L42_HSBIAS_CAPLESS_MASK (1 << CS42L42_HSBIAS_CAPLESS_SHIFT)
+
+/* Page 0x1D ADC Registers */
+#define CS42L42_ADC_CTL (CS42L42_PAGE_1D + 0x01)
+#define CS42L42_ADC_NOTCH_DIS_SHIFT 5
+#define CS42L42_ADC_FORCE_WEAK_VCM_SHIFT 4
+#define CS42L42_ADC_INV_SHIFT 2
+#define CS42L42_ADC_DIG_BOOST_SHIFT 0
+
+#define CS42L42_ADC_VOLUME (CS42L42_PAGE_1D + 0x03)
+#define CS42L42_ADC_VOL_SHIFT 0
+
+#define CS42L42_ADC_WNF_HPF_CTL (CS42L42_PAGE_1D + 0x04)
+#define CS42L42_ADC_WNF_CF_SHIFT 4
+#define CS42L42_ADC_WNF_EN_SHIFT 3
+#define CS42L42_ADC_HPF_CF_SHIFT 1
+#define CS42L42_ADC_HPF_EN_SHIFT 0
+
+/* Page 0x1F DAC Registers */
+#define CS42L42_DAC_CTL1 (CS42L42_PAGE_1F + 0x01)
+#define CS42L42_DACB_INV_SHIFT 1
+#define CS42L42_DACA_INV_SHIFT 0
+
+#define CS42L42_DAC_CTL2 (CS42L42_PAGE_1F + 0x06)
+#define CS42L42_HPOUT_PULLDOWN_SHIFT 4
+#define CS42L42_HPOUT_PULLDOWN_MASK (15 << CS42L42_HPOUT_PULLDOWN_SHIFT)
+#define CS42L42_HPOUT_LOAD_SHIFT 3
+#define CS42L42_HPOUT_LOAD_MASK (1 << CS42L42_HPOUT_LOAD_SHIFT)
+#define CS42L42_HPOUT_CLAMP_SHIFT 2
+#define CS42L42_HPOUT_CLAMP_MASK (1 << CS42L42_HPOUT_CLAMP_SHIFT)
+#define CS42L42_DAC_HPF_EN_SHIFT 1
+#define CS42L42_DAC_HPF_EN_MASK (1 << CS42L42_DAC_HPF_EN_SHIFT)
+#define CS42L42_DAC_MON_EN_SHIFT 0
+#define CS42L42_DAC_MON_EN_MASK (1 << CS42L42_DAC_MON_EN_SHIFT)
+
+/* Page 0x20 HP CTL Registers */
+#define CS42L42_HP_CTL (CS42L42_PAGE_20 + 0x01)
+#define CS42L42_HP_ANA_BMUTE_SHIFT 3
+#define CS42L42_HP_ANA_BMUTE_MASK (1 << CS42L42_HP_ANA_BMUTE_SHIFT)
+#define CS42L42_HP_ANA_AMUTE_SHIFT 2
+#define CS42L42_HP_ANA_AMUTE_MASK (1 << CS42L42_HP_ANA_AMUTE_SHIFT)
+#define CS42L42_HP_FULL_SCALE_VOL_SHIFT 1
+#define CS42L42_HP_FULL_SCALE_VOL_MASK (1 << CS42L42_HP_FULL_SCALE_VOL_SHIFT)
+
+/* Page 0x21 Class H Registers */
+#define CS42L42_CLASSH_CTL (CS42L42_PAGE_21 + 0x01)
+
+/* Page 0x23 Mixer Volume Registers */
+#define CS42L42_MIXER_CHA_VOL (CS42L42_PAGE_23 + 0x01)
+#define CS42L42_MIXER_ADC_VOL (CS42L42_PAGE_23 + 0x02)
+
+#define CS42L42_MIXER_CHB_VOL (CS42L42_PAGE_23 + 0x03)
+#define CS42L42_MIXER_CH_VOL_SHIFT 0
+#define CS42L42_MIXER_CH_VOL_MASK (0x3f << CS42L42_MIXER_CH_VOL_SHIFT)
+
+/* Page 0x24 EQ Registers */
+#define CS42L42_EQ_COEF_IN0 (CS42L42_PAGE_24 + 0x01)
+#define CS42L42_EQ_COEF_IN1 (CS42L42_PAGE_24 + 0x02)
+#define CS42L42_EQ_COEF_IN2 (CS42L42_PAGE_24 + 0x03)
+#define CS42L42_EQ_COEF_IN3 (CS42L42_PAGE_24 + 0x04)
+#define CS42L42_EQ_COEF_RW (CS42L42_PAGE_24 + 0x06)
+#define CS42L42_EQ_COEF_OUT0 (CS42L42_PAGE_24 + 0x07)
+#define CS42L42_EQ_COEF_OUT1 (CS42L42_PAGE_24 + 0x08)
+#define CS42L42_EQ_COEF_OUT2 (CS42L42_PAGE_24 + 0x09)
+#define CS42L42_EQ_COEF_OUT3 (CS42L42_PAGE_24 + 0x0A)
+#define CS42L42_EQ_INIT_STAT (CS42L42_PAGE_24 + 0x0B)
+#define CS42L42_EQ_START_FILT (CS42L42_PAGE_24 + 0x0C)
+#define CS42L42_EQ_MUTE_CTL (CS42L42_PAGE_24 + 0x0E)
+
+/* Page 0x25 Audio Port Registers */
+#define CS42L42_SP_RX_CH_SEL (CS42L42_PAGE_25 + 0x01)
+#define CS42L42_SP_RX_CHB_SEL_SHIFT 2
+#define CS42L42_SP_RX_CHB_SEL_MASK (3 << CS42L42_SP_RX_CHB_SEL_SHIFT)
+
+#define CS42L42_SP_RX_ISOC_CTL (CS42L42_PAGE_25 + 0x02)
+#define CS42L42_SP_RX_RSYNC_SHIFT 6
+#define CS42L42_SP_RX_RSYNC_MASK (1 << CS42L42_SP_RX_RSYNC_SHIFT)
+#define CS42L42_SP_RX_NSB_POS_SHIFT 3
+#define CS42L42_SP_RX_NSB_POS_MASK (7 << CS42L42_SP_RX_NSB_POS_SHIFT)
+#define CS42L42_SP_RX_NFS_NSBB_SHIFT 2
+#define CS42L42_SP_RX_NFS_NSBB_MASK (1 << CS42L42_SP_RX_NFS_NSBB_SHIFT)
+#define CS42L42_SP_RX_ISOC_MODE_SHIFT 0
+#define CS42L42_SP_RX_ISOC_MODE_MASK (3 << CS42L42_SP_RX_ISOC_MODE_SHIFT)
+
+#define CS42L42_SP_RX_FS (CS42L42_PAGE_25 + 0x03)
+#define CS42L42_SPDIF_CH_SEL (CS42L42_PAGE_25 + 0x04)
+#define CS42L42_SP_TX_ISOC_CTL (CS42L42_PAGE_25 + 0x05)
+#define CS42L42_SP_TX_FS (CS42L42_PAGE_25 + 0x06)
+#define CS42L42_SPDIF_SW_CTL1 (CS42L42_PAGE_25 + 0x07)
+
+/* Page 0x26 SRC Registers */
+#define CS42L42_SRC_SDIN_FS (CS42L42_PAGE_26 + 0x01)
+#define CS42L42_SRC_SDIN_FS_SHIFT 0
+#define CS42L42_SRC_SDIN_FS_MASK (0x1f << CS42L42_SRC_SDIN_FS_SHIFT)
+
+#define CS42L42_SRC_SDOUT_FS (CS42L42_PAGE_26 + 0x09)
+
+/* Page 0x27 DMA */
+#define CS42L42_SOFT_RESET_REBOOT (CS42L42_PAGE_27 + 0x01)
+#define CS42L42_SFT_RST_REBOOT_MASK BIT(1)
+
+/* Page 0x28 S/PDIF Registers */
+#define CS42L42_SPDIF_CTL1 (CS42L42_PAGE_28 + 0x01)
+#define CS42L42_SPDIF_CTL2 (CS42L42_PAGE_28 + 0x02)
+#define CS42L42_SPDIF_CTL3 (CS42L42_PAGE_28 + 0x03)
+#define CS42L42_SPDIF_CTL4 (CS42L42_PAGE_28 + 0x04)
+
+/* Page 0x29 Serial Port TX Registers */
+#define CS42L42_ASP_TX_SZ_EN (CS42L42_PAGE_29 + 0x01)
+#define CS42L42_ASP_TX_EN_SHIFT 0
+#define CS42L42_ASP_TX_CH_EN (CS42L42_PAGE_29 + 0x02)
+#define CS42L42_ASP_TX0_CH2_SHIFT 1
+#define CS42L42_ASP_TX0_CH1_SHIFT 0
+
+#define CS42L42_ASP_TX_CH_AP_RES (CS42L42_PAGE_29 + 0x03)
+#define CS42L42_ASP_TX_CH1_AP_SHIFT 7
+#define CS42L42_ASP_TX_CH1_AP_MASK (1 << CS42L42_ASP_TX_CH1_AP_SHIFT)
+#define CS42L42_ASP_TX_CH2_AP_SHIFT 6
+#define CS42L42_ASP_TX_CH2_AP_MASK (1 << CS42L42_ASP_TX_CH2_AP_SHIFT)
+#define CS42L42_ASP_TX_CH2_RES_SHIFT 2
+#define CS42L42_ASP_TX_CH2_RES_MASK (3 << CS42L42_ASP_TX_CH2_RES_SHIFT)
+#define CS42L42_ASP_TX_CH1_RES_SHIFT 0
+#define CS42L42_ASP_TX_CH1_RES_MASK (3 << CS42L42_ASP_TX_CH1_RES_SHIFT)
+#define CS42L42_ASP_TX_CH1_BIT_MSB (CS42L42_PAGE_29 + 0x04)
+#define CS42L42_ASP_TX_CH1_BIT_LSB (CS42L42_PAGE_29 + 0x05)
+#define CS42L42_ASP_TX_HIZ_DLY_CFG (CS42L42_PAGE_29 + 0x06)
+#define CS42L42_ASP_TX_CH2_BIT_MSB (CS42L42_PAGE_29 + 0x0A)
+#define CS42L42_ASP_TX_CH2_BIT_LSB (CS42L42_PAGE_29 + 0x0B)
+
+/* Page 0x2A Serial Port RX Registers */
+#define CS42L42_ASP_RX_DAI0_EN (CS42L42_PAGE_2A + 0x01)
+#define CS42L42_ASP_RX0_CH_EN_SHIFT 2
+#define CS42L42_ASP_RX0_CH_EN_MASK (0xf << CS42L42_ASP_RX0_CH_EN_SHIFT)
+#define CS42L42_ASP_RX0_CH1_SHIFT 2
+#define CS42L42_ASP_RX0_CH2_SHIFT 3
+#define CS42L42_ASP_RX0_CH3_SHIFT 4
+#define CS42L42_ASP_RX0_CH4_SHIFT 5
+
+#define CS42L42_ASP_RX_DAI0_CH1_AP_RES (CS42L42_PAGE_2A + 0x02)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x03)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x04)
+#define CS42L42_ASP_RX_DAI0_CH2_AP_RES (CS42L42_PAGE_2A + 0x05)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x06)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x07)
+#define CS42L42_ASP_RX_DAI0_CH3_AP_RES (CS42L42_PAGE_2A + 0x08)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_MSB (CS42L42_PAGE_2A + 0x09)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_LSB (CS42L42_PAGE_2A + 0x0A)
+#define CS42L42_ASP_RX_DAI0_CH4_AP_RES (CS42L42_PAGE_2A + 0x0B)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_MSB (CS42L42_PAGE_2A + 0x0C)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_LSB (CS42L42_PAGE_2A + 0x0D)
+#define CS42L42_ASP_RX_DAI1_CH1_AP_RES (CS42L42_PAGE_2A + 0x0E)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_MSB (CS42L42_PAGE_2A + 0x0F)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_LSB (CS42L42_PAGE_2A + 0x10)
+#define CS42L42_ASP_RX_DAI1_CH2_AP_RES (CS42L42_PAGE_2A + 0x11)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_MSB (CS42L42_PAGE_2A + 0x12)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_LSB (CS42L42_PAGE_2A + 0x13)
+
+#define CS42L42_ASP_RX_CH_AP_SHIFT 6
+#define CS42L42_ASP_RX_CH_AP_MASK (1 << CS42L42_ASP_RX_CH_AP_SHIFT)
+#define CS42L42_ASP_RX_CH_AP_LOW 0
+#define CS42L42_ASP_RX_CH_AP_HI 1
+#define CS42L42_ASP_RX_CH_RES_SHIFT 0
+#define CS42L42_ASP_RX_CH_RES_MASK (3 << CS42L42_ASP_RX_CH_RES_SHIFT)
+#define CS42L42_ASP_RX_CH_RES_32 3
+#define CS42L42_ASP_RX_CH_RES_16 1
+#define CS42L42_ASP_RX_CH_BIT_ST_SHIFT 0
+#define CS42L42_ASP_RX_CH_BIT_ST_MASK (0xff << CS42L42_ASP_RX_CH_BIT_ST_SHIFT)
+
+/* Page 0x30 ID Registers */
+#define CS42L42_SUB_REVID (CS42L42_PAGE_30 + 0x14)
+#define CS42L42_MAX_REGISTER (CS42L42_PAGE_30 + 0x14)
+
+/* Defines for fracturing values spread across multiple registers */
+#define CS42L42_FRAC0_VAL(val) ((val) & 0x0000ff)
+#define CS42L42_FRAC1_VAL(val) (((val) & 0x00ff00) >> 8)
+#define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
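
For example, a 24-bit fractional PLL divider value can be scattered across the three byte-wide FRAC registers from page 0x15; a sketch using the regmap API:

#include <linux/regmap.h>

static int cs42l42_write_pll_frac_sketch(struct regmap *regmap, u32 frac)
{
	int ret;

	ret = regmap_write(regmap, CS42L42_PLL_DIV_FRAC0, CS42L42_FRAC0_VAL(frac));
	if (ret)
		return ret;
	ret = regmap_write(regmap, CS42L42_PLL_DIV_FRAC1, CS42L42_FRAC1_VAL(frac));
	if (ret)
		return ret;
	return regmap_write(regmap, CS42L42_PLL_DIV_FRAC2, CS42L42_FRAC2_VAL(frac));
}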
+
+#define CS42L42_NUM_SUPPLIES 5
+#define CS42L42_BOOT_TIME_US 3000
+#define CS42L42_PLL_DIVOUT_TIME_US 800
+#define CS42L42_CLOCK_SWITCH_DELAY_US 150
+#define CS42L42_PLL_LOCK_POLL_US 250
+#define CS42L42_PLL_LOCK_TIMEOUT_US 1250
+#define CS42L42_HP_ADC_EN_TIME_US 20000
+#define CS42L42_PDN_DONE_POLL_US 1000
+#define CS42L42_PDN_DONE_TIMEOUT_US 235000
+#define CS42L42_PDN_DONE_TIME_MS 65
+
+#endif /* __CS42L42_H */
diff --git a/include/sound/cs42l43.h b/include/sound/cs42l43.h
new file mode 100644
index 000000000000..deb337fc4e8c
--- /dev/null
+++ b/include/sound/cs42l43.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 CODEC driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_ASOC_EXT_H
+#define CS42L43_ASOC_EXT_H
+
+#define CS42L43_SYSCLK 0
+
+#define CS42L43_SYSCLK_MCLK 0
+#define CS42L43_SYSCLK_SDW 1
+
+#endif /* CS42L43_ASOC_EXT_H */
diff --git a/include/sound/da7219-aad.h b/include/sound/da7219-aad.h
index 24ee7baa2589..41320522daa2 100644
--- a/include/sound/da7219-aad.h
+++ b/include/sound/da7219-aad.h
@@ -44,6 +44,11 @@ enum da7219_aad_jack_ins_deb {
DA7219_AAD_JACK_INS_DEB_1S,
};
+enum da7219_aad_jack_ins_det_pty {
+ DA7219_AAD_JACK_INS_DET_PTY_LOW = 0,
+ DA7219_AAD_JACK_INS_DET_PTY_HIGH,
+};
+
enum da7219_aad_jack_det_rate {
DA7219_AAD_JACK_DET_RATE_32_64MS = 0,
DA7219_AAD_JACK_DET_RATE_64_128MS,
@@ -80,6 +85,7 @@ struct da7219_aad_pdata {
enum da7219_aad_btn_cfg btn_cfg;
enum da7219_aad_mic_det_thr mic_det_thr;
enum da7219_aad_jack_ins_deb jack_ins_deb;
+ enum da7219_aad_jack_ins_det_pty jack_ins_det_pty;
enum da7219_aad_jack_det_rate jack_det_rate;
enum da7219_aad_jack_rem_deb jack_rem_deb;
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 80d275b9ae0d..f6803205a9fb 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -21,6 +21,8 @@ struct i2s_clk_config_data {
u32 sample_rate;
};
+struct dw_i2s_dev;
+
struct i2s_platform_data {
#define DWC_I2S_PLAY (1 << 0)
#define DWC_I2S_RECORD (1 << 1)
@@ -42,6 +44,7 @@ struct i2s_platform_data {
void *capture_dma_data;
bool (*filter)(struct dma_chan *chan, void *slave);
int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+ int (*i2s_pd_init)(struct dw_i2s_dev *dev);
};
struct i2s_dma_data {
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 8c5e38180fb0..d70c55f17df7 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -15,6 +15,8 @@
* snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
* substream
* @substream: PCM substream
+ *
+ * Return: DMA transfer direction
*/
static inline enum dma_transfer_direction
snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
@@ -60,22 +62,25 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* @maxburst: Maximum number of words (note: words, as in units of the
* src_addr_width member, not bytes) that can be sent to or received from the
* DAI in one burst.
- * @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when
* requesting the DMA channel.
* @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes
* @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- unsigned int slave_id;
void *filter_data;
const char *chan_name;
unsigned int fifo_size;
unsigned int flags;
+ void *peripheral_config;
+ size_t peripheral_size;
};
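
The new peripheral_config/peripheral_size pair lets a DAI driver hand an opaque, vendor-specific blob to the dmaengine driver alongside the standard slave config. A sketch; struct my_vendor_cfg and its field are hypothetical:

struct my_vendor_cfg {			/* hypothetical vendor-specific layout */
	u32 fifo_threshold;
};

static struct my_vendor_cfg my_cfg = { .fifo_threshold = 16 };

static void my_dai_fill_dma_data_sketch(struct snd_dmaengine_dai_dma_data *dma_data)
{
	/* The blob must remain valid for as long as the DMA channel may use it */
	dma_data->peripheral_config = &my_cfg;
	dma_data->peripheral_size = sizeof(my_cfg);
}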
void snd_dmaengine_pcm_set_config_from_dai_data(
@@ -137,7 +142,7 @@ struct snd_dmaengine_pcm_config {
struct snd_pcm_substream *substream);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- void *buf, unsigned long bytes);
+ unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 468e38c54dd3..1af9e6819392 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -25,13 +25,9 @@
/* ------------------- DEFINES -------------------- */
#define EMUPAGESIZE 4096
-#define MAXREQVOICES 8
#define MAXPAGES0 4096 /* 32 bit mode */
#define MAXPAGES1 8192 /* 31 bit mode */
-#define RESERVED 0
-#define NUM_MIDI 16
#define NUM_G 64 /* use all channels */
-#define NUM_FXSENDS 4
#define NUM_EFX_PLAYBACK 16
/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
@@ -39,10 +35,38 @@
#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
#define TMEMSIZE 256*1024
-#define TMEMSIZEREG 4
#define IP_TO_CP(ip) ((ip == 0) ? 0 : (((0x00001000uL | (ip & 0x00000FFFL)) << (((ip >> 12) & 0x000FL) + 4)) & 0xFFFF0000uL))
+// This is used to define hardware bit-fields (sub-registers) by combining
+// the bit shift and count with the actual register address. The passed
+// mask must represent a single run of adjacent bits.
+// The non-concatenating (_NC) variant should be used directly only for
+// sub-registers that do not follow the <register>_<field> naming pattern.
+#define SUB_REG_NC(reg, field, mask) \
+ enum { \
+ field ## _MASK = mask, \
+ field = reg | \
+ (__builtin_ctz(mask) << 16) | \
+ (__builtin_popcount(mask) << 24), \
+ };
+#define SUB_REG(reg, field, mask) SUB_REG_NC(reg, reg ## _ ## field, mask)
+
+// Macros for manipulating values of bit-fields declared using the above macros.
+// Best used with constant register addresses, as otherwise quite some code is
+// generated. The actual register read/write functions handle combined addresses
+// automatically, so use of these macros conveys no advantage when accessing a
+// single sub-register at a time.
+#define REG_SHIFT(r) (((r) >> 16) & 0x1f)
+#define REG_SIZE(r) (((r) >> 24) & 0x1f)
+#define REG_MASK0(r) ((1U << REG_SIZE(r)) - 1U)
+#define REG_MASK(r) (REG_MASK0(r) << REG_SHIFT(r))
+#define REG_VAL_GET(r, v) ((v & REG_MASK(r)) >> REG_SHIFT(r))
+#define REG_VAL_PUT(r, v) ((v) << REG_SHIFT(r))
+
+// List terminator for snd_emu10k1_ptr_write_multiple()
+#define REGLIST_END ~0
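+
+// To make the encoding concrete (a worked example, not part of the patch):
+// SUB_REG(WC, SAMPLECOUNTER, 0x03FFFFC0) yields
+//   WC_SAMPLECOUNTER = 0x10 | (6 << 16) | (20 << 24) = 0x14060010
+// i.e. register 0x10, shift 6, 20-bit field, matching the literal constant
+// this patch removes further down. Given a raw wall-clock read in 'wc':
+//   u32 samples = REG_VAL_GET(WC_SAMPLECOUNTER, wc);      // (wc & 0x03FFFFC0) >> 6
+//   u32 raw     = REG_VAL_PUT(WC_SAMPLECOUNTER, samples); // samples << 6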
+
// Audigy-specific registers are prefixed with 'A_'
/************************************************************************************************/
@@ -66,8 +90,8 @@
/* the relevant bits and zero to the other bits */
#define IPR_P16V 0x80000000 /* Bit set when the CA0151 P16V chip wishes
to interrupt */
-#define IPR_GPIOMSG 0x20000000 /* GPIO message interrupt (RE'd, still not sure
- which INTE bits enable it) */
+#define IPR_WATERMARK_REACHED 0x40000000
+#define IPR_A_GPIO 0x20000000 /* GPIO input pin change */
/* The next two interrupts are for the midi port on the Audigy Drive (A_MPU1) */
#define IPR_A_MIDITRANSBUFEMPTY2 0x10000000 /* MIDI UART transmit buffer empty */
@@ -95,11 +119,15 @@
#define IPR_MIDITRANSBUFEMPTY 0x00000100 /* MIDI UART transmit buffer empty */
#define IPR_MIDIRECVBUFEMPTY 0x00000080 /* MIDI UART receive buffer empty */
#define IPR_CHANNELLOOP 0x00000040 /* Channel (half) loop interrupt(s) pending */
+ /* The interrupt is triggered shortly after */
+ /* CCR_READADDRESS has crossed the boundary; */
+ /* due to the cache, this runs ahead of the */
+ /* actual playback position. */
#define IPR_CHANNELNUMBERMASK 0x0000003f /* When IPR_CHANNELLOOP is set, indicates the */
/* highest set channel in CLIPL, CLIPH, HLIPL, */
- /* or HLIPH. When IP is written with CL set, */
+ /* or HLIPH. When IPR is written with CL set, */
/* the bit in H/CLIPL or H/CLIPH corresponding */
- /* to the CIN value written will be cleared. */
+ /* to the CN value written will be cleared. */
#define INTE 0x0c /* Interrupt enable register */
#define INTE_VIRTUALSB_MASK 0xc0000000 /* Virtual Soundblaster I/O port capture */
@@ -127,10 +155,14 @@
/* behavior and possibly random segfaults and */
/* lockups if enabled. */
+#define INTE_A_GPIOENABLE 0x00040000 /* Enable GPIO input change interrupts */
+
/* The next two interrupts are for the midi port on the Audigy Drive (A_MPU1) */
#define INTE_A_MIDITXENABLE2 0x00020000 /* Enable MIDI transmit-buffer-empty interrupts */
#define INTE_A_MIDIRXENABLE2 0x00010000 /* Enable MIDI receive-buffer-empty interrupts */
+#define INTE_A_SPDIF_BUFFULL_ENABLE 0x00008000
+#define INTE_A_SPDIF_HALFBUFFULL_ENABLE 0x00004000
#define INTE_SAMPLERATETRACKER 0x00002000 /* Enable sample rate tracker interrupts */
/* NOTE: This bit must always be enabled */
@@ -149,9 +181,8 @@
#define INTE_MIDIRXENABLE 0x00000001 /* Enable MIDI receive-buffer-empty interrupts */
#define WC 0x10 /* Wall Clock register */
-#define WC_SAMPLECOUNTER_MASK 0x03FFFFC0 /* Sample periods elapsed since reset */
-#define WC_SAMPLECOUNTER 0x14060010
-#define WC_CURRENTCHANNEL 0x0000003F /* Channel [0..63] currently being serviced */
+SUB_REG(WC, SAMPLECOUNTER, 0x03FFFFC0) /* Sample periods elapsed since reset */
+SUB_REG(WC, CURRENTCHANNEL, 0x0000003F) /* Channel [0..63] currently being serviced */
/* NOTE: Each channel takes 1/64th of a sample */
/* period to be serviced. */
@@ -180,6 +211,7 @@
#define HCFG_CODECFORMAT_MASK 0x00030000 /* CODEC format */
/* Specific to Alice2, CA0102 */
+
#define HCFG_CODECFORMAT_AC97_1 0x00000000 /* AC97 CODEC format -- Ver 1.03 */
#define HCFG_CODECFORMAT_AC97_2 0x00010000 /* AC97 CODEC format -- Ver 2.1 */
#define HCFG_AUTOMUTE_ASYNC 0x00008000 /* When set, the async sample rate convertors */
@@ -200,9 +232,8 @@
/* I2S format input */
/* Rest of HCFG 0x0000000f same as below. LOCKSOUNDCACHE etc. */
-
-
/* Older chips */
+
#define HCFG_CODECFORMAT_AC97 0x00000000 /* AC97 CODEC format -- Primary Output */
#define HCFG_CODECFORMAT_I2S 0x00010000 /* I2S CODEC format -- Secondary (Rear) Output */
#define HCFG_GPINPUT0 0x00004000 /* External pin112 */
@@ -225,9 +256,8 @@
/* async audio source */
#define HCFG_LOCKSOUNDCACHE 0x00000008 /* 1 = Cancel busmaster accesses to soundcache */
/* NOTE: This should generally never be used. */
-#define HCFG_LOCKTANKCACHE_MASK 0x00000004 /* 1 = Cancel bustmaster accesses to tankcache */
+SUB_REG(HCFG, LOCKTANKCACHE, 0x00000004) /* 1 = Cancel busmaster accesses to tankcache */
/* NOTE: This should generally never be used. */
-#define HCFG_LOCKTANKCACHE 0x01020014
#define HCFG_MUTEBUTTONENABLE 0x00000002 /* 1 = Master mute button sets AUDIOENABLE = 0. */
/* NOTE: This is a 'cheap' way to implement a */
/* master mute function on the mute button, and */
@@ -238,7 +268,7 @@
/* Should be set to 1 when the EMU10K1 is */
/* completely initialized. */
-//For Audigy, MPU port move to 0x70-0x74 ptr register
+// On Audigy, the MPU port moved to the 0x70-0x74 ptr registers
#define MUDATA 0x18 /* MPU401 data register (8 bits) */
@@ -251,11 +281,17 @@
#define MUSTAT_IRDYN 0x80 /* 0 = MIDI data or command ACK */
#define MUSTAT_ORDYN 0x40 /* 0 = MUDATA can accept a command or data */
-#define A_IOCFG 0x18 /* GPIO on Audigy card (16bits) */
-#define A_GPINPUT_MASK 0xff00
+#define A_GPIO 0x18 /* GPIO on Audigy card (16bits) */
+#define A_GPINPUT_MASK 0xff00 /* Alice/2 has 8 input pins */
+#define A3_GPINPUT_MASK 0x3f00 /* ... while Tina/2 has only 6 */
#define A_GPOUTPUT_MASK 0x00ff
+// The GPIO port is used for I/O config on Sound Blasters;
+// card-specific info can be found in the emu_chip_details table.
+// On E-MU cards the port is used as the interface to the FPGA.
+
// Audigy output/GPIO stuff taken from the kX drivers
+#define A_IOCFG A_GPIO
#define A_IOCFG_GPOUT0 0x0044 /* analog/digital */
#define A_IOCFG_DISABLE_ANALOG 0x0040 /* = 'enable' for Audigy2 (chiprev=4) */
#define A_IOCFG_ENABLE_DIGITAL 0x0004
@@ -271,19 +307,12 @@
#define A_IOCFG_REAR_JACK 0x8000
#define A_IOCFG_PHONES_JACK 0x0100 /* LiveDrive */
-/* outputs:
- * for audigy2 platinum: 0xa00
- * for a2 platinum ex: 0x1c00
- * for a1 platinum: 0x0
- */
-
#define TIMER 0x1a /* Timer terminal count register */
/* NOTE: After the rate is changed, a maximum */
/* of 1024 sample periods should be allowed */
/* before the new rate is guaranteed accurate. */
-#define TIMER_RATE_MASK 0x000003ff /* Timer interrupt rate in sample periods */
+#define TIMER_RATE_MASK 0x03ff /* Timer interrupt rate in sample periods */
/* 0 == 1024 periods, [1..4] are not useful */
-#define TIMER_RATE 0x0a00001a
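// A worked example of the rate encoding, assuming the usual 48 kHz word clock;
// timer_irq_hz() is illustrative only, not a driver helper.
static inline unsigned int timer_irq_hz(unsigned int rate)
{
	return 48000 / (rate ? rate : 1024); /* 0 encodes the full 1024 periods */
}
// E.g. a rate of 480 gives 100 Hz; 0 gives ~46.9 Hz.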
#define AC97DATA 0x1c /* AC97 register set data register (16 bit) */
@@ -317,7 +346,7 @@
/* 0x00000000 2-channel output. */
/* 0x00000200 8-channel output. */
/* 0x00000004 pauses stream/irq fail. */
- /* Rest of bits no nothing to sound output */
+ /* Rest of bits do nothing to sound output */
/* bit 0: Enable P16V audio.
* bit 1: Lock P16V record memory cache.
* bit 2: Lock P16V playback memory cache.
@@ -331,6 +360,7 @@
*/
#define IPR3 0x38 /* Cdif interrupt pending register */
#define INTE3 0x3c /* Cdif interrupt enable register. */
+
/************************************************************************************************/
/* PCI function 1 registers, address = <val> + PCIBASE1 */
/************************************************************************************************/
@@ -349,61 +379,82 @@
#define JOYSTICK_BUTTONS 0x0f /* Joystick button data */
#define JOYSTICK_COMPARATOR 0xf0 /* Joystick comparator data */
-
/********************************************************************************************************/
/* Emu10k1 pointer-offset register set, accessed through the PTR and DATA registers */
/********************************************************************************************************/
+// No official documentation was released for EMU10K1, but some info
+// about playback can be extrapolated from the EMU8K documents:
+// "AWE32/EMU8000 Programmer’s Guide" (emu8kpgm.pdf) - registers
+// "AWE32 Developer's Information Pack" (adip301.pdf) - high-level view
+
+// The short version:
+// - The engine has 64 playback channels, also called voices. The channels
+// operate independently, except when paired for stereo (see below).
+// - PCM samples are fetched into the cache; see description of CD0 below.
+// - Samples are consumed at the rate CPF_CURRENTPITCH.
+// - 8-bit samples are transformed upon use (see the sketch after this list):
+//   cooked = (raw ^ 0x80) << 8
+// - 8 samples are read at CCR_READADDRESS:CPF_FRACADDRESS and interpolated
+// according to CCCA_INTERPROM_*. With CCCA_INTERPROM_0 selected and a zero
+// CPF_FRACADDRESS, this results in CCR_READADDRESS[3] being used verbatim.
+// - The value is multiplied by CVCF_CURRENTVOL.
+// - The value goes through a filter with cutoff CVCF_CURRENTFILTER,
+//   whose delay stages are Z1 and Z2.
+// - The value is added via so-called `sends` to 4 (EMU10K1) / 8 (EMU10K2)
+//   of the 16 (EMU10K1) / 64 (EMU10K2) FX bus accumulators selected by FXRT*,
+//   each send scaled by a per-send amount (*_FXSENDAMOUNT_*).
+//   The scaling of the send amounts is exponential-ish.
+// - The DSP has a go at FXBUS* and outputs the values to EXTOUT* or EMU32OUT*.
+// - The pitch, volume, and filter cutoff can be modulated by two envelope
+// engines and two low frequency oscillators.
+// - To avoid abrupt changes to the parameters (which may cause audible
+// distortion), the modulation engine sets the target registers, towards
+// which the current registers "swerve" gradually.
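// A minimal sketch of the 8-bit transform from the list above (the helper name
// is made up; only the cooked = (raw ^ 0x80) << 8 formula comes from the notes).
static inline short emu_cook_8bit_sample(unsigned char raw)
{
	/* flipping the top bit maps unsigned [0..255] to signed [-128..127];
	 * scaling by 256 is the << 8 from the formula */
	return (short)(((int)raw - 0x80) * 256);
}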
+
+// For the odd channel in a stereo pair, these registers are meaningless:
+// CPF_STEREO, CPF_CURRENTPITCH, PTRX_PITCHTARGET, CCR_CACHEINVALIDSIZE,
+// PSST_LOOPSTARTADDR, DSL_LOOPENDADDR, CCCA_CURRADDR
+// Somewhat non-obviously, these remain meaningful:
+// CPF_STOP, CPF_FRACADDRESS, CCR_READADDRESS (!),
+// CCCA_INTERPROM, CCCA_8BITSELECT (!)
+// (The envelope engine is ignored here, as stereo matters only for verbatim playback.)
+
#define CPF 0x00 /* Current pitch and fraction register */
-#define CPF_CURRENTPITCH_MASK 0xffff0000 /* Current pitch (linear, 0x4000 == unity pitch shift) */
-#define CPF_CURRENTPITCH 0x10100000
+SUB_REG(CPF, CURRENTPITCH, 0xffff0000) /* Current pitch (linear, 0x4000 == unity pitch shift) */
#define CPF_STEREO_MASK 0x00008000 /* 1 = Even channel interleave, odd channel locked */
-#define CPF_STOP_MASK 0x00004000 /* 1 = Current pitch forced to 0 */
+SUB_REG(CPF, STOP, 0x00004000) /* 1 = Current pitch forced to 0 */
+ /* Can be set only while matching bit in SOLEx is 1 */
#define CPF_FRACADDRESS_MASK 0x00003fff /* Linear fractional address of the current channel */
#define PTRX 0x01 /* Pitch target and send A/B amounts register */
-#define PTRX_PITCHTARGET_MASK 0xffff0000 /* Pitch target of specified channel */
-#define PTRX_PITCHTARGET 0x10100001
-#define PTRX_FXSENDAMOUNT_A_MASK 0x0000ff00 /* Linear level of channel output sent to FX send bus A */
-#define PTRX_FXSENDAMOUNT_A 0x08080001
-#define PTRX_FXSENDAMOUNT_B_MASK 0x000000ff /* Linear level of channel output sent to FX send bus B */
-#define PTRX_FXSENDAMOUNT_B 0x08000001
+SUB_REG(PTRX, PITCHTARGET, 0xffff0000) /* Pitch target of specified channel */
+SUB_REG(PTRX, FXSENDAMOUNT_A, 0x0000ff00) /* Linear level of channel output sent to FX send bus A */
+SUB_REG(PTRX, FXSENDAMOUNT_B, 0x000000ff) /* Linear level of channel output sent to FX send bus B */
+// Note: the volumes are raw multipliers, so real 100% is impossible.
#define CVCF 0x02 /* Current volume and filter cutoff register */
-#define CVCF_CURRENTVOL_MASK 0xffff0000 /* Current linear volume of specified channel */
-#define CVCF_CURRENTVOL 0x10100002
-#define CVCF_CURRENTFILTER_MASK 0x0000ffff /* Current filter cutoff frequency of specified channel */
-#define CVCF_CURRENTFILTER 0x10000002
+SUB_REG(CVCF, CURRENTVOL, 0xffff0000) /* Current linear volume of specified channel */
+SUB_REG(CVCF, CURRENTFILTER, 0x0000ffff) /* Current filter cutoff frequency of specified channel */
#define VTFT 0x03 /* Volume target and filter cutoff target register */
-#define VTFT_VOLUMETARGET_MASK 0xffff0000 /* Volume target of specified channel */
-#define VTFT_VOLUMETARGET 0x10100003
-#define VTFT_FILTERTARGET_MASK 0x0000ffff /* Filter cutoff target of specified channel */
-#define VTFT_FILTERTARGET 0x10000003
+SUB_REG(VTFT, VOLUMETARGET, 0xffff0000) /* Volume target of specified channel */
+SUB_REG(VTFT, FILTERTARGET, 0x0000ffff) /* Filter cutoff target of specified channel */
#define Z1 0x05 /* Filter delay memory 1 register */
#define Z2 0x04 /* Filter delay memory 2 register */
#define PSST 0x06 /* Send C amount and loop start address register */
-#define PSST_FXSENDAMOUNT_C_MASK 0xff000000 /* Linear level of channel output sent to FX send bus C */
-
-#define PSST_FXSENDAMOUNT_C 0x08180006
+SUB_REG(PSST, FXSENDAMOUNT_C, 0xff000000) /* Linear level of channel output sent to FX send bus C */
+SUB_REG(PSST, LOOPSTARTADDR, 0x00ffffff) /* Loop start address of the specified channel */
-#define PSST_LOOPSTARTADDR_MASK 0x00ffffff /* Loop start address of the specified channel */
-#define PSST_LOOPSTARTADDR 0x18000006
-
-#define DSL 0x07 /* Send D amount and loop start address register */
-#define DSL_FXSENDAMOUNT_D_MASK 0xff000000 /* Linear level of channel output sent to FX send bus D */
-
-#define DSL_FXSENDAMOUNT_D 0x08180007
-
-#define DSL_LOOPENDADDR_MASK 0x00ffffff /* Loop end address of the specified channel */
-#define DSL_LOOPENDADDR 0x18000007
+#define DSL 0x07 /* Send D amount and loop end address register */
+SUB_REG(DSL, FXSENDAMOUNT_D, 0xff000000) /* Linear level of channel output sent to FX send bus D */
+SUB_REG(DSL, LOOPENDADDR, 0x00ffffff) /* Loop end address of the specified channel */
#define CCCA 0x08 /* Filter Q, interp. ROM, byte size, cur. addr register */
-#define CCCA_RESONANCE 0xf0000000 /* Lowpass filter resonance (Q) height */
-#define CCCA_INTERPROMMASK 0x0e000000 /* Selects passband of interpolation ROM */
+SUB_REG(CCCA, RESONANCE, 0xf0000000) /* Lowpass filter resonance (Q) height */
+#define CCCA_INTERPROM_MASK 0x0e000000 /* Selects passband of interpolation ROM */
/* 1 == full band, 7 == lowpass */
/* ROM 0 is used when pitch shifting downward or less */
/* than 3 semitones upward. Increasingly higher ROM */
@@ -418,25 +469,25 @@
#define CCCA_INTERPROM_6 0x0c000000 /* Select interpolation ROM 6 */
#define CCCA_INTERPROM_7 0x0e000000 /* Select interpolation ROM 7 */
#define CCCA_8BITSELECT 0x01000000 /* 1 = Sound memory for this channel uses 8-bit samples */
-#define CCCA_CURRADDR_MASK 0x00ffffff /* Current address of the selected channel */
-#define CCCA_CURRADDR 0x18000008
+ /* 8-bit samples are unsigned, 16-bit ones signed */
+SUB_REG(CCCA, CURRADDR, 0x00ffffff) /* Current address of the selected channel */
#define CCR 0x09 /* Cache control register */
-#define CCR_CACHEINVALIDSIZE 0x07190009
-#define CCR_CACHEINVALIDSIZE_MASK 0xfe000000 /* Number of invalid samples cache for this channel */
+SUB_REG(CCR, CACHEINVALIDSIZE, 0xfe000000) /* Number of invalid samples before the read address */
#define CCR_CACHELOOPFLAG 0x01000000 /* 1 = Cache has a loop service pending */
#define CCR_INTERLEAVEDSAMPLES 0x00800000 /* 1 = A cache service will fetch interleaved samples */
+ /* Auto-set from CPF_STEREO_MASK */
#define CCR_WORDSIZEDSAMPLES 0x00400000 /* 1 = A cache service will fetch word sized samples */
-#define CCR_READADDRESS 0x06100009
-#define CCR_READADDRESS_MASK 0x003f0000 /* Location of cache just beyond current cache service */
-#define CCR_LOOPINVALSIZE 0x0000fe00 /* Number of invalid samples in cache prior to loop */
+ /* Auto-set from CCCA_8BITSELECT */
+SUB_REG(CCR, READADDRESS, 0x003f0000) /* Next cached sample to play */
+SUB_REG(CCR, LOOPINVALSIZE, 0x0000fe00) /* Number of invalid samples in cache prior to loop */
/* NOTE: This is valid only if CACHELOOPFLAG is set */
#define CCR_LOOPFLAG 0x00000100 /* Set for a single sample period when a loop occurs */
-#define CCR_CACHELOOPADDRHI 0x000000ff /* DSL_LOOPSTARTADDR's hi byte if CACHELOOPFLAG is set */
+SUB_REG(CCR, CACHELOOPADDRHI, 0x000000ff) /* PSST_LOOPSTARTADDR's hi byte if CACHELOOPFLAG is set */
#define CLP 0x0a /* Cache loop register (valid if CCR_CACHELOOPFLAG = 1) */
/* NOTE: This register is normally not used */
-#define CLP_CACHELOOPADDR 0x0000ffff /* Cache loop address (DSL_LOOPSTARTADDR [0..15]) */
+SUB_REG(CLP, CACHELOOPADDR, 0x0000ffff) /* Cache loop address low word */
#define FXRT 0x0b /* Effects send routing register */
/* NOTE: It is illegal to assign the same routing to */
@@ -446,9 +497,7 @@
#define FXRT_CHANNELC 0x0f000000 /* Effects send bus number for channel's effects send C */
#define FXRT_CHANNELD 0xf0000000 /* Effects send bus number for channel's effects send D */
-#define A_HR 0x0b /* High Resolution. 24bit playback from host to DSP. */
#define MAPA 0x0c /* Cache map A */
-
#define MAPB 0x0d /* Cache map B */
#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
@@ -457,22 +506,22 @@
#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
-/* 0x0e, 0x0f: Not used */
+/* 0x0e, 0x0f: Internal state, at least on Audigy */
#define ENVVOL 0x10 /* Volume envelope register */
#define ENVVOL_MASK 0x0000ffff /* Current value of volume envelope state variable */
/* 0x8000-n == 666*n usec delay */
#define ATKHLDV 0x11 /* Volume envelope hold and attack register */
-#define ATKHLDV_PHASE0 0x00008000 /* 0 = Begin attack phase */
+#define ATKHLDV_PHASE0_MASK 0x00008000 /* 0 = Begin attack phase */
#define ATKHLDV_HOLDTIME_MASK 0x00007f00 /* Envelope hold time (127-n == n*88.2msec) */
#define ATKHLDV_ATTACKTIME_MASK 0x0000007f /* Envelope attack time, log encoded */
/* 0 = infinite, 1 = 10.9msec, ... 0x7f = 5.5msec */
#define DCYSUSV 0x12 /* Volume envelope sustain and decay register */
-#define DCYSUSV_PHASE1_MASK 0x00008000 /* 0 = Begin attack phase, 1 = begin release phase */
+#define DCYSUSV_PHASE1_MASK 0x00008000 /* 0 = Begin decay phase, 1 = begin release phase */
#define DCYSUSV_SUSTAINLEVEL_MASK 0x00007f00 /* 127 = full, 0 = off, 0.75dB increments */
-#define DCYSUSV_CHANNELENABLE_MASK 0x00000080 /* 1 = Inhibit envelope engine from writing values in */
+#define DCYSUSV_CHANNELENABLE_MASK 0x00000080 /* 0 = Inhibit envelope engine from writing values in */
/* this channel and from writing to pitch, filter and */
/* volume targets. */
#define DCYSUSV_DECAYTIME_MASK 0x0000007f /* Volume envelope decay time, log encoded */
@@ -487,13 +536,13 @@
/* 0x8000-n == 666*n usec delay */
#define ATKHLDM 0x15 /* Modulation envelope hold and attack register */
-#define ATKHLDM_PHASE0 0x00008000 /* 0 = Begin attack phase */
+#define ATKHLDM_PHASE0_MASK 0x00008000 /* 0 = Begin attack phase */
#define ATKHLDM_HOLDTIME 0x00007f00 /* Envelope hold time (127-n == n*42msec) */
#define ATKHLDM_ATTACKTIME 0x0000007f /* Envelope attack time, log encoded */
/* 0 = infinite, 1 = 11msec, ... 0x7f = 5.5msec */
#define DCYSUSM 0x16 /* Modulation envelope decay and sustain register */
-#define DCYSUSM_PHASE1_MASK 0x00008000 /* 0 = Begin attack phase, 1 = begin release phase */
+#define DCYSUSM_PHASE1_MASK 0x00008000 /* 0 = Begin decay phase, 1 = begin release phase */
#define DCYSUSM_SUSTAINLEVEL_MASK 0x00007f00 /* 127 = full, 0 = off, 0.75dB increments */
#define DCYSUSM_DECAYTIME_MASK 0x0000007f /* Envelope decay time, log encoded */
/* 0 = 43.7msec, 1 = 21.8msec, 0x7f = 22msec */
@@ -508,34 +557,30 @@
#define IP_UNITY 0x0000e000 /* Unity pitch shift */
#define IFATN 0x19 /* Initial filter cutoff and attenuation register */
-#define IFATN_FILTERCUTOFF_MASK 0x0000ff00 /* Initial filter cutoff frequency in exponential units */
+SUB_REG(IFATN, FILTERCUTOFF, 0x0000ff00) /* Initial filter cutoff frequency in exponential units */
/* 6 most significant bits are semitones */
/* 2 least significant bits are fractions */
-#define IFATN_FILTERCUTOFF 0x08080019
-#define IFATN_ATTENUATION_MASK 0x000000ff /* Initial attenuation in 0.375dB steps */
-#define IFATN_ATTENUATION 0x08000019
-
+SUB_REG(IFATN, ATTENUATION, 0x000000ff) /* Initial attenuation in 0.375dB steps */
#define PEFE 0x1a /* Pitch envelope and filter envelope amount register */
-#define PEFE_PITCHAMOUNT_MASK 0x0000ff00 /* Pitch envlope amount */
+SUB_REG(PEFE, PITCHAMOUNT, 0x0000ff00) /* Pitch envelope amount */
/* Signed 2's complement, +/- one octave peak extremes */
-#define PEFE_PITCHAMOUNT 0x0808001a
-#define PEFE_FILTERAMOUNT_MASK 0x000000ff /* Filter envlope amount */
+SUB_REG(PEFE, FILTERAMOUNT, 0x000000ff) /* Filter envelope amount */
/* Signed 2's complement, +/- six octaves peak extremes */
-#define PEFE_FILTERAMOUNT 0x0800001a
+
#define FMMOD 0x1b /* Vibrato/filter modulation from LFO register */
#define FMMOD_MODVIBRATO 0x0000ff00 /* Vibrato LFO modulation depth */
/* Signed 2's complement, +/- one octave extremes */
#define FMMOD_MOFILTER 0x000000ff /* Filter LFO modulation depth */
/* Signed 2's complement, +/- three octave extremes */
-
#define TREMFRQ 0x1c /* Tremolo amount and modulation LFO frequency register */
#define TREMFRQ_DEPTH 0x0000ff00 /* Tremolo depth */
/* Signed 2's complement, with +/- 12dB extremes */
-
#define TREMFRQ_FREQUENCY 0x000000ff /* Tremolo LFO frequency */
/* ??Hz steps, maximum of ?? Hz. */
+
#define FM2FRQ2 0x1d /* Vibrato amount and vibrato LFO frequency register */
#define FM2FRQ2_DEPTH 0x0000ff00 /* Vibrato LFO vibrato depth */
/* Signed 2's complement, +/- one octave extremes */
@@ -549,24 +594,22 @@
/* 0x1f: not used */
-#define CD0 0x20 /* Cache data 0 register */
-#define CD1 0x21 /* Cache data 1 register */
-#define CD2 0x22 /* Cache data 2 register */
-#define CD3 0x23 /* Cache data 3 register */
-#define CD4 0x24 /* Cache data 4 register */
-#define CD5 0x25 /* Cache data 5 register */
-#define CD6 0x26 /* Cache data 6 register */
-#define CD7 0x27 /* Cache data 7 register */
-#define CD8 0x28 /* Cache data 8 register */
-#define CD9 0x29 /* Cache data 9 register */
-#define CDA 0x2a /* Cache data A register */
-#define CDB 0x2b /* Cache data B register */
-#define CDC 0x2c /* Cache data C register */
-#define CDD 0x2d /* Cache data D register */
-#define CDE 0x2e /* Cache data E register */
-#define CDF 0x2f /* Cache data F register */
-
-/* 0x30-3f seem to be the same as 0x20-2f */
+// 32 cache registers (== 128 bytes) per channel follow.
+// In stereo mode, the two channels' caches are concatenated into one,
+// and hold the interleaved frames.
+// The cache holds 64 frames, so the upper half is not used in 8-bit mode.
+// All registers mentioned below count in frames.
+// The cache is a ring buffer; CCR_READADDRESS operates modulo 64.
+// The cache is filled from (CCCA_CURRADDR - CCR_CACHEINVALIDSIZE)
+// into (CCR_READADDRESS - CCR_CACHEINVALIDSIZE).
+// The engine has a fetch threshold of 32 bytes, so it tries to keep
+// CCR_CACHEINVALIDSIZE below 8 (16-bit stereo), 16 (16-bit mono,
+// 8-bit stereo), or 32 (8-bit mono). The actual transfers are pretty
+// unpredictable, especially if several voices are running.
+// Frames are consumed at CCR_READADDRESS, which is incremented afterwards,
+// along with CCCA_CURRADDR and CCR_CACHEINVALIDSIZE. This implies that the
+// actual playback position always lags CCCA_CURRADDR by exactly 64 frames.
+#define CD0 0x20 /* Cache data registers 0 .. 0x1f */
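// To make the arithmetic above concrete - a sketch (not driver code; the
// helpers and the 64-frame ring handling are assumptions based on the notes):
static inline unsigned int emu_cache_fill_slot(unsigned int readaddress,
					       unsigned int cacheinvalidsize)
{
	/* next cache slot to be (re)filled, in the 64-frame ring */
	return (readaddress - cacheinvalidsize) & 63;
}

static inline unsigned int emu_cache_fill_src(unsigned int curraddr,
					      unsigned int cacheinvalidsize)
{
	/* sample memory address the fill starts fetching from */
	return curraddr - cacheinvalidsize;
}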
#define PTB 0x40 /* Page table base register */
#define PTB_MASK 0xfffff000 /* Physical address of the page table in host memory */
@@ -603,20 +646,6 @@
/* is 16bit, 48KHz only. All 32 channels can be enabled */
/* simultaneously. */
-#define FXWC_DEFAULTROUTE_C (1<<0) /* left emu out? */
-#define FXWC_DEFAULTROUTE_B (1<<1) /* right emu out? */
-#define FXWC_DEFAULTROUTE_A (1<<12)
-#define FXWC_DEFAULTROUTE_D (1<<13)
-#define FXWC_ADCLEFT (1<<18)
-#define FXWC_CDROMSPDIFLEFT (1<<18)
-#define FXWC_ADCRIGHT (1<<19)
-#define FXWC_CDROMSPDIFRIGHT (1<<19)
-#define FXWC_MIC (1<<20)
-#define FXWC_ZOOMLEFT (1<<20)
-#define FXWC_ZOOMRIGHT (1<<21)
-#define FXWC_SPDIFLEFT (1<<22) /* 0x00400000 */
-#define FXWC_SPDIFRIGHT (1<<23) /* 0x00800000 */
-
#define A_TBLSZ 0x43 /* Effects Tank Internal Table Size. Only low byte of register used */
#define TCBS 0x44 /* Tank cache buffer size register */
@@ -639,7 +668,7 @@
#define FXBA 0x47 /* FX Buffer Address */
#define FXBA_MASK 0xfffff000 /* 20 bit base address */
-#define A_HWM 0x48 /* High PCI Water Mark - word access, defaults to 3f */
+#define A_HWM 0x48 /* High PCI Water Mark - word access, defaults to 3f */
#define MICBS 0x49 /* Microphone buffer size register */
@@ -647,9 +676,7 @@
#define FXBS 0x4b /* FX buffer size register */
-/* register: 0x4c..4f: ffff-ffff current amounts, per-channel */
-
-/* The following mask values define the size of the ADC, MIX and FX buffers in bytes */
+/* The following mask values define the size of the ADC, MIC and FX buffers in bytes */
#define ADCBS_BUFSIZE_NONE 0x00000000
#define ADCBS_BUFSIZE_384 0x00000001
#define ADCBS_BUFSIZE_448 0x00000002
@@ -683,38 +710,29 @@
#define ADCBS_BUFSIZE_57344 0x0000001e
#define ADCBS_BUFSIZE_65536 0x0000001f
-/* Current Send B, A Amounts */
-#define A_CSBA 0x4c
-
-/* Current Send D, C Amounts */
-#define A_CSDC 0x4d
+// On Audigy, the FX send amounts are not applied instantly, but determine
+// targets towards which the following registers swerve gradually.
+#define A_CSBA 0x4c /* FX send B & A current amounts */
+#define A_CSDC 0x4d /* FX send D & C current amounts */
+#define A_CSFE 0x4e /* FX send F & E current amounts */
+#define A_CSHG 0x4f /* FX send H & G current amounts */
-/* Current Send F, E Amounts */
-#define A_CSFE 0x4e
+// NOTE: 0x50,51,52: 64-bit (split over voices 0 & 1)
+#define CDCS 0x50 /* CD-ROM digital channel status register */
-/* Current Send H, G Amounts */
-#define A_CSHG 0x4f
+#define GPSCS 0x51 /* General Purpose SPDIF channel status register */
+// Corresponding EMU10K1_DBG_* constants are in the public header
+#define DBG 0x52
-#define CDCS 0x50 /* CD-ROM digital channel status register */
+#define A_SPSC 0x52 /* S/PDIF Input C Channel Status */
-#define GPSCS 0x51 /* General Purpose SPDIF channel status register*/
+#define REG53 0x53 /* DO NOT PROGRAM THIS REGISTER!!! MAY DESTROY CHIP */
-#define DBG 0x52 /* DO NOT PROGRAM THIS REGISTER!!! MAY DESTROY CHIP */
+// Corresponding A_DBG_* constants are in the public header
+#define A_DBG 0x53
-/* S/PDIF Input C Channel Status */
-#define A_SPSC 0x52
-
-#define REG53 0x53 /* DO NOT PROGRAM THIS REGISTER!!! MAY DESTROY CHIP */
-
-#define A_DBG 0x53
-#define A_DBG_SINGLE_STEP 0x00020000 /* Set to zero to start dsp */
-#define A_DBG_ZC 0x40000000 /* zero tram counter */
-#define A_DBG_STEP_ADDR 0x000003ff
-#define A_DBG_SATURATION_OCCURED 0x20000000
-#define A_DBG_SATURATION_ADDR 0x0ffc0000
-
-// NOTE: 0x54,55,56: 64-bit
+// NOTE: 0x54,55,56: 64-bit (split over voices 0 & 1)
#define SPCS0 0x54 /* SPDIF output Channel Status 0 register */
#define SPCS1 0x55 /* SPDIF output Channel Status 1 register */
@@ -747,17 +765,17 @@
/* 0x57: Not used */
-/* The 32-bit CLIx and SOLx registers all have one bit per channel control/status */
+/* The 32-bit CLIx and SOLEx registers all have one bit per channel control/status */
#define CLIEL 0x58 /* Channel loop interrupt enable low register */
-
#define CLIEH 0x59 /* Channel loop interrupt enable high register */
#define CLIPL 0x5a /* Channel loop interrupt pending low register */
-
#define CLIPH 0x5b /* Channel loop interrupt pending high register */
+// These cause CPF_STOP_MASK to be set shortly after CCCA_CURRADDR passes DSL_LOOPENDADDR.
+// Subsequent changes to the address registers don't resume playback; clearing the bit here or in CPF does.
+// The registers are NOT synchronized; the next serviced channel picks up immediately.
#define SOLEL 0x5c /* Stop on loop enable low register */
-
#define SOLEH 0x5d /* Stop on loop enable high register */
#define SPBYPASS 0x5e /* SPDIF BYPASS mode register */
@@ -767,13 +785,12 @@
#define SPBYPASS_FORMAT 0x00000f00 /* If 1, SPDIF XX uses 24 bits; if 0, 20 bits */
#define AC97SLOT 0x5f /* additional AC97 slots enable bits */
-#define AC97SLOT_REAR_RIGHT 0x01 /* Rear left */
-#define AC97SLOT_REAR_LEFT 0x02 /* Rear right */
-#define AC97SLOT_CNTR 0x10 /* Center enable */
-#define AC97SLOT_LFE 0x20 /* LFE enable */
+#define AC97SLOT_REAR_RIGHT 0x01 /* Rear left */
+#define AC97SLOT_REAR_LEFT 0x02 /* Rear right */
+#define AC97SLOT_CNTR 0x10 /* Center enable */
+#define AC97SLOT_LFE 0x20 /* LFE enable */
-/* PCB Revision */
-#define A_PCB 0x5f
+#define A_PCB 0x5f /* PCB Revision */
// NOTE: 0x60,61,62: 64-bit
#define CDSRCS 0x60 /* CD-ROM Sample Rate Converter status register */
@@ -796,44 +813,35 @@
#define SRCS_SPDIFRATE_96 0x00080000
#define MICIDX 0x63 /* Microphone recording buffer index register */
-#define MICIDX_MASK 0x0000ffff /* 16-bit value */
-#define MICIDX_IDX 0x10000063
+SUB_REG(MICIDX, IDX, 0x0000ffff)
#define ADCIDX 0x64 /* ADC recording buffer index register */
-#define ADCIDX_MASK 0x0000ffff /* 16 bit index field */
-#define ADCIDX_IDX 0x10000064
+SUB_REG(ADCIDX, IDX, 0x0000ffff)
#define A_ADCIDX 0x63
-#define A_ADCIDX_IDX 0x10000063
+SUB_REG(A_ADCIDX, IDX, 0x0000ffff)
#define A_MICIDX 0x64
-#define A_MICIDX_IDX 0x10000064
+SUB_REG(A_MICIDX, IDX, 0x0000ffff)
#define FXIDX 0x65 /* FX recording buffer index register */
-#define FXIDX_MASK 0x0000ffff /* 16-bit value */
-#define FXIDX_IDX 0x10000065
+SUB_REG(FXIDX, IDX, 0x0000ffff)
-/* The 32-bit HLIx and HLIPx registers all have one bit per channel control/status */
+/* The 32-bit HLIEx and HLIPx registers all have one bit per channel control/status */
#define HLIEL 0x66 /* Channel half loop interrupt enable low register */
-
#define HLIEH 0x67 /* Channel half loop interrupt enable high register */
#define HLIPL 0x68 /* Channel half loop interrupt pending low register */
-
#define HLIPH 0x69 /* Channel half loop interrupt pending high register */
-/* S/PDIF Host Record Index (bypasses SRC) */
-#define A_SPRI 0x6a
-/* S/PDIF Host Record Address */
-#define A_SPRA 0x6b
-/* S/PDIF Host Record Control */
-#define A_SPRC 0x6c
-/* Delayed Interrupt Counter & Enable */
-#define A_DICE 0x6d
-/* Tank Table Base */
-#define A_TTB 0x6e
-/* Tank Delay Offset */
-#define A_TDOF 0x6f
+#define A_SPRI 0x6a /* S/PDIF Host Record Index (bypasses SRC) */
+#define A_SPRA 0x6b /* S/PDIF Host Record Address */
+#define A_SPRC 0x6c /* S/PDIF Host Record Control */
+
+#define A_DICE 0x6d /* Delayed Interrupt Counter & Enable */
+
+#define A_TTB 0x6e /* Tank Table Base */
+#define A_TDOF 0x6f /* Tank Delay Offset */
/* This is the MPU port on the card (via the game port) */
#define A_MUDATA1 0x70
@@ -846,48 +854,58 @@
#define A_MUSTAT2 A_MUCMD2
/* The next two are the Audigy equivalent of FXWC */
-/* the Audigy can record any output (16bit, 48kHz, up to 64 channel simultaneously) */
+/* the Audigy can record any output (16bit, 48kHz, up to 64 channels simultaneously) */
/* Each bit selects a channel for recording */
#define A_FXWC1 0x74 /* Selects 0x7f-0x60 for FX recording */
#define A_FXWC2 0x75 /* Selects 0x9f-0x80 for FX recording */
-/* Extended Hardware Control */
-#define A_SPDIF_SAMPLERATE 0x76 /* Set the sample rate of SPDIF output */
-#define A_SAMPLE_RATE 0x76 /* Various sample rate settings. */
-#define A_SAMPLE_RATE_NOT_USED 0x0ffc111e /* Bits that are not used and cannot be set. */
-#define A_SAMPLE_RATE_UNKNOWN 0xf0030001 /* Bits that can be set, but have unknown use. */
+#define A_EHC 0x76 /* Extended Hardware Control */
+
+#define A_SPDIF_SAMPLERATE A_EHC /* Set the sample rate of SPDIF output */
#define A_SPDIF_RATE_MASK 0x000000e0 /* Any other values for rates, just use 48000 */
-#define A_SPDIF_48000 0x00000000
+#define A_SPDIF_48000 0x00000000 /* kX calls this BYPASS */
#define A_SPDIF_192000 0x00000020
#define A_SPDIF_96000 0x00000040
#define A_SPDIF_44100 0x00000080
+#define A_SPDIF_MUTED 0x000000c0
+
+SUB_REG_NC(A_EHC, A_I2S_CAPTURE_RATE, 0x00000e00) /* This sets the capture PCM rate, but it is */
+ /* unclear if this sets the ADC rate as well. */
+#define A_I2S_CAPTURE_48000 0x0
+#define A_I2S_CAPTURE_192000 0x1
+#define A_I2S_CAPTURE_96000 0x2
+#define A_I2S_CAPTURE_44100 0x4
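// A sketch of composing the field, assuming SUB_REG_NC() yields
// A_I2S_CAPTURE_RATE_MASK and that the values above are unshifted field
// values (unlike the pre-shifted constants they replace); illustrative only.
static inline unsigned int a_ehc_with_capture_rate(unsigned int ehc, unsigned int rate)
{
	return (ehc & ~A_I2S_CAPTURE_RATE_MASK) | (rate << 9); /* mask 0x00000e00 starts at bit 9 */
}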
+
+#define A_EHC_SRC48_MASK 0x0000e000 /* This sets the playback PCM rate on the P16V */
+#define A_EHC_SRC48_BYPASS 0x00000000
+#define A_EHC_SRC48_192 0x00002000
+#define A_EHC_SRC48_96 0x00004000
+#define A_EHC_SRC48_44 0x00008000
+#define A_EHC_SRC48_MUTED 0x0000c000
+
+#define A_EHC_P17V_TVM 0x00000001 /* Tank virtual memory mode */
+#define A_EHC_P17V_SEL0_MASK 0x00030000 /* Aka A_EHC_P16V_PB_RATE; 00: 48, 01: 44.1, 10: 96, 11: 192 */
+#define A_EHC_P17V_SEL1_MASK 0x000c0000
+#define A_EHC_P17V_SEL2_MASK 0x00300000
+#define A_EHC_P17V_SEL3_MASK 0x00c00000
+
+#define A_EHC_ASYNC_BYPASS 0x80000000
+
+#define A_SRT3 0x77 /* I2S0 Sample Rate Tracker Status */
+#define A_SRT4 0x78 /* I2S1 Sample Rate Tracker Status */
+#define A_SRT5 0x79 /* I2S2 Sample Rate Tracker Status */
+/* - default to 0x01080000 on my audigy 2 ZS --rlrevell */
-#define A_I2S_CAPTURE_RATE_MASK 0x00000e00 /* This sets the capture PCM rate, but it is */
-#define A_I2S_CAPTURE_48000 0x00000000 /* unclear if this sets the ADC rate as well. */
-#define A_I2S_CAPTURE_192000 0x00000200
-#define A_I2S_CAPTURE_96000 0x00000400
-#define A_I2S_CAPTURE_44100 0x00000800
-
-#define A_PCM_RATE_MASK 0x0000e000 /* This sets the playback PCM rate on the P16V */
-#define A_PCM_48000 0x00000000
-#define A_PCM_192000 0x00002000
-#define A_PCM_96000 0x00004000
-#define A_PCM_44100 0x00008000
-
-/* I2S0 Sample Rate Tracker Status */
-#define A_SRT3 0x77
-
-/* I2S1 Sample Rate Tracker Status */
-#define A_SRT4 0x78
+#define A_SRT_ESTSAMPLERATE 0x001fffff
+#define A_SRT_RATELOCKED 0x01000000
-/* I2S2 Sample Rate Tracker Status */
-#define A_SRT5 0x79
-/* - default to 0x01080000 on my audigy 2 ZS --rlrevell */
+#define A_TTDA 0x7a /* Tank Table DMA Address */
+#define A_TTDD 0x7b /* Tank Table DMA Data */
-/* Tank Table DMA Address */
-#define A_TTDA 0x7a
-/* Tank Table DMA Data */
-#define A_TTDD 0x7b
+// In A_FXRT1 & A_FXRT2, the 0x80 bit of each byte completely disables the
+// filter (CVCF_CURRENTFILTER) for the corresponding channel. There is no
+// effect on the volume (CVCF_CURRENTVOL) or the interpolator's filter
+// (CCCA_INTERPROM_MASK).
#define A_FXRT2 0x7c
#define A_FXRT_CHANNELE 0x0000003f /* Effects send bus number for channel's effects send E */
@@ -900,8 +918,7 @@
#define A_FXSENDAMOUNT_F_MASK 0x00FF0000
#define A_FXSENDAMOUNT_G_MASK 0x0000FF00
#define A_FXSENDAMOUNT_H_MASK 0x000000FF
-/* 0x7c, 0x7e "high bit is used for filtering" */
-
+
/* The send amounts for this one are the same as used with the emu10k1 */
#define A_FXRT1 0x7e
#define A_FXRT_CHANNELA 0x0000003f
@@ -910,55 +927,56 @@
#define A_FXRT_CHANNELD 0x3f000000
/* 0x7f: Not used */
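// A sketch of packing a routing value, assuming the 6-bit bus fields above and
// the per-byte 0x80 filter-disable flag described before A_FXRT2; a_fxrt_pack()
// is illustrative, not a driver helper.
static inline unsigned int a_fxrt_pack(const unsigned char bus[4], int no_filter)
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < 4; i++)
		val |= (unsigned int)((bus[i] & 0x3f) | (no_filter ? 0x80 : 0)) << (i * 8);
	return val;
}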
-/* Each FX general purpose register is 32 bits in length, all bits are used */
-#define FXGPREGBASE 0x100 /* FX general purpose registers base */
-#define A_FXGPREGBASE 0x400 /* Audigy GPRs, 0x400 to 0x5ff */
-
-#define A_TANKMEMCTLREGBASE 0x100 /* Tank memory control registers base - only for Audigy */
-#define A_TANKMEMCTLREG_MASK 0x1f /* only 5 bits used - only for Audigy */
-
-/* Tank audio data is logarithmically compressed down to 16 bits before writing to TRAM and is */
-/* decompressed back to 20 bits on a read. There are a total of 160 locations, the last 32 */
-/* locations are for external TRAM. */
-#define TANKMEMDATAREGBASE 0x200 /* Tank memory data registers base */
-#define TANKMEMDATAREG_MASK 0x000fffff /* 20 bit tank audio data field */
-
-/* Combined address field and memory opcode or flag field. 160 locations, last 32 are external */
-#define TANKMEMADDRREGBASE 0x300 /* Tank memory address registers base */
-#define TANKMEMADDRREG_ADDR_MASK 0x000fffff /* 20 bit tank address field */
-#define TANKMEMADDRREG_CLEAR 0x00800000 /* Clear tank memory */
-#define TANKMEMADDRREG_ALIGN 0x00400000 /* Align read or write relative to tank access */
-#define TANKMEMADDRREG_WRITE 0x00200000 /* Write to tank memory */
-#define TANKMEMADDRREG_READ 0x00100000 /* Read from tank memory */
-#define MICROCODEBASE 0x400 /* Microcode data base address */
+/* The public header defines the GPR and TRAM base addresses that
+ * are valid for _both_ CPU and DSP addressing. */
/* Each DSP microcode instruction is mapped into 2 doublewords */
/* NOTE: When writing, always write the LO doubleword first. Reads can be in either order. */
-#define LOWORD_OPX_MASK 0x000ffc00 /* Instruction operand X */
-#define LOWORD_OPY_MASK 0x000003ff /* Instruction operand Y */
-#define HIWORD_OPCODE_MASK 0x00f00000 /* Instruction opcode */
-#define HIWORD_RESULT_MASK 0x000ffc00 /* Instruction result */
-#define HIWORD_OPA_MASK 0x000003ff /* Instruction operand A */
+#define MICROCODEBASE 0x400 /* Microcode data base address */
+#define A_MICROCODEBASE 0x600
-/* Audigy Soundcard have a different instruction format */
-#define A_MICROCODEBASE 0x600
-#define A_LOWORD_OPY_MASK 0x000007ff
-#define A_LOWORD_OPX_MASK 0x007ff000
-#define A_HIWORD_OPCODE_MASK 0x0f000000
-#define A_HIWORD_RESULT_MASK 0x007ff000
-#define A_HIWORD_OPA_MASK 0x000007ff
+/************************************************************************************************/
+/* E-MU Digital Audio System overview */
+/************************************************************************************************/
+
+// - These cards use a regular PCI-attached Audigy chip (Alice2/Tina/Tina2);
+// the PCIe variants simply put the Audigy chip behind a PCI bridge.
+// - All physical PCM I/O is routed through an additional FPGA; the regular
+// EXTIN/EXTOUT ports are unconnected.
+// - The FPGA has a signal routing matrix, to connect each destination (output
+// socket or capture channel) to a source (input socket or playback channel).
+// - The FPGA is controlled via Audigy's GPIO port, while sample data is
+// transmitted via proprietary EMU32 serial links. On first-generation
+// E-MU 1010 cards, Audigy's I2S inputs are also used for sample data.
+// - The Audio/Micro Dock is attached to Hana via EDI, a "network" link.
+// - The Audigy chip operates in slave mode; the clock is supplied by the FPGA.
+// Gen1 E-MU 1010 cards have two crystals (for 44.1 kHz and 48 kHz multiples),
+// while the later cards use a single crystal and a PLL chip.
+// - The whole card is switched to 2x/4x mode to achieve 88.2/96/176.4/192 kHz
+// sample rates. Alice2/Tina keeps running at 44.1/48 kHz, but multiple channels
+// are bundled.
+// - The number of available EMU32/EDI channels is reduced in 2x/4x mode, so the
+//   total number of usable inputs/outputs is limited, esp. with ADAT in use.
+// - S/PDIF is unavailable in 4x mode (only over TOSLINK on newer 1010 cards) due
+// to being unspecified at 176.4/192 kHz. Therefore, the Dock's S/PDIF channels
+// can overlap with the Dock's ADC/DAC's high channels.
+// - The code names are mentioned below and in the emu_chip_details table.
/************************************************************************************************/
-/* EMU1010m HANA FPGA registers */
+/* EMU1010 FPGA registers */
/************************************************************************************************/
+
#define EMU_HANA_DESTHI 0x00 /* 0000xxx 3 bits Link Destination */
#define EMU_HANA_DESTLO 0x01 /* 00xxxxx 5 bits */
+
#define EMU_HANA_SRCHI 0x02 /* 0000xxx 3 bits Link Source */
#define EMU_HANA_SRCLO 0x03 /* 00xxxxx 5 bits */
+
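// A sketch of how a route is programmed, based on the register layout above;
// the EMU_DST_*/EMU_SRC_* values further below carry the 3-bit group in the
// high byte and the 5-bit channel in the low byte.
void fpga_write(unsigned int reg, unsigned int val); /* hypothetical write helper */

static inline void emu_fpga_route(unsigned short dst, unsigned short src)
{
	fpga_write(EMU_HANA_DESTHI, (dst >> 8) & 0x07);
	fpga_write(EMU_HANA_DESTLO, dst & 0x1f);
	fpga_write(EMU_HANA_SRCHI, (src >> 8) & 0x07);
	fpga_write(EMU_HANA_SRCLO, src & 0x1f);
}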
#define EMU_HANA_DOCK_PWR 0x04 /* 000000x 1 bit Audio Dock power */
#define EMU_HANA_DOCK_PWR_ON 0x01 /* Audio Dock power on */
+
#define EMU_HANA_WCLOCK 0x05 /* 0000xxx 3 bits Word Clock source select */
/* Must be written after power on to reset DLL */
/* One is unable to detect the Audio dock without this */
@@ -967,7 +985,7 @@
#define EMU_HANA_WCLOCK_INT_44_1K 0x01
#define EMU_HANA_WCLOCK_HANA_SPDIF_IN 0x02
#define EMU_HANA_WCLOCK_HANA_ADAT_IN 0x03
-#define EMU_HANA_WCLOCK_SYNC_BNCN 0x04
+#define EMU_HANA_WCLOCK_SYNC_BNC 0x04
#define EMU_HANA_WCLOCK_2ND_HANA 0x05
#define EMU_HANA_WCLOCK_SRC_RESERVED 0x06
#define EMU_HANA_WCLOCK_OFF 0x07 /* For testing, forces fallback to DEFCLOCK */
@@ -977,6 +995,9 @@
#define EMU_HANA_WCLOCK_4X 0x10
#define EMU_HANA_WCLOCK_MULT_RESERVED 0x18
+// If the selected external clock source is/becomes invalid or incompatible
+// with the clock multiplier, the clock source is reset to this value, and
+// a WCLK_CHANGED interrupt is raised.
#define EMU_HANA_DEFCLOCK 0x06 /* 000000x 1 bit Default Word Clock */
#define EMU_HANA_DEFCLOCK_48K 0x00
#define EMU_HANA_DEFCLOCK_44_1K 0x01
@@ -996,10 +1017,10 @@
#define EMU_HANA_IRQ_DOCK_LOST 0x08
#define EMU_HANA_SPDIF_MODE 0x0a /* 00xxxxx 5 bits SPDIF MODE */
-#define EMU_HANA_SPDIF_MODE_TX_COMSUMER 0x00
+#define EMU_HANA_SPDIF_MODE_TX_CONSUMER 0x00
#define EMU_HANA_SPDIF_MODE_TX_PRO 0x01
#define EMU_HANA_SPDIF_MODE_TX_NOCOPY 0x02
-#define EMU_HANA_SPDIF_MODE_RX_COMSUMER 0x00
+#define EMU_HANA_SPDIF_MODE_RX_CONSUMER 0x00
#define EMU_HANA_SPDIF_MODE_RX_PRO 0x04
#define EMU_HANA_SPDIF_MODE_RX_NOCOPY 0x08
#define EMU_HANA_SPDIF_MODE_RX_INVALID 0x10
@@ -1011,8 +1032,12 @@
#define EMU_HANA_OPTICAL_OUT_ADAT 0x02
#define EMU_HANA_MIDI_IN 0x0c /* 000000x 1 bit Control MIDI */
-#define EMU_HANA_MIDI_IN_FROM_HAMOA 0x00 /* HAMOA MIDI in to Alice 2 MIDI B */
-#define EMU_HANA_MIDI_IN_FROM_DOCK 0x01 /* Audio Dock MIDI in to Alice 2 MIDI B */
+#define EMU_HANA_MIDI_INA_FROM_HAMOA 0x01 /* HAMOA MIDI in to Alice 2 MIDI A */
+#define EMU_HANA_MIDI_INA_FROM_DOCK1 0x02 /* Audio Dock-1 MIDI in to Alice 2 MIDI A */
+#define EMU_HANA_MIDI_INA_FROM_DOCK2 0x03 /* Audio Dock-2 MIDI in to Alice 2 MIDI A */
+#define EMU_HANA_MIDI_INB_FROM_HAMOA 0x08 /* HAMOA MIDI in to Alice 2 MIDI B */
+#define EMU_HANA_MIDI_INB_FROM_DOCK1 0x10 /* Audio Dock-1 MIDI in to Alice 2 MIDI B */
+#define EMU_HANA_MIDI_INB_FROM_DOCK2 0x18 /* Audio Dock-2 MIDI in to Alice 2 MIDI B */
#define EMU_HANA_DOCK_LEDS_1 0x0d /* 000xxxx 4 bit Audio Dock LEDs */
#define EMU_HANA_DOCK_LEDS_1_MIDI1 0x01 /* MIDI 1 LED on */
@@ -1037,51 +1062,49 @@
#define EMU_HANA_DOCK_LEDS_3_MANUAL_SIGNAL 0x20 /* Manual Signal detection */
#define EMU_HANA_ADC_PADS 0x10 /* 0000xxx 3 bit Audio Dock ADC 14dB pads */
-#define EMU_HANA_DOCK_ADC_PAD1 0x01 /* 14dB Attenuation on Audio Dock ADC 1 */
-#define EMU_HANA_DOCK_ADC_PAD2 0x02 /* 14dB Attenuation on Audio Dock ADC 2 */
-#define EMU_HANA_DOCK_ADC_PAD3 0x04 /* 14dB Attenuation on Audio Dock ADC 3 */
-#define EMU_HANA_0202_ADC_PAD1 0x08 /* 14dB Attenuation on 0202 ADC 1 */
+#define EMU_HANA_DOCK_ADC_PAD1 0x01 /* 14dB Attenuation on Audio Dock ADC 1 */
+#define EMU_HANA_DOCK_ADC_PAD2 0x02 /* 14dB Attenuation on Audio Dock ADC 2 */
+#define EMU_HANA_DOCK_ADC_PAD3 0x04 /* 14dB Attenuation on Audio Dock ADC 3 */
+#define EMU_HANA_0202_ADC_PAD1 0x08 /* 14dB Attenuation on 0202 ADC 1 */
#define EMU_HANA_DOCK_MISC 0x11 /* 0xxxxxx 6 bit Audio Dock misc bits */
-#define EMU_HANA_DOCK_DAC1_MUTE 0x01 /* DAC 1 Mute */
-#define EMU_HANA_DOCK_DAC2_MUTE 0x02 /* DAC 2 Mute */
-#define EMU_HANA_DOCK_DAC3_MUTE 0x04 /* DAC 3 Mute */
-#define EMU_HANA_DOCK_DAC4_MUTE 0x08 /* DAC 4 Mute */
+#define EMU_HANA_DOCK_DAC1_MUTE 0x01 /* DAC 1 Mute */
+#define EMU_HANA_DOCK_DAC2_MUTE 0x02 /* DAC 2 Mute */
+#define EMU_HANA_DOCK_DAC3_MUTE 0x04 /* DAC 3 Mute */
+#define EMU_HANA_DOCK_DAC4_MUTE 0x08 /* DAC 4 Mute */
#define EMU_HANA_DOCK_PHONES_192_DAC1 0x00 /* DAC 1 Headphones source at 192kHz */
#define EMU_HANA_DOCK_PHONES_192_DAC2 0x10 /* DAC 2 Headphones source at 192kHz */
#define EMU_HANA_DOCK_PHONES_192_DAC3 0x20 /* DAC 3 Headphones source at 192kHz */
#define EMU_HANA_DOCK_PHONES_192_DAC4 0x30 /* DAC 4 Headphones source at 192kHz */
#define EMU_HANA_MIDI_OUT 0x12 /* 00xxxxx 5 bit Source for each MIDI out port */
-#define EMU_HANA_MIDI_OUT_0202 0x01 /* 0202 MIDI from Alice 2. 0 = A, 1 = B */
-#define EMU_HANA_MIDI_OUT_DOCK1 0x02 /* Audio Dock MIDI1 front, from Alice 2. 0 = A, 1 = B */
-#define EMU_HANA_MIDI_OUT_DOCK2 0x04 /* Audio Dock MIDI2 rear, from Alice 2. 0 = A, 1 = B */
-#define EMU_HANA_MIDI_OUT_SYNC2 0x08 /* Sync card. Not the actual MIDI out jack. 0 = A, 1 = B */
-#define EMU_HANA_MIDI_OUT_LOOP 0x10 /* 0 = bits (3:0) normal. 1 = MIDI loopback enabled. */
+#define EMU_HANA_MIDI_OUT_0202 0x01 /* 0202 MIDI from Alice 2. 0 = A, 1 = B */
+#define EMU_HANA_MIDI_OUT_DOCK1 0x02 /* Audio Dock MIDI1 front, from Alice 2. 0 = A, 1 = B */
+#define EMU_HANA_MIDI_OUT_DOCK2 0x04 /* Audio Dock MIDI2 rear, from Alice 2. 0 = A, 1 = B */
+#define EMU_HANA_MIDI_OUT_SYNC2 0x08 /* Sync card. Not the actual MIDI out jack. 0 = A, 1 = B */
+#define EMU_HANA_MIDI_OUT_LOOP 0x10 /* 0 = bits (3:0) normal. 1 = MIDI loopback enabled. */
#define EMU_HANA_DAC_PADS 0x13 /* 00xxxxx 5 bit DAC 14dB attenuation pads */
-#define EMU_HANA_DOCK_DAC_PAD1 0x01 /* 14dB Attenuation on AudioDock DAC 1. Left and Right */
-#define EMU_HANA_DOCK_DAC_PAD2 0x02 /* 14dB Attenuation on AudioDock DAC 2. Left and Right */
-#define EMU_HANA_DOCK_DAC_PAD3 0x04 /* 14dB Attenuation on AudioDock DAC 3. Left and Right */
-#define EMU_HANA_DOCK_DAC_PAD4 0x08 /* 14dB Attenuation on AudioDock DAC 4. Left and Right */
-#define EMU_HANA_0202_DAC_PAD1 0x10 /* 14dB Attenuation on 0202 DAC 1. Left and Right */
+#define EMU_HANA_DOCK_DAC_PAD1 0x01 /* 14dB Attenuation on AudioDock DAC 1. Left and Right */
+#define EMU_HANA_DOCK_DAC_PAD2 0x02 /* 14dB Attenuation on AudioDock DAC 2. Left and Right */
+#define EMU_HANA_DOCK_DAC_PAD3 0x04 /* 14dB Attenuation on AudioDock DAC 3. Left and Right */
+#define EMU_HANA_DOCK_DAC_PAD4 0x08 /* 14dB Attenuation on AudioDock DAC 4. Left and Right */
+#define EMU_HANA_0202_DAC_PAD1 0x10 /* 14dB Attenuation on 0202 DAC 1. Left and Right */
/* 0x14 - 0x1f Unused R/W registers */
-#define EMU_HANA_IRQ_STATUS 0x20 /* 000xxxx 4 bits IRQ Status */
-#if 0 /* Already defined for reg 0x09 IRQ_ENABLE */
-#define EMU_HANA_IRQ_WCLK_CHANGED 0x01
-#define EMU_HANA_IRQ_ADAT 0x02
-#define EMU_HANA_IRQ_DOCK 0x04
-#define EMU_HANA_IRQ_DOCK_LOST 0x08
-#endif
+
+#define EMU_HANA_IRQ_STATUS 0x20 /* 00xxxxx 5 bits IRQ Status */
+ /* Same bits as for EMU_HANA_IRQ_ENABLE */
+ /* Reading the register resets it. */
#define EMU_HANA_OPTION_CARDS 0x21 /* 000xxxx 4 bits Presence of option cards */
-#define EMU_HANA_OPTION_HAMOA 0x01 /* HAMOA card present */
-#define EMU_HANA_OPTION_SYNC 0x02 /* Sync card present */
-#define EMU_HANA_OPTION_DOCK_ONLINE 0x04 /* Audio Dock online and FPGA configured */
-#define EMU_HANA_OPTION_DOCK_OFFLINE 0x08 /* Audio Dock online and FPGA not configured */
+#define EMU_HANA_OPTION_HAMOA 0x01 /* Hamoa (analog I/O) card present */
+#define EMU_HANA_OPTION_SYNC 0x02 /* Sync card present */
+#define EMU_HANA_OPTION_DOCK_ONLINE 0x04 /* Audio/Micro dock present and FPGA configured */
+#define EMU_HANA_OPTION_DOCK_OFFLINE 0x08 /* Audio/Micro dock present and FPGA not configured */
-#define EMU_HANA_ID 0x22 /* 1010101 7 bits ID byte & 0x7f = 0x55 */
+#define EMU_HANA_ID 0x22 /* 1010101 7 bits ID byte & 0x7f = 0x55 with Alice2 */
+ /* 0010101 5 bits ID byte & 0x1f = 0x15 with Tina/2 */
#define EMU_HANA_MAJOR_REV 0x23 /* 0000xxx 3 bit Hana FPGA Major rev */
#define EMU_HANA_MINOR_REV 0x24 /* 0000xxx 3 bit Hana FPGA Minor rev */
@@ -1090,8 +1113,11 @@
#define EMU_DOCK_MINOR_REV 0x26 /* 0000xxx 3 bit Audio Dock FPGA Minor rev */
#define EMU_DOCK_BOARD_ID 0x27 /* 00000xx 2 bits Audio Dock ID pins */
-#define EMU_DOCK_BOARD_ID0 0x00 /* ID bit 0 */
-#define EMU_DOCK_BOARD_ID1 0x03 /* ID bit 1 */
+#define EMU_DOCK_BOARD_ID0 0x00 /* ID bit 0 */
+#define EMU_DOCK_BOARD_ID1 0x03 /* ID bit 1 */
+
+// The actual code disagrees about the bit width of the registers -
+// the formula used is freq = 0x1770000 / (((X_HI << 5) | X_LO) + 1)
#define EMU_HANA_WC_SPDIF_HI 0x28 /* 0xxxxxx 6 bit SPDIF IN Word clock, upper 6 bits */
#define EMU_HANA_WC_SPDIF_LO 0x29 /* 0xxxxxx 6 bit SPDIF IN Word clock, lower 6 bits */
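// A sketch of the measurement formula quoted above; 0x1770000 is 24576000,
// i.e. a 24.576 MHz reference clock (512 * 48 kHz). emu_wclock_hz() is
// illustrative only.
static inline unsigned int emu_wclock_hz(unsigned int hi, unsigned int lo)
{
	return 0x1770000 / (((hi << 5) | lo) + 1); /* a 48 kHz input reads back as 511 */
}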
@@ -1104,31 +1130,35 @@
#define EMU_HANA2_WC_SPDIF_HI 0x2e /* 0xxxxxx 6 bit HANA2 SPDIF IN Word clock, upper 6 bits */
#define EMU_HANA2_WC_SPDIF_LO 0x2f /* 0xxxxxx 6 bit HANA2 SPDIF IN Word clock, lower 6 bits */
+
/* 0x30 - 0x3f Unused Read only registers */
+// The meaning of this is not clear; kX-project just calls it "lock" in some info-only code.
+#define EMU_HANA_LOCK_STS_LO 0x38 /* 0xxxxxx lower 6 bits */
+#define EMU_HANA_LOCK_STS_HI 0x39 /* 0xxxxxx upper 6 bits */
+
/************************************************************************************************/
-/* EMU1010m HANA Destinations */
+/* EMU1010 Audio Destinations */
/************************************************************************************************/
-/* Hana, original 1010,1212,1820 using Alice2
- * Destiniations for SRATEX = 1X rates: 44.1 kHz or 48 kHz
+/* Hana, original 1010,1212m,1820[m] using Alice2
* 0x00, 0x00-0x0f: 16 EMU32 channels to Alice2
- * 0x01, 0x10-0x1f: 32 Elink channels to Audio Dock
- * 0x01, 0x00: Dock DAC 1 Left
- * 0x01, 0x04: Dock DAC 1 Right
- * 0x01, 0x08: Dock DAC 2 Left
- * 0x01, 0x0c: Dock DAC 2 Right
- * 0x01, 0x10: Dock DAC 3 Left
- * 0x01, 0x12: PHONES Left
- * 0x01, 0x14: Dock DAC 3 Right
- * 0x01, 0x16: PHONES Right
- * 0x01, 0x18: Dock DAC 4 Left
- * 0x01, 0x1a: S/PDIF Left
- * 0x01, 0x1c: Dock DAC 4 Right
- * 0x01, 0x1e: S/PDIF Right
+ * 0x01, 0x00-0x1f: 32 EDI channels to Audio Dock
+ * 0x00: Dock DAC 1 Left
+ * 0x04: Dock DAC 1 Right
+ * 0x08: Dock DAC 2 Left
+ * 0x0c: Dock DAC 2 Right
+ * 0x10: Dock DAC 3 Left
+ * 0x12: PHONES Left (n/a in 2x/4x mode; output mirrors DAC4 Left)
+ * 0x14: Dock DAC 3 Right
+ * 0x16: PHONES Right (n/a in 2x/4x mode; output mirrors DAC4 Right)
+ * 0x18: Dock DAC 4 Left
+ * 0x1a: S/PDIF Left
+ * 0x1c: Dock DAC 4 Right
+ * 0x1e: S/PDIF Right
* 0x02, 0x00: Hana S/PDIF Left
* 0x02, 0x01: Hana S/PDIF Right
- * 0x03, 0x00: Hanoa DAC Left
- * 0x03, 0x01: Hanoa DAC Right
+ * 0x03, 0x00: Hamoa DAC Left
+ * 0x03, 0x01: Hamoa DAC Right
* 0x04, 0x00-0x07: Hana ADAT
* 0x05, 0x00: I2S0 Left to Alice2
* 0x05, 0x01: I2S0 Right to Alice2
@@ -1140,40 +1170,29 @@
* Hana2 never released, but used Tina
* Not needed.
*
- * Hana3, rev2 1010,1212,1616 using Tina
- * Destinations for SRATEX = 1X rates: 44.1 kHz or 48 kHz
+ * Hana3, rev2 1010,1212m,1616[m] using Tina
* 0x00, 0x00-0x0f: 16 EMU32A channels to Tina
- * 0x01, 0x10-0x1f: 32 EDI channels to Micro Dock
- * 0x01, 0x00: Dock DAC 1 Left
- * 0x01, 0x04: Dock DAC 1 Right
- * 0x01, 0x08: Dock DAC 2 Left
- * 0x01, 0x0c: Dock DAC 2 Right
- * 0x01, 0x10: Dock DAC 3 Left
- * 0x01, 0x12: Dock S/PDIF Left
- * 0x01, 0x14: Dock DAC 3 Right
- * 0x01, 0x16: Dock S/PDIF Right
- * 0x01, 0x18-0x1f: Dock ADAT 0-7
+ * 0x01, 0x00-0x1f: 32 EDI channels to Micro Dock
+ * 0x00: Dock DAC 1 Left
+ * 0x04: Dock DAC 1 Right
+ * 0x08: Dock DAC 2 Left
+ * 0x0c: Dock DAC 2 Right
+ * 0x10: Dock DAC 3 Left
+ * 0x12: Dock S/PDIF Left
+ * 0x14: Dock DAC 3 Right
+ * 0x16: Dock S/PDIF Right
+ * 0x18-0x1f: Dock ADAT 0-7
* 0x02, 0x00: Hana3 S/PDIF Left
* 0x02, 0x01: Hana3 S/PDIF Right
- * 0x03, 0x00: Hanoa DAC Left
- * 0x03, 0x01: Hanoa DAC Right
+ * 0x03, 0x00: Hamoa DAC Left
+ * 0x03, 0x01: Hamoa DAC Right
* 0x04, 0x00-0x07: Hana3 ADAT 0-7
* 0x05, 0x00-0x0f: 16 EMU32B channels to Tina
* 0x06-0x07: Not used
*
* HanaLite, rev1 0404 using Alice2
- * Destiniations for SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00, 0x00-0x0f: 16 EMU32 channels to Alice2
- * 0x01: Not used
- * 0x02, 0x00: S/PDIF Left
- * 0x02, 0x01: S/PDIF Right
- * 0x03, 0x00: DAC Left
- * 0x03, 0x01: DAC Right
- * 0x04-0x07: Not used
- *
- * HanaLiteLite, rev2 0404 using Alice2
- * Destiniations for SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00, 0x00-0x0f: 16 EMU32 channels to Alice2
+ * HanaLiteLite, rev2 0404 using Tina
+ * 0x00, 0x00-0x0f: 16 EMU32 channels to Alice2/Tina
* 0x01: Not used
* 0x02, 0x00: S/PDIF Left
* 0x02, 0x01: S/PDIF Right
@@ -1182,37 +1201,24 @@
* 0x04-0x07: Not used
*
* Mana, Cardbus 1616 using Tina2
- * Destinations for SRATEX = 1X rates: 44.1 kHz or 48 kHz
* 0x00, 0x00-0x0f: 16 EMU32A channels to Tina2
- * 0x01, 0x10-0x1f: 32 EDI channels to Micro Dock
- * 0x01, 0x00: Dock DAC 1 Left
- * 0x01, 0x04: Dock DAC 1 Right
- * 0x01, 0x08: Dock DAC 2 Left
- * 0x01, 0x0c: Dock DAC 2 Right
- * 0x01, 0x10: Dock DAC 3 Left
- * 0x01, 0x12: Dock S/PDIF Left
- * 0x01, 0x14: Dock DAC 3 Right
- * 0x01, 0x16: Dock S/PDIF Right
- * 0x01, 0x18-0x1f: Dock ADAT 0-7
+ * 0x01, 0x00-0x1f: 32 EDI channels to Micro Dock
+ * (same as rev2 1010)
* 0x02: Not used
* 0x03, 0x00: Mana DAC Left
* 0x03, 0x01: Mana DAC Right
* 0x04, 0x00-0x0f: 16 EMU32B channels to Tina2
* 0x05-0x07: Not used
- *
- *
*/
+
/* 32-bit destinations of signal in the Hana FPGA. Destinations are either
- * physical outputs of Hana, or outputs going to Alice2 (audigy) for capture
- * - 16 x EMU_DST_ALICE2_EMU32_X.
- */
-/* EMU32 = 32-bit serial channel between Alice2 (audigy) and Hana (FPGA) */
-/* EMU_DST_ALICE2_EMU32_X - data channels from Hana to Alice2 used for capture.
- * Which data is fed into a EMU_DST_ALICE2_EMU32_X channel in Hana depends on
- * setup of mixer control for each destination - see emumixer.c -
- * snd_emu1010_output_enum_ctls[], snd_emu1010_input_enum_ctls[]
+ * physical outputs of Hana, or outputs going to Alice2/Tina for capture -
+ * 16 x EMU_DST_ALICE2_EMU32_X (2x on rev2 boards). Which data is fed into
+ * a channel depends on the mixer control setting for each destination - see
+ * the register arrays in emumixer.c.
*/
#define EMU_DST_ALICE2_EMU32_0 0x000f /* 16 EMU32 channels to Alice2 +0 to +0xf */
+ /* This channel is delayed by one sample. */
#define EMU_DST_ALICE2_EMU32_1 0x0000 /* 16 EMU32 channels to Alice2 +0 to +0xf */
#define EMU_DST_ALICE2_EMU32_2 0x0001 /* 16 EMU32 channels to Alice2 +0 to +0xf */
#define EMU_DST_ALICE2_EMU32_3 0x0002 /* 16 EMU32 channels to Alice2 +0 to +0xf */
@@ -1270,8 +1276,12 @@
#define EMU_DST_DOCK_SPDIF_RIGHT2 0x011f /* Audio Dock SPDIF Right, 2nd or 96kHz */
#define EMU_DST_HANA_SPDIF_LEFT1 0x0200 /* Hana SPDIF Left, 1st or 48kHz only */
#define EMU_DST_HANA_SPDIF_LEFT2 0x0202 /* Hana SPDIF Left, 2nd or 96kHz */
+#define EMU_DST_HANA_SPDIF_LEFT3 0x0204 /* Hana SPDIF Left, 3rd or 192kHz */
+#define EMU_DST_HANA_SPDIF_LEFT4 0x0206 /* Hana SPDIF Left, 4th or 192kHz */
#define EMU_DST_HANA_SPDIF_RIGHT1 0x0201 /* Hana SPDIF Right, 1st or 48kHz only */
#define EMU_DST_HANA_SPDIF_RIGHT2 0x0203 /* Hana SPDIF Right, 2nd or 96kHz */
+#define EMU_DST_HANA_SPDIF_RIGHT3 0x0205 /* Hana SPDIF Right, 3rd or 192kHz */
+#define EMU_DST_HANA_SPDIF_RIGHT4 0x0207 /* Hana SPDIF Right, 4th or 192kHz */
#define EMU_DST_HAMOA_DAC_LEFT1 0x0300 /* Hamoa DAC Left, 1st or 48kHz only */
#define EMU_DST_HAMOA_DAC_LEFT2 0x0302 /* Hamoa DAC Left, 2nd or 96kHz */
#define EMU_DST_HAMOA_DAC_LEFT3 0x0304 /* Hamoa DAC Left, 3rd or 192kHz */
@@ -1280,6 +1290,7 @@
#define EMU_DST_HAMOA_DAC_RIGHT2 0x0303 /* Hamoa DAC Right, 2nd or 96kHz */
#define EMU_DST_HAMOA_DAC_RIGHT3 0x0305 /* Hamoa DAC Right, 3rd or 192kHz */
#define EMU_DST_HAMOA_DAC_RIGHT4 0x0307 /* Hamoa DAC Right, 4th or 192kHz */
+// In S/MUX mode, the samples of one channel are adjacent.
#define EMU_DST_HANA_ADAT 0x0400 /* Hana ADAT 8 channel out +0 to +7 */
#define EMU_DST_ALICE_I2S0_LEFT 0x0500 /* Alice2 I2S0 Left */
#define EMU_DST_ALICE_I2S0_RIGHT 0x0501 /* Alice2 I2S0 Right */
@@ -1289,39 +1300,32 @@
#define EMU_DST_ALICE_I2S2_RIGHT 0x0701 /* Alice2 I2S2 Right */
/* Additional destinations for 1616(M)/Microdock */
-/* Microdock S/PDIF OUT Left, 1st or 48kHz only */
-#define EMU_DST_MDOCK_SPDIF_LEFT1 0x0112
-/* Microdock S/PDIF OUT Left, 2nd or 96kHz */
-#define EMU_DST_MDOCK_SPDIF_LEFT2 0x0113
-/* Microdock S/PDIF OUT Right, 1st or 48kHz only */
-#define EMU_DST_MDOCK_SPDIF_RIGHT1 0x0116
-/* Microdock S/PDIF OUT Right, 2nd or 96kHz */
-#define EMU_DST_MDOCK_SPDIF_RIGHT2 0x0117
-/* Microdock S/PDIF ADAT 8 channel out +8 to +f */
-#define EMU_DST_MDOCK_ADAT 0x0118
-
-/* Headphone jack on 1010 cardbus? 44.1/48kHz only? */
-#define EMU_DST_MANA_DAC_LEFT 0x0300
-/* Headphone jack on 1010 cardbus? 44.1/48kHz only? */
-#define EMU_DST_MANA_DAC_RIGHT 0x0301
+
+#define EMU_DST_MDOCK_SPDIF_LEFT1 0x0112 /* Microdock S/PDIF OUT Left, 1st or 48kHz only */
+#define EMU_DST_MDOCK_SPDIF_LEFT2 0x0113 /* Microdock S/PDIF OUT Left, 2nd or 96kHz */
+#define EMU_DST_MDOCK_SPDIF_RIGHT1 0x0116 /* Microdock S/PDIF OUT Right, 1st or 48kHz only */
+#define EMU_DST_MDOCK_SPDIF_RIGHT2 0x0117 /* Microdock S/PDIF OUT Right, 2nd or 96kHz */
+#define EMU_DST_MDOCK_ADAT 0x0118 /* Microdock S/PDIF ADAT 8 channel out +8 to +f */
+
+#define EMU_DST_MANA_DAC_LEFT 0x0300 /* Headphone jack on 1010 cardbus? 44.1/48kHz only? */
+#define EMU_DST_MANA_DAC_RIGHT 0x0301 /* Headphone jack on 1010 cardbus? 44.1/48kHz only? */
/************************************************************************************************/
-/* EMU1010m HANA Sources */
+/* EMU1010 Audio Sources */
/************************************************************************************************/
-/* Hana, original 1010,1212,1820 using Alice2
- * Sources SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00,0x00-0x1f: Silence
- * 0x01, 0x10-0x1f: 32 Elink channels from Audio Dock
- * 0x01, 0x00: Dock Mic A
- * 0x01, 0x04: Dock Mic B
- * 0x01, 0x08: Dock ADC 1 Left
- * 0x01, 0x0c: Dock ADC 1 Right
- * 0x01, 0x10: Dock ADC 2 Left
- * 0x01, 0x14: Dock ADC 2 Right
- * 0x01, 0x18: Dock ADC 3 Left
- * 0x01, 0x1c: Dock ADC 3 Right
- * 0x02, 0x00: Hana ADC Left
- * 0x02, 0x01: Hana ADC Right
+/* Hana, original 1010,1212m,1820[m] using Alice2
+ * 0x00, 0x00-0x1f: Silence
+ * 0x01, 0x00-0x1f: 32 EDI channels from Audio Dock
+ * 0x00: Dock Mic A
+ * 0x04: Dock Mic B
+ * 0x08: Dock ADC 1 Left
+ * 0x0c: Dock ADC 1 Right
+ * 0x10: Dock ADC 2 Left
+ * 0x14: Dock ADC 2 Right
+ * 0x18: Dock ADC 3 Left
+ * 0x1c: Dock ADC 3 Right
+ * 0x02, 0x00: Hamoa ADC Left
+ * 0x02, 0x01: Hamoa ADC Right
* 0x03, 0x00-0x0f: 16 inputs from Alice2 Emu32A output
* 0x03, 0x10-0x1f: 16 inputs from Alice2 Emu32B output
* 0x04, 0x00-0x07: Hana ADAT
@@ -1332,23 +1336,20 @@
* Hana2 never released, but used Tina
* Not needed.
*
- * Hana3, rev2 1010,1212,1616 using Tina
- * Sources SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00,0x00-0x1f: Silence
- * 0x01, 0x10-0x1f: 32 Elink channels from Audio Dock
- * 0x01, 0x00: Dock Mic A
- * 0x01, 0x04: Dock Mic B
- * 0x01, 0x08: Dock ADC 1 Left
- * 0x01, 0x0c: Dock ADC 1 Right
- * 0x01, 0x10: Dock ADC 2 Left
- * 0x01, 0x12: Dock S/PDIF Left
- * 0x01, 0x14: Dock ADC 2 Right
- * 0x01, 0x16: Dock S/PDIF Right
- * 0x01, 0x18-0x1f: Dock ADAT 0-7
- * 0x01, 0x18: Dock ADC 3 Left
- * 0x01, 0x1c: Dock ADC 3 Right
- * 0x02, 0x00: Hanoa ADC Left
- * 0x02, 0x01: Hanoa ADC Right
+ * Hana3, rev2 1010,1212m,1616[m] using Tina
+ * 0x00, 0x00-0x1f: Silence
+ * 0x01, 0x00-0x1f: 32 EDI channels from Micro Dock
+ * 0x00: Dock Mic A
+ * 0x04: Dock Mic B
+ * 0x08: Dock ADC 1 Left
+ * 0x0c: Dock ADC 1 Right
+ * 0x10: Dock ADC 2 Left
+ * 0x12: Dock S/PDIF Left
+ * 0x14: Dock ADC 2 Right
+ * 0x16: Dock S/PDIF Right
+ * 0x18-0x1f: Dock ADAT 0-7
+ * 0x02, 0x00: Hamoa ADC Left
+ * 0x02, 0x01: Hamoa ADC Right
* 0x03, 0x00-0x0f: 16 inputs from Tina Emu32A output
* 0x03, 0x10-0x1f: 16 inputs from Tina Emu32B output
* 0x04, 0x00-0x07: Hana3 ADAT
@@ -1357,58 +1358,32 @@
* 0x06-0x07: Not used
*
* HanaLite, rev1 0404 using Alice2
- * Sources SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00,0x00-0x1f: Silence
+ * HanaLiteLite, rev2 0404 using Tina
+ * 0x00, 0x00-0x1f: Silence
* 0x01: Not used
* 0x02, 0x00: ADC Left
* 0x02, 0x01: ADC Right
- * 0x03, 0x00-0x0f: 16 inputs from Alice2 Emu32A output
- * 0x03, 0x10-0x1f: 16 inputs from Alice2 Emu32B output
- * 0x04: Not used
- * 0x05, 0x00: S/PDIF Left
- * 0x05, 0x01: S/PDIF Right
- * 0x06-0x07: Not used
- *
- * HanaLiteLite, rev2 0404 using Alice2
- * Sources SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00,0x00-0x1f: Silence
- * 0x01: Not used
- * 0x02, 0x00: ADC Left
- * 0x02, 0x01: ADC Right
- * 0x03, 0x00-0x0f: 16 inputs from Alice2 Emu32A output
- * 0x03, 0x10-0x1f: 16 inputs from Alice2 Emu32B output
+ * 0x03, 0x00-0x0f: 16 inputs from Alice2/Tina Emu32A output
+ * 0x03, 0x10-0x1f: 16 inputs from Alice2/Tina Emu32B output
* 0x04: Not used
* 0x05, 0x00: S/PDIF Left
* 0x05, 0x01: S/PDIF Right
* 0x06-0x07: Not used
*
* Mana, Cardbus 1616 using Tina2
- * Sources SRATEX = 1X rates: 44.1 kHz or 48 kHz
- * 0x00,0x00-0x1f: Silence
- * 0x01, 0x10-0x1f: 32 Elink channels from Audio Dock
- * 0x01, 0x00: Dock Mic A
- * 0x01, 0x04: Dock Mic B
- * 0x01, 0x08: Dock ADC 1 Left
- * 0x01, 0x0c: Dock ADC 1 Right
- * 0x01, 0x10: Dock ADC 2 Left
- * 0x01, 0x12: Dock S/PDIF Left
- * 0x01, 0x14: Dock ADC 2 Right
- * 0x01, 0x16: Dock S/PDIF Right
- * 0x01, 0x18-0x1f: Dock ADAT 0-7
- * 0x01, 0x18: Dock ADC 3 Left
- * 0x01, 0x1c: Dock ADC 3 Right
+ * 0x00, 0x00-0x1f: Silence
+ * 0x01, 0x00-0x1f: 32 EDI channels from Micro Dock
+ * (same as rev2 1010)
* 0x02: Not used
- * 0x03, 0x00-0x0f: 16 inputs from Tina Emu32A output
- * 0x03, 0x10-0x1f: 16 inputs from Tina Emu32B output
+ * 0x03, 0x00-0x0f: 16 inputs from Tina2 Emu32A output
+ * 0x03, 0x10-0x1f: 16 inputs from Tina2 Emu32B output
* 0x04-0x07: Not used
- *
*/
/* 32-bit sources of signal in the Hana FPGA. The sources are routed to
- * destinations using mixer control for each destination - see emumixer.c
- * Sources are either physical inputs of FPGA,
- * or outputs from Alice (audigy) - 16 x EMU_SRC_ALICE_EMU32A +
- * 16 x EMU_SRC_ALICE_EMU32B
+ * destinations using a mixer control for each destination - see emumixer.c.
+ * Sources are either physical inputs of Hana, or inputs from Alice2/Tina -
+ * 16 x EMU_SRC_ALICE_EMU32A + 16 x EMU_SRC_ALICE_EMU32B.
*/
#define EMU_SRC_SILENCE 0x0000 /* Silence */
#define EMU_SRC_DOCK_MIC_A1 0x0100 /* Audio Dock Mic A, 1st or 48kHz only */
@@ -1453,45 +1428,56 @@
#define EMU_SRC_HAMOA_ADC_RIGHT4 0x0207 /* Hamoa ADC Right, 4th or 192kHz */
#define EMU_SRC_ALICE_EMU32A 0x0300 /* Alice2 EMU32a 16 outputs. +0 to +0xf */
#define EMU_SRC_ALICE_EMU32B 0x0310 /* Alice2 EMU32b 16 outputs. +0 to +0xf */
+// In S/MUX mode, the samples of one channel are adjacent.
#define EMU_SRC_HANA_ADAT 0x0400 /* Hana ADAT 8 channel in +0 to +7 */
#define EMU_SRC_HANA_SPDIF_LEFT1 0x0500 /* Hana SPDIF Left, 1st or 48kHz only */
#define EMU_SRC_HANA_SPDIF_LEFT2 0x0502 /* Hana SPDIF Left, 2nd or 96kHz */
+#define EMU_SRC_HANA_SPDIF_LEFT3 0x0504 /* Hana SPDIF Left, 3rd or 192kHz */
+#define EMU_SRC_HANA_SPDIF_LEFT4 0x0506 /* Hana SPDIF Left, 4th or 192kHz */
#define EMU_SRC_HANA_SPDIF_RIGHT1 0x0501 /* Hana SPDIF Right, 1st or 48kHz only */
#define EMU_SRC_HANA_SPDIF_RIGHT2 0x0503 /* Hana SPDIF Right, 2nd or 96kHz */
+#define EMU_SRC_HANA_SPDIF_RIGHT3 0x0505 /* Hana SPDIF Right, 3rd or 192kHz */
+#define EMU_SRC_HANA_SPDIF_RIGHT4 0x0507 /* Hana SPDIF Right, 4th or 192kHz */
/* Additional inputs for 1616(M)/Microdock */
-/* Microdock S/PDIF Left, 1st or 48kHz only */
-#define EMU_SRC_MDOCK_SPDIF_LEFT1 0x0112
-/* Microdock S/PDIF Left, 2nd or 96kHz */
-#define EMU_SRC_MDOCK_SPDIF_LEFT2 0x0113
-/* Microdock S/PDIF Right, 1st or 48kHz only */
-#define EMU_SRC_MDOCK_SPDIF_RIGHT1 0x0116
-/* Microdock S/PDIF Right, 2nd or 96kHz */
-#define EMU_SRC_MDOCK_SPDIF_RIGHT2 0x0117
-/* Microdock ADAT 8 channel in +8 to +f */
-#define EMU_SRC_MDOCK_ADAT 0x0118
+
+#define EMU_SRC_MDOCK_SPDIF_LEFT1 0x0112 /* Microdock S/PDIF Left, 1st or 48kHz only */
+#define EMU_SRC_MDOCK_SPDIF_LEFT2 0x0113 /* Microdock S/PDIF Left, 2nd or 96kHz */
+#define EMU_SRC_MDOCK_SPDIF_RIGHT1 0x0116 /* Microdock S/PDIF Right, 1st or 48kHz only */
+#define EMU_SRC_MDOCK_SPDIF_RIGHT2 0x0117 /* Microdock S/PDIF Right, 2nd or 96kHz */
+#define EMU_SRC_MDOCK_ADAT 0x0118 /* Microdock ADAT 8 channel in +8 to +f */
 /* 0x600 and 0x700 not used */
+
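A hedged sketch of how these source codes are consumed: each 16-bit value is
(bus << 8) | channel, and gets linked through the FPGA to a destination id
(the EMU_DST_* codes, which are not part of this hunk). Illustrative only;
see emumixer.c for the authoritative routing code.

	static void example_route_alice_to_dst(struct snd_emu10k1 *emu, u32 dst)
	{
		u32 src = EMU_SRC_ALICE_EMU32A + 2;	/* bus 0x03, channel 0x02 */

		/* declared further down in this header */
		snd_emu1010_fpga_link_dst_src_write(emu, dst, src);
	}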
+/* ------------------- CONSTANTS -------------------- */
+
+extern const char * const snd_emu10k1_fxbus[32];
+extern const char * const snd_emu10k1_sblive_ins[16];
+extern const char * const snd_emu10k1_audigy_ins[16];
+extern const char * const snd_emu10k1_sblive_outs[32];
+extern const char * const snd_emu10k1_audigy_outs[32];
+extern const s8 snd_emu10k1_sblive51_fxbus2_map[16];
+
/* ------------------- STRUCTURES -------------------- */
enum {
+ EMU10K1_UNUSED, // This must be zero
EMU10K1_EFX,
+ EMU10K1_EFX_IRQ,
EMU10K1_PCM,
+ EMU10K1_PCM_IRQ,
EMU10K1_SYNTH,
- EMU10K1_MIDI
+ EMU10K1_NUM_TYPES
};
struct snd_emu10k1;
struct snd_emu10k1_voice {
- struct snd_emu10k1 *emu;
- int number;
- unsigned int use: 1,
- pcm: 1,
- efx: 1,
- synth: 1,
- midi: 1;
+ unsigned char number;
+ unsigned char use;
+ unsigned char dirty;
+ unsigned char last;
void (*interrupt)(struct snd_emu10k1 *emu, struct snd_emu10k1_voice *pvoice);
struct snd_emu10k1_pcm *epcm;
@@ -1513,7 +1499,9 @@ struct snd_emu10k1_pcm {
struct snd_emu10k1_voice *extra;
unsigned short running;
unsigned short first_ptr;
+ snd_pcm_uframes_t resume_pos;
struct snd_util_memblk *memblk;
+ unsigned int pitch_target;
unsigned int start_addr;
unsigned int ccca_start_addr;
unsigned int capture_ipr; /* interrupt acknowledge mask */
@@ -1531,6 +1519,8 @@ struct snd_emu10k1_pcm_mixer {
/* mono, left, right x 8 sends (4 on emu10k1) */
unsigned char send_routing[3][8];
unsigned char send_volume[3][8];
+ // 0x8000 is neutral. The mixer code rescales it to 0xffff to maintain
+ // backwards compatibility with user space.
unsigned short attn[3];
struct snd_emu10k1_pcm *epcm;
};
@@ -1539,10 +1529,13 @@ struct snd_emu10k1_pcm_mixer {
((route[0] | (route[1] << 4) | (route[2] << 8) | (route[3] << 12)) << 16)
#define snd_emu10k1_compose_audigy_fxrt1(route) \
-((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24))
+((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24) | 0x80808080)
#define snd_emu10k1_compose_audigy_fxrt2(route) \
-((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24))
+((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24) | 0x80808080)
+
+#define snd_emu10k1_compose_audigy_sendamounts(vol) \
+(((unsigned int)vol[4] << 24) | ((unsigned int)vol[5] << 16) | ((unsigned int)vol[6] << 8) | (unsigned int)vol[7])
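A quick worked example of the packing (route values chosen arbitrarily):

	unsigned char route[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	u32 fxrt1 = snd_emu10k1_compose_audigy_fxrt1(route); /* 0x03020100 | 0x80808080 == 0x83828180 */
	u32 fxrt2 = snd_emu10k1_compose_audigy_fxrt2(route); /* 0x07060504 | 0x80808080 == 0x87868584 */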
struct snd_emu10k1_memblk {
struct snd_util_memblk mem;
@@ -1562,9 +1555,9 @@ struct snd_emu10k1_fx8010_ctl {
unsigned int vcount;
unsigned int count; /* count of GPR (1..16) */
unsigned short gpr[32]; /* GPR number(s) */
- unsigned int value[32];
- unsigned int min; /* minimum range */
- unsigned int max; /* maximum range */
+ int value[32];
+ int min; /* minimum range */
+ int max; /* maximum range */
unsigned int translation; /* translation type (EMU10K1_GPR_TRANSLATION*) */
struct snd_kcontrol *kcontrol;
};
@@ -1599,10 +1592,8 @@ struct snd_emu10k1_fx8010_pcm {
};
struct snd_emu10k1_fx8010 {
- unsigned short fxbus_mask; /* used FX buses (bitmask) */
- unsigned short extin_mask; /* used external inputs (bitmask) */
- unsigned short extout_mask; /* used external outputs (bitmask) */
- unsigned short pad1;
+ unsigned short extin_mask; /* used external inputs (bitmask); not used for Audigy */
+ unsigned short extout_mask; /* used external outputs (bitmask); not used for Audigy */
unsigned int itram_size; /* internal TRAM size in samples */
struct snd_dma_buffer etram_pages; /* external TRAM pages and size */
unsigned int dbg; /* FX debugger register */
@@ -1639,42 +1630,62 @@ enum {
EMU_MODEL_EMU0404,
};
+// Chip-o-logy:
+// - All SB Live! cards use EMU10K1 chips
+// - All SB Audigy cards use CA* chips, termed "emu10k2" by the driver
+// - Original Audigy uses CA0100 "Alice"
+// - Audigy 2 uses CA0102/CA10200 "Alice2"
+// - Has an interface for CA0151 (P16V) "Alice3"
+// - Audigy 2 Value uses CA0108/CA10300 "Tina"
+// - Approximately a CA0102 with an on-chip CA0151 (P17V)
+// - Audigy 2 ZS NB uses CA0109 "Tina2"
+// - Cardbus version of CA0108
struct snd_emu_chip_details {
u32 vendor;
u32 device;
u32 subsystem;
unsigned char revision;
- unsigned char emu10k1_chip; /* Original SB Live. Not SB Live 24bit. */
- unsigned char emu10k2_chip; /* Audigy 1 or Audigy 2. */
- unsigned char ca0102_chip; /* Audigy 1 or Audigy 2. Not SB Audigy 2 Value. */
- unsigned char ca0108_chip; /* Audigy 2 Value */
- unsigned char ca_cardbus_chip; /* Audigy 2 ZS Notebook */
- unsigned char ca0151_chip; /* P16V */
- unsigned char spk71; /* Has 7.1 speakers */
- unsigned char sblive51; /* SBLive! 5.1 - extout 0x11 -> center, 0x12 -> lfe */
- unsigned char spdif_bug; /* Has Spdif phasing bug */
- unsigned char ac97_chip; /* Has an AC97 chip: 1 = mandatory, 2 = optional */
- unsigned char ecard; /* APS EEPROM */
- unsigned char emu_model; /* EMU model type */
- unsigned char spi_dac; /* SPI interface for DAC */
- unsigned char i2c_adc; /* I2C interface for ADC */
- unsigned char adc_1361t; /* Use Philips 1361T ADC */
- unsigned char invert_shared_spdif; /* analog/digital switch inverted */
+ unsigned char emu_model; /* EMU model type */
+ unsigned int emu10k1_chip:1; /* Original SB Live. Not SB Live 24bit. */
+ /* Redundant with emu10k2_chip being unset. */
+ unsigned int emu10k2_chip:1; /* Audigy 1 or Audigy 2. */
+ unsigned int ca0102_chip:1; /* Audigy 1 or Audigy 2. Not SB Audigy 2 Value. */
+ /* Redundant with ca0108_chip being unset. */
+ unsigned int ca0108_chip:1; /* Audigy 2 Value */
+ unsigned int ca_cardbus_chip:1; /* Audigy 2 ZS Notebook */
+ unsigned int ca0151_chip:1; /* P16V */
+ unsigned int spk20:1; /* Stereo only */
+ unsigned int spk71:1; /* Has 7.1 speakers */
+ unsigned int no_adat:1; /* Has no ADAT, only SPDIF */
+ unsigned int sblive51:1; /* SBLive! 5.1 - extout 0x11 -> center, 0x12 -> lfe */
+ unsigned int spdif_bug:1; /* Has Spdif phasing bug */
+ unsigned int ac97_chip:2; /* Has an AC97 chip: 1 = mandatory, 2 = optional */
+ unsigned int ecard:1; /* APS EEPROM */
+ unsigned int spi_dac:1; /* SPI interface for DAC; requires ca0108_chip */
+ unsigned int i2c_adc:1; /* I2C interface for ADC; requires ca0108_chip */
+ unsigned int adc_1361t:1; /* Use Philips 1361T ADC */
+ unsigned int invert_shared_spdif:1; /* analog/digital switch inverted */
const char *driver;
const char *name;
const char *id; /* for backward compatibility - can be NULL if not needed */
};
+#define NUM_OUTPUT_DESTS 28
+#define NUM_INPUT_DESTS 22
+
struct snd_emu1010 {
- unsigned int output_source[64];
- unsigned int input_source[64];
+ unsigned char output_source[NUM_OUTPUT_DESTS];
+ unsigned char input_source[NUM_INPUT_DESTS];
unsigned int adc_pads; /* bit mask */
unsigned int dac_pads; /* bit mask */
- unsigned int internal_clock; /* 44100 or 48000 */
+ unsigned int wclock; /* Cached register value */
+ unsigned int word_clock; /* Cached effective value */
+ unsigned int clock_source;
+ unsigned int clock_fallback;
unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
- struct delayed_work firmware_work;
- u32 last_reg;
+ struct work_struct firmware_work;
+ struct work_struct clock_work;
};
struct snd_emu10k1 {
@@ -1691,17 +1702,15 @@ struct snd_emu10k1 {
unsigned int revision; /* chip revision */
unsigned int serial; /* serial number */
unsigned short model; /* subsystem id */
- unsigned int card_type; /* EMU10K1_CARD_* */
unsigned int ecard_ctrl; /* ecard control bits */
unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
bool iommu_workaround; /* IOMMU workaround needed */
- unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
struct snd_dma_buffer silent_page; /* silent page */
struct snd_dma_buffer ptb_pages; /* page table pages */
struct snd_dma_device p16v_dma_dev;
- struct snd_dma_buffer p16v_buffer;
+ struct snd_dma_buffer *p16v_buffer;
struct snd_util_memhdr *memhdr; /* page allocation list */
@@ -1732,15 +1741,13 @@ struct snd_emu10k1 {
void *synth;
int (*get_synth_voice)(struct snd_emu10k1 *emu);
- spinlock_t reg_lock;
- spinlock_t emu_lock;
- spinlock_t voice_lock;
+ spinlock_t reg_lock; // high-level driver lock
+ spinlock_t emu_lock; // low-level i/o lock
+ spinlock_t voice_lock; // voice allocator lock
spinlock_t spi_lock; /* serialises access to spi port */
spinlock_t i2c_lock; /* serialises access to i2c port */
struct snd_emu10k1_voice voices[NUM_G];
- struct snd_emu10k1_voice p16v_voices[4];
- struct snd_emu10k1_voice p16v_capture_voice;
int p16v_device_offset;
u32 p16v_capture_source;
u32 p16v_capture_channel;
@@ -1753,6 +1760,7 @@ struct snd_emu10k1 {
struct snd_kcontrol *ctl_efx_send_routing;
struct snd_kcontrol *ctl_efx_send_volume;
struct snd_kcontrol *ctl_efx_attn;
+ struct snd_kcontrol *ctl_clock_source;
void (*hwvol_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*capture_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
@@ -1760,11 +1768,12 @@ struct snd_emu10k1 {
void (*capture_efx_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*spdif_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*dsp_interrupt)(struct snd_emu10k1 *emu);
+ void (*gpio_interrupt)(struct snd_emu10k1 *emu);
+ void (*p16v_interrupt)(struct snd_emu10k1 *emu);
struct snd_pcm_substream *pcm_capture_substream;
struct snd_pcm_substream *pcm_capture_mic_substream;
struct snd_pcm_substream *pcm_capture_efx_substream;
- struct snd_pcm_substream *pcm_playback_efx_substream;
struct snd_timer *timer;
@@ -1796,14 +1805,12 @@ int snd_emu10k1_create(struct snd_card *card,
unsigned short extout_mask,
long max_cache_bytes,
int enable_ir,
- uint subsystem,
- struct snd_emu10k1 ** remu);
+ uint subsystem);
int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device);
int snd_p16v_pcm(struct snd_emu10k1 *emu, int device);
-int snd_p16v_free(struct snd_emu10k1 * emu);
int snd_p16v_mixer(struct snd_emu10k1 * emu);
int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 *emu, int device);
@@ -1822,13 +1829,17 @@ int snd_emu10k1_done(struct snd_emu10k1 * emu);
/* I/O functions */
unsigned int snd_emu10k1_ptr_read(struct snd_emu10k1 * emu, unsigned int reg, unsigned int chn);
void snd_emu10k1_ptr_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
+void snd_emu10k1_ptr_write_multiple(struct snd_emu10k1 *emu, unsigned int chn, ...);
unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg, unsigned int chn);
void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data);
int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
-int snd_emu1010_fpga_write(struct snd_emu10k1 * emu, u32 reg, u32 value);
-int snd_emu1010_fpga_read(struct snd_emu10k1 * emu, u32 reg, u32 *value);
-int snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 * emu, u32 dst, u32 src);
+void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
+void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value);
+void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 src);
+u32 snd_emu1010_fpga_link_dst_src_read(struct snd_emu10k1 *emu, u32 dst);
+int snd_emu1010_get_raw_rate(struct snd_emu10k1 *emu, u8 src);
+void snd_emu1010_update_clock(struct snd_emu10k1 *emu);
unsigned int snd_emu10k1_efx_read(struct snd_emu10k1 *emu, unsigned int pc);
void snd_emu10k1_intr_enable(struct snd_emu10k1 *emu, unsigned int intrenb);
void snd_emu10k1_intr_disable(struct snd_emu10k1 *emu, unsigned int intrenb);
@@ -1838,13 +1849,17 @@ void snd_emu10k1_voice_intr_ack(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_half_loop_intr_enable(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_half_loop_intr_disable(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_half_loop_intr_ack(struct snd_emu10k1 *emu, unsigned int voicenum);
+#if 0
void snd_emu10k1_voice_set_loop_stop(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_clear_loop_stop(struct snd_emu10k1 *emu, unsigned int voicenum);
+#endif
+void snd_emu10k1_voice_set_loop_stop_multiple(struct snd_emu10k1 *emu, u64 voices);
+void snd_emu10k1_voice_clear_loop_stop_multiple(struct snd_emu10k1 *emu, u64 voices);
+int snd_emu10k1_voice_clear_loop_stop_multiple_atomic(struct snd_emu10k1 *emu, u64 voices);
void snd_emu10k1_wait(struct snd_emu10k1 *emu, unsigned int wait);
static inline unsigned int snd_emu10k1_wc(struct snd_emu10k1 *emu) { return (inl(emu->port + WC) >> 6) & 0xfffff; }
unsigned short snd_emu10k1_ac97_read(struct snd_ac97 *ac97, unsigned short reg);
void snd_emu10k1_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short data);
-unsigned int snd_emu10k1_rate_to_pitch(unsigned int rate);
#ifdef CONFIG_PM_SLEEP
void snd_emu10k1_suspend_regs(struct snd_emu10k1 *emu);
@@ -1872,7 +1887,8 @@ int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_me
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk);
/* voice allocation */
-int snd_emu10k1_voice_alloc(struct snd_emu10k1 *emu, int type, int pair, struct snd_emu10k1_voice **rvoice);
+int snd_emu10k1_voice_alloc(struct snd_emu10k1 *emu, int type, int count, int channels,
+ struct snd_emu10k1_pcm *epcm, struct snd_emu10k1_voice **rvoice);
int snd_emu10k1_voice_free(struct snd_emu10k1 *emu, struct snd_emu10k1_voice *pvoice);
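A hedged sketch of the new allocator signature; the exact semantics of count
versus channels are an assumption here (voice.c is authoritative):

	struct snd_emu10k1_voice *voice;
	int err;

	/* e.g. one voice per channel of a stereo PCM; epcm supplies the
	 * back-reference that used to live in struct snd_emu10k1_voice */
	err = snd_emu10k1_voice_alloc(emu, EMU10K1_PCM, 1, 2, epcm, &voice);
	if (err < 0)
		return err;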
/* MIDI uart */
diff --git a/include/sound/emu8000.h b/include/sound/emu8000.h
index ad0365d6de5e..072791bbcf5c 100644
--- a/include/sound/emu8000.h
+++ b/include/sound/emu8000.h
@@ -56,9 +56,6 @@ struct snd_emu8000 {
unsigned long port1; /* Port usually base+0 */
unsigned long port2; /* Port usually at base+0x400 */
unsigned long port3; /* Port usually at base+0x800 */
- struct resource *res_port1;
- struct resource *res_port2;
- struct resource *res_port3;
unsigned short last_reg;/* Last register command */
spinlock_t reg_lock;
diff --git a/include/sound/emux_synth.h b/include/sound/emux_synth.h
index d499b68122a3..3f7f365ed248 100644
--- a/include/sound/emux_synth.h
+++ b/include/sound/emux_synth.h
@@ -54,6 +54,7 @@ struct snd_emux_operators {
#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
int (*oss_ioctl)(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
+ int (*get_pitch_shift)(struct snd_emux *emu);
};
@@ -82,7 +83,6 @@ struct snd_emux {
int max_voices; /* Number of voices */
int mem_size; /* memory size (in byte) */
int num_ports; /* number of ports to be created */
- int pitch_shift; /* pitch shift value (for Emu10k1) */
struct snd_emux_operators ops; /* operators */
void *hw; /* hardware */
unsigned long flags; /* other conditions */
@@ -103,7 +103,7 @@ struct snd_emux {
int ports[SNDRV_EMUX_MAX_PORTS]; /* The ports for this device */
struct snd_emux_port *portptrs[SNDRV_EMUX_MAX_PORTS];
int used; /* use counter */
- char *name; /* name of the device (internal) */
+ const char *name; /* name of the device (internal) */
struct snd_rawmidi **vmidi;
struct timer_list tlist; /* for pending note-offs */
int timer_active;
diff --git a/include/sound/graph_card.h b/include/sound/graph_card.h
new file mode 100644
index 000000000000..8e2e15dfcb1e
--- /dev/null
+++ b/include/sound/graph_card.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ASoC audio graph card support
+ *
+ */
+
+#ifndef __GRAPH_CARD_H
+#define __GRAPH_CARD_H
+
+#include <sound/simple_card_utils.h>
+
+typedef int (*GRAPH2_CUSTOM)(struct simple_util_priv *priv,
+ struct device_node *lnk,
+ struct link_info *li);
+
+struct graph2_custom_hooks {
+ int (*hook_pre)(struct simple_util_priv *priv);
+ int (*hook_post)(struct simple_util_priv *priv);
+ GRAPH2_CUSTOM custom_normal;
+ GRAPH2_CUSTOM custom_dpcm;
+ GRAPH2_CUSTOM custom_c2c;
+};
+
+int audio_graph_parse_of(struct simple_util_priv *priv, struct device *dev);
+int audio_graph2_parse_of(struct simple_util_priv *priv, struct device *dev,
+ struct graph2_custom_hooks *hooks);
+
+int audio_graph2_link_normal(struct simple_util_priv *priv,
+ struct device_node *lnk, struct link_info *li);
+int audio_graph2_link_dpcm(struct simple_util_priv *priv,
+ struct device_node *lnk, struct link_info *li);
+int audio_graph2_link_c2c(struct simple_util_priv *priv,
+ struct device_node *lnk, struct link_info *li);
+
+#endif /* __GRAPH_CARD_H */
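A minimal, hypothetical use of the hooks API (the my_* names are placeholders;
allocation of priv and error handling are omitted):

	static int my_custom_normal(struct simple_util_priv *priv,
				    struct device_node *lnk, struct link_info *li)
	{
		/* adjust the DAI link here, then defer to the generic handler */
		return audio_graph2_link_normal(priv, lnk, li);
	}

	static struct graph2_custom_hooks my_hooks = {
		.custom_normal = my_custom_normal,
	};

	/* in the card driver's probe(): */
	ret = audio_graph2_parse_of(priv, dev, &my_hooks);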
diff --git a/include/sound/hda-mlink.h b/include/sound/hda-mlink.h
new file mode 100644
index 000000000000..d849d9b24f13
--- /dev/null
+++ b/include/sound/hda-mlink.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022-2023 Intel Corporation. All rights reserved.
+ */
+
+struct hdac_bus;
+struct hdac_ext_link;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_MLINK)
+
+int hda_bus_ml_init(struct hdac_bus *bus);
+void hda_bus_ml_free(struct hdac_bus *bus);
+
+int hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid);
+void hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable);
+bool hdac_bus_eml_check_interrupt(struct hdac_bus *bus, bool alt, int elid);
+
+int hdac_bus_eml_set_syncprd_unlocked(struct hdac_bus *bus, bool alt, int elid, u32 syncprd);
+int hdac_bus_eml_sdw_set_syncprd_unlocked(struct hdac_bus *bus, u32 syncprd);
+
+int hdac_bus_eml_wait_syncpu_unlocked(struct hdac_bus *bus, bool alt, int elid);
+int hdac_bus_eml_sdw_wait_syncpu_unlocked(struct hdac_bus *bus);
+
+void hdac_bus_eml_sync_arm_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink);
+void hdac_bus_eml_sdw_sync_arm_unlocked(struct hdac_bus *bus, int sublink);
+
+int hdac_bus_eml_sync_go_unlocked(struct hdac_bus *bus, bool alt, int elid);
+int hdac_bus_eml_sdw_sync_go_unlocked(struct hdac_bus *bus);
+
+bool hdac_bus_eml_check_cmdsync_unlocked(struct hdac_bus *bus, bool alt, int elid);
+bool hdac_bus_eml_sdw_check_cmdsync_unlocked(struct hdac_bus *bus);
+
+int hdac_bus_eml_power_up(struct hdac_bus *bus, bool alt, int elid, int sublink);
+int hdac_bus_eml_power_up_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink);
+
+int hdac_bus_eml_power_down(struct hdac_bus *bus, bool alt, int elid, int sublink);
+int hdac_bus_eml_power_down_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink);
+
+int hdac_bus_eml_sdw_power_up_unlocked(struct hdac_bus *bus, int sublink);
+int hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink);
+
+int hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid);
+int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num);
+
+int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+ int channel_mask, int stream_id, int dir);
+
+void hda_bus_ml_put_all(struct hdac_bus *bus);
+void hda_bus_ml_reset_losidv(struct hdac_bus *bus);
+int hda_bus_ml_resume(struct hdac_bus *bus);
+int hda_bus_ml_suspend(struct hdac_bus *bus);
+
+struct hdac_ext_link *hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus);
+struct hdac_ext_link *hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus);
+struct hdac_ext_link *hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus);
+
+struct mutex *hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid);
+
+int hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enable);
+
+#else
+
+static inline int
+hda_bus_ml_init(struct hdac_bus *bus) { return 0; }
+
+static inline void hda_bus_ml_free(struct hdac_bus *bus) { }
+
+static inline int
+hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid) { return 0; }
+
+static inline void
+hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable) { }
+
+static inline bool
+hdac_bus_eml_check_interrupt(struct hdac_bus *bus, bool alt, int elid) { return false; }
+
+static inline int
+hdac_bus_eml_set_syncprd_unlocked(struct hdac_bus *bus, bool alt, int elid, u32 syncprd)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_sdw_set_syncprd_unlocked(struct hdac_bus *bus, u32 syncprd)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_wait_syncpu_unlocked(struct hdac_bus *bus, bool alt, int elid)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_sdw_wait_syncpu_unlocked(struct hdac_bus *bus) { return 0; }
+
+static inline void
+hdac_bus_eml_sync_arm_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink) { }
+
+static inline void
+hdac_bus_eml_sdw_sync_arm_unlocked(struct hdac_bus *bus, int sublink) { }
+
+static inline int
+hdac_bus_eml_sync_go_unlocked(struct hdac_bus *bus, bool alt, int elid) { return 0; }
+
+static inline int
+hdac_bus_eml_sdw_sync_go_unlocked(struct hdac_bus *bus) { return 0; }
+
+static inline bool
+hdac_bus_eml_check_cmdsync_unlocked(struct hdac_bus *bus, bool alt, int elid) { return false; }
+
+static inline bool
+hdac_bus_eml_sdw_check_cmdsync_unlocked(struct hdac_bus *bus) { return false; }
+
+static inline int
+hdac_bus_eml_power_up(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_power_up_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_power_down(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_power_down_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return 0;
+}
+
+static inline int
+hdac_bus_eml_sdw_power_up_unlocked(struct hdac_bus *bus, int sublink) { return 0; }
+
+static inline int
+hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink) { return 0; }
+
+static inline int
+hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid) { return 0; }
+
+static inline int
+hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num) { return 0; }
+
+static inline int
+hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+ int channel_mask, int stream_id, int dir)
+{
+ return 0;
+}
+
+static inline void hda_bus_ml_put_all(struct hdac_bus *bus) { }
+static inline void hda_bus_ml_reset_losidv(struct hdac_bus *bus) { }
+static inline int hda_bus_ml_resume(struct hdac_bus *bus) { return 0; }
+static inline int hda_bus_ml_suspend(struct hdac_bus *bus) { return 0; }
+
+static inline struct hdac_ext_link *
+hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus) { return NULL; }
+
+static inline struct hdac_ext_link *
+hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus) { return NULL; }
+
+static inline struct hdac_ext_link *
+hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus) { return NULL; }
+
+static inline struct mutex *
+hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid) { return NULL; }
+
+static inline int
+hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enable)
+{
+ return 0;
+}
+#endif /* CONFIG_SND_SOC_SOF_HDA_MLINK */
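A hedged sketch of the locking pattern these *_unlocked helpers imply; 0x00 as
the SoundWire ELID is an assumption taken from AZX_REG_ML_LEPTR_ID_SDW in
hda_register.h:

	static int my_sdw_power_up(struct hdac_bus *bus, int sublink)
	{
		struct mutex *lock = hdac_bus_eml_get_mutex(bus, true, 0x00);
		int ret;

		if (!lock)	/* stubbed build: mlink support compiled out */
			return 0;

		mutex_lock(lock);
		ret = hdac_bus_eml_sdw_power_up_unlocked(bus, sublink);
		mutex_unlock(lock);
		return ret;
	}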
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 0fea49bfc5e8..9c94ba7c183d 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -8,7 +8,7 @@
#ifndef __SOUND_HDA_CODEC_H
#define __SOUND_HDA_CODEC_H
-#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/mod_devicetable.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -18,9 +18,6 @@
#include <sound/hda_verbs.h>
#include <sound/hda_regmap.h>
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-
/*
* Structures
*/
@@ -59,6 +56,9 @@ struct hda_bus {
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
unsigned int bus_probing :1; /* during probing process */
unsigned int keep_power:1; /* keep power up for notification */
+ unsigned int jackpoll_in_suspend:1; /* keep jack polling during
+ * runtime suspend
+ */
int primary_dig_out_type; /* primary digital out PCM type */
unsigned int mixer_assigned; /* codec addr for mixer name */
@@ -114,7 +114,6 @@ struct hda_codec_ops {
int (*resume)(struct hda_codec *codec);
int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
#endif
- void (*reboot_notify)(struct hda_codec *codec);
void (*stream_pm)(struct hda_codec *codec, hda_nid_t nid, bool on);
};
@@ -142,6 +141,7 @@ struct hda_pcm_stream {
hda_nid_t nid; /* default NID to query rates/formats/bps, or set up */
u32 rates; /* supported rates */
u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */
+ u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */
unsigned int maxbps; /* supported max. bit per sample */
const struct snd_pcm_chmap_elem *chmap; /* chmap to override */
struct hda_pcm_ops ops;
@@ -167,8 +167,8 @@ struct hda_pcm {
bool own_chmap; /* codec driver provides own channel maps */
/* private: */
struct hda_codec *codec;
- struct kref kref;
struct list_head list;
+ unsigned int disconnected:1;
};
/* codec information */
@@ -188,6 +188,8 @@ struct hda_codec {
/* PCM to create, set by patch_ops.build_pcms callback */
struct list_head pcm_list_head;
+ refcount_t pcm_ref;
+ wait_queue_head_t remove_sleep;
/* codec specific info */
void *spec;
@@ -225,8 +227,8 @@ struct hda_codec {
#endif
/* misc flags */
+ unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */
- unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */
unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
* status change
@@ -253,7 +255,9 @@ struct hda_codec {
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
- unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
+ unsigned int forced_resume:1; /* forced resume for jack */
+ unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */
+ unsigned int ctl_dev_id:1; /* old control element id build behaviour */
#ifdef CONFIG_PM
unsigned long power_on_acct;
@@ -288,8 +292,6 @@ struct hda_codec {
#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev)
#define hda_codec_dev(_dev) (&(_dev)->core.dev)
-#define hdac_to_hda_priv(_hdac) \
- container_of(_hdac, struct hdac_hda_priv, codec.core)
#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
#define list_for_each_codec(c, bus) \
@@ -303,12 +305,19 @@ struct hda_codec {
/*
* constructors
*/
+__printf(3, 4) struct hda_codec *
+snd_hda_codec_device_init(struct hda_bus *bus, unsigned int codec_addr,
+ const char *fmt, ...);
int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
unsigned int codec_addr, struct hda_codec **codecp);
int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
- unsigned int codec_addr, struct hda_codec *codec);
+ unsigned int codec_addr, struct hda_codec *codec,
+ bool snddev_managed);
int snd_hda_codec_configure(struct hda_codec *codec);
int snd_hda_codec_update_widgets(struct hda_codec *codec);
+void snd_hda_codec_register(struct hda_codec *codec);
+void snd_hda_codec_unregister(struct hda_codec *codec);
+void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
/*
* low level functions
@@ -343,7 +352,7 @@ snd_hda_get_num_conns(struct hda_codec *codec, hda_nid_t nid)
#define snd_hda_get_raw_connections(codec, nid, list, max_conns) \
snd_hdac_get_connections(&(codec)->core, nid, list, max_conns)
#define snd_hda_get_num_raw_conns(codec, nid) \
- snd_hdac_get_connections(&(codec)->core, nid, NULL, 0);
+ snd_hdac_get_connections(&(codec)->core, nid, NULL, 0)
int snd_hda_get_conn_list(struct hda_codec *codec, hda_nid_t nid,
const hda_nid_t **listp);
@@ -419,7 +428,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
{
- kref_get(&pcm->kref);
+ refcount_inc(&pcm->codec->pcm_ref);
}
void snd_hda_codec_pcm_put(struct hda_pcm *pcm);
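The reference count now lives on the owning codec (pcm_ref) instead of a
per-PCM kref; the usage pairing is unchanged. A minimal sketch, assuming pcm
was looked up from the codec's pcm_list_head:

	snd_hda_codec_pcm_get(pcm);	/* bumps pcm->codec->pcm_ref */
	/* ... use pcm->streams[] etc. ... */
	snd_hda_codec_pcm_put(pcm);	/* last put presumably wakes remove_sleep */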
@@ -440,8 +449,8 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
#define snd_hda_codec_cleanup_stream(codec, nid) \
__snd_hda_codec_cleanup_stream(codec, nid, 0)
-#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, bpsp) \
- snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, bpsp)
+#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, subfmtp, bpsp) \
+ snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, subfmtp, bpsp)
#define snd_hda_is_supported_format(codec, nid, fmt) \
snd_hdac_is_supported_format(&(codec)->core, nid, fmt)
@@ -487,9 +496,11 @@ int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid)
#define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core)
#define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core)
#ifdef CONFIG_PM
+void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay);
void snd_hda_set_power_save(struct hda_bus *bus, int delay);
void snd_hda_update_power_acct(struct hda_codec *codec);
#else
+static inline void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay) {}
static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {}
#endif
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 057d2a2d0bd0..5ff31e6d41c1 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -91,6 +91,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_SD_BDLPL 0x18
#define AZX_REG_SD_BDLPU 0x1c
+#define AZX_SD_FIFOSIZE_MASK GENMASK(15, 0)
+
/* GTS registers */
#define AZX_REG_LLCH 0x14
@@ -119,7 +121,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_VS_EM3U 0x103C
#define AZX_REG_VS_EM4L 0x1040
#define AZX_REG_VS_EM4U 0x1044
-#define AZX_REG_VS_LTRC 0x1048
+#define AZX_REG_VS_LTRP 0x1048
#define AZX_REG_VS_D0I3C 0x104A
#define AZX_REG_VS_PCE 0x104B
#define AZX_REG_VS_L2MAGC 0x1050
@@ -129,6 +131,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_VS_SDXEFIFOS_XBASE 0x1094
#define AZX_REG_VS_SDXEFIFOS_XINTERVAL 0x20
+#define AZX_REG_VS_LTRP_GB_MASK GENMASK(6, 0)
+
/* PCI space */
#define AZX_PCIREG_TCSEL 0x44
@@ -140,8 +144,12 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define BDL_SIZE 4096
#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
#define AZX_MAX_FRAG 32
-/* max buffer size - no h/w limit, you can increase as you like */
-#define AZX_MAX_BUF_SIZE (1024*1024*1024)
+/*
+ * Max buffer size - an artificial 4MB limit per stream to avoid big allocations.
+ * In theory there is no hardware limit, but since the buffer is allocated per
+ * stream, memory on a system with many streams would quickly be saturated if
+ * userspace requested the maximum buffer size for each of them.
+ */
+#define AZX_MAX_BUF_SIZE (4*1024*1024)
/* RIRB int mask: overrun[2], response[0] */
#define RIRB_INT_RESPONSE 0x01
@@ -254,24 +262,62 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_ML_BASE 0x40
#define AZX_ML_INTERVAL 0x40
+/* HDaudio registers valid for HDaudio and HDaudio extended links */
#define AZX_REG_ML_LCAP 0x00
+
+#define AZX_ML_HDA_LCAP_ALT BIT(28)
+#define AZX_ML_HDA_LCAP_ALT_HDA 0x0
+#define AZX_ML_HDA_LCAP_ALT_HDA_EXT 0x1
+
+#define AZX_ML_HDA_LCAP_INTC BIT(27) /* only used if ALT == 1 */
+#define AZX_ML_HDA_LCAP_OFLS BIT(26) /* only used if ALT == 1 */
+#define AZX_ML_HDA_LCAP_LSS BIT(23) /* only used if ALT == 1 */
+#define AZX_ML_HDA_LCAP_SLCOUNT GENMASK(22, 20) /* only used if ALT == 1 */
+
#define AZX_REG_ML_LCTL 0x04
+#define AZX_ML_LCTL_INTSTS BIT(31) /* only used if ALT == 1 */
+#define AZX_ML_LCTL_CPA BIT(23)
+#define AZX_ML_LCTL_CPA_SHIFT 23
+#define AZX_ML_LCTL_SPA BIT(16)
+#define AZX_ML_LCTL_SPA_SHIFT 16
+#define AZX_ML_LCTL_INTEN BIT(5) /* only used if ALT == 1 */
+#define AZX_ML_LCTL_OFLEN BIT(4) /* only used if ALT == 1 */
+#define AZX_ML_LCTL_SCF GENMASK(3, 0) /* only used if ALT == 0 */
+
#define AZX_REG_ML_LOSIDV 0x08
+
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define AZX_ML_LOSIDV_STREAM_MASK 0xFFFE
+
#define AZX_REG_ML_LSDIID 0x0C
+#define AZX_REG_ML_LSDIID_OFFSET(x) (0x0C + (x) * 0x02) /* only used if ALT == 1 */
+
+/* HDaudio registers only valid if LCAP.ALT == 0 */
#define AZX_REG_ML_LPSOO 0x10
#define AZX_REG_ML_LPSIO 0x12
#define AZX_REG_ML_LWALFC 0x18
#define AZX_REG_ML_LOUTPAY 0x20
#define AZX_REG_ML_LINPAY 0x30
-/* bit0 is reserved, with BIT(1) mapping to stream1 */
-#define ML_LOSIDV_STREAM_MASK 0xFFFE
-
-#define ML_LCTL_SCF_MASK 0xF
-#define AZX_MLCTL_SPA (0x1 << 16)
-#define AZX_MLCTL_CPA (0x1 << 23)
-#define AZX_MLCTL_SPA_SHIFT 16
-#define AZX_MLCTL_CPA_SHIFT 23
+/* HDaudio Extended link registers only valid if LCAP.ALT == 1 */
+#define AZX_REG_ML_LSYNC 0x1C
+
+#define AZX_REG_ML_LSYNC_CMDSYNC BIT(24)
+#define AZX_REG_ML_LSYNC_CMDSYNC_SHIFT 24
+#define AZX_REG_ML_LSYNC_SYNCGO BIT(23)
+#define AZX_REG_ML_LSYNC_SYNCPU BIT(20)
+#define AZX_REG_ML_LSYNC_SYNCPRD GENMASK(19, 0)
+
+#define AZX_REG_ML_LEPTR 0x20
+
+#define AZX_REG_ML_LEPTR_ID GENMASK(31, 24)
+#define AZX_REG_ML_LEPTR_ID_SHIFT 24
+#define AZX_REG_ML_LEPTR_ID_SDW 0x00
+#define AZX_REG_ML_LEPTR_ID_INTEL_SSP 0xC0
+#define AZX_REG_ML_LEPTR_ID_INTEL_DMIC 0xC1
+#define AZX_REG_ML_LEPTR_ID_INTEL_UAOL 0xC2
+#define AZX_REG_ML_LEPTR_VER GENMASK(23, 20)
+#define AZX_REG_ML_LEPTR_PTR GENMASK(19, 0)
/* registers for DMA Resume Capability Structure */
#define AZX_DRSM_CAP_ID 0x5
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h
index e36b77531c5c..006d358acce2 100644
--- a/include/sound/hda_verbs.h
+++ b/include/sound/hda_verbs.h
@@ -461,7 +461,7 @@ enum {
#define AC_DE_ELDV (1<<1)
#define AC_DE_IA (1<<2)
-/* device device types (0x0-0xf) */
+/* device types (0x0-0xf) */
enum {
AC_JACK_LINE_OUT,
AC_JACK_SPEAKER,
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6eed61e6cf8a..a73d7f34f4e5 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -9,6 +9,9 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -30,7 +33,7 @@ struct hda_device_id;
/*
* exported bus type
*/
-extern struct bus_type snd_hda_bus_type;
+extern const struct bus_type snd_hda_bus_type;
/*
* generic arrays
@@ -92,6 +95,7 @@ struct hdac_device {
bool lazy_cache:1; /* don't wake up for writes */
bool caps_overwriting:1; /* caps overwrite being in process */
bool cache_coef:1; /* cache COEF read/write too */
+ unsigned int registered:1; /* codec was registered */
};
/* device/driver type used for matching */
@@ -120,7 +124,7 @@ void snd_hdac_device_exit(struct hdac_device *dev);
int snd_hdac_device_register(struct hdac_device *codec);
void snd_hdac_device_unregister(struct hdac_device *codec);
int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name);
-int snd_hdac_codec_modalias(struct hdac_device *hdac, char *buf, size_t size);
+int snd_hdac_codec_modalias(const struct hdac_device *hdac, char *buf, size_t size);
int snd_hdac_refresh_widgets(struct hdac_device *codec);
@@ -136,13 +140,14 @@ int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *conn_list, int max_conns);
int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *start_id);
-unsigned int snd_hdac_calc_stream_format(unsigned int rate,
- unsigned int channels,
- snd_pcm_format_t format,
- unsigned int maxbps,
- unsigned short spdif_ctls);
+unsigned int snd_hdac_stream_format_bits(snd_pcm_format_t format, snd_pcm_subformat_t subformat,
+ unsigned int maxbits);
+unsigned int snd_hdac_stream_format(unsigned int channels, unsigned int bits, unsigned int rate);
+unsigned int snd_hdac_spdif_stream_format(unsigned int channels, unsigned int bits,
+ unsigned int rate, unsigned short spdif_ctls);
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
- u32 *ratesp, u64 *formatsp, unsigned int *bpsp);
+ u32 *ratesp, u64 *formatsp, u32 *subformatsp,
+ unsigned int *bpsp);
bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid,
unsigned int format);
@@ -241,6 +246,8 @@ struct hdac_bus_ops {
/* get a response from the last command */
int (*get_response)(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
+ /* notify of codec link power-up/down */
+ void (*link_power)(struct hdac_device *hdev, bool enable);
};
/*
@@ -342,6 +349,8 @@ struct hdac_bus {
bool corbrp_self_clear:1; /* CORBRP clears itself after reset */
bool polling_mode:1;
bool needs_damn_long_delay:1;
+	bool not_use_interrupts:1;	/* do not use the RIRB IRQ */
+ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */
int poll_count;
@@ -378,15 +387,8 @@ void snd_hdac_bus_exit(struct hdac_bus *bus);
int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
unsigned int cmd, unsigned int *res);
-static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
-{
- set_bit(codec->addr, &codec->bus->codec_powered);
-}
-
-static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
-{
- clear_bit(codec->addr, &codec->bus->codec_powered);
-}
+void snd_hdac_codec_link_up(struct hdac_device *codec);
+void snd_hdac_codec_link_down(struct hdac_device *codec);
int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
@@ -400,6 +402,7 @@ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
+void snd_hdac_bus_link_power(struct hdac_device *hdev, bool enable);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
@@ -452,6 +455,8 @@ static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
#define snd_hdac_reg_writel(bus, addr, val) writel(val, addr)
#define snd_hdac_reg_readl(bus, addr) readl(addr)
+#define snd_hdac_reg_writeq(bus, addr, val) writeq(val, addr)
+#define snd_hdac_reg_readq(bus, addr) readq(addr)
/*
* macros for easy use
@@ -494,6 +499,13 @@ static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
snd_hdac_chip_writeb(chip, reg, \
(snd_hdac_chip_readb(chip, reg) & ~(mask)) | (val))
+/* update register macro */
+#define snd_hdac_updatel(addr, reg, mask, val) \
+ writel(((readl(addr + reg) & ~(mask)) | (val)), addr + reg)
+
+#define snd_hdac_updatew(addr, reg, mask, val) \
+ writew(((readw(addr + reg) & ~(mask)) | (val)), addr + reg)
+
/*
* HD-audio stream
*/
@@ -510,6 +522,13 @@ struct hdac_stream {
void __iomem *sd_addr; /* stream descriptor pointer */
+ void __iomem *spib_addr; /* software position in buffers stream pointer */
+	void __iomem *fifo_addr;	/* software position max FIFO stream pointer */
+
+ void __iomem *dpibr_addr; /* DMA position in buffer resume pointer */
+ u32 dpib; /* DMA position in buffer */
+ u32 lpib; /* Linear position in buffer */
+
u32 sd_int_sta_mask; /* stream int status mask */
/* pcm support */
@@ -550,18 +569,20 @@ void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
int idx, int direction, int tag);
struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
+void snd_hdac_stream_release_locked(struct hdac_stream *azx_dev);
void snd_hdac_stream_release(struct hdac_stream *azx_dev);
struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
int dir, int stream_tag);
-int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
+int snd_hdac_stream_setup(struct hdac_stream *azx_dev, bool code_loading);
void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
unsigned int format_val);
-void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
-void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
+void snd_hdac_stream_start(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stop_streams(struct hdac_bus *bus);
+void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
unsigned int streams, unsigned int reg);
@@ -572,6 +593,19 @@ void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
+void snd_hdac_stream_spbcap_enable(struct hdac_bus *chip,
+ bool enable, int index);
+int snd_hdac_stream_set_spib(struct hdac_bus *bus,
+ struct hdac_stream *azx_dev, u32 value);
+int snd_hdac_stream_get_spbmaxfifo(struct hdac_bus *bus,
+ struct hdac_stream *azx_dev);
+void snd_hdac_stream_drsm_enable(struct hdac_bus *bus,
+ bool enable, int index);
+int snd_hdac_stream_wait_drsm(struct hdac_stream *azx_dev);
+int snd_hdac_stream_set_dpibr(struct hdac_bus *bus,
+ struct hdac_stream *azx_dev, u32 value);
+int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value);
+
/*
* macros for easy use
*/
@@ -588,6 +622,15 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
snd_hdac_reg_readw((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readb(dev, reg) \
snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readw_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readw, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readl_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
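A sketch of how these poll macros would be used, assuming SD_CTL and
SD_CTL_STREAM_RESET from hda_register.h:

	unsigned char val;
	int ret;

	/* wait (in atomic context) for the reset bit to read back as set */
	ret = snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val,
					 (val & SD_CTL_STREAM_RESET), 3, 300);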
/* update a register, pass without AZX_REG_ prefix */
#define snd_hdac_stream_updatel(dev, reg, mask, val) \
@@ -666,4 +709,29 @@ static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
for ((idx) = 0, (ptr) = (array)->list; (idx) < (array)->used; \
(ptr) = snd_array_elem(array, ++(idx)))
+/*
+ * Device matching
+ */
+
+#define HDA_CONTROLLER_IS_HSW(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_0) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_2) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_3) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_BDW) }, \
+ { } \
+ }, pci))
+
+#define HDA_CONTROLLER_IS_APL(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_APL) }, \
+ { } \
+ }, pci))
+
+#define HDA_CONTROLLER_IN_GPU(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG1) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_0) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_1) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_2) }, \
+ { } \
+ }, pci) || HDA_CONTROLLER_IS_HSW(pci))
+
#endif /* __SOUND_HDAUDIO_H */
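An illustrative check in a PCI probe path:

	static bool my_controller_needs_display_power(struct pci_dev *pci)
	{
		/* GPU-integrated HDA controllers (plus HSW/BDW) hang off display power */
		return HDA_CONTROLLER_IN_GPU(pci);
	}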
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ef88b20c7b0a..a8bebac1e4b2 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -2,6 +2,8 @@
#ifndef __SOUND_HDAUDIO_EXT_H
#define __SOUND_HDAUDIO_EXT_H
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <sound/hdaudio.h>
int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
@@ -9,9 +11,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_ext_bus_ops *ext_ops);
void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
-int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
- struct hdac_device *hdev);
-void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
@@ -24,12 +23,10 @@ void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
-void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip,
- bool enable, int index);
-
int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
-struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
- const char *codec_name);
+struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_addr(struct hdac_bus *bus, int addr);
+struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_name(struct hdac_bus *bus,
+ const char *codec_name);
enum hdac_ext_stream_type {
HDAC_EXT_STREAM_TYPE_COUPLED = 0,
@@ -43,15 +40,10 @@ enum hdac_ext_stream_type {
* @hstream: hdac_stream
* @pphc_addr: processing pipe host stream pointer
* @pplc_addr: processing pipe link stream pointer
- * @spib_addr: software position in buffers stream pointer
- * @fifo_addr: software position Max fifos stream pointer
- * @dpibr_addr: DMA position in buffer resume pointer
- * @dpib: DMA position in buffer
- * @lpib: Linear position in buffer
* @decoupled: stream host and link is decoupled
* @link_locked: link is locked
* @link_prepared: link is prepared
- * link_substream: link substream
+ * @link_substream: link substream
*/
struct hdac_ext_stream {
struct hdac_stream hstream;
@@ -59,17 +51,17 @@ struct hdac_ext_stream {
void __iomem *pphc_addr;
void __iomem *pplc_addr;
- void __iomem *spib_addr;
- void __iomem *fifo_addr;
-
- void __iomem *dpibr_addr;
+ u32 pphcllpl;
+ u32 pphcllpu;
+ u32 pphcldpl;
+ u32 pphcldpu;
- u32 dpib;
- u32 lpib;
bool decoupled:1;
bool link_locked:1;
bool link_prepared;
+ int (*host_setup)(struct hdac_stream *, bool);
+
struct snd_pcm_substream *link_substream;
};
@@ -77,35 +69,26 @@ struct hdac_ext_stream {
#define stream_to_hdac_ext_stream(s) \
container_of(s, struct hdac_ext_stream, hstream)
-void snd_hdac_ext_stream_init(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, int idx,
- int direction, int tag);
int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
- int num_stream, int dir);
-void snd_hdac_stream_free_all(struct hdac_bus *bus);
-void snd_hdac_link_free_all(struct hdac_bus *bus);
+ int num_stream, int dir);
+void snd_hdac_ext_stream_free_all(struct hdac_bus *bus);
+void snd_hdac_ext_link_free_all(struct hdac_bus *bus);
struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
-void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
+void snd_hdac_ext_stream_release(struct hdac_ext_stream *hext_stream, int type);
+struct hdac_ext_stream *snd_hdac_ext_cstream_assign(struct hdac_bus *bus,
+ struct snd_compr_stream *cstream);
+void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
+ struct hdac_ext_stream *hext_stream, bool decouple);
void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
-
-int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
- struct hdac_ext_stream *stream);
-void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
- bool enable, int index);
-int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
-
-void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
-int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *stream, int fmt);
+
+void snd_hdac_ext_stream_start(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_stream_clear(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_stream_reset(struct hdac_ext_stream *hext_stream);
+int snd_hdac_ext_stream_setup(struct hdac_ext_stream *hext_stream, int fmt);
+int snd_hdac_ext_host_stream_setup(struct hdac_ext_stream *hext_stream, bool code_loading);
struct hdac_ext_link {
struct hdac_bus *bus;
@@ -119,27 +102,62 @@ struct hdac_ext_link {
struct list_head list;
};
-int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *hlink);
+int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *hlink);
int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus);
-void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
- int stream);
-void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
- int stream);
-
-int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
-
-/* update register macro */
-#define snd_hdac_updatel(addr, reg, mask, val) \
- writel(((readl(addr + reg) & ~(mask)) | (val)), \
- addr + reg)
-
-#define snd_hdac_updatew(addr, reg, mask, val) \
- writew(((readw(addr + reg) & ~(mask)) | (val)), \
- addr + reg)
-
+void snd_hdac_ext_bus_link_set_stream_id(struct hdac_ext_link *hlink,
+ int stream);
+void snd_hdac_ext_bus_link_clear_stream_id(struct hdac_ext_link *hlink,
+ int stream);
+
+int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *hlink);
+int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink);
+
+void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
+
+#define snd_hdac_adsp_writeb(chip, reg, value) \
+ snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readb(chip, reg) \
+ snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writew(chip, reg, value) \
+ snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readw(chip, reg) \
+ snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writel(chip, reg, value) \
+ snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readl(chip, reg) \
+ snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writeq(chip, reg, value) \
+ snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readq(chip, reg) \
+ snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
+
+#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
+ snd_hdac_adsp_writeb(chip, reg, \
+ (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
+ snd_hdac_adsp_writew(chip, reg, \
+ (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
+ snd_hdac_adsp_writel(chip, reg, \
+ (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
+ snd_hdac_adsp_writeq(chip, reg, \
+ (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
+
+#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
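A hypothetical DSP core power-up built on these helpers; MY_REG_ADSPCS and the
SPA/CPA masks are placeholders (not defined by this header), and chip->dsp_ba
is assumed to be mapped:

	u32 reg;
	int ret;

	/* request power (SPA), then poll until current power (CPA) matches */
	snd_hdac_adsp_updatel(chip, MY_REG_ADSPCS, MY_ADSPCS_SPA, MY_ADSPCS_SPA);
	ret = snd_hdac_adsp_readl_poll(chip, MY_REG_ADSPCS, reg,
				       (reg & MY_ADSPCS_CPA) == MY_ADSPCS_CPA,
				       500, 10000);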
struct hdac_ext_device;
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 7754631a3102..5e1a9eafd10f 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -12,7 +12,6 @@
#include <linux/of_graph.h>
#include <linux/hdmi.h>
-#include <drm/drm_edid.h>
#include <sound/asoundef.h>
#include <sound/soc.h>
#include <uapi/sound/asound.h>
@@ -32,8 +31,13 @@ struct hdmi_codec_daifmt {
} fmt;
unsigned int bit_clk_inv:1;
unsigned int frame_clk_inv:1;
- unsigned int bit_clk_master:1;
- unsigned int frame_clk_master:1;
+ unsigned int bit_clk_provider:1;
+ unsigned int frame_clk_provider:1;
+	/* bit_fmt can be a standard PCM format or an
+	 * IEC958-encoded format. The ALSA IEC958 plugin passes
+	 * the IEC958_SUBFRAME format to the underlying driver.
+	 */
+ snd_pcm_format_t bit_fmt;
};
/*
@@ -60,13 +64,23 @@ struct hdmi_codec_ops {
/*
* Configures HDMI-encoder for audio stream.
- * Mandatory
+ * Having either prepare or hw_params is mandatory.
*/
int (*hw_params)(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
/*
+ * Configures HDMI-encoder for audio stream. Can be called
+ * multiple times for each setup.
+ *
+ * Having either prepare or hw_params is mandatory.
+ */
+ int (*prepare)(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /*
* Shuts down the audio stream.
* Mandatory
*/
@@ -109,7 +123,11 @@ struct hdmi_codec_ops {
struct hdmi_codec_pdata {
const struct hdmi_codec_ops *ops;
uint i2s:1;
+ uint no_i2s_playback:1;
+ uint no_i2s_capture:1;
uint spdif:1;
+ uint no_spdif_playback:1;
+ uint no_spdif_capture:1;
int max_i2s_channels;
void *data;
};
@@ -117,9 +135,6 @@ struct hdmi_codec_pdata {
struct snd_soc_component;
struct snd_soc_jack;
-int hdmi_codec_set_jack_detect(struct snd_soc_component *component,
- struct snd_soc_jack *jack);
-
#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
#endif /* __HDMI_CODEC_H__ */
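
To see how the now-optional callbacks and the new capability flags combine, a hypothetical playback-only I2S HDMI bridge might register like this; every my_* name is invented for the sketch.

	#include <sound/hdmi-codec.h>

	static int my_hdmi_prepare(struct device *dev, void *data,
				   struct hdmi_codec_daifmt *fmt,
				   struct hdmi_codec_params *hparms)
	{
		/* fmt->bit_fmt says whether we get raw PCM or IEC958 subframes */
		if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
			return my_setup_iec958(dev, hparms);	/* hypothetical */
		return my_setup_pcm(dev, fmt, hparms);		/* hypothetical */
	}

	static void my_hdmi_shutdown(struct device *dev, void *data)
	{
		/* hypothetical: mute the output and stop the audio clock */
	}

	static const struct hdmi_codec_ops my_hdmi_ops = {
		.prepare	= my_hdmi_prepare,	/* prepare *or* hw_params */
		.audio_shutdown	= my_hdmi_shutdown,	/* still mandatory */
	};

	static const struct hdmi_codec_pdata my_hdmi_pdata = {
		.ops			= &my_hdmi_ops,
		.i2s			= 1,
		.no_i2s_capture		= 1,	/* expose the playback stream only */
		.max_i2s_channels	= 8,
	};
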
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index 8d6cdb254039..b0da633184cd 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -53,7 +53,7 @@ struct snd_hwdep {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_hwdep *hwdep);
- struct device dev;
+ struct device *dev;
struct mutex open_mutex;
int used; /* reference counter */
diff --git a/include/sound/info.h b/include/sound/info.h
index 7c13bf52cc81..adbc506860d6 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -118,8 +118,6 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
const char *name,
struct snd_info_entry *parent);
void snd_info_free_entry(struct snd_info_entry *entry);
-int snd_info_store_text(struct snd_info_entry *entry);
-int snd_info_restore_text(struct snd_info_entry *entry);
int snd_info_card_create(struct snd_card *card);
int snd_info_card_register(struct snd_card *card);
diff --git a/include/sound/intel-dsp-config.h b/include/sound/intel-dsp-config.h
index c36622bee3f8..34c975910574 100644
--- a/include/sound/intel-dsp-config.h
+++ b/include/sound/intel-dsp-config.h
@@ -15,12 +15,14 @@ enum {
SND_INTEL_DSP_DRIVER_LEGACY,
SND_INTEL_DSP_DRIVER_SST,
SND_INTEL_DSP_DRIVER_SOF,
- SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_SOF
+ SND_INTEL_DSP_DRIVER_AVS,
+ SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_AVS
};
#if IS_ENABLED(CONFIG_SND_INTEL_DSP_CONFIG)
int snd_intel_dsp_driver_probe(struct pci_dev *pci);
+int snd_intel_acpi_dsp_driver_probe(struct device *dev, const u8 acpi_hid[ACPI_ID_LEN]);
#else
@@ -29,6 +31,12 @@ static inline int snd_intel_dsp_driver_probe(struct pci_dev *pci)
return SND_INTEL_DSP_DRIVER_ANY;
}
+static inline
+int snd_intel_acpi_dsp_driver_probe(struct device *dev, const u8 acpi_hid[ACPI_ID_LEN])
+{
+ return SND_INTEL_DSP_DRIVER_ANY;
+}
+
#endif
#endif
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index 743c2f442280..53470d6a28d6 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -10,7 +10,20 @@
#include <linux/acpi.h>
-#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
+enum nhlt_link_type {
+ NHLT_LINK_HDA = 0,
+ NHLT_LINK_DSP = 1,
+ NHLT_LINK_DMIC = 2,
+ NHLT_LINK_SSP = 3,
+ NHLT_LINK_INVALID
+};
+
+enum nhlt_device_type {
+ NHLT_DEVICE_BT = 0,
+ NHLT_DEVICE_DMIC = 1,
+ NHLT_DEVICE_I2S = 4,
+ NHLT_DEVICE_INVALID
+};
struct wav_fmt {
u16 fmt_tag;
@@ -33,21 +46,6 @@ struct wav_fmt_ext {
u8 sub_fmt[16];
} __packed;
-enum nhlt_link_type {
- NHLT_LINK_HDA = 0,
- NHLT_LINK_DSP = 1,
- NHLT_LINK_DMIC = 2,
- NHLT_LINK_SSP = 3,
- NHLT_LINK_INVALID
-};
-
-enum nhlt_device_type {
- NHLT_DEVICE_BT = 0,
- NHLT_DEVICE_DMIC = 1,
- NHLT_DEVICE_I2S = 4,
- NHLT_DEVICE_INVALID
-};
-
struct nhlt_specific_cfg {
u32 size;
u8 caps[];
@@ -113,6 +111,11 @@ struct nhlt_vendor_dmic_array_config {
} __packed;
enum {
+ NHLT_CONFIG_TYPE_GENERIC = 0,
+ NHLT_CONFIG_TYPE_MIC_ARRAY = 1
+};
+
+enum {
NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
NHLT_MIC_ARRAY_2CH_BIG = 0xb,
NHLT_MIC_ARRAY_4CH_1ST_GEOM = 0xc,
@@ -121,15 +124,26 @@ enum {
NHLT_MIC_ARRAY_VENDOR_DEFINED = 0xf,
};
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
+
struct nhlt_acpi_table *intel_nhlt_init(struct device *dev);
void intel_nhlt_free(struct nhlt_acpi_table *addr);
int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt);
-#else
+bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type);
+
+int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type);
-struct nhlt_acpi_table;
+int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num);
+
+struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type);
+
+#else
static inline struct nhlt_acpi_table *intel_nhlt_init(struct device *dev)
{
@@ -145,6 +159,31 @@ static inline int intel_nhlt_get_dmic_geo(struct device *dev,
{
return 0;
}
+
+static inline bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt,
+ u8 link_type)
+{
+ return false;
+}
+
+static inline int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type)
+{
+ return 0;
+}
+
+static inline int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num)
+{
+ return 0;
+}
+
+static inline struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type)
+{
+ return NULL;
+}
+
#endif
#endif
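
A sketch of the intended call flow for the new NHLT helpers, e.g. from a machine driver probe. The parameter values are illustrative, and passing 0 for the direction assumes 0 means render/playback.

	#include <sound/intel-nhlt.h>

	static int my_probe_nhlt(struct device *dev)
	{
		struct nhlt_acpi_table *nhlt;
		struct nhlt_specific_cfg *blob;
		int ret = 0;

		nhlt = intel_nhlt_init(dev);	/* NULL when no NHLT table exists */
		if (!nhlt)
			return -ENODEV;

		if (!intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP)) {
			ret = -ENODEV;
			goto out;
		}

		/* blob for SSP0: 24 valid bits in 32-bit slots, 2ch, 48kHz */
		blob = intel_nhlt_get_endpoint_blob(dev, nhlt, 0, NHLT_LINK_SSP,
						    24, 32, 2, 48000, 0,
						    NHLT_DEVICE_I2S);
		if (!blob)
			ret = -ENOENT;
		/* the blob points into the table: copy it before freeing in real code */
	out:
		intel_nhlt_free(nhlt);
		return ret;
	}
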
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 9eb2b5ec1ec4..1ed90e2109e9 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -62,11 +62,13 @@ struct snd_jack {
const char *id;
#ifdef CONFIG_SND_JACK_INPUT_DEV
struct input_dev *input_dev;
+ struct mutex input_dev_lock;
int registered;
int type;
char name[100];
unsigned int key[6]; /* Keep in sync with definitions above */
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+ int hw_status_cache;
void *private_data;
void (*private_free)(struct snd_jack *);
};
diff --git a/include/sound/l3.h b/include/sound/l3.h
deleted file mode 100644
index b6f58072237a..000000000000
--- a/include/sound/l3.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _L3_H_
-#define _L3_H_ 1
-
-struct l3_pins {
- void (*setdat)(struct l3_pins *, int);
- void (*setclk)(struct l3_pins *, int);
- void (*setmode)(struct l3_pins *, int);
-
- int gpio_data;
- int gpio_clk;
- int gpio_mode;
- int use_gpios;
-
- int data_hold;
- int data_setup;
- int clock_high;
- int mode_hold;
- int mode;
- int mode_setup;
-};
-
-struct device;
-
-int l3_write(struct l3_pins *adap, u8 addr, u8 *data, int len);
-int l3_set_gpio_ops(struct device *dev, struct l3_pins *adap);
-
-#endif
diff --git a/include/sound/madera-pdata.h b/include/sound/madera-pdata.h
index e3060f48f108..58398d80c3de 100644
--- a/include/sound/madera-pdata.h
+++ b/include/sound/madera-pdata.h
@@ -9,7 +9,7 @@
#ifndef MADERA_CODEC_PDATA_H
#define MADERA_CODEC_PDATA_H
-#include <linux/kernel.h>
+#include <linux/types.h>
#define MADERA_MAX_INPUT 6
#define MADERA_MAX_MUXED_CHANNELS 4
diff --git a/include/sound/max9768.h b/include/sound/max9768.h
index 0f78b41d030e..8509ba0079b0 100644
--- a/include/sound/max9768.h
+++ b/include/sound/max9768.h
@@ -9,14 +9,10 @@
/**
* struct max9768_pdata - optional platform specific MAX9768 configuration
- * @shdn_gpio: GPIO to SHDN pin. If not valid, pin must be hardwired HIGH
- * @mute_gpio: GPIO to MUTE pin. If not valid, control for mute won't be added
* @flags: configuration flags, e.g. set classic PWM mode (check datasheet
* regarding "filterless modulation" which is default).
*/
struct max9768_pdata {
- int shdn_gpio;
- int mute_gpio;
unsigned flags;
#define MAX9768_FLAG_CLASSIC_PWM (1 << 0)
};
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 5daa937684a4..43d524580bd2 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -9,41 +9,50 @@
#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H
+#include <linux/dma-direction.h>
#include <asm/page.h>
struct device;
+struct vm_area_struct;
+struct sg_table;
/*
* buffer device info
*/
struct snd_dma_device {
int type; /* SNDRV_DMA_TYPE_XXX */
+ enum dma_data_direction dir; /* DMA direction */
+ bool need_sync; /* explicit sync needed? */
struct device *dev; /* generic device */
};
-#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
-
-
/*
* buffer types
*/
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
-#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cahced */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
-#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */
-#else
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
-#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC
-#endif
+#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
#else
#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
#endif
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
+#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
+#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
+#endif
+/* fallback types, don't use those directly */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
+#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
+#endif
/*
* info for buffer allocation
@@ -64,84 +73,53 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * Scatter-Gather generic device pages
- */
-void *snd_malloc_sgbuf_pages(struct device *device,
- size_t size, struct snd_dma_buffer *dmab,
- size_t *res_size);
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);
-
-struct snd_sg_page {
- void *buf;
- dma_addr_t addr;
-};
-
-struct snd_sg_buf {
- int size; /* allocated byte size */
- int pages; /* allocated pages */
- int tblsize; /* allocated table size */
- struct snd_sg_page *table; /* address table */
- struct page **page_table; /* page table (for vmap/vunmap) */
- struct device *dev;
-};
+/* allocate/release a buffer */
+int snd_dma_alloc_dir_pages(int type, struct device *dev,
+ enum dma_data_direction dir, size_t size,
+ struct snd_dma_buffer *dmab);
-/*
- * return the physical address at the corresponding offset
- */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
+static inline int snd_dma_alloc_pages(int type, struct device *dev,
+ size_t size, struct snd_dma_buffer *dmab)
{
- struct snd_sg_buf *sgbuf = dmab->private_data;
- dma_addr_t addr;
-
- if (!sgbuf)
- return dmab->addr + offset;
- addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
- addr &= ~((dma_addr_t)PAGE_SIZE - 1);
- return addr + offset % PAGE_SIZE;
+ return snd_dma_alloc_dir_pages(type, dev, DMA_BIDIRECTIONAL, size, dmab);
}
-/*
- * return the virtual address at the corresponding offset
- */
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- struct snd_sg_buf *sgbuf = dmab->private_data;
+int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
+ struct snd_dma_buffer *dmab);
+void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area);
- if (!sgbuf)
- return dmab->area + offset;
- return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
-}
+enum snd_dma_sync_mode { SNDRV_DMA_SYNC_CPU, SNDRV_DMA_SYNC_DEVICE };
+#ifdef CONFIG_HAS_DMA
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode);
+#else
+static inline void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode) {}
+#endif
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size);
-#else
-/* non-SG versions */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
+
+/* device-managed memory allocator */
+struct snd_dma_buffer *snd_devm_alloc_dir_pages(struct device *dev, int type,
+ enum dma_data_direction dir,
+ size_t size);
+
+static inline struct snd_dma_buffer *
+snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
- return dmab->addr + offset;
+ return snd_devm_alloc_dir_pages(dev, type, DMA_BIDIRECTIONAL, size);
}
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
- size_t offset)
+static inline struct sg_table *
+snd_dma_noncontig_sg_table(struct snd_dma_buffer *dmab)
{
- return dmab->area + offset;
+ return dmab->private_data;
}
-#define snd_sgbuf_get_chunk_size(dmab, ofs, size) (size)
-
-#endif /* CONFIG_SND_DMA_SGBUF */
-
-/* allocate/release a buffer */
-int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
- struct snd_dma_buffer *dmab);
-int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
- struct snd_dma_buffer *dmab);
-void snd_dma_free_pages(struct snd_dma_buffer *dmab);
-
#endif /* __SOUND_MEMALLOC_H */
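
Tying the new pieces together, a hedged sketch of a capture driver using the direction-aware allocator and the sync hook; the device, size and names are placeholders.

	#include <sound/memalloc.h>

	static int my_alloc_capture_buffer(struct device *dev,
					   struct snd_dma_buffer *dmab)
	{
		int err;

		/* non-contiguous buffer: the device writes, the CPU reads */
		err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG, dev,
					      DMA_FROM_DEVICE, 256 * 1024, dmab);
		if (err < 0)
			return err;

		/* after a DMA transfer: make the data visible to the CPU;
		 * a no-op unless dmab->dev.need_sync was set at allocation */
		snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);

		/* release later with snd_dma_free_pages(dmab); alternatively the
		 * snd_devm_alloc_pages() variant ties the lifetime to the device */
		return 0;
	}
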
diff --git a/include/sound/opl3.h b/include/sound/opl3.h
index ebf3852da9fe..052395a2f876 100644
--- a/include/sound/opl3.h
+++ b/include/sound/opl3.h
@@ -229,7 +229,7 @@ struct fm_operator {
unsigned char attack_decay;
unsigned char sustain_release;
unsigned char wave_select;
-} __attribute__((packed));
+} __packed;
/* Instrument data */
struct fm_instrument {
diff --git a/include/sound/pcm-indirect.h b/include/sound/pcm-indirect.h
index 04127686e8d0..98e06ea73b2b 100644
--- a/include/sound/pcm-indirect.h
+++ b/include/sound/pcm-indirect.h
@@ -44,7 +44,7 @@ snd_pcm_indirect_playback_transfer(struct snd_pcm_substream *substream,
if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2))
diff += runtime->boundary;
if (diff < 0)
- return -EINVAL;
+ return -EPIPE;
rec->sw_ready += (int)frames_to_bytes(runtime, diff);
rec->appl_ptr = appl_ptr;
}
@@ -83,6 +83,8 @@ snd_pcm_indirect_playback_pointer(struct snd_pcm_substream *substream,
struct snd_pcm_indirect *rec, unsigned int ptr)
{
int bytes = ptr - rec->hw_io;
+ int err;
+
if (bytes < 0)
bytes += rec->hw_buffer_size;
rec->hw_io = ptr;
@@ -90,8 +92,11 @@ snd_pcm_indirect_playback_pointer(struct snd_pcm_substream *substream,
rec->sw_io += bytes;
if (rec->sw_io >= rec->sw_buffer_size)
rec->sw_io -= rec->sw_buffer_size;
- if (substream->ops->ack)
- substream->ops->ack(substream);
+ if (substream->ops->ack) {
+ err = substream->ops->ack(substream);
+ if (err == -EPIPE)
+ return SNDRV_PCM_POS_XRUN;
+ }
return bytes_to_frames(substream->runtime, rec->sw_io);
}
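
The hunks above give the .ack callback a way to report an overrun or underrun; a hypothetical driver callback shows the contract (all my_* helpers are invented):

	static int my_pcm_ack(struct snd_pcm_substream *substream)
	{
		struct my_chip *chip = snd_pcm_substream_chip(substream);

		if (my_fifo_xrun(chip))		/* hypothetical status check */
			return -EPIPE;		/* helpers return SNDRV_PCM_POS_XRUN */
		return my_kick_transfer(chip);	/* hypothetical */
	}
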
@@ -112,7 +117,7 @@ snd_pcm_indirect_capture_transfer(struct snd_pcm_substream *substream,
if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2))
diff += runtime->boundary;
if (diff < 0)
- return -EINVAL;
+ return -EPIPE;
rec->sw_ready -= frames_to_bytes(runtime, diff);
rec->appl_ptr = appl_ptr;
}
@@ -152,6 +157,8 @@ snd_pcm_indirect_capture_pointer(struct snd_pcm_substream *substream,
{
int qsize;
int bytes = ptr - rec->hw_io;
+ int err;
+
if (bytes < 0)
bytes += rec->hw_buffer_size;
rec->hw_io = ptr;
@@ -162,8 +169,11 @@ snd_pcm_indirect_capture_pointer(struct snd_pcm_substream *substream,
rec->sw_io += bytes;
if (rec->sw_io >= rec->sw_buffer_size)
rec->sw_io -= rec->sw_buffer_size;
- if (substream->ops->ack)
- substream->ops->ack(substream);
+ if (substream->ops->ack) {
+ err = substream->ops->ack(substream);
+ if (err == -EPIPE)
+ return SNDRV_PCM_POS_XRUN;
+ }
return bytes_to_frames(substream->runtime, rec->sw_io);
}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 2ba5df2c9e23..210096f124ee 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/pm_qos.h>
#include <linux/refcount.h>
+#include <linux/uio.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -31,6 +32,7 @@
struct snd_pcm_hardware {
unsigned int info; /* SNDRV_PCM_INFO_* */
u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ u32 subformats; /* for S32_LE, SNDRV_PCM_SUBFMTBIT_* */
unsigned int rates; /* SNDRV_PCM_RATE_* */
unsigned int rate_min; /* min rate */
unsigned int rate_max; /* max rate */
@@ -68,11 +70,8 @@ struct snd_pcm_ops {
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*fill_silence)(struct snd_pcm_substream *substream, int channel,
unsigned long pos, unsigned long bytes);
- int (*copy_user)(struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void __user *buf,
- unsigned long bytes);
- int (*copy_kernel)(struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void *buf, unsigned long bytes);
+ int (*copy)(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, struct iov_iter *iter, unsigned long bytes);
struct page *(*page)(struct snd_pcm_substream *substream,
unsigned long offset);
int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
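
The two removed callbacks collapse into one because an iov_iter already encodes whether it walks user or kernel memory, so a driver needs a single transfer routine. A sketch assuming a hypothetical chip with a plain memory ring buffer; for MMIO-backed buffers, the copy_to_iter_fromio()/copy_from_iter_toio() helpers declared near the end of this header serve the same purpose.

	static int my_pcm_copy(struct snd_pcm_substream *substream, int channel,
			       unsigned long pos, struct iov_iter *iter,
			       unsigned long bytes)
	{
		struct my_chip *chip = snd_pcm_substream_chip(substream);
		void *hwbuf = chip->buffer + pos;	/* hypothetical ring buffer */

		/* the stream direction decides which way the data moves */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			if (copy_from_iter(hwbuf, bytes, iter) != bytes)
				return -EFAULT;
		} else {
			if (copy_to_iter(hwbuf, bytes, iter) != bytes)
				return -EFAULT;
		}
		return 0;
	}
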
@@ -106,24 +105,24 @@ struct snd_pcm_ops {
#define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
/* If you change this don't forget to change rates[] table in pcm_native.c */
-#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */
-#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */
-#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */
-#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */
-#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */
-#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */
-#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */
-#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */
-#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */
-#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */
-#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */
-#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */
-#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */
-#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */
-#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */
-
-#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */
-#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */
+#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */
+#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */
+#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */
+#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */
+#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */
+#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */
+#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */
+#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */
+#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */
+#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */
+#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */
+#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */
+#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */
+#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */
+#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
+
+#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
+#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
#define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
@@ -147,6 +146,9 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE)
#define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE)
#define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE)
+// For S32/U32 formats, the 'msbits' hardware parameter is often used to deliver the number of
+// significant bits within the 32-bit container. This covers so-called 'left-justified' or
+// 'right-padded' samples which have less width than 32 bits.
#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE)
#define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE)
#define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE)
@@ -216,6 +218,12 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
+#define _SNDRV_PCM_SUBFMTBIT(fmt) BIT((__force int)SNDRV_PCM_SUBFORMAT_##fmt)
+#define SNDRV_PCM_SUBFMTBIT_STD _SNDRV_PCM_SUBFMTBIT(STD)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_MAX _SNDRV_PCM_SUBFMTBIT(MSBITS_MAX)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_20 _SNDRV_PCM_SUBFMTBIT(MSBITS_20)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_24 _SNDRV_PCM_SUBFMTBIT(MSBITS_24)
+
struct snd_pcm_file {
struct snd_pcm_substream *substream;
int no_compat_mmap;
@@ -229,7 +237,7 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule {
unsigned int cond;
int var;
- int deps[4];
+ int deps[5];
snd_pcm_hw_rule_func_t func;
void *private;
@@ -343,6 +351,8 @@ static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy
struct snd_pcm_runtime {
/* -- Status -- */
+ snd_pcm_state_t state; /* stream state */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
struct snd_pcm_substream *trigger_master;
struct timespec64 trigger_tstamp; /* trigger timestamp */
bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */
@@ -373,18 +383,18 @@ struct snd_pcm_runtime {
unsigned int rate_den;
unsigned int no_period_wakeup: 1;
- /* -- SW params -- */
- int tstamp_mode; /* mmap timestamp is updated */
+ /* -- SW params; see struct snd_pcm_sw_params for comments -- */
+ int tstamp_mode;
unsigned int period_step;
snd_pcm_uframes_t start_threshold;
snd_pcm_uframes_t stop_threshold;
- snd_pcm_uframes_t silence_threshold; /* Silence filling happens when
- noise is nearest than this */
- snd_pcm_uframes_t silence_size; /* Silence filling size */
- snd_pcm_uframes_t boundary; /* pointers wrap point */
+ snd_pcm_uframes_t silence_threshold;
+ snd_pcm_uframes_t silence_size;
+ snd_pcm_uframes_t boundary;
+ /* internal data of auto-silencer */
snd_pcm_uframes_t silence_start; /* starting pointer to silence area */
- snd_pcm_uframes_t silence_filled; /* size filled with silence */
+ snd_pcm_uframes_t silence_filled; /* already filled part of silence area */
union snd_pcm_sync_id sync; /* hardware synchronization ID */
@@ -396,8 +406,10 @@ struct snd_pcm_runtime {
snd_pcm_uframes_t twake; /* do transfer (!poll) wakeup if non-zero */
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
bool stop_operating; /* sync_stop will be called */
+ struct mutex buffer_mutex; /* protect for buffer changes */
+ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
/* -- private section -- */
void *private_data;
@@ -503,7 +515,7 @@ struct snd_pcm_str {
#endif
#endif
struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
- struct device dev;
+ struct device *dev;
};
struct snd_pcm {
@@ -602,7 +614,7 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
* snd_pcm_stream_linked - Check whether the substream is linked with others
* @substream: substream to check
*
- * Returns true if the given substream is being linked with others.
+ * Return: true if the given substream is being linked with others
*/
static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
{
@@ -614,6 +626,7 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream);
/**
* snd_pcm_stream_lock_irqsave - Lock the PCM stream
@@ -633,6 +646,32 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
/**
+ * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with
+ * the single-depth lockdep subclass.
+ */
+#define snd_pcm_stream_lock_irqsave_nested(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \
+ } while (0)
+
+/* definitions for guard(); use like guard(pcm_stream_lock) */
+DEFINE_LOCK_GUARD_1(pcm_stream_lock, struct snd_pcm_substream,
+ snd_pcm_stream_lock(_T->lock),
+ snd_pcm_stream_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(pcm_stream_lock_irq, struct snd_pcm_substream,
+ snd_pcm_stream_lock_irq(_T->lock),
+ snd_pcm_stream_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1(pcm_stream_lock_irqsave, struct snd_pcm_substream,
+ snd_pcm_stream_lock_irqsave(_T->lock, _T->flags),
+ snd_pcm_stream_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
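
For illustration, a hypothetical helper built on the guard definitions above; the scoped guard releases the stream lock on every return path, per <linux/cleanup.h> semantics, and the state setter is the __snd_pcm_set_state() helper introduced further down in this diff.

	static void my_set_state_locked(struct snd_pcm_substream *substream,
					snd_pcm_state_t state)
	{
		guard(pcm_stream_lock_irq)(substream);
		__snd_pcm_set_state(substream->runtime, state);
	}	/* lock dropped here by the guard */
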
+/**
* snd_pcm_group_for_each_entry - iterate over the linked substreams
* @s: the iterator
* @substream: the substream
@@ -653,20 +692,36 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
* snd_pcm_running - Check whether the substream is in a running state
* @substream: substream to check
*
- * Returns true if the given substream is in the state RUNNING, or in the
+ * Return: true if the given substream is in the state RUNNING, or in the
* state DRAINING for playback.
*/
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
{
- return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
- (substream->runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
+ return (substream->runtime->state == SNDRV_PCM_STATE_RUNNING ||
+ (substream->runtime->state == SNDRV_PCM_STATE_DRAINING &&
substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
}
/**
+ * __snd_pcm_set_state - Change the current PCM state
+ * @runtime: PCM runtime to set
+ * @state: the current state to set
+ *
+ * Call within the stream lock
+ */
+static inline void __snd_pcm_set_state(struct snd_pcm_runtime *runtime,
+ snd_pcm_state_t state)
+{
+ runtime->state = state;
+ runtime->status->state = state; /* copy for mmap */
+}
+
+/**
* bytes_to_samples - Unit conversion of the size from bytes to samples
* @runtime: PCM runtime instance
* @size: size in bytes
+ *
+ * Return: the size in samples
*/
static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -677,6 +732,8 @@ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t
* bytes_to_frames - Unit conversion of the size from bytes to frames
* @runtime: PCM runtime instance
* @size: size in bytes
+ *
+ * Return: the size in frames
*/
static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -687,6 +744,8 @@ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime,
* samples_to_bytes - Unit conversion of the size from samples to bytes
* @runtime: PCM runtime instance
* @size: size in samples
+ *
+ * Return: the byte size
*/
static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -697,6 +756,8 @@ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t
* frames_to_bytes - Unit conversion of the size from frames to bytes
* @runtime: PCM runtime instance
* @size: size in frames
+ *
+ * Return: the byte size
*/
static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
{
@@ -707,6 +768,8 @@ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_s
* frame_aligned - Check whether the byte size is aligned to frames
* @runtime: PCM runtime instance
* @bytes: size in bytes
+ *
+ * Return: true if aligned, or false if not
*/
static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
{
@@ -716,6 +779,8 @@ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
/**
* snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
* @substream: PCM substream
+ *
+ * Return: buffer byte size
*/
static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
{
@@ -726,6 +791,8 @@ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substrea
/**
* snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
* @substream: PCM substream
+ *
+ * Return: period byte size
*/
static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
{
@@ -738,6 +805,8 @@ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substrea
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
+ *
+ * Return: available frame size
*/
static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
{
@@ -754,6 +823,8 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
+ *
+ * Return: available frame size
*/
static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
{
@@ -766,6 +837,8 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
/**
* snd_pcm_playback_hw_avail - Get the queued space for playback
* @runtime: PCM runtime instance
+ *
+ * Return: available frame size
*/
static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
{
@@ -775,6 +848,8 @@ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime
/**
* snd_pcm_capture_hw_avail - Get the free space for capture
* @runtime: PCM runtime instance
+ *
+ * Return: available frame size
*/
static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
{
@@ -914,6 +989,8 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
/**
* params_channels - Get the number of channels from the hw params
* @p: hw params
+ *
+ * Return: the number of channels
*/
static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
{
@@ -923,6 +1000,8 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
/**
* params_rate - Get the sample rate from the hw params
* @p: hw params
+ *
+ * Return: the sample rate
*/
static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
{
@@ -932,6 +1011,8 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
/**
* params_period_size - Get the period size (in frames) from the hw params
* @p: hw params
+ *
+ * Return: the period size in frames
*/
static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
{
@@ -941,6 +1022,8 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
/**
* params_periods - Get the number of periods from the hw params
* @p: hw params
+ *
+ * Return: the number of periods
*/
static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
{
@@ -950,6 +1033,8 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
/**
* params_buffer_size - Get the buffer size (in frames) from the hw params
* @p: hw params
+ *
+ * Return: the buffer size in frames
*/
static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
{
@@ -959,6 +1044,8 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
/**
* params_buffer_bytes - Get the buffer size (in bytes) from the hw params
* @p: hw params
+ *
+ * Return: the buffer size in bytes
*/
static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
{
@@ -1066,6 +1153,7 @@ void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
void snd_pcm_set_sync(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream);
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
void *buf, bool interleaved,
@@ -1203,11 +1291,52 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
-void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
- struct device *data, size_t size, size_t max);
-void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
- struct device *data,
- size_t size, size_t max);
+int snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max);
+int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max);
+
+/**
+ * snd_pcm_set_fixed_buffer - Preallocate and set up the fixed size PCM buffer
+ * @substream: the pcm substream instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ *
+ * This is a variant of snd_pcm_set_managed_buffer(); it pre-allocates
+ * only a buffer of the given size and, unlike the standard one, allows
+ * neither re-allocation nor dynamic allocation of a larger buffer.
+ * The function may return -ENOMEM error, hence the caller must check it.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+static inline int __must_check
+snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size)
+{
+ return snd_pcm_set_managed_buffer(substream, type, data, size, 0);
+}
+
+/**
+ * snd_pcm_set_fixed_buffer_all - Preallocate and set up the fixed size PCM buffer
+ * @pcm: the pcm instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ *
+ * Apply the fixed buffer set up via snd_pcm_set_fixed_buffer() for
+ * all substreams. If any allocation fails, it returns -ENOMEM, hence the
+ * caller must check the return value.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+static inline int __must_check
+snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data, size_t size)
+{
+ return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0);
+}
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
size_t size, gfp_t gfp_flags);
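
A short usage sketch for the fixed-buffer setters above, with a hypothetical chip structure; unlike the old void-returning preallocation calls, the result must now be checked.

	static int my_pcm_new(struct my_chip *chip)	/* hypothetical */
	{
		/* one fixed 64kB DMA buffer per substream, no run-time resizing */
		return snd_pcm_set_fixed_buffer_all(chip->pcm, SNDRV_DMA_TYPE_DEV,
						    chip->dev, 64 * 1024);
	}
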
@@ -1253,18 +1382,12 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * SG-buffer handling
- */
-#define snd_pcm_substream_sgbuf(substream) \
- snd_pcm_get_dma_buf(substream)->private_data
-#endif /* SND_DMA_SGBUF */
-
/**
* snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
* @substream: PCM substream
* @ofs: byte offset
+ *
+ * Return: DMA address
*/
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
@@ -1273,22 +1396,13 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
}
/**
- * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
- * @substream: PCM substream
- * @ofs: byte offset
- */
-static inline void *
-snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
-{
- return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
-}
-
-/**
- * snd_pcm_sgbuf_chunk_size - Compute the max size that fits within the contig.
- * page from the given size
+ * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
+ * contig. page from the given size
* @substream: PCM substream
* @ofs: byte offset
* @size: byte size to examine
+ *
+ * Return: chunk size
*/
static inline unsigned int
snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
@@ -1354,6 +1468,20 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
+ * snd_pcm_direction_name - Get a string naming the direction of a stream
+ * @direction: Stream's direction, one of SNDRV_PCM_STREAM_XXX
+ *
+ * Return: a string naming the direction of the stream
+ */
+static inline const char *snd_pcm_direction_name(int direction)
+{
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+/**
* snd_pcm_stream_str - Get a string naming the direction of a stream
* @substream: the pcm substream instance
*
@@ -1361,10 +1489,7 @@ const char *snd_pcm_format_name(snd_pcm_format_t format);
*/
static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
{
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- return "Playback";
- else
- return "Capture";
+ return snd_pcm_direction_name(substream->stream);
}
/*
@@ -1391,6 +1516,8 @@ struct snd_pcm_chmap {
* snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
* @info: chmap information
* @idx: the substream number index
+ *
+ * Return: the matched PCM substream, or NULL if not found
*/
static inline struct snd_pcm_substream *
snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
@@ -1421,6 +1548,8 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
/**
* pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
* @pcm_format: PCM format
+ *
+ * Return: 64bit mask corresponding to the given PCM format
*/
static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
{
@@ -1444,6 +1573,11 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
#define pcm_dbg(pcm, fmt, args...) \
dev_dbg((pcm)->card->dev, fmt, ##args)
+/* helpers for copying between iov_iter and iomem */
+int copy_to_iter_fromio(struct iov_iter *iter, const void __iomem *src,
+ size_t count);
+int copy_from_iter_toio(void __iomem *dst, struct iov_iter *iter, size_t count);
+
struct snd_pcm_status64 {
snd_pcm_state_t state; /* stream state */
u8 rsvd[4];
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0939aa45e2fe..64e84441cde1 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -4,6 +4,14 @@
#include <linux/types.h>
+int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len);
+
+int snd_pcm_fill_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
+ size_t len);
+
+int snd_pcm_fill_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len);
+
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len);
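
The intended sequence for the new channel-status helpers: fill in consumer-mode defaults first, then patch the rate- and word-length-dependent fields from hw_params. The final write-out routine below is hypothetical.

	static int my_spdif_hw_params(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params *params)
	{
		u8 cs[4];	/* channel-status bytes; usually kept in driver state */
		int err;

		err = snd_pcm_create_iec958_consumer_default(cs, sizeof(cs));
		if (err < 0)
			return err;

		err = snd_pcm_fill_iec958_consumer_hw_params(params, cs, sizeof(cs));
		if (err < 0)
			return err;

		return my_write_channel_status(substream, cs, sizeof(cs)); /* hypothetical */
	}
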
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 36f94735d23d..fbf35df6e5cf 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -23,11 +23,6 @@ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
#define MASK_OFS(i) ((i) >> 5)
#define MASK_BIT(i) (1U << ((i) & 31))
-static inline size_t snd_mask_sizeof(void)
-{
- return sizeof(struct snd_mask);
-}
-
static inline void snd_mask_none(struct snd_mask *mask)
{
memset(mask, 0, sizeof(*mask));
@@ -367,6 +362,8 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
return snd_pcm_format_physical_width(params_format(p));
}
+int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p);
+
static inline void
params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
{
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 0feaf16e6ac0..0a6f8dabf8c4 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -14,18 +14,12 @@ struct snd_soc_component;
extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream);
-extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma);
-extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
-extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
-extern void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
- struct snd_pcm *pcm);
+extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm);
extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd);
extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
@@ -35,8 +29,6 @@ extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
@@ -44,9 +36,6 @@ extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
extern snd_pcm_uframes_t
pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
-extern int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- struct vm_area_struct *vma);
/* AC97 */
@@ -63,4 +52,8 @@ extern int pxa2xx_ac97_hw_resume(void);
extern int pxa2xx_ac97_hw_probe(struct platform_device *dev);
extern void pxa2xx_ac97_hw_remove(struct platform_device *dev);
+/* modem registers, used by touchscreen driver */
+u32 pxa2xx_ac97_read_modr(void);
+u32 pxa2xx_ac97_read_misr(void);
+
#endif
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 334842daa904..f31cabf0158c 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -18,6 +18,7 @@
#if IS_ENABLED(CONFIG_SND_SEQUENCER)
#include <sound/seq_device.h>
#endif
+#include <sound/info.h>
/*
* Raw MIDI interface
@@ -47,6 +48,10 @@ struct snd_rawmidi_global_ops {
int (*dev_unregister) (struct snd_rawmidi * rmidi);
void (*get_port_info)(struct snd_rawmidi *rmidi, int number,
struct snd_seq_port_info *info);
+ long (*ioctl)(struct snd_rawmidi *rmidi, unsigned int cmd,
+ void __user *argp);
+ void (*proc_read)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buf);
};
struct snd_rawmidi_runtime {
@@ -61,9 +66,9 @@ struct snd_rawmidi_runtime {
size_t avail_min; /* min avail for wakeup */
size_t avail; /* max used buffer for wakeup */
size_t xruns; /* over/underruns counter */
+ size_t align; /* alignment (0 = byte stream, 3 = UMP) */
int buffer_ref; /* buffer reference count */
/* misc */
- spinlock_t lock;
wait_queue_head_t sleep;
/* event handler (new bytes, input only) */
void (*event)(struct snd_rawmidi_substream *substream);
@@ -81,8 +86,11 @@ struct snd_rawmidi_substream {
bool opened; /* open flag */
bool append; /* append flag (merge more streams) */
bool active_sensing; /* send active sensing when close */
+ unsigned int framing; /* whether to frame input data */
+ unsigned int clock_type; /* clock source to use for input framing */
int use_count; /* use counter (for output) */
size_t bytes;
+ spinlock_t lock;
struct snd_rawmidi *rmidi;
struct snd_rawmidi_str *pstr;
char name[32];
@@ -96,6 +104,7 @@ struct snd_rawmidi_file {
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *input;
struct snd_rawmidi_substream *output;
+ unsigned int user_pversion; /* supported protocol version */
};
struct snd_rawmidi_str {
@@ -126,7 +135,7 @@ struct snd_rawmidi {
struct mutex open_mutex;
wait_queue_head_t open_wait;
- struct device dev;
+ struct device *dev;
struct snd_info_entry *proc_entry;
@@ -143,6 +152,13 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device,
void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
const struct snd_rawmidi_ops *ops);
+/* internal */
+int snd_rawmidi_init(struct snd_rawmidi *rmidi,
+ struct snd_card *card, char *id, int device,
+ int output_count, int input_count,
+ unsigned int info_flags);
+int snd_rawmidi_free(struct snd_rawmidi *rmidi);
+
/* callbacks */
int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
@@ -153,16 +169,12 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
-int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
- unsigned char *buffer, int count);
-int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
- int count);
int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info);
-int snd_rawmidi_kernel_open(struct snd_card *card, int device, int subdevice,
+int snd_rawmidi_kernel_open(struct snd_rawmidi *rmidi, int subdevice,
int mode, struct snd_rawmidi_file *rfile);
int snd_rawmidi_kernel_release(struct snd_rawmidi_file *rfile);
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
diff --git a/include/sound/rt1015.h b/include/sound/rt1015.h
new file mode 100644
index 000000000000..70a7538d4c89
--- /dev/null
+++ b/include/sound/rt1015.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/rt1015.h -- Platform data for RT1015
+ *
+ * Copyright 2020 Realtek Microelectronics
+ */
+
+#ifndef __LINUX_SND_RT1015_H
+#define __LINUX_SND_RT1015_H
+
+struct rt1015_platform_data {
+ unsigned int power_up_delay_ms;
+};
+
+#endif
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
deleted file mode 100644
index 39a77c7cea36..000000000000
--- a/include/sound/rt5645.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/rt5645.h -- Platform data for RT5645
- *
- * Copyright 2013 Realtek Microelectronics
- */
-
-#ifndef __LINUX_SND_RT5645_H
-#define __LINUX_SND_RT5645_H
-
-struct rt5645_platform_data {
- /* IN2 can optionally be differential */
- bool in2_diff;
-
- unsigned int dmic1_data_pin;
- /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
- unsigned int dmic2_data_pin;
- /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
-
- unsigned int jd_mode;
- /* Use level triggered irq */
- bool level_trigger_irq;
- /* Invert JD1_1 status polarity */
- bool inv_jd1_1;
-
- /* Value to asign to snd_soc_card.long_name */
- const char *long_name;
-};
-
-#endif
diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h
index 3b3d6a19ca49..e865f041929b 100644
--- a/include/sound/rt5665.h
+++ b/include/sound/rt5665.h
@@ -31,8 +31,6 @@ struct rt5665_platform_data {
bool in3_diff;
bool in4_diff;
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5665_dmic1_data_pin dmic1_data_pin;
enum rt5665_dmic2_data_pin dmic2_data_pin;
enum rt5665_jd_src jd_src;
diff --git a/include/sound/rt5668.h b/include/sound/rt5668.h
index 182edfbc9e7a..b682418c6cd6 100644
--- a/include/sound/rt5668.h
+++ b/include/sound/rt5668.h
@@ -25,9 +25,6 @@ enum rt5668_jd_src {
};
struct rt5668_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5668_dmic1_data_pin dmic1_data_pin;
enum rt5668_dmic1_clk_pin dmic1_clk_pin;
enum rt5668_jd_src jd_src;
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index e1f790561ac1..4256df721e3a 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -31,15 +31,13 @@ enum rt5682_dai_clks {
};
struct rt5682_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5682_dmic1_data_pin dmic1_data_pin;
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
unsigned int btndet_delay;
unsigned int dmic_clk_rate;
unsigned int dmic_delay;
+ bool dmic_clk_driving_high;
const char *dai_clk_names[RT5682_DAI_NUM_CLKS];
};
diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h
new file mode 100644
index 000000000000..006e6003d11c
--- /dev/null
+++ b/include/sound/rt5682s.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/rt5682s.h -- Platform data for RT5682I-VS
+ *
+ * Copyright 2021 Realtek Microelectronics
+ */
+
+#ifndef __LINUX_SND_RT5682S_H
+#define __LINUX_SND_RT5682S_H
+
+enum rt5682s_dmic1_data_pin {
+ RT5682S_DMIC1_DATA_NULL,
+ RT5682S_DMIC1_DATA_GPIO2,
+ RT5682S_DMIC1_DATA_GPIO5,
+};
+
+enum rt5682s_dmic1_clk_pin {
+ RT5682S_DMIC1_CLK_NULL,
+ RT5682S_DMIC1_CLK_GPIO1,
+ RT5682S_DMIC1_CLK_GPIO3,
+};
+
+enum rt5682s_jd_src {
+ RT5682S_JD_NULL,
+ RT5682S_JD1,
+};
+
+enum rt5682s_dai_clks {
+ RT5682S_DAI_WCLK_IDX,
+ RT5682S_DAI_BCLK_IDX,
+ RT5682S_DAI_NUM_CLKS,
+};
+
+enum {
+ RT5682S_LDO_1_607V,
+ RT5682S_LDO_1_5V,
+ RT5682S_LDO_1_406V,
+ RT5682S_LDO_1_731V,
+};
+
+struct rt5682s_platform_data {
+ enum rt5682s_dmic1_data_pin dmic1_data_pin;
+ enum rt5682s_dmic1_clk_pin dmic1_clk_pin;
+ enum rt5682s_jd_src jd_src;
+ unsigned int dmic_clk_rate;
+ unsigned int dmic_delay;
+ unsigned int amic_delay;
+ unsigned int ldo_dacref;
+ bool dmic_clk_driving_high;
+
+ const char *dai_clk_names[RT5682S_DAI_NUM_CLKS];
+};
+
+#endif
diff --git a/include/sound/s3c24xx_uda134x.h b/include/sound/s3c24xx_uda134x.h
deleted file mode 100644
index 0232b80ff486..000000000000
--- a/include/sound/s3c24xx_uda134x.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _S3C24XX_UDA134X_H_
-#define _S3C24XX_UDA134X_H_ 1
-
-#include <sound/uda134x.h>
-
-struct s3c24xx_uda134x_platform_data {
- int l3_clk;
- int l3_mode;
- int l3_data;
- int model;
-};
-
-#endif
diff --git a/include/sound/sb.h b/include/sound/sb.h
index f540339fb0c7..24970f36c38a 100644
--- a/include/sound/sb.h
+++ b/include/sound/sb.h
@@ -290,6 +290,9 @@ int snd_sbmixer_new(struct snd_sb *chip);
#ifdef CONFIG_PM
void snd_sbmixer_suspend(struct snd_sb *chip);
void snd_sbmixer_resume(struct snd_sb *chip);
+#else
+static inline void snd_sbmixer_suspend(struct snd_sb *chip) {}
+static inline void snd_sbmixer_resume(struct snd_sb *chip) {}
#endif
/* sb8_init.c */
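
The point of the new empty stubs is that callers compile unconditionally; a hypothetical suspend handler no longer needs #ifdef CONFIG_PM around the call.

	static int my_sb_suspend(struct device *dev)
	{
		struct snd_sb *chip = dev_get_drvdata(dev);

		snd_sbmixer_suspend(chip);	/* no-op when CONFIG_PM is off */
		return 0;
	}
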
diff --git a/include/sound/sdw.h b/include/sound/sdw.h
new file mode 100644
index 000000000000..6dcdb3228dba
--- /dev/null
+++ b/include/sound/sdw.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/sound/sdw.h -- SoundWire helpers for ALSA/ASoC
+ *
+ * Copyright (c) 2022 Cirrus Logic Inc.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#include <linux/soundwire/sdw.h>
+#include <sound/asound.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#ifndef __INCLUDE_SOUND_SDW_H
+#define __INCLUDE_SOUND_SDW_H
+
+/**
+ * snd_sdw_params_to_config() - Conversion from hw_params to SoundWire config
+ *
+ * @substream: Pointer to the PCM substream structure
+ * @params: Pointer to the hardware params structure
+ * @stream_config: Stream configuration for the SoundWire audio stream
+ * @port_config: Port configuration for the SoundWire audio stream
+ *
+ * This function provides a basic conversion from the hw_params structure to
+ * SoundWire configuration structures. The user will at a minimum need to also
+ * set the port number in the port config, but may also override more of the
+ * setup, or in the case of a complex user, not use this helper at all and
+ * open-code everything.
+ */
+static inline void snd_sdw_params_to_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config)
+{
+ stream_config->frame_rate = params_rate(params);
+ stream_config->ch_count = params_channels(params);
+ stream_config->bps = snd_pcm_format_width(params_format(params));
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ stream_config->direction = SDW_DATA_DIR_RX;
+ else
+ stream_config->direction = SDW_DATA_DIR_TX;
+
+ port_config->ch_mask = GENMASK(stream_config->ch_count - 1, 0);
+}
+
+#endif
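
As the kernel-doc above notes, the caller still owns the port number; a hedged hw_params sketch, where the final attach call stands in for sdw_stream_add_slave()/sdw_stream_add_master():

	static int my_dai_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params,
				    struct snd_soc_dai *dai)
	{
		struct sdw_stream_config sconfig;
		struct sdw_port_config pconfig;

		snd_sdw_params_to_config(substream, params, &sconfig, &pconfig);

		/* the helper cannot know the port number; set it ourselves */
		pconfig.num = 1;	/* hypothetical data port */

		return my_attach_to_stream(dai, &sconfig, &pconfig); /* hypothetical */
	}
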
diff --git a/include/sound/seq_device.h b/include/sound/seq_device.h
index 8899affe9155..dead74b022f4 100644
--- a/include/sound/seq_device.h
+++ b/include/sound/seq_device.h
@@ -78,5 +78,6 @@ void snd_seq_driver_unregister(struct snd_seq_driver *drv);
*/
#define SNDRV_SEQ_DEV_ID_MIDISYNTH "seq-midi"
#define SNDRV_SEQ_DEV_ID_OPL3 "opl3-synth"
+#define SNDRV_SEQ_DEV_ID_UMP "seq-ump-client"
#endif /* __SOUND_SEQ_DEVICE_H */
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 658911926f3a..c8621671fa70 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -70,9 +70,19 @@ int snd_seq_kernel_client_ctl(int client, unsigned int cmd, void *arg);
typedef int (*snd_seq_dump_func_t)(void *ptr, void *buf, int count);
int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
int in_kernel, int size_aligned);
+int snd_seq_expand_var_event_at(const struct snd_seq_event *event, int count,
+ char *buf, int offset);
int snd_seq_dump_var_event(const struct snd_seq_event *event,
snd_seq_dump_func_t func, void *private_data);
+/* size of the event packet; it can be greater than snd_seq_event size */
+static inline size_t snd_seq_event_packet_size(struct snd_seq_event *ev)
+{
+ if (snd_seq_ev_is_ump(ev))
+ return sizeof(struct snd_seq_ump_event);
+ return sizeof(struct snd_seq_event);
+}
+
/* interface for OSS emulation */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo);
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index d264e5463f22..2e999916dbd7 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -12,15 +12,15 @@
#include <sound/soc.h>
#include <sound/simple_card_utils.h>
-struct asoc_simple_card_info {
+struct simple_util_info {
const char *name;
const char *card;
const char *codec;
const char *platform;
unsigned int daifmt;
- struct asoc_simple_dai cpu_dai;
- struct asoc_simple_dai codec_dai;
+ struct simple_util_dai cpu_dai;
+ struct simple_util_dai codec_dai;
};
#endif /* __SIMPLE_CARD_H */
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 86a1e956991e..ad67957b7b48 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -11,12 +11,18 @@
#include <linux/clk.h>
#include <sound/soc.h>
-#define asoc_simple_init_hp(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 1, prefix, NULL)
-#define asoc_simple_init_mic(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 0, prefix, NULL)
+#define simple_util_init_hp(card, sjack, prefix) \
+ simple_util_init_jack(card, sjack, 1, prefix, NULL)
+#define simple_util_init_mic(card, sjack, prefix) \
+ simple_util_init_jack(card, sjack, 0, prefix, NULL)
+
+struct simple_util_tdm_width_map {
+ u8 sample_bits;
+ u8 slot_count;
+ u16 slot_width;
+};
-struct asoc_simple_dai {
+struct simple_util_dai {
const char *name;
unsigned int sysclk;
int clk_direction;
@@ -25,120 +31,177 @@ struct asoc_simple_dai {
unsigned int tx_slot_mask;
unsigned int rx_slot_mask;
struct clk *clk;
+ bool clk_fixed;
+ struct simple_util_tdm_width_map *tdm_width_map;
+ int n_tdm_widths;
};
-struct asoc_simple_data {
+struct simple_util_data {
u32 convert_rate;
u32 convert_channels;
+ const char *convert_sample_format;
};
-struct asoc_simple_jack {
+struct simple_util_jack {
struct snd_soc_jack jack;
struct snd_soc_jack_pin pin;
struct snd_soc_jack_gpio gpio;
};
-struct asoc_simple_priv {
+struct prop_nums {
+ int cpus;
+ int codecs;
+ int platforms;
+};
+
+struct simple_util_priv {
struct snd_soc_card snd_card;
struct simple_dai_props {
- struct asoc_simple_dai *cpu_dai;
- struct asoc_simple_dai *codec_dai;
- struct snd_soc_dai_link_component cpus; /* single cpu */
- struct snd_soc_dai_link_component codecs; /* single codec */
- struct snd_soc_dai_link_component platforms;
- struct asoc_simple_data adata;
+ struct simple_util_dai *cpu_dai;
+ struct simple_util_dai *codec_dai;
+ struct simple_util_data adata;
struct snd_soc_codec_conf *codec_conf;
+ struct prop_nums num;
unsigned int mclk_fs;
} *dai_props;
- struct asoc_simple_jack hp_jack;
- struct asoc_simple_jack mic_jack;
+ struct simple_util_jack hp_jack;
+ struct simple_util_jack mic_jack;
+ struct snd_soc_jack *aux_jacks;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dais;
+ struct simple_util_dai *dais;
+ struct snd_soc_dai_link_component *dlcs;
struct snd_soc_codec_conf *codec_conf;
struct gpio_desc *pa_gpio;
+ const struct snd_soc_ops *ops;
+ unsigned int dpcm_selectable:1;
+ unsigned int force_dpcm:1;
};
#define simple_priv_to_card(priv) (&(priv)->snd_card)
#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
#define simple_priv_to_link(priv, i) (simple_priv_to_card(priv)->dai_link + (i))
+#define simple_props_to_dlc_cpu(props, i) ((props)->cpus + i)
+#define simple_props_to_dlc_codec(props, i) ((props)->codecs + i)
+#define simple_props_to_dlc_platform(props, i) ((props)->platforms + i)
+
+#define simple_props_to_dai_cpu(props, i) ((props)->cpu_dai + i)
+#define simple_props_to_dai_codec(props, i) ((props)->codec_dai + i)
+#define simple_props_to_codec_conf(props, i) ((props)->codec_conf + i)
+
+#define for_each_prop_dlc_cpus(props, i, cpu) \
+ for ((i) = 0; \
+ ((i) < (props)->num.cpus) && \
+ ((cpu) = simple_props_to_dlc_cpu(props, i)); \
+ (i)++)
+#define for_each_prop_dlc_codecs(props, i, codec) \
+ for ((i) = 0; \
+ ((i) < (props)->num.codecs) && \
+ ((codec) = simple_props_to_dlc_codec(props, i)); \
+ (i)++)
+#define for_each_prop_dlc_platforms(props, i, platform) \
+ for ((i) = 0; \
+ ((i) < (props)->num.platforms) && \
+ ((platform) = simple_props_to_dlc_platform(props, i)); \
+ (i)++)
+#define for_each_prop_codec_conf(props, i, conf) \
+ for ((i) = 0; \
+ ((i) < (props)->num.codecs) && \
+ (props)->codec_conf && \
+ ((conf) = simple_props_to_codec_conf(props, i)); \
+ (i)++)
+
+#define for_each_prop_dai_cpu(props, i, cpu) \
+ for ((i) = 0; \
+ ((i) < (props)->num.cpus) && \
+ ((cpu) = simple_props_to_dai_cpu(props, i)); \
+ (i)++)
+#define for_each_prop_dai_codec(props, i, codec) \
+ for ((i) = 0; \
+ ((i) < (props)->num.codecs) && \
+ ((codec) = simple_props_to_dai_codec(props, i)); \
+ (i)++)
+
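The iterators above bound each walk by the per-link counts in struct prop_nums. A minimal sketch of their use; the log_codecs helper is hypothetical, and props would come from simple_priv_to_props() in a real driver.

/* Hypothetical sketch: list the codec components parsed for one link */
static void log_codecs(struct device *dev, struct simple_dai_props *props)
{
        struct snd_soc_dai_link_component *codec;
        int i;

        for_each_prop_dlc_codecs(props, i, codec)
                dev_dbg(dev, "codec%d: %s\n", i, codec->dai_name);
}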
+#define SNDRV_MAX_LINKS 512
+
struct link_info {
- int dais; /* number of dai */
int link; /* number of link */
- int conf; /* number of codec_conf */
int cpu; /* turn for CPU / Codec */
+ struct prop_nums num[SNDRV_MAX_LINKS];
};
-int asoc_simple_parse_daifmt(struct device *dev,
+int simple_util_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
char *prefix,
unsigned int *retfmt);
+int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np,
+ struct simple_util_dai *dai);
+
__printf(3, 4)
-int asoc_simple_set_dailink_name(struct device *dev,
+int simple_util_set_dailink_name(struct device *dev,
struct snd_soc_dai_link *dai_link,
const char *fmt, ...);
-int asoc_simple_parse_card_name(struct snd_soc_card *card,
+int simple_util_parse_card_name(struct snd_soc_card *card,
char *prefix);
-#define asoc_simple_parse_clk_cpu(dev, node, dai_link, simple_dai) \
- asoc_simple_parse_clk(dev, node, simple_dai, dai_link->cpus)
-#define asoc_simple_parse_clk_codec(dev, node, dai_link, simple_dai) \
- asoc_simple_parse_clk(dev, node, simple_dai, dai_link->codecs)
-int asoc_simple_parse_clk(struct device *dev,
+int simple_util_parse_clk(struct device *dev,
struct device_node *node,
- struct asoc_simple_dai *simple_dai,
+ struct simple_util_dai *simple_dai,
struct snd_soc_dai_link_component *dlc);
-int asoc_simple_startup(struct snd_pcm_substream *substream);
-void asoc_simple_shutdown(struct snd_pcm_substream *substream);
-int asoc_simple_hw_params(struct snd_pcm_substream *substream,
+int simple_util_startup(struct snd_pcm_substream *substream);
+void simple_util_shutdown(struct snd_pcm_substream *substream);
+int simple_util_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd);
-int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+int simple_util_dai_init(struct snd_soc_pcm_runtime *rtd);
+int simple_util_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
-#define asoc_simple_parse_cpu(node, dai_link, is_single_link) \
- asoc_simple_parse_dai(node, dai_link->cpus, is_single_link)
-#define asoc_simple_parse_codec(node, dai_link) \
- asoc_simple_parse_dai(node, dai_link->codecs, NULL)
-#define asoc_simple_parse_platform(node, dai_link) \
- asoc_simple_parse_dai(node, dai_link->platforms, NULL)
-
-#define asoc_simple_parse_tdm(np, dai) \
+#define simple_util_parse_tdm(np, dai) \
snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \
&(dai)->rx_slot_mask, \
&(dai)->slots, \
&(dai)->slot_width);
-void asoc_simple_canonicalize_platform(struct snd_soc_dai_link *dai_link);
-void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
- int is_single_links);
+void simple_util_canonicalize_platform(struct snd_soc_dai_link_component *platforms,
+ struct snd_soc_dai_link_component *cpus);
+void simple_util_canonicalize_cpu(struct snd_soc_dai_link_component *cpus,
+ int is_single_links);
-int asoc_simple_clean_reference(struct snd_soc_card *card);
+void simple_util_clean_reference(struct snd_soc_card *card);
-void asoc_simple_convert_fixup(struct asoc_simple_data *data,
- struct snd_pcm_hw_params *params);
-void asoc_simple_parse_convert(struct device *dev,
- struct device_node *np, char *prefix,
- struct asoc_simple_data *data);
+void simple_util_parse_convert(struct device_node *np, char *prefix,
+ struct simple_util_data *data);
+bool simple_util_is_convert_required(const struct simple_util_data *data);
-int asoc_simple_parse_routing(struct snd_soc_card *card,
+int simple_util_parse_routing(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_parse_widgets(struct snd_soc_card *card,
+int simple_util_parse_widgets(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_parse_pin_switches(struct snd_soc_card *card,
+int simple_util_parse_pin_switches(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_init_jack(struct snd_soc_card *card,
- struct asoc_simple_jack *sjack,
+int simple_util_init_jack(struct snd_soc_card *card,
+ struct simple_util_jack *sjack,
int is_hp, char *prefix, char *pin);
-int asoc_simple_init_priv(struct asoc_simple_priv *priv,
+int simple_util_init_aux_jacks(struct simple_util_priv *priv,
+ char *prefix);
+int simple_util_init_priv(struct simple_util_priv *priv,
struct link_info *li);
+void simple_util_remove(struct platform_device *pdev);
+
+int graph_util_card_probe(struct snd_soc_card *card);
+int graph_util_is_ports0(struct device_node *port);
+int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc, int *is_single_link);
+
+int graph_util_parse_link_direction(struct device_node *np,
+ bool *is_playback_only, bool *is_capture_only);
#ifdef DEBUG
-static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+static inline void simple_util_debug_dai(struct simple_util_priv *priv,
char *name,
- struct asoc_simple_dai *dai)
+ struct simple_util_dai *dai)
{
struct device *dev = simple_priv_to_dev(priv);
@@ -149,12 +212,6 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
if (dai->name)
dev_dbg(dev, "%s dai name = %s\n",
name, dai->name);
- if (dai->sysclk)
- dev_dbg(dev, "%s sysclk = %d\n",
- name, dai->sysclk);
-
- dev_dbg(dev, "%s direction = %s\n",
- name, dai->clk_direction ? "OUT" : "IN");
if (dai->slots)
dev_dbg(dev, "%s slots = %d\n", name, dai->slots);
@@ -166,9 +223,15 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
dev_dbg(dev, "%s rx slot mask = %d\n", name, dai->rx_slot_mask);
if (dai->clk)
dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
+ if (dai->sysclk)
+ dev_dbg(dev, "%s sysclk = %dHz\n",
+ name, dai->sysclk);
+ if (dai->clk || dai->sysclk)
+ dev_dbg(dev, "%s direction = %s\n",
+ name, dai->clk_direction ? "OUT" : "IN");
}
-static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void simple_util_debug_info(struct simple_util_priv *priv)
{
struct snd_soc_card *card = simple_priv_to_card(priv);
struct device *dev = simple_priv_to_dev(priv);
@@ -181,33 +244,36 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
for (i = 0; i < card->num_links; i++) {
struct simple_dai_props *props = simple_priv_to_props(priv, i);
struct snd_soc_dai_link *link = simple_priv_to_link(priv, i);
+ struct simple_util_dai *dai;
+ struct snd_soc_codec_conf *cnf;
+ int j;
dev_dbg(dev, "DAI%d\n", i);
- asoc_simple_debug_dai(priv, "cpu", props->cpu_dai);
- asoc_simple_debug_dai(priv, "codec", props->codec_dai);
+ dev_dbg(dev, "cpu num = %d\n", link->num_cpus);
+ for_each_prop_dai_cpu(props, j, dai)
+ simple_util_debug_dai(priv, "cpu", dai);
+ dev_dbg(dev, "codec num = %d\n", link->num_codecs);
+ for_each_prop_dai_codec(props, j, dai)
+ simple_util_debug_dai(priv, "codec", dai);
if (link->name)
dev_dbg(dev, "dai name = %s\n", link->name);
-
- dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
-
+ if (link->dai_fmt)
+ dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
if (props->adata.convert_rate)
- dev_dbg(dev, "convert_rate = %d\n",
- props->adata.convert_rate);
+ dev_dbg(dev, "convert_rate = %d\n", props->adata.convert_rate);
if (props->adata.convert_channels)
- dev_dbg(dev, "convert_channels = %d\n",
- props->adata.convert_channels);
- if (props->codec_conf && props->codec_conf->name_prefix)
- dev_dbg(dev, "name prefix = %s\n",
- props->codec_conf->name_prefix);
+ dev_dbg(dev, "convert_channels = %d\n", props->adata.convert_channels);
+ for_each_prop_codec_conf(props, j, cnf)
+ if (cnf->name_prefix)
+ dev_dbg(dev, "name prefix = %s\n", cnf->name_prefix);
if (props->mclk_fs)
- dev_dbg(dev, "mclk-fs = %d\n",
- props->mclk_fs);
+ dev_dbg(dev, "mclk-fs = %d\n", props->mclk_fs);
}
}
#else
-#define asoc_simple_debug_info(priv)
+#define simple_util_debug_info(priv)
#endif /* DEBUG */
#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index ab6f75a86611..845e7608ac37 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -14,9 +14,7 @@
* these tables are not constants, some fields can be used for
* pdata or machine ops
*/
-extern struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
-extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[];
@@ -30,12 +28,22 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index d2e9e3b4d7ea..23d6d6bfb073 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -58,11 +59,19 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* snd_soc_acpi_mach_params: interface for machine driver configuration
*
* @acpi_ipc_irq_index: used for BYT-CR detection
- * @platform: string used for HDaudio codec support
+ * @platform: string used for HDAudio codec support
* @codec_mask: used for HDAudio support
+ * @dmic_num: number of SoC- or chipset-attached PDM digital microphones
* @common_hdmi_codec_drv: use common HDAudio HDMI codec driver
- * @link_mask: links enabled on the board
- * @links: array of link _ADR descriptors, null terminated
+ * @link_mask: SoundWire links enabled on the board
+ * @links: array of SoundWire link _ADR descriptors, null terminated
+ * @i2s_link_mask: I2S/TDM links enabled on the board
+ * @num_dai_drivers: number of elements in @dai_drivers
+ * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
+ * @subsystem_vendor: optional PCI SSID vendor value
+ * @subsystem_device: optional PCI SSID device value
+ * @subsystem_id_set: true if a value has been written to
+ * subsystem_vendor and subsystem_device.
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
@@ -72,6 +81,12 @@ struct snd_soc_acpi_mach_params {
bool common_hdmi_codec_drv;
u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
+ u32 i2s_link_mask;
+ u32 num_dai_drivers;
+ struct snd_soc_dai_driver *dai_drivers;
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ bool subsystem_id_set;
};
/**
@@ -93,11 +108,13 @@ struct snd_soc_acpi_endpoint {
* @adr: 64 bit ACPI _ADR value
* @num_endpoints: number of endpoints for this device
* @endpoints: array of endpoints
+ * @name_prefix: string used for codec controls
*/
struct snd_soc_acpi_adr_device {
const u64 adr;
const u8 num_endpoints;
const struct snd_soc_acpi_endpoint *endpoints;
+ const char *name_prefix;
};
/**
@@ -116,6 +133,24 @@ struct snd_soc_acpi_link_adr {
const struct snd_soc_acpi_adr_device *adr_d;
};
+/*
+ * when set the topology uses the -ssp<N> suffix, where N is determined based on
+ * BIOS or DMI information
+ */
+#define SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER BIT(0)
+
+/*
+ * when more than one SSP is reported in the link mask, use the most significant.
+ * This choice was found to be valid on platforms with ES8336 codecs.
+ */
+#define SND_SOC_ACPI_TPLG_INTEL_SSP_MSB BIT(1)
+
+/*
+ * when set the topology uses the -dmic<N>ch suffix, where N is determined based on
+ * BIOS or DMI information
+ */
+#define SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER BIT(2)
+
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
@@ -123,10 +158,14 @@ struct snd_soc_acpi_link_adr {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @uid: ACPI Unique ID, can be used to disambiguate matches.
+ * @comp_ids: list of compatible audio codecs using the same machine driver,
+ * firmware and topology
* @link_mask: describes required board layout, e.g. for SoundWire.
* @links: array of link _ADR descriptors, null terminated.
* @drv_name: machine driver name
* @fw_filename: firmware file name. Used when SOF is not enabled.
+ * @tplg_filename: topology file name. Used when SOF is not enabled.
* @board: board name
* @machine_quirk: pointer to quirk, usually based on DMI information when
* ACPI ID alone is not sufficient, wrong or misleading
@@ -134,23 +173,26 @@ struct snd_soc_acpi_link_adr {
* audio codecs whose presence is checked with ACPI
* @pdata: intended for platform data or machine specific-ops. This structure
* is not constant since this field may be updated at run-time
- * @sof_fw_filename: Sound Open Firmware file name, if enabled
* @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
+ * @tplg_quirk_mask: quirks to select different topology files dynamically
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
- const u8 id[ACPI_ID_LEN];
+ u8 id[ACPI_ID_LEN];
+ const char *uid;
+ const struct snd_soc_acpi_codecs *comp_ids;
const u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
const char *drv_name;
const char *fw_filename;
+ const char *tplg_filename;
const char *board;
struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
const void *quirk_data;
void *pdata;
struct snd_soc_acpi_mach_params mach_params;
- const char *sof_fw_filename;
const char *sof_tplg_filename;
+ const u32 tplg_quirk_mask;
};
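To make the new fields concrete, a hedged sketch of a machine-table entry that matches on a compatible-ID list and selects topology suffixes via the quirk mask defined above; the IDs, names and file names are illustrative only.

/* Illustrative entry only; IDs and file names are placeholders */
static const struct snd_soc_acpi_codecs example_comp_ids = {
        .num_codecs = 2,
        .codecs = { "10EC5682", "RTL5682" },
};

static struct snd_soc_acpi_mach example_machines[] = {
        {
                .comp_ids = &example_comp_ids,
                .drv_name = "example_machine",
                .sof_tplg_filename = "sof-example.tplg",
                .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
                                   SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
        },
        {},
};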
#define SND_SOC_ACPI_MAX_CODECS 3
@@ -169,4 +211,15 @@ struct snd_soc_acpi_codecs {
u8 codecs[SND_SOC_ACPI_MAX_CODECS][ACPI_ID_LEN];
};
+static inline bool snd_soc_acpi_sof_parent(struct device *dev)
+{
+ return dev->parent && dev->parent->driver && dev->parent->driver->name &&
+ !strncmp(dev->parent->driver->name, "sof-audio-acpi", strlen("sof-audio-acpi"));
+}
+
+bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct sdw_extended_slave_id *ids,
+ int num_slaves);
+
#endif
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index 4f2cc4fb56b7..1f4c39922d82 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -9,15 +9,35 @@
#define __SOC_CARD_H
enum snd_soc_card_subclass {
- SND_SOC_CARD_CLASS_INIT = 0,
+ SND_SOC_CARD_CLASS_ROOT = 0,
SND_SOC_CARD_CLASS_RUNTIME = 1,
};
+static inline void snd_soc_card_mutex_lock_root(struct snd_soc_card *card)
+{
+ mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_ROOT);
+}
+
+static inline void snd_soc_card_mutex_lock(struct snd_soc_card *card)
+{
+ mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+}
+
+static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
+{
+ mutex_unlock(&card->mutex);
+}
+
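The wrappers encode the lockdep subclass in the function name: card-wide paths take the ROOT subclass, while code reached from a runtime context uses the RUNTIME variant, so nested acquisition of card->mutex stays lockdep-clean. A minimal sketch of the pattern (example_card_op is hypothetical):

/* Hypothetical sketch of the locking pattern */
static void example_card_op(struct snd_soc_card *card)
{
        snd_soc_card_mutex_lock_root(card);     /* top-level card path */
        /* ... mutate card-wide state ... */
        snd_soc_card_mutex_unlock(card);
}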
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
+struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
+ const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
- struct snd_soc_jack *jack,
- struct snd_soc_jack_pin *pins, unsigned int num_pins);
+ struct snd_soc_jack *jack);
+int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
+ int type, struct snd_soc_jack *jack,
+ struct snd_soc_jack_pin *pins,
+ unsigned int num_pins);
int snd_soc_card_suspend_pre(struct snd_soc_card *card);
int snd_soc_card_suspend_post(struct snd_soc_card *card);
@@ -26,6 +46,7 @@ int snd_soc_card_resume_post(struct snd_soc_card *card);
int snd_soc_card_probe(struct snd_soc_card *card);
int snd_soc_card_late_probe(struct snd_soc_card *card);
+void snd_soc_card_fixup_controls(struct snd_soc_card *card);
int snd_soc_card_remove(struct snd_soc_card *card);
int snd_soc_card_set_bias_level(struct snd_soc_card *card,
@@ -40,6 +61,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
+#ifdef CONFIG_PCI
+static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
+ unsigned short vendor,
+ unsigned short device)
+{
+ card->pci_subsystem_vendor = vendor;
+ card->pci_subsystem_device = device;
+ card->pci_subsystem_set = true;
+}
+
+static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
+ unsigned short *vendor,
+ unsigned short *device)
+{
+ if (!card->pci_subsystem_set)
+ return -ENOENT;
+
+ *vendor = card->pci_subsystem_vendor;
+ *device = card->pci_subsystem_device;
+
+ return 0;
+}
+#else /* !CONFIG_PCI */
+static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
+ unsigned short vendor,
+ unsigned short device)
+{
+}
+
+static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
+ unsigned short *vendor,
+ unsigned short *device)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_PCI */
+
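A short sketch of how a PCI platform driver might use these helpers; both function names are hypothetical, and the stub variants above keep the same code compiling when CONFIG_PCI is disabled.

/* Hypothetical sketch: record and later query the PCI subsystem IDs */
static void example_store_ssid(struct snd_soc_card *card, struct pci_dev *pci)
{
        snd_soc_card_set_pci_ssid(card, pci->subsystem_vendor,
                                  pci->subsystem_device);
}

static bool example_ssid_matches(struct snd_soc_card *card, unsigned short v)
{
        unsigned short vendor, device;

        if (snd_soc_card_get_pci_ssid(card, &vendor, &device))
                return false;   /* SSID was never set */
        return vendor == v;
}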
/* device driver data */
static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
void *data)
@@ -59,8 +117,8 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd;
for_each_card_rtds(card, rtd) {
- if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name))
- return asoc_rtd_to_codec(rtd, 0);
+ if (!strcmp(snd_soc_rtd_to_codec(rtd, 0)->name, dai_name))
+ return snd_soc_rtd_to_codec(rtd, 0);
}
return NULL;
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 089ea9441fd1..ceca69b46a82 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -98,10 +98,11 @@ struct snd_soc_component_driver {
int source, unsigned int freq_in, unsigned int freq_out);
int (*set_jack)(struct snd_soc_component *component,
struct snd_soc_jack *jack, void *data);
+ int (*get_jack_type)(struct snd_soc_component *component);
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
- struct of_phandle_args *args,
+ const struct of_phandle_args *args,
const char **dai_name);
int (*of_xlate_dai_id)(struct snd_soc_component *component,
struct device_node *endpoint);
@@ -136,16 +137,20 @@ struct snd_soc_component_driver {
struct timespec64 *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
- int (*copy_user)(struct snd_soc_component *component,
- struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void __user *buf,
- unsigned long bytes);
+ int (*copy)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, struct iov_iter *iter,
+ unsigned long bytes);
struct page *(*page)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
unsigned long offset);
int (*mmap)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
+ int (*ack)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ snd_pcm_sframes_t (*delay)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
const struct snd_compress_ops *compress_ops;
@@ -154,6 +159,15 @@ struct snd_soc_component_driver {
int remove_order;
/*
+ * soc_pcm_trigger() start/stop sequence.
+ * see also
+ * snd_soc_dai_link
+ * soc_pcm_trigger()
+ */
+ enum snd_soc_trigger_order trigger_start;
+ enum snd_soc_trigger_order trigger_stop;
+
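A hedged sketch of how a component driver might declare its trigger ordering, assuming the SND_SOC_TRIGGER_ORDER_* values from <sound/soc.h>; the driver name is a placeholder.

/* Hypothetical component declaring its trigger sequencing */
static const struct snd_soc_component_driver example_component = {
        .name          = "example-component",
        .trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT,
        .trigger_stop  = SND_SOC_TRIGGER_ORDER_LDC,
};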
+ /*
* signal if the module handling the component should not be removed
* if a pcm is open. Setting this would prevent the module
* refcount being incremented in probe() but allow it be incremented
@@ -165,8 +179,17 @@ struct snd_soc_component_driver {
unsigned int idle_bias_on:1;
unsigned int suspend_bias_off:1;
unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
+ /*
+ * Indicates that the component does not care about the endianness of
+ * PCM audio data and the core will ensure that both LE and BE variants
+ * of each used format are present. Typically this is because the
+ * component sits behind a bus that abstracts away the endian of the
+ * original data, i.e. one for which the transmission endianness is defined
+ * (I2S/SLIMbus/SoundWire), or the concept of endian doesn't exist (PDM,
+ * analogue).
+ */
unsigned int endianness:1;
- unsigned int non_legacy_dai_naming:1;
+ unsigned int legacy_dai_naming:1;
/* this component uses topology and ignore machine driver FEs */
const char *ignore_machine;
@@ -175,6 +198,10 @@ struct snd_soc_component_driver {
struct snd_pcm_hw_params *params);
bool use_dai_pcm_id; /* use DAI link PCM ID as PCM device number */
int be_pcm_base; /* base device ID for all BE PCMs */
+
+#ifdef CONFIG_DEBUG_FS
+ const char *debugfs_prefix;
+#endif
};
struct snd_soc_component {
@@ -217,10 +244,16 @@ struct snd_soc_component {
/* machine specific init */
int (*init)(struct snd_soc_component *component);
-#ifdef CONFIG_DEBUG_FS
+ /* function mark */
+ void *mark_module;
+ struct snd_pcm_substream *mark_open;
+ struct snd_pcm_substream *mark_hw_params;
+ struct snd_pcm_substream *mark_trigger;
+ struct snd_compr_stream *mark_compr_open;
+ void *mark_pm;
+
struct dentry *debugfs_root;
const char *debugfs_prefix;
-#endif
};
#define for_each_component_dais(component, dai)\
@@ -328,6 +361,7 @@ static inline int snd_soc_component_cache_sync(
void snd_soc_component_set_aux(struct snd_soc_component *component,
struct snd_soc_aux_dev *aux);
int snd_soc_component_init(struct snd_soc_component *component);
+int snd_soc_component_is_dummy(struct snd_soc_component *component);
/* component IO */
unsigned int snd_soc_component_read(struct snd_soc_component *component,
@@ -345,6 +379,12 @@ int snd_soc_component_test_bits(struct snd_soc_component *component,
unsigned int reg, unsigned int mask,
unsigned int value);
+unsigned int snd_soc_component_read_field(struct snd_soc_component *component,
+ unsigned int reg, unsigned int mask);
+int snd_soc_component_write_field(struct snd_soc_component *component,
+ unsigned int reg, unsigned int mask,
+ unsigned int val);
+
/* component wide operations */
int snd_soc_component_set_sysclk(struct snd_soc_component *component,
int clk_id, int source,
@@ -354,6 +394,7 @@ int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id,
unsigned int freq_out);
int snd_soc_component_set_jack(struct snd_soc_component *component,
struct snd_soc_jack *jack, void *data);
+int snd_soc_component_get_jack_type(struct snd_soc_component *component);
void snd_soc_component_seq_notifier(struct snd_soc_component *component,
enum snd_soc_dapm_type type, int subseq);
@@ -370,17 +411,17 @@ void snd_soc_component_exit_regmap(struct snd_soc_component *component);
#endif
#define snd_soc_component_module_get_when_probe(component)\
- snd_soc_component_module_get(component, 0)
-#define snd_soc_component_module_get_when_open(component) \
- snd_soc_component_module_get(component, 1)
+ snd_soc_component_module_get(component, NULL, 0)
+#define snd_soc_component_module_get_when_open(component, substream) \
+ snd_soc_component_module_get(component, substream, 1)
int snd_soc_component_module_get(struct snd_soc_component *component,
- int upon_open);
+ void *mark, int upon_open);
#define snd_soc_component_module_put_when_remove(component) \
- snd_soc_component_module_put(component, 0)
-#define snd_soc_component_module_put_when_close(component) \
- snd_soc_component_module_put(component, 1)
+ snd_soc_component_module_put(component, NULL, 0, 0)
+#define snd_soc_component_module_put_when_close(component, substream, rollback) \
+ snd_soc_component_module_put(component, substream, 1, rollback)
void snd_soc_component_module_put(struct snd_soc_component *component,
- int upon_open);
+ void *mark, int upon_open, int rollback);
static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c,
void *data)
@@ -420,11 +461,16 @@ int snd_soc_component_force_enable_pin_unlocked(
struct snd_soc_component *component,
const char *pin);
+/* component controls */
+int snd_soc_component_notify_control(struct snd_soc_component *component,
+ const char * const ctl);
+
/* component driver ops */
int snd_soc_component_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
int snd_soc_component_close(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
+ struct snd_pcm_substream *substream,
+ int rollback);
void snd_soc_component_suspend(struct snd_soc_component *component);
void snd_soc_component_resume(struct snd_soc_component *component);
int snd_soc_component_is_suspended(struct snd_soc_component *component);
@@ -433,16 +479,39 @@ void snd_soc_component_remove(struct snd_soc_component *component);
int snd_soc_component_of_xlate_dai_id(struct snd_soc_component *component,
struct device_node *ep);
int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component,
- struct of_phandle_args *args,
+ const struct of_phandle_args *args,
const char **dai_name);
+int snd_soc_component_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream);
+void snd_soc_component_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ int rollback);
+int snd_soc_component_compr_trigger(struct snd_compr_stream *cstream, int cmd);
+int snd_soc_component_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params);
+int snd_soc_component_compr_get_params(struct snd_compr_stream *cstream,
+ struct snd_codec *params);
+int snd_soc_component_compr_get_caps(struct snd_compr_stream *cstream,
+ struct snd_compr_caps *caps);
+int snd_soc_component_compr_get_codec_caps(struct snd_compr_stream *cstream,
+ struct snd_compr_codec_caps *codec);
+int snd_soc_component_compr_ack(struct snd_compr_stream *cstream, size_t bytes);
+int snd_soc_component_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp);
+int snd_soc_component_compr_copy(struct snd_compr_stream *cstream,
+ char __user *buf, size_t count);
+int snd_soc_component_compr_set_metadata(struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
+int snd_soc_component_compr_get_metadata(struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
-int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
- int channel, unsigned long pos,
- void __user *buf, unsigned long bytes);
+int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ struct iov_iter *iter, unsigned long bytes);
struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
@@ -451,11 +520,17 @@ int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd);
void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd);
int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_component **last);
+ struct snd_pcm_hw_params *params);
void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_component *last);
+ int rollback);
int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream,
- int cmd);
+ int cmd, int rollback);
+int snd_soc_pcm_component_pm_runtime_get(struct snd_soc_pcm_runtime *rtd,
+ void *stream);
+void snd_soc_pcm_component_pm_runtime_put(struct snd_soc_pcm_runtime *rtd,
+ void *stream, int rollback);
+int snd_soc_pcm_component_ack(struct snd_pcm_substream *substream);
+void snd_soc_pcm_component_delay(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
#endif /* __SOC_COMPONENT_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 776a60529e70..adcd8719d343 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -36,6 +36,22 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_MSB SND_SOC_DAIFMT_LEFT_J
#define SND_SOC_DAIFMT_LSB SND_SOC_DAIFMT_RIGHT_J
+/* Describes the possible PCM format */
+/*
+ * use SND_SOC_DAI_FORMAT_xx as each shift.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT 0
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_I2S (1 << SND_SOC_DAI_FORMAT_I2S)
+#define SND_SOC_POSSIBLE_DAIFMT_RIGHT_J (1 << SND_SOC_DAI_FORMAT_RIGHT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_LEFT_J (1 << SND_SOC_DAI_FORMAT_LEFT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_A (1 << SND_SOC_DAI_FORMAT_DSP_A)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_B (1 << SND_SOC_DAI_FORMAT_DSP_B)
+#define SND_SOC_POSSIBLE_DAIFMT_AC97 (1 << SND_SOC_DAI_FORMAT_AC97)
+#define SND_SOC_POSSIBLE_DAIFMT_PDM (1 << SND_SOC_DAI_FORMAT_PDM)
+
/*
* DAI Clock gating.
*
@@ -45,6 +61,17 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */
#define SND_SOC_DAIFMT_GATED (0 << 4) /* clock is gated */
+/* Describes the possible PCM format */
+/*
+ * ordering is GATED -> CONT; GATED will be selected if both are set.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT 16
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_GATED (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CONT (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+
/*
* DAI hardware signal polarity.
*
@@ -71,22 +98,52 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_IB_NF (3 << 8) /* invert BCLK + nor FRM */
#define SND_SOC_DAIFMT_IB_IF (4 << 8) /* invert BCLK + FRM */
+/* Describes the possible PCM format */
+#define SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT 32
+#define SND_SOC_POSSIBLE_DAIFMT_INV_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_NB_NF (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_NB_IF (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_IB_NF (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_IB_IF (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+
/*
- * DAI hardware clock masters.
+ * DAI hardware clock providers/consumers
*
* This is wrt the codec, the inverse is true for the interface
- * i.e. if the codec is clk and FRM master then the interface is
- * clk and frame secondary.
+ * i.e. if the codec is clk and FRM provider then the interface is
+ * clk and frame consumer.
*/
-#define SND_SOC_DAIFMT_CBM_CFM (1 << 12) /* codec clk & FRM master */
-#define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk secondary & FRM master */
-#define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame secondary */
-#define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM secondary */
-
-#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
-#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
-#define SND_SOC_DAIFMT_INV_MASK 0x0f00
-#define SND_SOC_DAIFMT_MASTER_MASK 0xf000
+#define SND_SOC_DAIFMT_CBP_CFP (1 << 12) /* codec clk provider & frame provider */
+#define SND_SOC_DAIFMT_CBC_CFP (2 << 12) /* codec clk consumer & frame provider */
+#define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */
+#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */
+
+/* previous definitions kept for backwards-compatibility, do not use in new contributions */
+#define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP
+#define SND_SOC_DAIFMT_CBS_CFM SND_SOC_DAIFMT_CBC_CFP
+#define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC
+#define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC
+
+/* when passed to set_fmt directly indicate if the device is provider or consumer */
+#define SND_SOC_DAIFMT_BP_FP SND_SOC_DAIFMT_CBP_CFP
+#define SND_SOC_DAIFMT_BC_FP SND_SOC_DAIFMT_CBC_CFP
+#define SND_SOC_DAIFMT_BP_FC SND_SOC_DAIFMT_CBP_CFC
+#define SND_SOC_DAIFMT_BC_FC SND_SOC_DAIFMT_CBC_CFC
+
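The provider/consumer spellings compose exactly like the retired master/slave ones. A one-line sketch, with the codec as bit-clock and frame provider (function name hypothetical):

/* Hypothetical sketch using the new clock-provider spelling */
static int example_set_codec_provider(struct snd_soc_dai *codec_dai)
{
        return snd_soc_dai_set_fmt(codec_dai,
                                   SND_SOC_DAIFMT_I2S |
                                   SND_SOC_DAIFMT_NB_NF |
                                   SND_SOC_DAIFMT_CBP_CFP);
}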
+/* Describes the possible PCM format */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFP (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFP (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFC (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFC (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+
+#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
+#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
+#define SND_SOC_DAIFMT_INV_MASK 0x0f00
+#define SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK 0xf000
+
+#define SND_SOC_DAIFMT_MASTER_MASK SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK
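All four POSSIBLE_DAIFMT groups occupy disjoint fields of one u64, so a DAI can advertise a complete negotiable configuration as a single value. A sketch of a table meant for the auto_selectable_formats field added to snd_soc_dai_ops later in this patch (the array name is hypothetical):

/* Hypothetical advertisement: I2S or left-justified, normal clocks,
 * codec as clock/frame consumer; entries are in priority order.
 */
static const u64 example_auto_formats[] = {
        SND_SOC_POSSIBLE_DAIFMT_I2S |
        SND_SOC_POSSIBLE_DAIFMT_LEFT_J |
        SND_SOC_POSSIBLE_DAIFMT_NB_NF |
        SND_SOC_POSSIBLE_DAIFMT_CBC_CFC,
};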
/*
* Master Clock Directions
@@ -123,6 +180,8 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio);
/* Digital Audio interface formatting */
+int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd);
+u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority);
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -149,13 +208,12 @@ int snd_soc_dai_hw_params(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
void snd_soc_dai_hw_free(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
+ struct snd_pcm_substream *substream,
+ int rollback);
int snd_soc_dai_startup(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream);
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
-snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
+ struct snd_pcm_substream *substream, int rollback);
void snd_soc_dai_suspend(struct snd_soc_dai *dai);
void snd_soc_dai_resume(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
@@ -180,14 +238,18 @@ int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order);
int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order);
int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd);
int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream);
-int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd);
+int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ int rollback);
int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
int cmd);
+void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
int snd_soc_dai_compr_startup(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream);
void snd_soc_dai_compr_shutdown(struct snd_soc_dai *dai,
- struct snd_compr_stream *cstream);
+ struct snd_compr_stream *cstream,
+ int rollback);
int snd_soc_dai_compr_trigger(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream, int cmd);
int snd_soc_dai_compr_set_params(struct snd_soc_dai *dai,
@@ -209,7 +271,18 @@ int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata);
+const char *snd_soc_dai_name_get(struct snd_soc_dai *dai);
+
struct snd_soc_dai_ops {
+ /* DAI driver callbacks */
+ int (*probe)(struct snd_soc_dai *dai);
+ int (*remove)(struct snd_soc_dai *dai);
+ /* compress dai */
+ int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
+ /* Optional callback used at PCM creation */
+ int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
+
/*
* DAI clocking configuration, all optional.
* Called by soc_card drivers, normally in their hw_params.
@@ -239,9 +312,9 @@ struct snd_soc_dai_ops {
unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
- int (*set_sdw_stream)(struct snd_soc_dai *dai,
- void *stream, int direction);
- void *(*get_sdw_stream)(struct snd_soc_dai *dai, int direction);
+ int (*set_stream)(struct snd_soc_dai *dai,
+ void *stream, int direction);
+ void *(*get_stream)(struct snd_soc_dai *dai, int direction);
/*
* DAI digital mute - optional.
@@ -281,8 +354,23 @@ struct snd_soc_dai_ops {
snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
struct snd_soc_dai *);
+ /*
+ * Format list for auto selection.
+ * The next format in the list is tried if the priority
+ * format was not selected.
+ * see
+ * snd_soc_dai_get_fmt()
+ */
+ u64 *auto_selectable_formats;
+ int num_auto_selectable_formats;
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
+
/* bit field */
unsigned int no_capture_mute:1;
+ unsigned int mute_unmute_on_trigger:1;
};
struct snd_soc_cdai_ops {
@@ -325,15 +413,7 @@ struct snd_soc_dai_driver {
unsigned int id;
unsigned int base;
struct snd_soc_dobj dobj;
-
- /* DAI driver callbacks */
- int (*probe)(struct snd_soc_dai *dai);
- int (*remove)(struct snd_soc_dai *dai);
- /* compress dai */
- int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
- /* Optional Callback used at pcm creation*/
- int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
- struct snd_soc_dai *dai);
+ struct of_phandle_args *dai_args;
/* ops */
const struct snd_soc_dai_ops *ops;
@@ -342,13 +422,19 @@ struct snd_soc_dai_driver {
/* DAI capabilities */
struct snd_soc_pcm_stream capture;
struct snd_soc_pcm_stream playback;
- unsigned int symmetric_rates:1;
+ unsigned int symmetric_rate:1;
unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
+ unsigned int symmetric_sample_bits:1;
+};
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
+/* for Playback/Capture */
+struct snd_soc_dai_stream {
+ struct snd_soc_dapm_widget *widget;
+
+ unsigned int active; /* usage count */
+ unsigned int tdm_mask; /* CODEC TDM slot masks and params (for fixup) */
+
+ void *dma_data; /* DAI DMA data */
};
/*
@@ -365,14 +451,7 @@ struct snd_soc_dai {
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */
-
- struct snd_soc_dapm_widget *playback_widget;
- struct snd_soc_dapm_widget *capture_widget;
-
- /* DAI DMA data */
- void *playback_dma_data;
- void *capture_dma_data;
+ struct snd_soc_dai_stream stream[SNDRV_PCM_STREAM_LAST + 1];
/* Symmetry data - only valid if symmetry is being enforced */
unsigned int rate;
@@ -382,12 +461,14 @@ struct snd_soc_dai {
/* parent platform/codec */
struct snd_soc_component *component;
- /* CODEC TDM slot masks and params (for fixup) */
- unsigned int tx_mask;
- unsigned int rx_mask;
-
struct list_head list;
+ /* function mark */
+ struct snd_pcm_substream *mark_startup;
+ struct snd_pcm_substream *mark_hw_params;
+ struct snd_pcm_substream *mark_trigger;
+ struct snd_compr_stream *mark_compr_startup;
+
/* bit field */
unsigned int probed:1;
};
@@ -399,36 +480,59 @@ snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream)
&dai->driver->playback : &dai->driver->capture;
}
+#define snd_soc_dai_get_widget_playback(dai) snd_soc_dai_get_widget(dai, SNDRV_PCM_STREAM_PLAYBACK)
+#define snd_soc_dai_get_widget_capture(dai) snd_soc_dai_get_widget(dai, SNDRV_PCM_STREAM_CAPTURE)
static inline
-struct snd_soc_dapm_widget *snd_soc_dai_get_widget(
- struct snd_soc_dai *dai, int stream)
+struct snd_soc_dapm_widget *snd_soc_dai_get_widget(struct snd_soc_dai *dai, int stream)
{
- return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- dai->playback_widget : dai->capture_widget;
+ return dai->stream[stream].widget;
}
-static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai,
- const struct snd_pcm_substream *ss)
+#define snd_soc_dai_set_widget_playback(dai, widget) snd_soc_dai_set_widget(dai, SNDRV_PCM_STREAM_PLAYBACK, widget)
+#define snd_soc_dai_set_widget_capture(dai, widget) snd_soc_dai_set_widget(dai, SNDRV_PCM_STREAM_CAPTURE, widget)
+static inline
+void snd_soc_dai_set_widget(struct snd_soc_dai *dai, int stream, struct snd_soc_dapm_widget *widget)
{
- return (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- dai->playback_dma_data : dai->capture_dma_data;
+ dai->stream[stream].widget = widget;
}
-static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai,
- const struct snd_pcm_substream *ss,
- void *data)
+#define snd_soc_dai_dma_data_get_playback(dai) snd_soc_dai_dma_data_get(dai, SNDRV_PCM_STREAM_PLAYBACK)
+#define snd_soc_dai_dma_data_get_capture(dai) snd_soc_dai_dma_data_get(dai, SNDRV_PCM_STREAM_CAPTURE)
+#define snd_soc_dai_get_dma_data(dai, ss) snd_soc_dai_dma_data_get(dai, ss->stream)
+static inline void *snd_soc_dai_dma_data_get(const struct snd_soc_dai *dai, int stream)
{
- if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
- dai->playback_dma_data = data;
- else
- dai->capture_dma_data = data;
+ return dai->stream[stream].dma_data;
}
-static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai,
- void *playback, void *capture)
+#define snd_soc_dai_dma_data_set_playback(dai, data) snd_soc_dai_dma_data_set(dai, SNDRV_PCM_STREAM_PLAYBACK, data)
+#define snd_soc_dai_dma_data_set_capture(dai, data) snd_soc_dai_dma_data_set(dai, SNDRV_PCM_STREAM_CAPTURE, data)
+#define snd_soc_dai_set_dma_data(dai, ss, data) snd_soc_dai_dma_data_set(dai, ss->stream, data)
+static inline void snd_soc_dai_dma_data_set(struct snd_soc_dai *dai, int stream, void *data)
{
- dai->playback_dma_data = playback;
- dai->capture_dma_data = capture;
+ dai->stream[stream].dma_data = data;
+}
+
+static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai, void *playback, void *capture)
+{
+ snd_soc_dai_dma_data_set_playback(dai, playback);
+ snd_soc_dai_dma_data_set_capture(dai, capture);
+}
+
+static inline unsigned int snd_soc_dai_tdm_mask_get(struct snd_soc_dai *dai, int stream)
+{
+ return dai->stream[stream].tdm_mask;
+}
+
+static inline void snd_soc_dai_tdm_mask_set(struct snd_soc_dai *dai, int stream,
+ unsigned int tdm_mask)
+{
+ dai->stream[stream].tdm_mask = tdm_mask;
+}
+
+static inline unsigned int snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
+{
+ /* see snd_soc_dai_action() for setup */
+ return dai->stream[stream].active;
}
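A sketch of the new stream-indexed accessors from a CPU DAI driver's hw_params; struct example_dma_params and the function are placeholders, not real ASoC types.

struct example_dma_params {       /* placeholder, not a real ASoC type */
        void __iomem *fifo;
};

/* Hypothetical hw_params fetching per-stream DMA data */
static int example_dai_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params,
                                 struct snd_soc_dai *dai)
{
        struct example_dma_params *dma =
                snd_soc_dai_dma_data_get(dai, substream->stream);

        /* ... program the DMA engine from 'dma' ... */
        return dma ? 0 : -EINVAL;
}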
static inline void snd_soc_dai_set_drvdata(struct snd_soc_dai *dai,
@@ -443,49 +547,44 @@ static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai)
}
/**
- * snd_soc_dai_set_sdw_stream() - Configures a DAI for SDW stream operation
+ * snd_soc_dai_set_stream() - Configures a DAI for stream operation
* @dai: DAI
- * @stream: STREAM
+ * @stream: STREAM (opaque structure depending on DAI type)
* @direction: Stream direction(Playback/Capture)
- * SoundWire subsystem doesn't have a notion of direction and we reuse
+ * Some subsystems, such as SoundWire, don't have a notion of direction and we reuse
* the ASoC stream direction to configure sink/source ports.
* Playback maps to source ports and Capture to sink ports.
*
* This should be invoked with NULL to clear the stream set previously.
* Returns 0 on success, a negative error code otherwise.
*/
-static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
+static inline int snd_soc_dai_set_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
{
- if (dai->driver->ops->set_sdw_stream)
- return dai->driver->ops->set_sdw_stream(dai, stream, direction);
+ if (dai->driver->ops->set_stream)
+ return dai->driver->ops->set_stream(dai, stream, direction);
else
return -ENOTSUPP;
}
/**
- * snd_soc_dai_get_sdw_stream() - Retrieves SDW stream from DAI
+ * snd_soc_dai_get_stream() - Retrieves stream from DAI
* @dai: DAI
* @direction: Stream direction(Playback/Capture)
*
* This routine only retrieves the stream that was previously configured
- * with snd_soc_dai_get_sdw_stream()
+ * with snd_soc_dai_set_stream()
*
- * Returns pointer to stream or -ENOTSUPP if callback is not supported;
+ * Returns a pointer to the stream, or an ERR_PTR value such as
+ * ERR_PTR(-ENOTSUPP) if the callback is not supported.
*/
-static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai,
- int direction)
+static inline void *snd_soc_dai_get_stream(struct snd_soc_dai *dai,
+ int direction)
{
- if (dai->driver->ops->get_sdw_stream)
- return dai->driver->ops->get_sdw_stream(dai, direction);
+ if (dai->driver->ops->get_stream)
+ return dai->driver->ops->get_stream(dai, direction);
else
return ERR_PTR(-ENOTSUPP);
}
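A minimal sketch of the renamed helpers from a machine or bus driver: the opaque stream context (a SoundWire runtime, for instance) is attached before hw_params and cleared with NULL on shutdown; function names are hypothetical.

/* Hypothetical attach/detach of an opaque stream context */
static int example_attach_stream(struct snd_soc_dai *dai, void *stream_ctx)
{
        return snd_soc_dai_set_stream(dai, stream_ctx,
                                      SNDRV_PCM_STREAM_PLAYBACK);
}

static void example_detach_stream(struct snd_soc_dai *dai)
{
        snd_soc_dai_set_stream(dai, NULL, SNDRV_PCM_STREAM_PLAYBACK);
}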
-static inline unsigned int
-snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
-{
- return dai->stream_active[stream];
-}
-
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index c3039e97929a..667ecd4daa68 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,6 +16,7 @@
#include <sound/asoc.h>
struct device;
+struct snd_pcm_substream;
struct snd_soc_pcm_runtime;
struct soc_enum;
@@ -41,36 +42,45 @@ struct soc_enum;
/* codec domain */
#define SND_SOC_DAPM_VMID(wname) \
-{ .id = snd_soc_dapm_vmid, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_vmid, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0}
/* platform domain */
#define SND_SOC_DAPM_SIGGEN(wname) \
-{ .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_SINK(wname) \
-{ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_INPUT(wname) \
-{ .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_OUTPUT(wname) \
-{ .id = snd_soc_dapm_output, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_output, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_MIC(wname, wevent) \
-{ .id = snd_soc_dapm_mic, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mic, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_HP(wname, wevent) \
-{ .id = snd_soc_dapm_hp, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_hp, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_SPK(wname, wevent) \
-{ .id = snd_soc_dapm_spk, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_spk, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_LINE(wname, wevent) \
-{ .id = snd_soc_dapm_line, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_line, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
@@ -81,93 +91,110 @@ struct soc_enum;
/* path domain */
#define SND_SOC_DAPM_PGA(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_out_drv, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER_NAMED_CTL(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */
#define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_micbias, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_micbias, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = NULL, .num_kcontrols = 0}
#define SND_SOC_DAPM_SWITCH(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_switch, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_switch, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_mux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_DEMUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_demux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_demux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
wcontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_NAMED_CTL_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
/* path domain with event - event handler must return 0 for success */
#define SND_SOC_DAPM_PGA_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_out_drv, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_NAMED_CTL_E(wname, wreg, wshift, winvert, \
wcontrols, wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, \
.num_kcontrols = wncontrols, .event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_switch, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_switch, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
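The conversion above wraps every initializer in a (struct snd_soc_dapm_widget){ ... } compound literal, so each macro now yields a complete expression of that type instead of a bare brace list; existing array-style usage is unchanged. A minimal sketch, assuming a hypothetical codec (demo_* names and register addresses are placeholders, not part of this diff):

static const char * const demo_out_texts[] = { "DAC", "Bypass" };
static SOC_ENUM_SINGLE_DECL(demo_out_enum, 0x0c, 0, demo_out_texts);
static const struct snd_kcontrol_new demo_out_mux =
        SOC_DAPM_ENUM("Route", demo_out_enum);

static const struct snd_soc_dapm_widget demo_widgets[] = {
        /* each entry expands to a (struct snd_soc_dapm_widget){ ... } literal */
        SND_SOC_DAPM_MUX("Out Mux", SND_SOC_NOPM, 0, 0, &demo_out_mux),
        SND_SOC_DAPM_PGA("Out PGA", 0x10, 0, 0, NULL, 0),
};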
@@ -175,101 +202,121 @@ struct soc_enum;
/* additional sequencing control within an event type */
#define SND_SOC_DAPM_PGA_S(wname, wsubseq, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags, \
.subseq = wsubseq}
#define SND_SOC_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, winvert, wevent, \
wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_supply, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags, .subseq = wsubseq}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_NAMED_CTL_E_ARRAY(wname, wreg, wshift, winvert, \
wcontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
-{ .id = snd_soc_dapm_pre, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pre, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_POST(wname, wevent) \
-{ .id = snd_soc_dapm_post, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_post, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
#define SND_SOC_DAPM_AIF_IN(wname, stname, wchan, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_IN_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_AIF_OUT(wname, stname, wchan, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) }
#define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
-{ .id = snd_soc_dapm_clock_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_clock_supply, .name = wname, \
.reg = SND_SOC_NOPM, .event = dapm_clock_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
/* generic widgets */
#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \
-{ .id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \
+(struct snd_soc_dapm_widget) { \
+ .id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \
.reg = wreg, .shift = wshift, .mask = wmask, \
.on_val = won_val, .off_val = woff_val, }
#define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_supply, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_REGULATOR_SUPPLY(wname, wdelay, wflags) \
-{ .id = snd_soc_dapm_regulator_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_regulator_supply, .name = wname, \
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
.on_val = wflags}
#define SND_SOC_DAPM_PINCTRL(wname, active, sleep) \
-{ .id = snd_soc_dapm_pinctrl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pinctrl, .name = wname, \
.priv = (&(struct snd_soc_dapm_pinctrl_priv) \
{ .active_state = active, .sleep_state = sleep,}), \
.reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \
@@ -341,31 +388,27 @@ struct soc_enum;
#define SND_SOC_DAPM_STREAM_STOP 0x2
#define SND_SOC_DAPM_STREAM_SUSPEND 0x4
#define SND_SOC_DAPM_STREAM_RESUME 0x8
-#define SND_SOC_DAPM_STREAM_PAUSE_PUSH 0x10
+#define SND_SOC_DAPM_STREAM_PAUSE_PUSH 0x10
#define SND_SOC_DAPM_STREAM_PAUSE_RELEASE 0x20
/* dapm event types */
-#define SND_SOC_DAPM_PRE_PMU 0x1 /* before widget power up */
-#define SND_SOC_DAPM_POST_PMU 0x2 /* after widget power up */
-#define SND_SOC_DAPM_PRE_PMD 0x4 /* before widget power down */
-#define SND_SOC_DAPM_POST_PMD 0x8 /* after widget power down */
-#define SND_SOC_DAPM_PRE_REG 0x10 /* before audio path setup */
-#define SND_SOC_DAPM_POST_REG 0x20 /* after audio path setup */
-#define SND_SOC_DAPM_WILL_PMU 0x40 /* called at start of sequence */
-#define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
-#define SND_SOC_DAPM_PRE_POST_PMD \
- (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
-#define SND_SOC_DAPM_PRE_POST_PMU \
- (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
+#define SND_SOC_DAPM_PRE_PMU 0x1 /* before widget power up */
+#define SND_SOC_DAPM_POST_PMU 0x2 /* after widget power up */
+#define SND_SOC_DAPM_PRE_PMD 0x4 /* before widget power down */
+#define SND_SOC_DAPM_POST_PMD 0x8 /* after widget power down */
+#define SND_SOC_DAPM_PRE_REG 0x10 /* before audio path setup */
+#define SND_SOC_DAPM_POST_REG 0x20 /* after audio path setup */
+#define SND_SOC_DAPM_WILL_PMU 0x40 /* called at start of sequence */
+#define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
+#define SND_SOC_DAPM_PRE_POST_PMD (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
/* convenience event type detection */
-#define SND_SOC_DAPM_EVENT_ON(e) \
- (e & (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU))
-#define SND_SOC_DAPM_EVENT_OFF(e) \
- (e & (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD))
+#define SND_SOC_DAPM_EVENT_ON(e) (e & (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU))
+#define SND_SOC_DAPM_EVENT_OFF(e) (e & (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD))
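The ON/OFF helpers are typically used to branch inside a widget event callback; a sketch (hypothetical handler, register 0x20 and its mask are placeholders):

static int demo_amp_event(struct snd_soc_dapm_widget *w,
                          struct snd_kcontrol *kcontrol, int event)
{
        struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);

        if (SND_SOC_DAPM_EVENT_ON(event))       /* PRE_PMU or POST_PMU */
                snd_soc_component_update_bits(c, 0x20, 0x1, 0x1);
        else if (SND_SOC_DAPM_EVENT_OFF(event)) /* PRE_PMD or POST_PMD */
                snd_soc_component_update_bits(c, 0x20, 0x1, 0x0);

        return 0;       /* event handlers must return 0 for success */
}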
/* regulator widget flags */
-#define SND_SOC_DAPM_REGULATOR_BYPASS 0x1 /* bypass when disabled */
+#define SND_SOC_DAPM_REGULATOR_BYPASS 0x1 /* bypass when disabled */
struct snd_soc_dapm_widget;
enum snd_soc_dapm_type;
@@ -396,18 +439,13 @@ enum snd_soc_bias_level {
SND_SOC_BIAS_ON = 3,
};
-int dapm_regulator_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event);
-int dapm_clock_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event);
-int dapm_pinctrl_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event);
+int dapm_regulator_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
+int dapm_clock_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
+int dapm_pinctrl_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
/* dapm controls */
-int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
@@ -419,29 +457,25 @@ int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol,
int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
 struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_widget *widget,
- int num);
-struct snd_soc_dapm_widget *snd_soc_dapm_new_control(
- struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget, int num);
+struct snd_soc_dapm_widget *snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget);
-struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(
- struct snd_soc_dapm_context *dapm,
+struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget);
-int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
- struct snd_soc_dai *dai);
+int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai);
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
void snd_soc_dapm_init(struct snd_soc_dapm_context *dapm,
- struct snd_soc_card *card,
- struct snd_soc_component *component);
+ struct snd_soc_card *card, struct snd_soc_component *component);
int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
@@ -449,49 +483,36 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
-void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm);
/* dapm events */
-void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
- int event);
+void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, int event);
void snd_soc_dapm_stream_stop(struct snd_soc_pcm_runtime *rtd, int stream);
void snd_soc_dapm_shutdown(struct snd_soc_card *card);
/* external DAPM widget events */
int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
- struct snd_kcontrol *kcontrol, int connect,
- struct snd_soc_dapm_update *update);
+ struct snd_kcontrol *kcontrol, int connect, struct snd_soc_dapm_update *update);
int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
struct snd_soc_dapm_update *update);
/* dapm sys fs - used by the core */
extern struct attribute *soc_dapm_dev_attrs[];
-void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
- struct dentry *parent);
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm, struct dentry *parent);
/* dapm audio pin control and status */
-int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm,
- const char *pin);
-int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
- const char *pin);
-int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
- const char *pin);
-int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm,
- const char *pin);
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm,
- const char *pin);
-int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
- const char *pin);
+int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, const char *pin);
int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_sync_unlocked(struct snd_soc_dapm_context *dapm);
-int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
- const char *pin);
-int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
- const char *pin);
-int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
- const char *pin);
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin);
unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
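The pin helpers only mark widgets dirty; the change takes effect on the next sync. A sketch of the usual machine-driver pattern (the "Speaker" widget name is assumed):

static void demo_set_speaker(struct snd_soc_card *card, bool on)
{
        struct snd_soc_dapm_context *dapm = &card->dapm;

        if (on)
                snd_soc_dapm_enable_pin(dapm, "Speaker");
        else
                snd_soc_dapm_disable_pin(dapm, "Speaker");

        snd_soc_dapm_sync(dapm);        /* commit the pin state change */
}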
/* Mostly internal - should not normally be used */
@@ -500,40 +521,35 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
/* dapm path query */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list,
- bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
- enum snd_soc_dapm_direction));
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction));
void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list);
-struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
- struct snd_kcontrol *kcontrol);
-
-struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(
- struct snd_kcontrol *kcontrol);
+struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(struct snd_kcontrol *kcontrol);
+struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(struct snd_kcontrol *kcontrol);
-int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm,
- enum snd_soc_bias_level level);
+int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
/* dapm widget types */
enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
snd_soc_dapm_output, /* output pin */
- snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */
- snd_soc_dapm_demux, /* connects the input to one of multiple outputs */
- snd_soc_dapm_mixer, /* mixes several analog signals together */
- snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */
- snd_soc_dapm_pga, /* programmable gain/attenuation (volume) */
- snd_soc_dapm_out_drv, /* output driver */
- snd_soc_dapm_adc, /* analog to digital converter */
- snd_soc_dapm_dac, /* digital to analog converter */
+ snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */
+ snd_soc_dapm_demux, /* connects the input to one of multiple outputs */
+ snd_soc_dapm_mixer, /* mixes several analog signals together */
+ snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */
+ snd_soc_dapm_pga, /* programmable gain/attenuation (volume) */
+ snd_soc_dapm_out_drv, /* output driver */
+ snd_soc_dapm_adc, /* analog to digital converter */
+ snd_soc_dapm_dac, /* digital to analog converter */
snd_soc_dapm_micbias, /* microphone bias (power) - DEPRECATED: use snd_soc_dapm_supply */
- snd_soc_dapm_mic, /* microphone */
- snd_soc_dapm_hp, /* headphones */
- snd_soc_dapm_spk, /* speaker */
- snd_soc_dapm_line, /* line input/output */
+ snd_soc_dapm_mic, /* microphone */
+ snd_soc_dapm_hp, /* headphones */
+ snd_soc_dapm_spk, /* speaker */
+ snd_soc_dapm_line, /* line input/output */
snd_soc_dapm_switch, /* analog switch */
- snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */
- snd_soc_dapm_pre, /* machine specific pre widget - exec first */
- snd_soc_dapm_post, /* machine specific post widget - exec last */
+ snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */
+ snd_soc_dapm_pre, /* machine specific pre widget - exec first */
+ snd_soc_dapm_post, /* machine specific post widget - exec last */
snd_soc_dapm_supply, /* power/clock supply */
snd_soc_dapm_pinctrl, /* pinctrl */
snd_soc_dapm_regulator_supply, /* external regulator */
@@ -558,11 +574,6 @@ enum snd_soc_dapm_type {
SND_SOC_DAPM_TYPE_COUNT
};
-enum snd_soc_dapm_subclass {
- SND_SOC_DAPM_CLASS_INIT = 0,
- SND_SOC_DAPM_CLASS_RUNTIME = 1,
-};
-
/*
* DAPM audio route definition.
*
@@ -599,9 +610,9 @@ struct snd_soc_dapm_path {
};
/* status */
- u32 connect:1; /* source and sink widgets are connected */
- u32 walking:1; /* path is in the process of being walked */
- u32 weak:1; /* path ignored for power management */
+ u32 connect:1; /* source and sink widgets are connected */
+ u32 walking:1; /* path is in the process of being walked */
+ u32 weak:1; /* path ignored for power management */
u32 is_supply:1; /* At least one of the connected widgets is a supply */
int (*connected)(struct snd_soc_dapm_widget *source,
@@ -615,8 +626,8 @@ struct snd_soc_dapm_path {
/* dapm widget */
struct snd_soc_dapm_widget {
enum snd_soc_dapm_type id;
- const char *name; /* widget name */
- const char *sname; /* stream name */
+ const char *name; /* widget name */
+ const char *sname; /* stream name */
struct list_head list;
struct snd_soc_dapm_context *dapm;
@@ -635,11 +646,12 @@ struct snd_soc_dapm_widget {
unsigned char connected:1; /* connected codec pin */
unsigned char new:1; /* cnew complete */
unsigned char force:1; /* force state */
- unsigned char ignore_suspend:1; /* kept enabled over suspend */
+ unsigned char ignore_suspend:1; /* kept enabled over suspend */
unsigned char new_power:1; /* power from this run */
unsigned char power_checked:1; /* power checked this run */
unsigned char is_supply:1; /* Widget is a supply type widget */
 unsigned char is_ep:2; /* Widget is an endpoint type widget */
+ unsigned char no_wname_in_kcontrol_name:1; /* No widget name prefix in kcontrol name */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -679,27 +691,24 @@ struct snd_soc_dapm_update {
bool has_second_set;
};
-struct snd_soc_dapm_wcache {
- struct snd_soc_dapm_widget *widget;
-};
-
/* DAPM context */
struct snd_soc_dapm_context {
enum snd_soc_bias_level bias_level;
- unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
- /* Go to BIAS_OFF in suspend if the DAPM context is idle */
- unsigned int suspend_bias_off:1;
- struct device *dev; /* from parent - for debug */
- struct snd_soc_component *component; /* parent component */
- struct snd_soc_card *card; /* parent card */
+ /* bit field */
+ unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
+ unsigned int suspend_bias_off:1; /* Use BIAS_OFF in suspend if the DAPM is idle */
+
+ struct device *dev; /* from parent - for debug */
+ struct snd_soc_component *component; /* parent component */
+ struct snd_soc_card *card; /* parent card */
/* used during DAPM updates */
enum snd_soc_bias_level target_bias_level;
struct list_head list;
- struct snd_soc_dapm_wcache path_sink_cache;
- struct snd_soc_dapm_wcache path_source_cache;
+ struct snd_soc_dapm_widget *wcache_sink;
+ struct snd_soc_dapm_widget *wcache_source;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dapm;
@@ -709,7 +718,7 @@ struct snd_soc_dapm_context {
/* A list of widgets associated with an object, typically a snd_kcontrol */
struct snd_soc_dapm_widget_list {
int num_widgets;
- struct snd_soc_dapm_widget *widgets[];
+ struct snd_soc_dapm_widget *widgets[] __counted_by(num_widgets);
};
#define for_each_dapm_widgets(list, i, widget) \
@@ -766,11 +775,11 @@ enum snd_soc_dapm_direction {
#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
-#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
-#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
+#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
/**
- * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths in the
+ * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
* specified direction of a widget
* @w: The widget
* @dir: Whether to iterate over the paths where the specified widget is the
@@ -781,7 +790,7 @@ enum snd_soc_dapm_direction {
list_for_each_entry(p, &w->edges[dir], list_node[dir])
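A sketch of the renamed iterator walking a widget's outgoing paths (w is assumed to be a valid widget, with the caller holding the usual DAPM locking):

struct snd_soc_dapm_path *p;

snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
        if (p->connect)         /* only report active source->sink edges */
                pr_debug("%s -> %s\n", w->name, p->sink->name);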
/**
- * snd_soc_dapm_widget_for_each_sink_path_safe - Iterates over all paths in the
+ * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
* specified direction of a widget
* @w: The widget
* @dir: Whether to iterate over the paths where the specified widget is the
@@ -789,7 +798,7 @@ enum snd_soc_dapm_direction {
* @p: The path iterator variable
* @next_p: Temporary storage for the next path
*
- * This function works like snd_soc_dapm_widget_for_each_sink_path, expect that
+ * This function works like snd_soc_dapm_widget_for_each_path, except that
* it is safe to remove the current path from the list while iterating
*/
#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 0f6c50b17bba..ebd24753dd00 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -78,8 +78,6 @@ struct snd_soc_dpcm {
struct list_head list_be;
struct list_head list_fe;
- /* hw params for this link - may be different for each link */
- struct snd_pcm_hw_params hw_params;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_state;
#endif
@@ -93,7 +91,6 @@ struct snd_soc_dpcm_runtime {
struct list_head fe_clients;
int users;
- struct snd_pcm_runtime *runtime;
struct snd_pcm_hw_params hw_params;
/* state and update */
@@ -101,6 +98,10 @@ struct snd_soc_dpcm_runtime {
enum snd_soc_dpcm_state state;
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
+
+ int be_start; /* refcount protected by BE stream pcm lock */
+ int be_pause; /* refcount protected by BE stream pcm lock */
+ bool fe_pause; /* used to track STOP after PAUSE */
};
#define for_each_dpcm_fe(be, stream, _dpcm) \
@@ -121,6 +122,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be, int stream);
+/* can this BE perform prepare */
+int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int stream);
+
/* is the current PCM operation for this FE ? */
int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
@@ -149,14 +154,23 @@ void dpcm_path_put(struct snd_soc_dapm_widget_list **list);
int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list, int new);
int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
-int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
+ int do_hw_free, struct snd_soc_dpcm *last);
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream);
void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream);
-int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
int event);
+bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir);
+int widget_in_list(struct snd_soc_dapm_widget_list *list,
+ struct snd_soc_dapm_widget *widget);
+
+#define dpcm_be_dai_startup_rollback(fe, stream, last) \
+ dpcm_be_dai_stop(fe, stream, 0, last)
+#define dpcm_be_dai_startup_unwind(fe, stream) dpcm_be_dai_stop(fe, stream, 0, NULL)
+#define dpcm_be_dai_shutdown(fe, stream) dpcm_be_dai_stop(fe, stream, 1, NULL)
#endif
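The three wrappers above funnel into the single dpcm_be_dai_stop() helper; e.g. unwinding a failed FE startup becomes (core-internal sketch, fe and stream assumed valid):

int ret = dpcm_be_dai_startup(fe, stream);
if (ret < 0)
        /* stop the BEs started so far, without hw_free (do_hw_free = 0) */
        dpcm_be_dai_startup_unwind(fe, stream);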
diff --git a/include/sound/soc-jack.h b/include/sound/soc-jack.h
new file mode 100644
index 000000000000..a0abb1ee5110
--- /dev/null
+++ b/include/sound/soc-jack.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * soc-jack.h
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __SOC_JACK_H
+#define __SOC_JACK_H
+
+/**
+ * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
+ *
+ * @pin: name of the pin to update
+ * @mask: bits to check for in reported jack status
+ * @invert: if non-zero then pin is enabled when status is not reported
+ * @list: internal list entry
+ */
+struct snd_soc_jack_pin {
+ struct list_head list;
+ const char *pin;
+ int mask;
+ bool invert;
+};
+
+/**
+ * struct snd_soc_jack_zone - Describes voltage zones of jack detection
+ *
+ * @min_mv: start voltage in mv
+ * @max_mv: end voltage in mv
+ * @jack_type: type of jack that is expected for this voltage
+ * @debounce_time: debounce time for the jack; the codec driver should
+ *		 wait this long before reading the ADC for voltages
+ * @list: internal list entry
+ */
+struct snd_soc_jack_zone {
+ unsigned int min_mv;
+ unsigned int max_mv;
+ unsigned int jack_type;
+ unsigned int debounce_time;
+ struct list_head list;
+};
+
+/**
+ * struct snd_soc_jack_gpio - Describes a gpio pin for jack detection
+ *
+ * @gpio: legacy gpio number
+ * @idx: gpio descriptor index within the function of the GPIO
+ * consumer device
+ * @gpiod_dev: GPIO consumer device
+ * @name: gpio name. Also as connection ID for the GPIO consumer
+ * device function name lookup
+ * @report: value to report when jack detected
+ * @invert: report presence in low state
+ * @debounce_time: debounce time in ms
+ * @wake: enable as wake source
+ * @jack_status_check: callback function which overrides the detection
+ * to provide more complex checks (eg, reading an
+ * ADC).
+ */
+struct snd_soc_jack_gpio {
+ unsigned int gpio;
+ unsigned int idx;
+ struct device *gpiod_dev;
+ const char *name;
+ int report;
+ int invert;
+ int debounce_time;
+ bool wake;
+
+ /* private: */
+ struct snd_soc_jack *jack;
+ struct delayed_work work;
+ struct notifier_block pm_notifier;
+ struct gpio_desc *desc;
+
+ void *data;
+ /* public: */
+ int (*jack_status_check)(void *data);
+};
+
+struct snd_soc_jack {
+ struct mutex mutex;
+ struct snd_jack *jack;
+ struct snd_soc_card *card;
+ struct list_head pins;
+ int status;
+ struct blocking_notifier_head notifier;
+ struct list_head jack_zones;
+};
+
+/* Jack reporting */
+void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
+int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_pin *pins);
+void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
+ struct notifier_block *nb);
+void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
+ struct notifier_block *nb);
+int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_zone *zones);
+int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage);
+#ifdef CONFIG_GPIOLIB
+int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios);
+int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
+ struct snd_soc_jack *jack,
+ int count, struct snd_soc_jack_gpio *gpios);
+void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios);
+#else
+static inline int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios)
+{
+ return 0;
+}
+
+static inline int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
+ struct snd_soc_jack *jack,
+ int count,
+ struct snd_soc_jack_gpio *gpios)
+{
+ return 0;
+}
+
+static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
+ struct snd_soc_jack_gpio *gpios)
+{
+}
+#endif
+
+#endif /* __SOC_JACK_H */
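A usage sketch for the relocated jack API (machine-driver late_probe; the widget names and snd_soc_card_jack_new() from soc-card.h in its four-argument form are assumptions here, not part of this diff):

static struct snd_soc_jack demo_jack;
static struct snd_soc_jack_pin demo_jack_pins[] = {
        { .pin = "Headphone",   .mask = SND_JACK_HEADPHONE },
        { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE },
};

static int demo_late_probe(struct snd_soc_card *card)
{
        int ret;

        ret = snd_soc_card_jack_new(card, "Headset", SND_JACK_HEADSET,
                                    &demo_jack);
        if (ret)
                return ret;

        /* toggle the DAPM pins as jack status bits are reported */
        return snd_soc_jack_add_pins(&demo_jack, ARRAY_SIZE(demo_jack_pins),
                                     demo_jack_pins);
}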
diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h
index 337ac5666757..9314cde1756b 100644
--- a/include/sound/soc-link.h
+++ b/include/sound/soc-link.h
@@ -14,15 +14,19 @@ int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
int snd_soc_link_startup(struct snd_pcm_substream *substream);
-void snd_soc_link_shutdown(struct snd_pcm_substream *substream);
+void snd_soc_link_shutdown(struct snd_pcm_substream *substream,
+ int rollback);
int snd_soc_link_prepare(struct snd_pcm_substream *substream);
int snd_soc_link_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-void snd_soc_link_hw_free(struct snd_pcm_substream *substream);
-int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd);
+void snd_soc_link_hw_free(struct snd_pcm_substream *substream,
+ int rollback);
+int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd,
+ int rollback);
int snd_soc_link_compr_startup(struct snd_compr_stream *cstream);
-void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream);
+void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream,
+ int rollback);
int snd_soc_link_compr_set_params(struct snd_compr_stream *cstream);
#endif /* __SOC_LINK_H */
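The new rollback argument tells the core whether the call is unwinding a partially completed operation, so only links whose startup was marked get torn down; the exact semantics are inferred here from the mark_* fields added to snd_soc_pcm_runtime. Core-internal sketch (substream assumed valid):

int ret = snd_soc_link_startup(substream);
if (ret < 0)
        snd_soc_link_shutdown(substream, 1);    /* rollback = 1 */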
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 5223896de26f..f055c6917f6c 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -31,9 +31,6 @@ struct snd_soc_dai_driver;
struct snd_soc_dai;
struct snd_soc_dapm_route;
-/* object scan be loaded and unloaded in groups with identfying indexes */
-#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
-
/* dynamic object type */
enum snd_soc_dobj_type {
SND_SOC_DOBJ_NONE = 0, /* object is not dynamic */
@@ -57,7 +54,7 @@ struct snd_soc_dobj_control {
/* dynamic widget object */
struct snd_soc_dobj_widget {
- unsigned int kcontrol_type; /* kcontrol type: mixer, enum, bytes */
+ unsigned int *kcontrol_type; /* kcontrol type: mixer, enum, bytes */
};
/* generic dynamic object - all dynamic objects belong to this struct */
@@ -65,7 +62,7 @@ struct snd_soc_dobj {
enum snd_soc_dobj_type type;
unsigned int index; /* objects can belong in different groups */
struct list_head list;
- struct snd_soc_tplg_ops *ops;
+ int (*unload)(struct snd_soc_component *comp, struct snd_soc_dobj *dobj);
union {
struct snd_soc_dobj_control control;
struct snd_soc_dobj_widget widget;
@@ -154,7 +151,7 @@ struct snd_soc_tplg_ops {
struct snd_soc_tplg_hdr *);
/* completion - called at completion of firmware loading */
- void (*complete)(struct snd_soc_component *);
+ int (*complete)(struct snd_soc_component *comp);
/* manifest - optional to inform component of manifest */
int (*manifest)(struct snd_soc_component *, int index,
@@ -181,14 +178,8 @@ static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
/* Dynamic Object loading and removal for component drivers */
int snd_soc_tplg_component_load(struct snd_soc_component *comp,
- struct snd_soc_tplg_ops *ops, const struct firmware *fw,
- u32 index);
-int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index);
-
-/* Widget removal - widgets also removed wth component API */
-void snd_soc_tplg_widget_remove(struct snd_soc_dapm_widget *w);
-void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
- u32 index);
+ struct snd_soc_tplg_ops *ops, const struct firmware *fw);
+int snd_soc_tplg_component_remove(struct snd_soc_component *comp);
/* Binds event handlers to dynamic widgets */
int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
@@ -197,8 +188,7 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
#else
-static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
- u32 index)
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
{
return 0;
}
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 5e3919ffb00c..39613b406b1d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -10,6 +10,7 @@
#ifndef __LINUX_SND_SOC_H
#define __LINUX_SND_SOC_H
+#include <linux/args.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -31,31 +32,31 @@
#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .max = xmax, .platform_max = xmax, \
+ .rshift = shift_right, .max = xmax, \
.invert = xinvert, .autodisable = xautodisable})
#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \
+ .rshift = shift_right, .min = xmin, .max = xmax, \
.sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .max = xmax, .platform_max = xmax, .invert = xinvert})
+ {.reg = xreg, .max = xmax, .invert = xinvert})
#define SOC_DOUBLE_R_VALUE(xlreg, xrreg, xshift, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .platform_max = xmax, .invert = xinvert})
+ .max = xmax, .invert = xinvert})
#define SOC_DOUBLE_R_S_VALUE(xlreg, xrreg, xshift, xmin, xmax, xsign_bit, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .min = xmin, .platform_max = xmax, .sign_bit = xsign_bit, \
+ .max = xmax, .min = xmin, .sign_bit = xsign_bit, \
.invert = xinvert})
#define SOC_DOUBLE_R_RANGE_VALUE(xlreg, xrreg, xshift, xmin, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .min = xmin, .max = xmax, .platform_max = xmax, .invert = xinvert})
+ .min = xmin, .max = xmax, .invert = xinvert})
#define SOC_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
@@ -68,7 +69,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -99,7 +100,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
@@ -136,6 +137,18 @@
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
max, invert, 0) }
+#define SOC_DOUBLE_SX_TLV(xname, xreg, shift_left, shift_right, xmin, xmax, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_sx, \
+ .get = snd_soc_get_volsw_sx, \
+ .put = snd_soc_put_volsw_sx, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, \
+ .shift = shift_left, .rshift = shift_right, \
+ .max = xmax, .min = xmin} }
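A sketch of a control built on the new helper (hypothetical codec; the register, shifts, min/max and dB scale are placeholders):

static const DECLARE_TLV_DB_SCALE(demo_vol_tlv, -1200, 100, 0);

static const struct snd_kcontrol_new demo_controls[] = {
        /* stereo volume in one register: left field at bit 0, right at bit 4 */
        SOC_DOUBLE_SX_TLV("Demo Volume", 0x40, 0, 4, 0x8, 0x18, demo_vol_tlv),
};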
#define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -176,6 +189,8 @@
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_S_VALUE(reg_left, reg_right, xshift, \
xmin, xmax, xsign_bit, xinvert) }
+#define SOC_SINGLE_S_TLV(xname, xreg, xshift, xmin, xmax, xsign_bit, xinvert, tlv_array) \
+ SOC_DOUBLE_R_S_TLV(xname, xreg, xreg, xshift, xmin, xmax, xsign_bit, xinvert, tlv_array)
#define SOC_SINGLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -185,7 +200,7 @@
.put = snd_soc_put_volsw, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, \
- .min = xmin, .max = xmax, .platform_max = xmax, \
+ .min = xmin, .max = xmax, \
.sign_bit = 7,} }
#define SOC_DOUBLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -259,7 +274,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -280,6 +295,23 @@
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
xmax, xinvert) }
+#define SOC_DOUBLE_R_S_EXT_TLV(xname, reg_left, reg_right, xshift, xmin, xmax, \
+ xsign_bit, xinvert, xhandler_get, xhandler_put, \
+ tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = SOC_DOUBLE_R_S_VALUE(reg_left, reg_right, xshift, \
+ xmin, xmax, xsign_bit, xinvert) }
+#define SOC_SINGLE_S_EXT_TLV(xname, xreg, xshift, xmin, xmax, \
+ xsign_bit, xinvert, xhandler_get, xhandler_put, \
+ tlv_array) \
+ SOC_DOUBLE_R_S_EXT_TLV(xname, xreg, xreg, xshift, xmin, xmax, \
+ xsign_bit, xinvert, xhandler_get, xhandler_put, \
+ tlv_array)
#define SOC_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_bool_ext, \
@@ -389,15 +421,13 @@ struct snd_soc_jack_pin;
struct snd_soc_jack_gpio;
-typedef int (*hw_write_t)(void *,const char* ,int);
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
};
int snd_soc_register_card(struct snd_soc_card *card);
-int snd_soc_unregister_card(struct snd_soc_card *card);
+void snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
@@ -485,51 +515,13 @@ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
int snd_soc_calc_bclk(int fs, int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms);
+int snd_soc_tdm_params_to_bclk(struct snd_pcm_hw_params *params,
+ int tdm_width, int tdm_slots, int slot_multiple);
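A sketch of the new TDM-aware helper inside a machine-driver hw_params callback (8 slots of 32 bits with no slot-count rounding; the values are assumptions):

static int demo_hw_params(struct snd_pcm_substream *substream,
                          struct snd_pcm_hw_params *params)
{
        int bclk = snd_soc_tdm_params_to_bclk(params, 32, 8, 1);

        if (bclk < 0)
                return bclk;    /* negative errno on bad params */

        /* ... hand bclk to the DAI sysclk/clock-divider configuration ... */
        return 0;
}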
/* set runtime hw params */
int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
const struct snd_pcm_hardware *hw);
-/* Jack reporting */
-void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
-int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_pin *pins);
-void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
- struct notifier_block *nb);
-void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
- struct notifier_block *nb);
-int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_zone *zones);
-int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage);
-#ifdef CONFIG_GPIOLIB
-int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios);
-int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
- struct snd_soc_jack *jack,
- int count, struct snd_soc_jack_gpio *gpios);
-void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios);
-#else
-static inline int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios)
-{
- return 0;
-}
-
-static inline int snd_soc_jack_add_gpiods(struct device *gpiod_dev,
- struct snd_soc_jack *jack,
- int count,
- struct snd_soc_jack_gpio *gpios)
-{
- return 0;
-}
-
-static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
- struct snd_soc_jack_gpio *gpios)
-{
-}
-#endif
-
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
@@ -616,91 +608,19 @@ int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
-/**
- * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
- *
- * @pin: name of the pin to update
- * @mask: bits to check for in reported jack status
- * @invert: if non-zero then pin is enabled when status is not reported
- * @list: internal list entry
- */
-struct snd_soc_jack_pin {
- struct list_head list;
- const char *pin;
- int mask;
- bool invert;
-};
-
-/**
- * struct snd_soc_jack_zone - Describes voltage zones of jack detection
- *
- * @min_mv: start voltage in mv
- * @max_mv: end voltage in mv
- * @jack_type: type of jack that is expected for this voltage
- * @debounce_time: debounce_time for jack, codec driver should wait for this
- * duration before reading the adc for voltages
- * @list: internal list entry
- */
-struct snd_soc_jack_zone {
- unsigned int min_mv;
- unsigned int max_mv;
- unsigned int jack_type;
- unsigned int debounce_time;
- struct list_head list;
-};
-
-/**
- * struct snd_soc_jack_gpio - Describes a gpio pin for jack detection
- *
- * @gpio: legacy gpio number
- * @idx: gpio descriptor index within the function of the GPIO
- * consumer device
- * @gpiod_dev: GPIO consumer device
- * @name: gpio name. Also as connection ID for the GPIO consumer
- * device function name lookup
- * @report: value to report when jack detected
- * @invert: report presence in low state
- * @debounce_time: debounce time in ms
- * @wake: enable as wake source
- * @jack_status_check: callback function which overrides the detection
- * to provide more complex checks (eg, reading an
- * ADC).
- */
-struct snd_soc_jack_gpio {
- unsigned int gpio;
- unsigned int idx;
- struct device *gpiod_dev;
- const char *name;
- int report;
- int invert;
- int debounce_time;
- bool wake;
-
- /* private: */
- struct snd_soc_jack *jack;
- struct delayed_work work;
- struct notifier_block pm_notifier;
- struct gpio_desc *desc;
-
- void *data;
- /* public: */
- int (*jack_status_check)(void *data);
-};
+enum snd_soc_trigger_order {
+ /* start stop */
+ SND_SOC_TRIGGER_ORDER_DEFAULT = 0, /* Link->Component->DAI DAI->Component->Link */
+ SND_SOC_TRIGGER_ORDER_LDC, /* Link->DAI->Component Component->DAI->Link */
-struct snd_soc_jack {
- struct mutex mutex;
- struct snd_jack *jack;
- struct snd_soc_card *card;
- struct list_head pins;
- int status;
- struct blocking_notifier_head notifier;
- struct list_head jack_zones;
+ SND_SOC_TRIGGER_ORDER_MAX,
};
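A sketch of a DAI link opting into the non-default ordering (other required fields elided): with _LDC the start sequence runs Link->DAI->Component and the stop sequence runs the mirrored Component->DAI->Link:

static struct snd_soc_dai_link demo_link = {
        .name           = "demo-link",
        .trigger_start  = SND_SOC_TRIGGER_ORDER_LDC,
        .trigger_stop   = SND_SOC_TRIGGER_ORDER_LDC,
        /* ... cpus/codecs/platforms ... */
};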
/* SoC PCM stream information */
struct snd_soc_pcm_stream {
const char *stream_name;
u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */
unsigned int rates; /* SNDRV_PCM_RATE_* */
unsigned int rate_min; /* min rate */
unsigned int rate_max; /* max rate */
@@ -723,7 +643,6 @@ struct snd_soc_compr_ops {
int (*startup)(struct snd_compr_stream *);
void (*shutdown)(struct snd_compr_stream *);
int (*set_params)(struct snd_compr_stream *);
- int (*trigger)(struct snd_compr_stream *);
};
struct snd_soc_component*
@@ -734,6 +653,49 @@ struct snd_soc_dai_link_component {
const char *name;
struct device_node *of_node;
const char *dai_name;
+ struct of_phandle_args *dai_args;
+};
+
+/*
+ * [dai_link->ch_maps Image sample]
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <---> Codec1
+ * CPU2 <---> Codec2
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 2 ch-map[2].codec = 2
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <-+-> Codec1
+ * CPU2 <-/
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 2 ch-map[2].codec = 1
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <-+-> Codec1
+ * \-> Codec2
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 1 ch-map[2].codec = 2
+ *
+ */
+struct snd_soc_dai_link_ch_map {
+ unsigned int cpu;
+ unsigned int codec;
+ unsigned int ch_mask;
};
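As a sketch, the third diagram above (CPU1 and CPU2 both feeding Codec1) would be described by:

static struct snd_soc_dai_link_ch_map demo_ch_maps[] = {
        { .cpu = 0, .codec = 0 },
        { .cpu = 1, .codec = 1 },
        { .cpu = 2, .codec = 1 },       /* second CPU sharing Codec1 */
};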
struct snd_soc_dai_link {
@@ -764,6 +726,9 @@ struct snd_soc_dai_link {
struct snd_soc_dai_link_component *codecs;
unsigned int num_codecs;
+ /* num_ch_maps = max(num_cpu, num_codecs) */
+ struct snd_soc_dai_link_ch_map *ch_maps;
+
/*
* You MAY specify the link's platform/PCM/DMA driver, either by
* device name, or by DT/OF node, but not both. Some forms of link
@@ -774,8 +739,11 @@ struct snd_soc_dai_link {
int id; /* optional ID for machine driver link identification */
- const struct snd_soc_pcm_stream *params;
- unsigned int num_params;
+ /*
+ * for Codec2Codec
+ */
+ const struct snd_soc_pcm_stream *c2c_params;
+ unsigned int num_c2c_params;
unsigned int dai_fmt; /* format to set on init */
@@ -795,6 +763,15 @@ struct snd_soc_dai_link {
const struct snd_soc_ops *ops;
const struct snd_soc_compr_ops *compr_ops;
+ /*
+ * soc_pcm_trigger() start/stop sequence.
+ * see also
+ * snd_soc_component_driver
+ * soc_pcm_trigger()
+ */
+ enum snd_soc_trigger_order trigger_start;
+ enum snd_soc_trigger_order trigger_stop;
+
/* Mark this pcm with non atomic ops */
unsigned int nonatomic:1;
@@ -806,9 +783,9 @@ struct snd_soc_dai_link {
unsigned int ignore_suspend:1;
/* Symmetry requirements */
- unsigned int symmetric_rates:1;
+ unsigned int symmetric_rate:1;
unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
+ unsigned int symmetric_sample_bits:1;
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int no_pcm:1;
@@ -837,20 +814,48 @@ struct snd_soc_dai_link {
struct snd_soc_dobj dobj; /* For topology */
#endif
};
+
+static inline int snd_soc_link_num_ch_map(struct snd_soc_dai_link *link) {
+ return max(link->num_cpus, link->num_codecs);
+}
+
+static inline struct snd_soc_dai_link_component*
+snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
+ return &(link)->cpus[n];
+}
+
+static inline struct snd_soc_dai_link_component*
+snd_soc_link_to_codec(struct snd_soc_dai_link *link, int n) {
+ return &(link)->codecs[n];
+}
+
+static inline struct snd_soc_dai_link_component*
+snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
+ return &(link)->platforms[n];
+}
+
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
- ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \
+ ((i) < link->num_codecs) && \
+ ((codec) = snd_soc_link_to_codec(link, i)); \
(i)++)
#define for_each_link_platforms(link, i, platform) \
for ((i) = 0; \
((i) < link->num_platforms) && \
- ((platform) = &link->platforms[i]); \
+ ((platform) = snd_soc_link_to_platform(link, i)); \
(i)++)
#define for_each_link_cpus(link, i, cpu) \
for ((i) = 0; \
- ((i) < link->num_cpus) && ((cpu) = &link->cpus[i]); \
+ ((i) < link->num_cpus) && \
+ ((cpu) = snd_soc_link_to_cpu(link, i)); \
+ (i)++)
+
+#define for_each_link_ch_maps(link, i, ch_map) \
+ for ((i) = 0; \
+ ((i) < snd_soc_link_num_ch_map(link) && \
+ ((ch_map) = link->ch_maps + i)); \
(i)++)
/*
@@ -916,12 +921,8 @@ struct snd_soc_dai_link {
.platforms = platform, \
.num_platforms = ARRAY_SIZE(platform)
-#define SND_SOC_DAILINK_REGx(_1, _2, _3, func, ...) func
#define SND_SOC_DAILINK_REG(...) \
- SND_SOC_DAILINK_REGx(__VA_ARGS__, \
- SND_SOC_DAILINK_REG3, \
- SND_SOC_DAILINK_REG2, \
- SND_SOC_DAILINK_REG1)(__VA_ARGS__)
+ CONCATENATE(SND_SOC_DAILINK_REG, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#define SND_SOC_DAILINK_DEF(name, def...) \
static struct snd_soc_dai_link_component name[] = { def }
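Call sites are unchanged by the CONCATENATE()/COUNT_ARGS() rewrite: the argument count still selects SND_SOC_DAILINK_REG1/2/3. A sketch with two arguments (hypothetical component and DAI names):

SND_SOC_DAILINK_DEF(demo_cpus,
        DAILINK_COMP_ARRAY(COMP_CPU("demo-cpu-dai")));
SND_SOC_DAILINK_DEF(demo_codecs,
        DAILINK_COMP_ARRAY(COMP_CODEC("demo-codec", "demo-codec-dai")));

static struct snd_soc_dai_link demo_links[] = {
        {
                .name = "demo",
                /* expands to SND_SOC_DAILINK_REG2(demo_cpus, demo_codecs) */
                SND_SOC_DAILINK_REG(demo_cpus, demo_codecs),
        },
};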
@@ -938,9 +939,10 @@ struct snd_soc_dai_link {
#define COMP_PLATFORM(_name) { .name = _name }
#define COMP_AUX(_name) { .name = _name }
#define COMP_CODEC_CONF(_name) { .name = _name }
-#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }
+#define COMP_DUMMY() /* see snd_soc_fill_dummy_dai() */
extern struct snd_soc_dai_link_component null_dailink_component[0];
+extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
struct snd_soc_codec_conf {
@@ -977,6 +979,17 @@ struct snd_soc_card {
#ifdef CONFIG_DMI
char dmi_longname[80];
#endif /* CONFIG_DMI */
+
+#ifdef CONFIG_PCI
+ /*
+ * PCI does not define 0 as invalid, so pci_subsystem_set indicates
+ * whether a value has been written to these fields.
+ */
+ unsigned short pci_subsystem_vendor;
+ unsigned short pci_subsystem_device;
+ bool pci_subsystem_set;
+#endif /* CONFIG_PCI */
+
char topology_shortname[32];
struct device *dev;
@@ -990,10 +1003,9 @@ struct snd_soc_card {
struct mutex pcm_mutex;
enum snd_soc_pcm_subclass pcm_subclass;
- spinlock_t dpcm_lock;
-
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
+ void (*fixup_controls)(struct snd_soc_card *card);
int (*remove)(struct snd_soc_card *card);
/* the pre and post PM functions are used to do any PM work before and
@@ -1084,6 +1096,7 @@ struct snd_soc_card {
unsigned int fully_routed:1;
unsigned int disable_route_checks:1;
unsigned int probed:1;
+ unsigned int component_chaining:1;
void *drvdata;
};
@@ -1118,6 +1131,12 @@ struct snd_soc_card {
#define for_each_card_widgets_safe(card, w, _w) \
list_for_each_entry_safe(w, _w, &card->widgets, list)
+
+static inline int snd_soc_card_is_instantiated(struct snd_soc_card *card)
+{
+ return card && card->instantiated;
+}
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device *dev;
@@ -1125,10 +1144,11 @@ struct snd_soc_pcm_runtime {
struct snd_soc_dai_link *dai_link;
struct snd_pcm_ops ops;
- unsigned int params_select; /* currently selected param for dai link */
+ unsigned int c2c_params_select; /* currently selected c2c_param for dai link */
/* Dynamic PCM BE runtime data */
- struct snd_soc_dpcm_runtime dpcm[2];
+ struct snd_soc_dpcm_runtime dpcm[SNDRV_PCM_STREAM_LAST + 1];
+ struct snd_soc_dapm_widget *c2c_widget[SNDRV_PCM_STREAM_LAST + 1];
long pmdown_time;
@@ -1140,15 +1160,10 @@ struct snd_soc_pcm_runtime {
* dais = cpu_dai + codec_dai
* see
* soc_new_pcm_runtime()
- * asoc_rtd_to_cpu()
- * asoc_rtd_to_codec()
+ * snd_soc_rtd_to_cpu()
+ * snd_soc_rtd_to_codec()
*/
struct snd_soc_dai **dais;
- unsigned int num_codecs;
- unsigned int num_cpus;
-
- struct snd_soc_dapm_widget *playback_widget;
- struct snd_soc_dapm_widget *capture_widget;
struct delayed_work delayed_work;
void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
@@ -1159,17 +1174,26 @@ struct snd_soc_pcm_runtime {
unsigned int num; /* 0-based and monotonic increasing */
struct list_head list; /* rtd list of the soc card */
+ /* function mark */
+ struct snd_pcm_substream *mark_startup;
+ struct snd_pcm_substream *mark_hw_params;
+ struct snd_pcm_substream *mark_trigger;
+ struct snd_compr_stream *mark_compr_startup;
+
/* bit field */
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+ bool initialized;
+
int num_components;
struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};
+
/* see soc_new_pcm_runtime() */
-#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
-#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]
-#define asoc_substream_to_rtd(substream) \
+#define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
+#define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
+#define snd_soc_substream_to_rtd(substream) \
(struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
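With the rename, a typical ops callback resolves its runtime and DAIs like this (sketch, substream assumed valid):

struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);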
#define for_each_rtd_components(rtd, i, component) \
@@ -1178,21 +1202,18 @@ struct snd_soc_pcm_runtime {
(i)++)
#define for_each_rtd_cpu_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
+ ((i) < rtd->dai_link->num_cpus) && ((dai) = snd_soc_rtd_to_cpu(rtd, i)); \
(i)++)
-#define for_each_rtd_cpu_dais_rollback(rtd, i, dai) \
- for (; (--(i) >= 0) && ((dai) = asoc_rtd_to_cpu(rtd, i));)
#define for_each_rtd_codec_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
+ ((i) < rtd->dai_link->num_codecs) && ((dai) = snd_soc_rtd_to_codec(rtd, i)); \
(i)++)
-#define for_each_rtd_codec_dais_rollback(rtd, i, dai) \
- for (; (--(i) >= 0) && ((dai) = asoc_rtd_to_codec(rtd, i));)
#define for_each_rtd_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \
+ ((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \
((dai) = (rtd)->dais[i]); \
(i)++)
+#define for_each_rtd_ch_maps(rtd, i, ch_maps) for_each_link_ch_maps(rtd->dai_link, i, ch_maps)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
@@ -1307,6 +1328,7 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
const char *propname);
+int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop);
int snd_soc_of_get_slot_mask(struct device_node *np,
const char *prop_name,
unsigned int *mask);
@@ -1331,25 +1353,56 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
-unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
- const char *prefix,
- struct device_node **bitclkmaster,
- struct device_node **framemaster);
+int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname);
+
+unsigned int snd_soc_daifmt_clock_provider_flipped(unsigned int dai_fmt);
+unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame);
+
+unsigned int snd_soc_daifmt_parse_format(struct device_node *np, const char *prefix);
+unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
+ const char *prefix,
+ struct device_node **bitclkmaster,
+ struct device_node **framemaster);
+#define snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix) \
+ snd_soc_daifmt_parse_clock_provider_raw(np, prefix, NULL, NULL)
+#define snd_soc_daifmt_parse_clock_provider_as_phandle \
+ snd_soc_daifmt_parse_clock_provider_raw
+#define snd_soc_daifmt_parse_clock_provider_as_flag(np, prefix) \
+ snd_soc_daifmt_clock_provider_from_bitmap( \
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix))
+
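The removed snd_soc_of_parse_daifmt() is effectively split in two: snd_soc_daifmt_parse_format() handles the format and inversion bits, while the clock-provider helpers handle the CBx_CFx bits. A hedged sketch of the usual combination in a machine driver (the device-tree prefix is an assumption, not part of the patch):

    /* Illustration only: rebuild dai_fmt from a device-tree node */
    static unsigned int my_parse_daifmt(struct device_node *np)
    {
    	unsigned int fmt;

    	fmt  = snd_soc_daifmt_parse_format(np, "simple-audio-card,");
    	fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np, "simple-audio-card,");

    	return fmt;
    }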
+int snd_soc_get_stream_cpu(struct snd_soc_dai_link *dai_link, int stream);
+int snd_soc_get_dlc(const struct of_phandle_args *args,
+ struct snd_soc_dai_link_component *dlc);
+int snd_soc_of_get_dlc(struct device_node *of_node,
+ struct of_phandle_args *args,
+ struct snd_soc_dai_link_component *dlc,
+ int index);
int snd_soc_get_dai_id(struct device_node *ep);
-int snd_soc_get_dai_name(struct of_phandle_args *args,
+int snd_soc_get_dai_name(const struct of_phandle_args *args,
const char **dai_name);
int snd_soc_of_get_dai_name(struct device_node *of_node,
- const char **dai_name);
+ const char **dai_name, int index);
int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link);
void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link);
-
-int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link);
+int snd_soc_of_get_dai_link_cpus(struct device *dev,
+ struct device_node *of_node,
+ struct snd_soc_dai_link *dai_link);
+void snd_soc_of_put_dai_link_cpus(struct snd_soc_dai_link *dai_link);
+
+int snd_soc_add_pcm_runtimes(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link,
+ int num_dai_link);
void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd);
+void snd_soc_dlc_use_cpu_as_platform(struct snd_soc_dai_link_component *platforms,
+ struct snd_soc_dai_link_component *cpus);
+struct of_phandle_args *snd_soc_copy_dai_args(struct device *dev,
+ const struct of_phandle_args *args);
+struct snd_soc_dai *snd_soc_get_dai_via_args(const struct of_phandle_args *dai_args);
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
@@ -1361,6 +1414,8 @@ void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
const struct snd_soc_dai_link_component *dlc);
+struct snd_soc_dai *snd_soc_find_dai_with_mutex(
+ const struct snd_soc_dai_link_component *dlc);
#include <sound/soc-dai.h>
@@ -1377,13 +1432,17 @@ int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card,
/* set platform name for each dailink */
for_each_card_prelinks(card, i, dai_link) {
- name = devm_kstrdup(card->dev, platform_name, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
+ /* only single platform is supported for now */
+ if (dai_link->num_platforms != 1)
+ return -EINVAL;
if (!dai_link->platforms)
return -EINVAL;
+ name = devm_kstrdup(card->dev, platform_name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
/* only single platform is supported for now */
dai_link->platforms->name = name;
}
@@ -1397,18 +1456,114 @@ extern struct dentry *snd_soc_debugfs_root;
extern const struct dev_pm_ops snd_soc_pm_ops;
-/* Helper functions */
-static inline void snd_soc_dapm_mutex_lock(struct snd_soc_dapm_context *dapm)
+/*
+ * DAPM helper functions
+ */
+enum snd_soc_dapm_subclass {
+ SND_SOC_DAPM_CLASS_ROOT = 0,
+ SND_SOC_DAPM_CLASS_RUNTIME = 1,
+};
+
+static inline void _snd_soc_dapm_mutex_lock_root_c(struct snd_soc_card *card)
+{
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_ROOT);
+}
+
+static inline void _snd_soc_dapm_mutex_lock_c(struct snd_soc_card *card)
+{
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+}
+
+static inline void _snd_soc_dapm_mutex_unlock_c(struct snd_soc_card *card)
+{
+ mutex_unlock(&card->dapm_mutex);
+}
+
+static inline void _snd_soc_dapm_mutex_assert_held_c(struct snd_soc_card *card)
+{
+ lockdep_assert_held(&card->dapm_mutex);
+}
+
+static inline void _snd_soc_dapm_mutex_lock_root_d(struct snd_soc_dapm_context *dapm)
+{
+ _snd_soc_dapm_mutex_lock_root_c(dapm->card);
+}
+
+static inline void _snd_soc_dapm_mutex_lock_d(struct snd_soc_dapm_context *dapm)
+{
+ _snd_soc_dapm_mutex_lock_c(dapm->card);
+}
+
+static inline void _snd_soc_dapm_mutex_unlock_d(struct snd_soc_dapm_context *dapm)
+{
+ _snd_soc_dapm_mutex_unlock_c(dapm->card);
+}
+
+static inline void _snd_soc_dapm_mutex_assert_held_d(struct snd_soc_dapm_context *dapm)
+{
+ _snd_soc_dapm_mutex_assert_held_c(dapm->card);
+}
+
+#define snd_soc_dapm_mutex_lock_root(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dapm_mutex_lock_root_c, \
+ struct snd_soc_dapm_context * : _snd_soc_dapm_mutex_lock_root_d)(x)
+#define snd_soc_dapm_mutex_lock(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dapm_mutex_lock_c, \
+ struct snd_soc_dapm_context * : _snd_soc_dapm_mutex_lock_d)(x)
+#define snd_soc_dapm_mutex_unlock(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dapm_mutex_unlock_c, \
+ struct snd_soc_dapm_context * : _snd_soc_dapm_mutex_unlock_d)(x)
+#define snd_soc_dapm_mutex_assert_held(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dapm_mutex_assert_held_c, \
+ struct snd_soc_dapm_context * : _snd_soc_dapm_mutex_assert_held_d)(x)
+
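The `_Generic` selection means one macro name serves both object types; the DPCM wrappers below follow the same pattern. A minimal sketch (function and argument names hypothetical, not part of the patch):

    /* Illustration only: the macro dispatches on the argument's static type */
    static void my_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
    {
    	snd_soc_dapm_mutex_lock(dapm);		/* expands to the _d variant */
    	snd_soc_dapm_enable_pin_unlocked(dapm, pin);
    	snd_soc_dapm_mutex_unlock(dapm);
    }

Passing `dapm->card` instead would select the `_c` variants with no change to the macro name.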
+/*
+ * PCM helper functions
+ */
+static inline void _snd_soc_dpcm_mutex_lock_c(struct snd_soc_card *card)
+{
+ mutex_lock_nested(&card->pcm_mutex, card->pcm_subclass);
+}
+
+static inline void _snd_soc_dpcm_mutex_unlock_c(struct snd_soc_card *card)
+{
+ mutex_unlock(&card->pcm_mutex);
+}
+
+static inline void _snd_soc_dpcm_mutex_assert_held_c(struct snd_soc_card *card)
{
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ lockdep_assert_held(&card->pcm_mutex);
}
-static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
+static inline void _snd_soc_dpcm_mutex_lock_r(struct snd_soc_pcm_runtime *rtd)
{
- mutex_unlock(&dapm->card->dapm_mutex);
+ _snd_soc_dpcm_mutex_lock_c(rtd->card);
}
+static inline void _snd_soc_dpcm_mutex_unlock_r(struct snd_soc_pcm_runtime *rtd)
+{
+ _snd_soc_dpcm_mutex_unlock_c(rtd->card);
+}
+
+static inline void _snd_soc_dpcm_mutex_assert_held_r(struct snd_soc_pcm_runtime *rtd)
+{
+ _snd_soc_dpcm_mutex_assert_held_c(rtd->card);
+}
+
+#define snd_soc_dpcm_mutex_lock(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dpcm_mutex_lock_c, \
+ struct snd_soc_pcm_runtime * : _snd_soc_dpcm_mutex_lock_r)(x)
+
+#define snd_soc_dpcm_mutex_unlock(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dpcm_mutex_unlock_c, \
+ struct snd_soc_pcm_runtime * : _snd_soc_dpcm_mutex_unlock_r)(x)
+
+#define snd_soc_dpcm_mutex_assert_held(x) _Generic((x), \
+ struct snd_soc_card * : _snd_soc_dpcm_mutex_assert_held_c, \
+ struct snd_soc_pcm_runtime * : _snd_soc_dpcm_mutex_assert_held_r)(x)
+
#include <sound/soc-component.h>
#include <sound/soc-card.h>
+#include <sound/soc-jack.h>
#endif
diff --git a/include/sound/sof.h b/include/sound/sof.h
index f3e716c8ce1c..05213bb515a3 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -16,19 +16,75 @@
#include <sound/soc-acpi.h>
struct snd_sof_dsp_ops;
+struct snd_sof_dev;
+
+/**
+ * enum sof_fw_state - DSP firmware state definitions
+ * @SOF_FW_BOOT_NOT_STARTED: firmware boot is not yet started
+ * @SOF_DSPLESS_MODE: DSP is not used
+ * @SOF_FW_BOOT_PREPARE: preparing for boot (firmware loading for example)
+ * @SOF_FW_BOOT_IN_PROGRESS: firmware boot is in progress
+ * @SOF_FW_BOOT_FAILED: firmware boot failed
+ * @SOF_FW_BOOT_READY_FAILED: firmware booted but fw_ready op failed
+ * @SOF_FW_BOOT_READY_OK: firmware booted and fw_ready op passed
+ * @SOF_FW_BOOT_COMPLETE: firmware is booted up and functional
+ * @SOF_FW_CRASHED: firmware crashed after successful boot
+ */
+enum sof_fw_state {
+ SOF_FW_BOOT_NOT_STARTED = 0,
+ SOF_DSPLESS_MODE,
+ SOF_FW_BOOT_PREPARE,
+ SOF_FW_BOOT_IN_PROGRESS,
+ SOF_FW_BOOT_FAILED,
+ SOF_FW_BOOT_READY_FAILED,
+ SOF_FW_BOOT_READY_OK,
+ SOF_FW_BOOT_COMPLETE,
+ SOF_FW_CRASHED,
+};
+
+/* DSP power states */
+enum sof_dsp_power_states {
+ SOF_DSP_PM_D0,
+ SOF_DSP_PM_D1,
+ SOF_DSP_PM_D2,
+ SOF_DSP_PM_D3,
+};
+
+/* Definitions for multiple IPCs */
+enum sof_ipc_type {
+ SOF_IPC_TYPE_3,
+ SOF_IPC_TYPE_4,
+ SOF_IPC_TYPE_COUNT
+};
+
+struct sof_loadable_file_profile {
+ enum sof_ipc_type ipc_type;
+
+ const char *fw_path;
+ const char *fw_path_postfix;
+ const char *fw_name;
+ const char *fw_lib_path;
+ const char *fw_lib_path_postfix;
+ const char *tplg_path;
+ const char *tplg_name;
+};
/*
* SOF Platform data.
*/
struct snd_sof_pdata {
- const struct firmware *fw;
const char *name;
const char *platform;
- struct device *dev;
+ /*
+ * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
+ * flag indicates that a value has been written to these members.
+ */
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ bool subsystem_id_set;
- /* indicate how many first bytes shouldn't be loaded into DSP memory. */
- size_t fw_offset;
+ struct device *dev;
/*
* notification callback used if the hardware initialization
@@ -42,17 +98,26 @@ struct snd_sof_pdata {
/* descriptor */
const struct sof_dev_desc *desc;
+ /* platform's preferred IPC type and path overrides */
+ struct sof_loadable_file_profile ipc_file_profile_base;
+
/* firmware and topology filenames */
const char *fw_filename_prefix;
const char *fw_filename;
const char *tplg_filename_prefix;
const char *tplg_filename;
+ /* loadable external libraries available under this directory */
+ const char *fw_lib_prefix;
+
/* machine */
struct platform_device *pdev_mach;
const struct snd_soc_acpi_mach *machine;
+ const struct snd_sof_of_mach *of_machine;
void *hw_pdata;
+
+ enum sof_ipc_type ipc_type;
};
/*
@@ -62,21 +127,19 @@ struct snd_sof_pdata {
struct sof_dev_desc {
/* list of machines using this configuration */
struct snd_soc_acpi_mach *machines;
+ struct snd_sof_of_mach *of_machines;
/* alternate list of machines using this configuration */
struct snd_soc_acpi_mach *alt_machines;
+ bool use_acpi_target_states;
+
/* Platform resource indexes in BAR / ACPI resources. */
/* Must set to -1 if not used - add new items to end */
int resindex_lpe_base;
int resindex_pcicfg_base;
int resindex_imr_base;
int irqindex_host_ipc;
- int resindex_dma_base;
-
- /* DMA only valid when resindex_dma_base != -1*/
- int dma_engine;
- int dma_size;
/* IPC timeouts in ms */
int ipc_timeout;
@@ -88,16 +151,27 @@ struct sof_dev_desc {
/* defaults for no codec mode */
const char *nocodec_tplg_filename;
- /* defaults paths for firmware and topology files */
- const char *default_fw_path;
- const char *default_tplg_path;
+ /* information on supported IPCs */
+ unsigned int ipc_supported_mask;
+ enum sof_ipc_type ipc_default;
+
+ /* The platform supports DSPless mode */
+ bool dspless_mode_supported;
+
+ /* defaults paths for firmware, library and topology files */
+ const char *default_fw_path[SOF_IPC_TYPE_COUNT];
+ const char *default_lib_path[SOF_IPC_TYPE_COUNT];
+ const char *default_tplg_path[SOF_IPC_TYPE_COUNT];
/* default firmware name */
- const char *default_fw_filename;
+ const char *default_fw_filename[SOF_IPC_TYPE_COUNT];
- const struct snd_sof_dsp_ops *ops;
+ struct snd_sof_dsp_ops *ops;
+ int (*ops_init)(struct snd_sof_dev *sdev);
+ void (*ops_free)(struct snd_sof_dev *sdev);
};
-int sof_nocodec_setup(struct device *dev,
- const struct snd_sof_dsp_ops *ops);
+int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd);
+int sof_dai_get_bclk(struct snd_soc_pcm_runtime *rtd);
+
#endif
diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h
index fd3a30fcf756..d363f0ca6979 100644
--- a/include/sound/sof/channel_map.h
+++ b/include/sound/sof/channel_map.h
@@ -39,7 +39,7 @@ struct sof_ipc_channel_map {
uint32_t ext_id;
uint32_t ch_mask;
uint32_t reserved;
- int32_t ch_coeffs[0];
+ int32_t ch_coeffs[];
} __packed;
/**
@@ -55,7 +55,7 @@ struct sof_ipc_stream_map {
struct sof_ipc_cmd_hdr hdr;
uint32_t num_ch_map;
uint32_t reserved[3];
- struct sof_ipc_channel_map ch_map[0];
+ struct sof_ipc_channel_map ch_map[];
} __packed;
#endif /* __IPC_CHANNEL_MAP_H__ */
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
index 7379a33d7247..983d374fe511 100644
--- a/include/sound/sof/control.h
+++ b/include/sound/sof/control.h
@@ -117,11 +117,11 @@ struct sof_ipc_ctrl_data {
/* control data - add new types if needed */
union {
/* channel values can be used by volume type controls */
- struct sof_ipc_ctrl_value_chan chanv[0];
+ DECLARE_FLEX_ARRAY(struct sof_ipc_ctrl_value_chan, chanv);
/* component values used by routing controls like mux, mixer */
- struct sof_ipc_ctrl_value_comp compv[0];
+ DECLARE_FLEX_ARRAY(struct sof_ipc_ctrl_value_comp, compv);
/* data can be used by binary controls */
- struct sof_abi_hdr data[0];
+ DECLARE_FLEX_ARRAY(struct sof_abi_hdr, data);
};
} __packed;
diff --git a/include/sound/sof/dai-amd.h b/include/sound/sof/dai-amd.h
new file mode 100644
index 000000000000..59cd014392c1
--- /dev/null
+++ b/include/sound/sof/dai-amd.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_AMD_H__
+#define __INCLUDE_SOUND_SOF_DAI_AMD_H__
+
+#include <sound/sof/header.h>
+
+/* ACP Configuration Request - SOF_IPC_DAI_AMD_CONFIG */
+struct sof_ipc_dai_acp_params {
+ struct sof_ipc_hdr hdr;
+
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t tdm_slots;
+ uint32_t tdm_mode;
+} __packed;
+
+/* ACPDMIC Configuration Request - SOF_IPC_DAI_AMD_CONFIG */
+struct sof_ipc_dai_acpdmic_params {
+ uint32_t pdm_rate;
+ uint32_t pdm_ch;
+} __packed;
+
+/* ACP_SDW Configuration Request - SOF_IPC_DAI_AMD_SDW_CONFIG */
+struct sof_ipc_dai_acp_sdw_params {
+ struct sof_ipc_hdr hdr;
+ u32 rate;
+ u32 channels;
+} __packed;
+
+#endif
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
index ca8325353d41..6bc987bd4761 100644
--- a/include/sound/sof/dai-imx.h
+++ b/include/sound/sof/dai-imx.h
@@ -51,4 +51,11 @@ struct sof_ipc_dai_sai_params {
uint16_t tdm_slot_width;
uint16_t reserved2; /* alignment */
} __packed;
+
+/* MICFIL Configuration Request - SOF_IPC_DAI_MICFIL_CONFIG */
+struct sof_ipc_dai_micfil_params {
+ uint32_t pdm_rate;
+ uint32_t pdm_ch;
+} __packed;
+
#endif
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 136adf6686e2..5b93b7292f5e 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -48,6 +48,12 @@
#define SOF_DAI_INTEL_SSP_CLKCTRL_FS_KA BIT(4)
/* bclk idle */
#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_IDLE_HIGH BIT(5)
+/* mclk early start */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_ES BIT(6)
+/* bclk early start */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_ES BIT(7)
+/* mclk always on */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_AON BIT(8)
/* DMIC max. four controllers for eight microphone channels */
#define SOF_DAI_INTEL_DMIC_NUM_CTRL 4
diff --git a/include/sound/sof/dai-mediatek.h b/include/sound/sof/dai-mediatek.h
new file mode 100644
index 000000000000..62dd4720558d
--- /dev/null
+++ b/include/sound/sof/dai-mediatek.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2021 Mediatek Corporation. All rights reserved.
+ *
+ * Author: Bo Pan <bo.pan@mediatek.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_MEDIATEK_H__
+#define __INCLUDE_SOUND_SOF_DAI_MEDIATEK_H__
+
+#include <sound/sof/header.h>
+
+struct sof_ipc_dai_mtk_afe_params {
+ struct sof_ipc_hdr hdr;
+ u32 channels;
+ u32 rate;
+ u32 format;
+ u32 stream_id;
+ u32 reserved[4]; /* reserved for future use */
+} __packed;
+
+#endif
+
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 34f135adf8ec..0764a80c17a9 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -12,6 +12,8 @@
#include <sound/sof/header.h>
#include <sound/sof/dai-intel.h>
#include <sound/sof/dai-imx.h>
+#include <sound/sof/dai-amd.h>
+#include <sound/sof/dai-mediatek.h>
/*
* DAI Configuration.
@@ -34,15 +36,41 @@
#define SOF_DAI_FMT_IB_NF (3 << 8) /**< invert BCLK + normal FRM */
#define SOF_DAI_FMT_IB_IF (4 << 8) /**< invert BCLK + FRM */
-#define SOF_DAI_FMT_CBM_CFM (0 << 12) /**< codec clk & FRM master */
-#define SOF_DAI_FMT_CBS_CFM (2 << 12) /**< codec clk slave & FRM master */
-#define SOF_DAI_FMT_CBM_CFS (3 << 12) /**< codec clk master & frame slave */
-#define SOF_DAI_FMT_CBS_CFS (4 << 12) /**< codec clk & FRM slave */
+#define SOF_DAI_FMT_CBP_CFP (0 << 12) /**< codec bclk provider & frame provider */
+#define SOF_DAI_FMT_CBC_CFP (2 << 12) /**< codec bclk consumer & frame provider */
+#define SOF_DAI_FMT_CBP_CFC (3 << 12) /**< codec bclk provider & frame consumer */
+#define SOF_DAI_FMT_CBC_CFC (4 << 12) /**< codec bclk consumer & frame consumer */
+
+/* keep old definitions for backwards compatibility */
+#define SOF_DAI_FMT_CBM_CFM SOF_DAI_FMT_CBP_CFP
+#define SOF_DAI_FMT_CBS_CFM SOF_DAI_FMT_CBC_CFP
+#define SOF_DAI_FMT_CBM_CFS SOF_DAI_FMT_CBP_CFC
+#define SOF_DAI_FMT_CBS_CFS SOF_DAI_FMT_CBC_CFC
#define SOF_DAI_FMT_FORMAT_MASK 0x000f
#define SOF_DAI_FMT_CLOCK_MASK 0x00f0
#define SOF_DAI_FMT_INV_MASK 0x0f00
-#define SOF_DAI_FMT_MASTER_MASK 0xf000
+#define SOF_DAI_FMT_CLOCK_PROVIDER_MASK 0xf000
+
+/*
+ * DAI_CONFIG flags. The 4 least-significant bits carry the command (HW_PARAMS, HW_FREE or
+ * PAUSE), indicating at which stage the IPC is sent. The 4 most-significant bits carry
+ * quirks that accompany the above commands.
+ */
+#define SOF_DAI_CONFIG_FLAGS_CMD_MASK 0xF
+#define SOF_DAI_CONFIG_FLAGS_NONE 0 /**< DAI_CONFIG sent without stage information */
+#define SOF_DAI_CONFIG_FLAGS_HW_PARAMS BIT(0) /**< DAI_CONFIG sent during hw_params stage */
+#define SOF_DAI_CONFIG_FLAGS_HW_FREE BIT(1) /**< DAI_CONFIG sent during hw_free stage */
+/** DAI_CONFIG sent during pause trigger. Only available ABI 3.20 onwards */
+#define SOF_DAI_CONFIG_FLAGS_PAUSE BIT(2)
+#define SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT 4
+#define SOF_DAI_CONFIG_FLAGS_QUIRK_MASK (0xF << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT)
+/*
+ * This should be used along with the SOF_DAI_CONFIG_FLAGS_HW_PARAMS to indicate that pipeline
+ * stop/pause and DAI DMA stop/pause should happen in two steps. This change is only available
+ * ABI 3.20 onwards.
+ */
+#define SOF_DAI_CONFIG_FLAGS_2_STEP_STOP BIT(0)
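Per the layout described above, the command occupies bits 3..0 and quirks bits 7..4, so hw_params with the two-step-stop quirk would be encoded as in this sketch (not part of the patch):

    /* Illustration only: compose and decompose a DAI_CONFIG flags byte */
    uint8_t flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS |
    		(SOF_DAI_CONFIG_FLAGS_2_STEP_STOP << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT);

    uint8_t cmd   = flags & SOF_DAI_CONFIG_FLAGS_CMD_MASK;
    uint8_t quirk = (flags & SOF_DAI_CONFIG_FLAGS_QUIRK_MASK) >>
    		SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT;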
/** \brief Types of DAI */
enum sof_ipc_dai_type {
@@ -53,6 +81,15 @@ enum sof_ipc_dai_type {
SOF_DAI_INTEL_ALH, /**< Intel ALH */
SOF_DAI_IMX_SAI, /**< i.MX SAI */
SOF_DAI_IMX_ESAI, /**< i.MX ESAI */
+ SOF_DAI_AMD_BT, /**< AMD ACP BT*/
+ SOF_DAI_AMD_SP, /**< AMD ACP SP */
+ SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
+ SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
+ SOF_DAI_AMD_HS, /**< AMD HS */
+ SOF_DAI_AMD_SP_VIRTUAL, /**< AMD ACP SP VIRTUAL */
+ SOF_DAI_AMD_HS_VIRTUAL, /**< AMD ACP HS VIRTUAL */
+ SOF_DAI_IMX_MICFIL, /**< i.MX MICFIL PDM */
+ SOF_DAI_AMD_SDW, /**< AMD ACP SDW */
};
/* general purpose DAI configuration */
@@ -63,7 +100,8 @@ struct sof_ipc_dai_config {
/* physical protocol and clocking */
uint16_t format; /**< SOF_DAI_FMT_ */
- uint16_t reserved16; /**< alignment */
+ uint8_t group_id; /**< group ID, 0 means no group (ABI 3.17) */
+ uint8_t flags; /**< SOF_DAI_CONFIG_FLAGS_ (ABI 3.19) */
/* reserved for future use */
uint32_t reserved[8];
@@ -76,7 +114,19 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_alh_params alh;
struct sof_ipc_dai_esai_params esai;
struct sof_ipc_dai_sai_params sai;
+ struct sof_ipc_dai_acp_params acpbt;
+ struct sof_ipc_dai_acp_params acpsp;
+ struct sof_ipc_dai_acpdmic_params acpdmic;
+ struct sof_ipc_dai_acp_params acphs;
+ struct sof_ipc_dai_mtk_afe_params afe;
+ struct sof_ipc_dai_micfil_params micfil;
+ struct sof_ipc_dai_acp_sdw_params acp_sdw;
};
} __packed;
+struct sof_dai_private_data {
+ struct sof_ipc_comp_dai *comp_dai;
+ struct sof_ipc_dai_config *dai_config;
+};
+
#endif
diff --git a/include/sound/sof/debug.h b/include/sound/sof/debug.h
new file mode 100644
index 000000000000..38693e3fb514
--- /dev/null
+++ b/include/sound/sof/debug.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Karol Trzcinski <karolx.trzcinski@linux.intel.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DEBUG_H__
+#define __INCLUDE_SOUND_SOF_DEBUG_H__
+
+#include <sound/sof/header.h>
+
+/** ABI3.18 */
+enum sof_ipc_dbg_mem_zone {
+ SOF_IPC_MEM_ZONE_SYS = 0, /**< System zone */
+ SOF_IPC_MEM_ZONE_SYS_RUNTIME = 1, /**< System-runtime zone */
+ SOF_IPC_MEM_ZONE_RUNTIME = 2, /**< Runtime zone */
+ SOF_IPC_MEM_ZONE_BUFFER = 3, /**< Buffer zone */
+ SOF_IPC_MEM_ZONE_RUNTIME_SHARED = 4, /**< Runtime shared zone */
+ SOF_IPC_MEM_ZONE_SYS_SHARED = 5, /**< System shared zone */
+};
+
+/** ABI3.18 */
+struct sof_ipc_dbg_mem_usage_elem {
+ uint32_t zone; /**< see sof_ipc_dbg_mem_zone */
+ uint32_t id; /**< heap index within zone */
+ uint32_t used; /**< number of bytes used in zone */
+ uint32_t free; /**< number of bytes free to use within zone */
+ uint32_t reserved; /**< for future use */
+} __packed;
+
+/** ABI3.18 */
+struct sof_ipc_dbg_mem_usage {
+ struct sof_ipc_reply rhdr; /**< generic IPC reply header */
+ uint32_t reserved[4]; /**< reserved for future use */
+ uint32_t num_elems; /**< elems[] counter */
+ struct sof_ipc_dbg_mem_usage_elem elems[]; /**< memory usage information */
+} __packed;
+
+#endif
diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h
index 04359cda92dc..2a7e055584f9 100644
--- a/include/sound/sof/ext_manifest.h
+++ b/include/sound/sof/ext_manifest.h
@@ -58,8 +58,11 @@ struct sof_ext_man_header {
/* Extended manifest elements types */
enum sof_ext_man_elem_type {
SOF_EXT_MAN_ELEM_FW_VERSION = 0,
- SOF_EXT_MAN_ELEM_WINDOW = SOF_IPC_EXT_WINDOW,
- SOF_EXT_MAN_ELEM_CC_VERSION = SOF_IPC_EXT_CC_INFO,
+ SOF_EXT_MAN_ELEM_WINDOW = 1,
+ SOF_EXT_MAN_ELEM_CC_VERSION = 2,
+ SOF_EXT_MAN_ELEM_DBG_ABI = 4,
+ SOF_EXT_MAN_ELEM_CONFIG_DATA = 5, /**< ABI3.17 */
+ SOF_EXT_MAN_ELEM_PLATFORM_CONFIG_DATA = 6,
};
/* extended manifest element header */
@@ -92,4 +95,29 @@ struct sof_ext_man_cc_version {
struct sof_ipc_cc_version cc_version;
} __packed;
+struct ext_man_dbg_abi {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_user_abi_version dbg_abi;
+} __packed;
+
+/* EXT_MAN_ELEM_CONFIG_DATA element identifiers, ABI3.17 */
+enum config_elem_type {
+ SOF_EXT_MAN_CONFIG_EMPTY = 0,
+ SOF_EXT_MAN_CONFIG_IPC_MSG_SIZE = 1,
+ SOF_EXT_MAN_CONFIG_MEMORY_USAGE_SCAN = 2, /**< ABI 3.18 */
+};
+
+struct sof_config_elem {
+ uint32_t token;
+ uint32_t value;
+} __packed;
+
+/* firmware configuration information */
+struct sof_ext_man_config_data {
+ struct sof_ext_man_elem_header hdr;
+
+ struct sof_config_elem elems[];
+} __packed;
+
#endif /* __SOF_FIRMWARE_EXT_MANIFEST_H__ */
diff --git a/include/sound/sof/ext_manifest4.h b/include/sound/sof/ext_manifest4.h
new file mode 100644
index 000000000000..ec97edcbbfc3
--- /dev/null
+++ b/include/sound/sof/ext_manifest4.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * The extended manifest is a place to store metadata about the firmware that is
+ * known at compilation time - for example the firmware version or the compiler
+ * used. This information is read on the host side before firmware startup.
+ * This part of the output binary is not signed.
+ */
+
+#ifndef __SOF_FIRMWARE_EXT_MANIFEST4_H__
+#define __SOF_FIRMWARE_EXT_MANIFEST4_H__
+
+#include <linux/uuid.h>
+
+/* In ASCII $AE1 */
+#define SOF_EXT_MAN4_MAGIC_NUMBER 0x31454124
+
+#define MAX_MODULE_NAME_LEN 8
+#define MAX_FW_BINARY_NAME 8
+#define DEFAULT_HASH_SHA256_LEN 32
+#define SOF_MAN4_FW_HDR_OFFSET 0x2000
+#define SOF_MAN4_FW_HDR_OFFSET_CAVS_1_5 0x284
+
+/*********************************************************************
+ * extended manifest (struct sof_ext_manifest4_hdr)
+ *-------------------
+ * css_manifest hdr
+ *-------------------
+ * offset reserved for future
+ *-------------------
+ * fw_hdr (struct sof_man4_fw_binary_header)
+ *-------------------
+ * module_entry[0] (struct sof_man4_module)
+ *-------------------
+ * module_entry[1]
+ *-------------------
+ * ...
+ *-------------------
+ * module_entry[n]
+ *-------------------
+ * module_config[0] (struct sof_man4_module_config)
+ *-------------------
+ * module_config[1]
+ *-------------------
+ * ...
+ *-------------------
+ * module_config[m]
+ *-------------------
+ * FW content
+ *-------------------
+ *********************************************************************/
+
+struct sof_ext_manifest4_hdr {
+ uint32_t id;
+ uint32_t len; /* length of extension manifest */
+ uint16_t version_major; /* header version */
+ uint16_t version_minor;
+ uint32_t num_module_entries;
+} __packed;
+
+struct sof_man4_fw_binary_header {
+ /* This part must be unchanged to be backward compatible with SPT-LP ROM */
+ uint32_t id;
+ uint32_t len; /* sizeof(sof_man4_fw_binary_header) in bytes */
+ uint8_t name[MAX_FW_BINARY_NAME];
+ uint32_t preload_page_count; /* number of pages of preloaded image */
+ uint32_t fw_image_flags;
+ uint32_t feature_mask;
+ uint16_t major_version; /* Firmware version */
+ uint16_t minor_version;
+ uint16_t hotfix_version;
+ uint16_t build_version;
+ uint32_t num_module_entries;
+
+ /* This part may change to contain any additional data for BaseFw that is skipped by ROM */
+ uint32_t hw_buf_base_addr;
+ uint32_t hw_buf_length;
+ uint32_t load_offset; /* This value is used by ROM */
+} __packed;
+
+struct sof_man4_segment_desc {
+ uint32_t flags;
+ uint32_t v_base_addr;
+ uint32_t file_offset;
+} __packed;
+
+struct sof_man4_module {
+ uint32_t id;
+ uint8_t name[MAX_MODULE_NAME_LEN];
+ guid_t uuid;
+ uint32_t type;
+ uint8_t hash[DEFAULT_HASH_SHA256_LEN];
+ uint32_t entry_point;
+ uint16_t cfg_offset;
+ uint16_t cfg_count;
+ uint32_t affinity_mask;
+ uint16_t instance_max_count;
+ uint16_t instance_stack_size;
+ struct sof_man4_segment_desc segments[3];
+} __packed;
+
+struct sof_man4_module_config {
+ uint32_t par[4]; /* module parameters */
+ uint32_t is_bytes; /* actual size of instance .bss (bytes) */
+ uint32_t cps; /* cycles per second */
+ uint32_t ibs; /* input buffer size (bytes) */
+ uint32_t obs; /* output buffer size (bytes) */
+ uint32_t module_flags; /* flags, reserved for future use */
+ uint32_t cpc; /* cycles per single run */
+ uint32_t obls; /* output block size, reserved for future use */
+} __packed;
+
+#endif /* __SOF_FIRMWARE_EXT_MANIFEST4_H__ */
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 2d35997ace40..b22e925c70e2 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -31,12 +31,12 @@
/* Global Message - Generic */
#define SOF_GLB_TYPE_SHIFT 28
-#define SOF_GLB_TYPE_MASK (0xf << SOF_GLB_TYPE_SHIFT)
+#define SOF_GLB_TYPE_MASK (0xfUL << SOF_GLB_TYPE_SHIFT)
#define SOF_GLB_TYPE(x) ((x) << SOF_GLB_TYPE_SHIFT)
/* Command Message - Generic */
#define SOF_CMD_TYPE_SHIFT 16
-#define SOF_CMD_TYPE_MASK (0xfff << SOF_CMD_TYPE_SHIFT)
+#define SOF_CMD_TYPE_MASK (0xfffL << SOF_CMD_TYPE_SHIFT)
#define SOF_CMD_TYPE(x) ((x) << SOF_CMD_TYPE_SHIFT)
/* Global Message Types */
@@ -49,9 +49,10 @@
#define SOF_IPC_FW_READY SOF_GLB_TYPE(0x7U)
#define SOF_IPC_GLB_DAI_MSG SOF_GLB_TYPE(0x8U)
#define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U)
-#define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU)
+#define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU)
#define SOF_IPC_GLB_TEST_MSG SOF_GLB_TYPE(0xBU)
#define SOF_IPC_GLB_PROBE SOF_GLB_TYPE(0xCU)
+#define SOF_IPC_GLB_DEBUG SOF_GLB_TYPE(0xDU)
/*
* DSP Command Message Types
@@ -109,7 +110,7 @@
#define SOF_IPC_PROBE_DMA_ADD SOF_CMD_TYPE(0x003)
#define SOF_IPC_PROBE_DMA_INFO SOF_CMD_TYPE(0x004)
#define SOF_IPC_PROBE_DMA_REMOVE SOF_CMD_TYPE(0x005)
-#define SOF_IPC_PROBE_POINT_ADD SOF_CMD_TYPE(0x006)
+#define SOF_IPC_PROBE_POINT_ADD SOF_CMD_TYPE(0x006)
#define SOF_IPC_PROBE_POINT_INFO SOF_CMD_TYPE(0x007)
#define SOF_IPC_PROBE_POINT_REMOVE SOF_CMD_TYPE(0x008)
@@ -117,9 +118,14 @@
#define SOF_IPC_TRACE_DMA_PARAMS SOF_CMD_TYPE(0x001)
#define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002)
#define SOF_IPC_TRACE_DMA_PARAMS_EXT SOF_CMD_TYPE(0x003)
+#define SOF_IPC_TRACE_FILTER_UPDATE SOF_CMD_TYPE(0x004) /**< ABI3.17 */
+#define SOF_IPC_TRACE_DMA_FREE SOF_CMD_TYPE(0x005) /**< ABI3.20 */
/* debug */
-#define SOF_IPC_TEST_IPC_FLOOD SOF_CMD_TYPE(0x001)
+#define SOF_IPC_DEBUG_MEM_USAGE SOF_CMD_TYPE(0x001)
+
+/* test */
+#define SOF_IPC_TEST_IPC_FLOOD SOF_CMD_TYPE(0x001)
/* Get message component id */
#define SOF_IPC_MESSAGE_ID(x) ((x) & 0xffff)
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 5a55ba8b7e56..75193850ead0 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -25,6 +25,7 @@
#define SOF_IPC_INFO_LOCKS BIT(1)
#define SOF_IPC_INFO_LOCKSV BIT(2)
#define SOF_IPC_INFO_GDB BIT(3)
+#define SOF_IPC_INFO_D3_PERSISTENT BIT(4)
/* extended data types that can be appended onto end of sof_ipc_fw_ready */
enum sof_ipc_ext_data {
@@ -35,6 +36,10 @@ enum sof_ipc_ext_data {
SOF_IPC_EXT_USER_ABI_INFO = 4,
};
+/* Build u32 number in format MMmmmppp */
+#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \
+ ((MAJOR) << 24) | ((MINOR) << 12) | (PATCH)))
+
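MMmmmppp places the major version in bits 31..24, minor in 23..12 and patch in 11..0; a worked example with arbitrary numbers (not part of the patch):

    /* 2.1.1 -> (2 << 24) | (1 << 12) | 1 = 0x02001001 */
    uint32_t ver = SOF_FW_VER(2, 1, 1);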
/* FW version - SOF_IPC_GLB_VERSION */
struct sof_ipc_fw_version {
struct sof_ipc_hdr hdr;
@@ -46,9 +51,11 @@ struct sof_ipc_fw_version {
uint8_t time[10];
uint8_t tag[6];
uint32_t abi_version;
+ /* used to check FW and ldc file compatibility, reproducible value */
+ uint32_t src_hash;
/* reserved for future use */
- uint32_t reserved[4];
+ uint32_t reserved[3];
} __packed;
/* FW ready Message - sent by firmware when boot has completed */
@@ -99,7 +106,7 @@ struct sof_ipc_window_elem {
struct sof_ipc_window {
struct sof_ipc_ext_data_hdr ext_hdr;
uint32_t num_windows;
- struct sof_ipc_window_elem window[];
+ struct sof_ipc_window_elem window[SOF_IPC_MAX_ELEMS];
} __packed;
struct sof_ipc_cc_version {
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
new file mode 100644
index 000000000000..1eb538e18236
--- /dev/null
+++ b/include/sound/sof/ipc4/header.h
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_IPC4_HEADER_H__
+#define __INCLUDE_SOUND_SOF_IPC4_HEADER_H__
+
+#include <linux/types.h>
+#include <uapi/sound/sof/abi.h>
+
+/* maximum message size for mailbox Tx/Rx */
+#define SOF_IPC4_MSG_MAX_SIZE 4096
+
+/** \addtogroup sof_uapi uAPI
+ * SOF uAPI specification.
+ * @{
+ */
+
+/**
+ * struct sof_ipc4_msg - Placeholder of an IPC4 message
+ * @header_u64: IPC4 header as single u64 number
+ * @primary: Primary, mandatory part of the header
+ * @extension: Extended part of the header, if not used it should be
+ * set to 0
+ * @data_size: Size of data in bytes pointed by @data_ptr
+ * @data_ptr: Pointer to the optional payload of a message
+ */
+struct sof_ipc4_msg {
+ union {
+ u64 header_u64;
+ struct {
+ u32 primary;
+ u32 extension;
+ };
+ };
+
+ size_t data_size;
+ void *data_ptr;
+};
+
+/**
+ * struct sof_ipc4_tuple - Generic type/ID and parameter tuple
+ * @type: type/ID
+ * @size: size of the @value array in bytes
+ * @value: value for the given type
+ */
+struct sof_ipc4_tuple {
+ uint32_t type;
+ uint32_t size;
+ uint32_t value[];
+} __packed;
+
+/*
+ * IPC4 messages have two 32-bit identifiers, made up as follows:
+ *
+ * header - msg type, msg id, msg direction ...
+ * extension - extra params such as msg data size in mailbox
+ *
+ * These are sent at the start of the IPC message in the mailbox. Messages
+ * should not be sent in the doorbell (special exceptions for firmware).
+ */
+
+/*
+ * IPC4 primary header bit allocation for messages
+ * bit 0-23: message type specific
+ * bit 24-28: type: enum sof_ipc4_global_msg if target is SOF_IPC4_FW_GEN_MSG
+ * enum sof_ipc4_module_type if target is SOF_IPC4_MODULE_MSG
+ * bit 29: response - sof_ipc4_msg_dir
+ * bit 30: target - enum sof_ipc4_msg_target
+ * bit 31: reserved, unused
+ */
+
+/* Value of target field - must fit into 1 bit */
+enum sof_ipc4_msg_target {
+ /* Global FW message */
+ SOF_IPC4_FW_GEN_MSG,
+
+ /* Module message */
+ SOF_IPC4_MODULE_MSG
+};
+
+/* Value of type field - must fit into 5 bits */
+enum sof_ipc4_global_msg {
+ SOF_IPC4_GLB_BOOT_CONFIG,
+ SOF_IPC4_GLB_ROM_CONTROL,
+ SOF_IPC4_GLB_IPCGATEWAY_CMD,
+
+ /* 3 .. 12: RESERVED - do not use */
+
+ SOF_IPC4_GLB_PERF_MEASUREMENTS_CMD = 13,
+ SOF_IPC4_GLB_CHAIN_DMA,
+
+ SOF_IPC4_GLB_LOAD_MULTIPLE_MODULES,
+ SOF_IPC4_GLB_UNLOAD_MULTIPLE_MODULES,
+
+ /* pipeline settings */
+ SOF_IPC4_GLB_CREATE_PIPELINE,
+ SOF_IPC4_GLB_DELETE_PIPELINE,
+ SOF_IPC4_GLB_SET_PIPELINE_STATE,
+ SOF_IPC4_GLB_GET_PIPELINE_STATE,
+ SOF_IPC4_GLB_GET_PIPELINE_CONTEXT_SIZE,
+ SOF_IPC4_GLB_SAVE_PIPELINE,
+ SOF_IPC4_GLB_RESTORE_PIPELINE,
+
+ /*
+ * library loading
+ *
+ * Loads library (using Code Load or HD/A Host Output DMA)
+ */
+ SOF_IPC4_GLB_LOAD_LIBRARY,
+ /*
+ * Prepare the host DMA channel for library loading, must be followed by
+ * a SOF_IPC4_GLB_LOAD_LIBRARY message as the library loading step
+ */
+ SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE,
+
+ SOF_IPC4_GLB_INTERNAL_MESSAGE,
+
+ /* Notification (FW to SW driver) */
+ SOF_IPC4_GLB_NOTIFICATION,
+
+ /* 28 .. 31: RESERVED - do not use */
+
+ SOF_IPC4_GLB_TYPE_LAST,
+};
+
+/* Value of response field - must fit into 1 bit */
+enum sof_ipc4_msg_dir {
+ SOF_IPC4_MSG_REQUEST,
+ SOF_IPC4_MSG_REPLY,
+};
+
+enum sof_ipc4_pipeline_state {
+ SOF_IPC4_PIPE_INVALID_STATE,
+ SOF_IPC4_PIPE_UNINITIALIZED,
+ SOF_IPC4_PIPE_RESET,
+ SOF_IPC4_PIPE_PAUSED,
+ SOF_IPC4_PIPE_RUNNING,
+ SOF_IPC4_PIPE_EOS
+};
+
+/* Generic message fields (bit 24-30) */
+
+/* encoded to header's msg_tgt field */
+#define SOF_IPC4_MSG_TARGET_SHIFT 30
+#define SOF_IPC4_MSG_TARGET_MASK BIT(30)
+#define SOF_IPC4_MSG_TARGET(x) ((x) << SOF_IPC4_MSG_TARGET_SHIFT)
+#define SOF_IPC4_MSG_IS_MODULE_MSG(x) ((x) & SOF_IPC4_MSG_TARGET_MASK ? 1 : 0)
+
+/* encoded to header's rsp field */
+#define SOF_IPC4_MSG_DIR_SHIFT 29
+#define SOF_IPC4_MSG_DIR_MASK BIT(29)
+#define SOF_IPC4_MSG_DIR(x) ((x) << SOF_IPC4_MSG_DIR_SHIFT)
+
+/* encoded to header's type field */
+#define SOF_IPC4_MSG_TYPE_SHIFT 24
+#define SOF_IPC4_MSG_TYPE_MASK GENMASK(28, 24)
+#define SOF_IPC4_MSG_TYPE_SET(x) (((x) << SOF_IPC4_MSG_TYPE_SHIFT) & \
+ SOF_IPC4_MSG_TYPE_MASK)
+#define SOF_IPC4_MSG_TYPE_GET(x) (((x) & SOF_IPC4_MSG_TYPE_MASK) >> \
+ SOF_IPC4_MSG_TYPE_SHIFT)
+
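Combining the generic fields, a request's primary word is the OR of target, direction and type; a sketch for a hypothetical create-pipeline request, with the type-specific bits coming from the macros below (not part of the patch):

    /* Illustration only: primary header word of a FW_GEN_MSG request */
    u32 primary = SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG) |
    	      SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST) |
    	      SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_CREATE_PIPELINE);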
+/* Global message type specific field definitions */
+
+/* pipeline creation ipc msg */
+#define SOF_IPC4_GLB_PIPE_INSTANCE_SHIFT 16
+#define SOF_IPC4_GLB_PIPE_INSTANCE_MASK GENMASK(23, 16)
+#define SOF_IPC4_GLB_PIPE_INSTANCE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_INSTANCE_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_PRIORITY_SHIFT 11
+#define SOF_IPC4_GLB_PIPE_PRIORITY_MASK GENMASK(15, 11)
+#define SOF_IPC4_GLB_PIPE_PRIORITY(x) ((x) << SOF_IPC4_GLB_PIPE_PRIORITY_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_MEM_SIZE_SHIFT 0
+#define SOF_IPC4_GLB_PIPE_MEM_SIZE_MASK GENMASK(10, 0)
+#define SOF_IPC4_GLB_PIPE_MEM_SIZE(x) ((x) << SOF_IPC4_GLB_PIPE_MEM_SIZE_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_EXT_LP_SHIFT 0
+#define SOF_IPC4_GLB_PIPE_EXT_LP_MASK BIT(0)
+#define SOF_IPC4_GLB_PIPE_EXT_LP(x) ((x) << SOF_IPC4_GLB_PIPE_EXT_LP_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_EXT_CORE_ID_SHIFT 20
+#define SOF_IPC4_GLB_PIPE_EXT_CORE_ID_MASK GENMASK(23, 20)
+#define SOF_IPC4_GLB_PIPE_EXT_CORE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_EXT_CORE_ID_SHIFT)
+
+/* pipeline set state ipc msg */
+#define SOF_IPC4_GLB_PIPE_STATE_ID_SHIFT 16
+#define SOF_IPC4_GLB_PIPE_STATE_ID_MASK GENMASK(23, 16)
+#define SOF_IPC4_GLB_PIPE_STATE_ID(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_ID_SHIFT)
+
+#define SOF_IPC4_GLB_PIPE_STATE_SHIFT 0
+#define SOF_IPC4_GLB_PIPE_STATE_MASK GENMASK(15, 0)
+#define SOF_IPC4_GLB_PIPE_STATE(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_SHIFT)
+
+/* pipeline set state IPC msg extension */
+#define SOF_IPC4_GLB_PIPE_STATE_EXT_MULTI BIT(0)
+
+/* load library ipc msg */
+#define SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID_SHIFT 16
+#define SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(x) ((x) << SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID_SHIFT)
+
+/* chain dma ipc message */
+#define SOF_IPC4_GLB_CHAIN_DMA_HOST_ID_SHIFT 0
+#define SOF_IPC4_GLB_CHAIN_DMA_HOST_ID_MASK GENMASK(4, 0)
+#define SOF_IPC4_GLB_CHAIN_DMA_HOST_ID(x) (((x) << SOF_IPC4_GLB_CHAIN_DMA_HOST_ID_SHIFT) & \
+ SOF_IPC4_GLB_CHAIN_DMA_HOST_ID_MASK)
+
+#define SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_SHIFT 8
+#define SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK GENMASK(12, 8)
+#define SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(x) (((x) << SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_SHIFT) & \
+ SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK)
+
+#define SOF_IPC4_GLB_CHAIN_DMA_ALLOCATE_SHIFT 16
+#define SOF_IPC4_GLB_CHAIN_DMA_ALLOCATE_MASK BIT(16)
+#define SOF_IPC4_GLB_CHAIN_DMA_ALLOCATE(x) (((x) & 1) << SOF_IPC4_GLB_CHAIN_DMA_ALLOCATE_SHIFT)
+
+#define SOF_IPC4_GLB_CHAIN_DMA_ENABLE_SHIFT 17
+#define SOF_IPC4_GLB_CHAIN_DMA_ENABLE_MASK BIT(17)
+#define SOF_IPC4_GLB_CHAIN_DMA_ENABLE(x) (((x) & 1) << SOF_IPC4_GLB_CHAIN_DMA_ENABLE_SHIFT)
+
+#define SOF_IPC4_GLB_CHAIN_DMA_SCS_SHIFT 18
+#define SOF_IPC4_GLB_CHAIN_DMA_SCS_MASK BIT(18)
+#define SOF_IPC4_GLB_CHAIN_DMA_SCS(x) (((x) & 1) << SOF_IPC4_GLB_CHAIN_DMA_SCS_SHIFT)
+
+#define SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE_SHIFT 0
+#define SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE_MASK GENMASK(24, 0)
+#define SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE(x) (((x) << \
+ SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE_SHIFT) & \
+ SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE_MASK)
+
+enum sof_ipc4_channel_config {
+ /* one channel only. */
+ SOF_IPC4_CHANNEL_CONFIG_MONO,
+ /* L & R. */
+ SOF_IPC4_CHANNEL_CONFIG_STEREO,
+ /* L, R & LFE; PCM only. */
+ SOF_IPC4_CHANNEL_CONFIG_2_POINT_1,
+ /* L, C & R; MP3 & AAC only. */
+ SOF_IPC4_CHANNEL_CONFIG_3_POINT_0,
+ /* L, C, R & LFE; PCM only. */
+ SOF_IPC4_CHANNEL_CONFIG_3_POINT_1,
+ /* L, R, Ls & Rs; PCM only. */
+ SOF_IPC4_CHANNEL_CONFIG_QUATRO,
+ /* L, C, R & Cs; MP3 & AAC only. */
+ SOF_IPC4_CHANNEL_CONFIG_4_POINT_0,
+ /* L, C, R, Ls & Rs. */
+ SOF_IPC4_CHANNEL_CONFIG_5_POINT_0,
+ /* L, C, R, Ls, Rs & LFE. */
+ SOF_IPC4_CHANNEL_CONFIG_5_POINT_1,
+ /* one channel replicated in two. */
+ SOF_IPC4_CHANNEL_CONFIG_DUAL_MONO,
+ /* Stereo (L,R) in 4 slots, 1st stream: [ L, R, -, - ] */
+ SOF_IPC4_CHANNEL_CONFIG_I2S_DUAL_STEREO_0,
+ /* Stereo (L,R) in 4 slots, 2nd stream: [ -, -, L, R ] */
+ SOF_IPC4_CHANNEL_CONFIG_I2S_DUAL_STEREO_1,
+ /* L, C, R, Ls, Rs, LFE, LS & RS */
+ SOF_IPC4_CHANNEL_CONFIG_7_POINT_1,
+};
+
+enum sof_ipc4_interleaved_style {
+ SOF_IPC4_CHANNELS_INTERLEAVED,
+ SOF_IPC4_CHANNELS_NONINTERLEAVED,
+};
+
+enum sof_ipc4_sample_type {
+ SOF_IPC4_MSB_INTEGER, /* integer with Most Significant Byte first */
+ SOF_IPC4_LSB_INTEGER, /* integer with Least Significant Byte first */
+};
+
+struct sof_ipc4_audio_format {
+ uint32_t sampling_frequency;
+ uint32_t bit_depth;
+ uint32_t ch_map;
+ uint32_t ch_cfg; /* sof_ipc4_channel_config */
+ uint32_t interleaving_style;
+ uint32_t fmt_cfg; /* channels_count valid_bit_depth s_type */
+} __packed __aligned(4);
+
+#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_SHIFT 0
+#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_MASK GENMASK(7, 0)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(x) \
+ ((x) & SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT_MASK)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT 8
+#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_MASK GENMASK(15, 8)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(x) \
+ (((x) & SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_MASK) >> \
+ SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH_SHIFT)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT 16
+#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_MASK GENMASK(23, 16)
+#define SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE(x) \
+ (((x) & SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_MASK) >> \
+ SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE_SHIFT)
+
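fmt_cfg multiplexes channel count, valid bit depth and sample type; the accessor macros above already apply the down-shift, so decoding reads as in this sketch (`fmt` is an assumed struct sof_ipc4_audio_format pointer, not part of the patch):

    /* Illustration only: unpack fmt_cfg from a received audio format */
    u32 channels  = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
    u32 valid_bit = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
    u32 s_type    = SOF_IPC4_AUDIO_FORMAT_CFG_SAMPLE_TYPE(fmt->fmt_cfg);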
+/* Module message type specific field definitions */
+
+enum sof_ipc4_module_type {
+ SOF_IPC4_MOD_INIT_INSTANCE,
+ SOF_IPC4_MOD_CONFIG_GET,
+ SOF_IPC4_MOD_CONFIG_SET,
+ SOF_IPC4_MOD_LARGE_CONFIG_GET,
+ SOF_IPC4_MOD_LARGE_CONFIG_SET,
+ SOF_IPC4_MOD_BIND,
+ SOF_IPC4_MOD_UNBIND,
+ SOF_IPC4_MOD_SET_DX,
+ SOF_IPC4_MOD_SET_D0IX,
+ SOF_IPC4_MOD_ENTER_MODULE_RESTORE,
+ SOF_IPC4_MOD_EXIT_MODULE_RESTORE,
+ SOF_IPC4_MOD_DELETE_INSTANCE,
+
+ SOF_IPC4_MOD_TYPE_LAST,
+};
+
+struct sof_ipc4_base_module_cfg {
+ uint32_t cpc; /* the max count of Cycles Per Chunk processing */
+ uint32_t ibs; /* input Buffer Size (in bytes) */
+ uint32_t obs; /* output Buffer Size (in bytes) */
+ uint32_t is_pages; /* number of physical pages used */
+ struct sof_ipc4_audio_format audio_fmt;
+} __packed __aligned(4);
+
+/* common module ipc msg */
+#define SOF_IPC4_MOD_INSTANCE_SHIFT 16
+#define SOF_IPC4_MOD_INSTANCE_MASK GENMASK(23, 16)
+#define SOF_IPC4_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_INSTANCE_SHIFT)
+
+#define SOF_IPC4_MOD_ID_SHIFT 0
+#define SOF_IPC4_MOD_ID_MASK GENMASK(15, 0)
+#define SOF_IPC4_MOD_ID(x) ((x) << SOF_IPC4_MOD_ID_SHIFT)
+
+/* init module ipc msg */
+#define SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT 0
+#define SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK GENMASK(15, 0)
+#define SOF_IPC4_MOD_EXT_PARAM_SIZE(x) ((x) << SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_PPL_ID_SHIFT 16
+#define SOF_IPC4_MOD_EXT_PPL_ID_MASK GENMASK(23, 16)
+#define SOF_IPC4_MOD_EXT_PPL_ID(x) ((x) << SOF_IPC4_MOD_EXT_PPL_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_CORE_ID_SHIFT 24
+#define SOF_IPC4_MOD_EXT_CORE_ID_MASK GENMASK(27, 24)
+#define SOF_IPC4_MOD_EXT_CORE_ID(x) ((x) << SOF_IPC4_MOD_EXT_CORE_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_DOMAIN_SHIFT 28
+#define SOF_IPC4_MOD_EXT_DOMAIN_MASK BIT(28)
+#define SOF_IPC4_MOD_EXT_DOMAIN(x) ((x) << SOF_IPC4_MOD_EXT_DOMAIN_SHIFT)
+
+/* bind/unbind module ipc msg */
+#define SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT 0
+#define SOF_IPC4_MOD_EXT_DST_MOD_ID_MASK GENMASK(15, 0)
+#define SOF_IPC4_MOD_EXT_DST_MOD_ID(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_SHIFT 16
+#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_MASK GENMASK(23, 16)
+#define SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_SHIFT 24
+#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_MASK GENMASK(26, 24)
+#define SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(x) ((x) << SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_SHIFT 27
+#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_MASK GENMASK(29, 27)
+#define SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(x) ((x) << SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID_SHIFT)
+
+#define MOD_ENABLE_LOG 6
+#define MOD_SYSTEM_TIME 20
+
+/* set module large config */
+#define SOF_IPC4_MOD_EXT_MSG_SIZE_SHIFT 0
+#define SOF_IPC4_MOD_EXT_MSG_SIZE_MASK GENMASK(19, 0)
+#define SOF_IPC4_MOD_EXT_MSG_SIZE(x) ((x) << SOF_IPC4_MOD_EXT_MSG_SIZE_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID_SHIFT 20
+#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID_MASK GENMASK(27, 20)
+#define SOF_IPC4_MOD_EXT_MSG_PARAM_ID(x) ((x) << SOF_IPC4_MOD_EXT_MSG_PARAM_ID_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_SHIFT 28
+#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_MASK BIT(28)
+#define SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK(x) ((x) << SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK_SHIFT)
+
+#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_SHIFT 29
+#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_MASK BIT(29)
+#define SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(x) ((x) << SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_SHIFT)
+
+/* Init instance messages */
+#define SOF_IPC4_MOD_INIT_BASEFW_MOD_ID 0
+#define SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID 0
+
+enum sof_ipc4_base_fw_params {
+ SOF_IPC4_FW_PARAM_ENABLE_LOGS = 6,
+ SOF_IPC4_FW_PARAM_FW_CONFIG,
+ SOF_IPC4_FW_PARAM_HW_CONFIG_GET,
+ SOF_IPC4_FW_PARAM_MODULES_INFO_GET,
+ SOF_IPC4_FW_PARAM_LIBRARIES_INFO_GET = 16,
+ SOF_IPC4_FW_PARAM_SYSTEM_TIME = 20,
+};
+
+enum sof_ipc4_fw_config_params {
+ SOF_IPC4_FW_CFG_FW_VERSION,
+ SOF_IPC4_FW_CFG_MEMORY_RECLAIMED,
+ SOF_IPC4_FW_CFG_SLOW_CLOCK_FREQ_HZ,
+ SOF_IPC4_FW_CFG_FAST_CLOCK_FREQ_HZ,
+ SOF_IPC4_FW_CFG_DMA_BUFFER_CONFIG,
+ SOF_IPC4_FW_CFG_ALH_SUPPORT_LEVEL,
+ SOF_IPC4_FW_CFG_DL_MAILBOX_BYTES,
+ SOF_IPC4_FW_CFG_UL_MAILBOX_BYTES,
+ SOF_IPC4_FW_CFG_TRACE_LOG_BYTES,
+ SOF_IPC4_FW_CFG_MAX_PPL_COUNT,
+ SOF_IPC4_FW_CFG_MAX_ASTATE_COUNT,
+ SOF_IPC4_FW_CFG_MAX_MODULE_PIN_COUNT,
+ SOF_IPC4_FW_CFG_MODULES_COUNT,
+ SOF_IPC4_FW_CFG_MAX_MOD_INST_COUNT,
+ SOF_IPC4_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT,
+ SOF_IPC4_FW_CFG_LL_PRI_COUNT,
+ SOF_IPC4_FW_CFG_MAX_DP_TASKS_COUNT,
+ SOF_IPC4_FW_CFG_MAX_LIBS_COUNT,
+ SOF_IPC4_FW_CFG_SCHEDULER_CONFIG,
+ SOF_IPC4_FW_CFG_XTAL_FREQ_HZ,
+ SOF_IPC4_FW_CFG_CLOCKS_CONFIG,
+ SOF_IPC4_FW_CFG_RESERVED,
+ SOF_IPC4_FW_CFG_POWER_GATING_POLICY,
+ SOF_IPC4_FW_CFG_ASSERT_MODE,
+ SOF_IPC4_FW_RESERVED1,
+ SOF_IPC4_FW_RESERVED2,
+ SOF_IPC4_FW_RESERVED3,
+ SOF_IPC4_FW_RESERVED4,
+ SOF_IPC4_FW_RESERVED5,
+ SOF_IPC4_FW_CONTEXT_SAVE
+};
+
+struct sof_ipc4_fw_version {
+ uint16_t major;
+ uint16_t minor;
+ uint16_t hotfix;
+ uint16_t build;
+} __packed;
+
+/* Payload data for SOF_IPC4_MOD_SET_DX */
+struct sof_ipc4_dx_state_info {
+ /* core(s) to apply the change */
+ uint32_t core_mask;
+ /* core state: 0: put core_id to D3; 1: put core_id to D0 */
+ uint32_t dx_mask;
+} __packed __aligned(4);
+
+/* Reply messages */
+
+/*
+ * IPC4 primary header bit allocation for replies
+ * bit 0-23: status
+ * bit 24-28: type: enum sof_ipc4_global_msg if target is SOF_IPC4_FW_GEN_MSG
+ * enum sof_ipc4_module_type if target is SOF_IPC4_MODULE_MSG
+ * bit 29: response - sof_ipc4_msg_dir
+ * bit 30: target - enum sof_ipc4_msg_target
+ * bit 31: reserved, unused
+ */
+
+#define SOF_IPC4_REPLY_STATUS GENMASK(23, 0)
+
+/* Notification messages */
+
+/*
+ * IPC4 primary header bit allocation for notifications
+ * bit 0-15: notification type specific
+ * bit 16-23: enum sof_ipc4_notification_type
+ * bit 24-28: SOF_IPC4_GLB_NOTIFICATION
+ * bit 29: response - sof_ipc4_msg_dir
+ * bit 30: target - enum sof_ipc4_msg_target
+ * bit 31: reserved, unused
+ */
+
+#define SOF_IPC4_MSG_IS_NOTIFICATION(x) (SOF_IPC4_MSG_TYPE_GET(x) == \
+ SOF_IPC4_GLB_NOTIFICATION)
+
+#define SOF_IPC4_NOTIFICATION_TYPE_SHIFT 16
+#define SOF_IPC4_NOTIFICATION_TYPE_MASK GENMASK(23, 16)
+#define SOF_IPC4_NOTIFICATION_TYPE_GET(x) (((x) & SOF_IPC4_NOTIFICATION_TYPE_MASK) >> \
+ SOF_IPC4_NOTIFICATION_TYPE_SHIFT)
+
+#define SOF_IPC4_LOG_CORE_SHIFT 12
+#define SOF_IPC4_LOG_CORE_MASK GENMASK(15, 12)
+#define SOF_IPC4_LOG_CORE_GET(x) (((x) & SOF_IPC4_LOG_CORE_MASK) >> \
+ SOF_IPC4_LOG_CORE_SHIFT)
+
+/* Value of notification type field - must fit into 8 bits */
+enum sof_ipc4_notification_type {
+ /* Phrase detected (notification from WoV module) */
+ SOF_IPC4_NOTIFY_PHRASE_DETECTED = 4,
+ /* Event from a resource (pipeline or module instance) */
+ SOF_IPC4_NOTIFY_RESOURCE_EVENT,
+ /* Debug log buffer status changed */
+ SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS,
+ /* Timestamp captured at the link */
+ SOF_IPC4_NOTIFY_TIMESTAMP_CAPTURED,
+ /* FW complete initialization */
+ SOF_IPC4_NOTIFY_FW_READY,
+ /* Audio classifier result (ACA) */
+ SOF_IPC4_NOTIFY_FW_AUD_CLASS_RESULT,
+ /* Exception caught by DSP FW */
+ SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT,
+ /* 11 is skipped by the existing cavs firmware */
+ /* Custom module notification */
+ SOF_IPC4_NOTIFY_MODULE_NOTIFICATION = 12,
+ /* 13 is reserved - do not use */
+ /* Probe notify data available */
+ SOF_IPC4_NOTIFY_PROBE_DATA_AVAILABLE = 14,
+ /* AM module notifications */
+ SOF_IPC4_NOTIFY_ASYNC_MSG_SRVC_MESSAGE,
+
+ SOF_IPC4_NOTIFY_TYPE_LAST,
+};
+
+struct sof_ipc4_notify_resource_data {
+ uint32_t resource_type;
+ uint32_t resource_id;
+ uint32_t event_type;
+ uint32_t reserved;
+ uint32_t data[6];
+} __packed __aligned(4);
+
+#define SOF_IPC4_DEBUG_DESCRIPTOR_SIZE 12 /* 3 x u32 */
+
+/*
+ * The debug memory window is divided into 16 slots, and the
+ * first slot is used as a recorder for the other 15 slots.
+ */
+#define SOF_IPC4_MAX_DEBUG_SLOTS 15
+#define SOF_IPC4_DEBUG_SLOT_SIZE 0x1000
+
+/* debug log slot types */
+#define SOF_IPC4_DEBUG_SLOT_UNUSED 0x00000000
+#define SOF_IPC4_DEBUG_SLOT_CRITICAL_LOG 0x54524300 /* byte 0: core ID */
+#define SOF_IPC4_DEBUG_SLOT_DEBUG_LOG 0x474f4c00 /* byte 0: core ID */
+#define SOF_IPC4_DEBUG_SLOT_GDB_STUB 0x42444700
+#define SOF_IPC4_DEBUG_SLOT_TELEMETRY 0x4c455400
+#define SOF_IPC4_DEBUG_SLOT_BROKEN 0x44414544
+
+/**
+ * struct sof_ipc4_notify_module_data - payload for module notification
+ * @instance_id: instance ID of the originator module of the notification
+ * @module_id: module ID of the originator of the notification
+ * @event_id: module specific event id
+ * @event_data_size: Size of the @event_data (if any) in bytes
+ * @event_data: Optional notification data, module and notification dependent
+ */
+struct sof_ipc4_notify_module_data {
+ uint16_t instance_id;
+ uint16_t module_id;
+ uint32_t event_id;
+ uint32_t event_data_size;
+ uint8_t event_data[];
+} __packed __aligned(4);
+
+/*
+ * ALSA kcontrol change notification
+ *
+ * The event_id of struct sof_ipc4_notify_module_data is divided into two u16:
+ * upper u16: magic number for ALSA kcontrol types: 0xA15A
+ * lower u16: param_id of the control, which is the type of the control
+ * The event_data contains the struct sof_ipc4_control_msg_payload of the control
+ * which sent the notification.
+ */
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK GENMASK(31, 16)
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL 0xA15A0000
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK GENMASK(15, 0)
+
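Decoding is then a mask-and-compare on event_id; a sketch, where `data` is an assumed pointer to a received struct sof_ipc4_notify_module_data (not part of the patch):

    /* Illustration only: detect an ALSA kcontrol change notification */
    if ((data->event_id & SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK) ==
        SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL) {
    	u32 param_id = data->event_id &
    		       SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK;
    	/* ... look up the matching kcontrol by param_id ... */
    }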
+/** @}*/
+
+#endif
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 58a0d49977d6..9377113f13e4 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -85,9 +85,12 @@ struct sof_ipc_stream_params {
uint32_t host_period_bytes;
uint16_t no_stream_position; /**< 1 means don't send stream position */
-
- uint16_t reserved[3];
+ uint8_t cont_update_posn; /**< 1 means continuous update stream position */
+ uint8_t reserved0;
+ int16_t ext_data_length; /**< 0 means no extended data */
+ uint8_t reserved[2];
uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
+ uint8_t ext_data[]; /**< extended data */
} __packed;
/* PCM params info - SOF_IPC_STREAM_PCM_PARAMS */
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index f56e80d09b32..b3ca886fa28f 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -26,9 +26,9 @@ enum sof_comp_type {
SOF_COMP_MIXER,
SOF_COMP_MUX,
SOF_COMP_SRC,
- SOF_COMP_SPLITTER,
+ SOF_COMP_DEPRECATED0, /* Formerly SOF_COMP_SPLITTER */
SOF_COMP_TONE,
- SOF_COMP_SWITCH,
+ SOF_COMP_DEPRECATED1, /* Formerly SOF_COMP_SWITCH */
SOF_COMP_BUFFER,
SOF_COMP_EQ_IIR,
SOF_COMP_EQ_FIR,
@@ -39,6 +39,7 @@ enum sof_comp_type {
SOF_COMP_ASRC, /**< Asynchronous sample rate converter */
SOF_COMP_DCBLOCK,
SOF_COMP_SMART_AMP, /**< smart amplifier component */
+ SOF_COMP_MODULE_ADAPTER, /**< module adapter */
/* keep FILEREAD/FILEWRITE as the last ones */
SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
@@ -57,9 +58,9 @@ struct sof_ipc_comp {
uint32_t pipeline_id;
uint32_t core;
- /* reserved for future use */
- uint32_t reserved[1];
-} __packed;
+ /* extended data length, 0 if no extended data */
+ uint32_t ext_data_length;
+} __packed __aligned(4);
/*
* Component Buffers
@@ -68,14 +69,15 @@ struct sof_ipc_comp {
/*
* SOF memory capabilities, add new ones at the end
*/
-#define SOF_MEM_CAPS_RAM (1 << 0)
-#define SOF_MEM_CAPS_ROM (1 << 1)
-#define SOF_MEM_CAPS_EXT (1 << 2) /**< external */
-#define SOF_MEM_CAPS_LP (1 << 3) /**< low power */
-#define SOF_MEM_CAPS_HP (1 << 4) /**< high performance */
-#define SOF_MEM_CAPS_DMA (1 << 5) /**< DMA'able */
-#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */
-#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */
+#define SOF_MEM_CAPS_RAM BIT(0)
+#define SOF_MEM_CAPS_ROM BIT(1)
+#define SOF_MEM_CAPS_EXT BIT(2) /**< external */
+#define SOF_MEM_CAPS_LP BIT(3) /**< low power */
+#define SOF_MEM_CAPS_HP BIT(4) /**< high performance */
+#define SOF_MEM_CAPS_DMA BIT(5) /**< DMA'able */
+#define SOF_MEM_CAPS_CACHE BIT(6) /**< cacheable */
+#define SOF_MEM_CAPS_EXEC BIT(7) /**< executable */
+#define SOF_MEM_CAPS_L3 BIT(8) /**< L3 memory */
/*
* overrun will cause ring buffer overwrite, instead of XRUN.
@@ -87,6 +89,9 @@ struct sof_ipc_comp {
*/
#define SOF_BUF_UNDERRUN_PERMITTED BIT(1)
+/* the UUID size in bytes, shared between FW and host */
+#define SOF_UUID_SIZE 16
+
/* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */
struct sof_ipc_buffer {
struct sof_ipc_comp comp;
@@ -94,7 +99,7 @@ struct sof_ipc_buffer {
uint32_t caps; /**< SOF_MEM_CAPS_ */
uint32_t flags; /**< SOF_BUF_ flags defined above */
uint32_t reserved; /**< reserved for future use */
-} __packed;
+} __packed __aligned(4);
/* generic component config data - must always be after struct sof_ipc_comp */
struct sof_ipc_comp_config {
@@ -107,7 +112,7 @@ struct sof_ipc_comp_config {
/* reserved for future use */
uint32_t reserved[2];
-} __packed;
+} __packed __aligned(4);
/* generic host component */
struct sof_ipc_comp_host {
@@ -116,7 +121,7 @@ struct sof_ipc_comp_host {
uint32_t direction; /**< SOF_IPC_STREAM_ */
uint32_t no_irq; /**< don't send periodic IRQ to host/DSP */
uint32_t dmac_config; /**< DMA engine specific */
-} __packed;
+} __packed __aligned(4);
/* generic DAI component */
struct sof_ipc_comp_dai {
@@ -126,13 +131,13 @@ struct sof_ipc_comp_dai {
uint32_t dai_index; /**< index of this type dai */
uint32_t type; /**< DAI type - SOF_DAI_ */
uint32_t reserved; /**< reserved */
-} __packed;
+} __packed __aligned(4);
/* generic mixer component */
struct sof_ipc_comp_mixer {
struct sof_ipc_comp comp;
struct sof_ipc_comp_config config;
-} __packed;
+} __packed __aligned(4);
/* volume ramping types */
enum sof_volume_ramp {
@@ -140,6 +145,8 @@ enum sof_volume_ramp {
SOF_VOLUME_LOG,
SOF_VOLUME_LINEAR_ZC,
SOF_VOLUME_LOG_ZC,
+ SOF_VOLUME_WINDOWS_FADE,
+ SOF_VOLUME_WINDOWS_NO_FADE,
};
/* generic volume component */
@@ -151,7 +158,7 @@ struct sof_ipc_comp_volume {
uint32_t max_value;
uint32_t ramp; /**< SOF_VOLUME_ */
uint32_t initial_ramp; /**< ramp space in ms */
-} __packed;
+} __packed __aligned(4);
/* generic SRC component */
struct sof_ipc_comp_src {
@@ -161,7 +168,7 @@ struct sof_ipc_comp_src {
uint32_t source_rate; /**< source rate or 0 for variable */
uint32_t sink_rate; /**< sink rate or 0 for variable */
uint32_t rate_mask; /**< SOF_RATE_ supported rates */
-} __packed;
+} __packed __aligned(4);
/* generic ASRC component */
struct sof_ipc_comp_asrc {
@@ -187,13 +194,13 @@ struct sof_ipc_comp_asrc {
/* reserved for future use */
uint32_t reserved[4];
-} __attribute__((packed));
+} __packed __aligned(4);
/* generic MUX component */
struct sof_ipc_comp_mux {
struct sof_ipc_comp comp;
struct sof_ipc_comp_config config;
-} __packed;
+} __packed __aligned(4);
/* generic tone generator component */
struct sof_ipc_comp_tone {
@@ -208,7 +215,7 @@ struct sof_ipc_comp_tone {
int32_t period;
int32_t repeats;
int32_t ramp_step;
-} __packed;
+} __packed __aligned(4);
/** \brief Types of processing components */
enum sof_ipc_process_type {
@@ -234,8 +241,8 @@ struct sof_ipc_comp_process {
/* reserved for future use */
uint32_t reserved[7];
- uint8_t data[0];
-} __packed;
+ unsigned char data[];
+} __packed __aligned(4);
/* frees components, buffers and pipelines
* SOF_IPC_TPLG_COMP_FREE, SOF_IPC_TPLG_PIPE_FREE, SOF_IPC_TPLG_BUFFER_FREE
@@ -243,13 +250,13 @@ struct sof_ipc_comp_process {
struct sof_ipc_free {
struct sof_ipc_cmd_hdr hdr;
uint32_t id;
-} __packed;
+} __packed __aligned(4);
struct sof_ipc_comp_reply {
struct sof_ipc_reply rhdr;
uint32_t id;
uint32_t offset;
-} __packed;
+} __packed __aligned(4);
/*
* Pipeline
@@ -274,25 +281,25 @@ struct sof_ipc_pipe_new {
uint32_t frames_per_sched;/**< output frames of pipeline, 0 is variable */
uint32_t xrun_limit_usecs; /**< report xruns greater than limit */
uint32_t time_domain; /**< scheduling time domain */
-} __packed;
+} __packed __aligned(4);
/* pipeline construction complete - SOF_IPC_TPLG_PIPE_COMPLETE */
struct sof_ipc_pipe_ready {
struct sof_ipc_cmd_hdr hdr;
uint32_t comp_id;
-} __packed;
+} __packed __aligned(4);
struct sof_ipc_pipe_free {
struct sof_ipc_cmd_hdr hdr;
uint32_t comp_id;
-} __packed;
+} __packed __aligned(4);
/* connect two components in pipeline - SOF_IPC_TPLG_COMP_CONNECT */
struct sof_ipc_pipe_comp_connect {
struct sof_ipc_cmd_hdr hdr;
uint32_t source_id;
uint32_t sink_id;
-} __packed;
+} __packed __aligned(4);
/* external events */
enum sof_event_types {
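
The BIT() conversion above is purely mechanical; the capability flags still combine with bitwise OR, and the added __packed __aligned(4) keeps each IPC struct both unpadded and 4-byte aligned on either side of the host/DSP boundary. A minimal host-side sketch of the flag usage (field values are illustrative, not taken from this patch):

struct sof_ipc_buffer buf = {
	/* .comp and the remaining fields come from the topology loader */
	.caps  = SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA | SOF_MEM_CAPS_CACHE,
	.flags = SOF_BUF_UNDERRUN_PERMITTED,
};
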
diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h
index c31a94a13ce0..25ea99f62d37 100644
--- a/include/sound/sof/trace.h
+++ b/include/sound/sof/trace.h
@@ -43,6 +43,34 @@ struct sof_ipc_dma_trace_posn {
uint32_t messages; /* total trace messages */
} __packed;
+/* Values used in sof_ipc_trace_filter_elem: */
+
+/* bits 6..0 */
+#define SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL 0x01 /**< trace level for selected components */
+#define SOF_IPC_TRACE_FILTER_ELEM_BY_UUID 0x02 /**< filter by uuid key */
+#define SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE 0x03 /**< filter by pipeline */
+#define SOF_IPC_TRACE_FILTER_ELEM_BY_COMP 0x04 /**< filter by component id */
+
+/* bit 7 */
+#define SOF_IPC_TRACE_FILTER_ELEM_FIN 0x80 /**< mark last filter in set */
+
+/* bits 31..8: Unused */
+
+/** part of sof_ipc_trace_filter, ABI3.17 */
+struct sof_ipc_trace_filter_elem {
+ uint32_t key; /**< SOF_IPC_TRACE_FILTER_ELEM_ {LEVEL, UUID, COMP, PIPE} */
+ uint32_t value; /**< element value */
+} __packed;
+
+/** Runtime tracing filtration data - SOF_IPC_TRACE_FILTER_UPDATE, ABI3.17 */
+struct sof_ipc_trace_filter {
+ struct sof_ipc_cmd_hdr hdr; /**< IPC command header */
+ uint32_t elem_cnt; /**< number of entries in elems[] array */
+ uint32_t reserved[8]; /**< reserved for future usage */
+ /** variable size array with new filtering settings */
+ struct sof_ipc_trace_filter_elem elems[];
+} __packed;
+
/*
 * Common debug
*/
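
The new trace-filter ABI is a flat key/value array: bits 6..0 of each key select the filter type and bit 7 (SOF_IPC_TRACE_FILTER_ELEM_FIN) marks the final element. A hedged sketch of a two-element update (the pipeline id and trace level are made-up values):

struct sof_ipc_trace_filter_elem elems[] = {
	{ .key = SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE, .value = 2 },
	{ .key = SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
		 SOF_IPC_TRACE_FILTER_ELEM_FIN, .value = 3 },
};
/* wrapped in a struct sof_ipc_trace_filter with elem_cnt = ARRAY_SIZE(elems) */
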
diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
new file mode 100644
index 000000000000..ea9af2726a53
--- /dev/null
+++ b/include/sound/tas2781-dsp.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+// Author: Kevin Lu <kevin-lu@ti.com>
+//
+
+#ifndef __TASDEVICE_DSP_H__
+#define __TASDEVICE_DSP_H__
+
+#define MAIN_ALL_DEVICES 0x0d
+#define MAIN_DEVICE_A 0x01
+#define MAIN_DEVICE_B 0x08
+#define MAIN_DEVICE_C 0x10
+#define MAIN_DEVICE_D 0x14
+#define COEFF_DEVICE_A 0x03
+#define COEFF_DEVICE_B 0x0a
+#define COEFF_DEVICE_C 0x11
+#define COEFF_DEVICE_D 0x15
+#define PRE_DEVICE_A 0x04
+#define PRE_DEVICE_B 0x0b
+#define PRE_DEVICE_C 0x12
+#define PRE_DEVICE_D 0x16
+
+#define PPC3_VERSION 0x4100
+#define PPC3_VERSION_TAS2781 0x14600
+#define TASDEVICE_DEVICE_SUM 8
+#define TASDEVICE_CONFIG_SUM 64
+
+#define TASDEVICE_MAX_CHANNELS 8
+
+enum tasdevice_dsp_dev_idx {
+ TASDEVICE_DSP_TAS_2555 = 0,
+ TASDEVICE_DSP_TAS_2555_STEREO,
+ TASDEVICE_DSP_TAS_2557_MONO,
+ TASDEVICE_DSP_TAS_2557_DUAL_MONO,
+ TASDEVICE_DSP_TAS_2559,
+ TASDEVICE_DSP_TAS_2563,
+ TASDEVICE_DSP_TAS_2563_DUAL_MONO = 7,
+ TASDEVICE_DSP_TAS_2563_QUAD,
+ TASDEVICE_DSP_TAS_2563_21,
+ TASDEVICE_DSP_TAS_2781,
+ TASDEVICE_DSP_TAS_2781_DUAL_MONO,
+ TASDEVICE_DSP_TAS_2781_21,
+ TASDEVICE_DSP_TAS_2781_QUAD,
+ TASDEVICE_DSP_TAS_MAX_DEVICE
+};
+
+struct tasdevice_fw_fixed_hdr {
+ unsigned int fwsize;
+ unsigned int ppcver;
+ unsigned int drv_ver;
+};
+
+struct tasdevice_dspfw_hdr {
+ struct tasdevice_fw_fixed_hdr fixed_hdr;
+ unsigned short device_family;
+ unsigned short device;
+ unsigned char ndev;
+};
+
+struct tasdev_blk {
+ int nr_retry;
+ unsigned int type;
+ unsigned char is_pchksum_present;
+ unsigned char pchksum;
+ unsigned char is_ychksum_present;
+ unsigned char ychksum;
+ unsigned int nr_cmds;
+ unsigned int blk_size;
+ unsigned int nr_subblocks;
+	/* fixed m68k compiling issue; storing the dev_idx as a member of the
	 * block can avoid unnecessary time and system resource consumption
	 * from dev_idx mapping every time the block data is written to the dsp.
	 */
+ unsigned char dev_idx;
+ unsigned char *data;
+};
+
+struct tasdevice_data {
+ char name[64];
+ unsigned int nr_blk;
+ struct tasdev_blk *dev_blks;
+};
+
+struct tasdevice_prog {
+ unsigned int prog_size;
+ struct tasdevice_data dev_data;
+};
+
+struct tasdevice_config {
+ unsigned int cfg_size;
+ char name[64];
+ struct tasdevice_data dev_data;
+};
+
+struct tasdevice_calibration {
+ struct tasdevice_data dev_data;
+};
+
+struct tasdevice_fw {
+ struct tasdevice_dspfw_hdr fw_hdr;
+ unsigned short nr_programs;
+ struct tasdevice_prog *programs;
+ unsigned short nr_configurations;
+ struct tasdevice_config *configs;
+ unsigned short nr_calibrations;
+ struct tasdevice_calibration *calibrations;
+ struct device *dev;
+};
+
+enum tasdevice_dsp_fw_state {
+ TASDEVICE_DSP_FW_NONE = 0,
+ TASDEVICE_DSP_FW_PENDING,
+ TASDEVICE_DSP_FW_FAIL,
+ TASDEVICE_DSP_FW_ALL_OK,
+};
+
+enum tasdevice_bin_blk_type {
+ TASDEVICE_BIN_BLK_COEFF = 1,
+ TASDEVICE_BIN_BLK_POST_POWER_UP,
+ TASDEVICE_BIN_BLK_PRE_SHUTDOWN,
+ TASDEVICE_BIN_BLK_PRE_POWER_UP,
+ TASDEVICE_BIN_BLK_POST_SHUTDOWN
+};
+
+struct tasdevice_rca_hdr {
+ unsigned int img_sz;
+ unsigned int checksum;
+ unsigned int binary_version_num;
+ unsigned int drv_fw_version;
+ unsigned char plat_type;
+ unsigned char dev_family;
+ unsigned char reserve;
+ unsigned char ndev;
+ unsigned char devs[TASDEVICE_DEVICE_SUM];
+ unsigned int nconfig;
+ unsigned int config_size[TASDEVICE_CONFIG_SUM];
+};
+
+struct tasdev_blk_data {
+ unsigned char dev_idx;
+ unsigned char block_type;
+ unsigned short yram_checksum;
+ unsigned int block_size;
+ unsigned int n_subblks;
+ unsigned char *regdata;
+};
+
+struct tasdevice_config_info {
+ unsigned int nblocks;
+ unsigned int real_nblocks;
+ unsigned char active_dev;
+ struct tasdev_blk_data **blk_data;
+};
+
+struct tasdevice_rca {
+ struct tasdevice_rca_hdr fw_hdr;
+ int ncfgs;
+ struct tasdevice_config_info **cfg_info;
+ int profile_cfg_id;
+};
+
+void tasdevice_select_cfg_blk(void *context, int conf_no,
+ unsigned char block_type);
+void tasdevice_config_info_remove(void *context);
+void tasdevice_dsp_remove(void *context);
+int tasdevice_dsp_parser(void *context);
+int tasdevice_rca_parser(void *context, const struct firmware *fmw);
+void tasdevice_calbin_remove(void *context);
+int tasdevice_select_tuningprm_cfg(void *context, int prm,
+ int cfg_no, int rca_conf_no);
+int tasdevice_prmg_load(void *context, int prm_no);
+int tasdevice_prmg_calibdata_load(void *context, int prm_no);
+void tasdevice_tuning_switch(void *context, int state);
+int tas2781_load_calibration(void *context, char *file_name,
+ unsigned short i);
+
+#endif
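
The firmware layout declared above is a three-level tree: struct tasdevice_fw holds an array of programs, each program carries a struct tasdevice_data, and that in turn owns the register blocks. A hypothetical helper walking a fully parsed image (tas_fw_count_blocks is not part of this header):

static unsigned int tas_fw_count_blocks(const struct tasdevice_fw *fw)
{
	unsigned int i, total = 0;

	/* sum the block counts across all parsed programs */
	for (i = 0; i < fw->nr_programs; i++)
		total += fw->programs[i].dev_data.nr_blk;
	return total;
}
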
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
new file mode 100644
index 000000000000..4038dd421150
--- /dev/null
+++ b/include/sound/tas2781-tlv.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2781_TLV_H__
+#define __TAS2781_TLV_H__
+
+static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+
+#endif
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
new file mode 100644
index 000000000000..99ca3e401fd1
--- /dev/null
+++ b/include/sound/tas2781.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563/TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2563/TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+// Author: Kevin Lu <kevin-lu@ti.com>
+//
+
+#ifndef __TAS2781_H__
+#define __TAS2781_H__
+
+#include "tas2781-dsp.h"
+
+/* version number */
+#define TAS2781_DRV_VER 1
+#define SMARTAMP_MODULE_NAME "tas2781"
+#define TAS2781_GLOBAL_ADDR 0x40
+#define TAS2563_GLOBAL_ADDR 0x48
+#define TASDEVICE_RATES (SNDRV_PCM_RATE_44100 |\
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_88200)
+
+#define TASDEVICE_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+/*PAGE Control Register (available in page0 of each book) */
+#define TASDEVICE_PAGE_SELECT 0x00
+#define TASDEVICE_BOOKCTL_PAGE 0x00
+#define TASDEVICE_BOOKCTL_REG 127
+#define TASDEVICE_BOOK_ID(reg) (reg / (256 * 128))
+#define TASDEVICE_PAGE_ID(reg) ((reg % (256 * 128)) / 128)
+#define TASDEVICE_PAGE_REG(reg) ((reg % (256 * 128)) % 128)
+#define TASDEVICE_PGRG(reg) (reg % (256 * 128))
+#define TASDEVICE_REG(book, page, reg) (((book * 256 * 128) + \
+ (page * 128)) + reg)
+
+/*Software Reset */
+#define TAS2781_REG_SWRESET TASDEVICE_REG(0x0, 0X0, 0x01)
+#define TAS2781_REG_SWRESET_RESET BIT(0)
+
+/*I2C Checksum */
+#define TASDEVICE_I2CChecksum TASDEVICE_REG(0x0, 0x0, 0x7E)
+
+/* Volume control */
+#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1A)
+#define TAS2781_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
+#define TAS2781_AMP_LEVEL_MASK GENMASK(5, 1)
+
+#define TASDEVICE_CMD_SING_W 0x1
+#define TASDEVICE_CMD_BURST 0x2
+#define TASDEVICE_CMD_DELAY 0x3
+#define TASDEVICE_CMD_FIELD_W 0x4
+
+enum audio_device {
+ TAS2563,
+ TAS2781,
+};
+
+enum device_catlog_id {
+ LENOVO = 0,
+ OTHERS
+};
+
+struct tasdevice {
+ struct tasdevice_fw *cali_data_fmw;
+ unsigned int dev_addr;
+ unsigned int err_code;
+ unsigned char cur_book;
+ short cur_prog;
+ short cur_conf;
+ bool is_loading;
+ bool is_loaderr;
+};
+
+struct tasdevice_irqinfo {
+ int irq_gpio;
+ int irq;
+};
+
+struct calidata {
+ unsigned char *data;
+ unsigned long total_sz;
+};
+
+struct tasdevice_priv {
+ struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
+ struct tasdevice_irqinfo irq_info;
+ struct tasdevice_rca rcabin;
+ struct calidata cali_data;
+ struct tasdevice_fw *fmw;
+ struct gpio_desc *reset;
+ struct mutex codec_lock;
+ struct regmap *regmap;
+ struct device *dev;
+ struct tm tm;
+
+ enum device_catlog_id catlog_id;
+ unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64];
+ unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE];
+ unsigned char coef_binaryname[64];
+ unsigned char rca_binaryname[64];
+ unsigned char dev_name[32];
+ unsigned char ndev;
+ unsigned int magic_num;
+ unsigned int chip_id;
+ unsigned int sysclk;
+
+ int cur_prog;
+ int cur_conf;
+ int fw_state;
+ int index;
+ void *client;
+ void *codec;
+ bool force_fwload_status;
+ bool playback_started;
+ bool isacpi;
+ unsigned int global_addr;
+
+ int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
+ const struct firmware *fmw, int offset);
+ int (*fw_parse_program_data)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset);
+ int (*fw_parse_configuration_data)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset);
+ int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block);
+
+ int (*save_calibration)(struct tasdevice_priv *tas_priv);
+ void (*apply_calibration)(struct tasdevice_priv *tas_priv);
+};
+
+void tas2781_reset(struct tasdevice_priv *tas_dev);
+int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ struct module *module,
+ void (*cont)(const struct firmware *fw, void *context));
+struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
+int tasdevice_init(struct tasdevice_priv *tas_priv);
+void tasdevice_remove(struct tasdevice_priv *tas_priv);
+int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
+void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
+int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int *value);
+int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int value);
+int tasdevice_dev_bulk_write(
+ struct tasdevice_priv *tas_priv, unsigned short chn,
+ unsigned int reg, unsigned char *p_data, unsigned int n_length);
+int tasdevice_dev_bulk_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
+int tasdevice_dev_update_bits(
+ struct tasdevice_priv *tasdevice, unsigned short chn,
+ unsigned int reg, unsigned int mask, unsigned int value);
+int tasdevice_amp_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_amp_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+
+#endif /* __TAS2781_H__ */
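
The TASDEVICE_REG()/TASDEVICE_BOOK_ID() family linearizes the amplifier's paged register map: 128 registers per page and 256 pages per book. A worked, illustrative round trip:

/* TASDEVICE_REG(1, 2, 3) = 1 * 256 * 128 + 2 * 128 + 3 = 33027 */
unsigned int reg  = TASDEVICE_REG(1, 2, 3);
unsigned int book = TASDEVICE_BOOK_ID(reg);  /* 33027 / 32768        == 1 */
unsigned int page = TASDEVICE_PAGE_ID(reg);  /* (33027 % 32768) / 128 == 2 */
unsigned int off  = TASDEVICE_PAGE_REG(reg); /* (33027 % 32768) % 128 == 3 */
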
diff --git a/include/sound/timer.h b/include/sound/timer.h
index 23e885d31525..760e132cc0cd 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -21,13 +21,13 @@
#define SNDRV_TIMER_HW_STOP 0x00000002 /* call stop before start */
#define SNDRV_TIMER_HW_SLAVE 0x00000004 /* only slave timer (variable resolution) */
#define SNDRV_TIMER_HW_FIRST 0x00000008 /* first tick can be incomplete */
-#define SNDRV_TIMER_HW_TASKLET 0x00000010 /* timer is called from tasklet */
+#define SNDRV_TIMER_HW_WORK 0x00000010 /* timer is called from work */
#define SNDRV_TIMER_IFLG_SLAVE 0x00000001
#define SNDRV_TIMER_IFLG_RUNNING 0x00000002
#define SNDRV_TIMER_IFLG_START 0x00000004
#define SNDRV_TIMER_IFLG_AUTO 0x00000008 /* auto restart */
-#define SNDRV_TIMER_IFLG_FAST 0x00000010 /* fast callback (do not use tasklet) */
+#define SNDRV_TIMER_IFLG_FAST 0x00000010 /* fast callback (do not use work) */
#define SNDRV_TIMER_IFLG_CALLBACK 0x00000020 /* timer callback is active */
#define SNDRV_TIMER_IFLG_EXCLUSIVE 0x00000040 /* exclusive owner - no more instances */
#define SNDRV_TIMER_IFLG_EARLY_EVENT 0x00000080 /* write early event to the poll queue */
@@ -74,7 +74,7 @@ struct snd_timer {
struct list_head active_list_head;
struct list_head ack_list_head;
struct list_head sack_list_head; /* slow ack list head */
- struct tasklet_struct task_queue;
+ struct work_struct task_work;
int max_instances; /* upper limit of timer instances */
int num_instances; /* current number of timer instances */
};
@@ -96,7 +96,7 @@ struct snd_timer_instance {
unsigned long ticks; /* auto-load ticks when expired */
unsigned long cticks; /* current ticks */
unsigned long pticks; /* accumulated ticks for callback */
- unsigned long resolution; /* current resolution for tasklet */
+ unsigned long resolution; /* current resolution for work */
unsigned long lost; /* lost ticks */
int slave_class;
unsigned int slave_id;
diff --git a/include/sound/tlv320aic3x.h b/include/sound/tlv320aic3x.h
deleted file mode 100644
index b660a9ed05ec..000000000000
--- a/include/sound/tlv320aic3x.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data for Texas Instruments TLV320AIC3x codec
- *
- * Author: Jarkko Nikula <jarkko.nikula@bitmer.com>
- */
-#ifndef __TLV320AIC3x_H__
-#define __TLV320AIC3x_H__
-
-/* GPIO API */
-enum {
- AIC3X_GPIO1_FUNC_DISABLED = 0,
- AIC3X_GPIO1_FUNC_AUDIO_WORDCLK_ADC = 1,
- AIC3X_GPIO1_FUNC_CLOCK_MUX = 2,
- AIC3X_GPIO1_FUNC_CLOCK_MUX_DIV2 = 3,
- AIC3X_GPIO1_FUNC_CLOCK_MUX_DIV4 = 4,
- AIC3X_GPIO1_FUNC_CLOCK_MUX_DIV8 = 5,
- AIC3X_GPIO1_FUNC_SHORT_CIRCUIT_IRQ = 6,
- AIC3X_GPIO1_FUNC_AGC_NOISE_IRQ = 7,
- AIC3X_GPIO1_FUNC_INPUT = 8,
- AIC3X_GPIO1_FUNC_OUTPUT = 9,
- AIC3X_GPIO1_FUNC_DIGITAL_MIC_MODCLK = 10,
- AIC3X_GPIO1_FUNC_AUDIO_WORDCLK = 11,
- AIC3X_GPIO1_FUNC_BUTTON_IRQ = 12,
- AIC3X_GPIO1_FUNC_HEADSET_DETECT_IRQ = 13,
- AIC3X_GPIO1_FUNC_HEADSET_DETECT_OR_BUTTON_IRQ = 14,
- AIC3X_GPIO1_FUNC_ALL_IRQ = 16
-};
-
-enum {
- AIC3X_GPIO2_FUNC_DISABLED = 0,
- AIC3X_GPIO2_FUNC_HEADSET_DETECT_IRQ = 2,
- AIC3X_GPIO2_FUNC_INPUT = 3,
- AIC3X_GPIO2_FUNC_OUTPUT = 4,
- AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT = 5,
- AIC3X_GPIO2_FUNC_AUDIO_BITCLK = 8,
- AIC3X_GPIO2_FUNC_HEADSET_DETECT_OR_BUTTON_IRQ = 9,
- AIC3X_GPIO2_FUNC_ALL_IRQ = 10,
- AIC3X_GPIO2_FUNC_SHORT_CIRCUIT_OR_AGC_IRQ = 11,
- AIC3X_GPIO2_FUNC_HEADSET_OR_BUTTON_PRESS_OR_SHORT_CIRCUIT_IRQ = 12,
- AIC3X_GPIO2_FUNC_SHORT_CIRCUIT_IRQ = 13,
- AIC3X_GPIO2_FUNC_AGC_NOISE_IRQ = 14,
- AIC3X_GPIO2_FUNC_BUTTON_PRESS_IRQ = 15
-};
-
-enum aic3x_micbias_voltage {
- AIC3X_MICBIAS_OFF = 0,
- AIC3X_MICBIAS_2_0V = 1,
- AIC3X_MICBIAS_2_5V = 2,
- AIC3X_MICBIAS_AVDDV = 3,
-};
-
-struct aic3x_setup_data {
- unsigned int gpio_func[2];
-};
-
-struct aic3x_pdata {
- int gpio_reset; /* < 0 if not used */
- struct aic3x_setup_data *setup;
-
- /* Selects the micbias voltage */
- enum aic3x_micbias_voltage micbias_vg;
-};
-
-#endif
diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h
deleted file mode 100644
index db82516da162..000000000000
--- a/include/sound/uda134x.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * uda134x.h -- UDA134x ALSA SoC Codec driver
- *
- * Copyright 2007 Dension Audio Systems Ltd.
- * Author: Zoltan Devai
- */
-
-#ifndef _UDA134X_H
-#define _UDA134X_H
-
-#include <sound/l3.h>
-
-struct uda134x_platform_data {
- struct l3_pins l3;
- void (*power) (int);
- int model;
-#define UDA134X_UDA1340 1
-#define UDA134X_UDA1341 2
-#define UDA134X_UDA1344 3
-#define UDA134X_UDA1345 4
-};
-
-#endif /* _UDA134X_H */
diff --git a/include/sound/ump.h b/include/sound/ump.h
new file mode 100644
index 000000000000..91238dabe307
--- /dev/null
+++ b/include/sound/ump.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal MIDI Packet (UMP) Support
+ */
+#ifndef __SOUND_UMP_H
+#define __SOUND_UMP_H
+
+#include <sound/rawmidi.h>
+
+struct snd_ump_endpoint;
+struct snd_ump_block;
+struct snd_ump_ops;
+struct ump_cvt_to_ump;
+struct snd_seq_ump_ops;
+
+struct snd_ump_endpoint {
+ struct snd_rawmidi core; /* raw UMP access */
+
+ struct snd_ump_endpoint_info info;
+
+ const struct snd_ump_ops *ops; /* UMP ops set by the driver */
+ struct snd_rawmidi_substream *substreams[2]; /* opened substreams */
+
+ void *private_data;
+ void (*private_free)(struct snd_ump_endpoint *ump);
+
+ /* UMP Stream message processing */
+ u32 stream_wait_for; /* expected stream message status */
+ bool stream_finished; /* set when message has been processed */
+ bool parsed; /* UMP / FB parse finished? */
+ bool no_process_stream; /* suppress UMP stream messages handling */
+ wait_queue_head_t stream_wait;
+ struct snd_rawmidi_file stream_rfile;
+
+ struct list_head block_list; /* list of snd_ump_block objects */
+
+ /* intermediate buffer for UMP input */
+ u32 input_buf[4];
+ int input_buf_head;
+ int input_pending;
+
+ struct mutex open_mutex;
+
+#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
+ spinlock_t legacy_locks[2];
+ struct snd_rawmidi *legacy_rmidi;
+ struct snd_rawmidi_substream *legacy_substreams[2][SNDRV_UMP_MAX_GROUPS];
+ unsigned char legacy_mapping[SNDRV_UMP_MAX_GROUPS];
+
+ /* for legacy output; need to open the actual substream unlike input */
+ int legacy_out_opens;
+ struct snd_rawmidi_file legacy_out_rfile;
+ struct ump_cvt_to_ump *out_cvts;
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
+ struct snd_seq_device *seq_dev;
+ const struct snd_seq_ump_ops *seq_ops;
+ void *seq_client;
+#endif
+};
+
+/* ops filled by UMP drivers */
+struct snd_ump_ops {
+ int (*open)(struct snd_ump_endpoint *ump, int dir);
+ void (*close)(struct snd_ump_endpoint *ump, int dir);
+ void (*trigger)(struct snd_ump_endpoint *ump, int dir, int up);
+ void (*drain)(struct snd_ump_endpoint *ump, int dir);
+};
+
+/* ops filled by sequencer binding */
+struct snd_seq_ump_ops {
+ void (*input_receive)(struct snd_ump_endpoint *ump,
+ const u32 *data, int words);
+ int (*notify_fb_change)(struct snd_ump_endpoint *ump,
+ struct snd_ump_block *fb);
+ int (*switch_protocol)(struct snd_ump_endpoint *ump);
+};
+
+struct snd_ump_block {
+ struct snd_ump_block_info info;
+ struct snd_ump_endpoint *ump;
+
+ void *private_data;
+ void (*private_free)(struct snd_ump_block *blk);
+
+ struct list_head list;
+};
+
+#define rawmidi_to_ump(rmidi) container_of(rmidi, struct snd_ump_endpoint, core)
+
+int snd_ump_endpoint_new(struct snd_card *card, char *id, int device,
+ int output, int input,
+ struct snd_ump_endpoint **ump_ret);
+int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump);
+int snd_ump_block_new(struct snd_ump_endpoint *ump, unsigned int blk,
+ unsigned int direction, unsigned int first_group,
+ unsigned int num_groups, struct snd_ump_block **blk_ret);
+int snd_ump_receive(struct snd_ump_endpoint *ump, const u32 *buffer, int count);
+int snd_ump_transmit(struct snd_ump_endpoint *ump, u32 *buffer, int count);
+
+#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
+int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ char *id, int device);
+#else
+static inline int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ char *id, int device)
+{
+ return 0;
+}
+#endif
+
+int snd_ump_receive_ump_val(struct snd_ump_endpoint *ump, u32 val);
+int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol);
+
+/*
+ * Some definitions for UMP
+ */
+
+/* MIDI 2.0 Message Type */
+enum {
+ UMP_MSG_TYPE_UTILITY = 0x00,
+ UMP_MSG_TYPE_SYSTEM = 0x01,
+ UMP_MSG_TYPE_MIDI1_CHANNEL_VOICE = 0x02,
+ UMP_MSG_TYPE_DATA = 0x03,
+ UMP_MSG_TYPE_MIDI2_CHANNEL_VOICE = 0x04,
+ UMP_MSG_TYPE_EXTENDED_DATA = 0x05,
+ UMP_MSG_TYPE_FLEX_DATA = 0x0d,
+ UMP_MSG_TYPE_STREAM = 0x0f,
+};
+
+/* MIDI 2.0 SysEx / Data Status; same values for both 7-bit and 8-bit SysEx */
+enum {
+ UMP_SYSEX_STATUS_SINGLE = 0,
+ UMP_SYSEX_STATUS_START = 1,
+ UMP_SYSEX_STATUS_CONTINUE = 2,
+ UMP_SYSEX_STATUS_END = 3,
+};
+
+/* UMP Utility Type Status (type 0x0) */
+enum {
+ UMP_UTILITY_MSG_STATUS_NOOP = 0x00,
+ UMP_UTILITY_MSG_STATUS_JR_CLOCK = 0x01,
+ UMP_UTILITY_MSG_STATUS_JR_TSTAMP = 0x02,
+ UMP_UTILITY_MSG_STATUS_DCTPQ = 0x03,
+ UMP_UTILITY_MSG_STATUS_DC = 0x04,
+};
+
+/* UMP Stream Message Status (type 0xf) */
+enum {
+ UMP_STREAM_MSG_STATUS_EP_DISCOVERY = 0x00,
+ UMP_STREAM_MSG_STATUS_EP_INFO = 0x01,
+ UMP_STREAM_MSG_STATUS_DEVICE_INFO = 0x02,
+ UMP_STREAM_MSG_STATUS_EP_NAME = 0x03,
+ UMP_STREAM_MSG_STATUS_PRODUCT_ID = 0x04,
+ UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST = 0x05,
+ UMP_STREAM_MSG_STATUS_STREAM_CFG = 0x06,
+ UMP_STREAM_MSG_STATUS_FB_DISCOVERY = 0x10,
+ UMP_STREAM_MSG_STATUS_FB_INFO = 0x11,
+ UMP_STREAM_MSG_STATUS_FB_NAME = 0x12,
+ UMP_STREAM_MSG_STATUS_START_CLIP = 0x20,
+ UMP_STREAM_MSG_STATUS_END_CLIP = 0x21,
+};
+
+/* UMP Endpoint Discovery filter bitmap */
+enum {
+ UMP_STREAM_MSG_REQUEST_EP_INFO = (1U << 0),
+ UMP_STREAM_MSG_REQUEST_DEVICE_INFO = (1U << 1),
+ UMP_STREAM_MSG_REQUEST_EP_NAME = (1U << 2),
+ UMP_STREAM_MSG_REQUEST_PRODUCT_ID = (1U << 3),
+ UMP_STREAM_MSG_REQUEST_STREAM_CFG = (1U << 4),
+};
+
+/* UMP Function Block Discovery filter bitmap */
+enum {
+ UMP_STREAM_MSG_REQUEST_FB_INFO = (1U << 0),
+ UMP_STREAM_MSG_REQUEST_FB_NAME = (1U << 1),
+};
+
+/* UMP Endpoint Info capability bits (used for protocol request/notify, too) */
+enum {
+ UMP_STREAM_MSG_EP_INFO_CAP_TXJR = (1U << 0), /* Sending JRTS */
+ UMP_STREAM_MSG_EP_INFO_CAP_RXJR = (1U << 1), /* Receiving JRTS */
+ UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 = (1U << 8), /* MIDI 1.0 */
+ UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 = (1U << 9), /* MIDI 2.0 */
+};
+
+/* UMP EP / FB name string format; same as SysEx string handling */
+enum {
+ UMP_STREAM_MSG_FORMAT_SINGLE = 0,
+ UMP_STREAM_MSG_FORMAT_START = 1,
+ UMP_STREAM_MSG_FORMAT_CONTINUE = 2,
+ UMP_STREAM_MSG_FORMAT_END = 3,
+};
+
+/*
+ * Helpers for retrieving / filling bits from UMP
+ */
+/* get the message type (4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_type(u32 data)
+{
+ return data >> 28;
+}
+
+/* get the group number (0-based, 4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_group(u32 data)
+{
+ return (data >> 24) & 0x0f;
+}
+
+/* get the MIDI status code (4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_status_code(u32 data)
+{
+ return (data >> 20) & 0x0f;
+}
+
+/* get the MIDI channel number (0-based, 4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_channel(u32 data)
+{
+ return (data >> 16) & 0x0f;
+}
+
+/* get the MIDI status + channel combo byte (8bit) from a UMP packet (header) */
+static inline unsigned char ump_message_status_channel(u32 data)
+{
+ return (data >> 16) & 0xff;
+}
+
+/* compose a UMP packet (header) from type, group and status values */
+static inline u32 ump_compose(unsigned char type, unsigned char group,
+ unsigned char status, unsigned char channel)
+{
+ return ((u32)type << 28) | ((u32)group << 24) | ((u32)status << 20) |
+ ((u32)channel << 16);
+}
+
+/* get SysEx message status (for both 7 and 8bits) from a UMP packet (header) */
+static inline unsigned char ump_sysex_message_status(u32 data)
+{
+ return (data >> 20) & 0xf;
+}
+
+/* get SysEx message length (for both 7 and 8bits) from a UMP packet (header) */
+static inline unsigned char ump_sysex_message_length(u32 data)
+{
+ return (data >> 16) & 0xf;
+}
+
+/* For Stream Messages */
+static inline unsigned char ump_stream_message_format(u32 data)
+{
+ return (data >> 26) & 0x03;
+}
+
+static inline unsigned int ump_stream_message_status(u32 data)
+{
+ return (data >> 16) & 0x3ff;
+}
+
+static inline u32 ump_stream_compose(unsigned char status, unsigned short form)
+{
+ return (UMP_MSG_TYPE_STREAM << 28) | ((u32)form << 26) |
+ ((u32)status << 16);
+}
+
+#define ump_is_groupless_msg(type) \
+ ((type) == UMP_MSG_TYPE_UTILITY || (type) == UMP_MSG_TYPE_STREAM)
+
+#endif /* __SOUND_UMP_H */
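
ump_compose() fills only the common header fields of a 32-bit UMP word; the payload bits below bit 16 are message-specific. A hedged sketch composing and decoding a MIDI 1.0 Note On (UMP_MSG_STATUS_NOTE_ON comes from ump_msg.h, shown further below; note and velocity occupy bits 15..8 and 7..0 per struct snd_ump_midi1_msg_note):

/* group 0, channel 0, note 60, velocity 100 */
u32 word = ump_compose(UMP_MSG_TYPE_MIDI1_CHANNEL_VOICE, 0,
		       UMP_MSG_STATUS_NOTE_ON, 0) | (60 << 8) | 100;

unsigned char type   = ump_message_type(word);        /* 0x02 */
unsigned char status = ump_message_status_code(word); /* 0x9  */
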
diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
new file mode 100644
index 000000000000..28c364c63245
--- /dev/null
+++ b/include/sound/ump_convert.h
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef __SOUND_UMP_CONVERT_H
+#define __SOUND_UMP_CONVERT_H
+
+#include <sound/ump_msg.h>
+
+/* context for converting from legacy control messages to UMP packet */
+struct ump_cvt_to_ump_bank {
+ bool rpn_set;
+ bool nrpn_set;
+ bool bank_set;
+ unsigned char cc_rpn_msb, cc_rpn_lsb;
+ unsigned char cc_nrpn_msb, cc_nrpn_lsb;
+ unsigned char cc_data_msb, cc_data_lsb;
+ unsigned char cc_bank_msb, cc_bank_lsb;
+};
+
+/* context for converting from MIDI1 byte stream to UMP packet */
+struct ump_cvt_to_ump {
+ /* MIDI1 intermediate buffer */
+ unsigned char buf[4];
+ int len;
+ int cmd_bytes;
+
+ /* UMP output packet */
+ u32 ump[4];
+ int ump_bytes;
+
+ /* various status */
+ unsigned int in_sysex;
+ struct ump_cvt_to_ump_bank bank[16]; /* per channel */
+};
+
+int snd_ump_convert_from_ump(const u32 *data, unsigned char *dst,
+ unsigned char *group_ret);
+void snd_ump_convert_to_ump(struct ump_cvt_to_ump *cvt, unsigned char group,
+ unsigned int protocol, unsigned char c);
+
+/* reset the converter context, called at each open to ump */
+static inline void snd_ump_convert_reset(struct ump_cvt_to_ump *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+#endif /* __SOUND_UMP_CONVERT_H */
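
snd_ump_convert_to_ump() consumes one legacy MIDI byte per call and deposits a finished packet in the context once enough bytes have arrived. A hypothetical call sequence (the protocol argument is whatever the endpoint negotiated; 0 here is only a placeholder):

struct ump_cvt_to_ump cvt;
static const unsigned char note_on[3] = { 0x90, 60, 100 };
unsigned int protocol = 0; /* placeholder for the negotiated protocol flag */
int i;

snd_ump_convert_reset(&cvt);
for (i = 0; i < 3; i++)
	snd_ump_convert_to_ump(&cvt, 0, protocol, note_on[i]);
/* cvt.ump_bytes is nonzero once the full packet sits in cvt.ump[] */
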
diff --git a/include/sound/ump_msg.h b/include/sound/ump_msg.h
new file mode 100644
index 000000000000..72f60ddfea75
--- /dev/null
+++ b/include/sound/ump_msg.h
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Universal MIDI Packet (UMP): Message Definitions
+ */
+#ifndef __SOUND_UMP_MSG_H
+#define __SOUND_UMP_MSG_H
+
+/* MIDI 1.0 / 2.0 Status Code (4bit) */
+enum {
+ UMP_MSG_STATUS_PER_NOTE_RCC = 0x0,
+ UMP_MSG_STATUS_PER_NOTE_ACC = 0x1,
+ UMP_MSG_STATUS_RPN = 0x2,
+ UMP_MSG_STATUS_NRPN = 0x3,
+ UMP_MSG_STATUS_RELATIVE_RPN = 0x4,
+ UMP_MSG_STATUS_RELATIVE_NRPN = 0x5,
+ UMP_MSG_STATUS_PER_NOTE_PITCH_BEND = 0x6,
+ UMP_MSG_STATUS_NOTE_OFF = 0x8,
+ UMP_MSG_STATUS_NOTE_ON = 0x9,
+ UMP_MSG_STATUS_POLY_PRESSURE = 0xa,
+ UMP_MSG_STATUS_CC = 0xb,
+ UMP_MSG_STATUS_PROGRAM = 0xc,
+ UMP_MSG_STATUS_CHANNEL_PRESSURE = 0xd,
+ UMP_MSG_STATUS_PITCH_BEND = 0xe,
+ UMP_MSG_STATUS_PER_NOTE_MGMT = 0xf,
+};
+
+/* MIDI 1.0 Channel Control (7bit) */
+enum {
+ UMP_CC_BANK_SELECT = 0,
+ UMP_CC_MODULATION = 1,
+ UMP_CC_BREATH = 2,
+ UMP_CC_FOOT = 4,
+ UMP_CC_PORTAMENTO_TIME = 5,
+ UMP_CC_DATA = 6,
+ UMP_CC_VOLUME = 7,
+ UMP_CC_BALANCE = 8,
+ UMP_CC_PAN = 10,
+ UMP_CC_EXPRESSION = 11,
+ UMP_CC_EFFECT_CONTROL_1 = 12,
+ UMP_CC_EFFECT_CONTROL_2 = 13,
+ UMP_CC_GP_1 = 16,
+ UMP_CC_GP_2 = 17,
+ UMP_CC_GP_3 = 18,
+ UMP_CC_GP_4 = 19,
+ UMP_CC_BANK_SELECT_LSB = 32,
+ UMP_CC_MODULATION_LSB = 33,
+ UMP_CC_BREATH_LSB = 34,
+ UMP_CC_FOOT_LSB = 36,
+ UMP_CC_PORTAMENTO_TIME_LSB = 37,
+ UMP_CC_DATA_LSB = 38,
+ UMP_CC_VOLUME_LSB = 39,
+ UMP_CC_BALANCE_LSB = 40,
+ UMP_CC_PAN_LSB = 42,
+ UMP_CC_EXPRESSION_LSB = 43,
+ UMP_CC_EFFECT1_LSB = 44,
+ UMP_CC_EFFECT2_LSB = 45,
+ UMP_CC_GP_1_LSB = 48,
+ UMP_CC_GP_2_LSB = 49,
+ UMP_CC_GP_3_LSB = 50,
+ UMP_CC_GP_4_LSB = 51,
+ UMP_CC_SUSTAIN = 64,
+ UMP_CC_PORTAMENTO_SWITCH = 65,
+ UMP_CC_SOSTENUTO = 66,
+ UMP_CC_SOFT_PEDAL = 67,
+ UMP_CC_LEGATO = 68,
+ UMP_CC_HOLD_2 = 69,
+ UMP_CC_SOUND_CONTROLLER_1 = 70,
+ UMP_CC_SOUND_CONTROLLER_2 = 71,
+ UMP_CC_SOUND_CONTROLLER_3 = 72,
+ UMP_CC_SOUND_CONTROLLER_4 = 73,
+ UMP_CC_SOUND_CONTROLLER_5 = 74,
+ UMP_CC_SOUND_CONTROLLER_6 = 75,
+ UMP_CC_SOUND_CONTROLLER_7 = 76,
+ UMP_CC_SOUND_CONTROLLER_8 = 77,
+ UMP_CC_SOUND_CONTROLLER_9 = 78,
+ UMP_CC_SOUND_CONTROLLER_10 = 79,
+ UMP_CC_GP_5 = 80,
+ UMP_CC_GP_6 = 81,
+ UMP_CC_GP_7 = 82,
+ UMP_CC_GP_8 = 83,
+ UMP_CC_PORTAMENTO_CONTROL = 84,
+ UMP_CC_EFFECT_1 = 91,
+ UMP_CC_EFFECT_2 = 92,
+ UMP_CC_EFFECT_3 = 93,
+ UMP_CC_EFFECT_4 = 94,
+ UMP_CC_EFFECT_5 = 95,
+ UMP_CC_DATA_INC = 96,
+ UMP_CC_DATA_DEC = 97,
+ UMP_CC_NRPN_LSB = 98,
+ UMP_CC_NRPN_MSB = 99,
+ UMP_CC_RPN_LSB = 100,
+ UMP_CC_RPN_MSB = 101,
+ UMP_CC_ALL_SOUND_OFF = 120,
+ UMP_CC_RESET_ALL = 121,
+ UMP_CC_LOCAL_CONTROL = 122,
+ UMP_CC_ALL_NOTES_OFF = 123,
+ UMP_CC_OMNI_OFF = 124,
+ UMP_CC_OMNI_ON = 125,
+ UMP_CC_POLY_OFF = 126,
+ UMP_CC_POLY_ON = 127,
+};
+
+/* MIDI 1.0 / 2.0 System Messages (0xfx) */
+enum {
+ UMP_SYSTEM_STATUS_MIDI_TIME_CODE = 0xf1,
+ UMP_SYSTEM_STATUS_SONG_POSITION = 0xf2,
+ UMP_SYSTEM_STATUS_SONG_SELECT = 0xf3,
+ UMP_SYSTEM_STATUS_TUNE_REQUEST = 0xf6,
+ UMP_SYSTEM_STATUS_TIMING_CLOCK = 0xf8,
+ UMP_SYSTEM_STATUS_START = 0xfa,
+ UMP_SYSTEM_STATUS_CONTINUE = 0xfb,
+ UMP_SYSTEM_STATUS_STOP = 0xfc,
+ UMP_SYSTEM_STATUS_ACTIVE_SENSING = 0xfe,
+ UMP_SYSTEM_STATUS_RESET = 0xff,
+};
+
+/* MIDI 1.0 Realtime and SysEx status messages (0xfx) */
+enum {
+ UMP_MIDI1_MSG_REALTIME = 0xf0, /* mask */
+ UMP_MIDI1_MSG_SYSEX_START = 0xf0,
+ UMP_MIDI1_MSG_SYSEX_END = 0xf7,
+};
+
+/*
+ * UMP Message Definitions
+ */
+
+/* MIDI 1.0 Note Off / Note On (32bit) */
+struct snd_ump_midi1_msg_note {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 velocity:8;
+#else
+ u32 velocity:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Poly Pressure (32bit) */
+struct snd_ump_midi1_msg_paf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 data:8;
+#else
+ u32 data:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Control Change (32bit) */
+struct snd_ump_midi1_msg_cc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 index:8;
+ u32 data:8;
+#else
+ u32 data:8;
+ u32 index:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Program Change (32bit) */
+struct snd_ump_midi1_msg_program {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 program:8;
+ u32 reserved:8;
+#else
+ u32 reserved:8;
+ u32 program:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Channel Pressure (32bit) */
+struct snd_ump_midi1_msg_caf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 data:8;
+ u32 reserved:8;
+#else
+ u32 reserved:8;
+ u32 data:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Pitch Bend (32bit) */
+struct snd_ump_midi1_msg_pitchbend {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 data_lsb:8;
+ u32 data_msb:8;
+#else
+ u32 data_msb:8;
+ u32 data_lsb:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* System Common and Real Time messages (32bit); no channel field */
+struct snd_ump_system_msg {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:8;
+ u32 parm1:8;
+ u32 parm2:8;
+#else
+ u32 parm2:8;
+ u32 parm1:8;
+ u32 status:8;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 UMP CVM (32bit) */
+union snd_ump_midi1_msg {
+ struct snd_ump_midi1_msg_note note;
+ struct snd_ump_midi1_msg_paf paf;
+ struct snd_ump_midi1_msg_cc cc;
+ struct snd_ump_midi1_msg_program pg;
+ struct snd_ump_midi1_msg_caf caf;
+ struct snd_ump_midi1_msg_pitchbend pb;
+ struct snd_ump_system_msg system;
+ u32 raw;
+};
+
+/* MIDI 2.0 Note Off / Note On (64bit) */
+struct snd_ump_midi2_msg_note {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 attribute_type:8;
+ /* 1 */
+ u32 velocity:16;
+ u32 attribute_data:16;
+#else
+ /* 0 */
+ u32 attribute_type:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 attribute_data:16;
+ u32 velocity:16;
+#endif
+} __packed;
+
+/* MIDI 2.0 Poly Pressure (64bit) */
+struct snd_ump_midi2_msg_paf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 reserved:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Per-Note Controller (64bit) */
+struct snd_ump_midi2_msg_pernote_cc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 index:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 index:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Per-Note Management (64bit) */
+struct snd_ump_midi2_msg_pernote_mgmt {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 flags:8;
+ /* 1 */
+ u32 reserved;
+#else
+ /* 0 */
+ u32 flags:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 reserved;
+#endif
+} __packed;
+
+/* MIDI 2.0 Control Change (64bit) */
+struct snd_ump_midi2_msg_cc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 index:8;
+ u32 reserved:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:8;
+ u32 index:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Registered Controller (RPN) / Assignable Controller (NRPN) (64bit) */
+struct snd_ump_midi2_msg_rpn {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 bank:8;
+ u32 index:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 index:8;
+ u32 bank:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Program Change (64bit) */
+struct snd_ump_midi2_msg_program {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 reserved:15;
+ u32 bank_valid:1;
+ /* 1 */
+ u32 program:8;
+ u32 reserved2:8;
+ u32 bank_msb:8;
+ u32 bank_lsb:8;
+#else
+ /* 0 */
+ u32 bank_valid:1;
+ u32 reserved:15;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 bank_lsb:8;
+ u32 bank_msb:8;
+ u32 reserved2:8;
+ u32 program:8;
+#endif
+} __packed;
+
+/* MIDI 2.0 Channel Pressure (64bit) */
+struct snd_ump_midi2_msg_caf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 reserved:16;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:16;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Pitch Bend (64bit) */
+struct snd_ump_midi2_msg_pitchbend {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 reserved:16;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:16;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Per-Note Pitch Bend (64bit) */
+struct snd_ump_midi2_msg_pernote_pitchbend {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 reserved:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 UMP CVM (64bit) */
+union snd_ump_midi2_msg {
+ struct snd_ump_midi2_msg_note note;
+ struct snd_ump_midi2_msg_paf paf;
+ struct snd_ump_midi2_msg_pernote_cc pernote_cc;
+ struct snd_ump_midi2_msg_pernote_mgmt pernote_mgmt;
+ struct snd_ump_midi2_msg_cc cc;
+ struct snd_ump_midi2_msg_rpn rpn;
+ struct snd_ump_midi2_msg_program pg;
+ struct snd_ump_midi2_msg_caf caf;
+ struct snd_ump_midi2_msg_pitchbend pb;
+ struct snd_ump_midi2_msg_pernote_pitchbend pernote_pb;
+ u32 raw[2];
+};
+
+/* UMP Stream Message: Endpoint Discovery (128bit) */
+struct snd_ump_stream_msg_ep_discovery {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 ump_version_major:8;
+ u32 ump_version_minor:8;
+ /* 1 */
+ u32 reserved:24;
+ u32 filter_bitmap:8;
+ /* 2-3 */
+ u32 reserved2[2];
+#else
+ /* 0 */
+ u32 ump_version_minor:8;
+ u32 ump_version_major:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 filter_bitmap:8;
+ u32 reserved:24;
+ /* 2-3 */
+ u32 reserved2[2];
+#endif
+} __packed;
+
+/* UMP Stream Message: Endpoint Info Notification (128bit) */
+struct snd_ump_stream_msg_ep_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 ump_version_major:8;
+ u32 ump_version_minor:8;
+ /* 1 */
+ u32 static_function_block:1;
+ u32 num_function_blocks:7;
+ u32 reserved:8;
+ u32 protocol:8;
+ u32 reserved2:6;
+ u32 jrts:2;
+ /* 2-3 */
+ u32 reserved3[2];
+#else
+ /* 0 */
+ u32 ump_version_minor:8;
+ u32 ump_version_major:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 jrts:2;
+ u32 reserved2:6;
+ u32 protocol:8;
+ u32 reserved:8;
+ u32 num_function_blocks:7;
+ u32 static_function_block:1;
+ /* 2-3 */
+ u32 reserved3[2];
+#endif
+} __packed;
+
+/* UMP Stream Message: Device Info Notification (128bit) */
+struct snd_ump_stream_msg_devince_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 reserved:16;
+ /* 1 */
+ u32 manufacture_id;
+ /* 2 */
+ u8 family_lsb;
+ u8 family_msb;
+ u8 model_lsb;
+ u8 model_msb;
+ /* 3 */
+ u32 sw_revision;
+#else
+ /* 0 */
+ u32 reserved:16;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 manufacture_id;
+ /* 2 */
+ u8 model_msb;
+ u8 model_lsb;
+ u8 family_msb;
+ u8 family_lsb;
+ /* 3 */
+ u32 sw_revision;
+#endif
+} __packed;
+
+/* UMP Stream Message: Stream Config Request / Notification (128bit) */
+struct snd_ump_stream_msg_stream_cfg {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 protocol:8;
+ u32 reserved:6;
+ u32 jrts:2;
+ /* 1-3 */
+ u32 reserved2[3];
+#else
+ /* 0 */
+ u32 jrts:2;
+ u32 reserved:6;
+ u32 protocol:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1-3 */
+ u32 reserved2[3];
+#endif
+} __packed;
+
+/* UMP Stream Message: Function Block Discovery (128bit) */
+struct snd_ump_stream_msg_fb_discovery {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 function_block_id:8;
+ u32 filter:8;
+ /* 1-3 */
+ u32 reserved[3];
+#else
+ /* 0 */
+ u32 filter:8;
+ u32 function_block_id:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1-3 */
+ u32 reserved[3];
+#endif
+} __packed;
+
+/* UMP Stream Message: Function Block Info Notification (128bit) */
+struct snd_ump_stream_msg_fb_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 active:1;
+ u32 function_block_id:7;
+ u32 reserved:2;
+ u32 ui_hint:2;
+ u32 midi_10:2;
+ u32 direction:2;
+ /* 1 */
+ u32 first_group:8;
+ u32 num_groups:8;
+ u32 midi_ci_version:8;
+ u32 sysex8_streams:8;
+ /* 2-3 */
+ u32 reserved2[2];
+#else
+ /* 0 */
+ u32 direction:2;
+ u32 midi_10:2;
+ u32 ui_hint:2;
+ u32 reserved:2;
+ u32 function_block_id:7;
+ u32 active:1;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 sysex8_streams:8;
+ u32 midi_ci_version:8;
+ u32 num_groups:8;
+ u32 first_group:8;
+ /* 2-3 */
+ u32 reserved2[2];
+#endif
+} __packed;
+
+/* UMP Stream Message: Function Block Name Notification (128bit) */
+struct snd_ump_stream_msg_fb_name {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u16 type:4;
+ u16 format:2;
+ u16 status:10;
+ u8 function_block_id;
+ u8 name0;
+ /* 1-3 */
+ u8 name[12];
+#else
+ /* 0 */
+ u8 name0;
+ u8 function_block_id;
+ u16 status:10;
+ u16 format:2;
+ u16 type:4;
+ /* 1-3 */
+ u8 name[12]; // FIXME: byte order
+#endif
+} __packed;
+
+/* MIDI 2.0 Stream Messages (128bit) */
+union snd_ump_stream_msg {
+ struct snd_ump_stream_msg_ep_discovery ep_discovery;
+ struct snd_ump_stream_msg_ep_info ep_info;
+ struct snd_ump_stream_msg_devince_info device_info;
+ struct snd_ump_stream_msg_stream_cfg stream_cfg;
+ struct snd_ump_stream_msg_fb_discovery fb_discovery;
+ struct snd_ump_stream_msg_fb_info fb_info;
+ struct snd_ump_stream_msg_fb_name fb_name;
+ u32 raw[4];
+};
+
+#endif /* __SOUND_UMP_MSG_H */
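
All of the structs above are bitfield views over the raw 32-bit words, so a packet can be assembled through the union and read out via raw[]. A minimal sketch for a MIDI 2.0 Note On (values illustrative only):

union snd_ump_midi2_msg m = { .raw = { 0, 0 } };

m.note.type     = UMP_MSG_TYPE_MIDI2_CHANNEL_VOICE;
m.note.group    = 0;
m.note.status   = UMP_MSG_STATUS_NOTE_ON;
m.note.channel  = 0;
m.note.note     = 60;
m.note.velocity = 0xffff; /* full-scale 16-bit velocity */
/* m.raw[0] and m.raw[1] now hold the two words of the packet */
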
diff --git a/include/sound/wavefront.h b/include/sound/wavefront.h
index 37ed437e2123..ef6f46accf29 100644
--- a/include/sound/wavefront.h
+++ b/include/sound/wavefront.h
@@ -8,34 +8,6 @@
* Copyright (c) by Paul Barton-Davis <pbd@op.net>
*/
-#if (!defined(__GNUC__) && !defined(__GNUG__))
-
- You will not be able to compile this file correctly without gcc, because
- it is necessary to pack the "wavefront_alias" structure to a size
- of 22 bytes, corresponding to 16-bit alignment (as would have been
- the case on the original platform, MS-DOS). If this is not done,
- then WavePatch-format files cannot be read/written correctly.
- The method used to do this here ("__attribute__((packed)") is
- completely compiler dependent.
-
- All other wavefront_* types end up aligned to 32 bit values and
- still have the same (correct) size.
-
-#else
-
- /* However, note that as of G++ 2.7.3.2, g++ was unable to
- correctly parse *type* __attribute__ tags. It will do the
- right thing if we use the "packed" attribute on each struct
- member, which has the same semantics anyway.
- */
-
-#endif /* __GNUC__ */
-
-/***************************** WARNING ********************************
- PLEASE DO NOT MODIFY THIS FILE IN ANY WAY THAT AFFECTS ITS ABILITY TO
- BE USED WITH EITHER C *OR* C++.
- **********************************************************************/
-
#ifndef NUM_MIDIKEYS
#define NUM_MIDIKEYS 128
#endif /* NUM_MIDIKEYS */
@@ -44,29 +16,6 @@
#define NUM_MIDICHANNELS 16
#endif /* NUM_MIDICHANNELS */
-/* These are very useful/important. the original wavefront interface
- was developed on a 16 bit system, where sizeof(int) = 2
- bytes. Defining things like this makes the code much more portable, and
- easier to understand without having to toggle back and forth
- between a 16-bit view of the world and a 32-bit one.
- */
-
-#ifndef __KERNEL__
-/* keep them for compatibility */
-typedef short s16;
-typedef unsigned short u16;
-typedef int s32;
-typedef unsigned int u32;
-typedef char s8;
-typedef unsigned char u8;
-typedef s16 INT16;
-typedef u16 UINT16;
-typedef s32 INT32;
-typedef u32 UINT32;
-typedef s8 CHAR8;
-typedef u8 UCHAR8;
-#endif
-
/* Pseudo-commands not part of the WaveFront command set.
These are used for various driver controls and direct
hardware control.
@@ -468,7 +417,7 @@ typedef struct wf_alias {
*/
u8 sixteen_bit_padding;
-} __attribute__((packed)) wavefront_alias;
+} __packed wavefront_alias;
typedef struct wf_drum {
u8 PatchNumber;
diff --git a/include/sound/wm0010.h b/include/sound/wm0010.h
index 13b473935ca1..14ff9056c5d0 100644
--- a/include/sound/wm0010.h
+++ b/include/sound/wm0010.h
@@ -11,12 +11,6 @@
#define WM0010_PDATA_H
struct wm0010_pdata {
- int gpio_reset;
-
- /* Set if there is an inverter between the GPIO controlling
- * the reset signal and the device.
- */
- int reset_active_high;
int irq_flags;
};
diff --git a/include/sound/wm1250-ev1.h b/include/sound/wm1250-ev1.h
deleted file mode 100644
index d16614ebecb4..000000000000
--- a/include/sound/wm1250-ev1.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/wm1250-ev1.h - Platform data for WM1250-EV1
- *
- * Copyright 2011 Wolfson Microelectronics. PLC.
- */
-
-#ifndef __LINUX_SND_WM1250_EV1_H
-#define __LINUX_SND_WM1250_EV1_H
-
-#define WM1250_EV1_NUM_GPIOS 5
-
-#define WM1250_EV1_GPIO_CLK_ENA 0
-#define WM1250_EV1_GPIO_CLK_SEL0 1
-#define WM1250_EV1_GPIO_CLK_SEL1 2
-#define WM1250_EV1_GPIO_OSR 3
-#define WM1250_EV1_GPIO_MASTER 4
-
-
-struct wm1250_ev1_pdata {
- int gpios[WM1250_EV1_NUM_GPIOS];
-};
-
-#endif
diff --git a/include/sound/wm2200.h b/include/sound/wm2200.h
index 9987e6c09bdc..2e4913ee2505 100644
--- a/include/sound/wm2200.h
+++ b/include/sound/wm2200.h
@@ -42,8 +42,6 @@ struct wm2200_micbias {
};
struct wm2200_pdata {
- int reset; /** GPIO controlling /RESET, if any */
- int ldo_ena; /** GPIO controlling LODENA, if any */
int irq_flags;
int gpio_defaults[4];
diff --git a/include/sound/wm5100.h b/include/sound/wm5100.h
index b94badf72947..1c48090fdb2c 100644
--- a/include/sound/wm5100.h
+++ b/include/sound/wm5100.h
@@ -36,11 +36,7 @@ struct wm5100_jack_mode {
#define WM5100_GPIO_SET 0x10000
struct wm5100_pdata {
- int reset; /** GPIO controlling /RESET, if any */
- int ldo_ena; /** GPIO controlling LODENA, if any */
- int hp_pol; /** GPIO controlling headset polarity, if any */
int irq_flags;
- int gpio_base;
struct wm5100_jack_mode jack_modes[2];
diff --git a/include/sound/wm8996.h b/include/sound/wm8996.h
index 247f9917e33d..342abeef288f 100644
--- a/include/sound/wm8996.h
+++ b/include/sound/wm8996.h
@@ -33,8 +33,6 @@ struct wm8996_retune_mobile_config {
struct wm8996_pdata {
int irq_flags; /** Set IRQ trigger flags; default active low */
- int ldo_ena; /** GPIO for LDO1; -1 for none */
-
int micdet_def; /** Default MICDET_SRC/HP1FB_SRC/MICD_BIAS */
enum wm8996_inmode inl_mode;
@@ -42,7 +40,6 @@ struct wm8996_pdata {
u32 spkmute_seq; /** Value for register 0x802 */
- int gpio_base;
u32 gpio_default[5];
int num_retune_mobile_cfgs;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 1eccb2ac7d02..60af7c63b34e 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -26,6 +26,7 @@ struct sock;
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
#define ISCSI_IQN_LEN 224
+#define NA_AUTHENTICATION_INHERITED -1
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -49,9 +50,6 @@ struct sock;
#define TA_LOGIN_TIMEOUT 15
#define TA_LOGIN_TIMEOUT_MAX 30
#define TA_LOGIN_TIMEOUT_MIN 5
-#define TA_NETIF_TIMEOUT 2
-#define TA_NETIF_TIMEOUT_MAX 15
-#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
@@ -143,7 +141,7 @@ enum tiqn_state_table {
TIQN_STATE_SHUTDOWN = 2,
};
-/* struct iscsi_cmd->cmd_flags */
+/* struct iscsit_cmd->cmd_flags */
enum cmd_flags_table {
ICF_GOT_LAST_DATAOUT = 0x00000001,
ICF_GOT_DATACK_SNACK = 0x00000002,
@@ -157,7 +155,7 @@ enum cmd_flags_table {
ICF_SENDTARGETS_SINGLE = 0x00000200,
};
-/* struct iscsi_cmd->i_state */
+/* struct iscsit_cmd->i_state */
enum cmd_i_state_table {
ISTATE_NO_STATE = 0,
ISTATE_NEW_CMD = 1,
@@ -297,7 +295,7 @@ struct iscsi_sess_ops {
struct iscsi_queue_req {
int state;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
struct list_head qr_list;
};
@@ -327,7 +325,7 @@ struct iscsi_ooo_cmdsn {
u32 batch_count;
u32 cmdsn;
u32 exp_cmdsn;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
struct list_head ooo_list;
} ____cacheline_aligned;
@@ -349,7 +347,7 @@ struct iscsi_r2t {
struct list_head r2t_list;
} ____cacheline_aligned;
-struct iscsi_cmd {
+struct iscsit_cmd {
enum iscsi_timer_flags_table dataout_timer_flags;
/* DataOUT timeout retries */
u8 dataout_timeout_retries;
@@ -405,22 +403,22 @@ struct iscsi_cmd {
u32 outstanding_r2ts;
/* Next R2T Offset when DataSequenceInOrder=Yes */
u32 r2t_offset;
- /* Iovec current and orig count for iscsi_cmd->iov_data */
+ /* Iovec current and orig count for iscsit_cmd->iov_data */
u32 iov_data_count;
u32 orig_iov_data_count;
/* Number of miscellaneous iovecs used for IP stack calls */
u32 iov_misc_count;
- /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ /* Number of struct iscsi_pdu in struct iscsit_cmd->pdu_list */
u32 pdu_count;
- /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ /* Next struct iscsi_pdu to send in struct iscsit_cmd->pdu_list */
u32 pdu_send_order;
- /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ /* Current struct iscsi_pdu in struct iscsit_cmd->pdu_list */
u32 pdu_start;
- /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ /* Next struct iscsi_seq to send in struct iscsit_cmd->seq_list */
u32 seq_send_order;
- /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ /* Number of struct iscsi_seq in struct iscsit_cmd->seq_list */
u32 seq_count;
- /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ /* Current struct iscsi_seq in struct iscsit_cmd->seq_list */
u32 seq_no;
/* Lowest offset in current DataOUT sequence */
u32 seq_start_offset;
@@ -444,12 +442,12 @@ struct iscsi_cmd {
enum dma_data_direction data_direction;
/* iSCSI PDU Header + CRC */
unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
- /* Number of times struct iscsi_cmd is present in immediate queue */
+ /* Number of times struct iscsit_cmd is present in immediate queue */
atomic_t immed_queue_count;
atomic_t response_queue_count;
spinlock_t datain_lock;
spinlock_t dataout_timeout_lock;
- /* spinlock for protecting struct iscsi_cmd->i_state */
+ /* spinlock for protecting struct iscsit_cmd->i_state */
spinlock_t istate_lock;
/* spinlock for adding within command recovery entries */
spinlock_t error_lock;
@@ -478,11 +476,11 @@ struct iscsi_cmd {
/* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
struct iscsi_tmr_req *tmr_req;
	/* Connection this command is allegiant to */
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
/* Pointer to connection recovery entry */
struct iscsi_conn_recovery *cr;
/* Session the command is part of, used for connection recovery */
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
/* list_head for connection list */
struct list_head i_conn_node;
/* The TCM I/O descriptor that is accessed via container_of() */
@@ -503,12 +501,12 @@ struct iscsi_cmd {
struct iscsi_tmr_req {
bool task_reassign:1;
u32 exp_data_sn;
- struct iscsi_cmd *ref_cmd;
+ struct iscsit_cmd *ref_cmd;
struct iscsi_conn_recovery *conn_recovery;
struct se_tmr_req *se_tmr_req;
};
-struct iscsi_conn {
+struct iscsit_conn {
wait_queue_head_t queues_wq;
/* Authentication Successful for this connection */
u8 auth_complete;
@@ -561,12 +559,13 @@ struct iscsi_conn {
#define LOGIN_FLAGS_READ_ACTIVE 2
#define LOGIN_FLAGS_WRITE_ACTIVE 3
#define LOGIN_FLAGS_CLOSED 4
+#define LOGIN_FLAGS_WORKER_RUNNING 5
unsigned long login_flags;
struct delayed_work login_work;
struct iscsi_login *login;
struct timer_list nopin_timer;
struct timer_list nopin_response_timer;
- struct timer_list transport_timer;
+ struct timer_list login_timer;
struct task_struct *login_kworker;
/* Spinlock used for add/deleting cmd's from conn_cmd_list */
spinlock_t cmd_lock;
@@ -575,14 +574,17 @@ struct iscsi_conn {
spinlock_t nopin_timer_lock;
spinlock_t response_queue_lock;
spinlock_t state_lock;
+ spinlock_t login_timer_lock;
+ spinlock_t login_worker_lock;
/* libcrypto RX and TX contexts for crc32c */
struct ahash_request *conn_rx_hash;
struct ahash_request *conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
+ cpumask_var_t allowed_cpumask;
unsigned int conn_rx_reset_cpumask:1;
unsigned int conn_tx_reset_cpumask:1;
- /* list_head of struct iscsi_cmd for this connection */
+ /* list_head of struct iscsit_cmd for this connection */
struct list_head conn_cmd_list;
struct list_head immed_queue_list;
struct list_head response_queue_list;
@@ -597,7 +599,8 @@ struct iscsi_conn {
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
+ struct target_cmd_counter *cmd_cnt;
int bitmap_id;
int rx_thread_active;
struct task_struct *rx_thread;
@@ -617,11 +620,11 @@ struct iscsi_conn_recovery {
struct list_head conn_recovery_cmd_list;
spinlock_t conn_recovery_cmd_lock;
struct timer_list time2retain_timer;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct list_head cr_list;
} ____cacheline_aligned;
-struct iscsi_session {
+struct iscsit_session {
u8 initiator_vendor;
u8 isid[6];
enum iscsi_timer_flags_table time2retain_timer_flags;
@@ -709,11 +712,12 @@ struct iscsi_login {
char rsp[ISCSI_HDR_LEN];
char *req_buf;
char *rsp_buf;
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
struct iscsi_np *np;
} ____cacheline_aligned;
struct iscsi_node_attrib {
+ s32 authentication;
u32 dataout_timeout;
u32 dataout_timeout_retries;
u32 default_erl;
@@ -757,10 +761,15 @@ struct iscsi_node_acl {
struct iscsi_node_stat_grps node_stat_grps;
};
+static inline struct iscsi_node_acl *
+to_iscsi_nacl(struct se_node_acl *se_nacl)
+{
+ return container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+}
+
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
- u32 netif_timeout;
u32 generate_node_acls;
u32 cache_dynamic_acls;
u32 default_cmdsn_depth;
@@ -782,7 +791,6 @@ struct iscsi_np {
enum np_thread_state_table np_thread_state;
bool enabled;
atomic_t np_reset_count;
- enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
spinlock_t np_thread_lock;
@@ -790,7 +798,6 @@ struct iscsi_np {
struct socket *np_socket;
struct sockaddr_storage np_sockaddr;
struct task_struct *np_thread;
- struct timer_list np_login_timer;
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
@@ -838,6 +845,12 @@ struct iscsi_portal_group {
struct list_head tpg_list;
} ____cacheline_aligned;
+static inline struct iscsi_portal_group *
+to_iscsi_tpg(struct se_portal_group *se_tpg)
+{
+ return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+}
+
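
The two `container_of()` accessors added above replace open-coded casts at call sites. The idiom recovers the enclosing structure from a pointer to an embedded member, so no back-pointer needs to be stored. A minimal, self-contained sketch of the same pattern (hypothetical `*_demo` names, userspace C, simplified `container_of()` without the kernel's type checks):

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_node_acl_demo { int tag; };

struct iscsi_node_acl_demo {
	int node_attrib;
	struct se_node_acl_demo se_node_acl;	/* embedded member */
};

static struct iscsi_node_acl_demo *
to_iscsi_nacl_demo(struct se_node_acl_demo *se_nacl)
{
	return container_of(se_nacl, struct iscsi_node_acl_demo, se_node_acl);
}

int main(void)
{
	struct iscsi_node_acl_demo acl = { .node_attrib = 42 };
	struct se_node_acl_demo *inner = &acl.se_node_acl;

	/* Recover the outer struct from the embedded pointer. */
	printf("%d\n", to_iscsi_nacl_demo(inner)->node_attrib);	/* 42 */
	return 0;
}
```
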
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
@@ -878,12 +891,13 @@ struct iscsit_global {
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
spinlock_t ts_bitmap_lock;
+ cpumask_var_t allowed_cpumask;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
};
-static inline u32 session_get_next_ttt(struct iscsi_session *session)
+static inline u32 session_get_next_ttt(struct iscsit_session *session)
{
u32 ttt;
@@ -896,31 +910,10 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
return ttt;
}
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+extern struct iscsit_cmd *iscsit_find_cmd_from_itt(struct iscsit_conn *, itt_t);
+
+extern void iscsit_thread_check_cpumask(struct iscsit_conn *conn,
+ struct task_struct *p,
+ int mode);
-static inline void iscsit_thread_check_cpumask(
- struct iscsi_conn *conn,
- struct task_struct *p,
- int mode)
-{
- /*
- * mode == 1 signals iscsi_target_tx_thread() usage.
- * mode == 0 signals iscsi_target_rx_thread() usage.
- */
- if (mode == 1) {
- if (!conn->conn_tx_reset_cpumask)
- return;
- conn->conn_tx_reset_cpumask = 0;
- } else {
- if (!conn->conn_rx_reset_cpumask)
- return;
- conn->conn_rx_reset_cpumask = 0;
- }
- /*
- * Update the CPU mask for this single kthread so that
- * both TX and RX kthreads are scheduled to run on the
- * same CPU.
- */
- set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
#endif /* ISCSI_TARGET_CORE_H */
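
The cpumask helper that used to live here as a static inline (removed above) is now only declared in the header; its definition moves out of line, where it can also consult the new allowed_cpumask. A self-contained model of the consume-flag-then-repin logic the removed inline implemented (`struct conn_demo` and the printf stand in for the kernel's conn and `set_cpus_allowed_ptr()`; this is a sketch, not the new kernel body):

```c
#include <stdbool.h>
#include <stdio.h>

struct conn_demo {
	bool tx_reset_cpumask;	/* set when the configured mask changes */
	bool rx_reset_cpumask;
	int conn_cpu;		/* stands in for conn_cpumask */
};

static void thread_check_cpumask(struct conn_demo *conn, int mode)
{
	/* mode == 1: TX kthread, mode == 0: RX kthread, as before. */
	bool *flag = mode ? &conn->tx_reset_cpumask
			  : &conn->rx_reset_cpumask;

	if (!*flag)
		return;		/* nothing changed since the last check */
	*flag = false;
	/* set_cpus_allowed_ptr(p, conn->conn_cpumask) in the kernel */
	printf("%s thread re-pinned to CPU %d\n",
	       mode ? "tx" : "rx", conn->conn_cpu);
}

int main(void)
{
	struct conn_demo conn = { .tx_reset_cpumask = true, .conn_cpu = 2 };

	thread_check_cpumask(&conn, 1);	/* TX: consumes the flag, re-pins */
	thread_check_cpumask(&conn, 1);	/* flag already consumed: no-op */
	return 0;
}
```
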
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index b8feba7ffebc..42cfe02ea909 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include "iscsi_target_core.h" /* struct iscsi_cmd */
+#include "iscsi_target_core.h" /* struct iscsit_cmd */
struct sockaddr_storage;
@@ -12,29 +12,29 @@ struct iscsit_transport {
struct module *owner;
struct list_head t_node;
int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
- int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
+ int (*iscsit_accept_np)(struct iscsi_np *, struct iscsit_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
- void (*iscsit_wait_conn)(struct iscsi_conn *);
- void (*iscsit_free_conn)(struct iscsi_conn *);
- int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
- int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
- int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
- int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
- int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
- int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
- int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
- void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
- int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
+ void (*iscsit_wait_conn)(struct iscsit_conn *);
+ void (*iscsit_free_conn)(struct iscsit_conn *);
+ int (*iscsit_get_login_rx)(struct iscsit_conn *, struct iscsi_login *);
+ int (*iscsit_put_login_tx)(struct iscsit_conn *, struct iscsi_login *, u32);
+ int (*iscsit_immediate_queue)(struct iscsit_conn *, struct iscsit_cmd *, int);
+ int (*iscsit_response_queue)(struct iscsit_conn *, struct iscsit_cmd *, int);
+ int (*iscsit_get_dataout)(struct iscsit_conn *, struct iscsit_cmd *, bool);
+ int (*iscsit_queue_data_in)(struct iscsit_conn *, struct iscsit_cmd *);
+ int (*iscsit_queue_status)(struct iscsit_conn *, struct iscsit_cmd *);
+ void (*iscsit_aborted_task)(struct iscsit_conn *, struct iscsit_cmd *);
+ int (*iscsit_xmit_pdu)(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_datain_req *, const void *, u32);
- void (*iscsit_unmap_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
- void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
- int (*iscsit_validate_params)(struct iscsi_conn *);
- void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
+ void (*iscsit_unmap_cmd)(struct iscsit_conn *, struct iscsit_cmd *);
+ void (*iscsit_get_rx_pdu)(struct iscsit_conn *);
+ int (*iscsit_validate_params)(struct iscsit_conn *);
+ void (*iscsit_get_r2t_ttt)(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_r2t *);
- enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
+ enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsit_conn *);
};
-static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
+static inline void *iscsit_priv_cmd(struct iscsit_cmd *cmd)
{
return (void *)(cmd + 1);
}
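
`iscsit_priv_cmd()` hands a transport its private per-command area: the bytes placed immediately after `struct iscsit_cmd` in the same allocation, hence the `(void *)(cmd + 1)`. A self-contained sketch of that co-allocation layout (illustrative struct names, assuming the allocation was sized for both parts as the core arranges):

```c
#include <stdio.h>
#include <stdlib.h>

struct cmd { int tag; };		/* stand-in for struct iscsit_cmd */
struct priv { int transport_state; };	/* per-transport private data */

/* Same trick as iscsit_priv_cmd(): private area starts right after cmd. */
static void *priv_cmd(struct cmd *cmd)
{
	return (void *)(cmd + 1);
}

int main(void)
{
	/* One allocation covers the command plus its private area. */
	struct cmd *cmd = calloc(1, sizeof(*cmd) + sizeof(struct priv));
	if (!cmd)
		return 1;

	struct priv *p = priv_cmd(cmd);
	p->transport_state = 7;
	printf("%d\n", ((struct priv *)priv_cmd(cmd))->transport_state);
	free(cmd);
	return 0;
}
```
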
@@ -51,100 +51,100 @@ extern void iscsit_put_transport(struct iscsit_transport *);
/*
* From iscsi_target.c
*/
-extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_scsi_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *);
-extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
-extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
+extern int iscsit_process_scsi_cmd(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_scsi_req *);
extern int
-__iscsit_check_dataout_hdr(struct iscsi_conn *, void *,
- struct iscsi_cmd *, u32, bool *);
+__iscsit_check_dataout_hdr(struct iscsit_conn *, void *,
+ struct iscsit_cmd *, u32, bool *);
extern int
-iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
- struct iscsi_cmd **out_cmd);
-extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
+iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+ struct iscsit_cmd **out_cmd);
+extern int iscsit_check_dataout_payload(struct iscsit_cmd *, struct iscsi_data *,
bool);
-extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_nop_out(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_nopout *);
-extern int iscsit_process_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_process_nop_out(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_nopout *);
-extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_handle_logout_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *);
-extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_handle_task_mgt_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *);
-extern int iscsit_setup_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_text_cmd(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_text *);
-extern int iscsit_process_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_process_text_cmd(struct iscsit_conn *, struct iscsit_cmd *,
struct iscsi_text *);
-extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_rsp_pdu(struct iscsit_cmd *, struct iscsit_conn *,
bool, struct iscsi_scsi_rsp *);
-extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_nopin_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_nopin *, bool);
-extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_task_mgt_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_tm_rsp *);
-extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_build_text_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_text_rsp *,
enum iscsit_transport_type);
-extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_reject(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_reject *);
-extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_build_logout_rsp(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_logout_rsp *);
-extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
-extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
-extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
-extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
-extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
-extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_logout_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_queue_rsp(struct iscsit_conn *, struct iscsit_cmd *);
+extern void iscsit_aborted_task(struct iscsit_conn *, struct iscsit_cmd *);
+extern int iscsit_add_reject(struct iscsit_conn *, u8, unsigned char *);
+extern int iscsit_reject_cmd(struct iscsit_cmd *, u8, unsigned char *);
+extern int iscsit_handle_snack(struct iscsit_conn *, unsigned char *);
+extern void iscsit_build_datain_pdu(struct iscsit_cmd *, struct iscsit_conn *,
struct iscsi_datain *,
struct iscsi_data_rsp *, bool);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *,
bool);
-extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+extern int iscsit_immediate_queue(struct iscsit_conn *, struct iscsit_cmd *, int);
+extern int iscsit_response_queue(struct iscsit_conn *, struct iscsit_cmd *, int);
/*
* From iscsi_target_device.c
*/
-extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsit_cmd *, struct iscsit_session *);
/*
* From iscsi_target_erl0.c
*/
-extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_cause_connection_reinstatement(struct iscsit_conn *, int);
/*
* From iscsi_target_erl1.c
*/
-extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_stop_dataout_timer(struct iscsit_cmd *);
/*
* From iscsi_target_tmr.c
*/
-extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tmr_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
/*
* From iscsi_target_util.c
*/
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
-extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
+extern int iscsit_sequence_cmd(struct iscsit_conn *, struct iscsit_cmd *,
unsigned char *, __be32);
-extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
- struct iscsi_conn *, u8);
-extern struct iscsi_cmd *
-iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn,
+extern void iscsit_release_cmd(struct iscsit_cmd *);
+extern void iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *,
+ struct iscsit_conn *, u8);
+extern struct iscsit_cmd *
+iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *conn,
itt_t init_task_tag, u32 length);
/*
* From iscsi_target_nego.c
*/
-extern int iscsi_target_check_login_request(struct iscsi_conn *,
+extern int iscsi_target_check_login_request(struct iscsit_conn *,
struct iscsi_login *);
/*
* From iscsi_target_login.c
*/
extern __printf(2, 3) int iscsi_change_param_sprintf(
- struct iscsi_conn *, const char *, ...);
+ struct iscsit_conn *, const char *, ...);
/*
* From iscsi_target_parameters.c
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 6336780d83a7..739df993aa5e 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -14,7 +14,7 @@
#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
#define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4
-struct request_queue;
+struct block_device;
struct scatterlist;
struct target_backend_ops {
@@ -34,7 +34,10 @@ struct target_backend_ops {
int (*configure_device)(struct se_device *);
void (*destroy_device)(struct se_device *);
void (*free_device)(struct se_device *device);
+ struct se_dev_plug *(*plug_device)(struct se_device *se_dev);
+ void (*unplug_device)(struct se_dev_plug *se_plug);
+ bool (*configure_unmap)(struct se_device *se_dev);
ssize_t (*set_configfs_dev_params)(struct se_device *,
const char *, ssize_t);
ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
@@ -59,19 +62,25 @@ struct target_backend_ops {
struct configfs_attribute **tb_dev_action_attrs;
};
-struct sbc_ops {
+struct exec_cmd_ops {
sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
u32, enum dma_data_direction);
sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
sector_t lba, sector_t nolb);
+ sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key,
+ u64 sa_key, u8 type, bool aptpl);
+ sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa,
+ unsigned char *param_data);
};
int transport_backend_register(const struct target_backend_ops *);
void target_backend_unregister(const struct target_backend_ops *);
void target_complete_cmd(struct se_cmd *, u8);
+void target_set_cmd_data_length(struct se_cmd *, int);
+void target_complete_cmd_with_sense(struct se_cmd *, u8, sense_reason_t);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);
void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
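
`sbc_ops` becomes `exec_cmd_ops` and grows `execute_pr_out`/`execute_pr_in` hooks, so a backend can take over persistent reservations while leaving unimplemented callbacks NULL for core emulation. A hypothetical, heavily simplified model of dispatch through such an ops table (stand-in types and opcodes; not the kernel's actual parser):

```c
#include <stdio.h>

typedef int sense_reason_t;		/* stand-in for the kernel type */
enum { TCM_OK = 0, TCM_UNSUPPORTED = 1 };

struct se_cmd_demo { unsigned char opcode; };

/* Mirrors the shape of exec_cmd_ops: one callback per command class. */
struct exec_cmd_ops_demo {
	sense_reason_t (*execute_rw)(struct se_cmd_demo *cmd);
	sense_reason_t (*execute_pr_in)(struct se_cmd_demo *cmd);
};

static sense_reason_t demo_rw(struct se_cmd_demo *cmd)
{
	printf("backend handles opcode 0x%02x\n", cmd->opcode);
	return TCM_OK;
}

static const struct exec_cmd_ops_demo ops = {
	.execute_rw = demo_rw,
	/* .execute_pr_in left NULL: core emulation would take over */
};

static sense_reason_t parse_cdb_demo(struct se_cmd_demo *cmd)
{
	switch (cmd->opcode) {
	case 0x28:	/* READ(10) */
		return ops.execute_rw(cmd);
	case 0x5e:	/* PERSISTENT RESERVE IN */
		return ops.execute_pr_in ? ops.execute_pr_in(cmd) : TCM_OK;
	default:
		return TCM_UNSUPPORTED;
	}
}

int main(void)
{
	struct se_cmd_demo cmd = { .opcode = 0x28 };
	return parse_cdb_demo(&cmd);
}
```
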
@@ -81,7 +90,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
-sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
+sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
@@ -113,7 +122,7 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q);
+ struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 549947d407cf..97099a5e3f6c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -91,6 +91,8 @@
#define DA_EMULATE_ALUA 0
/* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */
#define DA_EMULATE_PR 1
+/* Emulation for REPORT SUPPORTED OPERATION CODES */
+#define DA_EMULATE_RSOC 1
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
/* Force SPC-3 PR Activate Persistence across Target Power Loss */
@@ -106,6 +108,15 @@
#define SE_MODE_PAGE_BUF 512
#define SE_SENSE_BUF 96
+enum target_submit_type {
+ /* Use the fabric driver's default submission type */
+ TARGET_FABRIC_DEFAULT_SUBMIT,
+ /* Submit from the calling context */
+ TARGET_DIRECT_SUBMIT,
+ /* Defer submission to the LIO workqueue */
+ TARGET_QUEUE_SUBMIT,
+};
+
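
How a device's submit_type combines with the fabric capabilities declared further below (`direct_submit_supp`, `default_submit_type`) is not spelled out in the headers. The helper sketched here is an assumption about the intended policy, not kernel code:

```c
#include <stdio.h>

enum target_submit_type {
	TARGET_FABRIC_DEFAULT_SUBMIT,	/* defer to the fabric driver */
	TARGET_DIRECT_SUBMIT,		/* submit from the calling context */
	TARGET_QUEUE_SUBMIT,		/* bounce through the LIO workqueue */
};

struct fabric_caps {
	unsigned int direct_submit_supp:1;
	unsigned char default_submit_type;
};

/*
 * ASSUMPTION: "fabric default" resolves to the driver's preference, and
 * a direct-submit request degrades to the workqueue when unsupported.
 */
static enum target_submit_type
resolve_submit_type(unsigned char dev_submit_type,
		    const struct fabric_caps *caps)
{
	enum target_submit_type type = dev_submit_type;

	if (type == TARGET_FABRIC_DEFAULT_SUBMIT)
		type = caps->default_submit_type;
	if (type == TARGET_DIRECT_SUBMIT && !caps->direct_submit_supp)
		type = TARGET_QUEUE_SUBMIT;
	return type;
}

int main(void)
{
	const struct fabric_caps caps = {
		.direct_submit_supp = 0,
		.default_submit_type = TARGET_DIRECT_SUBMIT,
	};

	/* Unsupported direct submit falls back to the queue (prints 2). */
	printf("%d\n", resolve_submit_type(TARGET_FABRIC_DEFAULT_SUBMIT,
					   &caps));
	return 0;
}
```
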
/* struct se_hba->hba_flags */
enum hba_flags_table {
HBA_FLAGS_INTERNAL_USE = 0x01,
@@ -127,25 +138,25 @@ enum transport_state_table {
/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
- SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
- SCF_TRANSPORT_TASK_SENSE = 0x00000002,
- SCF_EMULATED_TASK_SENSE = 0x00000004,
- SCF_SCSI_DATA_CDB = 0x00000008,
- SCF_SCSI_TMR_CDB = 0x00000010,
- SCF_FUA = 0x00000080,
- SCF_SE_LUN_CMD = 0x00000100,
- SCF_BIDI = 0x00000400,
- SCF_SENT_CHECK_CONDITION = 0x00000800,
- SCF_OVERFLOW_BIT = 0x00001000,
- SCF_UNDERFLOW_BIT = 0x00002000,
- SCF_ALUA_NON_OPTIMIZED = 0x00008000,
- SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
- SCF_COMPARE_AND_WRITE = 0x00080000,
- SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
- SCF_ACK_KREF = 0x00400000,
- SCF_USE_CPUID = 0x00800000,
- SCF_TASK_ATTR_SET = 0x01000000,
- SCF_TREAT_READ_AS_NORMAL = 0x02000000,
+ SCF_SUPPORTED_SAM_OPCODE = (1 << 0),
+ SCF_TRANSPORT_TASK_SENSE = (1 << 1),
+ SCF_EMULATED_TASK_SENSE = (1 << 2),
+ SCF_SCSI_DATA_CDB = (1 << 3),
+ SCF_SCSI_TMR_CDB = (1 << 4),
+ SCF_FUA = (1 << 5),
+ SCF_SE_LUN_CMD = (1 << 6),
+ SCF_BIDI = (1 << 7),
+ SCF_SENT_CHECK_CONDITION = (1 << 8),
+ SCF_OVERFLOW_BIT = (1 << 9),
+ SCF_UNDERFLOW_BIT = (1 << 10),
+ SCF_ALUA_NON_OPTIMIZED = (1 << 11),
+ SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = (1 << 12),
+ SCF_COMPARE_AND_WRITE = (1 << 13),
+ SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = (1 << 14),
+ SCF_ACK_KREF = (1 << 15),
+ SCF_USE_CPUID = (1 << 16),
+ SCF_TASK_ATTR_SET = (1 << 17),
+ SCF_TREAT_READ_AS_NORMAL = (1 << 18),
};
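
Note this hunk is more than a notational cleanup: the old hex values had gaps (SCF_FUA was 0x00000080, and is now bit 5), so several flags change numeric value. That appears safe only because se_cmd_flags never crosses the user/kernel boundary. A tiny compile-time illustration of the repacking:

```c
#include <assert.h>

enum demo_flags {
	DEMO_FUA = (1 << 5),	/* was 0x00000080 in the hex scheme */
};

/* The renumbering is visible at compile time: bit 5, not bit 7. */
static_assert(DEMO_FUA == 0x20, "flags were repacked contiguously");

int main(void) { return 0; }
```
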
/*
@@ -171,7 +182,7 @@ enum tcm_sense_reason_table {
TCM_WRITE_PROTECTED = R(0x0c),
TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d),
TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e),
- TCM_CHECK_CONDITION_NOT_READY = R(0x0f),
+
TCM_RESERVATION_CONFLICT = R(0x10),
TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
TCM_OUT_OF_RESOURCES = R(0x12),
@@ -187,6 +198,11 @@ enum tcm_sense_reason_table {
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
TCM_LUN_BUSY = R(0x1e),
+ TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f),
+ TCM_ALUA_TG_PT_STANDBY = R(0x20),
+ TCM_ALUA_TG_PT_UNAVAILABLE = R(0x21),
+ TCM_ALUA_STATE_TRANSITION = R(0x22),
+ TCM_ALUA_OFFLINE = R(0x23),
#undef R
};
@@ -195,7 +211,6 @@ enum target_sc_flags_table {
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
TARGET_SCF_USE_CPUID = 0x08,
- TARGET_SCF_LOOKUP_LUN_FROM_TAG = 0x10,
};
/* fabric independent task management function values */
@@ -326,6 +341,7 @@ struct t10_wwn {
char model[INQUIRY_MODEL_LEN + 1];
char revision[INQUIRY_REVISION_LEN + 1];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+ u32 company_id;
spinlock_t t10_vpd_lock;
struct se_device *t10_dev;
struct config_group t10_wwn_group;
@@ -452,10 +468,10 @@ enum target_core_dif_check {
#define TCM_ACA_TAG 0x24
struct se_cmd {
+ /* Used to fail with specific sense codes */
+ sense_reason_t sense_reason;
/* SAM response code being sent to initiator */
u8 scsi_status;
- u8 scsi_asc;
- u8 scsi_ascq;
u16 scsi_sense_length;
unsigned unknown_data_length:1;
bool state_active:1;
@@ -487,8 +503,9 @@ struct se_cmd {
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
+ struct target_cmd_counter *cmd_cnt;
struct se_tmr_req *se_tmr_req;
- struct list_head se_cmd_list;
+ struct llist_node se_cmd_list;
struct completion *free_compl;
struct completion *abrt_compl;
const struct target_core_fabric_ops *se_tfo;
@@ -540,7 +557,11 @@ struct se_cmd {
struct scatterlist *t_prot_sg;
unsigned int t_prot_nents;
sense_reason_t pi_err;
- sector_t bad_sector;
+ u64 sense_info;
+ /*
+ * CPU LIO will execute the cmd on. Defaults to the CPU the cmd is
+ * initialized on. Drivers can override.
+ */
int cpuid;
};
@@ -608,22 +629,26 @@ static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item)
acl_fabric_stat_group);
}
+struct target_cmd_counter {
+ struct percpu_ref refcnt;
+ wait_queue_head_t refcnt_wq;
+ struct completion stop_done;
+ atomic_t stopped;
+};
+
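
`target_cmd_counter` extracts the old per-session command accounting (the percpu_ref `cmd_count` plus waitqueue removed from se_session below) into a standalone object that sessions and iSCSI connections can each own. A single-threaded, self-contained model of the get/put/stop-and-drain life cycle, using a plain atomic and a spin loop where the kernel uses a percpu_ref and sleeping waiters:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd_counter {
	atomic_int refcnt;	/* stands in for the percpu_ref */
	atomic_bool stopped;	/* set once teardown begins */
};

static bool cmd_counter_get(struct cmd_counter *c)
{
	if (atomic_load(&c->stopped))
		return false;		/* no new commands after stop */
	atomic_fetch_add(&c->refcnt, 1);
	return true;
}

static void cmd_counter_put(struct cmd_counter *c)
{
	/* The final put would wake the waiter (refcnt_wq) in the kernel. */
	atomic_fetch_sub(&c->refcnt, 1);
}

static void cmd_counter_stop_and_wait(struct cmd_counter *c)
{
	atomic_store(&c->stopped, true);
	while (atomic_load(&c->refcnt) > 0)
		;	/* the kernel sleeps on refcnt_wq instead */
}

int main(void)
{
	struct cmd_counter c = { 0 };

	cmd_counter_get(&c);		/* command enters */
	cmd_counter_put(&c);		/* command completes */
	cmd_counter_stop_and_wait(&c);	/* teardown: drain to zero */
	printf("drained\n");
	return 0;
}
```
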
struct se_session {
- unsigned sess_tearing_down:1;
u64 sess_bin_isid;
enum target_prot_op sup_prot_ops;
enum target_prot_type sess_prot_type;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
- struct percpu_ref cmd_count;
struct list_head sess_list;
struct list_head sess_acl_list;
- struct list_head sess_cmd_list;
spinlock_t sess_cmd_lock;
- wait_queue_head_t cmd_list_wq;
void *sess_cmd_map;
struct sbitmap_queue sess_tag_pool;
+ struct target_cmd_counter *cmd_cnt;
};
struct se_device;
@@ -656,9 +681,9 @@ struct se_dev_entry {
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
- struct se_lun_acl __rcu *se_lun_acl;
+ struct se_lun_acl *se_lun_acl;
spinlock_t ua_lock;
- struct se_lun __rcu *se_lun;
+ struct se_lun *se_lun;
#define DEF_PR_REG_ACTIVE 1
unsigned long deve_flags;
struct list_head alua_port_list;
@@ -681,6 +706,7 @@ struct se_dev_attrib {
bool emulate_caw;
bool emulate_3pc;
bool emulate_pr;
+ bool emulate_rsoc;
enum target_prot_type pi_prot_type;
enum target_prot_type hw_pi_prot_type;
bool pi_prot_verify;
@@ -700,7 +726,7 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
- u32 max_bytes_per_io;
+ u8 submit_type;
struct se_device *da_dev;
struct config_group da_group;
};
@@ -724,8 +750,6 @@ struct se_lun {
bool lun_access_ro;
u32 lun_index;
- /* RELATIVE TARGET PORT IDENTIFER */
- u16 lun_rtpi;
atomic_t lun_acl_count;
struct se_device __rcu *lun_se_dev;
@@ -740,7 +764,7 @@ struct se_lun {
/* ALUA target port group linkage */
struct list_head lun_tg_pt_gp_link;
- struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+ struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp;
spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg;
@@ -761,9 +785,22 @@ struct se_dev_stat_grps {
struct config_group scsi_lu_group;
};
+struct se_cmd_queue {
+ struct llist_head cmd_list;
+ struct work_struct work;
+};
+
+struct se_dev_plug {
+ struct se_device *se_dev;
+};
+
+struct se_device_queue {
+ struct list_head state_list;
+ spinlock_t lock;
+ struct se_cmd_queue sq;
+};
+
struct se_device {
- /* RELATIVE TARGET PORT IDENTIFER Counter */
- u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
u32 dev_cur_ordered_id;
u32 dev_flags;
@@ -788,12 +825,12 @@ struct se_device {
atomic_long_t read_bytes;
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
- atomic_t simple_cmds;
- atomic_t dev_ordered_sync;
+ atomic_t non_ordered;
+ bool ordered_sync_in_progress;
+ atomic_t delayed_cmd_count;
atomic_t dev_qf_count;
u32 export_count;
spinlock_t delayed_cmd_lock;
- spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock;
unsigned int dev_reservation_flags;
#define DRF_SPC2_RESERVATIONS 0x00000001
@@ -811,8 +848,8 @@ struct se_device {
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
struct work_struct qf_work_queue;
+ struct work_struct delayed_cmd_work;
struct list_head delayed_cmd_list;
- struct list_head state_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
@@ -839,6 +876,25 @@ struct se_device {
/* For se_lun->lun_se_dev RCU read-side critical access */
u32 hba_index;
struct rcu_head rcu_head;
+ int queue_cnt;
+ struct se_device_queue *queues;
+ struct mutex lun_reset_mutex;
+};
+
+struct target_opcode_descriptor {
+ u8 support:3;
+ u8 serv_action_valid:1;
+ u8 opcode;
+ u16 service_action;
+ u32 cdb_size;
+ u8 specific_timeout;
+ u16 nominal_timeout;
+ u16 recommended_timeout;
+ bool (*enabled)(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd);
+ void (*update_usage_bits)(u8 *usage_bits,
+ struct se_device *dev);
+ u8 usage_bits[];
};
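
`target_opcode_descriptor` (added for the REPORT SUPPORTED OPERATION CODES emulation enabled by DA_EMULATE_RSOC above) ends in a flexible array member for the per-opcode CDB usage bitmap. A self-contained sketch of allocating and filling such a descriptor (field subset and fill values are illustrative only):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same shape idea: fixed header plus a usage_bits[] array sized per
 * opcode at definition time via a flexible array member. */
struct opcode_descr {
	unsigned char opcode;
	unsigned int cdb_size;
	unsigned char usage_bits[];	/* flexible array member */
};

static struct opcode_descr *alloc_descr(unsigned char opcode,
					unsigned int cdb_size)
{
	struct opcode_descr *d = malloc(sizeof(*d) + cdb_size);

	if (!d)
		return NULL;
	d->opcode = opcode;
	d->cdb_size = cdb_size;
	memset(d->usage_bits, 0xff, cdb_size);	/* illustrative fill */
	d->usage_bits[0] = opcode;		/* byte 0 is the opcode */
	return d;
}

int main(void)
{
	struct opcode_descr *d = alloc_descr(0x28 /* READ(10) */, 10);

	if (!d)
		return 1;
	printf("opcode 0x%02x, %u usage bytes\n", d->opcode, d->cdb_size);
	free(d);
	return 0;
}
```
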
struct se_hba {
@@ -876,6 +932,10 @@ struct se_portal_group {
* Negative values can be used by fabric drivers for internal use TPGs.
*/
int proto_id;
+ bool enabled;
+ /* RELATIVE TARGET PORT IDENTIFIER */
+ u16 tpg_rtpi;
+ bool rtpi_manual;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */
@@ -925,11 +985,20 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item)
tpg_param_group);
}
+enum {
+ /* Use se_cmd's cpuid for completion */
+ SE_COMPL_AFFINITY_CPUID = -1,
+ /* Complete on current CPU */
+ SE_COMPL_AFFINITY_CURR_CPU = -2,
+};
+
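
The two negative sentinels share the value space of real CPU numbers in the new se_wwn `cmd_compl_affinity` field. A plausible selection helper, offered as an assumption about how the core interprets the field:

```c
#include <stdio.h>

enum {
	SE_COMPL_AFFINITY_CPUID    = -1,	/* use se_cmd->cpuid */
	SE_COMPL_AFFINITY_CURR_CPU = -2,	/* use the current CPU */
};

/*
 * Hypothetical helper: non-negative values of cmd_compl_affinity name
 * a fixed CPU; the two sentinels select a dynamic policy instead.
 */
static int completion_cpu(int cmd_compl_affinity, int cmd_cpuid,
			  int current_cpu)
{
	switch (cmd_compl_affinity) {
	case SE_COMPL_AFFINITY_CPUID:
		return cmd_cpuid;
	case SE_COMPL_AFFINITY_CURR_CPU:
		return current_cpu;
	default:
		return cmd_compl_affinity;	/* pinned via configfs */
	}
}

int main(void)
{
	printf("%d\n", completion_cpu(SE_COMPL_AFFINITY_CPUID, 3, 0)); /* 3 */
	return 0;
}
```
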
struct se_wwn {
struct target_fabric_configfs *wwn_tf;
void *priv;
struct config_group wwn_group;
struct config_group fabric_stat_group;
+ struct config_group param_group;
+ int cmd_compl_affinity;
};
static inline void atomic_inc_mb(atomic_t *v)
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 6adf4d71acf6..3378ff9ee271 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -89,6 +89,7 @@ struct target_core_fabric_ops {
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
const char *);
+ int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
@@ -112,11 +113,20 @@ struct target_core_fabric_ops {
struct configfs_attribute **tfc_tpg_nacl_param_attrs;
/*
- * Set this member variable to true if the SCSI transport protocol
+ * Set this member variable if the SCSI transport protocol
* (e.g. iSCSI) requires that the Data-Out buffer is transferred in
* its entirety before a command is aborted.
*/
- bool write_pending_must_be_called;
+ unsigned int write_pending_must_be_called:1;
+ /*
+ * Set this if the driver supports submitting commands to the backend
+ * from target_submit/target_submit_cmd.
+ */
+ unsigned int direct_submit_supp:1;
+ /*
+ * Set this to a target_submit_type value.
+ */
+ u8 default_submit_type;
};
int target_register_template(const struct target_core_fabric_ops *fo);
@@ -132,7 +142,12 @@ struct se_session *target_setup_session(struct se_portal_group *,
struct se_session *, void *));
void target_remove_session(struct se_session *);
-int transport_init_session(struct se_session *se_sess);
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt);
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt);
+struct target_cmd_counter *target_alloc_cmd_counter(void);
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt);
+
+void transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -148,23 +163,30 @@ void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
-void transport_init_se_cmd(struct se_cmd *,
- const struct target_core_fabric_ops *,
- struct se_session *, u32, int, int, unsigned char *, u64);
+void __target_init_cmd(struct se_cmd *cmd,
+ const struct target_core_fabric_ops *tfo,
+ struct se_session *sess, u32 data_length, int data_direction,
+ int task_attr, unsigned char *sense_buffer, u64 unpacked_lun,
+ struct target_cmd_counter *cmd_cnt);
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u64 unpacked_lun, u32 data_length,
+ int task_attr, int data_dir, int flags);
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
+int target_submit(struct se_cmd *se_cmd);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
-sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
+ gfp_t gfp);
sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
-int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
- unsigned char *, unsigned char *, u64, u32, int, int, int,
- struct scatterlist *, u32, struct scatterlist *, u32,
- struct scatterlist *, u32);
-int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
+void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
unsigned char *, u64, u32, int, int, int);
+
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, u64, int);
-int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
void target_put_cmd_and_wait(struct se_cmd *cmd);
@@ -178,12 +200,10 @@ int transport_send_check_condition_and_sense(struct se_cmd *,
int target_send_busy(struct se_cmd *cmd);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
-void target_sess_cmd_list_set_waiting(struct se_session *);
+void target_stop_session(struct se_session *se_sess);
void target_wait_for_sess_cmds(struct se_session *);
void target_show_cmd(const char *pfx, struct se_cmd *cmd);
-int core_alua_check_nonop_delay(struct se_cmd *);
-
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 1ce3be63add1..e609cd7da47e 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -4,22 +4,7 @@
#ifdef CONFIG_BPF_EVENTS
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#include "stages/stage6_event_callback.h"
#undef __perf_count
#define __perf_count(c) (c)
@@ -27,6 +12,8 @@
#undef __perf_task
#define __perf_task(t) (t)
+#include <linux/args.h>
+
/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
__typeof__(__builtin_choose_expr(size == 1, (u8)1, \
@@ -55,8 +42,7 @@
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#define __BPF_DECLARE_TRACE(call, proto, args) \
static notrace void \
__bpf_trace_##call(void *__data, proto) \
{ \
@@ -64,6 +50,10 @@ __bpf_trace_##call(void *__data, proto) \
CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
+
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
@@ -79,7 +69,7 @@ static union { \
struct bpf_raw_event_map event; \
btf_trace_##call handler; \
} __bpf_trace_tp_map_##call __used \
-__attribute__((section("__bpf_raw_tp_map"))) = { \
+__section("__bpf_raw_tp_map") = { \
.event = { \
.tp = &__tracepoint_##call, \
.bpf_func = __bpf_trace_##template, \
@@ -90,8 +80,7 @@ __attribute__((section("__bpf_raw_tp_map"))) = { \
#define FIRST(x, ...) x
-#undef DEFINE_EVENT_WRITABLE
-#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size) \
static inline void bpf_test_buffer_##call(void) \
{ \
/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
@@ -100,8 +89,12 @@ static inline void bpf_test_buffer_##call(void) \
*/ \
FIRST(proto); \
(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \
-} \
-__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
+}
+
+#undef DEFINE_EVENT_WRITABLE
+#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+ __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
+ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
@@ -111,9 +104,22 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(call, proto, args) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)
+
+#undef DECLARE_TRACE_WRITABLE
+#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
+ __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
+ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef DECLARE_TRACE_WRITABLE
#undef DEFINE_EVENT_WRITABLE
+#undef __CHECK_WRITABLE_BUF_SIZE
#undef __DEFINE_EVENT
#undef FIRST
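
The refactor keeps bpf_probe.h's central trick: `__bpf_trace_##call` dispatches to `bpf_trace_run1..12` by pasting the argument count onto the function name. A self-contained demonstration of that COUNT_ARGS/CONCATENATE dispatch (two arities instead of twelve):

```c
#include <stdio.h>

/* Count up to four args; the kernel's COUNT_ARGS() goes to twelve. */
#define COUNT_ARGS(...) COUNT_ARGS_(__VA_ARGS__, 4, 3, 2, 1)
#define COUNT_ARGS_(_1, _2, _3, _4, n, ...) n

#define CONCATENATE(a, b) CONCAT_(a, b)
#define CONCAT_(a, b) a##b

static void run1(int a)        { printf("run1 %d\n", a); }
static void run2(int a, int b) { printf("run2 %d %d\n", a, b); }

/* Picks run1/run2/... by arity, like the bpf_trace_run##N dispatch. */
#define RUN(...) CONCATENATE(run, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)

int main(void)
{
	RUN(1);		/* expands to run1(1) */
	RUN(1, 2);	/* expands to run2(1, 2) */
	return 0;
}
```
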
diff --git a/include/trace/define_custom_trace.h b/include/trace/define_custom_trace.h
new file mode 100644
index 000000000000..5827a4c92c74
--- /dev/null
+++ b/include/trace/define_custom_trace.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace files that want to automate creation of all tracepoints defined
+ * in their file should include this file. The following are macros that the
+ * trace file may define:
+ *
+ * TRACE_SYSTEM defines the system the tracepoint is for
+ *
+ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
+ * This macro may be defined to tell define_custom_trace.h what file to include.
+ * Note, leave off the ".h".
+ *
+ * TRACE_INCLUDE_PATH if the path is something other than the core kernel
+ * include/trace, then this macro can define the path to use. Note, the path
+ * is relative to define_custom_trace.h, not to the file including it. Full
+ * path names must be used for out-of-tree modules.
+ */
+
+#ifdef CREATE_CUSTOM_TRACE_EVENTS
+
+/* Prevent recursion */
+#undef CREATE_CUSTOM_TRACE_EVENTS
+
+#include <linux/stringify.h>
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print)
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#undef TRACE_INCLUDE
+#undef __TRACE_INCLUDE
+
+#ifndef TRACE_INCLUDE_FILE
+# define TRACE_INCLUDE_FILE TRACE_SYSTEM
+# define UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifndef TRACE_INCLUDE_PATH
+# define __TRACE_INCLUDE(system) <trace/events/system.h>
+# define UNDEF_TRACE_INCLUDE_PATH
+#else
+# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
+#endif
+
+# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
+
+/* Let the trace headers be reread */
+#define TRACE_CUSTOM_MULTI_READ
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#ifdef TRACEPOINTS_ENABLED
+#include <trace/trace_custom_events.h>
+#endif
+
+#undef TRACE_CUSTOM_EVENT
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#undef DEFINE_CUSTOM_EVENT
+#undef TRACE_CUSTOM_MULTI_READ
+
+/* Only undef what we defined in this file */
+#ifdef UNDEF_TRACE_INCLUDE_FILE
+# undef TRACE_INCLUDE_FILE
+# undef UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifdef UNDEF_TRACE_INCLUDE_PATH
+# undef TRACE_INCLUDE_PATH
+# undef UNDEF_TRACE_INCLUDE_PATH
+#endif
+
+/* We may be processing more files */
+#define CREATE_CUSTOM_TRACE_EVENTS
+
+#endif /* CREATE_CUSTOM_TRACE_EVENTS */
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index bd75f97867b9..00723935dcc7 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -25,7 +25,7 @@
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_CONDITION
#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
@@ -39,12 +39,12 @@
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
- DEFINE_TRACE_FN(name, reg, unreg)
+ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
assign, print, reg, unreg) \
- DEFINE_TRACE_FN(name, reg, unreg)
+ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_NOP
#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
@@ -54,15 +54,15 @@
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
- DEFINE_TRACE_FN(name, reg, unreg)
+ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_CONDITION
#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
@@ -70,7 +70,7 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
- DEFINE_TRACE(name)
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE
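
Passing `PARAMS(proto)` and `PARAMS(args)` down to DEFINE_TRACE works because `PARAMS()` re-expands to its raw argument list, letting a comma-containing parameter ride through a macro layer as a single argument. A self-contained example of the idiom (gcc, since PARAMS uses a GNU named-variadic macro, as in the kernel):

```c
#include <stdio.h>

/* As in the kernel: re-expands to the raw argument list. */
#define PARAMS(args...) args

#define DEFINE_TRACE_DEMO(name, proto, args) \
	static void trace_##name(proto) { printf(#name ": %d %d\n", args); }

/* Without PARAMS(), "int a, int b" would split into two macro arguments. */
DEFINE_TRACE_DEMO(demo, PARAMS(int a, int b), PARAMS(a, b))

int main(void)
{
	trace_demo(1, 2);	/* prints "demo: 1 2" */
	return 0;
}
```
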
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 78c5608a1648..cd104a1343e2 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -77,6 +77,13 @@
EM( P9_TWSTAT, "P9_TWSTAT" ) \
EMe(P9_RWSTAT, "P9_RWSTAT" )
+
+#define P9_FID_REFTYPE \
+ EM( P9_FID_REF_CREATE, "create " ) \
+ EM( P9_FID_REF_GET, "get " ) \
+ EM( P9_FID_REF_PUT, "put " ) \
+ EMe(P9_FID_REF_DESTROY, "destroy" )
+
/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */
#undef EM
#undef EMe
@@ -84,6 +91,21 @@
#define EMe(a, b) TRACE_DEFINE_ENUM(a);
P9_MSG_T
+P9_FID_REFTYPE
+
+/* And also use EM/EMe to define helper enums -- once */
+#ifndef __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE
+#define __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE
+#undef EM
+#undef EMe
+#define EM(a, b) a,
+#define EMe(a, b) a
+
+enum p9_fid_reftype {
+ P9_FID_REFTYPE
+} __mode(byte);
+
+#endif
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
@@ -96,6 +118,8 @@ P9_MSG_T
#define show_9p_op(type) \
__print_symbolic(type, P9_MSG_T)
+#define show_9p_fid_reftype(type) \
+ __print_symbolic(type, P9_FID_REFTYPE)
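
P9_FID_REFTYPE follows the EM()/EMe() X-macro convention used throughout these trace headers: one list, expanded once as an enum, once for TRACE_DEFINE_ENUM(), and once as a symbolic string table. A self-contained reduction of the pattern (two expansions, demo names):

```c
#include <stdio.h>

/* One list, expanded several ways: the EM()/EMe() X-macro pattern. */
#define FID_REFTYPES \
	EM(REF_CREATE, "create") \
	EM(REF_GET,    "get")    \
	EMe(REF_PUT,   "put")

/* Expansion 1: the enum itself. */
#define EM(a, b)  a,
#define EMe(a, b) a
enum fid_reftype { FID_REFTYPES };
#undef EM
#undef EMe

/* Expansion 2: enum -> string mapping. */
#define EM(a, b)  case a: return b;
#define EMe(a, b) case a: return b;
static const char *show_reftype(enum fid_reftype t)
{
	switch (t) { FID_REFTYPES }
	return "?";
}
#undef EM
#undef EMe

int main(void)
{
	printf("%s\n", show_reftype(REF_GET));	/* "get" */
	return 0;
}
```
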
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
@@ -154,20 +178,47 @@ TRACE_EVENT(9p_protocol_dump,
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ __dynamic_array(unsigned char, line,
+ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ memcpy(__get_dynamic_array(line), pdu->sdata,
+ __get_dynamic_array_len(line));
),
- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
+ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
+ __entry->tag, __get_dynamic_array_len(line),
+ __get_dynamic_array(line))
);
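
The switch from a fixed `__array` to a `__dynamic_array` bounded by `pdu->capacity` stops the dump from copying P9_PROTO_DUMP_SZ bytes out of a PDU that is smaller than that. The same bounded-copy shape, self-contained (`min_size()` is a plain-C stand-in for the kernel's type-checked `min_t()`):

```c
#include <stdio.h>
#include <string.h>

#define PROTO_DUMP_SZ 32

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

struct pdu { size_t capacity; unsigned char sdata[8]; };

int main(void)
{
	struct pdu pdu = { .capacity = 8, .sdata = "9p-data" };
	unsigned char line[PROTO_DUMP_SZ];

	/* Copy no more than the PDU actually holds: the old fixed-size
	 * memcpy of PROTO_DUMP_SZ bytes could read past small buffers. */
	size_t n = min_size(pdu.capacity, sizeof(line));

	memcpy(line, pdu.sdata, n);
	printf("copied %zu bytes\n", n);
	return 0;
}
```
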
+
+TRACE_EVENT(9p_fid_ref,
+ TP_PROTO(struct p9_fid *fid, __u8 type),
+
+ TP_ARGS(fid, type),
+
+ TP_STRUCT__entry(
+ __field( int, fid )
+ __field( int, refcount )
+ __field( __u8, type )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = fid->fid;
+ __entry->refcount = refcount_read(&fid->count);
+ __entry->type = type;
+ ),
+
+ TP_printk("%s fid %d, refcount %d",
+ show_9p_fid_reftype(__entry->type),
+ __entry->fid, __entry->refcount)
+);
+
#endif /* _TRACE_9P_H */
/* This part must be outside protection */
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 5f0c1cf1ea13..450c44c83a5d 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -18,55 +18,6 @@
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-enum afs_call_trace {
- afs_call_trace_alloc,
- afs_call_trace_free,
- afs_call_trace_get,
- afs_call_trace_put,
- afs_call_trace_wake,
- afs_call_trace_work,
-};
-
-enum afs_server_trace {
- afs_server_trace_alloc,
- afs_server_trace_callback,
- afs_server_trace_destroy,
- afs_server_trace_free,
- afs_server_trace_gc,
- afs_server_trace_get_by_addr,
- afs_server_trace_get_by_uuid,
- afs_server_trace_get_caps,
- afs_server_trace_get_install,
- afs_server_trace_get_new_cbi,
- afs_server_trace_get_probe,
- afs_server_trace_give_up_cb,
- afs_server_trace_put_call,
- afs_server_trace_put_cbi,
- afs_server_trace_put_find_rsq,
- afs_server_trace_put_probe,
- afs_server_trace_put_slist,
- afs_server_trace_put_slist_isort,
- afs_server_trace_put_uuid_rsq,
- afs_server_trace_update,
-};
-
-enum afs_volume_trace {
- afs_volume_trace_alloc,
- afs_volume_trace_free,
- afs_volume_trace_get_alloc_sbi,
- afs_volume_trace_get_cell_insert,
- afs_volume_trace_get_new_op,
- afs_volume_trace_get_query_alias,
- afs_volume_trace_put_cell_dup,
- afs_volume_trace_put_cell_root,
- afs_volume_trace_put_destroy_sbi,
- afs_volume_trace_put_free_fc,
- afs_volume_trace_put_put_op,
- afs_volume_trace_put_query_alias,
- afs_volume_trace_put_validate_fc,
- afs_volume_trace_remove,
-};
-
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -132,116 +83,32 @@ enum afs_vl_operation {
afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
};
-enum afs_edit_dir_op {
- afs_edit_dir_create,
- afs_edit_dir_create_error,
- afs_edit_dir_create_inval,
- afs_edit_dir_create_nospc,
- afs_edit_dir_delete,
- afs_edit_dir_delete_error,
- afs_edit_dir_delete_inval,
- afs_edit_dir_delete_noent,
-};
-
-enum afs_edit_dir_reason {
- afs_edit_dir_for_create,
- afs_edit_dir_for_link,
- afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename_0,
- afs_edit_dir_for_rename_1,
- afs_edit_dir_for_rename_2,
- afs_edit_dir_for_rmdir,
- afs_edit_dir_for_silly_0,
- afs_edit_dir_for_silly_1,
- afs_edit_dir_for_symlink,
- afs_edit_dir_for_unlink,
-};
-
-enum afs_eproto_cause {
- afs_eproto_bad_status,
- afs_eproto_cb_count,
- afs_eproto_cb_fid_count,
- afs_eproto_cellname_len,
- afs_eproto_file_type,
- afs_eproto_ibulkst_cb_count,
- afs_eproto_ibulkst_count,
- afs_eproto_motd_len,
- afs_eproto_offline_msg_len,
- afs_eproto_volname_len,
- afs_eproto_yvl_fsendpt4_len,
- afs_eproto_yvl_fsendpt6_len,
- afs_eproto_yvl_fsendpt_num,
- afs_eproto_yvl_fsendpt_type,
- afs_eproto_yvl_vlendpt4_len,
- afs_eproto_yvl_vlendpt6_len,
- afs_eproto_yvl_vlendpt_type,
-};
-
-enum afs_io_error {
- afs_io_error_cm_reply,
- afs_io_error_extract,
- afs_io_error_fs_probe_fail,
- afs_io_error_vl_lookup_fail,
- afs_io_error_vl_probe_fail,
+enum afs_cm_operation {
+ afs_CB_CallBack = 204, /* AFS break callback promises */
+ afs_CB_InitCallBackState = 205, /* AFS initialise callback state */
+ afs_CB_Probe = 206, /* AFS probe client */
+ afs_CB_GetLock = 207, /* AFS get contents of CM lock table */
+ afs_CB_GetCE = 208, /* AFS get cache file description */
+ afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */
+ afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */
+ afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */
+ afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */
};
-enum afs_file_error {
- afs_file_error_dir_bad_magic,
- afs_file_error_dir_big,
- afs_file_error_dir_missing_page,
- afs_file_error_dir_over_end,
- afs_file_error_dir_small,
- afs_file_error_dir_unmarked_ext,
- afs_file_error_mntpt,
- afs_file_error_writeback_fail,
-};
-
-enum afs_flock_event {
- afs_flock_acquired,
- afs_flock_callback_break,
- afs_flock_defer_unlock,
- afs_flock_extend_fail,
- afs_flock_fail_other,
- afs_flock_fail_perm,
- afs_flock_no_lockers,
- afs_flock_release_fail,
- afs_flock_silly_delete,
- afs_flock_timestamp,
- afs_flock_try_to_lock,
- afs_flock_vfs_lock,
- afs_flock_vfs_locking,
- afs_flock_waited,
- afs_flock_waiting,
- afs_flock_work_extending,
- afs_flock_work_retry,
- afs_flock_work_unlocking,
- afs_flock_would_block,
-};
-
-enum afs_flock_operation {
- afs_flock_op_copy_lock,
- afs_flock_op_flock,
- afs_flock_op_grant,
- afs_flock_op_lock,
- afs_flock_op_release_lock,
- afs_flock_op_return_ok,
- afs_flock_op_return_eagain,
- afs_flock_op_return_edeadlk,
- afs_flock_op_return_error,
- afs_flock_op_set_lock,
- afs_flock_op_unlock,
- afs_flock_op_wake,
-};
-
-enum afs_cb_break_reason {
- afs_cb_break_no_break,
- afs_cb_break_for_callback,
- afs_cb_break_for_deleted,
- afs_cb_break_for_lapsed,
- afs_cb_break_for_unlink,
- afs_cb_break_for_vsbreak,
- afs_cb_break_for_volume_callback,
- afs_cb_break_for_zap,
+enum yfs_cm_operation {
+ yfs_CB_Probe = 206, /* YFS probe client */
+ yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */
+ yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */
+ yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */
+ yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */
+ yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */
+ yfs_CB_GetServerPrefs = 215,
+ yfs_CB_GetCellServDV = 216,
+ yfs_CB_GetLocalCell = 217,
+ yfs_CB_GetCacheConfig = 218,
+ yfs_CB_GetCellByNum = 65537,
+ yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */
+ yfs_CB_CallBack = 64204,
};
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -270,6 +137,7 @@ enum afs_cb_break_reason {
EM(afs_server_trace_get_new_cbi, "GET cbi ") \
EM(afs_server_trace_get_probe, "GET probe") \
EM(afs_server_trace_give_up_cb, "giveup-cb") \
+ EM(afs_server_trace_purging, "PURGE ") \
EM(afs_server_trace_put_call, "PUT call ") \
EM(afs_server_trace_put_cbi, "PUT cbi ") \
EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
@@ -283,9 +151,11 @@ enum afs_cb_break_reason {
EM(afs_volume_trace_alloc, "ALLOC ") \
EM(afs_volume_trace_free, "FREE ") \
EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_callback, "GET callback ") \
EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
EM(afs_volume_trace_get_new_op, "GET op-new ") \
EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_callback, "PUT callback ") \
EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
@@ -295,6 +165,79 @@ enum afs_cb_break_reason {
EM(afs_volume_trace_put_validate_fc, "PUT fc-validat") \
E_(afs_volume_trace_remove, "REMOVE ")
+#define afs_cell_traces \
+ EM(afs_cell_trace_alloc, "ALLOC ") \
+ EM(afs_cell_trace_free, "FREE ") \
+ EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
+ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
+ EM(afs_cell_trace_get_queue_new, "GET q-new ") \
+ EM(afs_cell_trace_get_vol, "GET vol ") \
+ EM(afs_cell_trace_insert, "INSERT ") \
+ EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_put_candidate, "PUT candid") \
+ EM(afs_cell_trace_put_destroy, "PUT destry") \
+ EM(afs_cell_trace_put_queue_work, "PUT q-work") \
+ EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_vol, "PUT vol ") \
+ EM(afs_cell_trace_see_source, "SEE source") \
+ EM(afs_cell_trace_see_ws, "SEE ws ") \
+ EM(afs_cell_trace_unuse_alias, "UNU alias ") \
+ EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \
+ EM(afs_cell_trace_unuse_delete, "UNU delete") \
+ EM(afs_cell_trace_unuse_fc, "UNU fc ") \
+ EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
+ EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
+ EM(afs_cell_trace_unuse_parse, "UNU parse ") \
+ EM(afs_cell_trace_unuse_pin, "UNU pin ") \
+ EM(afs_cell_trace_unuse_probe, "UNU probe ") \
+ EM(afs_cell_trace_unuse_sbi, "UNU sbi ") \
+ EM(afs_cell_trace_unuse_ws, "UNU ws ") \
+ EM(afs_cell_trace_use_alias, "USE alias ") \
+ EM(afs_cell_trace_use_check_alias, "USE chk-al") \
+ EM(afs_cell_trace_use_fc, "USE fc ") \
+ EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \
+ EM(afs_cell_trace_use_lookup, "USE lookup") \
+ EM(afs_cell_trace_use_mntpt, "USE mntpt ") \
+ EM(afs_cell_trace_use_pin, "USE pin ") \
+ EM(afs_cell_trace_use_probe, "USE probe ") \
+ EM(afs_cell_trace_use_sbi, "USE sbi ") \
+ E_(afs_cell_trace_wait, "WAIT ")
+
+#define afs_alist_traces \
+ EM(afs_alist_trace_alloc, "ALLOC ") \
+ EM(afs_alist_trace_get_estate, "GET estate") \
+ EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \
+ EM(afs_alist_trace_get_vlprobe, "GET vprobe") \
+ EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \
+ EM(afs_alist_trace_put_estate, "PUT estate") \
+ EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
+ EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
+ EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
+ EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \
+ EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
+ EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
+ EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
+ EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \
+ EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \
+ EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \
+ EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \
+ EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \
+ EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \
+ EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \
+ E_(afs_alist_trace_free, "FREE ")
+
+#define afs_estate_traces \
+ EM(afs_estate_trace_alloc_probe, "ALLOC prob") \
+ EM(afs_estate_trace_alloc_server, "ALLOC srvr") \
+ EM(afs_estate_trace_get_server_state, "GET srv-st") \
+ EM(afs_estate_trace_get_getcaps, "GET getcap") \
+ EM(afs_estate_trace_put_getcaps, "PUT getcap") \
+ EM(afs_estate_trace_put_probe, "PUT probe ") \
+ EM(afs_estate_trace_put_server, "PUT server") \
+ EM(afs_estate_trace_put_server_state, "PUT srv-st") \
+ E_(afs_estate_trace_free, "FREE ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -354,6 +297,32 @@ enum afs_cb_break_reason {
EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
+#define afs_cm_operations \
+ EM(afs_CB_CallBack, "CB.CallBack") \
+ EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \
+ EM(afs_CB_Probe, "CB.Probe") \
+ EM(afs_CB_GetLock, "CB.GetLock") \
+ EM(afs_CB_GetCE, "CB.GetCE") \
+ EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \
+ EM(afs_CB_GetXStats, "CB.GetXStats") \
+ EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \
+ E_(afs_CB_ProbeUuid, "CB.ProbeUuid")
+
+#define yfs_cm_operations \
+ EM(yfs_CB_Probe, "YFSCB.Probe") \
+ EM(yfs_CB_GetLock, "YFSCB.GetLock") \
+ EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \
+ EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \
+ EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \
+ EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \
+ EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \
+ EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \
+ EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \
+ EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \
+ EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \
+ EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
+ E_(yfs_CB_CallBack, "YFSCB.CallBack")
+
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@@ -407,6 +376,7 @@ enum afs_cb_break_reason {
EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \
EM(afs_file_error_dir_big, "DIR_BIG") \
EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \
+ EM(afs_file_error_dir_name_too_long, "DIR_NAME_TOO_LONG") \
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
EM(afs_file_error_dir_small, "DIR_SMALL") \
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
@@ -466,12 +436,66 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
EM(afs_cb_break_for_callback, "break-cb") \
+ EM(afs_cb_break_for_creation_regress, "creation-regress") \
EM(afs_cb_break_for_deleted, "break-del") \
- EM(afs_cb_break_for_lapsed, "break-lapsed") \
+ EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_vsbreak, "break-vs") \
+ EM(afs_cb_break_for_update_regress, "update-regress") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
- E_(afs_cb_break_for_zap, "break-zap")
+ EM(afs_cb_break_for_vos_release, "break-vos-release") \
+ E_(afs_cb_break_volume_excluded, "vol-excluded")
+
+#define afs_rotate_traces \
+ EM(afs_rotate_trace_aborted, "Abortd") \
+ EM(afs_rotate_trace_busy_sleep, "BsySlp") \
+ EM(afs_rotate_trace_check_vol_status, "VolStt") \
+ EM(afs_rotate_trace_failed, "Failed") \
+ EM(afs_rotate_trace_iter, "Iter ") \
+ EM(afs_rotate_trace_iterate_addr, "ItAddr") \
+ EM(afs_rotate_trace_next_server, "NextSv") \
+ EM(afs_rotate_trace_no_more_servers, "NoMore") \
+ EM(afs_rotate_trace_nomem, "Nomem ") \
+ EM(afs_rotate_trace_probe_error, "PrbErr") \
+ EM(afs_rotate_trace_probe_fileserver, "PrbFsv") \
+ EM(afs_rotate_trace_probe_none, "PrbNon") \
+ EM(afs_rotate_trace_probe_response, "PrbRsp") \
+ EM(afs_rotate_trace_probe_superseded, "PrbSup") \
+ EM(afs_rotate_trace_restart, "Rstart") \
+ EM(afs_rotate_trace_retry_server, "RtrySv") \
+ EM(afs_rotate_trace_selected_server, "SlctSv") \
+ EM(afs_rotate_trace_stale_lock, "StlLck") \
+ EM(afs_rotate_trace_start, "Start ") \
+ EM(afs_rotate_trace_stop, "Stop ") \
+ E_(afs_rotate_trace_stopped, "Stoppd")
+
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum afs_alist_trace { afs_alist_traces } __mode(byte);
+enum afs_call_trace { afs_call_traces } __mode(byte);
+enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
+enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
+enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
+enum afs_estate_trace { afs_estate_traces } __mode(byte);
+enum afs_file_error { afs_file_errors } __mode(byte);
+enum afs_flock_event { afs_flock_events } __mode(byte);
+enum afs_flock_operation { afs_flock_operations } __mode(byte);
+enum afs_io_error { afs_io_errors } __mode(byte);
+enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
+enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_volume_trace { afs_volume_traces } __mode(byte);
+
+#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
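
The regenerated enums are all declared `__mode(byte)`, i.e. `__attribute__((__mode__(byte)))`, so trace-event fields holding them occupy one byte rather than a full int. A quick standalone check of the attribute's effect (gcc/clang):

```c
#include <stdio.h>

/* The kernel's __mode(byte) expands to this attribute. */
enum demo_trace { DEMO_ALLOC, DEMO_FREE } __attribute__((__mode__(byte)));

int main(void)
{
	printf("sizeof(enum demo_trace) = %zu\n",
	       sizeof(enum demo_trace));	/* prints 1 */
	return 0;
}
```
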
/*
* Export enum symbols via userspace.
@@ -481,18 +505,24 @@ enum afs_cb_break_reason {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+afs_alist_traces;
afs_call_traces;
-afs_server_traces;
-afs_fs_operations;
-afs_vl_operations;
+afs_cb_break_reasons;
+afs_cell_traces;
+afs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
-afs_io_errors;
+afs_estate_traces;
afs_file_errors;
-afs_flock_types;
afs_flock_operations;
-afs_cb_break_reasons;
+afs_flock_types;
+afs_fs_operations;
+afs_io_errors;
+afs_rotate_traces;
+afs_server_traces;
+afs_vl_operations;
+yfs_cm_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -510,12 +540,12 @@ TRACE_EVENT(afs_receive_data,
TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
- __field(loff_t, remain )
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
- __field(bool, want_more )
- __field(int, ret )
+ __field(loff_t, remain)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
+ __field(bool, want_more)
+ __field(int, ret)
),
TP_fast_assign(
@@ -542,9 +572,9 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
),
TP_fast_assign(
@@ -564,49 +594,50 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(const char *, name )
- __field(u32, op )
+ __field(unsigned int, call)
+ __field(u32, op)
+ __field(u16, service_id)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->name = call->type->name;
__entry->op = call->operation_ID;
+ __entry->service_id = call->service_id;
),
- TP_printk("c=%08x %s o=%u",
+ TP_printk("c=%08x %s",
__entry->call,
- __entry->name,
- __entry->op)
+ __entry->service_id == 2501 ?
+ __print_symbolic(__entry->op, yfs_cm_operations) :
+ __print_symbolic(__entry->op, afs_cm_operations))
);
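
The TP_printk() above selects a symbol table at print time: AFS and YFS
cache-manager opcodes overlap numerically, so the recorded service ID (2501
is the YFS CM service) decides which table decodes the op. A userspace
analogue of that dispatch, with entirely made-up opcode tables:

	#include <stdio.h>

	struct sym { unsigned int val; const char *name; };

	/* Illustrative tables; the real ones are the EM()/E_() lists. */
	static const struct sym afs_ops[] = { { 1, "Op.One" }, { 0, NULL } };
	static const struct sym yfs_ops[] = { { 1, "YOp.One" }, { 0, NULL } };

	static const char *decode(unsigned int op, const struct sym *tbl)
	{
		for (; tbl->name; tbl++)
			if (tbl->val == op)
				return tbl->name;
		return "?";
	}

	int main(void)
	{
		unsigned int service_id = 2501, op = 1;	/* sample values */

		printf("%s\n", decode(op, service_id == 2501 ? yfs_ops : afs_ops));
		return 0;
	}
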
TRACE_EVENT(afs_call,
- TP_PROTO(struct afs_call *call, enum afs_call_trace op,
- int usage, int outstanding, const void *where),
+ TP_PROTO(unsigned int call_debug_id, enum afs_call_trace op,
+ int ref, int outstanding, const void *where),
- TP_ARGS(call, op, usage, outstanding, where),
+ TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, usage )
- __field(int, outstanding )
- __field(const void *, where )
+ __field(unsigned int, call)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, outstanding)
+ __field(const void *, where)
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call_debug_id;
__entry->op = op;
- __entry->usage = usage;
+ __entry->ref = ref;
__entry->outstanding = outstanding;
__entry->where = where;
),
- TP_printk("c=%08x %s u=%d o=%d sp=%pSR",
+ TP_printk("c=%08x %s r=%d o=%d sp=%pSR",
__entry->call,
__print_symbolic(__entry->op, afs_call_traces),
- __entry->usage,
+ __entry->ref,
__entry->outstanding,
__entry->where)
);
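
With this proto change the tracepoint takes the debug ID and a sampled
refcount rather than the call pointer, so it can never dereference a call
that is being freed concurrently. A hedged call-site sketch; the function
name and the ref/nr_outstanding_calls fields are assumptions drawn from
fs/afs, not defined by this hunk:

	/* Sketch of a typical caller after the change. */
	static struct afs_call *demo_get_call(struct afs_call *call,
					      enum afs_call_trace why)
	{
		int r;

		__refcount_inc(&call->ref, &r);	/* r = value before inc */
		trace_afs_call(call->debug_id, why, r + 1,
			       atomic_read(&call->net->nr_outstanding_calls),
			       __builtin_return_address(0));
		return call;
	}
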
@@ -617,9 +648,9 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -649,10 +680,10 @@ TRACE_EVENT(afs_make_fs_calli,
TP_ARGS(call, fid, i),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, i )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(unsigned int, i)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -684,10 +715,10 @@ TRACE_EVENT(afs_make_fs_call1,
TP_ARGS(call, fid, name),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -721,11 +752,11 @@ TRACE_EVENT(afs_make_fs_call2,
TP_ARGS(call, fid, name, name2),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
- __array(char, name2, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
+ __array(char, name2, 24)
),
TP_fast_assign(
@@ -762,8 +793,8 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_vl_operation, op )
+ __field(unsigned int, call)
+ __field(enum afs_vl_operation, op)
),
TP_fast_assign(
@@ -782,10 +813,10 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(struct rxrpc_call *, rx_call )
- __field(int, ret )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(struct rxrpc_call *, rx_call)
+ __field(int, ret)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -802,65 +833,52 @@ TRACE_EVENT(afs_call_done,
__entry->rx_call)
);
-TRACE_EVENT(afs_send_pages,
- TP_PROTO(struct afs_call *call, struct msghdr *msg,
- pgoff_t first, pgoff_t last, unsigned int offset),
+TRACE_EVENT(afs_send_data,
+ TP_PROTO(struct afs_call *call, struct msghdr *msg),
- TP_ARGS(call, msg, first, last, offset),
+ TP_ARGS(call, msg),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(pgoff_t, first )
- __field(pgoff_t, last )
- __field(unsigned int, nr )
- __field(unsigned int, bytes )
- __field(unsigned int, offset )
- __field(unsigned int, flags )
+ __field(unsigned int, call)
+ __field(unsigned int, flags)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->first = first;
- __entry->last = last;
- __entry->nr = msg->msg_iter.nr_segs;
- __entry->bytes = msg->msg_iter.count;
- __entry->offset = offset;
__entry->flags = msg->msg_flags;
+ __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
+ __entry->count = iov_iter_count(&msg->msg_iter);
),
- TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x",
- __entry->call,
- __entry->first, __entry->first + __entry->nr - 1, __entry->last,
- __entry->bytes, __entry->offset,
+ TP_printk(" c=%08x o=%llx n=%llx f=%x",
+ __entry->call, __entry->offset, __entry->count,
__entry->flags)
);
-TRACE_EVENT(afs_sent_pages,
- TP_PROTO(struct afs_call *call, pgoff_t first, pgoff_t last,
- pgoff_t cursor, int ret),
+TRACE_EVENT(afs_sent_data,
+ TP_PROTO(struct afs_call *call, struct msghdr *msg, int ret),
- TP_ARGS(call, first, last, cursor, ret),
+ TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(pgoff_t, first )
- __field(pgoff_t, last )
- __field(pgoff_t, cursor )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(int, ret)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->first = first;
- __entry->last = last;
- __entry->cursor = cursor;
__entry->ret = ret;
+ __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
+ __entry->count = iov_iter_count(&msg->msg_iter);
),
- TP_printk(" c=%08x %lx-%lx c=%lx r=%d",
- __entry->call,
- __entry->first, __entry->last,
- __entry->cursor, __entry->ret)
+ TP_printk(" c=%08x o=%llx n=%llx r=%x",
+ __entry->call, __entry->offset, __entry->count,
+ __entry->ret)
);
TRACE_EVENT(afs_dir_check_failed,
@@ -869,9 +887,9 @@ TRACE_EVENT(afs_dir_check_failed,
TP_ARGS(vnode, off, i_size),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(loff_t, off )
- __field(loff_t, i_size )
+ __field(struct afs_vnode *, vnode)
+ __field(loff_t, off)
+ __field(loff_t, i_size)
),
TP_fast_assign(
@@ -884,45 +902,6 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-/*
- * We use page->private to hold the amount of the page that we've written to,
- * splitting the field into two parts. However, we need to represent a range
- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
- */
-#if PAGE_SIZE > 32768
-#define AFS_PRIV_MAX 0xffffffff
-#define AFS_PRIV_SHIFT 32
-#else
-#define AFS_PRIV_MAX 0xffff
-#define AFS_PRIV_SHIFT 16
-#endif
-
-TRACE_EVENT(afs_page_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where,
- pgoff_t page, unsigned long priv),
-
- TP_ARGS(vnode, where, page, priv),
-
- TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(const char *, where )
- __field(pgoff_t, page )
- __field(unsigned long, priv )
- ),
-
- TP_fast_assign(
- __entry->vnode = vnode;
- __entry->where = where;
- __entry->page = page;
- __entry->priv = priv;
- ),
-
- TP_printk("vn=%p %lx %s %lu-%lu",
- __entry->vnode, __entry->page, __entry->where,
- __entry->priv & AFS_PRIV_MAX,
- __entry->priv >> AFS_PRIV_SHIFT)
- );
-
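
The deleted comment above documented a packing scheme that still matters when
reading old traces: page->private carried two page offsets, one in each half
of the word. A userspace sketch of that encoding; which half held the start
("from") and which the end ("to") follows the old afs helpers and is stated
here as an assumption:

	#include <assert.h>

	#define AFS_PRIV_MAX	0xffff	/* mirrors the removed #defines */
	#define AFS_PRIV_SHIFT	16

	static unsigned long afs_priv_pack(unsigned long from, unsigned long to)
	{
		return (to << AFS_PRIV_SHIFT) | (from & AFS_PRIV_MAX);
	}

	int main(void)
	{
		unsigned long priv = afs_priv_pack(0x100, 0xe00);

		assert((priv & AFS_PRIV_MAX) == 0x100);		/* from */
		assert((priv >> AFS_PRIV_SHIFT) == 0xe00);	/* to */
		return 0;
	}
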
TRACE_EVENT(afs_call_state,
TP_PROTO(struct afs_call *call,
enum afs_call_state from,
@@ -932,11 +911,11 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, from )
- __field(enum afs_call_state, to )
- __field(int, ret )
- __field(u32, abort )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, from)
+ __field(enum afs_call_state, to)
+ __field(int, ret)
+ __field(u32, abort)
),
TP_fast_assign(
@@ -960,9 +939,9 @@ TRACE_EVENT(afs_lookup,
TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, dfid )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field_struct(struct afs_fid, dfid)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -992,15 +971,15 @@ TRACE_EVENT(afs_edit_dir,
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
TP_STRUCT__entry(
- __field(unsigned int, vnode )
- __field(unsigned int, unique )
- __field(enum afs_edit_dir_reason, why )
- __field(enum afs_edit_dir_op, op )
- __field(unsigned int, block )
- __field(unsigned short, slot )
- __field(unsigned int, f_vnode )
- __field(unsigned int, f_unique )
- __array(char, name, 24 )
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_edit_dir_reason, why)
+ __field(enum afs_edit_dir_op, op)
+ __field(unsigned int, block)
+ __field(unsigned short, slot)
+ __field(unsigned int, f_vnode)
+ __field(unsigned int, f_unique)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1033,8 +1012,8 @@ TRACE_EVENT(afs_protocol_error,
TP_ARGS(call, cause),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_eproto_cause, cause )
+ __field(unsigned int, call)
+ __field(enum afs_eproto_cause, cause)
),
TP_fast_assign(
@@ -1053,9 +1032,9 @@ TRACE_EVENT(afs_io_error,
TP_ARGS(call, error, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, error )
- __field(enum afs_io_error, where )
+ __field(unsigned int, call)
+ __field(int, error)
+ __field(enum afs_io_error, where)
),
TP_fast_assign(
@@ -1075,9 +1054,9 @@ TRACE_EVENT(afs_file_error,
TP_ARGS(vnode, error, where),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(int, error )
- __field(enum afs_file_error, where )
+ __field_struct(struct afs_fid, fid)
+ __field(int, error)
+ __field(enum afs_file_error, where)
),
TP_fast_assign(
@@ -1092,15 +1071,40 @@ TRACE_EVENT(afs_file_error,
__print_symbolic(__entry->where, afs_file_errors))
);
+TRACE_EVENT(afs_bulkstat_error,
+ TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+ TP_ARGS(op, fid, index, abort),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, op)
+ __field(unsigned int, index)
+ __field(s32, abort)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->fid = *fid;
+ __entry->index = index;
+ __entry->abort = abort;
+ ),
+
+ TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+ __entry->op, __entry->index,
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->abort)
+ );
+
TRACE_EVENT(afs_cm_no_server,
TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
TP_ARGS(call, srx),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1119,9 +1123,9 @@ TRACE_EVENT(afs_cm_no_server_u,
TP_ARGS(call, uuid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(uuid_t, uuid )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(uuid_t, uuid)
),
TP_fast_assign(
@@ -1141,11 +1145,11 @@ TRACE_EVENT(afs_flock_ev,
TP_ARGS(vnode, fl, event, error),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_flock_event, event )
- __field(enum afs_lock_state, state )
- __field(int, error )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_flock_event, event)
+ __field(enum afs_lock_state, state)
+ __field(int, error)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1171,13 +1175,13 @@ TRACE_EVENT(afs_flock_op,
TP_ARGS(vnode, fl, op),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(loff_t, from )
- __field(loff_t, len )
- __field(enum afs_flock_operation, op )
- __field(unsigned char, type )
- __field(unsigned int, flags )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(loff_t, from)
+ __field(loff_t, len)
+ __field(enum afs_flock_operation, op)
+ __field(unsigned char, type)
+ __field(unsigned int, flags)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1185,8 +1189,8 @@ TRACE_EVENT(afs_flock_op,
__entry->from = fl->fl_start;
__entry->len = fl->fl_end - fl->fl_start + 1;
__entry->op = op;
- __entry->type = fl->fl_type;
- __entry->flags = fl->fl_flags;
+ __entry->type = fl->c.flc_type;
+ __entry->flags = fl->c.flc_flags;
__entry->debug_id = fl->fl_u.afs.debug_id;
),
@@ -1204,7 +1208,7 @@ TRACE_EVENT(afs_reload_dir,
TP_ARGS(vnode),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -1221,8 +1225,8 @@ TRACE_EVENT(afs_silly_rename,
TP_ARGS(vnode, done),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(bool, done )
+ __field_struct(struct afs_fid, fid)
+ __field(bool, done)
),
TP_fast_assign(
@@ -1241,9 +1245,9 @@ TRACE_EVENT(afs_get_tree,
TP_ARGS(cell, volume),
TP_STRUCT__entry(
- __field(u64, vid )
- __array(char, cell, 24 )
- __array(char, volume, 24 )
+ __field(u64, vid)
+ __array(char, cell, 24)
+ __array(char, volume, 24)
),
TP_fast_assign(
@@ -1261,6 +1265,30 @@ TRACE_EVENT(afs_get_tree,
__entry->cell, __entry->volume, __entry->vid)
);
+TRACE_EVENT(afs_cb_v_break,
+ TP_PROTO(afs_volid_t vid, unsigned int cb_v_break,
+ enum afs_cb_break_reason reason),
+
+ TP_ARGS(vid, cb_v_break, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid)
+ __field(unsigned int, cb_v_break)
+ __field(enum afs_cb_break_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->cb_v_break = cb_v_break;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%llx vb=%x %s",
+ __entry->vid,
+ __entry->cb_v_break,
+ __print_symbolic(__entry->reason, afs_cb_break_reasons))
+ );
+
TRACE_EVENT(afs_cb_break,
TP_PROTO(struct afs_fid *fid, unsigned int cb_break,
enum afs_cb_break_reason reason, bool skipped),
@@ -1268,10 +1296,10 @@ TRACE_EVENT(afs_cb_break,
TP_ARGS(fid, cb_break, reason, skipped),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(unsigned int, cb_break )
- __field(enum afs_cb_break_reason, reason )
- __field(bool, skipped )
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, cb_break)
+ __field(enum afs_cb_break_reason, reason)
+ __field(bool, skipped)
),
TP_fast_assign(
@@ -1294,8 +1322,8 @@ TRACE_EVENT(afs_cb_miss,
TP_ARGS(fid, reason),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_cb_break_reason, reason )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_cb_break_reason, reason)
),
TP_fast_assign(
@@ -1309,20 +1337,20 @@ TRACE_EVENT(afs_cb_miss,
);
TRACE_EVENT(afs_server,
- TP_PROTO(struct afs_server *server, int ref, int active,
+ TP_PROTO(unsigned int server_debug_id, int ref, int active,
enum afs_server_trace reason),
- TP_ARGS(server, ref, active, reason),
+ TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, server )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, server)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
- __entry->server = server->debug_id;
+ __entry->server = server_debug_id;
__entry->ref = ref;
__entry->active = active;
__entry->reason = reason;
@@ -1341,9 +1369,9 @@ TRACE_EVENT(afs_volume,
TP_ARGS(vid, ref, reason),
TP_STRUCT__entry(
- __field(afs_volid_t, vid )
- __field(int, ref )
- __field(enum afs_volume_trace, reason )
+ __field(afs_volid_t, vid)
+ __field(int, ref)
+ __field(enum afs_volume_trace, reason)
),
TP_fast_assign(
@@ -1352,12 +1380,232 @@ TRACE_EVENT(afs_volume,
__entry->reason = reason;
),
- TP_printk("V=%llx %s u=%d",
+ TP_printk("V=%llx %s ur=%d",
__entry->vid,
__print_symbolic(__entry->reason, afs_volume_traces),
__entry->ref)
);
+TRACE_EVENT(afs_cell,
+ TP_PROTO(unsigned int cell_debug_id, int ref, int active,
+ enum afs_cell_trace reason),
+
+ TP_ARGS(cell_debug_id, ref, active, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cell)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->cell = cell_debug_id;
+ __entry->ref = ref;
+ __entry->active = active;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("L=%08x %s r=%d a=%d",
+ __entry->cell,
+ __print_symbolic(__entry->reason, afs_cell_traces),
+ __entry->ref,
+ __entry->active)
+ );
+
+TRACE_EVENT(afs_alist,
+ TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason),
+
+ TP_ARGS(alist_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, alist)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->alist = alist_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("AL=%08x %s r=%d",
+ __entry->alist,
+ __print_symbolic(__entry->reason, afs_alist_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_estate,
+ TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id,
+ int ref, enum afs_estate_trace reason),
+
+ TP_ARGS(server_debug_id, estate_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server_debug_id;
+ __entry->estate = estate_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("ES=%08x[%x] %s r=%d",
+ __entry->server,
+ __entry->estate,
+ __print_symbolic(__entry->reason, afs_estate_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_fs_probe,
+ TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(bool, tx)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ struct afs_addr_list *alist = estate->addresses;
+ __entry->server = server->debug_id;
+ __entry->estate = estate->probe_seq;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->estate,
+ __entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_vl_probe,
+ TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(bool, tx)
+ __field(unsigned short, flags)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server->debug_id;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index,
+ __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_rotate,
+ TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra),
+
+ TP_ARGS(op, reason, extra),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(unsigned int, flags)
+ __field(unsigned int, extra)
+ __field(unsigned short, iteration)
+ __field(short, server_index)
+ __field(short, addr_index)
+ __field(enum afs_rotate_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->flags = op->flags;
+ __entry->iteration = op->nr_iterations;
+ __entry->server_index = op->server_index;
+ __entry->addr_index = op->addr_index;
+ __entry->reason = reason;
+ __entry->extra = extra;
+ ),
+
+ TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d",
+ __entry->op,
+ __entry->iteration,
+ __print_symbolic(__entry->reason, afs_rotate_traces),
+ __entry->flags,
+ __entry->server_index,
+ __entry->addr_index,
+ __entry->extra)
+ );
+
+TRACE_EVENT(afs_make_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, is_vl)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ __entry->fid = call->fid;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer),
+ sizeof(__entry->srx));
+ __entry->srx.srx_service = call->service_id;
+ __entry->is_vl = (__entry->srx.srx_service == VL_SERVICE ||
+ __entry->srx.srx_service == YFS_VL_SERVICE);
+ ),
+
+ TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x",
+ __entry->call,
+ &__entry->srx.transport,
+ __entry->srx.srx_service,
+ __entry->is_vl ?
+ __print_symbolic(__entry->op, afs_vl_operations) :
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 40c300fe704d..4eed9028bb11 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -7,6 +7,7 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
+#include <sound/jack.h>
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
@@ -16,71 +17,76 @@ struct snd_soc_card;
struct snd_soc_dapm_widget;
struct snd_soc_dapm_path;
-DECLARE_EVENT_CLASS(snd_soc_card,
+DECLARE_EVENT_CLASS(snd_soc_dapm,
- TP_PROTO(struct snd_soc_card *card, int val),
+ TP_PROTO(struct snd_soc_dapm_context *dapm, int val),
- TP_ARGS(card, val),
+ TP_ARGS(dapm, val),
TP_STRUCT__entry(
- __string( name, card->name )
- __field( int, val )
+ __string( card_name, dapm->card->name)
+ __string( comp_name, dapm->component ? dapm->component->name : "(none)")
+ __field( int, val)
),
TP_fast_assign(
- __assign_str(name, card->name);
+ __assign_str(card_name, dapm->card->name);
+ __assign_str(comp_name, dapm->component ? dapm->component->name : "(none)");
__entry->val = val;
),
- TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
+ TP_printk("card=%s component=%s val=%d",
+ __get_str(card_name), __get_str(comp_name), (int)__entry->val)
);
-DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
+DEFINE_EVENT(snd_soc_dapm, snd_soc_bias_level_start,
- TP_PROTO(struct snd_soc_card *card, int val),
+ TP_PROTO(struct snd_soc_dapm_context *dapm, int val),
- TP_ARGS(card, val)
+ TP_ARGS(dapm, val)
);
-DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
+DEFINE_EVENT(snd_soc_dapm, snd_soc_bias_level_done,
- TP_PROTO(struct snd_soc_card *card, int val),
+ TP_PROTO(struct snd_soc_dapm_context *dapm, int val),
- TP_ARGS(card, val)
+ TP_ARGS(dapm, val)
);
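
Since the class now takes a DAPM context instead of the card, bias events can
be told apart per component. A hedged call-site sketch; demo_set_bias() is
illustrative, not a real kernel function:

	static void demo_set_bias(struct snd_soc_dapm_context *dapm, int level)
	{
		trace_snd_soc_bias_level_start(dapm, level);
		/* ... perform the bias transition ... */
		trace_snd_soc_bias_level_done(dapm, level);
	}
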
DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
- TP_PROTO(struct snd_soc_card *card),
+ TP_PROTO(struct snd_soc_card *card, int event),
- TP_ARGS(card),
+ TP_ARGS(card, event),
TP_STRUCT__entry(
__string( name, card->name )
+ __field( int, event )
),
TP_fast_assign(
__assign_str(name, card->name);
+ __entry->event = event;
),
- TP_printk("card=%s", __get_str(name))
+ TP_printk("card=%s event=%d", __get_str(name), (int)__entry->event)
);
DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
- TP_PROTO(struct snd_soc_card *card),
+ TP_PROTO(struct snd_soc_card *card, int event),
- TP_ARGS(card)
+ TP_ARGS(card, event)
);
DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
- TP_PROTO(struct snd_soc_card *card),
+ TP_PROTO(struct snd_soc_card *card, int event),
- TP_ARGS(card)
+ TP_ARGS(card, event)
);
diff --git a/include/trace/events/avc.h b/include/trace/events/avc.h
new file mode 100644
index 000000000000..b55fda2e0773
--- /dev/null
+++ b/include/trace/events/avc.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Authors: Thiébaud Weksteen <tweek@google.com>
+ * Peter Enderborg <Peter.Enderborg@sony.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM avc
+
+#if !defined(_TRACE_SELINUX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SELINUX_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(selinux_audited,
+
+ TP_PROTO(struct selinux_audit_data *sad,
+ char *scontext,
+ char *tcontext,
+ const char *tclass
+ ),
+
+ TP_ARGS(sad, scontext, tcontext, tclass),
+
+ TP_STRUCT__entry(
+ __field(u32, requested)
+ __field(u32, denied)
+ __field(u32, audited)
+ __field(int, result)
+ __string(scontext, scontext)
+ __string(tcontext, tcontext)
+ __string(tclass, tclass)
+ ),
+
+ TP_fast_assign(
+ __entry->requested = sad->requested;
+ __entry->denied = sad->denied;
+ __entry->audited = sad->audited;
+ __entry->result = sad->result;
+ __assign_str(tcontext, tcontext);
+ __assign_str(scontext, scontext);
+ __assign_str(tclass, tclass);
+ ),
+
+ TP_printk("requested=0x%x denied=0x%x audited=0x%x result=%d scontext=%s tcontext=%s tclass=%s",
+ __entry->requested, __entry->denied, __entry->audited, __entry->result,
+ __get_str(scontext), __get_str(tcontext), __get_str(tclass)
+ )
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
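
The event takes pre-resolved context strings, so the trace path itself does
no SID-to-string lookups. A hedged sketch of a caller; the real call site is
in security/selinux/avc.c and demo_audit() is illustrative:

	static void demo_audit(struct selinux_audit_data *sad,
			       char *scontext, char *tcontext,
			       const char *tclass)
	{
		trace_selinux_audited(sad, scontext, tcontext, tclass);
	}
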
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 0bddea663b3b..899fdacf57b9 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(bcache_request,
__entry->sector = bio->bi_iter.bi_sector;
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u",
@@ -137,7 +137,7 @@ TRACE_EVENT(bcache_read,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
__entry->cache_hit = hit;
__entry->bypass = bypass;
),
@@ -164,11 +164,11 @@ TRACE_EVENT(bcache_write,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->sb.set_uuid, 16);
+ memcpy(__entry->uuid, c->set_uuid, 16);
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
__entry->writeback = writeback;
__entry->bypass = bypass;
),
@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->sb.set_uuid, 16);
+ memcpy(__entry->uuid, c->set_uuid, 16);
),
TP_printk("%pU", __entry->uuid)
@@ -238,7 +238,7 @@ TRACE_EVENT(bcache_journal_write,
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
__entry->nr_keys = keys;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u keys %u",
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 34d64ca306b1..0e128ad51460 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -12,6 +12,7 @@
#define RWBS_LEN 8
+#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,
TP_PROTO(struct buffer_head *bh),
@@ -61,10 +62,10 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_ARGS(bh)
);
+#endif /* CONFIG_BUFFER_HEAD */
/**
* block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
@@ -73,9 +74,9 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
*/
TRACE_EVENT(block_rq_requeue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -86,11 +87,11 @@ TRACE_EVENT(block_rq_requeue,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
@@ -101,21 +102,9 @@ TRACE_EVENT(block_rq_requeue,
__entry->nr_sector, 0)
);
-/**
- * block_rq_complete - block IO operation completed by device driver
- * @rq: block operations request
- * @error: status code
- * @nr_bytes: number of completed bytes
- *
- * The block_rq_complete tracepoint event indicates that some portion
- * of operation request has been completed by the device driver. If
- * the @rq->bio is %NULL, then there is absolutely no additional work to
- * do for the request. If @rq->bio is non-NULL then there is
- * additional work required to complete the request.
- */
-TRACE_EVENT(block_rq_complete,
+DECLARE_EVENT_CLASS(block_rq_completion,
- TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
TP_ARGS(rq, error, nr_bytes),
@@ -123,18 +112,18 @@ TRACE_EVENT(block_rq_complete,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, error )
+ __field( int, error )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
- __entry->error = error;
+ __entry->error = blk_status_to_errno(error);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
@@ -145,11 +134,46 @@ TRACE_EVENT(block_rq_complete,
__entry->nr_sector, __entry->error)
);
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of the operation request has been completed by the device driver. If
+ * the @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_complete,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
+/**
+ * block_rq_error - block IO operation error reported by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_error tracepoint event indicates that some portion
+ * of the operation request has failed as reported by the device driver.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_error,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
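
Both events on this class record an errno-style value: blk_status_to_errno()
converts the blk_status_t at record time. A userspace sketch of that
translation with a trimmed stand-in table (the real table lives in
block/blk-core.c):

	#include <errno.h>
	#include <stdio.h>

	enum demo_blk_status { DEMO_STS_OK = 0, DEMO_STS_TIMEOUT, DEMO_STS_IOERR };

	static int demo_status_to_errno(enum demo_blk_status s)
	{
		switch (s) {
		case DEMO_STS_OK:	return 0;
		case DEMO_STS_TIMEOUT:	return -ETIMEDOUT;
		default:		return -EIO;
		}
	}

	int main(void)
	{
		printf("%d\n", demo_status_to_errno(DEMO_STS_TIMEOUT)); /* -110 */
		return 0;
	}
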
DECLARE_EVENT_CLASS(block_rq,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -162,12 +186,12 @@ DECLARE_EVENT_CLASS(block_rq,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -181,7 +205,6 @@ DECLARE_EVENT_CLASS(block_rq,
/**
* block_rq_insert - insert block operation request into queue
- * @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
@@ -191,78 +214,63 @@ DECLARE_EVENT_CLASS(block_rq,
*/
DEFINE_EVENT(block_rq, block_rq_insert,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
* block_rq_issue - issue pending block IO request operation to device driver
- * @q: queue holding operation
- * @rq: block IO operation operation request
+ * @rq: block IO operation request
*
 * Called when block operation request @rq is sent to a device driver
 * for processing.
*/
DEFINE_EVENT(block_rq, block_rq_issue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
* block_rq_merge - merge request with another one in the elevator
- * @q: queue holding operation
- * @rq: block IO operation operation request
+ * @rq: block IO operation request
*
 * Called when block operation request @rq is merged with another
 * request queued in the elevator.
*/
DEFINE_EVENT(block_rq, block_rq_merge,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @q: queue holding the block operation
- * @bio: block operation
+ * block_io_start - insert a request for execution
+ * @rq: block IO operation request
*
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
+ * Called when block operation request @rq is queued for execution
*/
-TRACE_EVENT(block_bio_bounce,
+DEFINE_EVENT(block_rq, block_io_start,
- TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, bio),
+ TP_ARGS(rq)
+);
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
+/**
+ * block_io_done - block IO operation request completed
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq is completed
+ */
+DEFINE_EVENT(block_rq, block_io_done,
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
+ TP_PROTO(struct request *rq),
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ TP_ARGS(rq)
);
/**
@@ -292,7 +300,7 @@ TRACE_EVENT(block_bio_complete,
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->error = blk_status_to_errno(bio->bi_status);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u [%d]",
@@ -301,11 +309,11 @@ TRACE_EVENT(block_bio_complete,
__entry->nr_sector, __entry->error)
);
-DECLARE_EVENT_CLASS(block_bio_merge,
+DECLARE_EVENT_CLASS(block_bio,
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
+ TP_PROTO(struct bio *bio),
- TP_ARGS(q, rq, bio),
+ TP_ARGS(bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -319,7 +327,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -330,133 +338,62 @@ DECLARE_EVENT_CLASS(block_bio_merge,
);
/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
+DEFINE_EVENT(block_bio, block_bio_bounce,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+/**
* block_bio_backmerge - merging block operation to the end of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block request @bio to the end of an existing block request
- * in queue @q.
+ * Merging block request @bio to the end of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_backmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block IO operation @bio to the beginning of an existing block
- * operation in queue @q.
+ * Merging block IO operation @bio to the beginning of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_frontmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_queue - putting new block IO operation in queue
- * @q: queue holding operation
* @bio: new block operation
*
 * About to place the block IO operation @bio into a request queue.
*/
-TRACE_EVENT(block_bio_queue,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
-);
-
-DECLARE_EVENT_CLASS(block_get_rq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio ? bio_dev(bio) : 0;
- __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
- __entry->nr_sector = bio ? bio_sectors(bio) : 0;
- blk_fill_rwbs(__entry->rwbs,
- bio ? bio->bi_opf : 0, __entry->nr_sector);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+DEFINE_EVENT(block_bio, block_bio_queue,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_getrq - get a free request entry in queue for block IO operations
- * @q: queue for operations
- * @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
- *
- * A request struct for queue @q has been allocated to handle the
- * block IO operation @bio.
- */
-DEFINE_EVENT(block_get_rq, block_getrq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
-);
-
-/**
- * block_sleeprq - waiting to get a free request entry in queue for block IO operation
- * @q: queue for operation
* @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
*
- * In the case where a request struct cannot be provided for queue @q
- * the process needs to wait for an request struct to become
- * available. This tracepoint event is generated each time the
- * process goes to sleep waiting for request struct become available.
+ * A request struct has been allocated to handle the block IO operation @bio.
*/
-DEFINE_EVENT(block_get_rq, block_sleeprq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
+DEFINE_EVENT(block_bio, block_getrq,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
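
With the bio events folded into the shared block_bio class, any further bio
event is a two-line definition rather than a full TRACE_EVENT. A sketch with
an illustrative name (block_bio_demo is not a real tracepoint):

	DEFINE_EVENT(block_bio, block_bio_demo,
		TP_PROTO(struct bio *bio),
		TP_ARGS(bio)
	);
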
/**
@@ -521,21 +458,19 @@ DEFINE_EVENT(block_unplug, block_unplug,
/**
* block_split - split a single bio struct into two bio structs
- * @q: queue containing the bio
* @bio: block operation being split
* @new_sector: The starting sector for the new bio
*
- * The bio request @bio in request queue @q needs to be split into two
- * bio requests. The newly created @bio request starts at
- * @new_sector. This split may be required due to hardware limitation
- * such as operation crossing device boundaries in a RAID system.
+ * The bio request @bio needs to be split into two bio requests. The newly
+ * created @bio request starts at @new_sector. This split may be required due to
+ * hardware limitations such as operation crossing device boundaries in a RAID
+ * system.
*/
TRACE_EVENT(block_split,
- TP_PROTO(struct request_queue *q, struct bio *bio,
- unsigned int new_sector),
+ TP_PROTO(struct bio *bio, unsigned int new_sector),
- TP_ARGS(q, bio, new_sector),
+ TP_ARGS(bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -549,7 +484,7 @@ TRACE_EVENT(block_split,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -562,9 +497,8 @@ TRACE_EVENT(block_split,
/**
* block_bio_remap - map request for a logical device to the raw device
- * @q: queue holding the operation
* @bio: revised operation
- * @dev: device for the operation
+ * @dev: original device for the operation
* @from: original sector for the operation
*
* An operation for a logical device has been mapped to the
@@ -572,10 +506,9 @@ TRACE_EVENT(block_split,
*/
TRACE_EVENT(block_bio_remap,
- TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t from),
+ TP_PROTO(struct bio *bio, dev_t dev, sector_t from),
- TP_ARGS(q, bio, dev, from),
+ TP_ARGS(bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -592,7 +525,7 @@ TRACE_EVENT(block_bio_remap,
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -605,7 +538,6 @@ TRACE_EVENT(block_bio_remap,
/**
* block_rq_remap - map request for a block operation request
- * @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
@@ -616,10 +548,9 @@ TRACE_EVENT(block_bio_remap,
*/
TRACE_EVENT(block_rq_remap,
- TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
- sector_t from),
+ TP_PROTO(struct request *rq, dev_t dev, sector_t from),
- TP_ARGS(q, rq, dev, from),
+ TP_ARGS(rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -632,13 +563,13 @@ TRACE_EVENT(block_rq_remap,
),
TP_fast_assign(
- __entry->dev = disk_devt(rq->rq_disk);
+ __entry->dev = disk_devt(rq->q->disk);
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
__entry->old_sector = from;
__entry->nr_bios = blk_rq_count_bios(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 6b200059c2c5..a6b3a4e409f0 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -122,6 +122,64 @@ TRACE_EVENT(br_fdb_update,
__entry->flags)
);
+TRACE_EVENT(br_mdb_full,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct br_ip *group),
+
+ TP_ARGS(dev, group),
+
+ TP_STRUCT__entry(
+ __string(dev, dev->name)
+ __field(int, af)
+ __field(u16, vid)
+ __array(__u8, src, 16)
+ __array(__u8, grp, 16)
+ __array(__u8, grpmac, ETH_ALEN) /* For af == 0. */
+ ),
+
+ TP_fast_assign(
+ struct in6_addr *in6;
+
+ __assign_str(dev, dev->name);
+ __entry->vid = group->vid;
+
+ if (!group->proto) {
+ __entry->af = 0;
+
+ memset(__entry->src, 0, sizeof(__entry->src));
+ memset(__entry->grp, 0, sizeof(__entry->grp));
+ memcpy(__entry->grpmac, group->dst.mac_addr, ETH_ALEN);
+ } else if (group->proto == htons(ETH_P_IP)) {
+ __entry->af = AF_INET;
+
+ in6 = (struct in6_addr *)__entry->src;
+ ipv6_addr_set_v4mapped(group->src.ip4, in6);
+
+ in6 = (struct in6_addr *)__entry->grp;
+ ipv6_addr_set_v4mapped(group->dst.ip4, in6);
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ __entry->af = AF_INET6;
+
+ in6 = (struct in6_addr *)__entry->src;
+ *in6 = group->src.ip6;
+
+ in6 = (struct in6_addr *)__entry->grp;
+ *in6 = group->dst.ip6;
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+#endif
+ }
+ ),
+
+ TP_printk("dev %s af %u src %pI6c grp %pI6c/%pM vid %u",
+ __get_str(dev), __entry->af, __entry->src, __entry->grp,
+ __entry->grpmac, __entry->vid)
+);
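
br_mdb_full stores every group address, IPv4 included, in the 16-byte arrays
as a v4-mapped IPv6 address (::ffff:a.b.c.d), so a single %pI6c conversion
covers both families. A userspace sketch of the mapping that
ipv6_addr_set_v4mapped() performs:

	#include <arpa/inet.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		struct in_addr v4;
		unsigned char v6[16] = { 0 };
		char buf[INET6_ADDRSTRLEN];

		inet_pton(AF_INET, "239.1.1.1", &v4);
		v6[10] = 0xff;			/* ::ffff: prefix */
		v6[11] = 0xff;
		memcpy(&v6[12], &v4, 4);	/* low 4 bytes = IPv4 address */
		printf("%s\n", inet_ntop(AF_INET6, v6, buf, sizeof(buf)));
		return 0;			/* prints ::ffff:239.1.1.1 */
	}
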
#endif /* _TRACE_BRIDGE_H */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 863335ecb7e8..90b0222390e5 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -21,21 +21,23 @@ struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
struct btrfs_block_group;
struct btrfs_free_cluster;
-struct map_lookup;
+struct btrfs_chunk_map;
struct extent_buffer;
struct btrfs_work;
-struct __btrfs_workqueue;
+struct btrfs_workqueue;
struct btrfs_qgroup_extent_record;
struct btrfs_qgroup;
struct extent_io_tree;
struct prelim_ref;
struct btrfs_space_info;
+struct btrfs_raid_bio;
+struct raid56_bio_trace_info;
+struct find_free_extent_ctl;
#define show_ref_type(type) \
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
{ BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
- { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
{ BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
@@ -53,6 +55,7 @@ struct btrfs_space_info;
{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
{ BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \
+ { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },\
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
#define show_root_type(obj) \
@@ -79,8 +82,8 @@ struct btrfs_space_info;
#define IO_TREE_OWNER \
EM( IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS") \
EM( IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS") \
+ EM( IO_TREE_BTREE_INODE_IO, "BTREE_INODE_IO") \
EM( IO_TREE_INODE_IO, "INODE_IO") \
- EM( IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE") \
EM( IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS") \
EM( IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES") \
EM( IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES") \
@@ -93,8 +96,9 @@ struct btrfs_space_info;
EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \
EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \
EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \
+ EM( FLUSH_DELALLOC_FULL, "FLUSH_DELALLOC_FULL") \
EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \
- EM( FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS") \
+ EM( FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS") \
EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \
EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \
@@ -149,7 +153,6 @@ FLUSH_STATES
{ EXTENT_NODATASUM, "NODATASUM"}, \
{ EXTENT_CLEAR_META_RESV, "CLEAR_META_RESV"}, \
{ EXTENT_NEED_WAIT, "NEED_WAIT"}, \
- { EXTENT_DAMAGED, "DAMAGED"}, \
{ EXTENT_NORESERVE, "NORESERVE"}, \
{ EXTENT_QGROUP_RESERVED, "QGROUP_RESERVED"}, \
{ EXTENT_CLEAR_DATA_RESV, "CLEAR_DATA_RESV"}, \
@@ -180,18 +183,18 @@ FLUSH_STATES
TRACE_EVENT(btrfs_transaction_commit,
- TP_PROTO(const struct btrfs_root *root),
+ TP_PROTO(const struct btrfs_fs_info *fs_info),
- TP_ARGS(root),
+ TP_ARGS(fs_info),
TP_STRUCT__entry_btrfs(
__field( u64, generation )
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->generation = root->fs_info->generation;
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
+ __entry->generation = fs_info->generation;
+ __entry->root_objectid = BTRFS_ROOT_TREE_OBJECTID;
),
TP_printk_btrfs("root=%llu(%s) gen=%llu",
@@ -262,20 +265,20 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
- { EXTENT_MAP_INLINE, "INLINE" }, \
- { EXTENT_MAP_DELALLOC, "DELALLOC" })
+ { EXTENT_MAP_INLINE, "INLINE" })
#define show_map_type(type) \
type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
- { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
- { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
- { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
- { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
+ { EXTENT_FLAG_PINNED, "PINNED" },\
+ { EXTENT_FLAG_COMPRESS_ZLIB, "COMPRESS_ZLIB" },\
+ { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\
+ { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\
+ { EXTENT_FLAG_PREALLOC, "PREALLOC" },\
+ { EXTENT_FLAG_LOGGING, "LOGGING" },\
+ { EXTENT_FLAG_FILLING, "FILLING" })
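
The table entries lose their (1 << ...) shifts because the extent-map flags
are now defined as masks rather than bit numbers; __print_flags() simply ANDs
each table value against the recorded word. A userspace sketch of that
decoding, with illustrative flag values:

	#include <stdio.h>

	#define DEMO_FLAG_PINNED	(1U << 0)
	#define DEMO_FLAG_PREALLOC	(1U << 1)

	static void demo_print_flags(unsigned int flags)
	{
		const char *sep = "";

		if (flags & DEMO_FLAG_PINNED) {
			printf("%sPINNED", sep);
			sep = "|";
		}
		if (flags & DEMO_FLAG_PREALLOC)
			printf("%sPREALLOC", sep);
		printf("\n");
	}

	int main(void)
	{
		demo_print_flags(DEMO_FLAG_PINNED | DEMO_FLAG_PREALLOC);
		return 0;	/* prints PINNED|PREALLOC */
	}
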
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -294,9 +297,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, orig_start )
__field( u64, block_start )
__field( u64, block_len )
- __field( unsigned long, flags )
+ __field( u32, flags )
__field( int, refs )
- __field( unsigned int, compress_type )
),
TP_fast_assign_btrfs(root->fs_info,
@@ -309,13 +311,11 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
- __entry->compress_type = map->compress_type;
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
"orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u "
- "compress_type=%u",
+ "block_len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
@@ -324,7 +324,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
show_map_type(__entry->block_start),
__entry->block_len,
show_map_flags(__entry->flags),
- __entry->refs, __entry->compress_type)
+ __entry->refs)
);
TRACE_EVENT(btrfs_handle_em_exist,
@@ -498,19 +498,20 @@ DEFINE_EVENT(
#define show_ordered_flags(flags) \
__print_flags(flags, "|", \
- { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
- { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
+ { (1 << BTRFS_ORDERED_REGULAR), "REGULAR" }, \
{ (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \
- { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
{ (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \
+ { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
+ { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
+ { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
DECLARE_EVENT_CLASS(btrfs__ordered_extent,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered),
@@ -529,8 +530,8 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__field( u64, truncated_len )
),
- TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->ino = btrfs_ino(BTRFS_I(inode));
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
__entry->file_offset = ordered->file_offset;
__entry->start = ordered->disk_bytenr;
__entry->len = ordered->num_bytes;
@@ -539,8 +540,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
__entry->refs = refcount_read(&ordered->refs);
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = inode->root->root_key.objectid;
__entry->truncated_len = ordered->truncated_len;
),
@@ -563,7 +563,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
@@ -571,7 +571,7 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
@@ -579,7 +579,7 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
@@ -587,12 +587,105 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
- TP_PROTO(const struct inode *inode,
+ TP_PROTO(const struct btrfs_inode *inode,
const struct btrfs_ordered_extent *ordered),
TP_ARGS(inode, ordered)
);
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_range,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first_range,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_for_logging,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_split,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_dec_test_pending,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+TRACE_EVENT(btrfs_finish_ordered_extent,
+
+ TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 len,
+ bool uptodate),
+
+ TP_ARGS(inode, start, len, uptodate),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, ino )
+ __field( u64, start )
+ __field( u64, len )
+ __field( bool, uptodate )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
+ __entry->start = start;
+ __entry->len = len;
+ __entry->uptodate = uptodate;
+ __entry->root_objectid = inode->root->root_key.objectid;
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu uptodate=%d",
+ show_root_type(__entry->root_objectid),
+ __entry->ino, __entry->start,
+ __entry->len, !!__entry->uptodate)
+);
+
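A hedged sketch of how btrfs_finish_ordered_extent would be emitted, matching the TP_PROTO above; the surrounding variables are assumed to come from the bio completion path, not from this diff:

	/* Assumed call site on I/O completion: */
	trace_btrfs_finish_ordered_extent(inode, start, len, uptodate);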
DECLARE_EVENT_CLASS(btrfs__writepage,
TP_PROTO(const struct page *page, const struct inode *inode,
@@ -652,34 +745,30 @@ DEFINE_EVENT(btrfs__writepage, __extent_writepage,
TRACE_EVENT(btrfs_writepage_end_io_hook,
- TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate),
+ TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 end,
+ int uptodate),
- TP_ARGS(page, start, end, uptodate),
+ TP_ARGS(inode, start, end, uptodate),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( unsigned long, index )
__field( u64, start )
__field( u64, end )
__field( int, uptodate )
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb),
- __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host));
- __entry->index = page->index;
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
__entry->start = start;
__entry->end = end;
__entry->uptodate = uptodate;
- __entry->root_objectid =
- BTRFS_I(page->mapping->host)->root->root_key.objectid;
+ __entry->root_objectid = inode->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
- "end=%llu uptodate=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
- __entry->ino, __entry->index,
- __entry->start,
+ __entry->ino, __entry->start,
__entry->end, __entry->uptodate)
);
@@ -969,7 +1058,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size),
@@ -1003,7 +1092,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1011,7 +1100,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1095,7 +1184,7 @@ TRACE_EVENT(btrfs_trigger_flush,
__entry->flags = flags;
__entry->bytes = bytes;
__entry->flush = flush;
- __assign_str(reason, reason)
+ __assign_str(reason, reason);
),
TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
@@ -1111,15 +1200,16 @@ TRACE_EVENT(btrfs_trigger_flush,
TRACE_EVENT(btrfs_flush_space,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 num_bytes,
- int state, int ret),
+ int state, int ret, bool for_preempt),
- TP_ARGS(fs_info, flags, num_bytes, state, ret),
+ TP_ARGS(fs_info, flags, num_bytes, state, ret, for_preempt),
TP_STRUCT__entry_btrfs(
__field( u64, flags )
__field( u64, num_bytes )
__field( int, state )
__field( int, ret )
+ __field( bool, for_preempt )
),
TP_fast_assign_btrfs(fs_info,
@@ -1127,15 +1217,16 @@ TRACE_EVENT(btrfs_flush_space,
__entry->num_bytes = num_bytes;
__entry->state = state;
__entry->ret = ret;
+ __entry->for_preempt = for_preempt;
),
- TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d",
+ TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d for_preempt=%d",
__entry->state,
__print_symbolic(__entry->state, FLUSH_STATES),
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
- __entry->num_bytes, __entry->ret)
+ __entry->num_bytes, __entry->ret, __entry->for_preempt)
);
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
@@ -1176,74 +1267,156 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
TRACE_EVENT(find_free_extent,
- TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes,
- u64 empty_size, u64 data),
+ TP_PROTO(const struct btrfs_root *root,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(fs_info, num_bytes, empty_size, data),
+ TP_ARGS(root, ffe_ctl),
TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
- __field( u64, data )
+ __field( u64, flags )
),
- TP_fast_assign_btrfs(fs_info,
- __entry->num_bytes = num_bytes;
- __entry->empty_size = empty_size;
- __entry->data = data;
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = root->root_key.objectid;
+ __entry->num_bytes = ffe_ctl->num_bytes;
+ __entry->empty_size = ffe_ctl->empty_size;
+ __entry->flags = ffe_ctl->flags;
),
TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)",
- show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
- __entry->num_bytes, __entry->empty_size, __entry->data,
- __print_flags((unsigned long)__entry->data, "|",
+ show_root_type(__entry->root_objectid),
+ __entry->num_bytes, __entry->empty_size, __entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|",
+ BTRFS_GROUP_FLAGS))
+);
+
+TRACE_EVENT(find_free_extent_search_loop,
+
+ TP_PROTO(const struct btrfs_root *root,
+ const struct find_free_extent_ctl *ffe_ctl),
+
+ TP_ARGS(root, ffe_ctl),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( u64, num_bytes )
+ __field( u64, empty_size )
+ __field( u64, flags )
+ __field( u64, loop )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = root->root_key.objectid;
+ __entry->num_bytes = ffe_ctl->num_bytes;
+ __entry->empty_size = ffe_ctl->empty_size;
+ __entry->flags = ffe_ctl->flags;
+ __entry->loop = ffe_ctl->loop;
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s) loop=%llu",
+ show_root_type(__entry->root_objectid),
+ __entry->num_bytes, __entry->empty_size, __entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->loop)
+);
+
+TRACE_EVENT(find_free_extent_have_block_group,
+
+ TP_PROTO(const struct btrfs_root *root,
+ const struct find_free_extent_ctl *ffe_ctl,
+ const struct btrfs_block_group *block_group),
+
+ TP_ARGS(root, ffe_ctl, block_group),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( u64, num_bytes )
+ __field( u64, empty_size )
+ __field( u64, flags )
+ __field( u64, loop )
+ __field( bool, hinted )
+ __field( u64, bg_start )
+ __field( u64, bg_flags )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = root->root_key.objectid;
+ __entry->num_bytes = ffe_ctl->num_bytes;
+ __entry->empty_size = ffe_ctl->empty_size;
+ __entry->flags = ffe_ctl->flags;
+ __entry->loop = ffe_ctl->loop;
+ __entry->hinted = ffe_ctl->hinted;
+ __entry->bg_start = block_group->start;
+ __entry->bg_flags = block_group->flags;
+ ),
+
+ TP_printk_btrfs(
+"root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s) loop=%llu hinted=%d block_group=%llu bg_flags=%llu(%s)",
+ show_root_type(__entry->root_objectid),
+ __entry->num_bytes, __entry->empty_size, __entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->loop, __entry->hinted,
+ __entry->bg_start, __entry->bg_flags,
+ __print_flags((unsigned long)__entry->bg_flags, "|",
BTRFS_GROUP_FLAGS))
);
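The reworked find_free_extent events all take the allocator's control structure, so adding a field to the output means touching only the event, not every call site. Assumed emitters, one per stage of the search (call placement is a sketch):

	trace_find_free_extent(root, ffe_ctl);			/* on entry */
	trace_find_free_extent_search_loop(root, ffe_ctl);	/* per loop level */
	trace_find_free_extent_have_block_group(root, ffe_ctl, block_group);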
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
- u64 len),
+ TP_PROTO(const struct btrfs_block_group *block_group,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(block_group, start, len),
+ TP_ARGS(block_group, ffe_ctl),
TP_STRUCT__entry_btrfs(
__field( u64, bg_objectid )
__field( u64, flags )
+ __field( int, bg_size_class )
__field( u64, start )
__field( u64, len )
+ __field( u64, loop )
+ __field( bool, hinted )
+ __field( int, size_class )
),
TP_fast_assign_btrfs(block_group->fs_info,
__entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
- __entry->start = start;
- __entry->len = len;
+ __entry->bg_size_class = block_group->size_class;
+ __entry->start = ffe_ctl->search_start;
+ __entry->len = ffe_ctl->num_bytes;
+ __entry->loop = ffe_ctl->loop;
+ __entry->hinted = ffe_ctl->hinted;
+ __entry->size_class = ffe_ctl->size_class;
),
- TP_printk_btrfs("root=%llu(%s) block_group=%llu flags=%llu(%s) "
- "start=%llu len=%llu",
+ TP_printk_btrfs(
+"root=%llu(%s) block_group=%llu flags=%llu(%s) bg_size_class=%d start=%llu len=%llu loop=%llu hinted=%d size_class=%d",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
"|", BTRFS_GROUP_FLAGS),
- __entry->start, __entry->len)
+ __entry->bg_size_class, __entry->start, __entry->len,
+ __entry->loop, __entry->hinted, __entry->size_class)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
- u64 len),
+ TP_PROTO(const struct btrfs_block_group *block_group,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(block_group, start, len)
+ TP_ARGS(block_group, ffe_ctl)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
- u64 len),
+ TP_PROTO(const struct btrfs_block_group *block_group,
+ const struct find_free_extent_ctl *ffe_ctl),
- TP_ARGS(block_group, start, len)
+ TP_ARGS(block_group, ffe_ctl)
);
TRACE_EVENT(btrfs_find_cluster,
@@ -1341,13 +1514,13 @@ TRACE_EVENT(alloc_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
- __field(gfp_t, mask)
+ __field(unsigned long, mask)
__field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
- __entry->mask = mask,
+ __entry->mask = (__force unsigned long)mask,
__entry->ip = (const void *)IP
),
@@ -1385,7 +1558,6 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( const void *, wq )
__field( const void *, func )
__field( const void *, ordered_func )
- __field( const void *, ordered_free )
__field( const void *, normal_work )
),
@@ -1394,14 +1566,12 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->wq = work->wq;
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
- __entry->ordered_free = work->ordered_free;
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
- "ordered_free=%p",
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p",
__entry->work, __entry->normal_work, __entry->wq,
- __entry->func, __entry->ordered_func, __entry->ordered_free)
+ __entry->func, __entry->ordered_func)
);
/*
@@ -1454,42 +1624,36 @@ DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
TP_ARGS(work)
);
-DECLARE_EVENT_CLASS(btrfs__workqueue,
+DECLARE_EVENT_CLASS(btrfs_workqueue,
- TP_PROTO(const struct __btrfs_workqueue *wq,
- const char *name, int high),
+ TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
- TP_ARGS(wq, name, high),
+ TP_ARGS(wq, name),
TP_STRUCT__entry_btrfs(
__field( const void *, wq )
__string( name, name )
- __field( int , high )
),
TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
__entry->wq = wq;
__assign_str(name, name);
- __entry->high = high;
),
- TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
- __print_flags(__entry->high, "",
- {(WQ_HIGHPRI), "-high"}),
+ TP_printk_btrfs("name=%s wq=%p", __get_str(name),
__entry->wq)
);
-DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
+DEFINE_EVENT(btrfs_workqueue, btrfs_workqueue_alloc,
- TP_PROTO(const struct __btrfs_workqueue *wq,
- const char *name, int high),
+ TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
- TP_ARGS(wq, name, high)
+ TP_ARGS(wq, name)
);
-DECLARE_EVENT_CLASS(btrfs__workqueue_done,
+DECLARE_EVENT_CLASS(btrfs_workqueue_done,
- TP_PROTO(const struct __btrfs_workqueue *wq),
+ TP_PROTO(const struct btrfs_workqueue *wq),
TP_ARGS(wq),
@@ -1504,9 +1668,9 @@ DECLARE_EVENT_CLASS(btrfs__workqueue_done,
TP_printk_btrfs("wq=%p", __entry->wq)
);
-DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
+DEFINE_EVENT(btrfs_workqueue_done, btrfs_workqueue_destroy,
- TP_PROTO(const struct __btrfs_workqueue *wq),
+ TP_PROTO(const struct btrfs_workqueue *wq),
TP_ARGS(wq)
);
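With the __btrfs_workqueue/btrfs_workqueue split gone, the high-priority flag disappears from the events too. Assumed emitters after the rename, mirroring the TP_PROTOs above:

	trace_btrfs_workqueue_alloc(wq, name);	/* at allocation */
	trace_btrfs_workqueue_destroy(wq);	/* at teardown */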
@@ -1840,25 +2004,27 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
);
TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
- TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),
+ TP_PROTO(const struct btrfs_root *root, u64 ino, int mod, unsigned outstanding),
- TP_ARGS(root, ino, mod),
+ TP_ARGS(root, ino, mod, outstanding),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, ino )
__field( int, mod )
+ __field( unsigned, outstanding )
),
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->ino = ino;
__entry->mod = mod;
+ __entry->outstanding = outstanding;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d outstanding=%u",
show_root_type(__entry->root_objectid),
- __entry->ino, __entry->mod)
+ __entry->ino, __entry->mod, __entry->outstanding)
);
DECLARE_EVENT_CLASS(btrfs__block_group,
@@ -1897,6 +2063,18 @@ DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
TP_ARGS(bg_cache)
);
+DEFINE_EVENT(btrfs__block_group, btrfs_add_reclaim_block_group,
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
+DEFINE_EVENT(btrfs__block_group, btrfs_reclaim_block_group,
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
+
+ TP_ARGS(bg_cache)
+);
+
DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
TP_PROTO(const struct btrfs_block_group *bg_cache),
@@ -1918,18 +2096,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->private_data) {
- const struct inode *inode = tree->private_data;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -1957,18 +2129,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->private_data) {
- const struct inode *inode = tree->private_data;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -1997,18 +2163,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->private_data) {
- const struct inode *inode = tree->private_data;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2023,6 +2183,109 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
);
+DECLARE_EVENT_CLASS(btrfs_dump_space_info,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo),
+
+ TP_ARGS(fs_info, sinfo),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, flags )
+ __field( u64, total_bytes )
+ __field( u64, bytes_used )
+ __field( u64, bytes_pinned )
+ __field( u64, bytes_reserved )
+ __field( u64, bytes_may_use )
+ __field( u64, bytes_readonly )
+ __field( u64, reclaim_size )
+ __field( int, clamp )
+ __field( u64, global_reserved )
+ __field( u64, trans_reserved )
+ __field( u64, delayed_refs_reserved )
+ __field( u64, delayed_reserved )
+ __field( u64, free_chunk_space )
+ __field( u64, delalloc_bytes )
+ __field( u64, ordered_bytes )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->flags = sinfo->flags;
+ __entry->total_bytes = sinfo->total_bytes;
+ __entry->bytes_used = sinfo->bytes_used;
+ __entry->bytes_pinned = sinfo->bytes_pinned;
+ __entry->bytes_reserved = sinfo->bytes_reserved;
+ __entry->bytes_may_use = sinfo->bytes_may_use;
+ __entry->bytes_readonly = sinfo->bytes_readonly;
+ __entry->reclaim_size = sinfo->reclaim_size;
+ __entry->clamp = sinfo->clamp;
+ __entry->global_reserved = fs_info->global_block_rsv.reserved;
+ __entry->trans_reserved = fs_info->trans_block_rsv.reserved;
+ __entry->delayed_refs_reserved = fs_info->delayed_refs_rsv.reserved;
+ __entry->delayed_reserved = fs_info->delayed_block_rsv.reserved;
+ __entry->free_chunk_space = atomic64_read(&fs_info->free_chunk_space);
+ __entry->delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+ __entry->ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+ ),
+
+ TP_printk_btrfs("flags=%s total_bytes=%llu bytes_used=%llu "
+ "bytes_pinned=%llu bytes_reserved=%llu "
+ "bytes_may_use=%llu bytes_readonly=%llu "
+ "reclaim_size=%llu clamp=%d global_reserved=%llu "
+ "trans_reserved=%llu delayed_refs_reserved=%llu "
+ "delayed_reserved=%llu chunk_free_space=%llu "
+ "delalloc_bytes=%llu ordered_bytes=%llu",
+ __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->total_bytes, __entry->bytes_used,
+ __entry->bytes_pinned, __entry->bytes_reserved,
+ __entry->bytes_may_use, __entry->bytes_readonly,
+ __entry->reclaim_size, __entry->clamp,
+ __entry->global_reserved, __entry->trans_reserved,
+ __entry->delayed_refs_reserved,
+ __entry->delayed_reserved, __entry->free_chunk_space,
+ __entry->delalloc_bytes, __entry->ordered_bytes)
+);
+
+DEFINE_EVENT(btrfs_dump_space_info, btrfs_done_preemptive_reclaim,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo),
+ TP_ARGS(fs_info, sinfo)
+);
+
+DEFINE_EVENT(btrfs_dump_space_info, btrfs_fail_all_tickets,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo),
+ TP_ARGS(fs_info, sinfo)
+);
+
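Both events reuse the btrfs_dump_space_info class, so a single snapshot of the space_info plus the global reserves serves the preemptive-reclaim and ticket-failure paths alike. Hypothetical emitters in the space-info code:

	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	trace_btrfs_fail_all_tickets(fs_info, space_info);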
+TRACE_EVENT(btrfs_reserve_ticket,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
+ u64 start_ns, int flush, int error),
+
+ TP_ARGS(fs_info, flags, bytes, start_ns, flush, error),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, flags )
+ __field( u64, bytes )
+ __field( u64, start_ns )
+ __field( int, flush )
+ __field( int, error )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->flags = flags;
+ __entry->bytes = bytes;
+ __entry->start_ns = start_ns;
+ __entry->flush = flush;
+ __entry->error = error;
+ ),
+
+ TP_printk_btrfs("flags=%s bytes=%llu start_ns=%llu flush=%s error=%d",
+ __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
+ __entry->bytes, __entry->start_ns,
+ __print_symbolic(__entry->flush, FLUSH_ACTIONS),
+ __entry->error)
+);
+
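A sketch of the assumed ticket lifecycle around btrfs_reserve_ticket; whether start_ns carries a raw timestamp or an elapsed time is up to the caller and is not fixed by this diff:

	u64 start_ns = ktime_get_ns();
	/* ... wait for space to become available ... */
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, ticket->bytes,
				   ktime_get_ns() - start_ns, flush,
				   ticket->error);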
DECLARE_EVENT_CLASS(btrfs_sleep_tree_lock,
TP_PROTO(const struct extent_buffer *eb, u64 start_ns),
@@ -2146,6 +2409,149 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
+DECLARE_EVENT_CLASS(btrfs_raid56_bio,
+
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, full_stripe )
+ __field( u64, physical )
+ __field( u64, devid )
+ __field( u32, offset )
+ __field( u32, len )
+ __field( u8, opf )
+ __field( u8, total_stripes )
+ __field( u8, real_stripes )
+ __field( u8, nr_data )
+ __field( u8, stripe_nr )
+ ),
+
+ TP_fast_assign_btrfs(rbio->bioc->fs_info,
+ __entry->full_stripe = rbio->bioc->full_stripe_logical;
+ __entry->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ __entry->len = bio->bi_iter.bi_size;
+ __entry->opf = bio_op(bio);
+ __entry->devid = trace_info->devid;
+ __entry->offset = trace_info->offset;
+ __entry->stripe_nr = trace_info->stripe_nr;
+ __entry->total_stripes = rbio->bioc->num_stripes;
+ __entry->real_stripes = rbio->real_stripes;
+ __entry->nr_data = rbio->nr_data;
+ ),
+ /*
+ * For type output, we need to output things like "DATA1"
+ * (the first data stripe), "DATA2" (the second data stripe),
+ * "PQ1" (P stripe), "PQ2" (Q stripe), "REPLACE0" (replace target device).
+ */
+ TP_printk_btrfs(
+"full_stripe=%llu devid=%lld type=%s%d offset=%d opf=0x%x physical=%llu len=%u",
+ __entry->full_stripe, __entry->devid,
+ (__entry->stripe_nr < __entry->nr_data) ? "DATA" :
+ ((__entry->stripe_nr < __entry->real_stripes) ? "PQ" :
+ "REPLACE"),
+ (__entry->stripe_nr < __entry->nr_data) ?
+ (__entry->stripe_nr + 1) :
+ ((__entry->stripe_nr < __entry->real_stripes) ?
+ (__entry->stripe_nr - __entry->nr_data + 1) : 0),
+ __entry->offset, __entry->opf, __entry->physical, __entry->len)
+);
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_read,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
+DEFINE_EVENT(btrfs_raid56_bio, raid56_write,
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+ const struct bio *bio,
+ const struct raid56_bio_trace_info *trace_info),
+
+ TP_ARGS(rbio, bio, trace_info)
+);
+
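The type string in the TP_printk above is pure arithmetic on stripe_nr; the same decode written as a helper, for clarity (illustrative only, not part of the diff):

	/* DATA for data stripes, PQ for parity, REPLACE for the target dev. */
	static const char *rbio_stripe_type(u8 nr, u8 nr_data, u8 real_stripes)
	{
		return nr < nr_data ? "DATA" :
		       (nr < real_stripes ? "PQ" : "REPLACE");
	}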
+TRACE_EVENT(btrfs_insert_one_raid_extent,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ int num_stripes),
+
+ TP_ARGS(fs_info, logical, length, num_stripes),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( int, num_stripes )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->num_stripes = num_stripes;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d",
+ __entry->logical, __entry->length,
+ __entry->num_stripes)
+);
+
+TRACE_EVENT(btrfs_raid_extent_delete,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end,
+ u64 found_start, u64 found_end),
+
+ TP_ARGS(fs_info, start, end, found_start, found_end),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, start )
+ __field( u64, end )
+ __field( u64, found_start )
+ __field( u64, found_end )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->start = start;
+ __entry->end = end;
+ __entry->found_start = found_start;
+ __entry->found_end = found_end;
+ ),
+
+ TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu",
+ __entry->start, __entry->end, __entry->found_start,
+ __entry->found_end)
+);
+
+TRACE_EVENT(btrfs_get_raid_extent_offset,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ u64 physical, u64 devid),
+
+ TP_ARGS(fs_info, logical, length, physical, devid),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( u64, physical )
+ __field( u64, devid )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->physical = physical;
+ __entry->devid = devid;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu",
+ __entry->logical, __entry->length, __entry->physical,
+ __entry->devid)
+);
+
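Assumed emitters for the three raid-stripe-tree events, mirroring their TP_PROTOs (argument sources are a sketch):

	trace_btrfs_insert_one_raid_extent(fs_info, logical, length, num_stripes);
	trace_btrfs_raid_extent_delete(fs_info, start, end, found_start, found_end);
	trace_btrfs_get_raid_extent_offset(fs_info, logical, length, physical, devid);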
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 5d9de24cb9c0..cf4b98b9a9ed 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* CacheFiles tracepoints
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#undef TRACE_SYSTEM
@@ -19,9 +19,86 @@
#define __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
enum cachefiles_obj_ref_trace {
- cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces,
- cachefiles_obj_put_wait_timeo,
- cachefiles_obj_ref__nr_traces
+ cachefiles_obj_get_ioreq,
+ cachefiles_obj_new,
+ cachefiles_obj_put_alloc_fail,
+ cachefiles_obj_put_detach,
+ cachefiles_obj_put_ioreq,
+ cachefiles_obj_see_clean_commit,
+ cachefiles_obj_see_clean_delete,
+ cachefiles_obj_see_clean_drop_tmp,
+ cachefiles_obj_see_lookup_cookie,
+ cachefiles_obj_see_lookup_failed,
+ cachefiles_obj_see_withdraw_cookie,
+ cachefiles_obj_see_withdrawal,
+ cachefiles_obj_get_ondemand_fd,
+ cachefiles_obj_put_ondemand_fd,
+};
+
+enum fscache_why_object_killed {
+ FSCACHE_OBJECT_IS_STALE,
+ FSCACHE_OBJECT_IS_WEIRD,
+ FSCACHE_OBJECT_INVALIDATED,
+ FSCACHE_OBJECT_NO_SPACE,
+ FSCACHE_OBJECT_WAS_RETIRED,
+ FSCACHE_OBJECT_WAS_CULLED,
+ FSCACHE_VOLUME_IS_WEIRD,
+};
+
+enum cachefiles_coherency_trace {
+ cachefiles_coherency_check_aux,
+ cachefiles_coherency_check_content,
+ cachefiles_coherency_check_dirty,
+ cachefiles_coherency_check_len,
+ cachefiles_coherency_check_objsize,
+ cachefiles_coherency_check_ok,
+ cachefiles_coherency_check_type,
+ cachefiles_coherency_check_xattr,
+ cachefiles_coherency_set_fail,
+ cachefiles_coherency_set_ok,
+ cachefiles_coherency_vol_check_cmp,
+ cachefiles_coherency_vol_check_ok,
+ cachefiles_coherency_vol_check_resv,
+ cachefiles_coherency_vol_check_xattr,
+ cachefiles_coherency_vol_set_fail,
+ cachefiles_coherency_vol_set_ok,
+};
+
+enum cachefiles_trunc_trace {
+ cachefiles_trunc_dio_adjust,
+ cachefiles_trunc_expand_tmpfile,
+ cachefiles_trunc_shrink,
+};
+
+enum cachefiles_prepare_read_trace {
+ cachefiles_trace_read_after_eof,
+ cachefiles_trace_read_found_hole,
+ cachefiles_trace_read_found_part,
+ cachefiles_trace_read_have_data,
+ cachefiles_trace_read_no_data,
+ cachefiles_trace_read_no_file,
+ cachefiles_trace_read_seek_error,
+ cachefiles_trace_read_seek_nxio,
+};
+
+enum cachefiles_error_trace {
+ cachefiles_trace_fallocate_error,
+ cachefiles_trace_getxattr_error,
+ cachefiles_trace_link_error,
+ cachefiles_trace_lookup_error,
+ cachefiles_trace_mkdir_error,
+ cachefiles_trace_notify_change_error,
+ cachefiles_trace_open_error,
+ cachefiles_trace_read_error,
+ cachefiles_trace_remxattr_error,
+ cachefiles_trace_rename_error,
+ cachefiles_trace_seek_error,
+ cachefiles_trace_setxattr_error,
+ cachefiles_trace_statfs_error,
+ cachefiles_trace_tmpfile_error,
+ cachefiles_trace_trunc_error,
+ cachefiles_trace_unlink_error,
+ cachefiles_trace_write_error,
};
#endif
@@ -31,21 +108,79 @@ enum cachefiles_obj_ref_trace {
*/
#define cachefiles_obj_kill_traces \
EM(FSCACHE_OBJECT_IS_STALE, "stale") \
+ EM(FSCACHE_OBJECT_IS_WEIRD, "weird") \
+ EM(FSCACHE_OBJECT_INVALIDATED, "inval") \
EM(FSCACHE_OBJECT_NO_SPACE, "no_space") \
EM(FSCACHE_OBJECT_WAS_RETIRED, "was_retired") \
- E_(FSCACHE_OBJECT_WAS_CULLED, "was_culled")
+ EM(FSCACHE_OBJECT_WAS_CULLED, "was_culled") \
+ E_(FSCACHE_VOLUME_IS_WEIRD, "volume_weird")
#define cachefiles_obj_ref_traces \
- EM(fscache_obj_get_add_to_deps, "GET add_to_deps") \
- EM(fscache_obj_get_queue, "GET queue") \
- EM(fscache_obj_put_alloc_fail, "PUT alloc_fail") \
- EM(fscache_obj_put_attach_fail, "PUT attach_fail") \
- EM(fscache_obj_put_drop_obj, "PUT drop_obj") \
- EM(fscache_obj_put_enq_dep, "PUT enq_dep") \
- EM(fscache_obj_put_queue, "PUT queue") \
- EM(fscache_obj_put_work, "PUT work") \
- EM(cachefiles_obj_put_wait_retry, "PUT wait_retry") \
- E_(cachefiles_obj_put_wait_timeo, "PUT wait_timeo")
+ EM(cachefiles_obj_get_ioreq, "GET ioreq") \
+ EM(cachefiles_obj_new, "NEW obj") \
+ EM(cachefiles_obj_put_alloc_fail, "PUT alloc_fail") \
+ EM(cachefiles_obj_put_detach, "PUT detach") \
+ EM(cachefiles_obj_put_ioreq, "PUT ioreq") \
+ EM(cachefiles_obj_see_clean_commit, "SEE clean_commit") \
+ EM(cachefiles_obj_see_clean_delete, "SEE clean_delete") \
+ EM(cachefiles_obj_see_clean_drop_tmp, "SEE clean_drop_tmp") \
+ EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
+ EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
+ EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
+ E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+
+#define cachefiles_coherency_traces \
+ EM(cachefiles_coherency_check_aux, "BAD aux ") \
+ EM(cachefiles_coherency_check_content, "BAD cont") \
+ EM(cachefiles_coherency_check_dirty, "BAD dirt") \
+ EM(cachefiles_coherency_check_len, "BAD len ") \
+ EM(cachefiles_coherency_check_objsize, "BAD osiz") \
+ EM(cachefiles_coherency_check_ok, "OK ") \
+ EM(cachefiles_coherency_check_type, "BAD type") \
+ EM(cachefiles_coherency_check_xattr, "BAD xatt") \
+ EM(cachefiles_coherency_set_fail, "SET fail") \
+ EM(cachefiles_coherency_set_ok, "SET ok ") \
+ EM(cachefiles_coherency_vol_check_cmp, "VOL BAD cmp ") \
+ EM(cachefiles_coherency_vol_check_ok, "VOL OK ") \
+ EM(cachefiles_coherency_vol_check_resv, "VOL BAD resv") \
+ EM(cachefiles_coherency_vol_check_xattr,"VOL BAD xatt") \
+ EM(cachefiles_coherency_vol_set_fail, "VOL SET fail") \
+ E_(cachefiles_coherency_vol_set_ok, "VOL SET ok ")
+
+#define cachefiles_trunc_traces \
+ EM(cachefiles_trunc_dio_adjust, "DIOADJ") \
+ EM(cachefiles_trunc_expand_tmpfile, "EXPTMP") \
+ E_(cachefiles_trunc_shrink, "SHRINK")
+
+#define cachefiles_prepare_read_traces \
+ EM(cachefiles_trace_read_after_eof, "after-eof ") \
+ EM(cachefiles_trace_read_found_hole, "found-hole") \
+ EM(cachefiles_trace_read_found_part, "found-part") \
+ EM(cachefiles_trace_read_have_data, "have-data ") \
+ EM(cachefiles_trace_read_no_data, "no-data ") \
+ EM(cachefiles_trace_read_no_file, "no-file ") \
+ EM(cachefiles_trace_read_seek_error, "seek-error") \
+ E_(cachefiles_trace_read_seek_nxio, "seek-enxio")
+
+#define cachefiles_error_traces \
+ EM(cachefiles_trace_fallocate_error, "fallocate") \
+ EM(cachefiles_trace_getxattr_error, "getxattr") \
+ EM(cachefiles_trace_link_error, "link") \
+ EM(cachefiles_trace_lookup_error, "lookup") \
+ EM(cachefiles_trace_mkdir_error, "mkdir") \
+ EM(cachefiles_trace_notify_change_error, "notify_change") \
+ EM(cachefiles_trace_open_error, "open") \
+ EM(cachefiles_trace_read_error, "read") \
+ EM(cachefiles_trace_remxattr_error, "remxattr") \
+ EM(cachefiles_trace_rename_error, "rename") \
+ EM(cachefiles_trace_seek_error, "seek") \
+ EM(cachefiles_trace_setxattr_error, "setxattr") \
+ EM(cachefiles_trace_statfs_error, "statfs") \
+ EM(cachefiles_trace_tmpfile_error, "tmpfile") \
+ EM(cachefiles_trace_trunc_error, "trunc") \
+ EM(cachefiles_trace_unlink_error, "unlink") \
+ E_(cachefiles_trace_write_error, "write")
+
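These EM()/E_() tables get expanded twice: first through TRACE_DEFINE_ENUM() (just below) to export the enum values, then redefined as { value, "string" } pairs that __print_symbolic() consumes. The second expansion follows the file's usual shape:

	#undef EM
	#undef E_
	#define EM(a, b)	{ a, b },
	#define E_(a, b)	{ a, b }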
/*
* Export enum symbols via userspace.
@@ -57,6 +192,10 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_kill_traces;
cachefiles_obj_ref_traces;
+cachefiles_coherency_traces;
+cachefiles_trunc_traces;
+cachefiles_prepare_read_traces;
+cachefiles_error_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -69,250 +208,644 @@ cachefiles_obj_ref_traces;
TRACE_EVENT(cachefiles_ref,
- TP_PROTO(struct cachefiles_object *obj,
- struct fscache_cookie *cookie,
- enum cachefiles_obj_ref_trace why,
- int usage),
+ TP_PROTO(unsigned int object_debug_id,
+ unsigned int cookie_debug_id,
+ int usage,
+ enum cachefiles_obj_ref_trace why),
- TP_ARGS(obj, cookie, why, usage),
+ TP_ARGS(object_debug_id, cookie_debug_id, usage, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct fscache_cookie *, cookie )
+ __field(unsigned int, obj )
+ __field(unsigned int, cookie )
__field(enum cachefiles_obj_ref_trace, why )
__field(int, usage )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->cookie = cookie;
+ __entry->obj = object_debug_id;
+ __entry->cookie = cookie_debug_id;
__entry->usage = usage;
__entry->why = why;
),
- TP_printk("c=%p o=%p u=%d %s",
+ TP_printk("c=%08x o=%08x u=%d %s",
__entry->cookie, __entry->obj, __entry->usage,
__print_symbolic(__entry->why, cachefiles_obj_ref_traces))
);
TRACE_EVENT(cachefiles_lookup,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct inode *inode),
+ struct dentry *dir,
+ struct dentry *de),
- TP_ARGS(obj, de, inode),
+ TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct inode *, inode )
+ __field(unsigned int, obj )
+ __field(short, error )
+ __field(unsigned long, dino )
+ __field(unsigned long, ino )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->inode = inode;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->dino = d_backing_inode(dir)->i_ino;
+ __entry->ino = (!IS_ERR(de) && d_backing_inode(de) ?
+ d_backing_inode(de)->i_ino : 0);
+ __entry->error = IS_ERR(de) ? PTR_ERR(de) : 0;
),
- TP_printk("o=%p d=%p i=%p",
- __entry->obj, __entry->de, __entry->inode)
+ TP_printk("o=%08x dB=%lx B=%lx e=%d",
+ __entry->obj, __entry->dino, __entry->ino, __entry->error)
);
TRACE_EVENT(cachefiles_mkdir,
- TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de, int ret),
+ TP_PROTO(struct dentry *dir, struct dentry *subdir),
- TP_ARGS(obj, de, ret),
+ TP_ARGS(dir, subdir),
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(int, ret )
+ __field(unsigned int, dir )
+ __field(unsigned int, subdir )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->ret = ret;
+ __entry->dir = d_backing_inode(dir)->i_ino;
+ __entry->subdir = d_backing_inode(subdir)->i_ino;
),
- TP_printk("o=%p d=%p r=%u",
- __entry->obj, __entry->de, __entry->ret)
+ TP_printk("dB=%x sB=%x",
+ __entry->dir,
+ __entry->subdir)
);
-TRACE_EVENT(cachefiles_create,
- TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de, int ret),
+TRACE_EVENT(cachefiles_tmpfile,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer),
- TP_ARGS(obj, de, ret),
+ TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(int, ret )
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->ret = ret;
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
),
- TP_printk("o=%p d=%p r=%u",
- __entry->obj, __entry->de, __entry->ret)
+ TP_printk("o=%08x B=%x",
+ __entry->obj,
+ __entry->backer)
+ );
+
+TRACE_EVENT(cachefiles_link,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer),
+
+ TP_ARGS(obj, backer),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ ),
+
+ TP_printk("o=%08x B=%x",
+ __entry->obj,
+ __entry->backer)
);
TRACE_EVENT(cachefiles_unlink,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
+ ino_t ino,
enum fscache_why_object_killed why),
- TP_ARGS(obj, de, why),
+ TP_ARGS(obj, ino, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
+ __field(unsigned int, obj )
+ __field(unsigned int, ino )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
+ __entry->obj = obj ? obj->debug_id : UINT_MAX;
+ __entry->ino = ino;
__entry->why = why;
),
- TP_printk("o=%p d=%p w=%s",
- __entry->obj, __entry->de,
+ TP_printk("o=%08x B=%x w=%s",
+ __entry->obj, __entry->ino,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
TRACE_EVENT(cachefiles_rename,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct dentry *to,
+ ino_t ino,
enum fscache_why_object_killed why),
- TP_ARGS(obj, de, to, why),
+ TP_ARGS(obj, ino, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct dentry *, to )
+ __field(unsigned int, obj )
+ __field(unsigned int, ino )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->to = to;
+ __entry->obj = obj ? obj->debug_id : UINT_MAX;
+ __entry->ino = ino;
__entry->why = why;
),
- TP_printk("o=%p d=%p t=%p w=%s",
- __entry->obj, __entry->de, __entry->to,
+ TP_printk("o=%08x B=%x w=%s",
+ __entry->obj, __entry->ino,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
-TRACE_EVENT(cachefiles_mark_active,
+TRACE_EVENT(cachefiles_coherency,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de),
+ ino_t ino,
+ enum cachefiles_content content,
+ enum cachefiles_coherency_trace why),
- TP_ARGS(obj, de),
+ TP_ARGS(obj, ino, content, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
+ __field(unsigned int, obj )
+ __field(enum cachefiles_coherency_trace, why )
+ __field(enum cachefiles_content, content )
+ __field(u64, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->why = why;
+ __entry->content = content;
+ __entry->ino = ino;
+ ),
+
+ TP_printk("o=%08x %s B=%llx c=%u",
+ __entry->obj,
+ __print_symbolic(__entry->why, cachefiles_coherency_traces),
+ __entry->ino,
+ __entry->content)
+ );
+
+TRACE_EVENT(cachefiles_vol_coherency,
+ TP_PROTO(struct cachefiles_volume *volume,
+ ino_t ino,
+ enum cachefiles_coherency_trace why),
+
+ TP_ARGS(volume, ino, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(unsigned int, vol )
+ __field(enum cachefiles_coherency_trace, why )
+ __field(u64, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->vol = volume->vcookie->debug_id;
+ __entry->why = why;
+ __entry->ino = ino;
+ ),
+
+ TP_printk("V=%08x %s B=%llx",
+ __entry->vol,
+ __print_symbolic(__entry->why, cachefiles_coherency_traces),
+ __entry->ino)
+ );
+
+TRACE_EVENT(cachefiles_prep_read,
+ TP_PROTO(struct cachefiles_object *obj,
+ loff_t start,
+ size_t len,
+ unsigned short flags,
+ enum netfs_io_source source,
+ enum cachefiles_prepare_read_trace why,
+ ino_t cache_inode, ino_t netfs_inode),
+
+ TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned short, flags )
+ __field(enum netfs_io_source, source )
+ __field(enum cachefiles_prepare_read_trace, why )
+ __field(size_t, len )
+ __field(loff_t, start )
+ __field(unsigned int, netfs_inode )
+ __field(unsigned int, cache_inode )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->flags = flags;
+ __entry->source = source;
+ __entry->why = why;
+ __entry->len = len;
+ __entry->start = start;
+ __entry->netfs_inode = netfs_inode;
+ __entry->cache_inode = cache_inode;
+ ),
+
+ TP_printk("o=%08x %s %s f=%02x s=%llx %zx ni=%x B=%x",
+ __entry->obj,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __print_symbolic(__entry->why, cachefiles_prepare_read_traces),
+ __entry->flags,
+ __entry->start, __entry->len,
+ __entry->netfs_inode, __entry->cache_inode)
+ );
+
+TRACE_EVENT(cachefiles_read,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct inode *backer,
+ loff_t start,
+ size_t len),
+
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(size_t, len )
+ __field(loff_t, start )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
),
- TP_printk("o=%p d=%p",
- __entry->obj, __entry->de)
+ TP_printk("o=%08x B=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
);
-TRACE_EVENT(cachefiles_wait_active,
+TRACE_EVENT(cachefiles_write,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct cachefiles_object *xobj),
+ struct inode *backer,
+ loff_t start,
+ size_t len),
- TP_ARGS(obj, de, xobj),
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(size_t, len )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x B=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_trunc,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ loff_t from, loff_t to, enum cachefiles_trunc_trace why),
+
+ TP_ARGS(obj, backer, from, to, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_trunc_trace, why )
+ __field(loff_t, from )
+ __field(loff_t, to )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->from = from;
+ __entry->to = to;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%08x B=%x %s l=%llx->%llx",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->why, cachefiles_trunc_traces),
+ __entry->from,
+ __entry->to)
+ );
+
+TRACE_EVENT(cachefiles_mark_active,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct inode *inode),
+
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct cachefiles_object *, xobj )
- __field(u16, flags )
- __field(u16, fsc_flags )
+ __field(unsigned int, obj )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->xobj = xobj;
- __entry->flags = xobj->flags;
- __entry->fsc_flags = xobj->fscache.flags;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%p d=%p wo=%p wf=%x wff=%x",
- __entry->obj, __entry->de, __entry->xobj,
- __entry->flags, __entry->fsc_flags)
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
);
-TRACE_EVENT(cachefiles_mark_inactive,
+TRACE_EVENT(cachefiles_mark_failed,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
struct inode *inode),
- TP_ARGS(obj, de, inode),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(struct inode *, inode )
+ __field(unsigned int, obj )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->inode = inode;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%p d=%p i=%p",
- __entry->obj, __entry->de, __entry->inode)
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
);
-TRACE_EVENT(cachefiles_mark_buried,
+TRACE_EVENT(cachefiles_mark_inactive,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- enum fscache_why_object_killed why),
+ struct inode *inode),
- TP_ARGS(obj, de, why),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(struct cachefiles_object *, obj )
- __field(struct dentry *, de )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->de = de;
- __entry->why = why;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%p d=%p w=%s",
- __entry->obj, __entry->de,
- __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_vfs_error,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ int error, enum cachefiles_error_trace where),
+
+ TP_ARGS(obj, backer, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_error_trace, where )
+ __field(short, error )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("o=%08x B=%x %s e=%d",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->where, cachefiles_error_traces),
+ __entry->error)
+ );
+
+TRACE_EVENT(cachefiles_io_error,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ int error, enum cachefiles_error_trace where),
+
+ TP_ARGS(obj, backer, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_error_trace, where )
+ __field(short, error )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("o=%08x B=%x %s e=%d",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->where, cachefiles_error_traces),
+ __entry->error)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_open,
+ TP_PROTO(struct cachefiles_object *obj, struct cachefiles_msg *msg,
+ struct cachefiles_open *load),
+
+ TP_ARGS(obj, msg, load),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(unsigned int, object_id )
+ __field(unsigned int, fd )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg->msg_id;
+ __entry->object_id = msg->object_id;
+ __entry->fd = load->fd;
+ __entry->flags = load->flags;
+ ),
+
+ TP_printk("o=%08x mid=%x oid=%x fd=%d f=%x",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->object_id,
+ __entry->fd,
+ __entry->flags)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_copen,
+ TP_PROTO(struct cachefiles_object *obj, unsigned int msg_id,
+ long len),
+
+ TP_ARGS(obj, msg_id, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(long, len )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg_id;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x mid=%x l=%lx",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_close,
+ TP_PROTO(struct cachefiles_object *obj, struct cachefiles_msg *msg),
+
+ TP_ARGS(obj, msg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(unsigned int, object_id )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg->msg_id;
+ __entry->object_id = msg->object_id;
+ ),
+
+ TP_printk("o=%08x mid=%x oid=%x",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->object_id)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_read,
+ TP_PROTO(struct cachefiles_object *obj, struct cachefiles_msg *msg,
+ struct cachefiles_read *load),
+
+ TP_ARGS(obj, msg, load),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ __field(unsigned int, object_id )
+ __field(loff_t, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg->msg_id;
+ __entry->object_id = msg->object_id;
+ __entry->start = load->off;
+ __entry->len = load->len;
+ ),
+
+ TP_printk("o=%08x mid=%x oid=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->msg_id,
+ __entry->object_id,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_cread,
+ TP_PROTO(struct cachefiles_object *obj, unsigned int msg_id),
+
+ TP_ARGS(obj, msg_id),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, msg_id )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->msg_id = msg_id;
+ ),
+
+ TP_printk("o=%08x mid=%x",
+ __entry->obj,
+ __entry->msg_id)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_fd_write,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ loff_t start, size_t len),
+
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(loff_t, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x iB=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_ondemand_fd_release,
+ TP_PROTO(struct cachefiles_object *obj, int object_id),
+
+ TP_ARGS(obj, object_id),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, object_id )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->object_id = object_id;
+ ),
+
+ TP_printk("o=%08x oid=%x",
+ __entry->obj,
+ __entry->object_id)
);
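A hedged sketch of the on-demand request/reply flow these events imply: the kernel posts a message to the user daemon, the daemon answers, and the borrowed fd is eventually released (ordering assumed):

	trace_cachefiles_ondemand_open(object, &msg, &load);	/* request sent */
	trace_cachefiles_ondemand_copen(object, msg_id, len);	/* daemon reply */
	trace_cachefiles_ondemand_fd_release(object, object_id);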
#endif /* _TRACE_CACHEFILES_H */
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index 7f42a3de59e6..dd7d7c9efecd 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -59,8 +59,8 @@ DECLARE_EVENT_CLASS(cgroup,
TP_STRUCT__entry(
__field( int, root )
- __field( int, id )
__field( int, level )
+ __field( u64, id )
__string( path, path )
),
@@ -71,7 +71,7 @@ DECLARE_EVENT_CLASS(cgroup,
__assign_str(path, path);
),
- TP_printk("root=%d id=%d level=%d path=%s",
+ TP_printk("root=%d id=%llu level=%d path=%s",
__entry->root, __entry->id, __entry->level, __get_str(path))
);
@@ -126,8 +126,8 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
TP_STRUCT__entry(
__field( int, dst_root )
- __field( int, dst_id )
__field( int, dst_level )
+ __field( u64, dst_id )
__field( int, pid )
__string( dst_path, path )
__string( comm, task->comm )
@@ -142,7 +142,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__assign_str(comm, task->comm);
),
- TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s",
+ TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s",
__entry->dst_root, __entry->dst_id, __entry->dst_level,
__get_str(dst_path), __entry->pid, __get_str(comm))
);
@@ -171,8 +171,8 @@ DECLARE_EVENT_CLASS(cgroup_event,
TP_STRUCT__entry(
__field( int, root )
- __field( int, id )
__field( int, level )
+ __field( u64, id )
__string( path, path )
__field( int, val )
),
@@ -185,7 +185,7 @@ DECLARE_EVENT_CLASS(cgroup_event,
__entry->val = val;
),
- TP_printk("root=%d id=%d level=%d path=%s val=%d",
+ TP_printk("root=%d id=%llu level=%d path=%s val=%d",
__entry->root, __entry->id, __entry->level, __get_str(path),
__entry->val)
);
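The id fields widen from int to u64 because modern cgroup IDs are 64-bit kernfs inode IDs, with the printk specifiers moving to %llu to match. The assignment in the class presumably goes through cgroup_id(), which returns u64:

	__entry->id = cgroup_id(cgrp);	/* u64 kernfs id; call site assumed */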
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index cb1aea25c199..daed3c7a48c1 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -118,6 +118,50 @@ DEFINE_EVENT(clk_rate, clk_set_rate_complete,
TP_ARGS(core, rate)
);
+DEFINE_EVENT(clk_rate, clk_set_min_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_max_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DECLARE_EVENT_CLASS(clk_rate_range,
+
+ TP_PROTO(struct clk_core *core, unsigned long min, unsigned long max),
+
+ TP_ARGS(core, min, max),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field(unsigned long, min )
+ __field(unsigned long, max )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("%s min %lu max %lu", __get_str(name),
+ (unsigned long)__entry->min,
+ (unsigned long)__entry->max)
+);
+
+DEFINE_EVENT(clk_rate_range, clk_set_rate_range,
+
+ TP_PROTO(struct clk_core *core, unsigned long min, unsigned long max),
+
+ TP_ARGS(core, min, max)
+);
+
DECLARE_EVENT_CLASS(clk_parent,
TP_PROTO(struct clk_core *core, struct clk_core *parent),
@@ -220,6 +264,49 @@ DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
TP_ARGS(core, duty)
);
+DECLARE_EVENT_CLASS(clk_rate_request,
+
+ TP_PROTO(struct clk_rate_request *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __string( name, req->core ? req->core->name : "none")
+ __string( pname, req->best_parent_hw ? clk_hw_get_name(req->best_parent_hw) : "none" )
+ __field(unsigned long, min )
+ __field(unsigned long, max )
+ __field(unsigned long, prate )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, req->core ? req->core->name : "none");
+ __assign_str(pname, req->best_parent_hw ? clk_hw_get_name(req->best_parent_hw) : "none");
+ __entry->min = req->min_rate;
+ __entry->max = req->max_rate;
+ __entry->prate = req->best_parent_rate;
+ ),
+
+ TP_printk("%s min %lu max %lu, parent %s (%lu)", __get_str(name),
+ (unsigned long)__entry->min,
+ (unsigned long)__entry->max,
+ __get_str(pname),
+ (unsigned long)__entry->prate)
+);
+
+DEFINE_EVENT(clk_rate_request, clk_rate_request_start,
+
+ TP_PROTO(struct clk_rate_request *req),
+
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(clk_rate_request, clk_rate_request_done,
+
+ TP_PROTO(struct clk_rate_request *req),
+
+ TP_ARGS(req)
+);
+
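A sketch of bracketing a rate negotiation with the new request events; the callee here is an assumption about where the instrumentation lands:

	trace_clk_rate_request_start(req);
	ret = clk_core_round_rate_nolock(core, req);	/* assumed callee */
	trace_clk_rate_request_done(req);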
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 5017a8829270..25103e67737c 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,57 +8,121 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(cma_alloc,
+TRACE_EVENT(cma_release,
- TP_PROTO(unsigned long pfn, const struct page *page,
- unsigned int count, unsigned int align),
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count),
- TP_ARGS(pfn, page, count, align),
+ TP_ARGS(name, pfn, page, count),
TP_STRUCT__entry(
+ __string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
- __field(unsigned int, count)
+ __field(unsigned long, count)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ ),
+
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu",
+ __get_str(name),
+ __entry->pfn,
+ __entry->page,
+ __entry->count)
+);
+
+TRACE_EVENT(cma_alloc_start,
+
+ TP_PROTO(const char *name, unsigned long count, unsigned int align),
+
+ TP_ARGS(name, count, align),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long, count)
__field(unsigned int, align)
),
TP_fast_assign(
+ __assign_str(name, name);
+ __entry->count = count;
+ __entry->align = align;
+ ),
+
+ TP_printk("name=%s count=%lu align=%u",
+ __get_str(name),
+ __entry->count,
+ __entry->align)
+);
+
+TRACE_EVENT(cma_alloc_finish,
+
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count, unsigned int align, int errorno),
+
+ TP_ARGS(name, pfn, page, count, align, errorno),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned long, count)
+ __field(unsigned int, align)
+ __field(int, errorno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
__entry->align = align;
+ __entry->errorno = errorno;
),
- TP_printk("pfn=%lx page=%p count=%u align=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u errorno=%d",
+ __get_str(name),
__entry->pfn,
__entry->page,
__entry->count,
- __entry->align)
+ __entry->align,
+ __entry->errorno)
);
-TRACE_EVENT(cma_release,
+TRACE_EVENT(cma_alloc_busy_retry,
- TP_PROTO(unsigned long pfn, const struct page *page,
- unsigned int count),
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
+ unsigned long count, unsigned int align),
- TP_ARGS(pfn, page, count),
+ TP_ARGS(name, pfn, page, count, align),
TP_STRUCT__entry(
+ __string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
- __field(unsigned int, count)
+ __field(unsigned long, count)
+ __field(unsigned int, align)
),
TP_fast_assign(
+ __assign_str(name, name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
+ __entry->align = align;
),
- TP_printk("pfn=%lx page=%p count=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u",
+ __get_str(name),
__entry->pfn,
__entry->page,
- __entry->count)
+ __entry->count,
+ __entry->align)
);
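Taken together, the CMA events now trace the whole allocation attempt rather than a single point. A sketch of the implied sequence (variable names and error handling assumed):

	trace_cma_alloc_start(cma->name, count, align);
	/* bitmap scan + alloc_contig_range(); on -EBUSY the scan retries: */
	trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
	trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);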
#endif /* _TRACE_CMA_H */
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 54e5bf081171..d05759d18538 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -64,14 +64,24 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepages,
+
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
+ unsigned long nr_taken),
+
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
+);
+
#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_migratepages,
- TP_PROTO(unsigned long nr_all,
- int migrate_rc,
- struct list_head *migratepages),
+ TP_PROTO(unsigned int nr_migratepages,
+ unsigned int nr_succeeded),
- TP_ARGS(nr_all, migrate_rc, migratepages),
+ TP_ARGS(nr_migratepages, nr_succeeded),
TP_STRUCT__entry(
__field(unsigned long, nr_migrated)
@@ -79,23 +89,8 @@ TRACE_EVENT(mm_compaction_migratepages,
),
TP_fast_assign(
- unsigned long nr_failed = 0;
- struct list_head *page_lru;
-
- /*
- * migrate_pages() returns either a non-negative number
- * with the number of pages that failed migration, or an
- * error code, in which case we need to count the remaining
- * pages manually
- */
- if (migrate_rc >= 0)
- nr_failed = migrate_rc;
- else
- list_for_each(page_lru, migratepages)
- nr_failed++;
-
- __entry->nr_migrated = nr_all - nr_failed;
- __entry->nr_failed = nr_failed;
+ __entry->nr_migrated = nr_succeeded;
+ __entry->nr_failed = nr_migratepages - nr_succeeded;
),
TP_printk("nr_migrated=%lu nr_failed=%lu",
@@ -104,10 +99,10 @@ TRACE_EVENT(mm_compaction_migratepages,
);
TRACE_EVENT(mm_compaction_begin,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
- unsigned long free_pfn, unsigned long zone_end, bool sync),
+ TP_PROTO(struct compact_control *cc, unsigned long zone_start,
+ unsigned long zone_end, bool sync),
- TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
+ TP_ARGS(cc, zone_start, zone_end, sync),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
@@ -119,8 +114,8 @@ TRACE_EVENT(mm_compaction_begin,
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_pfn = migrate_pfn;
- __entry->free_pfn = free_pfn;
+ __entry->migrate_pfn = cc->migrate_pfn;
+ __entry->free_pfn = cc->free_pfn;
__entry->zone_end = zone_end;
__entry->sync = sync;
),
@@ -134,11 +129,11 @@ TRACE_EVENT(mm_compaction_begin,
);
TRACE_EVENT(mm_compaction_end,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
- unsigned long free_pfn, unsigned long zone_end, bool sync,
+ TP_PROTO(struct compact_control *cc, unsigned long zone_start,
+ unsigned long zone_end, bool sync,
int status),
- TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
+ TP_ARGS(cc, zone_start, zone_end, sync, status),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
@@ -151,8 +146,8 @@ TRACE_EVENT(mm_compaction_end,
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_pfn = migrate_pfn;
- __entry->free_pfn = free_pfn;
+ __entry->migrate_pfn = cc->migrate_pfn;
+ __entry->free_pfn = cc->free_pfn;
__entry->zone_end = zone_end;
__entry->sync = sync;
__entry->status = status;
@@ -178,13 +173,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
TP_STRUCT__entry(
__field(int, order)
- __field(gfp_t, gfp_mask)
+ __field(unsigned long, gfp_mask)
__field(int, prio)
),
TP_fast_assign(
__entry->order = order;
- __entry->gfp_mask = gfp_mask;
+ __entry->gfp_mask = (__force unsigned long)gfp_mask;
__entry->prio = prio;
),
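
With the mm_compaction_migratepages change above, the event no longer walks the residual page list to count failures; the caller hands over both totals. A hedged sketch of the caller-side shape this implies (the real site is in mm/compaction.c; the migrate_pages() out-parameter form is assumed here):

	unsigned int nr_succeeded = 0;
	int err;

	/* migrate_pages() reports successes through its final out-parameter,
	 * so the tracepoint receives two ready-made counters instead of a
	 * list to walk.
	 */
	err = migrate_pages(&cc->migratepages, compaction_alloc,
			    compaction_free, (unsigned long)cc, cc->mode,
			    MR_COMPACTION, &nr_succeeded);

	trace_mm_compaction_migratepages(cc->nr_migratepages, nr_succeeded);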
diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h
new file mode 100644
index 000000000000..58cc83b99c34
--- /dev/null
+++ b/include/trace/events/csd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM csd
+
+#if !defined(_TRACE_CSD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CSD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(csd_queue_cpu,
+
+ TP_PROTO(const unsigned int cpu,
+ unsigned long callsite,
+ smp_call_func_t func,
+ call_single_data_t *csd),
+
+ TP_ARGS(cpu, callsite, func, csd),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(void *, callsite)
+ __field(void *, func)
+ __field(void *, csd)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->callsite = (void *)callsite;
+ __entry->func = func;
+ __entry->csd = csd;
+ ),
+
+ TP_printk("cpu=%u callsite=%pS func=%ps csd=%p",
+ __entry->cpu, __entry->callsite, __entry->func, __entry->csd)
+ );
+
+/*
+ * Tracepoints for a function called as an effect of smp_call_function.*
+ */
+DECLARE_EVENT_CLASS(csd_function,
+
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+
+ TP_ARGS(func, csd),
+
+ TP_STRUCT__entry(
+ __field(void *, func)
+ __field(void *, csd)
+ ),
+
+ TP_fast_assign(
+ __entry->func = func;
+ __entry->csd = csd;
+ ),
+
+ TP_printk("func=%ps, csd=%p", __entry->func, __entry->csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_entry,
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+ TP_ARGS(func, csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_exit,
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+ TP_ARGS(func, csd)
+);
+
+#endif /* _TRACE_CSD_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
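
The csd_function class yields paired entry/exit events around the callback as it runs on the destination CPU. A minimal sketch of the pairing, assuming a wrapper like the one kernel/smp.c places around func(info):

	static void example_csd_do_func(smp_call_func_t func, void *info,
					call_single_data_t *csd)
	{
		trace_csd_function_entry(func, csd);
		func(info);		/* the cross-CPU callback itself */
		trace_csd_function_exit(func, csd);
	}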
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
new file mode 100644
index 000000000000..23200aabccac
--- /dev/null
+++ b/include/trace/events/damon.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM damon
+
+#if !defined(_TRACE_DAMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DAMON_H
+
+#include <linux/damon.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(damos_before_apply,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned int target_idx, struct damon_region *r,
+ unsigned int nr_regions, bool do_trace),
+
+	TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
+
+ TP_CONDITION(do_trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, target_idx)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, nr_accesses)
+ __field(unsigned int, age)
+ __field(unsigned int, nr_regions)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->target_idx = target_idx;
+ __entry->start = r->ar.start;
+ __entry->end = r->ar.end;
+ __entry->nr_accesses = r->nr_accesses_bp / 10000;
+ __entry->age = r->age;
+ __entry->nr_regions = nr_regions;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u target_idx=%lu nr_regions=%u %lu-%lu: %u %u",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->target_idx, __entry->nr_regions,
+ __entry->start, __entry->end,
+ __entry->nr_accesses, __entry->age)
+);
+
+TRACE_EVENT(damon_aggregated,
+
+ TP_PROTO(unsigned int target_id, struct damon_region *r,
+ unsigned int nr_regions),
+
+ TP_ARGS(target_id, r, nr_regions),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, target_id)
+ __field(unsigned int, nr_regions)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, nr_accesses)
+ __field(unsigned int, age)
+ ),
+
+ TP_fast_assign(
+ __entry->target_id = target_id;
+ __entry->nr_regions = nr_regions;
+ __entry->start = r->ar.start;
+ __entry->end = r->ar.end;
+ __entry->nr_accesses = r->nr_accesses;
+ __entry->age = r->age;
+ ),
+
+ TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u %u",
+ __entry->target_id, __entry->nr_regions,
+ __entry->start, __entry->end,
+ __entry->nr_accesses, __entry->age)
+);
+
+#endif /* _TRACE_DAMON_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
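
Note the division in damos_before_apply: while schemes are being applied, a region's access count is kept in basis points (nr_accesses_bp, i.e. nr_accesses scaled by 10,000), so the event folds it back to the aggregation-interval scale that damon_aggregated reports directly. A hedged sketch of a call site for the conditional event (the index variables are assumptions):

	/* TP_CONDITION(do_trace) gates record creation entirely, so callers
	 * pass a cheap boolean instead of checking trace state themselves.
	 */
	trace_damos_before_apply(ctx_idx, scheme_idx, target_idx, r,
				 damon_nr_regions(t), do_trace);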
diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h
index cf5b8772175d..7627c620bbda 100644
--- a/include/trace/events/devfreq.h
+++ b/include/trace/events/devfreq.h
@@ -8,6 +8,34 @@
#include <linux/devfreq.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(devfreq_frequency,
+ TP_PROTO(struct devfreq *devfreq, unsigned long freq,
+ unsigned long prev_freq),
+
+ TP_ARGS(devfreq, freq, prev_freq),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(&devfreq->dev))
+ __field(unsigned long, freq)
+ __field(unsigned long, prev_freq)
+ __field(unsigned long, busy_time)
+ __field(unsigned long, total_time)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(&devfreq->dev));
+ __entry->freq = freq;
+ __entry->prev_freq = prev_freq;
+ __entry->busy_time = devfreq->last_status.busy_time;
+ __entry->total_time = devfreq->last_status.total_time;
+ ),
+
+ TP_printk("dev_name=%-30s freq=%-12lu prev_freq=%-12lu load=%-2lu",
+ __get_str(dev_name), __entry->freq, __entry->prev_freq,
+ __entry->total_time == 0 ? 0 :
+ (100 * __entry->busy_time) / __entry->total_time)
+);
+
TRACE_EVENT(devfreq_monitor,
TP_PROTO(struct devfreq *devfreq),
@@ -29,7 +57,7 @@ TRACE_EVENT(devfreq_monitor,
__assign_str(dev_name, dev_name(&devfreq->dev));
),
- TP_printk("dev_name=%s freq=%lu polling_ms=%u load=%lu",
+ TP_printk("dev_name=%-30s freq=%-12lu polling_ms=%-3u load=%-2lu",
__get_str(dev_name), __entry->freq, __entry->polling_ms,
__entry->total_time == 0 ? 0 :
(100 * __entry->busy_time) / __entry->total_time)
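
Both devfreq events derive the printed load the same way, and the guard matters because last_status may not have accumulated any time yet:

	/* integer percent of busy time, with a divide-by-zero guard for the
	 * first sample, where total_time can still be 0
	 */
	unsigned long load = total_time ? (100 * busy_time) / total_time : 0;

The %-30s/%-12lu width changes in devfreq_monitor only column-align the text output; no recorded field changes.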
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 6f60a78d9a7e..77ff7cfc6049 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -21,9 +21,9 @@ TRACE_EVENT(devlink_hwmsg,
TP_ARGS(devlink, incoming, type, buf, len),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(bool, incoming)
__field(unsigned long, type)
__dynamic_array(u8, buf, len)
@@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg,
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->incoming = incoming;
__entry->type = type;
memcpy(__get_dynamic_array(buf), buf, len);
@@ -55,17 +55,17 @@ TRACE_EVENT(devlink_hwerr,
TP_ARGS(devlink, err, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(int, err)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->err = err;
__assign_str(msg, msg);
),
@@ -85,17 +85,17 @@ TRACE_EVENT(devlink_health_report,
TP_ARGS(devlink, reporter_name, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
- __string(reporter_name, msg)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(reporter_name, reporter_name)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__assign_str(msg, msg);
),
@@ -116,18 +116,18 @@ TRACE_EVENT(devlink_health_recover_aborted,
TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(bool, health_state)
__field(u64, time_since_last_recover)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->health_state = health_state;
__entry->time_since_last_recover = time_since_last_recover;
@@ -150,17 +150,17 @@ TRACE_EVENT(devlink_health_reporter_state_update,
TP_ARGS(devlink, reporter_name, new_state),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(u8, new_state)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->new_state = new_state;
),
@@ -171,6 +171,42 @@ TRACE_EVENT(devlink_health_reporter_state_update,
__entry->new_state)
);
+/*
+ * Tracepoint for devlink packet trap:
+ */
+TRACE_EVENT(devlink_trap_report,
+ TP_PROTO(const struct devlink *devlink, struct sk_buff *skb,
+ const struct devlink_trap_metadata *metadata),
+
+ TP_ARGS(devlink, skb, metadata),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(trap_name, metadata->trap_name)
+ __string(trap_group_name, metadata->trap_group_name)
+ __array(char, input_dev_name, IFNAMSIZ)
+ ),
+
+ TP_fast_assign(
+ struct net_device *input_dev = metadata->input_dev;
+
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
+ __assign_str(trap_name, metadata->trap_name);
+ __assign_str(trap_group_name, metadata->trap_group_name);
+ strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ);
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s trap_name=%s "
+ "trap_group_name=%s input_dev_name=%s", __get_str(bus_name),
+ __get_str(dev_name), __get_str(driver_name),
+ __get_str(trap_name), __get_str(trap_group_name),
+ __entry->input_dev_name)
+);
+
#endif /* _TRACE_DEVLINK_H */
/* This part must be outside protection */
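
Every hunk in this file swaps direct devlink->dev dereferences for devlink_to_dev(), which lets struct devlink stay opaque to the tracing header. For reference, the accessor amounts to no more than this (a sketch; the exported implementation lives in the devlink core):

	struct device *devlink_to_dev(const struct devlink *devlink)
	{
		return devlink->dev;
	}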
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
new file mode 100644
index 000000000000..c1a146f9fc91
--- /dev/null
+++ b/include/trace/events/dlm.h
@@ -0,0 +1,682 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dlm
+
+#if !defined(_TRACE_DLM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DLM_H
+
+#include <linux/dlm.h>
+#include <linux/dlmconstants.h>
+#include <uapi/linux/dlm_plock.h>
+#include <linux/tracepoint.h>
+
+#include "../../../fs/dlm/dlm_internal.h"
+
+#define show_lock_flags(flags) __print_flags(flags, "|", \
+ { DLM_LKF_NOQUEUE, "NOQUEUE" }, \
+ { DLM_LKF_CANCEL, "CANCEL" }, \
+ { DLM_LKF_CONVERT, "CONVERT" }, \
+ { DLM_LKF_VALBLK, "VALBLK" }, \
+ { DLM_LKF_QUECVT, "QUECVT" }, \
+ { DLM_LKF_IVVALBLK, "IVVALBLK" }, \
+ { DLM_LKF_CONVDEADLK, "CONVDEADLK" }, \
+ { DLM_LKF_PERSISTENT, "PERSISTENT" }, \
+ { DLM_LKF_NODLCKWT, "NODLCKWT" }, \
+ { DLM_LKF_NODLCKBLK, "NODLCKBLK" }, \
+ { DLM_LKF_EXPEDITE, "EXPEDITE" }, \
+ { DLM_LKF_NOQUEUEBAST, "NOQUEUEBAST" }, \
+ { DLM_LKF_HEADQUE, "HEADQUE" }, \
+ { DLM_LKF_NOORDER, "NOORDER" }, \
+ { DLM_LKF_ORPHAN, "ORPHAN" }, \
+ { DLM_LKF_ALTPR, "ALTPR" }, \
+ { DLM_LKF_ALTCW, "ALTCW" }, \
+ { DLM_LKF_FORCEUNLOCK, "FORCEUNLOCK" }, \
+ { DLM_LKF_TIMEOUT, "TIMEOUT" })
+
+#define show_lock_mode(mode) __print_symbolic(mode, \
+ { DLM_LOCK_IV, "IV"}, \
+ { DLM_LOCK_NL, "NL"}, \
+ { DLM_LOCK_CR, "CR"}, \
+ { DLM_LOCK_CW, "CW"}, \
+ { DLM_LOCK_PR, "PR"}, \
+ { DLM_LOCK_PW, "PW"}, \
+ { DLM_LOCK_EX, "EX"})
+
+#define show_dlm_sb_flags(flags) __print_flags(flags, "|", \
+ { DLM_SBF_DEMOTED, "DEMOTED" }, \
+ { DLM_SBF_VALNOTVALID, "VALNOTVALID" }, \
+ { DLM_SBF_ALTMODE, "ALTMODE" })
+
+#define show_lkb_flags(flags) __print_flags(flags, "|", \
+ { BIT(DLM_DFL_USER_BIT), "USER" }, \
+ { BIT(DLM_DFL_ORPHAN_BIT), "ORPHAN" })
+
+#define show_header_cmd(cmd) __print_symbolic(cmd, \
+ { DLM_MSG, "MSG"}, \
+ { DLM_RCOM, "RCOM"}, \
+ { DLM_OPTS, "OPTS"}, \
+ { DLM_ACK, "ACK"}, \
+ { DLM_FIN, "FIN"})
+
+#define show_message_version(version) __print_symbolic(version, \
+ { DLM_VERSION_3_1, "3.1"}, \
+ { DLM_VERSION_3_2, "3.2"})
+
+#define show_message_type(type) __print_symbolic(type, \
+ { DLM_MSG_REQUEST, "REQUEST"}, \
+ { DLM_MSG_CONVERT, "CONVERT"}, \
+ { DLM_MSG_UNLOCK, "UNLOCK"}, \
+ { DLM_MSG_CANCEL, "CANCEL"}, \
+ { DLM_MSG_REQUEST_REPLY, "REQUEST_REPLY"}, \
+ { DLM_MSG_CONVERT_REPLY, "CONVERT_REPLY"}, \
+ { DLM_MSG_UNLOCK_REPLY, "UNLOCK_REPLY"}, \
+ { DLM_MSG_CANCEL_REPLY, "CANCEL_REPLY"}, \
+ { DLM_MSG_GRANT, "GRANT"}, \
+ { DLM_MSG_BAST, "BAST"}, \
+ { DLM_MSG_LOOKUP, "LOOKUP"}, \
+ { DLM_MSG_REMOVE, "REMOVE"}, \
+ { DLM_MSG_LOOKUP_REPLY, "LOOKUP_REPLY"}, \
+ { DLM_MSG_PURGE, "PURGE"})
+
+#define show_rcom_type(type) __print_symbolic(type, \
+ { DLM_RCOM_STATUS, "STATUS"}, \
+ { DLM_RCOM_NAMES, "NAMES"}, \
+ { DLM_RCOM_LOOKUP, "LOOKUP"}, \
+ { DLM_RCOM_LOCK, "LOCK"}, \
+ { DLM_RCOM_STATUS_REPLY, "STATUS_REPLY"}, \
+ { DLM_RCOM_NAMES_REPLY, "NAMES_REPLY"}, \
+ { DLM_RCOM_LOOKUP_REPLY, "LOOKUP_REPLY"}, \
+ { DLM_RCOM_LOCK_REPLY, "LOCK_REPLY"})
+
+
+/* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
+TRACE_EVENT(dlm_lock_start,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name,
+ unsigned int namelen, int mode, __u32 flags),
+
+ TP_ARGS(ls, lkb, name, namelen, mode, flags),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(int, mode)
+ __field(__u32, flags)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->mode = mode;
+ __entry->flags = flags;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ else if (name)
+ memcpy(__get_dynamic_array(res_name), name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_mode(__entry->mode),
+ show_lock_flags(__entry->flags),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_lock_end,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name,
+ unsigned int namelen, int mode, __u32 flags, int error,
+ bool kernel_lock),
+
+ TP_ARGS(ls, lkb, name, namelen, mode, flags, error, kernel_lock),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(int, mode)
+ __field(__u32, flags)
+ __field(int, error)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->mode = mode;
+ __entry->flags = flags;
+ __entry->error = error;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ else if (name)
+ memcpy(__get_dynamic_array(res_name), name,
+ __get_dynamic_array_len(res_name));
+
+ if (kernel_lock) {
+			/* dlm_lock() zeroes the return value in these cases;
+			 * mirror that here so the traced error matches what
+			 * the caller sees, without adding overhead when
+			 * tracing is off.
+			 */
+ if (error == -EAGAIN || error == -EDEADLK)
+ __entry->error = 0;
+ }
+
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_mode(__entry->mode),
+ show_lock_flags(__entry->flags), __entry->error,
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_bast,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode),
+
+ TP_ARGS(ls, lkb, mode),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(int, mode)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->mode = mode;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_mode(__entry->mode),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_ast,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb),
+
+ TP_ARGS(ls, lkb),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(u8, sb_flags)
+ __field(int, sb_status)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->sb_flags = lkb->lkb_lksb->sb_flags;
+ __entry->sb_status = lkb->lkb_lksb->sb_status;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status,
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+/* note: we begin tracing dlm_unlock_start() only if ls and lkb are found */
+TRACE_EVENT(dlm_unlock_start,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, __u32 flags),
+
+ TP_ARGS(ls, lkb, flags),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(__u32, flags)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->flags = flags;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x flags=%s res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_flags(__entry->flags),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_unlock_end,
+
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, __u32 flags,
+ int error),
+
+ TP_ARGS(ls, lkb, flags, error),
+
+ TP_STRUCT__entry(
+ __field(__u32, ls_id)
+ __field(__u32, lkb_id)
+ __field(__u32, flags)
+ __field(int, error)
+ __dynamic_array(unsigned char, res_name,
+ lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ ),
+
+ TP_fast_assign(
+ struct dlm_rsb *r;
+
+ __entry->ls_id = ls->ls_global_id;
+ __entry->lkb_id = lkb->lkb_id;
+ __entry->flags = flags;
+ __entry->error = error;
+
+ r = lkb->lkb_resource;
+ if (r)
+ memcpy(__get_dynamic_array(res_name), r->res_name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d res_name=%s",
+ __entry->ls_id, __entry->lkb_id,
+ show_lock_flags(__entry->flags), __entry->error,
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+DECLARE_EVENT_CLASS(dlm_rcom_template,
+
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_rcom *rc),
+
+ TP_ARGS(dst, h_seq, rc),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, dst)
+ __field(uint32_t, h_seq)
+ __field(uint32_t, h_version)
+ __field(uint32_t, h_lockspace)
+ __field(uint32_t, h_nodeid)
+ __field(uint16_t, h_length)
+ __field(uint8_t, h_cmd)
+ __field(uint32_t, rc_type)
+ __field(int32_t, rc_result)
+ __field(uint64_t, rc_id)
+ __field(uint64_t, rc_seq)
+ __field(uint64_t, rc_seq_reply)
+ __dynamic_array(unsigned char, rc_buf,
+ le16_to_cpu(rc->rc_header.h_length) - sizeof(*rc))
+ ),
+
+ TP_fast_assign(
+ __entry->dst = dst;
+ __entry->h_seq = h_seq;
+ __entry->h_version = le32_to_cpu(rc->rc_header.h_version);
+ __entry->h_lockspace = le32_to_cpu(rc->rc_header.u.h_lockspace);
+ __entry->h_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
+ __entry->h_length = le16_to_cpu(rc->rc_header.h_length);
+ __entry->h_cmd = rc->rc_header.h_cmd;
+ __entry->rc_type = le32_to_cpu(rc->rc_type);
+ __entry->rc_result = le32_to_cpu(rc->rc_result);
+ __entry->rc_id = le64_to_cpu(rc->rc_id);
+ __entry->rc_seq = le64_to_cpu(rc->rc_seq);
+ __entry->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
+ memcpy(__get_dynamic_array(rc_buf), rc->rc_buf,
+ __get_dynamic_array_len(rc_buf));
+ ),
+
+ TP_printk("dst=%u h_seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
+ "h_length=%u h_cmd=%s rc_type=%s rc_result=%d "
+ "rc_id=%llu rc_seq=%llu rc_seq_reply=%llu "
+ "rc_buf=0x%s", __entry->dst, __entry->h_seq,
+ show_message_version(__entry->h_version),
+ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
+ show_header_cmd(__entry->h_cmd),
+ show_rcom_type(__entry->rc_type),
+ __entry->rc_result, __entry->rc_id, __entry->rc_seq,
+ __entry->rc_seq_reply,
+ __print_hex_str(__get_dynamic_array(rc_buf),
+ __get_dynamic_array_len(rc_buf)))
+
+);
+
+DEFINE_EVENT(dlm_rcom_template, dlm_send_rcom,
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_rcom *rc),
+ TP_ARGS(dst, h_seq, rc));
+
+DEFINE_EVENT(dlm_rcom_template, dlm_recv_rcom,
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_rcom *rc),
+ TP_ARGS(dst, h_seq, rc));
+
+TRACE_EVENT(dlm_send_message,
+
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_message *ms,
+ const void *name, int namelen),
+
+ TP_ARGS(dst, h_seq, ms, name, namelen),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, dst)
+ __field(uint32_t, h_seq)
+ __field(uint32_t, h_version)
+ __field(uint32_t, h_lockspace)
+ __field(uint32_t, h_nodeid)
+ __field(uint16_t, h_length)
+ __field(uint8_t, h_cmd)
+ __field(uint32_t, m_type)
+ __field(uint32_t, m_nodeid)
+ __field(uint32_t, m_pid)
+ __field(uint32_t, m_lkid)
+ __field(uint32_t, m_remid)
+ __field(uint32_t, m_parent_lkid)
+ __field(uint32_t, m_parent_remid)
+ __field(uint32_t, m_exflags)
+ __field(uint32_t, m_sbflags)
+ __field(uint32_t, m_flags)
+ __field(uint32_t, m_lvbseq)
+ __field(uint32_t, m_hash)
+ __field(int32_t, m_status)
+ __field(int32_t, m_grmode)
+ __field(int32_t, m_rqmode)
+ __field(int32_t, m_bastmode)
+ __field(int32_t, m_asts)
+ __field(int32_t, m_result)
+ __dynamic_array(unsigned char, m_extra,
+ le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
+ __dynamic_array(unsigned char, res_name, namelen)
+ ),
+
+ TP_fast_assign(
+ __entry->dst = dst;
+ __entry->h_seq = h_seq;
+ __entry->h_version = le32_to_cpu(ms->m_header.h_version);
+ __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
+ __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ __entry->h_length = le16_to_cpu(ms->m_header.h_length);
+ __entry->h_cmd = ms->m_header.h_cmd;
+ __entry->m_type = le32_to_cpu(ms->m_type);
+ __entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
+ __entry->m_pid = le32_to_cpu(ms->m_pid);
+ __entry->m_lkid = le32_to_cpu(ms->m_lkid);
+ __entry->m_remid = le32_to_cpu(ms->m_remid);
+ __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
+ __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
+ __entry->m_exflags = le32_to_cpu(ms->m_exflags);
+ __entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
+ __entry->m_flags = le32_to_cpu(ms->m_flags);
+ __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
+ __entry->m_hash = le32_to_cpu(ms->m_hash);
+ __entry->m_status = le32_to_cpu(ms->m_status);
+ __entry->m_grmode = le32_to_cpu(ms->m_grmode);
+ __entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
+ __entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
+ __entry->m_asts = le32_to_cpu(ms->m_asts);
+ __entry->m_result = le32_to_cpu(ms->m_result);
+ memcpy(__get_dynamic_array(m_extra), ms->m_extra,
+ __get_dynamic_array_len(m_extra));
+ memcpy(__get_dynamic_array(res_name), name,
+ __get_dynamic_array_len(res_name));
+ ),
+
+ TP_printk("dst=%u h_seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
+ "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
+ "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
+ "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
+ "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
+ "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
+ "m_extra=0x%s res_name=0x%s", __entry->dst,
+ __entry->h_seq, show_message_version(__entry->h_version),
+ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
+ show_header_cmd(__entry->h_cmd),
+ show_message_type(__entry->m_type),
+ __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
+ __entry->m_remid, __entry->m_parent_lkid,
+ __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
+ show_dlm_sb_flags(__entry->m_sbflags),
+ show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
+ __entry->m_hash, __entry->m_status,
+ show_lock_mode(__entry->m_grmode),
+ show_lock_mode(__entry->m_rqmode),
+ show_lock_mode(__entry->m_bastmode),
+ __entry->m_asts, __entry->m_result,
+ __print_hex_str(__get_dynamic_array(m_extra),
+ __get_dynamic_array_len(m_extra)),
+ __print_hex_str(__get_dynamic_array(res_name),
+ __get_dynamic_array_len(res_name)))
+
+);
+
+TRACE_EVENT(dlm_recv_message,
+
+ TP_PROTO(uint32_t dst, uint32_t h_seq, const struct dlm_message *ms),
+
+ TP_ARGS(dst, h_seq, ms),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, dst)
+ __field(uint32_t, h_seq)
+ __field(uint32_t, h_version)
+ __field(uint32_t, h_lockspace)
+ __field(uint32_t, h_nodeid)
+ __field(uint16_t, h_length)
+ __field(uint8_t, h_cmd)
+ __field(uint32_t, m_type)
+ __field(uint32_t, m_nodeid)
+ __field(uint32_t, m_pid)
+ __field(uint32_t, m_lkid)
+ __field(uint32_t, m_remid)
+ __field(uint32_t, m_parent_lkid)
+ __field(uint32_t, m_parent_remid)
+ __field(uint32_t, m_exflags)
+ __field(uint32_t, m_sbflags)
+ __field(uint32_t, m_flags)
+ __field(uint32_t, m_lvbseq)
+ __field(uint32_t, m_hash)
+ __field(int32_t, m_status)
+ __field(int32_t, m_grmode)
+ __field(int32_t, m_rqmode)
+ __field(int32_t, m_bastmode)
+ __field(int32_t, m_asts)
+ __field(int32_t, m_result)
+ __dynamic_array(unsigned char, m_extra,
+ le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
+ ),
+
+ TP_fast_assign(
+ __entry->dst = dst;
+ __entry->h_seq = h_seq;
+ __entry->h_version = le32_to_cpu(ms->m_header.h_version);
+ __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
+ __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ __entry->h_length = le16_to_cpu(ms->m_header.h_length);
+ __entry->h_cmd = ms->m_header.h_cmd;
+ __entry->m_type = le32_to_cpu(ms->m_type);
+ __entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
+ __entry->m_pid = le32_to_cpu(ms->m_pid);
+ __entry->m_lkid = le32_to_cpu(ms->m_lkid);
+ __entry->m_remid = le32_to_cpu(ms->m_remid);
+ __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
+ __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
+ __entry->m_exflags = le32_to_cpu(ms->m_exflags);
+ __entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
+ __entry->m_flags = le32_to_cpu(ms->m_flags);
+ __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
+ __entry->m_hash = le32_to_cpu(ms->m_hash);
+ __entry->m_status = le32_to_cpu(ms->m_status);
+ __entry->m_grmode = le32_to_cpu(ms->m_grmode);
+ __entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
+ __entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
+ __entry->m_asts = le32_to_cpu(ms->m_asts);
+ __entry->m_result = le32_to_cpu(ms->m_result);
+ memcpy(__get_dynamic_array(m_extra), ms->m_extra,
+ __get_dynamic_array_len(m_extra));
+ ),
+
+ TP_printk("dst=%u h_seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
+ "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
+ "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
+ "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
+ "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
+ "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
+ "m_extra=0x%s", __entry->dst,
+ __entry->h_seq, show_message_version(__entry->h_version),
+ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
+ show_header_cmd(__entry->h_cmd),
+ show_message_type(__entry->m_type),
+ __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
+ __entry->m_remid, __entry->m_parent_lkid,
+ __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
+ show_dlm_sb_flags(__entry->m_sbflags),
+ show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
+ __entry->m_hash, __entry->m_status,
+ show_lock_mode(__entry->m_grmode),
+ show_lock_mode(__entry->m_rqmode),
+ show_lock_mode(__entry->m_bastmode),
+ __entry->m_asts, __entry->m_result,
+ __print_hex_str(__get_dynamic_array(m_extra),
+ __get_dynamic_array_len(m_extra)))
+
+);
+
+DECLARE_EVENT_CLASS(dlm_plock_template,
+
+ TP_PROTO(const struct dlm_plock_info *info),
+
+ TP_ARGS(info),
+
+ TP_STRUCT__entry(
+ __field(uint8_t, optype)
+ __field(uint8_t, ex)
+ __field(uint8_t, wait)
+ __field(uint8_t, flags)
+ __field(uint32_t, pid)
+ __field(int32_t, nodeid)
+ __field(int32_t, rv)
+ __field(uint32_t, fsid)
+ __field(uint64_t, number)
+ __field(uint64_t, start)
+ __field(uint64_t, end)
+ __field(uint64_t, owner)
+ ),
+
+ TP_fast_assign(
+ __entry->optype = info->optype;
+ __entry->ex = info->ex;
+ __entry->wait = info->wait;
+ __entry->flags = info->flags;
+ __entry->pid = info->pid;
+ __entry->nodeid = info->nodeid;
+ __entry->rv = info->rv;
+ __entry->fsid = info->fsid;
+ __entry->number = info->number;
+ __entry->start = info->start;
+ __entry->end = info->end;
+ __entry->owner = info->owner;
+ ),
+
+ TP_printk("fsid=%u number=%llx owner=%llx optype=%d ex=%d wait=%d flags=%x pid=%u nodeid=%d rv=%d start=%llx end=%llx",
+ __entry->fsid, __entry->number, __entry->owner,
+ __entry->optype, __entry->ex, __entry->wait,
+ __entry->flags, __entry->pid, __entry->nodeid,
+ __entry->rv, __entry->start, __entry->end)
+
+);
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_read,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_write,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
+TRACE_EVENT(dlm_send,
+
+ TP_PROTO(int nodeid, int ret),
+
+ TP_ARGS(nodeid, ret),
+
+ TP_STRUCT__entry(
+ __field(int, nodeid)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->nodeid = nodeid;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("nodeid=%d ret=%d", __entry->nodeid, __entry->ret)
+
+);
+
+TRACE_EVENT(dlm_recv,
+
+ TP_PROTO(int nodeid, int ret),
+
+ TP_ARGS(nodeid, ret),
+
+ TP_STRUCT__entry(
+ __field(int, nodeid)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->nodeid = nodeid;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("nodeid=%d ret=%d", __entry->nodeid, __entry->ret)
+
+);
+
+#endif /* if !defined(_TRACE_DLM_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
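
A worked example of the show_lock_flags() helper defined at the top of this file: __print_flags() walks its table in order and joins the name of every set bit with the given separator, so

	__u32 flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK | DLM_LKF_NOQUEUE;

renders in the trace output as flags=NOQUEUE|CONVERT|VALBLK (table order, not numeric bit order), with any leftover unknown bits appended in hex.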
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 64e92d56c6a8..3963e79ca7b4 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -23,8 +23,8 @@ DECLARE_EVENT_CLASS(dma_fence,
),
TP_fast_assign(
- __assign_str(driver, fence->ops->get_driver_name(fence))
- __assign_str(timeline, fence->ops->get_timeline_name(fence))
+ __assign_str(driver, fence->ops->get_driver_name(fence));
+ __assign_str(timeline, fence->ops->get_timeline_name(fence));
__entry->context = fence->context;
__entry->seqno = fence->seqno;
),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index bf9806fd1306..e18684b02c3d 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -19,12 +19,17 @@ struct erofs_map_blocks;
{ 1, "DIR" })
#define show_map_flags(flags) __print_flags(flags, "|", \
- { EROFS_GET_BLOCKS_RAW, "RAW" })
+ { EROFS_GET_BLOCKS_FIEMAP, "FIEMAP" }, \
+ { EROFS_GET_BLOCKS_READMORE, "READMORE" }, \
+ { EROFS_GET_BLOCKS_FINDTAIL, "FINDTAIL" })
#define show_mflags(flags) __print_flags(flags, "", \
- { EROFS_MAP_MAPPED, "M" }, \
- { EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ZIPPED, "Z" })
+ { EROFS_MAP_MAPPED, "M" }, \
+ { EROFS_MAP_META, "I" }, \
+ { EROFS_MAP_ENCODED, "E" }, \
+ { EROFS_MAP_FULL_MAPPED, "F" }, \
+ { EROFS_MAP_FRAGMENT, "R" }, \
+ { EROFS_MAP_PARTIAL_REF, "P" })
TRACE_EVENT(erofs_lookup,
@@ -35,54 +40,51 @@ TRACE_EVENT(erofs_lookup,
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
- __field(const char *, name )
+ __string(name, dentry->d_name.name )
__field(unsigned int, flags )
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->nid = EROFS_I(dir)->nid;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
show_dev_nid(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
TRACE_EVENT(erofs_fill_inode,
- TP_PROTO(struct inode *inode, int isdir),
- TP_ARGS(inode, isdir),
+ TP_PROTO(struct inode *inode),
+ TP_ARGS(inode),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
__field(erofs_blk_t, blkaddr )
__field(unsigned int, ofs )
- __field(int, isdir )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = EROFS_I(inode)->nid;
- __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->isdir = isdir;
+ __entry->blkaddr = erofs_blknr(inode->i_sb, erofs_iloc(inode));
+ __entry->ofs = erofs_blkoff(inode->i_sb, erofs_iloc(inode));
),
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d",
+ TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",
show_dev_nid(__entry),
- __entry->blkaddr, __entry->ofs,
- __entry->isdir)
+ __entry->blkaddr, __entry->ofs)
);
-TRACE_EVENT(erofs_readpage,
+TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct page *page, bool raw),
+ TP_PROTO(struct folio *folio, bool raw),
- TP_ARGS(page, raw),
+ TP_ARGS(folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -94,11 +96,11 @@ TRACE_EVENT(erofs_readpage,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->nid = EROFS_I(folio->mapping->host)->nid;
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
+ __entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
),
@@ -169,7 +171,7 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
__entry->flags ? show_map_flags(__entry->flags) : "NULL")
);
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
+DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned flags),
@@ -221,7 +223,7 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
show_mflags(__entry->mflags), __entry->ret)
);
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
+DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned flags, int ret),
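
The erofs_readpage to erofs_read_folio conversion above is mechanical at the tracing level; the field mapping, for reference:

	/* page-to-folio equivalents used in the converted TP_fast_assign():
	 *
	 *	page->mapping		->	folio->mapping
	 *	page->index		->	folio->index
	 *	PageUptodate(page)	->	folio_test_uptodate(folio)
	 */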
diff --git a/include/trace/events/error_report.h b/include/trace/events/error_report.h
new file mode 100644
index 000000000000..a1922a800e6f
--- /dev/null
+++ b/include/trace/events/error_report.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Declarations for error reporting tracepoints.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM error_report
+
+#if !defined(_TRACE_ERROR_REPORT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ERROR_REPORT_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum error_detector {
+ ERROR_DETECTOR_KFENCE,
+ ERROR_DETECTOR_KASAN,
+ ERROR_DETECTOR_WARN,
+};
+
+#endif /* __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+#define error_detector_list \
+ EM(ERROR_DETECTOR_KFENCE, "kfence") \
+ EM(ERROR_DETECTOR_KASAN, "kasan") \
+ EMe(ERROR_DETECTOR_WARN, "warning")
+/* Always end the list with an EMe. */
+
+#undef EM
+#undef EMe
+
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+error_detector_list
+
+#undef EM
+#undef EMe
+
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+#define show_error_detector_list(val) \
+ __print_symbolic(val, error_detector_list)
+
+DECLARE_EVENT_CLASS(error_report_template,
+ TP_PROTO(enum error_detector error_detector, unsigned long id),
+ TP_ARGS(error_detector, id),
+ TP_STRUCT__entry(__field(enum error_detector, error_detector)
+ __field(unsigned long, id)),
+ TP_fast_assign(__entry->error_detector = error_detector;
+ __entry->id = id;),
+ TP_printk("[%s] %lx",
+ show_error_detector_list(__entry->error_detector),
+ __entry->id));
+
+/**
+ * error_report_end - called after printing the error report
+ * @error_detector: short string describing the error detection tool
+ * @id: pseudo-unique descriptor identifying the report
+ * (e.g. the memory access address)
+ *
+ * This event occurs right after a debugging tool finishes printing the error
+ * report.
+ */
+DEFINE_EVENT(error_report_template, error_report_end,
+ TP_PROTO(enum error_detector error_detector, unsigned long id),
+ TP_ARGS(error_detector, id));
+
+#endif /* _TRACE_ERROR_REPORT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
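
The EM()/EMe() pattern above expands error_detector_list twice: once so the enum values are visible to userspace trace parsers, and once as the lookup table behind show_error_detector_list(). Roughly, the two expansions produce:

	/* first expansion: export the enum values for trace tooling */
	TRACE_DEFINE_ENUM(ERROR_DETECTOR_KFENCE);
	TRACE_DEFINE_ENUM(ERROR_DETECTOR_KASAN);
	TRACE_DEFINE_ENUM(ERROR_DETECTOR_WARN);

	/* second expansion: the symbol table consumed by __print_symbolic() */
	__print_symbolic(val,
			 { ERROR_DETECTOR_KFENCE, "kfence" },
			 { ERROR_DETECTOR_KASAN, "kasan" },
			 { ERROR_DETECTOR_WARN, "warning" })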
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 4c8b99ec8606..a697f4b77162 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -95,6 +95,44 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
{ FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
+
+#define show_fc_reason(reason) \
+ __print_symbolic(reason, \
+ { EXT4_FC_REASON_XATTR, "XATTR"}, \
+ { EXT4_FC_REASON_CROSS_RENAME, "CROSS_RENAME"}, \
+ { EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, "JOURNAL_FLAG_CHANGE"}, \
+ { EXT4_FC_REASON_NOMEM, "NO_MEM"}, \
+ { EXT4_FC_REASON_SWAP_BOOT, "SWAP_BOOT"}, \
+ { EXT4_FC_REASON_RESIZE, "RESIZE"}, \
+ { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \
+ { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \
+ { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \
+ { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"})
+
+TRACE_DEFINE_ENUM(CR_POWER2_ALIGNED);
+TRACE_DEFINE_ENUM(CR_GOAL_LEN_FAST);
+TRACE_DEFINE_ENUM(CR_BEST_AVAIL_LEN);
+TRACE_DEFINE_ENUM(CR_GOAL_LEN_SLOW);
+TRACE_DEFINE_ENUM(CR_ANY_FREE);
+
+#define show_criteria(cr) \
+ __print_symbolic(cr, \
+ { CR_POWER2_ALIGNED, "CR_POWER2_ALIGNED" }, \
+ { CR_GOAL_LEN_FAST, "CR_GOAL_LEN_FAST" }, \
+ { CR_BEST_AVAIL_LEN, "CR_BEST_AVAIL_LEN" }, \
+ { CR_GOAL_LEN_SLOW, "CR_GOAL_LEN_SLOW" }, \
+ { CR_ANY_FREE, "CR_ANY_FREE" })
TRACE_EVENT(ext4_other_inode_update_time,
TP_PROTO(struct inode *inode, ino_t orig_ino),
@@ -313,17 +351,15 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
DECLARE_EVENT_CLASS(ext4__write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags),
+ TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
- __field( unsigned int, flags )
),
TP_fast_assign(
@@ -331,29 +367,26 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
- __entry->flags = flags;
),
- TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
+ TP_printk("dev %d,%d ino %lu pos %lld len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->pos, __entry->len, __entry->flags)
+ __entry->pos, __entry->len)
);
DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags)
+ TP_ARGS(inode, pos, len)
);
DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags)
+ TP_ARGS(inode, pos, len)
);
DECLARE_EVENT_CLASS(ext4__write_end,
@@ -541,10 +574,10 @@ TRACE_EVENT(ext4_writepages_result,
(unsigned long) __entry->writeback_index)
);
-DECLARE_EVENT_CLASS(ext4__page_op,
- TP_PROTO(struct page *page),
+DECLARE_EVENT_CLASS(ext4__folio_op,
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(inode, folio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -554,76 +587,69 @@ DECLARE_EVENT_CLASS(ext4__page_op,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->index = folio->index;
),
- TP_printk("dev %d,%d ino %lu page_index %lu",
+ TP_printk("dev %d,%d ino %lu folio_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index)
);
-DEFINE_EVENT(ext4__page_op, ext4_writepage,
+DEFINE_EVENT(ext4__folio_op, ext4_read_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page)
+ TP_ARGS(inode, folio)
);
-DEFINE_EVENT(ext4__page_op, ext4_readpage,
+DEFINE_EVENT(ext4__folio_op, ext4_release_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page)
+ TP_ARGS(inode, folio)
);
-DEFINE_EVENT(ext4__page_op, ext4_releasepage,
+DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
-
- TP_ARGS(page, offset, length),
+ TP_ARGS(folio, offset, length),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
+ __field( size_t, offset )
+ __field( size_t, length )
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
__entry->offset = offset;
__entry->length = length;
),
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
+ TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index,
__entry->offset, __entry->length)
);
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length)
+ TP_ARGS(folio, offset, length)
);
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length)
+ TP_ARGS(folio, offset, length)
);
TRACE_EVENT(ext4_discard_blocks,
@@ -746,15 +772,14 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
+ TP_PROTO(struct inode *inode, unsigned int len),
- TP_ARGS(inode, len, needed),
+ TP_ARGS(inode, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned int, len )
- __field( unsigned int, needed )
),
@@ -762,13 +787,11 @@ TRACE_EVENT(ext4_discard_preallocations,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->len = len;
- __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu len: %u needed %u",
+ TP_printk("dev %d,%d ino %lu len: %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->len,
- __entry->needed)
+ (unsigned long) __entry->ino, __entry->len)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -1051,7 +1074,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
),
TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
- "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "
+ "result %u/%d/%u@%u blks %u grps %u cr %s flags %s "
"tail %u broken %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
@@ -1061,7 +1084,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
__entry->goal_len, __entry->goal_logical,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical,
- __entry->found, __entry->groups, __entry->cr,
+ __entry->found, __entry->groups, show_criteria(__entry->cr),
show_mballoc_flags(__entry->flags), __entry->tail,
__entry->buddy ? 1 << __entry->buddy : 0)
);
@@ -1347,64 +1370,6 @@ TRACE_EVENT(ext4_read_block_bitmap_load,
__entry->group, __entry->prefetch)
);
-TRACE_EVENT(ext4_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
-
- TP_ARGS(inode, offset, len, rw),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->pos, __entry->len, __entry->rw)
-);
-
-TRACE_EVENT(ext4_direct_IO_exit,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
- int rw, int ret),
-
- TP_ARGS(inode, offset, len, rw, ret),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->pos, __entry->len,
- __entry->rw, __entry->ret)
-);
-
DECLARE_EVENT_CLASS(ext4__fallocate_mode,
TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
@@ -1766,9 +1731,9 @@ TRACE_EVENT(ext4_ext_load_extent,
);
TRACE_EVENT(ext4_load_inode,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct super_block *sb, unsigned long ino),
- TP_ARGS(inode),
+ TP_ARGS(sb, ino),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -1776,8 +1741,8 @@ TRACE_EVENT(ext4_load_inode,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
+ __entry->dev = sb->s_dev;
+ __entry->ino = ino;
),
TP_printk("dev %d,%d ino %ld",
@@ -1785,18 +1750,19 @@ TRACE_EVENT(ext4_load_inode,
(unsigned long) __entry->ino)
);
-TRACE_EVENT(ext4_journal_start,
+TRACE_EVENT(ext4_journal_start_sb,
TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
- int revoke_creds, unsigned long IP),
+ int revoke_creds, int type, unsigned long IP),
- TP_ARGS(sb, blocks, rsv_blocks, revoke_creds, IP),
+ TP_ARGS(sb, blocks, rsv_blocks, revoke_creds, type, IP),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field(unsigned long, ip )
- __field( int, blocks )
- __field( int, rsv_blocks )
- __field( int, revoke_creds )
+ __field( dev_t, dev )
+ __field( unsigned long, ip )
+ __field( int, blocks )
+ __field( int, rsv_blocks )
+ __field( int, revoke_creds )
+ __field( int, type )
),
TP_fast_assign(
@@ -1805,11 +1771,45 @@ TRACE_EVENT(ext4_journal_start,
__entry->blocks = blocks;
__entry->rsv_blocks = rsv_blocks;
__entry->revoke_creds = revoke_creds;
+ __entry->type = type;
+ ),
+
+ TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d,"
+ " type %d, caller %pS", MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->blocks, __entry->rsv_blocks,
+ __entry->revoke_creds, __entry->type, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_journal_start_inode,
+ TP_PROTO(struct inode *inode, int blocks, int rsv_blocks,
+ int revoke_creds, int type, unsigned long IP),
+
+ TP_ARGS(inode, blocks, rsv_blocks, revoke_creds, type, IP),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ino )
+ __field( dev_t, dev )
+ __field( unsigned long, ip )
+ __field( int, blocks )
+ __field( int, rsv_blocks )
+ __field( int, revoke_creds )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ip = IP;
+ __entry->blocks = blocks;
+ __entry->rsv_blocks = rsv_blocks;
+ __entry->revoke_creds = revoke_creds;
+ __entry->type = type;
+ __entry->ino = inode->i_ino;
),
- TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d, "
- "caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blocks, __entry->rsv_blocks, __entry->revoke_creds,
+ TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d,"
+ " type %d, ino %lu, caller %pS", MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->blocks, __entry->rsv_blocks,
+ __entry->revoke_creds, __entry->type, __entry->ino,
(void *)__entry->ip)
);
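
ext4_journal_start is split above into per-superblock and per-inode variants that also record a handle type. A hedged sketch of how the journal-start helper might dispatch between them (shape assumed, modeled on fs/ext4/ext4_jbd2.c):

	/* assumed dispatch: emit the inode-tagged event when an inode is at
	 * hand, otherwise the superblock-only one
	 */
	if (inode)
		trace_ext4_journal_start_inode(inode, blocks, rsv_blocks,
					       revoke_creds, type, _RET_IP_);
	else
		trace_ext4_journal_start_sb(sb, blocks, rsv_blocks,
					    revoke_creds, type, _RET_IP_);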
@@ -1951,124 +1951,6 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
__entry->len, show_mflags(__entry->flags), __entry->ret)
);
-TRACE_EVENT(ext4_ext_put_in_cache,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
- ext4_fsblk_t start),
-
- TP_ARGS(inode, lblk, len, start),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, lblk )
- __field( unsigned int, len )
- __field( ext4_fsblk_t, start )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = lblk;
- __entry->len = len;
- __entry->start = start;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->lblk,
- __entry->len,
- (unsigned long long) __entry->start)
-);
-
-TRACE_EVENT(ext4_ext_in_cache,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
-
- TP_ARGS(inode, lblk, ret),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, lblk )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = lblk;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %u ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->lblk,
- __entry->ret)
-
-);
-
-TRACE_EVENT(ext4_find_delalloc_range,
- TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
- int reverse, int found, ext4_lblk_t found_blk),
-
- TP_ARGS(inode, from, to, reverse, found, found_blk),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, from )
- __field( ext4_lblk_t, to )
- __field( int, reverse )
- __field( int, found )
- __field( ext4_lblk_t, found_blk )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->from = from;
- __entry->to = to;
- __entry->reverse = reverse;
- __entry->found = found;
- __entry->found_blk = found_blk;
- ),
-
- TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
- "(blk = %u)",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->from, (unsigned) __entry->to,
- __entry->reverse, __entry->found,
- (unsigned) __entry->found_blk)
-);
-
-TRACE_EVENT(ext4_get_reserved_cluster_alloc,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
-
- TP_ARGS(inode, lblk, len),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ext4_lblk_t, lblk )
- __field( unsigned int, len )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->lblk = lblk;
- __entry->len = len;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %u len %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned) __entry->lblk,
- __entry->len)
-);
-
TRACE_EVENT(ext4_ext_show_extent,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
unsigned short len),
@@ -2791,6 +2673,310 @@ TRACE_EVENT(ext4_lazy_itable_init,
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group)
);
+TRACE_EVENT(ext4_fc_replay_scan,
+ TP_PROTO(struct super_block *sb, int error, int off),
+
+ TP_ARGS(sb, error, off),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, error)
+ __field(int, off)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->error = error;
+ __entry->off = off;
+ ),
+
+ TP_printk("dev %d,%d error %d, off %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->error, __entry->off)
+);
+
+TRACE_EVENT(ext4_fc_replay,
+ TP_PROTO(struct super_block *sb, int tag, int ino, int priv1, int priv2),
+
+ TP_ARGS(sb, tag, ino, priv1, priv2),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, tag)
+ __field(int, ino)
+ __field(int, priv1)
+ __field(int, priv2)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->tag = tag;
+ __entry->ino = ino;
+ __entry->priv1 = priv1;
+ __entry->priv2 = priv2;
+ ),
+
+ TP_printk("dev %d,%d: tag %d, ino %d, data1 %d, data2 %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tag, __entry->ino, __entry->priv1, __entry->priv2)
+);
+
+TRACE_EVENT(ext4_fc_commit_start,
+ TP_PROTO(struct super_block *sb, tid_t commit_tid),
+
+ TP_ARGS(sb, commit_tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->tid = commit_tid;
+ ),
+
+ TP_printk("dev %d,%d tid %u", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tid)
+);
+
+TRACE_EVENT(ext4_fc_commit_stop,
+ TP_PROTO(struct super_block *sb, int nblks, int reason,
+ tid_t commit_tid),
+
+ TP_ARGS(sb, nblks, reason, commit_tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, nblks)
+ __field(int, reason)
+ __field(int, num_fc)
+ __field(int, num_fc_ineligible)
+ __field(int, nblks_agg)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->nblks = nblks;
+ __entry->reason = reason;
+ __entry->num_fc = EXT4_SB(sb)->s_fc_stats.fc_num_commits;
+ __entry->num_fc_ineligible =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
+ __entry->nblks_agg = EXT4_SB(sb)->s_fc_stats.fc_numblks;
+ __entry->tid = commit_tid;
+ ),
+
+ TP_printk("dev %d,%d nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d, tid %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nblks, __entry->reason, __entry->num_fc,
+ __entry->num_fc_ineligible, __entry->nblks_agg, __entry->tid)
+);
+
+#define FC_REASON_NAME_STAT(reason) \
+ show_fc_reason(reason), \
+ __entry->fc_ineligible_rc[reason]
+
+TRACE_EVENT(ext4_fc_stats,
+ TP_PROTO(struct super_block *sb),
+
+ TP_ARGS(sb),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX)
+ __field(unsigned long, fc_commits)
+ __field(unsigned long, fc_ineligible_commits)
+ __field(unsigned long, fc_numblks)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __entry->dev = sb->s_dev;
+ for (i = 0; i < EXT4_FC_REASON_MAX; i++) {
+ __entry->fc_ineligible_rc[i] =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i];
+ }
+ __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits;
+ __entry->fc_ineligible_commits =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
+ __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks;
+ ),
+
+ TP_printk("dev %d,%d fc ineligible reasons:\n"
+ "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u"
+ "num_commits:%lu, ineligible: %lu, numblks: %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_ENCRYPTED_FILENAME),
+ __entry->fc_commits, __entry->fc_ineligible_commits,
+ __entry->fc_numblks)
+);
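+
ext4_fc_stats shows two less common tracepoint idioms: TP_fast_assign() is ordinary C and may declare locals and loop over an __array() field, and a helper macro (FC_REASON_NAME_STAT above) can expand to a name/count argument pair per TP_printk() conversion. A reduced sketch with hypothetical names:

#define SAMPLE_MAX 3

#define sample_reason(i)			\
	__print_symbolic(i,			\
		{ 0, "xattr"  },		\
		{ 1, "rename" },		\
		{ 2, "resize" })

#define SAMPLE_STAT(i) sample_reason(i), __entry->counts[i]

TRACE_EVENT(sample_counters,
	TP_PROTO(const unsigned int *counts),
	TP_ARGS(counts),
	TP_STRUCT__entry(
		__array(unsigned int, counts, SAMPLE_MAX)	/* fixed slots */
	),
	TP_fast_assign(
		int i;

		for (i = 0; i < SAMPLE_MAX; i++)
			__entry->counts[i] = counts[i];
	),
	TP_printk("%s:%u, %s:%u, %s:%u",
		  SAMPLE_STAT(0), SAMPLE_STAT(1), SAMPLE_STAT(2))
);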
+
+DECLARE_EVENT_CLASS(ext4_fc_track_dentry,
+
+ TP_PROTO(handle_t *handle, struct inode *inode,
+ struct dentry *dentry, int ret),
+
+ TP_ARGS(handle, inode, dentry, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d,%d, t_tid %u, ino %lu, i_sync_tid %u, error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error
+ )
+);
+
+#define DEFINE_EVENT_CLASS_DENTRY(__type) \
+DEFINE_EVENT(ext4_fc_track_dentry, ext4_fc_track_##__type, \
+ TP_PROTO(handle_t *handle, struct inode *inode, \
+ struct dentry *dentry, int ret), \
+ TP_ARGS(handle, inode, dentry, ret) \
+)
+
+DEFINE_EVENT_CLASS_DENTRY(create);
+DEFINE_EVENT_CLASS_DENTRY(link);
+DEFINE_EVENT_CLASS_DENTRY(unlink);
+
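The DEFINE_EVENT_CLASS_DENTRY() wrapper above stamps out three events from one DECLARE_EVENT_CLASS() template, so the record layout, assignment code, and format string exist only once. The general form of that pattern, with hypothetical names:

DECLARE_EVENT_CLASS(sample_inode_op,
	TP_PROTO(struct inode *inode, int ret),
	TP_ARGS(inode, ret),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(ino_t, ino)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->ret = ret;
	),
	TP_printk("dev %d,%d ino %lu ret %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->ret)
);

/* each DEFINE_EVENT() shares the class body above */
DEFINE_EVENT(sample_inode_op, sample_inode_create,
	TP_PROTO(struct inode *inode, int ret),
	TP_ARGS(inode, ret)
);

DEFINE_EVENT(sample_inode_op, sample_inode_unlink,
	TP_PROTO(struct inode *inode, int ret),
	TP_ARGS(inode, ret)
);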
+TRACE_EVENT(ext4_fc_track_inode,
+ TP_PROTO(handle_t *handle, struct inode *inode, int ret),
+
+ TP_ARGS(handle, inode, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error)
+ );
+
+TRACE_EVENT(ext4_fc_track_range,
+ TP_PROTO(handle_t *handle, struct inode *inode,
+ long start, long end, int ret),
+
+ TP_ARGS(handle, inode, start, end, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(long, start)
+ __field(long, end)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->start = start;
+ __entry->end = end;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d, start %ld, end %ld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error, __entry->start, __entry->end)
+ );
+
+TRACE_EVENT(ext4_fc_cleanup,
+ TP_PROTO(journal_t *journal, int full, tid_t tid),
+
+ TP_ARGS(journal, full, tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, j_fc_off)
+ __field(int, full)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ struct super_block *sb = journal->j_private;
+
+ __entry->dev = sb->s_dev;
+ __entry->j_fc_off = journal->j_fc_off;
+ __entry->full = full;
+ __entry->tid = tid;
+ ),
+
+ TP_printk("dev %d,%d, j_fc_off %d, full %d, tid %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->j_fc_off, __entry->full, __entry->tid)
+ );
+
+TRACE_EVENT(ext4_update_sb,
+ TP_PROTO(struct super_block *sb, ext4_fsblk_t fsblk,
+ unsigned int flags),
+
+ TP_ARGS(sb, fsblk, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ext4_fsblk_t, fsblk)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->fsblk = fsblk;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %d,%d fsblk %llu flags %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->fsblk, __entry->flags)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8a1c1311acac..7ed0fc430dc6 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -6,6 +6,7 @@
#define _TRACE_F2FS_H
#include <linux/tracepoint.h>
+#include <uapi/linux/f2fs.h>
#define show_dev(dev) MAJOR(dev), MINOR(dev)
#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino
@@ -14,10 +15,6 @@ TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
TRACE_DEFINE_ENUM(META);
TRACE_DEFINE_ENUM(META_FLUSH);
-TRACE_DEFINE_ENUM(INMEM);
-TRACE_DEFINE_ENUM(INMEM_DROP);
-TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
-TRACE_DEFINE_ENUM(INMEM_REVOKE);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(HOT);
@@ -51,6 +48,8 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
TRACE_DEFINE_ENUM(CP_TRIMMED);
TRACE_DEFINE_ENUM(CP_PAUSE);
TRACE_DEFINE_ENUM(CP_RESIZE);
+TRACE_DEFINE_ENUM(EX_READ);
+TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -58,10 +57,6 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
{ DATA, "DATA" }, \
{ META, "META" }, \
{ META_FLUSH, "META_FLUSH" }, \
- { INMEM, "INMEM" }, \
- { INMEM_DROP, "INMEM_DROP" }, \
- { INMEM_INVALIDATE, "INMEM_INVALIDATE" }, \
- { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
@@ -73,7 +68,7 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_PREFLUSH | REQ_FUA)
-#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS)
+#define F2FS_BIO_FLAG_MASK(t) (__force u32)((t) & F2FS_OP_FLAGS)
#define show_bio_type(op,op_flags) show_bio_op(op), \
show_bio_op_flags(op_flags)
@@ -82,12 +77,12 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
#define show_bio_op_flags(flags) \
__print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \
- { REQ_RAHEAD, "R" }, \
- { REQ_SYNC, "S" }, \
- { REQ_META, "M" }, \
- { REQ_PRIO, "P" }, \
- { REQ_PREFLUSH, "PF" }, \
- { REQ_FUA, "FUA" })
+ { (__force u32)REQ_RAHEAD, "R" }, \
+ { (__force u32)REQ_SYNC, "S" }, \
+ { (__force u32)REQ_META, "M" }, \
+ { (__force u32)REQ_PRIO, "P" }, \
+ { (__force u32)REQ_PREFLUSH, "PF" }, \
+ { (__force u32)REQ_FUA, "FUA" })
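
The (__force u32) casts are needed because blk_opf_t is a sparse __bitwise type: mixing it with plain integers in the __print_flags() table would trigger static-checker warnings, while the cast itself is free at run time. The same pattern outside the macro, as a sketch with a hypothetical helper name:

#include <linux/blk_types.h>

/* strip a request's flags down to the ones worth printing */
static inline u32 sample_trace_opf(blk_opf_t opf)
{
	/* __force: deliberate conversion out of the __bitwise type */
	return (__force u32)(opf & (REQ_SYNC | REQ_META | REQ_FUA));
}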
#define show_data_type(type) \
__print_symbolic(type, \
@@ -111,13 +106,15 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
#define show_alloc_mode(type) \
__print_symbolic(type, \
- { LFS, "LFS-mode" }, \
- { SSR, "SSR-mode" })
+ { LFS, "LFS-mode" }, \
+ { SSR, "SSR-mode" }, \
+ { AT_SSR, "AT_SSR-mode" })
#define show_victim_policy(type) \
__print_symbolic(type, \
{ GC_GREEDY, "Greedy" }, \
- { GC_CB, "Cost-Benefit" })
+ { GC_CB, "Cost-Benefit" }, \
+ { GC_AT, "Age-threshold" })
#define show_cpreason(type) \
__print_flags(type, "|", \
@@ -134,7 +131,7 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
__print_symbolic(type, \
{ CP_NO_NEEDED, "no needed" }, \
{ CP_NON_REGULAR, "non regular" }, \
- { CP_COMPRESSED, "compreesed" }, \
+ { CP_COMPRESSED, "compressed" }, \
{ CP_HARDLINK, "hardlink" }, \
{ CP_SB_NEED_CP, "sb needs cp" }, \
{ CP_WRONG_PINO, "wrong pino" }, \
@@ -159,6 +156,24 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
{ COMPRESS_ZSTD, "ZSTD" }, \
{ COMPRESS_LZORLE, "LZO-RLE" })
+#define show_extent_type(type) \
+ __print_symbolic(type, \
+ { EX_READ, "Read" }, \
+ { EX_BLOCK_AGE, "Block Age" })
+
+#define show_inode_type(x) \
+ __print_symbolic(x, \
+ { S_IFLNK, "symbolic" }, \
+ { S_IFREG, "regular" }, \
+ { S_IFDIR, "directory" }, \
+ { S_IFCHR, "character" }, \
+ { S_IFBLK, "block" }, \
+ { S_IFIFO, "fifo" }, \
+ { S_IFSOCK, "sock" })
+
+#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
+ S_IRWXU | S_IRWXG | S_IRWXO)
+
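show_inode_type() and the other show_*() helpers rely on __print_symbolic()/__print_flags(), which store only the raw integer in the ring buffer and translate it to text when the trace is read, keeping the hot path cheap. A self-contained sketch with a hypothetical event:

#define show_sample_mode(mode)			\
	__print_symbolic(mode,			\
		{ 0, "read"  },			\
		{ 1, "write" })

TRACE_EVENT(sample_mode_event,
	TP_PROTO(int mode),
	TP_ARGS(mode),
	TP_STRUCT__entry(
		__field(int, mode)		/* raw value only */
	),
	TP_fast_assign(
		__entry->mode = mode;
	),
	/* decoded to text at read time, not at trace time */
	TP_printk("mode = %s", show_sample_mode(__entry->mode))
);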
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -213,17 +228,21 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
+ __field(umode_t, mode)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, ret = %d",
+ TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d",
show_dev_ino(__entry),
+ show_inode_type(__entry->mode & S_IFMT),
+ __entry->mode & S_ALL_PERM,
__entry->ret)
);
@@ -327,7 +346,7 @@ TRACE_EVENT(f2fs_unlink_enter,
__field(ino_t, ino)
__field(loff_t, size)
__field(blkcnt_t, blocks)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
),
TP_fast_assign(
@@ -335,7 +354,7 @@ TRACE_EVENT(f2fs_unlink_enter,
__entry->ino = dir->i_ino;
__entry->size = dir->i_size;
__entry->blocks = dir->i_blocks;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
),
TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
@@ -343,7 +362,7 @@ TRACE_EVENT(f2fs_unlink_enter,
show_dev_ino(__entry),
__entry->size,
(unsigned long long)__entry->blocks,
- __entry->name)
+ __get_str(name))
);
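
The __string()/__assign_str() conversion above fixes a latent use-after-free: a __field(const char *, name) records only the pointer, and the dentry name it points at can be freed long before userspace reads the buffer. __string() instead reserves space in the record and copies the bytes at trace time. The safe pattern, reduced to a hypothetical event:

TRACE_EVENT(sample_name_event,
	TP_PROTO(struct dentry *dentry),
	TP_ARGS(dentry),
	TP_STRUCT__entry(
		__string(name, dentry->d_name.name)	/* sized, in-record copy */
	),
	TP_fast_assign(
		__assign_str(name, dentry->d_name.name);
	),
	TP_printk("name:%s", __get_str(name))
);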
DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit,
@@ -510,7 +529,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(nid_t, nid[3])
+ __array(nid_t, nid, 3)
__field(int, depth)
__field(int, err)
),
@@ -537,17 +556,17 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TRACE_EVENT(f2fs_file_write_iter,
- TP_PROTO(struct inode *inode, unsigned long offset,
- unsigned long length, int ret),
+ TP_PROTO(struct inode *inode, loff_t offset, size_t length,
+ ssize_t ret),
TP_ARGS(inode, offset, length, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(unsigned long, offset)
- __field(unsigned long, length)
- __field(int, ret)
+ __field(loff_t, offset)
+ __field(size_t, length)
+ __field(ssize_t, ret)
),
TP_fast_assign(
@@ -559,7 +578,7 @@ TRACE_EVENT(f2fs_file_write_iter,
),
TP_printk("dev = (%d,%d), ino = %lu, "
- "offset = %lu, length = %lu, written(err) = %d",
+ "offset = %lld, length = %zu, written(err) = %zd",
show_dev_ino(__entry),
__entry->offset,
__entry->length,
@@ -567,9 +586,10 @@ TRACE_EVENT(f2fs_file_write_iter,
);
TRACE_EVENT(f2fs_map_blocks,
- TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag,
+ int ret),
- TP_ARGS(inode, map, ret),
+ TP_ARGS(inode, map, flag, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -580,11 +600,13 @@ TRACE_EVENT(f2fs_map_blocks,
__field(unsigned int, m_flags)
__field(int, m_seg_type)
__field(bool, m_may_create)
+ __field(bool, m_multidev_dio)
+ __field(int, flag)
__field(int, ret)
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev = map->m_bdev->bd_dev;
__entry->ino = inode->i_ino;
__entry->m_lblk = map->m_lblk;
__entry->m_pblk = map->m_pblk;
@@ -592,12 +614,15 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags = map->m_flags;
__entry->m_seg_type = map->m_seg_type;
__entry->m_may_create = map->m_may_create;
+ __entry->m_multidev_dio = map->m_multidev_dio;
+ __entry->flag = flag;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx, flags = %u,"
- "seg_type = %d, may_create = %d, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
+ "seg_type = %d, may_create = %d, multidevice = %d, "
+ "flag = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
@@ -605,6 +630,8 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags,
__entry->m_seg_type,
__entry->m_may_create,
+ __entry->m_multidev_dio,
+ __entry->flag,
__entry->ret)
);
@@ -638,19 +665,22 @@ TRACE_EVENT(f2fs_background_gc,
TRACE_EVENT(f2fs_gc_begin,
- TP_PROTO(struct super_block *sb, bool sync, bool background,
+ TP_PROTO(struct super_block *sb, int gc_type, bool no_bg_gc,
+ unsigned int nr_free_secs,
long long dirty_nodes, long long dirty_dents,
long long dirty_imeta, unsigned int free_sec,
unsigned int free_seg, int reserved_seg,
unsigned int prefree_seg),
- TP_ARGS(sb, sync, background, dirty_nodes, dirty_dents, dirty_imeta,
+ TP_ARGS(sb, gc_type, no_bg_gc, nr_free_secs, dirty_nodes,
+ dirty_dents, dirty_imeta,
free_sec, free_seg, reserved_seg, prefree_seg),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(bool, sync)
- __field(bool, background)
+ __field(int, gc_type)
+ __field(bool, no_bg_gc)
+ __field(unsigned int, nr_free_secs)
__field(long long, dirty_nodes)
__field(long long, dirty_dents)
__field(long long, dirty_imeta)
@@ -662,8 +692,9 @@ TRACE_EVENT(f2fs_gc_begin,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->sync = sync;
- __entry->background = background;
+ __entry->gc_type = gc_type;
+ __entry->no_bg_gc = no_bg_gc;
+ __entry->nr_free_secs = nr_free_secs;
__entry->dirty_nodes = dirty_nodes;
__entry->dirty_dents = dirty_dents;
__entry->dirty_imeta = dirty_imeta;
@@ -673,12 +704,13 @@ TRACE_EVENT(f2fs_gc_begin,
__entry->prefree_seg = prefree_seg;
),
- TP_printk("dev = (%d,%d), sync = %d, background = %d, nodes = %lld, "
- "dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
+ TP_printk("dev = (%d,%d), gc_type = %s, no_background_GC = %d, nr_free_secs = %u, "
+ "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
"rsv_seg:%d, prefree_seg:%u",
show_dev(__entry->dev),
- __entry->sync,
- __entry->background,
+ show_gc_type(__entry->gc_type),
+ (__entry->gc_type == BG_GC) ? __entry->no_bg_gc : -1,
+ __entry->nr_free_secs,
__entry->dirty_nodes,
__entry->dirty_dents,
__entry->dirty_imeta,
@@ -804,20 +836,20 @@ TRACE_EVENT(f2fs_lookup_start,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
@@ -831,7 +863,7 @@ TRACE_EVENT(f2fs_lookup_end,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(nid_t, cino)
__field(int, err)
),
@@ -839,18 +871,87 @@ TRACE_EVENT(f2fs_lookup_end,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->cino = ino;
__entry->err = err;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->cino,
__entry->err)
);
+TRACE_EVENT(f2fs_rename_start,
+
+ TP_PROTO(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags),
+
+ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __field(ino_t, new_pino)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dir->i_sb->s_dev;
+ __entry->ino = old_dir->i_ino;
+ __assign_str(old_name, old_dentry->d_name.name);
+ __entry->new_pino = new_dir->i_ino;
+ __assign_str(new_name, new_dentry->d_name.name);
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, "
+ "new_dir = %lu, new_name: %s, flags = %u",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __entry->new_pino,
+ __get_str(new_name),
+ __entry->flags)
+);
+
+TRACE_EVENT(f2fs_rename_end,
+
+ TP_PROTO(struct dentry *old_dentry, struct dentry *new_dentry,
+ unsigned int flags, int ret),
+
+ TP_ARGS(old_dentry, new_dentry, flags, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dentry->d_sb->s_dev;
+ __entry->ino = old_dentry->d_inode->i_ino;
+ __assign_str(old_name, old_dentry->d_name.name);
+ __assign_str(new_name, new_dentry->d_name.name);
+ __entry->flags = flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, "
+ "new_name: %s, flags = %u, ret = %d",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __get_str(new_name),
+ __entry->flags,
+ __entry->ret)
+);
+
TRACE_EVENT(f2fs_readdir,
TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err),
@@ -922,30 +1023,36 @@ TRACE_EVENT(f2fs_fallocate,
TRACE_EVENT(f2fs_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+ TP_PROTO(struct inode *inode, struct kiocb *iocb, long len, int rw),
- TP_ARGS(inode, offset, len, rw),
+ TP_ARGS(inode, iocb, len, rw),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(loff_t, pos)
+ __field(loff_t, ki_pos)
+ __field(int, ki_flags)
+ __field(u16, ki_ioprio)
__field(unsigned long, len)
__field(int, rw)
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->ki_pos = iocb->ki_pos;
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->ki_ioprio = iocb->ki_ioprio;
+ __entry->len = len;
+ __entry->rw = rw;
),
- TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu rw = %d",
+ TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
show_dev_ino(__entry),
- __entry->pos,
+ __entry->ki_pos,
__entry->len,
+ __entry->ki_flags,
+ __entry->ki_ioprio,
__entry->rw)
);
@@ -1023,8 +1130,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__field(pgoff_t, index)
__field(block_t, old_blkaddr)
__field(block_t, new_blkaddr)
- __field(int, op)
- __field(int, op_flags)
+ __field(enum req_op, op)
+ __field(blk_opf_t, op_flags)
__field(int, temp)
__field(int, type)
),
@@ -1079,8 +1186,8 @@ DECLARE_EVENT_CLASS(f2fs__bio,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(dev_t, target)
- __field(int, op)
- __field(int, op_flags)
+ __field(enum req_op, op)
+ __field(blk_opf_t, op_flags)
__field(int, type)
__field(sector_t, sector)
__field(unsigned int, size)
@@ -1143,17 +1250,15 @@ DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio,
TRACE_EVENT(f2fs_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags),
+ TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(loff_t, pos)
__field(unsigned int, len)
- __field(unsigned int, flags)
),
TP_fast_assign(
@@ -1161,14 +1266,12 @@ TRACE_EVENT(f2fs_write_begin,
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
- __entry->flags = flags;
),
- TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u",
show_dev_ino(__entry),
(unsigned long long)__entry->pos,
- __entry->len,
- __entry->flags)
+ __entry->len)
);
TRACE_EVENT(f2fs_write_end,
@@ -1266,51 +1369,87 @@ DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
TP_ARGS(page, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
-DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
+TRACE_EVENT(f2fs_replace_atomic_write_block,
- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
+ block_t old_addr, block_t new_addr, bool recovery),
- TP_ARGS(page, type)
-);
+ TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery),
-DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, cow_ino)
+ __field(pgoff_t, index)
+ __field(block_t, old_addr)
+ __field(block_t, new_addr)
+ __field(bool, recovery)
+ ),
- TP_PROTO(struct page *page, int type),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->cow_ino = cow_inode->i_ino;
+ __entry->index = index;
+ __entry->old_addr = old_addr;
+ __entry->new_addr = new_addr;
+ __entry->recovery = recovery;
+ ),
- TP_ARGS(page, type)
+ TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, "
+ "old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d",
+ show_dev_ino(__entry),
+ __entry->cow_ino,
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->old_addr,
+ (unsigned long long)__entry->new_addr,
+ __entry->recovery)
);
-TRACE_EVENT(f2fs_filemap_fault,
+DECLARE_EVENT_CLASS(f2fs_mmap,
- TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
- TP_ARGS(inode, index, ret),
+ TP_ARGS(inode, index, flags, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(unsigned long, ret)
+ __field(vm_flags_t, flags)
+ __field(vm_fault_t, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->index = index;
+ __entry->flags = flags;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+ TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- __entry->ret)
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE))
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
);
TRACE_EVENT(f2fs_writepages,
@@ -1405,26 +1544,26 @@ TRACE_EVENT(f2fs_readpages,
TRACE_EVENT(f2fs_write_checkpoint,
- TP_PROTO(struct super_block *sb, int reason, char *msg),
+ TP_PROTO(struct super_block *sb, int reason, const char *msg),
TP_ARGS(sb, reason, msg),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(int, reason)
- __field(char *, msg)
+ __string(dest_msg, msg)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->reason = reason;
- __entry->msg = msg;
+ __assign_str(dest_msg, msg);
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
show_dev(__entry->dev),
show_cpreason(__entry->reason),
- __entry->msg)
+ __get_str(dest_msg))
);
DECLARE_EVENT_CLASS(f2fs_discard,
@@ -1472,7 +1611,7 @@ DEFINE_EVENT(f2fs_discard, f2fs_remove_discard,
TP_ARGS(dev, blkstart, blklen)
);
-TRACE_EVENT(f2fs_issue_reset_zone,
+DECLARE_EVENT_CLASS(f2fs_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
@@ -1488,11 +1627,25 @@ TRACE_EVENT(f2fs_issue_reset_zone,
__entry->blkstart = blkstart;
),
- TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ TP_printk("dev = (%d,%d), zone at block = 0x%llx",
show_dev(__entry->dev),
(unsigned long long)__entry->blkstart)
);
+DEFINE_EVENT(f2fs_reset_zone, f2fs_queue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
+DEFINE_EVENT(f2fs_reset_zone, f2fs_issue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct block_device *dev, unsigned int nobarrier,
@@ -1523,28 +1676,31 @@ TRACE_EVENT(f2fs_issue_flush,
TRACE_EVENT(f2fs_lookup_extent_tree_start,
- TP_PROTO(struct inode *inode, unsigned int pgofs),
+ TP_PROTO(struct inode *inode, unsigned int pgofs, enum extent_type type),
- TP_ARGS(inode, pgofs),
+ TP_ARGS(inode, pgofs, type),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(unsigned int, pgofs)
+ __field(enum extent_type, type)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
+ __entry->type = type;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s",
show_dev_ino(__entry),
- __entry->pgofs)
+ __entry->pgofs,
+ show_extent_type(__entry->type))
);
-TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end,
TP_PROTO(struct inode *inode, unsigned int pgofs,
struct extent_info *ei),
@@ -1558,8 +1714,8 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
__field(ino_t, ino)
__field(unsigned int, pgofs)
__field(unsigned int, fofs)
- __field(u32, blk)
__field(unsigned int, len)
+ __field(u32, blk)
),
TP_fast_assign(
@@ -1567,25 +1723,65 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
__entry->fofs = ei->fofs;
+ __entry->len = ei->len;
__entry->blk = ei->blk;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "read_ext_info(fofs: %u, len: %u, blk: %u)",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->fofs,
+ __entry->len,
+ __entry->blk)
+);
+
+TRACE_EVENT_CONDITION(f2fs_lookup_age_extent_tree_end,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs,
+ struct extent_info *ei),
+
+ TP_ARGS(inode, pgofs, ei),
+
+ TP_CONDITION(ei),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(unsigned int, fofs)
+ __field(unsigned int, len)
+ __field(unsigned long long, age)
+ __field(unsigned long long, blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->fofs = ei->fofs;
__entry->len = ei->len;
+ __entry->age = ei->age;
+ __entry->blocks = ei->last_blocks;
),
TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
- "ext_info(fofs: %u, blk: %u, len: %u)",
+ "age_ext_info(fofs: %u, len: %u, age: %llu, blocks: %llu)",
show_dev_ino(__entry),
__entry->pgofs,
__entry->fofs,
- __entry->blk,
- __entry->len)
+ __entry->len,
+ __entry->age,
+ __entry->blocks)
);
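
Both extent-tree lookup events use TRACE_EVENT_CONDITION(): TP_CONDITION() is evaluated first, and when it is false the ring-buffer write is skipped entirely, so cache misses cost almost nothing. In outline, with a hypothetical event name:

TRACE_EVENT_CONDITION(sample_lookup_end,
	TP_PROTO(struct inode *inode, struct extent_info *ei),
	TP_ARGS(inode, ei),

	TP_CONDITION(ei),	/* record only when an extent was found */

	TP_STRUCT__entry(
		__field(ino_t, ino)
		__field(unsigned int, len)
	),
	TP_fast_assign(
		__entry->ino = inode->i_ino;
		__entry->len = ei->len;	/* safe: the condition guarantees ei */
	),
	TP_printk("ino %lu len %u",
		  (unsigned long)__entry->ino, __entry->len)
);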
-TRACE_EVENT(f2fs_update_extent_tree_range,
+TRACE_EVENT(f2fs_update_read_extent_tree_range,
- TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
- unsigned int len),
+ TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len,
+ block_t blkaddr,
+ unsigned int c_len),
- TP_ARGS(inode, pgofs, blkaddr, len),
+ TP_ARGS(inode, pgofs, len, blkaddr, c_len),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1593,70 +1789,115 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
__field(unsigned int, pgofs)
__field(u32, blk)
__field(unsigned int, len)
+ __field(unsigned int, c_len)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
- __entry->blk = blkaddr;
__entry->len = len;
+ __entry->blk = blkaddr;
+ __entry->c_len = c_len;
),
TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
- "blkaddr = %u, len = %u",
+ "len = %u, blkaddr = %u, c_len = %u",
show_dev_ino(__entry),
__entry->pgofs,
+ __entry->len,
__entry->blk,
- __entry->len)
+ __entry->c_len)
+);
+
+TRACE_EVENT(f2fs_update_age_extent_tree_range,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len,
+ unsigned long long age,
+ unsigned long long last_blks),
+
+ TP_ARGS(inode, pgofs, len, age, last_blks),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(unsigned int, len)
+ __field(unsigned long long, age)
+ __field(unsigned long long, blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->len = len;
+ __entry->age = age;
+ __entry->blocks = last_blks;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "len = %u, age = %llu, blocks = %llu",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->len,
+ __entry->age,
+ __entry->blocks)
);
TRACE_EVENT(f2fs_shrink_extent_tree,
TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
- unsigned int tree_cnt),
+ unsigned int tree_cnt, enum extent_type type),
- TP_ARGS(sbi, node_cnt, tree_cnt),
+ TP_ARGS(sbi, node_cnt, tree_cnt, type),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, node_cnt)
__field(unsigned int, tree_cnt)
+ __field(enum extent_type, type)
),
TP_fast_assign(
__entry->dev = sbi->sb->s_dev;
__entry->node_cnt = node_cnt;
__entry->tree_cnt = tree_cnt;
+ __entry->type = type;
),
- TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
+ TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u, type = %s",
show_dev(__entry->dev),
__entry->node_cnt,
- __entry->tree_cnt)
+ __entry->tree_cnt,
+ show_extent_type(__entry->type))
);
TRACE_EVENT(f2fs_destroy_extent_tree,
- TP_PROTO(struct inode *inode, unsigned int node_cnt),
+ TP_PROTO(struct inode *inode, unsigned int node_cnt,
+ enum extent_type type),
- TP_ARGS(inode, node_cnt),
+ TP_ARGS(inode, node_cnt, type),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(unsigned int, node_cnt)
+ __field(enum extent_type, type)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->node_cnt = node_cnt;
+ __entry->type = type;
),
- TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s",
show_dev_ino(__entry),
- __entry->node_cnt)
+ __entry->node_cnt,
+ show_extent_type(__entry->type))
);
DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
@@ -1815,6 +2056,7 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
TP_ARGS(inode, cluster_idx, compressed_size, ret)
);
+#ifdef CONFIG_F2FS_IOSTAT
TRACE_EVENT(f2fs_iostat,
TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat),
@@ -1827,7 +2069,10 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, app_bio)
__field(unsigned long long, app_wio)
__field(unsigned long long, app_mio)
+ __field(unsigned long long, app_bcdio)
+ __field(unsigned long long, app_mcdio)
__field(unsigned long long, fs_dio)
+ __field(unsigned long long, fs_cdio)
__field(unsigned long long, fs_nio)
__field(unsigned long long, fs_mio)
__field(unsigned long long, fs_gc_dio)
@@ -1839,12 +2084,15 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, app_brio)
__field(unsigned long long, app_rio)
__field(unsigned long long, app_mrio)
+ __field(unsigned long long, app_bcrio)
+ __field(unsigned long long, app_mcrio)
__field(unsigned long long, fs_drio)
__field(unsigned long long, fs_gdrio)
__field(unsigned long long, fs_cdrio)
__field(unsigned long long, fs_nrio)
__field(unsigned long long, fs_mrio)
__field(unsigned long long, fs_discard)
+ __field(unsigned long long, fs_reset_zone)
),
TP_fast_assign(
@@ -1853,7 +2101,10 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_bio = iostat[APP_BUFFERED_IO];
__entry->app_wio = iostat[APP_WRITE_IO];
__entry->app_mio = iostat[APP_MAPPED_IO];
+ __entry->app_bcdio = iostat[APP_BUFFERED_CDATA_IO];
+ __entry->app_mcdio = iostat[APP_MAPPED_CDATA_IO];
__entry->fs_dio = iostat[FS_DATA_IO];
+ __entry->fs_cdio = iostat[FS_CDATA_IO];
__entry->fs_nio = iostat[FS_NODE_IO];
__entry->fs_mio = iostat[FS_META_IO];
__entry->fs_gc_dio = iostat[FS_GC_DATA_IO];
@@ -1865,32 +2116,137 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_brio = iostat[APP_BUFFERED_READ_IO];
__entry->app_rio = iostat[APP_READ_IO];
__entry->app_mrio = iostat[APP_MAPPED_READ_IO];
+ __entry->app_bcrio = iostat[APP_BUFFERED_CDATA_READ_IO];
+ __entry->app_mcrio = iostat[APP_MAPPED_CDATA_READ_IO];
__entry->fs_drio = iostat[FS_DATA_READ_IO];
__entry->fs_gdrio = iostat[FS_GDATA_READ_IO];
__entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
__entry->fs_nrio = iostat[FS_NODE_READ_IO];
__entry->fs_mrio = iostat[FS_META_READ_IO];
- __entry->fs_discard = iostat[FS_DISCARD];
+ __entry->fs_discard = iostat[FS_DISCARD_IO];
+ __entry->fs_reset_zone = iostat[FS_ZONE_RESET_IO];
),
TP_printk("dev = (%d,%d), "
- "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
- "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, "
+ "compr(buffered=%llu, mapped=%llu)], "
+ "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu, "
+ "reset_zone=%llu], "
"gc [data=%llu, node=%llu], "
"cp [data=%llu, node=%llu, meta=%llu], "
"app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
- "fs [data=%llu, (gc_data=%llu, compr_data=%llu), "
+ "compr(buffered=%llu, mapped=%llu)], "
+ "fs [data=%llu, (gc_data=%llu, cdata=%llu), "
"node=%llu, meta=%llu]",
show_dev(__entry->dev), __entry->app_wio, __entry->app_dio,
- __entry->app_bio, __entry->app_mio, __entry->fs_dio,
+ __entry->app_bio, __entry->app_mio, __entry->app_bcdio,
+ __entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
__entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_reset_zone,
__entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
__entry->fs_cp_nio, __entry->fs_cp_mio,
__entry->app_rio, __entry->app_drio, __entry->app_brio,
- __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio,
+ __entry->app_mrio, __entry->app_bcrio, __entry->app_mcrio,
+ __entry->fs_drio, __entry->fs_gdrio,
__entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
);
+#ifndef __F2FS_IOSTAT_LATENCY_TYPE
+#define __F2FS_IOSTAT_LATENCY_TYPE
+struct f2fs_iostat_latency {
+ unsigned int peak_lat;
+ unsigned int avg_lat;
+ unsigned int cnt;
+};
+#endif /* __F2FS_IOSTAT_LATENCY_TYPE */
+
+TRACE_EVENT(f2fs_iostat_latency,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, struct f2fs_iostat_latency (*iostat_lat)[NR_PAGE_TYPE]),
+
+ TP_ARGS(sbi, iostat_lat),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, d_rd_peak)
+ __field(unsigned int, d_rd_avg)
+ __field(unsigned int, d_rd_cnt)
+ __field(unsigned int, n_rd_peak)
+ __field(unsigned int, n_rd_avg)
+ __field(unsigned int, n_rd_cnt)
+ __field(unsigned int, m_rd_peak)
+ __field(unsigned int, m_rd_avg)
+ __field(unsigned int, m_rd_cnt)
+ __field(unsigned int, d_wr_s_peak)
+ __field(unsigned int, d_wr_s_avg)
+ __field(unsigned int, d_wr_s_cnt)
+ __field(unsigned int, n_wr_s_peak)
+ __field(unsigned int, n_wr_s_avg)
+ __field(unsigned int, n_wr_s_cnt)
+ __field(unsigned int, m_wr_s_peak)
+ __field(unsigned int, m_wr_s_avg)
+ __field(unsigned int, m_wr_s_cnt)
+ __field(unsigned int, d_wr_as_peak)
+ __field(unsigned int, d_wr_as_avg)
+ __field(unsigned int, d_wr_as_cnt)
+ __field(unsigned int, n_wr_as_peak)
+ __field(unsigned int, n_wr_as_avg)
+ __field(unsigned int, n_wr_as_cnt)
+ __field(unsigned int, m_wr_as_peak)
+ __field(unsigned int, m_wr_as_avg)
+ __field(unsigned int, m_wr_as_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->d_rd_peak = iostat_lat[READ_IO][DATA].peak_lat;
+ __entry->d_rd_avg = iostat_lat[READ_IO][DATA].avg_lat;
+ __entry->d_rd_cnt = iostat_lat[READ_IO][DATA].cnt;
+ __entry->n_rd_peak = iostat_lat[READ_IO][NODE].peak_lat;
+ __entry->n_rd_avg = iostat_lat[READ_IO][NODE].avg_lat;
+ __entry->n_rd_cnt = iostat_lat[READ_IO][NODE].cnt;
+ __entry->m_rd_peak = iostat_lat[READ_IO][META].peak_lat;
+ __entry->m_rd_avg = iostat_lat[READ_IO][META].avg_lat;
+ __entry->m_rd_cnt = iostat_lat[READ_IO][META].cnt;
+ __entry->d_wr_s_peak = iostat_lat[WRITE_SYNC_IO][DATA].peak_lat;
+ __entry->d_wr_s_avg = iostat_lat[WRITE_SYNC_IO][DATA].avg_lat;
+ __entry->d_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][DATA].cnt;
+ __entry->n_wr_s_peak = iostat_lat[WRITE_SYNC_IO][NODE].peak_lat;
+ __entry->n_wr_s_avg = iostat_lat[WRITE_SYNC_IO][NODE].avg_lat;
+ __entry->n_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][NODE].cnt;
+ __entry->m_wr_s_peak = iostat_lat[WRITE_SYNC_IO][META].peak_lat;
+ __entry->m_wr_s_avg = iostat_lat[WRITE_SYNC_IO][META].avg_lat;
+ __entry->m_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][META].cnt;
+ __entry->d_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][DATA].peak_lat;
+ __entry->d_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][DATA].avg_lat;
+ __entry->d_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][DATA].cnt;
+ __entry->n_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][NODE].peak_lat;
+ __entry->n_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][NODE].avg_lat;
+ __entry->n_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][NODE].cnt;
+ __entry->m_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][META].peak_lat;
+ __entry->m_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][META].avg_lat;
+ __entry->m_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][META].cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), "
+ "iotype [peak lat.(ms)/avg lat.(ms)/count], "
+ "rd_data [%u/%u/%u], rd_node [%u/%u/%u], rd_meta [%u/%u/%u], "
+ "wr_sync_data [%u/%u/%u], wr_sync_node [%u/%u/%u], "
+ "wr_sync_meta [%u/%u/%u], wr_async_data [%u/%u/%u], "
+ "wr_async_node [%u/%u/%u], wr_async_meta [%u/%u/%u]",
+ show_dev(__entry->dev),
+ __entry->d_rd_peak, __entry->d_rd_avg, __entry->d_rd_cnt,
+ __entry->n_rd_peak, __entry->n_rd_avg, __entry->n_rd_cnt,
+ __entry->m_rd_peak, __entry->m_rd_avg, __entry->m_rd_cnt,
+ __entry->d_wr_s_peak, __entry->d_wr_s_avg, __entry->d_wr_s_cnt,
+ __entry->n_wr_s_peak, __entry->n_wr_s_avg, __entry->n_wr_s_cnt,
+ __entry->m_wr_s_peak, __entry->m_wr_s_avg, __entry->m_wr_s_cnt,
+ __entry->d_wr_as_peak, __entry->d_wr_as_avg, __entry->d_wr_as_cnt,
+ __entry->n_wr_as_peak, __entry->n_wr_as_avg, __entry->n_wr_as_cnt,
+ __entry->m_wr_as_peak, __entry->m_wr_as_avg, __entry->m_wr_as_cnt)
+);
+#endif
+
TRACE_EVENT(f2fs_bmap,
TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock),
@@ -1954,6 +2310,100 @@ TRACE_EVENT(f2fs_fiemap,
__entry->ret)
);
+DECLARE_EVENT_CLASS(f2fs__rw_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command),
+
+ TP_STRUCT__entry(
+ __string(pathbuf, pathname)
+ __field(loff_t, offset)
+ __field(int, bytes)
+ __field(loff_t, i_size)
+ __string(cmdline, command)
+ __field(pid_t, pid)
+ __field(ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ /*
+ * Replace the spaces in filenames and cmdlines
+ * because they confuse the tooling that parses
+ * the traces.
+ */
+ __assign_str(pathbuf, pathname);
+ (void)strreplace(__get_str(pathbuf), ' ', '_');
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ __entry->i_size = i_size_read(inode);
+ __assign_str(cmdline, command);
+ (void)strreplace(__get_str(cmdline), ' ', '_');
+ __entry->pid = pid;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+ " pid %d, i_size %llu, ino %lu",
+ __get_str(pathbuf), __entry->offset, __entry->bytes,
+ __get_str(cmdline), __entry->pid, __entry->i_size,
+ (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(f2fs__rw_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(loff_t, offset)
+ __field(int, bytes)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ ),
+
+ TP_printk("ino %lu, offset %llu, bytes %d",
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_dataread_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_dataread_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_datawrite_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_datawrite_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 6f2a4dc35e37..20b914250ce9 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -32,11 +32,10 @@ TRACE_EVENT(fib_table_lookup,
__array( __u8, gw6, 16 )
__field( u16, sport )
__field( u16, dport )
- __dynamic_array(char, name, IFNAMSIZ )
+ __array(char, name, IFNAMSIZ )
),
TP_fast_assign(
- struct in6_addr in6_zero = {};
struct net_device *dev;
struct in6_addr *in6;
__be32 *p32;
@@ -66,7 +65,7 @@ TRACE_EVENT(fib_table_lookup,
}
dev = nhc ? nhc->nhc_dev : NULL;
- __assign_str(name, dev ? dev->name : "-");
+ strscpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
if (nhc) {
if (nhc->nhc_gw_family == AF_INET) {
@@ -74,7 +73,7 @@ TRACE_EVENT(fib_table_lookup,
*p32 = nhc->nhc_gw.ipv4;
in6 = (struct in6_addr *)__entry->gw6;
- *in6 = in6_zero;
+ *in6 = in6addr_any;
} else if (nhc->nhc_gw_family == AF_INET6) {
p32 = (__be32 *) __entry->gw4;
*p32 = 0;
@@ -87,7 +86,7 @@ TRACE_EVENT(fib_table_lookup,
*p32 = 0;
in6 = (struct in6_addr *)__entry->gw6;
- *in6 = in6_zero;
+ *in6 = in6addr_any;
}
),
@@ -95,7 +94,7 @@ TRACE_EVENT(fib_table_lookup,
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw4, __entry->gw6, __entry->err)
+ __entry->name, __entry->gw4, __entry->gw6, __entry->err)
);
#endif /* _TRACE_FIB_H */
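
The fib hunk trades __dynamic_array()/__assign_str() for a fixed __array(char, name, IFNAMSIZ): a dynamic array records an extra offset/length word per entry, which is wasted on a string with a small known bound, and strscpy() both limits the copy and guarantees NUL termination. The pattern in isolation, as a hypothetical event:

#include <linux/netdevice.h>
#include <linux/string.h>

TRACE_EVENT(sample_dev_event,
	TP_PROTO(struct net_device *dev),
	TP_ARGS(dev),
	TP_STRUCT__entry(
		__array(char, name, IFNAMSIZ)	/* fixed 16-byte slot */
	),
	TP_fast_assign(
		/* bounded copy; "-" stands in for a missing device */
		strscpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
	),
	TP_printk("dev=%s", __entry->name)
);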
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index c6abdcc77c12..5d7ee2610728 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -31,7 +31,7 @@ TRACE_EVENT(fib6_table_lookup,
__field( u16, dport )
__field( u8, proto )
__field( u8, rt_type )
- __dynamic_array( char, name, IFNAMSIZ )
+ __array( char, name, IFNAMSIZ )
__array( __u8, gw, 16 )
),
@@ -63,16 +63,13 @@ TRACE_EVENT(fib6_table_lookup,
}
if (res->nh && res->nh->fib_nh_dev) {
- __assign_str(name, res->nh->fib_nh_dev);
+ strscpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
} else {
- __assign_str(name, "-");
+ strcpy(__entry->name, "-");
}
if (res->f6i == net->ipv6.fib6_null_entry) {
- struct in6_addr in6_zero = {};
-
in6 = (struct in6_addr *)__entry->gw;
- *in6 = in6_zero;
-
+ *in6 = in6addr_any;
} else if (res->nh) {
in6 = (struct in6_addr *)__entry->gw;
*in6 = res->nh->fib_nh_gw6;
@@ -83,7 +80,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw, __entry->err)
+ __entry->name, __entry->gw, __entry->err)
);
#endif /* _TRACE_FIB6_H */
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index c705e4944a50..b8d1e00a7982 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -68,11 +68,11 @@ DECLARE_EVENT_CLASS(filelock_lock,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_blocker)
- __field(fl_owner_t, fl_owner)
- __field(unsigned int, fl_pid)
- __field(unsigned int, fl_flags)
- __field(unsigned char, fl_type)
+ __field(struct file_lock_core *, blocker)
+ __field(fl_owner_t, owner)
+ __field(unsigned int, pid)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
__field(loff_t, fl_start)
__field(loff_t, fl_end)
__field(int, ret)
@@ -82,21 +82,21 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
- __entry->fl_owner = fl ? fl->fl_owner : NULL;
- __entry->fl_pid = fl ? fl->fl_pid : 0;
- __entry->fl_flags = fl ? fl->fl_flags : 0;
- __entry->fl_type = fl ? fl->fl_type : 0;
+ __entry->blocker = fl ? fl->c.flc_blocker : NULL;
+ __entry->owner = fl ? fl->c.flc_owner : NULL;
+ __entry->pid = fl ? fl->c.flc_pid : 0;
+ __entry->flags = fl ? fl->c.flc_flags : 0;
+ __entry->type = fl ? fl->c.flc_type : 0;
__entry->fl_start = fl ? fl->fl_start : 0;
__entry->fl_end = fl ? fl->fl_end : 0;
__entry->ret = ret;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
+ TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
- __entry->fl_pid, show_fl_flags(__entry->fl_flags),
- show_fl_type(__entry->fl_type),
+ __entry->i_ino, __entry->blocker, __entry->owner,
+ __entry->pid, show_fl_flags(__entry->flags),
+ show_fl_type(__entry->type),
__entry->fl_start, __entry->fl_end, __entry->ret)
);
@@ -117,59 +117,59 @@ DEFINE_EVENT(filelock_lock, flock_lock_inode,
TP_ARGS(inode, fl, ret));
DECLARE_EVENT_CLASS(filelock_lease,
- TP_PROTO(struct inode *inode, struct file_lock *fl),
+ TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl),
TP_STRUCT__entry(
- __field(struct file_lock *, fl)
+ __field(struct file_lease *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_blocker)
- __field(fl_owner_t, fl_owner)
- __field(unsigned int, fl_flags)
- __field(unsigned char, fl_type)
- __field(unsigned long, fl_break_time)
- __field(unsigned long, fl_downgrade_time)
+ __field(struct file_lock_core *, blocker)
+ __field(fl_owner_t, owner)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
+ __field(unsigned long, break_time)
+ __field(unsigned long, downgrade_time)
),
TP_fast_assign(
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
- __entry->fl_owner = fl ? fl->fl_owner : NULL;
- __entry->fl_flags = fl ? fl->fl_flags : 0;
- __entry->fl_type = fl ? fl->fl_type : 0;
- __entry->fl_break_time = fl ? fl->fl_break_time : 0;
- __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
+ __entry->blocker = fl ? fl->c.flc_blocker : NULL;
+ __entry->owner = fl ? fl->c.flc_owner : NULL;
+ __entry->flags = fl ? fl->c.flc_flags : 0;
+ __entry->type = fl ? fl->c.flc_type : 0;
+ __entry->break_time = fl ? fl->fl_break_time : 0;
+ __entry->downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
+ TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
- show_fl_flags(__entry->fl_flags),
- show_fl_type(__entry->fl_type),
- __entry->fl_break_time, __entry->fl_downgrade_time)
+ __entry->i_ino, __entry->blocker, __entry->owner,
+ show_fl_flags(__entry->flags),
+ show_fl_type(__entry->type),
+ __entry->break_time, __entry->downgrade_time)
);
-DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl),
+DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl));
TRACE_EVENT(generic_add_lease,
- TP_PROTO(struct inode *inode, struct file_lock *fl),
+ TP_PROTO(struct inode *inode, struct file_lease *fl),
TP_ARGS(inode, fl),
@@ -179,9 +179,9 @@ TRACE_EVENT(generic_add_lease,
__field(int, rcount)
__field(int, icount)
__field(dev_t, s_dev)
- __field(fl_owner_t, fl_owner)
- __field(unsigned int, fl_flags)
- __field(unsigned char, fl_type)
+ __field(fl_owner_t, owner)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
),
TP_fast_assign(
@@ -190,21 +190,21 @@ TRACE_EVENT(generic_add_lease,
__entry->wcount = atomic_read(&inode->i_writecount);
__entry->rcount = atomic_read(&inode->i_readcount);
__entry->icount = atomic_read(&inode->i_count);
- __entry->fl_owner = fl->fl_owner;
- __entry->fl_flags = fl->fl_flags;
- __entry->fl_type = fl->fl_type;
+ __entry->owner = fl->c.flc_owner;
+ __entry->flags = fl->c.flc_flags;
+ __entry->type = fl->c.flc_type;
),
- TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s",
+ TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->wcount, __entry->rcount,
- __entry->icount, __entry->fl_owner,
- show_fl_flags(__entry->fl_flags),
- show_fl_type(__entry->fl_type))
+ __entry->icount, __entry->owner,
+ show_fl_flags(__entry->flags),
+ show_fl_type(__entry->type))
);
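
The filelock hunks track the split of struct file_lock into a shared struct file_lock_core (reached as fl->c.flc_*) plus lock- and lease-specific wrappers. A deliberately simplified sketch of that layout, not the full kernel definitions:

struct file_lock_core {
	struct file_lock_core	*flc_blocker;	/* who we are waiting on */
	void			*flc_owner;
	unsigned int		flc_flags;
	unsigned char		flc_type;
	unsigned int		flc_pid;
};

struct file_lease {
	struct file_lock_core	c;		/* common part, embedded first */
	unsigned long		fl_break_time;	/* lease-only members follow */
	unsigned long		fl_downgrade_time;
};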
TRACE_EVENT(leases_conflict,
- TP_PROTO(bool conflict, struct file_lock *lease, struct file_lock *breaker),
+ TP_PROTO(bool conflict, struct file_lease *lease, struct file_lease *breaker),
TP_ARGS(conflict, lease, breaker),
@@ -220,15 +220,15 @@ TRACE_EVENT(leases_conflict,
TP_fast_assign(
__entry->lease = lease;
- __entry->l_fl_flags = lease->fl_flags;
- __entry->l_fl_type = lease->fl_type;
+ __entry->l_fl_flags = lease->c.flc_flags;
+ __entry->l_fl_type = lease->c.flc_type;
__entry->breaker = breaker;
- __entry->b_fl_flags = breaker->fl_flags;
- __entry->b_fl_type = breaker->fl_type;
+ __entry->b_fl_flags = breaker->c.flc_flags;
+ __entry->b_fl_type = breaker->c.flc_type;
__entry->conflict = conflict;
),
- TP_printk("conflict %d: lease=0x%p fl_flags=%s fl_type=%s; breaker=0x%p fl_flags=%s fl_type=%s",
+ TP_printk("conflict %d: lease=%p fl_flags=%s fl_type=%s; breaker=%p fl_flags=%s fl_type=%s",
__entry->conflict,
__entry->lease,
show_fl_flags(__entry->l_fl_flags),
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 796053e162d2..46c89c1e460c 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -15,43 +15,45 @@
DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
+ __field(unsigned char, order)
),
TP_fast_assign(
- __entry->pfn = page_to_pfn(page);
- __entry->i_ino = page->mapping->host->i_ino;
- __entry->index = page->index;
- if (page->mapping->host->i_sb)
- __entry->s_dev = page->mapping->host->i_sb->s_dev;
+ __entry->pfn = folio_pfn(folio);
+ __entry->i_ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
+ if (folio->mapping->host->i_sb)
+ __entry->s_dev = folio->mapping->host->i_sb->s_dev;
else
- __entry->s_dev = page->mapping->host->i_rdev;
+ __entry->s_dev = folio->mapping->host->i_rdev;
+ __entry->order = folio_order(folio);
),
- TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
+ TP_printk("dev %d:%d ino %lx pfn=0x%lx ofs=%lu order=%u",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
- pfn_to_page(__entry->pfn),
__entry->pfn,
- __entry->index << PAGE_SHIFT)
+ __entry->index << PAGE_SHIFT,
+ __entry->order)
);
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
- TP_PROTO(struct page *page),
- TP_ARGS(page)
+ TP_PROTO(struct folio *folio),
+ TP_ARGS(folio)
);
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
- TP_PROTO(struct page *page),
- TP_ARGS(page)
+ TP_PROTO(struct folio *folio),
+ TP_ARGS(folio)
);
TRACE_EVENT(filemap_set_wb_err,
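The filemap class conversion replaces the struct page prototype with struct folio and records folio_order(), so one event can describe a multi-page folio, while TP_printk() drops the raw kernel pointer and reports only the pfn. A small sketch of the arithmetic behind the ofs and order fields, assuming PAGE_SHIFT of 12 (4 KiB base pages) and made-up field values:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB base pages */

int main(void)
{
	unsigned long index = 35;	/* hypothetical folio->index */
	unsigned char order = 2;	/* folio_order(): a 4-page folio */

	printf("ofs=%lu order=%u span=%lu bytes\n",
	       index << PAGE_SHIFT,		/* byte offset in the file */
	       order,
	       (1UL << order) << PAGE_SHIFT);	/* folio size in bytes */
	return 0;
}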
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index d16fe6ed78a2..a6190aa1b406 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* FS-Cache tracepoints
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#undef TRACE_SYSTEM
@@ -19,65 +19,85 @@
#ifndef __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+enum fscache_cache_trace {
+ fscache_cache_collision,
+ fscache_cache_get_acquire,
+ fscache_cache_new_acquire,
+ fscache_cache_put_alloc_volume,
+ fscache_cache_put_cache,
+ fscache_cache_put_prep_failed,
+ fscache_cache_put_relinquish,
+ fscache_cache_put_volume,
+};
+
+enum fscache_volume_trace {
+ fscache_volume_collision,
+ fscache_volume_get_cookie,
+ fscache_volume_get_create_work,
+ fscache_volume_get_hash_collision,
+ fscache_volume_free,
+ fscache_volume_new_acquire,
+ fscache_volume_put_cookie,
+ fscache_volume_put_create_work,
+ fscache_volume_put_hash_collision,
+ fscache_volume_put_relinquish,
+ fscache_volume_see_create_work,
+ fscache_volume_see_hash_wake,
+ fscache_volume_wait_create_work,
+};
+
enum fscache_cookie_trace {
fscache_cookie_collision,
fscache_cookie_discard,
- fscache_cookie_get_acquire_parent,
+ fscache_cookie_failed,
fscache_cookie_get_attach_object,
- fscache_cookie_get_reacquire,
- fscache_cookie_get_register_netfs,
- fscache_cookie_put_acquire_nobufs,
- fscache_cookie_put_dup_netfs,
- fscache_cookie_put_relinquish,
+ fscache_cookie_get_end_access,
+ fscache_cookie_get_hash_collision,
+ fscache_cookie_get_inval_work,
+ fscache_cookie_get_lru,
+ fscache_cookie_get_use_work,
+ fscache_cookie_new_acquire,
+ fscache_cookie_put_hash_collision,
+ fscache_cookie_put_lru,
fscache_cookie_put_object,
- fscache_cookie_put_parent,
-};
-
-enum fscache_page_trace {
- fscache_page_cached,
- fscache_page_inval,
- fscache_page_maybe_release,
- fscache_page_radix_clear_store,
- fscache_page_radix_delete,
- fscache_page_radix_insert,
- fscache_page_radix_pend2store,
- fscache_page_radix_set_pend,
- fscache_page_uncache,
- fscache_page_write,
- fscache_page_write_end,
- fscache_page_write_end_pend,
- fscache_page_write_end_noc,
- fscache_page_write_wait,
- fscache_page_trace__nr
+ fscache_cookie_put_over_queued,
+ fscache_cookie_put_relinquish,
+ fscache_cookie_put_withdrawn,
+ fscache_cookie_put_work,
+ fscache_cookie_see_active,
+ fscache_cookie_see_lru_discard,
+ fscache_cookie_see_lru_discard_clear,
+ fscache_cookie_see_lru_do_one,
+ fscache_cookie_see_relinquish,
+ fscache_cookie_see_withdraw,
+ fscache_cookie_see_work,
};
-enum fscache_op_trace {
- fscache_op_cancel,
- fscache_op_cancel_all,
- fscache_op_cancelled,
- fscache_op_completed,
- fscache_op_enqueue_async,
- fscache_op_enqueue_mythread,
- fscache_op_gc,
- fscache_op_init,
- fscache_op_put,
- fscache_op_run,
- fscache_op_signal,
- fscache_op_submit,
- fscache_op_submit_ex,
- fscache_op_work,
- fscache_op_trace__nr
+enum fscache_active_trace {
+ fscache_active_use,
+ fscache_active_use_modify,
+ fscache_active_unuse,
};
-enum fscache_page_op_trace {
- fscache_page_op_alloc_one,
- fscache_page_op_attr_changed,
- fscache_page_op_check_consistency,
- fscache_page_op_invalidate,
- fscache_page_op_retr_multi,
- fscache_page_op_retr_one,
- fscache_page_op_write_one,
- fscache_page_op_trace__nr
+enum fscache_access_trace {
+ fscache_access_acquire_volume,
+ fscache_access_acquire_volume_end,
+ fscache_access_cache_pin,
+ fscache_access_cache_unpin,
+ fscache_access_invalidate_cookie,
+ fscache_access_invalidate_cookie_end,
+ fscache_access_io_end,
+ fscache_access_io_not_live,
+ fscache_access_io_read,
+ fscache_access_io_resize,
+ fscache_access_io_wait,
+ fscache_access_io_write,
+ fscache_access_lookup_cookie,
+ fscache_access_lookup_cookie_end,
+ fscache_access_lookup_cookie_end_failed,
+ fscache_access_relinquish_volume,
+ fscache_access_relinquish_volume_end,
+ fscache_access_unlive,
};
#endif
@@ -85,59 +105,81 @@ enum fscache_page_op_trace {
/*
* Declare tracing information enums and their string mappings for display.
*/
+#define fscache_cache_traces \
+ EM(fscache_cache_collision, "*COLLIDE*") \
+ EM(fscache_cache_get_acquire, "GET acq ") \
+ EM(fscache_cache_new_acquire, "NEW acq ") \
+ EM(fscache_cache_put_alloc_volume, "PUT alvol") \
+ EM(fscache_cache_put_cache, "PUT cache") \
+ EM(fscache_cache_put_prep_failed, "PUT pfail") \
+ EM(fscache_cache_put_relinquish, "PUT relnq") \
+ E_(fscache_cache_put_volume, "PUT vol ")
+
+#define fscache_volume_traces \
+ EM(fscache_volume_collision, "*COLLIDE*") \
+ EM(fscache_volume_get_cookie, "GET cook ") \
+ EM(fscache_volume_get_create_work, "GET creat") \
+ EM(fscache_volume_get_hash_collision, "GET hcoll") \
+ EM(fscache_volume_free, "FREE ") \
+ EM(fscache_volume_new_acquire, "NEW acq ") \
+ EM(fscache_volume_put_cookie, "PUT cook ") \
+ EM(fscache_volume_put_create_work, "PUT creat") \
+ EM(fscache_volume_put_hash_collision, "PUT hcoll") \
+ EM(fscache_volume_put_relinquish, "PUT relnq") \
+ EM(fscache_volume_see_create_work, "SEE creat") \
+ EM(fscache_volume_see_hash_wake, "SEE hwake") \
+ E_(fscache_volume_wait_create_work, "WAIT crea")
+
#define fscache_cookie_traces \
- EM(fscache_cookie_collision, "*COLLISION*") \
- EM(fscache_cookie_discard, "DISCARD") \
- EM(fscache_cookie_get_acquire_parent, "GET prn") \
- EM(fscache_cookie_get_attach_object, "GET obj") \
- EM(fscache_cookie_get_reacquire, "GET raq") \
- EM(fscache_cookie_get_register_netfs, "GET net") \
- EM(fscache_cookie_put_acquire_nobufs, "PUT nbf") \
- EM(fscache_cookie_put_dup_netfs, "PUT dnt") \
- EM(fscache_cookie_put_relinquish, "PUT rlq") \
- EM(fscache_cookie_put_object, "PUT obj") \
- E_(fscache_cookie_put_parent, "PUT prn")
-
-#define fscache_page_traces \
- EM(fscache_page_cached, "Cached ") \
- EM(fscache_page_inval, "InvalPg") \
- EM(fscache_page_maybe_release, "MayRels") \
- EM(fscache_page_uncache, "Uncache") \
- EM(fscache_page_radix_clear_store, "RxCStr ") \
- EM(fscache_page_radix_delete, "RxDel ") \
- EM(fscache_page_radix_insert, "RxIns ") \
- EM(fscache_page_radix_pend2store, "RxP2S ") \
- EM(fscache_page_radix_set_pend, "RxSPend ") \
- EM(fscache_page_write, "WritePg") \
- EM(fscache_page_write_end, "EndPgWr") \
- EM(fscache_page_write_end_pend, "EndPgWP") \
- EM(fscache_page_write_end_noc, "EndPgNC") \
- E_(fscache_page_write_wait, "WtOnWrt")
-
-#define fscache_op_traces \
- EM(fscache_op_cancel, "Cancel1") \
- EM(fscache_op_cancel_all, "CancelA") \
- EM(fscache_op_cancelled, "Canclld") \
- EM(fscache_op_completed, "Complet") \
- EM(fscache_op_enqueue_async, "EnqAsyn") \
- EM(fscache_op_enqueue_mythread, "EnqMyTh") \
- EM(fscache_op_gc, "GC ") \
- EM(fscache_op_init, "Init ") \
- EM(fscache_op_put, "Put ") \
- EM(fscache_op_run, "Run ") \
- EM(fscache_op_signal, "Signal ") \
- EM(fscache_op_submit, "Submit ") \
- EM(fscache_op_submit_ex, "SubmitX") \
- E_(fscache_op_work, "Work ")
-
-#define fscache_page_op_traces \
- EM(fscache_page_op_alloc_one, "Alloc1 ") \
- EM(fscache_page_op_attr_changed, "AttrChg") \
- EM(fscache_page_op_check_consistency, "CheckCn") \
- EM(fscache_page_op_invalidate, "Inval ") \
- EM(fscache_page_op_retr_multi, "RetrMul") \
- EM(fscache_page_op_retr_one, "Retr1 ") \
- E_(fscache_page_op_write_one, "Write1 ")
+ EM(fscache_cookie_collision, "*COLLIDE*") \
+ EM(fscache_cookie_discard, "DISCARD ") \
+ EM(fscache_cookie_failed, "FAILED ") \
+ EM(fscache_cookie_get_attach_object, "GET attch") \
+ EM(fscache_cookie_get_hash_collision, "GET hcoll") \
+ EM(fscache_cookie_get_end_access, "GQ endac") \
+ EM(fscache_cookie_get_inval_work, "GQ inval") \
+ EM(fscache_cookie_get_lru, "GET lru ") \
+ EM(fscache_cookie_get_use_work, "GQ use ") \
+ EM(fscache_cookie_new_acquire, "NEW acq ") \
+ EM(fscache_cookie_put_hash_collision, "PUT hcoll") \
+ EM(fscache_cookie_put_lru, "PUT lru ") \
+ EM(fscache_cookie_put_object, "PUT obj ") \
+ EM(fscache_cookie_put_over_queued, "PQ overq") \
+ EM(fscache_cookie_put_relinquish, "PUT relnq") \
+ EM(fscache_cookie_put_withdrawn, "PUT wthdn") \
+ EM(fscache_cookie_put_work, "PQ work ") \
+ EM(fscache_cookie_see_active, "- activ") \
+ EM(fscache_cookie_see_lru_discard, "- x-lru") \
+ EM(fscache_cookie_see_lru_discard_clear,"- lrudc") \
+ EM(fscache_cookie_see_lru_do_one, "- lrudo") \
+ EM(fscache_cookie_see_relinquish, "- x-rlq") \
+ EM(fscache_cookie_see_withdraw, "- x-wth") \
+ E_(fscache_cookie_see_work, "- work ")
+
+#define fscache_active_traces \
+ EM(fscache_active_use, "USE ") \
+ EM(fscache_active_use_modify, "USE-m ") \
+ E_(fscache_active_unuse, "UNUSE ")
+
+#define fscache_access_traces \
+ EM(fscache_access_acquire_volume, "BEGIN acq_vol") \
+ EM(fscache_access_acquire_volume_end, "END acq_vol") \
+ EM(fscache_access_cache_pin, "PIN cache ") \
+ EM(fscache_access_cache_unpin, "UNPIN cache ") \
+ EM(fscache_access_invalidate_cookie, "BEGIN inval ") \
+ EM(fscache_access_invalidate_cookie_end,"END inval ") \
+ EM(fscache_access_io_end, "END io ") \
+ EM(fscache_access_io_not_live, "END io_notl") \
+ EM(fscache_access_io_read, "BEGIN io_read") \
+ EM(fscache_access_io_resize, "BEGIN io_resz") \
+ EM(fscache_access_io_wait, "WAIT io ") \
+ EM(fscache_access_io_write, "BEGIN io_writ") \
+ EM(fscache_access_lookup_cookie, "BEGIN lookup ") \
+ EM(fscache_access_lookup_cookie_end, "END lookup ") \
+ EM(fscache_access_lookup_cookie_end_failed,"END lookupf") \
+ EM(fscache_access_relinquish_volume, "BEGIN rlq_vol") \
+ EM(fscache_access_relinquish_volume_end,"END rlq_vol") \
+ E_(fscache_access_unlive, "END unlive ")
/*
* Export enum symbols via userspace.
@@ -147,7 +189,10 @@ enum fscache_page_op_trace {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+fscache_cache_traces;
+fscache_volume_traces;
fscache_cookie_traces;
+fscache_access_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -159,372 +204,297 @@ fscache_cookie_traces;
#define E_(a, b) { a, b }
-TRACE_EVENT(fscache_cookie,
- TP_PROTO(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where,
- int usage),
+TRACE_EVENT(fscache_cache,
+ TP_PROTO(unsigned int cache_debug_id,
+ int usage,
+ enum fscache_cache_trace where),
- TP_ARGS(cookie, where, usage),
+ TP_ARGS(cache_debug_id, usage, where),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_cookie *, parent )
- __field(enum fscache_cookie_trace, where )
+ __field(unsigned int, cache )
__field(int, usage )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
+ __field(enum fscache_cache_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->parent = cookie->parent;
- __entry->where = where;
+ __entry->cache = cache_debug_id;
__entry->usage = usage;
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
- ),
-
- TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x",
- __print_symbolic(__entry->where, fscache_cookie_traces),
- __entry->cookie, __entry->usage,
- __entry->parent, __entry->n_children, __entry->n_active,
- __entry->flags)
- );
-
-TRACE_EVENT(fscache_netfs,
- TP_PROTO(struct fscache_netfs *netfs),
-
- TP_ARGS(netfs),
-
- TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __array(char, name, 8 )
- ),
-
- TP_fast_assign(
- __entry->cookie = netfs->primary_index;
- strncpy(__entry->name, netfs->name, 8);
- __entry->name[7] = 0;
- ),
-
- TP_printk("c=%p n=%s",
- __entry->cookie, __entry->name)
- );
-
-TRACE_EVENT(fscache_acquire,
- TP_PROTO(struct fscache_cookie *cookie),
-
- TP_ARGS(cookie),
-
- TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_cookie *, parent )
- __array(char, name, 8 )
- __field(int, p_usage )
- __field(int, p_n_children )
- __field(u8, p_flags )
- ),
-
- TP_fast_assign(
- __entry->cookie = cookie;
- __entry->parent = cookie->parent;
- __entry->p_usage = atomic_read(&cookie->parent->usage);
- __entry->p_n_children = atomic_read(&cookie->parent->n_children);
- __entry->p_flags = cookie->parent->flags;
- memcpy(__entry->name, cookie->def->name, 8);
- __entry->name[7] = 0;
+ __entry->where = where;
),
- TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s",
- __entry->cookie, __entry->parent, __entry->p_usage,
- __entry->p_n_children, __entry->p_flags, __entry->name)
+ TP_printk("C=%08x %s r=%d",
+ __entry->cache,
+ __print_symbolic(__entry->where, fscache_cache_traces),
+ __entry->usage)
);
-TRACE_EVENT(fscache_relinquish,
- TP_PROTO(struct fscache_cookie *cookie, bool retire),
+TRACE_EVENT(fscache_volume,
+ TP_PROTO(unsigned int volume_debug_id,
+ int usage,
+ enum fscache_volume_trace where),
- TP_ARGS(cookie, retire),
+ TP_ARGS(volume_debug_id, usage, where),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_cookie *, parent )
+ __field(unsigned int, volume )
__field(int, usage )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
- __field(bool, retire )
+ __field(enum fscache_volume_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->parent = cookie->parent;
- __entry->usage = atomic_read(&cookie->usage);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
- __entry->retire = retire;
+ __entry->volume = volume_debug_id;
+ __entry->usage = usage;
+ __entry->where = where;
),
- TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u",
- __entry->cookie, __entry->usage,
- __entry->parent, __entry->n_children, __entry->n_active,
- __entry->flags, __entry->retire)
+ TP_printk("V=%08x %s u=%d",
+ __entry->volume,
+ __print_symbolic(__entry->where, fscache_volume_traces),
+ __entry->usage)
);
-TRACE_EVENT(fscache_enable,
- TP_PROTO(struct fscache_cookie *cookie),
+TRACE_EVENT(fscache_cookie,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ enum fscache_cookie_trace where),
- TP_ARGS(cookie),
+ TP_ARGS(cookie_debug_id, ref, where),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(int, usage )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
+ __field(unsigned int, cookie )
+ __field(int, ref )
+ __field(enum fscache_cookie_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->usage = atomic_read(&cookie->usage);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->where = where;
),
- TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
- __entry->cookie, __entry->usage,
- __entry->n_children, __entry->n_active, __entry->flags)
+ TP_printk("c=%08x %s r=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->where, fscache_cookie_traces),
+ __entry->ref)
);
-TRACE_EVENT(fscache_disable,
- TP_PROTO(struct fscache_cookie *cookie),
+TRACE_EVENT(fscache_active,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ int n_active,
+ int n_accesses,
+ enum fscache_active_trace why),
- TP_ARGS(cookie),
+ TP_ARGS(cookie_debug_id, ref, n_active, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(int, usage )
- __field(int, n_children )
+ __field(unsigned int, cookie )
+ __field(int, ref )
__field(int, n_active )
- __field(u8, flags )
+ __field(int, n_accesses )
+ __field(enum fscache_active_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->usage = atomic_read(&cookie->usage);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_active = n_active;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
- __entry->cookie, __entry->usage,
- __entry->n_children, __entry->n_active, __entry->flags)
+ TP_printk("c=%08x %s r=%d a=%d c=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_active_traces),
+ __entry->ref,
+ __entry->n_accesses,
+ __entry->n_active)
);
-TRACE_EVENT(fscache_osm,
- TP_PROTO(struct fscache_object *object,
- const struct fscache_state *state,
- bool wait, bool oob, s8 event_num),
+TRACE_EVENT(fscache_access_cache,
+ TP_PROTO(unsigned int cache_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(object, state, wait, oob, event_num),
+ TP_ARGS(cache_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_object *, object )
- __array(char, state, 8 )
- __field(bool, wait )
- __field(bool, oob )
- __field(s8, event_num )
+ __field(unsigned int, cache )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = object->cookie;
- __entry->object = object;
- __entry->wait = wait;
- __entry->oob = oob;
- __entry->event_num = event_num;
- memcpy(__entry->state, state->short_name, 8);
+ __entry->cache = cache_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p o=%p %s %s%sev=%d",
- __entry->cookie,
- __entry->object,
- __entry->state,
- __print_symbolic(__entry->wait,
- { true, "WAIT" },
- { false, "WORK" }),
- __print_symbolic(__entry->oob,
- { true, " OOB " },
- { false, " " }),
- __entry->event_num)
+ TP_printk("C=%08x %s r=%d a=%d",
+ __entry->cache,
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- enum fscache_page_trace why),
+TRACE_EVENT(fscache_access_volume,
+ TP_PROTO(unsigned int volume_debug_id,
+ unsigned int cookie_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(cookie, page, why),
+ TP_ARGS(volume_debug_id, cookie_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(pgoff_t, page )
- __field(enum fscache_page_trace, why )
+ __field(unsigned int, volume )
+ __field(unsigned int, cookie )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page->index;
- __entry->why = why;
+ __entry->volume = volume_debug_id;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p %s pg=%lx",
+ TP_printk("V=%08x c=%08x %s r=%d a=%d",
+ __entry->volume,
__entry->cookie,
- __print_symbolic(__entry->why, fscache_page_traces),
- __entry->page)
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_check_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- void *val, int n),
+TRACE_EVENT(fscache_access,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(cookie, page, val, n),
+ TP_ARGS(cookie_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(void *, page )
- __field(void *, val )
- __field(int, n )
+ __field(unsigned int, cookie )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page;
- __entry->val = val;
- __entry->n = n;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%p pg=%p val=%p n=%d",
- __entry->cookie, __entry->page, __entry->val, __entry->n)
+ TP_printk("c=%08x %s r=%d a=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_wake_cookie,
+TRACE_EVENT(fscache_acquire,
TP_PROTO(struct fscache_cookie *cookie),
TP_ARGS(cookie),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- ),
-
- TP_fast_assign(
- __entry->cookie = cookie;
- ),
-
- TP_printk("c=%p", __entry->cookie)
- );
-
-TRACE_EVENT(fscache_op,
- TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
- enum fscache_op_trace why),
-
- TP_ARGS(cookie, op, why),
-
- TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_operation *, op )
- __field(enum fscache_op_trace, why )
+ __field(unsigned int, cookie )
+ __field(unsigned int, volume )
+ __field(int, v_ref )
+ __field(int, v_n_cookies )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->op = op;
- __entry->why = why;
+ __entry->cookie = cookie->debug_id;
+ __entry->volume = cookie->volume->debug_id;
+ __entry->v_ref = refcount_read(&cookie->volume->ref);
+ __entry->v_n_cookies = atomic_read(&cookie->volume->n_cookies);
),
- TP_printk("c=%p op=%p %s",
- __entry->cookie, __entry->op,
- __print_symbolic(__entry->why, fscache_op_traces))
+ TP_printk("c=%08x V=%08x vr=%d vc=%d",
+ __entry->cookie,
+ __entry->volume, __entry->v_ref, __entry->v_n_cookies)
);
-TRACE_EVENT(fscache_page_op,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- struct fscache_operation *op, enum fscache_page_op_trace what),
+TRACE_EVENT(fscache_relinquish,
+ TP_PROTO(struct fscache_cookie *cookie, bool retire),
- TP_ARGS(cookie, page, op, what),
+ TP_ARGS(cookie, retire),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(pgoff_t, page )
- __field(struct fscache_operation *, op )
- __field(enum fscache_page_op_trace, what )
+ __field(unsigned int, cookie )
+ __field(unsigned int, volume )
+ __field(int, ref )
+ __field(int, n_active )
+ __field(u8, flags )
+ __field(bool, retire )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page ? page->index : 0;
- __entry->op = op;
- __entry->what = what;
+ __entry->cookie = cookie->debug_id;
+ __entry->volume = cookie->volume->debug_id;
+ __entry->ref = refcount_read(&cookie->ref);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ __entry->retire = retire;
),
- TP_printk("c=%p %s pg=%lx op=%p",
- __entry->cookie,
- __print_symbolic(__entry->what, fscache_page_op_traces),
- __entry->page, __entry->op)
+ TP_printk("c=%08x V=%08x r=%d U=%d f=%02x rt=%u",
+ __entry->cookie, __entry->volume, __entry->ref,
+ __entry->n_active, __entry->flags, __entry->retire)
);
-TRACE_EVENT(fscache_wrote_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- struct fscache_operation *op, int ret),
+TRACE_EVENT(fscache_invalidate,
+ TP_PROTO(struct fscache_cookie *cookie, loff_t new_size),
- TP_ARGS(cookie, page, op, ret),
+ TP_ARGS(cookie, new_size),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(pgoff_t, page )
- __field(struct fscache_operation *, op )
- __field(int, ret )
+ __field(unsigned int, cookie )
+ __field(loff_t, new_size )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->page = page->index;
- __entry->op = op;
- __entry->ret = ret;
+ __entry->cookie = cookie->debug_id;
+ __entry->new_size = new_size;
),
- TP_printk("c=%p pg=%lx op=%p ret=%d",
- __entry->cookie, __entry->page, __entry->op, __entry->ret)
+ TP_printk("c=%08x sz=%llx",
+ __entry->cookie, __entry->new_size)
);
-TRACE_EVENT(fscache_gang_lookup,
- TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
- void **results, int n, pgoff_t store_limit),
+TRACE_EVENT(fscache_resize,
+ TP_PROTO(struct fscache_cookie *cookie, loff_t new_size),
- TP_ARGS(cookie, op, results, n, store_limit),
+ TP_ARGS(cookie, new_size),
TP_STRUCT__entry(
- __field(struct fscache_cookie *, cookie )
- __field(struct fscache_operation *, op )
- __field(pgoff_t, results0 )
- __field(int, n )
- __field(pgoff_t, store_limit )
+ __field(unsigned int, cookie )
+ __field(loff_t, old_size )
+ __field(loff_t, new_size )
),
TP_fast_assign(
- __entry->cookie = cookie;
- __entry->op = op;
- __entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
- __entry->n = n;
- __entry->store_limit = store_limit;
+ __entry->cookie = cookie->debug_id;
+ __entry->old_size = cookie->object_size;
+ __entry->new_size = new_size;
),
- TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx",
- __entry->cookie, __entry->op, __entry->results0, __entry->n,
- __entry->store_limit)
+ TP_printk("c=%08x os=%08llx sz=%08llx",
+ __entry->cookie,
+ __entry->old_size,
+ __entry->new_size)
);
#endif /* _TRACE_FSCACHE_H */
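Across the rewritten fscache events, raw cookie and cache pointers give way to 32-bit debug_id values plus snapshots of the relevant refcounts, keeping the output compact and free of kernel addresses. Note also the EM()/E_() macro pair, which the header expands twice: first as TRACE_DEFINE_ENUM() declarations so tooling can resolve the enum values, then as { value, string } pairs consumed by __print_symbolic(). A userspace re-creation of that second expansion, reusing the fscache_active_traces entries (the lookup loop itself is illustrative):

#include <stdio.h>

enum fscache_active_trace {
	fscache_active_use,
	fscache_active_use_modify,
	fscache_active_unuse,
};

/* second EM()/E_() expansion: build the value -> string table */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }

static const struct { int val; const char *str; } traces[] = {
	EM(fscache_active_use,		"USE   ")
	EM(fscache_active_use_modify,	"USE-m ")
	E_(fscache_active_unuse,	"UNUSE ")
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(traces) / sizeof(traces[0]); i++)
		printf("%d -> %s\n", traces[i].val, traces[i].str);
	return 0;
}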
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index 9832cb8e0eb0..5ff15126ad9d 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,123 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_master_scan,
+ TP_PROTO(const struct fsi_master *master, bool scan),
+ TP_ARGS(master, scan),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ __field(bool, scan)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ __entry->scan = scan;
+ ),
+ TP_printk("fsi%d (%d links) %s", __entry->master_idx, __entry->n_links,
+ __entry->scan ? "scan" : "unscan")
+);
+
+TRACE_EVENT(fsi_master_unregister,
+ TP_PROTO(const struct fsi_master *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ ),
+ TP_printk("fsi%d (%d links)", __entry->master_idx, __entry->n_links)
+);
+
+TRACE_EVENT(fsi_slave_init,
+ TP_PROTO(const struct fsi_slave *slave),
+ TP_ARGS(slave),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, master_n_links)
+ __field(int, idx)
+ __field(int, link)
+ __field(int, chip_id)
+ __field(__u32, cfam_id)
+ __field(__u32, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = slave->master->idx;
+ __entry->master_n_links = slave->master->n_links;
+ __entry->idx = slave->cdev_idx;
+ __entry->link = slave->link;
+ __entry->chip_id = slave->chip_id;
+ __entry->cfam_id = slave->cfam_id;
+ __entry->size = slave->size;
+ ),
+ TP_printk("fsi%d: idx:%d link:%d/%d cid:%d cfam:%08x %08x",
+ __entry->master_idx,
+ __entry->idx,
+ __entry->link,
+ __entry->master_n_links,
+ __entry->chip_id,
+ __entry->cfam_id,
+ __entry->size
+ )
+);
+
+TRACE_EVENT(fsi_slave_invalid_cfam,
+ TP_PROTO(const struct fsi_master *master, int link, uint32_t cfam_id),
+ TP_ARGS(master, link, cfam_id),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, master_n_links)
+ __field(int, link)
+ __field(__u32, cfam_id)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->master_n_links = master->n_links;
+ __entry->link = link;
+ __entry->cfam_id = cfam_id;
+ ),
+ TP_printk("fsi%d: cfam:%08x link:%d/%d",
+ __entry->master_idx,
+ __entry->cfam_id,
+ __entry->link,
+ __entry->master_n_links
+ )
+);
+
+TRACE_EVENT(fsi_dev_init,
+ TP_PROTO(const struct fsi_device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ __field(int, type)
+ __field(int, unit)
+ __field(int, version)
+ __field(__u32, addr)
+ __field(__u32, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = dev->slave->master->idx;
+ __entry->link = dev->slave->link;
+ __entry->type = dev->engine_type;
+ __entry->unit = dev->unit;
+ __entry->version = dev->version;
+ __entry->addr = dev->addr;
+ __entry->size = dev->size;
+ ),
+ TP_printk("fsi%d: slv%d: t:%02x u:%02x v:%02x %08x@%08x",
+ __entry->master_idx,
+ __entry->link,
+ __entry->type,
+ __entry->unit,
+ __entry->version,
+ __entry->size,
+ __entry->addr
+ )
+);
#endif /* _TRACE_FSI_H */
diff --git a/include/trace/events/fsi_master_aspeed.h b/include/trace/events/fsi_master_aspeed.h
index a355ceacc33f..0fff873775f1 100644
--- a/include/trace/events/fsi_master_aspeed.h
+++ b/include/trace/events/fsi_master_aspeed.h
@@ -72,6 +72,18 @@ TRACE_EVENT(fsi_master_aspeed_opb_error,
)
);
+TRACE_EVENT(fsi_master_aspeed_cfam_reset,
+ TP_PROTO(bool start),
+ TP_ARGS(start),
+ TP_STRUCT__entry(
+ __field(bool, start)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ ),
+ TP_printk("%s", __entry->start ? "start" : "end")
+);
+
#endif
#include <trace/define_trace.h>
diff --git a/include/trace/events/fsi_master_i2cr.h b/include/trace/events/fsi_master_i2cr.h
new file mode 100644
index 000000000000..c33eba130049
--- /dev/null
+++ b/include/trace/events/fsi_master_i2cr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_i2cr
+
+#if !defined(_TRACE_FSI_MASTER_I2CR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_I2CR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(i2cr_i2c_error,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, int rc),
+ TP_ARGS(client, command, rc),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __field(int, rc)
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ __entry->rc = rc;
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } rc:%d", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, __entry->rc)
+);
+
+TRACE_EVENT(i2cr_read,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t *data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+TRACE_EVENT(i2cr_status,
+ TP_PROTO(const struct i2c_client *client, uint64_t status),
+ TP_ARGS(client, status),
+ TP_STRUCT__entry(
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x %016llx", __entry->bus, __entry->addr, __entry->status)
+);
+
+TRACE_EVENT(i2cr_status_error,
+ TP_PROTO(const struct i2c_client *client, uint64_t status, uint64_t error, uint64_t log),
+ TP_ARGS(client, status, error, log),
+ TP_STRUCT__entry(
+ __field(uint64_t, error)
+ __field(uint64_t, log)
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->log = log;
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x status:%016llx error:%016llx log:%016llx", __entry->bus, __entry->addr,
+ __entry->status, __entry->error, __entry->log)
+);
+
+TRACE_EVENT(i2cr_write,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, &data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+#endif
+
+#include <trace/define_trace.h>
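The i2cr events store the command word and data as byte arrays and print them with the kernel's %*ph extension, which dumps a small buffer as space-separated hex bytes; the (int)sizeof(...) cast supplies the byte count as the field width. Standard C printf has no %*ph, so the sketch below approximates the same output for the four-byte command buffer; the command value is hypothetical.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void print_ph(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		printf("%02x%s", buf[i], i + 1 < len ? " " : "");
}

int main(void)
{
	uint32_t command = 0x12345678;		/* hypothetical I2CR command */
	unsigned char raw[sizeof(uint32_t)];

	memcpy(raw, &command, sizeof(raw));	/* mirrors TP_fast_assign() */
	printf("command:{ ");
	print_ph(raw, sizeof(raw));
	printf(" }\n");
	return 0;
}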
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
new file mode 100644
index 000000000000..a78d21fa9f29
--- /dev/null
+++ b/include/trace/events/habanalabs.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2022-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM habanalabs
+
+#if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HABANALABS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(habanalabs_mmu_template,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, virt_addr)
+ __field(u64, phys_addr)
+ __field(u32, page_size)
+ __field(u8, flush_pte)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->virt_addr = virt_addr;
+ __entry->phys_addr = phys_addr;
+ __entry->page_size = page_size;
+ __entry->flush_pte = flush_pte;
+ ),
+
+ TP_printk("%s: vaddr: %#llx, paddr: %#llx, psize: %#x, flush: %s",
+ __get_str(dname),
+ __entry->virt_addr,
+ __entry->phys_addr,
+ __entry->page_size,
+ __entry->flush_pte ? "true" : "false")
+);
+
+DEFINE_EVENT(habanalabs_mmu_template, habanalabs_mmu_map,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte));
+
+DEFINE_EVENT(habanalabs_mmu_template, habanalabs_mmu_unmap,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte));
+
+DECLARE_EVENT_CLASS(habanalabs_dma_alloc_template,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, cpu_addr)
+ __field(u64, dma_addr)
+ __field(u32, size)
+ __field(const char *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->cpu_addr = cpu_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->caller = caller;
+ ),
+
+ TP_printk("%s: cpu_addr: %#llx, dma_addr: %#llx, size: %#x, caller: %s",
+ __get_str(dname),
+ __entry->cpu_addr,
+ __entry->dma_addr,
+ __entry->size,
+ __entry->caller)
+);
+
+DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_alloc,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+
+DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+
+DECLARE_EVENT_CLASS(habanalabs_dma_map_template,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(u32, len)
+ __field(int, dir)
+ __field(const char *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->len = len;
+ __entry->dir = dir;
+ __entry->caller = caller;
+ ),
+
+ TP_printk("%s: phys_addr: %#llx, dma_addr: %#llx, len: %#x, dir: %d, caller: %s",
+ __get_str(dname),
+ __entry->phys_addr,
+ __entry->dma_addr,
+ __entry->len,
+ __entry->dir,
+ __entry->caller)
+);
+
+DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_map_page,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));
+
+DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_unmap_page,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));
+
+DECLARE_EVENT_CLASS(habanalabs_comms_template,
+ TP_PROTO(struct device *dev, char *op_str),
+
+ TP_ARGS(dev, op_str),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(char *, op_str)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->op_str = op_str;
+ ),
+
+ TP_printk("%s: cms: %s",
+ __get_str(dname),
+ __entry->op_str)
+);
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_protocol_cmd,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_send_cmd,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_wait_status,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_wait_status_done,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DECLARE_EVENT_CLASS(habanalabs_reg_access_template,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+
+ TP_ARGS(dev, addr, val),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u32, addr)
+ __field(u32, val)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->addr = addr;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s: addr: %#x, val: %#x",
+ __get_str(dname),
+ __entry->addr,
+ __entry->val)
+);
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_rreg32,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_wreg32,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_elbi_read,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_elbi_write,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+#endif /* if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
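The habanalabs header groups its MMU, DMA, comms and register-access events under DECLARE_EVENT_CLASS()/DEFINE_EVENT() so each family shares one entry layout and format string, and every DEFINE_EVENT() generates a trace_<name>() inline for the driver to call. A sketch of how the register-read event might be emitted follows; the hl_read_reg() helper, its readl() plumbing and the naming are assumptions for illustration, not the driver's actual code.

#include <linux/device.h>
#include <linux/io.h>
#include <trace/events/habanalabs.h>

/* hypothetical wrapper: read a register, then emit the tracepoint */
static u32 hl_read_reg(struct device *dev, void __iomem *base, u32 addr)
{
	u32 val = readl(base + addr);

	trace_habanalabs_rreg32(dev, addr, val);	/* no-op unless enabled */
	return val;
}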
diff --git a/include/trace/events/handshake.h b/include/trace/events/handshake.h
new file mode 100644
index 000000000000..bdd8a03cf5ba
--- /dev/null
+++ b/include/trace/events/handshake.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM handshake
+
+#if !defined(_TRACE_HANDSHAKE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HANDSHAKE_H
+
+#include <linux/net.h>
+#include <net/tls_prot.h>
+#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+#define TLS_RECORD_TYPE_LIST \
+ record_type(CHANGE_CIPHER_SPEC) \
+ record_type(ALERT) \
+ record_type(HANDSHAKE) \
+ record_type(DATA) \
+ record_type(HEARTBEAT) \
+ record_type(TLS12_CID) \
+ record_type_end(ACK)
+
+#undef record_type
+#undef record_type_end
+#define record_type(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x);
+#define record_type_end(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x);
+
+TLS_RECORD_TYPE_LIST
+
+#undef record_type
+#undef record_type_end
+#define record_type(x) { TLS_RECORD_TYPE_##x, #x },
+#define record_type_end(x) { TLS_RECORD_TYPE_##x, #x }
+
+#define show_tls_content_type(type) \
+ __print_symbolic(type, TLS_RECORD_TYPE_LIST)
+
+TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_WARNING);
+TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_FATAL);
+
+#define show_tls_alert_level(level) \
+ __print_symbolic(level, \
+ { TLS_ALERT_LEVEL_WARNING, "Warning" }, \
+ { TLS_ALERT_LEVEL_FATAL, "Fatal" })
+
+#define TLS_ALERT_DESCRIPTION_LIST \
+ alert_description(CLOSE_NOTIFY) \
+ alert_description(UNEXPECTED_MESSAGE) \
+ alert_description(BAD_RECORD_MAC) \
+ alert_description(RECORD_OVERFLOW) \
+ alert_description(HANDSHAKE_FAILURE) \
+ alert_description(BAD_CERTIFICATE) \
+ alert_description(UNSUPPORTED_CERTIFICATE) \
+ alert_description(CERTIFICATE_REVOKED) \
+ alert_description(CERTIFICATE_EXPIRED) \
+ alert_description(CERTIFICATE_UNKNOWN) \
+ alert_description(ILLEGAL_PARAMETER) \
+ alert_description(UNKNOWN_CA) \
+ alert_description(ACCESS_DENIED) \
+ alert_description(DECODE_ERROR) \
+ alert_description(DECRYPT_ERROR) \
+ alert_description(TOO_MANY_CIDS_REQUESTED) \
+ alert_description(PROTOCOL_VERSION) \
+ alert_description(INSUFFICIENT_SECURITY) \
+ alert_description(INTERNAL_ERROR) \
+ alert_description(INAPPROPRIATE_FALLBACK) \
+ alert_description(USER_CANCELED) \
+ alert_description(MISSING_EXTENSION) \
+ alert_description(UNSUPPORTED_EXTENSION) \
+ alert_description(UNRECOGNIZED_NAME) \
+ alert_description(BAD_CERTIFICATE_STATUS_RESPONSE) \
+ alert_description(UNKNOWN_PSK_IDENTITY) \
+ alert_description(CERTIFICATE_REQUIRED) \
+ alert_description_end(NO_APPLICATION_PROTOCOL)
+
+#undef alert_description
+#undef alert_description_end
+#define alert_description(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x);
+#define alert_description_end(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x);
+
+TLS_ALERT_DESCRIPTION_LIST
+
+#undef alert_description
+#undef alert_description_end
+#define alert_description(x) { TLS_ALERT_DESC_##x, #x },
+#define alert_description_end(x) { TLS_ALERT_DESC_##x, #x }
+
+#define show_tls_alert_description(desc) \
+ __print_symbolic(desc, TLS_ALERT_DESCRIPTION_LIST)
+
+DECLARE_EVENT_CLASS(handshake_event_class,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk
+ ),
+ TP_ARGS(net, req, sk),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = sk;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p",
+ __entry->req, __entry->sk
+ )
+);
+#define DEFINE_HANDSHAKE_EVENT(name) \
+ DEFINE_EVENT(handshake_event_class, name, \
+ TP_PROTO( \
+ const struct net *net, \
+ const struct handshake_req *req, \
+ const struct sock *sk \
+ ), \
+ TP_ARGS(net, req, sk))
+
+DECLARE_EVENT_CLASS(handshake_fd_class,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk,
+ int fd
+ ),
+ TP_ARGS(net, req, sk, fd),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(int, fd)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = req->hr_sk;
+ __entry->fd = fd;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p fd=%d",
+ __entry->req, __entry->sk, __entry->fd
+ )
+);
+#define DEFINE_HANDSHAKE_FD_EVENT(name) \
+ DEFINE_EVENT(handshake_fd_class, name, \
+ TP_PROTO( \
+ const struct net *net, \
+ const struct handshake_req *req, \
+ const struct sock *sk, \
+ int fd \
+ ), \
+ TP_ARGS(net, req, sk, fd))
+
+DECLARE_EVENT_CLASS(handshake_error_class,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk,
+ int err
+ ),
+ TP_ARGS(net, req, sk, err),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(int, err)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = sk;
+ __entry->err = err;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p err=%d",
+ __entry->req, __entry->sk, __entry->err
+ )
+);
+#define DEFINE_HANDSHAKE_ERROR(name) \
+ DEFINE_EVENT(handshake_error_class, name, \
+ TP_PROTO( \
+ const struct net *net, \
+ const struct handshake_req *req, \
+ const struct sock *sk, \
+ int err \
+ ), \
+ TP_ARGS(net, req, sk, err))
+
+DECLARE_EVENT_CLASS(handshake_alert_class,
+ TP_PROTO(
+ const struct sock *sk,
+ unsigned char level,
+ unsigned char description
+ ),
+ TP_ARGS(sk, level, description),
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __field(unsigned long, level)
+ __field(unsigned long, description)
+ ),
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->level = level;
+ __entry->description = description;
+ ),
+ TP_printk("src=%pISpc dest=%pISpc %s: %s",
+ __entry->saddr, __entry->daddr,
+ show_tls_alert_level(__entry->level),
+ show_tls_alert_description(__entry->description)
+ )
+);
+#define DEFINE_HANDSHAKE_ALERT(name) \
+ DEFINE_EVENT(handshake_alert_class, name, \
+ TP_PROTO( \
+ const struct sock *sk, \
+ unsigned char level, \
+ unsigned char description \
+ ), \
+ TP_ARGS(sk, level, description))
+
+
+/*
+ * Request lifetime events
+ */
+
+DEFINE_HANDSHAKE_EVENT(handshake_submit);
+DEFINE_HANDSHAKE_ERROR(handshake_submit_err);
+DEFINE_HANDSHAKE_EVENT(handshake_cancel);
+DEFINE_HANDSHAKE_EVENT(handshake_cancel_none);
+DEFINE_HANDSHAKE_EVENT(handshake_cancel_busy);
+DEFINE_HANDSHAKE_EVENT(handshake_destruct);
+
+
+TRACE_EVENT(handshake_complete,
+ TP_PROTO(
+ const struct net *net,
+ const struct handshake_req *req,
+ const struct sock *sk,
+ int status
+ ),
+ TP_ARGS(net, req, sk, status),
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(const void *, sk)
+ __field(int, status)
+ __field(unsigned int, netns_ino)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->sk = sk;
+ __entry->status = status;
+ __entry->netns_ino = net->ns.inum;
+ ),
+ TP_printk("req=%p sk=%p status=%d",
+ __entry->req, __entry->sk, __entry->status
+ )
+);
+
+/*
+ * Netlink events
+ */
+
+DEFINE_HANDSHAKE_ERROR(handshake_notify_err);
+DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_accept);
+DEFINE_HANDSHAKE_ERROR(handshake_cmd_accept_err);
+DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_done);
+DEFINE_HANDSHAKE_ERROR(handshake_cmd_done_err);
+
+/*
+ * TLS Record events
+ */
+
+TRACE_EVENT(tls_contenttype,
+ TP_PROTO(
+ const struct sock *sk,
+ unsigned char type
+ ),
+ TP_ARGS(sk, type),
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __field(unsigned long, type)
+ ),
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->type = type;
+ ),
+ TP_printk("src=%pISpc dest=%pISpc %s",
+ __entry->saddr, __entry->daddr,
+ show_tls_content_type(__entry->type)
+ )
+);
+
+/*
+ * TLS Alert events
+ */
+
+DEFINE_HANDSHAKE_ALERT(tls_alert_send);
+DEFINE_HANDSHAKE_ALERT(tls_alert_recv);
+
+#endif /* _TRACE_HANDSHAKE_H */
+
+#include <trace/define_trace.h>
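Both address-carrying handshake events reserve sizeof(struct sockaddr_in6) bytes per endpoint; as the in-entry comments note, sockaddr_in6 is the larger of the two families, so a single fixed-size array holds either an IPv4 or an IPv6 endpoint. That size relationship can be confirmed in userspace:

#include <stdio.h>
#include <netinet/in.h>

int main(void)
{
	/* one array sized for the bigger family accommodates either */
	printf("sockaddr_in:  %zu bytes\n", sizeof(struct sockaddr_in));
	printf("sockaddr_in6: %zu bytes\n", sizeof(struct sockaddr_in6));
	return 0;
}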
diff --git a/include/trace/events/hswadsp.h b/include/trace/events/hswadsp.h
deleted file mode 100644
index 939d7a09d73f..000000000000
--- a/include/trace/events/hswadsp.h
+++ /dev/null
@@ -1,385 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hswadsp
-
-#if !defined(_TRACE_HSWADSP_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HSWADSP_H
-
-#include <linux/types.h>
-#include <linux/ktime.h>
-#include <linux/tracepoint.h>
-
-struct sst_hsw;
-struct sst_hsw_stream;
-struct sst_hsw_ipc_stream_free_req;
-struct sst_hsw_ipc_volume_req;
-struct sst_hsw_ipc_stream_alloc_req;
-struct sst_hsw_audio_data_format_ipc;
-struct sst_hsw_ipc_stream_info_reply;
-struct sst_hsw_ipc_device_config_req;
-
-DECLARE_EVENT_CLASS(sst_irq,
-
- TP_PROTO(uint32_t status, uint32_t mask),
-
- TP_ARGS(status, mask),
-
- TP_STRUCT__entry(
- __field( unsigned int, status )
- __field( unsigned int, mask )
- ),
-
- TP_fast_assign(
- __entry->status = status;
- __entry->mask = mask;
- ),
-
- TP_printk("status 0x%8.8x mask 0x%8.8x",
- (unsigned int)__entry->status, (unsigned int)__entry->mask)
-);
-
-DEFINE_EVENT(sst_irq, sst_irq_busy,
-
- TP_PROTO(unsigned int status, unsigned int mask),
-
- TP_ARGS(status, mask)
-
-);
-
-DEFINE_EVENT(sst_irq, sst_irq_done,
-
- TP_PROTO(unsigned int status, unsigned int mask),
-
- TP_ARGS(status, mask)
-
-);
-
-DECLARE_EVENT_CLASS(ipc,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val),
-
- TP_STRUCT__entry(
- __string( name, name )
- __field( unsigned int, val )
- ),
-
- TP_fast_assign(
- __assign_str(name, name);
- __entry->val = val;
- ),
-
- TP_printk("%s 0x%8.8x", __get_str(name), (unsigned int)__entry->val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_request,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_reply,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_pending_reply,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_notification,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DEFINE_EVENT(ipc, ipc_error,
-
- TP_PROTO(const char *name, int val),
-
- TP_ARGS(name, val)
-
-);
-
-DECLARE_EVENT_CLASS(stream_position,
-
- TP_PROTO(unsigned int id, unsigned int pos),
-
- TP_ARGS(id, pos),
-
- TP_STRUCT__entry(
- __field( unsigned int, id )
- __field( unsigned int, pos )
- ),
-
- TP_fast_assign(
- __entry->id = id;
- __entry->pos = pos;
- ),
-
- TP_printk("id %d position 0x%x",
- (unsigned int)__entry->id, (unsigned int)__entry->pos)
-);
-
-DEFINE_EVENT(stream_position, stream_read_position,
-
- TP_PROTO(unsigned int id, unsigned int pos),
-
- TP_ARGS(id, pos)
-
-);
-
-DEFINE_EVENT(stream_position, stream_write_position,
-
- TP_PROTO(unsigned int id, unsigned int pos),
-
- TP_ARGS(id, pos)
-
-);
-
-TRACE_EVENT(hsw_stream_buffer,
-
- TP_PROTO(struct sst_hsw_stream *stream),
-
- TP_ARGS(stream),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( int, pt_addr )
- __field( int, num_pages )
- __field( int, ring_size )
- __field( int, ring_offset )
- __field( int, first_pfn )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->pt_addr = stream->request.ringinfo.ring_pt_address;
- __entry->num_pages = stream->request.ringinfo.num_pages;
- __entry->ring_size = stream->request.ringinfo.ring_size;
- __entry->ring_offset = stream->request.ringinfo.ring_offset;
- __entry->first_pfn = stream->request.ringinfo.ring_first_pfn;
- ),
-
- TP_printk("stream %d ring addr 0x%x pages %d size 0x%x offset 0x%x PFN 0x%x",
- (int) __entry->id, (int)__entry->pt_addr,
- (int)__entry->num_pages, (int)__entry->ring_size,
- (int)__entry->ring_offset, (int)__entry->first_pfn)
-);
-
-TRACE_EVENT(hsw_stream_alloc_reply,
-
- TP_PROTO(struct sst_hsw_stream *stream),
-
- TP_ARGS(stream),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( int, stream_id )
- __field( int, mixer_id )
- __field( int, peak0 )
- __field( int, peak1 )
- __field( int, vol0 )
- __field( int, vol1 )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->stream_id = stream->reply.stream_hw_id;
- __entry->mixer_id = stream->reply.mixer_hw_id;
- __entry->peak0 = stream->reply.peak_meter_register_address[0];
- __entry->peak1 = stream->reply.peak_meter_register_address[1];
- __entry->vol0 = stream->reply.volume_register_address[0];
- __entry->vol1 = stream->reply.volume_register_address[1];
- ),
-
- TP_printk("stream %d hw id %d mixer %d peak 0x%x:0x%x vol 0x%x,0x%x",
- (int) __entry->id, (int) __entry->stream_id, (int)__entry->mixer_id,
- (int)__entry->peak0, (int)__entry->peak1,
- (int)__entry->vol0, (int)__entry->vol1)
-);
-
-TRACE_EVENT(hsw_mixer_info_reply,
-
- TP_PROTO(struct sst_hsw_ipc_stream_info_reply *reply),
-
- TP_ARGS(reply),
-
- TP_STRUCT__entry(
- __field( int, mixer_id )
- __field( int, peak0 )
- __field( int, peak1 )
- __field( int, vol0 )
- __field( int, vol1 )
- ),
-
- TP_fast_assign(
- __entry->mixer_id = reply->mixer_hw_id;
- __entry->peak0 = reply->peak_meter_register_address[0];
- __entry->peak1 = reply->peak_meter_register_address[1];
- __entry->vol0 = reply->volume_register_address[0];
- __entry->vol1 = reply->volume_register_address[1];
- ),
-
- TP_printk("mixer id %d peak 0x%x:0x%x vol 0x%x,0x%x",
- (int)__entry->mixer_id,
- (int)__entry->peak0, (int)__entry->peak1,
- (int)__entry->vol0, (int)__entry->vol1)
-);
-
-TRACE_EVENT(hsw_stream_data_format,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_audio_data_format_ipc *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( uint32_t, id )
- __field( uint32_t, frequency )
- __field( uint32_t, bitdepth )
- __field( uint32_t, map )
- __field( uint32_t, config )
- __field( uint32_t, style )
- __field( uint8_t, ch_num )
- __field( uint8_t, valid_bit )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->frequency = req->frequency;
- __entry->bitdepth = req->bitdepth;
- __entry->map = req->map;
- __entry->config = req->config;
- __entry->style = req->style;
- __entry->ch_num = req->ch_num;
- __entry->valid_bit = req->valid_bit;
- ),
-
- TP_printk("stream %d freq %d depth %d map 0x%x config 0x%x style 0x%x ch %d bits %d",
- (int) __entry->id, (uint32_t)__entry->frequency,
- (uint32_t)__entry->bitdepth, (uint32_t)__entry->map,
- (uint32_t)__entry->config, (uint32_t)__entry->style,
- (uint8_t)__entry->ch_num, (uint8_t)__entry->valid_bit)
-);
-
-TRACE_EVENT(hsw_stream_alloc_request,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_ipc_stream_alloc_req *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( uint32_t, id )
- __field( uint8_t, path_id )
- __field( uint8_t, stream_type )
- __field( uint8_t, format_id )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->path_id = req->path_id;
- __entry->stream_type = req->stream_type;
- __entry->format_id = req->format_id;
- ),
-
- TP_printk("stream %d path %d type %d format %d",
- (int) __entry->id, (uint8_t)__entry->path_id,
- (uint8_t)__entry->stream_type, (uint8_t)__entry->format_id)
-);
-
-TRACE_EVENT(hsw_stream_free_req,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_ipc_stream_free_req *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( int, stream_id )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->stream_id = req->stream_id;
- ),
-
- TP_printk("stream %d hw id %d",
- (int) __entry->id, (int) __entry->stream_id)
-);
-
-TRACE_EVENT(hsw_volume_req,
-
- TP_PROTO(struct sst_hsw_stream *stream,
- struct sst_hsw_ipc_volume_req *req),
-
- TP_ARGS(stream, req),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( uint32_t, channel )
- __field( uint32_t, target_volume )
- __field( uint64_t, curve_duration )
- __field( uint32_t, curve_type )
- ),
-
- TP_fast_assign(
- __entry->id = stream->host_id;
- __entry->channel = req->channel;
- __entry->target_volume = req->target_volume;
- __entry->curve_duration = req->curve_duration;
- __entry->curve_type = req->curve_type;
- ),
-
- TP_printk("stream %d chan 0x%x vol %d duration %llu type %d",
- (int) __entry->id, (uint32_t) __entry->channel,
- (uint32_t)__entry->target_volume,
- (uint64_t)__entry->curve_duration,
- (uint32_t)__entry->curve_type)
-);
-
-TRACE_EVENT(hsw_device_config_req,
-
- TP_PROTO(struct sst_hsw_ipc_device_config_req *req),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field( uint32_t, ssp )
- __field( uint32_t, clock_freq )
- __field( uint32_t, mode )
- __field( uint16_t, clock_divider )
- ),
-
- TP_fast_assign(
- __entry->ssp = req->ssp_interface;
- __entry->clock_freq = req->clock_frequency;
- __entry->mode = req->mode;
- __entry->clock_divider = req->clock_divider;
- ),
-
- TP_printk("SSP %d Freq %d mode %d div %d",
- (uint32_t)__entry->ssp,
- (uint32_t)__entry->clock_freq, (uint32_t)__entry->mode,
- (uint32_t)__entry->clock_divider)
-);
-
-#endif /* _TRACE_HSWADSP_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4fdb14a81108..6e2ef1d4b002 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -11,11 +11,14 @@
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
EM( SCAN_PMD_NULL, "pmd_null") \
+ EM( SCAN_PMD_NONE, "pmd_none") \
+ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
+ EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
@@ -29,12 +32,14 @@
EM( SCAN_VMA_NULL, "vma_null") \
EM( SCAN_VMA_CHECK, "vma_check_failed") \
EM( SCAN_ADDRESS_RANGE, "not_suitable_address_range") \
- EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \
EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_TRUNCATED, "truncated") \
- EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EM( SCAN_STORE_FAILED, "store_failed") \
+ EM( SCAN_COPY_MC, "copy_poisoned_page") \
+ EMe(SCAN_PAGE_FILLED, "page_filled")
#undef EM
#undef EMe
@@ -167,5 +172,77 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
__entry->ret)
);
+TRACE_EVENT(mm_khugepaged_scan_file,
+
+ TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
+ int present, int swap, int result),
+
+ TP_ARGS(mm, page, file, present, swap, result),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, pfn)
+ __string(filename, file->f_path.dentry->d_iname)
+ __field(int, present)
+ __field(int, swap)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->pfn = page ? page_to_pfn(page) : -1;
+ __assign_str(filename, file->f_path.dentry->d_iname);
+ __entry->present = present;
+ __entry->swap = swap;
+ __entry->result = result;
+ ),
+
+ TP_printk("mm=%p, scan_pfn=0x%lx, filename=%s, present=%d, swap=%d, result=%s",
+ __entry->mm,
+ __entry->pfn,
+ __get_str(filename),
+ __entry->present,
+ __entry->swap,
+ __print_symbolic(__entry->result, SCAN_STATUS))
+);
+
+TRACE_EVENT(mm_khugepaged_collapse_file,
+ TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
+ bool is_shmem, unsigned long addr, struct file *file,
+ int nr, int result),
+ TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, hpfn)
+ __field(pgoff_t, index)
+ __field(unsigned long, addr)
+ __field(bool, is_shmem)
+ __string(filename, file->f_path.dentry->d_iname)
+ __field(int, nr)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
+ __entry->index = index;
+ __entry->addr = addr;
+ __entry->is_shmem = is_shmem;
+ __assign_str(filename, file->f_path.dentry->d_iname);
+ __entry->nr = nr;
+ __entry->result = result;
+ ),
+
+ TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
+ __entry->mm,
+ __entry->hpfn,
+ __entry->index,
+ __entry->addr,
+ __entry->is_shmem,
+ __get_str(filename),
+ __entry->nr,
+ __print_symbolic(__entry->result, SCAN_STATUS))
+);
+
#endif /* __HUGE_MEMORY_H */
#include <trace/define_trace.h>
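[Editor's note] For context, the EM()/EMe() entries edited above are consumed twice by this header. A minimal sketch of the double-expansion pattern (this mirrors how SCAN_STATUS-style tables are typically written in trace headers; offered as an illustration, not the exact file contents):

#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

SCAN_STATUS		/* first pass: export the enum values to user space */

#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}
			/* second pass: { value, "name" } pairs consumed by
			 * __print_symbolic(__entry->result, SCAN_STATUS) */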
diff --git a/include/trace/events/i2c_slave.h b/include/trace/events/i2c_slave.h
new file mode 100644
index 000000000000..811166abbe3a
--- /dev/null
+++ b/include/trace/events/i2c_slave.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * I2C slave tracepoints
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i2c_slave
+
+#if !defined(_TRACE_I2C_SLAVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_I2C_SLAVE_H
+
+#include <linux/i2c.h>
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(I2C_SLAVE_READ_REQUESTED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_WRITE_REQUESTED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_READ_PROCESSED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_WRITE_RECEIVED);
+TRACE_DEFINE_ENUM(I2C_SLAVE_STOP);
+
+#define show_event_type(type) \
+ __print_symbolic(type, \
+ { I2C_SLAVE_READ_REQUESTED, "RD_REQ" }, \
+ { I2C_SLAVE_WRITE_REQUESTED, "WR_REQ" }, \
+ { I2C_SLAVE_READ_PROCESSED, "RD_PRO" }, \
+ { I2C_SLAVE_WRITE_RECEIVED, "WR_RCV" }, \
+ { I2C_SLAVE_STOP, " STOP" })
+
+TRACE_EVENT(i2c_slave,
+ TP_PROTO(const struct i2c_client *client, enum i2c_slave_event event,
+ __u8 *val, int cb_ret),
+ TP_ARGS(client, event, val, cb_ret),
+ TP_STRUCT__entry(
+ __field(int, adapter_nr )
+ __field(int, ret )
+ __field(__u16, addr )
+ __field(__u16, len )
+ __field(enum i2c_slave_event, event )
+ __array(__u8, buf, 1) ),
+
+ TP_fast_assign(
+ __entry->adapter_nr = client->adapter->nr;
+ __entry->addr = client->addr;
+ __entry->event = event;
+ __entry->ret = cb_ret;
+ switch (event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ case I2C_SLAVE_WRITE_RECEIVED:
+ __entry->len = 1;
+ memcpy(__entry->buf, val, __entry->len);
+ break;
+ default:
+ __entry->len = 0;
+ break;
+ }
+ ),
+ TP_printk("i2c-%d a=%03x ret=%d %s [%*phD]",
+ __entry->adapter_nr, __entry->addr, __entry->ret,
+ show_event_type(__entry->event), __entry->len, __entry->buf
+ ));
+
+#endif /* _TRACE_I2C_SLAVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
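[Editor's note] A hedged sketch of the intended call site: the tracepoint is shaped to wrap the slave callback so every event and the callback's return value land in one record. The real hook lives in i2c_slave_event(); the body below is an approximation of that shape, not the exact tree contents:

static inline int i2c_slave_event(struct i2c_client *client,
				  enum i2c_slave_event event, u8 *val)
{
	int ret = client->slave_cb(client, event, val);

	/* one record per slave event, including the callback's verdict */
	trace_i2c_slave(client, event, val, ret);

	return ret;
}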
diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
index 59363a083ecb..d92691c78cff 100644
--- a/include/trace/events/ib_mad.h
+++ b/include/trace/events/ib_mad.h
@@ -49,7 +49,6 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
__field(int, retries_left)
__field(int, max_retries)
__field(int, retry)
- __field(u16, pkey)
),
TP_fast_assign(
@@ -89,7 +88,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
"hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
"method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \
"attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d "\
- "pkey 0x%x rpqn 0x%x rqpkey 0x%x",
+ "rpqn 0x%x rqpkey 0x%x",
__entry->dev_index, __entry->port_num, __entry->qp_num,
__entry->agent_priv, be64_to_cpu(__entry->wrtid),
__entry->retries_left, __entry->max_retries,
@@ -100,7 +99,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
be16_to_cpu(__entry->class_specific),
be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
be32_to_cpu(__entry->attr_mod),
- be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey,
+ be32_to_cpu(__entry->dlid), __entry->sl,
__entry->rqpn, __entry->rqkey
)
);
@@ -204,7 +203,6 @@ TRACE_EVENT(ib_mad_recv_done_handler,
__field(u16, wc_status)
__field(u32, slid)
__field(u32, dev_index)
- __field(u16, pkey)
),
TP_fast_assign(
@@ -224,9 +222,6 @@ TRACE_EVENT(ib_mad_recv_done_handler,
__entry->slid = wc->slid;
__entry->src_qp = wc->src_qp;
__entry->sl = wc->sl;
- ib_query_pkey(qp_info->port_priv->device,
- qp_info->port_priv->port_num,
- wc->pkey_index, &__entry->pkey);
__entry->wc_status = wc->status;
),
@@ -234,7 +229,7 @@ TRACE_EVENT(ib_mad_recv_done_handler,
"base_ver 0x%02x class 0x%02x class_ver 0x%02x " \
"method 0x%02x status 0x%04x class_specific 0x%04x " \
"tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \
- "slid 0x%08x src QP%d, sl %d pkey 0x%04x",
+ "slid 0x%08x src QP%d, sl %d",
__entry->dev_index, __entry->port_num, __entry->qp_num,
__entry->wc_status,
__entry->length,
@@ -244,7 +239,7 @@ TRACE_EVENT(ib_mad_recv_done_handler,
be16_to_cpu(__entry->class_specific),
be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
be32_to_cpu(__entry->attr_mod),
- __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey
+ __entry->slid, __entry->src_qp, __entry->sl
)
);
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
new file mode 100644
index 000000000000..8ce2de120f2d
--- /dev/null
+++ b/include/trace/events/intel_ifs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_ifs
+
+#if !defined(_TRACE_IFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IFS_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ifs_status,
+
+ TP_PROTO(int batch, int start, int stop, u64 status),
+
+ TP_ARGS(batch, start, stop, status),
+
+ TP_STRUCT__entry(
+ __field( int, batch )
+ __field( u64, status )
+ __field( u16, start )
+ __field( u16, stop )
+ ),
+
+ TP_fast_assign(
+ __entry->batch = batch;
+ __entry->start = start;
+ __entry->stop = stop;
+ __entry->status = status;
+ ),
+
+ TP_printk("batch: %.2d, start: %.4x, stop: %.4x, status: %.16llx",
+ __entry->batch,
+ __entry->start,
+ __entry->stop,
+ __entry->status)
+);
+
+#endif /* _TRACE_IFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
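[Editor's note] Usage sketch (file and function names below are illustrative assumptions): exactly one translation unit in the driver must define CREATE_TRACE_POINTS before including this header so the tracepoint symbols are instantiated; every other user includes the header plainly.

/* e.g. a driver .c file emitting ifs_status (illustrative) */
#define CREATE_TRACE_POINTS
#include <trace/events/intel_ifs.h>

static void ifs_report_chunk(int batch, u16 start, u16 stop, u64 status)
{
	/* one record per tested chunk; status is the raw scan status value */
	trace_ifs_status(batch, start, stop, status);
}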
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
deleted file mode 100644
index 112bd06487bf..000000000000
--- a/include/trace/events/intel_iommu.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Intel IOMMU trace support
- *
- * Copyright (C) 2019 Intel Corporation
- *
- * Author: Lu Baolu <baolu.lu@linux.intel.com>
- */
-#ifdef CONFIG_INTEL_IOMMU
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM intel_iommu
-
-#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_INTEL_IOMMU_H
-
-#include <linux/tracepoint.h>
-#include <linux/intel-iommu.h>
-
-DECLARE_EVENT_CLASS(dma_map,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
-
- TP_ARGS(dev, dev_addr, phys_addr, size),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(dma_addr_t, dev_addr)
- __field(phys_addr_t, phys_addr)
- __field(size_t, size)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->dev_addr = dev_addr;
- __entry->phys_addr = phys_addr;
- __entry->size = size;
- ),
-
- TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu",
- __get_str(dev_name),
- (unsigned long long)__entry->dev_addr,
- (unsigned long long)__entry->phys_addr,
- __entry->size)
-);
-
-DEFINE_EVENT(dma_map, map_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
- TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
-DEFINE_EVENT(dma_map, bounce_map_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
- TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
-DECLARE_EVENT_CLASS(dma_unmap,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
-
- TP_ARGS(dev, dev_addr, size),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(dma_addr_t, dev_addr)
- __field(size_t, size)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->dev_addr = dev_addr;
- __entry->size = size;
- ),
-
- TP_printk("dev=%s dev_addr=0x%llx size=%zu",
- __get_str(dev_name),
- (unsigned long long)__entry->dev_addr,
- __entry->size)
-);
-
-DEFINE_EVENT(dma_unmap, unmap_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
- TP_ARGS(dev, dev_addr, size)
-);
-
-DEFINE_EVENT(dma_unmap, unmap_sg,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
- TP_ARGS(dev, dev_addr, size)
-);
-
-DEFINE_EVENT(dma_unmap, bounce_unmap_single,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
- TP_ARGS(dev, dev_addr, size)
-);
-
-DECLARE_EVENT_CLASS(dma_map_sg,
- TP_PROTO(struct device *dev, int index, int total,
- struct scatterlist *sg),
-
- TP_ARGS(dev, index, total, sg),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(dma_addr_t, dev_addr)
- __field(phys_addr_t, phys_addr)
- __field(size_t, size)
- __field(int, index)
- __field(int, total)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->dev_addr = sg->dma_address;
- __entry->phys_addr = sg_phys(sg);
- __entry->size = sg->dma_length;
- __entry->index = index;
- __entry->total = total;
- ),
-
- TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
- __get_str(dev_name), __entry->index, __entry->total,
- (unsigned long long)__entry->dev_addr,
- (unsigned long long)__entry->phys_addr,
- __entry->size)
-);
-
-DEFINE_EVENT(dma_map_sg, map_sg,
- TP_PROTO(struct device *dev, int index, int total,
- struct scatterlist *sg),
- TP_ARGS(dev, index, total, sg)
-);
-
-DEFINE_EVENT(dma_map_sg, bounce_map_sg,
- TP_PROTO(struct device *dev, int index, int total,
- struct scatterlist *sg),
- TP_ARGS(dev, index, total, sg)
-);
-#endif /* _TRACE_INTEL_IOMMU_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
-#endif /* CONFIG_INTEL_IOMMU */
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 9f0d3b7d56b0..e948df7ce625 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -6,17 +6,20 @@
#define _TRACE_IO_URING_H
#include <linux/tracepoint.h>
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <linux/io_uring.h>
struct io_wq_work;
/**
* io_uring_create - called after a new io_uring context was prepared
*
- * @fd: corresponding file descriptor
- * @ctx: pointer to a ring context structure
+ * @fd: corresponding file descriptor
+ * @ctx: pointer to a ring context structure
* @sq_entries: actual SQ size
* @cq_entries: actual CQ size
- * @flags: SQ ring flags, provided to io_uring_setup(2)
+ * @flags: SQ ring flags, provided to io_uring_setup(2)
*
* Allows to trace io_uring creation and provide pointer to a context, that can
* be used later to find correlated events.
@@ -28,38 +31,37 @@ TRACE_EVENT(io_uring_create,
TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
TP_STRUCT__entry (
- __field( int, fd )
- __field( void *, ctx )
+ __field( int, fd )
+ __field( void *, ctx )
__field( u32, sq_entries )
__field( u32, cq_entries )
__field( u32, flags )
),
TP_fast_assign(
- __entry->fd = fd;
+ __entry->fd = fd;
__entry->ctx = ctx;
__entry->sq_entries = sq_entries;
__entry->cq_entries = cq_entries;
__entry->flags = flags;
),
- TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
+ TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
__entry->ctx, __entry->fd, __entry->sq_entries,
__entry->cq_entries, __entry->flags)
);
/**
- * io_uring_register - called after a buffer/file/eventfd was succesfully
+ * io_uring_register - called after a buffer/file/eventfd was successfully
* registered for a ring
*
- * @ctx: pointer to a ring context structure
- * @opcode: describes which operation to perform
+ * @ctx: pointer to a ring context structure
+ * @opcode: describes which operation to perform
* @nr_user_files: number of registered files
* @nr_user_bufs: number of registered buffers
- * @cq_ev_fd: whether eventfs registered or not
- * @ret: return code
+ * @ret: return code
*
- * Allows to trace fixed files/buffers/eventfds, that could be registered to
+ * Allows to trace fixed files/buffers that could be registered to
* avoid an overhead of getting references to them for every operation. This
* event, together with io_uring_file_get, can provide a full picture of how
* much overhead one can reduce via fixing.
@@ -67,17 +69,16 @@ TRACE_EVENT(io_uring_create,
TRACE_EVENT(io_uring_register,
TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
- unsigned nr_bufs, bool eventfd, long ret),
+ unsigned nr_bufs, long ret),
- TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
+ TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( unsigned, opcode )
- __field( unsigned, nr_files )
- __field( unsigned, nr_bufs )
- __field( bool, eventfd )
- __field( long, ret )
+ __field( void *, ctx )
+ __field( unsigned, opcode )
+ __field( unsigned, nr_files)
+ __field( unsigned, nr_bufs )
+ __field( long, ret )
),
TP_fast_assign(
@@ -85,20 +86,19 @@ TRACE_EVENT(io_uring_register,
__entry->opcode = opcode;
__entry->nr_files = nr_files;
__entry->nr_bufs = nr_bufs;
- __entry->eventfd = eventfd;
__entry->ret = ret;
),
TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
- "eventfd %d, ret %ld",
+ "ret %ld",
__entry->ctx, __entry->opcode, __entry->nr_files,
- __entry->nr_bufs, __entry->eventfd, __entry->ret)
+ __entry->nr_bufs, __entry->ret)
);
/**
* io_uring_file_get - called before getting references to an SQE file
*
- * @ctx: pointer to a ring context structure
+ * @req: pointer to a submitted request
* @fd: SQE file descriptor
*
* Allows to trace out how often an SQE file reference is obtained, which can
@@ -107,99 +107,114 @@ TRACE_EVENT(io_uring_register,
*/
TRACE_EVENT(io_uring_file_get,
- TP_PROTO(void *ctx, int fd),
+ TP_PROTO(struct io_kiocb *req, int fd),
- TP_ARGS(ctx, fd),
+ TP_ARGS(req, fd),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( int, fd )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( u64, user_data )
+ __field( int, fd )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
__entry->fd = fd;
),
- TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+ TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);
/**
* io_uring_queue_async_work - called before submitting a new async work
*
- * @ctx: pointer to a ring context structure
- * @hashed: type of workqueue, hashed or normal
* @req: pointer to a submitted request
- * @work: pointer to a submitted io_wq_work
+ * @rw: type of workqueue, hashed or normal
*
* Allows to trace asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
- TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
- unsigned int flags),
+ TP_PROTO(struct io_kiocb *req, int rw),
- TP_ARGS(ctx, rw, req, work, flags),
+ TP_ARGS(req, rw),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( int, rw )
- __field( void *, req )
- __field( struct io_wq_work *, work )
- __field( unsigned int, flags )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( u64, user_data )
+ __field( u8, opcode )
+ __field( unsigned long long, flags )
+ __field( struct io_wq_work *, work )
+ __field( int, rw )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->flags = (__force unsigned long long) req->flags;
+ __entry->opcode = req->opcode;
+ __entry->work = &req->work;
__entry->rw = rw;
- __entry->req = req;
- __entry->work = work;
- __entry->flags = flags;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
- __entry->ctx, __entry->req, __entry->flags,
- __entry->rw ? "hashed" : "normal", __entry->work)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str), __entry->flags,
+ __entry->rw ? "hashed" : "normal", __entry->work)
);
/**
* io_uring_defer - called when an io_uring request is deferred
*
- * @ctx: pointer to a ring context structure
* @req: pointer to a deferred request
- * @user_data: user data associated with the request
*
* Allows to track deferred requests, to get an insight about what requests are
* not started immediately.
*/
TRACE_EVENT(io_uring_defer,
- TP_PROTO(void *ctx, void *req, unsigned long long user_data),
+ TP_PROTO(struct io_kiocb *req),
- TP_ARGS(ctx, req, user_data),
+ TP_ARGS(req),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
- __field( unsigned long long, data )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, data )
+ __field( u8, opcode )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
__entry->req = req;
- __entry->data = user_data;
+ __entry->data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
- __entry->req, __entry->data)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
+ __entry->ctx, __entry->req, __entry->data,
+ __get_str(op_str))
);
/**
* io_uring_link - called before the io_uring request added into link_list of
- * another request
+ * another request
*
- * @ctx: pointer to a ring context structure
- * @req: pointer to a linked request
+ * @req: pointer to a linked request
* @target_req: pointer to a previous request, that would contain @req
*
* Allows to track linked requests, to understand dependencies between requests
@@ -207,18 +222,18 @@ TRACE_EVENT(io_uring_defer,
*/
TRACE_EVENT(io_uring_link,
- TP_PROTO(void *ctx, void *req, void *target_req),
+ TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),
- TP_ARGS(ctx, req, target_req),
+ TP_ARGS(req, target_req),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
+ __field( void *, ctx )
+ __field( void *, req )
__field( void *, target_req )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = req->ctx;
__entry->req = req;
__entry->target_req = target_req;
),
@@ -244,12 +259,12 @@ TRACE_EVENT(io_uring_cqring_wait,
TP_ARGS(ctx, min_events),
TP_STRUCT__entry (
- __field( void *, ctx )
+ __field( void *, ctx )
__field( int, min_events )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = ctx;
__entry->min_events = min_events;
),
@@ -267,197 +282,399 @@ TRACE_EVENT(io_uring_cqring_wait,
*/
TRACE_EVENT(io_uring_fail_link,
- TP_PROTO(void *req, void *link),
+ TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),
TP_ARGS(req, link),
TP_STRUCT__entry (
- __field( void *, req )
- __field( void *, link )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( void *, link )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->req = req;
- __entry->link = link;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+ __entry->link = link;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("request %p, link %p", __entry->req, __entry->link)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str), __entry->link)
);
/**
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
+ * @req: pointer to a submitted request
* @user_data: user data associated with the request
* @res: result of the request
+ * @cflags: completion flags
+ * @extra1: extra 64-bit data for CQE32
+ * @extra2: extra 64-bit data for CQE32
*
*/
TRACE_EVENT(io_uring_complete,
- TP_PROTO(void *ctx, u64 user_data, long res),
+ TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
+ u64 extra1, u64 extra2),
- TP_ARGS(ctx, user_data, res),
+ TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
TP_STRUCT__entry (
__field( void *, ctx )
+ __field( void *, req )
__field( u64, user_data )
- __field( long, res )
+ __field( int, res )
+ __field( unsigned, cflags )
+ __field( u64, extra1 )
+ __field( u64, extra2 )
),
TP_fast_assign(
__entry->ctx = ctx;
+ __entry->req = req;
__entry->user_data = user_data;
__entry->res = res;
+ __entry->cflags = cflags;
+ __entry->extra1 = extra1;
+ __entry->extra2 = extra2;
),
- TP_printk("ring %p, user_data 0x%llx, result %ld",
- __entry->ctx, (unsigned long long)__entry->user_data,
- __entry->res)
+ TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
+ "extra1 %llu extra2 %llu ",
+ __entry->ctx, __entry->req,
+ __entry->user_data,
+ __entry->res, __entry->cflags,
+ (unsigned long long) __entry->extra1,
+ (unsigned long long) __entry->extra2)
);
-
/**
- * io_uring_submit_sqe - called before submitting one SQE
+ * io_uring_submit_req - called before submitting a request
*
- * @ctx: pointer to a ring context structure
- * @opcode: opcode of request
- * @user_data: user data associated with the request
- * @force_nonblock: whether a context blocking or not
- * @sq_thread: true if sq_thread has submitted this SQE
+ * @req: pointer to a submitted request
*
* Allows to track SQE submitting, to understand what was the source of it, SQ
* thread or io_uring_enter call.
*/
-TRACE_EVENT(io_uring_submit_sqe,
+TRACE_EVENT(io_uring_submit_req,
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
- bool sq_thread),
+ TP_PROTO(struct io_kiocb *req),
- TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
+ TP_ARGS(req),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( bool, force_nonblock )
- __field( bool, sq_thread )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( unsigned long long, flags )
+ __field( bool, sq_thread )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
- __entry->opcode = opcode;
- __entry->user_data = user_data;
- __entry->force_nonblock = force_nonblock;
- __entry->sq_thread = sq_thread;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
+ __entry->flags = (__force unsigned long long) req->flags;
+ __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->force_nonblock, __entry->sq_thread)
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, "
+ "sq_thread %d", __entry->ctx, __entry->req,
+ __entry->user_data, __get_str(op_str), __entry->flags,
+ __entry->sq_thread)
);
+/*
+ * io_uring_poll_arm - called after arming a poll wait if successful
+ *
+ * @req: pointer to the armed request
+ * @mask: request poll events mask
+ * @events: registered events of interest
+ *
+ * Allows to track which fds are being waited on and what events are of
+ * interest.
+ */
TRACE_EVENT(io_uring_poll_arm,
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),
+ TP_PROTO(struct io_kiocb *req, int mask, int events),
- TP_ARGS(ctx, opcode, user_data, mask, events),
+ TP_ARGS(req, mask, events),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( int, mask )
- __field( int, events )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
+ __field( int, events )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
- __entry->opcode = opcode;
- __entry->user_data = user_data;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
__entry->mask = mask;
__entry->events = events;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->mask, __entry->events)
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->mask, __entry->events)
);
-TRACE_EVENT(io_uring_poll_wake,
+/*
+ * io_uring_task_add - called after adding a task
+ *
+ * @req: pointer to request
+ * @mask: request poll events mask
+ *
+ */
+TRACE_EVENT(io_uring_task_add,
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
+ TP_PROTO(struct io_kiocb *req, int mask),
- TP_ARGS(ctx, opcode, user_data, mask),
+ TP_ARGS(req, mask),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( int, mask )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
+
+ __string( op_str, io_uring_get_opcode(req->opcode) )
),
TP_fast_assign(
- __entry->ctx = ctx;
- __entry->opcode = opcode;
- __entry->user_data = user_data;
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = req->cqe.user_data;
+ __entry->opcode = req->opcode;
__entry->mask = mask;
+
+ __assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->mask)
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->mask)
);
-TRACE_EVENT(io_uring_task_add,
+/*
+ * io_uring_req_failed - called when an SQE is errored during submission
+ *
+ * @sqe: pointer to the io_uring_sqe that failed
+ * @req: pointer to request
+ * @error: error it failed with
+ *
+ * Allows easier diagnosing of malformed requests in production systems.
+ */
+TRACE_EVENT(io_uring_req_failed,
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
+ TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
- TP_ARGS(ctx, opcode, user_data, mask),
+ TP_ARGS(sqe, req, error),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( int, mask )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( u8, flags )
+ __field( u8, ioprio )
+ __field( u64, off )
+ __field( u64, addr )
+ __field( u32, len )
+ __field( u32, op_flags )
+ __field( u16, buf_index )
+ __field( u16, personality )
+ __field( u32, file_index )
+ __field( u64, pad1 )
+ __field( u64, addr3 )
+ __field( int, error )
+
+ __string( op_str, io_uring_get_opcode(sqe->opcode) )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = req->ctx;
+ __entry->req = req;
+ __entry->user_data = sqe->user_data;
+ __entry->opcode = sqe->opcode;
+ __entry->flags = sqe->flags;
+ __entry->ioprio = sqe->ioprio;
+ __entry->off = sqe->off;
+ __entry->addr = sqe->addr;
+ __entry->len = sqe->len;
+ __entry->op_flags = sqe->poll32_events;
+ __entry->buf_index = sqe->buf_index;
+ __entry->personality = sqe->personality;
+ __entry->file_index = sqe->file_index;
+ __entry->pad1 = sqe->__pad2[0];
+ __entry->addr3 = sqe->addr3;
+ __entry->error = error;
+
+ __assign_str(op_str, io_uring_get_opcode(sqe->opcode));
+ ),
+
+ TP_printk("ring %p, req %p, user_data 0x%llx, "
+ "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
+ "len=%u, rw_flags=0x%x, buf_index=%d, "
+ "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
+ "error=%d",
+ __entry->ctx, __entry->req, __entry->user_data,
+ __get_str(op_str),
+ __entry->flags, __entry->ioprio,
+ (unsigned long long)__entry->off,
+ (unsigned long long) __entry->addr, __entry->len,
+ __entry->op_flags,
+ __entry->buf_index, __entry->personality, __entry->file_index,
+ (unsigned long long) __entry->pad1,
+ (unsigned long long) __entry->addr3, __entry->error)
+);
+
+
+/*
+ * io_uring_cqe_overflow - a CQE overflowed
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @res: CQE result
+ * @cflags: CQE flags
+ * @ocqe: pointer to the overflow cqe (if available)
+ *
+ */
+TRACE_EVENT(io_uring_cqe_overflow,
+
+ TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
+ void *ocqe),
+
+ TP_ARGS(ctx, user_data, res, cflags, ocqe),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( unsigned long long, user_data )
+ __field( s32, res )
+ __field( u32, cflags )
+ __field( void *, ocqe )
),
TP_fast_assign(
__entry->ctx = ctx;
- __entry->opcode = opcode;
__entry->user_data = user_data;
- __entry->mask = mask;
+ __entry->res = res;
+ __entry->cflags = cflags;
+ __entry->ocqe = ocqe;
),
- TP_printk("ring %p, op %d, data 0x%llx, mask %x",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->mask)
+ TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
+ "overflow_cqe %p",
+ __entry->ctx, __entry->user_data, __entry->res,
+ __entry->cflags, __entry->ocqe)
);
-TRACE_EVENT(io_uring_task_run,
+/*
+ * io_uring_task_work_run - ran task work
+ *
+ * @tctx: pointer to an io_uring_task
+ * @count: how many functions it ran
+ *
+ */
+TRACE_EVENT(io_uring_task_work_run,
- TP_PROTO(void *ctx, u8 opcode, u64 user_data),
+ TP_PROTO(void *tctx, unsigned int count),
- TP_ARGS(ctx, opcode, user_data),
+ TP_ARGS(tctx, count),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
+ __field( void *, tctx )
+ __field( unsigned int, count )
+ ),
+
+ TP_fast_assign(
+ __entry->tctx = tctx;
+ __entry->count = count;
+ ),
+
+ TP_printk("tctx %p, count %u", __entry->tctx, __entry->count)
+);
+
+TRACE_EVENT(io_uring_short_write,
+
+ TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),
+
+ TP_ARGS(ctx, fpos, wanted, got),
+
+ TP_STRUCT__entry(
+ __field(void *, ctx)
+ __field(u64, fpos)
+ __field(u64, wanted)
+ __field(u64, got)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->fpos = fpos;
+ __entry->wanted = wanted;
+ __entry->got = got;
+ ),
+
+ TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
+ __entry->ctx, __entry->fpos,
+ __entry->wanted, __entry->got)
+);
+
+/*
+ * io_uring_local_work_run - ran ring local task work
+ *
+ * @ctx: pointer to an io_uring_ctx
+ * @count: how many functions it ran
+ * @loops: how many loops it ran
+ *
+ */
+TRACE_EVENT(io_uring_local_work_run,
+
+ TP_PROTO(void *ctx, int count, unsigned int loops),
+
+ TP_ARGS(ctx, count, loops),
+
+ TP_STRUCT__entry (
+ __field(void *, ctx )
+ __field(int, count )
+ __field(unsigned int, loops )
),
TP_fast_assign(
__entry->ctx = ctx;
- __entry->opcode = opcode;
- __entry->user_data = user_data;
+ __entry->count = count;
+ __entry->loops = loops;
),
- TP_printk("ring %p, op %d, data 0x%llx",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data)
+ TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);
#endif /* _TRACE_IO_URING_H */
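[Editor's note] The recurring pattern in this rework: call sites now pass only the request, and TP_fast_assign() derives ctx, user_data, opcode and flags from it, so nothing has to be threaded through by hand. A hedged call-site sketch; want_async() is a hypothetical predicate, not a real io_uring helper:

static void example_queue(struct io_kiocb *req)
{
	trace_io_uring_submit_req(req);	/* was (ctx, opcode, user_data, ...) */

	if (want_async(req))		/* hypothetical predicate */
		trace_io_uring_queue_async_work(req, 0 /* normal queue */);
}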
diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h
index c2f580fd371b..af8bfed528fc 100644
--- a/include/trace/events/iocost.h
+++ b/include/trace/events/iocost.h
@@ -11,7 +11,7 @@ struct ioc_gq;
#include <linux/tracepoint.h>
-TRACE_EVENT(iocost_iocg_activate,
+DECLARE_EVENT_CLASS(iocost_iocg_state,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u64 last_period, u64 cur_period, u64 vtime),
@@ -26,7 +26,6 @@ TRACE_EVENT(iocost_iocg_activate,
__field(u64, vrate)
__field(u64, last_period)
__field(u64, cur_period)
- __field(u64, last_vtime)
__field(u64, vtime)
__field(u32, weight)
__field(u32, inuse)
@@ -39,10 +38,9 @@ TRACE_EVENT(iocost_iocg_activate,
__assign_str(cgroup, path);
__entry->now = now->now;
__entry->vnow = now->vnow;
- __entry->vrate = now->vrate;
+ __entry->vrate = iocg->ioc->vtime_base_rate;
__entry->last_period = last_period;
__entry->cur_period = cur_period;
- __entry->last_vtime = iocg->last_vtime;
__entry->vtime = vtime;
__entry->weight = iocg->weight;
__entry->inuse = iocg->inuse;
@@ -51,17 +49,30 @@ TRACE_EVENT(iocost_iocg_activate,
),
TP_printk("[%s:%s] now=%llu:%llu vrate=%llu "
- "period=%llu->%llu vtime=%llu->%llu "
+ "period=%llu->%llu vtime=%llu "
"weight=%u/%u hweight=%llu/%llu",
__get_str(devname), __get_str(cgroup),
__entry->now, __entry->vnow, __entry->vrate,
__entry->last_period, __entry->cur_period,
- __entry->last_vtime, __entry->vtime,
- __entry->inuse, __entry->weight,
+ __entry->vtime, __entry->inuse, __entry->weight,
__entry->hweight_inuse, __entry->hweight_active
)
);
+DEFINE_EVENT(iocost_iocg_state, iocost_iocg_activate,
+ TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+ u64 last_period, u64 cur_period, u64 vtime),
+
+ TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
+);
+
+DEFINE_EVENT(iocost_iocg_state, iocost_iocg_idle,
+ TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+ u64 last_period, u64 cur_period, u64 vtime),
+
+ TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
+);
+
DECLARE_EVENT_CLASS(iocg_inuse_update,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
@@ -98,7 +109,7 @@ DECLARE_EVENT_CLASS(iocg_inuse_update,
)
);
-DEFINE_EVENT(iocg_inuse_update, iocost_inuse_takeback,
+DEFINE_EVENT(iocg_inuse_update, iocost_inuse_shortage,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u32 old_inuse, u32 new_inuse,
@@ -108,7 +119,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_takeback,
old_hw_inuse, new_hw_inuse)
);
-DEFINE_EVENT(iocg_inuse_update, iocost_inuse_giveaway,
+DEFINE_EVENT(iocg_inuse_update, iocost_inuse_transfer,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u32 old_inuse, u32 new_inuse,
@@ -118,7 +129,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_giveaway,
old_hw_inuse, new_hw_inuse)
);
-DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
+DEFINE_EVENT(iocg_inuse_update, iocost_inuse_adjust,
TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
u32 old_inuse, u32 new_inuse,
@@ -131,11 +142,9 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
TRACE_EVENT(iocost_ioc_vrate_adj,
TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm,
- u32 rq_wait_pct, int nr_lagging, int nr_shortages,
- int nr_surpluses),
+ u32 rq_wait_pct, int nr_lagging, int nr_shortages),
- TP_ARGS(ioc, new_vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages,
- nr_surpluses),
+ TP_ARGS(ioc, new_vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages),
TP_STRUCT__entry (
__string(devname, ioc_name(ioc))
@@ -147,12 +156,11 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
__field(u32, rq_wait_pct)
__field(int, nr_lagging)
__field(int, nr_shortages)
- __field(int, nr_surpluses)
),
TP_fast_assign(
__assign_str(devname, ioc_name(ioc));
- __entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
+ __entry->old_vrate = ioc->vtime_base_rate;
__entry->new_vrate = new_vrate;
__entry->busy_level = ioc->busy_level;
__entry->read_missed_ppm = missed_ppm[READ];
@@ -160,15 +168,54 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
__entry->rq_wait_pct = rq_wait_pct;
__entry->nr_lagging = nr_lagging;
__entry->nr_shortages = nr_shortages;
- __entry->nr_surpluses = nr_surpluses;
),
- TP_printk("[%s] vrate=%llu->%llu busy=%d missed_ppm=%u:%u rq_wait_pct=%u lagging=%d shortages=%d surpluses=%d",
+ TP_printk("[%s] vrate=%llu->%llu busy=%d missed_ppm=%u:%u rq_wait_pct=%u lagging=%d shortages=%d",
__get_str(devname), __entry->old_vrate, __entry->new_vrate,
__entry->busy_level,
__entry->read_missed_ppm, __entry->write_missed_ppm,
- __entry->rq_wait_pct, __entry->nr_lagging, __entry->nr_shortages,
- __entry->nr_surpluses
+ __entry->rq_wait_pct, __entry->nr_lagging, __entry->nr_shortages
+ )
+);
+
+TRACE_EVENT(iocost_iocg_forgive_debt,
+
+ TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now,
+ u32 usage_pct, u64 old_debt, u64 new_debt,
+ u64 old_delay, u64 new_delay),
+
+ TP_ARGS(iocg, path, now, usage_pct,
+ old_debt, new_debt, old_delay, new_delay),
+
+ TP_STRUCT__entry (
+ __string(devname, ioc_name(iocg->ioc))
+ __string(cgroup, path)
+ __field(u64, now)
+ __field(u64, vnow)
+ __field(u32, usage_pct)
+ __field(u64, old_debt)
+ __field(u64, new_debt)
+ __field(u64, old_delay)
+ __field(u64, new_delay)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, ioc_name(iocg->ioc));
+ __assign_str(cgroup, path);
+ __entry->now = now->now;
+ __entry->vnow = now->vnow;
+ __entry->usage_pct = usage_pct;
+ __entry->old_debt = old_debt;
+ __entry->new_debt = new_debt;
+ __entry->old_delay = old_delay;
+ __entry->new_delay = new_delay;
+ ),
+
+ TP_printk("[%s:%s] now=%llu:%llu usage=%u debt=%llu->%llu delay=%llu->%llu",
+ __get_str(devname), __get_str(cgroup),
+ __entry->now, __entry->vnow, __entry->usage_pct,
+ __entry->old_debt, __entry->new_debt,
+ __entry->old_delay, __entry->new_delay
)
);
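[Editor's note] The conversion above is the standard dedup idiom: one DECLARE_EVENT_CLASS() holds the record layout and formatting, and each DEFINE_EVENT() stamps out a named tracepoint sharing that body. A minimal self-contained sketch of the idiom (names are illustrative):

DECLARE_EVENT_CLASS(example_state,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(
		__field(int, value)
	),
	TP_fast_assign(
		__entry->value = value;
	),
	TP_printk("value=%d", __entry->value)
);

/* two events, one shared body */
DEFINE_EVENT(example_state, example_activate,
	TP_PROTO(int value),
	TP_ARGS(value)
);

DEFINE_EVENT(example_state, example_idle,
	TP_PROTO(int value),
	TP_ARGS(value)
);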
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 72b4582322ff..70743db1fb75 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -76,13 +76,6 @@ DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
TP_ARGS(dev)
);
-DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev)
-);
-
TRACE_EVENT(map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@@ -101,8 +94,9 @@ TRACE_EVENT(map,
__entry->size = size;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
- __entry->iova, __entry->paddr, __entry->size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
+ __entry->iova, __entry->iova + __entry->size, __entry->paddr,
+ __entry->size
)
);
@@ -124,8 +118,9 @@ TRACE_EVENT(unmap,
__entry->unmapped_size = unmapped_size;
),
- TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
- __entry->iova, __entry->size, __entry->unmapped_size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx size=%zu unmapped_size=%zu",
+ __entry->iova, __entry->iova + __entry->size,
+ __entry->size, __entry->unmapped_size
)
);
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 0be71dad6ec0..3de9bfc982ce 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -35,6 +35,50 @@ TRACE_EVENT(ipi_raise,
TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
);
+TRACE_EVENT(ipi_send_cpu,
+
+ TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+ TP_ARGS(cpu, callsite, callback),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(void *, callsite)
+ __field(void *, callback)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->callsite = (void *)callsite;
+ __entry->callback = callback;
+ ),
+
+ TP_printk("cpu=%u callsite=%pS callback=%pS",
+ __entry->cpu, __entry->callsite, __entry->callback)
+);
+
+TRACE_EVENT(ipi_send_cpumask,
+
+ TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
+
+ TP_ARGS(cpumask, callsite, callback),
+
+ TP_STRUCT__entry(
+ __cpumask(cpumask)
+ __field(void *, callsite)
+ __field(void *, callback)
+ ),
+
+ TP_fast_assign(
+ __assign_cpumask(cpumask, cpumask_bits(cpumask));
+ __entry->callsite = (void *)callsite;
+ __entry->callback = callback;
+ ),
+
+ TP_printk("cpumask=%s callsite=%pS callback=%pS",
+ __get_cpumask(cpumask), __entry->callsite, __entry->callback)
+);
+
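[Editor's note] ipi_send_cpumask uses the dynamic cpumask helpers, which store a variable-size bitmap in the record instead of a fixed array. A minimal sketch of the __cpumask()/__assign_cpumask()/__get_cpumask() trio (event name illustrative):

TRACE_EVENT(example_mask,
	TP_PROTO(const struct cpumask *mask),
	TP_ARGS(mask),
	TP_STRUCT__entry(
		__cpumask(mask)			/* variable-size bitmap field */
	),
	TP_fast_assign(
		__assign_cpumask(mask, cpumask_bits(mask));
	),
	TP_printk("mask=%s", __get_cpumask(mask))
);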
DECLARE_EVENT_CLASS(ipi_handler,
TP_PROTO(const char *reason),
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index eeceafaaea4c..a07b4607b663 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -160,6 +160,53 @@ DEFINE_EVENT(softirq, softirq_raise,
TP_ARGS(vec_nr)
);
+DECLARE_EVENT_CLASS(tasklet,
+
+ TP_PROTO(struct tasklet_struct *t, void *func),
+
+ TP_ARGS(t, func),
+
+ TP_STRUCT__entry(
+ __field( void *, tasklet)
+ __field( void *, func)
+ ),
+
+ TP_fast_assign(
+ __entry->tasklet = t;
+ __entry->func = func;
+ ),
+
+ TP_printk("tasklet=%ps function=%ps", __entry->tasklet, __entry->func)
+);
+
+/**
+ * tasklet_entry - called immediately before the tasklet is run
+ * @t: tasklet pointer
+ * @func: tasklet callback or function being run
+ *
+ * Used to find individual tasklet execution time
+ */
+DEFINE_EVENT(tasklet, tasklet_entry,
+
+ TP_PROTO(struct tasklet_struct *t, void *func),
+
+ TP_ARGS(t, func)
+);
+
+/**
+ * tasklet_exit - called immediately after the tasklet is run
+ * @t: tasklet pointer
+ * @func: tasklet callback or function being run
+ *
+ * Used to find individual tasklet execution time
+ */
+DEFINE_EVENT(tasklet, tasklet_exit,
+
+ TP_PROTO(struct tasklet_struct *t, void *func),
+
+ TP_ARGS(t, func)
+);
+
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
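[Editor's note] The entry/exit pair above is meant to bracket each callback so the delta between the two timestamps gives per-tasklet runtime. A hedged sketch of the pairing; the real softirq loop also handles state bits and the callback-style variant:

static void example_run_tasklet(struct tasklet_struct *t)
{
	trace_tasklet_entry(t, t->func);
	t->func(t->data);		/* or t->callback(t) for callback-style */
	trace_tasklet_exit(t, t->func);
}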
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
index 87408faf6e4e..8ff2a3ca5d75 100644
--- a/include/trace/events/iscsi.h
+++ b/include/trace/events/iscsi.h
@@ -26,12 +26,12 @@ DECLARE_EVENT_CLASS(iscsi_log_msg,
TP_STRUCT__entry(
__string(dname, dev_name(dev) )
- __dynamic_array(char, msg, ISCSI_MSG_MAX )
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(dname, dev_name(dev));
- vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s",__get_str(dname), __get_str(msg)
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index d16a32867f3a..5646ae15a957 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( char, sync_commit )
- __field( int, transaction )
+ __field( tid_t, transaction )
),
TP_fast_assign(
@@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %u sync %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit)
);
@@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( char, sync_commit )
- __field( int, transaction )
- __field( int, head )
+ __field( tid_t, transaction )
+ __field( tid_t, head )
),
TP_fast_assign(
@@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit,
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %d,%d transaction %d sync %d head %d",
+ TP_printk("dev %d,%d transaction %u sync %d head %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit, __entry->head)
);
@@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data,
);
DECLARE_EVENT_CLASS(jbd2_handle_start_class,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, requested_blocks)
@@ -155,28 +155,28 @@ DECLARE_EVENT_CLASS(jbd2_handle_start_class,
__entry->requested_blocks = requested_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ TP_printk("dev %d,%d tid %u type %u line_no %u "
"requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->requested_blocks)
);
DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks)
);
DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks)
);
TRACE_EVENT(jbd2_handle_extend,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int buffer_credits,
int requested_blocks),
@@ -184,7 +184,7 @@ TRACE_EVENT(jbd2_handle_extend,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, buffer_credits )
@@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_handle_extend,
__entry->requested_blocks = requested_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ TP_printk("dev %d,%d tid %u type %u line_no %u "
"buffer_credits %d requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->buffer_credits,
@@ -208,7 +208,7 @@ TRACE_EVENT(jbd2_handle_extend,
);
TRACE_EVENT(jbd2_handle_stats,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int interval, int sync,
int requested_blocks, int dirtied_blocks),
@@ -217,7 +217,7 @@ TRACE_EVENT(jbd2_handle_stats,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, interval )
@@ -237,7 +237,7 @@ TRACE_EVENT(jbd2_handle_stats,
__entry->dirtied_blocks = dirtied_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
+ TP_printk("dev %d,%d tid %u type %u line_no %u interval %d "
"sync %d requested_blocks %d dirtied_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->interval,
@@ -246,14 +246,14 @@ TRACE_EVENT(jbd2_handle_stats,
);
TRACE_EVENT(jbd2_run_stats,
- TP_PROTO(dev_t dev, unsigned long tid,
+ TP_PROTO(dev_t dev, tid_t tid,
struct transaction_run_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned long, wait )
__field( unsigned long, request_delay )
__field( unsigned long, running )
@@ -279,7 +279,7 @@ TRACE_EVENT(jbd2_run_stats,
__entry->blocks_logged = stats->rs_blocks_logged;
),
- TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
+ TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u "
"locked %u flushing %u logging %u handle_count %u "
"blocks %u blocks_logged %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
@@ -294,14 +294,14 @@ TRACE_EVENT(jbd2_run_stats,
);
TRACE_EVENT(jbd2_checkpoint_stats,
- TP_PROTO(dev_t dev, unsigned long tid,
+ TP_PROTO(dev_t dev, tid_t tid,
struct transaction_chp_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned long, chp_time )
__field( __u32, forced_to_close )
__field( __u32, written )
@@ -317,7 +317,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->dropped = stats->cs_dropped;
),
- TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
+ TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u "
"written %u dropped %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->chp_time),
@@ -355,22 +355,22 @@ TRACE_EVENT(jbd2_update_log_tail,
TRACE_EVENT(jbd2_write_superblock,
- TP_PROTO(journal_t *journal, int write_op),
+ TP_PROTO(journal_t *journal, blk_opf_t write_flags),
- TP_ARGS(journal, write_op),
+ TP_ARGS(journal, write_flags),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( int, write_op )
+ __field( blk_opf_t, write_flags )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->write_op = write_op;
+ __entry->write_flags = write_flags;
),
- TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->write_op)
+ TP_printk("dev %d,%d write_flags %x", MAJOR(__entry->dev),
+ MINOR(__entry->dev), (__force u32)__entry->write_flags)
);
TRACE_EVENT(jbd2_lock_buffer_stall,
@@ -394,6 +394,103 @@ TRACE_EVENT(jbd2_lock_buffer_stall,
__entry->stall_ms)
);
+DECLARE_EVENT_CLASS(jbd2_journal_shrink,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan,
+ unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, nr_to_scan)
+ __field(unsigned long, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev %d,%d nr_to_scan %lu count %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nr_to_scan, __entry->count)
+);
+
+DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, count)
+);
+
+DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, count)
+);
+
+TRACE_EVENT(jbd2_shrink_scan_exit,
+
+ TP_PROTO(journal_t *journal, unsigned long nr_to_scan,
+ unsigned long nr_shrunk, unsigned long count),
+
+ TP_ARGS(journal, nr_to_scan, nr_shrunk, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, nr_to_scan)
+ __field(unsigned long, nr_shrunk)
+ __field(unsigned long, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->nr_shrunk = nr_shrunk;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nr_to_scan, __entry->nr_shrunk,
+ __entry->count)
+);
+
+TRACE_EVENT(jbd2_shrink_checkpoint_list,
+
+ TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
+ unsigned long nr_freed, tid_t next_tid),
+
+ TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, first_tid)
+ __field(tid_t, tid)
+ __field(tid_t, last_tid)
+ __field(unsigned long, nr_freed)
+ __field(tid_t, next_tid)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->first_tid = first_tid;
+ __entry->tid = tid;
+ __entry->last_tid = last_tid;
+ __entry->nr_freed = nr_freed;
+ __entry->next_tid = next_tid;
+ ),
+
+ TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
+ "next transaction %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->first_tid, __entry->tid, __entry->last_tid,
+ __entry->nr_freed, __entry->next_tid)
+);
+
#endif /* _TRACE_JBD2_H */
/* This part must be outside protection */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f65b1f6db22d..6e62cc64cd92 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,57 +9,49 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-DECLARE_EVENT_CLASS(kmem_alloc,
+TRACE_EVENT(kmem_cache_alloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
+ struct kmem_cache *s,
+ gfp_t gfp_flags,
+ int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+ TP_ARGS(call_site, ptr, s, gfp_flags, node),
TP_STRUCT__entry(
__field( unsigned long, call_site )
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
+ __field( int, node )
+ __field( bool, accounted )
),
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
- __entry->bytes_req = bytes_req;
- __entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
+ __entry->bytes_req = s->object_size;
+ __entry->bytes_alloc = s->size;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
+ __entry->node = node;
+ __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+ ((gfp_flags & __GFP_ACCOUNT) ||
+ (s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
- show_gfp_flags(__entry->gfp_flags))
-);
-
-DEFINE_EVENT(kmem_alloc, kmalloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->node,
+ __entry->accounted ? "true" : "false")
);
-DECLARE_EVENT_CLASS(kmem_alloc_node,
+TRACE_EVENT(kmalloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
@@ -75,7 +67,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
__field( int, node )
),
@@ -84,38 +76,22 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->ptr = ptr;
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
- __entry->node)
-);
-
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+ __entry->node,
+ (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+ (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
-);
-
-DECLARE_EVENT_CLASS(kmem_free,
+TRACE_EVENT(kfree,
TP_PROTO(unsigned long call_site, const void *ptr),
@@ -135,18 +111,26 @@ DECLARE_EVENT_CLASS(kmem_free,
(void *)__entry->call_site, __entry->ptr)
);
-DEFINE_EVENT(kmem_free, kfree,
+TRACE_EVENT(kmem_cache_free,
- TP_PROTO(unsigned long call_site, const void *ptr),
+ TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),
- TP_ARGS(call_site, ptr)
-);
+ TP_ARGS(call_site, ptr, s),
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __string( name, s->name )
+ ),
- TP_PROTO(unsigned long call_site, const void *ptr),
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __assign_str(name, s->name);
+ ),
- TP_ARGS(call_site, ptr)
+ TP_printk("call_site=%pS ptr=%p name=%s",
+ (void *)__entry->call_site, __entry->ptr, __get_str(name))
);
TRACE_EVENT(mm_page_free,
@@ -165,7 +149,7 @@ TRACE_EVENT(mm_page_free,
__entry->order = order;
),
- TP_printk("page=%p pfn=%lu order=%d",
+ TP_printk("page=%p pfn=0x%lx order=%d",
pfn_to_page(__entry->pfn),
__entry->pfn,
__entry->order)
@@ -185,7 +169,7 @@ TRACE_EVENT(mm_page_free_batched,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page=%p pfn=%lu order=0",
+ TP_printk("page=%p pfn=0x%lx order=0",
pfn_to_page(__entry->pfn),
__entry->pfn)
);
@@ -200,18 +184,18 @@ TRACE_EVENT(mm_page_alloc,
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
__field( int, migratetype )
),
TP_fast_assign(
__entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
__entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
@@ -221,35 +205,39 @@ TRACE_EVENT(mm_page_alloc,
DECLARE_EVENT_CLASS(mm_page,
- TP_PROTO(struct page *page, unsigned int order, int migratetype),
+ TP_PROTO(struct page *page, unsigned int order, int migratetype,
+ int percpu_refill),
- TP_ARGS(page, order, migratetype),
+ TP_ARGS(page, order, migratetype, percpu_refill),
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
__field( int, migratetype )
+ __field( int, percpu_refill )
),
TP_fast_assign(
__entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->migratetype = migratetype;
+ __entry->percpu_refill = percpu_refill;
),
- TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
__entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
- __entry->order == 0)
+ __entry->percpu_refill)
);
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
- TP_PROTO(struct page *page, unsigned int order, int migratetype),
+ TP_PROTO(struct page *page, unsigned int order, int migratetype,
+ int percpu_refill),
- TP_ARGS(page, order, migratetype)
+ TP_ARGS(page, order, migratetype, percpu_refill)
);
TRACE_EVENT(mm_page_pcpu_drain,
@@ -270,7 +258,7 @@ TRACE_EVENT(mm_page_pcpu_drain,
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+ TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
pfn_to_page(__entry->pfn), __entry->pfn,
__entry->order, __entry->migratetype)
);
@@ -304,7 +292,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
get_pageblock_migratetype(page));
),
- TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+ TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
pfn_to_page(__entry->pfn),
__entry->pfn,
__entry->alloc_order,
@@ -316,6 +304,44 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
+TRACE_EVENT(mm_alloc_contig_migrate_range_info,
+
+ TP_PROTO(unsigned long start,
+ unsigned long end,
+ unsigned long nr_migrated,
+ unsigned long nr_reclaimed,
+ unsigned long nr_mapped,
+ int migratetype),
+
+ TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned long, nr_migrated)
+ __field(unsigned long, nr_reclaimed)
+ __field(unsigned long, nr_mapped)
+ __field(int, migratetype)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->nr_migrated = nr_migrated;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->nr_mapped = nr_mapped;
+ __entry->migratetype = migratetype;
+ ),
+
+ TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
+ __entry->start,
+ __entry->end,
+ __entry->migratetype,
+ __entry->nr_migrated,
+ __entry->nr_reclaimed,
+ __entry->nr_mapped)
+);
+
/*
* Required for uniquely and securely identifying mm in rss_stat tracepoint.
*/
@@ -335,13 +361,32 @@ static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
#define __PTR_TO_HASHVAL
#endif
+#define TRACE_MM_PAGES \
+ EM(MM_FILEPAGES) \
+ EM(MM_ANONPAGES) \
+ EM(MM_SWAPENTS) \
+ EMe(MM_SHMEMPAGES)
+
+#undef EM
+#undef EMe
+
+#define EM(a) TRACE_DEFINE_ENUM(a);
+#define EMe(a) TRACE_DEFINE_ENUM(a);
+
+TRACE_MM_PAGES
+
+#undef EM
+#undef EMe
+
+#define EM(a) { a, #a },
+#define EMe(a) { a, #a }
+
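The EM/EMe block is the usual two-pass X-macro trick: the first expansion emits TRACE_DEFINE_ENUM() for each value so tracing tools can resolve the enum, the second builds the { value, "name" } pairs consumed by __print_symbolic() in the TP_printk below. A runnable userspace model of the same pattern:

#include <stdio.h>

#define COLORS \
	EM(RED) \
	EM(GREEN) \
	EMe(BLUE)

#define EM(a)	a,
#define EMe(a)	a
enum color { COLORS };
#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }
static const struct { int val; const char *name; } color_names[] = { COLORS };

int main(void)
{
	for (unsigned int i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
		printf("%d -> %s\n", color_names[i].val, color_names[i].name);
	return 0;
}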
TRACE_EVENT(rss_stat,
TP_PROTO(struct mm_struct *mm,
- int member,
- long count),
+ int member),
- TP_ARGS(mm, member, count),
+ TP_ARGS(mm, member),
TP_STRUCT__entry(
__field(unsigned int, mm_id)
@@ -354,13 +399,14 @@ TRACE_EVENT(rss_stat,
__entry->mm_id = mm_ptr_to_hash(mm);
__entry->curr = !!(current->mm == mm);
__entry->member = member;
- __entry->size = (count << PAGE_SHIFT);
+ __entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
+ << PAGE_SHIFT);
),
- TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
+ TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
__entry->mm_id,
__entry->curr,
- __entry->member,
+ __print_symbolic(__entry->member, TRACE_MM_PAGES),
__entry->size)
);
#endif /* _TRACE_KMEM_H */
diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
new file mode 100644
index 000000000000..e728647b5d26
--- /dev/null
+++ b/include/trace/events/ksm.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ksm
+
+#if !defined(_TRACE_KSM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KSM_H
+
+#include <linux/tracepoint.h>
+
+/**
+ * ksm_scan_template - called at the start / stop of a ksm scan
+ *
+ * @seq: sequence number of scan
+ * @rmap_entries: actual number of rmap entries
+ *
+ * Allows tracing the start / stop of a ksm scan.
+ */
+DECLARE_EVENT_CLASS(ksm_scan_template,
+
+ TP_PROTO(int seq, u32 rmap_entries),
+
+ TP_ARGS(seq, rmap_entries),
+
+ TP_STRUCT__entry(
+ __field(int, seq)
+ __field(u32, rmap_entries)
+ ),
+
+ TP_fast_assign(
+ __entry->seq = seq;
+ __entry->rmap_entries = rmap_entries;
+ ),
+
+ TP_printk("seq %d rmap size %d",
+ __entry->seq, __entry->rmap_entries)
+);
+
+/**
+ * ksm_start_scan - called after a new ksm scan is started
+ *
+ * @seq: sequence number of scan
+ * @rmap_entries: actual number of rmap entries
+ *
+ * Allows tracing the start of a ksm scan.
+ */
+DEFINE_EVENT(ksm_scan_template, ksm_start_scan,
+
+ TP_PROTO(int seq, u32 rmap_entries),
+
+ TP_ARGS(seq, rmap_entries)
+);
+
+/**
+ * ksm_stop_scan - called after a new ksm scan has completed
+ *
+ * @seq: sequence number of scan
+ * @rmap_entries: actual number of rmap entries
+ *
+ * Allows tracing the completion of a ksm scan.
+ */
+DEFINE_EVENT(ksm_scan_template, ksm_stop_scan,
+
+ TP_PROTO(int seq, u32 rmap_entries),
+
+ TP_ARGS(seq, rmap_entries)
+);
+
+/**
+ * ksm_enter_exit_template - called after a process has been added to or
+ * removed from ksm
+ *
+ * @mm: address of the mm object of the process
+ *
+ * Allows tracing when a process has been added to or removed from ksm.
+ */
+DECLARE_EVENT_CLASS(ksm_enter_exit_template,
+
+ TP_PROTO(void *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field(void *, mm)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ ),
+
+ TP_printk("mm %p", __entry->mm)
+);
+
+/**
+ * ksm_enter - called after a new process has been added to ksm
+ *
+ * @mm: address of the mm object of the process
+ *
+ * Allows tracing when a process has been added to ksm.
+ */
+DEFINE_EVENT(ksm_enter_exit_template, ksm_enter,
+
+ TP_PROTO(void *mm),
+
+ TP_ARGS(mm)
+);
+
+/**
+ * ksm_exit - called after a new process has been removed from ksm
+ *
+ * @mm: address of the mm object of the process
+ *
+ * Allows tracing when a process has been removed from ksm.
+ */
+DEFINE_EVENT(ksm_enter_exit_template, ksm_exit,
+
+ TP_PROTO(void *mm),
+
+ TP_ARGS(mm)
+);
+
+/**
+ * ksm_merge_one_page - called after a page has been merged
+ *
+ * @pfn: page frame number of ksm page
+ * @rmap_item: address of rmap_item object
+ * @mm: address of the process mm struct
+ * @err: 0 on success, otherwise an error code
+ *
+ * Allows tracing the ksm merging of individual pages.
+ */
+TRACE_EVENT(ksm_merge_one_page,
+
+ TP_PROTO(unsigned long pfn, void *rmap_item, void *mm, int err),
+
+ TP_ARGS(pfn, rmap_item, mm, err),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(void *, rmap_item)
+ __field(void *, mm)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->rmap_item = rmap_item;
+ __entry->mm = mm;
+ __entry->err = err;
+ ),
+
+ TP_printk("ksm pfn %lu rmap_item %p mm %p error %d",
+ __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
+);
+
+/**
+ * ksm_merge_with_ksm_page - called after a page has been merged with a ksm page
+ *
+ * @ksm_page: address of the ksm page
+ * @pfn: page frame number of ksm page
+ * @rmap_item: address of rmap_item object
+ * @mm: address of the mm object of the process
+ * @err: 0 on success, otherwise an error code
+ *
+ * Allows tracing the merging of a page with a ksm page.
+ */
+TRACE_EVENT(ksm_merge_with_ksm_page,
+
+ TP_PROTO(void *ksm_page, unsigned long pfn, void *rmap_item, void *mm, int err),
+
+ TP_ARGS(ksm_page, pfn, rmap_item, mm, err),
+
+ TP_STRUCT__entry(
+ __field(void *, ksm_page)
+ __field(unsigned long, pfn)
+ __field(void *, rmap_item)
+ __field(void *, mm)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->ksm_page = ksm_page;
+ __entry->pfn = pfn;
+ __entry->rmap_item = rmap_item;
+ __entry->mm = mm;
+ __entry->err = err;
+ ),
+
+ TP_printk("%spfn %lu rmap_item %p mm %p error %d",
+ (__entry->ksm_page ? "ksm " : ""),
+ __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
+);
+
+/**
+ * ksm_remove_ksm_page - called after a ksm page has been removed
+ *
+ * @pfn: page frame number of ksm page
+ *
+ * Allows tracing the removal of stable ksm pages.
+ */
+TRACE_EVENT(ksm_remove_ksm_page,
+
+ TP_PROTO(unsigned long pfn),
+
+ TP_ARGS(pfn),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ ),
+
+ TP_printk("pfn %lu", __entry->pfn)
+);
+
+/**
+ * ksm_remove_rmap_item - called after an rmap_item has been removed from the
+ * stable tree
+ *
+ * @pfn: page frame number of ksm page
+ * @rmap_item: address of rmap_item object
+ * @mm: address of the process mm struct
+ *
+ * Allows tracing the removal of pages from the stable tree list.
+ */
+TRACE_EVENT(ksm_remove_rmap_item,
+
+ TP_PROTO(unsigned long pfn, void *rmap_item, void *mm),
+
+ TP_ARGS(pfn, rmap_item, mm),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(void *, rmap_item)
+ __field(void *, mm)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->rmap_item = rmap_item;
+ __entry->mm = mm;
+ ),
+
+ TP_printk("pfn %lu rmap_item %p mm %p",
+ __entry->pfn, __entry->rmap_item, __entry->mm)
+);
+
+/**
+ * ksm_advisor - called after the advisor has run
+ *
+ * @scan_time: scan time in seconds
+ * @pages_to_scan: new pages_to_scan value
+ * @cpu_percent: cpu usage in percent
+ *
+ * Allows tracing the ksm advisor.
+ */
+TRACE_EVENT(ksm_advisor,
+
+ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
+ unsigned int cpu_percent),
+
+ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
+
+ TP_STRUCT__entry(
+ __field(s64, scan_time)
+ __field(unsigned long, pages_to_scan)
+ __field(unsigned int, cpu_percent)
+ ),
+
+ TP_fast_assign(
+ __entry->scan_time = scan_time;
+ __entry->pages_to_scan = pages_to_scan;
+ __entry->cpu_percent = cpu_percent;
+ ),
+
+ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
+ __entry->scan_time, __entry->pages_to_scan,
+ __entry->cpu_percent)
+);
+
+#endif /* _TRACE_KSM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
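A hedged sketch of how the paired scan events are meant to bracket one scanner pass (illustrative only; the real call sites are in mm/ksm.c):

/* Illustrative only: not the mm/ksm.c scan loop. */
static void ksm_scan_pass_sketch(void)
{
	int seq = 0;		/* would come from the scanner's state */
	u32 rmap_entries = 0;

	trace_ksm_start_scan(seq, rmap_entries);
	/* ... walk mm slots and try to merge candidate pages ... */
	trace_ksm_stop_scan(seq, rmap_entries);
}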
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 9417a34aad08..011fba6b5552 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -17,7 +17,7 @@
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \
- ERSN(HYPERV), ERSN(ARM_NISV)
+ ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
@@ -62,7 +62,7 @@ TRACE_EVENT(kvm_vcpu_wakeup,
__entry->valid ? "valid" : "invalid")
);
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
@@ -82,7 +82,7 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode \
@@ -170,7 +170,7 @@ TRACE_EVENT(kvm_msi_set_irq,
#endif /* defined(__KVM_HAVE_IOAPIC) */
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -197,7 +197,7 @@ TRACE_EVENT(kvm_ack_irq,
TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
@@ -255,30 +255,6 @@ TRACE_EVENT(kvm_fpu,
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
-TRACE_EVENT(kvm_age_page,
- TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
- TP_ARGS(gfn, level, slot, ref),
-
- TP_STRUCT__entry(
- __field( u64, hva )
- __field( u64, gfn )
- __field( u8, level )
- __field( u8, referenced )
- ),
-
- TP_fast_assign(
- __entry->gfn = gfn;
- __entry->level = level;
- __entry->hva = ((gfn - slot->base_gfn) <<
- PAGE_SHIFT) + slot->userspace_addr;
- __entry->referenced = ref;
- ),
-
- TP_printk("hva %llx gfn %llx level %u %s",
- __entry->hva, __entry->gfn, __entry->level,
- __entry->referenced ? "YOUNG" : "OLD")
-);
-
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
@@ -306,7 +282,7 @@ DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_ARGS(gva, gfn)
);
-DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
TP_PROTO(u64 gva, u64 gfn),
@@ -399,6 +375,135 @@ TRACE_EVENT(kvm_halt_poll_ns,
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
+TRACE_EVENT(kvm_dirty_ring_push,
+ TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
+ TP_ARGS(ring, slot, offset),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u32, dirty_index)
+ __field(u32, reset_index)
+ __field(u32, slot)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->index;
+ __entry->dirty_index = ring->dirty_index;
+ __entry->reset_index = ring->reset_index;
+ __entry->slot = slot;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("ring %d: dirty 0x%x reset 0x%x "
+ "slot %u offset 0x%llx (used %u)",
+ __entry->index, __entry->dirty_index,
+ __entry->reset_index, __entry->slot, __entry->offset,
+ __entry->dirty_index - __entry->reset_index)
+);
+
+TRACE_EVENT(kvm_dirty_ring_reset,
+ TP_PROTO(struct kvm_dirty_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u32, dirty_index)
+ __field(u32, reset_index)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->index;
+ __entry->dirty_index = ring->dirty_index;
+ __entry->reset_index = ring->reset_index;
+ ),
+
+ TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
+ __entry->index, __entry->dirty_index, __entry->reset_index,
+ __entry->dirty_index - __entry->reset_index)
+);
+
+TRACE_EVENT(kvm_dirty_ring_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ ),
+
+ TP_printk("vcpu %d", __entry->vcpu_id)
+);
+
+TRACE_EVENT(kvm_unmap_hva_range,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_set_spte_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_age_hva,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
+);
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
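The dirty-ring events above print "used" as dirty_index - reset_index. Both indices are free-running u32 values, so the unsigned subtraction stays correct across wraparound; a runnable illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dirty_index = 5, reset_index = UINT32_MAX - 2;

	/* wraps modulo 2^32, as the ring indices do */
	printf("used %u\n", dirty_index - reset_index);	/* prints "used 8" */
	return 0;
}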
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index c0e7d24ca256..9d44781efc1c 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -13,11 +13,11 @@
TRACE_EVENT(kyber_latency,
- TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+ TP_PROTO(dev_t dev, const char *domain, const char *type,
unsigned int percentile, unsigned int numerator,
unsigned int denominator, unsigned int samples),
- TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+ TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -30,9 +30,9 @@ TRACE_EVENT(kyber_latency,
),
TP_fast_assign(
- __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
- strlcpy(__entry->type, type, sizeof(__entry->type));
+ __entry->dev = dev;
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
TRACE_EVENT(kyber_adjust,
- TP_PROTO(struct request_queue *q, const char *domain,
- unsigned int depth),
+ TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
- TP_ARGS(q, domain, depth),
+ TP_ARGS(dev, domain, depth),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -59,8 +58,8 @@ TRACE_EVENT(kyber_adjust,
),
TP_fast_assign(
- __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ __entry->dev = dev;
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
TRACE_EVENT(kyber_throttled,
- TP_PROTO(struct request_queue *q, const char *domain),
+ TP_PROTO(dev_t dev, const char *domain),
- TP_ARGS(q, domain),
+ TP_ARGS(dev, domain),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -81,8 +80,8 @@ TRACE_EVENT(kyber_throttled,
),
TP_fast_assign(
- __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ __entry->dev = dev;
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
index ab69434e2329..6025dd8ba4aa 100644
--- a/include/trace/events/libata.h
+++ b/include/trace/events/libata.h
@@ -132,9 +132,37 @@
ata_protocol_name(ATAPI_PROT_PIO), \
ata_protocol_name(ATAPI_PROT_DMA))
+#define ata_class_name(class) { class, #class }
+#define show_class_name(val) \
+ __print_symbolic(val, \
+ ata_class_name(ATA_DEV_UNKNOWN), \
+ ata_class_name(ATA_DEV_ATA), \
+ ata_class_name(ATA_DEV_ATA_UNSUP), \
+ ata_class_name(ATA_DEV_ATAPI), \
+ ata_class_name(ATA_DEV_ATAPI_UNSUP), \
+ ata_class_name(ATA_DEV_PMP), \
+ ata_class_name(ATA_DEV_PMP_UNSUP), \
+ ata_class_name(ATA_DEV_SEMB), \
+ ata_class_name(ATA_DEV_SEMB_UNSUP), \
+ ata_class_name(ATA_DEV_ZAC), \
+ ata_class_name(ATA_DEV_ZAC_UNSUP), \
+ ata_class_name(ATA_DEV_NONE))
+
+#define ata_sff_hsm_state_name(state) { state, #state }
+#define show_sff_hsm_state_name(val) \
+ __print_symbolic(val, \
+ ata_sff_hsm_state_name(HSM_ST_IDLE), \
+ ata_sff_hsm_state_name(HSM_ST_FIRST), \
+ ata_sff_hsm_state_name(HSM_ST), \
+ ata_sff_hsm_state_name(HSM_ST_LAST), \
+ ata_sff_hsm_state_name(HSM_ST_ERR))
+
const char *libata_trace_parse_status(struct trace_seq*, unsigned char);
#define __parse_status(s) libata_trace_parse_status(p, s)
+const char *libata_trace_parse_host_stat(struct trace_seq *, unsigned char);
+#define __parse_host_stat(s) libata_trace_parse_host_stat(p, s)
+
const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int);
#define __parse_eh_action(a) libata_trace_parse_eh_action(p, a)
@@ -144,11 +172,14 @@ const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int);
const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
+const char *libata_trace_parse_tf_flags(struct trace_seq *, unsigned int);
+#define __parse_tf_flags(f) libata_trace_parse_tf_flags(p, f)
+
const char *libata_trace_parse_subcmd(struct trace_seq *, unsigned char,
unsigned char, unsigned char);
#define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h)
-TRACE_EVENT(ata_qc_issue,
+DECLARE_EVENT_CLASS(ata_qc_issue_template,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -207,6 +238,14 @@ TRACE_EVENT(ata_qc_issue,
__entry->dev)
);
+DEFINE_EVENT(ata_qc_issue_template, ata_qc_prep,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
+DEFINE_EVENT(ata_qc_issue_template, ata_qc_issue,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
DECLARE_EVENT_CLASS(ata_qc_complete_template,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -249,6 +288,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template,
__entry->hob_feature = qc->result_tf.hob_feature;
__entry->nsect = qc->result_tf.nsect;
__entry->hob_nsect = qc->result_tf.hob_nsect;
+ __entry->flags = qc->flags;
),
TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
@@ -275,6 +315,128 @@ DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc));
+TRACE_EVENT(ata_tf_load,
+
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf),
+
+ TP_ARGS(ap, tf),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, cmd )
+ __field( unsigned char, dev )
+ __field( unsigned char, lbal )
+ __field( unsigned char, lbam )
+ __field( unsigned char, lbah )
+ __field( unsigned char, nsect )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_lbal )
+ __field( unsigned char, hob_lbam )
+ __field( unsigned char, hob_lbah )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, hob_feature )
+ __field( unsigned char, proto )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->proto = tf->protocol;
+ __entry->cmd = tf->command;
+ __entry->dev = tf->device;
+ __entry->lbal = tf->lbal;
+ __entry->lbam = tf->lbam;
+ __entry->lbah = tf->lbah;
+ __entry->hob_lbal = tf->hob_lbal;
+ __entry->hob_lbam = tf->hob_lbam;
+ __entry->hob_lbah = tf->hob_lbah;
+ __entry->feature = tf->feature;
+ __entry->hob_feature = tf->hob_feature;
+ __entry->nsect = tf->nsect;
+ __entry->hob_nsect = tf->hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u proto=%s cmd=%s%s " \
+ " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
+ __entry->ata_port,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect),
+ __entry->cmd, __entry->feature, __entry->nsect,
+ __entry->lbal, __entry->lbam, __entry->lbah,
+ __entry->hob_feature, __entry->hob_nsect,
+ __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
+ __entry->dev)
+);
+
+DECLARE_EVENT_CLASS(ata_exec_command_template,
+
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+
+ TP_ARGS(ap, tf, tag),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, tag )
+ __field( unsigned char, cmd )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, proto )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->tag = tag;
+ __entry->proto = tf->protocol;
+ __entry->cmd = tf->command;
+ __entry->feature = tf->feature;
+ __entry->hob_nsect = tf->hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u tag=%d proto=%s cmd=%s%s",
+ __entry->ata_port, __entry->tag,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect))
+);
+
+DEFINE_EVENT(ata_exec_command_template, ata_exec_command,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_setup,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_start,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_stop,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+TRACE_EVENT(ata_bmdma_status,
+
+ TP_PROTO(struct ata_port *ap, unsigned int host_stat),
+
+ TP_ARGS(ap, host_stat),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, tag )
+ __field( unsigned char, host_stat )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->host_stat = host_stat;
+ ),
+
+ TP_printk("ata_port=%u host_stat=%s",
+ __entry->ata_port,
+ __parse_host_stat(__entry->host_stat))
+);
+
TRACE_EVENT(ata_eh_link_autopsy,
TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask),
@@ -329,6 +491,259 @@ TRACE_EVENT(ata_eh_link_autopsy_qc,
__parse_eh_err_mask(__entry->eh_err_mask))
);
+DECLARE_EVENT_CLASS(ata_eh_action_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+
+ TP_ARGS(link, devno, eh_action),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, eh_action )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+ __entry->ata_dev = link->pmp + devno;
+ __entry->eh_action = eh_action;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u eh_action=%s",
+ __entry->ata_port, __entry->ata_dev,
+ __parse_eh_action(__entry->eh_action))
+);
+
+DEFINE_EVENT(ata_eh_action_template, ata_eh_about_to_do,
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+ TP_ARGS(link, devno, eh_action));
+
+DEFINE_EVENT(ata_eh_action_template, ata_eh_done,
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+ TP_ARGS(link, devno, eh_action));
+
+DECLARE_EVENT_CLASS(ata_link_reset_begin_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+
+ TP_ARGS(link, class, deadline),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __array( unsigned int, class, 2 )
+ __field( unsigned long, deadline )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+ memcpy(__entry->class, class, 2);
+ __entry->deadline = deadline;
+ ),
+
+ TP_printk("ata_port=%u deadline=%lu classes=[%s,%s]",
+ __entry->ata_port, __entry->deadline,
+ show_class_name(__entry->class[0]),
+ show_class_name(__entry->class[1]))
+);
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_link_hardreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_slave_hardreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_link_softreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DECLARE_EVENT_CLASS(ata_link_reset_end_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+
+ TP_ARGS(link, class, rc),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __array( unsigned int, class, 2 )
+ __field( int, rc )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+ memcpy(__entry->class, class, 2);
+ __entry->rc = rc;
+ ),
+
+ TP_printk("ata_port=%u rc=%d class=[%s,%s]",
+ __entry->ata_port, __entry->rc,
+ show_class_name(__entry->class[0]),
+ show_class_name(__entry->class[1]))
+);
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_hardreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_slave_hardreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_softreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_postreset,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_slave_postreset,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DECLARE_EVENT_CLASS(ata_port_eh_begin_template,
+
+ TP_PROTO(struct ata_port *ap),
+
+ TP_ARGS(ap),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ ),
+
+ TP_printk("ata_port=%u", __entry->ata_port)
+);
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_std_sched_eh,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_port_freeze,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_port_thaw,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DECLARE_EVENT_CLASS(ata_sff_hsm_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char status),
+
+ TP_ARGS(qc, status),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, qc_flags )
+ __field( unsigned int, protocol )
+ __field( unsigned int, hsm_state )
+ __field( unsigned char, dev_state )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->qc_flags = qc->flags;
+ __entry->protocol = qc->tf.protocol;
+ __entry->hsm_state = qc->ap->hsm_task_state;
+ __entry->dev_state = status;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s flags=%s task_state=%s dev_stat=0x%X",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ show_protocol_name(__entry->protocol),
+ __parse_qc_flags(__entry->qc_flags),
+ show_sff_hsm_state_name(__entry->hsm_state),
+ __entry->dev_state)
+);
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_hsm_state,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_hsm_command_complete,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_port_intr,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DECLARE_EVENT_CLASS(ata_transfer_data_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+
+ TP_ARGS(qc, offset, count),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, flags )
+ __field( unsigned int, offset )
+ __field( unsigned int, bytes )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->flags = qc->tf.flags;
+ __entry->offset = offset;
+ __entry->bytes = count;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s offset=%u bytes=%u",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ __parse_tf_flags(__entry->flags),
+ __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(ata_transfer_data_template, ata_sff_pio_transfer_data,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DEFINE_EVENT(ata_transfer_data_template, atapi_pio_transfer_data,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DEFINE_EVENT(ata_transfer_data_template, atapi_send_cdb,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DECLARE_EVENT_CLASS(ata_sff_template,
+
+ TP_PROTO(struct ata_port *ap),
+
+ TP_ARGS(ap),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, hsm_state )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->hsm_state = ap->hsm_task_state;
+ ),
+
+ TP_printk("ata_port=%u task_state=%s",
+ __entry->ata_port,
+ show_sff_hsm_state_name(__entry->hsm_state))
+);
+
+DEFINE_EVENT(ata_sff_template, ata_sff_flush_pio_task,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
#endif /* _TRACE_LIBATA_H */
/* This part must be outside protection */
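The ata_class_name()/show_class_name() additions use the standard stringify-pair idiom behind __print_symbolic(): each entry pairs a value with its own name as a string, and lookup is a first-match scan. A runnable userspace analogue with a subset of the classes:

#include <stdio.h>

/* Subset of the classes, for illustration. */
enum { ATA_DEV_UNKNOWN, ATA_DEV_ATA, ATA_DEV_NONE };

#define ata_class_name(class) { class, #class }

static const struct { int val; const char *name; } classes[] = {
	ata_class_name(ATA_DEV_UNKNOWN),
	ata_class_name(ATA_DEV_ATA),
	ata_class_name(ATA_DEV_NONE),
};

static const char *show_class_name(int val)
{
	for (unsigned int i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		if (classes[i].val == val)
			return classes[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", show_class_name(ATA_DEV_ATA));	/* ATA_DEV_ATA */
	return 0;
}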
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index d7512129a324..9ebd081e057e 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -5,11 +5,22 @@
#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LOCK_H
-#include <linux/lockdep.h>
+#include <linux/sched.h>
#include <linux/tracepoint.h>
+/* flags for lock:contention_begin */
+#define LCB_F_SPIN (1U << 0)
+#define LCB_F_READ (1U << 1)
+#define LCB_F_WRITE (1U << 2)
+#define LCB_F_RT (1U << 3)
+#define LCB_F_PERCPU (1U << 4)
+#define LCB_F_MUTEX (1U << 5)
+
+
#ifdef CONFIG_LOCKDEP
+#include <linux/lockdep.h>
+
TRACE_EVENT(lock_acquire,
TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
@@ -78,8 +89,54 @@ DEFINE_EVENT(lock, lock_acquired,
TP_ARGS(lock, ip)
);
-#endif
-#endif
+#endif /* CONFIG_LOCK_STAT */
+#endif /* CONFIG_LOCKDEP */
+
+TRACE_EVENT(contention_begin,
+
+ TP_PROTO(void *lock, unsigned int flags),
+
+ TP_ARGS(lock, flags),
+
+ TP_STRUCT__entry(
+ __field(void *, lock_addr)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->lock_addr = lock;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("%p (flags=%s)", __entry->lock_addr,
+ __print_flags(__entry->flags, "|",
+ { LCB_F_SPIN, "SPIN" },
+ { LCB_F_READ, "READ" },
+ { LCB_F_WRITE, "WRITE" },
+ { LCB_F_RT, "RT" },
+ { LCB_F_PERCPU, "PERCPU" },
+ { LCB_F_MUTEX, "MUTEX" }
+ ))
+);
+
+TRACE_EVENT(contention_end,
+
+ TP_PROTO(void *lock, int ret),
+
+ TP_ARGS(lock, ret),
+
+ TP_STRUCT__entry(
+ __field(void *, lock_addr)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->lock_addr = lock;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%p (ret=%d)", __entry->lock_addr, __entry->ret)
+);
#endif /* _TRACE_LOCK_H */
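The LCB_F_* bits compose into the flags argument of lock:contention_begin, and __print_flags() renders the set bits joined by "|". A runnable model of that decoding:

#include <stdio.h>

#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)

static void print_lcb_flags(unsigned int flags)
{
	static const struct { unsigned int bit; const char *name; } names[] = {
		{ LCB_F_SPIN, "SPIN" },     { LCB_F_READ, "READ" },
		{ LCB_F_WRITE, "WRITE" },   { LCB_F_RT, "RT" },
		{ LCB_F_PERCPU, "PERCPU" }, { LCB_F_MUTEX, "MUTEX" },
	};
	const char *sep = "";

	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		if (flags & names[i].bit) {
			printf("%s%s", sep, names[i].name);
			sep = "|";
		}
	printf("\n");
}

int main(void)
{
	print_lcb_flags(LCB_F_READ | LCB_F_PERCPU);	/* READ|PERCPU */
	return 0;
}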
diff --git a/include/trace/events/maple_tree.h b/include/trace/events/maple_tree.h
new file mode 100644
index 000000000000..2be403bdc2bd
--- /dev/null
+++ b/include/trace/events/maple_tree.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM maple_tree
+
+#if !defined(_TRACE_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MM_H
+
+
+#include <linux/tracepoint.h>
+
+struct ma_state;
+
+TRACE_EVENT(ma_op,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+TRACE_EVENT(ma_read,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+
+TRACE_EVENT(ma_write,
+
+ TP_PROTO(const char *fn, struct ma_state *mas, unsigned long piv,
+ void *val),
+
+ TP_ARGS(fn, mas, piv, val),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(unsigned long, piv)
+ __field(void *, val)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->piv = piv;
+ __entry->val = val;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode %p (%lu %lu) range:%lu-%lu piv (%lu) val %p",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last,
+ (unsigned long) __entry->piv,
+ (void *) __entry->val
+ )
+)
+#endif /* _TRACE_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
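A hedged call-site sketch (the helper is illustrative, not lifted from lib/maple_tree.c): the @fn argument is conventionally __func__, so each record names the maple-tree operation that fired it:

/* Illustrative only. */
static void mas_store_sketch(struct ma_state *mas, unsigned long piv, void *val)
{
	trace_ma_write(__func__, mas, piv, val);
	/* ... perform the store ... */
}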
diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h
new file mode 100644
index 000000000000..165cf25f77a7
--- /dev/null
+++ b/include/trace/events/mctp.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mctp
+
+#if !defined(_TRACE_MCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MCTP_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __TRACE_MCTP_ENUMS
+#define __TRACE_MCTP_ENUMS
+enum {
+ MCTP_TRACE_KEY_TIMEOUT,
+ MCTP_TRACE_KEY_REPLIED,
+ MCTP_TRACE_KEY_INVALIDATED,
+ MCTP_TRACE_KEY_CLOSED,
+ MCTP_TRACE_KEY_DROPPED,
+};
+#endif /* __TRACE_MCTP_ENUMS */
+
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_DROPPED);
+
+TRACE_EVENT(mctp_key_acquire,
+ TP_PROTO(const struct mctp_sk_key *key),
+ TP_ARGS(key),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ ),
+ TP_printk("local %d, peer %d, tag %1x",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag
+ )
+);
+
+TRACE_EVENT(mctp_key_release,
+ TP_PROTO(const struct mctp_sk_key *key, int reason),
+ TP_ARGS(key, reason),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ __field(int, reason)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ __entry->reason = reason;
+ ),
+ TP_printk("local %d, peer %d, tag %1x %s",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag,
+ __print_symbolic(__entry->reason,
+ { MCTP_TRACE_KEY_TIMEOUT, "timeout" },
+ { MCTP_TRACE_KEY_REPLIED, "replied" },
+ { MCTP_TRACE_KEY_INVALIDATED, "invalidated" },
+ { MCTP_TRACE_KEY_CLOSED, "closed" },
+ { MCTP_TRACE_KEY_DROPPED, "dropped" })
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 4d434398d64d..0190ef725b43 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,9 @@
EM( MR_SYSCALL, "syscall_or_cpuset") \
EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
- EMe(MR_CONTIG_RANGE, "contig_range")
+ EM( MR_CONTIG_RANGE, "contig_range") \
+ EM( MR_LONGTERM_PIN, "longterm_pin") \
+ EMe(MR_DEMOTION, "demotion")
/*
* First define the enums in the above macros to be exported to userspace
@@ -47,10 +49,11 @@ TRACE_EVENT(mm_migrate_pages,
TP_PROTO(unsigned long succeeded, unsigned long failed,
unsigned long thp_succeeded, unsigned long thp_failed,
- unsigned long thp_split, enum migrate_mode mode, int reason),
+ unsigned long thp_split, unsigned long large_folio_split,
+ enum migrate_mode mode, int reason),
TP_ARGS(succeeded, failed, thp_succeeded, thp_failed,
- thp_split, mode, reason),
+ thp_split, large_folio_split, mode, reason),
TP_STRUCT__entry(
__field( unsigned long, succeeded)
@@ -58,29 +61,85 @@ TRACE_EVENT(mm_migrate_pages,
__field( unsigned long, thp_succeeded)
__field( unsigned long, thp_failed)
__field( unsigned long, thp_split)
+ __field( unsigned long, large_folio_split)
__field( enum migrate_mode, mode)
__field( int, reason)
),
TP_fast_assign(
- __entry->succeeded = succeeded;
- __entry->failed = failed;
- __entry->thp_succeeded = thp_succeeded;
- __entry->thp_failed = thp_failed;
- __entry->thp_split = thp_split;
- __entry->mode = mode;
- __entry->reason = reason;
+ __entry->succeeded = succeeded;
+ __entry->failed = failed;
+ __entry->thp_succeeded = thp_succeeded;
+ __entry->thp_failed = thp_failed;
+ __entry->thp_split = thp_split;
+ __entry->large_folio_split = large_folio_split;
+ __entry->mode = mode;
+ __entry->reason = reason;
),
- TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s",
+ TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu nr_split=%lu mode=%s reason=%s",
__entry->succeeded,
__entry->failed,
__entry->thp_succeeded,
__entry->thp_failed,
__entry->thp_split,
+ __entry->large_folio_split,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+ TP_PROTO(enum migrate_mode mode, int reason),
+
+ TP_ARGS(mode, reason),
+
+ TP_STRUCT__entry(
+ __field(enum migrate_mode, mode)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->mode = mode;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("mode=%s reason=%s",
+ __print_symbolic(__entry->mode, MIGRATE_MODE),
+ __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
+DECLARE_EVENT_CLASS(migration_pte,
+
+ TP_PROTO(unsigned long addr, unsigned long pte, int order),
+
+ TP_ARGS(addr, pte, order),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, pte)
+ __field(int, order)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->pte = pte;
+ __entry->order = order;
+ ),
+
+ TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order)
+);
+
+DEFINE_EVENT(migration_pte, set_migration_pte,
+ TP_PROTO(unsigned long addr, unsigned long pte, int order),
+ TP_ARGS(addr, pte, order)
+);
+
+DEFINE_EVENT(migration_pte, remove_migration_pte,
+ TP_PROTO(unsigned long addr, unsigned long pte, int order),
+ TP_ARGS(addr, pte, order)
+);
+
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index 4661f7ba07c0..f8d61485de16 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -35,13 +35,86 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset = info->align_offset;
),
- TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+ TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
__entry->total_vm, __entry->flags, __entry->length,
__entry->low_limit, __entry->high_limit, __entry->align_mask,
__entry->align_offset)
);
+
+TRACE_EVENT(vma_mas_szero,
+ TP_PROTO(struct maple_tree *mt, unsigned long start,
+ unsigned long end),
+
+ TP_ARGS(mt, start, end),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
+ __entry->mt,
+ (unsigned long) __entry->start,
+ (unsigned long) __entry->end
+ )
+);
+
+TRACE_EVENT(vma_store,
+ TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
+
+ TP_ARGS(mt, vma),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(struct vm_area_struct *, vma)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->vma = vma;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end - 1;
+ ),
+
+ TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
+ __entry->mt, __entry->vma,
+ (unsigned long) __entry->vm_start,
+ (unsigned long) __entry->vm_end
+ )
+);
+
+
+TRACE_EVENT(exit_mmap,
+ TP_PROTO(struct mm_struct *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(struct maple_tree *, mt)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->mt = &mm->mm_mt;
+ ),
+
+ TP_printk("mt_mod %p, DESTROY",
+ __entry->mt
+ )
+);
+
#endif
/* This part must be outside protection */
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
new file mode 100644
index 000000000000..14db8044c1ff
--- /dev/null
+++ b/include/trace/events/mmap_lock.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmap_lock
+
+#if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMAP_LOCK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+struct mm_struct;
+
+extern int trace_mmap_lock_reg(void);
+extern void trace_mmap_lock_unreg(void);
+
+DECLARE_EVENT_CLASS(mmap_lock,
+
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
+
+ TP_ARGS(mm, memcg_path, write),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __string(memcg_path, memcg_path)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __assign_str(memcg_path, memcg_path);
+ __entry->write = write;
+ ),
+
+ TP_printk(
+ "mm=%p memcg_path=%s write=%s",
+ __entry->mm,
+ __get_str(memcg_path),
+ __entry->write ? "true" : "false"
+ )
+);
+
+#define DEFINE_MMAP_LOCK_EVENT(name) \
+ DEFINE_EVENT_FN(mmap_lock, name, \
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
+ bool write), \
+ TP_ARGS(mm, memcg_path, write), \
+ trace_mmap_lock_reg, trace_mmap_lock_unreg)
+
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
+
+TRACE_EVENT_FN(mmap_lock_acquire_returned,
+
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
+ bool success),
+
+ TP_ARGS(mm, memcg_path, write, success),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __string(memcg_path, memcg_path)
+ __field(bool, write)
+ __field(bool, success)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __assign_str(memcg_path, memcg_path);
+ __entry->write = write;
+ __entry->success = success;
+ ),
+
+ TP_printk(
+ "mm=%p memcg_path=%s write=%s success=%s",
+ __entry->mm,
+ __get_str(memcg_path),
+ __entry->write ? "true" : "false",
+ __entry->success ? "true" : "false"
+ ),
+
+ trace_mmap_lock_reg, trace_mmap_lock_unreg
+);
+
+#endif /* _TRACE_MMAP_LOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
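The _FN event variants take registration callbacks that run when the first probe attaches and the last one detaches; mm/mmap_lock.c uses them to manage the state behind memcg_path. A hedged sketch of their shape (the bodies here are illustrative, not the real implementation):

/* Bodies are illustrative; the real hooks live in mm/mmap_lock.c. */
int trace_mmap_lock_reg(void)
{
	/* set up whatever formatting memcg_path needs, e.g. buffers */
	return 0;	/* nonzero would veto enabling the tracepoint */
}

void trace_mmap_lock_unreg(void)
{
	/* tear down what _reg() set up */
}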
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 5fb752034386..d801409b33cf 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,110 +13,138 @@
* Thus most bits set go first.
*/
-#define __def_gfpflag_names \
- {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
- {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
- {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
- {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
- {(unsigned long)GFP_USER, "GFP_USER"}, \
- {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
- {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
- {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
- {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
- {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
- {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
- {(unsigned long)GFP_DMA, "GFP_DMA"}, \
- {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
- {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
- {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
- {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
- {(unsigned long)__GFP_IO, "__GFP_IO"}, \
- {(unsigned long)__GFP_FS, "__GFP_FS"}, \
- {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
- {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
- {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
- {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
- {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
- {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
- {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
- {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
- {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
- {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
- {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
- {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
- {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
- {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
- {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
- {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
+#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+
+#define __def_gfpflag_names \
+ gfpflag_string(GFP_TRANSHUGE), \
+ gfpflag_string(GFP_TRANSHUGE_LIGHT), \
+ gfpflag_string(GFP_HIGHUSER_MOVABLE), \
+ gfpflag_string(GFP_HIGHUSER), \
+ gfpflag_string(GFP_USER), \
+ gfpflag_string(GFP_KERNEL_ACCOUNT), \
+ gfpflag_string(GFP_KERNEL), \
+ gfpflag_string(GFP_NOFS), \
+ gfpflag_string(GFP_ATOMIC), \
+ gfpflag_string(GFP_NOIO), \
+ gfpflag_string(GFP_NOWAIT), \
+ gfpflag_string(GFP_DMA), \
+ gfpflag_string(__GFP_HIGHMEM), \
+ gfpflag_string(GFP_DMA32), \
+ gfpflag_string(__GFP_HIGH), \
+ gfpflag_string(__GFP_IO), \
+ gfpflag_string(__GFP_FS), \
+ gfpflag_string(__GFP_NOWARN), \
+ gfpflag_string(__GFP_RETRY_MAYFAIL), \
+ gfpflag_string(__GFP_NOFAIL), \
+ gfpflag_string(__GFP_NORETRY), \
+ gfpflag_string(__GFP_COMP), \
+ gfpflag_string(__GFP_ZERO), \
+ gfpflag_string(__GFP_NOMEMALLOC), \
+ gfpflag_string(__GFP_MEMALLOC), \
+ gfpflag_string(__GFP_HARDWALL), \
+ gfpflag_string(__GFP_THISNODE), \
+ gfpflag_string(__GFP_RECLAIMABLE), \
+ gfpflag_string(__GFP_MOVABLE), \
+ gfpflag_string(__GFP_ACCOUNT), \
+ gfpflag_string(__GFP_WRITE), \
+ gfpflag_string(__GFP_RECLAIM), \
+ gfpflag_string(__GFP_DIRECT_RECLAIM), \
+ gfpflag_string(__GFP_KSWAPD_RECLAIM), \
+ gfpflag_string(__GFP_ZEROTAGS)
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define __def_gfpflag_names_kasan , \
+ gfpflag_string(__GFP_SKIP_ZERO), \
+ gfpflag_string(__GFP_SKIP_KASAN)
+#else
+#define __def_gfpflag_names_kasan
+#endif
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
- __def_gfpflag_names \
+ __def_gfpflag_names __def_gfpflag_names_kasan \
) : "none"
#ifdef CONFIG_MMU
-#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
+#define IF_HAVE_PG_MLOCK(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_MLOCK(flag,string)
+#define IF_HAVE_PG_MLOCK(_name)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
+#define IF_HAVE_PG_UNCACHED(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_UNCACHED(flag,string)
+#define IF_HAVE_PG_UNCACHED(_name)
#endif
#ifdef CONFIG_MEMORY_FAILURE
-#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
+#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#else
+#define IF_HAVE_PG_HWPOISON(_name)
+#endif
+
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
+#define IF_HAVE_PG_IDLE(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_HWPOISON(flag,string)
+#define IF_HAVE_PG_IDLE(_name)
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
-#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#define IF_HAVE_PG_ARCH_X(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_IDLE(flag,string)
+#define IF_HAVE_PG_ARCH_X(_name)
#endif
+#define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) }
+
#define __def_pageflag_names \
- {1UL << PG_locked, "locked" }, \
- {1UL << PG_waiters, "waiters" }, \
- {1UL << PG_error, "error" }, \
- {1UL << PG_referenced, "referenced" }, \
- {1UL << PG_uptodate, "uptodate" }, \
- {1UL << PG_dirty, "dirty" }, \
- {1UL << PG_lru, "lru" }, \
- {1UL << PG_active, "active" }, \
- {1UL << PG_workingset, "workingset" }, \
- {1UL << PG_slab, "slab" }, \
- {1UL << PG_owner_priv_1, "owner_priv_1" }, \
- {1UL << PG_arch_1, "arch_1" }, \
- {1UL << PG_reserved, "reserved" }, \
- {1UL << PG_private, "private" }, \
- {1UL << PG_private_2, "private_2" }, \
- {1UL << PG_writeback, "writeback" }, \
- {1UL << PG_head, "head" }, \
- {1UL << PG_mappedtodisk, "mappedtodisk" }, \
- {1UL << PG_reclaim, "reclaim" }, \
- {1UL << PG_swapbacked, "swapbacked" }, \
- {1UL << PG_unevictable, "unevictable" } \
-IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
-IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
-IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
-IF_HAVE_PG_IDLE(PG_young, "young" ) \
-IF_HAVE_PG_IDLE(PG_idle, "idle" )
+ DEF_PAGEFLAG_NAME(locked), \
+ DEF_PAGEFLAG_NAME(waiters), \
+ DEF_PAGEFLAG_NAME(error), \
+ DEF_PAGEFLAG_NAME(referenced), \
+ DEF_PAGEFLAG_NAME(uptodate), \
+ DEF_PAGEFLAG_NAME(dirty), \
+ DEF_PAGEFLAG_NAME(lru), \
+ DEF_PAGEFLAG_NAME(active), \
+ DEF_PAGEFLAG_NAME(workingset), \
+ DEF_PAGEFLAG_NAME(slab), \
+ DEF_PAGEFLAG_NAME(owner_priv_1), \
+ DEF_PAGEFLAG_NAME(arch_1), \
+ DEF_PAGEFLAG_NAME(reserved), \
+ DEF_PAGEFLAG_NAME(private), \
+ DEF_PAGEFLAG_NAME(private_2), \
+ DEF_PAGEFLAG_NAME(writeback), \
+ DEF_PAGEFLAG_NAME(head), \
+ DEF_PAGEFLAG_NAME(mappedtodisk), \
+ DEF_PAGEFLAG_NAME(reclaim), \
+ DEF_PAGEFLAG_NAME(swapbacked), \
+ DEF_PAGEFLAG_NAME(unevictable) \
+IF_HAVE_PG_MLOCK(mlocked) \
+IF_HAVE_PG_UNCACHED(uncached) \
+IF_HAVE_PG_HWPOISON(hwpoison) \
+IF_HAVE_PG_IDLE(idle) \
+IF_HAVE_PG_IDLE(young) \
+IF_HAVE_PG_ARCH_X(arch_2) \
+IF_HAVE_PG_ARCH_X(arch_3)
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
) : "none"
+#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
+
+#define __def_pagetype_names \
+ DEF_PAGETYPE_NAME(offline), \
+ DEF_PAGETYPE_NAME(guard), \
+ DEF_PAGETYPE_NAME(table), \
+ DEF_PAGETYPE_NAME(buddy)
+
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
-#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
+#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
@@ -130,6 +158,12 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
+# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name},
+#else
+# define IF_HAVE_UFFD_MINOR(flag, name)
+#endif
+
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
@@ -141,8 +175,8 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
{VM_MAYSHARE, "mayshare" }, \
{VM_GROWSDOWN, "growsdown" }, \
{VM_UFFD_MISSING, "uffd_missing" }, \
+IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \
{VM_PFNMAP, "pfnmap" }, \
- {VM_DENYWRITE, "denywrite" }, \
{VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \
{VM_IO, "io" }, \
@@ -189,8 +223,8 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
#define compact_result_to_feedback(result) \
({ \
enum compact_result __result = result; \
- (compaction_failed(__result)) ? COMPACTION_FAILED : \
- (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
+ (__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \
+ (__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \
})
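
The rewritten compact_result_to_feedback() drops the
compaction_failed()/compaction_withdrawn() helpers and tests the two
terminal results directly: COMPACT_COMPLETE (the zone was fully scanned
without meeting the target) maps to failure, COMPACT_SUCCESS to progress,
and everything else counts as withdrawn. The same mapping as a plain
function, with a stand-in subset of the enums:

#include <stdio.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_SUCCESS, COMPACT_COMPLETE };
enum compact_feedback { COMPACTION_FAILED, COMPACTION_WITHDRAWN, COMPACTION_PROGRESS };

static enum compact_feedback to_feedback(enum compact_result r)
{
	if (r == COMPACT_COMPLETE)
		return COMPACTION_FAILED;	/* scanned to the end, no page */
	if (r == COMPACT_SUCCESS)
		return COMPACTION_PROGRESS;	/* a suitable page was freed */
	return COMPACTION_WITHDRAWN;		/* backed off before finishing */
}

int main(void)
{
	printf("%d %d %d\n",
	       to_feedback(COMPACT_COMPLETE),	/* 0 = FAILED */
	       to_feedback(COMPACT_SUCCESS),	/* 2 = PROGRESS */
	       to_feedback(COMPACT_SKIPPED));	/* 1 = WITHDRAWN */
	return 0;
}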
#define COMPACTION_FEEDBACK \
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
new file mode 100644
index 000000000000..09e72215b9f9
--- /dev/null
+++ b/include/trace/events/mptcp.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mptcp
+
+#if !defined(_TRACE_MPTCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPTCP_H
+
+#include <linux/tracepoint.h>
+
+#define show_mapping_status(status) \
+ __print_symbolic(status, \
+ { 0, "MAPPING_OK" }, \
+ { 1, "MAPPING_INVALID" }, \
+ { 2, "MAPPING_EMPTY" }, \
+ { 3, "MAPPING_DATA_FIN" }, \
+ { 4, "MAPPING_DUMMY" })
+
+TRACE_EVENT(mptcp_subflow_get_send,
+
+ TP_PROTO(struct mptcp_subflow_context *subflow),
+
+ TP_ARGS(subflow),
+
+ TP_STRUCT__entry(
+ __field(bool, active)
+ __field(bool, free)
+ __field(u32, snd_wnd)
+ __field(u32, pace)
+ __field(u8, backup)
+ __field(u64, ratio)
+ ),
+
+ TP_fast_assign(
+ struct sock *ssk;
+
+ __entry->active = mptcp_subflow_active(subflow);
+ __entry->backup = subflow->backup;
+
+ if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
+ __entry->free = sk_stream_memory_free(subflow->tcp_sock);
+ else
+ __entry->free = 0;
+
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ if (ssk && sk_fullsock(ssk)) {
+ __entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
+ __entry->pace = READ_ONCE(ssk->sk_pacing_rate);
+ } else {
+ __entry->snd_wnd = 0;
+ __entry->pace = 0;
+ }
+
+ if (ssk && sk_fullsock(ssk) && __entry->pace)
+ __entry->ratio = div_u64((u64)ssk->sk_wmem_queued << 32, __entry->pace);
+ else
+ __entry->ratio = 0;
+ ),
+
+ TP_printk("active=%d free=%d snd_wnd=%u pace=%u backup=%u ratio=%llu",
+ __entry->active, __entry->free,
+ __entry->snd_wnd, __entry->pace,
+ __entry->backup, __entry->ratio)
+);
+
+DECLARE_EVENT_CLASS(mptcp_dump_mpext,
+
+ TP_PROTO(struct mptcp_ext *mpext),
+
+ TP_ARGS(mpext),
+
+ TP_STRUCT__entry(
+ __field(u64, data_ack)
+ __field(u64, data_seq)
+ __field(u32, subflow_seq)
+ __field(u16, data_len)
+ __field(u16, csum)
+ __field(u8, use_map)
+ __field(u8, dsn64)
+ __field(u8, data_fin)
+ __field(u8, use_ack)
+ __field(u8, ack64)
+ __field(u8, mpc_map)
+ __field(u8, frozen)
+ __field(u8, reset_transient)
+ __field(u8, reset_reason)
+ __field(u8, csum_reqd)
+ __field(u8, infinite_map)
+ ),
+
+ TP_fast_assign(
+ __entry->data_ack = mpext->ack64 ? mpext->data_ack : mpext->data_ack32;
+ __entry->data_seq = mpext->data_seq;
+ __entry->subflow_seq = mpext->subflow_seq;
+ __entry->data_len = mpext->data_len;
+ __entry->csum = (__force u16)mpext->csum;
+ __entry->use_map = mpext->use_map;
+ __entry->dsn64 = mpext->dsn64;
+ __entry->data_fin = mpext->data_fin;
+ __entry->use_ack = mpext->use_ack;
+ __entry->ack64 = mpext->ack64;
+ __entry->mpc_map = mpext->mpc_map;
+ __entry->frozen = mpext->frozen;
+ __entry->reset_transient = mpext->reset_transient;
+ __entry->reset_reason = mpext->reset_reason;
+ __entry->csum_reqd = mpext->csum_reqd;
+ __entry->infinite_map = mpext->infinite_map;
+ ),
+
+ TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u infinite_map=%u",
+ __entry->data_ack, __entry->data_seq,
+ __entry->subflow_seq, __entry->data_len,
+ __entry->csum, __entry->use_map,
+ __entry->dsn64, __entry->data_fin,
+ __entry->use_ack, __entry->ack64,
+ __entry->mpc_map, __entry->frozen,
+ __entry->reset_transient, __entry->reset_reason,
+ __entry->csum_reqd, __entry->infinite_map)
+);
+
+DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag,
+ TP_PROTO(struct mptcp_ext *mpext),
+ TP_ARGS(mpext));
+
+DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
+ TP_PROTO(struct mptcp_ext *mpext),
+ TP_ARGS(mpext));
+
+TRACE_EVENT(ack_update_msk,
+
+ TP_PROTO(u64 data_ack, u64 old_snd_una,
+ u64 new_snd_una, u64 new_wnd_end,
+ u64 msk_wnd_end),
+
+ TP_ARGS(data_ack, old_snd_una,
+ new_snd_una, new_wnd_end,
+ msk_wnd_end),
+
+ TP_STRUCT__entry(
+ __field(u64, data_ack)
+ __field(u64, old_snd_una)
+ __field(u64, new_snd_una)
+ __field(u64, new_wnd_end)
+ __field(u64, msk_wnd_end)
+ ),
+
+ TP_fast_assign(
+ __entry->data_ack = data_ack;
+ __entry->old_snd_una = old_snd_una;
+ __entry->new_snd_una = new_snd_una;
+ __entry->new_wnd_end = new_wnd_end;
+ __entry->msk_wnd_end = msk_wnd_end;
+ ),
+
+ TP_printk("data_ack=%llu old_snd_una=%llu new_snd_una=%llu new_wnd_end=%llu msk_wnd_end=%llu",
+ __entry->data_ack, __entry->old_snd_una,
+ __entry->new_snd_una, __entry->new_wnd_end,
+ __entry->msk_wnd_end)
+);
+
+TRACE_EVENT(subflow_check_data_avail,
+
+ TP_PROTO(__u8 status, struct sk_buff *skb),
+
+ TP_ARGS(status, skb),
+
+ TP_STRUCT__entry(
+ __field(u8, status)
+ __field(const void *, skb)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->skb = skb;
+ ),
+
+ TP_printk("mapping_status=%s, skb=%p",
+ show_mapping_status(__entry->status),
+ __entry->skb)
+);
+
+#endif /* _TRACE_MPTCP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
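
In mptcp_subflow_get_send above, ratio is div_u64((u64)wmem_queued << 32,
pacing_rate): bytes queued over bytes per second, kept in 32.32 fixed
point, which reads as an estimate of how long the subflow needs to drain
its write queue (my interpretation; a lower value presumably marks a more
attractive subflow for the scheduler). Standalone arithmetic with made-up
numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t queued = 65536;	/* bytes sitting in the write queue */
	uint64_t pace = 1048576;	/* pacing rate, bytes per second */
	uint64_t ratio = (queued << 32) / pace;	/* the kernel uses div_u64() */

	printf("ratio=%llu (~%.4f s to drain)\n",
	       (unsigned long long)ratio, ratio / 4294967296.0);	/* 0.0625 s */
	return 0;
}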
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index 6678cf8b235b..dc03cf8e0369 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -36,6 +36,39 @@ TRACE_EVENT(napi_poll,
__entry->work, __entry->budget)
);
+TRACE_EVENT(dql_stall_detected,
+
+ TP_PROTO(unsigned short thrs, unsigned int len,
+ unsigned long last_reap, unsigned long hist_head,
+ unsigned long now, unsigned long *hist),
+
+ TP_ARGS(thrs, len, last_reap, hist_head, now, hist),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, thrs)
+ __field( unsigned int, len)
+ __field( unsigned long, last_reap)
+ __field( unsigned long, hist_head)
+ __field( unsigned long, now)
+ __array( unsigned long, hist, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->thrs = thrs;
+ __entry->len = len;
+ __entry->last_reap = last_reap;
+ __entry->hist_head = hist_head * BITS_PER_LONG;
+ __entry->now = now;
+ memcpy(__entry->hist, hist, sizeof(__entry->hist));
+ ),
+
+ TP_printk("thrs %u len %u last_reap %lu hist_head %lu now %lu hist %016lx %016lx %016lx %016lx",
+ __entry->thrs, __entry->len,
+ __entry->last_reap, __entry->hist_head, __entry->now,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3])
+);
+
#undef NO_DEV
#endif /* _TRACE_NAPI_H */
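
dql_stall_detected snapshots a small stall-history bitmap: __array()
reserves four longs inline in the ring buffer, TP_fast_assign() copies
them in with the size taken from the destination array, and hist_head,
which arrives as a word index, is scaled by BITS_PER_LONG into a bit
position for consumers. A userspace rendering of that bookkeeping:

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	(8 * (unsigned long)sizeof(long))
#define HIST_WORDS	4

int main(void)
{
	const unsigned long src[HIST_WORDS] = { 0x1UL, 0x0UL, 0x0UL, 0x8UL };
	unsigned long hist[HIST_WORDS];
	unsigned long hist_head = 3;		/* word index into the history */

	memcpy(hist, src, sizeof(hist));	/* sized from the destination */
	printf("word %lu starts at bit %lu; hist %016lx %016lx %016lx %016lx\n",
	       hist_head, hist_head * BITS_PER_LONG,
	       hist[0], hist[1], hist[2], hist[3]);
	return 0;
}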
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 62bb17516713..833143d0992e 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -30,7 +30,7 @@ TRACE_EVENT(neigh_create,
TP_STRUCT__entry(
__field(u32, family)
- __dynamic_array(char, dev, IFNAMSIZ )
+ __string(dev, dev ? dev->name : "NULL")
__field(int, entries)
__field(u8, created)
__field(u8, gc_exempt)
@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
),
TP_fast_assign(
- struct in6_addr *pin6;
__be32 *p32;
__entry->family = tbl->family;
@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
__entry->entries = atomic_read(&tbl->gc_entries);
__entry->created = n != NULL;
__entry->gc_exempt = exempt_from_gc;
- pin6 = (struct in6_addr *)__entry->primary_key6;
p32 = (__be32 *)__entry->primary_key4;
if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
#if IS_ENABLED(CONFIG_IPV6)
if (tbl->family == AF_INET6) {
+ struct in6_addr *pin6;
+
pin6 = (struct in6_addr *)__entry->primary_key6;
*pin6 = *(struct in6_addr *)pkey;
}
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 2399073c3afc..f667c76a3b02 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -51,7 +51,8 @@ TRACE_EVENT(net_dev_start_xmit,
__entry->network_offset = skb_network_offset(skb);
__entry->transport_offset_valid =
skb_transport_header_was_set(skb);
- __entry->transport_offset = skb_transport_offset(skb);
+ __entry->transport_offset = skb_transport_header_was_set(skb) ?
+ skb_transport_offset(skb) : 0;
__entry->tx_flags = skb_shinfo(skb)->tx_flags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
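
The net_dev_start_xmit fix pairs the recorded offset with its validity
flag: skb_transport_offset() is only meaningful once the transport header
has been set, so the entry now stores 0 instead of garbage when it has
not. The same guard pattern in miniature (types and names invented for
the sketch):

#include <stdio.h>
#include <stdbool.h>

struct pkt { bool transport_set; int transport_offset; };

static void record(const struct pkt *p, bool *valid, int *off)
{
	*valid = p->transport_set;
	*off = p->transport_set ? p->transport_offset : 0;	/* never read junk */
}

int main(void)
{
	const struct pkt p = { .transport_set = false, .transport_offset = 1234 };
	bool valid;
	int off;

	record(&p, &valid, &off);
	printf("valid=%d off=%d\n", valid, off);	/* valid=0 off=0 */
	return 0;
}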
@@ -260,13 +261,6 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
TP_ARGS(skb)
);
-DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
-
- TP_PROTO(const struct sk_buff *skb),
-
- TP_ARGS(skb)
-);
-
DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
TP_PROTO(int ret),
@@ -312,13 +306,6 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
TP_ARGS(ret)
);
-DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
-
- TP_PROTO(int ret),
-
- TP_ARGS(ret)
-);
-
DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
TP_PROTO(int ret),
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
new file mode 100644
index 000000000000..447a8c21cf57
--- /dev/null
+++ b/include/trace/events/netfs.h
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support module tracepoints
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM netfs
+
+#if !defined(_TRACE_NETFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NETFS_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#define netfs_read_traces \
+ EM(netfs_read_trace_dio_read, "DIO-READ ") \
+ EM(netfs_read_trace_expanded, "EXPANDED ") \
+ EM(netfs_read_trace_readahead, "READAHEAD") \
+ EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
+ E_(netfs_read_trace_write_begin, "WRITEBEGN")
+
+#define netfs_write_traces \
+ EM(netfs_write_trace_dio_write, "DIO-WRITE") \
+ EM(netfs_write_trace_launder, "LAUNDER ") \
+ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
+ EM(netfs_write_trace_writeback, "WRITEBACK") \
+ E_(netfs_write_trace_writethrough, "WRITETHRU")
+
+#define netfs_rreq_origins \
+ EM(NETFS_READAHEAD, "RA") \
+ EM(NETFS_READPAGE, "RP") \
+ EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITETHROUGH, "WT") \
+ EM(NETFS_LAUNDER_WRITE, "LW") \
+ EM(NETFS_UNBUFFERED_WRITE, "UW") \
+ EM(NETFS_DIO_READ, "DR") \
+ E_(NETFS_DIO_WRITE, "DW")
+
+#define netfs_rreq_traces \
+ EM(netfs_rreq_trace_assess, "ASSESS ") \
+ EM(netfs_rreq_trace_copy, "COPY ") \
+ EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
+ EM(netfs_rreq_trace_resubmit, "RESUBMT") \
+ EM(netfs_rreq_trace_unlock, "UNLOCK ") \
+ EM(netfs_rreq_trace_unmark, "UNMARK ") \
+ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
+ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ E_(netfs_rreq_trace_write_done, "WR-DONE")
+
+#define netfs_sreq_sources \
+ EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
+ EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
+ EM(NETFS_READ_FROM_CACHE, "READ") \
+ EM(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
+ EM(NETFS_WRITE_TO_CACHE, "WRIT") \
+ E_(NETFS_INVALID_WRITE, "INVL")
+
+#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_download_instead, "RDOWN") \
+ EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_limited, "LIMIT") \
+ EM(netfs_sreq_trace_prepare, "PREP ") \
+ EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_submit, "SUBMT") \
+ EM(netfs_sreq_trace_terminated, "TERM ") \
+ EM(netfs_sreq_trace_write, "WRITE") \
+ EM(netfs_sreq_trace_write_skip, "SKIP ") \
+ E_(netfs_sreq_trace_write_term, "WTERM")
+
+#define netfs_failures \
+ EM(netfs_fail_check_write_begin, "check-write-begin") \
+ EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_dio_read_short, "dio-read-short") \
+ EM(netfs_fail_dio_read_zero, "dio-read-zero") \
+ EM(netfs_fail_read, "read") \
+ EM(netfs_fail_short_read, "short-read") \
+ EM(netfs_fail_prepare_write, "prep-write") \
+ E_(netfs_fail_write, "write")
+
+#define netfs_rreq_ref_traces \
+ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
+ EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
+ EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
+ EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
+ EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
+ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
+ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
+ EM(netfs_rreq_trace_put_work, "PUT WORK ") \
+ EM(netfs_rreq_trace_see_work, "SEE WORK ") \
+ E_(netfs_rreq_trace_new, "NEW ")
+
+#define netfs_sreq_ref_traces \
+ EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \
+ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \
+ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
+ EM(netfs_sreq_trace_new, "NEW ") \
+ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
+ EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
+ EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
+ EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
+ E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+
+#define netfs_folio_traces \
+ /* The first few correspond to enum netfs_how_to_modify */ \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+ EM(netfs_just_prefetch, "mod-prefetch") \
+ EM(netfs_whole_folio_modify, "mod-whole-f") \
+ EM(netfs_modify_and_clear, "mod-n-clear") \
+ EM(netfs_streaming_write, "mod-streamw") \
+ EM(netfs_streaming_write_cont, "mod-streamw+") \
+ EM(netfs_flush_content, "flush") \
+ EM(netfs_streaming_filled_page, "mod-streamw-f") \
+ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
+ /* The rest are for writeback */ \
+ EM(netfs_folio_trace_clear, "clear") \
+ EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_clear_g, "clear-g") \
+ EM(netfs_folio_trace_copy_to_cache, "copy") \
+ EM(netfs_folio_trace_end_copy, "end-copy") \
+ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
+ EM(netfs_folio_trace_kill, "kill") \
+ EM(netfs_folio_trace_launder, "launder") \
+ EM(netfs_folio_trace_mkwrite, "mkwrite") \
+ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
+ EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_redirty, "redirty") \
+ EM(netfs_folio_trace_redirtied, "redirtied") \
+ EM(netfs_folio_trace_store, "store") \
+ EM(netfs_folio_trace_store_plus, "store+") \
+ EM(netfs_folio_trace_wthru, "wthru") \
+ E_(netfs_folio_trace_wthru_plus, "wthru+")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_write_trace { netfs_write_traces } __mode(byte);
+enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
+enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
+enum netfs_failure { netfs_failures } __mode(byte);
+enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
+enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
+
+#endif
+
+/*
+ * Export enum symbols to userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+netfs_read_traces;
+netfs_write_traces;
+netfs_rreq_origins;
+netfs_rreq_traces;
+netfs_sreq_sources;
+netfs_sreq_traces;
+netfs_failures;
+netfs_rreq_ref_traces;
+netfs_sreq_ref_traces;
+netfs_folio_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
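
The EM()/E_() pairs implement the classic X-macro: each list above is
written once and expanded three times, first into a real enum, then into
TRACE_DEFINE_ENUM() exports so tooling can resolve the values, and
finally into the { value, string } tables that __print_symbolic()
consumes. The trick in miniature:

#include <stdio.h>

#define COLORS \
	EM(RED,   "red")   \
	EM(GREEN, "green") \
	E_(BLUE,  "blue")

#define EM(a, b) a,
#define E_(a, b) a
enum color { COLORS };			/* expansion #1: the enum */
#undef EM
#undef E_

#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct {
	enum color c;
	const char *name;
} color_names[] = { COLORS };		/* expansion #2: the string table */
#undef EM
#undef E_

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("%d -> %s\n", color_names[i].c, color_names[i].name);
	return 0;
}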
+
+TRACE_EVENT(netfs_read,
+ TP_PROTO(struct netfs_io_request *rreq,
+ loff_t start, size_t len,
+ enum netfs_read_trace what),
+
+ TP_ARGS(rreq, start, len, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, cookie )
+ __field(loff_t, start )
+ __field(size_t, len )
+ __field(enum netfs_read_trace, what )
+ __field(unsigned int, netfs_inode )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->cookie = rreq->cache_resources.debug_id;
+ __entry->start = start;
+ __entry->len = len;
+ __entry->what = what;
+ __entry->netfs_inode = rreq->inode->i_ino;
+ ),
+
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
+ __entry->rreq,
+ __print_symbolic(__entry->what, netfs_read_traces),
+ __entry->cookie,
+ __entry->netfs_inode,
+ __entry->start, __entry->len)
+ );
+
+TRACE_EVENT(netfs_rreq,
+ TP_PROTO(struct netfs_io_request *rreq,
+ enum netfs_rreq_trace what),
+
+ TP_ARGS(rreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, flags )
+ __field(enum netfs_io_origin, origin )
+ __field(enum netfs_rreq_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->flags = rreq->flags;
+ __entry->origin = rreq->origin;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x %s %s f=%02x",
+ __entry->rreq,
+ __print_symbolic(__entry->origin, netfs_rreq_origins),
+ __print_symbolic(__entry->what, netfs_rreq_traces),
+ __entry->flags)
+ );
+
+TRACE_EVENT(netfs_sreq,
+ TP_PROTO(struct netfs_io_subrequest *sreq,
+ enum netfs_sreq_trace what),
+
+ TP_ARGS(sreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, index )
+ __field(short, error )
+ __field(unsigned short, flags )
+ __field(enum netfs_io_source, source )
+ __field(enum netfs_sreq_trace, what )
+ __field(size_t, len )
+ __field(size_t, transferred )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = sreq->rreq->debug_id;
+ __entry->index = sreq->debug_index;
+ __entry->error = sreq->error;
+ __entry->flags = sreq->flags;
+ __entry->source = sreq->source;
+ __entry->what = what;
+ __entry->len = sreq->len;
+ __entry->transferred = sreq->transferred;
+ __entry->start = sreq->start;
+ ),
+
+ TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __print_symbolic(__entry->what, netfs_sreq_traces),
+ __entry->flags,
+ __entry->start, __entry->transferred, __entry->len,
+ __entry->error)
+ );
+
+TRACE_EVENT(netfs_failure,
+ TP_PROTO(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *sreq,
+ int error, enum netfs_failure what),
+
+ TP_ARGS(rreq, sreq, error, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(short, index )
+ __field(short, error )
+ __field(unsigned short, flags )
+ __field(enum netfs_io_source, source )
+ __field(enum netfs_failure, what )
+ __field(size_t, len )
+ __field(size_t, transferred )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->index = sreq ? sreq->debug_index : -1;
+ __entry->error = error;
+ __entry->flags = sreq ? sreq->flags : 0;
+ __entry->source = sreq ? sreq->source : NETFS_INVALID_READ;
+ __entry->what = what;
+ __entry->len = sreq ? sreq->len : rreq->len;
+ __entry->transferred = sreq ? sreq->transferred : 0;
+ __entry->start = sreq ? sreq->start : 0;
+ ),
+
+ TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __entry->flags,
+ __entry->start, __entry->transferred, __entry->len,
+ __print_symbolic(__entry->what, netfs_failures),
+ __entry->error)
+ );
+
+TRACE_EVENT(netfs_rreq_ref,
+ TP_PROTO(unsigned int rreq_debug_id, int ref,
+ enum netfs_rreq_ref_trace what),
+
+ TP_ARGS(rreq_debug_id, ref, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(int, ref )
+ __field(enum netfs_rreq_ref_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq_debug_id;
+ __entry->ref = ref;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x %s r=%u",
+ __entry->rreq,
+ __print_symbolic(__entry->what, netfs_rreq_ref_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(netfs_sreq_ref,
+ TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index,
+ int ref, enum netfs_sreq_ref_trace what),
+
+ TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, subreq )
+ __field(int, ref )
+ __field(enum netfs_sreq_ref_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq_debug_id;
+ __entry->subreq = subreq_debug_index;
+ __entry->ref = ref;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x[%x] %s r=%u",
+ __entry->rreq,
+ __entry->subreq,
+ __print_symbolic(__entry->what, netfs_sreq_ref_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(netfs_folio,
+ TP_PROTO(struct folio *folio, enum netfs_folio_trace why),
+
+ TP_ARGS(folio, why),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(unsigned int, nr)
+ __field(enum netfs_folio_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->why = why;
+ __entry->index = folio_index(folio);
+ __entry->nr = folio_nr_pages(folio);
+ ),
+
+ TP_printk("i=%05lx ix=%05lx-%05lx %s",
+ __entry->ino, __entry->index, __entry->index + __entry->nr - 1,
+ __print_symbolic(__entry->why, netfs_folio_traces))
+ );
+
+TRACE_EVENT(netfs_write_iter,
+ TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from),
+
+ TP_ARGS(iocb, from),
+
+ TP_STRUCT__entry(
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->start = iocb->ki_pos;
+ __entry->len = iov_iter_count(from);
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk("WRITE-ITER s=%llx l=%zx f=%x",
+ __entry->start, __entry->len, __entry->flags)
+ );
+
+TRACE_EVENT(netfs_write,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ enum netfs_write_trace what),
+
+ TP_ARGS(wreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, cookie )
+ __field(enum netfs_write_trace, what )
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(wreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->wreq = wreq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->what = what;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ ),
+
+ TP_printk("R=%08x %s c=%08x by=%llx-%llx",
+ __entry->wreq,
+ __print_symbolic(__entry->what, netfs_write_traces),
+ __entry->cookie,
+ __entry->start, __entry->start + __entry->len - 1)
+ );
+
+#undef EM
+#undef E_
+#endif /* _TRACE_NETFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/netlink.h b/include/trace/events/netlink.h
new file mode 100644
index 000000000000..3b7be3b386a4
--- /dev/null
+++ b/include/trace/events/netlink.h
@@ -0,0 +1,29 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM netlink
+
+#if !defined(_TRACE_NETLINK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NETLINK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(netlink_extack,
+
+ TP_PROTO(const char *msg),
+
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __string( msg, msg )
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("msg=%s", __get_str(msg))
+);
+
+#endif /* _TRACE_NETLINK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
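
__string()/__assign_str() place the message inline after the event's
fixed fields; as I understand the on-record form, the field itself is a
32-bit __data_loc slot packing (length << 16) | offset, with the string
bytes appended at that offset. A rough standalone model of that layout
(packing assumed, names invented):

#include <stdio.h>
#include <string.h>

static unsigned int make_data_loc(unsigned short off, unsigned short len)
{
	return ((unsigned int)len << 16) | off;	/* assumed packing */
}

int main(void)
{
	char record[64] = { 0 };
	const char *msg = "invalid attribute";
	const unsigned short off = 8;
	const unsigned short len = (unsigned short)strlen(msg) + 1;
	unsigned int data_loc = make_data_loc(off, len);

	memcpy(record + off, msg, len);
	printf("data_loc=%#x -> \"%s\"\n", data_loc, record + (data_loc & 0xffff));
	return 0;
}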
diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h
index 84ee31fc04cc..8efc6236f57c 100644
--- a/include/trace/events/nilfs2.h
+++ b/include/trace/events/nilfs2.h
@@ -192,7 +192,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
TP_PROTO(struct inode *inode,
unsigned long ino,
unsigned long blkoff,
- int mode),
+ enum req_op mode),
TP_ARGS(inode, ino, blkoff, mode),
@@ -200,7 +200,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
__field(struct inode *, inode)
__field(unsigned long, ino)
__field(unsigned long, blkoff)
- __field(int, mode)
+ __field(enum req_op, mode)
),
TP_fast_assign(
diff --git a/include/trace/events/notifier.h b/include/trace/events/notifier.h
new file mode 100644
index 000000000000..26b298a31950
--- /dev/null
+++ b/include/trace/events/notifier.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM notifier
+
+#if !defined(_TRACE_NOTIFIERS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NOTIFIERS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(notifier_info,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb),
+
+ TP_STRUCT__entry(
+ __field(void *, cb)
+ ),
+
+ TP_fast_assign(
+ __entry->cb = cb;
+ ),
+
+ TP_printk("%ps", __entry->cb)
+);
+
+/*
+ * notifier_register - called upon notifier callback registration
+ *
+ * @cb: callback pointer
+ *
+ */
+DEFINE_EVENT(notifier_info, notifier_register,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb)
+);
+
+/*
+ * notifier_unregister - called upon notifier callback unregistration
+ *
+ * @cb: callback pointer
+ *
+ */
+DEFINE_EVENT(notifier_info, notifier_unregister,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb)
+);
+
+/*
+ * notifier_run - called upon notifier callback execution
+ *
+ * @cb: callback pointer
+ *
+ */
+DEFINE_EVENT(notifier_info, notifier_run,
+
+ TP_PROTO(void *cb),
+
+ TP_ARGS(cb)
+);
+
+#endif /* _TRACE_NOTIFIERS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index 26a11e4a2c36..b799f3bcba82 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -7,6 +7,8 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
+#define PG_COUNT_TO_KB(x) ((x) << (PAGE_SHIFT - 10))
+
TRACE_EVENT(oom_score_adj_update,
TP_PROTO(struct task_struct *task),
@@ -72,19 +74,45 @@ TRACE_EVENT(reclaim_retry_zone,
);
TRACE_EVENT(mark_victim,
- TP_PROTO(int pid),
+ TP_PROTO(struct task_struct *task, uid_t uid),
- TP_ARGS(pid),
+ TP_ARGS(task, uid),
TP_STRUCT__entry(
__field(int, pid)
+ __string(comm, task->comm)
+ __field(unsigned long, total_vm)
+ __field(unsigned long, anon_rss)
+ __field(unsigned long, file_rss)
+ __field(unsigned long, shmem_rss)
+ __field(uid_t, uid)
+ __field(unsigned long, pgtables)
+ __field(short, oom_score_adj)
),
TP_fast_assign(
- __entry->pid = pid;
+ __entry->pid = task->pid;
+ __assign_str(comm, task->comm);
+ __entry->total_vm = PG_COUNT_TO_KB(task->mm->total_vm);
+ __entry->anon_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_ANONPAGES));
+ __entry->file_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_FILEPAGES));
+ __entry->shmem_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_SHMEMPAGES));
+ __entry->uid = uid;
+ __entry->pgtables = mm_pgtables_bytes(task->mm) >> 10;
+ __entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d", __entry->pid)
+ TP_printk("pid=%d comm=%s total-vm=%lukB anon-rss=%lukB file-rss:%lukB shmem-rss:%lukB uid=%u pgtables=%lukB oom_score_adj=%hd",
+ __entry->pid,
+ __get_str(comm),
+ __entry->total_vm,
+ __entry->anon_rss,
+ __entry->file_rss,
+ __entry->shmem_rss,
+ __entry->uid,
+ __entry->pgtables,
+ __entry->oom_score_adj
+ )
);
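
PG_COUNT_TO_KB(x) turns a page count into kibibytes by shifting with
(PAGE_SHIFT - 10); with the common 4 KiB page size (PAGE_SHIFT == 12)
that is simply x * 4. Worked example:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */
#define PG_COUNT_TO_KB(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long pages = 2560;	/* 10 MiB worth of 4 KiB pages */

	printf("%lu pages = %lu kB\n", pages, PG_COUNT_TO_KB(pages));	/* 10240 kB */
	return 0;
}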
TRACE_EVENT(wake_reaper,
diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h
new file mode 100644
index 000000000000..82f741ec0f57
--- /dev/null
+++ b/include/trace/events/osnoise.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM osnoise
+
+#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _OSNOISE_TRACE_H
+
+#include <linux/tracepoint.h>
+TRACE_EVENT(thread_noise,
+
+ TP_PROTO(struct task_struct *t, u64 start, u64 duration),
+
+ TP_ARGS(t, start, duration),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN)
+ __field( u64, start )
+ __field( u64, duration)
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __entry->pid = t->pid;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%8s:%d start %llu.%09u duration %llu ns",
+ __entry->comm,
+ __entry->pid,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(softirq_noise,
+
+ TP_PROTO(int vector, u64 start, u64 duration),
+
+ TP_ARGS(vector, start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%8s:%d start %llu.%09u duration %llu ns",
+ show_softirq_name(__entry->vector),
+ __entry->vector,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(irq_noise,
+
+ TP_PROTO(int vector, const char *desc, u64 start, u64 duration),
+
+ TP_ARGS(vector, desc, start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __string( desc, desc )
+ __field( int, vector )
+
+ ),
+
+ TP_fast_assign(
+ __assign_str(desc, desc);
+ __entry->vector = vector;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%s:%d start %llu.%09u duration %llu ns",
+ __get_str(desc),
+ __entry->vector,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(nmi_noise,
+
+ TP_PROTO(u64 start, u64 duration),
+
+ TP_ARGS(start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("start %llu.%09u duration %llu ns",
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(sample_threshold,
+
+ TP_PROTO(u64 start, u64 duration, u64 interference),
+
+ TP_ARGS(start, duration, interference),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __field( u64, interference)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->duration = duration;
+ __entry->interference = interference;
+ ),
+
+ TP_printk("start %llu.%09u duration %llu ns interference %llu",
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration,
+ __entry->interference)
+);
+
+#endif /* _TRACE_OSNOISE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
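
Every osnoise event prints its timestamp as "%llu.%09u" by splitting the
nanosecond counter: __print_ns_to_secs() yields the quotient by
NSEC_PER_SEC and __print_ns_without_secs() the remainder, zero-padded to
nine digits. Equivalent arithmetic:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t ts = 1234567890123ULL;	/* a nanosecond timestamp */

	printf("%llu.%09u\n",
	       (unsigned long long)(ts / NSEC_PER_SEC),
	       (unsigned int)(ts % NSEC_PER_SEC));	/* 1234.567890123 */
	return 0;
}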
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index ad0aa7f31675..6834356b2d2a 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -9,7 +9,7 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
TRACE_EVENT(page_pool_release,
@@ -60,7 +60,7 @@ TRACE_EVENT(page_pool_state_release,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+ TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
__entry->pool, __entry->page, __entry->pfn, __entry->release)
);
@@ -85,7 +85,7 @@ TRACE_EVENT(page_pool_state_hold,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+ TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
__entry->pool, __entry->page, __entry->pfn, __entry->hold)
);
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index 5d2ea93956ce..8a99c1cd417b 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d",
__entry->pfn,
- show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ show_page_flags(__entry->flags & PAGEFLAGS_MASK),
__entry->count,
__entry->mapcount, __entry->mapping, __entry->mt,
__entry->val)
@@ -88,7 +88,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d",
__entry->pfn,
- show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ show_page_flags(__entry->flags & PAGEFLAGS_MASK),
__entry->count,
__entry->mapcount, __entry->mapping, __entry->mt,
__entry->val, __entry->ret)
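
PAGEFLAGS_MASK is the named form of the open-coded
((1UL << NR_PAGEFLAGS) - 1) it replaces; the masking itself is unchanged
and simply drops the non-flag high bits before decoding. Sketch with an
illustrative flag-field width:

#include <stdio.h>

#define NR_PAGEFLAGS 24			/* illustrative */
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)

int main(void)
{
	unsigned long flags = 0xff000015UL;	/* high byte is not a page flag */

	printf("%#lx -> %#lx\n", flags, flags & PAGEFLAGS_MASK);	/* 0x15 */
	return 0;
}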
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
index 8fd1babae761..171524d3526d 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -16,41 +16,38 @@
#define PAGEMAP_MAPPEDDISK 0x0020u
#define PAGEMAP_BUFFERS 0x0040u
-#define trace_pagemap_flags(page) ( \
- (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
- (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \
- (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \
- (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \
- (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
- (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \
+#define trace_pagemap_flags(folio) ( \
+ (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
+ (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
+ (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
+ (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
+ (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
+ (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
)
TRACE_EVENT(mm_lru_insertion,
- TP_PROTO(
- struct page *page,
- int lru
- ),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page, lru),
+ TP_ARGS(folio),
TP_STRUCT__entry(
- __field(struct page *, page )
+ __field(struct folio *, folio )
__field(unsigned long, pfn )
- __field(int, lru )
+ __field(enum lru_list, lru )
__field(unsigned long, flags )
),
TP_fast_assign(
- __entry->page = page;
- __entry->pfn = page_to_pfn(page);
- __entry->lru = lru;
- __entry->flags = trace_pagemap_flags(page);
+ __entry->folio = folio;
+ __entry->pfn = folio_pfn(folio);
+ __entry->lru = folio_lru_list(folio);
+ __entry->flags = trace_pagemap_flags(folio);
),
/* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s",
- __entry->page,
+ TP_printk("folio=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
+ __entry->folio,
__entry->pfn,
__entry->lru,
__entry->flags & PAGEMAP_MAPPED ? "M" : " ",
@@ -63,23 +60,21 @@ TRACE_EVENT(mm_lru_insertion,
TRACE_EVENT(mm_lru_activate,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
- __field(struct page *, page )
+ __field(struct folio *, folio )
__field(unsigned long, pfn )
),
TP_fast_assign(
- __entry->page = page;
- __entry->pfn = page_to_pfn(page);
+ __entry->folio = folio;
+ __entry->pfn = folio_pfn(folio);
),
- /* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
-
+ TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
);
#endif /* _TRACE_PAGEMAP_H */
diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
index df112a64f6c9..5b8211ca8950 100644
--- a/include/trace/events/percpu.h
+++ b/include/trace/events/percpu.h
@@ -6,15 +6,20 @@
#define _TRACE_PERCPU_H
#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
TRACE_EVENT(percpu_alloc_percpu,
- TP_PROTO(bool reserved, bool is_atomic, size_t size,
- size_t align, void *base_addr, int off, void __percpu *ptr),
+ TP_PROTO(unsigned long call_site,
+ bool reserved, bool is_atomic, size_t size,
+ size_t align, void *base_addr, int off,
+ void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
- TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+ TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off,
+ ptr, bytes_alloc, gfp_flags),
TP_STRUCT__entry(
+ __field( unsigned long, call_site )
__field( bool, reserved )
__field( bool, is_atomic )
__field( size_t, size )
@@ -22,9 +27,11 @@ TRACE_EVENT(percpu_alloc_percpu,
__field( void *, base_addr )
__field( int, off )
__field( void __percpu *, ptr )
+ __field( size_t, bytes_alloc )
+ __field( unsigned long, gfp_flags )
),
-
TP_fast_assign(
+ __entry->call_site = call_site;
__entry->reserved = reserved;
__entry->is_atomic = is_atomic;
__entry->size = size;
@@ -32,12 +39,16 @@ TRACE_EVENT(percpu_alloc_percpu,
__entry->base_addr = base_addr;
__entry->off = off;
__entry->ptr = ptr;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
- TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+ TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
+ (void *)__entry->call_site,
__entry->reserved, __entry->is_atomic,
__entry->size, __entry->align,
- __entry->base_addr, __entry->off, __entry->ptr)
+ __entry->base_addr, __entry->off, __entry->ptr,
+ __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
);
TRACE_EVENT(percpu_free_percpu,
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index af5018aa9517..77f14f7a11d4 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -40,6 +40,28 @@ DEFINE_EVENT(cpu, cpu_idle,
TP_ARGS(state, cpu_id)
);
+TRACE_EVENT(cpu_idle_miss,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool below),
+
+ TP_ARGS(cpu_id, state, below),
+
+ TP_STRUCT__entry(
+ __field(u32, cpu_id)
+ __field(u32, state)
+ __field(bool, below)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->state = state;
+ __entry->below = below;
+ ),
+
+ TP_printk("cpu_id=%lu state=%lu type=%s", (unsigned long)__entry->cpu_id,
+ (unsigned long)__entry->state, (__entry->below)?"below":"above")
+);
+
TRACE_EVENT(powernv_throttle,
TP_PROTO(int chip_id, const char *reason, int pmax),
@@ -500,6 +522,35 @@ DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,
TP_ARGS(name, type, new_value)
);
+
+TRACE_EVENT(guest_halt_poll_ns,
+
+ TP_PROTO(bool grow, unsigned int new, unsigned int old),
+
+ TP_ARGS(grow, new, old),
+
+ TP_STRUCT__entry(
+ __field(bool, grow)
+ __field(unsigned int, new)
+ __field(unsigned int, old)
+ ),
+
+ TP_fast_assign(
+ __entry->grow = grow;
+ __entry->new = new;
+ __entry->old = old;
+ ),
+
+ TP_printk("halt_poll_ns %u (%s %u)",
+ __entry->new,
+ __entry->grow ? "grow" : "shrink",
+ __entry->old)
+);
+
+#define trace_guest_halt_poll_ns_grow(new, old) \
+ trace_guest_halt_poll_ns(true, new, old)
+#define trace_guest_halt_poll_ns_shrink(new, old) \
+ trace_guest_halt_poll_ns(false, new, old)
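
The two wrappers exist so call sites read as grow/shrink while a single
event carries both directions; an equivalent plain-C rendering, with
printf standing in for the tracepoint:

#include <stdio.h>
#include <stdbool.h>

static void trace_guest_halt_poll_ns(bool grow, unsigned int new,
				     unsigned int old)
{
	printf("halt_poll_ns %u (%s %u)\n", new, grow ? "grow" : "shrink", old);
}

#define trace_guest_halt_poll_ns_grow(new, old) \
	trace_guest_halt_poll_ns(true, new, old)
#define trace_guest_halt_poll_ns_shrink(new, old) \
	trace_guest_halt_poll_ns(false, new, old)

int main(void)
{
	trace_guest_halt_poll_ns_grow(20000, 10000);	/* halt_poll_ns 20000 (grow 10000) */
	return 0;
}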
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
index cf243de41cc8..12b35e4ff917 100644
--- a/include/trace/events/pwm.h
+++ b/include/trace/events/pwm.h
@@ -10,9 +10,9 @@
DECLARE_EVENT_CLASS(pwm,
- TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+ TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err),
- TP_ARGS(pwm, state),
+ TP_ARGS(pwm, state, err),
TP_STRUCT__entry(
__field(struct pwm_device *, pwm)
@@ -20,6 +20,7 @@ DECLARE_EVENT_CLASS(pwm,
__field(u64, duty_cycle)
__field(enum pwm_polarity, polarity)
__field(bool, enabled)
+ __field(int, err)
),
TP_fast_assign(
@@ -28,28 +29,27 @@ DECLARE_EVENT_CLASS(pwm,
__entry->duty_cycle = state->duty_cycle;
__entry->polarity = state->polarity;
__entry->enabled = state->enabled;
+ __entry->err = err;
),
- TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d",
+ TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
__entry->pwm, __entry->period, __entry->duty_cycle,
- __entry->polarity, __entry->enabled)
+ __entry->polarity, __entry->enabled, __entry->err)
);
DEFINE_EVENT(pwm, pwm_apply,
- TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
-
- TP_ARGS(pwm, state)
+ TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err),
+ TP_ARGS(pwm, state, err)
);
DEFINE_EVENT(pwm, pwm_get,
- TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
-
- TP_ARGS(pwm, state)
+ TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err),
+ TP_ARGS(pwm, state, err)
);
#endif /* _TRACE_PWM_H */
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 330d32d84485..1f4258308b96 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -46,6 +46,34 @@ TRACE_EVENT(qdisc_dequeue,
__entry->txq_state, __entry->packets, __entry->skbaddr )
);
+TRACE_EVENT(qdisc_enqueue,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
+
+ TP_ARGS(qdisc, txq, skb),
+
+ TP_STRUCT__entry(
+ __field(struct Qdisc *, qdisc)
+ __field(const struct netdev_queue *, txq)
+ __field(void *, skbaddr)
+ __field(int, ifindex)
+ __field(u32, handle)
+ __field(u32, parent)
+ ),
+
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->txq = txq;
+ __entry->skbaddr = skb;
+ __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ ),
+
+ TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p",
+ __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
+);
+
TRACE_EVENT(qdisc_reset,
TP_PROTO(struct Qdisc *q),
@@ -53,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
@@ -78,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
index 5857cf682ee7..e7fd55e7dc3d 100644
--- a/include/trace/events/qla.h
+++ b/include/trace/events/qla.h
@@ -22,11 +22,11 @@ DECLARE_EVENT_CLASS(qla_log_event,
TP_STRUCT__entry(
__string(buf, buf)
- __dynamic_array(char, msg, QLA_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(buf, buf);
- vsnprintf(__get_str(msg), QLA_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s %s", __get_str(buf), __get_str(msg))
diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h
index b1de14c3bb93..441132c67133 100644
--- a/include/trace/events/qrtr.h
+++ b/include/trace/events/qrtr.h
@@ -10,15 +10,16 @@
TRACE_EVENT(qrtr_ns_service_announce_new,
- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),
TP_ARGS(service, instance, node, port),
TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),
TP_fast_assign(
@@ -36,15 +37,16 @@ TRACE_EVENT(qrtr_ns_service_announce_new,
TRACE_EVENT(qrtr_ns_service_announce_del,
- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),
TP_ARGS(service, instance, node, port),
TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),
TP_fast_assign(
@@ -62,15 +64,16 @@ TRACE_EVENT(qrtr_ns_service_announce_del,
TRACE_EVENT(qrtr_ns_server_add,
- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),
TP_ARGS(service, instance, node, port),
TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),
TP_fast_assign(
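
Switching the fields from __le32 to unsigned int means the tracepoints
now receive host-order values (presumably converted with le32_to_cpu() at
the call sites) rather than raw wire-order words that print garbage on
big-endian machines. For reference, a portable little-endian decode:

#include <stdio.h>
#include <stdint.h>

static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t wire[4] = { 0x2a, 0x00, 0x00, 0x00 };

	printf("%u\n", le32_to_host(wire));	/* 42 on any host */
	return 0;
}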
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
deleted file mode 100644
index 9570a10cb949..000000000000
--- a/include/trace/events/random.h
+++ /dev/null
@@ -1,330 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM random
-
-#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RANDOM_H
-
-#include <linux/writeback.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(add_device_randomness,
- TP_PROTO(int bytes, unsigned long IP),
-
- TP_ARGS(bytes, IP),
-
- TP_STRUCT__entry(
- __field( int, bytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->bytes = bytes;
- __entry->IP = IP;
- ),
-
- TP_printk("bytes %d caller %pS",
- __entry->bytes, (void *)__entry->IP)
-);
-
-DECLARE_EVENT_CLASS(random__mix_pool_bytes,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-
- TP_ARGS(pool_name, bytes, IP),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, bytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->bytes = bytes;
- __entry->IP = IP;
- ),
-
- TP_printk("%s pool: bytes %d caller %pS",
- __entry->pool_name, __entry->bytes, (void *)__entry->IP)
-);
-
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-
- TP_ARGS(pool_name, bytes, IP)
-);
-
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-
- TP_ARGS(pool_name, bytes, IP)
-);
-
-TRACE_EVENT(credit_entropy_bits,
- TP_PROTO(const char *pool_name, int bits, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, bits, entropy_count, IP),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, bits )
- __field( int, entropy_count )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->bits = bits;
- __entry->entropy_count = entropy_count;
- __entry->IP = IP;
- ),
-
- TP_printk("%s pool: bits %d entropy_count %d caller %pS",
- __entry->pool_name, __entry->bits,
- __entry->entropy_count, (void *)__entry->IP)
-);
-
-TRACE_EVENT(push_to_pool,
- TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
-
- TP_ARGS(pool_name, pool_bits, input_bits),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, pool_bits )
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->pool_bits = pool_bits;
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("%s: pool_bits %d input_pool_bits %d",
- __entry->pool_name, __entry->pool_bits,
- __entry->input_bits)
-);
-
-TRACE_EVENT(debit_entropy,
- TP_PROTO(const char *pool_name, int debit_bits),
-
- TP_ARGS(pool_name, debit_bits),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, debit_bits )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->debit_bits = debit_bits;
- ),
-
- TP_printk("%s: debit_bits %d", __entry->pool_name,
- __entry->debit_bits)
-);
-
-TRACE_EVENT(add_input_randomness,
- TP_PROTO(int input_bits),
-
- TP_ARGS(input_bits),
-
- TP_STRUCT__entry(
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("input_pool_bits %d", __entry->input_bits)
-);
-
-TRACE_EVENT(add_disk_randomness,
- TP_PROTO(dev_t dev, int input_bits),
-
- TP_ARGS(dev, input_bits),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->dev = dev;
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->input_bits)
-);
-
-TRACE_EVENT(xfer_secondary_pool,
- TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
- int pool_entropy, int input_entropy),
-
- TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
- input_entropy),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, xfer_bits )
- __field( int, request_bits )
- __field( int, pool_entropy )
- __field( int, input_entropy )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->xfer_bits = xfer_bits;
- __entry->request_bits = request_bits;
- __entry->pool_entropy = pool_entropy;
- __entry->input_entropy = input_entropy;
- ),
-
- TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
- "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
- __entry->request_bits, __entry->pool_entropy,
- __entry->input_entropy)
-);
-
-DECLARE_EVENT_CLASS(random__get_random_bytes,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP),
-
- TP_STRUCT__entry(
- __field( int, nbytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->nbytes = nbytes;
- __entry->IP = IP;
- ),
-
- TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
-);
-
-DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP)
-);
-
-DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP)
-);
-
-DECLARE_EVENT_CLASS(random__extract_entropy,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, nbytes, entropy_count, IP),
-
- TP_STRUCT__entry(
- __field( const char *, pool_name )
- __field( int, nbytes )
- __field( int, entropy_count )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->pool_name = pool_name;
- __entry->nbytes = nbytes;
- __entry->entropy_count = entropy_count;
- __entry->IP = IP;
- ),
-
- TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
- __entry->pool_name, __entry->nbytes, __entry->entropy_count,
- (void *)__entry->IP)
-);
-
-
-DEFINE_EVENT(random__extract_entropy, extract_entropy,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, nbytes, entropy_count, IP)
-);
-
-DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
-
- TP_ARGS(pool_name, nbytes, entropy_count, IP)
-);
-
-TRACE_EVENT(random_read,
- TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
-
- TP_ARGS(got_bits, need_bits, pool_left, input_left),
-
- TP_STRUCT__entry(
- __field( int, got_bits )
- __field( int, need_bits )
- __field( int, pool_left )
- __field( int, input_left )
- ),
-
- TP_fast_assign(
- __entry->got_bits = got_bits;
- __entry->need_bits = need_bits;
- __entry->pool_left = pool_left;
- __entry->input_left = input_left;
- ),
-
- TP_printk("got_bits %d still_needed_bits %d "
- "blocking_pool_entropy_left %d input_entropy_left %d",
- __entry->got_bits, __entry->got_bits, __entry->pool_left,
- __entry->input_left)
-);
-
-TRACE_EVENT(urandom_read,
- TP_PROTO(int got_bits, int pool_left, int input_left),
-
- TP_ARGS(got_bits, pool_left, input_left),
-
- TP_STRUCT__entry(
- __field( int, got_bits )
- __field( int, pool_left )
- __field( int, input_left )
- ),
-
- TP_fast_assign(
- __entry->got_bits = got_bits;
- __entry->pool_left = pool_left;
- __entry->input_left = input_left;
- ),
-
- TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
- "input_entropy_left %d", __entry->got_bits,
- __entry->pool_left, __entry->input_left)
-);
-
-TRACE_EVENT(prandom_u32,
-
- TP_PROTO(unsigned int ret),
-
- TP_ARGS(ret),
-
- TP_STRUCT__entry(
- __field( unsigned int, ret)
- ),
-
- TP_fast_assign(
- __entry->ret = ret;
- ),
-
- TP_printk("ret=%u" , __entry->ret)
-);
-
-#endif /* _TRACE_RANDOM_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ced71237b7e4..2ef9c719772a 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -48,7 +48,7 @@ TRACE_EVENT(rcu_utilization,
* RCU flavor, the grace-period number, and a string identifying the
* grace-period-related event as follows:
*
- * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
+ * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL.
* "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
* "newreq": Request a new grace period.
* "start": Start a grace period.
@@ -74,17 +74,17 @@ TRACE_EVENT_RCU(rcu_grace_period,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %s",
+ TP_printk("%s %ld %s",
__entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
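
The gp_seq fields throughout this file move from unsigned long to long:
RCU starts its grace-period sequence just below the unsigned wrap point
so that wrap bugs surface early, and %ld renders such values as small
negative numbers instead of twenty-digit unsigned ones (the rationale I
infer from the consistent (long) casts). The effect in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long gp_seq = 0UL - 1200UL;	/* illustrative early value */

	printf("%lu vs %ld\n", gp_seq, (long)gp_seq);
	/* 18446744073709550416 vs -1200 (on 64-bit) */
	return 0;
}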
@@ -114,8 +114,8 @@ TRACE_EVENT_RCU(rcu_future_grace_period,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
- __field(unsigned long, gp_seq_req)
+ __field(long, gp_seq)
+ __field(long, gp_seq_req)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -124,16 +124,16 @@ TRACE_EVENT_RCU(rcu_future_grace_period,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
- __entry->gp_seq_req = gp_seq_req;
+ __entry->gp_seq = (long)gp_seq;
+ __entry->gp_seq_req = (long)gp_seq_req;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %lu %u %d %d %s",
- __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+ TP_printk("%s %ld %ld %u %d %d %s",
+ __entry->rcuname, (long)__entry->gp_seq, (long)__entry->gp_seq_req, __entry->level,
__entry->grplo, __entry->grphi, __entry->gpevent)
);
@@ -153,7 +153,7 @@ TRACE_EVENT_RCU(rcu_grace_period_init,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -162,14 +162,14 @@ TRACE_EVENT_RCU(rcu_grace_period_init,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->qsmask = qsmask;
),
- TP_printk("%s %lu %u %d %d %lx",
+ TP_printk("%s %ld %u %d %d %lx",
__entry->rcuname, __entry->gp_seq, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
@@ -197,17 +197,17 @@ TRACE_EVENT_RCU(rcu_exp_grace_period,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpseq)
+ __field(long, gpseq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpseq = gpseq;
+ __entry->gpseq = (long)gpseq;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %s",
+ TP_printk("%s %ld %s",
__entry->rcuname, __entry->gpseq, __entry->gpevent)
);
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
* "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ * "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
* "WokeEmpty": rcuo CB kthread woke to find empty list.
*/
TRACE_EVENT_RCU(rcu_nocb_wake,
@@ -316,17 +317,17 @@ TRACE_EVENT_RCU(rcu_preempt_task,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d",
+ TP_printk("%s %ld %d",
__entry->rcuname, __entry->gp_seq, __entry->pid)
);
@@ -343,17 +344,17 @@ TRACE_EVENT_RCU(rcu_unlock_preempted_task,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
+ TP_printk("%s %ld %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -374,7 +375,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
@@ -385,7 +386,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
@@ -394,7 +395,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report,
__entry->gp_tasks = gp_tasks;
),
- TP_printk("%s %lu %lx>%lx %u %d %d %u",
+ TP_printk("%s %ld %lx>%lx %u %d %d %u",
__entry->rcuname, __entry->gp_seq,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
@@ -415,23 +416,51 @@ TRACE_EVENT_RCU(rcu_fqs,
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gp_seq)
+ __field(long, gp_seq)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gp_seq = gp_seq;
+ __entry->gp_seq = (long)gp_seq;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
- TP_printk("%s %lu %d %s",
+ TP_printk("%s %ld %d %s",
__entry->rcuname, __entry->gp_seq,
__entry->cpu, __entry->qsevent)
);
+/*
+ * Tracepoint for RCU stall events. Takes a string identifying the RCU flavor
+ * and a string identifying which function detected the RCU stall as follows:
+ *
+ * "StallDetected": Scheduler-tick detects other CPU's stalls.
+ * "SelfDetected": Scheduler-tick detects a current CPU's stall.
+ * "ExpeditedStall": Expedited grace period detects stalls.
+ */
+TRACE_EVENT(rcu_stall_warning,
+
+ TP_PROTO(const char *rcuname, const char *msg),
+
+ TP_ARGS(rcuname, msg),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(const char *, msg)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->msg = msg;
+ ),
+
+ TP_printk("%s %s",
+ __entry->rcuname, __entry->msg)
+);
+
#endif /* #if defined(CONFIG_TREE_RCU) */
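[Editor's note: each TRACE_EVENT() above generates a trace_<name>() static inline for kernel code to call. For the new stall event, a call site would look roughly like this (a sketch; actual RCU call sites pass tracepoint strings via the TPS() helper, and the flavor/message strings here are examples from the comment above).]

trace_rcu_stall_warning("rcu_preempt", "StallDetected");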
/*
@@ -505,6 +534,32 @@ TRACE_EVENT_RCU(rcu_callback,
__entry->qlen)
);
+TRACE_EVENT_RCU(rcu_segcb_stats,
+
+ TP_PROTO(struct rcu_segcblist *rs, const char *ctx),
+
+ TP_ARGS(rs, ctx),
+
+ TP_STRUCT__entry(
+ __field(const char *, ctx)
+ __array(unsigned long, gp_seq, RCU_CBLIST_NSEGS)
+ __array(long, seglen, RCU_CBLIST_NSEGS)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ memcpy(__entry->seglen, rs->seglen, RCU_CBLIST_NSEGS * sizeof(long));
+ memcpy(__entry->gp_seq, rs->gp_seq, RCU_CBLIST_NSEGS * sizeof(unsigned long));
+
+ ),
+
+ TP_printk("%s seglen: (DONE=%ld, WAIT=%ld, NEXT_READY=%ld, NEXT=%ld) "
+ "gp_seq: (DONE=%lu, WAIT=%lu, NEXT_READY=%lu, NEXT=%lu)", __entry->ctx,
+ __entry->seglen[0], __entry->seglen[1], __entry->seglen[2], __entry->seglen[3],
+ __entry->gp_seq[0], __entry->gp_seq[1], __entry->gp_seq[2], __entry->gp_seq[3])
+
+);
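[Editor's note: the __array()/memcpy() pairing above copies a whole fixed-size segment array into the ring-buffer record in one call; the four fields in TP_printk correspond to the four callback-list segments (RCU_CBLIST_NSEGS is 4: DONE, WAIT, NEXT_READY, NEXT). A runnable userspace analogue of the pattern, with NSEGS standing in for RCU_CBLIST_NSEGS:]

#include <stdio.h>
#include <string.h>

#define NSEGS 4

struct entry {
	long seglen[NSEGS];
};

int main(void)
{
	long src[NSEGS] = { 1, 2, 3, 4 };	/* stand-in for rs->seglen */
	struct entry e;

	/* Same pattern as the TP_fast_assign above: copy the whole
	 * fixed-size array into the record in one memcpy(). */
	memcpy(e.seglen, src, NSEGS * sizeof(long));
	printf("DONE=%ld WAIT=%ld NEXT_READY=%ld NEXT=%ld\n",
	       e.seglen[0], e.seglen[1], e.seglen[2], e.seglen[3]);
	return 0;
}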
+
/*
* Tracepoint for the registration of a single RCU callback of the special
* kvfree() form. The first argument is the RCU type, the second argument
@@ -713,7 +768,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
TP_ARGS(rcutorturename, rhp, secs, c_old, c),
TP_STRUCT__entry(
- __field(char, rcutorturename[RCUTORTURENAME_LEN])
+ __array(char, rcutorturename, RCUTORTURENAME_LEN)
__field(struct rcu_head *, rhp)
__field(unsigned long, secs)
__field(unsigned long, c_old)
@@ -721,9 +776,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
),
TP_fast_assign(
- strncpy(__entry->rcutorturename, rcutorturename,
- RCUTORTURENAME_LEN);
- __entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0;
+ strscpy(__entry->rcutorturename, rcutorturename, RCUTORTURENAME_LEN);
__entry->rhp = rhp;
__entry->secs = secs;
__entry->c_old = c_old;
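[Editor's note: the strncpy()-to-strscpy() conversion above drops the manual NUL store because strscpy() always terminates the destination. A userspace sketch of the semantics; this local helper mimics the kernel's strscpy() for illustration only and returns the truncated length rather than the kernel's -E2BIG.]

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t strscpy(char *dst, const char *src, size_t count)
{
	size_t len = strnlen(src, count);

	if (len >= count)
		len = count - 1;	/* truncate, keep room for NUL */
	memcpy(dst, src, len);
	dst[len] = '\0';		/* always terminated */
	return (ssize_t)len;
}

int main(void)
{
	char buf[8];

	strscpy(buf, "rcu_torture_read", sizeof(buf));
	printf("%s\n", buf);	/* prints "rcu_tor" */
	return 0;
}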
@@ -739,16 +792,15 @@ TRACE_EVENT_RCU(rcu_torture_read,
* Tracepoint for rcu_barrier() execution. The string "s" describes
* the rcu_barrier phase:
* "Begin": rcu_barrier() started.
+ * "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "EarlyExit": rcu_barrier() piggybacked, thus early exit.
* "Inc1": rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
- * "OnlineQ": rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
- * "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": rcu_barrier() piggyback check counter incremented.
+ * "NQ": rcu_barrier() found a CPU with no callbacks.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
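[Editor's note: for reference, a sketch of how one of these phase strings reaches the event. Argument names are assumed from the comment above (phase string, CPU or -1, remaining-callback count, piggyback counter); the flavor name and the done variable are hypothetical.]

trace_rcu_barrier("rcu_preempt", "Begin", -1, 0, done);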
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index ffdbe6f85da8..ba2d96a1bc2f 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -8,11 +8,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcgss
-#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCGSS_H
#include <linux/tracepoint.h>
+#include <trace/misc/sunrpc.h>
+
/**
** GSS-API related trace events
**/
@@ -99,7 +101,7 @@ DECLARE_EVENT_CLASS(rpcgss_gssapi_event,
__entry->maj_stat = maj_stat;
),
- TP_printk("task:%u@%u maj_stat=%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " maj_stat=%s",
__entry->task_id, __entry->client_id,
__entry->maj_stat == 0 ?
"GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
@@ -152,7 +154,7 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class,
TP_fast_assign(
__entry->cred = gc;
__entry->service = gc->gc_service;
- __assign_str(principal, gc->gc_principal)
+ __assign_str(principal, gc->gc_principal);
),
TP_printk("cred=%p service=%s principal='%s'",
@@ -204,8 +206,30 @@ DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
), \
TP_ARGS(rqstp, maj_stat))
+DEFINE_SVC_GSSAPI_EVENT(wrap);
DEFINE_SVC_GSSAPI_EVENT(unwrap);
DEFINE_SVC_GSSAPI_EVENT(mic);
+DEFINE_SVC_GSSAPI_EVENT(get_mic);
+
+TRACE_EVENT(rpcgss_svc_wrap_failed,
+ TP_PROTO(
+ const struct svc_rqst *rqstp
+ ),
+
+ TP_ARGS(rqstp),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+);
TRACE_EVENT(rpcgss_svc_unwrap_failed,
TP_PROTO(
@@ -332,7 +356,8 @@ TRACE_EVENT(rpcgss_unwrap_failed,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u", __entry->task_id, __entry->client_id)
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
+ __entry->task_id, __entry->client_id)
);
TRACE_EVENT(rpcgss_bad_seqno,
@@ -358,7 +383,8 @@ TRACE_EVENT(rpcgss_bad_seqno,
__entry->received = received;
),
- TP_printk("task:%u@%u expected seqno %u, received seqno %u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " expected seqno %u, received seqno %u",
__entry->task_id, __entry->client_id,
__entry->expected, __entry->received)
);
@@ -386,7 +412,7 @@ TRACE_EVENT(rpcgss_seqno,
__entry->seqno = rqst->rq_seqno;
),
- TP_printk("task:%u@%u xid=0x%08x seqno=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno)
);
@@ -418,7 +444,8 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno, __entry->seq_xmit,
__entry->ret ? "" : "un")
@@ -452,7 +479,8 @@ TRACE_EVENT(rpcgss_update_slack,
__entry->verfsize = auth->au_verfsize;
),
- TP_printk("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->auth, __entry->rslack, __entry->ralign,
__entry->verfsize)
@@ -535,7 +563,7 @@ TRACE_EVENT(rpcgss_upcall_msg,
),
TP_fast_assign(
- __assign_str(msg, buf)
+ __assign_str(msg, buf);
),
TP_printk("msg='%s'", __get_str(msg))
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index abe942225637..027ac3ab457d 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -13,12 +13,45 @@
#include <linux/scatterlist.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
-#include <trace/events/rdma.h>
+#include <rdma/ib_cm.h>
+
+#include <trace/misc/rdma.h>
+#include <trace/misc/sunrpc.h>
/**
** Event classes
**/
+DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
+ TP_PROTO(
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ ),
+
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
+ )
+);
+
+#define DEFINE_SIMPLE_CID_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
+ TP_PROTO( \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(cid) \
+ )
+
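[Editor's note: the class/instance split above keeps one record format shared by many events. Each later DEFINE_SIMPLE_CID_EVENT(name) emits a trace_<name>() inline; a hypothetical caller (the rep variable and its rr_cid field are assumptions, not quoted from this patch) would look like:]

trace_xprtrdma_post_recv(&rep->rr_cid);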
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
TP_PROTO(
const struct ib_wc *wc,
@@ -59,7 +92,200 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class,
), \
TP_ARGS(wc, cid))
-DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ __entry->vendor_err = wc->vendor_err;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_SEND_FLUSH_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_send_flush_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
+ ),
+
+ TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_MR_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_mr_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, received)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status) {
+ __entry->received = 0;
+ __entry->vendor_err = wc->vendor_err;
+ } else {
+ __entry->received = wc->byte_len;
+ __entry->vendor_err = 0;
+ }
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err,
+ __entry->received
+ )
+);
+
+#define DEFINE_RECEIVE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, received)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->received = wc->byte_len;
+ ),
+
+ TP_printk("cq.id=%u cid=%d received=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->received
+ )
+);
+
+#define DEFINE_RECEIVE_SUCCESS_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_success_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ __entry->vendor_err = wc->vendor_err;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_RECEIVE_FLUSH_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_receive_flush_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
+DECLARE_EVENT_CLASS(xprtrdma_reply_class,
TP_PROTO(
const struct rpcrdma_rep *rep
),
@@ -67,29 +293,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event,
TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, rep)
- __field(const void *, r_xprt)
__field(u32, xid)
__field(u32, version)
__field(u32, proc)
+ __string(addr, rpcrdma_addrstr(rep->rr_rxprt))
+ __string(port, rpcrdma_portstr(rep->rr_rxprt))
),
TP_fast_assign(
- __entry->rep = rep;
- __entry->r_xprt = rep->rr_rxprt;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->version = be32_to_cpu(rep->rr_vers);
__entry->proc = be32_to_cpu(rep->rr_proc);
+ __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
+ __assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
),
- TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
- __entry->r_xprt, __entry->xid, __entry->rep,
- __entry->version, __entry->proc
+ TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
+ __get_str(addr), __get_str(port),
+ __entry->xid, __entry->version, __entry->proc
)
);
#define DEFINE_REPLY_EVENT(name) \
- DEFINE_EVENT(xprtrdma_reply_event, name, \
+ DEFINE_EVENT(xprtrdma_reply_class, \
+ xprtrdma_reply_##name##_err, \
TP_PROTO( \
const struct rpcrdma_rep *rep \
), \
@@ -103,19 +330,17 @@ DECLARE_EVENT_CLASS(xprtrdma_rxprt,
TP_ARGS(r_xprt),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p",
- __get_str(addr), __get_str(port), __entry->r_xprt
+ TP_printk("peer=[%s]:%s",
+ __get_str(addr), __get_str(port)
)
);
@@ -135,7 +360,6 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
TP_ARGS(r_xprt, rc),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(int, rc)
__field(int, connect_status)
__string(addr, rpcrdma_addrstr(r_xprt))
@@ -143,15 +367,14 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->rc = rc;
__entry->connect_status = r_xprt->rx_ep->re_connect_status;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
- __get_str(addr), __get_str(port), __entry->r_xprt,
+ TP_printk("peer=[%s]:%s rc=%d connection status=%d",
+ __get_str(addr), __get_str(port),
__entry->rc, __entry->connect_status
)
);
@@ -196,7 +419,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " pos=%u %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->pos, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
@@ -243,7 +467,8 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %u@0x%016llx:0x%08x (%s)",
__entry->task_id, __entry->client_id,
__entry->length, (unsigned long long)__entry->offset,
__entry->handle,
@@ -260,54 +485,74 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
), \
TP_ARGS(task, mr, nsegs))
-DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define xprtrdma_show_direction(x) \
+ __print_symbolic(x, \
+ { DMA_BIDIRECTIONAL, "BIDIR" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
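[Editor's note: TRACE_DEFINE_ENUM() exports the DMA direction values so trace consumers can decode the __print_symbolic() output above. A runnable userspace analogue of the mapping, illustrative only:]

#include <stdio.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL,	/* 0 */
	DMA_TO_DEVICE,		/* 1 */
	DMA_FROM_DEVICE,	/* 2 */
	DMA_NONE,		/* 3 */
};

/* Userspace stand-in for xprtrdma_show_direction() */
static const char *show_direction(enum dma_data_direction d)
{
	switch (d) {
	case DMA_BIDIRECTIONAL:	return "BIDIR";
	case DMA_TO_DEVICE:	return "TO_DEVICE";
	case DMA_FROM_DEVICE:	return "FROM_DEVICE";
	case DMA_NONE:		return "NONE";
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", show_direction(DMA_FROM_DEVICE)); /* FROM_DEVICE */
	return 0;
}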
+DECLARE_EVENT_CLASS(xprtrdma_mr_class,
TP_PROTO(
- const struct ib_wc *wc,
- const struct rpcrdma_frwr *frwr
+ const struct rpcrdma_mr *mr
),
- TP_ARGS(wc, frwr),
+ TP_ARGS(mr),
TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(u32, mr_id)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(u32, dir)
),
TP_fast_assign(
- __entry->mr_id = frwr->fr_mr->res.id;
- __entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ const struct rpcrdma_req *req = mr->mr_req;
+
+ if (req) {
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ } else {
+ __entry->task_id = 0;
+ __entry->client_id = -1;
+ }
+ __entry->mr_id = mr->mr_ibmr->res.id;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
),
- TP_printk(
- "mr.id=%u: %s (%u/0x%x)",
- __entry->mr_id, rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
+ __entry->mr_id, __entry->nents, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir)
)
);
-#define DEFINE_FRWR_DONE_EVENT(name) \
- DEFINE_EVENT(xprtrdma_frwr_done, name, \
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr_class, \
+ xprtrdma_mr_##name, \
TP_PROTO( \
- const struct ib_wc *wc, \
- const struct rpcrdma_frwr *frwr \
+ const struct rpcrdma_mr *mr \
), \
- TP_ARGS(wc, frwr))
-
-TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
-TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
-TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
-TRACE_DEFINE_ENUM(DMA_NONE);
-
-#define xprtrdma_show_direction(x) \
- __print_symbolic(x, \
- { DMA_BIDIRECTIONAL, "BIDIR" }, \
- { DMA_TO_DEVICE, "TO_DEVICE" }, \
- { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
- { DMA_NONE, "NONE" })
+ TP_ARGS(mr))
-DECLARE_EVENT_CLASS(xprtrdma_mr,
+DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
TP_PROTO(
const struct rpcrdma_mr *mr
),
@@ -324,7 +569,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
),
TP_fast_assign(
- __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
@@ -339,45 +584,47 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
)
);
-#define DEFINE_MR_EVENT(name) \
- DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
- TP_PROTO( \
- const struct rpcrdma_mr *mr \
- ), \
+#define DEFINE_ANON_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
+ xprtrdma_mr_##name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
TP_ARGS(mr))
-DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+DECLARE_EVENT_CLASS(xprtrdma_callback_class,
TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
const struct rpc_rqst *rqst
),
- TP_ARGS(rqst),
+ TP_ARGS(r_xprt, rqst),
TP_STRUCT__entry(
- __field(const void *, rqst)
- __field(const void *, rep)
- __field(const void *, req)
__field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->rqst = rqst;
- __entry->req = rpcr_to_rdmar(rqst);
- __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
__entry->xid = be32_to_cpu(rqst->rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
- __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ TP_printk("peer=[%s]:%s xid=0x%08x",
+ __get_str(addr), __get_str(port), __entry->xid
)
);
-#define DEFINE_CB_EVENT(name) \
- DEFINE_EVENT(xprtrdma_cb_event, name, \
+#define DEFINE_CALLBACK_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_callback_class, \
+ xprtrdma_cb_##name, \
TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt, \
const struct rpc_rqst *rqst \
), \
- TP_ARGS(rqst))
+ TP_ARGS(r_xprt, rqst))
/**
** Connection events
@@ -423,7 +670,6 @@ DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
-DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
TRACE_EVENT(xprtrdma_op_connect,
TP_PROTO(
@@ -434,22 +680,19 @@ TRACE_EVENT(xprtrdma_op_connect,
TP_ARGS(r_xprt, delay),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned long, delay)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->delay = delay;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __entry->delay
+ TP_printk("peer=[%s]:%s delay=%lu",
+ __get_str(addr), __get_str(port), __entry->delay
)
);
@@ -464,7 +707,6 @@ TRACE_EVENT(xprtrdma_op_set_cto,
TP_ARGS(r_xprt, connect, reconnect),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned long, connect)
__field(unsigned long, reconnect)
__string(addr, rpcrdma_addrstr(r_xprt))
@@ -472,51 +714,18 @@ TRACE_EVENT(xprtrdma_op_set_cto,
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->connect = connect;
__entry->reconnect = reconnect;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
- __get_str(addr), __get_str(port), __entry->r_xprt,
+ TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
+ __get_str(addr), __get_str(port),
__entry->connect / HZ, __entry->reconnect / HZ
)
);
-TRACE_EVENT(xprtrdma_qp_event,
- TP_PROTO(
- const struct rpcrdma_ep *ep,
- const struct ib_event *event
- ),
-
- TP_ARGS(ep, event),
-
- TP_STRUCT__entry(
- __field(unsigned long, event)
- __string(name, event->device->name)
- __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
- __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
- ),
-
- TP_fast_assign(
- const struct rdma_cm_id *id = ep->re_id;
-
- __entry->event = event->event;
- __assign_str(name, event->device->name);
- memcpy(__entry->srcaddr, &id->route.addr.src_addr,
- sizeof(struct sockaddr_in6));
- memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
- sizeof(struct sockaddr_in6));
- ),
-
- TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
- __entry->srcaddr, __entry->dstaddr, __get_str(name),
- rdma_show_ib_event(__entry->event), __entry->event
- )
-);
-
/**
** Call events
**/
@@ -530,86 +739,56 @@ TRACE_EVENT(xprtrdma_createmrs,
TP_ARGS(r_xprt, count),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
__field(unsigned int, count)
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->count = count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __entry->count
- )
-);
-
-TRACE_EVENT(xprtrdma_mr_get,
- TP_PROTO(
- const struct rpcrdma_req *req
- ),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field(const void *, req)
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(u32, xid)
- ),
-
- TP_fast_assign(
- const struct rpc_rqst *rqst = &req->rl_slot;
-
- __entry->req = req;
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- ),
-
- TP_printk("task:%u@%u xid=0x%08x req=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->req
+ TP_printk("peer=[%s]:%s created %u MRs",
+ __get_str(addr), __get_str(port), __entry->count
)
);
-TRACE_EVENT(xprtrdma_nomrs,
+TRACE_EVENT(xprtrdma_nomrs_err,
TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
const struct rpcrdma_req *req
),
- TP_ARGS(req),
+ TP_ARGS(r_xprt, req),
TP_STRUCT__entry(
- __field(const void *, req)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
- __entry->req = req;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("task:%u@%u xid=0x%08x req=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->req
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
+ __entry->task_id, __entry->client_id,
+ __get_str(addr), __get_str(port)
)
);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
+DEFINE_WRCH_EVENT(wp);
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
@@ -664,7 +843,8 @@ TRACE_EVENT(xprtrdma_marshal,
__entry->wtype = wtype;
),
- TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->hdrlen,
__entry->headlen, __entry->pagelen, __entry->taillen,
@@ -694,7 +874,7 @@ TRACE_EVENT(xprtrdma_marshal_failed,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
@@ -721,7 +901,7 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
__entry->ret = ret;
),
- TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
@@ -735,8 +915,8 @@ TRACE_EVENT(xprtrdma_post_send,
TP_ARGS(req),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(const void *, sc)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
@@ -745,175 +925,160 @@ TRACE_EVENT(xprtrdma_post_send,
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
+ const struct rpcrdma_sendctx *sc = req->rl_sendctx;
+ __entry->cq_id = sc->sc_cid.ci_queue_id;
+ __entry->completion_id = sc->sc_cid.ci_completion_id;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
- __entry->req = req;
- __entry->sc = req->rl_sendctx;
__entry->num_sge = req->rl_wr.num_sge;
__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
),
- TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
__entry->task_id, __entry->client_id,
- __entry->req, __entry->sc, __entry->num_sge,
- (__entry->num_sge == 1 ? "" : "s"),
+ __entry->cq_id, __entry->completion_id,
+ __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
(__entry->signaled ? "signaled" : "")
)
);
-TRACE_EVENT(xprtrdma_post_recv,
+TRACE_EVENT(xprtrdma_post_send_err,
TP_PROTO(
- const struct rpcrdma_rep *rep
+ const struct rpcrdma_xprt *r_xprt,
+ const struct rpcrdma_req *req,
+ int rc
),
- TP_ARGS(rep),
+ TP_ARGS(r_xprt, req, rc),
TP_STRUCT__entry(
- __field(const void *, rep)
+ __field(u32, cq_id)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, rc)
),
TP_fast_assign(
- __entry->rep = rep;
+ const struct rpc_rqst *rqst = &req->rl_slot;
+ const struct rpcrdma_ep *ep = r_xprt->rx_ep;
+
+ __entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client ?
+ rqst->rq_task->tk_client->cl_clid : -1;
+ __entry->rc = rc;
),
- TP_printk("rep=%p",
- __entry->rep
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->cq_id, __entry->rc
)
);
+DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
+
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
- unsigned int count,
- int status
+ unsigned int count
),
- TP_ARGS(r_xprt, count, status),
+ TP_ARGS(r_xprt, count),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
+ __field(u32, cq_id)
__field(unsigned int, count)
- __field(int, status)
__field(int, posted)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
+ const struct rpcrdma_ep *ep = r_xprt->rx_ep;
+
+ __entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->count = count;
- __entry->status = status;
- __entry->posted = r_xprt->rx_ep->re_receive_count;
+ __entry->posted = ep->re_receive_count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __entry->count, __entry->posted, __entry->status
+ TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
+ __get_str(addr), __get_str(port), __entry->cq_id,
+ __entry->count, __entry->posted
)
);
-TRACE_EVENT(xprtrdma_post_linv,
+TRACE_EVENT(xprtrdma_post_recvs_err,
TP_PROTO(
- const struct rpcrdma_req *req,
+ const struct rpcrdma_xprt *r_xprt,
int status
),
- TP_ARGS(req, status),
+ TP_ARGS(r_xprt, status),
TP_STRUCT__entry(
- __field(const void *, req)
+ __field(u32, cq_id)
__field(int, status)
- __field(u32, xid)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->req = req;
+ const struct rpcrdma_ep *ep = r_xprt->rx_ep;
+
+ __entry->cq_id = ep->re_attr.recv_cq->res.id;
__entry->status = status;
- __entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("req=%p xid=0x%08x status=%d",
- __entry->req, __entry->xid, __entry->status
+ TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
+ __get_str(addr), __get_str(port), __entry->cq_id,
+ __entry->status
)
);
-/**
- ** Completion events
- **/
-
-TRACE_EVENT(xprtrdma_wc_send,
+TRACE_EVENT(xprtrdma_post_linv_err,
TP_PROTO(
- const struct rpcrdma_sendctx *sc,
- const struct ib_wc *wc
+ const struct rpcrdma_req *req,
+ int status
),
- TP_ARGS(sc, wc),
+ TP_ARGS(req, status),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(const void *, sc)
- __field(unsigned int, unmap_count)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, status)
),
TP_fast_assign(
- __entry->req = sc->sc_req;
- __entry->sc = sc;
- __entry->unmap_count = sc->sc_unmap_count;
- __entry->status = wc->status;
- __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ const struct rpc_task *task = req->rl_slot.rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->status = status;
),
- TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
- __entry->req, __entry->sc, __entry->unmap_count,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
+ __entry->task_id, __entry->client_id, __entry->status
)
);
-TRACE_EVENT(xprtrdma_wc_receive,
- TP_PROTO(
- const struct ib_wc *wc
- ),
-
- TP_ARGS(wc),
-
- TP_STRUCT__entry(
- __field(const void *, rep)
- __field(u32, byte_len)
- __field(unsigned int, status)
- __field(u32, vendor_err)
- ),
-
- TP_fast_assign(
- __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
- rr_cqe);
- __entry->status = wc->status;
- if (wc->status) {
- __entry->byte_len = 0;
- __entry->vendor_err = wc->vendor_err;
- } else {
- __entry->byte_len = wc->byte_len;
- __entry->vendor_err = 0;
- }
- ),
+/**
+ ** Completion events
+ **/
- TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
- __entry->rep, __entry->byte_len,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
- )
-);
+DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
+DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
TRACE_EVENT(xprtrdma_frwr_alloc,
TP_PROTO(
@@ -929,7 +1094,7 @@ TRACE_EVENT(xprtrdma_frwr_alloc,
),
TP_fast_assign(
- __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->rc = rc;
),
@@ -957,7 +1122,7 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
),
TP_fast_assign(
- __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
@@ -990,7 +1155,7 @@ TRACE_EVENT(xprtrdma_frwr_sgerr,
),
TP_fast_assign(
- __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->nents = sg_nents;
@@ -1020,7 +1185,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
),
TP_fast_assign(
- __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->mr_id = mr->mr_ibmr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->num_mapped = num_mapped;
@@ -1034,11 +1199,12 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
)
);
+DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
-DEFINE_MR_EVENT(map);
-DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(reminv);
-DEFINE_MR_EVENT(recycle);
+DEFINE_MR_EVENT(map);
+
+DEFINE_ANON_MR_EVENT(unmap);
TRACE_EVENT(xprtrdma_dma_maperr,
TP_PROTO(
@@ -1066,17 +1232,14 @@ TRACE_EVENT(xprtrdma_reply,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_rep *rep,
- const struct rpcrdma_req *req,
unsigned int credits
),
- TP_ARGS(task, rep, req, credits),
+ TP_ARGS(task, rep, credits),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, rep)
- __field(const void *, req)
__field(u32, xid)
__field(unsigned int, credits)
),
@@ -1084,49 +1247,102 @@ TRACE_EVENT(xprtrdma_reply,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->rep = rep;
- __entry->req = req;
__entry->xid = be32_to_cpu(rep->rr_xid);
__entry->credits = credits;
),
- TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->credits, __entry->rep, __entry->req
+ __entry->credits
)
);
-TRACE_EVENT(xprtrdma_defer_cmp,
+DEFINE_REPLY_EVENT(vers);
+DEFINE_REPLY_EVENT(rqst);
+DEFINE_REPLY_EVENT(short);
+DEFINE_REPLY_EVENT(hdr);
+
+TRACE_EVENT(xprtrdma_err_vers,
TP_PROTO(
- const struct rpcrdma_rep *rep
+ const struct rpc_rqst *rqst,
+ __be32 *min,
+ __be32 *max
),
- TP_ARGS(rep),
+ TP_ARGS(rqst, min, max),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, rep)
__field(u32, xid)
+ __field(u32, min)
+ __field(u32, max)
),
TP_fast_assign(
- __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
- __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
- __entry->rep = rep;
- __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->min = be32_to_cpup(min);
+ __entry->max = be32_to_cpup(max);
),
- TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->rep
+ __entry->min, __entry->max
+ )
+);
+
+TRACE_EVENT(xprtrdma_err_chunk,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
+ __entry->task_id, __entry->client_id, __entry->xid
)
);
-DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
-DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
-DEFINE_REPLY_EVENT(xprtrdma_reply_short);
-DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+TRACE_EVENT(xprtrdma_err_unrecognized,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ __be32 *procedure
+ ),
+
+ TP_ARGS(rqst, procedure),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, procedure)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->procedure = be32_to_cpup(procedure);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->procedure
+ )
+);
TRACE_EVENT(xprtrdma_fixup,
TP_PROTO(
@@ -1154,7 +1370,7 @@ TRACE_EVENT(xprtrdma_fixup,
__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
),
- TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
__entry->task_id, __entry->client_id, __entry->fixup,
__entry->headlen, __entry->pagelen, __entry->taillen
)
@@ -1187,65 +1403,25 @@ TRACE_EVENT(xprtrdma_decode_seg,
)
);
-/**
- ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
- **/
-
-TRACE_EVENT(xprtrdma_op_allocate,
- TP_PROTO(
- const struct rpc_task *task,
- const struct rpcrdma_req *req
- ),
-
- TP_ARGS(task, req),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(const void *, req)
- __field(size_t, callsize)
- __field(size_t, rcvsize)
- ),
-
- TP_fast_assign(
- __entry->task_id = task->tk_pid;
- __entry->client_id = task->tk_client->cl_clid;
- __entry->req = req;
- __entry->callsize = task->tk_rqstp->rq_callsize;
- __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
- ),
-
- TP_printk("task:%u@%u req=%p (%zu, %zu)",
- __entry->task_id, __entry->client_id,
- __entry->req, __entry->callsize, __entry->rcvsize
- )
-);
-
-TRACE_EVENT(xprtrdma_op_free,
+TRACE_EVENT(xprtrdma_mrs_zap,
TP_PROTO(
- const struct rpc_task *task,
- const struct rpcrdma_req *req
+ const struct rpc_task *task
),
- TP_ARGS(task, req),
+ TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, req)
- __field(const void *, rep)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->req = req;
- __entry->rep = req->rl_reply;
),
- TP_printk("task:%u@%u req=%p rep=%p",
- __entry->task_id, __entry->client_id,
- __entry->req, __entry->rep
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
+ __entry->task_id, __entry->client_id
)
);
@@ -1262,55 +1438,24 @@ TRACE_EVENT(xprtrdma_cb_setup,
TP_ARGS(r_xprt, reqs),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned int, reqs)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
__entry->reqs = reqs;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
- __get_str(addr), __get_str(port),
- __entry->r_xprt, __entry->reqs
+ TP_printk("peer=[%s]:%s %u reqs",
+ __get_str(addr), __get_str(port), __entry->reqs
)
);
-DEFINE_CB_EVENT(xprtrdma_cb_call);
-DEFINE_CB_EVENT(xprtrdma_cb_reply);
-
-TRACE_EVENT(xprtrdma_leaked_rep,
- TP_PROTO(
- const struct rpc_rqst *rqst,
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rqst, rep),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(u32, xid)
- __field(const void *, rep)
- ),
-
- TP_fast_assign(
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->rep = rep;
- ),
-
- TP_printk("task:%u@%u xid=0x%08x rep=%p",
- __entry->task_id, __entry->client_id, __entry->xid,
- __entry->rep
- )
-);
+DEFINE_CALLBACK_EVENT(call);
+DEFINE_CALLBACK_EVENT(reply);
/**
** Server-side RPC/RDMA events
@@ -1472,101 +1617,112 @@ DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
-DECLARE_EVENT_CLASS(svcrdma_segment_event,
+TRACE_EVENT(svcrdma_encode_wseg,
TP_PROTO(
+ const struct svc_rdma_send_ctxt *ctxt,
+ u32 segno,
u32 handle,
u32 length,
u64 offset
),
- TP_ARGS(handle, length, offset),
+ TP_ARGS(ctxt, segno, handle, length, offset),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, segno)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
),
TP_fast_assign(
+ __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->sc_cid.ci_completion_id;
+ __entry->segno = segno;
__entry->handle = handle;
__entry->length = length;
__entry->offset = offset;
),
- TP_printk("%u@0x%016llx:0x%08x",
- __entry->length, (unsigned long long)__entry->offset,
- __entry->handle
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->segno, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle
)
);
-#define DEFINE_SEGMENT_EVENT(name) \
- DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
- TP_PROTO( \
- u32 handle, \
- u32 length, \
- u64 offset \
- ), \
- TP_ARGS(handle, length, offset))
-
-DEFINE_SEGMENT_EVENT(decode_wseg);
-DEFINE_SEGMENT_EVENT(encode_rseg);
-DEFINE_SEGMENT_EVENT(send_rseg);
-DEFINE_SEGMENT_EVENT(encode_wseg);
-DEFINE_SEGMENT_EVENT(send_wseg);
-
-DECLARE_EVENT_CLASS(svcrdma_chunk_event,
+TRACE_EVENT(svcrdma_decode_rseg,
TP_PROTO(
- u32 length
+ const struct rpc_rdma_cid *cid,
+ const struct svc_rdma_chunk *chunk,
+ const struct svc_rdma_segment *segment
),
- TP_ARGS(length),
+ TP_ARGS(cid, chunk, segment),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, segno)
+ __field(u32, position)
+ __field(u32, handle)
__field(u32, length)
+ __field(u64, offset)
),
TP_fast_assign(
- __entry->length = length;
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->segno = chunk->ch_segcount;
+ __entry->position = chunk->ch_position;
+ __entry->handle = segment->rs_handle;
+ __entry->length = segment->rs_length;
+ __entry->offset = segment->rs_offset;
),
- TP_printk("length=%u",
- __entry->length
+ TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->segno, __entry->position, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle
)
);
-#define DEFINE_CHUNK_EVENT(name) \
- DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \
- TP_PROTO( \
- u32 length \
- ), \
- TP_ARGS(length))
-
-DEFINE_CHUNK_EVENT(send_pzr);
-DEFINE_CHUNK_EVENT(encode_write_chunk);
-DEFINE_CHUNK_EVENT(send_write_chunk);
-DEFINE_CHUNK_EVENT(encode_read_chunk);
-DEFINE_CHUNK_EVENT(send_reply_chunk);
-
-TRACE_EVENT(svcrdma_send_read_chunk,
+TRACE_EVENT(svcrdma_decode_wseg,
TP_PROTO(
- u32 length,
- u32 position
+ const struct rpc_rdma_cid *cid,
+ const struct svc_rdma_chunk *chunk,
+ u32 segno
),
- TP_ARGS(length, position),
+ TP_ARGS(cid, chunk, segno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(u32, segno)
+ __field(u32, handle)
__field(u32, length)
- __field(u32, position)
+ __field(u64, offset)
),
TP_fast_assign(
- __entry->length = length;
- __entry->position = position;
+ const struct svc_rdma_segment *segment =
+ &chunk->ch_segments[segno];
+
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->segno = segno;
+ __entry->handle = segment->rs_handle;
+ __entry->length = segment->rs_length;
+ __entry->offset = segment->rs_offset;
),
- TP_printk("length=%u position=%u",
- __entry->length, __entry->position
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->segno, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle
)
);
@@ -1606,29 +1762,29 @@ DEFINE_ERROR_EVENT(chunk);
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
u64 dma_addr,
u32 length
),
- TP_ARGS(rdma, dma_addr, length),
+ TP_ARGS(cid, dma_addr, length),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u64, dma_addr)
__field(u32, length)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->dma_addr = dma_addr;
__entry->length = length;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
- __get_str(addr), __get_str(device),
+ TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->dma_addr, __entry->length
)
);
@@ -1636,45 +1792,51 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
#define DEFINE_SVC_DMA_EVENT(name) \
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
TP_PROTO( \
- const struct svcxprt_rdma *rdma,\
+ const struct rpc_rdma_cid *cid, \
u64 dma_addr, \
u32 length \
), \
- TP_ARGS(rdma, dma_addr, length))
+ TP_ARGS(cid, dma_addr, length) \
+ )
DEFINE_SVC_DMA_EVENT(dma_map_page);
+DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ u64 offset,
+ u32 handle,
unsigned int nents,
int status
),
- TP_ARGS(rdma, nents, status),
+ TP_ARGS(rdma, offset, handle, nents, status),
TP_STRUCT__entry(
- __field(int, status)
+ __field(u32, cq_id)
+ __field(u32, handle)
+ __field(u64, offset)
__field(unsigned int, nents)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ __field(int, status)
),
TP_fast_assign(
- __entry->status = status;
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
+ __entry->handle = handle;
+ __entry->offset = offset;
__entry->nents = nents;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __entry->status = status;
),
- TP_printk("addr=%s device=%s nents=%u status=%d",
- __get_str(addr), __get_str(device), __entry->nents,
- __entry->status
+ TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
+ __entry->cq_id, (unsigned long long)__entry->offset,
+ __entry->handle, __entry->nents, __entry->status
)
);
-TRACE_EVENT(svcrdma_no_rwctx_err,
+TRACE_EVENT(svcrdma_rwctx_empty,
TP_PROTO(
const struct svcxprt_rdma *rdma,
unsigned int num_sges
@@ -1683,98 +1845,104 @@ TRACE_EVENT(svcrdma_no_rwctx_err,
TP_ARGS(rdma, num_sges),
TP_STRUCT__entry(
+ __field(u32, cq_id)
__field(unsigned int, num_sges)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->num_sges = num_sges;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s num_sges=%d",
- __get_str(addr), __get_str(device), __entry->num_sges
+ TP_printk("cq.id=%u num_sges=%d",
+ __entry->cq_id, __entry->num_sges
)
);
TRACE_EVENT(svcrdma_page_overrun_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
- const struct svc_rqst *rqst,
+ const struct rpc_rdma_cid *cid,
unsigned int pageno
),
- TP_ARGS(rdma, rqst, pageno),
+ TP_ARGS(cid, pageno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, pageno)
- __field(u32, xid)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->pageno = pageno;
- __entry->xid = __be32_to_cpu(rqst->rq_xid);
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
- __get_str(device), __entry->xid, __entry->pageno
+ TP_printk("cq.id=%u cid=%d pageno=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->pageno
)
);
TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
unsigned int remaining,
unsigned int seg_no,
unsigned int num_segs
),
- TP_ARGS(rdma, remaining, seg_no, num_segs),
+ TP_ARGS(cid, remaining, seg_no, num_segs),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, remaining)
__field(unsigned int, seg_no)
__field(unsigned int, num_segs)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->remaining = remaining;
__entry->seg_no = seg_no;
__entry->num_segs = num_segs;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
- __get_str(addr), __get_str(device), __entry->remaining,
- __entry->seg_no, __entry->num_segs
+ TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->remaining, __entry->seg_no, __entry->num_segs
)
);
TRACE_EVENT(svcrdma_send_pullup,
TP_PROTO(
- unsigned int len
+ const struct svc_rdma_send_ctxt *ctxt,
+ unsigned int msglen
),
- TP_ARGS(len),
+ TP_ARGS(ctxt, msglen),
TP_STRUCT__entry(
- __field(unsigned int, len)
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned int, hdrlen)
+ __field(unsigned int, msglen)
),
TP_fast_assign(
- __entry->len = len;
+ __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->sc_cid.ci_completion_id;
+ __entry->hdrlen = ctxt->sc_hdrbuf.len;
+ __entry->msglen = msglen;
),
- TP_printk("len=%u", __entry->len)
+ TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
+ __entry->cq_id, __entry->completion_id,
+ __entry->hdrlen, __entry->msglen,
+ __entry->hdrlen + __entry->msglen)
);
TRACE_EVENT(svcrdma_send_err,
@@ -1826,37 +1994,21 @@ TRACE_EVENT(svcrdma_post_send,
wr->ex.invalidate_rkey : 0;
),
- TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->num_sge, __entry->inv_rkey
)
);
-DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
-TRACE_EVENT(svcrdma_post_recv,
- TP_PROTO(
- const struct svc_rdma_recv_ctxt *ctxt
- ),
-
- TP_ARGS(ctxt),
+DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = ctxt->rc_cid.ci_queue_id;
- __entry->completion_id = ctxt->rc_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
-
-DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
+DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
+DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
+DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
TRACE_EVENT(svcrdma_rq_post_err,
TP_PROTO(
@@ -1881,7 +2033,7 @@ TRACE_EVENT(svcrdma_rq_post_err,
)
);
-TRACE_EVENT(svcrdma_post_chunk,
+DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
TP_PROTO(
const struct rpc_rdma_cid *cid,
int sqecount
@@ -1907,8 +2059,68 @@ TRACE_EVENT(svcrdma_post_chunk,
)
);
-DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
-DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
+#define DEFINE_POST_CHUNK_EVENT(name) \
+ DEFINE_EVENT(svcrdma_post_chunk_class, \
+ svcrdma_post_##name##_chunk, \
+ TP_PROTO( \
+ const struct rpc_rdma_cid *cid, \
+ int sqecount \
+ ), \
+ TP_ARGS(cid, sqecount))
+
+DEFINE_POST_CHUNK_EVENT(read);
+DEFINE_POST_CHUNK_EVENT(write);
+DEFINE_POST_CHUNK_EVENT(reply);
+
+DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
+ TP_PROTO(
+ const struct rpc_rdma_cid *cid,
+ int sqecount
+ ),
+ TP_ARGS(cid, sqecount)
+);
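[Editor's note: DEFINE_POST_CHUNK_EVENT() pastes its argument into the event name, so DEFINE_POST_CHUNK_EVENT(read) produces svcrdma_post_read_chunk sharing svcrdma_post_chunk_class's format. A hypothetical call site; the cc variable and its fields are assumptions based on the event's arguments:]

trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);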
+
+TRACE_EVENT(svcrdma_wc_read,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid,
+ unsigned int totalbytes,
+ const ktime_t posttime
+ ),
+
+ TP_ARGS(wc, cid, totalbytes, posttime),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(s64, read_latency)
+ __field(unsigned int, totalbytes)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->totalbytes = totalbytes;
+ __entry->read_latency = ktime_us_delta(ktime_get(), posttime);
+ ),
+
+ TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
+ __entry->cq_id, __entry->completion_id,
+ __entry->totalbytes, __entry->read_latency
+ )
+);
+
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
+
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
+
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_reply);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_flush);
+DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_err);
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
@@ -1937,65 +2149,74 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma
+ const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid
),
- TP_ARGS(rdma),
+ TP_ARGS(rdma, cid),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d",
- __get_str(addr), __entry->avail, __entry->depth
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth
)
);
#define DEFINE_SQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
- TP_PROTO( \
- const struct svcxprt_rdma *rdma \
- ), \
- TP_ARGS(rdma))
+ DEFINE_EVENT(svcrdma_sendqueue_class, name, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(rdma, cid) \
+ )
-DEFINE_SQ_EVENT(full);
-DEFINE_SQ_EVENT(retry);
+DEFINE_SQ_EVENT(svcrdma_sq_full);
+DEFINE_SQ_EVENT(svcrdma_sq_retry);
TRACE_EVENT(svcrdma_sq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, cid, status),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
__field(int, status)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
- __get_str(addr), __entry->avail, __entry->depth,
- __entry->status
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth, __entry->status
)
);
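[Editorial sketch: the common thread in the svcrdma changes above is
replacing string-valued fields (device name, remote address) with a
two-integer completion ID. A minimal sketch of that identifier, assuming
field names as they appear in the trace output (the kernel's actual
definition lives elsewhere in the sunrpc headers):

	struct rpc_rdma_cid {
		u32	ci_queue_id;		/* reported as cq.id=%u */
		int	ci_completion_id;	/* reported as cid=%d */
	};

Recording two integers in TP_fast_assign() is cheaper than having
__assign_str() copy a formatted address into the ring buffer, and the
pair still uniquely ties a trace line to a completion queue entry.]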
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
index 3c716214dab1..bd120e23ce12 100644
--- a/include/trace/events/rpm.h
+++ b/include/trace/events/rpm.h
@@ -101,6 +101,48 @@ TRACE_EVENT(rpm_return_int,
__entry->ret)
);
+#define RPM_STATUS_STRINGS \
+ EM(RPM_INVALID, "RPM_INVALID") \
+ EM(RPM_ACTIVE, "RPM_ACTIVE") \
+ EM(RPM_RESUMING, "RPM_RESUMING") \
+ EM(RPM_SUSPENDED, "RPM_SUSPENDED") \
+ EMe(RPM_SUSPENDING, "RPM_SUSPENDING")
+
+/* Enum values must be exported to userspace so user tools can parse them. */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+RPM_STATUS_STRINGS
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(rpm_status,
+ TP_PROTO(struct device *dev, enum rpm_status status),
+ TP_ARGS(dev, status),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->status = status;
+ ),
+
+ TP_printk("%s status=%s", __get_str(name),
+ __print_symbolic(__entry->status, RPM_STATUS_STRINGS))
+);
+
#endif /* _TRACE_RUNTIME_POWER_H */
/* This part must be outside protection */
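[Editorial sketch: the EM()/EMe() pattern above is an X-macro — one list
expanded twice under different macro definitions, first into enum
registrations and then into { value, string } pairs for
__print_symbolic(). A standalone userspace analogue (all names invented
for illustration) shows the mechanics:

	#include <stdio.h>

	#define STATUS_STRINGS \
		EM(ST_ACTIVE,     "ACTIVE")    \
		EM(ST_RESUMING,   "RESUMING")  \
		EMe(ST_SUSPENDED, "SUSPENDED")

	/* First pass: expand the list into enum members. */
	#define EM(a, b)  a,
	#define EMe(a, b) a
	enum status { STATUS_STRINGS };
	#undef EM
	#undef EMe

	/* Second pass: expand the same list into value/name pairs. */
	#define EM(a, b)  { a, b },
	#define EMe(a, b) { a, b }
	static const struct { int val; const char *name; } status_names[] = {
		STATUS_STRINGS
	};

	int main(void)
	{
		enum status s = ST_RESUMING;
		unsigned int i;

		for (i = 0; i < sizeof(status_names) / sizeof(status_names[0]); i++)
			if (status_names[i].val == (int)s)
				printf("status=%s\n", status_names[i].name);
		return 0;
	}
]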
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
index a04a64bc1a00..823b47d1ba1e 100644
--- a/include/trace/events/rseq.h
+++ b/include/trace/events/rseq.h
@@ -16,13 +16,18 @@ TRACE_EVENT(rseq_update,
TP_STRUCT__entry(
__field(s32, cpu_id)
+ __field(s32, node_id)
+ __field(s32, mm_cid)
),
TP_fast_assign(
__entry->cpu_id = raw_smp_processor_id();
+ __entry->node_id = cpu_to_node(__entry->cpu_id);
+ __entry->mm_cid = task_mm_cid(t);
),
- TP_printk("cpu_id=%d", __entry->cpu_id)
+ TP_printk("cpu_id=%d node_id=%d mm_cid=%d", __entry->cpu_id,
+ __entry->node_id, __entry->mm_cid)
);
TRACE_EVENT(rseq_ip_fixup,
diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h
new file mode 100644
index 000000000000..56592da9301c
--- /dev/null
+++ b/include/trace/events/rv.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rv
+
+#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RV_H
+
+#include <linux/rv.h>
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT
+DECLARE_EVENT_CLASS(event_da_monitor,
+
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+
+ TP_ARGS(state, event, next_state, final_state),
+
+ TP_STRUCT__entry(
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ __array( char, next_state, MAX_DA_NAME_LEN )
+ __field( bool, final_state )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
+ __entry->final_state = final_state;
+ ),
+
+ TP_printk("%s x %s -> %s %s",
+ __entry->state,
+ __entry->event,
+ __entry->next_state,
+ __entry->final_state ? "(final)" : "")
+);
+
+DECLARE_EVENT_CLASS(error_da_monitor,
+
+ TP_PROTO(char *state, char *event),
+
+ TP_ARGS(state, event),
+
+ TP_STRUCT__entry(
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ ),
+
+ TP_printk("event %s not expected in the state %s",
+ __entry->event,
+ __entry->state)
+);
+
+#ifdef CONFIG_RV_MON_WIP
+DEFINE_EVENT(event_da_monitor, event_wip,
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor, error_wip,
+ TP_PROTO(char *state, char *event),
+ TP_ARGS(state, event));
+#endif /* CONFIG_RV_MON_WIP */
+#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
+
+#ifdef CONFIG_DA_MON_EVENTS_ID
+DECLARE_EVENT_CLASS(event_da_monitor_id,
+
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+
+ TP_ARGS(id, state, event, next_state, final_state),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ __array( char, next_state, MAX_DA_NAME_LEN )
+ __field( bool, final_state )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
+ __entry->id = id;
+ __entry->final_state = final_state;
+ ),
+
+ TP_printk("%d: %s x %s -> %s %s",
+ __entry->id,
+ __entry->state,
+ __entry->event,
+ __entry->next_state,
+ __entry->final_state ? "(final)" : "")
+);
+
+DECLARE_EVENT_CLASS(error_da_monitor_id,
+
+ TP_PROTO(int id, char *state, char *event),
+
+ TP_ARGS(id, state, event),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ __entry->id = id;
+ ),
+
+ TP_printk("%d: event %s not expected in the state %s",
+ __entry->id,
+ __entry->event,
+ __entry->state)
+);
+
+#ifdef CONFIG_RV_MON_WWNR
+/* id is the pid of the task */
+DEFINE_EVENT(event_da_monitor_id, event_wwnr,
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(id, state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor_id, error_wwnr,
+ TP_PROTO(int id, char *state, char *event),
+ TP_ARGS(id, state, event));
+#endif /* CONFIG_RV_MON_WWNR */
+
+#endif /* CONFIG_DA_MON_EVENTS_ID */
+#endif /* _TRACE_RV_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#include <trace/define_trace.h>
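[Editorial sketch: with CONFIG_RV_MON_WIP enabled, the DEFINE_EVENT()
above generates a trace_event_wip() call site. A hypothetical caller,
shown only to illustrate the argument shape (the real callers are in the
generated deterministic-automaton monitor code):

	static void monitor_transition(char *state, char *event,
				       char *next_state, bool is_final)
	{
		/* TP_fast_assign() memcpy()s exactly MAX_DA_NAME_LEN bytes
		 * from each string, so the source buffers must be at least
		 * that long.
		 */
		trace_event_wip(state, event, next_state, is_final);
	}
]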
diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h
new file mode 100644
index 000000000000..a43e5dd7436b
--- /dev/null
+++ b/include/trace/events/rwmmio.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rwmmio
+
+#if !defined(_TRACE_RWMMIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RWMMIO_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rwmmio_rw_template,
+
+ TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
+ volatile void __iomem *addr),
+
+ TP_ARGS(caller, caller0, val, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, caller0)
+ __field(unsigned long, addr)
+ __field(u64, val)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->caller0 = caller0;
+ __entry->val = val;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx",
+ (void *)__entry->caller0, (void *)__entry->caller, __entry->width,
+ __entry->val, __entry->addr)
+);
+
+DEFINE_EVENT(rwmmio_rw_template, rwmmio_write,
+ TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
+ volatile void __iomem *addr),
+ TP_ARGS(caller, caller0, val, width, addr)
+);
+
+DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write,
+ TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
+ volatile void __iomem *addr),
+ TP_ARGS(caller, caller0, val, width, addr)
+);
+
+TRACE_EVENT(rwmmio_read,
+
+ TP_PROTO(unsigned long caller, unsigned long caller0, u8 width,
+ const volatile void __iomem *addr),
+
+ TP_ARGS(caller, caller0, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, caller0)
+ __field(unsigned long, addr)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->caller0 = caller0;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS -> %pS width=%d addr=%#lx",
+ (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->addr)
+);
+
+TRACE_EVENT(rwmmio_post_read,
+
+ TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
+ const volatile void __iomem *addr),
+
+ TP_ARGS(caller, caller0, val, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, caller0)
+ __field(unsigned long, addr)
+ __field(u64, val)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->caller0 = caller0;
+ __entry->val = val;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx",
+ (void *)__entry->caller0, (void *)__entry->caller, __entry->width,
+ __entry->val, __entry->addr)
+);
+
+#endif /* _TRACE_RWMMIO_H */
+
+#include <trace/define_trace.h>
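[Editorial sketch: these events are meant to be driven from instrumented
MMIO accessors. A hedged sketch of one possible hook in kernel context,
assuming _THIS_IP_/_RET_IP_ supply the two caller addresses (the
kernel's real plumbing goes through its own logging helpers, whose exact
shape may differ):

	static inline void traced_writel(u32 val, volatile void __iomem *addr)
	{
		/* caller/caller0 let TP_printk() render "%pS -> %pS". */
		trace_rwmmio_write(_THIS_IP_, _RET_IP_, val, 32, addr);
		writel(val, addr);
		trace_rwmmio_post_write(_THIS_IP_, _RET_IP_, val, 32, addr);
	}
]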
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index c33079b986e8..a1b126a6b0d7 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -14,256 +14,230 @@
#include <linux/errqueue.h>
/*
- * Define enums for tracing information.
- *
- * These should all be kept sorted, making it easier to match the string
- * mapping tables further on.
- */
-#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum rxrpc_skb_trace {
- rxrpc_skb_cleaned,
- rxrpc_skb_freed,
- rxrpc_skb_got,
- rxrpc_skb_lost,
- rxrpc_skb_new,
- rxrpc_skb_purged,
- rxrpc_skb_received,
- rxrpc_skb_rotated,
- rxrpc_skb_seen,
- rxrpc_skb_unshared,
- rxrpc_skb_unshared_nomem,
-};
-
-enum rxrpc_local_trace {
- rxrpc_local_got,
- rxrpc_local_new,
- rxrpc_local_processing,
- rxrpc_local_put,
- rxrpc_local_queued,
-};
-
-enum rxrpc_peer_trace {
- rxrpc_peer_got,
- rxrpc_peer_new,
- rxrpc_peer_processing,
- rxrpc_peer_put,
-};
-
-enum rxrpc_conn_trace {
- rxrpc_conn_got,
- rxrpc_conn_new_client,
- rxrpc_conn_new_service,
- rxrpc_conn_put_client,
- rxrpc_conn_put_service,
- rxrpc_conn_queued,
- rxrpc_conn_reap_service,
- rxrpc_conn_seen,
-};
-
-enum rxrpc_client_trace {
- rxrpc_client_activate_chans,
- rxrpc_client_alloc,
- rxrpc_client_chan_activate,
- rxrpc_client_chan_disconnect,
- rxrpc_client_chan_pass,
- rxrpc_client_chan_unstarted,
- rxrpc_client_chan_wait_failed,
- rxrpc_client_cleanup,
- rxrpc_client_count,
- rxrpc_client_discard,
- rxrpc_client_duplicate,
- rxrpc_client_exposed,
- rxrpc_client_replace,
- rxrpc_client_to_active,
- rxrpc_client_to_culled,
- rxrpc_client_to_idle,
- rxrpc_client_to_inactive,
- rxrpc_client_to_upgrade,
- rxrpc_client_to_waiting,
- rxrpc_client_uncount,
-};
-
-enum rxrpc_call_trace {
- rxrpc_call_connected,
- rxrpc_call_error,
- rxrpc_call_got,
- rxrpc_call_got_kernel,
- rxrpc_call_got_userid,
- rxrpc_call_new_client,
- rxrpc_call_new_service,
- rxrpc_call_put,
- rxrpc_call_put_kernel,
- rxrpc_call_put_noqueue,
- rxrpc_call_put_userid,
- rxrpc_call_queued,
- rxrpc_call_queued_ref,
- rxrpc_call_release,
- rxrpc_call_seen,
-};
-
-enum rxrpc_transmit_trace {
- rxrpc_transmit_await_reply,
- rxrpc_transmit_end,
- rxrpc_transmit_queue,
- rxrpc_transmit_queue_last,
- rxrpc_transmit_rotate,
- rxrpc_transmit_rotate_last,
- rxrpc_transmit_wait,
-};
-
-enum rxrpc_receive_trace {
- rxrpc_receive_end,
- rxrpc_receive_front,
- rxrpc_receive_incoming,
- rxrpc_receive_queue,
- rxrpc_receive_queue_last,
- rxrpc_receive_rotate,
-};
-
-enum rxrpc_recvmsg_trace {
- rxrpc_recvmsg_cont,
- rxrpc_recvmsg_data_return,
- rxrpc_recvmsg_dequeue,
- rxrpc_recvmsg_enter,
- rxrpc_recvmsg_full,
- rxrpc_recvmsg_hole,
- rxrpc_recvmsg_next,
- rxrpc_recvmsg_requeue,
- rxrpc_recvmsg_return,
- rxrpc_recvmsg_terminal,
- rxrpc_recvmsg_to_be_accepted,
- rxrpc_recvmsg_wait,
-};
-
-enum rxrpc_rtt_tx_trace {
- rxrpc_rtt_tx_cancel,
- rxrpc_rtt_tx_data,
- rxrpc_rtt_tx_no_slot,
- rxrpc_rtt_tx_ping,
-};
-
-enum rxrpc_rtt_rx_trace {
- rxrpc_rtt_rx_cancel,
- rxrpc_rtt_rx_lost,
- rxrpc_rtt_rx_obsolete,
- rxrpc_rtt_rx_ping_response,
- rxrpc_rtt_rx_requested_ack,
-};
-
-enum rxrpc_timer_trace {
- rxrpc_timer_begin,
- rxrpc_timer_exp_ack,
- rxrpc_timer_exp_hard,
- rxrpc_timer_exp_idle,
- rxrpc_timer_exp_keepalive,
- rxrpc_timer_exp_lost_ack,
- rxrpc_timer_exp_normal,
- rxrpc_timer_exp_ping,
- rxrpc_timer_exp_resend,
- rxrpc_timer_expired,
- rxrpc_timer_init_for_reply,
- rxrpc_timer_init_for_send_reply,
- rxrpc_timer_restart,
- rxrpc_timer_set_for_ack,
- rxrpc_timer_set_for_hard,
- rxrpc_timer_set_for_idle,
- rxrpc_timer_set_for_keepalive,
- rxrpc_timer_set_for_lost_ack,
- rxrpc_timer_set_for_normal,
- rxrpc_timer_set_for_ping,
- rxrpc_timer_set_for_resend,
- rxrpc_timer_set_for_send,
-};
-
-enum rxrpc_propose_ack_trace {
- rxrpc_propose_ack_client_tx_end,
- rxrpc_propose_ack_input_data,
- rxrpc_propose_ack_ping_for_check_life,
- rxrpc_propose_ack_ping_for_keepalive,
- rxrpc_propose_ack_ping_for_lost_ack,
- rxrpc_propose_ack_ping_for_lost_reply,
- rxrpc_propose_ack_ping_for_params,
- rxrpc_propose_ack_processing_op,
- rxrpc_propose_ack_respond_to_ack,
- rxrpc_propose_ack_respond_to_ping,
- rxrpc_propose_ack_retry_tx,
- rxrpc_propose_ack_rotate_rx,
- rxrpc_propose_ack_terminal_ack,
-};
-
-enum rxrpc_propose_ack_outcome {
- rxrpc_propose_ack_subsume,
- rxrpc_propose_ack_update,
- rxrpc_propose_ack_use,
-};
-
-enum rxrpc_congest_change {
- rxrpc_cong_begin_retransmission,
- rxrpc_cong_cleared_nacks,
- rxrpc_cong_new_low_nack,
- rxrpc_cong_no_change,
- rxrpc_cong_progress,
- rxrpc_cong_retransmit_again,
- rxrpc_cong_rtt_window_end,
- rxrpc_cong_saw_nack,
-};
-
-enum rxrpc_tx_point {
- rxrpc_tx_point_call_abort,
- rxrpc_tx_point_call_ack,
- rxrpc_tx_point_call_data_frag,
- rxrpc_tx_point_call_data_nofrag,
- rxrpc_tx_point_call_final_resend,
- rxrpc_tx_point_conn_abort,
- rxrpc_tx_point_rxkad_challenge,
- rxrpc_tx_point_rxkad_response,
- rxrpc_tx_point_reject,
- rxrpc_tx_point_version_keepalive,
- rxrpc_tx_point_version_reply,
-};
-
-#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
-
-/*
* Declare tracing information enums and their string mappings for display.
*/
+#define rxrpc_abort_reasons \
+ /* AFS errors */ \
+ EM(afs_abort_general_error, "afs-error") \
+ EM(afs_abort_interrupted, "afs-intr") \
+ EM(afs_abort_oom, "afs-oom") \
+ EM(afs_abort_op_not_supported, "afs-op-notsupp") \
+ EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \
+ EM(afs_abort_send_data_error, "afs-send-data") \
+ EM(afs_abort_unmarshal_error, "afs-unmarshal") \
+ /* rxperf errors */ \
+ EM(rxperf_abort_general_error, "rxperf-error") \
+ EM(rxperf_abort_oom, "rxperf-oom") \
+ EM(rxperf_abort_op_not_supported, "rxperf-op-notsupp") \
+ EM(rxperf_abort_unmarshal_error, "rxperf-unmarshal") \
+ /* RxKAD security errors */ \
+ EM(rxkad_abort_1_short_check, "rxkad1-short-check") \
+ EM(rxkad_abort_1_short_data, "rxkad1-short-data") \
+ EM(rxkad_abort_1_short_encdata, "rxkad1-short-encdata") \
+ EM(rxkad_abort_1_short_header, "rxkad1-short-hdr") \
+ EM(rxkad_abort_2_short_check, "rxkad2-short-check") \
+ EM(rxkad_abort_2_short_data, "rxkad2-short-data") \
+ EM(rxkad_abort_2_short_header, "rxkad2-short-hdr") \
+ EM(rxkad_abort_2_short_len, "rxkad2-short-len") \
+ EM(rxkad_abort_bad_checksum, "rxkad2-bad-cksum") \
+ EM(rxkad_abort_chall_key_expired, "rxkad-chall-key-exp") \
+ EM(rxkad_abort_chall_level, "rxkad-chall-level") \
+ EM(rxkad_abort_chall_no_key, "rxkad-chall-nokey") \
+ EM(rxkad_abort_chall_short, "rxkad-chall-short") \
+ EM(rxkad_abort_chall_version, "rxkad-chall-version") \
+ EM(rxkad_abort_resp_bad_callid, "rxkad-resp-bad-callid") \
+ EM(rxkad_abort_resp_bad_checksum, "rxkad-resp-bad-cksum") \
+ EM(rxkad_abort_resp_bad_param, "rxkad-resp-bad-param") \
+ EM(rxkad_abort_resp_call_ctr, "rxkad-resp-call-ctr") \
+ EM(rxkad_abort_resp_call_state, "rxkad-resp-call-state") \
+ EM(rxkad_abort_resp_key_expired, "rxkad-resp-key-exp") \
+ EM(rxkad_abort_resp_key_rejected, "rxkad-resp-key-rej") \
+ EM(rxkad_abort_resp_level, "rxkad-resp-level") \
+ EM(rxkad_abort_resp_nokey, "rxkad-resp-nokey") \
+ EM(rxkad_abort_resp_ooseq, "rxkad-resp-ooseq") \
+ EM(rxkad_abort_resp_short, "rxkad-resp-short") \
+ EM(rxkad_abort_resp_short_tkt, "rxkad-resp-short-tkt") \
+ EM(rxkad_abort_resp_tkt_aname, "rxkad-resp-tk-aname") \
+ EM(rxkad_abort_resp_tkt_expired, "rxkad-resp-tk-exp") \
+ EM(rxkad_abort_resp_tkt_future, "rxkad-resp-tk-future") \
+ EM(rxkad_abort_resp_tkt_inst, "rxkad-resp-tk-inst") \
+ EM(rxkad_abort_resp_tkt_len, "rxkad-resp-tk-len") \
+ EM(rxkad_abort_resp_tkt_realm, "rxkad-resp-tk-realm") \
+ EM(rxkad_abort_resp_tkt_short, "rxkad-resp-tk-short") \
+ EM(rxkad_abort_resp_tkt_sinst, "rxkad-resp-tk-sinst") \
+ EM(rxkad_abort_resp_tkt_sname, "rxkad-resp-tk-sname") \
+ EM(rxkad_abort_resp_unknown_tkt, "rxkad-resp-unknown-tkt") \
+ EM(rxkad_abort_resp_version, "rxkad-resp-version") \
+ /* rxrpc errors */ \
+ EM(rxrpc_abort_call_improper_term, "call-improper-term") \
+ EM(rxrpc_abort_call_reset, "call-reset") \
+ EM(rxrpc_abort_call_sendmsg, "call-sendmsg") \
+ EM(rxrpc_abort_call_sock_release, "call-sock-rel") \
+ EM(rxrpc_abort_call_sock_release_tba, "call-sock-rel-tba") \
+ EM(rxrpc_abort_call_timeout, "call-timeout") \
+ EM(rxrpc_abort_no_service_key, "no-serv-key") \
+ EM(rxrpc_abort_nomem, "nomem") \
+ EM(rxrpc_abort_service_not_offered, "serv-not-offered") \
+ EM(rxrpc_abort_shut_down, "shut-down") \
+ EM(rxrpc_abort_unsupported_security, "unsup-sec") \
+ EM(rxrpc_badmsg_bad_abort, "bad-abort") \
+ EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \
+ EM(rxrpc_badmsg_short_ack, "short-ack") \
+ EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \
+ EM(rxrpc_badmsg_short_hdr, "short-hdr") \
+ EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \
+ EM(rxrpc_badmsg_zero_call, "zero-call") \
+ EM(rxrpc_badmsg_zero_seq, "zero-seq") \
+ EM(rxrpc_badmsg_zero_service, "zero-service") \
+ EM(rxrpc_eproto_ackr_outside_window, "ackr-out-win") \
+ EM(rxrpc_eproto_ackr_sack_overflow, "ackr-sack-over") \
+ EM(rxrpc_eproto_ackr_short_sack, "ackr-short-sack") \
+ EM(rxrpc_eproto_ackr_zero, "ackr-zero") \
+ EM(rxrpc_eproto_bad_upgrade, "bad-upgrade") \
+ EM(rxrpc_eproto_data_after_last, "data-after-last") \
+ EM(rxrpc_eproto_different_last, "diff-last") \
+ EM(rxrpc_eproto_early_reply, "early-reply") \
+ EM(rxrpc_eproto_improper_term, "improper-term") \
+ EM(rxrpc_eproto_no_client_call, "no-cl-call") \
+ EM(rxrpc_eproto_no_client_conn, "no-cl-conn") \
+ EM(rxrpc_eproto_no_service_call, "no-sv-call") \
+ EM(rxrpc_eproto_reupgrade, "re-upgrade") \
+ EM(rxrpc_eproto_rxnull_challenge, "rxnull-chall") \
+ EM(rxrpc_eproto_rxnull_response, "rxnull-resp") \
+ EM(rxrpc_eproto_tx_rot_last, "tx-rot-last") \
+ EM(rxrpc_eproto_unexpected_ack, "unex-ack") \
+ EM(rxrpc_eproto_unexpected_ackall, "unex-ackall") \
+ EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end") \
+ EM(rxrpc_eproto_unexpected_reply, "unex-reply") \
+ EM(rxrpc_eproto_wrong_security, "wrong-sec") \
+ EM(rxrpc_recvmsg_excess_data, "recvmsg-excess") \
+ EM(rxrpc_recvmsg_short_data, "recvmsg-short") \
+ E_(rxrpc_sendmsg_late_send, "sendmsg-late")
+
+#define rxrpc_call_poke_traces \
+ EM(rxrpc_call_poke_abort, "Abort") \
+ EM(rxrpc_call_poke_complete, "Compl") \
+ EM(rxrpc_call_poke_error, "Error") \
+ EM(rxrpc_call_poke_idle, "Idle") \
+ EM(rxrpc_call_poke_set_timeout, "Set-timo") \
+ EM(rxrpc_call_poke_start, "Start") \
+ EM(rxrpc_call_poke_timer, "Timer") \
+ E_(rxrpc_call_poke_timer_now, "Timer-now")
+
#define rxrpc_skb_traces \
- EM(rxrpc_skb_cleaned, "CLN") \
- EM(rxrpc_skb_freed, "FRE") \
- EM(rxrpc_skb_got, "GOT") \
- EM(rxrpc_skb_lost, "*L*") \
- EM(rxrpc_skb_new, "NEW") \
- EM(rxrpc_skb_purged, "PUR") \
- EM(rxrpc_skb_received, "RCV") \
- EM(rxrpc_skb_rotated, "ROT") \
- EM(rxrpc_skb_seen, "SEE") \
- EM(rxrpc_skb_unshared, "UNS") \
- E_(rxrpc_skb_unshared_nomem, "US0")
+ EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
+ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
+ EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+ EM(rxrpc_skb_get_last_nack, "GET last-nack") \
+ EM(rxrpc_skb_get_local_work, "GET locl-work") \
+ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
+ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
+ EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \
+ EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \
+ EM(rxrpc_skb_new_error_report, "NEW error-rpt") \
+ EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \
+ EM(rxrpc_skb_new_unshared, "NEW unshared ") \
+ EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \
+ EM(rxrpc_skb_put_conn_work, "PUT conn-work") \
+ EM(rxrpc_skb_put_error_report, "PUT error-rep") \
+ EM(rxrpc_skb_put_input, "PUT input ") \
+ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+ EM(rxrpc_skb_put_last_nack, "PUT last-nack") \
+ EM(rxrpc_skb_put_purge, "PUT purge ") \
+ EM(rxrpc_skb_put_rotate, "PUT rotate ") \
+ EM(rxrpc_skb_put_unknown, "PUT unknown ") \
+ EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
+ EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_skb_see_reject, "SEE reject ") \
+ EM(rxrpc_skb_see_rotate, "SEE rotate ") \
+ E_(rxrpc_skb_see_version, "SEE version ")
#define rxrpc_local_traces \
- EM(rxrpc_local_got, "GOT") \
- EM(rxrpc_local_new, "NEW") \
- EM(rxrpc_local_processing, "PRO") \
- EM(rxrpc_local_put, "PUT") \
- E_(rxrpc_local_queued, "QUE")
+ EM(rxrpc_local_free, "FREE ") \
+ EM(rxrpc_local_get_call, "GET call ") \
+ EM(rxrpc_local_get_client_conn, "GET conn-cln") \
+ EM(rxrpc_local_get_for_use, "GET for-use ") \
+ EM(rxrpc_local_get_peer, "GET peer ") \
+ EM(rxrpc_local_get_prealloc_conn, "GET conn-pre") \
+ EM(rxrpc_local_new, "NEW ") \
+ EM(rxrpc_local_put_bind, "PUT bind ") \
+ EM(rxrpc_local_put_call, "PUT call ") \
+ EM(rxrpc_local_put_for_use, "PUT for-use ") \
+ EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \
+ EM(rxrpc_local_put_peer, "PUT peer ") \
+ EM(rxrpc_local_put_prealloc_peer, "PUT peer-pre") \
+ EM(rxrpc_local_put_release_sock, "PUT rel-sock") \
+ EM(rxrpc_local_stop, "STOP ") \
+ EM(rxrpc_local_stopped, "STOPPED ") \
+ EM(rxrpc_local_unuse_bind, "UNU bind ") \
+ EM(rxrpc_local_unuse_conn_work, "UNU conn-wrk") \
+ EM(rxrpc_local_unuse_peer_keepalive, "UNU peer-kpa") \
+ EM(rxrpc_local_unuse_release_sock, "UNU rel-sock") \
+ EM(rxrpc_local_use_conn_work, "USE conn-wrk") \
+ EM(rxrpc_local_use_lookup, "USE lookup ") \
+ E_(rxrpc_local_use_peer_keepalive, "USE peer-kpa")
#define rxrpc_peer_traces \
- EM(rxrpc_peer_got, "GOT") \
- EM(rxrpc_peer_new, "NEW") \
- EM(rxrpc_peer_processing, "PRO") \
- E_(rxrpc_peer_put, "PUT")
+ EM(rxrpc_peer_free, "FREE ") \
+ EM(rxrpc_peer_get_accept, "GET accept ") \
+ EM(rxrpc_peer_get_application, "GET app ") \
+ EM(rxrpc_peer_get_bundle, "GET bundle ") \
+ EM(rxrpc_peer_get_call, "GET call ") \
+ EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
+ EM(rxrpc_peer_get_input, "GET input ") \
+ EM(rxrpc_peer_get_input_error, "GET inpt-err") \
+ EM(rxrpc_peer_get_keepalive, "GET keepaliv") \
+ EM(rxrpc_peer_get_lookup_client, "GET look-cln") \
+ EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
+ EM(rxrpc_peer_new_client, "NEW client ") \
+ EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
+ EM(rxrpc_peer_put_application, "PUT app ") \
+ EM(rxrpc_peer_put_bundle, "PUT bundle ") \
+ EM(rxrpc_peer_put_call, "PUT call ") \
+ EM(rxrpc_peer_put_conn, "PUT conn ") \
+ EM(rxrpc_peer_put_input, "PUT input ") \
+ EM(rxrpc_peer_put_input_error, "PUT inpt-err") \
+ E_(rxrpc_peer_put_keepalive, "PUT keepaliv")
+
+#define rxrpc_bundle_traces \
+ EM(rxrpc_bundle_free, "FREE ") \
+ EM(rxrpc_bundle_get_client_call, "GET clt-call") \
+ EM(rxrpc_bundle_get_client_conn, "GET clt-conn") \
+ EM(rxrpc_bundle_get_service_conn, "GET svc-conn") \
+ EM(rxrpc_bundle_put_call, "PUT call ") \
+ EM(rxrpc_bundle_put_conn, "PUT conn ") \
+ EM(rxrpc_bundle_put_discard, "PUT discard ") \
+ E_(rxrpc_bundle_new, "NEW ")
#define rxrpc_conn_traces \
- EM(rxrpc_conn_got, "GOT") \
- EM(rxrpc_conn_new_client, "NWc") \
- EM(rxrpc_conn_new_service, "NWs") \
- EM(rxrpc_conn_put_client, "PTc") \
- EM(rxrpc_conn_put_service, "PTs") \
- EM(rxrpc_conn_queued, "QUE") \
- EM(rxrpc_conn_reap_service, "RPs") \
- E_(rxrpc_conn_seen, "SEE")
+ EM(rxrpc_conn_free, "FREE ") \
+ EM(rxrpc_conn_get_activate_call, "GET act-call") \
+ EM(rxrpc_conn_get_call_input, "GET inp-call") \
+ EM(rxrpc_conn_get_conn_input, "GET inp-conn") \
+ EM(rxrpc_conn_get_idle, "GET idle ") \
+ EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \
+ EM(rxrpc_conn_get_poke_timer, "GET poke ") \
+ EM(rxrpc_conn_get_service_conn, "GET svc-conn") \
+ EM(rxrpc_conn_new_client, "NEW client ") \
+ EM(rxrpc_conn_new_service, "NEW service ") \
+ EM(rxrpc_conn_put_call, "PUT call ") \
+ EM(rxrpc_conn_put_call_input, "PUT inp-call") \
+ EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \
+ EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \
+ EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \
+ EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \
+ EM(rxrpc_conn_put_poke, "PUT poke ") \
+ EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \
+ EM(rxrpc_conn_put_unbundle, "PUT unbundle") \
+ EM(rxrpc_conn_put_unidle, "PUT unidle ") \
+ EM(rxrpc_conn_put_work, "PUT work ") \
+ EM(rxrpc_conn_queue_challenge, "QUE chall ") \
+ EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \
+ EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \
+ EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \
+ EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \
+ E_(rxrpc_conn_see_work, "SEE work ")
#define rxrpc_client_traces \
EM(rxrpc_client_activate_chans, "Activa") \
@@ -271,55 +245,59 @@ enum rxrpc_tx_point {
EM(rxrpc_client_chan_activate, "ChActv") \
EM(rxrpc_client_chan_disconnect, "ChDisc") \
EM(rxrpc_client_chan_pass, "ChPass") \
- EM(rxrpc_client_chan_unstarted, "ChUnst") \
- EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
EM(rxrpc_client_cleanup, "Clean ") \
- EM(rxrpc_client_count, "Count ") \
EM(rxrpc_client_discard, "Discar") \
- EM(rxrpc_client_duplicate, "Duplic") \
EM(rxrpc_client_exposed, "Expose") \
EM(rxrpc_client_replace, "Replac") \
+ EM(rxrpc_client_queue_new_call, "Q-Call") \
EM(rxrpc_client_to_active, "->Actv") \
- EM(rxrpc_client_to_culled, "->Cull") \
- EM(rxrpc_client_to_idle, "->Idle") \
- EM(rxrpc_client_to_inactive, "->Inac") \
- EM(rxrpc_client_to_upgrade, "->Upgd") \
- EM(rxrpc_client_to_waiting, "->Wait") \
- E_(rxrpc_client_uncount, "Uncoun")
-
-#define rxrpc_conn_cache_states \
- EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \
- EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \
- EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \
- EM(RXRPC_CONN_CLIENT_UPGRADE, "Upgd") \
- EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \
- E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \
+ E_(rxrpc_client_to_idle, "->Idle")
#define rxrpc_call_traces \
- EM(rxrpc_call_connected, "CON") \
- EM(rxrpc_call_error, "*E*") \
- EM(rxrpc_call_got, "GOT") \
- EM(rxrpc_call_got_kernel, "Gke") \
- EM(rxrpc_call_got_userid, "Gus") \
- EM(rxrpc_call_new_client, "NWc") \
- EM(rxrpc_call_new_service, "NWs") \
- EM(rxrpc_call_put, "PUT") \
- EM(rxrpc_call_put_kernel, "Pke") \
- EM(rxrpc_call_put_noqueue, "PNQ") \
- EM(rxrpc_call_put_userid, "Pus") \
- EM(rxrpc_call_queued, "QUE") \
- EM(rxrpc_call_queued_ref, "QUR") \
- EM(rxrpc_call_release, "RLS") \
- E_(rxrpc_call_seen, "SEE")
-
-#define rxrpc_transmit_traces \
- EM(rxrpc_transmit_await_reply, "AWR") \
- EM(rxrpc_transmit_end, "END") \
- EM(rxrpc_transmit_queue, "QUE") \
- EM(rxrpc_transmit_queue_last, "QLS") \
- EM(rxrpc_transmit_rotate, "ROT") \
- EM(rxrpc_transmit_rotate_last, "RLS") \
- E_(rxrpc_transmit_wait, "WAI")
+ EM(rxrpc_call_get_io_thread, "GET iothread") \
+ EM(rxrpc_call_get_input, "GET input ") \
+ EM(rxrpc_call_get_kernel_service, "GET krnl-srv") \
+ EM(rxrpc_call_get_notify_socket, "GET notify ") \
+ EM(rxrpc_call_get_poke, "GET poke ") \
+ EM(rxrpc_call_get_recvmsg, "GET recvmsg ") \
+ EM(rxrpc_call_get_release_sock, "GET rel-sock") \
+ EM(rxrpc_call_get_sendmsg, "GET sendmsg ") \
+ EM(rxrpc_call_get_userid, "GET user-id ") \
+ EM(rxrpc_call_new_client, "NEW client ") \
+ EM(rxrpc_call_new_prealloc_service, "NEW prealloc") \
+ EM(rxrpc_call_put_discard_prealloc, "PUT disc-pre") \
+ EM(rxrpc_call_put_discard_error, "PUT disc-err") \
+ EM(rxrpc_call_put_io_thread, "PUT iothread") \
+ EM(rxrpc_call_put_input, "PUT input ") \
+ EM(rxrpc_call_put_kernel, "PUT kernel ") \
+ EM(rxrpc_call_put_poke, "PUT poke ") \
+ EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
+ EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
+ EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \
+ EM(rxrpc_call_put_unnotify, "PUT unnotify") \
+ EM(rxrpc_call_put_userid_exists, "PUT u-exists") \
+ EM(rxrpc_call_put_userid, "PUT user-id ") \
+ EM(rxrpc_call_see_accept, "SEE accept ") \
+ EM(rxrpc_call_see_activate_client, "SEE act-clnt") \
+ EM(rxrpc_call_see_connect_failed, "SEE con-fail") \
+ EM(rxrpc_call_see_connected, "SEE connect ") \
+ EM(rxrpc_call_see_disconnected, "SEE disconn ") \
+ EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
+ EM(rxrpc_call_see_input, "SEE input ") \
+ EM(rxrpc_call_see_release, "SEE release ") \
+ EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
+ E_(rxrpc_call_see_zap, "SEE zap ")
+
+#define rxrpc_txqueue_traces \
+ EM(rxrpc_txqueue_await_reply, "AWR") \
+ EM(rxrpc_txqueue_dequeue, "DEQ") \
+ EM(rxrpc_txqueue_end, "END") \
+ EM(rxrpc_txqueue_queue, "QUE") \
+ EM(rxrpc_txqueue_queue_last, "QLS") \
+ EM(rxrpc_txqueue_rotate, "ROT") \
+ EM(rxrpc_txqueue_rotate_last, "RLS") \
+ E_(rxrpc_txqueue_wait, "WAI")
#define rxrpc_receive_traces \
EM(rxrpc_receive_end, "END") \
@@ -327,7 +305,12 @@ enum rxrpc_tx_point {
EM(rxrpc_receive_incoming, "INC") \
EM(rxrpc_receive_queue, "QUE") \
EM(rxrpc_receive_queue_last, "QLS") \
- E_(rxrpc_receive_rotate, "ROT")
+ EM(rxrpc_receive_queue_oos, "QUO") \
+ EM(rxrpc_receive_queue_oos_last, "QOL") \
+ EM(rxrpc_receive_oos, "OOS") \
+ EM(rxrpc_receive_oos_last, "OSL") \
+ EM(rxrpc_receive_rotate, "ROT") \
+ E_(rxrpc_receive_rotate_last, "RLS")
#define rxrpc_recvmsg_traces \
EM(rxrpc_recvmsg_cont, "CONT") \
@@ -341,6 +324,7 @@ enum rxrpc_tx_point {
EM(rxrpc_recvmsg_return, "RETN") \
EM(rxrpc_recvmsg_terminal, "TERM") \
EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
+ EM(rxrpc_recvmsg_unqueue, "UNQU") \
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
@@ -350,56 +334,44 @@ enum rxrpc_tx_point {
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
- EM(rxrpc_rtt_rx_cancel, "CNCL") \
+ EM(rxrpc_rtt_rx_other_ack, "OACK") \
EM(rxrpc_rtt_rx_obsolete, "OBSL") \
EM(rxrpc_rtt_rx_lost, "LOST") \
EM(rxrpc_rtt_rx_ping_response, "PONG") \
E_(rxrpc_rtt_rx_requested_ack, "RACK")
#define rxrpc_timer_traces \
- EM(rxrpc_timer_begin, "Begin ") \
- EM(rxrpc_timer_expired, "*EXPR*") \
- EM(rxrpc_timer_exp_ack, "ExpAck") \
- EM(rxrpc_timer_exp_hard, "ExpHrd") \
- EM(rxrpc_timer_exp_idle, "ExpIdl") \
- EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
- EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
- EM(rxrpc_timer_exp_normal, "ExpNml") \
- EM(rxrpc_timer_exp_ping, "ExpPng") \
- EM(rxrpc_timer_exp_resend, "ExpRsn") \
- EM(rxrpc_timer_init_for_reply, "IniRpl") \
- EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
- EM(rxrpc_timer_restart, "Restrt") \
- EM(rxrpc_timer_set_for_ack, "SetAck") \
- EM(rxrpc_timer_set_for_hard, "SetHrd") \
- EM(rxrpc_timer_set_for_idle, "SetIdl") \
- EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
- EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
- EM(rxrpc_timer_set_for_normal, "SetNml") \
- EM(rxrpc_timer_set_for_ping, "SetPng") \
- EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetSnd")
+ EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
+ EM(rxrpc_timer_trace_expect_rx, "ExpectRx ") \
+ EM(rxrpc_timer_trace_hard, "HardLimit") \
+ EM(rxrpc_timer_trace_idle, "IdleLimit") \
+ EM(rxrpc_timer_trace_keepalive, "KeepAlive") \
+ EM(rxrpc_timer_trace_lost_ack, "LostAck ") \
+ EM(rxrpc_timer_trace_ping, "DelayPing") \
+ EM(rxrpc_timer_trace_resend, "Resend ") \
+ EM(rxrpc_timer_trace_resend_reset, "ResendRst") \
+ E_(rxrpc_timer_trace_resend_tx, "ResendTx ")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
+ EM(rxrpc_propose_ack_delayed_ack, "DlydAck") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
- EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
+ EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \
+ EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
+ EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
EM(rxrpc_propose_ack_retry_tx, "RetryTx") \
EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \
+ EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \
E_(rxrpc_propose_ack_terminal_ack, "ClTerm ")
-#define rxrpc_propose_ack_outcomes \
- EM(rxrpc_propose_ack_subsume, " Subsume") \
- EM(rxrpc_propose_ack_update, " Update") \
- E_(rxrpc_propose_ack_use, " New")
-
#define rxrpc_congest_modes \
EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \
@@ -412,6 +384,7 @@ enum rxrpc_tx_point {
EM(rxrpc_cong_new_low_nack, " NewLowN") \
EM(rxrpc_cong_no_change, " -") \
EM(rxrpc_cong_progress, " Progres") \
+ EM(rxrpc_cong_idle_reset, " IdleRes") \
EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
EM(rxrpc_cong_rtt_window_end, " RttWinE") \
E_(rxrpc_cong_saw_nack, " SawNack")
@@ -447,6 +420,13 @@ enum rxrpc_tx_point {
EM(RXRPC_ACK_IDLE, "IDL") \
E_(RXRPC_ACK__INVALID, "-?-")
+#define rxrpc_sack_traces \
+ EM(rxrpc_sack_advance, "ADV") \
+ EM(rxrpc_sack_fill, "FIL") \
+ EM(rxrpc_sack_nack, "NAK") \
+ EM(rxrpc_sack_none, "---") \
+ E_(rxrpc_sack_oos, "OOS")
+
#define rxrpc_completions \
EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
@@ -467,30 +447,102 @@ enum rxrpc_tx_point {
EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
E_(rxrpc_tx_point_version_reply, "VerReply")
+#define rxrpc_req_ack_traces \
+ EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \
+ EM(rxrpc_reqack_already_on, "ALREADY-ON") \
+ EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \
+ EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \
+ EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \
+ EM(rxrpc_reqack_retrans, "RETRANS ") \
+ EM(rxrpc_reqack_slow_start, "SLOW-START") \
+ E_(rxrpc_reqack_small_txwin, "SMALL-TXWN")
+/* ---- Must update size of stat_why_req_ack[] if more are added! */
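[Editorial sketch: the warning matters because a per-reason statistics
array is sized by counting the EM()/E_() entries; the list above has
eight, so hypothetically the counters would look like:

	/* One counter per rxrpc_reqack_* reason listed above; adding an
	 * EM() line without growing this array would index past the end.
	 */
	atomic_t stat_why_req_ack[8];
]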
+
+#define rxrpc_txbuf_traces \
+ EM(rxrpc_txbuf_alloc_ack, "ALLOC ACK ") \
+ EM(rxrpc_txbuf_alloc_data, "ALLOC DATA ") \
+ EM(rxrpc_txbuf_free, "FREE ") \
+ EM(rxrpc_txbuf_get_buffer, "GET BUFFER ") \
+ EM(rxrpc_txbuf_get_trans, "GET TRANS ") \
+ EM(rxrpc_txbuf_get_retrans, "GET RETRANS") \
+ EM(rxrpc_txbuf_put_ack_tx, "PUT ACK TX ") \
+ EM(rxrpc_txbuf_put_cleaned, "PUT CLEANED") \
+ EM(rxrpc_txbuf_put_nomem, "PUT NOMEM ") \
+ EM(rxrpc_txbuf_put_rotated, "PUT ROTATED") \
+ EM(rxrpc_txbuf_put_send_aborted, "PUT SEND-X ") \
+ EM(rxrpc_txbuf_put_trans, "PUT TRANS ") \
+ EM(rxrpc_txbuf_see_out_of_step, "OUT-OF-STEP") \
+ EM(rxrpc_txbuf_see_send_more, "SEE SEND+ ") \
+ E_(rxrpc_txbuf_see_unacked, "SEE UNACKED")
+
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum rxrpc_abort_reason { rxrpc_abort_reasons } __mode(byte);
+enum rxrpc_bundle_trace { rxrpc_bundle_traces } __mode(byte);
+enum rxrpc_call_poke_trace { rxrpc_call_poke_traces } __mode(byte);
+enum rxrpc_call_trace { rxrpc_call_traces } __mode(byte);
+enum rxrpc_client_trace { rxrpc_client_traces } __mode(byte);
+enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
+enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte);
+enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte);
+enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte);
+enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte);
+enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte);
+enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte);
+enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
+enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte);
+enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
+enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte);
+enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
+enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
+enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
+enum rxrpc_txbuf_trace { rxrpc_txbuf_traces } __mode(byte);
+enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
+
+#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
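[Editorial sketch: each enum above is declared with __mode(byte), which
expands to __attribute__((mode(byte))) and shrinks the enum to a single
byte in the trace record. A standalone GCC check of the same attribute
(userspace, for illustration only):

	#include <stdio.h>

	#define __mode(x) __attribute__((mode(x)))

	enum tiny { TINY_A, TINY_B, TINY_C } __mode(byte);

	int main(void)
	{
		/* Prints 1 under GCC; without the attribute an enum
		 * would typically occupy 4 bytes.
		 */
		printf("sizeof(enum tiny) = %zu\n", sizeof(enum tiny));
		return 0;
	}
]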
+
/*
 * Export enum symbols to userspace.
*/
#undef EM
#undef E_
+
+#ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS
+
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
-rxrpc_skb_traces;
-rxrpc_local_traces;
-rxrpc_conn_traces;
-rxrpc_client_traces;
+rxrpc_abort_reasons;
+rxrpc_bundle_traces;
+rxrpc_call_poke_traces;
rxrpc_call_traces;
-rxrpc_transmit_traces;
+rxrpc_client_traces;
+rxrpc_congest_changes;
+rxrpc_congest_modes;
+rxrpc_conn_traces;
+rxrpc_local_traces;
+rxrpc_propose_ack_traces;
rxrpc_receive_traces;
rxrpc_recvmsg_traces;
-rxrpc_rtt_tx_traces;
+rxrpc_req_ack_traces;
rxrpc_rtt_rx_traces;
+rxrpc_rtt_tx_traces;
+rxrpc_sack_traces;
+rxrpc_skb_traces;
rxrpc_timer_traces;
-rxrpc_propose_ack_traces;
-rxrpc_propose_ack_outcomes;
-rxrpc_congest_modes;
-rxrpc_congest_changes;
rxrpc_tx_points;
+rxrpc_txbuf_traces;
+rxrpc_txqueue_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -503,83 +555,98 @@ rxrpc_tx_points;
TRACE_EVENT(rxrpc_local,
TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
- int usage, const void *where),
+ int ref, int usage),
- TP_ARGS(local_debug_id, op, usage, where),
+ TP_ARGS(local_debug_id, op, ref, usage),
TP_STRUCT__entry(
- __field(unsigned int, local )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(unsigned int, local)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, usage)
),
TP_fast_assign(
__entry->local = local_debug_id;
__entry->op = op;
+ __entry->ref = ref;
__entry->usage = usage;
- __entry->where = where;
),
- TP_printk("L=%08x %s u=%d sp=%pSR",
+ TP_printk("L=%08x %s r=%d u=%d",
__entry->local,
__print_symbolic(__entry->op, rxrpc_local_traces),
- __entry->usage,
- __entry->where)
+ __entry->ref,
+ __entry->usage)
);
TRACE_EVENT(rxrpc_peer,
- TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
- int usage, const void *where),
+ TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why),
- TP_ARGS(peer_debug_id, op, usage, where),
+ TP_ARGS(peer_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(unsigned int, peer)
+ __field(int, ref)
+ __field(enum rxrpc_peer_trace, why)
),
TP_fast_assign(
__entry->peer = peer_debug_id;
- __entry->op = op;
- __entry->usage = usage;
- __entry->where = where;
+ __entry->ref = ref;
+ __entry->why = why;
),
- TP_printk("P=%08x %s u=%d sp=%pSR",
+ TP_printk("P=%08x %s r=%d",
__entry->peer,
- __print_symbolic(__entry->op, rxrpc_peer_traces),
- __entry->usage,
- __entry->where)
+ __print_symbolic(__entry->why, rxrpc_peer_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(rxrpc_bundle,
+ TP_PROTO(unsigned int bundle_debug_id, int ref, enum rxrpc_bundle_trace why),
+
+ TP_ARGS(bundle_debug_id, ref, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, bundle)
+ __field(int, ref)
+ __field(int, why)
+ ),
+
+ TP_fast_assign(
+ __entry->bundle = bundle_debug_id;
+ __entry->ref = ref;
+ __entry->why = why;
+ ),
+
+ TP_printk("CB=%08x %s r=%d",
+ __entry->bundle,
+ __print_symbolic(__entry->why, rxrpc_bundle_traces),
+ __entry->ref)
);
TRACE_EVENT(rxrpc_conn,
- TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
- int usage, const void *where),
+ TP_PROTO(unsigned int conn_debug_id, int ref, enum rxrpc_conn_trace why),
- TP_ARGS(conn_debug_id, op, usage, where),
+ TP_ARGS(conn_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(unsigned int, conn)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
__entry->conn = conn_debug_id;
- __entry->op = op;
- __entry->usage = usage;
- __entry->where = where;
+ __entry->ref = ref;
+ __entry->why = why;
),
- TP_printk("C=%08x %s u=%d sp=%pSR",
+ TP_printk("C=%08x %s r=%d",
__entry->conn,
- __print_symbolic(__entry->op, rxrpc_conn_traces),
- __entry->usage,
- __entry->where)
+ __print_symbolic(__entry->why, rxrpc_conn_traces),
+ __entry->ref)
);
TRACE_EVENT(rxrpc_client,
@@ -589,93 +656,81 @@ TRACE_EVENT(rxrpc_client,
TP_ARGS(conn, channel, op),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(u32, cid )
- __field(int, channel )
- __field(int, usage )
- __field(enum rxrpc_client_trace, op )
- __field(enum rxrpc_conn_cache_state, cs )
+ __field(unsigned int, conn)
+ __field(u32, cid)
+ __field(int, channel)
+ __field(int, usage)
+ __field(enum rxrpc_client_trace, op)
),
TP_fast_assign(
- __entry->conn = conn->debug_id;
+ __entry->conn = conn ? conn->debug_id : 0;
__entry->channel = channel;
- __entry->usage = atomic_read(&conn->usage);
+ __entry->usage = conn ? refcount_read(&conn->ref) : -2;
__entry->op = op;
- __entry->cid = conn->proto.cid;
- __entry->cs = conn->cache_state;
+ __entry->cid = conn ? conn->proto.cid : 0;
),
- TP_printk("C=%08x h=%2d %s %s i=%08x u=%d",
+ TP_printk("C=%08x h=%2d %s i=%08x u=%d",
__entry->conn,
__entry->channel,
__print_symbolic(__entry->op, rxrpc_client_traces),
- __print_symbolic(__entry->cs, rxrpc_conn_cache_states),
__entry->cid,
__entry->usage)
);
TRACE_EVENT(rxrpc_call,
- TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
- int usage, const void *where, const void *aux),
+ TP_PROTO(unsigned int call_debug_id, int ref, unsigned long aux,
+ enum rxrpc_call_trace why),
- TP_ARGS(call_debug_id, op, usage, where, aux),
+ TP_ARGS(call_debug_id, ref, aux, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
- __field(const void *, aux )
+ __field(unsigned int, call)
+ __field(int, ref)
+ __field(int, why)
+ __field(unsigned long, aux)
),
TP_fast_assign(
__entry->call = call_debug_id;
- __entry->op = op;
- __entry->usage = usage;
- __entry->where = where;
+ __entry->ref = ref;
+ __entry->why = why;
__entry->aux = aux;
),
- TP_printk("c=%08x %s u=%d sp=%pSR a=%p",
+ TP_printk("c=%08x %s r=%d a=%lx",
__entry->call,
- __print_symbolic(__entry->op, rxrpc_call_traces),
- __entry->usage,
- __entry->where,
+ __print_symbolic(__entry->why, rxrpc_call_traces),
+ __entry->ref,
__entry->aux)
);
TRACE_EVENT(rxrpc_skb,
- TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
- int usage, int mod_count, u8 flags, const void *where),
+ TP_PROTO(struct sk_buff *skb, int usage, int mod_count,
+ enum rxrpc_skb_trace why),
- TP_ARGS(skb, op, usage, mod_count, flags, where),
+ TP_ARGS(skb, usage, mod_count, why),
TP_STRUCT__entry(
- __field(struct sk_buff *, skb )
- __field(enum rxrpc_skb_trace, op )
- __field(u8, flags )
- __field(int, usage )
- __field(int, mod_count )
- __field(const void *, where )
+ __field(struct sk_buff *, skb)
+ __field(int, usage)
+ __field(int, mod_count)
+ __field(enum rxrpc_skb_trace, why)
),
TP_fast_assign(
__entry->skb = skb;
- __entry->flags = flags;
- __entry->op = op;
__entry->usage = usage;
__entry->mod_count = mod_count;
- __entry->where = where;
+ __entry->why = why;
),
- TP_printk("s=%p %cx %s u=%d m=%d p=%pSR",
+ TP_printk("s=%p Rx %s u=%d m=%d",
__entry->skb,
- __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R',
- __print_symbolic(__entry->op, rxrpc_skb_traces),
+ __print_symbolic(__entry->why, rxrpc_skb_traces),
__entry->usage,
- __entry->mod_count,
- __entry->where)
+ __entry->mod_count)
);
TRACE_EVENT(rxrpc_rx_packet,
@@ -684,7 +739,7 @@ TRACE_EVENT(rxrpc_rx_packet,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -695,9 +750,8 @@ TRACE_EVENT(rxrpc_rx_packet,
__entry->hdr.epoch, __entry->hdr.cid,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
- __entry->hdr.type, __entry->hdr.flags,
- __entry->hdr.type <= 15 ?
- __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
+ __entry->hdr.securityIndex, __entry->hdr.flags,
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts))
);
TRACE_EVENT(rxrpc_rx_done,
@@ -706,8 +760,8 @@ TRACE_EVENT(rxrpc_rx_done,
TP_ARGS(result, abort_code),
TP_STRUCT__entry(
- __field(int, result )
- __field(int, abort_code )
+ __field(int, result)
+ __field(int, abort_code)
),
TP_fast_assign(
@@ -719,24 +773,24 @@ TRACE_EVENT(rxrpc_rx_done,
);
TRACE_EVENT(rxrpc_abort,
- TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id,
- rxrpc_seq_t seq, int abort_code, int error),
+ TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
+ u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),
TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
TP_STRUCT__entry(
- __field(unsigned int, call_nr )
- __array(char, why, 4 )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_seq_t, seq )
- __field(int, abort_code )
- __field(int, error )
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, abort_code)
+ __field(int, error)
),
TP_fast_assign(
- memcpy(__entry->why, why, 4);
__entry->call_nr = call_nr;
+ __entry->why = why;
__entry->cid = cid;
__entry->call_id = call_id;
__entry->abort_code = abort_code;
@@ -747,7 +801,8 @@ TRACE_EVENT(rxrpc_abort,
TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
__entry->call_nr,
__entry->cid, __entry->call_id, __entry->seq,
- __entry->abort_code, __entry->error, __entry->why)
+ __entry->abort_code, __entry->error,
+ __print_symbolic(__entry->why, rxrpc_abort_reasons))
);
TRACE_EVENT(rxrpc_call_complete,
@@ -756,10 +811,10 @@ TRACE_EVENT(rxrpc_call_complete,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_call_completion, compl )
- __field(int, error )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(enum rxrpc_call_completion, compl)
+ __field(int, error)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -776,47 +831,53 @@ TRACE_EVENT(rxrpc_call_complete,
__entry->abort_code)
);
-TRACE_EVENT(rxrpc_transmit,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
+TRACE_EVENT(rxrpc_txqueue,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_txqueue_trace why),
TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_transmit_trace, why )
- __field(rxrpc_seq_t, tx_hard_ack )
- __field(rxrpc_seq_t, tx_top )
- __field(int, tx_winsize )
+ __field(unsigned int, call)
+ __field(enum rxrpc_txqueue_trace, why)
+ __field(rxrpc_seq_t, acks_hard_ack)
+ __field(rxrpc_seq_t, tx_bottom)
+ __field(rxrpc_seq_t, tx_top)
+ __field(rxrpc_seq_t, tx_prepared)
+ __field(int, tx_winsize)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->tx_hard_ack = call->tx_hard_ack;
+ __entry->acks_hard_ack = call->acks_hard_ack;
+ __entry->tx_bottom = call->tx_bottom;
__entry->tx_top = call->tx_top;
+ __entry->tx_prepared = call->tx_prepared;
__entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%08x %s f=%08x n=%u/%u",
+ TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u/%u",
__entry->call,
- __print_symbolic(__entry->why, rxrpc_transmit_traces),
- __entry->tx_hard_ack + 1,
- __entry->tx_top - __entry->tx_hard_ack,
+ __print_symbolic(__entry->why, rxrpc_txqueue_traces),
+ __entry->tx_bottom,
+ __entry->acks_hard_ack,
+ __entry->tx_top - __entry->tx_bottom,
+ __entry->tx_top - __entry->acks_hard_ack,
+ __entry->tx_prepared - __entry->tx_bottom,
__entry->tx_winsize)
);
TRACE_EVENT(rxrpc_rx_data,
TP_PROTO(unsigned int call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, u8 flags, u8 anno),
+ rxrpc_serial_t serial, u8 flags),
- TP_ARGS(call, seq, serial, flags, anno),
+ TP_ARGS(call, seq, serial, flags),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u8, flags )
- __field(u8, anno )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, flags)
),
TP_fast_assign(
@@ -824,15 +885,13 @@ TRACE_EVENT(rxrpc_rx_data,
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
- __entry->anno = anno;
),
- TP_printk("c=%08x DATA %08x q=%08x fl=%02x a=%02x",
+ TP_printk("c=%08x DATA %08x q=%08x fl=%02x",
__entry->call,
__entry->serial,
__entry->seq,
- __entry->flags,
- __entry->anno)
+ __entry->flags)
);
TRACE_EVENT(rxrpc_rx_ack,
@@ -843,13 +902,13 @@ TRACE_EVENT(rxrpc_rx_ack,
TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_serial_t, ack_serial )
- __field(rxrpc_seq_t, first )
- __field(rxrpc_seq_t, prev )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, first)
+ __field(rxrpc_seq_t, prev)
+ __field(u8, reason)
+ __field(u8, n_acks)
),
TP_fast_assign(
@@ -879,9 +938,9 @@ TRACE_EVENT(rxrpc_rx_abort,
TP_ARGS(call, serial, abort_code),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -896,6 +955,66 @@ TRACE_EVENT(rxrpc_rx_abort,
__entry->abort_code)
);
+TRACE_EVENT(rxrpc_rx_challenge,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ u32 version, u32 nonce, u32 min_level),
+
+ TP_ARGS(conn, serial, version, nonce, min_level),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, nonce)
+ __field(u32, min_level)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->version = version;
+ __entry->nonce = nonce;
+ __entry->min_level = min_level;
+ ),
+
+ TP_printk("C=%08x CHALLENGE %08x v=%x n=%x ml=%x",
+ __entry->conn,
+ __entry->serial,
+ __entry->version,
+ __entry->nonce,
+ __entry->min_level)
+ );
+
+TRACE_EVENT(rxrpc_rx_response,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ u32 version, u32 kvno, u32 ticket_len),
+
+ TP_ARGS(conn, serial, version, kvno, ticket_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, kvno)
+ __field(u32, ticket_len)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->version = version;
+ __entry->kvno = kvno;
+ __entry->ticket_len = ticket_len;
+ ),
+
+ TP_printk("C=%08x RESPONSE %08x v=%x kvno=%x tl=%x",
+ __entry->conn,
+ __entry->serial,
+ __entry->version,
+ __entry->kvno,
+ __entry->ticket_len)
+ );
+
TRACE_EVENT(rxrpc_rx_rwind_change,
TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
u32 rwind, bool wake),
@@ -903,10 +1022,10 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
TP_ARGS(call, serial, rwind, wake),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, rwind )
- __field(bool, wake )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, rwind)
+ __field(bool, wake)
),
TP_fast_assign(
@@ -930,9 +1049,9 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_ARGS(call_id, whdr, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_tx_point, where )
- __field_struct(struct rxrpc_wire_header, whdr )
+ __field(unsigned int, call)
+ __field(enum rxrpc_tx_point, where)
+ __field_struct(struct rxrpc_wire_header, whdr)
),
TP_fast_assign(
@@ -957,19 +1076,18 @@ TRACE_EVENT(rxrpc_tx_packet,
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+ rxrpc_serial_t serial, unsigned int flags, bool lose),
- TP_ARGS(call, seq, serial, flags, retrans, lose),
+ TP_ARGS(call, seq, serial, flags, lose),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u32, cid )
- __field(u32, call_id )
- __field(u8, flags )
- __field(bool, retrans )
- __field(bool, lose )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(u16, flags)
+ __field(bool, lose)
),
TP_fast_assign(
@@ -979,7 +1097,6 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
- __entry->retrans = retrans;
__entry->lose = lose;
),
@@ -989,25 +1106,26 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->call_id,
__entry->serial,
__entry->seq,
- __entry->flags,
- __entry->retrans ? " *RETRANS*" : "",
+ __entry->flags & RXRPC_TXBUF_WIRE_FLAGS,
+ __entry->flags & RXRPC_TXBUF_RESENT ? " *RETRANS*" : "",
__entry->lose ? " *LOSE*" : "")
);
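The single flags field now carries both the on-the-wire packet flags and private transmit-buffer state, which the printk separates with masks. A sketch of the packing, assuming (as the mask names suggest) that the wire flags occupy the low byte and RXRPC_TXBUF_RESENT is a private bit above them:

	/* assumed layout; the real masks live in rxrpc's private headers */
	#define EX_TXBUF_WIRE_FLAGS	0x00ff	/* bits copied to the wire header */
	#define EX_TXBUF_RESENT		0x0100	/* private: already retransmitted */

	static unsigned char example_wire_flags(unsigned short flags)
	{
		return flags & EX_TXBUF_WIRE_FLAGS;
	}
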
TRACE_EVENT(rxrpc_tx_ack,
TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
- u8 reason, u8 n_acks),
+ u8 reason, u8 n_acks, u16 rwind),
- TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, ack_first )
- __field(rxrpc_serial_t, ack_serial )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, ack_first)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(u8, reason)
+ __field(u8, n_acks)
+ __field(u16, rwind)
),
TP_fast_assign(
@@ -1017,15 +1135,17 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->ack_serial = ack_serial;
__entry->reason = reason;
__entry->n_acks = n_acks;
+ __entry->rwind = rwind;
),
- TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_first,
__entry->ack_serial,
- __entry->n_acks)
+ __entry->n_acks,
+ __entry->rwind)
);
TRACE_EVENT(rxrpc_receive,
@@ -1035,12 +1155,12 @@ TRACE_EVENT(rxrpc_receive,
TP_ARGS(call, why, serial, seq),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_receive_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, top )
+ __field(unsigned int, call)
+ __field(enum rxrpc_receive_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, window)
+ __field(rxrpc_seq_t, wtop)
),
TP_fast_assign(
@@ -1048,8 +1168,8 @@ TRACE_EVENT(rxrpc_receive,
__entry->why = why;
__entry->serial = serial;
__entry->seq = seq;
- __entry->hard_ack = call->rx_hard_ack;
- __entry->top = call->rx_top;
+ __entry->window = call->ackr_window;
+ __entry->wtop = call->ackr_wtop;
),
TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
@@ -1057,11 +1177,35 @@ TRACE_EVENT(rxrpc_receive,
__print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
__entry->seq,
- __entry->hard_ack,
- __entry->top)
+ __entry->window,
+ __entry->wtop)
);
TRACE_EVENT(rxrpc_recvmsg,
+ TP_PROTO(unsigned int call_debug_id, enum rxrpc_recvmsg_trace why,
+ int ret),
+
+ TP_ARGS(call_debug_id, why, ret),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call_debug_id;
+ __entry->why = why;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%08x %s ret=%d",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_recvmsg_traces),
+ __entry->ret)
+ );
+
+TRACE_EVENT(rxrpc_recvdata,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
rxrpc_seq_t seq, unsigned int offset, unsigned int len,
int ret),
@@ -1069,12 +1213,12 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_ARGS(call, why, seq, offset, len, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(rxrpc_seq_t, seq )
- __field(unsigned int, offset )
- __field(unsigned int, len )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, offset)
+ __field(unsigned int, len)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1102,10 +1246,10 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_tx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_tx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
),
TP_fast_assign(
@@ -1131,13 +1275,13 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_rx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
- __field(rxrpc_serial_t, resp_serial )
- __field(u32, rtt )
- __field(u32, rto )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_rx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ __field(u32, rtt)
+ __field(u32, rto)
),
TP_fast_assign(
@@ -1160,49 +1304,112 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->rto)
);
-TRACE_EVENT(rxrpc_timer,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- unsigned long now),
+TRACE_EVENT(rxrpc_timer_set,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, delay, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x %s to=%lld",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces),
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_exp,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
- TP_ARGS(call, why, now),
+ TP_ARGS(call, delay, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_timer_trace, why )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(ktime_t, delay)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->now = now;
- __entry->ack_at = call->ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
+ __entry->delay = delay;
),
- TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s to=%lld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_can,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%08x %s",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces))
+ );
+
+TRACE_EVENT(rxrpc_timer_restart,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay, unsigned long delayj),
+
+ TP_ARGS(call, delay, delayj),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(unsigned long, delayj)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->delayj = delayj;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x to=%lld j=%ld",
+ __entry->call,
+ ktime_to_us(__entry->delay),
+ __entry->delayj)
+ );
+
+TRACE_EVENT(rxrpc_timer_expired,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ ),
+
+ TP_printk("c=%08x EXPIRED",
+ __entry->call)
);
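The reworked timer events record the delay as a ktime_t, which the kernel defines as a signed 64-bit nanosecond count; the ktime_to_us() in the printks above scales it to microseconds. A minimal userspace model of that conversion:

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t ktime_t;	/* nanoseconds, as in the kernel */

	static int64_t ktime_to_us(ktime_t kt)
	{
		return kt / 1000;
	}

	int main(void)
	{
		ktime_t delay = 20 * 1000 * 1000;	/* a 20ms timer */

		printf("to=%lld\n", (long long)ktime_to_us(delay)); /* to=20000 */
		return 0;
	}
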
TRACE_EVENT(rxrpc_rx_lose,
@@ -1211,7 +1418,7 @@ TRACE_EVENT(rxrpc_rx_lose,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -1229,20 +1436,15 @@ TRACE_EVENT(rxrpc_rx_lose,
TRACE_EVENT(rxrpc_propose_ack,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
- u8 ack_reason, rxrpc_serial_t serial, bool immediate,
- bool background, enum rxrpc_propose_ack_outcome outcome),
+ u8 ack_reason, rxrpc_serial_t serial),
- TP_ARGS(call, why, ack_reason, serial, immediate, background,
- outcome),
+ TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
- __field(bool, immediate )
- __field(bool, background )
- __field(enum rxrpc_propose_ack_outcome, outcome )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1250,46 +1452,96 @@ TRACE_EVENT(rxrpc_propose_ack,
__entry->why = why;
__entry->serial = serial;
__entry->ack_reason = ack_reason;
- __entry->immediate = immediate;
- __entry->background = background;
- __entry->outcome = outcome;
),
- TP_printk("c=%08x %s %s r=%08x i=%u b=%u%s",
+ TP_printk("c=%08x %s %s r=%08x",
__entry->call,
__print_symbolic(__entry->why, rxrpc_propose_ack_traces),
__print_symbolic(__entry->ack_reason, rxrpc_ack_names),
- __entry->serial,
- __entry->immediate,
- __entry->background,
- __print_symbolic(__entry->outcome, rxrpc_propose_ack_outcomes))
+ __entry->serial)
+ );
+
+TRACE_EVENT(rxrpc_send_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial),
+
+ TP_ARGS(call, why, ack_reason, serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ ),
+
+ TP_printk("c=%08x %s %s r=%08x",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_propose_ack_traces),
+ __print_symbolic(__entry->ack_reason, rxrpc_ack_names),
+ __entry->serial)
+ );
+
+TRACE_EVENT(rxrpc_drop_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial, bool nobuf),
+
+ TP_ARGS(call, why, ack_reason, serial, nobuf),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
+ __field(bool, nobuf)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ __entry->nobuf = nobuf;
+ ),
+
+ TP_printk("c=%08x %s %s r=%08x nbf=%u",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_propose_ack_traces),
+ __print_symbolic(__entry->ack_reason, rxrpc_ack_names),
+ __entry->serial, __entry->nobuf)
);
TRACE_EVENT(rxrpc_retransmit,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
- s64 expiry),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, ktime_t expiry),
- TP_ARGS(call, seq, annotation, expiry),
+ TP_ARGS(call, seq, serial, expiry),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(u8, annotation )
- __field(s64, expiry )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(ktime_t, expiry)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->seq = seq;
- __entry->annotation = annotation;
+ __entry->serial = serial;
__entry->expiry = expiry;
),
- TP_printk("c=%08x q=%x a=%02x xp=%lld",
+ TP_printk("c=%08x q=%x r=%x xp=%lld",
__entry->call,
__entry->seq,
- __entry->annotation,
- __entry->expiry)
+ __entry->serial,
+ ktime_to_us(__entry->expiry))
);
TRACE_EVENT(rxrpc_congest,
@@ -1299,26 +1551,26 @@ TRACE_EVENT(rxrpc_congest,
TP_ARGS(call, summary, ack_serial, change),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_change, change )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, top )
- __field(rxrpc_seq_t, lowest_nak )
- __field(rxrpc_serial_t, ack_serial )
- __field_struct(struct rxrpc_ack_summary, sum )
+ __field(unsigned int, call)
+ __field(enum rxrpc_congest_change, change)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, top)
+ __field(rxrpc_seq_t, lowest_nak)
+ __field(rxrpc_serial_t, ack_serial)
+ __field_struct(struct rxrpc_ack_summary, sum)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->change = change;
- __entry->hard_ack = call->tx_hard_ack;
+ __entry->hard_ack = call->acks_hard_ack;
__entry->top = call->tx_top;
__entry->lowest_nak = call->acks_lowest_nak;
__entry->ack_serial = ack_serial;
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -1326,9 +1578,9 @@ TRACE_EVENT(rxrpc_congest,
__print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
__entry->sum.cwnd,
__entry->sum.ssthresh,
- __entry->sum.nr_acks, __entry->sum.nr_nacks,
- __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
- __entry->sum.nr_rot_new_acks,
+ __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
+ __entry->sum.nr_new_acks,
+ __entry->sum.nr_new_nacks,
__entry->top - __entry->hard_ack,
__entry->sum.cumulative_acks,
__entry->sum.dup_acks,
@@ -1337,34 +1589,52 @@ TRACE_EVENT(rxrpc_congest,
__entry->sum.retrans_timeo ? " rTxTo" : "")
);
-TRACE_EVENT(rxrpc_disconnect_call,
- TP_PROTO(struct rxrpc_call *call),
+TRACE_EVENT(rxrpc_reset_cwnd,
+ TP_PROTO(struct rxrpc_call *call, ktime_t now),
- TP_ARGS(call),
+ TP_ARGS(call, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(enum rxrpc_congest_mode, mode)
+ __field(unsigned short, cwnd)
+ __field(unsigned short, extra)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, prepared)
+ __field(ktime_t, since_last_tx)
+ __field(bool, has_data)
),
TP_fast_assign(
- __entry->call = call->debug_id;
- __entry->abort_code = call->abort_code;
+ __entry->call = call->debug_id;
+ __entry->mode = call->cong_mode;
+ __entry->cwnd = call->cong_cwnd;
+ __entry->extra = call->cong_extra;
+ __entry->hard_ack = call->acks_hard_ack;
+ __entry->prepared = call->tx_prepared - call->tx_bottom;
+ __entry->since_last_tx = ktime_sub(now, call->tx_last_sent);
+ __entry->has_data = !list_empty(&call->tx_sendmsg);
),
- TP_printk("c=%08x ab=%08x",
+ TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u",
__entry->call,
- __entry->abort_code)
+ __entry->hard_ack,
+ __print_symbolic(__entry->mode, rxrpc_congest_modes),
+ __entry->cwnd,
+ __entry->extra,
+ __entry->prepared,
+ ktime_to_ns(__entry->since_last_tx),
+ __entry->has_data)
);
-TRACE_EVENT(rxrpc_improper_term,
+TRACE_EVENT(rxrpc_disconnect_call,
TP_PROTO(struct rxrpc_call *call),
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1377,28 +1647,24 @@ TRACE_EVENT(rxrpc_improper_term,
__entry->abort_code)
);
-TRACE_EVENT(rxrpc_rx_eproto,
- TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
- const char *why),
+TRACE_EVENT(rxrpc_improper_term,
+ TP_PROTO(struct rxrpc_call *call),
- TP_ARGS(call, serial, why),
+ TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(const char *, why )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
- __entry->call = call ? call->debug_id : 0;
- __entry->serial = serial;
- __entry->why = why;
+ __entry->call = call->debug_id;
+ __entry->abort_code = call->abort_code;
),
- TP_printk("c=%08x EPROTO %08x %s",
+ TP_printk("c=%08x ab=%08x",
__entry->call,
- __entry->serial,
- __entry->why)
+ __entry->abort_code)
);
TRACE_EVENT(rxrpc_connect_call,
@@ -1407,10 +1673,11 @@ TRACE_EVENT(rxrpc_connect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned long, user_call_ID )
- __field(u32, cid )
- __field(u32, call_id )
+ __field(unsigned int, call)
+ __field(unsigned long, user_call_ID)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1418,36 +1685,42 @@ TRACE_EVENT(rxrpc_connect_call,
__entry->user_call_ID = call->user_call_ID;
__entry->cid = call->cid;
__entry->call_id = call->call_id;
+ __entry->srx = call->dest_srx;
),
- TP_printk("c=%08x u=%p %08x:%08x",
+ TP_printk("c=%08x u=%p %08x:%08x dst=%pISp",
__entry->call,
(void *)__entry->user_call_ID,
__entry->cid,
- __entry->call_id)
+ __entry->call_id,
+ &__entry->srx.transport)
);
TRACE_EVENT(rxrpc_resend,
- TP_PROTO(struct rxrpc_call *call, int ix),
+ TP_PROTO(struct rxrpc_call *call, struct sk_buff *ack),
- TP_ARGS(call, ix),
+ TP_ARGS(call, ack),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ix )
- __array(u8, anno, 64 )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, transmitted)
+ __field(rxrpc_serial_t, ack_serial)
),
TP_fast_assign(
+ struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL;
__entry->call = call->debug_id;
- __entry->ix = ix;
- memcpy(__entry->anno, call->rxtx_annotations, 64);
+ __entry->seq = call->acks_hard_ack;
+ __entry->transmitted = call->tx_transmitted;
+ __entry->ack_serial = sp ? sp->hdr.serial : 0;
),
- TP_printk("c=%08x ix=%u a=%64phN",
+ TP_printk("c=%08x r=%x q=%x tq=%x",
__entry->call,
- __entry->ix,
- __entry->anno)
+ __entry->ack_serial,
+ __entry->seq,
+ __entry->transmitted)
);
TRACE_EVENT(rxrpc_rx_icmp,
@@ -1457,9 +1730,9 @@ TRACE_EVENT(rxrpc_rx_icmp,
TP_ARGS(peer, ee, srx),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field_struct(struct sock_extended_err, ee )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, peer)
+ __field_struct(struct sock_extended_err, ee)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1486,10 +1759,10 @@ TRACE_EVENT(rxrpc_tx_fail,
TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
- __field(int, ret )
- __field(enum rxrpc_tx_point, where )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(int, ret)
+ __field(enum rxrpc_tx_point, where)
),
TP_fast_assign(
@@ -1512,13 +1785,13 @@ TRACE_EVENT(rxrpc_call_reset,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_serial_t, call_serial )
- __field(rxrpc_serial_t, conn_serial )
- __field(rxrpc_seq_t, tx_seq )
- __field(rxrpc_seq_t, rx_seq )
+ __field(unsigned int, debug_id)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_serial_t, call_serial)
+ __field(rxrpc_serial_t, conn_serial)
+ __field(rxrpc_seq_t, tx_seq)
+ __field(rxrpc_seq_t, rx_seq)
),
TP_fast_assign(
@@ -1527,8 +1800,8 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->call_id = call->call_id;
__entry->call_serial = call->rx_serial;
__entry->conn_serial = call->conn->hi_serial;
- __entry->tx_seq = call->tx_hard_ack;
- __entry->rx_seq = call->ackr_seen;
+ __entry->tx_seq = call->acks_hard_ack;
+ __entry->rx_seq = call->rx_highest_seq;
),
TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
@@ -1544,8 +1817,8 @@ TRACE_EVENT(rxrpc_notify_socket,
TP_ARGS(debug_id, serial),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
),
TP_fast_assign(
@@ -1567,8 +1840,8 @@ TRACE_EVENT(rxrpc_rx_discard_ack,
prev_pkt, call_ackr_prev),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
__field(rxrpc_seq_t, first_soft_ack)
__field(rxrpc_seq_t, call_ackr_first)
__field(rxrpc_seq_t, prev_pkt)
@@ -1593,6 +1866,133 @@ TRACE_EVENT(rxrpc_rx_discard_ack,
__entry->call_ackr_prev)
);
+TRACE_EVENT(rxrpc_req_ack,
+ TP_PROTO(unsigned int call_debug_id, rxrpc_seq_t seq,
+ enum rxrpc_req_ack_trace why),
+
+ TP_ARGS(call_debug_id, seq, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_req_ack_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call_debug_id;
+ __entry->seq = seq;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%08x q=%08x REQ-%s",
+ __entry->call_debug_id,
+ __entry->seq,
+ __print_symbolic(__entry->why, rxrpc_req_ack_traces))
+ );
+
+TRACE_EVENT(rxrpc_txbuf,
+ TP_PROTO(unsigned int debug_id,
+ unsigned int call_debug_id, rxrpc_seq_t seq,
+ int ref, enum rxrpc_txbuf_trace what),
+
+ TP_ARGS(debug_id, call_debug_id, seq, ref, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, ref)
+ __field(enum rxrpc_txbuf_trace, what)
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->call_debug_id = call_debug_id;
+ __entry->seq = seq;
+ __entry->ref = ref;
+ __entry->what = what;
+ ),
+
+ TP_printk("B=%08x c=%08x q=%08x %s r=%d",
+ __entry->debug_id,
+ __entry->call_debug_id,
+ __entry->seq,
+ __print_symbolic(__entry->what, rxrpc_txbuf_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(rxrpc_poke_call,
+ TP_PROTO(struct rxrpc_call *call, bool busy,
+ enum rxrpc_call_poke_trace what),
+
+ TP_ARGS(call, busy, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(bool, busy)
+ __field(enum rxrpc_call_poke_trace, what)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->busy = busy;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x %s%s",
+ __entry->call_debug_id,
+ __print_symbolic(__entry->what, rxrpc_call_poke_traces),
+ __entry->busy ? "!" : "")
+ );
+
+TRACE_EVENT(rxrpc_call_poked,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ ),
+
+ TP_printk("c=%08x",
+ __entry->call_debug_id)
+ );
+
+TRACE_EVENT(rxrpc_sack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ unsigned int sack, enum rxrpc_sack_trace what),
+
+ TP_ARGS(call, seq, sack, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, sack)
+ __field(enum rxrpc_sack_trace, what)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->seq = seq;
+ __entry->sack = sack;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x q=%08x %s k=%x",
+ __entry->call_debug_id,
+ __entry->seq,
+ __print_symbolic(__entry->what, rxrpc_sack_traces),
+ __entry->sack)
+ );
+
+#undef EM
+#undef E_
+
+#endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fec25b9cfbaf..dbb01b4b7451 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -5,6 +5,7 @@
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
+#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
@@ -51,6 +52,89 @@ TRACE_EVENT(sched_kthread_stop_ret,
TP_printk("ret=%d", __entry->ret)
);
+/**
+ * sched_kthread_work_queue_work - called when a work gets queued
+ * @worker: pointer to the kthread_worker
+ * @work: pointer to struct kthread_work
+ *
+ * This event occurs when a work item is queued immediately, or once a
+ * delayed work item is actually queued (i.e. once its delay has
+ * elapsed).
+ */
+TRACE_EVENT(sched_kthread_work_queue_work,
+
+ TP_PROTO(struct kthread_worker *worker,
+ struct kthread_work *work),
+
+ TP_ARGS(worker, work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function )
+ __field( void *, worker )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = work->func;
+ __entry->worker = worker;
+ ),
+
+ TP_printk("work struct=%p function=%ps worker=%p",
+ __entry->work, __entry->function, __entry->worker)
+);
+
+/**
+ * sched_kthread_work_execute_start - called immediately before the work callback
+ * @work: pointer to struct kthread_work
+ *
+ * Allows tracking of kthread work execution.
+ */
+TRACE_EVENT(sched_kthread_work_execute_start,
+
+ TP_PROTO(struct kthread_work *work),
+
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = work->func;
+ ),
+
+ TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
+
+/**
+ * sched_kthread_work_execute_end - called immediately after the work callback
+ * @work: pointer to struct kthread_work
+ * @function: pointer to the work callback
+ *
+ * Allows tracking of kthread work execution.
+ */
+TRACE_EVENT(sched_kthread_work_execute_end,
+
+ TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
+
+ TP_ARGS(work, function),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = function;
+ ),
+
+ TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
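
Taken together, these three events bracket a kthread work item's life cycle. A hedged sketch of a driver-side user (kthread_init_work(), kthread_queue_work() and kthread_flush_work() are the real <linux/kthread.h> API; the function names here are illustrative):

	static void example_work_fn(struct kthread_work *work)
	{
		/* sched_kthread_work_execute_start fires just before entry,
		 * sched_kthread_work_execute_end just after return */
	}

	static void example_queue(struct kthread_worker *worker)
	{
		struct kthread_work work;

		kthread_init_work(&work, example_work_fn);
		kthread_queue_work(worker, &work); /* queue_work event fires */
		kthread_flush_work(&work); /* don't let 'work' leave scope early */
	}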
+
/*
* Tracepoint for waking up a task:
*/
@@ -64,7 +148,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
- __field( int, success )
__field( int, target_cpu )
),
@@ -72,7 +155,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
- __entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
@@ -90,7 +172,7 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking,
TP_ARGS(p));
/*
- * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
* It is not always called from the waking context.
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
@@ -105,7 +187,9 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt,
+ unsigned int prev_state,
+ struct task_struct *p)
{
unsigned int state;
@@ -126,7 +210,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
* it for left shift operation to get the correct task->state
* mapping.
*/
- state = task_state_index(p);
+ state = __task_state_index(prev_state, p->exit_state);
return state ? (1 << (state - 1)) : state;
}
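__task_state_index() collapses the scheduler's state bits (now taken from the prev_state argument plus exit_state) into a small 1-based index, and the shift above turns that index back into the one-hot mask that userspace tools decode; index 0 (TASK_RUNNING) stays 0. A standalone model:

	#include <stdio.h>

	/* model of: state ? (1 << (state - 1)) : state */
	static long index_to_state_mask(unsigned int idx)
	{
		return idx ? (1L << (idx - 1)) : 0;
	}

	int main(void)
	{
		/* index 1 (TASK_INTERRUPTIBLE) -> 0x1, 2 -> 0x2, 3 -> 0x4 */
		printf("%#lx %#lx %#lx\n", index_to_state_mask(1),
		       index_to_state_mask(2), index_to_state_mask(3));
		return 0;
	}
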
@@ -139,9 +223,10 @@ TRACE_EVENT(sched_switch,
TP_PROTO(bool preempt,
struct task_struct *prev,
- struct task_struct *next),
+ struct task_struct *next,
+ unsigned int prev_state),
- TP_ARGS(preempt, prev, next),
+ TP_ARGS(preempt, prev, next, prev_state),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
@@ -157,7 +242,7 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
- __entry->prev_state = __trace_sched_switch_state(preempt, prev);
+ __entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
@@ -282,7 +367,7 @@ TRACE_EVENT(sched_process_wait,
);
/*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
*/
TRACE_EVENT(sched_process_fork,
@@ -408,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
- TP_ARGS(tsk, __perf_count(runtime), vruntime),
+ TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
- __field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
- __entry->vruntime = vruntime;
),
- TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
__entry->comm, __entry->pid,
- (unsigned long long)__entry->runtime,
- (unsigned long long)__entry->vruntime)
+ (unsigned long long)__entry->runtime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime));
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
+ TP_ARGS(tsk, runtime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
@@ -579,6 +661,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
+#ifdef CONFIG_NUMA_BALANCING
+#define NUMAB_SKIP_REASON \
+ EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
+ EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
+ EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \
+ EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \
+ EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \
+ EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \
+ EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" )
+
+/* Redefine for export. */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+NUMAB_SKIP_REASON
+
+/* Redefine for symbolic printing. */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(sched_skip_vma_numa,
+
+ TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
+ enum numa_vmaskip_reason reason),
+
+ TP_ARGS(mm, vma, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, numa_scan_offset)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ __field(enum numa_vmaskip_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->numa_scan_offset = mm->numa_scan_offset;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
+ __entry->numa_scan_offset,
+ __entry->vm_start,
+ __entry->vm_end,
+ __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
+);
+#endif /* CONFIG_NUMA_BALANCING */
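
The EM()/EMe() dance above is the standard X-macro trick: one reason list is expanded twice, once to register the enum values with the tracing core and once to build the value-to-string table for __print_symbolic(). A compact userspace model with a shortened reason list:

	#include <stdio.h>

	#define SKIP_REASONS \
		EM(SKIP_UNSUITABLE, "unsuitable") \
		EMe(SKIP_SHARED_RO, "shared_ro")

	#define EM(a, b)  a,
	#define EMe(a, b) a
	enum skip_reason { SKIP_REASONS };
	#undef EM
	#undef EMe

	#define EM(a, b)  { a, b },
	#define EMe(a, b) { a, b }
	static const struct { int val; const char *name; } skip_names[] = {
		SKIP_REASONS
	};
	#undef EM
	#undef EMe

	int main(void)
	{
		printf("%s\n", skip_names[SKIP_SHARED_RO].name); /* "shared_ro" */
		return 0;
	}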
/*
* Tracepoint for waking a polling cpu without an IPI.
@@ -630,6 +764,10 @@ DECLARE_TRACE(pelt_se_tp,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
+DECLARE_TRACE(sched_cpu_capacity_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
+
DECLARE_TRACE(sched_overutilized_tp,
TP_PROTO(struct root_domain *rd, bool overutilized),
TP_ARGS(rd, overutilized));
@@ -646,6 +784,11 @@ DECLARE_TRACE(sched_update_nr_running_tp,
TP_PROTO(struct rq *rq, int change),
TP_ARGS(rq, change));
+DECLARE_TRACE(sched_compute_energy_tp,
+ TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
+ unsigned long max_util, unsigned long busy_time),
+ TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f3a4b4d60714..422c1ad9484d 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -7,6 +7,31 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(scmi_fc_call,
+ TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2),
+ TP_ARGS(protocol_id, msg_id, res_id, val1, val2),
+
+ TP_STRUCT__entry(
+ __field(u8, protocol_id)
+ __field(u8, msg_id)
+ __field(u32, res_id)
+ __field(u32, val1)
+ __field(u32, val2)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol_id = protocol_id;
+ __entry->msg_id = msg_id;
+ __entry->res_id = res_id;
+ __entry->val1 = val1;
+ __entry->val2 = val2;
+ ),
+
+ TP_printk("pt=%02X msg_id=%02X res_id:%u vals=%u:%u",
+ __entry->protocol_id, __entry->msg_id,
+ __entry->res_id, __entry->val1, __entry->val2)
+);
+
TRACE_EVENT(scmi_xfer_begin,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
bool poll),
@@ -28,9 +53,37 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->poll = poll;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->poll)
+);
+
+TRACE_EVENT(scmi_xfer_response_wait,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u32 timeout, bool poll),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u32, timeout)
+ __field(bool, poll)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->timeout = timeout;
+ __entry->poll = poll;
+ ),
+
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X tmo_ms=%u poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->timeout, __entry->poll)
);
TRACE_EVENT(scmi_xfer_end,
@@ -54,9 +107,9 @@ TRACE_EVENT(scmi_xfer_end,
__entry->status = status;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->status)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->status)
);
TRACE_EVENT(scmi_rx_done,
@@ -80,9 +133,46 @@ TRACE_EVENT(scmi_rx_done,
__entry->msg_type = msg_type;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->msg_type)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X msg_type=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->msg_type)
+);
+
+TRACE_EVENT(scmi_msg_dump,
+ TP_PROTO(int id, u8 channel_id, u8 protocol_id, u8 msg_id,
+ unsigned char *tag, u16 seq, int status,
+ void *buf, size_t len),
+ TP_ARGS(id, channel_id, protocol_id, msg_id, tag, seq, status,
+ buf, len),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(u8, channel_id)
+ __field(u8, protocol_id)
+ __field(u8, msg_id)
+ __array(char, tag, 5)
+ __field(u16, seq)
+ __field(int, status)
+ __field(size_t, len)
+ __dynamic_array(unsigned char, cmd, len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->channel_id = channel_id;
+ __entry->protocol_id = protocol_id;
+ __entry->msg_id = msg_id;
+ strscpy(__entry->tag, tag, 5);
+ __entry->seq = seq;
+ __entry->status = status;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(cmd), buf, __entry->len);
+ ),
+
+ TP_printk("id=%d ch=%02X pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+ __entry->id, __entry->channel_id, __entry->protocol_id,
+ __entry->tag, __entry->msg_id, __entry->seq, __entry->status,
+ __print_hex_str(__get_dynamic_array(cmd), __entry->len))
);
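
scmi_msg_dump snapshots a variable-length payload with __dynamic_array() at trace time and renders it with __print_hex_str(), which prints the bytes as one contiguous hex string. A userspace model of that rendering:

	#include <stdio.h>
	#include <stddef.h>

	static void print_hex_str(const unsigned char *buf, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			printf("%02x", buf[i]);	/* contiguous, no separators */
	}

	int main(void)
	{
		const unsigned char cmd[] = { 0xde, 0xad, 0xbe, 0xef };

		print_hex_str(cmd, sizeof(cmd));	/* prints "deadbeef" */
		putchar('\n');
		return 0;
	}
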
#endif /* _TRACE_SCMI_H */
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index f624969a4f14..8e2d9b1b0e77 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -124,50 +124,6 @@
scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
-#define scsi_driverbyte_name(result) { result, #result }
-#define show_driverbyte_name(val) \
- __print_symbolic(val, \
- scsi_driverbyte_name(DRIVER_OK), \
- scsi_driverbyte_name(DRIVER_BUSY), \
- scsi_driverbyte_name(DRIVER_SOFT), \
- scsi_driverbyte_name(DRIVER_MEDIA), \
- scsi_driverbyte_name(DRIVER_ERROR), \
- scsi_driverbyte_name(DRIVER_INVALID), \
- scsi_driverbyte_name(DRIVER_TIMEOUT), \
- scsi_driverbyte_name(DRIVER_HARD), \
- scsi_driverbyte_name(DRIVER_SENSE))
-
-#define scsi_msgbyte_name(result) { result, #result }
-#define show_msgbyte_name(val) \
- __print_symbolic(val, \
- scsi_msgbyte_name(COMMAND_COMPLETE), \
- scsi_msgbyte_name(EXTENDED_MESSAGE), \
- scsi_msgbyte_name(SAVE_POINTERS), \
- scsi_msgbyte_name(RESTORE_POINTERS), \
- scsi_msgbyte_name(DISCONNECT), \
- scsi_msgbyte_name(INITIATOR_ERROR), \
- scsi_msgbyte_name(ABORT_TASK_SET), \
- scsi_msgbyte_name(MESSAGE_REJECT), \
- scsi_msgbyte_name(NOP), \
- scsi_msgbyte_name(MSG_PARITY_ERROR), \
- scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
- scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
- scsi_msgbyte_name(TARGET_RESET), \
- scsi_msgbyte_name(ABORT_TASK), \
- scsi_msgbyte_name(CLEAR_TASK_SET), \
- scsi_msgbyte_name(INITIATE_RECOVERY), \
- scsi_msgbyte_name(RELEASE_RECOVERY), \
- scsi_msgbyte_name(CLEAR_ACA), \
- scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
- scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
- scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
- scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
- scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
- scsi_msgbyte_name(ACA), \
- scsi_msgbyte_name(QAS_REQUEST), \
- scsi_msgbyte_name(BUS_DEVICE_RESET), \
- scsi_msgbyte_name(ABORT))
-
#define scsi_statusbyte_name(result) { result, #result }
#define show_statusbyte_name(val) \
__print_symbolic(val, \
@@ -210,6 +166,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__field( unsigned int, lun )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag )
+ __field( int, scheduler_tag )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -223,6 +181,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__entry->lun = cmd->device->lun;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
@@ -230,11 +190,11 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " prot_op=%s cmnd=(%s %s raw=%s)",
+ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
@@ -253,6 +213,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__field( int, rtn )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag )
+ __field( int, scheduler_tag )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -267,6 +229,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__entry->rtn = rtn;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
@@ -274,11 +238,12 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
+ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
+ " rtn=%d",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
__entry->rtn)
@@ -298,13 +263,20 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__field( int, result )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag )
+ __field( int, scheduler_tag )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ __field( u8, sense_key )
+ __field( u8, asc )
+ __field( u8, ascq )
),
TP_fast_assign(
+ struct scsi_sense_hdr sshdr;
+
__entry->host_no = cmd->device->host->host_no;
__entry->channel = cmd->device->channel;
__entry->id = cmd->device->id;
@@ -312,25 +284,39 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__entry->result = cmd->result;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ if (cmd->sense_buffer && SCSI_SENSE_VALID(cmd) &&
+ scsi_command_normalize_sense(cmd, &sshdr)) {
+ __entry->sense_key = sshdr.sense_key;
+ __entry->asc = sshdr.asc;
+ __entry->ascq = sshdr.ascq;
+ } else {
+ __entry->sense_key = 0;
+ __entry->asc = 0;
+ __entry->ascq = 0;
+ }
),
- TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
- "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
- "%s host=%s message=%s status=%s)",
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \
+ "prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \
+ "result=(driver=%s host=%s message=%s status=%s) "
+ "sense=(key=%#x asc=%#x ascq=%#x)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- show_driverbyte_name(((__entry->result) >> 24) & 0xff),
+ "DRIVER_OK",
show_hostbyte_name(((__entry->result) >> 16) & 0xff),
- show_msgbyte_name(((__entry->result) >> 8) & 0xff),
- show_statusbyte_name(__entry->result & 0xff))
+ "COMMAND_COMPLETE",
+ show_statusbyte_name(__entry->result & 0xff),
+ __entry->sense_key, __entry->asc, __entry->ascq)
);
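
The done/timeout template now decodes key/asc/ascq via scsi_command_normalize_sense(). For fixed-format sense data (response codes 0x70/0x71) that normalization amounts to the extraction below; a simplified sketch that ignores the descriptor format the kernel helper also understands:

	#include <stdint.h>

	struct sense_hdr { uint8_t sense_key, asc, ascq; };

	static int normalize_fixed_sense(const uint8_t *sb, int len,
					 struct sense_hdr *h)
	{
		if (len < 14 || (sb[0] & 0x7f) < 0x70 || (sb[0] & 0x7f) > 0x71)
			return 0;		/* not fixed-format sense */
		h->sense_key = sb[2] & 0x0f;
		h->asc = sb[12];
		h->ascq = sb[13];
		return 1;
	}
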
DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 9e92f22eb086..07e0715628ec 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -9,46 +9,65 @@
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
+#undef FN
+#define FN(reason) TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason);
+DEFINE_DROP_REASON(FN, FN)
+
+#undef FN
+#undef FNe
+#define FN(reason) { SKB_DROP_REASON_##reason, #reason },
+#define FNe(reason) { SKB_DROP_REASON_##reason, #reason }
+
/*
* Tracepoint for free an sk_buff:
*/
TRACE_EVENT(kfree_skb,
- TP_PROTO(struct sk_buff *skb, void *location),
+ TP_PROTO(struct sk_buff *skb, void *location,
+ enum skb_drop_reason reason),
- TP_ARGS(skb, location),
+ TP_ARGS(skb, location, reason),
TP_STRUCT__entry(
- __field( void *, skbaddr )
- __field( void *, location )
- __field( unsigned short, protocol )
+ __field(void *, skbaddr)
+ __field(void *, location)
+ __field(unsigned short, protocol)
+ __field(enum skb_drop_reason, reason)
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->location = location;
__entry->protocol = ntohs(skb->protocol);
+ __entry->reason = reason;
),
- TP_printk("skbaddr=%p protocol=%u location=%p",
- __entry->skbaddr, __entry->protocol, __entry->location)
+ TP_printk("skbaddr=%p protocol=%u location=%pS reason: %s",
+ __entry->skbaddr, __entry->protocol, __entry->location,
+ __print_symbolic(__entry->reason,
+ DEFINE_DROP_REASON(FN, FNe)))
);
+#undef FN
+#undef FNe
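
At print time, __print_symbolic() resolves the recorded reason through a NULL-terminated table of {value, name} pairs (struct trace_print_flags in the kernel). A minimal userspace model of the lookup, with illustrative reason values:

	#include <stdio.h>

	struct trace_print_flags {
		unsigned long mask;
		const char *name;
	};

	static const char *print_symbolic(unsigned long val,
					  const struct trace_print_flags *sym)
	{
		for (; sym->name; sym++)	/* linear scan, ends at NULL name */
			if (sym->mask == val)
				return sym->name;
		return "(unknown)";
	}

	int main(void)
	{
		static const struct trace_print_flags tbl[] = {
			{ 0, "not_specified" }, { 1, "no_socket" }, { 0, NULL },
		};

		printf("%s\n", print_symbolic(1, tbl)); /* prints "no_socket" */
		return 0;
	}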
+
TRACE_EVENT(consume_skb,
- TP_PROTO(struct sk_buff *skb),
+ TP_PROTO(struct sk_buff *skb, void *location),
- TP_ARGS(skb),
+ TP_ARGS(skb, location),
TP_STRUCT__entry(
- __field( void *, skbaddr )
+ __field( void *, skbaddr)
+ __field( void *, location)
),
TP_fast_assign(
__entry->skbaddr = skb;
+ __entry->location = location;
),
- TP_printk("skbaddr=%p", __entry->skbaddr)
+ TP_printk("skbaddr=%p location=%pS", __entry->skbaddr, __entry->location)
);
TRACE_EVENT(skb_copy_datagram_iovec,
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index a966d4b5ab37..fd206a6ab5b8 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(long *, sysctl_mem)
+ __array(long, sysctl_mem, 3)
__field(long, allocated)
__field(int, sysctl_rmem)
__field(int, rmem_alloc)
@@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit,
TP_fast_assign(
strncpy(__entry->name, prot->name, 32);
- __entry->sysctl_mem = prot->sysctl_mem;
+ __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]);
+ __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]);
+ __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]);
__entry->allocated = allocated;
__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
@@ -156,7 +158,7 @@ TRACE_EVENT(inet_sock_set_state,
),
TP_fast_assign(
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
struct in6_addr *pin6;
__be32 *p32;
@@ -201,6 +203,135 @@ TRACE_EVENT(inet_sock_set_state,
show_tcp_state_name(__entry->newstate))
);
+TRACE_EVENT(inet_sk_error_report,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->error = sk->sk_err;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d",
+ show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->error)
+);
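
When the socket is IPv4, the event still fills the 16-byte v6 columns by storing the v4-mapped form ::ffff:a.b.c.d, which is what ipv6_addr_set_v4mapped() produces. A standalone sketch of that layout:

	#include <stdint.h>
	#include <string.h>

	/* build ::ffff:a.b.c.d from a big-endian IPv4 address */
	static void set_v4mapped(uint32_t be_addr, uint8_t v6[16])
	{
		memset(v6, 0, 10);
		v6[10] = 0xff;
		v6[11] = 0xff;
		memcpy(&v6[12], &be_addr, 4);
	}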
+
+TRACE_EVENT(sk_data_ready,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __entry->skaddr = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ip = _RET_IP_;
+ ),
+
+ TP_printk("family=%u protocol=%u func=%ps",
+ __entry->family, __entry->protocol, (void *)__entry->ip)
+);
+
+/*
+ * sock send/recv msg length
+ */
+DECLARE_EVENT_CLASS(sock_msg_length,
+
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags),
+
+ TP_STRUCT__entry(
+ __field(void *, sk)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(int, ret)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ret = ret;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("sk address = %p, family = %s protocol = %s, length = %d, error = %d, flags = 0x%x",
+ __entry->sk, show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ !(__entry->flags & MSG_PEEK) ?
+ (__entry->ret > 0 ? __entry->ret : 0) : 0,
+ __entry->ret < 0 ? __entry->ret : 0,
+ __entry->flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_send_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_recv_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
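
The class folds the one int return value into two printed columns: a byte count when the call succeeded (suppressed for MSG_PEEK reads, which do not consume data) and the negative errno when it failed. The gating logic, extracted into a small sketch:

	#include <sys/socket.h>		/* MSG_PEEK */

	static void split_result(int ret, int flags, int *length, int *error)
	{
		*length = !(flags & MSG_PEEK) ? (ret > 0 ? ret : 0) : 0;
		*error  = ret < 0 ? ret : 0;
	}
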
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sof.h b/include/trace/events/sof.h
new file mode 100644
index 000000000000..21c2a1efb9f6
--- /dev/null
+++ b/include/trace/events/sof.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Noah Klayman <noah.klayman@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sof
+
+#if !defined(_TRACE_SOF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOF_H
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include <sound/sof/stream.h>
+#include "../../../sound/soc/sof/sof-audio.h"
+
+DECLARE_EVENT_CLASS(sof_widget_template,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget),
+ TP_STRUCT__entry(
+ __string(name, swidget->widget->name)
+ __field(int, use_count)
+ ),
+ TP_fast_assign(
+ __assign_str(name, swidget->widget->name);
+ __entry->use_count = swidget->use_count;
+ ),
+ TP_printk("name=%s use_count=%d", __get_str(name), __entry->use_count)
+);
+
+DEFINE_EVENT(sof_widget_template, sof_widget_setup,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget)
+);
+
+DEFINE_EVENT(sof_widget_template, sof_widget_free,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget)
+);
+
+TRACE_EVENT(sof_ipc3_period_elapsed_position,
+ TP_PROTO(struct snd_sof_dev *sdev, struct sof_ipc_stream_posn *posn),
+ TP_ARGS(sdev, posn),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u64, host_posn)
+ __field(u64, dai_posn)
+ __field(u64, wallclock)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->host_posn = posn->host_posn;
+ __entry->dai_posn = posn->dai_posn;
+ __entry->wallclock = posn->wallclock;
+ ),
+ TP_printk("device_name=%s host_posn=%#llx dai_posn=%#llx wallclock=%#llx",
+ __get_str(device_name), __entry->host_posn, __entry->dai_posn,
+ __entry->wallclock)
+);
+
+TRACE_EVENT(sof_pcm_pointer_position,
+ TP_PROTO(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t dma_posn,
+ snd_pcm_uframes_t dai_posn
+ ),
+ TP_ARGS(sdev, spcm, substream, dma_posn, dai_posn),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, pcm_id)
+ __field(int, stream)
+ __field(unsigned long, dma_posn)
+ __field(unsigned long, dai_posn)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->pcm_id = le32_to_cpu(spcm->pcm.pcm_id);
+ __entry->stream = substream->stream;
+ __entry->dma_posn = dma_posn;
+ __entry->dai_posn = dai_posn;
+ ),
+ TP_printk("device_name=%s pcm_id=%d stream=%d dma_posn=%lu dai_posn=%lu",
+ __get_str(device_name), __entry->pcm_id, __entry->stream,
+ __entry->dma_posn, __entry->dai_posn)
+);
+
+TRACE_EVENT(sof_stream_position_ipc_rx,
+ TP_PROTO(struct device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(dev))
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(dev));
+ ),
+ TP_printk("device_name=%s", __get_str(device_name))
+);
+
+TRACE_EVENT(sof_ipc4_fw_config,
+ TP_PROTO(struct snd_sof_dev *sdev, char *key, u32 value),
+ TP_ARGS(sdev, key, value),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __string(key, key)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(key, key);
+ __entry->value = value;
+ ),
+ TP_printk("device_name=%s key=%s value=%d",
+ __get_str(device_name), __get_str(key), __entry->value)
+);
+
+#endif /* _TRACE_SOF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sof_intel.h b/include/trace/events/sof_intel.h
new file mode 100644
index 000000000000..2a77f9d26c0b
--- /dev/null
+++ b/include/trace/events/sof_intel.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Noah Klayman <noah.klayman@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sof_intel
+
+#if !defined(_TRACE_SOF_INTEL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOF_INTEL_H
+#include <linux/tracepoint.h>
+#include <sound/hdaudio.h>
+#include "../../../sound/soc/sof/sof-audio.h"
+
+TRACE_EVENT(sof_intel_hda_irq,
+ TP_PROTO(struct snd_sof_dev *sdev, char *source),
+ TP_ARGS(sdev, source),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __string(source, source)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(source, source);
+ ),
+ TP_printk("device_name=%s source=%s",
+ __get_str(device_name), __get_str(source))
+);
+
+DECLARE_EVENT_CLASS(sof_intel_ipc_firmware_template,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, msg)
+ __field(u32, msg_ext)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->msg = msg;
+ __entry->msg_ext = msg_ext;
+ ),
+ TP_printk("device_name=%s msg=%#x msg_ext=%#x",
+ __get_str(device_name), __entry->msg, __entry->msg_ext)
+);
+
+DEFINE_EVENT(sof_intel_ipc_firmware_template, sof_intel_ipc_firmware_response,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext)
+);
+
+DEFINE_EVENT(sof_intel_ipc_firmware_template, sof_intel_ipc_firmware_initiated,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext)
+);
+
+TRACE_EVENT(sof_intel_D0I3C_updated,
+ TP_PROTO(struct snd_sof_dev *sdev, u8 reg),
+ TP_ARGS(sdev, reg),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u8, reg)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->reg = reg;
+ ),
+ TP_printk("device_name=%s register=%#x",
+ __get_str(device_name), __entry->reg)
+);
+
+TRACE_EVENT(sof_intel_hda_irq_ipc_check,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 irq_status),
+ TP_ARGS(sdev, irq_status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, irq_status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("device_name=%s irq_status=%#x",
+ __get_str(device_name), __entry->irq_status)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_pcm,
+ TP_PROTO(struct snd_sof_dev *sdev,
+ struct hdac_stream *hstream,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t pos
+ ),
+ TP_ARGS(sdev, hstream, substream, pos),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, hstream_index)
+ __field(u32, substream)
+ __field(unsigned long, pos)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->hstream_index = hstream->index;
+ __entry->substream = substream->stream;
+ __entry->pos = pos;
+ ),
+ TP_printk("device_name=%s hstream_index=%d substream=%d pos=%lu",
+ __get_str(device_name), __entry->hstream_index,
+ __entry->substream, __entry->pos)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_stream_status,
+ TP_PROTO(struct device *dev, struct hdac_stream *s, u32 status),
+ TP_ARGS(dev, s, status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(dev))
+ __field(u32, stream)
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(dev));
+ __entry->stream = s->index;
+ __entry->status = status;
+ ),
+ TP_printk("device_name=%s stream=%d status=%#x",
+ __get_str(device_name), __entry->stream, __entry->status)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_check_stream_irq,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 status),
+ TP_ARGS(sdev, status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->status = status;
+ ),
+ TP_printk("device_name=%s status=%#x",
+ __get_str(device_name), __entry->status)
+);
+
+#endif /* _TRACE_SOF_INTEL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
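Note: each TRACE_EVENT() in this header generates a trace_<name>() static inline
that the driver calls; the call compiles to a no-op unless the event is enabled
through tracefs. A minimal sketch of firing the IRQ tracepoint above, where the
handler name, the context cast, and the "ipc" source string are illustrative
assumptions rather than SOF driver code:

	/* Illustrative caller: record which block raised the HDA interrupt. */
	static irqreturn_t example_hda_irq(int irq, void *context)
	{
		struct snd_sof_dev *sdev = context;

		trace_sof_intel_hda_irq(sdev, "ipc");
		return IRQ_WAKE_THREAD;
	}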
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 0dd9171d2ad8..e63d4a24d879 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy,
);
+TRACE_EVENT(spi_setup,
+ TP_PROTO(struct spi_device *spi, int status),
+ TP_ARGS(spi, status),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(unsigned int, bits_per_word)
+ __field(unsigned int, max_speed_hz)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi_get_chipselect(spi, 0);
+ __entry->mode = spi->mode;
+ __entry->bits_per_word = spi->bits_per_word;
+ __entry->max_speed_hz = spi->max_speed_hz;
+ __entry->status = status;
+ ),
+
+ TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d",
+ __entry->bus_num, __entry->chip_select,
+ (__entry->mode & SPI_MODE_X_MASK),
+ (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (__entry->mode & SPI_3WIRE) ? "3wire, " : "",
+ (__entry->mode & SPI_LOOP) ? "loopback, " : "",
+ __entry->bits_per_word, __entry->max_speed_hz,
+ __entry->status)
+);
+
+TRACE_EVENT(spi_set_cs,
+ TP_PROTO(struct spi_device *spi, bool enable),
+ TP_ARGS(spi, enable),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi_get_chipselect(spi, 0);
+ __entry->mode = spi->mode;
+ __entry->enable = enable;
+ ),
+
+ TP_printk("spi%d.%d %s%s",
+ __entry->bus_num, __entry->chip_select,
+ __entry->enable ? "activate" : "deactivate",
+ (__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "")
+);
+
DECLARE_EVENT_CLASS(spi_message,
TP_PROTO(struct spi_message *msg),
@@ -56,7 +113,7 @@ DECLARE_EVENT_CLASS(spi_message,
TP_fast_assign(
__entry->bus_num = msg->spi->controller->bus_num;
- __entry->chip_select = msg->spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(msg->spi, 0);
__entry->msg = msg;
),
@@ -97,7 +154,7 @@ TRACE_EVENT(spi_message_done,
TP_fast_assign(
__entry->bus_num = msg->spi->controller->bus_num;
- __entry->chip_select = msg->spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(msg->spi, 0);
__entry->msg = msg;
__entry->frame = msg->frame_length;
__entry->actual = msg->actual_length;
@@ -110,7 +167,7 @@ TRACE_EVENT(spi_message_done,
);
/*
- * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
+ * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
* that only exists to work with controllers that have SPI_CONTROLLER_MUST_TX or
* SPI_CONTROLLER_MUST_RX.
*/
@@ -140,7 +197,7 @@ DECLARE_EVENT_CLASS(spi_transfer,
TP_fast_assign(
__entry->bus_num = msg->spi->controller->bus_num;
- __entry->chip_select = msg->spi->chip_select;
+ __entry->chip_select = spi_get_chipselect(msg->spi, 0);
__entry->xfer = xfer;
__entry->len = xfer->len;
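Note: the chip_select hunks replace direct field access with the
spi_get_chipselect() accessor, which supports devices wired to multiple
chip-select lines; index 0 names the first (and usually only) CS. A small
sketch of the accessor pattern, with an illustrative wrapper:

	/* Equivalent of the old spi->chip_select read for single-CS devices. */
	static u8 example_first_cs(const struct spi_device *spi)
	{
		return spi_get_chipselect(spi, 0);
	}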
diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h
index 8b60efe18ba6..a6819fd85cdf 100644
--- a/include/trace/events/spmi.h
+++ b/include/trace/events/spmi.h
@@ -21,15 +21,15 @@ TRACE_EVENT(spmi_write_begin,
__field ( u8, sid )
__field ( u16, addr )
__field ( u8, len )
- __dynamic_array ( u8, buf, len + 1 )
+ __dynamic_array ( u8, buf, len )
),
TP_fast_assign(
__entry->opcode = opcode;
__entry->sid = sid;
__entry->addr = addr;
- __entry->len = len + 1;
- memcpy(__get_dynamic_array(buf), buf, len + 1);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]",
@@ -92,7 +92,7 @@ TRACE_EVENT(spmi_read_end,
__field ( u16, addr )
__field ( int, ret )
__field ( u8, len )
- __dynamic_array ( u8, buf, len + 1 )
+ __dynamic_array ( u8, buf, len )
),
TP_fast_assign(
@@ -100,8 +100,8 @@ TRACE_EVENT(spmi_read_end,
__entry->sid = sid;
__entry->addr = addr;
__entry->ret = ret;
- __entry->len = len + 1;
- memcpy(__get_dynamic_array(buf), buf, len + 1);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]",
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 65d7dfbbc9cd..ac05ed06a071 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,6 +14,8 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
+#include <trace/misc/sunrpc.h>
+
TRACE_DEFINE_ENUM(SOCK_STREAM);
TRACE_DEFINE_ENUM(SOCK_DGRAM);
TRACE_DEFINE_ENUM(SOCK_RAW);
@@ -62,25 +64,31 @@ DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
__field(size_t, head_len)
__field(const void *, tail_base)
__field(size_t, tail_len)
+ __field(unsigned int, page_base)
__field(unsigned int, page_len)
__field(unsigned int, msg_len)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
- __entry->client_id = task->tk_client->cl_clid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
__entry->head_base = xdr->head[0].iov_base;
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
__entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_base = xdr->page_base;
__entry->page_len = xdr->page_len;
__entry->msg_len = xdr->len;
),
- TP_printk("task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u",
__entry->task_id, __entry->client_id,
- __entry->head_base, __entry->head_len, __entry->page_len,
- __entry->tail_base, __entry->tail_len, __entry->msg_len
+ __entry->head_base, __entry->head_len,
+ __entry->page_len, __entry->page_base,
+ __entry->tail_base, __entry->tail_len,
+ __entry->msg_len
)
);
@@ -113,7 +121,7 @@ DECLARE_EVENT_CLASS(rpc_clnt_class,
__entry->client_id = clnt->cl_clid;
),
- TP_printk("clid=%u", __entry->client_id)
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER, __entry->client_id)
);
#define DEFINE_RPC_CLNT_EVENT(name) \
@@ -131,35 +139,68 @@ DEFINE_RPC_CLNT_EVENT(release);
DEFINE_RPC_CLNT_EVENT(replace_xprt);
DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_NONE);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_TLS_X509);
+
+#define rpc_show_xprtsec_policy(policy) \
+ __print_symbolic(policy, \
+ { RPC_XPRTSEC_NONE, "none" }, \
+ { RPC_XPRTSEC_TLS_ANON, "tls-anon" }, \
+ { RPC_XPRTSEC_TLS_X509, "tls-x509" })
+
+#define rpc_show_create_flags(flags) \
+ __print_flags(flags, "|", \
+ { RPC_CLNT_CREATE_HARDRTRY, "HARDRTRY" }, \
+ { RPC_CLNT_CREATE_AUTOBIND, "AUTOBIND" }, \
+ { RPC_CLNT_CREATE_NONPRIVPORT, "NONPRIVPORT" }, \
+ { RPC_CLNT_CREATE_NOPING, "NOPING" }, \
+ { RPC_CLNT_CREATE_DISCRTRY, "DISCRTRY" }, \
+ { RPC_CLNT_CREATE_QUIET, "QUIET" }, \
+ { RPC_CLNT_CREATE_INFINITE_SLOTS, \
+ "INFINITE_SLOTS" }, \
+ { RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, \
+ "NO_IDLE_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT, \
+ "NO_RETRANS_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_SOFTERR, "SOFTERR" }, \
+ { RPC_CLNT_CREATE_REUSEPORT, "REUSEPORT" })
+
TRACE_EVENT(rpc_clnt_new,
TP_PROTO(
const struct rpc_clnt *clnt,
const struct rpc_xprt *xprt,
- const char *program,
- const char *server
+ const struct rpc_create_args *args
),
- TP_ARGS(clnt, xprt, program, server),
+ TP_ARGS(clnt, xprt, args),
TP_STRUCT__entry(
__field(unsigned int, client_id)
+ __field(unsigned long, xprtsec)
+ __field(unsigned long, flags)
+ __string(program, clnt->cl_program->name)
+ __string(server, xprt->servername)
__string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
__string(port, xprt->address_strings[RPC_DISPLAY_PORT])
- __string(program, program)
- __string(server, server)
),
TP_fast_assign(
__entry->client_id = clnt->cl_clid;
+ __entry->xprtsec = args->xprtsec.policy;
+ __entry->flags = args->flags;
+ __assign_str(program, clnt->cl_program->name);
+ __assign_str(server, xprt->servername);
__assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
- __assign_str(program, program)
- __assign_str(server, server)
),
- TP_printk("client=%u peer=[%s]:%s program=%s server=%s",
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " peer=[%s]:%s"
+ " program=%s server=%s xprtsec=%s flags=%s",
__entry->client_id, __get_str(addr), __get_str(port),
- __get_str(program), __get_str(server))
+ __get_str(program), __get_str(server),
+ rpc_show_xprtsec_policy(__entry->xprtsec),
+ rpc_show_create_flags(__entry->flags)
+ )
);
TRACE_EVENT(rpc_clnt_new_err,
@@ -179,8 +220,8 @@ TRACE_EVENT(rpc_clnt_new_err,
TP_fast_assign(
__entry->error = error;
- __assign_str(program, program)
- __assign_str(server, server)
+ __assign_str(program, program);
+ __assign_str(server, server);
),
TP_printk("program=%s server=%s error=%d",
@@ -205,7 +246,8 @@ TRACE_EVENT(rpc_clnt_clone_err,
__entry->error = error;
),
- TP_printk("client=%u error=%d", __entry->client_id, __entry->error)
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " error=%d",
+ __entry->client_id, __entry->error)
);
@@ -247,7 +289,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -259,8 +301,10 @@ DECLARE_EVENT_CLASS(rpc_task_status,
TP_ARGS(task))
DEFINE_RPC_STATUS_EVENT(call);
-DEFINE_RPC_STATUS_EVENT(bind);
DEFINE_RPC_STATUS_EVENT(connect);
+DEFINE_RPC_STATUS_EVENT(timeout);
+DEFINE_RPC_STATUS_EVENT(retry_refresh);
+DEFINE_RPC_STATUS_EVENT(refresh);
TRACE_EVENT(rpc_request,
TP_PROTO(const struct rpc_task *task),
@@ -281,39 +325,24 @@ TRACE_EVENT(rpc_request,
__entry->client_id = task->tk_client->cl_clid;
__entry->version = task->tk_client->cl_vers;
__entry->async = RPC_IS_ASYNC(task);
- __assign_str(progname, task->tk_client->cl_program->name)
- __assign_str(procname, rpc_proc_name(task))
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(procname, rpc_proc_name(task));
),
- TP_printk("task:%u@%u %sv%d %s (%ssync)",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s (%ssync)",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version,
__get_str(procname), __entry->async ? "a": ""
)
);
-TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
-TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
-TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS);
-TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
-TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
-TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
-TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN);
-TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
-TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
-TRACE_DEFINE_ENUM(RPC_TASK_SENT);
-TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
-TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
-TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
-TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
-
#define rpc_show_task_flags(flags) \
__print_flags(flags, "|", \
{ RPC_TASK_ASYNC, "ASYNC" }, \
{ RPC_TASK_SWAPPER, "SWAPPER" }, \
+ { RPC_TASK_MOVEABLE, "MOVEABLE" }, \
{ RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
- { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
{ RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
@@ -324,14 +353,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
{ RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \
{ RPC_TASK_CRED_NOREF, "CRED_NOREF" })
-TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
-TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
-TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
-TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
-TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
-TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
-TRACE_DEFINE_ENUM(RPC_TASK_SIGNALLED);
-
#define rpc_show_runstate(flags) \
__print_flags(flags, "|", \
{ (1UL << RPC_TASK_RUNNING), "RUNNING" }, \
@@ -367,7 +388,8 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%ps",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " flags=%s runstate=%s status=%d action=%ps",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -385,9 +407,13 @@ DECLARE_EVENT_CLASS(rpc_task_running,
DEFINE_RPC_RUNNING_EVENT(begin);
DEFINE_RPC_RUNNING_EVENT(run_action);
+DEFINE_RPC_RUNNING_EVENT(sync_sleep);
+DEFINE_RPC_RUNNING_EVENT(sync_wake);
DEFINE_RPC_RUNNING_EVENT(complete);
+DEFINE_RPC_RUNNING_EVENT(timeout);
DEFINE_RPC_RUNNING_EVENT(signalled);
DEFINE_RPC_RUNNING_EVENT(end);
+DEFINE_RPC_RUNNING_EVENT(call_done);
DECLARE_EVENT_CLASS(rpc_task_queued,
@@ -416,7 +442,8 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d timeout=%lu queue=%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " flags=%s runstate=%s status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -452,7 +479,7 @@ DECLARE_EVENT_CLASS(rpc_failure,
__entry->client_id = task->tk_client->cl_clid;
),
- TP_printk("task:%u@%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
__entry->task_id, __entry->client_id)
);
@@ -488,13 +515,14 @@ DECLARE_EVENT_CLASS(rpc_reply_event,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
- __assign_str(progname, task->tk_client->cl_program->name)
+ __assign_str(progname, task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procname, rpc_proc_name(task))
- __assign_str(servername, task->tk_xprt->servername)
+ __assign_str(procname, rpc_proc_name(task));
+ __assign_str(servername, task->tk_xprt->servername);
),
- TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " server=%s xid=0x%08x %sv%d %s",
__entry->task_id, __entry->client_id, __get_str(servername),
__entry->xid, __get_str(progname), __entry->version,
__get_str(procname))
@@ -517,6 +545,50 @@ DEFINE_RPC_REPLY_EVENT(stale_creds);
DEFINE_RPC_REPLY_EVENT(bad_creds);
DEFINE_RPC_REPLY_EVENT(auth_tooweak);
+#define DEFINE_RPCB_ERROR_EVENT(name) \
+ DEFINE_EVENT(rpc_reply_event, rpcb_##name##_err, \
+ TP_PROTO( \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(task))
+
+DEFINE_RPCB_ERROR_EVENT(prog_unavail);
+DEFINE_RPCB_ERROR_EVENT(timeout);
+DEFINE_RPCB_ERROR_EVENT(bind_version);
+DEFINE_RPCB_ERROR_EVENT(unreachable);
+DEFINE_RPCB_ERROR_EVENT(unrecognized);
+
+TRACE_EVENT(rpc_buf_alloc,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int status
+ ),
+
+ TP_ARGS(task, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(size_t, callsize)
+ __field(size_t, recvsize)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->callsize = task->tk_rqstp->rq_callsize;
+ __entry->recvsize = task->tk_rqstp->rq_rcvsize;
+ __entry->status = status;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " callsize=%zu recvsize=%zu status=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->callsize, __entry->recvsize, __entry->status
+ )
+);
+
TRACE_EVENT(rpc_call_rpcerror,
TP_PROTO(
const struct rpc_task *task,
@@ -540,7 +612,8 @@ TRACE_EVENT(rpc_call_rpcerror,
__entry->rpc_status = rpc_status;
),
- TP_printk("task:%u@%u tk_status=%d rpc_status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " tk_status=%d rpc_status=%d",
__entry->task_id, __entry->client_id,
__entry->tk_status, __entry->rpc_status)
);
@@ -566,6 +639,7 @@ TRACE_EVENT(rpc_stats_latency,
__field(unsigned long, backlog)
__field(unsigned long, rtt)
__field(unsigned long, execute)
+ __field(u32, xprt_id)
),
TP_fast_assign(
@@ -573,17 +647,21 @@ TRACE_EVENT(rpc_stats_latency,
__entry->task_id = task->tk_pid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->version = task->tk_client->cl_vers;
- __assign_str(progname, task->tk_client->cl_program->name)
- __assign_str(procname, rpc_proc_name(task))
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(procname, rpc_proc_name(task));
__entry->backlog = ktime_to_us(backlog);
__entry->rtt = ktime_to_us(rtt);
__entry->execute = ktime_to_us(execute);
+ __entry->xprt_id = task->tk_xprt->id;
),
- TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu"
+ " xprt_id=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__get_str(progname), __entry->version, __get_str(procname),
- __entry->backlog, __entry->rtt, __entry->execute)
+ __entry->backlog, __entry->rtt, __entry->execute,
+ __entry->xprt_id)
);
TRACE_EVENT(rpc_xdr_overflow,
@@ -607,10 +685,10 @@ TRACE_EVENT(rpc_xdr_overflow,
__field(size_t, tail_len)
__field(unsigned int, page_len)
__field(unsigned int, len)
- __string(progname,
- xdr->rqst->rq_task->tk_client->cl_program->name)
- __string(procedure,
- xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
+ __string(progname, xdr->rqst ?
+ xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
+ __string(procedure, xdr->rqst ?
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
),
TP_fast_assign(
@@ -620,15 +698,15 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
} else {
- __entry->task_id = 0;
- __entry->client_id = 0;
- __assign_str(progname, "unknown")
+ __entry->task_id = -1;
+ __entry->client_id = -1;
+ __assign_str(progname, "unknown");
__entry->version = 0;
- __assign_str(procedure, "unknown")
+ __assign_str(procedure, "unknown");
}
__entry->requested = requested;
__entry->end = xdr->end;
@@ -641,8 +719,8 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->len = xdr->buf->len;
),
- TP_printk(
- "task:%u@%u %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->requested, __entry->p, __entry->end,
@@ -686,9 +764,9 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
__entry->offset = offset;
__entry->copied = copied;
@@ -700,8 +778,8 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->len = xdr->buf->len;
),
- TP_printk(
- "task:%u@%u %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->offset, __entry->copied,
@@ -751,6 +829,9 @@ RPC_SHOW_SOCKET
RPC_SHOW_SOCK
+
+#include <trace/events/net_probe_common.h>
+
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
* that will be printed in the output.
@@ -773,27 +854,32 @@ DECLARE_EVENT_CLASS(xs_socket_event,
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
- __string(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT])
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
+ const struct sock *sk = socket->sk;
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
- __assign_str(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
- "socket:[%llu] dstaddr=%s/%s "
+ "socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc "
"state=%u (%s) sk_state=%u (%s)",
- __entry->ino, __get_str(dstaddr), __get_str(dstport),
+ __entry->ino,
+ __entry->saddr,
+ __entry->daddr,
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
@@ -823,29 +909,33 @@ DECLARE_EVENT_CLASS(xs_socket_event_done,
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
- __string(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT])
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
+ const struct sock *sk = socket->sk;
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
__entry->error = error;
- __assign_str(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
- "error=%d socket:[%llu] dstaddr=%s/%s "
+ "error=%d socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc "
"state=%u (%s) sk_state=%u (%s)",
__entry->error,
- __entry->ino, __get_str(dstaddr), __get_str(dstport),
+ __entry->ino,
+ __entry->saddr,
+ __entry->daddr,
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
@@ -868,29 +958,50 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
-TRACE_DEFINE_ENUM(XPRT_LOCKED);
-TRACE_DEFINE_ENUM(XPRT_CONNECTED);
-TRACE_DEFINE_ENUM(XPRT_CONNECTING);
-TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT);
-TRACE_DEFINE_ENUM(XPRT_BOUND);
-TRACE_DEFINE_ENUM(XPRT_BINDING);
-TRACE_DEFINE_ENUM(XPRT_CLOSING);
-TRACE_DEFINE_ENUM(XPRT_CONGESTED);
-TRACE_DEFINE_ENUM(XPRT_CWND_WAIT);
-TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE);
+TRACE_EVENT(rpc_socket_nospace,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ const struct sock_xprt *transport
+ ),
+
+ TP_ARGS(rqst, transport),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(unsigned int, total)
+ __field(unsigned int, remaining)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->total = rqst->rq_slen;
+ __entry->remaining = rqst->rq_slen - transport->xmit.offset;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " total=%u remaining=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->total, __entry->remaining
+ )
+);
#define rpc_show_xprt_state(x) \
__print_flags(x, "|", \
- { (1UL << XPRT_LOCKED), "LOCKED"}, \
- { (1UL << XPRT_CONNECTED), "CONNECTED"}, \
- { (1UL << XPRT_CONNECTING), "CONNECTING"}, \
- { (1UL << XPRT_CLOSE_WAIT), "CLOSE_WAIT"}, \
- { (1UL << XPRT_BOUND), "BOUND"}, \
- { (1UL << XPRT_BINDING), "BINDING"}, \
- { (1UL << XPRT_CLOSING), "CLOSING"}, \
- { (1UL << XPRT_CONGESTED), "CONGESTED"}, \
- { (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \
- { (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"})
+ { BIT(XPRT_LOCKED), "LOCKED" }, \
+ { BIT(XPRT_CONNECTED), "CONNECTED" }, \
+ { BIT(XPRT_CONNECTING), "CONNECTING" }, \
+ { BIT(XPRT_CLOSE_WAIT), "CLOSE_WAIT" }, \
+ { BIT(XPRT_BOUND), "BOUND" }, \
+ { BIT(XPRT_BINDING), "BINDING" }, \
+ { BIT(XPRT_CLOSING), "CLOSING" }, \
+ { BIT(XPRT_OFFLINE), "OFFLINE" }, \
+ { BIT(XPRT_REMOVE), "REMOVE" }, \
+ { BIT(XPRT_CONGESTED), "CONGESTED" }, \
+ { BIT(XPRT_CWND_WAIT), "CWND_WAIT" }, \
+ { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" }, \
+ { BIT(XPRT_SND_IS_COOKIE), "SND_IS_COOKIE" })
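Note: the BIT() conversion is cosmetic, since BIT(nr) expands to (1UL << (nr))
in <linux/bits.h>, so the masks handed to __print_flags() are unchanged; the
hunk also adds the newer OFFLINE, REMOVE, and SND_IS_COOKIE bits. Runtime code
tests the same bits with test_bit(), as in this illustrative sketch:

	static bool example_xprt_connected(const struct rpc_xprt *xprt)
	{
		/* BIT(XPRT_CONNECTED) builds the mask __print_flags()
		 * matches; live code queries the bit atomically instead.
		 */
		return test_bit(XPRT_CONNECTED, &xprt->state);
	}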
DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
TP_PROTO(
@@ -925,10 +1036,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
TP_ARGS(xprt))
DEFINE_RPC_XPRT_LIFETIME_EVENT(create);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
DECLARE_EVENT_CLASS(rpc_xprt_event,
@@ -969,7 +1080,6 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
DEFINE_RPC_XPRT_EVENT(timer);
DEFINE_RPC_XPRT_EVENT(lookup_rqst);
-DEFINE_RPC_XPRT_EVENT(complete_rqst);
TRACE_EVENT(xprt_transmit,
TP_PROTO(
@@ -996,41 +1106,52 @@ TRACE_EVENT(xprt_transmit,
__entry->status = status;
),
- TP_printk(
- "task:%u@%u xid=0x%08x seqno=%u status=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x seqno=%u status=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->seqno, __entry->status)
);
-TRACE_EVENT(xprt_enq_xmit,
+TRACE_EVENT(xprt_retransmit,
TP_PROTO(
- const struct rpc_task *task,
- int stage
+ const struct rpc_rqst *rqst
),
- TP_ARGS(task, stage),
+ TP_ARGS(rqst),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
- __field(u32, seqno)
- __field(int, stage)
+ __field(int, ntrans)
+ __field(int, version)
+ __field(unsigned long, timeout)
+ __string(progname,
+ rqst->rq_task->tk_client->cl_program->name)
+ __string(procname, rpc_proc_name(rqst->rq_task))
),
TP_fast_assign(
+ struct rpc_task *task = rqst->rq_task;
+
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client ?
task->tk_client->cl_clid : -1;
- __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
- __entry->seqno = task->tk_rqstp->rq_seqno;
- __entry->stage = stage;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->ntrans = rqst->rq_ntrans;
+ __entry->timeout = task->tk_timeout;
+ __assign_str(progname,
+ task->tk_client->cl_program->name);
+ __entry->version = task->tk_client->cl_vers;
+ __assign_str(procname, rpc_proc_name(task));
),
- TP_printk(
- "task:%u@%u xid=0x%08x seqno=%u stage=%d",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " xid=0x%08x %sv%d %s ntrans=%d timeout=%lu",
__entry->task_id, __entry->client_id, __entry->xid,
- __entry->seqno, __entry->stage)
+ __get_str(progname), __entry->version, __get_str(procname),
+ __entry->ntrans, __entry->timeout
+ )
);
TRACE_EVENT(xprt_ping,
@@ -1076,11 +1197,15 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
__entry->task_id = -1;
__entry->client_id = -1;
}
- __entry->snd_task_id = xprt->snd_task ?
- xprt->snd_task->tk_pid : -1;
+ if (xprt->snd_task &&
+ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+ __entry->snd_task_id = xprt->snd_task->tk_pid;
+ else
+ __entry->snd_task_id = -1;
),
- TP_printk("task:%u@%u snd_task:%u",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " snd_task:" SUNRPC_TRACE_PID_SPECIFIER,
__entry->task_id, __entry->client_id,
__entry->snd_task_id)
);
@@ -1121,14 +1246,20 @@ DECLARE_EVENT_CLASS(xprt_cong_event,
__entry->task_id = -1;
__entry->client_id = -1;
}
- __entry->snd_task_id = xprt->snd_task ?
- xprt->snd_task->tk_pid : -1;
+ if (xprt->snd_task &&
+ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+ __entry->snd_task_id = xprt->snd_task->tk_pid;
+ else
+ __entry->snd_task_id = -1;
+
__entry->cong = xprt->cong;
__entry->cwnd = xprt->cwnd;
__entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
),
- TP_printk("task:%u@%u snd_task:%u cong=%lu cwnd=%lu%s",
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " snd_task:" SUNRPC_TRACE_PID_SPECIFIER
+ " cong=%lu cwnd=%lu%s",
__entry->task_id, __entry->client_id,
__entry->snd_task_id, __entry->cong, __entry->cwnd,
__entry->wait ? " (wait)" : "")
@@ -1147,6 +1278,50 @@ DEFINE_CONG_EVENT(release_cong);
DEFINE_CONG_EVENT(get_cong);
DEFINE_CONG_EVENT(put_cong);
+TRACE_EVENT(xprt_reserve,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
+ __entry->task_id, __entry->client_id, __entry->xid
+ )
+);
+
+TRACE_EVENT(xs_data_ready,
+ TP_PROTO(
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port))
+);
+
TRACE_EVENT(xs_stream_read_data,
TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
@@ -1156,18 +1331,18 @@ TRACE_EVENT(xs_stream_read_data,
__field(ssize_t, err)
__field(size_t, total)
__string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] :
- "(null)")
+ EVENT_NULL_STR)
__string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] :
- "(null)")
+ EVENT_NULL_STR)
),
TP_fast_assign(
__entry->err = err;
__entry->total = total;
__assign_str(addr, xprt ?
- xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)");
+ xprt->address_strings[RPC_DISPLAY_ADDR] : EVENT_NULL_STR);
__assign_str(port, xprt ?
- xprt->address_strings[RPC_DISPLAY_PORT] : "(null)");
+ xprt->address_strings[RPC_DISPLAY_PORT] : EVENT_NULL_STR);
),
TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr),
@@ -1202,14 +1377,209 @@ TRACE_EVENT(xs_stream_read_request,
__entry->copied, __entry->reclen, __entry->offset)
);
+TRACE_EVENT(rpcb_getport,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_task *task,
+ unsigned int bind_version
+ ),
-DECLARE_EVENT_CLASS(svc_xdr_buf_class,
+ TP_ARGS(clnt, task, bind_version),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __field(int, protocol)
+ __field(unsigned int, bind_version)
+ __string(servername, task->tk_xprt->servername)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = clnt->cl_clid;
+ __entry->program = clnt->cl_prog;
+ __entry->version = clnt->cl_vers;
+ __entry->protocol = task->tk_xprt->prot;
+ __entry->bind_version = bind_version;
+ __assign_str(servername, task->tk_xprt->servername);
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " server=%s program=%u version=%u protocol=%d bind_version=%u",
+ __entry->task_id, __entry->client_id, __get_str(servername),
+ __entry->program, __entry->version, __entry->protocol,
+ __entry->bind_version
+ )
+);
+
+TRACE_EVENT(rpcb_setport,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int status,
+ unsigned short port
+ ),
+
+ TP_ARGS(task, status, port),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, status)
+ __field(unsigned short, port)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->status = status;
+ __entry->port = port;
+ ),
+
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d port=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->status, __entry->port
+ )
+);
+
+TRACE_EVENT(pmap_register,
+ TP_PROTO(
+ u32 program,
+ u32 version,
+ int protocol,
+ unsigned short port
+ ),
+
+ TP_ARGS(program, version, protocol, port),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __field(int, protocol)
+ __field(unsigned int, port)
+ ),
+
+ TP_fast_assign(
+ __entry->program = program;
+ __entry->version = version;
+ __entry->protocol = protocol;
+ __entry->port = port;
+ ),
+
+ TP_printk("program=%u version=%u protocol=%d port=%u",
+ __entry->program, __entry->version,
+ __entry->protocol, __entry->port
+ )
+);
+
+TRACE_EVENT(rpcb_register,
+ TP_PROTO(
+ u32 program,
+ u32 version,
+ const char *addr,
+ const char *netid
+ ),
+
+ TP_ARGS(program, version, addr, netid),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __string(addr, addr)
+ __string(netid, netid)
+ ),
+
+ TP_fast_assign(
+ __entry->program = program;
+ __entry->version = version;
+ __assign_str(addr, addr);
+ __assign_str(netid, netid);
+ ),
+
+ TP_printk("program=%u version=%u addr=%s netid=%s",
+ __entry->program, __entry->version,
+ __get_str(addr), __get_str(netid)
+ )
+);
+
+TRACE_EVENT(rpcb_unregister,
+ TP_PROTO(
+ u32 program,
+ u32 version,
+ const char *netid
+ ),
+
+ TP_ARGS(program, version, netid),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, program)
+ __field(unsigned int, version)
+ __string(netid, netid)
+ ),
+
+ TP_fast_assign(
+ __entry->program = program;
+ __entry->version = version;
+ __assign_str(netid, netid);
+ ),
+
+ TP_printk("program=%u version=%u netid=%s",
+ __entry->program, __entry->version, __get_str(netid)
+ )
+);
+
+/**
+ ** RPC-over-TLS tracepoints
+ **/
+
+DECLARE_EVENT_CLASS(rpc_tls_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(clnt, xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, requested_policy)
+ __field(u32, version)
+ __string(servername, xprt->servername)
+ __string(progname, clnt->cl_program->name)
+ ),
+
+ TP_fast_assign(
+ __entry->requested_policy = clnt->cl_xprtsec.policy;
+ __entry->version = clnt->cl_vers;
+ __assign_str(servername, xprt->servername);
+ __assign_str(progname, clnt->cl_program->name);
+ ),
+
+ TP_printk("server=%s %sv%u requested_policy=%s",
+ __get_str(servername), __get_str(progname), __entry->version,
+ rpc_show_xprtsec_policy(__entry->requested_policy)
+ )
+);
+
+#define DEFINE_RPC_TLS_EVENT(name) \
+ DEFINE_EVENT(rpc_tls_class, rpc_tls_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt, \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(clnt, xprt))
+
+DEFINE_RPC_TLS_EVENT(unavailable);
+DEFINE_RPC_TLS_EVENT(not_started);
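Note: DEFINE_EVENT() stamps one tracepoint per name out of the shared
rpc_tls_class, so callers get trace_rpc_tls_unavailable() and
trace_rpc_tls_not_started(). A hedged sketch of a call site follows; the
helper, its condition, and the error code are assumptions, not the sunrpc
implementation:

	static int example_start_tls(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
	{
		if (!tls_handshake_available())	/* hypothetical helper */
			goto out_unavailable;
		return 0;

	out_unavailable:
		trace_rpc_tls_unavailable(clnt, xprt);
		return -ENOTCONN;
	}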
+
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
TP_PROTO(
- const struct svc_rqst *rqst,
const struct xdr_buf *xdr
),
- TP_ARGS(rqst, xdr),
+ TP_ARGS(xdr),
TP_STRUCT__entry(
__field(u32, xid)
@@ -1222,8 +1592,10 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->head_base = xdr->head[0].iov_base;
+ __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+ __entry->xid = be32_to_cpu(*p);
+ __entry->head_base = p;
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
__entry->tail_len = xdr->tail[0].iov_len;
@@ -1238,16 +1610,65 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
)
);
+#define DEFINE_SVCXDRMSG_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_msg_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
+DECLARE_EVENT_CLASS(svc_xdr_buf_class,
+ TP_PROTO(
+ __be32 xid,
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(xid, xdr),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_base)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ __entry->head_base = xdr->head[0].iov_base;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_base = xdr->page_base;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len,
+ __entry->page_len, __entry->page_base,
+ __entry->tail_base, __entry->tail_len,
+ __entry->msg_len
+ )
+);
+
#define DEFINE_SVCXDRBUF_EVENT(name) \
DEFINE_EVENT(svc_xdr_buf_class, \
svc_xdr_##name, \
TP_PROTO( \
- const struct svc_rqst *rqst, \
+ __be32 xid, \
const struct xdr_buf *xdr \
), \
- TP_ARGS(rqst, xdr))
+ TP_ARGS(xid, xdr))
-DEFINE_SVCXDRBUF_EVENT(recvfrom);
DEFINE_SVCXDRBUF_EVENT(sendto);
/*
@@ -1258,11 +1679,8 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(LOCAL) \
svc_rqst_flag(USEDEFERRAL) \
svc_rqst_flag(DROPME) \
- svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
- svc_rqst_flag(BUSY) \
- svc_rqst_flag(DATA) \
- svc_rqst_flag_end(AUTHERR)
+ svc_rqst_flag_end(DATA)
#undef svc_rqst_flag
#undef svc_rqst_flag_end
@@ -1279,30 +1697,6 @@ SVC_RQST_FLAG_LIST
#define show_rqstp_flags(flags) \
__print_flags(flags, "|", SVC_RQST_FLAG_LIST)
-TRACE_EVENT(svc_recv,
- TP_PROTO(struct svc_rqst *rqst, int len),
-
- TP_ARGS(rqst, len),
-
- TP_STRUCT__entry(
- __field(u32, xid)
- __field(int, len)
- __field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
- ),
-
- TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->len = len;
- __entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
- ),
-
- TP_printk("addr=%s xid=0x%08x len=%d flags=%s",
- __get_str(addr), __entry->xid, __entry->len,
- show_rqstp_flags(__entry->flags))
-);
-
TRACE_DEFINE_ENUM(SVC_GARBAGE);
TRACE_DEFINE_ENUM(SVC_SYSERR);
TRACE_DEFINE_ENUM(SVC_VALID);
@@ -1314,7 +1708,7 @@ TRACE_DEFINE_ENUM(SVC_DENIED);
TRACE_DEFINE_ENUM(SVC_PENDING);
TRACE_DEFINE_ENUM(SVC_COMPLETE);
-#define svc_show_status(status) \
+#define show_svc_auth_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
{ SVC_SYSERR, "SVC_SYSERR" }, \
@@ -1327,26 +1721,58 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
{ SVC_PENDING, "SVC_PENDING" }, \
{ SVC_COMPLETE, "SVC_COMPLETE" })
-TRACE_EVENT(svc_authenticate,
- TP_PROTO(const struct svc_rqst *rqst, int auth_res, __be32 auth_stat),
+#define SVC_RQST_ENDPOINT_FIELDS(r) \
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen) \
+ __field(unsigned int, netns_ino) \
+ __field(u32, xid)
+
+#define SVC_RQST_ENDPOINT_ASSIGNMENTS(r) \
+ do { \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
+ __entry->netns_ino = xprt->xpt_net->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
+ } while (0)
+
+#define SVC_RQST_ENDPOINT_FORMAT \
+ "xid=0x%08x server=%pISpc client=%pISpc"
+
+#define SVC_RQST_ENDPOINT_VARARGS \
+ __entry->xid, __get_sockaddr(server), __get_sockaddr(client)
+
+TRACE_EVENT_CONDITION(svc_authenticate,
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ enum svc_auth_status auth_res
+ ),
+
+ TP_ARGS(rqst, auth_res),
- TP_ARGS(rqst, auth_res, auth_stat),
+ TP_CONDITION(auth_res != SVC_OK && auth_res != SVC_COMPLETE),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(unsigned long, svc_status)
__field(unsigned long, auth_stat)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->svc_status = auth_res;
- __entry->auth_stat = be32_to_cpu(auth_stat);
+ __entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat);
),
- TP_printk("xid=0x%08x auth_res=%s auth_stat=%s",
- __entry->xid, svc_show_status(__entry->svc_status),
- rpc_show_auth_stat(__entry->auth_stat))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT
+ " auth_res=%s auth_stat=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ show_svc_auth_status(__entry->svc_status),
+ rpc_show_auth_stat(__entry->auth_stat))
);
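Note: TRACE_EVENT_CONDITION() evaluates TP_CONDITION before reserving
ring-buffer space, so with the change above svc_authenticate records nothing
on the common SVC_OK and SVC_COMPLETE paths even while enabled. A minimal
self-contained sketch of the pattern, with an invented event name and field:

	TRACE_EVENT_CONDITION(example_error_only,
		TP_PROTO(int status),
		TP_ARGS(status),
		TP_CONDITION(status != 0),
		TP_STRUCT__entry(
			__field(int, status)
		),
		TP_fast_assign(
			__entry->status = status;
		),
		TP_printk("status=%d", __entry->status)
	);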
TRACE_EVENT(svc_process,
@@ -1359,8 +1785,9 @@ TRACE_EVENT(svc_process,
__field(u32, vers)
__field(u32, proc)
__string(service, name)
+ __string(procedure, svc_proc_name(rqst))
__string(addr, rqst->rq_xprt ?
- rqst->rq_xprt->xpt_remotebuf : "(null)")
+ rqst->rq_xprt->xpt_remotebuf : EVENT_NULL_STR)
),
TP_fast_assign(
@@ -1368,17 +1795,19 @@ TRACE_EVENT(svc_process,
__entry->vers = rqst->rq_vers;
__entry->proc = rqst->rq_proc;
__assign_str(service, name);
+ __assign_str(procedure, svc_proc_name(rqst));
__assign_str(addr, rqst->rq_xprt ?
- rqst->rq_xprt->xpt_remotebuf : "(null)");
+ rqst->rq_xprt->xpt_remotebuf : EVENT_NULL_STR);
),
- TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
+ TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s",
__get_str(addr), __entry->xid,
- __get_str(service), __entry->vers, __entry->proc)
+ __get_str(service), __entry->vers,
+ __get_str(procedure)
+ )
);
DECLARE_EVENT_CLASS(svc_rqst_event,
-
TP_PROTO(
const struct svc_rqst *rqst
),
@@ -1386,20 +1815,20 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_ARGS(rqst),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x flags=%s",
- __get_str(addr), __entry->xid,
- show_rqstp_flags(__entry->flags))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " flags=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ show_rqstp_flags(__entry->flags))
);
#define DEFINE_SVC_RQST_EVENT(name) \
DEFINE_EVENT(svc_rqst_event, svc_##name, \
@@ -1412,124 +1841,248 @@ DEFINE_SVC_RQST_EVENT(defer);
DEFINE_SVC_RQST_EVENT(drop);
DECLARE_EVENT_CLASS(svc_rqst_status,
-
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ int status
+ ),
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(int, status)
__field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->status = status;
__entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x status=%d flags=%s",
- __get_str(addr), __entry->xid,
- __entry->status, show_rqstp_flags(__entry->flags))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " status=%d flags=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __entry->status, show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_status, svc_send,
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(const struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
+TRACE_EVENT(svc_replace_page_err,
+ TP_PROTO(const struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+ TP_STRUCT__entry(
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
+ __field(const void *, begin)
+ __field(const void *, respages)
+ __field(const void *, nextpage)
+ ),
+
+ TP_fast_assign(
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
+ __entry->begin = rqst->rq_pages;
+ __entry->respages = rqst->rq_respages;
+ __entry->nextpage = rqst->rq_next_page;
+ ),
+
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " begin=%p respages=%p nextpage=%p",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __entry->begin, __entry->respages, __entry->nextpage)
+);
+
+TRACE_EVENT(svc_stats_latency,
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
+ __field(unsigned long, execute)
+ __string(procedure, svc_proc_name(rqst))
+ ),
+
+ TP_fast_assign(
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
+ __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_stime));
+ __assign_str(procedure, svc_proc_name(rqst));
+ ),
+
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __get_str(procedure), __entry->execute)
+);
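Note: wakeup-us and execute-us share one recipe: stamp a ktime_t when the
request is queued or started, subtract at the tracepoint, and convert to
microseconds. A sketch with an illustrative helper:

	static s64 example_elapsed_us(ktime_t start)
	{
		return ktime_to_us(ktime_sub(ktime_get(), start));
	}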
+
+/*
+ * from include/linux/sunrpc/svc_xprt.h
+ */
+#define SVC_XPRT_FLAG_LIST \
+ svc_xprt_flag(BUSY) \
+ svc_xprt_flag(CONN) \
+ svc_xprt_flag(CLOSE) \
+ svc_xprt_flag(DATA) \
+ svc_xprt_flag(TEMP) \
+ svc_xprt_flag(DEAD) \
+ svc_xprt_flag(CHNGBUF) \
+ svc_xprt_flag(DEFERRED) \
+ svc_xprt_flag(OLD) \
+ svc_xprt_flag(LISTENER) \
+ svc_xprt_flag(CACHE_AUTH) \
+ svc_xprt_flag(LOCAL) \
+ svc_xprt_flag(KILL_TEMP) \
+ svc_xprt_flag(CONG_CTRL) \
+ svc_xprt_flag(HANDSHAKE) \
+ svc_xprt_flag(TLS_SESSION) \
+ svc_xprt_flag_end(PEER_AUTH)
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) TRACE_DEFINE_ENUM(XPT_##x);
+#define svc_xprt_flag_end(x) TRACE_DEFINE_ENUM(XPT_##x);
+
+SVC_XPRT_FLAG_LIST
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) { BIT(XPT_##x), #x },
+#define svc_xprt_flag_end(x) { BIT(XPT_##x), #x }
+
#define show_svc_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { (1UL << XPT_BUSY), "XPT_BUSY"}, \
- { (1UL << XPT_CONN), "XPT_CONN"}, \
- { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
- { (1UL << XPT_DATA), "XPT_DATA"}, \
- { (1UL << XPT_TEMP), "XPT_TEMP"}, \
- { (1UL << XPT_DEAD), "XPT_DEAD"}, \
- { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
- { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
- { (1UL << XPT_OLD), "XPT_OLD"}, \
- { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
- { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
- { (1UL << XPT_LOCAL), "XPT_LOCAL"}, \
- { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
- { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
+ __print_flags(flags, "|", SVC_XPRT_FLAG_LIST)
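Note: SVC_XPRT_FLAG_LIST is an X-macro. The flag list is written once and
expanded twice under different definitions of svc_xprt_flag() and
svc_xprt_flag_end(): first into TRACE_DEFINE_ENUM() registrations, then into
the { mask, "name" } pairs that __print_flags() consumes. A standalone sketch
of the technique with invented names:

	/* Define the list once... */
	#define COLOR_LIST \
		color(RED)   \
		color(GREEN) \
		color_end(BLUE)

	/* ...expand it into an enum... */
	#define color(x)	COLOR_##x,
	#define color_end(x)	COLOR_##x
	enum color { COLOR_LIST };
	#undef color
	#undef color_end

	/* ...then expand it again into matching name strings. */
	#define color(x)	#x,
	#define color_end(x)	#x
	static const char * const color_names[] = { COLOR_LIST };
	#undef color
	#undef color_end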
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
const char *program,
const char *protocol,
struct sockaddr *sap,
+ size_t salen,
const struct svc_xprt *xprt
),
- TP_ARGS(program, protocol, sap, xprt),
+ TP_ARGS(program, protocol, sap, salen, xprt),
TP_STRUCT__entry(
__field(long, error)
__string(program, program)
__string(protocol, protocol)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, salen)
),
TP_fast_assign(
__entry->error = PTR_ERR(xprt);
__assign_str(program, program);
__assign_str(protocol, protocol);
- memcpy(__entry->addr, sap, sizeof(__entry->addr));
+ __assign_sockaddr(addr, sap, salen);
),
TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
- __entry->addr, __get_str(program), __get_str(protocol),
+ __get_sockaddr(addr), __get_str(program), __get_str(protocol),
__entry->error)
);
-TRACE_EVENT(svc_xprt_do_enqueue,
- TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
+#define SVC_XPRT_ENDPOINT_FIELDS(x) \
+ __sockaddr(server, (x)->xpt_locallen) \
+ __sockaddr(client, (x)->xpt_remotelen) \
+ __field(unsigned long, flags) \
+ __field(unsigned int, netns_ino)
+
+#define SVC_XPRT_ENDPOINT_ASSIGNMENTS(x) \
+ do { \
+ __assign_sockaddr(server, &(x)->xpt_local, \
+ (x)->xpt_locallen); \
+ __assign_sockaddr(client, &(x)->xpt_remote, \
+ (x)->xpt_remotelen); \
+ __entry->flags = (x)->xpt_flags; \
+ __entry->netns_ino = (x)->xpt_net->ns.inum; \
+ } while (0)
+
+#define SVC_XPRT_ENDPOINT_FORMAT \
+ "server=%pISpc client=%pISpc flags=%s"
+
+#define SVC_XPRT_ENDPOINT_VARARGS \
+ __get_sockaddr(server), __get_sockaddr(client), \
+ show_svc_xprt_flags(__entry->flags)
+
+TRACE_EVENT(svc_xprt_enqueue,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ unsigned long flags
+ ),
- TP_ARGS(xprt, rqst),
+ TP_ARGS(xprt, flags),
TP_STRUCT__entry(
- __field(int, pid)
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
),
TP_fast_assign(
- __entry->pid = rqst? rqst->rq_task->pid : 0;
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __assign_sockaddr(server, &xprt->xpt_local,
+ xprt->xpt_locallen);
+ __assign_sockaddr(client, &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = flags;
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
+ ),
+
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
+);
+
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt)
+
+ __field(unsigned long, wakeup)
+ ),
+
+ TP_fast_assign(
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_qtime));
),
- TP_printk("addr=%s pid=%d flags=%s", __get_str(addr),
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup)
);
DECLARE_EVENT_CLASS(svc_xprt_event,
- TP_PROTO(struct svc_xprt *xprt),
+ TP_PROTO(
+ const struct svc_xprt *xprt
+ ),
TP_ARGS(xprt),
TP_STRUCT__entry(
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
),
TP_fast_assign(
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
),
- TP_printk("addr=%s flags=%s", __get_str(addr),
- show_svc_xprt_flags(__entry->flags))
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
#define DEFINE_SVC_XPRT_EVENT(name) \
DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \
TP_PROTO( \
- struct svc_xprt *xprt \
+ const struct svc_xprt *xprt \
), \
TP_ARGS(xprt))
@@ -1538,6 +2091,17 @@ DEFINE_SVC_XPRT_EVENT(close);
DEFINE_SVC_XPRT_EVENT(detach);
DEFINE_SVC_XPRT_EVENT(free);
+#define DEFINE_SVC_TLS_EVENT(name) \
+ DEFINE_EVENT(svc_xprt_event, svc_tls_##name, \
+ TP_PROTO(const struct svc_xprt *xprt), \
+ TP_ARGS(xprt))
+
+DEFINE_SVC_TLS_EVENT(start);
+DEFINE_SVC_TLS_EVENT(upcall);
+DEFINE_SVC_TLS_EVENT(unavailable);
+DEFINE_SVC_TLS_EVENT(not_started);
+DEFINE_SVC_TLS_EVENT(timed_out);
+
TRACE_EVENT(svc_xprt_accept,
TP_PROTO(
const struct svc_xprt *xprt,
@@ -1547,44 +2111,25 @@ TRACE_EVENT(svc_xprt_accept,
TP_ARGS(xprt, service),
TP_STRUCT__entry(
- __string(addr, xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
+
__string(protocol, xprt->xpt_class->xcl_name)
__string(service, service)
),
TP_fast_assign(
- __assign_str(addr, xprt->xpt_remotebuf);
- __assign_str(protocol, xprt->xpt_class->xcl_name)
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
+
+ __assign_str(protocol, xprt->xpt_class->xcl_name);
__assign_str(service, service);
),
- TP_printk("addr=%s protocol=%s service=%s",
- __get_str(addr), __get_str(protocol), __get_str(service)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s",
+ SVC_XPRT_ENDPOINT_VARARGS,
+ __get_str(protocol), __get_str(service)
)
);
-TRACE_EVENT(svc_xprt_dequeue,
- TP_PROTO(struct svc_rqst *rqst),
-
- TP_ARGS(rqst),
-
- TP_STRUCT__entry(
- __field(unsigned long, flags)
- __field(unsigned long, wakeup)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
- ),
-
- TP_fast_assign(
- __entry->flags = rqst->rq_xprt->xpt_flags;
- __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_qtime));
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
- ),
-
- TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr),
- show_svc_xprt_flags(__entry->flags), __entry->wakeup)
-);
-
TRACE_EVENT(svc_wake_up,
TP_PROTO(int pid),
@@ -1601,47 +2146,26 @@ TRACE_EVENT(svc_wake_up,
TP_printk("pid=%d", __entry->pid)
);
-TRACE_EVENT(svc_handle_xprt,
- TP_PROTO(struct svc_xprt *xprt, int len),
-
- TP_ARGS(xprt, len),
-
- TP_STRUCT__entry(
- __field(int, len)
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
- ),
-
- TP_fast_assign(
- __entry->len = len;
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+TRACE_EVENT(svc_alloc_arg_err,
+ TP_PROTO(
+ unsigned int requested,
+ unsigned int allocated
),
- TP_printk("addr=%s len=%d flags=%s", __get_str(addr),
- __entry->len, show_svc_xprt_flags(__entry->flags))
-);
-
-TRACE_EVENT(svc_stats_latency,
- TP_PROTO(const struct svc_rqst *rqst),
-
- TP_ARGS(rqst),
+ TP_ARGS(requested, allocated),
TP_STRUCT__entry(
- __field(u32, xid)
- __field(unsigned long, execute)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ __field(unsigned int, requested)
+ __field(unsigned int, allocated)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_stime));
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ __entry->requested = requested;
+ __entry->allocated = allocated;
),
- TP_printk("addr=%s xid=0x%08x execute-us=%lu",
- __get_str(addr), __entry->xid, __entry->execute)
+ TP_printk("requested=%u allocated=%u",
+ __entry->requested, __entry->allocated)
);
DECLARE_EVENT_CLASS(svc_deferred_event,
@@ -1654,18 +2178,17 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_STRUCT__entry(
__field(const void *, dr)
__field(u32, xid)
- __string(addr, dr->xprt->xpt_remotebuf)
+ __sockaddr(addr, dr->addrlen)
),
TP_fast_assign(
__entry->dr = dr;
- __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
- (dr->xprt_hlen>>2)));
- __assign_str(addr, dr->xprt->xpt_remotebuf);
+ __entry->xid = be32_to_cpu(*(__be32 *)dr->args);
+ __assign_sockaddr(addr, &dr->addr, dr->addrlen);
),
- TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
- __entry->xid)
+ TP_printk("addr=%pISpc dr=%p xid=0x%08x", __get_sockaddr(addr),
+ __entry->dr, __entry->xid)
);
#define DEFINE_SVC_DEFERRED_EVENT(name) \
@@ -1679,31 +2202,46 @@ DEFINE_SVC_DEFERRED_EVENT(drop);
DEFINE_SVC_DEFERRED_EVENT(queue);
DEFINE_SVC_DEFERRED_EVENT(recv);
-TRACE_EVENT(svcsock_new_socket,
+DECLARE_EVENT_CLASS(svcsock_lifetime_class,
TP_PROTO(
+ const void *svsk,
const struct socket *socket
),
-
- TP_ARGS(socket),
-
+ TP_ARGS(svsk, socket),
TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __field(const void *, svsk)
+ __field(const void *, sk)
__field(unsigned long, type)
__field(unsigned long, family)
- __field(bool, listener)
+ __field(unsigned long, state)
),
-
TP_fast_assign(
+ struct sock *sk = socket->sk;
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->svsk = svsk;
+ __entry->sk = sk;
__entry->type = socket->type;
- __entry->family = socket->sk->sk_family;
- __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+ __entry->family = sk->sk_family;
+ __entry->state = sk->sk_state;
),
-
- TP_printk("type=%s family=%s%s",
- show_socket_type(__entry->type),
+ TP_printk("svsk=%p type=%s family=%s%s",
+ __entry->svsk, show_socket_type(__entry->type),
rpc_show_address_family(__entry->family),
- __entry->listener ? " (listener)" : ""
+ __entry->state == TCP_LISTEN ? " (listener)" : ""
)
);
+#define DEFINE_SVCSOCK_LIFETIME_EVENT(name) \
+ DEFINE_EVENT(svcsock_lifetime_class, name, \
+ TP_PROTO( \
+ const void *svsk, \
+ const struct socket *socket \
+ ), \
+ TP_ARGS(svsk, socket))
+
+DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_new);
+DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_free);
TRACE_EVENT(svcsock_marker,
TP_PROTO(
@@ -1842,17 +2380,17 @@ DECLARE_EVENT_CLASS(svcsock_accept_class,
TP_STRUCT__entry(
__field(long, status)
__string(service, service)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
),
TP_fast_assign(
__entry->status = status;
__assign_str(service, service);
- memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr));
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
),
- TP_printk("listener=%pISpc service=%s status=%ld",
- __entry->addr, __get_str(service), __entry->status
+ TP_printk("addr=listener service=%s status=%ld",
+ __get_str(service), __entry->status
)
);
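Note: the SUNRPC_TRACE_*_SPECIFIER strings used throughout come from the
<trace/misc/sunrpc.h> include added near the top of this file; centralizing
them keeps the task:...@... prefix uniform and greppable across events. A
hedged sketch of what such a header provides (the exact field widths are an
assumption; consult the real header):

	#define SUNRPC_TRACE_PID_SPECIFIER	"%08x"
	#define SUNRPC_TRACE_CLID_SPECIFIER	"%08x"
	#define SUNRPC_TRACE_TASK_SPECIFIER \
		"task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER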
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index 705be43b71ab..da05c9ebd224 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -8,20 +8,15 @@
#include <linux/tracepoint.h>
TRACE_EVENT(swiotlb_bounced,
-
- TP_PROTO(struct device *dev,
- dma_addr_t dev_addr,
- size_t size,
- enum swiotlb_force swiotlb_force),
-
- TP_ARGS(dev, dev_addr, size, swiotlb_force),
+ TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+ TP_ARGS(dev, dev_addr, size),
TP_STRUCT__entry(
- __string( dev_name, dev_name(dev) )
- __field( u64, dma_mask )
- __field( dma_addr_t, dev_addr )
- __field( size_t, size )
- __field( enum swiotlb_force, swiotlb_force )
+ __string(dev_name, dev_name(dev))
+ __field(u64, dma_mask)
+ __field(dma_addr_t, dev_addr)
+ __field(size_t, size)
+ __field(bool, force)
),
TP_fast_assign(
@@ -29,19 +24,15 @@ TRACE_EVENT(swiotlb_bounced,
__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
__entry->dev_addr = dev_addr;
__entry->size = size;
- __entry->swiotlb_force = swiotlb_force;
+ __entry->force = is_swiotlb_force_bounce(dev);
),
- TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx "
- "size=%zu %s",
+ TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx size=%zu %s",
__get_str(dev_name),
__entry->dma_mask,
(unsigned long long)__entry->dev_addr,
__entry->size,
- __print_symbolic(__entry->swiotlb_force,
- { SWIOTLB_NORMAL, "NORMAL" },
- { SWIOTLB_FORCE, "FORCE" },
- { SWIOTLB_NO_FORCE, "NO_FORCE" }))
+ __entry->force ? "FORCE" : "NORMAL")
);
#endif /* _TRACE_SWIOTLB_H */
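
The enum argument disappears because forcing is now a per-device property:
the tracepoint queries is_swiotlb_force_bounce(dev) itself in
TP_fast_assign. A hedged sketch of the simplified call site in the bounce
path:

    /* the mapping code only passes what it knows locally; the force
     * bit is derived from the device inside the event itself */
    trace_swiotlb_bounced(dev, dev_addr, size);
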
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 77408edd29d2..67fad2677ed5 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -141,6 +141,7 @@ TRACE_EVENT(target_sequencer_start,
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
+ __field( unsigned char, control )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__string( initiator, cmd->se_sess->se_node_acl->initiatorname )
),
@@ -151,6 +152,7 @@ TRACE_EVENT(target_sequencer_start,
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
@@ -160,9 +162,7 @@ TRACE_EVENT(target_sequencer_start,
__entry->tag, show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
- scsi_command_size(__entry->cdb) <= 16 ?
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
- __entry->cdb[1]
+ __entry->control
)
);
@@ -178,6 +178,7 @@ TRACE_EVENT(target_cmd_complete,
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
+ __field( unsigned char, control )
__field( unsigned char, scsi_status )
__field( unsigned char, sense_length )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
@@ -191,6 +192,7 @@ TRACE_EVENT(target_cmd_complete,
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
__entry->scsi_status = cmd->scsi_status;
__entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
@@ -208,9 +210,7 @@ TRACE_EVENT(target_cmd_complete,
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
- scsi_command_size(__entry->cdb) <= 16 ?
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
- __entry->cdb[1]
+ __entry->control
)
);
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 64d160930b0d..47b527464d1a 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -47,7 +47,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
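
The strlcpy() to strscpy() conversion is more than style: strscpy() stops
reading the source at the size limit and reports truncation, while
strlcpy() always walks the whole source and returns its full length. A
minimal contrast, as a sketch (comm is the tracepoint argument):

    char buf[TASK_COMM_LEN];
    bool truncated;

    /* strlcpy() returns strlen(src): the caller must compare against
     * the buffer size to detect truncation, and the whole source is
     * scanned even when only a few bytes fit */
    truncated = strlcpy(buf, comm, sizeof(buf)) >= sizeof(buf);

    /* strscpy() returns the number of bytes copied, or -E2BIG when it
     * truncates, and never reads past the limit */
    truncated = strscpy(buf, comm, sizeof(buf)) < 0;
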
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index cf97f6339acb..699dafd204ea 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -59,6 +59,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__field(int, state)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -66,7 +67,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
),
TP_fast_assign(
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
__be32 *p32;
__entry->skbaddr = skb;
@@ -75,6 +76,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -86,7 +88,9 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ __entry->skbaddr, __entry->skaddr,
+ show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
show_tcp_state_name(__entry->state))
@@ -125,6 +129,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__field(const void *, skaddr)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -140,6 +145,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -153,7 +159,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
__entry->sock_cookie = sock_gen_cookie(sk);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
@@ -192,6 +199,7 @@ TRACE_EVENT(tcp_retransmit_synack,
__field(const void *, req)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -207,6 +215,7 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->sport = ireq->ir_num;
__entry->dport = ntohs(ireq->ir_rmt_port);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = ireq->ir_loc_addr;
@@ -218,7 +227,8 @@ TRACE_EVENT(tcp_retransmit_synack,
ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6)
@@ -238,6 +248,7 @@ TRACE_EVENT(tcp_probe,
__array(__u8, daddr, sizeof(struct sockaddr_in6))
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__field(__u32, mark)
__field(__u16, data_len)
__field(__u32, snd_nxt)
@@ -248,6 +259,8 @@ TRACE_EVENT(tcp_probe,
__field(__u32, srtt)
__field(__u32, rcv_wnd)
__field(__u64, sock_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
),
TP_fast_assign(
@@ -264,23 +277,154 @@ TRACE_EVENT(tcp_probe,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
__entry->mark = skb->mark;
+ __entry->family = sk->sk_family;
__entry->data_len = skb->len - __tcp_hdrlen(th);
__entry->snd_nxt = tp->snd_nxt;
__entry->snd_una = tp->snd_una;
- __entry->snd_cwnd = tp->snd_cwnd;
+ __entry->snd_cwnd = tcp_snd_cwnd(tp);
__entry->snd_wnd = tp->snd_wnd;
__entry->rcv_wnd = tp->rcv_wnd;
__entry->ssthresh = tcp_current_ssthresh(sk);
__entry->srtt = tp->srtt_us >> 3;
__entry->sock_cookie = sock_gen_cookie(sk);
+
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
),
- TP_printk("src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
+ TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx skbaddr=%p skaddr=%p",
+ show_family_name(__entry->family),
__entry->saddr, __entry->daddr, __entry->mark,
__entry->data_len, __entry->snd_nxt, __entry->snd_una,
__entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
- __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
+ __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie,
+ __entry->skbaddr, __entry->skaddr)
+);
+
+#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \
+ do { \
+ const struct tcphdr *th = (const struct tcphdr *)skb->data; \
+ struct sockaddr_in *v4 = (void *)__entry->saddr; \
+ \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = th->source; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \
+ v4 = (void *)__entry->daddr; \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = th->dest; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
+ do { \
+ const struct iphdr *iph = ip_hdr(skb); \
+ \
+ if (iph->version == 6) { \
+ const struct tcphdr *th = (const struct tcphdr *)skb->data; \
+ struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+ \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = th->source; \
+ v6->sin6_addr = ipv6_hdr(skb)->saddr; \
+ v6 = (void *)__entry->daddr; \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = th->dest; \
+ v6->sin6_addr = ipv6_hdr(skb)->daddr; \
+ } else \
+ TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \
+ } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
+ TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb)
+
+#endif
+
+/*
+ * tcp event with only skb
+ */
+DECLARE_EVENT_CLASS(tcp_event_skb,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS_SKB(__entry, skb);
+ ),
+
+ TP_printk("skbaddr=%p src=%pISpc dest=%pISpc",
+ __entry->skbaddr, __entry->saddr, __entry->daddr)
+);
+
+DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
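
tcp_bad_csum is the first consumer of the skb-only class: a packet that
fails checksum verification may not be attached to any socket yet, so the
event keys off the skb alone. A sketch of the likely call site in the
receive path (the exact placement is an assumption, not part of this hunk):

    if (tcp_checksum_complete(skb)) {
        trace_tcp_bad_csum(skb);
        goto csum_error;
    }
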
+
+TRACE_EVENT(tcp_cong_state_set,
+
+ TP_PROTO(struct sock *sk, const u8 ca_state),
+
+ TP_ARGS(sk, ca_state),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ __field(__u8, cong_state)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->cong_state = ca_state;
+ ),
+
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ show_family_name(__entry->family),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->cong_state)
);
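
A natural producer for this event is the helper that records congestion
state transitions; a sketch, assuming the usual tcp_set_ca_state() shape:

    static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
    {
        struct inet_connection_sock *icsk = inet_csk(sk);

        trace_tcp_cong_state_set(sk, ca_state);
        if (icsk->icsk_ca_ops->set_state)
            icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
    }
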
#endif /* _TRACE_TCP_H */
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
deleted file mode 100644
index 135e5421f003..000000000000
--- a/include/trace/events/thermal.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM thermal
-
-#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_THERMAL_H
-
-#include <linux/devfreq.h>
-#include <linux/thermal.h>
-#include <linux/tracepoint.h>
-
-TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL);
-TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT);
-TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE);
-TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE);
-
-#define show_tzt_type(type) \
- __print_symbolic(type, \
- { THERMAL_TRIP_CRITICAL, "CRITICAL"}, \
- { THERMAL_TRIP_HOT, "HOT"}, \
- { THERMAL_TRIP_PASSIVE, "PASSIVE"}, \
- { THERMAL_TRIP_ACTIVE, "ACTIVE"})
-
-TRACE_EVENT(thermal_temperature,
-
- TP_PROTO(struct thermal_zone_device *tz),
-
- TP_ARGS(tz),
-
- TP_STRUCT__entry(
- __string(thermal_zone, tz->type)
- __field(int, id)
- __field(int, temp_prev)
- __field(int, temp)
- ),
-
- TP_fast_assign(
- __assign_str(thermal_zone, tz->type);
- __entry->id = tz->id;
- __entry->temp_prev = tz->last_temperature;
- __entry->temp = tz->temperature;
- ),
-
- TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
- __get_str(thermal_zone), __entry->id, __entry->temp_prev,
- __entry->temp)
-);
-
-TRACE_EVENT(cdev_update,
-
- TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
-
- TP_ARGS(cdev, target),
-
- TP_STRUCT__entry(
- __string(type, cdev->type)
- __field(unsigned long, target)
- ),
-
- TP_fast_assign(
- __assign_str(type, cdev->type);
- __entry->target = target;
- ),
-
- TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
-);
-
-TRACE_EVENT(thermal_zone_trip,
-
- TP_PROTO(struct thermal_zone_device *tz, int trip,
- enum thermal_trip_type trip_type),
-
- TP_ARGS(tz, trip, trip_type),
-
- TP_STRUCT__entry(
- __string(thermal_zone, tz->type)
- __field(int, id)
- __field(int, trip)
- __field(enum thermal_trip_type, trip_type)
- ),
-
- TP_fast_assign(
- __assign_str(thermal_zone, tz->type);
- __entry->id = tz->id;
- __entry->trip = trip;
- __entry->trip_type = trip_type;
- ),
-
- TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s",
- __get_str(thermal_zone), __entry->id, __entry->trip,
- show_tzt_type(__entry->trip_type))
-);
-
-#ifdef CONFIG_CPU_THERMAL
-TRACE_EVENT(thermal_power_cpu_get_power,
- TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
- size_t load_len, u32 dynamic_power),
-
- TP_ARGS(cpus, freq, load, load_len, dynamic_power),
-
- TP_STRUCT__entry(
- __bitmask(cpumask, num_possible_cpus())
- __field(unsigned long, freq )
- __dynamic_array(u32, load, load_len)
- __field(size_t, load_len )
- __field(u32, dynamic_power )
- ),
-
- TP_fast_assign(
- __assign_bitmask(cpumask, cpumask_bits(cpus),
- num_possible_cpus());
- __entry->freq = freq;
- memcpy(__get_dynamic_array(load), load,
- load_len * sizeof(*load));
- __entry->load_len = load_len;
- __entry->dynamic_power = dynamic_power;
- ),
-
- TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
- __get_bitmask(cpumask), __entry->freq,
- __print_array(__get_dynamic_array(load), __entry->load_len, 4),
- __entry->dynamic_power)
-);
-
-TRACE_EVENT(thermal_power_cpu_limit,
- TP_PROTO(const struct cpumask *cpus, unsigned int freq,
- unsigned long cdev_state, u32 power),
-
- TP_ARGS(cpus, freq, cdev_state, power),
-
- TP_STRUCT__entry(
- __bitmask(cpumask, num_possible_cpus())
- __field(unsigned int, freq )
- __field(unsigned long, cdev_state)
- __field(u32, power )
- ),
-
- TP_fast_assign(
- __assign_bitmask(cpumask, cpumask_bits(cpus),
- num_possible_cpus());
- __entry->freq = freq;
- __entry->cdev_state = cdev_state;
- __entry->power = power;
- ),
-
- TP_printk("cpus=%s freq=%u cdev_state=%lu power=%u",
- __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
- __entry->power)
-);
-#endif /* CONFIG_CPU_THERMAL */
-
-#ifdef CONFIG_DEVFREQ_THERMAL
-TRACE_EVENT(thermal_power_devfreq_get_power,
- TP_PROTO(struct thermal_cooling_device *cdev,
- struct devfreq_dev_status *status, unsigned long freq,
- u32 dynamic_power, u32 static_power, u32 power),
-
- TP_ARGS(cdev, status, freq, dynamic_power, static_power, power),
-
- TP_STRUCT__entry(
- __string(type, cdev->type )
- __field(unsigned long, freq )
- __field(u32, load )
- __field(u32, dynamic_power )
- __field(u32, static_power )
- __field(u32, power)
- ),
-
- TP_fast_assign(
- __assign_str(type, cdev->type);
- __entry->freq = freq;
- __entry->load = (100 * status->busy_time) / status->total_time;
- __entry->dynamic_power = dynamic_power;
- __entry->static_power = static_power;
- __entry->power = power;
- ),
-
- TP_printk("type=%s freq=%lu load=%u dynamic_power=%u static_power=%u power=%u",
- __get_str(type), __entry->freq,
- __entry->load, __entry->dynamic_power, __entry->static_power,
- __entry->power)
-);
-
-TRACE_EVENT(thermal_power_devfreq_limit,
- TP_PROTO(struct thermal_cooling_device *cdev, unsigned long freq,
- unsigned long cdev_state, u32 power),
-
- TP_ARGS(cdev, freq, cdev_state, power),
-
- TP_STRUCT__entry(
- __string(type, cdev->type)
- __field(unsigned int, freq )
- __field(unsigned long, cdev_state)
- __field(u32, power )
- ),
-
- TP_fast_assign(
- __assign_str(type, cdev->type);
- __entry->freq = freq;
- __entry->cdev_state = cdev_state;
- __entry->power = power;
- ),
-
- TP_printk("type=%s freq=%u cdev_state=%lu power=%u",
- __get_str(type), __entry->freq, __entry->cdev_state,
- __entry->power)
-);
-#endif /* CONFIG_DEVFREQ_THERMAL */
-#endif /* _TRACE_THERMAL_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
deleted file mode 100644
index 1c8fb95544f9..000000000000
--- a/include/trace/events/thermal_power_allocator.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM thermal_power_allocator
-
-#if !defined(_TRACE_THERMAL_POWER_ALLOCATOR_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_THERMAL_POWER_ALLOCATOR_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(thermal_power_allocator,
- TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
- u32 total_req_power, u32 *granted_power,
- u32 total_granted_power, size_t num_actors,
- u32 power_range, u32 max_allocatable_power,
- int current_temp, s32 delta_temp),
- TP_ARGS(tz, req_power, total_req_power, granted_power,
- total_granted_power, num_actors, power_range,
- max_allocatable_power, current_temp, delta_temp),
- TP_STRUCT__entry(
- __field(int, tz_id )
- __dynamic_array(u32, req_power, num_actors )
- __field(u32, total_req_power )
- __dynamic_array(u32, granted_power, num_actors)
- __field(u32, total_granted_power )
- __field(size_t, num_actors )
- __field(u32, power_range )
- __field(u32, max_allocatable_power )
- __field(int, current_temp )
- __field(s32, delta_temp )
- ),
- TP_fast_assign(
- __entry->tz_id = tz->id;
- memcpy(__get_dynamic_array(req_power), req_power,
- num_actors * sizeof(*req_power));
- __entry->total_req_power = total_req_power;
- memcpy(__get_dynamic_array(granted_power), granted_power,
- num_actors * sizeof(*granted_power));
- __entry->total_granted_power = total_granted_power;
- __entry->num_actors = num_actors;
- __entry->power_range = power_range;
- __entry->max_allocatable_power = max_allocatable_power;
- __entry->current_temp = current_temp;
- __entry->delta_temp = delta_temp;
- ),
-
- TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
- __entry->tz_id,
- __print_array(__get_dynamic_array(req_power),
- __entry->num_actors, 4),
- __entry->total_req_power,
- __print_array(__get_dynamic_array(granted_power),
- __entry->num_actors, 4),
- __entry->total_granted_power, __entry->power_range,
- __entry->max_allocatable_power, __entry->current_temp,
- __entry->delta_temp)
-);
-
-TRACE_EVENT(thermal_power_allocator_pid,
- TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
- s64 p, s64 i, s64 d, s32 output),
- TP_ARGS(tz, err, err_integral, p, i, d, output),
- TP_STRUCT__entry(
- __field(int, tz_id )
- __field(s32, err )
- __field(s32, err_integral)
- __field(s64, p )
- __field(s64, i )
- __field(s64, d )
- __field(s32, output )
- ),
- TP_fast_assign(
- __entry->tz_id = tz->id;
- __entry->err = err;
- __entry->err_integral = err_integral;
- __entry->p = p;
- __entry->i = i;
- __entry->d = d;
- __entry->output = output;
- ),
-
- TP_printk("thermal_zone_id=%d err=%d err_integral=%d p=%lld i=%lld d=%lld output=%d",
- __entry->tz_id, __entry->err, __entry->err_integral,
- __entry->p, __entry->i, __entry->d, __entry->output)
-);
-#endif /* _TRACE_THERMAL_POWER_ALLOCATOR_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/thermal_pressure.h
new file mode 100644
index 000000000000..b68680201360
--- /dev/null
+++ b/include/trace/events/thermal_pressure.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_pressure
+
+#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_PRESSURE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_pressure_update,
+ TP_PROTO(int cpu, unsigned long thermal_pressure),
+ TP_ARGS(cpu, thermal_pressure),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, thermal_pressure)
+ __field(int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->thermal_pressure = thermal_pressure;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure)
+);
+#endif /* _TRACE_THERMAL_PRESSURE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
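
A hedged sketch of the producer side, assuming the generic topology code
updates the per-CPU value and fires the event in the same loop:

    for_each_cpu(cpu, cpus) {
        WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
        trace_thermal_pressure_update(cpu, th_pressure);
    }
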
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index d7fbbe551841..f50048af5fcc 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(hugepage_invalidate,
+DECLARE_EVENT_CLASS(hugepage_set,
TP_PROTO(unsigned long addr, unsigned long pte),
TP_ARGS(addr, pte),
@@ -22,29 +22,20 @@ TRACE_EVENT(hugepage_invalidate,
__entry->pte = pte;
),
- TP_printk("hugepage invalidate at addr 0x%lx and pte = 0x%lx",
- __entry->addr, __entry->pte)
+ TP_printk("Set page table entry with 0x%lx with 0x%lx", __entry->addr, __entry->pte)
);
-TRACE_EVENT(hugepage_set_pmd,
-
+DEFINE_EVENT(hugepage_set, hugepage_set_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
- TP_ARGS(addr, pmd),
- TP_STRUCT__entry(
- __field(unsigned long, addr)
- __field(unsigned long, pmd)
- ),
-
- TP_fast_assign(
- __entry->addr = addr;
- __entry->pmd = pmd;
- ),
-
- TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
+ TP_ARGS(addr, pmd)
);
+DEFINE_EVENT(hugepage_set, hugepage_set_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud),
+ TP_ARGS(addr, pud)
+);
-TRACE_EVENT(hugepage_update,
+DECLARE_EVENT_CLASS(hugepage_update,
TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set),
TP_ARGS(addr, pte, clr, set),
@@ -65,24 +56,44 @@ TRACE_EVENT(hugepage_update,
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
-TRACE_EVENT(hugepage_splitting,
- TP_PROTO(unsigned long addr, unsigned long pte),
- TP_ARGS(addr, pte),
- TP_STRUCT__entry(
- __field(unsigned long, addr)
- __field(unsigned long, pte)
- ),
+DEFINE_EVENT(hugepage_update, hugepage_update_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pmd, clr, set)
+);
- TP_fast_assign(
- __entry->addr = addr;
- __entry->pte = pte;
- ),
+DEFINE_EVENT(hugepage_update, hugepage_update_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pud, clr, set)
+);
- TP_printk("hugepage splitting at addr 0x%lx and pte = 0x%lx",
- __entry->addr, __entry->pte)
+DECLARE_EVENT_CLASS(migration_pmd,
+
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+
+ TP_ARGS(addr, pmd),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, pmd)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->pmd = pmd;
+ ),
+ TP_printk("addr=%lx, pmd=%lx", __entry->addr, __entry->pmd)
);
+DEFINE_EVENT(migration_pmd, set_migration_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
+
+DEFINE_EVENT(migration_pmd, remove_migration_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
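
The two migration events bracket a THP migration: one fires when the PMD is
replaced by a migration entry, the other when a real mapping is put back. A
sketch of the call sites, assuming the split/restore helpers in
mm/huge_memory.c:

    /* set_pmd_migration_entry(): after building the swap-style entry */
    trace_set_migration_pmd(address, pmd_val(pmdswp));

    /* remove_migration_pmd(): once the new page's PMD is ready */
    trace_remove_migration_pmd(address, pmd_val(pmde));
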
#endif /* _TRACE_THP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 19abb6c3eb73..1ef58a04fc57 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -46,21 +46,21 @@ DEFINE_EVENT(timer_class, timer_init,
/**
* timer_start - called when the timer is started
- * @timer: pointer to struct timer_list
- * @expires: the timers expiry time
+ * @timer: pointer to struct timer_list
+ * @bucket_expiry: the bucket expiry time
*/
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
- unsigned long expires,
- unsigned int flags),
+ unsigned long bucket_expiry),
- TP_ARGS(timer, expires, flags),
+ TP_ARGS(timer, bucket_expiry),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
+ __field( unsigned long, bucket_expiry )
__field( unsigned long, now )
__field( unsigned int, flags )
),
@@ -68,15 +68,16 @@ TRACE_EVENT(timer_start,
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
- __entry->expires = expires;
+ __entry->expires = timer->expires;
+ __entry->bucket_expiry = bucket_expiry;
__entry->now = jiffies;
- __entry->flags = flags;
+ __entry->flags = timer->flags;
),
- TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
- __entry->flags & TIMER_CPUMASK,
+ __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
__entry->flags >> TIMER_ARRAYSHIFT,
decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
@@ -84,6 +85,7 @@ TRACE_EVENT(timer_start,
/**
* timer_expire_entry - called immediately before the timer callback
* @timer: pointer to struct timer_list
+ * @baseclk: value of timer_base::clk when timer expires
*
* Allows to determine the timer latency.
*/
@@ -119,7 +121,7 @@ TRACE_EVENT(timer_expire_entry,
* When used in combination with the timer_expire_entry tracepoint we can
* determine the runtime of the timer callback function.
*
- * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might
+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
* be invalid. We solely track the pointer.
*/
DEFINE_EVENT(timer_class, timer_expire_exit,
@@ -140,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+TRACE_EVENT(timer_base_idle,
+
+ TP_PROTO(bool is_idle, unsigned int cpu),
+
+ TP_ARGS(is_idle, cpu),
+
+ TP_STRUCT__entry(
+ __field( bool, is_idle )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->is_idle = is_idle;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("is_idle=%d cpu=%d",
+ __entry->is_idle, __entry->cpu)
+);
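
A sketch of the producer, assuming the timer core flags transitions of a
per-CPU base in and out of idle:

    if (base->is_idle != is_idle) {
        base->is_idle = is_idle;
        trace_timer_base_idle(is_idle, base->cpu);
    }
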
+
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
@@ -156,7 +178,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
{ HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
{ HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
{ HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
- { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" }, \
+ { HRTIMER_MODE_ABS_HARD, "ABS|HARD" }, \
+ { HRTIMER_MODE_REL_HARD, "REL|HARD" }, \
+ { HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" }, \
+ { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" })
/**
* hrtimer_init - called when the hrtimer is initialized
@@ -190,7 +216,8 @@ TRACE_EVENT(hrtimer_init,
/**
* hrtimer_start - called when the hrtimer is started
- * @hrtimer: pointer to struct hrtimer
+ * @hrtimer: pointer to struct hrtimer
+ * @mode: the hrtimer's mode
*/
TRACE_EVENT(hrtimer_start,
@@ -368,7 +395,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
tick_dep_name(CLOCK_UNSTABLE) \
- tick_dep_name_end(RCU)
+ tick_dep_name(RCU) \
+ tick_dep_name_end(RCU_EXP)
#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
new file mode 100644
index 000000000000..79f19e76a80b
--- /dev/null
+++ b/include/trace/events/timer_migration.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer_migration
+
+#if !defined(_TRACE_TIMER_MIGRATION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMER_MIGRATION_H
+
+#include <linux/tracepoint.h>
+
+/* Group events */
+TRACE_EVENT(tmigr_group_set,
+
+ TP_PROTO(struct tmigr_group *group),
+
+ TP_ARGS(group),
+
+ TP_STRUCT__entry(
+ __field( void *, group )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ ),
+
+ TP_fast_assign(
+ __entry->group = group;
+ __entry->lvl = group->level;
+ __entry->numa_node = group->numa_node;
+ ),
+
+ TP_printk("group=%p lvl=%d numa=%d",
+ __entry->group, __entry->lvl, __entry->numa_node)
+);
+
+TRACE_EVENT(tmigr_connect_child_parent,
+
+ TP_PROTO(struct tmigr_group *child),
+
+ TP_ARGS(child),
+
+ TP_STRUCT__entry(
+ __field( void *, child )
+ __field( void *, parent )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ __field( unsigned int, num_children )
+ __field( u32, childmask )
+ ),
+
+ TP_fast_assign(
+ __entry->child = child;
+ __entry->parent = child->parent;
+ __entry->lvl = child->parent->level;
+ __entry->numa_node = child->parent->numa_node;
+ __entry->num_children = child->parent->num_children;
+ __entry->childmask = child->childmask;
+ ),
+
+ TP_printk("group=%p childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->child, __entry->childmask, __entry->parent,
+ __entry->lvl, __entry->numa_node, __entry->num_children)
+);
+
+TRACE_EVENT(tmigr_connect_cpu_parent,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc),
+
+ TP_STRUCT__entry(
+ __field( void *, parent )
+ __field( unsigned int, cpu )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ __field( unsigned int, num_children )
+ __field( u32, childmask )
+ ),
+
+ TP_fast_assign(
+ __entry->parent = tmc->tmgroup;
+ __entry->cpu = tmc->cpuevt.cpu;
+ __entry->lvl = tmc->tmgroup->level;
+ __entry->numa_node = tmc->tmgroup->numa_node;
+ __entry->num_children = tmc->tmgroup->num_children;
+ __entry->childmask = tmc->childmask;
+ ),
+
+ TP_printk("cpu=%d childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->cpu, __entry->childmask, __entry->parent,
+ __entry->lvl, __entry->numa_node, __entry->num_children)
+);
+
+DECLARE_EVENT_CLASS(tmigr_group_and_cpu,
+
+ TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+ TP_ARGS(group, state, childmask),
+
+ TP_STRUCT__entry(
+ __field( void *, group )
+ __field( void *, parent )
+ __field( unsigned int, lvl )
+ __field( unsigned int, numa_node )
+ __field( u32, childmask )
+ __field( u8, active )
+ __field( u8, migrator )
+ ),
+
+ TP_fast_assign(
+ __entry->group = group;
+ __entry->parent = group->parent;
+ __entry->lvl = group->level;
+ __entry->numa_node = group->numa_node;
+ __entry->childmask = childmask;
+ __entry->active = state.active;
+ __entry->migrator = state.migrator;
+ ),
+
+ TP_printk("group=%p lvl=%d numa=%d active=%0x migrator=%0x "
+ "parent=%p childmask=%0x",
+ __entry->group, __entry->lvl, __entry->numa_node,
+ __entry->active, __entry->migrator,
+ __entry->parent, __entry->childmask)
+);
+
+DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_inactive,
+
+ TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+ TP_ARGS(group, state, childmask)
+);
+
+DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_active,
+
+ TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+ TP_ARGS(group, state, childmask)
+);
+
+/* CPU events */
+DECLARE_EVENT_CLASS(tmigr_cpugroup,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc),
+
+ TP_STRUCT__entry(
+ __field( u64, wakeup )
+ __field( void *, parent )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->wakeup = tmc->wakeup;
+ __entry->parent = tmc->tmgroup;
+ __entry->cpu = tmc->cpuevt.cpu;
+ ),
+
+ TP_printk("cpu=%d parent=%p wakeup=%llu", __entry->cpu, __entry->parent, __entry->wakeup)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_new_timer,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_handle_remote_cpu,
+
+ TP_PROTO(struct tmigr_cpu *tmc),
+
+ TP_ARGS(tmc)
+);
+
+DECLARE_EVENT_CLASS(tmigr_idle,
+
+ TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+ TP_ARGS(tmc, nextevt),
+
+ TP_STRUCT__entry(
+ __field( u64, nextevt)
+ __field( u64, wakeup)
+ __field( void *, parent)
+ __field( unsigned int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->nextevt = nextevt;
+ __entry->wakeup = tmc->wakeup;
+ __entry->parent = tmc->tmgroup;
+ __entry->cpu = tmc->cpuevt.cpu;
+ ),
+
+ TP_printk("cpu=%d parent=%p nextevt=%llu wakeup=%llu",
+ __entry->cpu, __entry->parent, __entry->nextevt, __entry->wakeup)
+);
+
+DEFINE_EVENT(tmigr_idle, tmigr_cpu_idle,
+
+ TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+ TP_ARGS(tmc, nextevt)
+);
+
+DEFINE_EVENT(tmigr_idle, tmigr_cpu_new_timer_idle,
+
+ TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+ TP_ARGS(tmc, nextevt)
+);
+
+TRACE_EVENT(tmigr_update_events,
+
+ TP_PROTO(struct tmigr_group *child, struct tmigr_group *group,
+ union tmigr_state childstate, union tmigr_state groupstate,
+ u64 nextevt),
+
+ TP_ARGS(child, group, childstate, groupstate, nextevt),
+
+ TP_STRUCT__entry(
+ __field( void *, child )
+ __field( void *, group )
+ __field( u64, nextevt )
+ __field( u64, group_next_expiry )
+ __field( u64, child_evt_expiry )
+ __field( unsigned int, group_lvl )
+ __field( unsigned int, child_evtcpu )
+ __field( u8, child_active )
+ __field( u8, group_active )
+ ),
+
+ TP_fast_assign(
+ __entry->child = child;
+ __entry->group = group;
+ __entry->nextevt = nextevt;
+ __entry->group_next_expiry = group->next_expiry;
+ __entry->child_evt_expiry = child ? child->groupevt.nextevt.expires : 0;
+ __entry->group_lvl = group->level;
+ __entry->child_evtcpu = child ? child->groupevt.cpu : 0;
+ __entry->child_active = childstate.active;
+ __entry->group_active = groupstate.active;
+ ),
+
+ TP_printk("child=%p group=%p group_lvl=%d child_active=%0x group_active=%0x "
+ "nextevt=%llu next_expiry=%llu child_evt_expiry=%llu child_evtcpu=%d",
+ __entry->child, __entry->group, __entry->group_lvl, __entry->child_active,
+ __entry->group_active,
+ __entry->nextevt, __entry->group_next_expiry, __entry->child_evt_expiry,
+ __entry->child_evtcpu)
+);
+
+TRACE_EVENT(tmigr_handle_remote,
+
+ TP_PROTO(struct tmigr_group *group),
+
+ TP_ARGS(group),
+
+ TP_STRUCT__entry(
+ __field( void * , group )
+ __field( unsigned int , lvl )
+ ),
+
+ TP_fast_assign(
+ __entry->group = group;
+ __entry->lvl = group->level;
+ ),
+
+ TP_printk("group=%p lvl=%d",
+ __entry->group, __entry->lvl)
+);
+
+#endif /* _TRACE_TIMER_MIGRATION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
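
All of these events hang off the group hierarchy that the timer-migration
code builds as CPUs come up; a hedged sketch of the setup-time emitters
(their placement inside kernel/time/timer_migration.c is an assumption):

    trace_tmigr_group_set(group);            /* group created */
    trace_tmigr_connect_child_parent(child); /* group wired to its parent */
    trace_tmigr_connect_cpu_parent(tmc);     /* CPU attached at level 0 */
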
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 84841b3a7ffd..b930669bd1f0 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -11,31 +11,60 @@
#include <linux/tracepoint.h>
-#define UFS_LINK_STATES \
- EM(UIC_LINK_OFF_STATE) \
- EM(UIC_LINK_ACTIVE_STATE) \
- EMe(UIC_LINK_HIBERN8_STATE)
-
-#define UFS_PWR_MODES \
- EM(UFS_ACTIVE_PWR_MODE) \
- EM(UFS_SLEEP_PWR_MODE) \
- EMe(UFS_POWERDOWN_PWR_MODE)
-
-#define UFSCHD_CLK_GATING_STATES \
- EM(CLKS_OFF) \
- EM(CLKS_ON) \
- EM(REQ_CLKS_OFF) \
- EMe(REQ_CLKS_ON)
+#define str_opcode(opcode) \
+ __print_symbolic(opcode, \
+ { WRITE_16, "WRITE_16" }, \
+ { WRITE_10, "WRITE_10" }, \
+ { READ_16, "READ_16" }, \
+ { READ_10, "READ_10" }, \
+ { SYNCHRONIZE_CACHE, "SYNC" }, \
+ { UNMAP, "UNMAP" })
+
+#define UFS_LINK_STATES \
+ EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
+ EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
+ EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
+
+#define UFS_PWR_MODES \
+ EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
+ EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
+ EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
+ EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
+
+#define UFSCHD_CLK_GATING_STATES \
+ EM(CLKS_OFF, "CLKS_OFF") \
+ EM(CLKS_ON, "CLKS_ON") \
+ EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
+ EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
+
+#define UFS_CMD_TRACE_STRINGS \
+ EM(UFS_CMD_SEND, "send_req") \
+ EM(UFS_CMD_COMP, "complete_rsp") \
+ EM(UFS_DEV_COMP, "dev_complete") \
+ EM(UFS_QUERY_SEND, "query_send") \
+ EM(UFS_QUERY_COMP, "query_complete") \
+ EM(UFS_QUERY_ERR, "query_complete_err") \
+ EM(UFS_TM_SEND, "tm_send") \
+ EM(UFS_TM_COMP, "tm_complete") \
+ EMe(UFS_TM_ERR, "tm_complete_err")
+
+#define UFS_CMD_TRACE_TSF_TYPES \
+ EM(UFS_TSF_CDB, "CDB") \
+ EM(UFS_TSF_OSF, "OSF") \
+ EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
+ EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
/* Enums require being exported to userspace, for user tool parsing */
#undef EM
#undef EMe
-#define EM(a) TRACE_DEFINE_ENUM(a);
-#define EMe(a) TRACE_DEFINE_ENUM(a);
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
UFS_LINK_STATES;
UFS_PWR_MODES;
UFSCHD_CLK_GATING_STATES;
+UFS_CMD_TRACE_STRINGS
+UFS_CMD_TRACE_TSF_TYPES
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
@@ -43,8 +72,13 @@ UFSCHD_CLK_GATING_STATES;
*/
#undef EM
#undef EMe
-#define EM(a) { a, #a },
-#define EMe(a) { a, #a }
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+#define show_ufs_cmd_trace_str(str_t) \
+ __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
+#define show_ufs_cmd_trace_tsf(tsf) \
+ __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
TRACE_EVENT(ufshcd_clk_gating,
@@ -212,52 +246,79 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init,
int dev_state, int link_state),
TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
TRACE_EVENT(ufshcd_command,
- TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
- u32 doorbell, int transfer_len, u32 intr, u64 lba,
- u8 opcode),
+ TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+ unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
+ u32 intr, u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
+ TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ opcode, group_id),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(str, str)
+ __field(struct scsi_device *, sdev)
+ __field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
- __field(int, transfer_len)
+ __field(u32, hwq_id)
__field(u32, intr)
__field(u64, lba)
+ __field(int, transfer_len)
__field(u8, opcode)
+ __field(u8, group_id)
),
TP_fast_assign(
- __assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->sdev = sdev;
+ __entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
- __entry->transfer_len = transfer_len;
+ __entry->hwq_id = hwq_id;
__entry->intr = intr;
__entry->lba = lba;
+ __entry->transfer_len = transfer_len;
__entry->opcode = opcode;
+ __entry->group_id = group_id;
),
TP_printk(
- "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
- __get_str(str), __get_str(dev_name), __entry->tag,
- __entry->doorbell, __entry->transfer_len,
- __entry->intr, __entry->lba, (u32)__entry->opcode
+ "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d",
+ show_ufs_cmd_trace_str(__entry->str_t),
+ dev_name(&__entry->sdev->sdev_dev), __entry->tag,
+ __entry->doorbell, __entry->transfer_len, __entry->intr,
+ __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode),
+ (u32)__entry->group_id, __entry->hwq_id
)
);
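
The call site grows accordingly: instead of formatting a device name and a
literal string, the driver hands over the scsi_device and the enum, plus
MCQ's hardware queue id. A one-line sketch, assuming the wrapper in the UFS
core:

    trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
                         transfer_len, intr, lba, opcode, group_id);
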
TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, const char *str, u32 cmd,
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
u32 arg1, u32 arg2, u32 arg3),
- TP_ARGS(dev_name, str, cmd, arg1, arg2, arg3),
+ TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__field(u32, cmd)
__field(u32, arg1)
__field(u32, arg2)
@@ -266,7 +327,7 @@ TRACE_EVENT(ufshcd_uic_command,
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
__entry->cmd = cmd;
__entry->arg1 = arg1;
__entry->arg2 = arg2;
@@ -275,38 +336,63 @@ TRACE_EVENT(ufshcd_uic_command,
TP_printk(
"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- __get_str(str), __get_str(dev_name), __entry->cmd,
- __entry->arg1, __entry->arg2, __entry->arg3
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str, hdr, tsf),
+ TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
+ __field(enum ufs_trace_tsf_t, tsf_t)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
+ __entry->tsf_t = tsf_t;
),
TP_printk(
- "%s: %s: HDR:%s, CDB:%s",
- __get_str(str), __get_str(dev_name),
+ "%s: %s: HDR:%s, %s:%s",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
+ show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
)
);
+TRACE_EVENT(ufshcd_exception_event,
+
+ TP_PROTO(const char *dev_name, u16 status),
+
+ TP_ARGS(dev_name, status),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(u16, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name);
+ __entry->status = status;
+ ),
+
+ TP_printk("%s: status 0x%x",
+ __get_str(dev_name), __entry->status
+ )
+);
+
#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
diff --git a/include/trace/events/vmalloc.h b/include/trace/events/vmalloc.h
new file mode 100644
index 000000000000..ad4e02191f35
--- /dev/null
+++ b/include/trace/events/vmalloc.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vmalloc
+
+#if !defined(_TRACE_VMALLOC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VMALLOC_H
+
+#include <linux/tracepoint.h>
+
+/**
+ * alloc_vmap_area - called when a new vmap allocation occurs
+ * @addr: an allocated address
+ * @size: a requested size
+ * @align: a requested alignment
+ * @vstart: a requested start range
+ * @vend: a requested end range
+ * @failed: whether the allocation failed
+ *
+ * This event is used for debugging purposes. It can give extra
+ * information to a developer about how often it occurs and which
+ * parameters are passed for further validation.
+ */
+TRACE_EVENT(alloc_vmap_area,
+
+ TP_PROTO(unsigned long addr, unsigned long size, unsigned long align,
+ unsigned long vstart, unsigned long vend, int failed),
+
+ TP_ARGS(addr, size, align, vstart, vend, failed),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, size)
+ __field(unsigned long, align)
+ __field(unsigned long, vstart)
+ __field(unsigned long, vend)
+ __field(int, failed)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->vstart = vstart;
+ __entry->vend = vend;
+ __entry->failed = failed;
+ ),
+
+ TP_printk("va_start: %lu size=%lu align=%lu vstart=0x%lx vend=0x%lx failed=%d",
+ __entry->addr, __entry->size, __entry->align,
+ __entry->vstart, __entry->vend, __entry->failed)
+);
+
+/**
+ * purge_vmap_area_lazy - called when vmap areas were lazily freed
+ * @start: purging start address
+ * @end: purging end address
+ * @npurged: number of purged vmap areas
+ *
+ * This event is used for debugging purposes. It gives some
+ * indication about the start:end range and how many objects
+ * are released.
+ */
+TRACE_EVENT(purge_vmap_area_lazy,
+
+ TP_PROTO(unsigned long start, unsigned long end,
+ unsigned int npurged),
+
+ TP_ARGS(start, end, npurged),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, npurged)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->npurged = npurged;
+ ),
+
+ TP_printk("start=0x%lx end=0x%lx num_purged=%u",
+ __entry->start, __entry->end, __entry->npurged)
+);
+
+/**
+ * free_vmap_area_noflush - called when a vmap area is freed
+ * @va_start: a start address of VA
+ * @nr_lazy: number of current lazy pages
+ * @nr_lazy_max: number of maximum lazy pages
+ *
+ * This event is used for debugging purposes. It gives some
+ * indication about a VA that is released, the number of currently
+ * outstanding areas, and the maximum allowed threshold before
+ * all of them are dropped.
+ */
+TRACE_EVENT(free_vmap_area_noflush,
+
+ TP_PROTO(unsigned long va_start, unsigned long nr_lazy,
+ unsigned long nr_lazy_max),
+
+ TP_ARGS(va_start, nr_lazy, nr_lazy_max),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, va_start)
+ __field(unsigned long, nr_lazy)
+ __field(unsigned long, nr_lazy_max)
+ ),
+
+ TP_fast_assign(
+ __entry->va_start = va_start;
+ __entry->nr_lazy = nr_lazy;
+ __entry->nr_lazy_max = nr_lazy_max;
+ ),
+
+ TP_printk("va_start=0x%lx nr_lazy=%lu nr_lazy_max=%lu",
+ __entry->va_start, __entry->nr_lazy, __entry->nr_lazy_max)
+);
+
+#endif /* _TRACE_VMALLOC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
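
A compact sketch of the three producers, assuming the obvious spots in
mm/vmalloc.c:

    trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
    trace_purge_vmap_area_lazy(start, end, num_purged_areas);
    trace_free_vmap_area_noflush(va->va_start, nr_lazy, nr_lazy_max);
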
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 2070df64958e..1a488c30afa5 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -27,6 +27,20 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
+#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
+#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
+#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
+#define _VMSCAN_THROTTLE_CONGESTED (1 << VMSCAN_THROTTLE_CONGESTED)
+
+#define show_throttle_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
+ {_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
+ {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"}, \
+ {_VMSCAN_THROTTLE_CONGESTED, "VMSCAN_THROTTLE_CONGESTED"} \
+ ) : "VMSCAN_THROTTLE_NONE"
+
+
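Note the shift: the tracepoint receives a plain enum index but stores
1U << reason (see mm_vmscan_throttled below), which is what lets
__print_flags() decode it against the _VMSCAN_THROTTLE_* masks above.
Worked through for one value:

    /* producer: reason == VMSCAN_THROTTLE_WRITEBACK (0) */
    __entry->reason = 1U << reason;    /* stored as 0x1 */
    /* consumer: show_throttle_flags(0x1) prints
     * "VMSCAN_THROTTLE_WRITEBACK"; a zero value prints
     * "VMSCAN_THROTTLE_NONE" */
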
#define trace_reclaim_flags(file) ( \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
@@ -82,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
__field( int, nid )
__field( int, zid )
__field( int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -106,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
TP_STRUCT__entry(
__field( int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("order=%d gfp_flags=%s",
@@ -196,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__field(void *, shrink)
__field(int, nid)
__field(long, nr_objects_to_shrink)
- __field(gfp_t, gfp_flags)
+ __field(unsigned long, gfp_flags)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
@@ -208,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->shrink = shr->scan_objects;
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
- __entry->gfp_flags = sc->gfp_mask;
+ __entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
@@ -271,10 +285,9 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
unsigned long nr_scanned,
unsigned long nr_skipped,
unsigned long nr_taken,
- isolate_mode_t isolate_mode,
int lru),
- TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
+ TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, lru),
TP_STRUCT__entry(
__field(int, highest_zoneidx)
@@ -283,7 +296,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_skipped)
__field(unsigned long, nr_taken)
- __field(isolate_mode_t, isolate_mode)
__field(int, lru)
),
@@ -294,7 +306,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__entry->nr_scanned = nr_scanned;
__entry->nr_skipped = nr_skipped;
__entry->nr_taken = nr_taken;
- __entry->isolate_mode = isolate_mode;
__entry->lru = lru;
),
@@ -302,8 +313,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
* classzone is previous name of the highest_zoneidx.
* Reason not to change it is the ABI requirement of the tracepoint.
*/
- TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
- __entry->isolate_mode,
+ TP_printk("classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
__entry->highest_zoneidx,
__entry->order,
__entry->nr_requested,
@@ -313,11 +323,11 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__print_symbolic(__entry->lru, LRU_NAMES))
);
-TRACE_EVENT(mm_vmscan_writepage,
+TRACE_EVENT(mm_vmscan_write_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
__field(unsigned long, pfn)
@@ -325,12 +335,12 @@ TRACE_EVENT(mm_vmscan_writepage,
),
TP_fast_assign(
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = folio_pfn(folio);
__entry->reclaim_flags = trace_reclaim_flags(
- page_is_file_lru(page));
+ folio_is_file_lru(folio));
),
- TP_printk("page=%p pfn=%lu flags=%s",
+ TP_printk("page=%p pfn=0x%lx flags=%s",
pfn_to_page(__entry->pfn),
__entry->pfn,
show_reclaim_flags(__entry->reclaim_flags))
@@ -423,47 +433,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active,
show_reclaim_flags(__entry->reclaim_flags))
);
-TRACE_EVENT(mm_vmscan_inactive_list_is_low,
-
- TP_PROTO(int nid, int reclaim_idx,
- unsigned long total_inactive, unsigned long inactive,
- unsigned long total_active, unsigned long active,
- unsigned long ratio, int file),
-
- TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file),
-
- TP_STRUCT__entry(
- __field(int, nid)
- __field(int, reclaim_idx)
- __field(unsigned long, total_inactive)
- __field(unsigned long, inactive)
- __field(unsigned long, total_active)
- __field(unsigned long, active)
- __field(unsigned long, ratio)
- __field(int, reclaim_flags)
- ),
-
- TP_fast_assign(
- __entry->nid = nid;
- __entry->reclaim_idx = reclaim_idx;
- __entry->total_inactive = total_inactive;
- __entry->inactive = inactive;
- __entry->total_active = total_active;
- __entry->active = active;
- __entry->ratio = ratio;
- __entry->reclaim_flags = trace_reclaim_flags(file) &
- RECLAIM_WB_LRU;
- ),
-
- TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s",
- __entry->nid,
- __entry->reclaim_idx,
- __entry->total_inactive, __entry->inactive,
- __entry->total_active, __entry->active,
- __entry->ratio,
- show_reclaim_flags(__entry->reclaim_flags))
-);
-
TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_PROTO(int nid, int order, gfp_t gfp_flags),
@@ -473,13 +442,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_STRUCT__entry(
__field(int, nid)
__field(int, order)
- __field(gfp_t, gfp_flags)
+ __field(unsigned long, gfp_flags)
),
TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -495,6 +464,32 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_node_reclaim_end,
TP_ARGS(nr_reclaimed)
);
+TRACE_EVENT(mm_vmscan_throttled,
+
+ TP_PROTO(int nid, int usec_timeout, int usec_delayed, int reason),
+
+ TP_ARGS(nid, usec_timeout, usec_delayed, reason),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, usec_timeout)
+ __field(int, usec_delayed)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->usec_timeout = usec_timeout;
+ __entry->usec_delayed = usec_delayed;
+ __entry->reason = 1U << reason;
+ ),
+
+ TP_printk("nid=%d usec_timeout=%d usect_delayed=%d reason=%s",
+ __entry->nid,
+ __entry->usec_timeout,
+ __entry->usec_delayed,
+ show_throttle_flags(__entry->reason))
+);
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
index 6782213778be..f1ebe36787c3 100644
--- a/include/trace/events/vsock_virtio_transport_common.h
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -9,9 +9,12 @@
#include <linux/tracepoint.h>
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET);
#define show_type(val) \
- __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" })
+ __print_symbolic(val, \
+ { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \
+ { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" })
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID);
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST);
@@ -40,7 +43,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__u32 len,
__u16 type,
__u16 op,
- __u32 flags
+ __u32 flags,
+ bool zcopy
),
TP_ARGS(
src_cid, src_port,
@@ -48,7 +52,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
len,
type,
op,
- flags
+ flags,
+ zcopy
),
TP_STRUCT__entry(
__field(__u32, src_cid)
@@ -59,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__field(__u16, type)
__field(__u16, op)
__field(__u32, flags)
+ __field(bool, zcopy)
),
TP_fast_assign(
__entry->src_cid = src_cid;
@@ -69,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__entry->type = type;
__entry->op = op;
__entry->flags = flags;
+ __entry->zcopy = zcopy;
),
- TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s",
__entry->src_cid, __entry->src_port,
__entry->dst_cid, __entry->dst_port,
__entry->len,
show_type(__entry->type),
show_op(__entry->op),
- __entry->flags)
+ __entry->flags, __entry->zcopy ? "true" : "false")
);
TRACE_EVENT(virtio_transport_recv_pkt,
diff --git a/include/trace/events/watchdog.h b/include/trace/events/watchdog.h
new file mode 100644
index 000000000000..beb9bb3424c8
--- /dev/null
+++ b/include/trace/events/watchdog.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM watchdog
+
+#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WATCHDOG_H
+
+#include <linux/watchdog.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(watchdog_template,
+
+ TP_PROTO(struct watchdog_device *wdd, int err),
+
+ TP_ARGS(wdd, err),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->id = wdd->id;
+ __entry->err = err;
+ ),
+
+ TP_printk("watchdog%d err=%d", __entry->id, __entry->err)
+);
+
+DEFINE_EVENT(watchdog_template, watchdog_start,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_ping,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_stop,
+ TP_PROTO(struct watchdog_device *wdd, int err),
+ TP_ARGS(wdd, err));
+
+TRACE_EVENT(watchdog_set_timeout,
+
+ TP_PROTO(struct watchdog_device *wdd, unsigned int timeout, int err),
+
+ TP_ARGS(wdd, timeout, err),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(unsigned int, timeout)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->id = wdd->id;
+ __entry->timeout = timeout;
+ __entry->err = err;
+ ),
+
+ TP_printk("watchdog%d timeout=%u err=%d", __entry->id, __entry->timeout, __entry->err)
+);
+
+#endif /* !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
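The event class keeps start/ping/stop identical in layout, so they share one print handler. The watchdog core would fire the generated helpers roughly as sketched below (call sites are illustrative, not the actual drivers/watchdog code):

    /* Sketch: pairing each core operation with its class-derived event. */
    err = wdd->ops->ping(wdd);
    trace_watchdog_ping(wdd, err);

    err = wdd->ops->set_timeout(wdd, timeout);
    trace_watchdog_set_timeout(wdd, timeout, err);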
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index 9c66e59d859c..4661f0d27062 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
@@ -68,7 +68,7 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
@@ -141,7 +141,7 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
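The strlcpy()-to-strscpy() conversions in this file are about return semantics and over-read safety: strlcpy() returns strlen(src) and therefore must walk the entire source string, while strscpy() returns the number of characters copied, or -E2BIG on truncation, and never reads beyond what fits the destination. A minimal sketch:

    char name[32];
    ssize_t n = strscpy(name, bdi_dev_name(bdi), sizeof(name));
    /* n >= 0: characters copied; n == -E2BIG: truncated to 31 chars + NUL */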
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 9b8ae961acc5..262d52021c23 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -22,7 +22,7 @@ struct pool_workqueue;
*/
TRACE_EVENT(workqueue_queue_work,
- TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+ TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work),
TP_ARGS(req_cpu, pwq, work),
@@ -30,21 +30,21 @@ TRACE_EVENT(workqueue_queue_work,
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
- __field( void *, workqueue)
- __field( unsigned int, req_cpu )
- __field( unsigned int, cpu )
+ __string( workqueue, pwq->wq->name)
+ __field( int, req_cpu )
+ __field( int, cpu )
),
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
- __entry->workqueue = pwq->wq;
+ __assign_str(workqueue, pwq->wq->name);
__entry->req_cpu = req_cpu;
__entry->cpu = pwq->pool->cpu;
),
- TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
- __entry->work, __entry->function, __entry->workqueue,
+ TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
+ __entry->work, __entry->function, __get_str(workqueue),
__entry->req_cpu, __entry->cpu)
);
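This hunk is also a compact illustration of the canonical dynamic-string pattern in trace events: declare with __string(), copy with __assign_str() inside TP_fast_assign(), read back with __get_str() in TP_printk(). Recording the name rather than the workqueue pointer makes the event directly readable, with no pointer-to-name lookup needed later. The generic shape, as a sketch:

    __string(workqueue, pwq->wq->name)          /* reserves strlen(name)+1 bytes */
    __assign_str(workqueue, pwq->wq->name);     /* copies into the ring buffer   */
    __get_str(workqueue)                        /* yields the copied string      */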
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index e7cbccc7c14c..54e353c9f919 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -36,7 +36,8 @@
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
- EMe(WB_REASON_FORKER_THREAD, "forker_thread")
+ EM( WB_REASON_FORKER_THREAD, "forker_thread") \
+ EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush")
WB_WORK_REASON
@@ -51,11 +52,11 @@ WB_WORK_REASON
struct wb_writeback_work;
-DECLARE_EVENT_CLASS(writeback_page_template,
+DECLARE_EVENT_CLASS(writeback_folio_template,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping),
+ TP_ARGS(folio, mapping),
TP_STRUCT__entry (
__array(char, name, 32)
@@ -67,8 +68,8 @@ DECLARE_EVENT_CLASS(writeback_page_template,
strscpy_pad(__entry->name,
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
NULL), 32);
- __entry->ino = mapping ? mapping->host->i_ino : 0;
- __entry->index = page->index;
+ __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
+ __entry->index = folio->index;
),
TP_printk("bdi %s: ino=%lu index=%lu",
@@ -78,18 +79,18 @@ DECLARE_EVENT_CLASS(writeback_page_template,
)
);
-DEFINE_EVENT(writeback_page_template, writeback_dirty_page,
+DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping)
+ TP_ARGS(folio, mapping)
);
-DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,
+DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,
- TP_PROTO(struct page *page, struct address_space *mapping),
+ TP_PROTO(struct folio *folio, struct address_space *mapping),
- TP_ARGS(page, mapping)
+ TP_ARGS(folio, mapping)
);
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
@@ -190,7 +191,7 @@ TRACE_EVENT(inode_foreign_history,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
__entry->history = history;
@@ -219,7 +220,7 @@ TRACE_EVENT(inode_switch_wbs,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
__entry->ino = inode->i_ino;
__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
@@ -235,9 +236,9 @@ TRACE_EVENT(inode_switch_wbs,
TRACE_EVENT(track_foreign_dirty,
- TP_PROTO(struct page *page, struct bdi_writeback *wb),
+ TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
- TP_ARGS(page, wb),
+ TP_ARGS(folio, wb),
TP_STRUCT__entry(
__array(char, name, 32)
@@ -249,15 +250,15 @@ TRACE_EVENT(track_foreign_dirty,
),
TP_fast_assign(
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping = folio_mapping(folio);
struct inode *inode = mapping ? mapping->host : NULL;
- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->bdi_id = wb->bdi->id;
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- __entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
+ __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
),
TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
@@ -285,7 +286,7 @@ TRACE_EVENT(flush_foreign,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->frn_bdi_id = frn_bdi_id;
__entry->frn_memcg_id = frn_memcg_id;
@@ -734,41 +735,6 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
)
);
-DECLARE_EVENT_CLASS(writeback_congest_waited_template,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed),
-
- TP_STRUCT__entry(
- __field( unsigned int, usec_timeout )
- __field( unsigned int, usec_delayed )
- ),
-
- TP_fast_assign(
- __entry->usec_timeout = usec_timeout;
- __entry->usec_delayed = usec_delayed;
- ),
-
- TP_printk("usec_timeout=%u usec_delayed=%u",
- __entry->usec_timeout,
- __entry->usec_delayed)
-);
-
-DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
-DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_PROTO(struct inode *inode,
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index cd24e8a59529..9adc2bdf2f94 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -9,6 +9,7 @@
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>
+#include <net/xdp.h>
#define __XDP_ACT_MAP(FN) \
FN(ABORTED) \
@@ -86,19 +87,15 @@ struct _bpf_dtab_netdev {
};
#endif /* __DEVMAP_OBJ_TYPE */
-#define devmap_ifindex(tgt, map) \
- (((map->map_type == BPF_MAP_TYPE_DEVMAP || \
- map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
- ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
-
DECLARE_EVENT_CLASS(xdp_redirect_template,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
TP_STRUCT__entry(
__field(int, prog_id)
@@ -111,14 +108,26 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
),
TP_fast_assign(
+ u32 ifindex = 0, map_index = index;
+
+ if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ /* Leave to_ifindex at 0 for broadcast redirects,
+ * as tgt will be NULL.
+ */
+ if (tgt)
+ ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
+ } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+ ifindex = index;
+ map_index = 0;
+ }
+
__entry->prog_id = xdp->aux->id;
__entry->act = XDP_REDIRECT;
__entry->ifindex = dev->ifindex;
__entry->err = err;
- __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
- index;
- __entry->map_id = map ? map->id : 0;
- __entry->map_index = map ? index : 0;
+ __entry->to_ifindex = ifindex;
+ __entry->map_id = map_id;
+ __entry->map_index = map_index;
),
TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
@@ -133,45 +142,49 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
-#define _trace_xdp_redirect(dev, xdp, to) \
- trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
+#define _trace_xdp_redirect(dev, xdp, to) \
+ trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
-#define _trace_xdp_redirect_err(dev, xdp, to, err) \
- trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
+#define _trace_xdp_redirect_err(dev, xdp, to, err) \
+ trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
-#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
- trace_xdp_redirect(dev, xdp, to, 0, map, index);
+#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
+ trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)
-#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
- trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
+ trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
const void *tgt, int err,
- const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map, index)
+ enum bpf_map_type map_type,
+ u32 map_id, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
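With the map pointer dropped from the prototype, callers hand over the map type and id directly; the plain-redirect wrappers encode "no map" as BPF_MAP_TYPE_UNSPEC plus the INT_MAX sentinel that TP_fast_assign() special-cases above. A hypothetical call site for the map variant (names illustrative):

    /* Sketch: the redirect path passes values read from the resolved map
     * instead of the struct bpf_map pointer itself. */
    _trace_xdp_redirect_map(dev, xdp_prog, fwd, map->map_type, map->id, index);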
TRACE_EVENT(xdp_cpumap_kthread,
@@ -392,6 +405,23 @@ TRACE_EVENT(mem_return_failed,
)
);
+TRACE_EVENT(bpf_xdp_link_attach_failed,
+
+ TP_PROTO(const char *msg),
+
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("errmsg=%s", __get_str(msg))
+);
+
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index a5ccfa67bc5c..0577f0cdd231 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -6,26 +6,26 @@
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
- TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
- __field(enum paravirt_lazy_mode, mode)
+ __field(enum xen_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
- (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
- (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+ (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
- TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
@@ -153,26 +153,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
-TRACE_EVENT(xen_mmu_set_pte_at,
- TP_PROTO(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval),
- TP_ARGS(mm, addr, ptep, pteval),
- TP_STRUCT__entry(
- __field(struct mm_struct *, mm)
- __field(unsigned long, addr)
- __field(pte_t *, ptep)
- __field(pteval_t, pteval)
- ),
- TP_fast_assign(__entry->mm = mm;
- __entry->addr = addr;
- __entry->ptep = ptep;
- __entry->pteval = pteval.pte),
- TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
- __entry->mm, __entry->addr, __entry->ptep,
- (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
- (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
- );
-
TRACE_DEFINE_SIZEOF(pmdval_t);
TRACE_EVENT(xen_mmu_set_pmd,
@@ -366,7 +346,7 @@ TRACE_EVENT(xen_mmu_flush_tlb_one_user,
TP_printk("addr %lx", __entry->addr)
);
-TRACE_EVENT(xen_mmu_flush_tlb_others,
+TRACE_EVENT(xen_mmu_flush_tlb_multi,
TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
unsigned long addr, unsigned long end),
TP_ARGS(cpus, mm, addr, end),
diff --git a/include/trace/misc/fs.h b/include/trace/misc/fs.h
new file mode 100644
index 000000000000..738b97f22f36
--- /dev/null
+++ b/include/trace/misc/fs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Display helpers for generic filesystem items
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/fs.h>
+
+#define show_fs_dirent_type(x) \
+ __print_symbolic(x, \
+ { DT_UNKNOWN, "UNKNOWN" }, \
+ { DT_FIFO, "FIFO" }, \
+ { DT_CHR, "CHR" }, \
+ { DT_DIR, "DIR" }, \
+ { DT_BLK, "BLK" }, \
+ { DT_REG, "REG" }, \
+ { DT_LNK, "LNK" }, \
+ { DT_SOCK, "SOCK" }, \
+ { DT_WHT, "WHT" })
+
+#define show_fs_fcntl_open_flags(x) \
+ __print_flags(x, "|", \
+ { O_WRONLY, "O_WRONLY" }, \
+ { O_RDWR, "O_RDWR" }, \
+ { O_CREAT, "O_CREAT" }, \
+ { O_EXCL, "O_EXCL" }, \
+ { O_NOCTTY, "O_NOCTTY" }, \
+ { O_TRUNC, "O_TRUNC" }, \
+ { O_APPEND, "O_APPEND" }, \
+ { O_NONBLOCK, "O_NONBLOCK" }, \
+ { O_DSYNC, "O_DSYNC" }, \
+ { O_DIRECT, "O_DIRECT" }, \
+ { O_LARGEFILE, "O_LARGEFILE" }, \
+ { O_DIRECTORY, "O_DIRECTORY" }, \
+ { O_NOFOLLOW, "O_NOFOLLOW" }, \
+ { O_NOATIME, "O_NOATIME" }, \
+ { O_CLOEXEC, "O_CLOEXEC" })
+
+#define __fmode_flag(x) { (__force unsigned long)FMODE_##x, #x }
+#define show_fs_fmode_flags(x) \
+ __print_flags(x, "|", \
+ __fmode_flag(READ), \
+ __fmode_flag(WRITE), \
+ __fmode_flag(EXEC))
+
+#ifdef CONFIG_64BIT
+#define show_fs_fcntl_cmd(x) \
+ __print_symbolic(x, \
+ { F_DUPFD, "DUPFD" }, \
+ { F_GETFD, "GETFD" }, \
+ { F_SETFD, "SETFD" }, \
+ { F_GETFL, "GETFL" }, \
+ { F_SETFL, "SETFL" }, \
+ { F_GETLK, "GETLK" }, \
+ { F_SETLK, "SETLK" }, \
+ { F_SETLKW, "SETLKW" }, \
+ { F_SETOWN, "SETOWN" }, \
+ { F_GETOWN, "GETOWN" }, \
+ { F_SETSIG, "SETSIG" }, \
+ { F_GETSIG, "GETSIG" }, \
+ { F_SETOWN_EX, "SETOWN_EX" }, \
+ { F_GETOWN_EX, "GETOWN_EX" }, \
+ { F_GETOWNER_UIDS, "GETOWNER_UIDS" }, \
+ { F_OFD_GETLK, "OFD_GETLK" }, \
+ { F_OFD_SETLK, "OFD_SETLK" }, \
+ { F_OFD_SETLKW, "OFD_SETLKW" })
+#else /* CONFIG_64BIT */
+#define show_fs_fcntl_cmd(x) \
+ __print_symbolic(x, \
+ { F_DUPFD, "DUPFD" }, \
+ { F_GETFD, "GETFD" }, \
+ { F_SETFD, "SETFD" }, \
+ { F_GETFL, "GETFL" }, \
+ { F_SETFL, "SETFL" }, \
+ { F_GETLK, "GETLK" }, \
+ { F_SETLK, "SETLK" }, \
+ { F_SETLKW, "SETLKW" }, \
+ { F_SETOWN, "SETOWN" }, \
+ { F_GETOWN, "GETOWN" }, \
+ { F_SETSIG, "SETSIG" }, \
+ { F_GETSIG, "GETSIG" }, \
+ { F_GETLK64, "GETLK64" }, \
+ { F_SETLK64, "SETLK64" }, \
+ { F_SETLKW64, "SETLKW64" }, \
+ { F_SETOWN_EX, "SETOWN_EX" }, \
+ { F_GETOWN_EX, "GETOWN_EX" }, \
+ { F_GETOWNER_UIDS, "GETOWNER_UIDS" }, \
+ { F_OFD_GETLK, "OFD_GETLK" }, \
+ { F_OFD_SETLK, "OFD_SETLK" }, \
+ { F_OFD_SETLKW, "OFD_SETLKW" })
+#endif /* CONFIG_64BIT */
+
+#define show_fs_fcntl_lock_type(x) \
+ __print_symbolic(x, \
+ { F_RDLCK, "RDLCK" }, \
+ { F_WRLCK, "WRLCK" }, \
+ { F_UNLCK, "UNLCK" })
+
+#define show_fs_lookup_flags(flags) \
+ __print_flags(flags, "|", \
+ { LOOKUP_FOLLOW, "FOLLOW" }, \
+ { LOOKUP_DIRECTORY, "DIRECTORY" }, \
+ { LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \
+ { LOOKUP_EMPTY, "EMPTY" }, \
+ { LOOKUP_DOWN, "DOWN" }, \
+ { LOOKUP_MOUNTPOINT, "MOUNTPOINT" }, \
+ { LOOKUP_REVAL, "REVAL" }, \
+ { LOOKUP_RCU, "RCU" }, \
+ { LOOKUP_OPEN, "OPEN" }, \
+ { LOOKUP_CREATE, "CREATE" }, \
+ { LOOKUP_EXCL, "EXCL" }, \
+ { LOOKUP_RENAME_TARGET, "RENAME_TARGET" }, \
+ { LOOKUP_PARENT, "PARENT" }, \
+ { LOOKUP_NO_SYMLINKS, "NO_SYMLINKS" }, \
+ { LOOKUP_NO_MAGICLINKS, "NO_MAGICLINKS" }, \
+ { LOOKUP_NO_XDEV, "NO_XDEV" }, \
+ { LOOKUP_BENEATH, "BENEATH" }, \
+ { LOOKUP_IN_ROOT, "IN_ROOT" }, \
+ { LOOKUP_CACHED, "CACHED" })
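These helpers are intended to be dropped into TP_printk() argument lists; for instance, a filesystem event could decode a lookup-flags field as below (event fields are hypothetical):

    /* Sketch: symbolic decoding at event-output time. */
    TP_printk("name=%s flags=%s",
              __get_str(name),
              show_fs_lookup_flags(__entry->flags))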
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
new file mode 100644
index 000000000000..e43e74591561
--- /dev/null
+++ b/include/trace/misc/nfs.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Display helpers for NFS protocol elements
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <uapi/linux/nfs.h>
+
+TRACE_DEFINE_ENUM(NFS_OK);
+TRACE_DEFINE_ENUM(NFSERR_PERM);
+TRACE_DEFINE_ENUM(NFSERR_NOENT);
+TRACE_DEFINE_ENUM(NFSERR_IO);
+TRACE_DEFINE_ENUM(NFSERR_NXIO);
+TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
+TRACE_DEFINE_ENUM(NFSERR_ACCES);
+TRACE_DEFINE_ENUM(NFSERR_EXIST);
+TRACE_DEFINE_ENUM(NFSERR_XDEV);
+TRACE_DEFINE_ENUM(NFSERR_NODEV);
+TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFSERR_ISDIR);
+TRACE_DEFINE_ENUM(NFSERR_INVAL);
+TRACE_DEFINE_ENUM(NFSERR_FBIG);
+TRACE_DEFINE_ENUM(NFSERR_NOSPC);
+TRACE_DEFINE_ENUM(NFSERR_ROFS);
+TRACE_DEFINE_ENUM(NFSERR_MLINK);
+TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFSERR_DQUOT);
+TRACE_DEFINE_ENUM(NFSERR_STALE);
+TRACE_DEFINE_ENUM(NFSERR_REMOTE);
+TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
+TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
+TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
+
+#define show_nfs_status(x) \
+ __print_symbolic(x, \
+ { NFS_OK, "OK" }, \
+ { NFSERR_PERM, "PERM" }, \
+ { NFSERR_NOENT, "NOENT" }, \
+ { NFSERR_IO, "IO" }, \
+ { NFSERR_NXIO, "NXIO" }, \
+ { ECHILD, "CHILD" }, \
+ { NFSERR_EAGAIN, "AGAIN" }, \
+ { NFSERR_ACCES, "ACCES" }, \
+ { NFSERR_EXIST, "EXIST" }, \
+ { NFSERR_XDEV, "XDEV" }, \
+ { NFSERR_NODEV, "NODEV" }, \
+ { NFSERR_NOTDIR, "NOTDIR" }, \
+ { NFSERR_ISDIR, "ISDIR" }, \
+ { NFSERR_INVAL, "INVAL" }, \
+ { NFSERR_FBIG, "FBIG" }, \
+ { NFSERR_NOSPC, "NOSPC" }, \
+ { NFSERR_ROFS, "ROFS" }, \
+ { NFSERR_MLINK, "MLINK" }, \
+ { NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
+ { NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFSERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFSERR_DQUOT, "DQUOT" }, \
+ { NFSERR_STALE, "STALE" }, \
+ { NFSERR_REMOTE, "REMOTE" }, \
+ { NFSERR_WFLUSH, "WFLUSH" }, \
+ { NFSERR_BADHANDLE, "BADHANDLE" }, \
+ { NFSERR_NOT_SYNC, "NOTSYNC" }, \
+ { NFSERR_BAD_COOKIE, "BADCOOKIE" }, \
+ { NFSERR_NOTSUPP, "NOTSUPP" }, \
+ { NFSERR_TOOSMALL, "TOOSMALL" }, \
+ { NFSERR_SERVERFAULT, "REMOTEIO" }, \
+ { NFSERR_BADTYPE, "BADTYPE" }, \
+ { NFSERR_JUKEBOX, "JUKEBOX" })
+
+TRACE_DEFINE_ENUM(NFS_UNSTABLE);
+TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
+TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
+
+#define show_nfs_stable_how(x) \
+ __print_symbolic(x, \
+ { NFS_UNSTABLE, "UNSTABLE" }, \
+ { NFS_DATA_SYNC, "DATA_SYNC" }, \
+ { NFS_FILE_SYNC, "FILE_SYNC" })
+
+TRACE_DEFINE_ENUM(NFS4_OK);
+TRACE_DEFINE_ENUM(NFS4ERR_ACCESS);
+TRACE_DEFINE_ENUM(NFS4ERR_ATTRNOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_ADMIN_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_BACK_CHAN_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_BADCHAR);
+TRACE_DEFINE_ENUM(NFS4ERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADIOMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLABEL);
+TRACE_DEFINE_ENUM(NFS4ERR_BADNAME);
+TRACE_DEFINE_ENUM(NFS4ERR_BADOWNER);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADXDR);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_HIGH_SLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SEQID);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SESSION_DIGEST);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_CB_PATH_DOWN);
+TRACE_DEFINE_ENUM(NFS4ERR_CLID_INUSE);
+TRACE_DEFINE_ENUM(NFS4ERR_CLIENTID_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_COMPLETE_ALREADY);
+TRACE_DEFINE_ENUM(NFS4ERR_CONN_NOT_BOUND_TO_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADLOCK);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_ALREADY_WANTED);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_DIRDELEG_UNAVAIL);
+TRACE_DEFINE_ENUM(NFS4ERR_DQUOT);
+TRACE_DEFINE_ENUM(NFS4ERR_ENCR_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_EXIST);
+TRACE_DEFINE_ENUM(NFS4ERR_EXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FBIG);
+TRACE_DEFINE_ENUM(NFS4ERR_FHEXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FILE_OPEN);
+TRACE_DEFINE_ENUM(NFS4ERR_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_HASH_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_INVAL);
+TRACE_DEFINE_ENUM(NFS4ERR_IO);
+TRACE_DEFINE_ENUM(NFS4ERR_ISDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTTRYLATER);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTUNAVAILABLE);
+TRACE_DEFINE_ENUM(NFS4ERR_LEASE_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKS_HELD);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCK_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_MINOR_VERS_MISMATCH);
+TRACE_DEFINE_ENUM(NFS4ERR_MLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFS4ERR_NOENT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOFILEHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_NOMATCHING_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOSPC);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_ONLY_OP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_NO_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_NXIO);
+TRACE_DEFINE_ENUM(NFS4ERR_OLD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_OPENMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_ILLEGAL);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_NOT_IN_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_PERM);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_IO_HOLE);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_NO_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECALLCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_BAD);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_CONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_REJECT_DELEG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+TRACE_DEFINE_ENUM(NFS4ERR_REQ_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_RESOURCE);
+TRACE_DEFINE_ENUM(NFS4ERR_RESTOREFH);
+TRACE_DEFINE_ENUM(NFS4ERR_RETRY_UNCACHED_REP);
+TRACE_DEFINE_ENUM(NFS4ERR_RETURNCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_ROFS);
+TRACE_DEFINE_ENUM(NFS4ERR_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_SHARE_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQUENCE_POS);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_FALSE_RETRY);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_MISORDERED);
+TRACE_DEFINE_ENUM(NFS4ERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_CLIENTID);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_SYMLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFS4ERR_TOO_MANY_OPS);
+TRACE_DEFINE_ENUM(NFS4ERR_UNKNOWN_LAYOUTTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_UNSAFE_COMPOUND);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONGSEC);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
+
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
+
+#define show_nfs4_status(x) \
+ __print_symbolic(x, \
+ { NFS4_OK, "OK" }, \
+ { EPERM, "EPERM" }, \
+ { ENOENT, "ENOENT" }, \
+ { EIO, "EIO" }, \
+ { ENXIO, "ENXIO" }, \
+ { EACCES, "EACCES" }, \
+ { EEXIST, "EEXIST" }, \
+ { EXDEV, "EXDEV" }, \
+ { ENOTDIR, "ENOTDIR" }, \
+ { EISDIR, "EISDIR" }, \
+ { EFBIG, "EFBIG" }, \
+ { ENOSPC, "ENOSPC" }, \
+ { EROFS, "EROFS" }, \
+ { EMLINK, "EMLINK" }, \
+ { ENAMETOOLONG, "ENAMETOOLONG" }, \
+ { ENOTEMPTY, "ENOTEMPTY" }, \
+ { EDQUOT, "EDQUOT" }, \
+ { ESTALE, "ESTALE" }, \
+ { EBADHANDLE, "EBADHANDLE" }, \
+ { EBADCOOKIE, "EBADCOOKIE" }, \
+ { ENOTSUPP, "ENOTSUPP" }, \
+ { ETOOSMALL, "ETOOSMALL" }, \
+ { EREMOTEIO, "EREMOTEIO" }, \
+ { EBADTYPE, "EBADTYPE" }, \
+ { EAGAIN, "EAGAIN" }, \
+ { ELOOP, "ELOOP" }, \
+ { EOPNOTSUPP, "EOPNOTSUPP" }, \
+ { EDEADLK, "EDEADLK" }, \
+ { ENOMEM, "ENOMEM" }, \
+ { EKEYEXPIRED, "EKEYEXPIRED" }, \
+ { ETIMEDOUT, "ETIMEDOUT" }, \
+ { ERESTARTSYS, "ERESTARTSYS" }, \
+ { ECONNREFUSED, "ECONNREFUSED" }, \
+ { ECONNRESET, "ECONNRESET" }, \
+ { ENETUNREACH, "ENETUNREACH" }, \
+ { EHOSTUNREACH, "EHOSTUNREACH" }, \
+ { EHOSTDOWN, "EHOSTDOWN" }, \
+ { EPIPE, "EPIPE" }, \
+ { EPFNOSUPPORT, "EPFNOSUPPORT" }, \
+ { EINVAL, "EINVAL" }, \
+ { EPROTONOSUPPORT, "EPROTONOSUPPORT" }, \
+ { NFS4ERR_ACCESS, "ACCESS" }, \
+ { NFS4ERR_ATTRNOTSUPP, "ATTRNOTSUPP" }, \
+ { NFS4ERR_ADMIN_REVOKED, "ADMIN_REVOKED" }, \
+ { NFS4ERR_BACK_CHAN_BUSY, "BACK_CHAN_BUSY" }, \
+ { NFS4ERR_BADCHAR, "BADCHAR" }, \
+ { NFS4ERR_BADHANDLE, "BADHANDLE" }, \
+ { NFS4ERR_BADIOMODE, "BADIOMODE" }, \
+ { NFS4ERR_BADLAYOUT, "BADLAYOUT" }, \
+ { NFS4ERR_BADLABEL, "BADLABEL" }, \
+ { NFS4ERR_BADNAME, "BADNAME" }, \
+ { NFS4ERR_BADOWNER, "BADOWNER" }, \
+ { NFS4ERR_BADSESSION, "BADSESSION" }, \
+ { NFS4ERR_BADSLOT, "BADSLOT" }, \
+ { NFS4ERR_BADTYPE, "BADTYPE" }, \
+ { NFS4ERR_BADXDR, "BADXDR" }, \
+ { NFS4ERR_BAD_COOKIE, "BAD_COOKIE" }, \
+ { NFS4ERR_BAD_HIGH_SLOT, "BAD_HIGH_SLOT" }, \
+ { NFS4ERR_BAD_RANGE, "BAD_RANGE" }, \
+ { NFS4ERR_BAD_SEQID, "BAD_SEQID" }, \
+ { NFS4ERR_BAD_SESSION_DIGEST, "BAD_SESSION_DIGEST" }, \
+ { NFS4ERR_BAD_STATEID, "BAD_STATEID" }, \
+ { NFS4ERR_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { NFS4ERR_CLID_INUSE, "CLID_INUSE" }, \
+ { NFS4ERR_CLIENTID_BUSY, "CLIENTID_BUSY" }, \
+ { NFS4ERR_COMPLETE_ALREADY, "COMPLETE_ALREADY" }, \
+ { NFS4ERR_CONN_NOT_BOUND_TO_SESSION, "CONN_NOT_BOUND_TO_SESSION" }, \
+ { NFS4ERR_DEADLOCK, "DEADLOCK" }, \
+ { NFS4ERR_DEADSESSION, "DEAD_SESSION" }, \
+ { NFS4ERR_DELAY, "DELAY" }, \
+ { NFS4ERR_DELEG_ALREADY_WANTED, "DELEG_ALREADY_WANTED" }, \
+ { NFS4ERR_DELEG_REVOKED, "DELEG_REVOKED" }, \
+ { NFS4ERR_DENIED, "DENIED" }, \
+ { NFS4ERR_DIRDELEG_UNAVAIL, "DIRDELEG_UNAVAIL" }, \
+ { NFS4ERR_DQUOT, "DQUOT" }, \
+ { NFS4ERR_ENCR_ALG_UNSUPP, "ENCR_ALG_UNSUPP" }, \
+ { NFS4ERR_EXIST, "EXIST" }, \
+ { NFS4ERR_EXPIRED, "EXPIRED" }, \
+ { NFS4ERR_FBIG, "FBIG" }, \
+ { NFS4ERR_FHEXPIRED, "FHEXPIRED" }, \
+ { NFS4ERR_FILE_OPEN, "FILE_OPEN" }, \
+ { NFS4ERR_GRACE, "GRACE" }, \
+ { NFS4ERR_HASH_ALG_UNSUPP, "HASH_ALG_UNSUPP" }, \
+ { NFS4ERR_INVAL, "INVAL" }, \
+ { NFS4ERR_IO, "IO" }, \
+ { NFS4ERR_ISDIR, "ISDIR" }, \
+ { NFS4ERR_LAYOUTTRYLATER, "LAYOUTTRYLATER" }, \
+ { NFS4ERR_LAYOUTUNAVAILABLE, "LAYOUTUNAVAILABLE" }, \
+ { NFS4ERR_LEASE_MOVED, "LEASE_MOVED" }, \
+ { NFS4ERR_LOCKED, "LOCKED" }, \
+ { NFS4ERR_LOCKS_HELD, "LOCKS_HELD" }, \
+ { NFS4ERR_LOCK_RANGE, "LOCK_RANGE" }, \
+ { NFS4ERR_MINOR_VERS_MISMATCH, "MINOR_VERS_MISMATCH" }, \
+ { NFS4ERR_MLINK, "MLINK" }, \
+ { NFS4ERR_MOVED, "MOVED" }, \
+ { NFS4ERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFS4ERR_NOENT, "NOENT" }, \
+ { NFS4ERR_NOFILEHANDLE, "NOFILEHANDLE" }, \
+ { NFS4ERR_NOMATCHING_LAYOUT, "NOMATCHING_LAYOUT" }, \
+ { NFS4ERR_NOSPC, "NOSPC" }, \
+ { NFS4ERR_NOTDIR, "NOTDIR" }, \
+ { NFS4ERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFS4ERR_NOTSUPP, "NOTSUPP" }, \
+ { NFS4ERR_NOT_ONLY_OP, "NOT_ONLY_OP" }, \
+ { NFS4ERR_NOT_SAME, "NOT_SAME" }, \
+ { NFS4ERR_NO_GRACE, "NO_GRACE" }, \
+ { NFS4ERR_NXIO, "NXIO" }, \
+ { NFS4ERR_OLD_STATEID, "OLD_STATEID" }, \
+ { NFS4ERR_OPENMODE, "OPENMODE" }, \
+ { NFS4ERR_OP_ILLEGAL, "OP_ILLEGAL" }, \
+ { NFS4ERR_OP_NOT_IN_SESSION, "OP_NOT_IN_SESSION" }, \
+ { NFS4ERR_PERM, "PERM" }, \
+ { NFS4ERR_PNFS_IO_HOLE, "PNFS_IO_HOLE" }, \
+ { NFS4ERR_PNFS_NO_LAYOUT, "PNFS_NO_LAYOUT" }, \
+ { NFS4ERR_RECALLCONFLICT, "RECALLCONFLICT" }, \
+ { NFS4ERR_RECLAIM_BAD, "RECLAIM_BAD" }, \
+ { NFS4ERR_RECLAIM_CONFLICT, "RECLAIM_CONFLICT" }, \
+ { NFS4ERR_REJECT_DELEG, "REJECT_DELEG" }, \
+ { NFS4ERR_REP_TOO_BIG, "REP_TOO_BIG" }, \
+ { NFS4ERR_REP_TOO_BIG_TO_CACHE, "REP_TOO_BIG_TO_CACHE" }, \
+ { NFS4ERR_REQ_TOO_BIG, "REQ_TOO_BIG" }, \
+ { NFS4ERR_RESOURCE, "RESOURCE" }, \
+ { NFS4ERR_RESTOREFH, "RESTOREFH" }, \
+ { NFS4ERR_RETRY_UNCACHED_REP, "RETRY_UNCACHED_REP" }, \
+ { NFS4ERR_RETURNCONFLICT, "RETURNCONFLICT" }, \
+ { NFS4ERR_ROFS, "ROFS" }, \
+ { NFS4ERR_SAME, "SAME" }, \
+ { NFS4ERR_SHARE_DENIED, "SHARE_DENIED" }, \
+ { NFS4ERR_SEQUENCE_POS, "SEQUENCE_POS" }, \
+ { NFS4ERR_SEQ_FALSE_RETRY, "SEQ_FALSE_RETRY" }, \
+ { NFS4ERR_SEQ_MISORDERED, "SEQ_MISORDERED" }, \
+ { NFS4ERR_SERVERFAULT, "SERVERFAULT" }, \
+ { NFS4ERR_STALE, "STALE" }, \
+ { NFS4ERR_STALE_CLIENTID, "STALE_CLIENTID" }, \
+ { NFS4ERR_STALE_STATEID, "STALE_STATEID" }, \
+ { NFS4ERR_SYMLINK, "SYMLINK" }, \
+ { NFS4ERR_TOOSMALL, "TOOSMALL" }, \
+ { NFS4ERR_TOO_MANY_OPS, "TOO_MANY_OPS" }, \
+ { NFS4ERR_UNKNOWN_LAYOUTTYPE, "UNKNOWN_LAYOUTTYPE" }, \
+ { NFS4ERR_UNSAFE_COMPOUND, "UNSAFE_COMPOUND" }, \
+ { NFS4ERR_WRONGSEC, "WRONGSEC" }, \
+ { NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
+ { NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
+ { NFS4ERR_XDEV, "XDEV" }, \
+ /* ***** Internal to Linux NFS client ***** */ \
+ { NFS4ERR_RESET_TO_MDS, "RESET_TO_MDS" }, \
+ { NFS4ERR_RESET_TO_PNFS, "RESET_TO_PNFS" })
+
+#define show_nfs4_verifier(x) \
+ __print_hex_str(x, NFS4_VERIFIER_SIZE)
+
+TRACE_DEFINE_ENUM(IOMODE_READ);
+TRACE_DEFINE_ENUM(IOMODE_RW);
+TRACE_DEFINE_ENUM(IOMODE_ANY);
+
+#define show_pnfs_layout_iomode(x) \
+ __print_symbolic(x, \
+ { IOMODE_READ, "READ" }, \
+ { IOMODE_RW, "RW" }, \
+ { IOMODE_ANY, "ANY" })
+
+#define show_rca_mask(x) \
+ __print_flags(x, "|", \
+ { BIT(RCA4_TYPE_MASK_RDATA_DLG), "RDATA_DLG" }, \
+ { BIT(RCA4_TYPE_MASK_WDATA_DLG), "WDATA_DLG" }, \
+ { BIT(RCA4_TYPE_MASK_DIR_DLG), "DIR_DLG" }, \
+ { BIT(RCA4_TYPE_MASK_FILE_LAYOUT), "FILE_LAYOUT" }, \
+ { BIT(RCA4_TYPE_MASK_BLK_LAYOUT), "BLK_LAYOUT" }, \
+ { BIT(RCA4_TYPE_MASK_OBJ_LAYOUT_MIN), "OBJ_LAYOUT_MIN" }, \
+ { BIT(RCA4_TYPE_MASK_OBJ_LAYOUT_MAX), "OBJ_LAYOUT_MAX" }, \
+ { BIT(RCA4_TYPE_MASK_OTHER_LAYOUT_MIN), "OTHER_LAYOUT_MIN" }, \
+ { BIT(RCA4_TYPE_MASK_OTHER_LAYOUT_MAX), "OTHER_LAYOUT_MAX" })
+
+#define show_nfs4_seq4_status(x) \
+ __print_flags(x, "|", \
+ { SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING, "CB_GSS_CONTEXTS_EXPIRING" }, \
+ { SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED, "CB_GSS_CONTEXTS_EXPIRED" }, \
+ { SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED, "EXPIRED_ALL_STATE_REVOKED" }, \
+ { SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED, "EXPIRED_SOME_STATE_REVOKED" }, \
+ { SEQ4_STATUS_ADMIN_STATE_REVOKED, "ADMIN_STATE_REVOKED" }, \
+ { SEQ4_STATUS_RECALLABLE_STATE_REVOKED, "RECALLABLE_STATE_REVOKED" }, \
+ { SEQ4_STATUS_LEASE_MOVED, "LEASE_MOVED" }, \
+ { SEQ4_STATUS_RESTART_RECLAIM_NEEDED, "RESTART_RECLAIM_NEEDED" }, \
+ { SEQ4_STATUS_CB_PATH_DOWN_SESSION, "CB_PATH_DOWN_SESSION" }, \
+ { SEQ4_STATUS_BACKCHANNEL_FAULT, "BACKCHANNEL_FAULT" })
+
+TRACE_DEFINE_ENUM(OP_CB_GETATTR);
+TRACE_DEFINE_ENUM(OP_CB_RECALL);
+TRACE_DEFINE_ENUM(OP_CB_LAYOUTRECALL);
+TRACE_DEFINE_ENUM(OP_CB_NOTIFY);
+TRACE_DEFINE_ENUM(OP_CB_PUSH_DELEG);
+TRACE_DEFINE_ENUM(OP_CB_RECALL_ANY);
+TRACE_DEFINE_ENUM(OP_CB_RECALLABLE_OBJ_AVAIL);
+TRACE_DEFINE_ENUM(OP_CB_RECALL_SLOT);
+TRACE_DEFINE_ENUM(OP_CB_SEQUENCE);
+TRACE_DEFINE_ENUM(OP_CB_WANTS_CANCELLED);
+TRACE_DEFINE_ENUM(OP_CB_NOTIFY_LOCK);
+TRACE_DEFINE_ENUM(OP_CB_NOTIFY_DEVICEID);
+TRACE_DEFINE_ENUM(OP_CB_OFFLOAD);
+TRACE_DEFINE_ENUM(OP_CB_ILLEGAL);
+
+#define show_nfs4_cb_op(x) \
+ __print_symbolic(x, \
+ { 0, "CB_NULL" }, \
+ { 1, "CB_COMPOUND" }, \
+ { OP_CB_GETATTR, "CB_GETATTR" }, \
+ { OP_CB_RECALL, "CB_RECALL" }, \
+ { OP_CB_LAYOUTRECALL, "CB_LAYOUTRECALL" }, \
+ { OP_CB_NOTIFY, "CB_NOTIFY" }, \
+ { OP_CB_PUSH_DELEG, "CB_PUSH_DELEG" }, \
+ { OP_CB_RECALL_ANY, "CB_RECALL_ANY" }, \
+ { OP_CB_RECALLABLE_OBJ_AVAIL, "CB_RECALLABLE_OBJ_AVAIL" }, \
+ { OP_CB_RECALL_SLOT, "CB_RECALL_SLOT" }, \
+ { OP_CB_SEQUENCE, "CB_SEQUENCE" }, \
+ { OP_CB_WANTS_CANCELLED, "CB_WANTS_CANCELLED" }, \
+ { OP_CB_NOTIFY_LOCK, "CB_NOTIFY_LOCK" }, \
+ { OP_CB_NOTIFY_DEVICEID, "CB_NOTIFY_DEVICEID" }, \
+ { OP_CB_OFFLOAD, "CB_OFFLOAD" }, \
+ { OP_CB_ILLEGAL, "CB_ILLEGAL" })
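As with the fs.h helpers above, these are display-time decoders. Note that show_nfs4_status() deliberately folds local errnos and NFSv4 protocol codes into a single table, since client-side status fields can carry either. A hypothetical use:

    /* Sketch: printing a status that may be an errno or an NFS4ERR_* code. */
    TP_printk("status=%d (%s)", __entry->status,
              show_nfs4_status(__entry->status))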
diff --git a/include/trace/events/rdma.h b/include/trace/misc/rdma.h
index aa19afc73a4e..81bb454fc288 100644
--- a/include/trace/events/rdma.h
+++ b/include/trace/misc/rdma.h
@@ -6,7 +6,6 @@
/*
* enum ib_event_type, from include/rdma/ib_verbs.h
*/
-
#define IB_EVENT_LIST \
ib_event(CQ_ERR) \
ib_event(QP_FATAL) \
@@ -91,6 +90,46 @@ IB_WC_STATUS_LIST
__print_symbolic(x, IB_WC_STATUS_LIST)
/*
+ * enum ib_cm_event_type, from include/rdma/ib_cm.h
+ */
+#define IB_CM_EVENT_LIST \
+ ib_cm_event(REQ_ERROR) \
+ ib_cm_event(REQ_RECEIVED) \
+ ib_cm_event(REP_ERROR) \
+ ib_cm_event(REP_RECEIVED) \
+ ib_cm_event(RTU_RECEIVED) \
+ ib_cm_event(USER_ESTABLISHED) \
+ ib_cm_event(DREQ_ERROR) \
+ ib_cm_event(DREQ_RECEIVED) \
+ ib_cm_event(DREP_RECEIVED) \
+ ib_cm_event(TIMEWAIT_EXIT) \
+ ib_cm_event(MRA_RECEIVED) \
+ ib_cm_event(REJ_RECEIVED) \
+ ib_cm_event(LAP_ERROR) \
+ ib_cm_event(LAP_RECEIVED) \
+ ib_cm_event(APR_RECEIVED) \
+ ib_cm_event(SIDR_REQ_ERROR) \
+ ib_cm_event(SIDR_REQ_RECEIVED) \
+ ib_cm_event_end(SIDR_REP_RECEIVED)
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_event_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_EVENT_LIST
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x) { IB_CM_##x, #x },
+#define ib_cm_event_end(x) { IB_CM_##x, #x }
+
+#define rdma_show_ib_cm_event(x) \
+ __print_symbolic(x, IB_CM_EVENT_LIST)
+
+/*
* enum rdma_cm_event_type, from include/rdma/rdma_cm.h
*/
#define RDMA_CM_EVENT_LIST \
diff --git a/include/trace/misc/sunrpc.h b/include/trace/misc/sunrpc.h
new file mode 100644
index 000000000000..588557d07ea8
--- /dev/null
+++ b/include/trace/misc/sunrpc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Oracle and/or its affiliates.
+ *
+ * Common types and format specifiers for sunrpc.
+ */
+
+#if !defined(_TRACE_SUNRPC_BASE_H)
+#define _TRACE_SUNRPC_BASE_H
+
+#include <linux/tracepoint.h>
+
+#define SUNRPC_TRACE_PID_SPECIFIER "%08x"
+#define SUNRPC_TRACE_CLID_SPECIFIER "%08x"
+#define SUNRPC_TRACE_TASK_SPECIFIER \
+ "task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER
+
+#endif /* _TRACE_SUNRPC_BASE_H */
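Because adjacent string literals concatenate at compile time, a format string can embed the specifier macros directly; SUNRPC_TRACE_TASK_SPECIFIER expands to "task:%08x@%08x". A hypothetical event format:

    /* Sketch: one fixed task/client id format across sunrpc events. */
    TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
              __entry->task_id, __entry->client_id, __entry->status)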
diff --git a/include/trace/perf.h b/include/trace/perf.h
index dbc6c74defc3..2c11181c82e0 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -4,22 +4,7 @@
#ifdef CONFIG_PERF_EVENTS
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#include "stages/stage6_event_callback.h"
#undef __perf_count
#define __perf_count(c) (__count = (c))
diff --git a/include/trace/stages/init.h b/include/trace/stages/init.h
new file mode 100644
index 000000000000..000bcfc8dd2e
--- /dev/null
+++ b/include/trace/stages/init.h
@@ -0,0 +1,37 @@
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR() \
+ static const char TRACE_SYSTEM_STRING[] = \
+ __stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = #a, \
+ .eval_value = a \
+ }; \
+ static struct trace_eval_map __used \
+ __section("_ftrace_eval_map") \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = "sizeof(" #a ")", \
+ .eval_value = sizeof(a) \
+ }; \
+ static struct trace_eval_map __used \
+ __section("_ftrace_eval_map") \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
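Each TRACE_DEFINE_ENUM() invocation thus drops one trace_eval_map record into the _ftrace_eval_map section, which the tracing core scans at boot to substitute numeric values into event format strings. Roughly what one invocation expands to, assuming TRACE_SYSTEM is xen:

    /* Sketch: approximate expansion of TRACE_DEFINE_ENUM(XEN_LAZY_MMU). */
    static struct trace_eval_map __used __initdata __xen_XEN_LAZY_MMU = {
            .system      = str__xen__trace_system_name,
            .eval_string = "XEN_LAZY_MMU",
            .eval_value  = XEN_LAZY_MMU,
    };
    static struct trace_eval_map __used __section("_ftrace_eval_map")
            *xen_XEN_LAZY_MMU = &__xen_XEN_LAZY_MMU;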
diff --git a/include/trace/stages/stage1_struct_define.h b/include/trace/stages/stage1_struct_define.h
new file mode 100644
index 000000000000..69e0dae453bf
--- /dev/null
+++ b/include/trace/stages/stage1_struct_define.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 1 definitions for creating trace events */
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type) type item;
+
+#undef __field_struct
+#define __field_struct(type, item) type item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type) type item;
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+
+#undef __cpumask
+#define __cpumask(item) __dynamic_array(char, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_cpumask
+#define __rel_cpumask(item) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
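Stage 1 turns TP_STRUCT__entry() into the body of the on-buffer record: fixed fields keep their C type, while every dynamic item collapses to a u32 locator. For a hypothetical event declaring an int and a string, the result is roughly as below (the trace_event_raw wrapper and common header come from the surrounding trace_events.h machinery, assumed here):

    /* Sketch: stage-1 expansion of
     *   TP_STRUCT__entry(__field(int, nid) __string(comm, src))   */
    struct trace_event_raw_sample_event {
            struct trace_entry ent;             /* common header, framework-supplied */
            int                nid;             /* __field(int, nid)                 */
            u32                __data_loc_comm; /* packed offset|len locator         */
            char               __data[];        /* dynamic payload storage           */
    };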
diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h
new file mode 100644
index 000000000000..8b0cff06d346
--- /dev/null
+++ b/include/trace/stages/stage2_data_offsets.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 2 definitions for creating trace events */
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a)
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 item; const void *item##_ptr_;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __cpumask
+#define __cpumask(item) __dynamic_array(unsigned long, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 item; const void *item##_ptr_;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_cpumask
+#define __rel_cpumask(item) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
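Stage 2 builds the companion trace_event_data_offsets_<event> structure: each dynamic item contributes the u32 locator that stage 5 computes plus the source pointer that stage 6 cross-checks; fixed fields contribute nothing. Sketch for an event with one string:

    /* Sketch: stage-2 result for __string(comm, src). */
    struct trace_event_data_offsets_sample_event {
            u32         comm;       /* packed offset|len, filled in stage 5  */
            const void *comm_ptr_;  /* saved src pointer, checked in stage 6 */
    };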
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
new file mode 100644
index 000000000000..c1fb1355d309
--- /dev/null
+++ b/include/trace/stages/stage3_trace_output.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 3 definitions for creating trace events */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) ((char *)__get_dynamic_array(field))
+
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)__entry + \
+ offsetof(typeof(*__entry), __rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_bitmask
+#define __get_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
+#undef __get_cpumask
+#define __get_cpumask(field) __get_bitmask(field)
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_rel_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_rel_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
+#undef __get_rel_cpumask
+#define __get_rel_cpumask(field) __get_rel_bitmask(field)
+
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
+#undef __print_flags
+#define __print_flags(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags __flags[] = \
+ { flag_array, { -1, NULL }}; \
+ trace_print_flags_seq(p, delim, flag, __flags); \
+ })
+
+#undef __print_symbolic
+#define __print_symbolic(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags symbols[] = \
+ { symbol_array, { -1, NULL }}; \
+ trace_print_symbols_seq(p, value, symbols); \
+ })
+
+#undef __print_flags_u64
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_flags_u64(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 __flags[] = \
+ { flag_array, { -1, NULL } }; \
+ trace_print_flags_seq_u64(p, delim, flag, __flags); \
+ })
+
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array, { -1, NULL } }; \
+ trace_print_symbols_seq_u64(p, value, symbols); \
+ })
+#else
+#define __print_flags_u64(flag, delim, flag_array...) \
+ __print_flags(flag, delim, flag_array)
+
+#define __print_symbolic_u64(value, symbol_array...) \
+ __print_symbolic(value, symbol_array)
+#endif
+
+#undef __print_hex
+#define __print_hex(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, false)
+
+#undef __print_hex_str
+#define __print_hex_str(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, true)
+
+#undef __print_array
+#define __print_array(array, count, el_size) \
+ ({ \
+ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
+ el_size != 4 && el_size != 8); \
+ trace_print_array_seq(p, array, count, el_size); \
+ })
+
+#undef __print_hex_dump
+#define __print_hex_dump(prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii) \
+ trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ do_div(____val, NSEC_PER_SEC); \
+ ____val; \
+ })
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ (u32) do_div(____val, NSEC_PER_SEC); \
+ })
+
+#undef __get_buf
+#define __get_buf(len) trace_seq_acquire(p, (len))
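The two ns helpers at the end cooperate to print a nanosecond count as seconds plus fractional nanoseconds without floating point: do_div() leaves the quotient in place and returns the remainder. A typical TP_printk() pairing, as a sketch:

    /* Sketch: splitting a u64 nanosecond value into "secs.nsecs". */
    TP_printk("delay=%llu.%09u",
              __print_ns_to_secs(__entry->delta_ns),
              __print_ns_without_secs(__entry->delta_ns))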
diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
new file mode 100644
index 000000000000..b6f679ae21aa
--- /dev/null
+++ b/include/trace/stages/stage4_event_fields.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 4 definitions for creating trace events */
+
+#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;})))
+
+#undef __field_ext
+#define __field_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = _filter_type },
+
+#undef __field_struct_ext
+#define __field_struct_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
+ 0, .filter_type = _filter_type },
+
+#undef __field
+#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
+
+#undef __field_struct
+#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
+
+#undef __array
+#define __array(_type, _item, _len) { \
+ .type = #_type"["__stringify(_len)"]", .name = #_item, \
+ .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER,\
+ .len = _len },
+
+#undef __dynamic_array
+#define __dynamic_array(_type, _item, _len) { \
+ .type = "__data_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __cpumask
+#define __cpumask(item) { \
+ .type = "__data_loc cpumask_t", .name = #item, \
+ .size = 4, .align = 4, \
+ .is_signed = 0, .filter_type = FILTER_OTHER },
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(_type, _item, _len) { \
+ .type = "__rel_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_cpumask
+#define __rel_cpumask(item) { \
+ .type = "__rel_loc cpumask_t", .name = #item, \
+ .size = 4, .align = 4, \
+ .is_signed = 0, .filter_type = FILTER_OTHER },
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
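Stage 4 turns each declaration into a struct trace_event_fields initializer that the core exports through the event's format file. What __field(int, nid) contributes, written out (the containing array is framework machinery):

    /* Sketch: literal stage-4 output for __field(int, nid). */
    { .type = "int", .name = "nid",
      .size = sizeof(int), .align = ALIGN_STRUCTFIELD(int),
      .is_signed = is_signed_type(int), .filter_type = FILTER_OTHER },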
diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
new file mode 100644
index 000000000000..c6a62dfb18ef
--- /dev/null
+++ b/include/trace/stages/stage5_get_offsets.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 5 definitions for creating trace events */
+
+/*
+ * remember the offset of each array from the beginning of the event.
+ */
+
+#undef __entry
+#define __entry entry
+
+#ifndef __STAGE5_STRING_SRC_H
+#define __STAGE5_STRING_SRC_H
+static inline const char *__string_src(const char *str)
+{
+ if (!str)
+ return EVENT_NULL_STR;
+ return str;
+}
+#endif /* __STAGE5_STRING_SRC_H */
+
+/*
+ * Fields should never declare an array: i.e. __field(int, arr[5])
+ * If they do, it will cause issues in parsing and possibly corrupt the
+ * events. To prevent that from happening, test the sizeof() a fictitious
+ * type called "struct _test_no_array_##item" which will fail if "item"
+ * contains array elements (like "arr[5]").
+ *
+ * If you hit this, use __array(int, arr, 5) instead.
+ */
+#undef __field
+#define __field(type, item) \
+ { (void)sizeof(struct _test_no_array_##item *); }
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type) \
+ { (void)sizeof(struct _test_no_array_##item *); }
+
+#undef __field_struct
+#define __field_struct(type, item) \
+ { (void)sizeof(struct _test_no_array_##item *); }
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type) \
+ { (void)sizeof(struct _test_no_array_##item *); }
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, \
+ strlen(__string_src(src)) + 1) \
+ __data_offsets->item##_ptr_ = src;
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)\
+ __data_offsets->item##_ptr_ = src;
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, \
+ __trace_event_vstr_len(fmt, ap))
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data) - \
+ offsetof(typeof(*entry), __rel_loc_##item) - \
+ sizeof(u32); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, \
+ strlen(__string_src(src)) + 1) \
+ __data_offsets->item##_ptr_ = src;
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)\
+ __data_offsets->item##_ptr_ = src;
+
+/*
+ * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
+ * nr_bits bits (e.g. one bit per possible CPU for a cpumask).
+ */
+#define __bitmask_size_in_bytes_raw(nr_bits) \
+ (((nr_bits) + 7) / 8)
+
+#define __bitmask_size_in_longs(nr_bits) \
+ ((__bitmask_size_in_bytes_raw(nr_bits) + \
+ ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
+
+/*
+ * __bitmask_size_in_bytes is the number of bytes needed to hold
+ * nr_bits bits, padded out to the nearest long. This is the size
+ * actually saved in the buffer, to keep the layout consistent.
+ */
+#define __bitmask_size_in_bytes(nr_bits) \
+ (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
+#undef __cpumask
+#define __cpumask(item) __bitmask(item, nr_cpumask_bits)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
+#undef __rel_cpumask
+#define __rel_cpumask(item) __rel_bitmask(item, nr_cpumask_bits)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
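Both locator flavors pack a 16-bit length into the upper half of the u32 and a 16-bit offset into the lower half; __rel_loc differs only in measuring the offset from the end of the locator field itself rather than from the start of the record, which is why the expression above subtracts the field offset and sizeof(u32). Unpacking by hand, mirroring the stage-3 getters:

    /* Sketch: manual decode of a __rel_loc locator named "name". */
    u32 rel = __entry->__rel_loc_name;
    unsigned int len = (rel >> 16) & 0xffff;
    void *payload = (void *)&__entry->__rel_loc_name
                    + sizeof(u32) + (rel & 0xffff);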
diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h
new file mode 100644
index 000000000000..3690e677263f
--- /dev/null
+++ b/include/trace/stages/stage6_event_callback.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 6 definitions for creating trace events */
+
+/* Reuse some of the stage 3 macros */
+#include "stage3_trace_output.h"
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ do { \
+ char *__str__ = __get_str(dst); \
+ int __len__ = __get_dynamic_array_len(dst) - 1; \
+ WARN_ON_ONCE(!(void *)(src) != !(void *)__data_offsets.dst##_ptr_); \
+ WARN_ON_ONCE((src) && strcmp((src), __data_offsets.dst##_ptr_)); \
+ memcpy(__str__, __data_offsets.dst##_ptr_ ? : \
+ EVENT_NULL_STR, __len__); \
+ __str__[__len__] = '\0'; \
+ } while (0)
+
+#undef __assign_vstr
+#define __assign_vstr(dst, fmt, va) \
+ do { \
+ va_list __cp_va; \
+ va_copy(__cp_va, *(va)); \
+ vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, __cp_va); \
+ va_end(__cp_va); \
+ } while (0)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __cpumask
+#define __cpumask(item) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_cpumask
+#define __get_cpumask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_cpumask
+#define __assign_cpumask(dst, src) \
+ memcpy(__get_cpumask(dst), (src), __bitmask_size_in_bytes(nr_cpumask_bits))
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __assign_sockaddr
+#define __assign_sockaddr(dest, src, len) \
+ memcpy(__get_dynamic_array(dest), src, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __entry->__rel_loc_##item = __data_offsets.item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __assign_rel_str
+#define __assign_rel_str(dst) \
+ do { \
+ char *__str__ = __get_rel_str(dst); \
+ int __len__ = __get_rel_dynamic_array_len(dst) - 1; \
+ memcpy(__str__, __data_offsets.dst##_ptr_ ? : \
+ EVENT_NULL_STR, __len__); \
+ __str__[__len__] = '\0'; \
+ } while (0)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __assign_rel_bitmask
+#define __assign_rel_bitmask(dst, src, nr_bits) \
+ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __rel_cpumask
+#define __rel_cpumask(item) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __get_rel_cpumask
+#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __assign_rel_cpumask
+#define __assign_rel_cpumask(dst, src) \
+ memcpy(__get_rel_cpumask(dst), (src), __bitmask_size_in_bytes(nr_cpumask_bits))
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
+#undef __assign_rel_sockaddr
+#define __assign_rel_sockaddr(dest, src, len) \
+ memcpy(__get_rel_dynamic_array(dest), src, len)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
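
To place these stage-6 helpers in context, here is a hypothetical trace header (a sketch, not from the tree; the event and subsystem names are invented and the TRACE_INCLUDE build glue is only indicated) that exercises __string/__assign_str and __cpumask/__assign_cpumask in the same way samples/trace_events/trace-events-sample.h does:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM mysys	/* hypothetical subsystem */

	#if !defined(_TRACE_MYSYS_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_MYSYS_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(mysys_sample,
		TP_PROTO(const char *name, const struct cpumask *mask),
		TP_ARGS(name, mask),
		TP_STRUCT__entry(
			__string(name, name)	/* stage 2: u32 __data_loc_name */
			__cpumask(cpus)		/* stage 5: sized in whole longs */
		),
		TP_fast_assign(
			__assign_str(name, name);	/* stage 6, defined above */
			__assign_cpumask(cpus, cpumask_bits(mask));
		),
		TP_printk("name=%s cpus=%s", __get_str(name), __get_cpumask(cpus))
	);

	#endif /* _TRACE_MYSYS_H */
	/* TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE setup and
	 * #include <trace/define_trace.h> go here, as in any trace header. */
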
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
new file mode 100644
index 000000000000..bcb960d16fc0
--- /dev/null
+++ b/include/trace/stages/stage7_class_define.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 7 definitions for creating trace events */
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __print_hex_str
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __get_cpumask
+#undef __get_sockaddr
+#undef __get_rel_dynamic_array
+#undef __get_rel_dynamic_array_len
+#undef __get_rel_str
+#undef __get_rel_bitmask
+#undef __get_rel_cpumask
+#undef __get_rel_sockaddr
+#undef __print_array
+#undef __print_hex_dump
+#undef __get_buf
+
+/*
+ * The code below is not executed in the kernel. It is only what is
+ * displayed in the print format for userspace to parse.
+ */
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(val) (val) / 1000000000UL
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(val) (val) % 1000000000UL
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
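
The effect of this stringification can be reproduced in plain C; the sketch below (standard __VA_ARGS__ instead of the kernel's named variadics) shows why an event's "print fmt" ends up holding literal REC-> references for userspace parsers to resolve:

	#include <stdio.h>

	#define __stringify_1(...)	#__VA_ARGS__
	#define __stringify(...)	__stringify_1(__VA_ARGS__)

	/* stage 7 above: __entry expands to the bare token REC */
	#define __entry REC
	#define TP_printk(fmt, ...)	"\"" fmt "\", " __stringify(__VA_ARGS__)

	int main(void)
	{
		/* prints: "prev_pid=%d", REC->prev_pid */
		puts(TP_printk("prev_pid=%d", __entry->prev_pid));
		return 0;
	}
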
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index dc8ac27d27c1..8e193f3a33b3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -37,10 +37,10 @@ struct syscall_metadata {
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
static inline void syscall_tracepoint_update(struct task_struct *p)
{
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+ if (test_syscall_work(SYSCALL_TRACEPOINT))
+ set_task_syscall_work(p, SYSCALL_TRACEPOINT);
else
- clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+ clear_task_syscall_work(p, SYSCALL_TRACEPOINT);
}
#else
static inline void syscall_tracepoint_update(struct task_struct *p)
diff --git a/include/trace/trace_custom_events.h b/include/trace/trace_custom_events.h
new file mode 100644
index 000000000000..6e492dba96bf
--- /dev/null
+++ b/include/trace/trace_custom_events.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is similar to the trace_events.h file, but only creates
+ * custom trace events to be attached to existing tracepoints.
+ * Whereas the TRACE_EVENT() macro (from trace_events.h) creates
+ * both the trace event and the tracepoint it will attach the event to,
+ * TRACE_CUSTOM_EVENT() creates only a custom version of an existing
+ * trace event (created by TRACE_EVENT() or DEFINE_EVENT()), and it will
+ * be placed in the "custom" system.
+ */
+
+#include <linux/trace_events.h>
+
+/* All custom events are placed in the custom group */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM custom
+
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+/* The init stage creates the system string and enum mappings */
+
+#include "stages/init.h"
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) \
+ DECLARE_CUSTOM_EVENT_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_CUSTOM_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+/* Stage 1 creates the structure of the recorded event layout */
+
+#include "stages/stage1_struct_define.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[]; \
+ }; \
+ \
+ static struct trace_event_class custom_event_class_##name;
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args) \
+ static struct trace_event_call __used \
+ __attribute__((__aligned__(4))) custom_event_##name
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 2 creates the custom class */
+
+#include "stages/stage2_data_offsets.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_data_offsets_##call { \
+ tstruct; \
+ };
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 3 creates the way to print the custom event */
+
+#include "stages/stage3_trace_output.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace enum print_line_t \
+trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
+ struct trace_custom_event_raw_##call *field; \
+ int ret; \
+ \
+ field = (typeof(field))iter->ent; \
+ \
+ ret = trace_raw_output_prep(iter, trace_event); \
+ if (ret != TRACE_TYPE_HANDLED) \
+ return ret; \
+ \
+ trace_event_printf(iter, print); \
+ \
+ return trace_handle_return(s); \
+} \
+static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
+ .trace = trace_custom_raw_output_##call, \
+};
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 4 creates the offset layout for the fields */
+
+#include "stages/stage4_event_fields.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \
+static struct trace_event_fields trace_custom_event_fields_##call[] = { \
+ tstruct \
+ {} };
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 5 creates the helper function for dynamic fields */
+
+#include "stages/stage5_get_offsets.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static inline notrace int trace_custom_event_get_offsets_##call( \
+ struct trace_custom_event_data_offsets_##call *__data_offsets, proto) \
+{ \
+ int __data_size = 0; \
+ int __maybe_unused __item_length; \
+ struct trace_custom_event_raw_##call __maybe_unused *entry; \
+ \
+ tstruct; \
+ \
+ return __data_size; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 6 creates the probe function that records the event */
+
+#include "stages/stage6_event_callback.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_custom_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_custom_event_data_offsets_##call __maybe_unused __data_offsets; \
+ struct trace_event_buffer fbuffer; \
+ struct trace_custom_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_custom_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_custom_probe is compiled out; it is only here as a
+ * build-time check to make sure that if the tracepoint handling changes,
+ * the ftrace probe will fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \
+static inline void ftrace_test_custom_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_custom_event_raw_event_##template); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 7 creates the actual class and event structure for the custom event */
+
+#include "stages/stage7_class_define.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static char custom_print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata custom_event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .fields_array = trace_custom_event_fields_##call, \
+ .fields = LIST_HEAD_INIT(custom_event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_custom_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+};
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used custom_event_##call = { \
+ .name = #call, \
+ .class = &custom_event_class_##template, \
+ .event.funcs = &trace_custom_event_type_funcs_##template, \
+ .print_fmt = custom_print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_CUSTOM, \
+}; \
+static inline int trace_custom_event_##call##_update(struct tracepoint *tp) \
+{ \
+ if (tp->name && strcmp(tp->name, #call) == 0) { \
+ custom_event_##call.tp = tp; \
+ custom_event_##call.flags = TRACE_EVENT_FL_TRACEPOINT; \
+ return 1; \
+ } \
+ return 0; \
+} \
+static struct trace_event_call __used \
+__section("_ftrace_events") *__custom_event_##call = &custom_event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
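
A usage sketch, modeled loosely on samples/trace_events/trace_custom_sched.h: the custom event below attaches to the existing sched_switch tracepoint, whose declaration header must be in scope and whose TP_PROTO()/TP_ARGS() must match the running kernel exactly (the signature shown is one historical variant). A module would then bind it by invoking the generated trace_custom_event_sched_switch_update() for each kernel tracepoint, e.g. via for_each_kernel_tracepoint():

	#include <linux/trace_events.h>
	#include <trace/trace_custom_events.h>
	/* the sched_switch declaration (e.g. <trace/events/sched.h>) must
	 * also be visible for the build-time check to resolve */

	TRACE_CUSTOM_EVENT(sched_switch,
		/* must match the running kernel's tracepoint exactly; this
		 * is one historical signature, shown for illustration only */
		TP_PROTO(bool preempt, unsigned int prev_state,
			 struct task_struct *prev, struct task_struct *next),
		TP_ARGS(preempt, prev_state, prev, next),
		TP_STRUCT__entry(
			__field(pid_t, prev_pid)
			__field(pid_t, next_pid)
		),
		TP_fast_assign(
			__entry->prev_pid = prev->pid;
			__entry->next_pid = next->pid;
		),
		TP_printk("%d => %d", __entry->prev_pid, __entry->next_pid)
	);
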
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 1bc3e7bba9a4..c2f9cabf154d 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -24,42 +24,7 @@
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif
-#define __app__(x, y) str__##x##y
-#define __app(x, y) __app__(x, y)
-
-#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
-
-#define TRACE_MAKE_SYSTEM_STR() \
- static const char TRACE_SYSTEM_STRING[] = \
- __stringify(TRACE_SYSTEM)
-
-TRACE_MAKE_SYSTEM_STR();
-
-#undef TRACE_DEFINE_ENUM
-#define TRACE_DEFINE_ENUM(a) \
- static struct trace_eval_map __used __initdata \
- __##TRACE_SYSTEM##_##a = \
- { \
- .system = TRACE_SYSTEM_STRING, \
- .eval_string = #a, \
- .eval_value = a \
- }; \
- static struct trace_eval_map __used \
- __attribute__((section("_ftrace_eval_map"))) \
- *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
-
-#undef TRACE_DEFINE_SIZEOF
-#define TRACE_DEFINE_SIZEOF(a) \
- static struct trace_eval_map __used __initdata \
- __##TRACE_SYSTEM##_##a = \
- { \
- .system = TRACE_SYSTEM_STRING, \
- .eval_string = "sizeof(" #a ")", \
- .eval_value = sizeof(a) \
- }; \
- static struct trace_eval_map __used \
- __attribute__((section("_ftrace_eval_map"))) \
- *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+#include "stages/init.h"
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
@@ -80,40 +45,14 @@ TRACE_MAKE_SYSTEM_STR();
PARAMS(print)); \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
-
-#undef __field
-#define __field(type, item) type item;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) type item;
-
-#undef __field_struct
-#define __field_struct(type, item) type item;
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type) type item;
-
-#undef __array
-#define __array(type, item, len) type item[len];
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 __data_loc_##item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
+#include "stages/stage1_struct_define.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
struct trace_event_raw_##name { \
struct trace_entry ent; \
tstruct \
- char __data[0]; \
+ char __data[]; \
}; \
\
static struct trace_event_class event_class_##name;
@@ -170,35 +109,7 @@ TRACE_MAKE_SYSTEM_STR();
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
-#undef TRACE_DEFINE_ENUM
-#define TRACE_DEFINE_ENUM(a)
-
-#undef TRACE_DEFINE_SIZEOF
-#define TRACE_DEFINE_SIZEOF(a)
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+#include "stages/stage2_data_offsets.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -231,9 +142,11 @@ TRACE_MAKE_SYSTEM_STR();
* {
* struct trace_seq *s = &iter->seq;
* struct trace_event_raw_<call> *field; <-- defined in stage 1
- * struct trace_entry *entry;
* struct trace_seq *p = &iter->tmp_seq;
- * int ret;
+ *
+ * -------(for event)-------
+ *
+ * struct trace_entry *entry;
*
* entry = iter->ent;
*
@@ -245,107 +158,30 @@ TRACE_MAKE_SYSTEM_STR();
* field = (typeof(field))entry;
*
* trace_seq_init(p);
- * ret = trace_seq_printf(s, "%s: ", <call>);
- * if (ret)
- * ret = trace_seq_printf(s, <TP_printk> "\n");
- * if (!ret)
- * return TRACE_TYPE_PARTIAL_LINE;
+ * return trace_output_call(iter, <call>, <TP_printk> "\n");
*
- * return TRACE_TYPE_HANDLED;
- * }
+ * ------(or, for event class)------
+ *
+ * int ret;
+ *
+ * field = (typeof(field))iter->ent;
+ *
+ * ret = trace_raw_output_prep(iter, trace_event);
+ * if (ret != TRACE_TYPE_HANDLED)
+ * return ret;
+ *
+ * trace_event_printf(iter, <TP_printk> "\n");
+ *
+ * return trace_handle_return(s);
+ * -------
+ * }
*
* This is the method used to print the raw event to the trace
* output format. Note, this is not needed if the data is read
* in binary.
*/
-#undef __entry
-#define __entry field
-
-#undef TP_printk
-#define TP_printk(fmt, args...) fmt "\n", args
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) \
- ({ \
- void *__bitmask = __get_dynamic_array(field); \
- unsigned int __bitmask_size; \
- __bitmask_size = __get_dynamic_array_len(field); \
- trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
- })
-
-#undef __print_flags
-#define __print_flags(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags __flags[] = \
- { flag_array, { -1, NULL }}; \
- trace_print_flags_seq(p, delim, flag, __flags); \
- })
-
-#undef __print_symbolic
-#define __print_symbolic(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags symbols[] = \
- { symbol_array, { -1, NULL }}; \
- trace_print_symbols_seq(p, value, symbols); \
- })
-
-#undef __print_flags_u64
-#undef __print_symbolic_u64
-#if BITS_PER_LONG == 32
-#define __print_flags_u64(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags_u64 __flags[] = \
- { flag_array, { -1, NULL } }; \
- trace_print_flags_seq_u64(p, delim, flag, __flags); \
- })
-
-#define __print_symbolic_u64(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags_u64 symbols[] = \
- { symbol_array, { -1, NULL } }; \
- trace_print_symbols_seq_u64(p, value, symbols); \
- })
-#else
-#define __print_flags_u64(flag, delim, flag_array...) \
- __print_flags(flag, delim, flag_array)
-
-#define __print_symbolic_u64(value, symbol_array...) \
- __print_symbolic(value, symbol_array)
-#endif
-
-#undef __print_hex
-#define __print_hex(buf, buf_len) \
- trace_print_hex_seq(p, buf, buf_len, false)
-
-#undef __print_hex_str
-#define __print_hex_str(buf, buf_len) \
- trace_print_hex_seq(p, buf, buf_len, true)
-
-#undef __print_array
-#define __print_array(array, count, el_size) \
- ({ \
- BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
- el_size != 4 && el_size != 8); \
- trace_print_array_seq(p, array, count, el_size); \
- })
-
-#undef __print_hex_dump
-#define __print_hex_dump(prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii) \
- trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii)
+#include "stages/stage3_trace_output.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -364,7 +200,7 @@ trace_raw_output_##call(struct trace_iterator *iter, int flags, \
if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
- trace_seq_printf(s, print); \
+ trace_event_printf(iter, print); \
\
return trace_handle_return(s); \
} \
@@ -400,41 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __field_ext
-#define __field_ext(_type, _item, _filter_type) { \
- .type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
- .is_signed = is_signed_type(_type), .filter_type = _filter_type },
-
-#undef __field_struct_ext
-#define __field_struct_ext(_type, _item, _filter_type) { \
- .type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
- 0, .filter_type = _filter_type },
-
-#undef __field
-#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
-
-#undef __field_struct
-#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
-
-#undef __array
-#define __array(_type, _item, _len) { \
- .type = #_type"["__stringify(_len)"]", .name = #_item, \
- .size = sizeof(_type[_len]), .align = __alignof__(_type), \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __dynamic_array
-#define __dynamic_array(_type, _item, _len) { \
- .type = "__data_loc " #_type "[]", .name = #_item, \
- .size = 4, .align = 4, \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+#include "stages/stage4_event_fields.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
@@ -447,62 +249,7 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * remember the offset of each array from the beginning of the event.
- */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __item_length = (len) * sizeof(type); \
- __data_offsets->item = __data_size + \
- offsetof(typeof(*entry), __data); \
- __data_offsets->item |= __item_length << 16; \
- __data_size += __item_length;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, \
- strlen((src) ? (const char *)(src) : "(null)") + 1)
-
-/*
- * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
- * num_possible_cpus().
- */
-#define __bitmask_size_in_bytes_raw(nr_bits) \
- (((nr_bits) + 7) / 8)
-
-#define __bitmask_size_in_longs(nr_bits) \
- ((__bitmask_size_in_bytes_raw(nr_bits) + \
- ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
-
-/*
- * __bitmask_size_in_bytes is the number of bytes needed to hold
- * num_possible_cpus() padded out to the nearest long. This is what
- * is saved in the buffer, just to be consistent.
- */
-#define __bitmask_size_in_bytes(nr_bits) \
- (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
- __bitmask_size_in_longs(nr_bits))
+#include "stages/stage5_get_offsets.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -607,7 +354,7 @@ static inline notrace int trace_event_get_offsets_##call( \
 * // it's only safe to use pointers when doing linker tricks to
* // create an array.
* static struct trace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ * __section("_ftrace_events") *__event_<call> = &event_<call>;
*
*/
@@ -625,47 +372,7 @@ static inline notrace int trace_event_get_offsets_##call( \
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
+#include "stages/stage6_event_callback.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -711,22 +418,7 @@ static inline void ftrace_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __print_hex_str
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-#undef __print_hex_dump
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+#include "stages/stage7_class_define.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -755,7 +447,7 @@ static struct trace_event_call __used event_##call = { \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+__section("_ftrace_events") *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
@@ -772,6 +464,6 @@ static struct trace_event_call __used event_##call = { \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+__section("_ftrace_events") *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
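
The __data_loc packing that stage 5 writes and stage 3 reads back is plain 16/16-bit arithmetic; a standalone sketch with an invented record layout:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* decode a __data_loc_<item> word as the stage-3 accessors do:
	 * offset from the record start in the low 16 bits, length in
	 * the high 16 bits */
	static const void *get_dynamic_array(const void *record, uint32_t loc)
	{
		return (const char *)record + (loc & 0xffff);
	}

	static unsigned int get_dynamic_array_len(uint32_t loc)
	{
		return (loc >> 16) & 0xffff;
	}

	int main(void)
	{
		char record[32] = { 0 };
		uint32_t loc = (6u << 16) | 8u;	/* len=6 at offset 8 */

		memcpy(record + 8, "hello", 6);	/* includes the NUL */
		printf("%u bytes: %s\n", get_dynamic_array_len(loc),
		       (const char *)get_dynamic_array(record, loc));
		return 0;
	}
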
diff --git a/include/uapi/asm-generic/bitsperlong.h b/include/uapi/asm-generic/bitsperlong.h
index 693d9a40eb7b..fadb3f857f28 100644
--- a/include/uapi/asm-generic/bitsperlong.h
+++ b/include/uapi/asm-generic/bitsperlong.h
@@ -2,6 +2,17 @@
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG
#define _UAPI__ASM_GENERIC_BITS_PER_LONG
+#ifndef __BITS_PER_LONG
+/*
+ * To stay safe and avoid regressions, only unify the uapi
+ * bitsperlong.h for those architectures whose newer toolchains
+ * provide the definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
+ * See the following link for more info:
+ * https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
+ */
+#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
+#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
@@ -9,8 +20,12 @@
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
-#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
+#endif
+
+#ifndef __BITS_PER_LONG_LONG
+#define __BITS_PER_LONG_LONG 64
+#endif
#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */
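
A quick userspace probe of the new derivation (MY_BITS_PER_LONG is a local stand-in, not the uapi macro):

	#include <stdio.h>

	#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
	#define MY_BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
	#else
	#define MY_BITS_PER_LONG 32	/* legacy fallback, as above */
	#endif

	int main(void)
	{
		printf("__BITS_PER_LONG would be %d\n", MY_BITS_PER_LONG);
		return 0;
	}
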
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 9dc0bf0c5a6e..80f37a0d40d7 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -91,7 +91,6 @@
/* a horrid kludge trying to make sure that this will fail on old kernels */
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
@@ -116,13 +115,13 @@
#define F_GETSIG 11 /* for sockets. */
#endif
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
-#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -181,6 +180,10 @@ struct f_owner_ex {
blocking */
#define LOCK_UN 8 /* remove lock */
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here so as not to break legacy builds, but these should not be used in new code.
+ */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
@@ -189,24 +192,19 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+ __ARCH_FLOCK_EXTRA_SYSID
#endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+ __ARCH_FLOCK_PAD
#endif
+};
struct flock64 {
short l_type;
@@ -214,8 +212,10 @@ struct flock64 {
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
+#ifdef __ARCH_FLOCK64_PAD
__ARCH_FLOCK64_PAD
-};
#endif
+};
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
#endif /* _ASM_GENERIC_FCNTL_H */
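
The struct flock defined above is the one standard fcntl() advisory locking consumes; a minimal userspace sketch taking and dropping a whole-file write lock (the path is illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct flock fl = {
			.l_type   = F_WRLCK,
			.l_whence = SEEK_SET,
			.l_start  = 0,
			.l_len    = 0,		/* 0 = to end of file */
		};
		int fd = open("/tmp/lockfile", O_RDWR | O_CREAT, 0600);

		if (fd < 0 || fcntl(fd, F_SETLKW, &fl) < 0) {
			perror("lock");
			return 1;
		}
		puts("locked");
		fl.l_type = F_UNLCK;
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		return 0;
	}
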
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index b0f8e87235bd..de687009bfe5 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -20,17 +20,18 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
-#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16KB (14U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_64KB (16U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34U << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
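
These encodings surface in uapi as the MAP_HUGE_*/MFD_HUGE_* values. A hedged sketch requesting one 2 MB page (MAP_HUGE_SHIFT is defined locally in case the libc headers predate it; success needs hugepages reserved by the administrator):

	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MAP_HUGE_SHIFT
	#define MAP_HUGE_SHIFT 26	/* HUGETLB_FLAG_ENCODE_SHIFT above */
	#endif

	int main(void)
	{
		size_t len = 2u << 20;	/* one 2 MB page */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
			       (21u << MAP_HUGE_SHIFT),	/* == ENCODE_2MB */
			       -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");	/* typically: no hugepages reserved */
			return 1;
		}
		munmap(p, len);
		return 0;
	}
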
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index f94f65d429be..6ce1f1ceb432 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -72,6 +72,13 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
+#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
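
A hedged sketch of the new MADV_POPULATE_READ: prefault a fresh mapping so later reads take no faults (the constant is defined locally for older libc headers):

	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MADV_POPULATE_READ
	#define MADV_POPULATE_READ 22	/* from the header above */
	#endif

	int main(void)
	{
		size_t len = 1 << 20;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED || madvise(p, len, MADV_POPULATE_READ)) {
			perror("populate");
			return 1;
		}
		munmap(p, len);
		return 0;
	}
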
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index 41b509f410bf..f9c520ce4bf4 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -29,7 +29,7 @@
#define POLLRDHUP 0x2000
#endif
-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000
#define POLL_BUSY_LOOP (__force __poll_t)0x8000
diff --git a/include/uapi/asm-generic/sembuf.h b/include/uapi/asm-generic/sembuf.h
index 0e709bd3d730..f54e48fc91ae 100644
--- a/include/uapi/asm-generic/sembuf.h
+++ b/include/uapi/asm-generic/sembuf.h
@@ -6,9 +6,9 @@
#include <asm/ipcbuf.h>
/*
- * The semid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
+ * The semid64_ds structure for most architectures (though it came from x86_32
+ * originally). Note extra padding because this structure is passed back and
+ * forth between kernel and user space.
*
* semid64_ds was originally meant to be architecture specific, but
* everyone just ended up making identical copies without specific
diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h
index 2bab955e0fed..2979b6dd2c56 100644
--- a/include/uapi/asm-generic/shmbuf.h
+++ b/include/uapi/asm-generic/shmbuf.h
@@ -3,6 +3,8 @@
#define __ASM_GENERIC_SHMBUF_H
#include <asm/bitsperlong.h>
+#include <asm/ipcbuf.h>
+#include <asm/posix_types.h>
/*
* The shmid64_ds structure for x86 architecture.
@@ -24,7 +26,7 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_size_t shm_segsz; /* size of segment (bytes) */
#if __BITS_PER_LONG == 64
long shm_atime; /* last attach time */
long shm_dtime; /* last detach time */
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index cb3d6c267181..b7bc545ec3b2 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -29,6 +29,11 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
+/*
+ * Be careful when extending this union. On 32-bit systems siginfo_t is
+ * 32-bit aligned, which means that a 64-bit field or any other field
+ * that would increase the alignment of siginfo_t will break the ABI.
+ */
union __sifields {
/* kill() */
struct {
@@ -63,18 +68,12 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
-#ifdef __ia64__
- int _imm; /* immediate value for "break" */
- unsigned int _flags; /* see ia64 si_flags */
- unsigned long _isr; /* isr */
-#endif
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -91,6 +90,12 @@ union __sifields {
char _dummy_pkey[__ADDR_BND_PKEY_PAD];
__u32 _pkey;
} _addr_pkey;
+ /* used when si_code=TRAP_PERF */
+ struct {
+ unsigned long _data;
+ __u32 _type;
+ __u32 _flags;
+ } _perf;
};
} _sigfault;
@@ -148,13 +153,14 @@ typedef struct siginfo {
#define si_int _sifields._rt._sigval.sival_int
#define si_ptr _sifields._rt._sigval.sival_ptr
#define si_addr _sifields._sigfault._addr
-#ifdef __ARCH_SI_TRAPNO
#define si_trapno _sifields._sigfault._trapno
-#endif
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
#define si_pkey _sifields._sigfault._addr_pkey._pkey
+#define si_perf_data _sifields._sigfault._perf._data
+#define si_perf_type _sifields._sigfault._perf._type
+#define si_perf_flags _sifields._sigfault._perf._flags
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#define si_call_addr _sifields._sigsys._call_addr
@@ -229,7 +235,10 @@ typedef struct siginfo {
#define SEGV_ACCADI 5 /* ADI not enabled for mapped object */
#define SEGV_ADIDERR 6 /* Disrupting MCD error */
#define SEGV_ADIPERR 7 /* Precise MCD exception */
-#define NSIGSEGV 7
+#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
+#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
+#define SEGV_CPERR 10 /* Control protection fault */
+#define NSIGSEGV 10
/*
* SIGBUS si_codes
@@ -251,7 +260,8 @@ typedef struct siginfo {
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
#define TRAP_UNK 5 /* undiagnosed trap */
-#define NSIGTRAP 5
+#define TRAP_PERF 6 /* perf event with sigtrap=1 */
+#define NSIGTRAP 6
/*
* There is an additional set of SIGTRAP si_codes used by ptrace
@@ -259,6 +269,11 @@ typedef struct siginfo {
*/
/*
+ * Flags for si_perf_flags if SIGTRAP si_code is TRAP_PERF.
+ */
+#define TRAP_PERF_FLAG_ASYNC (1u << 0)
+
+/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
@@ -284,7 +299,8 @@ typedef struct siginfo {
* SIGSYS si_codes
*/
#define SYS_SECCOMP 1 /* seccomp triggered */
-#define NSIGSYS 1
+#define SYS_USER_DISPATCH 2 /* syscall user dispatch triggered */
+#define NSIGSYS 2
/*
* SIGEMT si_codes
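
A hedged sketch of consuming the new TRAP_PERF fields: the handler below inspects si_perf_* when a perf event opened with attr.sigtrap = 1 fires (the perf_event_open() setup is elided, printf is used only for demonstration since it is not async-signal-safe, and the installed headers must be new enough to expose si_perf_*):

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void handler(int sig, siginfo_t *info, void *ucontext)
	{
		(void)sig; (void)ucontext;
		if (info->si_code == TRAP_PERF)
			printf("perf sigtrap: data=%#lx type=%u async=%u\n",
			       info->si_perf_data, info->si_perf_type,
			       info->si_perf_flags & TRAP_PERF_FLAG_ASYNC);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sigaction(SIGTRAP, &sa, NULL);
		/* ... open a perf event with attr.sigtrap = 1 here ... */
		pause();
		return 0;
	}
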
diff --git a/include/uapi/asm-generic/signal-defs.h b/include/uapi/asm-generic/signal-defs.h
index e9304c95ceea..7572f2f46ee8 100644
--- a/include/uapi/asm-generic/signal-defs.h
+++ b/include/uapi/asm-generic/signal-defs.h
@@ -4,6 +4,70 @@
#include <linux/compiler.h>
+/*
+ * SA_FLAGS values:
+ *
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_SIGINFO delivers the signal with SIGINFO structs.
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_UNSUPPORTED is a flag bit that will never be supported. Kernels from
+ * before the introduction of SA_UNSUPPORTED did not clear unknown bits from
+ * sa_flags when read using the oldact argument to sigaction and rt_sigaction,
+ * so this bit allows flag bit support to be detected from userspace while
+ * allowing an old kernel to be distinguished from a kernel that supports every
+ * flag bit.
+ * SA_EXPOSE_TAGBITS exposes an architecture-defined set of tag bits in
+ * siginfo.si_addr.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#ifndef SA_NOCLDSTOP
+#define SA_NOCLDSTOP 0x00000001
+#endif
+#ifndef SA_NOCLDWAIT
+#define SA_NOCLDWAIT 0x00000002
+#endif
+#ifndef SA_SIGINFO
+#define SA_SIGINFO 0x00000004
+#endif
+/* 0x00000008 used on alpha, mips, parisc */
+/* 0x00000010 used on alpha, parisc */
+/* 0x00000020 used on alpha, parisc, sparc */
+/* 0x00000040 used on alpha, parisc */
+/* 0x00000080 used on parisc */
+/* 0x00000100 used on sparc */
+/* 0x00000200 used on sparc */
+#define SA_UNSUPPORTED 0x00000400
+#define SA_EXPOSE_TAGBITS 0x00000800
+/* 0x00010000 used on mips */
+/* 0x00800000 used for internal SA_IMMUTABLE */
+/* 0x01000000 used on x86 */
+/* 0x02000000 used on x86 */
+/*
+ * New architectures should not define the obsolete
+ * SA_RESTORER 0x04000000
+ */
+#ifndef SA_ONSTACK
+#define SA_ONSTACK 0x08000000
+#endif
+#ifndef SA_RESTART
+#define SA_RESTART 0x10000000
+#endif
+#ifndef SA_NODEFER
+#define SA_NODEFER 0x40000000
+#endif
+#ifndef SA_RESETHAND
+#define SA_RESETHAND 0x80000000
+#endif
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
#ifndef SIG_BLOCK
#define SIG_BLOCK 0 /* for blocking signals */
#endif
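
A hedged sketch of the detection scheme the comment above describes: register SA_UNSUPPORTED together with the flag of interest, read the registration back, and trust the readback only when the kernel cleared SA_UNSUPPORTED (i.e. it clears unknown bits):

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>

	#ifndef SA_UNSUPPORTED
	#define SA_UNSUPPORTED    0x00000400
	#endif
	#ifndef SA_EXPOSE_TAGBITS
	#define SA_EXPOSE_TAGBITS 0x00000800
	#endif

	int main(void)
	{
		struct sigaction sa, old;

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = SIG_DFL;
		sa.sa_flags = SA_UNSUPPORTED | SA_EXPOSE_TAGBITS;
		sigaction(SIGSEGV, &sa, NULL);
		sigaction(SIGSEGV, NULL, &old);

		if (old.sa_flags & SA_UNSUPPORTED)
			puts("old kernel: flag support cannot be detected");
		else if (old.sa_flags & SA_EXPOSE_TAGBITS)
			puts("SA_EXPOSE_TAGBITS supported");
		else
			puts("SA_EXPOSE_TAGBITS not supported");
		return 0;
	}
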
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h
index 5c716a952cbe..0eb69dc8e572 100644
--- a/include/uapi/asm-generic/signal.h
+++ b/include/uapi/asm-generic/signal.h
@@ -52,35 +52,6 @@
#define SIGRTMAX _NSIG
#endif
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-/*
- * New architectures should not define the obsolete
- * SA_RESTORER 0x04000000
- */
-
#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
@@ -114,7 +85,7 @@ struct sigaction {
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
- size_t ss_size;
+ __kernel_size_t ss_size;
} stack_t;
#endif /* __ASSEMBLY__ */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 77f7c1638eb1..8ce8a39a1e5f 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -119,6 +119,22 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_PREFER_BUSY_POLL 69
+#define SO_BUSY_POLL_BUDGET 70
+
+#define SO_NETNS_COOKIE 71
+
+#define SO_BUF_LOCK 72
+
+#define SO_RESERVE_MEM 73
+
+#define SO_TXREHASH 74
+
+#define SO_RCVMARK 75
+
+#define SO_PASSPIDFD 76
+#define SO_PEERPIDFD 77
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
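
A hedged sketch of one of the new options, SO_NETNS_COOKIE, a read-only u64 getsockopt on kernels new enough to support it (the constant is defined locally for older libc headers):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_NETNS_COOKIE
	#define SO_NETNS_COOKIE 71	/* from the header above */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		uint64_t cookie = 0;
		socklen_t len = sizeof(cookie);

		if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE,
					 &cookie, &len) < 0) {
			perror("SO_NETNS_COOKIE");
			return 1;
		}
		printf("netns cookie: %llu\n", (unsigned long long)cookie);
		return 0;
	}
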
diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h
new file mode 100644
index 000000000000..4a6a79f28b21
--- /dev/null
+++ b/include/uapi/asm-generic/termbits-common.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_TERMBITS_COMMON_H
+#define __ASM_GENERIC_TERMBITS_COMMON_H
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+
+/* c_iflag bits */
+#define IGNBRK 0x001 /* Ignore break condition */
+#define BRKINT 0x002 /* Signal interrupt on break */
+#define IGNPAR 0x004 /* Ignore characters with parity errors */
+#define PARMRK 0x008 /* Mark parity and framing errors */
+#define INPCK 0x010 /* Enable input parity check */
+#define ISTRIP 0x020 /* Strip 8th bit off characters */
+#define INLCR 0x040 /* Map NL to CR on input */
+#define IGNCR 0x080 /* Ignore CR */
+#define ICRNL 0x100 /* Map CR to NL on input */
+#define IXANY 0x800 /* Any character will restart after stop */
+
+/* c_oflag bits */
+#define OPOST 0x01 /* Perform output processing */
+#define OCRNL 0x08
+#define ONOCR 0x10
+#define ONLRET 0x20
+#define OFILL 0x40
+#define OFDEL 0x80
+
+/* c_cflag bit meaning */
+/* Common CBAUD rates */
+#define B0 0x00000000 /* hang up */
+#define B50 0x00000001
+#define B75 0x00000002
+#define B110 0x00000003
+#define B134 0x00000004
+#define B150 0x00000005
+#define B200 0x00000006
+#define B300 0x00000007
+#define B600 0x00000008
+#define B1200 0x00000009
+#define B1800 0x0000000a
+#define B2400 0x0000000b
+#define B4800 0x0000000c
+#define B9600 0x0000000d
+#define B19200 0x0000000e
+#define B38400 0x0000000f
+#define EXTA B19200
+#define EXTB B38400
+
+#define ADDRB 0x20000000 /* address bit */
+#define CMSPAR 0x40000000 /* mark or space (stick) parity */
+#define CRTSCTS 0x80000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* tcflow() ACTION argument and TCXONC use these */
+#define TCOOFF 0 /* Suspend output */
+#define TCOON 1 /* Restart suspended output */
+#define TCIOFF 2 /* Send a STOP character */
+#define TCION 3 /* Send a START character */
+
+/* tcflush() QUEUE_SELECTOR argument and TCFLSH use these */
+#define TCIFLUSH 0 /* Discard data received but not yet read */
+#define TCOFLUSH 1 /* Discard data written but not yet sent */
+#define TCIOFLUSH 2 /* Discard all pending data */
+
+#endif /* __ASM_GENERIC_TERMBITS_COMMON_H */
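
A minimal sketch of the flow/flush selectors above as consumed by tcflow(3) and tcflush(3):

	#include <termios.h>

	/* pause output, discard unread input, then resume; the TC*
	 * values are the ones laid out above */
	int quiesce_tty(int fd)
	{
		if (tcflow(fd, TCOOFF) || tcflush(fd, TCIFLUSH))
			return -1;
		return tcflow(fd, TCOON);
	}
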
diff --git a/include/uapi/asm-generic/termbits.h b/include/uapi/asm-generic/termbits.h
index 2fbaf9ae89dd..890ef29053e2 100644
--- a/include/uapi/asm-generic/termbits.h
+++ b/include/uapi/asm-generic/termbits.h
@@ -2,10 +2,8 @@
#ifndef __ASM_GENERIC_TERMBITS_H
#define __ASM_GENERIC_TERMBITS_H
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
@@ -41,156 +39,107 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
+#define IUCLC 0x0200
+#define IXON 0x0400
+#define IXOFF 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define OLCUC 0x00002
+#define ONLCR 0x00004
+#define NLDLY 0x00100
+#define NL0 0x00000
+#define NL1 0x00100
+#define CRDLY 0x00600
+#define CR0 0x00000
+#define CR1 0x00200
+#define CR2 0x00400
+#define CR3 0x00600
+#define TABDLY 0x01800
+#define TAB0 0x00000
+#define TAB1 0x00800
+#define TAB2 0x01000
+#define TAB3 0x01800
+#define XTABS 0x01800
+#define BSDLY 0x02000
+#define BS0 0x00000
+#define BS1 0x02000
+#define VTDLY 0x04000
+#define VT0 0x00000
+#define VT1 0x04000
+#define FFDLY 0x08000
+#define FF0 0x00000
+#define FF1 0x08000
/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define CBAUD 0x0000100f
+#define CSIZE 0x00000030
+#define CS5 0x00000000
+#define CS6 0x00000010
+#define CS7 0x00000020
+#define CS8 0x00000030
+#define CSTOPB 0x00000040
+#define CREAD 0x00000080
+#define PARENB 0x00000100
+#define PARODD 0x00000200
+#define HUPCL 0x00000400
+#define CLOCAL 0x00000800
+#define CBAUDEX 0x00001000
+#define BOTHER 0x00001000
+#define B57600 0x00001001
+#define B115200 0x00001002
+#define B230400 0x00001003
+#define B460800 0x00001004
+#define B500000 0x00001005
+#define B576000 0x00001006
+#define B921600 0x00001007
+#define B1000000 0x00001008
+#define B1152000 0x00001009
+#define B1500000 0x0000100a
+#define B2000000 0x0000100b
+#define B2500000 0x0000100c
+#define B3000000 0x0000100d
+#define B3500000 0x0000100e
+#define B4000000 0x0000100f
+#define CIBAUD 0x100f0000 /* input baud rate */
/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
+#define ISIG 0x00001
+#define ICANON 0x00002
+#define XCASE 0x00004
+#define ECHO 0x00008
+#define ECHOE 0x00010
+#define ECHOK 0x00020
+#define ECHONL 0x00040
+#define NOFLSH 0x00080
+#define TOSTOP 0x00100
+#define ECHOCTL 0x00200
+#define ECHOPRT 0x00400
+#define ECHOKE 0x00800
+#define FLUSHO 0x01000
+#define PENDIN 0x04000
+#define IEXTEN 0x08000
+#define EXTPROC 0x10000
/* tcsetattr uses these */
#define TCSANOW 0
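
With the constants in hex, the BOTHER path is easier to read; a hedged sketch of setting a non-table baud rate through the termios2 ioctls:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/termbits.h>	/* struct termios2, BOTHER, TCGETS2 */

	int set_custom_baud(const char *dev, int baud)
	{
		struct termios2 tio;
		int ret, fd = open(dev, O_RDWR | O_NOCTTY);

		if (fd < 0)
			return -1;
		ret = ioctl(fd, TCGETS2, &tio);
		if (ret == 0) {
			tio.c_cflag &= ~CBAUD;
			tio.c_cflag |= BOTHER;	/* rate not in the B* table */
			tio.c_ispeed = tio.c_ospeed = baud;
			ret = ioctl(fd, TCSETS2, &tio);
		}
		close(fd);
		return ret;
	}

For instance, set_custom_baud("/dev/ttyUSB0", 250000) on a hypothetical USB serial adapter; with CIBAUD left at zero the input rate follows the output rate.
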
diff --git a/include/uapi/asm-generic/types.h b/include/uapi/asm-generic/types.h
index dfaa50d99d8f..7ad4dd01b8bf 100644
--- a/include/uapi/asm-generic/types.h
+++ b/include/uapi/asm-generic/types.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_GENERIC_TYPES_H
-#define _ASM_GENERIC_TYPES_H
+#ifndef _UAPI_ASM_GENERIC_TYPES_H
+#define _UAPI_ASM_GENERIC_TYPES_H
/*
* int-ll64 is used everywhere now.
*/
#include <asm-generic/int-ll64.h>
-#endif /* _ASM_GENERIC_TYPES_H */
+#endif /* _UAPI_ASM_GENERIC_TYPES_H */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 995b36c2ea7d..75f00965ab15 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -38,12 +38,12 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
-/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
@@ -68,58 +68,38 @@ __SYSCALL(__NR_removexattr, sys_removexattr)
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-
-/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
-
-/* fs/cookies.c */
#define __NR_lookup_dcookie 18
-__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
-
-/* fs/eventfd.c */
+__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall)
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
-
-/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
-
-/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
-
-/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-
-/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
-
-/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
-
-/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
@@ -130,25 +110,21 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
+
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
-/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
-__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
+__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-
-/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
@@ -161,7 +137,6 @@ __SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
-
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
@@ -186,20 +161,12 @@ __SYSCALL(__NR_openat, sys_openat)
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
-
-/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
-
-/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
-
-/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
-
-/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
@@ -207,9 +174,9 @@ __SYSCALL(__NR_read, sys_read)
#define __NR_write 64
__SYSCALL(__NR_write, sys_write)
#define __NR_readv 65
-__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
+__SC_COMP(__NR_readv, sys_readv, sys_readv)
#define __NR_writev 66
-__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
+__SC_COMP(__NR_writev, sys_writev, sys_writev)
#define __NR_pread64 67
__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
#define __NR_pwrite64 68
@@ -218,12 +185,9 @@ __SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
-
-/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
-/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
@@ -231,21 +195,17 @@ __SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_psel
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
-/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
-
-/* fs/splice.c */
#define __NR_vmsplice 75
-__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
-
-/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
@@ -253,13 +213,13 @@ __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
-/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
+
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
@@ -270,9 +230,9 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
compat_sys_sync_file_range)
#endif
-/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
@@ -282,45 +242,35 @@ __SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
sys_timerfd_gettime)
#endif
-/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
-/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
-
-/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
-
-/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
-
-/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
-
-/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
-/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
+
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -328,43 +278,40 @@ __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
-/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
-/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
-
-/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
-
-/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
-
-/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
+
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
+
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
@@ -377,15 +324,10 @@ __SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
sys_clock_nanosleep)
#endif
-/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
-
-/* kernel/ptrace.c */
#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
-
-/* kernel/sched/core.c */
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
@@ -406,13 +348,13 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
-/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
@@ -431,18 +373,18 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
+
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
-
-/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
@@ -507,7 +449,6 @@ __SYSCALL(__NR_prctl, sys_prctl)
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
-/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
@@ -517,7 +458,6 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/timer.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@@ -534,12 +474,11 @@ __SYSCALL(__NR_getegid, sys_getegid)
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
-
-/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
@@ -547,12 +486,11 @@ __SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
+
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
-
-/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
@@ -561,20 +499,18 @@ __SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
-
-/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
+
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
-
-/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
@@ -583,8 +519,6 @@ __SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
-
-/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
@@ -615,40 +549,30 @@ __SYSCALL(__NR_shutdown, sys_shutdown)
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
-
-/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
-
-/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
-
-/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
-
-/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
-
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
-/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
@@ -673,15 +597,15 @@ __SYSCALL(__NR_madvise, sys_madvise)
#define __NR_remap_file_pages 234
__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
#define __NR_mbind 235
-__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 236
-__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 237
-__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_migrate_pages 238
-__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_move_pages 239
-__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
#define __NR_rt_tgsigqueueinfo 240
@@ -691,6 +615,7 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
@@ -706,6 +631,7 @@ __SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recv
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
+
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -716,10 +642,12 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
+
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -727,11 +655,9 @@ __SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
#define __NR_process_vm_readv 270
-__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
- compat_sys_process_vm_readv)
+__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
#define __NR_process_vm_writev 271
-__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
- compat_sys_process_vm_writev)
+__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
#define __NR_kcmp 272
__SYSCALL(__NR_kcmp, sys_kcmp)
#define __NR_finit_module 273
@@ -772,16 +698,20 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
+
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
#define __NR_clock_settime64 404
@@ -846,22 +776,74 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
-
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
#define __NR_faccessat2 439
__SYSCALL(__NR_faccessat2, sys_faccessat2)
+#define __NR_process_madvise 440
+__SYSCALL(__NR_process_madvise, sys_process_madvise)
+#define __NR_epoll_pwait2 441
+__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_mount_setattr 442
+__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
+#define __NR_quotactl_fd 443
+__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
+#define __NR_landlock_create_ruleset 444
+__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
+#define __NR_landlock_add_rule 445
+__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
+#define __NR_landlock_restrict_self 446
+__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
+
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+
+#define __NR_process_mrelease 448
+__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+#define __NR_cachestat 451
+__SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_map_shadow_stack 453
+__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
+#define __NR_futex_wake 454
+__SYSCALL(__NR_futex_wake, sys_futex_wake)
+#define __NR_futex_wait 455
+__SYSCALL(__NR_futex_wait, sys_futex_wait)
+#define __NR_futex_requeue 456
+__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#undef __NR_syscalls
-#define __NR_syscalls 440
+#define __NR_syscalls 462
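
For reference, a minimal user-space sketch (assuming a kernel built from this table and a libc without a dedicated wrapper) of reaching one of the table's entries, __NR_pidfd_open, through the raw syscall(2) interface:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434        /* from the table above */
#endif

int main(void)
{
        /* Open a pidfd referring to our own PID; flags must currently be 0. */
        long fd = syscall(__NR_pidfd_open, (long)getpid(), 0UL);

        if (fd < 0) {
                perror("pidfd_open");
                return 1;
        }
        printf("pidfd: %ld\n", fd);
        return 0;
}
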
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 3218576e109d..96e32dafd4f0 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -80,7 +80,7 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
* GPU's virtual address space via gart. Gart memory linearizes non-contiguous
- * pages of system memory, allows GPU access system memory in a linezrized
+ * pages of system memory, allows GPU access system memory in a linearized
* fashion.
*
* %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
@@ -94,6 +94,9 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
+ *
+ * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
+ * signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -101,12 +104,14 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
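
As a usage sketch (not part of the patch), a BO can be allocated in one of these domains via DRM_IOCTL_AMDGPU_GEM_CREATE roughly as follows, assuming `fd` is an already-opened amdgpu render node:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int gem_create_gtt(int fd, uint64_t size, uint32_t *handle)
{
        union drm_amdgpu_gem_create args;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = size;
        args.in.alignment = 4096;
        args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
        args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
                return -1;

        *handle = args.out.handle;
        return 0;
}
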
@@ -116,8 +121,6 @@ extern "C" {
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
-/* Flag that create shadow bo(GTT) while allocating vram bo */
-#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
@@ -138,6 +141,36 @@ extern "C" {
* accessing it with various hw blocks
*/
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
+/* Flag that BO can be discarded under memory pressure without keeping the
+ * content.
+ */
+#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
+/* Flag that BO is shared coherently between multiple devices or CPU threads.
+ * May depend on GPU instructions to flush caches to system scope explicitly.
+ *
+ * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
+ * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
+ */
+#define AMDGPU_GEM_CREATE_COHERENT (1 << 13)
+/* Flag that BO should not be cached by GPU. Coherent without having to flush
+ * GPU caches explicitly
+ *
+ * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
+ * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
+ */
+#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
+/* Flag that BO should be coherent across devices when using device-level
+ * atomics. May depend on GPU instructions to flush caches to device scope
+ * explicitly, promoting them to system scope automatically.
+ *
+ * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
+ * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
+ */
+#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -204,6 +237,8 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
#define AMDGPU_CTX_OP_QUERY_STATE2 4
+#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5
+#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6
/* GPU reset status */
#define AMDGPU_CTX_NO_RESET 0
@@ -214,15 +249,17 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
-/* indicate gpu reset occured after ctx created */
+/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
-/* indicate vram lost occured after ctx created */
+/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once caused a gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
+/* indicate that the reset hasn't completed yet */
+#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@@ -236,10 +273,18 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
+/* select a stable profiling pstate for perfmon tools */
+#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf
+#define AMDGPU_CTX_STABLE_PSTATE_NONE 0
+#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3
+#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4
+
struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32 op;
- /** For future use, no flags defined so far */
+ /** Flags */
__u32 flags;
__u32 ctx_id;
/** AMDGPU_CTX_PRIORITY_* */
@@ -260,6 +305,11 @@ union drm_amdgpu_ctx_out {
/** Reset status since the last call of the ioctl. */
__u32 reset_status;
} state;
+
+ struct {
+ __u32 flags;
+ __u32 _pad;
+ } pstate;
};
union drm_amdgpu_ctx {
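
A sketch of driving the new stable-pstate ops through DRM_IOCTL_AMDGPU_CTX, assuming `fd` is an amdgpu node and `ctx_id` an existing context (the requested pstate rides in the previously unused `flags` field):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int ctx_set_stable_pstate(int fd, __u32 ctx_id, __u32 pstate)
{
        union drm_amdgpu_ctx args;

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
        args.in.ctx_id = ctx_id;
        args.in.flags = pstate;        /* e.g. AMDGPU_CTX_STABLE_PSTATE_PEAK */

        /* For the GET op, the current pstate comes back in args.out.pstate.flags. */
        return ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
}
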
@@ -512,6 +562,8 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
+/* don't allocate MALL */
+#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@@ -536,9 +588,14 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
+/*
+ * From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
+ * both encoding and decoding jobs.
+ */
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
-#define AMDGPU_HW_IP_NUM 9
+#define AMDGPU_HW_IP_VPE 9
+#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -551,6 +608,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
+#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -667,12 +725,23 @@ struct drm_amdgpu_cs_chunk_data {
};
};
-/**
+#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
+
+struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
+ __u64 shadow_va;
+ __u64 csa_va;
+ __u64 gds_va;
+ __u64 flags;
+};
+
+/*
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
+#define AMDGPU_IDS_FLAGS_TMZ 0x4
+#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -723,6 +792,22 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_TA 0x13
/* Subquery id: Query DMCUB firmware version */
#define AMDGPU_INFO_FW_DMCUB 0x14
+ /* Subquery id: Query TOC firmware version */
+ #define AMDGPU_INFO_FW_TOC 0x15
+ /* Subquery id: Query CAP firmware version */
+ #define AMDGPU_INFO_FW_CAP 0x16
+ /* Subquery id: Query GFX RLCP firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLCP 0x17
+ /* Subquery id: Query GFX RLCV firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLCV 0x18
+ /* Subquery id: Query MES_KIQ firmware version */
+ #define AMDGPU_INFO_FW_MES_KIQ 0x19
+ /* Subquery id: Query MES firmware version */
+ #define AMDGPU_INFO_FW_MES 0x1a
+ /* Subquery id: Query IMU firmware version */
+ #define AMDGPU_INFO_FW_IMU 0x1b
+ /* Subquery id: Query VPE firmware version */
+ #define AMDGPU_INFO_FW_VPE 0x1c
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@@ -752,6 +837,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
+ /* Subquery id: Query vbios info */
+ #define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@@ -774,12 +861,17 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
+ /* Subquery id: Query GPU peak pstate shader clock */
+ #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
+ /* Subquery id: Query GPU peak pstate memory clock */
+ #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
+ /* Subquery id: Query input GPU power */
+ #define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
-
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
@@ -808,6 +900,16 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS 0x21
+ /* Subquery id: Decode */
+ #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
+ /* Subquery id: Encode */
+ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
+/* Query the max number of IBs per gang per submission */
+#define AMDGPU_INFO_MAX_IBS 0x22
+/* query last page fault info */
+#define AMDGPU_INFO_GPUVM_FAULT 0x23
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -875,6 +977,10 @@ struct drm_amdgpu_info {
struct {
__u32 type;
} sensor_info;
+
+ struct {
+ __u32 type;
+ } video_cap;
};
};
@@ -935,6 +1041,15 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
+struct drm_amdgpu_info_vbios {
+ __u8 name[64];
+ __u8 vbios_pn[64];
+ __u32 version;
+ __u32 pad;
+ __u8 vbios_ver_str[32];
+ __u8 date[32];
+};
+
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
@@ -945,6 +1060,9 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
+#define AMDGPU_VRAM_TYPE_DDR5 10
+#define AMDGPU_VRAM_TYPE_LPDDR4 11
+#define AMDGPU_VRAM_TYPE_LPDDR5 12
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -970,7 +1088,8 @@ struct drm_amdgpu_info_device {
__u32 enabled_rb_pipes_mask;
__u32 num_rb_pipes;
__u32 num_hw_gfx_contexts;
- __u32 _pad;
+ /* PCIe version (the smaller of the GPU and the CPU/motherboard) */
+ __u32 pcie_gen;
__u64 ids_flags;
/** Starting virtual address for UMDs. */
__u64 virtual_address_offset;
@@ -1017,7 +1136,8 @@ struct drm_amdgpu_info_device {
__u32 gs_prim_buffer_depth;
/* max gs wavefront per vgt*/
__u32 max_gs_waves_per_vgt;
- __u32 _pad1;
+ /* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
+ __u32 pcie_num_lanes;
/* always on cu bitmap */
__u32 cu_ao_bitmap[4][4];
/** Starting high virtual address for UMDs. */
@@ -1028,6 +1148,26 @@ struct drm_amdgpu_info_device {
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
+ __u64 min_engine_clock;
+ __u64 min_memory_clock;
+ /* The following fields are only set on gfx11+, older chips set 0. */
+ __u32 tcp_cache_size; /* AKA GL0, VMEM cache */
+ __u32 num_sqc_per_wgp;
+ __u32 sqc_data_cache_size; /* AKA SMEM cache */
+ __u32 sqc_inst_cache_size;
+ __u32 gl1c_cache_size;
+ __u32 gl2c_cache_size;
+ __u64 mall_size; /* AKA infinity cache */
+ /* high 32 bits of the rb pipes mask */
+ __u32 enabled_rb_pipes_mask_hi;
+ /* shadow area size for gfx11 */
+ __u32 shadow_size;
+ /* shadow area base virtual alignment for gfx11 */
+ __u32 shadow_alignment;
+ /* context save area size for gfx11 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for gfx11 */
+ __u32 csa_alignment;
};
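
The extended struct is filled by the AMDGPU_INFO_DEV_INFO subquery (defined elsewhere in this header); a query sketch, assuming an open amdgpu fd:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev)
{
        struct drm_amdgpu_info req;

        memset(&req, 0, sizeof(req));
        req.return_pointer = (uintptr_t)dev;
        req.return_size = sizeof(*dev);
        req.query = AMDGPU_INFO_DEV_INFO;

        /* On pre-gfx11 kernels the new cache-size fields read back as 0. */
        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);
}
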
struct drm_amdgpu_info_hw_ip {
@@ -1042,7 +1182,8 @@ struct drm_amdgpu_info_hw_ip {
__u32 ib_size_alignment;
/** Bitmask of available rings. Bit 0 means ring 0, etc. */
__u32 available_rings;
- __u32 _pad;
+ /** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
+ __u32 ip_discovery_version;
};
struct drm_amdgpu_info_num_handles {
@@ -1070,6 +1211,44 @@ struct drm_amdgpu_info_vce_clock_table {
__u32 pad;
};
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
+
+struct drm_amdgpu_info_video_codec_info {
+ __u32 valid;
+ __u32 max_width;
+ __u32 max_height;
+ __u32 max_pixels_per_frame;
+ __u32 max_level;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_video_caps {
+ struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
+};
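+
A sketch of filling this struct via the AMDGPU_INFO_VIDEO_CAPS query added above, assuming an open amdgpu fd:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int query_decode_caps(int fd, struct drm_amdgpu_info_video_caps *caps)
{
        struct drm_amdgpu_info req;

        memset(&req, 0, sizeof(req));
        req.return_pointer = (uintptr_t)caps;
        req.return_size = sizeof(*caps);
        req.query = AMDGPU_INFO_VIDEO_CAPS;
        req.video_cap.type = AMDGPU_INFO_VIDEO_CAPS_DECODE;

        /* Then check e.g. caps->codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1].valid */
        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);
}
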
+
+#define AMDGPU_VMHUB_TYPE_MASK 0xff
+#define AMDGPU_VMHUB_TYPE_SHIFT 0
+#define AMDGPU_VMHUB_TYPE_GFX 0
+#define AMDGPU_VMHUB_TYPE_MM0 1
+#define AMDGPU_VMHUB_TYPE_MM1 2
+#define AMDGPU_VMHUB_IDX_MASK 0xff00
+#define AMDGPU_VMHUB_IDX_SHIFT 8
+
+struct drm_amdgpu_info_gpuvm_fault {
+ __u64 addr;
+ __u32 status;
+ __u32 vmhub;
+};
+
/*
* Supported GPU families
*/
@@ -1082,6 +1261,13 @@ struct drm_amdgpu_info_vce_clock_table {
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
+#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
+#define AMDGPU_FAMILY_GC_11_0_0 145 /* GC 11.0.0 */
+#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
+#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
+#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
+#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
+#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 808b48a93330..16122819edfe 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
* Header for the Direct Rendering Manager
*
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
*
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
*/
/*
@@ -85,7 +84,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/**
+/*
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@@ -101,7 +100,7 @@ struct drm_clip_rect {
unsigned short y2;
};
-/**
+/*
* Drawable information.
*/
struct drm_drawable_info {
@@ -109,7 +108,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
-/**
+/*
* Texture region,
*/
struct drm_tex_region {
@@ -120,7 +119,7 @@ struct drm_tex_region {
unsigned int age;
};
-/**
+/*
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@@ -132,7 +131,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
-/**
+/*
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@@ -149,7 +148,7 @@ struct drm_version {
char __user *desc; /**< User-space buffer to hold desc */
};
-/**
+/*
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@@ -168,7 +167,7 @@ struct drm_block {
int unused;
};
-/**
+/*
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -183,7 +182,7 @@ struct drm_control {
int irq;
};
-/**
+/*
* Type of memory to map.
*/
enum drm_map_type {
@@ -195,7 +194,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
-/**
+/*
* Memory mapping flags.
*/
enum drm_map_flags {
@@ -214,7 +213,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
-/**
+/*
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@@ -231,7 +230,7 @@ struct drm_map {
/* Private data */
};
-/**
+/*
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@@ -263,7 +262,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
-/**
+/*
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@@ -274,7 +273,7 @@ struct drm_stats {
} data[15];
};
-/**
+/*
* Hardware locking flags.
*/
enum drm_lock_flags {
@@ -289,7 +288,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
-/**
+/*
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@@ -299,7 +298,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
-/**
+/*
* DMA flags
*
* \warning
@@ -328,7 +327,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
-/**
+/*
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@@ -351,7 +350,7 @@ struct drm_buf_desc {
*/
};
-/**
+/*
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@@ -359,7 +358,7 @@ struct drm_buf_info {
struct drm_buf_desc __user *list;
};
-/**
+/*
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@@ -367,7 +366,7 @@ struct drm_buf_free {
int __user *list;
};
-/**
+/*
* Buffer information
*
* \sa drm_buf_map.
@@ -379,7 +378,7 @@ struct drm_buf_pub {
void __user *address; /**< Address of buffer */
};
-/**
+/*
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@@ -392,7 +391,7 @@ struct drm_buf_map {
struct drm_buf_pub __user *list; /**< Buffer information */
};
-/**
+/*
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -417,7 +416,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
-/**
+/*
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@@ -427,7 +426,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
-/**
+/*
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@@ -435,14 +434,14 @@ struct drm_ctx_res {
struct drm_ctx __user *contexts;
};
-/**
+/*
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
-/**
+/*
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@@ -456,14 +455,14 @@ struct drm_update_draw {
unsigned long long data;
};
-/**
+/*
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
-/**
+/*
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@@ -505,7 +504,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
-/**
+/*
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@@ -518,7 +517,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
-/**
+/*
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@@ -528,7 +527,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
-/**
+/*
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@@ -537,7 +536,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
-/**
+/*
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@@ -549,7 +548,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
-/**
+/*
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@@ -559,7 +558,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
-/**
+/*
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -580,7 +579,7 @@ struct drm_agp_info {
unsigned short id_device;
};
-/**
+/*
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@@ -588,7 +587,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
-/**
+/*
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@@ -598,14 +597,14 @@ struct drm_set_version {
int drm_dd_minor;
};
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
@@ -614,7 +613,7 @@ struct drm_gem_flink {
__u32 name;
};
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
@@ -626,33 +625,164 @@ struct drm_gem_open {
__u64 size;
};
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
#define DRM_CAP_DUMB_BUFFER 0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
+ * in the high bits of &drm_wait_vblank_request.type.
+ *
+ * Starting from kernel version 2.6.39, this capability is always set to 1.
+ */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers; it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers; it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
+ */
#define DRM_CAP_PRIME 0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
+ */
#define DRM_PRIME_CAP_IMPORT 0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
+ */
#define DRM_PRIME_CAP_EXPORT 0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting from kernel version 4.15, this capability is always set to 1.
+ */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
+ */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-/*
- * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
- * combination for the hardware cursor. The intention is that a hardware
- * agnostic userspace can query a cursor plane size to use.
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
#define DRM_CAP_CURSOR_HEIGHT 0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting from kernel version 4.12, this capability is always set to 1.
+ */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
+ */
#define DRM_CAP_SYNCOBJ 0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * :ref:`drm_sync_objects`.
+ */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
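
The capabilities documented above are probed with this struct; a sketch, assuming an open DRM fd:

#include <sys/ioctl.h>
#include <drm/drm.h>

static int get_cap(int fd, __u64 cap, __u64 *value)
{
        struct drm_get_cap gc = { .capability = cap };

        if (ioctl(fd, DRM_IOCTL_GET_CAP, &gc))
                return -1;

        *value = gc.value;        /* e.g. 1 if DRM_CAP_DUMB_BUFFER is supported */
        return 0;
}
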
@@ -661,9 +791,12 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -672,13 +805,25 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/**
* DRM_CLIENT_CAP_ATOMIC
*
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -686,6 +831,10 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -693,12 +842,40 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal,) e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set the DRM core will hide cursor plane on
+ * those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
@@ -749,6 +926,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -757,6 +935,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@@ -769,6 +955,35 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
+};
+
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
};
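
A registration sketch using the DRM_IOCTL_SYNCOBJ_EVENTFD ioctl added further below, assuming `fd` is a DRM fd and `handle` an existing syncobj:

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

static int syncobj_wire_eventfd(int fd, __u32 handle, __u64 point)
{
        struct drm_syncobj_eventfd args;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.point = point;        /* 0 for binary syncobjs */
        args.fd = efd;

        if (ioctl(fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args)) {
                close(efd);
                return -1;
        }

        return efd;        /* becomes readable once the point signals */
}
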
@@ -834,6 +1049,19 @@ extern "C" {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+/**
+ * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
+ *
+ * GEM handles are not reference-counted by the kernel. User-space is
+ * responsible for managing their lifetime. For example, if user-space imports
+ * the same memory object twice on the same DRM file description, the same GEM
+ * handle is returned by both imports, and user-space needs to ensure
+ * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
+ * when a memory object is allocated, then exported and imported again on the
+ * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
+ * and always returns fresh new GEM handles even if an existing GEM handle
+ * already refers to the same memory object before the IOCTL is performed.
+ */
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
@@ -874,7 +1102,37 @@ extern "C" {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+/**
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
+ *
+ * User-space sets &drm_prime_handle.handle with the GEM handle to export and
+ * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
+ * &drm_prime_handle.fd.
+ *
+ * The export can fail for any driver-specific reason, e.g. because export is
+ * not supported for this specific GEM handle (but might be for others).
+ *
+ * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
+ */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+/**
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
+ *
+ * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
+ * import, and gets back a GEM handle in &drm_prime_handle.handle.
+ * &drm_prime_handle.flags is unused.
+ *
+ * If an existing GEM handle refers to the memory object backing the DMA-BUF,
+ * that GEM handle is returned. Therefore user-space which needs to handle
+ * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
+ * reference-count duplicated GEM handles. For more information see
+ * &DRM_IOCTL_GEM_CLOSE.
+ *
+ * The import can fail for any driver-specific reason, e.g. because import is
+ * only supported for DMA-BUFs allocated on this DRM device.
+ *
+ * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
+ */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
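
An export sketch matching the documented flow (user-space fills the handle and flags, gets back a DMA-BUF fd), assuming an open DRM fd:

#include <sys/ioctl.h>
#include <drm/drm.h>

static int prime_export(int fd, __u32 handle, int *dmabuf_fd)
{
        struct drm_prime_handle args = {
                .handle = handle,
                .flags = DRM_CLOEXEC | DRM_RDWR,
        };

        if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
                return -1;

        *dmabuf_fd = args.fd;
        return 0;
}
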
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
@@ -912,10 +1170,40 @@ extern "C" {
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+/**
+ * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
+ *
+ * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * Warning: removing a framebuffer currently in-use on an enabled plane will
+ * disable that plane. The CRTC the plane is linked to may also be disabled
+ * (depending on driver capabilities).
+ */
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+/**
+ * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
+ *
+ * KMS dumb buffers provide a very primitive way to allocate a buffer object
+ * suitable for scanout and map it for software rendering. KMS dumb buffers are
+ * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
+ * buffers are not suitable to be displayed on any other device than the KMS
+ * device where they were allocated from. Also see
+ * :ref:`kms_dumb_buffer_objects`.
+ *
+ * The IOCTL argument is a struct drm_mode_create_dumb.
+ *
+ * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
+ * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
+ * &DRM_IOCTL_MODE_MAP_DUMB.
+ *
+ * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
+ * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
+ * driver preferences for dumb buffers.
+ */
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
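
The documented create-then-map flow, as a sketch (assuming the driver reports DRM_CAP_DUMB_BUFFER as 1):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *create_and_map_dumb(int fd, __u32 width, __u32 height,
                                 __u32 *handle, __u32 *pitch, __u64 *size)
{
        struct drm_mode_create_dumb create = {
                .width = width, .height = height, .bpp = 32 /* e.g. XRGB8888 */
        };
        struct drm_mode_map_dumb map = { 0 };
        void *ptr;

        if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return NULL;

        map.handle = create.handle;
        if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                return NULL;

        /* create.pitch and create.size were filled in by the kernel. */
        ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, map.offset);
        if (ptr == MAP_FAILED)
                return NULL;

        *handle = create.handle;
        *pitch = create.pitch;
        *size = create.size;
        return ptr;
}
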
@@ -948,9 +1236,59 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+/**
+ * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
+ *
+ * This queries metadata about a framebuffer. User-space fills
+ * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
+ * struct as the output.
+ *
+ * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
+ * will be filled with GEM buffer handles. Fresh new GEM handles are always
+ * returned, even if another GEM handle referring to the same memory object
+ * already exists on the DRM file description. The caller is responsible for
+ * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
+ * new handle will be returned for multiple planes in case they use the same
+ * memory object. Planes are valid until one has a zero handle -- this can be
+ * used to compute the number of planes.
+ *
+ * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
+ * until one has a zero &drm_mode_fb_cmd2.pitches.
+ *
+ * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
+ * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
+ * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ *
+ * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
+ * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
+ * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
+ * double-close handles which are specified multiple times in the array.
+ */
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
+
+/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@@ -962,24 +1300,49 @@ extern "C" {
#define DRM_COMMAND_END 0xA0
/**
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
+ *
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
*
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
#define DRM_EVENT_CRTC_SEQUENCE 0x03
struct drm_event_vblank {
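
A read-loop sketch matching the contract described above (whole events only; `length` covers the header plus payload):

#include <unistd.h>
#include <drm/drm.h>

static void drain_drm_events(int fd)
{
        char buf[1024];
        ssize_t len = read(fd, buf, sizeof(buf));
        ssize_t off = 0;

        while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
                const struct drm_event *ev = (const void *)(buf + off);

                if (ev->type == DRM_EVENT_FLIP_COMPLETE) {
                        const struct drm_event_vblank *vb = (const void *)ev;
                        /* vb->user_data is the cookie from the page-flip. */
                        (void)vb;
                }
                off += ev->length;        /* length includes the header */
        }
}
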
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 82f327801267..84d502e42961 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -54,16 +54,52 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
- * modifier is specific to the modifer being used. For example, some modifiers
+ * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias, otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ *   These users mustn't need to reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations, stakeholders for all relevant
+ * driver stacks should approve additions.
*/
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
@@ -75,12 +111,42 @@ extern "C" {
#define DRM_FORMAT_INVALID 0
/* color index */
+#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
+#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
+#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-/* 8 bpp Red */
+/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
+
+/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
+
+/* 1 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
+
+/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
-/* 16 bpp Red */
+/* 10 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
+/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@@ -144,6 +210,13 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
@@ -155,6 +228,12 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -162,7 +241,9 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
@@ -242,6 +323,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@@ -271,6 +354,13 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
/* 3 plane non-subsampled (444) YCbCr
* 16 bits per component, but only 10 bits are used and 6 bits are padded
* index 0: Y plane, [15:0] Y:x [10:6] little endian
@@ -320,7 +410,6 @@ extern "C" {
*/
/* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
@@ -337,6 +426,12 @@ extern "C" {
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
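For illustration, the new helpers make vendor dispatch on an otherwise opaque
modifier a one-liner; a minimal sketch:

#include <drm/drm_fourcc.h>

/* e.g. route a modifier to a vendor-specific decoder */
static int describe_modifier(__u64 modifier)
{
        if (fourcc_mod_is_vendor(modifier, INTEL))
                return 1;       /* an Intel tiling/compression layout */
        if (fourcc_mod_is_vendor(modifier, AMD))
                return 2;       /* an AMD layout */
        return 0;               /* fourcc_mod_get_vendor() for the rest */
}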
@@ -392,6 +487,16 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit, instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
/* Intel framebuffer modifiers */
/*
@@ -435,7 +540,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -489,6 +594,115 @@ extern "C" {
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be 64-byte aligned. The clear color structure is 256 bits: the first 128
+ * bits represent the Raw Clear Color Red, Green, Blue and Alpha values, each
+ * represented by 32 bits. The raw clear color is consumed by the 3D engine,
+ * which generates the 64-bit converted clear color. The first 32 bits store
+ * the Lower Converted Clear Color value and the next 32 bits store the Higher
+ * Converted Clear Color value, when applicable. The Converted Clear Color
+ * values are consumed by the DE. The last 64 bits store Color Discard Enable
+ * and Depth Clear Value Valid, which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
+ * Intel Tile 4 layout
+ *
+ * This is a tiled layout using 4KB tiles in a row-major layout. It has the same
+ * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
+ * only differs from Tile Y at the 256B granularity in between. At this
+ * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
+ * of 64B x 8 rows.
+ */
+#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 media compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
+ * pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths. The
+ * clear color is stored at plane index 1 and the pitch should be 64-byte
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * for details.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits: the first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha values, each
+ * represented by 32 bits. The raw clear color is consumed by the 3D engine,
+ * which generates the 64-bit converted clear color. The first 32 bits store
+ * the Lower Converted Clear Color value and the next 32 bits store the Higher
+ * Converted Clear Color value, when applicable. The Converted Clear Color
+ * values are consumed by the DE. The last 64 bits store Color Discard Enable
+ * and Depth Clear Value Valid, which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -525,6 +739,28 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+/*
+ * Qualcomm Tiled Format
+ *
+ * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with the macrotile width.
+ * Pixel data height is aligned with the macrotile height.
+ * The entire pixel data buffer is aligned to 4 KB.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
+
+/*
+ * Qualcomm Alternate Tiled Format
+ *
+ * Alternate tiled format typically only used within GMEM.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
+
/* Vivante framebuffer modifiers */
/*
@@ -565,6 +801,35 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+/*
+ * Vivante TS (tile-status) buffer modifiers. They can be combined with all of
+ * the color buffer tiling modifiers defined above. When TS is present it's a
+ * separate buffer containing the clear/compression status of each tile. The
+ * modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer
+ * tile size in bytes covered by one entry in the status buffer and s is the
+ * number of status bits per entry.
+ * We reserve the top 8 bits of the Vivante modifier space for tile status
+ * clear/compression modifiers, as future cores might add some more TS layout
+ * variations.
+ */
+#define VIVANTE_MOD_TS_64_4 (1ULL << 48)
+#define VIVANTE_MOD_TS_64_2 (2ULL << 48)
+#define VIVANTE_MOD_TS_128_4 (3ULL << 48)
+#define VIVANTE_MOD_TS_256_4 (4ULL << 48)
+#define VIVANTE_MOD_TS_MASK (0xfULL << 48)
+
+/*
+ * Vivante compression modifiers. Those depend on a TS modifier being present
+ * as the TS bits get reinterpreted as compression tags instead of simple
+ * clear markers when compression is enabled.
+ */
+#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52)
+#define VIVANTE_MOD_COMP_MASK (0xfULL << 52)
+
+/* Masking out the extension bits will yield the base modifier. */
+#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \
+ VIVANTE_MOD_COMP_MASK)
+
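A small sketch of how these extension bits compose with the base tiling
modifiers above (the specific TS/compression combination is just an example
value):

#include <drm/drm_fourcc.h>

/* A super-tiled color buffer with a 64-byte/4-bit tile-status buffer and
 * DEC400 compression. */
#define EXAMPLE_VIVANTE_MOD (DRM_FORMAT_MOD_VIVANTE_SUPER_TILED | \
                             VIVANTE_MOD_TS_64_4 | \
                             VIVANTE_MOD_COMP_DEC400)

/* Masking out the extension bits recovers the base tiling layout. */
static __u64 vivante_base_modifier(__u64 modifier)
{
        return modifier & ~VIVANTE_MOD_EXT_MASK;
}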
/* NVIDIA frame buffer modifiers */
/*
@@ -777,6 +1042,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030, where the columns remain 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
@@ -834,10 +1103,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
- * modifiers) denote the category for modifiers. Currently we have only two
- * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen
- * different categories.
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers, i.e. AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
*/
#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
@@ -953,6 +1222,109 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AFBC_FORMAT_MOD_USM (1ULL << 12)
/*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size Plane Alignment
+ * ---------------- ---------------
+ * 16 bytes 1024 bytes
+ * 24 bytes 512 bytes
+ * 32 bytes 2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout Paging Tile Width Paging Tile Height
+ * ------ ----------------- ------------------
+ * SCAN 16 coding units 4 coding units
+ * ROT 8 coding units 8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane Layout Coding Unit Width Coding Unit Height
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 SCAN 16 samples 4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 ROT 8 samples 8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 2 DONT CARE 8 samples 4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 3 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * ----------------------------- --------- ----------------- ------------------
+ * 4 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates that an AFRC encoded buffer uses the scanline-optimised layout;
+ * otherwise, it uses the rotation-optimised layout.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
+
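Putting the AFRC building blocks together, a sketch of a complete modifier
(the coding unit sizes here are example values, not a recommendation):

#include <drm/drm_fourcc.h>

/* A scanline-optimised AFRC modifier for a semi-planar YUV buffer with
 * 16-byte coding units in the luma plane and 24-byte coding units in the
 * chroma plane. */
#define EXAMPLE_AFRC_MOD \
        DRM_FORMAT_MOD_ARM_AFRC( \
                AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) | \
                AFRC_FORMAT_MOD_CU_SIZE_P12(AFRC_FORMAT_MOD_CU_SIZE_24) | \
                AFRC_FORMAT_MOD_LAYOUT_SCAN)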
+/*
* Arm 16x16 Block U-Interleaved modifier
*
* This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
@@ -997,9 +1369,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Not all combinations are valid, and different SoCs may support different
* combinations of layout and options.
*/
-#define __fourcc_mod_amlogic_layout_mask 0xf
+#define __fourcc_mod_amlogic_layout_mask 0xff
#define __fourcc_mod_amlogic_options_shift 8
-#define __fourcc_mod_amlogic_options_mask 0xf
+#define __fourcc_mod_amlogic_options_mask 0xff
#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
fourcc_mod_code(AMLOGIC, \
@@ -1047,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
- * boudaries, i.e. 8bit should be stored in this mode to save allocation
+ * boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
@@ -1056,6 +1428,142 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
+/*
+ * AMD modifiers
+ *
+ * Memory layout:
+ *
+ * without DCC:
+ * - main surface
+ *
+ * with DCC & without DCC_RETILE:
+ * - main surface in plane 0
+ * - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set)
+ *
+ * with DCC & DCC_RETILE:
+ * - main surface in plane 0
+ * - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned)
+ * - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned)
+ *
+ * For multi-plane formats the above surfaces get merged into one plane for
+ * each format plane, based on the required alignment only.
+ *
+ * Bits Parameter Notes
+ * ----- ------------------------ ---------------------------------------------
+ *
+ * 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_*
+ * 12:8 TILE Values are AMD_FMT_MOD_TILE_<version>_*
+ * 13 DCC
+ * 14 DCC_RETILE
+ * 15 DCC_PIPE_ALIGN
+ * 16 DCC_INDEPENDENT_64B
+ * 17 DCC_INDEPENDENT_128B
+ * 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_*
+ * 20 DCC_CONSTANT_ENCODE
+ * 23:21 PIPE_XOR_BITS Only for some chips
+ * 26:24 BANK_XOR_BITS Only for some chips
+ * 29:27 PACKERS Only for some chips
+ * 32:30 RB Only for some chips
+ * 35:33 PIPE Only for some chips
+ * 55:36 - Reserved for future use, must be zero
+ */
+#define AMD_FMT_MOD fourcc_mod_code(AMD, 0)
+
+#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD)
+
+/* Reserve 0 for GFX8 and older */
+#define AMD_FMT_MOD_TILE_VER_GFX9 1
+#define AMD_FMT_MOD_TILE_VER_GFX10 2
+#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+#define AMD_FMT_MOD_TILE_VER_GFX11 4
+
+/*
+ * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+ * version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_S 9
+
+/*
+ * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+ * GFX9 as canonical version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
+#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+
+#define AMD_FMT_MOD_DCC_BLOCK_64B 0
+#define AMD_FMT_MOD_DCC_BLOCK_128B 1
+#define AMD_FMT_MOD_DCC_BLOCK_256B 2
+
+#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0
+#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF
+#define AMD_FMT_MOD_TILE_SHIFT 8
+#define AMD_FMT_MOD_TILE_MASK 0x1F
+
+/* Whether DCC compression is enabled. */
+#define AMD_FMT_MOD_DCC_SHIFT 13
+#define AMD_FMT_MOD_DCC_MASK 0x1
+
+/*
+ * Whether to include two DCC surfaces, one which is rb & pipe aligned, and
+ * one which is not-aligned.
+ */
+#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14
+#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1
+
+/* Only set if DCC_RETILE = false */
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1
+
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
+
+/*
+ * DCC supports embedding some clear colors directly in the DCC surface.
+ * However, on older GPUs the rendering HW ignores the embedded clear color
+ * and prefers the driver provided color. This necessitates doing a fastclear
+ * eliminate operation before a process transfers control.
+ *
+ * If this bit is set, the fastclear eliminate is not needed for these
+ * embeddable colors.
+ */
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
+
+/*
+ * The below fields are for accounting for per GPU differences. These are only
+ * relevant for GFX9 and later and if the tile field is *_X/_T.
+ *
+ * PIPE_XOR_BITS = always needed
+ * BANK_XOR_BITS = only for TILE_VER_GFX9
+ * PACKERS = only for TILE_VER_GFX10_RBPLUS
+ * RB = only for TILE_VER_GFX9 & DCC
+ * PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
+ */
+#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
+#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
+#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_PACKERS_SHIFT 27
+#define AMD_FMT_MOD_PACKERS_MASK 0x7
+#define AMD_FMT_MOD_RB_SHIFT 30
+#define AMD_FMT_MOD_RB_MASK 0x7
+#define AMD_FMT_MOD_PIPE_SHIFT 33
+#define AMD_FMT_MOD_PIPE_MASK 0x7
+
+#define AMD_FMT_MOD_SET(field, value) \
+ ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
+#define AMD_FMT_MOD_GET(field, value) \
+ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+#define AMD_FMT_MOD_CLEAR(field) \
+ (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+
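A sketch of how the SET/GET helpers compose and decompose an AMD modifier
(the tiling values chosen are just an example):

#include <drm/drm_fourcc.h>

/* A GFX9 64K_S tiled surface without DCC. */
static const __u64 example_amd_mod =
        AMD_FMT_MOD |
        AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
        AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S);

/* Extract a field back out of an arbitrary AMD modifier. */
static int amd_mod_has_dcc(__u64 modifier)
{
        return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}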
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index deea447e5f22..7040e7ea80c7 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
- * DRM exposes many UAPI and structure definition to have a consistent
- * and standardized interface with user.
+ * DRM exposes many UAPI and structure definitions to have a consistent
+ * and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
- * to communicate to driver
+ * to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@@ -218,6 +218,27 @@ extern "C" {
#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
+/**
+ * struct drm_mode_modeinfo - Display mode information.
+ * @clock: pixel clock in kHz
+ * @hdisplay: horizontal display size
+ * @hsync_start: horizontal sync start
+ * @hsync_end: horizontal sync end
+ * @htotal: horizontal total size
+ * @hskew: horizontal skew
+ * @vdisplay: vertical display size
+ * @vsync_start: vertical sync start
+ * @vsync_end: vertical sync end
+ * @vtotal: vertical total size
+ * @vscan: vertical scan
+ * @vrefresh: approximate vertical refresh rate in Hz
+ * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
+ * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
+ * @name: string describing the mode resolution
+ *
+ * This is the user-space API display mode information structure. For the
+ * kernel version see struct drm_display_mode.
+ */
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
@@ -291,16 +312,48 @@ struct drm_mode_set_plane {
__u32 src_w;
};
+/**
+ * struct drm_mode_get_plane - Get plane metadata.
+ *
+ * Userspace can perform a GETPLANE ioctl to retrieve information about a
+ * plane.
+ *
+ * To retrieve the number of formats supported, set @count_format_types to zero
+ * and call the ioctl. @count_format_types will be updated with the value.
+ *
+ * To retrieve these formats, allocate an array with the memory needed to store
+ * @count_format_types formats. Point @format_type_ptr to this array and call
+ * the ioctl again (with @count_format_types still set to the value returned in
+ * the first ioctl call).
+ */
struct drm_mode_get_plane {
+ /**
+ * @plane_id: Object ID of the plane whose information should be
+ * retrieved. Set by caller.
+ */
__u32 plane_id;
+ /** @crtc_id: Object ID of the current CRTC. */
__u32 crtc_id;
+ /** @fb_id: Object ID of the current fb. */
__u32 fb_id;
+ /**
+ * @possible_crtcs: Bitmask of CRTCs compatible with the plane. CRTCs
+ * are created and they receive an index, which corresponds to their
+ * position in the bitmask. Bit N corresponds to
+ * :ref:`CRTC index<crtc_index>` N.
+ */
__u32 possible_crtcs;
+ /** @gamma_size: Never used. */
__u32 gamma_size;
+ /** @count_format_types: Number of formats. */
__u32 count_format_types;
+ /**
+ * @format_type_ptr: Pointer to ``__u32`` array of formats that are
+ * supported by the plane. These formats do not require modifiers.
+ */
__u64 format_type_ptr;
};
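A minimal user-space sketch of the two-call pattern described above, assuming
`fd` is an open DRM FD and `plane_id` a valid plane object ID (error handling
elided):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static __u32 *get_plane_formats(int fd, __u32 plane_id, __u32 *count)
{
        struct drm_mode_get_plane get;
        __u32 *formats;

        memset(&get, 0, sizeof(get));
        get.plane_id = plane_id;
        ioctl(fd, DRM_IOCTL_MODE_GETPLANE, &get);       /* fetch the count */

        formats = calloc(get.count_format_types, sizeof(*formats));
        get.format_type_ptr = (__u64)(uintptr_t)formats;
        ioctl(fd, DRM_IOCTL_MODE_GETPLANE, &get);       /* fill the array */

        *count = get.count_format_types;
        return formats;
}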
@@ -332,14 +385,19 @@ struct drm_mode_get_encoder {
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
enum drm_mode_subconnector {
- DRM_MODE_SUBCONNECTOR_Automatic = 0,
- DRM_MODE_SUBCONNECTOR_Unknown = 0,
- DRM_MODE_SUBCONNECTOR_DVID = 3,
- DRM_MODE_SUBCONNECTOR_DVIA = 4,
- DRM_MODE_SUBCONNECTOR_Composite = 5,
- DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
- DRM_MODE_SUBCONNECTOR_Component = 8,
- DRM_MODE_SUBCONNECTOR_SCART = 9,
+ DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */
+ DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */
+ DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */
+ DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I, DP */
+ DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */
+ DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */
+ DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */
+ DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */
+ DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */
+ DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */
+ DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */
+ DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */
+ DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */
};
#define DRM_MODE_CONNECTOR_Unknown 0
@@ -362,28 +420,98 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
#define DRM_MODE_CONNECTOR_SPI 19
+#define DRM_MODE_CONNECTOR_USB 20
+/**
+ * struct drm_mode_get_connector - Get connector metadata.
+ *
+ * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
+ * connector. User-space is expected to retrieve encoders, modes and properties
+ * by performing this ioctl at least twice: the first time to retrieve the
+ * number of elements, the second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_props and @count_encoders to
+ * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
+ * drm_mode_modeinfo element.
+ *
+ * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
+ * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
+ * @count_encoders to their capacity.
+ *
+ * Performing the ioctl only twice may be racy: the number of elements may have
+ * changed with a hotplug event in-between the two ioctls. User-space is
+ * expected to retry the last ioctl until the number of elements stabilizes.
+ * The kernel won't fill any array which doesn't have the expected length.
+ *
+ * **Force-probing a connector**
+ *
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced probe can be slow,
+ * might cause flickering, and the ioctl will block.
+ *
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced probe in other situations.
+ */
struct drm_mode_get_connector {
-
+ /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
__u64 encoders_ptr;
+ /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
__u64 modes_ptr;
+ /** @props_ptr: Pointer to ``__u32`` array of property IDs. */
__u64 props_ptr;
+ /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
__u64 prop_values_ptr;
+ /** @count_modes: Number of modes. */
__u32 count_modes;
+ /** @count_props: Number of properties. */
__u32 count_props;
+ /** @count_encoders: Number of encoders. */
__u32 count_encoders;
- __u32 encoder_id; /**< Current Encoder */
- __u32 connector_id; /**< Id */
+ /** @encoder_id: Object ID of the current encoder. */
+ __u32 encoder_id;
+ /** @connector_id: Object ID of the connector. */
+ __u32 connector_id;
+ /**
+ * @connector_type: Type of the connector.
+ *
+ * See DRM_MODE_CONNECTOR_* defines.
+ */
__u32 connector_type;
+ /**
+ * @connector_type_id: Type-specific connector number.
+ *
+ * This is not an object ID. This is a per-type connector number. Each
+ * (type, type_id) combination is unique across all connectors of a DRM
+ * device.
+ *
+ * The (type, type_id) combination is not a stable identifier: the
+ * type_id can change depending on the driver probe order.
+ */
__u32 connector_type_id;
+ /**
+ * @connection: Status of the connector.
+ *
+ * See enum drm_connector_status.
+ */
__u32 connection;
- __u32 mm_width; /**< width in millimeters */
- __u32 mm_height; /**< height in millimeters */
+ /** @mm_width: Width of the connected sink in millimeters. */
+ __u32 mm_width;
+ /** @mm_height: Height of the connected sink in millimeters. */
+ __u32 mm_height;
+ /**
+ * @subpixel: Subpixel order of the connected sink.
+ *
+ * See enum subpixel_order.
+ */
__u32 subpixel;
+ /** @pad: Padding, must be zero. */
__u32 pad;
};
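A sketch of the retry loop described above, fetching only the mode list
(props and encoders would be handled the same way); `fd` is assumed to be an
open DRM FD, and error handling is elided:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static struct drm_mode_modeinfo *get_modes(int fd, __u32 connector_id,
                                           __u32 *count)
{
        struct drm_mode_get_connector conn;
        struct drm_mode_modeinfo *modes = NULL;
        __u32 n = 0;

        for (;;) {
                memset(&conn, 0, sizeof(conn));
                conn.connector_id = connector_id;
                conn.count_modes = n;   /* 0 on the first pass; this also
                                         * forces a probe if we are master */
                conn.modes_ptr = (__u64)(uintptr_t)modes;

                ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
                if (conn.count_modes == n)
                        break;  /* count stable, array (if any) is filled */

                n = conn.count_modes;   /* hotplug race: retry at new size */
                modes = realloc(modes, n * sizeof(*modes));
        }

        *count = n;
        return modes;
}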
@@ -412,26 +540,78 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
- * witout being aware that this could be triggering a lengthy modeset.
+ * without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
+/**
+ * struct drm_mode_property_enum - Description for an enum/bitfield entry.
+ * @value: numeric value for this enum entry.
+ * @name: symbolic name for this enum entry.
+ *
+ * See struct drm_property_enum for details.
+ */
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
+/**
+ * struct drm_mode_get_property - Get property metadata.
+ *
+ * User-space can perform a GETPROPERTY ioctl to retrieve information about a
+ * property. The same property may be attached to multiple objects, see
+ * "Modeset Base Object Abstraction".
+ *
+ * The meaning of the @values_ptr field changes depending on the property type.
+ * See &drm_property.flags for more details.
+ *
+ * The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the
+ * property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For
+ * backwards compatibility, the kernel will always set @count_enum_blobs to
+ * zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must
+ * ignore these two fields if the property has a different type.
+ *
+ * User-space is expected to retrieve values and enums by performing this ioctl
+ * at least twice: the first time to retrieve the number of elements, the
+ * second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_values and @count_enum_blobs
+ * to zero, then call the ioctl. @count_values will be updated with the number
+ * of elements. If the property has the type &DRM_MODE_PROP_ENUM or
+ * &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well.
+ *
+ * To retrieve the elements themselves, allocate an array for @values_ptr and
+ * set @count_values to its capacity. If the property has the type
+ * &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for
+ * @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl
+ * again will fill the arrays.
+ */
struct drm_mode_get_property {
- __u64 values_ptr; /* values and blob lengths */
- __u64 enum_blob_ptr; /* enum and blob id ptrs */
+ /** @values_ptr: Pointer to a ``__u64`` array. */
+ __u64 values_ptr;
+ /** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */
+ __u64 enum_blob_ptr;
+ /**
+ * @prop_id: Object ID of the property which should be retrieved. Set
+ * by the caller.
+ */
__u32 prop_id;
+ /**
+ * @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for
+ * a definition of the flags.
+ */
__u32 flags;
+ /**
+ * @name: Symbolic property name. User-space should use this field to
+ * recognize properties.
+ */
char name[DRM_PROP_NAME_LEN];
+ /** @count_values: Number of elements in @values_ptr. */
__u32 count_values;
- /* This is only used to count enum values, not blobs. The _blobs is
- * simply because of a historical reason, i.e. backwards compat. */
+ /** @count_enum_blobs: Number of elements in @enum_blob_ptr. */
__u32 count_enum_blobs;
};
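A sketch of the same two-call pattern for an enum property, assuming `fd` is
an open DRM FD and `prop_id` a valid property ID (error handling elided):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void print_enum_entries(int fd, __u32 prop_id)
{
        struct drm_mode_get_property prop;
        struct drm_mode_property_enum *enums;
        __u32 i;

        memset(&prop, 0, sizeof(prop));
        prop.prop_id = prop_id;
        ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);   /* fetch counts */

        if (!(prop.flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
                return;         /* count_enum_blobs is meaningless here */

        enums = calloc(prop.count_enum_blobs, sizeof(*enums));
        prop.enum_blob_ptr = (__u64)(uintptr_t)enums;
        prop.count_values = 0;  /* skip the values array in this sketch */
        ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);   /* fill the array */

        for (i = 0; i < prop.count_enum_blobs; i++)
                printf("%.*s = %llu\n", (int)sizeof(enums[i].name),
                       enums[i].name, (unsigned long long)enums[i].value);
        free(enums);
}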
@@ -484,43 +664,75 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
-#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+/**
+ * struct drm_mode_fb_cmd2 - Frame-buffer metadata.
+ *
+ * This struct holds frame-buffer metadata. There are two ways to use it:
+ *
+ * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2
+ * ioctl to register a new frame-buffer. The new frame-buffer object ID will
+ * be set by the kernel in @fb_id.
+ * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to
+ * fetch metadata about an existing frame-buffer.
+ *
+ * In case of planar formats, this struct allows up to 4 buffer objects with
+ * offsets and pitches per plane. The pitch and offset order are dictated by
+ * the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8-bit Y samples followed by an
+ * interleaved U/V plane containing 8-bit 2x2 subsampled colour difference
+ * samples.
+ *
+ * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
+ * ``offsets[1]``.
+ *
+ * To accommodate tiled, compressed, etc. formats, a modifier can be specified.
+ * For more information see the "Format Modifiers" section. Note that even
+ * though it looks like we have a modifier per-plane, we in fact do not. The
+ * modifier for each plane must be identical. Thus all combinations of
+ * different data layouts for multi-plane formats must be enumerated as
+ * separate modifiers.
+ *
+ * All of the entries in @handles, @pitches, @offsets and @modifier must be
+ * zero when unused. Warning: for @offsets and @modifier, zero can't be used to
+ * figure out whether the entry is used or not since it's a valid value (a zero
+ * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR).
+ */
struct drm_mode_fb_cmd2 {
+ /** @fb_id: Object ID of the frame-buffer. */
__u32 fb_id;
+ /** @width: Width of the frame-buffer. */
__u32 width;
+ /** @height: Height of the frame-buffer. */
__u32 height;
- __u32 pixel_format; /* fourcc code from drm_fourcc.h */
- __u32 flags; /* see above flags */
+ /**
+ * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in
+ * ``drm_fourcc.h``.
+ */
+ __u32 pixel_format;
+ /**
+ * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and
+ * &DRM_MODE_FB_MODIFIERS).
+ */
+ __u32 flags;
- /*
- * In case of planar formats, this ioctl allows up to 4
- * buffer objects with offsets and pitches per plane.
- * The pitch and offset order is dictated by the fourcc,
- * e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as:
- *
- * YUV 4:2:0 image with a plane of 8 bit Y samples
- * followed by an interleaved U/V plane containing
- * 8 bit 2x2 subsampled colour difference samples.
- *
- * So it would consist of Y as offsets[0] and UV as
- * offsets[1]. Note that offsets[0] will generally
- * be 0 (but this is not required).
- *
- * To accommodate tiled, compressed, etc formats, a
- * modifier can be specified. The default value of zero
- * indicates "native" format as specified by the fourcc.
- * Vendor specific modifier token. Note that even though
- * it looks like we have a modifier per-plane, we in fact
- * do not. The modifier for each plane must be identical.
- * Thus all combinations of different data layouts for
- * multi plane formats must be enumerated as separate
- * modifiers.
+ /**
+ * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is
+ * unused. The same handle can be used for multiple planes.
*/
__u32 handles[4];
- __u32 pitches[4]; /* pitch for each plane */
- __u32 offsets[4]; /* offset of each plane */
- __u64 modifier[4]; /* ie, tiling, compress */
+ /** @pitches: Pitch (aka. stride) in bytes, one per plane. */
+ __u32 pitches[4];
+ /** @offsets: Offset into the buffer in bytes, one per plane. */
+ __u32 offsets[4];
+ /**
+ * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*``
+ * constants in ``drm_fourcc.h``. All planes must use the same
+ * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags.
+ */
+ __u64 modifier[4];
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -625,10 +837,23 @@ struct drm_color_ctm {
/*
* Conversion matrix in S31.32 sign-magnitude
* (not two's complement!) format.
+ *
+ * out matrix in
+ * |R| |0 1 2| |R|
+ * |G| = |3 4 5| x |G|
+ * |B| |6 7 8| |B|
*/
__u64 matrix[9];
};
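A short sketch of encoding coefficients in the S31.32 sign-magnitude format
the comment describes (the helper name is illustrative):

#include <drm/drm_mode.h>

/* Encode a coefficient as S31.32 sign-magnitude: magnitude in the low 63
 * bits as 31.32 fixed point, sign in bit 63 (not two's complement). */
static __u64 ctm_coeff(double v)
{
        __u64 mag = (__u64)((v < 0 ? -v : v) * (double)(1ULL << 32));

        return (v < 0 ? 1ULL << 63 : 0) | (mag & ~(1ULL << 63));
}

/* The identity matrix has 1.0 == (1ULL << 32) on the diagonal. */
static const struct drm_color_ctm identity_ctm = {
        .matrix = {
                1ULL << 32, 0, 0,
                0, 1ULL << 32, 0,
                0, 0, 1ULL << 32,
        },
};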
+struct drm_color_ctm_3x4 {
+ /*
+ * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[12];
+};
+
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@@ -664,23 +889,23 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @display_primaries.x: X cordinate of color primary.
- * @display_primaries.y: Y cordinate of color primary.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
- } display_primaries[3];
+ } display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @white_point.x: X cordinate of whitepoint of color primary.
- * @white_point.y: Y cordinate of whitepoint of color primary.
+ * @white_point.x: X coordinate of whitepoint of color primary.
+ * @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
- } white_point;
+ } white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
@@ -726,12 +951,40 @@ struct hdr_output_metadata {
};
};
+/**
+ * DRM_MODE_PAGE_FLIP_EVENT
+ *
+ * Request that the kernel sends back a vblank event (see
+ * struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
+ * page-flip is done.
+ */
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+/**
+ * DRM_MODE_PAGE_FLIP_ASYNC
+ *
+ * Request that the page-flip is performed as soon as possible, ie. with no
+ * delay due to waiting for vblank. This may cause tearing to be visible on
+ * the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note, some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
+ */
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+/**
+ * DRM_MODE_PAGE_FLIP_FLAGS
+ *
+ * Bitmask of flags suitable for &drm_mode_crtc_page_flip_target.flags.
+ */
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
DRM_MODE_PAGE_FLIP_ASYNC | \
DRM_MODE_PAGE_FLIP_TARGET)
@@ -796,13 +1049,25 @@ struct drm_mode_crtc_page_flip_target {
__u64 user_data;
};
-/* create a dumb scanout buffer */
+/**
+ * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
+ * @height: buffer height in pixels
+ * @width: buffer width in pixels
+ * @bpp: bits per pixel
+ * @flags: must be zero
+ * @handle: buffer object handle
+ * @pitch: number of bytes between two consecutive lines
+ * @size: size of the whole buffer in bytes
+ *
+ * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
+ * the kernel fills @handle, @pitch and @size.
+ */
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
- /* handle, pitch, size will be returned */
+
__u32 handle;
__u32 pitch;
__u64 size;
@@ -825,11 +1090,53 @@ struct drm_mode_destroy_dumb {
__u32 handle;
};
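A minimal sketch of the dumb-buffer allocation flow documented above,
assuming `fd` is an open DRM FD on a driver that supports dumb buffers (the
640x480/32bpp geometry is just an example):

#include <sys/ioctl.h>
#include <drm/drm.h>

static int create_scanout_buffer(int fd, struct drm_mode_create_dumb *out)
{
        struct drm_mode_create_dumb create = {
                .width = 640,   /* pixels */
                .height = 480,
                .bpp = 32,      /* e.g. for XRGB8888 */
                .flags = 0,     /* must be zero */
        };

        if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return -1;

        *out = create;  /* .handle, .pitch and .size are now filled in */
        return 0;
}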
-/* page-flip flags are valid, plus: */
+/**
+ * DRM_MODE_ATOMIC_TEST_ONLY
+ *
+ * Do not apply the atomic commit, instead check whether the hardware supports
+ * this configuration.
+ *
+ * See &drm_mode_config_funcs.atomic_check for more details on test-only
+ * commits.
+ */
#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+/**
+ * DRM_MODE_ATOMIC_NONBLOCK
+ *
+ * Do not block while applying the atomic commit. The &DRM_IOCTL_MODE_ATOMIC
+ * IOCTL returns immediately instead of waiting for the changes to be applied
+ * in hardware. Note, the driver will still check that the update can be
+ * applied before returning.
+ */
#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+/**
+ * DRM_MODE_ATOMIC_ALLOW_MODESET
+ *
+ * Allow the update to result in temporary or transient visible artifacts while
+ * the update is being applied. Applying the update may also take significantly
+ * more time than a page flip. All visual artifacts will disappear by the time
+ * the update is completed, as signalled through the vblank event's timestamp
+ * (see struct drm_event_vblank).
+ *
+ * This flag must be set when the KMS update might cause visible artifacts.
+ * Without this flag, such a KMS update will return an EINVAL error. What kind of
+ * update may cause visible artifacts depends on the driver and the hardware.
+ * User-space that needs to know beforehand if an update might cause visible
+ * artifacts can use &DRM_MODE_ATOMIC_TEST_ONLY without
+ * &DRM_MODE_ATOMIC_ALLOW_MODESET to see if it fails.
+ *
+ * To the best of the driver's knowledge, visual artifacts are guaranteed to
+ * not appear when this flag is not set. Some sinks might display visual
+ * artifacts outside of the driver's control.
+ */
#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
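A sketch of the probing pattern the ALLOW_MODESET comment suggests: submit
the update as TEST_ONLY first to learn whether it needs a full modeset.
`req` is assumed to be a fully populated struct drm_mode_atomic:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Returns 0 if the update would apply flicker-free, 1 if it needs
 * ALLOW_MODESET, -1 if it cannot be applied at all. */
static int commit_needs_modeset(int fd, struct drm_mode_atomic *req)
{
        req->flags = DRM_MODE_ATOMIC_TEST_ONLY;
        if (ioctl(fd, DRM_IOCTL_MODE_ATOMIC, req) == 0)
                return 0;

        req->flags = DRM_MODE_ATOMIC_TEST_ONLY | DRM_MODE_ATOMIC_ALLOW_MODESET;
        return ioctl(fd, DRM_IOCTL_MODE_ATOMIC, req) == 0 ? 1 : -1;
}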
+/**
+ * DRM_MODE_ATOMIC_FLAGS
+ *
+ * Bitfield of flags accepted by the &DRM_IOCTL_MODE_ATOMIC IOCTL in
+ * &drm_mode_atomic.flags.
+ */
#define DRM_MODE_ATOMIC_FLAGS (\
DRM_MODE_PAGE_FLIP_EVENT |\
DRM_MODE_PAGE_FLIP_ASYNC |\
@@ -899,26 +1206,31 @@ struct drm_format_modifier {
};
/**
- * struct drm_mode_create_blob - Create New block property
- * @data: Pointer to data to copy.
- * @length: Length of data to copy.
- * @blob_id: new property ID.
+ * struct drm_mode_create_blob - Create a new blob property
+ *
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
- /** Pointer to data to copy. */
+ /** @data: Pointer to data to copy. */
__u64 data;
- /** Length of data to copy. */
+ /** @length: Length of data to copy. */
__u32 length;
- /** Return: new property ID. */
+ /** @blob_id: Return: new property ID. */
__u32 blob_id;
};
/**
* struct drm_mode_destroy_blob - Destroy user blob
* @blob_id: blob_id to destroy
+ *
* Destroy a user-created blob property.
+ *
+ * User-space can release blobs as soon as they do not need to refer to them by
+ * their blob object ID. For instance, if you are using a MODE_ID blob in an
+ * atomic commit and you will not make another commit re-using the same ID, you
+ * can destroy the blob as soon as the commit has been issued, without waiting
+ * for it to complete.
*/
struct drm_mode_destroy_blob {
__u32 blob_id;
@@ -926,36 +1238,36 @@ struct drm_mode_destroy_blob {
/**
* struct drm_mode_create_lease - Create lease
- * @object_ids: Pointer to array of object ids.
- * @object_count: Number of object ids.
- * @flags: flags for new FD.
- * @lessee_id: unique identifier for lessee.
- * @fd: file descriptor to new drm_master file.
+ *
* Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
+ * the lease can be completely empty.
*/
struct drm_mode_create_lease {
- /** Pointer to array of object ids (__u32) */
+ /** @object_ids: Pointer to array of object ids (__u32) */
__u64 object_ids;
- /** Number of object ids */
+ /** @object_count: Number of object ids */
__u32 object_count;
- /** flags for new FD (O_CLOEXEC, etc) */
+ /** @flags: flags for new FD (O_CLOEXEC, etc) */
__u32 flags;
- /** Return: unique identifier for lessee. */
+ /** @lessee_id: Return: unique identifier for lessee. */
__u32 lessee_id;
- /** Return: file descriptor to new drm_master file */
+ /** @fd: Return: file descriptor to new drm_master file */
__u32 fd;
};
/**
* struct drm_mode_list_lessees - List lessees
- * @count_lessees: Number of lessees.
- * @pad: pad.
- * @lessees_ptr: Pointer to lessess.
- * List lesses from a drm_master
+ *
+ * List lessees from a drm_master.
*/
struct drm_mode_list_lessees {
- /** Number of lessees.
+ /**
+ * @count_lessees: Number of lessees.
+ *
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -963,23 +1275,26 @@ struct drm_mode_list_lessees {
* the size and then the data.
*/
__u32 count_lessees;
+ /** @pad: Padding. */
__u32 pad;
- /** Pointer to lessees.
- * pointer to __u64 array of lessee ids
+ /**
+ * @lessees_ptr: Pointer to lessees.
+ *
+ * Pointer to __u64 array of lessee ids
*/
__u64 lessees_ptr;
};
/**
* struct drm_mode_get_lease - Get Lease
- * @count_objects: Number of leased objects.
- * @pad: pad.
- * @objects_ptr: Pointer to objects.
- * Get leased objects
+ *
+ * Get leased objects.
*/
struct drm_mode_get_lease {
- /** Number of leased objects.
+ /**
+ * @count_objects: Number of leased objects.
+ *
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -987,22 +1302,22 @@ struct drm_mode_get_lease {
* the size and then the data.
*/
__u32 count_objects;
+ /** @pad: Padding. */
__u32 pad;
- /** Pointer to objects.
- * pointer to __u32 array of object ids
+ /**
+ * @objects_ptr: Pointer to objects.
+ *
+ * Pointer to __u32 array of object ids.
*/
__u64 objects_ptr;
};
/**
* struct drm_mode_revoke_lease - Revoke lease
- * @lessee_id: Unique ID of lessee.
- * Revoke lease
*/
struct drm_mode_revoke_lease {
- /** Unique ID of lessee
- */
+ /** @lessee_id: Unique ID of lessee */
__u32 lessee_id;
};
@@ -1025,6 +1340,16 @@ struct drm_mode_rect {
__s32 y2;
};
+/**
+ * struct drm_mode_closefb
+ * @fb_id: Framebuffer ID.
+ * @pad: Must be zero.
+ */
+struct drm_mode_closefb {
+ __u32 fb_id;
+ __u32 pad;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 09d0df8b71c5..d87410a8443a 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -74,6 +74,14 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
+#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
+#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
+#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
+#define ETNAVIV_PARAM_GPU_NN_CORE_COUNT 0x1f
+#define ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE 0x20
+#define ETNAVIV_PARAM_GPU_TP_CORE_COUNT 0x21
+#define ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE 0x22
+#define ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE 0x23
#define ETNA_MAX_PIPES 4
diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h
new file mode 100644
index 000000000000..a512dc4cffd0
--- /dev/null
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -0,0 +1,2368 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ *
+ * Copyright 2016-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HABANALABS_H_
+#define HABANALABS_H_
+
+#include <drm/drm.h>
+
+/*
+ * Defines that are asic-specific but constitute an ABI between the kernel
+ * driver and userspace
+ */
+#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */
+
+/*
+ * 128 SOBs reserved for collective wait
+ * 16 SOBs reserved for sync stream
+ */
+#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 144
+
+/*
+ * 64 monitors reserved for collective wait
+ * 8 monitors reserved for sync stream
+ */
+#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 72
+
+/* Max number of elements in timestamps registration buffers */
+#define TS_MAX_ELEMENTS_NUM (1 << 20) /* 1M elements */
+
+/*
+ * Goya queue numbering
+ *
+ * The external queues (PCI DMA channels) MUST be before the internal queues
+ * and each group (PCI DMA channels and internal) must be contiguous inside
+ * itself, but there can be a gap between the two groups (although this is
+ * not recommended)
+ */
+
+enum goya_queue_id {
+ GOYA_QUEUE_ID_DMA_0 = 0,
+ GOYA_QUEUE_ID_DMA_1 = 1,
+ GOYA_QUEUE_ID_DMA_2 = 2,
+ GOYA_QUEUE_ID_DMA_3 = 3,
+ GOYA_QUEUE_ID_DMA_4 = 4,
+ GOYA_QUEUE_ID_CPU_PQ = 5,
+ GOYA_QUEUE_ID_MME = 6, /* Internal queues start here */
+ GOYA_QUEUE_ID_TPC0 = 7,
+ GOYA_QUEUE_ID_TPC1 = 8,
+ GOYA_QUEUE_ID_TPC2 = 9,
+ GOYA_QUEUE_ID_TPC3 = 10,
+ GOYA_QUEUE_ID_TPC4 = 11,
+ GOYA_QUEUE_ID_TPC5 = 12,
+ GOYA_QUEUE_ID_TPC6 = 13,
+ GOYA_QUEUE_ID_TPC7 = 14,
+ GOYA_QUEUE_ID_SIZE
+};
+
+/*
+ * Gaudi queue numbering
+ * External queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*.
+ * Except one CPU queue, all the rest are internal queues.
+ */
+
+enum gaudi_queue_id {
+ GAUDI_QUEUE_ID_DMA_0_0 = 0, /* external */
+ GAUDI_QUEUE_ID_DMA_0_1 = 1, /* external */
+ GAUDI_QUEUE_ID_DMA_0_2 = 2, /* external */
+ GAUDI_QUEUE_ID_DMA_0_3 = 3, /* external */
+ GAUDI_QUEUE_ID_DMA_1_0 = 4, /* external */
+ GAUDI_QUEUE_ID_DMA_1_1 = 5, /* external */
+ GAUDI_QUEUE_ID_DMA_1_2 = 6, /* external */
+ GAUDI_QUEUE_ID_DMA_1_3 = 7, /* external */
+ GAUDI_QUEUE_ID_CPU_PQ = 8, /* CPU */
+ GAUDI_QUEUE_ID_DMA_2_0 = 9, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_1 = 10, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_2 = 11, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_3 = 12, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_0 = 13, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_1 = 14, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_2 = 15, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_3 = 16, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_0 = 17, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_1 = 18, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_2 = 19, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_3 = 20, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_0 = 21, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_1 = 22, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_2 = 23, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_3 = 24, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_0 = 25, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_1 = 26, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_2 = 27, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_3 = 28, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_0 = 29, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_1 = 30, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_2 = 31, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_3 = 32, /* internal */
+ GAUDI_QUEUE_ID_MME_0_0 = 33, /* internal */
+ GAUDI_QUEUE_ID_MME_0_1 = 34, /* internal */
+ GAUDI_QUEUE_ID_MME_0_2 = 35, /* internal */
+ GAUDI_QUEUE_ID_MME_0_3 = 36, /* internal */
+ GAUDI_QUEUE_ID_MME_1_0 = 37, /* internal */
+ GAUDI_QUEUE_ID_MME_1_1 = 38, /* internal */
+ GAUDI_QUEUE_ID_MME_1_2 = 39, /* internal */
+ GAUDI_QUEUE_ID_MME_1_3 = 40, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_0 = 41, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_1 = 42, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_2 = 43, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_3 = 44, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_0 = 45, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_1 = 46, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_2 = 47, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_3 = 48, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_0 = 49, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_1 = 50, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_2 = 51, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_3 = 52, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_0 = 53, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_1 = 54, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_2 = 55, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_3 = 56, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_0 = 57, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_1 = 58, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_2 = 59, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_3 = 60, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_0 = 61, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_1 = 62, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_2 = 63, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_3 = 64, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_0 = 65, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_1 = 66, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_2 = 67, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_3 = 68, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_0 = 69, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_1 = 70, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_2 = 71, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_3 = 72, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_0 = 73, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_1 = 74, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_2 = 75, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_3 = 76, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_0 = 77, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_1 = 78, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_2 = 79, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_3 = 80, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_0 = 81, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_1 = 82, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_2 = 83, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_3 = 84, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_0 = 85, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_1 = 86, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_2 = 87, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_3 = 88, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_0 = 89, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_1 = 90, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_2 = 91, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_3 = 92, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_0 = 93, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_1 = 94, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_2 = 95, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_3 = 96, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_0 = 97, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_1 = 98, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_2 = 99, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_3 = 100, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_0 = 101, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_1 = 102, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_2 = 103, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_3 = 104, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_0 = 105, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_1 = 106, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_2 = 107, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_3 = 108, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_0 = 109, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_1 = 110, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_2 = 111, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_3 = 112, /* internal */
+ GAUDI_QUEUE_ID_SIZE
+};
+
+/*
+ * In GAUDI2 we have two modes of operation with regard to queues:
+ * 1. Legacy mode, where each QMAN exposes 4 streams to the user
+ * 2. F/W mode, where the F/W is used to schedule the jobs to the different queues.
+ *
+ * When in legacy mode, the user sends the queue id per job according to
+ * enum gaudi2_queue_id below.
+ *
+ * When in F/W mode, the user sends a stream id per Command Submission. The
+ * stream id is a running number from 0 up to (N-1), where N is the number
+ * of streams the F/W exposes and is passed to the user in
+ * struct hl_info_hw_ip_info
+ */
+
+enum gaudi2_queue_id {
+ GAUDI2_QUEUE_ID_PDMA_0_0 = 0,
+ GAUDI2_QUEUE_ID_PDMA_0_1 = 1,
+ GAUDI2_QUEUE_ID_PDMA_0_2 = 2,
+ GAUDI2_QUEUE_ID_PDMA_0_3 = 3,
+ GAUDI2_QUEUE_ID_PDMA_1_0 = 4,
+ GAUDI2_QUEUE_ID_PDMA_1_1 = 5,
+ GAUDI2_QUEUE_ID_PDMA_1_2 = 6,
+ GAUDI2_QUEUE_ID_PDMA_1_3 = 7,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0 = 8,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1 = 9,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2 = 10,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3 = 11,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0 = 12,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1 = 13,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2 = 14,
+ GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3 = 15,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_0 = 16,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_1 = 17,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_2 = 18,
+ GAUDI2_QUEUE_ID_DCORE0_MME_0_3 = 19,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 = 20,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_1 = 21,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_2 = 22,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_0_3 = 23,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_0 = 24,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_1 = 25,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_2 = 26,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_1_3 = 27,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_0 = 28,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_1 = 29,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_2 = 30,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_2_3 = 31,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_0 = 32,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_1 = 33,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_2 = 34,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_3_3 = 35,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_0 = 36,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_1 = 37,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_2 = 38,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_4_3 = 39,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_0 = 40,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_1 = 41,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_2 = 42,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_5_3 = 43,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_0 = 44,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_1 = 45,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_2 = 46,
+ GAUDI2_QUEUE_ID_DCORE0_TPC_6_3 = 47,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0 = 48,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1 = 49,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2 = 50,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3 = 51,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0 = 52,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1 = 53,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2 = 54,
+ GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3 = 55,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_0 = 56,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_1 = 57,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_2 = 58,
+ GAUDI2_QUEUE_ID_DCORE1_MME_0_3 = 59,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 = 60,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_1 = 61,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_2 = 62,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_0_3 = 63,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_0 = 64,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_1 = 65,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_2 = 66,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_1_3 = 67,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_0 = 68,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_1 = 69,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_2 = 70,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_2_3 = 71,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_0 = 72,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_1 = 73,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_2 = 74,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_3_3 = 75,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_0 = 76,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_1 = 77,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_2 = 78,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_4_3 = 79,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_0 = 80,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_1 = 81,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_2 = 82,
+ GAUDI2_QUEUE_ID_DCORE1_TPC_5_3 = 83,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0 = 84,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1 = 85,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2 = 86,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3 = 87,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0 = 88,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1 = 89,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2 = 90,
+ GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3 = 91,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_0 = 92,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_1 = 93,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_2 = 94,
+ GAUDI2_QUEUE_ID_DCORE2_MME_0_3 = 95,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 = 96,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_1 = 97,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_2 = 98,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_0_3 = 99,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_0 = 100,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_1 = 101,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_2 = 102,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_1_3 = 103,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_0 = 104,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_1 = 105,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_2 = 106,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_2_3 = 107,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_0 = 108,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_1 = 109,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_2 = 110,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_3_3 = 111,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_0 = 112,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_1 = 113,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_2 = 114,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_4_3 = 115,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_0 = 116,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_1 = 117,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_2 = 118,
+ GAUDI2_QUEUE_ID_DCORE2_TPC_5_3 = 119,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0 = 120,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1 = 121,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2 = 122,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3 = 123,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0 = 124,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1 = 125,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2 = 126,
+ GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3 = 127,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_0 = 128,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_1 = 129,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_2 = 130,
+ GAUDI2_QUEUE_ID_DCORE3_MME_0_3 = 131,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 = 132,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_1 = 133,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_2 = 134,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_0_3 = 135,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_0 = 136,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_1 = 137,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_2 = 138,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_1_3 = 139,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_0 = 140,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_1 = 141,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_2 = 142,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_2_3 = 143,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_0 = 144,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_1 = 145,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_2 = 146,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_3_3 = 147,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_0 = 148,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_1 = 149,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_2 = 150,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_4_3 = 151,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_0 = 152,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_1 = 153,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_2 = 154,
+ GAUDI2_QUEUE_ID_DCORE3_TPC_5_3 = 155,
+ GAUDI2_QUEUE_ID_NIC_0_0 = 156,
+ GAUDI2_QUEUE_ID_NIC_0_1 = 157,
+ GAUDI2_QUEUE_ID_NIC_0_2 = 158,
+ GAUDI2_QUEUE_ID_NIC_0_3 = 159,
+ GAUDI2_QUEUE_ID_NIC_1_0 = 160,
+ GAUDI2_QUEUE_ID_NIC_1_1 = 161,
+ GAUDI2_QUEUE_ID_NIC_1_2 = 162,
+ GAUDI2_QUEUE_ID_NIC_1_3 = 163,
+ GAUDI2_QUEUE_ID_NIC_2_0 = 164,
+ GAUDI2_QUEUE_ID_NIC_2_1 = 165,
+ GAUDI2_QUEUE_ID_NIC_2_2 = 166,
+ GAUDI2_QUEUE_ID_NIC_2_3 = 167,
+ GAUDI2_QUEUE_ID_NIC_3_0 = 168,
+ GAUDI2_QUEUE_ID_NIC_3_1 = 169,
+ GAUDI2_QUEUE_ID_NIC_3_2 = 170,
+ GAUDI2_QUEUE_ID_NIC_3_3 = 171,
+ GAUDI2_QUEUE_ID_NIC_4_0 = 172,
+ GAUDI2_QUEUE_ID_NIC_4_1 = 173,
+ GAUDI2_QUEUE_ID_NIC_4_2 = 174,
+ GAUDI2_QUEUE_ID_NIC_4_3 = 175,
+ GAUDI2_QUEUE_ID_NIC_5_0 = 176,
+ GAUDI2_QUEUE_ID_NIC_5_1 = 177,
+ GAUDI2_QUEUE_ID_NIC_5_2 = 178,
+ GAUDI2_QUEUE_ID_NIC_5_3 = 179,
+ GAUDI2_QUEUE_ID_NIC_6_0 = 180,
+ GAUDI2_QUEUE_ID_NIC_6_1 = 181,
+ GAUDI2_QUEUE_ID_NIC_6_2 = 182,
+ GAUDI2_QUEUE_ID_NIC_6_3 = 183,
+ GAUDI2_QUEUE_ID_NIC_7_0 = 184,
+ GAUDI2_QUEUE_ID_NIC_7_1 = 185,
+ GAUDI2_QUEUE_ID_NIC_7_2 = 186,
+ GAUDI2_QUEUE_ID_NIC_7_3 = 187,
+ GAUDI2_QUEUE_ID_NIC_8_0 = 188,
+ GAUDI2_QUEUE_ID_NIC_8_1 = 189,
+ GAUDI2_QUEUE_ID_NIC_8_2 = 190,
+ GAUDI2_QUEUE_ID_NIC_8_3 = 191,
+ GAUDI2_QUEUE_ID_NIC_9_0 = 192,
+ GAUDI2_QUEUE_ID_NIC_9_1 = 193,
+ GAUDI2_QUEUE_ID_NIC_9_2 = 194,
+ GAUDI2_QUEUE_ID_NIC_9_3 = 195,
+ GAUDI2_QUEUE_ID_NIC_10_0 = 196,
+ GAUDI2_QUEUE_ID_NIC_10_1 = 197,
+ GAUDI2_QUEUE_ID_NIC_10_2 = 198,
+ GAUDI2_QUEUE_ID_NIC_10_3 = 199,
+ GAUDI2_QUEUE_ID_NIC_11_0 = 200,
+ GAUDI2_QUEUE_ID_NIC_11_1 = 201,
+ GAUDI2_QUEUE_ID_NIC_11_2 = 202,
+ GAUDI2_QUEUE_ID_NIC_11_3 = 203,
+ GAUDI2_QUEUE_ID_NIC_12_0 = 204,
+ GAUDI2_QUEUE_ID_NIC_12_1 = 205,
+ GAUDI2_QUEUE_ID_NIC_12_2 = 206,
+ GAUDI2_QUEUE_ID_NIC_12_3 = 207,
+ GAUDI2_QUEUE_ID_NIC_13_0 = 208,
+ GAUDI2_QUEUE_ID_NIC_13_1 = 209,
+ GAUDI2_QUEUE_ID_NIC_13_2 = 210,
+ GAUDI2_QUEUE_ID_NIC_13_3 = 211,
+ GAUDI2_QUEUE_ID_NIC_14_0 = 212,
+ GAUDI2_QUEUE_ID_NIC_14_1 = 213,
+ GAUDI2_QUEUE_ID_NIC_14_2 = 214,
+ GAUDI2_QUEUE_ID_NIC_14_3 = 215,
+ GAUDI2_QUEUE_ID_NIC_15_0 = 216,
+ GAUDI2_QUEUE_ID_NIC_15_1 = 217,
+ GAUDI2_QUEUE_ID_NIC_15_2 = 218,
+ GAUDI2_QUEUE_ID_NIC_15_3 = 219,
+ GAUDI2_QUEUE_ID_NIC_16_0 = 220,
+ GAUDI2_QUEUE_ID_NIC_16_1 = 221,
+ GAUDI2_QUEUE_ID_NIC_16_2 = 222,
+ GAUDI2_QUEUE_ID_NIC_16_3 = 223,
+ GAUDI2_QUEUE_ID_NIC_17_0 = 224,
+ GAUDI2_QUEUE_ID_NIC_17_1 = 225,
+ GAUDI2_QUEUE_ID_NIC_17_2 = 226,
+ GAUDI2_QUEUE_ID_NIC_17_3 = 227,
+ GAUDI2_QUEUE_ID_NIC_18_0 = 228,
+ GAUDI2_QUEUE_ID_NIC_18_1 = 229,
+ GAUDI2_QUEUE_ID_NIC_18_2 = 230,
+ GAUDI2_QUEUE_ID_NIC_18_3 = 231,
+ GAUDI2_QUEUE_ID_NIC_19_0 = 232,
+ GAUDI2_QUEUE_ID_NIC_19_1 = 233,
+ GAUDI2_QUEUE_ID_NIC_19_2 = 234,
+ GAUDI2_QUEUE_ID_NIC_19_3 = 235,
+ GAUDI2_QUEUE_ID_NIC_20_0 = 236,
+ GAUDI2_QUEUE_ID_NIC_20_1 = 237,
+ GAUDI2_QUEUE_ID_NIC_20_2 = 238,
+ GAUDI2_QUEUE_ID_NIC_20_3 = 239,
+ GAUDI2_QUEUE_ID_NIC_21_0 = 240,
+ GAUDI2_QUEUE_ID_NIC_21_1 = 241,
+ GAUDI2_QUEUE_ID_NIC_21_2 = 242,
+ GAUDI2_QUEUE_ID_NIC_21_3 = 243,
+ GAUDI2_QUEUE_ID_NIC_22_0 = 244,
+ GAUDI2_QUEUE_ID_NIC_22_1 = 245,
+ GAUDI2_QUEUE_ID_NIC_22_2 = 246,
+ GAUDI2_QUEUE_ID_NIC_22_3 = 247,
+ GAUDI2_QUEUE_ID_NIC_23_0 = 248,
+ GAUDI2_QUEUE_ID_NIC_23_1 = 249,
+ GAUDI2_QUEUE_ID_NIC_23_2 = 250,
+ GAUDI2_QUEUE_ID_NIC_23_3 = 251,
+ GAUDI2_QUEUE_ID_ROT_0_0 = 252,
+ GAUDI2_QUEUE_ID_ROT_0_1 = 253,
+ GAUDI2_QUEUE_ID_ROT_0_2 = 254,
+ GAUDI2_QUEUE_ID_ROT_0_3 = 255,
+ GAUDI2_QUEUE_ID_ROT_1_0 = 256,
+ GAUDI2_QUEUE_ID_ROT_1_1 = 257,
+ GAUDI2_QUEUE_ID_ROT_1_2 = 258,
+ GAUDI2_QUEUE_ID_ROT_1_3 = 259,
+ GAUDI2_QUEUE_ID_CPU_PQ = 260,
+ GAUDI2_QUEUE_ID_SIZE
+};
+
+/*
+ * Engine Numbering
+ *
+ * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
+ */
+
+enum goya_engine_id {
+ GOYA_ENGINE_ID_DMA_0 = 0,
+ GOYA_ENGINE_ID_DMA_1,
+ GOYA_ENGINE_ID_DMA_2,
+ GOYA_ENGINE_ID_DMA_3,
+ GOYA_ENGINE_ID_DMA_4,
+ GOYA_ENGINE_ID_MME_0,
+ GOYA_ENGINE_ID_TPC_0,
+ GOYA_ENGINE_ID_TPC_1,
+ GOYA_ENGINE_ID_TPC_2,
+ GOYA_ENGINE_ID_TPC_3,
+ GOYA_ENGINE_ID_TPC_4,
+ GOYA_ENGINE_ID_TPC_5,
+ GOYA_ENGINE_ID_TPC_6,
+ GOYA_ENGINE_ID_TPC_7,
+ GOYA_ENGINE_ID_SIZE
+};
+
+enum gaudi_engine_id {
+ GAUDI_ENGINE_ID_DMA_0 = 0,
+ GAUDI_ENGINE_ID_DMA_1,
+ GAUDI_ENGINE_ID_DMA_2,
+ GAUDI_ENGINE_ID_DMA_3,
+ GAUDI_ENGINE_ID_DMA_4,
+ GAUDI_ENGINE_ID_DMA_5,
+ GAUDI_ENGINE_ID_DMA_6,
+ GAUDI_ENGINE_ID_DMA_7,
+ GAUDI_ENGINE_ID_MME_0,
+ GAUDI_ENGINE_ID_MME_1,
+ GAUDI_ENGINE_ID_MME_2,
+ GAUDI_ENGINE_ID_MME_3,
+ GAUDI_ENGINE_ID_TPC_0,
+ GAUDI_ENGINE_ID_TPC_1,
+ GAUDI_ENGINE_ID_TPC_2,
+ GAUDI_ENGINE_ID_TPC_3,
+ GAUDI_ENGINE_ID_TPC_4,
+ GAUDI_ENGINE_ID_TPC_5,
+ GAUDI_ENGINE_ID_TPC_6,
+ GAUDI_ENGINE_ID_TPC_7,
+ GAUDI_ENGINE_ID_NIC_0,
+ GAUDI_ENGINE_ID_NIC_1,
+ GAUDI_ENGINE_ID_NIC_2,
+ GAUDI_ENGINE_ID_NIC_3,
+ GAUDI_ENGINE_ID_NIC_4,
+ GAUDI_ENGINE_ID_NIC_5,
+ GAUDI_ENGINE_ID_NIC_6,
+ GAUDI_ENGINE_ID_NIC_7,
+ GAUDI_ENGINE_ID_NIC_8,
+ GAUDI_ENGINE_ID_NIC_9,
+ GAUDI_ENGINE_ID_SIZE
+};
+
+enum gaudi2_engine_id {
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_0 = 0,
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE0_ENGINE_ID_MME,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_0,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE1_ENGINE_ID_MME,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_0,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE2_ENGINE_ID_MME,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_0,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_1,
+ GAUDI2_DCORE3_ENGINE_ID_MME,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_0,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_1,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_2,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_3,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_4,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_5,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_0,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_1,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_6,
+ GAUDI2_ENGINE_ID_PDMA_0,
+ GAUDI2_ENGINE_ID_PDMA_1,
+ GAUDI2_ENGINE_ID_ROT_0,
+ GAUDI2_ENGINE_ID_ROT_1,
+ GAUDI2_PCIE_ENGINE_ID_DEC_0,
+ GAUDI2_PCIE_ENGINE_ID_DEC_1,
+ GAUDI2_ENGINE_ID_NIC0_0,
+ GAUDI2_ENGINE_ID_NIC0_1,
+ GAUDI2_ENGINE_ID_NIC1_0,
+ GAUDI2_ENGINE_ID_NIC1_1,
+ GAUDI2_ENGINE_ID_NIC2_0,
+ GAUDI2_ENGINE_ID_NIC2_1,
+ GAUDI2_ENGINE_ID_NIC3_0,
+ GAUDI2_ENGINE_ID_NIC3_1,
+ GAUDI2_ENGINE_ID_NIC4_0,
+ GAUDI2_ENGINE_ID_NIC4_1,
+ GAUDI2_ENGINE_ID_NIC5_0,
+ GAUDI2_ENGINE_ID_NIC5_1,
+ GAUDI2_ENGINE_ID_NIC6_0,
+ GAUDI2_ENGINE_ID_NIC6_1,
+ GAUDI2_ENGINE_ID_NIC7_0,
+ GAUDI2_ENGINE_ID_NIC7_1,
+ GAUDI2_ENGINE_ID_NIC8_0,
+ GAUDI2_ENGINE_ID_NIC8_1,
+ GAUDI2_ENGINE_ID_NIC9_0,
+ GAUDI2_ENGINE_ID_NIC9_1,
+ GAUDI2_ENGINE_ID_NIC10_0,
+ GAUDI2_ENGINE_ID_NIC10_1,
+ GAUDI2_ENGINE_ID_NIC11_0,
+ GAUDI2_ENGINE_ID_NIC11_1,
+ GAUDI2_ENGINE_ID_PCIE,
+ GAUDI2_ENGINE_ID_PSOC,
+ GAUDI2_ENGINE_ID_ARC_FARM,
+ GAUDI2_ENGINE_ID_KDMA,
+ GAUDI2_ENGINE_ID_SIZE
+};
+
+/*
+ * ASIC specific PLL index
+ *
+ * Used to retrieve the frequency info of different IPs via HL_INFO_PLL_FREQUENCY under
+ * DRM_IOCTL_HL_INFO IOCTL.
+ * The enums need to be used as an index in struct hl_pll_frequency_info.
+ */
+
+enum hl_goya_pll_index {
+ HL_GOYA_CPU_PLL = 0,
+ HL_GOYA_IC_PLL,
+ HL_GOYA_MC_PLL,
+ HL_GOYA_MME_PLL,
+ HL_GOYA_PCI_PLL,
+ HL_GOYA_EMMC_PLL,
+ HL_GOYA_TPC_PLL,
+ HL_GOYA_PLL_MAX
+};
+
+enum hl_gaudi_pll_index {
+ HL_GAUDI_CPU_PLL = 0,
+ HL_GAUDI_PCI_PLL,
+ HL_GAUDI_SRAM_PLL,
+ HL_GAUDI_HBM_PLL,
+ HL_GAUDI_NIC_PLL,
+ HL_GAUDI_DMA_PLL,
+ HL_GAUDI_MESH_PLL,
+ HL_GAUDI_MME_PLL,
+ HL_GAUDI_TPC_PLL,
+ HL_GAUDI_IF_PLL,
+ HL_GAUDI_PLL_MAX
+};
+
+enum hl_gaudi2_pll_index {
+ HL_GAUDI2_CPU_PLL = 0,
+ HL_GAUDI2_PCI_PLL,
+ HL_GAUDI2_SRAM_PLL,
+ HL_GAUDI2_HBM_PLL,
+ HL_GAUDI2_NIC_PLL,
+ HL_GAUDI2_DMA_PLL,
+ HL_GAUDI2_MESH_PLL,
+ HL_GAUDI2_MME_PLL,
+ HL_GAUDI2_TPC_PLL,
+ HL_GAUDI2_IF_PLL,
+ HL_GAUDI2_VID_PLL,
+ HL_GAUDI2_MSS_PLL,
+ HL_GAUDI2_PLL_MAX
+};
+
+/**
+ * enum hl_goya_dma_direction - Direction of DMA operation inside a LIN_DMA packet that is
+ * submitted to the GOYA's DMA QMAN. This attribute is not relevant
+ * to the H/W but the kernel driver uses it to parse the packet's
+ * addresses and patch/validate them.
+ * @HL_DMA_HOST_TO_DRAM: DMA operation from Host memory to GOYA's DDR.
+ * @HL_DMA_HOST_TO_SRAM: DMA operation from Host memory to GOYA's SRAM.
+ * @HL_DMA_DRAM_TO_SRAM: DMA operation from GOYA's DDR to GOYA's SRAM.
+ * @HL_DMA_SRAM_TO_DRAM: DMA operation from GOYA's SRAM to GOYA's DDR.
+ * @HL_DMA_SRAM_TO_HOST: DMA operation from GOYA's SRAM to Host memory.
+ * @HL_DMA_DRAM_TO_HOST: DMA operation from GOYA's DDR to Host memory.
+ * @HL_DMA_DRAM_TO_DRAM: DMA operation from GOYA's DDR to GOYA's DDR.
+ * @HL_DMA_SRAM_TO_SRAM: DMA operation from GOYA's SRAM to GOYA's SRAM.
+ * @HL_DMA_ENUM_MAX: number of values in enum
+ */
+enum hl_goya_dma_direction {
+ HL_DMA_HOST_TO_DRAM,
+ HL_DMA_HOST_TO_SRAM,
+ HL_DMA_DRAM_TO_SRAM,
+ HL_DMA_SRAM_TO_DRAM,
+ HL_DMA_SRAM_TO_HOST,
+ HL_DMA_DRAM_TO_HOST,
+ HL_DMA_DRAM_TO_DRAM,
+ HL_DMA_SRAM_TO_SRAM,
+ HL_DMA_ENUM_MAX
+};
+
+/**
+ * enum hl_device_status - Device status information.
+ * @HL_DEVICE_STATUS_OPERATIONAL: Device is operational.
+ * @HL_DEVICE_STATUS_IN_RESET: Device is currently undergoing reset.
+ * @HL_DEVICE_STATUS_MALFUNCTION: Device is unusable.
+ * @HL_DEVICE_STATUS_NEEDS_RESET: Device needs reset because auto reset was disabled.
+ * @HL_DEVICE_STATUS_IN_DEVICE_CREATION: Device is operational but its creation is still in
+ * progress.
+ * @HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE: Device is currently undergoing a reset that
+ * was triggered because the user released the device.
+ * @HL_DEVICE_STATUS_LAST: Last status.
+ */
+enum hl_device_status {
+ HL_DEVICE_STATUS_OPERATIONAL,
+ HL_DEVICE_STATUS_IN_RESET,
+ HL_DEVICE_STATUS_MALFUNCTION,
+ HL_DEVICE_STATUS_NEEDS_RESET,
+ HL_DEVICE_STATUS_IN_DEVICE_CREATION,
+ HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE,
+ HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE
+};
+
+enum hl_server_type {
+ HL_SERVER_TYPE_UNKNOWN = 0,
+ HL_SERVER_GAUDI_HLS1 = 1,
+ HL_SERVER_GAUDI_HLS1H = 2,
+ HL_SERVER_GAUDI_TYPE1 = 3,
+ HL_SERVER_GAUDI_TYPE2 = 4,
+ HL_SERVER_GAUDI2_HLS2 = 5,
+ HL_SERVER_GAUDI2_TYPE1 = 7
+};
+
+/*
+ * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
+ *
+ * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event
+ * HL_NOTIFIER_EVENT_UNDEFINED_OPCODE - Indicates undefined operation code
+ * HL_NOTIFIER_EVENT_DEVICE_RESET - Indicates device requires a reset
+ * HL_NOTIFIER_EVENT_CS_TIMEOUT - Indicates CS timeout error
+ * HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable
+ * HL_NOTIFIER_EVENT_USER_ENGINE_ERR - Indicates device engine in error state
+ * HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
+ * HL_NOTIFIER_EVENT_RAZWI - Indicates razwi happened
+ * HL_NOTIFIER_EVENT_PAGE_FAULT - Indicates page fault happened
+ * HL_NOTIFIER_EVENT_CRITICL_HW_ERR - Indicates a HW error that requires SW abort and
+ * HW reset
+ * HL_NOTIFIER_EVENT_CRITICL_FW_ERR - Indicates a FW error that requires SW abort and
+ * HW reset
+ */
+#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
+#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
+#define HL_NOTIFIER_EVENT_DEVICE_RESET (1ULL << 2)
+#define HL_NOTIFIER_EVENT_CS_TIMEOUT (1ULL << 3)
+#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4)
+#define HL_NOTIFIER_EVENT_USER_ENGINE_ERR (1ULL << 5)
+#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
+#define HL_NOTIFIER_EVENT_RAZWI (1ULL << 7)
+#define HL_NOTIFIER_EVENT_PAGE_FAULT (1ULL << 8)
+#define HL_NOTIFIER_EVENT_CRITICL_HW_ERR (1ULL << 9)
+#define HL_NOTIFIER_EVENT_CRITICL_FW_ERR (1ULL << 10)
+
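As a rough illustration (not part of this patch), the sketch below decodes an event mask as would plausibly be read back through the HL_INFO_GET_EVENTS opcode documented further down; the include path is an assumption about where this uapi header gets installed.

#include <stdio.h>
#include <drm/habanalabs_accel.h>	/* install path assumed */

/* Hedged sketch: report which notifier event bits are set in a mask
 * previously obtained via the HL_INFO_GET_EVENTS opcode.
 */
static void decode_hl_events(unsigned long long events)
{
	if (events & HL_NOTIFIER_EVENT_TPC_ASSERT)
		printf("TPC assert\n");
	if (events & HL_NOTIFIER_EVENT_CS_TIMEOUT)
		printf("CS timeout\n");
	if (events & HL_NOTIFIER_EVENT_DEVICE_RESET)
		printf("device requires reset\n");
	if (events & HL_NOTIFIER_EVENT_CRITICL_HW_ERR)
		printf("critical HW error, SW abort and HW reset required\n");
}
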
+/* Opcodes for the management ioctl
+ *
+ * HL_INFO_HW_IP_INFO - Receive information about different IP blocks in the
+ * device.
+ * HL_INFO_HW_EVENTS - Receive an array describing how many times each event
+ * occurred since the last hard reset.
+ * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the
+ * specific context. This is relevant only for devices
+ * where the dram is managed by the kernel driver
+ * HL_INFO_HW_IDLE - Retrieve information about the idle status of each
+ * internal engine.
+ * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
+ * require an open context.
+ * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
+ * over the last period specified by the user.
+ * The period can be between 100ms and 1s, in
+ * a resolution of 100ms. The return value is a
+ * percentage of the utilization rate.
+ * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
+ * event occurred since the driver was loaded.
+ * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
+ * of the device in MHz. The maximum clock rate is
+ * configurable via a sysfs parameter.
+ * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
+ * operations performed on the device since the last
+ * time the driver was loaded.
+ * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
+ * for synchronization.
+ * HL_INFO_CS_COUNTERS - Retrieve command submission counters
+ * HL_INFO_PCI_COUNTERS - Retrieve PCI counters
+ * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
+ * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
+ * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
+ * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
+ * HL_INFO_POWER - Retrieve power information
+ * HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls
+ * HL_INFO_DRAM_REPLACED_ROWS - Retrieve DRAM replaced rows info
+ * HL_INFO_DRAM_PENDING_ROWS - Retrieve DRAM pending rows num
+ * HL_INFO_LAST_ERR_OPEN_DEV_TIME - Retrieve timestamp of the last time the device was opened
+ * and CS timeout or razwi error occurred.
+ * HL_INFO_CS_TIMEOUT_EVENT - Retrieve CS timeout timestamp and its related CS sequence number.
+ * HL_INFO_RAZWI_EVENT - Retrieve parameters of razwi:
+ * Timestamp of razwi.
+ * The address whose access caused the razwi.
+ * Razwi initiator.
+ * Razwi cause, was it a page fault or MMU access error.
+ * May return 0 even though no new data is available, in that case
+ * timestamp will be 0.
+ * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
+ * HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
+ * HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
+ * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
+ * HL_INFO_GET_EVENTS - Retrieve the last occurred events
+ * HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
+ * May return 0 even though no new data is available, in that case
+ * timestamp will be 0.
+ * HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
+ * HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
+ * May return 0 even though no new data is available, in that case
+ * timestamp will be 0.
+ * HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
+ * HL_INFO_FW_GENERIC_REQ - Send generic request to FW.
+ * HL_INFO_HW_ERR_EVENT - Retrieve information on the reported HW error.
+ * May return 0 even though no new data is available, in that case
+ * timestamp will be 0.
+ * HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error.
+ * May return 0 even though no new data is available, in that case
+ * timestamp will be 0.
+ * HL_INFO_USER_ENGINE_ERR_EVENT - Retrieve the last engine id that reported an error.
+ * HL_INFO_DEV_SIGNED - Retrieve the device information signed by the secured device.
+ */
+#define HL_INFO_HW_IP_INFO 0
+#define HL_INFO_HW_EVENTS 1
+#define HL_INFO_DRAM_USAGE 2
+#define HL_INFO_HW_IDLE 3
+#define HL_INFO_DEVICE_STATUS 4
+#define HL_INFO_DEVICE_UTILIZATION 6
+#define HL_INFO_HW_EVENTS_AGGREGATE 7
+#define HL_INFO_CLK_RATE 8
+#define HL_INFO_RESET_COUNT 9
+#define HL_INFO_TIME_SYNC 10
+#define HL_INFO_CS_COUNTERS 11
+#define HL_INFO_PCI_COUNTERS 12
+#define HL_INFO_CLK_THROTTLE_REASON 13
+#define HL_INFO_SYNC_MANAGER 14
+#define HL_INFO_TOTAL_ENERGY 15
+#define HL_INFO_PLL_FREQUENCY 16
+#define HL_INFO_POWER 17
+#define HL_INFO_OPEN_STATS 18
+#define HL_INFO_DRAM_REPLACED_ROWS 21
+#define HL_INFO_DRAM_PENDING_ROWS 22
+#define HL_INFO_LAST_ERR_OPEN_DEV_TIME 23
+#define HL_INFO_CS_TIMEOUT_EVENT 24
+#define HL_INFO_RAZWI_EVENT 25
+#define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES 26
+#define HL_INFO_SECURED_ATTESTATION 27
+#define HL_INFO_REGISTER_EVENTFD 28
+#define HL_INFO_UNREGISTER_EVENTFD 29
+#define HL_INFO_GET_EVENTS 30
+#define HL_INFO_UNDEFINED_OPCODE_EVENT 31
+#define HL_INFO_ENGINE_STATUS 32
+#define HL_INFO_PAGE_FAULT_EVENT 33
+#define HL_INFO_USER_MAPPINGS 34
+#define HL_INFO_FW_GENERIC_REQ 35
+#define HL_INFO_HW_ERR_EVENT 36
+#define HL_INFO_FW_ERR_EVENT 37
+#define HL_INFO_USER_ENGINE_ERR_EVENT 38
+#define HL_INFO_DEV_SIGNED 40
+
+#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_CARD_NAME_MAX_LEN 16
+
+/* Maximum buffer size for retrieving engines status */
+#define HL_ENGINES_DATA_MAX_SIZE SZ_1M
+
+/**
+ * struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
+ * @sram_base_address: The first SRAM physical base address that is free to be
+ * used by the user.
+ * @dram_base_address: The first DRAM virtual or physical base address that is
+ * free to be used by the user.
+ * @dram_size: The DRAM size that is available to the user.
+ * @sram_size: The SRAM size that is available to the user.
+ * @num_of_events: The number of events that can be received from the f/w. This
+ * is needed so the user knows the size of the h/w events
+ * array that needs to be passed to the kernel when fetching
+ * the event counters.
+ * @device_id: PCI device ID of the ASIC.
+ * @module_id: Module ID of the ASIC for mezzanine cards in servers
+ * (From OCP spec).
+ * @decoder_enabled_mask: Bit-mask that represents which decoders are enabled.
+ * @first_available_interrupt_id: The first available interrupt ID for the user
+ * to be used when it works with user interrupts.
+ * Relevant for Gaudi2 and later.
+ * @server_type: Server type that the Gaudi ASIC is currently installed in.
+ * The value is according to enum hl_server_type
+ * @cpld_version: CPLD version on the board.
+ * @psoc_pci_pll_nr: PCI PLL NR value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_nf: PCI PLL NF value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_od: PCI PLL OD value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_div_factor: PCI PLL DIV factor value. Needed by the profiler
+ * in some ASICs.
+ * @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant
+ * for Goya/Gaudi only.
+ * @dram_enabled: Whether the DRAM is enabled.
+ * @security_enabled: Whether security is enabled on device.
+ * @mme_master_slave_mode: Indicates whether the MME is working in master/slave
+ * configuration. Relevant for Gaudi2 and later.
+ * @cpucp_version: The CPUCP f/w version.
+ * @card_name: The card name as passed by the f/w.
+ * @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled.
+ * Relevant for Gaudi2 and later.
+ * @dram_page_size: The DRAM physical page size.
+ * @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled.
+ * Relevant for Gaudi2 and later.
+ * @number_of_user_interrupts: The number of interrupts that are available to the userspace
+ * application to use. Relevant for Gaudi2 and later.
+ * @device_mem_alloc_default_page_size: default page size used in device memory allocation.
+ * @revision_id: PCI revision ID of the ASIC.
+ * @tpc_interrupt_id: interrupt id for TPC to use in order to raise events towards the host.
+ * @rotator_enabled_mask: Bit-mask that represents which rotators are enabled.
+ * Relevant for Gaudi3 and later.
+ * @engine_core_interrupt_reg_addr: interrupt register address for engine core to use
+ * in order to raise events toward FW.
+ * @reserved_dram_size: DRAM size reserved for driver and firmware.
+ */
+struct hl_info_hw_ip_info {
+ __u64 sram_base_address;
+ __u64 dram_base_address;
+ __u64 dram_size;
+ __u32 sram_size;
+ __u32 num_of_events;
+ __u32 device_id;
+ __u32 module_id;
+ __u32 decoder_enabled_mask;
+ __u16 first_available_interrupt_id;
+ __u16 server_type;
+ __u32 cpld_version;
+ __u32 psoc_pci_pll_nr;
+ __u32 psoc_pci_pll_nf;
+ __u32 psoc_pci_pll_od;
+ __u32 psoc_pci_pll_div_factor;
+ __u8 tpc_enabled_mask;
+ __u8 dram_enabled;
+ __u8 security_enabled;
+ __u8 mme_master_slave_mode;
+ __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
+ __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
+ __u64 tpc_enabled_mask_ext;
+ __u64 dram_page_size;
+ __u32 edma_enabled_mask;
+ __u16 number_of_user_interrupts;
+ __u8 reserved1;
+ __u8 reserved2;
+ __u64 reserved3;
+ __u64 device_mem_alloc_default_page_size;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u32 reserved6;
+ __u8 reserved7;
+ __u8 revision_id;
+ __u16 tpc_interrupt_id;
+ __u32 rotator_enabled_mask;
+ __u32 reserved9;
+ __u64 engine_core_interrupt_reg_addr;
+ __u64 reserved_dram_size;
+};
+
+struct hl_info_dram_usage {
+ __u64 dram_free_mem;
+ __u64 ctx_dram_mem;
+};
+
+#define HL_BUSY_ENGINES_MASK_EXT_SIZE 4
+
+struct hl_info_hw_idle {
+ __u32 is_idle;
+ /*
+ * Bitmask of busy engines.
+ * Bits definition is according to `enum <chip>_engine_id'.
+ */
+ __u32 busy_engines_mask;
+
+ /*
+ * Extended Bitmask of busy engines.
+ * Bits definition is according to `enum <chip>_engine_id'.
+ */
+ __u64 busy_engines_mask_ext[HL_BUSY_ENGINES_MASK_EXT_SIZE];
+};
+
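A minimal sketch (not from the patch) of how the extended mask might be consumed, assuming the bit layout follows the engine-id enums above (e.g. enum gaudi_engine_id for Gaudi):

#include <drm/habanalabs_accel.h>	/* install path assumed */

/* Returns non-zero if the engine with the given id is busy. Bit
 * positions follow enum <chip>_engine_id, per the comments above.
 */
static int hl_engine_is_busy(const struct hl_info_hw_idle *idle,
			     unsigned int engine_id)
{
	return !!(idle->busy_engines_mask_ext[engine_id / 64] &
		  (1ULL << (engine_id % 64)));
}
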
+struct hl_info_device_status {
+ __u32 status;
+ __u32 pad;
+};
+
+struct hl_info_device_utilization {
+ __u32 utilization;
+ __u32 pad;
+};
+
+struct hl_info_clk_rate {
+ __u32 cur_clk_rate_mhz;
+ __u32 max_clk_rate_mhz;
+};
+
+struct hl_info_reset_count {
+ __u32 hard_reset_cnt;
+ __u32 soft_reset_cnt;
+};
+
+struct hl_info_time_sync {
+ __u64 device_time;
+ __u64 host_time;
+ __u64 tsc_time;
+};
+
+/**
+ * struct hl_info_pci_counters - pci counters
+ * @rx_throughput: PCI rx throughput KBps
+ * @tx_throughput: PCI tx throughput KBps
+ * @replay_cnt: PCI replay counter
+ */
+struct hl_info_pci_counters {
+ __u64 rx_throughput;
+ __u64 tx_throughput;
+ __u64 replay_cnt;
+};
+
+enum hl_clk_throttling_type {
+ HL_CLK_THROTTLE_TYPE_POWER,
+ HL_CLK_THROTTLE_TYPE_THERMAL,
+ HL_CLK_THROTTLE_TYPE_MAX
+};
+
+/* clk_throttling_reason masks */
+#define HL_CLK_THROTTLE_POWER (1 << HL_CLK_THROTTLE_TYPE_POWER)
+#define HL_CLK_THROTTLE_THERMAL (1 << HL_CLK_THROTTLE_TYPE_THERMAL)
+
+/**
+ * struct hl_info_clk_throttle - clock throttling reason
+ * @clk_throttling_reason: each bit represents a clk throttling reason
+ * @clk_throttling_timestamp_us: represents CPU timestamp in microseconds of the start-event
+ * @clk_throttling_duration_ns: the clock throttle time in nanosec
+ */
+struct hl_info_clk_throttle {
+ __u32 clk_throttling_reason;
+ __u32 pad;
+ __u64 clk_throttling_timestamp_us[HL_CLK_THROTTLE_TYPE_MAX];
+ __u64 clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_MAX];
+};
+
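For instance, a caller holding a filled-in struct hl_info_clk_throttle could report each active throttling reason along with its duration like this (a sketch, not part of the patch; the include path is assumed):

#include <stdio.h>
#include <drm/habanalabs_accel.h>	/* install path assumed */

static void print_throttle(const struct hl_info_clk_throttle *t)
{
	/* The duration/timestamp arrays are indexed by throttle type. */
	if (t->clk_throttling_reason & HL_CLK_THROTTLE_POWER)
		printf("power throttling, duration %llu ns\n",
		       (unsigned long long)
		       t->clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_POWER]);
	if (t->clk_throttling_reason & HL_CLK_THROTTLE_THERMAL)
		printf("thermal throttling, duration %llu ns\n",
		       (unsigned long long)
		       t->clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_THERMAL]);
}
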
+/**
+ * struct hl_info_energy - device energy information
+ * @total_energy_consumption: total device energy consumption
+ */
+struct hl_info_energy {
+ __u64 total_energy_consumption;
+};
+
+#define HL_PLL_NUM_OUTPUTS 4
+
+struct hl_pll_frequency_info {
+ __u16 output[HL_PLL_NUM_OUTPUTS];
+};
+
+/**
+ * struct hl_open_stats_info - device open statistics information
+ * @open_counter: ever growing counter, increased on each successful dev open
+ * @last_open_period_ms: duration (ms) device was open last time
+ * @is_compute_ctx_active: Whether there is an active compute context executing
+ * @compute_ctx_in_release: true if the current compute context is being released
+ */
+struct hl_open_stats_info {
+ __u64 open_counter;
+ __u64 last_open_period_ms;
+ __u8 is_compute_ctx_active;
+ __u8 compute_ctx_in_release;
+ __u8 pad[6];
+};
+
+/**
+ * struct hl_power_info - power information
+ * @power: power consumption
+ */
+struct hl_power_info {
+ __u64 power;
+};
+
+/**
+ * struct hl_info_sync_manager - sync manager information
+ * @first_available_sync_object: first available sob
+ * @first_available_monitor: first available monitor
+ * @first_available_cq: first available cq
+ */
+struct hl_info_sync_manager {
+ __u32 first_available_sync_object;
+ __u32 first_available_monitor;
+ __u32 first_available_cq;
+ __u32 reserved;
+};
+
+/**
+ * struct hl_info_cs_counters - command submission counters
+ * @total_out_of_mem_drop_cnt: total dropped due to memory allocation issue
+ * @ctx_out_of_mem_drop_cnt: context dropped due to memory allocation issue
+ * @total_parsing_drop_cnt: total dropped due to error in packet parsing
+ * @ctx_parsing_drop_cnt: context dropped due to error in packet parsing
+ * @total_queue_full_drop_cnt: total dropped due to queue full
+ * @ctx_queue_full_drop_cnt: context dropped due to queue full
+ * @total_device_in_reset_drop_cnt: total dropped due to device in reset
+ * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset
+ * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight
+ * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight
+ * @total_validation_drop_cnt: total dropped due to validation error
+ * @ctx_validation_drop_cnt: context dropped due to validation error
+ */
+struct hl_info_cs_counters {
+ __u64 total_out_of_mem_drop_cnt;
+ __u64 ctx_out_of_mem_drop_cnt;
+ __u64 total_parsing_drop_cnt;
+ __u64 ctx_parsing_drop_cnt;
+ __u64 total_queue_full_drop_cnt;
+ __u64 ctx_queue_full_drop_cnt;
+ __u64 total_device_in_reset_drop_cnt;
+ __u64 ctx_device_in_reset_drop_cnt;
+ __u64 total_max_cs_in_flight_drop_cnt;
+ __u64 ctx_max_cs_in_flight_drop_cnt;
+ __u64 total_validation_drop_cnt;
+ __u64 ctx_validation_drop_cnt;
+};
+
+/**
+ * struct hl_info_last_err_open_dev_time - last error boot information.
+ * @timestamp: timestamp of last time the device was opened and error occurred.
+ */
+struct hl_info_last_err_open_dev_time {
+ __s64 timestamp;
+};
+
+/**
+ * struct hl_info_cs_timeout_event - last CS timeout information.
+ * @timestamp: timestamp when last CS timeout event occurred.
+ * @seq: sequence number of last CS timeout event.
+ */
+struct hl_info_cs_timeout_event {
+ __s64 timestamp;
+ __u64 seq;
+};
+
+#define HL_RAZWI_NA_ENG_ID U16_MAX
+#define HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR 128
+#define HL_RAZWI_READ BIT(0)
+#define HL_RAZWI_WRITE BIT(1)
+#define HL_RAZWI_LBW BIT(2)
+#define HL_RAZWI_HBW BIT(3)
+#define HL_RAZWI_RR BIT(4)
+#define HL_RAZWI_ADDR_DEC BIT(5)
+
+/**
+ * struct hl_info_razwi_event - razwi information.
+ * @timestamp: timestamp of razwi.
+ * @addr: address whose access caused the razwi.
+ * @engine_id: engine id of the razwi initiator. If it was initiated by an engine that does not
+ * have an engine id, it will be set to HL_RAZWI_NA_ENG_ID. If there are several possible
+ * engines which caused the razwi, it will hold all of them.
+ * @num_of_possible_engines: contains the number of possible engine ids. In some asics, the razwi
+ * indication might be common to several engines and there is no way to get the
+ * exact engine, in which case the engine_id array will be filled with all the
+ * engines that could have caused this razwi. Also, in gaudi there might be no
+ * indication of a specific engine at all, in which case the value of this
+ * parameter will be zero.
+ * @flags: bitmask for additional data: HL_RAZWI_READ - razwi caused by read operation
+ * HL_RAZWI_WRITE - razwi caused by write operation
+ * HL_RAZWI_LBW - razwi caused by lbw fabric transaction
+ * HL_RAZWI_HBW - razwi caused by hbw fabric transaction
+ * HL_RAZWI_RR - razwi caused by range register
+ * HL_RAZWI_ADDR_DEC - razwi caused by address decode error
+ * Note: this data is not supported by all asics; in that case, the relevant bits will not
+ * be set.
+ */
+struct hl_info_razwi_event {
+ __s64 timestamp;
+ __u64 addr;
+ __u16 engine_id[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR];
+ __u16 num_of_possible_engines;
+ __u8 flags;
+ __u8 pad[5];
+};
+
+#define MAX_QMAN_STREAMS_INFO 4
+#define OPCODE_INFO_MAX_ADDR_SIZE 8
+/**
+ * struct hl_info_undefined_opcode_event - info about last undefined opcode error
+ * @timestamp: timestamp of the undefined opcode error
+ * @cb_addr_streams: CB addresses (per stream) that currently exist in the PQ
+ * entries. In case all streams array entries are
+ * filled with values, it means the execution was in Lower-CP.
+ * @cq_addr: the address of the current handled command buffer
+ * @cq_size: the size of the current handled command buffer
+ * @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array.
+ * Should be equal to 1 in case of an undefined opcode
+ * in the Upper-CP (specific stream) and equal to 4 in case
+ * of an undefined opcode in the Lower-CP.
+ * @engine_id: engine-id that the error occurred on
+ * @stream_id: the stream id the error occurred on. In case the stream equals
+ * MAX_QMAN_STREAMS_INFO, it means the error occurred on a Lower-CP.
+ */
+struct hl_info_undefined_opcode_event {
+ __s64 timestamp;
+ __u64 cb_addr_streams[MAX_QMAN_STREAMS_INFO][OPCODE_INFO_MAX_ADDR_SIZE];
+ __u64 cq_addr;
+ __u32 cq_size;
+ __u32 cb_addr_streams_len;
+ __u32 engine_id;
+ __u32 stream_id;
+};
+
+/**
+ * struct hl_info_hw_err_event - info about HW error
+ * @timestamp: timestamp of error occurrence
+ * @event_id: The async event ID (specific to each device type).
+ * @pad: size padding for u64 granularity.
+ */
+struct hl_info_hw_err_event {
+ __s64 timestamp;
+ __u16 event_id;
+ __u16 pad[3];
+};
+
+/* FW error definition for event_type in struct hl_info_fw_err_event */
+enum hl_info_fw_err_type {
+ HL_INFO_FW_HEARTBEAT_ERR,
+ HL_INFO_FW_REPORTED_ERR,
+};
+
+/**
+ * struct hl_info_fw_err_event - info about FW error
+ * @timestamp: time-stamp of error occurrence
+ * @err_type: The type of event as defined in hl_info_fw_err_type.
+ * @event_id: The async event ID (specific to each device type, applicable only when event type is
+ * HL_INFO_FW_REPORTED_ERR).
+ * @pad: size padding for u64 granularity.
+ */
+struct hl_info_fw_err_event {
+ __s64 timestamp;
+ __u16 err_type;
+ __u16 event_id;
+ __u32 pad;
+};
+
+/**
+ * struct hl_info_engine_err_event - engine error info
+ * @timestamp: time-stamp of error occurrence
+ * @engine_id: engine id who reported the error.
+ * @error_count: Amount of errors reported.
+ * @pad: size padding for u64 granularity.
+ */
+struct hl_info_engine_err_event {
+ __s64 timestamp;
+ __u16 engine_id;
+ __u16 error_count;
+ __u32 pad;
+};
+
+/**
+ * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
+ * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
+ * (e.g. 0x2100000 means that 1MB and 32MB pages are supported).
+ */
+struct hl_info_dev_memalloc_page_sizes {
+ __u64 page_order_bitmask;
+};
+
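The worked example in the comment above (0x2100000, i.e. bits 20 and 25 set, meaning 1MB and 32MB pages) generalizes: a set bit k means a supported page size of 1 << k bytes. A minimal sketch:

#include <stdio.h>

static void print_supported_page_sizes(unsigned long long page_order_bitmask)
{
	unsigned int k;

	/* Each set bit k corresponds to a supported page size of 2^k bytes,
	 * e.g. 0x2100000 yields 1MB (bit 20) and 32MB (bit 25).
	 */
	for (k = 0; k < 64; k++)
		if (page_order_bitmask & (1ULL << k))
			printf("supported page size: %llu bytes\n", 1ULL << k);
}
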
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+#define SEC_DEV_INFO_BUF_SZ 5120
+
+/*
+ * struct hl_info_sec_attest - attestation report of the boot
+ * @nonce: number only used once; a random number provided by the host. It is also passed to
+ * the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_quote: attestation report data structure
+ * @quote_sig: signature structure of the attestation report
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate: certificate for the attestation signing key
+ */
+struct hl_info_sec_attest {
+ __u32 nonce;
+ __u16 pcr_quote_len;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __u8 quote_sig_len;
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 pad0[2];
+};
+
+/*
+ * struct hl_info_signed - device information signed by a secured device.
+ * @nonce: number only used once; a random number provided by the host. It is also passed to
+ * the quote command as qualifying data.
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @public_data: public key for the signed info data (outPublic + name + qualifiedName)
+ * @certificate: certificate for the signing key
+ * @info_sig: signature of the info + nonce data.
+ * @dev_info_len: length of device info (bytes)
+ * @dev_info: device info as byte array.
+ */
+struct hl_info_signed {
+ __u32 nonce;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 info_sig_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __u16 dev_info_len;
+ __u8 dev_info[SEC_DEV_INFO_BUF_SZ];
+ __u8 pad[2];
+};
+
+/**
+ * struct hl_page_fault_info - page fault information.
+ * @timestamp: timestamp of page fault.
+ * @addr: address which accessing it caused page fault.
+ * @engine_id: engine id which caused the page fault, supported only in gaudi3.
+ */
+struct hl_page_fault_info {
+ __s64 timestamp;
+ __u64 addr;
+ __u16 engine_id;
+ __u8 pad[6];
+};
+
+/**
+ * struct hl_user_mapping - user mapping information.
+ * @dev_va: device virtual address.
+ * @size: virtual address mapping size.
+ */
+struct hl_user_mapping {
+ __u64 dev_va;
+ __u64 size;
+};
+
+enum gaudi_dcores {
+ HL_GAUDI_WS_DCORE,
+ HL_GAUDI_WN_DCORE,
+ HL_GAUDI_EN_DCORE,
+ HL_GAUDI_ES_DCORE
+};
+
+/**
+ * struct hl_info_args - Main structure to retrieve device related information.
+ * @return_pointer: User space address of the relevant structure related to HL_INFO_* operation
+ * mentioned in @op.
+ * @return_size: Size of the structure used in @return_pointer, just like "size" in "snprintf", it
+ * limits how many bytes the kernel can write. For hw_events array, the size should be
+ * hl_info_hw_ip_info.num_of_events * sizeof(__u32).
+ * @op: Defines which type of information to be retrieved. Refer HL_INFO_* for details.
+ * @dcore_id: DCORE id for which the information is relevant (for Gaudi refer to enum gaudi_dcores).
+ * @ctx_id: Context ID of the user. Currently not in use.
+ * @period_ms: Period value, in milliseconds, for utilization rate in range 100ms - 1000ms in 100 ms
+ * resolution. Currently not in use.
+ * @pll_index: Index as defined in hl_<asic type>_pll_index enumeration.
+ * @eventfd: event file descriptor for event notifications.
+ * @user_buffer_actual_size: Actual data size which was copied to the user-allocated buffer by
+ * the driver. It is possible for the user to allocate a buffer larger than
+ * needed, hence this variable is updated so the user will know the exact
+ * number of bytes copied to the buffer by the kernel.
+ * @sec_attest_nonce: Nonce number used for attestation report.
+ * @array_size: Number of array members copied to user buffer.
+ * Relevant for HL_INFO_USER_MAPPINGS info ioctl.
+ * @fw_sub_opcode: generic requests sub opcodes.
+ * @pad: Padding to 64 bit.
+ */
+struct hl_info_args {
+ __u64 return_pointer;
+ __u32 return_size;
+ __u32 op;
+
+ union {
+ __u32 dcore_id;
+ __u32 ctx_id;
+ __u32 period_ms;
+ __u32 pll_index;
+ __u32 eventfd;
+ __u32 user_buffer_actual_size;
+ __u32 sec_attest_nonce;
+ __u32 array_size;
+ __u32 fw_sub_opcode;
+ };
+
+ __u32 pad;
+};
+
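Putting the pieces together, here is a hedged sketch of the retrieval flow for HL_INFO_HW_IP_INFO through the DRM_IOCTL_HL_INFO ioctl mentioned earlier (the ioctl macro is defined elsewhere in this header; the include path and the already-open fd are assumptions):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/habanalabs_accel.h>	/* install path assumed */

/* Query hardware IP information from an already-open device fd. */
static int query_hw_ip(int fd, struct hl_info_hw_ip_info *hw_ip)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.return_pointer = (__u64)(uintptr_t)hw_ip;
	args.return_size = sizeof(*hw_ip);	/* caps how much the kernel writes */
	args.op = HL_INFO_HW_IP_INFO;

	return ioctl(fd, DRM_IOCTL_HL_INFO, &args);
}
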
+/* Opcode to create a new command buffer */
+#define HL_CB_OP_CREATE 0
+/* Opcode to destroy previously created command buffer */
+#define HL_CB_OP_DESTROY 1
+/* Opcode to retrieve information about a command buffer */
+#define HL_CB_OP_INFO 2
+
+/* 2MB minus 32 bytes for 2xMSG_PROT */
+#define HL_MAX_CB_SIZE (0x200000 - 32)
+
+/* Indicates whether the command buffer should be mapped to the device's MMU */
+#define HL_CB_FLAGS_MAP 0x1
+
+/* Used with HL_CB_OP_INFO opcode to get the device va address for kernel mapped CB */
+#define HL_CB_FLAGS_GET_DEVICE_VA 0x2
+
+struct hl_cb_in {
+ /* Handle of CB or 0 if we want to create one */
+ __u64 cb_handle;
+ /* HL_CB_OP_* */
+ __u32 op;
+
+ /* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
+ * will be allocated, regardless of this parameter's value, is PAGE_SIZE
+ */
+ __u32 cb_size;
+
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ /* HL_CB_FLAGS_* */
+ __u32 flags;
+};
+
+struct hl_cb_out {
+ union {
+ /* Handle of CB */
+ __u64 cb_handle;
+
+ union {
+ /* Information about CB */
+ struct {
+ /* Usage count of CB */
+ __u32 usage_cnt;
+ __u32 pad;
+ };
+
+ /* CB mapped address to device MMU */
+ __u64 device_va;
+ };
+ };
+};
+
+union hl_cb_args {
+ struct hl_cb_in in;
+ struct hl_cb_out out;
+};
+
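As a usage sketch, creating a CB and getting its handle back could look like the following; the ioctl macro name DRM_IOCTL_HL_CB is an assumption here (the command-buffer ioctl is defined elsewhere in this header), as is the include path:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/habanalabs_accel.h>	/* install path assumed */

static int create_cb(int fd, __u32 size, __u64 *handle)
{
	union hl_cb_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_CREATE;
	args.in.cb_size = size;	/* <= HL_MAX_CB_SIZE; at least PAGE_SIZE is allocated */

	/* DRM_IOCTL_HL_CB is an assumption; see the ioctl definitions
	 * elsewhere in this header.
	 */
	if (ioctl(fd, DRM_IOCTL_HL_CB, &args))
		return -1;

	*handle = args.out.cb_handle;
	return 0;
}
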
+/* HL_CS_CHUNK_FLAGS_ values
+ *
+ * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB:
+ * Indicates if the CB was allocated and mapped by userspace
+ * (relevant to Gaudi2 and later). A user-allocated CB is a command buffer,
+ * allocated by the user, via malloc (or similar). After allocating the
+ * CB, the user invokes the "memory ioctl" to map the user memory into a
+ * device virtual address. The user provides this address via the
+ * cb_handle field. The interface provides the ability to create
+ * large CBs, which aren't limited to HL_MAX_CB_SIZE. Therefore, it
+ * increases the PCI-DMA queues' throughput. This CB allocation method
+ * also reduces the use of the Linux DMA-able memory pool, which is
+ * limited and used by other Linux sub-systems.
+ */
+#define HL_CS_CHUNK_FLAGS_USER_ALLOC_CB 0x1
+
+/*
+ * This structure size must always be fixed to 64-bytes for backward
+ * compatibility
+ */
+struct hl_cs_chunk {
+ union {
+ /* Goya/Gaudi:
+ * For external queue, this represents a Handle of CB on the
+ * Host.
+ * For internal queue in Goya, this represents an SRAM or
+ * a DRAM address of the internal CB. In Gaudi, this might also
+ * represent a mapped host address of the CB.
+ *
+ * Gaudi2 onwards:
+ * For H/W queue, this represents either a Handle of CB on the
+ * Host, or an SRAM, a DRAM, or a mapped host address of the CB.
+ *
+ * A mapped host address is in the device address space, after
+ * a host address was mapped by the device MMU.
+ */
+ __u64 cb_handle;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set
+ * This holds the address of an array of u64 values that contain
+ * the signal CS sequence numbers. The wait described by
+ * this job will listen on all those signals
+ * (wait event per signal)
+ */
+ __u64 signal_seq_arr;
+
+ /*
+ * Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set
+ * along with HL_CS_FLAGS_ENCAP_SIGNALS.
+ * This is the CS sequence which has the encapsulated signals.
+ */
+ __u64 encaps_signal_seq;
+ };
+
+ /* Index of queue to put the CB on */
+ __u32 queue_index;
+
+ union {
+ /*
+ * Size of command buffer with valid packets
+ * Can be smaller than the actual CB size
+ */
+ __u32 cb_size;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set.
+ * Number of entries in signal_seq_arr
+ */
+ __u32 num_signal_seq_arr;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set along
+ * with HL_CS_FLAGS_ENCAP_SIGNALS
+ * This sets the signals range that the user wants to wait for
+ * out of the whole reserved signals range.
+ * E.g. if the signals range is 20, and the user doesn't want
+ * to wait for signal 8, the offset is set to 7, then the
+ * API is called again with 9 and so on until 20.
+ */
+ __u32 encaps_signal_offset;
+ };
+
+ /* HL_CS_CHUNK_FLAGS_* */
+ __u32 cs_chunk_flags;
+
+ /* Relevant only when HL_CS_FLAGS_COLLECTIVE_WAIT is set.
+ * This holds the collective engine ID. The wait described by this job
+ * will sync with this engine and with all NICs before completion.
+ */
+ __u32 collective_engine_id;
+
+ /* Align structure to 64 bytes */
+ __u32 pad[10];
+};
+
+/* SIGNAL/WAIT/COLLECTIVE_WAIT flags are mutually exclusive */
+#define HL_CS_FLAGS_FORCE_RESTORE 0x1
+#define HL_CS_FLAGS_SIGNAL 0x2
+#define HL_CS_FLAGS_WAIT 0x4
+#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8
+
+#define HL_CS_FLAGS_TIMESTAMP 0x20
+#define HL_CS_FLAGS_STAGED_SUBMISSION 0x40
+#define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80
+#define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100
+#define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200
+#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400
+
+/*
+ * The encapsulated signals CS is merged into the existing CS ioctls.
+ * In order to use this feature, the following procedure must be followed:
+ * 1. Reserve signals: set the CS type to HL_CS_FLAGS_RESERVE_SIGNALS_ONLY.
+ * The output of this API will be the SOB offset from CFG_BASE.
+ * This address will be used to patch CB cmds to do the signaling for this
+ * SOB by incrementing its value.
+ * For reverting the reservation, use the HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY
+ * CS type. Note that this might fail if the SOB value went out of sync,
+ * in case another signaling request to the same SOB occurred between the
+ * reserve and unreserve calls.
+ * 2. Use the staged CS to do the encapsulated signaling jobs.
+ * Use HL_CS_FLAGS_STAGED_SUBMISSION and HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
+ * along with the HL_CS_FLAGS_ENCAP_SIGNALS flag, and set the encaps_signal_offset
+ * field. This offset allows the app to wait on part of the reserved signals.
+ * 3. Use a WAIT/COLLECTIVE WAIT CS along with the HL_CS_FLAGS_ENCAP_SIGNALS flag
+ * to wait for the encapsulated signals.
+ */
+#define HL_CS_FLAGS_ENCAP_SIGNALS 0x800
+#define HL_CS_FLAGS_RESERVE_SIGNALS_ONLY 0x1000
+#define HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY 0x2000
+
+/*
+ * The engine cores CS is merged into the existing CS ioctls.
+ * Use it to control the engine cores mode.
+ */
+#define HL_CS_FLAGS_ENGINE_CORE_COMMAND 0x4000
+
+/*
+ * The flush HBW PCI writes is merged into the existing CS ioctls.
+ * Used to flush all HBW PCI writes.
+ * This is a blocking operation and for this reason the user shall not use
+ * the returned sequence number (which will be invalid anyway)
+ */
+#define HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES 0x8000
+
+/*
+ * The engines CS is merged into the existing CS ioctls.
+ * Use it to control engines modes.
+ */
+#define HL_CS_FLAGS_ENGINES_COMMAND 0x10000
+
+#define HL_CS_STATUS_SUCCESS 0
+
+#define HL_MAX_JOBS_PER_CS 512
+
+/*
+ * enum hl_engine_command - engine command
+ *
+ * @HL_ENGINE_CORE_HALT: engine core halt
+ * @HL_ENGINE_CORE_RUN: engine core run
+ * @HL_ENGINE_STALL: stall the user engine(s)
+ * @HL_ENGINE_RESUME: resume the user engine(s)
+ */
+enum hl_engine_command {
+ HL_ENGINE_CORE_HALT = 1,
+ HL_ENGINE_CORE_RUN = 2,
+ HL_ENGINE_STALL = 3,
+ HL_ENGINE_RESUME = 4,
+ HL_ENGINE_COMMAND_MAX
+};
+
+struct hl_cs_in {
+
+ union {
+ struct {
+ /* Address of an array of hl_cs_chunk for the restore phase */
+ __u64 chunks_restore;
+
+ /* Address of an array of hl_cs_chunk for the execution phase */
+ __u64 chunks_execute;
+ };
+
+ /* Valid only when HL_CS_FLAGS_ENGINE_CORE_COMMAND is set */
+ struct {
+ /* Address of an array of u32 engine core IDs */
+ __u64 engine_cores;
+
+ /* number of engine cores in engine_cores array */
+ __u32 num_engine_cores;
+
+ /* the core command to be sent towards engine cores */
+ __u32 core_command;
+ };
+
+ /* Valid only when HL_CS_FLAGS_ENGINES_COMMAND is set */
+ struct {
+ /* Address of an array of u32 engine IDs */
+ __u64 engines;
+
+ /* number of engines in engines array */
+ __u32 num_engines;
+
+ /* the engine command to be sent towards engines */
+ __u32 engine_command;
+ };
+ };
+
+ union {
+ /*
+ * Sequence number of a staged submission CS
+ * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set and
+ * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST is unset.
+ */
+ __u64 seq;
+
+ /*
+ * Encapsulated signals handle id
+ * Valid for two flows:
+ * 1. CS with encapsulated signals:
+ * when HL_CS_FLAGS_STAGED_SUBMISSION and
+ * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
+ * and HL_CS_FLAGS_ENCAP_SIGNALS are set.
+ * 2. unreserve signals:
+ * valid when HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY is set.
+ */
+ __u32 encaps_sig_handle_id;
+
+ /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
+ struct {
+ /* Encapsulated signals number */
+ __u32 encaps_signals_count;
+
+ /* Encapsulated signals queue index (stream) */
+ __u32 encaps_signals_q_idx;
+ };
+ };
+
+ /* Number of chunks in restore phase array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
+ __u32 num_chunks_restore;
+
+ /* Number of chunks in execution array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
+ __u32 num_chunks_execute;
+
+ /* timeout in seconds - valid only if HL_CS_FLAGS_CUSTOM_TIMEOUT
+ * is set
+ */
+ __u32 timeout;
+
+ /* HL_CS_FLAGS_* */
+ __u32 cs_flags;
+
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u8 pad[4];
+};
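As an illustration of the engine-core command fields above, a hedged sketch that halts two cores; the core IDs are ASIC-specific placeholders and the include path is an assumption:

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include "habanalabs.h"  /* include path is an assumption */

        /* Send HL_ENGINE_CORE_HALT to two engine cores (sketch). */
        static int halt_two_cores(int fd)
        {
                __u32 cores[2] = { 0, 1 };      /* placeholder ASIC core IDs */
                union hl_cs_args args;

                memset(&args, 0, sizeof(args));
                args.in.engine_cores = (__u64)(uintptr_t)cores;
                args.in.num_engine_cores = 2;
                args.in.core_command = HL_ENGINE_CORE_HALT;
                args.in.cs_flags = HL_CS_FLAGS_ENGINE_CORE_COMMAND;

                return ioctl(fd, DRM_IOCTL_HL_CS, &args);
        }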
+
+struct hl_cs_out {
+ union {
+ /*
+ * seq holds the sequence number of the CS to pass to wait
+ * ioctl. All values are valid except for 0 and ULLONG_MAX
+ */
+ __u64 seq;
+
+ /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
+ struct {
+ /* This is the reserved signal handle id */
+ __u32 handle_id;
+
+ /* This is the signals count */
+ __u32 count;
+ };
+ };
+
+ /* HL_CS_STATUS */
+ __u32 status;
+
+ /*
+ * SOB base address offset
+ * Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY or HL_CS_FLAGS_SIGNAL is set
+ */
+ __u32 sob_base_addr_offset;
+
+ /*
+ * Count of completed signals in SOB before current signal submission.
+ * Valid only when both HL_CS_FLAGS_ENCAP_SIGNALS and
+ * HL_CS_FLAGS_STAGED_SUBMISSION are set, or when HL_CS_FLAGS_SIGNAL is set.
+ */
+ __u16 sob_count_before_submission;
+ __u16 pad[3];
+};
+
+union hl_cs_args {
+ struct hl_cs_in in;
+ struct hl_cs_out out;
+};
+
+#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2
+#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
+#define HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT 0xFFF00000
+#define HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT 0xFFE00000
+#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4
+#define HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ 0x10
+#define HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT 0x20
+
+#define HL_WAIT_MULTI_CS_LIST_MAX_LEN 32
+
+struct hl_wait_cs_in {
+ union {
+ struct {
+ /*
+ * In the wait_cs case, this holds the CS sequence number.
+ * In the wait-multi-CS case, this holds a user pointer to
+ * an array of CS sequence numbers.
+ */
+ __u64 seq;
+ /* Absolute timeout to wait for command submission
+ * in microseconds
+ */
+ __u64 timeout_us;
+ };
+
+ struct {
+ union {
+ /* User address for completion comparison.
+ * Upon interrupt, the driver compares the value pointed
+ * to by this address with the supplied target value.
+ * In order not to perform any comparison, set the address
+ * to all 1s.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set.
+ */
+ __u64 addr;
+
+ /* cq_counters_handle is a handle to a kernel-mapped CB which
+ * contains the CQ counters.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ is set
+ */
+ __u64 cq_counters_handle;
+ };
+
+ /* Target value for completion comparison */
+ __u64 target;
+ };
+ };
+
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+
+ /* HL_WAIT_CS_FLAGS_*
+ * If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include the
+ * interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK.
+ *
+ * In order to wait for any CQ interrupt, set the interrupt value to
+ * HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT.
+ *
+ * In order to wait for any decoder interrupt, set the interrupt value to
+ * HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT.
+ */
+ __u32 flags;
+
+ union {
+ struct {
+ /* Multi CS API info - number of valid entries in the multi-CS array */
+ __u8 seq_arr_len;
+ __u8 pad[7];
+ };
+
+ /* Absolute timeout to wait for an interrupt in microseconds.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ */
+ __u64 interrupt_timeout_us;
+ };
+
+ /*
+ * CQ counter offset inside the counters CB pointed to by cq_counters_handle above.
+ * Upon interrupt, the driver compares the value pointed to
+ * by this address (cq_counters_handle + cq_counters_offset)
+ * with the supplied target value.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ is set.
+ */
+ __u64 cq_counters_offset;
+
+ /*
+ * timestamp_handle is the handle of the timestamps buffer.
+ * Relevant only when HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT is set.
+ */
+ __u64 timestamp_handle;
+
+ /*
+ * timestamp_offset is the offset inside the timestamp buffer pointed to by
+ * timestamp_handle above.
+ * Upon interrupt, if the CQ reached the target value, the driver writes a
+ * timestamp at this offset.
+ * Relevant only when HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT is set.
+ */
+ __u64 timestamp_offset;
+};
+
+#define HL_WAIT_CS_STATUS_COMPLETED 0
+#define HL_WAIT_CS_STATUS_BUSY 1
+#define HL_WAIT_CS_STATUS_TIMEDOUT 2
+#define HL_WAIT_CS_STATUS_ABORTED 3
+
+#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1
+#define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD 0x2
+
+struct hl_wait_cs_out {
+ /* HL_WAIT_CS_STATUS_* */
+ __u32 status;
+ /* HL_WAIT_CS_STATUS_FLAG* */
+ __u32 flags;
+ /*
+ * valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set
+ * for wait_cs: timestamp of CS completion
+ * for wait_multi_cs: timestamp of FIRST CS completion
+ */
+ __s64 timestamp_nsec;
+ /* multi CS completion bitmap */
+ __u32 cs_completion_map;
+ __u32 pad;
+};
+
+union hl_wait_cs_args {
+ struct hl_wait_cs_in in;
+ struct hl_wait_cs_out out;
+};
+
+/* Opcode to allocate device memory */
+#define HL_MEM_OP_ALLOC 0
+
+/* Opcode to free previously allocated device memory */
+#define HL_MEM_OP_FREE 1
+
+/* Opcode to map host and device memory */
+#define HL_MEM_OP_MAP 2
+
+/* Opcode to unmap previously mapped host and device memory */
+#define HL_MEM_OP_UNMAP 3
+
+/* Opcode to map a hw block */
+#define HL_MEM_OP_MAP_BLOCK 4
+
+/* Opcode to create DMA-BUF object for an existing device memory allocation
+ * and to export an FD of that DMA-BUF back to the caller
+ */
+#define HL_MEM_OP_EXPORT_DMABUF_FD 5
+
+/* Opcode to create a timestamps pool for user interrupts registration support.
+ * The memory is allocated by the kernel driver: a timestamp buffer which the
+ * user gets a handle to for mmap, and another internal buffer used by the
+ * driver for registration management.
+ * The memory is freed when the user closes the file descriptor (ctx close).
+ */
+#define HL_MEM_OP_TS_ALLOC 6
+
+/* Memory flags */
+#define HL_MEM_CONTIGUOUS 0x1
+#define HL_MEM_SHARED 0x2
+#define HL_MEM_USERPTR 0x4
+#define HL_MEM_FORCE_HINT 0x8
+#define HL_MEM_PREFETCH 0x40
+
+/**
+ * struct hl_mem_in - input arguments for the memory IOCTL
+ * @union arg: union of structures to be used based on the input operation
+ * @op: specify the requested memory operation (one of the HL_MEM_OP_* definitions).
+ * @flags: flags for the memory operation (one of the HL_MEM_* definitions).
+ * For the HL_MEM_OP_EXPORT_DMABUF_FD opcode, this field holds the DMA-BUF file/FD flags.
+ * @ctx_id: context ID - currently not in use.
+ * @num_of_elements: number of timestamp elements; used only with the HL_MEM_OP_TS_ALLOC opcode.
+ */
+struct hl_mem_in {
+ union {
+ /**
+ * structure for device memory allocation (used with the HL_MEM_OP_ALLOC op)
+ * @mem_size: memory size to allocate
+ * @page_size: page size to use on allocation. When the value is 0, the
+ * default page size is used.
+ */
+ struct {
+ __u64 mem_size;
+ __u64 page_size;
+ } alloc;
+
+ /**
+ * structure for freeing device memory (used with the HL_MEM_OP_FREE op)
+ * @handle: handle returned from HL_MEM_OP_ALLOC
+ */
+ struct {
+ __u64 handle;
+ } free;
+
+ /**
+ * structure for mapping device memory (used with the HL_MEM_OP_MAP op)
+ * @hint_addr: requested virtual address of the mapped memory.
+ * The driver will try to map the requested region to this hint
+ * address, as long as the address is valid and not already mapped.
+ * The user should check the address returned by the IOCTL to make
+ * sure the hint address was honored.
+ * Passing 0 here means that the driver will choose the address itself.
+ * @handle: handle returned from HL_MEM_OP_ALLOC.
+ */
+ struct {
+ __u64 hint_addr;
+ __u64 handle;
+ } map_device;
+
+ /**
+ * structure for mapping host memory (used with the HL_MEM_OP_MAP op)
+ * @host_virt_addr: address of allocated host memory.
+ * @hint_addr: requested virtual address of the mapped memory.
+ * The driver will try to map the requested region to this hint
+ * address, as long as the address is valid and not already mapped.
+ * The user should check the address returned by the IOCTL to make
+ * sure the hint address was honored.
+ * Passing 0 here means that the driver will choose the address itself.
+ * @size: size of allocated host memory.
+ */
+ struct {
+ __u64 host_virt_addr;
+ __u64 hint_addr;
+ __u64 mem_size;
+ } map_host;
+
+ /**
+ * structure for mapping hw block (used with the HL_MEM_OP_MAP_BLOCK op)
+ * @block_addr: HW block address to map. A handle and size will be returned
+ * to the user and will be used to mmap the relevant block.
+ * Only addresses from configuration space are allowed.
+ */
+ struct {
+ __u64 block_addr;
+ } map_block;
+
+ /**
+ * structure for unmapping host memory (used with the HL_MEM_OP_UNMAP op)
+ * @device_virt_addr: virtual address returned from HL_MEM_OP_MAP
+ */
+ struct {
+ __u64 device_virt_addr;
+ } unmap;
+
+ /**
+ * structure for exporting DMABUF object (used with
+ * the HL_MEM_OP_EXPORT_DMABUF_FD op)
+ * @addr: for Gaudi1, the driver expects a physical address
+ * inside the device's DRAM, because Gaudi1 has no MMU
+ * that covers the device's DRAM.
+ * For all other ASICs, the driver expects a device
+ * virtual address that represents the start address of
+ * a mapped DRAM memory area inside the device.
+ * The address must be the same as was received from the
+ * driver during a previous HL_MEM_OP_MAP operation.
+ * @mem_size: size of memory to export.
+ * @offset: for Gaudi1, this value must be 0. For all other ASICs,
+ * the driver expects an offset inside the memory area
+ * described by addr. The offset is the start address of
+ * the range that the exported dma-buf object describes.
+ */
+ struct {
+ __u64 addr;
+ __u64 mem_size;
+ __u64 offset;
+ } export_dmabuf_fd;
+ };
+
+ __u32 op;
+ __u32 flags;
+ __u32 ctx_id;
+ __u32 num_of_elements;
+};
+
+struct hl_mem_out {
+ union {
+ /*
+ * Used for HL_MEM_OP_MAP as the virtual address that was
+ * assigned in the device VA space.
+ * A value of 0 means the requested operation failed.
+ */
+ __u64 device_virt_addr;
+
+ /*
+ * Used in HL_MEM_OP_ALLOC
+ * This is the assigned handle for the allocated memory
+ */
+ __u64 handle;
+
+ struct {
+ /*
+ * Used in HL_MEM_OP_MAP_BLOCK.
+ * This is the assigned handle for the mapped block
+ */
+ __u64 block_handle;
+
+ /*
+ * Used in HL_MEM_OP_MAP_BLOCK
+ * This is the size of the mapped block
+ */
+ __u32 block_size;
+
+ __u32 pad;
+ };
+
+ /* Returned in HL_MEM_OP_EXPORT_DMABUF_FD. Represents the
+ * DMA-BUF object that was created to describe a memory
+ * allocation on the device's memory space. The FD should be
+ * passed to the importer driver
+ */
+ __s32 fd;
+ };
+};
+
+union hl_mem_args {
+ struct hl_mem_in in;
+ struct hl_mem_out out;
+};
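Putting the DMA-BUF export fields together, a hedged sketch; the O_RDWR | O_CLOEXEC FD flags and the include path are assumptions:

        #include <fcntl.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include "habanalabs.h"  /* include path is an assumption */

        /* Export a previously mapped device memory area as a DMA-BUF (sketch). */
        static int export_dmabuf(int fd, __u64 device_va, __u64 size)
        {
                union hl_mem_args args;

                memset(&args, 0, sizeof(args));
                args.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
                args.in.flags = O_RDWR | O_CLOEXEC;  /* DMA-BUF FD flags (assumed) */
                args.in.export_dmabuf_fd.addr = device_va;  /* from a prior HL_MEM_OP_MAP */
                args.in.export_dmabuf_fd.mem_size = size;
                args.in.export_dmabuf_fd.offset = 0;

                if (ioctl(fd, DRM_IOCTL_HL_MEMORY, &args))
                        return -1;

                return args.out.fd;     /* hand this FD to the importer driver */
        }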
+
+#define HL_DEBUG_MAX_AUX_VALUES 10
+
+struct hl_debug_params_etr {
+ /* Address in memory to allocate buffer */
+ __u64 buffer_address;
+
+ /* Size of buffer to allocate */
+ __u64 buffer_size;
+
+ /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+ __u32 sink_mode;
+ __u32 pad;
+};
+
+struct hl_debug_params_etf {
+ /* Address in memory to allocate buffer */
+ __u64 buffer_address;
+
+ /* Size of buffer to allocate */
+ __u64 buffer_size;
+
+ /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+ __u32 sink_mode;
+ __u32 pad;
+};
+
+struct hl_debug_params_stm {
+ /* Two bit masks for HW event and Stimulus Port */
+ __u64 he_mask;
+ __u64 sp_mask;
+
+ /* Trace source ID */
+ __u32 id;
+
+ /* Frequency for the timestamp register */
+ __u32 frequency;
+};
+
+struct hl_debug_params_bmon {
+ /* Two address ranges that the user can request to filter */
+ __u64 start_addr0;
+ __u64 addr_mask0;
+
+ __u64 start_addr1;
+ __u64 addr_mask1;
+
+ /* Capture window configuration */
+ __u32 bw_win;
+ __u32 win_capture;
+
+ /* Trace source ID */
+ __u32 id;
+
+ /* Control register */
+ __u32 control;
+
+ /* Two more address ranges that the user can request to filter */
+ __u64 start_addr2;
+ __u64 end_addr2;
+
+ __u64 start_addr3;
+ __u64 end_addr3;
+};
+
+struct hl_debug_params_spmu {
+ /* Event types selection */
+ __u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
+
+ /* Number of event types selection */
+ __u32 event_types_num;
+
+ /* TRC configuration register values */
+ __u32 pmtrc_val;
+ __u32 trc_ctrl_host_val;
+ __u32 trc_en_host_val;
+};
+
+/* Opcode for ETR component */
+#define HL_DEBUG_OP_ETR 0
+/* Opcode for ETF component */
+#define HL_DEBUG_OP_ETF 1
+/* Opcode for STM component */
+#define HL_DEBUG_OP_STM 2
+/* Opcode for FUNNEL component */
+#define HL_DEBUG_OP_FUNNEL 3
+/* Opcode for BMON component */
+#define HL_DEBUG_OP_BMON 4
+/* Opcode for SPMU component */
+#define HL_DEBUG_OP_SPMU 5
+/* Opcode for timestamp (deprecated) */
+#define HL_DEBUG_OP_TIMESTAMP 6
+/* Opcode for setting the device into or out of debug mode. The enable
+ * variable should be 1 for enabling debug mode and 0 for disabling it
+ */
+#define HL_DEBUG_OP_SET_MODE 7
+
+struct hl_debug_args {
+ /*
+ * Pointer to user input structure.
+ * This field is relevant to specific opcodes.
+ */
+ __u64 input_ptr;
+ /* Pointer to user output structure */
+ __u64 output_ptr;
+ /* Size of user input structure */
+ __u32 input_size;
+ /* Size of user output structure */
+ __u32 output_size;
+ /* HL_DEBUG_OP_* */
+ __u32 op;
+ /*
+ * Register index in the component, taken from the debug_regs_index enum
+ * in the various ASIC header files
+ */
+ __u32 reg_idx;
+ /* Enable/disable */
+ __u32 enable;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+};
+
+#define HL_IOCTL_INFO 0x00
+#define HL_IOCTL_CB 0x01
+#define HL_IOCTL_CS 0x02
+#define HL_IOCTL_WAIT_CS 0x03
+#define HL_IOCTL_MEMORY 0x04
+#define HL_IOCTL_DEBUG 0x05
+
+/*
+ * Various information operations such as:
+ * - H/W IP information
+ * - Current DRAM usage
+ *
+ * The user calls this IOCTL with an opcode that describes the required
+ * information. The user should supply a pointer to a user-allocated memory
+ * chunk, which will be filled by the driver with the requested information.
+ *
+ * The user supplies the maximum size to copy into the user's memory, in
+ * order to prevent data corruption in case of differences between the
+ * definitions of structures in kernel and userspace, e.g. in case of an old
+ * userspace and a new kernel driver.
+ */
+#define DRM_IOCTL_HL_INFO DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_INFO, struct hl_info_args)
+
+/*
+ * Command Buffer
+ * - Request a Command Buffer
+ * - Destroy a Command Buffer
+ *
+ * The command buffers are memory blocks that reside in DMA-able address
+ * space and are physically contiguous so they can be accessed by the device
+ * directly. They are allocated using the coherent DMA API.
+ *
+ * When creating a new CB, the IOCTL returns a handle to it, and the user-space
+ * process needs to use that handle to mmap the buffer in order to access it.
+ *
+ * In some instances, the device must access the command buffer through the
+ * device's MMU, and thus its memory should be mapped. In these cases, the user
+ * can indicate to the driver that such a mapping is required.
+ * The resulting device virtual address will be used internally by the driver,
+ * and won't be returned to the user.
+ *
+ */
+#define DRM_IOCTL_HL_CB DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CB, union hl_cb_args)
+
+/*
+ * Command Submission
+ *
+ * To submit work to the device, the user needs to call this IOCTL with a set
+ * of JOBS. That set of JOBS constitutes a CS object.
+ * Each JOB will be enqueued on a specific queue, according to the user's input.
+ * There can be more than one JOB per queue.
+ *
+ * The CS IOCTL receives two sets of JOBS. One set is for the "restore" phase
+ * and a second set is for the "execution" phase.
+ * The JOBS of the "restore" phase are enqueued only after a context-switch
+ * (or if it's the first CS for this context). The user can also order the
+ * driver to run the "restore" phase explicitly.
+ *
+ * Goya/Gaudi:
+ * There are two types of queues - external and internal. External queues
+ * are DMA queues which transfer data from/to the Host. All other queues are
+ * internal. The driver will get completion notifications from the device only
+ * on JOBS which are enqueued in the external queues.
+ *
+ * Gaudi2 onwards:
+ * There is a single type of queue for all types of engines, either DMA engines
+ * for transfers from/to the host or inside the device, or compute engines.
+ * The driver will get completion notifications from the device for all queues.
+ *
+ * For jobs on external queues, the user needs to create command buffers
+ * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
+ * internal queues, the user needs to prepare a "command buffer" with packets
+ * on either the device SRAM/DRAM or the host, and give the device address of
+ * that buffer to the CS ioctl.
+ * For jobs on H/W queues both options of command buffers are valid.
+ *
+ * This IOCTL is asynchronous in regard to the actual execution of the CS. This
+ * means it returns immediately after ALL the JOBS were enqueued on their
+ * relevant queues. Therefore, the user mustn't assume the CS has been completed
+ * or has even started to execute.
+ *
+ * Upon successful enqueue, the IOCTL returns a sequence number which the user
+ * can use with the "Wait for CS" IOCTL to check whether the handle's CS
+ * non-internal JOBS have been completed. Note that if the CS has internal JOBS
+ * which can execute AFTER the external JOBS have finished, the driver might
+ * report that the CS has finished executing BEFORE the internal JOBS have
+ * actually finished executing.
+ *
+ * Even though the sequence number increments per CS, the user can NOT
+ * automatically assume that if CS with sequence number N finished, then CS
+ * with sequence number N-1 also finished. The user can make this assumption if
+ * and only if CS N and CS N-1 are exactly the same (same CBs for the same
+ * queues).
+ */
+#define DRM_IOCTL_HL_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CS, union hl_cs_args)
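A hedged sketch of a minimal submission with a single execution job; the chunk's cb_handle field name is taken from the full header and is an assumption here, as is the include path:

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include "habanalabs.h"  /* include path is an assumption */

        /* Submit one execution job on 'queue' and return its CS sequence (sketch). */
        static int submit_one_job(int fd, __u64 cb_handle, __u32 cb_size,
                                  __u32 queue, __u64 *seq_out)
        {
                struct hl_cs_chunk chunk;
                union hl_cs_args args;

                memset(&chunk, 0, sizeof(chunk));
                chunk.cb_handle = cb_handle;    /* field name assumed from full header */
                chunk.queue_index = queue;
                chunk.cb_size = cb_size;        /* bytes of valid packets in the CB */

                memset(&args, 0, sizeof(args));
                args.in.chunks_execute = (__u64)(uintptr_t)&chunk;
                args.in.num_chunks_execute = 1;

                if (ioctl(fd, DRM_IOCTL_HL_CS, &args))
                        return -1;

                *seq_out = args.out.seq;        /* pass to the wait-CS ioctl below */
                return args.out.status == HL_CS_STATUS_SUCCESS ? 0 : -1;
        }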
+
+/*
+ * Wait for Command Submission
+ *
+ * The user can call this IOCTL with a handle it received from the CS IOCTL
+ * to wait until the handle's CS has finished executing. The user will wait
+ * inside the kernel until the CS has finished or until the user-requested
+ * timeout has expired.
+ *
+ * If the timeout value is 0, the driver won't sleep at all. It will check
+ * the status of the CS and return immediately
+ *
+ * The return value of the IOCTL is a standard Linux error code. The possible
+ * values are:
+ *
+ * EINTR - Kernel waiting has been interrupted, e.g. due to OS signal
+ * that the user process received
+ * ETIMEDOUT - The CS has caused a timeout on the device
+ * EIO - The CS was aborted (usually because the device was reset)
+ * ENODEV - The device wants to do a hard-reset (so the user needs to close the FD)
+ *
+ * The driver also returns a custom define in case the IOCTL call returned 0.
+ * The define can be one of the following:
+ *
+ * HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
+ * HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
+ * HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device
+ * (ETIMEDOUT)
+ * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
+ * device was reset (EIO)
+ */
+#define DRM_IOCTL_HL_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_WAIT_CS, union hl_wait_cs_args)
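A hedged sketch of the wait flow just described, pairing the sequence number from the CS ioctl with a bounded timeout (include path assumed):

        #include <string.h>
        #include <sys/ioctl.h>
        #include "habanalabs.h"  /* include path is an assumption */

        /* Wait up to 10ms for the CS identified by 'seq' to complete (sketch). */
        static int wait_for_cs(int fd, __u64 seq)
        {
                union hl_wait_cs_args args;

                memset(&args, 0, sizeof(args));
                args.in.seq = seq;              /* from the CS ioctl */
                args.in.timeout_us = 10000;     /* 10 ms; 0 would just poll */

                if (ioctl(fd, DRM_IOCTL_HL_WAIT_CS, &args))
                        return -1;  /* EINTR/ETIMEDOUT/EIO/ENODEV, as listed above */

                return args.out.status;         /* one of HL_WAIT_CS_STATUS_* */
        }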
+
+/*
+ * Memory
+ * - Map host memory to device MMU
+ * - Unmap host memory from device MMU
+ *
+ * This IOCTL allows the user to map host memory to the device MMU
+ *
+ * For host memory, the IOCTL doesn't allocate memory. The user is supposed
+ * to allocate the memory in user-space (malloc/new). The driver pins the
+ * physical pages (up to the allowed limit by the OS), assigns a virtual
+ * address in the device VA space and initializes the device MMU.
+ *
+ * There is an option for the user to specify the requested virtual address.
+ *
+ */
+#define DRM_IOCTL_HL_MEMORY DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_MEMORY, union hl_mem_args)
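A hedged sketch of mapping a malloc'ed host buffer into the device VA space, per the description above (include path assumed):

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include "habanalabs.h"  /* include path is an assumption */

        /* Pin a host buffer and map it into the device MMU (sketch). */
        static __u64 map_host_buffer(int fd, void *buf, __u64 size)
        {
                union hl_mem_args args;

                memset(&args, 0, sizeof(args));
                args.in.op = HL_MEM_OP_MAP;
                args.in.flags = HL_MEM_USERPTR;
                args.in.map_host.host_virt_addr = (__u64)(uintptr_t)buf;
                args.in.map_host.hint_addr = 0;  /* 0: let the driver pick the VA */
                args.in.map_host.mem_size = size;

                if (ioctl(fd, DRM_IOCTL_HL_MEMORY, &args))
                        return 0;

                return args.out.device_virt_addr;  /* 0 means the map failed */
        }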
+
+/*
+ * Debug
+ * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
+ *
+ * This IOCTL allows the user to get debug traces from the chip.
+ *
+ * Before the user can send configuration requests to the various
+ * debug/profile engines, the device needs to be set into debug mode.
+ * This is because the debug/profile infrastructure is a shared component in
+ * the device and we can't allow multiple users to access it at the same time.
+ *
+ * Once a user sets the device into debug mode, the driver won't allow other
+ * users to "work" with the device, i.e. open an FD. If there are multiple
+ * users with the device open, the driver won't allow any user to debug the
+ * device.
+ *
+ * For each configuration request, the user needs to provide the register index
+ * and essential data such as buffer address and size.
+ *
+ * Once the user has finished using the debug/profile engines, they should
+ * set the device into non-debug mode, i.e. disable debug mode.
+ *
+ * The driver can decide to "kick out" a user who abuses this interface.
+ *
+ *
+ */
+#define DRM_IOCTL_HL_DEBUG DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_DEBUG, struct hl_debug_args)
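A hedged sketch of toggling debug mode, which must precede any ETR/ETF/STM/BMON/SPMU configuration per the description above (include path assumed):

        #include <string.h>
        #include <sys/ioctl.h>
        #include "habanalabs.h"  /* include path is an assumption */

        /* Enable (1) or disable (0) device debug mode (sketch). */
        static int set_debug_mode(int fd, int enable)
        {
                struct hl_debug_args args;

                memset(&args, 0, sizeof(args));
                args.op = HL_DEBUG_OP_SET_MODE;
                args.enable = enable ? 1 : 0;

                return ioctl(fd, DRM_IOCTL_HL_DEBUG, &args);
        }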
+
+#define HL_COMMAND_START (DRM_COMMAND_BASE + HL_IOCTL_INFO)
+#define HL_COMMAND_END (DRM_COMMAND_BASE + HL_IOCTL_DEBUG + 1)
+
+#endif /* HABANALABS_H_ */
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
deleted file mode 100644
index d285d5e72e6a..000000000000
--- a/include/uapi/drm/i810_drm.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _I810_DRM_H_
-#define _I810_DRM_H_
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- */
-
-#ifndef _I810_DEFINES_
-#define _I810_DEFINES_
-
-#define I810_DMA_BUF_ORDER 12
-#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
-#define I810_DMA_BUF_NR 256
-#define I810_NR_SAREA_CLIPRECTS 8
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define I810_NR_TEX_REGIONS 64
-#define I810_LOG_MIN_TEX_REGION_SIZE 16
-#endif
-
-#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
-#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
-#define I810_UPLOAD_CTX 0x4
-#define I810_UPLOAD_BUFFERS 0x8
-#define I810_UPLOAD_TEX0 0x10
-#define I810_UPLOAD_TEX1 0x20
-#define I810_UPLOAD_CLIPRECTS 0x40
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-/* Destbuffer state
- * - backbuffer linear offset and pitch -- invarient in the current dri
- * - zbuffer linear offset and pitch -- also invarient
- * - drawing origin in back and depth buffers.
- *
- * Keep the depth/back buffer state here to accommodate private buffers
- * in the future.
- */
-#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
-#define I810_DESTREG_DI1 1
-#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
-#define I810_DESTREG_DV1 3
-#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
-#define I810_DESTREG_DR1 5
-#define I810_DESTREG_DR2 6
-#define I810_DESTREG_DR3 7
-#define I810_DESTREG_DR4 8
-#define I810_DEST_SETUP_SIZE 10
-
-/* Context state
- */
-#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
-#define I810_CTXREG_CF1 1
-#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
-#define I810_CTXREG_ST1 3
-#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
-#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
-#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
-#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
-#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
-#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
-#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
-#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
-#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
-#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
-#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
-#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
-#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
-#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! */
-#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
-#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
-#define I810_CTX_SETUP_SIZE 20
-
-/* Texture state (per tex unit)
- */
-#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
-#define I810_TEXREG_MI1 1
-#define I810_TEXREG_MI2 2
-#define I810_TEXREG_MI3 3
-#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
-#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
-#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
-#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
-#define I810_TEX_SETUP_SIZE 8
-
-/* Flags for clear ioctl
- */
-#define I810_FRONT 0x1
-#define I810_BACK 0x2
-#define I810_DEPTH 0x4
-
-typedef enum _drm_i810_init_func {
- I810_INIT_DMA = 0x01,
- I810_CLEANUP_DMA = 0x02,
- I810_INIT_DMA_1_4 = 0x03
-} drm_i810_init_func_t;
-
-/* This is the init structure after v1.2 */
-typedef struct _drm_i810_init {
- drm_i810_init_func_t func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int overlay_offset;
- unsigned int overlay_physical;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
-} drm_i810_init_t;
-
-/* This is the init structure prior to v1.2 */
-typedef struct _drm_i810_pre12_init {
- drm_i810_init_func_t func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
-} drm_i810_pre12_init_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_i810_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char in_use; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_i810_tex_region_t;
-
-typedef struct _drm_i810_sarea {
- unsigned int ContextState[I810_CTX_SETUP_SIZE];
- unsigned int BufferState[I810_DEST_SETUP_SIZE];
- unsigned int TexState[2][I810_TEX_SETUP_SIZE];
- unsigned int dirty;
-
- unsigned int nbox;
- struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
-
- /* Maintain an LRU of contiguous regions of texture space. If
- * you think you own a region of texture memory, and it has an
- * age different to the one you set, then you are mistaken and
- * it has been stolen by another client. If global texAge
- * hasn't changed, there is no need to walk the list.
- *
- * These regions can be used as a proxy for the fine-grained
- * texture information of other clients - by maintaining them
- * in the same lru which is used to age their own textures,
- * clients have an approximate lru for the whole of global
- * texture space, and can make informed decisions as to which
- * areas to kick out. There is no need to choose whether to
- * kick out your own texture or someone else's - simply eject
- * them all in LRU order.
- */
-
- drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
- /* Last elt is sentinal */
- int texAge; /* last time texture was uploaded */
- int last_enqueue; /* last time a buffer was enqueued */
- int last_dispatch; /* age of the most recently dispatched buffer */
- int last_quiescent; /* */
- int ctxOwner; /* last context to upload state */
-
- int vertex_prim;
-
- int pf_enabled; /* is pageflipping allowed? */
- int pf_active;
- int pf_current_page; /* which buffer is being displayed? */
-} drm_i810_sarea_t;
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmMga.h)
- */
-
-/* i810 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_I810_INIT 0x00
-#define DRM_I810_VERTEX 0x01
-#define DRM_I810_CLEAR 0x02
-#define DRM_I810_FLUSH 0x03
-#define DRM_I810_GETAGE 0x04
-#define DRM_I810_GETBUF 0x05
-#define DRM_I810_SWAP 0x06
-#define DRM_I810_COPY 0x07
-#define DRM_I810_DOCOPY 0x08
-#define DRM_I810_OV0INFO 0x09
-#define DRM_I810_FSTATUS 0x0a
-#define DRM_I810_OV0FLIP 0x0b
-#define DRM_I810_MC 0x0c
-#define DRM_I810_RSTATUS 0x0d
-#define DRM_I810_FLIP 0x0e
-
-#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
-#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
-#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH)
-#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE)
-#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
-#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP)
-#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
-#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY)
-#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
-#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
-#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
-#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
-#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
-#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
-
-typedef struct _drm_i810_clear {
- int clear_color;
- int clear_depth;
- int flags;
-} drm_i810_clear_t;
-
-/* These may be placeholders if we have more cliprects than
- * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
- * false, indicating that the buffer will be dispatched again with a
- * new set of cliprects.
- */
-typedef struct _drm_i810_vertex {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int discard; /* client is finished with the buffer? */
-} drm_i810_vertex_t;
-
-typedef struct _drm_i810_copy_t {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- void *address; /* Address to copy from */
-} drm_i810_copy_t;
-
-#define PR_TRIANGLES (0x0<<18)
-#define PR_TRISTRIP_0 (0x1<<18)
-#define PR_TRISTRIP_1 (0x2<<18)
-#define PR_TRIFAN (0x3<<18)
-#define PR_POLYGON (0x4<<18)
-#define PR_LINES (0x5<<18)
-#define PR_LINESTRIP (0x6<<18)
-#define PR_RECTS (0x7<<18)
-#define PR_MASK (0x7<<18)
-
-typedef struct drm_i810_dma {
- void *virtual;
- int request_idx;
- int request_size;
- int granted;
-} drm_i810_dma_t;
-
-typedef struct _drm_i810_overlay_t {
- unsigned int offset; /* Address of the Overlay Regs */
- unsigned int physical;
-} drm_i810_overlay_t;
-
-typedef struct _drm_i810_mc {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int num_blocks; /* number of GFXBlocks */
- int *length; /* List of lengths for GFXBlocks (FUTURE) */
- unsigned int last_render; /* Last Render Request */
-} drm_i810_mc_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* _I810_DRM_H_ */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 00546062e023..2ee338860b7e 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,13 +38,13 @@ extern "C" {
*/
/**
- * DOC: uevents generated by i915 on it's device node
+ * DOC: uevents generated by i915 on its device node
*
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
- * event from the gpu l3 cache. Additional information supplied is ROW,
+ * event from the GPU L3 cache. Additional information supplied is ROW,
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
- * track of these events and if a specific cache-line seems to have a
- * persistent error remap it with the l3 remapping tool supplied in
+ * track of these events, and if a specific cache-line seems to have a
+ * persistent error, remap it with the L3 remapping tool supplied in
* intel-gpu-tools. The value supplied with the event is always 1.
*
* I915_ERROR_UEVENT - Generated upon error detection, currently only via
@@ -62,8 +62,8 @@ extern "C" {
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
-/*
- * i915_user_extension: Base class for defining a chain of extensions
+/**
+ * struct i915_user_extension - Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
@@ -76,12 +76,58 @@ extern "C" {
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
* the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct i915_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct i915_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ *
*/
struct i915_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct i915_user_extension, or zero if the end.
+ */
__u64 next_extension;
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct i915_user_extension.
+ */
__u32 name;
- __u32 flags; /* All undefined bits must be zero. */
- __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+ /**
+ * @flags: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 flags;
+ /**
+ * @rsvd: MBZ
+ *
+ * Reserved for future use; must be zero.
+ */
+ __u32 rsvd[4];
};
/*
@@ -108,25 +154,77 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
-/*
+/**
+ * enum drm_i915_gem_engine_class - uapi engine type enumeration
+ *
* Different engines serve different roles, and there may be more than one
- * engine serving each role. enum drm_i915_gem_engine_class provides a
- * classification of the role of the engine, which may be used when requesting
- * operations to be performed on a certain subset of engines, or for providing
- * information about that group.
+ * engine serving each role. This enum provides a classification of the role
+ * of the engine, which may be used when requesting operations to be performed
+ * on a certain subset of engines, or for providing information about that
+ * group.
*/
enum drm_i915_gem_engine_class {
+ /**
+ * @I915_ENGINE_CLASS_RENDER:
+ *
+ * Render engines support instructions used for 3D, Compute (GPGPU),
+ * and programmable media workloads. These instructions fetch data and
+ * dispatch individual work items to threads that operate in parallel.
+ * The threads run small programs (called "kernels" or "shaders") on
+ * the GPU's execution units (EUs).
+ */
I915_ENGINE_CLASS_RENDER = 0,
+
+ /**
+ * @I915_ENGINE_CLASS_COPY:
+ *
+ * Copy engines (also referred to as "blitters") support instructions
+ * that move blocks of data from one location in memory to another,
+ * or that fill a specified location of memory with fixed data.
+ * Copy engines can perform pre-defined logical or bitwise operations
+ * on the source, destination, or pattern data.
+ */
I915_ENGINE_CLASS_COPY = 1,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO:
+ *
+ * Video engines (also referred to as "bit stream decode" (BSD) or
+ * "vdbox") support instructions that perform fixed-function media
+ * decode and encode.
+ */
I915_ENGINE_CLASS_VIDEO = 2,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
+ *
+ * Video enhancement engines (also referred to as "vebox") support
+ * instructions related to image enhancement.
+ */
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
- /* should be kept compact */
+ /**
+ * @I915_ENGINE_CLASS_COMPUTE:
+ *
+ * Compute engines support a subset of the instructions available
+ * on render engines: compute engines support Compute (GPGPU) and
+ * programmable media workloads, but do not support the 3D pipeline.
+ */
+ I915_ENGINE_CLASS_COMPUTE = 4,
+
+ /* Values in this enum should be kept compact. */
+ /**
+ * @I915_ENGINE_CLASS_INVALID:
+ *
+ * Placeholder value to represent an invalid engine class assignment.
+ */
I915_ENGINE_CLASS_INVALID = -1
};
-/*
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
* There may be more than one engine fulfilling any role within the system.
* Each engine of a class is given a unique instance number and therefore
* any engine can be specified by its class:instance tuplet. APIs that allow
@@ -134,10 +232,21 @@ enum drm_i915_gem_engine_class {
* for this identification.
*/
struct i915_engine_class_instance {
- __u16 engine_class; /* see enum drm_i915_gem_engine_class */
- __u16 engine_instance;
+ /**
+ * @engine_class:
+ *
+ * Engine class from enum drm_i915_gem_engine_class
+ */
+ __u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+ /**
+ * @engine_instance:
+ *
+ * Engine instance.
+ */
+ __u16 engine_instance;
};
/**
@@ -171,14 +280,30 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_ENGINE_SEMA(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
-#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+/*
+ * Top 4 bits of every non-engine counter are the GT id.
+ */
+#define __I915_PMU_GT_SHIFT (60)
+
+#define ___I915_PMU_OTHER(gt, x) \
+ (((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
+ ((__u64)(gt) << __I915_PMU_GT_SHIFT))
+
+#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)
#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)
+
+#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
-#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+#define __I915_PMU_ACTUAL_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 0)
+#define __I915_PMU_REQUESTED_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 1)
+#define __I915_PMU_INTERRUPTS(gt) ___I915_PMU_OTHER(gt, 2)
+#define __I915_PMU_RC6_RESIDENCY(gt) ___I915_PMU_OTHER(gt, 3)
+#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt) ___I915_PMU_OTHER(gt, 4)
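For illustration, a hedged sketch that opens a perf counter using the per-GT encoding above; reading the PMU type id from /sys/bus/event_source/devices/i915/type is assumed and left to the caller:

        #include <linux/perf_event.h>
        #include <drm/i915_drm.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        /* Open a system-wide counter for GT0 actual frequency (sketch). */
        static int open_gt_freq_counter(int pmu_type)
        {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.type = pmu_type;   /* from the sysfs "type" file */
                attr.size = sizeof(attr);
                attr.config = __I915_PMU_ACTUAL_FREQUENCY(0);  /* GT id 0 */

                /* pid == -1, cpu == 0: system-wide counting on CPU0 */
                return (int)syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        }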
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -359,6 +484,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_CREATE_EXT 0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -391,6 +517,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -523,7 +650,33 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1 Low priority
+ * 0 Normal priority
+ * 1 to 1k Highest priority
+ */
+#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5)
+/*
+ * Query the status of HuC load.
+ *
+ * The query can fail in the following scenarios with the listed error codes:
+ * -ENODEV if HuC is not present on this platform,
+ * -EOPNOTSUPP if HuC firmware usage is disabled,
+ * -ENOPKG if HuC firmware fetch failed,
+ * -ENOEXEC if HuC firmware is invalid or mismatched,
+ * -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
+ * -EIO if the FW transfer or the FW authentication failed.
+ *
+ * If the IOCTL is successful, the returned parameter will be set to one of the
+ * following values:
+ * * 0 if HuC firmware load is not complete,
+ * * 1 if HuC firmware is loaded and fully authenticated,
+ * * 2 if HuC firmware is loaded and authenticated for clear media only
+ */
#define I915_PARAM_HUC_STATUS 42
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -540,7 +693,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_FENCE 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
* EXEC_OBJECT_CAPTURE.
*/
#define I915_PARAM_HAS_EXEC_CAPTURE 45
@@ -619,16 +772,63 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_PERF_REVISION 54
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
+ * I915_EXEC_USE_EXTENSIONS.
+ */
+#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
+
+/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
+#define I915_PARAM_HAS_USERPTR_PROBE 56
+
+/*
+ * Frequency of the timestamps in OA reports. This used to be the same as the CS
+ * timestamp frequency, but differs on some platforms.
+ */
+#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
+
+/*
+ * Query the status of PXP support in i915.
+ *
+ * The query can fail in the following scenarios with the listed error codes:
+ * -ENODEV = PXP support is not available on the GPU device or in the
+ * kernel due to missing component drivers or kernel configs.
+ *
+ * If the IOCTL is successful, the returned parameter will be set to one of
+ * the following values:
+ * 1 = PXP feature is supported and is ready for use.
+ * 2 = PXP feature is supported but should be ready soon (pending
+ * initialization of non-i915 system dependencies).
+ *
+ * NOTE: When param is supported (positive return values), user space should
+ * still refer to the GEM PXP context-creation UAPI header specs to be
+ * aware of possible failure due to the system state machine at the time.
+ */
+#define I915_PARAM_PXP_STATUS 58
+
/* Must be kept compact -- no holes and well documented */
-typedef struct drm_i915_getparam {
+/**
+ * struct drm_i915_getparam - Driver parameter query structure.
+ */
+struct drm_i915_getparam {
+ /** @param: Driver parameter to query. */
__s32 param;
- /*
+
+ /**
+ * @value: Address of memory where queried value should be put.
+ *
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int __user *value;
-} drm_i915_getparam_t;
+};
+
+/**
+ * typedef drm_i915_getparam_t - Driver parameter query structure.
+ * See struct drm_i915_getparam.
+ */
+typedef struct drm_i915_getparam drm_i915_getparam_t;
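A hedged sketch of a GETPARAM query using this structure, here for the I915_PARAM_HUC_STATUS parameter documented above:

        #include <string.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Query HuC firmware load status (sketch). */
        static int query_huc_status(int fd, int *status)
        {
                struct drm_i915_getparam gp;

                memset(&gp, 0, sizeof(gp));
                gp.param = I915_PARAM_HUC_STATUS;
                gp.value = status;  /* 0, 1 or 2 on success, as listed above */

                return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
        }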
/* Ioctl to set kernel params:
*/
@@ -794,45 +994,113 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+/**
+ * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
+ *
+ * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
+ * and is used to retrieve the fake offset to mmap an object specified by &handle.
+ *
+ * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
+ * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
+ * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
+ */
struct drm_i915_gem_mmap_offset {
- /** Handle for the object being mapped. */
+ /** @handle: Handle for the object being mapped. */
__u32 handle;
+ /** @pad: Must be zero */
__u32 pad;
/**
- * Fake offset to use for subsequent mmap call
+ * @offset: The fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
/**
- * Flags for extended behaviour.
+ * @flags: Flags for extended behaviour.
*
- * It is mandatory that one of the MMAP_OFFSET types
- * (GTT, WC, WB, UC, etc) should be included.
+ * It is mandatory that one of the `MMAP_OFFSET` types
+ * should be included:
+ *
+ * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
+ * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
+ * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
+ * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
+ *
+ * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
+ * type. On devices without local memory, this caching mode is invalid.
+ *
+ * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
+ * be used, depending on the object placement on creation. WB will be used
+ * when the object can only exist in system memory, WC otherwise.
*/
__u64 flags;
-#define I915_MMAP_OFFSET_GTT 0
-#define I915_MMAP_OFFSET_WC 1
-#define I915_MMAP_OFFSET_WB 2
-#define I915_MMAP_OFFSET_UC 3
- /*
- * Zero-terminated chain of extensions.
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+#define I915_MMAP_OFFSET_FIXED 4
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
*
* No current extensions defined; mbz.
*/
__u64 extensions;
};
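A hedged sketch of the mmap-offset flow described above: retrieve the fake offset for a GEM handle, then mmap it with fixed placement (the handle and size are assumed to come from an earlier object creation):

        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <drm/i915_drm.h>

        /* Map a GEM object into the caller's address space (sketch). */
        static void *map_gem_object(int fd, __u32 handle, size_t size)
        {
                struct drm_i915_gem_mmap_offset mo;
                void *ptr;

                memset(&mo, 0, sizeof(mo));
                mo.handle = handle;
                mo.flags = I915_MMAP_OFFSET_FIXED;  /* placement decides WB vs WC */

                if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mo))
                        return NULL;

                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                           fd, mo.offset);
                return ptr == MAP_FAILED ? NULL : ptr;
        }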
+/**
+ * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
+ * preparation for accessing the pages via some CPU domain.
+ *
+ * Specifying a new write or read domain will flush the object out of the
+ * previous domain(if required), before then updating the objects domain
+ * tracking with the new domain.
+ *
+ * Note this might involve waiting for the object first if it is still active on
+ * the GPU.
+ *
+ * Supported values for @read_domains and @write_domain:
+ *
+ * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
+ * - I915_GEM_DOMAIN_CPU: CPU cache domain
+ * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
+ *
+ * All other domains are rejected.
+ *
+ * Note that for discrete, starting from DG1, this is no longer supported, and
+ * is instead rejected. On such platforms the CPU domain is effectively static,
+ * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
+ * which can't be set explicitly and instead depends on the object placements,
+ * as per the below.
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ */
struct drm_i915_gem_set_domain {
- /** Handle for the object */
+ /** @handle: Handle for the object. */
__u32 handle;
- /** New read domains */
+ /** @read_domains: New read domains. */
__u32 read_domains;
- /** New write domain */
+ /**
+ * @write_domain: New write domain.
+ *
+ * Note that having something in the write domain implies it's in the
+ * read domain, and only that read domain.
+ */
__u32 write_domain;
};
@@ -936,6 +1204,7 @@ struct drm_i915_gem_exec_object {
__u64 offset;
};
+/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
@@ -982,10 +1251,16 @@ struct drm_i915_gem_exec_object2 {
/**
* When the EXEC_OBJECT_PINNED flag is specified this is populated by
* the user with the GTT offset at which this object will be pinned.
+ *
* When the I915_EXEC_NO_RELOC flag is specified this must contain the
* presumed_offset of the object.
+ *
* During execbuffer2 the kernel populates it with the value of the
* current GTT offset of the object, for future presumed_offset writes.
+ *
+ * See struct drm_i915_gem_create_ext for the rules when dealing with
+ * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
+ * minimum page sizes, like DG2.
*/
__u64 offset;
@@ -1034,38 +1309,119 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
+/**
+ * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
+ * ioctl.
+ *
+ * The request will wait for the input fence to signal before submission.
+ *
+ * The returned output fence will be signaled after the completion of the
+ * request.
+ */
struct drm_i915_gem_exec_fence {
- /**
- * User's handle for a drm_syncobj to wait on or signal.
- */
+ /** @handle: User's handle for a drm_syncobj to wait on or signal. */
__u32 handle;
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_EXEC_FENCE_WAIT:
+ * Wait for the input fence before request submission.
+ *
+ * I915_EXEC_FENCE_SIGNAL:
+ * Return request completion fence as output
+ */
+ __u32 flags;
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
- __u32 flags;
};
-struct drm_i915_gem_execbuffer2 {
+/**
+ * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
+ * for execbuf ioctl.
+ *
+ * This structure describes an array of drm_syncobj and associated points for
+ * timeline variants of drm_syncobj. It is invalid to append this structure to
+ * the execbuf if I915_EXEC_FENCE_ARRAY is set.
+ */
+struct drm_i915_gem_execbuffer_ext_timeline_fences {
+#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /**
+ * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+ * arrays.
+ */
+ __u64 fence_count;
+
/**
- * List of gem_exec_object2 structs
+ * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
+ * of length @fence_count.
*/
+ __u64 handles_ptr;
+
+ /**
+ * @values_ptr: Pointer to an array of u64 values of length
+ * @fence_count.
+ * Values must be 0 for a binary drm_syncobj. A value of 0 for a
+ * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+ * binary one.
+ */
+ __u64 values_ptr;
+};
+
+/**
+ * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
+ * ioctl.
+ */
+struct drm_i915_gem_execbuffer2 {
+ /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
__u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in @buffers_ptr array */
__u32 buffer_count;
- /** Offset in the batchbuffer to start execution from. */
+ /**
+ * @batch_start_offset: Offset in the batchbuffer to start execution
+ * from.
+ */
__u32 batch_start_offset;
- /** Bytes used in batchbuffer from batch_start_offset */
+
+ /**
+ * @batch_len: Length in bytes of the batch buffer, starting from the
+ * @batch_start_offset. If 0, length is assumed to be the batch buffer
+ * object size.
+ */
__u32 batch_len;
+
+ /** @DR1: deprecated */
__u32 DR1;
+
+ /** @DR4: deprecated */
__u32 DR4;
+
+ /** @num_cliprects: See @cliprects_ptr */
__u32 num_cliprects;
+
/**
- * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
- * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
- * struct drm_i915_gem_exec_fence *fences.
+ * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
+ *
+ * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
+ * I915_EXEC_USE_EXTENSIONS flags are not set.
+ *
+ * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
+ * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
+ * array.
+ *
+ * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
+ * single &i915_user_extension and num_cliprects is 0.
*/
__u64 cliprects_ptr;
+
+ /** @flags: Execbuf flags */
+ __u64 flags;
#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
@@ -1083,10 +1439,6 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
- __u64 flags;
- __u64 rsvd1; /* now used for context info */
- __u64 rsvd2;
-};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
@@ -1181,7 +1533,30 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_SUBMIT (1 << 20)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
+/*
+ * Setting I915_EXEC_USE_EXTENSIONS implies that
+ * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
+ * list of i915_user_extension. Each i915_user_extension node is the base of a
+ * larger structure. The list of supported structures are listed in the
+ * drm_i915_gem_execbuffer_ext enum.
+ */
+#define I915_EXEC_USE_EXTENSIONS (1 << 21)
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
+
+ /** @rsvd1: Context id */
+ __u64 rsvd1;
+
+ /**
+ * @rsvd2: in and out sync_file file descriptors.
+ *
+ * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
+ * lower 32 bits of this field will have the in sync_file fd (input).
+ *
+ * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
+ * field will have the out sync_file fd (output).
+ */
+ __u64 rsvd2;
+};
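A hedged sketch of chaining a timeline-fence extension through cliprects_ptr with I915_EXEC_USE_EXTENSIONS, as described above; the syncobj handle, point, and batch details are placeholders:

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Execbuf that signals a timeline syncobj point on completion (sketch). */
        static int execbuf_signal_timeline(int fd, __u64 buffers_ptr,
                                           __u32 buffer_count,
                                           __u32 syncobj, __u64 point)
        {
                struct drm_i915_gem_exec_fence fence = {
                        .handle = syncobj,
                        .flags = I915_EXEC_FENCE_SIGNAL,
                };
                __u64 value = point;  /* must be non-zero for a timeline syncobj */
                struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
                        .base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
                        .fence_count = 1,
                        .handles_ptr = (__u64)(uintptr_t)&fence,
                        .values_ptr = (__u64)(uintptr_t)&value,
                };
                struct drm_i915_gem_execbuffer2 eb;

                memset(&eb, 0, sizeof(eb));
                eb.buffers_ptr = buffers_ptr;
                eb.buffer_count = buffer_count;
                eb.cliprects_ptr = (__u64)(uintptr_t)&ext;  /* extension chain head */
                eb.flags = I915_EXEC_RENDER | I915_EXEC_USE_EXTENSIONS;

                return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
        }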
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1231,7 +1606,7 @@ struct drm_i915_gem_busy {
* is accurate.
*
* The returned dword is split into two fields to indicate both
- * the engine classess on which the object is being read, and the
+ * the engine classes on which the object is being read, and the
* engine class on which it is currently being written (if any).
*
 * The low word (bits 0:15) indicates if the object is being written
@@ -1245,12 +1620,11 @@ struct drm_i915_gem_busy {
* reading from the object simultaneously.
*
* The value of each engine class is the same as specified in the
- * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
* I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
- * reported as active itself. Some hardware may have parallel
- * execution engines, e.g. multiple media engines, which are
- * mapped to the same class identifier and so are not separately
- * reported for busyness.
+ * Some hardware may have parallel execution engines, e.g. multiple
+ * media engines, which are mapped to the same class identifier and so
+ * are not separately reported for busyness.
*
* Caveat emptor:
* Only the boolean result of this query is reliable; that is whether
@@ -1261,49 +1635,91 @@ struct drm_i915_gem_busy {
};
/**
- * I915_CACHING_NONE
- *
- * GPU access is not coherent with cpu caches. Default for machines without an
- * LLC.
- */
-#define I915_CACHING_NONE 0
-/**
- * I915_CACHING_CACHED
- *
- * GPU access is coherent with cpu caches and furthermore the data is cached in
- * last-level caches shared between cpu cores and the gpu GT. Default on
- * machines with HAS_LLC.
- */
-#define I915_CACHING_CACHED 1
-/**
- * I915_CACHING_DISPLAY
- *
- * Special GPU caching mode which is coherent with the scanout engines.
- * Transparently falls back to I915_CACHING_NONE on platforms where no special
- * cache mode (like write-through or gfdt flushing) is available. The kernel
- * automatically sets this mode when using a buffer as a scanout target.
- * Userspace can manually set this mode to avoid a costly stall and clflush in
- * the hotpath of drawing the first frame.
+ * struct drm_i915_gem_caching - Set or get the caching for a given object
+ * handle.
+ *
+ * Allow userspace to control the GTT caching bits for a given object when the
+ * object is later mapped through the ppGTT (or GGTT on older platforms lacking
+ * ppGTT support, or if the object is used for scanout). Note that this might
+ * require unbinding the object from the GTT first, if its current caching value
+ * doesn't match.
+ *
+ * Note that this all changes on discrete platforms, starting from DG1, the
+ * set/get caching is no longer supported, and is now rejected. Instead the CPU
+ * caching attributes (WB vs WC) will become an immutable creation time property
+ * for the object, along with the GTT caching level. For now we don't expose any
+ * new uAPI for this, instead on DG1 this is all implicit, although this largely
+ * shouldn't matter since DG1 is coherent by default (without any way of
+ * controlling it).
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ *
+ * Side note: Part of the reason for this is that changing the at-allocation-time CPU
+ * caching attributes for the pages might be required (and is expensive) if we
+ * need to then CPU map the pages later with different caching attributes. This
+ * inconsistent caching behaviour, while supported on x86, is not universally
+ * supported on other architectures. So for simplicity we opt for setting
+ * everything at creation time, whilst also making it immutable, on discrete
+ * platforms.
*/
-#define I915_CACHING_DISPLAY 2
-
struct drm_i915_gem_caching {
/**
- * Handle of the buffer to set/get the caching level of. */
+ * @handle: Handle of the buffer to set/get the caching level.
+ */
__u32 handle;
/**
- * Cacheing level to apply or return value
+ * @caching: The GTT caching level to apply or possible return value.
+ *
+ * The supported @caching values:
*
- * bits0-15 are for generic caching control (i.e. the above defined
- * values). bits16-31 are reserved for platform-specific variations
- * (e.g. l3$ caching on gen7). */
+ * I915_CACHING_NONE:
+ *
+ * GPU access is not coherent with CPU caches. Default for machines
+ * without an LLC. This means manual flushing might be needed, if we
+ * want GPU access to be coherent.
+ *
+ * I915_CACHING_CACHED:
+ *
+ * GPU access is coherent with CPU caches and furthermore the data is
+ * cached in last-level caches shared between CPU cores and the GPU GT.
+ *
+ * I915_CACHING_DISPLAY:
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no
+ * special cache mode (like write-through or gfdt flushing) is
+ * available. The kernel automatically sets this mode when using a
+ * buffer as a scanout target. Userspace can manually set this mode to
+ * avoid a costly stall and clflush in the hotpath of drawing the first
+ * frame.
+ */
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+#define I915_CACHING_DISPLAY 2
__u32 caching;
};
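+
+/*
+ * A minimal sketch of applying a caching level to an existing object handle
+ * via the DRM_IOCTL_I915_GEM_SET_CACHING ioctl (pre-DG1 platforms only, as
+ * noted above):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_caching arg = {
+ * .handle = handle,
+ * .caching = I915_CACHING_CACHED,
+ * };
+ * ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
+ */
+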
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
@@ -1399,7 +1815,7 @@ struct drm_i915_gem_madvise {
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
- * or wont be and could be discarded under memory pressure.
+ * or won't be and could be discarded under memory pressure.
*/
__u32 madv;
@@ -1521,21 +1937,64 @@ struct drm_i915_gem_context_create {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
+ */
struct drm_i915_gem_context_create_ext {
- __u32 ctx_id; /* output: id of new context*/
+ /** @ctx_id: Id of the created context (output) */
+ __u32 ctx_id;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
+ *
+ * Extensions may be appended to this structure and driver must check
+ * for those. See @extensions.
+ *
+	 * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE:
+	 *
+	 * The created context will have a single timeline.
+ */
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * I915_CONTEXT_CREATE_EXT_SETPARAM:
+ * Context parameter to set or query during context creation.
+ * See struct drm_i915_gem_context_create_ext_setparam.
+ *
+ * I915_CONTEXT_CREATE_EXT_CLONE:
+ * This extension has been removed. On the off chance someone somewhere
+ * has attempted to use it, never re-use this extension number.
+ */
__u64 extensions;
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
};
+/**
+ * struct drm_i915_gem_context_param - Context parameter to set or query.
+ */
struct drm_i915_gem_context_param {
+ /** @ctx_id: Context id */
__u32 ctx_id;
+
+ /** @size: Size of the parameter @value */
__u32 size;
+
+ /** @param: Parameter to set or query */
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
+ * someone somewhere has attempted to use it, never re-use this context
+ * param number.
+ */
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
@@ -1602,6 +2061,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
@@ -1620,32 +2080,81 @@ struct drm_i915_gem_context_param {
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
-/*
- * I915_CONTEXT_PARAM_RINGSIZE:
- *
- * Sets the size of the CS ringbuffer to use for logical ring contexts. This
- * applies a limit of how many batches can be queued to HW before the caller
- * is blocked due to lack of space for more commands.
- *
- * Only reliably possible to be set prior to first use, i.e. during
- * construction. At any later point, the current execution must be flushed as
- * the ring can only be changed while the context is idle. Note, the ringsize
- * can be specified as a constructor property, see
- * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
- *
- * Only applies to the current set of engine and lost when those engines
- * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
- *
- * Must be between 4 - 512 KiB, in intervals of page size [4 KiB].
- * Default is 16 KiB.
+/* This API has been removed. On the off chance someone somewhere has
+ * attempted to use it, never re-use this context param number.
*/
#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of it once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ * -ENXIO: A dependency such as a component driver or firmware is not yet
+ * loaded so user space may need to attempt again. Depending on the
+ * device, this error may be reported if protected context creation is
+ * attempted very early after kernel start because the internal timeout
+ * waiting for such dependencies is not guaranteed to be larger than
+ * required (numbers differ depending on system and kernel config):
+ * - ADL/RPL: dependencies may take up to 3 seconds from kernel start
+ * while context creation internal timeout is 250 milliseconds
+ * - MTL: dependencies may take up to 8 seconds from kernel start
+ * while context creation internal timeout is 250 milliseconds
+ * NOTE: such dependencies happen once, so a subsequent call to create a
+ * protected context after a prior successful call will not experience
+ * such timeouts and will not return -ENXIO (unless the driver is reloaded,
+ * or, depending on the device, resumes from a suspended state).
+ * -EIO: The firmware did not succeed in creating the protected context.
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
+ /** @value: Context parameter value to be set or queried */
__u64 value;
};
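+
+/*
+ * A minimal sketch of setting a single parameter on an existing context via
+ * the DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM ioctl, assuming a context id in
+ * ctx_id:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_param p = {
+ * .ctx_id = ctx_id,
+ * .param = I915_CONTEXT_PARAM_PRIORITY,
+ * .value = 0, // default priority
+ * };
+ * ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
+ */
+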
-/**
+/*
* Context SSEU programming
*
* It may be necessary for either functional or performance reason to configure
@@ -1704,6 +2213,69 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/**
+ * DOC: Virtual Engine uAPI
+ *
+ * Virtual engine is a concept where userspace is able to configure a set of
+ * physical engines, submit a batch buffer, and let the driver execute it on any
+ * engine from the set as it sees fit.
+ *
+ * This is primarily useful on parts which have multiple instances of the same
+ * engine class, like for example GT3+ Skylake parts with their two VCS engines.
+ *
+ * For instance userspace can enumerate all engines of a certain class using the
+ * previously described `Engine Discovery uAPI`_. After that userspace can
+ * create a GEM context with a placeholder slot for the virtual engine (using
+ * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
+ * and instance respectively) and finally using the
+ * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
+ * the same reserved slot.
+ *
+ * Example of creating a virtual engine and submitting a batch buffer to it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
+ * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
+ * .engine_index = 0, // Place this virtual engine into engine map slot 0
+ * .num_siblings = 2,
+ * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ * { I915_ENGINE_CLASS_VIDEO, 1 }, },
+ * };
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ * .engines = { { I915_ENGINE_CLASS_INVALID,
+ * I915_ENGINE_CLASS_INVALID_NONE } },
+ * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // Now we have created a GEM context with its engine map containing a
+ * // single virtual engine. Submissions to this slot can go either to
+ * // vcs0 or vcs1, depending on the load balancing algorithm used inside
+ * // the driver. The load balancing is dynamic from one batch buffer to
+ * // another and transparent to userspace.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
/*
* i915_context_engines_load_balance:
*
@@ -1729,7 +2301,7 @@ struct i915_context_engines_load_balance {
__u64 mbz64; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
@@ -1767,7 +2339,7 @@ struct i915_context_engines_bond {
__u64 flags; /* all undefined flags must be zero */
__u64 mbz64[4]; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
@@ -1780,11 +2352,196 @@ struct i915_context_engines_bond {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Setup a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs only N BBs can be
+ * submitted in each execbuf IOCTL and this is implicit behavior, e.g. the user
+ * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. context is
+ * a virtual engine). Also we only allow contexts of the same engine class and
+ * these contexts must be in logically contiguous order. Examples of the
+ * placement behavior are described below. Lastly, the default is to not allow
+ * BBs to be preempted mid-batch. Rather, coordinated preemption points are
+ * inserted on all hardware contexts between each set of BBs. Flags could be
+ * added in the future to change both of these default behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Example syntax:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
+ * versa. Likewise, choosing CS[2] also requires using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero, currently not defined flags
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[];
+
+} __packed;
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
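+
+/*
+ * A minimal sketch mirroring Example 1 above: one parallel slot of width 2
+ * with a single placement per submission (logical video engines CS[0] and
+ * CS[1]), placed in engine map slot 0. The extension would then be chained
+ * into i915_context_param_engines just like the load-balance extension in the
+ * Virtual Engine example above:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
+ * .base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
+ * .engine_index = 0,
+ * .width = 2,
+ * .num_siblings = 1,
+ * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ * { I915_ENGINE_CLASS_VIDEO, 1 } },
+ * };
+ */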
+
+/**
+ * DOC: Context Engine Map uAPI
+ *
+ * Context engine map is a new way of addressing engines when submitting batch-
+ * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
+ * inside the flags field of `struct drm_i915_gem_execbuffer2`.
+ *
+ * To use it, created GEM contexts need to be configured with a list of engines
+ * the user is intending to submit to. This is accomplished using the
+ * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
+ * i915_context_param_engines`.
+ *
+ * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
+ * configured map.
+ *
+ * Example of creating such context and submitting against it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
+ * { I915_ENGINE_CLASS_COPY, 0 } }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // We have now created a GEM context with two engines in the map:
+ * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
+ * // will not be accessible from this context.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
- struct i915_engine_class_instance engines[0];
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
@@ -1792,25 +2549,19 @@ struct i915_context_param_engines {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct drm_i915_gem_context_create_ext_setparam - Context parameter
+ * to set or query during context creation.
+ */
struct drm_i915_gem_context_create_ext_setparam {
-#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ /** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
- struct drm_i915_gem_context_param param;
-};
-struct drm_i915_gem_context_create_ext_clone {
-#define I915_CONTEXT_CREATE_EXT_CLONE 1
- struct i915_user_extension base;
- __u32 clone_id;
- __u32 flags;
-#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
-#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
-#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
-#define I915_CONTEXT_CLONE_SSEU (1u << 3)
-#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
-#define I915_CONTEXT_CLONE_VM (1u << 5)
-#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
- __u64 rsvd;
+ /**
+ * @param: Context parameter to set or query.
+ * See struct drm_i915_gem_context_param.
+ */
+ struct drm_i915_gem_context_param param;
};
struct drm_i915_gem_context_destroy {
@@ -1818,7 +2569,9 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};
-/*
+/**
+ * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
+ *
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
@@ -1828,20 +2581,23 @@ struct drm_i915_gem_context_destroy {
* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @id.
*
- * No flags are defined, with all bits reserved and must be zero.
- *
 * An extension chain may be provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
- * Destroys a previously created VM id, specified in @id.
+ * Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are allowed currently, and so must be zero.
*/
struct drm_i915_gem_vm_control {
+ /** @extensions: Zero-terminated chain of extensions. */
__u64 extensions;
+
+ /** @flags: reserved for future usage, currently MBZ */
__u32 flags;
+
+ /** @vm_id: Id of the VM created or to be destroyed */
__u32 vm_id;
};
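+
+/*
+ * A minimal sketch of creating a VM and attaching it to an existing context
+ * (ctx_id assumed), via DRM_IOCTL_I915_GEM_VM_CREATE and the
+ * I915_CONTEXT_PARAM_VM context parameter:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_vm_control vm = {};
+ * ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
+ *
+ * struct drm_i915_gem_context_param p = {
+ * .ctx_id = ctx_id,
+ * .param = I915_CONTEXT_PARAM_VM,
+ * .value = vm.vm_id,
+ * };
+ * ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
+ */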
@@ -1883,14 +2639,69 @@ struct drm_i915_reset_stats {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
+ *
+ * Userptr objects have several restrictions on what ioctls can be used with the
+ * object handle.
+ */
struct drm_i915_gem_userptr {
+ /**
+ * @user_ptr: The pointer to the allocated memory.
+ *
+ * Needs to be aligned to PAGE_SIZE.
+ */
__u64 user_ptr;
+
+ /**
+ * @user_size:
+ *
+ * The size in bytes for the allocated memory. This will also become the
+ * object size.
+ *
+	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE.
+ */
__u64 user_size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * I915_USERPTR_READ_ONLY:
+ *
+ * Mark the object as readonly, this also means GPU access can only be
+ * readonly. This is only supported on HW which supports readonly access
+ * through the GTT. If the HW can't support readonly access, an error is
+ * returned.
+ *
+ * I915_USERPTR_PROBE:
+ *
+ * Probe the provided @user_ptr range and validate that the @user_ptr is
+ * indeed pointing to normal memory and that the range is also valid.
+ * For example if some garbage address is given to the kernel, then this
+ * should complain.
+ *
+ * Returns -EFAULT if the probe failed.
+ *
+ * Note that this doesn't populate the backing pages, and also doesn't
+ * guarantee that the object will remain valid when the object is
+ * eventually used.
+ *
+ * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
+ * returns a non-zero value.
+ *
+ * I915_USERPTR_UNSYNCHRONIZED:
+ *
+ * NOT USED. Setting this flag will result in an error.
+ */
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_PROBE 0x2
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
- * Returned handle for the object.
+ * @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
@@ -1911,6 +2722,14 @@ enum drm_i915_oa_format {
I915_OA_FORMAT_A12_B8_C8,
I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+ /* DG2 */
+ I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
+ I915_OA_FORMAT_A24u40_A14u32_B8_C8,
+
+ /* MTL OAM */
+ I915_OAM_FORMAT_MPEC8u64_B8_C8,
+ I915_OAM_FORMAT_MPEC8u32_B8_C8,
+
I915_OA_FORMAT_MAX /* non-ABI */
};
@@ -1993,6 +2812,25 @@ enum drm_i915_perf_property_id {
*/
DRM_I915_PERF_PROP_POLL_OA_PERIOD,
+ /**
+ * Multiple engines may be mapped to the same OA unit. The OA unit is
+ * identified by class:instance of any engine mapped to it.
+ *
+ * This parameter specifies the engine class and must be passed along
+ * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
+ *
+ * This property is available in perf revision 6.
+ */
+ DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
+
+ /**
+ * This parameter specifies the engine instance and must be passed along
+ * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
+ *
+ * This property is available in perf revision 6.
+ */
+ DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
+
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
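+
+/*
+ * A minimal sketch of selecting an OA unit by engine when opening a perf
+ * stream (perf revision 6+); properties are passed as (key, value) pairs and
+ * the usual sampling properties are elided:
+ *
+ * .. code-block:: C
+ *
+ * __u64 props[] = {
+ * DRM_I915_PERF_PROP_OA_ENGINE_CLASS, I915_ENGINE_CLASS_RENDER,
+ * DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE, 0,
+ * // ... DRM_I915_PERF_PROP_SAMPLE_OA, metric set, OA format ...
+ * };
+ * struct drm_i915_perf_open_param param = {
+ * .num_properties = sizeof(props) / (2 * sizeof(__u64)),
+ * .properties_ptr = (uintptr_t)props,
+ * };
+ * int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+ */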
@@ -2012,7 +2850,7 @@ struct drm_i915_perf_open_param {
__u64 properties_ptr;
};
-/**
+/*
* Enable data capture for a stream that was either opened in a disabled state
* via I915_PERF_FLAG_DISABLED or was later disabled via
* I915_PERF_IOCTL_DISABLE.
@@ -2026,7 +2864,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
-/**
+/*
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
@@ -2035,7 +2873,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
-/**
+/*
* Change metrics_set captured by a stream.
*
* If the stream is bound to a specific context, the configuration change
@@ -2048,7 +2886,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
-/**
+/*
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -2097,162 +2935,384 @@ enum drm_i915_perf_record_type {
};
/**
+ * struct drm_i915_perf_oa_config
+ *
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
- /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ /**
+ * @uuid:
+ *
+ * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
+ */
char uuid[36];
+ /**
+ * @n_mux_regs:
+ *
+ * Number of mux regs in &mux_regs_ptr.
+ */
__u32 n_mux_regs;
+
+ /**
+ * @n_boolean_regs:
+ *
+ * Number of boolean regs in &boolean_regs_ptr.
+ */
__u32 n_boolean_regs;
+
+ /**
+ * @n_flex_regs:
+ *
+ * Number of flex regs in &flex_regs_ptr.
+ */
__u32 n_flex_regs;
- /*
- * These fields are pointers to tuples of u32 values (register address,
- * value). For example the expected length of the buffer pointed by
- * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ /**
+ * @mux_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_mux_regs).
*/
__u64 mux_regs_ptr;
+
+ /**
+ * @boolean_regs_ptr:
+ *
+	 * Pointer to tuples of u32 values (register address, value) for
+	 * boolean registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_boolean_regs).
+ */
__u64 boolean_regs_ptr;
+
+ /**
+ * @flex_regs_ptr:
+ *
+	 * Pointer to tuples of u32 values (register address, value) for flex
+	 * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_flex_regs).
+ */
__u64 flex_regs_ptr;
};
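+
+/*
+ * A minimal sketch of uploading a config with the
+ * DRM_IOCTL_I915_PERF_ADD_CONFIG ioctl, assuming n_mux (address, value) pairs
+ * in mux_regs and a 36-character UUID string in uuid_str; on success the
+ * ioctl returns the new metric set id:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_perf_oa_config cfg = {
+ * .n_mux_regs = n_mux,
+ * .mux_regs_ptr = (uintptr_t)mux_regs,
+ * };
+ * memcpy(cfg.uuid, uuid_str, sizeof(cfg.uuid));
+ * int metrics_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
+ */
+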
+/**
+ * struct drm_i915_query_item - An individual query for the kernel to process.
+ *
+ * The behaviour is determined by the @query_id. Note that exactly what
+ * @data_ptr is also depends on the specific @query_id.
+ */
struct drm_i915_query_item {
+ /**
+ * @query_id:
+ *
+ * The id for this query. Currently accepted query IDs are:
+ * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+ * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+ * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+ * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+ * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
+ */
__u64 query_id;
-#define DRM_I915_QUERY_TOPOLOGY_INFO 1
-#define DRM_I915_QUERY_ENGINE_INFO 2
-#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_HWCONFIG_BLOB 5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
+#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION 7
/* Must be kept compact -- no holes and well documented */
- /*
+ /**
+ * @length:
+ *
* When set to zero by userspace, this is filled with the size of the
- * data to be written at the data_ptr pointer. The kernel sets this
+ * data to be written at the @data_ptr pointer. The kernel sets this
* value to a negative value to signal an error on a particular query
* item.
*/
__s32 length;
- /*
- * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ /**
+ * @flags:
+ *
+ * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following:
*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
- * following :
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+	 * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
+ *
+ * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
+ * a struct i915_engine_class_instance that references a render engine.
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
- /*
- * Data will be written at the location pointed by data_ptr when the
- * value of length matches the length of the data to be written by the
+ /**
+ * @data_ptr:
+ *
+ * Data will be written at the location pointed by @data_ptr when the
+ * value of @length matches the length of the data to be written by the
* kernel.
*/
__u64 data_ptr;
};
+/**
+ * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
+ * kernel to fill out.
+ *
+ * Note that this is generally a two step process for each struct
+ * drm_i915_query_item in the array:
+ *
+ * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
+ * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
+ * kernel will then fill in the size, in bytes, which tells userspace how
+ * much memory it needs to allocate for the blob (say for an array of properties).
+ *
+ * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
+ * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
+ * the &drm_i915_query_item.length should still be the same as what the
+ * kernel previously set. At this point the kernel can fill in the blob.
+ *
+ * Note that for some query items it can make sense for userspace to just pass
+ * in a buffer/blob equal to or larger than the required size. In this case only
+ * a single ioctl call is needed. For some smaller query items this can work
+ * quite well.
+ */
struct drm_i915_query {
+ /** @num_items: The number of elements in the @items_ptr array */
__u32 num_items;
- /*
- * Unused for now. Must be cleared to zero.
+ /**
+ * @flags: Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * This points to an array of num_items drm_i915_query_item structures.
+ /**
+ * @items_ptr:
+ *
+ * Pointer to an array of struct drm_i915_query_item. The number of
+ * array elements is @num_items.
*/
__u64 items_ptr;
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
- *
- * data: contains the 3 pieces of information :
- *
- * - the slice mask with one bit per slice telling whether a slice is
- * available. The availability of slice X can be queried with the following
- * formula :
- *
- * (data[X / 8] >> (X % 8)) & 1
- *
- * - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. Gen12 has dual-subslices, which are
- * similar to two gen11 subslices. For gen12, this array represents dual-
- * subslices. The availability of subslice Y in slice X can be queried
- * with the following formula :
- *
- * (data[subslice_offset +
- * X * subslice_stride +
- * Y / 8] >> (Y % 8)) & 1
- *
- * - the EU mask for each subslice in each slice with one bit per EU telling
- * whether an EU is available. The availability of EU Z in subslice Y in
- * slice X can be queried with the following formula :
+/**
+ * struct drm_i915_query_topology_info
*
- * (data[eu_offset +
- * (X * max_subslices + Y) * eu_stride +
- * Z / 8] >> (Z % 8)) & 1
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
*/
struct drm_i915_query_topology_info {
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u16 flags;
+ /**
+ * @max_slices:
+ *
+ * The number of bits used to express the slice mask.
+ */
__u16 max_slices;
+
+ /**
+ * @max_subslices:
+ *
+ * The number of bits used to express the subslice mask.
+ */
__u16 max_subslices;
+
+ /**
+ * @max_eus_per_subslice:
+ *
+ * The number of bits in the EU mask that correspond to a single
+ * subslice's EUs.
+ */
__u16 max_eus_per_subslice;
- /*
+ /**
+ * @subslice_offset:
+ *
* Offset in data[] at which the subslice masks are stored.
*/
__u16 subslice_offset;
- /*
+ /**
+ * @subslice_stride:
+ *
* Stride at which each of the subslice masks for each slice are
* stored.
*/
__u16 subslice_stride;
- /*
+ /**
+ * @eu_offset:
+ *
* Offset in data[] at which the EU masks are stored.
*/
__u16 eu_offset;
- /*
+ /**
+ * @eu_stride:
+ *
* Stride at which each of the EU masks for each subslice are stored.
*/
__u16 eu_stride;
+ /**
+ * @data:
+ *
+	 * Contains 3 pieces of information:
+ *
+ * - The slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the
+	 *   following formula:
+ *
+ * .. code:: c
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * Starting with Xe_HP platforms, Intel hardware no longer has
+ * traditional slices so i915 will always report a single slice
+ * (hardcoded slicemask = 0x1) which contains all of the platform's
+ * subslices. I.e., the mask here does not reflect any of the newer
+ * hardware concepts such as "gslices" or "cslices" since userspace
+ * is capable of inferring those from the subslice mask.
+ *
+ * - The subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Starting with Gen12 we use the
+ * term "subslice" to refer to what the hardware documentation
+	 *   describes as a "dual-subslice". The availability of subslice Y
+	 *   in slice X can be queried with the following formula:
+ *
+ * .. code:: c
+ *
+ * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+ *
+ * - The EU mask for each subslice in each slice, with one bit per EU
+ * telling whether an EU is available. The availability of EU Z in
+	 *   subslice Y in slice X can be queried with the following formula:
+ *
+ * .. code:: c
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8
+ * ] >> (Z % 8)) & 1
+ */
__u8 data[];
};
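+
+/*
+ * A minimal sketch of the mask decoding formulas above, written as helpers
+ * over a topology blob already filled in by the kernel:
+ *
+ * .. code-block:: C
+ *
+ * static int slice_available(const struct drm_i915_query_topology_info *ti, int s)
+ * {
+ * return (ti->data[s / 8] >> (s % 8)) & 1;
+ * }
+ *
+ * static int subslice_available(const struct drm_i915_query_topology_info *ti, int s, int ss)
+ * {
+ * return (ti->data[ti->subslice_offset + s * ti->subslice_stride +
+ * ss / 8] >> (ss % 8)) & 1;
+ * }
+ */
+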
/**
+ * DOC: Engine Discovery uAPI
+ *
+ * Engine discovery uAPI is a way of enumerating physical engines present in a
+ * GPU associated with an open i915 DRM file descriptor. This supersedes the old
+ * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
+ * `I915_PARAM_HAS_BLT`.
+ *
+ * The need for this interface came starting with Icelake and newer GPUs, which
+ * started to establish a pattern of having multiple engines of the same class,
+ * where not all instances were always completely functionally equivalent.
+ *
+ * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
+ * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
+ *
+ * Example for getting the list of engines:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_engine_info *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of engines. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * //
+ * // Alternatively a large buffer can be allocated straightaway enabling
+ * // querying in one pass, in which case item.length should contain the
+ * // length of the provided buffer.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with info on all engines.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each engine in the array
+ * for (i = 0; i < info->num_engines; i++) {
+ * struct drm_i915_engine_info einfo = info->engines[i];
+ * u16 class = einfo.engine.engine_class;
+ * u16 instance = einfo.engine.engine_instance;
+ * ....
+ * }
+ *
+ * free(info);
+ *
+ * Each of the enumerated engines, apart from being defined by its class and
+ * instance (see `struct i915_engine_class_instance`), also can have flags and
+ * capabilities defined as documented in i915_drm.h.
+ *
+ * For instance video engines which support HEVC encoding will have the
+ * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
+ *
+ * Engine discovery only fully comes into its own when combined with the new way
+ * of addressing engines when submitting batch buffers using contexts with
+ * engine maps configured.
+ */
+
+/**
* struct drm_i915_engine_info
*
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
- /** Engine class and instance. */
+ /** @engine: Engine class and instance. */
struct i915_engine_class_instance engine;
- /** Reserved field. */
+ /** @rsvd0: Reserved field. */
__u32 rsvd0;
- /** Engine flags. */
+ /** @flags: Engine flags. */
__u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
- /** Capabilities of this engine. */
+ /** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
- /** Reserved fields. */
- __u64 rsvd1[4];
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
+ /** @rsvd1: Reserved fields. */
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
};
/**
@@ -2262,66 +3322,541 @@ struct drm_i915_engine_info {
* an array of struct drm_i915_engine_info structures.
*/
struct drm_i915_query_engine_info {
- /** Number of struct drm_i915_engine_info structs following. */
+ /** @num_engines: Number of struct drm_i915_engine_info structs following. */
__u32 num_engines;
- /** MBZ */
+ /** @rsvd: MBZ */
__u32 rsvd[3];
- /** Marker for drm_i915_engine_info structures. */
+ /** @engines: Marker for drm_i915_engine_info structures. */
struct drm_i915_engine_info engines[];
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
*/
struct drm_i915_query_perf_config {
union {
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
- * this fields to the number of configurations available.
+ /**
+ * @n_configs:
+ *
+ * When &drm_i915_query_item.flags ==
+		 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
+ * the number of configurations available.
*/
__u64 n_configs;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @config:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+ * value in this field as configuration identifier to decide
+		 * what data to write into @data.
*/
__u64 config;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @uuid:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+ * value in this field as configuration identifier to decide
+		 * what data to write into @data.
*
* String formatted like "%08x-%04x-%04x-%04x-%012x"
*/
char uuid[36];
};
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
- * write an array of __u64 of configuration identifiers.
+ /**
+ * @data:
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+ * i915 will write an array of __u64 of configuration identifiers.
*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
- * write a struct drm_i915_perf_oa_config. If the following fields of
- * drm_i915_perf_oa_config are set not set to 0, i915 will write into
- * the associated pointers the values of submitted when the
+	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
+	 * or %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will write a struct
+	 * drm_i915_perf_oa_config. If the following fields of struct
+	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
+	 * associated pointers the values submitted when the configuration was
+	 * created:
*
- * - n_mux_regs
- * - n_boolean_regs
- * - n_flex_regs
+ * - &drm_i915_perf_oa_config.n_mux_regs
+ * - &drm_i915_perf_oa_config.n_boolean_regs
+ * - &drm_i915_perf_oa_config.n_flex_regs
*/
__u8 data[];
};
+/**
+ * enum drm_i915_gem_memory_class - Supported memory classes
+ */
+enum drm_i915_gem_memory_class {
+ /** @I915_MEMORY_CLASS_SYSTEM: System memory */
+ I915_MEMORY_CLASS_SYSTEM = 0,
+ /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
+ I915_MEMORY_CLASS_DEVICE,
+};
+
+/**
+ * struct drm_i915_gem_memory_class_instance - Identify particular memory region
+ */
+struct drm_i915_gem_memory_class_instance {
+ /** @memory_class: See enum drm_i915_gem_memory_class */
+ __u16 memory_class;
+
+ /** @memory_instance: Which instance */
+ __u16 memory_instance;
+};
+
+/**
+ * struct drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct drm_i915_memory_region_info {
+ /** @region: The class:instance pair encoding */
+ struct drm_i915_gem_memory_class_instance region;
+
+ /** @rsvd0: MBZ */
+ __u32 rsvd0;
+
+ /**
+ * @probed_size: Memory probed by the driver
+ *
+ * Note that it should not be possible to ever encounter a zero value
+	 * here; also note that no current region type will ever return -1 here.
+ * Although for future region types, this might be a possibility. The
+ * same applies to the other size fields.
+ */
+ __u64 probed_size;
+
+ /**
+ * @unallocated_size: Estimate of memory remaining
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
+ * Without this (or if this is an older kernel) the value here will
+ * always equal the @probed_size. Note this is only currently tracked
+ * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
+ * will always equal the @probed_size).
+ */
+ __u64 unallocated_size;
+
+ union {
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+ struct {
+ /**
+ * @probed_cpu_visible_size: Memory probed by the driver
+ * that is CPU accessible.
+ *
+			 * This will always be <= @probed_size, and the
+ * remainder (if there is any) will not be CPU
+ * accessible.
+ *
+ * On systems without small BAR, the @probed_size will
+ * always equal the @probed_cpu_visible_size, since all
+ * of it will be CPU accessible.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the @probed_size).
+ *
+ * Note that if the value returned here is zero, then
+ * this must be an old kernel which lacks the relevant
+ * small-bar uAPI support (including
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
+ * such systems we should never actually end up with a
+ * small BAR configuration, assuming we are able to load
+ * the kernel module. Hence it should be safe to treat
+ * this the same as when @probed_cpu_visible_size ==
+ * @probed_size.
+ */
+ __u64 probed_cpu_visible_size;
+
+ /**
+ * @unallocated_cpu_visible_size: Estimate of CPU
+ * visible memory remaining.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the
+ * @probed_cpu_visible_size).
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always
+ * equal the @probed_cpu_visible_size. Note this is only
+ * currently tracked for I915_MEMORY_CLASS_DEVICE
+ * regions (for other types the value here will also
+ * always equal the @probed_cpu_visible_size).
+ *
+ * If this is an older kernel the value here will be
+ * zero, see also @probed_cpu_visible_size.
+ */
+ __u64 unallocated_cpu_visible_size;
+ };
+ };
+};
+
+/**
+ * struct drm_i915_query_memory_regions
+ *
+ * The region info query enumerates all regions known to the driver by filling
+ * in an array of struct drm_i915_memory_region_info structures.
+ *
+ * Example for getting the list of supported regions:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_memory_regions *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of regions. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with the all the region info.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each region in the array
+ * for (i = 0; i < info->num_regions; i++) {
+ * struct drm_i915_memory_region_info mr = info->regions[i];
+ * u16 class = mr.region.memory_class;
+ * u16 instance = mr.region.memory_instance;
+ *
+ * ....
+ * }
+ *
+ * free(info);
+ */
+struct drm_i915_query_memory_regions {
+ /** @num_regions: Number of supported regions */
+ __u32 num_regions;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @regions: Info about each supported region */
+ struct drm_i915_memory_region_info regions[];
+};
+
+/**
+ * struct drm_i915_query_guc_submission_version - query GuC submission interface version
+ */
+struct drm_i915_query_guc_submission_version {
+ /** @branch: Firmware branch version. */
+ __u32 branch;
+ /** @major: Firmware major version. */
+ __u32 major;
+ /** @minor: Firmware minor version. */
+ __u32 minor;
+ /** @patch: Firmware patch version. */
+ __u32 patch;
+};
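+
+/*
+ * A minimal sketch of reading the GuC submission version with a single,
+ * pre-sized query item (the two-step size probe is unnecessary here since
+ * the output size is fixed):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_guc_submission_version ver = {};
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION,
+ * .length = sizeof(ver),
+ * .data_ptr = (uintptr_t)&ver,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ */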
+
+/**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
+/**
+ * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls, one to create the object
+ * with gem_create, and another to apply various parameters, however this
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
+ */
+struct drm_i915_gem_create_ext {
+ /**
+ * @size: Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ *
+ * On platforms like DG2/ATS the kernel will always use 64K or larger
+ * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a
+ * minimum of 64K GTT alignment for such objects.
+ *
+ * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
+ * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
+ * where we had the following complications:
+ *
+ * 1) The entire PDE (which covers a 2MB virtual address range), must
+	 *    contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
+ * PDE is forbidden by the hardware.
+ *
+ * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+ * objects.
+ *
+ * However on actual production HW this was completely changed to now
+ * allow setting a TLB hint at the PTE level (see PS64), which is a lot
+ * more flexible than the above. With this the 2M restriction was
+ * dropped where we now only require 64K.
+ */
+ __u64 size;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: Optional flags.
+ *
+ * Supported values:
+ *
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+ * the object will need to be accessed via the CPU.
+ *
+ * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
+ * strictly required on configurations where some subset of the device
+ * memory is directly visible/mappable through the CPU (which we also
+ * call small BAR), like on some DG2+ systems. Note that this is quite
+	 * undesirable, but due to various factors like the client CPU, BIOS, etc.,
+ * it's something we can expect to see in the wild. See
+ * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
+ * determine if this system applies.
+ *
+ * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
+ * ensure the kernel can always spill the allocation to system memory,
+ * if the object can't be allocated in the mappable part of
+ * I915_MEMORY_CLASS_DEVICE.
+ *
+ * Also note that since the kernel only supports flat-CCS on objects
+ * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
+ * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+ * flat-CCS.
+ *
+ * Without this hint, the kernel will assume that non-mappable
+ * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+ * kernel can still migrate the object to the mappable part, as a last
+ * resort, if userspace ever CPU faults this object, but this might be
+ * expensive, and so ideally should be avoided.
+ *
+ * On older kernels which lack the relevant small-bar uAPI support (see
+ * also &drm_i915_memory_region_info.probed_cpu_visible_size),
+ * usage of the flag will result in an error, but it should NEVER be
+ * possible to end up with a small BAR configuration, assuming we can
+ * also successfully load the i915 kernel module. In such cases the
+ * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
+ * such there are zero restrictions on where the object can be placed.
+ */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
+ __u32 flags;
+
+ /**
+ * @extensions: The chain of extensions to apply to this object.
+ *
+ * This will be useful in the future when we need to support several
+ * different extensions, and we need to apply more than one when
+ * creating the object. See struct i915_user_extension.
+ *
+ * If we don't supply any extensions then we get the same old gem_create
+ * behaviour.
+ *
+ * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+ * struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
+ *
+ * For I915_GEM_CREATE_EXT_SET_PAT usage see
+ * struct drm_i915_gem_create_ext_set_pat.
+ */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+#define I915_GEM_CREATE_EXT_SET_PAT 2
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_memory_regions - The
+ * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
+ *
+ * Set the object with the desired set of placements/regions in priority
+ * order. Each entry must be unique and supported by the device.
+ *
+ * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
+ * an equivalent layout of class:instance pair encodings. See struct
+ * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
+ * query the supported regions for a device.
+ *
+ * As an example, on discrete devices, if we wish to set the placement as
+ * device local-memory we can do something like:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_memory_class_instance region_lmem = {
+ * .memory_class = I915_MEMORY_CLASS_DEVICE,
+ * .memory_instance = 0,
+ * };
+ * struct drm_i915_gem_create_ext_memory_regions regions = {
+ * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+ * .regions = (uintptr_t)&region_lmem,
+ * .num_regions = 1,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = 16 * PAGE_SIZE,
+ * .extensions = (uintptr_t)&regions,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ *
+ * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
+ * along with the final object size in &drm_i915_gem_create_ext.size, which
+ * should account for any rounding up, if required.
+ *
+ * Note that userspace has no means of knowing the current backing region
+ * for objects where @num_regions is larger than one. The kernel will only
+ * ensure that the priority order of the @regions array is honoured, either
+ * when initially placing the object, or when moving memory around due to
+ * memory pressure.
+ *
+ * On Flat-CCS capable HW, compression is supported only for objects residing
+ * in I915_MEMORY_CLASS_DEVICE. If a compressed object also lists another
+ * memory class in @regions and is migrated (by i915, due to memory
+ * constraints) to a non I915_MEMORY_CLASS_DEVICE region, i915 would need to
+ * decompress the content, but it doesn't have the information required to
+ * decompress userspace compressed objects.
+ *
+ * Hence i915 supports Flat-CCS only on objects which can reside solely in
+ * I915_MEMORY_CLASS_DEVICE regions.
+ */
+struct drm_i915_gem_create_ext_memory_regions {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @num_regions: Number of elements in the @regions array. */
+ __u32 num_regions;
+ /**
+ * @regions: The regions/placements array.
+ *
+ * An array of struct drm_i915_gem_memory_class_instance.
+ */
+ __u64 regions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled; in all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example of how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_set_pat - The
+ * I915_GEM_CREATE_EXT_SET_PAT extension.
+ *
+ * If this extension is provided, the specified caching policy (PAT index) is
+ * applied to the buffer object.
+ *
+ * Below is an example of how to create an object with a specific caching policy:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
+ * .pat_index = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&set_pat_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_set_pat {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /**
+ * @pat_index: PAT index to be set.
+ *
+ * The PAT index is a bit field in the Page Table Entry that controls
+ * caching behavior for GPU accesses. Its definition is platform
+ * dependent and can be found in the hardware specifications.
+ */
+ __u32 pat_index;
+ /** @rsvd: reserved for future use */
+ __u32 rsvd;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
new file mode 100644
index 000000000000..19a13468eca5
--- /dev/null
+++ b/include/uapi/drm/ivpu_accel.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __UAPI_IVPU_DRM_H__
+#define __UAPI_IVPU_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_IVPU_DRIVER_MAJOR 1
+#define DRM_IVPU_DRIVER_MINOR 0
+
+#define DRM_IVPU_GET_PARAM 0x00
+#define DRM_IVPU_SET_PARAM 0x01
+#define DRM_IVPU_BO_CREATE 0x02
+#define DRM_IVPU_BO_INFO 0x03
+#define DRM_IVPU_SUBMIT 0x05
+#define DRM_IVPU_BO_WAIT 0x06
+
+#define DRM_IOCTL_IVPU_GET_PARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)
+
+#define DRM_IOCTL_IVPU_SET_PARAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)
+
+#define DRM_IOCTL_IVPU_BO_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)
+
+#define DRM_IOCTL_IVPU_BO_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)
+
+#define DRM_IOCTL_IVPU_SUBMIT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)
+
+#define DRM_IOCTL_IVPU_BO_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
+
+/**
+ * DOC: contexts
+ *
+ * VPU contexts have a private virtual address space, job queues and a priority.
+ * Each context is identified by a unique ID. A context is created on open().
+ */
+
+#define DRM_IVPU_PARAM_DEVICE_ID 0
+#define DRM_IVPU_PARAM_DEVICE_REVISION 1
+#define DRM_IVPU_PARAM_PLATFORM_TYPE 2
+#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
+#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
+#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
+#define DRM_IVPU_PARAM_CONTEXT_ID 7
+#define DRM_IVPU_PARAM_FW_API_VERSION 8
+#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
+#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
+#define DRM_IVPU_PARAM_TILE_CONFIG 11
+#define DRM_IVPU_PARAM_SKU 12
+#define DRM_IVPU_PARAM_CAPABILITIES 13
+
+#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
+
+/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
+#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
+#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
+#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
+#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
+#define DRM_IVPU_JOB_PRIORITY_IDLE 1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
+
+/**
+ * DRM_IVPU_CAP_METRIC_STREAMER
+ *
+ * Metric streamer support. Provides sampling of various hardware performance
+ * metrics like DMA bandwidth and cache misses/hits. Can be used for profiling.
+ */
+#define DRM_IVPU_CAP_METRIC_STREAMER 1
+/**
+ * DRM_IVPU_CAP_DMA_MEMORY_RANGE
+ *
+ * The driver is capable of allocating a separate memory range
+ * accessible by hardware DMA.
+ */
+#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
+
+/**
+ * struct drm_ivpu_param - Get/Set VPU parameters
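+ *
+ * A minimal sketch (assuming an open device fd in ``fd``) of reading one
+ * parameter:
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_ivpu_param args = {
+ *		.param = DRM_IVPU_PARAM_DEVICE_ID,
+ *	};
+ *
+ *	int err = ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args);
+ *	/* on success, args.value holds the PCI Device ID */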
+ */
+struct drm_ivpu_param {
+ /**
+ * @param:
+ *
+ * Supported params:
+ *
+ * %DRM_IVPU_PARAM_DEVICE_ID:
+ * PCI Device ID of the VPU device (read-only)
+ *
+ * %DRM_IVPU_PARAM_DEVICE_REVISION:
+ * VPU device revision (read-only)
+ *
+ * %DRM_IVPU_PARAM_PLATFORM_TYPE:
+ * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or a device specific
+ * platform type when executing on a simulator or emulator (read-only)
+ *
+ * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+ * Current PLL frequency (read-only)
+ *
+ * %DRM_IVPU_PARAM_NUM_CONTEXTS:
+ * Maximum number of simultaneously existing contexts (read-only)
+ *
+ * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
+ * Lowest VPU virtual address available in the current context (read-only)
+ *
+ * %DRM_IVPU_PARAM_CONTEXT_ID:
+ * Current context ID, always greater than 0 (read-only)
+ *
+ * %DRM_IVPU_PARAM_FW_API_VERSION:
+ * Firmware API version array (read-only)
+ *
+ * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
+ * Heartbeat value from an engine (read-only).
+ * Engine ID (i.e. DRM_IVPU_ENGINE_COMPUTE) is given via index.
+ *
+ * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
+ * Device-unique inference ID (read-only)
+ *
+ * %DRM_IVPU_PARAM_TILE_CONFIG:
+ * VPU tile configuration (read-only)
+ *
+ * %DRM_IVPU_PARAM_SKU:
+ * VPU SKU ID (read-only)
+ *
+ * %DRM_IVPU_PARAM_CAPABILITIES:
+ * Supported capabilities (read-only)
+ */
+ __u32 param;
+
+ /** @index: Index for params that have multiple instances */
+ __u32 index;
+
+ /** @value: Param value */
+ __u64 value;
+};
+
+#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
+#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
+#define DRM_IVPU_BO_MAPPABLE 0x00000002
+#define DRM_IVPU_BO_DMA_MEM 0x00000004
+
+#define DRM_IVPU_BO_CACHED 0x00000000
+#define DRM_IVPU_BO_UNCACHED 0x00010000
+#define DRM_IVPU_BO_WC 0x00020000
+#define DRM_IVPU_BO_CACHE_MASK 0x00030000
+
+#define DRM_IVPU_BO_FLAGS \
+ (DRM_IVPU_BO_HIGH_MEM | \
+ DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_DMA_MEM | \
+ DRM_IVPU_BO_CACHE_MASK)
+
+/**
+ * struct drm_ivpu_bo_create - Create BO backed by SHMEM
+ *
+ * Create GEM buffer object allocated in SHMEM memory.
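+ *
+ * A minimal sketch (assuming an open device fd in ``fd``) of allocating a
+ * host-mappable buffer:
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_ivpu_bo_create args = {
+ *		.size = 4096,
+ *		.flags = DRM_IVPU_BO_MAPPABLE,
+ *	};
+ *
+ *	int err = ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args);
+ *	/* on success, args.handle and args.vpu_addr are filled in */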
+ */
+struct drm_ivpu_bo_create {
+ /** @size: The size in bytes of the allocated memory */
+ __u64 size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * %DRM_IVPU_BO_HIGH_MEM:
+ *
+ * Allocate VPU address from >4GB range.
+ * A buffer object with a VPU address >4GB can always be accessed by the
+ * VPU DMA engine, but some HW generations may not be able to access
+ * this memory from the firmware running on the VPU management processor.
+ * Suitable for input, output and some scratch buffers.
+ *
+ * %DRM_IVPU_BO_MAPPABLE:
+ *
+ * Buffer object can be mapped using mmap().
+ *
+ * %DRM_IVPU_BO_CACHED:
+ *
+ * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
+ * This is the default caching mode.
+ *
+ * %DRM_IVPU_BO_UNCACHED:
+ *
+ * Not supported. Use DRM_IVPU_BO_WC instead.
+ *
+ * %DRM_IVPU_BO_WC:
+ *
+ * Allocated BO will use write combining buffer for writes but reads will be
+ * uncached.
+ */
+ __u32 flags;
+
+ /** @handle: Returned GEM object handle */
+ __u32 handle;
+
+ /** @vpu_addr: Returned VPU virtual address */
+ __u64 vpu_addr;
+};
+
+/**
+ * struct drm_ivpu_bo_info - Query buffer object info
+ */
+struct drm_ivpu_bo_info {
+ /** @handle: Handle of the queried BO */
+ __u32 handle;
+
+ /** @flags: Returned flags used to create the BO */
+ __u32 flags;
+
+ /** @vpu_addr: Returned VPU virtual address */
+ __u64 vpu_addr;
+
+ /**
+ * @mmap_offset:
+ *
+ * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
+ */
+ __u64 mmap_offset;
+
+ /** @size: Returned GEM object size, aligned to PAGE_SIZE */
+ __u64 size;
+};
+
+/* drm_ivpu_submit engines */
+#define DRM_IVPU_ENGINE_COMPUTE 0
+#define DRM_IVPU_ENGINE_COPY 1
+
+/**
+ * struct drm_ivpu_submit - Submit commands to the VPU
+ *
+ * Execute a single command buffer on a given VPU engine.
+ * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
+ *
+ * User space may wait for job completion using the %DRM_IVPU_BO_WAIT ioctl.
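+ *
+ * A minimal sketch (``cmd_bo`` is a hypothetical handle to a prepared
+ * command buffer) of submitting a single job:
+ *
+ * .. code-block:: C
+ *
+ *	__u32 handles[] = { cmd_bo };
+ *	struct drm_ivpu_submit args = {
+ *		.buffers_ptr = (uintptr_t)handles,
+ *		.buffer_count = 1,
+ *		.engine = DRM_IVPU_ENGINE_COMPUTE,
+ *		.commands_offset = 0,
+ *		.priority = DRM_IVPU_JOB_PRIORITY_DEFAULT,
+ *	};
+ *
+ *	int err = ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args);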
+ */
+struct drm_ivpu_submit {
+ /**
+ * @buffers_ptr:
+ *
+ * A pointer to a u32 array of GEM handles of the BOs required for this job.
+ * The number of elements in the array must be equal to the value given by @buffer_count.
+ *
+ * The first BO is the command buffer. The rest of the array has to contain all
+ * BOs referenced from the command buffer.
+ */
+ __u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in the @buffers_ptr array */
+ __u32 buffer_count;
+
+ /**
+ * @engine: Select the engine this job should be executed on
+ *
+ * %DRM_IVPU_ENGINE_COMPUTE:
+ *
+ * Performs Deep Learning Neural Compute Inference Operations
+ *
+ * %DRM_IVPU_ENGINE_COPY:
+ *
+ * Performs memory copy operations to/from system memory allocated for VPU
+ */
+ __u32 engine;
+
+ /** @flags: Reserved for future use - must be zero */
+ __u32 flags;
+
+ /**
+ * @commands_offset:
+ *
+ * Offset inside the first buffer in @buffers_ptr containing commands
+ * to be executed. The offset has to be 8-byte aligned.
+ */
+ __u32 commands_offset;
+
+ /**
+ * @priority:
+ *
+ * Priority to be set for the related job command queue; can be one of the following:
+ * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+ * %DRM_IVPU_JOB_PRIORITY_IDLE
+ * %DRM_IVPU_JOB_PRIORITY_NORMAL
+ * %DRM_IVPU_JOB_PRIORITY_FOCUS
+ * %DRM_IVPU_JOB_PRIORITY_REALTIME
+ */
+ __u32 priority;
+};
+
+/* drm_ivpu_bo_wait job status codes */
+#define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256
+
+/**
+ * struct drm_ivpu_bo_wait - Wait for BO to become inactive
+ *
+ * Blocks until a given buffer object becomes inactive.
+ * With @timeout_ns set to 0, returns immediately.
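+ *
+ * A minimal sketch (``cmd_bo`` and ``deadline_ns`` are hypothetical) of
+ * waiting for a previously submitted job:
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_ivpu_bo_wait args = {
+ *		.handle = cmd_bo,
+ *		.timeout_ns = deadline_ns,
+ *	};
+ *
+ *	int err = ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &args);
+ *	/* on success, compare args.job_status with DRM_IVPU_JOB_STATUS_SUCCESS */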
+ */
+struct drm_ivpu_bo_wait {
+ /** @handle: Handle to the buffer object to be waited on */
+ __u32 handle;
+
+ /** @flags: Reserved for future use - must be zero */
+ __u32 flags;
+
+ /** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
+ __s64 timeout_ns;
+
+ /**
+ * @job_status:
+ *
+ * Job status code which is updated after the job is completed.
+ * %DRM_IVPU_JOB_STATUS_SUCCESS on success or a device specific error otherwise.
+ * Valid only if @handle points to a command buffer.
+ */
+ __u32 job_status;
+
+ /** @pad: Padding - must be zero */
+ __u32 pad;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __UAPI_IVPU_DRM_H__ */
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
deleted file mode 100644
index 8c4337548ab5..000000000000
--- a/include/uapi/drm/mga_drm.h
+++ /dev/null
@@ -1,427 +0,0 @@
-/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
- * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- *
- * Rewritten by:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#ifndef __MGA_DRM_H__
-#define __MGA_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (mga_sarea.h)
- */
-
-#ifndef __MGA_SAREA_DEFINES__
-#define __MGA_SAREA_DEFINES__
-
-/* WARP pipe flags
- */
-#define MGA_F 0x1 /* fog */
-#define MGA_A 0x2 /* alpha */
-#define MGA_S 0x4 /* specular */
-#define MGA_T2 0x8 /* multitexture */
-
-#define MGA_WARP_TGZ 0
-#define MGA_WARP_TGZF (MGA_F)
-#define MGA_WARP_TGZA (MGA_A)
-#define MGA_WARP_TGZAF (MGA_F|MGA_A)
-#define MGA_WARP_TGZS (MGA_S)
-#define MGA_WARP_TGZSF (MGA_S|MGA_F)
-#define MGA_WARP_TGZSA (MGA_S|MGA_A)
-#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
-#define MGA_WARP_T2GZ (MGA_T2)
-#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
-#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
-#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
-#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
-#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
-#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
-#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
-
-#define MGA_MAX_G200_PIPES 8 /* no multitex */
-#define MGA_MAX_G400_PIPES 16
-#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
-#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
-
-#define MGA_CARD_TYPE_G200 1
-#define MGA_CARD_TYPE_G400 2
-#define MGA_CARD_TYPE_G450 3 /* not currently used */
-#define MGA_CARD_TYPE_G550 4
-
-#define MGA_FRONT 0x1
-#define MGA_BACK 0x2
-#define MGA_DEPTH 0x4
-
-/* What needs to be changed for the current vertex dma buffer?
- */
-#define MGA_UPLOAD_CONTEXT 0x1
-#define MGA_UPLOAD_TEX0 0x2
-#define MGA_UPLOAD_TEX1 0x4
-#define MGA_UPLOAD_PIPE 0x8
-#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
-#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
-#define MGA_UPLOAD_2D 0x40
-#define MGA_WAIT_AGE 0x80 /* handled client-side */
-#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
-#if 0
-#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
- quiescent */
-#endif
-
-/* 32 buffers of 64k each, total 2 meg.
- */
-#define MGA_BUFFER_SIZE (1 << 16)
-#define MGA_NUM_BUFFERS 128
-
-/* Keep these small for testing.
- */
-#define MGA_NR_SAREA_CLIPRECTS 8
-
-/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
- * regions, subject to a minimum region size of (1<<16) == 64k.
- *
- * Clients may subdivide regions internally, but when sharing between
- * clients, the region size is the minimum granularity.
- */
-
-#define MGA_CARD_HEAP 0
-#define MGA_AGP_HEAP 1
-#define MGA_NR_TEX_HEAPS 2
-#define MGA_NR_TEX_REGIONS 16
-#define MGA_LOG_MIN_TEX_REGION_SIZE 16
-
-#define DRM_MGA_IDLE_RETRY 2048
-
-#endif /* __MGA_SAREA_DEFINES__ */
-
-/* Setup registers for 3D context
- */
-typedef struct {
- unsigned int dstorg;
- unsigned int maccess;
- unsigned int plnwt;
- unsigned int dwgctl;
- unsigned int alphactrl;
- unsigned int fogcolor;
- unsigned int wflag;
- unsigned int tdualstage0;
- unsigned int tdualstage1;
- unsigned int fcol;
- unsigned int stencil;
- unsigned int stencilctl;
-} drm_mga_context_regs_t;
-
-/* Setup registers for 2D, X server
- */
-typedef struct {
- unsigned int pitch;
-} drm_mga_server_regs_t;
-
-/* Setup registers for each texture unit
- */
-typedef struct {
- unsigned int texctl;
- unsigned int texctl2;
- unsigned int texfilter;
- unsigned int texbordercol;
- unsigned int texorg;
- unsigned int texwidth;
- unsigned int texheight;
- unsigned int texorg1;
- unsigned int texorg2;
- unsigned int texorg3;
- unsigned int texorg4;
-} drm_mga_texture_regs_t;
-
-/* General aging mechanism
- */
-typedef struct {
- unsigned int head; /* Position of head pointer */
- unsigned int wrap; /* Primary DMA wrap count */
-} drm_mga_age_t;
-
-typedef struct _drm_mga_sarea {
- /* The channel for communication of state information to the kernel
- * on firing a vertex dma buffer.
- */
- drm_mga_context_regs_t context_state;
- drm_mga_server_regs_t server_state;
- drm_mga_texture_regs_t tex_state[2];
- unsigned int warp_pipe;
- unsigned int dirty;
- unsigned int vertsize;
-
- /* The current cliprects, or a subset thereof.
- */
- struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
- unsigned int nbox;
-
- /* Information about the most recently used 3d drawable. The
- * client fills in the req_* fields, the server fills in the
- * exported_ fields and puts the cliprects into boxes, above.
- *
- * The client clears the exported_drawable field before
- * clobbering the boxes data.
- */
- unsigned int req_drawable; /* the X drawable id */
- unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
-
- unsigned int exported_drawable;
- unsigned int exported_index;
- unsigned int exported_stamp;
- unsigned int exported_buffers;
- unsigned int exported_nfront;
- unsigned int exported_nback;
- int exported_back_x, exported_front_x, exported_w;
- int exported_back_y, exported_front_y, exported_h;
- struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
-
- /* Counters for aging textures and for client-side throttling.
- */
- unsigned int status[4];
- unsigned int last_wrap;
-
- drm_mga_age_t last_frame;
- unsigned int last_enqueue; /* last time a buffer was enqueued */
- unsigned int last_dispatch; /* age of the most recently dispatched buffer */
- unsigned int last_quiescent; /* */
-
- /* LRU lists for texture memory in agp space and on the card.
- */
- struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
- unsigned int texAge[MGA_NR_TEX_HEAPS];
-
- /* Mechanism to validate card state.
- */
- int ctxOwner;
-} drm_mga_sarea_t;
-
-/* MGA specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_MGA_INIT 0x00
-#define DRM_MGA_FLUSH 0x01
-#define DRM_MGA_RESET 0x02
-#define DRM_MGA_SWAP 0x03
-#define DRM_MGA_CLEAR 0x04
-#define DRM_MGA_VERTEX 0x05
-#define DRM_MGA_INDICES 0x06
-#define DRM_MGA_ILOAD 0x07
-#define DRM_MGA_BLIT 0x08
-#define DRM_MGA_GETPARAM 0x09
-
-/* 3.2:
- * ioctls for operating on fences.
- */
-#define DRM_MGA_SET_FENCE 0x0a
-#define DRM_MGA_WAIT_FENCE 0x0b
-#define DRM_MGA_DMA_BOOTSTRAP 0x0c
-
-#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
-#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
-#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
-#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
-#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
-#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
-#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
-#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
-#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
-#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
-#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
-#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
-
-typedef struct _drm_mga_warp_index {
- int installed;
- unsigned long phys_addr;
- int size;
-} drm_mga_warp_index_t;
-
-typedef struct drm_mga_init {
- enum {
- MGA_INIT_DMA = 0x01,
- MGA_CLEANUP_DMA = 0x02
- } func;
-
- unsigned long sarea_priv_offset;
-
- int chipset;
- int sgram;
-
- unsigned int maccess;
-
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
-
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
-
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
-
- unsigned long fb_offset;
- unsigned long mmio_offset;
- unsigned long status_offset;
- unsigned long warp_offset;
- unsigned long primary_offset;
- unsigned long buffers_offset;
-} drm_mga_init_t;
-
-typedef struct drm_mga_dma_bootstrap {
- /**
- * \name AGP texture region
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
- * be filled in with the actual AGP texture settings.
- *
- * \warning
- * If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode
- * is zero, it means that PCI memory (most likely through the use of
- * an IOMMU) is being used for "AGP" textures.
- */
- /*@{ */
- unsigned long texture_handle; /**< Handle used to map AGP textures. */
- __u32 texture_size; /**< Size of the AGP texture region. */
- /*@} */
-
- /**
- * Requested size of the primary DMA region.
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
- * filled in with the actual AGP mode. If AGP was not available
- */
- __u32 primary_size;
-
- /**
- * Requested number of secondary DMA buffers.
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
- * filled in with the actual number of secondary DMA buffers
- * allocated. Particularly when PCI DMA is used, this may be
- * (subtantially) less than the number requested.
- */
- __u32 secondary_bin_count;
-
- /**
- * Requested size of each secondary DMA buffer.
- *
- * While the kernel \b is free to reduce
- * dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
- * to reduce dma_mga_dma_bootstrap::secondary_bin_size.
- */
- __u32 secondary_bin_size;
-
- /**
- * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
- * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
- * zero, it means that PCI DMA should be used, even if AGP is
- * possible.
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
- * filled in with the actual AGP mode. If AGP was not available
- * (i.e., PCI DMA was used), this value will be zero.
- */
- __u32 agp_mode;
-
- /**
- * Desired AGP GART size, measured in megabytes.
- */
- __u8 agp_size;
-} drm_mga_dma_bootstrap_t;
-
-typedef struct drm_mga_clear {
- unsigned int flags;
- unsigned int clear_color;
- unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask;
-} drm_mga_clear_t;
-
-typedef struct drm_mga_vertex {
- int idx; /* buffer to queue */
- int used; /* bytes in use */
- int discard; /* client finished with buffer? */
-} drm_mga_vertex_t;
-
-typedef struct drm_mga_indices {
- int idx; /* buffer to queue */
- unsigned int start;
- unsigned int end;
- int discard; /* client finished with buffer? */
-} drm_mga_indices_t;
-
-typedef struct drm_mga_iload {
- int idx;
- unsigned int dstorg;
- unsigned int length;
-} drm_mga_iload_t;
-
-typedef struct _drm_mga_blit {
- unsigned int planemask;
- unsigned int srcorg;
- unsigned int dstorg;
- int src_pitch, dst_pitch;
- int delta_sx, delta_sy;
- int delta_dx, delta_dy;
- int height, ydir; /* flip image vertically */
- int source_pitch, dest_pitch;
-} drm_mga_blit_t;
-
-/* 3.1: An ioctl to get parameters that aren't available to the 3d
- * client any other way.
- */
-#define MGA_PARAM_IRQ_NR 1
-
-/* 3.2: Query the actual card type. The DDX only distinguishes between
- * G200 chips and non-G200 chips, which it calls G400. It turns out that
- * there are some very sublte differences between the G4x0 chips and the G550
- * chips. Using this parameter query, a client-side driver can detect the
- * difference between a G4x0 and a G550.
- */
-#define MGA_PARAM_CARD_TYPE 2
-
-typedef struct drm_mga_getparam {
- int param;
- void __user *value;
-} drm_mga_getparam_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index a6c1f3eb2623..d8a6b3472760 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -67,20 +67,41 @@ struct drm_msm_timespec {
__s64 tv_nsec; /* nanoseconds */
};
-#define MSM_PARAM_GPU_ID 0x01
-#define MSM_PARAM_GMEM_SIZE 0x02
-#define MSM_PARAM_CHIP_ID 0x03
-#define MSM_PARAM_MAX_FREQ 0x04
-#define MSM_PARAM_TIMESTAMP 0x05
-#define MSM_PARAM_GMEM_BASE 0x06
-#define MSM_PARAM_NR_RINGS 0x07
-#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */
-#define MSM_PARAM_FAULTS 0x09
+/* Below "RO" indicates a read-only param, "WO" indicates write-only, and
+ * "RW" indicates a param that can be both read (GET_PARAM) and written
+ * (SET_PARAM)
+ */
+#define MSM_PARAM_GPU_ID 0x01 /* RO */
+#define MSM_PARAM_GMEM_SIZE 0x02 /* RO */
+#define MSM_PARAM_CHIP_ID 0x03 /* RO */
+#define MSM_PARAM_MAX_FREQ 0x04 /* RO */
+#define MSM_PARAM_TIMESTAMP 0x05 /* RO */
+#define MSM_PARAM_GMEM_BASE 0x06 /* RO */
+#define MSM_PARAM_PRIORITIES 0x07 /* RO: The # of priority levels */
+#define MSM_PARAM_PP_PGTABLE 0x08 /* RO: Deprecated, always returns zero */
+#define MSM_PARAM_FAULTS 0x09 /* RO */
+#define MSM_PARAM_SUSPENDS 0x0a /* RO */
+#define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */
+#define MSM_PARAM_COMM 0x0c /* WO: override for task->comm */
+#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
+#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
+#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
+
+/* For backwards compat. The original support for preemption was based on
+ * a single ring per priority level, so the # of priority levels equals the #
+ * of rings. With drm/scheduler providing additional levels of priority,
+ * the number of priorities is greater than the # of rings. The param is
+ * renamed to better reflect this.
+ */
+#define MSM_PARAM_NR_RINGS MSM_PARAM_PRIORITIES
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
__u32 param; /* in, MSM_PARAM_x */
__u64 value; /* out (get_param) or in (set_param) */
+ __u32 len; /* zero for non-pointer params */
+ __u32 pad; /* must be zero */
};
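+
+/* A minimal sketch (values illustrative; assumes a DRM fd in 'fd') of writing
+ * a pointer param using the new @len field:
+ *
+ *    static const char comm[] = "myapp";
+ *    struct drm_msm_param req = {
+ *       .pipe  = MSM_PIPE_3D0,
+ *       .param = MSM_PARAM_COMM,
+ *       .value = (uintptr_t)comm,
+ *       .len   = sizeof(comm),
+ *    };
+ *
+ *    ioctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
+ */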
/*
@@ -93,13 +114,12 @@ struct drm_msm_param {
/* cache modes */
#define MSM_BO_CACHED 0x00010000
#define MSM_BO_WC 0x00020000
-#define MSM_BO_UNCACHED 0x00040000
+#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */
+#define MSM_BO_CACHED_COHERENT 0x00080000
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
- MSM_BO_CACHED | \
- MSM_BO_WC | \
- MSM_BO_UNCACHED)
+ MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
__u64 size; /* in */
@@ -118,6 +138,10 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
+#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
+#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
+#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
+#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
@@ -130,8 +154,13 @@ struct drm_msm_gem_info {
#define MSM_PREP_READ 0x01
#define MSM_PREP_WRITE 0x02
#define MSM_PREP_NOSYNC 0x04
+#define MSM_PREP_BOOST 0x08
-#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
+#define MSM_PREP_FLAGS (MSM_PREP_READ | \
+ MSM_PREP_WRITE | \
+ MSM_PREP_NOSYNC | \
+ MSM_PREP_BOOST | \
+ 0)
struct drm_msm_gem_cpu_prep {
__u32 handle; /* in */
@@ -160,7 +189,11 @@ struct drm_msm_gem_cpu_fini {
*/
struct drm_msm_gem_submit_reloc {
__u32 submit_offset; /* in, offset from submit_bo */
+#ifdef __cplusplus
+ __u32 _or; /* in, value OR'd with result */
+#else
__u32 or; /* in, value OR'd with result */
+#endif
__s32 shift; /* in, amount of left shift (can be negative) */
__u32 reloc_idx; /* in, index of reloc_bo buffer */
__u64 reloc_offset; /* in, offset from start of reloc_bo */
@@ -201,10 +234,12 @@ struct drm_msm_gem_submit_cmd {
#define MSM_SUBMIT_BO_READ 0x0001
#define MSM_SUBMIT_BO_WRITE 0x0002
#define MSM_SUBMIT_BO_DUMP 0x0004
+#define MSM_SUBMIT_BO_NO_IMPLICIT 0x0008
#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
MSM_SUBMIT_BO_WRITE | \
- MSM_SUBMIT_BO_DUMP)
+ MSM_SUBMIT_BO_DUMP | \
+ MSM_SUBMIT_BO_NO_IMPLICIT)
struct drm_msm_gem_submit_bo {
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
@@ -219,6 +254,7 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */
#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */
+#define MSM_SUBMIT_FENCE_SN_IN 0x02000000 /* userspace passes in seqno fence */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
@@ -226,6 +262,7 @@ struct drm_msm_gem_submit_bo {
MSM_SUBMIT_SUDO | \
MSM_SUBMIT_SYNCOBJ_IN | \
MSM_SUBMIT_SYNCOBJ_OUT | \
+ MSM_SUBMIT_FENCE_SN_IN | \
0)
#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
@@ -245,7 +282,7 @@ struct drm_msm_gem_submit_syncobj {
*/
struct drm_msm_gem_submit {
__u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
- __u32 fence; /* out */
+ __u32 fence; /* out (or in with MSM_SUBMIT_FENCE_SN_IN flag) */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
__u64 bos; /* in, ptr to array of submit_bo's */
@@ -261,6 +298,11 @@ struct drm_msm_gem_submit {
};
+#define MSM_WAIT_FENCE_BOOST 0x00000001
+#define MSM_WAIT_FENCE_FLAGS ( \
+ MSM_WAIT_FENCE_BOOST | \
+ 0)
+
/* The normal way to synchronize with the GPU is just to CPU_PREP on
* a buffer if you need to access it from the CPU (other cmdstream
* submission from same or other contexts, PAGE_FLIP ioctl, etc, all
@@ -270,7 +312,7 @@ struct drm_msm_gem_submit {
*/
struct drm_msm_wait_fence {
__u32 fence; /* in */
- __u32 pad;
+ __u32 flags; /* in, bitmask of MSM_WAIT_FENCE_x */
struct drm_msm_timespec timeout; /* in */
__u32 queueid; /* in, submitqueue id */
};
@@ -304,6 +346,10 @@ struct drm_msm_gem_madvise {
#define MSM_SUBMITQUEUE_FLAGS (0)
+/*
+ * The submitqueue priority should be between 0 and MSM_PARAM_PRIORITIES-1;
+ * a lower numeric value is higher priority.
+ */
struct drm_msm_submitqueue {
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
__u32 prio; /* in, Priority level */
@@ -321,9 +367,7 @@ struct drm_msm_submitqueue_query {
};
#define DRM_MSM_GET_PARAM 0x00
-/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
- */
#define DRM_MSM_GEM_NEW 0x02
#define DRM_MSM_GEM_INFO 0x03
#define DRM_MSM_GEM_CPU_PREP 0x04
@@ -339,6 +383,7 @@ struct drm_msm_submitqueue_query {
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 853a327433d3..cd84227f1b42 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -33,11 +33,75 @@
extern "C" {
#endif
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
+ *
+ * Query the maximum number of IBs that can be pushed through a single
+ * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
+ * ioctl().
+ */
+#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
+ *
+ * Query the VRAM BAR size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_USED
+ *
+ * Get the amount of VRAM currently used.
+ */
+#define NOUVEAU_GETPARAM_VRAM_USED 19
+
+struct drm_nouveau_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_nouveau_channel_alloc {
+ __u32 fb_ctxdma_handle;
+ __u32 tt_ctxdma_handle;
+
+ __s32 channel;
+ __u32 pushbuf_domains;
+
+ /* Notifier memory */
+ __u32 notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ __u32 handle;
+ __u32 grclass;
+ } subchan[8];
+ __u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ __s32 channel;
+};
+
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@@ -98,6 +162,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@@ -126,16 +191,231 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
-#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as a synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+ /**
+ * @flags: the flags for a sync object
+ *
+ * The first 8 bits are used to determine the type of the sync object.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+ /**
+ * @handle: the handle of the sync object
+ */
+ __u32 handle;
+ /**
+ * @timeline_value:
+ *
+ * The timeline point of the sync object in case the syncobj is of
+ * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards, DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+ /**
+ * @kernel_managed_addr: start address of the kernel managed VA space
+ * region
+ */
+ __u64 kernel_managed_addr;
+ /**
+ * @kernel_managed_size: size of the kernel managed VA space region in
+ * bytes
+ */
+ __u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+ /**
+ * @op: the operation type
+ *
+ * Supported values:
+ *
+ * %DRM_NOUVEAU_VM_BIND_OP_MAP - Map a GEM object to the GPU's VA
+ * space. Optionally, the &DRM_NOUVEAU_VM_BIND_SPARSE flag can be
+ * passed to instruct the kernel to create sparse mappings for the
+ * given range.
+ *
+ * %DRM_NOUVEAU_VM_BIND_OP_UNMAP - Unmap an existing mapping in the
+ * GPU's VA space. If the region the mapping is located in is a
+ * sparse region, new sparse mappings are created where the unmapped
+ * (memory backed) mapping was mapped previously. To remove a sparse
+ * region the &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
+ */
+ __u32 op;
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind_op
+ *
+ * Supported values:
+ *
+ * %DRM_NOUVEAU_VM_BIND_SPARSE - Indicates that an allocated VA
+ * space region should be sparse.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+ /**
+ * @handle: the handle of the DRM GEM object to map
+ */
+ __u32 handle;
+ /**
+ * @pad: 32 bit padding, should be 0
+ */
+ __u32 pad;
+ /**
+ * @addr:
+ *
+ * the address the VA space region or (memory backed) mapping should be mapped to
+ */
+ __u64 addr;
+ /**
+ * @bo_offset: the offset within the BO backing the mapping
+ */
+ __u64 bo_offset;
+ /**
+ * @range: the size of the requested mapping in bytes
+ */
+ __u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
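+ *
+ * A minimal sketch (``bo_handle``, address and size values are illustrative)
+ * of a synchronous map of a single GEM object:
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_nouveau_vm_bind_op op = {
+ *		.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
+ *		.handle = bo_handle,
+ *		.addr = 0x1000000,
+ *		.bo_offset = 0,
+ *		.range = 0x10000,
+ *	};
+ *	struct drm_nouveau_vm_bind bind = {
+ *		.op_count = 1,
+ *		.op_ptr = (uintptr_t)&op,
+ *	};
+ *
+ *	int err = ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);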
+ */
+struct drm_nouveau_vm_bind {
+ /**
+ * @op_count: the number of &drm_nouveau_vm_bind_op
+ */
+ __u32 op_count;
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+ *
+ * Supported values:
+ *
+ * %DRM_NOUVEAU_VM_BIND_RUN_ASYNC - Indicates that the given VM_BIND
+ * operation should be executed asynchronously by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated
+ * operations synchronously and doesn't accept any &drm_nouveau_sync
+ * objects.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+ */
+ __u64 op_ptr;
+};
+
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+ /**
+ * @va: the virtual address of the push buffer mapping
+ */
+ __u64 va;
+ /**
+ * @va_len: the length of the push buffer mapping
+ */
+ __u32 va_len;
+ /**
+ * @flags: the flags for this push buffer mapping
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
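+ *
+ * A minimal sketch (``channel`` and ``push_va`` are illustrative; the push
+ * buffer is assumed to be mapped via VM_BIND already) of a single push:
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_nouveau_exec_push push = {
+ *		.va = push_va,
+ *		.va_len = 0x1000,
+ *	};
+ *	struct drm_nouveau_exec exec = {
+ *		.channel = channel,
+ *		.push_count = 1,
+ *		.push_ptr = (uintptr_t)&push,
+ *	};
+ *
+ *	int err = ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);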
+ */
+struct drm_nouveau_exec {
+ /**
+ * @channel: the channel to execute the push buffer in
+ */
+ __u32 channel;
+ /**
+ * @push_count: the number of &drm_nouveau_exec_push ops
+ */
+ __u32 push_count;
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @push_ptr: pointer to &drm_nouveau_exec_push ops
+ */
+ __u64 push_ptr;
+};
+
+#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
+#define DRM_NOUVEAU_VM_INIT 0x10
+#define DRM_NOUVEAU_VM_BIND 0x11
+#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -188,6 +468,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@@ -197,6 +481,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index ec19db1eead8..9f231d40a146 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -84,14 +84,14 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns; /* absolute */
};
+/* Valid flags to pass to drm_panfrost_create_bo */
#define PANFROST_BO_NOEXEC 1
#define PANFROST_BO_HEAP 2
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
*
- * There are currently no values for the flags argument, but it may be
- * used in a future extension.
+ * The flags argument is a bit mask of PANFROST_BO_* flags.
*/
struct drm_panfrost_create_bo {
__u32 size;
@@ -171,6 +171,7 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_JS_FEATURES15,
DRM_PANFROST_PARAM_NR_CORE_GROUPS,
DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
+ DRM_PANFROST_PARAM_AFBC_FEATURES,
};
struct drm_panfrost_get_param {
@@ -223,6 +224,57 @@ struct drm_panfrost_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/* Definitions for coredump decoding in user space */
+#define PANFROSTDUMP_MAJOR 1
+#define PANFROSTDUMP_MINOR 0
+
+#define PANFROSTDUMP_MAGIC 0x464E4150 /* PANF */
+
+#define PANFROSTDUMP_BUF_REG 0
+#define PANFROSTDUMP_BUF_BOMAP (PANFROSTDUMP_BUF_REG + 1)
+#define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
+#define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
+
+/*
+ * This structure is in the native endianness of the dumping machine; tools can
+ * detect the endianness by looking at the value in 'magic'.
+ */
+struct panfrost_dump_object_header {
+ __u32 magic;
+ __u32 type;
+ __u32 file_size;
+ __u32 file_offset;
+
+ union {
+ struct {
+ __u64 jc;
+ __u32 gpu_id;
+ __u32 major;
+ __u32 minor;
+ __u64 nbos;
+ } reghdr;
+
+ struct {
+ __u32 valid;
+ __u64 iova;
+ __u32 data[2];
+ } bomap;
+
+ /*
+ * Force the same size in case we want to expand the header
+ * with new fields and also keep it 512-byte aligned.
+ */
+
+ __u32 sizer[496];
+ };
+};
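+
+/*
+ * A minimal sketch (assuming 'hdr' was read from the start of a dump file) of
+ * the endianness check described above:
+ *
+ *    bool swapped = (hdr.magic == __builtin_bswap32(PANFROSTDUMP_MAGIC));
+ *
+ *    if (!swapped && hdr.magic != PANFROSTDUMP_MAGIC)
+ *       return -EINVAL;
+ */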
+
+/* Registers object, an array of these */
+struct panfrost_dump_registers {
+ __u32 reg;
+ __u32 value;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
new file mode 100644
index 000000000000..ccf6c2112468
--- /dev/null
+++ b/include/uapi/drm/pvr_drm.h
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRM_UAPI_H
+#define PVR_DRM_UAPI_H
+
+#include "drm.h"
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: PowerVR UAPI
+ *
+ * The PowerVR IOCTL argument structs have a few limitations in place, in
+ * addition to the standard kernel restrictions:
+ *
+ * - All members must be type-aligned.
+ * - The overall struct must be padded to 64-bit alignment.
+ * - Explicit padding is almost always required. This takes the form of
+ * ``_padding_[x]`` members of sufficient size to pad to the next power-of-two
+ * alignment, where [x] is the offset into the struct in hexadecimal. Arrays
+ * are never used for alignment. Padding fields must be zeroed; this is
+ * always checked.
+ * - Unions may only appear as the last member of a struct.
+ * - Individual union members may grow in the future. The space between the
+ * end of a union member and the end of its containing union is considered
+ * "implicit padding" and must be zeroed. This is always checked.
+ *
+ * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of
+ * DEV_QUERY argument structs. These are used to fetch information about the
+ * device and runtime. These structs are subject to the same rules set out
+ * above.
+ */
+
+/**
+ * struct drm_pvr_obj_array - Container used to pass arrays of objects
+ *
+ * It is not unusual to have to extend objects to pass new parameters, and the DRM
+ * ioctl infrastructure supports that by padding ioctl arguments with zeros
+ * when the data passed by userspace is smaller than the struct defined in the
+ * drm_ioctl_desc, thus keeping things backward compatible. This type is just
+ * applying the same concepts to indirect objects passed through arrays referenced
+ * from the main ioctl arguments structure: the stride basically defines the size
+ * of the object passed by userspace, which allows the kernel driver to pad with
+ * zeros when it's smaller than the size of the object it expects.
+ *
+ * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you
+ * have a very good reason not to.
+ */
+struct drm_pvr_obj_array {
+ /** @stride: Stride of object struct. Used for versioning. */
+ __u32 stride;
+
+ /** @count: Number of objects in the array. */
+ __u32 count;
+
+ /** @array: User pointer to an array of objects. */
+ __u64 array;
+};
+
+/**
+ * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array.
+ * @cnt: Number of elements pointed to by @ptr.
+ * @ptr: Pointer to start of a C array.
+ *
+ * Return: Literal of type &struct drm_pvr_obj_array.
+ */
+#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
+ { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
+
+/**
+ * DOC: PowerVR IOCTL interface
+ */
+
+/**
+ * PVR_IOCTL() - Build a PowerVR IOCTL number
+ * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
+ * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR.
+ * @_data: The type of the args struct passed by this IOCTL.
+ *
+ * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and
+ * an ``_args`` suffix. They are therefore omitted from @_data.
+ *
+ * This should only be used to build the constants described below; it should
+ * never be used to call an IOCTL directly.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define PVR_IOCTL(_ioctl, _mode, _data) \
+ _mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
+
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+
+/**
+ * DOC: PowerVR IOCTL DEV_QUERY interface
+ */
+
+/**
+ * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about
+ * the graphics processor.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET.
+ */
+struct drm_pvr_dev_query_gpu_info {
+ /**
+ * @gpu_id: GPU identifier.
+ *
+ * For all currently supported GPUs this is the BVNC encoded as a 64-bit
+ * value as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ */
+ __u64 gpu_id;
+
+ /**
+ * @num_phantoms: Number of Phantoms present.
+ */
+ __u32 num_phantoms;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_runtime_info - Container used to fetch information
+ * about the graphics runtime.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET.
+ */
+struct drm_pvr_dev_query_runtime_info {
+ /**
+ * @free_list_min_pages: Minimum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_min_pages;
+
+ /**
+ * @free_list_max_pages: Maximum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_max_pages;
+
+ /**
+ * @common_store_alloc_region_size: Size of the Allocation
+ * Region within the Common Store used for coefficient and shared
+ * registers, in dwords.
+ */
+ __u32 common_store_alloc_region_size;
+
+ /**
+ * @common_store_partition_space_size: Size of the
+ * Partition Space within the Common Store for output buffers, in
+ * dwords.
+ */
+ __u32 common_store_partition_space_size;
+
+ /**
+ * @max_coeffs: Maximum coefficients, in dwords.
+ */
+ __u32 max_coeffs;
+
+ /**
+ * @cdm_max_local_mem_size_regs: Maximum amount of local
+ * memory available to a compute kernel, in dwords.
+ */
+ __u32 cdm_max_local_mem_size_regs;
+};
+
+/**
+ * struct drm_pvr_dev_query_quirks - Container used to fetch information about
+ * hardware fixes for which the device may require support in the user mode
+ * driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_QUIRKS_GET.
+ */
+struct drm_pvr_dev_query_quirks {
+ /**
+ * @quirks: A userspace address for the hardware quirks __u32 array.
+ *
+ * The first @musthave_count items in the list are quirks that the
+ * client must support for this device. If userspace does not support
+ * all these quirks then functionality is not guaranteed and client
+ * initialisation must fail.
+ * The remaining quirks in the list affect userspace and the kernel or
+ * firmware. They are disabled by default and require userspace to
+ * opt-in. The opt-in mechanism depends on the quirk.
+ */
+ __u64 quirks;
+
+ /** @count: Length of @quirks (number of __u32). */
+ __u16 count;
+
+ /**
+ * @musthave_count: The number of entries in @quirks that are
+ * mandatory, starting at index 0.
+ */
+ __u16 musthave_count;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_enhancements - Container used to fetch information
+ * about optional enhancements supported by the device that require support in
+ * the user mode driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_ENHANCEMENTS_GET.
+ */
+struct drm_pvr_dev_query_enhancements {
+ /**
+ * @enhancements: A userspace address for the hardware enhancements
+ * __u32 array.
+ *
+ * These enhancements affect userspace and the kernel or firmware. They
+ * are disabled by default and require userspace to opt-in. The opt-in
+ * mechanism depends on the enhancement.
+ */
+ __u64 enhancements;
+
+ /** @count: Length of @enhancements (number of __u32). */
+ __u16 count;
+
+ /** @_padding_a: Reserved. This field must be zeroed. */
+ __u16 _padding_a;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * enum drm_pvr_heap_id - Array index for heap info data returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array;
+ * however, some heaps may not be present. These are indicated where
+ * &struct drm_pvr_heap.size is set to zero.
+ */
+enum drm_pvr_heap_id {
+ /** @DRM_PVR_HEAP_GENERAL: General purpose heap. */
+ DRM_PVR_HEAP_GENERAL = 0,
+ /** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */
+ DRM_PVR_HEAP_PDS_CODE_DATA,
+ /** @DRM_PVR_HEAP_USC_CODE: USC code heap. */
+ DRM_PVR_HEAP_USC_CODE,
+ /** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */
+ DRM_PVR_HEAP_RGNHDR,
+ /** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */
+ DRM_PVR_HEAP_VIS_TEST,
+ /** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */
+ DRM_PVR_HEAP_TRANSFER_FRAG,
+
+ /**
+ * @DRM_PVR_HEAP_COUNT: The number of heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * More heaps may be added, so this also serves as the copy limit when
+ * sent by the caller.
+ */
+ DRM_PVR_HEAP_COUNT
+ /* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */
+};
+
+/**
+ * struct drm_pvr_heap - Container holding information about a single heap.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_heap {
+ /** @base: Base address of heap. */
+ __u64 base;
+
+ /** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */
+ __u64 size;
+
+ /** @flags: Flags for this heap. Currently always 0. */
+ __u32 flags;
+
+ /** @page_size_log2: Log2 of page size. */
+ __u32 page_size_log2;
+};
+
+/**
+ * struct drm_pvr_dev_query_heap_info - Container used to fetch information
+ * about heaps supported by the device driver.
+ *
+ * Please note all driver-supported heaps will be returned up to &heaps.count.
+ * Some heaps will not be present in all devices, which will be indicated by
+ * &struct drm_pvr_heap.size being set to zero.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+struct drm_pvr_dev_query_heap_info {
+ /**
+ * @heaps: Array of &struct drm_pvr_heap. If pointer is NULL, the count
+ * and stride will be updated with those known to the driver version, to
+ * facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array heaps;
+};
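
The NULL-pointer convention above enables a two-pass fetch from userspace: one call to learn the element count and stride, a second to copy the array. A minimal sketch, not part of the patch, assuming the stride/count/array members of struct drm_pvr_obj_array declared earlier in this header, an fd opened on the PowerVR render node, and drm/pvr_drm.h as the installed uapi path::

  #include <stdint.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <drm/pvr_drm.h>

  static struct drm_pvr_heap *fetch_heaps(int fd, __u32 *count)
  {
      struct drm_pvr_dev_query_heap_info info = { 0 };
      struct drm_pvr_ioctl_dev_query_args args = {
          .type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
          .size = sizeof(info),
          .pointer = (__u64)(uintptr_t)&info,
      };
      struct drm_pvr_heap *heaps;

      /* Pass 1: info.heaps.array is NULL, so only count/stride are filled. */
      if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args))
          return NULL;

      /* Allocate with the returned stride: a newer kernel may use a
       * larger per-element size than this copy of the header. */
      heaps = calloc(info.heaps.count, info.heaps.stride);
      if (!heaps)
          return NULL;

      /* Pass 2: fetch the array itself. */
      info.heaps.array = (__u64)(uintptr_t)heaps;
      args.size = sizeof(info);
      if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args)) {
          free(heaps);
          return NULL;
      }

      *count = info.heaps.count;
      return heaps;
  }

Heaps absent on a given device are reported with size == 0 rather than being dropped from the array, so callers should index by enum drm_pvr_heap_id and check size before use.
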
+
+/**
+ * enum drm_pvr_static_data_area_usage - Array index for static data area info
+ * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array;
+ * however, some areas may not be present. These are indicated where
+ * &struct drm_pvr_static_data_area.size is set to zero.
+ */
+enum drm_pvr_static_data_area_usage {
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment.
+ *
+ * The End of Tile PDS task runs at completion of a tile during a fragment job, and is
+ * responsible for emitting the tile to the Pixel Back End.
+ */
+ DRM_PVR_STATIC_DATA_AREA_EOT = 0,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and
+ * invalidation.
+ *
+ * This must point to valid physical memory, but its contents are otherwise not used.
+ */
+ DRM_PVR_STATIC_DATA_AREA_FENCE,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program.
+ *
+ * The VDM sync program is used to synchronise multiple areas of the GPU hardware.
+ */
+ DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients.
+ *
+ * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed
+ * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits.
+ *
+ * The slots are:
+ * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR
+ * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range)
+ * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range)
+ * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range)
+ * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range)
+ * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range)
+ * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range)
+ * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range)
+ * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range)
+ * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit)
+ * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit)
+ * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit)
+ * 14 = Identity (biased)
+ * 15 = Identity
+ */
+ DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+};
+
+/**
+ * struct drm_pvr_static_data_area - Container holding information about a
+ * single static data area.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_static_data_area {
+ /**
+ * @area_usage: Usage of static data area.
+ * See &enum drm_pvr_static_data_area_usage.
+ */
+ __u16 area_usage;
+
+ /**
+ * @location_heap_id: Array index of the heap where this static data
+ * area is located. This array is fetched using
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u16 location_heap_id;
+
+ /** @size: Size of static data area. Not present if set to zero. */
+ __u32 size;
+
+ /** @offset: Offset of static data area from start of heap. */
+ __u64 offset;
+};
+
+/**
+ * struct drm_pvr_dev_query_static_data_areas - Container used to fetch
+ * information about the static data areas in heaps supported by the device
+ * driver.
+ *
+ * Please note all driver-supported static data areas will be returned up to
+ * &static_data_areas.count. Some will not be present for all devices, which
+ * will be indicated by &struct drm_pvr_static_data_area.size being set to zero.
+ *
+ * Further, some heaps will not be present either. See &struct
+ * drm_pvr_dev_query_heap_info.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ */
+struct drm_pvr_dev_query_static_data_areas {
+ /**
+ * @static_data_areas: Array of &struct drm_pvr_static_data_area. If
+ * pointer is NULL, the count and stride will be updated with those
+ * known to the driver version, to facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array static_data_areas;
+};
+
+/**
+ * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to
+ * indicate the type of the receiving container.
+ *
+ * Append only. Do not reorder.
+ */
+enum drm_pvr_dev_query {
+ /**
+ * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_gpu_info.
+ */
+ DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_runtime_info.
+ */
+ DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_quirks.
+ */
+ DRM_PVR_DEV_QUERY_QUIRKS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_enhancements.
+ */
+ DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_heap_info.
+ */
+ DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain
+ * a pointer to &struct drm_pvr_dev_query_static_data_areas.
+ */
+ DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+
+/**
+ * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY.
+ */
+struct drm_pvr_ioctl_dev_query_args {
+ /**
+ * @type: Type of query and output struct. See &enum drm_pvr_dev_query.
+ */
+ __u32 type;
+
+ /**
+ * @size: Size of the receiving struct, see @type.
+ *
+ * After a successful call this will be updated to the written byte
+ * length.
+ * Can also be used to get the minimum byte length (see @pointer).
+ * This allows additional fields to be appended to the structs in
+ * future.
+ */
+ __u32 size;
+
+ /**
+ * @pointer: Pointer to struct @type.
+ *
+ * Must be large enough to contain @size bytes.
+ * If pointer is NULL, the expected size will be returned in the @size
+ * field, but no other data will be written.
+ */
+ __u64 pointer;
+};
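
For illustration, the @size negotiation described above reduces to two calls when the caller wants the driver's expected size first. A sketch under the same assumptions as the earlier snippet (an open PowerVR fd and the installed uapi header)::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/pvr_drm.h>

  static int query_gpu_info(int fd, struct drm_pvr_dev_query_gpu_info *info)
  {
      struct drm_pvr_ioctl_dev_query_args args = {
          .type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
          .size = 0,
          .pointer = 0,   /* NULL: driver only reports the expected size */
      };

      if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args))
          return -1;

      /* args.size now holds the byte length known to the running driver;
       * pass our own struct size and let the driver write what fits. */
      args.size = sizeof(*info);
      args.pointer = (__u64)(uintptr_t)info;
      return ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
  }
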
+
+/**
+ * DOC: PowerVR IOCTL CREATE_BO interface
+ */
+
+/**
+ * DOC: Flags for CREATE_BO
+ *
+ * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some
+ * fonts.
+ *
+ * Device mapping options
+ * :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the
+ * cache. This is used for buffers that will either be regularly updated by the CPU (e.g. free
+ * lists) or will be accessed only once and are therefore not worth caching (e.g. partial render
+ * buffers).
+ * By default, the device flushes its memory caches after every job, so this is not normally
+ * required for coherency.
+ * :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware
+ * processor should be allowed to access this memory when mapped to the device. It is not
+ * valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS.
+ *
+ * CPU mapping options
+ * :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this
+ * memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT.
+ */
+#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0)
+#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1)
+#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2)
+/* Bits 3..63 are reserved. */
+
+#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \
+ DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)
+
+/**
+ * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO
+ */
+struct drm_pvr_ioctl_create_bo_args {
+ /**
+ * @size: [IN] Size of buffer object to create. This must be page size
+ * aligned.
+ */
+ __u64 size;
+
+ /**
+ * @handle: [OUT] GEM handle of the new buffer object for use in
+ * userspace.
+ */
+ __u32 handle;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+
+ /**
+ * @flags: [IN] Options which will affect the behaviour of this
+ * creation operation and future mapping operations on the created
+ * object. This field must be a valid combination of ``DRM_PVR_BO_*``
+ * values, with all bits marked as reserved set to zero.
+ */
+ __u64 flags;
+};
+
+/**
+ * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface
+ */
+
+/**
+ * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for
+ * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET
+ *
+ * As with other DRM drivers, the "mmap" IOCTL doesn't actually map any memory.
+ * Instead, it allocates a fake offset which refers to the specified buffer
+ * object. This offset can be used with a real mmap call on the DRM device
+ * itself.
+ */
+struct drm_pvr_ioctl_get_bo_mmap_offset_args {
+ /** @handle: [IN] GEM handle of the buffer object to be mapped. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /** @offset: [OUT] Fake offset to use in the real mmap call. */
+ __u64 offset;
+};
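
Putting %DRM_IOCTL_PVR_CREATE_BO and this ioctl together, the create-then-map flow might look as follows. A sketch only; the helper name is illustrative and the caller is expected to pass a page-size-aligned size::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <drm/pvr_drm.h>

  static void *create_and_map_bo(int fd, __u64 size, __u32 *handle)
  {
      struct drm_pvr_ioctl_create_bo_args create = {
          .size = size,   /* must be page-size aligned */
          .flags = DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS,
      };
      struct drm_pvr_ioctl_get_bo_mmap_offset_args off = { 0 };

      if (ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &create))
          return MAP_FAILED;

      off.handle = create.handle;
      if (ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &off))
          return MAP_FAILED;

      *handle = create.handle;
      /* The fake offset only selects the BO; this is a normal mmap(). */
      return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                  fd, off.offset);
  }
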
+
+/**
+ * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_create_vm_context_args {
+ /** @handle: [OUT] Handle for new VM context. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_vm_context_args {
+ /**
+ * @handle: [IN] Handle for VM context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces
+ *
+ * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space.
+ *
+ * The client is responsible for managing GPU address space. It should allocate mappings within
+ * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for
+ * the mapping. Size and offset within the mapped buffer object can be specified, so the client can
+ * partially map a buffer.
+ *
+ * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address
+ * space only if the size of the mapping matches that known to the driver.
+ */
+
+/**
+ * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP.
+ */
+struct drm_pvr_ioctl_vm_map_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context for this mapping to
+ * exist in.
+ */
+ __u32 vm_context_handle;
+
+ /** @flags: [IN] Flags which affect this mapping. Currently always 0. */
+ __u32 flags;
+
+ /**
+ * @device_addr: [IN] Requested device-virtual address for the mapping.
+ * This must be non-zero and aligned to the device page size for the
+ * heap containing the requested address. It is an error to specify an
+ * address which is not contained within one of the heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u64 device_addr;
+
+ /**
+ * @handle: [IN] Handle of the target buffer object. This must be a
+ * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
+ */
+ __u32 handle;
+
+ /** @_padding_14: Reserved. This field must be zeroed. */
+ __u32 _padding_14;
+
+ /**
+ * @offset: [IN] Offset into the target bo from which to begin the
+ * mapping.
+ */
+ __u64 offset;
+
+ /**
+ * @size: [IN] Size of the requested mapping. Must be aligned to
+ * the device page size for the heap containing the requested address,
+ * as well as the host page size. When added to @device_addr, the
+ * result must not overflow the heap which contains @device_addr (i.e.
+ * the range specified by @device_addr and @size must be completely
+ * contained within a single heap specified by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET).
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP.
+ */
+struct drm_pvr_ioctl_vm_unmap_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this mapping
+ * exists in.
+ */
+ __u32 vm_context_handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /**
+ * @device_addr: [IN] Device-virtual address at the start of the target
+ * mapping. This must be non-zero.
+ */
+ __u64 device_addr;
+
+ /**
+ * @size: Size in bytes of the target mapping. This must be non-zero.
+ */
+ __u64 size;
+};
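
Taken together with %DRM_IOCTL_PVR_VM_MAP above, a hedged sketch of a map/unmap pair. vm_ctx, bo, dev_addr and size are placeholders supplied by the caller; dev_addr must fall inside a heap reported by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/pvr_drm.h>

  /* size must be aligned to both the heap's device page size and the
   * host page size, and [dev_addr, dev_addr + size) must stay inside
   * one heap. */
  static int map_bo(int fd, __u32 vm_ctx, __u32 bo, __u64 dev_addr, __u64 size)
  {
      struct drm_pvr_ioctl_vm_map_args map = {
          .vm_context_handle = vm_ctx,
          .device_addr = dev_addr,
          .handle = bo,
          .offset = 0,    /* map from the start of the BO */
          .size = size,
      };

      return ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map);
  }

  static int unmap_bo(int fd, __u32 vm_ctx, __u64 dev_addr, __u64 size)
  {
      /* The address/size pair must match the existing mapping exactly. */
      struct drm_pvr_ioctl_vm_unmap_args unmap = {
          .vm_context_handle = vm_ctx,
          .device_addr = dev_addr,
          .size = size,
      };

      return ioctl(fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap);
  }
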
+
+/**
+ * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces
+ */
+
+/**
+ * enum drm_pvr_ctx_priority - Arguments for
+ * &struct drm_pvr_ioctl_create_context_args.priority
+ */
+enum drm_pvr_ctx_priority {
+ /** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */
+ DRM_PVR_CTX_PRIORITY_LOW = -512,
+
+ /** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */
+ DRM_PVR_CTX_PRIORITY_NORMAL = 0,
+
+ /**
+ * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal.
+ * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``.
+ */
+ DRM_PVR_CTX_PRIORITY_HIGH = 512,
+};
+
+/**
+ * enum drm_pvr_ctx_type - Arguments for
+ * &struct drm_pvr_ioctl_create_context_args.type
+ */
+enum drm_pvr_ctx_type {
+ /**
+ * @DRM_PVR_CTX_TYPE_RENDER: Render context.
+ */
+ DRM_PVR_CTX_TYPE_RENDER = 0,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
+ */
+ DRM_PVR_CTX_TYPE_COMPUTE,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+ * master.
+ */
+ DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_ioctl_create_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_CONTEXT
+ */
+struct drm_pvr_ioctl_create_context_args {
+ /**
+ * @type: [IN] Type of context to create.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_type.
+ */
+ __u32 type;
+
+ /** @flags: [IN] Flags for context. */
+ __u32 flags;
+
+ /**
+ * @priority: [IN] Priority of new context.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_priority.
+ */
+ __s32 priority;
+
+ /** @handle: [OUT] Handle for new context. */
+ __u32 handle;
+
+ /**
+ * @static_context_state: [IN] Pointer to static context state stream.
+ */
+ __u64 static_context_state;
+
+ /**
+ * @static_context_state_len: [IN] Length of static context state, in bytes.
+ */
+ __u32 static_context_state_len;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this context is
+ * associated with.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @callstack_addr: [IN] Address for initial call stack pointer. Only valid
+ * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0.
+ */
+ __u64 callstack_addr;
+};
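
As an illustrative sketch of filling these arguments for a render context: the static context state stream is built by the user mode driver in a format defined elsewhere in this header, and every handle below is a placeholder::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/pvr_drm.h>

  static int create_render_context(int fd, __u32 vm_ctx,
                                   const void *static_ctx_state, __u32 len,
                                   __u64 callstack_addr, __u32 *handle)
  {
      struct drm_pvr_ioctl_create_context_args args = {
          .type = DRM_PVR_CTX_TYPE_RENDER,
          .priority = DRM_PVR_CTX_PRIORITY_NORMAL,
          .static_context_state = (__u64)(uintptr_t)static_ctx_state,
          .static_context_state_len = len,
          .vm_context_handle = vm_ctx,
          .callstack_addr = callstack_addr, /* render contexts only; 0 otherwise */
      };

      if (ioctl(fd, DRM_IOCTL_PVR_CREATE_CONTEXT, &args))
          return -1;

      *handle = args.handle;
      return 0;
  }
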
+
+/**
+ * struct drm_pvr_ioctl_destroy_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_context_args {
+ /**
+ * @handle: [IN] Handle for context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_FREE_LIST
+ *
+ * Free list arguments have the following constraints:
+ *
+ * - @max_num_pages must be greater than zero.
+ * - @grow_threshold must be between 0 and 100.
+ * - @grow_num_pages must be less than or equal to @max_num_pages.
+ * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples
+ * of 4.
+ * - When @grow_num_pages is 0, @initial_num_pages must be equal to
+ * @max_num_pages.
+ * - When @grow_num_pages is non-zero, @initial_num_pages must be less than
+ * @max_num_pages.
+ */
+struct drm_pvr_ioctl_create_free_list_args {
+ /**
+ * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object
+ * containing memory to be used by free list.
+ *
+ * The mapped region of the buffer object must be at least
+ * @max_num_pages * ``sizeof(__u32)``.
+ *
+ * The buffer object must have been created with
+ * %DRM_PVR_BO_PM_FW_PROTECT set and
+ * %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS not set.
+ */
+ __u64 free_list_gpu_addr;
+
+ /** @initial_num_pages: [IN] Pages initially allocated to free list. */
+ __u32 initial_num_pages;
+
+ /** @max_num_pages: [IN] Maximum number of pages in free list. */
+ __u32 max_num_pages;
+
+ /** @grow_num_pages: [IN] Pages to grow free list by per request. */
+ __u32 grow_num_pages;
+
+ /**
+ * @grow_threshold: [IN] Percentage of FL memory used that should
+ * trigger a new grow request.
+ */
+ __u32 grow_threshold;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that the free list buffer
+ * object is mapped in.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @handle: [OUT] Handle for created free list.
+ */
+ __u32 handle;
+};
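
One configuration that satisfies the constraint list above: growth enabled, so initial < max, grow <= max, and all page counts are multiples of 4. fl_dev_addr and vm_ctx are placeholders; the mapping behind fl_dev_addr must cover at least max_num_pages * sizeof(__u32) and be created with %DRM_PVR_BO_PM_FW_PROTECT set::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/pvr_drm.h>

  static int create_free_list(int fd, __u64 fl_dev_addr, __u32 vm_ctx,
                              __u32 *handle)
  {
      struct drm_pvr_ioctl_create_free_list_args args = {
          .free_list_gpu_addr = fl_dev_addr,
          .initial_num_pages = 256,
          .max_num_pages = 1024,
          .grow_num_pages = 128,
          .grow_threshold = 90,   /* grow once 90% of the list is used */
          .vm_context_handle = vm_ctx,
      };

      if (ioctl(fd, DRM_IOCTL_PVR_CREATE_FREE_LIST, &args))
          return -1;

      *handle = args.handle;
      return 0;
  }
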
+
+/**
+ * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_FREE_LIST
+ */
+struct drm_pvr_ioctl_destroy_free_list_args {
+ /**
+ * @handle: [IN] Handle for free list to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
+ */
+
+/**
+ * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args.
+ */
+struct drm_pvr_create_hwrt_geom_data_args {
+ /** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */
+ __u64 tpc_dev_addr;
+
+ /** @tpc_size: [IN] Size of TPC, in bytes. */
+ __u32 tpc_size;
+
+ /** @tpc_stride: [IN] Stride between layers in TPC, in pages. */
+ __u32 tpc_stride;
+
+ /** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */
+ __u64 vheap_table_dev_addr;
+
+ /** @rtc_dev_addr: [IN] Render Target Cache virtual address. */
+ __u64 rtc_dev_addr;
+};
+
+/**
+ * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args.
+ */
+struct drm_pvr_create_hwrt_rt_data_args {
+ /** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */
+ __u64 pm_mlist_dev_addr;
+
+ /** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */
+ __u64 macrotile_array_dev_addr;
+
+ /** @region_header_dev_addr: [IN] Region header array GPU virtual address. */
+ __u64 region_header_dev_addr;
+};
+
+#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0
+#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U
+
+/**
+ * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_create_hwrt_dataset_args {
+ /** @geom_data_args: [IN] Geometry data arguments. */
+ struct drm_pvr_create_hwrt_geom_data_args geom_data_args;
+
+ /**
+ * @rt_data_args: [IN] Array of render target arguments.
+ *
+ * Each entry in this array represents a render target in a double buffered
+ * setup.
+ */
+ struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];
+
+ /**
+ * @free_list_handles: [IN] Array of free list handles.
+ *
+ * free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL] must have an initial
+ * size of at least that reported by
+ * &drm_pvr_dev_query_runtime_info.free_list_min_pages.
+ */
+ __u32 free_list_handles[2];
+
+ /** @width: [IN] Width in pixels. */
+ __u32 width;
+
+ /** @height: [IN] Height in pixels. */
+ __u32 height;
+
+ /** @samples: [IN] Number of samples. */
+ __u32 samples;
+
+ /** @layers: [IN] Number of layers. */
+ __u32 layers;
+
+ /** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */
+ __u32 isp_merge_lower_x;
+
+ /** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */
+ __u32 isp_merge_lower_y;
+
+ /** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */
+ __u32 isp_merge_scale_x;
+
+ /** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */
+ __u32 isp_merge_scale_y;
+
+ /** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */
+ __u32 isp_merge_upper_x;
+
+ /** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */
+ __u32 isp_merge_upper_y;
+
+ /**
+ * @region_header_size: [IN] Size of region header array. This common field is used by
+ * both render targets in this data set.
+ *
+ * The units for this field differ depending on what version of the simple internal
+ * parameter format the device uses. If format 2 is in use then this is interpreted as the
+ * number of region headers. For other formats it is interpreted as the size in dwords.
+ */
+ __u32 region_header_size;
+
+ /**
+ * @handle: [OUT] Handle for created HWRT dataset.
+ */
+ __u32 handle;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
+ /**
+ * @handle: [IN] Handle for HWRT dataset to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL SUBMIT_JOBS interface
+ */
+
+/**
+ * DOC: Flags for the drm_pvr_sync_op object.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_HANDLE_TYPE_MASK
+ *
+ * Handle type mask for the drm_pvr_sync_op::flags field.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle.
+ * This is the default type.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL
+ *
+ * Signal operation requested. The out-fence bound to the job will be attached to
+ * the syncobj whose handle is passed in drm_pvr_sync_op::handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT
+ *
+ * Wait operation requested. The job will wait for this particular syncobj or syncobj
+ * point to be signaled before being started.
+ * This is the default operation.
+ */
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
+#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
+#define DRM_PVR_SYNC_OP_FLAG_WAIT 0
+
+#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \
+ DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+
+/**
+ * struct drm_pvr_sync_op - Object describing a sync operation
+ */
+struct drm_pvr_sync_op {
+ /** @handle: Handle of sync object. */
+ __u32 handle;
+
+ /** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */
+ __u32 flags;
+
+ /** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */
+ __u64 value;
+};
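
As an illustration of the flag combinations above, a helper filling one wait on a binary syncobj and one signal on a timeline point; the handles and the point value are placeholders::

  #include <drm/pvr_drm.h>

  static void fill_sync_ops(struct drm_pvr_sync_op ops[2], __u32 in_syncobj,
                            __u32 out_timeline, __u64 point)
  {
      /* Wait: the job starts only once in_syncobj signals. */
      ops[0] = (struct drm_pvr_sync_op){
          .handle = in_syncobj,
          .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                   DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
          .value = 0,     /* MBZ for a binary syncobj */
      };
      /* Signal: the job's out-fence is attached to the timeline point. */
      ops[1] = (struct drm_pvr_sync_op){
          .handle = out_timeline,
          .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                   DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ,
          .value = point,
      };
  }
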
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl geometry command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST
+ *
+ * Indicates if this is the first command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST
+ *
+ * Indicates if this is the last command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE
+ *
+ * Forces the use of a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the geometry cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl fragment command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE
+ *
+ * Use a single core in a multi-core setup.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER
+ *
+ * Indicates whether a depth buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER
+ *
+ * Indicates whether a stencil buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP
+ *
+ * Disallow compute overlapped with this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS
+ *
+ * Indicates whether this render produces visibility results.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER
+ *
+ * Indicates whether partial renders write to a scratch buffer instead of
+ * the final surface. It also forces the full screen copy expected to be
+ * present on the last render after all partial renders have completed.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE
+ *
+ * Disable pixel merging for this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the fragment cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl compute command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
+ *
+ * Disallow other jobs overlapped with this compute.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
+ *
+ * Forces the use of a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the compute cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl transfer command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the transfer cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)
+
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \
+ DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+
+/**
+ * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.job_type
+ */
+enum drm_pvr_job_type {
+ /** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */
+ DRM_PVR_JOB_TYPE_GEOMETRY = 0,
+
+ /** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. */
+ DRM_PVR_JOB_TYPE_FRAGMENT,
+
+ /** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */
+ DRM_PVR_JOB_TYPE_COMPUTE,
+
+ /** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */
+ DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_hwrt_data_ref - Reference HWRT data
+ */
+struct drm_pvr_hwrt_data_ref {
+ /** @set_handle: HWRT data set handle. */
+ __u32 set_handle;
+
+ /** @data_index: Index of the HWRT data inside the data set. */
+ __u32 data_index;
+};
+
+/**
+ * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
+ */
+struct drm_pvr_job {
+ /**
+ * @type: [IN] Type of job being submitted.
+ *
+ * This must be one of the values defined by &enum drm_pvr_job_type.
+ */
+ __u32 type;
+
+ /**
+ * @context_handle: [IN] Context handle.
+ *
+ * This must be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT.
+ * The type of the context must be compatible with @type, the type of job
+ * being submitted (see &enum drm_pvr_job_type).
+ */
+ __u32 context_handle;
+
+ /**
+ * @flags: [IN] Flags for command.
+ *
+ * Those are job-dependent. See all ``DRM_PVR_SUBMIT_JOB_*``.
+ */
+ __u32 flags;
+
+ /**
+ * @cmd_stream_len: [IN] Length of command stream, in bytes.
+ */
+ __u32 cmd_stream_len;
+
+ /**
+ * @cmd_stream: [IN] Pointer to command stream for command.
+ *
+ * The command stream must be u64-aligned.
+ */
+ __u64 cmd_stream;
+
+ /** @sync_ops: [IN] Sync operations for this job. */
+ struct drm_pvr_obj_array sync_ops;
+
+ /**
+ * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
+ *
+ * Must be zero for non-render jobs.
+ */
+ struct drm_pvr_hwrt_data_ref hwrt;
+};
+
+/**
+ * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS
+ *
+ * If the syscall returns an error, it is important to check the value of
+ * @jobs.count. This indicates the index into @jobs.array where the
+ * error occurred.
+ */
+struct drm_pvr_ioctl_submit_jobs_args {
+ /** @jobs: [IN] Array of jobs to submit. */
+ struct drm_pvr_obj_array jobs;
+};
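
Pulling the pieces together, a geometry plus fragment pair submitted against one render context might look like this sketch. All handles and command streams are placeholders, and the stride/count/array member names come from struct drm_pvr_obj_array declared earlier in this header::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/pvr_drm.h>

  static int submit_render(int fd, __u32 render_ctx, __u32 hwrt_set,
                           void *geom, __u32 geom_len,
                           void *frag, __u32 frag_len)
  {
      struct drm_pvr_job jobs[2] = {
          {
              .type = DRM_PVR_JOB_TYPE_GEOMETRY,
              .context_handle = render_ctx,
              .flags = DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST |
                       DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST,
              .cmd_stream_len = geom_len,
              .cmd_stream = (__u64)(uintptr_t)geom, /* must be u64-aligned */
              .hwrt = { .set_handle = hwrt_set, .data_index = 0 },
          },
          {
              .type = DRM_PVR_JOB_TYPE_FRAGMENT,
              .context_handle = render_ctx,
              .cmd_stream_len = frag_len,
              .cmd_stream = (__u64)(uintptr_t)frag,
              .hwrt = { .set_handle = hwrt_set, .data_index = 0 },
          },
      };
      struct drm_pvr_ioctl_submit_jobs_args args = {
          .jobs = {
              .stride = sizeof(jobs[0]),
              .count = 2,
              .array = (__u64)(uintptr_t)jobs,
          },
      };

      /* On error, args.jobs.count indexes the job that failed. */
      return ioctl(fd, DRM_IOCTL_PVR_SUBMIT_JOBS, &args);
  }
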
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_DRM_UAPI_H */
diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h
new file mode 100644
index 000000000000..d3ca876a08e9
--- /dev/null
+++ b/include/uapi/drm/qaic_accel.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ *
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef QAIC_ACCEL_H_
+#define QAIC_ACCEL_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* The length (4K) includes len and count fields of qaic_manage_msg */
+#define QAIC_MANAGE_MAX_MSG_LENGTH SZ_4K
+
+/* semaphore flags */
+#define QAIC_SEM_INSYNCFENCE 2
+#define QAIC_SEM_OUTSYNCFENCE 1
+
+/* Semaphore commands */
+#define QAIC_SEM_NOP 0
+#define QAIC_SEM_INIT 1
+#define QAIC_SEM_INC 2
+#define QAIC_SEM_DEC 3
+#define QAIC_SEM_WAIT_EQUAL 4
+#define QAIC_SEM_WAIT_GT_EQ 5 /* Greater than or equal */
+#define QAIC_SEM_WAIT_GT_0 6 /* Greater than 0 */
+
+#define QAIC_TRANS_UNDEFINED 0
+#define QAIC_TRANS_PASSTHROUGH_FROM_USR 1
+#define QAIC_TRANS_PASSTHROUGH_TO_USR 2
+#define QAIC_TRANS_PASSTHROUGH_FROM_DEV 3
+#define QAIC_TRANS_PASSTHROUGH_TO_DEV 4
+#define QAIC_TRANS_DMA_XFER_FROM_USR 5
+#define QAIC_TRANS_DMA_XFER_TO_DEV 6
+#define QAIC_TRANS_ACTIVATE_FROM_USR 7
+#define QAIC_TRANS_ACTIVATE_FROM_DEV 8
+#define QAIC_TRANS_ACTIVATE_TO_DEV 9
+#define QAIC_TRANS_DEACTIVATE_FROM_USR 10
+#define QAIC_TRANS_DEACTIVATE_FROM_DEV 11
+#define QAIC_TRANS_STATUS_FROM_USR 12
+#define QAIC_TRANS_STATUS_TO_USR 13
+#define QAIC_TRANS_STATUS_FROM_DEV 14
+#define QAIC_TRANS_STATUS_TO_DEV 15
+#define QAIC_TRANS_TERMINATE_FROM_DEV 16
+#define QAIC_TRANS_TERMINATE_TO_DEV 17
+#define QAIC_TRANS_DMA_XFER_CONT 18
+#define QAIC_TRANS_VALIDATE_PARTITION_FROM_DEV 19
+#define QAIC_TRANS_VALIDATE_PARTITION_TO_DEV 20
+
+/**
+ * struct qaic_manage_trans_hdr - Header for a transaction in a manage message.
+ * @type: In. Identifies this transaction. See QAIC_TRANS_* defines.
+ * @len: In. Length of this transaction, including this header.
+ */
+struct qaic_manage_trans_hdr {
+ __u32 type;
+ __u32 len;
+};
+
+/**
+ * struct qaic_manage_trans_passthrough - Defines a passthrough transaction.
+ * @hdr: In. Header to identify this transaction.
+ * @data: In. Payload of this transaction. Opaque to the driver. Userspace must
+ * encode in little endian and align/pad to 64-bit.
+ */
+struct qaic_manage_trans_passthrough {
+ struct qaic_manage_trans_hdr hdr;
+ __u8 data[];
+};
+
+/**
+ * struct qaic_manage_trans_dma_xfer - Defines a DMA transfer transaction.
+ * @hdr: In. Header to identify this transaction.
+ * @tag: In. Identifies this transfer in other transactions. Opaque to the
+ * driver.
+ * @pad: Structure padding.
+ * @addr: In. Address of the data to DMA to the device.
+ * @size: In. Length of the data to DMA to the device.
+ */
+struct qaic_manage_trans_dma_xfer {
+ struct qaic_manage_trans_hdr hdr;
+ __u32 tag;
+ __u32 pad;
+ __u64 addr;
+ __u64 size;
+};
+
+/**
+ * struct qaic_manage_trans_activate_to_dev - Defines an activate request.
+ * @hdr: In. Header to identify this transaction.
+ * @queue_size: In. Number of elements for DBC request and response queues.
+ * @eventfd: Unused.
+ * @options: In. Device specific options for this activate.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_manage_trans_activate_to_dev {
+ struct qaic_manage_trans_hdr hdr;
+ __u32 queue_size;
+ __u32 eventfd;
+ __u32 options;
+ __u32 pad;
+};
+
+/**
+ * struct qaic_manage_trans_activate_from_dev - Defines an activate response.
+ * @hdr: Out. Header to identify this transaction.
+ * @status: Out. Return code of the request from the device.
+ * @dbc_id: Out. Id of the assigned DBC for successful request.
+ * @options: Out. Device specific options for this activate.
+ */
+struct qaic_manage_trans_activate_from_dev {
+ struct qaic_manage_trans_hdr hdr;
+ __u32 status;
+ __u32 dbc_id;
+ __u64 options;
+};
+
+/**
+ * struct qaic_manage_trans_deactivate - Defines a deactivate request.
+ * @hdr: In. Header to identify this transaction.
+ * @dbc_id: In. Id of assigned DBC.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_manage_trans_deactivate {
+ struct qaic_manage_trans_hdr hdr;
+ __u32 dbc_id;
+ __u32 pad;
+};
+
+/**
+ * struct qaic_manage_trans_status_to_dev - Defines a status request.
+ * @hdr: In. Header to identify this transaction.
+ */
+struct qaic_manage_trans_status_to_dev {
+ struct qaic_manage_trans_hdr hdr;
+};
+
+/**
+ * struct qaic_manage_trans_status_from_dev - Defines a status response.
+ * @hdr: Out. Header to identify this transaction.
+ * @major: Out. NNC protocol version major number.
+ * @minor: Out. NNC protocol version minor number.
+ * @status: Out. Return code from device.
+ * @status_flags: Out. Flags from device. Bit 0 indicates if CRCs are required.
+ */
+struct qaic_manage_trans_status_from_dev {
+ struct qaic_manage_trans_hdr hdr;
+ __u16 major;
+ __u16 minor;
+ __u32 status;
+ __u64 status_flags;
+};
+
+/**
+ * struct qaic_manage_msg - Defines a message to the device.
+ * @len: In. Length of all the transactions contained within this message.
+ * @count: In. Number of transactions in this message.
+ * @data: In. Address of an array where the transactions can be found.
+ */
+struct qaic_manage_msg {
+ __u32 len;
+ __u32 count;
+ __u64 data;
+};
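
As a sketch of how these pieces compose, a single status transaction wrapped in a manage message is about the smallest useful call. Treating QAIC_TRANS_STATUS_FROM_USR as the type userspace places in the header is an assumption based on the *_FROM_USR naming; fd is an open accel node and drm/qaic_accel.h an assumed install path::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/qaic_accel.h>

  static int qaic_query_status(int fd)
  {
      struct qaic_manage_trans_status_to_dev status = {
          .hdr = {
              .type = QAIC_TRANS_STATUS_FROM_USR, /* assumed user-originated type */
              .len = sizeof(status),  /* includes the header itself */
          },
      };
      struct qaic_manage_msg msg = {
          .len = sizeof(status),  /* total length of all transactions */
          .count = 1,
          .data = (__u64)(uintptr_t)&status,
      };

      return ioctl(fd, DRM_IOCTL_QAIC_MANAGE, &msg);
  }
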
+
+/**
+ * struct qaic_create_bo - Defines a request to create a buffer object.
+ * @size: In. Size of the buffer in bytes.
+ * @handle: Out. GEM handle for the BO.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_create_bo {
+ __u64 size;
+ __u32 handle;
+ __u32 pad;
+};
+
+/**
+ * struct qaic_mmap_bo - Defines a request to prepare a BO for mmap().
+ * @handle: In. Handle of the GEM BO to prepare for mmap().
+ * @pad: Structure padding. Must be 0.
+ * @offset: Out. Offset value to provide to mmap().
+ */
+struct qaic_mmap_bo {
+ __u32 handle;
+ __u32 pad;
+ __u64 offset;
+};
+
+/**
+ * struct qaic_sem - Defines a semaphore command for a BO slice.
+ * @val: In. Only lower 12 bits are valid.
+ * @index: In. Only lower 5 bits are valid.
+ * @presync: In. 1 if presync operation, 0 if postsync.
+ * @cmd: In. One of QAIC_SEM_*.
+ * @flags: In. Bitfield. See QAIC_SEM_INSYNCFENCE and QAIC_SEM_OUTSYNCFENCE
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_sem {
+ __u16 val;
+ __u8 index;
+ __u8 presync;
+ __u8 cmd;
+ __u8 flags;
+ __u16 pad;
+};
+
+/**
+ * struct qaic_attach_slice_entry - Defines a single BO slice.
+ * @size: In. Size of this slice in bytes.
+ * @sem0: In. Semaphore command 0. Must be 0 if not valid.
+ * @sem1: In. Semaphore command 1. Must be 0 if not valid.
+ * @sem2: In. Semaphore command 2. Must be 0 if not valid.
+ * @sem3: In. Semaphore command 3. Must be 0 if not valid.
+ * @dev_addr: In. Device address this slice pushes to or pulls from.
+ * @db_addr: In. Address of the doorbell to ring.
+ * @db_data: In. Data to write to the doorbell.
+ * @db_len: In. Size of the doorbell data in bits - 32, 16, or 8. 0 is for
+ * inactive doorbells.
+ * @offset: In. Start of this slice as an offset from the start of the BO.
+ */
+struct qaic_attach_slice_entry {
+ __u64 size;
+ struct qaic_sem sem0;
+ struct qaic_sem sem1;
+ struct qaic_sem sem2;
+ struct qaic_sem sem3;
+ __u64 dev_addr;
+ __u64 db_addr;
+ __u32 db_data;
+ __u32 db_len;
+ __u64 offset;
+};
+
+/**
+ * struct qaic_attach_slice_hdr - Defines metadata for a set of BO slices.
+ * @count: In. Number of slices for this BO.
+ * @dbc_id: In. Associate the sliced BO with this DBC.
+ * @handle: In. GEM handle of the BO to slice.
+ * @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE.
+ * @size: Deprecated. This value is ignored and the size of @handle is used instead.
+ */
+struct qaic_attach_slice_hdr {
+ __u32 count;
+ __u32 dbc_id;
+ __u32 handle;
+ __u32 dir;
+ __u64 size;
+};
+
+/**
+ * struct qaic_attach_slice - Defines a set of BO slices.
+ * @hdr: In. Metadata of the set of slices.
+ * @data: In. Pointer to an array containing the slice definitions.
+ */
+struct qaic_attach_slice {
+ struct qaic_attach_slice_hdr hdr;
+ __u64 data;
+};
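
For illustration, the simplest slicing configuration is one slice covering the whole BO with no semaphore gating; the unused semaphore slots stay zeroed as the sem0..sem3 documentation requires. bo_handle, dbc_id and dev_addr are placeholders::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/qaic_accel.h>

  static int attach_one_slice(int fd, __u32 bo_handle, __u32 dbc_id,
                              __u64 bo_size, __u64 dev_addr)
  {
      struct qaic_attach_slice_entry entry = {
          .size = bo_size,
          .dev_addr = dev_addr,
          .db_len = 0,    /* 0 = inactive doorbell */
          .offset = 0,    /* slice starts at the beginning of the BO */
      };
      struct qaic_attach_slice slice = {
          .hdr = {
              .count = 1,
              .dbc_id = dbc_id,
              .handle = bo_handle,
              .dir = 1,   /* 1 = DMA_TO_DEVICE */
          },
          .data = (__u64)(uintptr_t)&entry,
      };

      return ioctl(fd, DRM_IOCTL_QAIC_ATTACH_SLICE_BO, &slice);
  }
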
+
+/**
+ * struct qaic_execute_entry - Defines a BO to submit to the device.
+ * @handle: In. GEM handle of the BO to commit to the device.
+ * @dir: In. Direction of data. 1 = to device, 2 = from device.
+ */
+struct qaic_execute_entry {
+ __u32 handle;
+ __u32 dir;
+};
+
+/**
+ * struct qaic_partial_execute_entry - Defines a BO to resize and submit.
+ * @handle: In. GEM handle of the BO to commit to the device.
+ * @dir: In. Direction of data. 1 = to device, 2 = from device.
+ * @resize: In. New size of the BO. Must be <= the original BO size. A
+ * value of 0 is interpreted as meaning no DMA transfer is
+ * involved.
+ */
+struct qaic_partial_execute_entry {
+ __u32 handle;
+ __u32 dir;
+ __u64 resize;
+};
+
+/**
+ * struct qaic_execute_hdr - Defines metadata for BO submission.
+ * @count: In. Number of BOs to submit.
+ * @dbc_id: In. DBC to submit the BOs on.
+ */
+struct qaic_execute_hdr {
+ __u32 count;
+ __u32 dbc_id;
+};
+
+/**
+ * struct qaic_execute - Defines a list of BOs to submit to the device.
+ * @hdr: In. BO list metadata.
+ * @data: In. Pointer to an array of BOs to submit.
+ */
+struct qaic_execute {
+ struct qaic_execute_hdr hdr;
+ __u64 data;
+};
+
+/**
+ * struct qaic_wait - Defines a blocking wait for BO execution.
+ * @handle: In. GEM handle of the BO to wait on.
+ * @timeout: In. Maximum time in ms to wait for the BO.
+ * @dbc_id: In. DBC the BO is submitted to.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_wait {
+ __u32 handle;
+ __u32 timeout;
+ __u32 dbc_id;
+ __u32 pad;
+};
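
A hedged sketch combining the two structs above: submit one input BO on a DBC and block until the device consumes it. It assumes the BO already has a slicing configuration attached via DRM_IOCTL_QAIC_ATTACH_SLICE_BO; bo_handle and dbc_id are placeholders::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/qaic_accel.h>

  static int run_bo(int fd, __u32 bo_handle, __u32 dbc_id)
  {
      struct qaic_execute_entry entry = {
          .handle = bo_handle,
          .dir = 1,       /* 1 = to device */
      };
      struct qaic_execute exec = {
          .hdr = { .count = 1, .dbc_id = dbc_id },
          .data = (__u64)(uintptr_t)&entry,
      };
      struct qaic_wait wait = {
          .handle = bo_handle,
          .timeout = 1000,        /* ms */
          .dbc_id = dbc_id,
      };

      if (ioctl(fd, DRM_IOCTL_QAIC_EXECUTE_BO, &exec))
          return -1;

      /* Block until the device has consumed the BO or the timeout hits. */
      return ioctl(fd, DRM_IOCTL_QAIC_WAIT_BO, &wait);
  }
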
+
+/**
+ * struct qaic_perf_stats_hdr - Defines metadata for getting BO perf info.
+ * @count: In. Number of BOs requested.
+ * @pad: Structure padding. Must be 0.
+ * @dbc_id: In. DBC the BOs are associated with.
+ */
+struct qaic_perf_stats_hdr {
+ __u16 count;
+ __u16 pad;
+ __u32 dbc_id;
+};
+
+/**
+ * struct qaic_perf_stats - Defines a request for getting BO perf info.
+ * @hdr: In. Request metadata.
+ * @data: In. Pointer to array of stats structures that will receive the data.
+ */
+struct qaic_perf_stats {
+ struct qaic_perf_stats_hdr hdr;
+ __u64 data;
+};
+
+/**
+ * struct qaic_perf_stats_entry - Defines perf info for a single BO.
+ * @handle: In. GEM handle of the BO to get perf stats for.
+ * @queue_level_before: Out. Number of elements in the queue before this BO
+ * was submitted.
+ * @num_queue_element: Out. Number of elements added to the queue to submit
+ * this BO.
+ * @submit_latency_us: Out. Time taken by the driver to submit this BO.
+ * @device_latency_us: Out. Time taken by the device to execute this BO.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_perf_stats_entry {
+ __u32 handle;
+ __u32 queue_level_before;
+ __u32 num_queue_element;
+ __u32 submit_latency_us;
+ __u32 device_latency_us;
+ __u32 pad;
+};
+
+/**
+ * struct qaic_detach_slice - Detaches slicing configuration from BO.
+ * @handle: In. GEM handle of the BO to detach the slicing configuration from.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_detach_slice {
+ __u32 handle;
+ __u32 pad;
+};
+
+#define DRM_QAIC_MANAGE 0x00
+#define DRM_QAIC_CREATE_BO 0x01
+#define DRM_QAIC_MMAP_BO 0x02
+#define DRM_QAIC_ATTACH_SLICE_BO 0x03
+#define DRM_QAIC_EXECUTE_BO 0x04
+#define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05
+#define DRM_QAIC_WAIT_BO 0x06
+#define DRM_QAIC_PERF_STATS_BO 0x07
+#define DRM_QAIC_DETACH_SLICE_BO 0x08
+
+#define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg)
+#define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo)
+#define DRM_IOCTL_QAIC_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MMAP_BO, struct qaic_mmap_bo)
+#define DRM_IOCTL_QAIC_ATTACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_ATTACH_SLICE_BO, struct qaic_attach_slice)
+#define DRM_IOCTL_QAIC_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_EXECUTE_BO, struct qaic_execute)
+#define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute)
+#define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait)
+#define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats)
+#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* QAIC_ACCEL_H_ */
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
deleted file mode 100644
index 690e9c62f510..000000000000
--- a/include/uapi/drm/r128_drm.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
- * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
- */
-/*
- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- * Kevin E. Martin <martin@valinux.com>
- */
-
-#ifndef __R128_DRM_H__
-#define __R128_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the X server file (r128_sarea.h)
- */
-#ifndef __R128_SAREA_DEFINES__
-#define __R128_SAREA_DEFINES__
-
-/* What needs to be changed for the current vertex buffer?
- */
-#define R128_UPLOAD_CONTEXT 0x001
-#define R128_UPLOAD_SETUP 0x002
-#define R128_UPLOAD_TEX0 0x004
-#define R128_UPLOAD_TEX1 0x008
-#define R128_UPLOAD_TEX0IMAGES 0x010
-#define R128_UPLOAD_TEX1IMAGES 0x020
-#define R128_UPLOAD_CORE 0x040
-#define R128_UPLOAD_MASKS 0x080
-#define R128_UPLOAD_WINDOW 0x100
-#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
-#define R128_REQUIRE_QUIESCENCE 0x400
-#define R128_UPLOAD_ALL 0x7ff
-
-#define R128_FRONT 0x1
-#define R128_BACK 0x2
-#define R128_DEPTH 0x4
-
-/* Primitive types
- */
-#define R128_POINTS 0x1
-#define R128_LINES 0x2
-#define R128_LINE_STRIP 0x3
-#define R128_TRIANGLES 0x4
-#define R128_TRIANGLE_FAN 0x5
-#define R128_TRIANGLE_STRIP 0x6
-
-/* Vertex/indirect buffer size
- */
-#define R128_BUFFER_SIZE 16384
-
-/* Byte offsets for indirect buffer data
- */
-#define R128_INDEX_PRIM_OFFSET 20
-#define R128_HOSTDATA_BLIT_OFFSET 32
-
-/* Keep these small for testing.
- */
-#define R128_NR_SAREA_CLIPRECTS 12
-
-/* There are 2 heaps (local/AGP). Each region within a heap is a
- * minimum of 64k, and there are at most 64 of them per heap.
- */
-#define R128_LOCAL_TEX_HEAP 0
-#define R128_AGP_TEX_HEAP 1
-#define R128_NR_TEX_HEAPS 2
-#define R128_NR_TEX_REGIONS 64
-#define R128_LOG_TEX_GRANULARITY 16
-
-#define R128_NR_CONTEXT_REGS 12
-
-#define R128_MAX_TEXTURE_LEVELS 11
-#define R128_MAX_TEXTURE_UNITS 2
-
-#endif /* __R128_SAREA_DEFINES__ */
-
-typedef struct {
- /* Context state - can be written in one large chunk */
- unsigned int dst_pitch_offset_c;
- unsigned int dp_gui_master_cntl_c;
- unsigned int sc_top_left_c;
- unsigned int sc_bottom_right_c;
- unsigned int z_offset_c;
- unsigned int z_pitch_c;
- unsigned int z_sten_cntl_c;
- unsigned int tex_cntl_c;
- unsigned int misc_3d_state_cntl_reg;
- unsigned int texture_clr_cmp_clr_c;
- unsigned int texture_clr_cmp_msk_c;
- unsigned int fog_color_c;
-
- /* Texture state */
- unsigned int tex_size_pitch_c;
- unsigned int constant_color_c;
-
- /* Setup state */
- unsigned int pm4_vc_fpu_setup;
- unsigned int setup_cntl;
-
- /* Mask state */
- unsigned int dp_write_mask;
- unsigned int sten_ref_mask_c;
- unsigned int plane_3d_mask_c;
-
- /* Window state */
- unsigned int window_xy_offset;
-
- /* Core state */
- unsigned int scale_3d_cntl;
-} drm_r128_context_regs_t;
-
-/* Setup registers for each texture unit
- */
-typedef struct {
- unsigned int tex_cntl;
- unsigned int tex_combine_cntl;
- unsigned int tex_size_pitch;
- unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
- unsigned int tex_border_color;
-} drm_r128_texture_regs_t;
-
-typedef struct drm_r128_sarea {
- /* The channel for communication of state information to the kernel
- * on firing a vertex buffer.
- */
- drm_r128_context_regs_t context_state;
- drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
- unsigned int dirty;
- unsigned int vertsize;
- unsigned int vc_format;
-
- /* The current cliprects, or a subset thereof.
- */
- struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
- unsigned int nbox;
-
- /* Counters for client-side throttling of rendering clients.
- */
- unsigned int last_frame;
- unsigned int last_dispatch;
-
- struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
- unsigned int tex_age[R128_NR_TEX_HEAPS];
- int ctx_owner;
- int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
- int pfCurrentPage; /* which buffer is being displayed? */
-} drm_r128_sarea_t;
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmR128.h)
- */
-
-/* Rage 128 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_R128_INIT 0x00
-#define DRM_R128_CCE_START 0x01
-#define DRM_R128_CCE_STOP 0x02
-#define DRM_R128_CCE_RESET 0x03
-#define DRM_R128_CCE_IDLE 0x04
-/* 0x05 not used */
-#define DRM_R128_RESET 0x06
-#define DRM_R128_SWAP 0x07
-#define DRM_R128_CLEAR 0x08
-#define DRM_R128_VERTEX 0x09
-#define DRM_R128_INDICES 0x0a
-#define DRM_R128_BLIT 0x0b
-#define DRM_R128_DEPTH 0x0c
-#define DRM_R128_STIPPLE 0x0d
-/* 0x0e not used */
-#define DRM_R128_INDIRECT 0x0f
-#define DRM_R128_FULLSCREEN 0x10
-#define DRM_R128_CLEAR2 0x11
-#define DRM_R128_GETPARAM 0x12
-#define DRM_R128_FLIP 0x13
-
-#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
-#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
-#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
-#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
-#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
-/* 0x05 not used */
-#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
-#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
-#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
-#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
-#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
-#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
-#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
-#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
-/* 0x0e not used */
-#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
-#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
-#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
-#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
-#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
-
-typedef struct drm_r128_init {
- enum {
- R128_INIT_CCE = 0x01,
- R128_CLEANUP_CCE = 0x02
- } func;
- unsigned long sarea_priv_offset;
- int is_pci;
- int cce_mode;
- int cce_secure;
- int ring_size;
- int usec_timeout;
-
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int span_offset;
-
- unsigned long fb_offset;
- unsigned long mmio_offset;
- unsigned long ring_offset;
- unsigned long ring_rptr_offset;
- unsigned long buffers_offset;
- unsigned long agp_textures_offset;
-} drm_r128_init_t;
-
-typedef struct drm_r128_cce_stop {
- int flush;
- int idle;
-} drm_r128_cce_stop_t;
-
-typedef struct drm_r128_clear {
- unsigned int flags;
- unsigned int clear_color;
- unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask;
-} drm_r128_clear_t;
-
-typedef struct drm_r128_vertex {
- int prim;
- int idx; /* Index of vertex buffer */
- int count; /* Number of vertices in buffer */
- int discard; /* Client finished with buffer? */
-} drm_r128_vertex_t;
-
-typedef struct drm_r128_indices {
- int prim;
- int idx;
- int start;
- int end;
- int discard; /* Client finished with buffer? */
-} drm_r128_indices_t;
-
-typedef struct drm_r128_blit {
- int idx;
- int pitch;
- int offset;
- int format;
- unsigned short x, y;
- unsigned short width, height;
-} drm_r128_blit_t;
-
-typedef struct drm_r128_depth {
- enum {
- R128_WRITE_SPAN = 0x01,
- R128_WRITE_PIXELS = 0x02,
- R128_READ_SPAN = 0x03,
- R128_READ_PIXELS = 0x04
- } func;
- int n;
- int __user *x;
- int __user *y;
- unsigned int __user *buffer;
- unsigned char __user *mask;
-} drm_r128_depth_t;
-
-typedef struct drm_r128_stipple {
- unsigned int __user *mask;
-} drm_r128_stipple_t;
-
-typedef struct drm_r128_indirect {
- int idx;
- int start;
- int end;
- int discard;
-} drm_r128_indirect_t;
-
-typedef struct drm_r128_fullscreen {
- enum {
- R128_INIT_FULLSCREEN = 0x01,
- R128_CLEANUP_FULLSCREEN = 0x02
- } func;
-} drm_r128_fullscreen_t;
-
-/* 2.3: An ioctl to get parameters that aren't available to the 3d
- * client any other way.
- */
-#define R128_PARAM_IRQ_NR 1
-
-typedef struct drm_r128_getparam {
- int param;
- void __user *value;
-} drm_r128_getparam_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
deleted file mode 100644
index 0f6eddef74aa..000000000000
--- a/include/uapi/drm/savage_drm.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* savage_drm.h -- Public header for the savage driver
- *
- * Copyright 2004 Felix Kuehling
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __SAVAGE_DRM_H__
-#define __SAVAGE_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#ifndef __SAVAGE_SAREA_DEFINES__
-#define __SAVAGE_SAREA_DEFINES__
-
-/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
- * regions, subject to a minimum region size of (1<<16) == 64k.
- *
- * Clients may subdivide regions internally, but when sharing between
- * clients, the region size is the minimum granularity.
- */
-
-#define SAVAGE_CARD_HEAP 0
-#define SAVAGE_AGP_HEAP 1
-#define SAVAGE_NR_TEX_HEAPS 2
-#define SAVAGE_NR_TEX_REGIONS 16
-#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
-
-#endif /* __SAVAGE_SAREA_DEFINES__ */
-
-typedef struct _drm_savage_sarea {
- /* LRU lists for texture memory in agp space and on the card.
- */
- struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
- 1];
- unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
-
- /* Mechanism to validate card state.
- */
- int ctxOwner;
-} drm_savage_sarea_t, *drm_savage_sarea_ptr;
-
-/* Savage-specific ioctls
- */
-#define DRM_SAVAGE_BCI_INIT 0x00
-#define DRM_SAVAGE_BCI_CMDBUF 0x01
-#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
-#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
-
-#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
-#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
-#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
-#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
-
-#define SAVAGE_DMA_PCI 1
-#define SAVAGE_DMA_AGP 3
-typedef struct drm_savage_init {
- enum {
- SAVAGE_INIT_BCI = 1,
- SAVAGE_CLEANUP_BCI = 2
- } func;
- unsigned int sarea_priv_offset;
-
- /* some parameters */
- unsigned int cob_size;
- unsigned int bci_threshold_lo, bci_threshold_hi;
- unsigned int dma_type;
-
- /* frame buffer layout */
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
-
- /* local textures */
- unsigned int texture_offset;
- unsigned int texture_size;
-
- /* physical locations of non-permanent maps */
- unsigned long status_offset;
- unsigned long buffers_offset;
- unsigned long agp_textures_offset;
- unsigned long cmd_dma_offset;
-} drm_savage_init_t;
-
-typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
-typedef struct drm_savage_cmdbuf {
- /* command buffer in client's address space */
- drm_savage_cmd_header_t __user *cmd_addr;
- unsigned int size; /* size of the command buffer in 64bit units */
-
- unsigned int dma_idx; /* DMA buffer index to use */
- int discard; /* discard DMA buffer when done */
- /* vertex buffer in client's address space */
- unsigned int __user *vb_addr;
- unsigned int vb_size; /* size of client vertex buffer in bytes */
- unsigned int vb_stride; /* stride of vertices in 32bit words */
- /* boxes in client's address space */
- struct drm_clip_rect __user *box_addr;
- unsigned int nbox; /* number of clipping boxes */
-} drm_savage_cmdbuf_t;
-
-#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
-#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
-#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
-typedef struct drm_savage_event {
- unsigned int count;
- unsigned int flags;
-} drm_savage_event_emit_t, drm_savage_event_wait_t;
-
-/* Commands for the cmdbuf ioctl
- */
-#define SAVAGE_CMD_STATE 0 /* a range of state registers */
-#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
-#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
-#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
-#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices client vertex buffer */
-#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
-#define SAVAGE_CMD_SWAP 6 /* swap buffers */
-
-/* Primitive types
-*/
-#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
-#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
-#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
-#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
- * shading on s3d */
-
-/* Skip flags (vertex format)
- */
-#define SAVAGE_SKIP_Z 0x01
-#define SAVAGE_SKIP_W 0x02
-#define SAVAGE_SKIP_C0 0x04
-#define SAVAGE_SKIP_C1 0x08
-#define SAVAGE_SKIP_S0 0x10
-#define SAVAGE_SKIP_T0 0x20
-#define SAVAGE_SKIP_ST0 0x30
-#define SAVAGE_SKIP_S1 0x40
-#define SAVAGE_SKIP_T1 0x80
-#define SAVAGE_SKIP_ST1 0xc0
-#define SAVAGE_SKIP_ALL_S3D 0x3f
-#define SAVAGE_SKIP_ALL_S4 0xff
-
-/* Buffer names for clear command
- */
-#define SAVAGE_FRONT 0x1
-#define SAVAGE_BACK 0x2
-#define SAVAGE_DEPTH 0x4
-
-/* 64-bit command header
- */
-union drm_savage_cmd_header {
- struct {
- unsigned char cmd; /* command */
- unsigned char pad0;
- unsigned short pad1;
- unsigned short pad2;
- unsigned short pad3;
- } cmd; /* generic */
- struct {
- unsigned char cmd;
- unsigned char global; /* need idle engine? */
- unsigned short count; /* number of consecutive registers */
- unsigned short start; /* first register */
- unsigned short pad3;
- } state; /* SAVAGE_CMD_STATE */
- struct {
- unsigned char cmd;
- unsigned char prim; /* primitive type */
- unsigned short skip; /* vertex format (skip flags) */
- unsigned short count; /* number of vertices */
- unsigned short start; /* first vertex in DMA/vertex buffer */
- } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
- struct {
- unsigned char cmd;
- unsigned char prim;
- unsigned short skip;
- unsigned short count; /* number of indices that follow */
- unsigned short pad3;
- } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
- struct {
- unsigned char cmd;
- unsigned char pad0;
- unsigned short pad1;
- unsigned int flags;
- } clear0; /* SAVAGE_CMD_CLEAR */
- struct {
- unsigned int mask;
- unsigned int value;
- } clear1; /* SAVAGE_CMD_CLEAR data */
-};
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/include/uapi/drm/sis_drm.h b/include/uapi/drm/sis_drm.h
deleted file mode 100644
index 3e3f7e989e0b..000000000000
--- a/include/uapi/drm/sis_drm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
-/*
- * Copyright 2005 Eric Anholt
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __SIS_DRM_H__
-#define __SIS_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* SiS specific ioctls */
-#define NOT_USED_0_3
-#define DRM_SIS_FB_ALLOC 0x04
-#define DRM_SIS_FB_FREE 0x05
-#define NOT_USED_6_12
-#define DRM_SIS_AGP_INIT 0x13
-#define DRM_SIS_AGP_ALLOC 0x14
-#define DRM_SIS_AGP_FREE 0x15
-#define DRM_SIS_FB_INIT 0x16
-
-#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
-#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
-/*
-#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
-#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
-#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
-*/
-
-typedef struct {
- int context;
- unsigned long offset;
- unsigned long size;
- unsigned long free;
-} drm_sis_mem_t;
-
-typedef struct {
- unsigned long offset, size;
-} drm_sis_agp_t;
-
-typedef struct {
- unsigned long offset, size;
-} drm_sis_fb_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* __SIS_DRM_H__ */
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index c4df3c3668b3..94cfc306d50a 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -1,24 +1,5 @@
-/*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+/* Copyright (c) 2012-2020 NVIDIA Corporation */
#ifndef _UAPI_TEGRA_DRM_H_
#define _UAPI_TEGRA_DRM_H_
@@ -29,6 +10,8 @@
extern "C" {
#endif
+/* Tegra DRM legacy UAPI. Only enabled with STAGING */
+
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
@@ -649,8 +632,8 @@ struct drm_tegra_gem_get_flags {
#define DRM_TEGRA_SYNCPT_READ 0x02
#define DRM_TEGRA_SYNCPT_INCR 0x03
#define DRM_TEGRA_SYNCPT_WAIT 0x04
-#define DRM_TEGRA_OPEN_CHANNEL 0x05
-#define DRM_TEGRA_CLOSE_CHANNEL 0x06
+#define DRM_TEGRA_OPEN_CHANNEL 0x05
+#define DRM_TEGRA_CLOSE_CHANNEL 0x06
#define DRM_TEGRA_GET_SYNCPT 0x07
#define DRM_TEGRA_SUBMIT 0x08
#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
@@ -674,6 +657,402 @@ struct drm_tegra_gem_get_flags {
#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
+/* New Tegra DRM UAPI */
+
+/*
+ * Reported by the driver in the `capabilities` field.
+ *
+ * DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT: If set, the engine is cache coherent
+ * with regard to system memory.
+ */
+#define DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT (1 << 0)
+
+struct drm_tegra_channel_open {
+ /**
+ * @host1x_class: [in]
+ *
+ * Host1x class of the engine that will be programmed using this
+ * channel.
+ */
+ __u32 host1x_class;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @context: [out]
+ *
+ * Opaque identifier corresponding to the opened channel.
+ */
+ __u32 context;
+
+ /**
+ * @version: [out]
+ *
+ * Version of the engine hardware. This can be used by userspace
+ * to determine how the engine needs to be programmed.
+ */
+ __u32 version;
+
+ /**
+ * @capabilities: [out]
+ *
+ * Flags describing the hardware capabilities.
+ */
+ __u32 capabilities;
+ __u32 padding;
+};
+
+struct drm_tegra_channel_close {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to close.
+ */
+ __u32 context;
+ __u32 padding;
+};
+
+/*
+ * Mapping flags that can be used to influence how the mapping is created.
+ *
+ * DRM_TEGRA_CHANNEL_MAP_READ: create mapping that allows HW read access
+ * DRM_TEGRA_CHANNEL_MAP_WRITE: create mapping that allows HW write access
+ */
+#define DRM_TEGRA_CHANNEL_MAP_READ (1 << 0)
+#define DRM_TEGRA_CHANNEL_MAP_WRITE (1 << 1)
+#define DRM_TEGRA_CHANNEL_MAP_READ_WRITE (DRM_TEGRA_CHANNEL_MAP_READ | \
+ DRM_TEGRA_CHANNEL_MAP_WRITE)
+
+struct drm_tegra_channel_map {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to make the memory available to.
+ */
+ __u32 context;
+
+ /**
+ * @handle: [in]
+ *
+ * GEM handle of the memory to map.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @mapping: [out]
+ *
+ * Identifier corresponding to the mapping, to be used for
+ * relocations or unmapping later.
+ */
+ __u32 mapping;
+};
+
+struct drm_tegra_channel_unmap {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to unmap memory from.
+ */
+ __u32 context;
+
+ /**
+ * @mapping: [in]
+ *
+ * Mapping identifier of the memory mapping to unmap.
+ */
+ __u32 mapping;
+};
+
+/* Submission */
+
+/**
+ * Specify that bit 39 of the patched-in address should be set to switch
+ * swizzling between Tegra and non-Tegra sector layout on systems that store
+ * surfaces in system memory in non-Tegra sector layout.
+ */
+#define DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT (1 << 0)
+
+struct drm_tegra_submit_buf {
+ /**
+ * @mapping: [in]
+ *
+ * Identifier of the mapping to use in the submission.
+ */
+ __u32 mapping;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * Information for relocation patching.
+ */
+ struct {
+ /**
+ * @target_offset: [in]
+ *
+ * Offset from the start of the mapping of the data whose
+ * address is to be patched into the gather.
+ */
+ __u64 target_offset;
+
+ /**
+ * @gather_offset_words: [in]
+ *
+ * Offset in words from the start of the gather data to
+ * where the address should be patched in.
+ */
+ __u32 gather_offset_words;
+
+ /**
+ * @shift: [in]
+ *
+ * Number of bits the address should be shifted right before
+ * patching in.
+ */
+ __u32 shift;
+ } reloc;
+};
+
+/**
+ * Execute `words` words of Host1x opcodes specified in the `gather_data_ptr`
+ * buffer. Each GATHER_UPTR command uses successive words from the buffer.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR 0
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT 1
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands. The threshold is calculated relative to the start of the job.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE 2
+
+struct drm_tegra_submit_cmd_gather_uptr {
+ __u32 words;
+ __u32 reserved[3];
+};
+
+struct drm_tegra_submit_cmd_wait_syncpt {
+ __u32 id;
+ __u32 value;
+ __u32 reserved[2];
+};
+
+struct drm_tegra_submit_cmd {
+ /**
+ * @type: [in]
+ *
+ * Command type to execute. One of the DRM_TEGRA_SUBMIT_CMD*
+ * defines.
+ */
+ __u32 type;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ union {
+ struct drm_tegra_submit_cmd_gather_uptr gather_uptr;
+ struct drm_tegra_submit_cmd_wait_syncpt wait_syncpt;
+ __u32 reserved[4];
+ };
+};
+
+struct drm_tegra_submit_syncpt {
+ /**
+ * @id: [in]
+ *
+ * ID of the syncpoint that the job will increment.
+ */
+ __u32 id;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @increments: [in]
+ *
+ * Number of times the job will increment this syncpoint.
+ */
+ __u32 increments;
+
+ /**
+ * @value: [out]
+ *
+ * Value the syncpoint will have once the job has completed all
+ * its specified syncpoint increments.
+ *
+ * Note that the kernel may increment the syncpoint before or after
+ * the job. These increments are not reflected in this field.
+ *
+ * If the job hangs or times out, not all of the increments may
+ * get executed.
+ */
+ __u32 value;
+};
+
+struct drm_tegra_channel_submit {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to submit this job to.
+ */
+ __u32 context;
+
+ /**
+ * @num_bufs: [in]
+ *
+ * Number of elements in the `bufs_ptr` array.
+ */
+ __u32 num_bufs;
+
+ /**
+ * @num_cmds: [in]
+ *
+ * Number of elements in the `cmds_ptr` array.
+ */
+ __u32 num_cmds;
+
+ /**
+ * @gather_data_words: [in]
+ *
+ * Number of 32-bit words in the `gather_data_ptr` array.
+ */
+ __u32 gather_data_words;
+
+ /**
+ * @bufs_ptr: [in]
+ *
+ * Pointer to an array of drm_tegra_submit_buf structures.
+ */
+ __u64 bufs_ptr;
+
+ /**
+ * @cmds_ptr: [in]
+ *
+ * Pointer to an array of drm_tegra_submit_cmd structures.
+ */
+ __u64 cmds_ptr;
+
+ /**
+ * @gather_data_ptr: [in]
+ *
+ * Pointer to an array of Host1x opcodes to be used by GATHER_UPTR
+ * commands.
+ */
+ __u64 gather_data_ptr;
+
+ /**
+ * @syncobj_in: [in]
+ *
+ * Handle for DRM syncobj that will be waited on before submission.
+ * Ignored if zero.
+ */
+ __u32 syncobj_in;
+
+ /**
+ * @syncobj_out: [in]
+ *
+ * Handle for DRM syncobj that will have its fence replaced with
+ * the job's completion fence. Ignored if zero.
+ */
+ __u32 syncobj_out;
+
+ /**
+ * @syncpt: [in,out]
+ *
+ * Information about the syncpoint the job will increment.
+ */
+ struct drm_tegra_submit_syncpt syncpt;
+};
+
+struct drm_tegra_syncpoint_allocate {
+ /**
+ * @id: [out]
+ *
+ * ID of allocated syncpoint.
+ */
+ __u32 id;
+ __u32 padding;
+};
+
+struct drm_tegra_syncpoint_free {
+ /**
+ * @id: [in]
+ *
+ * ID of syncpoint to free.
+ */
+ __u32 id;
+ __u32 padding;
+};
+
+struct drm_tegra_syncpoint_wait {
+ /**
+ * @timeout_ns: [in]
+ *
+ * Absolute timestamp at which the wait will time out.
+ */
+ __s64 timeout_ns;
+
+ /**
+ * @id: [in]
+ *
+ * ID of syncpoint to wait on.
+ */
+ __u32 id;
+
+ /**
+ * @threshold: [in]
+ *
+ * Threshold to wait for.
+ */
+ __u32 threshold;
+
+ /**
+ * @value: [out]
+ *
+ * Value of the syncpoint upon wait completion.
+ */
+ __u32 value;
+
+ __u32 padding;
+};
+
+#define DRM_IOCTL_TEGRA_CHANNEL_OPEN DRM_IOWR(DRM_COMMAND_BASE + 0x10, struct drm_tegra_channel_open)
+#define DRM_IOCTL_TEGRA_CHANNEL_CLOSE DRM_IOWR(DRM_COMMAND_BASE + 0x11, struct drm_tegra_channel_close)
+#define DRM_IOCTL_TEGRA_CHANNEL_MAP DRM_IOWR(DRM_COMMAND_BASE + 0x12, struct drm_tegra_channel_map)
+#define DRM_IOCTL_TEGRA_CHANNEL_UNMAP DRM_IOWR(DRM_COMMAND_BASE + 0x13, struct drm_tegra_channel_unmap)
+#define DRM_IOCTL_TEGRA_CHANNEL_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + 0x14, struct drm_tegra_channel_submit)
+
+#define DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE DRM_IOWR(DRM_COMMAND_BASE + 0x20, struct drm_tegra_syncpoint_allocate)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_FREE DRM_IOWR(DRM_COMMAND_BASE + 0x21, struct drm_tegra_syncpoint_free)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_WAIT DRM_IOWR(DRM_COMMAND_BASE + 0x22, struct drm_tegra_syncpoint_wait)
+
#if defined(__cplusplus)
}
#endif
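To make the new submission flow concrete, here is a minimal userspace sketch (not part of the header): the DRM fd, GEM handle, Host1x class value and opcode buffer are assumed inputs, and error handling is elided for brevity.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/tegra_drm.h>

    /* Open a channel, map a buffer into it, and submit `words` words of
     * Host1x opcodes from `gather` as a single GATHER_UPTR command. */
    int tegra_submit_sketch(int fd, __u32 gem_handle, __u32 host1x_class,
                            __u32 *gather, __u32 words)
    {
        struct drm_tegra_channel_open open_args = {
            .host1x_class = host1x_class,
        };
        ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &open_args);

        /* Make the GEM object visible to the engine behind the channel. */
        struct drm_tegra_channel_map map_args = {
            .context = open_args.context,
            .handle = gem_handle,
            .flags = DRM_TEGRA_CHANNEL_MAP_READ_WRITE,
        };
        ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_MAP, &map_args);

        /* The job will increment a freshly allocated syncpoint once. */
        struct drm_tegra_syncpoint_allocate syncpt = { 0 };
        ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE, &syncpt);

        struct drm_tegra_submit_cmd cmd = {
            .type = DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR,
            .gather_uptr = { .words = words },
        };
        struct drm_tegra_submit_buf buf = {
            .mapping = map_args.mapping,
        };
        struct drm_tegra_channel_submit submit = {
            .context = open_args.context,
            .num_bufs = 1,
            .num_cmds = 1,
            .gather_data_words = words,
            .bufs_ptr = (uintptr_t)&buf,
            .cmds_ptr = (uintptr_t)&cmd,
            .gather_data_ptr = (uintptr_t)gather,
            .syncpt = { .id = syncpt.id, .increments = 1 },
        };
        return ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_SUBMIT, &submit);
    }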
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 1ce746e228d9..dce1835eced4 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -38,6 +38,10 @@ extern "C" {
#define DRM_V3D_GET_BO_OFFSET 0x05
#define DRM_V3D_SUBMIT_TFU 0x06
#define DRM_V3D_SUBMIT_CSD 0x07
+#define DRM_V3D_PERFMON_CREATE 0x08
+#define DRM_V3D_PERFMON_DESTROY 0x09
+#define DRM_V3D_PERFMON_GET_VALUES 0x0a
+#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -47,8 +51,83 @@ extern "C" {
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
+#define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \
+ struct drm_v3d_perfmon_create)
+#define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \
+ struct drm_v3d_perfmon_destroy)
+#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
+ struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+#define DRM_V3D_SUBMIT_EXTENSION 0x02
+
+/* struct drm_v3d_extension - ioctl extensions
+ *
+ * Linked-list of generic extensions where the id identifies which struct is
+ * pointed to by ext_data. Therefore, DRM_V3D_EXT_ID_* is used in id to identify
+ * the extension type.
+ */
+struct drm_v3d_extension {
+ __u64 next;
+ __u32 id;
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
+ __u32 flags; /* mbz */
+};
+
+/* struct drm_v3d_sem - wait/signal semaphore
+ *
+ * For a binary semaphore, only the syncobj handle is used; the flags and
+ * point fields are ignored. The point field is defined for the timeline
+ * syncobj feature.
+ */
+struct drm_v3d_sem {
+ __u32 handle; /* syncobj */
+ /* rsv below, for future uses */
+ __u32 flags;
+ __u64 point; /* for timeline sem support */
+ __u64 mbz[2]; /* must be zero, rsv */
+};
+
+/* Enum for each of the V3D queues. */
+enum v3d_queue {
+ V3D_BIN,
+ V3D_RENDER,
+ V3D_TFU,
+ V3D_CSD,
+ V3D_CACHE_CLEAN,
+ V3D_CPU,
+};
+
+/**
+ * struct drm_v3d_multi_sync - ioctl extension to add support for multiple
+ * syncobjs on command submission.
+ *
+ * When an extension with the DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it
+ * points to this extension to define wait and signal dependencies, instead
+ * of the single in/out sync entries used when submitting commands. The
+ * flags field determines the stage at which wait dependencies are set.
+ */
+struct drm_v3d_multi_sync {
+ struct drm_v3d_extension base;
+ /* Array of wait and signal semaphores */
+ __u64 in_syncs;
+ __u64 out_syncs;
+
+ /* Number of entries */
+ __u32 in_sync_count;
+ __u32 out_sync_count;
+
+ /* set the stage (v3d_queue) to sync */
+ __u32 wait_stage;
+
+ __u32 pad; /* mbz */
+};
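As a hedged illustration of the extension mechanism above (not part of the header), a multisync extension can be filled in as follows; the drm_v3d_sem arrays are assumed to be prepared by the caller.

.. code-block:: C

    #include <stdint.h>
    #include <string.h>
    #include <drm/v3d_drm.h>

    /* Wait on `in` semaphores before the bin stage and signal `out` on
     * completion, instead of using the single in/out sync pair. */
    static void fill_multisync(struct drm_v3d_multi_sync *ms,
                               struct drm_v3d_sem *in, __u32 n_in,
                               struct drm_v3d_sem *out, __u32 n_out)
    {
        memset(ms, 0, sizeof(*ms));
        ms->base.id = DRM_V3D_EXT_ID_MULTI_SYNC;
        ms->base.next = 0;            /* end of the extension chain */
        ms->in_syncs = (uintptr_t)in;
        ms->out_syncs = (uintptr_t)out;
        ms->in_sync_count = n_in;
        ms->out_sync_count = n_out;
        ms->wait_stage = V3D_BIN;     /* wait before binning starts */
    }

The submitting struct (drm_v3d_submit_cl below, for example) would then set DRM_V3D_SUBMIT_EXTENSION in its flags and point its extensions field at this struct.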
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -126,7 +205,16 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
+ /* DRM_V3D_SUBMIT_* properties */
__u32 flags;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmon_id;
+
+ __u32 pad;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/**
@@ -195,6 +283,9 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_TFU,
DRM_V3D_PARAM_SUPPORTS_CSD,
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
+ DRM_V3D_PARAM_SUPPORTS_PERFMON,
+ DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+ DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@@ -233,6 +324,16 @@ struct drm_v3d_submit_tfu {
__u32 in_sync;
/* Sync object to signal when the TFU job is done. */
__u32 out_sync;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+
+ struct {
+ __u32 ioc;
+ __u32 pad;
+ } v71;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -258,6 +359,362 @@ struct drm_v3d_submit_csd {
__u32 in_sync;
/* Sync object to signal when the CSD job is done. */
__u32 out_sync;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmon_id;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+
+ __u32 flags;
+
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
+ * indirect CSD
+ *
+ * When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it
+ * points to this extension to define an indirect CSD submission. It creates a
+ * CPU job linked to a CSD job. The CPU job waits for the indirect CSD
+ * dependencies and, once they are signaled, it updates the CSD job config
+ * before allowing the CSD job execution.
+ */
+struct drm_v3d_indirect_csd {
+ struct drm_v3d_extension base;
+
+ /* Indirect CSD */
+ struct drm_v3d_submit_csd submit;
+
+ /* Handle of the indirect BO, which should also be attached to the
+ * indirect CSD.
+ */
+ __u32 indirect;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ __u32 offset;
+
+ /* Workgroup size */
+ __u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream. If the uniform rewrite is not needed,
+ * the offset must be 0xffffffff.
+ */
+ __u32 wg_uniform_offsets[3];
+};
+
+/**
+ * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
+ * a timestamp query
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
+ * this extension to define a timestamp query submission. This CPU job will
+ * calculate the timestamp query and update the query value within the
+ * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
+ * query availability.
+ */
+struct drm_v3d_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* mbz */
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
+ * reset timestamp queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a reset timestamp submission. This CPU
+ * job will reset the timestamp queries based on the value offset of the first
+ * query. Moreover, it will reset the timestamp syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Offset of the first query within the timestamp BO for its value */
+ __u32 offset;
+
+ /* Number of queries */
+ __u32 count;
+};
+
+/**
+ * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
+ * query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a copy timestamp query submission. This
+ * CPU job will copy the timestamp queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether the buffer is written using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether the buffer may be written even if the query is unavailable */
+ __u8 do_partial;
+
+ /* Defines whether the availability bit is written to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+};
+
+/**
+ * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
+ * reset performance queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a reset performance submission. This CPU
+ * job will reset the performance queries by resetting the values of the
+ * performance monitors. Moreover, it will reset the syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Array of performance query syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+/**
+ * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
+ * performance query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a copy performance query submission. This
+ * CPU job will copy the performance queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether the buffer is written using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether the buffer may be written even if the query is unavailable */
+ __u8 do_partial;
+
+ /* Defines whether the availability bit is written to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Number of performance counters related to this query pool */
+ __u32 ncounters;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of performance query syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+struct drm_v3d_submit_cpu {
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ *
+ * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
+ * that contains the workgroup counts.
+ *
+ * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
+ * that will contain the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
+ * one BO, that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
+ * BOs. The first is the BO to which the timestamp queries will be
+ * written. The second is the BO that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
+ * BOs.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
+ * BO, where the performance queries will be written.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+};
+
+enum {
+ V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
+ V3D_PERFCNT_FEP_VALID_PRIMS,
+ V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS,
+ V3D_PERFCNT_FEP_VALID_QUADS,
+ V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL,
+ V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL,
+ V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS,
+ V3D_PERFCNT_TLB_QUADS_ZERO_COV,
+ V3D_PERFCNT_TLB_QUADS_NONZERO_COV,
+ V3D_PERFCNT_TLB_QUADS_WRITTEN,
+ V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD,
+ V3D_PERFCNT_PTB_PRIM_CLIP,
+ V3D_PERFCNT_PTB_PRIM_REV,
+ V3D_PERFCNT_QPU_IDLE_CYCLES,
+ V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER,
+ V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG,
+ V3D_PERFCNT_QPU_CYCLES_VALID_INSTR,
+ V3D_PERFCNT_QPU_CYCLES_TMU_STALL,
+ V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL,
+ V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL,
+ V3D_PERFCNT_QPU_IC_HIT,
+ V3D_PERFCNT_QPU_IC_MISS,
+ V3D_PERFCNT_QPU_UC_HIT,
+ V3D_PERFCNT_QPU_UC_MISS,
+ V3D_PERFCNT_TMU_TCACHE_ACCESS,
+ V3D_PERFCNT_TMU_TCACHE_MISS,
+ V3D_PERFCNT_VPM_VDW_STALL,
+ V3D_PERFCNT_VPM_VCD_STALL,
+ V3D_PERFCNT_BIN_ACTIVE,
+ V3D_PERFCNT_RDR_ACTIVE,
+ V3D_PERFCNT_L2T_HITS,
+ V3D_PERFCNT_L2T_MISSES,
+ V3D_PERFCNT_CYCLE_COUNT,
+ V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER,
+ V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT,
+ V3D_PERFCNT_PTB_PRIMS_BINNED,
+ V3D_PERFCNT_AXI_WRITES_WATCH_0,
+ V3D_PERFCNT_AXI_READS_WATCH_0,
+ V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0,
+ V3D_PERFCNT_AXI_READ_STALLS_WATCH_0,
+ V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0,
+ V3D_PERFCNT_AXI_READ_BYTES_WATCH_0,
+ V3D_PERFCNT_AXI_WRITES_WATCH_1,
+ V3D_PERFCNT_AXI_READS_WATCH_1,
+ V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1,
+ V3D_PERFCNT_AXI_READ_STALLS_WATCH_1,
+ V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1,
+ V3D_PERFCNT_AXI_READ_BYTES_WATCH_1,
+ V3D_PERFCNT_TLB_PARTIAL_QUADS,
+ V3D_PERFCNT_TMU_CONFIG_ACCESSES,
+ V3D_PERFCNT_L2T_NO_ID_STALL,
+ V3D_PERFCNT_L2T_COM_QUE_STALL,
+ V3D_PERFCNT_L2T_TMU_WRITES,
+ V3D_PERFCNT_TMU_ACTIVE_CYCLES,
+ V3D_PERFCNT_TMU_STALLED_CYCLES,
+ V3D_PERFCNT_CLE_ACTIVE,
+ V3D_PERFCNT_L2T_TMU_READS,
+ V3D_PERFCNT_L2T_CLE_READS,
+ V3D_PERFCNT_L2T_VCD_READS,
+ V3D_PERFCNT_L2T_TMUCFG_READS,
+ V3D_PERFCNT_L2T_SLC0_READS,
+ V3D_PERFCNT_L2T_SLC1_READS,
+ V3D_PERFCNT_L2T_SLC2_READS,
+ V3D_PERFCNT_L2T_TMU_W_MISSES,
+ V3D_PERFCNT_L2T_TMU_R_MISSES,
+ V3D_PERFCNT_L2T_CLE_MISSES,
+ V3D_PERFCNT_L2T_VCD_MISSES,
+ V3D_PERFCNT_L2T_TMUCFG_MISSES,
+ V3D_PERFCNT_L2T_SLC0_MISSES,
+ V3D_PERFCNT_L2T_SLC1_MISSES,
+ V3D_PERFCNT_L2T_SLC2_MISSES,
+ V3D_PERFCNT_CORE_MEM_WRITES,
+ V3D_PERFCNT_L2T_MEM_WRITES,
+ V3D_PERFCNT_PTB_MEM_WRITES,
+ V3D_PERFCNT_TLB_MEM_WRITES,
+ V3D_PERFCNT_CORE_MEM_READS,
+ V3D_PERFCNT_L2T_MEM_READS,
+ V3D_PERFCNT_PTB_MEM_READS,
+ V3D_PERFCNT_PSE_MEM_READS,
+ V3D_PERFCNT_TLB_MEM_READS,
+ V3D_PERFCNT_GMP_MEM_READS,
+ V3D_PERFCNT_PTB_W_MEM_WORDS,
+ V3D_PERFCNT_TLB_W_MEM_WORDS,
+ V3D_PERFCNT_PSE_R_MEM_WORDS,
+ V3D_PERFCNT_TLB_R_MEM_WORDS,
+ V3D_PERFCNT_TMU_MRU_HITS,
+ V3D_PERFCNT_COMPUTE_ACTIVE,
+ V3D_PERFCNT_NUM,
+};
+
+#define DRM_V3D_MAX_PERF_COUNTERS 32
+
+struct drm_v3d_perfmon_create {
+ __u32 id;
+ __u32 ncounters;
+ __u8 counters[DRM_V3D_MAX_PERF_COUNTERS];
+};
+
+struct drm_v3d_perfmon_destroy {
+ __u32 id;
+};
+
+/*
+ * Returns the values of the performance counters tracked by this
+ * perfmon (as an array of ncounters u64 values).
+ *
+ * No implicit synchronization is performed, so the user has to
+ * guarantee that any jobs using this perfmon have already been
+ * completed (probably by blocking on the seqno returned by the
+ * last exec that used the perfmon).
+ */
+struct drm_v3d_perfmon_get_values {
+ __u32 id;
+ __u32 pad;
+ __u64 values_ptr;
};
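The comment above implies a create/attach/read lifecycle for perfmons. A hedged sketch, assuming a valid DRM fd and that every job referencing the perfmon has already completed:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/v3d_drm.h>

    /* Create a one-counter perfmon, read it back, then destroy it. Jobs
     * would reference create.id through their perfmon_id field. */
    int read_cycle_count(int fd, __u64 *out)
    {
        struct drm_v3d_perfmon_create create = {
            .ncounters = 1,
            .counters = { V3D_PERFCNT_CYCLE_COUNT },
        };
        if (ioctl(fd, DRM_IOCTL_V3D_PERFMON_CREATE, &create))
            return -1;

        /* ... submit jobs with .perfmon_id = create.id, wait for them ... */

        __u64 value = 0;
        struct drm_v3d_perfmon_get_values get = {
            .id = create.id,
            .values_ptr = (uintptr_t)&value,
        };
        if (ioctl(fd, DRM_IOCTL_V3D_PERFMON_GET_VALUES, &get))
            return -1;
        *out = value;

        struct drm_v3d_perfmon_destroy destroy = { .id = create.id };
        return ioctl(fd, DRM_IOCTL_V3D_PERFMON_DESTROY, &destroy);
    }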
#if defined(__cplusplus)
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
deleted file mode 100644
index a1e125d42208..000000000000
--- a/include/uapi/drm/via_drm.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _VIA_DRM_H_
-#define _VIA_DRM_H_
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- */
-
-#ifndef _VIA_DEFINES_
-#define _VIA_DEFINES_
-
-
-#define VIA_NR_SAREA_CLIPRECTS 8
-#define VIA_NR_XVMC_PORTS 10
-#define VIA_NR_XVMC_LOCKS 5
-#define VIA_MAX_CACHELINE_SIZE 64
-#define XVMCLOCKPTR(saPriv,lockNo) \
- ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
- (VIA_MAX_CACHELINE_SIZE - 1)) & \
- ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
- VIA_MAX_CACHELINE_SIZE*(lockNo)))
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define VIA_NR_TEX_REGIONS 64
-#define VIA_LOG_MIN_TEX_REGION_SIZE 16
-#endif
-
-#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
-#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
-#define VIA_UPLOAD_CTX 0x4
-#define VIA_UPLOAD_BUFFERS 0x8
-#define VIA_UPLOAD_TEX0 0x10
-#define VIA_UPLOAD_TEX1 0x20
-#define VIA_UPLOAD_CLIPRECTS 0x40
-#define VIA_UPLOAD_ALL 0xff
-
-/* VIA specific ioctls */
-#define DRM_VIA_ALLOCMEM 0x00
-#define DRM_VIA_FREEMEM 0x01
-#define DRM_VIA_AGP_INIT 0x02
-#define DRM_VIA_FB_INIT 0x03
-#define DRM_VIA_MAP_INIT 0x04
-#define DRM_VIA_DEC_FUTEX 0x05
-#define NOT_USED
-#define DRM_VIA_DMA_INIT 0x07
-#define DRM_VIA_CMDBUFFER 0x08
-#define DRM_VIA_FLUSH 0x09
-#define DRM_VIA_PCICMD 0x0a
-#define DRM_VIA_CMDBUF_SIZE 0x0b
-#define NOT_USED
-#define DRM_VIA_WAIT_IRQ 0x0d
-#define DRM_VIA_DMA_BLIT 0x0e
-#define DRM_VIA_BLIT_SYNC 0x0f
-
-#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
-#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
-#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
-#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
-#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
-#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
-#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
-#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
-#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
-#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
-#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
- drm_via_cmdbuf_size_t)
-#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
-#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
-#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-#define VIA_TEX_SETUP_SIZE 8
-
-/* Flags for clear ioctl
- */
-#define VIA_FRONT 0x1
-#define VIA_BACK 0x2
-#define VIA_DEPTH 0x4
-#define VIA_STENCIL 0x8
-#define VIA_MEM_VIDEO 0 /* matches drm constant */
-#define VIA_MEM_AGP 1 /* matches drm constant */
-#define VIA_MEM_SYSTEM 2
-#define VIA_MEM_MIXED 3
-#define VIA_MEM_UNKNOWN 4
-
-typedef struct {
- __u32 offset;
- __u32 size;
-} drm_via_agp_t;
-
-typedef struct {
- __u32 offset;
- __u32 size;
-} drm_via_fb_t;
-
-typedef struct {
- __u32 context;
- __u32 type;
- __u32 size;
- unsigned long index;
- unsigned long offset;
-} drm_via_mem_t;
-
-typedef struct _drm_via_init {
- enum {
- VIA_INIT_MAP = 0x01,
- VIA_CLEANUP_MAP = 0x02
- } func;
-
- unsigned long sarea_priv_offset;
- unsigned long fb_offset;
- unsigned long mmio_offset;
- unsigned long agpAddr;
-} drm_via_init_t;
-
-typedef struct _drm_via_futex {
- enum {
- VIA_FUTEX_WAIT = 0x00,
- VIA_FUTEX_WAKE = 0X01
- } func;
- __u32 ms;
- __u32 lock;
- __u32 val;
-} drm_via_futex_t;
-
-typedef struct _drm_via_dma_init {
- enum {
- VIA_INIT_DMA = 0x01,
- VIA_CLEANUP_DMA = 0x02,
- VIA_DMA_INITIALIZED = 0x03
- } func;
-
- unsigned long offset;
- unsigned long size;
- unsigned long reg_pause_addr;
-} drm_via_dma_init_t;
-
-typedef struct _drm_via_cmdbuffer {
- char __user *buf;
- unsigned long size;
-} drm_via_cmdbuffer_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_via_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char inUse; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_via_tex_region_t;
-
-typedef struct _drm_via_sarea {
- unsigned int dirty;
- unsigned int nbox;
- struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
- drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
- int texAge; /* last time texture was uploaded */
- int ctxOwner; /* last context to upload state */
- int vertexPrim;
-
- /*
- * Below is for XvMC.
- * We want the lock integers alone on, and aligned to, a cache line.
- * Therefore this somewhat strange construct.
- */
-
- char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
-
- unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
- unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
- unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
-
- /* Used by the 3d driver only at this point, for pageflipping:
- */
- unsigned int pfCurrentOffset;
-} drm_via_sarea_t;
-
-typedef struct _drm_via_cmdbuf_size {
- enum {
- VIA_CMDBUF_SPACE = 0x01,
- VIA_CMDBUF_LAG = 0x02
- } func;
- int wait;
- __u32 size;
-} drm_via_cmdbuf_size_t;
-
-typedef enum {
- VIA_IRQ_ABSOLUTE = 0x0,
- VIA_IRQ_RELATIVE = 0x1,
- VIA_IRQ_SIGNAL = 0x10000000,
- VIA_IRQ_FORCE_SEQUENCE = 0x20000000
-} via_irq_seq_type_t;
-
-#define VIA_IRQ_FLAGS_MASK 0xF0000000
-
-enum drm_via_irqs {
- drm_via_irq_hqv0 = 0,
- drm_via_irq_hqv1,
- drm_via_irq_dma0_dd,
- drm_via_irq_dma0_td,
- drm_via_irq_dma1_dd,
- drm_via_irq_dma1_td,
- drm_via_irq_num
-};
-
-struct drm_via_wait_irq_request {
- unsigned irq;
- via_irq_seq_type_t type;
- __u32 sequence;
- __u32 signal;
-};
-
-typedef union drm_via_irqwait {
- struct drm_via_wait_irq_request request;
- struct drm_wait_vblank_reply reply;
-} drm_via_irqwait_t;
-
-typedef struct drm_via_blitsync {
- __u32 sync_handle;
- unsigned engine;
-} drm_via_blitsync_t;
-
-/* - * Below,"flags" is currently unused but will be used for possible future
- * extensions like kernel space bounce buffers for bad alignments and
- * blit engine busy-wait polling for better latency in the absence of
- * interrupts.
- */
-
-typedef struct drm_via_dmablit {
- __u32 num_lines;
- __u32 line_length;
-
- __u32 fb_addr;
- __u32 fb_stride;
-
- unsigned char *mem_addr;
- __u32 mem_stride;
-
- __u32 flags;
- int to_fb;
-
- drm_via_blitsync_t sync;
-} drm_via_dmablit_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* _VIA_DRM_H_ */
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index f06a789f34cd..c2ce71987e9b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -46,12 +46,16 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@@ -60,6 +64,17 @@ struct drm_virtgpu_map {
__u32 pad;
};
+#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
+#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
+ VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
+ 0)
+struct drm_virtgpu_execbuffer_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
+/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
__u32 size;
@@ -67,10 +82,22 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+ __u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
+ __u32 num_in_syncobjs;
+ __u32 num_out_syncobjs;
+ __u64 in_syncobjs;
+ __u64 out_syncobjs;
};
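A hedged fragment showing how the new syncobj arrays plug into an execbuffer call: fd and the syncobj handles are assumed to be valid, the command stream fields are elided, and DRM_IOCTL_VIRTGPU_EXECBUFFER is the request macro defined earlier in this header.

.. code-block:: C

    struct drm_virtgpu_execbuffer_syncobj wait = {
        .handle = in_handle,
        .flags = VIRTGPU_EXECBUF_SYNCOBJ_RESET, /* reset after waiting */
    };
    struct drm_virtgpu_execbuffer_syncobj signal = {
        .handle = out_handle,
    };
    struct drm_virtgpu_execbuffer exbuf = {
        .syncobj_stride = sizeof(struct drm_virtgpu_execbuffer_syncobj),
        .num_in_syncobjs = 1,
        .num_out_syncobjs = 1,
        .in_syncobjs = (uintptr_t)&wait,
        .out_syncobjs = (uintptr_t)&signal,
        /* .command, .size, .bo_handles, ... as before */
    };
    ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf);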
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
+#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@@ -100,7 +127,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 stride;
+ __u32 blob_mem;
};
struct drm_virtgpu_3d_box {
@@ -117,6 +144,8 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@@ -124,6 +153,8 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -140,6 +171,55 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * Used by 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
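A hedged fragment creating a mappable guest blob; the size is illustrative, fd is assumed valid, and the request macro is defined further down in this header. HOST3D blobs would additionally fill cmd, cmd_size and blob_id.

.. code-block:: C

    struct drm_virtgpu_resource_create_blob blob = {
        .blob_mem = VIRTGPU_BLOB_MEM_GUEST,
        .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
        .size = 4096,
    };
    if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob) == 0) {
        /* blob.bo_handle and blob.res_handle are now valid; the BO can
         * be mapped via DRM_IOCTL_VIRTGPU_MAP followed by mmap(). */
    }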
+
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
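A hedged fragment initializing a context before first use; the capset id and ring count are placeholder values, and fd is assumed valid.

.. code-block:: C

    struct drm_virtgpu_context_set_param params[] = {
        { .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = 2 },
        { .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 4 },
    };
    struct drm_virtgpu_context_init init = {
        .num_params = 2,
        .ctx_set_params = (uintptr_t)params,
    };
    ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);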
+
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect. The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -175,6 +255,14 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 02e917507479..7d786a0cc835 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +73,9 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29
+#define DRM_VMW_MKSSTAT_RESET 30
+#define DRM_VMW_MKSSTAT_ADD 31
+#define DRM_VMW_MKSSTAT_REMOVE 32
/*************************************************************************/
/**
@@ -89,6 +93,12 @@ extern "C" {
*
* DRM_VMW_PARAM_SM5
* SM5 support is enabled.
+ *
+ * DRM_VMW_PARAM_GL43
+ * SM5.1+GL4.3 support is enabled.
+ *
+ * DRM_VMW_PARAM_DEVICE_ID
+ * PCI ID of the underlying SVGA device.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -107,6 +117,8 @@ extern "C" {
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15
+#define DRM_VMW_PARAM_GL43 16
+#define DRM_VMW_PARAM_DEVICE_ID 17
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -891,7 +903,8 @@ struct drm_vmw_shader_arg {
/**
* enum drm_vmw_surface_flags
*
- * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
+ * @drm_vmw_surface_flag_shareable: Deprecated - all userspace surfaces are
+ * shareable.
* @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
* surface.
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
@@ -1236,6 +1249,44 @@ struct drm_vmw_msg_arg {
__u32 receive_len;
};
+/**
+ * struct drm_vmw_mksstat_add_arg
+ *
+ * @stat: Pointer to user-space stat-counters array, page-aligned.
+ * @info: Pointer to user-space counter-infos array, page-aligned.
+ * @strs: Pointer to user-space stat strings, page-aligned.
+ * @stat_len: Length in bytes of stat-counters array.
+ * @info_len: Length in bytes of counter-infos array.
+ * @strs_len: Length in bytes of the stat strings, terminators included.
+ * @description: Pointer to instance descriptor string; will be truncated
+ * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
+ * @id: Output identifier of the produced record; -1 if error.
+ *
+ * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
+ */
+struct drm_vmw_mksstat_add_arg {
+ __u64 stat;
+ __u64 info;
+ __u64 strs;
+ __u64 stat_len;
+ __u64 info_len;
+ __u64 strs_len;
+ __u64 description;
+ __u64 id;
+};
+
+/**
+ * struct drm_vmw_mksstat_remove_arg
+ *
+ * @id: Identifier of the record being disposed, originally obtained through
+ * DRM_VMW_MKSSTAT_ADD ioctl.
+ *
+ * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
+ */
+struct drm_vmw_mksstat_remove_arg {
+ __u64 id;
+};
+
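A hedged sketch of the add path described above. The UAPI header only defines the ioctl offsets, so the request macro is reconstructed here (the IOWR direction is an assumption); buffer sizes are illustrative and freeing on failure is elided.

.. code-block:: C

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/vmwgfx_drm.h>

    /* Assumed macro: built from the DRM_VMW_MKSSTAT_ADD offset above. */
    #define VMW_IOCTL_MKSSTAT_ADD \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \
                 struct drm_vmw_mksstat_add_arg)

    int mksstat_add_sketch(int fd)
    {
        void *stat, *info, *strs;

        /* All three arrays must be page-aligned, per the doc above. */
        if (posix_memalign(&stat, 4096, 4096) ||
            posix_memalign(&info, 4096, 4096) ||
            posix_memalign(&strs, 4096, 4096))
            return -1;

        struct drm_vmw_mksstat_add_arg add = {
            .stat = (uintptr_t)stat, .stat_len = 4096,
            .info = (uintptr_t)info, .info_len = 4096,
            .strs = (uintptr_t)strs, .strs_len = 4096,
            .description = (uintptr_t)"example instance",
        };
        /* On success, add.id identifies the record for MKSSTAT_REMOVE. */
        return ioctl(fd, VMW_IOCTL_MKSSTAT_ADD, &add);
    }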
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
new file mode 100644
index 000000000000..538a3ac95c54
--- /dev/null
+++ b/include/uapi/drm/xe_drm.h
@@ -0,0 +1,1358 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ * Sections in this file are organized as follows:
+ * 1. IOCTL definition
+ * 2. Extension definition and helper structs
+ * 3. IOCTL's Query structs in the order of the Query's entries.
+ * 4. The rest of IOCTL structs in the order of IOCTL declaration.
+ */
+
+/**
+ * DOC: Xe Device Block Diagram
+ *
+ * The diagram below represents a high-level simplification of a discrete
+ * GPU supported by the Xe driver. It shows some device components which
+ * are necessary to understand this API, as well as their relations to
+ * each other. This diagram does not represent real hardware::
+ *
+ * ┌──────────────────────────────────────────────────────────────────┐
+ * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
+ * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │
+ * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │
+ * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │
+ * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
+ * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │
+ * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │
+ * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │
+ * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
+ * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
+ * └─────────────────────────────Device0───────┬──────────────────────┘
+ * │
+ * ───────────────────────┴────────── PCI bus
+ */
+
+/**
+ * DOC: Xe uAPI Overview
+ *
+ * This section aims to describe Xe's IOCTL entries, their structs, and other
+ * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
+ * entries and usage.
+ *
+ * List of supported IOCTLs:
+ * - &DRM_IOCTL_XE_DEVICE_QUERY
+ * - &DRM_IOCTL_XE_GEM_CREATE
+ * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ * - &DRM_IOCTL_XE_VM_CREATE
+ * - &DRM_IOCTL_XE_VM_DESTROY
+ * - &DRM_IOCTL_XE_VM_BIND
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ * - &DRM_IOCTL_XE_EXEC
+ * - &DRM_IOCTL_XE_WAIT_USER_FENCE
+ */
+
+/*
+ * xe specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
+ * i.e. [0x40, 0xa0), with 0xa0 excluded. The numbers below are defined as
+ * offsets against DRM_COMMAND_BASE and should be within [0x0, 0x60).
+ */
+#define DRM_XE_DEVICE_QUERY 0x00
+#define DRM_XE_GEM_CREATE 0x01
+#define DRM_XE_GEM_MMAP_OFFSET 0x02
+#define DRM_XE_VM_CREATE 0x03
+#define DRM_XE_VM_DESTROY 0x04
+#define DRM_XE_VM_BIND 0x05
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC 0x09
+#define DRM_XE_WAIT_USER_FENCE 0x0a
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
+#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
+#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
+#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
+#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
+#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
+
+/**
+ * DOC: Xe IOCTL Extensions
+ *
+ * Before detailing the IOCTLs and its structs, it is important to highlight
+ * that every IOCTL in Xe is extensible.
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct drm_xe_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ */
+
+/**
+ * struct drm_xe_user_extension - Base class for defining a chain of extensions
+ */
+struct drm_xe_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct drm_xe_user_extension, or zero if this
+ * is the last one.
+ */
+ __u64 next_extension;
+
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name is just an integer identifier.
+ *
+ * Also note that the namespace for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct drm_xe_user_extension.
+ */
+ __u32 name;
+
+ /**
+ * @pad: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_xe_ext_set_property - Generic set property extension
+ *
+ * A generic struct that allows any of Xe's IOCTLs to be extended
+ * with a set_property operation.
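+ *
+ * A minimal sketch of extending exec-queue creation with a priority
+ * property (the value 1 is an assumption for illustration; the valid
+ * range is device specific, see
+ * %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_ext_set_property ext = {
+ * .base.next_extension = 0,
+ * .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+ * .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
+ * .value = 1,
+ * };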
+ */
+struct drm_xe_ext_set_property {
+ /** @base: base user extension */
+ struct drm_xe_user_extension base;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of the @drm_xe_engine, but it is also used as
+ * the input for engine selection in both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles.
+ *
+ * The @engine_class can be:
+ * - %DRM_XE_ENGINE_CLASS_RENDER
+ * - %DRM_XE_ENGINE_CLASS_COPY
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
+ * - %DRM_XE_ENGINE_CLASS_COMPUTE
+ * - %DRM_XE_ENGINE_CLASS_VM_BIND - A kernel-only class (not an actual
+ * hardware engine class). Used for creating ordered queues of VM
+ * bind operations.
+ */
+struct drm_xe_engine_class_instance {
+#define DRM_XE_ENGINE_CLASS_RENDER 0
+#define DRM_XE_ENGINE_CLASS_COPY 1
+#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
+#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
+#define DRM_XE_ENGINE_CLASS_COMPUTE 4
+#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+ /** @engine_class: engine class id */
+ __u16 engine_class;
+ /** @engine_instance: engine instance id */
+ __u16 engine_instance;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad;
+};
+
+/**
+ * struct drm_xe_engine - describe hardware engine
+ */
+struct drm_xe_engine {
+ /** @instance: The @drm_xe_engine_class_instance */
+ struct drm_xe_engine_class_instance instance;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses a
+ * struct @drm_xe_query_engines in .data, which embeds an array of
+ * struct @drm_xe_engine entries.
+ */
+struct drm_xe_query_engines {
+ /** @num_engines: number of engines returned in @engines */
+ __u32 num_engines;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @engines: The returned engines for this device */
+ struct drm_xe_engine engines[];
+};
+
+/**
+ * enum drm_xe_memory_class - Supported memory classes.
+ */
+enum drm_xe_memory_class {
+ /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+ DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
+ /**
+ * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+ * represents the memory that is local to the device, which we
+ * call VRAM. Not valid on integrated platforms.
+ */
+ DRM_XE_MEM_REGION_CLASS_VRAM
+};
+
+/**
+ * struct drm_xe_mem_region - Describes a memory region as known to
+ * the driver.
+ */
+struct drm_xe_mem_region {
+ /**
+ * @mem_class: The memory class describing this region.
+ *
+ * See enum drm_xe_memory_class for supported values.
+ */
+ __u16 mem_class;
+ /**
+ * @instance: The unique ID for this region, which serves as the
+ * index in the placement bitmask used as argument for
+ * &DRM_IOCTL_XE_GEM_CREATE
+ */
+ __u16 instance;
+ /**
+ * @min_page_size: Min page-size in bytes for this region.
+ *
+ * When the kernel allocates memory for this region, the
+ * underlying pages will be at least @min_page_size in size.
+ * Buffer objects with an allowable placement in this region must be
+ * created with a size aligned to this value.
+ * GPU virtual address mappings of (parts of) buffer objects that
+ * may be placed in this region must also have their GPU virtual
+ * address and range aligned to this value.
+ * Affected IOCTLs will return %-EINVAL if alignment restrictions are
+ * not met.
+ */
+ __u32 min_page_size;
+ /**
+ * @total_size: The usable size in bytes for this region.
+ */
+ __u64 total_size;
+ /**
+ * @used: Estimate of the memory used in bytes for this region.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero.
+ */
+ __u64 used;
+ /**
+ * @cpu_visible_size: How much of this region can be CPU
+ * accessed, in bytes.
+ *
+ * This will always be <= @total_size, and the remainder (if
+ * any) will not be CPU accessible. If the CPU accessible part
+ * is smaller than @total_size then this is referred to as a
+ * small BAR system.
+ *
+ * On systems without small BAR (full BAR), @cpu_visible_size will
+ * always equal @total_size, since all of it will be CPU
+ * accessible.
+ *
+ * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
+ * regions (for other types the value here will always equal
+ * zero).
+ */
+ __u64 cpu_visible_size;
+ /**
+ * @cpu_visible_used: Estimate of CPU visible memory used, in
+ * bytes.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero. Note this is only currently tracked for
+ * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+ * here will always be zero).
+ */
+ __u64 cpu_visible_used;
+ /** @reserved: Reserved */
+ __u64 reserved[6];
+};
+
+/**
+ * struct drm_xe_query_mem_regions - describe memory regions
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
+ * struct drm_xe_query_mem_regions in .data.
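+ *
+ * A small sketch of locating the system-memory instance in the reply
+ * (assuming `regions` points at the queried data):
+ *
+ * .. code-block:: C
+ *
+ * __u16 sysmem_instance = 0;
+ * for (int i = 0; i < regions->num_mem_regions; i++) {
+ * if (regions->mem_regions[i].mem_class ==
+ * DRM_XE_MEM_REGION_CLASS_SYSMEM) {
+ * sysmem_instance = regions->mem_regions[i].instance;
+ * break;
+ * }
+ * }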
+ */
+struct drm_xe_query_mem_regions {
+ /** @num_mem_regions: number of memory regions returned in @mem_regions */
+ __u32 num_mem_regions;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @mem_regions: The returned memory regions for this device */
+ struct drm_xe_mem_region mem_regions[];
+};
+
+/**
+ * struct drm_xe_query_config - describe the device configuration
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
+ * struct drm_xe_query_config in .data.
+ *
+ * The index in @info can be:
+ * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
+ * and the device revision (next 8 bits)
+ * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
+ * configuration, see list below
+ *
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
+ * has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
+ * required by this device, typically SZ_4K or SZ_64K
+ * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
+ * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
+ * available exec queue priority
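+ *
+ * As a sketch, decoding the first element of the reply (assuming
+ * `config` points at the queried data):
+ *
+ * .. code-block:: C
+ *
+ * __u64 ids = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];
+ * __u16 device_id = ids & 0xffff;
+ * __u8 revision = (ids >> 16) & 0xff;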
+ */
+struct drm_xe_query_config {
+ /** @num_params: number of parameters returned in info */
+ __u32 num_params;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+ /** @info: array of elements containing the config info */
+ __u64 info[];
+};
+
+/**
+ * struct drm_xe_gt - describe an individual GT.
+ *
+ * To be used with drm_xe_query_gt_list, which returns a list of
+ * descriptions for all existing GTs.
+ * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
+ * implementing graphics and/or media operations.
+ *
+ * The index in @type can be:
+ * - %DRM_XE_QUERY_GT_TYPE_MAIN
+ * - %DRM_XE_QUERY_GT_TYPE_MEDIA
+ */
+struct drm_xe_gt {
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
+ /** @type: GT type: Main or Media */
+ __u16 type;
+ /** @tile_id: Tile ID where this GT lives (Information only) */
+ __u16 tile_id;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad[3];
+ /** @reference_clock: A clock frequency for timestamp */
+ __u32 reference_clock;
+ /**
+ * @near_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are nearest to the current engines
+ * of this GT.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 near_mem_regions;
+ /**
+ * @far_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are far from the engines of this GT.
+ * In general, they have extra indirections when compared to the
+ * @near_mem_regions. For a discrete device this could mean system
+ * memory and memory living in a different tile.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 far_mem_regions;
+ /** @reserved: Reserved */
+ __u64 reserved[8];
+};
+
+/**
+ * struct drm_xe_query_gt_list - A list with GT description items.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
+ * drm_xe_query_gt_list in .data.
+ */
+struct drm_xe_query_gt_list {
+ /** @num_gt: number of GT items returned in gt_list */
+ __u32 num_gt;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @gt_list: The GT list returned for this device */
+ struct drm_xe_gt gt_list[];
+};
+
+/**
+ * struct drm_xe_query_topology_mask - describe the topology mask of a GT
+ *
+ * This is the hardware topology which reflects the internal physical
+ * structure of the GPU.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
+ * struct drm_xe_query_topology_mask in .data.
+ *
+ * The @type can be:
+ * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
+ * (DSS) available for geometry operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for geometry.
+ * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
+ * (DSS) available for compute operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_COMPUTE ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for compute.
+ * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
+ * available per Dual Sub Slices (DSS). For example a query response
+ * containing the following in mask:
+ * ``EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 EU.
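+ *
+ * As a sketch, the number of enabled units in a returned mask can be
+ * counted like this (assuming `topo` points at one entry of the reply):
+ *
+ * .. code-block:: C
+ *
+ * int count = 0;
+ * for (int i = 0; i < topo->num_bytes; i++)
+ * count += __builtin_popcount(topo->mask[i]);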
+ */
+struct drm_xe_query_topology_mask {
+ /** @gt_id: GT ID the mask is associated with */
+ __u16 gt_id;
+
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+ /** @type: type of mask */
+ __u16 type;
+
+ /** @num_bytes: number of bytes in requested mask */
+ __u32 num_bytes;
+
+ /** @mask: little-endian mask of @num_bytes */
+ __u8 mask[];
+};
+
+/**
+ * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
+ *
+ * If a query is made with a struct drm_xe_device_query where .query is equal to
+ * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
+ * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
+ * .data points to this allocated structure.
+ *
+ * The query returns the engine cycles, which, along with the GT's
+ * @reference_clock, can be used to calculate the engine timestamp. In
+ * addition, the query returns a set of CPU timestamps that indicate when
+ * the command streamer cycle count was captured.
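+ *
+ * A minimal conversion sketch (assuming `cycles` points at the reply,
+ * `gt` at the matching entry of a prior %DRM_XE_DEVICE_QUERY_GT_LIST
+ * query, and that @reference_clock is in Hz; double is used to avoid
+ * 64-bit overflow):
+ *
+ * .. code-block:: C
+ *
+ * __u64 ticks = cycles->engine_cycles &
+ * ((1ull << cycles->width) - 1);
+ * double gpu_ns = (double)ticks * 1e9 / gt->reference_clock;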
+ */
+struct drm_xe_query_engine_cycles {
+ /**
+ * @eci: This is input by the user and is the engine for which command
+ * streamer cycles are queried.
+ */
+ struct drm_xe_engine_class_instance eci;
+
+ /**
+ * @clockid: This is input by the user and is the reference clock id for
+ * CPU timestamp. For definition, see clock_gettime(2) and
+ * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
+ * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
+ */
+ __s32 clockid;
+
+ /** @width: Width of the engine cycle counter in bits. */
+ __u32 width;
+
+ /**
+ * @engine_cycles: Engine cycles as read from its register
+ * at 0x358 offset.
+ */
+ __u64 engine_cycles;
+
+ /**
+ * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
+ * reading the engine_cycles register using the reference clockid set by the
+ * user.
+ */
+ __u64 cpu_timestamp;
+
+ /**
+ * @cpu_delta: Time delta in ns captured around reading the lower dword
+ * of the engine_cycles register.
+ */
+ __u64 cpu_delta;
+};
+
+/**
+ * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
+ *
+ * Given a uc_type, this will return the branch, major, minor and patch version
+ * of the micro-controller firmware.
+ */
+struct drm_xe_query_uc_fw_version {
+ /** @uc_type: The micro-controller type to query firmware version */
+#define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
+ __u16 uc_type;
+
+ /** @pad: MBZ */
+ __u16 pad;
+
+ /** @branch_ver: branch uc fw version */
+ __u32 branch_ver;
+ /** @major_ver: major uc fw version */
+ __u32 major_ver;
+ /** @minor_ver: minor uc fw version */
+ __u32 minor_ver;
+ /** @patch_ver: patch uc fw version */
+ __u32 patch_ver;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved;
+};
+
+/**
+ * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
+ * structure to query device information
+ *
+ * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
+ * and sets the value in the query member. This determines the type of
+ * the structure provided by the driver in data, among struct drm_xe_query_*.
+ *
+ * The @query can be:
+ * - %DRM_XE_DEVICE_QUERY_ENGINES
+ * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
+ * - %DRM_XE_DEVICE_QUERY_CONFIG
+ * - %DRM_XE_DEVICE_QUERY_GT_LIST
+ * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
+ * configuration of the device such as information on slices, memory,
+ * caches, and so on. It is provided as a table of key / value
+ * attributes.
+ * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
+ * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
+ *
+ * If size is set to 0, the driver fills it with the required size for
+ * the requested type of data to query. If size is equal to the required
+ * size, the queried information is copied into data. If size is set to
+ * a value different from 0 and different from the required size, the
+ * IOCTL call returns -EINVAL.
+ *
+ * For example the following code snippet allows retrieving and printing
+ * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_engines *engines;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * engines = malloc(query.size);
+ * query.data = (uintptr_t)engines;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < engines->num_engines; i++) {
+ * printf("Engine %d: %s\n", i,
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ * "UNKNOWN");
+ * }
+ * free(engines);
+ */
+struct drm_xe_device_query {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_DEVICE_QUERY_ENGINES 0
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
+#define DRM_XE_DEVICE_QUERY_CONFIG 2
+#define DRM_XE_DEVICE_QUERY_GT_LIST 3
+#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
+#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
+#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
+#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
+ /** @query: The type of data to query */
+ __u32 query;
+
+ /** @size: Size of the queried data */
+ __u32 size;
+
+ /** @data: Queried data is placed here */
+ __u64 data;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
+ * gem creation
+ *
+ * The @flags can be:
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
+ * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
+ * possible placement, ensure that the corresponding VRAM allocation
+ * will always use the CPU accessible part of VRAM. This is important
+ * for small-bar systems (on full-bar systems this gets turned into a
+ * noop).
+ * Note1: System memory can be used as an extra placement if the kernel
+ * should spill the allocation to system memory, if space can't be made
+ * available in the CPU accessible part of VRAM (giving the same
+ * behaviour as the i915 interface, see
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
+ * Note2: For clear-color CCS surfaces the kernel needs to read the
+ * clear-color value stored in the buffer, and on discrete platforms we
+ * need to use VRAM for display surfaces, therefore the kernel requires
+ * setting this flag for such objects, otherwise an error is thrown on
+ * small-bar systems.
+ *
+ * @cpu_caching supports the following values:
+ * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
+ * caching. On iGPU this can't be used for scanout surfaces. Currently
+ * not allowed for objects placed in VRAM.
+ * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
+ * is uncached. Scanout surfaces should likely use this. All objects
+ * that can be placed in VRAM must use this.
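+ *
+ * A minimal creation sketch (error handling omitted; `sysmem_instance`
+ * is assumed to come from a prior memory-regions query, and 0x10000 is
+ * an arbitrary size aligned to the region's &min_page_size):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_create create = {
+ * .size = 0x10000,
+ * .placement = 1 << sysmem_instance,
+ * .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
+ * // create.handle now holds the GEM handle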
+ */
+struct drm_xe_gem_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @size: Size of the object to be created; must be aligned to the
+ * minimum alignment (&min_page_size) of the region (system or VRAM).
+ */
+ __u64 size;
+
+ /**
+ * @placement: A mask of memory instances of where BO can be placed.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+ /**
+ * @flags: Flags for object creation, a combination of the
+ * DRM_XE_GEM_CREATE_FLAG_* values above
+ */
+ __u32 flags;
+
+ /**
+ * @vm_id: Attached VM, if any
+ *
+ * If a VM is specified, this BO must:
+ *
+ * 1. Only ever be bound to that VM.
+ * 2. Never be exported as a PRIME fd.
+ */
+ __u32 vm_id;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+ /**
+ * @cpu_caching: The CPU caching mode to select for this object. If
+ * mmapping the object, the mode selected here will also be used.
+ */
+ __u16 cpu_caching;
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
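+ *
+ * A minimal sketch of CPU-mapping an object (assuming `handle` and
+ * `size` come from a prior &DRM_IOCTL_XE_GEM_CREATE):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_mmap_offset mmo = {
+ * .handle = handle,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ * void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, fd, mmo.offset);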
+ */
+struct drm_xe_gem_mmap_offset {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
+ *
+ * The @flags can be:
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - A long-running (LR) VM accepts
+ * exec submissions to its exec_queues that don't have an upper time
+ * limit on the job execution time. But exec submissions to these
+ * don't allow sync objects of type DRM_XE_SYNC_TYPE_SYNCOBJ or
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be used as out-syncobjs,
+ * that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * LR VMs can be created in recoverable page-fault mode using
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+ * If that flag is omitted, the UMD can not rely on the slightly
+ * different per-VM overcommit semantics that are enabled by
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
+ * still enable recoverable pagefaults if supported by the device.
+ * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
+ * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
+ * demand when accessed, and also allows per-VM overcommit of memory.
+ * The xe driver internally uses recoverable pagefaults to implement
+ * this.
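+ *
+ * A minimal creation sketch (scratch pages are chosen here purely as an
+ * example):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_vm_create create = {
+ * .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
+ * // create.vm_id now holds the new VM ID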
+ */
+struct drm_xe_vm_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+ /** @flags: Flags */
+ __u32 flags;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
+ */
+struct drm_xe_vm_destroy {
+ /** @vm_id: VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_bind_op - run bind operations
+ *
+ * The @op can be:
+ * - %DRM_XE_VM_BIND_OP_MAP
+ * - %DRM_XE_VM_BIND_OP_UNMAP
+ * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
+ * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
+ * - %DRM_XE_VM_BIND_OP_PREFETCH
+ *
+ * and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
+ * tables are set up with a special bit which indicates writes are
+ * dropped and all reads return zero. In the future, the NULL flag
+ * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, with the BO
+ * handle MBZ and the BO offset MBZ. This flag is intended to
+ * implement VK sparse bindings.
+ */
+struct drm_xe_vm_bind_op {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
+ */
+ __u32 obj;
+
+ /**
+ * @pat_index: The platform defined @pat_index to use for this mapping.
+ * The index basically maps to some predefined memory attributes,
+ * including things like caching, coherency, compression etc. The exact
+ * meaning of the pat_index is platform specific and defined in the
+ * Bspec and PRMs. When the KMD sets up the binding the index here is
+ * encoded into the ppGTT PTE.
+ *
+ * For coherency the @pat_index needs to be at least 1-way coherent when
+ * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
+ * will extract the coherency mode from the @pat_index and reject if
+ * there is a mismatch (see note below for pre-MTL platforms).
+ *
+ * Note: On pre-MTL platforms there is only a caching mode and no
+ * explicit coherency mode, but on such hardware there is always a
+ * shared-LLC (or it is a dGPU) so all GT memory accesses are coherent with
+ * CPU caches even with the caching mode set as uncached. It's only the
+ * display engine that is incoherent (on dgpu it must be in VRAM which
+ * is always mapped as WC on the CPU). However to keep the uapi somewhat
+ * consistent with newer platforms the KMD groups the different cache
+ * levels into the following coherency buckets on all pre-MTL platforms:
+ *
+ * ppGTT UC -> COH_NONE
+ * ppGTT WC -> COH_NONE
+ * ppGTT WT -> COH_NONE
+ * ppGTT WB -> COH_AT_LEAST_1WAY
+ *
+ * In practice UC/WC/WT should only ever be used for scanout surfaces on
+ * such platforms (or perhaps in general for dma-buf if shared with
+ * another device) since it is only the display engine that is actually
+ * incoherent. Everything else should typically use WB given that we
+ * have a shared-LLC. On MTL+ this completely changes and the HW
+ * defines the coherency mode as part of the @pat_index, where
+ * incoherent GT access is possible.
+ *
+ * Note: For userptr and externally imported dma-buf the kernel expects
+ * either 1WAY or 2WAY for the @pat_index.
+ *
+ * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+ * on the @pat_index. For such mappings there is no actual memory being
+ * mapped (the address in the PTE is invalid), so the various PAT memory
+ * attributes likely do not apply. Simply leaving as zero is one
+ * option (still a valid pat_index).
+ */
+ __u16 pat_index;
+
+ /** @pad: MBZ */
+ __u16 pad;
+
+ union {
+ /**
+ * @obj_offset: Offset into the object, ignored for unbind
+ * operations
+ */
+ __u64 obj_offset;
+
+ /** @userptr: user pointer to bind on */
+ __u64 userptr;
+ };
+
+ /**
+ * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
+ */
+ __u64 range;
+
+ /** @addr: Address to operate on, MBZ for UNMAP_ALL */
+ __u64 addr;
+
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
+ /** @op: Bind operation to perform */
+ __u32 op;
+
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+ /** @flags: Bind flags */
+ __u32 flags;
+
+ /**
+ * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
+ * It is a region instance, not a mask.
+ * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
+ */
+ __u32 prefetch_mem_region_instance;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
+ *
+ * Below is a minimal example of using @drm_xe_vm_bind to
+ * asynchronously bind the buffer `data` at address `BIND_ADDRESS`,
+ * illustrating a `userptr` bind. It can be synchronized by using the
+ * example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * data = aligned_alloc(ALIGNMENT, BO_SIZE);
+ * struct drm_xe_vm_bind bind = {
+ * .vm_id = vm,
+ * .num_binds = 1,
+ * .bind.obj = 0,
+ * .bind.userptr = to_user_pointer(data),
+ * .bind.range = BO_SIZE,
+ * .bind.addr = BIND_ADDRESS,
+ * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
+ * .bind.flags = 0,
+ * .num_syncs = 1,
+ * .syncs = &sync,
+ * .exec_queue_id = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
+ *
+ */
+struct drm_xe_vm_bind {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /**
+ * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND,
+ * and the exec queue must have the same vm_id. If zero, the default VM bind
+ * engine is used.
+ */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @num_binds: number of binds in this IOCTL */
+ __u32 num_binds;
+
+ union {
+ /** @bind: used if num_binds == 1 */
+ struct drm_xe_vm_bind_op bind;
+
+ /**
+ * @vector_of_binds: userptr to array of struct
+ * drm_xe_vm_bind_op if num_binds > 1
+ */
+ __u64 vector_of_binds;
+ };
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @num_syncs: amount of syncs to wait on */
+ __u32 num_syncs;
+
+ /** @syncs: pointer to struct drm_xe_sync array */
+ __u64 syncs;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ *
+ * The example below shows how to use @drm_xe_exec_queue_create to create
+ * a simple exec_queue (no parallel submission) of class
+ * &DRM_XE_ENGINE_CLASS_RENDER.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_engine_class_instance instance = {
+ * .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
+ * };
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .extensions = 0,
+ * .vm_id = vm,
+ * .width = 1,
+ * .num_placements = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
+ */
+struct drm_xe_exec_queue_create {
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @width: submission width (number of BBs per exec) for this exec queue */
+ __u16 width;
+
+ /** @num_placements: number of valid placements for this exec queue */
+ __u16 num_placements;
+
+ /** @vm_id: VM to use for this exec queue */
+ __u32 vm_id;
+
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @exec_queue_id: Returned exec queue ID */
+ __u32 exec_queue_id;
+
+ /**
+ * @instances: user pointer to a 2-d array of struct
+ * drm_xe_engine_class_instance
+ *
+ * length = width (i) * num_placements (j)
+ * index = j + i * width
+ */
+ __u64 instances;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ */
+struct drm_xe_exec_queue_destroy {
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ *
+ * The @property can be:
+ * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
+ */
+struct drm_xe_exec_queue_get_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+ /** @property: property to get */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_sync - sync object
+ *
+ * The @type can be:
+ * - %DRM_XE_SYNC_TYPE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_USER_FENCE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_SYNC_FLAG_SIGNAL
+ *
+ * A minimal use of @drm_xe_sync looks like this:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_sync sync = {
+ * .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ * .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * };
+ * struct drm_syncobj_create syncobj_create = { 0 };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
+ * sync.handle = syncobj_create.handle;
+ * ...
+ * use of &sync in drm_xe_exec or drm_xe_vm_bind
+ * ...
+ * struct drm_syncobj_wait wait = {
+ * .handles = &sync.handle,
+ * .timeout_nsec = INT64_MAX,
+ * .count_handles = 1,
+ * .flags = 0,
+ * .first_signaled = 0,
+ * .pad = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+ */
+struct drm_xe_sync {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+ /** @type: Type of this sync object */
+ __u32 type;
+
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+ /** @flags: Sync Flags */
+ __u32 flags;
+
+ union {
+ /** @handle: Handle for the object */
+ __u32 handle;
+
+ /**
+ * @addr: Address of user fence. When sync is passed in via exec
+ * IOCTL this is a GPU address in the VM. When sync passed in via
+ * VM bind IOCTL this is a user pointer. In either case, it is
+ * the user's responsibility that this address is present and
+ * mapped when the user fence is signalled. Must be qword
+ * aligned.
+ */
+ __u64 addr;
+ };
+
+ /**
+ * @timeline_value: Input for the timeline sync object. Needs to be
+ * different from 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
+ *
+ * This is an example to use @drm_xe_exec for execution of the object
+ * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
+ * (see example in @drm_xe_exec_queue_create). It can be synchronized
+ * by using the example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_exec exec = {
+ * .exec_queue_id = exec_queue,
+ * .syncs = &sync,
+ * .num_syncs = 1,
+ * .address = BIND_ADDRESS,
+ * .num_batch_buffer = 1,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+ *
+ */
+struct drm_xe_exec {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
+ /** @num_syncs: Amount of struct drm_xe_sync in array. */
+ __u32 num_syncs;
+
+ /** @syncs: Pointer to struct drm_xe_sync array. */
+ __u64 syncs;
+
+ /**
+ * @address: address of batch buffer if num_batch_buffer == 1 or an
+ * array of batch buffer addresses
+ */
+ __u64 address;
+
+ /**
+ * @num_batch_buffer: number of batch buffers in this exec, must match
+ * the width of the engine
+ */
+ __u16 num_batch_buffer;
+
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
+ *
+ * Wait on a user fence. Xe will wake up on every HW engine interrupt in the
+ * instances list and check if the user fence is complete::
+ *
+ * (*addr & MASK) OP (VALUE & MASK)
+ *
+ * Returns to user on user fence completion or timeout.
+ *
+ * The @op can be:
+ * - %DRM_XE_UFENCE_WAIT_OP_EQ
+ * - %DRM_XE_UFENCE_WAIT_OP_NEQ
+ * - %DRM_XE_UFENCE_WAIT_OP_GT
+ * - %DRM_XE_UFENCE_WAIT_OP_GTE
+ * - %DRM_XE_UFENCE_WAIT_OP_LT
+ * - %DRM_XE_UFENCE_WAIT_OP_LTE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
+ *
+ * The @mask values can be for example:
+ * - 0xffu for u8
+ * - 0xffffu for u16
+ * - 0xffffffffu for u32
+ * - 0xffffffffffffffffu for u64
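+ *
+ * A minimal wait sketch (`fence_value` and `FENCE_VALUE` are
+ * assumptions; the address must be the one signalled through a
+ * %DRM_XE_SYNC_TYPE_USER_FENCE sync entry):
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_wait_user_fence wait = {
+ * .addr = (uintptr_t)&fence_value,
+ * .op = DRM_XE_UFENCE_WAIT_OP_EQ,
+ * .value = FENCE_VALUE,
+ * .mask = 0xffffffffffffffffu,
+ * .timeout = -1, // negative: wait without expiring
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);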
+ */
+struct drm_xe_wait_user_fence {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @addr: user pointer address to wait on, must be qword aligned
+ */
+ __u64 addr;
+
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
+ /** @op: wait operation (type of comparison) */
+ __u16 op;
+
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
+ /** @flags: wait flags */
+ __u16 flags;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: compare value */
+ __u64 value;
+
+ /** @mask: comparison mask */
+ __u64 mask;
+
+ /**
+ * @timeout: how long to wait before bailing, value in nanoseconds.
+ * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
+ * it contains timeout expressed in nanoseconds to wait (fence will
+ * expire at now() + timeout).
+ * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout)
+ * the wait will end at timeout (uses system MONOTONIC_CLOCK).
+ * Passing a negative timeout leads to a never-ending wait.
+ *
+ * On relative timeout this value is updated with the timeout left
+ * (for restarting the call in case of signal delivery).
+ * On absolute timeout this value stays intact (a restarted call will
+ * still expire at the same point in time).
+ */
+ __s64 timeout;
+
+ /** @exec_queue_id: exec_queue_id returned by &DRM_IOCTL_XE_EXEC_QUEUE_CREATE */
+ __u32 exec_queue_id;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_XE_DRM_H_ */
diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h
index 985b89068591..0e591152aa8a 100644
--- a/include/uapi/linux/acct.h
+++ b/include/uapi/linux/acct.h
@@ -103,12 +103,13 @@ struct acct_v3
/*
* accounting flags
*/
- /* bit set when the process ... */
+ /* bit set when the process/task ... */
#define AFORK 0x01 /* ... executed fork, but did not exec */
#define ASU 0x02 /* ... used super-user privileges */
#define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */
#define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */
+#define AGROUP 0x20 /* ... was the last task of the process (task group) */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
diff --git a/include/uapi/linux/acrn.h b/include/uapi/linux/acrn.h
new file mode 100644
index 000000000000..7b714c1902eb
--- /dev/null
+++ b/include/uapi/linux/acrn.h
@@ -0,0 +1,649 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for /dev/acrn_hsm - ACRN Hypervisor Service Module
+ *
+ * This file can be used by applications that need to communicate with the HSM
+ * via the ioctl interface.
+ *
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _UAPI_ACRN_H
+#define _UAPI_ACRN_H
+
+#include <linux/types.h>
+
+#define ACRN_IO_REQUEST_MAX 16
+
+#define ACRN_IOREQ_STATE_PENDING 0
+#define ACRN_IOREQ_STATE_COMPLETE 1
+#define ACRN_IOREQ_STATE_PROCESSING 2
+#define ACRN_IOREQ_STATE_FREE 3
+
+#define ACRN_IOREQ_TYPE_PORTIO 0
+#define ACRN_IOREQ_TYPE_MMIO 1
+#define ACRN_IOREQ_TYPE_PCICFG 2
+
+#define ACRN_IOREQ_DIR_READ 0
+#define ACRN_IOREQ_DIR_WRITE 1
+
+/**
+ * struct acrn_mmio_request - Info of an MMIO I/O request
+ * @direction: Access direction of this request (ACRN_IOREQ_DIR_*)
+ * @reserved: Reserved for alignment and should be 0
+ * @address: Access address of this MMIO I/O request
+ * @size: Access size of this MMIO I/O request
+ * @value: Read/write value of this MMIO I/O request
+ */
+struct acrn_mmio_request {
+ __u32 direction;
+ __u32 reserved;
+ __u64 address;
+ __u64 size;
+ __u64 value;
+};
+
+/**
+ * struct acrn_pio_request - Info of a PIO I/O request
+ * @direction: Access direction of this request (ACRN_IOREQ_DIR_*)
+ * @reserved: Reserved for alignment and should be 0
+ * @address: Access address of this PIO I/O request
+ * @size: Access size of this PIO I/O request
+ * @value: Read/write value of this PIO I/O request
+ */
+struct acrn_pio_request {
+ __u32 direction;
+ __u32 reserved;
+ __u64 address;
+ __u64 size;
+ __u32 value;
+};
+
+/**
+ * struct acrn_pci_request - Info of a PCI I/O request
+ * @direction: Access direction of this request (ACRN_IOREQ_DIR_*)
+ * @reserved: Reserved for alignment and should be 0
+ * @size: Access size of this PCI I/O request
+ * @value: Read/write value of this PCI I/O request
+ * @bus: PCI bus value of this PCI I/O request
+ * @dev: PCI device value of this PCI I/O request
+ * @func: PCI function value of this PCI I/O request
+ * @reg: PCI config space offset of this PCI I/O request
+ *
+ * Needs to keep the same header layout as &struct acrn_pio_request.
+ */
+struct acrn_pci_request {
+ __u32 direction;
+ __u32 reserved[3];
+ __u64 size;
+ __u32 value;
+ __u32 bus;
+ __u32 dev;
+ __u32 func;
+ __u32 reg;
+};
+
+/**
+ * struct acrn_io_request - 256-byte ACRN I/O request
+ * @type: Type of this request (ACRN_IOREQ_TYPE_*).
+ * @completion_polling: Polling flag. Hypervisor will poll completion of the
+ * I/O request if this flag is set.
+ * @reserved0: Reserved fields.
+ * @reqs: Union of different types of request. Byte offset: 64.
+ * @reqs.pio_request: PIO request data of the I/O request.
+ * @reqs.pci_request: PCI configuration space request data of the I/O request.
+ * @reqs.mmio_request: MMIO request data of the I/O request.
+ * @reqs.data: Raw data of the I/O request.
+ * @reserved1: Reserved fields.
+ * @kernel_handled: Flag indicating this request needs to be handled in the kernel.
+ * @processed: The status of this request (ACRN_IOREQ_STATE_*).
+ *
+ * The state transitions of ACRN I/O request:
+ *
+ * FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
+ *
+ * An I/O request in COMPLETE or FREE state is owned by the hypervisor. HSM and
+ * ACRN userspace are in charge of processing the others.
+ *
+ * On the basis of the states illustrated above, a typical lifecycle of an
+ * ACRN I/O request looks like:
+ *
+ * Flow (assume the initial state is FREE)
+ * |
+ * | Service VM vCPU 0 Service VM vCPU x User vCPU y
+ * |
+ * | hypervisor:
+ * | fills in type, addr, etc.
+ * | pauses the User VM vCPU y
+ * | sets the state to PENDING (a)
+ * | fires an upcall to Service VM
+ * |
+ * | HSM:
+ * | scans for PENDING requests
+ * | sets the states to PROCESSING (b)
+ * | assigns the requests to clients (c)
+ * V
+ * | client:
+ * | scans for the assigned requests
+ * | handles the requests (d)
+ * | HSM:
+ * | sets states to COMPLETE
+ * | notifies the hypervisor
+ * |
+ * | hypervisor:
+ * | resumes User VM vCPU y (e)
+ * |
+ * | hypervisor:
+ * | post handling (f)
+ * V sets states to FREE
+ *
+ * Note that the procedures (a) to (f) in the illustration above must be
+ * processed strictly in order. One vCPU cannot trigger another request of
+ * I/O emulation before completing the previous one.
+ *
+ * Atomic operations and barriers are required when the HSM and the hypervisor
+ * access the state of &struct acrn_io_request.
+ *
+ */
+struct acrn_io_request {
+ __u32 type;
+ __u32 completion_polling;
+ __u32 reserved0[14];
+ union {
+ struct acrn_pio_request pio_request;
+ struct acrn_pci_request pci_request;
+ struct acrn_mmio_request mmio_request;
+ __u64 data[8];
+ } reqs;
+ __u32 reserved1;
+ __u32 kernel_handled;
+ __u32 processed;
+} __attribute__((aligned(256)));
+
+struct acrn_io_request_buffer {
+ union {
+ struct acrn_io_request req_slot[ACRN_IO_REQUEST_MAX];
+ __u8 reserved[4096];
+ };
+};
+
+/**
+ * struct acrn_ioreq_notify - The structure of ioreq completion notification
+ * @vmid: User VM ID
+ * @reserved: Reserved and should be 0
+ * @vcpu: vCPU ID
+ */
+struct acrn_ioreq_notify {
+ __u16 vmid;
+ __u16 reserved;
+ __u32 vcpu;
+};
+
+/**
+ * struct acrn_vm_creation - Info to create a User VM
+ * @vmid: User VM ID returned from the hypervisor
+ * @reserved0: Reserved and must be 0
+ * @vcpu_num: Number of vCPU in the VM. Return from hypervisor.
+ * @reserved1: Reserved and must be 0
+ * @uuid: Empty space never to be used again (used to be UUID of the VM)
+ * @vm_flag: Flags for the VM creation. Pass to hypervisor directly.
+ * @ioreq_buf: Service VM GPA of I/O request buffer. Pass to
+ * hypervisor directly.
+ * @cpu_affinity: CPU affinity of the VM. Pass to hypervisor directly.
+ * It's a bitmap which indicates CPUs used by the VM.
+ */
+struct acrn_vm_creation {
+ __u16 vmid;
+ __u16 reserved0;
+ __u16 vcpu_num;
+ __u16 reserved1;
+ __u8 uuid[16];
+ __u64 vm_flag;
+ __u64 ioreq_buf;
+ __u64 cpu_affinity;
+};
+
+/**
+ * struct acrn_gp_regs - General registers of a User VM
+ * @rax: Value of register RAX
+ * @rcx: Value of register RCX
+ * @rdx: Value of register RDX
+ * @rbx: Value of register RBX
+ * @rsp: Value of register RSP
+ * @rbp: Value of register RBP
+ * @rsi: Value of register RSI
+ * @rdi: Value of register RDI
+ * @r8: Value of register R8
+ * @r9: Value of register R9
+ * @r10: Value of register R10
+ * @r11: Value of register R11
+ * @r12: Value of register R12
+ * @r13: Value of register R13
+ * @r14: Value of register R14
+ * @r15: Value of register R15
+ */
+struct acrn_gp_regs {
+ __le64 rax;
+ __le64 rcx;
+ __le64 rdx;
+ __le64 rbx;
+ __le64 rsp;
+ __le64 rbp;
+ __le64 rsi;
+ __le64 rdi;
+ __le64 r8;
+ __le64 r9;
+ __le64 r10;
+ __le64 r11;
+ __le64 r12;
+ __le64 r13;
+ __le64 r14;
+ __le64 r15;
+};
+
+/**
+ * struct acrn_descriptor_ptr - Segment descriptor table of a User VM.
+ * @limit: Limit field.
+ * @base: Base field.
+ * @reserved: Reserved and must be 0.
+ */
+struct acrn_descriptor_ptr {
+ __le16 limit;
+ __le64 base;
+ __le16 reserved[3];
+} __attribute__ ((__packed__));
+
+/**
+ * struct acrn_regs - Registers structure of a User VM
+ * @gprs: General registers
+ * @gdt: Global Descriptor Table
+ * @idt: Interrupt Descriptor Table
+ * @rip: Value of register RIP
+ * @cs_base: Base of code segment selector
+ * @cr0: Value of register CR0
+ * @cr4: Value of register CR4
+ * @cr3: Value of register CR3
+ * @ia32_efer: Value of IA32_EFER MSR
+ * @rflags: Value of register RFLAGS
+ * @reserved_64: Reserved and must be 0
+ * @cs_ar: Attribute field of code segment selector
+ * @cs_limit: Limit field of code segment selector
+ * @reserved_32: Reserved and must be 0
+ * @cs_sel: Value of code segment selector
+ * @ss_sel: Value of stack segment selector
+ * @ds_sel: Value of data segment selector
+ * @es_sel: Value of extra segment selector
+ * @fs_sel: Value of FS selector
+ * @gs_sel: Value of GS selector
+ * @ldt_sel: Value of LDT descriptor selector
+ * @tr_sel: Value of TSS descriptor selector
+ */
+struct acrn_regs {
+ struct acrn_gp_regs gprs;
+ struct acrn_descriptor_ptr gdt;
+ struct acrn_descriptor_ptr idt;
+
+ __le64 rip;
+ __le64 cs_base;
+ __le64 cr0;
+ __le64 cr4;
+ __le64 cr3;
+ __le64 ia32_efer;
+ __le64 rflags;
+ __le64 reserved_64[4];
+
+ __le32 cs_ar;
+ __le32 cs_limit;
+ __le32 reserved_32[3];
+
+ __le16 cs_sel;
+ __le16 ss_sel;
+ __le16 ds_sel;
+ __le16 es_sel;
+ __le16 fs_sel;
+ __le16 gs_sel;
+ __le16 ldt_sel;
+ __le16 tr_sel;
+};
+
+/**
+ * struct acrn_vcpu_regs - Info of vCPU registers state
+ * @vcpu_id: vCPU ID
+ * @reserved: Reserved and must be 0
+ * @vcpu_regs: vCPU registers state
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_vcpu_regs {
+ __u16 vcpu_id;
+ __u16 reserved[3];
+ struct acrn_regs vcpu_regs;
+};
+
+#define ACRN_MEM_ACCESS_RIGHT_MASK 0x00000007U
+#define ACRN_MEM_ACCESS_READ 0x00000001U
+#define ACRN_MEM_ACCESS_WRITE 0x00000002U
+#define ACRN_MEM_ACCESS_EXEC 0x00000004U
+#define ACRN_MEM_ACCESS_RWX (ACRN_MEM_ACCESS_READ | \
+ ACRN_MEM_ACCESS_WRITE | \
+ ACRN_MEM_ACCESS_EXEC)
+
+#define ACRN_MEM_TYPE_MASK 0x000007C0U
+#define ACRN_MEM_TYPE_WB 0x00000040U
+#define ACRN_MEM_TYPE_WT 0x00000080U
+#define ACRN_MEM_TYPE_UC 0x00000100U
+#define ACRN_MEM_TYPE_WC 0x00000200U
+#define ACRN_MEM_TYPE_WP 0x00000400U
+
+/* Memory mapping types */
+#define ACRN_MEMMAP_RAM 0
+#define ACRN_MEMMAP_MMIO 1
+
+/**
+ * struct acrn_vm_memmap - An EPT memory mapping info for a User VM.
+ * @type: Type of the memory mapping (ACRN_MEMMAP_*).
+ * Pass to hypervisor directly.
+ * @attr: Attribute of the memory mapping.
+ * Pass to hypervisor directly.
+ * @user_vm_pa: Physical address of User VM.
+ * Pass to hypervisor directly.
+ * @service_vm_pa: Physical address of Service VM.
+ * Pass to hypervisor directly.
+ * @vma_base: VMA address of Service VM. Pass to hypervisor directly.
+ * @len: Length of the memory mapping.
+ * Pass to hypervisor directly.
+ */
+struct acrn_vm_memmap {
+ __u32 type;
+ __u32 attr;
+ __u64 user_vm_pa;
+ union {
+ __u64 service_vm_pa;
+ __u64 vma_base;
+ };
+ __u64 len;
+};
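+
+/*
+ * Illustrative use only (a sketch, not part of the original header): mapping
+ * User VM RAM backed by a Service VM virtual address range via
+ * ACRN_IOCTL_SET_MEMSEG. The hsm_fd, va and ram_size values are assumptions.
+ *
+ *	struct acrn_vm_memmap memmap = {
+ *		.type = ACRN_MEMMAP_RAM,
+ *		.attr = ACRN_MEM_TYPE_WB | ACRN_MEM_ACCESS_RWX,
+ *		.user_vm_pa = 0,
+ *		.vma_base = (__u64)(unsigned long)va,
+ *		.len = ram_size,
+ *	};
+ *	ioctl(hsm_fd, ACRN_IOCTL_SET_MEMSEG, &memmap);
+ */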
+
+/* Type of interrupt of a passthrough device */
+#define ACRN_PTDEV_IRQ_INTX 0
+#define ACRN_PTDEV_IRQ_MSI 1
+#define ACRN_PTDEV_IRQ_MSIX 2
+/**
+ * struct acrn_ptdev_irq - Interrupt data of a passthrough device.
+ * @type: Type (ACRN_PTDEV_IRQ_*)
+ * @virt_bdf: Virtual Bus/Device/Function
+ * @phys_bdf: Physical Bus/Device/Function
+ * @intx: Info of interrupt
+ * @intx.virt_pin: Virtual IOAPIC pin
+ * @intx.phys_pin: Physical IOAPIC pin
+ * @intx.is_pic_pin: Is PIC pin or not
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_ptdev_irq {
+ __u32 type;
+ __u16 virt_bdf;
+ __u16 phys_bdf;
+
+ struct {
+ __u32 virt_pin;
+ __u32 phys_pin;
+ __u32 is_pic_pin;
+ } intx;
+};
+
+/* Type of PCI device assignment */
+#define ACRN_PTDEV_QUIRK_ASSIGN (1U << 0)
+
+#define ACRN_MMIODEV_RES_NUM 3
+#define ACRN_PCI_NUM_BARS 6
+/**
+ * struct acrn_pcidev - Info for assigning or de-assigning a PCI device
+ * @type: Type of the assignment
+ * @virt_bdf: Virtual Bus/Device/Function
+ * @phys_bdf: Physical Bus/Device/Function
+ * @intr_line: PCI interrupt line
+ * @intr_pin: PCI interrupt pin
+ * @bar: PCI BARs.
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_pcidev {
+ __u32 type;
+ __u16 virt_bdf;
+ __u16 phys_bdf;
+ __u8 intr_line;
+ __u8 intr_pin;
+ __u32 bar[ACRN_PCI_NUM_BARS];
+};
+
+/**
+ * struct acrn_mmiodev - Info for assigning or de-assigning an MMIO device
+ * @name: Name of the MMIO device.
+ * @res[].user_vm_pa: Physical address of User VM of the MMIO region
+ * for the MMIO device.
+ * @res[].service_vm_pa: Physical address of Service VM of the MMIO
+ * region for the MMIO device.
+ * @res[].size: Size of the MMIO region for the MMIO device.
+ * @res[].mem_type: Memory type of the MMIO region for the MMIO
+ * device.
+ *
+ * This structure will be passed to hypervisor directly.
+ */
+struct acrn_mmiodev {
+ __u8 name[8];
+ struct {
+ __u64 user_vm_pa;
+ __u64 service_vm_pa;
+ __u64 size;
+ __u64 mem_type;
+ } res[ACRN_MMIODEV_RES_NUM];
+};
+
+/**
+ * struct acrn_vdev - Info for creating or destroying a virtual device
+ * @id: Union of identifier of the virtual device
+ * @id.value: Raw data of the identifier
+ * @id.fields.vendor: Vendor id of the virtual PCI device
+ * @id.fields.device: Device id of the virtual PCI device
+ * @id.fields.legacy_id: ID of the virtual device if not a PCI device
+ * @slot: Virtual Bus/Device/Function of the virtual
+ * device
+ * @io_base: IO resource base address of the virtual device
+ * @io_size: IO resource size of the virtual device
+ * @args: Arguments for the virtual device creation
+ *
+ * The created virtual device can be a PCI device or a legacy device (e.g.
+ * a virtual UART controller) and it is emulated by the hypervisor. This
+ * structure will be passed to hypervisor directly.
+ */
+struct acrn_vdev {
+ /*
+ * the identifier of the device, the low 32 bits represent the vendor
+ * id and device id of PCI device and the high 32 bits represent the
+ * device number of the legacy device
+ */
+ union {
+ __u64 value;
+ struct {
+ __le16 vendor;
+ __le16 device;
+ __le32 legacy_id;
+ } fields;
+ } id;
+
+ __u64 slot;
+ __u32 io_addr[ACRN_PCI_NUM_BARS];
+ __u32 io_size[ACRN_PCI_NUM_BARS];
+ __u8 args[128];
+};
+
+/**
+ * struct acrn_msi_entry - Info for injecting an MSI interrupt into a VM
+ * @msi_addr: MSI addr[19:12] with dest vCPU ID
+ * @msi_data: MSI data[7:0] with vector
+ */
+struct acrn_msi_entry {
+ __u64 msi_addr;
+ __u64 msi_data;
+};
+
+struct acrn_acpi_generic_address {
+ __u8 space_id;
+ __u8 bit_width;
+ __u8 bit_offset;
+ __u8 access_size;
+ __u64 address;
+} __attribute__ ((__packed__));
+
+/**
+ * struct acrn_cstate_data - A C state package defined in ACPI
+ * @cx_reg: Register of the C state object
+ * @type: Type of the C state object
+ * @latency: The worst-case latency to enter and exit this C state
+ * @power: The average power consumption when in this C state
+ */
+struct acrn_cstate_data {
+ struct acrn_acpi_generic_address cx_reg;
+ __u8 type;
+ __u32 latency;
+ __u64 power;
+};
+
+/**
+ * struct acrn_pstate_data - A P state package defined in ACPI
+ * @core_frequency: CPU frequency (in MHz).
+ * @power: Power dissipation (in milliwatts).
+ * @transition_latency: The worst-case latency in microseconds that CPU is
+ * unavailable during a transition from any P state to
+ * this P state.
+ * @bus_master_latency: The worst-case latency in microseconds that Bus Masters
+ * are prevented from accessing memory during a transition
+ * from any P state to this P state.
+ * @control: The value to be written to Performance Control Register
+ * @status: Transition status.
+ */
+struct acrn_pstate_data {
+ __u64 core_frequency;
+ __u64 power;
+ __u64 transition_latency;
+ __u64 bus_master_latency;
+ __u64 control;
+ __u64 status;
+};
+
+#define PMCMD_TYPE_MASK 0x000000ff
+enum acrn_pm_cmd_type {
+ ACRN_PMCMD_GET_PX_CNT,
+ ACRN_PMCMD_GET_PX_DATA,
+ ACRN_PMCMD_GET_CX_CNT,
+ ACRN_PMCMD_GET_CX_DATA,
+};
+
+#define ACRN_IOEVENTFD_FLAG_PIO 0x01
+#define ACRN_IOEVENTFD_FLAG_DATAMATCH 0x02
+#define ACRN_IOEVENTFD_FLAG_DEASSIGN 0x04
+/**
+ * struct acrn_ioeventfd - Data to operate a &struct hsm_ioeventfd
+ * @fd: The fd of eventfd associated with a hsm_ioeventfd
+ * @flags: Logical-OR of ACRN_IOEVENTFD_FLAG_*
+ * @addr: The start address of IO range of ioeventfd
+ * @len: The length of IO range of ioeventfd
+ * @reserved: Reserved and should be 0
+ * @data: Data for data matching
+ *
+ * Without flag ACRN_IOEVENTFD_FLAG_DEASSIGN, ioctl ACRN_IOCTL_IOEVENTFD
+ * creates a &struct hsm_ioeventfd with properties originated from &struct
+ * acrn_ioeventfd. With flag ACRN_IOEVENTFD_FLAG_DEASSIGN, ioctl
+ * ACRN_IOCTL_IOEVENTFD destroys the &struct hsm_ioeventfd matching the fd.
+ */
+struct acrn_ioeventfd {
+ __u32 fd;
+ __u32 flags;
+ __u64 addr;
+ __u32 len;
+ __u32 reserved;
+ __u64 data;
+};
+
+#define ACRN_IRQFD_FLAG_DEASSIGN 0x01
+/**
+ * struct acrn_irqfd - Data to operate a &struct hsm_irqfd
+ * @fd: The fd of eventfd associated with a hsm_irqfd
+ * @flags: Logical-OR of ACRN_IRQFD_FLAG_*
+ * @msi: Info of MSI associated with the irqfd
+ */
+struct acrn_irqfd {
+ __s32 fd;
+ __u32 flags;
+ struct acrn_msi_entry msi;
+};
+
+/* The ioctl type, documented in ioctl-number.rst */
+#define ACRN_IOCTL_TYPE 0xA2
+
+/*
+ * Common IOCTL IDs definition for ACRN userspace
+ */
+#define ACRN_IOCTL_CREATE_VM \
+ _IOWR(ACRN_IOCTL_TYPE, 0x10, struct acrn_vm_creation)
+#define ACRN_IOCTL_DESTROY_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x11)
+#define ACRN_IOCTL_START_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x12)
+#define ACRN_IOCTL_PAUSE_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x13)
+#define ACRN_IOCTL_RESET_VM \
+ _IO(ACRN_IOCTL_TYPE, 0x15)
+#define ACRN_IOCTL_SET_VCPU_REGS \
+ _IOW(ACRN_IOCTL_TYPE, 0x16, struct acrn_vcpu_regs)
+
+#define ACRN_IOCTL_INJECT_MSI \
+ _IOW(ACRN_IOCTL_TYPE, 0x23, struct acrn_msi_entry)
+#define ACRN_IOCTL_VM_INTR_MONITOR \
+ _IOW(ACRN_IOCTL_TYPE, 0x24, unsigned long)
+#define ACRN_IOCTL_SET_IRQLINE \
+ _IOW(ACRN_IOCTL_TYPE, 0x25, __u64)
+
+#define ACRN_IOCTL_NOTIFY_REQUEST_FINISH \
+ _IOW(ACRN_IOCTL_TYPE, 0x31, struct acrn_ioreq_notify)
+#define ACRN_IOCTL_CREATE_IOREQ_CLIENT \
+ _IO(ACRN_IOCTL_TYPE, 0x32)
+#define ACRN_IOCTL_ATTACH_IOREQ_CLIENT \
+ _IO(ACRN_IOCTL_TYPE, 0x33)
+#define ACRN_IOCTL_DESTROY_IOREQ_CLIENT \
+ _IO(ACRN_IOCTL_TYPE, 0x34)
+#define ACRN_IOCTL_CLEAR_VM_IOREQ \
+ _IO(ACRN_IOCTL_TYPE, 0x35)
+
+#define ACRN_IOCTL_SET_MEMSEG \
+ _IOW(ACRN_IOCTL_TYPE, 0x41, struct acrn_vm_memmap)
+#define ACRN_IOCTL_UNSET_MEMSEG \
+ _IOW(ACRN_IOCTL_TYPE, 0x42, struct acrn_vm_memmap)
+
+#define ACRN_IOCTL_SET_PTDEV_INTR \
+ _IOW(ACRN_IOCTL_TYPE, 0x53, struct acrn_ptdev_irq)
+#define ACRN_IOCTL_RESET_PTDEV_INTR \
+ _IOW(ACRN_IOCTL_TYPE, 0x54, struct acrn_ptdev_irq)
+#define ACRN_IOCTL_ASSIGN_PCIDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x55, struct acrn_pcidev)
+#define ACRN_IOCTL_DEASSIGN_PCIDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x56, struct acrn_pcidev)
+#define ACRN_IOCTL_ASSIGN_MMIODEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x57, struct acrn_mmiodev)
+#define ACRN_IOCTL_DEASSIGN_MMIODEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x58, struct acrn_mmiodev)
+#define ACRN_IOCTL_CREATE_VDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x59, struct acrn_vdev)
+#define ACRN_IOCTL_DESTROY_VDEV \
+ _IOW(ACRN_IOCTL_TYPE, 0x5A, struct acrn_vdev)
+
+#define ACRN_IOCTL_PM_GET_CPU_STATE \
+ _IOWR(ACRN_IOCTL_TYPE, 0x60, __u64)
+
+#define ACRN_IOCTL_IOEVENTFD \
+ _IOW(ACRN_IOCTL_TYPE, 0x70, struct acrn_ioeventfd)
+#define ACRN_IOCTL_IRQFD \
+ _IOW(ACRN_IOCTL_TYPE, 0x71, struct acrn_irqfd)
+
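+/*
+ * Illustrative flow (a sketch, not part of the original header): create and
+ * start a User VM through the HSM device node. ioreq_buf_gpa and the
+ * affinity value are assumptions; error handling is omitted.
+ *
+ *	int hsm_fd = open("/dev/acrn_hsm", O_RDWR);
+ *	struct acrn_vm_creation create = {
+ *		.ioreq_buf = ioreq_buf_gpa,
+ *		.cpu_affinity = 0x3,
+ *	};
+ *	ioctl(hsm_fd, ACRN_IOCTL_CREATE_VM, &create);
+ *	ioctl(hsm_fd, ACRN_IOCTL_START_VM);
+ */
+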
+#endif /* _UAPI_ACRN_H */
diff --git a/include/uapi/linux/affs_hardblocks.h b/include/uapi/linux/affs_hardblocks.h
index 5e2fb8481252..a5aff2eb5f70 100644
--- a/include/uapi/linux/affs_hardblocks.h
+++ b/include/uapi/linux/affs_hardblocks.h
@@ -7,42 +7,42 @@
/* Just the needed definitions for the RDB of an Amiga HD. */
struct RigidDiskBlock {
- __u32 rdb_ID;
+ __be32 rdb_ID;
__be32 rdb_SummedLongs;
- __s32 rdb_ChkSum;
- __u32 rdb_HostID;
+ __be32 rdb_ChkSum;
+ __be32 rdb_HostID;
__be32 rdb_BlockBytes;
- __u32 rdb_Flags;
- __u32 rdb_BadBlockList;
+ __be32 rdb_Flags;
+ __be32 rdb_BadBlockList;
__be32 rdb_PartitionList;
- __u32 rdb_FileSysHeaderList;
- __u32 rdb_DriveInit;
- __u32 rdb_Reserved1[6];
- __u32 rdb_Cylinders;
- __u32 rdb_Sectors;
- __u32 rdb_Heads;
- __u32 rdb_Interleave;
- __u32 rdb_Park;
- __u32 rdb_Reserved2[3];
- __u32 rdb_WritePreComp;
- __u32 rdb_ReducedWrite;
- __u32 rdb_StepRate;
- __u32 rdb_Reserved3[5];
- __u32 rdb_RDBBlocksLo;
- __u32 rdb_RDBBlocksHi;
- __u32 rdb_LoCylinder;
- __u32 rdb_HiCylinder;
- __u32 rdb_CylBlocks;
- __u32 rdb_AutoParkSeconds;
- __u32 rdb_HighRDSKBlock;
- __u32 rdb_Reserved4;
+ __be32 rdb_FileSysHeaderList;
+ __be32 rdb_DriveInit;
+ __be32 rdb_Reserved1[6];
+ __be32 rdb_Cylinders;
+ __be32 rdb_Sectors;
+ __be32 rdb_Heads;
+ __be32 rdb_Interleave;
+ __be32 rdb_Park;
+ __be32 rdb_Reserved2[3];
+ __be32 rdb_WritePreComp;
+ __be32 rdb_ReducedWrite;
+ __be32 rdb_StepRate;
+ __be32 rdb_Reserved3[5];
+ __be32 rdb_RDBBlocksLo;
+ __be32 rdb_RDBBlocksHi;
+ __be32 rdb_LoCylinder;
+ __be32 rdb_HiCylinder;
+ __be32 rdb_CylBlocks;
+ __be32 rdb_AutoParkSeconds;
+ __be32 rdb_HighRDSKBlock;
+ __be32 rdb_Reserved4;
char rdb_DiskVendor[8];
char rdb_DiskProduct[16];
char rdb_DiskRevision[4];
char rdb_ControllerVendor[8];
char rdb_ControllerProduct[16];
char rdb_ControllerRevision[4];
- __u32 rdb_Reserved5[10];
+ __be32 rdb_Reserved5[10];
};
#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
@@ -50,16 +50,16 @@ struct RigidDiskBlock {
struct PartitionBlock {
__be32 pb_ID;
__be32 pb_SummedLongs;
- __s32 pb_ChkSum;
- __u32 pb_HostID;
+ __be32 pb_ChkSum;
+ __be32 pb_HostID;
__be32 pb_Next;
- __u32 pb_Flags;
- __u32 pb_Reserved1[2];
- __u32 pb_DevFlags;
+ __be32 pb_Flags;
+ __be32 pb_Reserved1[2];
+ __be32 pb_DevFlags;
__u8 pb_DriveName[32];
- __u32 pb_Reserved2[15];
+ __be32 pb_Reserved2[15];
__be32 pb_Environment[17];
- __u32 pb_EReserved[15];
+ __be32 pb_EReserved[15];
};
#define IDNAME_PARTITION 0x50415254 /* "PART" */
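
The annotation change from __u32 to __be32 documents that RDB fields are stored big-endian on disk; the in-memory layout is unchanged, so consumers still byte-swap explicitly. A minimal sketch of reading one field (the helper name is hypothetical):

#include <stdint.h>
#include <arpa/inet.h>              /* ntohl() */
#include <linux/affs_hardblocks.h>

/* 'rdb' is assumed to point at an RDB block already read from disk;
 * every __be32 field must be converted to host order before use.
 */
static uint32_t rdb_cylinders(const struct RigidDiskBlock *rdb)
{
	return ntohl(rdb->rdb_Cylinders);
}
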
diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h
index f5251045181a..9cc3448c0b5b 100644
--- a/include/uapi/linux/agpgart.h
+++ b/include/uapi/linux/agpgart.h
@@ -52,7 +52,6 @@
#ifndef __KERNEL__
#include <linux/types.h>
-#include <stdlib.h>
struct agp_version {
__u16 major;
@@ -64,10 +63,10 @@ typedef struct _agp_info {
__u32 bridge_id; /* bridge vendor/device */
__u32 agp_mode; /* mode info of bridge */
unsigned long aper_base;/* base of aperture */
- size_t aper_size; /* size of aperture */
- size_t pg_total; /* max pages (swap + system) */
- size_t pg_system; /* max pages (system) */
- size_t pg_used; /* current pages used */
+ __kernel_size_t aper_size; /* size of aperture */
+ __kernel_size_t pg_total; /* max pages (swap + system) */
+ __kernel_size_t pg_system; /* max pages (system) */
+ __kernel_size_t pg_used; /* current pages used */
} agp_info;
typedef struct _agp_setup {
diff --git a/include/uapi/linux/amt.h b/include/uapi/linux/amt.h
new file mode 100644
index 000000000000..2dccff417195
--- /dev/null
+++ b/include/uapi/linux/amt.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _UAPI_AMT_H_
+#define _UAPI_AMT_H_
+
+enum ifla_amt_mode {
+ /* AMT interface works in Gateway mode.
+ * Gateway mode encapsulates IGMP/MLD traffic and decapsulates
+ * multicast traffic.
+ */
+ AMT_MODE_GATEWAY = 0,
+ /* AMT interface works in Relay mode.
+ * Relay mode encapsulates multicast traffic and decapsulates
+ * IGMP/MLD traffic.
+ */
+ AMT_MODE_RELAY,
+ __AMT_MODE_MAX,
+};
+
+#define AMT_MODE_MAX (__AMT_MODE_MAX - 1)
+
+enum {
+ IFLA_AMT_UNSPEC,
+ /* This attribute specifies the mode, either Gateway or Relay. */
+ IFLA_AMT_MODE,
+ /* This attribute specifies the Relay port.
+ * When the AMT interface is created in Gateway mode, this attribute
+ * specifies the relay (remote) port.
+ * When the AMT interface is created in Relay mode, this attribute is
+ * used as the local port.
+ */
+ IFLA_AMT_RELAY_PORT,
+ /* This attribute specifies the Gateway port.
+ * When the AMT interface is created in Gateway mode, this attribute is
+ * used as the local port.
+ * When the AMT interface is created in Relay mode, this attribute is
+ * not used.
+ */
+ IFLA_AMT_GATEWAY_PORT,
+ /* This attribute specifies the physical device */
+ IFLA_AMT_LINK,
+ /* This attribute specifies the local IP address */
+ IFLA_AMT_LOCAL_IP,
+ /* This attribute specifies the Relay IP address,
+ * so it is not used by the Relay itself.
+ */
+ IFLA_AMT_REMOTE_IP,
+ /* This attribute specifies the Discovery IP address.
+ * When the Gateway starts, it sends a discovery message to find the
+ * Relay's IP address, so this is not used by the Relay.
+ */
+ IFLA_AMT_DISCOVERY_IP,
+ /* This attribute specifies the maximum number of tunnels. */
+ IFLA_AMT_MAX_TUNNELS,
+ __IFLA_AMT_MAX,
+};
+
+#define IFLA_AMT_MAX (__IFLA_AMT_MAX - 1)
+
+#endif /* _UAPI_AMT_H_ */
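
These IFLA_AMT_* attributes travel as a nested IFLA_INFO_DATA payload in an RTM_NEWLINK request. A hedged sketch of packing one u32 attribute by hand (the helper is hypothetical and the netlink message framing around it is omitted and assumed to exist):

#include <string.h>
#include <linux/types.h>
#include <linux/netlink.h>   /* struct nlattr, NLA_HDRLEN, NLA_ALIGN */
#include <linux/amt.h>

static int put_u32_attr(char *buf, int off, unsigned short type, __u32 value)
{
	struct nlattr *nla = (struct nlattr *)(buf + off);

	nla->nla_type = type;
	nla->nla_len = NLA_HDRLEN + sizeof(value);
	memcpy(buf + off + NLA_HDRLEN, &value, sizeof(value));
	return off + NLA_ALIGN(nla->nla_len);
}

/* e.g.: off = put_u32_attr(buf, off, IFLA_AMT_MODE, AMT_MODE_GATEWAY); */
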
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index f1ce2c4c077e..d44a8118b2ed 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -217,16 +217,56 @@ struct binder_node_info_for_ref {
__u32 reserved3;
};
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
-#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
-#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
+struct binder_freeze_info {
+ __u32 pid;
+ __u32 enable;
+ __u32 timeout_ms;
+};
+
+struct binder_frozen_status_info {
+ __u32 pid;
+
+ /* process received sync transactions since last frozen
+ * bit 0: received sync transaction after being frozen
+ * bit 1: new pending sync transaction during freezing
+ */
+ __u32 sync_recv;
+
+ /* process received async transactions since last frozen */
+ __u32 async_recv;
+};
+
+/* struct binder_extended_error - extended error information
+ * @id: identifier for the failed operation
+ * @command: command as defined by binder_driver_return_protocol
+ * @param: parameter holding a negative errno value
+ *
+ * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
+ * returned by the driver upon a failed operation. Userspace can pull this
+ * data to properly handle specific error scenarios.
+ */
+struct binder_extended_error {
+ __u32 id;
+ __u32 command;
+ __s32 param;
+};
+
+enum {
+ BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
+ BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
+ BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
+ BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
+ BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
+ BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
+ BINDER_VERSION = _IOWR('b', 9, struct binder_version),
+ BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
+ BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
+ BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
+ BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
+ BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
+ BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
+ BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
+};
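
For context, a hedged sketch of how userspace might consume the new BINDER_GET_EXTENDED_ERROR command; the helper name and the open /dev/binder descriptor are assumptions:

#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* After a failed binder operation, pull the extended error record.
 * Fills @ee with id, command and a negative errno value in @param.
 */
static int last_binder_error(int binder_fd, struct binder_extended_error *ee)
{
	return ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, ee);
}
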
/*
* NOTE: Two special error codes you should check for when calling
@@ -248,6 +288,8 @@ enum transaction_flags {
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */
+ TF_UPDATE_TXN = 0x40, /* update the outdated pending async txn */
};
struct binder_transaction_data {
@@ -265,8 +307,8 @@ struct binder_transaction_data {
/* General information about the transaction. */
__u32 flags;
- pid_t sender_pid;
- uid_t sender_euid;
+ __kernel_pid_t sender_pid;
+ __kernel_uid32_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
@@ -407,6 +449,24 @@ enum binder_driver_return_protocol {
* The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
+
+ BR_FROZEN_REPLY = _IO('r', 18),
+ /*
+ * The target of the last sync transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is frozen. No parameters.
+ */
+
+ BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
+ /*
+ * Current process sent too many oneway calls to target, and the last
+ * asynchronous transaction makes the allocated async buffer size exceed
+ * detection threshold. No parameters.
+ */
+
+ BR_TRANSACTION_PENDING_FROZEN = _IO('r', 20),
+ /*
+ * The target of the last async transaction is frozen. No parameters.
+ */
};
enum binder_driver_command_protocol {
diff --git a/include/uapi/linux/aspeed-video.h b/include/uapi/linux/aspeed-video.h
new file mode 100644
index 000000000000..6586a65548c4
--- /dev/null
+++ b/include/uapi/linux/aspeed-video.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 ASPEED Technology Inc.
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_VIDEO_H
+#define _UAPI_LINUX_ASPEED_VIDEO_H
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_ASPEED_HQ_MODE (V4L2_CID_USER_ASPEED_BASE + 1)
+#define V4L2_CID_ASPEED_HQ_JPEG_QUALITY (V4L2_CID_USER_ASPEED_BASE + 2)
+
+#endif /* _UAPI_LINUX_ASPEED_VIDEO_H */
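
These are ordinary V4L2 user controls, so they can be set with the stock control ioctls. A minimal sketch; the device path implied by 'video_fd' and the helper name are assumptions:

#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/aspeed-video.h>

/* Enable HQ mode on an open ASPEED capture node. */
static int aspeed_enable_hq(int video_fd)
{
	struct v4l2_control ctrl = {
		.id    = V4L2_CID_ASPEED_HQ_MODE,
		.value = 1,
	};

	return ioctl(video_fd, VIDIOC_S_CTRL, &ctrl);
}
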
diff --git a/include/uapi/linux/atmbr2684.h b/include/uapi/linux/atmbr2684.h
index a9e2250cd720..d47c47d06f11 100644
--- a/include/uapi/linux/atmbr2684.h
+++ b/include/uapi/linux/atmbr2684.h
@@ -38,7 +38,7 @@
*/
#define BR2684_ENCAPS_VC (0) /* VC-mux */
#define BR2684_ENCAPS_LLC (1)
-#define BR2684_ENCAPS_AUTODETECT (2) /* Unsuported */
+#define BR2684_ENCAPS_AUTODETECT (2) /* Unsupported */
/*
* Is this VC bridged or routed?
diff --git a/include/uapi/linux/atmdev.h b/include/uapi/linux/atmdev.h
index a5c15cf23bd7..20b0215084fc 100644
--- a/include/uapi/linux/atmdev.h
+++ b/include/uapi/linux/atmdev.h
@@ -101,10 +101,6 @@ struct atm_dev_stats {
/* use backend to make new if */
#define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf)
/* add party to p2mp call */
-#ifdef CONFIG_COMPAT
-/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
-#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf)
-#endif
#define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int)
/* drop party from p2mp call */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index cd2d8279a5e4..d676ed2b246e 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -48,7 +48,7 @@
* 2500 - 2999 future user space (maybe integrity labels and related events)
*
* Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are
- * exclusively user space. 1300-2099 is kernel --> user space
+ * exclusively user space. 1300-2099 is kernel --> user space
* communication.
*/
#define AUDIT_GET 1000 /* Get status */
@@ -78,7 +78,7 @@
#define AUDIT_LAST_USER_MSG 1199
#define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */
#define AUDIT_LAST_USER_MSG2 2999
-
+
#define AUDIT_DAEMON_START 1200 /* Daemon startup record */
#define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */
#define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */
@@ -118,6 +118,10 @@
#define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */
#define AUDIT_BPF 1334 /* BPF subsystem */
#define AUDIT_EVENT_LISTENER 1335 /* Task joined multicast read socket */
+#define AUDIT_URINGOP 1336 /* io_uring operation */
+#define AUDIT_OPENAT2 1337 /* Record showing openat2 open_how args */
+#define AUDIT_DM_CTRL 1338 /* Device Mapper target control */
+#define AUDIT_DM_EVENT 1339 /* Device Mapper events */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -166,8 +170,9 @@
#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
+#define AUDIT_FILTER_URING_EXIT 0x07 /* Apply rule at io_uring op exit */
-#define AUDIT_NR_FILTERS 7
+#define AUDIT_NR_FILTERS 8
#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */
@@ -182,7 +187,7 @@
#define AUDIT_MAX_KEY_LEN 256
#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((__u32)((nr)/32))
-#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32))
+#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32))
#define AUDIT_SYSCALL_CLASSES 16
#define AUDIT_CLASS_DIR_WRITE 0
@@ -434,6 +439,8 @@ enum {
#define AUDIT_ARCH_UNICORE (EM_UNICORE|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_XTENSA (EM_XTENSA)
+#define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2
@@ -509,7 +516,7 @@ struct audit_rule_data {
__u32 values[AUDIT_MAX_FIELDS];
__u32 fieldflags[AUDIT_MAX_FIELDS];
__u32 buflen; /* total length of string fields */
- char buf[0]; /* string fields buffer */
+ char buf[]; /* string fields buffer */
};
#endif /* _UAPI_LINUX_AUDIT_H_ */
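
The switch to 1U above matters because AUDIT_BIT(nr) can target bit 31 of a word, where left-shifting a signed 1 is undefined behavior. A small sketch of the intended use (the helper name is hypothetical):

#include <linux/audit.h>

/* Mark syscall 'nr' in a rule's syscall bitmask; with 1U, setting
 * bit 31 of a word no longer shifts into the sign bit of an int.
 */
static void audit_mask_set(__u32 mask[AUDIT_BITMASK_SIZE], unsigned int nr)
{
	mask[AUDIT_WORD(nr)] |= AUDIT_BIT(nr);
}
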
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
index 62e625356dc8..08be539605fc 100644
--- a/include/uapi/linux/auto_dev-ioctl.h
+++ b/include/uapi/linux/auto_dev-ioctl.h
@@ -109,7 +109,7 @@ struct autofs_dev_ioctl {
struct args_ismountpoint ismountpoint;
};
- char path[0];
+ char path[];
};
static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h
index abe5f2b6581b..cc61cb9b3e9a 100644
--- a/include/uapi/linux/auxvec.h
+++ b/include/uapi/linux/auxvec.h
@@ -30,8 +30,15 @@
* differ from AT_PLATFORM. */
#define AT_RANDOM 25 /* address of 16 random bytes */
#define AT_HWCAP2 26 /* extension of AT_HWCAP */
+#define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size */
+#define AT_RSEQ_ALIGN 28 /* rseq allocation alignment */
+#define AT_HWCAP3 29 /* extension of AT_HWCAP */
+#define AT_HWCAP4 30 /* extension of AT_HWCAP */
#define AT_EXECFN 31 /* filename of program */
+#ifndef AT_MINSIGSTKSZ
+#define AT_MINSIGSTKSZ 51 /* minimal stack size for signal delivery */
+#endif
#endif /* _UAPI_LINUX_AUXVEC_H */
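
A quick sketch of probing the new rseq entries with glibc's getauxval(); on kernels that predate them the calls simply return 0:

#include <stdio.h>
#include <sys/auxv.h>       /* getauxval(), glibc >= 2.16 */
#include <linux/auxvec.h>

int main(void)
{
	/* 0 means the running kernel does not provide the entry. */
	printf("rseq feature size: %lu align: %lu\n",
	       getauxval(AT_RSEQ_FEATURE_SIZE),
	       getauxval(AT_RSEQ_ALIGN));
	return 0;
}
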
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 9c8604c5b5f6..6e25753015df 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
-/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
+/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
@@ -26,6 +26,7 @@
* @BATADV_CODED: network coded packets
* @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
* @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
+ * @BATADV_MCAST: multicast packet with multiple destination addresses
*
* @BATADV_UNICAST: unicast packets carrying unicast payload traffic
* @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
@@ -42,6 +43,7 @@ enum batadv_packettype {
BATADV_CODED = 0x02,
BATADV_ELP = 0x03,
BATADV_OGM2 = 0x04,
+ BATADV_MCAST = 0x05,
/* 0x40 - 0x7f: unicast */
#define BATADV_UNICAST_MIN 0x40
BATADV_UNICAST = 0x40,
@@ -114,6 +116,9 @@ enum batadv_icmp_packettype {
* only need routable IPv4 multicast packets we signed up for explicitly
* @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore
* only need routable IPv6 multicast packets we signed up for explicitly
+ * @BATADV_MCAST_HAVE_MC_PTYPE_CAPA: we can parse, receive and forward
+ * batman-adv multicast packets with a multicast tracker TVLV. And all our
+ * hard interfaces have an MTU of at least 1280 bytes.
*/
enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
@@ -121,6 +126,7 @@ enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3,
BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4,
+ BATADV_MCAST_HAVE_MC_PTYPE_CAPA = 1UL << 5,
};
/* tt data subtypes */
@@ -172,14 +178,16 @@ enum batadv_bla_claimframe {
* @BATADV_TVLV_TT: translation table tvlv
* @BATADV_TVLV_ROAM: roaming advertisement tvlv
* @BATADV_TVLV_MCAST: multicast capability tvlv
+ * @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv
*/
enum batadv_tvlv_type {
- BATADV_TVLV_GW = 0x01,
- BATADV_TVLV_DAT = 0x02,
- BATADV_TVLV_NC = 0x03,
- BATADV_TVLV_TT = 0x04,
- BATADV_TVLV_ROAM = 0x05,
- BATADV_TVLV_MCAST = 0x06,
+ BATADV_TVLV_GW = 0x01,
+ BATADV_TVLV_DAT = 0x02,
+ BATADV_TVLV_NC = 0x03,
+ BATADV_TVLV_TT = 0x04,
+ BATADV_TVLV_ROAM = 0x05,
+ BATADV_TVLV_MCAST = 0x06,
+ BATADV_TVLV_MCAST_TRACKER = 0x07,
};
#pragma pack(2)
@@ -486,6 +494,25 @@ struct batadv_bcast_packet {
};
/**
+ * struct batadv_mcast_packet - multicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @tvlv_len: length of the appended tvlv buffer (in bytes)
+ */
+struct batadv_mcast_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 reserved;
+ __be16 tvlv_len;
+ /* "4 bytes boundary + 2 bytes" long to make the payload after the
+ * following ethernet header again 4 bytes boundary aligned
+ */
+};
+
+/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
@@ -626,6 +653,14 @@ struct batadv_tvlv_mcast_data {
__u8 reserved[3];
};
+/**
+ * struct batadv_tvlv_mcast_tracker - payload of a multicast tracker tvlv
+ * @num_dests: number of subsequent destination originator MAC addresses
+ */
+struct batadv_tvlv_mcast_tracker {
+ __be16 num_dests;
+};
+
#pragma pack()
#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index bb0ae945b36a..35dc016c9bb4 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: MIT */
-/* Copyright (C) 2016-2020 B.A.T.M.A.N. contributors:
+/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*/
@@ -675,4 +675,30 @@ enum batadv_tp_meter_reason {
BATADV_TP_REASON_TOO_MANY = 133,
};
+/**
+ * enum batadv_ifla_attrs - batman-adv ifla nested attributes
+ */
+enum batadv_ifla_attrs {
+ /**
+ * @IFLA_BATADV_UNSPEC: unspecified attribute which is not parsed by
+ * rtnetlink
+ */
+ IFLA_BATADV_UNSPEC,
+
+ /**
+ * @IFLA_BATADV_ALGO_NAME: routing algorithm (name) which should be
+ * used by the newly registered batadv net_device.
+ */
+ IFLA_BATADV_ALGO_NAME,
+
+ /* add attributes above here, update the policy in soft-interface.c */
+
+ /**
+ * @__IFLA_BATADV_MAX: internal use
+ */
+ __IFLA_BATADV_MAX,
+};
+
+#define IFLA_BATADV_MAX (__IFLA_BATADV_MAX - 1)
+
#endif /* _UAPI_LINUX_BATMAN_ADV_H_ */
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
deleted file mode 100644
index 52e8bcb33981..000000000000
--- a/include/uapi/linux/bcache.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_BCACHE_H
-#define _LINUX_BCACHE_H
-
-/*
- * Bcache on disk data structures
- */
-
-#include <linux/types.h>
-
-#define BITMASK(name, type, field, offset, size) \
-static inline __u64 name(const type *k) \
-{ return (k->field >> offset) & ~(~0ULL << size); } \
- \
-static inline void SET_##name(type *k, __u64 v) \
-{ \
- k->field &= ~(~(~0ULL << size) << offset); \
- k->field |= (v & ~(~0ULL << size)) << offset; \
-}
-
-/* Btree keys - all units are in sectors */
-
-struct bkey {
- __u64 high;
- __u64 low;
- __u64 ptr[];
-};
-
-#define KEY_FIELD(name, field, offset, size) \
- BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size) \
-static inline __u64 name(const struct bkey *k, unsigned int i) \
-{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
- \
-static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
-{ \
- k->ptr[i] &= ~(~(~0ULL << size) << offset); \
- k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
-}
-
-#define KEY_SIZE_BITS 16
-#define KEY_MAX_U64S 8
-
-KEY_FIELD(KEY_PTRS, high, 60, 3)
-KEY_FIELD(HEADER_SIZE, high, 58, 2)
-KEY_FIELD(KEY_CSUM, high, 56, 2)
-KEY_FIELD(KEY_PINNED, high, 55, 1)
-KEY_FIELD(KEY_DIRTY, high, 36, 1)
-
-KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
-KEY_FIELD(KEY_INODE, high, 0, 20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline __u64 KEY_OFFSET(const struct bkey *k)
-{
- return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
-{
- k->low = v;
-}
-
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(inode, offset, size) \
-((struct bkey) { \
- .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
- .low = (offset) \
-})
-
-#define ZERO_KEY KEY(0, 0, 0)
-
-#define MAX_KEY_INODE (~(~0 << 20))
-#define MAX_KEY_OFFSET (~0ULL >> 1)
-#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
-
-#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
-
-#define PTR_DEV_BITS 12
-
-PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
-PTR_FIELD(PTR_OFFSET, 8, 43)
-PTR_FIELD(PTR_GEN, 0, 8)
-
-#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
-
-#define MAKE_PTR(gen, offset, dev) \
- ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
-
-/* Bkey utility code */
-
-static inline unsigned long bkey_u64s(const struct bkey *k)
-{
- return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
-}
-
-static inline unsigned long bkey_bytes(const struct bkey *k)
-{
- return bkey_u64s(k) * sizeof(__u64);
-}
-
-#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
- SET_KEY_INODE(dest, KEY_INODE(src));
- SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
- __u64 *d = (void *) k;
-
- return (struct bkey *) (d + bkey_u64s(k));
-}
-
-static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
-{
- __u64 *d = (void *) k;
-
- return (struct bkey *) (d + nr_keys);
-}
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key) \
- union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
-
-/* Superblock */
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
-#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
-#define BCACHE_SB_MAX_VERSION 6
-
-#define SB_SECTOR 8
-#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
-#define SB_SIZE 4096
-#define SB_LABEL_SIZE 32
-#define SB_JOURNAL_BUCKETS 256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET 8
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
-
-struct cache_sb_disk {
- __le64 csum;
- __le64 offset; /* sector where this sb was written */
- __le64 version;
-
- __u8 magic[16];
-
- __u8 uuid[16];
- union {
- __u8 set_uuid[16];
- __le64 set_magic;
- };
- __u8 label[SB_LABEL_SIZE];
-
- __le64 flags;
- __le64 seq;
-
- __le64 feature_compat;
- __le64 feature_incompat;
- __le64 feature_ro_compat;
-
- __le64 pad[5];
-
- union {
- struct {
- /* Cache devices */
- __le64 nbuckets; /* device size */
-
- __le16 block_size; /* sectors */
- __le16 bucket_size; /* sectors */
-
- __le16 nr_in_set;
- __le16 nr_this_dev;
- };
- struct {
- /* Backing devices */
- __le64 data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- __le32 last_mount; /* time overflow in y2106 */
-
- __le16 first_bucket;
- union {
- __le16 njournal_buckets;
- __le16 keys;
- };
- __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
- __le16 bucket_size_hi;
-};
-
-/*
- * This is for in-memory bcache super block.
- * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
- * size, ordering and even whole struct size may be different
- * from cache_sb_disk.
- */
-struct cache_sb {
- __u64 offset; /* sector where this sb was written */
- __u64 version;
-
- __u8 magic[16];
-
- __u8 uuid[16];
- union {
- __u8 set_uuid[16];
- __u64 set_magic;
- };
- __u8 label[SB_LABEL_SIZE];
-
- __u64 flags;
- __u64 seq;
-
- __u64 feature_compat;
- __u64 feature_incompat;
- __u64 feature_ro_compat;
-
- union {
- struct {
- /* Cache devices */
- __u64 nbuckets; /* device size */
-
- __u16 block_size; /* sectors */
- __u16 nr_in_set;
- __u16 nr_this_dev;
- __u32 bucket_size; /* sectors */
- };
- struct {
- /* Backing devices */
- __u64 data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- __u32 last_mount; /* time overflow in y2106 */
-
- __u16 first_bucket;
- union {
- __u16 njournal_buckets;
- __u16 keys;
- };
- __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
-{
- return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
-}
-
-BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH 0U
-#define CACHE_MODE_WRITEBACK 1U
-#define CACHE_MODE_WRITEAROUND 2U
-#define CACHE_MODE_NONE 3U
-BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE 0U
-#define BDEV_STATE_CLEAN 1U
-#define BDEV_STATE_DIRTY 2U
-#define BDEV_STATE_STALE 3U
-
-/*
- * Magic numbers
- *
- * The various other data structures have their own magic numbers, which are
- * xored with the first part of the cache set's UUID
- */
-
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
-
-static inline __u64 jset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ JSET_MAGIC;
-}
-
-static inline __u64 pset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ PSET_MAGIC;
-}
-
-static inline __u64 bset_magic(struct cache_sb *sb)
-{
- return sb->set_magic ^ BSET_MAGIC;
-}
-
-/*
- * Journal
- *
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-
-#define BCACHE_JSET_VERSION_UUIDv1 1
-#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
-#define BCACHE_JSET_VERSION 1
-
-struct jset {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 keys;
-
- __u64 last_seq;
-
- BKEY_PADDED(uuid_bucket);
- BKEY_PADDED(btree_root);
- __u16 btree_level;
- __u16 pad[3];
-
- __u64 prio_bucket[MAX_CACHES_PER_SET];
-
- union {
- struct bkey start[0];
- __u64 d[0];
- };
-};
-
-/* Bucket prios/gens */
-
-struct prio_set {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 pad;
-
- __u64 next_bucket;
-
- struct bucket_disk {
- __u16 prio;
- __u8 gen;
- } __attribute((packed)) data[];
-};
-
-/* UUIDS - per backing device/flash only volume metadata */
-
-struct uuid_entry {
- union {
- struct {
- __u8 uuid[16];
- __u8 label[32];
- __u32 first_reg; /* time overflow in y2106 */
- __u32 last_reg;
- __u32 invalidated;
-
- __u32 flags;
- /* Size of flash only volumes */
- __u64 sectors;
- };
-
- __u8 pad[128];
- };
-};
-
-BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
-
-/* Btree nodes */
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_CSUM 1
-#define BCACHE_BSET_VERSION 1
-
-/*
- * Btree nodes
- *
- * On disk a btree node is a list/log of these; within each set the keys are
- * sorted
- */
-struct bset {
- __u64 csum;
- __u64 magic;
- __u64 seq;
- __u32 version;
- __u32 keys;
-
- union {
- struct bkey start[0];
- __u64 d[0];
- };
-};
-
-/* OBSOLETE */
-
-/* UUIDS - per backing device/flash only volume metadata */
-
-struct uuid_entry_v0 {
- __u8 uuid[16];
- __u8 label[32];
- __u32 first_reg;
- __u32 last_reg;
- __u32 invalidated;
- __u32 pad;
-};
-
-#endif /* _LINUX_BCACHE_H */
diff --git a/include/uapi/linux/binfmts.h b/include/uapi/linux/binfmts.h
index 689025d9c185..c6f9450efc12 100644
--- a/include/uapi/linux/binfmts.h
+++ b/include/uapi/linux/binfmts.h
@@ -18,4 +18,8 @@ struct pt_regs;
/* sizeof(linux_binprm->buf) */
#define BINPRM_BUF_SIZE 256
+/* preserve argv0 for the interpreter */
+#define AT_FLAGS_PRESERVE_ARGV0_BIT 0
+#define AT_FLAGS_PRESERVE_ARGV0 (1 << AT_FLAGS_PRESERVE_ARGV0_BIT)
+
#endif /* _UAPI_LINUX_BINFMTS_H */
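
binfmt_misc sets this bit in the AT_FLAGS auxv entry when an interpreter is registered with the 'P' (preserve-argv0) flag, so the interpreter can detect it at runtime. A hedged sketch, with a hypothetical helper name:

#include <sys/auxv.h>
#include <linux/auxvec.h>    /* AT_FLAGS */
#include <linux/binfmts.h>

/* For an interpreter started via binfmt_misc: was argv[0] preserved? */
static int argv0_preserved(void)
{
	return !!(getauxval(AT_FLAGS) & AT_FLAGS_PRESERVE_ARGV0);
}
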
diff --git a/include/uapi/linux/bits.h b/include/uapi/linux/bits.h
new file mode 100644
index 000000000000..3c2a101986a3
--- /dev/null
+++ b/include/uapi/linux/bits.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* bits.h: Macros for dealing with bitmasks. */
+
+#ifndef _UAPI_LINUX_BITS_H
+#define _UAPI_LINUX_BITS_H
+
+#define __GENMASK(h, l) \
+ (((~_UL(0)) - (_UL(1) << (l)) + 1) & \
+ (~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
+
+#define __GENMASK_ULL(h, l) \
+ (((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
+ (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* _UAPI_LINUX_BITS_H */
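
__GENMASK(h, l) produces a mask with bits l..h set, clipped to the word width. A compile-time sketch; the extra includes supply _UL()/_ULL() and __BITS_PER_LONG*, which this header does not include itself:

#include <linux/const.h>        /* _UL(), _ULL() */
#include <asm/bitsperlong.h>    /* __BITS_PER_LONG, __BITS_PER_LONG_LONG */
#include <linux/bits.h>

_Static_assert(__GENMASK(7, 4) == 0xf0, "bits 7..4");
_Static_assert(__GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL, "bits 39..21");
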
diff --git a/include/uapi/linux/blkpg.h b/include/uapi/linux/blkpg.h
index ac6474e4f29d..d0a64ee97c6d 100644
--- a/include/uapi/linux/blkpg.h
+++ b/include/uapi/linux/blkpg.h
@@ -2,29 +2,6 @@
#ifndef _UAPI__LINUX_BLKPG_H
#define _UAPI__LINUX_BLKPG_H
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- * get_whole_disk() (given the device number of a partition,
- * find the device number of the encompassing disk)
- * get_all_partitions() (given the device number of a disk, return the
- * device numbers of all its known partitions)
- *
- * Partition stuff:
- * add_partition()
- * delete_partition()
- * test_partition_in_use() (also for test_disk_in_use)
- *
- * Geometry stuff:
- * get_geometry()
- * set_geometry()
- * get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -52,9 +29,8 @@ struct blkpg_partition {
long long start; /* starting offset in bytes */
long long length; /* length in bytes */
int pno; /* partition number */
- char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
- to be used in kernel messages */
- char volname[BLKPG_VOLNAMELTH]; /* volume label */
+ char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+ char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */
};
#endif /* _UAPI__LINUX_BLKPG_H */
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 42c3366cc25f..f85743ef6e7d 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -51,13 +51,13 @@ enum blk_zone_type {
*
* The Zone Condition state machine in the ZBC/ZAC standards maps the above
* definitions as:
- * - ZC1: Empty | BLK_ZONE_EMPTY
+ * - ZC1: Empty | BLK_ZONE_COND_EMPTY
* - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
* - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
- * - ZC4: Closed | BLK_ZONE_CLOSED
- * - ZC5: Full | BLK_ZONE_FULL
- * - ZC6: Read Only | BLK_ZONE_READONLY
- * - ZC7: Offline | BLK_ZONE_OFFLINE
+ * - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ * - ZC5: Full | BLK_ZONE_COND_FULL
+ * - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ * - ZC7: Offline | BLK_ZONE_COND_OFFLINE
*
* Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
* be considered invalid.
@@ -93,12 +93,15 @@ enum blk_zone_report_flags {
* @non_seq: Flag indicating that the zone is using non-sequential resources
* (for host-aware zoned block devices only).
* @reset: Flag indicating that a zone reset is recommended.
- * @reserved: Padding to 64 B to match the ZBC/ZAC defined zone descriptor size.
+ * @resv: Padding for 8B alignment.
+ * @capacity: Zone usable capacity in 512 B sector units
+ * @reserved: Padding to 64 B to match the ZBC, ZAC and ZNS defined zone
+ * descriptor size.
*
- * start, len and wp use the regular 512 B sector unit, regardless of the
- * device logical block size. The overall structure size is 64 B to match the
- * ZBC/ZAC defined zone descriptor and allow support for future additional
- * zone information.
+ * start, len, capacity and wp use the regular 512 B sector unit, regardless
+ * of the device logical block size. The overall structure size is 64 B to
+ * match the ZBC, ZAC and ZNS defined zone descriptor and allow support for
+ * future additional zone information.
*/
struct blk_zone {
__u64 start; /* Zone start sector */
@@ -118,7 +121,7 @@ struct blk_zone {
*
* @sector: starting sector of report
* @nr_zones: IN maximum / OUT actual
- * @reserved: padding to 16 byte alignment
+ * @flags: one or more flags as defined by enum blk_zone_report_flags.
* @zones: Space to hold @nr_zones @zones entries on reply.
*
* The array of at most @nr_zones must follow this structure in memory.
@@ -127,7 +130,7 @@ struct blk_zone_report {
__u64 sector;
__u32 nr_zones;
__u32 flags;
- struct blk_zone zones[0];
+ struct blk_zone zones[];
};
/**
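
The zones[] flexible array means callers size the report buffer themselves. A hedged sketch using the BLKREPORTZONE ioctl defined in this header; the helper name is hypothetical:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

/* Fetch up to 'nr' zone descriptors starting at 'sector' from an open
 * zoned block device fd; the caller frees the returned report.
 */
static struct blk_zone_report *report_zones(int fd, __u64 sector, __u32 nr)
{
	struct blk_zone_report *rep;

	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return NULL;

	rep->sector   = sector;
	rep->nr_zones = nr;            /* IN: capacity, OUT: zones filled */
	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		free(rep);
		return NULL;
	}
	return rep;
}
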
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b6238b2209b7..3c42b9f1bada 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -19,7 +19,9 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
-#define BPF_XADD 0xc0 /* exclusive add */
+#define BPF_MEMSX 0x80 /* load with sign extension */
+#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
+#define BPF_XADD 0xc0 /* exclusive add - legacy name */
/* alu/jmp fields */
#define BPF_MOV 0xb0 /* mov reg to reg */
@@ -40,9 +42,19 @@
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
+#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
+/* atomic op type fields (stored in immediate) */
+#define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */
+#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
+#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
+
+enum bpf_cond_pseudo_jmp {
+ BPF_MAY_GOTO = 0,
+};
+
/* Register numbers */
enum {
BPF_REG_0 = 0,
@@ -70,24 +82,843 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
+ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
+ * the trailing flexible array member) instead.
+ */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
+/* Header for bpf_lpm_trie_key structs */
+struct bpf_lpm_trie_key_hdr {
+ __u32 prefixlen;
+};
+
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
+struct bpf_lpm_trie_key_u8 {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ __u8 data[]; /* Arbitrary size */
+};
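
A minimal sketch of the pattern the new header type enables, a fixed-size IPv4 key with a typed prefix (the wrapper struct is an illustration, not part of this header):

#include <linux/bpf.h>

struct ipv4_lpm_key {
	struct bpf_lpm_trie_key_hdr hdr;
	__u8 data[4];
};

/* 192.168.0.0/16 — pass &key when looking up an LPM trie map. */
static const struct ipv4_lpm_key key = {
	.hdr  = { .prefixlen = 16 },
	.data = { 192, 168, 0, 0 },
};
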
+
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
- __u32 attach_type; /* program attach type */
+ __u32 attach_type; /* program attach type (enum bpf_attach_type) */
+};
+
+enum bpf_cgroup_iter_order {
+ BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
+ BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */
+ BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
+ BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
+ BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
};
union bpf_iter_link_info {
struct {
__u32 map_fd;
} map;
+ struct {
+ enum bpf_cgroup_iter_order order;
+
+ /* At most one of cgroup_fd and cgroup_id can be non-zero. If
+ * both are zero, the walk starts from the default cgroup v2
+ * root. For walking v1 hierarchy, one should always explicitly
+ * specify cgroup_fd.
+ */
+ __u32 cgroup_fd;
+ __u64 cgroup_id;
+ } cgroup;
+ /* Parameters of task iterators. */
+ struct {
+ __u32 tid;
+ __u32 pid;
+ __u32 pid_fd;
+ } task;
};
-/* BPF syscall commands, see bpf(2) man-page for details. */
+/* BPF syscall commands, see bpf(2) man-page for more details. */
+/**
+ * DOC: eBPF Syscall Preamble
+ *
+ * The operation to be performed by the **bpf**\ () system call is determined
+ * by the *cmd* argument. Each operation takes an accompanying argument,
+ * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
+ * below). The size argument is the size of the union pointed to by *attr*.
+ */
+/**
+ * DOC: eBPF Syscall Commands
+ *
+ * BPF_MAP_CREATE
+ * Description
+ * Create a map and return a file descriptor that refers to the
+ * map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
+ * is automatically enabled for the new file descriptor.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_MAP_CREATE** will delete the map (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_LOOKUP_ELEM
+ * Description
+ * Look up an element with a given *key* in the map referred to
+ * by the file descriptor *map_fd*.
+ *
+ * The *flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_ELEM
+ * Description
+ * Create or update an element (key/value pair) in a specified map.
+ *
+ * The *flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create a new element or update an existing element.
+ * **BPF_NOEXIST**
+ * Create a new element only if it did not exist.
+ * **BPF_EXIST**
+ * Update an existing element.
+ * **BPF_F_LOCK**
+ * Update a spin_lock-ed map element.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
+ * **E2BIG**, **EEXIST**, or **ENOENT**.
+ *
+ * **E2BIG**
+ * The number of elements in the map reached the
+ * *max_entries* limit specified at map creation time.
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_ELEM
+ * Description
+ * Look up and delete an element by key in a specified map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_KEY
+ * Description
+ * Look up an element by key in a specified map and return the key
+ * of the next element. Can be used to iterate over all elements
+ * in the map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * The following cases can be used to iterate over all elements of
+ * the map:
+ *
+ * * If *key* is not found, the operation returns zero and sets
+ * the *next_key* pointer to the key of the first element.
+ * * If *key* is found, the operation returns zero and sets the
+ * *next_key* pointer to the key of the next element.
+ * * If *key* is the last element, returns -1 and *errno* is set
+ * to **ENOENT**.
+ *
+ * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
+ * **EINVAL** on error.
+ *
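
As a worked example of the BPF_MAP_GET_NEXT_KEY iteration contract described above, a hedged sketch using raw bpf(2) syscalls rather than libbpf; 4-byte keys are assumed:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static void walk_keys(int map_fd)
{
	union bpf_attr attr;
	__u32 key, next;
	int has_key = 0;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.map_fd   = map_fd;
		/* NULL key on the first call yields the first element. */
		attr.key      = has_key ? (__u64)(unsigned long)&key : 0;
		attr.next_key = (__u64)(unsigned long)&next;
		if (syscall(SYS_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
			    sizeof(attr)) < 0)
			break;          /* ENOENT: past the last element */
		key = next;
		has_key = 1;
		/* ... use 'key' ... */
	}
}
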
+ * BPF_PROG_LOAD
+ * Description
+ * Verify and load an eBPF program, returning a new file
+ * descriptor associated with the program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
+ *
+ * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
+ * automatically enabled for the new file descriptor.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_PIN
+ * Description
+ * Pin an eBPF program or map referred by the specified *bpf_fd*
+ * to the provided *pathname* on the filesystem.
+ *
+ * The *pathname* argument must not contain a dot (".").
+ *
+ * On success, *pathname* retains a reference to the eBPF object,
+ * preventing deallocation of the object when the original
+ * *bpf_fd* is closed. This allows the eBPF object to live beyond
+ * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
+ * process.
+ *
+ * Applying **unlink**\ (2) or similar calls to the *pathname*
+ * unpins the object from the filesystem, removing the reference.
+ * If no other file descriptors or filesystem nodes refer to the
+ * same object, it will be deallocated (see NOTES).
+ *
+ * The filesystem type for the parent directory of *pathname* must
+ * be **BPF_FS_MAGIC**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_OBJ_GET
+ * Description
+ * Open a file descriptor for the eBPF object pinned to the
+ * specified *pathname*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_PROG_ATTACH
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook.
+ *
+ * The *attach_type* specifies the eBPF attachment point to
+ * attach the program to, and must be one of *bpf_attach_type*
+ * (see below).
+ *
+ * The *attach_bpf_fd* must be a valid file descriptor for a
+ * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
+ * or sock_ops type corresponding to the specified *attach_type*.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_TYPE_SK_SKB**,
+ * **BPF_PROG_TYPE_SK_MSG**
+ *
+ * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_DETACH
+ * Description
+ * Detach the eBPF program associated with the *target_fd* at the
+ * hook specified by *attach_type*. The program must have been
+ * previously attached using **BPF_PROG_ATTACH**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_TEST_RUN
+ * Description
+ * Run the eBPF program associated with the *prog_fd* a *repeat*
+ * number of times against a provided program context *ctx_in* and
+ * data *data_in*, and return the modified program context
+ * *ctx_out*, *data_out* (for example, packet data), result of the
+ * execution *retval*, and *duration* of the test run.
+ *
+ * The sizes of the buffers provided as input and output
+ * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
+ * be provided in the corresponding variables *ctx_size_in*,
+ * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
+ * of these parameters are not provided (ie set to NULL), the
+ * corresponding size field must be zero.
+ *
+ * Some program types have particular requirements:
+ *
+ * **BPF_PROG_TYPE_SK_LOOKUP**
+ * *data_in* and *data_out* must be NULL.
+ *
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT**,
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
+ *
+ * *ctx_out*, *data_in* and *data_out* must be NULL.
+ * *repeat* must be zero.
+ *
+ * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * **ENOSPC**
+ * Either *data_size_out* or *ctx_size_out* is too small.
+ * **ENOTSUPP**
+ * This command is not supported by the program type of
+ * the program referred to by *prog_fd*.
+ *
+ * BPF_PROG_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF program currently loaded into the kernel.
+ *
+ * Looks for the eBPF program with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF programs
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF map currently loaded into the kernel.
+ *
+ * Looks for the eBPF map with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF maps
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_PROG_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF program corresponding to
+ * *prog_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF map corresponding to
+ * *map_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_GET_INFO_BY_FD
+ * Description
+ * Obtain information about the eBPF object corresponding to
+ * *bpf_fd*.
+ *
+ * Populates up to *info_len* bytes of *info*, which will be in
+ * one of the following formats depending on the eBPF object type
+ * of *bpf_fd*:
+ *
+ * * **struct bpf_prog_info**
+ * * **struct bpf_map_info**
+ * * **struct bpf_btf_info**
+ * * **struct bpf_link_info**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * specified *attach_type* hook.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_QUERY** always fetches the number of programs
+ * attached and the *attach_flags* which were used to attach those
+ * programs. Additionally, if *prog_ids* is nonzero and the number
+ * of attached programs is less than *prog_cnt*, populates
+ * *prog_ids* with the eBPF program ids of the programs attached
+ * at *target_fd*.
+ *
+ * The following flags may alter the result:
+ *
+ * **BPF_F_QUERY_EFFECTIVE**
+ * Only return information regarding programs which are
+ * currently effective at the specified *target_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_RAW_TRACEPOINT_OPEN
+ * Description
+ * Attach an eBPF program to a tracepoint *name* to access kernel
+ * internal arguments of the tracepoint in their raw form.
+ *
+ * The *prog_fd* must be a valid file descriptor associated with
+ * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
+ *
+ * No ABI guarantees are made about the content of tracepoint
+ * arguments exposed to the corresponding eBPF program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_LOAD
+ * Description
+ * Verify and load BPF Type Format (BTF) metadata into the kernel,
+ * returning a new file descriptor associated with the metadata.
+ * BTF is described in more detail at
+ * https://www.kernel.org/doc/html/latest/bpf/btf.html.
+ *
+ * The *btf* parameter must point to valid memory providing
+ * *btf_size* bytes of BTF binary metadata.
+ *
+ * The returned file descriptor can be passed to other **bpf**\ ()
+ * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
+ * associate the BTF with those objects.
+ *
+ * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
+ * parameters to specify a *btf_log_buf*, *btf_log_size* and
+ * *btf_log_level* which allow the kernel to return freeform log
+ * output regarding the BTF verification process.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the BPF Type Format (BTF)
+ * corresponding to *btf_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_TASK_FD_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * target process identified by *pid* and *fd*.
+ *
+ * If the *pid* and *fd* are associated with a tracepoint, kprobe
+ * or uprobe perf event, then the *prog_id* and *fd_type* will
+ * be populated with the eBPF program id and file descriptor type
+ * of type **bpf_task_fd_type**. If associated with a kprobe or
+ * uprobe, the *probe_offset* and *probe_addr* will also be
+ * populated. Optionally, if *buf* is provided, then up to
+ * *buf_len* bytes of *buf* will be populated with the name of
+ * the tracepoint, kprobe or uprobe.
+ *
+ * The resulting *prog_id* may be introspected in deeper detail
+ * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_ELEM
+ * Description
+ * Look up an element with the given *key* in the map referred to
+ * by the file descriptor *fd*, and if found, delete the element.
+ *
+ * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
+ * types, the *flags* argument needs to be set to 0, but for other
+ * map types, it may be specified as:
+ *
+ * **BPF_F_LOCK**
+ * Look up and delete the value of a spin-locked map
+ * without returning the lock. This must be specified if
+ * the elements contain a spinlock.
+ *
+ * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
+ * implement this command as a "pop" operation, deleting the top
+ * element rather than one corresponding to *key*.
+ * The *key* and *key_len* parameters should be zeroed when
+ * issuing this operation for these map types.
+ *
+ * This command is only valid for the following map types:
+ * * **BPF_MAP_TYPE_QUEUE**
+ * * **BPF_MAP_TYPE_STACK**
+ * * **BPF_MAP_TYPE_HASH**
+ * * **BPF_MAP_TYPE_PERCPU_HASH**
+ * * **BPF_MAP_TYPE_LRU_HASH**
+ * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_FREEZE
+ * Description
+ * Freeze the permissions of the specified map.
+ *
+ * Write permissions may be frozen by passing zero *flags*.
+ * Upon success, no future syscall invocations may alter the
+ * map state of *map_fd*. Write operations from eBPF programs
+ * are still possible for a frozen map.
+ *
+ * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_BTF_GET_NEXT_ID
+ * Description
+ * Fetch the next BPF Type Format (BTF) object currently loaded
+ * into the kernel.
+ *
+ * Looks for the BTF object with an id greater than *start_id*
+ * and updates *next_id* on success. If no other BTF objects
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_BATCH
+ * Description
+ * Iterate and fetch multiple elements in a map.
+ *
+ * Two opaque values are used to manage batch operations,
+ * *in_batch* and *out_batch*. Initially, *in_batch* must be set
+ * to NULL to begin the batched operation. After each subsequent
+ * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
+ * *out_batch* as the *in_batch* for the next operation to
+ * continue iteration from the current point. Both *in_batch* and
+ * *out_batch* must point to memory large enough to hold a key,
+ * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
+ * must be at least 4 bytes wide regardless of key size.
+ *
+ * The *keys* and *values* are output parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are copied into the
+ * user buffer, with the keys copied into *keys* and the values
+ * copied into the corresponding indices in *values*.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **ENOSPC** to indicate that *keys* or
+ * *values* is too small to dump an entire bucket during
+ * iteration of a hash-based map type.
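+ *
+ * A batched-iteration sketch for a hash-based map, whose
+ * opaque batch cursor is 4 bytes wide (assumptions: a
+ * hypothetical sys_bpf() wrapper around the **bpf**\ (2)
+ * syscall, and *keys*/*values* buffers sized for 64 elements
+ * of this map's key and value size):
+ *
+ * ::
+ *
+ *	union bpf_attr attr = {};
+ *	__u32 out = 0;
+ *	int err;
+ *
+ *	attr.batch.map_fd = map_fd;
+ *	attr.batch.keys = (__u64)(unsigned long)keys;
+ *	attr.batch.values = (__u64)(unsigned long)values;
+ *	attr.batch.out_batch = (__u64)(unsigned long)&out;
+ *	do {
+ *		attr.batch.count = 64;	// in: capacity, out: copied
+ *		err = sys_bpf(BPF_MAP_LOOKUP_BATCH, &attr,
+ *			      sizeof(attr));
+ *		// consume attr.batch.count elements here
+ *		attr.batch.in_batch = (__u64)(unsigned long)&out;
+ *	} while (!err);
+ *	// iteration ends with -1 and errno set to ENOENT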
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_BATCH
+ * Description
+ * Iterate and delete all elements in a map.
+ *
+ * This operation has the same behavior as
+ * **BPF_MAP_LOOKUP_BATCH** with two exceptions:
+ *
+ * * Every element that is successfully returned is also deleted
+ * from the map. This is at least *count* elements. Note that
+ * *count* is both an input and an output parameter.
+ * * Upon returning with *errno* set to **EFAULT**, up to
+ * *count* elements may be deleted without returning the keys
+ * and values of the deleted elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_BATCH
+ * Description
+ * Update multiple elements in a map by *key*.
+ *
+ * The *keys* and *values* are input parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially updated to the
+ * value in the corresponding index in *values*. The *in_batch*
+ * and *out_batch* parameters are ignored and should be zeroed.
+ *
+ * The *elem_flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create new elements or update existing elements.
+ * **BPF_NOEXIST**
+ * Create new elements only if they do not exist.
+ * **BPF_EXIST**
+ * Update existing elements.
+ * **BPF_F_LOCK**
+ * Update spin_lock-ed map elements. This must be
+ * specified if the map value contains a spinlock.
+ *
+ * On success, *count* elements from the map are updated.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
+ * **E2BIG**. **E2BIG** indicates that the number of elements in
+ * the map reached the *max_entries* limit specified at map
+ * creation time.
+ *
+ * May set *errno* to one of the following error codes under
+ * specific circumstances:
+ *
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
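+ *
+ * A minimal sketch (assuming a hypothetical sys_bpf()
+ * wrapper around the **bpf**\ (2) syscall and *keys*/*values*
+ * arrays already filled with *count* entries):
+ *
+ * ::
+ *
+ *	union bpf_attr attr = {};
+ *
+ *	attr.batch.map_fd = map_fd;
+ *	attr.batch.keys = (__u64)(unsigned long)keys;
+ *	attr.batch.values = (__u64)(unsigned long)values;
+ *	attr.batch.count = count;
+ *	attr.batch.elem_flags = BPF_ANY;
+ *	err = sys_bpf(BPF_MAP_UPDATE_BATCH, &attr, sizeof(attr));
+ *	// on error (errno != EFAULT), attr.batch.count holds
+ *	// the number of elements processed before the failure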
+ *
+ * BPF_MAP_DELETE_BATCH
+ * Description
+ * Delete multiple elements in a map by *key*.
+ *
+ * The *keys* parameter is an input parameter which must point
+ * to memory large enough to hold *count* items based on the key
+ * size of the map *map_fd*, that is, *key_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially deleted. The
+ * *in_batch*, *out_batch*, and *values* parameters are ignored
+ * and should be zeroed.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are deleted.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements. If
+ * *errno* is **EFAULT**, up to *count* elements may have been
+ * deleted.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_CREATE
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook and return a file descriptor handle for
+ * managing the link.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_UPDATE
+ * Description
+ * Update the eBPF program in the specified *link_fd* to
+ * *new_prog_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF Link corresponding to
+ * *link_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF link currently loaded into the kernel.
+ *
+ * Looks for the eBPF link with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF links
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_ENABLE_STATS
+ * Description
+ * Enable eBPF runtime statistics gathering.
+ *
+ * Runtime statistics gathering for the eBPF runtime is disabled
+ * by default to minimize the corresponding performance overhead.
+ * This command enables statistics globally.
+ *
+ * Multiple programs may independently enable statistics.
+ * After gathering the desired statistics, eBPF runtime statistics
+ * may be disabled again by calling **close**\ (2) for the file
+ * descriptor returned by this function. Statistics will only be
+ * disabled system-wide when all outstanding file descriptors
+ * returned by prior calls for this subcommand are closed.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
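+ *
+ * A minimal sketch (assuming a hypothetical sys_bpf()
+ * wrapper around the **bpf**\ (2) syscall):
+ *
+ * ::
+ *
+ *	union bpf_attr attr = {};
+ *	int stats_fd;
+ *
+ *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
+ *	stats_fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
+ *	// run_time_ns/run_cnt now accumulate and are visible
+ *	// via BPF_OBJ_GET_INFO_BY_FD on a program fd
+ *	close(stats_fd);	// last close disables stats again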
+ *
+ * BPF_ITER_CREATE
+ * Description
+ * Create an iterator on top of the specified *link_fd* (as
+ * previously created using **BPF_LINK_CREATE**) and return a
+ * file descriptor that can be used to trigger the iteration.
+ *
+ * If the resulting file descriptor is pinned to the filesystem
+ * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
+ * for that path will trigger the iterator to read kernel state
+ * using the eBPF program attached to *link_fd*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
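+ *
+ * A read-loop sketch (assuming a hypothetical sys_bpf()
+ * wrapper around the **bpf**\ (2) syscall and a *link_fd*
+ * created for an iterator program):
+ *
+ * ::
+ *
+ *	union bpf_attr attr = {};
+ *	char buf[4096];
+ *	ssize_t n;
+ *	int iter_fd;
+ *
+ *	attr.iter_create.link_fd = link_fd;
+ *	iter_fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
+ *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
+ *		;	// each read() drives the iterator program
+ *	close(iter_fd);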
+ *
+ * BPF_LINK_DETACH
+ * Description
+ * Forcefully detach the specified *link_fd* from its
+ * corresponding attachment point.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_BIND_MAP
+ * Description
+ * Bind a map to the lifetime of an eBPF program.
+ *
+ * The map identified by *map_fd* is bound to the program
+ * identified by *prog_fd* and only released when *prog_fd* is
+ * released. This may be used in cases where metadata should be
+ * associated with a program which otherwise does not contain any
+ * references to the map (for example, embedded in the eBPF
+ * program instructions).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
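+ *
+ * A minimal sketch (assuming a hypothetical sys_bpf()
+ * wrapper around the **bpf**\ (2) syscall):
+ *
+ * ::
+ *
+ *	union bpf_attr attr = {};
+ *
+ *	attr.prog_bind_map.prog_fd = prog_fd;
+ *	attr.prog_bind_map.map_fd = map_fd;
+ *	err = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
+ *	// map_fd may now be closed; the map stays alive for
+ *	// as long as the program does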
+ *
+ * BPF_TOKEN_CREATE
+ * Description
+ * Create a BPF token with embedded information about what
+ * BPF-related functionality it allows:
+ * - a set of allowed bpf() syscall commands;
+ * - a set of allowed BPF map types to be created with
+ * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ * - a set of allowed BPF program types and BPF program attach
+ * types to be loaded with BPF_PROG_LOAD command, if
+ * BPF_PROG_LOAD itself is allowed.
+ *
+ * A BPF token is created (derived) from an instance of BPF FS,
+ * assuming it has the necessary delegation mount options
+ * specified.
+ * This BPF token can be passed as an extra parameter to various
+ * bpf() syscall commands to grant BPF subsystem functionality to
+ * unprivileged processes.
+ *
+ * When created, a BPF token is "associated" with the owning
+ * user namespace of the BPF FS instance (super block) that it
+ * was derived from, and subsequent BPF operations performed
+ * with the BPF token perform capability checks (i.e.,
+ * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
+ * that user namespace. Without a BPF token, such capabilities
+ * have to be granted in the init user namespace, making the
+ * bpf() syscall largely incompatible with user namespaces.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
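+ *
+ * A minimal sketch (assumptions: a hypothetical sys_bpf()
+ * wrapper around the **bpf**\ (2) syscall, and a BPF FS
+ * instance mounted at the illustrative path /sys/fs/bpf-user
+ * with delegation options such as delegate_cmds set):
+ *
+ * ::
+ *
+ *	union bpf_attr attr = {};
+ *	int bpffs_fd, token_fd;
+ *
+ *	bpffs_fd = open("/sys/fs/bpf-user", O_RDONLY);
+ *	attr.token_create.bpffs_fd = bpffs_fd;
+ *	token_fd = sys_bpf(BPF_TOKEN_CREATE, &attr, sizeof(attr));
+ *	// pass token_fd in map_token_fd/prog_token_fd/btf_token_fd
+ *	// together with the BPF_F_TOKEN_FD flag in later commands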
+ *
+ * NOTES
+ * eBPF objects (maps and programs) can be shared between processes.
+ *
+ * * After **fork**\ (2), the child inherits file descriptors
+ * referring to the same eBPF objects.
+ * * File descriptors referring to eBPF objects can be transferred over
+ * **unix**\ (7) domain sockets.
+ * * File descriptors referring to eBPF objects can be duplicated in the
+ * usual way, using **dup**\ (2) and similar calls.
+ * * File descriptors referring to eBPF objects can be pinned to the
+ * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
+ *
+ * An eBPF object is deallocated only after all file descriptors referring
+ * to the object have been closed and no references remain pinned to the
+ * filesystem or attached (for example, bound to a program or device).
+ */
enum bpf_cmd {
BPF_MAP_CREATE,
BPF_MAP_LOOKUP_ELEM,
@@ -100,6 +931,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@@ -124,6 +956,9 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
+ BPF_TOKEN_CREATE,
+ __MAX_BPF_CMD,
};
enum bpf_map_type {
@@ -146,15 +981,36 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
- BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
+ * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
+ * both cgroup-attached and other progs and supports all functionality
+ * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
+ * BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
+ */
+ BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
+ * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
+ * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * functionality and more. So mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * deprecated.
+ */
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
BPF_MAP_TYPE_STRUCT_OPS,
BPF_MAP_TYPE_RINGBUF,
+ BPF_MAP_TYPE_INODE_STORAGE,
+ BPF_MAP_TYPE_TASK_STORAGE,
+ BPF_MAP_TYPE_BLOOM_FILTER,
+ BPF_MAP_TYPE_USER_RINGBUF,
+ BPF_MAP_TYPE_CGRP_STORAGE,
+ BPF_MAP_TYPE_ARENA,
+ __MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -197,6 +1053,9 @@ enum bpf_prog_type {
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
+ BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
+ BPF_PROG_TYPE_NETFILTER,
+ __MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@@ -238,6 +1097,24 @@ enum bpf_attach_type {
BPF_XDP_CPUMAP,
BPF_SK_LOOKUP,
BPF_XDP,
+ BPF_SK_SKB_VERDICT,
+ BPF_SK_REUSEPORT_SELECT,
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
+ BPF_PERF_EVENT,
+ BPF_TRACE_KPROBE_MULTI,
+ BPF_LSM_CGROUP,
+ BPF_STRUCT_OPS,
+ BPF_NETFILTER,
+ BPF_TCX_INGRESS,
+ BPF_TCX_EGRESS,
+ BPF_TRACE_UPROBE_MULTI,
+ BPF_CGROUP_UNIX_CONNECT,
+ BPF_CGROUP_UNIX_SENDMSG,
+ BPF_CGROUP_UNIX_RECVMSG,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ BPF_NETKIT_PRIMARY,
+ BPF_NETKIT_PEER,
__MAX_BPF_ATTACH_TYPE
};
@@ -251,8 +1128,26 @@ enum bpf_link_type {
BPF_LINK_TYPE_ITER = 4,
BPF_LINK_TYPE_NETNS = 5,
BPF_LINK_TYPE_XDP = 6,
+ BPF_LINK_TYPE_PERF_EVENT = 7,
+ BPF_LINK_TYPE_KPROBE_MULTI = 8,
+ BPF_LINK_TYPE_STRUCT_OPS = 9,
+ BPF_LINK_TYPE_NETFILTER = 10,
+ BPF_LINK_TYPE_TCX = 11,
+ BPF_LINK_TYPE_UPROBE_MULTI = 12,
+ BPF_LINK_TYPE_NETKIT = 13,
+ __MAX_BPF_LINK_TYPE,
+};
- MAX_BPF_LINK_TYPE,
+#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
+
+enum bpf_perf_event_type {
+ BPF_PERF_EVENT_UNSPEC = 0,
+ BPF_PERF_EVENT_UPROBE = 1,
+ BPF_PERF_EVENT_URETPROBE = 2,
+ BPF_PERF_EVENT_KPROBE = 3,
+ BPF_PERF_EVENT_KRETPROBE = 4,
+ BPF_PERF_EVENT_TRACEPOINT = 5,
+ BPF_PERF_EVENT_EVENT = 6,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
@@ -301,7 +1196,12 @@ enum bpf_link_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
+/* Generic attachment flags. */
#define BPF_F_REPLACE (1U << 2)
+#define BPF_F_BEFORE (1U << 3)
+#define BPF_F_AFTER (1U << 4)
+#define BPF_F_ID (1U << 5)
+#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@@ -310,7 +1210,7 @@ enum bpf_link_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
-/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
+/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will allow any alignment whatsoever. On platforms
 * with strict alignment requirements for loads and stores (such
* as sparc and mips) the verifier validates that all loads and
@@ -345,24 +1245,103 @@ enum bpf_link_type {
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_STATE_FREQ (1U << 3)
+/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
+ * restrict map and helper usage for such programs. Sleepable BPF programs can
+ * only be attached to hooks where kernel execution context allows sleeping.
+ * Such programs are allowed to use helpers that may sleep like
+ * bpf_copy_from_user().
+ */
+#define BPF_F_SLEEPABLE (1U << 4)
+
+/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
+ * fully supports xdp frags.
+ */
+#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+
+/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
+ * program becomes device-bound but can access XDP metadata.
+ */
+#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+
+/* The verifier internal test flag. Behavior is undefined */
+#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
+
+/* link_create.kprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
+ */
+enum {
+ BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.uprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
+ */
+enum {
+ BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.netfilter.flags used in LINK_CREATE command for
+ * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
+ */
+#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
+
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
- * two extensions:
- *
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd map fd
- * insn[1].imm: 0 offset into value
- * insn[0].off: 0 0
- * insn[1].off: 0 0
- * ldimm64 rewrite: address of map address of map[0]+offset
- * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ * the following extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
+ * insn[0].imm: map fd or fd_idx
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map
+ * verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
-#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX 5
+
+/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
+ * insn[0].imm: map fd or fd_idx
+ * insn[1].imm: offset into value
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map[0]+offset
+ * verifier type: PTR_TO_MAP_VALUE
+ */
+#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX_VALUE 6
+
+/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
+ * insn[0].imm: kernel btf id of VAR
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the kernel variable
+ * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
+ * is struct/union.
+ */
+#define BPF_PSEUDO_BTF_ID 3
+/* insn[0].src_reg: BPF_PSEUDO_FUNC
+ * insn[0].imm: insn offset to the func
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the function
+ * verifier type: PTR_TO_FUNC.
+ */
+#define BPF_PSEUDO_FUNC 4
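+
+/* For illustration, a loader emits a BPF_PSEUDO_MAP_FD load as a
+ * two-instruction ldimm64 pair (a sketch; field names as in
+ * struct bpf_insn, map_fd being the loader's map file descriptor):
+ *
+ *	struct bpf_insn ld_map[2] = {
+ *		{ .code = BPF_LD | BPF_DW | BPF_IMM,
+ *		  .dst_reg = BPF_REG_1,
+ *		  .src_reg = BPF_PSEUDO_MAP_FD,
+ *		  .imm = map_fd },
+ *		{ .imm = 0 },	// second half of the ldimm64 pair
+ *	};
+ */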
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
+/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
+ * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
+ */
+#define BPF_PSEUDO_KFUNC_CALL 2
+
+enum bpf_addr_space_cast {
+ BPF_ADDR_SPACE_CAST = 1,
+};
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
@@ -404,16 +1383,47 @@ enum {
/* Enable memory-mapping BPF map */
BPF_F_MMAPABLE = (1U << 10),
+
+/* Share perf_event among processes */
+ BPF_F_PRESERVE_ELEMS = (1U << 11),
+
+/* Create a map that is suitable to be an inner map with dynamic max entries */
+ BPF_F_INNER_MAP = (1U << 12),
+
+/* Create a map that will be registered/unregistered by the backing bpf_link */
+ BPF_F_LINK = (1U << 13),
+
+/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
+ BPF_F_PATH_FD = (1U << 14),
+
+/* Flag for value_type_btf_obj_fd, the fd is available */
+ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+/* BPF token FD is passed in a corresponding command's token_fd field */
+ BPF_F_TOKEN_FD = (1U << 16),
+
+/* When user space page-faults in a bpf_arena, send SIGSEGV instead of inserting a new page */
+ BPF_F_SEGV_ON_FAULT = (1U << 17),
+
+/* Do not translate kernel bpf_arena pointers to user pointers */
+ BPF_F_NO_USER_CONV = (1U << 18),
};
/* Flags for BPF_PROG_QUERY. */
/* Query effective (directly attached + inherited from ancestor cgroups)
* programs that will be executed for events within a cgroup.
- * attach_flags with this flag are returned only for directly attached programs.
+ * attach_flags with this flag are always returned as 0.
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+/* If set, XDP frames will be transmitted after processing */
+#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
+
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
/* enabled run_time_ns and run_cnt */
@@ -463,6 +1473,25 @@ union bpf_attr {
* struct stored as the
* map value
*/
+ /* Any per-map-type extra fields
+ *
+ * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
+ * number of hash functions (if 0, the bloom filter will default
+ * to using 5 hash functions).
+ *
+ * BPF_MAP_TYPE_ARENA - contains the address where user space
+ * is going to mmap() the arena. It has to be page aligned.
+ */
+ __u64 map_extra;
+
+ __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
+ * type data for
+ * btf_vmlinux_value_type_id.
+ */
+ /* BPF token FD to use with BPF_MAP_CREATE operation.
+ * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -517,24 +1546,54 @@ union bpf_attr {
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
__u32 attach_btf_id; /* in-kernel BTF type id to attach to */
- __u32 attach_prog_fd; /* 0 to attach to vmlinux */
+ union {
+ /* valid prog_fd to attach to bpf prog */
+ __u32 attach_prog_fd;
+ /* or valid module BTF object fd or 0 to attach to vmlinux */
+ __u32 attach_btf_obj_fd;
+ };
+ __u32 core_relo_cnt; /* number of bpf_core_relo */
+ __aligned_u64 fd_array; /* array of FDs */
+ __aligned_u64 core_relos;
+ __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
+ /* output: actual total log contents size (including terminating zero).
+ * It can be larger than the original log_size (if the log was
+ * truncated) or smaller (if the log buffer wasn't filled completely).
+ */
+ __u32 log_true_size;
+ /* BPF token FD to use with BPF_PROG_LOAD operation.
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
+ /* Same as dirfd in openat() syscall; see openat(2)
+ * manpage for details of path FD and pathname semantics;
+ * path_fd should be accompanied by the BPF_F_PATH_FD flag
+ * set in the file_flags field, otherwise it should be set
+ * to zero; if the BPF_F_PATH_FD flag is not set, AT_FDCWD
+ * is assumed.
+ */
+ __s32 path_fd;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
- __u32 target_fd; /* container object to attach to */
- __u32 attach_bpf_fd; /* eBPF program to attach */
+ union {
+ __u32 target_fd; /* target object to attach to or ... */
+ __u32 target_ifindex; /* target ifindex */
+ };
+ __u32 attach_bpf_fd;
__u32 attach_type;
__u32 attach_flags;
- __u32 replace_bpf_fd; /* previously attached eBPF
- * program to replace if
- * BPF_F_REPLACE is used
- */
+ __u32 replace_bpf_fd;
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@@ -556,6 +1615,9 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
+ __u32 batch_size;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -577,12 +1639,26 @@ union bpf_attr {
} info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
- __u32 target_fd; /* container object to query */
+ union {
+ __u32 target_fd; /* target object to query or ... */
+ __u32 target_ifindex; /* target ifindex */
+ };
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__aligned_u64 prog_ids;
- __u32 prog_cnt;
+ union {
+ __u32 prog_cnt;
+ __u32 count;
+ };
+ __u32 :32;
+ /* output: per-program attach_flags.
+ * not allowed to be set during effective query.
+ */
+ __aligned_u64 prog_attach_flags;
+ __aligned_u64 link_ids;
+ __aligned_u64 link_attach_flags;
+ __u64 revision;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -596,6 +1672,16 @@ union bpf_attr {
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
+ /* output: actual total log contents size (including terminating zero).
+ * It can be larger than the original log_size (if the log was
+ * truncated) or smaller (if the log buffer wasn't filled completely).
+ */
+ __u32 btf_log_true_size;
+ __u32 btf_flags;
+ /* BPF token FD to use with BPF_BTF_LOAD operation.
+ * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 btf_token_fd;
};
struct {
@@ -615,25 +1701,96 @@ union bpf_attr {
} task_fd_query;
struct { /* struct used by BPF_LINK_CREATE command */
- __u32 prog_fd; /* eBPF program to attach */
union {
- __u32 target_fd; /* object to attach to */
- __u32 target_ifindex; /* target ifindex */
+ __u32 prog_fd; /* eBPF program to attach */
+ __u32 map_fd; /* struct_ops to attach */
+ };
+ union {
+ __u32 target_fd; /* target object to attach to or ... */
+ __u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
- __aligned_u64 iter_info; /* extra bpf_iter_link_info */
- __u32 iter_info_len; /* iter_info length */
+ union {
+ __u32 target_btf_id; /* btf_id of target to attach to */
+ struct {
+ __aligned_u64 iter_info; /* extra bpf_iter_link_info */
+ __u32 iter_info_len; /* iter_info length */
+ };
+ struct {
+ /* black box user-provided value passed through
+ * to BPF program at the execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 bpf_cookie;
+ } perf_event;
+ struct {
+ __u32 flags;
+ __u32 cnt;
+ __aligned_u64 syms;
+ __aligned_u64 addrs;
+ __aligned_u64 cookies;
+ } kprobe_multi;
+ struct {
+ /* this is overlaid with the target_btf_id above. */
+ __u32 target_btf_id;
+ /* black box user-provided value passed through
+ * to BPF program at the execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 cookie;
+ } tracing;
+ struct {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+ } netfilter;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } tcx;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 cnt;
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } netkit;
+ };
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
__u32 link_fd; /* link fd */
- /* new program fd to update link with */
- __u32 new_prog_fd;
+ union {
+ /* new program fd to update link with */
+ __u32 new_prog_fd;
+ /* new struct_ops map fd to update link with */
+ __u32 new_map_fd;
+ };
__u32 flags; /* extra flags */
- /* expected link's program fd; is specified only if
- * BPF_F_REPLACE flag is set in flags */
- __u32 old_prog_fd;
+ union {
+ /* expected link's program fd; is specified only if
+ * BPF_F_REPLACE flag is set in flags.
+ */
+ __u32 old_prog_fd;
+ /* expected link's map fd; is specified only
+ * if BPF_F_REPLACE flag is set.
+ */
+ __u32 old_map_fd;
+ };
} link_update;
struct {
@@ -649,6 +1806,17 @@ union bpf_attr {
__u32 flags;
} iter_create;
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
+ struct { /* struct used by BPF_TOKEN_CREATE command */
+ __u32 flags;
+ __u32 bpffs_fd;
+ } token_create;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -656,7 +1824,7 @@ union bpf_attr {
* parsed and used to produce a manual page. The workflow is the following,
* and requires the rst2man utility:
*
- * $ ./scripts/bpf_helpers_doc.py \
+ * $ ./scripts/bpf_doc.py \
* --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
* $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
* $ man /tmp/bpf-helpers.7
@@ -721,17 +1889,17 @@ union bpf_attr {
* Description
* This helper is a "printk()-like" facility for debugging. It
* prints a message defined by format *fmt* (of size *fmt_size*)
- * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
+ * to file *\/sys/kernel/tracing/trace* from TraceFS, if
* available. It can take up to three additional **u64**
 * arguments (as an eBPF helper, the total number of arguments is
* limited to five).
*
* Each time the helper is called, it appends a line to the trace.
- * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
- * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
+ * Lines are discarded while *\/sys/kernel/tracing/trace* is
+ * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
* The format of the trace is customizable, and the exact output
* one will get depends on the options set in
- * *\/sys/kernel/debug/tracing/trace_options* (see also the
+ * *\/sys/kernel/tracing/trace_options* (see also the
* *README* file under the same directory). However, it usually
* defaults to something like:
*
@@ -791,7 +1959,7 @@ union bpf_attr {
* u32 bpf_get_smp_processor_id(void)
* Description
* Get the SMP (symmetric multiprocessing) processor id. Note that
- * all programs run with preemption disabled, which means that the
+ * all programs run with migration disabled, which means that the
* SMP processor id is stable during all the execution of the
* program.
* Return
@@ -898,7 +2066,7 @@ union bpf_attr {
* if the maximum number of tail calls has been reached for this
* chain of programs. This limit is defined in the kernel by the
* macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
- * which is currently set to 32.
+ * which is currently set to 33.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -924,9 +2092,13 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
+ * Description
+ * Get the current pid and tgid.
* Return
* A 64-bit integer containing the current tgid and pid, and
* created as such:
@@ -934,6 +2106,8 @@ union bpf_attr {
* *current_task*\ **->pid**.
*
* u64 bpf_get_current_uid_gid(void)
+ * Description
+ * Get the current uid and gid.
* Return
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
@@ -1076,6 +2250,9 @@ union bpf_attr {
* sending the packet. This flag was added for GRE
* encapsulation, but might be used with other protocols
* as well in the future.
+ * **BPF_F_NO_TUNNEL_KEY**
+ * Add a flag to tunnel metadata indicating that no tunnel
+ * key should be set in the resulting tunnel header.
*
* Here is a typical usage on the transmit path:
*
@@ -1408,6 +2585,8 @@ union bpf_attr {
* The 32-bit hash.
*
* u64 bpf_get_current_task(void)
+ * Description
+ * Get the current task.
* Return
* A pointer to the current task struct.
*
@@ -1438,8 +2617,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 1, if current task belongs to the cgroup2.
+ * * 0, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -1471,7 +2650,8 @@ union bpf_attr {
* Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for
- * *len*, then the whole length of the *skb* is pulled.
+ * *len*, then all bytes in the linear part of *skb* will be made
+ * readable and writable.
*
* This helper is only needed for reading and writing with direct
* packet access.
@@ -1521,6 +2701,8 @@ union bpf_attr {
* indicate that the hash is outdated and to trigger a
* recalculation the next time the kernel tries to access this
* hash or when the **bpf_get_hash_recalc**\ () helper is called.
+ * Return
+ * void.
*
* long bpf_get_numa_node_id(void)
* Description
@@ -1592,24 +2774,34 @@ union bpf_attr {
* networking traffic statistics as it provides a global socket
* identifier that can be assumed unique.
* Return
- * A 8-byte long non-decreasing number on success, or 0 if the
- * socket field is missing inside *skb*.
+ * An 8-byte long unique number on success, or 0 if the socket
+ * field is missing inside *skb*.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
* *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
- * A 8-byte long non-decreasing number.
+ * A 8-byte long unique number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
* *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
- * A 8-byte long non-decreasing number.
+ * An 8-byte long unique number.
+ *
+ * u64 bpf_get_socket_cookie(struct sock *sk)
+ * Description
+ * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
+ * *sk*, but gets socket from a BTF **struct sock**. This helper
+ * also works for sleepable programs.
+ * Return
+ * An 8-byte long unique number or 0 if *sk* is NULL.
*
* u32 bpf_get_socket_uid(struct sk_buff *skb)
+ * Description
+ * Get the owner UID of the socket associated with *skb*.
* Return
 * The owner UID of the socket associated with *skb*. If the socket
* is **NULL**, or if it is not a full socket (i.e. if it is a
@@ -1635,8 +2827,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
- * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
- * and **BPF_CGROUP_INET6_CONNECT**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
+ * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
@@ -1644,14 +2836,19 @@ union bpf_attr {
* * **SOL_SOCKET**, which supports the following *optname*\ s:
* **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
* **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
- * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
+ * **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
+ * **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
* * **IPPROTO_TCP**, which supports the following *optname*\ s:
* **TCP_CONGESTION**, **TCP_BPF_IW**,
* **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
* **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
- * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
+ * **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
+ * **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
+ * **TCP_BPF_RTO_MIN**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
- * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * * **IPPROTO_IPV6**, which supports the following *optname*\ s:
+ * **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -1670,10 +2867,12 @@ union bpf_attr {
* There are two supported modes at this time:
*
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
- * (room space is added or removed below the layer 2 header).
+ * (room space is added or removed between the layer 2 and
+ * layer 3 headers).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
- * (room space is added or removed below the layer 3 header).
+ * (room space is added or removed between the layer 3 and
+ * layer 4 headers).
*
* The following flags are supported at this time:
*
@@ -1693,6 +2892,15 @@ union bpf_attr {
* Use with ENCAP_L3/L4 flags to further specify the tunnel
* type; *len* is the length of the inner MAC header.
*
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
+ * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
+ * L2 type as Ethernet.
+ *
+ * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
+ * Indicate the new IP header version after decapsulating the outer
+ * IP header. Used when the inner and outer IP versions are different.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -1701,7 +2909,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
* Description
* Redirect the packet to the endpoint referenced by *map* at
* index *key*. Depending on its type, this *map* can contain
@@ -1713,8 +2921,12 @@ union bpf_attr {
* The lower two bits of *flags* are used as the return code if
* the map lookup fails. This is so that the return value can be
* one of the XDP program return codes up to **XDP_TX**, as chosen
- * by the caller. Any higher bits in the *flags* argument must be
- * unset.
+ * by the caller. The higher bits of *flags* can be set to
+ * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
+ *
+ * With BPF_F_BROADCAST the packet will be broadcast to all the
+ * interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress
+ * interface will be excluded from the broadcast.
*
* See also **bpf_redirect**\ (), which only supports redirecting
* to an ifindex, but doesn't require a map to do so.
@@ -1833,7 +3045,7 @@ union bpf_attr {
*
* long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
@@ -1854,16 +3066,14 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
- * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
- * and **BPF_CGROUP_INET6_CONNECT**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
+ * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
- * It supports the following *level*\ s:
- *
- * * **IPPROTO_TCP**, which supports *optname*
- * **TCP_CONGESTION**.
- * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
- * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * It supports the same set of *optname*\ s that is supported by
+ * the **bpf_setsockopt**\ () helper. The exceptions are the
+ * **TCP_BPF_*** options, which are **bpf_setsockopt**\ () only,
+ * and **TCP_SAVED_SYN**, which is **bpf_getsockopt**\ () only.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -2097,8 +3307,18 @@ union bpf_attr {
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID**
- * Collect buildid+offset instead of ips for user stack,
- * only valid if **BPF_F_USER_STACK** is also specified.
+ * Collect (build_id, file_offset) instead of ips for user
+ * stack, only valid if **BPF_F_USER_STACK** is also
+ * specified.
+ *
+ * *file_offset* is an offset relative to the beginning
+ * of the executable or shared object file backing the vma
+ * which the *ip* falls in. It is *not* an offset relative
+ * to that object's base address. Accordingly, it must be
+ * adjusted by adding (sh_addr - sh_offset), where
+ * sh_{addr,offset} correspond to the executable section
+ * containing *file_offset* in the object, for comparisons
+ * to symbols' st_value to be valid.
*
* **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
@@ -2111,8 +3331,8 @@ union bpf_attr {
*
* # sysctl kernel.perf_event_max_stack=<new value>
* Return
- * A non-negative value equal to or less than *size* on success,
- * or a negative error in case of failure.
+ * The non-negative copied *buf* length equal to or less than
+ * *size* on success, or a negative error in case of failure.
*
* long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
@@ -2155,9 +3375,23 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
* rules.
+ * **BPF_FIB_LOOKUP_TBID**
+ * Used with BPF_FIB_LOOKUP_DIRECT.
+ * Use the routing table ID present in *params*->tbid
+ * for the fib lookup.
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
+ * **BPF_FIB_LOOKUP_SKIP_NEIGH**
+ * Skip the neighbour table lookup. *params*->dmac
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
+ * **BPF_FIB_LOOKUP_SRC**
+ * Derive and set source IP addr in *params*->ipv{4,6}_src
+ * for the nexthop. If the src addr cannot be derived,
+ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
+ * case, *params*->dmac and *params*->smac are not set either.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -2167,6 +3401,9 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
+ * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
+ * was exceeded and the output params->mtu_result contains the MTU.
+ *
* long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
@@ -2204,7 +3441,7 @@ union bpf_attr {
* Description
* This helper is used in programs implementing policies at the
* skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
- * if the verdeict eBPF program returns **SK_PASS**), redirect it
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
* to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
* egress interfaces can be used for redirection. The
@@ -2373,6 +3610,9 @@ union bpf_attr {
* The id is returned or 0 in case the id could not be retrieved.
*
* u64 bpf_get_current_cgroup_id(void)
+ * Description
+ * Get the current cgroup id based on the cgroup within which
+ * the current task is running.
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
@@ -2390,7 +3630,7 @@ union bpf_attr {
* running simultaneously.
*
 * The user must take care of synchronization themselves.
- * For example, by using the **BPF_STX_XADD** instruction to alter
+ * For example, by using the **BPF_ATOMIC** instructions to alter
* the shared data.
* Return
* A pointer to the local storage area.
@@ -2398,7 +3638,7 @@ union bpf_attr {
* long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
* Select a **SO_REUSEPORT** socket from a
- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
 * It checks that the selected socket matches the incoming
* request in the socket buffer.
* Return
@@ -2496,7 +3736,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * long bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(void *sock)
* Description
* Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from
@@ -2676,17 +3916,18 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*.
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains **sizeof**\ (**struct tcphdr**).
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* error otherwise.
@@ -2807,7 +4048,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
* Description
* Get a bpf-local-storage from a *sk*.
*
@@ -2823,6 +4064,9 @@ union bpf_attr {
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf-local-storages residing at *sk*.
*
+ * *sk* is a kernel **struct sock** pointer for LSM program.
+ * *sk* is a **struct bpf_sock** pointer for other program types.
+ *
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used
@@ -2835,13 +4079,14 @@ union bpf_attr {
* **NULL** if not found or there was an error in adding
* a new bpf-local-storage.
*
- * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
* Description
* Delete a bpf-local-storage from a *sk*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
*
* long bpf_send_signal(u32 sig)
* Description
@@ -2858,17 +4103,18 @@ union bpf_attr {
*
* **-EAGAIN** if bpf program can try again.
*
- * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains the length of the TCP header.
+ * contains the length of the TCP header with options (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* On success, lower 32 bits hold the generated SYN cookie in
* followed by 16 bits which hold the MSS value for that cookie,
@@ -2931,10 +4177,10 @@ union bpf_attr {
* string length is larger than *size*, just *size*-1 bytes are
* copied and the last byte is set to NUL.
*
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
+ * On success, returns the number of bytes that were written,
+ * including the terminal NUL. This makes this helper useful in
+ * tracing programs for reading strings, and more importantly to
+ * get its length at runtime. See the following snippet:
*
* ::
*
@@ -2962,7 +4208,7 @@ union bpf_attr {
* **->mm->env_start**: using this helper and the return value,
* one can quickly iterate at the right offset of the memory area.
* Return
- * On success, the strictly positive length of the string,
+ * On success, the strictly positive length of the output string,
* including the trailing NUL character. On error, a negative
* value.
*
@@ -3087,7 +4333,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
* description applies to **BPF_PROG_TYPE_SCHED_CLS** and
@@ -3115,9 +4361,6 @@ union bpf_attr {
* **-EOPNOTSUPP** if the operation is not supported, for example
* a call from outside of TC ingress.
*
- * **-ESOCKTNOSUPPORT** if the socket type is not supported
- * (reuseport).
- *
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
@@ -3185,7 +4428,7 @@ union bpf_attr {
* arguments. The *data* are a **u64** array and corresponding format string
* values are stored in the array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data* array.
- * The *data_len* is the size of *data* in bytes.
+ * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
*
 * Formats **%s**, **%p{i,I}{4,6}** require reading kernel memory.
* Reading kernel memory may fail due to either invalid address or
@@ -3215,11 +4458,11 @@ union bpf_attr {
*
* **-EOVERFLOW** if an overflow happened: The same object will be tried again.
*
- * u64 bpf_sk_cgroup_id(struct bpf_sock *sk)
+ * u64 bpf_sk_cgroup_id(void *sk)
* Description
* Return the cgroup v2 id of the socket *sk*.
*
- * *sk* must be a non-**NULL** pointer to a full socket, e.g. one
+ * *sk* must be a non-**NULL** pointer to a socket, e.g. one
* returned from **bpf_sk_lookup_xxx**\ (),
* **bpf_sk_fullsock**\ (), etc. The format of returned id is
* same as in **bpf_skb_cgroup_id**\ ().
@@ -3229,7 +4472,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level)
+ * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
* Description
* Return id of cgroup v2 that is ancestor of cgroup associated
* with the *sk* at the *ancestor_level*. The root cgroup is at
@@ -3254,12 +4497,20 @@ union bpf_attr {
* of new data availability is sent.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * An adaptive notification is a notification sent whenever the user-space
+ * process has caught up and consumed all available payloads. In case the user-space
+ * process is still processing a previous payload, then no notification is needed
+ * as it will process the newly added payload automatically.
* Return
* 0 on success, or a negative error in case of failure.
*
* void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
* Description
* Reserve *size* bytes of payload in a ring buffer *ringbuf*.
+ * *flags* must be 0.
* Return
* Valid pointer with *size* bytes of memory available; NULL,
* otherwise.
@@ -3271,6 +4522,10 @@ union bpf_attr {
* of new data availability is sent.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
* Return
* Nothing. Always succeeds.
*
@@ -3281,6 +4536,10 @@ union bpf_attr {
* of new data availability is sent.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
* Return
* Nothing. Always succeeds.
*
@@ -3337,38 +4596,40 @@ union bpf_attr {
* Description
* Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
+ * Note: the user stack will only be populated if the *task* is
+ * the current task; for all other tasks, -EOPNOTSUPP is returned.
* To achieve this, the helper needs *task*, which is a valid
- * pointer to struct task_struct. To store the stacktrace, the
- * bpf program provides *buf* with a nonnegative *size*.
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
*
* The last argument, *flags*, holds the number of stack frames to
* skip (from 0 to 255), masked with
@@ -3377,6 +4638,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
+ * The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@@ -3392,161 +4654,1362 @@ union bpf_attr {
*
* # sysctl kernel.perf_event_max_stack=<new value>
* Return
- * A non-negative value equal to or less than *size* on success,
- * or a negative error in case of failure.
+ * The non-negative copied *buf* length equal to or less than
+ * *size* on success, or a negative error in case of failure.
+ *
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ * Description
+ * Load header option. Support reading a particular TCP header
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ * If *flags* is 0, it will search the option from the
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ * has details on what skb_data contains under different
+ * *skops*\ **->op**.
+ *
+ * The first byte of *searchby_res* specifies the
+ * option kind to search for.
+ *
+ * If the searching kind is an experimental kind
+ * (i.e. 253 or 254 according to RFC6994), it also
+ * needs to specify the "magic", which is either
+ * 2 bytes or 4 bytes, and the size of the magic via
+ * the 2nd byte, which is the "kind-length" of a TCP
+ * header option; the "kind-length" also includes the
+ * first 2 bytes, "kind" and "kind-length" itself,
+ * just as a normal TCP header option does.
+ *
+ * For example, to search experimental kind 254 with
+ * 2 byte magic 0xeB9F, the searchby_res should be
+ * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ * To search for the standard window scale option (3),
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ * Note, kind-length must be 0 for a regular option.
+ *
+ * Searching for No-Op (0) and End-of-Option-List (1) is
+ * not supported.
+ *
+ * *len* must be at least 2 bytes which is the minimal size
+ * of a header option.
+ *
+ * Supported flags:
+ *
+ * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ * saved_syn packet or the just-received syn packet.
+ *
+ * Return
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOMSG** if the option is not found.
+ *
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space. Only *len* number of
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
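+ *
+ * A sketch inside a **BPF_PROG_TYPE_SOCK_OPS** program,
+ * searching for the experimental kind 254 with the 2-byte
+ * magic 0xeB9F (assuming the helper declaration provided by
+ * libbpf's bpf_helpers.h):
+ *
+ * ::
+ *
+ *	__u8 search[8] = { 254, 4, 0xeB, 0x9F, };
+ *	int ret;
+ *
+ *	ret = bpf_load_hdr_opt(skops, search, sizeof(search), 0);
+ *	if (ret > 0) {
+ *		// ret bytes of the option are now in search[]
+ *	}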
+ *
+ * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
+ * Description
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ * long. The kind-length does not have to be 4-byte
+ * aligned. The kernel will take care of the padding
+ * and of setting the 4-byte aligned value in th->doff.
+ *
+ * This helper will check for a duplicated option
+ * by searching for the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
+ * Description
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes from all calls will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
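+ * As an illustrative sketch of pairing this helper with
+ * **bpf_store_hdr_opt**\ (), assuming a sock_ops program and a
+ * hypothetical experimental option of kind 254 with the 2-byte
+ * magic 0xeB9F::
+ *
+ *		__u8 my_opt[4] = { 254, 4, 0xeB, 0x9F };
+ *
+ *		if (skops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
+ *			bpf_reserve_hdr_opt(skops, sizeof(my_opt), 0);
+ *		else if (skops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
+ *			bpf_store_hdr_opt(skops, my_opt, sizeof(my_opt), 0);
+ *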
+ * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces that the key must be an inode and the map must also
+ * be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
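+ * A minimal sketch, assuming an LSM program with an *inode*
+ * pointer and a hypothetical **BPF_MAP_TYPE_INODE_STORAGE**
+ * map *inode_map* whose value type is struct my_storage::
+ *
+ *		struct my_storage *st;
+ *
+ *		st = bpf_inode_storage_get(&inode_map, inode, NULL,
+ *					   BPF_LOCAL_STORAGE_GET_F_CREATE);
+ *		if (st)
+ *			st->open_count++;
+ *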
+ * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
+ * Description
+ * Delete a bpf_local_storage from an *inode*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * long bpf_d_path(struct path *path, char *buf, u32 sz)
+ * Description
+ * Return the full path for the given **struct path** object,
+ * which must be a kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
+ * is zero terminated.
+ *
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
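+ * An illustrative sketch, assuming a tracing or LSM program
+ * holding a BTF *path* pointer (the buffer size is arbitrary)::
+ *
+ *		char buf[256];
+ *
+ *		long n = bpf_d_path(path, buf, sizeof(buf));
+ *		if (n < 0)
+ *			return 0;
+ *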
+ * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* and store
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
+ * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
+ * Description
+ * Use BTF to store a string representation of *ptr*->ptr in *str*,
+ * using *ptr*->type_id. This value should specify the type
+ * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
+ * can be used to look up vmlinux BTF type ids. Traversing the
+ * data structure using BTF, the type information and values are
+ * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
+ * the pointer data is carried out to avoid kernel crashes during
+ * operation. Smaller types can use string space on the stack,
+ * while larger types can use map data to store the string
+ * representation.
+ *
+ * The string can be subsequently shared with userspace via
+ * bpf_perf_event_output() or ring buffer interfaces.
+ * bpf_trace_printk() is to be avoided as it places too small
+ * a limit on string size to be useful.
+ *
+ * *flags* is a combination of
+ *
+ * **BTF_F_COMPACT**
+ * no formatting around type information
+ * **BTF_F_NONAME**
+ * no struct/union member names/types
+ * **BTF_F_PTR_RAW**
+ * show raw (unobfuscated) pointer values;
+ * equivalent to printk specifier %px.
+ * **BTF_F_ZERO**
+ * show zero-valued struct/union members; they
+ * are not displayed by default
+ *
+ * Return
+ * The number of bytes that were written (or would have been
+ * written if output had to be truncated due to string size),
+ * or a negative error in cases of failure.
+ *
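+ * A sketch, assuming a tracing program with a valid
+ * task_struct pointer *task*; the type id here is resolved
+ * with libbpf's bpf_core_type_id_kernel() convenience macro::
+ *
+ *		static char str[512];
+ *		struct btf_ptr p = {
+ *			.ptr = task,
+ *			.type_id = bpf_core_type_id_kernel(struct task_struct),
+ *		};
+ *
+ *		bpf_snprintf_btf(str, sizeof(str), &p, sizeof(p),
+ *				 BTF_F_COMPACT);
+ *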
+ * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ * Description
+ * Use BTF to write to seq_write a string representation of
+ * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
+ * *flags* are identical to those used for bpf_snprintf_btf.
+ * Return
+ * 0 on success or a negative error in case of failure.
+ *
+ * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
+ * Description
+ * See **bpf_get_cgroup_classid**\ () for the main description.
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that
+ * the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ * associated socket instead of the current process.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*
+ * and fill in L2 addresses from neighboring subsystem. This helper
+ * is somewhat similar to **bpf_redirect**\ (), except that it
+ * populates L2 addresses as well, meaning, internally, the helper
+ * relies on the neighbor lookup for the L2 address of the nexthop.
+ *
+ * The helper will perform a FIB lookup based on the skb's
+ * networking header to get the address of the next hop, unless
+ * this is supplied by the caller in the *params* argument. The
+ * *plen* argument indicates the len of *params* and should be set
+ * to 0 if *params* is NULL.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
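+ * In its simplest form, from a tc BPF program, the FIB and
+ * neighbor lookups can be left entirely to the kernel
+ * (*ifindex* is assumed to be known)::
+ *
+ *		return bpf_redirect_neigh(ifindex, NULL, 0, 0);
+ *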
+ * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For a ksym, there is a
+ * global var (either static or global) defined with the same name
+ * in the kernel. The ksym is percpu if the global var is percpu.
+ * The returned pointer points to the global percpu var on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantics as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ * Return
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
+ *
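+ * A sketch, assuming the BPF program declares a ksym mirroring
+ * the kernel's percpu runqueues variable (as done in the kernel
+ * selftests)::
+ *
+ *		extern const struct rq runqueues __ksym;
+ *
+ *		struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
+ *		if (!rq)
+ *			return 0;
+ *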
+ * void *bpf_this_cpu_ptr(const void *percpu_ptr)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantics as this_cpu_ptr() in
+ * the kernel. Unlike **bpf_per_cpu_ptr**\ (), it can
+ * never return NULL.
+ * Return
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*'s peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the
+ * ingress hook and for veth and netkit target device types. The
+ * peer device must reside in a different network namespace.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from the *task*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *task* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
+ * helper enforces that the key must be a task_struct and the map must also
+ * be a **BPF_MAP_TYPE_TASK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *task* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *task*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
+ * Description
+ * Delete a bpf_local_storage from a *task*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * struct task_struct *bpf_get_current_task_btf(void)
+ * Description
+ * Return a BTF pointer to the "current" task.
+ * This pointer can also be used in helpers that accept an
+ * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
+ * Return
+ * Pointer to the current task.
+ *
+ * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
+ * Description
+ * Set or clear certain options on *bprm*:
+ *
+ * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
+ * which sets the **AT_SECURE** auxv for glibc. The bit
+ * is cleared if the flag is not specified.
+ * Return
+ * **-EINVAL** if invalid *flags* are passed, zero otherwise.
+ *
+ * u64 bpf_ktime_get_coarse_ns(void)
+ * Description
+ * Return a coarse-grained version of the time elapsed since
+ * system boot, in nanoseconds. Does not include time the system
+ * was suspended.
+ *
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
+ * Description
+ * Returns the stored IMA hash of the *inode* (if it's available).
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ * Return
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
+ * invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ * Description
+ * If the given file represents a socket, returns the associated
+ * socket.
+ * Return
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
+ *
+ * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
+ * Description
+ * Check whether the packet size exceeds the MTU of the net device
+ * (based on *ifindex*). This helper will likely be used in combination
+ * with helpers that adjust/change the packet size.
+ *
+ * The argument *len_diff* can be used for querying with a planned
+ * size change. This allows checking the MTU prior to changing the
+ * packet ctx. Providing a *len_diff* adjustment that is larger than the
+ * actual packet size (resulting in negative packet size) will in
+ * principle not exceed the MTU, which is why it is not considered
+ * a failure. Other BPF helpers are needed for performing the
+ * planned size change; therefore the responsibility for catching
+ * a negative packet size belongs in those helpers.
+ *
+ * Specifying *ifindex* zero means the MTU check is performed
+ * against the current net device. This is practical if the check
+ * isn't used prior to a redirect.
+ *
+ * On input *mtu_len* must be a valid pointer, else the verifier
+ * will reject the BPF program. If the value *mtu_len* is
+ * initialized to zero then the ctx packet size is used. When the
+ * value *mtu_len* is provided as input, it specifies the L3 length
+ * that the MTU check is done against. Remember that XDP and TC
+ * lengths operate at L2, but this value is L3, as it correlates to
+ * the MTU and IP-header tot_len values, which are L3 (similar
+ * behavior as bpf_fib_lookup).
+ *
+ * The Linux kernel route table can configure MTUs on a more
+ * specific per route level, which is not provided by this helper.
+ * For route level MTU checks use the **bpf_fib_lookup**\ ()
+ * helper.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ *
+ * The *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_MTU_CHK_SEGS**
+ * This flag only works for *ctx* **struct sk_buff**.
+ * If the packet context contains extra packet segment buffers
+ * (often known as a GSO skb), then the MTU is harder to
+ * check at this point, because in the transmit path it is
+ * possible for the skb packet to get re-segmented
+ * (depending on net device features). This could still be
+ * an MTU violation, so this flag enables performing the MTU
+ * check against segments, with a different violation
+ * return code to tell it apart. This check cannot use *len_diff*.
+ *
+ * On return the *mtu_len* pointer contains the MTU value of the
+ * net device. Remember that the net device's configured MTU is
+ * the L3 size, which is what is returned here, while XDP and TC
+ * lengths operate at L2. The helper takes this into account for
+ * you, but keep it in mind when using the MTU value in your BPF
+ * code.
+ *
+ * Return
+ * * 0 on success, and populate MTU value in *mtu_len* pointer.
+ *
+ * * < 0 if any input argument is invalid (*mtu_len* not updated)
+ *
+ * MTU violations return positive values, but also populate the MTU
+ * value in the *mtu_len* pointer, as this can be needed for
+ * implementing PMTU handling:
+ *
+ * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
+ * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
+ *
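+ * A sketch for an XDP program, checking whether growing the
+ * packet by a previously computed *len_diff* would exceed the
+ * current device's MTU::
+ *
+ *		__u32 mtu_len = 0;
+ *
+ *		int ret = bpf_check_mtu(ctx, 0, &mtu_len, len_diff, 0);
+ *		if (ret == BPF_MTU_CHK_RET_FRAG_NEEDED)
+ *			return XDP_DROP;
+ *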
+ * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * **flags** is used to control certain aspects of the helper.
+ * Currently, **flags** must be 0.
+ *
+ * The following is a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where the
+ * bpf_prog is running.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * element. If the return value is 1, the helper will skip the rest
+ * of the elements and return. Other return values are not used now.
+ *
+ * Return
+ * The number of traversed map elements for success, **-EINVAL** for
+ * invalid **flags**.
+ *
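+ * A sketch of a callback counting non-zero values in a
+ * hypothetical array map *my_array* with __u64 values::
+ *
+ *		static long count_cb(struct bpf_map *map, const void *key,
+ *				     void *value, void *ctx)
+ *		{
+ *			if (*(__u64 *)value)
+ *				(*(__u64 *)ctx)++;
+ *			return 0;
+ *		}
+ *
+ *		__u64 cnt = 0;
+ *		bpf_for_each_map_elem(&my_array, count_cb, &cnt, 0);
+ *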
+ * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
+ * Description
+ * Outputs a string into the **str** buffer of size **str_size**
+ * based on a format string stored in a read-only map pointed by
+ * **fmt**.
+ *
+ * Each format specifier in **fmt** corresponds to one u64 element
+ * in the **data** array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data*
+ * array. The *data_len* is the size of *data* in bytes and must
+ * be a multiple of 8.
+ *
+ * Formats **%s** and **%p{i,I}{4,6}** require reading kernel
+ * memory. Reading kernel memory may fail due to either an invalid
+ * address, or a valid address requiring a major memory fault. If
+ * reading kernel memory fails, the string for **%s** will be an
+ * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
+ * Not returning an error to the bpf program is consistent with
+ * what **bpf_trace_printk**\ () does for now.
+ *
+ * Return
+ * The strictly positive length of the formatted string, including
+ * the trailing zero character. If the return value is greater than
+ * **str_size**, **str** contains a truncated string, guaranteed to
+ * be zero-terminated except when **str_size** is 0.
+ *
+ * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
+ *
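+ * A sketch; *pid* and *comm* are assumed to have been obtained
+ * elsewhere, and the format string lives in read-only storage::
+ *
+ *		static const char fmt[] = "pid %d comm %s";
+ *		char out[64];
+ *		__u64 data[] = { pid, (__u64)(long)comm };
+ *
+ *		bpf_snprintf(out, sizeof(out), fmt, data, sizeof(data));
+ *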
+ * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
+ * Description
+ * Execute bpf syscall with given arguments.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
+ * Description
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ * Return
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ *
+ * long bpf_sys_close(u32 fd)
+ * Description
+ * Execute close syscall for given FD.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
+ * Description
+ * Initialize the timer.
+ * First 4 bits of *flags* specify clockid.
+ * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
+ * All other bits of *flags* are reserved.
+ * The verifier will reject the program if *timer* is not from
+ * the same *map*.
+ * Return
+ * 0 on success.
+ * **-EBUSY** if *timer* is already initialized.
+ * **-EINVAL** if invalid *flags* are passed.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
+ * Description
+ * Configure the timer to call *callback_fn* static function.
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
+ * Description
+ * Set the timer expiration to N nanoseconds from the current time.
+ * The configured callback will be invoked in soft irq context on
+ * some cpu and will not repeat unless another bpf_timer_start() is
+ * made. In such a case the next invocation can migrate to a
+ * different cpu. Since struct bpf_timer is a field inside a map
+ * element, the map owns the timer. bpf_timer_set_callback() will
+ * increment the refcnt of the BPF program to make sure that the
+ * callback_fn code stays valid. When the user space reference to a
+ * map reaches zero, all timers in the map are cancelled and the
+ * corresponding programs' refcnts are decremented. This is done to
+ * make sure that Ctrl-C of a user process doesn't leave any timers
+ * running. If the map is pinned in bpffs, the callback_fn can
+ * re-arm itself indefinitely.
+ * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
+ * cancel and free the timer in the given map element.
+ * The map can contain timers that invoke callback_fn-s from different
+ * programs. The same callback_fn can serve different timers from
+ * different maps if key/value layout matches across maps.
+ * Every bpf_timer_set_callback() can have different callback_fn.
+ *
+ * *flags* can be one of:
+ *
+ * **BPF_F_TIMER_ABS**
+ * Start the timer in absolute expire value instead of the
+ * default relative one.
+ * **BPF_F_TIMER_CPU_PIN**
+ * Timer will be pinned to the CPU of the caller.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
+ * or invalid *flags* are passed.
+ *
+ * long bpf_timer_cancel(struct bpf_timer *timer)
+ * Description
+ * Cancel the timer and wait for callback_fn to finish if it was running.
+ * Return
+ * 0 if the timer was not active.
+ * 1 if the timer was active.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
+ * own timer which would have led to a deadlock otherwise.
+ *
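+ * A lifecycle sketch tying the four timer helpers together,
+ * assuming a map value *val* embedding a struct bpf_timer field
+ * named t in a hypothetical map *my_map*, with a previously
+ * defined static callback my_timer_cb; the timer fires once
+ * after one second::
+ *
+ *		bpf_timer_init(&val->t, &my_map, CLOCK_MONOTONIC);
+ *		bpf_timer_set_callback(&val->t, my_timer_cb);
+ *		bpf_timer_start(&val->t, 1000000000, 0);
+ *
+ *		bpf_timer_cancel(&val->t);
+ *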
+ * u64 bpf_get_func_ip(void *ctx)
+ * Description
+ * Get address of the traced function (for tracing and kprobe programs).
+ *
+ * When called for a kprobe program attached as a uprobe, it
+ * returns the probe address for both entry and return uprobes.
+ *
+ * Return
+ * Address of the traced function for kprobe.
+ * 0 for kprobes placed within the function (not at the entry).
+ * Address of the probe for uprobe and return uprobe.
+ *
+ * u64 bpf_get_attach_cookie(void *ctx)
+ * Description
+ * Get bpf_cookie value provided (optionally) during the program
+ * attachment. It might be different for each individual
+ * attachment, even if BPF program itself is the same.
+ * Expects BPF program context *ctx* as a first argument.
+ *
+ * Supported for the following program types:
+ * - kprobe/uprobe;
+ * - tracepoint;
+ * - perf_event.
+ * Return
+ * Value specified by user at BPF link creation/attachment time
+ * or 0, if it was not specified.
+ *
+ * long bpf_task_pt_regs(struct task_struct *task)
+ * Description
+ * Get the struct pt_regs associated with **task**.
+ * Return
+ * A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ * called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ * Return
+ * On success, the number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ *
+ * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
+ * Description
+ * Get the address of a kernel symbol, returned in *res*. *res* is
+ * set to 0 if the symbol is not found.
+ * Return
+ * On success, zero. On error, a negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ * **-ENOENT** if symbol is not found.
+ *
+ * **-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * Find the vma of *task* that contains *addr*, then call the
+ * *callback_fn* function with *task*, *vma*, and *callback_ctx*.
+ * The *callback_fn* should be a static function and
+ * the *callback_ctx* should be a pointer to the stack.
+ * *flags* is used to control certain aspects of the helper.
+ * Currently, *flags* must be 0.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ * Return
+ * 0 on success.
+ * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ * **-EBUSY** if failed to try lock mmap_lock.
+ * **-EINVAL** for invalid **flags**.
+ *
+ * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For **nr_loops**, call **callback_fn** function
+ * with **callback_ctx** as the context parameter.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * **flags** is used to control certain aspects of the helper.
+ * Currently, **flags** must be 0, and **nr_loops** is
+ * limited to 1 << 23 (~8 million) loops.
+ *
+ * long (\*callback_fn)(u32 index, void \*ctx);
+ *
+ * where **index** is the current index in the loop. The index
+ * is zero-indexed.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * loop. If return value is 1, the helper will skip the rest of
+ * the loops and return. Other return values are not used now,
+ * and will be rejected by the verifier.
+ *
+ * Return
+ * The number of loops performed, **-EINVAL** for invalid **flags**,
+ * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
+ *
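+ * A sketch summing the first hundred indices via a static
+ * callback::
+ *
+ *		static long sum_cb(__u32 index, void *ctx)
+ *		{
+ *			*(__u64 *)ctx += index;
+ *			return 0;
+ *		}
+ *
+ *		__u64 sum = 0;
+ *		bpf_loop(100, sum_cb, &sum, 0);
+ *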
+ * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
+ * Description
+ * Do strncmp() between **s1** and **s2**. **s1** doesn't need
+ * to be null-terminated and **s1_sz** is the maximum storage
+ * size of **s1**. **s2** must be a read-only string.
+ * Return
+ * An integer less than, equal to, or greater than zero
+ * if the first **s1_sz** bytes of **s1** are found to be
+ * less than, to match, or be greater than **s2**.
+ *
+ * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
+ * Description
+ * Get the **n**-th argument register (zero-based) of the traced
+ * function (for tracing programs), returned in **value**.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if n >= argument register count of traced function.
+ *
+ * long bpf_get_func_ret(void *ctx, u64 *value)
+ * Description
+ * Get the return value of the traced function (for tracing programs)
+ * in **value**.
+ *
+ * Return
+ * 0 on success.
+ * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
+ *
+ * long bpf_get_func_arg_cnt(void *ctx)
+ * Description
+ * Get the number of argument registers of the traced function,
+ * i.e. the registers in which the function arguments are stored
+ * (for tracing programs).
+ *
+ * Return
+ * The number of argument registers of the traced function.
+ *
+ * int bpf_get_retval(void)
+ * Description
+ * Get the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where BPF program's return value is returned to the userspace via errno.
+ * Return
+ * The BPF program's return value.
+ *
+ * int bpf_set_retval(int retval)
+ * Description
+ * Set the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where BPF program's return value is returned to the userspace via errno.
+ *
+ * Note that there is the following corner case where the program exports an error
+ * via bpf_set_retval but signals success via 'return 1':
+ *
+ * bpf_set_retval(-EPERM);
+ * return 1;
+ *
+ * In this case, the BPF program's return value will use the helper's
+ * -EPERM. This still holds true for cgroup/bind{4,6}, which supports
+ * the extra 'return 3' success case.
+ *
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
+ * Description
+ * Get the total size of a given xdp buff (linear and paged area).
+ * Return
+ * The total size of a given xdp buffer.
+ *
+ * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
+ * Description
+ * This helper is provided as an easy way to load data from an
+ * xdp buffer. It can be used to load *len* bytes from *offset* of
+ * the frame associated with *xdp_md*, into the buffer pointed to
+ * by *buf*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
+ * Description
+ * Store *len* bytes from buffer *buf* into the frame
+ * associated with *xdp_md*, at *offset*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* in *tsk*'s
+ * address space, and store the data in *dst*. *flags* is not
+ * used yet and is provided for future extensibility. This helper
+ * can only be used by sleepable programs.
+ * Return
+ * 0 on success, or a negative error in case of failure. On error
+ * *dst* buffer is zeroed out.
+ *
+ * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
+ * Description
+ * Change __sk_buff->tstamp_type to *tstamp_type*
+ * and set __sk_buff->tstamp to *tstamp* at the same time.
+ *
+ * If there is no need to change the __sk_buff->tstamp_type,
+ * the tstamp value can be directly written to __sk_buff->tstamp
+ * instead.
+ *
+ * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
+ * will be kept during bpf_redirect_*(). A non-zero
+ * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
+ * *tstamp_type*.
+ *
+ * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
+ * with a zero *tstamp*.
+ *
+ * Only IPv4 and IPv6 skb->protocol are supported.
+ *
+ * This helper is most useful when the program needs to set a
+ * mono delivery time in __sk_buff->tstamp and then call
+ * bpf_redirect_*() to the egress of an iface. For example,
+ * changing the (rcv) timestamp in __sk_buff->tstamp at
+ * ingress to a mono delivery time and then calling
+ * bpf_redirect_*() to sch_fq@phy-dev.
+ * Return
+ * 0 on success.
+ * **-EINVAL** for invalid input.
+ * **-EOPNOTSUPP** for an unsupported protocol.
+ *
+ * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
+ * Description
+ * Returns a calculated IMA hash of the *file*.
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ * Return
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
+ * invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ * The old value of the kptr (which can be NULL). The returned
+ * pointer, if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ * Description
+ * Perform a lookup in *percpu map* for an entry associated to
+ * *key* on *cpu*.
+ * Return
+ * Map value associated to *key* on *cpu*, or **NULL** if no entry
+ * was found or *cpu* is invalid.
+ *
+ * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Get a dynptr to local memory *data*.
+ *
+ * *data* must be a pointer to a map value.
+ * The maximum *size* supported is DYNPTR_MAX_SIZE.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
+ * -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ * through the dynptr interface. *flags* must be 0.
+ *
+ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ * reservation fails. This is enforced by the verifier.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Submit the reserved ring buffer sample, pointed to by *ptr*,
+ * through the dynptr interface. This is a no-op if the dynptr is
+ * invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_submit'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample through the dynptr
+ * interface. This is a no-op if the dynptr is invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_discard'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
+ * Description
+ * Read *len* bytes from *src* into *dst*, starting from *offset*
+ * into *src*.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ * *flags* is not 0.
+ *
+ * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
+ * Description
+ * Write *len* bytes from *src* into *dst*, starting from *offset*
+ * into *dst*.
+ *
+ * *flags* must be 0 except for skb-type dynptrs.
+ *
+ * For skb-type dynptrs:
+ * * All data slices of the dynptr are automatically
+ * invalidated after **bpf_dynptr_write**\ (). This is
+ * because writing may pull the skb and change the
+ * underlying packet buffer.
+ *
+ * * For *flags*, please see the flags accepted by
+ * **bpf_skb_store_bytes**\ ().
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+ * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
+ * other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
+ *
+ * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
+ * Description
+ * Get a pointer to the underlying dynptr data.
+ *
+ * *len* must be a statically known value. The returned data slice
+ * is invalidated whenever the dynptr is invalidated.
+ *
+ * skb and xdp type dynptrs may not use bpf_dynptr_data. They should
+ * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
+ * Return
+ * Pointer to the underlying dynptr data, NULL if the dynptr is
+ * read-only, if the dynptr is invalid, or if the offset and length
+ * are out of bounds.
+ *
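+ * A sketch combining the dynptr ring buffer helpers above,
+ * assuming a hypothetical **BPF_MAP_TYPE_RINGBUF** map *rb*;
+ * note the unconditional submit, which is required even when the
+ * reservation fails::
+ *
+ *		struct bpf_dynptr ptr;
+ *		void *data;
+ *
+ *		bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &ptr);
+ *		data = bpf_dynptr_data(&ptr, 0, 64);
+ *		if (data)
+ *			__builtin_memset(data, 0, 64);
+ *		bpf_ringbuf_submit_dynptr(&ptr, 0);
+ *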
+ * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv4/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv6/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not built in.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not built in.
+ *
+ * u64 bpf_ktime_get_tai_ns(void)
+ * Description
+ * Return the time from a nonsettable system-wide clock derived
+ * from wall-clock time but ignoring leap seconds. This clock does
+ * not experience the discontinuities and backwards jumps caused by
+ * NTP inserting leap seconds, as CLOCK_REALTIME does.
+ *
+ * See: **clock_gettime**\ (**CLOCK_TAI**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
+ * Description
+ * Drain samples from the specified user ring buffer, and invoke
+ * the provided callback for each such sample:
+ *
+ * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
+ *
+ * If **callback_fn** returns 0, the helper will continue to try
+ * and drain the next sample, up to a maximum of
+ * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
+ * the helper will skip the rest of the samples and return. Other
+ * return values are not used now, and will be rejected by the
+ * verifier.
+ * Return
+ * The number of drained samples if no error was encountered while
+ * draining samples, or 0 if no samples were present in the ring
+ * buffer. If a user-space producer was epoll-waiting on this map,
+ * and at least one sample was drained, they will receive an event
+ * notification notifying them of available space in the ring
+ * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
+ * function, no wakeup notification will be sent. If the
+ * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
+ * be sent even if no sample was drained.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EBUSY** if the ring buffer is contended, and another calling
+ * context was concurrently draining the ring buffer.
+ *
+ * **-EINVAL** if user-space is not properly tracking the ring
+ * buffer due to the producer position not being aligned to 8
+ * bytes, a sample not being aligned to 8 bytes, or the producer
+ * position not matching the advertised length of a sample.
+ *
+ * **-E2BIG** if user-space has tried to publish a sample which is
+ * larger than the size of the ring buffer, or which cannot fit
+ * within a struct bpf_dynptr.
+ *
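+ * A sketch, assuming a hypothetical **BPF_MAP_TYPE_USER_RINGBUF**
+ * map *user_rb* and fixed 8-byte samples::
+ *
+ *		static long drain_cb(const struct bpf_dynptr *dynptr,
+ *				     void *ctx)
+ *		{
+ *			__u64 v = 0;
+ *
+ *			bpf_dynptr_read(&v, sizeof(v), dynptr, 0, 0);
+ *			return 0;
+ *		}
+ *
+ *		long n = bpf_user_ringbuf_drain(&user_rb, drain_cb, NULL, 0);
+ *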
+ * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from the *cgroup*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *cgroup* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this
+ * helper enforces that the key must be a cgroup struct and the map must also
+ * be a **BPF_MAP_TYPE_CGRP_STORAGE**.
+ *
+ * In reality, the local-storage value is embedded directly inside of the
+ * *cgroup* object itself, rather than being located in the
+ * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
+ * queried for some *map* on a *cgroup* object, the kernel will perform an
+ * O(n) iteration over all of the live local-storage values for that
+ * *cgroup* object until the local-storage value for the *map* is found.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
+ * Description
+ * Delete a bpf_local_storage from a *cgroup*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
*/
-#define __BPF_FUNC_MAPPER(FN) \
- FN(unspec), \
- FN(map_lookup_elem), \
- FN(map_update_elem), \
- FN(map_delete_elem), \
- FN(probe_read), \
- FN(ktime_get_ns), \
- FN(trace_printk), \
- FN(get_prandom_u32), \
- FN(get_smp_processor_id), \
- FN(skb_store_bytes), \
- FN(l3_csum_replace), \
- FN(l4_csum_replace), \
- FN(tail_call), \
- FN(clone_redirect), \
- FN(get_current_pid_tgid), \
- FN(get_current_uid_gid), \
- FN(get_current_comm), \
- FN(get_cgroup_classid), \
- FN(skb_vlan_push), \
- FN(skb_vlan_pop), \
- FN(skb_get_tunnel_key), \
- FN(skb_set_tunnel_key), \
- FN(perf_event_read), \
- FN(redirect), \
- FN(get_route_realm), \
- FN(perf_event_output), \
- FN(skb_load_bytes), \
- FN(get_stackid), \
- FN(csum_diff), \
- FN(skb_get_tunnel_opt), \
- FN(skb_set_tunnel_opt), \
- FN(skb_change_proto), \
- FN(skb_change_type), \
- FN(skb_under_cgroup), \
- FN(get_hash_recalc), \
- FN(get_current_task), \
- FN(probe_write_user), \
- FN(current_task_under_cgroup), \
- FN(skb_change_tail), \
- FN(skb_pull_data), \
- FN(csum_update), \
- FN(set_hash_invalid), \
- FN(get_numa_node_id), \
- FN(skb_change_head), \
- FN(xdp_adjust_head), \
- FN(probe_read_str), \
- FN(get_socket_cookie), \
- FN(get_socket_uid), \
- FN(set_hash), \
- FN(setsockopt), \
- FN(skb_adjust_room), \
- FN(redirect_map), \
- FN(sk_redirect_map), \
- FN(sock_map_update), \
- FN(xdp_adjust_meta), \
- FN(perf_event_read_value), \
- FN(perf_prog_read_value), \
- FN(getsockopt), \
- FN(override_return), \
- FN(sock_ops_cb_flags_set), \
- FN(msg_redirect_map), \
- FN(msg_apply_bytes), \
- FN(msg_cork_bytes), \
- FN(msg_pull_data), \
- FN(bind), \
- FN(xdp_adjust_tail), \
- FN(skb_get_xfrm_state), \
- FN(get_stack), \
- FN(skb_load_bytes_relative), \
- FN(fib_lookup), \
- FN(sock_hash_update), \
- FN(msg_redirect_hash), \
- FN(sk_redirect_hash), \
- FN(lwt_push_encap), \
- FN(lwt_seg6_store_bytes), \
- FN(lwt_seg6_adjust_srh), \
- FN(lwt_seg6_action), \
- FN(rc_repeat), \
- FN(rc_keydown), \
- FN(skb_cgroup_id), \
- FN(get_current_cgroup_id), \
- FN(get_local_storage), \
- FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id), \
- FN(sk_lookup_tcp), \
- FN(sk_lookup_udp), \
- FN(sk_release), \
- FN(map_push_elem), \
- FN(map_pop_elem), \
- FN(map_peek_elem), \
- FN(msg_push_data), \
- FN(msg_pop_data), \
- FN(rc_pointer_rel), \
- FN(spin_lock), \
- FN(spin_unlock), \
- FN(sk_fullsock), \
- FN(tcp_sock), \
- FN(skb_ecn_set_ce), \
- FN(get_listener_sock), \
- FN(skc_lookup_tcp), \
- FN(tcp_check_syncookie), \
- FN(sysctl_get_name), \
- FN(sysctl_get_current_value), \
- FN(sysctl_get_new_value), \
- FN(sysctl_set_new_value), \
- FN(strtol), \
- FN(strtoul), \
- FN(sk_storage_get), \
- FN(sk_storage_delete), \
- FN(send_signal), \
- FN(tcp_gen_syncookie), \
- FN(skb_output), \
- FN(probe_read_user), \
- FN(probe_read_kernel), \
- FN(probe_read_user_str), \
- FN(probe_read_kernel_str), \
- FN(tcp_send_ack), \
- FN(send_signal_thread), \
- FN(jiffies64), \
- FN(read_branch_records), \
- FN(get_ns_current_pid_tgid), \
- FN(xdp_output), \
- FN(get_netns_cookie), \
- FN(get_current_ancestor_cgroup_id), \
- FN(sk_assign), \
- FN(ktime_get_boot_ns), \
- FN(seq_printf), \
- FN(seq_write), \
- FN(sk_cgroup_id), \
- FN(sk_ancestor_cgroup_id), \
- FN(ringbuf_output), \
- FN(ringbuf_reserve), \
- FN(ringbuf_submit), \
- FN(ringbuf_discard), \
- FN(ringbuf_query), \
- FN(csum_level), \
- FN(skc_to_tcp6_sock), \
- FN(skc_to_tcp_sock), \
- FN(skc_to_tcp_timewait_sock), \
- FN(skc_to_tcp_request_sock), \
- FN(skc_to_udp6_sock), \
- FN(get_task_stack), \
+#define ___BPF_FUNC_MAPPER(FN, ctx...) \
+ FN(unspec, 0, ##ctx) \
+ FN(map_lookup_elem, 1, ##ctx) \
+ FN(map_update_elem, 2, ##ctx) \
+ FN(map_delete_elem, 3, ##ctx) \
+ FN(probe_read, 4, ##ctx) \
+ FN(ktime_get_ns, 5, ##ctx) \
+ FN(trace_printk, 6, ##ctx) \
+ FN(get_prandom_u32, 7, ##ctx) \
+ FN(get_smp_processor_id, 8, ##ctx) \
+ FN(skb_store_bytes, 9, ##ctx) \
+ FN(l3_csum_replace, 10, ##ctx) \
+ FN(l4_csum_replace, 11, ##ctx) \
+ FN(tail_call, 12, ##ctx) \
+ FN(clone_redirect, 13, ##ctx) \
+ FN(get_current_pid_tgid, 14, ##ctx) \
+ FN(get_current_uid_gid, 15, ##ctx) \
+ FN(get_current_comm, 16, ##ctx) \
+ FN(get_cgroup_classid, 17, ##ctx) \
+ FN(skb_vlan_push, 18, ##ctx) \
+ FN(skb_vlan_pop, 19, ##ctx) \
+ FN(skb_get_tunnel_key, 20, ##ctx) \
+ FN(skb_set_tunnel_key, 21, ##ctx) \
+ FN(perf_event_read, 22, ##ctx) \
+ FN(redirect, 23, ##ctx) \
+ FN(get_route_realm, 24, ##ctx) \
+ FN(perf_event_output, 25, ##ctx) \
+ FN(skb_load_bytes, 26, ##ctx) \
+ FN(get_stackid, 27, ##ctx) \
+ FN(csum_diff, 28, ##ctx) \
+ FN(skb_get_tunnel_opt, 29, ##ctx) \
+ FN(skb_set_tunnel_opt, 30, ##ctx) \
+ FN(skb_change_proto, 31, ##ctx) \
+ FN(skb_change_type, 32, ##ctx) \
+ FN(skb_under_cgroup, 33, ##ctx) \
+ FN(get_hash_recalc, 34, ##ctx) \
+ FN(get_current_task, 35, ##ctx) \
+ FN(probe_write_user, 36, ##ctx) \
+ FN(current_task_under_cgroup, 37, ##ctx) \
+ FN(skb_change_tail, 38, ##ctx) \
+ FN(skb_pull_data, 39, ##ctx) \
+ FN(csum_update, 40, ##ctx) \
+ FN(set_hash_invalid, 41, ##ctx) \
+ FN(get_numa_node_id, 42, ##ctx) \
+ FN(skb_change_head, 43, ##ctx) \
+ FN(xdp_adjust_head, 44, ##ctx) \
+ FN(probe_read_str, 45, ##ctx) \
+ FN(get_socket_cookie, 46, ##ctx) \
+ FN(get_socket_uid, 47, ##ctx) \
+ FN(set_hash, 48, ##ctx) \
+ FN(setsockopt, 49, ##ctx) \
+ FN(skb_adjust_room, 50, ##ctx) \
+ FN(redirect_map, 51, ##ctx) \
+ FN(sk_redirect_map, 52, ##ctx) \
+ FN(sock_map_update, 53, ##ctx) \
+ FN(xdp_adjust_meta, 54, ##ctx) \
+ FN(perf_event_read_value, 55, ##ctx) \
+ FN(perf_prog_read_value, 56, ##ctx) \
+ FN(getsockopt, 57, ##ctx) \
+ FN(override_return, 58, ##ctx) \
+ FN(sock_ops_cb_flags_set, 59, ##ctx) \
+ FN(msg_redirect_map, 60, ##ctx) \
+ FN(msg_apply_bytes, 61, ##ctx) \
+ FN(msg_cork_bytes, 62, ##ctx) \
+ FN(msg_pull_data, 63, ##ctx) \
+ FN(bind, 64, ##ctx) \
+ FN(xdp_adjust_tail, 65, ##ctx) \
+ FN(skb_get_xfrm_state, 66, ##ctx) \
+ FN(get_stack, 67, ##ctx) \
+ FN(skb_load_bytes_relative, 68, ##ctx) \
+ FN(fib_lookup, 69, ##ctx) \
+ FN(sock_hash_update, 70, ##ctx) \
+ FN(msg_redirect_hash, 71, ##ctx) \
+ FN(sk_redirect_hash, 72, ##ctx) \
+ FN(lwt_push_encap, 73, ##ctx) \
+ FN(lwt_seg6_store_bytes, 74, ##ctx) \
+ FN(lwt_seg6_adjust_srh, 75, ##ctx) \
+ FN(lwt_seg6_action, 76, ##ctx) \
+ FN(rc_repeat, 77, ##ctx) \
+ FN(rc_keydown, 78, ##ctx) \
+ FN(skb_cgroup_id, 79, ##ctx) \
+ FN(get_current_cgroup_id, 80, ##ctx) \
+ FN(get_local_storage, 81, ##ctx) \
+ FN(sk_select_reuseport, 82, ##ctx) \
+ FN(skb_ancestor_cgroup_id, 83, ##ctx) \
+ FN(sk_lookup_tcp, 84, ##ctx) \
+ FN(sk_lookup_udp, 85, ##ctx) \
+ FN(sk_release, 86, ##ctx) \
+ FN(map_push_elem, 87, ##ctx) \
+ FN(map_pop_elem, 88, ##ctx) \
+ FN(map_peek_elem, 89, ##ctx) \
+ FN(msg_push_data, 90, ##ctx) \
+ FN(msg_pop_data, 91, ##ctx) \
+ FN(rc_pointer_rel, 92, ##ctx) \
+ FN(spin_lock, 93, ##ctx) \
+ FN(spin_unlock, 94, ##ctx) \
+ FN(sk_fullsock, 95, ##ctx) \
+ FN(tcp_sock, 96, ##ctx) \
+ FN(skb_ecn_set_ce, 97, ##ctx) \
+ FN(get_listener_sock, 98, ##ctx) \
+ FN(skc_lookup_tcp, 99, ##ctx) \
+ FN(tcp_check_syncookie, 100, ##ctx) \
+ FN(sysctl_get_name, 101, ##ctx) \
+ FN(sysctl_get_current_value, 102, ##ctx) \
+ FN(sysctl_get_new_value, 103, ##ctx) \
+ FN(sysctl_set_new_value, 104, ##ctx) \
+ FN(strtol, 105, ##ctx) \
+ FN(strtoul, 106, ##ctx) \
+ FN(sk_storage_get, 107, ##ctx) \
+ FN(sk_storage_delete, 108, ##ctx) \
+ FN(send_signal, 109, ##ctx) \
+ FN(tcp_gen_syncookie, 110, ##ctx) \
+ FN(skb_output, 111, ##ctx) \
+ FN(probe_read_user, 112, ##ctx) \
+ FN(probe_read_kernel, 113, ##ctx) \
+ FN(probe_read_user_str, 114, ##ctx) \
+ FN(probe_read_kernel_str, 115, ##ctx) \
+ FN(tcp_send_ack, 116, ##ctx) \
+ FN(send_signal_thread, 117, ##ctx) \
+ FN(jiffies64, 118, ##ctx) \
+ FN(read_branch_records, 119, ##ctx) \
+ FN(get_ns_current_pid_tgid, 120, ##ctx) \
+ FN(xdp_output, 121, ##ctx) \
+ FN(get_netns_cookie, 122, ##ctx) \
+ FN(get_current_ancestor_cgroup_id, 123, ##ctx) \
+ FN(sk_assign, 124, ##ctx) \
+ FN(ktime_get_boot_ns, 125, ##ctx) \
+ FN(seq_printf, 126, ##ctx) \
+ FN(seq_write, 127, ##ctx) \
+ FN(sk_cgroup_id, 128, ##ctx) \
+ FN(sk_ancestor_cgroup_id, 129, ##ctx) \
+ FN(ringbuf_output, 130, ##ctx) \
+ FN(ringbuf_reserve, 131, ##ctx) \
+ FN(ringbuf_submit, 132, ##ctx) \
+ FN(ringbuf_discard, 133, ##ctx) \
+ FN(ringbuf_query, 134, ##ctx) \
+ FN(csum_level, 135, ##ctx) \
+ FN(skc_to_tcp6_sock, 136, ##ctx) \
+ FN(skc_to_tcp_sock, 137, ##ctx) \
+ FN(skc_to_tcp_timewait_sock, 138, ##ctx) \
+ FN(skc_to_tcp_request_sock, 139, ##ctx) \
+ FN(skc_to_udp6_sock, 140, ##ctx) \
+ FN(get_task_stack, 141, ##ctx) \
+ FN(load_hdr_opt, 142, ##ctx) \
+ FN(store_hdr_opt, 143, ##ctx) \
+ FN(reserve_hdr_opt, 144, ##ctx) \
+ FN(inode_storage_get, 145, ##ctx) \
+ FN(inode_storage_delete, 146, ##ctx) \
+ FN(d_path, 147, ##ctx) \
+ FN(copy_from_user, 148, ##ctx) \
+ FN(snprintf_btf, 149, ##ctx) \
+ FN(seq_printf_btf, 150, ##ctx) \
+ FN(skb_cgroup_classid, 151, ##ctx) \
+ FN(redirect_neigh, 152, ##ctx) \
+ FN(per_cpu_ptr, 153, ##ctx) \
+ FN(this_cpu_ptr, 154, ##ctx) \
+ FN(redirect_peer, 155, ##ctx) \
+ FN(task_storage_get, 156, ##ctx) \
+ FN(task_storage_delete, 157, ##ctx) \
+ FN(get_current_task_btf, 158, ##ctx) \
+ FN(bprm_opts_set, 159, ##ctx) \
+ FN(ktime_get_coarse_ns, 160, ##ctx) \
+ FN(ima_inode_hash, 161, ##ctx) \
+ FN(sock_from_file, 162, ##ctx) \
+ FN(check_mtu, 163, ##ctx) \
+ FN(for_each_map_elem, 164, ##ctx) \
+ FN(snprintf, 165, ##ctx) \
+ FN(sys_bpf, 166, ##ctx) \
+ FN(btf_find_by_name_kind, 167, ##ctx) \
+ FN(sys_close, 168, ##ctx) \
+ FN(timer_init, 169, ##ctx) \
+ FN(timer_set_callback, 170, ##ctx) \
+ FN(timer_start, 171, ##ctx) \
+ FN(timer_cancel, 172, ##ctx) \
+ FN(get_func_ip, 173, ##ctx) \
+ FN(get_attach_cookie, 174, ##ctx) \
+ FN(task_pt_regs, 175, ##ctx) \
+ FN(get_branch_snapshot, 176, ##ctx) \
+ FN(trace_vprintk, 177, ##ctx) \
+ FN(skc_to_unix_sock, 178, ##ctx) \
+ FN(kallsyms_lookup_name, 179, ##ctx) \
+ FN(find_vma, 180, ##ctx) \
+ FN(loop, 181, ##ctx) \
+ FN(strncmp, 182, ##ctx) \
+ FN(get_func_arg, 183, ##ctx) \
+ FN(get_func_ret, 184, ##ctx) \
+ FN(get_func_arg_cnt, 185, ##ctx) \
+ FN(get_retval, 186, ##ctx) \
+ FN(set_retval, 187, ##ctx) \
+ FN(xdp_get_buff_len, 188, ##ctx) \
+ FN(xdp_load_bytes, 189, ##ctx) \
+ FN(xdp_store_bytes, 190, ##ctx) \
+ FN(copy_from_user_task, 191, ##ctx) \
+ FN(skb_set_tstamp, 192, ##ctx) \
+ FN(ima_file_hash, 193, ##ctx) \
+ FN(kptr_xchg, 194, ##ctx) \
+ FN(map_lookup_percpu_elem, 195, ##ctx) \
+ FN(skc_to_mptcp_sock, 196, ##ctx) \
+ FN(dynptr_from_mem, 197, ##ctx) \
+ FN(ringbuf_reserve_dynptr, 198, ##ctx) \
+ FN(ringbuf_submit_dynptr, 199, ##ctx) \
+ FN(ringbuf_discard_dynptr, 200, ##ctx) \
+ FN(dynptr_read, 201, ##ctx) \
+ FN(dynptr_write, 202, ##ctx) \
+ FN(dynptr_data, 203, ##ctx) \
+ FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \
+ FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \
+ FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \
+ FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \
+ FN(ktime_get_tai_ns, 208, ##ctx) \
+ FN(user_ringbuf_drain, 209, ##ctx) \
+ FN(cgrp_storage_get, 210, ##ctx) \
+ FN(cgrp_storage_delete, 211, ##ctx) \
/* */
+/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
+ * know or care about the integer value that is now passed as the second
+ * argument
+ */
+#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name),
+#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN)
+
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
*/
-#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
+#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y,
enum bpf_func_id {
- __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
+ ___BPF_FUNC_MAPPER(__BPF_ENUM_FN)
__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
@@ -3599,6 +6062,12 @@ enum {
BPF_F_ZERO_CSUM_TX = (1ULL << 1),
BPF_F_DONT_FRAGMENT = (1ULL << 2),
BPF_F_SEQ_NUMBER = (1ULL << 3),
+ BPF_F_NO_TUNNEL_KEY = (1ULL << 4),
+};
+
+/* BPF_FUNC_skb_get_tunnel_key flags. */
+enum {
+ BPF_F_TUNINFO_FLAGS = (1ULL << 4),
};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
@@ -3632,6 +6101,9 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
+ BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
};
enum {
@@ -3648,9 +6120,13 @@ enum {
BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
};
-/* BPF_FUNC_sk_storage_get flags */
+/* BPF_FUNC_<kernel_obj>_storage_get flags */
enum {
- BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+ BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
+ /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
+ * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
+ */
+ BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
};
/* BPF_FUNC_read_branch_records flags. */
@@ -3706,12 +6182,32 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_IP,
};
+/* Flags for bpf_bprm_opts_set helper */
+enum {
+ BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
+};
+
+/* Flags for bpf_redirect_map helper */
+enum {
+ BPF_F_BROADCAST = (1ULL << 3),
+ BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+};
+
#define __bpf_md_ptr(type, name) \
union { \
type name; \
__u64 :64; \
} __attribute__((aligned(8)))
+enum {
+ BPF_SKB_TSTAMP_UNSPEC,
+ BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
+ /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
+ * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
+ * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ */
+};
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -3752,6 +6248,9 @@ struct __sk_buff {
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
__u32 gso_size;
+ __u8 tstamp_type;
+ __u32 :24; /* Padding, future use. */
+ __u64 hwtstamp;
};
struct bpf_tunnel_key {
@@ -3762,8 +6261,15 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
- __u16 tunnel_ext; /* Padding, future use. */
+ union {
+ __u16 tunnel_ext; /* compat */
+ __be16 tunnel_flags;
+ };
__u32 tunnel_label;
+ union {
+ __u32 local_ipv4;
+ __u32 local_ipv6[4];
+ };
};
/* user accessible mirror of in-kernel xfrm_state.
@@ -3802,6 +6308,11 @@ enum bpf_ret_code {
* represented by BPF_REDIRECT above).
*/
BPF_LWT_REROUTE = 128,
+ /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
+ * to indicate that no custom dissection was performed, and
+ * fallback to standard dissector is requested.
+ */
+ BPF_FLOW_DISSECTOR_CONTINUE = 129,
};
struct bpf_sock {
@@ -3815,7 +6326,8 @@ struct bpf_sock {
__u32 src_ip4;
__u32 src_ip6[4];
__u32 src_port; /* host byte order */
- __u32 dst_port; /* network byte order */
+ __be16 dst_port; /* network byte order */
+ __u16 :16; /* zero padding */
__u32 dst_ip4;
__u32 dst_ip6[4];
__u32 state;
@@ -3884,6 +6396,19 @@ struct bpf_sock_tuple {
};
};
+/* (Simplified) user return codes for tcx prog type.
+ * A valid tcx program must return one of these defined values. All other
+ * return codes are reserved for future use. Must remain compatible with
+ * their TC_ACT_* counterparts. For compatibility in behavior, unknown
+ * return codes are mapped to TCX_NEXT.
+ */
+enum tcx_action_base {
+ TCX_NEXT = -1,
+ TCX_PASS = 0,
+ TCX_DROP = 2,
+ TCX_REDIRECT = 7,
+};
+
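For context, a minimal (hypothetical) tcx program only needs to return one of these codes; any other value is mapped to TCX_NEXT as described above:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tcx/ingress")
int tcx_minimal(struct __sk_buff *skb)
{
	/* Let the next tcx program, and ultimately the stack, see the packet. */
	return TCX_PASS;
}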
struct bpf_xdp_sock {
__u32 queue_id;
};
@@ -3990,6 +6515,20 @@ struct sk_reuseport_md {
__u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
+ /* When reuse->migrating_sk is NULL, it is selecting a sk for the
+ * new incoming connection request (e.g. selecting a listen sk for
+ * the received SYN in the TCP case). reuse->sk is one of the sk
+ * in the reuseport group. The bpf prog can use reuse->sk to learn
+ * the local listening ip/port without looking into the skb.
+ *
+ * When reuse->migrating_sk is not NULL, reuse->sk is closed and
+ * reuse->migrating_sk is the socket that needs to be migrated
+ * to another listening socket. migrating_sk could be a fullsock
+	 * sk that is fully established or a reqsk that is in the middle
+	 * of the 3-way handshake.
+ */
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(struct bpf_sock *, migrating_sk);
};
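A hedged sketch of the two cases described above (the sockmap name and slot layout are assumptions, not part of this diff):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} listeners SEC(".maps");

SEC("sk_reuseport/migrate")
int select_or_migrate(struct sk_reuseport_md *md)
{
	__u32 zero = 0;

	/* New request: keep the kernel's default listener selection. */
	if (!md->migrating_sk)
		return SK_PASS;

	/* Migration: steer the request to the listener stored at slot 0. */
	if (!bpf_sk_select_reuseport(md, &listeners, &zero, 0))
		return SK_PASS;
	return SK_DROP;
}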
#define BPF_TAG_SIZE 8
@@ -4030,6 +6569,10 @@ struct bpf_prog_info {
__aligned_u64 prog_tags;
__u64 run_time_ns;
__u64 run_cnt;
+ __u64 recursion_misses;
+ __u32 verified_insns;
+ __u32 attach_btf_obj_id;
+ __u32 attach_btf_id;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -4047,12 +6590,17 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
+ __u32 btf_vmlinux_id;
+ __u64 map_extra;
} __attribute__((aligned(8)));
struct bpf_btf_info {
__aligned_u64 btf;
__u32 btf_size;
__u32 id;
+ __aligned_u64 name;
+ __u32 name_len;
+ __u32 kernel_btf;
} __attribute__((aligned(8)));
struct bpf_link_info {
@@ -4066,11 +6614,37 @@ struct bpf_link_info {
} raw_tracepoint;
struct {
__u32 attach_type;
+ __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
+ __u32 target_btf_id; /* BTF type id inside the object */
} tracing;
struct {
__u64 cgroup_id;
__u32 attach_type;
} cgroup;
+ struct {
+ __aligned_u64 target_name; /* in/out: target_name buffer ptr */
+ __u32 target_name_len; /* in/out: target_name buffer len */
+
+ /* If the iter specific field is 32 bits, it can be put
+ * in the first or second union. Otherwise it should be
+ * put in the second union.
+ */
+ union {
+ struct {
+ __u32 map_id;
+ } map;
+ };
+ union {
+ struct {
+ __u64 cgroup_id;
+ __u32 order;
+ } cgroup;
+ struct {
+ __u32 tid;
+ __u32 pid;
+ } task;
+ };
+ } iter;
struct {
__u32 netns_ino;
__u32 attach_type;
@@ -4078,6 +6652,72 @@ struct bpf_link_info {
struct {
__u32 ifindex;
} xdp;
+ struct {
+ __u32 map_id;
+ } struct_ops;
+ struct {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+ } netfilter;
+ struct {
+ __aligned_u64 addrs;
+ __u32 count; /* in/out: kprobe_multi function count */
+ __u32 flags;
+ __u64 missed;
+ __aligned_u64 cookies;
+ } kprobe_multi;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 path_size; /* in/out: real path size on success, including zero byte */
+ __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
+ __u32 type; /* enum bpf_perf_event_type */
+ __u32 :32;
+ union {
+ struct {
+ __aligned_u64 file_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from file_name */
+ __u64 cookie;
+ } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+ struct {
+ __aligned_u64 func_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from func_name */
+ __u64 addr;
+ __u64 missed;
+ __u64 cookie;
+ } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+ struct {
+ __aligned_u64 tp_name; /* in/out */
+ __u32 name_len;
+ __u32 :32;
+ __u64 cookie;
+ } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+ struct {
+ __u64 config;
+ __u32 type;
+ __u32 :32;
+ __u64 cookie;
+ } event; /* BPF_PERF_EVENT_EVENT */
+ };
+ } perf_event;
+ struct {
+ __u32 ifindex;
+ __u32 attach_type;
+ } tcx;
+ struct {
+ __u32 ifindex;
+ __u32 attach_type;
+ } netkit;
};
} __attribute__((aligned(8)));
@@ -4158,6 +6798,37 @@ struct bpf_sock_ops {
__u64 bytes_received;
__u64 bytes_acked;
__bpf_md_ptr(struct bpf_sock *, sk);
+ /* [skb_data, skb_data_end) covers the whole TCP header.
+ *
+ * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
+ * header has not been written.
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
+ * been written so far.
+ * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
+ * the 3WHS.
+ * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
+ * the 3WHS.
+ *
+ * bpf_load_hdr_opt() can also be used to read a particular option.
+ */
+ __bpf_md_ptr(void *, skb_data);
+ __bpf_md_ptr(void *, skb_data_end);
+ __u32 skb_len; /* The total length of a packet.
+ * It includes the header, options,
+ * and payload.
+ */
+ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
+ * an easy way to check for tcp_flags
+ * without parsing skb_data.
+ *
+ * In particular, the skb_tcp_flags
+ * will still be available in
+ * BPF_SOCK_OPS_HDR_OPT_LEN even though
+ * the outgoing header has not
+ * been written yet.
+ */
+ __u64 skb_hwtstamp;
};
/* Definitions for bpf_sock_ops_cb_flags */
@@ -4166,8 +6837,51 @@ enum {
BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+ /* Call bpf for all received TCP headers. The bpf prog will be
+ * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ *
+	 * It could be used at the client/active side (i.e. connect() side)
+	 * when the server has signalled that it is in syncookie mode and
+	 * requires the active side to resend the bpf-written options. The
+	 * active side can keep writing the bpf-options until it receives a
+	 * valid packet from the server side confirming that the earlier
+	 * packet (and options) has been received. An example use is the
+	 * active side resending its options while the server is in
+	 * syncookie mode.
+ *
+ * The bpf prog will usually turn this off in the common cases.
+ */
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
+ /* Call bpf when kernel has received a header option that
+ * the kernel cannot handle. The bpf prog will be called under
+ * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ */
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
+ /* Call bpf when the kernel is writing header options for the
+ * outgoing packet. The bpf prog will first be called
+ * to reserve space in a skb under
+ * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
+ * the bpf prog will be called to write the header option(s)
+ * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
+ * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
+ * related helpers that will be useful to the bpf programs.
+ *
+ * The kernel gets its chance to reserve space and write
+ * options first before the BPF program does.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
/* Mask of all currently supported cb flags */
- BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
};
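A sketch of turning one of the new callback flags on once a connection is established (section name per libbpf convention; the program is illustrative only):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int enable_hdr_opt_cb(struct bpf_sock_ops *skops)
{
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops,
					  skops->bpf_sock_ops_cb_flags |
					  BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
	return 1;
}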
/* List of known BPF sock_ops operators.
@@ -4223,6 +6937,63 @@ enum {
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
*/
+ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
+ * It will be called to handle
+ * the packets received at
+ * an already established
+ * connection.
+ *
+ * sock_ops->skb_data:
+ * Referring to the received skb.
+ * It covers the TCP header only.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option.
+ */
+ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
+ * header option later in
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Not available because no header has
+ * been written yet.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the
+ * outgoing skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_reserve_hdr_opt() should
+ * be used to reserve space.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Referring to the outgoing skb.
+ * It covers the TCP header
+ * that has already been written
+ * by the kernel and the
+ * earlier bpf-progs.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the outgoing
+ * skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_store_hdr_opt() should
+ * be used to write the
+ * option.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option that
+ * has already been written
+ * by the kernel or the
+ * earlier bpf-progs.
+ */
};
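Putting the two write-side callbacks together, a hedged sketch (the option kind/length/value is an arbitrary experimental example, not mandated by this header):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int write_exp_opt(struct bpf_sock_ops *skops)
{
	struct { __u8 kind, len, data; } opt = { 254, 3, 0xaa };

	switch (skops->op) {
	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
		/* Step 1: reserve room for the option in the outgoing skb. */
		bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
		break;
	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
		/* Step 2: actually write it. */
		bpf_store_hdr_opt(skops, &opt, sizeof(opt), 0);
		break;
	}
	return 1;
}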
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -4243,6 +7014,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
+ BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
@@ -4250,6 +7022,63 @@ enum {
enum {
TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
+	TCP_BPF_RTO_MIN		= 1004, /* Min RTO (retransmission timeout) in usecs */
+ /* Copy the SYN pkt to optval
+ *
+	 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to
+	 * bpf_getsockopt(TCP_SAVED_SYN) but is not limited to
+	 * reading from the saved_syn. It can get the
+	 * syn packet from either:
+	 *
+	 * 1. the just-received SYN packet (only available when writing the
+	 *    SYNACK). It is useful when it is not necessary to
+	 *    save the SYN packet for later use. It is also the only way
+ * to get the SYN during syncookie mode because the syn
+ * packet cannot be saved during syncookie.
+ *
+ * OR
+ *
+ * 2. the earlier saved syn which was done by
+ * bpf_setsockopt(TCP_SAVE_SYN).
+ *
+ * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
+ * SYN packet is obtained.
+ *
+ * If the bpf-prog does not need the IP[46] header, the
+ * bpf-prog can avoid parsing the IP header by using
+ * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
+ * IP[46] and TCP header by using TCP_BPF_SYN_IP.
+ *
+ * >0: Total number of bytes copied
+ * -ENOSPC: Not enough space in optval. Only optlen number of
+ * bytes is copied.
+ * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
+ * is not saved by setsockopt(TCP_SAVE_SYN).
+ */
+ TCP_BPF_SYN = 1005, /* Copy the TCP header */
+ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
+ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+};
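A sketch of retrieving the saved headers from a sockops program (buffer size and callback choice are assumptions; the return codes are as documented above):

#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int read_syn_hdrs(struct bpf_sock_ops *skops)
{
	char syn[60];
	int ret;

	if (skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		return 1;
	ret = bpf_getsockopt(skops, IPPROTO_TCP, TCP_BPF_SYN_IP,
			     syn, sizeof(syn));
	if (ret > 0) {
		/* syn[] now holds the IP and TCP headers of the SYN. */
	}
	return 1;
}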
+
+enum {
+ BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
+};
+
+/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ */
+enum {
+ BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
+ * total option spaces
+ * required for an established
+ * sk in order to calculate the
+ * MSS. No skb is actually
+ * sent.
+ */
+ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
+ * when sending a SYN.
+ */
};
struct bpf_perf_event_value {
@@ -4286,6 +7115,9 @@ struct bpf_raw_tracepoint_args {
enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
+ BPF_FIB_LOOKUP_SRC = (1U << 4),
};
enum {
@@ -4298,6 +7130,7 @@ enum {
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
};
struct bpf_fib_lookup {
@@ -4311,9 +7144,13 @@ struct bpf_fib_lookup {
__be16 sport;
__be16 dport;
- /* total length of packet from network header - used for MTU check */
- __u16 tot_len;
+ union { /* used for MTU check */
+ /* input to lookup */
+ __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */
+ /* output: MTU value */
+ __u16 mtu_result;
+ };
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
@@ -4328,6 +7165,9 @@ struct bpf_fib_lookup {
__u32 rt_metric;
};
+ /* input: source address to consider for lookup
+ * output: source address result from lookup
+ */
union {
__be32 ipv4_src;
__u32 ipv6_src[4]; /* in6_addr; network order */
@@ -4342,13 +7182,44 @@ struct bpf_fib_lookup {
__u32 ipv6_dst[4]; /* in6_addr; network order */
};
- /* output */
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
+ union {
+ struct {
+ /* output */
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ };
+ /* input: when accompanied with the
+		 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID' flags, a
+ * specific routing table to use for the fib lookup.
+ */
+ __u32 tbid;
+ };
+
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
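A hedged XDP sketch of a lookup against a specific table via the new tbid field (the table id and the omitted packet parsing are assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef AF_INET
#define AF_INET 2
#endif

SEC("xdp")
int fib_tbid_lookup(struct xdp_md *ctx)
{
	struct bpf_fib_lookup fib = {};
	long rc;

	fib.family  = AF_INET;
	fib.ifindex = ctx->ingress_ifindex;
	/* fib.ipv4_dst etc. would be filled from the parsed packet. */
	fib.tbid    = 100;	/* routing table id, an assumed value */

	rc = bpf_fib_lookup(ctx, &fib, sizeof(fib),
			    BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);
	if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
		/* fib.smac/fib.dmac now hold the next-hop L2 addresses. */
	}
	return XDP_PASS;
}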
+struct bpf_redir_neigh {
+ /* network family for lookup (AF_INET, AF_INET6) */
+ __u32 nh_family;
+ /* network address of nexthop; skips fib lookup to find gateway */
+ union {
+ __be32 ipv4_nh;
+ __u32 ipv6_nh[4]; /* in6_addr; network order */
+ };
+};
+
+/* bpf_check_mtu flags */
+enum bpf_check_mtu_flags {
+ BPF_MTU_CHK_SEGS = (1U << 0),
+};
+
+enum bpf_check_mtu_ret {
+ BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */
+ BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+ BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */
+};
+
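A small tc sketch of the helper these codes belong to (the drop policy is an assumption of the sketch):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int mtu_guard(struct __sk_buff *skb)
{
	__u32 mtu = 0;

	if (bpf_check_mtu(skb, skb->ifindex, &mtu, 0, 0) ==
	    BPF_MTU_CHK_RET_FRAG_NEEDED)
		return TC_ACT_SHOT;	/* would not fit; drop rather than fragment */
	return TC_ACT_OK;
}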
enum bpf_task_fd_type {
BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
BPF_FD_TYPE_TRACEPOINT, /* tp name */
@@ -4408,6 +7279,34 @@ struct bpf_spin_lock {
__u32 val;
};
+struct bpf_timer {
+ __u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+struct bpf_dynptr {
+ __u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+struct bpf_list_head {
+ __u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+struct bpf_list_node {
+ __u64 __opaque[3];
+} __attribute__((aligned(8)));
+
+struct bpf_rb_root {
+ __u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+struct bpf_rb_node {
+ __u64 __opaque[4];
+} __attribute__((aligned(8)));
+
+struct bpf_refcount {
+ __u32 __opaque[1];
+} __attribute__((aligned(4)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@@ -4435,16 +7334,145 @@ struct bpf_pidns_info {
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
struct bpf_sk_lookup {
- __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+ union {
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+ __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+ };
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
__u32 remote_ip4; /* Network byte order */
__u32 remote_ip6[4]; /* Network byte order */
- __u32 remote_port; /* Network byte order */
+ __be16 remote_port; /* Network byte order */
+ __u16 :16; /* Zero padding */
__u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */
+ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
+};
+
+/*
+ * struct btf_ptr is used for typed pointer representation; the
+ * type id is used to render the pointer data as the appropriate type
+ * via the bpf_snprintf_btf() helper described above. A flags field -
+ * potentially to specify additional details about the BTF pointer
+ * (rather than its mode of display) - is included for future use.
+ * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
+ */
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags; /* BTF ptr flags; unused at present. */
+};
+
+/*
+ * Flags to control bpf_snprintf_btf() behaviour.
+ * - BTF_F_COMPACT: no formatting around type information
+ * - BTF_F_NONAME: no struct/union member names/types
+ * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_F_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ */
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
};
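A hedged sketch of rendering a kernel struct with these flags (the tracepoint and argument position are assumptions; bpf_core_type_id_kernel() is a libbpf convenience macro):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("tp_btf/sched_switch")
int dump_prev_task(__u64 *ctx)
{
	static char out[256];
	struct task_struct *prev = (struct task_struct *)ctx[1];
	struct btf_ptr p = {
		.ptr = prev,
		.type_id = bpf_core_type_id_kernel(struct task_struct),
	};

	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p),
			 BTF_F_COMPACT | BTF_F_ZERO);
	return 0;
}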
+/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
+ * has to be adjusted by relocations. It is emitted by llvm and passed to
+ * libbpf and later to the kernel.
+ */
+enum bpf_core_relo_kind {
+ BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
+ BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
+ BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
+ BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
+ BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
+ BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
+ BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
+ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
+ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
+ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
+ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
+};
+
+/*
+ * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
+ * and from libbpf to the kernel.
+ *
+ * CO-RE relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+ * its insn->imm field to be relocated with actual field info;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+ * type or field;
+ * - access_str_off - offset into corresponding .BTF string section. String
+ * interpretation depends on specific relocation kind:
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ *   - for type-based relocations, the string is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ * - kind - one of enum bpf_core_relo_kind;
+ *
+ * Example:
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ * int *x = &s->a; // encoded as "0:0" (a is field #0)
+ * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ *
+ * type_id for all relocs in this example will capture BTF type id of
+ * `struct sample`.
+ *
+ * Such relocation is emitted when using __builtin_preserve_access_index()
+ * Clang built-in, passing expression that captures field address, e.g.:
+ *
+ * bpf_probe_read(&dst, sizeof(dst),
+ * __builtin_preserve_access_index(&src->a.b.c));
+ *
+ * In this case Clang will emit field relocation recording necessary data to
+ * be able to find offset of embedded `a.b.c` field within `src` struct.
+ *
+ * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+struct bpf_core_relo {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
+ enum bpf_core_relo_kind kind;
+};
+
+/*
+ * Flags to control bpf_timer_start() behaviour.
+ * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
+ * relative to current time.
+ * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
+ */
+enum {
+ BPF_F_TIMER_ABS = (1ULL << 0),
+ BPF_F_TIMER_CPU_PIN = (1ULL << 1),
+};
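A condensed sketch of arming a timer with the new absolute-time flag (the map layout and trigger point are assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct elem { struct bpf_timer t; };

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, int *key, struct elem *val)
{
	return 0;	/* deferred work would go here */
}

SEC("tc")
int arm_timer(struct __sk_buff *skb)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&timers, &key);

	if (!val)
		return 0;
	bpf_timer_init(&val->t, &timers, 1 /* CLOCK_MONOTONIC */);
	bpf_timer_set_callback(&val->t, timer_cb);
	/* Absolute deadline 1ms from now, in the clock chosen at init. */
	bpf_timer_start(&val->t, bpf_ktime_get_ns() + 1000000, BPF_F_TIMER_ABS);
	return 0;
}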
+
+/* BPF numbers iterator state */
+struct bpf_iter_num {
+	/* opaque iterator state; having __u64 here preserves correct
+ * alignment requirements in vmlinux.h, generated from BTF
+ */
+ __u64 __opaque[1];
+} __attribute__((aligned(8)));
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h
deleted file mode 100644
index cbc1f5813f50..000000000000
--- a/include/uapi/linux/bpfilter.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_BPFILTER_H
-#define _UAPI_LINUX_BPFILTER_H
-
-#include <linux/if.h>
-
-enum {
- BPFILTER_IPT_SO_SET_REPLACE = 64,
- BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65,
- BPFILTER_IPT_SET_MAX,
-};
-
-enum {
- BPFILTER_IPT_SO_GET_INFO = 64,
- BPFILTER_IPT_SO_GET_ENTRIES = 65,
- BPFILTER_IPT_SO_GET_REVISION_MATCH = 66,
- BPFILTER_IPT_SO_GET_REVISION_TARGET = 67,
- BPFILTER_IPT_GET_MAX,
-};
-
-#endif /* _UAPI_LINUX_BPFILTER_H */
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 5a667107ad2c..ec1798b6d3ff 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -33,17 +33,17 @@ struct btf_type {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
- * struct, union and fwd
+ * struct, union, enum, fwd and enum64
*/
__u32 info;
- /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
+ /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO and VAR.
+ * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -52,28 +52,35 @@ struct btf_type {
};
};
-#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
+#define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
-#define BTF_KIND_UNKN 0 /* Unknown */
-#define BTF_KIND_INT 1 /* Integer */
-#define BTF_KIND_PTR 2 /* Pointer */
-#define BTF_KIND_ARRAY 3 /* Array */
-#define BTF_KIND_STRUCT 4 /* Struct */
-#define BTF_KIND_UNION 5 /* Union */
-#define BTF_KIND_ENUM 6 /* Enumeration */
-#define BTF_KIND_FWD 7 /* Forward */
-#define BTF_KIND_TYPEDEF 8 /* Typedef */
-#define BTF_KIND_VOLATILE 9 /* Volatile */
-#define BTF_KIND_CONST 10 /* Const */
-#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_FUNC 12 /* Function */
-#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
-#define BTF_KIND_VAR 14 /* Variable */
-#define BTF_KIND_DATASEC 15 /* Section */
-#define BTF_KIND_MAX BTF_KIND_DATASEC
-#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
+enum {
+ BTF_KIND_UNKN = 0, /* Unknown */
+ BTF_KIND_INT = 1, /* Integer */
+ BTF_KIND_PTR = 2, /* Pointer */
+ BTF_KIND_ARRAY = 3, /* Array */
+ BTF_KIND_STRUCT = 4, /* Struct */
+ BTF_KIND_UNION = 5, /* Union */
+ BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
+ BTF_KIND_FWD = 7, /* Forward */
+ BTF_KIND_TYPEDEF = 8, /* Typedef */
+ BTF_KIND_VOLATILE = 9, /* Volatile */
+ BTF_KIND_CONST = 10, /* Const */
+ BTF_KIND_RESTRICT = 11, /* Restrict */
+ BTF_KIND_FUNC = 12, /* Function */
+ BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
+ BTF_KIND_VAR = 14, /* Variable */
+ BTF_KIND_DATASEC = 15, /* Section */
+ BTF_KIND_FLOAT = 16, /* Floating point */
+ BTF_KIND_DECL_TAG = 17, /* Decl Tag */
+ BTF_KIND_TYPE_TAG = 18, /* Type Tag */
+ BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
+
+ NR_BTF_KINDS,
+ BTF_KIND_MAX = NR_BTF_KINDS - 1,
+};
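Since the kind field grew from 4 to 5 bits, consumers must mask with the updated BTF_INFO_KIND(). A sketch of decoding the info word, mirroring what libbpf's accessors do:

#include <linux/btf.h>

static inline int btf_type_kind(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info);	/* 5 bits wide, up to BTF_KIND_ENUM64 */
}

static inline int btf_type_vlen(const struct btf_type *t)
{
	return BTF_INFO_VLEN(t->info);	/* e.g. member count for a struct */
}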
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -169,4 +176,25 @@ struct btf_var_secinfo {
__u32 size;
};
+/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
+ * additional information related to the tag applied location.
+ * If component_idx == -1, the tag is applied to a struct, union,
+ * variable or function. Otherwise, it is applied to a struct/union
+ * member or a func argument, and component_idx indicates which member
+ * or argument (0 ... vlen-1).
+ */
+struct btf_decl_tag {
+ __s32 component_idx;
+};
+
+/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
+ * The exact number of btf_enum64 is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_enum64 {
+ __u32 name_off;
+ __u32 val_lo32;
+ __u32 val_hi32;
+};
+
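A consumer reconstructs the 64-bit value from the two halves; libbpf ships an equivalent helper under the same name:

static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
{
	return ((__u64)e->val_hi32 << 32) | e->val_lo32;
}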
#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 2c39d15a2beb..cdf6ad872149 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -19,8 +19,14 @@
#ifndef _UAPI_LINUX_BTRFS_H
#define _UAPI_LINUX_BTRFS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/fs.h>
#define BTRFS_IOCTL_MAGIC 0x94
#define BTRFS_VOL_NAME_MAX 255
@@ -86,6 +92,7 @@ struct btrfs_qgroup_limit {
* struct btrfs_qgroup_inherit.flags
*/
#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
+#define BTRFS_QGROUP_INHERIT_FLAGS_SUPP (BTRFS_QGROUP_INHERIT_SET_LIMITS)
struct btrfs_qgroup_inherit {
__u64 flags;
@@ -93,7 +100,7 @@ struct btrfs_qgroup_inherit {
__u64 num_ref_copies;
__u64 num_excl_copies;
struct btrfs_qgroup_limit lim;
- __u64 qgroups[0];
+ __u64 qgroups[];
};
struct btrfs_ioctl_qgroup_limit_args {
@@ -154,7 +161,7 @@ struct btrfs_scrub_progress {
__u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */
__u64 read_errors; /* # of read errors encountered (EIO) */
__u64 csum_errors; /* # of failed csum checks */
- __u64 verify_errors; /* # of occurences, where the metadata
+ __u64 verify_errors; /* # of occurrences, where the metadata
* of a tree block did not match the
* expected values, like generation or
* logical */
@@ -174,13 +181,14 @@ struct btrfs_scrub_progress {
__u64 last_physical; /* last physical address scrubbed. In
* case a scrub was aborted, this can
* be used to restart the scrub */
- __u64 unverified_errors; /* # of occurences where a read for a
+ __u64 unverified_errors; /* # of occurrences where a read for a
* full (64k) bio failed, but the re-
* check succeeded for each 4k piece.
* Intermittent error. */
};
#define BTRFS_SCRUB_READONLY 1
+#define BTRFS_SCRUB_SUPPORTED_FLAGS (BTRFS_SCRUB_READONLY)
struct btrfs_ioctl_scrub_args {
__u64 devid; /* in */
__u64 start; /* in */
@@ -239,7 +247,17 @@ struct btrfs_ioctl_dev_info_args {
__u8 uuid[BTRFS_UUID_SIZE]; /* in/out */
__u64 bytes_used; /* out */
__u64 total_bytes; /* out */
- __u64 unused[379]; /* pad to 4k */
+ /*
+ * Optional, out.
+ *
+ * Showing the fsid of the device, allowing user space to check if this
+ * device is a seeding one.
+ *
+	 * Introduced in v6.3, thus user space still needs to check whether
+	 * the kernel has set this value. Older kernels will not touch the
+	 * values here.
+ */
+ __u8 fsid[BTRFS_UUID_SIZE];
+ __u64 unused[377]; /* pad to 4k */
__u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */
};
@@ -288,6 +306,13 @@ struct btrfs_ioctl_fs_info_args {
* first mount when booting older kernel versions.
*/
#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
+#define BTRFS_FEATURE_COMPAT_RO_VERITY (1ULL << 2)
+
+/*
+ * Put all block group items into a dedicated block group tree, greatly
+ * reducing mount time for large filesystem due to better locality.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE (1ULL << 3)
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
@@ -307,6 +332,10 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
+#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
+#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
+#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
+#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@@ -324,6 +353,12 @@ struct btrfs_ioctl_feature_flags {
*/
struct btrfs_balance_args {
__u64 profiles;
+
+ /*
+ * usage filter
+ * BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N'
+ * BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max
+ */
union {
__u64 usage;
struct {
@@ -540,7 +575,7 @@ struct btrfs_ioctl_search_header {
__u64 offset;
__u32 type;
__u32 len;
-};
+} __attribute__ ((__may_alias__));
#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key))
/*
@@ -553,18 +588,23 @@ struct btrfs_ioctl_search_args {
char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
};
+/*
+ * Extended version of the TREE_SEARCH ioctl that can return more than 4KiB of data.
+ * The allocated size of the buffer is set in buf_size.
+ */
struct btrfs_ioctl_search_args_v2 {
struct btrfs_ioctl_search_key key; /* in/out - search parameters */
__u64 buf_size; /* in - size of buffer
* out - on EOVERFLOW: needed size
* to store item */
- __u64 buf[0]; /* out - found items */
+ __u64 buf[]; /* out - found items */
};
+/* With a @src_length of zero, the range from @src_offset->EOF is cloned! */
struct btrfs_ioctl_clone_range_args {
- __s64 src_fd;
- __u64 src_offset, src_length;
- __u64 dest_offset;
+ __s64 src_fd;
+ __u64 src_offset, src_length;
+ __u64 dest_offset;
};
/*
@@ -575,6 +615,9 @@ struct btrfs_ioctl_clone_range_args {
*/
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
+ BTRFS_DEFRAG_RANGE_START_IO)
+
struct btrfs_ioctl_defrag_range_args {
/* start of the defrag operation */
__u64 start;
@@ -629,7 +672,7 @@ struct btrfs_ioctl_same_args {
__u16 dest_count; /* in - total elements in info array */
__u16 reserved1;
__u32 reserved2;
- struct btrfs_ioctl_same_extent_info info[0];
+ struct btrfs_ioctl_same_extent_info info[];
};
struct btrfs_ioctl_space_info {
@@ -641,7 +684,7 @@ struct btrfs_ioctl_space_info {
struct btrfs_ioctl_space_args {
__u64 space_slots;
__u64 total_spaces;
- struct btrfs_ioctl_space_info spaces[0];
+ struct btrfs_ioctl_space_info spaces[];
};
struct btrfs_data_container {
@@ -649,7 +692,7 @@ struct btrfs_data_container {
__u32 bytes_missing; /* out -- additional bytes needed for result */
__u32 elem_cnt; /* out */
__u32 elem_missed; /* out */
- __u64 val[0]; /* out */
+ __u64 val[]; /* out */
};
struct btrfs_ioctl_ino_path_args {
@@ -668,8 +711,11 @@ struct btrfs_ioctl_logical_ino_args {
/* struct btrfs_data_container *inodes; out */
__u64 inodes;
};
-/* Return every ref to the extent, not just those containing logical block.
- * Requires logical == extent bytenr. */
+
+/*
+ * Return every ref to the extent, not just those containing logical block.
+ * Requires logical == extent bytenr.
+ */
#define BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET (1ULL << 0)
enum btrfs_dev_stat_values {
@@ -713,6 +759,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
+#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;
@@ -769,10 +816,24 @@ struct btrfs_ioctl_received_subvol_args {
*/
#define BTRFS_SEND_FLAG_OMIT_END_CMD 0x4
+/*
+ * Read the protocol version in the structure
+ */
+#define BTRFS_SEND_FLAG_VERSION 0x8
+
+/*
+ * Send compressed data using the ENCODED_WRITE command instead of decompressing
+ * the data and sending it with the WRITE command. This requires protocol
+ * version >= 2.
+ */
+#define BTRFS_SEND_FLAG_COMPRESSED 0x10
+
#define BTRFS_SEND_FLAG_MASK \
(BTRFS_SEND_FLAG_NO_FILE_DATA | \
BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \
- BTRFS_SEND_FLAG_OMIT_END_CMD)
+ BTRFS_SEND_FLAG_OMIT_END_CMD | \
+ BTRFS_SEND_FLAG_VERSION | \
+ BTRFS_SEND_FLAG_COMPRESSED)
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
@@ -780,7 +841,8 @@ struct btrfs_ioctl_send_args {
__u64 __user *clone_sources; /* in */
__u64 parent_root; /* in */
__u64 flags; /* in */
- __u64 reserved[4]; /* in */
+ __u32 version; /* in */
+ __u8 reserved[28]; /* in */
};
/*
@@ -859,6 +921,134 @@ struct btrfs_ioctl_get_subvol_rootref_args {
__u8 align[7];
};
+/*
+ * Data and metadata for an encoded read or write.
+ *
+ * Encoded I/O bypasses any encoding automatically done by the filesystem (e.g.,
+ * compression). This can be used to read the compressed contents of a file or
+ * write pre-compressed data directly to a file.
+ *
+ * BTRFS_IOC_ENCODED_READ and BTRFS_IOC_ENCODED_WRITE are essentially
+ * preadv/pwritev with additional metadata about how the data is encoded and the
+ * size of the unencoded data.
+ *
+ * BTRFS_IOC_ENCODED_READ fills the given iovecs with the encoded data, fills
+ * the metadata fields, and returns the size of the encoded data. It reads one
+ * extent per call. It can also read data which is not encoded.
+ *
+ * BTRFS_IOC_ENCODED_WRITE uses the metadata fields, writes the encoded data
+ * from the iovecs, and returns the size of the encoded data. Note that the
+ * encoded data is not validated when it is written; if it is not valid (e.g.,
+ * it cannot be decompressed), then a subsequent read may return an error.
+ *
+ * Since the filesystem page cache contains decoded data, encoded I/O bypasses
+ * the page cache. Encoded I/O requires CAP_SYS_ADMIN.
+ */
+struct btrfs_ioctl_encoded_io_args {
+ /* Input parameters for both reads and writes. */
+
+ /*
+ * iovecs containing encoded data.
+ *
+ * For reads, if the size of the encoded data is larger than the sum of
+ * iov[n].iov_len for 0 <= n < iovcnt, then the ioctl fails with
+ * ENOBUFS.
+ *
+ * For writes, the size of the encoded data is the sum of iov[n].iov_len
+ * for 0 <= n < iovcnt. This must be less than 128 KiB (this limit may
+ * increase in the future). This must also be less than or equal to
+ * unencoded_len.
+ */
+ const struct iovec __user *iov;
+ /* Number of iovecs. */
+ unsigned long iovcnt;
+ /*
+ * Offset in file.
+ *
+ * For writes, must be aligned to the sector size of the filesystem.
+ */
+ __s64 offset;
+ /* Currently must be zero. */
+ __u64 flags;
+
+ /*
+ * For reads, the following members are output parameters that will
+ * contain the returned metadata for the encoded data.
+ * For writes, the following members must be set to the metadata for the
+ * encoded data.
+ */
+
+ /*
+ * Length of the data in the file.
+ *
+ * Must be less than or equal to unencoded_len - unencoded_offset. For
+ * writes, must be aligned to the sector size of the filesystem unless
+ * the data ends at or beyond the current end of the file.
+ */
+ __u64 len;
+ /*
+ * Length of the unencoded (i.e., decrypted and decompressed) data.
+ *
+ * For writes, must be no more than 128 KiB (this limit may increase in
+ * the future). If the unencoded data is actually longer than
+ * unencoded_len, then it is truncated; if it is shorter, then it is
+ * extended with zeroes.
+ */
+ __u64 unencoded_len;
+ /*
+ * Offset from the first byte of the unencoded data to the first byte of
+ * logical data in the file.
+ *
+ * Must be less than unencoded_len.
+ */
+ __u64 unencoded_offset;
+ /*
+ * BTRFS_ENCODED_IO_COMPRESSION_* type.
+ *
+ * For writes, must not be BTRFS_ENCODED_IO_COMPRESSION_NONE.
+ */
+ __u32 compression;
+ /* Currently always BTRFS_ENCODED_IO_ENCRYPTION_NONE. */
+ __u32 encryption;
+ /*
+ * Reserved for future expansion.
+ *
+ * For reads, always returned as zero. Users should check for non-zero
+ * bytes. If there are any, then the kernel has a newer version of this
+ * structure with additional information that the user definition is
+ * missing.
+ *
+ * For writes, must be zeroed.
+ */
+ __u8 reserved[64];
+};
+
+/* Data is not compressed. */
+#define BTRFS_ENCODED_IO_COMPRESSION_NONE 0
+/* Data is compressed as a single zlib stream. */
+#define BTRFS_ENCODED_IO_COMPRESSION_ZLIB 1
+/*
+ * Data is compressed as a single zstd frame with the windowLog compression
+ * parameter set to no more than 17.
+ */
+#define BTRFS_ENCODED_IO_COMPRESSION_ZSTD 2
+/*
+ * Data is compressed sector by sector (using the sector size indicated by the
+ * name of the constant) with LZO1X and wrapped in the format documented in
+ * fs/btrfs/lzo.c. For writes, the compression sector size must match the
+ * filesystem sector size.
+ */
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_4K 3
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_8K 4
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_16K 5
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_32K 6
+#define BTRFS_ENCODED_IO_COMPRESSION_LZO_64K 7
+#define BTRFS_ENCODED_IO_COMPRESSION_TYPES 8
+
+/* Data is not encrypted. */
+#define BTRFS_ENCODED_IO_ENCRYPTION_NONE 0
+#define BTRFS_ENCODED_IO_ENCRYPTION_TYPES 1
+
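A hypothetical user-space sketch of one encoded read (the file descriptor, buffer size and offset are assumptions; requires CAP_SYS_ADMIN as noted above):

#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/btrfs.h>

static long encoded_read(int fd, char *buf, size_t size)
{
	struct iovec iov = { .iov_base = buf, .iov_len = size };
	struct btrfs_ioctl_encoded_io_args args = {
		.iov = &iov,
		.iovcnt = 1,
		.offset = 0,
	};
	long ret = ioctl(fd, BTRFS_IOC_ENCODED_READ, &args);

	/* On success ret is the encoded size; args.compression, args.len and
	 * args.unencoded_len describe how to interpret the bytes in buf. */
	return ret;
}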
/* Error codes as returned by the kernel */
enum btrfs_err_code {
BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
@@ -987,5 +1177,13 @@ enum btrfs_err_code {
struct btrfs_ioctl_ino_lookup_user_args)
#define BTRFS_IOC_SNAP_DESTROY_V2 _IOW(BTRFS_IOCTL_MAGIC, 63, \
struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_ENCODED_READ _IOR(BTRFS_IOCTL_MAGIC, 64, \
+ struct btrfs_ioctl_encoded_io_args)
+#define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \
+ struct btrfs_ioctl_encoded_io_args)
+
+#ifdef __cplusplus
+}
+#endif
#endif /* _UAPI_LINUX_BTRFS_H */
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 9ba64ca6b4ac..d24e8e121507 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -4,6 +4,28 @@
#include <linux/btrfs.h>
#include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/* ASCII for _BHRfS_M, no terminating nul */
+#define BTRFS_MAGIC 0x4D5F53665248425FULL
+
+#define BTRFS_MAX_LEVEL 8
+
+/*
+ * We can actually store much bigger names, but let's not confuse the rest of
+ * Linux.
+ */
+#define BTRFS_NAME_LEN 255
+
+/*
+ * The theoretical limit is larger, but we keep this down to a sane value. That
+ * should greatly limit the possibility of collisions on inode ref items.
+ */
+#define BTRFS_LINK_MAX 65535U
/*
* This header contains the structure definitions and constants used
@@ -48,13 +70,19 @@
/* tracks free space in block groups. */
#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL
+/* Holds the block group items for extent tree v2. */
+#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
+
+/* Tracks RAID stripes in block groups. */
+#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
+
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL
-/* orhpan objectid for tracking unlinked/truncated files */
+/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL
/* does write ahead logging to speed up fsyncs */
@@ -113,12 +141,37 @@
#define BTRFS_INODE_REF_KEY 12
#define BTRFS_INODE_EXTREF_KEY 13
#define BTRFS_XATTR_ITEM_KEY 24
+
+/*
+ * fs verity items are stored under two different key types on disk.
+ * The descriptor items:
+ * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
+ *
+ * At offset 0, we store a btrfs_verity_descriptor_item which tracks the size
+ * of the descriptor item and some extra data for encryption.
+ * Starting at offset 1, these hold the generic fs verity descriptor. The
+ * latter are opaque to btrfs, we just read and write them as a blob for the
+ * higher level verity code. The most common descriptor size is 256 bytes.
+ *
+ * The merkle tree items:
+ * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
+ *
+ * These also start at offset 0, and correspond to the merkle tree bytes. When
+ * fsverity asks for page 0 of the merkle tree, we pull up one page starting at
+ * offset 0 for this key type. These are also opaque to btrfs, we're blindly
+ * storing whatever fsverity sends down.
+ */
+#define BTRFS_VERITY_DESC_ITEM_KEY 36
+#define BTRFS_VERITY_MERKLE_ITEM_KEY 37
+
#define BTRFS_ORPHAN_ITEM_KEY 48
/* reserve 2-15 close to the inode for later flexibility */
/*
* dir items are the name -> inode pointers in a directory. There is one
- * for every name in a directory.
+ * for every name in a directory. BTRFS_DIR_LOG_ITEM_KEY is no longer used
+ * but it's still defined here for documentation purposes and to help avoid
+ * having its numerical value reused in the future.
*/
#define BTRFS_DIR_LOG_ITEM_KEY 60
#define BTRFS_DIR_LOG_INDEX_KEY 72
@@ -166,11 +219,31 @@
*/
#define BTRFS_METADATA_ITEM_KEY 169
+/*
+ * Special inline ref key which stores the id of the subvolume which originally
+ * created the extent. This subvolume owns the extent permanently from the
+ * perspective of simple quotas. Needed to know which subvolume to free quota
+ * usage from when the extent is deleted.
+ *
+ * Stored as an inline ref rather than a separate item, to avoid wasting space
+ * on top of the existing extent item. However, unlike the other inline refs,
+ * there is only one owner ref per extent rather than one per reference.
+ *
+ * Because of this, it goes at the front of the list of inline refs, and thus
+ * must have a lower type value than any other inline ref type (to satisfy the
+ * disk format rule that inline refs have non-decreasing type).
+ */
+#define BTRFS_EXTENT_OWNER_REF_KEY 172
+
#define BTRFS_TREE_BLOCK_REF_KEY 176
#define BTRFS_EXTENT_DATA_REF_KEY 178
-#define BTRFS_EXTENT_REF_V0_KEY 180
+/*
+ * Obsolete key. Definition removed in 6.6, value may be reused in the future.
+ *
+ * #define BTRFS_EXTENT_REF_V0_KEY 180
+ */
#define BTRFS_SHARED_BLOCK_REF_KEY 182
@@ -207,6 +280,8 @@
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
+#define BTRFS_RAID_STRIPE_KEY 230
+
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@@ -270,7 +345,7 @@
#define BTRFS_PERSISTENT_ITEM_KEY 249
/*
- * Persistantly stores the device replace state in the device tree.
+ * Persistently stores the device replace state in the device tree.
* The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
*/
#define BTRFS_DEV_REPLACE_KEY 250
@@ -294,7 +369,8 @@
*/
#define BTRFS_STRING_ITEM_KEY 253
-
+/* Maximum metadata block size (nodesize) */
+#define BTRFS_MAX_METADATA_BLOCKSIZE 65536
/* 32 bytes in various csum fields */
#define BTRFS_CSUM_SIZE 32
@@ -325,6 +401,50 @@ enum btrfs_csum_type {
#define BTRFS_FT_SYMLINK 7
#define BTRFS_FT_XATTR 8
#define BTRFS_FT_MAX 9
+/* Directory contains encrypted data */
+#define BTRFS_FT_ENCRYPTED 0x80
+
+static inline __u8 btrfs_dir_flags_to_ftype(__u8 flags)
+{
+ return flags & ~BTRFS_FT_ENCRYPTED;
+}
+
+/*
+ * Inode flags
+ */
+#define BTRFS_INODE_NODATASUM (1U << 0)
+#define BTRFS_INODE_NODATACOW (1U << 1)
+#define BTRFS_INODE_READONLY (1U << 2)
+#define BTRFS_INODE_NOCOMPRESS (1U << 3)
+#define BTRFS_INODE_PREALLOC (1U << 4)
+#define BTRFS_INODE_SYNC (1U << 5)
+#define BTRFS_INODE_IMMUTABLE (1U << 6)
+#define BTRFS_INODE_APPEND (1U << 7)
+#define BTRFS_INODE_NODUMP (1U << 8)
+#define BTRFS_INODE_NOATIME (1U << 9)
+#define BTRFS_INODE_DIRSYNC (1U << 10)
+#define BTRFS_INODE_COMPRESS (1U << 11)
+
+#define BTRFS_INODE_ROOT_ITEM_INIT (1U << 31)
+
+#define BTRFS_INODE_FLAG_MASK \
+ (BTRFS_INODE_NODATASUM | \
+ BTRFS_INODE_NODATACOW | \
+ BTRFS_INODE_READONLY | \
+ BTRFS_INODE_NOCOMPRESS | \
+ BTRFS_INODE_PREALLOC | \
+ BTRFS_INODE_SYNC | \
+ BTRFS_INODE_IMMUTABLE | \
+ BTRFS_INODE_APPEND | \
+ BTRFS_INODE_NODUMP | \
+ BTRFS_INODE_NOATIME | \
+ BTRFS_INODE_DIRSYNC | \
+ BTRFS_INODE_COMPRESS | \
+ BTRFS_INODE_ROOT_ITEM_INIT)
+
+#define BTRFS_INODE_RO_VERITY (1U << 0)
+
+#define BTRFS_INODE_RO_FLAG_MASK (BTRFS_INODE_RO_VERITY)
/*
* The key defines the order in the tree, and so it also defines (optimal)
@@ -355,6 +475,109 @@ struct btrfs_key {
__u64 offset;
} __attribute__ ((__packed__));
+/*
+ * Every tree block (leaf or node) starts with this header.
+ */
+struct btrfs_header {
+ /* These first four must match the super block */
+ __u8 csum[BTRFS_CSUM_SIZE];
+ /* FS specific uuid */
+ __u8 fsid[BTRFS_FSID_SIZE];
+ /* Which block this node is supposed to live in */
+ __le64 bytenr;
+ __le64 flags;
+
+ /* Allowed to be different from the super from here on down */
+ __u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+ __le64 generation;
+ __le64 owner;
+ __le32 nritems;
+ __u8 level;
+} __attribute__ ((__packed__));
+
+/*
+ * This is a very generous portion of the super block, giving us room to
+ * translate 14 chunks with 3 stripes each.
+ */
+#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048
+
+/*
+ * Just in case we somehow lose the roots and are not able to mount, we store
+ * an array of the roots from previous transactions in the super.
+ */
+#define BTRFS_NUM_BACKUP_ROOTS 4
+struct btrfs_root_backup {
+ __le64 tree_root;
+ __le64 tree_root_gen;
+
+ __le64 chunk_root;
+ __le64 chunk_root_gen;
+
+ __le64 extent_root;
+ __le64 extent_root_gen;
+
+ __le64 fs_root;
+ __le64 fs_root_gen;
+
+ __le64 dev_root;
+ __le64 dev_root_gen;
+
+ __le64 csum_root;
+ __le64 csum_root_gen;
+
+ __le64 total_bytes;
+ __le64 bytes_used;
+ __le64 num_devices;
+ /* future */
+ __le64 unused_64[4];
+
+ __u8 tree_root_level;
+ __u8 chunk_root_level;
+ __u8 extent_root_level;
+ __u8 fs_root_level;
+ __u8 dev_root_level;
+ __u8 csum_root_level;
+ /* future and to align */
+ __u8 unused_8[10];
+} __attribute__ ((__packed__));
+
+/*
+ * A leaf is full of items. offset and size tell us where to find the item in
+ * the leaf (relative to the start of the data area)
+ */
+struct btrfs_item {
+ struct btrfs_disk_key key;
+ __le32 offset;
+ __le32 size;
+} __attribute__ ((__packed__));
+
+/*
+ * Leaves have an item area and a data area:
+ * [item0, item1....itemN] [free space] [dataN...data1, data0]
+ *
+ * The data is separate from the items to get the keys closer together during
+ * searches.
+ */
+struct btrfs_leaf {
+ struct btrfs_header header;
+ struct btrfs_item items[];
+} __attribute__ ((__packed__));
+
+/*
+ * All non-leaf blocks are nodes, they hold only keys and pointers to other
+ * blocks.
+ */
+struct btrfs_key_ptr {
+ struct btrfs_disk_key key;
+ __le64 blockptr;
+ __le64 generation;
+} __attribute__ ((__packed__));
+
+struct btrfs_node {
+ struct btrfs_header header;
+ struct btrfs_key_ptr ptrs[];
+} __attribute__ ((__packed__));
+
struct btrfs_dev_item {
/* the internal btrfs device id */
__le64 devid;
@@ -438,6 +661,69 @@ struct btrfs_chunk {
/* additional stripes go here */
} __attribute__ ((__packed__));
+/*
+ * The super block basically lists the main trees of the FS.
+ */
+struct btrfs_super_block {
+ /* The first 4 fields must match struct btrfs_header */
+ __u8 csum[BTRFS_CSUM_SIZE];
+ /* FS specific UUID, visible to user */
+ __u8 fsid[BTRFS_FSID_SIZE];
+ /* This block number */
+ __le64 bytenr;
+ __le64 flags;
+
+	/* Allowed to be different from the btrfs_header from here on down */
+ __le64 magic;
+ __le64 generation;
+ __le64 root;
+ __le64 chunk_root;
+ __le64 log_root;
+
+ /*
+	 * This member has never been used since the very beginning, thus
+	 * it's always 0 regardless of kernel version. We always use
+	 * generation + 1 to read the log tree root, so this member is
+	 * marked deprecated.
+ */
+ __le64 __unused_log_root_transid;
+ __le64 total_bytes;
+ __le64 bytes_used;
+ __le64 root_dir_objectid;
+ __le64 num_devices;
+ __le32 sectorsize;
+ __le32 nodesize;
+ __le32 __unused_leafsize;
+ __le32 stripesize;
+ __le32 sys_chunk_array_size;
+ __le64 chunk_root_generation;
+ __le64 compat_flags;
+ __le64 compat_ro_flags;
+ __le64 incompat_flags;
+ __le16 csum_type;
+ __u8 root_level;
+ __u8 chunk_root_level;
+ __u8 log_root_level;
+ struct btrfs_dev_item dev_item;
+
+ char label[BTRFS_LABEL_SIZE];
+
+ __le64 cache_generation;
+ __le64 uuid_tree_generation;
+
+ /* The UUID written into btree blocks */
+ __u8 metadata_uuid[BTRFS_FSID_SIZE];
+
+ __u64 nr_global_roots;
+
+ /* Future expansion */
+ __le64 reserved[27];
+ __u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
+ struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
+
+ /* Padded to 4096 bytes */
+ __u8 padding[565];
+} __attribute__ ((__packed__));
+
#define BTRFS_FREE_SPACE_EXTENT 1
#define BTRFS_FREE_SPACE_BITMAP 2
@@ -454,6 +740,30 @@ struct btrfs_free_space_header {
__le64 num_bitmaps;
} __attribute__ ((__packed__));
+struct btrfs_raid_stride {
+ /* The id of device this raid extent lives on. */
+ __le64 devid;
+ /* The physical location on disk. */
+ __le64 physical;
+} __attribute__ ((__packed__));
+
+/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
+#define BTRFS_STRIPE_RAID0 1
+#define BTRFS_STRIPE_RAID1 2
+#define BTRFS_STRIPE_DUP 3
+#define BTRFS_STRIPE_RAID10 4
+#define BTRFS_STRIPE_RAID5 5
+#define BTRFS_STRIPE_RAID6 6
+#define BTRFS_STRIPE_RAID1C3 7
+#define BTRFS_STRIPE_RAID1C4 8
+
+struct btrfs_stripe_extent {
+ __u8 encoding;
+ __u8 reserved[7];
+ /* An array of raid strides this stripe is composed of. */
+ struct btrfs_raid_stride strides[];
+} __attribute__ ((__packed__));
+
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@@ -492,6 +802,14 @@ struct btrfs_extent_item_v0 {
/* use full backrefs for extent pointers in the block */
#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8)
+#define BTRFS_BACKREF_REV_MAX 256
+#define BTRFS_BACKREF_REV_SHIFT 56
+#define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \
+ BTRFS_BACKREF_REV_SHIFT)
+
+#define BTRFS_OLD_BACKREF_REV 0
+#define BTRFS_MIXED_BACKREF_REV 1
+
/*
* this flag is only used internally by scrub and may be changed at any time
* it is only declared here to avoid collisions
@@ -514,6 +832,10 @@ struct btrfs_shared_data_ref {
__le32 count;
} __attribute__ ((__packed__));
+struct btrfs_extent_owner_ref {
+ __le64 root_id;
+} __attribute__ ((__packed__));
+
struct btrfs_extent_inline_ref {
__u8 type;
__le64 offset;
@@ -541,7 +863,7 @@ struct btrfs_inode_extref {
__le64 parent_objectid;
__le64 index;
__le16 name_len;
- __u8 name[0];
+ __u8 name[];
/* name goes here */
} __attribute__ ((__packed__));
@@ -645,6 +967,15 @@ struct btrfs_root_item {
} __attribute__ ((__packed__));
/*
+ * Btrfs root item used to be smaller than current size. The old format ends
+ * at where member generation_v2 is.
+ */
+static inline __u32 btrfs_legacy_root_item_size(void)
+{
+ return offsetof(struct btrfs_root_item, generation_v2);
+}
+
+/*
* this is used for both forward and backward root refs
*/
struct btrfs_root_ref {
@@ -837,19 +1168,6 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
-enum btrfs_raid_types {
- BTRFS_RAID_RAID10,
- BTRFS_RAID_RAID1,
- BTRFS_RAID_DUP,
- BTRFS_RAID_RAID0,
- BTRFS_RAID_SINGLE,
- BTRFS_RAID_RAID5,
- BTRFS_RAID_RAID6,
- BTRFS_RAID_RAID1C3,
- BTRFS_RAID_RAID1C4,
- BTRFS_NR_RAID_TYPES
-};
-
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
BTRFS_BLOCK_GROUP_METADATA)
@@ -935,6 +1253,18 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+/*
+ * Whether or not this filesystem is using simple quotas. Not exactly the
+ * incompat bit, because we support using simple quotas, disabling it, then
+ * going back to full qgroup quotas.
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3)
+
+#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
+ BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \
+ BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
+
#define BTRFS_QGROUP_STATUS_VERSION 1
struct btrfs_qgroup_status_item {
@@ -955,6 +1285,15 @@ struct btrfs_qgroup_status_item {
* of the scan. It contains a logical address
*/
__le64 rescan;
+
+ /*
+ * The generation when quotas were last enabled. Used by simple quotas to
+ * avoid decrementing when freeing an extent that was written before
+ * enable.
+ *
+ * Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
+ */
+ __le64 enable_gen;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {
@@ -976,4 +1315,16 @@ struct btrfs_qgroup_limit_item {
__le64 rsv_excl;
} __attribute__ ((__packed__));
+struct btrfs_verity_descriptor_item {
+ /* Size of the verity descriptor in bytes */
+ __le64 size;
+ /*
+ * When we implement support for fscrypt, we will need to encrypt the
+ * Merkle tree for encrypted verity files. These 128 bits are for the
+ * eventual storage of an fscrypt initialization vector.
+ */
+ __le64 reserved[2];
+ __u8 encryption;
+} __attribute__ ((__packed__));
+
#endif /* _BTRFS_CTREE_H_ */
diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h
index 2199adc6a6c2..80aa5c41a763 100644
--- a/include/uapi/linux/byteorder/big_endian.h
+++ b/include/uapi/linux/byteorder/big_endian.h
@@ -9,6 +9,7 @@
#define __BIG_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
index 601c904fd5cd..cd98982e7523 100644
--- a/include/uapi/linux/byteorder/little_endian.h
+++ b/include/uapi/linux/byteorder/little_endian.h
@@ -9,6 +9,7 @@
#define __LITTLE_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h
new file mode 100644
index 000000000000..78caa73e5343
--- /dev/null
+++ b/include/uapi/linux/cachefiles.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_CACHEFILES_H
+#define _LINUX_CACHEFILES_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Fscache ensures that the maximum length of cookie key is 255. The volume key
+ * is controlled by netfs, and generally no bigger than 255.
+ */
+#define CACHEFILES_MSG_MAX_SIZE 1024
+
+enum cachefiles_opcode {
+ CACHEFILES_OP_OPEN,
+ CACHEFILES_OP_CLOSE,
+ CACHEFILES_OP_READ,
+};
+
+/*
+ * Message Header
+ *
+ * @msg_id a unique ID identifying this message
+ * @opcode	message type, CACHEFILES_OP_*
+ * @len message length, including message header and following data
+ * @object_id a unique ID identifying a cache file
+ * @data message type specific payload
+ */
+struct cachefiles_msg {
+ __u32 msg_id;
+ __u32 opcode;
+ __u32 len;
+ __u32 object_id;
+ __u8 data[];
+};
+
+/*
+ * @data contains the volume_key followed directly by the cookie_key. volume_key
+ * is a NUL-terminated string; @volume_key_size indicates the size of the volume
+ * key in bytes. cookie_key is binary data, which is netfs specific;
+ * @cookie_key_size indicates the size of the cookie key in bytes.
+ *
+ * @fd identifies an anon_fd referring to the cache file.
+ */
+struct cachefiles_open {
+ __u32 volume_key_size;
+ __u32 cookie_key_size;
+ __u32 fd;
+ __u32 flags;
+ __u8 data[];
+};
+
+/*
+ * @off indicates the starting offset of the requested file range
+ * @len indicates the length of the requested file range
+ */
+struct cachefiles_read {
+ __u64 off;
+ __u64 len;
+};
+
+/*
+ * Reply for READ request
+ * @arg for this ioctl is the @id field of READ request.
+ */
+#define CACHEFILES_IOC_READ_COMPLETE _IOW(0x98, 1, int)
+
+#endif
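A hypothetical daemon-side sketch tying the pieces together (devfd and anon_fd are assumed to be the open cachefiles device and the anon fd obtained from a prior OPEN):

#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cachefiles.h>

static void handle_one(int devfd, int anon_fd)
{
	char buf[CACHEFILES_MSG_MAX_SIZE];
	ssize_t n = read(devfd, buf, sizeof(buf));
	struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;

	if (n < (ssize_t)sizeof(*msg))
		return;
	switch (msg->opcode) {
	case CACHEFILES_OP_OPEN:	/* msg->data holds a cachefiles_open */
		break;
	case CACHEFILES_OP_READ:
		/* Fetch the range in msg->data (a cachefiles_read), write it
		 * to anon_fd, then acknowledge completion: */
		ioctl(anon_fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
		break;
	case CACHEFILES_OP_CLOSE:
		break;
	}
}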
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 6a6d2c7655ff..e78cbd85ce7c 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -48,6 +48,7 @@
#include <linux/types.h>
#include <linux/socket.h>
+#include <linux/stddef.h> /* for offsetof */
/* controller area network (CAN) kernel definitions */
@@ -60,6 +61,7 @@
#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
#define CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */
#define CAN_ERR_MASK 0x1FFFFFFFU /* omit EFF, RTR, ERR flags */
+#define CANXL_PRIO_MASK CAN_SFF_MASK /* 11 bit priority mask */
/*
* Controller Area Network Identifier structure
@@ -73,6 +75,7 @@ typedef __u32 canid_t;
#define CAN_SFF_ID_BITS 11
#define CAN_EFF_ID_BITS 29
+#define CANXL_PRIO_BITS CAN_SFF_ID_BITS
/*
* Controller Area Network Error Message Frame Mask structure
@@ -84,37 +87,57 @@ typedef __u32 can_err_mask_t;
/* CAN payload length and DLC definitions according to ISO 11898-1 */
#define CAN_MAX_DLC 8
+#define CAN_MAX_RAW_DLC 15
#define CAN_MAX_DLEN 8
/* CAN FD payload length and DLC definitions according to ISO 11898-7 */
#define CANFD_MAX_DLC 15
#define CANFD_MAX_DLEN 64
+/*
+ * CAN XL payload length and DLC definitions according to ISO 11898-1
+ * CAN XL DLC ranges from 0 .. 2047 => data length from 1 .. 2048 byte
+ */
+#define CANXL_MIN_DLC 0
+#define CANXL_MAX_DLC 2047
+#define CANXL_MAX_DLC_MASK 0x07FF
+#define CANXL_MIN_DLEN 1
+#define CANXL_MAX_DLEN 2048
+
/**
- * struct can_frame - basic CAN frame structure
- * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
- * @can_dlc: frame payload length in byte (0 .. 8) aka data length code
- * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
- * mapping of the 'data length code' to the real payload length
- * @__pad: padding
- * @__res0: reserved / padding
- * @__res1: reserved / padding
- * @data: CAN frame payload (up to 8 byte)
+ * struct can_frame - Classical CAN frame structure (aka CAN 2.0B)
+ * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
+ * @len: CAN frame payload length in byte (0 .. 8)
+ * @can_dlc: deprecated name for CAN frame payload length in byte (0 .. 8)
+ * @__pad: padding
+ * @__res0: reserved / padding
+ * @len8_dlc: optional DLC value (9 .. 15) at 8 byte payload length
+ * len8_dlc contains values from 9 .. 15 when the payload length is
+ * 8 bytes but the DLC value (see ISO 11898-1) is greater than 8.
+ * The CAN_CTRLMODE_CC_LEN8_DLC flag has to be enabled in the CAN driver.
+ * @data: CAN frame payload (up to 8 byte)
*/
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
- __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
- __u8 __pad; /* padding */
- __u8 __res0; /* reserved / padding */
- __u8 __res1; /* reserved / padding */
- __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
+ union {
+ /* CAN frame payload length in byte (0 .. CAN_MAX_DLEN)
+ * was previously named can_dlc so we need to carry that
+ * name for legacy support
+ */
+ __u8 len;
+ __u8 can_dlc; /* deprecated */
+ } __attribute__((packed)); /* disable padding added in some ABIs */
+ __u8 __pad; /* padding */
+ __u8 __res0; /* reserved / padding */
+ __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
+ __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
};
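
The anonymous union above is worth a second look: len and can_dlc alias the same byte, so existing code keeps compiling while new code can use the clearer name. A minimal sketch:

#include <string.h>
#include <linux/can.h>

static void fill_classic_frame(struct can_frame *cf)
{
	memset(cf, 0, sizeof(*cf));
	cf->can_id = 0x123;
	cf->len = 5;		/* preferred name; cf->can_dlc = 5 would
				 * set the very same byte (deprecated) */
	memcpy(cf->data, "hello", 5);
}
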
/*
* defined bits for canfd_frame.flags
*
- * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
- * be set in the CAN frame bitstream on the wire. The EDL bit switch turns
+ * The use of struct canfd_frame implies the FD Frame (FDF) bit to
+ * be set in the CAN frame bitstream on the wire. The FDF bit switch turns
* the CAN controllers bitstream processor into the CAN FD mode which creates
* two new options within the CAN FD frame specification:
*
@@ -125,9 +148,18 @@ struct can_frame {
* controller only the CANFD_BRS bit is relevant for real CAN controllers when
* building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
* sense for virtual CAN interfaces to test applications with echoed frames.
+ *
+ * The struct can_frame and struct canfd_frame intentionally share the same
+ * layout to be able to write CAN frame content into a CAN FD frame structure.
+ * When this is done the former differentiation via CAN_MTU / CANFD_MTU gets
+ * lost. CANFD_FDF allows programmers to mark CAN FD frames in the case of
+ * using struct canfd_frame for mixed CAN / CAN FD content (dual use).
+ * Since the introduction of CAN XL the CANFD_FDF flag is set in all CAN FD
+ * frame structures provided by the CAN subsystem of the Linux kernel.
*/
#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
+#define CANFD_FDF 0x04 /* mark CAN FD for dual use of struct canfd_frame */
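
A hedged sketch of the dual-use situation described above: on a CAN_RAW socket with CAN_RAW_FD_FRAMES enabled, a read() returns either CAN_MTU or CANFD_MTU bytes, and once frames are copied around in struct canfd_frame buffers the CANFD_FDF flag is what keeps them distinguishable:

#include <unistd.h>
#include <linux/can.h>

/* 's' is assumed to be a bound CAN_RAW socket with CAN_RAW_FD_FRAMES
 * enabled; returns 0 for Classical CAN, 1 for CAN FD, -1 on error. */
static int read_and_classify(int s, struct canfd_frame *fr)
{
	ssize_t n = read(s, fr, sizeof(*fr));

	if (n == CAN_MTU && !(fr->flags & CANFD_FDF))
		return 0;	/* Classical CAN frame */
	if (n == CANFD_MTU || (fr->flags & CANFD_FDF))
		return 1;	/* CAN FD frame */
	return -1;
}
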
/**
* struct canfd_frame - CAN flexible data rate frame structure
@@ -147,8 +179,51 @@ struct canfd_frame {
__u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
};
+/*
+ * defined bits for canxl_frame.flags
+ *
+ * The canxl_frame.flags element contains two bits CANXL_XLF and CANXL_SEC
+ * and shares the relative position of the struct can[fd]_frame.len element.
+ * The CANXL_XLF bit ALWAYS needs to be set to indicate a valid CAN XL frame.
+ * As a side effect setting this bit intentionally breaks the length checks
+ * for Classical CAN and CAN FD frames.
+ *
+ * Undefined bits in canxl_frame.flags are reserved and shall be set to zero.
+ */
+#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
+#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
+
+/* the 8-bit VCID is optionally placed in the canxl_frame.prio element */
+#define CANXL_VCID_OFFSET 16 /* bit offset of VCID in prio element */
+#define CANXL_VCID_VAL_MASK 0xFFUL /* VCID is an 8-bit value */
+#define CANXL_VCID_MASK (CANXL_VCID_VAL_MASK << CANXL_VCID_OFFSET)
+
+/**
+ * struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
+ * @prio: 11 bit arbitration priority with zeroed CAN_*_FLAG flags / VCID
+ * @flags: additional flags for CAN XL
+ * @sdt: SDU (service data unit) type
+ * @len: frame payload length in byte (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
+ * @af: acceptance field
+ * @data: CAN XL frame payload (CANXL_MIN_DLEN .. CANXL_MAX_DLEN byte)
+ *
+ * @prio shares the same position as @can_id from struct can[fd]_frame.
+ */
+struct canxl_frame {
+ canid_t prio; /* 11 bit priority for arbitration / 8 bit VCID */
+ __u8 flags; /* additional flags for CAN XL */
+ __u8 sdt; /* SDU (service data unit) type */
+ __u16 len; /* frame payload length in byte */
+ __u32 af; /* acceptance field */
+ __u8 data[CANXL_MAX_DLEN];
+};
+
#define CAN_MTU (sizeof(struct can_frame))
#define CANFD_MTU (sizeof(struct canfd_frame))
+#define CANXL_MTU (sizeof(struct canxl_frame))
+#define CANXL_HDR_SIZE (offsetof(struct canxl_frame, data))
+#define CANXL_MIN_MTU (CANXL_HDR_SIZE + 64)
+#define CANXL_MAX_MTU CANXL_MTU
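
A small consistency check along the lines of the comments above (assuming, as a sketch, that a CAN XL frame occupies the header plus len payload bytes):

#include <stddef.h>
#include <sys/types.h>
#include <linux/can.h>

/* Returns the number of bytes a valid XL frame occupies, or -1. */
static ssize_t canxl_required_size(const struct canxl_frame *xl)
{
	if (!(xl->flags & CANXL_XLF))
		return -1;	/* XLF is mandatory for CAN XL */
	if (xl->len < CANXL_MIN_DLEN || xl->len > CANXL_MAX_DLEN)
		return -1;
	return CANXL_HDR_SIZE + xl->len;	/* <= CANXL_MAX_MTU */
}
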
/* particular protocols of the protocol family PF_CAN */
#define CAN_RAW 1 /* RAW sockets */
@@ -215,6 +290,5 @@ struct can_filter {
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
-#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* !_UAPI_CAN_H */
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index dd2b925b09ac..f1e45f533a72 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -71,7 +71,7 @@ struct bcm_msg_head {
struct bcm_timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
- struct can_frame frames[0];
+ struct can_frame frames[];
};
enum {
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index 34633283de64..acc1ac393d2a 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -57,6 +57,8 @@
#define CAN_ERR_BUSOFF 0x00000040U /* bus off */
#define CAN_ERR_BUSERROR 0x00000080U /* bus error (may flood!) */
#define CAN_ERR_RESTARTED 0x00000100U /* controller restarted */
+#define CAN_ERR_CNT 0x00000200U /* TX error counter / data[6] */
+ /* RX error counter / data[7] */
/* arbitration lost in bit ... / data[0] */
#define CAN_ERR_LOSTARB_UNSPEC 0x00 /* unspecified */
@@ -120,6 +122,22 @@
#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */
#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */
-/* controller specific additional information / data[5..7] */
+/* data[5] is reserved (do not use) */
+
+/* TX error counter / data[6] */
+/* RX error counter / data[7] */
+
+/* CAN state thresholds
+ *
+ * Error counter Error state
+ * -----------------------------------
+ * 0 - 95 Error-active
+ * 96 - 127 Error-warning
+ * 128 - 255 Error-passive
+ * 256 and greater Bus-off
+ */
+#define CAN_ERROR_WARNING_THRESHOLD 96
+#define CAN_ERROR_PASSIVE_THRESHOLD 128
+#define CAN_BUS_OFF_THRESHOLD 256
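
The thresholds translate directly into a classifier; a small helper mirroring the table above:

#include <linux/can/error.h>

/* Map a TX/RX error counter (data[6] / data[7] of an error message
 * frame carrying CAN_ERR_CNT) to the bus state it implies. */
static const char *can_state_name(unsigned int cnt)
{
	if (cnt >= CAN_BUS_OFF_THRESHOLD)
		return "bus-off";
	if (cnt >= CAN_ERROR_PASSIVE_THRESHOLD)
		return "error-passive";
	if (cnt >= CAN_ERROR_WARNING_THRESHOLD)
		return "error-warning";
	return "error-active";
}
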
#endif /* _UAPI_CAN_ERROR_H */
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index c2190bbe21d8..e4f0957554f3 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -98,8 +98,8 @@ enum {
/* CAN frame elements that are affected by curr. 3 CAN frame modifications */
#define CGW_MOD_ID 0x01
-#define CGW_MOD_DLC 0x02 /* contains the data length in bytes */
-#define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD length representation */
+#define CGW_MOD_DLC 0x02 /* Classical CAN data length code */
+#define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD (plain) data length */
#define CGW_MOD_DATA 0x04
#define CGW_MOD_FLAGS 0x08 /* CAN FD flags */
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
new file mode 100644
index 000000000000..6cde62371b6f
--- /dev/null
+++ b/include/uapi/linux/can/isotp.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * linux/can/isotp.h
+ *
+ * Definitions for isotp CAN sockets (ISO 15765-2:2016)
+ *
+ * Copyright (c) 2020 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _UAPI_CAN_ISOTP_H
+#define _UAPI_CAN_ISOTP_H
+
+#include <linux/types.h>
+#include <linux/can.h>
+
+#define SOL_CAN_ISOTP (SOL_CAN_BASE + CAN_ISOTP)
+
+/* for socket options affecting the socket (not the global system) */
+
+#define CAN_ISOTP_OPTS 1 /* pass struct can_isotp_options */
+
+#define CAN_ISOTP_RECV_FC 2 /* pass struct can_isotp_fc_options */
+
+/* sockopts to force stmin timer values for protocol regression tests */
+
+#define CAN_ISOTP_TX_STMIN 3 /* pass __u32 value in nanoseconds */
+ /* use this time instead of the value */
+ /* provided in FC from the receiver */
+
+#define CAN_ISOTP_RX_STMIN 4 /* pass __u32 value in nanoseconds */
+ /* ignore received CF frames whose */
+ /* timestamps differ by less than val */
+
+#define CAN_ISOTP_LL_OPTS 5 /* pass struct can_isotp_ll_options */
+
+struct can_isotp_options {
+
+ __u32 flags; /* set flags for isotp behaviour. */
+ /* __u32 value : flags see below */
+
+ __u32 frame_txtime; /* frame transmission time (N_As/N_Ar) */
+ /* __u32 value : time in nanoseconds */
+
+ __u8 ext_address; /* set address for extended addressing */
+ /* __u8 value : extended address */
+
+ __u8 txpad_content; /* set content of padding byte (tx) */
+ /* __u8 value : content on tx path */
+
+ __u8 rxpad_content; /* set content of padding byte (rx) */
+ /* __u8 value : content on rx path */
+
+ __u8 rx_ext_address; /* set address for extended addressing */
+ /* __u8 value : extended address (rx) */
+};
+
+struct can_isotp_fc_options {
+
+ __u8 bs; /* blocksize provided in FC frame */
+ /* __u8 value : blocksize. 0 = off */
+
+ __u8 stmin; /* separation time provided in FC frame */
+ /* __u8 value : */
+ /* 0x00 - 0x7F : 0 - 127 ms */
+ /* 0x80 - 0xF0 : reserved */
+ /* 0xF1 - 0xF9 : 100 us - 900 us */
+ /* 0xFA - 0xFF : reserved */
+
+ __u8 wftmax; /* max. number of wait frame transmiss. */
+ /* __u8 value : 0 = omit FC N_PDU WT */
+};
+
+struct can_isotp_ll_options {
+
+ __u8 mtu; /* generated & accepted CAN frame type */
+ /* __u8 value : */
+ /* CAN_MTU (16) -> standard CAN 2.0 */
+ /* CANFD_MTU (72) -> CAN FD frame */
+
+ __u8 tx_dl; /* tx link layer data length in bytes */
+ /* (configured maximum payload length) */
+ /* __u8 value : 8,12,16,20,24,32,48,64 */
+ /* => rx path supports all LL_DL values */
+
+ __u8 tx_flags; /* set into struct canfd_frame.flags */
+ /* at frame creation: e.g. CANFD_BRS */
+ /* Obsolete when the BRS flag is fixed */
+ /* by the CAN netdriver configuration */
+};
+
+/* flags for isotp behaviour */
+
+#define CAN_ISOTP_LISTEN_MODE 0x0001 /* listen only (do not send FC) */
+#define CAN_ISOTP_EXTEND_ADDR 0x0002 /* enable extended addressing */
+#define CAN_ISOTP_TX_PADDING 0x0004 /* enable CAN frame padding tx path */
+#define CAN_ISOTP_RX_PADDING 0x0008 /* enable CAN frame padding rx path */
+#define CAN_ISOTP_CHK_PAD_LEN 0x0010 /* check received CAN frame padding */
+#define CAN_ISOTP_CHK_PAD_DATA 0x0020 /* check received CAN frame padding */
+#define CAN_ISOTP_HALF_DUPLEX 0x0040 /* half duplex error state handling */
+#define CAN_ISOTP_FORCE_TXSTMIN 0x0080 /* ignore stmin from received FC */
+#define CAN_ISOTP_FORCE_RXSTMIN 0x0100 /* ignore CFs depending on rx stmin */
+#define CAN_ISOTP_RX_EXT_ADDR 0x0200 /* different rx extended addressing */
+#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
+#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
+#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
+#define CAN_ISOTP_DYN_FC_PARMS 0x2000 /* dynamic FC parameters BS/STmin */
+
+/* protocol machine default values */
+
+#define CAN_ISOTP_DEFAULT_FLAGS 0
+#define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00
+#define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */
+#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 microseconds */
+#define CAN_ISOTP_DEFAULT_RECV_BS 0
+#define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00
+#define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0
+
+/*
+ * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
+ *
+ * We can strongly assume that the Linux kernel implementation of
+ * CAN_ISOTP is capable of running with BS=0, STmin=0 and WFTmax=0.
+ * But as we want to be able to behave like a commonly available ECU,
+ * these default settings can be changed via sockopts.
+ * For that reason the STmin value is intentionally _not_ checked for
+ * consistency and copied directly into the flow control (FC) frame.
+ */
+
+/* link layer default values => make use of Classical CAN frames */
+
+#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU
+#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN
+#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0
+
+/*
+ * The CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value as
+ * running without an N_As value only makes sense for isotp
+ * implementation tests. As user space applications usually do not set
+ * the frame_txtime element of struct can_isotp_options, the new
+ * in-kernel default would very likely be overwritten with zero when
+ * the sockopt() CAN_ISOTP_OPTS is invoked.
+ * To make sure that an N_As value of zero is only set intentionally,
+ * the value '0' is now interpreted as 'do not change the current value'.
+ * When a frame_txtime of zero is required for testing purposes, the
+ * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime.
+ */
+#define CAN_ISOTP_FRAME_TXTIME_ZERO 0xFFFFFFFF
+
+#endif /* !_UAPI_CAN_ISOTP_H */
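
Putting the pieces together, a hedged sketch of a typical socket setup (the "can0" interface name and the 0x7E0/0x7E8 addressing are illustrative; error handling trimmed):

#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

static int open_isotp_can0(void)
{
	struct can_isotp_options opts;
	struct sockaddr_can addr;
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

	if (s < 0)
		return -1;

	memset(&opts, 0, sizeof(opts));
	opts.flags = CAN_ISOTP_TX_PADDING | CAN_ISOTP_RX_PADDING;
	opts.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
	/* frame_txtime stays 0 == "keep the in-kernel default", see above */
	setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = (int)if_nametoindex("can0");
	addr.can_addr.tp.tx_id = 0x7E0;	/* illustrative UDS tester ids */
	addr.can_addr.tp.rx_id = 0x7E8;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));
	return s;
}
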
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
index df6e821075c1..38936460f668 100644
--- a/include/uapi/linux/can/j1939.h
+++ b/include/uapi/linux/can/j1939.h
@@ -78,11 +78,20 @@ enum {
enum {
J1939_NLA_PAD,
J1939_NLA_BYTES_ACKED,
+ J1939_NLA_TOTAL_SIZE,
+ J1939_NLA_PGN,
+ J1939_NLA_SRC_NAME,
+ J1939_NLA_DEST_NAME,
+ J1939_NLA_SRC_ADDR,
+ J1939_NLA_DEST_ADDR,
};
enum {
J1939_EE_INFO_NONE,
J1939_EE_INFO_TX_ABORT,
+ J1939_EE_INFO_RX_RTS,
+ J1939_EE_INFO_RX_DPO,
+ J1939_EE_INFO_RX_ABORT,
};
struct j1939_filter {
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 6f598b73839e..02ec32d69474 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -100,6 +100,9 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
+#define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */
+#define CAN_CTRLMODE_TDC_AUTO 0x200 /* CAN transceiver automatically calculates TDCV */
+#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* TDCV is manually set up by user */
/*
* CAN device statistics
@@ -133,10 +136,48 @@ enum {
IFLA_CAN_BITRATE_CONST,
IFLA_CAN_DATA_BITRATE_CONST,
IFLA_CAN_BITRATE_MAX,
- __IFLA_CAN_MAX
+ IFLA_CAN_TDC,
+ IFLA_CAN_CTRLMODE_EXT,
+
+ /* add new constants above here */
+ __IFLA_CAN_MAX,
+ IFLA_CAN_MAX = __IFLA_CAN_MAX - 1
};
-#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
+/*
+ * CAN FD Transmitter Delay Compensation (TDC)
+ *
+ * Please refer to struct can_tdc_const and can_tdc in
+ * include/linux/can/bittiming.h for further details.
+ */
+enum {
+ IFLA_CAN_TDC_UNSPEC,
+ IFLA_CAN_TDC_TDCV_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCV_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCO_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCO_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCF_MIN, /* u32 */
+ IFLA_CAN_TDC_TDCF_MAX, /* u32 */
+ IFLA_CAN_TDC_TDCV, /* u32 */
+ IFLA_CAN_TDC_TDCO, /* u32 */
+ IFLA_CAN_TDC_TDCF, /* u32 */
+
+ /* add new constants above here */
+ __IFLA_CAN_TDC,
+ IFLA_CAN_TDC_MAX = __IFLA_CAN_TDC - 1
+};
+
+/*
+ * IFLA_CAN_CTRLMODE_EXT nest: controller mode extended parameters
+ */
+enum {
+ IFLA_CAN_CTRLMODE_UNSPEC,
+ IFLA_CAN_CTRLMODE_SUPPORTED, /* u32 */
+
+ /* add new constants above here */
+ __IFLA_CAN_CTRLMODE,
+ IFLA_CAN_CTRLMODE_MAX = __IFLA_CAN_CTRLMODE - 1
+};
/* u16 termination range: 1..65535 Ohms */
#define CAN_TERMINATION_DISABLED 0
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 6a11d308eb5c..e024d896e278 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -49,6 +49,11 @@
#include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
+
+enum {
+ SCM_CAN_RAW_ERRQUEUE = 1,
+};
/* for socket options affecting the socket (not the global system) */
@@ -59,6 +64,23 @@ enum {
CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
+ CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
+ CAN_RAW_XL_VCID_OPTS, /* CAN XL VCID configuration options */
+};
+
+/* configuration for CAN XL virtual CAN identifier (VCID) handling */
+struct can_raw_vcid_options {
+
+ __u8 flags; /* flags for vcid (filter) behaviour */
+ __u8 tx_vcid; /* VCID value set into canxl_frame.prio */
+ __u8 rx_vcid; /* VCID value for VCID filter */
+ __u8 rx_vcid_mask; /* VCID mask for VCID filter */
+
};
+/* can_raw_vcid_options.flags for CAN XL virtual CAN identifier handling */
+#define CAN_RAW_XL_VCID_TX_SET 0x01
+#define CAN_RAW_XL_VCID_TX_PASS 0x02
+#define CAN_RAW_XL_VCID_RX_FILTER 0x04
+
#endif /* !_UAPI_CAN_RAW_H */
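
A sketch of the VCID options in use ('s' is assumed to be a CAN_RAW socket with CAN_RAW_XL_FRAMES already enabled; VCID 0x42 is illustrative):

#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* Tag outgoing XL frames with VCID 0x42 and only accept incoming
 * XL frames that carry the same VCID. */
static int enable_vcid(int s)
{
	struct can_raw_vcid_options vcid;

	memset(&vcid, 0, sizeof(vcid));
	vcid.flags = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER;
	vcid.tx_vcid = 0x42;
	vcid.rx_vcid = 0x42;
	vcid.rx_vcid_mask = 0xff;
	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
			  &vcid, sizeof(vcid));
}
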
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 395dd0df8d08..5bb906098697 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -41,11 +41,12 @@ typedef struct __user_cap_header_struct {
int pid;
} __user *cap_user_header_t;
-typedef struct __user_cap_data_struct {
+struct __user_cap_data_struct {
__u32 effective;
__u32 permitted;
__u32 inheritable;
-} __user *cap_user_data_t;
+};
+typedef struct __user_cap_data_struct __user *cap_user_data_t;
#define VFS_CAP_REVISION_MASK 0xFF000000
@@ -243,7 +244,6 @@ struct vfs_ns_cap_data {
/* Allow examination and configuration of disk quotas */
/* Allow setting the domainname */
/* Allow setting the hostname */
-/* Allow calling bdflush() */
/* Allow mount() and umount(), setting up new smb connection */
/* Allow some autofs root ioctls */
/* Allow nfsservctl */
@@ -288,6 +288,8 @@ struct vfs_ns_cap_data {
processes and setting the scheduling algorithm used by another
process. */
/* Allow setting cpu affinity on other processes */
+/* Allow setting realtime ioprio class */
+/* Allow setting ioprio class on other processes */
#define CAP_SYS_NICE 23
@@ -333,7 +335,8 @@ struct vfs_ns_cap_data {
#define CAP_AUDIT_CONTROL 30
-/* Set or remove capabilities on files */
+/* Set or remove capabilities on files.
+ Map uid=0 into a child user namespace. */
#define CAP_SETFCAP 31
@@ -424,7 +427,7 @@ struct vfs_ns_cap_data {
*/
#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
-#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
+#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */
#endif /* _UAPI_LINUX_CAPABILITY_H */
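
Two of the changes above pay off together: the struct tag makes __user_cap_data_struct usable by name, and the 1U keeps CAP_TO_MASK(31) from shifting into the sign bit of a signed int. A small sketch:

#include <assert.h>
#include <linux/capability.h>

static void cap_mask_demo(void)
{
	/* VERSION_3 capability sets span two __u32 words */
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3] = { 0 };

	/* CAP_SETFCAP == 31: with a plain signed 1 this shift would hit
	 * the sign bit; with 1U it is a well-defined 0x80000000 */
	data[CAP_TO_INDEX(CAP_SETFCAP)].effective |= CAP_TO_MASK(CAP_SETFCAP);
	assert(data[0].effective == 0x80000000u);
}
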
diff --git a/include/uapi/linux/ccs.h b/include/uapi/linux/ccs.h
new file mode 100644
index 000000000000..2896d3bfdb5e
--- /dev/null
+++ b/include/uapi/linux/ccs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef __UAPI_CCS_H__
+#define __UAPI_CCS_H__
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_CCS_ANALOGUE_GAIN_M0 (V4L2_CID_USER_CCS_BASE + 1)
+#define V4L2_CID_CCS_ANALOGUE_GAIN_C0 (V4L2_CID_USER_CCS_BASE + 2)
+#define V4L2_CID_CCS_ANALOGUE_GAIN_M1 (V4L2_CID_USER_CCS_BASE + 3)
+#define V4L2_CID_CCS_ANALOGUE_GAIN_C1 (V4L2_CID_USER_CCS_BASE + 4)
+#define V4L2_CID_CCS_ANALOGUE_LINEAR_GAIN (V4L2_CID_USER_CCS_BASE + 5)
+#define V4L2_CID_CCS_ANALOGUE_EXPONENTIAL_GAIN (V4L2_CID_USER_CCS_BASE + 6)
+#define V4L2_CID_CCS_SHADING_CORRECTION (V4L2_CID_USER_CCS_BASE + 8)
+#define V4L2_CID_CCS_LUMINANCE_CORRECTION_LEVEL (V4L2_CID_USER_CCS_BASE + 9)
+
+#endif
diff --git a/include/uapi/linux/cdrom.h b/include/uapi/linux/cdrom.h
index 2817230148fd..011e594e4a0d 100644
--- a/include/uapi/linux/cdrom.h
+++ b/include/uapi/linux/cdrom.h
@@ -103,7 +103,7 @@
#define CDROMREADALL 0x5318 /* read all 2646 bytes */
/*
- * These ioctls are (now) only in ide-cd.c for controlling
+ * These ioctls were only in (now removed) ide-cd.c for controlling
* drive spindown time. They should be implemented in the
* Uniform driver, via generic packet commands, GPCMD_MODE_SELECT_10,
* GPCMD_MODE_SENSE_10 and the GPMODE_POWER_PAGE...
@@ -147,6 +147,8 @@
#define CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */
#define CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */
+#define CDROM_TIMED_MEDIA_CHANGE 0x5396 /* get the timestamp of the last media change */
+
/*******************************************************
* CDROM IOCTL structures
*******************************************************/
@@ -289,8 +291,28 @@ struct cdrom_generic_command
unsigned char data_direction;
int quiet;
int timeout;
- void __user *reserved[1]; /* unused, actually */
-};
+ union {
+ void __user *reserved[1]; /* unused, actually */
+ void __user *unused;
+ };
+};
+
+/* This struct is used by CDROM_TIMED_MEDIA_CHANGE */
+struct cdrom_timed_media_change_info {
+ __s64 last_media_change; /* Timestamp of the last detected media
+ * change in ms. May be set by caller,
+ * updated upon successful return of
+ * ioctl.
+ */
+ __u64 media_flags; /* Flags returned by ioctl to indicate
+ * media status.
+ */
+};
+#define MEDIA_CHANGED_FLAG 0x1 /* Last detected media change was more
+ * recent than last_media_change set by
+ * caller.
+ */
+/* other bits of media_flags available for future use */
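
A sketch of the new ioctl in use (device path assumed; note how last_media_change is both an input and an output):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

/* 'fd' is an open CD-ROM device such as /dev/sr0. Returns 1 if the
 * media changed since *last_seen_ms, 0 if not, -1 on error. */
static int media_changed_since(int fd, __s64 *last_seen_ms)
{
	struct cdrom_timed_media_change_info info;

	memset(&info, 0, sizeof(info));
	info.last_media_change = *last_seen_ms;
	if (ioctl(fd, CDROM_TIMED_MEDIA_CHANGE, &info) < 0)
		return -1;
	*last_seen_ms = info.last_media_change;
	return !!(info.media_flags & MEDIA_CHANGED_FLAG);
}
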
/*
* A CD-ROM physical sector size is 2048, 2052, 2056, 2324, 2332, 2336,
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 37590027b604..d58fa1cdcb08 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1568,6 +1568,20 @@ static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *
}
}
+static inline void cec_msg_set_audio_volume_level(struct cec_msg *msg,
+ __u8 audio_volume_level)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_AUDIO_VOLUME_LEVEL;
+ msg->msg[2] = audio_volume_level;
+}
+
+static inline void cec_ops_set_audio_volume_level(const struct cec_msg *msg,
+ __u8 *audio_volume_level)
+{
+ *audio_volume_level = msg->msg[2];
+}
+
/* Audio Rate Control Feature */
static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
@@ -1665,7 +1679,7 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
if (*audio_out_compensated == 3 && msg->len >= 7)
*audio_out_delay = msg->msg[6];
else
- *audio_out_delay = 0;
+ *audio_out_delay = 1;
}
static inline void cec_msg_request_current_latency(struct cec_msg *msg,
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 7d1a06c52469..b8e071abaea5 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -142,6 +142,26 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
msg->reply = msg->timeout = 0;
}
+/**
+ * cec_msg_recv_is_tx_result - return true if this message contains the
+ * result of an earlier non-blocking transmit
+ * @msg: the message structure from CEC_RECEIVE
+ */
+static inline int cec_msg_recv_is_tx_result(const struct cec_msg *msg)
+{
+ return msg->sequence && msg->tx_status && !msg->rx_status;
+}
+
+/**
+ * cec_msg_recv_is_rx_result - return true if this message contains the
+ * reply of an earlier non-blocking transmit
+ * @msg: the message structure from CEC_RECEIVE
+ */
+static inline int cec_msg_recv_is_rx_result(const struct cec_msg *msg)
+{
+ return msg->sequence && !msg->tx_status && msg->rx_status;
+}
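
A sketch of these helpers in a receive path ('fd' is assumed to be an open CEC adapter node; error handling trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

static const char *next_msg_kind(int fd)
{
	struct cec_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.timeout = 1000;	/* wait up to one second */
	if (ioctl(fd, CEC_RECEIVE, &msg))
		return "error";
	if (cec_msg_recv_is_tx_result(&msg))
		return "tx status of an earlier non-blocking transmit";
	if (cec_msg_recv_is_rx_result(&msg))
		return "reply to an earlier non-blocking transmit";
	return "other bus traffic";
}
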
+
/* cec_msg flags field */
#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
#define CEC_MSG_FL_RAW (1 << 1)
@@ -396,6 +416,7 @@ struct cec_drm_connector_info {
* associated with the CEC adapter.
* @type: connector type (if any)
* @drm: drm connector info
+ * @raw: array to pad the union
*/
struct cec_connector_info {
__u32 type;
@@ -453,7 +474,7 @@ struct cec_event_lost_msgs {
* struct cec_event - CEC event structure
* @ts: the timestamp of when the event was sent.
* @event: the event.
- * array.
+ * @flags: event flags.
* @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
* @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
* @raw: array to pad the union.
@@ -641,7 +662,7 @@ struct cec_event {
#define CEC_OP_REC_SEQ_WEDNESDAY 0x08
#define CEC_OP_REC_SEQ_THURSDAY 0x10
#define CEC_OP_REC_SEQ_FRIDAY 0x20
-#define CEC_OP_REC_SEQ_SATERDAY 0x40
+#define CEC_OP_REC_SEQ_SATURDAY 0x40
#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00
#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99
@@ -747,6 +768,7 @@ struct cec_event {
#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08
#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04
#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL 0x01
#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */
@@ -1038,6 +1060,7 @@ struct cec_event {
#define CEC_OP_AUD_FMT_ID_CEA861 0
#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1
+#define CEC_MSG_SET_AUDIO_VOLUME_LEVEL 0x73
/* Audio Rate Control Feature */
#define CEC_MSG_SET_AUDIO_RATE 0x9a
diff --git a/include/uapi/linux/cfm_bridge.h b/include/uapi/linux/cfm_bridge.h
new file mode 100644
index 000000000000..3c1cbd1db2f5
--- /dev/null
+++ b/include/uapi/linux/cfm_bridge.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_CFM_BRIDGE_H_
+#define _UAPI_LINUX_CFM_BRIDGE_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define ETHER_HEADER_LENGTH (6+6+4+2)
+#define CFM_MAID_LENGTH 48
+#define CFM_CCM_PDU_LENGTH 75
+#define CFM_PORT_STATUS_TLV_LENGTH 4
+#define CFM_IF_STATUS_TLV_LENGTH 4
+#define CFM_IF_STATUS_TLV_TYPE 4
+#define CFM_PORT_STATUS_TLV_TYPE 2
+#define CFM_ENDE_TLV_TYPE 0
+#define CFM_CCM_MAX_FRAME_LENGTH (ETHER_HEADER_LENGTH+\
+ CFM_CCM_PDU_LENGTH+\
+ CFM_PORT_STATUS_TLV_LENGTH+\
+ CFM_IF_STATUS_TLV_LENGTH)
+#define CFM_FRAME_PRIO 7
+#define CFM_CCM_TLV_OFFSET 70
+#define CFM_CCM_PDU_MAID_OFFSET 10
+#define CFM_CCM_PDU_MEPID_OFFSET 8
+#define CFM_CCM_PDU_SEQNR_OFFSET 4
+#define CFM_CCM_PDU_TLV_OFFSET 74
+#define CFM_CCM_ITU_RESERVED_SIZE 16
+
+struct br_cfm_common_hdr {
+ __u8 mdlevel_version;
+ __u8 opcode;
+ __u8 flags;
+ __u8 tlv_offset;
+};
+
+enum br_cfm_opcodes {
+ BR_CFM_OPCODE_CCM = 0x1,
+};
+
+/* MEP domain */
+enum br_cfm_domain {
+ BR_CFM_PORT,
+ BR_CFM_VLAN,
+};
+
+/* MEP direction */
+enum br_cfm_mep_direction {
+ BR_CFM_MEP_DIRECTION_DOWN,
+ BR_CFM_MEP_DIRECTION_UP,
+};
+
+/* CCM interval supported. */
+enum br_cfm_ccm_interval {
+ BR_CFM_CCM_INTERVAL_NONE,
+ BR_CFM_CCM_INTERVAL_3_3_MS,
+ BR_CFM_CCM_INTERVAL_10_MS,
+ BR_CFM_CCM_INTERVAL_100_MS,
+ BR_CFM_CCM_INTERVAL_1_SEC,
+ BR_CFM_CCM_INTERVAL_10_SEC,
+ BR_CFM_CCM_INTERVAL_1_MIN,
+ BR_CFM_CCM_INTERVAL_10_MIN,
+};
+
+#endif
diff --git a/include/uapi/linux/cgroupstats.h b/include/uapi/linux/cgroupstats.h
index aa306e4cd6c1..80b2c8594480 100644
--- a/include/uapi/linux/cgroupstats.h
+++ b/include/uapi/linux/cgroupstats.h
@@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
- * For the nr_io_wait state, a flag in the delay accounting structure
- * indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/
diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h
index 69829205fdb5..8e87d27b0951 100644
--- a/include/uapi/linux/cifs/cifs_mount.h
+++ b/include/uapi/linux/cifs/cifs_mount.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
/*
- * include/uapi/linux/cifs/cifs_mount.h
*
* Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
*
diff --git a/include/uapi/linux/cifs/cifs_netlink.h b/include/uapi/linux/cifs/cifs_netlink.h
new file mode 100644
index 000000000000..da3107582f49
--- /dev/null
+++ b/include/uapi/linux/cifs/cifs_netlink.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * Netlink routines for CIFS
+ *
+ * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
+ */
+
+
+#ifndef _UAPILINUX_CIFS_NETLINK_H
+#define _UAPILINUX_CIFS_NETLINK_H
+
+#define CIFS_GENL_NAME "cifs"
+#define CIFS_GENL_VERSION 0x1
+
+#define CIFS_GENL_MCGRP_SWN_NAME "cifs_mcgrp_swn"
+
+enum cifs_genl_multicast_groups {
+ CIFS_GENL_MCGRP_SWN,
+};
+
+enum cifs_genl_attributes {
+ CIFS_GENL_ATTR_UNSPEC,
+ CIFS_GENL_ATTR_SWN_REGISTRATION_ID,
+ CIFS_GENL_ATTR_SWN_NET_NAME,
+ CIFS_GENL_ATTR_SWN_SHARE_NAME,
+ CIFS_GENL_ATTR_SWN_IP,
+ CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY,
+ CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY,
+ CIFS_GENL_ATTR_SWN_IP_NOTIFY,
+ CIFS_GENL_ATTR_SWN_KRB_AUTH,
+ CIFS_GENL_ATTR_SWN_USER_NAME,
+ CIFS_GENL_ATTR_SWN_PASSWORD,
+ CIFS_GENL_ATTR_SWN_DOMAIN_NAME,
+ CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE,
+ CIFS_GENL_ATTR_SWN_RESOURCE_STATE,
+ CIFS_GENL_ATTR_SWN_RESOURCE_NAME,
+ __CIFS_GENL_ATTR_MAX,
+};
+#define CIFS_GENL_ATTR_MAX (__CIFS_GENL_ATTR_MAX - 1)
+
+enum cifs_genl_commands {
+ CIFS_GENL_CMD_UNSPEC,
+ CIFS_GENL_CMD_SWN_REGISTER,
+ CIFS_GENL_CMD_SWN_UNREGISTER,
+ CIFS_GENL_CMD_SWN_NOTIFY,
+ __CIFS_GENL_CMD_MAX
+};
+#define CIFS_GENL_CMD_MAX (__CIFS_GENL_CMD_MAX - 1)
+
+enum cifs_swn_notification_type {
+ CIFS_SWN_NOTIFICATION_RESOURCE_CHANGE = 0x01,
+ CIFS_SWN_NOTIFICATION_CLIENT_MOVE = 0x02,
+ CIFS_SWN_NOTIFICATION_SHARE_MOVE = 0x03,
+ CIFS_SWN_NOTIFICATION_IP_CHANGE = 0x04,
+};
+
+enum cifs_swn_resource_state {
+ CIFS_SWN_RESOURCE_STATE_UNKNOWN = 0x00,
+ CIFS_SWN_RESOURCE_STATE_AVAILABLE = 0x01,
+ CIFS_SWN_RESOURCE_STATE_UNAVAILABLE = 0xFF
+};
+
+#endif /* _UAPILINUX_CIFS_NETLINK_H */
diff --git a/include/uapi/linux/close_range.h b/include/uapi/linux/close_range.h
index 6928a9fdee3c..2d804281554c 100644
--- a/include/uapi/linux/close_range.h
+++ b/include/uapi/linux/close_range.h
@@ -5,5 +5,8 @@
/* Unshare the file descriptor table before closing file descriptors. */
#define CLOSE_RANGE_UNSHARE (1U << 1)
+/* Set the FD_CLOEXEC bit instead of closing the file descriptor. */
+#define CLOSE_RANGE_CLOEXEC (1U << 2)
+
#endif /* _UAPI_LINUX_CLOSE_RANGE_H */
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h
deleted file mode 100644
index c70a62ec8a49..000000000000
--- a/include/uapi/linux/cm4000_cs.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_CM4000_H_
-#define _UAPI_CM4000_H_
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define MAX_ATR 33
-
-#define CM4000_MAX_DEV 4
-
-/* those two structures are passed via ioctl() from/to userspace. They are
- * used by existing userspace programs, so I kepth the awkward "bIFSD" naming
- * not to break compilation of userspace apps. -HW */
-
-typedef struct atreq {
- __s32 atr_len;
- unsigned char atr[64];
- __s32 power_act;
- unsigned char bIFSD;
- unsigned char bIFSC;
-} atreq_t;
-
-
-/* what is particularly stupid in the original driver is the arch-dependent
- * member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
- * will lay out the structure members differently than the 64bit kernel.
- *
- * I've changed "ptsreq.protocol" from "unsigned long" to "__u32".
- * On 32bit this will make no difference. With 64bit kernels, it will make
- * 32bit apps work, too.
- */
-
-typedef struct ptsreq {
- __u32 protocol; /*T=0: 2^0, T=1: 2^1*/
- unsigned char flags;
- unsigned char pts1;
- unsigned char pts2;
- unsigned char pts3;
-} ptsreq_t;
-
-#define CM_IOC_MAGIC 'c'
-#define CM_IOC_MAXNR 255
-
-#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *)
-#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *)
-#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *)
-#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3)
-#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4)
-
-#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*)
-
-/* card and device states */
-#define CM_CARD_INSERTED 0x01
-#define CM_CARD_POWERED 0x02
-#define CM_ATR_PRESENT 0x04
-#define CM_ATR_VALID 0x08
-#define CM_STATE_VALID 0x0f
-/* extra info only from CM4000 */
-#define CM_NO_READER 0x10
-#define CM_BAD_CARD 0x20
-
-
-#endif /* _UAPI_CM4000_H_ */
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index db210625cee8..f2afb7cc4926 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -30,6 +30,49 @@ enum proc_cn_mcast_op {
PROC_CN_MCAST_IGNORE = 2
};
+#define PROC_EVENT_ALL (PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_UID | \
+ PROC_EVENT_GID | PROC_EVENT_SID | PROC_EVENT_PTRACE | \
+ PROC_EVENT_COMM | PROC_EVENT_NONZERO_EXIT | \
+ PROC_EVENT_COREDUMP | PROC_EVENT_EXIT)
+
+/*
+ * If you add an entry in proc_cn_event, make sure you add it in
+ * PROC_EVENT_ALL above as well.
+ */
+enum proc_cn_event {
+ /* Use successive bits so the enums can be used to record
+ * sets of events as well
+ */
+ PROC_EVENT_NONE = 0x00000000,
+ PROC_EVENT_FORK = 0x00000001,
+ PROC_EVENT_EXEC = 0x00000002,
+ PROC_EVENT_UID = 0x00000004,
+ PROC_EVENT_GID = 0x00000040,
+ PROC_EVENT_SID = 0x00000080,
+ PROC_EVENT_PTRACE = 0x00000100,
+ PROC_EVENT_COMM = 0x00000200,
+ /* "next" should be 0x00000400 */
+ /* "last" is the last process event: exit,
+ * while "next to last" is coredumping event
+ * before that is report only if process dies
+ * with non-zero exit status
+ */
+ PROC_EVENT_NONZERO_EXIT = 0x20000000,
+ PROC_EVENT_COREDUMP = 0x40000000,
+ PROC_EVENT_EXIT = 0x80000000
+};
+
+struct proc_input {
+ enum proc_cn_mcast_op mcast_op;
+ enum proc_cn_event event_type;
+};
+
+static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
+{
+ ev_type &= PROC_EVENT_ALL;
+ return ev_type;
+}
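
For orientation, this is the payload a listener would now send (the usual connector/netlink framing around it is omitted here, as it is unchanged) to subscribe to a subset of events rather than all of them:

#include <linux/cn_proc.h>

static struct proc_input make_fork_exit_filter(void)
{
	struct proc_input input = {
		.mcast_op   = PROC_CN_MCAST_LISTEN,
		.event_type = PROC_EVENT_FORK | PROC_EVENT_EXIT,
	};

	/* valid_event() masks off anything outside PROC_EVENT_ALL */
	input.event_type = valid_event(input.event_type);
	return input;
}
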
+
/*
* From the user's point of view, the process
* ID is the thread group ID and thread ID is the internal
@@ -44,24 +87,7 @@ enum proc_cn_mcast_op {
*/
struct proc_event {
- enum what {
- /* Use successive bits so the enums can be used to record
- * sets of events as well
- */
- PROC_EVENT_NONE = 0x00000000,
- PROC_EVENT_FORK = 0x00000001,
- PROC_EVENT_EXEC = 0x00000002,
- PROC_EVENT_UID = 0x00000004,
- PROC_EVENT_GID = 0x00000040,
- PROC_EVENT_SID = 0x00000080,
- PROC_EVENT_PTRACE = 0x00000100,
- PROC_EVENT_COMM = 0x00000200,
- /* "next" should be 0x00000400 */
- /* "last" is the last process event: exit,
- * while "next to last" is coredumping event */
- PROC_EVENT_COREDUMP = 0x40000000,
- PROC_EVENT_EXIT = 0x80000000
- } what;
+ enum proc_cn_event what;
__u32 cpu;
__u64 __attribute__((aligned(8))) timestamp_ns;
/* Number of nano seconds since system boot */
diff --git a/include/uapi/linux/comedi.h b/include/uapi/linux/comedi.h
new file mode 100644
index 000000000000..7314e5ee0a1e
--- /dev/null
+++ b/include/uapi/linux/comedi.h
@@ -0,0 +1,1528 @@
+/* SPDX-License-Identifier: LGPL-2.0+ WITH Linux-syscall-note */
+/*
+ * comedi.h
+ * header file for COMEDI user API
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_H
+#define _COMEDI_H
+
+#define COMEDI_MAJORVERSION 0
+#define COMEDI_MINORVERSION 7
+#define COMEDI_MICROVERSION 76
+#define VERSION "0.7.76"
+
+/* comedi's major device number */
+#define COMEDI_MAJOR 98
+
+/*
+ * maximum number of minor devices. This can be increased, although
+ * kernel structures are currently statically allocated, so you
+ * don't want this to be much larger than the number you actually use.
+ */
+#define COMEDI_NDEVICES 16
+
+/* number of config options in the config structure */
+#define COMEDI_NDEVCONFOPTS 32
+
+/*
+ * NOTE: 'comedi_config --init-data' is deprecated
+ *
+ * The following indexes in the config options were used by
+ * comedi_config to pass firmware blobs from user space to the
+ * comedi drivers. The request_firmware() hotplug interface is
+ * now used by all comedi drivers instead.
+ */
+
+/* length of nth chunk of firmware data */
+#define COMEDI_DEVCONF_AUX_DATA3_LENGTH 25
+#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26
+#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27
+#define COMEDI_DEVCONF_AUX_DATA0_LENGTH 28
+/* most significant 32 bits of pointer address (if needed) */
+#define COMEDI_DEVCONF_AUX_DATA_HI 29
+/* least significant 32 bits of pointer address */
+#define COMEDI_DEVCONF_AUX_DATA_LO 30
+#define COMEDI_DEVCONF_AUX_DATA_LENGTH 31 /* total data length */
+
+/* max length of device and driver names */
+#define COMEDI_NAMELEN 20
+
+/* packs and unpacks a channel/range number */
+
+#define CR_PACK(chan, rng, aref) \
+ ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
+#define CR_PACK_FLAGS(chan, range, aref, flags) \
+ (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
+
+#define CR_CHAN(a) ((a) & 0xffff)
+#define CR_RANGE(a) (((a) >> 16) & 0xff)
+#define CR_AREF(a) (((a) >> 24) & 0x03)
+
+#define CR_FLAGS_MASK 0xfc000000
+#define CR_ALT_FILTER 0x04000000
+#define CR_DITHER CR_ALT_FILTER
+#define CR_DEGLITCH CR_ALT_FILTER
+#define CR_ALT_SOURCE 0x08000000
+#define CR_EDGE 0x40000000
+#define CR_INVERT 0x80000000
+
+#define AREF_GROUND 0x00 /* analog ref = analog ground */
+#define AREF_COMMON 0x01 /* analog ref = analog common */
+#define AREF_DIFF 0x02 /* analog ref = differential */
+#define AREF_OTHER 0x03 /* analog ref = other (undefined) */
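
A minimal sanity check of the pack/unpack macros:

#include <assert.h>
#include <linux/comedi.h>

static void chanspec_demo(void)
{
	/* channel 3, range index 1, analog reference = ground */
	unsigned int spec = CR_PACK(3, 1, AREF_GROUND);

	assert(CR_CHAN(spec) == 3);
	assert(CR_RANGE(spec) == 1);
	assert(CR_AREF(spec) == AREF_GROUND);
}
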
+
+/* counters -- these are arbitrary values */
+#define GPCT_RESET 0x0001
+#define GPCT_SET_SOURCE 0x0002
+#define GPCT_SET_GATE 0x0004
+#define GPCT_SET_DIRECTION 0x0008
+#define GPCT_SET_OPERATION 0x0010
+#define GPCT_ARM 0x0020
+#define GPCT_DISARM 0x0040
+#define GPCT_GET_INT_CLK_FRQ 0x0080
+
+#define GPCT_INT_CLOCK 0x0001
+#define GPCT_EXT_PIN 0x0002
+#define GPCT_NO_GATE 0x0004
+#define GPCT_UP 0x0008
+#define GPCT_DOWN 0x0010
+#define GPCT_HWUD 0x0020
+#define GPCT_SIMPLE_EVENT 0x0040
+#define GPCT_SINGLE_PERIOD 0x0080
+#define GPCT_SINGLE_PW 0x0100
+#define GPCT_CONT_PULSE_OUT 0x0200
+#define GPCT_SINGLE_PULSE_OUT 0x0400
+
+/* instructions */
+
+#define INSN_MASK_WRITE 0x8000000
+#define INSN_MASK_READ 0x4000000
+#define INSN_MASK_SPECIAL 0x2000000
+
+#define INSN_READ (0 | INSN_MASK_READ)
+#define INSN_WRITE (1 | INSN_MASK_WRITE)
+#define INSN_BITS (2 | INSN_MASK_READ | INSN_MASK_WRITE)
+#define INSN_CONFIG (3 | INSN_MASK_READ | INSN_MASK_WRITE)
+#define INSN_DEVICE_CONFIG (INSN_CONFIG | INSN_MASK_SPECIAL)
+#define INSN_GTOD (4 | INSN_MASK_READ | INSN_MASK_SPECIAL)
+#define INSN_WAIT (5 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
+#define INSN_INTTRIG (6 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
+
+/* command flags */
+/* These flags are used in comedi_cmd structures */
+
+#define CMDF_BOGUS 0x00000001 /* do the motions */
+
+/* try to use a real-time interrupt while performing command */
+#define CMDF_PRIORITY 0x00000008
+
+/* wake up on end-of-scan events */
+#define CMDF_WAKE_EOS 0x00000020
+
+#define CMDF_WRITE 0x00000040
+
+#define CMDF_RAWDATA 0x00000080
+
+/* timer rounding definitions */
+#define CMDF_ROUND_MASK 0x00030000
+#define CMDF_ROUND_NEAREST 0x00000000
+#define CMDF_ROUND_DOWN 0x00010000
+#define CMDF_ROUND_UP 0x00020000
+#define CMDF_ROUND_UP_NEXT 0x00030000
+
+#define COMEDI_EV_START 0x00040000
+#define COMEDI_EV_SCAN_BEGIN 0x00080000
+#define COMEDI_EV_CONVERT 0x00100000
+#define COMEDI_EV_SCAN_END 0x00200000
+#define COMEDI_EV_STOP 0x00400000
+
+/* compatibility definitions */
+#define TRIG_BOGUS CMDF_BOGUS
+#define TRIG_RT CMDF_PRIORITY
+#define TRIG_WAKE_EOS CMDF_WAKE_EOS
+#define TRIG_WRITE CMDF_WRITE
+#define TRIG_ROUND_MASK CMDF_ROUND_MASK
+#define TRIG_ROUND_NEAREST CMDF_ROUND_NEAREST
+#define TRIG_ROUND_DOWN CMDF_ROUND_DOWN
+#define TRIG_ROUND_UP CMDF_ROUND_UP
+#define TRIG_ROUND_UP_NEXT CMDF_ROUND_UP_NEXT
+
+/* trigger sources */
+
+#define TRIG_ANY 0xffffffff
+#define TRIG_INVALID 0x00000000
+
+#define TRIG_NONE 0x00000001 /* never trigger */
+#define TRIG_NOW 0x00000002 /* trigger now + N ns */
+#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */
+#define TRIG_TIME 0x00000008 /* trigger at time N ns */
+#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */
+#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */
+#define TRIG_EXT 0x00000040 /* trigger on external signal N */
+#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */
+#define TRIG_OTHER 0x00000100 /* driver defined */
+
+/* subdevice flags */
+
+#define SDF_BUSY 0x0001 /* device is busy */
+#define SDF_BUSY_OWNER 0x0002 /* device is busy with your job */
+#define SDF_LOCKED 0x0004 /* subdevice is locked */
+#define SDF_LOCK_OWNER 0x0008 /* you own lock */
+#define SDF_MAXDATA 0x0010 /* maxdata depends on channel */
+#define SDF_FLAGS 0x0020 /* flags depend on channel */
+#define SDF_RANGETYPE 0x0040 /* range type depends on channel */
+#define SDF_PWM_COUNTER 0x0080 /* PWM can automatically switch off */
+#define SDF_PWM_HBRIDGE 0x0100 /* PWM is signed (H-bridge) */
+#define SDF_CMD 0x1000 /* can do commands (deprecated) */
+#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
+#define SDF_CMD_WRITE 0x4000 /* can do output commands */
+#define SDF_CMD_READ 0x8000 /* can do input commands */
+
+/* subdevice can be read (e.g. analog input) */
+#define SDF_READABLE 0x00010000
+/* subdevice can be written (e.g. analog output) */
+#define SDF_WRITABLE 0x00020000
+#define SDF_WRITEABLE SDF_WRITABLE /* spelling error in API */
+/* subdevice does not have externally visible lines */
+#define SDF_INTERNAL 0x00040000
+#define SDF_GROUND 0x00100000 /* can do aref=ground */
+#define SDF_COMMON 0x00200000 /* can do aref=common */
+#define SDF_DIFF 0x00400000 /* can do aref=diff */
+#define SDF_OTHER 0x00800000 /* can do aref=other */
+#define SDF_DITHER 0x01000000 /* can do dithering */
+#define SDF_DEGLITCH 0x02000000 /* can do deglitching */
+#define SDF_MMAP 0x04000000 /* can do mmap() */
+#define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */
+#define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */
+#define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */
+
+/* subdevice types */
+
+/**
+ * enum comedi_subdevice_type - COMEDI subdevice types
+ * @COMEDI_SUBD_UNUSED: Unused subdevice.
+ * @COMEDI_SUBD_AI: Analog input.
+ * @COMEDI_SUBD_AO: Analog output.
+ * @COMEDI_SUBD_DI: Digital input.
+ * @COMEDI_SUBD_DO: Digital output.
+ * @COMEDI_SUBD_DIO: Digital input/output.
+ * @COMEDI_SUBD_COUNTER: Counter.
+ * @COMEDI_SUBD_TIMER: Timer.
+ * @COMEDI_SUBD_MEMORY: Memory, EEPROM, DPRAM.
+ * @COMEDI_SUBD_CALIB: Calibration DACs.
+ * @COMEDI_SUBD_PROC: Processor, DSP.
+ * @COMEDI_SUBD_SERIAL: Serial I/O.
+ * @COMEDI_SUBD_PWM: Pulse-Width Modulation output.
+ */
+enum comedi_subdevice_type {
+ COMEDI_SUBD_UNUSED,
+ COMEDI_SUBD_AI,
+ COMEDI_SUBD_AO,
+ COMEDI_SUBD_DI,
+ COMEDI_SUBD_DO,
+ COMEDI_SUBD_DIO,
+ COMEDI_SUBD_COUNTER,
+ COMEDI_SUBD_TIMER,
+ COMEDI_SUBD_MEMORY,
+ COMEDI_SUBD_CALIB,
+ COMEDI_SUBD_PROC,
+ COMEDI_SUBD_SERIAL,
+ COMEDI_SUBD_PWM
+};
+
+/* configuration instructions */
+
+/**
+ * enum comedi_io_direction - COMEDI I/O directions
+ * @COMEDI_INPUT: Input.
+ * @COMEDI_OUTPUT: Output.
+ * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
+ *
+ * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
+ * report a direction. They may also be used in other places where a direction
+ * needs to be specified.
+ */
+enum comedi_io_direction {
+ COMEDI_INPUT = 0,
+ COMEDI_OUTPUT = 1,
+ COMEDI_OPENDRAIN = 2
+};
+
+/**
+ * enum configuration_ids - COMEDI configuration instruction codes
+ * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input.
+ * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output.
+ * @INSN_CONFIG_DIO_OPENDRAIN: Configure digital I/O as open-drain (or open
+ * collector) output.
+ * @INSN_CONFIG_ANALOG_TRIG: Configure analog trigger.
+ * @INSN_CONFIG_ALT_SOURCE: Configure alternate input source.
+ * @INSN_CONFIG_DIGITAL_TRIG: Configure digital trigger.
+ * @INSN_CONFIG_BLOCK_SIZE: Configure block size for DMA transfers.
+ * @INSN_CONFIG_TIMER_1: Configure divisor for external clock.
+ * @INSN_CONFIG_FILTER: Configure a filter.
+ * @INSN_CONFIG_CHANGE_NOTIFY: Configure change notification for digital
+ * inputs. (New drivers should use
+ * %INSN_CONFIG_DIGITAL_TRIG instead.)
+ * @INSN_CONFIG_SERIAL_CLOCK: Configure clock for serial I/O.
+ * @INSN_CONFIG_BIDIRECTIONAL_DATA: Send and receive a byte over serial I/O.
+ * @INSN_CONFIG_DIO_QUERY: Query direction of digital I/O channel.
+ * @INSN_CONFIG_PWM_OUTPUT: Configure pulse-width modulator output.
+ * @INSN_CONFIG_GET_PWM_OUTPUT: Get pulse-width modulator output configuration.
+ * @INSN_CONFIG_ARM: Arm a subdevice or channel.
+ * @INSN_CONFIG_DISARM: Disarm a subdevice or channel.
+ * @INSN_CONFIG_GET_COUNTER_STATUS: Get counter status.
+ * @INSN_CONFIG_RESET: Reset a subdevice or channel.
+ * @INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR: Configure counter/timer as
+ * single pulse generator.
+ * @INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR: Configure counter/timer as
+ * pulse train generator.
+ * @INSN_CONFIG_GPCT_QUADRATURE_ENCODER: Configure counter as a quadrature
+ * encoder.
+ * @INSN_CONFIG_SET_GATE_SRC: Set counter/timer gate source.
+ * @INSN_CONFIG_GET_GATE_SRC: Get counter/timer gate source.
+ * @INSN_CONFIG_SET_CLOCK_SRC: Set counter/timer master clock source.
+ * @INSN_CONFIG_GET_CLOCK_SRC: Get counter/timer master clock source.
+ * @INSN_CONFIG_SET_OTHER_SRC: Set counter/timer "other" source.
+ * @INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: Get size (in bytes) of subdevice's
+ * on-board FIFOs used during streaming
+ * input/output.
+ * @INSN_CONFIG_SET_COUNTER_MODE: Set counter/timer mode.
+ * @INSN_CONFIG_8254_SET_MODE: (Deprecated) Same as
+ * %INSN_CONFIG_SET_COUNTER_MODE.
+ * @INSN_CONFIG_8254_READ_STATUS: Read status of 8254 counter channel.
+ * @INSN_CONFIG_SET_ROUTING: Set routing for a channel.
+ * @INSN_CONFIG_GET_ROUTING: Get routing for a channel.
+ * @INSN_CONFIG_PWM_SET_PERIOD: Set PWM period in nanoseconds.
+ * @INSN_CONFIG_PWM_GET_PERIOD: Get PWM period in nanoseconds.
+ * @INSN_CONFIG_GET_PWM_STATUS: Get PWM status.
+ * @INSN_CONFIG_PWM_SET_H_BRIDGE: Set PWM H bridge duty cycle and polarity for
+ * a relay simultaneously.
+ * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
+ * @INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: Get the hardware timing constraints,
+ * regardless of trigger sources.
+ */
+enum configuration_ids {
+ INSN_CONFIG_DIO_INPUT = COMEDI_INPUT,
+ INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT,
+ INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN,
+ INSN_CONFIG_ANALOG_TRIG = 16,
+/* INSN_CONFIG_WAVEFORM = 17, */
+/* INSN_CONFIG_TRIG = 18, */
+/* INSN_CONFIG_COUNTER = 19, */
+ INSN_CONFIG_ALT_SOURCE = 20,
+ INSN_CONFIG_DIGITAL_TRIG = 21,
+ INSN_CONFIG_BLOCK_SIZE = 22,
+ INSN_CONFIG_TIMER_1 = 23,
+ INSN_CONFIG_FILTER = 24,
+ INSN_CONFIG_CHANGE_NOTIFY = 25,
+
+ INSN_CONFIG_SERIAL_CLOCK = 26, /*ALPHA*/
+ INSN_CONFIG_BIDIRECTIONAL_DATA = 27,
+ INSN_CONFIG_DIO_QUERY = 28,
+ INSN_CONFIG_PWM_OUTPUT = 29,
+ INSN_CONFIG_GET_PWM_OUTPUT = 30,
+ INSN_CONFIG_ARM = 31,
+ INSN_CONFIG_DISARM = 32,
+ INSN_CONFIG_GET_COUNTER_STATUS = 33,
+ INSN_CONFIG_RESET = 34,
+ INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,
+ INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,
+ INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,
+ INSN_CONFIG_SET_GATE_SRC = 2001,
+ INSN_CONFIG_GET_GATE_SRC = 2002,
+ INSN_CONFIG_SET_CLOCK_SRC = 2003,
+ INSN_CONFIG_GET_CLOCK_SRC = 2004,
+ INSN_CONFIG_SET_OTHER_SRC = 2005,
+ INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006,
+ INSN_CONFIG_SET_COUNTER_MODE = 4097,
+ INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,
+ INSN_CONFIG_8254_READ_STATUS = 4098,
+ INSN_CONFIG_SET_ROUTING = 4099,
+ INSN_CONFIG_GET_ROUTING = 4109,
+ INSN_CONFIG_PWM_SET_PERIOD = 5000,
+ INSN_CONFIG_PWM_GET_PERIOD = 5001,
+ INSN_CONFIG_GET_PWM_STATUS = 5002,
+ INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,
+ INSN_CONFIG_PWM_GET_H_BRIDGE = 5004,
+ INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS = 5005,
+};
+
+/**
+ * enum device_config_route_ids - COMEDI configuration instruction codes
+ * global to an entire device
+ * @INSN_DEVICE_CONFIG_TEST_ROUTE: Validate the possibility of a
+ * globally-named route
+ * @INSN_DEVICE_CONFIG_CONNECT_ROUTE: Connect a globally-named route
+ * @INSN_DEVICE_CONFIG_DISCONNECT_ROUTE: Disconnect a globally-named route
+ * @INSN_DEVICE_CONFIG_GET_ROUTES: Get a list of all globally-named routes
+ * that are valid for a particular device.
+ */
+enum device_config_route_ids {
+ INSN_DEVICE_CONFIG_TEST_ROUTE = 0,
+ INSN_DEVICE_CONFIG_CONNECT_ROUTE = 1,
+ INSN_DEVICE_CONFIG_DISCONNECT_ROUTE = 2,
+ INSN_DEVICE_CONFIG_GET_ROUTES = 3,
+};
+
+/**
+ * enum comedi_digital_trig_op - operations for configuring a digital trigger
+ * @COMEDI_DIGITAL_TRIG_DISABLE: Return digital trigger to its default,
+ * inactive, unconfigured state.
+ * @COMEDI_DIGITAL_TRIG_ENABLE_EDGES: Set rising and/or falling edge inputs
+ * that each can fire the trigger.
+ * @COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: Set a combination of high and/or low
+ * level inputs that can fire the trigger.
+ *
+ * These are used with the %INSN_CONFIG_DIGITAL_TRIG configuration instruction.
+ * The data for the configuration instruction is as follows...
+ *
+ * data[%0] = %INSN_CONFIG_DIGITAL_TRIG
+ *
+ * data[%1] = trigger ID
+ *
+ * data[%2] = configuration operation
+ *
+ * data[%3] = configuration parameter 1
+ *
+ * data[%4] = configuration parameter 2
+ *
+ * data[%5] = configuration parameter 3
+ *
+ * The trigger ID (data[%1]) is used to differentiate multiple digital triggers
+ * belonging to the same subdevice. The configuration operation (data[%2]) is
+ * one of the enum comedi_digital_trig_op values. The configuration
+ * parameters (data[%3], data[%4], and data[%5]) depend on the operation; they
+ * are not used with %COMEDI_DIGITAL_TRIG_DISABLE.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES and %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS,
+ * configuration parameter 1 (data[%3]) contains a "left-shift" value that
+ * specifies the input corresponding to bit 0 of configuration parameters 2
+ * and 3. This is useful if the trigger has more than 32 inputs.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES, configuration parameter 2 (data[%4])
+ * specifies which of up to 32 inputs have rising-edge sensitivity, and
+ * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
+ * have falling-edge sensitivity that can fire the trigger.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, configuration parameter 2 (data[%4])
+ * specifies which of up to 32 inputs must be at a high level, and
+ * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
+ * must be at a low level for the trigger to fire.
+ *
+ * Some sequences of %INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
+ * accumulative effect, depending on the low-level driver. This is useful
+ * when setting up a trigger that has more than 32 inputs, or has a combination
+ * of edge- and level-triggered inputs.
+ */
+enum comedi_digital_trig_op {
+ COMEDI_DIGITAL_TRIG_DISABLE = 0,
+ COMEDI_DIGITAL_TRIG_ENABLE_EDGES = 1,
+ COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2
+};
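
A sketch of the data[] layout the kerneldoc above describes, for a trigger wired to fire on rising edges of inputs 0 and 5 and the falling edge of input 1:

#include <linux/comedi.h>

static void fill_digital_trig(unsigned int data[6])
{
	data[0] = INSN_CONFIG_DIGITAL_TRIG;
	data[1] = 0;				/* trigger ID           */
	data[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES;
	data[3] = 0;				/* no left-shift needed */
	data[4] = (1u << 0) | (1u << 5);	/* rising-edge inputs   */
	data[5] = (1u << 1);			/* falling-edge inputs  */
}
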
+
+/**
+ * enum comedi_support_level - support level for a COMEDI feature
+ * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature.
+ * @COMEDI_SUPPORTED: Feature is supported.
+ * @COMEDI_UNSUPPORTED: Feature is unsupported.
+ */
+enum comedi_support_level {
+ COMEDI_UNKNOWN_SUPPORT = 0,
+ COMEDI_SUPPORTED,
+ COMEDI_UNSUPPORTED
+};
+
+/**
+ * enum comedi_counter_status_flags - counter status bits
+ * @COMEDI_COUNTER_ARMED: Counter is armed.
+ * @COMEDI_COUNTER_COUNTING: Counter is counting.
+ * @COMEDI_COUNTER_TERMINAL_COUNT: Counter reached terminal count.
+ *
+ * These bitwise values are used by the %INSN_CONFIG_GET_COUNTER_STATUS
+ * configuration instruction to report the status of a counter.
+ */
+enum comedi_counter_status_flags {
+ COMEDI_COUNTER_ARMED = 0x1,
+ COMEDI_COUNTER_COUNTING = 0x2,
+ COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
+};
+
+/* ioctls */
+
+#define CIO 'd'
+#define COMEDI_DEVCONFIG _IOW(CIO, 0, struct comedi_devconfig)
+#define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo)
+#define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo)
+#define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo)
+/* _IOWR(CIO, 4, ...) is reserved */
+#define COMEDI_LOCK _IO(CIO, 5)
+#define COMEDI_UNLOCK _IO(CIO, 6)
+#define COMEDI_CANCEL _IO(CIO, 7)
+#define COMEDI_RANGEINFO _IOR(CIO, 8, struct comedi_rangeinfo)
+#define COMEDI_CMD _IOR(CIO, 9, struct comedi_cmd)
+#define COMEDI_CMDTEST _IOR(CIO, 10, struct comedi_cmd)
+#define COMEDI_INSNLIST _IOR(CIO, 11, struct comedi_insnlist)
+#define COMEDI_INSN _IOR(CIO, 12, struct comedi_insn)
+#define COMEDI_BUFCONFIG _IOR(CIO, 13, struct comedi_bufconfig)
+#define COMEDI_BUFINFO _IOWR(CIO, 14, struct comedi_bufinfo)
+#define COMEDI_POLL _IO(CIO, 15)
+#define COMEDI_SETRSUBD _IO(CIO, 16)
+#define COMEDI_SETWSUBD _IO(CIO, 17)
+
+/* structures */
+
+/**
+ * struct comedi_insn - COMEDI instruction
+ * @insn: COMEDI instruction type (%INSN_xxx).
+ * @n: Length of @data[].
+ * @data: Pointer to data array operated on by the instruction.
+ * @subdev: Subdevice index.
+ * @chanspec: A packed "chanspec" value consisting of channel number,
+ * analog range index, analog reference type, and flags.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_INSN ioctl, and indirectly with the
+ * %COMEDI_INSNLIST ioctl.
+ */
+struct comedi_insn {
+ unsigned int insn;
+ unsigned int n;
+ unsigned int __user *data;
+ unsigned int subdev;
+ unsigned int chanspec;
+ unsigned int unused[3];
+};
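
A sketch of the struct in use via the %COMEDI_INSN ioctl ('fd' is an open comedi device node such as /dev/comedi0; subdevice 0 and channel 0 are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/comedi.h>

static int read_one_sample(int fd, unsigned int *sample)
{
	struct comedi_insn insn;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_READ;
	insn.n = 1;			/* one data element */
	insn.data = sample;
	insn.subdev = 0;
	insn.chanspec = CR_PACK(0, 0, AREF_GROUND);
	return ioctl(fd, COMEDI_INSN, &insn);
}
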
+
+/**
+ * struct comedi_insnlist - list of COMEDI instructions
+ * @n_insns: Number of COMEDI instructions.
+ * @insns: Pointer to array of COMEDI instructions.
+ *
+ * This is used with the %COMEDI_INSNLIST ioctl.
+ */
+struct comedi_insnlist {
+ unsigned int n_insns;
+ struct comedi_insn __user *insns;
+};
+
+/**
+ * struct comedi_cmd - COMEDI asynchronous acquisition command details
+ * @subdev: Subdevice index.
+ * @flags: Command flags (%CMDF_xxx).
+ * @start_src: "Start acquisition" trigger source (%TRIG_xxx).
+ * @start_arg: "Start acquisition" trigger argument.
+ * @scan_begin_src: "Scan begin" trigger source.
+ * @scan_begin_arg: "Scan begin" trigger argument.
+ * @convert_src: "Convert" trigger source.
+ * @convert_arg: "Convert" trigger argument.
+ * @scan_end_src: "Scan end" trigger source.
+ * @scan_end_arg: "Scan end" trigger argument.
+ * @stop_src: "Stop acquisition" trigger source.
+ * @stop_arg: "Stop acquisition" trigger argument.
+ * @chanlist: Pointer to array of "chanspec" values, containing a
+ * sequence of channel numbers packed with analog range
+ * index, etc.
+ * @chanlist_len: Number of channels in sequence.
+ * @data: Pointer to miscellaneous set-up data (not used).
+ * @data_len: Length of miscellaneous set-up data.
+ *
+ * This is used with the %COMEDI_CMD or %COMEDI_CMDTEST ioctl to set up
+ * or validate an asynchronous acquisition command. The ioctl may modify
+ * the &struct comedi_cmd and copy it back to the caller.
+ *
+ * Optional command @flags values that can be ORed together...
+ *
+ * %CMDF_BOGUS - makes %COMEDI_CMD ioctl return error %EAGAIN instead of
+ * starting the command.
+ *
+ * %CMDF_PRIORITY - requests "hard real-time" processing (which is not
+ * supported in this version of COMEDI).
+ *
+ * %CMDF_WAKE_EOS - requests that the command make data available for reading
+ * after every "scan" period.
+ *
+ * %CMDF_WRITE - marks the command as being in the "write" (to device)
+ * direction. This does not need to be specified by the caller unless the
+ * subdevice supports commands in either direction.
+ *
+ * %CMDF_RAWDATA - prevents the command from "munging" the data between the
+ * COMEDI sample format and the raw hardware sample format.
+ *
+ * %CMDF_ROUND_NEAREST - requests timing periods to be rounded to nearest
+ * supported values.
+ *
+ * %CMDF_ROUND_DOWN - requests timing periods to be rounded down to supported
+ * values (frequencies rounded up).
+ *
+ * %CMDF_ROUND_UP - requests timing periods to be rounded up to supported
+ * values (frequencies rounded down).
+ *
+ * Trigger source values for @start_src, @scan_begin_src, @convert_src,
+ * @scan_end_src, and @stop_src...
+ *
+ * %TRIG_ANY - "all ones" value used to test which trigger sources are
+ * supported.
+ *
+ * %TRIG_INVALID - "all zeroes" value used to indicate that all requested
+ * trigger sources are invalid.
+ *
+ * %TRIG_NONE - never trigger (often used as a @stop_src value).
+ *
+ * %TRIG_NOW - trigger after '_arg' nanoseconds.
+ *
+ * %TRIG_FOLLOW - trigger follows another event.
+ *
+ * %TRIG_TIMER - trigger every '_arg' nanoseconds.
+ *
+ * %TRIG_COUNT - trigger when count '_arg' is reached.
+ *
+ * %TRIG_EXT - trigger on external signal specified by '_arg'.
+ *
+ * %TRIG_INT - trigger on internal, software trigger specified by '_arg'.
+ *
+ * %TRIG_OTHER - trigger on other, driver-defined signal specified by '_arg'.
+ */
+struct comedi_cmd {
+ unsigned int subdev;
+ unsigned int flags;
+
+ unsigned int start_src;
+ unsigned int start_arg;
+
+ unsigned int scan_begin_src;
+ unsigned int scan_begin_arg;
+
+ unsigned int convert_src;
+ unsigned int convert_arg;
+
+ unsigned int scan_end_src;
+ unsigned int scan_end_arg;
+
+ unsigned int stop_src;
+ unsigned int stop_arg;
+
+ unsigned int *chanlist;
+ unsigned int chanlist_len;
+
+ short __user *data;
+ unsigned int data_len;
+};
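+
+/*
+ * An illustrative sketch (assumptions: an open file descriptor 'fd' and
+ * the CR_PACK()/AREF_GROUND helpers from earlier in this header) of
+ * validating a command with %COMEDI_CMDTEST before starting it with
+ * %COMEDI_CMD:
+ *
+ *	unsigned int chanlist[2] = {
+ *		CR_PACK(0, 0, AREF_GROUND),
+ *		CR_PACK(1, 0, AREF_GROUND),
+ *	};
+ *	struct comedi_cmd cmd = {
+ *		.subdev		= 0,
+ *		.start_src	= TRIG_NOW,
+ *		.scan_begin_src	= TRIG_TIMER,
+ *		.scan_begin_arg	= 1000000,	// scan every 1 ms
+ *		.convert_src	= TRIG_TIMER,
+ *		.convert_arg	= 10000,	// convert every 10 us
+ *		.scan_end_src	= TRIG_COUNT,
+ *		.scan_end_arg	= 2,		// 2 channels per scan
+ *		.stop_src	= TRIG_COUNT,
+ *		.stop_arg	= 100,		// stop after 100 scans
+ *		.chanlist	= chanlist,
+ *		.chanlist_len	= 2,
+ *	};
+ *
+ *	if (ioctl(fd, COMEDI_CMDTEST, &cmd) == 0)	// 0 means no changes
+ *		ioctl(fd, COMEDI_CMD, &cmd);
+ */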
+
+/**
+ * struct comedi_chaninfo - used to retrieve per-channel information
+ * @subdev: Subdevice index.
+ * @maxdata_list: Optional pointer to per-channel maximum data values.
+ * @flaglist: Optional pointer to per-channel flags.
+ * @rangelist: Optional pointer to per-channel range types.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_CHANINFO ioctl to get per-channel information
+ * for the subdevice. Use of this requires knowledge of the number of channels
+ * and subdevice flags obtained using the %COMEDI_SUBDINFO ioctl.
+ *
+ * The @maxdata_list member must be %NULL unless the %SDF_MAXDATA subdevice
+ * flag is set. The @flaglist member must be %NULL unless the %SDF_FLAGS
+ * subdevice flag is set. The @rangelist member must be %NULL unless the
+ * %SDF_RANGETYPE subdevice flag is set. Otherwise, the arrays they point to
+ * must be at least as long as the number of channels.
+ */
+struct comedi_chaninfo {
+ unsigned int subdev;
+ unsigned int __user *maxdata_list;
+ unsigned int __user *flaglist;
+ unsigned int __user *rangelist;
+ unsigned int unused[4];
+};
+
+/**
+ * struct comedi_rangeinfo - used to retrieve the range table for a channel
+ * @range_type: Encodes subdevice index (bits 27:24), channel index
+ * (bits 23:16) and range table length (bits 15:0).
+ * @range_ptr: Pointer to array of &struct comedi_krange to be filled
+ * in with the range table for the channel or subdevice.
+ *
+ * This is used with the %COMEDI_RANGEINFO ioctl to retrieve the range table
+ * for a specific channel (if the subdevice has the %SDF_RANGETYPE flag set to
+ * indicate that the range table depends on the channel), or for the subdevice
+ * as a whole (if the %SDF_RANGETYPE flag is clear, indicating the range table
+ * is shared by all channels).
+ *
+ * The @range_type value is an input to the ioctl and comes from a previous
+ * use of the %COMEDI_SUBDINFO ioctl (if the %SDF_RANGETYPE flag is clear),
+ * or the %COMEDI_CHANINFO ioctl (if the %SDF_RANGETYPE flag is set).
+ */
+struct comedi_rangeinfo {
+ unsigned int range_type;
+ void __user *range_ptr;
+};
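+
+/*
+ * For illustration only: fetching a 4-entry range table for channel 2
+ * of subdevice 0. In practice @range_type is taken verbatim from a
+ * previous %COMEDI_SUBDINFO or %COMEDI_CHANINFO call rather than built
+ * by hand as here:
+ *
+ *	struct comedi_krange ranges[4];
+ *	struct comedi_rangeinfo ri = {
+ *		.range_type = (0 << 24) | (2 << 16) | 4,  // subdev 0, chan 2, len 4
+ *		.range_ptr = ranges,
+ *	};
+ *
+ *	ioctl(fd, COMEDI_RANGEINFO, &ri);
+ */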
+
+/**
+ * struct comedi_krange - describes a range in a range table
+ * @min: Minimum value in millionths (1e-6) of a unit.
+ * @max: Maximum value in millionths (1e-6) of a unit.
+ * @flags: Indicates the units (in bits 7:0) OR'ed with optional flags.
+ *
+ * A range table is associated with a single channel, or with all channels in a
+ * subdevice, and contains a list of one or more ranges. A %struct comedi_krange
+ * describes the physical range of units for one of those ranges. Sample
+ * values in COMEDI are unsigned from %0 up to some 'maxdata' value. The
+ * mapping from sample values to physical units is assumed to be nominally
+ * linear (for the purpose of describing the range), with sample value %0
+ * mapping to @min, and the 'maxdata' sample value mapping to @max.
+ *
+ * The currently defined units are %UNIT_volt (%0), %UNIT_mA (%1), and
+ * %UNIT_none (%2). The @min and @max values are the physical range multiplied
+ * by 1e6, so a @max value of %1000000 (with %UNIT_volt) represents a maximal
+ * value of 1 volt.
+ *
+ * The only defined flag value is %RF_EXTERNAL (%0x100), indicating that the
+ * range needs to be multiplied by an external reference.
+ */
+struct comedi_krange {
+ int min;
+ int max;
+ unsigned int flags;
+};
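+
+/*
+ * A sketch of the nominally linear mapping described above: converting
+ * a raw 'sample' to physical units, given a range 'r' (a struct
+ * comedi_krange) and the subdevice's 'maxdata':
+ *
+ *	double min = r.min * 1e-6;
+ *	double max = r.max * 1e-6;
+ *	double physical = min + sample * (max - min) / maxdata;
+ */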
+
+/**
+ * struct comedi_subdinfo - used to retrieve information about a subdevice
+ * @type: Type of subdevice from &enum comedi_subdevice_type.
+ * @n_chan: Number of channels the subdevice supports.
+ * @subd_flags: A mixture of static and dynamic flags describing
+ * aspects of the subdevice and its current state.
+ * @timer_type: Timer type. Always set to %5 ("nanosecond timer").
+ * @len_chanlist: Maximum length of a channel list if the subdevice
+ * supports asynchronous acquisition commands.
+ * @maxdata: Maximum sample value for all channels if the
+ * %SDF_MAXDATA subdevice flag is clear.
+ * @flags: Channel flags for all channels if the %SDF_FLAGS
+ * subdevice flag is clear.
+ * @range_type: The range type for all channels if the %SDF_RANGETYPE
+ * subdevice flag is clear. Encodes the subdevice index
+ * (bits 27:24), a dummy channel index %0 (bits 23:16),
+ * and the range table length (bits 15:0).
+ * @settling_time_0: Not used.
+ * @insn_bits_support: Set to %COMEDI_SUPPORTED if the subdevice supports the
+ * %INSN_BITS instruction, or to %COMEDI_UNSUPPORTED if it
+ * does not.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_SUBDINFO ioctl which copies an array of
+ * &struct comedi_subdinfo back to user space, with one element per subdevice.
+ * Use of this requires knowledge of the number of subdevices obtained from
+ * the %COMEDI_DEVINFO ioctl.
+ *
+ * These are the @subd_flags values that may be ORed together...
+ *
+ * %SDF_BUSY - the subdevice is busy processing an asynchronous command or a
+ * synchronous instruction.
+ *
+ * %SDF_BUSY_OWNER - the subdevice is busy processing an asynchronous
+ * acquisition command started on the current file object (the file object
+ * issuing the %COMEDI_SUBDINFO ioctl).
+ *
+ * %SDF_LOCKED - the subdevice is locked by a %COMEDI_LOCK ioctl.
+ *
+ * %SDF_LOCK_OWNER - the subdevice is locked by a %COMEDI_LOCK ioctl from the
+ * current file object.
+ *
+ * %SDF_MAXDATA - maximum sample values are channel-specific.
+ *
+ * %SDF_FLAGS - channel flags are channel-specific.
+ *
+ * %SDF_RANGETYPE - range types are channel-specific.
+ *
+ * %SDF_PWM_COUNTER - PWM can switch off automatically.
+ *
+ * %SDF_PWM_HBRIDGE - PWM is signed (H-bridge).
+ *
+ * %SDF_CMD - the subdevice supports asynchronous commands.
+ *
+ * %SDF_SOFT_CALIBRATED - the subdevice uses software calibration.
+ *
+ * %SDF_CMD_WRITE - the subdevice supports asynchronous commands in the output
+ * ("write") direction.
+ *
+ * %SDF_CMD_READ - the subdevice supports asynchronous commands in the input
+ * ("read") direction.
+ *
+ * %SDF_READABLE - the subdevice is readable (e.g. analog input).
+ *
+ * %SDF_WRITABLE (aliased as %SDF_WRITEABLE) - the subdevice is writable (e.g.
+ * analog output).
+ *
+ * %SDF_INTERNAL - the subdevice has no externally visible lines.
+ *
+ * %SDF_GROUND - the subdevice can use ground as an analog reference.
+ *
+ * %SDF_COMMON - the subdevice can use a common analog reference.
+ *
+ * %SDF_DIFF - the subdevice can use differential inputs (or outputs).
+ *
+ * %SDF_OTHER - the subdevice can use some other analog reference.
+ *
+ * %SDF_DITHER - the subdevice can do dithering.
+ *
+ * %SDF_DEGLITCH - the subdevice can do deglitching.
+ *
+ * %SDF_MMAP - this is never set.
+ *
+ * %SDF_RUNNING - an asynchronous command is still running.
+ *
+ * %SDF_LSAMPL - the subdevice uses "long" (32-bit) samples (for asynchronous
+ * command data).
+ *
+ * %SDF_PACKED - the subdevice packs several DIO samples into a single sample
+ * (for asynchronous command data).
+ *
+ * No "channel flags" (@flags) values are currently defined.
+ */
+struct comedi_subdinfo {
+ unsigned int type;
+ unsigned int n_chan;
+ unsigned int subd_flags;
+ unsigned int timer_type;
+ unsigned int len_chanlist;
+ unsigned int maxdata;
+ unsigned int flags;
+ unsigned int range_type;
+ unsigned int settling_time_0;
+ unsigned int insn_bits_support;
+ unsigned int unused[8];
+};
+
+/**
+ * struct comedi_devinfo - used to retrieve information about a COMEDI device
+ * @version_code: COMEDI version code.
+ * @n_subdevs: Number of subdevices the device has.
+ * @driver_name: Null-terminated COMEDI driver name.
+ * @board_name: Null-terminated COMEDI board name.
+ * @read_subdevice: Index of the current "read" subdevice (%-1 if none).
+ * @write_subdevice: Index of the current "write" subdevice (%-1 if none).
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_DEVINFO ioctl to get basic information about
+ * the device.
+ */
+struct comedi_devinfo {
+ unsigned int version_code;
+ unsigned int n_subdevs;
+ char driver_name[COMEDI_NAMELEN];
+ char board_name[COMEDI_NAMELEN];
+ int read_subdevice;
+ int write_subdevice;
+ int unused[30];
+};
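+
+/*
+ * Usage sketch (assumes an open file descriptor 'fd' plus <stdlib.h>
+ * and <sys/ioctl.h>): %COMEDI_DEVINFO reports the number of subdevices,
+ * which sizes the array passed to %COMEDI_SUBDINFO:
+ *
+ *	struct comedi_devinfo di;
+ *	struct comedi_subdinfo *sdi;
+ *
+ *	ioctl(fd, COMEDI_DEVINFO, &di);
+ *	sdi = calloc(di.n_subdevs, sizeof(*sdi));
+ *	if (sdi && ioctl(fd, COMEDI_SUBDINFO, sdi) == 0)
+ *		...	// sdi[0..di.n_subdevs-1] describe the subdevices
+ */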
+
+/**
+ * struct comedi_devconfig - used to configure a legacy COMEDI device
+ * @board_name: Null-terminated string specifying the type of board
+ * to configure.
+ * @options: An array of integer configuration options.
+ *
+ * This is used with the %COMEDI_DEVCONFIG ioctl to configure a "legacy" COMEDI
+ * device, such as an ISA card. Not all COMEDI drivers support this. Those
+ * that do either expect the specified board name to match one of a list of
+ * names registered with the COMEDI core, or expect the specified board name
+ * to match the COMEDI driver name itself. The configuration options are
+ * handled in a driver-specific manner.
+ */
+struct comedi_devconfig {
+ char board_name[COMEDI_NAMELEN];
+ int options[COMEDI_NDEVCONFOPTS];
+};
+
+/**
+ * struct comedi_bufconfig - used to set or get buffer size for a subdevice
+ * @subdevice: Subdevice index.
+ * @flags: Not used.
+ * @maximum_size: Maximum allowed buffer size.
+ * @size: Buffer size.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_BUFCONFIG ioctl to get or configure the
+ * maximum buffer size and current buffer size for a COMEDI subdevice that
+ * supports asynchronous commands. If the subdevice does not support
+ * asynchronous commands, @maximum_size and @size are ignored and set to 0.
+ *
+ * On ioctl input, non-zero values of @maximum_size and @size specify a
+ * new maximum size and new current size (in bytes), respectively. These
+ * will be rounded up to a multiple of %PAGE_SIZE. Specifying a new maximum
+ * size requires admin capabilities.
+ *
+ * On ioctl output, @maximum_size and @size are set to the current maximum
+ * buffer size and current buffer size, respectively.
+ */
+struct comedi_bufconfig {
+ unsigned int subdevice;
+ unsigned int flags;
+
+ unsigned int maximum_size;
+ unsigned int size;
+
+ unsigned int unused[4];
+};
+
+/**
+ * struct comedi_bufinfo - used to manipulate buffer position for a subdevice
+ * @subdevice: Subdevice index.
+ * @bytes_read: Specify amount to advance read position for an
+ * asynchronous command in the input ("read") direction.
+ * @buf_write_ptr: Current write position (index) within the buffer.
+ * @buf_read_ptr: Current read position (index) within the buffer.
+ * @buf_write_count: Total amount written, modulo 2^32.
+ * @buf_read_count: Total amount read, modulo 2^32.
+ * @bytes_written: Specify amount to advance write position for an
+ * asynchronous command in the output ("write") direction.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_BUFINFO ioctl to optionally advance the
+ * current read or write position in an asynchronous acquisition data buffer,
+ * and to get the current read and write positions in the buffer.
+ */
+struct comedi_bufinfo {
+ unsigned int subdevice;
+ unsigned int bytes_read;
+
+ unsigned int buf_write_ptr;
+ unsigned int buf_read_ptr;
+ unsigned int buf_write_count;
+ unsigned int buf_read_count;
+
+ unsigned int bytes_written;
+
+ unsigned int unused[4];
+};
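+
+/*
+ * Sketch (illustration only): after consuming 'n' bytes from a mmap()ed
+ * input buffer, the read position is advanced like so:
+ *
+ *	struct comedi_bufinfo bi = {
+ *		.subdevice = 0,
+ *		.bytes_read = n,
+ *	};
+ *
+ *	ioctl(fd, COMEDI_BUFINFO, &bi);
+ *	// bi.buf_read_ptr and bi.buf_read_count reflect the new position
+ */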
+
+/* range stuff */
+
+#define __RANGE(a, b) ((((a) & 0xffff) << 16) | ((b) & 0xffff))
+
+#define RANGE_OFFSET(a) (((a) >> 16) & 0xffff)
+#define RANGE_LENGTH(b) ((b) & 0xffff)
+
+#define RF_UNIT(flags) ((flags) & 0xff)
+#define RF_EXTERNAL 0x100
+
+#define UNIT_volt 0
+#define UNIT_mA 1
+#define UNIT_none 2
+
+#define COMEDI_MIN_SPEED 0xffffffffu
+
+/**********************************************************/
+/* everything after this line is ALPHA */
+/**********************************************************/
+
+/*
+ * 8254 specific configuration.
+ *
+ * It supports two config commands:
+ *
+ * 0 ID: INSN_CONFIG_SET_COUNTER_MODE
+ * 1 8254 Mode
+ * I8254_MODE0, I8254_MODE1, ..., I8254_MODE5
+ * OR'ed with:
+ * I8254_BCD, I8254_BINARY
+ *
+ * 0 ID: INSN_CONFIG_8254_READ_STATUS
+ * 1 <-- Status byte returned here.
+ * B7 = Output
+ * B6 = NULL Count
+ * B5 - B0 Current mode.
+ */
+
+enum i8254_mode {
+ I8254_MODE0 = (0 << 1), /* Interrupt on terminal count */
+ I8254_MODE1 = (1 << 1), /* Hardware retriggerable one-shot */
+ I8254_MODE2 = (2 << 1), /* Rate generator */
+ I8254_MODE3 = (3 << 1), /* Square wave mode */
+ I8254_MODE4 = (4 << 1), /* Software triggered strobe */
+ /* Hardware triggered strobe (retriggerable) */
+ I8254_MODE5 = (5 << 1),
+ /* Use binary-coded decimal instead of binary (pretty useless) */
+ I8254_BCD = 1,
+ I8254_BINARY = 0
+};
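+
+/*
+ * For example, the data[] array of an %INSN_CONFIG instruction that
+ * selects mode 2 (rate generator) with binary counting could look like
+ * this (a sketch; driver support varies):
+ *
+ *	unsigned int data[2] = {
+ *		INSN_CONFIG_SET_COUNTER_MODE,	// data[0]: config command ID
+ *		I8254_MODE2 | I8254_BINARY,	// data[1]: 8254 mode bits
+ *	};
+ */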
+
+/* *** BEGIN GLOBALLY-NAMED NI TERMINALS/SIGNALS *** */
+
+/*
+ * Common National Instruments Terminal/Signal names.
+ * Some of these have no NI_ prefix as they are useful for non-NI hardware, such
+ * as those that utilize the PXI/RTSI trigger lines.
+ *
+ * NOTE ABOUT THE CHOICE OF NAMES HERE AND THE CAMELSCRIPT:
+ * The choice to use CamelScript and the exact names below is for
+ * maintainability, clarity, similarity to manufacturer's documentation,
+ * _and_ a mitigation for confusion that has plagued the use of these drivers
+ * for years!
+ *
+ * More detail:
+ * There have been significant confusions over the past many years for users
+ * when trying to understand how to connect to/from signals and terminals on
+ * NI hardware using comedi. The major reason for this is that the actual
+ * register values were exposed and required to be used by users. Several
+ * reasons exist why this caused major confusion for users:
+ * 1) The register values are _NOT_ in user documentation, but rather in
+ * arcane locations, such as a few register programming manuals that are
+ * increasingly hard to find and the NI MHDDK (comments in example code).
+ * There is no one place to find the various valid values of the registers.
+ * 2) The register values are _NOT_ completely consistent. There is no way to
+ * gain any sense of intuition of which values, or even enums one should use
+ * for various registers. There was some attempt in prior use of comedi to
+ * name enums such that a user might know which enums should be used for
+ * varying purposes, but the end-user had to gain a knowledge of register
+ * values to correctly wield this approach.
+ * 3) The names for signals and registers found in the various register level
+ * programming manuals and vendor-provided documentation are _not_ even
+ * close to the same names that are in the end-user documentation.
+ *
+ * Similar, albeit less, confusion plagued NI's previous version of their own
+ * drivers. Some time before 2003, NI greatly simplified the situation for users
+ * by releasing a new API that abstracted the names of signals/terminals to a
+ * common and intuitive set of names.
+ *
+ * The names below mirror the names chosen and well documented by NI. These
+ * names are exposed to the user via the comedilib user library. By keeping
+ * the names below, in spite of the use of CamelScript, maintenance will be
+ * greatly eased and confusion for users _and_ comedi developers will be
+ * greatly reduced.
+ */
+
+/*
+ * Base of abstracted NI names.
+ * The first 16 bits of *_arg are reserved for channel selection.
+ * Since we only actually need the first 4 or 5 bits for all register values on
+ * NI select registers anyway, we'll identify all values >= (1<<15) as being an
+ * abstracted NI signal/terminal name.
+ * These values are also used/returned by INSN_DEVICE_CONFIG_TEST_ROUTE,
+ * INSN_DEVICE_CONFIG_CONNECT_ROUTE, INSN_DEVICE_CONFIG_DISCONNECT_ROUTE,
+ * and INSN_DEVICE_CONFIG_GET_ROUTES.
+ */
+#define NI_NAMES_BASE 0x8000u
+
+#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))
+
+/*
+ * not all of the 64 possible PFIs are necessarily valid--certainly not on all devices
+ */
+#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x)
+/* 8 trigger lines by standard; some devices cannot talk to all eight. */
+#define TRIGGER_LINE(x) _TERM_N(NI_PFI(-1) + 1, 8, x)
+/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */
+#define NI_RTSI_BRD(x) _TERM_N(TRIGGER_LINE(-1) + 1, 4, x)
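+
+/*
+ * Worked illustration of the packing above (hexadecimal):
+ * NI_PFI(0) == 0x8000 ... NI_PFI(63) == 0x803f,
+ * TRIGGER_LINE(0) == 0x8040 ... TRIGGER_LINE(7) == 0x8047,
+ * NI_RTSI_BRD(0) == 0x8048 ... NI_RTSI_BRD(3) == 0x804b.
+ */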
+
+/* *** Counter/timer names : 8 counters max *** */
+#define NI_MAX_COUNTERS 8
+#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1)
+#define NI_CtrSource(x) _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x)
+/* Gate, Aux, A,B,Z are all treated, at times as gates */
+#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1)
+#define NI_CtrGate(x) _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x)
+#define NI_CtrAux(x) _TERM_N(NI_CtrGate(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrA(x) _TERM_N(NI_CtrAux(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrB(x) _TERM_N(NI_CtrA(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrZ(x) _TERM_N(NI_CtrB(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_GATES_NAMES_MAX NI_CtrZ(-1)
+#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrInternalOutput(x) \
+ _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x)
+/* external pin(s) labeled conveniently as Ctr<i>Out. */
+#define NI_CtrOut(x) _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x)
+/* For buffered sampling of ctr -- x series capability. */
+#define NI_CtrSampleClock(x) _TERM_N(NI_CtrOut(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1)
+
+enum ni_common_signal_names {
+ /* PXI_Star: this is a non-NI-specific signal */
+ PXI_Star = NI_COUNTER_NAMES_MAX + 1,
+ PXI_Clk10,
+ PXIe_Clk100,
+ NI_AI_SampleClock,
+ NI_AI_SampleClockTimebase,
+ NI_AI_StartTrigger,
+ NI_AI_ReferenceTrigger,
+ NI_AI_ConvertClock,
+ NI_AI_ConvertClockTimebase,
+ NI_AI_PauseTrigger,
+ NI_AI_HoldCompleteEvent,
+ NI_AI_HoldComplete,
+ NI_AI_ExternalMUXClock,
+	NI_AI_STOP, /* pulse signal that occurs when an update is finished(?) */
+ NI_AO_SampleClock,
+ NI_AO_SampleClockTimebase,
+ NI_AO_StartTrigger,
+ NI_AO_PauseTrigger,
+ NI_DI_SampleClock,
+ NI_DI_SampleClockTimebase,
+ NI_DI_StartTrigger,
+ NI_DI_ReferenceTrigger,
+ NI_DI_PauseTrigger,
+ NI_DI_InputBufferFull,
+ NI_DI_ReadyForStartEvent,
+ NI_DI_ReadyForTransferEventBurst,
+ NI_DI_ReadyForTransferEventPipelined,
+ NI_DO_SampleClock,
+ NI_DO_SampleClockTimebase,
+ NI_DO_StartTrigger,
+ NI_DO_PauseTrigger,
+ NI_DO_OutputBufferFull,
+ NI_DO_DataActiveEvent,
+ NI_DO_ReadyForStartEvent,
+ NI_DO_ReadyForTransferEvent,
+ NI_MasterTimebase,
+ NI_20MHzTimebase,
+ NI_80MHzTimebase,
+ NI_100MHzTimebase,
+ NI_200MHzTimebase,
+ NI_100kHzTimebase,
+ NI_10MHzRefClock,
+ NI_FrequencyOutput,
+ NI_ChangeDetectionEvent,
+ NI_AnalogComparisonEvent,
+ NI_WatchdogExpiredEvent,
+ NI_WatchdogExpirationTrigger,
+ NI_SCXI_Trig1,
+ NI_LogicLow,
+ NI_LogicHigh,
+ NI_ExternalStrobe,
+ NI_PFI_DO,
+ NI_CaseGround,
+ /* special internal signal used as variable source for RTSI bus: */
+ NI_RGOUT0,
+
+ /* just a name to make the next more convenient, regardless of above */
+ _NI_NAMES_MAX_PLUS_1,
+ NI_NUM_NAMES = _NI_NAMES_MAX_PLUS_1 - NI_NAMES_BASE,
+};
+
+/* *** END GLOBALLY-NAMED NI TERMINALS/SIGNALS *** */
+
+#define NI_USUAL_PFI_SELECT(x) (((x) < 10) ? (0x1 + (x)) : (0xb + (x)))
+#define NI_USUAL_RTSI_SELECT(x) (((x) < 7) ? (0xb + (x)) : 0x1b)
+
+/*
+ * mode bits for NI general-purpose counters, set with
+ * INSN_CONFIG_SET_COUNTER_MODE
+ */
+#define NI_GPCT_COUNTING_MODE_SHIFT 16
+#define NI_GPCT_INDEX_PHASE_BITSHIFT 20
+#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24
+enum ni_gpct_mode_bits {
+ NI_GPCT_GATE_ON_BOTH_EDGES_BIT = 0x4,
+ NI_GPCT_EDGE_GATE_MODE_MASK = 0x18,
+ NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS = 0x0,
+ NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS = 0x8,
+ NI_GPCT_EDGE_GATE_STARTS_BITS = 0x10,
+ NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS = 0x18,
+ NI_GPCT_STOP_MODE_MASK = 0x60,
+ NI_GPCT_STOP_ON_GATE_BITS = 0x00,
+ NI_GPCT_STOP_ON_GATE_OR_TC_BITS = 0x20,
+ NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS = 0x40,
+ NI_GPCT_LOAD_B_SELECT_BIT = 0x80,
+ NI_GPCT_OUTPUT_MODE_MASK = 0x300,
+ NI_GPCT_OUTPUT_TC_PULSE_BITS = 0x100,
+ NI_GPCT_OUTPUT_TC_TOGGLE_BITS = 0x200,
+ NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS = 0x300,
+ NI_GPCT_HARDWARE_DISARM_MASK = 0xc00,
+ NI_GPCT_NO_HARDWARE_DISARM_BITS = 0x000,
+ NI_GPCT_DISARM_AT_TC_BITS = 0x400,
+ NI_GPCT_DISARM_AT_GATE_BITS = 0x800,
+ NI_GPCT_DISARM_AT_TC_OR_GATE_BITS = 0xc00,
+ NI_GPCT_LOADING_ON_TC_BIT = 0x1000,
+ NI_GPCT_LOADING_ON_GATE_BIT = 0x4000,
+ NI_GPCT_COUNTING_MODE_MASK = 0x7 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_COUNTING_MODE_NORMAL_BITS =
+ 0x0 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS =
+ 0x1 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS =
+ 0x2 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS =
+ 0x3 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS =
+ 0x4 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS =
+ 0x6 << NI_GPCT_COUNTING_MODE_SHIFT,
+ NI_GPCT_INDEX_PHASE_MASK = 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+ NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS =
+ 0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+ NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS =
+ 0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+ NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS =
+ 0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+ NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS =
+ 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+ NI_GPCT_INDEX_ENABLE_BIT = 0x400000,
+ NI_GPCT_COUNTING_DIRECTION_MASK =
+ 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+ NI_GPCT_COUNTING_DIRECTION_DOWN_BITS =
+ 0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+ NI_GPCT_COUNTING_DIRECTION_UP_BITS =
+ 0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+ NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS =
+ 0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+ NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS =
+ 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+ NI_GPCT_RELOAD_SOURCE_MASK = 0xc000000,
+ NI_GPCT_RELOAD_SOURCE_FIXED_BITS = 0x0,
+ NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS = 0x4000000,
+ NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS = 0x8000000,
+ NI_GPCT_OR_GATE_BIT = 0x10000000,
+ NI_GPCT_INVERT_OUTPUT_BIT = 0x20000000
+};
+
+/*
+ * Bits for setting a clock source with
+ * INSN_CONFIG_SET_CLOCK_SRC when using NI general-purpose counters.
+ */
+enum ni_gpct_clock_source_bits {
+ NI_GPCT_CLOCK_SRC_SELECT_MASK = 0x3f,
+ NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS = 0x0,
+ NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS = 0x1,
+ NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS = 0x2,
+ NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3,
+ NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4,
+ NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5,
+ /* NI 660x-specific */
+ NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6,
+ NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7,
+ NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8,
+ NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9,
+ NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000,
+ NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0,
+ /* divide source by 2 */
+ NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000,
+ /* divide source by 8 */
+ NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000,
+ NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000
+};
+
+/* NI 660x-specific */
+#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x) (0x10 + (x))
+
+#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x) (0x18 + (x))
+
+/* no pfi on NI 660x */
+#define NI_GPCT_PFI_CLOCK_SRC_BITS(x) (0x20 + (x))
+
+/*
+ * Possibilities for setting a gate source with
+ * INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
+ * May be bitwise-or'd with CR_EDGE or CR_INVERT.
+ */
+enum ni_gpct_gate_select {
+ /* m-series gates */
+ NI_GPCT_TIMESTAMP_MUX_GATE_SELECT = 0x0,
+ NI_GPCT_AI_START2_GATE_SELECT = 0x12,
+ NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT = 0x13,
+ NI_GPCT_NEXT_OUT_GATE_SELECT = 0x14,
+ NI_GPCT_AI_START1_GATE_SELECT = 0x1c,
+ NI_GPCT_NEXT_SOURCE_GATE_SELECT = 0x1d,
+ NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT = 0x1e,
+ NI_GPCT_LOGIC_LOW_GATE_SELECT = 0x1f,
+ /* more gates for 660x */
+ NI_GPCT_SOURCE_PIN_i_GATE_SELECT = 0x100,
+ NI_GPCT_GATE_PIN_i_GATE_SELECT = 0x101,
+ /* more gates for 660x "second gate" */
+ NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201,
+ NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e,
+ /*
+ * m-series "second gate" sources are unknown,
+ * we should add them here with an offset of 0x300 when
+ * known.
+ */
+ NI_GPCT_DISABLED_GATE_SELECT = 0x8000,
+};
+
+#define NI_GPCT_GATE_PIN_GATE_SELECT(x) (0x102 + (x))
+#define NI_GPCT_RTSI_GATE_SELECT(x) NI_USUAL_RTSI_SELECT(x)
+#define NI_GPCT_PFI_GATE_SELECT(x) NI_USUAL_PFI_SELECT(x)
+#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x) (0x202 + (x))
+
+/*
+ * Possibilities for setting a source with
+ * INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters.
+ */
+enum ni_gpct_other_index {
+ NI_GPCT_SOURCE_ENCODER_A,
+ NI_GPCT_SOURCE_ENCODER_B,
+ NI_GPCT_SOURCE_ENCODER_Z
+};
+
+enum ni_gpct_other_select {
+ /* m-series gates */
+ /* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
+ NI_GPCT_DISABLED_OTHER_SELECT = 0x8000,
+};
+
+#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x)
+
+/*
+ * start sources for ni general-purpose counters for use with
+ * INSN_CONFIG_ARM
+ */
+enum ni_gpct_arm_source {
+ NI_GPCT_ARM_IMMEDIATE = 0x0,
+ /*
+ * Start both the counter and the adjacent paired counter simultaneously
+ */
+ NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,
+ /*
+ * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant
+ * bits (3 bits for 660x or 5 bits for m-series) through to the
+ * hardware. To select a hardware trigger, pass the appropriate select
+ * bit, e.g.,
+ * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or
+ * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number)
+ */
+ NI_GPCT_HW_ARM = 0x1000,
+ NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */
+};
+
+/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
+enum ni_gpct_filter_select {
+ NI_GPCT_FILTER_OFF = 0x0,
+ NI_GPCT_FILTER_TIMEBASE_3_SYNC = 0x1,
+ NI_GPCT_FILTER_100x_TIMEBASE_1 = 0x2,
+ NI_GPCT_FILTER_20x_TIMEBASE_1 = 0x3,
+ NI_GPCT_FILTER_10x_TIMEBASE_1 = 0x4,
+ NI_GPCT_FILTER_2x_TIMEBASE_1 = 0x5,
+ NI_GPCT_FILTER_2x_TIMEBASE_3 = 0x6
+};
+
+/*
+ * PFI digital filtering options for ni m-series for use with
+ * INSN_CONFIG_FILTER.
+ */
+enum ni_pfi_filter_select {
+ NI_PFI_FILTER_OFF = 0x0,
+ NI_PFI_FILTER_125ns = 0x1,
+ NI_PFI_FILTER_6425ns = 0x2,
+ NI_PFI_FILTER_2550us = 0x3
+};
+
+/* master clock sources for ni mio boards and INSN_CONFIG_SET_CLOCK_SRC */
+enum ni_mio_clock_source {
+ NI_MIO_INTERNAL_CLOCK = 0,
+ /*
+	 * Doesn't work for m-series; use NI_MIO_PLL_RTSI_CLOCK().
+	 * The NI_MIO_PLL_* sources are m-series only.
+ */
+ NI_MIO_RTSI_CLOCK = 1,
+ NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK = 2,
+ NI_MIO_PLL_PXI10_CLOCK = 3,
+ NI_MIO_PLL_RTSI0_CLOCK = 4
+};
+
+#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x))
+
+/*
+ * Signals which can be routed to an NI RTSI pin with INSN_CONFIG_SET_ROUTING.
+ * The numbers assigned are not arbitrary, they correspond to the bits required
+ * to program the board.
+ */
+enum ni_rtsi_routing {
+ NI_RTSI_OUTPUT_ADR_START1 = 0,
+ NI_RTSI_OUTPUT_ADR_START2 = 1,
+ NI_RTSI_OUTPUT_SCLKG = 2,
+ NI_RTSI_OUTPUT_DACUPDN = 3,
+ NI_RTSI_OUTPUT_DA_START1 = 4,
+ NI_RTSI_OUTPUT_G_SRC0 = 5,
+ NI_RTSI_OUTPUT_G_GATE0 = 6,
+ NI_RTSI_OUTPUT_RGOUT0 = 7,
+ NI_RTSI_OUTPUT_RTSI_BRD_0 = 8,
+ /* Pre-m-series always have RTSI clock on line 7 */
+ NI_RTSI_OUTPUT_RTSI_OSC = 12
+};
+
+#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x))
+
+/*
+ * Signals which can be routed to an NI PFI pin on an m-series board with
+ * INSN_CONFIG_SET_ROUTING. These numbers are also returned by
+ * INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their routing
+ * cannot be changed. The numbers assigned are not arbitrary, they correspond
+ * to the bits required to program the board.
+ */
+enum ni_pfi_routing {
+ NI_PFI_OUTPUT_PFI_DEFAULT = 0,
+ NI_PFI_OUTPUT_AI_START1 = 1,
+ NI_PFI_OUTPUT_AI_START2 = 2,
+ NI_PFI_OUTPUT_AI_CONVERT = 3,
+ NI_PFI_OUTPUT_G_SRC1 = 4,
+ NI_PFI_OUTPUT_G_GATE1 = 5,
+ NI_PFI_OUTPUT_AO_UPDATE_N = 6,
+ NI_PFI_OUTPUT_AO_START1 = 7,
+ NI_PFI_OUTPUT_AI_START_PULSE = 8,
+ NI_PFI_OUTPUT_G_SRC0 = 9,
+ NI_PFI_OUTPUT_G_GATE0 = 10,
+ NI_PFI_OUTPUT_EXT_STROBE = 11,
+ NI_PFI_OUTPUT_AI_EXT_MUX_CLK = 12,
+ NI_PFI_OUTPUT_GOUT0 = 13,
+ NI_PFI_OUTPUT_GOUT1 = 14,
+ NI_PFI_OUTPUT_FREQ_OUT = 15,
+ NI_PFI_OUTPUT_PFI_DO = 16,
+ NI_PFI_OUTPUT_I_ATRIG = 17,
+ NI_PFI_OUTPUT_RTSI0 = 18,
+ NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN = 26,
+ NI_PFI_OUTPUT_SCXI_TRIG1 = 27,
+ NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI = 28,
+ NI_PFI_OUTPUT_CDI_SAMPLE = 29,
+ NI_PFI_OUTPUT_CDO_UPDATE = 30
+};
+
+#define NI_PFI_OUTPUT_RTSI(x) (NI_PFI_OUTPUT_RTSI0 + (x))
+
+/*
+ * Signals which can be routed to output on an NI PFI pin on a 660x board
+ * with INSN_CONFIG_SET_ROUTING. The numbers assigned are
+ * not arbitrary, they correspond to the bits required
+ * to program the board. Lines 0 to 7 can only be set to
+ * NI_660X_PFI_OUTPUT_DIO. Lines 32 to 39 can only be set to
+ * NI_660X_PFI_OUTPUT_COUNTER.
+ */
+enum ni_660x_pfi_routing {
+ NI_660X_PFI_OUTPUT_COUNTER = 1, /* counter */
+ NI_660X_PFI_OUTPUT_DIO = 2, /* static digital output */
+};
+
+/*
+ * NI External Trigger lines. These values are not arbitrary, but are related
+ * to the bits required to program the board (offset by 1 for historical
+ * reasons).
+ */
+#define NI_EXT_PFI(x) (NI_USUAL_PFI_SELECT(x) - 1)
+#define NI_EXT_RTSI(x) (NI_USUAL_RTSI_SELECT(x) - 1)
+
+/*
+ * Clock sources for CDIO subdevice on NI m-series boards. Used as the
+ * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
+ * with CR_INVERT to change polarity.
+ */
+enum ni_m_series_cdio_scan_begin_src {
+ NI_CDIO_SCAN_BEGIN_SRC_GROUND = 0,
+ NI_CDIO_SCAN_BEGIN_SRC_AI_START = 18,
+ NI_CDIO_SCAN_BEGIN_SRC_AI_CONVERT = 19,
+ NI_CDIO_SCAN_BEGIN_SRC_PXI_STAR_TRIGGER = 20,
+ NI_CDIO_SCAN_BEGIN_SRC_G0_OUT = 28,
+ NI_CDIO_SCAN_BEGIN_SRC_G1_OUT = 29,
+ NI_CDIO_SCAN_BEGIN_SRC_ANALOG_TRIGGER = 30,
+ NI_CDIO_SCAN_BEGIN_SRC_AO_UPDATE = 31,
+ NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32,
+ NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33
+};
+
+#define NI_CDIO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x)
+#define NI_CDIO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x)
+
+/*
+ * scan_begin_arg values for scan_begin_src==TRIG_EXT with an analog output
+ * command on NI boards. These values can also be bitwise-or'd with CR_INVERT to
+ * change polarity.
+ */
+#define NI_AO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x)
+#define NI_AO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x)
+
+/*
+ * Bits for setting a clock source with
+ * INSN_CONFIG_SET_CLOCK_SRC when using NI frequency output subdevice.
+ */
+enum ni_freq_out_clock_source_bits {
+ NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC, /* 10 MHz */
+	NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC /* 100 kHz */
+};
+
+/*
+ * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
+ * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver).
+ */
+enum amplc_dio_clock_source {
+ /*
+	 * Per-channel external clock
+	 * input/output pin (the pin is only an
+	 * input when the clock source is set to
+	 * this value, otherwise it is an output)
+ */
+ AMPLC_DIO_CLK_CLKN,
+ AMPLC_DIO_CLK_10MHZ, /* 10 MHz internal clock */
+ AMPLC_DIO_CLK_1MHZ, /* 1 MHz internal clock */
+ AMPLC_DIO_CLK_100KHZ, /* 100 kHz internal clock */
+ AMPLC_DIO_CLK_10KHZ, /* 10 kHz internal clock */
+ AMPLC_DIO_CLK_1KHZ, /* 1 kHz internal clock */
+ /*
+	 * Output of the preceding counter channel
+	 * (for channel 0, the preceding channel
+	 * is channel 2 on the preceding counter
+	 * subdevice; for the first counter
+	 * subdevice, the preceding counter
+	 * subdevice is the last counter
+	 * subdevice)
+ */
+ AMPLC_DIO_CLK_OUTNM1,
+ AMPLC_DIO_CLK_EXT, /* per chip external input pin */
+ /* the following are "enhanced" clock sources for PCIe models */
+ AMPLC_DIO_CLK_VCC, /* clock input HIGH */
+ AMPLC_DIO_CLK_GND, /* clock input LOW */
+ AMPLC_DIO_CLK_PAT_PRESENT, /* "pattern present" signal */
+ AMPLC_DIO_CLK_20MHZ /* 20 MHz internal clock */
+};
+
+/*
+ * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
+ * timer subdevice on some Amplicon DIO PCIe boards (amplc_dio200 driver).
+ */
+enum amplc_dio_ts_clock_src {
+ AMPLC_DIO_TS_CLK_1GHZ, /* 1 ns period with 20 ns granularity */
+ AMPLC_DIO_TS_CLK_1MHZ, /* 1 us period */
+ AMPLC_DIO_TS_CLK_1KHZ /* 1 ms period */
+};
+
+/*
+ * Values for setting a gate source with INSN_CONFIG_SET_GATE_SRC for
+ * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver).
+ */
+enum amplc_dio_gate_source {
+ AMPLC_DIO_GAT_VCC, /* internal high logic level */
+ AMPLC_DIO_GAT_GND, /* internal low logic level */
+ AMPLC_DIO_GAT_GATN, /* per channel external gate input */
+ /*
+ * negated output of counter channel minus 2
+	 * (for channel 0 or 1, "channel minus 2" is channel 1 or 2 on
+	 * the preceding counter subdevice; for the first counter subdevice,
+	 * the preceding counter subdevice is the last counter subdevice)
+ */
+ AMPLC_DIO_GAT_NOUTNM2,
+ AMPLC_DIO_GAT_RESERVED4,
+ AMPLC_DIO_GAT_RESERVED5,
+ AMPLC_DIO_GAT_RESERVED6,
+ AMPLC_DIO_GAT_RESERVED7,
+ /* the following are "enhanced" gate sources for PCIe models */
+ AMPLC_DIO_GAT_NGATN = 6, /* negated per channel gate input */
+ /* non-negated output of counter channel minus 2 */
+ AMPLC_DIO_GAT_OUTNM2,
+ AMPLC_DIO_GAT_PAT_PRESENT, /* "pattern present" signal */
+ AMPLC_DIO_GAT_PAT_OCCURRED, /* "pattern occurred" latched */
+ AMPLC_DIO_GAT_PAT_GONE, /* "pattern gone away" latched */
+ AMPLC_DIO_GAT_NPAT_PRESENT, /* negated "pattern present" */
+ AMPLC_DIO_GAT_NPAT_OCCURRED, /* negated "pattern occurred" */
+ AMPLC_DIO_GAT_NPAT_GONE /* negated "pattern gone away" */
+};
+
+/*
+ * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
+ * the counter subdevice on the Kolter Electronic PCI-Counter board
+ * (ke_counter driver).
+ */
+enum ke_counter_clock_source {
+	KE_CLK_20MHZ,	/* internal 20 MHz (default) */
+	KE_CLK_4MHZ,	/* internal 4 MHz (option) */
+ KE_CLK_EXT /* external clock on pin 21 of D-Sub */
+};
+
+#endif /* _COMEDI_H */
diff --git a/include/uapi/linux/connector.h b/include/uapi/linux/connector.h
index 3738936149a2..5ae131c3f145 100644
--- a/include/uapi/linux/connector.h
+++ b/include/uapi/linux/connector.h
@@ -75,7 +75,7 @@ struct cn_msg {
__u16 len; /* Length of the following data */
__u16 flags;
- __u8 data[0];
+ __u8 data[];
};
#endif /* _UAPI__CONNECTOR_H */
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index 5ed721ad5b19..a429381e7ca5 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
#endif /* _UAPI_LINUX_CONST_H */
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
index 8847dbf24151..7ff3709c01b8 100644
--- a/include/uapi/linux/coresight-stm.h
+++ b/include/uapi/linux/coresight-stm.h
@@ -5,6 +5,7 @@
#include <linux/const.h>
#define STM_FLAG_TIMESTAMPED _BITUL(3)
+#define STM_FLAG_MARKED _BITUL(4)
#define STM_FLAG_GUARANTEED _BITUL(7)
/*
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
new file mode 100644
index 000000000000..008a691c254b
--- /dev/null
+++ b/include/uapi/linux/counter.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace ABI for Counter character devices
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _UAPI_COUNTER_H_
+#define _UAPI_COUNTER_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Component type definitions */
+enum counter_component_type {
+ COUNTER_COMPONENT_NONE,
+ COUNTER_COMPONENT_SIGNAL,
+ COUNTER_COMPONENT_COUNT,
+ COUNTER_COMPONENT_FUNCTION,
+ COUNTER_COMPONENT_SYNAPSE_ACTION,
+ COUNTER_COMPONENT_EXTENSION,
+};
+
+/* Component scope definitions */
+enum counter_scope {
+ COUNTER_SCOPE_DEVICE,
+ COUNTER_SCOPE_SIGNAL,
+ COUNTER_SCOPE_COUNT,
+};
+
+/**
+ * struct counter_component - Counter component identification
+ * @type: component type (one of enum counter_component_type)
+ * @scope: component scope (one of enum counter_scope)
+ * @parent: parent ID (matching the ID suffix of the respective parent sysfs
+ * path as described by the ABI documentation file
+ * Documentation/ABI/testing/sysfs-bus-counter)
+ * @id: component ID (matching the ID provided by the respective *_component_id
+ * sysfs attribute of the desired component)
+ *
+ * For example, if the Count 2 ceiling extension of Counter device 4 is desired,
+ * set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to
+ * COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by
+ * the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id
+ * sysfs attribute.
+ */
+struct counter_component {
+ __u8 type;
+ __u8 scope;
+ __u8 parent;
+ __u8 id;
+};
+
+/* Event type definitions */
+enum counter_event_type {
+ /* Count value increased past ceiling */
+ COUNTER_EVENT_OVERFLOW,
+ /* Count value decreased past floor */
+ COUNTER_EVENT_UNDERFLOW,
+ /* Count value increased past ceiling, or decreased past floor */
+ COUNTER_EVENT_OVERFLOW_UNDERFLOW,
+ /* Count value reached threshold */
+ COUNTER_EVENT_THRESHOLD,
+ /* Index signal detected */
+ COUNTER_EVENT_INDEX,
+	/* State of counter changed */
+ COUNTER_EVENT_CHANGE_OF_STATE,
+ /* Count value captured */
+ COUNTER_EVENT_CAPTURE,
+};
+
+/**
+ * struct counter_watch - Counter component watch configuration
+ * @component: component to watch when event triggers
+ * @event: event that triggers (one of enum counter_event_type)
+ * @channel: event channel (typically 0 unless the device supports concurrent
+ * events of the same type)
+ */
+struct counter_watch {
+ struct counter_component component;
+ __u8 event;
+ __u8 channel;
+};
+
+/*
+ * Queues a Counter watch for the specified event.
+ *
+ * The queued watches will not be applied until COUNTER_ENABLE_EVENTS_IOCTL is
+ * called.
+ */
+#define COUNTER_ADD_WATCH_IOCTL _IOW(0x3E, 0x00, struct counter_watch)
+/*
+ * Enables monitoring the events specified by the Counter watches that were
+ * queued by COUNTER_ADD_WATCH_IOCTL.
+ *
+ * If events are already enabled, the new set of watches replaces the old one.
+ * Calling this ioctl also has the effect of clearing the queue of watches added
+ * by COUNTER_ADD_WATCH_IOCTL.
+ */
+#define COUNTER_ENABLE_EVENTS_IOCTL _IO(0x3E, 0x01)
+/*
+ * Stops monitoring the previously enabled events.
+ */
+#define COUNTER_DISABLE_EVENTS_IOCTL _IO(0x3E, 0x02)
+
+/**
+ * struct counter_event - Counter event data
+ * @timestamp: best estimate of time of event occurrence, in nanoseconds
+ * @value: component value
+ * @watch: component watch configuration
+ * @status: return status (system error number)
+ */
+struct counter_event {
+ __aligned_u64 timestamp;
+ __aligned_u64 value;
+ struct counter_watch watch;
+ __u8 status;
+};
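+
+/*
+ * An illustrative sketch (assumes <fcntl.h>, <sys/ioctl.h>, <unistd.h>
+ * and that the device node is /dev/counter0): watch Count 0 for
+ * overflow events, enable event monitoring, then read an event:
+ *
+ *	struct counter_watch watch = {
+ *		.component.type = COUNTER_COMPONENT_COUNT,
+ *		.component.scope = COUNTER_SCOPE_COUNT,
+ *		.component.parent = 0,	// Count 0
+ *		.event = COUNTER_EVENT_OVERFLOW,
+ *		.channel = 0,
+ *	};
+ *	struct counter_event event;
+ *	int fd = open("/dev/counter0", O_RDWR);
+ *
+ *	ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch);
+ *	ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
+ *	read(fd, &event, sizeof(event));	// event.value is the count
+ */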
+
+/* Count direction values */
+enum counter_count_direction {
+ COUNTER_COUNT_DIRECTION_FORWARD,
+ COUNTER_COUNT_DIRECTION_BACKWARD,
+};
+
+/* Count mode values */
+enum counter_count_mode {
+ COUNTER_COUNT_MODE_NORMAL,
+ COUNTER_COUNT_MODE_RANGE_LIMIT,
+ COUNTER_COUNT_MODE_NON_RECYCLE,
+ COUNTER_COUNT_MODE_MODULO_N,
+ COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT,
+ COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT,
+ COUNTER_COUNT_MODE_RATE_GENERATOR,
+ COUNTER_COUNT_MODE_SQUARE_WAVE_MODE,
+ COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE,
+ COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,
+};
+
+/* Count function values */
+enum counter_function {
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_DECREASE,
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_QUADRATURE_X1_A,
+ COUNTER_FUNCTION_QUADRATURE_X1_B,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_B,
+ COUNTER_FUNCTION_QUADRATURE_X4,
+};
+
+/* Signal values */
+enum counter_signal_level {
+ COUNTER_SIGNAL_LEVEL_LOW,
+ COUNTER_SIGNAL_LEVEL_HIGH,
+};
+
+/* Action mode values */
+enum counter_synapse_action {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+/* Signal polarity values */
+enum counter_signal_polarity {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
+#endif /* _UAPI_COUNTER_H_ */
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
new file mode 100644
index 000000000000..42066f4eb890
--- /dev/null
+++ b/include/uapi/linux/cxl_mem.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * CXL IOCTLs for Memory Devices
+ */
+
+#ifndef _UAPI_CXL_MEM_H_
+#define _UAPI_CXL_MEM_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: UAPI
+ *
+ * Not all of the commands that the driver supports are available for use by
+ * userspace at all times. Userspace can check the result of the QUERY command
+ * to determine the live set of commands. Alternatively, it can issue the
+ * command and check for failure.
+ */
+
+#define CXL_MEM_QUERY_COMMANDS _IOR(0xCE, 1, struct cxl_mem_query_commands)
+#define CXL_MEM_SEND_COMMAND _IOWR(0xCE, 2, struct cxl_send_command)
+
+/*
+ * NOTE: New defines must be added to the end of the list to preserve
+ * compatibility because this enum is exported to user space.
+ */
+#define CXL_CMDS \
+ ___C(INVALID, "Invalid Command"), \
+ ___C(IDENTIFY, "Identify Command"), \
+ ___C(RAW, "Raw device command"), \
+ ___C(GET_SUPPORTED_LOGS, "Get Supported Logs"), \
+ ___C(GET_FW_INFO, "Get FW Info"), \
+ ___C(GET_PARTITION_INFO, "Get Partition Information"), \
+ ___C(GET_LSA, "Get Label Storage Area"), \
+ ___C(GET_HEALTH_INFO, "Get Health Info"), \
+ ___C(GET_LOG, "Get Log"), \
+ ___C(SET_PARTITION_INFO, "Set Partition Information"), \
+ ___C(SET_LSA, "Set Label Storage Area"), \
+ ___C(GET_ALERT_CONFIG, "Get Alert Configuration"), \
+ ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \
+ ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \
+ ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \
+ ___DEPRECATED(GET_POISON, "Get Poison List"), \
+ ___DEPRECATED(INJECT_POISON, "Inject Poison"), \
+ ___DEPRECATED(CLEAR_POISON, "Clear Poison"), \
+ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
+ ___DEPRECATED(SCAN_MEDIA, "Scan Media"), \
+ ___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \
+ ___C(GET_TIMESTAMP, "Get Timestamp"), \
+ ___C(MAX, "invalid / last command")
+
+#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
+#define ___DEPRECATED(a, b) CXL_MEM_DEPRECATED_ID_##a
+enum { CXL_CMDS };
+
+#undef ___C
+#undef ___DEPRECATED
+#define ___C(a, b) { b }
+#define ___DEPRECATED(a, b) { "Deprecated " b }
+static const struct {
+ const char *name;
+} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
+
+/*
+ * Here's how this actually breaks out:
+ * cxl_command_names[] = {
+ * [CXL_MEM_COMMAND_ID_INVALID] = { "Invalid Command" },
+ * [CXL_MEM_COMMAND_ID_IDENTIFY] = { "Identify Command" },
+ * ...
+ * [CXL_MEM_COMMAND_ID_MAX] = { "invalid / last command" },
+ * };
+ */
+
+#undef ___C
+#undef ___DEPRECATED
+#define ___C(a, b) (0)
+#define ___DEPRECATED(a, b) (1)
+
+static const __u8 cxl_deprecated_commands[]
+ __attribute__((__unused__)) = { CXL_CMDS };
+
+/*
+ * Here's how this actually breaks out:
+ * cxl_deprecated_commands[] = {
+ * [CXL_MEM_COMMAND_ID_INVALID] = 0,
+ * [CXL_MEM_COMMAND_ID_IDENTIFY] = 0,
+ * ...
+ * [CXL_MEM_DEPRECATED_ID_GET_POISON] = 1,
+ * [CXL_MEM_DEPRECATED_ID_INJECT_POISON] = 1,
+ * [CXL_MEM_DEPRECATED_ID_CLEAR_POISON] = 1,
+ * ...
+ * };
+ */
+
+#undef ___C
+#undef ___DEPRECATED
+
+/**
+ * struct cxl_command_info - Command information returned from a query.
+ * @id: ID number for the command.
+ * @flags: Flags that specify command behavior.
+ *
+ * CXL_MEM_COMMAND_FLAG_ENABLED
+ *
+ * The given command id is supported by the driver and is supported by
+ * a related opcode on the device.
+ *
+ * CXL_MEM_COMMAND_FLAG_EXCLUSIVE
+ *
+ * Requests with the given command id will terminate with EBUSY as the
+ * kernel actively owns management of the given resource. For example,
+ * the label-storage-area cannot be written while the kernel is
+ * actively managing that space.
+ *
+ * @size_in: Expected input size, or ~0 if variable length.
+ * @size_out: Expected output size, or ~0 if variable length.
+ *
+ * Represents a single command that is supported by both the driver and the
+ * hardware. This is returned as part of an array from the query ioctl. The
+ * following would be a command that takes a variable length input and returns 0
+ * bytes of output.
+ *
+ * - @id = 10
+ * - @flags = CXL_MEM_COMMAND_FLAG_ENABLED
+ * - @size_in = ~0
+ * - @size_out = 0
+ *
+ * See struct cxl_mem_query_commands.
+ */
+struct cxl_command_info {
+ __u32 id;
+
+ __u32 flags;
+#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(1, 0)
+#define CXL_MEM_COMMAND_FLAG_ENABLED BIT(0)
+#define CXL_MEM_COMMAND_FLAG_EXCLUSIVE BIT(1)
+
+ __u32 size_in;
+ __u32 size_out;
+};
+
+/**
+ * struct cxl_mem_query_commands - Query supported commands.
+ * @n_commands: In/out parameter. When @n_commands is > 0, the driver will
+ * return min(num_support_commands, n_commands). When @n_commands
+ *		is 0, the driver will return the total number of supported commands.
+ * @rsvd: Reserved for future use.
+ * @commands: Output array of supported commands. This array must be allocated
+ *		by userspace to hold at least min(num_support_commands, @n_commands) entries.
+ *
+ * Allow userspace to query the available commands supported by both the driver
+ * and the hardware. Commands that aren't supported by either the driver or the
+ * hardware are not returned in the query.
+ *
+ * Examples:
+ *
+ * - { .n_commands = 0 } // Get number of supported commands
+ * - { .n_commands = 15, .commands = buf } // Return first 15 (or fewer)
+ * supported commands
+ *
+ * See struct cxl_command_info.
+ */
+struct cxl_mem_query_commands {
+ /*
+ * Input: Number of commands to return (space allocated by user)
+ * Output: Number of commands supported by the driver/hardware
+ *
+	 * If n_commands is 0, the kernel will only return the number of
+	 * commands and not try to populate commands[], thus allowing
+	 * userspace to know how much space to allocate.
+ */
+ __u32 n_commands;
+ __u32 rsvd;
+
+ struct cxl_command_info __user commands[]; /* out: supported commands */
+};
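+
+/*
+ * An illustrative two-call pattern (assumes <stdlib.h> and an open
+ * file descriptor 'fd'): query the command count first, then allocate
+ * and fetch the full array:
+ *
+ *	struct cxl_mem_query_commands probe = { .n_commands = 0 };
+ *	struct cxl_mem_query_commands *query;
+ *
+ *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe);	// sets probe.n_commands
+ *	query = malloc(sizeof(*query) +
+ *		       probe.n_commands * sizeof(struct cxl_command_info));
+ *	query->n_commands = probe.n_commands;
+ *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, query);
+ */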
+
+/**
+ * struct cxl_send_command - Send a command to a memory device.
+ * @id: The command to send to the memory device. This must be one of the
+ * commands returned by the query command.
+ * @flags: Flags for the command (input).
+ * @raw: Special fields for raw commands
+ * @raw.opcode: Opcode passed to hardware when using the RAW command.
+ * @raw.rsvd: Must be zero.
+ * @rsvd: Must be zero.
+ * @retval: Return value from the memory device (output).
+ * @in: Parameters associated with input payload.
+ * @in.size: Size of the payload to provide to the device (input).
+ * @in.rsvd: Must be zero.
+ * @in.payload: Pointer to memory for payload input, payload is little endian.
+ * @out: Parameters associated with output payload.
+ * @out.size: Size of the payload received from the device (input/output). This
+ * field is filled in by userspace to let the driver know how much
+ * space was allocated for output. It is populated by the driver to
+ * let userspace know how large the output payload actually was.
+ * @out.rsvd: Must be zero.
+ * @out.payload: Pointer to memory for payload output, payload is little endian.
+ *
+ * Mechanism for userspace to send a command to the hardware for processing. The
+ * driver will do basic validation on the command sizes. In some cases even the
+ * payload may be introspected. Userspace is required to allocate large enough
+ * buffers for size_out, which can be variable length in certain situations.
+ */
+struct cxl_send_command {
+ __u32 id;
+ __u32 flags;
+ union {
+ struct {
+ __u16 opcode;
+ __u16 rsvd;
+ } raw;
+ __u32 rsvd;
+ };
+ __u32 retval;
+
+ struct {
+ __u32 size;
+ __u32 rsvd;
+ __u64 payload;
+ } in;
+
+ struct {
+ __u32 size;
+ __u32 rsvd;
+ __u64 payload;
+ } out;
+};
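+
+/*
+ * Sketch of sending a command with no input payload (the 4096-byte
+ * output buffer is an arbitrary illustrative size, and handle_identify()
+ * is a hypothetical caller-supplied function):
+ *
+ *	char buf[4096];
+ *	struct cxl_send_command cmd = {
+ *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
+ *		.out.size = sizeof(buf),
+ *		.out.payload = (__u64)(uintptr_t)buf,
+ *	};
+ *
+ *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) == 0 && cmd.retval == 0)
+ *		handle_identify(buf, cmd.out.size);	// hypothetical handler
+ */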
+
+#endif
diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h
index fc0add2194a9..6225c5aebe06 100644
--- a/include/uapi/linux/cyclades.h
+++ b/include/uapi/linux/cyclades.h
@@ -1,109 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
- * linux/include/linux/cyclades.h
- *
- * This file was initially written by
- * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
- * Ivan Passos <ivan@cyclades.com>.
- *
- * This file contains the general definitions for the cyclades.c driver
- *$Log: cyclades.h,v $
- *Revision 3.1 2002/01/29 11:36:16 henrique
- *added throttle field on struct cyclades_port to indicate whether the
- *port is throttled or not
- *
- *Revision 3.1 2000/04/19 18:52:52 ivan
- *converted address fields to unsigned long and added fields for physical
- *addresses on cyclades_card structure;
- *
- *Revision 3.0 1998/11/02 14:20:59 ivan
- *added nports field on cyclades_card structure;
- *
- *Revision 2.5 1998/08/03 16:57:01 ivan
- *added cyclades_idle_stats structure;
- *
- *Revision 2.4 1998/06/01 12:09:53 ivan
- *removed closing_wait2 from cyclades_port structure;
- *
- *Revision 2.3 1998/03/16 18:01:12 ivan
- *changes in the cyclades_port structure to get it closer to the
- *standard serial port structure;
- *added constants for new ioctls;
- *
- *Revision 2.2 1998/02/17 16:50:00 ivan
- *changes in the cyclades_port structure (addition of shutdown_wait and
- *chip_rev variables);
- *added constants for new ioctls and for CD1400 rev. numbers.
- *
- *Revision 2.1 1997/10/24 16:03:00 ivan
- *added rflow (which allows enabling the CD1400 special flow control
- *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
- *cyclades_port structure;
- *added Alpha support
- *
- *Revision 2.0 1997/06/30 10:30:00 ivan
- *added some new doorbell command constants related to IOCTLW and
- *UART error signaling
- *
- *Revision 1.8 1997/06/03 15:30:00 ivan
- *added constant ZFIRM_HLT
- *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
- *
- *Revision 1.7 1997/03/26 10:30:00 daniel
- *new entries at the end of cyclades_port struct to reallocate
- *variables illegally allocated within card memory.
- *
- *Revision 1.6 1996/09/09 18:35:30 bentson
- *fold in changes for Cyclom-Z -- including structures for
- *communicating with board as well modest changes to original
- *structures to support new features.
- *
- *Revision 1.5 1995/11/13 21:13:31 bentson
- *changes suggested by Michael Chastain <mec@duracef.shout.net>
- *to support use of this file in non-kernel applications
- *
- *
- */
#ifndef _UAPI_LINUX_CYCLADES_H
#define _UAPI_LINUX_CYCLADES_H
-#include <linux/types.h>
+#warning "Support for features provided by this header has been removed"
+#warning "Please consider updating your code"
struct cyclades_monitor {
- unsigned long int_count;
- unsigned long char_count;
- unsigned long char_max;
- unsigned long char_last;
-};
-
-/*
- * These stats all reflect activity since the device was last initialized.
- * (i.e., since the port was opened with no other processes already having it
- * open)
- */
-struct cyclades_idle_stats {
- __kernel_old_time_t in_use; /* Time device has been in use (secs) */
- __kernel_old_time_t recv_idle; /* Time since last char received (secs) */
- __kernel_old_time_t xmit_idle; /* Time since last char transmitted (secs) */
- unsigned long recv_bytes; /* Bytes received */
- unsigned long xmit_bytes; /* Bytes transmitted */
- unsigned long overruns; /* Input overruns */
- unsigned long frame_errs; /* Input framing errors */
- unsigned long parity_errs; /* Input parity errors */
-};
-
-#define CYCLADES_MAGIC 0x4359
-
-#define CYGETMON 0x435901
-#define CYGETTHRESH 0x435902
-#define CYSETTHRESH 0x435903
-#define CYGETDEFTHRESH 0x435904
-#define CYSETDEFTHRESH 0x435905
-#define CYGETTIMEOUT 0x435906
-#define CYSETTIMEOUT 0x435907
-#define CYGETDEFTIMEOUT 0x435908
-#define CYSETDEFTIMEOUT 0x435909
+ unsigned long int_count;
+ unsigned long char_count;
+ unsigned long char_max;
+ unsigned long char_last;
+};
+
+#define CYGETMON 0x435901
+#define CYGETTHRESH 0x435902
+#define CYSETTHRESH 0x435903
+#define CYGETDEFTHRESH 0x435904
+#define CYSETDEFTHRESH 0x435905
+#define CYGETTIMEOUT 0x435906
+#define CYSETTIMEOUT 0x435907
+#define CYGETDEFTIMEOUT 0x435908
+#define CYSETDEFTIMEOUT 0x435909
#define CYSETRFLOW 0x43590a
#define CYGETRFLOW 0x43590b
#define CYSETRTSDTR_INV 0x43590c
@@ -111,384 +29,7 @@ struct cyclades_idle_stats {
#define CYZSETPOLLCYCLE 0x43590e
#define CYZGETPOLLCYCLE 0x43590f
#define CYGETCD1400VER 0x435910
-#define CYSETWAIT 0x435912
-#define CYGETWAIT 0x435913
-
-/*************** CYCLOM-Z ADDITIONS ***************/
-
-#define CZIOC ('M' << 8)
-#define CZ_NBOARDS (CZIOC|0xfa)
-#define CZ_BOOT_START (CZIOC|0xfb)
-#define CZ_BOOT_DATA (CZIOC|0xfc)
-#define CZ_BOOT_END (CZIOC|0xfd)
-#define CZ_TEST (CZIOC|0xfe)
-
-#define CZ_DEF_POLL (HZ/25)
-
-#define MAX_BOARD 4 /* Max number of boards */
-#define MAX_DEV 256 /* Max number of ports total */
-#define CYZ_MAX_SPEED 921600
-
-#define CYZ_FIFO_SIZE 16
-
-#define CYZ_BOOT_NWORDS 0x100
-struct CYZ_BOOT_CTRL {
- unsigned short nboard;
- int status[MAX_BOARD];
- int nchannel[MAX_BOARD];
- int fw_rev[MAX_BOARD];
- unsigned long offset;
- unsigned long data[CYZ_BOOT_NWORDS];
-};
-
-
-#ifndef DP_WINDOW_SIZE
-/*
- * Memory Window Sizes
- */
-
-#define DP_WINDOW_SIZE (0x00080000) /* window size 512 Kb */
-#define ZE_DP_WINDOW_SIZE (0x00100000) /* window size 1 Mb (Ze and
- 8Zo V.2 */
-#define CTRL_WINDOW_SIZE (0x00000080) /* runtime regs 128 bytes */
-
-/*
- * CUSTOM_REG - Cyclom-Z/PCI Custom Registers Set. The driver
- * normally will access only interested on the fpga_id, fpga_version,
- * start_cpu and stop_cpu.
- */
-
-struct CUSTOM_REG {
- __u32 fpga_id; /* FPGA Identification Register */
- __u32 fpga_version; /* FPGA Version Number Register */
- __u32 cpu_start; /* CPU start Register (write) */
- __u32 cpu_stop; /* CPU stop Register (write) */
- __u32 misc_reg; /* Miscellaneous Register */
- __u32 idt_mode; /* IDT mode Register */
- __u32 uart_irq_status; /* UART IRQ status Register */
- __u32 clear_timer0_irq; /* Clear timer interrupt Register */
- __u32 clear_timer1_irq; /* Clear timer interrupt Register */
- __u32 clear_timer2_irq; /* Clear timer interrupt Register */
- __u32 test_register; /* Test Register */
- __u32 test_count; /* Test Count Register */
- __u32 timer_select; /* Timer select register */
- __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */
- __u32 ram_wait_state; /* RAM wait-state Register */
- __u32 uart_wait_state; /* UART wait-state Register */
- __u32 timer_wait_state; /* timer wait-state Register */
- __u32 ack_wait_state; /* ACK wait State Register */
-};
-
-/*
- * RUNTIME_9060 - PLX PCI9060ES local configuration and shared runtime
- * registers. This structure can be used to access the 9060 registers
- * (memory mapped).
- */
-
-struct RUNTIME_9060 {
- __u32 loc_addr_range; /* 00h - Local Address Range */
- __u32 loc_addr_base; /* 04h - Local Address Base */
- __u32 loc_arbitr; /* 08h - Local Arbitration */
- __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */
- __u32 loc_rom_range; /* 10h - Local ROM Range */
- __u32 loc_rom_base; /* 14h - Local ROM Base */
- __u32 loc_bus_descr; /* 18h - Local Bus descriptor */
- __u32 loc_range_mst; /* 1Ch - Local Range for Master to PCI */
- __u32 loc_base_mst; /* 20h - Local Base for Master PCI */
- __u32 loc_range_io; /* 24h - Local Range for Master IO */
- __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */
- __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */
- __u32 filler1; /* 30h */
- __u32 filler2; /* 34h */
- __u32 filler3; /* 38h */
- __u32 filler4; /* 3Ch */
- __u32 mail_box_0; /* 40h - Mail Box 0 */
- __u32 mail_box_1; /* 44h - Mail Box 1 */
- __u32 mail_box_2; /* 48h - Mail Box 2 */
- __u32 mail_box_3; /* 4Ch - Mail Box 3 */
- __u32 filler5; /* 50h */
- __u32 filler6; /* 54h */
- __u32 filler7; /* 58h */
- __u32 filler8; /* 5Ch */
- __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */
- __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */
- __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */
- __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */
-};
-
-/* Values for the Local Base Address re-map register */
-
-#define WIN_RAM 0x00000001L /* set the sliding window to RAM */
-#define WIN_CREG 0x14000001L /* set the window to custom Registers */
-
-/* Values timer select registers */
-
-#define TIMER_BY_1M 0x00 /* clock divided by 1M */
-#define TIMER_BY_256K 0x01 /* clock divided by 256k */
-#define TIMER_BY_128K 0x02 /* clock divided by 128k */
-#define TIMER_BY_32K 0x03 /* clock divided by 32k */
-
-/****************** ****************** *******************/
-#endif
-
-#ifndef ZFIRM_ID
-/* #include "zfwint.h" */
-/****************** ****************** *******************/
-/*
- * This file contains the definitions for interfacing with the
- * Cyclom-Z ZFIRM Firmware.
- */
-
-/* General Constant definitions */
-
-#define MAX_CHAN 64 /* max number of channels per board */
-
-/* firmware id structure (set after boot) */
-
-#define ID_ADDRESS 0x00000180L /* signature/pointer address */
-#define ZFIRM_ID 0x5557465AL /* ZFIRM/U signature */
-#define ZFIRM_HLT 0x59505B5CL /* ZFIRM needs external power supply */
-#define ZFIRM_RST 0x56040674L /* RST signal (due to FW reset) */
-
-#define ZF_TINACT_DEF 1000 /* default inactivity timeout
- (1000 ms) */
-#define ZF_TINACT ZF_TINACT_DEF
-
-struct FIRM_ID {
- __u32 signature; /* ZFIRM/U signature */
- __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */
-};
-
-/* Op. System id */
-
-#define C_OS_LINUX 0x00000030 /* generic Linux system */
-
-/* channel op_mode */
-
-#define C_CH_DISABLE 0x00000000 /* channel is disabled */
-#define C_CH_TXENABLE 0x00000001 /* channel Tx enabled */
-#define C_CH_RXENABLE 0x00000002 /* channel Rx enabled */
-#define C_CH_ENABLE 0x00000003 /* channel Tx/Rx enabled */
-#define C_CH_LOOPBACK 0x00000004 /* Loopback mode */
-
-/* comm_parity - parity */
-
-#define C_PR_NONE 0x00000000 /* None */
-#define C_PR_ODD 0x00000001 /* Odd */
-#define C_PR_EVEN 0x00000002 /* Even */
-#define C_PR_MARK 0x00000004 /* Mark */
-#define C_PR_SPACE 0x00000008 /* Space */
-#define C_PR_PARITY 0x000000ff
-
-#define C_PR_DISCARD 0x00000100 /* discard char with frame/par error */
-#define C_PR_IGNORE 0x00000200 /* ignore frame/par error */
-
-/* comm_data_l - data length and stop bits */
-
-#define C_DL_CS5 0x00000001
-#define C_DL_CS6 0x00000002
-#define C_DL_CS7 0x00000004
-#define C_DL_CS8 0x00000008
-#define C_DL_CS 0x0000000f
-#define C_DL_1STOP 0x00000010
-#define C_DL_15STOP 0x00000020
-#define C_DL_2STOP 0x00000040
-#define C_DL_STOP 0x000000f0
-
-/* interrupt enabling/status */
-
-#define C_IN_DISABLE 0x00000000 /* zero, disable interrupts */
-#define C_IN_TXBEMPTY 0x00000001 /* tx buffer empty */
-#define C_IN_TXLOWWM 0x00000002 /* tx buffer below LWM */
-#define C_IN_RXHIWM 0x00000010 /* rx buffer above HWM */
-#define C_IN_RXNNDT 0x00000020 /* rx no new data timeout */
-#define C_IN_MDCD 0x00000100 /* modem DCD change */
-#define C_IN_MDSR 0x00000200 /* modem DSR change */
-#define C_IN_MRI 0x00000400 /* modem RI change */
-#define C_IN_MCTS 0x00000800 /* modem CTS change */
-#define C_IN_RXBRK 0x00001000 /* Break received */
-#define C_IN_PR_ERROR 0x00002000 /* parity error */
-#define C_IN_FR_ERROR 0x00004000 /* frame error */
-#define C_IN_OVR_ERROR 0x00008000 /* overrun error */
-#define C_IN_RXOFL 0x00010000 /* RX buffer overflow */
-#define C_IN_IOCTLW 0x00020000 /* I/O control w/ wait */
-#define C_IN_MRTS 0x00040000 /* modem RTS drop */
-#define C_IN_ICHAR 0x00080000
-
-/* flow control */
-
-#define C_FL_OXX 0x00000001 /* output Xon/Xoff flow control */
-#define	C_FL_IXX	0x00000002	/* input Xon/Xoff flow control */
-#define C_FL_OIXANY 0x00000004 /* output Xon/Xoff (any xon) */
-#define C_FL_SWFLOW 0x0000000f
-
-/* flow status */
-
-#define C_FS_TXIDLE 0x00000000 /* no Tx data in the buffer or UART */
-#define C_FS_SENDING 0x00000001 /* UART is sending data */
-#define C_FS_SWFLOW 0x00000002 /* Tx is stopped by received Xoff */
-
-/* rs_control/rs_status RS-232 signals */
-
-#define C_RS_PARAM 0x80000000 /* Indicates presence of parameter in
- IOCTLM command */
-#define C_RS_RTS 0x00000001 /* RTS */
-#define C_RS_DTR 0x00000004 /* DTR */
-#define C_RS_DCD 0x00000100 /* CD */
-#define C_RS_DSR 0x00000200 /* DSR */
-#define C_RS_RI 0x00000400 /* RI */
-#define C_RS_CTS 0x00000800 /* CTS */
-
-/* commands Host <-> Board */
-
-#define C_CM_RESET 0x01 /* reset/flush buffers */
-#define C_CM_IOCTL 0x02 /* re-read CH_CTRL */
-#define C_CM_IOCTLW 0x03 /* re-read CH_CTRL, intr when done */
-#define C_CM_IOCTLM 0x04 /* RS-232 outputs change */
-#define C_CM_SENDXOFF 0x10 /* send Xoff */
-#define C_CM_SENDXON 0x11 /* send Xon */
-#define C_CM_CLFLOW 0x12 /* Clear flow control (resume) */
-#define C_CM_SENDBRK 0x41 /* send break */
-#define C_CM_INTBACK 0x42 /* Interrupt back */
-#define C_CM_SET_BREAK 0x43 /* Tx break on */
-#define C_CM_CLR_BREAK 0x44 /* Tx break off */
-#define C_CM_CMD_DONE 0x45 /* Previous command done */
-#define C_CM_INTBACK2 0x46 /* Alternate Interrupt back */
-#define C_CM_TINACT 0x51 /* set inactivity detection */
-#define C_CM_IRQ_ENBL 0x52 /* enable generation of interrupts */
-#define C_CM_IRQ_DSBL 0x53 /* disable generation of interrupts */
-#define C_CM_ACK_ENBL 0x54 /* enable acknowledged interrupt mode */
-#define C_CM_ACK_DSBL 0x55 /* disable acknowledged intr mode */
-#define C_CM_FLUSH_RX 0x56 /* flushes Rx buffer */
-#define C_CM_FLUSH_TX 0x57 /* flushes Tx buffer */
-#define C_CM_Q_ENABLE 0x58 /* enables queue access from the
- driver */
-#define C_CM_Q_DISABLE 0x59 /* disables queue access from the
- driver */
-
-#define C_CM_TXBEMPTY 0x60 /* Tx buffer is empty */
-#define C_CM_TXLOWWM 0x61 /* Tx buffer low water mark */
-#define C_CM_RXHIWM 0x62 /* Rx buffer high water mark */
-#define C_CM_RXNNDT 0x63 /* rx no new data timeout */
-#define C_CM_TXFEMPTY 0x64
-#define C_CM_ICHAR 0x65
-#define C_CM_MDCD 0x70 /* modem DCD change */
-#define C_CM_MDSR 0x71 /* modem DSR change */
-#define C_CM_MRI 0x72 /* modem RI change */
-#define C_CM_MCTS 0x73 /* modem CTS change */
-#define C_CM_MRTS 0x74 /* modem RTS drop */
-#define C_CM_RXBRK 0x84 /* Break received */
-#define C_CM_PR_ERROR 0x85 /* Parity error */
-#define C_CM_FR_ERROR 0x86 /* Frame error */
-#define C_CM_OVR_ERROR 0x87 /* Overrun error */
-#define C_CM_RXOFL 0x88 /* RX buffer overflow */
-#define C_CM_CMDERROR 0x90 /* command error */
-#define C_CM_FATAL 0x91 /* fatal error */
-#define C_CM_HW_RESET 0x92 /* reset board */
-
-/*
- * CH_CTRL - This per port structure contains all parameters
- * that control a specific port. It can be seen as the
- * configuration registers of a "super-serial-controller".
- */
-
-struct CH_CTRL {
- __u32 op_mode; /* operation mode */
- __u32 intr_enable; /* interrupt masking */
- __u32 sw_flow; /* SW flow control */
- __u32 flow_status; /* output flow status */
- __u32 comm_baud; /* baud rate - numerically specified */
- __u32 comm_parity; /* parity */
- __u32 comm_data_l; /* data length/stop */
- __u32 comm_flags; /* other flags */
- __u32 hw_flow; /* HW flow control */
- __u32 rs_control; /* RS-232 outputs */
- __u32 rs_status; /* RS-232 inputs */
- __u32 flow_xon; /* xon char */
- __u32 flow_xoff; /* xoff char */
- __u32 hw_overflow; /* hw overflow counter */
- __u32 sw_overflow; /* sw overflow counter */
- __u32 comm_error; /* frame/parity error counter */
- __u32 ichar;
- __u32 filler[7];
-};
-
-
-/*
- * BUF_CTRL - This per channel structure contains
- * all Tx and Rx buffer control for a given channel.
- */
-
-struct BUF_CTRL {
- __u32 flag_dma; /* buffers are in Host memory */
- __u32 tx_bufaddr; /* address of the tx buffer */
- __u32 tx_bufsize; /* tx buffer size */
- __u32 tx_threshold; /* tx low water mark */
- __u32 tx_get; /* tail index tx buf */
- __u32 tx_put; /* head index tx buf */
- __u32 rx_bufaddr; /* address of the rx buffer */
- __u32 rx_bufsize; /* rx buffer size */
- __u32 rx_threshold; /* rx high water mark */
- __u32 rx_get; /* tail index rx buf */
- __u32 rx_put; /* head index rx buf */
- __u32 filler[5]; /* filler to align structures */
-};
-
-/*
- * BOARD_CTRL - This per board structure contains all global
- * control fields related to the board.
- */
-
-struct BOARD_CTRL {
-
- /* static info provided by the on-board CPU */
- __u32 n_channel; /* number of channels */
- __u32 fw_version; /* firmware version */
-
- /* static info provided by the driver */
- __u32 op_system; /* op_system id */
- __u32 dr_version; /* driver version */
-
- /* board control area */
- __u32 inactivity; /* inactivity control */
-
- /* host to FW commands */
- __u32 hcmd_channel; /* channel number */
- __u32 hcmd_param; /* pointer to parameters */
-
- /* FW to Host commands */
- __u32 fwcmd_channel; /* channel number */
- __u32 fwcmd_param; /* pointer to parameters */
- __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */
-
- /* filler so the structures are aligned */
- __u32 filler[6];
-};
-
-/* Host Interrupt Queue */
-
-#define QUEUE_SIZE (10*MAX_CHAN)
-
-struct INT_QUEUE {
- unsigned char intr_code[QUEUE_SIZE];
- unsigned long channel[QUEUE_SIZE];
- unsigned long param[QUEUE_SIZE];
- unsigned long put;
- unsigned long get;
-};
-
-/*
- * ZFW_CTRL - This is the data structure that includes all other
- * data structures used by the Firmware.
- */
-
-struct ZFW_CTRL {
- struct BOARD_CTRL board_ctrl;
- struct CH_CTRL ch_ctrl[MAX_CHAN];
- struct BUF_CTRL buf_ctrl[MAX_CHAN];
-};
-
-/****************** ****************** *******************/
-#endif
+#define CYSETWAIT 0x435912
+#define CYGETWAIT 0x435913
#endif /* _UAPI_LINUX_CYCLADES_H */
diff --git a/include/uapi/linux/cycx_cfm.h b/include/uapi/linux/cycx_cfm.h
index 51f541942ff9..91778c8024b1 100644
--- a/include/uapi/linux/cycx_cfm.h
+++ b/include/uapi/linux/cycx_cfm.h
@@ -91,7 +91,7 @@ struct cycx_firmware {
unsigned short reserved[6];
char descr[CFM_DESCR_LEN];
struct cycx_fw_info info;
- unsigned char image[0];
+ unsigned char image[];
};
struct cycx_fw_header {
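
As an aside, the image[0] -> image[] conversion above replaces a zero-length
array with a C99 flexible array member. A minimal sketch of how userspace
might size and fill such a structure; blob and blob_alloc are illustrative
names, not part of the patch:

	#include <stdlib.h>
	#include <string.h>

	struct blob {
		unsigned short size;
		unsigned char image[];	/* flexible array member */
	};

	static struct blob *blob_alloc(const unsigned char *data,
				       unsigned short len)
	{
		/* sizeof(*b) does not count image[]; add the payload length */
		struct blob *b = malloc(sizeof(*b) + len);

		if (!b)
			return NULL;
		b->size = len;
		memcpy(b->image, data, len);
		return b;
	}
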
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index a791a94013a6..7e15214aa5dd 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -218,6 +218,9 @@ struct cee_pfc {
#define IEEE_8021QAZ_APP_SEL_ANY 4
#define IEEE_8021QAZ_APP_SEL_DSCP 5
+/* Non-std selector values */
+#define DCB_APP_SEL_PCP 255
+
/* This structure contains the IEEE 802.1Qaz APP managed object. This
 * object is also used for the CEE std.
*
@@ -247,6 +250,8 @@ struct dcb_app {
__u16 protocol;
};
+#define IEEE_8021QAZ_APP_SEL_MAX 255
+
/**
* struct dcb_peer_app_info - APP feature information sent by the peer
*
@@ -405,6 +410,8 @@ enum dcbnl_attrs {
* @DCB_ATTR_IEEE_PEER_ETS: peer ETS configuration - get only
* @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
* @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
+ * @DCB_ATTR_DCB_APP_TRUST_TABLE: selector trust table
+ * @DCB_ATTR_DCB_REWR_TABLE: rewrite configuration
*/
enum ieee_attrs {
DCB_ATTR_IEEE_UNSPEC,
@@ -418,6 +425,8 @@ enum ieee_attrs {
DCB_ATTR_IEEE_QCN,
DCB_ATTR_IEEE_QCN_STATS,
DCB_ATTR_DCB_BUFFER,
+ DCB_ATTR_DCB_APP_TRUST_TABLE,
+ DCB_ATTR_DCB_REWR_TABLE,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
@@ -425,6 +434,7 @@ enum ieee_attrs {
enum ieee_attrs_app {
DCB_ATTR_IEEE_APP_UNSPEC,
DCB_ATTR_IEEE_APP,
+ DCB_ATTR_DCB_APP,
__DCB_ATTR_IEEE_APP_MAX
};
#define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1)
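
The new DCB_APP_SEL_PCP selector sits outside the IEEE selector space, which
is why it is carried via the new DCB_ATTR_DCB_APP attribute rather than
DCB_ATTR_IEEE_APP. A minimal sketch of an APP table entry using it; treating
the protocol field as the PCP/DEI code point is an assumption for
illustration:

	struct dcb_app pcp_entry = {
		.selector = DCB_APP_SEL_PCP,	/* 255, non-standard selector */
		.priority = 3,			/* traffic class priority */
		.protocol = 3,			/* assumed PCP/DEI code point */
	};
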
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index cfef4245ea5a..2da0c7eb6710 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -13,6 +13,8 @@
#ifndef _UAPI_LINUX_DEVLINK_H_
#define _UAPI_LINUX_DEVLINK_H_
+#include <linux/const.h>
+
#define DEVLINK_GENL_NAME "devlink"
#define DEVLINK_GENL_VERSION 0x1
#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config"
@@ -122,6 +124,23 @@ enum devlink_command {
DEVLINK_CMD_TRAP_POLICER_NEW,
DEVLINK_CMD_TRAP_POLICER_DEL,
+ DEVLINK_CMD_HEALTH_REPORTER_TEST,
+
+ DEVLINK_CMD_RATE_GET, /* can dump */
+ DEVLINK_CMD_RATE_SET,
+ DEVLINK_CMD_RATE_NEW,
+ DEVLINK_CMD_RATE_DEL,
+
+ DEVLINK_CMD_LINECARD_GET, /* can dump */
+ DEVLINK_CMD_LINECARD_SET,
+ DEVLINK_CMD_LINECARD_NEW,
+ DEVLINK_CMD_LINECARD_DEL,
+
+ DEVLINK_CMD_SELFTESTS_GET, /* can dump */
+ DEVLINK_CMD_SELFTESTS_RUN,
+
+ DEVLINK_CMD_NOTIFY_FILTER_SET,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -193,6 +212,18 @@ enum devlink_port_flavour {
* port that faces the PCI VF.
*/
DEVLINK_PORT_FLAVOUR_VIRTUAL, /* Any virtual port facing the user. */
+ DEVLINK_PORT_FLAVOUR_UNUSED, /* Port which exists in the switch, but
+ * is not used in any way.
+ */
+ DEVLINK_PORT_FLAVOUR_PCI_SF, /* Represents eswitch port
+ * for the PCI SF. It is an internal
+ * port that faces the PCI SF.
+ */
+};
+
+enum devlink_rate_type {
+ DEVLINK_RATE_TYPE_LEAF,
+ DEVLINK_RATE_TYPE_NODE,
};
enum devlink_param_cmode {
@@ -228,6 +259,52 @@ enum {
DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1
};
+/* Specify what sections of a flash component can be overwritten when
+ * performing an update. Overwriting of firmware binary sections is always
+ * implicitly assumed to be allowed.
+ *
+ * Each section must be documented in
+ * Documentation/networking/devlink/devlink-flash.rst
+ *
+ */
+enum devlink_flash_overwrite {
+ DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT,
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT,
+
+ __DEVLINK_FLASH_OVERWRITE_MAX_BIT,
+ DEVLINK_FLASH_OVERWRITE_MAX_BIT = __DEVLINK_FLASH_OVERWRITE_MAX_BIT - 1
+};
+
+#define DEVLINK_FLASH_OVERWRITE_SETTINGS _BITUL(DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT)
+#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS _BITUL(DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT)
+
+#define DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS \
+ (_BITUL(__DEVLINK_FLASH_OVERWRITE_MAX_BIT) - 1)
+
+enum devlink_attr_selftest_id {
+ DEVLINK_ATTR_SELFTEST_ID_UNSPEC,
+ DEVLINK_ATTR_SELFTEST_ID_FLASH, /* flag */
+
+ __DEVLINK_ATTR_SELFTEST_ID_MAX,
+ DEVLINK_ATTR_SELFTEST_ID_MAX = __DEVLINK_ATTR_SELFTEST_ID_MAX - 1
+};
+
+enum devlink_selftest_status {
+ DEVLINK_SELFTEST_STATUS_SKIP,
+ DEVLINK_SELFTEST_STATUS_PASS,
+ DEVLINK_SELFTEST_STATUS_FAIL
+};
+
+enum devlink_attr_selftest_result {
+ DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC,
+ DEVLINK_ATTR_SELFTEST_RESULT, /* nested */
+ DEVLINK_ATTR_SELFTEST_RESULT_ID, /* u32, enum devlink_attr_selftest_id */
+ DEVLINK_ATTR_SELFTEST_RESULT_STATUS, /* u8, enum devlink_selftest_status */
+
+ __DEVLINK_ATTR_SELFTEST_RESULT_MAX,
+ DEVLINK_ATTR_SELFTEST_RESULT_MAX = __DEVLINK_ATTR_SELFTEST_RESULT_MAX - 1
+};
+
/**
* enum devlink_trap_action - Packet trap action.
* @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not
@@ -272,6 +349,42 @@ enum {
DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE,
};
+enum devlink_reload_action {
+ DEVLINK_RELOAD_ACTION_UNSPEC,
+ DEVLINK_RELOAD_ACTION_DRIVER_REINIT, /* Driver entities re-instantiation */
+ DEVLINK_RELOAD_ACTION_FW_ACTIVATE, /* FW activate */
+
+ /* Add new reload actions above */
+ __DEVLINK_RELOAD_ACTION_MAX,
+ DEVLINK_RELOAD_ACTION_MAX = __DEVLINK_RELOAD_ACTION_MAX - 1
+};
+
+enum devlink_reload_limit {
+ DEVLINK_RELOAD_LIMIT_UNSPEC, /* unspecified, no constraints */
+ DEVLINK_RELOAD_LIMIT_NO_RESET, /* No reset allowed, no down time allowed,
+ * no link flap and no configuration is lost.
+ */
+
+ /* Add new reload limit above */
+ __DEVLINK_RELOAD_LIMIT_MAX,
+ DEVLINK_RELOAD_LIMIT_MAX = __DEVLINK_RELOAD_LIMIT_MAX - 1
+};
+
+#define DEVLINK_RELOAD_LIMITS_VALID_MASK (_BITUL(__DEVLINK_RELOAD_LIMIT_MAX) - 1)
+
+enum devlink_linecard_state {
+ DEVLINK_LINECARD_STATE_UNSPEC,
+ DEVLINK_LINECARD_STATE_UNPROVISIONED,
+ DEVLINK_LINECARD_STATE_UNPROVISIONING,
+ DEVLINK_LINECARD_STATE_PROVISIONING,
+ DEVLINK_LINECARD_STATE_PROVISIONING_FAILED,
+ DEVLINK_LINECARD_STATE_PROVISIONED,
+ DEVLINK_LINECARD_STATE_ACTIVE,
+
+ __DEVLINK_LINECARD_STATE_MAX,
+ DEVLINK_LINECARD_STATE_MAX = __DEVLINK_LINECARD_STATE_MAX - 1
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -458,7 +571,53 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_LANES, /* u32 */
DEVLINK_ATTR_PORT_SPLITTABLE, /* u8 */
- /* add new attributes above here, update the policy in devlink.c */
+ DEVLINK_ATTR_PORT_EXTERNAL, /* u8 */
+ DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT, /* u64 */
+ DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK, /* bitfield32 */
+
+ DEVLINK_ATTR_RELOAD_ACTION, /* u8 */
+ DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, /* bitfield32 */
+ DEVLINK_ATTR_RELOAD_LIMITS, /* bitfield32 */
+
+ DEVLINK_ATTR_DEV_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS_ENTRY, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS_LIMIT, /* u8 */
+ DEVLINK_ATTR_RELOAD_STATS_VALUE, /* u32 */
+ DEVLINK_ATTR_REMOTE_RELOAD_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_ACTION_INFO, /* nested */
+ DEVLINK_ATTR_RELOAD_ACTION_STATS, /* nested */
+
+ DEVLINK_ATTR_PORT_PCI_SF_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_RATE_TYPE, /* u16 */
+ DEVLINK_ATTR_RATE_TX_SHARE, /* u64 */
+ DEVLINK_ATTR_RATE_TX_MAX, /* u64 */
+ DEVLINK_ATTR_RATE_NODE_NAME, /* string */
+ DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */
+
+ DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */
+
+ DEVLINK_ATTR_LINECARD_INDEX, /* u32 */
+ DEVLINK_ATTR_LINECARD_STATE, /* u8 */
+ DEVLINK_ATTR_LINECARD_TYPE, /* string */
+ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */
+
+ DEVLINK_ATTR_NESTED_DEVLINK, /* nested */
+
+ DEVLINK_ATTR_SELFTESTS, /* nested */
+
+ DEVLINK_ATTR_RATE_TX_PRIORITY, /* u32 */
+ DEVLINK_ATTR_RATE_TX_WEIGHT, /* u32 */
+
+ DEVLINK_ATTR_REGION_DIRECT, /* flag */
+
+ /* Add new attributes above here, update the spec in
+ * Documentation/netlink/specs/devlink.yaml and re-generate
+ * net/devlink/netlink_gen.c.
+ */
__DEVLINK_ATTR_MAX,
DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
@@ -504,12 +663,50 @@ enum devlink_resource_unit {
DEVLINK_RESOURCE_UNIT_ENTRY,
};
+enum devlink_port_fn_attr_cap {
+ DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT,
+
+ /* Add new caps above */
+ __DEVLINK_PORT_FN_ATTR_CAPS_MAX,
+};
+
+#define DEVLINK_PORT_FN_CAP_ROCE _BITUL(DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT)
+#define DEVLINK_PORT_FN_CAP_MIGRATABLE \
+ _BITUL(DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT)
+#define DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT)
+#define DEVLINK_PORT_FN_CAP_IPSEC_PACKET _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT)
+
enum devlink_port_function_attr {
DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */
+ DEVLINK_PORT_FN_ATTR_STATE, /* u8 */
+ DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */
+ DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */
+ DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */
__DEVLINK_PORT_FUNCTION_ATTR_MAX,
DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
};
+enum devlink_port_fn_state {
+ DEVLINK_PORT_FN_STATE_INACTIVE,
+ DEVLINK_PORT_FN_STATE_ACTIVE,
+};
+
+/**
+ * enum devlink_port_fn_opstate - indicates operational state of the function
+ * @DEVLINK_PORT_FN_OPSTATE_ATTACHED: Driver is attached to the function.
+ * For graceful tear down of the function, after inactivation of the
+ * function, user should wait for operational state to turn DETACHED.
+ * @DEVLINK_PORT_FN_OPSTATE_DETACHED: Driver is detached from the function.
+ * It is safe to delete the port.
+ */
+enum devlink_port_fn_opstate {
+ DEVLINK_PORT_FN_OPSTATE_DETACHED,
+ DEVLINK_PORT_FN_OPSTATE_ATTACHED,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
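
The flash-overwrite bits introduced above combine into the bitfield32 carried
by DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK. A small sketch of building that
mask in userspace (the variable name is illustrative):

	/* allow both settings and identifiers to be overwritten */
	__u32 overwrite_mask = DEVLINK_FLASH_OVERWRITE_SETTINGS |
			       DEVLINK_FLASH_OVERWRITE_IDENTIFIERS;

	/* equals DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS (0x3) */
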
diff --git a/include/uapi/linux/dlm.h b/include/uapi/linux/dlm.h
index 0d2eca287567..e7e905fb0bb2 100644
--- a/include/uapi/linux/dlm.h
+++ b/include/uapi/linux/dlm.h
@@ -68,8 +68,8 @@ struct dlm_lksb {
/* dlm_new_lockspace() flags */
+/* DLM_LSFL_TIMEWARN is deprecated and reserved. DO NOT USE! */
#define DLM_LSFL_TIMEWARN 0x00000002
-#define DLM_LSFL_FS 0x00000004
#define DLM_LSFL_NEWEXCL 0x00000008
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index f880d2831160..e83954c69fff 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -45,13 +45,13 @@ struct dlm_lock_params {
void __user *bastaddr;
struct dlm_lksb __user *lksb;
char lvb[DLM_USER_LVB_LEN];
- char name[0];
+ char name[];
};
struct dlm_lspace_params {
__u32 flags;
__u32 minor;
- char name[0];
+ char name[];
};
struct dlm_purge_params {
diff --git a/include/uapi/linux/dlm_netlink.h b/include/uapi/linux/dlm_netlink.h
deleted file mode 100644
index 5dc3a67d353d..000000000000
--- a/include/uapi/linux/dlm_netlink.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
- */
-
-#ifndef _DLM_NETLINK_H
-#define _DLM_NETLINK_H
-
-#include <linux/types.h>
-#include <linux/dlmconstants.h>
-
-enum {
- DLM_STATUS_WAITING = 1,
- DLM_STATUS_GRANTED = 2,
- DLM_STATUS_CONVERT = 3,
-};
-
-#define DLM_LOCK_DATA_VERSION 1
-
-struct dlm_lock_data {
- __u16 version;
- __u32 lockspace_id;
- int nodeid;
- int ownpid;
- __u32 id;
- __u32 remid;
- __u64 xid;
- __s8 status;
- __s8 grmode;
- __s8 rqmode;
- unsigned long timestamp;
- int resource_namelen;
- char resource_name[DLM_RESNAME_MAXLEN];
-};
-
-enum {
- DLM_CMD_UNSPEC = 0,
- DLM_CMD_HELLO, /* user->kernel */
- DLM_CMD_TIMEOUT, /* kernel->user */
- __DLM_CMD_MAX,
-};
-
-#define DLM_CMD_MAX (__DLM_CMD_MAX - 1)
-
-enum {
- DLM_TYPE_UNSPEC = 0,
- DLM_TYPE_LOCK,
- __DLM_TYPE_MAX,
-};
-
-#define DLM_TYPE_MAX (__DLM_TYPE_MAX - 1)
-
-#define DLM_GENL_VERSION 0x1
-#define DLM_GENL_NAME "DLM"
-
-#endif /* _DLM_NETLINK_H */
diff --git a/include/uapi/linux/dlm_plock.h b/include/uapi/linux/dlm_plock.h
index 63b6c1fd9169..eb66afcac40e 100644
--- a/include/uapi/linux/dlm_plock.h
+++ b/include/uapi/linux/dlm_plock.h
@@ -22,6 +22,7 @@ enum {
DLM_PLOCK_OP_LOCK = 1,
DLM_PLOCK_OP_UNLOCK,
DLM_PLOCK_OP_GET,
+ DLM_PLOCK_OP_CANCEL,
};
#define DLM_PLOCK_FL_CLOSE 1
diff --git a/include/uapi/linux/dlmconstants.h b/include/uapi/linux/dlmconstants.h
index a8ae47c32a37..6ca77a6388bc 100644
--- a/include/uapi/linux/dlmconstants.h
+++ b/include/uapi/linux/dlmconstants.h
@@ -87,7 +87,6 @@
* DLM_LKF_NODLCKWT
*
* Do not cancel the lock if it gets into conversion deadlock.
- * Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN.
*
* DLM_LKF_NODLCKBLK
*
@@ -132,6 +131,10 @@
* Unlock the lock even if it is converting or waiting or has sublocks.
* Only really for use by the userland device.c code.
*
+ * DLM_LKF_TIMEOUT
+ *
+ * This value is deprecated and reserved. DO NOT USE!
+ *
*/
#define DLM_LKF_NOQUEUE 0x00000001
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 6622912c2342..1990b5700f69 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -182,7 +182,7 @@ struct dm_target_spec {
struct dm_target_deps {
__u32 count; /* Array size */
__u32 padding; /* unused */
- __u64 dev[0]; /* out */
+ __u64 dev[]; /* out */
};
/*
@@ -192,9 +192,23 @@ struct dm_name_list {
__u64 dev;
__u32 next; /* offset to the next record from
the _start_ of this */
- char name[0];
+ char name[];
+
+ /*
+ * The following members can be accessed by taking a pointer that
+ * points immediately after the terminating zero character in "name"
+	 * and aligning this pointer to the next 8-byte boundary.
+ * Uuid is present if the flag DM_NAME_LIST_FLAG_HAS_UUID is set.
+ *
+ * __u32 event_nr;
+ * __u32 flags;
+ * char uuid[0];
+ */
};
+#define DM_NAME_LIST_FLAG_HAS_UUID 1
+#define DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID 2
+
/*
* Used to retrieve the target versions
*/
@@ -202,7 +216,7 @@ struct dm_target_versions {
__u32 next;
__u32 version[3];
- char name[0];
+ char name[];
};
/*
@@ -211,7 +225,7 @@ struct dm_target_versions {
struct dm_target_msg {
__u64 sector; /* Device sector */
- char message[0];
+ char message[];
};
/*
@@ -272,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 42
+#define DM_VERSION_MINOR 48
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2020-02-27)"
+#define DM_VERSION_EXTRA "-ioctl (2023-03-01)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -362,4 +376,10 @@ enum {
*/
#define DM_INTERNAL_SUSPEND_FLAG (1 << 18) /* Out */
+/*
+ * If set, the ioctl returns, in the input buffer passed by userspace, the
+ * raw table information that the IMA subsystem would measure on a device
+ * state change.
+ */
+#define DM_IMA_MEASUREMENT_FLAG (1 << 19) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
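
The dm_name_list comment above describes members that follow the terminating
NUL of "name" at the next 8-byte boundary. A sketch of that access pattern,
assuming a hypothetical userspace helper (not kernel code):

	#include <stdint.h>
	#include <string.h>
	#include <linux/dm-ioctl.h>

	static const uint32_t *dm_name_list_event_nr(const struct dm_name_list *nl)
	{
		uintptr_t p = (uintptr_t)(nl->name + strlen(nl->name) + 1);

		p = (p + 7) & ~(uintptr_t)7;	/* round up to 8-byte boundary */
		return (const uint32_t *)p;	/* event_nr, then flags */
	}
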
diff --git a/include/uapi/linux/dm-log-userspace.h b/include/uapi/linux/dm-log-userspace.h
index 5c47a8603376..23dad9565e46 100644
--- a/include/uapi/linux/dm-log-userspace.h
+++ b/include/uapi/linux/dm-log-userspace.h
@@ -426,7 +426,7 @@ struct dm_ulog_request {
__u32 request_type; /* DM_ULOG_* defined above */
__u32 data_size; /* How much data (not including this struct) */
- char data[0];
+ char data[];
};
#endif /* __DM_LOG_USERSPACE_H__ */
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index 7f30393b92c3..5a6fda66d9ad 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -22,8 +22,56 @@
#include <linux/types.h>
-/* begin/end dma-buf functions used for userspace mmap. */
+/**
+ * struct dma_buf_sync - Synchronize with CPU access.
+ *
+ * When a DMA buffer is accessed from the CPU via mmap, it is not always
+ * possible to guarantee coherency between the CPU-visible map and underlying
+ * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
+ * any CPU access to give the kernel the chance to shuffle memory around if
+ * needed.
+ *
+ * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
+ * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the
+ * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
+ * DMA_BUF_SYNC_END and the same read/write flags.
+ *
+ * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
+ * coherency. It does not prevent other processes or devices from
+ * accessing the memory at the same time. If synchronization with a GPU or
+ * other device driver is required, it is the client's responsibility to
+ * wait for buffer to be ready for reading or writing before calling this
+ * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that
+ * follow-up work is not submitted to GPU or other device driver until
+ * after this ioctl has been called with DMA_BUF_SYNC_END.
+ *
+ * If the driver or API with which the client is interacting uses implicit
+ * synchronization, waiting for prior work to complete can be done via
+ * poll() on the DMA buffer file descriptor. If the driver or API requires
+ * explicit synchronization, the client may have to wait on a sync_file or
+ * other synchronization primitive outside the scope of the DMA buffer API.
+ */
struct dma_buf_sync {
+ /**
+ * @flags: Set of access flags
+ *
+ * DMA_BUF_SYNC_START:
+ * Indicates the start of a map access session.
+ *
+ * DMA_BUF_SYNC_END:
+ * Indicates the end of a map access session.
+ *
+ * DMA_BUF_SYNC_READ:
+ * Indicates that the mapped DMA buffer will be read by the
+ * client via the CPU map.
+ *
+ * DMA_BUF_SYNC_WRITE:
+ * Indicates that the mapped DMA buffer will be written by the
+ * client via the CPU map.
+ *
+ * DMA_BUF_SYNC_RW:
+ * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
+ */
__u64 flags;
};
@@ -37,6 +85,88 @@ struct dma_buf_sync {
#define DMA_BUF_NAME_LEN 32
+/**
+ * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
+ * current set of fences on a dma-buf file descriptor as a sync_file. CPU
+ * waits via poll() or other driver-specific mechanisms typically wait on
+ * whatever fences are on the dma-buf at the time the wait begins. This
+ * is similar except that it takes a snapshot of the current fences on the
+ * dma-buf for waiting later instead of waiting immediately. This is
+ * useful for modern graphics APIs such as Vulkan which assume an explicit
+ * synchronization model but still need to inter-operate with dma-buf.
+ *
+ * The intended usage pattern is the following:
+ *
+ * 1. Export a sync_file with flags corresponding to the expected GPU usage
+ * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
+ *
+ * 2. Submit rendering work which uses the dma-buf. The work should wait on
+ * the exported sync file before rendering and produce another sync_file
+ * when complete.
+ *
+ * 3. Import the rendering-complete sync_file into the dma-buf with flags
+ * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
+ *
+ * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
+ * the above is not a single atomic operation. If userspace wants to ensure
+ * ordering via these fences, it is the responsibility of userspace to use
+ * locks or other mechanisms to ensure that no other context adds fences or
+ * submits work between steps 1 and 3 above.
+ */
+struct dma_buf_export_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * the returned sync file waits on any writers of the dma-buf to
+ * complete. Waiting on the returned sync file is equivalent to
+ * poll() with POLLIN.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
+ * any users of the dma-buf (read or write) to complete. Waiting
+ * on the returned sync file is equivalent to poll() with POLLOUT.
+ * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
+ * is equivalent to just DMA_BUF_SYNC_WRITE.
+ */
+ __u32 flags;
+ /** @fd: Returned sync file descriptor */
+ __s32 fd;
+};
+
+/**
+ * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
+ * sync_file into a dma-buf for the purposes of implicit synchronization
+ * with other dma-buf consumers. This allows clients using explicitly
+ * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
+ * which expect implicit synchronization such as OpenGL or most media
+ * drivers/video.
+ */
+struct dma_buf_import_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * this inserts the sync_file as a read-only fence. Any subsequent
+ * implicitly synchronized writes to this dma-buf will wait on this
+ * fence but reads will not.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
+ * write fence. All subsequent implicitly synchronized access to
+ * this dma-buf will wait on this fence.
+ */
+ __u32 flags;
+ /** @fd: Sync file descriptor */
+ __s32 fd;
+};
+
#define DMA_BUF_BASE 'b'
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
@@ -44,7 +174,9 @@ struct dma_buf_sync {
* between them in actual uapi, they're just different numbers.
*/
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
-#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
-#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
+#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
+#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
+#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
+#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
#endif
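
The struct dma_buf_sync documentation above prescribes bracketing CPU access
with DMA_BUF_SYNC_START and DMA_BUF_SYNC_END. A minimal sketch of that
protocol for a CPU read of an mmap()ed buffer; error handling is omitted and
buf_fd is assumed to be an open dma-buf file descriptor:

	#include <sys/ioctl.h>
	#include <linux/dma-buf.h>

	static void cpu_read_bracketed(int buf_fd)
	{
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
		};

		ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* begin CPU access */
		/* ... read from the mmap()ed view here ... */
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
		ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* end CPU access */
	}
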
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h
deleted file mode 100644
index 36ca71bd8bbe..000000000000
--- a/include/uapi/linux/dn.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_DN_H
-#define _LINUX_DN_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/*
-
- DECnet Data Structures and Constants
-
-*/
-
-/*
- * DNPROTO_NSP can't be the same as SOL_SOCKET,
- * so increment each by one (compared to ULTRIX)
- */
-#define DNPROTO_NSP 2 /* NSP protocol number */
-#define DNPROTO_ROU 3 /* Routing protocol number */
-#define DNPROTO_NML 4 /* Net mgt protocol number */
-#define DNPROTO_EVL 5 /* Evl protocol number (usr) */
-#define DNPROTO_EVR 6 /* Evl protocol number (evl) */
-#define DNPROTO_NSPT 7 /* NSP trace protocol number */
-
-
-#define DN_ADDL 2
-#define DN_MAXADDL 2 /* ULTRIX headers have 20 here, but pathworks has 2 */
-#define DN_MAXOPTL 16
-#define DN_MAXOBJL 16
-#define DN_MAXACCL 40
-#define DN_MAXALIASL 128
-#define DN_MAXNODEL 256
-#define DNBUFSIZE 65023
-
-/*
- * SET/GET Socket options - must match the DSO_ numbers below
- */
-#define SO_CONDATA 1
-#define SO_CONACCESS 2
-#define SO_PROXYUSR 3
-#define SO_LINKINFO 7
-
-#define DSO_CONDATA 1 /* Set/Get connect data */
-#define DSO_DISDATA 10 /* Set/Get disconnect data */
-#define DSO_CONACCESS 2 /* Set/Get connect access data */
-#define DSO_ACCEPTMODE 4 /* Set/Get accept mode */
-#define DSO_CONACCEPT 5 /* Accept deferred connection */
-#define DSO_CONREJECT 6 /* Reject deferred connection */
-#define DSO_LINKINFO 7 /* Set/Get link information */
-#define DSO_STREAM 8 /* Set socket type to stream */
-#define DSO_SEQPACKET 9 /* Set socket type to sequenced packet */
-#define DSO_MAXWINDOW 11 /* Maximum window size allowed */
-#define DSO_NODELAY 12 /* Turn off nagle */
-#define DSO_CORK 13 /* Wait for more data! */
-#define DSO_SERVICES 14 /* NSP Services field */
-#define DSO_INFO 15 /* NSP Info field */
-#define DSO_MAX 15 /* Maximum option number */
-
-
-/* LINK States */
-#define LL_INACTIVE 0
-#define LL_CONNECTING 1
-#define LL_RUNNING 2
-#define LL_DISCONNECTING 3
-
-#define ACC_IMMED 0
-#define ACC_DEFER 1
-
-#define SDF_WILD 1 /* Wild card object */
-#define SDF_PROXY 2 /* Addr eligible for proxy */
-#define SDF_UICPROXY 4 /* Use uic-based proxy */
-
-/* Structures */
-
-
-struct dn_naddr {
- __le16 a_len;
- __u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
-};
-
-struct sockaddr_dn {
- __u16 sdn_family;
- __u8 sdn_flags;
- __u8 sdn_objnum;
- __le16 sdn_objnamel;
- __u8 sdn_objname[DN_MAXOBJL];
- struct dn_naddr sdn_add;
-};
-#define sdn_nodeaddrl sdn_add.a_len /* Node address length */
-#define sdn_nodeaddr sdn_add.a_addr /* Node address */
-
-
-
-/*
- * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure
- */
-struct optdata_dn {
- __le16 opt_status; /* Extended status return */
-#define opt_sts opt_status
- __le16 opt_optl; /* Length of user data */
- __u8 opt_data[16]; /* User data */
-};
-
-struct accessdata_dn {
- __u8 acc_accl;
- __u8 acc_acc[DN_MAXACCL];
- __u8 acc_passl;
- __u8 acc_pass[DN_MAXACCL];
- __u8 acc_userl;
- __u8 acc_user[DN_MAXACCL];
-};
-
-/*
- * DECnet logical link information structure
- */
-struct linkinfo_dn {
- __u16 idn_segsize; /* Segment size for link */
- __u8 idn_linkstate; /* Logical link state */
-};
-
-/*
- * Ethernet address format (for DECnet)
- */
-union etheraddress {
- __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */
- struct {
- __u8 dne_hiord[4]; /* DECnet HIORD prefix */
- __u8 dne_nodeaddr[2]; /* DECnet node address */
- } dne_remote;
-};
-
-
-/*
- * DECnet physical socket address format
- */
-struct dn_addr {
- __le16 dna_family; /* AF_DECnet */
- union etheraddress dna_netaddr; /* DECnet ethernet address */
-};
-
-#define DECNET_IOCTL_BASE 0x89 /* PROTOPRIVATE range */
-
-#define SIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, struct dn_naddr)
-#define SIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, struct dn_naddr)
-#define OSIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, int)
-#define OSIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, int)
-
-#endif /* _LINUX_DN_H */
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
new file mode 100644
index 000000000000..0c13d7f1a1bc
--- /dev/null
+++ b/include/uapi/linux/dpll.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dpll.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_DPLL_H
+#define _UAPI_LINUX_DPLL_H
+
+#define DPLL_FAMILY_NAME "dpll"
+#define DPLL_FAMILY_VERSION 1
+
+/**
+ * enum dpll_mode - working modes a dpll can support, differentiates if and how
+ * dpll selects one of its inputs to syntonize with it, valid values for
+ * DPLL_A_MODE attribute
+ * @DPLL_MODE_MANUAL: input can only be selected by sending a request to dpll
+ * @DPLL_MODE_AUTOMATIC: highest prio input pin auto selected by dpll
+ */
+enum dpll_mode {
+ DPLL_MODE_MANUAL = 1,
+ DPLL_MODE_AUTOMATIC,
+
+ /* private: */
+ __DPLL_MODE_MAX,
+ DPLL_MODE_MAX = (__DPLL_MODE_MAX - 1)
+};
+
+/**
+ * enum dpll_lock_status - provides information of dpll device lock status,
+ * valid values for DPLL_A_LOCK_STATUS attribute
+ * @DPLL_LOCK_STATUS_UNLOCKED: dpll was not yet locked to any valid input (or
+ * forced by setting DPLL_A_MODE to DPLL_MODE_DETACHED)
+ * @DPLL_LOCK_STATUS_LOCKED: dpll is locked to a valid signal, but no holdover
+ * available
+ * @DPLL_LOCK_STATUS_LOCKED_HO_ACQ: dpll is locked and holdover acquired
+ * @DPLL_LOCK_STATUS_HOLDOVER: dpll is in holdover state - lost a valid lock or
+ * was forced by disconnecting all the pins (latter possible only when dpll
+ * lock-state was already DPLL_LOCK_STATUS_LOCKED_HO_ACQ, if dpll lock-state
+ * was not DPLL_LOCK_STATUS_LOCKED_HO_ACQ, the dpll's lock-state shall remain
+ * DPLL_LOCK_STATUS_UNLOCKED)
+ */
+enum dpll_lock_status {
+ DPLL_LOCK_STATUS_UNLOCKED = 1,
+ DPLL_LOCK_STATUS_LOCKED,
+ DPLL_LOCK_STATUS_LOCKED_HO_ACQ,
+ DPLL_LOCK_STATUS_HOLDOVER,
+
+ /* private: */
+ __DPLL_LOCK_STATUS_MAX,
+ DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1)
+};
+
+/**
+ * enum dpll_lock_status_error - if previous status change was done due to a
+ * failure, this provides information of dpll device lock status error. Valid
+ * values for DPLL_A_LOCK_STATUS_ERROR attribute
+ * @DPLL_LOCK_STATUS_ERROR_NONE: dpll device lock status was changed without
+ * any error
+ * @DPLL_LOCK_STATUS_ERROR_UNDEFINED: dpll device lock status was changed due
+ * to an undefined error. The driver reports this value when it cannot
+ * determine a more specific error type.
+ * @DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN: dpll device lock status was changed
+ * because the associated media went down. This may happen for example if dpll
+ * device was previously locked on an input pin of type
+ * PIN_TYPE_SYNCE_ETH_PORT.
+ * @DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH: the FFO
+ * (Fractional Frequency Offset) between the RX and TX symbol rate on the
+ * media got too high. This may happen for example if dpll device was
+ * previously locked on an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ */
+enum dpll_lock_status_error {
+ DPLL_LOCK_STATUS_ERROR_NONE = 1,
+ DPLL_LOCK_STATUS_ERROR_UNDEFINED,
+ DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN,
+ DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH,
+
+ /* private: */
+ __DPLL_LOCK_STATUS_ERROR_MAX,
+ DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1)
+};
+
+#define DPLL_TEMP_DIVIDER 1000
+
+/**
+ * enum dpll_type - type of dpll, valid values for DPLL_A_TYPE attribute
+ * @DPLL_TYPE_PPS: dpll produces Pulse-Per-Second signal
+ * @DPLL_TYPE_EEC: dpll drives the Ethernet Equipment Clock
+ */
+enum dpll_type {
+ DPLL_TYPE_PPS = 1,
+ DPLL_TYPE_EEC,
+
+ /* private: */
+ __DPLL_TYPE_MAX,
+ DPLL_TYPE_MAX = (__DPLL_TYPE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_type - defines possible types of a pin, valid values for
+ * DPLL_A_PIN_TYPE attribute
+ * @DPLL_PIN_TYPE_MUX: aggregates another layer of selectable pins
+ * @DPLL_PIN_TYPE_EXT: external input
+ * @DPLL_PIN_TYPE_SYNCE_ETH_PORT: ethernet port PHY's recovered clock
+ * @DPLL_PIN_TYPE_INT_OSCILLATOR: device internal oscillator
+ * @DPLL_PIN_TYPE_GNSS: GNSS recovered clock
+ */
+enum dpll_pin_type {
+ DPLL_PIN_TYPE_MUX = 1,
+ DPLL_PIN_TYPE_EXT,
+ DPLL_PIN_TYPE_SYNCE_ETH_PORT,
+ DPLL_PIN_TYPE_INT_OSCILLATOR,
+ DPLL_PIN_TYPE_GNSS,
+
+ /* private: */
+ __DPLL_PIN_TYPE_MAX,
+ DPLL_PIN_TYPE_MAX = (__DPLL_PIN_TYPE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_direction - defines possible direction of a pin, valid values
+ * for DPLL_A_PIN_DIRECTION attribute
+ * @DPLL_PIN_DIRECTION_INPUT: pin used as an input of a signal
+ * @DPLL_PIN_DIRECTION_OUTPUT: pin used to output the signal
+ */
+enum dpll_pin_direction {
+ DPLL_PIN_DIRECTION_INPUT = 1,
+ DPLL_PIN_DIRECTION_OUTPUT,
+
+ /* private: */
+ __DPLL_PIN_DIRECTION_MAX,
+ DPLL_PIN_DIRECTION_MAX = (__DPLL_PIN_DIRECTION_MAX - 1)
+};
+
+#define DPLL_PIN_FREQUENCY_1_HZ 1
+#define DPLL_PIN_FREQUENCY_10_KHZ 10000
+#define DPLL_PIN_FREQUENCY_77_5_KHZ 77500
+#define DPLL_PIN_FREQUENCY_10_MHZ 10000000
+
+/**
+ * enum dpll_pin_state - defines possible states of a pin, valid values for
+ * DPLL_A_PIN_STATE attribute
+ * @DPLL_PIN_STATE_CONNECTED: pin connected, active input of phase locked loop
+ * @DPLL_PIN_STATE_DISCONNECTED: pin disconnected, not considered as a valid
+ * input
+ * @DPLL_PIN_STATE_SELECTABLE: pin enabled for automatic input selection
+ */
+enum dpll_pin_state {
+ DPLL_PIN_STATE_CONNECTED = 1,
+ DPLL_PIN_STATE_DISCONNECTED,
+ DPLL_PIN_STATE_SELECTABLE,
+
+ /* private: */
+ __DPLL_PIN_STATE_MAX,
+ DPLL_PIN_STATE_MAX = (__DPLL_PIN_STATE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_capabilities - defines possible capabilities of a pin, valid
+ * flags on DPLL_A_PIN_CAPABILITIES attribute
+ * @DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE: pin direction can be changed
+ * @DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE: pin priority can be changed
+ * @DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE: pin state can be changed
+ */
+enum dpll_pin_capabilities {
+ DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE = 1,
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE = 2,
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4,
+};
+
+#define DPLL_PHASE_OFFSET_DIVIDER 1000
+
+enum dpll_a {
+ DPLL_A_ID = 1,
+ DPLL_A_MODULE_NAME,
+ DPLL_A_PAD,
+ DPLL_A_CLOCK_ID,
+ DPLL_A_MODE,
+ DPLL_A_MODE_SUPPORTED,
+ DPLL_A_LOCK_STATUS,
+ DPLL_A_TEMP,
+ DPLL_A_TYPE,
+ DPLL_A_LOCK_STATUS_ERROR,
+
+ __DPLL_A_MAX,
+ DPLL_A_MAX = (__DPLL_A_MAX - 1)
+};
+
+enum dpll_a_pin {
+ DPLL_A_PIN_ID = 1,
+ DPLL_A_PIN_PARENT_ID,
+ DPLL_A_PIN_MODULE_NAME,
+ DPLL_A_PIN_PAD,
+ DPLL_A_PIN_CLOCK_ID,
+ DPLL_A_PIN_BOARD_LABEL,
+ DPLL_A_PIN_PANEL_LABEL,
+ DPLL_A_PIN_PACKAGE_LABEL,
+ DPLL_A_PIN_TYPE,
+ DPLL_A_PIN_DIRECTION,
+ DPLL_A_PIN_FREQUENCY,
+ DPLL_A_PIN_FREQUENCY_SUPPORTED,
+ DPLL_A_PIN_FREQUENCY_MIN,
+ DPLL_A_PIN_FREQUENCY_MAX,
+ DPLL_A_PIN_PRIO,
+ DPLL_A_PIN_STATE,
+ DPLL_A_PIN_CAPABILITIES,
+ DPLL_A_PIN_PARENT_DEVICE,
+ DPLL_A_PIN_PARENT_PIN,
+ DPLL_A_PIN_PHASE_ADJUST_MIN,
+ DPLL_A_PIN_PHASE_ADJUST_MAX,
+ DPLL_A_PIN_PHASE_ADJUST,
+ DPLL_A_PIN_PHASE_OFFSET,
+ DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET,
+
+ __DPLL_A_PIN_MAX,
+ DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
+};
+
+enum dpll_cmd {
+ DPLL_CMD_DEVICE_ID_GET = 1,
+ DPLL_CMD_DEVICE_GET,
+ DPLL_CMD_DEVICE_SET,
+ DPLL_CMD_DEVICE_CREATE_NTF,
+ DPLL_CMD_DEVICE_DELETE_NTF,
+ DPLL_CMD_DEVICE_CHANGE_NTF,
+ DPLL_CMD_PIN_ID_GET,
+ DPLL_CMD_PIN_GET,
+ DPLL_CMD_PIN_SET,
+ DPLL_CMD_PIN_CREATE_NTF,
+ DPLL_CMD_PIN_DELETE_NTF,
+ DPLL_CMD_PIN_CHANGE_NTF,
+
+ __DPLL_CMD_MAX,
+ DPLL_CMD_MAX = (__DPLL_CMD_MAX - 1)
+};
+
+#define DPLL_MCGRP_MONITOR "monitor"
+
+#endif /* _UAPI_LINUX_DPLL_H */
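
DPLL_TEMP_DIVIDER above implies that DPLL_A_TEMP carries a fixed-point value.
A sketch of how userspace might convert it to degrees, assuming raw_temp
holds the attribute value read over netlink (the sample value is
hypothetical):

	long raw_temp = 45250;				/* from DPLL_A_TEMP */
	long deg = raw_temp / DPLL_TEMP_DIVIDER;	/* 45 degrees */
	long milli = raw_temp % DPLL_TEMP_DIVIDER;	/* .250 */
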
diff --git a/include/uapi/linux/dqblk_xfs.h b/include/uapi/linux/dqblk_xfs.h
index 03d890b80ebc..8cda3e62e0e7 100644
--- a/include/uapi/linux/dqblk_xfs.h
+++ b/include/uapi/linux/dqblk_xfs.h
@@ -61,12 +61,16 @@ typedef struct fs_disk_quota {
__u64 d_ino_softlimit;/* preferred inode limit */
__u64 d_bcount; /* # disk blocks owned by the user */
__u64 d_icount; /* # inodes owned by the user */
- __s32 d_itimer; /* zero if within inode limits */
- /* if not, we refuse service */
+ __s32 d_itimer; /* Zero if within inode limits. If
+ * not, we refuse service at this time
+ * (in seconds since Unix epoch) */
__s32 d_btimer; /* similar to above; for disk blocks */
__u16 d_iwarns; /* # warnings issued wrt num inodes */
__u16 d_bwarns; /* # warnings issued wrt disk blocks */
- __s32 d_padding2; /* padding2 - for future use */
+ __s8 d_itimer_hi; /* upper 8 bits of timer values */
+ __s8 d_btimer_hi;
+ __s8 d_rtbtimer_hi;
+ __s8 d_padding2; /* padding2 - for future use */
__u64 d_rtb_hardlimit;/* absolute limit on realtime blks */
__u64 d_rtb_softlimit;/* preferred limit on RT disk blks */
__u64 d_rtbcount; /* # realtime blocks owned */
@@ -122,6 +126,12 @@ typedef struct fs_disk_quota {
#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
/*
+ * Quota expiration timestamps are 40-bit signed integers, with the upper 8
+ * bits encoded in the _hi fields.
+ */
+#define FS_DQ_BIGTIME (1<<15)
+
+/*
* Various flags related to quotactl(2).
*/
#define FS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */
@@ -209,7 +219,10 @@ struct fs_quota_statv {
__s32 qs_rtbtimelimit;/* limit for rt blks timer */
__u16 qs_bwarnlimit; /* limit for num warnings */
__u16 qs_iwarnlimit; /* limit for num warnings */
- __u64 qs_pad2[8]; /* for future proofing */
+ __u16 qs_rtbwarnlimit;/* limit for rt blks warnings */
+ __u16 qs_pad3;
+ __u32 qs_pad4;
+ __u64 qs_pad2[7]; /* for future proofing */
};
#endif /* _LINUX_DQBLK_XFS_H */
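
Per the FS_DQ_BIGTIME comment above, a 40-bit expiration timestamp splits
across a 32-bit timer field and the signed upper 8 bits in the matching _hi
field. A sketch of reassembling one, assuming q is a struct fs_disk_quota
returned with the flag set:

	long long itime = ((long long)q.d_itimer_hi << 32) |
			  (unsigned int)q.d_itimer;
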
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
index 2f869da69171..77fb866890b4 100644
--- a/include/uapi/linux/dvb/audio.h
+++ b/include/uapi/linux/dvb/audio.h
@@ -7,21 +7,6 @@
* Copyright (C) 2000 Ralph Metzler <ralph@convergence.de>
* & Marcus Metzler <marcus@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBAUDIO_H_
diff --git a/include/uapi/linux/dvb/ca.h b/include/uapi/linux/dvb/ca.h
index dffa59e95ebb..4244b187cc4d 100644
--- a/include/uapi/linux/dvb/ca.h
+++ b/include/uapi/linux/dvb/ca.h
@@ -5,21 +5,6 @@
* Copyright (C) 2000 Ralph Metzler <ralph@convergence.de>
* & Marcus Metzler <marcus@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBCA_H_
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index b4112f0b6dd3..7b16375f94e2 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -5,21 +5,6 @@
* Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
* & Ralph Metzler <ralph@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _UAPI_DVBDMX_H_
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index 4f9b4551c534..7e0983b987c2 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -7,21 +7,6 @@
* Holger Waechtler <holger@convergence.de>
* Andre Draszik <ad@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBFRONTEND_H_
@@ -282,7 +267,6 @@ enum fe_spectral_inversion {
/**
* enum fe_code_rate - Type of Forward Error Correction (FEC)
*
- *
* @FEC_NONE: No Forward Error Correction Code
* @FEC_1_2: Forward Error Correction Code 1/2
* @FEC_2_3: Forward Error Correction Code 2/3
@@ -296,6 +280,26 @@ enum fe_spectral_inversion {
* @FEC_3_5: Forward Error Correction Code 3/5
* @FEC_9_10: Forward Error Correction Code 9/10
* @FEC_2_5: Forward Error Correction Code 2/5
+ * @FEC_1_3: Forward Error Correction Code 1/3
+ * @FEC_1_4: Forward Error Correction Code 1/4
+ * @FEC_5_9: Forward Error Correction Code 5/9
+ * @FEC_7_9: Forward Error Correction Code 7/9
+ * @FEC_8_15: Forward Error Correction Code 8/15
+ * @FEC_11_15: Forward Error Correction Code 11/15
+ * @FEC_13_18: Forward Error Correction Code 13/18
+ * @FEC_9_20: Forward Error Correction Code 9/20
+ * @FEC_11_20: Forward Error Correction Code 11/20
+ * @FEC_23_36: Forward Error Correction Code 23/36
+ * @FEC_25_36: Forward Error Correction Code 25/36
+ * @FEC_13_45: Forward Error Correction Code 13/45
+ * @FEC_26_45: Forward Error Correction Code 26/45
+ * @FEC_28_45: Forward Error Correction Code 28/45
+ * @FEC_32_45: Forward Error Correction Code 32/45
+ * @FEC_77_90: Forward Error Correction Code 77/90
+ * @FEC_11_45: Forward Error Correction Code 11/45
+ * @FEC_4_15: Forward Error Correction Code 4/15
+ * @FEC_14_45: Forward Error Correction Code 14/45
+ * @FEC_7_15: Forward Error Correction Code 7/15
*
* Please note that not all FEC types are supported by a given standard.
*/
@@ -313,6 +317,26 @@ enum fe_code_rate {
FEC_3_5,
FEC_9_10,
FEC_2_5,
+ FEC_1_3,
+ FEC_1_4,
+ FEC_5_9,
+ FEC_7_9,
+ FEC_8_15,
+ FEC_11_15,
+ FEC_13_18,
+ FEC_9_20,
+ FEC_11_20,
+ FEC_23_36,
+ FEC_25_36,
+ FEC_13_45,
+ FEC_26_45,
+ FEC_28_45,
+ FEC_32_45,
+ FEC_77_90,
+ FEC_11_45,
+ FEC_4_15,
+ FEC_14_45,
+ FEC_7_15,
};
/**
@@ -331,6 +355,13 @@ enum fe_code_rate {
* @APSK_32: 32-APSK modulation
* @DQPSK: DQPSK modulation
* @QAM_4_NR: 4-QAM-NR modulation
+ * @QAM_1024: 1024-QAM modulation
+ * @QAM_4096: 4096-QAM modulation
+ * @APSK_8_L: 8APSK-L modulation
+ * @APSK_16_L: 16APSK-L modulation
+ * @APSK_32_L: 32APSK-L modulation
+ * @APSK_64: 64APSK modulation
+ * @APSK_64_L: 64APSK-L modulation
*
* Please note that not all modulations are supported by a given standard.
*
@@ -350,6 +381,13 @@ enum fe_modulation {
APSK_32,
DQPSK,
QAM_4_NR,
+ QAM_1024,
+ QAM_4096,
+ APSK_8_L,
+ APSK_16_L,
+ APSK_32_L,
+ APSK_64,
+ APSK_64_L,
};
/**
@@ -404,6 +442,7 @@ enum fe_transmit_mode {
* @GUARD_INTERVAL_PN420: PN length 420 (1/4)
* @GUARD_INTERVAL_PN595: PN length 595 (1/6)
* @GUARD_INTERVAL_PN945: PN length 945 (1/9)
+ * @GUARD_INTERVAL_1_64: Guard interval 1/64
*
* Please note that not all guard intervals are supported by a given standard.
*/
@@ -419,6 +458,7 @@ enum fe_guard_interval {
GUARD_INTERVAL_PN420,
GUARD_INTERVAL_PN595,
GUARD_INTERVAL_PN945,
+ GUARD_INTERVAL_1_64,
};
/**
@@ -571,6 +611,9 @@ enum fe_pilot {
 * @ROLLOFF_20: Rolloff factor: α=20%
 * @ROLLOFF_25: Rolloff factor: α=25%
 * @ROLLOFF_AUTO: Auto-detect the rolloff factor.
+ * @ROLLOFF_15: Rolloff factor: α=15%
+ * @ROLLOFF_10: Rolloff factor: α=10%
+ * @ROLLOFF_5: Rolloff factor: α=5%
*
* .. note:
*
@@ -581,6 +624,9 @@ enum fe_rolloff {
ROLLOFF_20,
ROLLOFF_25,
ROLLOFF_AUTO,
+ ROLLOFF_15,
+ ROLLOFF_10,
+ ROLLOFF_5,
};
/**
@@ -594,6 +640,8 @@ enum fe_rolloff {
* Cable TV: DVB-C following ITU-T J.83 Annex B spec (ClearQAM)
* @SYS_DVBC_ANNEX_C:
* Cable TV: DVB-C following ITU-T J.83 Annex C spec
+ * @SYS_DVBC2:
+ * Cable TV: DVB-C2
* @SYS_ISDBC:
* Cable TV: ISDB-C (no drivers yet)
* @SYS_DVBT:
@@ -611,7 +659,7 @@ enum fe_rolloff {
* @SYS_DVBS:
* Satellite TV: DVB-S
* @SYS_DVBS2:
- * Satellite TV: DVB-S2
+ * Satellite TV: DVB-S2 and DVB-S2X
* @SYS_TURBO:
* Satellite TV: DVB-S Turbo
* @SYS_ISDBS:
@@ -645,6 +693,7 @@ enum fe_delivery_system {
SYS_DVBT2,
SYS_TURBO,
SYS_DVBC_ANNEX_C,
+ SYS_DVBC2,
};
/* backward compatibility definitions for delivery systems */
@@ -720,7 +769,7 @@ enum atscmh_rs_frame_mode {
};
/**
- * enum atscmh_rs_code_mode
+ * enum atscmh_rs_code_mode - ATSC-M/H Reed Solomon modes
* @ATSCMH_RSCODE_211_187: Reed Solomon code (211,187).
* @ATSCMH_RSCODE_223_187: Reed Solomon code (223,187).
* @ATSCMH_RSCODE_235_187: Reed Solomon code (235,187).
diff --git a/include/uapi/linux/dvb/net.h b/include/uapi/linux/dvb/net.h
index 0c550ef93f2c..7cbb47ac38ef 100644
--- a/include/uapi/linux/dvb/net.h
+++ b/include/uapi/linux/dvb/net.h
@@ -5,21 +5,6 @@
* Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
* & Ralph Metzler <ralph@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBNET_H_
diff --git a/include/uapi/linux/dvb/osd.h b/include/uapi/linux/dvb/osd.h
index 858997c74043..6003b108ba45 100644
--- a/include/uapi/linux/dvb/osd.h
+++ b/include/uapi/linux/dvb/osd.h
@@ -7,21 +7,6 @@
* Copyright (C) 2001 Ralph Metzler <ralph@convergence.de>
* & Marcus Metzler <marcus@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBOSD_H_
diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h
index 2c5cffe6d2a0..20bc874de321 100644
--- a/include/uapi/linux/dvb/version.h
+++ b/include/uapi/linux/dvb/version.h
@@ -4,27 +4,12 @@
*
* Copyright (C) 2000 Holger Waechtler <holger@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBVERSION_H_
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
-#define DVB_API_VERSION_MINOR 11
+#define DVB_API_VERSION_MINOR 12
#endif /*_DVBVERSION_H_*/
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index 179f1ec60af6..9910b73737e0 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -7,21 +7,6 @@
* Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
* & Ralph Metzler <ralph@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _UAPI_DVBVIDEO_H_
diff --git a/include/uapi/linux/dw100.h b/include/uapi/linux/dw100.h
new file mode 100644
index 000000000000..3356496edd6b
--- /dev/null
+++ b/include/uapi/linux/dw100.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* Copyright 2022 NXP */
+
+#ifndef __UAPI_DW100_H__
+#define __UAPI_DW100_H__
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/dw100.rst for control details.
+ */
+#define V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP (V4L2_CID_USER_DW100_BASE + 1)
+
+#endif
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index f47e853546fa..ef38c2bc5ab7 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -51,6 +51,7 @@
#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_CSKY 252 /* C-SKY */
+#define EM_LOONGARCH 258 /* LoongArch */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
/*
diff --git a/include/uapi/linux/elf-fdpic.h b/include/uapi/linux/elf-fdpic.h
index 4fcc6cfebe18..ec23f0871129 100644
--- a/include/uapi/linux/elf-fdpic.h
+++ b/include/uapi/linux/elf-fdpic.h
@@ -32,4 +32,19 @@ struct elf32_fdpic_loadmap {
#define ELF32_FDPIC_LOADMAP_VERSION 0x0000
+/* segment mappings for ELF FDPIC libraries/executables/interpreters */
+struct elf64_fdpic_loadseg {
+ Elf64_Addr addr; /* core address to which mapped */
+ Elf64_Addr p_vaddr; /* VMA recorded in file */
+ Elf64_Word p_memsz; /* allocation size recorded in file */
+};
+
+struct elf64_fdpic_loadmap {
+ Elf64_Half version; /* version of these structures, just in case... */
+ Elf64_Half nsegs; /* number of segments */
+ struct elf64_fdpic_loadseg segs[];
+};
+
+#define ELF64_FDPIC_LOADMAP_VERSION 0x0000
+
#endif /* _UAPI_LINUX_ELF_FDPIC_H */
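
To show how a consumer walks one of these load maps, here is a hedged sketch
(the helper name is ours and error handling is elided) that translates a
link-time virtual address into its runtime mapping:

    #include <linux/elf-fdpic.h>

    static Elf64_Addr fdpic_translate(const struct elf64_fdpic_loadmap *m,
                                      Elf64_Addr vaddr)
    {
            Elf64_Half i;

            for (i = 0; i < m->nsegs; i++) {
                    const struct elf64_fdpic_loadseg *s = &m->segs[i];

                    if (vaddr >= s->p_vaddr && vaddr - s->p_vaddr < s->p_memsz)
                            return s->addr + (vaddr - s->p_vaddr);
            }
            return 0;       /* not covered by any segment */
    }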
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 22220945a5fd..b54b313bcf07 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -35,10 +35,14 @@ typedef __s64 Elf64_Sxword;
#define PT_HIOS 0x6fffffff /* OS-specific */
#define PT_LOPROC 0x70000000
#define PT_HIPROC 0x7fffffff
-#define PT_GNU_EH_FRAME 0x6474e550
-#define PT_GNU_PROPERTY 0x6474e553
-
+#define PT_GNU_EH_FRAME (PT_LOOS + 0x474e550)
#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
+#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
+
+
+/* ARM MTE memory tag segment type */
+#define PT_AARCH64_MEMTAG_MTE (PT_LOPROC + 0x2)
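
Since the GNU segment types are now expressed relative to PT_LOOS
(0x60000000), a quick compile-time check (a sketch, not part of the header)
confirms the rewrite keeps the historical values:

    #include <linux/elf.h>

    _Static_assert(PT_GNU_EH_FRAME == 0x6474e550, "PT_LOOS + 0x474e550");
    _Static_assert(PT_GNU_PROPERTY == 0x6474e553, "PT_LOOS + 0x474e553");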
/*
* Extended Numbering
@@ -87,7 +91,7 @@ typedef __s64 Elf64_Sxword;
#define DT_INIT 12
#define DT_FINI 13
#define DT_SONAME 14
-#define DT_RPATH 15
+#define DT_RPATH 15
#define DT_SYMBOLIC 16
#define DT_REL 17
#define DT_RELSZ 18
@@ -130,15 +134,15 @@ typedef __s64 Elf64_Sxword;
#define STT_TLS 6
#define ELF_ST_BIND(x) ((x) >> 4)
-#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_TYPE(x) ((x) & 0xf)
#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
-typedef struct dynamic{
+typedef struct {
Elf32_Sword d_tag;
- union{
+ union {
Elf32_Sword d_val;
Elf32_Addr d_ptr;
} d_un;
@@ -169,7 +173,7 @@ typedef struct elf64_rel {
Elf64_Xword r_info; /* index and type of relocation */
} Elf64_Rel;
-typedef struct elf32_rela{
+typedef struct elf32_rela {
Elf32_Addr r_offset;
Elf32_Word r_info;
Elf32_Sword r_addend;
@@ -181,7 +185,7 @@ typedef struct elf64_rela {
Elf64_Sxword r_addend; /* Constant addend used to compute value */
} Elf64_Rela;
-typedef struct elf32_sym{
+typedef struct elf32_sym {
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
@@ -202,7 +206,7 @@ typedef struct elf64_sym {
#define EI_NIDENT 16
-typedef struct elf32_hdr{
+typedef struct elf32_hdr {
unsigned char e_ident[EI_NIDENT];
Elf32_Half e_type;
Elf32_Half e_machine;
@@ -242,7 +246,7 @@ typedef struct elf64_hdr {
#define PF_W 0x2
#define PF_X 0x1
-typedef struct elf32_phdr{
+typedef struct elf32_phdr {
Elf32_Word p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
@@ -368,7 +372,8 @@ typedef struct elf64_shdr {
* Notes used in ET_CORE. Architectures export some of the arch register sets
* using the corresponding note types via the PTRACE_GETREGSET and
* PTRACE_SETREGSET requests.
- * The note name for all these is "LINUX".
+ * The note name for these types is "LINUX", except NT_PRFPREG, which is
+ * named "CORE".
*/
#define NT_PRSTATUS 1
#define NT_PRFPREG 2
@@ -399,9 +404,13 @@ typedef struct elf64_shdr {
#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
+#define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers */
+#define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
+/* Old binutils treats 0x203 as a CET state */
+#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
@@ -416,6 +425,7 @@ typedef struct elf64_shdr {
#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
@@ -425,11 +435,26 @@ typedef struct elf64_shdr {
#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
#define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */
#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */
+#define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */
+#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */
+#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */
+#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
+#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
+#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
+#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
+#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
+#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
+#define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */
+#define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */
+#define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */
+#define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */
/* Note types with note name "GNU" */
#define NT_GNU_PROPERTY_TYPE_0 5
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b4f2d134e713..11fc18988bc2 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -14,7 +14,7 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -26,6 +26,14 @@
* have the same layout for 32-bit and 64-bit userland.
*/
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
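A minimal sketch of what that zeroing looks like from user space (the choice
of struct ethtool_fecparam and the helper name are illustrative; any request
structure with reserved fields follows the same pattern):

    #include <string.h>
    #include <linux/ethtool.h>

    static void prepare_fecparam(struct ethtool_fecparam *fp)
    {
            memset(fp, 0, sizeof(*fp));     /* zeroes @reserved with everything else */
            fp->cmd = ETHTOOL_GFECPARAM;    /* fill in request fields afterwards */
    }
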
/**
* struct ethtool_cmd - DEPRECATED, link control and status
* This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
* and other link features that the link partner advertised
* through autonegotiation; 0 if unknown or not applicable.
* Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* The link speed in Mbps is split between @speed and @speed_hi. Use
* the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -150,11 +159,14 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
* in its bus driver structure (e.g. pci_driver::name). Must
* not be an empty string.
* @version: Driver version string; may be an empty string
- * @fw_version: Firmware version string; may be an empty string
- * @erom_version: Expansion ROM version string; may be an empty string
+ * @fw_version: Firmware version string; driver defined; may be an
+ * empty string
+ * @erom_version: Expansion ROM version string; driver defined; may be
+ * an empty string
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
* @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
* %ETHTOOL_SPFLAGS commands; also the number of strings in the
* %ETH_SS_PRIV_FLAGS set
@@ -169,10 +181,6 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
*
* Users can use the %ETHTOOL_GSSET_INFO command to get the number of
* strings in any string set (from Linux 2.6.34).
- *
- * Drivers should set at most @driver, @version, @fw_version and
- * @bus_info in their get_drvinfo() implementation. The ethtool
- * core fills in the other fields using other driver operations.
*/
struct ethtool_drvinfo {
__u32 cmd;
@@ -221,9 +229,10 @@ enum tunable_id {
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ ETHTOOL_TX_COPYBREAK_BUF_SIZE,
/*
* Add your fresh new tunable attribute above and remember to update
- * tunable_strings[] in net/core/ethtool.c
+ * tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_TUNABLE_COUNT,
};
@@ -246,7 +255,7 @@ struct ethtool_tunable {
__u32 id;
__u32 type_id;
__u32 len;
- void *data[0];
+ void *data[];
};
#define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff
@@ -287,7 +296,7 @@ enum phy_tunable_id {
ETHTOOL_PHY_EDPD,
/*
* Add your fresh new phy tunable attribute above and remember to update
- * phy_tunable_strings[] in net/core/ethtool.c
+ * phy_tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_PHY_TUNABLE_COUNT,
};
@@ -311,7 +320,7 @@ struct ethtool_regs {
__u32 cmd;
__u32 version;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
/**
@@ -337,7 +346,7 @@ struct ethtool_eeprom {
__u32 magic;
__u32 offset;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
/**
@@ -356,6 +365,7 @@ struct ethtool_eeprom {
* @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
* its tx lpi (after reaching 'idle' state). Effective only when eee
* was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
*/
struct ethtool_eee {
__u32 cmd;
@@ -374,6 +384,7 @@ struct ethtool_eee {
* @cmd: %ETHTOOL_GMODULEINFO
* @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
* @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* This structure is used to return the information to
* properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,9 +590,7 @@ struct ethtool_pauseparam {
__u32 tx_pause;
};
-/**
- * enum ethtool_link_ext_state - link extended state
- */
+/* Link extended state */
enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
@@ -593,12 +602,10 @@ enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+ ETHTOOL_LINK_EXT_STATE_MODULE,
};
-/**
- * enum ethtool_link_ext_substate_autoneg - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_AUTONEG.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
enum ethtool_link_ext_substate_autoneg {
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
@@ -608,9 +615,7 @@ enum ethtool_link_ext_substate_autoneg {
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
};
-/**
- * enum ethtool_link_ext_substate_link_training - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
*/
enum ethtool_link_ext_substate_link_training {
ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
@@ -619,9 +624,7 @@ enum ethtool_link_ext_substate_link_training {
ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
};
-/**
- * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
- * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
*/
enum ethtool_link_ext_substate_link_logical_mismatch {
ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
@@ -631,24 +634,26 @@ enum ethtool_link_ext_substate_link_logical_mismatch {
ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
};
-/**
- * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
*/
enum ethtool_link_ext_substate_bad_signal_integrity {
ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS,
};
-/**
- * enum ethtool_link_ext_substate_cable_issue - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
enum ethtool_link_ext_substate_cable_issue {
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
};
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */
+enum ethtool_link_ext_substate_module {
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -661,6 +666,7 @@ enum ethtool_link_ext_substate_cable_issue {
* now deprecated
* @ETH_SS_FEATURES: Device feature names
 * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
* @ETH_SS_LINK_MODES: link mode names
@@ -670,6 +676,13 @@ enum ethtool_link_ext_substate_cable_issue {
* @ETH_SS_TS_TX_TYPES: timestamping Tx types
* @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
* @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ * @ETH_SS_STATS_STD: standardized stats
+ * @ETH_SS_STATS_ETH_PHY: names of IEEE 802.3 PHY statistics
+ * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
+ * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
+ * @ETH_SS_STATS_RMON: names of RMON statistics
+ *
+ * @ETH_SS_COUNT: number of defined string sets
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -688,12 +701,128 @@ enum ethtool_stringset {
ETH_SS_TS_TX_TYPES,
ETH_SS_TS_RX_FILTERS,
ETH_SS_UDP_TUNNEL_TYPES,
+ ETH_SS_STATS_STD,
+ ETH_SS_STATS_ETH_PHY,
+ ETH_SS_STATS_ETH_MAC,
+ ETH_SS_STATS_ETH_CTRL,
+ ETH_SS_STATS_RMON,
/* add new constants above here */
ETH_SS_COUNT
};
/**
+ * enum ethtool_mac_stats_src - source of ethtool MAC statistics
+ * @ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ * if the device supports a MAC merge layer, this retrieves the aggregate
+ * statistics of the eMAC and pMAC. Otherwise, it retrieves just the
+ * statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_EMAC:
+ * if the device supports an MM layer, this retrieves the eMAC statistics.
+ * Otherwise, it retrieves the statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_PMAC:
+ * if the device supports an MM layer, this retrieves the pMAC statistics.
+ */
+enum ethtool_mac_stats_src {
+ ETHTOOL_MAC_STATS_SRC_AGGREGATE,
+ ETHTOOL_MAC_STATS_SRC_EMAC,
+ ETHTOOL_MAC_STATS_SRC_PMAC,
+};
+
+/**
+ * enum ethtool_module_power_mode_policy - plug-in module power mode policy
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
+ * to high power mode when the first port using it is put administratively
+ * up and to low power mode when the last port using it is put
+ * administratively down.
+ */
+enum ethtool_module_power_mode_policy {
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO,
+};
+
+/**
+ * enum ethtool_module_power_mode - plug-in module power mode
+ * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode.
+ */
+enum ethtool_module_power_mode {
+ ETHTOOL_MODULE_POWER_MODE_LOW = 1,
+ ETHTOOL_MODULE_POWER_MODE_HIGH,
+};
+
+/**
+ * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN: state of PoDL PSE functions are
+ * unknown
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: PoDL PSE functions are disabled
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: PoDL PSE functions are enabled
+ */
+enum ethtool_podl_pse_admin_state {
+ ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED,
+};
+
+/**
+ * enum ethtool_podl_pse_pw_d_status - power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN: PoDL PSE
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED: "The enumeration “disabled” is
+ * asserted true when the PoDL PSE state diagram variable mr_pse_enable is
+ * false"
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING: "The enumeration “searching” is
+ * asserted true when either of the PSE state diagram variables
+ * pi_detecting or pi_classifying is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING: "The enumeration “deliveringPower”
+ * is asserted true when the PoDL PSE state diagram variable pi_powered is
+ * true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP: "The enumeration “sleep” is asserted
+ * true when the PoDL PSE state diagram variable pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE: "The enumeration “idle” is asserted true
+ * when the logical combination of the PoDL PSE state diagram variables
+ * pi_prebiased*!pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR: "The enumeration “error” is asserted
+ * true when the PoDL PSE state diagram variable overload_held is true."
+ */
+enum ethtool_podl_pse_pw_d_status {
+ ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR,
+};
+
+/**
+ * enum ethtool_mm_verify_status - status of MAC Merge Verify function
+ * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN:
+ * verification status is unknown
+ * @ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ * the 802.3 Verify State diagram is in the state INIT_VERIFICATION
+ * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ * the Verify State diagram is in the state VERIFICATION_IDLE,
+ * SEND_VERIFY or WAIT_FOR_RESPONSE
+ * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ * indicates that the Verify State diagram is in the state VERIFIED
+ * @ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ * the Verify State diagram is in the state VERIFY_FAIL
+ * @ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ * verification of preemption operation is disabled
+ */
+enum ethtool_mm_verify_status {
+ ETHTOOL_MM_VERIFY_STATUS_UNKNOWN,
+ ETHTOOL_MM_VERIFY_STATUS_INITIAL,
+ ETHTOOL_MM_VERIFY_STATUS_VERIFYING,
+ ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED,
+ ETHTOOL_MM_VERIFY_STATUS_FAILED,
+ ETHTOOL_MM_VERIFY_STATUS_DISABLED,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -709,12 +838,13 @@ struct ethtool_gstrings {
__u32 cmd;
__u32 string_set;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
/**
* struct ethtool_sset_info - string set information
* @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
* @sset_mask: On entry, a bitmask of string sets to query, with bits
* numbered according to &enum ethtool_stringset. On return, a
* bitmask of those string sets queried that are supported.
@@ -733,7 +863,7 @@ struct ethtool_sset_info {
__u32 cmd;
__u32 reserved;
__u64 sset_mask;
- __u32 data[0];
+ __u32 data[];
};
/**
@@ -759,6 +889,7 @@ enum ethtool_test_flags {
* @flags: A bitmask of flags from &enum ethtool_test_flags. Some
* flags may be set by the user on entry; others may be set by
* the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
* @len: On return, the number of test results
* @data: Array of test results
*
@@ -772,7 +903,7 @@ struct ethtool_test {
__u32 flags;
__u32 reserved;
__u32 len;
- __u64 data[0];
+ __u64 data[];
};
/**
@@ -789,7 +920,7 @@ struct ethtool_test {
struct ethtool_stats {
__u32 cmd;
__u32 n_stats;
- __u64 data[0];
+ __u64 data[];
};
/**
@@ -806,7 +937,7 @@ struct ethtool_stats {
struct ethtool_perm_addr {
__u32 cmd;
__u32 size;
- __u8 data[0];
+ __u8 data[];
};
/* boolean flags controlling per-interface behavior characteristics.
@@ -959,6 +1090,7 @@ union ethtool_flow_union {
* @vlan_etype: VLAN EtherType
* @vlan_tci: VLAN tag control information
* @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
*
* Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
* is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1094,7 +1226,7 @@ struct ethtool_rxnfc {
__u32 rule_cnt;
__u32 rss_context;
};
- __u32 rule_locs[0];
+ __u32 rule_locs[];
};
@@ -1114,7 +1246,7 @@ struct ethtool_rxnfc {
struct ethtool_rxfh_indir {
__u32 cmd;
__u32 size;
- __u32 ring_index[0];
+ __u32 ring_index[];
};
/**
@@ -1134,7 +1266,10 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd: Reserved for future extensions.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size __u32 elements, followed by hash key of @key_size
* bytes.
@@ -1152,9 +1287,10 @@ struct ethtool_rxfh {
__u32 indir_size;
__u32 key_size;
__u8 hfunc;
- __u8 rsvd8[3];
+ __u8 input_xfrm;
+ __u8 rsvd8[2];
__u32 rsvd32;
- __u32 rss_config[0];
+ __u32 rss_config[];
};
#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
@@ -1239,7 +1375,7 @@ struct ethtool_dump {
__u32 version;
__u32 flag;
__u32 len;
- __u8 data[0];
+ __u8 data[];
};
#define ETH_FW_DUMP_DISABLE 0
@@ -1271,7 +1407,7 @@ struct ethtool_get_features_block {
struct ethtool_gfeatures {
__u32 cmd;
__u32 size;
- struct ethtool_get_features_block features[0];
+ struct ethtool_get_features_block features[];
};
/**
@@ -1293,7 +1429,7 @@ struct ethtool_set_features_block {
struct ethtool_sfeatures {
__u32 cmd;
__u32 size;
- struct ethtool_set_features_block features[0];
+ struct ethtool_set_features_block features[];
};
/**
@@ -1302,7 +1438,9 @@ struct ethtool_sfeatures {
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
*
* The bits in the 'tx_types' and 'rx_filters' fields correspond to
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1376,15 +1514,33 @@ struct ethtool_per_queue_op {
};
/**
- * struct ethtool_fecparam - Ethernet forward error correction(fec) parameters
+ * struct ethtool_fecparam - Ethernet Forward Error Correction parameters
* @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
- * @active_fec: FEC mode which is active on porte
- * @fec: Bitmask of supported/configured FEC modes
- * @rsvd: Reserved for future extensions. i.e FEC bypass feature.
+ * @active_fec: FEC mode which is active on the port, single bit set, GET only.
+ * @fec: Bitmask of configured FEC modes.
+ * @reserved: Reserved for future extensions, ignore on GET, write 0 for SET.
*
- * Drivers should reject a non-zero setting of @autoneg when
- * autoneogotiation is disabled (or not supported) for the link.
+ * Note that @reserved was never validated on input and ethtool user space
+ * left it uninitialized when calling SET. Hence going forward it can only be
+ * used to return a value to userspace with GET.
+ *
+ * FEC modes supported by the device can be read via %ETHTOOL_GLINKSETTINGS.
+ * FEC settings are configured by link autonegotiation whenever it's enabled.
+ * With autoneg on, %ETHTOOL_GFECPARAM can be used to read the current mode.
*
+ * When autoneg is disabled %ETHTOOL_SFECPARAM controls the FEC settings.
+ * It is recommended that drivers only accept a single bit set in @fec.
+ * When multiple bits are set in @fec, drivers may pick a mode in an
+ * implementation-dependent way. Drivers should reject mixing %ETHTOOL_FEC_AUTO_BIT
+ * with other FEC modes, because it's unclear whether other modes constrain
+ * AUTO in that case or are independent choices.
+ * Drivers must reject SET requests if they support none of the requested modes.
+ *
+ * If the device does not support FEC, drivers may use %ETHTOOL_FEC_NONE instead
+ * of returning %EOPNOTSUPP from %ETHTOOL_GFECPARAM.
+ *
+ * See enum ethtool_fec_config_bits for definition of valid bits for both
+ * @fec and @active_fec.
*/
struct ethtool_fecparam {
__u32 cmd;
@@ -1396,11 +1552,16 @@ struct ethtool_fecparam {
/**
* enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
- * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported
- * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver
- * @ETHTOOL_FEC_OFF: No FEC Mode
- * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode
- * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode
+ * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported. Should not
+ * be used together with other bits. GET only.
+ * @ETHTOOL_FEC_AUTO_BIT: Select default/best FEC mode automatically, usually
+ * based on the link mode and SFP parameters read from the module's
+ * EEPROM. This bit does _not_ mean autonegotiation.
+ * @ETHTOOL_FEC_OFF_BIT: No FEC Mode
+ * @ETHTOOL_FEC_RS_BIT: Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_LLRS_BIT: Low Latency Reed Solomon FEC Mode (25G/50G Ethernet
+ * Consortium)
*/
enum ethtool_fec_config_bits {
ETHTOOL_FEC_NONE_BIT,
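
As a usage sketch for the GET semantics documented above (socket setup and
error handling are elided; fd is any AF_INET/SOCK_DGRAM socket), the legacy
interface hands the structure to the SIOCETHTOOL ioctl through struct ifreq:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int get_active_fec(int fd, const char *ifname, __u32 *active)
    {
            struct ethtool_fecparam fp = { .cmd = ETHTOOL_GFECPARAM };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&fp;

            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return -1;

            *active = fp.active_fec;        /* single bit, per the doc above */
            return 0;
    }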
@@ -1617,6 +1778,19 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
+ ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
+ ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92,
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98,
+ ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
+ ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
+
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
@@ -1728,6 +1902,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_100000 100000
#define SPEED_200000 200000
#define SPEED_400000 400000
+#define SPEED_800000 800000
#define SPEED_UNKNOWN -1
@@ -1765,6 +1940,20 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define MASTER_SLAVE_STATE_SLAVE 3
#define MASTER_SLAVE_STATE_ERR 4
+/* These are used to throttle the rate of data on the phy interface when the
+ * native speed of the interface is higher than the link speed. These should
+ * not be used for phy interfaces which natively support multiple speeds (e.g.
+ * MII or SGMII).
+ */
+/* No rate matching performed. */
+#define RATE_MATCH_NONE 0
+/* The phy sends pause frames to throttle the MAC. */
+#define RATE_MATCH_PAUSE 1
+/* The phy asserts CRS to prevent the MAC from transmitting. */
+#define RATE_MATCH_CRS 2
+/* The MAC is programmed with a sufficiently-large IPG. */
+#define RATE_MATCH_OPEN_LOOP 3
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -1806,6 +1995,15 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WOL_MODE_COUNT 8
+/* RSS hash function data
+ * XOR the corresponding source and destination fields of each specified
+ * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
+ * calculation. Note that this XORing reduces the input set entropy and could
+ * be exploited to reduce the RSS queue spread.
+ */
+#define RXH_XFRM_SYM_XOR (1 << 0)
+#define RXH_XFRM_NO_CHANGE 0xff
+
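A toy illustration of the symmetry, not kernel code: XORing the source and
destination fields makes the transformed input, and therefore the queue
selection, identical for both directions of a flow:

    #include <linux/types.h>

    static __u32 sym_xor_input(__u32 src, __u32 dst)
    {
            return src ^ dst;       /* same value for (src, dst) and (dst, src) */
    }
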
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@@ -1825,6 +2023,53 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
+
+/* Used for GTP-U IPv4 and IPv6.
+ * The format of these GTP packets includes only elements such as
+ * the TEID and the GTP version.
+ * It is primarily intended for UE data communication.
+ */
+#define GTPU_V4_FLOW 0x13 /* hash only */
+#define GTPU_V6_FLOW 0x14 /* hash only */
+
+/* Used for GTP-C IPv4 and IPv6.
+ * The format of these GTP packets does not include a TEID.
+ * They are primarily used for the signalling that creates sessions
+ * for UE data communication, commonly referred to as CSR
+ * (Create Session Request).
+ */
+#define GTPC_V4_FLOW 0x15 /* hash only */
+#define GTPC_V6_FLOW 0x16 /* hash only */
+
+/* Used for GTP-C IPv4 and IPv6.
+ * Unlike GTPC_V4_FLOW, the format of these GTP packets includes a TEID,
+ * which GTP-C packets carry once a session has been created.
+ * This is mainly used for requests that realize UE handover.
+ */
+#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
+#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
+
+/* Used for GTP-U with the PSC (PDU Session Container) extended header.
+ * The format of these GTP packets includes a TEID and a QFI.
+ * In 5G communication through a UPF (User Plane Function),
+ * data traffic carries this extended header.
+ */
+#define GTPU_EH_V4_FLOW 0x19 /* hash only */
+#define GTPU_EH_V6_FLOW 0x1a /* hash only */
+
+/* Used for GTP-U IPv4 and IPv6 PSC (PDU Session Container) extended headers.
+ * This differs from GTPU_EH_V(4|6)_FLOW in that the uplink/downlink
+ * direction carried in the PSC is taken into account, so packets can be
+ * distinguished by direction as well.
+ * The flow types described so far are useful when handling traffic from
+ * the mobile network in a UPF, PGW, etc.
+ */
+#define GTPU_UL_V4_FLOW 0x1b /* hash only */
+#define GTPU_UL_V6_FLOW 0x1c /* hash only */
+#define GTPU_DL_V4_FLOW 0x1d /* hash only */
+#define GTPU_DL_V6_FLOW 0x1e /* hash only */
+
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
@@ -1839,6 +2084,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
@@ -1942,20 +2188,13 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
- * @supported: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features for which the interface
- * supports autonegotiation or auto-detection. Read-only.
- * @advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features that are advertised through
- * autonegotiation or enabled for auto-detection.
- * @lp_advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, and other
- * link features that the link partner advertised through
- * autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @rate_matching: Rate adaptation performed by the PHY.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
@@ -1990,6 +2229,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
+ *
+ * @link_mode_masks is divided into three bitfields, each of length
+ * @link_mode_masks_nwords:
+ * - supported: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features for which the interface
+ * supports autonegotiation or auto-detection. Read-only.
+ * - advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features that are advertised through
+ * autonegotiation or enabled for auto-detection.
+ * - lp_advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, and other
+ * link features that the link partner advertised through
+ * autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
__u32 cmd;
@@ -2005,9 +2259,9 @@ struct ethtool_link_settings {
__u8 transceiver;
__u8 master_slave_cfg;
__u8 master_slave_state;
- __u8 reserved1[1];
+ __u8 rate_matching;
__u32 reserved[7];
- __u32 link_mode_masks[0];
+ __u32 link_mode_masks[];
/* layout of link_mode_masks fields:
* __u32 map_supported[link_mode_masks_nwords];
* __u32 map_advertising[link_mode_masks_nwords];
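
A small sketch (names ours) of how user space indexes the three bitmaps once
the kernel has filled in @link_mode_masks_nwords:

    #include <linux/ethtool.h>

    static void split_link_mode_masks(const struct ethtool_link_settings *ls,
                                      const __u32 **supported,
                                      const __u32 **advertising,
                                      const __u32 **lp_advertising)
    {
            __u32 n = ls->link_mode_masks_nwords;   /* positive after a GET */

            *supported      = ls->link_mode_masks;
            *advertising    = ls->link_mode_masks + n;
            *lp_advertising = ls->link_mode_masks + 2 * n;
    }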
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 5dcd24cb33ea..3f89074aa06c 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -42,6 +42,21 @@ enum {
ETHTOOL_MSG_CABLE_TEST_ACT,
ETHTOOL_MSG_CABLE_TEST_TDR_ACT,
ETHTOOL_MSG_TUNNEL_INFO_GET,
+ ETHTOOL_MSG_FEC_GET,
+ ETHTOOL_MSG_FEC_SET,
+ ETHTOOL_MSG_MODULE_EEPROM_GET,
+ ETHTOOL_MSG_STATS_GET,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ ETHTOOL_MSG_MODULE_GET,
+ ETHTOOL_MSG_MODULE_SET,
+ ETHTOOL_MSG_PSE_GET,
+ ETHTOOL_MSG_PSE_SET,
+ ETHTOOL_MSG_RSS_GET,
+ ETHTOOL_MSG_PLCA_GET_CFG,
+ ETHTOOL_MSG_PLCA_SET_CFG,
+ ETHTOOL_MSG_PLCA_GET_STATUS,
+ ETHTOOL_MSG_MM_GET,
+ ETHTOOL_MSG_MM_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -79,6 +94,21 @@ enum {
ETHTOOL_MSG_TSINFO_GET_REPLY,
ETHTOOL_MSG_CABLE_TEST_NTF,
ETHTOOL_MSG_CABLE_TEST_TDR_NTF,
+ ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
+ ETHTOOL_MSG_FEC_GET_REPLY,
+ ETHTOOL_MSG_FEC_NTF,
+ ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
+ ETHTOOL_MSG_STATS_GET_REPLY,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+ ETHTOOL_MSG_MODULE_GET_REPLY,
+ ETHTOOL_MSG_MODULE_NTF,
+ ETHTOOL_MSG_PSE_GET_REPLY,
+ ETHTOOL_MSG_RSS_GET_REPLY,
+ ETHTOOL_MSG_PLCA_GET_CFG_REPLY,
+ ETHTOOL_MSG_PLCA_GET_STATUS_REPLY,
+ ETHTOOL_MSG_PLCA_NTF,
+ ETHTOOL_MSG_MM_GET_REPLY,
+ ETHTOOL_MSG_MM_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -91,9 +121,12 @@ enum {
#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
/* provide optional reply for SET or ACT requests */
#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
+/* request statistics, if supported by the driver */
+#define ETHTOOL_FLAG_STATS (1 << 2)
#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
- ETHTOOL_FLAG_OMIT_REPLY)
+ ETHTOOL_FLAG_OMIT_REPLY | \
+ ETHTOOL_FLAG_STATS)
enum {
ETHTOOL_A_HEADER_UNSPEC,
@@ -223,6 +256,8 @@ enum {
ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */
ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */
ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */
+ ETHTOOL_A_LINKMODES_LANES, /* u32 */
+ ETHTOOL_A_LINKMODES_RATE_MATCHING, /* u8 */
/* add new constants above here */
__ETHTOOL_A_LINKMODES_CNT,
@@ -239,6 +274,7 @@ enum {
ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */
ETHTOOL_A_LINKSTATE_EXT_STATE, /* u8 */
ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, /* u8 */
+ ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT, /* u32 */
/* add new constants above here */
__ETHTOOL_A_LINKSTATE_CNT,
@@ -300,6 +336,12 @@ enum {
/* RINGS */
enum {
+ ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0,
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED,
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED,
+};
+
+enum {
ETHTOOL_A_RINGS_UNSPEC,
ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */
ETHTOOL_A_RINGS_RX_MAX, /* u32 */
@@ -310,6 +352,13 @@ enum {
ETHTOOL_A_RINGS_RX_MINI, /* u32 */
ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */
ETHTOOL_A_RINGS_TX, /* u32 */
+ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */
+ ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
+ ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
+ ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
+ ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, /* u32 */
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, /* u32 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
@@ -362,6 +411,11 @@ enum {
ETHTOOL_A_COALESCE_TX_USECS_HIGH, /* u32 */
ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, /* u32 */
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */
+ ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, /* u8 */
+ ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, /* u8 */
+ ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, /* u32 */
/* add new constants above here */
__ETHTOOL_A_COALESCE_CNT,
@@ -376,12 +430,28 @@ enum {
ETHTOOL_A_PAUSE_AUTONEG, /* u8 */
ETHTOOL_A_PAUSE_RX, /* u8 */
ETHTOOL_A_PAUSE_TX, /* u8 */
+ ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */
+ ETHTOOL_A_PAUSE_STATS_SRC, /* u32 */
/* add new constants above here */
__ETHTOOL_A_PAUSE_CNT,
ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1)
};
+enum {
+ ETHTOOL_A_PAUSE_STAT_UNSPEC,
+ ETHTOOL_A_PAUSE_STAT_PAD,
+
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
+ ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
+
+ /* add new constants above here
+ * adjust ETHTOOL_PAUSE_STAT_CNT if adding non-stats!
+ */
+ __ETHTOOL_A_PAUSE_STAT_CNT,
+ ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
+};
+
/* EEE */
enum {
@@ -414,6 +484,19 @@ enum {
ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
};
+/* PHC VCLOCKS */
+
+enum {
+ ETHTOOL_A_PHC_VCLOCKS_UNSPEC,
+ ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */
+ ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PHC_VCLOCKS_CNT,
+ ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1)
+};
+
/* CABLE TEST */
enum {
@@ -611,6 +694,288 @@ enum {
ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1)
};
+/* FEC */
+
+enum {
+ ETHTOOL_A_FEC_UNSPEC,
+ ETHTOOL_A_FEC_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_FEC_MODES, /* bitset */
+ ETHTOOL_A_FEC_AUTO, /* u8 */
+ ETHTOOL_A_FEC_ACTIVE, /* u32 */
+ ETHTOOL_A_FEC_STATS, /* nest - _A_FEC_STAT */
+
+ __ETHTOOL_A_FEC_CNT,
+ ETHTOOL_A_FEC_MAX = (__ETHTOOL_A_FEC_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_FEC_STAT_UNSPEC,
+ ETHTOOL_A_FEC_STAT_PAD,
+
+ ETHTOOL_A_FEC_STAT_CORRECTED, /* array, u64 */
+ ETHTOOL_A_FEC_STAT_UNCORR, /* array, u64 */
+ ETHTOOL_A_FEC_STAT_CORR_BITS, /* array, u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_FEC_STAT_CNT,
+ ETHTOOL_A_FEC_STAT_MAX = (__ETHTOOL_A_FEC_STAT_CNT - 1)
+};
+
+/* MODULE EEPROM */
+
+enum {
+ ETHTOOL_A_MODULE_EEPROM_UNSPEC,
+ ETHTOOL_A_MODULE_EEPROM_HEADER, /* nest - _A_HEADER_* */
+
+ ETHTOOL_A_MODULE_EEPROM_OFFSET, /* u32 */
+ ETHTOOL_A_MODULE_EEPROM_LENGTH, /* u32 */
+ ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */
+ ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */
+ ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */
+ ETHTOOL_A_MODULE_EEPROM_DATA, /* binary */
+
+ __ETHTOOL_A_MODULE_EEPROM_CNT,
+ ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1)
+};
+
+/* STATS */
+
+enum {
+ ETHTOOL_A_STATS_UNSPEC,
+ ETHTOOL_A_STATS_PAD,
+ ETHTOOL_A_STATS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_STATS_GROUPS, /* bitset */
+
+ ETHTOOL_A_STATS_GRP, /* nest - _A_STATS_GRP_* */
+
+ ETHTOOL_A_STATS_SRC, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_CNT,
+ ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1)
+};
+
+enum {
+ ETHTOOL_STATS_ETH_PHY,
+ ETHTOOL_STATS_ETH_MAC,
+ ETHTOOL_STATS_ETH_CTRL,
+ ETHTOOL_STATS_RMON,
+
+ /* add new constants above here */
+ __ETHTOOL_STATS_CNT
+};
+
+enum {
+ ETHTOOL_A_STATS_GRP_UNSPEC,
+ ETHTOOL_A_STATS_GRP_PAD,
+
+ ETHTOOL_A_STATS_GRP_ID, /* u32 */
+ ETHTOOL_A_STATS_GRP_SS_ID, /* u32 */
+
+ ETHTOOL_A_STATS_GRP_STAT, /* nest */
+
+ ETHTOOL_A_STATS_GRP_HIST_RX, /* nest */
+ ETHTOOL_A_STATS_GRP_HIST_TX, /* nest */
+
+ ETHTOOL_A_STATS_GRP_HIST_BKT_LOW, /* u32 */
+ ETHTOOL_A_STATS_GRP_HIST_BKT_HI, /* u32 */
+ ETHTOOL_A_STATS_GRP_HIST_VAL, /* u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_GRP_CNT,
+ ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1)
+};
+
+enum {
+ /* 30.3.2.1.5 aSymbolErrorDuringCarrier */
+ ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_ETH_PHY_CNT,
+ ETHTOOL_A_STATS_ETH_PHY_MAX = (__ETHTOOL_A_STATS_ETH_PHY_CNT - 1)
+};
+
+enum {
+ /* 30.3.1.1.2 aFramesTransmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT,
+ /* 30.3.1.1.3 aSingleCollisionFrames */
+ ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL,
+ /* 30.3.1.1.4 aMultipleCollisionFrames */
+ ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL,
+ /* 30.3.1.1.5 aFramesReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT,
+ /* 30.3.1.1.6 aFrameCheckSequenceErrors */
+ ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR,
+ /* 30.3.1.1.7 aAlignmentErrors */
+ ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR,
+ /* 30.3.1.1.8 aOctetsTransmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES,
+ /* 30.3.1.1.9 aFramesWithDeferredXmissions */
+ ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER,
+ /* 30.3.1.1.10 aLateCollisions */
+ ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL,
+ /* 30.3.1.1.11 aFramesAbortedDueToXSColls */
+ ETHTOOL_A_STATS_ETH_MAC_11_XS_COL,
+ /* 30.3.1.1.12 aFramesLostDueToIntMACXmitError */
+ ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR,
+ /* 30.3.1.1.13 aCarrierSenseErrors */
+ ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR,
+ /* 30.3.1.1.14 aOctetsReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES,
+ /* 30.3.1.1.15 aFramesLostDueToIntMACRcvError */
+ ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR,
+
+ /* 30.3.1.1.18 aMulticastFramesXmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST,
+ /* 30.3.1.1.19 aBroadcastFramesXmittedOK */
+ ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST,
+ /* 30.3.1.1.20 aFramesWithExcessiveDeferral */
+ ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER,
+ /* 30.3.1.1.21 aMulticastFramesReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST,
+ /* 30.3.1.1.22 aBroadcastFramesReceivedOK */
+ ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST,
+ /* 30.3.1.1.23 aInRangeLengthErrors */
+ ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR,
+ /* 30.3.1.1.24 aOutOfRangeLengthField */
+ ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN,
+ /* 30.3.1.1.25 aFrameTooLongErrors */
+ ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_ETH_MAC_CNT,
+ ETHTOOL_A_STATS_ETH_MAC_MAX = (__ETHTOOL_A_STATS_ETH_MAC_CNT - 1)
+};
+
+enum {
+ /* 30.3.3.3 aMACControlFramesTransmitted */
+ ETHTOOL_A_STATS_ETH_CTRL_3_TX,
+ /* 30.3.3.4 aMACControlFramesReceived */
+ ETHTOOL_A_STATS_ETH_CTRL_4_RX,
+ /* 30.3.3.5 aUnsupportedOpcodesReceived */
+ ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_ETH_CTRL_CNT,
+ ETHTOOL_A_STATS_ETH_CTRL_MAX = (__ETHTOOL_A_STATS_ETH_CTRL_CNT - 1)
+};
+
+enum {
+ /* etherStatsUndersizePkts */
+ ETHTOOL_A_STATS_RMON_UNDERSIZE,
+ /* etherStatsOversizePkts */
+ ETHTOOL_A_STATS_RMON_OVERSIZE,
+ /* etherStatsFragments */
+ ETHTOOL_A_STATS_RMON_FRAG,
+ /* etherStatsJabbers */
+ ETHTOOL_A_STATS_RMON_JABBER,
+
+ /* add new constants above here */
+ __ETHTOOL_A_STATS_RMON_CNT,
+ ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1)
+};
+
+/* MODULE */
+
+enum {
+ ETHTOOL_A_MODULE_UNSPEC,
+ ETHTOOL_A_MODULE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MODULE_POWER_MODE_POLICY, /* u8 */
+ ETHTOOL_A_MODULE_POWER_MODE, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MODULE_CNT,
+ ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1)
+};
+
+/* Power Sourcing Equipment */
+enum {
+ ETHTOOL_A_PSE_UNSPEC,
+ ETHTOOL_A_PSE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */
+ ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */
+ ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PSE_CNT,
+ ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_RSS_UNSPEC,
+ ETHTOOL_A_RSS_HEADER,
+ ETHTOOL_A_RSS_CONTEXT, /* u32 */
+ ETHTOOL_A_RSS_HFUNC, /* u32 */
+ ETHTOOL_A_RSS_INDIR, /* binary */
+ ETHTOOL_A_RSS_HKEY, /* binary */
+ ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */
+
+ __ETHTOOL_A_RSS_CNT,
+ ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
+};
+
+/* PLCA */
+
+enum {
+ ETHTOOL_A_PLCA_UNSPEC,
+ ETHTOOL_A_PLCA_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PLCA_VERSION, /* u16 */
+ ETHTOOL_A_PLCA_ENABLED, /* u8 */
+ ETHTOOL_A_PLCA_STATUS, /* u8 */
+ ETHTOOL_A_PLCA_NODE_CNT, /* u32 */
+ ETHTOOL_A_PLCA_NODE_ID, /* u32 */
+ ETHTOOL_A_PLCA_TO_TMR, /* u32 */
+ ETHTOOL_A_PLCA_BURST_CNT, /* u32 */
+ ETHTOOL_A_PLCA_BURST_TMR, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PLCA_CNT,
+ ETHTOOL_A_PLCA_MAX = (__ETHTOOL_A_PLCA_CNT - 1)
+};
+
+/* MAC Merge (802.3) */
+
+enum {
+ ETHTOOL_A_MM_STAT_UNSPEC,
+ ETHTOOL_A_MM_STAT_PAD,
+
+ /* aMACMergeFrameAssErrorCount */
+ ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS, /* u64 */
+ /* aMACMergeFrameSmdErrorCount */
+ ETHTOOL_A_MM_STAT_SMD_ERRORS, /* u64 */
+ /* aMACMergeFrameAssOkCount */
+ ETHTOOL_A_MM_STAT_REASSEMBLY_OK, /* u64 */
+ /* aMACMergeFragCountRx */
+ ETHTOOL_A_MM_STAT_RX_FRAG_COUNT, /* u64 */
+ /* aMACMergeFragCountTx */
+ ETHTOOL_A_MM_STAT_TX_FRAG_COUNT, /* u64 */
+ /* aMACMergeHoldCount */
+ ETHTOOL_A_MM_STAT_HOLD_COUNT, /* u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MM_STAT_CNT,
+ ETHTOOL_A_MM_STAT_MAX = (__ETHTOOL_A_MM_STAT_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_MM_UNSPEC,
+ ETHTOOL_A_MM_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MM_PMAC_ENABLED, /* u8 */
+ ETHTOOL_A_MM_TX_ENABLED, /* u8 */
+ ETHTOOL_A_MM_TX_ACTIVE, /* u8 */
+ ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, /* u32 */
+ ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, /* u32 */
+ ETHTOOL_A_MM_VERIFY_ENABLED, /* u8 */
+ ETHTOOL_A_MM_VERIFY_STATUS, /* u8 */
+ ETHTOOL_A_MM_VERIFY_TIME, /* u32 */
+ ETHTOOL_A_MM_MAX_VERIFY_TIME, /* u32 */
+ ETHTOOL_A_MM_STATS, /* nest - _A_MM_STAT_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MM_CNT,
+ ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/eventfd.h b/include/uapi/linux/eventfd.h
new file mode 100644
index 000000000000..2eb9ab6c32f3
--- /dev/null
+++ b/include/uapi/linux/eventfd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_EVENTFD_H
+#define _UAPI_LINUX_EVENTFD_H
+
+#include <linux/fcntl.h>
+
+#define EFD_SEMAPHORE (1 << 0)
+#define EFD_CLOEXEC O_CLOEXEC
+#define EFD_NONBLOCK O_NONBLOCK
+
+#endif /* _UAPI_LINUX_EVENTFD_H */
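
For illustration, these flags map directly onto the eventfd(2) call; a
minimal sketch of the EFD_SEMAPHORE semantics, where each read() decrements
the counter by one instead of draining it:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            uint64_t post = 3, got;
            int fd = eventfd(0, EFD_SEMAPHORE | EFD_CLOEXEC);

            write(fd, &post, sizeof(post)); /* counter = 3 */
            read(fd, &got, sizeof(got));    /* got == 1, counter = 2 */
            read(fd, &got, sizeof(got));    /* got == 1, counter = 1 */

            close(fd);
            return 0;
    }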
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 8a3432d0f0dc..4f4b948ef381 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -41,6 +41,12 @@
#define EPOLLMSG (__force __poll_t)0x00000400
#define EPOLLRDHUP (__force __poll_t)0x00002000
+/*
+ * Internal flag - wakeup generated by io_uring, used to detect recursion back
+ * into the io_uring poll handler.
+ */
+#define EPOLL_URING_WAKE ((__force __poll_t)(1U << 27))
+
/* Set exclusive wakeup mode for the target file descriptor */
#define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28))
@@ -79,16 +85,17 @@ struct epoll_event {
__u64 data;
} EPOLL_PACKED;
-#ifdef CONFIG_PM_SLEEP
-static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
-{
- if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
- epev->events &= ~EPOLLWAKEUP;
-}
-#else
-static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
-{
- epev->events &= ~EPOLLWAKEUP;
-}
-#endif
+struct epoll_params {
+ __u32 busy_poll_usecs;
+ __u16 busy_poll_budget;
+ __u8 prefer_busy_poll;
+
+ /* pad the struct to a multiple of 64 bits */
+ __u8 __pad;
+};
+
+#define EPOLL_IOC_TYPE 0x8A
+#define EPIOCSPARAMS _IOW(EPOLL_IOC_TYPE, 0x01, struct epoll_params)
+#define EPIOCGPARAMS _IOR(EPOLL_IOC_TYPE, 0x02, struct epoll_params)
+
#endif /* _UAPI_LINUX_EVENTPOLL_H */
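
A hedged sketch of driving the new ioctls (the parameter values are
illustrative; the memset also satisfies the requirement that the padding
byte be zero):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/eventpoll.h>

    static int enable_busy_poll(int epfd)
    {
            struct epoll_params p;

            memset(&p, 0, sizeof(p));       /* clears __pad as well */
            p.busy_poll_usecs = 64;
            p.busy_poll_budget = 8;
            p.prefer_busy_poll = 1;

            return ioctl(epfd, EPIOCSPARAMS, &p);
    }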
diff --git a/include/uapi/linux/ext4.h b/include/uapi/linux/ext4.h
new file mode 100644
index 000000000000..1c4c2dd29112
--- /dev/null
+++ b/include/uapi/linux/ext4.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_EXT4_H
+#define _UAPI_LINUX_EXT4_H
+#include <linux/fiemap.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * ext4-specific ioctl commands
+ */
+#define EXT4_IOC_GETVERSION _IOR('f', 3, long)
+#define EXT4_IOC_SETVERSION _IOW('f', 4, long)
+#define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION
+#define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION
+#define EXT4_IOC_GETRSVSZ _IOR('f', 5, long)
+#define EXT4_IOC_SETRSVSZ _IOW('f', 6, long)
+#define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
+#define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
+#define EXT4_IOC_MIGRATE _IO('f', 9)
+ /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
+ /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
+#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
+#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
+#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
+#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
+#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
+/* ioctl codes 19--39 are reserved for fscrypt */
+#define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40)
+#define EXT4_IOC_GETSTATE _IOW('f', 41, __u32)
+#define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap)
+#define EXT4_IOC_CHECKPOINT _IOW('f', 43, __u32)
+#define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid)
+#define EXT4_IOC_SETFSUUID _IOW('f', 44, struct fsuuid)
+
+#define EXT4_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define EXT4_IOC32_GETVERSION _IOR('f', 3, int)
+#define EXT4_IOC32_SETVERSION _IOW('f', 4, int)
+#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
+#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
+#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
+#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
+#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
+#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
+
+/*
+ * Flags returned by EXT4_IOC_GETSTATE
+ *
+ * We only expose to userspace a subset of the state flags in
+ * i_state_flags
+ */
+#define EXT4_STATE_FLAG_EXT_PRECACHED 0x00000001
+#define EXT4_STATE_FLAG_NEW 0x00000002
+#define EXT4_STATE_FLAG_NEWENTRY 0x00000004
+#define EXT4_STATE_FLAG_DA_ALLOC_CLOSE 0x00000008
+
+/*
+ * Flags for ioctl EXT4_IOC_CHECKPOINT
+ */
+#define EXT4_IOC_CHECKPOINT_FLAG_DISCARD 0x1
+#define EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT 0x2
+#define EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN 0x4
+#define EXT4_IOC_CHECKPOINT_FLAG_VALID (EXT4_IOC_CHECKPOINT_FLAG_DISCARD | \
+ EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT | \
+ EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
+
+/*
+ * Structure for EXT4_IOC_GETFSUUID/EXT4_IOC_SETFSUUID
+ */
+struct fsuuid {
+ __u32 fsu_len;
+ __u32 fsu_flags;
+ __u8 fsu_uuid[];
+};
+
+/*
+ * Structure for EXT4_IOC_MOVE_EXT
+ */
+struct move_extent {
+ __u32 reserved; /* should be zero */
+ __u32 donor_fd; /* donor file descriptor */
+ __u64 orig_start; /* logical start offset in block for orig */
+ __u64 donor_start; /* logical start offset in block for donor */
+ __u64 len; /* block length to be moved */
+ __u64 moved_len; /* moved block length */
+};
+
+/*
+ * Flags used by EXT4_IOC_SHUTDOWN
+ */
+#define EXT4_GOING_FLAGS_DEFAULT 0x0 /* going down */
+#define EXT4_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
+#define EXT4_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
+
+/* Used to pass group descriptor data when online resize is done */
+struct ext4_new_group_input {
+ __u32 group; /* Group number for this data */
+ __u64 block_bitmap; /* Absolute block number of block bitmap */
+ __u64 inode_bitmap; /* Absolute block number of inode bitmap */
+ __u64 inode_table; /* Absolute block number of inode table start */
+ __u32 blocks_count; /* Total number of blocks in this group */
+ __u16 reserved_blocks; /* Number of reserved blocks in this group */
+ __u16 unused;
+};
+
+/*
+ * Returned by EXT4_IOC_GET_ES_CACHE as an additional possible flag.
+ * It indicates that the entry in extent status cache is for a hole.
+ */
+#define EXT4_FIEMAP_EXTENT_HOLE 0x08000000
+
+#endif /* _UAPI_LINUX_EXT4_H */
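
A sketch of driving EXT4_IOC_GETFSUUID; it assumes fsu_len is the caller-supplied buffer size (16 bytes for a standard UUID) that the kernel validates before filling fsu_uuid[]:

#include <linux/ext4.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <err.h>

int main(int argc, char **argv)
{
	struct fsuuid *fsu;
	int fd, i;

	if (argc != 2)
		errx(1, "usage: %s <file on ext4>", argv[0]);
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "open");
	/* fsu_uuid[] is a flexible array: reserve room and advertise it */
	fsu = calloc(1, sizeof(*fsu) + 16);
	fsu->fsu_len = 16;
	if (ioctl(fd, EXT4_IOC_GETFSUUID, fsu))
		err(1, "EXT4_IOC_GETFSUUID");
	for (i = 0; i < 16; i++)
		printf("%02x", fsu->fsu_uuid[i]);
	putchar('\n');
	return 0;
}
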
diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h
new file mode 100644
index 000000000000..955d440be104
--- /dev/null
+++ b/include/uapi/linux/f2fs.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_F2FS_H
+#define _UAPI_LINUX_F2FS_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * f2fs-specific ioctl commands
+ */
+#define F2FS_IOCTL_MAGIC 0xf5
+#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1)
+#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2)
+#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
+#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
+#define F2FS_IOC_ABORT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32)
+#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
+#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \
+ struct f2fs_defragment)
+#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct f2fs_move_range)
+#define F2FS_IOC_FLUSH_DEVICE _IOW(F2FS_IOCTL_MAGIC, 10, \
+ struct f2fs_flush_device)
+#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \
+ struct f2fs_gc_range)
+#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32)
+#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
+#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
+#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
+#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
+#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
+#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 18, __u64)
+#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 19, __u64)
+#define F2FS_IOC_SEC_TRIM_FILE _IOW(F2FS_IOCTL_MAGIC, 20, \
+ struct f2fs_sectrim_range)
+#define F2FS_IOC_GET_COMPRESS_OPTION _IOR(F2FS_IOCTL_MAGIC, 21, \
+ struct f2fs_comp_option)
+#define F2FS_IOC_SET_COMPRESS_OPTION _IOW(F2FS_IOCTL_MAGIC, 22, \
+ struct f2fs_comp_option)
+#define F2FS_IOC_DECOMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 23)
+#define F2FS_IOC_COMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 24)
+#define F2FS_IOC_START_ATOMIC_REPLACE _IO(F2FS_IOCTL_MAGIC, 25)
+
+/*
+ * Should be the same as XFS_IOC_GOINGDOWN.
+ * Flags for the going-down operation used by F2FS_IOC_SHUTDOWN.
+ */
+#define F2FS_IOC_SHUTDOWN _IOR('X', 125, __u32) /* Shutdown */
+#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */
+#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */
+#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */
+#define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */
+#define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */
+
+/*
+ * Flags used by F2FS_IOC_SEC_TRIM_FILE
+ */
+#define F2FS_TRIM_FILE_DISCARD 0x1 /* send discard command */
+#define F2FS_TRIM_FILE_ZEROOUT 0x2 /* zero out */
+#define F2FS_TRIM_FILE_MASK 0x3
+
+struct f2fs_gc_range {
+ __u32 sync;
+ __u64 start;
+ __u64 len;
+};
+
+struct f2fs_defragment {
+ __u64 start;
+ __u64 len;
+};
+
+struct f2fs_move_range {
+ __u32 dst_fd; /* destination fd */
+ __u64 pos_in; /* start position in src_fd */
+ __u64 pos_out; /* start position in dst_fd */
+ __u64 len; /* size to move */
+};
+
+struct f2fs_flush_device {
+ __u32 dev_num; /* device number to flush */
+ __u32 segments; /* # of segments to flush */
+};
+
+struct f2fs_sectrim_range {
+ __u64 start;
+ __u64 len;
+ __u64 flags;
+};
+
+struct f2fs_comp_option {
+ __u8 algorithm;
+ __u8 log_cluster_size;
+};
+
+#endif /* _UAPI_LINUX_F2FS_H */
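
A sketch of F2FS_IOC_SEC_TRIM_FILE; a len of -1 is assumed to mean "to end of file", as the f2fs implementation treats it:

#include <linux/f2fs.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <err.h>

int main(int argc, char **argv)
{
	struct f2fs_sectrim_range range = {
		.start = 0,
		.len   = (__u64)-1,		/* assumed: whole file */
		.flags = F2FS_TRIM_FILE_ZEROOUT,
	};
	int fd;

	if (argc != 2)
		errx(1, "usage: %s <file on f2fs>", argv[0]);
	fd = open(argv[1], O_WRONLY);
	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range))
		err(1, "F2FS_IOC_SEC_TRIM_FILE");
	return 0;
}
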
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index fbf9c5c7dd59..a37de58ca571 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -8,8 +8,8 @@
#define FAN_ACCESS 0x00000001 /* File was accessed */
#define FAN_MODIFY 0x00000002 /* File was modified */
#define FAN_ATTRIB 0x00000004 /* Metadata changed */
-#define FAN_CLOSE_WRITE 0x00000008 /* Writtable file closed */
-#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
+#define FAN_CLOSE_WRITE 0x00000008 /* Writable file closed */
+#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
#define FAN_MOVED_FROM 0x00000040 /* File was moved from X */
#define FAN_MOVED_TO 0x00000080 /* File was moved to Y */
@@ -20,6 +20,7 @@
#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FAN_FS_ERROR 0x00008000 /* Filesystem error */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
@@ -27,6 +28,8 @@
#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
+#define FAN_RENAME 0x10000000 /* File was renamed */
+
#define FAN_ONDIR 0x40000000 /* Event occurred against dir */
/* helper events */
@@ -51,13 +54,18 @@
#define FAN_ENABLE_AUDIT 0x00000040
/* Flags to determine fanotify event format */
+#define FAN_REPORT_PIDFD 0x00000080 /* Report pidfd for event->pid */
#define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */
#define FAN_REPORT_FID 0x00000200 /* Report unique file id */
#define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */
#define FAN_REPORT_NAME 0x00000800 /* Report events with name */
+#define FAN_REPORT_TARGET_FID 0x00001000 /* Report dirent target id */
/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */
#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME)
+/* Convenience macro - FAN_REPORT_TARGET_FID requires all other FID flags */
+#define FAN_REPORT_DFID_NAME_TARGET (FAN_REPORT_DFID_NAME | \
+ FAN_REPORT_FID | FAN_REPORT_TARGET_FID)
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
@@ -74,12 +82,21 @@
#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040
#define FAN_MARK_FLUSH 0x00000080
/* FAN_MARK_FILESYSTEM is 0x00000100 */
+#define FAN_MARK_EVICTABLE 0x00000200
+/* This bit is mutually exclusive with FAN_MARK_IGNORED_MASK bit */
+#define FAN_MARK_IGNORE 0x00000400
 /* These are NOT bitwise flags. Both bits can be used together. */
#define FAN_MARK_INODE 0x00000000
#define FAN_MARK_MOUNT 0x00000010
#define FAN_MARK_FILESYSTEM 0x00000100
+/*
+ * Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY
+ * for non-inode mark types.
+ */
+#define FAN_MARK_IGNORE_SURV (FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY)
+
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\
FAN_MARK_REMOVE |\
@@ -123,6 +140,14 @@ struct fanotify_event_metadata {
#define FAN_EVENT_INFO_TYPE_FID 1
#define FAN_EVENT_INFO_TYPE_DFID_NAME 2
#define FAN_EVENT_INFO_TYPE_DFID 3
+#define FAN_EVENT_INFO_TYPE_PIDFD 4
+#define FAN_EVENT_INFO_TYPE_ERROR 5
+
+/* Special info types for FAN_RENAME */
+#define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME 10
+/* Reserved for FAN_EVENT_INFO_TYPE_OLD_DFID 11 */
+#define FAN_EVENT_INFO_TYPE_NEW_DFID_NAME 12
+/* Reserved for FAN_EVENT_INFO_TYPE_NEW_DFID 13 */
/* Variable length info record following event metadata */
struct fanotify_event_info_header {
@@ -145,21 +170,66 @@ struct fanotify_event_info_fid {
* Following is an opaque struct file_handle that can be passed as
* an argument to open_by_handle_at(2).
*/
- unsigned char handle[0];
+ unsigned char handle[];
+};
+
+/*
+ * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD.
+ * It holds a pidfd for the pid that was responsible for generating an event.
+ */
+struct fanotify_event_info_pidfd {
+ struct fanotify_event_info_header hdr;
+ __s32 pidfd;
};
+struct fanotify_event_info_error {
+ struct fanotify_event_info_header hdr;
+ __s32 error;
+ __u32 error_count;
+};
+
+/*
+ * User space may need to record additional information about its decision.
+ * The extra information type records what kind of information is included.
+ * The default is none. We also define an extra information buffer whose
+ * size is determined by the extra information type.
+ *
+ * If the information type is Audit Rule, then the information following
+ * is the rule number that triggered the user space decision that
+ * requires auditing.
+ */
+
+#define FAN_RESPONSE_INFO_NONE 0
+#define FAN_RESPONSE_INFO_AUDIT_RULE 1
+
struct fanotify_response {
__s32 fd;
__u32 response;
};
+struct fanotify_response_info_header {
+ __u8 type;
+ __u8 pad;
+ __u16 len;
+};
+
+struct fanotify_response_info_audit_rule {
+ struct fanotify_response_info_header hdr;
+ __u32 rule_number;
+ __u32 subj_trust;
+ __u32 obj_trust;
+};
+
/* Legit userspace responses to a _PERM event */
#define FAN_ALLOW 0x01
#define FAN_DENY 0x02
-#define FAN_AUDIT 0x10 /* Bit mask to create audit record for result */
+#define FAN_AUDIT 0x10 /* Bitmask to create audit record for result */
+#define FAN_INFO 0x20 /* Bitmask to indicate additional information */
/* No fd set in event */
#define FAN_NOFD -1
+#define FAN_NOPIDFD FAN_NOFD
+#define FAN_EPIDFD -2
/* Helper functions to deal with fanotify_event_metadata buffers */
#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
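
A sketch of walking the variable-length info records that follow each event when FAN_REPORT_PIDFD and friends are enabled; each record's hdr.len covers the whole record, so it doubles as the stride:

#include <linux/fanotify.h>
#include <stdio.h>

static void walk_info_records(const struct fanotify_event_metadata *ev)
{
	const char *p = (const char *)(ev + 1);
	const char *end = (const char *)ev + ev->event_len;

	while (p < end) {
		const struct fanotify_event_info_header *hdr =
			(const void *)p;

		if (hdr->info_type == FAN_EVENT_INFO_TYPE_PIDFD) {
			const struct fanotify_event_info_pidfd *pidfd =
				(const void *)hdr;

			printf("pidfd: %d\n", pidfd->pidfd);
		}
		p += hdr->len;
	}
}
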
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index 4c14e8be7267..cde8f173f566 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/i2c.h>
+#include <linux/vesa.h>
/* Definitions of frame buffers */
@@ -182,7 +183,7 @@ struct fb_fix_screeninfo {
*
* For pseudocolor: offset and length should be the same for all color
* components. Offset specifies the position of the least significant bit
- * of the pallette index in a pixel value. Length indicates the number
+ * of the palette index in a pixel value. Length indicates the number
* of available palette entries (i.e. # of entries = 1 << length).
*/
struct fb_bitfield {
@@ -293,13 +294,6 @@ struct fb_con2fbmap {
__u32 framebuffer;
};
-/* VESA Blanking Levels */
-#define VESA_NO_BLANKING 0
-#define VESA_VSYNC_SUSPEND 1
-#define VESA_HSYNC_SUSPEND 2
-#define VESA_POWERDOWN 3
-
-
enum {
/* screen: unblanked, hsync: on, vsync: on */
FB_BLANK_UNBLANK = VESA_NO_BLANKING,
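
The FB_BLANK_* values now simply alias the VESA levels from <linux/vesa.h>; as a usage reminder (not part of the patch), they feed the FBIOBLANK ioctl defined earlier in this header:

#include <linux/fb.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <err.h>

int main(void)
{
	int fd = open("/dev/fb0", O_RDWR);	/* device path varies */

	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, FBIOBLANK, FB_BLANK_POWERDOWN))
		err(1, "FBIOBLANK");
	return 0;
}
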
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 2f86b2ad6d7e..282e90aeb163 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -43,6 +43,7 @@
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
+#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */
/* (1U << 31) is reserved for signed error codes */
/*
@@ -111,4 +112,12 @@
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
+#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
+ compare object identity and may not
+ be usable to open_by_handle_at(2) */
+#if defined(__KERNEL__)
+#define AT_GETATTR_NOSEC 0x80000000
+#endif
+
#endif /* _UAPI_LINUX_FCNTL_H */
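
A sketch of the new seal on a memfd: once F_SEAL_EXEC is added, chmod calls that flip any execute bit fail with EPERM.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <err.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0)
		err(1, "memfd_create");
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_EXEC) < 0)
		err(1, "F_ADD_SEALS");
	/* fchmod() calls that change an exec bit now fail with EPERM */
	return 0;
}
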
diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h
index 8b80c63b971c..7022e3413dbc 100644
--- a/include/uapi/linux/fd.h
+++ b/include/uapi/linux/fd.h
@@ -49,11 +49,11 @@ struct floppy_struct {
#define FDCLRPRM _IO(2, 0x41)
/* clear user-defined parameters */
-#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
+#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
#define FDSETMEDIAPRM FDSETPRM
/* set user-defined parameters for current media */
-#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
+#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
#define FDGETPRM _IOR(2, 0x04, struct floppy_struct)
#define FDDEFMEDIAPRM FDDEFPRM
#define FDGETMEDIAPRM FDGETPRM
@@ -65,7 +65,7 @@ struct floppy_struct {
/* issue/don't issue kernel messages on media type change */
-/*
+/*
* Formatting (obsolete)
*/
#define FD_FILL_BYTE 0xF6 /* format fill byte. */
@@ -126,13 +126,13 @@ typedef char floppy_drive_name[16];
*/
struct floppy_drive_params {
signed char cmos; /* CMOS type */
-
- /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
+
+ /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
* etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA).
*/
unsigned long max_dtr; /* Step rate, usec */
unsigned long hlt; /* Head load/settle time, msec */
- unsigned long hut; /* Head unload time (remnant of
+ unsigned long hut; /* Head unload time (remnant of
* 8" drives) */
unsigned long srt; /* Step rate, usec */
@@ -145,12 +145,12 @@ struct floppy_drive_params {
unsigned char rps; /* rotations per second */
unsigned char tracks; /* maximum number of tracks */
unsigned long timeout; /* timeout for interrupt requests */
-
- unsigned char interleave_sect; /* if there are more sectors, use
+
+ unsigned char interleave_sect; /* if there are more sectors, use
* interleave */
-
+
struct floppy_max_errors max_errors;
-
+
char flags; /* various flags, including ftd_msg */
/*
* Announce successful media type detection and media information loss after
@@ -162,7 +162,7 @@ struct floppy_drive_params {
#define FD_BROKEN_DCL 0x20
#define FD_DEBUG 0x02
#define FD_SILENT_DCL_CLEAR 0x4
-#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware
+#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware
considerations */
char read_track; /* use readtrack during probing? */
@@ -176,8 +176,8 @@ struct floppy_drive_params {
#define FD_AUTODETECT_SIZE 8
short autodetect[FD_AUTODETECT_SIZE]; /* autodetected formats */
-
- int checkfreq; /* how often should the drive be checked for disk
+
+ int checkfreq; /* how often should the drive be checked for disk
* changes */
int native_format; /* native format of this drive */
};
@@ -225,13 +225,13 @@ struct floppy_drive_struct {
* decremented after each probe.
*/
int keep_data;
-
+
/* Prevent "aliased" accesses. */
int fd_ref;
int fd_device;
- unsigned long last_checked; /* when was the drive last checked for a disk
+ unsigned long last_checked; /* when was the drive last checked for a disk
* change? */
-
+
char *dmabuf;
int bufblocks;
};
@@ -255,7 +255,7 @@ enum reset_mode {
/*
* FDC state
*/
-struct floppy_fdc_state {
+struct floppy_fdc_state {
int spec1; /* spec1 value last used */
int spec2; /* spec2 value last used */
int dtr;
@@ -302,16 +302,16 @@ struct floppy_write_errors {
* to the user process are not counted.
*/
- unsigned int write_errors; /* number of physical write errors
+ unsigned int write_errors; /* number of physical write errors
* encountered */
-
+
/* position of first and last write errors */
unsigned long first_error_sector;
int first_error_generation;
unsigned long last_error_sector;
int last_error_generation;
-
- unsigned int badness; /* highest retry count for a read or write
+
+ unsigned int badness; /* highest retry count for a read or write
* operation */
};
@@ -335,7 +335,7 @@ struct floppy_raw_cmd {
#define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */
#define FD_RAW_INTR 8 /* wait for an interrupt */
#define FD_RAW_SPIN 0x10 /* spin up the disk for this command */
-#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
+#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
* completion */
#define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */
#define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */
@@ -353,7 +353,7 @@ struct floppy_raw_cmd {
void __user *data;
char *kernel_data; /* location of data buffer in the kernel */
- struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
+ struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
* within the kernel */
long length; /* in: length of dma transfer. out: remaining bytes */
long phys_length; /* physical length, if different from dma length */
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index 07c1cdcb715e..24ca0c00cae3 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -34,7 +34,7 @@ struct fiemap {
__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
__u32 fm_extent_count; /* size of fm_extents array (in) */
__u32 fm_reserved;
- struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
+ struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
};
#define FIEMAP_MAX_OFFSET (~0ULL)
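
With fm_extents[] now a flexible array, request buffers are sized explicitly; a small allocation sketch (FS_IOC_FIEMAP itself lives in <linux/fs.h>):

#include <linux/fiemap.h>
#include <stdlib.h>

static struct fiemap *fiemap_alloc(unsigned int n)
{
	struct fiemap *fm = calloc(1, sizeof(*fm) +
				      n * sizeof(struct fiemap_extent));

	if (fm) {
		fm->fm_length = FIEMAP_MAX_OFFSET;	/* map to EOF */
		fm->fm_extent_count = n;
	}
	return fm;	/* then: ioctl(fd, FS_IOC_FIEMAP, fm) */
}
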
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 7e5b5c10a49c..1f2c9469f921 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -46,6 +46,12 @@
#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
+/* available since kernel version 6.5 */
+#define FW_CDEV_EVENT_REQUEST3 0x0a
+#define FW_CDEV_EVENT_RESPONSE2 0x0b
+#define FW_CDEV_EVENT_PHY_PACKET_SENT2 0x0c
+#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 0x0d
+
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_* types
* @closure: For arbitrary use by userspace
@@ -103,6 +109,32 @@ struct fw_cdev_event_bus_reset {
* @length: Data length, i.e. the response's payload size in bytes
* @data: Payload data, if any
*
+ * This event is sent instead of &fw_cdev_event_response2 if the kernel or the client implements
+ * ABI version <= 5. It lacks the time stamp fields present in &fw_cdev_event_response2.
+ */
+struct fw_cdev_event_response {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_response2 - Sent when a response packet was received
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
+ * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST
+ * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE2
+ * @rcode: Response code returned by the remote node
+ * @length: Data length, i.e. the response's payload size in bytes
+ * @request_tstamp: The time stamp of the isochronous cycle at which the request was sent.
+ * @response_tstamp: The time stamp of the isochronous cycle at which the response was sent.
+ * @padding: Padding that keeps the structure size a multiple of 8 bytes on all architectures,
+ *		since the System V ABI for i386 aligns 8-byte objects to only 4 bytes.
+ * @data: Payload data, if any
+ *
* This event is sent when the stack receives a response to an outgoing request
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
@@ -112,13 +144,22 @@ struct fw_cdev_event_bus_reset {
* involve response packets. This includes unified write transactions,
* broadcast write transactions, and transmission of asynchronous stream
* packets. @rcode indicates success or failure of such transmissions.
+ *
+ * The value of @request_tstamp expresses the isochronous cycle at which the request was sent to
+ * initiate the transaction. The value of @response_tstamp expresses the isochronous cycle at which
+ * the response arrived to complete the transaction. Each value is an unsigned 16-bit integer
+ * containing the three low-order bits of the second field and all 13 bits of the cycle field, in
+ * the format of the CYCLE_TIMER register.
*/
-struct fw_cdev_event_response {
+struct fw_cdev_event_response2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
- __u32 data[0];
+ __u32 request_tstamp;
+ __u32 response_tstamp;
+ __u32 padding;
+ __u32 data[];
};
/**
@@ -142,7 +183,7 @@ struct fw_cdev_event_request {
__u64 offset;
__u32 handle;
__u32 length;
- __u32 data[0];
+ __u32 data[];
};
/**
@@ -159,6 +200,41 @@ struct fw_cdev_event_request {
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
*
+ * This event is sent instead of &fw_cdev_event_request3 if the kernel or the client implements
+ * ABI version <= 5. It lacks the time stamp field present in &fw_cdev_event_request3.
+ */
+struct fw_cdev_event_request2 {
+ __u64 closure;
+ __u32 type;
+ __u32 tcode;
+ __u64 offset;
+ __u32 source_node_id;
+ __u32 destination_node_id;
+ __u32 card;
+ __u32 generation;
+ __u32 handle;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_request3 - Sent on incoming request to an address region
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST3
+ * @tcode: Transaction code of the incoming request
+ * @offset: The offset into the 48-bit per-node address space
+ * @source_node_id: Sender node ID
+ * @destination_node_id: Destination node ID
+ * @card: The index of the card from which the request came
+ * @generation: Bus generation in which the request is valid
+ * @handle: Reference to the kernel-side pending request
+ * @length: Data length, i.e. the request's payload size in bytes
+ * @tstamp: The time stamp of the isochronous cycle at which the request arrived.
+ * @padding: Padding that keeps the structure size a multiple of 8 bytes on all architectures,
+ *		since the System V ABI for i386 aligns 8-byte objects to only 4 bytes.
+ * @data: Incoming data, if any
+ *
* This event is sent when the stack receives an incoming request to an address
* region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is
* guaranteed to be completely contained in the specified region. Userspace is
@@ -191,10 +267,14 @@ struct fw_cdev_event_request {
* sent.
*
* If the client subsequently needs to initiate requests to the sender node of
- * an &fw_cdev_event_request2, it needs to use a device file with matching
+ * an &fw_cdev_event_request3, it needs to use a device file with matching
* card index, node ID, and generation for outbound requests.
+ *
+ * @tstamp is the isochronous cycle at which the request arrived. It is a 16-bit integer value
+ * whose three high-order bits express the three low-order bits of the second field and whose
+ * remaining 13 bits express the cycle field, in the format of the CYCLE_TIME register.
*/
-struct fw_cdev_event_request2 {
+struct fw_cdev_event_request3 {
__u64 closure;
__u32 type;
__u32 tcode;
@@ -205,7 +285,9 @@ struct fw_cdev_event_request2 {
__u32 generation;
__u32 handle;
__u32 length;
- __u32 data[0];
+ __u32 tstamp;
+ __u32 padding;
+ __u32 data[];
};
/**
@@ -265,7 +347,7 @@ struct fw_cdev_event_iso_interrupt {
__u32 type;
__u32 cycle;
__u32 header_length;
- __u32 header[0];
+ __u32 header[];
};
/**
@@ -341,21 +423,60 @@ struct fw_cdev_event_iso_resource {
* @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED
* @rcode: %RCODE_..., indicates success or failure of transmission
* @length: Data length in bytes
+ * @data: Incoming data for %FW_CDEV_IOC_RECEIVE_PHY_PACKETS. For %FW_CDEV_IOC_SEND_PHY_PACKET
+ *	   the field carries the same data as the request, thus it is 8 bytes long.
+ *
+ * This event is sent instead of &fw_cdev_event_phy_packet2 if the kernel or
+ * the client implements ABI version <= 5. It lacks the time stamp field present in
+ * &fw_cdev_event_phy_packet2.
+ */
+struct fw_cdev_event_phy_packet {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_phy_packet2 - A PHY packet was transmitted or received with time stamp.
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET
+ * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl
+ * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT2 or %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
+ * @rcode: %RCODE_..., indicates success or failure of transmission
+ * @length: Data length in bytes
+ * @tstamp: For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the time stamp of the isochronous cycle at
+ *	    which the packet arrived. For %FW_CDEV_EVENT_PHY_PACKET_SENT2 and a non-ping packet,
+ *	    the time stamp of the isochronous cycle at which the packet was sent. For a ping
+ *	    packet, the tick count for the round-trip time measured by the 1394 OHCI controller.
* @data: Incoming data
*
- * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty,
- * except in case of a ping packet: Then, @length is 4, and @data[0] is the
- * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE.
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT2, @length is 8 and @data consists of the two PHY
+ * packet quadlets to be sent, in host byte order.
*
- * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data
- * consists of the two PHY packet quadlets, in host byte order.
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, @length is 8 and @data consists of the two PHY
+ * packet quadlets, in host byte order.
+ *
+ * For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the @tstamp is the isochronous cycle at which the
+ * packet arrived. It is a 16-bit integer value whose three high-order bits express the three
+ * low-order bits of the second field and whose remaining 13 bits express the cycle field, in the
+ * format of the CYCLE_TIME register.
+ *
+ * For %FW_CDEV_EVENT_PHY_PACKET_SENT2, the meaning of @tstamp depends on whether the packet was
+ * sent for ping or not. If not for ping, @tstamp is the isochronous cycle at which the packet was
+ * sent, in the same format as for %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2. If for ping, @tstamp is
+ * the round-trip time measured by the 1394 OHCI controller with 42.195 MHz resolution.
*/
-struct fw_cdev_event_phy_packet {
+struct fw_cdev_event_phy_packet2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
- __u32 data[0];
+ __u32 tstamp;
+ __u32 data[];
};
/**
@@ -375,6 +496,11 @@ struct fw_cdev_event_phy_packet {
* %FW_CDEV_EVENT_PHY_PACKET_SENT or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED
*
+ * @request3: Valid if @common.type == %FW_CDEV_EVENT_REQUEST3
+ * @response2: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE2
+ * @phy_packet2: Valid if @common.type == %FW_CDEV_EVENT_PHY_PACKET_SENT2 or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
+ *
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
* processing. Note that for a request, response or iso_interrupt event,
@@ -393,6 +519,9 @@ union fw_cdev_event {
struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
+ struct fw_cdev_event_request3 request3; /* added in 6.5 */
+ struct fw_cdev_event_response2 response2; /* added in 6.5 */
+ struct fw_cdev_event_phy_packet2 phy_packet2; /* added in 6.5 */
};
/* available since kernel version 2.6.22 */
@@ -457,6 +586,11 @@ union fw_cdev_event {
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
* - added %FW_CDEV_IOC_FLUSH_ISO
+ * 6 (6.5) - added events for subactions of asynchronous transactions, with time stamps
+ * - %FW_CDEV_EVENT_REQUEST3
+ * - %FW_CDEV_EVENT_RESPONSE2
+ * - %FW_CDEV_EVENT_PHY_PACKET_SENT2
+ * - %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
*/
/**
@@ -502,11 +636,11 @@ struct fw_cdev_get_info {
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
*
- * Send a request to the device. This ioctl implements all outgoing requests.
- * Both quadlet and block request specify the payload as a pointer to the data
- * in the @data field. Once the transaction completes, the kernel writes an
- * &fw_cdev_event_response event back. The @closure field is passed back to
- * user space in the response event.
+ * Send a request to the device. This ioctl implements all outgoing requests. Both quadlet and
+ * block requests specify the payload as a pointer to the data in the @data field. Once the
+ * transaction completes, the kernel writes either an &fw_cdev_event_response event or an
+ * &fw_cdev_event_response2 event back. The @closure field is passed back to user space in the
+ * response event.
*/
struct fw_cdev_send_request {
__u32 tcode;
@@ -803,7 +937,7 @@ struct fw_cdev_set_iso_channels {
*/
struct fw_cdev_iso_packet {
__u32 control;
- __u32 header[0];
+ __u32 header[];
};
/**
@@ -844,7 +978,7 @@ struct fw_cdev_queue_iso {
* struct fw_cdev_start_iso - Start an isochronous transmission or reception
* @cycle: Cycle in which to start I/O. If @cycle is greater than or
* equal to 0, the I/O will start on that cycle.
- * @sync: Determines the value to wait for for receive packets that have
+ * @sync: Determines the value to wait for receive packets that have
* the %FW_CDEV_ISO_SYNC bit set
* @tags: Tag filter bit mask. Only valid for isochronous reception.
* Determines the tag values for which packets will be accepted.
@@ -989,10 +1123,9 @@ struct fw_cdev_allocate_iso_resource {
* @generation: The bus generation where packet is valid
* @speed: Speed to transmit at
*
- * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
- * to every device which is listening to the specified channel. The kernel
- * writes an &fw_cdev_event_response event which indicates success or failure of
- * the transmission.
+ * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet to every device
+ * which is listening to the specified channel. The kernel writes either an &fw_cdev_event_response
+ * event or an &fw_cdev_event_response2 event, which indicates success or failure of the
+ * transmission.
*/
struct fw_cdev_send_stream_packet {
__u32 length;
@@ -1011,8 +1144,8 @@ struct fw_cdev_send_stream_packet {
* @data: First and second quadlet of the PHY packet
* @generation: The bus generation where packet is valid
*
- * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes
- * on the same card as this device. After transmission, an
- * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated.
+ * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes on the same card as this
+ * device. After transmission, either %FW_CDEV_EVENT_PHY_PACKET_SENT event or
+ * %FW_CDEV_EVENT_PHY_PACKET_SENT2 event is generated.
*
* The payload @data\[\] shall be specified in host byte order. Usually,
@@ -1031,8 +1164,9 @@ struct fw_cdev_send_phy_packet {
* struct fw_cdev_receive_phy_packets - start reception of PHY packets
* @closure: Passed back to userspace in phy packet events
*
- * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to
- * incoming PHY packets from any node on the same bus as the device.
+ * This ioctl activates issuing of either %FW_CDEV_EVENT_PHY_PACKET_RECEIVED or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 due to incoming PHY packets from any node on the same bus
+ * as the device.
*
* The ioctl is only permitted on device files which represent a local node.
*/
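
A reading-side sketch for the new events: read(2) one event into an aligned buffer and dispatch on the common type field, as the union's own comment suggests. Payload-carrying events need a buffer larger than the bare union; the fixed-size read here is for illustration only.

#include <linux/firewire-cdev.h>
#include <unistd.h>
#include <stdio.h>

static void handle_one_event(int fd)
{
	union fw_cdev_event ev;		/* real code: larger, aligned buffer */
	ssize_t n = read(fd, &ev, sizeof(ev));

	if (n < (ssize_t)sizeof(struct fw_cdev_event_common))
		return;
	switch (ev.common.type) {
	case FW_CDEV_EVENT_RESPONSE2:
		printf("rcode %u, req cycle %u, resp cycle %u\n",
		       ev.response2.rcode,
		       ev.response2.request_tstamp,
		       ev.response2.response_tstamp);
		break;
	default:
		break;
	}
}
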
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index 87c2c9f08803..b5cd3e7b3775 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -1,32 +1,37 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* fou.h - FOU Interface */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN uapi header */
#ifndef _UAPI_LINUX_FOU_H
#define _UAPI_LINUX_FOU_H
-/* NETLINK_GENERIC related info
- */
#define FOU_GENL_NAME "fou"
-#define FOU_GENL_VERSION 0x1
+#define FOU_GENL_VERSION 1
enum {
- FOU_ATTR_UNSPEC,
- FOU_ATTR_PORT, /* u16 */
- FOU_ATTR_AF, /* u8 */
- FOU_ATTR_IPPROTO, /* u8 */
- FOU_ATTR_TYPE, /* u8 */
- FOU_ATTR_REMCSUM_NOPARTIAL, /* flag */
- FOU_ATTR_LOCAL_V4, /* u32 */
- FOU_ATTR_LOCAL_V6, /* in6_addr */
- FOU_ATTR_PEER_V4, /* u32 */
- FOU_ATTR_PEER_V6, /* in6_addr */
- FOU_ATTR_PEER_PORT, /* u16 */
- FOU_ATTR_IFINDEX, /* s32 */
-
- __FOU_ATTR_MAX,
+ FOU_ENCAP_UNSPEC,
+ FOU_ENCAP_DIRECT,
+ FOU_ENCAP_GUE,
};
-#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
+enum {
+ FOU_ATTR_UNSPEC,
+ FOU_ATTR_PORT,
+ FOU_ATTR_AF,
+ FOU_ATTR_IPPROTO,
+ FOU_ATTR_TYPE,
+ FOU_ATTR_REMCSUM_NOPARTIAL,
+ FOU_ATTR_LOCAL_V4,
+ FOU_ATTR_LOCAL_V6,
+ FOU_ATTR_PEER_V4,
+ FOU_ATTR_PEER_V6,
+ FOU_ATTR_PEER_PORT,
+ FOU_ATTR_IFINDEX,
+
+ __FOU_ATTR_MAX
+};
+#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
enum {
FOU_CMD_UNSPEC,
@@ -34,15 +39,8 @@ enum {
FOU_CMD_DEL,
FOU_CMD_GET,
- __FOU_CMD_MAX,
+ __FOU_CMD_MAX
};
-
-enum {
- FOU_ENCAP_UNSPEC,
- FOU_ENCAP_DIRECT,
- FOU_ENCAP_GUE,
-};
-
-#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
+#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
#endif /* _UAPI_LINUX_FOU_H */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index f44eb0a04afd..45e4e64fd664 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -64,6 +64,24 @@ struct fstrim_range {
__u64 minlen;
};
+/*
+ * We include a length field because some filesystems (vfat) have an identifier
+ * that we do want to expose as a UUID but which doesn't have the standard length.
+ *
+ * We use a fixed-size buffer because this interface will, by fiat, never
+ * support "UUIDs" longer than 16 bytes; we don't want to force all downstream
+ * users to have to deal with that.
+ */
+struct fsuuid2 {
+ __u8 len;
+ __u8 uuid[16];
+};
+
+struct fs_sysfs_path {
+ __u8 len;
+ __u8 name[128];
+};
+
/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
#define FILE_DEDUPE_RANGE_SAME 0
#define FILE_DEDUPE_RANGE_DIFFERS 1
@@ -90,7 +108,7 @@ struct file_dedupe_range {
__u16 dest_count; /* in - total elements in info array */
__u16 reserved1; /* must be zero */
__u32 reserved2; /* must be zero */
- struct file_dedupe_range_info info[0];
+ struct file_dedupe_range_info info[];
};
/* And dynamically-tunable limits and defaults: */
@@ -184,8 +202,9 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
/*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
*/
@@ -214,6 +233,13 @@ struct fsxattr {
#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX])
#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
+/* Returns the external filesystem UUID, the same one blkid returns */
+#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2)
+/*
+ * Returns the path component under /sys/fs/ that refers to this filesystem;
+ * also /sys/kernel/debug/ for filesystems with debugfs exports
+ */
+#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
@@ -300,8 +326,71 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO O_APPEND */
#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+/* per-IO negation of O_APPEND */
+#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020)
+
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
- RWF_APPEND)
+ RWF_APPEND | RWF_NOAPPEND)
+
+/* Pagemap ioctl */
+#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg)
+
+/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */
+#define PAGE_IS_WPALLOWED (1 << 0)
+#define PAGE_IS_WRITTEN (1 << 1)
+#define PAGE_IS_FILE (1 << 2)
+#define PAGE_IS_PRESENT (1 << 3)
+#define PAGE_IS_SWAPPED (1 << 4)
+#define PAGE_IS_PFNZERO (1 << 5)
+#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
+
+/*
+ * struct page_region - Page region with flags
+ * @start: Start of the region
+ * @end: End of the region (exclusive)
+ * @categories: PAGE_IS_* category bitmask for the region
+ */
+struct page_region {
+ __u64 start;
+ __u64 end;
+ __u64 categories;
+};
+
+/* Flags for PAGEMAP_SCAN ioctl */
+#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */
+#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */
+
+/*
+ * struct pm_scan_arg - Pagemap ioctl argument
+ * @size: Size of the structure
+ * @flags: Flags for the IOCTL
+ * @start: Starting address of the region
+ * @end: Ending address of the region
+ * @walk_end:	Address where the scan stopped (written by kernel).
+ * walk_end == end (address tags cleared) informs that the scan completed on entire range.
+ * @vec: Address of page_region struct array for output
+ * @vec_len: Length of the page_region struct array
+ * @max_pages: Optional limit for number of returned pages (0 = disabled)
+ * @category_inverted: PAGE_IS_* categories which values match if 0 instead of 1
+ * @category_mask: Skip pages for which any category doesn't match
+ * @category_anyof_mask: Skip pages for which no category matches
+ * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned
+ */
+struct pm_scan_arg {
+ __u64 size;
+ __u64 flags;
+ __u64 start;
+ __u64 end;
+ __u64 walk_end;
+ __u64 vec;
+ __u64 vec_len;
+ __u64 max_pages;
+ __u64 category_inverted;
+ __u64 category_mask;
+ __u64 category_anyof_mask;
+ __u64 return_mask;
+};
#endif /* _UAPI_LINUX_FS_H */
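
A sketch of PAGEMAP_SCAN on the caller's own address space, assuming the ioctl's return value is the number of page_region entries filled:

#include <linux/fs.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <err.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct page_region regions[8];
	struct pm_scan_arg arg = {
		.size = sizeof(arg),
		.start = (__u64)(unsigned long)buf,
		.end = (__u64)(unsigned long)buf + len,
		.vec = (__u64)(unsigned long)regions,
		.vec_len = 8,
		.category_mask = PAGE_IS_WRITTEN,
		.return_mask = PAGE_IS_WRITTEN,
	};
	int fd = open("/proc/self/pagemap", O_RDONLY);
	int n;

	if (buf == MAP_FAILED || fd < 0)
		err(1, "setup");
	buf[0] = 1;			/* dirty the first page */
	n = ioctl(fd, PAGEMAP_SCAN, &arg);
	if (n < 0)
		err(1, "PAGEMAP_SCAN");
	printf("%d written region(s); walk_end=%llx\n",
	       n, (unsigned long long)arg.walk_end);
	return 0;
}
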
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 7875709ccfeb..7a8f4c290187 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -20,15 +20,17 @@
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08
#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10
-#define FSCRYPT_POLICY_FLAGS_VALID 0x1F
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
#define FSCRYPT_MODE_AES_256_CTS 4
#define FSCRYPT_MODE_AES_128_CBC 5
#define FSCRYPT_MODE_AES_128_CTS 6
+#define FSCRYPT_MODE_SM4_XTS 7
+#define FSCRYPT_MODE_SM4_CTS 8
#define FSCRYPT_MODE_ADIANTUM 9
-#define __FSCRYPT_MODE_MAX 9
+#define FSCRYPT_MODE_AES_256_HCTR2 10
+/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
/*
* Legacy policy version; ad-hoc KDF and no key verification.
@@ -45,7 +47,6 @@ struct fscrypt_policy_v1 {
__u8 flags;
__u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
};
-#define fscrypt_policy fscrypt_policy_v1
/*
* Process-subscribed "logon" key description prefix and payload format.
@@ -70,7 +71,8 @@ struct fscrypt_policy_v2 {
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
@@ -156,9 +158,9 @@ struct fscrypt_get_key_status_arg {
__u32 __out_reserved[13];
};
-#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */
#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
@@ -170,6 +172,7 @@ struct fscrypt_get_key_status_arg {
/* old names; don't add anything new here! */
#ifndef __KERNEL__
+#define fscrypt_policy fscrypt_policy_v1
#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE
#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4
#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8
@@ -177,7 +180,7 @@ struct fscrypt_get_key_status_arg {
#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32
#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK
#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY
-#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID
+#define FS_POLICY_FLAGS_VALID 0x07 /* contains old flags only */
#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */
#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */
@@ -185,8 +188,6 @@ struct fscrypt_get_key_status_arg {
#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS
#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC
#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */
#define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM
#define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX
#define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE
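
A sketch of setting a v2 policy; the kernel dispatches on the leading version byte, so FS_IOC_SET_ENCRYPTION_POLICY accepts a struct fscrypt_policy_v2 even though it is declared with the v1 type (the key identifier must already have been added with FS_IOC_ADD_ENCRYPTION_KEY):

#include <linux/fscrypt.h>
#include <sys/ioctl.h>
#include <string.h>
#include <fcntl.h>
#include <err.h>

int main(int argc, char **argv)
{
	struct fscrypt_policy_v2 pol;
	int fd;

	if (argc != 2)
		errx(1, "usage: %s <empty directory>", argv[0]);
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "open");
	memset(&pol, 0, sizeof(pol));
	pol.version = FSCRYPT_POLICY_V2;
	pol.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
	pol.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
	/* pol.master_key_identifier: copy from the added key */
	if (ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &pol))
		err(1, "FS_IOC_SET_ENCRYPTION_POLICY");
	return 0;
}
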
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
index da577ecd90e7..a2e730fc6309 100644
--- a/include/uapi/linux/fsi.h
+++ b/include/uapi/linux/fsi.h
@@ -55,4 +55,28 @@ struct scom_access {
#define FSI_SCOM_WRITE _IOWR('s', 0x02, struct scom_access)
#define FSI_SCOM_RESET _IOW('s', 0x03, __u32)
+/*
+ * /dev/sbefifo* ioctl interface
+ */
+
+/**
+ * FSI_SBEFIFO_CMD_TIMEOUT sets the timeout for writing data to the SBEFIFO.
+ *
+ * The command timeout is specified in seconds. The minimum value of the
+ * command timeout is 1 second (the default) and the maximum value is
+ * 120 seconds. A command timeout of 0 resets the value to the default of
+ * 1 second.
+ */
+#define FSI_SBEFIFO_CMD_TIMEOUT_SECONDS _IOW('s', 0x01, __u32)
+
+/**
+ * FSI_SBEFIFO_READ_TIMEOUT sets the read timeout for response from SBE.
+ *
+ * The read timeout is specified in seconds. The minimum value of the read
+ * timeout is 10 seconds (the default) and the maximum value is
+ * 120 seconds. A read timeout of 0 resets the value to the default of
+ * 10 seconds.
+ */
+#define FSI_SBEFIFO_READ_TIMEOUT_SECONDS _IOW('s', 0x00, __u32)
+
#endif /* _UAPI_LINUX_FSI_H */
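
A sketch of setting both timeouts on an opened SBEFIFO character device (the device path is illustrative):

#include <linux/fsi.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <err.h>

int main(void)
{
	__u32 cmd_secs = 30, read_secs = 60;
	int fd = open("/dev/sbefifo1", O_RDWR);	/* name varies by system */

	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, FSI_SBEFIFO_CMD_TIMEOUT_SECONDS, &cmd_secs) ||
	    ioctl(fd, FSI_SBEFIFO_READ_TIMEOUT_SECONDS, &read_secs))
		err(1, "ioctl");
	return 0;
}
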
diff --git a/include/uapi/linux/fsl_mc.h b/include/uapi/linux/fsl_mc.h
new file mode 100644
index 000000000000..e57451570033
--- /dev/null
+++ b/include/uapi/linux/fsl_mc.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Management Complex (MC) userspace public interface
+ *
+ * Copyright 2021 NXP
+ *
+ */
+#ifndef _UAPI_FSL_MC_H_
+#define _UAPI_FSL_MC_H_
+
+#include <linux/types.h>
+
+#define MC_CMD_NUM_OF_PARAMS 7
+
+/**
+ * struct fsl_mc_command - Management Complex (MC) command structure
+ * @header: MC command header
+ * @params: MC command parameters
+ *
+ * Used by FSL_MC_SEND_MC_COMMAND
+ */
+struct fsl_mc_command {
+ __le64 header;
+ __le64 params[MC_CMD_NUM_OF_PARAMS];
+};
+
+#define FSL_MC_SEND_CMD_IOCTL_TYPE 'R'
+#define FSL_MC_SEND_CMD_IOCTL_SEQ 0xE0
+
+#define FSL_MC_SEND_MC_COMMAND \
+ _IOWR(FSL_MC_SEND_CMD_IOCTL_TYPE, FSL_MC_SEND_CMD_IOCTL_SEQ, \
+ struct fsl_mc_command)
+
+#endif /* _UAPI_FSL_MC_H_ */
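
A sketch of issuing one raw command through a DPRC device node; the header and parameter encodings are firmware-defined, so they are left zeroed here and the device path is illustrative:

#include <linux/fsl_mc.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <err.h>

int main(void)
{
	struct fsl_mc_command cmd = { 0 };	/* caller encodes header/params */
	int fd = open("/dev/dprc.1", O_RDWR);	/* container name varies */

	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, FSL_MC_SEND_MC_COMMAND, &cmd))
		err(1, "FSL_MC_SEND_MC_COMMAND");
	return 0;
}
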
diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h
index 91fd519a3f7d..c690d17f1d07 100644
--- a/include/uapi/linux/fsmap.h
+++ b/include/uapi/linux/fsmap.h
@@ -69,7 +69,7 @@ struct fsmap_head {
};
/* Size of an fsmap_head with room for nr records. */
-static inline size_t
+static inline __kernel_size_t
fsmap_sizeof(
unsigned int nr)
{
diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h
index da0daf6c193b..15384e22e331 100644
--- a/include/uapi/linux/fsverity.h
+++ b/include/uapi/linux/fsverity.h
@@ -34,7 +34,70 @@ struct fsverity_digest {
__u8 digest[];
};
+/*
+ * Struct containing a file's Merkle tree properties. The fs-verity file digest
+ * is the hash of this struct. A userspace program needs this struct only if it
+ * needs to compute fs-verity file digests itself, e.g. in order to sign files.
+ * It isn't needed just to enable fs-verity on a file.
+ *
+ * Note: when computing the file digest, 'sig_size' and 'signature' must be left
+ * zero and empty, respectively. These fields are present only because some
+ * filesystems reuse this struct as part of their on-disk format.
+ */
+struct fsverity_descriptor {
+ __u8 version; /* must be 1 */
+ __u8 hash_algorithm; /* Merkle tree hash algorithm */
+ __u8 log_blocksize; /* log2 of size of data and tree blocks */
+ __u8 salt_size; /* size of salt in bytes; 0 if none */
+#ifdef __KERNEL__
+ __le32 sig_size;
+#else
+ __le32 __reserved_0x04; /* must be 0 */
+#endif
+ __le64 data_size; /* size of file the Merkle tree is built over */
+ __u8 root_hash[64]; /* Merkle tree root hash */
+ __u8 salt[32]; /* salt prepended to each hashed block */
+ __u8 __reserved[144]; /* must be 0's */
+#ifdef __KERNEL__
+ __u8 signature[];
+#endif
+};
+
+/*
+ * Format in which fs-verity file digests are signed in built-in signatures.
+ * This is the same as 'struct fsverity_digest', except here some magic bytes
+ * are prepended to provide some context about what is being signed in case the
+ * same key is used for non-fsverity purposes, and here the fields have fixed
+ * endianness.
+ *
+ * This struct is specific to the built-in signature verification support, which
+ * is optional. fs-verity users may also verify signatures in userspace, in
+ * which case userspace is responsible for deciding on what bytes are signed.
+ * This struct may still be used, but it doesn't have to be. For example,
+ * userspace could instead use a string like "sha256:$digest_as_hex_string".
+ */
+struct fsverity_formatted_digest {
+ char magic[8]; /* must be "FSVerity" */
+ __le16 digest_algorithm;
+ __le16 digest_size;
+ __u8 digest[];
+};
+
+#define FS_VERITY_METADATA_TYPE_MERKLE_TREE 1
+#define FS_VERITY_METADATA_TYPE_DESCRIPTOR 2
+#define FS_VERITY_METADATA_TYPE_SIGNATURE 3
+
+struct fsverity_read_metadata_arg {
+ __u64 metadata_type;
+ __u64 offset;
+ __u64 length;
+ __u64 buf_ptr;
+ __u64 __reserved;
+};
+
#define FS_IOC_ENABLE_VERITY _IOW('f', 133, struct fsverity_enable_arg)
#define FS_IOC_MEASURE_VERITY _IOWR('f', 134, struct fsverity_digest)
+#define FS_IOC_READ_VERITY_METADATA \
+ _IOWR('f', 135, struct fsverity_read_metadata_arg)
#endif /* _UAPI_LINUX_FSVERITY_H */
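
A sketch of FS_IOC_READ_VERITY_METADATA pulling the descriptor, assuming the ioctl returns the number of bytes read:

#include <linux/fsverity.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <err.h>

int main(int argc, char **argv)
{
	static char buf[4096];
	struct fsverity_read_metadata_arg arg = {
		.metadata_type = FS_VERITY_METADATA_TYPE_DESCRIPTOR,
		.offset = 0,
		.length = sizeof(buf),
		.buf_ptr = (__u64)(unsigned long)buf,
	};
	int fd, n;

	if (argc != 2)
		errx(1, "usage: %s <verity file>", argv[0]);
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "open");
	n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);
	if (n < 0)
		err(1, "FS_IOC_READ_VERITY_METADATA");
	printf("read %d descriptor bytes\n", n);
	return 0;
}
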
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 373cada89815..d08b99d60f6f 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -172,6 +172,51 @@
* - add FUSE_WRITE_KILL_PRIV flag
* - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING
* - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag
+ *
+ * 7.32
+ * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS
+ *
+ * 7.33
+ * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID
+ * - add FUSE_OPEN_KILL_SUIDGID
+ * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT
+ * - add FUSE_SETXATTR_ACL_KILL_SGID
+ *
+ * 7.34
+ * - add FUSE_SYNCFS
+ *
+ * 7.35
+ * - add FOPEN_NOFLUSH
+ *
+ * 7.36
+ * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag
+ * - add flags2 to fuse_init_in and fuse_init_out
+ * - add FUSE_SECURITY_CTX init flag
+ * - add security context to create, mkdir, symlink, and mknod requests
+ * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX
+ *
+ * 7.37
+ * - add FUSE_TMPFILE
+ *
+ * 7.38
+ * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry
+ * - add FOPEN_PARALLEL_DIRECT_WRITES
+ * - add total_extlen to fuse_in_header
+ * - add FUSE_MAX_NR_SECCTX
+ * - add extension header
+ * - add FUSE_EXT_GROUPS
+ * - add FUSE_CREATE_SUPP_GROUP
+ * - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_ALLOW_MMAP
+ * - add FUSE_STATX and related structures
+ *
+ * 7.40
+ * - add max_stack_depth to fuse_init_out, add FUSE_PASSTHROUGH init flag
+ * - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
+ * - add FUSE_NO_EXPORT_SUPPORT init flag
+ * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
*/
#ifndef _LINUX_FUSE_H
@@ -207,7 +252,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 31
+#define FUSE_KERNEL_MINOR_VERSION 40
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -231,7 +276,41 @@ struct fuse_attr {
uint32_t gid;
uint32_t rdev;
uint32_t blksize;
- uint32_t padding;
+ uint32_t flags;
+};
+
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
};
struct fuse_kstatfs {
@@ -268,6 +347,7 @@ struct fuse_file_lock {
#define FATTR_MTIME_NOW (1 << 8)
#define FATTR_LOCKOWNER (1 << 9)
#define FATTR_CTIME (1 << 10)
+#define FATTR_KILL_SUIDGID (1 << 11)
/**
* Flags returned by the OPEN request
@@ -277,12 +357,18 @@ struct fuse_file_lock {
* FOPEN_NONSEEKABLE: the file is not seekable
* FOPEN_CACHE_DIR: allow caching this directory
* FOPEN_STREAM: the file is stream-like (no file position at all)
+ * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
+ * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode
+ * FOPEN_PASSTHROUGH: passthrough read/write io for this open file
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
#define FOPEN_CACHE_DIR (1 << 3)
#define FOPEN_STREAM (1 << 4)
+#define FOPEN_NOFLUSH (1 << 5)
+#define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6)
+#define FOPEN_PASSTHROUGH (1 << 7)
/**
* INIT request/reply flags
@@ -313,7 +399,28 @@ struct fuse_file_lock {
* FUSE_CACHE_SYMLINKS: cache READLINK responses
* FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
* FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request
- * FUSE_MAP_ALIGNMENT: map_alignment field is valid
+ * FUSE_MAP_ALIGNMENT: init_out.map_alignment contains log2(byte alignment) for
+ * foffset and moffset fields in struct
+ * fuse_setupmapping_out and fuse_removemapping_one.
+ * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts
+ * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc.
+ * Upon write/truncate suid/sgid is only killed if caller
+ * does not have CAP_FSETID. Additionally upon
+ * write/truncate sgid is killed only if file has group
+ * execute permission. (Same as Linux VFS behavior).
+ * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in
+ * FUSE_INIT_EXT: extended fuse_init_in request
+ * FUSE_INIT_RESERVED: reserved, do not use
+ * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and
+ * mknod
+ * FUSE_HAS_INODE_DAX: use per inode DAX
+ * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
+ * symlink and mknod (single group that matches parent)
+ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
+ * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
+ * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
+ * of the request ID indicates resend requests
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -342,6 +449,23 @@ struct fuse_file_lock {
#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
#define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
#define FUSE_MAP_ALIGNMENT (1 << 26)
+#define FUSE_SUBMOUNTS (1 << 27)
+#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28)
+#define FUSE_SETXATTR_EXT (1 << 29)
+#define FUSE_INIT_EXT (1 << 30)
+#define FUSE_INIT_RESERVED (1 << 31)
+/* bits 32..63 get shifted down 32 bits into the flags2 field */
+#define FUSE_SECURITY_CTX (1ULL << 32)
+#define FUSE_HAS_INODE_DAX (1ULL << 33)
+#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
+#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
+#define FUSE_PASSTHROUGH (1ULL << 37)
+#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
+#define FUSE_HAS_RESEND (1ULL << 39)
+
+/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
+#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
/**
* CUSE INIT request/reply flags
@@ -371,11 +495,14 @@ struct fuse_file_lock {
*
* FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
* FUSE_WRITE_LOCKOWNER: lock_owner field is valid
- * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits
+ * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits
*/
#define FUSE_WRITE_CACHE (1 << 0)
#define FUSE_WRITE_LOCKOWNER (1 << 1)
-#define FUSE_WRITE_KILL_PRIV (1 << 2)
+#define FUSE_WRITE_KILL_SUIDGID (1 << 2)
+
+/* Obsolete alias; this flag implies killing suid/sgid only. */
+#define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID
/**
* Read flags
@@ -417,6 +544,44 @@ struct fuse_file_lock {
*/
#define FUSE_FSYNC_FDATASYNC (1 << 0)
+/**
+ * fuse_attr flags
+ *
+ * FUSE_ATTR_SUBMOUNT: Object is a submount root
+ * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode
+ */
+#define FUSE_ATTR_SUBMOUNT (1 << 0)
+#define FUSE_ATTR_DAX (1 << 1)
+
+/**
+ * Open flags
+ * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable
+ */
+#define FUSE_OPEN_KILL_SUIDGID (1 << 0)
+
+/**
+ * setxattr flags
+ * FUSE_SETXATTR_ACL_KILL_SGID: Clear SGID when system.posix_acl_access is set
+ */
+#define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0)
+
+/**
+ * notify_inval_entry flags
+ * FUSE_EXPIRE_ONLY
+ */
+#define FUSE_EXPIRE_ONLY (1 << 0)
+
+/**
+ * extension type
+ * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx
+ * FUSE_EXT_GROUPS: &fuse_supp_groups extension
+ */
+enum fuse_ext_type {
+ /* Types 0..31 are reserved for fuse_secctx_header */
+ FUSE_MAX_NR_SECCTX = 31,
+ FUSE_EXT_GROUPS = 32,
+};
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -465,6 +630,9 @@ enum fuse_opcode {
FUSE_COPY_FILE_RANGE = 47,
FUSE_SETUPMAPPING = 48,
FUSE_REMOVEMAPPING = 49,
+ FUSE_SYNCFS = 50,
+ FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -481,6 +649,7 @@ enum fuse_notify_code {
FUSE_NOTIFY_STORE = 4,
FUSE_NOTIFY_RETRIEVE = 5,
FUSE_NOTIFY_DELETE = 6,
+ FUSE_NOTIFY_RESEND = 7,
FUSE_NOTIFY_CODE_MAX,
};
@@ -529,6 +698,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
@@ -578,20 +763,20 @@ struct fuse_setattr_in {
struct fuse_open_in {
uint32_t flags;
- uint32_t unused;
+ uint32_t open_flags; /* FUSE_OPEN_... */
};
struct fuse_create_in {
uint32_t flags;
uint32_t mode;
uint32_t umask;
- uint32_t padding;
+ uint32_t open_flags; /* FUSE_OPEN_... */
};
struct fuse_open_out {
uint64_t fh;
uint32_t open_flags;
- uint32_t padding;
+ int32_t backing_id;
};
struct fuse_release_in {
@@ -647,9 +832,13 @@ struct fuse_fsync_in {
uint32_t padding;
};
+#define FUSE_COMPAT_SETXATTR_IN_SIZE 8
+
struct fuse_setxattr_in {
uint32_t size;
uint32_t flags;
+ uint32_t setxattr_flags;
+ uint32_t padding;
};
struct fuse_getxattr_in {
@@ -684,6 +873,8 @@ struct fuse_init_in {
uint32_t minor;
uint32_t max_readahead;
uint32_t flags;
+ uint32_t flags2;
+ uint32_t unused[11];
};
#define FUSE_COMPAT_INIT_OUT_SIZE 8
@@ -700,7 +891,9 @@ struct fuse_init_out {
uint32_t time_gran;
uint16_t max_pages;
uint16_t map_alignment;
- uint32_t unused[8];
+ uint32_t flags2;
+ uint32_t max_stack_depth;
+ uint32_t unused[6];
};
#define CUSE_INIT_INFO_MAX 4096
@@ -783,6 +976,14 @@ struct fuse_fallocate_in {
uint32_t padding;
};
+/**
+ * FUSE request unique ID flag
+ *
+ * Indicates whether this is a resend request. The receiver should handle this
+ * request accordingly.
+ */
+#define FUSE_UNIQUE_RESEND (1ULL << 63)
+
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
@@ -791,7 +992,8 @@ struct fuse_in_header {
uint32_t uid;
uint32_t gid;
uint32_t pid;
- uint32_t padding;
+ uint16_t total_extlen; /* length of extensions in 8-byte units */
+ uint16_t padding;
};
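A minimal sketch of how a server could test the resend bit (helper name
illustrative; the unique field of fuse_in_header is elided from this hunk):

#include <stdbool.h>
#include <linux/fuse.h>

/* The high bit of the unique ID marks a resent request. */
static bool req_is_resend(const struct fuse_in_header *hdr)
{
	return hdr->unique & FUSE_UNIQUE_RESEND;
}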
struct fuse_out_header {
@@ -808,9 +1010,12 @@ struct fuse_dirent {
char name[];
};
-#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) \
+/* Align variable-length records to a 64-bit boundary */
+#define FUSE_REC_ALIGN(x) \
(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
+
+#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
+#define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x)
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
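A sketch of how a consumer of this ABI might walk a READDIR reply with these
macros (buffer names illustrative, bounds handling abbreviated):

#include <stddef.h>
#include <stdio.h>
#include <linux/fuse.h>

static void list_names(const char *buf, size_t len)
{
	const char *p = buf;

	while ((size_t)(buf + len - p) >= FUSE_NAME_OFFSET) {
		const struct fuse_dirent *d = (const void *)p;
		size_t rec = FUSE_DIRENT_SIZE(d);

		if ((size_t)(buf + len - p) < rec)
			break;
		printf("%.*s\n", (int)d->namelen, d->name);
		p += rec; /* records are 64-bit aligned */
	}
}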
@@ -833,7 +1038,7 @@ struct fuse_notify_inval_inode_out {
struct fuse_notify_inval_entry_out {
uint64_t parent;
uint32_t namelen;
- uint32_t padding;
+ uint32_t flags;
};
struct fuse_notify_delete_out {
@@ -868,8 +1073,18 @@ struct fuse_notify_retrieve_in {
uint64_t dummy4;
};
+struct fuse_backing_map {
+ int32_t fd;
+ uint32_t flags;
+ uint64_t padding;
+};
+
/* Device ioctls: */
-#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_MAGIC 229
+#define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
+#define FUSE_DEV_IOC_BACKING_OPEN _IOW(FUSE_DEV_IOC_MAGIC, 1, \
+ struct fuse_backing_map)
+#define FUSE_DEV_IOC_BACKING_CLOSE _IOW(FUSE_DEV_IOC_MAGIC, 2, uint32_t)
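A usage sketch for the clone ioctl, as a multi-threaded daemon might issue it
(helper name illustrative):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fuse.h>

/* Clone an existing FUSE session onto a second /dev/fuse fd. */
static int clone_session(int session_fd)
{
	uint32_t src = session_fd;
	int fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FUSE_DEV_IOC_CLONE, &src) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}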
struct fuse_lseek_in {
uint64_t fh;
@@ -892,4 +1107,83 @@ struct fuse_copy_file_range_in {
uint64_t flags;
};
+#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0)
+#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1)
+struct fuse_setupmapping_in {
+ /* An already open handle */
+ uint64_t fh;
+ /* Offset into the file to start the mapping */
+ uint64_t foffset;
+ /* Length of mapping required */
+ uint64_t len;
+ /* Flags, FUSE_SETUPMAPPING_FLAG_* */
+ uint64_t flags;
+ /* Offset in Memory Window */
+ uint64_t moffset;
+};
+
+struct fuse_removemapping_in {
+ /* number of fuse_removemapping_one follows */
+ uint32_t count;
+};
+
+struct fuse_removemapping_one {
+ /* Offset into the DAX window at which to start the unmapping */
+ uint64_t moffset;
+ /* Length of the mapping to remove */
+ uint64_t len;
+};
+
+#define FUSE_REMOVEMAPPING_MAX_ENTRY \
+ (PAGE_SIZE / sizeof(struct fuse_removemapping_one))
+
+struct fuse_syncfs_in {
+ uint64_t padding;
+};
+
+/*
+ * For each security context, send a fuse_secctx carrying the size of that
+ * security context. The fuse_secctx is followed by the security context
+ * name, which in turn is followed by the actual context label:
+ * fuse_secctx, name, context
+ */
+struct fuse_secctx {
+ uint32_t size;
+ uint32_t padding;
+};
+
+/*
+ * Contains the number of fuse_secctx structures being sent and the total
+ * size of all security contexts (including the size of fuse_secctx_header
+ * itself).
+ */
+struct fuse_secctx_header {
+ uint32_t size;
+ uint32_t nr_secctx;
+};
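An illustrative parse of this layout; the exact padding between contexts is
an assumption and bounds checks are elided:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/fuse.h>

static void dump_secctx(const char *buf)
{
	const struct fuse_secctx_header *h = (const void *)buf;
	const char *p = buf + sizeof(*h);
	uint32_t i;

	for (i = 0; i < h->nr_secctx; i++) {
		const struct fuse_secctx *ctx = (const void *)p;
		const char *name = p + sizeof(*ctx);       /* NUL-terminated */
		const char *value = name + strlen(name) + 1;

		printf("%s: %u byte label\n", name, ctx->size);
		p = value + ctx->size;
	}
}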
+
+/**
+ * struct fuse_ext_header - extension header
+ * @size: total size of this extension including this header
+ * @type: type of extension
+ *
+ * This is made compatible with fuse_secctx_header by using type values >
+ * FUSE_MAX_NR_SECCTX
+ */
+struct fuse_ext_header {
+ uint32_t size;
+ uint32_t type;
+};
+
+/**
+ * struct fuse_supp_groups - Supplementary group extension
+ * @nr_groups: number of supplementary groups
+ * @groups: flexible array of group IDs
+ */
+struct fuse_supp_groups {
+ uint32_t nr_groups;
+ uint32_t groups[];
+};
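A sketch of walking the request extension area, whose length in bytes is
fuse_in_header.total_extlen * 8; the placement of the area after the opcode
payload is an assumption here:

#include <stddef.h>
#include <linux/fuse.h>

static const struct fuse_supp_groups *find_groups(const char *ext, size_t extlen)
{
	const char *end = ext + extlen;

	while (ext + sizeof(struct fuse_ext_header) <= end) {
		const struct fuse_ext_header *eh = (const void *)ext;

		if (eh->size < sizeof(*eh))
			break; /* malformed */
		if (eh->type == FUSE_EXT_GROUPS)
			return (const void *)(eh + 1);
		ext += eh->size; /* size includes the header itself */
	}
	return NULL;
}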
+
#endif /* _LINUX_FUSE_H */
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index a89eb0accd5e..d2ee625ea189 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_LOCK_PI2 13
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -32,6 +33,7 @@
#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI2_PRIVATE (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG)
#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
@@ -42,6 +44,56 @@
FUTEX_PRIVATE_FLAG)
/*
+ * Flags for futex2 syscalls.
+ *
+ * NOTE: these are not pure flags, they can also be seen as:
+ *
+ * union {
+ * u32 flags;
+ * struct {
+ * u32 size : 2,
+ * numa : 1,
+ * : 4,
+ * private : 1;
+ * };
+ * };
+ */
+#define FUTEX2_SIZE_U8 0x00
+#define FUTEX2_SIZE_U16 0x01
+#define FUTEX2_SIZE_U32 0x02
+#define FUTEX2_SIZE_U64 0x03
+#define FUTEX2_NUMA 0x04
+ /* 0x08 */
+ /* 0x10 */
+ /* 0x20 */
+ /* 0x40 */
+#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG
+
+#define FUTEX2_SIZE_MASK 0x03
+
+/* do not use */
+#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */
+
+/*
+ * Max numbers of elements in a futex_waitv array
+ */
+#define FUTEX_WAITV_MAX 128
+
+/**
+ * struct futex_waitv - A waiter for vectorized wait
+ * @val: Expected value at uaddr
+ * @uaddr: User address to wait on
+ * @flags: Flags for this waiter
+ * @__reserved: Reserved member to preserve data alignment. Should be 0.
+ */
+struct futex_waitv {
+ __u64 val;
+ __u64 uaddr;
+ __u32 flags;
+ __u32 __reserved;
+};
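A minimal sketch of a vectorized wait on a single 32-bit futex word
(SYS_futex_waitv is the raw syscall, Linux 5.16+; glibc provides no wrapper):

#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <linux/futex.h>

/* Sleep while *word == expected, until woken or interrupted. */
static long wait_one(uint32_t *word, uint32_t expected)
{
	struct futex_waitv w = {
		.val = expected,
		.uaddr = (uintptr_t)word,
		.flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE,
		.__reserved = 0,
	};

	return syscall(SYS_futex_waitv, &w, 1, 0, NULL, CLOCK_MONOTONIC);
}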
+
+/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*/
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index 9c0636ec2286..ddba3ca01e39 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -64,6 +64,8 @@ enum {
CTRL_ATTR_OPS,
CTRL_ATTR_MCAST_GROUPS,
CTRL_ATTR_POLICY,
+ CTRL_ATTR_OP_POLICY,
+ CTRL_ATTR_OP,
__CTRL_ATTR_MAX,
};
@@ -87,5 +89,15 @@ enum {
#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+enum {
+ CTRL_ATTR_POLICY_UNSPEC,
+ CTRL_ATTR_POLICY_DO,
+ CTRL_ATTR_POLICY_DUMP,
+
+ __CTRL_ATTR_POLICY_DUMP_MAX,
+ CTRL_ATTR_POLICY_DUMP_MAX = __CTRL_ATTR_POLICY_DUMP_MAX - 1
+};
+
+#define CTRL_ATTR_POLICY_MAX (__CTRL_ATTR_POLICY_DUMP_MAX - 1)
#endif /* _UAPI__LINUX_GENERIC_NETLINK_H */
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 07e508e6691b..6ec4291bcc7a 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -47,7 +47,7 @@
#define GFS2_FORMAT_DE 1200
#define GFS2_FORMAT_QU 1500
/* These are part of the superblock */
-#define GFS2_FORMAT_FS 1801
+#define GFS2_FORMAT_FS 1802
#define GFS2_FORMAT_MULTI 1900
/*
@@ -389,8 +389,9 @@ struct gfs2_leaf {
#define GFS2_EATYPE_USR 1
#define GFS2_EATYPE_SYS 2
#define GFS2_EATYPE_SECURITY 3
+#define GFS2_EATYPE_TRUSTED 4
-#define GFS2_EATYPE_LAST 3
+#define GFS2_EATYPE_LAST 4
#define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST)
#define GFS2_EAFLAG_LAST 0x01 /* last ea in block */
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 9c27cecf406f..f7cb8ae87df7 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* <linux/gpio.h> - userspace ABI for the GPIO character devices
*
@@ -11,22 +11,305 @@
#ifndef _UAPI_GPIO_H_
#define _UAPI_GPIO_H_
+#include <linux/const.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * The maximum size of name and label arrays.
+ *
+ * Must be a multiple of 8 to ensure 32/64-bit alignment of structs.
+ */
+#define GPIO_MAX_NAME_SIZE 32
+
/**
* struct gpiochip_info - Information about a certain GPIO chip
* @name: the Linux kernel name of this GPIO chip
* @label: a functional name for this GPIO chip, such as a product
- * number, may be empty
+ * number, may be empty (i.e. label[0] == '\0')
* @lines: number of GPIO lines on this chip
*/
struct gpiochip_info {
- char name[32];
- char label[32];
+ char name[GPIO_MAX_NAME_SIZE];
+ char label[GPIO_MAX_NAME_SIZE];
__u32 lines;
};
+/*
+ * Maximum number of requested lines.
+ *
+ * Must be no greater than 64, as bitmaps are restricted here to 64-bits
+ * for simplicity, and a multiple of 2 to ensure 32/64-bit alignment of
+ * structs.
+ */
+#define GPIO_V2_LINES_MAX 64
+
+/*
+ * The maximum number of configuration attributes associated with a line
+ * request.
+ */
+#define GPIO_V2_LINE_NUM_ATTRS_MAX 10
+
+/**
+ * enum gpio_v2_line_flag - &struct gpio_v2_line_attribute.flags values
+ * @GPIO_V2_LINE_FLAG_USED: line is not available for request
+ * @GPIO_V2_LINE_FLAG_ACTIVE_LOW: line active state is physical low
+ * @GPIO_V2_LINE_FLAG_INPUT: line is an input
+ * @GPIO_V2_LINE_FLAG_OUTPUT: line is an output
+ * @GPIO_V2_LINE_FLAG_EDGE_RISING: line detects rising (inactive to active)
+ * edges
+ * @GPIO_V2_LINE_FLAG_EDGE_FALLING: line detects falling (active to
+ * inactive) edges
+ * @GPIO_V2_LINE_FLAG_OPEN_DRAIN: line is an open drain output
+ * @GPIO_V2_LINE_FLAG_OPEN_SOURCE: line is an open source output
+ * @GPIO_V2_LINE_FLAG_BIAS_PULL_UP: line has pull-up bias enabled
+ * @GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN: line has pull-down bias enabled
+ * @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled
+ * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps
+ * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE: line events contain timestamps from
+ * the hardware timestamping engine (HTE) subsystem
+ */
+enum gpio_v2_line_flag {
+ GPIO_V2_LINE_FLAG_USED = _BITULL(0),
+ GPIO_V2_LINE_FLAG_ACTIVE_LOW = _BITULL(1),
+ GPIO_V2_LINE_FLAG_INPUT = _BITULL(2),
+ GPIO_V2_LINE_FLAG_OUTPUT = _BITULL(3),
+ GPIO_V2_LINE_FLAG_EDGE_RISING = _BITULL(4),
+ GPIO_V2_LINE_FLAG_EDGE_FALLING = _BITULL(5),
+ GPIO_V2_LINE_FLAG_OPEN_DRAIN = _BITULL(6),
+ GPIO_V2_LINE_FLAG_OPEN_SOURCE = _BITULL(7),
+ GPIO_V2_LINE_FLAG_BIAS_PULL_UP = _BITULL(8),
+ GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = _BITULL(9),
+ GPIO_V2_LINE_FLAG_BIAS_DISABLED = _BITULL(10),
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = _BITULL(11),
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE = _BITULL(12),
+};
+
+/**
+ * struct gpio_v2_line_values - Values of GPIO lines
+ * @bits: a bitmap containing the value of the lines, set to 1 for active
+ * and 0 for inactive
+ * @mask: a bitmap identifying the lines to get or set, with each bit
+ * number corresponding to the index into &struct
+ * gpio_v2_line_request.offsets
+ */
+struct gpio_v2_line_values {
+ __aligned_u64 bits;
+ __aligned_u64 mask;
+};
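A short sketch of the bitmap convention: bit N of @bits and @mask refers to
offsets[N] of the originating request (fd name illustrative; the SET_VALUES
ioctl is defined later in this header):

#include <sys/ioctl.h>
#include <linux/gpio.h>

static int set_lines_0_and_2(int req_fd)
{
	struct gpio_v2_line_values vals = {
		.bits = 0x5, /* line indexes 0 and 2 -> active */
		.mask = 0x5, /* touch only those two lines */
	};

	return ioctl(req_fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &vals);
}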
+
+/**
+ * enum gpio_v2_line_attr_id - &struct gpio_v2_line_attribute.id values
+ * identifying which field of the attribute union is in use.
+ * @GPIO_V2_LINE_ATTR_ID_FLAGS: flags field is in use
+ * @GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES: values field is in use
+ * @GPIO_V2_LINE_ATTR_ID_DEBOUNCE: debounce_period_us field is in use
+ */
+enum gpio_v2_line_attr_id {
+ GPIO_V2_LINE_ATTR_ID_FLAGS = 1,
+ GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES = 2,
+ GPIO_V2_LINE_ATTR_ID_DEBOUNCE = 3,
+};
+
+/**
+ * struct gpio_v2_line_attribute - a configurable attribute of a line
+ * @id: attribute identifier with value from &enum gpio_v2_line_attr_id
+ * @padding: reserved for future use and must be zero filled
+ * @flags: if id is %GPIO_V2_LINE_ATTR_ID_FLAGS, the flags for the GPIO
+ * line, with values from &enum gpio_v2_line_flag, such as
+ * %GPIO_V2_LINE_FLAG_ACTIVE_LOW, %GPIO_V2_LINE_FLAG_OUTPUT etc, added
+ * together. This overrides the default flags contained in the &struct
+ * gpio_v2_line_config for the associated line.
+ * @values: if id is %GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES, a bitmap
+ * containing the values to which the lines will be set, with each bit
+ * number corresponding to the index into &struct
+ * gpio_v2_line_request.offsets
+ * @debounce_period_us: if id is %GPIO_V2_LINE_ATTR_ID_DEBOUNCE, the
+ * desired debounce period, in microseconds
+ */
+struct gpio_v2_line_attribute {
+ __u32 id;
+ __u32 padding;
+ union {
+ __aligned_u64 flags;
+ __aligned_u64 values;
+ __u32 debounce_period_us;
+ };
+};
+
+/**
+ * struct gpio_v2_line_config_attribute - a configuration attribute
+ * associated with one or more of the requested lines.
+ * @attr: the configurable attribute
+ * @mask: a bitmap identifying the lines to which the attribute applies,
+ * with each bit number corresponding to the index into &struct
+ * gpio_v2_line_request.offsets
+ */
+struct gpio_v2_line_config_attribute {
+ struct gpio_v2_line_attribute attr;
+ __aligned_u64 mask;
+};
+
+/**
+ * struct gpio_v2_line_config - Configuration for GPIO lines
+ * @flags: flags for the GPIO lines, with values from &enum
+ * gpio_v2_line_flag, such as %GPIO_V2_LINE_FLAG_ACTIVE_LOW,
+ * %GPIO_V2_LINE_FLAG_OUTPUT etc, added together. This is the default for
+ * all requested lines but may be overridden for particular lines using
+ * @attrs.
+ * @num_attrs: the number of attributes in @attrs
+ * @padding: reserved for future use and must be zero filled
+ * @attrs: the configuration attributes associated with the requested
+ * lines. Any attribute should only be associated with a particular line
+ * once. If an attribute is associated with a line multiple times then the
+ * first occurrence (i.e. lowest index) has precedence.
+ */
+struct gpio_v2_line_config {
+ __aligned_u64 flags;
+ __u32 num_attrs;
+ /* Pad to fill implicit padding and reserve space for future use. */
+ __u32 padding[5];
+ struct gpio_v2_line_config_attribute attrs[GPIO_V2_LINE_NUM_ATTRS_MAX];
+};
+
+/**
+ * struct gpio_v2_line_request - Information about a request for GPIO lines
+ * @offsets: an array of desired lines, specified by offset index for the
+ * associated GPIO chip
+ * @consumer: a desired consumer label for the selected GPIO lines such as
+ * "my-bitbanged-relay"
+ * @config: requested configuration for the lines
+ * @num_lines: number of lines requested in this request, i.e. the number
+ * of valid fields in the %GPIO_V2_LINES_MAX sized arrays, set to 1 to
+ * request a single line
+ * @event_buffer_size: a suggested minimum number of line events that the
+ * kernel should buffer. This is only relevant if edge detection is
+ * enabled in the configuration. Note that this is only a suggested value
+ * and the kernel may allocate a larger buffer or cap the size of the
+ * buffer. If this field is zero then the buffer size defaults to a minimum
+ * of @num_lines * 16.
+ * @padding: reserved for future use and must be zero filled
+ * @fd: after a successful %GPIO_V2_GET_LINE_IOCTL operation, contains
+ * a valid anonymous file descriptor representing the request
+ */
+struct gpio_v2_line_request {
+ __u32 offsets[GPIO_V2_LINES_MAX];
+ char consumer[GPIO_MAX_NAME_SIZE];
+ struct gpio_v2_line_config config;
+ __u32 num_lines;
+ __u32 event_buffer_size;
+ /* Pad to fill implicit padding and reserve space for future use. */
+ __u32 padding[5];
+ __s32 fd;
+};
+
+/**
+ * struct gpio_v2_line_info - Information about a certain GPIO line
+ * @name: the name of this GPIO line, such as the output pin of the line on
+ * the chip, a rail or a pin header name on a board, as specified by the
+ * GPIO chip, may be empty (i.e. name[0] == '\0')
+ * @consumer: a functional name for the consumer of this GPIO line as set
+ * by whatever is using it, will be empty if there is no current user but
+ * may also be empty if the consumer doesn't set this up
+ * @offset: the local offset on this GPIO chip, fill this in when
+ * requesting the line information from the kernel
+ * @num_attrs: the number of attributes in @attrs
+ * @flags: flags for this GPIO line, with values from &enum
+ * gpio_v2_line_flag, such as %GPIO_V2_LINE_FLAG_ACTIVE_LOW,
+ * %GPIO_V2_LINE_FLAG_OUTPUT etc, added together
+ * @attrs: the configuration attributes associated with the line
+ * @padding: reserved for future use
+ */
+struct gpio_v2_line_info {
+ char name[GPIO_MAX_NAME_SIZE];
+ char consumer[GPIO_MAX_NAME_SIZE];
+ __u32 offset;
+ __u32 num_attrs;
+ __aligned_u64 flags;
+ struct gpio_v2_line_attribute attrs[GPIO_V2_LINE_NUM_ATTRS_MAX];
+ /* Space reserved for future use. */
+ __u32 padding[4];
+};
+
+/**
+ * enum gpio_v2_line_changed_type - &struct gpio_v2_line_changed.event_type
+ * values
+ * @GPIO_V2_LINE_CHANGED_REQUESTED: line has been requested
+ * @GPIO_V2_LINE_CHANGED_RELEASED: line has been released
+ * @GPIO_V2_LINE_CHANGED_CONFIG: line has been reconfigured
+ */
+enum gpio_v2_line_changed_type {
+ GPIO_V2_LINE_CHANGED_REQUESTED = 1,
+ GPIO_V2_LINE_CHANGED_RELEASED = 2,
+ GPIO_V2_LINE_CHANGED_CONFIG = 3,
+};
+
+/**
+ * struct gpio_v2_line_info_changed - Information about a change in status
+ * of a GPIO line
+ * @info: updated line information
+ * @timestamp_ns: estimate of time of status change occurrence, in nanoseconds
+ * @event_type: the type of change with a value from &enum
+ * gpio_v2_line_changed_type
+ * @padding: reserved for future use
+ */
+struct gpio_v2_line_info_changed {
+ struct gpio_v2_line_info info;
+ __aligned_u64 timestamp_ns;
+ __u32 event_type;
+ /* Pad struct to 64-bit boundary and reserve space for future use. */
+ __u32 padding[5];
+};
+
+/**
+ * enum gpio_v2_line_event_id - &struct gpio_v2_line_event.id values
+ * @GPIO_V2_LINE_EVENT_RISING_EDGE: event triggered by a rising edge
+ * @GPIO_V2_LINE_EVENT_FALLING_EDGE: event triggered by a falling edge
+ */
+enum gpio_v2_line_event_id {
+ GPIO_V2_LINE_EVENT_RISING_EDGE = 1,
+ GPIO_V2_LINE_EVENT_FALLING_EDGE = 2,
+};
+
+/**
+ * struct gpio_v2_line_event - The actual event being pushed to userspace
+ * @timestamp_ns: best estimate of time of event occurrence, in nanoseconds
+ * @id: event identifier with value from &enum gpio_v2_line_event_id
+ * @offset: the offset of the line that triggered the event
+ * @seqno: the sequence number for this event in the sequence of events for
+ * all the lines in this line request
+ * @line_seqno: the sequence number for this event in the sequence of
+ * events on this particular line
+ * @padding: reserved for future use
+ *
+ * By default the @timestamp_ns is read from %CLOCK_MONOTONIC and is
+ * intended to allow the accurate measurement of the time between events.
+ * It does not provide the wall-clock time.
+ *
+ * If the %GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME flag is set then the
+ * @timestamp_ns is read from %CLOCK_REALTIME.
+ *
+ * If the %GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE flag is set then the
+ * @timestamp_ns is provided by the hardware timestamping engine (HTE)
+ * subsystem.
+ */
+struct gpio_v2_line_event {
+ __aligned_u64 timestamp_ns;
+ __u32 id;
+ __u32 offset;
+ __u32 seqno;
+ __u32 line_seqno;
+ /* Space reserved for future use. */
+ __u32 padding[6];
+};
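A sketch of consuming edge events: read() on an edge-enabled request fd
returns whole gpio_v2_line_event records (fd name illustrative):

#include <unistd.h>
#include <linux/gpio.h>

static unsigned long count_rising(int req_fd)
{
	struct gpio_v2_line_event ev;
	unsigned long rising = 0;

	while (read(req_fd, &ev, sizeof(ev)) == sizeof(ev)) {
		/* timestamp_ns is CLOCK_MONOTONIC unless REALTIME/HTE is set */
		if (ev.id == GPIO_V2_LINE_EVENT_RISING_EDGE)
			rising++;
	}
	return rising;
}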
+
+/*
+ * ABI v1
+ *
+ * This version of the ABI is deprecated.
+ * Use the latest version of the ABI, defined above, instead.
+ */
+
/* Informational flags */
#define GPIOLINE_FLAG_KERNEL (1UL << 0) /* Line used by the kernel */
#define GPIOLINE_FLAG_IS_OUT (1UL << 1)
@@ -44,16 +327,19 @@ struct gpiochip_info {
* @flags: various flags for this line
* @name: the name of this GPIO line, such as the output pin of the line on the
* chip, a rail or a pin header name on a board, as specified by the gpio
- * chip, may be empty
+ * chip, may be empty (i.e. name[0] == '\0')
* @consumer: a functional name for the consumer of this GPIO line as set by
* whatever is using it, will be empty if there is no current user but may
* also be empty if the consumer doesn't set this up
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_info instead.
*/
struct gpioline_info {
__u32 line_offset;
__u32 flags;
- char name[32];
- char consumer[32];
+ char name[GPIO_MAX_NAME_SIZE];
+ char consumer[GPIO_MAX_NAME_SIZE];
};
/* Maximum number of requested handles */
@@ -71,14 +357,18 @@ enum {
* of a GPIO line
* @info: updated line information
* @timestamp: estimate of time of status change occurrence, in nanoseconds
- * @event_type: one of GPIOLINE_CHANGED_REQUESTED, GPIOLINE_CHANGED_RELEASED
- * and GPIOLINE_CHANGED_CONFIG
+ * @event_type: one of %GPIOLINE_CHANGED_REQUESTED,
+ * %GPIOLINE_CHANGED_RELEASED and %GPIOLINE_CHANGED_CONFIG
+ * @padding: reserved for future use
*
- * Note: struct gpioline_info embedded here has 32-bit alignment on its own,
+ * The &struct gpioline_info embedded here has 32-bit alignment on its own,
* but it works fine with 64-bit alignment too. With its 72 byte size, we can
* guarantee there are no implicit holes between it and subsequent members.
* The 20-byte padding at the end makes sure we don't add any implicit padding
* at the end of the structure on 64-bit architectures.
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_info_changed instead.
*/
struct gpioline_info_changed {
struct gpioline_info info;
@@ -102,28 +392,30 @@ struct gpioline_info_changed {
* @lineoffsets: an array of desired lines, specified by offset index for the
* associated GPIO device
* @flags: desired flags for the desired GPIO lines, such as
- * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
+ * %GPIOHANDLE_REQUEST_OUTPUT, %GPIOHANDLE_REQUEST_ACTIVE_LOW etc, added
* together. Note that even if multiple lines are requested, the same flags
* must be applicable to all of them, if you want lines with individual
* flags set, request them one by one. It is possible to select
* a batch of input or output lines, but they must all have the same
* characteristics, i.e. all inputs or all outputs, all active low etc
- * @default_values: if the GPIOHANDLE_REQUEST_OUTPUT is set for a requested
- * line, this specifies the default output value, should be 0 (low) or
- * 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
+ * @default_values: if the %GPIOHANDLE_REQUEST_OUTPUT is set for a requested
+ * line, this specifies the default output value, should be 0 (inactive) or
+ * 1 (active). Anything other than 0 or 1 will be interpreted as active.
* @consumer_label: a desired consumer label for the selected GPIO line(s)
* such as "my-bitbanged-relay"
* @lines: number of lines requested in this request, i.e. the number of
* valid fields in the above arrays, set to 1 to request a single line
- * @fd: if successful this field will contain a valid anonymous file handle
- * after a GPIO_GET_LINEHANDLE_IOCTL operation, zero or negative value
- * means error
+ * @fd: after a successful %GPIO_GET_LINEHANDLE_IOCTL operation, contains
+ * a valid anonymous file descriptor representing the request
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_request instead.
*/
struct gpiohandle_request {
__u32 lineoffsets[GPIOHANDLES_MAX];
__u32 flags;
__u8 default_values[GPIOHANDLES_MAX];
- char consumer_label[32];
+ char consumer_label[GPIO_MAX_NAME_SIZE];
__u32 lines;
int fd;
};
@@ -131,12 +423,15 @@ struct gpiohandle_request {
/**
* struct gpiohandle_config - Configuration for a GPIO handle request
* @flags: updated flags for the requested GPIO lines, such as
- * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
+ * %GPIOHANDLE_REQUEST_OUTPUT, %GPIOHANDLE_REQUEST_ACTIVE_LOW etc, added
* together
- * @default_values: if the GPIOHANDLE_REQUEST_OUTPUT is set in flags,
- * this specifies the default output value, should be 0 (low) or
- * 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
+ * @default_values: if the %GPIOHANDLE_REQUEST_OUTPUT is set in flags,
+ * this specifies the default output value, should be 0 (inactive) or
+ * 1 (active). Anything other than 0 or 1 will be interpreted as active.
* @padding: reserved for future use and should be zero filled
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_config instead.
*/
struct gpiohandle_config {
__u32 flags;
@@ -144,21 +439,20 @@ struct gpiohandle_config {
__u32 padding[4]; /* padding for future use */
};
-#define GPIOHANDLE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0a, struct gpiohandle_config)
-
/**
* struct gpiohandle_data - Information of values on a GPIO handle
* @values: when getting the state of lines this contains the current
* state of a line, when setting the state of lines these should contain
- * the desired target state
+ * the desired target state. States are 0 (inactive) or 1 (active).
+ * When setting, anything other than 0 or 1 will be interpreted as active.
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_values instead.
*/
struct gpiohandle_data {
__u8 values[GPIOHANDLES_MAX];
};
-#define GPIOHANDLE_GET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x08, struct gpiohandle_data)
-#define GPIOHANDLE_SET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x09, struct gpiohandle_data)
-
/* Eventrequest flags */
#define GPIOEVENT_REQUEST_RISING_EDGE (1UL << 0)
#define GPIOEVENT_REQUEST_FALLING_EDGE (1UL << 1)
@@ -169,24 +463,26 @@ struct gpiohandle_data {
* @lineoffset: the desired line to subscribe to events from, specified by
* offset index for the associated GPIO device
* @handleflags: desired handle flags for the desired GPIO line, such as
- * GPIOHANDLE_REQUEST_ACTIVE_LOW or GPIOHANDLE_REQUEST_OPEN_DRAIN
+ * %GPIOHANDLE_REQUEST_ACTIVE_LOW or %GPIOHANDLE_REQUEST_OPEN_DRAIN
* @eventflags: desired flags for the desired GPIO event line, such as
- * GPIOEVENT_REQUEST_RISING_EDGE or GPIOEVENT_REQUEST_FALLING_EDGE
+ * %GPIOEVENT_REQUEST_RISING_EDGE or %GPIOEVENT_REQUEST_FALLING_EDGE
* @consumer_label: a desired consumer label for the selected GPIO line(s)
* such as "my-listener"
- * @fd: if successful this field will contain a valid anonymous file handle
- * after a GPIO_GET_LINEEVENT_IOCTL operation, zero or negative value
- * means error
+ * @fd: after a successful %GPIO_GET_LINEEVENT_IOCTL operation, contains a
+ * valid anonymous file descriptor representing the request
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_request instead.
*/
struct gpioevent_request {
__u32 lineoffset;
__u32 handleflags;
__u32 eventflags;
- char consumer_label[32];
+ char consumer_label[GPIO_MAX_NAME_SIZE];
int fd;
};
-/**
+/*
* GPIO event types
*/
#define GPIOEVENT_EVENT_RISING_EDGE 0x01
@@ -195,18 +491,44 @@ struct gpioevent_request {
/**
* struct gpioevent_data - The actual event being pushed to userspace
* @timestamp: best estimate of time of event occurrence, in nanoseconds
- * @id: event identifier
+ * @id: event identifier, one of %GPIOEVENT_EVENT_RISING_EDGE or
+ * %GPIOEVENT_EVENT_FALLING_EDGE
+ *
+ * Note: This struct is part of ABI v1 and is deprecated.
+ * Use ABI v2 and &struct gpio_v2_line_event instead.
*/
struct gpioevent_data {
__u64 timestamp;
__u32 id;
};
+/*
+ * v1 and v2 ioctl()s
+ */
#define GPIO_GET_CHIPINFO_IOCTL _IOR(0xB4, 0x01, struct gpiochip_info)
+#define GPIO_GET_LINEINFO_UNWATCH_IOCTL _IOWR(0xB4, 0x0C, __u32)
+
+/*
+ * v2 ioctl()s
+ */
+#define GPIO_V2_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x05, struct gpio_v2_line_info)
+#define GPIO_V2_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x06, struct gpio_v2_line_info)
+#define GPIO_V2_GET_LINE_IOCTL _IOWR(0xB4, 0x07, struct gpio_v2_line_request)
+#define GPIO_V2_LINE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0D, struct gpio_v2_line_config)
+#define GPIO_V2_LINE_GET_VALUES_IOCTL _IOWR(0xB4, 0x0E, struct gpio_v2_line_values)
+#define GPIO_V2_LINE_SET_VALUES_IOCTL _IOWR(0xB4, 0x0F, struct gpio_v2_line_values)
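An end-to-end sketch of the v2 flow: request one line as an output and drive
it active (chip path and line offset are illustrative):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpio_v2_line_request req;
	struct gpio_v2_line_values vals = { .bits = 1, .mask = 1 };
	int chip = open("/dev/gpiochip0", O_RDWR | O_CLOEXEC);

	if (chip < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.offsets[0] = 17; /* chip-relative line offset */
	req.num_lines = 1;
	req.config.flags = GPIO_V2_LINE_FLAG_OUTPUT;
	strncpy(req.consumer, "example", GPIO_MAX_NAME_SIZE - 1);
	if (ioctl(chip, GPIO_V2_GET_LINE_IOCTL, &req) < 0)
		return 1;
	if (ioctl(req.fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &vals) < 0)
		return 1;
	close(req.fd);
	close(chip);
	return 0;
}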
+
+/*
+ * v1 ioctl()s
+ *
+ * These ioctl()s are deprecated. Use the v2 equivalent instead.
+ */
#define GPIO_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x02, struct gpioline_info)
-#define GPIO_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x0b, struct gpioline_info)
-#define GPIO_GET_LINEINFO_UNWATCH_IOCTL _IOWR(0xB4, 0x0c, __u32)
#define GPIO_GET_LINEHANDLE_IOCTL _IOWR(0xB4, 0x03, struct gpiohandle_request)
#define GPIO_GET_LINEEVENT_IOCTL _IOWR(0xB4, 0x04, struct gpioevent_request)
+#define GPIOHANDLE_GET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x08, struct gpiohandle_data)
+#define GPIOHANDLE_SET_LINE_VALUES_IOCTL _IOWR(0xB4, 0x09, struct gpiohandle_data)
+#define GPIOHANDLE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0A, struct gpiohandle_config)
+#define GPIO_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x0B, struct gpioline_info)
#endif /* _UAPI_GPIO_H_ */
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index cb8693b39cb7..3a93f17ca943 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -1,11 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2022/23 Siemens Mobility GmbH */
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
+#include <linux/const.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * flags definition for n_gsm
+ *
+ * Used by:
+ * struct gsm_config_ext.flags
+ * struct gsm_dlci_config.flags
+ */
+/* Forces a DLCI reset if set. Otherwise, a DLCI reset is only done if
+ * incompatible settings were provided. Always cleared on retrieval.
+ */
+#define GSM_FL_RESTART _BITUL(0)
+
+/**
+ * struct gsm_config - n_gsm basic configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF and GSMIOC_SETCONF
+ * to retrieve and set the basic parameters of an n_gsm ldisc.
+ * struct gsm_config_ext can be used to configure extended ldisc parameters.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @adaption: Convergence layer type
+ * @encapsulation: Framing (0 = basic option, 1 = advanced option)
+ * @initiator: Initiator or responder
+ * @t1: Acknowledgment timer
+ * @t2: Response timer for multiplexer control channel
+ * @t3: Response timer for wake-up procedure
+ * @n2: Maximum number of retransmissions
+ * @mru: Maximum incoming frame payload size
+ * @mtu: Maximum outgoing frame payload size
+ * @k: Window size
+ * @i: Frame type (1 = UIH, 2 = UI)
+ * @unused: Cannot be used
+ */
struct gsm_config
{
unsigned int adaption;
@@ -19,19 +55,32 @@ struct gsm_config
unsigned int mtu;
unsigned int k;
unsigned int i;
- unsigned int unused[8]; /* Padding for expansion without
- breaking stuff */
+ unsigned int unused[8];
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
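A sketch of the usual read-modify-write pattern for these ioctls (fd and
helper names illustrative; the fd must have the n_gsm ldisc attached):

#include <sys/ioctl.h>
#include <linux/gsmmux.h>

static int set_mtu(int tty_fd, unsigned int mtu)
{
	struct gsm_config c;

	if (ioctl(tty_fd, GSMIOC_GETCONF, &c) < 0)
		return -1;
	c.mtu = mtu;
	c.mru = mtu;
	return ioctl(tty_fd, GSMIOC_SETCONF, &c);
}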
+/**
+ * struct gsm_netconfig - n_gsm network configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_ENABLE_NET and
+ * GSMIOC_DISABLE_NET to enable or disable a network data connection
+ * over a mux virtual tty channel. This is for modems that support
+ * data connections with raw IP frames instead of PPP.
+ *
+ * @adaption: Adaption to use in network mode.
+ * @protocol: Protocol to use - only ETH_P_IP supported.
+ * @unused2: Cannot be used.
+ * @if_name: Interface name format string.
+ * @unused: Cannot be used.
+ */
struct gsm_netconfig {
- unsigned int adaption; /* Adaption to use in network mode */
- unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
+ unsigned int adaption;
+ unsigned short protocol;
unsigned short unused2;
- char if_name[IFNAMSIZ]; /* interface name format string */
- __u8 unused[28]; /* For future use */
+ char if_name[IFNAMSIZ];
+ __u8 unused[28];
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
@@ -40,4 +89,60 @@ struct gsm_netconfig {
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
+/**
+ * struct gsm_config_ext - n_gsm extended configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_EXT and
+ * GSMIOC_SETCONF_EXT to retrieve and set the extended parameters of an
+ * n_gsm ldisc.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @keep_alive: Control channel keep-alive in 1/100th of a second (0 to disable).
+ * @wait_config: Wait for DLCI config before opening virtual link?
+ * @flags: Mux specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
+struct gsm_config_ext {
+ __u32 keep_alive;
+ __u32 wait_config;
+ __u32 flags;
+ __u32 reserved[5];
+};
+
+#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
+#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
+
+/**
+ * struct gsm_dlci_config - n_gsm channel configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_DLCI and
+ * GSMIOC_SETCONF_DLCI to retrieve and set the channel specific parameters
+ * of an n_gsm ldisc.
+ *
+ * Set the channel accordingly before calling GSMIOC_GETCONF_DLCI.
+ *
+ * @channel: DLCI (0 for the associated DLCI).
+ * @adaption: Convergence layer type.
+ * @mtu: Maximum transfer unit.
+ * @priority: Priority (0 for default value).
+ * @i: Frame type (1 = UIH, 2 = UI).
+ * @k: Window size (0 for default value).
+ * @flags: DLCI specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
+struct gsm_dlci_config {
+ __u32 channel;
+ __u32 adaption;
+ __u32 mtu;
+ __u32 priority;
+ __u32 i;
+ __u32 k;
+ __u32 flags;
+ __u32 reserved[7];
+};
+
+#define GSMIOC_GETCONF_DLCI _IOWR('G', 7, struct gsm_dlci_config)
+#define GSMIOC_SETCONF_DLCI _IOW('G', 8, struct gsm_dlci_config)
+
#endif
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index c7d66755d212..3dcdb9e33cba 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -2,10 +2,13 @@
#ifndef _UAPI_LINUX_GTP_H_
#define _UAPI_LINUX_GTP_H_
+#define GTP_GENL_MCGRP_NAME "gtp"
+
enum gtp_genl_cmds {
GTP_CMD_NEWPDP,
GTP_CMD_DELPDP,
GTP_CMD_GETPDP,
+ GTP_CMD_ECHOREQ,
GTP_CMD_MAX,
};
@@ -30,6 +33,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h
new file mode 100644
index 000000000000..3d7ea58778c9
--- /dev/null
+++ b/include/uapi/linux/handshake.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/handshake.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_HANDSHAKE_H
+#define _UAPI_LINUX_HANDSHAKE_H
+
+#define HANDSHAKE_FAMILY_NAME "handshake"
+#define HANDSHAKE_FAMILY_VERSION 1
+
+enum handshake_handler_class {
+ HANDSHAKE_HANDLER_CLASS_NONE,
+ HANDSHAKE_HANDLER_CLASS_TLSHD,
+ HANDSHAKE_HANDLER_CLASS_MAX,
+};
+
+enum handshake_msg_type {
+ HANDSHAKE_MSG_TYPE_UNSPEC,
+ HANDSHAKE_MSG_TYPE_CLIENTHELLO,
+ HANDSHAKE_MSG_TYPE_SERVERHELLO,
+};
+
+enum handshake_auth {
+ HANDSHAKE_AUTH_UNSPEC,
+ HANDSHAKE_AUTH_UNAUTH,
+ HANDSHAKE_AUTH_PSK,
+ HANDSHAKE_AUTH_X509,
+};
+
+enum {
+ HANDSHAKE_A_X509_CERT = 1,
+ HANDSHAKE_A_X509_PRIVKEY,
+
+ __HANDSHAKE_A_X509_MAX,
+ HANDSHAKE_A_X509_MAX = (__HANDSHAKE_A_X509_MAX - 1)
+};
+
+enum {
+ HANDSHAKE_A_ACCEPT_SOCKFD = 1,
+ HANDSHAKE_A_ACCEPT_HANDLER_CLASS,
+ HANDSHAKE_A_ACCEPT_MESSAGE_TYPE,
+ HANDSHAKE_A_ACCEPT_TIMEOUT,
+ HANDSHAKE_A_ACCEPT_AUTH_MODE,
+ HANDSHAKE_A_ACCEPT_PEER_IDENTITY,
+ HANDSHAKE_A_ACCEPT_CERTIFICATE,
+ HANDSHAKE_A_ACCEPT_PEERNAME,
+
+ __HANDSHAKE_A_ACCEPT_MAX,
+ HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1)
+};
+
+enum {
+ HANDSHAKE_A_DONE_STATUS = 1,
+ HANDSHAKE_A_DONE_SOCKFD,
+ HANDSHAKE_A_DONE_REMOTE_AUTH,
+
+ __HANDSHAKE_A_DONE_MAX,
+ HANDSHAKE_A_DONE_MAX = (__HANDSHAKE_A_DONE_MAX - 1)
+};
+
+enum {
+ HANDSHAKE_CMD_READY = 1,
+ HANDSHAKE_CMD_ACCEPT,
+ HANDSHAKE_CMD_DONE,
+
+ __HANDSHAKE_CMD_MAX,
+ HANDSHAKE_CMD_MAX = (__HANDSHAKE_CMD_MAX - 1)
+};
+
+#define HANDSHAKE_MCGRP_NONE "none"
+#define HANDSHAKE_MCGRP_TLSHD "tlshd"
+
+#endif /* _UAPI_LINUX_HANDSHAKE_H */
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
index 74a8609fcb4d..0af23ec196d8 100644
--- a/include/uapi/linux/hash_info.h
+++ b/include/uapi/linux/hash_info.h
@@ -35,6 +35,9 @@ enum hash_algo {
HASH_ALGO_SM3_256,
HASH_ALGO_STREEBOG_256,
HASH_ALGO_STREEBOG_512,
+ HASH_ALGO_SHA3_256,
+ HASH_ALGO_SHA3_384,
+ HASH_ALGO_SHA3_512,
HASH_ALGO__LAST
};
diff --git a/include/uapi/linux/hid.h b/include/uapi/linux/hid.h
index b34492a87a8a..a4dcb34386e3 100644
--- a/include/uapi/linux/hid.h
+++ b/include/uapi/linux/hid.h
@@ -43,15 +43,29 @@
#define USB_INTERFACE_PROTOCOL_MOUSE 2
/*
+ * HID report types --- Ouch! HID spec says 1 2 3!
+ */
+
+enum hid_report_type {
+ HID_INPUT_REPORT = 0,
+ HID_OUTPUT_REPORT = 1,
+ HID_FEATURE_REPORT = 2,
+
+ HID_REPORT_TYPES,
+};
+
+/*
* HID class requests
*/
-#define HID_REQ_GET_REPORT 0x01
-#define HID_REQ_GET_IDLE 0x02
-#define HID_REQ_GET_PROTOCOL 0x03
-#define HID_REQ_SET_REPORT 0x09
-#define HID_REQ_SET_IDLE 0x0A
-#define HID_REQ_SET_PROTOCOL 0x0B
+enum hid_class_request {
+ HID_REQ_GET_REPORT = 0x01,
+ HID_REQ_GET_IDLE = 0x02,
+ HID_REQ_GET_PROTOCOL = 0x03,
+ HID_REQ_SET_REPORT = 0x09,
+ HID_REQ_SET_IDLE = 0x0A,
+ HID_REQ_SET_PROTOCOL = 0x0B,
+};
/*
* HID class descriptor types
diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
index 4913539e5bcc..33ebad81720a 100644
--- a/include/uapi/linux/hidraw.h
+++ b/include/uapi/linux/hidraw.h
@@ -40,6 +40,12 @@ struct hidraw_devinfo {
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
#define HIDIOCGRAWUNIQ(len) _IOC(_IOC_READ, 'H', 0x08, len)
+/* The first byte of SINPUT and GINPUT is the report number */
+#define HIDIOCSINPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x09, len)
+#define HIDIOCGINPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0A, len)
+/* The first byte of SOUTPUT and GOUTPUT is the report number */
+#define HIDIOCSOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0B, len)
+#define HIDIOCGOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len)
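A sketch of the report-number convention these ioctls share: the first byte
of the buffer selects the report (report number and buffer size illustrative):

#include <sys/ioctl.h>
#include <linux/hidraw.h>

static int get_feature(int fd)
{
	unsigned char buf[64] = { 0x09 }; /* report number goes first */

	return ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf);
}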
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h
index c7f6e7672cb5..07c3bfb67463 100644
--- a/include/uapi/linux/hsi/cs-protocol.h
+++ b/include/uapi/linux/hsi/cs-protocol.h
@@ -6,20 +6,6 @@
*
* Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
* Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef _CS_PROTOCOL_H
diff --git a/include/uapi/linux/hsi/hsi_char.h b/include/uapi/linux/hsi/hsi_char.h
index 91623b0398b1..5ef72f0daf94 100644
--- a/include/uapi/linux/hsi/hsi_char.h
+++ b/include/uapi/linux/hsi/hsi_char.h
@@ -5,20 +5,6 @@
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Andras Domokos <andras.domokos at nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __HSI_CHAR_H
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h
index 965e4d8606d8..1575d3ca6f0d 100644
--- a/include/uapi/linux/hw_breakpoint.h
+++ b/include/uapi/linux/hw_breakpoint.h
@@ -22,14 +22,4 @@ enum {
HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
};
-enum bp_type_idx {
- TYPE_INST = 0,
-#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
- TYPE_DATA = 0,
-#else
- TYPE_DATA = 1,
-#endif
- TYPE_MAX
-};
-
#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 6135d92e0d47..aaa502a7bff4 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -26,7 +26,7 @@
#ifndef _UAPI_HYPERV_H
#define _UAPI_HYPERV_H
-#include <linux/uuid.h>
+#include <linux/types.h>
/*
* Framework version for util services.
@@ -90,6 +90,17 @@ struct hv_vss_check_dm_info {
__u32 flags;
} __attribute__((packed));
+/*
+ * struct hv_vss_msg encodes the fields that the Linux VSS
+ * driver accesses. However, FREEZE messages from Hyper-V contain
+ * additional LUN information that Linux doesn't use and are not
+ * represented in struct hv_vss_msg. A received FREEZE message may
+ * be as large as 6,260 bytes, so the driver must allocate at least
+ * that much space, not sizeof(struct hv_vss_msg). Other messages
+ * such as AUTO_RECOVER may be as large as 12,500 bytes. However,
+ * because the Linux VSS driver responds that it doesn't support
+ * auto-recovery, it should not receive such messages.
+ */
struct hv_vss_msg {
union {
struct hv_vss_hdr vss_hdr;
diff --git a/include/uapi/linux/i2c-dev.h b/include/uapi/linux/i2c-dev.h
index 85f8047afcf2..1c4cec4ddd84 100644
--- a/include/uapi/linux/i2c-dev.h
+++ b/include/uapi/linux/i2c-dev.h
@@ -1,25 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- i2c-dev.h - i2c-bus driver, char device interface
-
- Copyright (C) 1995-97 Simon G. Vogl
- Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA.
-*/
+ * i2c-dev.h - I2C bus char device interface
+ *
+ * Copyright (C) 1995-97 Simon G. Vogl
+ * Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _UAPI_LINUX_I2C_DEV_H
#define _UAPI_LINUX_I2C_DEV_H
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index f71a1751cacf..92326ebde350 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -1,29 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/* ------------------------------------------------------------------------- */
-/* */
-/* i2c.h - definitions for the i2c-bus interface */
-/* */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-2000 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
-/* ------------------------------------------------------------------------- */
-
-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
- Frodo Looijaard <frodol@dds.nl> */
+/*
+ * i2c.h - definitions for the I2C bus interface
+ *
+ * Copyright (C) 1995-2000 Simon G. Vogl
+ * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
+ * Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _UAPI_LINUX_I2C_H
#define _UAPI_LINUX_I2C_H
@@ -32,18 +14,41 @@
/**
* struct i2c_msg - an I2C transaction segment beginning with START
- * @addr: Slave address, either seven or ten bits. When this is a ten
- * bit address, I2C_M_TEN must be set in @flags and the adapter
- * must support I2C_FUNC_10BIT_ADDR.
- * @flags: I2C_M_RD is handled by all adapters. No other flags may be
- * provided unless the adapter exported the relevant I2C_FUNC_*
- * flags through i2c_check_functionality().
- * @len: Number of data bytes in @buf being read from or written to the
- * I2C slave address. For read transactions where I2C_M_RECV_LEN
- * is set, the caller guarantees that this buffer can hold up to
- * 32 bytes in addition to the initial length byte sent by the
- * slave (plus, if used, the SMBus PEC); and this value will be
- * incremented by the number of block data bytes received.
+ *
+ * @addr: Slave address, either 7 or 10 bits. When this is a 10 bit address,
+ * %I2C_M_TEN must be set in @flags and the adapter must support
+ * %I2C_FUNC_10BIT_ADDR.
+ *
+ * @flags:
+ * Supported by all adapters:
+ * %I2C_M_RD: read data (from slave to master). Guaranteed to be 0x0001!
+ *
+ * Optional:
+ * %I2C_M_DMA_SAFE: the buffer of this message is DMA safe. Only makes sense
+ * in kernelspace, because userspace buffers are copied anyway
+ *
+ * Only if I2C_FUNC_10BIT_ADDR is set:
+ * %I2C_M_TEN: this is a 10 bit chip address
+ *
+ * Only if I2C_FUNC_SMBUS_READ_BLOCK_DATA is set:
+ * %I2C_M_RECV_LEN: message length will be first received byte
+ *
+ * Only if I2C_FUNC_NOSTART is set:
+ * %I2C_M_NOSTART: skip repeated start sequence
+ *
+ * Only if I2C_FUNC_PROTOCOL_MANGLING is set:
+ * %I2C_M_NO_RD_ACK: in a read message, master ACK/NACK bit is skipped
+ * %I2C_M_IGNORE_NAK: treat NACK from client as ACK
+ * %I2C_M_REV_DIR_ADDR: toggles the Rd/Wr bit
+ * %I2C_M_STOP: force a STOP condition after the message
+ *
+ * @len: Number of data bytes in @buf being read from or written to the I2C
+ * slave address. For read transactions where %I2C_M_RECV_LEN is set, the
+ * caller guarantees that this buffer can hold up to %I2C_SMBUS_BLOCK_MAX
+ * bytes in addition to the initial length byte sent by the slave (plus,
+ * if used, the SMBus PEC); and this value will be incremented by the number
+ * of block data bytes received.
+ *
* @buf: The buffer into which data is read, or from which it's written.
*
* An i2c_msg is the low level representation of one segment of an I2C
@@ -60,40 +65,36 @@
* group, it is followed by a STOP. Otherwise it is followed by the next
* @i2c_msg transaction segment, beginning with a (repeated) START.
*
- * Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then
+ * Alternatively, when the adapter supports %I2C_FUNC_PROTOCOL_MANGLING then
* passing certain @flags may have changed those standard protocol behaviors.
* Those flags are only for use with broken/nonconforming slaves, and with
- * adapters which are known to support the specific mangling options they
- * need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR).
+ * adapters which are known to support the specific mangling options they need.
*/
struct i2c_msg {
- __u16 addr; /* slave address */
+ __u16 addr;
__u16 flags;
-#define I2C_M_RD 0x0001 /* read data, from slave to master */
- /* I2C_M_RD is guaranteed to be 0x0001! */
-#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
-#define I2C_M_DMA_SAFE 0x0200 /* the buffer of this message is DMA safe */
- /* makes only sense in kernelspace */
- /* userspace buffers are copied anyway */
-#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
-#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
-#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
- __u16 len; /* msg length */
- __u8 *buf; /* pointer to msg data */
+#define I2C_M_RD 0x0001 /* guaranteed to be 0x0001! */
+#define I2C_M_TEN 0x0010 /* use only if I2C_FUNC_10BIT_ADDR */
+#define I2C_M_DMA_SAFE 0x0200 /* use only in kernel space */
+#define I2C_M_RECV_LEN 0x0400 /* use only if I2C_FUNC_SMBUS_READ_BLOCK_DATA */
+#define I2C_M_NO_RD_ACK 0x0800 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_IGNORE_NAK 0x1000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_REV_DIR_ADDR 0x2000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART 0x4000 /* use only if I2C_FUNC_NOSTART */
+#define I2C_M_STOP 0x8000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
+ __u16 len;
+ __u8 *buf;
};
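A sketch of a combined write-then-read transfer built from two i2c_msg
segments; it assumes the I2C_RDWR ioctl and struct i2c_rdwr_ioctl_data from
<linux/i2c-dev.h>, and the slave address is illustrative:

#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int read_reg(int fd, __u8 reg, __u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer);
}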
/* To determine what functionality is present */
#define I2C_FUNC_I2C 0x00000001
-#define I2C_FUNC_10BIT_ADDR 0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */
+#define I2C_FUNC_10BIT_ADDR 0x00000002 /* required for I2C_M_TEN */
+#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* required for I2C_M_IGNORE_NAK etc. */
#define I2C_FUNC_SMBUS_PEC 0x00000008
-#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
+#define I2C_FUNC_NOSTART 0x00000010 /* required for I2C_M_NOSTART */
#define I2C_FUNC_SLAVE 0x00000020
-#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
+#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 or later */
#define I2C_FUNC_SMBUS_QUICK 0x00010000
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000
@@ -102,11 +103,11 @@ struct i2c_msg {
#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000
#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000
#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000
-#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000
+#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 /* required for I2C_M_RECV_LEN */
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
-#define I2C_FUNC_SMBUS_HOST_NOTIFY 0x10000000
+#define I2C_FUNC_SMBUS_HOST_NOTIFY 0x10000000 /* SMBus 2.0 or later */
#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
I2C_FUNC_SMBUS_WRITE_BYTE)
@@ -128,6 +129,11 @@ struct i2c_msg {
I2C_FUNC_SMBUS_I2C_BLOCK | \
I2C_FUNC_SMBUS_PEC)
+/* if I2C_M_RECV_LEN is also supported */
+#define I2C_FUNC_SMBUS_EMUL_ALL (I2C_FUNC_SMBUS_EMUL | \
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL)
+
/*
* Data for SMBus Messages
*/
diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h
index fb169a50895e..163c0998aec9 100644
--- a/include/uapi/linux/icmp.h
+++ b/include/uapi/linux/icmp.h
@@ -20,6 +20,8 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <linux/if.h>
+#include <linux/in6.h>
#define ICMP_ECHOREPLY 0 /* Echo Reply */
#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */
@@ -66,6 +68,23 @@
#define ICMP_EXC_TTL 0 /* TTL count exceeded */
#define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */
+/* Codes for EXT_ECHO (PROBE) */
+#define ICMP_EXT_ECHO 42
+#define ICMP_EXT_ECHOREPLY 43
+#define ICMP_EXT_CODE_MAL_QUERY 1 /* Malformed Query */
+#define ICMP_EXT_CODE_NO_IF 2 /* No such Interface */
+#define ICMP_EXT_CODE_NO_TABLE_ENT 3 /* No such Table Entry */
+#define ICMP_EXT_CODE_MULT_IFS 4 /* Multiple Interfaces Satisfy Query */
+
+/* Constants for EXT_ECHO (PROBE) */
+#define ICMP_EXT_ECHOREPLY_ACTIVE (1 << 2)/* active bit in reply message */
+#define ICMP_EXT_ECHOREPLY_IPV4 (1 << 1)/* ipv4 bit in reply message */
+#define ICMP_EXT_ECHOREPLY_IPV6 1 /* ipv6 bit in reply message */
+#define ICMP_EXT_ECHO_CTYPE_NAME 1
+#define ICMP_EXT_ECHO_CTYPE_INDEX 2
+#define ICMP_EXT_ECHO_CTYPE_ADDR 3
+#define ICMP_AFI_IP 1 /* Address Family Identifier for ipv4 */
+#define ICMP_AFI_IP6 2 /* Address Family Identifier for ipv6 */
struct icmphdr {
__u8 type;
@@ -118,4 +137,26 @@ struct icmp_extobj_hdr {
__u8 class_type;
};
+/* RFC 8335: 2.1 Header for c-type 3 payload */
+struct icmp_ext_echo_ctype3_hdr {
+ __be16 afi;
+ __u8 addrlen;
+ __u8 reserved;
+};
+
+/* RFC 8335: 2.1 Interface Identification Object */
+struct icmp_ext_echo_iio {
+ struct icmp_extobj_hdr extobj_hdr;
+ union {
+ char name[IFNAMSIZ];
+ __be32 ifindex;
+ struct {
+ struct icmp_ext_echo_ctype3_hdr ctype3_hdr;
+ union {
+ __be32 ipv4_addr;
+ struct in6_addr ipv6_addr;
+ } ip_addr;
+ } addr;
+ } ident;
+};
#endif /* _UAPI_LINUX_ICMP_H */
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index c1661febc2dc..ecaece3af38d 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -138,7 +138,11 @@ struct icmp6hdr {
#define ICMPV6_HDR_FIELD 0
#define ICMPV6_UNK_NEXTHDR 1
#define ICMPV6_UNK_OPTION 2
+#define ICMPV6_HDR_INCOMP 3
+/* Codes for EXT_ECHO (PROBE) */
+#define ICMPV6_EXT_ECHO_REQUEST 160
+#define ICMPV6_EXT_ECHO_REPLY 161
/*
* constants for (set|get)sockopt
*/
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index fdcdfe414223..3d1987e1bb2d 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _USR_IDXD_H_
#define _USR_IDXD_H_
@@ -9,6 +9,34 @@
#include <stdint.h>
#endif
+/* Driver command error status */
+enum idxd_scmd_stat {
+ IDXD_SCMD_DEV_ENABLED = 0x80000010,
+ IDXD_SCMD_DEV_NOT_ENABLED = 0x80000020,
+ IDXD_SCMD_WQ_ENABLED = 0x80000021,
+ IDXD_SCMD_DEV_DMA_ERR = 0x80020000,
+ IDXD_SCMD_WQ_NO_GRP = 0x80030000,
+ IDXD_SCMD_WQ_NO_NAME = 0x80040000,
+ IDXD_SCMD_WQ_NO_SVM = 0x80050000,
+ IDXD_SCMD_WQ_NO_THRESH = 0x80060000,
+ IDXD_SCMD_WQ_PORTAL_ERR = 0x80070000,
+ IDXD_SCMD_WQ_RES_ALLOC_ERR = 0x80080000,
+ IDXD_SCMD_PERCPU_ERR = 0x80090000,
+ IDXD_SCMD_DMA_CHAN_ERR = 0x800a0000,
+ IDXD_SCMD_CDEV_ERR = 0x800b0000,
+ IDXD_SCMD_WQ_NO_SWQ_SUPPORT = 0x800c0000,
+ IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000,
+ IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
+ IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
+ IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
+ IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
+ IDXD_SCMD_DEV_EVL_ERR = 0x80120000,
+ IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000,
+};
+
+#define IDXD_SCMD_SOFTERR_MASK 0x80000000
+#define IDXD_SCMD_SOFTERR_SHIFT 16
+
/* Descriptor flags */
#define IDXD_OP_FLAG_FENCE 0x0001
#define IDXD_OP_FLAG_BOF 0x0002
@@ -26,6 +54,14 @@
#define IDXD_OP_FLAG_DRDBK 0x4000
#define IDXD_OP_FLAG_DSTS 0x8000
+/* IAX */
+#define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000
+#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000
+#define IDXD_OP_FLAG_SRC2_STS 0x100000
+#define IDXD_OP_FLAG_CRC_RFC3720 0x200000
+
/* Opcode */
enum dsa_opcode {
DSA_OPCODE_NOOP = 0,
@@ -38,15 +74,37 @@ enum dsa_opcode {
DSA_OPCODE_CR_DELTA,
DSA_OPCODE_AP_DELTA,
DSA_OPCODE_DUALCAST,
+ DSA_OPCODE_TRANSL_FETCH,
DSA_OPCODE_CRCGEN = 0x10,
DSA_OPCODE_COPY_CRC,
DSA_OPCODE_DIF_CHECK,
DSA_OPCODE_DIF_INS,
DSA_OPCODE_DIF_STRP,
DSA_OPCODE_DIF_UPDT,
+ DSA_OPCODE_DIX_GEN = 0x17,
DSA_OPCODE_CFLUSH = 0x20,
};
+enum iax_opcode {
+ IAX_OPCODE_NOOP = 0,
+ IAX_OPCODE_DRAIN = 2,
+ IAX_OPCODE_MEMMOVE,
+ IAX_OPCODE_DECOMPRESS = 0x42,
+ IAX_OPCODE_COMPRESS,
+ IAX_OPCODE_CRC64,
+ IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
+ IAX_OPCODE_ZERO_DECOMP_16,
+ IAX_OPCODE_ZERO_COMP_32 = 0x4c,
+ IAX_OPCODE_ZERO_COMP_16,
+ IAX_OPCODE_SCAN = 0x50,
+ IAX_OPCODE_SET_MEMBER,
+ IAX_OPCODE_EXTRACT,
+ IAX_OPCODE_SELECT,
+ IAX_OPCODE_RLE_BURST,
+ IAX_OPCODE_FIND_UNIQUE,
+ IAX_OPCODE_EXPAND,
+};
+
/* Completion record status */
enum dsa_completion_status {
DSA_COMP_NONE = 0,
@@ -78,10 +136,44 @@ enum dsa_completion_status {
DSA_COMP_HW_ERR1,
DSA_COMP_HW_ERR_DRB,
DSA_COMP_TRANSLATION_FAIL,
+ DSA_COMP_DRAIN_EVL = 0x26,
+ DSA_COMP_BATCH_EVL_ERR,
+};
+
+enum iax_completion_status {
+ IAX_COMP_NONE = 0,
+ IAX_COMP_SUCCESS,
+ IAX_COMP_PAGE_FAULT_IR = 0x04,
+ IAX_COMP_ANALYTICS_ERROR = 0x0a,
+ IAX_COMP_OUTBUF_OVERFLOW,
+ IAX_COMP_BAD_OPCODE = 0x10,
+ IAX_COMP_INVALID_FLAGS,
+ IAX_COMP_NOZERO_RESERVE,
+ IAX_COMP_INVALID_SIZE,
+ IAX_COMP_OVERLAP_BUFFERS = 0x16,
+ IAX_COMP_INT_HANDLE_INVAL = 0x19,
+ IAX_COMP_CRA_XLAT,
+ IAX_COMP_CRA_ALIGN,
+ IAX_COMP_ADDR_ALIGN,
+ IAX_COMP_PRIV_BAD,
+ IAX_COMP_TRAFFIC_CLASS_CONF,
+ IAX_COMP_PFAULT_RDBA,
+ IAX_COMP_HW_ERR1,
+ IAX_COMP_HW_ERR_DRB,
+ IAX_COMP_TRANSLATION_FAIL,
+ IAX_COMP_PRS_TIMEOUT,
+ IAX_COMP_WATCHDOG,
+ IAX_COMP_INVALID_COMP_FLAG = 0x30,
+ IAX_COMP_INVALID_FILTER_FLAG,
+ IAX_COMP_INVALID_INPUT_SIZE,
+ IAX_COMP_INVALID_NUM_ELEMS,
+ IAX_COMP_INVALID_SRC1_WIDTH,
+ IAX_COMP_INVALID_INVERT_OUT,
};
#define DSA_COMP_STATUS_MASK 0x7f
#define DSA_COMP_STATUS_WRITE 0x80
+#define DSA_COMP_STATUS(status) ((status) & DSA_COMP_STATUS_MASK)
struct dsa_hw_desc {
uint32_t pasid:20;
@@ -95,6 +187,8 @@ struct dsa_hw_desc {
uint64_t rdback_addr;
uint64_t pattern;
uint64_t desc_list_addr;
+ uint64_t pattern_lower;
+ uint64_t transl_fetch_addr;
};
union {
uint64_t dst_addr;
@@ -105,6 +199,7 @@ struct dsa_hw_desc {
union {
uint32_t xfer_size;
uint32_t desc_count;
+ uint32_t region_size;
};
uint16_t int_handle;
uint16_t rsvd1;
@@ -159,10 +254,52 @@ struct dsa_hw_desc {
uint16_t dest_app_tag_seed;
};
+ /* Fill */
+ uint64_t pattern_upper;
+
+ /* Translation fetch */
+ struct {
+ uint64_t transl_fetch_res;
+ uint32_t region_stride;
+ };
+
+ /* DIX generate */
+ struct {
+ uint8_t dix_gen_res;
+ uint8_t dest_dif_flags;
+ uint8_t dif_flags;
+ uint8_t dix_gen_res2[13];
+ uint32_t ref_tag_seed;
+ uint16_t app_tag_mask;
+ uint16_t app_tag_seed;
+ };
+
uint8_t op_specific[24];
};
} __attribute__((packed));
+struct iax_hw_desc {
+ uint32_t pasid:20;
+ uint32_t rsvd:11;
+ uint32_t priv:1;
+ uint32_t flags:24;
+ uint32_t opcode:8;
+ uint64_t completion_addr;
+ uint64_t src1_addr;
+ uint64_t dst_addr;
+ uint32_t src1_size;
+ uint16_t int_handle;
+ union {
+ uint16_t compr_flags;
+ uint16_t decompr_flags;
+ };
+ uint64_t src2_addr;
+ uint32_t max_dst_size;
+ uint32_t src2_size;
+ uint32_t filter_flags;
+ uint32_t num_inputs;
+} __attribute__((packed));
+
struct dsa_raw_desc {
uint64_t field[8];
} __attribute__((packed));
@@ -177,8 +314,12 @@ struct dsa_completion_record {
uint8_t result;
uint8_t dif_status;
};
- uint16_t rsvd;
- uint32_t bytes_completed;
+ uint8_t fault_info;
+ uint8_t rsvd;
+ union {
+ uint32_t bytes_completed;
+ uint32_t descs_completed;
+ };
uint64_t fault_addr;
union {
/* common record */
@@ -187,8 +328,8 @@ struct dsa_completion_record {
uint32_t rsvd2:8;
};
- uint16_t delta_rec_size;
- uint16_t crc_val;
+ uint32_t delta_rec_size;
+ uint64_t crc_val;
/* DIF check & strip */
struct {
@@ -215,6 +356,14 @@ struct dsa_completion_record {
uint16_t dif_upd_dest_app_tag;
};
+ /* DIX generate */
+ struct {
+ uint64_t dix_gen_res;
+ uint32_t dix_ref_tag;
+ uint16_t dix_app_tag_mask;
+ uint16_t dix_app_tag;
+ };
+
uint8_t op_specific[16];
};
} __attribute__((packed));
@@ -223,4 +372,28 @@ struct dsa_raw_completion_record {
uint64_t field[4];
} __attribute__((packed));
+struct iax_completion_record {
+ volatile uint8_t status;
+ uint8_t error_code;
+ uint8_t fault_info;
+ uint8_t rsvd;
+ uint32_t bytes_completed;
+ uint64_t fault_addr;
+ uint32_t invalid_flags;
+ uint32_t rsvd2;
+ uint32_t output_size;
+ uint8_t output_bits;
+ uint8_t rsvd3;
+ uint16_t xor_csum;
+ uint32_t crc;
+ uint32_t min;
+ uint32_t max;
+ uint32_t sum;
+ uint64_t rsvd4[2];
+} __attribute__((packed));
+
+struct iax_raw_completion_record {
+ uint64_t field[8];
+} __attribute__((packed));
+
#endif
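
To illustrate the two decoding conventions this file adds (a sketch, not from the patch): command status words split into a soft-error flag plus a code via the SOFTERR mask/shift, and completion statuses are compared after masking off the status-write bit. DSA_COMP_SUCCESS is assumed to be the success value of enum dsa_completion_status:

    #include <linux/idxd.h>

    /* Sketch: extract the error code from an idxd driver command status;
     * returns 0 when the soft-error bit is not set. */
    static inline __u32 idxd_scmd_error_code(__u32 stat)
    {
            if (!(stat & IDXD_SCMD_SOFTERR_MASK))
                    return 0;
            return (stat & ~IDXD_SCMD_SOFTERR_MASK) >> IDXD_SCMD_SOFTERR_SHIFT;
    }

    /* Sketch: test a completion record for success; DSA_COMP_STATUS()
     * masks off the status-write bit (0x80). */
    static inline int dsa_comp_ok(const struct dsa_completion_record *comp)
    {
            return DSA_COMP_STATUS(comp->status) == DSA_COMP_SUCCESS;
    }
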
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index dfcf3ce0097f..1c392dd95a5e 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -33,8 +33,9 @@ enum {
IFA_CACHEINFO,
IFA_MULTICAST,
IFA_FLAGS,
- IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
+ IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
IFA_TARGET_NETNSID,
+ IFA_PROTO, /* u8, address protocol */
__IFA_MAX,
};
@@ -69,4 +70,10 @@ struct ifa_cacheinfo {
#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg))
#endif
+/* ifa_proto */
+#define IFAPROT_UNSPEC 0
+#define IFAPROT_KERNEL_LO 1 /* loopback */
+#define IFAPROT_KERNEL_RA 2 /* set by kernel from router announcement */
+#define IFAPROT_KERNEL_LL 3 /* link-local set by kernel */
+
#endif
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index bc2bcdec377b..0824fbc026a1 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -24,9 +24,25 @@ struct sockaddr_alg {
__u8 salg_name[64];
};
+/*
+ * Linux v4.12 and later removed the 64-byte limit on salg_name[]; it's now an
+ * arbitrary-length field. We had to keep the original struct above for source
+ * compatibility with existing userspace programs, though. Use the new struct
+ * below if support for very long algorithm names is needed. To do this,
+ * allocate 'sizeof(struct sockaddr_alg_new) + strlen(algname) + 1' bytes, and
+ * copy algname (including the null terminator) into salg_name.
+ */
+struct sockaddr_alg_new {
+ __u16 salg_family;
+ __u8 salg_type[14];
+ __u32 salg_feat;
+ __u32 salg_mask;
+ __u8 salg_name[];
+};
+
struct af_alg_iv {
__u32 ivlen;
- __u8 iv[0];
+ __u8 iv[];
};
/* Socket options */
@@ -35,6 +51,8 @@ struct af_alg_iv {
#define ALG_SET_OP 3
#define ALG_SET_AEAD_ASSOCLEN 4
#define ALG_SET_AEAD_AUTHSIZE 5
+#define ALG_SET_DRBG_ENTROPY 6
+#define ALG_SET_KEY_BY_KEY_SERIAL 7
/* Operations */
#define ALG_OP_DECRYPT 0
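
The sockaddr_alg_new comment above prescribes an allocation recipe; a minimal sketch of following it (assuming glibc and AF_ALG from <sys/socket.h>; salg_alloc is a hypothetical helper name):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    /* Sketch: allocate a sockaddr_alg_new sized for an arbitrary-length
     * algorithm name, copying the name including its null terminator. */
    static struct sockaddr_alg_new *salg_alloc(const char *type,
                                               const char *name)
    {
            size_t len = sizeof(struct sockaddr_alg_new) + strlen(name) + 1;
            struct sockaddr_alg_new *sa = calloc(1, len);

            if (!sa)
                    return NULL;
            sa->salg_family = AF_ALG;
            strncpy((char *)sa->salg_type, type, sizeof(sa->salg_type) - 1);
            memcpy(sa->salg_name, name, strlen(name) + 1);
            return sa;
    }
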
diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h
index 683878036d76..b122cfac7128 100644
--- a/include/uapi/linux/if_arcnet.h
+++ b/include/uapi/linux/if_arcnet.h
@@ -60,7 +60,7 @@ struct arc_rfc1201 {
__u8 proto; /* protocol ID field - varies */
__u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
- __u8 payload[0]; /* space remaining in packet (504 bytes)*/
+ __u8 payload[]; /* space remaining in packet (504 bytes)*/
};
#define RFC1201_HDR_SIZE 4
@@ -69,7 +69,7 @@ struct arc_rfc1201 {
*/
struct arc_rfc1051 {
__u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
- __u8 payload[0]; /* 507 bytes */
+ __u8 payload[]; /* 507 bytes */
};
#define RFC1051_HDR_SIZE 1
@@ -80,7 +80,7 @@ struct arc_rfc1051 {
struct arc_eth_encap {
__u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
- __u8 payload[0]; /* 493 bytes */
+ __u8 payload[]; /* 493 bytes */
};
#define ETH_ENCAP_HDR_SIZE 14
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index c3cc5a9e5eaf..4783af9fe520 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -54,6 +54,7 @@
#define ARPHRD_X25 271 /* CCITT X.25 */
#define ARPHRD_HWX25 272 /* Boards with X.25 in firmware */
#define ARPHRD_CAN 280 /* Controller Area Network */
+#define ARPHRD_MCTP 290
#define ARPHRD_PPP 512
#define ARPHRD_CISCO 513 /* Cisco HDLC */
#define ARPHRD_HDLC ARPHRD_CISCO
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index 45f3750aa861..d174914a837d 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -94,6 +94,7 @@
#define BOND_XMIT_POLICY_LAYER23 2 /* layer 2+3 (IP ^ MAC) */
#define BOND_XMIT_POLICY_ENCAP23 3 /* encapsulated layer 2+3 */
#define BOND_XMIT_POLICY_ENCAP34 4 /* encapsulated layer 3+4 */
+#define BOND_XMIT_POLICY_VLAN_SRCMAC 5 /* vlan + source MAC */
/* 802.3ad port state definitions (43.4.2.2 in the 802.3ad standard) */
#define LACP_STATE_LACP_ACTIVITY 0x1
@@ -152,14 +153,3 @@ enum {
#define BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1)
#endif /* _LINUX_IF_BONDING_H */
-
-/*
- * Local variables:
- * version-control: t
- * kept-new-versions: 5
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
-
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index c1227aecd38f..a5b743a2f775 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -121,6 +121,8 @@ enum {
IFLA_BRIDGE_VLAN_INFO,
IFLA_BRIDGE_VLAN_TUNNEL_INFO,
IFLA_BRIDGE_MRP,
+ IFLA_BRIDGE_CFM,
+ IFLA_BRIDGE_MST,
__IFLA_BRIDGE_MAX,
};
#define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
@@ -328,6 +330,145 @@ struct br_mrp_start_in_test {
__u16 in_id;
};
+enum {
+ IFLA_BRIDGE_CFM_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_CREATE,
+ IFLA_BRIDGE_CFM_MEP_DELETE,
+ IFLA_BRIDGE_CFM_MEP_CONFIG,
+ IFLA_BRIDGE_CFM_CC_CONFIG,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE,
+ IFLA_BRIDGE_CFM_CC_RDI,
+ IFLA_BRIDGE_CFM_CC_CCM_TX,
+ IFLA_BRIDGE_CFM_MEP_CREATE_INFO,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_INFO,
+ IFLA_BRIDGE_CFM_CC_CONFIG_INFO,
+ IFLA_BRIDGE_CFM_CC_RDI_INFO,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_INFO,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_INFO,
+ IFLA_BRIDGE_CFM_MEP_STATUS_INFO,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_INFO,
+ __IFLA_BRIDGE_CFM_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MAX (__IFLA_BRIDGE_CFM_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_CREATE_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE,
+ IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN,
+ IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION,
+ IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX,
+ __IFLA_BRIDGE_CFM_MEP_CREATE_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_CREATE_MAX (__IFLA_BRIDGE_CFM_MEP_CREATE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_DELETE_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE,
+ __IFLA_BRIDGE_CFM_MEP_DELETE_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_DELETE_MAX (__IFLA_BRIDGE_CFM_MEP_DELETE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_CONFIG_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL,
+ IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID,
+ __IFLA_BRIDGE_CFM_MEP_CONFIG_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_CONFIG_MAX (__IFLA_BRIDGE_CFM_MEP_CONFIG_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_CONFIG_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE,
+ IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL,
+ IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID,
+ __IFLA_BRIDGE_CFM_CC_CONFIG_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_CONFIG_MAX (__IFLA_BRIDGE_CFM_CC_CONFIG_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_PEER_MEPID,
+ __IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX (__IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_RDI_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_RDI_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_RDI_RDI,
+ __IFLA_BRIDGE_CFM_CC_RDI_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_RDI_MAX (__IFLA_BRIDGE_CFM_CC_RDI_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_CCM_TX_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV,
+ IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE,
+ __IFLA_BRIDGE_CFM_CC_CCM_TX_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_CCM_TX_MAX (__IFLA_BRIDGE_CFM_CC_CCM_TX_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_MEP_STATUS_UNSPEC,
+ IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE,
+ IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN,
+ IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN,
+ IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN,
+ __IFLA_BRIDGE_CFM_MEP_STATUS_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_MEP_STATUS_MAX (__IFLA_BRIDGE_CFM_MEP_STATUS_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_UNSPEC,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN,
+ __IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX,
+};
+
+#define IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX (__IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MST_UNSPEC,
+ IFLA_BRIDGE_MST_ENTRY,
+ __IFLA_BRIDGE_MST_MAX,
+};
+#define IFLA_BRIDGE_MST_MAX (__IFLA_BRIDGE_MST_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MST_ENTRY_UNSPEC,
+ IFLA_BRIDGE_MST_ENTRY_MSTI,
+ IFLA_BRIDGE_MST_ENTRY_STATE,
+ __IFLA_BRIDGE_MST_ENTRY_MAX,
+};
+#define IFLA_BRIDGE_MST_ENTRY_MAX (__IFLA_BRIDGE_MST_ENTRY_MAX - 1)
+
struct bridge_stp_xstats {
__u64 transition_blk;
__u64 transition_fwd;
@@ -354,16 +495,22 @@ enum {
/* flags used in BRIDGE_VLANDB_DUMP_FLAGS attribute to affect dumps */
#define BRIDGE_VLANDB_DUMPF_STATS (1 << 0) /* Include stats in the dump */
+#define BRIDGE_VLANDB_DUMPF_GLOBAL (1 << 1) /* Dump global vlan options only */
/* Bridge vlan RTM attributes
* [BRIDGE_VLANDB_ENTRY] = {
* [BRIDGE_VLANDB_ENTRY_INFO]
* ...
* }
+ * [BRIDGE_VLANDB_GLOBAL_OPTIONS] = {
+ * [BRIDGE_VLANDB_GOPTS_ID]
+ * ...
+ * }
*/
enum {
BRIDGE_VLANDB_UNSPEC,
BRIDGE_VLANDB_ENTRY,
+ BRIDGE_VLANDB_GLOBAL_OPTIONS,
__BRIDGE_VLANDB_MAX,
};
#define BRIDGE_VLANDB_MAX (__BRIDGE_VLANDB_MAX - 1)
@@ -375,6 +522,10 @@ enum {
BRIDGE_VLANDB_ENTRY_STATE,
BRIDGE_VLANDB_ENTRY_TUNNEL_INFO,
BRIDGE_VLANDB_ENTRY_STATS,
+ BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
+ BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS,
+ BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS,
+ BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS,
__BRIDGE_VLANDB_ENTRY_MAX,
};
#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
@@ -413,6 +564,30 @@ enum {
};
#define BRIDGE_VLANDB_STATS_MAX (__BRIDGE_VLANDB_STATS_MAX - 1)
+enum {
+ BRIDGE_VLANDB_GOPTS_UNSPEC,
+ BRIDGE_VLANDB_GOPTS_ID,
+ BRIDGE_VLANDB_GOPTS_RANGE,
+ BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
+ BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
+ BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
+ BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
+ BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
+ BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
+ BRIDGE_VLANDB_GOPTS_PAD,
+ BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
+ BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS,
+ BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE,
+ BRIDGE_VLANDB_GOPTS_MSTI,
+ __BRIDGE_VLANDB_GOPTS_MAX
+};
+#define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1)
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
@@ -455,10 +630,38 @@ enum {
enum {
MDBA_MDB_EATTR_UNSPEC,
MDBA_MDB_EATTR_TIMER,
+ MDBA_MDB_EATTR_SRC_LIST,
+ MDBA_MDB_EATTR_GROUP_MODE,
+ MDBA_MDB_EATTR_SOURCE,
+ MDBA_MDB_EATTR_RTPROT,
+ MDBA_MDB_EATTR_DST,
+ MDBA_MDB_EATTR_DST_PORT,
+ MDBA_MDB_EATTR_VNI,
+ MDBA_MDB_EATTR_IFINDEX,
+ MDBA_MDB_EATTR_SRC_VNI,
__MDBA_MDB_EATTR_MAX
};
#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
+/* per mdb entry source */
+enum {
+ MDBA_MDB_SRCLIST_UNSPEC,
+ MDBA_MDB_SRCLIST_ENTRY,
+ __MDBA_MDB_SRCLIST_MAX
+};
+#define MDBA_MDB_SRCLIST_MAX (__MDBA_MDB_SRCLIST_MAX - 1)
+
+/* per mdb entry per source attributes
+ * these are embedded in MDBA_MDB_SRCLIST_ENTRY
+ */
+enum {
+ MDBA_MDB_SRCATTR_UNSPEC,
+ MDBA_MDB_SRCATTR_ADDRESS,
+ MDBA_MDB_SRCATTR_TIMER,
+ __MDBA_MDB_SRCATTR_MAX
+};
+#define MDBA_MDB_SRCATTR_MAX (__MDBA_MDB_SRCATTR_MAX - 1)
+
/* multicast router types */
enum {
MDB_RTR_TYPE_DISABLED,
@@ -479,6 +682,9 @@ enum {
MDBA_ROUTER_PATTR_UNSPEC,
MDBA_ROUTER_PATTR_TIMER,
MDBA_ROUTER_PATTR_TYPE,
+ MDBA_ROUTER_PATTR_INET_TIMER,
+ MDBA_ROUTER_PATTR_INET6_TIMER,
+ MDBA_ROUTER_PATTR_VID,
__MDBA_ROUTER_PATTR_MAX
};
#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1)
@@ -495,12 +701,15 @@ struct br_mdb_entry {
__u8 state;
#define MDB_FLAGS_OFFLOAD (1 << 0)
#define MDB_FLAGS_FAST_LEAVE (1 << 1)
+#define MDB_FLAGS_STAR_EXCL (1 << 2)
+#define MDB_FLAGS_BLOCKED (1 << 3)
__u8 flags;
__u16 vid;
struct {
union {
__be32 ip4;
struct in6_addr ip6;
+ unsigned char mac_addr[ETH_ALEN];
} u;
__be16 proto;
} addr;
@@ -509,10 +718,68 @@ struct br_mdb_entry {
enum {
MDBA_SET_ENTRY_UNSPEC,
MDBA_SET_ENTRY,
+ MDBA_SET_ENTRY_ATTRS,
__MDBA_SET_ENTRY_MAX,
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* [MDBA_GET_ENTRY] = {
+ * struct br_mdb_entry
+ * [MDBA_GET_ENTRY_ATTRS] = {
+ * [MDBE_ATTR_SOURCE]
+ * struct in_addr / struct in6_addr
+ * [MDBE_ATTR_SRC_VNI]
+ * u32
+ * }
+ * }
+ */
+enum {
+ MDBA_GET_ENTRY_UNSPEC,
+ MDBA_GET_ENTRY,
+ MDBA_GET_ENTRY_ATTRS,
+ __MDBA_GET_ENTRY_MAX,
+};
+#define MDBA_GET_ENTRY_MAX (__MDBA_GET_ENTRY_MAX - 1)
+
+/* [MDBA_SET_ENTRY_ATTRS] = {
+ * [MDBE_ATTR_xxx]
+ * ...
+ * }
+ */
+enum {
+ MDBE_ATTR_UNSPEC,
+ MDBE_ATTR_SOURCE,
+ MDBE_ATTR_SRC_LIST,
+ MDBE_ATTR_GROUP_MODE,
+ MDBE_ATTR_RTPROT,
+ MDBE_ATTR_DST,
+ MDBE_ATTR_DST_PORT,
+ MDBE_ATTR_VNI,
+ MDBE_ATTR_IFINDEX,
+ MDBE_ATTR_SRC_VNI,
+ MDBE_ATTR_STATE_MASK,
+ __MDBE_ATTR_MAX,
+};
+#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
+
+/* per mdb entry source */
+enum {
+ MDBE_SRC_LIST_UNSPEC,
+ MDBE_SRC_LIST_ENTRY,
+ __MDBE_SRC_LIST_MAX,
+};
+#define MDBE_SRC_LIST_MAX (__MDBE_SRC_LIST_MAX - 1)
+
+/* per mdb entry per source attributes
+ * these are embedded in MDBE_SRC_LIST_ENTRY
+ */
+enum {
+ MDBE_SRCATTR_UNSPEC,
+ MDBE_SRCATTR_ADDRESS,
+ __MDBE_SRCATTR_MAX,
+};
+#define MDBE_SRCATTR_MAX (__MDBE_SRCATTR_MAX - 1)
+
/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
enum {
BRIDGE_XSTATS_UNSPEC,
@@ -554,12 +821,15 @@ struct br_mcast_stats {
/* bridge boolean options
* BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
+ * BR_BOOLOPT_MCAST_VLAN_SNOOPING - control vlan multicast snooping
*
* IMPORTANT: if adding a new option do not forget to handle
* it in br_boolopt_toggle/get and bridge sysfs
*/
enum br_boolopt_id {
BR_BOOLOPT_NO_LL_LEARN,
+ BR_BOOLOPT_MCAST_VLAN_SNOOPING,
+ BR_BOOLOPT_MST_ENABLE,
BR_BOOLOPT_MAX
};
@@ -572,4 +842,17 @@ struct br_boolopt_multi {
__u32 optval;
__u32 optmask;
};
+
+enum {
+ BRIDGE_QUERIER_UNSPEC,
+ BRIDGE_QUERIER_IP_ADDRESS,
+ BRIDGE_QUERIER_IP_PORT,
+ BRIDGE_QUERIER_IP_OTHER_TIMER,
+ BRIDGE_QUERIER_PAD,
+ BRIDGE_QUERIER_IPV6_ADDRESS,
+ BRIDGE_QUERIER_IPV6_PORT,
+ BRIDGE_QUERIER_IPV6_OTHER_TIMER,
+ __BRIDGE_QUERIER_MAX
+};
+#define BRIDGE_QUERIER_MAX (__BRIDGE_QUERIER_MAX - 1)
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
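
A sketch of the boolean-option mechanism the new BR_BOOLOPT_* values plug into (not from the patch): each option is toggled by pairing a value bit with a mask bit in struct br_boolopt_multi, which is carried in the IFLA_BR_MULTI_BOOLOPT netlink attribute:

    #include <linux/if_bridge.h>

    /* Sketch: enable MST while leaving the other boolean options as-is. */
    struct br_boolopt_multi bm = {
            .optval  = 1 << BR_BOOLOPT_MST_ENABLE,
            .optmask = 1 << BR_BOOLOPT_MST_ENABLE,
    };
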
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index d6de2b167448..69e0457eb200 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -86,7 +86,10 @@
* over Ethernet
*/
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#define ETH_P_PROFINET 0x8892 /* PROFINET */
+#define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
+#define ETH_P_ETHERCAT 0x88A4 /* EtherCAT */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
#define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */
@@ -99,6 +102,7 @@
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
#define ETH_P_NCSI 0x88F8 /* NCSI protocol */
#define ETH_P_PRP 0x88FB /* IEC 62439-3 PRP/HSRv0 */
+#define ETH_P_CFM 0x8902 /* Connectivity Fault Management */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_IBOE 0x8915 /* Infiniband over Ethernet */
#define ETH_P_TDLS 0x890D /* TDLS */
@@ -112,10 +116,11 @@
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_DSA_8021Q 0xDADB /* Fake VLAN Header for DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DSA_A5PSW 0xE001 /* A5PSW Tag Value [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
-#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is more than this value
* then the frame is Ethernet II. Else it is 802.3 */
/*
@@ -133,6 +138,7 @@
#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */
#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/
+#define ETH_P_CANXL 0x000E /* CANXL: eXtended frame Length */
#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
@@ -150,6 +156,9 @@
#define ETH_P_MAP 0x00F9 /* Qualcomm multiplexing and
* aggregation protocol
*/
+#define ETH_P_MCTP 0x00FA /* Management component transport
+ * protocol packets
+ */
/*
* This is an Ethernet frame header.
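
The corrected ETH_P_802_3_MIN comment above describes the classic EtherType/length discrimination rule; in code (a sketch):

    #include <arpa/inet.h>
    #include <linux/if_ether.h>

    /* Sketch: a type/length field of ETH_P_802_3_MIN or above marks an
     * Ethernet II frame; smaller values are 802.3 length fields. */
    static int is_ethernet_ii(__be16 h_proto)
    {
            return ntohs(h_proto) >= ETH_P_802_3_MIN;
    }
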
diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h
index 7239aa9c0766..8df2d9934bcd 100644
--- a/include/uapi/linux/if_fddi.h
+++ b/include/uapi/linux/if_fddi.h
@@ -9,7 +9,7 @@
* Version: @(#)if_fddi.h 1.0.3 Oct 6 2018
*
* Author: Lawrence V. Stefani, <stefani@yahoo.com>
- * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org>
+ * Maintainer: Maciej W. Rozycki, <macro@orcam.me.uk>
*
* if_fddi.h is based on previous if_ether.h and if_tr.h work by
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
diff --git a/include/uapi/linux/if_frad.h b/include/uapi/linux/if_frad.h
deleted file mode 100644
index 3c6ee85f6262..000000000000
--- a/include/uapi/linux/if_frad.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _UAPI_FRAD_H_
-#define _UAPI_FRAD_H_
-
-#include <linux/if.h>
-
-/* Structures and constants associated with the DLCI device driver */
-
-struct dlci_add
-{
- char devname[IFNAMSIZ];
- short dlci;
-};
-
-#define DLCI_GET_CONF (SIOCDEVPRIVATE + 2)
-#define DLCI_SET_CONF (SIOCDEVPRIVATE + 3)
-
-/*
- * These are related to the Sangoma SDLA and should remain in order.
- * Code within the SDLA module is based on the specifics of this
- * structure. Change at your own peril.
- */
-struct dlci_conf {
- short flags;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
-
-/* these are part of the status read */
- short Tc_fwd;
- short Tc_bwd;
- short Tf_max;
- short Tb_max;
-
-/* add any new fields here above is a mirror of sdla_dlci_conf */
-};
-
-#define DLCI_GET_SLAVE (SIOCDEVPRIVATE + 4)
-
-/* configuration flags for DLCI */
-#define DLCI_IGNORE_CIR_OUT 0x0001
-#define DLCI_ACCOUNT_CIR_IN 0x0002
-#define DLCI_BUFFER_IF 0x0008
-
-#define DLCI_VALID_FLAGS 0x000B
-
-/* defines for the actual Frame Relay hardware */
-#define FRAD_GET_CONF (SIOCDEVPRIVATE)
-#define FRAD_SET_CONF (SIOCDEVPRIVATE + 1)
-
-#define FRAD_LAST_IOCTL FRAD_SET_CONF
-
-/*
- * Based on the setup for the Sangoma SDLA. If changes are
- * necessary to this structure, a routine will need to be
- * added to that module to copy fields.
- */
-struct frad_conf
-{
- short station;
- short flags;
- short kbaud;
- short clocking;
- short mtu;
- short T391;
- short T392;
- short N391;
- short N392;
- short N393;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
-
-/* Add new fields here, above is a mirror of the sdla_conf */
-
-};
-
-#define FRAD_STATION_CPE 0x0000
-#define FRAD_STATION_NODE 0x0001
-
-#define FRAD_TX_IGNORE_CIR 0x0001
-#define FRAD_RX_ACCOUNT_CIR 0x0002
-#define FRAD_DROP_ABORTED 0x0004
-#define FRAD_BUFFERIF 0x0008
-#define FRAD_STATS 0x0010
-#define FRAD_MCI 0x0100
-#define FRAD_AUTODLCI 0x8000
-#define FRAD_VALID_FLAGS 0x811F
-
-#define FRAD_CLOCK_INT 0x0001
-#define FRAD_CLOCK_EXT 0x0000
-
-
-#endif /* _UAPI_FRAD_H_ */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 7fba4de511de..ffa637b38c93 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,29 +36,204 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ * For hardware interfaces counts all good packets received from the device
+ * by the host, including packets which host had to drop at various stages
+ * of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ * For hardware interfaces counts packets which host was able to successfully
+ * hand over to the device, which does not necessarily mean that packets
+ * had been successfully transmitted out of the device, only that device
+ * acknowledged it copied them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ * This counter must include events counted by @rx_length_errors,
+ * @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ * counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ * This counter must include events counted by @tx_aborted_errors,
+ * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ * @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ * e.g. due to lack of resources or unsupported protocol.
+ * For hardware interfaces this counter may include packets discarded
+ * due to L2 address filtering but should not include packets dropped
+ * by the device due to buffer exhaustion which are counted separately in
+ * @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ * e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ * For hardware interfaces this statistic is commonly calculated
+ * at the device level (unlike @rx_packets) and therefore may include
+ * packets which did not reach the host.
+ *
+ * For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ * - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to a sum
+ * of the following attributes:
+ *
+ * - 30.3.1.1.23 aInRangeLengthErrors
+ * - 30.3.1.1.24 aOutOfRangeLengthField
+ * - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ * Historically the count of overflow events. Such events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * The recommended interpretation for high speed interfaces is the
+ * number of packets dropped because they did not fit into buffers
+ * provided by the host, e.g. packets larger than the MTU, or cases
+ * where the next buffer in the ring was not available for a scatter
+ * transfer.
+ *
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * This statistic was historically used interchangeably with
+ * @rx_fifo_errors.
+ *
+ * This statistic corresponds to hardware events and is not commonly used
+ * on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ * - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ * Historically the count of overflow events. Those events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * This statistic was used interchangeably with @rx_over_errors.
+ * Not recommended for use in drivers for high speed interfaces.
+ *
+ * This statistic is used on software devices, e.g. to count software
+ * packet queue overflow (CAN) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ * Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ * Counts number of packets dropped by the device due to lack
+ * of buffer space. This usually indicates that the host interface
+ * is slower than the network interface, or host is not keeping up
+ * with the receive packet rate.
+ *
+ * This statistic corresponds to hardware events and is not used
+ * on software devices.
+ *
+ * @tx_aborted_errors:
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ * For IEEE 802.3 devices capable of half-duplex operation this counter
+ * must be equivalent to:
+ *
+ * - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ * High speed interfaces may use this counter as a general device
+ * discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ * of carrier during transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ * FIFO underrun / underflow. This condition occurs when the device
+ * begins transmission of a frame but is unable to deliver the
+ * entire frame to the transmitter in time for transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ * old half-duplex Ethernet.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices possibly equivalent to:
+ *
+ * - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ * to late collisions (for Ethernet - after the first 64B of transmission).
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ * but dropped by the networking stack because the device is
+ * not designated to receive packets (e.g. backup link in a bond).
+ *
+ * @rx_otherhost_dropped: Number of packets dropped due to mismatch
+ * in destination MAC address.
+ */
struct rtnl_link_stats64 {
- __u64 rx_packets; /* total packets received */
- __u64 tx_packets; /* total packets transmitted */
- __u64 rx_bytes; /* total bytes received */
- __u64 tx_bytes; /* total bytes transmitted */
- __u64 rx_errors; /* bad packets received */
- __u64 tx_errors; /* packet transmit problems */
- __u64 rx_dropped; /* no space in linux buffers */
- __u64 tx_dropped; /* no space available in linux */
- __u64 multicast; /* multicast packets received */
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
__u64 collisions;
/* detailed rx_errors: */
__u64 rx_length_errors;
- __u64 rx_over_errors; /* receiver ring buff overflow */
- __u64 rx_crc_errors; /* recved pkt with crc error */
- __u64 rx_frame_errors; /* recv'd frame alignment error */
- __u64 rx_fifo_errors; /* recv'r fifo overrun */
- __u64 rx_missed_errors; /* receiver missed packet */
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
/* detailed tx_errors */
__u64 tx_aborted_errors;
@@ -71,8 +245,24 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
+ __u64 rx_nohandler;
+
+ __u64 rx_otherhost_dropped;
+};
- __u64 rx_nohandler; /* dropped, no handler found */
+/* Subset of link stats useful for in-HW collection. Meaning of the fields is as
+ * for struct rtnl_link_stats64.
+ */
+struct rtnl_hw_stats64 {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
};
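
As the @rx_dropped and @rx_missed_errors notes above state, procfs folds the two counters into a single "drop" column; a sketch of recovering that aggregate from a stats dump:

    #include <linux/if_link.h>

    /* Sketch: the /proc/net/dev "drop" column folds these two counters
     * together, per the kernel-doc above. */
    static __u64 procfs_rx_drops(const struct rtnl_link_stats64 *s)
    {
            return s->rx_dropped + s->rx_missed_errors;
    }
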
/* The struct should be in sync with struct ifmap */
@@ -171,6 +361,22 @@ enum {
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
IFLA_PROTO_DOWN_REASON,
+
+ /* device (sysfs) name as parent, used instead
+ * of IFLA_LINK where there's no parent netdev
+ */
+ IFLA_PARENT_DEV_NAME,
+ IFLA_PARENT_DEV_BUS_NAME,
+ IFLA_GRO_MAX_SIZE,
+ IFLA_TSO_MAX_SIZE,
+ IFLA_TSO_MAX_SEGS,
+ IFLA_ALLMULTI, /* Allmulti count: > 0 means the device acts ALLMULTI */
+
+ IFLA_DEVLINK_PORT,
+
+ IFLA_GSO_IPV4_MAX_SIZE,
+ IFLA_GRO_IPV4_MAX_SIZE,
+ IFLA_DPLL_PIN,
__IFLA_MAX
};
@@ -240,6 +446,7 @@ enum {
IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */
IFLA_INET6_TOKEN, /* device token */
IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */
+ IFLA_INET6_RA_MTU, /* mtu carried in the RA message */
__IFLA_INET6_MAX
};
@@ -254,6 +461,286 @@ enum in6_addr_gen_mode {
/* Bridge section */
+/**
+ * DOC: Bridge enum definition
+ *
+ * Please *note* that the timer values in the following section are expected
+ * in clock_t format, which is seconds multiplied by USER_HZ (generally
+ * defined as 100).
+ *
+ * @IFLA_BR_FORWARD_DELAY
+ * The bridge forwarding delay is the time spent in LISTENING state
+ * (before moving to LEARNING) and in LEARNING state (before moving
+ * to FORWARDING). Only relevant if STP is enabled.
+ *
+ * The valid values are between (2 * USER_HZ) and (30 * USER_HZ).
+ * The default value is (15 * USER_HZ).
+ *
+ * @IFLA_BR_HELLO_TIME
+ * The time between hello packets sent by the bridge, when it is a root
+ * bridge or a designated bridge. Only relevant if STP is enabled.
+ *
+ * The valid values are between (1 * USER_HZ) and (10 * USER_HZ).
+ * The default value is (2 * USER_HZ).
+ *
+ * @IFLA_BR_MAX_AGE
+ * The hello packet timeout is the time until another bridge in the
+ * spanning tree is assumed to be dead, after reception of its last hello
+ * message. Only relevant if STP is enabled.
+ *
+ * The valid values are between (6 * USER_HZ) and (40 * USER_HZ).
+ * The default value is (20 * USER_HZ).
+ *
+ * @IFLA_BR_AGEING_TIME
+ * Configure the bridge's FDB entries aging time. It is the time a MAC
+ * address will be kept in the FDB after a packet has been received from
+ * that address. After this time has passed, entries are cleaned up.
+ * Allow values outside the 802.1 standard specification for special cases:
+ *
+ * * 0 - entry never ages (all permanent)
+ * * 1 - entry disappears (no persistence)
+ *
+ * The default value is (300 * USER_HZ).
+ *
+ * @IFLA_BR_STP_STATE
+ * Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off
+ * (*IFLA_BR_STP_STATE* == 0) for this bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_PRIORITY
+ * Set this bridge's spanning tree priority, used during STP root bridge
+ * election.
+ *
+ * The valid values are between 0 and 65535.
+ *
+ * @IFLA_BR_VLAN_FILTERING
+ * Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off
+ * (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not
+ * consider the VLAN tag when handling packets.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_PROTOCOL
+ * Set the protocol used for VLAN filtering.
+ *
+ * The valid values are 0x8100 (802.1Q) or 0x88A8 (802.1AD). The default
+ * value is 0x8100 (802.1Q).
+ *
+ * @IFLA_BR_GROUP_FWD_MASK
+ * The group forwarding mask. This is the bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses (of the form 01:80:C2:00:00:0X).
+ *
+ * The default value is 0, which means the bridge does not forward any
+ * link-local frames coming on this port.
+ *
+ * @IFLA_BR_ROOT_ID
+ * The bridge root id, read only.
+ *
+ * @IFLA_BR_BRIDGE_ID
+ * The bridge id, read only.
+ *
+ * @IFLA_BR_ROOT_PORT
+ * The bridge root port, read only.
+ *
+ * @IFLA_BR_ROOT_PATH_COST
+ * The bridge root path cost, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE
+ * The bridge topology change, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_DETECTED
+ * The bridge topology change detected, read only.
+ *
+ * @IFLA_BR_HELLO_TIMER
+ * The bridge hello timer, read only.
+ *
+ * @IFLA_BR_TCN_TIMER
+ * The bridge tcn timer, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_TIMER
+ * The bridge topology change timer, read only.
+ *
+ * @IFLA_BR_GC_TIMER
+ * The bridge gc timer, read only.
+ *
+ * @IFLA_BR_GROUP_ADDR
+ * Set the MAC address of the multicast group this bridge uses for STP.
+ * The address must be a link-local address in standard Ethernet MAC address
+ * format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f].
+ *
+ * The default value is 0.
+ *
+ * @IFLA_BR_FDB_FLUSH
+ * Flush bridge's fdb dynamic entries.
+ *
+ * @IFLA_BR_MCAST_ROUTER
+ * Set bridge's multicast router if IGMP snooping is enabled.
+ * The valid values are:
+ *
+ * * 0 - disabled.
+ * * 1 - automatic (queried).
+ * * 2 - permanently enabled.
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_SNOOPING
+ * Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off
+ * (*IFLA_BR_MCAST_SNOOPING* == 0).
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_QUERY_USE_IFADDR
+ * If enabled, use the bridge's own IP address as the source address for
+ * IGMP queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of
+ * 0.0.0.0 (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0).
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_QUERIER
+ * Enable (*IFLA_BR_MCAST_QUERIER* > 0) or disable
+ * (*IFLA_BR_MCAST_QUERIER* == 0) the IGMP querier, i.e. sending of
+ * multicast queries by the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_HASH_ELASTICITY
+ * Set the multicast database hash elasticity: the maximum chain length
+ * in the multicast hash table. This attribute is *deprecated* and the
+ * value is always 16.
+ *
+ * @IFLA_BR_MCAST_HASH_MAX
+ * Set the maximum size of the multicast hash table.
+ *
+ * The default value is 4096; the value must be a power of 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_CNT
+ * The Last Member Query Count is the number of Group-Specific Queries
+ * sent before the router assumes there are no local members. The Last
+ * Member Query Count is also the number of Group-and-Source-Specific
+ * Queries sent before the router assumes there are no listeners for a
+ * particular source.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_CNT
+ * The Startup Query Count is the number of Queries sent out on startup,
+ * separated by the Startup Query Interval.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_INTVL
+ * The Last Member Query Interval is the Max Response Time inserted into
+ * Group-Specific Queries sent in response to Leave Group messages, and
+ * is also the amount of time between Group-Specific Query messages.
+ *
+ * The default value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_MEMBERSHIP_INTVL
+ * The interval after which the bridge will leave a group, if no membership
+ * reports for this group are received.
+ *
+ * The default value is (260 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERIER_INTVL
+ * The interval between queries sent by other routers. If no queries are
+ * seen after this delay has passed, the bridge will start to send its own
+ * queries (as if *IFLA_BR_MCAST_QUERIER* was enabled).
+ *
+ * The default value is (255 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_INTVL
+ * The Query Interval is the interval between General Queries sent by
+ * the Querier.
+ *
+ * The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
+ * The Max Response Time used to calculate the Max Resp Code inserted
+ * into the periodic General Queries.
+ *
+ * The default value is (10 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_INTVL
+ * The interval between queries in the startup phase.
+ *
+ * The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_NF_CALL_IPTABLES
+ * Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0)
+ * iptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_IP6TABLES
+ * Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0)
+ * ip6tables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_ARPTABLES
+ * Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0)
+ * arptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_DEFAULT_PVID
+ * VLAN ID applied to untagged and priority-tagged incoming packets.
+ *
+ * The default value is 1. Setting to the special value 0 makes all ports of
+ * this bridge not have a PVID by default, which means that they will
+ * not accept VLAN-untagged traffic.
+ *
+ * @IFLA_BR_PAD
+ * Bridge attribute padding type for netlink message.
+ *
+ * @IFLA_BR_VLAN_STATS_ENABLED
+ * Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_STATS_ENABLED
+ * Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable
+ * (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats
+ * accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_IGMP_VERSION
+ * Set the IGMP version.
+ *
+ * The valid values are 2 and 3. The default value is 2.
+ *
+ * @IFLA_BR_MCAST_MLD_VERSION
+ * Set the MLD version.
+ *
+ * The valid values are 1 and 2. The default value is 1.
+ *
+ * @IFLA_BR_VLAN_STATS_PER_PORT
+ * Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting.
+ * Can be changed only when there are no port VLANs configured.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MULTI_BOOLOPT
+ * The multi_boolopt is used to control new boolean options to avoid adding
+ * new netlink attributes. You can look at ``enum br_boolopt_id`` for those
+ * options.
+ *
+ * @IFLA_BR_MCAST_QUERIER_STATE
+ * Bridge mcast querier states, read only.
+ *
+ * @IFLA_BR_FDB_N_LEARNED
+ * The number of dynamically learned FDB entries for the current bridge,
+ * read only.
+ *
+ * @IFLA_BR_FDB_MAX_LEARNED
+ * Set the maximum number of dynamically learned FDB entries for the
+ * current bridge.
+ */
enum {
IFLA_BR_UNSPEC,
IFLA_BR_FORWARD_DELAY,
@@ -302,6 +789,9 @@ enum {
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
+ IFLA_BR_MCAST_QUERIER_STATE,
+ IFLA_BR_FDB_N_LEARNED,
+ IFLA_BR_FDB_MAX_LEARNED,
__IFLA_BR_MAX,
};
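
The DOC block above notes that the IFLA_BR_* timer attributes are expressed in clock_t units (seconds times USER_HZ). A sketch of composing one such value; USER_HZ is assumed to be 100 here, matching the common kernel configuration (userspace can query it via sysconf(_SC_CLK_TCK)):

    #include <linux/types.h>

    #define BR_USER_HZ 100  /* assumption; see sysconf(_SC_CLK_TCK) */

    /* Sketch: the documented IFLA_BR_FORWARD_DELAY default, 15 seconds. */
    __u64 forward_delay = 15 * BR_USER_HZ;
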
@@ -312,11 +802,252 @@ struct ifla_bridge_id {
__u8 addr[6]; /* ETH_ALEN */
};
+/**
+ * DOC: Bridge mode enum definition
+ *
+ * @BRIDGE_MODE_HAIRPIN
+ * Controls whether traffic may be sent back out of the port on which it
+ * was received. This option is also called reflective relay mode, and is
+ * used to support basic VEPA (Virtual Ethernet Port Aggregator)
+ * capabilities. By default, this flag is turned off and the bridge will
+ * not forward traffic back out of the receiving port.
+ */
enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
};
+/**
+ * DOC: Bridge port enum definition
+ *
+ * @IFLA_BRPORT_STATE
+ * The operation state of the port. Here are the valid values.
+ *
+ * * 0 - port is in STP *DISABLED* state. Make this port completely
+ * inactive for STP. This is also called BPDU filter and could be used
+ * to disable STP on an untrusted port, like a leaf virtual device.
+ * The traffic forwarding is also stopped on this port.
+ * * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled
+ * on the bridge. In this state the port listens for STP BPDUs and
+ * drops all other traffic frames.
+ * * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on
+ * the bridge. In this state the port will accept traffic only for the
+ * purpose of updating MAC address tables.
+ * * 3 - port is in STP *FORWARDING* state. Port is fully active.
+ * * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on
+ * the bridge. This state is used during the STP election process.
+ * In this state, port will only process STP BPDUs.
+ *
+ * @IFLA_BRPORT_PRIORITY
+ * The STP port priority. The valid values are between 0 and 255.
+ *
+ * @IFLA_BRPORT_COST
+ * The STP path cost of the port. The valid values are between 1 and 65535.
+ *
+ * @IFLA_BRPORT_MODE
+ * Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details.
+ *
+ * @IFLA_BRPORT_GUARD
+ * Controls whether STP BPDUs will be processed by the bridge port. By
+ * default, the flag is turned off to allow BPDU processing. Turning this
+ * flag on will disable the bridge port if a STP BPDU packet is received.
+ *
+ * If the bridge has Spanning Tree enabled, hostile devices on the network
+ * may send BPDU on a port and cause network failure. Setting *guard on*
+ * will detect and stop this by disabling the port. The port will be
+ * restarted if the link is brought down, or removed and reattached.
+ *
+ * @IFLA_BRPORT_PROTECT
+ * Controls whether a given port is allowed to become a root port or not.
+ * Only used when STP is enabled on the bridge. By default the flag is off.
+ *
+ * This feature is also called root port guard. If BPDU is received from a
+ * leaf (edge) port, it should not be elected as root port. This could
+ * be used if using STP on a bridge and the downstream bridges are not fully
+ * trusted; this prevents a hostile guest from rerouting traffic.
+ *
+ * @IFLA_BRPORT_FAST_LEAVE
+ * This flag allows the bridge to immediately stop multicast traffic
+ * forwarding on a port that receives an IGMP Leave message. It is only used
+ * when IGMP snooping is enabled on the bridge. By default the flag is off.
+ *
+ * @IFLA_BRPORT_LEARNING
+ * Controls whether a given port will learn *source* MAC addresses from
+ * received traffic or not. Also controls whether dynamic FDB entries
+ * (which can also be added by software) will be refreshed by incoming
+ * traffic. By default this flag is on.
+ *
+ * @IFLA_BRPORT_UNICAST_FLOOD
+ * Controls whether unicast traffic for which there is no FDB entry will
+ * be flooded towards this port. By default this flag is on.
+ *
+ * @IFLA_BRPORT_PROXYARP
+ * Enable proxy ARP on this port.
+ *
+ * @IFLA_BRPORT_LEARNING_SYNC
+ * Controls whether a given port will sync MAC addresses learned on the
+ * device port to the bridge FDB.
+ *
+ * @IFLA_BRPORT_PROXYARP_WIFI
+ * Enable proxy ARP on this port which meets extended requirements by
+ * IEEE 802.11 and Hotspot 2.0 specifications.
+ *
+ * @IFLA_BRPORT_ROOT_ID
+ *
+ * @IFLA_BRPORT_BRIDGE_ID
+ *
+ * @IFLA_BRPORT_DESIGNATED_PORT
+ *
+ * @IFLA_BRPORT_DESIGNATED_COST
+ *
+ * @IFLA_BRPORT_ID
+ *
+ * @IFLA_BRPORT_NO
+ *
+ * @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
+ *
+ * @IFLA_BRPORT_CONFIG_PENDING
+ *
+ * @IFLA_BRPORT_MESSAGE_AGE_TIMER
+ *
+ * @IFLA_BRPORT_FORWARD_DELAY_TIMER
+ *
+ * @IFLA_BRPORT_HOLD_TIMER
+ *
+ * @IFLA_BRPORT_FLUSH
+ * Flush bridge ports' fdb dynamic entries.
+ *
+ * @IFLA_BRPORT_MULTICAST_ROUTER
+ * Configure the port's multicast router presence. A port with
+ * a multicast router will receive all multicast traffic.
+ * The valid values are:
+ *
+ * * 0 disable multicast routers on this port
+ * * 1 let the system detect the presence of routers (default)
+ * * 2 permanently enable multicast traffic forwarding on this port
+ * * 3 enable multicast routers temporarily on this port, not depending
+ * on incoming queries.
+ *
+ * @IFLA_BRPORT_PAD
+ *
+ * @IFLA_BRPORT_MCAST_FLOOD
+ * Controls whether a given port will flood multicast traffic for which
+ * there is no MDB entry. By default this flag is on.
+ *
+ * @IFLA_BRPORT_MCAST_TO_UCAST
+ * Controls whether a given port will replicate packets using unicast
+ * instead of multicast. By default this flag is off.
+ *
+ * This is done by copying the packet per host and changing the multicast
+ * destination MAC to a unicast one accordingly.
+ *
+ * *mcast_to_unicast* works on top of the multicast snooping feature of the
+ * bridge, which means unicast copies are only delivered to hosts which
+ * are interested in them and have signaled this via IGMP/MLD reports.
+ *
+ * This feature is intended for interface types which have a more reliable
+ * and/or efficient way to deliver unicast packets than broadcast ones
+ * (e.g. WiFi).
+ *
+ * However, it should only be enabled on interfaces where no IGMPv2/MLDv1
+ * report suppression takes place. The report suppression issue is
+ * usually overcome by the network daemon (supplicant) enabling AP
+ * isolation, thereby separating all STAs.
+ *
+ * Delivery of STA-to-STA IP multicast is made possible again by enabling
+ * and utilizing the bridge hairpin mode, which considers the incoming port
+ * as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option).
+ * Hairpin mode is performed after multicast snooping, therefore reports
+ * are only delivered to STAs running a multicast router.
+ *
+ * @IFLA_BRPORT_VLAN_TUNNEL
+ * Controls whether vlan to tunnel mapping is enabled on the port.
+ * By default this flag is off.
+ *
+ * @IFLA_BRPORT_BCAST_FLOOD
+ * Controls flooding of broadcast traffic on the given port. By default
+ * this flag is on.
+ *
+ * @IFLA_BRPORT_GROUP_FWD_MASK
+ * Set the group forward mask. This is a bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses. The addresses are of the form 01:80:C2:00:00:0X. The mask
+ * defaults to 0, which means the bridge does not forward any link-local
+ * frames arriving on this port.
+ *
+ * @IFLA_BRPORT_NEIGH_SUPPRESS
+ * Controls whether neighbor discovery (arp and nd) proxy and suppression
+ * is enabled on the port. By default this flag is off.
+ *
+ * @IFLA_BRPORT_ISOLATED
+ * Controls whether a given port will be isolated, which means it will be
+ * able to communicate with non-isolated ports only. By default this
+ * flag is off.
+ *
+ * @IFLA_BRPORT_BACKUP_PORT
+ * Set a backup port. If the port loses carrier all traffic will be
+ * redirected to the configured backup port. Set the value to 0 to disable
+ * it.
+ *
+ * @IFLA_BRPORT_MRP_RING_OPEN
+ *
+ * @IFLA_BRPORT_MRP_IN_OPEN
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
+ * The per-port limit on tracked EHT hosts. The default value is 512.
+ * Setting it to 0 is not allowed.
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
+ * The current number of tracked hosts, read only.
+ *
+ * @IFLA_BRPORT_LOCKED
+ * Controls whether a port will be locked, meaning that hosts behind the
+ * port will not be able to communicate through the port unless an FDB
+ * entry with the unit's MAC address is in the FDB. The common use case is
+ * that hosts are allowed access through authentication with the IEEE 802.1X
+ * protocol or based on whitelists. By default this flag is off.
+ *
+ * Please note that secure 802.1X deployments should always use the
+ * *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its
+ * FDB based on link-local (EAPOL) traffic received on the port.
+ *
+ * @IFLA_BRPORT_MAB
+ * Controls whether a port will use MAC Authentication Bypass (MAB), a
+ * technique through which select MAC addresses may be allowed on a locked
+ * port, without using 802.1X authentication. Packets with an unknown source
+ * MAC address generate a "locked" FDB entry on the incoming bridge port.
+ * The common use case is for user space to react to these bridge FDB
+ * notifications and optionally replace the locked FDB entry with a normal
+ * one, allowing traffic to pass for whitelisted MAC addresses.
+ *
+ * Setting this flag also requires *IFLA_BRPORT_LOCKED* and
+ * *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized
+ * data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic
+ * FDB entries installed by user space (as replacements for the locked FDB
+ * entries) to be refreshed and/or aged out.
+ *
+ * @IFLA_BRPORT_MCAST_N_GROUPS
+ * The current number of MDB entries registered at the port, read only.
+ *
+ * @IFLA_BRPORT_MCAST_MAX_GROUPS
+ * Sets the maximum number of MDB entries that can be registered for a
+ * given port. Attempts to register more MDB entries at the port than this
+ * limit allows will be rejected, whether they are made through netlink
+ * (e.g. with the bridge tool) or via IGMP or MLD membership reports.
+ * Setting a limit of 0 disables the limit. The default value is 0.
+ *
+ * @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS
+ * Controls whether neighbor discovery (arp and nd) proxy and suppression
+ * is enabled on a per-VLAN basis for a given port. By default this flag
+ * is off.
+ *
+ * Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS*
+ * is enabled for a given port.
+ *
+ * @IFLA_BRPORT_BACKUP_NHID
+ * The FDB nexthop object ID to attach to packets being redirected to a
+ * backup port that has VLAN tunnel mapping enabled (via the
+ * *IFLA_BRPORT_VLAN_TUNNEL* option). Setting a value of 0 (default) has
+ * the effect of not attaching any ID.
+ */
enum {
IFLA_BRPORT_UNSPEC,
IFLA_BRPORT_STATE, /* Spanning tree state */
@@ -355,6 +1086,14 @@ enum {
IFLA_BRPORT_BACKUP_PORT,
IFLA_BRPORT_MRP_RING_OPEN,
IFLA_BRPORT_MRP_IN_OPEN,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
+ IFLA_BRPORT_LOCKED,
+ IFLA_BRPORT_MAB,
+ IFLA_BRPORT_MCAST_N_GROUPS,
+ IFLA_BRPORT_MCAST_MAX_GROUPS,
+ IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
+ IFLA_BRPORT_BACKUP_NHID,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
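[Editor's note] The locked-port and MAB attributes above are configured per bridge port over rtnetlink. The following is a minimal, hypothetical sketch (not part of this patch) of enabling IFLA_BRPORT_LOCKED together with IFLA_BRPORT_LEARNING and IFLA_BRPORT_MAB; the helper, error handling, and fixed-size buffer are illustrative only.

#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void add_u8(struct nlmsghdr *nlh, unsigned short type, __u8 val)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(val));
	memcpy(RTA_DATA(rta), &val, sizeof(val));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

static int lock_bridge_port(const char *port)
{
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
	char buf[256] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct ifinfomsg *ifi;
	struct rtattr *nest;
	int fd, ret;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*ifi));
	nlh->nlmsg_type = RTM_SETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	ifi = NLMSG_DATA(nlh);
	ifi->ifi_family = PF_BRIDGE;		/* bridge port options */
	ifi->ifi_index = if_nametoindex(port);

	/* Per-port options are nested inside IFLA_PROTINFO. */
	nest = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	nest->rta_type = IFLA_PROTINFO | NLA_F_NESTED;
	nest->rta_len = RTA_LENGTH(0);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + nest->rta_len;

	add_u8(nlh, IFLA_BRPORT_LOCKED, 1);
	add_u8(nlh, IFLA_BRPORT_LEARNING, 1);	/* required by MAB */
	add_u8(nlh, IFLA_BRPORT_MAB, 1);
	nest->rta_len = (char *)nlh + nlh->nlmsg_len - (char *)nest;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, nlh, nlh->nlmsg_len, 0,
		     (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return ret < 0 ? -1 : 0;
}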
@@ -419,6 +1158,9 @@ enum {
IFLA_MACVLAN_MACADDR,
IFLA_MACVLAN_MACADDR_DATA,
IFLA_MACVLAN_MACADDR_COUNT,
+ IFLA_MACVLAN_BC_QUEUE_LEN,
+ IFLA_MACVLAN_BC_QUEUE_LEN_USED,
+ IFLA_MACVLAN_BC_CUTOFF,
__IFLA_MACVLAN_MAX,
};
@@ -440,6 +1182,7 @@ enum macvlan_macaddr_mode {
};
#define MACVLAN_FLAG_NOPROMISC 1
+#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */
/* VRF section */
enum {
@@ -486,6 +1229,7 @@ enum {
IFLA_XFRM_UNSPEC,
IFLA_XFRM_LINK,
IFLA_XFRM_IF_ID,
+ IFLA_XFRM_COLLECT_METADATA,
__IFLA_XFRM_MAX
};
@@ -527,7 +1271,79 @@ enum ipvlan_mode {
#define IPVLAN_F_PRIVATE 0x01
#define IPVLAN_F_VEPA 0x02
+/* Tunnel RTM header */
+struct tunnel_msg {
+ __u8 family;
+ __u8 flags;
+ __u16 reserved2;
+ __u32 ifindex;
+};
+
+/* netkit section */
+enum netkit_action {
+ NETKIT_NEXT = -1,
+ NETKIT_PASS = 0,
+ NETKIT_DROP = 2,
+ NETKIT_REDIRECT = 7,
+};
+
+enum netkit_mode {
+ NETKIT_L2,
+ NETKIT_L3,
+};
+
+enum {
+ IFLA_NETKIT_UNSPEC,
+ IFLA_NETKIT_PEER_INFO,
+ IFLA_NETKIT_PRIMARY,
+ IFLA_NETKIT_POLICY,
+ IFLA_NETKIT_PEER_POLICY,
+ IFLA_NETKIT_MODE,
+ __IFLA_NETKIT_MAX,
+};
+#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
+
/* VXLAN section */
+
+/* include statistics in the dump */
+#define TUNNEL_MSG_FLAG_STATS 0x01
+
+#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS
+
+/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */
+enum {
+ VNIFILTER_ENTRY_STATS_UNSPEC,
+ VNIFILTER_ENTRY_STATS_RX_BYTES,
+ VNIFILTER_ENTRY_STATS_RX_PKTS,
+ VNIFILTER_ENTRY_STATS_RX_DROPS,
+ VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ VNIFILTER_ENTRY_STATS_TX_BYTES,
+ VNIFILTER_ENTRY_STATS_TX_PKTS,
+ VNIFILTER_ENTRY_STATS_TX_DROPS,
+ VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ VNIFILTER_ENTRY_STATS_PAD,
+ __VNIFILTER_ENTRY_STATS_MAX
+};
+#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1)
+
+enum {
+ VXLAN_VNIFILTER_ENTRY_UNSPEC,
+ VXLAN_VNIFILTER_ENTRY_START,
+ VXLAN_VNIFILTER_ENTRY_END,
+ VXLAN_VNIFILTER_ENTRY_GROUP,
+ VXLAN_VNIFILTER_ENTRY_GROUP6,
+ VXLAN_VNIFILTER_ENTRY_STATS,
+ __VXLAN_VNIFILTER_ENTRY_MAX
+};
+#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1)
+
+enum {
+ VXLAN_VNIFILTER_UNSPEC,
+ VXLAN_VNIFILTER_ENTRY,
+ __VXLAN_VNIFILTER_MAX
+};
+#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1)
+
enum {
IFLA_VXLAN_UNSPEC,
IFLA_VXLAN_ID,
@@ -559,6 +1375,9 @@ enum {
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
IFLA_VXLAN_DF,
+ IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
+ IFLA_VXLAN_LOCALBYPASS,
+ IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -576,6 +1395,13 @@ enum ifla_vxlan_df {
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
};
+enum ifla_vxlan_label_policy {
+ VXLAN_LABEL_FIXED = 0,
+ VXLAN_LABEL_INHERIT = 1,
+ __VXLAN_LABEL_END,
+ VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@@ -592,6 +1418,7 @@ enum {
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
IFLA_GENEVE_DF,
+ IFLA_GENEVE_INNER_PROTO_INHERIT,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -637,6 +1464,8 @@ enum {
IFLA_GTP_FD1,
IFLA_GTP_PDP_HASHSIZE,
IFLA_GTP_ROLE,
+ IFLA_GTP_CREATE_SOCKETS,
+ IFLA_GTP_RESTART_COUNT,
__IFLA_GTP_MAX,
};
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
@@ -673,6 +1502,10 @@ enum {
IFLA_BOND_AD_ACTOR_SYSTEM,
IFLA_BOND_TLB_DYNAMIC_LB,
IFLA_BOND_PEER_NOTIF_DELAY,
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
+ IFLA_BOND_COUPLED_CONTROL,
__IFLA_BOND_MAX,
};
@@ -700,6 +1533,7 @@ enum {
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_PRIO,
__IFLA_BOND_SLAVE_MAX,
};
@@ -969,6 +1803,17 @@ enum {
#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
+enum {
+ IFLA_STATS_GETSET_UNSPEC,
+ IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with
+ * a filter mask for the corresponding group.
+ */
+ IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */
+ __IFLA_STATS_GETSET_MAX,
+};
+
+#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1)
+
/* These are embedded into IFLA_STATS_LINK_XSTATS:
* [IFLA_STATS_LINK_XSTATS]
* -> [LINK_XSTATS_TYPE_xxx]
@@ -986,10 +1831,21 @@ enum {
enum {
IFLA_OFFLOAD_XSTATS_UNSPEC,
IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */
+ IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */
__IFLA_OFFLOAD_XSTATS_MAX
};
#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+enum {
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */
+ IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */
+ __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX,
+};
+#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \
+ (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1)
+
/* XDP section */
#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
@@ -1061,6 +1917,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
@@ -1076,4 +1934,26 @@ struct ifla_rmnet_flags {
__u32 mask;
};
+/* MCTP section */
+
+enum {
+ IFLA_MCTP_UNSPEC,
+ IFLA_MCTP_NET,
+ __IFLA_MCTP_MAX,
+};
+
+#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+
+/* DSA section */
+
+enum {
+ IFLA_DSA_UNSPEC,
+ IFLA_DSA_CONDUIT,
+ /* Deprecated, use IFLA_DSA_CONDUIT instead */
+ IFLA_DSA_MASTER = IFLA_DSA_CONDUIT,
+ __IFLA_DSA_MAX,
+};
+
+#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 3af2aa069a36..d5b6d1f37353 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,6 +22,8 @@
#define MACSEC_KEYID_LEN 16
+#define MACSEC_SALT_LEN 12
+
/* cipher IDs as per IEEE802.1AE-2018 (Table 14-1) */
#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 3d884d68eb30..9efc42382fdb 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_IF_PACKET_H
#define __LINUX_IF_PACKET_H
+#include <asm/byteorder.h>
#include <linux/types.h>
struct sockaddr_pkt {
@@ -58,6 +59,7 @@ struct sockaddr_ll {
#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_DATA 22
#define PACKET_IGNORE_OUTGOING 23
+#define PACKET_VNET_HDR_SZ 24
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -69,6 +71,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT_EBPF 7
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
#define PACKET_FANOUT_FLAG_UNIQUEID 0x2000
+#define PACKET_FANOUT_FLAG_IGNORE_OUTGOING 0x4000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
struct tpacket_stats {
@@ -113,6 +116,7 @@ struct tpacket_auxdata {
#define TP_STATUS_BLK_TMO (1 << 5)
#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
#define TP_STATUS_CSUM_VALID (1 << 7)
+#define TP_STATUS_GSO_TCP (1 << 8)
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
@@ -296,6 +300,17 @@ struct packet_mreq {
unsigned char mr_address[8];
};
+struct fanout_args {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 id;
+ __u16 type_flags;
+#else
+ __u16 type_flags;
+ __u16 id;
+#endif
+ __u32 max_num_members;
+};
+
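[Editor's note] struct fanout_args replaces the traditional packed-int argument to the PACKET_FANOUT socket option and adds a group member cap. A sketch, assuming an already-bound AF_PACKET socket:

#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int join_fanout(int pkt_fd, __u16 group_id)
{
	struct fanout_args args;

	memset(&args, 0, sizeof(args));
	args.id = group_id;
	args.type_flags = PACKET_FANOUT_HASH;	/* fanout mode plus flags */
	args.max_num_members = 256;		/* 0 keeps the kernel default */

	return setsockopt(pkt_fd, SOL_PACKET, PACKET_FANOUT,
			  &args, sizeof(args));
}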
#define PACKET_MR_MULTICAST 0
#define PACKET_MR_PROMISC 1
#define PACKET_MR_ALLMULTI 2
diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
index 060b4d1f3129..a91044328bc9 100644
--- a/include/uapi/linux/if_pppol2tp.h
+++ b/include/uapi/linux/if_pppol2tp.h
@@ -75,7 +75,7 @@ struct pppol2tpv3in6_addr {
};
/* Socket options:
- * DEBUG - bitmask of debug message categories
+ * DEBUG - bitmask of debug message categories (not used)
* SENDSEQ - 0 => don't send packets with sequence numbers
* 1 => send packets with sequence numbers
* RECVSEQ - 0 => receive packet sequence numbers are optional
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index e7a693c28f16..9abd80dcc46f 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -122,7 +122,7 @@ struct sockaddr_pppol2tpv3in6 {
struct pppoe_tag {
__be16 tag_type;
__be16 tag_len;
- char tag_data[0];
+ char tag_data[];
} __attribute__ ((packed));
/* Tag identifiers */
@@ -150,7 +150,7 @@ struct pppoe_hdr {
__u8 code;
__be16 sid;
__be16 length;
- struct pppoe_tag tag[0];
+ struct pppoe_tag tag[];
} __packed;
/* Length of entire PPPoE + PPP header */
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 454ae31b93c7..287cdc81c939 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -67,6 +67,8 @@
#define IFF_TAP 0x0002
#define IFF_NAPI 0x0010
#define IFF_NAPI_FRAGS 0x0020
+/* Used in TUNSETIFF to bring up tun/tap without carrier */
+#define IFF_NO_CARRIER 0x0040
#define IFF_NO_PI 0x1000
/* This flag has no real effect */
#define IFF_ONE_QUEUE 0x2000
@@ -88,6 +90,8 @@
#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */
#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */
#define TUN_F_UFO 0x10 /* I can handle UFO packets */
+#define TUN_F_USO4 0x20 /* I can handle USO for IPv4 packets */
+#define TUN_F_USO6 0x40 /* I can handle USO for IPv6 packets */
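[Editor's note] A sketch, assuming a tap fd opened elsewhere, of advertising the new USO bits alongside the existing offloads; TUNSETOFFLOAD is the pre-existing ioctl, only the two flags are new here:

#include <linux/if_tun.h>
#include <sys/ioctl.h>

static int enable_uso(int tap_fd)
{
	/* "I can handle": gates what the kernel may pass up unsegmented. */
	unsigned long offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
				TUN_F_USO4 | TUN_F_USO6;

	return ioctl(tap_fd, TUNSETOFFLOAD, offload);
}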
/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */
#define TUN_PKT_STRIP 0x0001
@@ -108,7 +112,7 @@ struct tun_pi {
struct tun_filter {
__u16 flags; /* TUN_FLT_ flags see above */
__u16 count; /* Number of addresses */
- __u8 addr[0][ETH_ALEN];
+ __u8 addr[][ETH_ALEN];
};
#endif /* _UAPI__IF_TUN_H */
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 7d9105533c7b..102119628ff5 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -176,8 +176,10 @@ enum {
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
+#define TUNNEL_GTP_OPT __cpu_to_be16(0x8000)
#define TUNNEL_OPTIONS_PRESENT \
- (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \
+ TUNNEL_GTP_OPT)
#endif /* _UAPI_IF_TUNNEL_H_ */
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index a78a8096f4ce..d31698410410 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -25,9 +25,21 @@
* application.
*/
#define XDP_USE_NEED_WAKEUP (1 << 3)
+/* By setting this option, the userspace application indicates that it can
+ * handle multiple descriptors per packet, thus enabling AF_XDP to split
+ * multi-buffer XDP frames into multiple Rx descriptors. Without this set,
+ * such frames will be dropped.
+ */
+#define XDP_USE_SG (1 << 4)
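[Editor's note] With XDP_USE_SG set, one packet may span several Rx descriptors; the continuation bit (XDP_PKT_CONTD, defined further down in this header) stays set on all but the last. A consumer-side sketch, with the ring bookkeeping left as placeholders:

#include <linux/if_xdp.h>
#include <stdbool.h>

/* Consume one (possibly multi-buffer) packet starting at *idx. */
static void drain_one_packet(struct xdp_desc *rx_ring, __u32 *idx, __u32 mask)
{
	bool more = true;

	while (more) {
		struct xdp_desc *desc = &rx_ring[*idx & mask];

		more = desc->options & XDP_PKT_CONTD;
		/* ... append desc->addr/desc->len to the frame being built ... */
		(*idx)++;
	}
}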
/* Flags for xsk_umem_config flags */
-#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+
+/* Force checksum calculation in software. Can be used for testing or
+ * working around potential HW issues. This option causes performance
+ * degradation and only works in XDP_COPY mode.
+ */
+#define XDP_UMEM_TX_SW_CSUM (1 << 1)
struct sockaddr_xdp {
__u16 sxdp_family;
@@ -70,6 +82,7 @@ struct xdp_umem_reg {
__u32 chunk_size;
__u32 headroom;
__u32 flags;
+ __u32 tx_metadata_len;
};
struct xdp_statistics {
@@ -99,6 +112,41 @@ struct xdp_options {
#define XSK_UNALIGNED_BUF_ADDR_MASK \
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
+/* Request a transmit timestamp. Upon completion, it is put into the
+ * tx_timestamp field of struct xsk_tx_metadata.
+ */
+#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0)
+
+/* Request transmit checksum offload. Checksum start position and offset
+ * are communicated via csum_start and csum_offset fields of struct
+ * xsk_tx_metadata.
+ */
+#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
+
+/* AF_XDP offloads request. 'request' union member is consumed by the driver
+ * when the packet is being transmitted. 'completion' union member is
+ * filled by the driver when the transmit completion arrives.
+ */
+struct xsk_tx_metadata {
+ __u64 flags;
+
+ union {
+ struct {
+ /* XDP_TXMD_FLAGS_CHECKSUM */
+
+ /* Offset from desc->addr where checksumming should start. */
+ __u16 csum_start;
+ /* Offset from csum_start where checksum should be stored. */
+ __u16 csum_offset;
+ } request;
+
+ struct {
+ /* XDP_TXMD_FLAGS_TIMESTAMP */
+ __u64 tx_timestamp;
+ } completion;
+ };
+};
+
/* Rx/Tx descriptor */
struct xdp_desc {
__u64 addr;
@@ -108,4 +156,14 @@ struct xdp_desc {
/* UMEM descriptor is __u64 */
+/* Flag indicating that the packet continues in the buffer pointed to by the
+ * next descriptor in the ring. The end of the packet is signalled by setting
+ * this bit to zero. For single-buffer packets, every descriptor has 'options'
+ * set to 0, which maintains backward compatibility.
+ */
+#define XDP_PKT_CONTD (1 << 0)
+
+/* TX packet carries valid metadata. */
+#define XDP_TX_METADATA (1 << 1)
+
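[Editor's note] A sketch of requesting a Tx timestamp for one AF_XDP frame, assuming the umem was registered with tx_metadata_len == sizeof(struct xsk_tx_metadata) so the metadata sits immediately before the packet data:

#include <linux/if_xdp.h>
#include <string.h>

static void request_tx_timestamp(void *umem_base, struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta;

	meta = (struct xsk_tx_metadata *)
		((char *)umem_base + desc->addr - sizeof(*meta));
	memset(meta, 0, sizeof(*meta));
	meta->flags = XDP_TXMD_FLAGS_TIMESTAMP;

	desc->options |= XDP_TX_METADATA;	/* metadata is valid */
	/* On completion, meta->completion.tx_timestamp carries the stamp. */
}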
#endif /* _LINUX_IF_XDP_H */
diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h
index 90c28bc466c6..5930f2437cd1 100644
--- a/include/uapi/linux/igmp.h
+++ b/include/uapi/linux/igmp.h
@@ -48,7 +48,7 @@ struct igmpv3_grec {
__u8 grec_auxwords;
__be16 grec_nsrcs;
__be32 grec_mca;
- __be32 grec_src[0];
+ __be32 grec_src[];
};
struct igmpv3_report {
@@ -57,7 +57,7 @@ struct igmpv3_report {
__sum16 csum;
__be16 resv2;
__be16 ngrec;
- struct igmpv3_grec grec[0];
+ struct igmpv3_grec grec[];
};
struct igmpv3_query {
@@ -78,7 +78,7 @@ struct igmpv3_query {
#endif
__u8 qqic;
__be16 nsrcs;
- __be32 srcs[0];
+ __be32 srcs[];
};
#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */
diff --git a/include/uapi/linux/iio/buffer.h b/include/uapi/linux/iio/buffer.h
new file mode 100644
index 000000000000..13939032b3f6
--- /dev/null
+++ b/include/uapi/linux/iio/buffer.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* industrial I/O buffer definitions needed both in and out of kernel
+ */
+
+#ifndef _UAPI_IIO_BUFFER_H_
+#define _UAPI_IIO_BUFFER_H_
+
+#define IIO_BUFFER_GET_FD_IOCTL _IOWR('i', 0x91, int)
+
+#endif /* _UAPI_IIO_BUFFER_H_ */
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index fdd81affca4b..f2e0b2d50e6b 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -47,6 +47,10 @@ enum iio_chan_type {
IIO_POSITIONRELATIVE,
IIO_PHASE,
IIO_MASSCONCENTRATION,
+ IIO_DELTA_ANGL,
+ IIO_DELTA_VELOCITY,
+ IIO_COLORTEMP,
+ IIO_CHROMATICITY,
};
enum iio_modifier {
@@ -94,6 +98,15 @@ enum iio_modifier {
IIO_MOD_PM10,
IIO_MOD_ETHANOL,
IIO_MOD_H2,
+ IIO_MOD_O2,
+ IIO_MOD_LINEAR_X,
+ IIO_MOD_LINEAR_Y,
+ IIO_MOD_LINEAR_Z,
+ IIO_MOD_PITCH,
+ IIO_MOD_YAW,
+ IIO_MOD_ROLL,
+ IIO_MOD_LIGHT_UVA,
+ IIO_MOD_LIGHT_UVB,
};
enum iio_event_type {
@@ -103,6 +116,8 @@ enum iio_event_type {
IIO_EV_TYPE_THRESH_ADAPTIVE,
IIO_EV_TYPE_MAG_ADAPTIVE,
IIO_EV_TYPE_CHANGE,
+ IIO_EV_TYPE_MAG_REFERENCED,
+ IIO_EV_TYPE_GESTURE,
};
enum iio_event_direction {
@@ -110,7 +125,8 @@ enum iio_event_direction {
IIO_EV_DIR_RISING,
IIO_EV_DIR_FALLING,
IIO_EV_DIR_NONE,
+ IIO_EV_DIR_SINGLETAP,
+ IIO_EV_DIR_DOUBLETAP,
};
#endif /* _UAPI_IIO_TYPES_H_ */
-
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 7d6687618d80..e682ab628dfa 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -20,6 +20,7 @@
#define _UAPI_LINUX_IN_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/libc-compat.h>
#include <linux/socket.h>
@@ -68,6 +69,8 @@ enum {
#define IPPROTO_PIM IPPROTO_PIM
IPPROTO_COMP = 108, /* Compression Header Protocol */
#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_L2TP = 115, /* Layer 2 Tunnelling Protocol */
+#define IPPROTO_L2TP IPPROTO_L2TP
IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
@@ -159,6 +162,8 @@ struct in_addr {
#define MCAST_MSFILTER 48
#define IP_MULTICAST_ALL 49
#define IP_UNICAST_IF 50
+#define IP_LOCAL_PORT_RANGE 51
+#define IP_PROTOCOL 52
#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1
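[Editor's note] A sketch of narrowing ephemeral port selection on one socket via the new IP_LOCAL_PORT_RANGE option, assuming the packing used when the option was introduced: lower bound in the low 16 bits, upper bound in the high 16 bits, 0/0 restoring the net-wide range.

#include <linux/in.h>
#include <sys/socket.h>

static int set_port_range(int sock, __u16 lo, __u16 hi)
{
	__u32 range = ((__u32)hi << 16) | lo;	/* hi:lo packed into a u32 */

	return setsockopt(sock, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
			  &range, sizeof(range));
}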
@@ -192,7 +197,10 @@ struct ip_msfilter {
__be32 imsf_interface;
__u32 imsf_fmode;
__u32 imsf_numsrc;
- __be32 imsf_slist[1];
+ union {
+ __be32 imsf_slist[1];
+ __DECLARE_FLEX_ARRAY(__be32, imsf_slist_flex);
+ };
};
#define IP_MSFILTER_SIZE(numsrc) \
@@ -211,11 +219,22 @@ struct group_source_req {
};
struct group_filter {
- __u32 gf_interface; /* interface index */
- struct __kernel_sockaddr_storage gf_group; /* multicast address */
- __u32 gf_fmode; /* filter mode */
- __u32 gf_numsrc; /* number of sources */
- struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ union {
+ struct {
+ __u32 gf_interface_aux; /* interface index */
+ struct __kernel_sockaddr_storage gf_group_aux; /* multicast address */
+ __u32 gf_fmode_aux; /* filter mode */
+ __u32 gf_numsrc_aux; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ };
+ struct {
+ __u32 gf_interface; /* interface index */
+ struct __kernel_sockaddr_storage gf_group; /* multicast address */
+ __u32 gf_fmode; /* filter mode */
+ __u32 gf_numsrc; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist_flex[]; /* interface index */
+ };
+ };
};
#define GROUP_FILTER_SIZE(numsrc) \
@@ -289,6 +308,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 5ad396a57eb3..ff8d21f9e95b 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -145,6 +145,7 @@ struct in6_flowlabel_req {
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 5
#define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
+#define IPV6_TLV_IOAM 49 /* RFC 9486 */
#define IPV6_TLV_JUMBO 194
#define IPV6_TLV_HAO 201 /* home address option */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 5ba122c1949a..50655de04c9b 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -104,7 +104,7 @@ struct inet_diag_hostcond {
__u8 family;
__u8 prefix_len;
int port;
- __be32 addr[0];
+ __be32 addr[];
};
struct inet_diag_markcond {
@@ -160,6 +160,7 @@ enum {
INET_DIAG_ULP_INFO,
INET_DIAG_SK_BPF_STORAGES,
INET_DIAG_CGROUP_ID,
+ INET_DIAG_SOCKOPT,
__INET_DIAG_MAX,
};
@@ -183,6 +184,23 @@ struct inet_diag_meminfo {
__u32 idiag_tmem;
};
+/* INET_DIAG_SOCKOPT */
+
+struct inet_diag_sockopt {
+ __u8 recverr:1,
+ is_icsk:1,
+ freebind:1,
+ hdrincl:1,
+ mc_loop:1,
+ transparent:1,
+ mc_all:1,
+ nodefrag:1;
+ __u8 bind_address_no_port:1,
+ recverr_rfc4884:1,
+ defer_connect:1,
+ unused:5;
+};
+
/* INET_DIAG_VEGASINFO */
struct tcpvegas_info {
diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h
index 884b4846b630..d94f20e38e5d 100644
--- a/include/uapi/linux/inotify.h
+++ b/include/uapi/linux/inotify.h
@@ -23,15 +23,15 @@ struct inotify_event {
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
- char name[0]; /* stub for possible name */
+ char name[]; /* stub for possible name */
};
/* the following are legal, implemented events that user-space can watch for */
#define IN_ACCESS 0x00000001 /* File was accessed */
#define IN_MODIFY 0x00000002 /* File was modified */
#define IN_ATTRIB 0x00000004 /* Metadata changed */
-#define IN_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
-#define IN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
+#define IN_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define IN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define IN_OPEN 0x00000020 /* File was opened */
#define IN_MOVED_FROM 0x00000040 /* File was moved from X */
#define IN_MOVED_TO 0x00000080 /* File was moved to Y */
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 0c2e27d28e0a..03edf2ccdf6c 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -278,7 +278,8 @@
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
-#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
@@ -515,6 +516,9 @@
#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
#define KEY_IMAGES 0x1ba /* AL Image Browser */
+#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
+#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
+#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
@@ -542,6 +546,7 @@
#define KEY_FN_F 0x1e2
#define KEY_FN_S 0x1e3
#define KEY_FN_B 0x1e4
+#define KEY_FN_RIGHT_SHIFT 0x1e5
#define KEY_BRL_DOT1 0x1f1
#define KEY_BRL_DOT2 0x1f2
@@ -597,6 +602,7 @@
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
+#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
@@ -607,6 +613,11 @@
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
+#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
+#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
+#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
@@ -655,6 +666,27 @@
/* Select an area of screen to be copied */
#define KEY_SELECTIVE_SCREENSHOT 0x27a
+/* Move the focus to the next or previous user controllable element within a UI container */
+#define KEY_NEXT_ELEMENT 0x27b
+#define KEY_PREVIOUS_ELEMENT 0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT 0x27e
+#define KEY_SOS 0x27f
+#define KEY_NAV_CHART 0x280
+#define KEY_FISHING_CHART 0x281
+#define KEY_SINGLE_RANGE_RADAR 0x282
+#define KEY_DUAL_RANGE_RADAR 0x283
+#define KEY_RADAR_OVERLAY 0x284
+#define KEY_TRADITIONAL_SONAR 0x285
+#define KEY_CLEARVU_SONAR 0x286
+#define KEY_SIDEVU_SONAR 0x287
+#define KEY_NAV_INFO 0x288
+#define KEY_BRIGHTNESS_MENU 0x289
+
/*
* Some keyboards have keys which do not have a defined meaning, these keys
* are intended to be programmed / bound to macros by the user. For most
@@ -834,6 +866,7 @@
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
+#define ABS_PROFILE 0x21
#define ABS_MISC 0x28
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 9a61c28ed3ae..2557eb7b0561 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -78,13 +78,16 @@ struct input_id {
* Note that input core does not clamp reported values to the
* [minimum, maximum] limits, such task is left to userspace.
*
- * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z)
- * is reported in units per millimeter (units/mm), resolution
- * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported
- * in units per radian.
+ * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z,
+ * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units
+ * per millimeter (units/mm), resolution for rotational axes
+ * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
+ * The resolution for the size axes (ABS_MT_TOUCH_MAJOR,
+ * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR)
+ * is reported in units per millimeter (units/mm).
* When INPUT_PROP_ACCELEROMETER is set the resolution changes.
* The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in
- * in units per g (units/g) and in units per degree per second
+ * units per g (units/g) and in units per degree per second
* (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ).
*/
struct input_absinfo {
@@ -271,6 +274,7 @@ struct input_mask {
#define BUS_RMI 0x1D
#define BUS_CEC 0x1E
#define BUS_INTEL_ISHTP 0x1F
+#define BUS_AMD_SFH 0x20
/*
* MT_TOOL types
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index d65fde732518..7bd10201a02b 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -10,6 +10,19 @@
#include <linux/fs.h>
#include <linux/types.h>
+/*
+ * This file is shared with liburing, which has to autodetect whether
+ * linux/time_types.h is available. If it is not, liburing can define
+ * UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H to skip the include
+ * below.
+ */
+#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
+#include <linux/time_types.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
/*
* IO submission data structure (Submission Queue Entry)
@@ -22,10 +35,18 @@ struct io_uring_sqe {
union {
__u64 off; /* offset into file */
__u64 addr2;
+ struct {
+ __u32 cmd_op;
+ __u32 __pad1;
+ };
};
union {
__u64 addr; /* pointer to buffer or iovecs */
__u64 splice_off_in;
+ struct {
+ __u32 level;
+ __u32 optname;
+ };
};
__u32 len; /* buffer size or number of iovecs */
union {
@@ -42,25 +63,58 @@ struct io_uring_sqe {
__u32 statx_flags;
__u32 fadvise_advice;
__u32 splice_flags;
+ __u32 rename_flags;
+ __u32 unlink_flags;
+ __u32 hardlink_flags;
+ __u32 xattr_flags;
+ __u32 msg_ring_flags;
+ __u32 uring_cmd_flags;
+ __u32 waitid_flags;
+ __u32 futex_flags;
+ __u32 install_fd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
+ /* pack this to avoid bogus arm OABI complaints */
+ union {
+ /* index into fixed buffers, if used */
+ __u16 buf_index;
+ /* for grouped buffer selection */
+ __u16 buf_group;
+ } __attribute__((packed));
+ /* personality to use, if used */
+ __u16 personality;
+ union {
+ __s32 splice_fd_in;
+ __u32 file_index;
+ __u32 optlen;
+ struct {
+ __u16 addr_len;
+ __u16 __pad3[1];
+ };
+ };
union {
struct {
- /* pack this to avoid bogus arm OABI complaints */
- union {
- /* index into fixed buffers, if used */
- __u16 buf_index;
- /* for grouped buffer selection */
- __u16 buf_group;
- } __attribute__((packed));
- /* personality to use, if used */
- __u16 personality;
- __s32 splice_fd_in;
+ __u64 addr3;
+ __u64 __pad2[1];
};
- __u64 __pad2[3];
+ __u64 optval;
+ /*
+ * If the ring is initialized with IORING_SETUP_SQE128, then
+ * this field is used for 80 bytes of arbitrary command data
+ */
+ __u8 cmd[0];
};
};
+/*
+ * If sqe->file_index is set to this for opcodes that instantiate a new
+ * direct descriptor (like openat/openat2/accept), then io_uring will allocate
+ * an available direct descriptor instead of having the application pass one
+ * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
+ * if the space is full.
+ */
+#define IORING_FILE_INDEX_ALLOC (~0U)
+
enum {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,
@@ -68,6 +122,7 @@ enum {
IOSQE_IO_HARDLINK_BIT,
IOSQE_ASYNC_BIT,
IOSQE_BUFFER_SELECT_BIT,
+ IOSQE_CQE_SKIP_SUCCESS_BIT,
};
/*
@@ -85,6 +140,8 @@ enum {
#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
+/* don't post CQE if request succeeded */
+#define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
/*
* io_uring_setup() flags
@@ -95,8 +152,54 @@ enum {
#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
+#define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */
+#define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */
+/*
+ * Cooperative task running. When requests complete, they often require
+ * forcing the submitter to transition to the kernel to complete. If this
+ * flag is set, work will be done when the task transitions anyway, rather
+ * than force an inter-processor interrupt reschedule. This avoids interrupting
+ * a task running in userspace, and saves an IPI.
+ */
+#define IORING_SETUP_COOP_TASKRUN (1U << 8)
+/*
+ * If COOP_TASKRUN is set, get notified if task work is available for
+ * running and a kernel transition would be needed to run it. This sets
+ * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
+ */
+#define IORING_SETUP_TASKRUN_FLAG (1U << 9)
+#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
+#define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */
+/*
+ * Only one task is allowed to submit requests
+ */
+#define IORING_SETUP_SINGLE_ISSUER (1U << 12)
-enum {
+/*
+ * Defer running task work to get events.
+ * Rather than running bits of task work whenever the task transitions
+ * try to do it just before it is needed.
+ */
+#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
+
+/*
+ * Application provides the memory for the rings
+ */
+#define IORING_SETUP_NO_MMAP (1U << 14)
+
+/*
+ * Register the ring fd in itself for use with
+ * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
+ * than an fd.
+ */
+#define IORING_SETUP_REGISTERED_FD_ONLY (1U << 15)
+
+/*
+ * Removes indirection through the SQ index array.
+ */
+#define IORING_SETUP_NO_SQARRAY (1U << 16)
+
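[Editor's note] A sketch of creating a ring with the deferred-taskrun combination above via the raw syscall (liburing wraps this in io_uring_queue_init_params); IORING_SETUP_DEFER_TASKRUN requires IORING_SETUP_SINGLE_ISSUER:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_ring(unsigned int entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SINGLE_ISSUER |	/* one submitting task */
		  IORING_SETUP_DEFER_TASKRUN;	/* run task work on demand */

	return syscall(__NR_io_uring_setup, entries, &p);	/* ring fd */
}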
+enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
IORING_OP_WRITEV,
@@ -131,12 +234,43 @@ enum {
IORING_OP_PROVIDE_BUFFERS,
IORING_OP_REMOVE_BUFFERS,
IORING_OP_TEE,
+ IORING_OP_SHUTDOWN,
+ IORING_OP_RENAMEAT,
+ IORING_OP_UNLINKAT,
+ IORING_OP_MKDIRAT,
+ IORING_OP_SYMLINKAT,
+ IORING_OP_LINKAT,
+ IORING_OP_MSG_RING,
+ IORING_OP_FSETXATTR,
+ IORING_OP_SETXATTR,
+ IORING_OP_FGETXATTR,
+ IORING_OP_GETXATTR,
+ IORING_OP_SOCKET,
+ IORING_OP_URING_CMD,
+ IORING_OP_SEND_ZC,
+ IORING_OP_SENDMSG_ZC,
+ IORING_OP_READ_MULTISHOT,
+ IORING_OP_WAITID,
+ IORING_OP_FUTEX_WAIT,
+ IORING_OP_FUTEX_WAKE,
+ IORING_OP_FUTEX_WAITV,
+ IORING_OP_FIXED_FD_INSTALL,
+ IORING_OP_FTRUNCATE,
/* this goes last, obviously */
IORING_OP_LAST,
};
/*
+ * sqe->uring_cmd_flags	top 8 bits aren't available for userspace
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
+ * along with setting sqe->buf_index.
+ */
+#define IORING_URING_CMD_FIXED (1U << 0)
+#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED
+
+
+/*
* sqe->fsync_flags
*/
#define IORING_FSYNC_DATASYNC (1U << 0)
@@ -144,8 +278,15 @@ enum {
/*
* sqe->timeout_flags
*/
-#define IORING_TIMEOUT_ABS (1U << 0)
-
+#define IORING_TIMEOUT_ABS (1U << 0)
+#define IORING_TIMEOUT_UPDATE (1U << 1)
+#define IORING_TIMEOUT_BOOTTIME (1U << 2)
+#define IORING_TIMEOUT_REALTIME (1U << 3)
+#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
+#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
+#define IORING_TIMEOUT_MULTISHOT (1U << 6)
+#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
+#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
* sqe->splice_flags
* extends splice(2) flags
@@ -153,20 +294,136 @@ enum {
#define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */
/*
+ * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
+ * command flags for POLL_ADD are stored in sqe->len.
+ *
+ * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if
+ * the poll handler will continue to report
+ * CQEs on behalf of the same SQE.
+ *
+ * IORING_POLL_UPDATE Update existing poll request, matching
+ * sqe->addr as the old user_data field.
+ *
+ * IORING_POLL_ADD_LEVEL	Level triggered poll.
+ */
+#define IORING_POLL_ADD_MULTI (1U << 0)
+#define IORING_POLL_UPDATE_EVENTS (1U << 1)
+#define IORING_POLL_UPDATE_USER_DATA (1U << 2)
+#define IORING_POLL_ADD_LEVEL (1U << 3)
+
+/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key
+ * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the
+ * request 'user_data'
+ * IORING_ASYNC_CANCEL_ANY Match any request
+ * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
+ * IORING_ASYNC_CANCEL_USERDATA Match on user_data, default for no other key
+ * IORING_ASYNC_CANCEL_OP Match request based on opcode
+ */
+#define IORING_ASYNC_CANCEL_ALL (1U << 0)
+#define IORING_ASYNC_CANCEL_FD (1U << 1)
+#define IORING_ASYNC_CANCEL_ANY (1U << 2)
+#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3)
+#define IORING_ASYNC_CANCEL_USERDATA (1U << 4)
+#define IORING_ASYNC_CANCEL_OP (1U << 5)
+
+/*
+ * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
+ *
+ * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send
+ * or receive and arm poll if that yields an
+ * -EAGAIN result, arm poll upfront and skip
+ * the initial transfer attempt.
+ *
+ * IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if
+ * the handler will continue to report
+ * CQEs on behalf of the same SQE.
+ *
+ * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
+ * the buf_index field.
+ *
+ * IORING_SEND_ZC_REPORT_USAGE
+ * If set, SEND[MSG]_ZC should report
+ * the zerocopy usage in cqe.res
+ * for the IORING_CQE_F_NOTIF cqe.
+ * 0 is reported if zerocopy was actually possible.
+ * IORING_NOTIF_USAGE_ZC_COPIED if data was copied
+ * (at least partially).
+ */
+#define IORING_RECVSEND_POLL_FIRST (1U << 0)
+#define IORING_RECV_MULTISHOT (1U << 1)
+#define IORING_RECVSEND_FIXED_BUF (1U << 2)
+#define IORING_SEND_ZC_REPORT_USAGE (1U << 3)
+
+/*
+ * cqe.res for IORING_CQE_F_NOTIF if
+ * IORING_SEND_ZC_REPORT_USAGE was requested
+ *
+ * It should be treated as a flag, all other
+ * bits of cqe.res should be treated as reserved!
+ */
+#define IORING_NOTIF_USAGE_ZC_COPIED (1U << 31)
+
+/*
+ * accept flags stored in sqe->ioprio
+ */
+#define IORING_ACCEPT_MULTISHOT (1U << 0)
+
+/*
+ * IORING_OP_MSG_RING command types, stored in sqe->addr
+ */
+enum {
+ IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
+ IORING_MSG_SEND_FD, /* send a registered fd to another ring */
+};
+
+/*
+ * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
+ *
+ * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
+ * applicable for IORING_MSG_DATA, obviously.
+ */
+#define IORING_MSG_RING_CQE_SKIP (1U << 0)
+/* Pass through the flags from sqe->file_index to cqe->flags */
+#define IORING_MSG_RING_FLAGS_PASS (1U << 1)
+
+/*
+ * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
+ *
+ * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC
+ */
+#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
__u64 user_data; /* sqe->data submission passed back */
__s32 res; /* result code for this event */
__u32 flags;
+
+ /*
+ * If the ring is initialized with IORING_SETUP_CQE32, then this field
+ * contains 16 bytes of padding, doubling the size of the CQE.
+ */
+ __u64 big_cqe[];
};
/*
* cqe->flags
*
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
+ * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
+ * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
+ * IORING_CQE_F_NOTIF	Set for notification CQEs. Can be used to distinguish
+ *			them from sends.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
+#define IORING_CQE_F_MORE (1U << 1)
+#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
+#define IORING_CQE_F_NOTIF (1U << 3)
enum {
IORING_CQE_BUFFER_SHIFT = 16,
@@ -178,6 +435,9 @@ enum {
#define IORING_OFF_SQ_RING 0ULL
#define IORING_OFF_CQ_RING 0x8000000ULL
#define IORING_OFF_SQES 0x10000000ULL
+#define IORING_OFF_PBUF_RING 0x80000000ULL
+#define IORING_OFF_PBUF_SHIFT 16
+#define IORING_OFF_MMAP_MASK 0xf8000000ULL
/*
* Filled with the offset for mmap(2)
@@ -191,7 +451,7 @@ struct io_sqring_offsets {
__u32 dropped;
__u32 array;
__u32 resv1;
- __u64 resv2;
+ __u64 user_addr;
};
/*
@@ -199,6 +459,7 @@ struct io_sqring_offsets {
*/
#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
+#define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */
struct io_cqring_offsets {
__u32 head;
@@ -209,7 +470,7 @@ struct io_cqring_offsets {
__u32 cqes;
__u32 flags;
__u32 resv1;
- __u64 resv2;
+ __u64 user_addr;
};
/*
@@ -222,8 +483,11 @@ struct io_cqring_offsets {
/*
* io_uring_enter(2) flags
*/
-#define IORING_ENTER_GETEVENTS (1U << 0)
-#define IORING_ENTER_SQ_WAKEUP (1U << 1)
+#define IORING_ENTER_GETEVENTS (1U << 0)
+#define IORING_ENTER_SQ_WAKEUP (1U << 1)
+#define IORING_ENTER_SQ_WAIT (1U << 2)
+#define IORING_ENTER_EXT_ARG (1U << 3)
+#define IORING_ENTER_REGISTERED_RING (1U << 4)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -251,28 +515,118 @@ struct io_uring_params {
#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
#define IORING_FEAT_FAST_POLL (1U << 5)
#define IORING_FEAT_POLL_32BITS (1U << 6)
+#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
+#define IORING_FEAT_EXT_ARG (1U << 8)
+#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
+#define IORING_FEAT_RSRC_TAGS (1U << 10)
+#define IORING_FEAT_CQE_SKIP (1U << 11)
+#define IORING_FEAT_LINKED_FILE (1U << 12)
+#define IORING_FEAT_REG_REG_RING (1U << 13)
/*
* io_uring_register(2) opcodes and arguments
*/
-#define IORING_REGISTER_BUFFERS 0
-#define IORING_UNREGISTER_BUFFERS 1
-#define IORING_REGISTER_FILES 2
-#define IORING_UNREGISTER_FILES 3
-#define IORING_REGISTER_EVENTFD 4
-#define IORING_UNREGISTER_EVENTFD 5
-#define IORING_REGISTER_FILES_UPDATE 6
-#define IORING_REGISTER_EVENTFD_ASYNC 7
-#define IORING_REGISTER_PROBE 8
-#define IORING_REGISTER_PERSONALITY 9
-#define IORING_UNREGISTER_PERSONALITY 10
+enum {
+ IORING_REGISTER_BUFFERS = 0,
+ IORING_UNREGISTER_BUFFERS = 1,
+ IORING_REGISTER_FILES = 2,
+ IORING_UNREGISTER_FILES = 3,
+ IORING_REGISTER_EVENTFD = 4,
+ IORING_UNREGISTER_EVENTFD = 5,
+ IORING_REGISTER_FILES_UPDATE = 6,
+ IORING_REGISTER_EVENTFD_ASYNC = 7,
+ IORING_REGISTER_PROBE = 8,
+ IORING_REGISTER_PERSONALITY = 9,
+ IORING_UNREGISTER_PERSONALITY = 10,
+ IORING_REGISTER_RESTRICTIONS = 11,
+ IORING_REGISTER_ENABLE_RINGS = 12,
+
+ /* extended with tagging */
+ IORING_REGISTER_FILES2 = 13,
+ IORING_REGISTER_FILES_UPDATE2 = 14,
+ IORING_REGISTER_BUFFERS2 = 15,
+ IORING_REGISTER_BUFFERS_UPDATE = 16,
+
+ /* set/clear io-wq thread affinities */
+ IORING_REGISTER_IOWQ_AFF = 17,
+ IORING_UNREGISTER_IOWQ_AFF = 18,
+
+ /* set/get max number of io-wq workers */
+ IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
+ /* register/unregister io_uring fd with the ring */
+ IORING_REGISTER_RING_FDS = 20,
+ IORING_UNREGISTER_RING_FDS = 21,
+
+ /* register ring based provide buffer group */
+ IORING_REGISTER_PBUF_RING = 22,
+ IORING_UNREGISTER_PBUF_RING = 23,
+
+ /* sync cancelation API */
+ IORING_REGISTER_SYNC_CANCEL = 24,
+
+ /* register a range of fixed file slots for automatic slot allocation */
+ IORING_REGISTER_FILE_ALLOC_RANGE = 25,
+
+ /* return status information for a buffer group */
+ IORING_REGISTER_PBUF_STATUS = 26,
+
+ /* set/clear busy poll settings */
+ IORING_REGISTER_NAPI = 27,
+ IORING_UNREGISTER_NAPI = 28,
+
+ /* this goes last */
+ IORING_REGISTER_LAST,
+
+ /* flag added to the opcode to use a registered ring fd */
+ IORING_REGISTER_USE_REGISTERED_RING = 1U << 31
+};
+
+/* io-wq worker categories */
+enum {
+ IO_WQ_BOUND,
+ IO_WQ_UNBOUND,
+};
+
+/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
__u32 offset;
__u32 resv;
__aligned_u64 /* __s32 * */ fds;
};
+/*
+ * Register a fully sparse file space, rather than pass in an array of all
+ * -1 file descriptors.
+ */
+#define IORING_RSRC_REGISTER_SPARSE (1U << 0)
+
+struct io_uring_rsrc_register {
+ __u32 nr;
+ __u32 flags;
+ __u64 resv2;
+ __aligned_u64 data;
+ __aligned_u64 tags;
+};
+
+struct io_uring_rsrc_update {
+ __u32 offset;
+ __u32 resv;
+ __aligned_u64 data;
+};
+
+struct io_uring_rsrc_update2 {
+ __u32 offset;
+ __u32 resv;
+ __aligned_u64 data;
+ __aligned_u64 tags;
+ __u32 nr;
+ __u32 resv2;
+};
+
+/* Skip updating fd indexes set to this value in the fd table */
+#define IORING_REGISTER_FILES_SKIP (-2)
+
#define IO_URING_OP_SUPPORTED (1U << 0)
struct io_uring_probe_op {
@@ -287,7 +641,149 @@ struct io_uring_probe {
__u8 ops_len; /* length of ops[] array below */
__u16 resv;
__u32 resv2[3];
- struct io_uring_probe_op ops[0];
+ struct io_uring_probe_op ops[];
+};
+
+struct io_uring_restriction {
+ __u16 opcode;
+ union {
+ __u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
+ __u8 sqe_op; /* IORING_RESTRICTION_SQE_OP */
+ __u8 sqe_flags; /* IORING_RESTRICTION_SQE_FLAGS_* */
+ };
+ __u8 resv;
+ __u32 resv2[3];
+};
+
+struct io_uring_buf {
+ __u64 addr;
+ __u32 len;
+ __u16 bid;
+ __u16 resv;
+};
+
+struct io_uring_buf_ring {
+ union {
+ /*
+ * To avoid spilling into more pages than we need to, the
+ * ring tail is overlaid with the io_uring_buf->resv field.
+ */
+ struct {
+ __u64 resv1;
+ __u32 resv2;
+ __u16 resv3;
+ __u16 tail;
+ };
+ __DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
+ };
+};
+
+/*
+ * Flags for IORING_REGISTER_PBUF_RING.
+ *
+ * IOU_PBUF_RING_MMAP: If set, kernel will allocate the memory for the ring.
+ * The application must not set a ring_addr in struct
+ * io_uring_buf_reg, instead it must subsequently call
+ * mmap(2) with the offset set as:
+ * IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
+ * to get a virtual mapping for the ring.
+ */
+enum {
+ IOU_PBUF_RING_MMAP = 1,
+};
+
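[Editor's note] When IOU_PBUF_RING_MMAP is used, the ring memory is obtained from the kernel with the offset encoding documented in the comment above; a sketch:

#include <linux/io_uring.h>
#include <sys/mman.h>

static void *map_pbuf_ring(int ring_fd, __u16 bgid, size_t size)
{
	__u64 off = IORING_OFF_PBUF_RING |
		    ((__u64)bgid << IORING_OFF_PBUF_SHIFT);

	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, off);
}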
+/* argument for IORING_(UN)REGISTER_PBUF_RING */
+struct io_uring_buf_reg {
+ __u64 ring_addr;
+ __u32 ring_entries;
+ __u16 bgid;
+ __u16 flags;
+ __u64 resv[3];
+};
+
+/* argument for IORING_REGISTER_PBUF_STATUS */
+struct io_uring_buf_status {
+ __u32 buf_group; /* input */
+ __u32 head; /* output */
+ __u32 resv[8];
+};
+
+/* argument for IORING_(UN)REGISTER_NAPI */
+struct io_uring_napi {
+ __u32 busy_poll_to;
+ __u8 prefer_busy_poll;
+ __u8 pad[3];
+ __u64 resv;
+};
+
+/*
+ * io_uring_restriction->opcode values
+ */
+enum {
+ /* Allow an io_uring_register(2) opcode */
+ IORING_RESTRICTION_REGISTER_OP = 0,
+
+ /* Allow an sqe opcode */
+ IORING_RESTRICTION_SQE_OP = 1,
+
+ /* Allow sqe flags */
+ IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2,
+
+ /* Require sqe flags (these flags must be set on each submission) */
+ IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3,
+
+ IORING_RESTRICTION_LAST
+};
+
+struct io_uring_getevents_arg {
+ __u64 sigmask;
+ __u32 sigmask_sz;
+ __u32 pad;
+ __u64 ts;
+};
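[Editor's note] struct io_uring_getevents_arg is what IORING_ENTER_EXT_ARG points at: the final two io_uring_enter(2) arguments become this struct and its size instead of a sigset. A sketch of a timed wait (assumes linux/time_types.h is available):

#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_cqes(int ring_fd, unsigned int min_complete,
		     struct __kernel_timespec *ts)
{
	struct io_uring_getevents_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.ts = (__u64)(unsigned long)ts;	/* pointer passed as u64 */

	return syscall(__NR_io_uring_enter, ring_fd, 0, min_complete,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}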
+
+/*
+ * Argument for IORING_REGISTER_SYNC_CANCEL
+ */
+struct io_uring_sync_cancel_reg {
+ __u64 addr;
+ __s32 fd;
+ __u32 flags;
+ struct __kernel_timespec timeout;
+ __u8 opcode;
+ __u8 pad[7];
+ __u64 pad2[3];
+};
+
+/*
+ * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
+ * The range is specified as [off, off + len)
+ */
+struct io_uring_file_index_range {
+ __u32 off;
+ __u32 len;
+ __u64 resv;
};
+struct io_uring_recvmsg_out {
+ __u32 namelen;
+ __u32 controllen;
+ __u32 payloadlen;
+ __u32 flags;
+};
+
+/*
+ * Argument for IORING_OP_URING_CMD when file is a socket
+ */
+enum {
+ SOCKET_URING_OP_SIOCINQ = 0,
+ SOCKET_URING_OP_SIOCOUTQ,
+ SOCKET_URING_OP_GETSOCKOPT,
+ SOCKET_URING_OP_SETSOCKOPT,
+};
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/uapi/linux/ioam6.h b/include/uapi/linux/ioam6.h
new file mode 100644
index 000000000000..8f72b24fefb3
--- /dev/null
+++ b/include/uapi/linux/ioam6.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 IOAM implementation
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _UAPI_LINUX_IOAM6_H
+#define _UAPI_LINUX_IOAM6_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
+#define IOAM6_U16_UNAVAILABLE U16_MAX
+#define IOAM6_U32_UNAVAILABLE U32_MAX
+#define IOAM6_U64_UNAVAILABLE U64_MAX
+
+#define IOAM6_DEFAULT_ID (IOAM6_U32_UNAVAILABLE >> 8)
+#define IOAM6_DEFAULT_ID_WIDE (IOAM6_U64_UNAVAILABLE >> 8)
+#define IOAM6_DEFAULT_IF_ID IOAM6_U16_UNAVAILABLE
+#define IOAM6_DEFAULT_IF_ID_WIDE IOAM6_U32_UNAVAILABLE
+
+/*
+ * IPv6 IOAM Option Header
+ */
+struct ioam6_hdr {
+ __u8 opt_type;
+ __u8 opt_len;
+ __u8 :8; /* reserved */
+#define IOAM6_TYPE_PREALLOC 0
+ __u8 type;
+} __attribute__((packed));
+
+/*
+ * IOAM Trace Header
+ */
+struct ioam6_trace_hdr {
+ __be16 namespace_id;
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+
+ __u8 :1, /* unused */
+ :1, /* unused */
+ overflow:1,
+ nodelen:5;
+
+ __u8 remlen:7,
+ :1; /* unused */
+
+ union {
+ __be32 type_be32;
+
+ struct {
+ __u32 bit7:1,
+ bit6:1,
+ bit5:1,
+ bit4:1,
+ bit3:1,
+ bit2:1,
+ bit1:1,
+ bit0:1,
+ bit15:1, /* unused */
+ bit14:1, /* unused */
+ bit13:1, /* unused */
+ bit12:1, /* unused */
+ bit11:1,
+ bit10:1,
+ bit9:1,
+ bit8:1,
+ bit23:1, /* reserved */
+ bit22:1,
+ bit21:1, /* unused */
+ bit20:1, /* unused */
+ bit19:1, /* unused */
+ bit18:1, /* unused */
+ bit17:1, /* unused */
+ bit16:1, /* unused */
+ :8; /* reserved */
+ } type;
+ };
+
+#elif defined(__BIG_ENDIAN_BITFIELD)
+
+ __u8 nodelen:5,
+ overflow:1,
+ :1, /* unused */
+ :1; /* unused */
+
+ __u8 :1, /* unused */
+ remlen:7;
+
+ union {
+ __be32 type_be32;
+
+ struct {
+ __u32 bit0:1,
+ bit1:1,
+ bit2:1,
+ bit3:1,
+ bit4:1,
+ bit5:1,
+ bit6:1,
+ bit7:1,
+ bit8:1,
+ bit9:1,
+ bit10:1,
+ bit11:1,
+ bit12:1, /* unused */
+ bit13:1, /* unused */
+ bit14:1, /* unused */
+ bit15:1, /* unused */
+ bit16:1, /* unused */
+ bit17:1, /* unused */
+ bit18:1, /* unused */
+ bit19:1, /* unused */
+ bit20:1, /* unused */
+ bit21:1, /* unused */
+ bit22:1,
+ bit23:1, /* reserved */
+ :8; /* reserved */
+ } type;
+ };
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define IOAM6_TRACE_DATA_SIZE_MAX 244
+ __u8 data[];
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IOAM6_H */
diff --git a/include/uapi/linux/ioam6_genl.h b/include/uapi/linux/ioam6_genl.h
new file mode 100644
index 000000000000..1733fbc51fb5
--- /dev/null
+++ b/include/uapi/linux/ioam6_genl.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _UAPI_LINUX_IOAM6_GENL_H
+#define _UAPI_LINUX_IOAM6_GENL_H
+
+#define IOAM6_GENL_NAME "IOAM6"
+#define IOAM6_GENL_VERSION 0x1
+
+enum {
+ IOAM6_ATTR_UNSPEC,
+
+ IOAM6_ATTR_NS_ID, /* u16 */
+ IOAM6_ATTR_NS_DATA, /* u32 */
+ IOAM6_ATTR_NS_DATA_WIDE,/* u64 */
+
+#define IOAM6_MAX_SCHEMA_DATA_LEN (255 * 4)
+ IOAM6_ATTR_SC_ID, /* u32 */
+ IOAM6_ATTR_SC_DATA, /* Binary */
+ IOAM6_ATTR_SC_NONE, /* Flag */
+
+ IOAM6_ATTR_PAD,
+
+ __IOAM6_ATTR_MAX,
+};
+
+#define IOAM6_ATTR_MAX (__IOAM6_ATTR_MAX - 1)
+
+enum {
+ IOAM6_CMD_UNSPEC,
+
+ IOAM6_CMD_ADD_NAMESPACE,
+ IOAM6_CMD_DEL_NAMESPACE,
+ IOAM6_CMD_DUMP_NAMESPACES,
+
+ IOAM6_CMD_ADD_SCHEMA,
+ IOAM6_CMD_DEL_SCHEMA,
+ IOAM6_CMD_DUMP_SCHEMAS,
+
+ IOAM6_CMD_NS_SET_SCHEMA,
+
+ __IOAM6_CMD_MAX,
+};
+
+#define IOAM6_CMD_MAX (__IOAM6_CMD_MAX - 1)
+
+#define IOAM6_GENL_EV_GRP_NAME "ioam6_events"
+
+enum ioam6_event_type {
+ IOAM6_EVENT_UNSPEC,
+ IOAM6_EVENT_TRACE,
+};
+
+enum ioam6_event_attr {
+ IOAM6_EVENT_ATTR_UNSPEC,
+
+ IOAM6_EVENT_ATTR_TRACE_NAMESPACE, /* u16 */
+ IOAM6_EVENT_ATTR_TRACE_NODELEN, /* u8 */
+ IOAM6_EVENT_ATTR_TRACE_TYPE, /* u32 */
+ IOAM6_EVENT_ATTR_TRACE_DATA, /* Binary */
+
+ __IOAM6_EVENT_ATTR_MAX
+};
+
+#define IOAM6_EVENT_ATTR_MAX (__IOAM6_EVENT_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_IOAM6_GENL_H */
diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..38f6a8fdfd34
--- /dev/null
+++ b/include/uapi/linux/ioam6_iptunnel.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+
+#ifndef _UAPI_LINUX_IOAM6_IPTUNNEL_H
+#define _UAPI_LINUX_IOAM6_IPTUNNEL_H
+
+/* Encap modes:
+ * - inline: direct insertion
+ * - encap: ip6ip6 encapsulation
+ * - auto: inline for local packets, encap for in-transit packets
+ */
+enum {
+ __IOAM6_IPTUNNEL_MODE_MIN,
+
+ IOAM6_IPTUNNEL_MODE_INLINE,
+ IOAM6_IPTUNNEL_MODE_ENCAP,
+ IOAM6_IPTUNNEL_MODE_AUTO,
+
+ __IOAM6_IPTUNNEL_MODE_MAX,
+};
+
+#define IOAM6_IPTUNNEL_MODE_MIN (__IOAM6_IPTUNNEL_MODE_MIN + 1)
+#define IOAM6_IPTUNNEL_MODE_MAX (__IOAM6_IPTUNNEL_MODE_MAX - 1)
+
+enum {
+ IOAM6_IPTUNNEL_UNSPEC,
+
+ /* Encap mode */
+ IOAM6_IPTUNNEL_MODE, /* u8 */
+
+ /* Tunnel dst address.
+ * For encap,auto modes.
+ */
+ IOAM6_IPTUNNEL_DST, /* struct in6_addr */
+
+ /* IOAM Trace Header */
+ IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */
+
+ /* Insertion frequency:
+ * "k over n" packets (0 < k <= n)
+ * [0.0001% ... 100%]
+ */
+#define IOAM6_IPTUNNEL_FREQ_MIN 1
+#define IOAM6_IPTUNNEL_FREQ_MAX 1000000
+ IOAM6_IPTUNNEL_FREQ_K, /* u32 */
+ IOAM6_IPTUNNEL_FREQ_N, /* u32 */
+
+ __IOAM6_IPTUNNEL_MAX,
+};
+
+#define IOAM6_IPTUNNEL_MAX (__IOAM6_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
deleted file mode 100644
index c2b2caf9ed41..000000000000
--- a/include/uapi/linux/iommu.h
+++ /dev/null
@@ -1,333 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * IOMMU user API definitions
- */
-
-#ifndef _UAPI_IOMMU_H
-#define _UAPI_IOMMU_H
-
-#include <linux/types.h>
-
-#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
-#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
-#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
-#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
-
-/* Generic fault types, can be expanded IRQ remapping fault */
-enum iommu_fault_type {
- IOMMU_FAULT_DMA_UNRECOV = 1, /* unrecoverable fault */
- IOMMU_FAULT_PAGE_REQ, /* page request fault */
-};
-
-enum iommu_fault_reason {
- IOMMU_FAULT_REASON_UNKNOWN = 0,
-
- /* Could not access the PASID table (fetch caused external abort) */
- IOMMU_FAULT_REASON_PASID_FETCH,
-
- /* PASID entry is invalid or has configuration errors */
- IOMMU_FAULT_REASON_BAD_PASID_ENTRY,
-
- /*
- * PASID is out of range (e.g. exceeds the maximum PASID
- * supported by the IOMMU) or disabled.
- */
- IOMMU_FAULT_REASON_PASID_INVALID,
-
- /*
- * An external abort occurred fetching (or updating) a translation
- * table descriptor
- */
- IOMMU_FAULT_REASON_WALK_EABT,
-
- /*
- * Could not access the page table entry (Bad address),
- * actual translation fault
- */
- IOMMU_FAULT_REASON_PTE_FETCH,
-
- /* Protection flag check failed */
- IOMMU_FAULT_REASON_PERMISSION,
-
- /* access flag check failed */
- IOMMU_FAULT_REASON_ACCESS,
-
- /* Output address of a translation stage caused Address Size fault */
- IOMMU_FAULT_REASON_OOR_ADDRESS,
-};
-
-/**
- * struct iommu_fault_unrecoverable - Unrecoverable fault data
- * @reason: reason of the fault, from &enum iommu_fault_reason
- * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
- * @pasid: Process Address Space ID
- * @perm: requested permission access using by the incoming transaction
- * (IOMMU_FAULT_PERM_* values)
- * @addr: offending page address
- * @fetch_addr: address that caused a fetch abort, if any
- */
-struct iommu_fault_unrecoverable {
- __u32 reason;
-#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0)
-#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1)
-#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2)
- __u32 flags;
- __u32 pasid;
- __u32 perm;
- __u64 addr;
- __u64 fetch_addr;
-};
-
-/**
- * struct iommu_fault_page_request - Page Request data
- * @flags: encodes whether the corresponding fields are valid and whether this
- * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
- * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
- * must have the same PASID value as the page request. When it is clear,
- * the page response should not have a PASID.
- * @pasid: Process Address Space ID
- * @grpid: Page Request Group Index
- * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
- * @addr: page address
- * @private_data: device-specific private information
- */
-struct iommu_fault_page_request {
-#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
-#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
-#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
-#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
- __u32 flags;
- __u32 pasid;
- __u32 grpid;
- __u32 perm;
- __u64 addr;
- __u64 private_data[2];
-};
-
-/**
- * struct iommu_fault - Generic fault data
- * @type: fault type from &enum iommu_fault_type
- * @padding: reserved for future use (should be zero)
- * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
- * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
- * @padding2: sets the fault size to allow for future extensions
- */
-struct iommu_fault {
- __u32 type;
- __u32 padding;
- union {
- struct iommu_fault_unrecoverable event;
- struct iommu_fault_page_request prm;
- __u8 padding2[56];
- };
-};
-
-/**
- * enum iommu_page_response_code - Return status of fault handlers
- * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
- * populated, retry the access. This is "Success" in PCI PRI.
- * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
- * this device if possible. This is "Response Failure" in PCI PRI.
- * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
- * access. This is "Invalid Request" in PCI PRI.
- */
-enum iommu_page_response_code {
- IOMMU_PAGE_RESP_SUCCESS = 0,
- IOMMU_PAGE_RESP_INVALID,
- IOMMU_PAGE_RESP_FAILURE,
-};
-
-/**
- * struct iommu_page_response - Generic page response information
- * @version: API version of this structure
- * @flags: encodes whether the corresponding fields are valid
- * (IOMMU_FAULT_PAGE_RESPONSE_* values)
- * @pasid: Process Address Space ID
- * @grpid: Page Request Group Index
- * @code: response code from &enum iommu_page_response_code
- */
-struct iommu_page_response {
-#define IOMMU_PAGE_RESP_VERSION_1 1
- __u32 version;
-#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0)
- __u32 flags;
- __u32 pasid;
- __u32 grpid;
- __u32 code;
-};
-
-/* defines the granularity of the invalidation */
-enum iommu_inv_granularity {
- IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
- IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
- IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
- IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
-};
-
-/**
- * struct iommu_inv_addr_info - Address Selective Invalidation Structure
- *
- * @flags: indicates the granularity of the address-selective invalidation
- * - If the PASID bit is set, the @pasid field is populated and the invalidation
- * relates to cache entries tagged with this PASID and matching the address
- * range.
- * - If ARCHID bit is set, @archid is populated and the invalidation relates
- * to cache entries tagged with this architecture specific ID and matching
- * the address range.
- * - Both PASID and ARCHID can be set as they may tag different caches.
- * - If neither PASID or ARCHID is set, global addr invalidation applies.
- * - The LEAF flag indicates whether only the leaf PTE caching needs to be
- * invalidated and other paging structure caches can be preserved.
- * @pasid: process address space ID
- * @archid: architecture-specific ID
- * @addr: first stage/level input address
- * @granule_size: page/block size of the mapping in bytes
- * @nb_granules: number of contiguous granules to be invalidated
- */
-struct iommu_inv_addr_info {
-#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
-#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
-#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
- __u32 flags;
- __u32 archid;
- __u64 pasid;
- __u64 addr;
- __u64 granule_size;
- __u64 nb_granules;
-};
-
-/**
- * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
- *
- * @flags: indicates the granularity of the PASID-selective invalidation
- * - If the PASID bit is set, the @pasid field is populated and the invalidation
- * relates to cache entries tagged with this PASID and matching the address
- * range.
- * - If the ARCHID bit is set, the @archid is populated and the invalidation
- * relates to cache entries tagged with this architecture specific ID and
- * matching the address range.
- * - Both PASID and ARCHID can be set as they may tag different caches.
- * - At least one of PASID or ARCHID must be set.
- * @pasid: process address space ID
- * @archid: architecture-specific ID
- */
-struct iommu_inv_pasid_info {
-#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
-#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
- __u32 flags;
- __u32 archid;
- __u64 pasid;
-};
-
-/**
- * struct iommu_cache_invalidate_info - First level/stage invalidation
- * information
- * @version: API version of this structure
- * @cache: bitfield that allows to select which caches to invalidate
- * @granularity: defines the lowest granularity used for the invalidation:
- * domain > PASID > addr
- * @padding: reserved for future use (should be zero)
- * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
- * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
- *
- * Not all the combinations of cache/granularity are valid:
- *
- * +--------------+---------------+---------------+---------------+
- * | type / | DEV_IOTLB | IOTLB | PASID |
- * | granularity | | | cache |
- * +==============+===============+===============+===============+
- * | DOMAIN | N/A | Y | Y |
- * +--------------+---------------+---------------+---------------+
- * | PASID | Y | Y | Y |
- * +--------------+---------------+---------------+---------------+
- * | ADDR | Y | Y | N/A |
- * +--------------+---------------+---------------+---------------+
- *
- * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
- * @version and @cache.
- *
- * If multiple cache types are invalidated simultaneously, they all
- * must support the used granularity.
- */
-struct iommu_cache_invalidate_info {
-#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
- __u32 version;
-/* IOMMU paging structure cache */
-#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
-#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
-#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
-#define IOMMU_CACHE_INV_TYPE_NR (3)
- __u8 cache;
- __u8 granularity;
- __u8 padding[2];
- union {
- struct iommu_inv_pasid_info pasid_info;
- struct iommu_inv_addr_info addr_info;
- };
-};
-
-/**
- * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
- * SVA binding.
- *
- * @flags: VT-d PASID table entry attributes
- * @pat: Page attribute table data to compute effective memory type
- * @emt: Extended memory type
- *
- * Only guest vIOMMU selectable and effective options are passed down to
- * the host IOMMU.
- */
-struct iommu_gpasid_bind_data_vtd {
-#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
-#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
-#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
-#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
-#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
-#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
- __u64 flags;
- __u32 pat;
- __u32 emt;
-};
-
-#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \
- IOMMU_SVA_VTD_GPASID_EMTE | \
- IOMMU_SVA_VTD_GPASID_PCD | \
- IOMMU_SVA_VTD_GPASID_PWT)
-
-/**
- * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
- * @version: Version of this data structure
- * @format: PASID table entry format
- * @flags: Additional information on guest bind request
- * @gpgd: Guest page directory base of the guest mm to bind
- * @hpasid: Process address space ID used for the guest mm in host IOMMU
- * @gpasid: Process address space ID used for the guest mm in guest IOMMU
- * @addr_width: Guest virtual address width
- * @padding: Reserved for future use (should be zero)
- * @vtd: Intel VT-d specific data
- *
- * Guest to host PASID mapping can be an identity or non-identity, where guest
- * has its own PASID space. For non-identify mapping, guest to host PASID lookup
- * is needed when VM programs guest PASID into an assigned device. VMM may
- * trap such PASID programming then request host IOMMU driver to convert guest
- * PASID to host PASID based on this bind data.
- */
-struct iommu_gpasid_bind_data {
-#define IOMMU_GPASID_BIND_VERSION_1 1
- __u32 version;
-#define IOMMU_PASID_FORMAT_INTEL_VTD 1
- __u32 format;
-#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
- __u64 flags;
- __u64 gpgd;
- __u64 hpasid;
- __u64 gpasid;
- __u32 addr_width;
- __u8 padding[12];
- /* Vendor specific data */
- union {
- struct iommu_gpasid_bind_data_vtd vtd;
- };
-};
-
-#endif /* _UAPI_IOMMU_H */
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
new file mode 100644
index 000000000000..1dfeaa2e649e
--- /dev/null
+++ b/include/uapi/linux/iommufd.h
@@ -0,0 +1,695 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
+ */
+#ifndef _UAPI_IOMMUFD_H
+#define _UAPI_IOMMUFD_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define IOMMUFD_TYPE (';')
+
+/**
+ * DOC: General ioctl format
+ *
+ * The ioctl interface follows a general format to allow for extensibility. Each
+ * ioctl is passed in a structure pointer as the argument providing the size of
+ * the structure in the first u32. The kernel checks that any structure space
+ * beyond what it understands is 0. This allows userspace to use the backward
+ * compatible portion while consistently using the newer, larger structures.
+ *
+ * ioctls use a standard meaning for common errnos:
+ *
+ * - ENOTTY: The IOCTL number itself is not supported at all
+ * - E2BIG: The IOCTL number is supported, but the provided structure has
+ * non-zero in a part the kernel does not understand.
+ * - EOPNOTSUPP: The IOCTL number is supported, and the structure is
+ * understood, however a known field has a value the kernel does not
+ * understand or support.
+ * - EINVAL: Everything about the IOCTL was understood, but a field is not
+ * correct.
+ * - ENOENT: An ID or IOVA provided does not exist.
+ * - ENOMEM: Out of memory.
+ * - EOVERFLOW: Mathematics overflowed.
+ *
+ * Individual ioctls may return additional errnos beyond these.
+ */
+enum {
+ IOMMUFD_CMD_BASE = 0x80,
+ IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE,
+ IOMMUFD_CMD_IOAS_ALLOC,
+ IOMMUFD_CMD_IOAS_ALLOW_IOVAS,
+ IOMMUFD_CMD_IOAS_COPY,
+ IOMMUFD_CMD_IOAS_IOVA_RANGES,
+ IOMMUFD_CMD_IOAS_MAP,
+ IOMMUFD_CMD_IOAS_UNMAP,
+ IOMMUFD_CMD_OPTION,
+ IOMMUFD_CMD_VFIO_IOAS,
+ IOMMUFD_CMD_HWPT_ALLOC,
+ IOMMUFD_CMD_GET_HW_INFO,
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
+ IOMMUFD_CMD_HWPT_INVALIDATE,
+};
+
+/**
+ * struct iommu_destroy - ioctl(IOMMU_DESTROY)
+ * @size: sizeof(struct iommu_destroy)
+ * @id: iommufd object ID to destroy. Can be any destroyable object type.
+ *
+ * Destroy any object held within iommufd.
+ */
+struct iommu_destroy {
+ __u32 size;
+ __u32 id;
+};
+#define IOMMU_DESTROY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_DESTROY)
+
+/**
+ * struct iommu_ioas_alloc - ioctl(IOMMU_IOAS_ALLOC)
+ * @size: sizeof(struct iommu_ioas_alloc)
+ * @flags: Must be 0
+ * @out_ioas_id: Output IOAS ID for the allocated object
+ *
+ * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA)
+ * to memory mapping.
+ */
+struct iommu_ioas_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 out_ioas_id;
+};
+#define IOMMU_IOAS_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOC)
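
As an illustration of the size-first convention described above, allocating an
IOAS from userspace might look like the sketch below; /dev/iommu is the iommufd
character device, and error reporting is trimmed for brevity::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    int main(void)
    {
            struct iommu_ioas_alloc cmd = {
                    .size = sizeof(cmd),    /* first u32 carries the size */
                    .flags = 0,
            };
            int fd = open("/dev/iommu", O_RDWR);

            if (fd < 0 || ioctl(fd, IOMMU_IOAS_ALLOC, &cmd))
                    return 1;
            printf("allocated IOAS id %u\n", cmd.out_ioas_id);
            return 0;
    }
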
+
+/**
+ * struct iommu_iova_range - ioctl(IOMMU_IOVA_RANGE)
+ * @start: First IOVA
+ * @last: Inclusive last IOVA
+ *
+ * An interval in IOVA space.
+ */
+struct iommu_iova_range {
+ __aligned_u64 start;
+ __aligned_u64 last;
+};
+
+/**
+ * struct iommu_ioas_iova_ranges - ioctl(IOMMU_IOAS_IOVA_RANGES)
+ * @size: sizeof(struct iommu_ioas_iova_ranges)
+ * @ioas_id: IOAS ID to read ranges from
+ * @num_iovas: Input/Output total number of ranges in the IOAS
+ * @__reserved: Must be 0
+ * @allowed_iovas: Pointer to the output array of struct iommu_iova_range
+ * @out_iova_alignment: Minimum alignment required for mapping IOVA
+ *
+ * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
+ * is not allowed. num_iovas will be set to the total number of iovas and
+ * the allowed_iovas[] will be filled in as space permits.
+ *
+ * The allowed ranges are dependent on the HW path the DMA operation takes, and
+ * can change during the lifetime of the IOAS. A fresh empty IOAS will have a
+ * full range, and each attached device will narrow the ranges based on that
+ * device's HW restrictions. Detaching a device can widen the ranges. Userspace
+ * should query ranges after every attach/detach to know what IOVAs are valid
+ * for mapping.
+ *
+ * On input num_iovas is the length of the allowed_iovas array. On output it is
+ * the total number of iovas filled in. The ioctl will return -EMSGSIZE and set
+ * num_iovas to the required value if num_iovas is too small. In this case the
+ * caller should allocate a larger output array and re-issue the ioctl.
+ *
+ * out_iova_alignment returns the minimum IOVA alignment that can be given
+ * to IOMMU_IOAS_MAP/COPY. IOVAs must satisfy::
+ *
+ * starting_iova % out_iova_alignment == 0
+ * (starting_iova + length) % out_iova_alignment == 0
+ *
+ * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot
+ * be higher than the system PAGE_SIZE.
+ */
+struct iommu_ioas_iova_ranges {
+ __u32 size;
+ __u32 ioas_id;
+ __u32 num_iovas;
+ __u32 __reserved;
+ __aligned_u64 allowed_iovas;
+ __aligned_u64 out_iova_alignment;
+};
+#define IOMMU_IOAS_IOVA_RANGES _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_IOVA_RANGES)
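
The -EMSGSIZE retry protocol above can be wrapped in a small helper. A sketch
(the get_ranges() name is hypothetical), assuming <errno.h>, <stdint.h>,
<stdlib.h>, <sys/ioctl.h> and <linux/iommufd.h> are included::

    static struct iommu_iova_range *get_ranges(int fd, __u32 ioas_id,
                                               __u32 *num)
    {
            struct iommu_ioas_iova_ranges cmd = {
                    .size = sizeof(cmd),
                    .ioas_id = ioas_id,
                    .num_iovas = 0,   /* first call: just ask for the count */
            };
            struct iommu_iova_range *ranges = NULL;

            while (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, &cmd)) {
                    if (errno != EMSGSIZE)
                            return NULL;
                    /* kernel wrote the required count into num_iovas */
                    ranges = realloc(ranges, cmd.num_iovas * sizeof(*ranges));
                    cmd.allowed_iovas = (uintptr_t)ranges;
            }
            *num = cmd.num_iovas;
            return ranges;
    }
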
+
+/**
+ * struct iommu_ioas_allow_iovas - ioctl(IOMMU_IOAS_ALLOW_IOVAS)
+ * @size: sizeof(struct iommu_ioas_allow_iovas)
+ * @ioas_id: IOAS ID to allow IOVAs from
+ * @num_iovas: Input/Output total number of ranges in the IOAS
+ * @__reserved: Must be 0
+ * @allowed_iovas: Pointer to array of struct iommu_iova_range
+ *
+ * Ensure a range of IOVAs are always available for allocation. If this call
+ * succeeds then IOMMU_IOAS_IOVA_RANGES will never return a list of IOVA ranges
+ * that are narrower than the ranges provided here. This call will fail if
+ * IOMMU_IOAS_IOVA_RANGES is currently narrower than the given ranges.
+ *
+ * When an IOAS is first created the IOVA_RANGES will be maximally sized, and as
+ * devices are attached the IOVA will narrow based on the device restrictions.
+ * When an allowed range is specified any narrowing will be refused, i.e. device
+ * attachment can fail if the device requires limiting within the allowed range.
+ *
+ * Automatic IOVA allocation is also impacted by this call. MAP will only
+ * allocate within the allowed IOVAs if they are present.
+ *
+ * This call replaces the entire allowed list with the given list.
+ */
+struct iommu_ioas_allow_iovas {
+ __u32 size;
+ __u32 ioas_id;
+ __u32 num_iovas;
+ __u32 __reserved;
+ __aligned_u64 allowed_iovas;
+};
+#define IOMMU_IOAS_ALLOW_IOVAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOW_IOVAS)
+
+/**
+ * enum iommufd_ioas_map_flags - Flags for map and copy
+ * @IOMMU_IOAS_MAP_FIXED_IOVA: If clear the kernel will compute an appropriate
+ *				IOVA to place the mapping at
+ * @IOMMU_IOAS_MAP_WRITEABLE: DMA is allowed to write to this mapping
+ * @IOMMU_IOAS_MAP_READABLE: DMA is allowed to read from this mapping
+ */
+enum iommufd_ioas_map_flags {
+ IOMMU_IOAS_MAP_FIXED_IOVA = 1 << 0,
+ IOMMU_IOAS_MAP_WRITEABLE = 1 << 1,
+ IOMMU_IOAS_MAP_READABLE = 1 << 2,
+};
+
+/**
+ * struct iommu_ioas_map - ioctl(IOMMU_IOAS_MAP)
+ * @size: sizeof(struct iommu_ioas_map)
+ * @flags: Combination of enum iommufd_ioas_map_flags
+ * @ioas_id: IOAS ID to change the mapping of
+ * @__reserved: Must be 0
+ * @user_va: Userspace pointer to start mapping from
+ * @length: Number of bytes to map
+ * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set
+ *        then this must be provided as input.
+ *
+ * Set an IOVA mapping from a user pointer. If FIXED_IOVA is specified then the
+ * mapping will be established at iova, otherwise a suitable location based on
+ * the reserved and allowed lists will be automatically selected and returned in
+ * iova.
+ *
+ * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must currently
+ * be unused, existing IOVA cannot be replaced.
+ */
+struct iommu_ioas_map {
+ __u32 size;
+ __u32 flags;
+ __u32 ioas_id;
+ __u32 __reserved;
+ __aligned_u64 user_va;
+ __aligned_u64 length;
+ __aligned_u64 iova;
+};
+#define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP)
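
For instance, mapping an existing buffer at a kernel-chosen IOVA could look
like the following sketch; map_buffer() is a hypothetical helper, and fd and
ioas_id come from the earlier sketches (same assumed includes)::

    static int map_buffer(int fd, __u32 ioas_id, void *buf, size_t len,
                          __u64 *iova)
    {
            struct iommu_ioas_map cmd = {
                    .size = sizeof(cmd),
                    .flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
                    .ioas_id = ioas_id,
                    .user_va = (uintptr_t)buf,
                    .length = len,
            };

            if (ioctl(fd, IOMMU_IOAS_MAP, &cmd))
                    return -1;
            *iova = cmd.iova;   /* kernel-chosen, since FIXED_IOVA is clear */
            return 0;
    }
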
+
+/**
+ * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY)
+ * @size: sizeof(struct iommu_ioas_copy)
+ * @flags: Combination of enum iommufd_ioas_map_flags
+ * @dst_ioas_id: IOAS ID to change the mapping of
+ * @src_ioas_id: IOAS ID to copy from
+ * @length: Number of bytes to copy and map
+ * @dst_iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is
+ *            set then this must be provided as input.
+ * @src_iova: IOVA to start the copy
+ *
+ * Copy an already existing mapping from src_ioas_id and establish it in
+ * dst_ioas_id. The src iova/length must exactly match a range used with
+ * IOMMU_IOAS_MAP.
+ *
+ * This may be used to efficiently clone a subset of an IOAS to another, or as a
+ * kind of 'cache' to speed up mapping. Copy has an efficiency advantage over
+ * establishing equivalent new mappings, as internal resources are shared, and
+ * the kernel will pin the user memory only once.
+ */
+struct iommu_ioas_copy {
+ __u32 size;
+ __u32 flags;
+ __u32 dst_ioas_id;
+ __u32 src_ioas_id;
+ __aligned_u64 length;
+ __aligned_u64 dst_iova;
+ __aligned_u64 src_iova;
+};
+#define IOMMU_IOAS_COPY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_COPY)
+
+/**
+ * struct iommu_ioas_unmap - ioctl(IOMMU_IOAS_UNMAP)
+ * @size: sizeof(struct iommu_ioas_unmap)
+ * @ioas_id: IOAS ID to change the mapping of
+ * @iova: IOVA to start the unmapping at
+ * @length: Number of bytes to unmap, and return back the bytes unmapped
+ *
+ * Unmap an IOVA range. The iova/length must be a superset of a previously
+ * mapped range used with IOMMU_IOAS_MAP or IOMMU_IOAS_COPY. Splitting or
+ * truncating ranges is not allowed. The values 0 to U64_MAX will unmap
+ * everything.
+ */
+struct iommu_ioas_unmap {
+ __u32 size;
+ __u32 ioas_id;
+ __aligned_u64 iova;
+ __aligned_u64 length;
+};
+#define IOMMU_IOAS_UNMAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_UNMAP)
+
+/**
+ * enum iommufd_option - ioctl(IOMMU_OPTION_RLIMIT_MODE) and
+ * ioctl(IOMMU_OPTION_HUGE_PAGES)
+ * @IOMMU_OPTION_RLIMIT_MODE:
+ *    Change how RLIMIT_MEMLOCK accounting works. The caller must have
+ *    privilege to invoke this. Value 0 (default) is user-based accounting,
+ *    1 uses process-based accounting. Global option, object_id must be 0.
+ * @IOMMU_OPTION_HUGE_PAGES:
+ *    Value 1 (default) allows contiguous pages to be combined when
+ *    generating iommu mappings. Value 0 disables combining, everything is
+ *    mapped to PAGE_SIZE. This can be useful for benchmarking. This is a
+ *    per-IOAS option, the object_id must be the IOAS ID.
+ */
+enum iommufd_option {
+ IOMMU_OPTION_RLIMIT_MODE = 0,
+ IOMMU_OPTION_HUGE_PAGES = 1,
+};
+
+/**
+ * enum iommufd_option_ops - ioctl(IOMMU_OPTION_OP_SET) and
+ * ioctl(IOMMU_OPTION_OP_GET)
+ * @IOMMU_OPTION_OP_SET: Set the option's value
+ * @IOMMU_OPTION_OP_GET: Get the option's value
+ */
+enum iommufd_option_ops {
+ IOMMU_OPTION_OP_SET = 0,
+ IOMMU_OPTION_OP_GET = 1,
+};
+
+/**
+ * struct iommu_option - iommu option multiplexer
+ * @size: sizeof(struct iommu_option)
+ * @option_id: One of enum iommufd_option
+ * @op: One of enum iommufd_option_ops
+ * @__reserved: Must be 0
+ * @object_id: ID of the object if required
+ * @val64: Option value to set or value returned on get
+ *
+ * Change a simple option value. This multiplexer allows controlling options
+ * on objects. IOMMU_OPTION_OP_SET will load an option and IOMMU_OPTION_OP_GET
+ * will return the current value.
+ */
+struct iommu_option {
+ __u32 size;
+ __u32 option_id;
+ __u16 op;
+ __u16 __reserved;
+ __u32 object_id;
+ __aligned_u64 val64;
+};
+#define IOMMU_OPTION _IO(IOMMUFD_TYPE, IOMMUFD_CMD_OPTION)
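
For example, disabling huge-page combining on one IOAS, per the
IOMMU_OPTION_HUGE_PAGES description above, is a one-call sketch (the
set_page_size_only() helper is hypothetical; same assumed includes)::

    static int set_page_size_only(int fd, __u32 ioas_id)
    {
            struct iommu_option opt = {
                    .size = sizeof(opt),
                    .option_id = IOMMU_OPTION_HUGE_PAGES,
                    .op = IOMMU_OPTION_OP_SET,
                    .object_id = ioas_id,   /* per-IOAS option */
                    .val64 = 0,             /* 0 = map everything at PAGE_SIZE */
            };

            return ioctl(fd, IOMMU_OPTION, &opt);
    }
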
+
+/**
+ * enum iommufd_vfio_ioas_op - IOMMU_VFIO_IOAS_* ioctls
+ * @IOMMU_VFIO_IOAS_GET: Get the current compatibility IOAS
+ * @IOMMU_VFIO_IOAS_SET: Change the current compatibility IOAS
+ * @IOMMU_VFIO_IOAS_CLEAR: Disable VFIO compatibility
+ */
+enum iommufd_vfio_ioas_op {
+ IOMMU_VFIO_IOAS_GET = 0,
+ IOMMU_VFIO_IOAS_SET = 1,
+ IOMMU_VFIO_IOAS_CLEAR = 2,
+};
+
+/**
+ * struct iommu_vfio_ioas - ioctl(IOMMU_VFIO_IOAS)
+ * @size: sizeof(struct iommu_vfio_ioas)
+ * @ioas_id: For IOMMU_VFIO_IOAS_SET the input IOAS ID to set
+ *           For IOMMU_VFIO_IOAS_GET will output the IOAS ID
+ * @op: One of enum iommufd_vfio_ioas_op
+ * @__reserved: Must be 0
+ *
+ * The VFIO compatibility support uses a single ioas because VFIO APIs do not
+ * support the ID field. Set or Get the IOAS that VFIO compatibility will use.
+ * When VFIO_GROUP_SET_CONTAINER is used on an iommufd it will get the
+ * compatibility ioas, either by taking what is already set, or auto creating
+ * one. From then on VFIO will continue to use that ioas and is not affected by
+ * this ioctl. SET or CLEAR does not destroy any auto-created IOAS.
+ */
+struct iommu_vfio_ioas {
+ __u32 size;
+ __u32 ioas_id;
+ __u16 op;
+ __u16 __reserved;
+};
+#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
+
+/**
+ * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
+ * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
+ *                               the parent HWPT in a nesting configuration.
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
+ *                                 enforced on device attachment
+ */
+enum iommufd_hwpt_alloc_flags {
+ IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
+ * entry attributes
+ * @IOMMU_VTD_S1_SRE: Supervisor request
+ * @IOMMU_VTD_S1_EAFE: Extended access enable
+ * @IOMMU_VTD_S1_WPE: Write protect enable
+ */
+enum iommu_hwpt_vtd_s1_flags {
+ IOMMU_VTD_S1_SRE = 1 << 0,
+ IOMMU_VTD_S1_EAFE = 1 << 1,
+ IOMMU_VTD_S1_WPE = 1 << 2,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
+ * info (IOMMU_HWPT_DATA_VTD_S1)
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_flags
+ * @pgtbl_addr: The base address of the stage-1 page table.
+ * @addr_width: The address width of the stage-1 page table
+ * @__reserved: Must be 0
+ */
+struct iommu_hwpt_vtd_s1 {
+ __aligned_u64 flags;
+ __aligned_u64 pgtbl_addr;
+ __u32 addr_width;
+ __u32 __reserved;
+};
+
+/**
+ * enum iommu_hwpt_data_type - IOMMU HWPT Data Type
+ * @IOMMU_HWPT_DATA_NONE: no data
+ * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
+ */
+enum iommu_hwpt_data_type {
+ IOMMU_HWPT_DATA_NONE,
+ IOMMU_HWPT_DATA_VTD_S1,
+};
+
+/**
+ * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
+ * @size: sizeof(struct iommu_hwpt_alloc)
+ * @flags: Combination of enum iommufd_hwpt_alloc_flags
+ * @dev_id: The device to allocate this HWPT for
+ * @pt_id: The IOAS or HWPT to connect this HWPT to
+ * @out_hwpt_id: The ID of the new HWPT
+ * @__reserved: Must be 0
+ * @data_type: One of enum iommu_hwpt_data_type
+ * @data_len: Length of the type specific data
+ * @data_uptr: User pointer to the type specific data
+ *
+ * Explicitly allocate a hardware page table object. This is the same object
+ * type that is returned by iommufd_device_attach() and represents the
+ * underlying iommu driver's iommu_domain kernel object.
+ *
+ * A kernel-managed HWPT will be created with the mappings from the given
+ * IOAS via the @pt_id. The @data_type for this allocation must be set to
+ * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
+ * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
+ *
+ * A user-managed nested HWPT will be created from a given parent HWPT via
+ * @pt_id, in which the parent HWPT must be allocated previously via the
+ * same ioctl from a given IOAS (@pt_id). In this case, the @data_type
+ * must be set to a pre-defined type corresponding to an I/O page table
+ * type supported by the underlying IOMMU hardware.
+ *
+ * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
+ * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
+ * must be given.
+ */
+struct iommu_hwpt_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 pt_id;
+ __u32 out_hwpt_id;
+ __u32 __reserved;
+ __u32 data_type;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+};
+#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
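
The kernel-managed case is the simplest; the sketch below assumes dev_id
refers to a device already bound to the iommufd and that the earlier includes
are in place (alloc_kernel_hwpt() is a hypothetical helper)::

    static int alloc_kernel_hwpt(int fd, __u32 dev_id, __u32 ioas_id,
                                 __u32 *hwpt_id)
    {
            struct iommu_hwpt_alloc cmd = {
                    .size = sizeof(cmd),
                    .dev_id = dev_id,
                    .pt_id = ioas_id,                  /* back the HWPT with this IOAS */
                    .data_type = IOMMU_HWPT_DATA_NONE, /* kernel-managed, no driver data */
            };

            if (ioctl(fd, IOMMU_HWPT_ALLOC, &cmd))
                    return -1;
            *hwpt_id = cmd.out_hwpt_id;
            return 0;
    }
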
+
+/**
+ * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
+ * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
+ *                                        on a nested_parent domain.
+ *                                        https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
+ */
+enum iommu_hw_info_vtd_flags {
+ IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
+};
+
+/**
+ * struct iommu_hw_info_vtd - Intel VT-d hardware information
+ *
+ * @flags: Combination of enum iommu_hw_info_vtd_flags
+ * @__reserved: Must be 0
+ *
+ * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.2 Capability Register.
+ * @ecap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.3 Extended Capability Register.
+ *
+ * User needs to understand the Intel VT-d specification to decode the
+ * register value.
+ */
+struct iommu_hw_info_vtd {
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 cap_reg;
+ __aligned_u64 ecap_reg;
+};
+
+/**
+ * enum iommu_hw_info_type - IOMMU Hardware Info Types
+ * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
+ *                         info
+ * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
+ */
+enum iommu_hw_info_type {
+ IOMMU_HW_INFO_TYPE_NONE,
+ IOMMU_HW_INFO_TYPE_INTEL_VTD,
+};
+
+/**
+ * enum iommufd_hw_capabilities
+ * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
+ *                             If available, it means the following APIs
+ *                             are supported:
+ *
+ *                             IOMMU_HWPT_GET_DIRTY_BITMAP
+ *                             IOMMU_HWPT_SET_DIRTY_TRACKING
+ *
+ */
+enum iommufd_hw_capabilities {
+ IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
+};
+
+/**
+ * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
+ * @size: sizeof(struct iommu_hw_info)
+ * @flags: Must be 0
+ * @dev_id: The device bound to the iommufd
+ * @data_len: Input the length of a user buffer in bytes. Output the length of
+ *            data that the kernel supports
+ * @data_uptr: User pointer to a user-space buffer used by the kernel to fill
+ *             the iommu type specific hardware information data
+ * @out_data_type: Output the iommu hardware info type as defined in the enum
+ *                 iommu_hw_info_type.
+ * @out_capabilities: Output the generic iommu capability info type as defined
+ *                    in the enum iommufd_hw_capabilities.
+ * @__reserved: Must be 0
+ *
+ * Query an iommu type specific hardware information data from an iommu behind
+ * a given device that has been bound to iommufd. This hardware info data will
+ * be used to sync capabilities between the virtual iommu and the physical
+ * iommu, e.g. a nested translation setup needs to check the hardware info, so
+ * a guest stage-1 page table can be compatible with the physical iommu.
+ *
+ * To capture an iommu type specific hardware information data, @data_uptr and
+ * its length @data_len must be provided. Trailing bytes will be zeroed if the
+ * user buffer is larger than the data that the kernel has. Otherwise, the
+ * kernel only fills the buffer using the given length in @data_len. If the
+ * ioctl succeeds, @data_len will be updated to the length that the kernel
+ * actually supports, and @out_data_type will be filled to decode the data
+ * in the buffer pointed to by @data_uptr. An input @data_len of zero is allowed.
+ */
+struct iommu_hw_info {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+ __u32 out_data_type;
+ __u32 __reserved;
+ __aligned_u64 out_capabilities;
+};
+#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
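
Tying the pieces together, a hedged sketch of reading the VT-d registers
defined above, assuming the device sits behind a VT-d IOMMU and that
<stdio.h>, <stdint.h>, <sys/ioctl.h> and <linux/iommufd.h> are included::

    static void print_vtd_caps(int fd, __u32 dev_id)
    {
            struct iommu_hw_info_vtd vtd = { 0 };
            struct iommu_hw_info cmd = {
                    .size = sizeof(cmd),
                    .dev_id = dev_id,
                    .data_len = sizeof(vtd),
                    .data_uptr = (uintptr_t)&vtd,
            };

            if (ioctl(fd, IOMMU_GET_HW_INFO, &cmd) == 0 &&
                cmd.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD)
                    printf("cap %llx ecap %llx\n",
                           (unsigned long long)vtd.cap_reg,
                           (unsigned long long)vtd.ecap_reg);
    }
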
+
+/**
+ * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
+ * tracking
+ * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
+ */
+enum iommufd_hwpt_set_dirty_tracking_flags {
+ IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
+};
+
+/**
+ * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
+ * @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
+ * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @__reserved: Must be 0
+ *
+ * Toggle dirty tracking on an HW pagetable.
+ */
+struct iommu_hwpt_set_dirty_tracking {
+ __u32 size;
+ __u32 flags;
+ __u32 hwpt_id;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
+
+/**
+ * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
+ * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
+ *                                       any dirty bits metadata. This flag
+ *                                       can be passed when the next operation
+ *                                       on the same IOVA range is expected to
+ *                                       be an unmap.
+ *
+ */
+enum iommufd_hwpt_get_dirty_bitmap_flags {
+ IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
+};
+
+/**
+ * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
+ * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
+ * @__reserved: Must be 0
+ * @iova: base IOVA of the bitmap first bit
+ * @length: IOVA range size
+ * @page_size: page size granularity of each bit in the bitmap
+ * @data: bitmap where to set the dirty bits. Each bit represents one
+ *        page_size-sized chunk of the range, offset from @iova.
+ *
+ * Checking a given IOVA is dirty:
+ *
+ * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
+ *
+ * Walk the IOMMU pagetables for a given IOVA range to return a bitmap
+ * with the dirty IOVAs. In doing so it will also by default clear any
+ * dirty bit metadata set in the IOPTE.
+ */
+struct iommu_hwpt_get_dirty_bitmap {
+ __u32 size;
+ __u32 hwpt_id;
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 data;
+};
+#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
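
The bit-addressing formula above translates directly into a helper; a sketch
where bitmap is the __u64 array passed via @data and base is the @iova the
walk started at (the iova_is_dirty() name is hypothetical)::

    static int iova_is_dirty(const __u64 *bitmap, __u64 base, __u64 iova,
                             __u64 page_size)
    {
            __u64 idx = (iova - base) / page_size;

            return (bitmap[idx / 64] >> (idx % 64)) & 1;
    }
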
+
+/**
+ * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
+ * Data Type
+ * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ */
+enum iommu_hwpt_invalidate_data_type {
+ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
+ * stage-1 cache invalidation
+ * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
+ *                           to all-levels page structure cache or just
+ *                           the leaf PTE cache.
+ */
+enum iommu_hwpt_vtd_s1_invalidate_flags {
+ IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
+ * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
+ * @addr: The start address of the range to be invalidated. It needs to
+ *        be 4KB aligned.
+ * @npages: Number of contiguous 4K pages to be invalidated.
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
+ * @__reserved: Must be 0
+ *
+ * The Intel VT-d specific invalidation data for user-managed stage-1 cache
+ * invalidation in nested translation. Userspace uses this structure to
+ * tell the impacted cache scope after modifying the stage-1 page table.
+ *
+ * All the caches related to the page table can be invalidated by setting
+ * @addr to 0 and @npages to U64_MAX.
+ *
+ * The device TLB will be invalidated automatically if ATS is enabled.
+ */
+struct iommu_hwpt_vtd_s1_invalidate {
+ __aligned_u64 addr;
+ __aligned_u64 npages;
+ __u32 flags;
+ __u32 __reserved;
+};
+
+/**
+ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
+ * @size: sizeof(struct iommu_hwpt_invalidate)
+ * @hwpt_id: ID of a nested HWPT for cache invalidation
+ * @data_uptr: User pointer to an array of driver-specific cache invalidation
+ *             data.
+ * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
+ *             type of all the entries in the invalidation request array. It
+ *             should be a type supported by the hwpt pointed to by @hwpt_id.
+ * @entry_len: Length (in bytes) of a request entry in the request array
+ * @entry_num: Input the number of cache invalidation requests in the array.
+ *             Output the number of requests successfully handled by the kernel.
+ * @__reserved: Must be 0.
+ *
+ * Invalidate the iommu cache for user-managed page table. Modifications on a
+ * user-managed page table should be followed by this operation to sync cache.
+ * Each ioctl can support one or more cache invalidation requests in the array
+ * that has a total size of @entry_len * @entry_num.
+ *
+ * An empty invalidation request array (@entry_num == 0) is allowed; in that
+ * case @entry_len and @data_uptr are ignored. This can be used to check
+ * whether the given @data_type is supported by the kernel.
+ */
+struct iommu_hwpt_invalidate {
+ __u32 size;
+ __u32 hwpt_id;
+ __aligned_u64 data_uptr;
+ __u32 data_type;
+ __u32 entry_len;
+ __u32 entry_num;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
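
To make the array mechanics concrete, a hedged sketch issuing a single full
flush of a nested HWPT's stage-1 cache; nested_hwpt_id is assumed to come from
an earlier IOMMU_HWPT_ALLOC, and <stdint.h> provides UINT64_MAX::

    static int flush_vtd_s1(int fd, __u32 nested_hwpt_id)
    {
            struct iommu_hwpt_vtd_s1_invalidate inv = {
                    .addr = 0,
                    .npages = UINT64_MAX, /* addr 0 + U64_MAX pages == flush all */
            };
            struct iommu_hwpt_invalidate cmd = {
                    .size = sizeof(cmd),
                    .hwpt_id = nested_hwpt_id,
                    .data_uptr = (uintptr_t)&inv,
                    .data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
                    .entry_len = sizeof(inv),
                    .entry_num = 1,
            };

            return ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
    }
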
+#endif
diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
new file mode 100644
index 000000000000..bee2bdb0eedb
--- /dev/null
+++ b/include/uapi/linux/ioprio.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_IOPRIO_H
+#define _UAPI_LINUX_IOPRIO_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+/*
+ * Gives us 8 prio classes with 13 bits of data for each class
+ */
+#define IOPRIO_CLASS_SHIFT 13
+#define IOPRIO_NR_CLASSES 8
+#define IOPRIO_CLASS_MASK (IOPRIO_NR_CLASSES - 1)
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(ioprio) \
+ (((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
+#define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK)
+
+/*
+ * These are the io priority classes as implemented by the BFQ and mq-deadline
+ * schedulers. RT is the realtime class, it always gets premium service. For
+ * ATA disks supporting NCQ IO priority, RT class IOs will be processed using
+ * high priority NCQ commands. BE is the best-effort scheduling class, the
+ * default for any process. IDLE is the idle scheduling class, it is only
+ * served when no one else is using the disk.
+ */
+enum {
+ IOPRIO_CLASS_NONE = 0,
+ IOPRIO_CLASS_RT = 1,
+ IOPRIO_CLASS_BE = 2,
+ IOPRIO_CLASS_IDLE = 3,
+
+ /* Special class to indicate an invalid ioprio value */
+ IOPRIO_CLASS_INVALID = 7,
+};
+
+/*
+ * The RT and BE priority classes both support up to 8 priority levels that
+ * can be specified using the lower 3 bits of the priority data.
+ */
+#define IOPRIO_LEVEL_NR_BITS 3
+#define IOPRIO_NR_LEVELS (1 << IOPRIO_LEVEL_NR_BITS)
+#define IOPRIO_LEVEL_MASK (IOPRIO_NR_LEVELS - 1)
+#define IOPRIO_PRIO_LEVEL(ioprio) ((ioprio) & IOPRIO_LEVEL_MASK)
+
+#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
+
+/*
+ * Possible values for the "which" argument of the ioprio_get() and
+ * ioprio_set() system calls (see "man ioprio_set").
+ */
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP,
+ IOPRIO_WHO_USER,
+};
+
+/*
+ * Fallback BE class priority level.
+ */
+#define IOPRIO_NORM 4
+#define IOPRIO_BE_NORM IOPRIO_NORM
+
+/*
+ * The 10 bits between the priority class and the priority level are used to
+ * optionally define I/O hints for any combination of I/O priority class and
+ * level. Depending on the kernel configuration, I/O scheduler being used and
+ * the target I/O device being used, hints can influence how I/Os are processed
+ * without affecting the I/O scheduling ordering defined by the I/O priority
+ * class and level.
+ */
+#define IOPRIO_HINT_SHIFT IOPRIO_LEVEL_NR_BITS
+#define IOPRIO_HINT_NR_BITS 10
+#define IOPRIO_NR_HINTS (1 << IOPRIO_HINT_NR_BITS)
+#define IOPRIO_HINT_MASK (IOPRIO_NR_HINTS - 1)
+#define IOPRIO_PRIO_HINT(ioprio) \
+ (((ioprio) >> IOPRIO_HINT_SHIFT) & IOPRIO_HINT_MASK)
+
+/*
+ * I/O hints.
+ */
+enum {
+ /* No hint */
+ IOPRIO_HINT_NONE = 0,
+
+ /*
+ * Device command duration limits: indicate to the device a desired
+ * duration limit for the commands that will be used to process an I/O.
+ * These will currently only be effective for SCSI and ATA devices that
+ * support the command duration limits feature. If this feature is
+ * enabled, then the commands issued to the device to process an I/O with
+ * one of these hints set will have the duration limit index (dld field)
+ * set to the value of the hint.
+ */
+ IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7,
+};
+
+#define IOPRIO_BAD_VALUE(val, max) ((val) < 0 || (val) >= (max))
+
+/*
+ * Return an I/O priority value based on a class, a level and a hint.
+ */
+static __always_inline __u16 ioprio_value(int prioclass, int priolevel,
+ int priohint)
+{
+ if (IOPRIO_BAD_VALUE(prioclass, IOPRIO_NR_CLASSES) ||
+ IOPRIO_BAD_VALUE(priolevel, IOPRIO_NR_LEVELS) ||
+ IOPRIO_BAD_VALUE(priohint, IOPRIO_NR_HINTS))
+ return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT;
+
+ return (prioclass << IOPRIO_CLASS_SHIFT) |
+ (priohint << IOPRIO_HINT_SHIFT) | priolevel;
+}
+
+#define IOPRIO_PRIO_VALUE(prioclass, priolevel) \
+ ioprio_value(prioclass, priolevel, IOPRIO_HINT_NONE)
+#define IOPRIO_PRIO_VALUE_HINT(prioclass, priolevel, priohint) \
+ ioprio_value(prioclass, priolevel, priohint)
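
Putting the class, level and hint fields together, a sketch applying a
best-effort level-2 priority with a duration-limit hint to the calling
process; ioprio_set() has no glibc wrapper, so the raw syscall is used::

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/ioprio.h>

    int main(void)
    {
            int prio = IOPRIO_PRIO_VALUE_HINT(IOPRIO_CLASS_BE, 2,
                                              IOPRIO_HINT_DEV_DURATION_LIMIT_1);

            return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) ? 1 : 0;
    }
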
+
+#endif /* _UAPI_LINUX_IOPRIO_H */
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index e42d13b55cf3..283dec7e3645 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -18,6 +18,7 @@
#ifndef _UAPI_LINUX_IP_H
#define _UAPI_LINUX_IP_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <asm/byteorder.h>
#define IPTOS_TOS_MASK 0x1E
@@ -100,8 +101,10 @@ struct iphdr {
__u8 ttl;
__u8 protocol;
__sum16 check;
- __be32 saddr;
- __be32 daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ __be32 saddr;
+ __be32 daddr;
+ );
/*The options start here. */
};
@@ -112,13 +115,13 @@ struct ip_auth_hdr {
__be16 reserved;
__be32 spi;
__be32 seq_no; /* Sequence number */
- __u8 auth_data[0]; /* Variable len but >=4. Mind the 64 bit alignment! */
+ __u8 auth_data[]; /* Variable len but >=4. Mind the 64 bit alignment! */
};
struct ip_esp_hdr {
__be32 spi;
__be32 seq_no; /* Sequence number */
- __u8 enc_data[0]; /* Variable len but >=8. Mind the 64 bit alignment! */
+ __u8 enc_data[]; /* Variable len but >=8. Mind the 64 bit alignment! */
};
struct ip_comp_hdr {
@@ -169,6 +172,7 @@ enum
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
IPV4_DEVCONF_BC_FORWARDING,
+ IPV4_DEVCONF_ARP_EVICT_NOCARRIER,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 4102ddcb4e14..1ed234e7f251 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -254,7 +254,7 @@ struct ip_vs_get_dests {
unsigned int num_dests;
/* the real servers */
- struct ip_vs_dest_entry entrytable[0];
+ struct ip_vs_dest_entry entrytable[];
};
@@ -264,7 +264,7 @@ struct ip_vs_get_services {
unsigned int num_services;
/* service table */
- struct ip_vs_service_entry entrytable[0];
+ struct ip_vs_service_entry entrytable[];
};
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 32d148309b16..966c3070959b 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -81,6 +81,20 @@ struct ipmi_ipmb_addr {
};
/*
+ * Used for messages received directly from an IPMB that have not gone
+ * through an MC. This is for systems that sit right on an IPMB so
+ * they can receive commands and respond to them.
+ */
+#define IPMI_IPMB_DIRECT_ADDR_TYPE 0x81
+struct ipmi_ipmb_direct_addr {
+ int addr_type;
+ short channel;
+ unsigned char slave_addr;
+ unsigned char rs_lun;
+ unsigned char rq_lun;
+};
+
+/*
* A LAN Address. This is an address to/from a LAN interface bridged
* by the BMC, not an address actually out on the LAN.
*
@@ -158,7 +172,7 @@ struct kernel_ipmi_msg {
* is used for the receive in-kernel interface and in the receive
* IOCTL.
*
- * The "IPMI_RESPONSE_RESPNOSE_TYPE" is a little strange sounding, but
+ * The "IPMI_RESPONSE_RESPONSE_TYPE" is a little strange sounding, but
* it allows you to get the message results when you send a response
* message.
*/
diff --git a/include/uapi/linux/ipmi_msgdefs.h b/include/uapi/linux/ipmi_msgdefs.h
index c2b23a9fdf3d..0934af3b8037 100644
--- a/include/uapi/linux/ipmi_msgdefs.h
+++ b/include/uapi/linux/ipmi_msgdefs.h
@@ -69,6 +69,8 @@
#define IPMI_ERR_MSG_TRUNCATED 0xc6
#define IPMI_REQ_LEN_INVALID_ERR 0xc7
#define IPMI_REQ_LEN_EXCEEDED_ERR 0xc8
+#define IPMI_DEVICE_IN_FW_UPDATE_ERR 0xd1
+#define IPMI_DEVICE_IN_INIT_ERR 0xd2
#define IPMI_NOT_IN_MY_STATE_ERR 0xd5 /* IPMI 2.0 */
#define IPMI_LOST_ARBITRATION_ERR 0x81
#define IPMI_BUS_ERR 0x82
diff --git a/include/uapi/linux/ipmi_ssif_bmc.h b/include/uapi/linux/ipmi_ssif_bmc.h
new file mode 100644
index 000000000000..1c6a753dad08
--- /dev/null
+++ b/include/uapi/linux/ipmi_ssif_bmc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022, Ampere Computing LLC.
+ */
+
+#ifndef _UAPI_LINUX_IPMI_SSIF_BMC_H
+#define _UAPI_LINUX_IPMI_SSIF_BMC_H
+
+#include <linux/types.h>
+
+/* Max length of an ipmi ssif message, including the netfn and cmd fields */
+#define IPMI_SSIF_PAYLOAD_MAX 254
+struct ipmi_ssif_msg {
+ unsigned int len;
+ __u8 payload[IPMI_SSIF_PAYLOAD_MAX];
+};
+
+#endif /* _UAPI_LINUX_IPMI_SSIF_BMC_H */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 13e8751bf24a..cf592d7b630f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -4,6 +4,7 @@
#include <linux/libc-compat.h>
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/in6.h>
#include <asm/byteorder.h>
@@ -80,7 +81,7 @@ struct ipv6_opt_hdr {
struct rt0_hdr {
struct ipv6_rt_hdr rt_hdr;
__u32 reserved;
- struct in6_addr addr[0];
+ struct in6_addr addr[];
#define rt0_type rt_hdr.type
};
@@ -130,8 +131,10 @@ struct ipv6hdr {
__u8 nexthdr;
__u8 hop_limit;
- struct in6_addr saddr;
- struct in6_addr daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ );
};
@@ -189,6 +192,13 @@ enum {
DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_NDISC_TCLASS,
DEVCONF_RPL_SEG_ENABLED,
+ DEVCONF_RA_DEFRTR_METRIC,
+ DEVCONF_IOAM6_ENABLED,
+ DEVCONF_IOAM6_ID,
+ DEVCONF_IOAM6_ID_WIDE,
+ DEVCONF_NDISC_EVICT_NOCARRIER,
+ DEVCONF_ACCEPT_UNTRACKED_NA,
+ DEVCONF_ACCEPT_RA_MIN_LFT,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipx.h b/include/uapi/linux/ipx.h
deleted file mode 100644
index 3168137adae8..000000000000
--- a/include/uapi/linux/ipx.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _IPX_H_
-#define _IPX_H_
-#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */
-#include <linux/types.h>
-#include <linux/sockios.h>
-#include <linux/socket.h>
-#define IPX_NODE_LEN 6
-#define IPX_MTU 576
-
-#if __UAPI_DEF_SOCKADDR_IPX
-struct sockaddr_ipx {
- __kernel_sa_family_t sipx_family;
- __be16 sipx_port;
- __be32 sipx_network;
- unsigned char sipx_node[IPX_NODE_LEN];
- __u8 sipx_type;
- unsigned char sipx_zero; /* 16 byte fill */
-};
-#endif /* __UAPI_DEF_SOCKADDR_IPX */
-
-/*
- * So we can fit the extra info for SIOCSIFADDR into the address nicely
- */
-#define sipx_special sipx_port
-#define sipx_action sipx_zero
-#define IPX_DLTITF 0
-#define IPX_CRTITF 1
-
-#if __UAPI_DEF_IPX_ROUTE_DEFINITION
-struct ipx_route_definition {
- __be32 ipx_network;
- __be32 ipx_router_network;
- unsigned char ipx_router_node[IPX_NODE_LEN];
-};
-#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
-
-#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
-struct ipx_interface_definition {
- __be32 ipx_network;
- unsigned char ipx_device[16];
- unsigned char ipx_dlink_type;
-#define IPX_FRAME_NONE 0
-#define IPX_FRAME_SNAP 1
-#define IPX_FRAME_8022 2
-#define IPX_FRAME_ETHERII 3
-#define IPX_FRAME_8023 4
-#define IPX_FRAME_TR_8022 5 /* obsolete */
- unsigned char ipx_special;
-#define IPX_SPECIAL_NONE 0
-#define IPX_PRIMARY 1
-#define IPX_INTERNAL 2
- unsigned char ipx_node[IPX_NODE_LEN];
-};
-#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
-
-#if __UAPI_DEF_IPX_CONFIG_DATA
-struct ipx_config_data {
- unsigned char ipxcfg_auto_select_primary;
- unsigned char ipxcfg_auto_create_interfaces;
-};
-#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
-
-/*
- * OLD Route Definition for backward compatibility.
- */
-
-#if __UAPI_DEF_IPX_ROUTE_DEF
-struct ipx_route_def {
- __be32 ipx_network;
- __be32 ipx_router_network;
-#define IPX_ROUTE_NO_ROUTER 0
- unsigned char ipx_router_node[IPX_NODE_LEN];
- unsigned char ipx_device[16];
- unsigned short ipx_flags;
-#define IPX_RT_SNAP 8
-#define IPX_RT_8022 4
-#define IPX_RT_BLUEBOOK 2
-#define IPX_RT_ROUTED 1
-};
-#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
-
-#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE)
-#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1)
-#define SIOCIPXCFGDATA (SIOCPROTOPRIVATE + 2)
-#define SIOCIPXNCPCONN (SIOCPROTOPRIVATE + 3)
-#endif /* _IPX_H_ */
diff --git a/include/uapi/linux/iso_fs.h b/include/uapi/linux/iso_fs.h
index a2555176f6d1..758178f5b52d 100644
--- a/include/uapi/linux/iso_fs.h
+++ b/include/uapi/linux/iso_fs.h
@@ -137,7 +137,7 @@ struct iso_path_table{
__u8 name_len[2]; /* 721 */
__u8 extent[4]; /* 731 */
__u8 parent[2]; /* 721 */
- char name[0];
+ char name[];
} __attribute__((packed));
/* high sierra is identical to iso, except that the date is only 6 bytes, and
@@ -154,7 +154,7 @@ struct iso_directory_record {
__u8 interleave [ISODCL (28, 28)]; /* 711 */
__u8 volume_sequence_number [ISODCL (29, 32)]; /* 723 */
__u8 name_len [ISODCL (33, 33)]; /* 711 */
- char name [0];
+ char name [];
} __attribute__((packed));
#define ISOFS_BLOCK_BITS 11
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
index ba078f8e9add..0df1a1c3caf4 100644
--- a/include/uapi/linux/isst_if.h
+++ b/include/uapi/linux/isst_if.h
@@ -163,10 +163,313 @@ struct isst_if_msr_cmds {
struct isst_if_msr_cmd msr_cmd[1];
};
+/**
+ * struct isst_core_power - Structure to get/set core_power feature
+ * @get_set: 0: Get, 1: Set
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @enable: Feature enable status
+ * @supported: Feature support status
+ * @priority_type: Priority type for the feature (ordered/proportional)
+ *
+ * Structure to get/set core_power feature state using IOCTL
+ * ISST_IF_CORE_POWER_STATE.
+ */
+struct isst_core_power {
+ __u8 get_set;
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u8 enable;
+ __u8 supported;
+ __u8 priority_type;
+};
+
+/**
+ * struct isst_clos_param - Structure to get/set clos param
+ * @get_set: 0: Get, 1: Set
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @clos: Clos ID for the parameters
+ * @min_freq_mhz: Minimum frequency in MHz
+ * @max_freq_mhz: Maximum frequency in MHz
+ * @prop_prio: Proportional priority from 0-15
+ *
+ * Structure to get/set per clos property using IOCTL
+ * ISST_IF_CLOS_PARAM.
+ */
+struct isst_clos_param {
+ __u8 get_set;
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u8 clos;
+ __u16 min_freq_mhz;
+ __u16 max_freq_mhz;
+ __u8 prop_prio;
+};
+
+/**
+ * struct isst_if_clos_assoc - Structure to assign clos to a CPU
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @logical_cpu: CPU number
+ * @clos: Clos ID to assign to the logical CPU
+ *
+ * Structure used to assign a clos to a logical CPU.
+ */
+struct isst_if_clos_assoc {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u16 logical_cpu;
+ __u16 clos;
+};
+
+/**
+ * struct isst_if_clos_assoc_cmds - Structure to assign clos to CPUs
+ * @cmd_count: Number of cmds (cpus) in this request
+ * @get_set: Request is for get or set
+ * @punit_cpu_map: Set to 1 if the CPU number uses punit numbering, not
+ *                 a Linux CPU number
+ * @assoc_info: Clos association information for each CPU in the request
+ *
+ * Structure used to get/set the clos association of CPUs using IOCTL
+ * ISST_IF_CLOS_ASSOC.
+ */
+struct isst_if_clos_assoc_cmds {
+ __u16 cmd_count;
+ __u16 get_set;
+ __u16 punit_cpu_map;
+ struct isst_if_clos_assoc assoc_info[1];
+};
+
+/**
+ * struct isst_tpmi_instance_count - Get number of TPMI instances per socket
+ * @socket_id: Socket/package id
+ * @count: Number of instances
+ * @valid_mask: Mask of instances as there can be holes
+ *
+ * Structure used to get TPMI instances information using
+ * IOCTL ISST_IF_COUNT_TPMI_INSTANCES.
+ */
+struct isst_tpmi_instance_count {
+ __u8 socket_id;
+ __u8 count;
+ __u16 valid_mask;
+};
+
+/**
+ * struct isst_perf_level_info - Structure to get information on SST-PP levels
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @max_level: Maximum performance level supported by the platform
+ * @feature_rev: The feature revision for SST-PP supported by the platform
+ * @level_mask: Mask of supported performance levels
+ * @current_level: Current performance level
+ * @feature_state: SST-BF and SST-TF (enabled/disabled) status at current level
+ * @locked: SST-PP performance level change is locked/unlocked
+ * @enabled: SST-PP feature is enabled or not
+ * @sst_tf_support: SST-TF support status at this level
+ * @sst_bf_support: SST-BF support status at this level
+ *
+ * Structure to get SST-PP details using IOCTL ISST_IF_PERF_LEVELS.
+ */
+struct isst_perf_level_info {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u8 max_level;
+ __u8 feature_rev;
+ __u8 level_mask;
+ __u8 current_level;
+ __u8 feature_state;
+ __u8 locked;
+ __u8 enabled;
+ __u8 sst_tf_support;
+ __u8 sst_bf_support;
+};
+
+/**
+ * struct isst_perf_level_control - Structure to set SST-PP level
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @level: level to set
+ *
+ * Structure used to change the SST-PP level using IOCTL ISST_IF_PERF_SET_LEVEL.
+ */
+struct isst_perf_level_control {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u8 level;
+};
+
+/**
+ * struct isst_perf_feature_control - Structure to activate SST-BF/SST-TF
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @feature: bit 0 = SST-BF state, bit 1 = SST-TF state
+ *
+ * Structure used to enable SST-BF/SST-TF using IOCTL ISST_IF_PERF_SET_FEATURE.
+ */
+struct isst_perf_feature_control {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u8 feature;
+};
+
+#define TRL_MAX_BUCKETS 8
+#define TRL_MAX_LEVELS 6
+
+/**
+ * struct isst_perf_level_data_info - Structure to get SST-PP level details
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @level: SST-PP level for which caller wants to get information
+ * @tdp_ratio: TDP Ratio
+ * @base_freq_mhz: Base frequency in MHz
+ * @base_freq_avx2_mhz: AVX2 Base frequency in MHz
+ * @base_freq_avx512_mhz: AVX512 base frequency in MHz
+ * @base_freq_amx_mhz: AMX base frequency in MHz
+ * @thermal_design_power_w: Thermal design (TDP) power
+ * @tjunction_max_c: Max junction temperature
+ * @max_memory_freq_mhz: Max memory frequency in MHz
+ * @cooling_type: Type of cooling used
+ * @p0_freq_mhz: Core maximum frequency
+ * @p1_freq_mhz: Core TDP frequency
+ * @pn_freq_mhz: Core maximum efficiency frequency
+ * @pm_freq_mhz: Core minimum frequency
+ * @p0_fabric_freq_mhz: Fabric (Uncore) maximum frequency
+ * @p1_fabric_freq_mhz: Fabric (Uncore) TDP frequency
+ * @pn_fabric_freq_mhz: Fabric (Uncore) minimum efficiency frequency
+ * @pm_fabric_freq_mhz: Fabric (Uncore) minimum frequency
+ * @max_buckets: Maximum trl buckets
+ * @max_trl_levels: Maximum trl levels
+ * @bucket_core_counts[TRL_MAX_BUCKETS]: Number of cores per bucket
+ * @trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS]: Maximum frequency
+ *             for a bucket and trl level
+ *
+ * Structure used to get information on frequencies and TDP for a SST-PP
+ * level using ISST_IF_GET_PERF_LEVEL_INFO.
+ */
+struct isst_perf_level_data_info {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u16 level;
+ __u16 tdp_ratio;
+ __u16 base_freq_mhz;
+ __u16 base_freq_avx2_mhz;
+ __u16 base_freq_avx512_mhz;
+ __u16 base_freq_amx_mhz;
+ __u16 thermal_design_power_w;
+ __u16 tjunction_max_c;
+ __u16 max_memory_freq_mhz;
+ __u16 cooling_type;
+ __u16 p0_freq_mhz;
+ __u16 p1_freq_mhz;
+ __u16 pn_freq_mhz;
+ __u16 pm_freq_mhz;
+ __u16 p0_fabric_freq_mhz;
+ __u16 p1_fabric_freq_mhz;
+ __u16 pn_fabric_freq_mhz;
+ __u16 pm_fabric_freq_mhz;
+ __u16 max_buckets;
+ __u16 max_trl_levels;
+ __u16 bucket_core_counts[TRL_MAX_BUCKETS];
+ __u16 trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS];
+};
+
+/**
+ * struct isst_perf_level_cpu_mask - Structure to get SST-PP level CPU mask
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @level: SST-PP level for which caller wants to get information
+ * @punit_cpu_map: Set to 1 if the CPU number uses punit numbering, not a
+ *                 Linux CPU number. If 0, the CPU mask is copied to the user
+ *                 space supplied cpu_buffer of size cpu_buffer_size. The punit
+ *                 cpu mask is copied to the "mask" field.
+ * @mask: cpu mask for this PP level (punit CPU numbering)
+ * @cpu_buffer_size: size of cpu_buffer, also used to return the copied CPU
+ *                   buffer size.
+ * @cpu_buffer: Buffer to copy CPU mask when punit_cpu_map is 0
+ *
+ * Structure used to get cpumask for a SST-PP level using
+ * IOCTL ISST_IF_GET_PERF_LEVEL_CPU_MASK. Also used to get CPU mask for
+ * IOCTL ISST_IF_GET_BASE_FREQ_CPU_MASK for SST-BF.
+ */
+struct isst_perf_level_cpu_mask {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u8 level;
+ __u8 punit_cpu_map;
+ __u64 mask;
+ __u16 cpu_buffer_size;
+ __s8 cpu_buffer[1];
+};
+
+/**
+ * struct isst_base_freq_info - Structure to get SST-BF frequencies
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @level: SST-PP level for which caller wants to get information
+ * @high_base_freq_mhz: High priority CPU base frequency
+ * @low_base_freq_mhz: Low priority CPU base frequency
+ * @tjunction_max_c: Max junction temperature
+ * @thermal_design_power_w: Thermal design power in watts
+ *
+ * Structure used to get SST-BF information using
+ * IOCTL ISST_IF_GET_BASE_FREQ_INFO.
+ */
+struct isst_base_freq_info {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u16 level;
+ __u16 high_base_freq_mhz;
+ __u16 low_base_freq_mhz;
+ __u16 tjunction_max_c;
+ __u16 thermal_design_power_w;
+};
+
+/**
+ * struct isst_turbo_freq_info - Structure to get SST-TF frequencies
+ * @socket_id: Socket/package id
+ * @power_domain_id: Power Domain id
+ * @level: SST-PP level for which caller wants to get information
+ * @max_clip_freqs: Maximum number of low priority core clipping frequencies
+ * @max_buckets: Maximum trl buckets
+ * @max_trl_levels: Maximum trl levels
+ * @lp_clip_freq_mhz: Clip frequencies per trl level
+ * @bucket_core_counts: Maximum number of cores for a bucket
+ * @trl_freq_mhz: Frequencies per trl level for each bucket
+ *
+ * Structure used to get SST-TF information using
+ * IOCTL ISST_IF_GET_TURBO_FREQ_INFO.
+ */
+struct isst_turbo_freq_info {
+ __u8 socket_id;
+ __u8 power_domain_id;
+ __u16 level;
+ __u16 max_clip_freqs;
+ __u16 max_buckets;
+ __u16 max_trl_levels;
+ __u16 lp_clip_freq_mhz[TRL_MAX_LEVELS];
+ __u16 bucket_core_counts[TRL_MAX_BUCKETS];
+ __u16 trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS];
+};
+
#define ISST_IF_MAGIC 0xFE
#define ISST_IF_GET_PLATFORM_INFO _IOR(ISST_IF_MAGIC, 0, struct isst_if_platform_info *)
#define ISST_IF_GET_PHY_ID _IOWR(ISST_IF_MAGIC, 1, struct isst_if_cpu_map *)
#define ISST_IF_IO_CMD _IOW(ISST_IF_MAGIC, 2, struct isst_if_io_regs *)
#define ISST_IF_MBOX_COMMAND _IOWR(ISST_IF_MAGIC, 3, struct isst_if_mbox_cmds *)
#define ISST_IF_MSR_COMMAND _IOWR(ISST_IF_MAGIC, 4, struct isst_if_msr_cmds *)
+
+#define ISST_IF_COUNT_TPMI_INSTANCES _IOR(ISST_IF_MAGIC, 5, struct isst_tpmi_instance_count *)
+#define ISST_IF_CORE_POWER_STATE _IOWR(ISST_IF_MAGIC, 6, struct isst_core_power *)
+#define ISST_IF_CLOS_PARAM _IOWR(ISST_IF_MAGIC, 7, struct isst_clos_param *)
+#define ISST_IF_CLOS_ASSOC _IOWR(ISST_IF_MAGIC, 8, struct isst_if_clos_assoc_cmds *)
+
+#define ISST_IF_PERF_LEVELS _IOWR(ISST_IF_MAGIC, 9, struct isst_perf_level_info *)
+#define ISST_IF_PERF_SET_LEVEL _IOW(ISST_IF_MAGIC, 10, struct isst_perf_level_control *)
+#define ISST_IF_PERF_SET_FEATURE _IOW(ISST_IF_MAGIC, 11, struct isst_perf_feature_control *)
+#define ISST_IF_GET_PERF_LEVEL_INFO _IOR(ISST_IF_MAGIC, 12, struct isst_perf_level_data_info *)
+#define ISST_IF_GET_PERF_LEVEL_CPU_MASK _IOR(ISST_IF_MAGIC, 13, struct isst_perf_level_cpu_mask *)
+#define ISST_IF_GET_BASE_FREQ_INFO _IOR(ISST_IF_MAGIC, 14, struct isst_base_freq_info *)
+#define ISST_IF_GET_BASE_FREQ_CPU_MASK _IOR(ISST_IF_MAGIC, 15, struct isst_perf_level_cpu_mask *)
+#define ISST_IF_GET_TURBO_FREQ_INFO _IOR(ISST_IF_MAGIC, 16, struct isst_turbo_freq_info *)
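
As a usage sketch, reading the current SST-PP level over the new interface
might look as follows; the /dev/isst_interface node is created by the ISST
driver, and error handling is trimmed::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/isst_if.h>

    int main(void)
    {
            struct isst_perf_level_info info = { .socket_id = 0 };
            int fd = open("/dev/isst_interface", O_RDWR);

            if (fd < 0 || ioctl(fd, ISST_IF_PERF_LEVELS, &info))
                    return 1;
            printf("max level %u, current level %u, mask %#x\n",
                   info.max_level, info.current_level, info.level_mask);
            return 0;
    }
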
+
#endif
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index 784ba0b9690a..637ee4a793cf 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -123,7 +123,7 @@ struct jffs2_raw_dirent
__u8 unused[2];
jint32_t node_crc;
jint32_t name_crc;
- __u8 name[0];
+ __u8 name[];
};
/* The JFFS2 raw inode structure: Used for storage on physical media. */
@@ -155,7 +155,7 @@ struct jffs2_raw_inode
jint16_t flags; /* See JFFS2_INO_FLAG_* */
jint32_t data_crc; /* CRC for the (compressed) data. */
jint32_t node_crc; /* CRC for the raw inode (excluding data) */
- __u8 data[0];
+ __u8 data[];
};
struct jffs2_raw_xattr {
@@ -170,7 +170,7 @@ struct jffs2_raw_xattr {
jint16_t value_len;
jint32_t data_crc;
jint32_t node_crc;
- __u8 data[0];
+ __u8 data[];
} __attribute__((packed));
struct jffs2_raw_xref
@@ -196,7 +196,7 @@ struct jffs2_raw_summary
jint32_t padded; /* sum of the size of padding nodes */
jint32_t sum_crc; /* summary information crc */
jint32_t node_crc; /* node crc */
- jint32_t sum[0]; /* inode summary info */
+ jint32_t sum[]; /* inode summary info */
};
union jffs2_node_union
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 1d0350e44ae3..ed95dba9fa37 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -13,7 +13,7 @@ struct kcov_remote_arg {
__u32 area_size; /* Length of coverage buffer in words */
__u32 num_handles; /* Size of handles array */
__aligned_u64 common_handle;
- __aligned_u64 handles[0];
+ __aligned_u64 handles[];
};
#define KCOV_REMOTE_MAX_HANDLES 0x100
diff --git a/include/uapi/linux/kd.h b/include/uapi/linux/kd.h
index 4616b31f84da..6b384065c013 100644
--- a/include/uapi/linux/kd.h
+++ b/include/uapi/linux/kd.h
@@ -161,19 +161,25 @@ struct console_font_op {
unsigned int flags; /* KD_FONT_FLAG_* */
unsigned int width, height; /* font size */
unsigned int charcount;
- unsigned char __user *data; /* font data with height fixed to 32 */
+ unsigned char __user *data; /* font data with vpitch fixed to 32 for
+ * KD_FONT_OP_SET/GET
+ */
};
struct console_font {
unsigned int width, height; /* font size */
unsigned int charcount;
- unsigned char *data; /* font data with height fixed to 32 */
+ unsigned char *data; /* font data with vpitch fixed to 32 for
+ * KD_FONT_OP_SET/GET
+ */
};
#define KD_FONT_OP_SET 0 /* Set font */
#define KD_FONT_OP_GET 1 /* Get font */
#define KD_FONT_OP_SET_DEFAULT 2 /* Set font to default, data points to name / NULL */
-#define KD_FONT_OP_COPY 3 /* Copy from another console */
+#define KD_FONT_OP_COPY 3 /* Obsolete, do not use */
+#define KD_FONT_OP_SET_TALL 4 /* Set font with vpitch = height */
+#define KD_FONT_OP_GET_TALL 5 /* Get font with vpitch = height */
#define KD_FONT_FLAG_DONT_RECALC 1 /* Don't recalculate hw charcell size [compat] */
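
With vpitch fixed at 32 only for the classic SET/GET ops, an existing GET caller keeps working; the new SET_TALL/GET_TALL ops use vpitch equal to the font height instead. A hedged sketch of the classic GET path (get_console_font is a hypothetical helper; the buffer-sizing comment is an assumption based on the 32-row vpitch):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kd.h>

	/* buf is assumed to hold charcount * 32 * ((width + 7) / 8) bytes;
	 * 64 KiB covers the 512-glyph, 32-pixel-wide worst case. */
	static int get_console_font(int console_fd, unsigned char *buf,
				    unsigned int maxchars)
	{
		struct console_font_op op;

		memset(&op, 0, sizeof(op));
		op.op = KD_FONT_OP_GET;
		op.width = 32;		/* largest glyph the buffer accepts */
		op.height = 32;
		op.charcount = maxchars;
		op.data = buf;
		/* on success the kernel fills in width, height and charcount */
		return ioctl(console_fd, KDFONTOP, &op);
	}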
diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 0ff8f7477847..fadf2db71fe8 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -3,13 +3,6 @@
#define _UAPI_LINUX_KERNEL_H
#include <linux/sysinfo.h>
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include <linux/const.h>
#endif /* _UAPI_LINUX_KERNEL_H */
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 05669c87a0af..c17bb096ea68 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -12,6 +12,7 @@
/* kexec flags for different usage scenarios */
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_UPDATE_ELFCOREHDR 0x00000004
#define KEXEC_ARCH_MASK 0xffff0000
/*
@@ -24,6 +25,7 @@
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
+#define KEXEC_FILE_DEBUG 0x00000008
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
@@ -42,6 +44,8 @@
#define KEXEC_ARCH_MIPS_LE (10 << 16)
#define KEXEC_ARCH_MIPS ( 8 << 16)
#define KEXEC_ARCH_AARCH64 (183 << 16)
+#define KEXEC_ARCH_RISCV (243 << 16)
+#define KEXEC_ARCH_LOONGARCH (258 << 16)
/* The artificial cap on the number of segments passed to kexec_load. */
#define KEXEC_SEGMENT_MAX 16
@@ -53,9 +57,9 @@
*/
struct kexec_segment {
const void *buf;
- size_t bufsz;
+ __kernel_size_t bufsz;
const void *mem;
- size_t memsz;
+ __kernel_size_t memsz;
};
#endif /* __KERNEL__ */
diff --git a/include/uapi/linux/keyboard.h b/include/uapi/linux/keyboard.h
index 4846716e7c5c..36d230cedf12 100644
--- a/include/uapi/linux/keyboard.h
+++ b/include/uapi/linux/keyboard.h
@@ -27,7 +27,6 @@
#define MAX_NR_FUNC 256 /* max nr of strings assigned to keys */
#define KT_LATIN 0 /* we depend on this being zero */
-#define KT_LETTER 11 /* symbol that can be acted upon by CapsLock */
#define KT_FN 1
#define KT_SPEC 2
#define KT_PAD 3
@@ -38,6 +37,7 @@
#define KT_META 8
#define KT_ASCII 9
#define KT_LOCK 10
+#define KT_LETTER 11 /* symbol that can be acted upon by CapsLock */
#define KT_SLOCK 12
#define KT_DEAD2 13
#define KT_BRL 14
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f738c3b53f4e..9ce46edc62a5 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -29,9 +29,21 @@
/*
* - 1.1 - initial version
* - 1.3 - Add SMI events support
+ * - 1.4 - Indicate new SRAM EDC bit in device properties
+ * - 1.5 - Add SVM API
+ * - 1.6 - Query clear flags in SVM get_attr API
+ * - 1.7 - Checkpoint Restore (CRIU) API
+ * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
+ * - 1.9 - Add available memory ioctl
+ * - 1.10 - Add SMI profiler event log
+ * - 1.11 - Add unified memory for ctx save/restore area
+ * - 1.12 - Add DMA buf export ioctl
+ * - 1.13 - Add debugger API
+ * - 1.14 - Update kfd_event_data
+ * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 3
+#define KFD_IOCTL_MINOR_VERSION 15
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -95,6 +107,38 @@ struct kfd_ioctl_get_queue_wave_state_args {
__u32 pad;
};
+struct kfd_ioctl_get_available_memory_args {
+ __u64 available; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 pad;
+};
+
+struct kfd_dbg_device_info_entry {
+ __u64 exception_status;
+ __u64 lds_base;
+ __u64 lds_limit;
+ __u64 scratch_base;
+ __u64 scratch_limit;
+ __u64 gpuvm_base;
+ __u64 gpuvm_limit;
+ __u32 gpu_id;
+ __u32 location_id;
+ __u32 vendor_id;
+ __u32 device_id;
+ __u32 revision_id;
+ __u32 subsystem_vendor_id;
+ __u32 subsystem_device_id;
+ __u32 fw_version;
+ __u32 gfx_target_version;
+ __u32 simd_count;
+ __u32 max_waves_per_simd;
+ __u32 array_count;
+ __u32 simd_arrays_per_engine;
+ __u32 num_xcc;
+ __u32 capability;
+ __u32 debug_prop;
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -191,6 +235,8 @@ struct kfd_ioctl_dbg_wave_control_args {
__u32 buf_size_in_bytes; /*including gpu_id and buf_size */
};
+#define KFD_INVALID_FD 0xffffffff
+
/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
@@ -276,12 +322,20 @@ struct kfd_hsa_hw_exception_data {
__u32 gpu_id;
};
+/* hsa signal event data */
+struct kfd_hsa_signal_event_data {
+ __u64 last_event_age; /* to and from KFD */
+};
+
/* Event data */
struct kfd_event_data {
union {
+ /* From KFD */
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
- }; /* From KFD */
+ /* To and From KFD */
+ struct kfd_hsa_signal_event_data signal_event_data;
+ };
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
__u32 event_id; /* to KFD */
@@ -351,6 +405,8 @@ struct kfd_ioctl_acquire_vm_args {
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
+#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
+#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
/* Allocate memory for later SVM (shared virtual memory) mapping.
*
@@ -446,17 +502,144 @@ struct kfd_ioctl_import_dmabuf_args {
__u32 dmabuf_fd; /* to KFD */
};
+struct kfd_ioctl_export_dmabuf_args {
+ __u64 handle; /* to KFD */
+ __u32 flags; /* to KFD */
+ __u32 dmabuf_fd; /* from KFD */
+};
+
/*
* KFD SMI(System Management Interface) events
*/
-/* Event type (defined by bitmask) */
-#define KFD_SMI_EVENT_VMFAULT 0x0000000000000001
+enum kfd_smi_event {
+ KFD_SMI_EVENT_NONE = 0, /* not used */
+	KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
+ KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
+ KFD_SMI_EVENT_GPU_PRE_RESET = 3,
+ KFD_SMI_EVENT_GPU_POST_RESET = 4,
+ KFD_SMI_EVENT_MIGRATE_START = 5,
+ KFD_SMI_EVENT_MIGRATE_END = 6,
+ KFD_SMI_EVENT_PAGE_FAULT_START = 7,
+ KFD_SMI_EVENT_PAGE_FAULT_END = 8,
+ KFD_SMI_EVENT_QUEUE_EVICTION = 9,
+ KFD_SMI_EVENT_QUEUE_RESTORE = 10,
+ KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
+
+ /*
+	 * Max event number. Used as a flag bit to receive events from all
+	 * processes; this requires super user permission, otherwise no
+	 * events from other processes can be received. Without this flag,
+	 * only events from the calling process are received.
+ */
+ KFD_SMI_EVENT_ALL_PROCESS = 64
+};
+
+enum KFD_MIGRATE_TRIGGERS {
+ KFD_MIGRATE_TRIGGER_PREFETCH,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION
+};
+
+enum KFD_QUEUE_EVICTION_TRIGGERS {
+ KFD_QUEUE_EVICTION_TRIGGER_SVM,
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
+ KFD_QUEUE_EVICTION_TRIGGER_TTM,
+ KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
+ KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
+ KFD_QUEUE_EVICTION_CRIU_RESTORE
+};
+
+enum KFD_SVM_UNMAP_TRIGGERS {
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
+};
+
+#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
+#define KFD_SMI_EVENT_MSG_SIZE 96
struct kfd_ioctl_smi_events_args {
__u32 gpuid; /* to KFD */
__u32 anon_fd; /* from KFD */
};
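
A hedged sketch of subscribing to VM-fault events with the mask macro above (open_smi_events is a hypothetical helper; enabling events by writing a u64 mask to the returned anon fd is a driver convention, not part of this header):

	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int open_smi_events(int kfd_fd, __u32 gpuid)
	{
		struct kfd_ioctl_smi_events_args args = { .gpuid = gpuid };
		__u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);

		if (ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args))
			return -1;
		/* assumed enable protocol: write the u64 event mask to the fd */
		if (write(args.anon_fd, &mask, sizeof(mask)) < 0)
			return -1;
		return args.anon_fd;	/* read() this fd to consume event records */
	}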
+/**************************************************************************************************
+ * CRIU IOCTLs (Checkpoint Restore In Userspace)
+ *
+ * When checkpointing a process, the userspace application will perform:
+ * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
+ * all the queues.
+ * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
+ * 3. UNPAUSE op to un-evict all the queues
+ *
+ * When restoring a process, the CRIU userspace application will perform:
+ *
+ * 1. RESTORE op to restore process contents
+ * 2. RESUME op to start the process
+ *
+ * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. The user
+ * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
+ */
+
+enum kfd_criu_op {
+ KFD_CRIU_OP_PROCESS_INFO,
+ KFD_CRIU_OP_CHECKPOINT,
+ KFD_CRIU_OP_UNPAUSE,
+ KFD_CRIU_OP_RESTORE,
+ KFD_CRIU_OP_RESUME,
+};
+
+/**
+ * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
+ * @devices: [in/out] User pointer to memory location for devices information.
+ * This is an array of type kfd_criu_device_bucket.
+ * @bos: [in/out] User pointer to memory location for BOs information
+ * This is an array of type kfd_criu_bo_bucket.
+ * @priv_data: [in/out] User pointer to memory location for private data
+ * @priv_data_size: [in/out] Size of priv_data in bytes
+ * @num_devices: [in/out] Number of GPUs used by process. Size of @devices array.
+ * @num_bos: [in/out] Number of BOs used by process. Size of @bos array.
+ * @num_objects: [in/out] Number of objects used by process. Objects are opaque to
+ * user application.
+ * @pid: [in/out] PID of the process being checkpointed
+ * @op: [in] Type of operation (kfd_criu_op)
+ *
+ * Return: 0 on success, -errno on failure
+ */
+struct kfd_ioctl_criu_args {
+ __u64 devices; /* Used during ops: CHECKPOINT, RESTORE */
+ __u64 bos; /* Used during ops: CHECKPOINT, RESTORE */
+ __u64 priv_data; /* Used during ops: CHECKPOINT, RESTORE */
+ __u64 priv_data_size; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 num_devices; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 num_bos; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 num_objects; /* Used during ops: PROCESS_INFO, RESTORE */
+ __u32 pid; /* Used during ops: PROCESS_INFO, RESUME */
+ __u32 op;
+};
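
A hedged sketch of the checkpoint sequence described in the comment block above (checkpoint_process is a hypothetical helper; AMDKFD_IOC_CRIU_OP is defined later in this header, and buffer allocation between the steps is elided):

	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int checkpoint_process(int kfd_fd, __u32 pid)
	{
		struct kfd_ioctl_criu_args args = { .pid = pid };

		args.op = KFD_CRIU_OP_PROCESS_INFO;	/* pauses and evicts all queues */
		if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args))
			return -1;
		/* allocate @devices/@bos/@priv_data here from the returned
		 * num_devices/num_bos/priv_data_size (elided) */
		args.op = KFD_CRIU_OP_CHECKPOINT;
		if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args))
			return -1;
		args.op = KFD_CRIU_OP_UNPAUSE;		/* un-evict the queues */
		return ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
	}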
+
+struct kfd_criu_device_bucket {
+ __u32 user_gpu_id;
+ __u32 actual_gpu_id;
+ __u32 drm_fd;
+ __u32 pad;
+};
+
+struct kfd_criu_bo_bucket {
+ __u64 addr;
+ __u64 size;
+ __u64 offset;
+ __u64 restored_offset; /* During restore, updated offset for BO */
+ __u32 gpu_id; /* This is the user_gpu_id */
+ __u32 alloc_flags;
+ __u32 dmabuf_fd;
+ __u32 pad;
+};
+
+/* CRIU IOCTLs - END */
+/**************************************************************************************************/
+
/* Register offset inside the remapped mmio page
*/
enum kfd_mmio_remap {
@@ -464,6 +647,806 @@ enum kfd_mmio_remap {
KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
+/* Guarantee host access to memory */
+#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
+/* Fine grained coherency between all devices with access */
+#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
+/* Use any GPU in same hive as preferred device */
+#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
+/* GPUs only read, allows replication */
+#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
+/* Allow execution on GPU */
+#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
+/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
+#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
+/* Keep GPU memory mapping always valid as if XNACK is disabled */
+#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
+/* Fine grained coherency between all devices using device-scope atomics */
+#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080
+
+/**
+ * kfd_ioctl_svm_op - SVM ioctl operations
+ *
+ * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
+ * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
+ */
+enum kfd_ioctl_svm_op {
+ KFD_IOCTL_SVM_OP_SET_ATTR,
+ KFD_IOCTL_SVM_OP_GET_ATTR
+};
+
+/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
+ *
+ * GPU IDs are used to specify GPUs as preferred and prefetch locations.
+ * Below definitions are used for system memory or for leaving the preferred
+ * location unspecified.
+ */
+enum kfd_ioctl_svm_location {
+ KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
+ KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
+};
+
+/**
+ * kfd_ioctl_svm_attr_type - SVM attribute types
+ *
+ * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
+ * system memory
+ * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
+ * system memory. Setting this triggers an
+ * immediate prefetch (migration).
+ * @KFD_IOCTL_SVM_ATTR_ACCESS:
+ * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
+ * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
+ * by the attribute value
+ * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
+ * KFD_IOCTL_SVM_FLAG_...)
+ * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
+ * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
+ * (log2 num pages)
+ */
+enum kfd_ioctl_svm_attr_type {
+ KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
+ KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
+ KFD_IOCTL_SVM_ATTR_ACCESS,
+ KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
+ KFD_IOCTL_SVM_ATTR_NO_ACCESS,
+ KFD_IOCTL_SVM_ATTR_SET_FLAGS,
+ KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
+ KFD_IOCTL_SVM_ATTR_GRANULARITY
+};
+
+/**
+ * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
+ *
+ * The meaning of the @value depends on the attribute type.
+ *
+ * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
+ * @value: attribute value
+ */
+struct kfd_ioctl_svm_attribute {
+ __u32 type;
+ __u32 value;
+};
+
+/**
+ * kfd_ioctl_svm_args - Arguments for SVM ioctl
+ *
+ * @op specifies the operation to perform (see enum
+ * @kfd_ioctl_svm_op). @start_addr and @size are common for all
+ * operations.
+ *
+ * A variable number of attributes can be given in @attrs.
+ * @nattr specifies the number of attributes. New attributes can be
+ * added in the future without breaking the ABI. If unknown attributes
+ * are given, the function returns -EINVAL.
+ *
+ * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
+ * range. It may overlap existing virtual address ranges. If it does,
+ * the existing ranges will be split such that the attribute changes
+ * only apply to the specified address range.
+ *
+ * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
+ * over all memory in the given range and returns the result as the
+ * attribute value. If different pages have different preferred or
+ * prefetch locations, 0xffffffff will be returned for
+ * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
+ * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
+ * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
+ * aggregated by bitwise AND. That means a flag will be set in the
+ * output if that flag is set for all pages in the range. For
+ * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
+ * aggregated by bitwise NOR. That means a flag will be set in the
+ * output if that flag is clear for all pages in the range.
+ * The minimum migration granularity throughout the range will be
+ * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
+ *
+ * Querying of accessibility attributes works by initializing the
+ * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
+ * GPUID being queried. Multiple attributes can be given to allow
+ * querying multiple GPUIDs. The ioctl function overwrites the
+ * attribute type to indicate the access for the specified GPU.
+ */
+struct kfd_ioctl_svm_args {
+ __u64 start_addr;
+ __u64 size;
+ __u32 op;
+ __u32 nattr;
+ /* Variable length array of attributes */
+ struct kfd_ioctl_svm_attribute attrs[];
+};
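
Because attrs[] is a flexible array, callers size the allocation by nattr. A hedged sketch that prefetches a range to one GPU and marks it the preferred location (svm_prefetch is a hypothetical helper; AMDKFD_IOC_SVM is defined later in this header):

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int svm_prefetch(int kfd_fd, __u64 addr, __u64 size, __u32 gpuid)
	{
		struct kfd_ioctl_svm_args *args;
		int ret;

		args = calloc(1, sizeof(*args) + 2 * sizeof(args->attrs[0]));
		if (!args)
			return -1;
		args->start_addr = addr;
		args->size = size;
		args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
		args->nattr = 2;
		args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
		args->attrs[0].value = gpuid;
		args->attrs[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;	/* migrates now */
		args->attrs[1].value = gpuid;
		ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
		free(args);
		return ret;
	}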
+
+/**
+ * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
+ *
+ * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
+ *
+ * @xnack_enabled indicates whether recoverable page faults should be
+ * enabled for the current process. 0 means disabled, positive means
+ * enabled, negative means leave unchanged. If enabled, virtual address
+ * translations on GFXv9 and later AMD GPUs can return XNACK and retry
+ * the access until a valid PTE is available. This is used to implement
+ * device page faults.
+ *
+ * On output, @xnack_enabled returns the (new) current mode (0 or
+ * positive). Therefore, a negative input value can be used to query
+ * the current mode without changing it.
+ *
+ * The XNACK mode fundamentally changes the way SVM managed memory works
+ * in the driver, with subtle effects on application performance and
+ * functionality.
+ *
+ * Enabling XNACK mode requires shader programs to be compiled
+ * differently. Furthermore, not all GPUs support changing the mode
+ * per-process. Therefore changing the mode is only allowed while no
+ * user mode queues exist in the process. This ensures that no shader
+ * code is running that may have been compiled for the wrong mode. GPUs
+ * that cannot change to the requested mode will prevent the requested
+ * mode from taking effect. All GPUs used by the process must be in the
+ * same XNACK mode.
+ *
+ * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
+ * Therefore those GPUs are not considered for the XNACK mode switch.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+struct kfd_ioctl_set_xnack_mode_args {
+ __s32 xnack_enabled;
+};
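
Per the negative-input semantics above, the mode can be queried without side effects. A hedged sketch (print_xnack_mode is a hypothetical helper; AMDKFD_IOC_SET_XNACK_MODE is defined later in this header):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static void print_xnack_mode(int kfd_fd)
	{
		/* negative input queries the current mode without changing it */
		struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };

		if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args) == 0)
			printf("XNACK %s\n", args.xnack_enabled ? "enabled" : "disabled");
	}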
+
+/* Wave launch override modes */
+enum kfd_dbg_trap_override_mode {
+ KFD_DBG_TRAP_OVERRIDE_OR = 0,
+ KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
+};
+
+/* Wave launch overrides */
+enum kfd_dbg_trap_mask {
+ KFD_DBG_TRAP_MASK_FP_INVALID = 1,
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
+ KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
+};
+
+/* Wave launch modes */
+enum kfd_dbg_trap_wave_launch_mode {
+ KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
+ KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
+ KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
+};
+
+/* Address watch modes */
+enum kfd_dbg_trap_address_watch_mode {
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
+};
+
+/* Additional wave settings */
+enum kfd_dbg_trap_flags {
+ KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
+};
+
+/* Trap exceptions */
+enum kfd_dbg_trap_exception_code {
+ EC_NONE = 0,
+ /* per queue */
+ EC_QUEUE_WAVE_ABORT = 1,
+ EC_QUEUE_WAVE_TRAP = 2,
+ EC_QUEUE_WAVE_MATH_ERROR = 3,
+ EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
+ EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
+ EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
+ EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
+ EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
+ EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
+ EC_QUEUE_PACKET_RESERVED = 19,
+ EC_QUEUE_PACKET_UNSUPPORTED = 20,
+ EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
+ EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
+ EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
+ EC_QUEUE_PREEMPTION_ERROR = 30,
+ EC_QUEUE_NEW = 31,
+ /* per device */
+ EC_DEVICE_QUEUE_DELETE = 32,
+ EC_DEVICE_MEMORY_VIOLATION = 33,
+ EC_DEVICE_RAS_ERROR = 34,
+ EC_DEVICE_FATAL_HALT = 35,
+ EC_DEVICE_NEW = 36,
+ /* per process */
+ EC_PROCESS_RUNTIME = 48,
+ EC_PROCESS_DEVICE_REMOVE = 49,
+ EC_MAX
+};
+
+/* Mask generated by ecode in kfd_dbg_trap_exception_code */
+#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
+
+/* Masks for exception code type checks below */
+#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \
+ KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \
+ KFD_EC_MASK(EC_QUEUE_NEW))
+#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \
+ KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \
+ KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \
+ KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \
+ KFD_EC_MASK(EC_DEVICE_NEW))
+#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
+ KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
+
+/* Checks for exception code types for KFD search */
+#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
+ (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
+ (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
+ (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
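
For example, a debugger might build the exception mask for the enable call documented below with a hypothetical fragment like:

	__u64 exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |
			       KFD_EC_MASK(EC_QUEUE_NEW);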
+
+
+/* Runtime enable states */
+enum kfd_dbg_runtime_state {
+ DEBUG_RUNTIME_STATE_DISABLED = 0,
+ DEBUG_RUNTIME_STATE_ENABLED = 1,
+ DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
+};
+
+/* Runtime enable status */
+struct kfd_runtime_info {
+ __u64 r_debug;
+ __u32 runtime_state;
+ __u32 ttmp_setup;
+};
+
+/* Enable modes for runtime enable */
+#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
+#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
+
+/**
+ * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
+ *
+ * Coordinates debug exception signalling and debug device enablement with runtime.
+ *
+ * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
+ * @mode_mask - mask to set mode
+ * KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
+ * KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
+ * @capabilities_mask - mask to notify runtime on what KFD supports
+ *
+ * Return - 0 on SUCCESS.
+ * - EBUSY if runtime enable call already pending.
+ * - EEXIST if user queues already active prior to call.
+ * If process is debug enabled, runtime enable will enable debug devices and
+ * wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
+ * to unblock - see kfd_ioctl_dbg_trap_args.
+ *
+ */
+struct kfd_ioctl_runtime_enable_args {
+ __u64 r_debug;
+ __u32 mode_mask;
+ __u32 capabilities_mask;
+};
+
+/* Queue information */
+struct kfd_queue_snapshot_entry {
+ __u64 exception_status;
+ __u64 ring_base_address;
+ __u64 write_pointer_address;
+ __u64 read_pointer_address;
+ __u64 ctx_save_restore_address;
+ __u32 queue_id;
+ __u32 gpu_id;
+ __u32 ring_size;
+ __u32 queue_type;
+ __u32 ctx_save_restore_area_size;
+ __u32 reserved;
+};
+
+/* Queue status return for suspend/resume */
+#define KFD_DBG_QUEUE_ERROR_BIT 30
+#define KFD_DBG_QUEUE_INVALID_BIT 31
+#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
+#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
+
+/* Context save area header information */
+struct kfd_context_save_area_header {
+ struct {
+ __u32 control_stack_offset;
+ __u32 control_stack_size;
+ __u32 wave_state_offset;
+ __u32 wave_state_size;
+ } wave_state;
+ __u32 debug_offset;
+ __u32 debug_size;
+ __u64 err_payload_addr;
+ __u32 err_event_id;
+ __u32 reserved1;
+};
+
+/*
+ * Debug operations
+ *
+ * For specifics on usage and return values, see documentation per operation
+ * below. Otherwise, generic error returns apply:
+ * - ESRCH if the process to debug does not exist.
+ *
+ * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
+ * KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
+ * Also returns this error if GPU hardware scheduling is not supported.
+ *
+ * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
+ * PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
+ * clean up of debug mode as long as process is debug enabled.
+ *
+ * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
+ * AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
+ *
+ * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
+ *
+ * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
+ * is in a fatal state.
+ *
+ */
+enum kfd_dbg_trap_operations {
+ KFD_IOC_DBG_TRAP_ENABLE = 0,
+ KFD_IOC_DBG_TRAP_DISABLE = 1,
+ KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
+ KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
+ KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
+ KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
+ KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
+ KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
+ KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
+};
+
+/**
+ * kfd_ioctl_dbg_trap_enable_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
+ *
+ * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
+ * kfd_ioctl_dbg_trap_args to disable debug session.
+ *
+ * @exception_mask (IN) - exceptions to raise to the debugger
+ * @rinfo_ptr (IN) - pointer to runtime info buffer (see kfd_runtime_info)
+ * @rinfo_size (IN/OUT) - size of runtime info buffer in bytes
+ * @dbg_fd (IN)	     - fd KFD will use to notify the debugger of raised
+ * exceptions set in exception_mask.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
+ *	   Size of the kfd_runtime_info saved by KFD is returned in @rinfo_size.
+ * - EBADF if KFD cannot get a reference to dbg_fd.
+ * - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
+ * - EINVAL if target process is already debug enabled.
+ *
+ */
+struct kfd_ioctl_dbg_trap_enable_args {
+ __u64 exception_mask;
+ __u64 rinfo_ptr;
+ __u32 rinfo_size;
+ __u32 dbg_fd;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_send_runtime_event_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
+ * Raises exceptions to runtime.
+ *
+ * @exception_mask (IN) - exceptions to raise to runtime
+ * @gpu_id (IN) - target device id
+ * @queue_id (IN) - target queue id
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * - ENODEV if gpu_id not found.
+ * If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
+ * AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
+ * All other exceptions are raised to runtime through err_payload_addr.
+ * See kfd_context_save_area_header.
+ */
+struct kfd_ioctl_dbg_trap_send_runtime_event_args {
+ __u64 exception_mask;
+ __u32 gpu_id;
+ __u32 queue_id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
+ * Set new exceptions to be raised to the debugger.
+ *
+ * @exception_mask (IN) - new exceptions to raise to the debugger
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ */
+struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
+ __u64 exception_mask;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_wave_launch_override_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
+ * Enable HW exceptions to raise trap.
+ *
+ * @override_mode (IN) - see kfd_dbg_trap_override_mode
+ * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
+ *			   IN is the set of override modes requested to be enabled.
+ *			   OUT is referenced in Return below.
+ * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
+ *			   IN is the set of override modes to check for support.
+ *			   OUT is referenced in Return below.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Previous enablement is returned in @enable_mask.
+ * Actual override support is returned in @support_request_mask.
+ * - EINVAL if override mode is not supported.
+ * - EACCES if trap support requested is not actually supported.
+ * i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
+ * Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
+ */
+struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
+ __u32 override_mode;
+ __u32 enable_mask;
+ __u32 support_request_mask;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
+ * Set wave launch mode.
+ *
+ * @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ */
+struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
+ __u32 launch_mode;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_suspend_queues_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
+ * Suspend queues.
+ *
+ * @exception_mask (IN) - raised exceptions to clear
+ * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
+ * to suspend
+ * @num_queues (IN) - number of queues to suspend in @queue_array_ptr
+ * @grace_period (IN) - wave time allowance before preemption,
+ *			 in units of 1000 GPU clock cycles
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Destruction of a suspended queue is blocked until the queue is
+ * resumed. This allows the debugger to access queue information and
+ * its context save area without running into a race condition on
+ * queue destruction.
+ * Automatically copies per queue context save area header information
+ * into the save area base
+ * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
+ *
+ * Return - Number of queues suspended on SUCCESS.
+ *	   KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK, masked
+ *	   into each queue id in the @queue_array_ptr array, report the
+ *	   reason a suspend was unsuccessful.
+ * KFD_DBG_QUEUE_ERROR_MASK = HW failure.
+ * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
+ * is being destroyed.
+ */
+struct kfd_ioctl_dbg_trap_suspend_queues_args {
+ __u64 exception_mask;
+ __u64 queue_array_ptr;
+ __u32 num_queues;
+ __u32 grace_period;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_resume_queues_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
+ * Resume queues.
+ *
+ * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
+ * to resume
+ * @num_queues (IN) - number of queues to resume in @queue_array_ptr
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - Number of queues resumed on SUCCESS.
+ *	   KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK, masked
+ *	   into each queue id in the @queue_array_ptr array, report the
+ *	   reason a resume was unsuccessful.
+ * KFD_DBG_QUEUE_ERROR_MASK = HW failure.
+ * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
+ */
+struct kfd_ioctl_dbg_trap_resume_queues_args {
+ __u64 queue_array_ptr;
+ __u32 num_queues;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_node_address_watch_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
+ * Sets address watch for device.
+ *
+ * @address (IN) - watch address to set
+ * @mode (IN) - see kfd_dbg_trap_address_watch_mode
+ * @mask (IN) - watch address mask
+ * @gpu_id (IN) - target gpu to set watch point
+ * @id (OUT) - watch id allocated
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	   Allocated watch ID returned in @id.
+ *	 - ENODEV if gpu_id not found.
+ *	 - ENOMEM if no watch IDs can be allocated.
+ */
+struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
+ __u64 address;
+ __u32 mode;
+ __u32 mask;
+ __u32 gpu_id;
+ __u32 id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_clear_node_address_watch_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
+ * Clear address watch for device.
+ *
+ * @gpu_id (IN) - target device to clear watch point
+ * @id (IN) - allocated watch id to clear
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * - ENODEV if gpu_id not found.
+ * - EINVAL if watch ID has not been allocated.
+ */
+struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
+ __u32 gpu_id;
+ __u32 id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_flags_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
+ * Sets flags for wave behaviour.
+ *
+ * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	 - EACCES if any debug device does not allow flag options.
+ */
+struct kfd_ioctl_dbg_trap_set_flags_args {
+ __u32 flags;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_query_debug_event_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
+ *
+ * Find one or more raised exceptions. This function can return multiple
+ * exceptions from a single queue or a single device with one call. To find
+ * all raised exceptions, this function must be called repeatedly until it
+ * returns -EAGAIN. Returned exceptions can optionally be cleared by
+ * setting the corresponding bit in the @exception_mask input parameter.
+ * However, clearing an exception prevents retrieving further information
+ * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
+ *
+ * @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
+ * @gpu_id (OUT) - gpu id of exceptions raised
+ * @queue_id (OUT) - queue id of exceptions raised
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on raised exception found
+ *	   Raised exceptions found are returned in @exception_mask
+ * with reported source id returned in @gpu_id or @queue_id.
+ * - EAGAIN if no raised exception has been found
+ */
+struct kfd_ioctl_dbg_trap_query_debug_event_args {
+ __u64 exception_mask;
+ __u32 gpu_id;
+ __u32 queue_id;
+};
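
A hedged sketch of the drain loop described above, using the kfd_ioctl_dbg_trap_args wrapper defined later in this file (drain_debug_events is a hypothetical helper):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static void drain_debug_events(int kfd_fd, __u32 pid)
	{
		struct kfd_ioctl_dbg_trap_args args;

		for (;;) {
			memset(&args, 0, sizeof(args));
			args.pid = pid;
			args.op = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
			/* clear wave traps as they are reported */
			args.query_debug_event.exception_mask =
				KFD_EC_MASK(EC_QUEUE_WAVE_TRAP);
			if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
				break;	/* errno == EAGAIN: nothing left */
			/* args.query_debug_event now identifies one raised
			 * exception via exception_mask, gpu_id and queue_id */
		}
	}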
+
+/**
+ * kfd_ioctl_dbg_trap_query_exception_info_args
+ *
+ * Arguments KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
+ * Get additional info on raised exception.
+ *
+ * @info_ptr (IN) - pointer to exception info buffer to copy to
+ * @info_size (IN/OUT) - exception info buffer size (bytes)
+ * @source_id (IN) - target gpu or queue id
+ * @exception_code (IN) - target exception
+ * @clear_exception (IN) - clear raised @exception_code exception
+ * (0 = false, 1 = true)
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
+ * bytes of memory exception data to @info_ptr.
+ * If @exception_code is EC_PROCESS_RUNTIME, copy saved
+ * kfd_runtime_info to @info_ptr.
+ * Actual required @info_ptr size (bytes) is returned in @info_size.
+ */
+struct kfd_ioctl_dbg_trap_query_exception_info_args {
+ __u64 info_ptr;
+ __u32 info_size;
+ __u32 source_id;
+ __u32 exception_code;
+ __u32 clear_exception;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_get_queue_snapshot_args
+ *
+ * Arguments KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
+ * Get queue information.
+ *
+ * @exception_mask (IN) - exceptions raised to clear
+ * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
+ * @num_queues (IN/OUT) - number of queue snapshot entries
+ * The debugger specifies the size of the array allocated in @num_queues.
+ * KFD returns the number of queues that actually existed. If this is
+ * larger than the size specified by the debugger, KFD will not overflow
+ * the array allocated by the debugger.
+ *
+ * @entry_size (IN/OUT) - size per entry in bytes
+ * The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
+ * @entry_size. KFD returns the number of bytes actually populated per
+ * entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine
+ * which fields in struct kfd_queue_snapshot_entry are valid. This allows
+ * growing the ABI in a backwards compatible manner.
+ * Note that entry_size(IN) should still be used to stride the snapshot buffer in the
+ * event that it is larger than the actual struct kfd_queue_snapshot_entry.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
+ * into @snapshot_buf_ptr if @num_queues(IN) > 0.
+ * Otherwise return @num_queues(OUT) queue snapshot entries that exist.
+ */
+struct kfd_ioctl_dbg_trap_queue_snapshot_args {
+ __u64 exception_mask;
+ __u64 snapshot_buf_ptr;
+ __u32 num_queues;
+ __u32 entry_size;
+};
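
The num_queues contract above yields a two-call pattern: query the count with a zero-sized buffer, then fetch with a correctly sized one. A hedged sketch (snapshot_queues is a hypothetical helper):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int snapshot_queues(int kfd_fd, __u32 pid)
	{
		struct kfd_ioctl_dbg_trap_args args;
		struct kfd_queue_snapshot_entry *buf;

		memset(&args, 0, sizeof(args));
		args.pid = pid;
		args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
		args.queue_snapshot.entry_size = sizeof(*buf);
		if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))	/* 1st call: count */
			return -1;

		buf = calloc(args.queue_snapshot.num_queues, sizeof(*buf));
		if (!buf)
			return -1;
		args.queue_snapshot.snapshot_buf_ptr = (__u64)(uintptr_t)buf;
		args.queue_snapshot.entry_size = sizeof(*buf);	/* reset IN value */
		if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args)) {	/* 2nd call: fetch */
			free(buf);
			return -1;
		}
		/* walk buf here, striding by the entry_size passed in */
		free(buf);
		return 0;
	}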
+
+/**
+ * kfd_ioctl_dbg_trap_get_device_snapshot_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
+ * Get device information.
+ *
+ * @exception_mask (IN) - exceptions raised to clear
+ * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
+ * @num_devices (IN/OUT) - number of debug devices to snapshot
+ * The debugger specifies the size of the array allocated in @num_devices.
+ * KFD returns the number of devices that actually existed. If this is
+ * larger than the size specified by the debugger, KFD will not overflow
+ * the array allocated by the debugger.
+ *
+ * @entry_size (IN/OUT) - size per entry in bytes
+ * The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
+ * @entry_size. KFD returns the number of bytes actually populated. The
+ * debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
+ * in struct kfd_dbg_device_info_entry are valid. This allows growing the
+ * ABI in a backwards compatible manner.
+ * Note that entry_size(IN) should still be used to stride the snapshot buffer in the
+ * event that it is larger than the actual struct kfd_dbg_device_info_entry.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
+ * into @snapshot_buf_ptr if @num_devices(IN) > 0.
+ *	   Otherwise return @num_devices(OUT) device snapshot entries that exist.
+ */
+struct kfd_ioctl_dbg_trap_device_snapshot_args {
+ __u64 exception_mask;
+ __u64 snapshot_buf_ptr;
+ __u32 num_devices;
+ __u32 entry_size;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_args
+ *
+ * Arguments to debug target process.
+ *
+ * @pid - target process to debug
+ * @op - debug operation (see kfd_dbg_trap_operations)
+ *
+ * @op determines which union struct args to use.
+ * Refer to the kernel docs for each kfd_ioctl_dbg_trap_*_args struct.
+ */
+struct kfd_ioctl_dbg_trap_args {
+ __u32 pid;
+ __u32 op;
+
+ union {
+ struct kfd_ioctl_dbg_trap_enable_args enable;
+ struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
+ struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
+ struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
+ struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
+ struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
+ struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
+ struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
+ struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
+ struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
+ struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
+ struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
+ struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
+ struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
+ };
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -506,16 +1489,16 @@ enum kfd_mmio_remap {
#define AMDKFD_IOC_WAIT_EVENTS \
AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
-#define AMDKFD_IOC_DBG_REGISTER \
+#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \
AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
-#define AMDKFD_IOC_DBG_UNREGISTER \
+#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \
AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
-#define AMDKFD_IOC_DBG_ADDRESS_WATCH \
+#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
-#define AMDKFD_IOC_DBG_WAVE_CONTROL \
+#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \
AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
@@ -564,7 +1547,27 @@ enum kfd_mmio_remap {
#define AMDKFD_IOC_SMI_EVENTS \
AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
+#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
+
+#define AMDKFD_IOC_SET_XNACK_MODE \
+ AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
+
+#define AMDKFD_IOC_CRIU_OP \
+ AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
+
+#define AMDKFD_IOC_AVAILABLE_MEMORY \
+ AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
+
+#define AMDKFD_IOC_EXPORT_DMABUF \
+ AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
+
+#define AMDKFD_IOC_RUNTIME_ENABLE \
+ AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
+
+#define AMDKFD_IOC_DBG_TRAP \
+ AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x20
+#define AMDKFD_COMMAND_END 0x27
#endif
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
new file mode 100644
index 000000000000..a51b7331e0b4
--- /dev/null
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_SYSFS_H_INCLUDED
+#define KFD_SYSFS_H_INCLUDED
+
+/* Capability bits in node properties */
+#define HSA_CAP_HOT_PLUGGABLE 0x00000001
+#define HSA_CAP_ATS_PRESENT 0x00000002
+#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
+#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
+#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
+#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
+#define HSA_CAP_VA_LIMIT 0x00000040
+#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
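
These packed fields are extracted with the usual mask-and-shift idiom; a hypothetical fragment, assuming caps holds a node's parsed capability property:

	unsigned int watch_bits = (caps & HSA_CAP_WATCH_POINTS_TOTALBITS_MASK) >>
				  HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT;
	unsigned int doorbell = (caps & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK) >>
				HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT;
	/* doorbell now compares against the HSA_CAP_DOORBELL_TYPE_* values */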
+
+#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
+#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+
+#define HSA_CAP_TRAP_DEBUG_SUPPORT 0x00008000
+#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED 0x00010000
+#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED 0x00020000
+#define HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED 0x00040000
+
+/* Old buggy user mode depends on this being 0 */
+#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000
+
+#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
+#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT 22
+#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
+#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
+#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
+#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED 0x20000000
+#define HSA_CAP_RESERVED 0xe00f8000
+
+/* debug_prop bits in node properties */
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT 0
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK 0x000003f0
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT 4
+#define HSA_DBG_DISPATCH_INFO_ALWAYS_VALID 0x00000400
+#define HSA_DBG_WATCHPOINTS_EXCLUSIVE 0x00000800
+#define HSA_DBG_RESERVED 0xfffffffffffff000ull
+
+/* Heap types in memory properties */
+#define HSA_MEM_HEAP_TYPE_SYSTEM 0
+#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
+#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
+#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
+#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
+#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
+
+/* Flag bits in memory properties */
+#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
+#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
+#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
+
+/* Cache types in cache properties */
+#define HSA_CACHE_TYPE_DATA 0x00000001
+#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
+#define HSA_CACHE_TYPE_CPU 0x00000004
+#define HSA_CACHE_TYPE_HSACU 0x00000008
+#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
+
+/* Link types in IO link properties (matches CRAT link types) */
+#define HSA_IOLINK_TYPE_UNDEFINED 0
+#define HSA_IOLINK_TYPE_HYPERTRANSPORT 1
+#define HSA_IOLINK_TYPE_PCIEXPRESS 2
+#define HSA_IOLINK_TYPE_AMBA 3
+#define HSA_IOLINK_TYPE_MIPI 4
+#define HSA_IOLINK_TYPE_QPI_1_1 5
+#define HSA_IOLINK_TYPE_RESERVED1 6
+#define HSA_IOLINK_TYPE_RESERVED2 7
+#define HSA_IOLINK_TYPE_RAPID_IO 8
+#define HSA_IOLINK_TYPE_INFINIBAND 9
+#define HSA_IOLINK_TYPE_RESERVED3 10
+#define HSA_IOLINK_TYPE_XGMI 11
+#define HSA_IOLINK_TYPE_XGOP 12
+#define HSA_IOLINK_TYPE_GZ 13
+#define HSA_IOLINK_TYPE_ETHERNET_RDMA 14
+#define HSA_IOLINK_TYPE_RDMA_OTHER 15
+#define HSA_IOLINK_TYPE_OTHER 16
+
+/* Flag bits in IO link properties (matches CRAT flags, excluding the
+ * bi-directional flag, which is not officially part of the CRAT spec and
+ * is only used internally in KFD)
+ */
+#define HSA_IOLINK_FLAGS_ENABLED (1 << 0)
+#define HSA_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+#define HSA_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+#define HSA_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+#define HSA_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+#define HSA_IOLINK_FLAGS_RESERVED 0xffffffe0
+
+#endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f6d86033c4fa..2190adbe3002 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -15,100 +16,41 @@
#define KVM_API_VERSION 12
-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT 16
-
-#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
-
-#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
-
-#define KVM_TRC_HEAD_SIZE 12
-#define KVM_TRC_CYCLE_SIZE 8
-#define KVM_TRC_EXTRA_MAX 7
-
-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
- __u32 buf_size;
- __u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
- _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
- __u32 enabled;
- __u32 padding;
- __u64 address;
-};
-
-struct kvm_debug_guest {
- __u32 enabled;
- __u32 pad;
- struct kvm_breakpoint breakpoints[4];
- __u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
+/*
+ * Backwards-compatible definitions.
+ */
+#define __KVM_HAVE_GUEST_DEBUG
-/* for KVM_CREATE_MEMORY_REGION */
-struct kvm_memory_region {
+/* for KVM_SET_USER_MEMORY_REGION */
+struct kvm_userspace_memory_region {
__u32 slot;
__u32 flags;
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* start of the userspace allocated memory */
};
-/* for KVM_SET_USER_MEMORY_REGION */
-struct kvm_userspace_memory_region {
+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
__u32 slot;
__u32 flags;
__u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
- __u64 userspace_addr; /* start of the userspace allocated memory */
+ __u64 memory_size;
+ __u64 userspace_addr;
+ __u64 guest_memfd_offset;
+ __u32 guest_memfd;
+ __u32 pad1;
+ __u64 pad2[14];
};
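
A hedged sketch of registering guest_memfd-backed RAM with the new struct (map_guest_ram is a hypothetical helper; KVM_SET_USER_MEMORY_REGION2 and KVM_CREATE_GUEST_MEMFD are defined elsewhere in this header series, and the fds and buffers are assumed to come from prior VM setup):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int map_guest_ram(int vm_fd, int gmem_fd, void *host_mem,
				 __u64 mem_size)
	{
		struct kvm_userspace_memory_region2 region = {
			.slot = 0,
			.flags = KVM_MEM_GUEST_MEMFD,
			.guest_phys_addr = 0,
			.memory_size = mem_size,
			.userspace_addr = (__u64)(uintptr_t)host_mem,
			.guest_memfd = gmem_fd,
			.guest_memfd_offset = 0,
		};

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
	}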
/*
- * The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace,
- * other bits are reserved for kvm internal use which are defined in
- * include/linux/kvm_host.h.
+ * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
+ * userspace, other bits are reserved for kvm internal use which are defined
+ * in include/linux/kvm_host.h.
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD (1UL << 2)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
@@ -148,43 +90,6 @@ struct kvm_pit_config {
#define KVM_PIT_SPEAKER_DUMMY 1
-struct kvm_s390_skeys {
- __u64 start_gfn;
- __u64 count;
- __u64 skeydata_addr;
- __u32 flags;
- __u32 reserved[9];
-};
-
-#define KVM_S390_CMMA_PEEK (1 << 0)
-
-/**
- * kvm_s390_cmma_log - Used for CMMA migration.
- *
- * Used both for input and output.
- *
- * @start_gfn: Guest page number to start from.
- * @count: Size of the result buffer.
- * @flags: Control operation mode via KVM_S390_CMMA_* flags
- * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
- * pages are still remaining.
- * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
- * in the PGSTE.
- * @values: Pointer to the values buffer.
- *
- * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
- */
-struct kvm_s390_cmma_log {
- __u64 start_gfn;
- __u32 count;
- __u32 flags;
- union {
- __u64 remaining;
- __u64 mask;
- };
- __u64 values;
-};
-
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
@@ -216,6 +121,20 @@ struct kvm_hyperv_exit {
} u;
};
+struct kvm_xen_exit {
+#define KVM_EXIT_XEN_HCALL 1
+ __u32 type;
+ union {
+ struct {
+ __u32 longmode;
+ __u32 cpl;
+ __u64 input;
+ __u64 result;
+ __u64 params[6];
+ } hcall;
+ } u;
+};
+
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576
@@ -248,6 +167,17 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
#define KVM_EXIT_ARM_NISV 28
+#define KVM_EXIT_X86_RDMSR 29
+#define KVM_EXIT_X86_WRMSR 30
+#define KVM_EXIT_DIRTY_RING_FULL 31
+#define KVM_EXIT_AP_RESET_HOLD 32
+#define KVM_EXIT_X86_BUS_LOCK 33
+#define KVM_EXIT_XEN 34
+#define KVM_EXIT_RISCV_SBI 35
+#define KVM_EXIT_RISCV_CSR 36
+#define KVM_EXIT_NOTIFY 37
+#define KVM_EXIT_LOONGARCH_IOCSR 38
+#define KVM_EXIT_MEMORY_FAULT 39
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -259,6 +189,9 @@ struct kvm_hyperv_exit {
/* Encounter unexpected vm-exit reason */
#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+/* Flags that describe what fields in emulation_failure hold valid data. */
+#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
@@ -317,13 +250,25 @@ struct kvm_run {
__u32 len;
__u8 is_write;
} mmio;
+ /* KVM_EXIT_LOONGARCH_IOCSR */
+ struct {
+ __u64 phys_addr;
+ __u8 data[8];
+ __u32 len;
+ __u8 is_write;
+ } iocsr_io;
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
__u64 args[6];
__u64 ret;
- __u32 longmode;
- __u32 pad;
+
+ union {
+#ifndef __KERNEL__
+ __u32 longmode;
+#endif
+ __u64 flags;
+ };
} hypercall;
/* KVM_EXIT_TPR_ACCESS */
struct {
@@ -338,11 +283,6 @@ struct kvm_run {
__u32 ipb;
} s390_sieic;
/* KVM_EXIT_S390_RESET */
-#define KVM_S390_RESET_POR 1
-#define KVM_S390_RESET_CLEAR 2
-#define KVM_S390_RESET_SUBSYSTEM 4
-#define KVM_S390_RESET_CPU_INIT 8
-#define KVM_S390_RESET_IPL 16
__u64 s390_reset_flags;
/* KVM_EXIT_S390_UCONTROL */
struct {
@@ -362,6 +302,35 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /*
+ * KVM_INTERNAL_ERROR_EMULATION
+ *
+ * "struct emulation_failure" is an overlay of "struct internal"
+ * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
+ * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
+ * sub-types, this struct is ABI! It also needs to be backwards
+ * compatible with "struct internal". Take special care that
+ * "ndata" is correct, that new fields are enumerated in "flags",
+ * and that each flag enumerates fields that are 64-bit aligned
+ * and sized (so that ndata+internal.data[] is valid/accurate).
+ *
+ * Space beyond the defined fields may be used to store arbitrary
+ * debug information relating to the emulation failure. It is
+ * accounted for in "ndata" but the format is unspecified and is
+ * not represented in "flags". Any such information is *not* ABI!
+ */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 flags;
+ union {
+ struct {
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ };
+ };
+ /* Arbitrary debug data may follow. */
+ } emulation_failure;
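
A hedged fragment of how a VMM run loop might consume this, honoring the rule above that only flag-enumerated fields are ABI (run is the mmap'ed struct kvm_run; dump_bytes is a hypothetical helper):

	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
	    run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION &&
	    (run->emulation_failure.flags &
	     KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
		dump_bytes(run->emulation_failure.insn_bytes,
			   run->emulation_failure.insn_size);	/* hypothetical helper */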
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
@@ -390,8 +359,17 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
+#define KVM_SYSTEM_EVENT_WAKEUP 4
+#define KVM_SYSTEM_EVENT_SUSPEND 5
+#define KVM_SYSTEM_EVENT_SEV_TERM 6
__u32 type;
- __u64 flags;
+ __u32 ndata;
+ union {
+#ifndef __KERNEL__
+ __u64 flags;
+#endif
+ __u64 data[16];
+ };
} system_event;
/* KVM_EXIT_S390_STSI */
struct {
@@ -413,6 +391,48 @@ struct kvm_run {
__u64 esr_iss;
__u64 fault_ipa;
} arm_nisv;
+ /* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
+ struct {
+ __u8 error; /* user -> kernel */
+ __u8 pad[7];
+#define KVM_MSR_EXIT_REASON_INVAL (1 << 0)
+#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1)
+#define KVM_MSR_EXIT_REASON_FILTER (1 << 2)
+#define KVM_MSR_EXIT_REASON_VALID_MASK (KVM_MSR_EXIT_REASON_INVAL | \
+ KVM_MSR_EXIT_REASON_UNKNOWN | \
+ KVM_MSR_EXIT_REASON_FILTER)
+ __u32 reason; /* kernel -> user */
+ __u32 index; /* kernel -> user */
+ __u64 data; /* kernel <-> user */
+ } msr;
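
A hedged fragment of completing a deferred RDMSR in a VMM run loop (run is the mmap'ed struct kvm_run; emulate_rdmsr is a hypothetical helper):

	if (run->exit_reason == KVM_EXIT_X86_RDMSR) {
		/* run->msr.reason carries one of the KVM_MSR_EXIT_REASON_* bits */
		run->msr.data = emulate_rdmsr(run->msr.index);	/* hypothetical helper */
		run->msr.error = 0;	/* nonzero asks KVM to inject #GP instead */
	}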
+ /* KVM_EXIT_XEN */
+ struct kvm_xen_exit xen;
+ /* KVM_EXIT_RISCV_SBI */
+ struct {
+ unsigned long extension_id;
+ unsigned long function_id;
+ unsigned long args[6];
+ unsigned long ret[2];
+ } riscv_sbi;
+ /* KVM_EXIT_RISCV_CSR */
+ struct {
+ unsigned long csr_num;
+ unsigned long new_value;
+ unsigned long write_mask;
+ unsigned long ret_value;
+ } riscv_csr;
+ /* KVM_EXIT_NOTIFY */
+ struct {
+#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
+ __u32 flags;
+ } notify;
+ /* KVM_EXIT_MEMORY_FAULT */
+ struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
+ __u64 flags;
+ __u64 gpa;
+ __u64 size;
+ } memory_fault;
/* Fix the size of the union. */
char padding[256];
};
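
As a worked illustration of the kvm_run layout above, here is a minimal userspace run-loop sketch (an editorial illustration, not part of the patch): it maps the shared kvm_run area via KVM_GET_VCPU_MMAP_SIZE and dispatches on exit_reason, trusting emulation_failure.insn_bytes only when the corresponding flag bit is set. The kvm_fd/vcpu_fd setup and all error handling are assumed and elided.

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void run_vcpu(int kvm_fd, int vcpu_fd)
{
	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		switch (run->exit_reason) {
		case KVM_EXIT_INTERNAL_ERROR:
			/* insn_bytes is only valid when flagged as such. */
			if (run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION &&
			    (run->emulation_failure.flags &
			     KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
				fprintf(stderr, "emulation failed (%u insn bytes)\n",
					run->emulation_failure.insn_size);
			return;
		case KVM_EXIT_X86_RDMSR:
			/* reason/index come from the kernel; error/data go back. */
			run->msr.data = 0;
			run->msr.error = 0;
			break;
		default:
			return;
		}
	}
}
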
@@ -459,7 +479,7 @@ struct kvm_coalesced_mmio {
struct kvm_coalesced_mmio_ring {
__u32 first, last;
- struct kvm_coalesced_mmio coalesced_mmio[0];
+ struct kvm_coalesced_mmio coalesced_mmio[];
};
#define KVM_COALESCED_MMIO_MAX \
@@ -479,29 +499,6 @@ struct kvm_translation {
__u8 pad[5];
};
-/* for KVM_S390_MEM_OP */
-struct kvm_s390_mem_op {
- /* in */
- __u64 gaddr; /* the guest address */
- __u64 flags; /* flags */
- __u32 size; /* amount of bytes */
- __u32 op; /* type of operation */
- __u64 buf; /* buffer in userspace */
- union {
- __u8 ar; /* the access register number */
- __u32 sida_offset; /* offset into the sida */
- __u8 reserved[32]; /* should be set to 0 */
- };
-};
-/* types for kvm_s390_mem_op->op */
-#define KVM_S390_MEMOP_LOGICAL_READ 0
-#define KVM_S390_MEMOP_LOGICAL_WRITE 1
-#define KVM_S390_MEMOP_SIDA_READ 2
-#define KVM_S390_MEMOP_SIDA_WRITE 3
-/* flags for kvm_s390_mem_op->flags */
-#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
-#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
-
/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
@@ -532,7 +529,7 @@ struct kvm_clear_dirty_log {
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
- __u8 sigset[0];
+ __u8 sigset[];
};
/* for KVM_TPR_ACCESS_REPORTING */
@@ -559,129 +556,13 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_CHECK_STOP 6
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
+#define KVM_MP_STATE_AP_RESET_HOLD 9
+#define KVM_MP_STATE_SUSPENDED 10
struct kvm_mp_state {
__u32 mp_state;
};
-struct kvm_s390_psw {
- __u64 mask;
- __u64 addr;
-};
-
-/* valid values for type in kvm_s390_interrupt */
-#define KVM_S390_SIGP_STOP 0xfffe0000u
-#define KVM_S390_PROGRAM_INT 0xfffe0001u
-#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
-#define KVM_S390_RESTART 0xfffe0003u
-#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
-#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
-#define KVM_S390_MCHK 0xfffe1000u
-#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
-#define KVM_S390_INT_CPU_TIMER 0xffff1005u
-#define KVM_S390_INT_VIRTIO 0xffff2603u
-#define KVM_S390_INT_SERVICE 0xffff2401u
-#define KVM_S390_INT_EMERGENCY 0xffff1201u
-#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
-/* Anything below 0xfffe0000u is taken by INT_IO */
-#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
- (((schid)) | \
- ((ssid) << 16) | \
- ((cssid) << 18) | \
- ((ai) << 26))
-#define KVM_S390_INT_IO_MIN 0x00000000u
-#define KVM_S390_INT_IO_MAX 0xfffdffffu
-#define KVM_S390_INT_IO_AI_MASK 0x04000000u
-
-
-struct kvm_s390_interrupt {
- __u32 type;
- __u32 parm;
- __u64 parm64;
-};
-
-struct kvm_s390_io_info {
- __u16 subchannel_id;
- __u16 subchannel_nr;
- __u32 io_int_parm;
- __u32 io_int_word;
-};
-
-struct kvm_s390_ext_info {
- __u32 ext_params;
- __u32 pad;
- __u64 ext_params2;
-};
-
-struct kvm_s390_pgm_info {
- __u64 trans_exc_code;
- __u64 mon_code;
- __u64 per_address;
- __u32 data_exc_code;
- __u16 code;
- __u16 mon_class_nr;
- __u8 per_code;
- __u8 per_atmid;
- __u8 exc_access_id;
- __u8 per_access_id;
- __u8 op_access_id;
-#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
-#define KVM_S390_PGM_FLAGS_ILC_0 0x02
-#define KVM_S390_PGM_FLAGS_ILC_1 0x04
-#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
-#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
- __u8 flags;
- __u8 pad[2];
-};
-
-struct kvm_s390_prefix_info {
- __u32 address;
-};
-
-struct kvm_s390_extcall_info {
- __u16 code;
-};
-
-struct kvm_s390_emerg_info {
- __u16 code;
-};
-
-#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
-struct kvm_s390_stop_info {
- __u32 flags;
-};
-
-struct kvm_s390_mchk_info {
- __u64 cr14;
- __u64 mcic;
- __u64 failing_storage_address;
- __u32 ext_damage_code;
- __u32 pad;
- __u8 fixed_logout[16];
-};
-
-struct kvm_s390_irq {
- __u64 type;
- union {
- struct kvm_s390_io_info io;
- struct kvm_s390_ext_info ext;
- struct kvm_s390_pgm_info pgm;
- struct kvm_s390_emerg_info emerg;
- struct kvm_s390_extcall_info extcall;
- struct kvm_s390_prefix_info prefix;
- struct kvm_s390_stop_info stop;
- struct kvm_s390_mchk_info mchk;
- char reserved[64];
- } u;
-};
-
-struct kvm_s390_irq_state {
- __u64 buf;
- __u32 flags; /* will stay unused for compatibility reasons */
- __u32 len;
- __u32 reserved[4]; /* will stay unused for compatibility reasons */
-};
-
/* for KVM_SET_GUEST_DEBUG */
#define KVM_GUESTDBG_ENABLE 0x00000001
@@ -737,50 +618,6 @@ struct kvm_enable_cap {
__u8 pad[64];
};
-/* for KVM_PPC_GET_PVINFO */
-
-#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
-
-struct kvm_ppc_pvinfo {
- /* out */
- __u32 flags;
- __u32 hcall[4];
- __u8 pad[108];
-};
-
-/* for KVM_PPC_GET_SMMU_INFO */
-#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
-
-struct kvm_ppc_one_page_size {
- __u32 page_shift; /* Page shift (or 0) */
- __u32 pte_enc; /* Encoding in the HPTE (>>12) */
-};
-
-struct kvm_ppc_one_seg_page_size {
- __u32 page_shift; /* Base page shift of segment (or 0) */
- __u32 slb_enc; /* SLB encoding for BookS */
- struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
-};
-
-#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
-#define KVM_PPC_1T_SEGMENTS 0x00000002
-#define KVM_PPC_NO_HASH 0x00000004
-
-struct kvm_ppc_smmu_info {
- __u64 flags;
- __u32 slb_size;
- __u16 data_keys; /* # storage keys supported for data */
- __u16 instr_keys; /* # storage keys supported for instructions */
- struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
-};
-
-/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
-struct kvm_ppc_resize_hpt {
- __u64 flags;
- __u32 shift;
- __u32 pad;
-};
-
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -790,9 +627,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
@@ -823,9 +661,6 @@ struct kvm_ppc_resize_hpt {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
@@ -852,9 +687,7 @@ struct kvm_ppc_resize_hpt {
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
#define KVM_CAP_USER_NMI 22
-#ifdef __KVM_HAVE_GUEST_DEBUG
#define KVM_CAP_SET_GUEST_DEBUG 23
-#endif
#ifdef __KVM_HAVE_PIT
#define KVM_CAP_REINJECT_CONTROL 24
#endif
@@ -1035,8 +868,55 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_LAST_CPU 184
#define KVM_CAP_SMALLER_MAXPHYADDR 185
#define KVM_CAP_S390_DIAG318 186
-
-#ifdef KVM_CAP_IRQ_ROUTING
+#define KVM_CAP_STEAL_TIME 187
+#define KVM_CAP_X86_USER_SPACE_MSR 188
+#define KVM_CAP_X86_MSR_FILTER 189
+#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
+#define KVM_CAP_SYS_HYPERV_CPUID 191
+#define KVM_CAP_DIRTY_LOG_RING 192
+#define KVM_CAP_X86_BUS_LOCK_EXIT 193
+#define KVM_CAP_PPC_DAWR1 194
+#define KVM_CAP_SET_GUEST_DEBUG2 195
+#define KVM_CAP_SGX_ATTRIBUTE 196
+#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
+#define KVM_CAP_PTP_KVM 198
+#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
+#define KVM_CAP_SREGS2 200
+#define KVM_CAP_EXIT_HYPERCALL 201
+#define KVM_CAP_PPC_RPT_INVALIDATE 202
+#define KVM_CAP_BINARY_STATS_FD 203
+#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
+#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
+#define KVM_CAP_VM_GPA_BITS 207
+#define KVM_CAP_XSAVE2 208
+#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
+#define KVM_CAP_S390_MEM_OP_EXTENSION 211
+#define KVM_CAP_PMU_CAPABILITY 212
+#define KVM_CAP_DISABLE_QUIRKS2 213
+#define KVM_CAP_VM_TSC_CONTROL 214
+#define KVM_CAP_SYSTEM_EVENT_DATA 215
+#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
+#define KVM_CAP_S390_PROTECTED_DUMP 217
+#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218
+#define KVM_CAP_X86_NOTIFY_VMEXIT 219
+#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
+#define KVM_CAP_S390_ZPCI_OP 221
+#define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
+#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
+#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
+#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
+#define KVM_CAP_COUNTER_OFFSET 227
+#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
+#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1066,11 +946,20 @@ struct kvm_irq_routing_hv_sint {
__u32 sint;
};
+struct kvm_irq_routing_xen_evtchn {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+};
+
+#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1))
+
/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
+#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
struct kvm_irq_routing_entry {
__u32 gsi;
@@ -1082,6 +971,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing_msi msi;
struct kvm_irq_routing_s390_adapter adapter;
struct kvm_irq_routing_hv_sint hv_sint;
+ struct kvm_irq_routing_xen_evtchn xen_evtchn;
__u32 pad[8];
} u;
};
@@ -1089,36 +979,9 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
- struct kvm_irq_routing_entry entries[0];
+ struct kvm_irq_routing_entry entries[];
};
-#endif
-
-#ifdef KVM_CAP_MCE
-/* x86 MCE */
-struct kvm_x86_mce {
- __u64 status;
- __u64 addr;
- __u64 misc;
- __u64 mcg_status;
- __u8 bank;
- __u8 pad1[7];
- __u64 pad2[3];
-};
-#endif
-
-#ifdef KVM_CAP_XEN_HVM
-struct kvm_xen_hvm_config {
- __u32 flags;
- __u32 msr;
- __u64 blob_addr_32;
- __u64 blob_addr_64;
- __u8 blob_size_32;
- __u8 blob_size_64;
- __u8 pad2[30];
-};
-#endif
-
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
/*
* Available with KVM_CAP_IRQFD_RESAMPLE
@@ -1141,11 +1004,16 @@ struct kvm_irqfd {
/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
#define KVM_CLOCK_TSC_STABLE 2
+#define KVM_CLOCK_REALTIME (1 << 2)
+#define KVM_CLOCK_HOST_TSC (1 << 3)
struct kvm_clock_data {
__u64 clock;
__u32 flags;
- __u32 pad[9];
+ __u32 pad0;
+ __u64 realtime;
+ __u64 host_tsc;
+ __u32 pad[4];
};
/* For KVM_CAP_SW_TLB */
@@ -1182,6 +1050,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_RISCV 0x8000000000000000ULL
+#define KVM_REG_LOONGARCH 0x9000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
@@ -1197,7 +1066,7 @@ struct kvm_dirty_tlb {
struct kvm_reg_list {
__u64 n; /* number of regs */
- __u64 reg[0];
+ __u64 reg[];
};
struct kvm_one_reg {
@@ -1238,9 +1107,16 @@ struct kvm_device_attr {
__u64 addr; /* userspace address of attr data */
};
-#define KVM_DEV_VFIO_GROUP 1
-#define KVM_DEV_VFIO_GROUP_ADD 1
-#define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_FILE 1
+
+#define KVM_DEV_VFIO_FILE_ADD 1
+#define KVM_DEV_VFIO_FILE_DEL 2
+
+/* KVM_DEV_VFIO_GROUP aliases are for compile-time uapi compatibility */
+#define KVM_DEV_VFIO_GROUP KVM_DEV_VFIO_FILE
+
+#define KVM_DEV_VFIO_GROUP_ADD KVM_DEV_VFIO_FILE_ADD
+#define KVM_DEV_VFIO_GROUP_DEL KVM_DEV_VFIO_FILE_DEL
#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
enum kvm_device_type {
@@ -1264,6 +1140,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
+ KVM_DEV_TYPE_RISCV_AIA,
+#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
KVM_DEV_TYPE_MAX,
};
@@ -1273,30 +1151,21 @@ struct kvm_vfio_spapr_tce {
};
/*
- * ioctls for VM fds
- */
-#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
-/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
*/
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
-/* KVM_SET_MEMORY_ALIAS is obsolete: */
-#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
-#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) /* deprecated */
#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+ struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
-struct kvm_s390_ucas_mapping {
- __u64 user_addr;
- __u64 vcpu_addr;
- __u64 length;
-};
#define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
#define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
#define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long)
@@ -1314,20 +1183,8 @@ struct kvm_s390_ucas_mapping {
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
- struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
-#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
- struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
- struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
- struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
@@ -1340,12 +1197,10 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
/* Available with KVM_CAP_PPC_GET_PVINFO */
#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL */
+/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
+ * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
- struct kvm_assigned_pci_dev)
/* Available with KVM_CAP_SIGNAL_MSI */
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1375,6 +1230,10 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
+#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
+/* Available with KVM_CAP_COUNTER_OFFSET */
+#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
+#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1394,8 +1253,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -1437,7 +1294,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
/*
- * vcpu version available with KVM_ENABLE_CAP
+ * vcpu version available with KVM_CAP_ENABLE_CAP
* vm version available with KVM_CAP_ENABLE_CAP_VM
*/
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
@@ -1493,7 +1350,7 @@ struct kvm_enc_region {
/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */
#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
-/* Available with KVM_CAP_HYPERV_CPUID */
+/* Available with KVM_CAP_HYPERV_CPUID (vcpu) / KVM_CAP_SYS_HYPERV_CPUID (system) */
#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
/* Available with KVM_CAP_ARM_SVE */
@@ -1503,190 +1360,192 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-struct kvm_s390_pv_sec_parm {
- __u64 origin;
- __u64 length;
-};
-
-struct kvm_s390_pv_unp {
- __u64 addr;
- __u64 size;
- __u64 tweak;
-};
-
-enum pv_cmd_id {
- KVM_PV_ENABLE,
- KVM_PV_DISABLE,
- KVM_PV_SET_SEC_PARMS,
- KVM_PV_UNPACK,
- KVM_PV_VERIFY,
- KVM_PV_PREP_RESET,
- KVM_PV_UNSHARE_ALL,
-};
-
-struct kvm_pv_cmd {
- __u32 cmd; /* Command to be executed */
- __u16 rc; /* Ultravisor return code */
- __u16 rrc; /* Ultravisor return reason code */
- __u64 data; /* Data or address */
- __u32 flags; /* flags for future extensions. Must be 0 for now */
- __u32 reserved[3];
-};
-
/* Available with KVM_CAP_S390_PROTECTED */
#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
-/* Secure Encrypted Virtualization command */
-enum sev_cmd_id {
- /* Guest initialization commands */
- KVM_SEV_INIT = 0,
- KVM_SEV_ES_INIT,
- /* Guest launch commands */
- KVM_SEV_LAUNCH_START,
- KVM_SEV_LAUNCH_UPDATE_DATA,
- KVM_SEV_LAUNCH_UPDATE_VMSA,
- KVM_SEV_LAUNCH_SECRET,
- KVM_SEV_LAUNCH_MEASURE,
- KVM_SEV_LAUNCH_FINISH,
- /* Guest migration commands (outgoing) */
- KVM_SEV_SEND_START,
- KVM_SEV_SEND_UPDATE_DATA,
- KVM_SEV_SEND_UPDATE_VMSA,
- KVM_SEV_SEND_FINISH,
- /* Guest migration commands (incoming) */
- KVM_SEV_RECEIVE_START,
- KVM_SEV_RECEIVE_UPDATE_DATA,
- KVM_SEV_RECEIVE_UPDATE_VMSA,
- KVM_SEV_RECEIVE_FINISH,
- /* Guest status and debug commands */
- KVM_SEV_GUEST_STATUS,
- KVM_SEV_DBG_DECRYPT,
- KVM_SEV_DBG_ENCRYPT,
- /* Guest certificates commands */
- KVM_SEV_CERT_EXPORT,
-
- KVM_SEV_NR_MAX,
-};
+/* Available with KVM_CAP_X86_MSR_FILTER */
+#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter)
-struct kvm_sev_cmd {
- __u32 id;
- __u64 data;
- __u32 error;
- __u32 sev_fd;
-};
+/* Available with KVM_CAP_DIRTY_LOG_RING */
+#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
-struct kvm_sev_launch_start {
- __u32 handle;
- __u32 policy;
- __u64 dh_uaddr;
- __u32 dh_len;
- __u64 session_uaddr;
- __u32 session_len;
-};
+/* Per-VM Xen attributes */
+#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
+#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
-struct kvm_sev_launch_update_data {
- __u64 uaddr;
- __u32 len;
-};
+/* Per-vCPU Xen attributes */
+#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
+#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)
-struct kvm_sev_launch_secret {
- __u64 hdr_uaddr;
- __u32 hdr_len;
- __u64 guest_uaddr;
- __u32 guest_len;
- __u64 trans_uaddr;
- __u32 trans_len;
-};
+#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
+#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
-struct kvm_sev_launch_measure {
- __u64 uaddr;
- __u32 len;
-};
-
-struct kvm_sev_guest_status {
- __u32 handle;
- __u32 policy;
- __u32 state;
-};
+#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
+#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
-struct kvm_sev_dbg {
- __u64 src_uaddr;
- __u64 dst_uaddr;
- __u32 len;
-};
+/*
+ * Each arch needs to define this macro after implementing the dirty ring
+ * feature. KVM_DIRTY_LOG_PAGE_OFFSET should be defined as the
+ * starting page offset of the dirty ring structures.
+ */
+#ifndef KVM_DIRTY_LOG_PAGE_OFFSET
+#define KVM_DIRTY_LOG_PAGE_OFFSET 0
+#endif
-#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
-#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
-#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
+/*
+ * KVM dirty GFN flags, defined as:
+ *
+ * |---------------+---------------+--------------|
+ * | bit 1 (reset) | bit 0 (dirty) | Status |
+ * |---------------+---------------+--------------|
+ * | 0 | 0 | Invalid GFN |
+ * | 0 | 1 | Dirty GFN |
+ * | 1 | X | GFN to reset |
+ * |---------------+---------------+--------------|
+ *
+ * Lifecycle of a dirty GFN goes like:
+ *
+ * dirtied harvested reset
+ * 00 -----------> 01 -------------> 1X -------+
+ * ^ |
+ * | |
+ * +------------------------------------------+
+ *
+ * The userspace program is only responsible for the 01->1X state
+ * conversion after harvesting an entry. Also, it must not skip any
+ * dirty bits, so that dirty bits are always harvested in sequence.
+ */
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
+#define KVM_DIRTY_GFN_F_MASK 0x3
-struct kvm_assigned_pci_dev {
- __u32 assigned_dev_id;
- __u32 busnr;
- __u32 devfn;
+/*
+ * KVM dirty rings should be mapped at KVM_DIRTY_LOG_PAGE_OFFSET of
+ * per-vcpu mmapped regions as an array of struct kvm_dirty_gfn. The
+ * size of the gfn buffer is decided by the first argument when
+ * enabling KVM_CAP_DIRTY_LOG_RING.
+ */
+struct kvm_dirty_gfn {
__u32 flags;
- __u32 segnr;
- union {
- __u32 reserved[11];
- };
+ __u32 slot;
+ __u64 offset;
};
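
For the flag lifecycle documented above, a minimal harvesting sketch (an editorial illustration, not part of the patch) could look as follows. The ring pointer, entry count and userspace cursor are assumptions of the sketch, and it deliberately omits the load-acquire/store-release ordering that KVM_CAP_DIRTY_LOG_RING_ACQ_REL exists to enforce.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void harvest_dirty_ring(int vm_fd, struct kvm_dirty_gfn *ring,
			       __u32 size, __u32 *next)
{
	struct kvm_dirty_gfn *cur;

	for (cur = &ring[*next % size]; cur->flags & KVM_DIRTY_GFN_F_DIRTY;
	     cur = &ring[++(*next) % size]) {
		/* ... record (cur->slot, cur->offset) as dirty ... */
		cur->flags |= KVM_DIRTY_GFN_F_RESET;	/* 01 -> 1X */
	}

	/* Ask KVM to recycle all harvested (1X) entries back to 00. */
	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
}
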
-#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
-#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
-#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
+#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
+#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
-#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
-#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
-#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
+#define KVM_PMU_CAP_DISABLE (1 << 0)
-#define KVM_DEV_IRQ_HOST_MASK 0x00ff
-#define KVM_DEV_IRQ_GUEST_MASK 0xff00
+/**
+ * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
+ * @flags: Some extra information for the header, always 0 for now.
+ * @name_size: The size in bytes of the memory which contains the statistics
+ * name string including the trailing '\0'. The memory is allocated
+ * at the end of the statistics descriptor.
+ * @num_desc: The number of statistics the vm or vcpu has.
+ * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed
+ * to by the vm/vcpu stats fd.
+ * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file
+ * pointed to by the vm/vcpu stats fd.
+ * @data_offset: The offset of the vm/vcpu stats' data block in the file
+ * pointed to by the vm/vcpu stats fd.
+ *
+ * This is the header userspace needs to read from the stats fd before any
+ * other reads. It is used by userspace to discover all the information about
+ * the vm/vcpu's binary statistics.
+ * Userspace reads this header from the start of the vm/vcpu's stats fd.
+ */
+struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+};
+
+#define KVM_STATS_TYPE_SHIFT 0
+#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST
+
+#define KVM_STATS_UNIT_SHIFT 4
+#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
+
+#define KVM_STATS_BASE_SHIFT 8
+#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
-struct kvm_assigned_irq {
- __u32 assigned_dev_id;
- __u32 host_irq; /* ignored (legacy field) */
- __u32 guest_irq;
+/**
+ * struct kvm_stats_desc - Descriptor of a KVM statistic.
+ * @flags: Annotations of the stat, like type, unit, etc.
+ * @exponent: Used together with @flags to determine the unit.
+ * @size: The number of data items for this stat.
+ * Every data item is of type __u64.
+ * @offset: The offset of the stat to the start of the stat structure in
+ * structure kvm or kvm_vcpu.
+ * @bucket_size: A parameter value used for histogram stats. It is only used
+ * for linear histogram stats, specifying the size of a bucket.
+ * @name: The name string for the stat. Its size is indicated by
+ * &kvm_stats_header->name_size.
+ */
+struct kvm_stats_desc {
__u32 flags;
- union {
- __u32 reserved[12];
- };
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 bucket_size;
+ char name[];
};
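
Putting the two structures together, here is a hedged userspace sketch (an editorial illustration, not from the patch) that walks a binary stats fd such as one returned by KVM_GET_STATS_FD. It reads only the first data item of each stat and elides error handling.

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void dump_stats(int stats_fd)
{
	struct kvm_stats_header hdr;
	size_t desc_sz;
	char *descs;
	__u32 i;

	pread(stats_fd, &hdr, sizeof(hdr), 0);

	/* Each descriptor is followed by its name_size-byte name. */
	desc_sz = sizeof(struct kvm_stats_desc) + hdr.name_size;
	descs = malloc((size_t)hdr.num_desc * desc_sz);
	pread(stats_fd, descs, (size_t)hdr.num_desc * desc_sz, hdr.desc_offset);

	for (i = 0; i < hdr.num_desc; i++) {
		struct kvm_stats_desc *d =
			(struct kvm_stats_desc *)(descs + i * desc_sz);
		__u64 val;

		/* Read only the first of d->size data items. */
		pread(stats_fd, &val, sizeof(val), hdr.data_offset + d->offset);
		printf("%s = %llu (unit %#x, base %s, exp %d)\n", d->name,
		       (unsigned long long)val, d->flags & KVM_STATS_UNIT_MASK,
		       (d->flags & KVM_STATS_BASE_MASK) == KVM_STATS_BASE_POW2 ?
		       "2" : "10", d->exponent);
	}
	free(descs);
}
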
-struct kvm_assigned_msix_nr {
- __u32 assigned_dev_id;
- __u16 entry_nr;
- __u16 padding;
-};
+#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
-#define KVM_MAX_MSIX_PER_DEV 256
-struct kvm_assigned_msix_entry {
- __u32 assigned_dev_id;
- __u32 gsi;
- __u16 entry; /* The index of entry in the MSI-X table */
- __u16 padding[3];
-};
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
-/* Available with KVM_CAP_ARM_USER_IRQ */
+/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */
+#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0)
+#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1)
-/* Bits for run->s.regs.device_irq_level */
-#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
-#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
-#define KVM_ARM_DEV_PMU (1 << 2)
+/* Available with KVM_CAP_S390_ZPCI_OP */
+#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
-struct kvm_hyperv_eventfd {
- __u32 conn_id;
- __s32 fd;
- __u32 flags;
- __u32 padding[3];
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+ __u64 address;
+ __u64 size;
+ __u64 attributes;
+ __u64 flags;
};
-#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
-#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
-#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
-#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+ __u64 size;
+ __u64 flags;
+ __u64 reserved[6];
+};
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 8b86609849b9..960c7e93d1a9 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -29,6 +29,7 @@
#define KVM_HC_CLOCK_PAIRING 9
#define KVM_HC_SEND_IPI 10
#define KVM_HC_SCHED_YIELD 11
+#define KVM_HC_MAP_GPA_RANGE 12
/*
* hypercalls use architecture specific
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 61158f5a1a5b..7d81c3e1ec29 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -13,8 +13,6 @@
#include <linux/in.h>
#include <linux/in6.h>
-#define IPPROTO_L2TP 115
-
/**
* struct sockaddr_l2tpip - the sockaddr structure for L2TP-over-IP sockets
* @l2tp_family: address family number AF_L2TPIP.
@@ -108,7 +106,7 @@ enum {
L2TP_ATTR_VLAN_ID, /* u16 (not used) */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
- L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
+ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags (not used) */
L2TP_ATTR_RECV_SEQ, /* u8 */
L2TP_ATTR_SEND_SEQ, /* u8 */
L2TP_ATTR_LNS_MODE, /* u8 */
@@ -144,6 +142,8 @@ enum {
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */
L2TP_ATTR_STATS_PAD,
+ L2TP_ATTR_RX_COOKIE_DISCARDS, /* u64 */
+ L2TP_ATTR_RX_INVALID, /* u64 */
__L2TP_ATTR_STATS_MAX,
};
@@ -177,7 +177,9 @@ enum l2tp_seqmode {
};
/**
- * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
+ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions.
+ *
+ * Unused.
*
* @L2TP_MSG_DEBUG: verbose debug (if compiled in)
* @L2TP_MSG_CONTROL: userspace - kernel interface
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
new file mode 100644
index 000000000000..25c8d7677539
--- /dev/null
+++ b/include/uapi/linux/landlock.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Landlock - User space API
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _UAPI_LINUX_LANDLOCK_H
+#define _UAPI_LINUX_LANDLOCK_H
+
+#include <linux/types.h>
+
+/**
+ * struct landlock_ruleset_attr - Ruleset definition
+ *
+ * Argument of sys_landlock_create_ruleset(). This structure can grow in
+ * future versions.
+ */
+struct landlock_ruleset_attr {
+ /**
+ * @handled_access_fs: Bitmask of actions (cf. `Filesystem flags`_)
+ * that is handled by this ruleset and should then be forbidden if no
+ * rule explicitly allows them: it is a deny-by-default list that should
+ * contain as many Landlock access rights as possible. Indeed, all
+ * Landlock filesystem access rights that are not part of
+ * handled_access_fs are allowed. This is needed for backward
+ * compatibility reasons. One exception is the
+ * %LANDLOCK_ACCESS_FS_REFER access right, which is always implicitly
+ * handled, but must still be explicitly handled to add new rules with
+ * this access right.
+ */
+ __u64 handled_access_fs;
+ /**
+ * @handled_access_net: Bitmask of actions (cf. `Network flags`_)
+ * that is handled by this ruleset and should then be forbidden if no
+ * rule explicitly allows them.
+ */
+ __u64 handled_access_net;
+};
+
+/*
+ * sys_landlock_create_ruleset() flags:
+ *
+ * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
+ * version.
+ */
+/* clang-format off */
+#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)
+/* clang-format on */
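
The version probe is typically a bare call with a NULL attribute (a sketch; SYS_landlock_create_ruleset is assumed to be exposed by the libc's <sys/syscall.h>):

	/* attr = NULL and size = 0 are required when querying the ABI version. */
	int abi = syscall(SYS_landlock_create_ruleset, NULL, 0,
			  LANDLOCK_CREATE_RULESET_VERSION);
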
+
+/**
+ * enum landlock_rule_type - Landlock rule type
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+enum landlock_rule_type {
+ /**
+ * @LANDLOCK_RULE_PATH_BENEATH: Type of a &struct
+ * landlock_path_beneath_attr .
+ */
+ LANDLOCK_RULE_PATH_BENEATH = 1,
+ /**
+ * @LANDLOCK_RULE_NET_PORT: Type of a &struct
+ * landlock_net_port_attr .
+ */
+ LANDLOCK_RULE_NET_PORT,
+};
+
+/**
+ * struct landlock_path_beneath_attr - Path hierarchy definition
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+struct landlock_path_beneath_attr {
+ /**
+ * @allowed_access: Bitmask of allowed actions for this file hierarchy
+ * (cf. `Filesystem flags`_).
+ */
+ __u64 allowed_access;
+ /**
+ * @parent_fd: File descriptor, preferably opened with ``O_PATH``,
+ * which identifies the parent directory of a file hierarchy, or just a
+ * file.
+ */
+ __s32 parent_fd;
+ /*
+ * This struct is packed to avoid trailing reserved members.
+ * Cf. security/landlock/syscalls.c:build_check_abi()
+ */
+} __attribute__((packed));
+
+/**
+ * struct landlock_net_port_attr - Network port definition
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+struct landlock_net_port_attr {
+ /**
+ * @allowed_access: Bitmask of allowed access network for a port
+ * (cf. `Network flags`_).
+ */
+ __u64 allowed_access;
+ /**
+ * @port: Network port in host endianness.
+ *
+ * It should be noted that port 0 passed to :manpage:`bind(2)` will
+ * bind to an available port from a specific port range. This can be
+ * configured via the ``/proc/sys/net/ipv4/ip_local_port_range``
+ * sysctl (also used for IPv6). A Landlock rule with port 0 and the
+ * ``LANDLOCK_ACCESS_NET_BIND_TCP`` right means that requesting to bind
+ * on port 0 is allowed and it will automatically translate to binding
+ * on the related port range.
+ */
+ __u64 port;
+};
+
+/**
+ * DOC: fs_access
+ *
+ * A set of actions on kernel objects may be defined by an attribute (e.g.
+ * &struct landlock_path_beneath_attr) including a bitmask of access.
+ *
+ * Filesystem flags
+ * ~~~~~~~~~~~~~~~~
+ *
+ * These flags make it possible to restrict a sandboxed process to a set of
+ * actions on files and directories. Files or directories opened before the
+ * sandboxing are not subject to these restrictions.
+ *
+ * A file can only receive these access rights:
+ *
+ * - %LANDLOCK_ACCESS_FS_EXECUTE: Execute a file.
+ * - %LANDLOCK_ACCESS_FS_WRITE_FILE: Open a file with write access. Note that
+ * you might additionally need the %LANDLOCK_ACCESS_FS_TRUNCATE right in order
+ * to overwrite files with :manpage:`open(2)` using ``O_TRUNC`` or
+ * :manpage:`creat(2)`.
+ * - %LANDLOCK_ACCESS_FS_READ_FILE: Open a file with read access.
+ * - %LANDLOCK_ACCESS_FS_TRUNCATE: Truncate a file with :manpage:`truncate(2)`,
+ * :manpage:`ftruncate(2)`, :manpage:`creat(2)`, or :manpage:`open(2)` with
+ * ``O_TRUNC``. Whether an opened file can be truncated with
+ * :manpage:`ftruncate(2)` is determined during :manpage:`open(2)`, in the
+ * same way as read and write permissions are checked during
+ * :manpage:`open(2)` using %LANDLOCK_ACCESS_FS_READ_FILE and
+ * %LANDLOCK_ACCESS_FS_WRITE_FILE. This access right is available since the
+ * third version of the Landlock ABI.
+ *
+ * A directory can receive access rights related to files or directories. The
+ * following access right is applied to the directory itself, and the
+ * directories beneath it:
+ *
+ * - %LANDLOCK_ACCESS_FS_READ_DIR: Open a directory or list its content.
+ *
+ * However, the following access rights only apply to the content of a
+ * directory, not the directory itself:
+ *
+ * - %LANDLOCK_ACCESS_FS_REMOVE_DIR: Remove an empty directory or rename one.
+ * - %LANDLOCK_ACCESS_FS_REMOVE_FILE: Unlink (or rename) a file.
+ * - %LANDLOCK_ACCESS_FS_MAKE_CHAR: Create (or rename or link) a character
+ * device.
+ * - %LANDLOCK_ACCESS_FS_MAKE_DIR: Create (or rename) a directory.
+ * - %LANDLOCK_ACCESS_FS_MAKE_REG: Create (or rename or link) a regular file.
+ * - %LANDLOCK_ACCESS_FS_MAKE_SOCK: Create (or rename or link) a UNIX domain
+ * socket.
+ * - %LANDLOCK_ACCESS_FS_MAKE_FIFO: Create (or rename or link) a named pipe.
+ * - %LANDLOCK_ACCESS_FS_MAKE_BLOCK: Create (or rename or link) a block device.
+ * - %LANDLOCK_ACCESS_FS_MAKE_SYM: Create (or rename or link) a symbolic link.
+ * - %LANDLOCK_ACCESS_FS_REFER: Link or rename a file from or to a different
+ * directory (i.e. reparent a file hierarchy).
+ *
+ * This access right is available since the second version of the Landlock
+ * ABI.
+ *
+ * This is the only access right which is denied by default by any ruleset,
+ * even if the right is not specified as handled at ruleset creation time.
+ * The only way to make a ruleset grant this right is to explicitly allow it
+ * for a specific directory by adding a matching rule to the ruleset.
+ *
+ * In particular, when using the first Landlock ABI version, Landlock will
+ * always deny attempts to reparent files between different directories.
+ *
+ * In addition to the source and destination directories having the
+ * %LANDLOCK_ACCESS_FS_REFER access right, the attempted link or rename
+ * operation must meet the following constraints:
+ *
+ * * The reparented file may not gain more access rights in the destination
+ * directory than it previously had in the source directory. If this is
+ * attempted, the operation results in an ``EXDEV`` error.
+ *
+ * * When linking or renaming, the ``LANDLOCK_ACCESS_FS_MAKE_*`` right for the
+ * respective file type must be granted for the destination directory.
+ * Otherwise, the operation results in an ``EACCES`` error.
+ *
+ * * When renaming, the ``LANDLOCK_ACCESS_FS_REMOVE_*`` right for the
+ * respective file type must be granted for the source directory. Otherwise,
+ * the operation results in an ``EACCES`` error.
+ *
+ * If multiple requirements are not met, the ``EACCES`` error code takes
+ * precedence over ``EXDEV``.
+ *
+ * .. warning::
+ *
+ * It is currently not possible to restrict some file-related actions
+ * accessible through these syscall families: :manpage:`chdir(2)`,
+ * :manpage:`stat(2)`, :manpage:`flock(2)`, :manpage:`chmod(2)`,
+ * :manpage:`chown(2)`, :manpage:`setxattr(2)`, :manpage:`utime(2)`,
+ * :manpage:`ioctl(2)`, :manpage:`fcntl(2)`, :manpage:`access(2)`.
+ * Future Landlock evolutions will make it possible to restrict them.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_FS_EXECUTE (1ULL << 0)
+#define LANDLOCK_ACCESS_FS_WRITE_FILE (1ULL << 1)
+#define LANDLOCK_ACCESS_FS_READ_FILE (1ULL << 2)
+#define LANDLOCK_ACCESS_FS_READ_DIR (1ULL << 3)
+#define LANDLOCK_ACCESS_FS_REMOVE_DIR (1ULL << 4)
+#define LANDLOCK_ACCESS_FS_REMOVE_FILE (1ULL << 5)
+#define LANDLOCK_ACCESS_FS_MAKE_CHAR (1ULL << 6)
+#define LANDLOCK_ACCESS_FS_MAKE_DIR (1ULL << 7)
+#define LANDLOCK_ACCESS_FS_MAKE_REG (1ULL << 8)
+#define LANDLOCK_ACCESS_FS_MAKE_SOCK (1ULL << 9)
+#define LANDLOCK_ACCESS_FS_MAKE_FIFO (1ULL << 10)
+#define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11)
+#define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12)
+#define LANDLOCK_ACCESS_FS_REFER (1ULL << 13)
+#define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14)
+/* clang-format on */
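
Tying the ruleset, rule and flag definitions together, a minimal sandbox sketch (an editorial illustration, not from the patch) could look like the following. It assumes a kernel and libc exposing the landlock syscall numbers, and a Landlock ABI >= 4 kernel since the ruleset attribute here includes handled_access_net; the /usr path is illustrative and error handling is elided.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct landlock_ruleset_attr rs_attr = {
		/* Deny-by-default for these two rights everywhere... */
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr pb_attr = {
		/* ...then allow reads back beneath one hierarchy. */
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
	};
	int ruleset_fd;

	ruleset_fd = syscall(SYS_landlock_create_ruleset, &rs_attr,
			     sizeof(rs_attr), 0);
	pb_attr.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
	syscall(SYS_landlock_add_rule, ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
		&pb_attr, 0);
	close(pb_attr.parent_fd);

	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);	/* required before restricting */
	syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);

	/* From here on, this process can only read files beneath /usr and
	 * cannot open anything for writing (for the handled rights above). */
	return 0;
}
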
+
+/**
+ * DOC: net_access
+ *
+ * Network flags
+ * ~~~~~~~~~~~~~
+ *
+ * These flags make it possible to restrict a sandboxed process to a set of
+ * network actions. This is supported since Landlock ABI version 4.
+ *
+ * TCP sockets with allowed actions:
+ *
+ * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port.
+ * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to
+ * a remote port.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
+#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
+/* clang-format on */
+#endif /* _UAPI_LINUX_LANDLOCK_H */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
deleted file mode 100644
index f9a1be7fc696..000000000000
--- a/include/uapi/linux/lightnvm.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2015 CNEX Labs. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- */
-
-#ifndef _UAPI_LINUX_LIGHTNVM_H
-#define _UAPI_LINUX_LIGHTNVM_H
-
-#ifdef __KERNEL__
-#include <linux/kernel.h>
-#include <linux/ioctl.h>
-#else /* __KERNEL__ */
-#include <stdio.h>
-#include <sys/ioctl.h>
-#define DISK_NAME_LEN 32
-#endif /* __KERNEL__ */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define NVM_TTYPE_NAME_MAX 48
-#define NVM_TTYPE_MAX 63
-#define NVM_MMTYPE_LEN 8
-
-#define NVM_CTRL_FILE "/dev/lightnvm/control"
-
-struct nvm_ioctl_info_tgt {
- __u32 version[3];
- __u32 reserved;
- char tgtname[NVM_TTYPE_NAME_MAX];
-};
-
-struct nvm_ioctl_info {
- __u32 version[3]; /* in/out - major, minor, patch */
- __u16 tgtsize; /* number of targets */
- __u16 reserved16; /* pad to 4K page */
- __u32 reserved[12];
- struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
-};
-
-enum {
- NVM_DEVICE_ACTIVE = 1 << 0,
-};
-
-struct nvm_ioctl_device_info {
- char devname[DISK_NAME_LEN];
- char bmname[NVM_TTYPE_NAME_MAX];
- __u32 bmversion[3];
- __u32 flags;
- __u32 reserved[8];
-};
-
-struct nvm_ioctl_get_devices {
- __u32 nr_devices;
- __u32 reserved[31];
- struct nvm_ioctl_device_info info[31];
-};
-
-struct nvm_ioctl_create_simple {
- __u32 lun_begin;
- __u32 lun_end;
-};
-
-struct nvm_ioctl_create_extended {
- __u16 lun_begin;
- __u16 lun_end;
- __u16 op;
- __u16 rsv;
-};
-
-enum {
- NVM_CONFIG_TYPE_SIMPLE = 0,
- NVM_CONFIG_TYPE_EXTENDED = 1,
-};
-
-struct nvm_ioctl_create_conf {
- __u32 type;
- union {
- struct nvm_ioctl_create_simple s;
- struct nvm_ioctl_create_extended e;
- };
-};
-
-enum {
- NVM_TARGET_FACTORY = 1 << 0, /* Init target in factory mode */
-};
-
-struct nvm_ioctl_create {
- char dev[DISK_NAME_LEN]; /* open-channel SSD device */
- char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */
- char tgtname[DISK_NAME_LEN]; /* dev to expose target as */
-
- __u32 flags;
-
- struct nvm_ioctl_create_conf conf;
-};
-
-struct nvm_ioctl_remove {
- char tgtname[DISK_NAME_LEN];
-
- __u32 flags;
-};
-
-struct nvm_ioctl_dev_init {
- char dev[DISK_NAME_LEN]; /* open-channel SSD device */
- char mmtype[NVM_MMTYPE_LEN]; /* register to media manager */
-
- __u32 flags;
-};
-
-enum {
- NVM_FACTORY_ERASE_ONLY_USER = 1 << 0, /* erase only blocks used as
- * host blks or grown blks */
- NVM_FACTORY_RESET_HOST_BLKS = 1 << 1, /* remove host blk marks */
- NVM_FACTORY_RESET_GRWN_BBLKS = 1 << 2, /* remove grown blk marks */
- NVM_FACTORY_NR_BITS = 1 << 3, /* stops here */
-};
-
-struct nvm_ioctl_dev_factory {
- char dev[DISK_NAME_LEN];
-
- __u32 flags;
-};
-
-struct nvm_user_vio {
- __u8 opcode;
- __u8 flags;
- __u16 control;
- __u16 nppas;
- __u16 rsvd;
- __u64 metadata;
- __u64 addr;
- __u64 ppa_list;
- __u32 metadata_len;
- __u32 data_len;
- __u64 status;
- __u32 result;
- __u32 rsvd3[3];
-};
-
-struct nvm_passthru_vio {
- __u8 opcode;
- __u8 flags;
- __u8 rsvd[2];
- __u32 nsid;
- __u32 cdw2;
- __u32 cdw3;
- __u64 metadata;
- __u64 addr;
- __u32 metadata_len;
- __u32 data_len;
- __u64 ppa_list;
- __u16 nppas;
- __u16 control;
- __u32 cdw13;
- __u32 cdw14;
- __u32 cdw15;
- __u64 status;
- __u32 result;
- __u32 timeout_ms;
-};
-
-/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
-enum {
- /* top level cmds */
- NVM_INFO_CMD = 0x20,
- NVM_GET_DEVICES_CMD,
-
- /* device level cmds */
- NVM_DEV_CREATE_CMD,
- NVM_DEV_REMOVE_CMD,
-
- /* Init a device to support LightNVM media managers */
- NVM_DEV_INIT_CMD,
-
- /* Factory reset device */
- NVM_DEV_FACTORY_CMD,
-
- /* Vector user I/O */
- NVM_DEV_VIO_ADMIN_CMD = 0x41,
- NVM_DEV_VIO_CMD = 0x42,
- NVM_DEV_VIO_USER_CMD = 0x43,
-};
-
-#define NVM_IOCTL 'L' /* 0x4c */
-
-#define NVM_INFO _IOWR(NVM_IOCTL, NVM_INFO_CMD, \
- struct nvm_ioctl_info)
-#define NVM_GET_DEVICES _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
- struct nvm_ioctl_get_devices)
-#define NVM_DEV_CREATE _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
- struct nvm_ioctl_create)
-#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
- struct nvm_ioctl_remove)
-#define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
- struct nvm_ioctl_dev_init)
-#define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
- struct nvm_ioctl_dev_factory)
-
-#define NVME_NVM_IOCTL_IO_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_USER_CMD, \
- struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_ADMIN_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_ADMIN_CMD,\
- struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_SUBMIT_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_CMD,\
- struct nvm_user_vio)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCHLEVEL 0
-
-#endif
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index f99d9dcae667..8d7ca7c6af42 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* lirc.h - linux infrared remote control header file
- * last modified 2010/07/13 by Jarod Wilson
*/
#ifndef _LINUX_LIRC_H
@@ -17,14 +16,16 @@
#define LIRC_MODE2_PULSE 0x01000000
#define LIRC_MODE2_FREQUENCY 0x02000000
#define LIRC_MODE2_TIMEOUT 0x03000000
+#define LIRC_MODE2_OVERFLOW 0x04000000
#define LIRC_VALUE_MASK 0x00FFFFFF
#define LIRC_MODE2_MASK 0xFF000000
-#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
-#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
-#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY)
-#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)
+#define LIRC_SPACE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
+#define LIRC_PULSE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
+#define LIRC_FREQUENCY(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY)
+#define LIRC_TIMEOUT(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)
+#define LIRC_OVERFLOW(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_OVERFLOW)
#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK)
#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK)
@@ -33,6 +34,7 @@
#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE)
#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY)
#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT)
+#define LIRC_IS_OVERFLOW(val) (LIRC_MODE2(val) == LIRC_MODE2_OVERFLOW)
/* used heavily by lirc userspace */
#define lirc_t int
@@ -71,13 +73,10 @@
#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000
#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000
#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000
#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000
-#define LIRC_CAN_SET_REC_FILTER 0x08000000
#define LIRC_CAN_MEASURE_CARRIER 0x02000000
#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000
@@ -85,7 +84,12 @@
#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK)
#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK)
-#define LIRC_CAN_NOTIFY_DECODE 0x01000000
+/*
+ * Unused features. These features were never implemented, in tree or
+ * out of tree. These definitions are here so as not to break the lircd build.
+ */
+#define LIRC_CAN_SET_REC_FILTER 0
+#define LIRC_CAN_NOTIFY_DECODE 0
/*** IOCTL commands for lirc driver ***/
@@ -139,7 +143,7 @@
*/
#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
-/*
+/**
* struct lirc_scancode - decoded scancode with protocol for use with
* LIRC_MODE_SCANCODE
*
@@ -196,6 +200,7 @@ struct lirc_scancode {
* @RC_PROTO_RCMM24: RC-MM protocol 24 bits
* @RC_PROTO_RCMM32: RC-MM protocol 32 bits
* @RC_PROTO_XBOX_DVD: Xbox DVD Movie Playback Kit protocol
+ * @RC_PROTO_MAX: Maximum value of enum rc_proto
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -226,6 +231,7 @@ enum rc_proto {
RC_PROTO_RCMM24 = 25,
RC_PROTO_RCMM32 = 26,
RC_PROTO_XBOX_DVD = 27,
+ RC_PROTO_MAX = RC_PROTO_XBOX_DVD,
};
#endif
diff --git a/include/uapi/linux/loadpin.h b/include/uapi/linux/loadpin.h
new file mode 100644
index 000000000000..daa6dbb8bb02
--- /dev/null
+++ b/include/uapi/linux/loadpin.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022, Google LLC
+ */
+
+#ifndef _UAPI_LINUX_LOOP_LOADPIN_H
+#define _UAPI_LINUX_LOOP_LOADPIN_H
+
+#define LOADPIN_IOC_MAGIC 'L'
+
+/**
+ * LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS - Set up the root digests of verity devices
+ * that loadpin should trust.
+ *
+ * Takes a file descriptor from which to read the root digests of trusted verity devices. The file
+ * is expected to contain a list of digests in ASCII format, with one line per digest. The ioctl
+ * must be issued on the securityfs attribute 'loadpin/dm-verity' (which can be typically found
+ * under /sys/kernel/security/loadpin/dm-verity).
+ */
+#define LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS _IOW(LOADPIN_IOC_MAGIC, 0x00, unsigned int)
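
A hypothetical caller (an editorial sketch, not part of the patch) passes the fd of the digest list as the ioctl argument; the paths below are illustrative and error handling is elided.

#include <fcntl.h>
#include <linux/loadpin.h>	/* assumed installed uapi header */
#include <sys/ioctl.h>
#include <unistd.h>

static int pin_digests(const char *digest_list_path)
{
	int digests_fd = open(digest_list_path, O_RDONLY);
	int attr_fd = open("/sys/kernel/security/loadpin/dm-verity", O_WRONLY);
	int ret;

	/* The fd holding the ASCII digest list is the ioctl argument. */
	ret = ioctl(attr_fd, LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS, digests_fd);

	close(attr_fd);
	close(digests_fd);
	return ret;
}
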
+
+#endif /* _UAPI_LINUX_LOOP_LOADPIN_H */
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index 24a1c45bd1ae..6f63527dd2ed 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */
/*
- * include/linux/loop.h
- *
- * Written by Theodore Ts'o, 3/29/93.
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
+ * Copyright 1993 by Theodore Ts'o.
*/
#ifndef _UAPI_LINUX_LOOP_H
#define _UAPI_LINUX_LOOP_H
@@ -45,7 +40,7 @@ struct loop_info {
unsigned long lo_inode; /* ioctl r/o */
__kernel_old_dev_t lo_rdevice; /* ioctl r/o */
int lo_offset;
- int lo_encrypt_type;
+ int lo_encrypt_type; /* obsolete, ignored */
int lo_encrypt_key_size; /* ioctl w/o */
int lo_flags;
char lo_name[LO_NAME_SIZE];
@@ -61,7 +56,7 @@ struct loop_info64 {
__u64 lo_offset;
__u64 lo_sizelimit;/* bytes, 0 == max available */
__u32 lo_number; /* ioctl r/o */
- __u32 lo_encrypt_type;
+ __u32 lo_encrypt_type; /* obsolete, ignored */
__u32 lo_encrypt_key_size; /* ioctl w/o */
__u32 lo_flags;
__u8 lo_file_name[LO_NAME_SIZE];
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
new file mode 100644
index 000000000000..33d8c9f4aa6b
--- /dev/null
+++ b/include/uapi/linux/lsm.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Security Modules (LSM) - User space API
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_LSM_H
+#define _UAPI_LINUX_LSM_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+/**
+ * struct lsm_ctx - LSM context information
+ * @id: the LSM id number, see LSM_ID_XXX
+ * @flags: LSM specific flags
+ * @len: length of the lsm_ctx struct, @ctx and any other data or padding
+ * @ctx_len: the size of @ctx
+ * @ctx: the LSM context value
+ *
+ * The @len field MUST be equal to the size of the lsm_ctx struct
+ * plus any additional padding and/or data placed after @ctx.
+ *
+ * In all cases @ctx_len MUST be equal to the length of @ctx.
+ * If @ctx is a string value it should be nul terminated with
+ * @ctx_len equal to `strlen(@ctx) + 1`. Binary values are
+ * supported.
+ *
+ * The @flags and @ctx fields SHOULD only be interpreted by the
+ * LSM specified by @id; they MUST be set to zero/0 when not used.
+ */
+struct lsm_ctx {
+ __u64 id;
+ __u64 flags;
+ __u64 len;
+ __u64 ctx_len;
+ __u8 ctx[] __counted_by(ctx_len);
+};
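
To make the @len/@ctx_len contract concrete, a userspace sketch (an editorial illustration, not from the patch) that builds a string-valued context could look like this; the helper name is hypothetical and allocation failure is not handled.

#include <linux/lsm.h>	/* assumed installed uapi header */
#include <stdlib.h>
#include <string.h>

static struct lsm_ctx *make_string_ctx(__u64 lsm_id, const char *value)
{
	__u64 ctx_len = strlen(value) + 1;	/* string: trailing nul included */
	__u64 len = sizeof(struct lsm_ctx) + ctx_len;	/* no extra padding here */
	struct lsm_ctx *ctx = calloc(1, len);

	ctx->id = lsm_id;	/* e.g. LSM_ID_SELINUX */
	ctx->flags = 0;		/* must be zero when unused */
	ctx->len = len;
	ctx->ctx_len = ctx_len;
	memcpy(ctx->ctx, value, ctx_len);
	return ctx;
}
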
+
+/*
+ * ID tokens to identify Linux Security Modules (LSMs)
+ *
+ * These token values are used to uniquely identify specific LSMs
+ * in the kernel as well as in the kernel's LSM userspace API.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ID_UNDEF 0
+#define LSM_ID_CAPABILITY 100
+#define LSM_ID_SELINUX 101
+#define LSM_ID_SMACK 102
+#define LSM_ID_TOMOYO 103
+#define LSM_ID_APPARMOR 104
+#define LSM_ID_YAMA 105
+#define LSM_ID_LOADPIN 106
+#define LSM_ID_SAFESETID 107
+#define LSM_ID_LOCKDOWN 108
+#define LSM_ID_BPF 109
+#define LSM_ID_LANDLOCK 110
+#define LSM_ID_IMA 111
+#define LSM_ID_EVM 112
+
+/*
+ * LSM_ATTR_XXX definitions identify different LSM attributes
+ * which are used in the kernel's LSM userspace API. Support
+ * for these attributes varies across the different LSMs. None
+ * are required.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ATTR_UNDEF 0
+#define LSM_ATTR_CURRENT 100
+#define LSM_ATTR_EXEC 101
+#define LSM_ATTR_FSCREATE 102
+#define LSM_ATTR_KEYCREATE 103
+#define LSM_ATTR_PREV 104
+#define LSM_ATTR_SOCKCREATE 105
+
+/*
+ * LSM_FLAG_XXX definitions identify special handling instructions
+ * for the API.
+ */
+#define LSM_FLAG_SINGLE 0x0001
+
+#endif /* _UAPI_LINUX_LSM_H */
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 568a4303ccce..229655ef792f 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -14,6 +14,8 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_BPF,
LWTUNNEL_ENCAP_SEG6_LOCAL,
LWTUNNEL_ENCAP_RPL,
+ LWTUNNEL_ENCAP_IOAM6,
+ LWTUNNEL_ENCAP_XFRM,
__LWTUNNEL_ENCAP_MAX,
};
@@ -110,4 +112,13 @@ enum {
#define LWT_BPF_MAX_HEADROOM 256
+enum {
+ LWT_XFRM_UNSPEC,
+ LWT_XFRM_IF_ID,
+ LWT_XFRM_LINK,
+ __LWT_XFRM_MAX,
+};
+
+#define LWT_XFRM_MAX (__LWT_XFRM_MAX - 1)
+
#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index f3956fc11de6..1b40a968ba91 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -6,6 +6,7 @@
#define AFFS_SUPER_MAGIC 0xadff
#define AFS_SUPER_MAGIC 0x5346414F
#define AUTOFS_SUPER_MAGIC 0x0187
+#define CEPH_SUPER_MAGIC 0x00c36400
#define CODA_SUPER_MAGIC 0x73757245
#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */
@@ -35,6 +36,7 @@
#define EFIVARFS_MAGIC 0xde5e81e4
#define HOSTFS_SUPER_MAGIC 0x00c0ffee
#define OVERLAYFS_SUPER_MAGIC 0x794c7630
+#define FUSE_SUPER_MAGIC 0x65735546
#define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */
#define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */
@@ -43,6 +45,7 @@
#define MINIX3_SUPER_MAGIC 0x4d5a /* minix v3 fs, 60 char names */
#define MSDOS_SUPER_MAGIC 0x4d44 /* MD */
+#define EXFAT_SUPER_MAGIC 0x2011BAB0
#define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */
#define NFS_SUPER_MAGIC 0x6969
#define OCFS2_SUPER_MAGIC 0x7461636f
@@ -51,6 +54,7 @@
#define QNX6_SUPER_MAGIC 0x68191122 /* qnx6 fs detection */
#define AFS_FS_MAGIC 0x6B414653
+
#define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */
/* used by file system utilities that
look at the superblock, etc. */
@@ -59,6 +63,9 @@
#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
#define SMB_SUPER_MAGIC 0x517B
+#define CIFS_SUPER_MAGIC 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define SMB2_SUPER_MAGIC 0xFE534D42
+
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
@@ -91,11 +98,9 @@
/* Since UDF 2.01 is ISO 13346 based... */
#define UDF_SUPER_MAGIC 0x15013346
-#define BALLOON_KVM_MAGIC 0x13661366
-#define ZSMALLOC_MAGIC 0x58295829
#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */
#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */
-#define Z3FOLD_MAGIC 0x33
-#define PPC_CMM_MAGIC 0xc7571590
+#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
+#define PID_FS_MAGIC 0x50494446 /* "PIDF" */
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/major.h b/include/uapi/linux/major.h
index 7e5fa8e15c43..4e5f2b3a3d54 100644
--- a/include/uapi/linux/major.h
+++ b/include/uapi/linux/major.h
@@ -34,8 +34,6 @@
#define GOLDSTAR_CDROM_MAJOR 16
#define OPTICS_CDROM_MAJOR 17
#define SANYO_CDROM_MAJOR 18
-#define CYCLADES_MAJOR 19
-#define CYCLADESAUX_MAJOR 20
#define MITSUMI_X_CDROM_MAJOR 20
#define MFM_ACORN_MAJOR 21 /* ARM Linux /dev/mfm */
#define SCSI_GENERIC_MAJOR 21
diff --git a/include/uapi/linux/map_to_14segment.h b/include/uapi/linux/map_to_14segment.h
new file mode 100644
index 000000000000..0346ef76543b
--- /dev/null
+++ b/include/uapi/linux/map_to_14segment.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 Glider bv
+ *
+ * Based on include/uapi/linux/map_to_7segment.h:
+ *
+ * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
+ */
+
+#ifndef MAP_TO_14SEGMENT_H
+#define MAP_TO_14SEGMENT_H
+
+/* This file provides translation primitives and tables for the conversion
+ * of (ASCII) characters to a 14-segment notation.
+ *
+ * The 14-segment Wikipedia notation below is used as the standard.
+ * See: https://en.wikipedia.org/wiki/Fourteen-segment_display
+ *
+ * Notation: +---a---+
+ * |\ | /|
+ * f h i j b
+ * | \|/ |
+ * +-g1+-g2+
+ * | /|\ |
+ * e k l m c
+ * |/ | \|
+ * +---d---+
+ *
+ * Usage:
+ *
+ * Register a map variable, and fill it with a character set:
+ * static SEG14_DEFAULT_MAP(map_seg14);
+ *
+ *
+ * Then use for conversion:
+ * seg14 = map_to_seg14(&map_seg14, some_char);
+ * ...
+ *
+ * In device drivers it is recommended, if required, to make the char map
+ * accessible via the sysfs interface using the following scheme:
+ *
+ * static ssize_t map_seg14_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * memcpy(buf, &map_seg14, sizeof(map_seg14));
+ * return sizeof(map_seg14);
+ * }
+ * static ssize_t map_seg14_store(struct device *dev,
+ * struct device_attribute *attr,
+ * const char *buf, size_t cnt)
+ * {
+ * if (cnt != sizeof(map_seg14))
+ * return -EINVAL;
+ * memcpy(&map_seg14, buf, cnt);
+ * return cnt;
+ * }
+ * static DEVICE_ATTR_RW(map_seg14);
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#define BIT_SEG14_A 0
+#define BIT_SEG14_B 1
+#define BIT_SEG14_C 2
+#define BIT_SEG14_D 3
+#define BIT_SEG14_E 4
+#define BIT_SEG14_F 5
+#define BIT_SEG14_G1 6
+#define BIT_SEG14_G2 7
+#define BIT_SEG14_H 8
+#define BIT_SEG14_I 9
+#define BIT_SEG14_J 10
+#define BIT_SEG14_K 11
+#define BIT_SEG14_L 12
+#define BIT_SEG14_M 13
+#define BIT_SEG14_RESERVED1 14
+#define BIT_SEG14_RESERVED2 15
+
+struct seg14_conversion_map {
+ __be16 table[128];
+};
+
+static __inline__ int map_to_seg14(struct seg14_conversion_map *map, int c)
+{
+ if (c < 0 || c >= sizeof(map->table) / sizeof(map->table[0]))
+ return -EINVAL;
+
+ return __be16_to_cpu(map->table[c]);
+}
+
+#define SEG14_CONVERSION_MAP(_name, _map) \
+ struct seg14_conversion_map _name = { .table = { _map } }
+
+/*
+ * It is recommended to use a facility that allows user space to redefine
+ * custom character sets for LCD devices. Please use a sysfs interface
+ * as described above.
+ */
+#define MAP_TO_SEG14_SYSFS_FILE "map_seg14"
+
+/*******************************************************************************
+ * ASCII conversion table
+ ******************************************************************************/
+
+#define _SEG14(sym, a, b, c, d, e, f, g1, g2, h, j, k, l, m, n) \
+ __cpu_to_be16( a << BIT_SEG14_A | b << BIT_SEG14_B | \
+ c << BIT_SEG14_C | d << BIT_SEG14_D | \
+ e << BIT_SEG14_E | f << BIT_SEG14_F | \
+ g1 << BIT_SEG14_G1 | g2 << BIT_SEG14_G2 | \
+ h << BIT_SEG14_H | j << BIT_SEG14_I | \
+ k << BIT_SEG14_J | l << BIT_SEG14_K | \
+ m << BIT_SEG14_L | n << BIT_SEG14_M )
+
+#define _MAP_0_32_ASCII_SEG14_NON_PRINTABLE \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+
+#define _MAP_33_47_ASCII_SEG14_SYMBOL \
+ _SEG14('!', 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('"', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), \
+ _SEG14('#', 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('$', 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('%', 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('&', 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1), \
+ _SEG14('\'',0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), \
+ _SEG14('(', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14(')', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0), \
+ _SEG14('*', 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1), \
+ _SEG14('+', 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14(',', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('-', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('.', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('/', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),
+
+#define _MAP_48_57_ASCII_SEG14_NUMERIC \
+ _SEG14('0', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('1', 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), \
+ _SEG14('2', 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('3', 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('4', 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('5', 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('6', 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('7', 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0), \
+ _SEG14('8', 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('9', 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0),
+
+#define _MAP_58_64_ASCII_SEG14_SYMBOL \
+ _SEG14(':', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14(';', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0), \
+ _SEG14('<', 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14('=', 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('>', 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0), \
+ _SEG14('?', 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0), \
+ _SEG14('@', 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0),
+
+#define _MAP_65_90_ASCII_SEG14_ALPHA_UPPER \
+ _SEG14('A', 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('B', 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0), \
+ _SEG14('C', 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('D', 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('E', 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('F', 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('G', 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('H', 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('I', 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('J', 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('K', 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1), \
+ _SEG14('L', 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('M', 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0), \
+ _SEG14('N', 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1), \
+ _SEG14('O', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('P', 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('Q', 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('R', 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1), \
+ _SEG14('S', 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('T', 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('U', 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('V', 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0), \
+ _SEG14('W', 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('X', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1), \
+ _SEG14('Y', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0), \
+ _SEG14('Z', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),
+
+#define _MAP_91_96_ASCII_SEG14_SYMBOL \
+ _SEG14('[', 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('\\',0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1), \
+ _SEG14(']', 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('^', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('_', 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('`', 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
+
+#define _MAP_97_122_ASCII_SEG14_ALPHA_LOWER \
+ _SEG14('a', 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('b', 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1), \
+ _SEG14('c', 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('d', 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0), \
+ _SEG14('e', 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('f', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0), \
+ _SEG14('g', 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0), \
+ _SEG14('h', 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('i', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('j', 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0), \
+ _SEG14('k', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1), \
+ _SEG14('l', 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('m', 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0), \
+ _SEG14('n', 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0), \
+ _SEG14('o', 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0), \
+ _SEG14('p', 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0), \
+ _SEG14('q', 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0), \
+ _SEG14('r', 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('s', 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1), \
+ _SEG14('t', 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('u', 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0), \
+ _SEG14('v', 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0), \
+ _SEG14('w', 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1), \
+ _SEG14('x', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), \
+ _SEG14('y', 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0), \
+ _SEG14('z', 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0),
+
+#define _MAP_123_126_ASCII_SEG14_SYMBOL \
+ _SEG14('{', 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0), \
+ _SEG14('|', 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0), \
+ _SEG14('}', 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1), \
+ _SEG14('~', 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0),
+
+/* Maps */
+#define MAP_ASCII14SEG_ALPHANUM \
+ _MAP_0_32_ASCII_SEG14_NON_PRINTABLE \
+ _MAP_33_47_ASCII_SEG14_SYMBOL \
+ _MAP_48_57_ASCII_SEG14_NUMERIC \
+ _MAP_58_64_ASCII_SEG14_SYMBOL \
+ _MAP_65_90_ASCII_SEG14_ALPHA_UPPER \
+ _MAP_91_96_ASCII_SEG14_SYMBOL \
+ _MAP_97_122_ASCII_SEG14_ALPHA_LOWER \
+ _MAP_123_126_ASCII_SEG14_SYMBOL
+
+#define SEG14_DEFAULT_MAP(_name) \
+ SEG14_CONVERSION_MAP(_name, MAP_ASCII14SEG_ALPHANUM)
+
+#endif /* MAP_TO_14SEGMENT_H */
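The header's own usage comment translates into this minimal, self-contained sketch: instantiate the default map once, then convert characters to segment patterns (sample string chosen for illustration):

#include <linux/map_to_14segment.h>
#include <stdio.h>

static SEG14_DEFAULT_MAP(map_seg14);

int main(void)
{
	const char *s = "HELLO-42";

	for (; *s; s++) {
		int seg = map_to_seg14(&map_seg14, *s);

		/* A negative return means the character is out of range. */
		if (seg >= 0)
			printf("'%c' -> 0x%04x\n", *s, seg);
	}
	return 0;
}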
diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h
index 13a06e5e966e..04c8b55812e7 100644
--- a/include/uapi/linux/map_to_7segment.h
+++ b/include/uapi/linux/map_to_7segment.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef MAP_TO_7SEGMENT_H
@@ -45,17 +31,22 @@
* In device drivers it is recommended, if required, to make the char map
* accessible via the sysfs interface using the following scheme:
*
- * static ssize_t show_map(struct device *dev, char *buf) {
+ * static ssize_t map_seg7_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
* memcpy(buf, &map_seg7, sizeof(map_seg7));
* return sizeof(map_seg7);
* }
- * static ssize_t store_map(struct device *dev, const char *buf, size_t cnt) {
+ * static ssize_t map_seg7_store(struct device *dev,
+ * struct device_attribute *attr, const char *buf,
+ * size_t cnt)
+ * {
* if(cnt != sizeof(map_seg7))
* return -EINVAL;
* memcpy(&map_seg7, buf, cnt);
* return cnt;
* }
- * static DEVICE_ATTR(map_seg7, PERMS_RW, show_map, store_map);
+ * static DEVICE_ATTR_RW(map_seg7);
*
* History:
* 2005-05-31 RFC linux-kernel@vger.kernel.org
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
new file mode 100644
index 000000000000..e1db65df9359
--- /dev/null
+++ b/include/uapi/linux/mctp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Management Component Transport Protocol (MCTP)
+ *
+ * Copyright (c) 2021 Code Construct
+ * Copyright (c) 2021 Google
+ */
+
+#ifndef __UAPI_MCTP_H
+#define __UAPI_MCTP_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+
+typedef __u8 mctp_eid_t;
+
+struct mctp_addr {
+ mctp_eid_t s_addr;
+};
+
+struct sockaddr_mctp {
+ __kernel_sa_family_t smctp_family;
+ __u16 __smctp_pad0;
+ unsigned int smctp_network;
+ struct mctp_addr smctp_addr;
+ __u8 smctp_type;
+ __u8 smctp_tag;
+ __u8 __smctp_pad1;
+};
+
+struct sockaddr_mctp_ext {
+ struct sockaddr_mctp smctp_base;
+ int smctp_ifindex;
+ __u8 smctp_halen;
+ __u8 __smctp_pad0[3];
+ __u8 smctp_haddr[MAX_ADDR_LEN];
+};
+
+#define MCTP_NET_ANY 0x0
+
+#define MCTP_ADDR_NULL 0x00
+#define MCTP_ADDR_ANY 0xff
+
+#define MCTP_TAG_MASK 0x07
+#define MCTP_TAG_OWNER 0x08
+#define MCTP_TAG_PREALLOC 0x10
+
+#define MCTP_OPT_ADDR_EXT 1
+
+#define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0)
+#define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1)
+#define SIOCMCTPALLOCTAG2 (SIOCPROTOPRIVATE + 2)
+#define SIOCMCTPDROPTAG2 (SIOCPROTOPRIVATE + 3)
+
+/* Deprecated: use mctp_ioc_tag_ctl2 / TAG2 ioctls instead, which define the
+ * MCTP network ID as part of the allocated tag. Using this assumes the default
+ * net ID for allocated tags, which may not give correct behaviour on systems
+ * with multiple networks configured.
+ */
+struct mctp_ioc_tag_ctl {
+ mctp_eid_t peer_addr;
+
+ /* For SIOCMCTPALLOCTAG: must be passed as zero, kernel will
+ * populate with the allocated tag value. Returned tag value will
+ * always have TO and PREALLOC set.
+ *
+ * For SIOCMCTPDROPTAG: userspace provides tag value to drop, from
+ * a prior SIOCMCTPALLOCTAG call (and so must have TO and PREALLOC set).
+ */
+ __u8 tag;
+ __u16 flags;
+};
+
+struct mctp_ioc_tag_ctl2 {
+ /* Peer details: network ID, peer EID, local EID. All set by the
+ * caller.
+ *
+ * Local EID must be MCTP_ADDR_NULL or MCTP_ADDR_ANY in current
+ * kernels.
+ */
+ unsigned int net;
+ mctp_eid_t peer_addr;
+ mctp_eid_t local_addr;
+
+ /* Set by caller, but no flags defined currently. Must be 0 */
+ __u16 flags;
+
+ /* For SIOCMCTPALLOCTAG2: must be passed as zero, kernel will
+ * populate with the allocated tag value. Returned tag value will
+ * always have TO and PREALLOC set.
+ *
+ * For SIOCMCTPDROPTAG2: userspace provides tag value to drop, from
+ * a prior SIOCMCTPALLOCTAG2 call (and so must have TO and PREALLOC set).
+ */
+ __u8 tag;
+
+};
+
+#endif /* __UAPI_MCTP_H */
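A minimal send-side sketch of the socket API these definitions back. The peer EID (8) and message type (1) are placeholder values, and AF_MCTP (45) may need a fallback definition on libcs that predate it:

#include <linux/mctp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_MCTP
#define AF_MCTP 45	/* from include/linux/socket.h */
#endif

int main(void)
{
	struct sockaddr_mctp addr;
	unsigned char buf[1] = { 0x00 };	/* message body */
	int sd = socket(AF_MCTP, SOCK_DGRAM, 0);

	if (sd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.smctp_family = AF_MCTP;
	addr.smctp_network = MCTP_NET_ANY;
	addr.smctp_addr.s_addr = 8;		/* peer EID, placeholder */
	addr.smctp_type = 1;			/* message type, placeholder */
	addr.smctp_tag = MCTP_TAG_OWNER;	/* kernel allocates the tag */

	sendto(sd, buf, sizeof(buf), 0,
	       (struct sockaddr *)&addr, sizeof(addr));
	close(sd);
	return 0;
}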
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 3f302e2523b2..c0c8ec995b06 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -53,18 +53,37 @@
#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
#define MDIO_AN_EEE_ADV2 62 /* EEE advertisement 2 */
#define MDIO_AN_EEE_LPABLE2 63 /* EEE link partner ability 2 */
+#define MDIO_AN_CTRL2 64 /* AN THP bypass request control */
/* Media-dependent registers. */
#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
* Lanes B-D are numbered 134-136. */
+#define MDIO_PMA_10GBR_FSRT_CSR 147 /* 10GBASE-R fast retrain status and control */
#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+#define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */
+#define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */
+#define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */
+#define MDIO_PMA_PMD_BT1 18 /* BASE-T1 PMA/PMD extended ability */
+#define MDIO_AN_T1_CTRL 512 /* BASE-T1 AN control */
+#define MDIO_AN_T1_STAT 513 /* BASE-T1 AN status */
+#define MDIO_AN_T1_ADV_L 514 /* BASE-T1 AN advertisement register [15:0] */
+#define MDIO_AN_T1_ADV_M 515 /* BASE-T1 AN advertisement register [31:16] */
+#define MDIO_AN_T1_ADV_H 516 /* BASE-T1 AN advertisement register [47:32] */
+#define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */
+#define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */
+#define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_AN_10BT1_AN_CTRL 526 /* 10BASE-T1 AN control register */
+#define MDIO_AN_10BT1_AN_STAT 527 /* 10BASE-T1 AN status register */
+#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */
+#define MDIO_PCS_1000BT1_CTRL 2304 /* 1000BASE-T1 PCS control register */
+#define MDIO_PCS_1000BT1_STAT 2305 /* 1000BASE-T1 PCS status register */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -119,7 +138,11 @@
#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PMA_SPEED_2_5G 0x2000 /* 2.5G capable */
+#define MDIO_PMA_SPEED_5G 0x4000 /* 5G capable */
#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
+#define MDIO_PCS_SPEED_2_5G 0x0040 /* 2.5G capable */
+#define MDIO_PCS_SPEED_5G 0x0080 /* 5G capable */
/* Device present registers. */
#define MDIO_DEVS_PRESENT(devad) (1 << (devad))
@@ -155,6 +178,7 @@
#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
#define MDIO_PMA_CTRL2_2_5GBT 0x0030 /* 2.5GBaseT type */
#define MDIO_PMA_CTRL2_5GBT 0x0031 /* 5GBaseT type */
+#define MDIO_PMA_CTRL2_BASET1 0x003D /* BASE-T1 type */
#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
@@ -208,8 +232,33 @@
#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */
#define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */
+/* AN Clause 73 linkword */
+#define MDIO_AN_C73_0_S_MASK GENMASK(4, 0)
+#define MDIO_AN_C73_0_E_MASK GENMASK(9, 5)
+#define MDIO_AN_C73_0_PAUSE BIT(10)
+#define MDIO_AN_C73_0_ASM_DIR BIT(11)
+#define MDIO_AN_C73_0_C2 BIT(12)
+#define MDIO_AN_C73_0_RF BIT(13)
+#define MDIO_AN_C73_0_ACK BIT(14)
+#define MDIO_AN_C73_0_NP BIT(15)
+#define MDIO_AN_C73_1_T_MASK GENMASK(4, 0)
+#define MDIO_AN_C73_1_1000BASE_KX BIT(5)
+#define MDIO_AN_C73_1_10GBASE_KX4 BIT(6)
+#define MDIO_AN_C73_1_10GBASE_KR BIT(7)
+#define MDIO_AN_C73_1_40GBASE_KR4 BIT(8)
+#define MDIO_AN_C73_1_40GBASE_CR4 BIT(9)
+#define MDIO_AN_C73_1_100GBASE_CR10 BIT(10)
+#define MDIO_AN_C73_1_100GBASE_KP4 BIT(11)
+#define MDIO_AN_C73_1_100GBASE_KR4 BIT(12)
+#define MDIO_AN_C73_1_100GBASE_CR4 BIT(13)
+#define MDIO_AN_C73_1_25GBASE_R_S BIT(14)
+#define MDIO_AN_C73_1_25GBASE_R BIT(15)
+#define MDIO_AN_C73_2_2500BASE_KX BIT(0)
+#define MDIO_AN_C73_2_5GBASE_KR BIT(1)
+
/* PHY XGXS lane state register. */
#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
@@ -237,6 +286,9 @@
#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+/* PMA 10GBASE-R Fast Retrain status and control register. */
+#define MDIO_PMA_10GBR_FSRT_ENABLE 0x0001 /* Fast retrain enable */
+
/* PCS 10GBASE-R/-T status register 1. */
#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */
@@ -245,6 +297,7 @@
#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADVFSRT2_5G 0x0020 /* Advertise 2.5GBASE-T fast retrain */
#define MDIO_AN_10GBT_CTRL_ADV2_5G 0x0080 /* Advertise 2.5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV5G 0x0100 /* Advertise 5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
@@ -260,6 +313,88 @@
#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
+/* 10BASE-T1L PMA control */
+#define MDIO_PMA_10T1L_CTRL_LB_EN 0x0001 /* Enable loopback mode */
+#define MDIO_PMA_10T1L_CTRL_EEE_EN 0x0400 /* Enable EEE mode */
+#define MDIO_PMA_10T1L_CTRL_LOW_POWER 0x0800 /* Low-power mode */
+#define MDIO_PMA_10T1L_CTRL_2V4_EN 0x1000 /* Enable 2.4 Vpp operating mode */
+#define MDIO_PMA_10T1L_CTRL_TX_DIS 0x4000 /* Transmit disable */
+#define MDIO_PMA_10T1L_CTRL_PMA_RST 0x8000 /* PMA reset */
+
+/* 10BASE-T1L PMA status register. */
+#define MDIO_PMA_10T1L_STAT_LINK 0x0001 /* PMA receive link up */
+#define MDIO_PMA_10T1L_STAT_FAULT 0x0002 /* Fault condition detected */
+#define MDIO_PMA_10T1L_STAT_POLARITY 0x0004 /* Receive polarity is reversed */
+#define MDIO_PMA_10T1L_STAT_RECV_FAULT 0x0200 /* Able to detect fault on receive path */
+#define MDIO_PMA_10T1L_STAT_EEE 0x0400 /* PHY has EEE ability */
+#define MDIO_PMA_10T1L_STAT_LOW_POWER 0x0800 /* PMA has low-power ability */
+#define MDIO_PMA_10T1L_STAT_2V4_ABLE 0x1000 /* PHY has 2.4 Vpp operating mode ability */
+#define MDIO_PMA_10T1L_STAT_LB_ABLE 0x2000 /* PHY has loopback ability */
+
+/* 10BASE-T1L PCS control register. */
+#define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */
+#define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */
+
+/* BASE-T1 PMA/PMD extended ability register. */
+#define MDIO_PMA_PMD_BT1_B100_ABLE 0x0001 /* 100BASE-T1 Ability */
+#define MDIO_PMA_PMD_BT1_B1000_ABLE 0x0002 /* 1000BASE-T1 Ability */
+#define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */
+
+/* BASE-T1 auto-negotiation advertisement register [15:0] */
+#define MDIO_AN_T1_ADV_L_PAUSE_CAP ADVERTISE_PAUSE_CAP
+#define MDIO_AN_T1_ADV_L_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
+#define MDIO_AN_T1_ADV_L_FORCE_MS 0x1000 /* Force Master/slave Configuration */
+#define MDIO_AN_T1_ADV_L_REMOTE_FAULT ADVERTISE_RFAULT
+#define MDIO_AN_T1_ADV_L_ACK ADVERTISE_LPACK
+#define MDIO_AN_T1_ADV_L_NEXT_PAGE_REQ ADVERTISE_NPAGE
+
+/* BASE-T1 auto-negotiation advertisement register [31:16] */
+#define MDIO_AN_T1_ADV_M_B10L 0x4000 /* device is compatible with 10BASE-T1L */
+#define MDIO_AN_T1_ADV_M_1000BT1 0x0080 /* advertise 1000BASE-T1 */
+#define MDIO_AN_T1_ADV_M_100BT1 0x0020 /* advertise 100BASE-T1 */
+#define MDIO_AN_T1_ADV_M_MST 0x0010 /* advertise master preference */
+
+/* BASE-T1 auto-negotiation advertisement register [47:32] */
+#define MDIO_AN_T1_ADV_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level Transmit Request */
+#define MDIO_AN_T1_ADV_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level Transmit Ability */
+
+/* BASE-T1 AN LP Base Page ability register [15:0] */
+#define MDIO_AN_T1_LP_L_PAUSE_CAP LPA_PAUSE_CAP
+#define MDIO_AN_T1_LP_L_PAUSE_ASYM LPA_PAUSE_ASYM
+#define MDIO_AN_T1_LP_L_FORCE_MS 0x1000 /* LP Force Master/slave Configuration */
+#define MDIO_AN_T1_LP_L_REMOTE_FAULT LPA_RFAULT
+#define MDIO_AN_T1_LP_L_ACK LPA_LPACK
+#define MDIO_AN_T1_LP_L_NEXT_PAGE_REQ LPA_NPAGE
+
+/* BASE-T1 AN LP Base Page ability register [31:16] */
+#define MDIO_AN_T1_LP_M_MST 0x0010 /* LP master preference */
+#define MDIO_AN_T1_LP_M_B10L 0x4000 /* LP is compatible with 10BASE-T1L */
+
+/* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */
+#define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */
+
+/* 10BASE-T1 AN control register */
+#define MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L 0x4000 /* 10BASE-T1L EEE ability advertisement */
+
+/* 10BASE-T1 AN status register */
+#define MDIO_AN_10BT1_AN_STAT_LPA_EEE_T1L 0x4000 /* 10BASE-T1L LP EEE ability advertisement */
+
+/* BASE-T1 PMA/PMD control register */
+#define MDIO_PMA_PMD_BT1_CTRL_STRAP 0x000F /* Type selection (Strap) */
+#define MDIO_PMA_PMD_BT1_CTRL_STRAP_B1000 0x0001 /* Select 1000BASE-T1 */
+#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
+
+/* 1000BASE-T1 PCS control register */
+#define MDIO_PCS_1000BT1_CTRL_LOW_POWER 0x0800 /* Low power mode */
+#define MDIO_PCS_1000BT1_CTRL_DISABLE_TX 0x4000 /* Global PMA transmit disable */
+#define MDIO_PCS_1000BT1_CTRL_RESET 0x8000 /* Software reset value */
+
+/* 1000BASE-T1 PCS status register */
+#define MDIO_PCS_1000BT1_STAT_LINK 0x0004 /* PCS Link is up */
+#define MDIO_PCS_1000BT1_STAT_FAULT 0x0080 /* There is a fault condition */
+
/* EEE Supported/Advertisement/LP Advertisement registers.
*
* EEE capability Register (3.20), Advertisement (7.60) and
@@ -287,6 +422,9 @@
#define MDIO_EEE_2_5GT 0x0001 /* 2.5GT EEE cap */
#define MDIO_EEE_5GT 0x0002 /* 5GT EEE cap */
+/* AN MultiGBASE-T AN control 2 */
+#define MDIO_AN_THP_BP2_5GT 0x0008 /* 2.5GT THP bypass request */
+
/* 2.5G/5G Extended abilities register. */
#define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */
#define MDIO_PMA_NG_EXTABLE_5GBT 0x0002 /* 5GBASET ability */
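Note that the Clause 73 linkword fields above are expressed with the kernel's BIT()/GENMASK() helpers, which are not part of the uapi surface; a userspace consumer has to supply compatible definitions before including the header. A decode sketch under that assumption, with sample register values:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	((~0U >> (31 - (h))) & ~((1U << (l)) - 1))

#include <linux/mdio.h>

int main(void)
{
	/* Sample advertisement words, as read from the AN registers. */
	uint16_t ad0 = 0x4401;
	uint16_t ad1 = MDIO_AN_C73_1_1000BASE_KX | MDIO_AN_C73_1_10GBASE_KR;

	printf("selector: %u\n", ad0 & MDIO_AN_C73_0_S_MASK);
	if (ad0 & MDIO_AN_C73_0_PAUSE)
		puts("symmetric pause advertised");
	if (ad1 & MDIO_AN_C73_1_1000BASE_KX)
		puts("1000BASE-KX advertised");
	if (ad1 & MDIO_AN_C73_1_10GBASE_KR)
		puts("10GBASE-KR advertised");
	return 0;
}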
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 84fa53ffb13f..f05f747e444d 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101d */
+/* RGB - next is 0x1026 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -46,8 +46,12 @@
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025
+#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_BGR666_1X24_CPADHI 0x1024
+#define MEDIA_BUS_FMT_RGB565_1X24_CPADHI 0x1022
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
#define MEDIA_BUS_FMT_BGR888_3X8 0x101b
@@ -56,15 +60,20 @@
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
#define MEDIA_BUS_FMT_RGB888_3X8 0x101c
+#define MEDIA_BUS_FMT_RGB888_3X8_DELTA 0x101d
#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
+#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO 0x101e
+#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO 0x101f
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
+#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020
+#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202e */
+/* YUV (including grey) - next is 0x202f */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -87,6 +96,7 @@
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_Y14_1X14 0x202d
+#define MEDIA_BUS_FMT_Y16_1X16 0x202e
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
@@ -156,4 +166,12 @@
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+/*
+ * This format should be used when the same driver handles
+ * both sides of the link and the bus format is a fixed
+ * metadata format that is not configurable from userspace.
+ * Width and height will be set to 0 for this format.
+ */
+#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001
+
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 383ac7b7d8f0..1c80b1d6bbaf 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -20,9 +20,6 @@
#ifndef __LINUX_MEDIA_H
#define __LINUX_MEDIA_H
-#ifndef __KERNEL__
-#include <stdint.h>
-#endif
#include <linux/ioctl.h>
#include <linux/types.h>
@@ -127,6 +124,7 @@ struct media_device_info {
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
+#define MEDIA_ENT_F_PROC_VIDEO_ISP (MEDIA_ENT_F_BASE + 0x4009)
/*
* Switch and bridge entity functions
@@ -142,8 +140,8 @@ struct media_device_info {
#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
/* Entity flags */
-#define MEDIA_ENT_FL_DEFAULT (1 << 0)
-#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
+#define MEDIA_ENT_FL_DEFAULT (1U << 0)
+#define MEDIA_ENT_FL_CONNECTOR (1U << 1)
/* OR with the entity id value to find the next entity */
#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31)
@@ -207,9 +205,9 @@ struct media_entity_desc {
};
};
-#define MEDIA_PAD_FL_SINK (1 << 0)
-#define MEDIA_PAD_FL_SOURCE (1 << 1)
-#define MEDIA_PAD_FL_MUST_CONNECT (1 << 2)
+#define MEDIA_PAD_FL_SINK (1U << 0)
+#define MEDIA_PAD_FL_SOURCE (1U << 1)
+#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2)
struct media_pad_desc {
__u32 entity; /* entity ID */
@@ -218,13 +216,14 @@ struct media_pad_desc {
__u32 reserved[2];
};
-#define MEDIA_LNK_FL_ENABLED (1 << 0)
-#define MEDIA_LNK_FL_IMMUTABLE (1 << 1)
-#define MEDIA_LNK_FL_DYNAMIC (1 << 2)
+#define MEDIA_LNK_FL_ENABLED (1U << 0)
+#define MEDIA_LNK_FL_IMMUTABLE (1U << 1)
+#define MEDIA_LNK_FL_DYNAMIC (1U << 2)
#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
-# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
-# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
+# define MEDIA_LNK_FL_DATA_LINK (0U << 28)
+# define MEDIA_LNK_FL_INTERFACE_LINK (1U << 28)
+# define MEDIA_LNK_FL_ANCILLARY_LINK (2U << 28)
struct media_link_desc {
struct media_pad_desc source;
@@ -294,7 +293,7 @@ struct media_links_enum {
* struct media_device_info.
*/
#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_entity {
__u32 id;
@@ -329,7 +328,7 @@ struct media_v2_interface {
* struct media_device_info.
*/
#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_pad {
__u32 id;
@@ -433,7 +432,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
+#define MEDIA_API_VERSION ((0U << 16) | (1U << 8) | 0U)
#endif
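The version-check macros made unsigned-safe above are meant to gate use of newer topology fields. A hedged userspace sketch (/dev/media0 is a placeholder path):

#include <linux/media.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct media_device_info info = { 0 };
	int fd = open("/dev/media0", O_RDWR);

	if (fd < 0 || ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0)
		return 1;

	if (MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version))
		puts("MEDIA_IOC_G_TOPOLOGY reports entity flags");
	close(fd);
	return 0;
}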
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index c6aec86cc5de..68a0272e99b7 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -7,15 +7,15 @@
#ifndef _LINUX_MEI_H
#define _LINUX_MEI_H
-#include <linux/uuid.h>
+#include <linux/mei_uuid.h>
/*
* This IOCTL is used to associate the current file descriptor with a
* FW Client (given by UUID). This opens a communication channel
* between a host client and a FW client. From this point every read and write
* will communicate with the associated FW client.
- * Only in close() (file_operation release()) the communication between
- * the clients is disconnected
+ * Only in close() (file_operation release()) is the communication between
+ * the clients disconnected.
*
* The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
@@ -51,7 +51,7 @@ struct mei_connect_client_data {
* DOC: set and unset event notification for a connected client
*
* The IOCTL argument is 1 for enabling event notification and 0 for
- * disabling the service
+ * disabling the service.
* Return: -EOPNOTSUPP if the device doesn't support the feature
*/
#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32)
@@ -59,11 +59,60 @@ struct mei_connect_client_data {
/**
* DOC: retrieve notification
*
- * The IOCTL output argument is 1 if an event was is pending and 0 otherwise
- * the ioctl has to be called in order to acknowledge pending event
+ * The IOCTL output argument is 1 if an event was pending and 0 otherwise.
+ * The ioctl has to be called in order to acknowledge the pending event.
*
* Return: -EOPNOTSUPP if the device doesn't support the feature
*/
#define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32)
+/**
+ * struct mei_connect_client_vtag - mei client information struct with vtag
+ *
+ * @in_client_uuid: UUID of client to connect
+ * @vtag: virtual tag
+ * @reserved: reserved for future use
+ */
+struct mei_connect_client_vtag {
+ uuid_le in_client_uuid;
+ __u8 vtag;
+ __u8 reserved[3];
+};
+
+/**
+ * struct mei_connect_client_data_vtag - IOCTL connect data union
+ *
+ * @connect: input connect data
+ * @out_client_properties: output client data
+ */
+struct mei_connect_client_data_vtag {
+ union {
+ struct mei_connect_client_vtag connect;
+ struct mei_client out_client_properties;
+ };
+};
+
+/**
+ * DOC:
+ * This IOCTL is used to associate the current file descriptor with a
+ * FW Client (given by UUID), and virtual tag (vtag).
+ * The IOCTL opens a communication channel between a host client and
+ * a FW client on a tagged channel. From this point on, every read
+ * and write will communicate with the associated FW client
+ * on the tagged channel.
+ * Upon close() the communication is terminated.
+ *
+ * The IOCTL argument is a struct with a union that contains
+ * the input parameter and the output parameter for this IOCTL.
+ *
+ * The input parameters are the UUID of the FW client and a vtag [0,255].
+ * The output parameter is the properties of the FW client
+ * (FW protocol version and max message size).
+ *
+ * Clients that do not support tagged connection
+ * will respond with -EOPNOTSUPP.
+ */
+#define IOCTL_MEI_CONNECT_CLIENT_VTAG \
+ _IOWR('H', 0x04, struct mei_connect_client_data_vtag)
+
#endif /* _LINUX_MEI_H */
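A userspace sketch of the new tagged-connect ioctl. The client UUID below is a placeholder (real clients publish theirs in the MEI documentation), and /dev/mei0 is an assumed device node:

#include <linux/mei.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct mei_connect_client_data_vtag data = { 0 };
	int fd = open("/dev/mei0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Placeholder UUID; substitute the target FW client's UUID. */
	data.connect.in_client_uuid = UUID_LE(0x12345678, 0x1234, 0x1234,
					      0x12, 0x34, 0x12, 0x34,
					      0x12, 0x34, 0x12, 0x34);
	data.connect.vtag = 1;

	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &data) == 0)
		printf("max message size: %u\n",
		       data.out_client_properties.max_msg_length);
	close(fd);
	return 0;
}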
diff --git a/include/uapi/linux/mei_uuid.h b/include/uapi/linux/mei_uuid.h
new file mode 100644
index 000000000000..676ebe12d623
--- /dev/null
+++ b/include/uapi/linux/mei_uuid.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * MEI UUID definition
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ */
+
+#ifndef _UAPI_LINUX_MEI_UUID_H_
+#define _UAPI_LINUX_MEI_UUID_H_
+
+#include <linux/types.h>
+
+typedef struct {
+ __u8 b[16];
+} uuid_le;
+
+#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((uuid_le) \
+{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
+#define NULL_UUID_LE \
+ UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00)
+
+#endif /* _UAPI_LINUX_MEI_UUID_H_ */
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 5891d7614c8c..5f3ad6d5be6f 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -114,9 +114,32 @@
* If this command is not implemented by an
* architecture, -EINVAL is returned.
* Returns 0 on success.
+ * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
+ * Ensure the caller thread, upon return from
+ * system call, that all its running thread
+ * siblings have any currently running rseq
+ * critical sections restarted if @flags
+ * parameter is 0; if @flags parameter is
+ * MEMBARRIER_CMD_FLAG_CPU,
+ * then this operation is performed only
+ * on the CPU indicated by @cpu_id. If this command is
+ * not implemented by an architecture, -EINVAL
+ * is returned. A process needs to register its
+ * intent to use the private expedited rseq
+ * command prior to using it, otherwise
+ * this command returns -EPERM.
+ * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
+ * Register the process intent to use
+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
+ * If this command is not implemented by an
+ * architecture, -EINVAL is returned.
+ * Returns 0 on success.
* @MEMBARRIER_CMD_SHARED:
* Alias to MEMBARRIER_CMD_GLOBAL. Provided for
* header backward compatibility.
+ * @MEMBARRIER_CMD_GET_REGISTRATIONS:
+ * Returns a bitmask of previously issued
+ * registration commands.
*
* Command to be passed to the membarrier system call. The commands need to
* be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
@@ -131,9 +154,16 @@ enum membarrier_cmd {
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = (1 << 7),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = (1 << 8),
+ MEMBARRIER_CMD_GET_REGISTRATIONS = (1 << 9),
/* Alias for header backward compatibility. */
MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL,
};
+enum membarrier_cmd_flag {
+ MEMBARRIER_CMD_FLAG_CPU = (1 << 0),
+};
+
#endif /* _UAPI_LINUX_MEMBARRIER_H */
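A minimal userspace sketch of the registration-then-use pattern the new RSEQ commands require, restricted to one CPU with MEMBARRIER_CMD_FLAG_CPU. There is no glibc wrapper, so the raw syscall is used:

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0))
		return 1;

	/* Restart any rseq critical section currently running on CPU 0. */
	return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
			  MEMBARRIER_CMD_FLAG_CPU, 0);
}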
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 7a8a26751c23..273a4e15dfcf 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -8,6 +8,10 @@
#define MFD_CLOEXEC 0x0001U
#define MFD_ALLOW_SEALING 0x0002U
#define MFD_HUGETLB 0x0004U
+/* not executable and sealed to prevent changing to executable. */
+#define MFD_NOEXEC_SEAL 0x0008U
+/* executable */
+#define MFD_EXEC 0x0010U
/*
* Huge page size encoding when MFD_HUGETLB is specified, and a huge page
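A userspace sketch of the new MFD_NOEXEC_SEAL flag: the memfd is created non-executable and sealed against being made executable. On kernels that predate the flag the call fails with EINVAL, which a real caller would need a fallback for:

#define _GNU_SOURCE
#include <linux/memfd.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = memfd_create("noexec-demo", MFD_CLOEXEC | MFD_NOEXEC_SEAL);

	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	printf("fd %d created with execute permission sealed off\n", fd);
	return 0;
}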
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 3354774af61e..1f9bb10d1a47 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -22,18 +22,22 @@ enum {
MPOL_BIND,
MPOL_INTERLEAVE,
MPOL_LOCAL,
+ MPOL_PREFERRED_MANY,
+ MPOL_WEIGHTED_INTERLEAVE,
MPOL_MAX, /* always last member of enum */
};
/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES (1 << 15)
#define MPOL_F_RELATIVE_NODES (1 << 14)
+#define MPOL_F_NUMA_BALANCING (1 << 13) /* Optimize with NUMA balancing if possible */
/*
* MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
* either set_mempolicy() or mbind().
*/
-#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+#define MPOL_MODE_FLAGS \
+ (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES | MPOL_F_NUMA_BALANCING)
/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
@@ -45,7 +49,7 @@ enum {
#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform
to policy */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */
-#define MPOL_MF_LAZY (1<<3) /* Modifies '_MOVE: lazy migrate on fault */
+#define MPOL_MF_LAZY (1<<3) /* UNSUPPORTED FLAG: Lazy migrate on fault */
#define MPOL_MF_INTERNAL (1<<4) /* Internal flags start here */
#define MPOL_MF_VALID (MPOL_MF_STRICT | \
@@ -58,9 +62,15 @@ enum {
* are never OR'ed into the mode in mempolicy API arguments.
*/
#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
-#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
+/*
+ * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
+ * ABI. New bits are OK, but existing bits can never change.
+ */
+#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
+#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
+#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
#endif /* _UAPI_LINUX_MEMPOLICY_H */
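A sketch of the new MPOL_F_NUMA_BALANCING mode flag: bind to node 0 while still allowing NUMA balancing to migrate pages within the policy. No glibc wrapper is assumed, so the raw syscall is used:

#define _GNU_SOURCE
#include <linux/mempolicy.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long nodemask = 1;	/* node 0 */

	return syscall(__NR_set_mempolicy,
		       MPOL_BIND | MPOL_F_NUMA_BALANCING,
		       &nodemask, 8 * sizeof(nodemask));
}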
diff --git a/include/uapi/linux/meye.h b/include/uapi/linux/meye.h
deleted file mode 100644
index de9e3a954f3d..000000000000
--- a/include/uapi/linux/meye.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * Motion Eye video4linux driver for Sony Vaio PictureBook
- *
- * Copyright (C) 2001-2003 Stelian Pop <stelian@popies.net>
- *
- * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
- *
- * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
- *
- * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
- *
- * Some parts borrowed from various video4linux drivers, especially
- * bttv-driver.c and zoran.c, see original files for credits.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _MEYE_H_
-#define _MEYE_H_
-
-/****************************************************************************/
-/* Private API for handling mjpeg capture / playback. */
-/****************************************************************************/
-
-struct meye_params {
- unsigned char subsample;
- unsigned char quality;
- unsigned char sharpness;
- unsigned char agc;
- unsigned char picture;
- unsigned char framerate;
-};
-
-/* query the extended parameters */
-#define MEYEIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct meye_params)
-/* set the extended parameters */
-#define MEYEIOC_S_PARAMS _IOW ('v', BASE_VIDIOC_PRIVATE+1, struct meye_params)
-/* queue a buffer for mjpeg capture */
-#define MEYEIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+2, int)
-/* sync a previously queued mjpeg buffer */
-#define MEYEIOC_SYNC _IOWR('v', BASE_VIDIOC_PRIVATE+3, int)
-/* get a still uncompressed snapshot */
-#define MEYEIOC_STILLCAPT _IO ('v', BASE_VIDIOC_PRIVATE+4)
-/* get a jpeg compressed snapshot */
-#define MEYEIOC_STILLJCAPT _IOR ('v', BASE_VIDIOC_PRIVATE+5, int)
-
-/* V4L2 private controls */
-#define V4L2_CID_MEYE_AGC (V4L2_CID_USER_MEYE_BASE + 0)
-#define V4L2_CID_MEYE_PICTURE (V4L2_CID_USER_MEYE_BASE + 1)
-#define V4L2_CID_MEYE_FRAMERATE (V4L2_CID_USER_MEYE_BASE + 2)
-
-#endif
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
deleted file mode 100644
index 504e523f702c..000000000000
--- a/include/uapi/linux/mic_common.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC driver.
- *
- */
-#ifndef __MIC_COMMON_H_
-#define __MIC_COMMON_H_
-
-#include <linux/virtio_ring.h>
-
-#define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1))
-
-/**
- * struct mic_device_desc: Virtio device information shared between the
- * virtio driver and userspace backend
- *
- * @type: Device type: console/network/disk etc. Type 0/-1 terminates.
- * @num_vq: Number of virtqueues.
- * @feature_len: Number of bytes of feature bits. Multiply by 2: one for
- host features and one for guest acknowledgements.
- * @config_len: Number of bytes of the config array after virtqueues.
- * @status: A status byte, written by the Guest.
- * @config: Start of the following variable length config.
- */
-struct mic_device_desc {
- __s8 type;
- __u8 num_vq;
- __u8 feature_len;
- __u8 config_len;
- __u8 status;
- __le64 config[0];
-} __attribute__ ((aligned(8)));
-
-/**
- * struct mic_device_ctrl: Per virtio device information in the device page
- * used internally by the host and card side drivers.
- *
- * @vdev: Used for storing MIC vdev information by the guest.
- * @config_change: Set to 1 by host when a config change is requested.
- * @vdev_reset: Set to 1 by guest to indicate virtio device has been reset.
- * @guest_ack: Set to 1 by guest to ack a command.
- * @host_ack: Set to 1 by host to ack a command.
- * @used_address_updated: Set to 1 by guest when the used address should be
- * updated.
- * @c2h_vdev_db: The doorbell number to be used by guest. Set by host.
- * @h2c_vdev_db: The doorbell number to be used by host. Set by guest.
- */
-struct mic_device_ctrl {
- __le64 vdev;
- __u8 config_change;
- __u8 vdev_reset;
- __u8 guest_ack;
- __u8 host_ack;
- __u8 used_address_updated;
- __s8 c2h_vdev_db;
- __s8 h2c_vdev_db;
-} __attribute__ ((aligned(8)));
-
-/**
- * struct mic_bootparam: Virtio device independent information in device page
- *
- * @magic: A magic value used by the card to ensure it can see the host
- * @h2c_config_db: Host to Card Virtio config doorbell set by card
- * @node_id: Unique id of the node
- * @h2c_scif_db - Host to card SCIF doorbell set by card
- * @c2h_scif_db - Card to host SCIF doorbell set by host
- * @scif_host_dma_addr - SCIF host queue pair DMA address
- * @scif_card_dma_addr - SCIF card queue pair DMA address
- */
-struct mic_bootparam {
- __le32 magic;
- __s8 h2c_config_db;
- __u8 node_id;
- __u8 h2c_scif_db;
- __u8 c2h_scif_db;
- __u64 scif_host_dma_addr;
- __u64 scif_card_dma_addr;
-} __attribute__ ((aligned(8)));
-
-/**
- * struct mic_device_page: High level representation of the device page
- *
- * @bootparam: The bootparam structure is used for sharing information and
- * status updates between MIC host and card drivers.
- * @desc: Array of MIC virtio device descriptors.
- */
-struct mic_device_page {
- struct mic_bootparam bootparam;
- struct mic_device_desc desc[0];
-};
-/**
- * struct mic_vqconfig: This is how we expect the device configuration field
- * for a virtqueue to be laid out in config space.
- *
- * @address: Guest/MIC physical address of the virtio ring
- * (avail and desc rings)
- * @used_address: Guest/MIC physical address of the used ring
- * @num: The number of entries in the virtio_ring
- */
-struct mic_vqconfig {
- __le64 address;
- __le64 used_address;
- __le16 num;
-} __attribute__ ((aligned(8)));
-
-/*
- * The alignment to use between consumer and producer parts of vring.
- * This is pagesize for historical reasons.
- */
-#define MIC_VIRTIO_RING_ALIGN 4096
-
-#define MIC_MAX_VRINGS 4
-#define MIC_VRING_ENTRIES 128
-
-/*
- * Max vring entries (power of 2) to ensure desc and avail rings
- * fit in a single page
- */
-#define MIC_MAX_VRING_ENTRIES 128
-
-/**
- * Max size of the desc block in bytes: includes:
- * - struct mic_device_desc
- * - struct mic_vqconfig (num_vq of these)
- * - host and guest features
- * - virtio device config space
- */
-#define MIC_MAX_DESC_BLK_SIZE 256
-
-/**
- * struct _mic_vring_info - Host vring info exposed to userspace backend
- * for the avail index and magic for the card.
- *
- * @avail_idx: host avail idx
- * @magic: A magic debug cookie.
- */
-struct _mic_vring_info {
- __u16 avail_idx;
- __le32 magic;
-};
-
-/**
- * struct mic_vring - Vring information.
- *
- * @vr: The virtio ring.
- * @info: Host vring information exposed to the userspace backend for the
- * avail index and magic for the card.
- * @va: The va for the buffer allocated for vr and info.
- * @len: The length of the buffer required for allocating vr and info.
- */
-struct mic_vring {
- struct vring vr;
- struct _mic_vring_info *info;
- void *va;
- int len;
-};
-
-#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8)
-
-#ifndef INTEL_MIC_CARD
-static inline unsigned mic_desc_size(const struct mic_device_desc *desc)
-{
- return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig)
- + desc->feature_len * 2 + desc->config_len;
-}
-
-static inline struct mic_vqconfig *
-mic_vq_config(const struct mic_device_desc *desc)
-{
- return (struct mic_vqconfig *)(desc + 1);
-}
-
-static inline __u8 *mic_vq_features(const struct mic_device_desc *desc)
-{
- return (__u8 *)(mic_vq_config(desc) + desc->num_vq);
-}
-
-static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc)
-{
- return mic_vq_features(desc) + desc->feature_len * 2;
-}
-static inline unsigned mic_total_desc_size(struct mic_device_desc *desc)
-{
- return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-#endif
-
-/* Device page size */
-#define MIC_DP_SIZE 4096
-
-#define MIC_MAGIC 0xc0ffee00
-
-/**
- * enum mic_states - MIC states.
- */
-enum mic_states {
- MIC_READY = 0,
- MIC_BOOTING,
- MIC_ONLINE,
- MIC_SHUTTING_DOWN,
- MIC_RESETTING,
- MIC_RESET_FAILED,
- MIC_LAST
-};
-
-/**
- * enum mic_status - MIC status reported by card after
- * a host or card initiated shutdown or a card crash.
- */
-enum mic_status {
- MIC_NOP = 0,
- MIC_CRASHED,
- MIC_HALTED,
- MIC_POWER_OFF,
- MIC_RESTART,
- MIC_STATUS_LAST
-};
-
-#endif
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
deleted file mode 100644
index 687b9cd9d3e2..000000000000
--- a/include/uapi/linux/mic_ioctl.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef _MIC_IOCTL_H_
-#define _MIC_IOCTL_H_
-
-#include <linux/types.h>
-
-/*
- * mic_copy - MIC virtio descriptor copy.
- *
- * @iov: An array of IOVEC structures containing user space buffers.
- * @iovcnt: Number of IOVEC structures in iov.
- * @vr_idx: The vring index.
- * @update_used: A non zero value results in used index being updated.
- * @out_len: The aggregate of the total length written to or read from
- * the virtio device.
- */
-struct mic_copy_desc {
-#ifdef __KERNEL__
- struct iovec __user *iov;
-#else
- struct iovec *iov;
-#endif
- __u32 iovcnt;
- __u8 vr_idx;
- __u8 update_used;
- __u32 out_len;
-};
-
-/*
- * Add a new virtio device
- * The (struct mic_device_desc *) pointer points to a device page entry
- * for the virtio device consisting of:
- * - struct mic_device_desc
- * - struct mic_vqconfig (num_vq of these)
- * - host and guest features
- * - virtio device config space
- * The total size referenced by the pointer should equal the size returned
- * by desc_size() in mic_common.h
- */
-#define MIC_VIRTIO_ADD_DEVICE _IOWR('s', 1, struct mic_device_desc *)
-
-/*
- * Copy the number of entries in the iovec and update the used index
- * if requested by the user.
- */
-#define MIC_VIRTIO_COPY_DESC _IOWR('s', 2, struct mic_copy_desc *)
-
-/*
- * Notify virtio device of a config change
- * The (__u8 *) pointer points to config space values for the device
- * as they should be written into the device page. The total size
- * referenced by the pointer should equal the config_len field of struct
- * mic_device_desc.
- */
-#define MIC_VIRTIO_CONFIG_CHANGE _IOWR('s', 5, __u8 *)
-
-#endif
diff --git a/include/uapi/linux/minix_fs.h b/include/uapi/linux/minix_fs.h
index 95dbcb17eacd..8d9ca8b2c357 100644
--- a/include/uapi/linux/minix_fs.h
+++ b/include/uapi/linux/minix_fs.h
@@ -97,11 +97,11 @@ struct minix3_super_block {
struct minix_dir_entry {
__u16 inode;
- char name[0];
+ char name[];
};
struct minix3_dir_entry {
__u32 inode;
- char name[0];
+ char name[];
};
#endif
diff --git a/include/uapi/linux/misc/bcm_vk.h b/include/uapi/linux/misc/bcm_vk.h
new file mode 100644
index 000000000000..ec28e0bd46a9
--- /dev/null
+++ b/include/uapi/linux/misc/bcm_vk.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2018-2020 Broadcom.
+ */
+
+#ifndef __UAPI_LINUX_MISC_BCM_VK_H
+#define __UAPI_LINUX_MISC_BCM_VK_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define BCM_VK_MAX_FILENAME 64
+
+struct vk_image {
+ __u32 type; /* Type of image */
+#define VK_IMAGE_TYPE_BOOT1 1 /* 1st stage (load to SRAM) */
+#define VK_IMAGE_TYPE_BOOT2 2 /* 2nd stage (load to DDR) */
+ __u8 filename[BCM_VK_MAX_FILENAME]; /* Filename of image */
+};
+
+struct vk_reset {
+ __u32 arg1;
+ __u32 arg2;
+};
+
+#define VK_MAGIC 0x5e
+
+/* Load image to Valkyrie */
+#define VK_IOCTL_LOAD_IMAGE _IOW(VK_MAGIC, 0x2, struct vk_image)
+
+/* Send Reset to Valkyrie */
+#define VK_IOCTL_RESET _IOW(VK_MAGIC, 0x4, struct vk_reset)
+
+/*
+ * Firmware Status accessed directly via BAR space
+ */
+#define VK_BAR_FWSTS 0x41c
+#define VK_BAR_COP_FWSTS 0x428
+/* VK_FWSTS definitions */
+#define VK_FWSTS_RELOCATION_ENTRY (1UL << 0)
+#define VK_FWSTS_RELOCATION_EXIT (1UL << 1)
+#define VK_FWSTS_INIT_START (1UL << 2)
+#define VK_FWSTS_ARCH_INIT_DONE (1UL << 3)
+#define VK_FWSTS_PRE_KNL1_INIT_DONE (1UL << 4)
+#define VK_FWSTS_PRE_KNL2_INIT_DONE (1UL << 5)
+#define VK_FWSTS_POST_KNL_INIT_DONE (1UL << 6)
+#define VK_FWSTS_INIT_DONE (1UL << 7)
+#define VK_FWSTS_APP_INIT_START (1UL << 8)
+#define VK_FWSTS_APP_INIT_DONE (1UL << 9)
+#define VK_FWSTS_MASK 0xffffffff
+#define VK_FWSTS_READY (VK_FWSTS_INIT_START | \
+ VK_FWSTS_ARCH_INIT_DONE | \
+ VK_FWSTS_PRE_KNL1_INIT_DONE | \
+ VK_FWSTS_PRE_KNL2_INIT_DONE | \
+ VK_FWSTS_POST_KNL_INIT_DONE | \
+ VK_FWSTS_INIT_DONE | \
+ VK_FWSTS_APP_INIT_START | \
+ VK_FWSTS_APP_INIT_DONE)
+/* Deinit */
+#define VK_FWSTS_APP_DEINIT_START (1UL << 23)
+#define VK_FWSTS_APP_DEINIT_DONE (1UL << 24)
+#define VK_FWSTS_DRV_DEINIT_START (1UL << 25)
+#define VK_FWSTS_DRV_DEINIT_DONE (1UL << 26)
+#define VK_FWSTS_RESET_DONE (1UL << 27)
+#define VK_FWSTS_DEINIT_TRIGGERED (VK_FWSTS_APP_DEINIT_START | \
+ VK_FWSTS_APP_DEINIT_DONE | \
+ VK_FWSTS_DRV_DEINIT_START | \
+ VK_FWSTS_DRV_DEINIT_DONE)
+/* Last nibble for reboot reason */
+#define VK_FWSTS_RESET_REASON_SHIFT 28
+#define VK_FWSTS_RESET_REASON_MASK (0xf << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_SYS_PWRUP (0x0 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_MBOX_DB (0x1 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_M7_WDOG (0x2 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_TEMP (0x3 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_FLR (0x4 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_HOT (0x5 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_WARM (0x6 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_PCI_COLD (0x7 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_L1 (0x8 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_L0 (0x9 << VK_FWSTS_RESET_REASON_SHIFT)
+#define VK_FWSTS_RESET_UNKNOWN (0xf << VK_FWSTS_RESET_REASON_SHIFT)
+
+#endif /* __UAPI_LINUX_MISC_BCM_VK_H */
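A small decode sketch for the reboot-reason nibble carried in the firmware status word read from BAR space; the fwsts value here is a sample constant rather than a real register read:

#include <linux/misc/bcm_vk.h>
#include <stdio.h>

int main(void)
{
	__u32 fwsts = VK_FWSTS_RESET_TEMP | VK_FWSTS_INIT_DONE;	/* sample */

	switch (fwsts & VK_FWSTS_RESET_REASON_MASK) {
	case VK_FWSTS_RESET_SYS_PWRUP: puts("power-up");      break;
	case VK_FWSTS_RESET_TEMP:      puts("thermal reset"); break;
	case VK_FWSTS_RESET_PCI_FLR:   puts("PCI FLR");       break;
	default:                       puts("other/unknown"); break;
	}
	return 0;
}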
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index 923cc162609c..a246e11988d5 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -4,6 +4,7 @@
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
+#include <linux/types.h>
#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED 2
@@ -27,6 +28,7 @@
#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
+#define MAP_HUGE_16KB HUGETLB_FLAG_ENCODE_16KB
#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
@@ -40,4 +42,17 @@
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
+struct cachestat_range {
+ __u64 off;
+ __u64 len;
+};
+
+struct cachestat {
+ __u64 nr_cache;
+ __u64 nr_dirty;
+ __u64 nr_writeback;
+ __u64 nr_evicted;
+ __u64 nr_recently_evicted;
+};
+
#endif /* _UAPI_LINUX_MMAN_H */
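These structures back the cachestat(2) syscall. A userspace sketch querying residency for the first megabyte of a file; __NR_cachestat assumes headers from a kernel that has the syscall, and there is no glibc wrapper at the time of writing:

#define _GNU_SOURCE
#include <linux/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 1 << 20 };
	struct cachestat cs;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
		printf("cached: %llu dirty: %llu writeback: %llu\n",
		       (unsigned long long)cs.nr_cache,
		       (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback);
	close(fd);
	return 0;
}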
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 27a39847d55c..e7401ade6822 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -58,7 +58,7 @@ struct mmc_ioc_cmd {
*/
struct mmc_ioc_multi_cmd {
__u64 num_of_cmds;
- struct mmc_ioc_cmd cmds[0];
+ struct mmc_ioc_cmd cmds[];
};
#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
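The flexible-array conversion above matters to callers because the multi-command block is allocated with space for N trailing commands. A sketch of the allocation pattern (command setup and the MMC_IOC_MULTI_CMD ioctl itself are elided):

#include <linux/mmc/ioctl.h>
#include <stdlib.h>

struct mmc_ioc_multi_cmd *alloc_multi_cmd(unsigned int n)
{
	struct mmc_ioc_multi_cmd *mc;

	/* Header plus n trailing struct mmc_ioc_cmd entries. */
	mc = calloc(1, sizeof(*mc) + n * sizeof(mc->cmds[0]));
	if (mc)
		mc->num_of_cmds = n;
	return mc;
}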
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
index 50d98ec5e866..03a33ffffcba 100644
--- a/include/uapi/linux/module.h
+++ b/include/uapi/linux/module.h
@@ -5,5 +5,6 @@
/* Flags for sys_finit_module: */
#define MODULE_INIT_IGNORE_MODVERSIONS 1
#define MODULE_INIT_IGNORE_VERMAGIC 2
+#define MODULE_INIT_COMPRESSED_FILE 4
#endif /* _UAPI_LINUX_MODULE_H */
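A sketch of the new flag in use: pass a compressed module image to finit_module(2) and let the kernel decompress it. This assumes a kernel built with module decompression support; the raw syscall is used since glibc has no wrapper:

#define _GNU_SOURCE
#include <linux/module.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY | O_CLOEXEC)) < 0)
		return 1;

	return syscall(__NR_finit_module, fd, "",
		       MODULE_INIT_COMPRESSED_FILE);
}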
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 96a0240f23fe..ad5478dbad00 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -1,6 +1,8 @@
#ifndef _UAPI_LINUX_MOUNT_H
#define _UAPI_LINUX_MOUNT_H
+#include <linux/types.h>
+
/*
* These are the fs-independent mount-flags: up to 32 flags are supported
*
@@ -16,6 +18,7 @@
#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */
#define MS_NOATIME 1024 /* Do not update access times. */
#define MS_NODIRATIME 2048 /* Do not update directory access times */
#define MS_BIND 4096
@@ -70,7 +73,9 @@
#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */
#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
-#define MOVE_MOUNT__MASK 0x00000077
+#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */
+#define MOVE_MOUNT_BENEATH 0x00000200 /* Mount beneath top mount */
+#define MOVE_MOUNT__MASK 0x00000377
/*
* fsopen() flags.
@@ -95,8 +100,9 @@ enum fsconfig_command {
FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */
FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */
FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */
- FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */
+ FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */
FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */
+ FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */
};
/*
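
A sketch of the FSCONFIG_CMD_CREATE_EXCL semantics via the raw new-mount-API syscalls (assuming SYS_fsopen/SYS_fsconfig are known to <sys/syscall.h>; glibc provides no wrappers): creation fails with EBUSY rather than silently binding to an existing superblock:

    #include <linux/mount.h>
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int create_excl_tmpfs(void)
    {
        int fsfd = syscall(SYS_fsopen, "tmpfs", 0);

        if (fsfd < 0)
            return -1;
        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
        /* demand a brand-new superblock; EBUSY if one would be reused */
        return syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE_EXCL,
                       NULL, NULL, 0);
    }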
@@ -116,5 +122,90 @@ enum fsconfig_command {
#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */
#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
+#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
+
+/*
+ * mount_setattr()
+ */
+struct mount_attr {
+ __u64 attr_set;
+ __u64 attr_clr;
+ __u64 propagation;
+ __u64 userns_fd;
+};
+
+/* List of all mount_attr versions. */
+#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+
+
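
A short sketch of mount_setattr(2) with the struct above, making a subtree read-only and refusing symlink traversal (assumes __NR_mount_setattr from kernel >= 5.12 headers and AT_RECURSIVE from <linux/fcntl.h>):

    #include <linux/fcntl.h> /* AT_FDCWD, AT_RECURSIVE */
    #include <linux/mount.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int harden_subtree(const char *path)
    {
        struct mount_attr attr = {
            .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSYMFOLLOW,
        };

        /* AT_RECURSIVE applies the change to the whole subtree */
        return syscall(__NR_mount_setattr, AT_FDCWD, path, AT_RECURSIVE,
                       &attr, MOUNT_ATTR_SIZE_VER0);
    }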
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). The kernel will set the @mask
+ * field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The actually used
+ * size is returned in @size.
+ */
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 __spare1;
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused ID, as seen in /proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 __spare2[50];
+ char str[]; /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
#endif /* _UAPI_LINUX_MOUNT_H */
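
For the statmount(2) pieces above, a minimal sketch of the request/response dance, including the string-offset convention (assumes __NR_statmount from kernel >= 6.8 headers; the mount ID would come from listmount(2) or statx(2)):

    #include <linux/mount.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void show_mount(__u64 mnt_id)
    {
        char buf[4096]; /* fixed part plus room for the strings */
        struct statmount *sm = (struct statmount *)buf;
        struct mnt_id_req req = {
            .size = MNT_ID_REQ_SIZE_VER0,
            .mnt_id = mnt_id,
            .param = STATMOUNT_SB_BASIC | STATMOUNT_FS_TYPE |
                     STATMOUNT_MNT_POINT,
        };

        /* EOVERFLOW here means buf was too small for the strings */
        if (syscall(__NR_statmount, &req, sm, sizeof(buf), 0) < 0)
            return;
        if (sm->mask & STATMOUNT_FS_TYPE)
            printf("fs type:    %s\n", sm->str + sm->fs_type);
        if (sm->mask & STATMOUNT_MNT_POINT)
            printf("mounted on: %s\n", sm->str + sm->mnt_point);
    }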
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 9762660df741..74cfe496891e 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -2,8 +2,16 @@
#ifndef _UAPI_MPTCP_H
#define _UAPI_MPTCP_H
+#ifndef __KERNEL__
+#include <netinet/in.h> /* for sockaddr_in and sockaddr_in6 */
+#include <sys/socket.h> /* for struct sockaddr */
+#endif
+
#include <linux/const.h>
#include <linux/types.h>
+#include <linux/in.h> /* for sockaddr_in */
+#include <linux/in6.h> /* for sockaddr_in6 */
+#include <linux/socket.h> /* for sockaddr_storage and sa_family */
#define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0)
#define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1)
@@ -15,80 +23,20 @@
#define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7)
#define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8)
-enum {
- MPTCP_SUBFLOW_ATTR_UNSPEC,
- MPTCP_SUBFLOW_ATTR_TOKEN_REM,
- MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
- MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
- MPTCP_SUBFLOW_ATTR_MAP_SEQ,
- MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
- MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
- MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
- MPTCP_SUBFLOW_ATTR_FLAGS,
- MPTCP_SUBFLOW_ATTR_ID_REM,
- MPTCP_SUBFLOW_ATTR_ID_LOC,
- MPTCP_SUBFLOW_ATTR_PAD,
- __MPTCP_SUBFLOW_ATTR_MAX
-};
-
-#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
-
-/* netlink interface */
-#define MPTCP_PM_NAME "mptcp_pm"
#define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds"
-#define MPTCP_PM_VER 0x1
-
-/*
- * ATTR types defined for MPTCP
- */
-enum {
- MPTCP_PM_ATTR_UNSPEC,
-
- MPTCP_PM_ATTR_ADDR, /* nested address */
- MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */
- MPTCP_PM_ATTR_SUBFLOWS, /* u32 */
-
- __MPTCP_PM_ATTR_MAX
-};
-
-#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1)
-
-enum {
- MPTCP_PM_ADDR_ATTR_UNSPEC,
-
- MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */
- MPTCP_PM_ADDR_ATTR_ID, /* u8 */
- MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */
- MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */
- MPTCP_PM_ADDR_ATTR_PORT, /* u16 */
- MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */
- MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */
-
- __MPTCP_PM_ADDR_ATTR_MAX
-};
+#define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events"
-#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
-
-#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
-#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
-#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
-
-enum {
- MPTCP_PM_CMD_UNSPEC,
-
- MPTCP_PM_CMD_ADD_ADDR,
- MPTCP_PM_CMD_DEL_ADDR,
- MPTCP_PM_CMD_GET_ADDR,
- MPTCP_PM_CMD_FLUSH_ADDRS,
- MPTCP_PM_CMD_SET_LIMITS,
- MPTCP_PM_CMD_GET_LIMITS,
-
- __MPTCP_PM_CMD_AFTER_LAST
-};
+#include <linux/mptcp_pm.h>
#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
+#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
+
struct mptcp_info {
__u8 mptcpi_subflows;
__u8 mptcpi_add_addr_signal;
@@ -101,6 +49,76 @@ struct mptcp_info {
__u64 mptcpi_write_seq;
__u64 mptcpi_snd_una;
__u64 mptcpi_rcv_nxt;
+ __u8 mptcpi_local_addr_used;
+ __u8 mptcpi_local_addr_max;
+ __u8 mptcpi_csum_enabled;
+ __u32 mptcpi_retransmits;
+ __u64 mptcpi_bytes_retrans;
+ __u64 mptcpi_bytes_sent;
+ __u64 mptcpi_bytes_received;
+ __u64 mptcpi_bytes_acked;
+ __u8 mptcpi_subflows_total;
};
+/* MPTCP Reset reason codes, rfc8684 */
+#define MPTCP_RST_EUNSPEC 0
+#define MPTCP_RST_EMPTCP 1
+#define MPTCP_RST_ERESOURCE 2
+#define MPTCP_RST_EPROHIBIT 3
+#define MPTCP_RST_EWQ2BIG 4
+#define MPTCP_RST_EBADPERF 5
+#define MPTCP_RST_EMIDDLEBOX 6
+
+struct mptcp_subflow_data {
+ __u32 size_subflow_data; /* size of this structure in userspace */
+ __u32 num_subflows; /* must be 0, set by kernel */
+ __u32 size_kernel; /* must be 0, set by kernel */
+ __u32 size_user; /* size of one element in data[] */
+} __attribute__((aligned(8)));
+
+struct mptcp_subflow_addrs {
+ union {
+ __kernel_sa_family_t sa_family;
+ struct sockaddr sa_local;
+ struct sockaddr_in sin_local;
+ struct sockaddr_in6 sin6_local;
+ struct __kernel_sockaddr_storage ss_local;
+ };
+ union {
+ struct sockaddr sa_remote;
+ struct sockaddr_in sin_remote;
+ struct sockaddr_in6 sin6_remote;
+ struct __kernel_sockaddr_storage ss_remote;
+ };
+};
+
+struct mptcp_subflow_info {
+ __u32 id;
+ struct mptcp_subflow_addrs addrs;
+};
+
+struct mptcp_full_info {
+ __u32 size_tcpinfo_kernel; /* must be 0, set by kernel */
+ __u32 size_tcpinfo_user;
+ __u32 size_sfinfo_kernel; /* must be 0, set by kernel */
+ __u32 size_sfinfo_user;
+ __u32 num_subflows; /* must be 0, set by kernel (real subflow count) */
+ __u32 size_arrays_user; /* max subflows that userspace is interested in;
+ * the buffers at subflow_info/tcp_info
+ * are respectively at least:
+ * size_arrays * size_sfinfo_user
+ * size_arrays * size_tcpinfo_user
+ * bytes wide
+ */
+ __aligned_u64 subflow_info;
+ __aligned_u64 tcp_info;
+ struct mptcp_info mptcp_info;
+};
+
+/* MPTCP socket options */
+#define MPTCP_INFO 1
+#define MPTCP_TCPINFO 2
+#define MPTCP_SUBFLOW_ADDRS 3
+#define MPTCP_FULL_INFO 4
+
#endif /* _UAPI_MPTCP_H */
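
A sketch of the new MPTCP_FULL_INFO getsockopt: with no per-subflow buffers supplied, only the counters and the embedded mptcp_info are filled in (SOL_MPTCP comes from <linux/socket.h>; a fallback define is used in case libc headers lack it):

    #include <linux/mptcp.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    #ifndef SOL_MPTCP
    #define SOL_MPTCP 284 /* from <linux/socket.h> */
    #endif

    static void dump_mptcp(int fd)
    {
        struct mptcp_full_info fi;
        socklen_t len = sizeof(fi);

        memset(&fi, 0, sizeof(fi)); /* NULL arrays, size_arrays_user == 0 */
        if (getsockopt(fd, SOL_MPTCP, MPTCP_FULL_INFO, &fi, &len) == 0)
            printf("%u subflows, %llu bytes sent\n", fi.num_subflows,
                   (unsigned long long)fi.mptcp_info.mptcpi_bytes_sent);
    }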
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
new file mode 100644
index 000000000000..50589e5dd6a3
--- /dev/null
+++ b/include/uapi/linux/mptcp_pm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/mptcp_pm.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_MPTCP_PM_H
+#define _UAPI_LINUX_MPTCP_PM_H
+
+#define MPTCP_PM_NAME "mptcp_pm"
+#define MPTCP_PM_VER 1
+
+/**
+ * enum mptcp_event_type
+ * @MPTCP_EVENT_UNSPEC: unused event
+ * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport. A new MPTCP connection has been created. This is a good
+ * time to allocate memory and send ADD_ADDR if needed. Depending on the
+ * traffic pattern, it can take a long time until MPTCP_EVENT_ESTABLISHED
+ * is sent.
+ * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport. An MPTCP connection is established (can start new subflows).
+ * @MPTCP_EVENT_CLOSED: token. An MPTCP connection has stopped.
+ * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
+ * new address has been announced by the peer.
+ * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
+ * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
+ * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
+ * subflow has been established. 'error' should not be set.
+ * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
+ * closed. An error (copy of sk_err) could be set if an error has been
+ * detected for this subflow.
+ * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
+ * subflow has changed. 'error' should not be set.
+ * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
+ * listener is created.
+ * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
+ * is closed.
+ */
+enum mptcp_event_type {
+ MPTCP_EVENT_UNSPEC,
+ MPTCP_EVENT_CREATED,
+ MPTCP_EVENT_ESTABLISHED,
+ MPTCP_EVENT_CLOSED,
+ MPTCP_EVENT_ANNOUNCED = 6,
+ MPTCP_EVENT_REMOVED,
+ MPTCP_EVENT_SUB_ESTABLISHED = 10,
+ MPTCP_EVENT_SUB_CLOSED,
+ MPTCP_EVENT_SUB_PRIORITY = 13,
+ MPTCP_EVENT_LISTENER_CREATED = 15,
+ MPTCP_EVENT_LISTENER_CLOSED,
+};
+
+enum {
+ MPTCP_PM_ADDR_ATTR_UNSPEC,
+ MPTCP_PM_ADDR_ATTR_FAMILY,
+ MPTCP_PM_ADDR_ATTR_ID,
+ MPTCP_PM_ADDR_ATTR_ADDR4,
+ MPTCP_PM_ADDR_ATTR_ADDR6,
+ MPTCP_PM_ADDR_ATTR_PORT,
+ MPTCP_PM_ADDR_ATTR_FLAGS,
+ MPTCP_PM_ADDR_ATTR_IF_IDX,
+
+ __MPTCP_PM_ADDR_ATTR_MAX
+};
+#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
+enum {
+ MPTCP_SUBFLOW_ATTR_UNSPEC,
+ MPTCP_SUBFLOW_ATTR_TOKEN_REM,
+ MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
+ MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+ MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
+ MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+ MPTCP_SUBFLOW_ATTR_FLAGS,
+ MPTCP_SUBFLOW_ATTR_ID_REM,
+ MPTCP_SUBFLOW_ATTR_ID_LOC,
+ MPTCP_SUBFLOW_ATTR_PAD,
+
+ __MPTCP_SUBFLOW_ATTR_MAX
+};
+#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_ENDPOINT_ADDR = 1,
+
+ __MPTCP_PM_ENDPOINT_MAX
+};
+#define MPTCP_PM_ENDPOINT_MAX (__MPTCP_PM_ENDPOINT_MAX - 1)
+
+enum {
+ MPTCP_PM_ATTR_UNSPEC,
+ MPTCP_PM_ATTR_ADDR,
+ MPTCP_PM_ATTR_RCV_ADD_ADDRS,
+ MPTCP_PM_ATTR_SUBFLOWS,
+ MPTCP_PM_ATTR_TOKEN,
+ MPTCP_PM_ATTR_LOC_ID,
+ MPTCP_PM_ATTR_ADDR_REMOTE,
+
+ __MPTCP_ATTR_AFTER_LAST
+};
+#define MPTCP_PM_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
+
+enum mptcp_event_attr {
+ MPTCP_ATTR_UNSPEC,
+ MPTCP_ATTR_TOKEN,
+ MPTCP_ATTR_FAMILY,
+ MPTCP_ATTR_LOC_ID,
+ MPTCP_ATTR_REM_ID,
+ MPTCP_ATTR_SADDR4,
+ MPTCP_ATTR_SADDR6,
+ MPTCP_ATTR_DADDR4,
+ MPTCP_ATTR_DADDR6,
+ MPTCP_ATTR_SPORT,
+ MPTCP_ATTR_DPORT,
+ MPTCP_ATTR_BACKUP,
+ MPTCP_ATTR_ERROR,
+ MPTCP_ATTR_FLAGS,
+ MPTCP_ATTR_TIMEOUT,
+ MPTCP_ATTR_IF_IDX,
+ MPTCP_ATTR_RESET_REASON,
+ MPTCP_ATTR_RESET_FLAGS,
+ MPTCP_ATTR_SERVER_SIDE,
+
+ __MPTCP_ATTR_MAX
+};
+#define MPTCP_ATTR_MAX (__MPTCP_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_CMD_UNSPEC,
+ MPTCP_PM_CMD_ADD_ADDR,
+ MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_CMD_FLUSH_ADDRS,
+ MPTCP_PM_CMD_SET_LIMITS,
+ MPTCP_PM_CMD_GET_LIMITS,
+ MPTCP_PM_CMD_SET_FLAGS,
+ MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_CMD_SUBFLOW_DESTROY,
+
+ __MPTCP_PM_CMD_AFTER_LAST
+};
+#define MPTCP_PM_CMD_MAX (__MPTCP_PM_CMD_AFTER_LAST - 1)
+
+#endif /* _UAPI_LINUX_MPTCP_PM_H */
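
A sketch of consuming these path-manager events over generic netlink. Resolving the "mptcp_pm" family and the "mptcp_pm_events" multicast group ID via CTRL_CMD_GETFAMILY is elided; grp_id stands in for the resolved value:

    #include <linux/genetlink.h>
    #include <linux/netlink.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270
    #endif

    static int listen_pm_events(unsigned int grp_id)
    {
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);

        if (fd < 0)
            return -1;
        /* join the group; payloads carry enum mptcp_event_attr TLVs */
        if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                       &grp_id, sizeof(grp_id)) < 0) {
            close(fd);
            return -1;
        }
        return fd; /* recvmsg() now yields mptcp_event_type notifications */
    }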
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 11c8c1fc1124..1a42f5f9b31b 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -113,8 +113,8 @@ struct igmpmsg {
__u32 unused1,unused2;
unsigned char im_msgtype; /* What is this */
unsigned char im_mbz; /* Must be zero */
- unsigned char im_vif; /* Interface (this ought to be a vifi_t!) */
- unsigned char unused3;
+ unsigned char im_vif; /* Low 8 bits of Interface */
+ unsigned char im_vif_hi; /* High 8 bits of Interface */
struct in_addr im_src,im_dst;
};
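
Since a VIF index can now exceed 255, userspace multicast routing daemons reassemble it from both bytes; a sketch:

    #include <linux/mroute.h>

    static unsigned int igmpmsg_vif(const struct igmpmsg *msg)
    {
        /* low byte in im_vif, high byte in the repurposed im_vif_hi */
        return msg->im_vif | ((unsigned int)msg->im_vif_hi << 8);
    }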
@@ -169,6 +169,7 @@ enum {
IPMRA_CREPORT_SRC_ADDR,
IPMRA_CREPORT_DST_ADDR,
IPMRA_CREPORT_PKT,
+ IPMRA_CREPORT_TABLE,
__IPMRA_CREPORT_MAX
};
#define IPMRA_CREPORT_MAX (__IPMRA_CREPORT_MAX - 1)
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index c36177a86516..1d90c21a6251 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/in6.h> /* For struct sockaddr_in6. */
@@ -134,6 +134,7 @@ struct mrt6msg {
#define MRT6MSG_NOCACHE 1
#define MRT6MSG_WRONGMIF 2
#define MRT6MSG_WHOLEPKT 3 /* used for user-level encap */
+#define MRT6MSG_WRMIFWHOLE 4 /* For PIM Register and assert processing */
__u8 im6_mbz; /* must be zero */
__u8 im6_msgtype; /* what type of message */
__u16 im6_mif; /* mif rec'd on */
diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h
index 6aeb13ef0b1e..bd4424de56ff 100644
--- a/include/uapi/linux/mrp_bridge.h
+++ b/include/uapi/linux/mrp_bridge.h
@@ -61,6 +61,7 @@ enum br_mrp_tlv_header_type {
BR_MRP_TLV_HEADER_IN_TOPO = 0x7,
BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8,
BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9,
+ BR_MRP_TLV_HEADER_IN_LINK_STATUS = 0xa,
BR_MRP_TLV_HEADER_OPTION = 0x7f,
};
@@ -70,90 +71,4 @@ enum br_mrp_sub_tlv_header_type {
BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3,
};
-struct br_mrp_tlv_hdr {
- __u8 type;
- __u8 length;
-};
-
-struct br_mrp_sub_tlv_hdr {
- __u8 type;
- __u8 length;
-};
-
-struct br_mrp_end_hdr {
- struct br_mrp_tlv_hdr hdr;
-};
-
-struct br_mrp_common_hdr {
- __be16 seq_id;
- __u8 domain[MRP_DOMAIN_UUID_LENGTH];
-};
-
-struct br_mrp_ring_test_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 state;
- __be16 transitions;
- __be32 timestamp;
-};
-
-struct br_mrp_ring_topo_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 interval;
-};
-
-struct br_mrp_ring_link_hdr {
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 interval;
- __be16 blocked;
-};
-
-struct br_mrp_sub_opt_hdr {
- __u8 type;
- __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH];
-};
-
-struct br_mrp_test_mgr_nack_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 other_prio;
- __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_test_prop_hdr {
- __be16 prio;
- __u8 sa[ETH_ALEN];
- __be16 other_prio;
- __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_oui_hdr {
- __u8 oui[MRP_OUI_LENGTH];
-};
-
-struct br_mrp_in_test_hdr {
- __be16 id;
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 state;
- __be16 transitions;
- __be32 timestamp;
-};
-
-struct br_mrp_in_topo_hdr {
- __u8 sa[ETH_ALEN];
- __be16 id;
- __be16 interval;
-};
-
-struct br_mrp_in_link_hdr {
- __u8 sa[ETH_ALEN];
- __be16 port_role;
- __be16 id;
- __be16 interval;
-};
-
#endif
diff --git a/include/uapi/linux/n_r3964.h b/include/uapi/linux/n_r3964.h
deleted file mode 100644
index 6bbd18520f30..000000000000
--- a/include/uapi/linux/n_r3964.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-
-#ifndef _UAPI__LINUX_N_R3964_H__
-#define _UAPI__LINUX_N_R3964_H__
-
-/* line disciplines for r3964 protocol */
-
-
-/*
- * Ioctl-commands
- */
-
-#define R3964_ENABLE_SIGNALS 0x5301
-#define R3964_SETPRIORITY 0x5302
-#define R3964_USE_BCC 0x5303
-#define R3964_READ_TELEGRAM 0x5304
-
-/* Options for R3964_SETPRIORITY */
-#define R3964_MASTER 0
-#define R3964_SLAVE 1
-
-/* Options for R3964_ENABLE_SIGNALS */
-#define R3964_SIG_ACK 0x0001
-#define R3964_SIG_DATA 0x0002
-#define R3964_SIG_ALL 0x000f
-#define R3964_SIG_NONE 0x0000
-#define R3964_USE_SIGIO 0x1000
-
-/*
- * r3964 operation states:
- */
-
-/* types for msg_id: */
-enum {R3964_MSG_ACK=1, R3964_MSG_DATA };
-
-#define R3964_MAX_MSG_COUNT 32
-
-/* error codes for client messages */
-#define R3964_OK 0 /* no error. */
-#define R3964_TX_FAIL -1 /* transmission error, block NOT sent */
-#define R3964_OVERFLOW -2 /* msg queue overflow */
-
-/* the client gets this struct when calling read(fd,...): */
-struct r3964_client_message {
- int msg_id;
- int arg;
- int error_code;
-};
-
-#define R3964_MTU 256
-
-
-
-#endif /* _UAPI__LINUX_N_R3964_H__ */
diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h
index c5d0ef7aa7d5..2d0b90964227 100644
--- a/include/uapi/linux/nbd-netlink.h
+++ b/include/uapi/linux/nbd-netlink.h
@@ -35,6 +35,7 @@ enum {
NBD_ATTR_SOCKETS,
NBD_ATTR_DEAD_CONN_TIMEOUT,
NBD_ATTR_DEVICE_LIST,
+ NBD_ATTR_BACKEND_IDENTIFIER,
__NBD_ATTR_MAX,
};
#define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1)
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 20d6cc91435d..80ce0ef43afd 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -11,6 +11,8 @@
* Cleanup PARANOIA usage & code.
* 2004/02/19 Paul Clements
* Removed PARANOIA, plus various cleanup and comments
+ * 2023 Copyright Red Hat
+ * Link to userspace extensions, favor cookie over handle.
*/
#ifndef _UAPILINUX_NBD_H
@@ -30,12 +32,18 @@
#define NBD_SET_TIMEOUT _IO( 0xab, 9 )
#define NBD_SET_FLAGS _IO( 0xab, 10)
+/*
+ * See also https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
+ * for additional userspace extensions not yet utilized in the kernel module.
+ */
+
enum {
NBD_CMD_READ = 0,
NBD_CMD_WRITE = 1,
NBD_CMD_DISC = 2,
NBD_CMD_FLUSH = 3,
NBD_CMD_TRIM = 4
+ /* userspace defines additional extension commands */
};
/* values for flags field, these are server interaction specific. */
@@ -64,15 +72,19 @@ enum {
#define NBD_REQUEST_MAGIC 0x25609513
#define NBD_REPLY_MAGIC 0x67446698
/* Do *not* use magics: 0x12560953 0x96744668. */
+/* magic 0x668e33ef is for structured replies, not yet supported by the kernel */
/*
* This is the packet used for communication between client and
* server. All data are in network byte order.
*/
struct nbd_request {
- __be32 magic;
- __be32 type; /* == READ || == WRITE */
- char handle[8];
+ __be32 magic; /* NBD_REQUEST_MAGIC */
+ __be32 type; /* See NBD_CMD_* */
+ union {
+ __be64 cookie; /* Opaque identifier for request */
+ char handle[8]; /* older spelling of cookie */
+ };
__be64 from;
__be32 len;
} __attribute__((packed));
@@ -82,8 +94,11 @@ struct nbd_request {
* it has completed an I/O request (or an error occurs).
*/
struct nbd_reply {
- __be32 magic;
+ __be32 magic; /* NBD_REPLY_MAGIC */
__be32 error; /* 0 = ok, else error */
- char handle[8]; /* handle you got from request */
+ union {
+ __be64 cookie; /* Opaque identifier from request */
+ char handle[8]; /* older spelling of cookie */
+ };
};
#endif /* _UAPILINUX_NBD_H */
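
A sketch of building a request with the new cookie spelling; the on-the-wire fields are big-endian, while the cookie itself is opaque and echoed back verbatim by the server:

    #include <endian.h>
    #include <linux/nbd.h>
    #include <string.h>

    static void fill_read_req(struct nbd_request *req, __u64 cookie,
                              __u64 offset, __u32 length)
    {
        memset(req, 0, sizeof(*req));
        req->magic  = htobe32(NBD_REQUEST_MAGIC);
        req->type   = htobe32(NBD_CMD_READ);
        req->cookie = cookie; /* opaque, no byte order conversion needed */
        req->from   = htobe64(offset);
        req->len    = htobe32(length);
    }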
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 8cf1e4884fd5..73516e263627 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -30,25 +30,25 @@ struct nd_cmd_get_config_data_hdr {
__u32 in_offset;
__u32 in_length;
__u32 status;
- __u8 out_buf[0];
+ __u8 out_buf[];
} __packed;
struct nd_cmd_set_config_hdr {
__u32 in_offset;
__u32 in_length;
- __u8 in_buf[0];
+ __u8 in_buf[];
} __packed;
struct nd_cmd_vendor_hdr {
__u32 opcode;
__u32 in_length;
- __u8 in_buf[0];
+ __u8 in_buf[];
} __packed;
struct nd_cmd_vendor_tail {
__u32 status;
__u32 out_length;
- __u8 out_buf[0];
+ __u8 out_buf[];
} __packed;
struct nd_cmd_ars_cap {
@@ -86,7 +86,7 @@ struct nd_cmd_ars_status {
__u32 reserved;
__u64 err_address;
__u64 length;
- } __packed records[0];
+ } __packed records[];
} __packed;
struct nd_cmd_clear_error {
@@ -189,7 +189,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */
#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
-#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */
enum nd_driver_flags {
@@ -198,7 +197,6 @@ enum nd_driver_flags {
ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK,
ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
- ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index dc8b72201f6c..5e67a7eaf4a7 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -31,6 +31,9 @@ enum {
NDA_PROTOCOL, /* Originator of entry */
NDA_NH_ID,
NDA_FDB_EXT_ATTRS,
+ NDA_FLAGS_EXT,
+ NDA_NDM_STATE_MASK,
+ NDA_NDM_FLAGS_MASK,
__NDA_MAX
};
@@ -40,14 +43,17 @@ enum {
* Neighbor Cache Entry Flags
*/
-#define NTF_USE 0x01
-#define NTF_SELF 0x02
-#define NTF_MASTER 0x04
-#define NTF_PROXY 0x08 /* == ATF_PUBL */
-#define NTF_EXT_LEARNED 0x10
-#define NTF_OFFLOADED 0x20
-#define NTF_STICKY 0x40
-#define NTF_ROUTER 0x80
+#define NTF_USE (1 << 0)
+#define NTF_SELF (1 << 1)
+#define NTF_MASTER (1 << 2)
+#define NTF_PROXY (1 << 3) /* == ATF_PUBL */
+#define NTF_EXT_LEARNED (1 << 4)
+#define NTF_OFFLOADED (1 << 5)
+#define NTF_STICKY (1 << 6)
+#define NTF_ROUTER (1 << 7)
+/* Extended flags under NDA_FLAGS_EXT: */
+#define NTF_EXT_MANAGED (1 << 0)
+#define NTF_EXT_LOCKED (1 << 1)
/*
* Neighbor Cache Entry States.
@@ -65,9 +71,27 @@ enum {
#define NUD_PERMANENT 0x80
#define NUD_NONE 0x00
-/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
- and make no address resolution or NUD.
- NUD_PERMANENT also cannot be deleted by garbage collectors.
+/* NUD_NOARP & NUD_PERMANENT are pseudostates; they never change and perform
+ * no address resolution or NUD.
+ *
+ * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true
+ * for dynamic entries with NTF_EXT_LEARNED flag as well. However, upon carrier
+ * down event, NUD_PERMANENT entries are not flushed whereas NTF_EXT_LEARNED
+ * flagged entries explicitly are (which is also consistent with the routing
+ * subsystem).
+ *
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
+ *
+ * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
+ * of a user space control plane, and automatically refreshed so that (if
+ * possible) they remain in NUD_REACHABLE state.
+ *
+ * NTF_EXT_LOCKED flagged bridge FDB entries are entries generated by the
+ * bridge in response to a host trying to communicate via a locked bridge port
+ * with MAB enabled. Their purpose is to notify user space that a host requires
+ * authentication.
*/
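
A sketch of how the split plays out in an RTM_NEWNEIGH request: base flags still travel in the 8-bit ndm_flags field, while NTF_EXT_* values ride in a separate NDA_FLAGS_EXT u32 attribute (netlink message assembly elided):

    #include <linux/neighbour.h>
    #include <linux/rtnetlink.h>
    #include <sys/socket.h> /* AF_INET */

    static void fill_managed_neigh(struct ndmsg *ndm, __u32 *ext_flags)
    {
        ndm->ndm_family = AF_INET;
        ndm->ndm_state  = NUD_NONE;   /* the kernel manages the state */
        ndm->ndm_flags  = 0;          /* base NTF_* flags fit 8 bits */
        *ext_flags = NTF_EXT_MANAGED; /* goes into NDA_FLAGS_EXT */
    }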
struct nda_cacheinfo {
@@ -136,6 +160,7 @@ enum {
NDTPA_QUEUE_LENBYTES, /* u32 */
NDTPA_MCAST_REPROBES, /* u32 */
NDTPA_PAD,
+ NDTPA_INTERVAL_PROBE_TIME_MS, /* u64, msecs */
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 66048cc5d7b3..84f622a66a7a 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -29,12 +29,12 @@ struct net_dm_config_entry {
struct net_dm_config_msg {
__u32 entries;
- struct net_dm_config_entry options[0];
+ struct net_dm_config_entry options[];
};
struct net_dm_alert_msg {
__u32 entries;
- struct net_dm_drop_point points[0];
+ struct net_dm_drop_point points[];
};
struct net_dm_user_msg {
@@ -93,6 +93,7 @@ enum net_dm_attr {
NET_DM_ATTR_SW_DROPS, /* flag */
NET_DM_ATTR_HW_DROPS, /* flag */
NET_DM_ATTR_FLOW_ACTION_COOKIE, /* binary */
+ NET_DM_ATTR_REASON, /* string */
__NET_DM_ATTR_MAX,
NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 7ed0b3d1c00a..a2c66b3d7f0f 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* for SO_TIMESTAMPING */
-/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
+/* SO_TIMESTAMPING flags */
enum {
SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
@@ -30,8 +30,10 @@ enum {
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
+ SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
+ SOF_TIMESTAMPING_OPT_ID_TCP = (1 << 16),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_ID_TCP,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
@@ -47,9 +49,21 @@ enum {
SOF_TIMESTAMPING_TX_ACK)
/**
+ * struct so_timestamping - SO_TIMESTAMPING parameter
+ *
+ * @flags: SO_TIMESTAMPING flags
+ * @bind_phc: Index of PTP virtual clock bound to sock. This is available
+ * if flag SOF_TIMESTAMPING_BIND_PHC is set.
+ */
+struct so_timestamping {
+ int flags;
+ int bind_phc;
+};
+
+/**
* struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
- * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP
+ * @flags: one of HWTSTAMP_FLAG_*
* @tx_type: one of HWTSTAMP_TX_*
* @rx_filter: one of HWTSTAMP_FILTER_*
*
@@ -65,6 +79,21 @@ struct hwtstamp_config {
int rx_filter;
};
+/* possible values for hwtstamp_config->flags */
+enum hwtstamp_flags {
+ /*
+ * With this flag, the user can get the active bond interface's
+ * PHC index. Note this PHC index is not stable: when a failover
+ * occurs, the bond's active interface changes, and so does the
+ * PHC index.
+ */
+ HWTSTAMP_FLAG_BONDED_PHC_INDEX = (1<<0),
+#define HWTSTAMP_FLAG_BONDED_PHC_INDEX HWTSTAMP_FLAG_BONDED_PHC_INDEX
+
+ HWTSTAMP_FLAG_LAST = HWTSTAMP_FLAG_BONDED_PHC_INDEX,
+ HWTSTAMP_FLAG_MASK = (HWTSTAMP_FLAG_LAST - 1) | HWTSTAMP_FLAG_LAST
+};
+
/* possible values for hwtstamp_config->tx_type */
enum hwtstamp_tx_types {
/*
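
A sketch of the extended SO_TIMESTAMPING argument: when SOF_TIMESTAMPING_BIND_PHC is set, the second int selects the PTP virtual clock to bind the socket's timestamps to:

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>

    static int bind_phc(int fd, int phc_index)
    {
        struct so_timestamping ts = {
            .flags = SOF_TIMESTAMPING_TX_HARDWARE |
                     SOF_TIMESTAMPING_RX_HARDWARE |
                     SOF_TIMESTAMPING_RAW_HARDWARE |
                     SOF_TIMESTAMPING_BIND_PHC,
            .bind_phc = phc_index, /* PTP virtual clock index */
        };

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
    }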
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..bb65ee840cda
--- /dev/null
+++ b/include/uapi/linux/netdev.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
+ * ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ * in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+ * offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ * XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+ * non-linear XDP buffer support in ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+
+ /* private: */
+ NETDEV_XDP_ACT_MASK = 127,
+};
+
+/**
+ * enum netdev_xdp_rx_metadata
+ * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
+ * timestamp via bpf_xdp_metadata_rx_timestamp().
+ * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
+ * hash via bpf_xdp_metadata_rx_hash().
+ * @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive
+ * packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
+ */
+enum netdev_xdp_rx_metadata {
+ NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
+ NETDEV_XDP_RX_METADATA_HASH = 2,
+ NETDEV_XDP_RX_METADATA_VLAN_TAG = 4,
+};
+
+/**
+ * enum netdev_xsk_flags
+ * @NETDEV_XSK_FLAGS_TX_TIMESTAMP: HW timestamping egress packets is supported
+ * by the driver.
+ * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the
+ * driver.
+ */
+enum netdev_xsk_flags {
+ NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1,
+ NETDEV_XSK_FLAGS_TX_CHECKSUM = 2,
+};
+
+enum netdev_queue_type {
+ NETDEV_QUEUE_TYPE_RX,
+ NETDEV_QUEUE_TYPE_TX,
+};
+
+enum netdev_qstats_scope {
+ NETDEV_QSTATS_SCOPE_QUEUE = 1,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+ NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
+ NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
+ NETDEV_A_DEV_XSK_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_A_PAGE_POOL_ID = 1,
+ NETDEV_A_PAGE_POOL_IFINDEX,
+ NETDEV_A_PAGE_POOL_NAPI_ID,
+ NETDEV_A_PAGE_POOL_INFLIGHT,
+ NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
+ NETDEV_A_PAGE_POOL_DETACH_TIME,
+
+ __NETDEV_A_PAGE_POOL_MAX,
+ NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
+};
+
+enum {
+ NETDEV_A_PAGE_POOL_STATS_INFO = 1,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
+
+ __NETDEV_A_PAGE_POOL_STATS_MAX,
+ NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1)
+};
+
+enum {
+ NETDEV_A_NAPI_IFINDEX = 1,
+ NETDEV_A_NAPI_ID,
+ NETDEV_A_NAPI_IRQ,
+ NETDEV_A_NAPI_PID,
+
+ __NETDEV_A_NAPI_MAX,
+ NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
+};
+
+enum {
+ NETDEV_A_QUEUE_ID = 1,
+ NETDEV_A_QUEUE_IFINDEX,
+ NETDEV_A_QUEUE_TYPE,
+ NETDEV_A_QUEUE_NAPI_ID,
+
+ __NETDEV_A_QUEUE_MAX,
+ NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
+};
+
+enum {
+ NETDEV_A_QSTATS_IFINDEX = 1,
+ NETDEV_A_QSTATS_QUEUE_TYPE,
+ NETDEV_A_QSTATS_QUEUE_ID,
+ NETDEV_A_QSTATS_SCOPE,
+ NETDEV_A_QSTATS_RX_PACKETS = 8,
+ NETDEV_A_QSTATS_RX_BYTES,
+ NETDEV_A_QSTATS_TX_PACKETS,
+ NETDEV_A_QSTATS_TX_BYTES,
+ NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+
+ __NETDEV_A_QSTATS_MAX,
+ NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_GET,
+ NETDEV_CMD_PAGE_POOL_ADD_NTF,
+ NETDEV_CMD_PAGE_POOL_DEL_NTF,
+ NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_STATS_GET,
+ NETDEV_CMD_QUEUE_GET,
+ NETDEV_CMD_NAPI_GET,
+ NETDEV_CMD_QSTATS_GET,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+#define NETDEV_MCGRP_PAGE_POOL "page-pool"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
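
The XDP capability bits above arrive as a bitmask attribute (NETDEV_A_DEV_XDP_FEATURES) in NETDEV_CMD_DEV_GET replies; a decoding sketch with the netlink plumbing elided:

    #include <linux/netdev.h>
    #include <linux/types.h>
    #include <stdio.h>

    static void print_xdp_caps(__u64 feats)
    {
        if (feats & NETDEV_XDP_ACT_REDIRECT)
            puts("XDP_REDIRECT supported");
        if (feats & NETDEV_XDP_ACT_XSK_ZEROCOPY)
            puts("AF_XDP zero-copy supported");
        if (feats & (NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT_SG))
            puts("non-linear (multi-buffer) XDP supported");
    }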
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ca9e63d6e0e4..5a79ccb76701 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -45,11 +45,13 @@ enum nf_inet_hooks {
NF_INET_FORWARD,
NF_INET_LOCAL_OUT,
NF_INET_POST_ROUTING,
- NF_INET_NUMHOOKS
+ NF_INET_NUMHOOKS,
+ NF_INET_INGRESS = NF_INET_NUMHOOKS,
};
enum nf_dev_hooks {
NF_NETDEV_INGRESS,
+ NF_NETDEV_EGRESS,
NF_NETDEV_NUMHOOKS
};
@@ -61,7 +63,9 @@ enum {
NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
+#ifndef __KERNEL__ /* no longer supported by kernel */
NFPROTO_DECNET = 12,
+#endif
NFPROTO_NUMPROTO,
};
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 11a72a938eb1..333807efd32b 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -3,10 +3,6 @@
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _UAPI_IP_SET_H
#define _UAPI_IP_SET_H
@@ -89,14 +85,15 @@ enum {
IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */
IPSET_ATTR_MARK, /* 10 */
IPSET_ATTR_MARKMASK, /* 11 */
+ IPSET_ATTR_BITMASK, /* 12 */
/* Reserve empty slots */
IPSET_ATTR_CADT_MAX = 16,
/* Create-only specific attributes */
- IPSET_ATTR_GC,
+ IPSET_ATTR_INITVAL, /* was unused IPSET_ATTR_GC */
IPSET_ATTR_HASHSIZE,
IPSET_ATTR_MAXELEM,
IPSET_ATTR_NETMASK,
- IPSET_ATTR_PROBES,
+ IPSET_ATTR_BUCKETSIZE, /* was unused IPSET_ATTR_PROBES */
IPSET_ATTR_RESIZE,
IPSET_ATTR_SIZE,
/* Kernel-only */
@@ -157,6 +154,7 @@ enum ipset_errno {
IPSET_ERR_COMMENT,
IPSET_ERR_INVALID_MARKMASK,
IPSET_ERR_SKBINFO,
+ IPSET_ERR_BITMASK_NETMASK_EXCL,
/* Type specific error codes */
IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -214,6 +212,8 @@ enum ipset_cadt_flags {
enum ipset_create_flags {
IPSET_CREATE_FLAG_BIT_FORCEADD = 0,
IPSET_CREATE_FLAG_FORCEADD = (1 << IPSET_CREATE_FLAG_BIT_FORCEADD),
+ IPSET_CREATE_FLAG_BIT_BUCKETSIZE = 1,
+ IPSET_CREATE_FLAG_BUCKETSIZE = (1 << IPSET_CREATE_FLAG_BIT_BUCKETSIZE),
IPSET_CREATE_FLAG_BIT_MAX = 7,
};
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 4b3395082d15..26071021e986 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -106,7 +106,7 @@ enum ip_conntrack_status {
IPS_NAT_CLASH = IPS_UNTRACKED,
#endif
- /* Conntrack got a helper explicitly attached via CT target. */
+ /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index edc6ddab0de6..2d6f80d75ae7 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -15,7 +15,7 @@ enum sctp_conntrack {
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
SCTP_CONNTRACK_HEARTBEAT_SENT,
- SCTP_CONNTRACK_HEARTBEAT_ACKED,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
SCTP_CONNTRACK_MAX
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 2b8e12f7a4a6..aa4094ca2444 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -97,6 +97,15 @@ enum nft_verdicts {
* @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes)
* @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
* @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETRULE_RESET: get rules and reset stateful expressions (enum nft_rule_attributes)
+ * @NFT_MSG_DESTROYTABLE: destroy a table (enum nft_table_attributes)
+ * @NFT_MSG_DESTROYCHAIN: destroy a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DESTROYRULE: destroy a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DESTROYSET: destroy a set (enum nft_set_attributes)
+ * @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes)
+ * @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETSETELEM_RESET: get set elements and reset attached stateful expressions (enum nft_set_elem_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -124,6 +133,15 @@ enum nf_tables_msg_types {
NFT_MSG_NEWFLOWTABLE,
NFT_MSG_GETFLOWTABLE,
NFT_MSG_DELFLOWTABLE,
+ NFT_MSG_GETRULE_RESET,
+ NFT_MSG_DESTROYTABLE,
+ NFT_MSG_DESTROYCHAIN,
+ NFT_MSG_DESTROYRULE,
+ NFT_MSG_DESTROYSET,
+ NFT_MSG_DESTROYSETELEM,
+ NFT_MSG_DESTROYOBJ,
+ NFT_MSG_DESTROYFLOWTABLE,
+ NFT_MSG_GETSETELEM_RESET,
NFT_MSG_MAX,
};
@@ -161,10 +179,17 @@ enum nft_hook_attributes {
* enum nft_table_flags - nf_tables table flags
*
* @NFT_TABLE_F_DORMANT: this table is not active
+ * @NFT_TABLE_F_OWNER: this table is owned by a process
+ * @NFT_TABLE_F_PERSIST: this table shall outlive its owner
*/
enum nft_table_flags {
NFT_TABLE_F_DORMANT = 0x1,
+ NFT_TABLE_F_OWNER = 0x2,
+ NFT_TABLE_F_PERSIST = 0x4,
};
+#define NFT_TABLE_F_MASK (NFT_TABLE_F_DORMANT | \
+ NFT_TABLE_F_OWNER | \
+ NFT_TABLE_F_PERSIST)
/**
* enum nft_table_attributes - nf_tables table netlink attributes
@@ -172,6 +197,8 @@ enum nft_table_flags {
* @NFTA_TABLE_NAME: name of the table (NLA_STRING)
* @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
* @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
+ * @NFTA_TABLE_USERDATA: user data (NLA_BINARY)
+ * @NFTA_TABLE_OWNER: owner of this table through netlink portID (NLA_U32)
*/
enum nft_table_attributes {
NFTA_TABLE_UNSPEC,
@@ -180,6 +207,8 @@ enum nft_table_attributes {
NFTA_TABLE_USE,
NFTA_TABLE_HANDLE,
NFTA_TABLE_PAD,
+ NFTA_TABLE_USERDATA,
+ NFTA_TABLE_OWNER,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
@@ -206,6 +235,7 @@ enum nft_chain_flags {
* @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
* @NFTA_CHAIN_FLAGS: chain flags
* @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32)
+ * @NFTA_CHAIN_USERDATA: user data (NLA_BINARY)
*/
enum nft_chain_attributes {
NFTA_CHAIN_UNSPEC,
@@ -220,6 +250,7 @@ enum nft_chain_attributes {
NFTA_CHAIN_PAD,
NFTA_CHAIN_FLAGS,
NFTA_CHAIN_ID,
+ NFTA_CHAIN_USERDATA,
__NFTA_CHAIN_MAX
};
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
@@ -236,6 +267,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
* @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
+ * @NFTA_RULE_CHAIN_ID: add the rule to chain by ID, alternative to @NFTA_RULE_CHAIN (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
@@ -257,9 +289,11 @@ enum nft_rule_attributes {
/**
* enum nft_rule_compat_flags - nf_tables rule compat flags
*
+ * @NFT_RULE_COMPAT_F_UNUSED: unused
* @NFT_RULE_COMPAT_F_INV: invert the check result
*/
enum nft_rule_compat_flags {
+ NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
NFT_RULE_COMPAT_F_INV = (1 << 1),
NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
};
@@ -289,6 +323,7 @@ enum nft_rule_compat_attributes {
* @NFT_SET_EVAL: set can be updated from the evaluation path
* @NFT_SET_OBJECT: set contains stateful objects
* @NFT_SET_CONCAT: set contains a concatenation
+ * @NFT_SET_EXPR: set contains expressions
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
@@ -299,6 +334,7 @@ enum nft_set_flags {
NFT_SET_EVAL = 0x20,
NFT_SET_OBJECT = 0x40,
NFT_SET_CONCAT = 0x80,
+ NFT_SET_EXPR = 0x100,
};
/**
@@ -357,6 +393,7 @@ enum nft_set_field_attributes {
* @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
* @NFTA_SET_HANDLE: set handle (NLA_U64)
* @NFTA_SET_EXPR: set expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_SET_EXPRESSIONS: list of expressions (NLA_NESTED: nft_list_attributes)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -377,6 +414,7 @@ enum nft_set_attributes {
NFTA_SET_OBJ_TYPE,
NFTA_SET_HANDLE,
NFTA_SET_EXPR,
+ NFTA_SET_EXPRESSIONS,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -385,9 +423,11 @@ enum nft_set_attributes {
* enum nft_set_elem_flags - nf_tables set element flags
*
* @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval
+ * @NFT_SET_ELEM_CATCHALL: special catch-all element
*/
enum nft_set_elem_flags {
NFT_SET_ELEM_INTERVAL_END = 0x1,
+ NFT_SET_ELEM_CATCHALL = 0x2,
};
/**
@@ -402,6 +442,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
* @NFTA_SET_ELEM_OBJREF: stateful object reference (NLA_STRING)
* @NFTA_SET_ELEM_KEY_END: closing key value (NLA_NESTED: nft_data)
+ * @NFTA_SET_ELEM_EXPRESSIONS: list of expressions (NLA_NESTED: nft_list_attributes)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
@@ -415,6 +456,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_PAD,
NFTA_SET_ELEM_OBJREF,
NFTA_SET_ELEM_KEY_END,
+ NFTA_SET_ELEM_EXPRESSIONS,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -652,7 +694,7 @@ enum nft_range_ops {
* enum nft_range_attributes - nf_tables range expression netlink attributes
*
* @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers)
- * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_range_ops)
* @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes)
* @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes)
*/
@@ -698,6 +740,7 @@ enum nft_dynset_ops {
enum nft_dynset_flags {
NFT_DYNSET_F_INV = (1 << 0),
+ NFT_DYNSET_F_EXPR = (1 << 1),
};
/**
@@ -711,6 +754,7 @@ enum nft_dynset_flags {
* @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
* @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
* @NFTA_DYNSET_FLAGS: flags (NLA_U32)
+ * @NFTA_DYNSET_EXPRESSIONS: list of expressions (NLA_NESTED: nft_list_attributes)
*/
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
@@ -723,6 +767,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
NFTA_DYNSET_FLAGS,
+ NFTA_DYNSET_EXPRESSIONS,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -733,11 +778,14 @@ enum nft_dynset_attributes {
* @NFT_PAYLOAD_LL_HEADER: link layer header
* @NFT_PAYLOAD_NETWORK_HEADER: network header
* @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ * @NFT_PAYLOAD_INNER_HEADER: inner header / payload
*/
enum nft_payload_bases {
NFT_PAYLOAD_LL_HEADER,
NFT_PAYLOAD_NETWORK_HEADER,
NFT_PAYLOAD_TRANSPORT_HEADER,
+ NFT_PAYLOAD_INNER_HEADER,
+ NFT_PAYLOAD_TUN_HEADER,
};
/**
@@ -745,16 +793,44 @@ enum nft_payload_bases {
*
* @NFT_PAYLOAD_CSUM_NONE: no checksumming
* @NFT_PAYLOAD_CSUM_INET: internet checksum (RFC 791)
+ * @NFT_PAYLOAD_CSUM_SCTP: CRC-32c, for use in SCTP header (RFC 3309)
*/
enum nft_payload_csum_types {
NFT_PAYLOAD_CSUM_NONE,
NFT_PAYLOAD_CSUM_INET,
+ NFT_PAYLOAD_CSUM_SCTP,
};
enum nft_payload_csum_flags {
NFT_PAYLOAD_L4CSUM_PSEUDOHDR = (1 << 0),
};
+enum nft_inner_type {
+ NFT_INNER_UNSPEC = 0,
+ NFT_INNER_VXLAN,
+ NFT_INNER_GENEVE,
+};
+
+enum nft_inner_flags {
+ NFT_INNER_HDRSIZE = (1 << 0),
+ NFT_INNER_LL = (1 << 1),
+ NFT_INNER_NH = (1 << 2),
+ NFT_INNER_TH = (1 << 3),
+};
+#define NFT_INNER_MASK (NFT_INNER_HDRSIZE | NFT_INNER_LL | \
+ NFT_INNER_NH | NFT_INNER_TH)
+
+enum nft_inner_attributes {
+ NFTA_INNER_UNSPEC,
+ NFTA_INNER_NUM,
+ NFTA_INNER_TYPE,
+ NFTA_INNER_FLAGS,
+ NFTA_INNER_HDRSIZE,
+ NFTA_INNER_EXPR,
+ __NFTA_INNER_MAX
+};
+#define NFTA_INNER_MAX (__NFTA_INNER_MAX - 1)
+
/**
* enum nft_payload_attributes - nf_tables payload expression netlink attributes
*
@@ -791,11 +867,15 @@ enum nft_exthdr_flags {
* @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers
* @NFT_EXTHDR_OP_TCP: match against tcp options
* @NFT_EXTHDR_OP_IPV4: match against ipv4 options
+ * @NFT_EXTHDR_OP_SCTP: match against sctp chunks
+ * @NFT_EXTHDR_OP_DCCP: match against dccp options
*/
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
NFT_EXTHDR_OP_TCPOPT,
NFT_EXTHDR_OP_IPV4,
+ NFT_EXTHDR_OP_SCTP,
+ NFT_EXTHDR_OP_DCCP,
__NFT_EXTHDR_OP_MAX
};
#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1)
@@ -809,7 +889,7 @@ enum nft_exthdr_op {
* @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
* @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32)
* @NFTA_EXTHDR_OP: option match type (NLA_U32)
- * @NFTA_EXTHDR_SREG: option match type (NLA_U32)
+ * @NFTA_EXTHDR_SREG: source register (NLA_U32: nft_registers)
*/
enum nft_exthdr_attributes {
NFTA_EXTHDR_UNSPEC,
@@ -862,6 +942,7 @@ enum nft_exthdr_attributes {
* @NFT_META_TIME_HOUR: hour of day (in seconds)
* @NFT_META_SDIF: slave device interface index
* @NFT_META_SDIFNAME: slave device interface name
+ * @NFT_META_BRI_BROUTE: packet br_netfilter_broute bit
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -872,7 +953,8 @@ enum nft_meta_keys {
NFT_META_OIF,
NFT_META_IIFNAME,
NFT_META_OIFNAME,
- NFT_META_IIFTYPE,
+ NFT_META_IFTYPE,
+#define NFT_META_IIFTYPE NFT_META_IFTYPE
NFT_META_OIFTYPE,
NFT_META_SKUID,
NFT_META_SKGID,
@@ -899,6 +981,8 @@ enum nft_meta_keys {
NFT_META_TIME_HOUR,
NFT_META_SDIF,
NFT_META_SDIFNAME,
+ NFT_META_BRI_BROUTE,
+ __NFT_META_IIFTYPE,
};
/**
@@ -994,11 +1078,13 @@ enum nft_rt_attributes {
*
* @NFTA_SOCKET_KEY: socket key to match
* @NFTA_SOCKET_DREG: destination register
+ * @NFTA_SOCKET_LEVEL: cgroups2 ancestor level (only for cgroupsv2)
*/
enum nft_socket_attributes {
NFTA_SOCKET_UNSPEC,
NFTA_SOCKET_KEY,
NFTA_SOCKET_DREG,
+ NFTA_SOCKET_LEVEL,
__NFTA_SOCKET_MAX
};
#define NFTA_SOCKET_MAX (__NFTA_SOCKET_MAX - 1)
@@ -1008,10 +1094,14 @@ enum nft_socket_attributes {
*
* @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
* @NFT_SOCKET_MARK: Value of the socket mark
+ * @NFT_SOCKET_WILDCARD: Whether the socket is zero-bound (e.g. 0.0.0.0 or ::0)
+ * @NFT_SOCKET_CGROUPV2: Match on cgroups version 2
*/
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
NFT_SOCKET_MARK,
+ NFT_SOCKET_WILDCARD,
+ NFT_SOCKET_CGROUPV2,
__NFT_SOCKET_MAX
};
#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -1166,12 +1256,27 @@ enum nft_counter_attributes {
#define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1)
/**
+ * enum nft_last_attributes - nf_tables last expression netlink attributes
+ *
+ * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32)
+ * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64)
+ */
+enum nft_last_attributes {
+ NFTA_LAST_UNSPEC,
+ NFTA_LAST_SET,
+ NFTA_LAST_MSECS,
+ NFTA_LAST_PAD,
+ __NFTA_LAST_MAX
+};
+#define NFTA_LAST_MAX (__NFTA_LAST_MAX - 1)
+
+/**
* enum nft_log_attributes - nf_tables log expression netlink attributes
*
- * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
+ * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U16)
* @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
* @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
- * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U16)
* @NFTA_LOG_LEVEL: log level (NLA_U32)
* @NFTA_LOG_FLAGS: logging flags (NLA_U32)
*/
@@ -1555,6 +1660,7 @@ enum nft_ct_expectation_attributes {
* @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
* @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
* @NFTA_OBJ_HANDLE: object handle (NLA_U64)
+ * @NFTA_OBJ_USERDATA: user data (NLA_BINARY)
*/
enum nft_object_attributes {
NFTA_OBJ_UNSPEC,
@@ -1565,6 +1671,7 @@ enum nft_object_attributes {
NFTA_OBJ_USE,
NFTA_OBJ_HANDLE,
NFTA_OBJ_PAD,
+ NFTA_OBJ_USERDATA,
__NFTA_OBJ_MAX
};
#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h
index 5bc960f220b3..6cd58cd2a6f0 100644
--- a/include/uapi/linux/netfilter/nfnetlink.h
+++ b/include/uapi/linux/netfilter/nfnetlink.h
@@ -60,7 +60,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_CTHELPER 9
#define NFNL_SUBSYS_NFTABLES 10
#define NFNL_SUBSYS_NFT_COMPAT 11
-#define NFNL_SUBSYS_COUNT 12
+#define NFNL_SUBSYS_HOOK 12
+#define NFNL_SUBSYS_COUNT 13
/* Reserved control nfnetlink messages */
#define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 262881792671..c2ac7269acf7 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -56,6 +56,7 @@ enum ctattr_type {
CTA_LABELS_MASK,
CTA_SYNPROXY,
CTA_FILTER,
+ CTA_STATUS_MASK,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -247,7 +248,7 @@ enum ctattr_stats_cpu {
CTA_STATS_FOUND,
CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
- CTA_STATS_IGNORE,
+ CTA_STATS_IGNORE, /* no longer used */
CTA_STATS_DELETE, /* no longer used */
CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
@@ -256,6 +257,8 @@ enum ctattr_stats_cpu {
CTA_STATS_EARLY_DROP,
CTA_STATS_ERROR,
CTA_STATS_SEARCH_RESTART,
+ CTA_STATS_CLASH_RESOLVE,
+ CTA_STATS_CHAIN_TOOLONG,
__CTA_STATS_MAX,
};
#define CTA_STATS_MAX (__CTA_STATS_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
index a13137afc429..70af02092d16 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
@@ -5,7 +5,7 @@
#define NFCT_HELPER_STATUS_DISABLED 0
#define NFCT_HELPER_STATUS_ENABLED 1
-enum nfnl_acct_msg_types {
+enum nfnl_cthelper_msg_types {
NFNL_MSG_CTHELPER_NEW,
NFNL_MSG_CTHELPER_GET,
NFNL_MSG_CTHELPER_DEL,
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 6b20fb22717b..aa805e6d4e28 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -94,7 +94,7 @@ enum ctattr_timeout_sctp {
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
- CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h
new file mode 100644
index 000000000000..84a561a74b98
--- /dev/null
+++ b/include/uapi/linux/netfilter/nfnetlink_hook.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _NFNL_HOOK_H_
+#define _NFNL_HOOK_H_
+
+enum nfnl_hook_msg_types {
+ NFNL_MSG_HOOK_GET,
+ NFNL_MSG_HOOK_MAX,
+};
+
+/**
+ * enum nfnl_hook_attributes - netfilter hook netlink attributes
+ *
+ * @NFNLA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFNLA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFNLA_HOOK_DEV: netdevice name (NLA_STRING)
+ * @NFNLA_HOOK_FUNCTION_NAME: hook function name (NLA_STRING)
+ * @NFNLA_HOOK_MODULE_NAME: kernel module that registered this hook (NLA_STRING)
+ * @NFNLA_HOOK_CHAIN_INFO: basechain hook metadata (NLA_NESTED)
+ */
+enum nfnl_hook_attributes {
+ NFNLA_HOOK_UNSPEC,
+ NFNLA_HOOK_HOOKNUM,
+ NFNLA_HOOK_PRIORITY,
+ NFNLA_HOOK_DEV,
+ NFNLA_HOOK_FUNCTION_NAME,
+ NFNLA_HOOK_MODULE_NAME,
+ NFNLA_HOOK_CHAIN_INFO,
+ __NFNLA_HOOK_MAX
+};
+#define NFNLA_HOOK_MAX (__NFNLA_HOOK_MAX - 1)
+
+/**
+ * enum nfnl_hook_chain_info_attributes - chain description
+ *
+ * @NFNLA_HOOK_INFO_DESC: nft chain and table name (NLA_NESTED)
+ * @NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32)
+ *
+ * NFNLA_HOOK_INFO_DESC depends on NFNLA_HOOK_INFO_TYPE value:
+ * NFNL_HOOK_TYPE_NFTABLES: enum nft_table_attributes
+ * NFNL_HOOK_TYPE_BPF: enum nfnl_hook_bpf_attributes
+ */
+enum nfnl_hook_chain_info_attributes {
+ NFNLA_HOOK_INFO_UNSPEC,
+ NFNLA_HOOK_INFO_DESC,
+ NFNLA_HOOK_INFO_TYPE,
+ __NFNLA_HOOK_INFO_MAX,
+};
+#define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
+
+enum nfnl_hook_chain_desc_attributes {
+ NFNLA_CHAIN_UNSPEC,
+ NFNLA_CHAIN_TABLE,
+ NFNLA_CHAIN_FAMILY,
+ NFNLA_CHAIN_NAME,
+ __NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
+/**
+ * enum nfnl_hook_chaintype - chain type
+ *
+ * @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain
+ * @NFNL_HOOK_TYPE_BPF: bpf program
+ */
+enum nfnl_hook_chaintype {
+ NFNL_HOOK_TYPE_NFTABLES = 0x1,
+ NFNL_HOOK_TYPE_BPF,
+};
+
+/**
+ * enum nfnl_hook_bpf_attributes - bpf prog description
+ *
+ * @NFNLA_HOOK_BPF_ID: bpf program id (NLA_U32)
+ */
+enum nfnl_hook_bpf_attributes {
+ NFNLA_HOOK_BPF_UNSPEC,
+ NFNLA_HOOK_BPF_ID,
+ __NFNLA_HOOK_BPF_MAX,
+};
+#define NFNLA_HOOK_BPF_MAX (__NFNLA_HOOK_BPF_MAX - 1)
+
+#endif /* _NFNL_HOOK_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h
index 45c8d3b027e0..0af9c113d665 100644
--- a/include/uapi/linux/netfilter/nfnetlink_log.h
+++ b/include/uapi/linux/netfilter/nfnetlink_log.h
@@ -61,7 +61,7 @@ enum nfulnl_attr_type {
NFULA_HWTYPE, /* hardware type */
NFULA_HWHEADER, /* hardware header */
NFULA_HWLEN, /* hardware header length */
- NFULA_CT, /* nf_conntrack_netlink.h */
+ NFULA_CT, /* nfnetlink_conntrack.h */
NFULA_CT_INFO, /* enum ip_conntrack_info */
NFULA_VLAN, /* nested attribute: packet vlan info */
NFULA_L2HDR, /* full L2 header */
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index bcb2cb5d40b9..efcb7c044a74 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -51,16 +51,18 @@ enum nfqnl_attr_type {
NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFQA_HWADDR, /* nfqnl_msg_packet_hw */
NFQA_PAYLOAD, /* opaque data payload */
- NFQA_CT, /* nf_conntrack_netlink.h */
+ NFQA_CT, /* nfnetlink_conntrack.h */
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
- NFQA_EXP, /* nf_conntrack_netlink.h */
+ NFQA_EXP, /* nfnetlink_conntrack.h */
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
NFQA_SECCTX, /* security context string */
NFQA_VLAN, /* nested attribute: packet vlan info */
NFQA_L2HDR, /* full L2 header */
+ NFQA_PRIORITY, /* skb->priority */
+ NFQA_CGROUP_CLASSID, /* __u32 cgroup classid */
__NFQA_MAX
};
diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
index a8283f7dbc51..796af83a963a 100644
--- a/include/uapi/linux/netfilter/x_tables.h
+++ b/include/uapi/linux/netfilter/x_tables.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_X_TABLES_H
#define _UAPI_X_TABLES_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#define XT_FUNCTION_MAXNAMELEN 30
@@ -28,7 +28,7 @@ struct xt_entry_match {
__u16 match_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
struct xt_entry_target {
@@ -119,7 +119,7 @@ struct xt_counters_info {
unsigned int num_counters;
/* The counters (actually `number' of these). */
- struct xt_counters counters[0];
+ struct xt_counters counters[];
};
#define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */
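
The data[0] to data[] conversions here (and throughout this series) replace the old GNU zero-length-array idiom with C99 flexible array members; sizeof() and the on-wire layout are unchanged, but compilers and fortified string routines can now see the real bounds. The switch to <linux/const.h> above is what keeps __ALIGN_KERNEL(), used by XT_ALIGN(), available without dragging in kernel.h. A sketch of how such a blob is still sized and filled from userspace, with alloc_match() as an illustrative helper rather than a libxtables API:

#include <stdlib.h>
#include <string.h>
#include <linux/netfilter/x_tables.h>

/* Allocate a match blob: header plus payload, XT_ALIGN()ed, with
 * u.user.match_size covering both; m->data is the flexible array.
 */
static struct xt_entry_match *alloc_match(const char *name,
					  const void *payload, __u16 len)
{
	__u16 size = XT_ALIGN(sizeof(struct xt_entry_match) + len);
	struct xt_entry_match *m = calloc(1, size);

	if (!m)
		return NULL;
	m->u.user.match_size = size;
	strncpy(m->u.user.name, name, sizeof(m->u.user.name) - 1);
	memcpy(m->data, payload, len);
	return m;
}
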
diff --git a/include/uapi/linux/netfilter/xt_AUDIT.h b/include/uapi/linux/netfilter/xt_AUDIT.h
index 1b314e2f84ac..56a3f6092e0c 100644
--- a/include/uapi/linux/netfilter/xt_AUDIT.h
+++ b/include/uapi/linux/netfilter/xt_AUDIT.h
@@ -4,10 +4,6 @@
*
* (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
* (C) 2010-2011 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _XT_AUDIT_TARGET_H
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 49ddcdc61c09..7bfb31a66fc9 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
- * linux/include/linux/netfilter/xt_IDLETIMER.h
- *
* Header file for Xtables timer target module.
*
* Copyright (C) 2004, 2010 Nokia Corporation
@@ -10,20 +9,6 @@
* by Luciano Coelho <luciano.coelho@nokia.com>
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef _XT_IDLETIMER_H
diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
index 1f2a708413f5..beb2cadba8a9 100644
--- a/include/uapi/linux/netfilter/xt_SECMARK.h
+++ b/include/uapi/linux/netfilter/xt_SECMARK.h
@@ -20,4 +20,10 @@ struct xt_secmark_target_info {
char secctx[SECMARK_SECCTX_MAX];
};
+struct xt_secmark_target_info_v1 {
+ __u8 mode;
+ char secctx[SECMARK_SECCTX_MAX];
+ __u32 secid;
+};
+
#endif /*_XT_SECMARK_H_target */
diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h
index f01c19b83a2b..41b578ccd03b 100644
--- a/include/uapi/linux/netfilter/xt_connmark.h
+++ b/include/uapi/linux/netfilter/xt_connmark.h
@@ -1,18 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ */
+
#ifndef _XT_CONNMARK_H
#define _XT_CONNMARK_H
#include <linux/types.h>
-/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
enum {
XT_CONNMARK_SET = 0,
XT_CONNMARK_SAVE,
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 6e466236ca4b..f1f097896bdf 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _XT_OSF_H
diff --git a/include/uapi/linux/netfilter_arp/arp_tables.h b/include/uapi/linux/netfilter_arp/arp_tables.h
index bbf5af2b67a8..a6ac2463f787 100644
--- a/include/uapi/linux/netfilter_arp/arp_tables.h
+++ b/include/uapi/linux/netfilter_arp/arp_tables.h
@@ -109,7 +109,7 @@ struct arpt_entry
struct xt_counters counters;
/* The matches (if any), then the target. */
- unsigned char elems[0];
+ unsigned char elems[];
};
/*
@@ -181,7 +181,7 @@ struct arpt_replace {
struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
- struct arpt_entry entries[0];
+ struct arpt_entry entries[];
};
/* The argument to ARPT_SO_GET_ENTRIES. */
@@ -193,7 +193,7 @@ struct arpt_get_entries {
unsigned int size;
/* The entries. */
- struct arpt_entry entrytable[0];
+ struct arpt_entry entrytable[];
};
/* Helper functions */
diff --git a/include/uapi/linux/netfilter_bridge/ebt_among.h b/include/uapi/linux/netfilter_bridge/ebt_among.h
index 9acf757bc1f7..73b26a280c4f 100644
--- a/include/uapi/linux/netfilter_bridge/ebt_among.h
+++ b/include/uapi/linux/netfilter_bridge/ebt_among.h
@@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple {
struct ebt_mac_wormhash {
int table[257];
int poolsize;
- struct ebt_mac_wormhash_tuple pool[0];
+ struct ebt_mac_wormhash_tuple pool[];
};
#define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index a494cf43a755..4ff328f3d339 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -87,7 +87,7 @@ struct ebt_entries {
/* nr. of entries */
unsigned int nentries;
/* entry list */
- char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
/* used for the bitmask of struct ebt_entry */
@@ -129,7 +129,7 @@ struct ebt_entry_match {
} u;
/* size of data */
unsigned int match_size;
- unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ unsigned char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
struct ebt_entry_watcher {
@@ -142,7 +142,7 @@ struct ebt_entry_watcher {
} u;
/* size of data */
unsigned int watcher_size;
- unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ unsigned char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
struct ebt_entry_target {
@@ -182,13 +182,15 @@ struct ebt_entry {
unsigned char sourcemsk[ETH_ALEN];
unsigned char destmac[ETH_ALEN];
unsigned char destmsk[ETH_ALEN];
- /* sizeof ebt_entry + matches */
- unsigned int watchers_offset;
- /* sizeof ebt_entry + matches + watchers */
- unsigned int target_offset;
- /* sizeof ebt_entry + matches + watchers + target */
- unsigned int next_offset;
- unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ __struct_group(/* no tag */, offsets, /* no attrs */,
+ /* sizeof ebt_entry + matches */
+ unsigned int watchers_offset;
+ /* sizeof ebt_entry + matches + watchers */
+ unsigned int target_offset;
+ /* sizeof ebt_entry + matches + watchers + target */
+ unsigned int next_offset;
+ );
+ unsigned char elems[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
static __inline__ struct ebt_entry_target *
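
__struct_group() comes from <linux/stddef.h>: it wraps the three offset fields in a union of an anonymous struct and a tagged mirror named offsets, so existing field accesses compile unchanged while code that copies all three words at once gains an exactly sized object for fortified memcpy(). A simplified, self-contained model of the expansion:

#include <stdio.h>
#include <string.h>

/* Simplified model of the __struct_group() expansion above: both
 * views alias the same three words, so entry.watchers_offset keeps
 * working while &entry.offsets names a bounded 12-byte object that
 * a fortified memcpy() can check exactly.
 */
struct ebt_entry_model {
	union {
		struct {
			unsigned int watchers_offset;
			unsigned int target_offset;
			unsigned int next_offset;
		};
		struct {
			unsigned int watchers_offset;
			unsigned int target_offset;
			unsigned int next_offset;
		} offsets;
	};
};

int main(void)
{
	struct ebt_entry_model a = { .offsets = { 1, 2, 3 } }, b;

	memcpy(&b.offsets, &a.offsets, sizeof(b.offsets)); /* exactly 12 bytes */
	printf("%u %u %u\n", b.watchers_offset, b.target_offset,
	       b.next_offset);
	return 0;
}
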
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
deleted file mode 100644
index 3c77f54560f2..000000000000
--- a/include/uapi/linux/netfilter_decnet.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_DECNET_NETFILTER_H
-#define __LINUX_DECNET_NETFILTER_H
-
-/* DECnet-specific defines for netfilter.
- * This file (C) Steve Whitehouse 1999 derived from the
- * ipv4 netfilter header file which is
- * (C)1998 Rusty Russell -- This code is GPL.
- */
-
-#include <linux/netfilter.h>
-
-/* only for userspace compatibility */
-#ifndef __KERNEL__
-
-#include <limits.h> /* for INT_MIN, INT_MAX */
-
-/* kernel define is in netfilter_defs.h */
-#define NF_DN_NUMHOOKS 7
-#endif /* ! __KERNEL__ */
-
-/* DECnet Hooks */
-/* After promisc drops, checksum checks. */
-#define NF_DN_PRE_ROUTING 0
-/* If the packet is destined for this box. */
-#define NF_DN_LOCAL_IN 1
-/* If the packet is destined for another interface. */
-#define NF_DN_FORWARD 2
-/* Packets coming from a local process. */
-#define NF_DN_LOCAL_OUT 3
-/* Packets about to hit the wire. */
-#define NF_DN_POST_ROUTING 4
-/* Input Hello Packets */
-#define NF_DN_HELLO 5
-/* Input Routing Packets */
-#define NF_DN_ROUTE 6
-
-enum nf_dn_hook_priorities {
- NF_DN_PRI_FIRST = INT_MIN,
- NF_DN_PRI_CONNTRACK = -200,
- NF_DN_PRI_MANGLE = -150,
- NF_DN_PRI_NAT_DST = -100,
- NF_DN_PRI_FILTER = 0,
- NF_DN_PRI_NAT_SRC = 100,
- NF_DN_PRI_DNRTMSG = 200,
- NF_DN_PRI_LAST = INT_MAX,
-};
-
-struct nf_dn_rtmsg {
- int nfdn_ifindex;
-};
-
-#define NFDN_RTMSG(r) ((unsigned char *)(r) + NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)))
-
-#ifndef __KERNEL__
-/* backwards compatibility for userspace */
-#define DNRMG_L1_GROUP 0x01
-#define DNRMG_L2_GROUP 0x02
-#endif
-
-enum {
- DNRNG_NLGRP_NONE,
-#define DNRNG_NLGRP_NONE DNRNG_NLGRP_NONE
- DNRNG_NLGRP_L1,
-#define DNRNG_NLGRP_L1 DNRNG_NLGRP_L1
- DNRNG_NLGRP_L2,
-#define DNRNG_NLGRP_L2 DNRNG_NLGRP_L2
- __DNRNG_NLGRP_MAX
-};
-#define DNRNG_NLGRP_MAX (__DNRNG_NLGRP_MAX - 1)
-
-#endif /*__LINUX_DECNET_NETFILTER_H*/
diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h
index 50c7fee625ae..1485df28b239 100644
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -121,7 +121,7 @@ struct ipt_entry {
struct xt_counters counters;
/* The matches (if any), then the target. */
- unsigned char elems[0];
+ unsigned char elems[];
};
/*
@@ -203,7 +203,7 @@ struct ipt_replace {
struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
- struct ipt_entry entries[0];
+ struct ipt_entry entries[];
};
/* The argument to IPT_SO_GET_ENTRIES. */
@@ -215,7 +215,7 @@ struct ipt_get_entries {
unsigned int size;
/* The entries. */
- struct ipt_entry entrytable[0];
+ struct ipt_entry entrytable[];
};
/* Helper functions */
diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
index d9e364f96a5c..766e8e0bcc68 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
@@ -243,7 +243,7 @@ struct ip6t_replace {
struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
- struct ip6t_entry entries[0];
+ struct ip6t_entry entries[];
};
/* The argument to IP6T_SO_GET_ENTRIES. */
@@ -255,7 +255,7 @@ struct ip6t_get_entries {
unsigned int size;
/* The entries. */
- struct ip6t_entry entrytable[0];
+ struct ip6t_entry entrytable[];
};
/* Helper functions */
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
index 23e91a9c2583..0b7b16dbdec2 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
@@ -17,4 +17,4 @@ struct ip6t_log_info {
char prefix[30];
};
-#endif /*_IPT_LOG_H*/
+#endif /* _IP6T_LOG_H */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index eac8a6a648ea..f87aaf28a649 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_NETLINK_H
#define _UAPI__LINUX_NETLINK_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>
@@ -20,7 +20,7 @@
#define NETLINK_CONNECTOR 11
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
#define NETLINK_IP6_FW 13
-#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
+#define NETLINK_DNRTMSG 14 /* DECnet routing messages (obsolete) */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
/* leave room for NETLINK_DM (DM Events) */
@@ -41,12 +41,20 @@ struct sockaddr_nl {
__u32 nl_groups; /* multicast groups mask */
};
+/**
+ * struct nlmsghdr - fixed format metadata header of Netlink messages
+ * @nlmsg_len: Length of message including header
+ * @nlmsg_type: Message content type
+ * @nlmsg_flags: Additional flags
+ * @nlmsg_seq: Sequence number
+ * @nlmsg_pid: Sending process port ID
+ */
struct nlmsghdr {
- __u32 nlmsg_len; /* Length of message including header */
- __u16 nlmsg_type; /* Message content */
- __u16 nlmsg_flags; /* Additional flags */
- __u32 nlmsg_seq; /* Sequence number */
- __u32 nlmsg_pid; /* Sending process port ID */
+ __u32 nlmsg_len;
+ __u16 nlmsg_type;
+ __u16 nlmsg_flags;
+ __u32 nlmsg_seq;
+ __u32 nlmsg_pid;
};
/* Flags values */
@@ -54,7 +62,7 @@ struct nlmsghdr {
#define NLM_F_REQUEST 0x01 /* It is request message. */
#define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */
-#define NLM_F_ECHO 0x08 /* Echo this request */
+#define NLM_F_ECHO 0x08 /* Receive resulting notifications */
#define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */
#define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */
@@ -72,6 +80,7 @@ struct nlmsghdr {
/* Modifiers to DELETE request */
#define NLM_F_NONREC 0x100 /* Do not delete recursively */
+#define NLM_F_BULK 0x200 /* Delete multiple objects */
/* Flags for ACK message */
#define NLM_F_CAPPED 0x100 /* request was capped */
@@ -91,9 +100,10 @@ struct nlmsghdr {
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
#define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN)
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
-#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
+#define NLMSG_DATA(nlh) ((void *)(((char *)nlh) + NLMSG_HDRLEN))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
- (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
+ (struct nlmsghdr *)(((char *)(nlh)) + \
+ NLMSG_ALIGN((nlh)->nlmsg_len)))
#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len <= (len))
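
The NLMSG_OK()/NLMSG_NEXT()/NLMSG_DATA() macros above are the canonical receive-side loop; note that NLMSG_NEXT() decrements its len argument in place, so len must be an lvalue. A minimal sketch (handle_payload() is illustrative):

#include <stdio.h>
#include <linux/netlink.h>

static void handle_payload(void *payload, int len)
{
	printf("payload: %d bytes at %p\n", len, payload); /* placeholder */
}

/* Walk every message in a buffer just read from a netlink socket. */
static void walk_nl_buf(void *buf, int len)
{
	struct nlmsghdr *nlh;

	for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			return;
		if (nlh->nlmsg_type == NLMSG_ERROR) {
			struct nlmsgerr *err = NLMSG_DATA(nlh);

			fprintf(stderr, "netlink error %d\n", err->error);
			return;
		}
		handle_payload(NLMSG_DATA(nlh),
			       nlh->nlmsg_len - NLMSG_HDRLEN);
	}
}
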
@@ -129,6 +139,11 @@ struct nlmsgerr {
* @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to
* be used - in the success case - to identify a created
* object or operation or similar (binary)
+ * @NLMSGERR_ATTR_POLICY: policy for a rejected attribute
+ * @NLMSGERR_ATTR_MISS_TYPE: type of a missing required attribute,
+ * %NLMSGERR_ATTR_MISS_NEST will not be present if the attribute was
+ * missing at the message level
+ * @NLMSGERR_ATTR_MISS_NEST: offset of the nest where attribute was missing
* @__NLMSGERR_ATTR_MAX: number of attributes
* @NLMSGERR_ATTR_MAX: highest attribute number
*/
@@ -137,6 +152,9 @@ enum nlmsgerr_attrs {
NLMSGERR_ATTR_MSG,
NLMSGERR_ATTR_OFFS,
NLMSGERR_ATTR_COOKIE,
+ NLMSGERR_ATTR_POLICY,
+ NLMSGERR_ATTR_MISS_TYPE,
+ NLMSGERR_ATTR_MISS_NEST,
__NLMSGERR_ATTR_MAX,
NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
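
These attributes live in the extended-ACK TLV space that follows struct nlmsgerr. A hedged sketch of locating them, assuming the usual layout where the echoed request precedes the TLVs (header-only when NLM_F_CAPPED is set on the ACK) and NLM_F_ACK_TLVS signals their presence:

#include <stdio.h>
#include <linux/netlink.h>

/* Print NLMSGERR_ATTR_MSG from an extended ACK, if present. */
static void dump_ext_ack(const struct nlmsghdr *nlh)
{
	const struct nlmsgerr *err = NLMSG_DATA(nlh);
	unsigned int hlen = sizeof(*err);
	const struct nlattr *attr;
	int rem;

	if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
		return;
	/* the echoed request payload precedes the TLVs unless capped */
	if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
		hlen += err->msg.nlmsg_len - NLMSG_HDRLEN;

	attr = (const struct nlattr *)((const char *)err + NLMSG_ALIGN(hlen));
	rem = nlh->nlmsg_len - NLMSG_HDRLEN - NLMSG_ALIGN(hlen);

	while (rem >= (int)sizeof(*attr) &&
	       attr->nla_len >= sizeof(*attr) && attr->nla_len <= rem) {
		if ((attr->nla_type & NLA_TYPE_MASK) == NLMSGERR_ATTR_MSG)
			printf("extack: %s\n",
			       (const char *)attr + NLA_HDRLEN);
		rem -= NLA_ALIGN(attr->nla_len);
		attr = (const struct nlattr *)((const char *)attr +
					       NLA_ALIGN(attr->nla_len));
	}
}
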
@@ -280,6 +298,8 @@ struct nla_bitfield32 {
* entry has attributes again, the policy for those inner ones
* and the corresponding maxtype may be specified.
* @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute
+ * @NL_ATTR_TYPE_SINT: 32-bit or 64-bit signed attribute, aligned to 4B
+ * @NL_ATTR_TYPE_UINT: 32-bit or 64-bit unsigned attribute, aligned to 4B
*/
enum netlink_attribute_type {
NL_ATTR_TYPE_INVALID,
@@ -304,6 +324,9 @@ enum netlink_attribute_type {
NL_ATTR_TYPE_NESTED_ARRAY,
NL_ATTR_TYPE_BITFIELD32,
+
+ NL_ATTR_TYPE_SINT,
+ NL_ATTR_TYPE_UINT,
};
/**
@@ -331,7 +354,11 @@ enum netlink_attribute_type {
* the index, if limited inside the nesting (U32)
* @NL_POLICY_TYPE_ATTR_BITFIELD32_MASK: valid mask for the
* bitfield32 type (U32)
+ * @NL_POLICY_TYPE_ATTR_MASK: mask of valid bits for unsigned integers (U64)
* @NL_POLICY_TYPE_ATTR_PAD: pad attribute for 64-bit alignment
+ *
+ * @__NL_POLICY_TYPE_ATTR_MAX: number of attributes
+ * @NL_POLICY_TYPE_ATTR_MAX: highest attribute number
*/
enum netlink_policy_type_attr {
NL_POLICY_TYPE_ATTR_UNSPEC,
@@ -346,6 +373,7 @@ enum netlink_policy_type_attr {
NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
NL_POLICY_TYPE_ATTR_BITFIELD32_MASK,
NL_POLICY_TYPE_ATTR_PAD,
+ NL_POLICY_TYPE_ATTR_MASK,
/* keep last */
__NL_POLICY_TYPE_ATTR_MAX,
diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h
index 2d4a1e784cf0..dd8787f9cf39 100644
--- a/include/uapi/linux/nexthop.h
+++ b/include/uapi/linux/nexthop.h
@@ -21,12 +21,18 @@ struct nexthop_grp {
};
enum {
- NEXTHOP_GRP_TYPE_MPATH, /* default type if not specified */
+ NEXTHOP_GRP_TYPE_MPATH, /* hash-threshold nexthop group
+ * default type if not specified
+ */
+ NEXTHOP_GRP_TYPE_RES, /* resilient nexthop group */
__NEXTHOP_GRP_TYPE_MAX,
};
#define NEXTHOP_GRP_TYPE_MAX (__NEXTHOP_GRP_TYPE_MAX - 1)
+#define NHA_OP_FLAG_DUMP_STATS BIT(0)
+#define NHA_OP_FLAG_DUMP_HW_STATS BIT(1)
+
enum {
NHA_UNSPEC,
NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */
@@ -52,8 +58,92 @@ enum {
NHA_FDB, /* flag; nexthop belongs to a bridge fdb */
/* if NHA_FDB is added, OIF, BLACKHOLE, ENCAP cannot be set */
+ /* nested; resilient nexthop group attributes */
+ NHA_RES_GROUP,
+ /* nested; nexthop bucket attributes */
+ NHA_RES_BUCKET,
+
+ /* u32; operation-specific flags */
+ NHA_OP_FLAGS,
+
+ /* nested; nexthop group stats */
+ NHA_GROUP_STATS,
+
+ /* u32; nexthop hardware stats enable */
+ NHA_HW_STATS_ENABLE,
+
+ /* u32; read-only; whether any driver collects HW stats */
+ NHA_HW_STATS_USED,
+
__NHA_MAX,
};
#define NHA_MAX (__NHA_MAX - 1)
+
+enum {
+ NHA_RES_GROUP_UNSPEC,
+ /* Pad attribute for 64-bit alignment. */
+ NHA_RES_GROUP_PAD = NHA_RES_GROUP_UNSPEC,
+
+ /* u16; number of nexthop buckets in a resilient nexthop group */
+ NHA_RES_GROUP_BUCKETS,
+ /* clock_t as u32; nexthop bucket idle timer (per-group) */
+ NHA_RES_GROUP_IDLE_TIMER,
+ /* clock_t as u32; nexthop unbalanced timer */
+ NHA_RES_GROUP_UNBALANCED_TIMER,
+ /* clock_t as u64; nexthop unbalanced time */
+ NHA_RES_GROUP_UNBALANCED_TIME,
+
+ __NHA_RES_GROUP_MAX,
+};
+
+#define NHA_RES_GROUP_MAX (__NHA_RES_GROUP_MAX - 1)
+
+enum {
+ NHA_RES_BUCKET_UNSPEC,
+ /* Pad attribute for 64-bit alignment. */
+ NHA_RES_BUCKET_PAD = NHA_RES_BUCKET_UNSPEC,
+
+ /* u16; nexthop bucket index */
+ NHA_RES_BUCKET_INDEX,
+ /* clock_t as u64; nexthop bucket idle time */
+ NHA_RES_BUCKET_IDLE_TIME,
+ /* u32; nexthop id assigned to the nexthop bucket */
+ NHA_RES_BUCKET_NH_ID,
+
+ __NHA_RES_BUCKET_MAX,
+};
+
+#define NHA_RES_BUCKET_MAX (__NHA_RES_BUCKET_MAX - 1)
+
+enum {
+ NHA_GROUP_STATS_UNSPEC,
+
+ /* nested; nexthop group entry stats */
+ NHA_GROUP_STATS_ENTRY,
+
+ __NHA_GROUP_STATS_MAX,
+};
+
+#define NHA_GROUP_STATS_MAX (__NHA_GROUP_STATS_MAX - 1)
+
+enum {
+ NHA_GROUP_STATS_ENTRY_UNSPEC,
+
+ /* u32; nexthop id of the nexthop group entry */
+ NHA_GROUP_STATS_ENTRY_ID,
+
+ /* uint; number of packets forwarded via the nexthop group entry */
+ NHA_GROUP_STATS_ENTRY_PACKETS,
+
+ /* uint; number of packets forwarded via the nexthop group entry in
+ * hardware
+ */
+ NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+
+ __NHA_GROUP_STATS_ENTRY_MAX,
+};
+
+#define NHA_GROUP_STATS_ENTRY_MAX (__NHA_GROUP_STATS_ENTRY_MAX - 1)
+
#endif
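
NHA_OP_FLAGS is carried in an RTM_GETNEXTHOP dump request after struct nhmsg (defined earlier in this header). A hedged sketch of such a request, assuming natural packing of this fixed layout; struct nh_stats_dump_req and build_nh_stats_dump() are illustrative names:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/nexthop.h>

/* RTM_GETNEXTHOP dump asking for NHA_GROUP_STATS via NHA_OP_FLAGS. */
struct nh_stats_dump_req {
	struct nlmsghdr nlh;
	struct nhmsg nhm;
	struct nlattr op_flags;
	__u32 op_flags_val;
};

static void build_nh_stats_dump(struct nh_stats_dump_req *req)
{
	memset(req, 0, sizeof(*req));
	req->nlh.nlmsg_len = sizeof(*req);
	req->nlh.nlmsg_type = RTM_GETNEXTHOP;
	req->nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req->nhm.nh_family = AF_UNSPEC;
	req->op_flags.nla_type = NHA_OP_FLAGS;
	req->op_flags.nla_len = NLA_HDRLEN + sizeof(__u32);
	req->op_flags_val = NHA_OP_FLAG_DUMP_STATS;
}
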
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index f6e3c8c9c744..4fa4e979e948 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
#define NFC_SE_ENABLED 0x1
struct sockaddr_nfc {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
#define NFC_LLCP_MAX_SERVICE_NAME 63
struct sockaddr_nfc_llcp {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
__u8 dsap; /* Destination SAP, if known */
__u8 ssap; /* Source SAP to be bound to */
char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
- size_t service_name_len;
+ __kernel_size_t service_name_len;
};
/* NFC socket protocols */
diff --git a/include/uapi/linux/nfs3.h b/include/uapi/linux/nfs3.h
index 37e4b34e6b43..c22ab77713bd 100644
--- a/include/uapi/linux/nfs3.h
+++ b/include/uapi/linux/nfs3.h
@@ -63,6 +63,12 @@ enum nfs3_ftype {
NF3BAD = 8
};
+enum nfs3_time_how {
+ DONT_CHANGE = 0,
+ SET_TO_SERVER_TIME = 1,
+ SET_TO_CLIENT_TIME = 2,
+};
+
struct nfs3_fh {
unsigned short size;
unsigned char data[NFS3_FHSIZE];
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index bf197e99b98f..1d2043708bf1 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -45,6 +45,7 @@
#define NFS4_OPEN_RESULT_CONFIRM 0x0002
#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_PRESERVE_UNLINKED 0x0008
#define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020
#define NFS4_SHARE_ACCESS_MASK 0x000F
@@ -139,6 +140,8 @@
#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
+
+#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004
/*
* Since the validity of these bits depends on whether
* they're set in the argument or response, have separate
@@ -146,6 +149,7 @@
*/
#define EXCHGID4_FLAG_MASK_A 0x40070103
#define EXCHGID4_FLAG_MASK_R 0x80070103
+#define EXCHGID4_2_FLAG_MASK_R 0x80070107
#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
@@ -175,9 +179,3 @@
#define NFS4_MAX_BACK_CHANNEL_OPS 2
#endif /* _UAPI_LINUX_NFS4_H */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h
index 3afe3767c55d..ae0de165c014 100644
--- a/include/uapi/linux/nfs_fs.h
+++ b/include/uapi/linux/nfs_fs.h
@@ -52,7 +52,7 @@
#define NFSDBG_CALLBACK 0x0100
#define NFSDBG_CLIENT 0x0200
#define NFSDBG_MOUNT 0x0400
-#define NFSDBG_FSCACHE 0x0800
+#define NFSDBG_FSCACHE 0x0800 /* unused */
#define NFSDBG_PNFS 0x1000
#define NFSDBG_PNFS_LD 0x2000
#define NFSDBG_STATE 0x4000
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index ca9a8501ff30..2c2ad204d3b0 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -9,11 +9,13 @@
#define NFS_ACL_PROGRAM 100227
+#define ACLPROC2_NULL 0
#define ACLPROC2_GETACL 1
#define ACLPROC2_SETACL 2
#define ACLPROC2_GETATTR 3
#define ACLPROC2_ACCESS 4
+#define ACLPROC3_NULL 0
#define ACLPROC3_GETACL 1
#define ACLPROC3_SETACL 2
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index 2124ba904779..a73ca3703abb 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -62,5 +62,18 @@
| NFSEXP_ALLSQUASH \
| NFSEXP_INSECURE_PORT)
+/*
+ * Transport layer security policies that are permitted to access
+ * an export
+ */
+#define NFSEXP_XPRTSEC_NONE 0x0001
+#define NFSEXP_XPRTSEC_TLS 0x0002
+#define NFSEXP_XPRTSEC_MTLS 0x0004
+
+#define NFSEXP_XPRTSEC_NUM (3)
+
+#define NFSEXP_XPRTSEC_ALL (NFSEXP_XPRTSEC_NONE | \
+ NFSEXP_XPRTSEC_TLS | \
+ NFSEXP_XPRTSEC_MTLS)
#endif /* _UAPINFSD_EXPORT_H */
diff --git a/include/uapi/linux/nfsd/nfsfh.h b/include/uapi/linux/nfsd/nfsfh.h
deleted file mode 100644
index ff0ca88b1c8f..000000000000
--- a/include/uapi/linux/nfsd/nfsfh.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * This file describes the layout of the file handles as passed
- * over the wire.
- *
- * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef _UAPI_LINUX_NFSD_FH_H
-#define _UAPI_LINUX_NFSD_FH_H
-
-#include <linux/types.h>
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-
-/*
- * This is the old "dentry style" Linux NFSv2 file handle.
- *
- * The xino and xdev fields are currently used to transport the
- * ino/dev of the exported inode.
- */
-struct nfs_fhbase_old {
- __u32 fb_dcookie; /* dentry cookie - always 0xfeebbaca */
- __u32 fb_ino; /* our inode number */
- __u32 fb_dirino; /* dir inode number, 0 for directories */
- __u32 fb_dev; /* our device */
- __u32 fb_xdev;
- __u32 fb_xino;
- __u32 fb_generation;
-};
-
-/*
- * This is the new flexible, extensible style NFSv2/v3/v4 file handle.
- * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000
- *
- * The file handle starts with a sequence of four-byte words.
- * The first word contains a version number (1) and three descriptor bytes
- * that tell how the remaining 3 variable length fields should be handled.
- * These three bytes are auth_type, fsid_type and fileid_type.
- *
- * All four-byte values are in host-byte-order.
- *
- * The auth_type field is deprecated and must be set to 0.
- *
- * The fsid_type identifies how the filesystem (or export point) is
- * encoded.
- * Current values:
- * 0 - 4 byte device id (ms-2-bytes major, ls-2-bytes minor), 4byte inode number
- * NOTE: we cannot use the kdev_t device id value, because kdev_t.h
- * says we mustn't. We must break it up and reassemble.
- * 1 - 4 byte user specified identifier
- * 2 - 4 byte major, 4 byte minor, 4 byte inode number - DEPRECATED
- * 3 - 4 byte device id, encoded for user-space, 4 byte inode number
- * 4 - 4 byte inode number and 4 byte uuid
- * 5 - 8 byte uuid
- * 6 - 16 byte uuid
- * 7 - 8 byte inode number and 16 byte uuid
- *
- * The fileid_type identified how the file within the filesystem is encoded.
- * The values for this field are filesystem specific, exccept that
- * filesystems must not use the values '0' or '0xff'. 'See enum fid_type'
- * in include/linux/exportfs.h for currently registered values.
- */
-struct nfs_fhbase_new {
- __u8 fb_version; /* == 1, even => nfs_fhbase_old */
- __u8 fb_auth_type;
- __u8 fb_fsid_type;
- __u8 fb_fileid_type;
- __u32 fb_auth[1];
-/* __u32 fb_fsid[0]; floating */
-/* __u32 fb_fileid[0]; floating */
-};
-
-struct knfsd_fh {
- unsigned int fh_size; /* significant for NFSv3.
- * Points to the current size while building
- * a new file handle
- */
- union {
- struct nfs_fhbase_old fh_old;
- __u32 fh_pad[NFS4_FHSIZE/4];
- struct nfs_fhbase_new fh_new;
- } fh_base;
-};
-
-#define ofh_dcookie fh_base.fh_old.fb_dcookie
-#define ofh_ino fh_base.fh_old.fb_ino
-#define ofh_dirino fh_base.fh_old.fb_dirino
-#define ofh_dev fh_base.fh_old.fb_dev
-#define ofh_xdev fh_base.fh_old.fb_xdev
-#define ofh_xino fh_base.fh_old.fb_xino
-#define ofh_generation fh_base.fh_old.fb_generation
-
-#define fh_version fh_base.fh_new.fb_version
-#define fh_fsid_type fh_base.fh_new.fb_fsid_type
-#define fh_auth_type fh_base.fh_new.fb_auth_type
-#define fh_fileid_type fh_base.fh_new.fb_fileid_type
-#define fh_fsid fh_base.fh_new.fb_auth
-
-/* Do not use, provided for userspace compatiblity. */
-#define fh_auth fh_base.fh_new.fb_auth
-
-#endif /* _UAPI_LINUX_NFSD_FH_H */
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
new file mode 100644
index 000000000000..3cd044edee5d
--- /dev/null
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NFSD_NETLINK_H
+#define _UAPI_LINUX_NFSD_NETLINK_H
+
+#define NFSD_FAMILY_NAME "nfsd"
+#define NFSD_FAMILY_VERSION 1
+
+enum {
+ NFSD_A_RPC_STATUS_XID = 1,
+ NFSD_A_RPC_STATUS_FLAGS,
+ NFSD_A_RPC_STATUS_PROG,
+ NFSD_A_RPC_STATUS_VERSION,
+ NFSD_A_RPC_STATUS_PROC,
+ NFSD_A_RPC_STATUS_SERVICE_TIME,
+ NFSD_A_RPC_STATUS_PAD,
+ NFSD_A_RPC_STATUS_SADDR4,
+ NFSD_A_RPC_STATUS_DADDR4,
+ NFSD_A_RPC_STATUS_SADDR6,
+ NFSD_A_RPC_STATUS_DADDR6,
+ NFSD_A_RPC_STATUS_SPORT,
+ NFSD_A_RPC_STATUS_DPORT,
+ NFSD_A_RPC_STATUS_COMPOUND_OPS,
+
+ __NFSD_A_RPC_STATUS_MAX,
+ NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1)
+};
+
+enum {
+ NFSD_CMD_RPC_STATUS_GET = 1,
+
+ __NFSD_CMD_MAX,
+ NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
+};
+
+#endif /* _UAPI_LINUX_NFSD_NETLINK_H */
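
This generated header supplies names and command numbers but not the runtime family id, which has to be resolved through the generic-netlink controller first. A hedged sketch of that resolution request, using only uapi definitions from <linux/genetlink.h>; struct family_req is an illustrative name:

#include <string.h>
#include <linux/genetlink.h>
#include <linux/netlink.h>
#include <linux/nfsd_netlink.h>

/* CTRL_CMD_GETFAMILY request resolving the id for NFSD_FAMILY_NAME. */
struct family_req {
	struct nlmsghdr nlh;
	struct genlmsghdr gnlh;
	struct nlattr name_attr;
	char name[8];		/* room for "nfsd" plus padding */
};

static void build_family_req(struct family_req *req)
{
	memset(req, 0, sizeof(*req));
	req->nlh.nlmsg_len = sizeof(*req);
	req->nlh.nlmsg_type = GENL_ID_CTRL;
	req->nlh.nlmsg_flags = NLM_F_REQUEST;
	req->gnlh.cmd = CTRL_CMD_GETFAMILY;
	req->gnlh.version = 1;
	req->name_attr.nla_type = CTRL_ATTR_FAMILY_NAME;
	req->name_attr.nla_len = NLA_HDRLEN + sizeof(NFSD_FAMILY_NAME);
	strcpy(req->name, NFSD_FAMILY_NAME);
}
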
diff --git a/include/uapi/linux/nitro_enclaves.h b/include/uapi/linux/nitro_enclaves.h
new file mode 100644
index 000000000000..e808f5ba124d
--- /dev/null
+++ b/include/uapi/linux/nitro_enclaves.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _UAPI_LINUX_NITRO_ENCLAVES_H_
+#define _UAPI_LINUX_NITRO_ENCLAVES_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: Nitro Enclaves (NE) Kernel Driver Interface
+ */
+
+/**
+ * NE_CREATE_VM - The command is used to create a slot that is associated with
+ * an enclave VM.
+ * The generated unique slot id is an output parameter.
+ * The ioctl can be invoked on the /dev/nitro_enclaves fd, before
+ * setting any resources, such as memory and vCPUs, for an
+ * enclave. Memory and vCPUs are set for the slot mapped to an enclave.
+ * A NE CPU pool has to be set before calling this function. The
+ * pool can be set after the NE driver is loaded, using
+ * /sys/module/nitro_enclaves/parameters/ne_cpus.
+ * Its format is detailed in the cpu-lists section:
+ * https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html
+ * CPU 0 and its siblings have to remain available for the
+ * primary / parent VM, so they cannot be set for enclaves. Full
+ * CPU core(s), from the same NUMA node, need(s) to be included
+ * in the CPU pool.
+ *
+ * Context: Process context.
+ * Return:
+ * * Enclave file descriptor - Enclave file descriptor used with
+ * ioctl calls to set vCPUs and memory
+ * regions, then start the enclave.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_to_user() failure.
+ * * ENOMEM - Memory allocation failure for internal
+ * bookkeeping variables.
+ * * NE_ERR_NO_CPUS_AVAIL_IN_POOL - No NE CPU pool set / no CPUs available
+ * in the pool.
+ * * Error codes from get_unused_fd_flags() and anon_inode_getfile().
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_CREATE_VM _IOR(0xAE, 0x20, __u64)
+
+/**
+ * NE_ADD_VCPU - The command is used to set a vCPU for an enclave. The vCPU can
+ * be auto-chosen from the NE CPU pool or it can be set by the
+ * caller, with the note that it needs to be available in the NE
+ * CPU pool. Full CPU core(s), from the same NUMA node, need(s) to
+ * be associated with an enclave.
+ * The vCPU id is an input / output parameter. If its value is 0,
+ * then a CPU is chosen from the enclave CPU pool and returned via
+ * this parameter.
+ * The ioctl can be invoked on the enclave fd, before an enclave
+ * is started.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() / copy_to_user() failure.
+ * * ENOMEM - Memory allocation failure for internal
+ * bookkeeping variables.
+ * * EIO - Current task mm is not the same as the one
+ * that created the enclave.
+ * * NE_ERR_NO_CPUS_AVAIL_IN_POOL - No CPUs available in the NE CPU pool.
+ * * NE_ERR_VCPU_ALREADY_USED - The provided vCPU is already used.
+ * * NE_ERR_VCPU_NOT_IN_CPU_POOL - The provided vCPU is not available in the
+ * NE CPU pool.
+ * * NE_ERR_VCPU_INVALID_CPU_CORE - The core id of the provided vCPU is invalid
+ * or out of range.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state
+ * (init = before being started).
+ * * NE_ERR_INVALID_VCPU - The provided vCPU is not in the available
+ * CPUs range.
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_ADD_VCPU _IOWR(0xAE, 0x21, __u32)
+
+/**
+ * NE_GET_IMAGE_LOAD_INFO - The command is used to get information needed for
+ * in-memory enclave image loading e.g. offset in
+ * enclave memory to start placing the enclave image.
+ * The image load info is an input / output parameter.
+ * It includes info provided by the caller - flags -
+ * and returns the offset in enclave memory where to
+ * start placing the enclave image.
+ * The ioctl can be invoked on the enclave fd, before
+ * an enclave is started.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() / copy_to_user() failure.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state (init =
+ * before being started).
+ * * NE_ERR_INVALID_FLAG_VALUE - The value of the provided flag is invalid.
+ */
+#define NE_GET_IMAGE_LOAD_INFO _IOWR(0xAE, 0x22, struct ne_image_load_info)
+
+/**
+ * NE_SET_USER_MEMORY_REGION - The command is used to set a memory region for an
+ * enclave, given the allocated memory from the
+ * userspace. Enclave memory needs to be from the
+ * same NUMA node as the enclave CPUs.
+ * The user memory region is an input parameter. It
+ * includes info provided by the caller - flags,
+ * memory size and userspace address.
+ * The ioctl can be invoked on the enclave fd,
+ * before an enclave is started.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() failure.
+ * * EINVAL - Invalid physical memory region(s) e.g.
+ * unaligned address.
+ * * EIO - Current task mm is not the same as
+ * the one that created the enclave.
+ * * ENOMEM - Memory allocation failure for internal
+ * bookkeeping variables.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state
+ * (init = before being started).
+ * * NE_ERR_INVALID_MEM_REGION_SIZE - The memory size of the region is not
+ * a multiple of 2 MiB.
+ * * NE_ERR_INVALID_MEM_REGION_ADDR - Invalid user space address given.
+ * * NE_ERR_UNALIGNED_MEM_REGION_ADDR - Unaligned user space address given.
+ * * NE_ERR_MEM_REGION_ALREADY_USED - The memory region is already used.
+ * * NE_ERR_MEM_NOT_HUGE_PAGE - The memory region is not backed by
+ * huge pages.
+ * * NE_ERR_MEM_DIFFERENT_NUMA_NODE - The memory region is not from the same
+ * NUMA node as the CPUs.
+ * * NE_ERR_MEM_MAX_REGIONS - The number of memory regions set for
+ * the enclave reached maximum.
+ * * NE_ERR_INVALID_PAGE_SIZE - The memory region is not backed by
+ * pages whose size is a multiple of 2 MiB.
+ * * NE_ERR_INVALID_FLAG_VALUE - The value of the provided flag is invalid.
+ * * Error codes from get_user_pages().
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_SET_USER_MEMORY_REGION _IOW(0xAE, 0x23, struct ne_user_memory_region)
+
+/**
+ * NE_START_ENCLAVE - The command is used to trigger enclave start after the
+ * enclave resources, such as memory and CPU, have been set.
+ * The enclave start info is an input / output parameter. It
+ * includes info provided by the caller - enclave cid and
+ * flags - and returns the cid (if input cid is 0).
+ * The ioctl can be invoked on the enclave fd, after an
+ * enclave slot is created and resources, such as memory and
+ * vCPUs are set for an enclave.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 - Logic successfully completed.
+ * * -1 - There was a failure in the ioctl logic.
+ * On failure, errno is set to:
+ * * EFAULT - copy_from_user() / copy_to_user() failure.
+ * * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state
+ * (init = before being started).
+ * * NE_ERR_NO_MEM_REGIONS_ADDED - No memory regions are set.
+ * * NE_ERR_NO_VCPUS_ADDED - No vCPUs are set.
+ * * NE_ERR_FULL_CORES_NOT_USED - Full core(s) not set for the enclave.
+ * * NE_ERR_ENCLAVE_MEM_MIN_SIZE - Enclave memory is less than minimum
+ * memory size (64 MiB).
+ * * NE_ERR_INVALID_FLAG_VALUE - The value of the provided flag is invalid.
+ * * NE_ERR_INVALID_ENCLAVE_CID - The provided enclave CID is invalid.
+ * * Error codes from the NE PCI device request.
+ */
+#define NE_START_ENCLAVE _IOWR(0xAE, 0x24, struct ne_enclave_start_info)
+
+/**
+ * DOC: NE specific error codes
+ */
+
+/**
+ * NE_ERR_VCPU_ALREADY_USED - The provided vCPU is already used.
+ */
+#define NE_ERR_VCPU_ALREADY_USED (256)
+/**
+ * NE_ERR_VCPU_NOT_IN_CPU_POOL - The provided vCPU is not available in the
+ * NE CPU pool.
+ */
+#define NE_ERR_VCPU_NOT_IN_CPU_POOL (257)
+/**
+ * NE_ERR_VCPU_INVALID_CPU_CORE - The core id of the provided vCPU is invalid
+ * or out of range of the NE CPU pool.
+ */
+#define NE_ERR_VCPU_INVALID_CPU_CORE (258)
+/**
+ * NE_ERR_INVALID_MEM_REGION_SIZE - The user space memory region size is not
+ * a multiple of 2 MiB.
+ */
+#define NE_ERR_INVALID_MEM_REGION_SIZE (259)
+/**
+ * NE_ERR_INVALID_MEM_REGION_ADDR - The user space memory region address range
+ * is invalid.
+ */
+#define NE_ERR_INVALID_MEM_REGION_ADDR (260)
+/**
+ * NE_ERR_UNALIGNED_MEM_REGION_ADDR - The user space memory region address is
+ * not aligned.
+ */
+#define NE_ERR_UNALIGNED_MEM_REGION_ADDR (261)
+/**
+ * NE_ERR_MEM_REGION_ALREADY_USED - The user space memory region is already used.
+ */
+#define NE_ERR_MEM_REGION_ALREADY_USED (262)
+/**
+ * NE_ERR_MEM_NOT_HUGE_PAGE - The user space memory region is not backed by
+ * contiguous physical huge page(s).
+ */
+#define NE_ERR_MEM_NOT_HUGE_PAGE (263)
+/**
+ * NE_ERR_MEM_DIFFERENT_NUMA_NODE - The user space memory region is backed by
+ * pages from different NUMA nodes than the CPUs.
+ */
+#define NE_ERR_MEM_DIFFERENT_NUMA_NODE (264)
+/**
+ * NE_ERR_MEM_MAX_REGIONS - The maximum number of supported memory regions
+ * per enclave has been reached.
+ */
+#define NE_ERR_MEM_MAX_REGIONS (265)
+/**
+ * NE_ERR_NO_MEM_REGIONS_ADDED - The command to start an enclave is triggered
+ * and no memory regions are added.
+ */
+#define NE_ERR_NO_MEM_REGIONS_ADDED (266)
+/**
+ * NE_ERR_NO_VCPUS_ADDED - The command to start an enclave is triggered and no
+ * vCPUs are added.
+ */
+#define NE_ERR_NO_VCPUS_ADDED (267)
+/**
+ * NE_ERR_ENCLAVE_MEM_MIN_SIZE - The enclave memory size is lower than the
+ * minimum supported.
+ */
+#define NE_ERR_ENCLAVE_MEM_MIN_SIZE (268)
+/**
+ * NE_ERR_FULL_CORES_NOT_USED - The command to start an enclave is triggered and
+ * full CPU cores are not set.
+ */
+#define NE_ERR_FULL_CORES_NOT_USED (269)
+/**
+ * NE_ERR_NOT_IN_INIT_STATE - The enclave is not in init state when setting
+ * resources or triggering start.
+ */
+#define NE_ERR_NOT_IN_INIT_STATE (270)
+/**
+ * NE_ERR_INVALID_VCPU - The provided vCPU is out of range of the available CPUs.
+ */
+#define NE_ERR_INVALID_VCPU (271)
+/**
+ * NE_ERR_NO_CPUS_AVAIL_IN_POOL - The command to create an enclave is triggered
+ * and no CPUs are available in the pool.
+ */
+#define NE_ERR_NO_CPUS_AVAIL_IN_POOL (272)
+/**
+ * NE_ERR_INVALID_PAGE_SIZE - The user space memory region is not backed by pages
+ * whose size is a multiple of 2 MiB.
+ */
+#define NE_ERR_INVALID_PAGE_SIZE (273)
+/**
+ * NE_ERR_INVALID_FLAG_VALUE - The provided flag value is invalid.
+ */
+#define NE_ERR_INVALID_FLAG_VALUE (274)
+/**
+ * NE_ERR_INVALID_ENCLAVE_CID - The provided enclave CID is invalid, either
+ * being a well-known value or the CID of the
+ * parent / primary VM.
+ */
+#define NE_ERR_INVALID_ENCLAVE_CID (275)
+
+/**
+ * DOC: Image load info flags
+ */
+
+/**
+ * NE_EIF_IMAGE - Enclave Image Format (EIF)
+ */
+#define NE_EIF_IMAGE (0x01)
+
+#define NE_IMAGE_LOAD_MAX_FLAG_VAL (0x02)
+
+/**
+ * struct ne_image_load_info - Info necessary for in-memory enclave image
+ * loading (in / out).
+ * @flags: Flags to determine the enclave image type
+ * (e.g. Enclave Image Format - EIF) (in).
+ * @memory_offset: Offset in enclave memory where to start placing the
+ * enclave image (out).
+ */
+struct ne_image_load_info {
+ __u64 flags;
+ __u64 memory_offset;
+};
+
+/**
+ * DOC: User memory region flags
+ */
+
+/**
+ * NE_DEFAULT_MEMORY_REGION - Memory region for enclave general usage.
+ */
+#define NE_DEFAULT_MEMORY_REGION (0x00)
+
+#define NE_MEMORY_REGION_MAX_FLAG_VAL (0x01)
+
+/**
+ * struct ne_user_memory_region - Memory region to be set for an enclave (in).
+ * @flags: Flags to determine the usage for the memory region (in).
+ * @memory_size: The size, in bytes, of the memory region to be set for
+ * an enclave (in).
+ * @userspace_addr: The start address of the userspace allocated memory of
+ * the memory region to set for an enclave (in).
+ */
+struct ne_user_memory_region {
+ __u64 flags;
+ __u64 memory_size;
+ __u64 userspace_addr;
+};
+
+/**
+ * DOC: Enclave start info flags
+ */
+
+/**
+ * NE_ENCLAVE_PRODUCTION_MODE - Start enclave in production mode.
+ */
+#define NE_ENCLAVE_PRODUCTION_MODE (0x00)
+/**
+ * NE_ENCLAVE_DEBUG_MODE - Start enclave in debug mode.
+ */
+#define NE_ENCLAVE_DEBUG_MODE (0x01)
+
+#define NE_ENCLAVE_START_MAX_FLAG_VAL (0x02)
+
+/**
+ * struct ne_enclave_start_info - Setup info necessary for enclave start (in / out).
+ * @flags: Flags for the enclave to start with (e.g. debug mode) (in).
+ * @enclave_cid: Context ID (CID) for the enclave vsock device. If 0 as
+ * input, the CID is autogenerated by the hypervisor and
+ * returned back as output by the driver (in / out).
+ */
+struct ne_enclave_start_info {
+ __u64 flags;
+ __u64 enclave_cid;
+};
+
+#endif /* _UAPI_LINUX_NITRO_ENCLAVES_H_ */
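
Taken together, the ioctls above form a short userspace flow: create a slot, add vCPUs, set memory, start. A hedged sketch with error handling trimmed; start_enclave_sketch() is an illustrative name, and a real enclave needs at least 64 MiB of memory and the sibling vCPUs of full cores:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/nitro_enclaves.h>

/* Minimal flow: slot -> vCPU -> memory region -> start. */
static int start_enclave_sketch(void)
{
	int ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
	__u64 slot_uid = 0;	/* filled in by NE_CREATE_VM */
	__u32 vcpu_id = 0;	/* 0 = auto-pick from the NE CPU pool */
	struct ne_user_memory_region region = {
		.flags = NE_DEFAULT_MEMORY_REGION,
		.memory_size = 2UL * 1024 * 1024,
	};
	struct ne_enclave_start_info start = {
		.flags = NE_ENCLAVE_DEBUG_MODE,
	};
	int enclave_fd;

	if (ne_dev_fd < 0)
		return -1;
	enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);
	if (enclave_fd < 0)
		return -1;

	if (ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id) < 0)
		goto err;

	region.userspace_addr = (__u64)(unsigned long)
		mmap(NULL, region.memory_size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	/* the enclave image would be copied in here, at the offset
	 * reported by NE_GET_IMAGE_LOAD_INFO */
	if (ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &region) < 0)
		goto err;

	if (ioctl(enclave_fd, NE_START_ENCLAVE, &start) < 0)
		goto err;

	return enclave_fd;	/* start.enclave_cid holds the vsock CID */
err:
	close(enclave_fd);
	return -1;
}
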
diff --git a/include/uapi/linux/nl80211-vnd-intel.h b/include/uapi/linux/nl80211-vnd-intel.h
new file mode 100644
index 000000000000..4ed7d0b24512
--- /dev/null
+++ b/include/uapi/linux/nl80211-vnd-intel.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __VENDOR_CMD_INTEL_H__
+#define __VENDOR_CMD_INTEL_H__
+
+#define INTEL_OUI 0x001735
+
+/**
+ * enum iwl_mvm_vendor_cmd - supported vendor commands
+ * @IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO: reports CSME connection info.
+ * @IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP: asks for ownership on the device.
+ * This is useful when the CSME firmware owns the device and the kernel
+ * wants to use it. If the CSME firmware has no active connection, the
+ * kernel will get ownership of the device on its own.
+ * When the CSME firmware has an active connection, the user space
+ * involvement is required. The kernel will assert the RFKILL signal with
+ * the "device not owned" reason so that nobody can touch the device. Then
+ * the user space can run the following flow to be able to get connected
+ * to the very same AP the CSME firmware is currently connected to:
+ *
+ * 1) The user space (NetworkManager) boots and sees that the device is
+ * in RFKILL because the host doesn't own the device
+ * 2) The user space asks the kernel what AP the CSME firmware is
+ * connected to (with %IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO)
+ * 3) The user space checks if it has a profile that matches the reply
+ * from the CSME firmware
+ * 4) The user space installs a network to the wpa_supplicant with a
+ * specific BSSID and a specific frequency
+ * 5) The user space prevents any type of full scan
+ * 6) The user space asks iwlmei to request ownership on the device (with
+ * this command)
+ * 7) iwlmei requests ownership from the CSME firmware
+ * 8) The CSME firmware grants ownership
+ * 9) iwlmei tells iwlwifi to lift the RFKILL
+ * 10) RFKILL OFF is reported to user space
+ * 11) The host boots the device, loads the firmware, and connects to a
+ * specific BSSID without scanning, bringing up IP as fast as it can
+ * 12) The host reports to the CSME firmware that there is a connection
+ * 13) The TCP connection is preserved and the host has connectivity
+ *
+ * @IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT: notifies if roaming is allowed.
+ * It contains a &IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN and a
+ * &IWL_MVM_VENDOR_ATTR_VIF_ADDR attributes.
+ */
+
+enum iwl_mvm_vendor_cmd {
+ IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO = 0x2d,
+ IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP = 0x30,
+ IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT = 0x32,
+};
+
+enum iwl_vendor_auth_akm_mode {
+ IWL_VENDOR_AUTH_OPEN,
+ IWL_VENDOR_AUTH_RSNA = 0x6,
+ IWL_VENDOR_AUTH_RSNA_PSK,
+ IWL_VENDOR_AUTH_SAE = 0x9,
+ IWL_VENDOR_AUTH_MAX,
+};
+
+/**
+ * enum iwl_mvm_vendor_attr - attributes used in vendor commands
+ * @__IWL_MVM_VENDOR_ATTR_INVALID: attribute 0 is invalid
+ * @IWL_MVM_VENDOR_ATTR_VIF_ADDR: interface MAC address
+ * @IWL_MVM_VENDOR_ATTR_ADDR: MAC address
+ * @IWL_MVM_VENDOR_ATTR_SSID: SSID (binary attribute, 0..32 octets)
+ * @IWL_MVM_VENDOR_ATTR_STA_CIPHER: the cipher to use for the station with the
+ * mac address specified in &IWL_MVM_VENDOR_ATTR_ADDR.
+ * @IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN: u8 attribute. Indicates whether
+ * roaming is forbidden or not. Value 1 means roaming is forbidden,
+ * 0 means roaming is allowed.
+ * @IWL_MVM_VENDOR_ATTR_AUTH_MODE: u32 attribute. Authentication mode type
+ * as specified in &enum iwl_vendor_auth_akm_mode.
+ * @IWL_MVM_VENDOR_ATTR_CHANNEL_NUM: u8 attribute. Contains channel number.
+ * @IWL_MVM_VENDOR_ATTR_BAND: u8 attribute.
+ * 0 for the 2.4 GHz band, 1 for the 5.2 GHz band and 2 for the 6 GHz band.
+ * @IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL: u32 attribute. Channel number of
+ * collocated AP. Relevant for 6GHz AP info.
+ * @IWL_MVM_VENDOR_ATTR_COLLOC_ADDR: MAC address of a collocated AP.
+ * Relevant for 6GHz AP info.
+ *
+ * @NUM_IWL_MVM_VENDOR_ATTR: number of vendor attributes
+ * @MAX_IWL_MVM_VENDOR_ATTR: highest vendor attribute number
+ *
+ */
+enum iwl_mvm_vendor_attr {
+ __IWL_MVM_VENDOR_ATTR_INVALID = 0x00,
+ IWL_MVM_VENDOR_ATTR_VIF_ADDR = 0x02,
+ IWL_MVM_VENDOR_ATTR_ADDR = 0x0a,
+ IWL_MVM_VENDOR_ATTR_SSID = 0x3d,
+ IWL_MVM_VENDOR_ATTR_STA_CIPHER = 0x51,
+ IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN = 0x64,
+ IWL_MVM_VENDOR_ATTR_AUTH_MODE = 0x65,
+ IWL_MVM_VENDOR_ATTR_CHANNEL_NUM = 0x66,
+ IWL_MVM_VENDOR_ATTR_BAND = 0x69,
+ IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL = 0x70,
+ IWL_MVM_VENDOR_ATTR_COLLOC_ADDR = 0x71,
+
+ NUM_IWL_MVM_VENDOR_ATTR,
+ MAX_IWL_MVM_VENDOR_ATTR = NUM_IWL_MVM_VENDOR_ATTR - 1,
+};
+
+#endif /* __VENDOR_CMD_INTEL_H__ */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 631f3a997b3c..f23ecbdd84a2 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -72,7 +72,7 @@
* For drivers supporting TDLS with external setup (WIPHY_FLAG_SUPPORTS_TDLS
* and WIPHY_FLAG_TDLS_EXTERNAL_SETUP), the station lifetime is as follows:
* - a setup station entry is added, not yet authorized, without any rate
- * or capability information, this just exists to avoid race conditions
+ * or capability information; this just exists to avoid race conditions
* - when the TDLS setup is done, a single NL80211_CMD_SET_STATION is valid
* to add rate and capability information to the station and at the same
* time mark it authorized.
@@ -87,7 +87,7 @@
* DOC: Frame transmission/registration support
*
* Frame transmission and registration support exists to allow userspace
- * management entities such as wpa_supplicant react to management frames
+ * management entities such as wpa_supplicant to react to management frames
* that are not being handled by the kernel. This includes, for example,
* certain classes of action frames that cannot be handled in the kernel
* for various reasons.
@@ -113,7 +113,7 @@
*
* Frame transmission allows userspace to send for example the required
* responses to action frames. It is subject to some sanity checking,
- * but many frames can be transmitted. When a frame was transmitted, its
+ * but many frames can be transmitted. When a frame is transmitted, its
* status is indicated to the sending socket.
*
* For more technical details, see the corresponding command descriptions
@@ -123,7 +123,7 @@
/**
* DOC: Virtual interface / concurrency capabilities
*
- * Some devices are able to operate with virtual MACs, they can have
+ * Some devices are able to operate with virtual MACs; they can have
* more than one virtual interface. The capability handling for this
* is a bit complex though, as there may be a number of restrictions
* on the types of concurrency that are supported.
@@ -135,7 +135,7 @@
* Once concurrency is desired, more attributes must be observed:
* To start with, since some interface types are purely managed in
* software, like the AP-VLAN type in mac80211 for example, there's
- * an additional list of these, they can be added at any time and
+ * an additional list of these; they can be added at any time and
* are only restricted by some semantic restrictions (e.g. AP-VLAN
* cannot be added without a corresponding AP interface). This list
* is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute.
@@ -164,17 +164,17 @@
* Packet coalesce feature helps to reduce number of received interrupts
* to host by buffering these packets in firmware/hardware for some
* predefined time. Received interrupt will be generated when one of the
- * following events occur.
+ * following events occurs.
* a) Expiration of hardware timer whose expiration time is set to maximum
* coalescing delay of matching coalesce rule.
- * b) Coalescing buffer in hardware reaches it's limit.
+ * b) Coalescing buffer in hardware reaches its limit.
* c) Packet doesn't match any of the configured coalesce rules.
*
* User needs to configure following parameters for creating a coalesce
* rule.
* a) Maximum coalescing delay
* b) List of packet patterns which needs to be matched
- * c) Condition for coalescence. pattern 'match' or 'no match'
+ * c) Condition for coalescence: pattern 'match' or 'no match'
* Multiple such rules can be created.
*/
@@ -213,7 +213,7 @@
/**
* DOC: FILS shared key authentication offload
*
- * FILS shared key authentication offload can be advertized by drivers by
+ * FILS shared key authentication offload can be advertised by drivers by
* setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
* FILS shared key authentication offload should be able to construct the
* authentication and association frames for FILS shared key authentication and
@@ -239,7 +239,7 @@
* The PMKSA can be maintained in userspace persistently so that it can be used
* later after reboots or wifi turn off/on also.
*
- * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
* capable AP supporting PMK caching. It specifies the scope within which the
* PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
* %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
@@ -252,9 +252,13 @@
* DOC: SAE authentication offload
*
* By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they
- * support offloading SAE authentication for WPA3-Personal networks. In
- * %NL80211_CMD_CONNECT the password for SAE should be specified using
- * %NL80211_ATTR_SAE_PASSWORD.
+ * support offloading SAE authentication for WPA3-Personal networks in station
+ * mode. Similarly @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP flag can be set by
+ * drivers indicating the offload support in AP mode.
+ *
+ * The password for SAE should be specified using %NL80211_ATTR_SAE_PASSWORD in
+ * %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP for station and AP mode
+ * respectively.
*/
/**
@@ -286,17 +290,60 @@
* If the configuration needs to be applied for specific peer then the MAC
* address of the peer needs to be passed in %NL80211_ATTR_MAC, otherwise the
* configuration will be applied for all the connected peers in the vif except
- * any peers that have peer specific configuration for the TID by default; if
- * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer specific values
+ * any peers that have peer-specific configuration for the TID by default; if
+ * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer-specific values
* will be overwritten.
*
- * All this configuration is valid only for STA's current connection
- * i.e. the configuration will be reset to default when the STA connects back
+ * All this configuration is valid only for STA's current connection,
+ * i.e., the configuration will be reset to default when the STA connects back
* after disconnection/roaming, and this configuration will be cleared when
* the interface goes down.
*/
/**
+ * DOC: FILS shared key crypto offload
+ *
+ * This feature is applicable to drivers running in AP mode.
+ *
+ * FILS shared key crypto offload can be advertised by drivers by setting
+ * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD flag. The drivers that support
+ * FILS shared key crypto offload should be able to encrypt and decrypt
+ * association frames for FILS shared key authentication as per IEEE 802.11ai.
+ * With this capability, for FILS key derivation, drivers depend on userspace.
+ *
+ * After FILS key derivation, userspace shares the FILS AAD details with the
+ * driver and the driver stores the same to use in decryption of association
+ * request and in encryption of association response. The below parameters
+ * should be given to the driver in %NL80211_CMD_SET_FILS_AAD.
+ * %NL80211_ATTR_MAC - STA MAC address, used for storing FILS AAD per STA
+ * %NL80211_ATTR_FILS_KEK - Used for encryption or decryption
+ * %NL80211_ATTR_FILS_NONCES - Used for encryption or decryption
+ * (STA Nonce 16 bytes followed by AP Nonce 16 bytes)
+ *
+ * Once the association is done, the driver cleans the FILS AAD data.
+ */
+
+/**
+ * DOC: Multi-Link Operation
+ *
+ * In Multi-Link Operation, a connection between two MLDs utilizes multiple
+ * links. To use this in nl80211, various commands and responses now need
+ * to or will include the new %NL80211_ATTR_MLO_LINKS attribute.
+ * Additionally, various commands that need to operate on a specific link
+ * now need to be given the %NL80211_ATTR_MLO_LINK_ID attribute, e.g. to
+ * use %NL80211_CMD_START_AP or similar functions.
+ */
+
+/**
+ * DOC: OWE DH IE handling offload
+ *
+ * By setting @NL80211_EXT_FEATURE_OWE_OFFLOAD flag, drivers can indicate
+ * to kernel/application space that they handle the DH IE themselves. When
+ * this flag is advertised, the driver/device will take care of DH IE
+ * inclusion and of processing the peer DH IE to generate the PMK.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -333,17 +380,28 @@
* @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from
* userspace to request deletion of a virtual interface, then requires
- * attribute %NL80211_ATTR_IFINDEX.
+ * attribute %NL80211_ATTR_IFINDEX. If multiple BSSID advertisements are
+ * enabled using %NL80211_ATTR_MBSSID_CONFIG and %NL80211_ATTR_MBSSID_ELEMS,
+ * and this command is used for the transmitting interface, then all
+ * the non-transmitting interfaces are deleted as well.
*
* @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified
- * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC.
+ * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. %NL80211_ATTR_MAC
+ * represents peer's MLD address for MLO pairwise key. For MLO group key,
+ * the link is identified by %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT,
* %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD.
+ * For MLO connection, the link to set default key is identified by
+ * %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
* %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER,
- * and %NL80211_ATTR_KEY_SEQ attributes.
+ * and %NL80211_ATTR_KEY_SEQ attributes. %NL80211_ATTR_MAC represents
+ * peer's MLD address for MLO pairwise key. The link to add MLO
+ * group key is identified by %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX
- * or %NL80211_ATTR_MAC.
+ * or %NL80211_ATTR_MAC. %NL80211_ATTR_MAC represents peer's MLD address
+ * for MLO pairwise key. The link to delete group key is identified by
+ * %NL80211_ATTR_MLO_LINK_ID.
*
* @NL80211_CMD_GET_BEACON: (not used)
* @NL80211_CMD_SET_BEACON: change the beacon on an access point interface
@@ -375,11 +433,13 @@
* interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
- * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
+ * by %NL80211_ATTR_IFINDEX. For MLD station, MLD address is used in
+ * %NL80211_ATTR_MAC. %NL80211_ATTR_MGMT_SUBTYPE and
* %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
* of disconnection indication should be sent to the station
* (Deauthentication or Disassociation frame and reason code for that
- * frame).
+ * frame). %NL80211_ATTR_MLO_LINK_ID can optionally be used to remove
+ * stations that are connected and use that link as one of their links.
*
* @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
@@ -462,7 +522,7 @@
* %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is
* not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified,
* scheduled scan will run in an infinite loop with the specified interval.
- * These attributes are mutually exculsive,
+ * These attributes are mutually exclusive,
* i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if
* NL80211_ATTR_SCHED_SCAN_PLANS is defined.
* If for some reason scheduled scan is aborted by the driver, all scan
@@ -493,7 +553,7 @@
* %NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface
* is brought down while a scheduled scan was running.
*
- * @NL80211_CMD_GET_SURVEY: get survey resuls, e.g. channel occupation
+ * @NL80211_CMD_GET_SURVEY: get survey results, e.g. channel occupation
* or noise level
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
@@ -504,12 +564,13 @@
* using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
* %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
* authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
- * advertized by a FILS capable AP identifying the scope of PMKSA in an
+ * advertised by a FILS capable AP identifying the scope of PMKSA in an
* ESS.
* @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
* (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
* %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
- * authentication.
+ * authentication. Additionally, in the case of SAE and OWE offloads, the
+ * PMKSA entry can be deleted using %NL80211_ATTR_SSID.
* @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
*
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -548,7 +609,7 @@
* BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
* the SSID (mainly for association, but is included in authentication
* request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ +
- * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequence of the
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequency of the
* channel in MHz. %NL80211_ATTR_AUTH_TYPE is used to specify the
* authentication type. %NL80211_ATTR_IE is used to define IEs
* (VendorSpecificInfo, but also including RSN IE and FT IEs) to be added
@@ -647,14 +708,13 @@
* authentication/association or not receiving a response from the AP.
* Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as
* well to remain backwards compatible.
- * When establishing a security association, drivers that support 4 way
- * handshake offload should send %NL80211_CMD_PORT_AUTHORIZED event when
- * the 4 way handshake is completed successfully.
* @NL80211_CMD_ROAM: Notification indicating the card/driver roamed by itself.
- * When a security association was established with the new AP (e.g. if
- * the FT protocol was used for roaming or the driver completed the 4 way
- * handshake), this event should be followed by an
+ * When a security association was established on an 802.1X network using
+ * fast transition, this event should be followed by an
* %NL80211_CMD_PORT_AUTHORIZED event.
+ * Following a %NL80211_CMD_ROAM event userspace can issue
+ * %NL80211_CMD_GET_SCAN in order to obtain the scan information for the
+ * new BSS the card/driver roamed to.
* @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
* userspace that a connection was dropped by the AP or due to other
* reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and
@@ -724,6 +784,13 @@
* %NL80211_ATTR_CSA_C_OFFSETS_TX is an array of offsets to CSA
* counters which will be updated to the current value. This attribute
* is used during CSA period.
+ * For TX on an MLD, the frequency can be omitted and the link ID
+ * specified, or if transmitting to a known peer MLD (with MLD addresses
+ * in the frame) both can be omitted and the link will be selected by
+ * lower layers.
+ * For RX notification, %NL80211_ATTR_RX_HW_TIMESTAMP may be included to
+ * indicate the frame RX timestamp and %NL80211_ATTR_TX_HW_TIMESTAMP may
+ * be included to indicate the ack TX timestamp.
* @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this
* command may be used with the corresponding cookie to cancel the wait
* time if it is known that it is no longer necessary. This command is
@@ -734,7 +801,9 @@
* transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies
* the TX command and %NL80211_ATTR_FRAME includes the contents of the
* frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged
- * the frame.
+ * the frame. %NL80211_ATTR_TX_HW_TIMESTAMP may be included to indicate the
+ * frame TX timestamp and %NL80211_ATTR_RX_HW_TIMESTAMP may be included to
+ * indicate the ack RX timestamp.
* @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for
* backward compatibility.
*
@@ -749,7 +818,7 @@
* reached.
* @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ
* and the attributes determining channel width) the given interface
- * (identifed by %NL80211_ATTR_IFINDEX) shall operate on.
+ * (identified by %NL80211_ATTR_IFINDEX) shall operate on.
* In case multiple channels are supported by the device, the mechanism
* with which it switches channels is implementation-defined.
* When a monitor interface is given, it can only switch channel while
@@ -757,7 +826,8 @@
* of any other interfaces, and other interfaces will again take
* precedence when they are used.
*
- * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface.
+ * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface
+ * (no longer supported).
*
* @NL80211_CMD_SET_MULTICAST_TO_UNICAST: Configure if this AP should perform
* multicast to unicast conversion. When enabled, all multicast packets
@@ -820,7 +890,7 @@
* inform userspace of the new replay counter.
*
* @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace
- * of PMKSA caching dandidates.
+ * of PMKSA caching candidates.
*
* @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
* In addition, this can be used as an event to request userspace to take
@@ -856,7 +926,7 @@
*
* @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
* by sending a null data frame to it and reporting when the frame is
- * acknowleged. This is used to allow timing out inactive clients. Uses
+ * acknowledged. This is used to allow timing out inactive clients. Uses
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
* direct reply with an %NL80211_ATTR_COOKIE that is later used to match
* up the event with the request. The event includes the same data and
@@ -1067,19 +1137,27 @@
* @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
* configured PMK for the authenticator address identified by
* %NL80211_ATTR_MAC.
- * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way
- * handshake was completed successfully by the driver. The BSSID is
- * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake
- * offload should send this event after indicating 802.11 association with
- * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed
- * %NL80211_CMD_DISCONNECT should be indicated instead.
- *
+ * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates the port is authorized
+ * and open for regular data traffic. For STA/P2P-client, this event is sent
+ * with the AP MAC address; for AP/P2P-GO, the event carries the
+ * STA/P2P-client MAC address.
+ * Drivers that support 4 way handshake offload should send this event for
+ * STA/P2P-client after successful 4-way HS or after 802.1X FT following
+ * NL80211_CMD_CONNECT or NL80211_CMD_ROAM. Drivers using AP/P2P-GO 4-way
+ * handshake offload should send this event on successful completion of
+ * 4-way handshake with the peer (STA/P2P-client).
* @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
* and RX notification. This command is used both as a request to transmit
* a control port frame and as a notification that a control port frame
* has been received. %NL80211_ATTR_FRAME is used to specify the
* frame contents. The frame is the raw EAPoL data, without ethernet or
* 802.11 headers.
+ * For an MLD transmitter, the %NL80211_ATTR_MLO_LINK_ID may be given and
+ * its effect will depend on the destination: If the destination is known
+ * to be an MLD, this will be used as a hint to select the link to transmit
+ * the frame on. If the destination is not an MLD, this will select the
+ * link to transmit on, and the source address will be set to the link
+ * address of that link.
* When used as an event indication %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT and %NL80211_ATTR_MAC are added
* indicating the protocol type of the received frame; whether the frame
@@ -1104,6 +1182,23 @@
* %NL80211_ATTR_STATUS_CODE attribute in %NL80211_CMD_EXTERNAL_AUTH
* command interface.
*
+ * The host driver sends the MLD address of the AP with %NL80211_ATTR_MLD_ADDR
+ * in the %NL80211_CMD_EXTERNAL_AUTH event to indicate to user space that it
+ * should enable MLO during the authentication offload in STA mode while
+ * connecting to MLD APs. The host driver should check the
+ * %NL80211_ATTR_MLO_SUPPORT flag capability in %NL80211_CMD_CONNECT to know
+ * whether user space supports enabling MLO during the authentication
+ * offload.
+ * User space should enable MLO during the authentication only when it
+ * receives the AP MLD address in the authentication offload request. User
+ * space shouldn't enable MLO when the authentication offload request
+ * doesn't indicate the AP MLD address even if the AP is MLO capable.
+ * User space should use %NL80211_ATTR_MLD_ADDR as the peer's MLD address and
+ * the interface address identified by %NL80211_ATTR_IFINDEX as the self MLD
+ * address. User space and the host driver use MLD addresses in the RA, TA and
+ * BSSID fields of the frames between them, and the host driver translates the
+ * MLD addresses to/from link addresses based on the link chosen for the
+ * authentication.
+ *
* Host driver reports this status on an authentication failure to the
* user space through the connect result as the user space would have
* initiated the connection through the connect request.
@@ -1179,6 +1274,66 @@
* includes the contents of the frame. %NL80211_ATTR_ACK flag is included
* if the recipient acknowledged the frame.
*
+ * @NL80211_CMD_SET_SAR_SPECS: SAR power limitation configuration is
+ * passed using %NL80211_ATTR_SAR_SPEC. %NL80211_ATTR_WIPHY is used to
+ * specify the wiphy index the configuration applies to.
+ *
+ * @NL80211_CMD_OBSS_COLOR_COLLISION: This notification is sent out whenever
+ * mac80211/drv detects a bss color collision.
+ *
+ * @NL80211_CMD_COLOR_CHANGE_REQUEST: This command is used to indicate that
+ * userspace wants to change the BSS color.
+ *
+ * @NL80211_CMD_COLOR_CHANGE_STARTED: Notify userland that a color change has
+ * started
+ *
+ * @NL80211_CMD_COLOR_CHANGE_ABORTED: Notify userland that the color change has
+ * been aborted
+ *
+ * @NL80211_CMD_COLOR_CHANGE_COMPLETED: Notify userland that the color change
+ * has completed
+ *
+ * @NL80211_CMD_SET_FILS_AAD: Set FILS AAD data to the driver using -
+ * &NL80211_ATTR_MAC - for STA MAC address
+ * &NL80211_ATTR_FILS_KEK - for KEK
+ * &NL80211_ATTR_FILS_NONCES - for FILS Nonces
+ * (STA Nonce 16 bytes followed by AP Nonce 16 bytes)
+ *
+ * @NL80211_CMD_ASSOC_COMEBACK: notification about a temporary association
+ * rejection with a comeback time. The event includes %NL80211_ATTR_MAC
+ * to describe the BSSID of the AP and %NL80211_ATTR_TIMEOUT to
+ * specify the timeout value.
+ *
+ * @NL80211_CMD_ADD_LINK: Add a new link to an interface. The
+ * %NL80211_ATTR_MLO_LINK_ID attribute is used for the new link.
+ * @NL80211_CMD_REMOVE_LINK: Remove a link from an interface. This may come
+ * without %NL80211_ATTR_MLO_LINK_ID as an easy way to remove all links
+ * in preparation for e.g. roaming to a regular (non-MLO) AP.
+ *
+ * @NL80211_CMD_ADD_LINK_STA: Add a link to an MLD station
+ * @NL80211_CMD_MODIFY_LINK_STA: Modify a link of an MLD station
+ * @NL80211_CMD_REMOVE_LINK_STA: Remove a link of an MLD station
+ *
+ * @NL80211_CMD_SET_HW_TIMESTAMP: Enable/disable HW timestamping of Timing
+ * measurement and Fine timing measurement frames. If %NL80211_ATTR_MAC
+ * is included, enable/disable HW timestamping only for frames to/from the
+ * specified MAC address. Otherwise enable/disable HW timestamping for
+ * all TM/FTM frames (including ones that were enabled with a specific MAC
+ * address). If %NL80211_ATTR_HW_TIMESTAMP_ENABLED is not included, disable
+ * HW timestamping.
+ * The number of peers that HW timestamping can be enabled for concurrently
+ * is indicated by %NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS. A usage sketch
+ * follows this comment block.
+ *
+ * @NL80211_CMD_LINKS_REMOVED: Notify userspace about the removal of STA MLD
+ * setup links due to the AP MLD removing the corresponding affiliated APs via
+ * Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide
+ * information about the removed STA MLD setup links.
+ *
+ * @NL80211_CMD_SET_TID_TO_LINK_MAPPING: Set the TID to Link Mapping for a
+ * non-AP MLD station. The %NL80211_ATTR_MLO_TTLM_DLINK and
+ * %NL80211_ATTR_MLO_TTLM_ULINK attributes are used to specify the
+ * TID to Link mapping for downlink/uplink traffic.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
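The usage sketch for %NL80211_CMD_SET_HW_TIMESTAMP referenced above, assuming libnl-3; the peer address is optional and error handling is elided:

/* Hypothetical sketch: enable HW timestamping of TM/FTM frames for one peer. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int enable_hw_timestamp(struct nl_sock *sk, int nl80211_id,
			       int ifindex, const unsigned char *peer)
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_SET_HW_TIMESTAMP, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	if (peer) /* omit the address to cover all TM/FTM frames */
		nla_put(msg, NL80211_ATTR_MAC, 6, peer);
	/* leaving this flag out would disable timestamping instead */
	nla_put_flag(msg, NL80211_ATTR_HW_TIMESTAMP_ENABLED);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err;
}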
@@ -1409,6 +1564,33 @@ enum nl80211_commands {
NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS,
+ NL80211_CMD_SET_SAR_SPECS,
+
+ NL80211_CMD_OBSS_COLOR_COLLISION,
+
+ NL80211_CMD_COLOR_CHANGE_REQUEST,
+
+ NL80211_CMD_COLOR_CHANGE_STARTED,
+ NL80211_CMD_COLOR_CHANGE_ABORTED,
+ NL80211_CMD_COLOR_CHANGE_COMPLETED,
+
+ NL80211_CMD_SET_FILS_AAD,
+
+ NL80211_CMD_ASSOC_COMEBACK,
+
+ NL80211_CMD_ADD_LINK,
+ NL80211_CMD_REMOVE_LINK,
+
+ NL80211_CMD_ADD_LINK_STA,
+ NL80211_CMD_MODIFY_LINK_STA,
+ NL80211_CMD_REMOVE_LINK_STA,
+
+ NL80211_CMD_SET_HW_TIMESTAMP,
+
+ NL80211_CMD_LINKS_REMOVED,
+
+ NL80211_CMD_SET_TID_TO_LINK_MAPPING,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1666,7 +1848,7 @@ enum nl80211_commands {
* using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
* to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
* flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth
- * frames are not forwared over the control port.
+ * frames are not forwarded over the control port.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -1752,8 +1934,9 @@ enum nl80211_commands {
* specify just a single bitrate, which is to be used for the beacon.
* The driver must also specify support for this with the extended
* features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
- * NL80211_EXT_FEATURE_BEACON_RATE_HT and
- * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
+ * NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT and
+ * NL80211_EXT_FEATURE_BEACON_RATE_HE.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1802,10 +1985,10 @@ enum nl80211_commands {
* bit. Depending on which antennas are selected in the bitmap, 802.11n
* drivers can derive which chainmasks to use (if all antennas belonging to
* a particular chain are disabled this chain should be disabled) and if
- * a chain has diversity antennas wether diversity should be used or not.
+ * a chain has diversity antennas whether diversity should be used or not.
* HT capabilities (STBC, TX Beamforming, Antenna selection) can be
* derived from the available chains after applying the antenna mask.
- * Non-802.11n drivers can derive wether to use diversity or not.
+ * Non-802.11n drivers can derive whether to use diversity or not.
* Drivers may reject configurations or RX/TX mask combinations they cannot
* support by returning -EINVAL.
*
@@ -1957,8 +2140,15 @@ enum nl80211_commands {
* @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
* probe-response frame. The DA field in the 802.11 header is zero-ed out,
* to be filled by the FW.
- * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
- * this feature. Currently, only supported in mac80211 drivers.
+ * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
+ * @NL80211_ATTR_DISABLE_VHT: Force VHT capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
+ * @NL80211_ATTR_DISABLE_HE: Force HE capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
* @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
* ATTR_HT_CAPABILITY to which attention should be paid.
* Currently, only mac80211 NICs support this feature.
@@ -2079,13 +2269,14 @@ enum nl80211_commands {
* until the channel switch event.
* @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
* must be blocked on the current channel (before the channel switch
- * operation).
+ * operation). Also included in the channel switch started event if quiet
+ * was requested by the AP.
* @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
* for the time while performing a channel switch.
- * @NL80211_ATTR_CSA_C_OFF_BEACON: An array of offsets (u16) to the channel
- * switch counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
- * @NL80211_ATTR_CSA_C_OFF_PRESP: An array of offsets (u16) to the channel
- * switch counters in the probe response (%NL80211_ATTR_PROBE_RESP).
+ * @NL80211_ATTR_CNTDWN_OFFS_BEACON: An array of offsets (u16) to the channel
+ * switch or color change counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CNTDWN_OFFS_PRESP: An array of offsets (u16) to the channel
+ * switch or color change counters in the probe response (%NL80211_ATTR_PROBE_RESP).
*
* @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
* As specified in the &enum nl80211_rxmgmt_flags.
@@ -2259,8 +2450,10 @@ enum nl80211_commands {
*
* @NL80211_ATTR_IFTYPE_EXT_CAPA: Nested attribute of the following attributes:
* %NL80211_ATTR_IFTYPE, %NL80211_ATTR_EXT_CAPA,
- * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per
- * interface type.
+ * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities and
+ * other interface-type specific capabilities per interface type. For MLO,
+ * %NL80211_ATTR_EML_CAPABILITY and %NL80211_ATTR_MLD_CAPA_AND_OPS are
+ * present.
*
* @NL80211_ATTR_MU_MIMO_GROUP_DATA: array of 24 bytes that defines a MU-MIMO
* groupID for monitor mode.
@@ -2365,7 +2558,7 @@ enum nl80211_commands {
* from successful FILS authentication and is used with
* %NL80211_CMD_CONNECT.
*
- * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
* identifying the scope of PMKSAs. This is used with
* @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
*
@@ -2396,7 +2589,9 @@ enum nl80211_commands {
* space supports external authentication. This attribute shall be used
* with %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP request. The driver
* may offload authentication processing to user space if this capability
- * is indicated in the respective requests from the user space.
+ * is indicated in the respective requests from the user space. (This flag
+ * attribute is deprecated for %NL80211_CMD_START_AP; use
+ * %NL80211_ATTR_AP_SETTINGS_FLAGS instead.)
*
* @NL80211_ATTR_NSS: Station's New/updated RX_NSS value notified using this
* u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED.
@@ -2515,6 +2710,152 @@ enum nl80211_commands {
* @NL80211_ATTR_HE_6GHZ_CAPABILITY: HE 6 GHz Band Capability element (from
* association request when used with NL80211_CMD_NEW_STATION).
*
+ * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS
+ * discovery. It is a nested attribute, see
+ * &enum nl80211_fils_discovery_attributes. Userspace should pass an empty
+ * nested attribute to disable this feature and delete the templates.
+ *
+ * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure
+ * unsolicited broadcast probe response. It is a nested attribute, see
+ * &enum nl80211_unsol_bcast_probe_resp_attributes. Userspace should pass an empty
+ * nested attribute to disable this feature and delete the templates.
+ *
+ * @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION)
+ * @NL80211_ATTR_S1G_CAPABILITY_MASK: S1G Capability Information element
+ * override mask. Used with NL80211_ATTR_S1G_CAPABILITY in
+ * NL80211_CMD_ASSOCIATE or NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_SAE_PWE: Indicates the mechanism(s) allowed for SAE PWE
+ * derivation in WPA3-Personal networks which are using SAE authentication.
+ * This is a u8 attribute that encapsulates one of the values from
+ * &enum nl80211_sae_pwe_mechanism.
+ *
+ * @NL80211_ATTR_SAR_SPEC: SAR power limitation specification when
+ * used with %NL80211_CMD_SET_SAR_SPECS. The message contains fields
+ * of %nl80211_sar_attrs, which specify the SAR type and related
+ * SAR specs. The SAR specs contain an array of %nl80211_sar_specs_attrs.
+ *
+ * @NL80211_ATTR_RECONNECT_REQUESTED: flag attribute, used with deauth and
+ * disassoc events to indicate that an immediate reconnect to the AP
+ * is desired.
+ *
+ * @NL80211_ATTR_OBSS_COLOR_BITMAP: a u64 bitmap of BSS colors for the
+ * %NL80211_CMD_OBSS_COLOR_COLLISION event.
+ *
+ * @NL80211_ATTR_COLOR_CHANGE_COUNT: u8 attribute specifying the number of TBTTs
+ * until the color switch event.
+ * @NL80211_ATTR_COLOR_CHANGE_COLOR: u8 attribute specifying the color that we are
+ * switching to
+ * @NL80211_ATTR_COLOR_CHANGE_ELEMS: Nested set of attributes containing the IE
+ * information for the time while performing a color switch.
+ *
+ * @NL80211_ATTR_MBSSID_CONFIG: Nested attribute for multiple BSSID
+ * advertisements (MBSSID) parameters in AP mode.
+ * The kernel uses this attribute to indicate the driver's support for MBSSID
+ * and enhanced multi-BSSID advertisements (EMA AP) to userspace.
+ * Userspace should use this attribute to configure per-interface MBSSID
+ * parameters.
+ * See &enum nl80211_mbssid_config_attributes for details.
+ *
+ * @NL80211_ATTR_MBSSID_ELEMS: Nested parameter to pass multiple BSSID elements.
+ * Mandatory parameter for the transmitting interface to enable MBSSID.
+ * Optional for the non-transmitting interfaces.
+ *
+ * @NL80211_ATTR_RADAR_BACKGROUND: Configure a dedicated offchannel chain
+ * available for radar/CAC detection on some hardware. This chain can't be
+ * used to transmit or receive frames and is bound to a running wdev.
+ * Background radar/CAC detection makes it possible to avoid the CAC
+ * downtime of switching to a different channel during CAC detection on
+ * the selected radar channel.
+ *
+ * @NL80211_ATTR_AP_SETTINGS_FLAGS: u32 attribute containing AP settings flags,
+ * enumerated in &enum nl80211_ap_settings_flags. This attribute shall be
+ * used with %NL80211_CMD_START_AP request.
+ *
+ * @NL80211_ATTR_EHT_CAPABILITY: EHT Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION). Can be set
+ * only if %NL80211_STA_FLAG_WME is set.
+ *
+ * @NL80211_ATTR_MLO_LINK_ID: A (u8) link ID for use with MLO, to be used with
+ * various commands that need a link ID to operate.
+ * @NL80211_ATTR_MLO_LINKS: A nested array of links, each containing some
+ * per-link information and a link ID.
+ * @NL80211_ATTR_MLD_ADDR: An MLD address, used with various commands such as
+ * authenticate/associate.
+ *
+ * @NL80211_ATTR_MLO_SUPPORT: Flag attribute to indicate user space supports MLO
+ * connection. Used with %NL80211_CMD_CONNECT. If this attribute is not
+ * included in NL80211_CMD_CONNECT, drivers must not perform an MLO connection.
+ *
+ * @NL80211_ATTR_MAX_NUM_AKM_SUITES: U16 attribute. Indicates maximum number of
+ * AKM suites allowed for %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and
+ * %NL80211_CMD_START_AP in %NL80211_CMD_GET_WIPHY response. If this
+ * attribute is not present, userspace shall take the maximum number of
+ * allowed AKM suites to be %NL80211_MAX_NR_AKM_SUITES, the legacy maximum
+ * prior to the introduction of this attribute.
+ *
+ * @NL80211_ATTR_EML_CAPABILITY: EML Capability information (u16)
+ * @NL80211_ATTR_MLD_CAPA_AND_OPS: MLD Capabilities and Operations (u16)
+ *
+ * @NL80211_ATTR_TX_HW_TIMESTAMP: Hardware timestamp for TX operation in
+ * nanoseconds (u64). This is the device clock timestamp so it will
+ * probably reset when the device is stopped or the firmware is reset.
+ * When used with %NL80211_CMD_FRAME_TX_STATUS, indicates the frame TX
+ * timestamp. When used with %NL80211_CMD_FRAME RX notification, indicates
+ * the ack TX timestamp.
+ * @NL80211_ATTR_RX_HW_TIMESTAMP: Hardware timestamp for RX operation in
+ * nanoseconds (u64). This is the device clock timestamp so it will
+ * probably reset when the device is stopped or the firmware is reset.
+ * When used with %NL80211_CMD_FRAME_TX_STATUS, indicates the ack RX
+ * timestamp. When used with %NL80211_CMD_FRAME RX notification, indicates
+ * the incoming frame RX timestamp.
+ * @NL80211_ATTR_TD_BITMAP: Transition Disable bitmap, for subsequent
+ * (re)associations.
+ *
+ * @NL80211_ATTR_PUNCT_BITMAP: (u32) Preamble puncturing bitmap, lowest
+ * bit corresponds to the lowest 20 MHz channel. Each bit set to 1
+ * indicates that the sub-channel is punctured. Higher 16 bits are
+ * reserved.
+ *
+ * @NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS: Maximum number of peers that HW
+ * timestamping can be enabled for concurrently (u16), a wiphy attribute.
+ * A value of 0xffff indicates that setting for all peers (i.e., not specifying
+ * an address with %NL80211_CMD_SET_HW_TIMESTAMP) is supported.
+ * @NL80211_ATTR_HW_TIMESTAMP_ENABLED: Indicates whether HW timestamping should
+ * be enabled or not (flag attribute).
+ *
+ * @NL80211_ATTR_EMA_RNR_ELEMS: Optional nested attribute for
+ * reduced neighbor report (RNR) elements. This attribute can be used
+ * only when NL80211_MBSSID_CONFIG_ATTR_EMA is enabled.
+ * Userspace is responsible for splitting the RNR into multiple
+ * elements such that each element excludes the non-transmitting
+ * profiles already included in the MBSSID element
+ * (%NL80211_ATTR_MBSSID_ELEMS) at the same index. Each EMA beacon
+ * will be generated by adding MBSSID and RNR elements at the same
+ * index. If userspace includes more RNR elements than the number of
+ * MBSSID elements, then the extra ones will be added in every EMA beacon.
+ *
+ * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
+ * disabled.
+ *
+ * @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e.
+ * include BSSes that can only be used in restricted scenarios and/or
+ * cannot be used at all.
+ *
+ * @NL80211_ATTR_MLO_TTLM_DLINK: Binary attribute specifying the downlink TID to
+ * link mapping. The length is 8 * sizeof(u16). For each TID the link
+ * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
+ * in Draft P802.11be_D4.0.
+ * @NL80211_ATTR_MLO_TTLM_ULINK: Binary attribute specifying the uplink TID to
+ * link mapping. The length is 8 * sizeof(u16). For each TID the link
+ * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
+ * in Draft P802.11be_D4.0.
+ *
+ * @NL80211_ATTR_ASSOC_SPP_AMSDU: flag attribute used with
+ * %NL80211_CMD_ASSOCIATE indicating that SPP A-MSDUs
+ * are used on this connection
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2821,8 +3162,8 @@ enum nl80211_attrs {
NL80211_ATTR_CH_SWITCH_COUNT,
NL80211_ATTR_CH_SWITCH_BLOCK_TX,
NL80211_ATTR_CSA_IES,
- NL80211_ATTR_CSA_C_OFF_BEACON,
- NL80211_ATTR_CSA_C_OFF_PRESP,
+ NL80211_ATTR_CNTDWN_OFFS_BEACON,
+ NL80211_ATTR_CNTDWN_OFFS_PRESP,
NL80211_ATTR_RXMGMT_FLAGS,
@@ -2997,6 +3338,69 @@ enum nl80211_attrs {
NL80211_ATTR_HE_6GHZ_CAPABILITY,
+ NL80211_ATTR_FILS_DISCOVERY,
+
+ NL80211_ATTR_UNSOL_BCAST_PROBE_RESP,
+
+ NL80211_ATTR_S1G_CAPABILITY,
+ NL80211_ATTR_S1G_CAPABILITY_MASK,
+
+ NL80211_ATTR_SAE_PWE,
+
+ NL80211_ATTR_RECONNECT_REQUESTED,
+
+ NL80211_ATTR_SAR_SPEC,
+
+ NL80211_ATTR_DISABLE_HE,
+
+ NL80211_ATTR_OBSS_COLOR_BITMAP,
+
+ NL80211_ATTR_COLOR_CHANGE_COUNT,
+ NL80211_ATTR_COLOR_CHANGE_COLOR,
+ NL80211_ATTR_COLOR_CHANGE_ELEMS,
+
+ NL80211_ATTR_MBSSID_CONFIG,
+ NL80211_ATTR_MBSSID_ELEMS,
+
+ NL80211_ATTR_RADAR_BACKGROUND,
+
+ NL80211_ATTR_AP_SETTINGS_FLAGS,
+
+ NL80211_ATTR_EHT_CAPABILITY,
+
+ NL80211_ATTR_DISABLE_EHT,
+
+ NL80211_ATTR_MLO_LINKS,
+ NL80211_ATTR_MLO_LINK_ID,
+ NL80211_ATTR_MLD_ADDR,
+
+ NL80211_ATTR_MLO_SUPPORT,
+
+ NL80211_ATTR_MAX_NUM_AKM_SUITES,
+
+ NL80211_ATTR_EML_CAPABILITY,
+ NL80211_ATTR_MLD_CAPA_AND_OPS,
+
+ NL80211_ATTR_TX_HW_TIMESTAMP,
+ NL80211_ATTR_RX_HW_TIMESTAMP,
+ NL80211_ATTR_TD_BITMAP,
+
+ NL80211_ATTR_PUNCT_BITMAP,
+
+ NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS,
+ NL80211_ATTR_HW_TIMESTAMP_ENABLED,
+
+ NL80211_ATTR_EMA_RNR_ELEMS,
+
+ NL80211_ATTR_MLO_LINK_DISABLED,
+
+ NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA,
+
+ NL80211_ATTR_MLO_TTLM_DLINK,
+ NL80211_ATTR_MLO_TTLM_ULINK,
+
+ NL80211_ATTR_ASSOC_SPP_AMSDU,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3009,6 +3413,8 @@ enum nl80211_attrs {
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
+#define NL80211_ATTR_CSA_C_OFF_BEACON NL80211_ATTR_CNTDWN_OFFS_BEACON
+#define NL80211_ATTR_CSA_C_OFF_PRESP NL80211_ATTR_CNTDWN_OFFS_PRESP
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -3049,7 +3455,14 @@ enum nl80211_attrs {
#define NL80211_HE_MIN_CAPABILITY_LEN 16
#define NL80211_HE_MAX_CAPABILITY_LEN 54
#define NL80211_MAX_NR_CIPHER_SUITES 5
+
+/*
+ * NL80211_MAX_NR_AKM_SUITES is obsolete when %NL80211_ATTR_MAX_NUM_AKM_SUITES
+ * is present in the %NL80211_CMD_GET_WIPHY response.
+ */
#define NL80211_MAX_NR_AKM_SUITES 2
+#define NL80211_EHT_MIN_CAPABILITY_LEN 13
+#define NL80211_EHT_MAX_CAPABILITY_LEN 51
#define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10
@@ -3077,7 +3490,7 @@ enum nl80211_attrs {
* and therefore can't be created in the normal ways, use the
* %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
* commands to create and destroy one
- * @NL80211_IF_TYPE_OCB: Outside Context of a BSS
+ * @NL80211_IFTYPE_OCB: Outside Context of a BSS
* This mode corresponds to the MIB variable dot11OCBActivated=true
* @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
@@ -3128,6 +3541,7 @@ enum nl80211_iftype {
* @NL80211_STA_FLAG_ASSOCIATED: station is associated; used with drivers
* that support %NL80211_FEATURE_FULL_AP_CLIENT_STATE to transition a
* previously added station into associated state
+ * @NL80211_STA_FLAG_SPP_AMSDU: station supports SPP A-MSDUs
* @NL80211_STA_FLAG_MAX: highest station flag number currently defined
* @__NL80211_STA_FLAG_AFTER_LAST: internal use
*/
@@ -3140,6 +3554,7 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_AUTHENTICATED,
NL80211_STA_FLAG_TDLS_PEER,
NL80211_STA_FLAG_ASSOCIATED,
+ NL80211_STA_FLAG_SPP_AMSDU,
/* keep last */
__NL80211_STA_FLAG_AFTER_LAST,
@@ -3187,6 +3602,18 @@ enum nl80211_he_gi {
};
/**
+ * enum nl80211_he_ltf - HE long training field
+ * @NL80211_RATE_INFO_HE_1XLTF: 3.2 usec
+ * @NL80211_RATE_INFO_HE_2XLTF: 6.4 usec
+ * @NL80211_RATE_INFO_HE_4XLTF: 12.8 usec
+ */
+enum nl80211_he_ltf {
+ NL80211_RATE_INFO_HE_1XLTF,
+ NL80211_RATE_INFO_HE_2XLTF,
+ NL80211_RATE_INFO_HE_4XLTF,
+};
+
+/**
* enum nl80211_he_ru_alloc - HE RU allocation values
* @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
* @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
@@ -3207,6 +3634,56 @@ enum nl80211_he_ru_alloc {
};
/**
+ * enum nl80211_eht_gi - EHT guard interval
+ * @NL80211_RATE_INFO_EHT_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_EHT_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_EHT_GI_3_2: 3.2 usec
+ */
+enum nl80211_eht_gi {
+ NL80211_RATE_INFO_EHT_GI_0_8,
+ NL80211_RATE_INFO_EHT_GI_1_6,
+ NL80211_RATE_INFO_EHT_GI_3_2,
+};
+
+/**
+ * enum nl80211_eht_ru_alloc - EHT RU allocation values
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_52P26: 52+26-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_106P26: 106+26 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_484P242: 484+242 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484: 996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242: 996+484+242 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996: 2x996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484: 2x996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996: 3x996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484: 3x996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_4x996: 4x996-tone RU allocation
+ */
+enum nl80211_eht_ru_alloc {
+ NL80211_RATE_INFO_EHT_RU_ALLOC_26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_52,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_52P26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_106,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_106P26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_484P242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_2x996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_3x996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_4x996,
+};
+
+/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -3245,6 +3722,20 @@ enum nl80211_he_ru_alloc {
* @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
* @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
* non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
+ * @NL80211_RATE_INFO_320_MHZ_WIDTH: 320 MHz bitrate
+ * @NL80211_RATE_INFO_EHT_MCS: EHT MCS index (u8, 0-15)
+ * @NL80211_RATE_INFO_EHT_NSS: EHT NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_EHT_GI: EHT guard interval identifier
+ * (u8, see &enum nl80211_eht_gi)
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then
+ * non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc)
+ * @NL80211_RATE_INFO_S1G_MCS: S1G MCS index (u8, 0-10)
+ * @NL80211_RATE_INFO_S1G_NSS: S1G NSS value (u8, 1-4)
+ * @NL80211_RATE_INFO_1_MHZ_WIDTH: 1 MHz S1G rate
+ * @NL80211_RATE_INFO_2_MHZ_WIDTH: 2 MHz S1G rate
+ * @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate
+ * @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate
+ * @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -3266,6 +3757,18 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_HE_GI,
NL80211_RATE_INFO_HE_DCM,
NL80211_RATE_INFO_HE_RU_ALLOC,
+ NL80211_RATE_INFO_320_MHZ_WIDTH,
+ NL80211_RATE_INFO_EHT_MCS,
+ NL80211_RATE_INFO_EHT_NSS,
+ NL80211_RATE_INFO_EHT_GI,
+ NL80211_RATE_INFO_EHT_RU_ALLOC,
+ NL80211_RATE_INFO_S1G_MCS,
+ NL80211_RATE_INFO_S1G_NSS,
+ NL80211_RATE_INFO_1_MHZ_WIDTH,
+ NL80211_RATE_INFO_2_MHZ_WIDTH,
+ NL80211_RATE_INFO_4_MHZ_WIDTH,
+ NL80211_RATE_INFO_8_MHZ_WIDTH,
+ NL80211_RATE_INFO_16_MHZ_WIDTH,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -3576,11 +4079,20 @@ enum nl80211_mpath_info {
* capabilities IE
* @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
* defined in HE capabilities IE
- * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
- * defined
* @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16),
* given for all 6 GHz band channels
+ * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are
+ * advertised on this band/for this iftype (binary)
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC: EHT MAC capabilities as in EHT
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY: EHT PHY capabilities as in EHT
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET: EHT supported NSS/MCS as in EHT
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as
+ * defined in EHT capabilities element
* @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
+ * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined
*/
enum nl80211_band_iftype_attr {
__NL80211_BAND_IFTYPE_ATTR_INVALID,
@@ -3591,6 +4103,11 @@ enum nl80211_band_iftype_attr {
NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA,
+ NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET,
+ NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE,
/* keep last */
__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -3620,6 +4137,10 @@ enum nl80211_band_iftype_attr {
* @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
* the allowed channel bandwidth configurations.
* Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
+ * @NL80211_BAND_ATTR_S1G_MCS_NSS_SET: S1G capabilities, supported S1G-MCS and NSS
+ * set subfield, as in the S1G information IE, 5 bytes
+ * @NL80211_BAND_ATTR_S1G_CAPA: S1G capabilities information subfield as in the
+ * S1G information IE, 10 bytes
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -3640,6 +4161,9 @@ enum nl80211_band_attr {
NL80211_BAND_ATTR_EDMG_CHANNELS,
NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+ NL80211_BAND_ATTR_S1G_MCS_NSS_SET,
+ NL80211_BAND_ATTR_S1G_CAPA,
+
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
@@ -3685,7 +4209,7 @@ enum nl80211_wmm_rule {
* (100 * dBm).
* @NL80211_FREQUENCY_ATTR_DFS_STATE: current state for DFS
* (enum nl80211_dfs_state)
- * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in miliseconds for how long
+ * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in milliseconds for how long
* this channel is in this DFS state.
* @NL80211_FREQUENCY_ATTR_NO_HT40_MINUS: HT40- isn't possible with this
* channel as the control channel
@@ -3725,6 +4249,33 @@ enum nl80211_wmm_rule {
* @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel
* in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_OFFSET: frequency offset in KHz
+ * @NL80211_FREQUENCY_ATTR_1MHZ: 1 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_2MHZ: 2 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_4MHZ: 4 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_8MHZ: 8 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_320MHZ: any 320 MHz channel using this channel
+ * as the primary or any of the secondary channels isn't possible
+ * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel
+ * in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_PSD: Power spectral density (in dBm) that
+ * is allowed on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_DFS_CONCURRENT: Operation on this channel is
+ * allowed for peer-to-peer or adhoc communication under the control
+ * of a DFS master which operates on the same channel (FCC-594280 D01
+ * Section B.3). Should be used together with %NL80211_RRF_DFS only.
+ * @NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP
+ * not allowed using this channel
+ * @NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP
+ * not allowed using this channel
+ * @NL80211_FREQUENCY_ATTR_CAN_MONITOR: This channel can be used in monitor
+ * mode despite other (regulatory) restrictions, even if the channel is
+ * otherwise completely disabled.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3756,6 +4307,18 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_WMM,
NL80211_FREQUENCY_ATTR_NO_HE,
NL80211_FREQUENCY_ATTR_OFFSET,
+ NL80211_FREQUENCY_ATTR_1MHZ,
+ NL80211_FREQUENCY_ATTR_2MHZ,
+ NL80211_FREQUENCY_ATTR_4MHZ,
+ NL80211_FREQUENCY_ATTR_8MHZ,
+ NL80211_FREQUENCY_ATTR_16MHZ,
+ NL80211_FREQUENCY_ATTR_NO_320MHZ,
+ NL80211_FREQUENCY_ATTR_NO_EHT,
+ NL80211_FREQUENCY_ATTR_PSD,
+ NL80211_FREQUENCY_ATTR_DFS_CONCURRENT,
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT,
+ NL80211_FREQUENCY_ATTR_CAN_MONITOR,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -3768,6 +4331,10 @@ enum nl80211_frequency_attr {
#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
NL80211_FREQUENCY_ATTR_IR_CONCURRENT
+#define NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT \
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT
+#define NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT \
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT
/**
* enum nl80211_bitrate_attr - bitrate attributes
@@ -3856,6 +4423,8 @@ enum nl80211_reg_type {
* a given frequency range. The value is in mBm (100 * dBm).
* @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
* If not present or 0 default CAC time will be used.
+ * @NL80211_ATTR_POWER_RULE_PSD: power spectral density (in dBm).
+ * This could be negative.
* @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
* currently defined
* @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
@@ -3873,6 +4442,8 @@ enum nl80211_reg_rule_attr {
NL80211_ATTR_DFS_CAC_TIME,
+ NL80211_ATTR_POWER_RULE_PSD,
+
/* keep last */
__NL80211_REG_RULE_ATTR_AFTER_LAST,
NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
@@ -3901,14 +4472,7 @@ enum nl80211_reg_rule_attr {
* value as specified by &struct nl80211_bss_select_rssi_adjust.
* @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching
* (this cannot be used together with SSID).
- * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Nested attribute that carries the
- * band specific minimum rssi thresholds for the bands defined in
- * enum nl80211_band. The minimum rssi threshold value(s32) specific to a
- * band shall be encapsulated in attribute with type value equals to one
- * of the NL80211_BAND_* defined in enum nl80211_band. For example, the
- * minimum rssi threshold value for 2.4GHZ band shall be encapsulated
- * within an attribute of type NL80211_BAND_2GHZ. And one or more of such
- * attributes will be nested within this attribute.
+ * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Obsolete
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3921,7 +4485,7 @@ enum nl80211_sched_scan_match_attr {
NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
NL80211_SCHED_SCAN_MATCH_ATTR_BSSID,
- NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI,
+ NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI, /* obsolete */
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3954,6 +4518,15 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
* @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
* @NL80211_RRF_NO_HE: HE operation not allowed
+ * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed
+ * @NL80211_RRF_NO_EHT: EHT operation not allowed
+ * @NL80211_RRF_PSD: Ruleset has power spectral density value
+ * @NL80211_RRF_DFS_CONCURRENT: Operation on this channel is allowed for
+ * peer-to-peer or adhoc communication under the control of a DFS master
+ * which operates on the same channel (FCC-594280 D01 Section B.3).
+ * Should be used together with %NL80211_RRF_DFS only.
+ * @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed
+ * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -3972,6 +4545,12 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_80MHZ = 1<<15,
NL80211_RRF_NO_160MHZ = 1<<16,
NL80211_RRF_NO_HE = 1<<17,
+ NL80211_RRF_NO_320MHZ = 1<<18,
+ NL80211_RRF_NO_EHT = 1<<19,
+ NL80211_RRF_PSD = 1<<20,
+ NL80211_RRF_DFS_CONCURRENT = 1<<21,
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22,
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -3980,6 +4559,8 @@ enum nl80211_reg_rule_flags {
#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
NL80211_RRF_NO_HT40PLUS)
#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
+#define NL80211_RRF_NO_UHB_VLP_CLIENT NL80211_RRF_NO_6GHZ_VLP_CLIENT
+#define NL80211_RRF_NO_UHB_AFC_CLIENT NL80211_RRF_NO_6GHZ_AFC_CLIENT
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
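As a rough illustration, userspace parsing a regulatory rule might interpret the new flags above like this (plain C sketch; the flags value is assumed to come from the rule's flags attribute):

/* Hypothetical sketch: report a few channel properties from rule flags. */
#include <stdio.h>
#include <linux/nl80211.h>

static void describe_reg_rule(unsigned int flags)
{
	if (flags & NL80211_RRF_NO_320MHZ)
		printf("  320 MHz operation not allowed\n");
	if (flags & NL80211_RRF_NO_EHT)
		printf("  EHT operation not allowed\n");
	if (flags & NL80211_RRF_PSD)
		printf("  rule carries a power spectral density value\n");
	/* DFS_CONCURRENT is meaningful only together with NL80211_RRF_DFS */
	if ((flags & NL80211_RRF_DFS) && (flags & NL80211_RRF_DFS_CONCURRENT))
		printf("  P2P/adhoc allowed under a co-channel DFS master\n");
}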
@@ -4049,6 +4630,7 @@ enum nl80211_user_reg_hint_type {
* receiving frames destined to the local BSS
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
+ * @NL80211_SURVEY_INFO_FREQUENCY_OFFSET: center frequency offset in KHz
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
*/
enum nl80211_survey_info {
@@ -4064,6 +4646,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_SCAN,
NL80211_SURVEY_INFO_PAD,
NL80211_SURVEY_INFO_TIME_BSS_RX,
+ NL80211_SURVEY_INFO_FREQUENCY_OFFSET,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
@@ -4467,6 +5050,8 @@ enum nl80211_key_mode {
* @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel
* @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel
* @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_320: 320 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
+ * attribute must be provided as well
*/
enum nl80211_chan_width {
NL80211_CHAN_WIDTH_20_NOHT,
@@ -4482,6 +5067,7 @@ enum nl80211_chan_width {
NL80211_CHAN_WIDTH_4,
NL80211_CHAN_WIDTH_8,
NL80211_CHAN_WIDTH_16,
+ NL80211_CHAN_WIDTH_320,
};
/**
@@ -4504,6 +5090,36 @@ enum nl80211_bss_scan_width {
};
/**
+ * enum nl80211_bss_use_for - bitmap indicating possible BSS use
+ * @NL80211_BSS_USE_FOR_NORMAL: Use this BSS for normal "connection",
+ * including IBSS/MBSS depending on the type.
+ * @NL80211_BSS_USE_FOR_MLD_LINK: This BSS can be used as a link in an
+ * MLO connection. Note that for an MLO connection, all links including
+ * the assoc link must have this flag set, and the assoc link must
+ * additionally have %NL80211_BSS_USE_FOR_NORMAL set.
+ */
+enum nl80211_bss_use_for {
+ NL80211_BSS_USE_FOR_NORMAL = 1 << 0,
+ NL80211_BSS_USE_FOR_MLD_LINK = 1 << 1,
+};
+
+/**
+ * enum nl80211_bss_cannot_use_reasons - reason(s) connection to a
+ * BSS isn't possible
+ * @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't
+ * supported by the device, and this BSS entry represents one.
+ * @NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH: the STA does not support
+ * the AP power type (SP, VLP, AP) that the AP uses.
+ */
+enum nl80211_bss_cannot_use_reasons {
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0,
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 1 << 1,
+};
+
+#define NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH \
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH
+
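A sketch of how a scan consumer might combine the two bitmaps above when picking candidate links; use_for and cannot_use are assumed to have been read from %NL80211_BSS_USE_FOR and %NL80211_BSS_CANNOT_USE_REASONS:

/* Hypothetical sketch: decide whether a BSS entry can serve as an MLO link. */
#include <linux/nl80211.h>

static int usable_as_mlo_link(unsigned int use_for,
			      unsigned long long cannot_use)
{
	if (!(use_for & NL80211_BSS_USE_FOR_MLD_LINK))
		return 0;
	/* an unsupported NSTR nonprimary link can't be used for this */
	if (cannot_use & NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY)
		return 0;
	return 1;
}

Remember that the assoc link additionally needs %NL80211_BSS_USE_FOR_NORMAL set.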
+/**
* enum nl80211_bss - netlink attributes for a BSS
*
* @__NL80211_BSS_INVALID: invalid
@@ -4534,7 +5150,7 @@ enum nl80211_bss_scan_width {
* elements from a Beacon frame (bin); not present if no Beacon frame has
* yet been received
* @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
- * (u32, enum nl80211_bss_scan_width)
+ * (u32, enum nl80211_bss_scan_width) - No longer used!
* @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64)
* (not present if no beacon frame has been received yet)
* @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
@@ -4553,6 +5169,16 @@ enum nl80211_bss_scan_width {
* Contains a nested array of signal strength attributes (u8, dBm),
* using the nesting index as the antenna number.
* @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz
+ * @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8).
+ * @NL80211_BSS_MLD_ADDR: MLD address of this BSS if connected to it.
+ * @NL80211_BSS_USE_FOR: u32 bitmap attribute indicating what the BSS can be
+ * used for, see &enum nl80211_bss_use_for.
+ * @NL80211_BSS_CANNOT_USE_REASONS: Indicates the reason that this BSS cannot
+ * be used for all or some of the possible uses by the device reporting it,
+ * even though its presence was detected.
+ * This is a u64 attribute containing a bitmap of values from
+ * &enum nl80211_bss_cannot_use_reasons; note that the attribute may be
+ * missing if no reasons are specified.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -4578,6 +5204,10 @@ enum nl80211_bss {
NL80211_BSS_PARENT_BSSID,
NL80211_BSS_CHAIN_SIGNAL,
NL80211_BSS_FREQUENCY_OFFSET,
+ NL80211_BSS_MLO_LINK_ID,
+ NL80211_BSS_MLD_ADDR,
+ NL80211_BSS_USE_FOR,
+ NL80211_BSS_CANNOT_USE_REASONS,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -4741,6 +5371,10 @@ enum nl80211_key_attributes {
* @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection,
* see &struct nl80211_txrate_vht
* @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi
+ * @NL80211_TXRATE_HE: HE rates allowed for TX rate selection,
+ * see &struct nl80211_txrate_he
+ * @NL80211_TXRATE_HE_GI: configure HE GI, 0.8us, 1.6us and 3.2us.
+ * @NL80211_TXRATE_HE_LTF: configure HE LTF, 1XLTF, 2XLTF and 4XLTF.
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -4750,6 +5384,9 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_HT,
NL80211_TXRATE_VHT,
NL80211_TXRATE_GI,
+ NL80211_TXRATE_HE,
+ NL80211_TXRATE_HE_GI,
+ NL80211_TXRATE_HE_LTF,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -4767,6 +5404,15 @@ struct nl80211_txrate_vht {
__u16 mcs[NL80211_VHT_NSS_MAX];
};
+#define NL80211_HE_NSS_MAX 8
+/**
+ * struct nl80211_txrate_he - HE MCS/NSS txrate bitmap
+ * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.)
+ */
+struct nl80211_txrate_he {
+ __u16 mcs[NL80211_HE_NSS_MAX];
+};
+
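A sketch of filling this bitmap to limit TX to HE MCS 0-7 on the first two spatial streams (the masking policy is purely illustrative):

/* Hypothetical sketch: allow only HE MCS 0-7 on NSS 1 and 2. */
#include <string.h>
#include <linux/nl80211.h>

static void fill_he_txrate(struct nl80211_txrate_he *he)
{
	memset(he, 0, sizeof(*he));
	he->mcs[0] = 0x00ff;	/* 1 spatial stream: MCS 0-7 */
	he->mcs[1] = 0x00ff;	/* 2 spatial streams: MCS 0-7 */
	/* indices 2..7 stay zero: 3+ streams disabled */
}

The filled struct would then be carried in an %NL80211_TXRATE_HE attribute nested inside the per-band TX-rate configuration.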
enum nl80211_txrate_gi {
NL80211_TXRATE_DEFAULT_GI,
NL80211_TXRATE_FORCE_SGI,
@@ -4780,6 +5426,7 @@ enum nl80211_txrate_gi {
* @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz)
* @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz)
* @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs
+ * @NL80211_BAND_LC: light communication band (placeholder)
* @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
* since newer kernel versions may support more bands
*/
@@ -4789,6 +5436,7 @@ enum nl80211_band {
NL80211_BAND_60GHZ,
NL80211_BAND_6GHZ,
NL80211_BAND_S1GHZ,
+ NL80211_BAND_LC,
NUM_NL80211_BANDS,
};
@@ -4908,7 +5556,7 @@ enum nl80211_tx_rate_setting {
* (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE).
* @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but
* per peer instead.
- * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribue, if set indicates
+ * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute, if set indicates
* that the new configuration overrides all previous peer
* configurations, otherwise previous peer specific configurations
* should be left untouched.
@@ -5109,6 +5757,8 @@ struct nl80211_pattern_support {
* %NL80211_ATTR_SCAN_FREQUENCIES contains more than one
* frequency, it means that the match occurred in more than one
* channel.
+ * @NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC: For wakeup reporting only.
+ * Wake up happened due to an unprotected deauth or disassoc frame when MFP is in use.
* @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
* @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
*
@@ -5136,6 +5786,7 @@ enum nl80211_wowlan_triggers {
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS,
NL80211_WOWLAN_TRIG_NET_DETECT,
NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC,
/* keep last */
NUM_NL80211_WOWLAN_TRIG,
@@ -5291,7 +5942,7 @@ enum nl80211_attr_coalesce_rule {
/**
* enum nl80211_coalesce_condition - coalesce rule conditions
- * @NL80211_COALESCE_CONDITION_MATCH: coalaesce Rx packets when patterns
+ * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns
* in a rule are matched.
* @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns
* in a rule are not matched.
@@ -5355,7 +6006,7 @@ enum nl80211_iface_limit_attrs {
* => allows 8 of AP/GO that can have BI gcd >= min gcd
*
* numbers = [ #{STA} <= 2 ], channels = 2, max = 2
- * => allows two STAs on different channels
+ * => allows two STAs on the same or on different channels
*
* numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4
* => allows a STA plus three P2P interfaces
@@ -5390,7 +6041,7 @@ enum nl80211_if_combination_attrs {
* enum nl80211_plink_state - state of a mesh peer link finite state machine
*
* @NL80211_PLINK_LISTEN: initial state, considered the implicit
- * state of non existent mesh peer links
+ * state of non-existent mesh peer links
* @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
* this mesh peer
* @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
@@ -5400,7 +6051,7 @@ enum nl80211_if_combination_attrs {
* @NL80211_PLINK_ESTAB: mesh peer link is established
* @NL80211_PLINK_HOLDING: mesh peer link is being closed or cancelled
* @NL80211_PLINK_BLOCKED: all frames transmitted from this mesh
- * plink are discarded
+ * plink are discarded, except for authentication frames
* @NUM_NL80211_PLINK_STATES: number of peer link states
* @MAX_NL80211_PLINK_STATES: highest numerical value of plink states
*/
@@ -5439,6 +6090,7 @@ enum plink_actions {
#define NL80211_KEK_LEN 16
#define NL80211_KCK_EXT_LEN 24
#define NL80211_KEK_EXT_LEN 32
+#define NL80211_KCK_EXT_LEN_32 32
#define NL80211_REPLAY_CTR_LEN 8
/**
@@ -5537,13 +6189,15 @@ enum nl80211_tdls_operation {
NL80211_TDLS_DISABLE_LINK,
};
-/*
+/**
* enum nl80211_ap_sme_features - device-integrated AP features
- * Reserved for future use, no bits are defined in
- * NL80211_ATTR_DEVICE_AP_SME yet.
+ * @NL80211_AP_SME_SA_QUERY_OFFLOAD: SA Query procedures offloaded to driver
+ * when user space indicates support for SA Query procedures offload during
+ * "start ap" with %NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT.
+ */
enum nl80211_ap_sme_features {
+ NL80211_AP_SME_SA_QUERY_OFFLOAD = 1 << 0,
};
- */
/**
* enum nl80211_feature_flags - device/driver features
@@ -5554,7 +6208,7 @@ enum nl80211_ap_sme_features {
* @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up
* the connected inactive stations in AP mode.
* @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
- * to work properly to suppport receiving regulatory hints from
+ * to work properly to support receiving regulatory hints from
* cellular base stations.
* @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
* here to reserve the value for API/ABI compatibility)
@@ -5680,7 +6334,7 @@ enum nl80211_feature_flags {
* request to use RRM (see %NL80211_ATTR_USE_RRM) with
* %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
* the ASSOC_REQ_USE_RRM flag in the association request even if
- * NL80211_FEATURE_QUIET is not advertized.
+ * NL80211_FEATURE_QUIET is not advertised.
* @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air
* sniffer which means that it can be configured to hear packets from
* certain groups which can be configured by the
@@ -5692,13 +6346,15 @@ enum nl80211_feature_flags {
* the BSS that the interface that requested the scan is connected to
* (if available).
* @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the
- * time the last beacon/probe was received. The time is the TSF of the
- * BSS that the interface that requested the scan is connected to
- * (if available).
+ * time the last beacon/probe was received. For a non-MLO connection, the
+ * time is the TSF of the BSS that the interface that requested the scan is
+ * connected to (if available). For an MLO connection, the time is the TSF
+ * of the BSS corresponding to the link ID specified in the scan request (if
+ * specified).
* @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
* channel dwell time.
* @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
- * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * configuration (AP/mesh), supporting a legacy (non-HT/VHT) rate.
* @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
* configuration (AP/mesh) with HT rates.
* @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
@@ -5772,8 +6428,7 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
* (set/del PMKSA operations) in AP mode.
*
- * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Driver supports
- * filtering of sched scan results using band specific RSSI thresholds.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Obsolete
*
* @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power
* to a station.
@@ -5821,6 +6476,72 @@ enum nl80211_feature_flags {
* handshake with PSK in AP mode (PSK is passed as part of the start AP
* command).
*
+ * @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP: Device wants to do SAE authentication
+ * in AP mode (SAE password is passed as part of the start AP command).
+ *
+ * @NL80211_EXT_FEATURE_FILS_DISCOVERY: Driver/device supports FILS discovery
+ * frames transmission
+ *
+ * @NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP: Driver/device supports
+ * unsolicited broadcast probe response transmission
+ *
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HE: Driver supports beacon rate
+ * configuration (AP/mesh) with HE rates.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_LTF: Device supports secure LTF measurement
+ * exchange protocol.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_RTT: Device supports secure RTT measurement
+ * exchange protocol.
+ *
+ * @NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE: Device supports management
+ * frame protection for all management frames exchanged during the
+ * negotiation and range measurement procedure.
+ *
+ * @NL80211_EXT_FEATURE_BSS_COLOR: The driver supports BSS color collision
+ * detection and change announcements.
+ *
+ * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD: Driver running in AP mode supports
+ * FILS encryption and decryption for (Re)Association Request and Response
+ * frames. Userspace has to share FILS AAD details with the driver by using
+ * @NL80211_CMD_SET_FILS_AAD.
+ *
+ * @NL80211_EXT_FEATURE_RADAR_BACKGROUND: Device supports background radar/CAC
+ * detection.
+ *
+ * @NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE: Device can perform a MAC address
+ * change without having to bring the underlying network device down
+ * first. For example, in station mode this can be used to vary the
+ * origin MAC address prior to a connection to a new AP for privacy
+ * or other reasons. Note that certain driver specific restrictions
+ * might apply, e.g. no scans in progress, no offchannel operations
+ * in progress, and no active connections.
+ *
+ * @NL80211_EXT_FEATURE_PUNCT: Driver supports preamble puncturing in AP mode.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_NAN: Device supports NAN Pairing which enables
+ * authentication, data encryption and message integrity.
+ *
+ * @NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA: Device supports randomized TA
+ * in authentication and deauthentication frames sent to unassociated peer
+ * using @NL80211_CMD_FRAME.
+ *
+ * @NL80211_EXT_FEATURE_OWE_OFFLOAD: Driver/Device wants to do OWE DH IE
+ * handling in station mode.
+ *
+ * @NL80211_EXT_FEATURE_OWE_OFFLOAD_AP: Driver/Device wants to do OWE DH IE
+ * handling in AP mode.
+ *
+ * @NL80211_EXT_FEATURE_DFS_CONCURRENT: The device supports peer-to-peer or
+ * ad hoc operation on DFS channels under the control of a concurrent
+ * DFS master on the same channel as described in FCC-594280 D01
+ * (Section B.3). This, for example, allows P2P GO and P2P clients to
+ * operate on DFS channels as long as there's a concurrent BSS connection.
+ *
+ * @NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT: The driver has support for SPP
+ * (signaling and payload protected) A-MSDUs and this shall be advertised
+ * in the RSNXE.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5862,7 +6583,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD,
+ NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, /* obsolete */
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
NL80211_EXT_FEATURE_SAE_OFFLOAD,
@@ -5878,6 +6599,24 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS,
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION,
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP,
+ NL80211_EXT_FEATURE_BEACON_RATE_HE,
+ NL80211_EXT_FEATURE_SECURE_LTF,
+ NL80211_EXT_FEATURE_SECURE_RTT,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE,
+ NL80211_EXT_FEATURE_BSS_COLOR,
+ NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND,
+ NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE,
+ NL80211_EXT_FEATURE_PUNCT,
+ NL80211_EXT_FEATURE_SECURE_NAN,
+ NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA,
+ NL80211_EXT_FEATURE_OWE_OFFLOAD,
+ NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5962,7 +6701,7 @@ enum nl80211_timeout_reason {
* request parameters IE in the probe request
* @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at
- * rate of at least 5.5M. In case non OCE AP is discovered in the channel,
+ * rate of at least 5.5M. In case non-OCE AP is discovered in the channel,
* only the first probe req in the channel will be sent in high rate.
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request
* tx deferral (dot11FILSProbeDelay shall be set to 15ms)
@@ -5992,6 +6731,16 @@ enum nl80211_timeout_reason {
* @NL80211_SCAN_FLAG_FREQ_KHZ: report scan results with
* %NL80211_ATTR_SCAN_FREQ_KHZ. This also means
* %NL80211_ATTR_SCAN_FREQUENCIES will not be included.
+ * @NL80211_SCAN_FLAG_COLOCATED_6GHZ: scan for colocated APs reported by
+ * 2.4/5 GHz APs. When the flag is set, the scan logic will use the
+ * information from the RNR element found in beacons/probe responses
+ * received on the 2.4/5 GHz channels to actively scan only the 6GHz
+ * channels on which APs are expected to be found. Note that when not set,
+ * the scan logic would scan all 6GHz channels, but since transmission of
+ * probe requests on non-PSC channels is limited, it is highly likely that
+ * these channels would passively be scanned. Also note that when the flag
+ * is set, in addition to the colocated APs, PSC channels would also be
+ * scanned if user space has asked for it.
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -6008,6 +6757,7 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
NL80211_SCAN_FLAG_FREQ_KHZ = 1<<13,
+ NL80211_SCAN_FLAG_COLOCATED_6GHZ = 1<<14,
};
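The scan flags remain a single u32 bitmask, so the new flag is simply OR'ed in next to the existing ones. A hedged libnl sketch (message construction omitted; assumes the mask still travels in %NL80211_ATTR_SCAN_FLAGS):

#include <stdint.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: randomized-SN scan restricted to colocated 6 GHz APs. */
static int put_scan_flags(struct nl_msg *msg)
{
	uint32_t flags = NL80211_SCAN_FLAG_RANDOM_SN |
			 NL80211_SCAN_FLAG_COLOCATED_6GHZ;

	return nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, flags);
}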
/**
@@ -6172,11 +6922,13 @@ struct nl80211_vendor_cmd_info {
* @NL80211_TDLS_PEER_HT: TDLS peer is HT capable.
* @NL80211_TDLS_PEER_VHT: TDLS peer is VHT capable.
* @NL80211_TDLS_PEER_WMM: TDLS peer is WMM capable.
+ * @NL80211_TDLS_PEER_HE: TDLS peer is HE capable.
*/
enum nl80211_tdls_peer_capability {
NL80211_TDLS_PEER_HT = 1<<0,
NL80211_TDLS_PEER_VHT = 1<<1,
NL80211_TDLS_PEER_WMM = 1<<2,
+ NL80211_TDLS_PEER_HE = 1<<3,
};
/**
@@ -6317,7 +7069,7 @@ enum nl80211_nan_func_term_reason {
* The instance ID for the follow up Service Discovery Frame. This is u8.
* @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
* is follow up. This is a u8.
- * The requestor instance ID for the follow up Service Discovery Frame.
+ * The requester instance ID for the follow up Service Discovery Frame.
* @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
* follow up Service Discovery Frame. This is a binary attribute.
* @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
@@ -6707,7 +7459,7 @@ enum nl80211_peer_measurement_attrs {
* @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if
* trigger based ranging measurement is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
- * if non trigger based ranging measurement is supported
+ * if non-trigger-based ranging measurement is supported
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -6761,13 +7513,19 @@ enum nl80211_peer_measurement_ftm_capa {
* if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
* %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
* ranging will be used.
- * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based
+ * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non-trigger-based
* ranging measurement (flag)
* This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are
* mutually exclusive.
* if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
* %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
* ranging will be used.
+ * @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only
+ * valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or
+ * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set.
+ * @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the
+ * responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
+ * or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
*
* @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
* @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -6786,6 +7544,8 @@ enum nl80211_peer_measurement_ftm_req {
NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC,
NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED,
NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
+ NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK,
+ NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR,
/* keep last */
NUM_NL80211_PMSR_FTM_REQ_ATTR,
@@ -6831,7 +7591,7 @@ enum nl80211_peer_measurement_ftm_failure_reasons {
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames
* transmitted (u32, optional)
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames
- * that were acknowleged (u32, optional)
+ * that were acknowledged (u32, optional)
* @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the
* busy peer (u32, seconds)
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent
@@ -6910,6 +7670,13 @@ enum nl80211_peer_measurement_ftm_resp {
*
* @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET: the OBSS PD minimum tx power offset.
* @NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET: the OBSS PD maximum tx power offset.
+ * @NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET: the non-SRG OBSS PD maximum
+ * tx power offset.
+ * @NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP: bitmap that indicates the BSS color
+ * values used by members of the SRG.
+ * @NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP: bitmap that indicates the partial
+ * BSSID values used by members of the SRG.
+ * @NL80211_HE_OBSS_PD_ATTR_SR_CTRL: The SR Control field of the SRP element.
*
* @__NL80211_HE_OBSS_PD_ATTR_LAST: Internal
* @NL80211_HE_OBSS_PD_ATTR_MAX: highest OBSS PD attribute.
@@ -6919,6 +7686,10 @@ enum nl80211_obss_pd_attributes {
NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET,
NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET,
+ NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET,
+ NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP,
+ NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP,
+ NL80211_HE_OBSS_PD_ATTR_SR_CTRL,
/* keep last */
__NL80211_HE_OBSS_PD_ATTR_LAST,
@@ -6972,4 +7743,248 @@ enum nl80211_iftype_akm_attributes {
NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_fils_discovery_attributes - FILS discovery configuration
+ * from IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_INVALID: Invalid
+ *
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit). If set to 0, the feature is disabled.
+ * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action
+ * frame including the headers.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_LAST: Internal
+ * @NL80211_FILS_DISCOVERY_ATTR_MAX: highest attribute
+ */
+enum nl80211_fils_discovery_attributes {
+ __NL80211_FILS_DISCOVERY_ATTR_INVALID,
+
+ NL80211_FILS_DISCOVERY_ATTR_INT_MIN,
+ NL80211_FILS_DISCOVERY_ATTR_INT_MAX,
+ NL80211_FILS_DISCOVERY_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_FILS_DISCOVERY_ATTR_LAST,
+ NL80211_FILS_DISCOVERY_ATTR_MAX = __NL80211_FILS_DISCOVERY_ATTR_LAST - 1
+};
+
+/*
+ * FILS discovery template minimum length with action frame headers and
+ * mandatory fields.
+ */
+#define NL80211_FILS_DISCOVERY_TMPL_MIN_LEN 42
+
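These attributes are meant to be nested together in the start-AP request. A hedged libnl sketch (assuming the nest sits under %NL80211_ATTR_FILS_DISCOVERY and that tmpl points at a caller-built frame template of at least NL80211_FILS_DISCOVERY_TMPL_MIN_LEN bytes):

#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: enable FILS discovery with a 20..100 TU interval window. */
static int put_fils_discovery(struct nl_msg *msg,
			      const void *tmpl, int tmpl_len)
{
	struct nlattr *nest = nla_nest_start(msg, NL80211_ATTR_FILS_DISCOVERY);

	if (!nest)
		return -1;
	nla_put_u32(msg, NL80211_FILS_DISCOVERY_ATTR_INT_MIN, 20);
	nla_put_u32(msg, NL80211_FILS_DISCOVERY_ATTR_INT_MAX, 100);
	nla_put(msg, NL80211_FILS_DISCOVERY_ATTR_TMPL, tmpl_len, tmpl);
	return nla_nest_end(msg, nest);
}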
+/**
+ * enum nl80211_unsol_bcast_probe_resp_attributes - Unsolicited broadcast probe
+ * response configuration. Applicable only in 6GHz.
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID: Invalid
+ *
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU).
+ * Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0
+ * 26.17.2.3.2 (AP behavior for fast passive scanning). If set to 0, the feature is
+ * disabled.
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response
+ * frame template (binary).
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST: Internal
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX: highest attribute
+ */
+enum nl80211_unsol_bcast_probe_resp_attributes {
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID,
+
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX =
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST - 1
+};
+
+/**
+ * enum nl80211_sae_pwe_mechanism - The mechanism(s) allowed for SAE PWE
+ * derivation. Applicable only when WPA3-Personal SAE authentication is
+ * used.
+ *
+ * @NL80211_SAE_PWE_UNSPECIFIED: not specified, used internally to indicate
+ * that the attribute was not supplied by userspace.
+ * @NL80211_SAE_PWE_HUNT_AND_PECK: hunting-and-pecking loop only
+ * @NL80211_SAE_PWE_HASH_TO_ELEMENT: hash-to-element only
+ * @NL80211_SAE_PWE_BOTH: both hunting-and-pecking loop and hash-to-element
+ * can be used.
+ */
+enum nl80211_sae_pwe_mechanism {
+ NL80211_SAE_PWE_UNSPECIFIED,
+ NL80211_SAE_PWE_HUNT_AND_PECK,
+ NL80211_SAE_PWE_HASH_TO_ELEMENT,
+ NL80211_SAE_PWE_BOTH,
+};
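The mechanism is a plain u8 selector. A one-line hedged sketch, assuming it is carried in the %NL80211_ATTR_SAE_PWE attribute of a connect or start-AP request:

#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: request hash-to-element PWE derivation only. */
static int put_sae_pwe(struct nl_msg *msg)
{
	return nla_put_u8(msg, NL80211_ATTR_SAE_PWE,
			  NL80211_SAE_PWE_HASH_TO_ELEMENT);
}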
+
+/**
+ * enum nl80211_sar_type - type of SAR specs
+ *
+ * @NL80211_SAR_TYPE_POWER: power limitation specified in 0.25dBm unit
+ *
+ */
+enum nl80211_sar_type {
+ NL80211_SAR_TYPE_POWER,
+
+ /* add new type here */
+
+ /* Keep last */
+ NUM_NL80211_SAR_TYPE,
+};
+
+/**
+ * enum nl80211_sar_attrs - Attributes for SAR spec
+ *
+ * @NL80211_SAR_ATTR_TYPE: the SAR type as defined in &enum nl80211_sar_type.
+ *
+ * @NL80211_SAR_ATTR_SPECS: Nested array of SAR power
+ * limit specifications. Each specification contains a set
+ * of %nl80211_sar_specs_attrs.
+ *
+ * For SET operation, it contains array of %NL80211_SAR_ATTR_SPECS_POWER
+ * and %NL80211_SAR_ATTR_SPECS_RANGE_INDEX.
+ *
+ * For sar_capa dump, it contains array of
+ * %NL80211_SAR_ATTR_SPECS_START_FREQ
+ * and %NL80211_SAR_ATTR_SPECS_END_FREQ.
+ *
+ * @__NL80211_SAR_ATTR_LAST: Internal
+ * @NL80211_SAR_ATTR_MAX: highest sar attribute
+ *
+ * These attributes are used with %NL80211_CMD_SET_SAR_SPEC
+ */
+enum nl80211_sar_attrs {
+ __NL80211_SAR_ATTR_INVALID,
+
+ NL80211_SAR_ATTR_TYPE,
+ NL80211_SAR_ATTR_SPECS,
+
+ __NL80211_SAR_ATTR_LAST,
+ NL80211_SAR_ATTR_MAX = __NL80211_SAR_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_sar_specs_attrs - Attributes for SAR power limit specs
+ *
+ * @NL80211_SAR_ATTR_SPECS_POWER: Required (s32) value specifying the actual
+ * power limit in units of 0.25 dBm if type is
+ * NL80211_SAR_TYPE_POWER (e.g., a value of 44 represents 11 dBm).
+ * 0 means userspace imposes no SAR limitation on the associated range.
+ *
+ * @NL80211_SAR_ATTR_SPECS_RANGE_INDEX: Required (u32) value to specify the
+ * index of exported freq range table and the associated power limitation
+ * is applied to this range.
+ *
+ * Userspace isn't required to set all the ranges advertised by the WLAN
+ * driver and may skip certain ranges. Skipped ranges have no SAR
+ * limitation, which is equivalent to setting %NL80211_SAR_ATTR_SPECS_POWER
+ * to an unreasonably high value, since any value above the regulatory
+ * limit simply removes the SAR power limitation. At least one range must
+ * be set, and setting duplicate ranges in one SET operation is not allowed.
+ *
+ * Every SET operation overwrites the previous SET operation.
+ *
+ * @NL80211_SAR_ATTR_SPECS_START_FREQ: Required (u32) value to specify the start
+ * frequency of this range edge when registering SAR capability to wiphy.
+ * It's not a channel center frequency. The unit is kHz.
+ *
+ * @NL80211_SAR_ATTR_SPECS_END_FREQ: Required (u32) value to specify the end
+ * frequency of this range edge when registering SAR capability to wiphy.
+ * It's not a channel center frequency. The unit is kHz.
+ *
+ * @__NL80211_SAR_ATTR_SPECS_LAST: Internal
+ * @NL80211_SAR_ATTR_SPECS_MAX: highest sar specs attribute
+ */
+enum nl80211_sar_specs_attrs {
+ __NL80211_SAR_ATTR_SPECS_INVALID,
+
+ NL80211_SAR_ATTR_SPECS_POWER,
+ NL80211_SAR_ATTR_SPECS_RANGE_INDEX,
+ NL80211_SAR_ATTR_SPECS_START_FREQ,
+ NL80211_SAR_ATTR_SPECS_END_FREQ,
+
+ __NL80211_SAR_ATTR_SPECS_LAST,
+ NL80211_SAR_ATTR_SPECS_MAX = __NL80211_SAR_ATTR_SPECS_LAST - 1,
+};
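Putting the pieces together, a SET request nests the type and the spec array. A hedged libnl sketch (assuming the top-level attribute is %NL80211_ATTR_SAR_SPEC and that the kernel iterates the nested spec entries regardless of the index used for each entry):

#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: cap frequency range 0 at 11 dBm (44 * 0.25 dBm). */
static int put_sar_spec(struct nl_msg *msg)
{
	struct nlattr *sar, *specs, *spec;

	sar = nla_nest_start(msg, NL80211_ATTR_SAR_SPEC);
	if (!sar)
		return -1;
	nla_put_u32(msg, NL80211_SAR_ATTR_TYPE, NL80211_SAR_TYPE_POWER);
	specs = nla_nest_start(msg, NL80211_SAR_ATTR_SPECS);
	spec = nla_nest_start(msg, 1);		/* array entry */
	nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_POWER, 44); /* s32 value */
	nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_RANGE_INDEX, 0);
	nla_nest_end(msg, spec);
	nla_nest_end(msg, specs);
	nla_nest_end(msg, sar);
	return 0;
}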
+
+/**
+ * enum nl80211_mbssid_config_attributes - multiple BSSID (MBSSID) and enhanced
+ * multi-BSSID advertisements (EMA) in AP mode.
+ * Kernel uses some of these attributes to advertise driver's support for
+ * MBSSID and EMA.
+ * Remaining attributes should be used by the userspace to configure the
+ * features.
+ *
+ * @__NL80211_MBSSID_CONFIG_ATTR_INVALID: Invalid
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES: Used by the kernel to advertise
+ * the maximum number of MBSSID interfaces supported by the driver.
+ * Driver should indicate MBSSID support by setting
+ * wiphy->mbssid_max_interfaces to a value greater than or equal to 2.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY: Used by the kernel
+ * to advertise the maximum profile periodicity supported by the driver
+ * if EMA is enabled. Driver should indicate EMA support to the userspace
+ * by setting wiphy->ema_max_profile_periodicity to
+ * a non-zero value.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_INDEX: Mandatory parameter to pass the index of
+ * this BSS (u8) in the multiple BSSID set.
+ * Value must be set to 0 for the transmitting interface and non-zero for
+ * all non-transmitting interfaces. Userspace is responsible for using
+ * unique indices for the interfaces.
+ * Range: 0 to wiphy->mbssid_max_interfaces-1.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX: Mandatory parameter for
+ * a non-transmitted profile which provides the interface index (u32) of
+ * the transmitted profile. The value must match one of the interface
+ * indices advertised by the kernel. Optional if the interface being set up
+ * is the transmitting one; if provided, the value must match its own
+ * interface index.
+ *
+ * @NL80211_MBSSID_CONFIG_ATTR_EMA: Flag used to enable EMA AP feature.
+ * Setting this flag is permitted only if the driver advertises EMA support
+ * by setting wiphy->ema_max_profile_periodicity to non-zero.
+ *
+ * @__NL80211_MBSSID_CONFIG_ATTR_LAST: Internal
+ * @NL80211_MBSSID_CONFIG_ATTR_MAX: highest attribute
+ */
+enum nl80211_mbssid_config_attributes {
+ __NL80211_MBSSID_CONFIG_ATTR_INVALID,
+
+ NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES,
+ NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY,
+ NL80211_MBSSID_CONFIG_ATTR_INDEX,
+ NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX,
+ NL80211_MBSSID_CONFIG_ATTR_EMA,
+
+ /* keep last */
+ __NL80211_MBSSID_CONFIG_ATTR_LAST,
+ NL80211_MBSSID_CONFIG_ATTR_MAX = __NL80211_MBSSID_CONFIG_ATTR_LAST - 1,
+};
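As a hedged sketch of the userspace side (assuming the nest sits under %NL80211_ATTR_MBSSID_CONFIG in the start-AP request): a non-transmitting profile gets a non-zero index plus the ifindex of the transmitting interface.

#include <stdint.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: configure BSS index 1 of an MBSSID set, without EMA. */
static int put_mbssid_config(struct nl_msg *msg, uint32_t tx_ifindex)
{
	struct nlattr *nest = nla_nest_start(msg, NL80211_ATTR_MBSSID_CONFIG);

	if (!nest)
		return -1;
	nla_put_u8(msg, NL80211_MBSSID_CONFIG_ATTR_INDEX, 1);
	nla_put_u32(msg, NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX, tx_ifindex);
	return nla_nest_end(msg, nest);
}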
+
+/**
+ * enum nl80211_ap_settings_flags - AP settings flags
+ *
+ * @NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external
+ * authentication.
+ * @NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT: Userspace supports SA Query
+ * procedures offload to driver. If driver advertises
+ * %NL80211_AP_SME_SA_QUERY_OFFLOAD in AP SME features, userspace shall
+ * ignore SA Query procedures and validations when this flag is set by
+ * userspace.
+ */
+enum nl80211_ap_settings_flags {
+ NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 1 << 0,
+ NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 1 << 1,
+};
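The flags are a plain u32 bitmask. A hedged sketch, assuming they are carried in the %NL80211_ATTR_AP_SETTINGS_FLAGS attribute of the start-AP command:

#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: advertise both userspace capabilities when starting the AP. */
static int put_ap_settings_flags(struct nl_msg *msg)
{
	return nla_put_u32(msg, NL80211_ATTR_AP_SETTINGS_FLAGS,
			   NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT |
			   NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT);
}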
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h
new file mode 100644
index 000000000000..1d39f6f38c96
--- /dev/null
+++ b/include/uapi/linux/npcm-video.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Controls header for NPCM video driver
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#ifndef _UAPI_LINUX_NPCM_VIDEO_H
+#define _UAPI_LINUX_NPCM_VIDEO_H
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control
+ * details.
+ */
+
+/*
+ * This control is meant to set the mode of the NPCM Video
+ * Capture/Differentiation (VCD) engine.
+ *
+ * The VCD engine supports two modes:
+ * COMPLETE - Capture the next complete frame into memory.
+ * DIFF - Compare the incoming frame with the frame stored in memory, and
+ * update the differentiated frame in memory.
+ */
+#define V4L2_CID_NPCM_CAPTURE_MODE (V4L2_CID_USER_NPCM_BASE + 0)
+
+enum v4l2_npcm_capture_mode {
+ V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */
+ V4L2_NPCM_CAPTURE_MODE_DIFF = 1, /* DIFF mode */
+};
+
+/*
+ * This control is meant to get the count of compressed HEXTILE rectangles,
+ * which corresponds to the number of differentiated frames when VCD is in
+ * DIFF mode. The count is always 1 when VCD is in COMPLETE mode.
+ */
+#define V4L2_CID_NPCM_RECT_COUNT (V4L2_CID_USER_NPCM_BASE + 1)
+
+#endif /* _UAPI_LINUX_NPCM_VIDEO_H */
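These are ordinary V4L2 user controls, so switching modes is a standard VIDIOC_S_CTRL call; a hedged sketch operating on an already-opened video node (the device path is board specific):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/npcm-video.h>

/* Sketch: switch the VCD engine to DIFF mode. */
static int npcm_set_diff_mode(int fd)
{
	struct v4l2_control ctrl;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_NPCM_CAPTURE_MODE;
	ctrl.value = V4L2_NPCM_CAPTURE_MODE_DIFF;
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}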
diff --git a/include/uapi/linux/nsm.h b/include/uapi/linux/nsm.h
new file mode 100644
index 000000000000..e529f232f6c0
--- /dev/null
+++ b/include/uapi/linux/nsm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef __UAPI_LINUX_NSM_H
+#define __UAPI_LINUX_NSM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NSM_MAGIC 0x0A
+
+#define NSM_REQUEST_MAX_SIZE 0x1000
+#define NSM_RESPONSE_MAX_SIZE 0x3000
+
+struct nsm_iovec {
+ __u64 addr; /* Virtual address of target buffer */
+ __u64 len; /* Length of target buffer */
+};
+
+/* Raw NSM message. Only available with CAP_SYS_ADMIN. */
+struct nsm_raw {
+ /* Request from user */
+ struct nsm_iovec request;
+ /* Response to user */
+ struct nsm_iovec response;
+};
+#define NSM_IOCTL_RAW _IOWR(NSM_MAGIC, 0x0, struct nsm_raw)
+
+#endif /* __UAPI_LINUX_NSM_H */
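The ioctl takes one request/response iovec pair. A hedged sketch (assuming the driver exposes a misc device at /dev/nsm and writes the produced length back into response.len; CAP_SYS_ADMIN is required for the raw interface):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsm.h>

/* Sketch: one raw NSM transaction; returns the response length or -1. */
static long nsm_transact(const void *req, uint64_t req_len,
			 void *resp, uint64_t resp_len)
{
	struct nsm_raw raw;
	int fd = open("/dev/nsm", O_RDWR);	/* assumed device node */
	int ret;

	if (fd < 0)
		return -1;
	memset(&raw, 0, sizeof(raw));
	raw.request.addr = (uint64_t)(uintptr_t)req;
	raw.request.len = req_len;
	raw.response.addr = (uint64_t)(uintptr_t)resp;
	raw.response.len = resp_len;
	ret = ioctl(fd, NSM_IOCTL_RAW, &raw);
	close(fd);
	return ret < 0 ? -1 : (long)raw.response.len;
}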
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index d99b5a772698..2f76cba67166 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -55,7 +55,10 @@ struct nvme_passthru_cmd64 {
__u64 metadata;
__u64 addr;
__u32 metadata_len;
- __u32 data_len;
+ union {
+ __u32 data_len; /* for non-vectored io */
+ __u32 vec_cnt; /* for vectored io */
+ };
__u32 cdw10;
__u32 cdw11;
__u32 cdw12;
@@ -67,6 +70,28 @@ struct nvme_passthru_cmd64 {
__u64 result;
};
+/* same as struct nvme_passthru_cmd64, minus the 8-byte result field */
+struct nvme_uring_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd1;
+ __u32 nsid;
+ __u32 cdw2;
+ __u32 cdw3;
+ __u64 metadata;
+ __u64 addr;
+ __u32 metadata_len;
+ __u32 data_len;
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 rsvd2;
+};
+
#define nvme_admin_cmd nvme_passthru_cmd
#define NVME_IOCTL_ID _IO('N', 0x40)
@@ -78,5 +103,12 @@ struct nvme_passthru_cmd64 {
#define NVME_IOCTL_RESCAN _IO('N', 0x46)
#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64)
+
+/* io_uring async commands: */
+#define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd)
+#define NVME_URING_CMD_IO_VEC _IOWR('N', 0x81, struct nvme_uring_cmd)
+#define NVME_URING_CMD_ADMIN _IOWR('N', 0x82, struct nvme_uring_cmd)
+#define NVME_URING_CMD_ADMIN_VEC _IOWR('N', 0x83, struct nvme_uring_cmd)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
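With the union in place, the vectored ioctl reuses the same struct: addr points at an iovec array rather than a flat buffer, and vec_cnt replaces data_len. A hedged sketch (opcode 0x02 is the NVMe I/O Read command; cdw10/11/12, which carry the starting LBA and 0-based block count, are left at zero here, i.e. one block from LBA 0):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/nvme_ioctl.h>

/* Sketch: vectored read through the new NVME_IOCTL_IO64_CMD_VEC. */
static int nvme_vec_read(int fd, uint32_t nsid, struct iovec *iov, int nvec)
{
	struct nvme_passthru_cmd64 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x02;			/* NVMe I/O Read */
	cmd.nsid = nsid;
	cmd.addr = (uint64_t)(uintptr_t)iov;	/* iovec array, not data */
	cmd.vec_cnt = nvec;			/* new union member */
	return ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, &cmd);
}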
diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h
index 87b55755f4ff..d9db7ad43890 100644
--- a/include/uapi/linux/omap3isp.h
+++ b/include/uapi/linux/omap3isp.h
@@ -162,6 +162,7 @@ struct omap3isp_h3a_aewb_config {
* struct omap3isp_stat_data - Statistic data sent to or received from user
* @ts: Timestamp of returned framestats.
* @buf: Pointer to pass to user.
+ * @buf_size: Size of buffer.
* @frame_number: Frame number of requested stats.
* @cur_frame: Current frame number being processed.
* @config_counter: Number of the configuration associated with the data.
@@ -176,10 +177,12 @@ struct omap3isp_stat_data {
struct timeval ts;
#endif
void __user *buf;
- __u32 buf_size;
- __u16 frame_number;
- __u16 cur_frame;
- __u16 config_counter;
+ __struct_group(/* no tag */, frame, /* no attrs */,
+ __u32 buf_size;
+ __u16 frame_number;
+ __u16 cur_frame;
+ __u16 config_counter;
+ );
};
#ifdef __KERNEL__
@@ -189,10 +192,12 @@ struct omap3isp_stat_data_time32 {
__s32 tv_usec;
} ts;
__u32 buf;
- __u32 buf_size;
- __u16 frame_number;
- __u16 cur_frame;
- __u16 config_counter;
+ __struct_group(/* no tag */, frame, /* no attrs */,
+ __u32 buf_size;
+ __u16 frame_number;
+ __u16 cur_frame;
+ __u16 config_counter;
+ );
};
#endif
diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h
index 58b1eb711360..a5feb7604948 100644
--- a/include/uapi/linux/openat2.h
+++ b/include/uapi/linux/openat2.h
@@ -35,5 +35,9 @@ struct open_how {
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
be scoped inside the dirfd
(similar to chroot(2)). */
+#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
+ completed through cached lookup. May
+ return -EAGAIN if that's not
+ possible. */
#endif /* _UAPI_LINUX_OPENAT2_H */
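RESOLVE_CACHED turns a lookup into a fail-fast probe: if any path component is not already in the dcache, the call returns -EAGAIN instead of blocking. A hedged sketch (openat2 has no glibc wrapper, so it goes through syscall(); assumes a libc that defines SYS_openat2):

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

/* Sketch: cached-only open first, full resolution as the slow path. */
static int open_maybe_cached(const char *path)
{
	struct open_how how;
	long fd;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_CACHED;
	fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
	if (fd < 0 && errno == EAGAIN) {	/* not fully cached */
		how.resolve = 0;
		fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
	}
	return (int)fd;
}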
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 8300cc29dec8..efc82c318fa2 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -70,10 +70,14 @@ enum ovs_datapath_cmd {
* set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on
* %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
* not be sent.
+ * @OVS_DP_ATTR_PER_CPU_PIDS: Per-cpu array of PIDs for upcalls when
+ * the OVS_DP_F_DISPATCH_UPCALL_PER_CPU feature is set.
* @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
* datapath. Always present in notifications.
* @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about mega flow masks usage for the
* datapath. Always present in notifications.
+ * @OVS_DP_ATTR_IFINDEX: Interface index for a new datapath netdev. Only
+ * valid for %OVS_DP_CMD_NEW requests.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_DP_* commands.
@@ -87,6 +91,10 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
OVS_DP_ATTR_PAD,
OVS_DP_ATTR_MASKS_CACHE_SIZE,
+ OVS_DP_ATTR_PER_CPU_PIDS, /* Netlink PIDS to receive upcalls in
+ * per-cpu dispatch mode
+ */
+ OVS_DP_ATTR_IFINDEX,
__OVS_DP_ATTR_MAX
};
@@ -127,6 +135,9 @@ struct ovs_vport_stats {
/* Allow tc offload recirc sharing */
#define OVS_DP_F_TC_RECIRC_SHARING (1 << 2)
+/* Allow per-cpu dispatch of upcalls */
+#define OVS_DP_F_DISPATCH_UPCALL_PER_CPU (1 << 3)
+
/* Fixed logical ports. */
#define OVSP_LOCAL ((__u32)0)
@@ -266,11 +277,25 @@ enum ovs_vport_attr {
OVS_VPORT_ATTR_PAD,
OVS_VPORT_ATTR_IFINDEX,
OVS_VPORT_ATTR_NETNSID,
+ OVS_VPORT_ATTR_UPCALL_STATS,
__OVS_VPORT_ATTR_MAX
};
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+/**
+ * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL* commands
+ * @OVS_VPORT_UPCALL_ATTR_SUCCESS: 64-bit count of packets upcalled
+ * successfully.
+ * @OVS_VPORT_UPCALL_ATTR_FAIL: 64-bit count of packets whose upcall failed.
+ */
+enum ovs_vport_upcall_attr {
+ OVS_VPORT_UPCALL_ATTR_SUCCESS,
+ OVS_VPORT_UPCALL_ATTR_FAIL,
+ __OVS_VPORT_UPCALL_ATTR_MAX
+};
+
+#define OVS_VPORT_UPCALL_ATTR_MAX (__OVS_VPORT_UPCALL_ATTR_MAX - 1)
+
enum {
OVS_VXLAN_EXT_UNSPEC,
OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
@@ -344,9 +369,20 @@ enum ovs_key_attr {
OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */
OVS_KEY_ATTR_NSH, /* Nested set of ovs_nsh_key_* */
-#ifdef __KERNEL__
- OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
-#endif
+ /* User space decided to squat on types 29 and 30. They are defined
+ * below, but should not be sent to the kernel.
+ *
+ * WARNING: No new types should be added unless they are defined
+ * for both kernel and user space (no 'ifdef's). It's hard
+ * to keep compatibility otherwise.
+ */
+ OVS_KEY_ATTR_PACKET_TYPE, /* be32 packet type */
+ OVS_KEY_ATTR_ND_EXTENSIONS, /* IPv6 Neighbor Discovery extensions */
+
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info.
+ * For in-kernel use only.
+ */
+ OVS_KEY_ATTR_IPV6_EXTHDRS, /* struct ovs_key_ipv6_exthdr */
__OVS_KEY_ATTR_MAX
};
@@ -422,6 +458,11 @@ struct ovs_key_ipv6 {
__u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */
};
+/* separate structure to support backward compatibility with older user space */
+struct ovs_key_ipv6_exthdrs {
+ __u16 hdrs;
+};
+
struct ovs_key_tcp {
__be16 tcp_src;
__be16 tcp_dst;
@@ -724,6 +765,7 @@ struct ovs_action_push_vlan {
*/
enum ovs_hash_alg {
OVS_HASH_ALG_L4,
+ OVS_HASH_ALG_SYM_L4,
};
/*
@@ -923,6 +965,7 @@ struct check_pkt_len_arg {
* start of the packet or at the start of the l3 header depending on the value
* of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS
* argument.
+ * @OVS_ACTION_ATTR_DROP: Explicit drop action.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -960,6 +1003,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */
+ OVS_ACTION_ATTR_DROP, /* u32 error code. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
@@ -1058,4 +1102,6 @@ enum ovs_dec_ttl_attr {
__OVS_DEC_TTL_ATTR_MAX
};
+#define OVS_DEC_TTL_ATTR_MAX (__OVS_DEC_TTL_ATTR_MAX - 1)
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/uapi/linux/parport.h b/include/uapi/linux/parport.h
index f41388f88dc3..fe93e41fc205 100644
--- a/include/uapi/linux/parport.h
+++ b/include/uapi/linux/parport.h
@@ -90,6 +90,9 @@ typedef enum {
/* Flags for block transfer operations. */
#define PARPORT_EPP_FAST (1<<0) /* Unreliable counts. */
#define PARPORT_W91284PIC (1<<1) /* have a Warp9 w91284pic in the device */
+#define PARPORT_EPP_FAST_32 PARPORT_EPP_FAST /* 32-bit EPP transfers */
+#define PARPORT_EPP_FAST_16 (1<<2) /* 16-bit EPP transfers */
+#define PARPORT_EPP_FAST_8 (1<<3) /* 8-bit EPP transfers */
/* The rest is for the kernel only */
#endif /* _UAPI_PARPORT_H_ */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index f9701410d3b5..a39193213ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -76,9 +76,11 @@
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_MASK 0x7f
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
+#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
@@ -246,7 +248,7 @@
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
-#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
+#define PCI_PM_CAP_PME_D3hot 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
@@ -300,23 +302,23 @@
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-/* Message Signalled Interrupt registers */
+/* Message Signaled Interrupt registers */
-#define PCI_MSI_FLAGS 2 /* Message Control */
+#define PCI_MSI_FLAGS 0x02 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
-#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
-#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
-#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
-#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
-#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
-#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
-#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
+#define PCI_MSI_ADDRESS_LO 0x04 /* Lower 32 bits */
+#define PCI_MSI_ADDRESS_HI 0x08 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
+#define PCI_MSI_DATA_32 0x08 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 0x0c /* Mask bits register for 32-bit devices */
+#define PCI_MSI_PENDING_32 0x10 /* Pending intrs for 32-bit devices */
+#define PCI_MSI_DATA_64 0x0c /* 16 bits of data for 64-bit devices */
+#define PCI_MSI_MASK_64 0x10 /* Mask bits register for 64-bit devices */
+#define PCI_MSI_PENDING_64 0x14 /* Pending intrs for 64-bit devices */
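All of these offsets are relative to the MSI capability base, and the data, mask, and pending registers shift by 4 bytes when the 64-bit address field is present, which is why both _32 and _64 spellings exist. A small sketch of the selection logic:

#include <stdint.h>
#include <linux/pci_regs.h>

/* Sketch: pick the Message Data offset within an MSI capability. */
static unsigned int msi_data_off(uint16_t msi_flags)
{
	return (msi_flags & PCI_MSI_FLAGS_64BIT) ? PCI_MSI_DATA_64
						 : PCI_MSI_DATA_32;
}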
/* MSI-X registers (in MSI-X capability) */
#define PCI_MSIX_FLAGS 2 /* Message Control */
@@ -334,10 +336,10 @@
/* MSI-X Table entry format (in memory mapped by a BAR) */
#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */
-#define PCI_MSIX_ENTRY_DATA 8 /* Message Data */
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0x0 /* Message Address */
+#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */
+#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
/* CompactPCI Hotswap Register */
@@ -469,7 +471,7 @@
/* PCI Express capability registers */
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS 0x02 /* Capabilities register */
#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
@@ -483,7 +485,7 @@
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
@@ -496,13 +498,19 @@
#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
-#define PCI_EXP_DEVCTL 8 /* Device Control */
+#define PCI_EXP_DEVCTL 0x08 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
@@ -515,7 +523,7 @@
#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define PCI_EXP_DEVSTA 0x0a /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
@@ -523,15 +531,18 @@
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
#define PCI_EXP_LNKCAP_SLS_32_0GB 0x00000005 /* LNKCAP2 SLS Vector bit 4 */
+#define PCI_EXP_LNKCAP_SLS_64_0GB 0x00000006 /* LNKCAP2 SLS Vector bit 5 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#define PCI_EXP_LNKCAP_ASPM_L0S 0x00000400 /* ASPM L0s Support */
+#define PCI_EXP_LNKCAP_ASPM_L1 0x00000800 /* ASPM L1 Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */
@@ -539,7 +550,7 @@
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
-#define PCI_EXP_LNKCTL 16 /* Link Control */
+#define PCI_EXP_LNKCTL 0x10 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
@@ -552,13 +563,14 @@
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_LNKSTA 0x12 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
#define PCI_EXP_LNKSTA_CLS_32_0GB 0x0005 /* Current Link Speed 32.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_64_0GB 0x0006 /* Current Link Speed 64.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
@@ -571,7 +583,7 @@
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCAP 0x14 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
@@ -584,7 +596,7 @@
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTCTL 0x18 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
@@ -605,8 +617,9 @@
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
+#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */
#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_SLTSTA 0x1a /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
@@ -616,15 +629,16 @@
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
-#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCTL 0x1c /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
-#define PCI_EXP_RTSTA 32 /* Root Status */
+#define PCI_EXP_RTSTA 0x20 /* Root Status */
+#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -635,7 +649,7 @@
* Use pcie_capability_read_word() and similar interfaces to use them
* safely.
*/
-#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2 0x24 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
@@ -647,7 +661,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
-#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
@@ -659,31 +673,34 @@
#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
-#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
-#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
-#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
+#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */
+#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */
+#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCAP2_SLS_64_0GB 0x00000040 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
-#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */
#define PCI_EXP_LNKCTL2_TLS 0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
-#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
-#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
+#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
-#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
-#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
+#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+#define PCI_EXP_SLTSTA2 0x3a /* Slot Status 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -720,15 +737,18 @@
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT
+#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
+#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
-#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
+#define PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
@@ -746,11 +766,11 @@
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
-#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
+#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */
/* Same bits as above */
-#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
+#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */
/* Same bits as above */
-#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
+#define PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */
#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
@@ -759,20 +779,20 @@
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
-#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
+#define PCI_ERR_COR_MASK 0x14 /* Correctable Error Mask */
/* Same bits as above */
-#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
-#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
+#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
+#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
-#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
-#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
+#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
+#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_STATUS 48
+#define PCI_ERR_ROOT_STATUS 0x30
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
@@ -781,52 +801,59 @@
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
-#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
+#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
/* Virtual Channel */
-#define PCI_VC_PORT_CAP1 4
+#define PCI_VC_PORT_CAP1 0x04
#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
-#define PCI_VC_PORT_CAP2 8
+#define PCI_VC_PORT_CAP2 0x08
#define PCI_VC_CAP2_32_PHASE 0x00000002
#define PCI_VC_CAP2_64_PHASE 0x00000004
#define PCI_VC_CAP2_128_PHASE 0x00000008
#define PCI_VC_CAP2_ARB_OFF 0xff000000
-#define PCI_VC_PORT_CTRL 12
+#define PCI_VC_PORT_CTRL 0x0c
#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
-#define PCI_VC_PORT_STATUS 14
+#define PCI_VC_PORT_STATUS 0x0e
#define PCI_VC_PORT_STATUS_TABLE 0x00000001
-#define PCI_VC_RES_CAP 16
+#define PCI_VC_RES_CAP 0x10
#define PCI_VC_RES_CAP_32_PHASE 0x00000002
#define PCI_VC_RES_CAP_64_PHASE 0x00000004
#define PCI_VC_RES_CAP_128_PHASE 0x00000008
#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
#define PCI_VC_RES_CAP_256_PHASE 0x00000020
#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
-#define PCI_VC_RES_CTRL 20
+#define PCI_VC_RES_CTRL 0x14
#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
#define PCI_VC_RES_CTRL_ID 0x07000000
#define PCI_VC_RES_CTRL_ENABLE 0x80000000
-#define PCI_VC_RES_STATUS 26
+#define PCI_VC_RES_STATUS 0x1a
#define PCI_VC_RES_STATUS_TABLE 0x00000001
#define PCI_VC_RES_STATUS_NEGO 0x00000002
#define PCI_CAP_VC_BASE_SIZEOF 0x10
-#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0c
/* Power Budgeting */
-#define PCI_PWR_DSR 4 /* Data Select Register */
-#define PCI_PWR_DATA 8 /* Data Register */
+#define PCI_PWR_DSR 0x04 /* Data Select Register */
+#define PCI_PWR_DATA 0x08 /* Data Register */
#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
-#define PCI_PWR_CAP 12 /* Capability */
+#define PCI_PWR_CAP 0x0c /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
-#define PCI_EXT_CAP_PWR_SIZEOF 16
+#define PCI_EXT_CAP_PWR_SIZEOF 0x10
+
+/* Root Complex Event Collector Endpoint Association */
+#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */
+#define PCI_RCEC_BUSN 8 /* RCEC Associated Bus Numbers */
+#define PCI_RCEC_BUSN_REG_VER 0x02 /* Least version with BUSN present */
+#define PCI_RCEC_BUSN_NEXT(x) (((x) >> 8) & 0xff)
+#define PCI_RCEC_BUSN_LAST(x) (((x) >> 16) & 0xff)
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
@@ -905,12 +932,13 @@
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
-#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
-#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */
+#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_WIDTH 0x1f00
#define PCI_PASID_CTRL 0x06 /* PASID control register */
-#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
-#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
-#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
+#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */
+#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */
+#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
@@ -943,13 +971,15 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
-#define PCI_EXT_CAP_SRIOV_SIZEOF 64
+#define PCI_EXT_CAP_SRIOV_SIZEOF 0x40
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
+#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */
+#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
@@ -996,12 +1026,12 @@
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
-#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
-#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
-#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */
+#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */
/* Downstream Port Containment */
-#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */
#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
@@ -1009,19 +1039,26 @@
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
-#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL 0x06 /* DPC control */
#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
-#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */
+#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
-#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
@@ -1035,6 +1072,7 @@
/* Precision Time Measurement */
#define PCI_PTM_CAP 0x04 /* PTM Capability */
#define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */
+#define PCI_PTM_CAP_RES 0x00000002 /* Responder capable */
#define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */
#define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */
#define PCI_PTM_CTRL 0x08 /* PTM Control */
@@ -1056,11 +1094,22 @@
#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */
+#define PCI_L1SS_CTL1_L1_2_MASK 0x00000005
#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f
#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */
+#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */
+
+/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
+#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
+#define PCI_DVSEC_HEADER1_VID(x) ((x) & 0xffff)
+#define PCI_DVSEC_HEADER1_REV(x) (((x) >> 16) & 0xf)
+#define PCI_DVSEC_HEADER1_LEN(x) (((x) >> 20) & 0xfff)
+#define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */
+#define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff)
/* Data Link Feature */
#define PCI_DLF_CAP 0x04 /* Capabilities Register */
@@ -1072,4 +1121,31 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Data Object Exchange */
+#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
+#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
+#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */
+#define PCI_DOE_CTRL 0x08 /* DOE Control Register */
+#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */
+#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */
+#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */
+#define PCI_DOE_STATUS 0x0c /* DOE Status Register */
+#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */
+#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */
+#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */
+#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
+#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
+#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
+#define PCI_DOE_CAP_SIZEOF 0x18 /* Size of DOE register block */
+
+/* DOE Data Object - note not actually registers */
+#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff
+
+#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000
+
#endif /* LINUX_PCI_REGS_H */
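For reference, a minimal userspace sketch of how the new DOE Discovery masks unpack a response dword; the doe_disc_* helper names are illustrative, not part of the header:

#include <linux/pci_regs.h>
#include <stdint.h>

/* Decode dword 3 of a DOE Discovery response using the masks above. */
static inline uint16_t doe_disc_rsp_vid(uint32_t dw)
{
        return dw & PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID;                 /* bits 15:0 */
}

static inline uint8_t doe_disc_rsp_protocol(uint32_t dw)
{
        return (dw & PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL) >> 16;   /* bits 23:16 */
}

static inline uint8_t doe_disc_rsp_next_index(uint32_t dw)
{
        /* bits 31:24; an index of 0 terminates the discovery walk */
        return (dw & PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX) >> 24;
}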
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index c3ab4c826297..94b46b043b53 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/**
+/*
* pcitest.h - PCI test uapi defines
*
* Copyright (C) 2017 Texas Instruments
@@ -11,7 +11,8 @@
#define __UAPI_LINUX_PCITEST_H
#define PCITEST_BAR _IO('P', 0x1)
-#define PCITEST_LEGACY_IRQ _IO('P', 0x2)
+#define PCITEST_INTX_IRQ _IO('P', 0x2)
+#define PCITEST_LEGACY_IRQ PCITEST_INTX_IRQ
#define PCITEST_MSI _IOW('P', 0x3, int)
#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
#define PCITEST_READ _IOW('P', 0x5, unsigned long)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 077e7ee69e3d..3a64499b0f5d 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -38,6 +38,21 @@ enum perf_type_id {
};
/*
+ * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
+ * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ * AA: hardware event ID
+ * EEEEEEEE: PMU type ID
+ * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ * BB: hardware cache ID
+ * CC: hardware cache op ID
+ * DD: hardware cache op result ID
+ * EEEEEEEE: PMU type ID
+ * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
+ */
+#define PERF_PMU_TYPE_SHIFT 32
+#define PERF_HW_EVENT_MASK 0xffffffff
+
+/*
* Generalized performance event event_id types, used by the
* attr.event_id parameter of the sys_perf_event_open()
* syscall:
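A hedged sketch of how the two new constants compose attr.config for an extended-PMU hardware event, per the layout comment above; make_hw_config() is a hypothetical helper:

#include <linux/perf_event.h>

static __u64 make_hw_config(__u32 pmu_type, __u64 hw_event)
{
        /* high 32 bits select the PMU, low 32 bits the hardware event ID */
        return ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
               (hw_event & PERF_HW_EVENT_MASK);
}

/* e.g. attr.type   = PERF_TYPE_HARDWARE;
 *      attr.config = make_hw_config(pmu_type_id, PERF_COUNT_HW_CPU_CYCLES);
 */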
@@ -112,6 +127,7 @@ enum perf_sw_ids {
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_DUMMY = 9,
PERF_COUNT_SW_BPF_OUTPUT = 10,
+ PERF_COUNT_SW_CGROUP_SWITCHES = 11,
PERF_COUNT_SW_MAX, /* non-ABI */
};
@@ -143,12 +159,14 @@ enum perf_event_sample_format {
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_AUX = 1U << 20,
PERF_SAMPLE_CGROUP = 1U << 21,
+ PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22,
+ PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23,
+ PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24,
- PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */
-
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
+ PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */
};
+#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
/*
* values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
*
@@ -184,6 +202,10 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */
+ PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -213,6 +235,10 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
+ PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -231,9 +257,50 @@ enum {
PERF_BR_SYSRET = 8, /* syscall return */
PERF_BR_COND_CALL = 9, /* conditional function call */
PERF_BR_COND_RET = 10, /* conditional function return */
+ PERF_BR_ERET = 11, /* exception return */
+ PERF_BR_IRQ = 12, /* irq */
+ PERF_BR_SERROR = 13, /* system error */
+ PERF_BR_NO_TX = 14, /* not in transaction */
+ PERF_BR_EXTEND_ABI = 15, /* extend ABI */
PERF_BR_MAX,
};
+/*
+ * Common branch speculation outcome classification
+ */
+enum {
+ PERF_BR_SPEC_NA = 0, /* Not available */
+ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
+ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
+ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
+ PERF_BR_SPEC_MAX,
+};
+
+enum {
+ PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
+ PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
+ PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */
+ PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */
+ PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */
+ PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */
+ PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */
+ PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */
+ PERF_BR_NEW_MAX,
+};
+
+enum {
+ PERF_BR_PRIV_UNKNOWN = 0,
+ PERF_BR_PRIV_USER = 1,
+ PERF_BR_PRIV_KERNEL = 2,
+ PERF_BR_PRIV_HV = 3,
+};
+
+#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
+
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
PERF_SAMPLE_BRANCH_KERNEL|\
@@ -279,6 +346,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
@@ -286,6 +354,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
@@ -295,8 +364,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_LOST = 1U << 4,
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -307,6 +377,8 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
+#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -384,7 +456,11 @@ struct perf_event_attr {
aux_output : 1, /* generate AUX records instead of events */
cgroup : 1, /* include cgroup events */
text_poke : 1, /* include text poke events */
- __reserved_1 : 30;
+ build_id : 1, /* use build id in mmap2 events */
+ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
+ remove_on_exec : 1, /* event is removed from task on exec */
+ sigtrap : 1, /* send synchronous SIGTRAP on event */
+ __reserved_1 : 26;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -436,6 +512,16 @@ struct perf_event_attr {
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;
+
+ /*
+ * User provided data if sigtrap=1, passed back to user via
+ * siginfo_t::si_perf_data, e.g. to permit the user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
+ */
+ __u64 sig_data;
+
+ __u64 config3; /* extension of config2 */
};
/*
@@ -456,7 +542,7 @@ struct perf_event_query_bpf {
/*
* User provided buffer to store program ids
*/
- __u32 ids[0];
+ __u32 ids[];
};
/*
@@ -657,6 +743,22 @@ struct perf_event_mmap_page {
__u64 aux_size;
};
+/*
+ * The current state of perf_event_header::misc bits usage:
+ * ('|' used bit, '-' unused bit)
+ *
+ * 012 CDEF
+ * |||---------||||
+ *
+ * Where:
+ * 0-2 CPUMODE_MASK
+ *
+ * C PROC_MAP_PARSE_TIMEOUT
+ * D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
+ * E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
+ * F (reserved)
+ */
+
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
@@ -688,6 +790,7 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
+ * PERF_RECORD_MISC_MMAP_BUILD_ID - PERF_RECORD_MMAP2 event
*
*
* PERF_RECORD_MISC_EXACT_IP:
@@ -697,9 +800,13 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
* Indicates that thread was preempted in TASK_RUNNING state.
+ *
+ * PERF_RECORD_MISC_MMAP_BUILD_ID:
+ * Indicates that mmap2 event carries build id data.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
+#define PERF_RECORD_MISC_MMAP_BUILD_ID (1 << 14)
/*
* Reserve the last bit to indicate some extended misc field
*/
@@ -879,6 +986,12 @@ enum perf_event_type {
* { u64 nr;
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
+ * #
+ * # The format of the counters is decided by the
+ * # "branch_counter_nr" and "branch_counter_width",
+ * # which are defined in the ABI.
+ * #
+ * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
@@ -888,7 +1001,24 @@ enum perf_event_type {
* char data[size];
* u64 dyn_size; } && PERF_SAMPLE_STACK_USER
*
- * { u64 weight; } && PERF_SAMPLE_WEIGHT
+ * { union perf_sample_weight
+ * {
+ * u64 full; && PERF_SAMPLE_WEIGHT
+ * #if defined(__LITTLE_ENDIAN_BITFIELD)
+ * struct {
+ * u32 var1_dw;
+ * u16 var2_w;
+ * u16 var3_w;
+ * } && PERF_SAMPLE_WEIGHT_STRUCT
+ * #elif defined(__BIG_ENDIAN_BITFIELD)
+ * struct {
+ * u16 var3_w;
+ * u16 var2_w;
+ * u32 var1_dw;
+ * } && PERF_SAMPLE_WEIGHT_STRUCT
+ * #endif
+ * }
+ * }
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* { u64 abi; # enum perf_sample_regs_abi
@@ -896,6 +1026,8 @@ enum perf_event_type {
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
* { u64 size;
* char data[size]; } && PERF_SAMPLE_AUX
+ * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
+ * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -911,10 +1043,20 @@ enum perf_event_type {
* u64 addr;
* u64 len;
* u64 pgoff;
- * u32 maj;
- * u32 min;
- * u64 ino;
- * u64 ino_generation;
+ * union {
+ * struct {
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * };
+ * struct {
+ * u8 build_id_size;
+ * u8 __reserved_1;
+ * u16 __reserved_2;
+ * u8 build_id[20];
+ * };
+ * };
* u32 prot, flags;
* char filename[];
* struct sample_id sample_id;
@@ -1060,6 +1202,21 @@ enum perf_event_type {
*/
PERF_RECORD_TEXT_POKE = 20,
+ /*
+ * Data written to the AUX area by hardware due to aux_output may need
+ * to be matched to the event by an architecture-specific hardware ID.
+ * This records the hardware ID, but requires sample_id to provide the
+ * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
+ * records from multiple events.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 hw_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -1101,10 +1258,15 @@ enum perf_callchain_context {
/**
* PERF_RECORD_AUX::flags bits
*/
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */
+
+/* CoreSight PMU AUX buffer formats */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
@@ -1123,14 +1285,18 @@ union perf_mem_data_src {
mem_lvl_num:4, /* memory hierarchy level number */
mem_remote:1, /* remote */
mem_snoopx:2, /* snoop mode, ext */
- mem_rsvd:24;
+ mem_blk:3, /* access blocked */
+ mem_hops:3, /* hop level */
+ mem_rsvd:18;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd:24,
+ __u64 mem_rsvd:18,
+ mem_hops:3, /* hop level */
+ mem_blk:3, /* access blocked */
mem_snoopx:2, /* snoop mode, ext */
mem_remote:1, /* remote */
mem_lvl_num:4, /* memory hierarchy level number */
@@ -1153,7 +1319,13 @@ union perf_mem_data_src {
#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT 0
-/* memory hierarchy (memory level, hit or miss) */
+/*
+ * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
+ * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
+ * This namespace is kept in order to not break existing ABIs.
+ *
+ * memory hierarchy (memory level, hit or miss)
+ */
#define PERF_MEM_LVL_NA 0x01 /* not available */
#define PERF_MEM_LVL_HIT 0x02 /* hit level */
#define PERF_MEM_LVL_MISS 0x04 /* miss level */
@@ -1177,7 +1349,10 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0xa available */
+/* 5-0x7 available */
+#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
+#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
+#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
@@ -1195,8 +1370,8 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_SHIFT 19
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
-/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
@@ -1213,6 +1388,20 @@ union perf_mem_data_src {
#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT 26
+/* Access blocked */
+#define PERF_MEM_BLK_NA 0x01 /* not available */
+#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */
+#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */
+#define PERF_MEM_BLK_SHIFT 40
+
+/* hop level */
+#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
+#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
+#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
+#define PERF_MEM_HOPS_3 0x04 /* remote board */
+/* 5-7 available */
+#define PERF_MEM_HOPS_SHIFT 43
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
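A hedged sketch of PERF_MEM_S() combining the new CXL level, peer-snoop and hop-level encodings into one data_src value:

#include <linux/perf_event.h>

static __u64 example_data_src(void)
{
        return PERF_MEM_S(OP, LOAD) |           /* load operation */
               PERF_MEM_S(LVLNUM, CXL) |        /* served from CXL-attached memory */
               PERF_MEM_S(SNOOPX, PEER) |       /* data transferred from a peer */
               PERF_MEM_S(HOPS, 1);             /* remote node, same socket */
}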
@@ -1231,6 +1420,7 @@ union perf_mem_data_src {
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
* type: branch type
+ * spec: branch speculation info (or 0 if not supported)
*/
struct perf_branch_entry {
__u64 from;
@@ -1241,7 +1431,32 @@ struct perf_branch_entry {
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
type:4, /* branch type */
- reserved:40;
+ spec:2, /* branch speculation info */
+ new_type:4, /* additional branch type */
+ priv:3, /* privilege level */
+ reserved:31;
+};
+
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
+
+union perf_sample_weight {
+ __u64 full;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ struct {
+ __u32 var1_dw;
+ __u16 var2_w;
+ __u16 var3_w;
+ };
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ struct {
+ __u16 var3_w;
+ __u16 var2_w;
+ __u32 var1_dw;
+ };
+#else
+#error "Unknown endianness"
+#endif
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */
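A hedged sketch of splitting a PERF_SAMPLE_WEIGHT_STRUCT value through the union above; what the var* fields mean is PMU specific, so the names are kept generic:

#include <linux/perf_event.h>
#include <stdio.h>

static void print_weight(__u64 raw)
{
        union perf_sample_weight w = { .full = raw };

        /* the endian-specific struct layouts make this split portable */
        printf("var1=%u var2=%u var3=%u\n", w.var1_dw, w.var2_w, w.var3_w);
}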
diff --git a/include/uapi/linux/pfkeyv2.h b/include/uapi/linux/pfkeyv2.h
index d65b11785260..8abae1f6749c 100644
--- a/include/uapi/linux/pfkeyv2.h
+++ b/include/uapi/linux/pfkeyv2.h
@@ -309,6 +309,7 @@ struct sadb_x_filter {
#define SADB_X_AALG_SHA2_512HMAC 7
#define SADB_X_AALG_RIPEMD160HMAC 8
#define SADB_X_AALG_AES_XCBC_MAC 9
+#define SADB_X_AALG_SM3_256HMAC 10
#define SADB_X_AALG_NULL 251 /* kame */
#define SADB_AALG_MAX 251
@@ -329,6 +330,7 @@ struct sadb_x_filter {
#define SADB_X_EALG_AES_GCM_ICV16 20
#define SADB_X_EALG_CAMELLIACBC 22
#define SADB_X_EALG_NULL_AES_GMAC 23
+#define SADB_X_EALG_SM4CBC 24
#define SADB_EALG_MAX 253 /* last EALG */
/* private allocations should use 249-255 (RFC2407) */
#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */
diff --git a/include/uapi/linux/pfrut.h b/include/uapi/linux/pfrut.h
new file mode 100644
index 000000000000..42fa15f8310d
--- /dev/null
+++ b/include/uapi/linux/pfrut.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Platform Firmware Runtime Update header
+ *
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ */
+#ifndef __PFRUT_H__
+#define __PFRUT_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define PFRUT_IOCTL_MAGIC 0xEE
+
+/**
+ * PFRU_IOC_SET_REV - _IOW(PFRUT_IOCTL_MAGIC, 0x01, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EFAULT - fail to read the revision id
+ * * -EINVAL - user provides an invalid revision id
+ *
+ * Set the Revision ID for Platform Firmware Runtime Update.
+ */
+#define PFRU_IOC_SET_REV _IOW(PFRUT_IOCTL_MAGIC, 0x01, unsigned int)
+
+/**
+ * PFRU_IOC_STAGE - _IOW(PFRUT_IOCTL_MAGIC, 0x02, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - stage phase returns invalid result
+ *
+ * Stage a capsule image from the communication buffer and perform authentication.
+ */
+#define PFRU_IOC_STAGE _IOW(PFRUT_IOCTL_MAGIC, 0x02, unsigned int)
+
+/**
+ * PFRU_IOC_ACTIVATE - _IOW(PFRUT_IOCTL_MAGIC, 0x03, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - activate phase returns invalid result
+ *
+ * Activate a previously staged capsule image.
+ */
+#define PFRU_IOC_ACTIVATE _IOW(PFRUT_IOCTL_MAGIC, 0x03, unsigned int)
+
+/**
+ * PFRU_IOC_STAGE_ACTIVATE - _IOW(PFRUT_IOCTL_MAGIC, 0x04, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - stage/activate phase returns invalid result.
+ *
+ * Perform both the stage and activation actions.
+ */
+#define PFRU_IOC_STAGE_ACTIVATE _IOW(PFRUT_IOCTL_MAGIC, 0x04, unsigned int)
+
+/**
+ * PFRU_IOC_QUERY_CAP - _IOR(PFRUT_IOCTL_MAGIC, 0x05,
+ * struct pfru_update_cap_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - query phase returns invalid result
+ * * -EFAULT - the result fails to be copied to userspace
+ *
+ * Retrieve information on the Platform Firmware Runtime Update capability.
+ * The information is a struct pfru_update_cap_info.
+ */
+#define PFRU_IOC_QUERY_CAP _IOR(PFRUT_IOCTL_MAGIC, 0x05, struct pfru_update_cap_info)
+
+/**
+ * struct pfru_payload_hdr - Capsule file payload header.
+ *
+ * @sig: Signature of this capsule file.
+ * @hdr_version: Revision of this header structure.
+ * @hdr_size: Size of this header, including the OemHeader bytes.
+ * @hw_ver: The supported firmware version.
+ * @rt_ver: Version of the code injection image.
+ * @platform_id: A platform-specific GUID identifying the platform that
+ * this capsule image supports.
+ */
+struct pfru_payload_hdr {
+ __u32 sig;
+ __u32 hdr_version;
+ __u32 hdr_size;
+ __u32 hw_ver;
+ __u32 rt_ver;
+ __u8 platform_id[16];
+};
+
+enum pfru_dsm_status {
+ DSM_SUCCEED = 0,
+ DSM_FUNC_NOT_SUPPORT = 1,
+ DSM_INVAL_INPUT = 2,
+ DSM_HARDWARE_ERR = 3,
+ DSM_RETRY_SUGGESTED = 4,
+ DSM_UNKNOWN = 5,
+ DSM_FUNC_SPEC_ERR = 6,
+};
+
+/**
+ * struct pfru_update_cap_info - Runtime update capability information.
+ *
+ * @status: Indicator of whether this query succeeded.
+ * @update_cap: Bitmap to indicate whether the feature is supported.
+ * @code_type: A buffer containing an image type GUID.
+ * @fw_version: Platform firmware version.
+ * @code_rt_version: Code injection runtime version for anti-rollback.
+ * @drv_type: A buffer containing an image type GUID.
+ * @drv_rt_version: The version of the driver update runtime code.
+ * @drv_svn: The secure version number (SVN) of the driver update runtime code.
+ * @platform_id: A buffer containing a platform ID GUID.
+ * @oem_id: A buffer containing an OEM ID GUID.
+ * @oem_info_len: Length of the buffer containing the vendor specific information.
+ */
+struct pfru_update_cap_info {
+ __u32 status;
+ __u32 update_cap;
+
+ __u8 code_type[16];
+ __u32 fw_version;
+ __u32 code_rt_version;
+
+ __u8 drv_type[16];
+ __u32 drv_rt_version;
+ __u32 drv_svn;
+
+ __u8 platform_id[16];
+ __u8 oem_id[16];
+
+ __u32 oem_info_len;
+};
+
+/**
+ * struct pfru_com_buf_info - Communication buffer information.
+ *
+ * @status: Indicator of whether this query succeeded.
+ * @ext_status: Implementation specific query result.
+ * @addr_lo: Low 32bit physical address of the communication buffer to hold
+ * a runtime update package.
+ * @addr_hi: High 32bit physical address of the communication buffer to hold
+ * a runtime update package.
+ * @buf_size: Maximum size in bytes of the communication buffer.
+ */
+struct pfru_com_buf_info {
+ __u32 status;
+ __u32 ext_status;
+ __u64 addr_lo;
+ __u64 addr_hi;
+ __u32 buf_size;
+};
+
+/**
+ * struct pfru_updated_result - Platform firmware runtime update result information.
+ * @status: Indicator of whether this update succeeded.
+ * @ext_status: Implementation specific update result.
+ * @low_auth_time: Low 32bit value of image authentication time in nanoseconds.
+ * @high_auth_time: High 32bit value of image authentication time in nanoseconds.
+ * @low_exec_time: Low 32bit value of image execution time in nanoseconds.
+ * @high_exec_time: High 32bit value of image execution time in nanoseconds.
+ */
+struct pfru_updated_result {
+ __u32 status;
+ __u32 ext_status;
+ __u64 low_auth_time;
+ __u64 high_auth_time;
+ __u64 low_exec_time;
+ __u64 high_exec_time;
+};
+
+/**
+ * struct pfrt_log_data_info - Log Data from telemetry service.
+ * @status: Indicator of whether this update succeeded.
+ * @ext_status: Implementation specific update result.
+ * @chunk1_addr_lo: Low 32bit physical address of the telemetry data chunk1
+ * starting address.
+ * @chunk1_addr_hi: High 32bit physical address of the telemetry data chunk1
+ * starting address.
+ * @chunk2_addr_lo: Low 32bit physical address of the telemetry data chunk2
+ * starting address.
+ * @chunk2_addr_hi: High 32bit physical address of the telemetry data chunk2
+ * starting address.
+ * @max_data_size: Maximum supported size of data of all data chunks combined.
+ * @chunk1_size: Data size in bytes of the telemetry data chunk1 buffer.
+ * @chunk2_size: Data size in bytes of the telemetry data chunk2 buffer.
+ * @rollover_cnt: Number of times the telemetry data buffer has been
+ * overwritten since the telemetry buffer was last reset.
+ * @reset_cnt: Number of times the telemetry service has reset, which
+ * results in the rollover count and data chunk buffers being reset.
+ */
+struct pfrt_log_data_info {
+ __u32 status;
+ __u32 ext_status;
+ __u64 chunk1_addr_lo;
+ __u64 chunk1_addr_hi;
+ __u64 chunk2_addr_lo;
+ __u64 chunk2_addr_hi;
+ __u32 max_data_size;
+ __u32 chunk1_size;
+ __u32 chunk2_size;
+ __u32 rollover_cnt;
+ __u32 reset_cnt;
+};
+
+/**
+ * struct pfrt_log_info - Telemetry log information.
+ * @log_level: The telemetry log level.
+ * @log_type: The telemetry log type (history and execution).
+ * @log_revid: The telemetry log revision id.
+ */
+struct pfrt_log_info {
+ __u32 log_level;
+ __u32 log_type;
+ __u32 log_revid;
+};
+
+/**
+ * PFRT_LOG_IOC_SET_INFO - _IOW(PFRUT_IOCTL_MAGIC, 0x06,
+ * struct pfrt_log_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EFAULT - fail to get the setting parameter
+ * * -EINVAL - fail to set the log level
+ *
+ * Set the PFRT log level and log type. The input information is
+ * a struct pfrt_log_info.
+ */
+#define PFRT_LOG_IOC_SET_INFO _IOW(PFRUT_IOCTL_MAGIC, 0x06, struct pfrt_log_info)
+
+/**
+ * PFRT_LOG_IOC_GET_INFO - _IOR(PFRUT_IOCTL_MAGIC, 0x07,
+ * struct pfrt_log_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - fail to get the log level
+ * * -EFAULT - fail to copy the result back to userspace
+ *
+ * Retrieve log level and log type of the telemetry. The information is
+ * a struct pfrt_log_info.
+ */
+#define PFRT_LOG_IOC_GET_INFO _IOR(PFRUT_IOCTL_MAGIC, 0x07, struct pfrt_log_info)
+
+/**
+ * PFRT_LOG_IOC_GET_DATA_INFO - _IOR(PFRUT_IOCTL_MAGIC, 0x08,
+ * struct pfrt_log_data_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - fail to get the log buffer information
+ * * -EFAULT - fail to copy the log buffer information to userspace
+ *
+ * Retrieve data information about the telemetry. The information
+ * is a struct pfrt_log_data_info.
+ */
+#define PFRT_LOG_IOC_GET_DATA_INFO _IOR(PFRUT_IOCTL_MAGIC, 0x08, struct pfrt_log_data_info)
+
+#endif /* __PFRUT_H__ */
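A hedged usage sketch for the capability query; the /dev/acpi_pfr_update0 node name is an assumption and depends on how the driver registers its device:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pfrut.h>

int main(void)
{
        struct pfru_update_cap_info cap;
        int fd = open("/dev/acpi_pfr_update0", O_RDWR);        /* assumed node name */

        if (fd < 0 || ioctl(fd, PFRU_IOC_QUERY_CAP, &cap) < 0) {
                perror("pfru query");
                return 1;
        }
        printf("fw_version=%u code_rt_version=%u\n",
               cap.fw_version, cap.code_rt_version);
        close(fd);
        return 0;
}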
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
new file mode 100644
index 000000000000..72ec000a97cd
--- /dev/null
+++ b/include/uapi/linux/pidfd.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_PIDFD_H
+#define _UAPI_LINUX_PIDFD_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+
+/* Flags for pidfd_open(). */
+#define PIDFD_NONBLOCK O_NONBLOCK
+#define PIDFD_THREAD O_EXCL
+
+/* Flags for pidfd_send_signal(). */
+#define PIDFD_SIGNAL_THREAD (1UL << 0)
+#define PIDFD_SIGNAL_THREAD_GROUP (1UL << 1)
+#define PIDFD_SIGNAL_PROCESS_GROUP (1UL << 2)
+
+#endif /* _UAPI_LINUX_PIDFD_H */
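A hedged sketch of the new flags; pidfd_open() and pidfd_send_signal() may lack libc wrappers, so raw syscall(2) is used, and PIDFD_SIGNAL_THREAD_GROUP assumes a kernel that accepts non-zero flags:

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/pidfd.h>

static int signal_group(pid_t pid)
{
        int ret, pidfd = syscall(SYS_pidfd_open, pid, PIDFD_NONBLOCK);

        if (pidfd < 0)
                return -1;
        /* PIDFD_SIGNAL_THREAD_GROUP: deliver to the whole thread group */
        ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM,
                      (siginfo_t *)0, PIDFD_SIGNAL_THREAD_GROUP);
        close(pidfd);
        return ret;
}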
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index ee95f42fb0ec..ea277039f89d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -19,12 +19,16 @@ enum {
TCA_ACT_FLAGS,
TCA_ACT_HW_STATS,
TCA_ACT_USED_HW_STATS,
+ TCA_ACT_IN_HW_COUNT,
__TCA_ACT_MAX
};
-#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
- * actions stats.
- */
+/* See other TCA_ACT_FLAGS_* flags in include/net/act_api.h. */
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS (1 << 0) /* Don't use percpu allocator for
+ * actions stats.
+ */
+#define TCA_ACT_FLAGS_SKIP_HW (1 << 1) /* don't offload action to HW */
+#define TCA_ACT_FLAGS_SKIP_SW (1 << 2) /* don't use action in SW */
/* tca HW stats type
 * When the user does not pass the attribute, they do not care.
@@ -95,7 +99,7 @@ enum {
* versions.
*/
#define TCA_ACT_GACT 5
-#define TCA_ACT_IPT 6
+#define TCA_ACT_IPT 6 /* obsoleted, can be reused */
#define TCA_ACT_PEDIT 7
#define TCA_ACT_MIRRED 8
#define TCA_ACT_NAT 9
@@ -116,7 +120,7 @@ enum tca_id {
TCA_ID_UNSPEC = 0,
TCA_ID_POLICE = 1,
TCA_ID_GACT = TCA_ACT_GACT,
- TCA_ID_IPT = TCA_ACT_IPT,
+ TCA_ID_IPT = TCA_ACT_IPT, /* Obsoleted, can be reused */
TCA_ID_PEDIT = TCA_ACT_PEDIT,
TCA_ID_MIRRED = TCA_ACT_MIRRED,
TCA_ID_NAT = TCA_ACT_NAT,
@@ -190,6 +194,8 @@ enum {
TCA_POLICE_PAD,
TCA_POLICE_RATE64,
TCA_POLICE_PEAKRATE64,
+ TCA_POLICE_PKTRATE64,
+ TCA_POLICE_PKTBURST64,
__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
@@ -250,7 +256,7 @@ struct tc_u32_sel {
short hoff;
__be32 hmask;
- struct tc_u32_key keys[0];
+ struct tc_u32_key keys[];
};
struct tc_u32_mark {
@@ -262,7 +268,7 @@ struct tc_u32_mark {
struct tc_u32_pcnt {
__u64 rcnt;
__u64 rhit;
- __u64 kcnts[0];
+ __u64 kcnts[];
};
/* Flags */
@@ -274,37 +280,6 @@ struct tc_u32_pcnt {
#define TC_U32_MAXDEPTH 8
-
-/* RSVP filter */
-
-enum {
- TCA_RSVP_UNSPEC,
- TCA_RSVP_CLASSID,
- TCA_RSVP_DST,
- TCA_RSVP_SRC,
- TCA_RSVP_PINFO,
- TCA_RSVP_POLICE,
- TCA_RSVP_ACT,
- __TCA_RSVP_MAX
-};
-
-#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
-
-struct tc_rsvp_gpi {
- __u32 key;
- __u32 mask;
- int offset;
-};
-
-struct tc_rsvp_pinfo {
- struct tc_rsvp_gpi dpi;
- struct tc_rsvp_gpi spi;
- __u8 protocol;
- __u8 tunnelid;
- __u8 tunnelhdr;
- __u8 pad;
-};
-
/* ROUTE filter */
enum {
@@ -335,22 +310,6 @@ enum {
#define TCA_FW_MAX (__TCA_FW_MAX - 1)
-/* TC index filter */
-
-enum {
- TCA_TCINDEX_UNSPEC,
- TCA_TCINDEX_HASH,
- TCA_TCINDEX_MASK,
- TCA_TCINDEX_SHIFT,
- TCA_TCINDEX_FALL_THROUGH,
- TCA_TCINDEX_CLASSID,
- TCA_TCINDEX_POLICE,
- TCA_TCINDEX_ACT,
- __TCA_TCINDEX_MAX
-};
-
-#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
-
/* Flow filter */
enum {
@@ -581,6 +540,20 @@ enum {
TCA_FLOWER_KEY_HASH, /* u32 */
TCA_FLOWER_KEY_HASH_MASK, /* u32 */
+ TCA_FLOWER_KEY_NUM_OF_VLANS, /* u8 */
+
+ TCA_FLOWER_KEY_PPPOE_SID, /* be16 */
+ TCA_FLOWER_KEY_PPP_PROTO, /* be16 */
+
+ TCA_FLOWER_KEY_L2TPV3_SID, /* be32 */
+
+ TCA_FLOWER_L2_MISS, /* u8 */
+
+ TCA_FLOWER_KEY_CFM, /* nested */
+
+ TCA_FLOWER_KEY_SPI, /* be32 */
+ TCA_FLOWER_KEY_SPI_MASK, /* be32 */
+
__TCA_FLOWER_MAX,
};
@@ -591,6 +564,9 @@ enum {
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
+ TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. */
+ __TCA_FLOWER_KEY_CT_FLAGS_MAX,
};
enum {
@@ -607,6 +583,10 @@ enum {
* TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
* attributes
*/
+ TCA_FLOWER_KEY_ENC_OPTS_GTP, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_GTP_
+ * attributes
+ */
__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};
@@ -646,6 +626,17 @@ enum {
(__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, /* u8 */
+
+ __TCA_FLOWER_KEY_ENC_OPT_GTP_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
TCA_FLOWER_KEY_MPLS_OPTS_LSE,
__TCA_FLOWER_KEY_MPLS_OPTS_MAX,
@@ -671,6 +662,15 @@ enum {
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
+enum {
+ TCA_FLOWER_KEY_CFM_OPT_UNSPEC,
+ TCA_FLOWER_KEY_CFM_MD_LEVEL,
+ TCA_FLOWER_KEY_CFM_OPCODE,
+ __TCA_FLOWER_KEY_CFM_OPT_MAX,
+};
+
+#define TCA_FLOWER_KEY_CFM_OPT_MAX (__TCA_FLOWER_KEY_CFM_OPT_MAX - 1)
+
#define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */
/* Match-all classifier */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 9e7c2c607845..a3cd0c2dc995 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -434,6 +434,7 @@ enum {
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
TCA_HTB_PAD,
+ TCA_HTB_OFFLOAD,
__TCA_HTB_MAX,
};
@@ -476,115 +477,6 @@ enum {
#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
-
-/* CBQ section */
-
-#define TC_CBQ_MAXPRIO 8
-#define TC_CBQ_MAXLEVEL 8
-#define TC_CBQ_DEF_EWMA 5
-
-struct tc_cbq_lssopt {
- unsigned char change;
- unsigned char flags;
-#define TCF_CBQ_LSS_BOUNDED 1
-#define TCF_CBQ_LSS_ISOLATED 2
- unsigned char ewma_log;
- unsigned char level;
-#define TCF_CBQ_LSS_FLAGS 1
-#define TCF_CBQ_LSS_EWMA 2
-#define TCF_CBQ_LSS_MAXIDLE 4
-#define TCF_CBQ_LSS_MINIDLE 8
-#define TCF_CBQ_LSS_OFFTIME 0x10
-#define TCF_CBQ_LSS_AVPKT 0x20
- __u32 maxidle;
- __u32 minidle;
- __u32 offtime;
- __u32 avpkt;
-};
-
-struct tc_cbq_wrropt {
- unsigned char flags;
- unsigned char priority;
- unsigned char cpriority;
- unsigned char __reserved;
- __u32 allot;
- __u32 weight;
-};
-
-struct tc_cbq_ovl {
- unsigned char strategy;
-#define TC_CBQ_OVL_CLASSIC 0
-#define TC_CBQ_OVL_DELAY 1
-#define TC_CBQ_OVL_LOWPRIO 2
-#define TC_CBQ_OVL_DROP 3
-#define TC_CBQ_OVL_RCLASSIC 4
- unsigned char priority2;
- __u16 pad;
- __u32 penalty;
-};
-
-struct tc_cbq_police {
- unsigned char police;
- unsigned char __res1;
- unsigned short __res2;
-};
-
-struct tc_cbq_fopt {
- __u32 split;
- __u32 defmap;
- __u32 defchange;
-};
-
-struct tc_cbq_xstats {
- __u32 borrows;
- __u32 overactions;
- __s32 avgidle;
- __s32 undertime;
-};
-
-enum {
- TCA_CBQ_UNSPEC,
- TCA_CBQ_LSSOPT,
- TCA_CBQ_WRROPT,
- TCA_CBQ_FOPT,
- TCA_CBQ_OVL_STRATEGY,
- TCA_CBQ_RATE,
- TCA_CBQ_RTAB,
- TCA_CBQ_POLICE,
- __TCA_CBQ_MAX,
-};
-
-#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
-
-/* dsmark section */
-
-enum {
- TCA_DSMARK_UNSPEC,
- TCA_DSMARK_INDICES,
- TCA_DSMARK_DEFAULT_INDEX,
- TCA_DSMARK_SET_TC_INDEX,
- TCA_DSMARK_MASK,
- TCA_DSMARK_VALUE,
- __TCA_DSMARK_MAX,
-};
-
-#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
-
-/* ATM section */
-
-enum {
- TCA_ATM_UNSPEC,
- TCA_ATM_FD, /* file/socket descriptor */
- TCA_ATM_PTR, /* pointer to descriptor - later */
- TCA_ATM_HDR, /* LL header */
- TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
- TCA_ATM_ADDR, /* PVC address (for output only) */
- TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
- __TCA_ATM_MAX,
-};
-
-#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
-
/* Network emulator */
enum {
@@ -602,6 +494,7 @@ enum {
TCA_NETEM_JITTER64,
TCA_NETEM_SLOT,
TCA_NETEM_SLOT_DIST,
+ TCA_NETEM_PRNG_SEED,
__TCA_NETEM_MAX,
};
@@ -718,6 +611,11 @@ enum {
#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
+enum {
+ TC_FP_EXPRESS = 1,
+ TC_FP_PREEMPTIBLE = 2,
+};
+
struct tc_mqprio_qopt {
__u8 num_tc;
__u8 prio_tc_map[TC_QOPT_BITMASK + 1];
@@ -732,11 +630,22 @@ struct tc_mqprio_qopt {
#define TC_MQPRIO_F_MAX_RATE 0x8
enum {
+ TCA_MQPRIO_TC_ENTRY_UNSPEC,
+ TCA_MQPRIO_TC_ENTRY_INDEX, /* u32 */
+ TCA_MQPRIO_TC_ENTRY_FP, /* u32 */
+
+ /* add new constants above here */
+ __TCA_MQPRIO_TC_ENTRY_CNT,
+ TCA_MQPRIO_TC_ENTRY_MAX = (__TCA_MQPRIO_TC_ENTRY_CNT - 1)
+};
+
+enum {
TCA_MQPRIO_UNSPEC,
TCA_MQPRIO_MODE,
TCA_MQPRIO_SHAPER,
TCA_MQPRIO_MIN_RATE64,
TCA_MQPRIO_MAX_RATE64,
+ TCA_MQPRIO_TC_ENTRY,
__TCA_MQPRIO_MAX,
};
@@ -826,6 +735,8 @@ struct tc_codel_xstats {
/* FQ_CODEL */
+#define FQ_CODEL_QUANTUM_MAX (1 << 20)
+
enum {
TCA_FQ_CODEL_UNSPEC,
TCA_FQ_CODEL_TARGET,
@@ -837,6 +748,8 @@ enum {
TCA_FQ_CODEL_CE_THRESHOLD,
TCA_FQ_CODEL_DROP_BATCH_SIZE,
TCA_FQ_CODEL_MEMORY_LIMIT,
+ TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+ TCA_FQ_CODEL_CE_THRESHOLD_MASK,
__TCA_FQ_CODEL_MAX
};
@@ -919,15 +832,22 @@ enum {
TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */
+ TCA_FQ_PRIOMAP, /* prio2band */
+
+ TCA_FQ_WEIGHTS, /* Weights for each band */
+
__TCA_FQ_MAX
};
#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+#define FQ_BANDS 3
+#define FQ_MIN_WEIGHT 16384
+
struct tc_fq_qd_stats {
__u64 gc_flows;
- __u64 highprio_packets;
- __u64 tcp_retrans;
+ __u64 highprio_packets; /* obsolete */
+ __u64 tcp_retrans; /* obsolete */
__u64 throttled;
__u64 flows_plimit;
__u64 pkts_too_long;
@@ -940,6 +860,10 @@ struct tc_fq_qd_stats {
__u64 ce_mark; /* packets above ce_threshold */
__u64 horizon_drops;
__u64 horizon_caps;
+ __u64 fastpath_packets;
+ __u64 band_drops[FQ_BANDS];
+ __u32 band_pkt_count[FQ_BANDS];
+ __u32 pad;
};
/* Heavy-Hitter Filter */
@@ -1228,6 +1152,27 @@ enum {
#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
enum {
+ TCA_TAPRIO_TC_ENTRY_UNSPEC,
+ TCA_TAPRIO_TC_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_TC_ENTRY_MAX_SDU, /* u32 */
+ TCA_TAPRIO_TC_ENTRY_FP, /* u32 */
+
+ /* add new constants above here */
+ __TCA_TAPRIO_TC_ENTRY_CNT,
+ TCA_TAPRIO_TC_ENTRY_MAX = (__TCA_TAPRIO_TC_ENTRY_CNT - 1)
+};
+
+enum {
+ TCA_TAPRIO_OFFLOAD_STATS_PAD = 1, /* u64 */
+ TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS, /* u64 */
+ TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS, /* u64 */
+
+ /* add new constants above here */
+ __TCA_TAPRIO_OFFLOAD_STATS_CNT,
+ TCA_TAPRIO_OFFLOAD_STATS_MAX = (__TCA_TAPRIO_OFFLOAD_STATS_CNT - 1)
+};
+
+enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
@@ -1240,6 +1185,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+ TCA_TAPRIO_ATTR_TC_ENTRY, /* nest */
__TCA_TAPRIO_ATTR_MAX,
};
diff --git a/include/uapi/linux/pktcdvd.h b/include/uapi/linux/pktcdvd.h
index 9cbb55d21c94..987a3022dc5f 100644
--- a/include/uapi/linux/pktcdvd.h
+++ b/include/uapi/linux/pktcdvd.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
/*
+ * UNUSED:
* 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
*/
#define PACKET_DEBUG 1
@@ -30,17 +31,6 @@
#define PACKET_WAIT_TIME (HZ * 5 / 1000)
/*
- * use drive write caching -- we need deferred error handling to be
- * able to successfully recover with this option (drive will return good
- * status as soon as the cdb is validated).
- */
-#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
-#define USE_WCACHING 1
-#else
-#define USE_WCACHING 0
-#endif
-
-/*
 * No user-serviceable parts beyond this point ->
*/
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 7bd2a5a75348..1cc5ce0ae062 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -115,6 +115,8 @@ struct pppol2tp_ioc_stats {
#define PPPIOCATTCHAN _IOW('t', 56, int) /* attach to ppp channel */
#define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */
#define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
+#define PPPIOCBRIDGECHAN _IOW('t', 53, int) /* bridge one channel to another */
+#define PPPIOCUNBRIDGECHAN _IO('t', 52) /* unbridge channel */
#define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0)
#define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */
diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h
index ccc78cbf1221..d8126415966f 100644
--- a/include/uapi/linux/pr.h
+++ b/include/uapi/linux/pr.h
@@ -4,6 +4,23 @@
#include <linux/types.h>
+enum pr_status {
+ PR_STS_SUCCESS = 0x0,
+ /*
+ * The following error codes are based on SCSI, because the interface
+ * was originally created for it and has existing users.
+ */
+ /* Generic device failure. */
+ PR_STS_IOERR = 0x2,
+ PR_STS_RESERVATION_CONFLICT = 0x18,
+ /* Temporary path failure that can be retried. */
+ PR_STS_RETRY_PATH_FAILURE = 0xe0000,
+ /* The request was failed due to a fast failure timer. */
+ PR_STS_PATH_FAST_FAILED = 0xf0000,
+ /* The path cannot be reached and has been marked as failed. */
+ PR_STS_PATH_FAILED = 0x10000,
+};
+
enum pr_type {
PR_WRITE_EXCLUSIVE = 1,
PR_EXCLUSIVE_ACCESS = 2,
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 07b4f8131e36..370ed14b1ae0 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -213,6 +213,7 @@ struct prctl_mm_map {
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
# define PR_SPEC_INDIRECT_BRANCH 1
+# define PR_SPEC_L1D_FLUSH 2
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
@@ -233,9 +234,76 @@ struct prctl_mm_map {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* MTE tag check fault modes */
+# define PR_MTE_TCF_NONE 0UL
+# define PR_MTE_TCF_SYNC (1UL << 1)
+# define PR_MTE_TCF_ASYNC (1UL << 2)
+# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+/* Unused; kept only for source compatibility */
+# define PR_MTE_TCF_SHIFT 1
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57
#define PR_GET_IO_FLUSHER 58
+/* Dispatch syscalls to a userspace handler */
+#define PR_SET_SYSCALL_USER_DISPATCH 59
+# define PR_SYS_DISPATCH_OFF 0
+# define PR_SYS_DISPATCH_ON 1
+/* The control values for the user space selector when dispatch is enabled */
+# define SYSCALL_DISPATCH_FILTER_ALLOW 0
+# define SYSCALL_DISPATCH_FILTER_BLOCK 1
+
+/* Set/get enabled arm64 pointer authentication keys */
+#define PR_PAC_SET_ENABLED_KEYS 60
+#define PR_PAC_GET_ENABLED_KEYS 61
+
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+# define PR_SCHED_CORE_MAX 4
+# define PR_SCHED_CORE_SCOPE_THREAD 0
+# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
+# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL 63 /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL 64 /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+
+/* Memory deny write / execute */
+#define PR_SET_MDWE 65
+# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
+# define PR_MDWE_NO_INHERIT (1UL << 1)
+
+#define PR_GET_MDWE 66
+
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
+#define PR_GET_AUXV 0x41555856
+
+#define PR_SET_MEMORY_MERGE 67
+#define PR_GET_MEMORY_MERGE 68
+
+#define PR_RISCV_V_SET_CONTROL 69
+#define PR_RISCV_V_GET_CONTROL 70
+# define PR_RISCV_V_VSTATE_CTRL_DEFAULT 0
+# define PR_RISCV_V_VSTATE_CTRL_OFF 1
+# define PR_RISCV_V_VSTATE_CTRL_ON 2
+# define PR_RISCV_V_VSTATE_CTRL_INHERIT (1 << 4)
+# define PR_RISCV_V_VSTATE_CTRL_CUR_MASK 0x3
+# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc
+# define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f
+
#endif /* _LINUX_PRCTL_H */
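A hedged sketch of the PR_SCHED_CORE interface: create a cookie for the current thread group, then push it onto another task so the two share a core:

#include <sys/prctl.h>
#include <unistd.h>
#include <linux/prctl.h>

static int share_core_cookie(pid_t other)
{
        /* pid 0 = current task; scope selects how wide the cookie applies */
        if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
                  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0))
                return -1;
        return prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, other,
                     PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
}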
diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
index aea26ab1431c..e585db5bf2d2 100644
--- a/include/uapi/linux/psample.h
+++ b/include/uapi/linux/psample.h
@@ -3,7 +3,6 @@
#define __UAPI_PSAMPLE_H
enum {
- /* sampled packet metadata */
PSAMPLE_ATTR_IIFINDEX,
PSAMPLE_ATTR_OIFINDEX,
PSAMPLE_ATTR_ORIGSIZE,
@@ -11,10 +10,15 @@ enum {
PSAMPLE_ATTR_GROUP_SEQ,
PSAMPLE_ATTR_SAMPLE_RATE,
PSAMPLE_ATTR_DATA,
+ PSAMPLE_ATTR_GROUP_REFCOUNT,
PSAMPLE_ATTR_TUNNEL,
- /* commands attributes */
- PSAMPLE_ATTR_GROUP_REFCOUNT,
+ PSAMPLE_ATTR_PAD,
+ PSAMPLE_ATTR_OUT_TC, /* u16 */
+ PSAMPLE_ATTR_OUT_TC_OCC, /* u64, bytes */
+ PSAMPLE_ATTR_LATENCY, /* u64, nanoseconds */
+ PSAMPLE_ATTR_TIMESTAMP, /* u64, nanoseconds */
+ PSAMPLE_ATTR_PROTO, /* u16 */
__PSAMPLE_ATTR_MAX
};
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 2fcad1dd0b0e..42a40ad3fb62 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -48,12 +48,26 @@
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
+#define PSCI_1_0_FN_CPU_FREEZE PSCI_0_2_FN(11)
+#define PSCI_1_0_FN_CPU_DEFAULT_SUSPEND PSCI_0_2_FN(12)
+#define PSCI_1_0_FN_NODE_HW_STATE PSCI_0_2_FN(13)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
+#define PSCI_1_0_FN_STAT_RESIDENCY PSCI_0_2_FN(16)
+#define PSCI_1_0_FN_STAT_COUNT PSCI_0_2_FN(17)
+
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
+#define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20)
+#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
+#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
+#define PSCI_1_0_FN64_STAT_RESIDENCY PSCI_0_2_FN64(16)
+#define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17)
+
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(20)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
@@ -82,6 +96,10 @@
#define PSCI_0_2_TOS_UP_NO_MIGRATE 1
#define PSCI_0_2_TOS_MP 2
+/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */
+#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0
+#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U
+
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK \
diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h
new file mode 100644
index 000000000000..b3845a9ff5fd
--- /dev/null
+++ b/include/uapi/linux/psp-dbc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD Dynamic Boost Control (DBC)
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_DBC_USER_H__
+#define __PSP_DBC_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * DOC: AMD Dynamic Boost Control (DBC) interface
+ */
+
+#define DBC_NONCE_SIZE 16
+#define DBC_SIG_SIZE 32
+#define DBC_UID_SIZE 16
+
+/**
+ * struct dbc_user_nonce - Nonce exchange structure (input/output).
+ * @auth_needed: Whether the PSP should authenticate this request (input).
+ * 0: no authentication; the PSP will return a single-use nonce.
+ * 1: authentication; the PSP will return a multi-use nonce.
+ * @nonce: 16 byte value used for future authentication (output).
+ * @signature: Optional 32 byte signature created by software using a
+ * previous nonce (input).
+ */
+struct dbc_user_nonce {
+ __u32 auth_needed;
+ __u8 nonce[DBC_NONCE_SIZE];
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_setuid - UID exchange structure (input).
+ * @uid: 16 byte value representing software identity
+ * @signature: 32 byte signature created by software using a previous nonce
+ */
+struct dbc_user_setuid {
+ __u8 uid[DBC_UID_SIZE];
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_param - Parameter exchange structure (input/output).
+ * @msg_index: Message indicating what parameter to set or get (input)
+ * @param: 4 byte parameter, units are message specific. (input/output)
+ * @signature: 32 byte signature.
+ * - When sending a message this is to be created by software
+ * using a previous nonce (input)
+ * - For interpreting results, this signature is updated by the
+ * PSP to allow software to validate the authenticity of the
+ * results.
+ */
+struct dbc_user_param {
+ __u32 msg_index;
+ __u32 param;
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * Dynamic Boost Control (DBC) IOC
+ *
+ * possible return codes for all DBC IOCTLs:
+ * 0: success
+ * -EINVAL: invalid input
+ * -E2BIG: excess data passed
+ * -EFAULT: failed to copy to/from userspace
+ * -EBUSY: mailbox in recovery or in use
+ * -ENODEV: driver not bound with PSP device
+ * -EACCES: request isn't authorized
+ * -EINVAL: invalid parameter
+ * -ETIMEDOUT: request timed out
+ * -EAGAIN: invalid request for state machine
+ * -ENOENT: not implemented
+ * -ENFILE: overflow
+ * -EPERM: invalid signature
+ * -EIO: unknown error
+ */
+#define DBC_IOC_TYPE 'D'
+
+/**
+ * DBCIOCNONCE - Fetch a nonce from the PSP for authenticating commands.
+ * If a nonce is fetched without authentication it can only
+ * be utilized for one command.
+ * If a nonce is fetched with authentication it can be used
+ * for multiple requests.
+ */
+#define DBCIOCNONCE _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce)
+
+/**
+ * DBCIOCUID - Set the user ID (UID) of a calling process.
+ * The user ID is 16 bytes long. It must be programmed using a
+ * 32 byte signature built using the nonce fetched from
+ * DBCIOCNONCE.
+ * The UID can only be set once until the system is rebooted.
+ */
+#define DBCIOCUID _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid)
+
+/**
+ * DBCIOCPARAM - Set or get a parameter from the PSP.
+ * This request will only work after DBCIOCUID has successfully
+ * set the UID of the calling process.
+ * Whether the parameter is set or get is controlled by the
+ * message ID in the request.
+ * This command must be sent using a 32 byte signature built
+ * using the nonce fetched from DBCIOCNONCE.
+ * When the command succeeds, the 32 byte signature will be
+ * updated by the PSP for software to authenticate the results.
+ */
+#define DBCIOCPARAM _IOWR(DBC_IOC_TYPE, 0x3, struct dbc_user_param)
+
+/**
+ * enum dbc_cmd_msg - Messages utilized by DBCIOCPARAM
+ * @PARAM_GET_FMAX_CAP: Get frequency cap (MHz)
+ * @PARAM_SET_FMAX_CAP: Set frequency cap (MHz)
+ * @PARAM_GET_PWR_CAP: Get socket power cap (mW)
+ * @PARAM_SET_PWR_CAP: Set socket power cap (mW)
+ * @PARAM_GET_GFX_MODE: Get graphics mode (0/1)
+ * @PARAM_SET_GFX_MODE: Set graphics mode (0/1)
+ * @PARAM_GET_CURR_TEMP: Get current temperature (degrees C)
+ * @PARAM_GET_FMAX_MAX: Get maximum allowed value for frequency (MHz)
+ * @PARAM_GET_FMAX_MIN: Get minimum allowed value for frequency (MHz)
+ * @PARAM_GET_SOC_PWR_MAX: Get maximum allowed value for SoC power (mW)
+ * @PARAM_GET_SOC_PWR_MIN: Get minimum allowed value for SoC power (mW)
+ * @PARAM_GET_SOC_PWR_CUR: Get current value for SoC Power (mW)
+ */
+enum dbc_cmd_msg {
+ PARAM_GET_FMAX_CAP = 0x3,
+ PARAM_SET_FMAX_CAP = 0x4,
+ PARAM_GET_PWR_CAP = 0x5,
+ PARAM_SET_PWR_CAP = 0x6,
+ PARAM_GET_GFX_MODE = 0x7,
+ PARAM_SET_GFX_MODE = 0x8,
+ PARAM_GET_CURR_TEMP = 0x9,
+ PARAM_GET_FMAX_MAX = 0xA,
+ PARAM_GET_FMAX_MIN = 0xB,
+ PARAM_GET_SOC_PWR_MAX = 0xC,
+ PARAM_GET_SOC_PWR_MIN = 0xD,
+ PARAM_GET_SOC_PWR_CUR = 0xE,
+};
+
+#endif /* __PSP_DBC_USER_H__ */
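For orientation, the three ioctls above compose into a fetch-nonce, authenticate,
exchange-parameter flow. A minimal C sketch of reading the frequency cap follows;
the /dev/dbc node name and the compute_signature() helper are assumptions (the
real signature is an HMAC over the request keyed from the nonce), so this is an
illustration, not the driver's documented usage.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/psp-dbc.h>

int main(void)
{
	struct dbc_user_nonce nonce = { .auth_needed = 0 };
	struct dbc_user_param param = { .msg_index = PARAM_GET_FMAX_CAP };
	int fd = open("/dev/dbc", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* Unauthenticated nonce: valid for exactly one signed command. */
	if (ioctl(fd, DBCIOCNONCE, &nonce))
		return 1;

	/* compute_signature() is a placeholder for the HMAC step. */
	/* compute_signature(param.signature, nonce.nonce, &param); */

	if (ioctl(fd, DBCIOCPARAM, &param))
		return 1;

	printf("fmax cap: %u MHz\n", param.param);
	return 0;
}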
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 91b4c63d5cbf..b7a2c2ee35b7 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -28,6 +28,9 @@ enum {
SEV_PEK_CERT_IMPORT,
SEV_GET_ID, /* This command is deprecated, use SEV_GET_ID2 */
SEV_GET_ID2,
+ SNP_PLATFORM_STATUS,
+ SNP_COMMIT,
+ SNP_SET_CONFIG,
SEV_MAX,
};
@@ -36,6 +39,13 @@ enum {
* SEV Firmware status code
*/
typedef enum {
+ /*
+ * This error code is not in the SEV spec. Its purpose is to convey that
+ * there was an error that prevented the SEV firmware from being called.
+ * The SEV API error codes are 16 bits, so the -1 value will not overlap
+ * with possible values from the specification.
+ */
+ SEV_RET_NO_FW_CALL = -1,
SEV_RET_SUCCESS = 0,
SEV_RET_INVALID_PLATFORM_STATE,
SEV_RET_INVALID_GUEST_STATE,
@@ -61,6 +71,13 @@ typedef enum {
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
+ SEV_RET_INVALID_KEY = 0x27,
+ SEV_RET_INVALID_PAGE_SIZE,
+ SEV_RET_INVALID_PAGE_STATE,
+ SEV_RET_INVALID_MDATA_ENTRY,
+ SEV_RET_INVALID_PAGE_OWNER,
+ SEV_RET_INVALID_PAGE_AEAD_OFLOW,
+ SEV_RET_RMP_INIT_REQUIRED,
SEV_RET_MAX,
} sev_ret_code;
@@ -148,6 +165,56 @@ struct sev_user_data_get_id2 {
} __packed;
/**
+ * struct sev_user_data_snp_status - SNP status
+ *
+ * @api_major: API major version
+ * @api_minor: API minor version
+ * @state: current platform state
+ * @is_rmp_initialized: whether RMP is initialized or not
+ * @rsvd: reserved
+ * @build_id: firmware build id for the API version
+ * @mask_chip_id: whether chip id is present in attestation reports or not
+ * @mask_chip_key: whether attestation reports are signed or not
+ * @vlek_en: VLEK (Version Loaded Endorsement Key) hashstick is loaded
+ * @rsvd1: reserved
+ * @guest_count: the number of guests currently managed by the firmware
+ * @current_tcb_version: current TCB version
+ * @reported_tcb_version: reported TCB version
+ */
+struct sev_user_data_snp_status {
+ __u8 api_major; /* Out */
+ __u8 api_minor; /* Out */
+ __u8 state; /* Out */
+ __u8 is_rmp_initialized:1; /* Out */
+ __u8 rsvd:7;
+ __u32 build_id; /* Out */
+ __u32 mask_chip_id:1; /* Out */
+ __u32 mask_chip_key:1; /* Out */
+ __u32 vlek_en:1; /* Out */
+ __u32 rsvd1:29;
+ __u32 guest_count; /* Out */
+ __u64 current_tcb_version; /* Out */
+ __u64 reported_tcb_version; /* Out */
+} __packed;
+
+/**
+ * struct sev_user_data_snp_config - system wide configuration value for SNP.
+ *
+ * @reported_tcb: the TCB version to report in the guest attestation report.
+ * @mask_chip_id: whether chip id is present in attestation reports or not
+ * @mask_chip_key: whether attestation reports are signed or not
+ * @rsvd: reserved
+ * @rsvd1: reserved
+ */
+struct sev_user_data_snp_config {
+ __u64 reported_tcb; /* In */
+ __u32 mask_chip_id:1; /* In */
+ __u32 mask_chip_key:1; /* In */
+ __u32 rsvd:30; /* In */
+ __u8 rsvd1[52];
+} __packed;
+
+/**
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
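The new SNP commands ride on the existing SEV_ISSUE_CMD ioctl, with @data
pointing at the corresponding user data structure. A minimal sketch of querying
SNP_PLATFORM_STATUS, assuming the usual /dev/sev node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_snp_status status = {};
	struct sev_issue_cmd arg = {
		.cmd = SNP_PLATFORM_STATUS,
		.data = (__u64)(unsigned long)&status,
	};
	int fd = open("/dev/sev", O_RDONLY);

	if (fd < 0 || ioctl(fd, SEV_ISSUE_CMD, &arg))
		return 1;	/* arg.error holds the firmware code */

	printf("SNP API %u.%u build %u, %u guests\n",
	       status.api_major, status.api_minor,
	       status.build_id, status.guest_count);
	return 0;
}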
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 1d108d597f66..053b40d642de 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -32,6 +32,7 @@
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
#define PTP_STRICT_FLAGS (1<<3)
+#define PTP_EXT_OFFSET (1<<4)
#define PTP_EXTTS_EDGES (PTP_RISING_EDGE | PTP_FALLING_EDGE)
/*
@@ -40,7 +41,8 @@
#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
PTP_RISING_EDGE | \
PTP_FALLING_EDGE | \
- PTP_STRICT_FLAGS)
+ PTP_STRICT_FLAGS | \
+ PTP_EXT_OFFSET)
/*
* flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
@@ -51,6 +53,11 @@
PTP_FALLING_EDGE)
/*
+ * flag fields valid for the ptp_extts_event report.
+ */
+#define PTP_EXTTS_EVENT_VALID (PTP_ENABLE_FEATURE)
+
+/*
* Bits of the ptp_perout_request.flags field:
*/
#define PTP_PEROUT_ONE_SHOT (1<<0)
@@ -95,7 +102,8 @@ struct ptp_clock_caps {
int cross_timestamping;
/* Whether the clock supports adjust phase */
int adjust_phase;
- int rsv[12]; /* Reserved for future use. */
+ int max_phase_adj; /* Maximum phase adjustment in nanoseconds. */
+ int rsv[11]; /* Reserved for future use. */
};
struct ptp_extts_request {
@@ -223,11 +231,13 @@ struct ptp_pin_desc {
_IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise)
#define PTP_SYS_OFFSET_EXTENDED2 \
_IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended)
+#define PTP_MASK_CLEAR_ALL _IO(PTP_CLK_MAGIC, 19)
+#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int)
struct ptp_extts_event {
- struct ptp_clock_time t; /* Time event occured. */
+ struct ptp_clock_time t; /* Time event occurred. */
unsigned int index; /* Which channel produced the event. */
- unsigned int flags; /* Reserved for future use. */
+ unsigned int flags; /* Event type. */
unsigned int rsv[2]; /* Reserved for future use. */
};
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index a71b6e3b03eb..72c038fc71d0 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -81,7 +81,8 @@ struct seccomp_metadata {
struct ptrace_syscall_info {
__u8 op; /* PTRACE_SYSCALL_INFO_* */
- __u32 arch __attribute__((__aligned__(sizeof(__u32))));
+ __u8 pad[3];
+ __u32 arch;
__u64 instruction_pointer;
__u64 stack_pointer;
union {
@@ -101,9 +102,49 @@ struct ptrace_syscall_info {
};
};
+#define PTRACE_GET_RSEQ_CONFIGURATION 0x420f
+
+struct ptrace_rseq_configuration {
+ __u64 rseq_abi_pointer;
+ __u32 rseq_abi_size;
+ __u32 signature;
+ __u32 flags;
+ __u32 pad;
+};
+
+#define PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG 0x4210
+#define PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG 0x4211
+
+/*
+ * struct ptrace_sud_config - Per-task configuration for Syscall User Dispatch
+ * @mode: One of PR_SYS_DISPATCH_ON or PR_SYS_DISPATCH_OFF
+ * @selector: Tracee's user virtual address of SUD selector
+ * @offset: SUD exclusion area (virtual address)
+ * @len: Length of SUD exclusion area
+ *
+ * Used to get/set the syscall user dispatch configuration for a tracee.
+ * Selector is optional (may be NULL), and if invalid will produce
+ * a SIGSEGV in the tracee upon first access.
+ *
+ * If mode is PR_SYS_DISPATCH_ON, syscall dispatch will be enabled. If
+ * PR_SYS_DISPATCH_OFF, syscall dispatch will be disabled and all other
+ * parameters must be 0. The value in *selector (if not NULL) also determines
+ * whether syscall dispatch will occur.
+ *
+ * The Syscall User Dispatch Exclusion area described by offset/len is the
+ * virtual address space from which syscalls will not produce a user
+ * dispatch.
+ */
+struct ptrace_sud_config {
+ __u64 mode;
+ __u64 selector;
+ __u64 offset;
+ __u64 len;
+};
+
/*
* These values are stored in task->ptrace_message
- * by tracehook_report_syscall_* to describe the current syscall-stop.
+ * by ptrace_stop to describe the current syscall-stop.
*/
#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
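A sketch of driving the new SUD requests from a tracer. Libc headers may not
carry these definitions yet, so the fallback below restates them from the hunk
above; the tracee is assumed to be attached and stopped, and PR_SYS_DISPATCH_ON
comes from <linux/prctl.h>.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/prctl.h>
#include <linux/types.h>

#ifndef PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG
#define PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG 0x4210
struct ptrace_sud_config {
	__u64 mode;
	__u64 selector;
	__u64 offset;
	__u64 len;
};
#endif

int sud_enable(pid_t pid, __u64 start, __u64 len)
{
	struct ptrace_sud_config cfg = {
		.mode = PR_SYS_DISPATCH_ON,
		.selector = 0,		/* no selector: always dispatch */
		.offset = start,	/* start of the exclusion area */
		.len = len,
	};

	/* As with PTRACE_GET_RSEQ_CONFIGURATION, addr carries the size. */
	return ptrace(PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG, pid,
		      (void *)sizeof(cfg), &cfg);
}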
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index f17c9636a859..52090105b828 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -77,6 +77,7 @@
#define QFMT_VFS_V0 2
#define QFMT_OCFS2 3
#define QFMT_VFS_V1 4
+#define QFMT_SHMEM 5
/* Size of block in which space limits are passed through the quota
* interface */
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index e5a98a16f9b0..5a43c23f53bf 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -2,15 +2,11 @@
/*
md_p.h : physical layout of Linux RAID devices
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_P_H
@@ -237,7 +233,7 @@ struct mdp_superblock_1 {
char set_name[32]; /* set and interpreted by user-space */
__le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
- __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
+ __le32 level; /* 0,1,4,5 */
__le32 layout; /* only for raid5 and raid10 currently */
__le64 size; /* used size of component devices, in 512byte sectors */
@@ -303,7 +299,7 @@ struct mdp_superblock_1 {
* into the 'roles' value. If a device is spare or faulty, then it doesn't
* have a meaningful role.
*/
- __le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
+ __le16 dev_roles[]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
};
/* feature_map bits */
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 105307244961..7be89a4906e7 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -2,15 +2,11 @@
/*
md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
Copyright (C) 1998 Ingo Molnar
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI_MD_U_H
@@ -107,11 +103,6 @@ typedef struct mdu_array_info_s {
} mdu_array_info_t;
-/* non-obvious values for 'level' */
-#define LEVEL_MULTIPATH (-4)
-#define LEVEL_LINEAR (-1)
-#define LEVEL_FAULTY (-5)
-
/* we need a value for 'no level specified' and 0
* means 'raid0', so we need something else. This is
* for internal use only
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index dcc1b3e6106f..e744c23582eb 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -41,7 +41,7 @@
struct rand_pool_info {
int entropy_count;
int buf_size;
- __u32 buf[0];
+ __u32 buf[];
};
/*
diff --git a/include/uapi/linux/raw.h b/include/uapi/linux/raw.h
deleted file mode 100644
index 47874919d0b9..000000000000
--- a/include/uapi/linux/raw.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_RAW_H
-#define __LINUX_RAW_H
-
-#include <linux/types.h>
-
-#define RAW_SETBIND _IO( 0xac, 0 )
-#define RAW_GETBIND _IO( 0xac, 1 )
-
-struct raw_config_request
-{
- int raw_minor;
- __u64 block_major;
- __u64 block_minor;
-};
-
-#endif /* __LINUX_RAW_H */
diff --git a/include/uapi/linux/reiserfs_xattr.h b/include/uapi/linux/reiserfs_xattr.h
index 28f10842f047..503ad018ce5b 100644
--- a/include/uapi/linux/reiserfs_xattr.h
+++ b/include/uapi/linux/reiserfs_xattr.h
@@ -19,7 +19,7 @@ struct reiserfs_xattr_header {
struct reiserfs_security_handle {
const char *name;
void *value;
- size_t length;
+ __kernel_size_t length;
};
#endif /* _LINUX_REISERFS_XATTR_H */
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index 74ef57b38f9f..4fc22908bc09 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -2,7 +2,7 @@
#ifndef _UAPI_LINUX_RESOURCE_H
#define _UAPI_LINUX_RESOURCE_H
-#include <linux/time.h>
+#include <linux/time_types.h>
#include <linux/types.h>
/*
@@ -66,10 +66,17 @@ struct rlimit64 {
#define _STK_LIM (8*1024*1024)
/*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IORING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
*/
-#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT (8*1024*1024)
/*
* Due to binary compatibility, the actual resource numbers
diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
index 2e00dcebebd0..db6c8588c1d0 100644
--- a/include/uapi/linux/rfkill.h
+++ b/include/uapi/linux/rfkill.h
@@ -70,6 +70,16 @@ enum rfkill_operation {
};
/**
+ * enum rfkill_hard_block_reasons - hard block reasons
+ * @RFKILL_HARD_BLOCK_SIGNAL: the hardware rfkill signal is active
+ * @RFKILL_HARD_BLOCK_NOT_OWNER: the NIC is not owned by the host
+ */
+enum rfkill_hard_block_reasons {
+ RFKILL_HARD_BLOCK_SIGNAL = 1 << 0,
+ RFKILL_HARD_BLOCK_NOT_OWNER = 1 << 1,
+};
+
+/**
* struct rfkill_event - events for userspace on /dev/rfkill
* @idx: index of dev rfkill
* @type: type of the rfkill struct
@@ -84,27 +94,97 @@ struct rfkill_event {
__u32 idx;
__u8 type;
__u8 op;
- __u8 soft, hard;
+ __u8 soft;
+ __u8 hard;
} __attribute__((packed));
-/*
- * We are planning to be backward and forward compatible with changes
- * to the event struct, by adding new, optional, members at the end.
- * When reading an event (whether the kernel from userspace or vice
- * versa) we need to accept anything that's at least as large as the
- * version 1 event size, but might be able to accept other sizes in
- * the future.
- *
- * One exception is the kernel -- we already have two event sizes in
- * that we've made the 'hard' member optional since our only option
- * is to ignore it anyway.
+/**
+ * struct rfkill_event_ext - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
+ * @hard_block_reasons: valid if hard is set. One or several reasons from
+ * &enum rfkill_hard_block_reasons.
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * used for events from the kernel and control to the kernel.
+ *
+ * See the extensibility docs below.
+ */
+struct rfkill_event_ext {
+ __u32 idx;
+ __u8 type;
+ __u8 op;
+ __u8 soft;
+ __u8 hard;
+
+ /*
+ * older kernels will accept/send only up to this point,
+ * and if extended further up to any chunk marked below
+ */
+
+ __u8 hard_block_reasons;
+} __attribute__((packed));
+
+/**
+ * DOC: Extensibility
+ *
+ * Originally, we had planned to allow backward and forward compatible
+ * changes by just adding fields at the end of the structure that are
+ * then not reported on older kernels on read(), and not written to by
+ * older kernels on write(), with the kernel reporting the size it did
+ * accept as the result.
+ *
+ * This would have allowed userspace to detect on read() and write()
+ * which kernel structure version it was dealing with, and if it was just
+ * recompiled it would have gotten the new fields, but obviously not
+ * accessed them, but things should've continued to work.
+ *
+ * Unfortunately, while actually exercising this mechanism to add the
+ * hard block reasons field, we found that userspace (notably systemd)
+ * did all kinds of fun things not in line with this scheme:
+ *
+ * 1. treat the (expected) short writes as an error;
+ * 2. ask to read sizeof(struct rfkill_event) but then compare the
+ * actual return value to RFKILL_EVENT_SIZE_V1 and treat any
+ * mismatch as an error.
+ *
+ * As a consequence, just recompiling with a new struct version caused
+ * things to no longer work correctly on old and new kernels.
+ *
+ * Hence, we've rolled back &struct rfkill_event to the original version
+ * and added &struct rfkill_event_ext. This effectively reverts to the
+ * old behaviour for all userspace, unless it explicitly opts in to the
+ * rules outlined here by using the new &struct rfkill_event_ext.
+ *
+ * Additionally, some other userspace (bluez, g-s-d) was reading with a
+ * large size but as streaming reads rather than message-based, or with
+ * too strict checks for the returned size. So eventually, we completely
+ * reverted this, and extended messages need to be opted in to by using
+ * an ioctl:
+ *
+ * ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext));
+ *
+ * Userspace using &struct rfkill_event_ext and the ioctl must adhere to
+ * the following rules:
+ *
+ * 1. accept short writes, optionally using them to detect that it's
+ * running on an older kernel;
+ * 2. accept short reads, knowing that this means it's running on an
+ * older kernel;
+ * 3. treat reads that are as long as requested as acceptable, not
+ * checking against RFKILL_EVENT_SIZE_V1 or such.
*/
-#define RFKILL_EVENT_SIZE_V1 8
+#define RFKILL_EVENT_SIZE_V1 sizeof(struct rfkill_event)
/* ioctl for turning off rfkill-input (if present) */
#define RFKILL_IOC_MAGIC 'R'
#define RFKILL_IOC_NOINPUT 1
#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
+#define RFKILL_IOC_MAX_SIZE 2
+#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32)
/* and that's all userspace gets */
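Putting the opt-in rules into practice: a reader that wants extended events
issues the ioctl once and then tolerates short reads. A minimal sketch; note
that, per the _IOW encoding, the maximum size is passed through a pointer.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rfkill.h>

ssize_t read_one_event(struct rfkill_event_ext *ev)
{
	__u32 max = sizeof(*ev);
	int fd = open("/dev/rfkill", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;

	/* Opt in; failure just means an older kernel (rule 2). */
	ioctl(fd, RFKILL_IOCTL_MAX_SIZE, &max);

	n = read(fd, ev, sizeof(*ev));
	close(fd);

	/* Accept anything >= the v1 size (rules 2 and 3). */
	return n >= (ssize_t)RFKILL_EVENT_SIZE_V1 ? n : -1;
}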
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
new file mode 100644
index 000000000000..6eeaf8bf2362
--- /dev/null
+++ b/include/uapi/linux/rkisp1-config.h
@@ -0,0 +1,999 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR MIT) */
+/*
+ * Rockchip ISP1 userspace API
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _UAPI_RKISP1_CONFIG_H
+#define _UAPI_RKISP1_CONFIG_H
+
+#include <linux/types.h>
+
+/* Defect Pixel Cluster Detection */
+#define RKISP1_CIF_ISP_MODULE_DPCC (1U << 0)
+/* Black Level Subtraction */
+#define RKISP1_CIF_ISP_MODULE_BLS (1U << 1)
+/* Sensor De-gamma */
+#define RKISP1_CIF_ISP_MODULE_SDG (1U << 2)
+/* Histogram statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_HST (1U << 3)
+/* Lens Shade Control */
+#define RKISP1_CIF_ISP_MODULE_LSC (1U << 4)
+/* Auto White Balance Gain */
+#define RKISP1_CIF_ISP_MODULE_AWB_GAIN (1U << 5)
+/* Filter */
+#define RKISP1_CIF_ISP_MODULE_FLT (1U << 6)
+/* Bayer Demosaic */
+#define RKISP1_CIF_ISP_MODULE_BDM (1U << 7)
+/* Cross Talk */
+#define RKISP1_CIF_ISP_MODULE_CTK (1U << 8)
+/* Gamma Out Curve */
+#define RKISP1_CIF_ISP_MODULE_GOC (1U << 9)
+/* Color Processing */
+#define RKISP1_CIF_ISP_MODULE_CPROC (1U << 10)
+/* Auto Focus Control statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AFC (1U << 11)
+/* Auto White Balancing statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AWB (1U << 12)
+/* Image Effect */
+#define RKISP1_CIF_ISP_MODULE_IE (1U << 13)
+/* Auto Exposure Control statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AEC (1U << 14)
+/* Wide Dynamic Range */
+#define RKISP1_CIF_ISP_MODULE_WDR (1U << 15)
+/* Denoise Pre-Filter */
+#define RKISP1_CIF_ISP_MODULE_DPF (1U << 16)
+/* Denoise Pre-Filter Strength */
+#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH (1U << 17)
+
+#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
+#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
+
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10 25
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12 81
+#define RKISP1_CIF_ISP_AE_MEAN_MAX RKISP1_CIF_ISP_AE_MEAN_MAX_V12
+
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 16
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 32
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
+
+#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3
+#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17
+
+#define RKISP1_CIF_ISP_BDM_MAX_TH 0xff
+
+/*
+ * Black level compensation
+ */
+/* maximum value for horizontal start address */
+#define RKISP1_CIF_ISP_BLS_START_H_MAX 0x00000fff
+/* maximum value for horizontal stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_H_MAX 0x00000fff
+/* maximum value for vertical start address */
+#define RKISP1_CIF_ISP_BLS_START_V_MAX 0x00000fff
+/* maximum value for vertical stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_V_MAX 0x00000fff
+/* maximum is 2^18 = 262144 */
+#define RKISP1_CIF_ISP_BLS_SAMPLES_MAX 0x00000012
+/* maximum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MAX 0x00000fff
+/* minimum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MIN 0xfffff000
+/* 13 bit range (signed) */
+#define RKISP1_CIF_ISP_BLS_FIX_MASK 0x00001fff
+
+/*
+ * Automatic white balance measurements
+ */
+#define RKISP1_CIF_ISP_AWB_MAX_GRID 1
+#define RKISP1_CIF_ISP_AWB_MAX_FRAMES 7
+
+/*
+ * Gamma out
+ */
+/* Maximum number of color samples supported */
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
+
+/*
+ * Lens shade correction
+ */
+#define RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE 8
+
+/*
+ * The following matches the tuning process,
+ * not the max capabilities of the chip.
+ */
+#define RKISP1_CIF_ISP_LSC_SAMPLES_MAX 17
+
+/*
+ * Histogram calculation
+ */
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
+
+/*
+ * Defect Pixel Cluster Correction
+ */
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+
+#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2)
+
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3)
+
+/* 0-2 for sets 1-3 */
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0)
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3)
+
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12)
+
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8)
+
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2))
+
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2))
+
+/*
+ * Denoising pre filter
+ */
+#define RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS 17
+#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
+
+/*
+ * Measurement types
+ */
+#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
+#define RKISP1_CIF_ISP_STAT_AUTOEXP (1U << 1)
+#define RKISP1_CIF_ISP_STAT_AFM (1U << 2)
+#define RKISP1_CIF_ISP_STAT_HIST (1U << 3)
+
+/**
+ * enum rkisp1_cif_isp_version - ISP variants
+ *
+ * @RKISP1_V10: Used at least in RK3288 and RK3399.
+ * @RKISP1_V11: Declared in the original vendor code, but not used. Same number
+ * of entries in grids and histogram as v10.
+ * @RKISP1_V12: Used at least in RK3326 and PX30.
+ * @RKISP1_V13: Used at least in RK1808. Same number of entries in grids and
+ * histogram as v12.
+ * @RKISP1_V_IMX8MP: Used in at least i.MX8MP. Same number of entries in grids
+ * and histogram as v10.
+ */
+enum rkisp1_cif_isp_version {
+ RKISP1_V10 = 10,
+ RKISP1_V11,
+ RKISP1_V12,
+ RKISP1_V13,
+ RKISP1_V_IMX8MP,
+};
+
+enum rkisp1_cif_isp_histogram_mode {
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_R_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_G_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_B_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM
+};
+
+enum rkisp1_cif_isp_awb_mode_type {
+ RKISP1_CIF_ISP_AWB_MODE_MANUAL,
+ RKISP1_CIF_ISP_AWB_MODE_RGB,
+ RKISP1_CIF_ISP_AWB_MODE_YCBCR
+};
+
+enum rkisp1_cif_isp_flt_mode {
+ RKISP1_CIF_ISP_FLT_STATIC_MODE,
+ RKISP1_CIF_ISP_FLT_DYNAMIC_MODE
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0: continuous measurement
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1: stop measuring after a complete frame
+ */
+enum rkisp1_cif_isp_exp_ctrl_autostop {
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0 = 0,
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1 = 1,
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_meas_mode - Exposure measure mode
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_0: Y = 16 + 0.25R + 0.5G + 0.1094B
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_1: Y = (R + G + B) x (85/256)
+ */
+enum rkisp1_cif_isp_exp_meas_mode {
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_1,
+};
+
+/*---------- PART1: Input Parameters ------------*/
+
+/**
+ * struct rkisp1_cif_isp_window - measurement window.
+ *
+ * Measurements are calculated per window inside the frame.
+ * This struct represents a window for a measurement.
+ *
+ * @h_offs: the horizontal offset of the window from the left of the frame in pixels.
+ * @v_offs: the vertical offset of the window from the top of the frame in pixels.
+ * @h_size: the horizontal size of the window in pixels.
+ * @v_size: the vertical size of the window in pixels.
+ */
+struct rkisp1_cif_isp_window {
+ __u16 h_offs;
+ __u16 v_offs;
+ __u16 h_size;
+ __u16 v_size;
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values
+ *
+ * The values will be subtracted from the sensor
+ * values. Therefore a negative value means addition instead of subtraction!
+ *
+ * @r: Fixed (signed!) subtraction value for Bayer pattern R
+ * @gr: Fixed (signed!) subtraction value for Bayer pattern Gr
+ * @gb: Fixed (signed!) subtraction value for Bayer pattern Gb
+ * @b: Fixed (signed!) subtraction value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_fixed_val {
+ __s16 r;
+ __s16 gr;
+ __s16 gb;
+ __s16 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction
+ *
+ * @enable_auto: In automatic mode the measured values are subtracted;
+ * otherwise the fixed subtraction values are used.
+ * @en_windows: enabled window
+ * @bls_window1: Measurement window 1 size
+ * @bls_window2: Measurement window 2 size
+ * @bls_samples: Set amount of measured pixels for each Bayer position
+ * (A, B, C and D) to 2^bls_samples.
+ * @fixed_val: Fixed subtraction values
+ */
+struct rkisp1_cif_isp_bls_config {
+ __u8 enable_auto;
+ __u8 en_windows;
+ struct rkisp1_cif_isp_window bls_window1;
+ struct rkisp1_cif_isp_window bls_window2;
+ __u8 bls_samples;
+ struct rkisp1_cif_isp_bls_fixed_val fixed_val;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration
+ *
+ * This structure stores the configuration of one set of methods for the DPCC
+ * algorithm. Multiple methods can be selected in each set (independently for
+ * the Green and Red/Blue components) through the @method field, the result is
+ * the logical AND of all enabled methods. The remaining fields set thresholds
+ * and factors for each method.
+ *
+ * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*)
+ * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*)
+ * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*)
+ * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*)
+ * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*)
+ * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*)
+ */
+struct rkisp1_cif_isp_dpcc_methods_config {
+ __u32 method;
+ __u32 line_thresh;
+ __u32 line_mad_fac;
+ __u32 pg_fac;
+ __u32 rnd_thresh;
+ __u32 rg_fac;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
+ *
+ * Configuration used by Defect Pixel Cluster Correction. Three sets of methods
+ * can be configured and selected through the @set_use field. The result is the
+ * logical OR of all enabled sets.
+ *
+ * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*)
+ * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*)
+ * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*)
+ * @methods: Methods sets configuration
+ * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*)
+ * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*)
+ */
+struct rkisp1_cif_isp_dpcc_config {
+ __u32 mode;
+ __u32 output_mode;
+ __u32 set_use;
+ struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX];
+ __u32 ro_limits;
+ __u32 rnd_offs;
+};
+
+/**
+ * struct rkisp1_cif_isp_gamma_corr_curve - gamma curve point definition y-axis (output).
+ *
+ * The reset values define a linear curve which has the same effect as bypass. Reset values are:
+ * gamma_y[0] = 0x0000, gamma_y[1] = 0x0100, ... gamma_y[15] = 0x0f00, gamma_y[16] = 0xfff
+ *
+ * @gamma_y: the values for the y-axis of gamma curve points. Each value is 12 bit.
+ */
+struct rkisp1_cif_isp_gamma_corr_curve {
+ __u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE];
+};
+
+/**
+ * struct rkisp1_cif_isp_gamma_curve_x_axis_pnts - De-Gamma Curve definition x increments
+ * (sampling points). gamma_dx0 is for the lower samples (1-8), gamma_dx1 is for the
+ * higher samples (9-16). The reset values for both fields are 0x44444444. This means
+ * that each sample is 4 units away from the previous one on the x-axis.
+ *
+ * @gamma_dx0: gamma curve sample points definitions. Bits 0:2 for sample 1. Bit 3 unused.
+ * Bits 4:6 for sample 2. Bit 7 unused ... Bits 28:30 for sample 8. Bit 31 unused.
+ * @gamma_dx1: gamma curve sample points definitions. Bits 0:2 for sample 9. Bit 3 unused.
+ * Bits 4:6 for sample 10. Bit 7 unused ... Bits 28:30 for sample 16. Bit 31 unused.
+ */
+struct rkisp1_cif_isp_gamma_curve_x_axis_pnts {
+ __u32 gamma_dx0;
+ __u32 gamma_dx1;
+};
+
+/**
+ * struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma
+ *
+ * @curve_r: gamma curve point definition axis for red
+ * @curve_g: gamma curve point definition axis for green
+ * @curve_b: gamma curve point definition axis for blue
+ * @xa_pnts: x axis increments
+ */
+struct rkisp1_cif_isp_sdg_config {
+ struct rkisp1_cif_isp_gamma_corr_curve curve_r;
+ struct rkisp1_cif_isp_gamma_corr_curve curve_g;
+ struct rkisp1_cif_isp_gamma_corr_curve curve_b;
+ struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts;
+};
+
+/**
+ * struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction
+ *
+ * @r_data_tbl: sample table red
+ * @gr_data_tbl: sample table green (red)
+ * @gb_data_tbl: sample table green (blue)
+ * @b_data_tbl: sample table blue
+ * @x_grad_tbl: gradient table x
+ * @y_grad_tbl: gradient table y
+ * @x_size_tbl: size table x
+ * @y_size_tbl: size table y
+ * @config_width: not used at the moment
+ * @config_height: not used at the moment
+ */
+struct rkisp1_cif_isp_lsc_config {
+ __u16 r_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 gr_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 gb_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 b_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+
+ __u16 x_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 y_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+
+ __u16 x_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 y_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 config_width;
+ __u16 config_height;
+};
+
+/**
+ * struct rkisp1_cif_isp_ie_config - Configuration used by image effects
+ *
+ * @effect: values from 'enum v4l2_colorfx'. Possible values are: V4L2_COLORFX_SEPIA,
+ * V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_AQUA, V4L2_COLORFX_EMBOSS,
+ * V4L2_COLORFX_SKETCH, V4L2_COLORFX_BW, V4L2_COLORFX_NEGATIVE
+ * @color_sel: bits 0:2 - colors bitmask (001 - blue, 010 - green, 100 - red).
+ * bits 8:15 - Threshold value of the RGB colors for the color selection effect.
+ * @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1
+ * @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2
+ * @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1
+ * @eff_mat_4: 3x3 Matrix Coefficients for Sketch Effect 2
+ * @eff_mat_5: 3x3 Matrix Coefficients for Sketch Effect 3
+ * @eff_tint: Chrominance increment values of tint (used for sepia effect)
+ */
+struct rkisp1_cif_isp_ie_config {
+ __u16 effect;
+ __u16 color_sel;
+ __u16 eff_mat_1;
+ __u16 eff_mat_2;
+ __u16 eff_mat_3;
+ __u16 eff_mat_4;
+ __u16 eff_mat_5;
+ __u16 eff_tint;
+};
+
+/**
+ * struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing
+ *
+ * @c_out_range: Chrominance pixel clipping range at output.
+ * (0 for limit, 1 for full)
+ * @y_in_range: Luminance pixel clipping range at input.
+ * @y_out_range: Luminance pixel clipping range at output.
+ * @contrast: 00~ff, 0.0~1.992
+ * @brightness: 80~7F, -128~+127
+ * @sat: saturation, 00~FF, 0.0~1.992
+ * @hue: 80~7F, -90~+87.188
+ */
+struct rkisp1_cif_isp_cproc_config {
+ __u8 c_out_range;
+ __u8 y_in_range;
+ __u8 y_out_range;
+ __u8 contrast;
+ __u8 brightness;
+ __u8 sat;
+ __u8 hue;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_meas_config - Configuration for the AWB statistics
+ *
+ * @awb_mode: the awb meas mode. From enum rkisp1_cif_isp_awb_mode_type.
+ * @awb_wnd: white balance measurement window (in pixels)
+ * @max_y: only pixel values < max_y contribute to awb measurement, set to 0
+ * to disable this feature
+ * @min_y: only pixel values > min_y contribute to awb measurement
+ * @max_csum: Chrominance sum maximum value, only consider pixels with Cb+Cr,
+ * smaller than threshold for awb measurements
+ * @min_c: Chrominance minimum value, only consider pixels with Cb/Cr
+ * each greater than threshold value for awb measurements
+ * @frames: number of frames - 1 used for mean value calculation
+ * (ucFrames=0 means 1 Frame)
+ * @awb_ref_cr: reference Cr value for AWB regulation, target for AWB
+ * @awb_ref_cb: reference Cb value for AWB regulation, target for AWB
+ * @enable_ymax_cmp: enable Y_MAX compare (Not valid in RGB measurement mode.)
+ */
+struct rkisp1_cif_isp_awb_meas_config {
+ /*
+ * Note: currently the h and v offsets are mapped to grid offsets
+ */
+ struct rkisp1_cif_isp_window awb_wnd;
+ __u32 awb_mode;
+ __u8 max_y;
+ __u8 min_y;
+ __u8 max_csum;
+ __u8 min_c;
+ __u8 frames;
+ __u8 awb_ref_cr;
+ __u8 awb_ref_cb;
+ __u8 enable_ymax_cmp;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain
+ *
+ * All fields in this struct are 10 bits wide, where:
+ * 0x100 = 1; unsigned fixed-point values, range 0 to 4 with an 8 bit fractional part.
+ *
+ * out_data_x = ( AWB_GAIN_X * in_data + 128) >> 8
+ *
+ * @gain_red: gain value for red component.
+ * @gain_green_r: gain value for green component in red line.
+ * @gain_blue: gain value for blue component.
+ * @gain_green_b: gain value for green component in blue line.
+ */
+struct rkisp1_cif_isp_awb_gain_config {
+ __u16 gain_red;
+ __u16 gain_green_r;
+ __u16 gain_blue;
+ __u16 gain_green_b;
+};
+
+/**
+ * struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering
+ *
+ * All 4 threshold fields (thresh_*) are 10 bits.
+ * All 6 factor fields (fac_*) are 6 bits.
+ *
+ * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
+ * @grn_stage1: Green filter stage 1 select (range 0x0...0x8)
+ * @chr_h_mode: Chroma filter horizontal mode
+ * @chr_v_mode: Chroma filter vertical mode
+ * @thresh_bl0: If thresh_bl1 < sum_grad < thresh_bl0 then fac_bl0 is selected (blurring th)
+ * @thresh_bl1: If sum_grad < thresh_bl1 then fac_bl1 is selected (blurring th)
+ * @thresh_sh0: If thresh_sh0 < sum_grad < thresh_sh1 then fac_sh0 is selected (sharpening th)
+ * @thresh_sh1: If thresh_sh1 < sum_grad then fac_sh1 is selected (sharpening th)
+ * @lum_weight: Parameters for luminance weight function.
+ * @fac_sh1: filter factor for sharp1 level
+ * @fac_sh0: filter factor for sharp0 level
+ * @fac_mid: filter factor for mid level and for static filter mode
+ * @fac_bl0: filter factor for blur 0 level
+ * @fac_bl1: filter factor for blur 1 level (max blur)
+ */
+struct rkisp1_cif_isp_flt_config {
+ __u32 mode;
+ __u8 grn_stage1;
+ __u8 chr_h_mode;
+ __u8 chr_v_mode;
+ __u32 thresh_bl0;
+ __u32 thresh_bl1;
+ __u32 thresh_sh0;
+ __u32 thresh_sh1;
+ __u32 lum_weight;
+ __u32 fac_sh1;
+ __u32 fac_sh0;
+ __u32 fac_mid;
+ __u32 fac_bl0;
+ __u32 fac_bl1;
+};
+
+/**
+ * struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic
+ *
+ * @demosaic_th: threshold for bayer demosaicing texture detection
+ */
+struct rkisp1_cif_isp_bdm_config {
+ __u8 demosaic_th;
+};
+
+/**
+ * struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction
+ *
+ * @coeff: color correction matrix. Values are 11-bit signed fixed-point numbers with 4 bit integer
+ * and 7 bit fractional part, ranging from -8 (0x400) to +7.992 (0x3FF). 0 is
+ * represented by 0x000 and a coefficient value of 1 as 0x080.
+ * @ct_offset: Red, Green, Blue offsets for the crosstalk correction matrix
+ */
+struct rkisp1_cif_isp_ctk_config {
+ __u16 coeff[3][3];
+ __u16 ct_offset[3];
+};
+
+enum rkisp1_cif_isp_goc_mode {
+ RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC,
+ RKISP1_CIF_ISP_GOC_MODE_EQUIDISTANT
+};
+
+/**
+ * struct rkisp1_cif_isp_goc_config - Configuration used by Gamma Out correction
+ *
+ * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
+ * @gamma_y: gamma out curve y-axis for all color components
+ *
+ * The number of entries of @gamma_y depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * V10 has RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 entries, V12 has
+ * RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 entries.
+ * RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum of the two.
+ */
+struct rkisp1_cif_isp_goc_config {
+ __u32 mode;
+ __u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES];
+};
+
+/**
+ * struct rkisp1_cif_isp_hst_config - Configuration for Histogram statistics
+ *
+ * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode)
+ * @histogram_predivider: process every stepsize pixel, all other pixels are
+ * skipped
+ * @meas_window: coordinates of the measure window
+ * @hist_weight: weighting factor for sub-windows
+ *
+ * The number of entries of @hist_weight depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * V10 has RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 entries, V12 has
+ * RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 entries.
+ * RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum of the
+ * two.
+ */
+struct rkisp1_cif_isp_hst_config {
+ __u32 mode;
+ __u8 histogram_predivider;
+ struct rkisp1_cif_isp_window meas_window;
+ __u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE];
+};
+
+/**
+ * struct rkisp1_cif_isp_aec_config - Configuration for Auto Exposure statistics
+ *
+ * @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode)
+ * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop)
+ * @meas_window: coordinates of the measure window
+ */
+struct rkisp1_cif_isp_aec_config {
+ __u32 mode;
+ __u32 autostop;
+ struct rkisp1_cif_isp_window meas_window;
+};
+
+/**
+ * struct rkisp1_cif_isp_afc_config - Configuration for the Auto Focus statistics
+ *
+ * @num_afm_win: max RKISP1_CIF_ISP_AFM_MAX_WINDOWS
+ * @afm_win: coordinates of the meas window
+ * @thres: threshold used for minimizing the influence of noise
+ * @var_shift: the number of bits for the shift operation at the end of the
+ * calculation chain.
+ */
+struct rkisp1_cif_isp_afc_config {
+ __u8 num_afm_win;
+ struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+ __u32 thres;
+ __u32 var_shift;
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED: don't use any gains in preprocessing stage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS: use only the noise function gains from
+ * registers DPF_NF_GAIN_R, ...
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS: use only the gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS: use the noise function gains and the
+ * gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS: use only the gains from AWB module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS: use the gains from AWB and LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX: upper border (only for an internal evaluation)
+ */
+enum rkisp1_cif_isp_dpf_gain_usage {
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_rb_filtersize - Red and blue filter sizes
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9: red and blue filter kernel size 13x9
+ * (means 7x5 active pixel)
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9: red and blue filter kernel size 9x9
+ * (means 5x5 active pixel)
+ */
+enum rkisp1_cif_isp_dpf_rb_filtersize {
+ RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9,
+ RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9,
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode
+ * @RKISP1_CIF_ISP_NLL_SCALE_LINEAR: use a linear scaling
+ * @RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC: use a logarithmic scaling
+ */
+enum rkisp1_cif_isp_dpf_nll_scale_mode {
+ RKISP1_CIF_ISP_NLL_SCALE_LINEAR,
+ RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC,
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_nll - Noise level lookup
+ *
+ * @coeff: Noise level Lookup coefficient
+ * @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode)
+ */
+struct rkisp1_cif_isp_dpf_nll {
+ __u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS];
+ __u32 scale_mode;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config
+ *
+ * @fltsize: The filter size for the red and blue pixels
+ * (from enum rkisp1_cif_isp_dpf_rb_filtersize)
+ * @spatial_coeff: Spatial weights
+ * @r_enable: enable filter processing for red pixels
+ * @b_enable: enable filter processing for blue pixels
+ */
+struct rkisp1_cif_isp_dpf_rb_flt {
+ __u32 fltsize;
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 r_enable;
+ __u8 b_enable;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
+ *
+ * @spatial_coeff: Spatial weights
+ * @gr_enable: enable filter processing for green pixels in green/red lines
+ * @gb_enable: enable filter processing for green pixels in green/blue lines
+ */
+struct rkisp1_cif_isp_dpf_g_flt {
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 gr_enable;
+ __u8 gb_enable;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
+ *
+ * @mode: dpf gain usage (from enum rkisp1_cif_isp_dpf_gain_usage)
+ * @nf_r_gain: Noise function Gain that replaces the AWB gain for red pixels
+ * @nf_b_gain: Noise function Gain that replaces the AWB gain for blue pixels
+ * @nf_gr_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a red line
+ * @nf_gb_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a blue line
+ */
+struct rkisp1_cif_isp_dpf_gain {
+ __u32 mode;
+ __u16 nf_r_gain;
+ __u16 nf_b_gain;
+ __u16 nf_gr_gain;
+ __u16 nf_gb_gain;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
+ *
+ * @gain: noise function gain
+ * @g_flt: green filter config
+ * @rb_flt: red blue filter config
+ * @nll: noise level lookup
+ */
+struct rkisp1_cif_isp_dpf_config {
+ struct rkisp1_cif_isp_dpf_gain gain;
+ struct rkisp1_cif_isp_dpf_g_flt g_flt;
+ struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
+ struct rkisp1_cif_isp_dpf_nll nll;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
+ *
+ * @r: filter strength of the RED filter
+ * @g: filter strength of the GREEN filter
+ * @b: filter strength of the BLUE filter
+ */
+struct rkisp1_cif_isp_dpf_strength_config {
+ __u8 r;
+ __u8 g;
+ __u8 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in rockchip isp1
+ *
+ * @dpcc_config: Defect Pixel Cluster Correction config
+ * @bls_config: Black Level Subtraction config
+ * @sdg_config: sensor degamma config
+ * @lsc_config: Lens Shade config
+ * @awb_gain_config: Auto White balance gain config
+ * @flt_config: filter config
+ * @bdm_config: demosaic config
+ * @ctk_config: cross talk config
+ * @goc_config: gamma out config
+ * @dpf_config: De-noising pre-filter config
+ * @dpf_strength_config: dpf strength config
+ * @cproc_config: color process config
+ * @ie_config: image effects config
+ */
+struct rkisp1_cif_isp_isp_other_cfg {
+ struct rkisp1_cif_isp_dpcc_config dpcc_config;
+ struct rkisp1_cif_isp_bls_config bls_config;
+ struct rkisp1_cif_isp_sdg_config sdg_config;
+ struct rkisp1_cif_isp_lsc_config lsc_config;
+ struct rkisp1_cif_isp_awb_gain_config awb_gain_config;
+ struct rkisp1_cif_isp_flt_config flt_config;
+ struct rkisp1_cif_isp_bdm_config bdm_config;
+ struct rkisp1_cif_isp_ctk_config ctk_config;
+ struct rkisp1_cif_isp_goc_config goc_config;
+ struct rkisp1_cif_isp_dpf_config dpf_config;
+ struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
+ struct rkisp1_cif_isp_cproc_config cproc_config;
+ struct rkisp1_cif_isp_ie_config ie_config;
+};
+
+/**
+ * struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
+ *
+ * @awb_meas_config: auto white balance config
+ * @hst_config: histogram config
+ * @aec_config: auto exposure config
+ * @afc_config: auto focus config
+ */
+struct rkisp1_cif_isp_isp_meas_cfg {
+ struct rkisp1_cif_isp_awb_meas_config awb_meas_config;
+ struct rkisp1_cif_isp_hst_config hst_config;
+ struct rkisp1_cif_isp_aec_config aec_config;
+ struct rkisp1_cif_isp_afc_config afc_config;
+};
+
+/**
+ * struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
+ *
+ * @module_en_update: mask the enable bits of which module should be updated
+ * @module_ens: mask the enable value of each module, only update the module
+ * whose corresponding bit is set in module_en_update
+ * @module_cfg_update: mask the config bits of which module should be updated
+ * @meas: measurement config
+ * @others: other config
+ */
+struct rkisp1_params_cfg {
+ __u32 module_en_update;
+ __u32 module_ens;
+ __u32 module_cfg_update;
+
+ struct rkisp1_cif_isp_isp_meas_cfg meas;
+ struct rkisp1_cif_isp_isp_other_cfg others;
+};
+
+/*---------- PART2: Measurement Statistics ------------*/
+
+/**
+ * struct rkisp1_cif_isp_awb_meas - AWB measured values
+ *
+ * @cnt: White pixel count, number of "white pixels" found during last
+ * measurement
+ * @mean_y_or_g: Mean value of Y within window and frames,
+ * Green if RGB is selected.
+ * @mean_cb_or_b: Mean value of Cb within window and frames,
+ * Blue if RGB is selected.
+ * @mean_cr_or_r: Mean value of Cr within window and frames,
+ * Red if RGB is selected.
+ */
+struct rkisp1_cif_isp_awb_meas {
+ __u32 cnt;
+ __u8 mean_y_or_g;
+ __u8 mean_cb_or_b;
+ __u8 mean_cr_or_r;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
+ *
+ * @awb_mean: Mean measured data
+ */
+struct rkisp1_cif_isp_awb_stat {
+ struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_meas_val - BLS measured values
+ *
+ * @meas_r: Mean measured value for Bayer pattern R
+ * @meas_gr: Mean measured value for Bayer pattern Gr
+ * @meas_gb: Mean measured value for Bayer pattern Gb
+ * @meas_b: Mean measured value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_meas_val {
+ __u16 meas_r;
+ __u16 meas_gr;
+ __u16 meas_gb;
+ __u16 meas_b;
+};
+
+/**
+ * struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
+ *
+ * @exp_mean: Mean luminance value of each block
+ * @bls_val: BLS measured values
+ *
+ * The number of entries of @exp_mean depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * V10 has RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries, V12 has
+ * RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries. RKISP1_CIF_ISP_AE_MEAN_MAX is equal
+ * to the maximum of the two.
+ *
+ * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
+ */
+struct rkisp1_cif_isp_ae_stat {
+ __u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
+ struct rkisp1_cif_isp_bls_meas_val bls_val;
+};
+
+/**
+ * struct rkisp1_cif_isp_af_meas_val - AF measured values
+ *
+ * @sum: sharpness value
+ * @lum: luminance value
+ */
+struct rkisp1_cif_isp_af_meas_val {
+ __u32 sum;
+ __u32 lum;
+};
+
+/**
+ * struct rkisp1_cif_isp_af_stat - statistics auto focus data
+ *
+ * @window: AF measured value of each window
+ *
+ * The module measures the sharpness in 3 windows of selectable size via
+ * register settings (ISP_AFM_*_A/B/C).
+ */
+struct rkisp1_cif_isp_af_stat {
+ struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+};
+
+/**
+ * struct rkisp1_cif_isp_hist_stat - statistics histogram data
+ *
+ * @hist_bins: measured bin counters. Each bin is a 20 bits unsigned fixed point
+ * type. Bits 0-4 are the fractional part and bits 5-19 are the
+ * integer part.
+ *
+ * The window of the measurements area is divided to 5x5 sub-windows for
+ * V10 and to 9x9 sub-windows for V12. The histogram is then computed for each
+ * sub-window independently and the final result is a weighted average of the
+ * histogram measurements on all sub-windows. The window of the measurements
+ * area and the weight of each sub-window are configurable using
+ * &struct rkisp1_cif_isp_hst_config.
+ *
+ * The histogram contains 16 bins in V10 and 32 bins in V12.
+ *
+ * The number of entries of @hist_bins depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * V10 has RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries, V12 has
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries. RKISP1_CIF_ISP_HIST_BIN_N_MAX is
+ * equal to the maximum of the two.
+ */
+struct rkisp1_cif_isp_hist_stat {
+ __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+};
+
+/**
+ * struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
+ *
+ * @awb: statistics data for automatic white balance
+ * @ae: statistics data for auto exposure
+ * @af: statistics data for auto focus
+ * @hist: statistics histogram data
+ */
+struct rkisp1_cif_isp_stat {
+ struct rkisp1_cif_isp_awb_stat awb;
+ struct rkisp1_cif_isp_ae_stat ae;
+ struct rkisp1_cif_isp_af_stat af;
+ struct rkisp1_cif_isp_hist_stat hist;
+};
+
+/**
+ * struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
+ *
+ * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_* definitions)
+ * @frame_id: frame ID for sync
+ * @params: statistics data
+ */
+struct rkisp1_stat_buffer {
+ __u32 meas_type;
+ __u32 frame_id;
+ struct rkisp1_cif_isp_stat params;
+};
+
+#endif /* _UAPI_RKISP1_CONFIG_H */
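To apply any of the above, userspace fills a struct rkisp1_params_cfg and
queues it on the driver's parameters video node. A sketch that enables black
level subtraction with fixed values; the V4L2 buffer handling itself
(VIDIOC_QBUF on the params node) is omitted.

#include <string.h>
#include <linux/rkisp1-config.h>

void fill_bls_params(struct rkisp1_params_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	/* Update both the enable bit and the configuration of BLS. */
	cfg->module_en_update = RKISP1_CIF_ISP_MODULE_BLS;
	cfg->module_ens = RKISP1_CIF_ISP_MODULE_BLS;
	cfg->module_cfg_update = RKISP1_CIF_ISP_MODULE_BLS;

	cfg->others.bls_config.enable_auto = 0;	/* fixed values, not measured */
	cfg->others.bls_config.fixed_val.r = 64;
	cfg->others.bls_config.fixed_val.gr = 64;
	cfg->others.bls_config.fixed_val.gb = 64;
	cfg->others.bls_config.fixed_val.b = 64;
}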
diff --git a/include/uapi/linux/romfs_fs.h b/include/uapi/linux/romfs_fs.h
index a7f1585accef..6aa05e792454 100644
--- a/include/uapi/linux/romfs_fs.h
+++ b/include/uapi/linux/romfs_fs.h
@@ -27,7 +27,7 @@ struct romfs_super_block {
__be32 word1;
__be32 size;
__be32 checksum;
- char name[0]; /* volume name */
+ char name[]; /* volume name */
};
/* On disk inode */
@@ -37,7 +37,7 @@ struct romfs_inode {
__be32 spec;
__be32 size;
__be32 checksum;
- char name[0];
+ char name[];
};
#define ROMFH_TYPE 7
diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h
index 1dccb55cf8c6..7c8970e5b84b 100644
--- a/include/uapi/linux/rpl.h
+++ b/include/uapi/linux/rpl.h
@@ -28,17 +28,17 @@ struct ipv6_rpl_sr_hdr {
pad:4,
reserved1:16;
#elif defined(__BIG_ENDIAN_BITFIELD)
- __u32 reserved:20,
+ __u32 cmpri:4,
+ cmpre:4,
pad:4,
- cmpri:4,
- cmpre:4;
+ reserved:20;
#else
#error "Please fix <asm/byteorder.h>"
#endif
union {
- struct in6_addr addr[0];
- __u8 data[0];
+ __DECLARE_FLEX_ARRAY(struct in6_addr, addr);
+ __DECLARE_FLEX_ARRAY(__u8, data);
} segments;
} __attribute__((packed));
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index e14c6dab4223..f0c8da2b185b 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -9,11 +9,13 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#define RPMSG_ADDR_ANY 0xFFFFFFFF
+
/**
* struct rpmsg_endpoint_info - endpoint info representation
* @name: name of service
- * @src: local address
- * @dst: destination address
+ * @src: local address. Set to RPMSG_ADDR_ANY if not used.
+ * @dst: destination address. Set to RPMSG_ADDR_ANY if not used.
*/
struct rpmsg_endpoint_info {
char name[32];
@@ -21,7 +23,34 @@ struct rpmsg_endpoint_info {
__u32 dst;
};
+/**
+ * Instantiate a new rpmsg char device endpoint.
+ */
#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info)
+
+/**
+ * Destroy an rpmsg char device endpoint created by the RPMSG_CREATE_EPT_IOCTL.
+ */
#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2)
+/**
+ * Instantiate a new local rpmsg service device.
+ */
+#define RPMSG_CREATE_DEV_IOCTL _IOW(0xb5, 0x3, struct rpmsg_endpoint_info)
+
+/**
+ * Release a local rpmsg device.
+ */
+#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+
+/**
+ * Get the flow control state of the remote rpmsg char device.
+ */
+#define RPMSG_GET_OUTGOING_FLOWCONTROL _IOR(0xb5, 0x5, int)
+
+/**
+ * Set the flow control state of the local rpmsg char device.
+ */
+#define RPMSG_SET_INCOMING_FLOWCONTROL _IOR(0xb5, 0x6, int)
+
#endif
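A sketch of the endpoint-creation flow through the rpmsg control device; the
/dev/rpmsg_ctrl0 node name is platform dependent and assumed here.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/rpmsg.h>

int create_ept(const char *service, __u32 dst)
{
	struct rpmsg_endpoint_info info = {
		.src = RPMSG_ADDR_ANY,	/* let the kernel pick */
		.dst = dst,
	};
	int fd = open("/dev/rpmsg_ctrl0", O_RDWR);

	if (fd < 0)
		return -1;

	strncpy(info.name, service, sizeof(info.name) - 1);

	/* On success a /dev/rpmsgN char device appears for the endpoint. */
	return ioctl(fd, RPMSG_CREATE_EPT_IOCTL, &info);
}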
diff --git a/include/uapi/linux/rpmsg_types.h b/include/uapi/linux/rpmsg_types.h
new file mode 100644
index 000000000000..36e3b9404391
--- /dev/null
+++ b/include/uapi/linux/rpmsg_types.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_RPMSG_TYPES_H
+#define _UAPI_LINUX_RPMSG_TYPES_H
+
+#include <linux/types.h>
+
+typedef __u16 __bitwise __rpmsg16;
+typedef __u32 __bitwise __rpmsg32;
+typedef __u64 __bitwise __rpmsg64;
+
+#endif /* _UAPI_LINUX_RPMSG_TYPES_H */
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index 9a402fdb60e9..c233aae5eac9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -105,23 +105,11 @@ struct rseq {
* Read and set by the kernel. Set by user-space with single-copy
* atomicity semantics. This field should only be updated by the
* thread which registered this data structure. Aligned on 64-bit.
+ *
+ * 32-bit architectures should update the low order bits of the
+ * rseq_cs field, leaving the high order bits initialized to 0.
*/
- union {
- __u64 ptr64;
-#ifdef __LP64__
- __u64 ptr;
-#else
- struct {
-#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
- __u32 padding; /* Initialized to zero. */
- __u32 ptr32;
-#else /* LITTLE */
- __u32 ptr32;
- __u32 padding; /* Initialized to zero. */
-#endif /* ENDIAN */
- } ptr;
-#endif
- } rseq_cs;
+ __u64 rseq_cs;
/*
* Restartable sequences flags field.
@@ -142,6 +130,28 @@ struct rseq {
* this thread.
*/
__u32 flags;
+
+ /*
+ * Restartable sequences node_id field. Updated by the kernel. Read by
+ * user-space with single-copy atomicity semantics. This field should
+ * only be read by the thread which registered this data structure.
+ * Aligned on 32-bit. Contains the current NUMA node ID.
+ */
+ __u32 node_id;
+
+ /*
+ * Restartable sequences mm_cid field. Updated by the kernel. Read by
+ * user-space with single-copy atomicity semantics. This field should
+ * only be read by the thread which registered this data structure.
+ * Aligned on 32-bit. Contains the current thread's concurrency ID
+ * (allocated uniquely within a memory map).
+ */
+ __u32 mm_cid;
+
+ /*
+ * Flexible array member at end of structure, after last feature field.
+ */
+ char end[];
} __attribute__((aligned(4 * sizeof(__u64))));
#endif /* _UAPI_LINUX_RSEQ_H */
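
A hedged sketch of registering the simplified layout directly.
Assumptions: the C library has not already registered rseq for the
thread (recent glibc does, making the raw syscall fail with EBUSY), and
0x53053053 is the conventional signature used by the selftests. Note
that the kernel only updates node_id and mm_cid for registrations sized
per getauxval(AT_RSEQ_FEATURE_SIZE); the fixed 32-byte registration
below gets cpu_id only.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/rseq.h>

#define RSEQ_SIG 0x53053053	/* conventional signature (selftests/glibc) */

/* The struct rseq type already carries the required 32-byte alignment. */
static __thread struct rseq rs;

int main(void)
{
	/* Register the original fixed-size (32-byte) ABI for this thread.
	 * Fails with EBUSY if the C library registered rseq already. */
	if (syscall(__NR_rseq, &rs, 32U, 0, RSEQ_SIG) < 0) {
		perror("rseq");
		return 1;
	}
	/* cpu_id is updated by the kernel before returning to user space. */
	printf("running on cpu %u\n", rs.cpu_id);
	return 0;
}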
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index fa9aff91cbf2..97aca4503a6a 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -14,6 +14,7 @@
#include <linux/const.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
/*
* The struct used to pass data via the following ioctl. Similar to the
@@ -66,6 +67,17 @@ struct rtc_pll_info {
long pll_clock; /* base PLL frequency */
};
+struct rtc_param {
+ __u64 param;
+ union {
+ __u64 uvalue;
+ __s64 svalue;
+ __u64 ptr;
+ };
+ __u32 index;
+ __u32 __pad;
+};
+
/*
* ioctl calls that are permitted to the /dev/rtc interface, if
* any of the RTC drivers are enabled.
@@ -95,6 +107,9 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
+#define RTC_PARAM_GET _IOW('p', 0x13, struct rtc_param) /* Get parameter */
+#define RTC_PARAM_SET _IOW('p', 0x14, struct rtc_param) /* Set parameter */
+
#define RTC_VL_DATA_INVALID _BITUL(0) /* Voltage too low, RTC data is invalid */
#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */
#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */
@@ -110,6 +125,26 @@ struct rtc_pll_info {
#define RTC_AF 0x20 /* Alarm interrupt */
#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
+/* feature list */
+#define RTC_FEATURE_ALARM 0
+#define RTC_FEATURE_ALARM_RES_MINUTE 1
+#define RTC_FEATURE_NEED_WEEK_DAY 2
+#define RTC_FEATURE_ALARM_RES_2S 3
+#define RTC_FEATURE_UPDATE_INTERRUPT 4
+#define RTC_FEATURE_CORRECTION 5
+#define RTC_FEATURE_BACKUP_SWITCH_MODE 6
+#define RTC_FEATURE_ALARM_WAKEUP_ONLY 7
+#define RTC_FEATURE_CNT 8
+
+/* parameter list */
+#define RTC_PARAM_FEATURES 0
+#define RTC_PARAM_CORRECTION 1
+#define RTC_PARAM_BACKUP_SWITCH_MODE 2
+
+#define RTC_BSM_DISABLED 0
+#define RTC_BSM_DIRECT 1
+#define RTC_BSM_LEVEL 2
+#define RTC_BSM_STANDBY 3
#define RTC_MAX_FREQ 8192
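
The new parameter ioctls pair a selector (RTC_PARAM_*) with a value
union. A minimal hedged sketch, assuming /dev/rtc0 and a kernel that
implements RTC_PARAM_GET, reading the feature bitmap:

/* Hypothetical sketch: query the RTC feature bitmap and test one bit. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_param param;
	int fd = open("/dev/rtc0", O_RDONLY);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&param, 0, sizeof(param));
	param.param = RTC_PARAM_FEATURES;

	if (ioctl(fd, RTC_PARAM_GET, &param) < 0)
		perror("RTC_PARAM_GET");
	else
		printf("alarm supported: %s\n",
		       (param.uvalue & (1ULL << RTC_FEATURE_ALARM)) ?
		       "yes" : "no");

	close(fd);
	return 0;
}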
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 9b814c92de12..3b687d20c9ed 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -146,6 +146,8 @@ enum {
#define RTM_NEWSTATS RTM_NEWSTATS
RTM_GETSTATS = 94,
#define RTM_GETSTATS RTM_GETSTATS
+ RTM_SETSTATS,
+#define RTM_SETSTATS RTM_SETSTATS
RTM_NEWCACHEREPORT = 96,
#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
@@ -178,6 +180,20 @@ enum {
RTM_GETVLAN,
#define RTM_GETVLAN RTM_GETVLAN
+ RTM_NEWNEXTHOPBUCKET = 116,
+#define RTM_NEWNEXTHOPBUCKET RTM_NEWNEXTHOPBUCKET
+ RTM_DELNEXTHOPBUCKET,
+#define RTM_DELNEXTHOPBUCKET RTM_DELNEXTHOPBUCKET
+ RTM_GETNEXTHOPBUCKET,
+#define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET
+
+ RTM_NEWTUNNEL = 120,
+#define RTM_NEWTUNNEL RTM_NEWTUNNEL
+ RTM_DELTUNNEL,
+#define RTM_DELTUNNEL RTM_DELTUNNEL
+ RTM_GETTUNNEL,
+#define RTM_GETTUNNEL RTM_GETTUNNEL
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -283,6 +299,7 @@ enum {
#define RTPROT_MROUTED 17 /* Multicast daemon */
#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */
#define RTPROT_BABEL 42 /* Babel daemon */
+#define RTPROT_OPENR 99 /* Open Routing (Open/R) Routes */
#define RTPROT_BGP 186 /* BGP Routes */
#define RTPROT_ISIS 187 /* ISIS Routes */
#define RTPROT_OSPF 188 /* OSPF Routes */
@@ -319,6 +336,11 @@ enum rt_scope_t {
#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */
#define RTM_F_TRAP 0x8000 /* route is trapping packets */
+#define RTM_F_OFFLOAD_FAILED 0x20000000 /* route offload failed, this value
+ * is chosen to avoid conflicts with
+ * other flags defined in
+ * include/uapi/linux/ipv6_route.h
+ */
/* Reserved table identifiers */
@@ -396,11 +418,13 @@ struct rtnexthop {
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
-#define RTNH_F_OFFLOAD 8 /* offloaded route */
+#define RTNH_F_OFFLOAD 8 /* Nexthop is offloaded */
#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
#define RTNH_F_UNRESOLVED 32 /* The entry is unresolved (ipmr) */
+#define RTNH_F_TRAP 64 /* Nexthop is trapping packets */
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | \
+ RTNH_F_OFFLOAD | RTNH_F_TRAP)
/* Macros to handle nexthops */
@@ -416,7 +440,7 @@ struct rtnexthop {
/* RTA_VIA */
struct rtvia {
__kernel_sa_family_t rtvia_family;
- __u8 rtvia_addr[0];
+ __u8 rtvia_addr[];
};
/* RTM_CACHEINFO */
@@ -478,13 +502,17 @@ enum {
#define RTAX_MAX (__RTAX_MAX - 1)
-#define RTAX_FEATURE_ECN (1 << 0)
-#define RTAX_FEATURE_SACK (1 << 1)
-#define RTAX_FEATURE_TIMESTAMP (1 << 2)
-#define RTAX_FEATURE_ALLFRAG (1 << 3)
+#define RTAX_FEATURE_ECN (1 << 0)
+#define RTAX_FEATURE_SACK (1 << 1) /* unused */
+#define RTAX_FEATURE_TIMESTAMP (1 << 2) /* unused */
+#define RTAX_FEATURE_ALLFRAG (1 << 3) /* unused */
+#define RTAX_FEATURE_TCP_USEC_TS (1 << 4)
-#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
- RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
+#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | \
+ RTAX_FEATURE_SACK | \
+ RTAX_FEATURE_TIMESTAMP | \
+ RTAX_FEATURE_ALLFRAG | \
+ RTAX_FEATURE_TCP_USEC_TS)
struct rta_session {
__u8 proto;
@@ -611,6 +639,7 @@ enum {
TCA_INGRESS_BLOCK,
TCA_EGRESS_BLOCK,
TCA_DUMP_FLAGS,
+ TCA_EXT_WARN_MSG,
__TCA_MAX
};
@@ -739,6 +768,12 @@ enum rtnetlink_groups {
#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP
RTNLGRP_BRVLAN,
#define RTNLGRP_BRVLAN RTNLGRP_BRVLAN
+ RTNLGRP_MCTP_IFADDR,
+#define RTNLGRP_MCTP_IFADDR RTNLGRP_MCTP_IFADDR
+ RTNLGRP_TUNNEL,
+#define RTNLGRP_TUNNEL RTNLGRP_TUNNEL
+ RTNLGRP_STATS,
+#define RTNLGRP_STATS RTNLGRP_STATS
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
@@ -758,6 +793,7 @@ enum {
TCA_ROOT_FLAGS,
TCA_ROOT_COUNT,
TCA_ROOT_TIME_DELTA, /* in msecs */
+ TCA_ROOT_EXT_WARN_MSG,
__TCA_ROOT_MAX,
#define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
};
@@ -766,12 +802,18 @@ enum {
#define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg))
/* tcamsg flags stored in attribute TCA_ROOT_FLAGS
*
- * TCA_FLAG_LARGE_DUMP_ON user->kernel to request for larger than TCA_ACT_MAX_PRIO
- * actions in a dump. All dump responses will contain the number of actions
- * being dumped stored in for user app's consumption in TCA_ROOT_COUNT
+ * TCA_ACT_FLAG_LARGE_DUMP_ON user->kernel to request more than
+ * TCA_ACT_MAX_PRIO actions in a dump. All dump responses will contain the
+ * number of actions being dumped, stored in TCA_ROOT_COUNT for the user
+ * app's consumption.
+ *
+ * TCA_ACT_FLAG_TERSE_DUMP user->kernel to request terse (brief) dump that only
+ * includes essential action info (kind, index, etc.)
*
*/
#define TCA_FLAG_LARGE_DUMP_ON (1 << 0)
+#define TCA_ACT_FLAG_LARGE_DUMP_ON TCA_FLAG_LARGE_DUMP_ON
+#define TCA_ACT_FLAG_TERSE_DUMP (1 << 1)
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)
@@ -779,6 +821,9 @@ enum {
#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
#define RTEXT_FILTER_SKIP_STATS (1 << 3)
#define RTEXT_FILTER_MRP (1 << 4)
+#define RTEXT_FILTER_CFM_CONFIG (1 << 5)
+#define RTEXT_FILTER_CFM_STATUS (1 << 6)
+#define RTEXT_FILTER_MST (1 << 7)
/* End of information exported to user level */
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 4accfa7e266d..8f8dc7a937a4 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -51,11 +51,11 @@ enum rxrpc_cmsg_type {
RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
- RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
+ RXRPC_CHARGE_ACCEPT = 14, /* s-: Charge the accept pool with a user call ID */
RXRPC__SUPPORTED
};
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index c852153ddb0d..90662385689b 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -4,10 +4,6 @@
#include <linux/types.h>
-struct sched_param {
- int sched_priority;
-};
-
#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
#define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */
@@ -96,6 +92,8 @@ struct sched_param {
* on a CPU with a capacity big enough to fit the specified value.
* A task with a max utilization value smaller than 1024 is more likely
* scheduled on a CPU with no more capacity than the specified value.
+ *
+ * A task utilization boundary can be reset by setting the attribute to -1.
*/
struct sched_attr {
__u32 size;
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 28ad40d9acba..b7d91d4cf0db 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -140,6 +140,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_ECN_SUPPORTED 130
#define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131
#define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
+#define SCTP_REMOTE_UDP_ENCAPS_PORT 132
+#define SCTP_PLPMTUD_PROBE_INTERVAL 133
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -363,7 +365,7 @@ struct sctp_assoc_change {
__u16 sac_outbound_streams;
__u16 sac_inbound_streams;
sctp_assoc_t sac_assoc_id;
- __u8 sac_info[0];
+ __u8 sac_info[];
};
/*
@@ -434,7 +436,7 @@ struct sctp_remote_error {
__u32 sre_length;
__be16 sre_error;
sctp_assoc_t sre_assoc_id;
- __u8 sre_data[0];
+ __u8 sre_data[];
};
@@ -451,7 +453,7 @@ struct sctp_send_failed {
__u32 ssf_error;
struct sctp_sndrcvinfo ssf_info;
sctp_assoc_t ssf_assoc_id;
- __u8 ssf_data[0];
+ __u8 ssf_data[];
};
struct sctp_send_failed_event {
@@ -461,7 +463,7 @@ struct sctp_send_failed_event {
__u32 ssf_error;
struct sctp_sndinfo ssfe_info;
sctp_assoc_t ssf_assoc_id;
- __u8 ssf_data[0];
+ __u8 ssf_data[];
};
/*
@@ -1027,7 +1029,7 @@ struct sctp_getaddrs_old {
struct sctp_getaddrs {
sctp_assoc_t assoc_id; /*input*/
__u32 addr_num; /*output*/
- __u8 addrs[0]; /*output, variable size*/
+ __u8 addrs[]; /*output, variable size*/
};
/* A socket user request obtained via SCTP_GET_ASSOC_STATS that retrieves
@@ -1197,13 +1199,28 @@ struct sctp_event {
uint8_t se_on;
};
+struct sctp_udpencaps {
+ sctp_assoc_t sue_assoc_id;
+ struct sockaddr_storage sue_address;
+ uint16_t sue_port;
+};
+
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
- SCTP_SS_MAX = SCTP_SS_RR
+ SCTP_SS_FC,
+ SCTP_SS_WFQ,
+ SCTP_SS_MAX = SCTP_SS_WFQ
+};
+
+/* Probe Interval socket option */
+struct sctp_probeinterval {
+ sctp_assoc_t spi_assoc_id;
+ struct sockaddr_storage spi_address;
+ __u32 spi_interval;
};
#endif /* _UAPI_SCTP_H */
diff --git a/include/uapi/linux/sdla.h b/include/uapi/linux/sdla.h
deleted file mode 100644
index 1e3735be6511..000000000000
--- a/include/uapi/linux/sdla.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)if_ifrad.h 0.20 13 Apr 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Structure packing
- *
- * 0.20 Mike McLagan New flags for S508 buffer handling
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _UAPISDLA_H
-#define _UAPISDLA_H
-
-/* adapter type */
-#define SDLA_TYPES
-#define SDLA_S502A 5020
-#define SDLA_S502E 5021
-#define SDLA_S503 5030
-#define SDLA_S507 5070
-#define SDLA_S508 5080
-#define SDLA_S509 5090
-#define SDLA_UNKNOWN -1
-
-/* port selection flags for the S508 */
-#define SDLA_S508_PORT_V35 0x00
-#define SDLA_S508_PORT_RS232 0x02
-
-/* Z80 CPU speeds */
-#define SDLA_CPU_3M 0x00
-#define SDLA_CPU_5M 0x01
-#define SDLA_CPU_7M 0x02
-#define SDLA_CPU_8M 0x03
-#define SDLA_CPU_10M 0x04
-#define SDLA_CPU_16M 0x05
-#define SDLA_CPU_12M 0x06
-
-/* some private IOCTLs */
-#define SDLA_IDENTIFY (FRAD_LAST_IOCTL + 1)
-#define SDLA_CPUSPEED (FRAD_LAST_IOCTL + 2)
-#define SDLA_PROTOCOL (FRAD_LAST_IOCTL + 3)
-
-#define SDLA_CLEARMEM (FRAD_LAST_IOCTL + 4)
-#define SDLA_WRITEMEM (FRAD_LAST_IOCTL + 5)
-#define SDLA_READMEM (FRAD_LAST_IOCTL + 6)
-
-struct sdla_mem {
- int addr;
- int len;
- void __user *data;
-};
-
-#define SDLA_START (FRAD_LAST_IOCTL + 7)
-#define SDLA_STOP (FRAD_LAST_IOCTL + 8)
-
-/* some offsets in the Z80's memory space */
-#define SDLA_NMIADDR 0x0000
-#define SDLA_CONF_ADDR 0x0010
-#define SDLA_S502A_NMIADDR 0x0066
-#define SDLA_CODE_BASEADDR 0x0100
-#define SDLA_WINDOW_SIZE 0x2000
-#define SDLA_ADDR_MASK 0x1FFF
-
-/* largest handleable block of data */
-#define SDLA_MAX_DATA 4080
-#define SDLA_MAX_MTU 4072 /* MAX_DATA - sizeof(fradhdr) */
-#define SDLA_MAX_DLCI 24
-
-/* this should be the same as frad_conf */
-struct sdla_conf {
- short station;
- short config;
- short kbaud;
- short clocking;
- short max_frm;
- short T391;
- short T392;
- short N391;
- short N392;
- short N393;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
-};
-
-/* this should be the same as dlci_conf */
-struct sdla_dlci_conf {
- short config;
- short CIR_fwd;
- short Bc_fwd;
- short Be_fwd;
- short CIR_bwd;
- short Bc_bwd;
- short Be_bwd;
- short Tc_fwd;
- short Tc_bwd;
- short Tf_max;
- short Tb_max;
-};
-
-
-#endif /* _UAPISDLA_H */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 6ba18b82a02e..dbfc9b37fcae 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -23,6 +23,8 @@
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
+/* Received notifications wait in killable state (only respond to fatal signals) */
+#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
/*
* All BPF programs must return a 32-bit value.
@@ -113,8 +115,11 @@ struct seccomp_notif_resp {
__u32 flags;
};
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+
/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
+#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
/**
* struct seccomp_notif_addfd
@@ -147,4 +152,6 @@ struct seccomp_notif_addfd {
#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
struct seccomp_notif_addfd)
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+
#endif /* _UAPI_LINUX_SECCOMP_H */
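
A hedged sketch of the new per-listener flag: given a user-notification
fd previously obtained with SECCOMP_FILTER_FLAG_NEW_LISTENER, opt it
into synchronous wakeups. As in the kernel selftests, the flag value is
passed directly as the ioctl argument rather than by pointer:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* 'listener' is assumed to come from seccomp(SECCOMP_SET_MODE_FILTER,
 * SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog) elsewhere in the program. */
int enable_sync_wakeup(int listener)
{
	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
		  SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP) < 0) {
		perror("SECCOMP_IOCTL_NOTIF_SET_FLAGS");
		return -1;
	}
	return 0;
}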
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index 6f5af1a84213..d3994b7716bc 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -44,13 +44,28 @@ enum opal_lock_state {
OPAL_LK = 0x04, /* 0100 */
};
+enum opal_lock_flags {
+ /* IOC_OPAL_SAVE will also store the provided key for locking */
+ OPAL_SAVE_FOR_LOCK = 0x01,
+};
+
+enum opal_key_type {
+ OPAL_INCLUDED = 0, /* key[] is the key */
+ OPAL_KEYRING, /* key is in keyring */
+};
+
struct opal_key {
__u8 lr;
__u8 key_len;
- __u8 __align[6];
+ __u8 key_type;
+ __u8 __align[5];
__u8 key[OPAL_KEY_MAX];
};
+enum opal_revert_lsp_opts {
+ OPAL_PRESERVE = 0x01,
+};
+
struct opal_lr_act {
struct opal_key key;
__u32 sum;
@@ -73,10 +88,21 @@ struct opal_user_lr_setup {
struct opal_session_info session;
};
+struct opal_lr_status {
+ struct opal_session_info session;
+ __u64 range_start;
+ __u64 range_length;
+	__u32 RLE; /* Read Lock Enabled */
+	__u32 WLE; /* Write Lock Enabled */
+ __u32 l_state;
+ __u8 align[4];
+};
+
struct opal_lock_unlock {
struct opal_session_info session;
__u32 l_state;
- __u8 __align[4];
+ __u16 flags;
+ __u8 __align[2];
};
struct opal_new_pw {
@@ -132,6 +158,42 @@ struct opal_read_write_table {
__u64 priv;
};
+#define OPAL_FL_SUPPORTED 0x00000001
+#define OPAL_FL_LOCKING_SUPPORTED 0x00000002
+#define OPAL_FL_LOCKING_ENABLED 0x00000004
+#define OPAL_FL_LOCKED 0x00000008
+#define OPAL_FL_MBR_ENABLED 0x00000010
+#define OPAL_FL_MBR_DONE 0x00000020
+#define OPAL_FL_SUM_SUPPORTED 0x00000040
+
+struct opal_status {
+ __u32 flags;
+ __u32 reserved;
+};
+
+/*
+ * Geometry Reporting per TCG Storage OPAL SSC
+ * section 3.1.1.4
+ */
+struct opal_geometry {
+ __u8 align;
+ __u32 logical_block_size;
+ __u64 alignment_granularity;
+ __u64 lowest_aligned_lba;
+ __u8 __align[3];
+};
+
+struct opal_discovery {
+ __u64 data;
+ __u64 size;
+};
+
+struct opal_revert_lsp {
+ struct opal_key key;
+ __u32 options;
+ __u32 __pad;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -148,5 +210,10 @@ struct opal_read_write_table {
#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done)
#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr)
#define IOC_OPAL_GENERIC_TABLE_RW _IOW('p', 235, struct opal_read_write_table)
+#define IOC_OPAL_GET_STATUS _IOR('p', 236, struct opal_status)
+#define IOC_OPAL_GET_LR_STATUS _IOW('p', 237, struct opal_lr_status)
+#define IOC_OPAL_GET_GEOMETRY _IOR('p', 238, struct opal_geometry)
+#define IOC_OPAL_DISCOVERY _IOW('p', 239, struct opal_discovery)
+#define IOC_OPAL_REVERT_LSP _IOW('p', 240, struct opal_revert_lsp)
#endif /* _UAPI_SED_OPAL_H */
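
A minimal hedged sketch of the new status query, assuming an
Opal-capable disk at the placeholder path /dev/nvme0n1 and
CAP_SYS_ADMIN:

/* Hypothetical sketch: read the Opal status flags of a block device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sed-opal.h>

int main(void)
{
	struct opal_status st = { 0 };
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* placeholder path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, IOC_OPAL_GET_STATUS, &st) < 0)
		perror("IOC_OPAL_GET_STATUS");
	else
		printf("locking enabled: %s\n",
		       (st.flags & OPAL_FL_LOCKING_ENABLED) ? "yes" : "no");

	close(fd);
	return 0;
}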
diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h
index 286e8d6a8e98..13bcbc8bba32 100644
--- a/include/uapi/linux/seg6.h
+++ b/include/uapi/linux/seg6.h
@@ -30,7 +30,7 @@ struct ipv6_sr_hdr {
__u8 flags;
__u16 tag;
- struct in6_addr segments[0];
+ struct in6_addr segments[];
};
#define SR6_FLAG1_PROTECTED (1 << 6)
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index eb815e0d0ac3..ae78791372b8 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -26,7 +26,7 @@ enum {
struct seg6_iptunnel_encap {
int mode;
- struct ipv6_sr_hdr srh[0];
+ struct ipv6_sr_hdr srh[];
};
#define SEG6_IPTUN_ENCAP_SIZE(x) ((sizeof(*x)) + (((x)->srh->hdrlen + 1) << 3))
@@ -35,6 +35,8 @@ enum {
SEG6_IPTUN_MODE_INLINE,
SEG6_IPTUN_MODE_ENCAP,
SEG6_IPTUN_MODE_L2ENCAP,
+ SEG6_IPTUN_MODE_ENCAP_RED,
+ SEG6_IPTUN_MODE_L2ENCAP_RED,
};
#endif
diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h
index edc138bdc56d..4fdc424c9cb3 100644
--- a/include/uapi/linux/seg6_local.h
+++ b/include/uapi/linux/seg6_local.h
@@ -26,6 +26,9 @@ enum {
SEG6_LOCAL_IIF,
SEG6_LOCAL_OIF,
SEG6_LOCAL_BPF,
+ SEG6_LOCAL_VRFTABLE,
+ SEG6_LOCAL_COUNTERS,
+ SEG6_LOCAL_FLAVORS,
__SEG6_LOCAL_MAX,
};
#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1)
@@ -62,6 +65,8 @@ enum {
SEG6_LOCAL_ACTION_END_AM = 14,
/* custom BPF action */
SEG6_LOCAL_ACTION_END_BPF = 15,
+ /* decap and lookup of DA in v4 or v6 table */
+ SEG6_LOCAL_ACTION_END_DT46 = 16,
__SEG6_LOCAL_ACTION_MAX,
};
@@ -77,4 +82,56 @@ enum {
#define SEG6_LOCAL_BPF_PROG_MAX (__SEG6_LOCAL_BPF_PROG_MAX - 1)
+/* SRv6 Behavior counters are encoded as netlink attributes guaranteeing the
+ * correct alignment.
+ * Each counter is identified by a different attribute type (i.e.
+ * SEG6_LOCAL_CNT_PACKETS).
+ *
+ * - SEG6_LOCAL_CNT_PACKETS: identifies a counter that counts the number of
+ * packets that have been CORRECTLY processed by an SRv6 Behavior instance
+ * (i.e., packets that generate errors or are dropped are NOT counted).
+ *
+ * - SEG6_LOCAL_CNT_BYTES: identifies a counter that counts the total amount
+ * of traffic in bytes of all packets that have been CORRECTLY processed by
+ * an SRv6 Behavior instance (i.e., packets that generate errors or are
+ * dropped are NOT counted).
+ *
+ * - SEG6_LOCAL_CNT_ERRORS: identifies a counter that counts the number of
+ * packets that have NOT been properly processed by an SRv6 Behavior instance
+ * (i.e., packets that generate errors or are dropped).
+ */
+enum {
+ SEG6_LOCAL_CNT_UNSPEC,
+	SEG6_LOCAL_CNT_PAD, /* pad for 64-bit values */
+ SEG6_LOCAL_CNT_PACKETS,
+ SEG6_LOCAL_CNT_BYTES,
+ SEG6_LOCAL_CNT_ERRORS,
+ __SEG6_LOCAL_CNT_MAX,
+};
+
+#define SEG6_LOCAL_CNT_MAX (__SEG6_LOCAL_CNT_MAX - 1)
+
+/* SRv6 End* Flavor attributes */
+enum {
+ SEG6_LOCAL_FLV_UNSPEC,
+ SEG6_LOCAL_FLV_OPERATION,
+ SEG6_LOCAL_FLV_LCBLOCK_BITS,
+ SEG6_LOCAL_FLV_LCNODE_FN_BITS,
+ __SEG6_LOCAL_FLV_MAX,
+};
+
+#define SEG6_LOCAL_FLV_MAX (__SEG6_LOCAL_FLV_MAX - 1)
+
+/* Designed flavor operations for SRv6 End* Behavior */
+enum {
+ SEG6_LOCAL_FLV_OP_UNSPEC,
+ SEG6_LOCAL_FLV_OP_PSP,
+ SEG6_LOCAL_FLV_OP_USP,
+ SEG6_LOCAL_FLV_OP_USD,
+ SEG6_LOCAL_FLV_OP_NEXT_CSID,
+ __SEG6_LOCAL_FLV_OP_MAX
+};
+
+#define SEG6_LOCAL_FLV_OP_MAX (__SEG6_LOCAL_FLV_OP_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 93eb3c496ff1..de9b4733607e 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -11,6 +11,7 @@
#ifndef _UAPI_LINUX_SERIAL_H
#define _UAPI_LINUX_SERIAL_H
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/tty_flags.h>
@@ -52,11 +53,11 @@ struct serial_struct {
#define PORT_16450 2
#define PORT_16550 3
#define PORT_16550A 4
-#define PORT_CIRRUS 5 /* usurped by cyclades.c */
+#define PORT_CIRRUS 5
#define PORT_16650 6
#define PORT_16650V2 7
#define PORT_16750 8
-#define PORT_STARTECH 9 /* usurped by cyclades.c */
+#define PORT_STARTECH 9
#define PORT_16C950 10 /* Oxford Semiconductor */
#define PORT_16654 11
#define PORT_16850 12
@@ -107,29 +108,65 @@ struct serial_icounter_struct {
int reserved[9];
};
-/*
+/**
+ * struct serial_rs485 - serial interface for controlling RS485 settings.
+ * @flags: RS485 feature flags.
+ * @delay_rts_before_send: Delay before send (milliseconds).
+ * @delay_rts_after_send: Delay after send (milliseconds).
+ * @addr_recv: Receive filter for RS485 addressing mode
+ * (used only when %SER_RS485_ADDR_RECV is set).
+ * @addr_dest: Destination address for RS485 addressing mode
+ * (used only when %SER_RS485_ADDR_DEST is set).
+ * @padding0: Padding (set to zero).
+ * @padding1: Padding (set to zero).
+ * @padding: Deprecated, use @padding0 and @padding1 instead.
+ * Do not use with @addr_recv and @addr_dest (due to
+ * overlap).
+ *
* Serial interface for controlling RS485 settings on chips with suitable
* support. Set with TIOCSRS485 and get with TIOCGRS485 if supported by your
* platform. The set function returns the new state, with any unsupported bits
* reverted appropriately.
+ *
+ * The flag bits are:
+ *
+ * * %SER_RS485_ENABLED - RS485 enabled.
+ * * %SER_RS485_RTS_ON_SEND - Logical level for RTS pin when sending.
+ * * %SER_RS485_RTS_AFTER_SEND - Logical level for RTS pin after sending.
+ * * %SER_RS485_RX_DURING_TX - Full-duplex RS485 line.
+ * * %SER_RS485_TERMINATE_BUS - Enable bus termination (if supported).
+ * * %SER_RS485_ADDRB - Enable RS485 addressing mode.
+ * * %SER_RS485_ADDR_RECV - Receive address filter (enables @addr_recv). Requires %SER_RS485_ADDRB.
+ * * %SER_RS485_ADDR_DEST - Destination address (enables @addr_dest). Requires %SER_RS485_ADDRB.
+ * * %SER_RS485_MODE_RS422 - Enable RS422. Requires %SER_RS485_ENABLED.
*/
-
struct serial_rs485 {
- __u32 flags; /* RS485 feature flags */
-#define SER_RS485_ENABLED (1 << 0) /* If enabled */
-#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for
- RTS pin when
- sending */
-#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for
- RTS pin after sent*/
-#define SER_RS485_RX_DURING_TX (1 << 4)
-#define SER_RS485_TERMINATE_BUS (1 << 5) /* Enable bus
- termination
- (if supported) */
- __u32 delay_rts_before_send; /* Delay before send (milliseconds) */
- __u32 delay_rts_after_send; /* Delay after send (milliseconds) */
- __u32 padding[5]; /* Memory is cheap, new structs
- are a royal PITA .. */
+ __u32 flags;
+#define SER_RS485_ENABLED _BITUL(0)
+#define SER_RS485_RTS_ON_SEND _BITUL(1)
+#define SER_RS485_RTS_AFTER_SEND _BITUL(2)
+/* Placeholder for bit 3: SER_RS485_RTS_BEFORE_SEND, which isn't used anymore */
+#define SER_RS485_RX_DURING_TX _BITUL(4)
+#define SER_RS485_TERMINATE_BUS _BITUL(5)
+#define SER_RS485_ADDRB _BITUL(6)
+#define SER_RS485_ADDR_RECV _BITUL(7)
+#define SER_RS485_ADDR_DEST _BITUL(8)
+#define SER_RS485_MODE_RS422 _BITUL(9)
+
+ __u32 delay_rts_before_send;
+ __u32 delay_rts_after_send;
+
+ /* The fields below are defined by flags */
+ union {
+ __u32 padding[5]; /* Memory is cheap, new structs are a pain */
+
+ struct {
+ __u8 addr_recv;
+ __u8 addr_dest;
+ __u8 padding0[2];
+ __u32 padding1[4];
+ };
+ };
};
/*
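
A minimal hedged sketch of configuring the new addressing mode via
TIOCSRS485, assuming /dev/ttyS0 and a driver with RS485 support; as the
header notes, the kernel reverts any unsupported bits in the structure
it hands back:

/* Hypothetical sketch: enable RS485 with an example receive filter. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* assumed port */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
		      SER_RS485_ADDRB | SER_RS485_ADDR_RECV;
	rs485.addr_recv = 0x42;			/* example filter address */
	rs485.delay_rts_after_send = 1;		/* milliseconds */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	close(fd);
	return 0;
}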
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 851b982f8c4b..9c007a106330 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -1,22 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- * linux/drivers/char/serial_core.h
- *
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _UAPILINUX_SERIAL_CORE_H
#define _UAPILINUX_SERIAL_CORE_H
@@ -25,6 +9,10 @@
/*
* The type definitions. These are from Ted Ts'o's serial.h
+ * For historical reasons, the values from 0 to 13 are defined
+ * in include/uapi/linux/serial.h; do not define them here.
+ * Values 0 to 19 are used by setserial from busybox and must never
+ * be modified.
*/
#define PORT_NS16550A 14
#define PORT_XSCALE 15
@@ -68,6 +56,9 @@
/* NVIDIA Tegra Combined UART */
#define PORT_TEGRA_TCU 41
+/* ASPEED AST2x00 virtual UART */
+#define PORT_ASPEED_VUART 42
+
/* Intel EG20 */
#define PORT_PCH_8LINE 44
#define PORT_PCH_2LINE 45
@@ -91,15 +82,9 @@
#define PORT_SCIF 53
#define PORT_IRDA 54
-/* Samsung S3C2410 SoC and derivatives thereof */
-#define PORT_S3C2410 55
-
/* SGI IP22 aka Indy / Challenge S / Indigo 2 */
#define PORT_IP22ZILOG 56
-/* Sharp LH7a40x -- an ARM9 SoC series */
-#define PORT_LH7A40X 57
-
/* PPC CPM type number */
#define PORT_CPM 58
@@ -109,43 +94,23 @@
/* IBM icom */
#define PORT_ICOM 60
-/* Samsung S3C2440 SoC */
-#define PORT_S3C2440 61
-
/* Motorola i.MX SoC */
#define PORT_IMX 62
-/* Marvell MPSC (obsolete unused) */
-#define PORT_MPSC 63
-
/* TXX9 type number */
#define PORT_TXX9 64
-/* NEC VR4100 series SIU/DSIU */
-#define PORT_VR41XX_SIU 65
-#define PORT_VR41XX_DSIU 66
-
-/* Samsung S3C2400 SoC */
-#define PORT_S3C2400 67
-
-/* M32R SIO */
-#define PORT_M32R_SIO 68
-
/*Digi jsm */
#define PORT_JSM 69
-#define PORT_PNX8XXX 70
-
/* SUN4V Hypervisor Console */
#define PORT_SUNHV 72
-#define PORT_S3C2412 73
-
/* Xilinx uartlite */
#define PORT_UARTLITE 74
-/* Blackfin bf5xx */
-#define PORT_BFIN 75
+/* Broadcom BCM7271 UART */
+#define PORT_BCM7271 76
/* Broadcom SB1250, etc. SOC */
#define PORT_SB1250_DUART 77
@@ -153,13 +118,6 @@
/* Freescale ColdFire */
#define PORT_MCF 78
-/* Blackfin SPORT */
-#define PORT_BFIN_SPORT 79
-
-/* MN10300 on-chip UART numbers */
-#define PORT_MN10300 80
-#define PORT_MN10300_CTS 81
-
#define PORT_SC26XX 82
/* SH-SCI */
@@ -167,9 +125,6 @@
#define PORT_S3C6400 84
-/* NWPSERIAL, now removed */
-#define PORT_NWPSERIAL 85
-
/* MAX3100 */
#define PORT_MAX3100 86
@@ -210,8 +165,8 @@
/* Atheros AR933X SoC */
#define PORT_AR933X 99
-/* Energy Micro efm32 SoC */
-#define PORT_EFMUART 100
+/* MCHP 16550A UART with 256 byte FIFOs */
+#define PORT_MCHP16550A 100
/* ARC (Synopsys) on-chip UART */
#define PORT_ARC 101
@@ -228,13 +183,10 @@
/* ST ASC type numbers */
#define PORT_ASC 105
-/* Tilera TILE-Gx UART */
-#define PORT_TILEGX 106
-
/* MEN 16z135 UART */
#define PORT_MEN_Z135 107
-/* SC16IS74xx */
+/* SC16IS7xx */
#define PORT_SC16IS7XX 108
/* MESON */
@@ -246,9 +198,6 @@
/* SPRD SERIAL */
#define PORT_SPRD 111
-/* Cris v10 / v32 SoC */
-#define PORT_CRIS 112
-
/* STM32 USART */
#define PORT_STM32 113
@@ -279,4 +228,10 @@
/* Freescale LINFlexD UART */
#define PORT_LINFLEXUART 122
+/* Sunplus UART */
+#define PORT_SUNPLUS 123
+
+/* Generic type identifier for ports whose type is not important to userspace. */
+#define PORT_GENERIC (-1)
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index be07b5470f4b..9c987b04e2d0 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -44,6 +44,12 @@
#define UART_IIR_RX_TIMEOUT 0x0c /* OMAP RX Timeout interrupt */
#define UART_IIR_XOFF 0x10 /* OMAP XOFF/Special Character */
#define UART_IIR_CTS_RTS_DSR 0x20 /* OMAP CTS/RTS/DSR Change */
+#define UART_IIR_64BYTE_FIFO 0x20 /* 16750 64 bytes FIFO */
+#define UART_IIR_FIFO_ENABLED 0xc0 /* FIFOs enabled / port type identification */
+#define UART_IIR_FIFO_ENABLED_8250 0x00 /* 8250: no FIFO */
+#define UART_IIR_FIFO_ENABLED_16550 0x80 /* 16550: (broken/unusable) FIFO */
+#define UART_IIR_FIFO_ENABLED_16550A 0xc0 /* 16550A: FIFO enabled */
+#define UART_IIR_FIFO_ENABLED_16750 0xe0 /* 16750: 64 bytes FIFO enabled */
#define UART_FCR 2 /* Out: FIFO Control Register */
#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
@@ -62,6 +68,7 @@
* ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
* TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
* TI16C752: 8 16 56 60 8 16 32 56
+ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
* Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
*/
#define UART_FCR_R_TRIG_00 0x00
@@ -138,7 +145,7 @@
#define UART_LSR_PE 0x04 /* Parity error indicator */
#define UART_LSR_OE 0x02 /* Overrun error indicator */
#define UART_LSR_DR 0x01 /* Receiver data ready */
-#define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */
+#define UART_LSR_BRK_ERROR_BITS (UART_LSR_BI|UART_LSR_FE|UART_LSR_PE|UART_LSR_OE)
#define UART_MSR 6 /* In: Modem Status Register */
#define UART_MSR_DCD 0x80 /* Data Carrier Detect */
@@ -149,7 +156,7 @@
#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */
#define UART_MSR_DDSR 0x02 /* Delta DSR */
#define UART_MSR_DCTS 0x01 /* Delta CTS */
-#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
+#define UART_MSR_ANY_DELTA (UART_MSR_DDCD|UART_MSR_TERI|UART_MSR_DDSR|UART_MSR_DCTS)
#define UART_SCR 7 /* I/O: Scratch Register */
diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h
new file mode 100644
index 000000000000..154a87a1eca9
--- /dev/null
+++ b/include/uapi/linux/sev-guest.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD SEV and SNP guest driver.
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV API specification is available at: https://developer.amd.com/sev/
+ */
+
+#ifndef __UAPI_LINUX_SEV_GUEST_H_
+#define __UAPI_LINUX_SEV_GUEST_H_
+
+#include <linux/types.h>
+
+#define SNP_REPORT_USER_DATA_SIZE 64
+
+struct snp_report_req {
+ /* user data that should be included in the report */
+ __u8 user_data[SNP_REPORT_USER_DATA_SIZE];
+
+ /* The vmpl level to be included in the report */
+ __u32 vmpl;
+
+ /* Must be zero filled */
+ __u8 rsvd[28];
+};
+
+struct snp_report_resp {
+ /* response data, see SEV-SNP spec for the format */
+ __u8 data[4000];
+};
+
+struct snp_derived_key_req {
+ __u32 root_key_select;
+ __u32 rsvd;
+ __u64 guest_field_select;
+ __u32 vmpl;
+ __u32 guest_svn;
+ __u64 tcb_version;
+};
+
+struct snp_derived_key_resp {
+ /* response data, see SEV-SNP spec for the format */
+ __u8 data[64];
+};
+
+struct snp_guest_request_ioctl {
+ /* message version number (must be non-zero) */
+ __u8 msg_version;
+
+ /* Request and response structure address */
+ __u64 req_data;
+ __u64 resp_data;
+
+ /* bits[63:32]: VMM error code, bits[31:0] firmware error code (see psp-sev.h) */
+ union {
+ __u64 exitinfo2;
+ struct {
+ __u32 fw_error;
+ __u32 vmm_error;
+ };
+ };
+};
+
+struct snp_ext_report_req {
+ struct snp_report_req data;
+
+ /* where to copy the certificate blob */
+ __u64 certs_address;
+
+ /* length of the certificate blob */
+ __u32 certs_len;
+};
+
+#define SNP_GUEST_REQ_IOC_TYPE 'S'
+
+/* Get SNP attestation report */
+#define SNP_GET_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x0, struct snp_guest_request_ioctl)
+
+/* Get a derived key from the root */
+#define SNP_GET_DERIVED_KEY _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x1, struct snp_guest_request_ioctl)
+
+/* Get SNP extended report as defined in the GHCB specification version 2. */
+#define SNP_GET_EXT_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x2, struct snp_guest_request_ioctl)
+
+/* Guest message request EXIT_INFO_2 constants */
+#define SNP_GUEST_FW_ERR_MASK GENMASK_ULL(31, 0)
+#define SNP_GUEST_VMM_ERR_SHIFT 32
+#define SNP_GUEST_VMM_ERR(x) (((u64)x) << SNP_GUEST_VMM_ERR_SHIFT)
+
+#define SNP_GUEST_VMM_ERR_INVALID_LEN 1
+#define SNP_GUEST_VMM_ERR_BUSY 2
+
+#endif /* __UAPI_LINUX_SEV_GUEST_H_ */
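
A minimal hedged sketch of requesting an attestation report, only
meaningful inside an SEV-SNP guest exposing /dev/sev-guest; the
user_data nonce below is an arbitrary example value:

/* Hypothetical sketch: issue SNP_GET_REPORT and report any error codes. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sev-guest.h>

int main(void)
{
	struct snp_report_req req;
	struct snp_report_resp resp;
	struct snp_guest_request_ioctl guest_req;
	int fd = open("/dev/sev-guest", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	memset(&guest_req, 0, sizeof(guest_req));
	memcpy(req.user_data, "hello", 5);	/* nonce bound into the report */

	guest_req.msg_version = 1;
	guest_req.req_data = (__u64)(unsigned long)&req;
	guest_req.resp_data = (__u64)(unsigned long)&resp;

	if (ioctl(fd, SNP_GET_REPORT, &guest_req) < 0)
		fprintf(stderr, "SNP_GET_REPORT: fw_error=%u vmm_error=%u\n",
			guest_req.fw_error, guest_req.vmm_error);

	close(fd);
	return 0;
}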
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 0e11ca421ca4..b531e3ef011a 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -33,4 +33,275 @@ enum { /* SMC PNET Table commands */
#define SMCR_GENL_FAMILY_NAME "SMC_PNETID"
#define SMCR_GENL_FAMILY_VERSION 1
+/* gennetlink interface to access non-socket information from SMC module */
+#define SMC_GENL_FAMILY_NAME "SMC_GEN_NETLINK"
+#define SMC_GENL_FAMILY_VERSION 1
+
+#define SMC_PCI_ID_STR_LEN 16 /* Max length of pci id string */
+#define SMC_MAX_HOSTNAME_LEN 32 /* Max length of the hostname */
+#define SMC_MAX_UEID 4 /* Max number of user EIDs */
+#define SMC_MAX_EID_LEN 32 /* Max length of an EID */
+
+/* SMC_GENL_FAMILY commands */
+enum {
+ SMC_NETLINK_GET_SYS_INFO = 1,
+ SMC_NETLINK_GET_LGR_SMCR,
+ SMC_NETLINK_GET_LINK_SMCR,
+ SMC_NETLINK_GET_LGR_SMCD,
+ SMC_NETLINK_GET_DEV_SMCD,
+ SMC_NETLINK_GET_DEV_SMCR,
+ SMC_NETLINK_GET_STATS,
+ SMC_NETLINK_GET_FBACK_STATS,
+ SMC_NETLINK_DUMP_UEID,
+ SMC_NETLINK_ADD_UEID,
+ SMC_NETLINK_REMOVE_UEID,
+ SMC_NETLINK_FLUSH_UEID,
+ SMC_NETLINK_DUMP_SEID,
+ SMC_NETLINK_ENABLE_SEID,
+ SMC_NETLINK_DISABLE_SEID,
+ SMC_NETLINK_DUMP_HS_LIMITATION,
+ SMC_NETLINK_ENABLE_HS_LIMITATION,
+ SMC_NETLINK_DISABLE_HS_LIMITATION,
+};
+
+/* SMC_GENL_FAMILY top level attributes */
+enum {
+ SMC_GEN_UNSPEC,
+ SMC_GEN_SYS_INFO, /* nest */
+ SMC_GEN_LGR_SMCR, /* nest */
+ SMC_GEN_LINK_SMCR, /* nest */
+ SMC_GEN_LGR_SMCD, /* nest */
+ SMC_GEN_DEV_SMCD, /* nest */
+ SMC_GEN_DEV_SMCR, /* nest */
+ SMC_GEN_STATS, /* nest */
+ SMC_GEN_FBACK_STATS, /* nest */
+ __SMC_GEN_MAX,
+ SMC_GEN_MAX = __SMC_GEN_MAX - 1
+};
+
+/* SMC_GEN_SYS_INFO attributes */
+enum {
+ SMC_NLA_SYS_UNSPEC,
+ SMC_NLA_SYS_VER, /* u8 */
+ SMC_NLA_SYS_REL, /* u8 */
+ SMC_NLA_SYS_IS_ISM_V2, /* u8 */
+ SMC_NLA_SYS_LOCAL_HOST, /* string */
+ SMC_NLA_SYS_SEID, /* string */
+ SMC_NLA_SYS_IS_SMCR_V2, /* u8 */
+ __SMC_NLA_SYS_MAX,
+ SMC_NLA_SYS_MAX = __SMC_NLA_SYS_MAX - 1
+};
+
+/* SMC_NLA_LGR_D_V2_COMMON and SMC_NLA_LGR_R_V2_COMMON nested attributes */
+enum {
+ SMC_NLA_LGR_V2_VER, /* u8 */
+ SMC_NLA_LGR_V2_REL, /* u8 */
+ SMC_NLA_LGR_V2_OS, /* u8 */
+ SMC_NLA_LGR_V2_NEG_EID, /* string */
+ SMC_NLA_LGR_V2_PEER_HOST, /* string */
+ __SMC_NLA_LGR_V2_MAX,
+ SMC_NLA_LGR_V2_MAX = __SMC_NLA_LGR_V2_MAX - 1
+};
+
+/* SMC_NLA_LGR_R_V2 nested attributes */
+enum {
+ SMC_NLA_LGR_R_V2_UNSPEC,
+ SMC_NLA_LGR_R_V2_DIRECT, /* u8 */
+ SMC_NLA_LGR_R_V2_MAX_CONNS, /* u8 */
+ SMC_NLA_LGR_R_V2_MAX_LINKS, /* u8 */
+ __SMC_NLA_LGR_R_V2_MAX,
+ SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1
+};
+
+/* SMC_GEN_LGR_SMCR attributes */
+enum {
+ SMC_NLA_LGR_R_UNSPEC,
+ SMC_NLA_LGR_R_ID, /* u32 */
+ SMC_NLA_LGR_R_ROLE, /* u8 */
+ SMC_NLA_LGR_R_TYPE, /* u8 */
+ SMC_NLA_LGR_R_PNETID, /* string */
+ SMC_NLA_LGR_R_VLAN_ID, /* u8 */
+ SMC_NLA_LGR_R_CONNS_NUM, /* u32 */
+ SMC_NLA_LGR_R_V2_COMMON, /* nest */
+ SMC_NLA_LGR_R_V2, /* nest */
+ SMC_NLA_LGR_R_NET_COOKIE, /* u64 */
+ SMC_NLA_LGR_R_PAD, /* flag */
+ SMC_NLA_LGR_R_BUF_TYPE, /* u8 */
+ __SMC_NLA_LGR_R_MAX,
+ SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1
+};
+
+/* SMC_GEN_LINK_SMCR attributes */
+enum {
+ SMC_NLA_LINK_UNSPEC,
+ SMC_NLA_LINK_ID, /* u8 */
+ SMC_NLA_LINK_IB_DEV, /* string */
+ SMC_NLA_LINK_IB_PORT, /* u8 */
+ SMC_NLA_LINK_GID, /* string */
+ SMC_NLA_LINK_PEER_GID, /* string */
+ SMC_NLA_LINK_CONN_CNT, /* u32 */
+ SMC_NLA_LINK_NET_DEV, /* u32 */
+ SMC_NLA_LINK_UID, /* u32 */
+ SMC_NLA_LINK_PEER_UID, /* u32 */
+ SMC_NLA_LINK_STATE, /* u32 */
+ __SMC_NLA_LINK_MAX,
+ SMC_NLA_LINK_MAX = __SMC_NLA_LINK_MAX - 1
+};
+
+/* SMC_GEN_LGR_SMCD attributes */
+enum {
+ SMC_NLA_LGR_D_UNSPEC,
+ SMC_NLA_LGR_D_ID, /* u32 */
+ SMC_NLA_LGR_D_GID, /* u64 */
+ SMC_NLA_LGR_D_PEER_GID, /* u64 */
+ SMC_NLA_LGR_D_VLAN_ID, /* u8 */
+ SMC_NLA_LGR_D_CONNS_NUM, /* u32 */
+ SMC_NLA_LGR_D_PNETID, /* string */
+ SMC_NLA_LGR_D_CHID, /* u16 */
+ SMC_NLA_LGR_D_PAD, /* flag */
+ SMC_NLA_LGR_D_V2_COMMON, /* nest */
+ SMC_NLA_LGR_D_EXT_GID, /* u64 */
+ SMC_NLA_LGR_D_PEER_EXT_GID, /* u64 */
+ __SMC_NLA_LGR_D_MAX,
+ SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
+};
+
+/* SMC_NLA_DEV_PORT nested attributes */
+enum {
+ SMC_NLA_DEV_PORT_UNSPEC,
+ SMC_NLA_DEV_PORT_PNET_USR, /* u8 */
+ SMC_NLA_DEV_PORT_PNETID, /* string */
+ SMC_NLA_DEV_PORT_NETDEV, /* u32 */
+ SMC_NLA_DEV_PORT_STATE, /* u8 */
+ SMC_NLA_DEV_PORT_VALID, /* u8 */
+ SMC_NLA_DEV_PORT_LNK_CNT, /* u32 */
+ __SMC_NLA_DEV_PORT_MAX,
+ SMC_NLA_DEV_PORT_MAX = __SMC_NLA_DEV_PORT_MAX - 1
+};
+
+/* SMC_GEN_DEV_SMCD and SMC_GEN_DEV_SMCR attributes */
+enum {
+ SMC_NLA_DEV_UNSPEC,
+ SMC_NLA_DEV_USE_CNT, /* u32 */
+ SMC_NLA_DEV_IS_CRIT, /* u8 */
+ SMC_NLA_DEV_PCI_FID, /* u32 */
+ SMC_NLA_DEV_PCI_CHID, /* u16 */
+ SMC_NLA_DEV_PCI_VENDOR, /* u16 */
+ SMC_NLA_DEV_PCI_DEVICE, /* u16 */
+ SMC_NLA_DEV_PCI_ID, /* string */
+ SMC_NLA_DEV_PORT, /* nest */
+ SMC_NLA_DEV_PORT2, /* nest */
+ SMC_NLA_DEV_IB_NAME, /* string */
+ __SMC_NLA_DEV_MAX,
+ SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1
+};
+
+/* SMC_NLA_STATS_T_TX(RX)_RMB_SIZE nested attributes */
+/* SMC_NLA_STATS_TX(RX)PLOAD_SIZE nested attributes */
+enum {
+ SMC_NLA_STATS_PLOAD_PAD,
+ SMC_NLA_STATS_PLOAD_8K, /* u64 */
+ SMC_NLA_STATS_PLOAD_16K, /* u64 */
+ SMC_NLA_STATS_PLOAD_32K, /* u64 */
+ SMC_NLA_STATS_PLOAD_64K, /* u64 */
+ SMC_NLA_STATS_PLOAD_128K, /* u64 */
+ SMC_NLA_STATS_PLOAD_256K, /* u64 */
+ SMC_NLA_STATS_PLOAD_512K, /* u64 */
+ SMC_NLA_STATS_PLOAD_1024K, /* u64 */
+ SMC_NLA_STATS_PLOAD_G_1024K, /* u64 */
+ __SMC_NLA_STATS_PLOAD_MAX,
+ SMC_NLA_STATS_PLOAD_MAX = __SMC_NLA_STATS_PLOAD_MAX - 1
+};
+
+/* SMC_NLA_STATS_T_TX(RX)_RMB_STATS nested attributes */
+enum {
+ SMC_NLA_STATS_RMB_PAD,
+ SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_SIZE_SM_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_FULL_PEER_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_FULL_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_REUSE_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_ALLOC_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_DGRADE_CNT, /* u64 */
+ __SMC_NLA_STATS_RMB_MAX,
+ SMC_NLA_STATS_RMB_MAX = __SMC_NLA_STATS_RMB_MAX - 1
+};
+
+/* SMC_NLA_STATS_SMCD_TECH and _SMCR_TECH nested attributes */
+enum {
+ SMC_NLA_STATS_T_PAD,
+ SMC_NLA_STATS_T_TX_RMB_SIZE, /* nest */
+ SMC_NLA_STATS_T_RX_RMB_SIZE, /* nest */
+ SMC_NLA_STATS_T_TXPLOAD_SIZE, /* nest */
+ SMC_NLA_STATS_T_RXPLOAD_SIZE, /* nest */
+ SMC_NLA_STATS_T_TX_RMB_STATS, /* nest */
+ SMC_NLA_STATS_T_RX_RMB_STATS, /* nest */
+ SMC_NLA_STATS_T_CLNT_V1_SUCC, /* u64 */
+ SMC_NLA_STATS_T_CLNT_V2_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SRV_V1_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SRV_V2_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SENDPAGE_CNT, /* u64 */
+ SMC_NLA_STATS_T_SPLICE_CNT, /* u64 */
+ SMC_NLA_STATS_T_CORK_CNT, /* u64 */
+ SMC_NLA_STATS_T_NDLY_CNT, /* u64 */
+ SMC_NLA_STATS_T_URG_DATA_CNT, /* u64 */
+ SMC_NLA_STATS_T_RX_BYTES, /* u64 */
+ SMC_NLA_STATS_T_TX_BYTES, /* u64 */
+ SMC_NLA_STATS_T_RX_CNT, /* u64 */
+ SMC_NLA_STATS_T_TX_CNT, /* u64 */
+ __SMC_NLA_STATS_T_MAX,
+ SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1
+};
+
+/* SMC_GEN_STATS attributes */
+enum {
+ SMC_NLA_STATS_PAD,
+ SMC_NLA_STATS_SMCD_TECH, /* nest */
+ SMC_NLA_STATS_SMCR_TECH, /* nest */
+ SMC_NLA_STATS_CLNT_HS_ERR_CNT, /* u64 */
+ SMC_NLA_STATS_SRV_HS_ERR_CNT, /* u64 */
+ __SMC_NLA_STATS_MAX,
+ SMC_NLA_STATS_MAX = __SMC_NLA_STATS_MAX - 1
+};
+
+/* SMC_GEN_FBACK_STATS attributes */
+enum {
+ SMC_NLA_FBACK_STATS_PAD,
+ SMC_NLA_FBACK_STATS_TYPE, /* u8 */
+ SMC_NLA_FBACK_STATS_SRV_CNT, /* u64 */
+ SMC_NLA_FBACK_STATS_CLNT_CNT, /* u64 */
+ SMC_NLA_FBACK_STATS_RSN_CODE, /* u32 */
+ SMC_NLA_FBACK_STATS_RSN_CNT, /* u16 */
+ __SMC_NLA_FBACK_STATS_MAX,
+ SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1
+};
+
+/* SMC_NETLINK_UEID attributes */
+enum {
+ SMC_NLA_EID_TABLE_UNSPEC,
+ SMC_NLA_EID_TABLE_ENTRY, /* string */
+ __SMC_NLA_EID_TABLE_MAX,
+ SMC_NLA_EID_TABLE_MAX = __SMC_NLA_EID_TABLE_MAX - 1
+};
+
+/* SMC_NETLINK_SEID attributes */
+enum {
+ SMC_NLA_SEID_UNSPEC,
+ SMC_NLA_SEID_ENTRY, /* string */
+ SMC_NLA_SEID_ENABLED, /* u8 */
+ __SMC_NLA_SEID_TABLE_MAX,
+ SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1
+};
+
+/* SMC_NETLINK_HS_LIMITATION attributes */
+enum {
+ SMC_NLA_HS_LIMITATION_UNSPEC,
+ SMC_NLA_HS_LIMITATION_ENABLED, /* u8 */
+ __SMC_NLA_HS_LIMITATION_MAX,
+ SMC_NLA_HS_LIMITATION_MAX = __SMC_NLA_HS_LIMITATION_MAX - 1
+};
+
+/* SMC socket options */
+#define SMC_LIMIT_HS 1 /* constraint on smc handshake */
+
#endif /* _UAPI_LINUX_SMC_H */
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 8cb3a6fef553..58eceb7f5df2 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -107,6 +107,8 @@ struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
__aligned_u64 my_gid; /* My GID */
__aligned_u64 token; /* Token of DMB */
__aligned_u64 peer_token; /* Token of remote DMBE */
+ __aligned_u64 peer_gid_ext; /* Peer GID (extended part) */
+ __aligned_u64 my_gid_ext; /* My GID (extended part) */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index cee9f8e6fce3..a0819c6a5988 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -24,7 +24,7 @@ enum
IPSTATS_MIB_INOCTETS, /* InOctets */
IPSTATS_MIB_INDELIVERS, /* InDelivers */
IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
- IPSTATS_MIB_OUTPKTS, /* OutRequests */
+ IPSTATS_MIB_OUTREQUESTS, /* OutRequests */
IPSTATS_MIB_OUTOCTETS, /* OutOctets */
/* other fields */
IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
@@ -57,6 +57,7 @@ enum
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ IPSTATS_MIB_OUTPKTS, /* OutTransmits */
__IPSTATS_MIB_MAX
};
@@ -95,6 +96,8 @@ enum
ICMP_MIB_OUTADDRMASKS, /* OutAddrMasks */
ICMP_MIB_OUTADDRMASKREPS, /* OutAddrMaskReps */
ICMP_MIB_CSUMERRORS, /* InCsumErrors */
+ ICMP_MIB_RATELIMITGLOBAL, /* OutRateLimitGlobal */
+ ICMP_MIB_RATELIMITHOST, /* OutRateLimitHost */
__ICMP_MIB_MAX
};
@@ -112,6 +115,7 @@ enum
ICMP6_MIB_OUTMSGS, /* OutMsgs */
ICMP6_MIB_OUTERRORS, /* OutErrors */
ICMP6_MIB_CSUMERRORS, /* InCsumErrors */
+ ICMP6_MIB_RATELIMITHOST, /* OutRateLimitHost */
__ICMP6_MIB_MAX
};
@@ -159,6 +163,7 @@ enum
UDP_MIB_SNDBUFERRORS, /* SndbufErrors */
UDP_MIB_CSUMERRORS, /* InCsumErrors */
UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */
+ UDP_MIB_MEMERRORS, /* MemErrors */
__UDP_MIB_MAX
};
@@ -288,6 +293,15 @@ enum
LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */
LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */
LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */
+ LINUX_MIB_TCPDSACKIGNOREDDUBIOUS, /* TCPDSACKIgnoredDubious */
+ LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */
+ LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */
+ LINUX_MIB_TCPPLBREHASH, /* TCPPLBRehash */
+ LINUX_MIB_TCPAOREQUIRED, /* TCPAORequired */
+ LINUX_MIB_TCPAOBAD, /* TCPAOBad */
+ LINUX_MIB_TCPAOKEYNOTFOUND, /* TCPAOKeyNotFound */
+ LINUX_MIB_TCPAOGOOD, /* TCPAOGood */
+ LINUX_MIB_TCPAODROPPEDICMPS, /* TCPAODroppedIcmps */
__LINUX_MIB_MAX
};
@@ -340,6 +354,8 @@ enum
LINUX_MIB_TLSRXDEVICE, /* TlsRxDevice */
LINUX_MIB_TLSDECRYPTERROR, /* TlsDecryptError */
LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */
+ LINUX_MIB_TLSDECRYPTRETRY, /* TlsDecryptRetry */
+ LINUX_MIB_TLSRXNOPADVIOL, /* TlsRxNoPadViolation */
__LINUX_MIB_TLSMAX
};
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index c3409c8ec0dd..d3fcd3b5ec53 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -26,4 +26,13 @@ struct __kernel_sockaddr_storage {
};
};
+#define SOCK_SNDBUF_LOCK 1
+#define SOCK_RCVBUF_LOCK 2
+
+#define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK)
+
+#define SOCK_TXREHASH_DEFAULT 255
+#define SOCK_TXREHASH_DISABLED 0
+#define SOCK_TXREHASH_ENABLED 1
+
#endif /* _UAPI_LINUX_SOCKET_H */
diff --git a/include/uapi/linux/soundcard.h b/include/uapi/linux/soundcard.h
index f3b21f989872..ac1318793a86 100644
--- a/include/uapi/linux/soundcard.h
+++ b/include/uapi/linux/soundcard.h
@@ -1051,7 +1051,7 @@ typedef struct mixer_vol_table {
* the GPL version of OSS-4.x and build against that version
* of the header.
*
- * We redefine the extern keyword so that make headers_check
+ * We redefine the extern keyword so that usr/include/headers_check.pl
* does not complain about SEQ_USE_EXTBUF.
*/
#define SEQ_DECLAREBUF() SEQ_USE_EXTBUF()
diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h
new file mode 100644
index 000000000000..ca56e477d161
--- /dev/null
+++ b/include/uapi/linux/spi/spi.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_SPI_H
+#define _UAPI_SPI_H
+
+#include <linux/const.h>
+
+#define SPI_CPHA _BITUL(0) /* clock phase */
+#define SPI_CPOL _BITUL(1) /* clock polarity */
+
+#define SPI_MODE_0 (0|0) /* (original MicroWire) */
+#define SPI_MODE_1 (0|SPI_CPHA)
+#define SPI_MODE_2 (SPI_CPOL|0)
+#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
+#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA)
+
+#define SPI_CS_HIGH _BITUL(2) /* chipselect active high? */
+#define SPI_LSB_FIRST _BITUL(3) /* per-word bits-on-wire */
+#define SPI_3WIRE _BITUL(4) /* SI/SO signals shared */
+#define SPI_LOOP _BITUL(5) /* loopback mode */
+#define SPI_NO_CS _BITUL(6) /* 1 dev/bus, no chipselect */
+#define SPI_READY _BITUL(7) /* slave pulls low to pause */
+#define SPI_TX_DUAL _BITUL(8) /* transmit with 2 wires */
+#define SPI_TX_QUAD _BITUL(9) /* transmit with 4 wires */
+#define SPI_RX_DUAL _BITUL(10) /* receive with 2 wires */
+#define SPI_RX_QUAD _BITUL(11) /* receive with 4 wires */
+#define SPI_CS_WORD _BITUL(12) /* toggle cs after each word */
+#define SPI_TX_OCTAL _BITUL(13) /* transmit with 8 wires */
+#define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */
+#define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */
+#define SPI_RX_CPHA_FLIP _BITUL(16) /* flip CPHA on Rx only xfer */
+#define SPI_MOSI_IDLE_LOW _BITUL(17) /* leave mosi line low when idle */
+
+/*
+ * All the bits defined above should be covered by SPI_MODE_USER_MASK.
+ * The SPI_MODE_USER_MASK has the SPI_MODE_KERNEL_MASK counterpart in
+ * 'include/linux/spi/spi.h'. The bits defined here are from bit 0 upwards
+ * while in SPI_MODE_KERNEL_MASK they are from the other end downwards.
+ * These bits must not overlap. A static assert check should make sure of that.
+ * If adding extra bits, make sure to increase the bit index below as well.
+ */
+#define SPI_MODE_USER_MASK (_BITUL(18) - 1)
+
+#endif /* _UAPI_SPI_H */
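
Since spidev now shares these definitions, a minimal hedged sketch of
programming a full 32-bit mode word on a placeholder /dev/spidev0.0
node:

/* Hypothetical sketch: set clock mode 3 with active-high chipselect. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>	/* now pulls in linux/spi/spi.h */

int main(void)
{
	uint32_t mode = SPI_MODE_3 | SPI_CS_HIGH;
	int fd = open("/dev/spidev0.0", O_RDWR);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The 32-bit variant is needed for flags above bit 7. */
	if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0)
		perror("SPI_IOC_WR_MODE32");

	close(fd);
	return 0;
}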
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index d56427c0b3e0..0c3da08f2aff 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -25,35 +25,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-
-/* User space versions of kernel symbols for SPI clocking modes,
- * matching <linux/spi/spi.h>
- */
-
-#define SPI_CPHA 0x01
-#define SPI_CPOL 0x02
-
-#define SPI_MODE_0 (0|0)
-#define SPI_MODE_1 (0|SPI_CPHA)
-#define SPI_MODE_2 (SPI_CPOL|0)
-#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
-
-#define SPI_CS_HIGH 0x04
-#define SPI_LSB_FIRST 0x08
-#define SPI_3WIRE 0x10
-#define SPI_LOOP 0x20
-#define SPI_NO_CS 0x40
-#define SPI_READY 0x80
-#define SPI_TX_DUAL 0x100
-#define SPI_TX_QUAD 0x200
-#define SPI_RX_DUAL 0x400
-#define SPI_RX_QUAD 0x800
-#define SPI_CS_WORD 0x1000
-#define SPI_TX_OCTAL 0x2000
-#define SPI_RX_OCTAL 0x4000
-#define SPI_3WIRE_HIZ 0x8000
-
-/*---------------------------------------------------------------------------*/
+#include <linux/spi/spi.h>
/* IOCTL commands */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 82cc58fe9368..2f2ee82d5517 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -124,7 +124,8 @@ struct statx {
__u32 stx_dev_minor;
/* 0x90 */
__u64 stx_mnt_id;
- __u64 __spare2;
+ __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
/* 0xa0 */
__u64 __spare3[12]; /* Spare space for future expansion */
/* 0x100 */
@@ -152,6 +153,8 @@ struct statx {
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
+#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
@@ -171,9 +174,12 @@ struct statx {
* be of use to ordinary userspace programs such as GUIs or ls rather than
* specialised tools.
*
- * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS
+ * Note that the flags marked [I] correspond to the FS_IOC_SETFLAGS flags
* semantically. Where possible, the numerical value is picked to correspond
- * also.
+ * also. Note that the DAX attribute indicates that the file is in the CPU
+ * direct access state. It does not correspond to the per-inode flag that
+ * some filesystems support.
+ *
*/
#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */
#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */
@@ -183,7 +189,7 @@ struct statx {
#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */
#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
-#define STATX_ATTR_DAX 0x00002000 /* [I] File is DAX */
+#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */
#endif /* _UAPI_LINUX_STAT_H */
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index ee8220f8dcf5..2ec6f35cda32 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,6 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
#include <linux/compiler_types.h>
#ifndef __always_inline
#define __always_inline inline
#endif
+
+/**
+ * __struct_group() - Create a mirrored named and anonymous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for later reuse, as well
+ * as both having struct attributes appended.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+ } ATTRS
+
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
+/**
+ * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define __DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ struct { \
+ struct { } __empty_ ## NAME; \
+ TYPE NAME[]; \
+ }
+#endif
+
+#ifndef __counted_by
+#define __counted_by(m)
+#endif
+
+#endif /* _UAPI_LINUX_STDDEF_H */
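
The rpl.h hunk earlier in this series is a user of
__DECLARE_FLEX_ARRAY(). A hedged sketch of both helpers on a
hypothetical struct (GNU C, for the empty-struct member the C version
of the macro relies on):

#include <stdio.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical packet layout: 'hdr' mirrors the two anonymous members,
 * and the trailing union holds two views of one flexible array. */
struct pkt {
	__u32 len;
	__struct_group(pkt_hdr, hdr, /* no attrs */,
		__u8 src;
		__u8 dst;
	);
	union {
		__DECLARE_FLEX_ARRAY(__u32, words);
		__DECLARE_FLEX_ARRAY(__u8, bytes);
	} payload;
};

int main(void)
{
	struct pkt p = { .len = 2 };

	p.src = 1;	/* members usable without the group name... */
	p.hdr.dst = 2;	/* ...or through the named mirror */
	printf("header spans %zu bytes\n", sizeof(p.hdr));
	return 0;
}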
diff --git a/include/uapi/linux/stm.h b/include/uapi/linux/stm.h
index 7bac318b4440..de3579c2cff0 100644
--- a/include/uapi/linux/stm.h
+++ b/include/uapi/linux/stm.h
@@ -36,7 +36,7 @@ struct stp_policy_id {
/* padding */
__u16 __reserved_0;
__u32 __reserved_1;
- char id[0];
+ char id[];
};
#define STP_POLICY_ID_SET _IOWR('%', 0, struct stp_policy_id)
diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h
new file mode 100644
index 000000000000..08f46b60b151
--- /dev/null
+++ b/include/uapi/linux/surface_aggregator/cdev.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Surface System Aggregator Module (SSAM) user-space EC interface.
+ *
+ * Definitions, structs, and IOCTLs for the /dev/surface/aggregator misc
+ * device. This device provides direct user-space access to the SSAM EC.
+ * Intended for debugging and development.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum ssam_cdev_request_flags - Request flags for SSAM cdev request IOCTL.
+ *
+ * @SSAM_CDEV_REQUEST_HAS_RESPONSE:
+ * Specifies that the request expects a response. If not set, the request
+ * will be directly completed after its underlying packet has been
+ * transmitted. If set, the request transport system waits for a response
+ * of the request.
+ *
+ * @SSAM_CDEV_REQUEST_UNSEQUENCED:
+ * Specifies that the request should be transmitted via an unsequenced
+ * packet. If set, the request must not have a response, meaning that this
+ * flag and the %SSAM_CDEV_REQUEST_HAS_RESPONSE flag are mutually
+ * exclusive.
+ */
+enum ssam_cdev_request_flags {
+ SSAM_CDEV_REQUEST_HAS_RESPONSE = 0x01,
+ SSAM_CDEV_REQUEST_UNSEQUENCED = 0x02,
+};
+
+/**
+ * struct ssam_cdev_request - Controller request IOCTL argument.
+ * @target_category: Target category of the SAM request.
+ * @target_id: Target ID of the SAM request.
+ * @command_id: Command ID of the SAM request.
+ * @instance_id: Instance ID of the SAM request.
+ * @flags: Request flags (see &enum ssam_cdev_request_flags).
+ * @status: Request status (output).
+ * @payload: Request payload (input data).
+ * @payload.data: Pointer to request payload data.
+ * @payload.length: Length of request payload data (in bytes).
+ * @response: Request response (output data).
+ * @response.data: Pointer to response buffer.
+ * @response.length: On input: Capacity of response buffer (in bytes).
+ * On output: Length of request response (number of bytes
+ * in the buffer that are actually used).
+ */
+struct ssam_cdev_request {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 command_id;
+ __u8 instance_id;
+ __u16 flags;
+ __s16 status;
+
+ struct {
+ __u64 data;
+ __u16 length;
+ __u8 __pad[6];
+ } payload;
+
+ struct {
+ __u64 data;
+ __u16 length;
+ __u8 __pad[6];
+ } response;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_notifier_desc - Notifier descriptor.
+ * @priority: Priority value determining the order in which notifier
+ * callbacks will be called. A higher value means higher
+ * priority, i.e. the associated callback will be executed
+ * earlier than other (lower priority) callbacks.
+ * @target_category: The event target category for which this notifier should
+ * receive events.
+ *
+ * Specifies the notifier that should be registered or unregistered,
+ * specifically with which priority and for which target category of events.
+ */
+struct ssam_cdev_notifier_desc {
+ __s32 priority;
+ __u8 target_category;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_event_desc - Event descriptor.
+ * @reg: Registry via which the event will be enabled/disabled.
+ * @reg.target_category: Target category for the event registry requests.
+ * @reg.target_id: Target ID for the event registry requests.
+ * @reg.cid_enable: Command ID for the event-enable request.
+ * @reg.cid_disable: Command ID for the event-disable request.
+ * @id: ID specifying the event.
+ * @id.target_category: Target category of the event source.
+ * @id.instance: Instance ID of the event source.
+ * @flags: Flags used for enabling the event.
+ *
+ * Specifies which event should be enabled/disabled and how to do that.
+ */
+struct ssam_cdev_event_desc {
+ struct {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 cid_enable;
+ __u8 cid_disable;
+ } reg;
+
+ struct {
+ __u8 target_category;
+ __u8 instance;
+ } id;
+
+ __u8 flags;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_event - SSAM event sent by the EC.
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
+ * @target_id: Target ID of the event source.
+ * @command_id: Command ID of the event.
+ * @instance_id: Instance ID of the event source.
+ * @length: Length of the event payload in bytes.
+ * @data: Event payload data.
+ */
+struct ssam_cdev_event {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 command_id;
+ __u8 instance_id;
+ __u16 length;
+ __u8 data[];
+} __attribute__((__packed__));
+
+#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request)
+#define SSAM_CDEV_NOTIF_REGISTER _IOW(0xA5, 2, struct ssam_cdev_notifier_desc)
+#define SSAM_CDEV_NOTIF_UNREGISTER _IOW(0xA5, 3, struct ssam_cdev_notifier_desc)
+#define SSAM_CDEV_EVENT_ENABLE _IOW(0xA5, 4, struct ssam_cdev_event_desc)
+#define SSAM_CDEV_EVENT_DISABLE _IOW(0xA5, 5, struct ssam_cdev_event_desc)
+
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */
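For illustration, a hedged sketch of issuing a request through this interface from user space; error handling is trimmed, and the target/command/instance IDs are placeholders rather than real SAM values:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/surface_aggregator/cdev.h>

int main(void)
{
	struct ssam_cdev_request rqst;
	unsigned char resp[64];
	int fd = open("/dev/surface/aggregator", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&rqst, 0, sizeof(rqst));
	rqst.target_category = 0x01;	/* placeholder target category */
	rqst.target_id = 0x01;		/* placeholder target ID */
	rqst.command_id = 0x01;		/* placeholder command ID */
	rqst.flags = SSAM_CDEV_REQUEST_HAS_RESPONSE;
	rqst.response.data = (__u64)(uintptr_t)resp;
	rqst.response.length = sizeof(resp);

	if (ioctl(fd, SSAM_CDEV_REQUEST, &rqst) == 0 && rqst.status == 0)
		printf("response: %u bytes\n", rqst.response.length);

	close(fd);
	return 0;
}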
diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h
new file mode 100644
index 000000000000..0833aab0d819
--- /dev/null
+++ b/include/uapi/linux/surface_aggregator/dtx.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Surface DTX (clipboard detachment system driver) user-space interface.
+ *
+ * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This
+ * device allows user-space to control the clipboard detachment process on
+ * Surface Book series devices.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Status/error categories */
+#define SDTX_CATEGORY_STATUS 0x0000
+#define SDTX_CATEGORY_RUNTIME_ERROR 0x1000
+#define SDTX_CATEGORY_HARDWARE_ERROR 0x2000
+#define SDTX_CATEGORY_UNKNOWN 0xf000
+
+#define SDTX_CATEGORY_MASK 0xf000
+#define SDTX_CATEGORY(value) ((value) & SDTX_CATEGORY_MASK)
+
+#define SDTX_STATUS(code) ((code) | SDTX_CATEGORY_STATUS)
+#define SDTX_ERR_RT(code) ((code) | SDTX_CATEGORY_RUNTIME_ERROR)
+#define SDTX_ERR_HW(code) ((code) | SDTX_CATEGORY_HARDWARE_ERROR)
+#define SDTX_UNKNOWN(code) ((code) | SDTX_CATEGORY_UNKNOWN)
+
+#define SDTX_SUCCESS(value) (SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS)
+
+/* Latch status values */
+#define SDTX_LATCH_CLOSED SDTX_STATUS(0x00)
+#define SDTX_LATCH_OPENED SDTX_STATUS(0x01)
+
+/* Base state values */
+#define SDTX_BASE_DETACHED SDTX_STATUS(0x00)
+#define SDTX_BASE_ATTACHED SDTX_STATUS(0x01)
+
+/* Runtime errors (non-critical) */
+#define SDTX_DETACH_NOT_FEASIBLE SDTX_ERR_RT(0x01)
+#define SDTX_DETACH_TIMEDOUT SDTX_ERR_RT(0x02)
+
+/* Hardware errors (critical) */
+#define SDTX_ERR_FAILED_TO_OPEN SDTX_ERR_HW(0x01)
+#define SDTX_ERR_FAILED_TO_REMAIN_OPEN SDTX_ERR_HW(0x02)
+#define SDTX_ERR_FAILED_TO_CLOSE SDTX_ERR_HW(0x03)
+
+/* Base types */
+#define SDTX_DEVICE_TYPE_HID 0x0100
+#define SDTX_DEVICE_TYPE_SSH 0x0200
+
+#define SDTX_DEVICE_TYPE_MASK 0x0f00
+#define SDTX_DEVICE_TYPE(value) ((value) & SDTX_DEVICE_TYPE_MASK)
+
+#define SDTX_BASE_TYPE_HID(id) ((id) | SDTX_DEVICE_TYPE_HID)
+#define SDTX_BASE_TYPE_SSH(id) ((id) | SDTX_DEVICE_TYPE_SSH)
+
+/**
+ * enum sdtx_device_mode - Mode describing how (and if) the clipboard is
+ * attached to the base of the device.
+ * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the
+ *                           device operates as a tablet.
+ * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base
+ *                           and the device operates as a laptop.
+ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse.
+ *                           The device operates as a tablet with keyboard and
+ *                           touchpad deactivated; however, the base battery
+ *                           and, if present in the specific device model, the
+ *                           dGPU are available to the system.
+ */
+enum sdtx_device_mode {
+ SDTX_DEVICE_MODE_TABLET = 0x00,
+ SDTX_DEVICE_MODE_LAPTOP = 0x01,
+ SDTX_DEVICE_MODE_STUDIO = 0x02,
+};
+
+/**
+ * struct sdtx_event - Event provided by reading from the DTX device file.
+ * @length: Length of the event payload, in bytes.
+ * @code: Event code, detailing what type of event this is.
+ * @data: Payload of the event, containing @length bytes.
+ *
+ * See &enum sdtx_event_code for currently valid event codes.
+ */
+struct sdtx_event {
+ __u16 length;
+ __u16 code;
+ __u8 data[];
+} __attribute__((__packed__));
+
+/**
+ * enum sdtx_event_code - Code describing the type of an event.
+ * @SDTX_EVENT_REQUEST: Detachment request event type.
+ * @SDTX_EVENT_CANCEL: Cancel detachment process event type.
+ * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type.
+ * @SDTX_EVENT_LATCH_STATUS: Latch status change event type.
+ * @SDTX_EVENT_DEVICE_MODE: Device mode change event type.
+ *
+ * Used in &struct sdtx_event to describe the type of the event. Further event
+ * codes are reserved for future use. Any event parser should be able to
+ * gracefully handle unknown events, i.e. by simply skipping them.
+ *
+ * Consult the DTX user-space interface documentation for details regarding
+ * the individual event types.
+ */
+enum sdtx_event_code {
+ SDTX_EVENT_REQUEST = 1,
+ SDTX_EVENT_CANCEL = 2,
+ SDTX_EVENT_BASE_CONNECTION = 3,
+ SDTX_EVENT_LATCH_STATUS = 4,
+ SDTX_EVENT_DEVICE_MODE = 5,
+};
+
+/**
+ * struct sdtx_base_info - Describes if and what type of base is connected.
+ * @state: The state of the connection. Valid values are %SDTX_BASE_DETACHED,
+ * %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base
+ * is attached but low clipboard battery prevents detachment). Other
+ * values are currently reserved.
+ * @base_id: The type of base connected. Zero if no base is connected.
+ */
+struct sdtx_base_info {
+ __u16 state;
+ __u16 base_id;
+} __attribute__((__packed__));
+
+/* IOCTLs */
+#define SDTX_IOCTL_EVENTS_ENABLE _IO(0xa5, 0x21)
+#define SDTX_IOCTL_EVENTS_DISABLE _IO(0xa5, 0x22)
+
+#define SDTX_IOCTL_LATCH_LOCK _IO(0xa5, 0x23)
+#define SDTX_IOCTL_LATCH_UNLOCK _IO(0xa5, 0x24)
+
+#define SDTX_IOCTL_LATCH_REQUEST _IO(0xa5, 0x25)
+#define SDTX_IOCTL_LATCH_CONFIRM _IO(0xa5, 0x26)
+#define SDTX_IOCTL_LATCH_HEARTBEAT _IO(0xa5, 0x27)
+#define SDTX_IOCTL_LATCH_CANCEL _IO(0xa5, 0x28)
+
+#define SDTX_IOCTL_GET_BASE_INFO _IOR(0xa5, 0x29, struct sdtx_base_info)
+#define SDTX_IOCTL_GET_DEVICE_MODE _IOR(0xa5, 0x2a, __u16)
+#define SDTX_IOCTL_GET_LATCH_STATUS _IOR(0xa5, 0x2b, __u16)
+
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */
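As a usage illustration, a hedged sketch of an event loop on the DTX device; it naively assumes each read() delivers exactly one complete event, and the little-endian two-byte device-mode payload is an assumption made for the example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/surface_aggregator/dtx.h>

int main(void)
{
	unsigned char buf[512];
	struct sdtx_event *ev = (struct sdtx_event *)buf;
	int fd = open("/dev/surface/dtx", O_RDONLY);

	if (fd < 0 || ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE) < 0)
		return 1;

	while (read(fd, buf, sizeof(buf)) > 0) {
		/* Payload layout assumed: mode as a little-endian __u16. */
		if (ev->code == SDTX_EVENT_DEVICE_MODE && ev->length >= 2)
			printf("device mode: %u\n",
			       ev->data[0] | (ev->data[1] << 8));
	}

	close(fd);
	return 0;
}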
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 7272f85d6d6a..01717181339e 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -3,7 +3,7 @@
#define _UAPI_LINUX_SWAB_H
#include <linux/types.h>
-#include <linux/compiler.h>
+#include <linux/stddef.h>
#include <asm/bitsperlong.h>
#include <asm/swab.h>
@@ -102,7 +102,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
#define __swab16(x) \
- (__builtin_constant_p((__u16)(x)) ? \
+ (__u16)(__builtin_constant_p(x) ? \
___constant_swab16(x) : \
__fswab16(x))
#endif
@@ -115,7 +115,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
#define __swab32(x) \
- (__builtin_constant_p((__u32)(x)) ? \
+ (__u32)(__builtin_constant_p(x) ? \
___constant_swab32(x) : \
__fswab32(x))
#endif
@@ -128,7 +128,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
#define __swab64(x) \
- (__builtin_constant_p((__u64)(x)) ? \
+ (__u64)(__builtin_constant_p(x) ? \
___constant_swab64(x) : \
__fswab64(x))
#endif
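The three hunks above restructure the fallback macros so that constant detection runs on the unmodified argument and a single outer cast fixes the width of the whole conditional expression, whichever branch is taken. A small sketch of both paths in user space:

#include <stdio.h>
#include <linux/swab.h>

int main(void)
{
	unsigned int v = 0xdeadbeef;	/* runtime value: __fswab32() path */

	/* Constant: folded at compile time via ___constant_swab16(). */
	printf("0x%04x\n", __swab16(0x1234));	/* prints 0x3412 */
	printf("0x%08x\n", __swab32(v));	/* prints 0xefbeadde */
	return 0;
}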
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index ee2dcfb3d660..ff1f38889dcf 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -16,12 +16,16 @@
#include <linux/types.h>
/**
- * struct sync_merge_data - data passed to merge ioctl
+ * struct sync_merge_data - SYNC_IOC_MERGE: merge two fences
* @name: name of new fence
* @fd2: file descriptor of second fence
* @fence: returns the fd of the new fence to userspace
* @flags: merge_data flags
* @pad: padding for 64-bit alignment, should always be zero
+ *
+ * Creates a new fence containing copies of the sync_pts in both
+ * the calling fd and sync_merge_data.fd2. Returns the new fence's
+ * fd in sync_merge_data.fence.
*/
struct sync_merge_data {
char name[32];
@@ -34,8 +38,8 @@ struct sync_merge_data {
/**
* struct sync_fence_info - detailed fence information
* @obj_name: name of parent sync_timeline
-* @driver_name: name of driver implementing the parent
-* @status: status of the fence 0:active 1:signaled <0:error
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the fence 0:active 1:signaled <0:error
* @flags: fence_info flags
* @timestamp_ns: timestamp of status change in nanoseconds
*/
@@ -48,14 +52,19 @@ struct sync_fence_info {
};
/**
- * struct sync_file_info - data returned from fence info ioctl
+ * struct sync_file_info - SYNC_IOC_FILE_INFO: get detailed information on a sync_file
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
* @flags: sync_file_info flags
- * @num_fences number of fences in the sync_file
+ * @num_fences: number of fences in the sync_file
* @pad: padding for 64-bit alignment, should always be zero
- * @sync_fence_info: pointer to array of structs sync_fence_info with all
+ * @sync_fence_info: pointer to array of struct &sync_fence_info with all
* fences in the sync_file
+ *
+ * Takes a struct sync_file_info. If num_fences is 0, the field is updated
+ * with the actual number of fences. If num_fences is > 0, the system will
+ * use the pointer provided in sync_fence_info to return up to num_fences of
+ * struct sync_fence_info, with detailed fence information.
*/
struct sync_file_info {
char name[32];
@@ -67,32 +76,38 @@ struct sync_file_info {
__u64 sync_fence_info;
};
+/**
+ * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE: set a deadline hint on a fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ *
+ * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline
+ *
+ * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For
+ * example
+ *
+ * clock_gettime(CLOCK_MONOTONIC, &t);
+ * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline
+ */
+struct sync_set_deadline {
+ __u64 deadline_ns;
+ /* Not strictly needed for alignment but gives some possibility
+ * for future extension:
+ */
+ __u64 pad;
+};
+
#define SYNC_IOC_MAGIC '>'
-/**
+/*
* Opcodes 0, 1 and 2 were burned during an API change to keep users of the
* old API from getting weird errors when trying to handle sync_files. The API
* change happened during the de-staging of the Sync Framework when there were
* no upstream users available.
*/
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FILE_INFO - get detailed information on a sync_file
- *
- * Takes a struct sync_file_info. If num_fences is 0, the field is updated
- * with the actual number of fences. If num_fences is > 0, the system will
- * use the pointer provided on sync_fence_info to return up to num_fences of
- * struct sync_fence_info, with detailed fence information.
- */
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline)
#endif /* _UAPI_LINUX_SYNC_H */
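Tying the pieces together, a hedged user-space sketch that merges two fence fds (assumed to come from elsewhere, e.g. a driver that exports sync_files) and then hints a deadline 16 ms out on the result, following the clock_gettime() pattern from the kernel-doc above:

#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <linux/sync_file.h>

/* Returns the merged fence fd, or -1 on error. */
int merge_with_deadline(int fence_a, int fence_b)
{
	struct sync_merge_data merge;
	struct sync_set_deadline dl;
	struct timespec t;

	memset(&merge, 0, sizeof(merge));
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);
	merge.fd2 = fence_b;
	if (ioctl(fence_a, SYNC_IOC_MERGE, &merge) < 0)
		return -1;

	clock_gettime(CLOCK_MONOTONIC, &t);
	memset(&dl, 0, sizeof(dl));
	dl.deadline_ns = t.tv_sec * 1000000000ULL + t.tv_nsec + 16000000;
	if (ioctl(merge.fence, SYNC_IOC_SET_DEADLINE, &dl) < 0)
		return -1;

	return merge.fence;
}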
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 27c1ed2822e6..8981f00204db 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -23,7 +23,7 @@
#ifndef _UAPI_LINUX_SYSCTL_H
#define _UAPI_LINUX_SYSCTL_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
@@ -482,6 +482,7 @@ enum
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
+ NET_IPV4_CONF_ARP_EVICT_NOCARRIER=23,
};
/* /proc/sys/net/ipv4/netfilter */
@@ -571,6 +572,7 @@ enum {
NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
+ NET_IPV6_RA_DEFRTR_METRIC=28,
__NET_IPV6_MAX
};
@@ -582,24 +584,25 @@ enum {
/* /proc/sys/net/<protocol>/neigh/<dev> */
enum {
- NET_NEIGH_MCAST_SOLICIT=1,
- NET_NEIGH_UCAST_SOLICIT=2,
- NET_NEIGH_APP_SOLICIT=3,
- NET_NEIGH_RETRANS_TIME=4,
- NET_NEIGH_REACHABLE_TIME=5,
- NET_NEIGH_DELAY_PROBE_TIME=6,
- NET_NEIGH_GC_STALE_TIME=7,
- NET_NEIGH_UNRES_QLEN=8,
- NET_NEIGH_PROXY_QLEN=9,
- NET_NEIGH_ANYCAST_DELAY=10,
- NET_NEIGH_PROXY_DELAY=11,
- NET_NEIGH_LOCKTIME=12,
- NET_NEIGH_GC_INTERVAL=13,
- NET_NEIGH_GC_THRESH1=14,
- NET_NEIGH_GC_THRESH2=15,
- NET_NEIGH_GC_THRESH3=16,
- NET_NEIGH_RETRANS_TIME_MS=17,
- NET_NEIGH_REACHABLE_TIME_MS=18,
+ NET_NEIGH_MCAST_SOLICIT = 1,
+ NET_NEIGH_UCAST_SOLICIT = 2,
+ NET_NEIGH_APP_SOLICIT = 3,
+ NET_NEIGH_RETRANS_TIME = 4,
+ NET_NEIGH_REACHABLE_TIME = 5,
+ NET_NEIGH_DELAY_PROBE_TIME = 6,
+ NET_NEIGH_GC_STALE_TIME = 7,
+ NET_NEIGH_UNRES_QLEN = 8,
+ NET_NEIGH_PROXY_QLEN = 9,
+ NET_NEIGH_ANYCAST_DELAY = 10,
+ NET_NEIGH_PROXY_DELAY = 11,
+ NET_NEIGH_LOCKTIME = 12,
+ NET_NEIGH_GC_INTERVAL = 13,
+ NET_NEIGH_GC_THRESH1 = 14,
+ NET_NEIGH_GC_THRESH2 = 15,
+ NET_NEIGH_GC_THRESH3 = 16,
+ NET_NEIGH_RETRANS_TIME_MS = 17,
+ NET_NEIGH_REACHABLE_TIME_MS = 18,
+ NET_NEIGH_INTERVAL_PROBE_TIME_MS = 19,
};
/* /proc/sys/net/dccp */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 95b1597f16ae..f925a77f19ed 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -46,6 +46,7 @@
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
+#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1 << 3) /* Keep buf after cmd completion */
struct tcmu_mailbox {
__u16 version;
@@ -75,6 +76,7 @@ struct tcmu_cmd_entry_hdr {
__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
#define TCMU_UFLAG_READ_LEN 0x2
+#define TCMU_UFLAG_KEEP_BUF 0x4
__u8 uflags;
} __packed;
@@ -117,7 +119,7 @@ struct tcmu_cmd_entry {
__u64 cdb_off;
__u64 __pad1;
__u64 __pad2;
- struct iovec iov[0];
+ __DECLARE_FLEX_ARRAY(struct iovec, iov);
} req;
struct {
__u8 scsi_status;
@@ -150,7 +152,7 @@ struct tcmu_tmr_entry {
__u32 cmd_cnt;
__u64 __pad3;
__u64 __pad4;
- __u16 cmd_ids[0];
+ __u16 cmd_ids[];
} __packed;
#define TCMU_OP_ALIGN_SIZE sizeof(__u64)
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index ccbd08709321..b50b2eb257a0 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 10
+#define TASKSTATS_VERSION 14
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -48,7 +48,8 @@ struct taskstats {
__u32 ac_exitcode; /* Exit status */
/* The accounting flags of a task as defined in <linux/acct.h>
- * Defined values are AFORK, ASU, ACOMPAT, ACORE, and AXSIG.
+ * Defined values are AFORK, ASU, ACOMPAT, ACORE, AXSIG, and AGROUP.
+ * (AGROUP since version 12).
*/
__u8 ac_flag; /* Record flags */
__u8 ac_nice; /* task_nice */
@@ -172,6 +173,35 @@ struct taskstats {
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
+
+ /* v11: Delay waiting for memory compaction */
+ __u64 compact_count;
+ __u64 compact_delay_total;
+
+ /* v12 begin */
+ __u32 ac_tgid; /* thread group ID */
+ /* Thread group walltime up to now. This is total process walltime if
+ * AGROUP flag is set.
+ */
+ __u64 ac_tgetime __attribute__((aligned(8)));
+ /* Lightweight information to identify process binary files.
+ * This leaves it to userspace to match this to a file system path, using
+ * the MAJOR() and MINOR() macros to identify the device and mount point,
+ * and the inode to identify the executable file. This corresponds to
+ * /proc/self/exe, i.e. the most recent exec(). Values are zero
+ * for kernel threads.
+ */
+ __u64 ac_exe_dev; /* program binary device ID */
+ __u64 ac_exe_inode; /* program binary inode number */
+ /* v12 end */
+
+ /* v13: Delay waiting for write-protect copy */
+ __u64 wpcopy_count;
+ __u64 wpcopy_delay_total;
+
+ /* v14: Delay waiting for IRQ/SOFTIRQ */
+ __u64 irq_count;
+ __u64 irq_delay_total;
};
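Each of the new v11/v13/v14 additions follows the existing taskstats convention of an event count paired with a cumulative delay in nanoseconds, so per-event averages fall out directly. A small hedged helper, assuming a struct taskstats already fetched over the taskstats genetlink interface:

#include <stdio.h>
#include <linux/taskstats.h>

/* Average per-event delay in nanoseconds, guarding against division by zero. */
static unsigned long long avg_ns(unsigned long long count,
				 unsigned long long total_ns)
{
	return count ? total_ns / count : 0;
}

void print_new_delays(const struct taskstats *ts)
{
	printf("compaction: %llu ns avg\n",
	       avg_ns(ts->compact_count, ts->compact_delay_total));
	printf("wp-copy:    %llu ns avg\n",
	       avg_ns(ts->wpcopy_count, ts->wpcopy_delay_total));
	printf("irq:        %llu ns avg\n",
	       avg_ns(ts->irq_count, ts->irq_delay_total));
}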
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 653c4f94f76e..fe6c8f8f3e8c 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_BPF_H
diff --git a/include/uapi/linux/tc_act/tc_ct.h b/include/uapi/linux/tc_act/tc_ct.h
index 5fb1d7ac1027..6c5200f0ed38 100644
--- a/include/uapi/linux/tc_act/tc_ct.h
+++ b/include/uapi/linux/tc_act/tc_ct.h
@@ -22,6 +22,9 @@ enum {
TCA_CT_NAT_PORT_MIN, /* be16 */
TCA_CT_NAT_PORT_MAX, /* be16 */
TCA_CT_PAD,
+ TCA_CT_HELPER_NAME, /* string */
+ TCA_CT_HELPER_FAMILY, /* u8 */
+ TCA_CT_HELPER_PROTO, /* u8 */
__TCA_CT_MAX
};
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
deleted file mode 100644
index c48d7da6750d..000000000000
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_TC_IPT_H
-#define __LINUX_TC_IPT_H
-
-#include <linux/pkt_cls.h>
-
-enum {
- TCA_IPT_UNSPEC,
- TCA_IPT_TABLE,
- TCA_IPT_HOOK,
- TCA_IPT_INDEX,
- TCA_IPT_CNT,
- TCA_IPT_TM,
- TCA_IPT_TARG,
- TCA_IPT_PAD,
- __TCA_IPT_MAX
-};
-#define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
-
-#endif
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 2500a0005d05..c61e76f3c23b 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -21,6 +21,7 @@ enum {
TCA_MIRRED_TM,
TCA_MIRRED_PARMS,
TCA_MIRRED_PAD,
+ TCA_MIRRED_BLOCKID,
__TCA_MIRRED_MAX
};
#define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_mpls.h b/include/uapi/linux/tc_act/tc_mpls.h
index 9360e95273c7..9e4e8f52a779 100644
--- a/include/uapi/linux/tc_act/tc_mpls.h
+++ b/include/uapi/linux/tc_act/tc_mpls.h
@@ -10,6 +10,7 @@
#define TCA_MPLS_ACT_PUSH 2
#define TCA_MPLS_ACT_MODIFY 3
#define TCA_MPLS_ACT_DEC_TTL 4
+#define TCA_MPLS_ACT_MAC_PUSH 5
struct tc_mpls {
tc_gen; /* generic TC action fields. */
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index f3e61b04fa01..f5cab7fc96ab 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -62,7 +62,7 @@ struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
- struct tc_pedit_key keys[0];
+ struct tc_pedit_key keys[] __counted_by(nkeys);
};
#define tc_pedit tc_pedit_sel
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 800e93377218..64032513cc4c 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -2,19 +2,6 @@
/*
* Copyright (c) 2008, Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
@@ -29,6 +16,7 @@
#define SKBEDIT_F_PTYPE 0x8
#define SKBEDIT_F_MASK 0x10
#define SKBEDIT_F_INHERITDSFIELD 0x20
+#define SKBEDIT_F_TXQ_SKBHASH 0x40
struct tc_skbedit {
tc_gen;
@@ -45,6 +33,7 @@ enum {
TCA_SKBEDIT_PTYPE,
TCA_SKBEDIT_MASK,
TCA_SKBEDIT_FLAGS,
+ TCA_SKBEDIT_QUEUE_MAPPING_MAX,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
index c525b3503797..ac62c9a993ea 100644
--- a/include/uapi/linux/tc_act/tc_skbmod.h
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -1,12 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2016, Jamal Hadi Salim
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-*/
+ */
#ifndef __LINUX_TC_SKBMOD_H
#define __LINUX_TC_SKBMOD_H
@@ -17,6 +12,7 @@
#define SKBMOD_F_SMAC 0x2
#define SKBMOD_F_ETYPE 0x4
#define SKBMOD_F_SWAPMAC 0x8
+#define SKBMOD_F_ECN 0x10
struct tc_skbmod {
tc_gen;
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 3f10dc4e7a4b..37c6f612f161 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -2,11 +2,6 @@
/*
* Copyright (c) 2016, Amir Vadai <amir@vadai.me>
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_TUNNEL_KEY_H
@@ -39,6 +34,7 @@ enum {
*/
TCA_TUNNEL_KEY_ENC_TOS, /* u8 */
TCA_TUNNEL_KEY_ENC_TTL, /* u8 */
+ TCA_TUNNEL_KEY_NO_FRAG, /* flag */
__TCA_TUNNEL_KEY_MAX,
};
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 168995b54a70..3e1f8e57cdd2 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_VLAN_H
@@ -16,6 +11,8 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_ACT_POP_ETH 4
+#define TCA_VLAN_ACT_PUSH_ETH 5
struct tc_vlan {
tc_gen;
@@ -30,6 +27,8 @@ enum {
TCA_VLAN_PUSH_VLAN_PROTOCOL,
TCA_VLAN_PAD,
TCA_VLAN_PUSH_VLAN_PRIORITY,
+ TCA_VLAN_PUSH_ETH_DST,
+ TCA_VLAN_PUSH_ETH_SRC,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index cfcb10b75483..c07e9f90c084 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -51,7 +51,7 @@ struct tcphdr {
fin:1;
#else
#error "Adjust your <asm/byteorder.h> defines"
-#endif
+#endif
__be16 window;
__sum16 check;
__be16 urg_ptr;
@@ -62,14 +62,14 @@ struct tcphdr {
* (union is compatible to any of its members)
* This means this part of the code is -fstrict-aliasing safe now.
*/
-union tcp_word_hdr {
+union tcp_word_hdr {
struct tcphdr hdr;
- __be32 words[5];
-};
+ __be32 words[5];
+};
-#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
+#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3])
-enum {
+enum {
TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
@@ -80,7 +80,7 @@ enum {
TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
-};
+};
/*
* TCP general constants
@@ -103,8 +103,8 @@ enum {
#define TCP_QUICKACK 12 /* Block/reenable quick acks */
#define TCP_CONGESTION 13 /* Congestion control algorithm */
#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
-#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
-#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
+#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
+#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
#define TCP_REPAIR_QUEUE 20
@@ -129,6 +129,11 @@ enum {
#define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */
+#define TCP_AO_ADD_KEY 38 /* Add/Set MKT */
+#define TCP_AO_DEL_KEY 39 /* Delete MKT */
+#define TCP_AO_INFO 40 /* Set/list TCP-AO per-socket options */
+#define TCP_AO_GET_KEYS 41 /* List MKT(s) */
+#define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */
#define TCP_REPAIR_ON 1
#define TCP_REPAIR_OFF 0
@@ -170,6 +175,7 @@ enum tcp_fastopen_client_fail {
#define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
+#define TCPI_OPT_USEC_TS 64 /* usec timestamps */
/*
* Sender's congestion state indicating normal or abnormal situations
@@ -284,6 +290,23 @@ struct tcp_info {
__u32 tcpi_snd_wnd; /* peer's advertised receive window after
* scaling (bytes)
*/
+ __u32 tcpi_rcv_wnd; /* local advertised receive window after
+ * scaling (bytes)
+ */
+
+ __u32 tcpi_rehash; /* PLB or timeout triggered rehash attempts */
+
+ __u16 tcpi_total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ __u16 tcpi_total_rto_recoveries; /* Total number of RTO
+ * recoveries, including any
+ * unfinished recovery.
+ */
+ __u32 tcpi_total_rto_time; /* Total time spent in RTO recoveries
+ * in milliseconds, including any
+ * unfinished recovery.
+ */
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -314,6 +337,8 @@ enum {
TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */
TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */
+ TCP_NLA_TTL, /* TTL or hop limit of a packet received */
+ TCP_NLA_REHASH, /* PLB and timeout triggered rehash attempts */
};
/* for TCP_MD5SIG socket option */
@@ -341,13 +366,121 @@ struct tcp_diag_md5sig {
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
};
+#define TCP_AO_MAXKEYLEN 80
+
+#define TCP_AO_KEYF_IFINDEX (1 << 0) /* L3 ifindex for VRF */
+#define TCP_AO_KEYF_EXCLUDE_OPT (1 << 1) /* "Indicates whether TCP
+ * options other than TCP-AO
+ * are included in the MAC
+ * calculation"
+ */
+
+struct tcp_ao_add { /* setsockopt(TCP_AO_ADD_KEY) */
+ struct __kernel_sockaddr_storage addr; /* peer's address for the key */
+ char alg_name[64]; /* crypto hash algorithm to use */
+ __s32 ifindex; /* L3 dev index for VRF */
+ __u32 set_current :1, /* set key as Current_key at once */
+ set_rnext :1, /* request it from peer with RNext_key */
+ reserved :30; /* must be 0 */
+ __u16 reserved2; /* padding, must be 0 */
+ __u8 prefix; /* peer's address prefix */
+ __u8 sndid; /* SendID for outgoing segments */
+ __u8 rcvid; /* RecvID to match for incoming seg */
+ __u8 maclen; /* length of authentication code (hash) */
+ __u8 keyflags; /* see TCP_AO_KEYF_ */
+ __u8 keylen; /* length of ::key */
+ __u8 key[TCP_AO_MAXKEYLEN];
+} __attribute__((aligned(8)));
+
+struct tcp_ao_del { /* setsockopt(TCP_AO_DEL_KEY) */
+ struct __kernel_sockaddr_storage addr; /* peer's address for the key */
+ __s32 ifindex; /* L3 dev index for VRF */
+ __u32 set_current :1, /* corresponding ::current_key */
+ set_rnext :1, /* corresponding ::rnext */
+ del_async :1, /* only valid for listen sockets */
+ reserved :29; /* must be 0 */
+ __u16 reserved2; /* padding, must be 0 */
+ __u8 prefix; /* peer's address prefix */
+ __u8 sndid; /* SendID for outgoing segments */
+ __u8 rcvid; /* RecvID to match for incoming seg */
+ __u8 current_key; /* KeyID to set as Current_key */
+ __u8 rnext; /* KeyID to set as Rnext_key */
+ __u8 keyflags; /* see TCP_AO_KEYF_ */
+} __attribute__((aligned(8)));
+
+struct tcp_ao_info_opt { /* setsockopt(TCP_AO_INFO), getsockopt(TCP_AO_INFO) */
+ /* Here 'in' is for setsockopt(), 'out' is for getsockopt() */
+ __u32 set_current :1, /* in/out: corresponding ::current_key */
+ set_rnext :1, /* in/out: corresponding ::rnext */
+ ao_required :1, /* in/out: don't accept non-AO connects */
+ set_counters :1, /* in: set/clear ::pkt_* counters */
+ accept_icmps :1, /* in/out: accept incoming ICMPs */
+ reserved :27; /* must be 0 */
+ __u16 reserved2; /* padding, must be 0 */
+ __u8 current_key; /* in/out: KeyID of Current_key */
+ __u8 rnext; /* in/out: keyid of RNext_key */
+ __u64 pkt_good; /* in/out: verified segments */
+ __u64 pkt_bad; /* in/out: failed verification */
+ __u64 pkt_key_not_found; /* in/out: could not find a key to verify */
+ __u64 pkt_ao_required; /* in/out: segments missing TCP-AO sign */
+ __u64 pkt_dropped_icmp; /* in/out: ICMPs that were ignored */
+} __attribute__((aligned(8)));
+
+struct tcp_ao_getsockopt { /* getsockopt(TCP_AO_GET_KEYS) */
+ struct __kernel_sockaddr_storage addr; /* in/out: dump keys for peer
+ * with this address/prefix
+ */
+ char alg_name[64]; /* out: crypto hash algorithm */
+ __u8 key[TCP_AO_MAXKEYLEN];
+ __u32 nkeys; /* in: size of the userspace buffer
+ * @optval, measured in @optlen - the
+ * sizeof(struct tcp_ao_getsockopt)
+ * out: number of keys that matched
+ */
+ __u16 is_current :1, /* in: match and dump Current_key,
+ * out: the dumped key is Current_key
+ */
+
+ is_rnext :1, /* in: match and dump RNext_key,
+ * out: the dumped key is RNext_key
+ */
+ get_all :1, /* in: dump all keys */
+ reserved :13; /* padding, must be 0 */
+ __u8 sndid; /* in/out: dump keys with SendID */
+ __u8 rcvid; /* in/out: dump keys with RecvID */
+ __u8 prefix; /* in/out: dump keys with address/prefix */
+ __u8 maclen; /* out: key's length of authentication
+ * code (hash)
+ */
+ __u8 keyflags; /* in/out: see TCP_AO_KEYF_ */
+ __u8 keylen; /* out: length of ::key */
+ __s32 ifindex; /* in/out: L3 dev index for VRF */
+ __u64 pkt_good; /* out: verified segments */
+ __u64 pkt_bad; /* out: segments that failed verification */
+} __attribute__((aligned(8)));
+
+struct tcp_ao_repair { /* {s,g}etsockopt(TCP_AO_REPAIR) */
+ __be32 snt_isn;
+ __be32 rcv_isn;
+ __u32 snd_sne;
+ __u32 rcv_sne;
+} __attribute__((aligned(8)));
+
/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
+#define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1
struct tcp_zerocopy_receive {
__u64 address; /* in: address of mapping */
__u32 length; /* in/out: number of bytes to map/mapped */
__u32 recv_skip_hint; /* out: amount of bytes to skip */
__u32 inq; /* out: amount of bytes in read queue */
__s32 err; /* out: socket error */
+ __u64 copybuf_address; /* in: copybuf address (small reads) */
+ __s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */
+ __u32 flags; /* in: flags */
+ __u64 msg_control; /* ancillary data */
+ __u64 msg_controllen;
+ __u32 msg_flags;
+ __u32 reserved; /* set to 0 for now */
};
#endif /* _UAPI_LINUX_TCP_H */
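To illustrate the new TCP-AO options, a hedged sketch that installs one MKT on a socket; the algorithm name, SendID/RecvID, and key bytes are placeholders chosen for the example, not recommendations:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

int add_ao_key(int sk, const struct sockaddr_in *peer)
{
	struct tcp_ao_add key;

	memset(&key, 0, sizeof(key));
	memcpy(&key.addr, peer, sizeof(*peer));
	strcpy(key.alg_name, "hmac(sha1)");	/* placeholder algorithm */
	key.prefix = 32;			/* match the full IPv4 address */
	key.sndid = 100;			/* placeholder SendID */
	key.rcvid = 100;			/* placeholder RecvID */
	key.keylen = 16;
	memset(key.key, 0xab, key.keylen);	/* placeholder key material */

	return setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &key, sizeof(key));
}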
diff --git a/include/uapi/linux/tdx-guest.h b/include/uapi/linux/tdx-guest.h
new file mode 100644
index 000000000000..a6a2098c08ff
--- /dev/null
+++ b/include/uapi/linux/tdx-guest.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for TDX guest driver
+ *
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_TDX_GUEST_H_
+#define _UAPI_LINUX_TDX_GUEST_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Length of the REPORTDATA used in TDG.MR.REPORT TDCALL */
+#define TDX_REPORTDATA_LEN 64
+
+/* Length of TDREPORT used in TDG.MR.REPORT TDCALL */
+#define TDX_REPORT_LEN 1024
+
+/**
+ * struct tdx_report_req - Request struct for TDX_CMD_GET_REPORT0 IOCTL.
+ *
+ * @reportdata: User buffer with REPORTDATA to be included into TDREPORT.
+ * Typically it can be some nonce provided by attestation
+ * service, so the generated TDREPORT can be uniquely verified.
+ * @tdreport: User buffer to store TDREPORT output from TDCALL[TDG.MR.REPORT].
+ */
+struct tdx_report_req {
+ __u8 reportdata[TDX_REPORTDATA_LEN];
+ __u8 tdreport[TDX_REPORT_LEN];
+};
+
+/*
+ * TDX_CMD_GET_REPORT0 - Get TDREPORT0 (a.k.a. TDREPORT subtype 0) using
+ * TDCALL[TDG.MR.REPORT]
+ *
+ * Return 0 on success, -EIO on TDCALL execution failure, and
+ * standard errno on other general error cases.
+ */
+#define TDX_CMD_GET_REPORT0 _IOWR('T', 1, struct tdx_report_req)
+
+#endif /* _UAPI_LINUX_TDX_GUEST_H_ */
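A hedged sketch of requesting a TDREPORT0 from user space; the /dev/tdx_guest node name and the nonce are assumptions made for the example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tdx-guest.h>

int main(void)
{
	struct tdx_report_req req;
	int fd = open("/dev/tdx_guest", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	memcpy(req.reportdata, "example-nonce", 13);	/* placeholder nonce */

	if (ioctl(fd, TDX_CMD_GET_REPORT0, &req) == 0)
		printf("got %d-byte TDREPORT\n", TDX_REPORT_LEN);

	close(fd);
	return 0;
}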
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index b619f37ee03e..23e57164693c 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -42,15 +42,14 @@
#define TEE_IOC_MAGIC 0xa4
#define TEE_IOC_BASE 0
-/* Flags relating to shared memory */
-#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
-#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
-
#define TEE_MAX_ARG_SIZE 1024
#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */
+#define TEE_GEN_CAP_MEMREF_NULL (1 << 3)/* NULL MemRef support */
+
+#define TEE_MEMREF_NULL (__u64)(-1) /* NULL MemRef Buffer */
/*
* TEE Implementation ID
@@ -200,6 +199,16 @@ struct tee_ioctl_buf_data {
* a part of a shared memory by specifying an offset (@a) and size (@b) of
* the object. To supply the entire shared memory object set the offset
* (@a) to 0 and size (@b) to the previously returned size of the object.
+ *
+ * A client may need to present a NULL pointer in the argument
+ * passed to a trusted application in the TEE.
+ * This is also a requirement in GlobalPlatform Client API v1.0c
+ * (section 3.2.5 memory references), which can be found at
+ * http://www.globalplatform.org/specificationsdevice.asp
+ *
+ * If a NULL pointer is passed to a TA in the TEE, the (@c)
+ * IOCTL parameter's value must be set to TEE_MEMREF_NULL, indicating a NULL
+ * memory reference.
*/
struct tee_ioctl_param {
__u64 attr;
@@ -342,7 +351,7 @@ struct tee_iocl_supp_send_arg {
};
/**
- * TEE_IOC_SUPPL_SEND - Receive a request for a supplicant function
+ * TEE_IOC_SUPPL_SEND - Send a response to a received request
*
* Takes a struct tee_ioctl_buf_data which contains a struct
* tee_iocl_supp_send_arg followed by an array of struct tee_param
diff --git a/include/uapi/linux/termios.h b/include/uapi/linux/termios.h
index 33961d4e4de0..e6da9d4433d1 100644
--- a/include/uapi/linux/termios.h
+++ b/include/uapi/linux/termios.h
@@ -5,19 +5,4 @@
#include <linux/types.h>
#include <asm/termios.h>
-#define NFF 5
-
-struct termiox
-{
- __u16 x_hflag;
- __u16 x_cflag;
- __u16 x_rflag[NFF];
- __u16 x_sflag;
-};
-
-#define RTSXOFF 0x0001 /* RTS flow control on input */
-#define CTSXON 0x0002 /* CTS flow control on output */
-#define DTRXOFF 0x0004 /* DTR flow control on input */
-#define DSRXON 0x0008 /* DCD flow control on output */
-
#endif
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
index c105054cbb57..fc78bf3aead7 100644
--- a/include/uapi/linux/thermal.h
+++ b/include/uapi/linux/thermal.h
@@ -44,7 +44,10 @@ enum thermal_genl_attr {
THERMAL_GENL_ATTR_CDEV_MAX_STATE,
THERMAL_GENL_ATTR_CDEV_NAME,
THERMAL_GENL_ATTR_GOV_NAME,
-
+ THERMAL_GENL_ATTR_CPU_CAPABILITY,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
+ THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
__THERMAL_GENL_ATTR_MAX,
};
#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
@@ -60,7 +63,7 @@ enum thermal_genl_event {
THERMAL_GENL_EVENT_UNSPEC,
THERMAL_GENL_EVENT_TZ_CREATE, /* Thermal zone creation */
THERMAL_GENL_EVENT_TZ_DELETE, /* Thermal zone deletion */
- THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabed */
+ THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabled */
THERMAL_GENL_EVENT_TZ_ENABLE, /* Thermal zone enabled */
THERMAL_GENL_EVENT_TZ_TRIP_UP, /* Trip point crossed the way up */
THERMAL_GENL_EVENT_TZ_TRIP_DOWN, /* Trip point crossed the way down */
@@ -71,6 +74,7 @@ enum thermal_genl_event {
THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */
THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */
THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */
+ THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */
__THERMAL_GENL_EVENT_MAX,
};
#define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1)
diff --git a/include/uapi/linux/thp7312.h b/include/uapi/linux/thp7312.h
new file mode 100644
index 000000000000..2b629e05daf9
--- /dev/null
+++ b/include/uapi/linux/thp7312.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * THine THP7312 user space header file.
+ *
+ * Copyright (C) 2021 THine Electronics, Inc.
+ * Copyright (C) 2023 Ideas on Board Oy
+ */
+
+#ifndef __UAPI_THP7312_H_
+#define __UAPI_THP7312_H_
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION (V4L2_CID_USER_THP7312_BASE + 0x01)
+#define V4L2_CID_THP7312_AUTO_FOCUS_METHOD (V4L2_CID_USER_THP7312_BASE + 0x02)
+#define V4L2_CID_THP7312_NOISE_REDUCTION_AUTO (V4L2_CID_USER_THP7312_BASE + 0x03)
+#define V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE (V4L2_CID_USER_THP7312_BASE + 0x04)
+
+#endif /* __UAPI_THP7312_H_ */
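These are ordinary V4L2 user controls, so they can be set with the usual control ioctls; a hedged sketch (the sub-device node name and the value range are assumptions for the example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/thp7312.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION,
		.value = 1,			/* enable; range assumed */
	};
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;

	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}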
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index add01db1daef..80ea15e12113 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -254,6 +254,8 @@ static inline int tipc_aead_key_size(struct tipc_aead_key *key)
return sizeof(*key) + key->keylen;
}
+#define TIPC_REKEYING_NOW (~0U)
+
/* The macros and functions below are deprecated:
*/
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 4dfc05651c98..c00adf2fe868 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -43,10 +43,6 @@
#include <linux/tipc.h>
#include <asm/byteorder.h>
-#ifndef __KERNEL__
-#include <arpa/inet.h> /* for ntohs etc. */
-#endif
-
/*
* Configuration
*
@@ -269,33 +265,33 @@ static inline int TLV_OK(const void *tlv, __u16 space)
*/
return (space >= TLV_SPACE(0)) &&
- (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+ (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_len) <= space);
}
static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
{
return TLV_OK(tlv, space) &&
- (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+ (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
}
static inline int TLV_GET_LEN(struct tlv_desc *tlv)
{
- return ntohs(tlv->tlv_len);
+ return __be16_to_cpu(tlv->tlv_len);
}
static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len)
{
- tlv->tlv_len = htons(len);
+ tlv->tlv_len = __cpu_to_be16(len);
}
static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type)
{
- return (ntohs(tlv->tlv_type) == type);
+ return (__be16_to_cpu(tlv->tlv_type) == type);
}
static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type)
{
- tlv->tlv_type = htons(type);
+ tlv->tlv_type = __cpu_to_be16(type);
}
static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
@@ -305,8 +301,8 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
tlv_len = TLV_LENGTH(len);
tlv_ptr = (struct tlv_desc *)tlv;
- tlv_ptr->tlv_type = htons(type);
- tlv_ptr->tlv_len = htons(tlv_len);
+ tlv_ptr->tlv_type = __cpu_to_be16(type);
+ tlv_ptr->tlv_len = __cpu_to_be16(tlv_len);
if (len && data) {
memcpy(TLV_DATA(tlv_ptr), data, len);
memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
@@ -348,7 +344,7 @@ static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
{
- __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+ __u16 tlv_space = TLV_ALIGN(__be16_to_cpu(list->tlv_ptr->tlv_len));
list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
list->tlv_space -= tlv_space;
@@ -404,9 +400,9 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
msg_len = TCM_LENGTH(data_len);
tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
- tcm_hdr->tcm_len = htonl(msg_len);
- tcm_hdr->tcm_type = htons(cmd);
- tcm_hdr->tcm_flags = htons(flags);
+ tcm_hdr->tcm_len = __cpu_to_be32(msg_len);
+ tcm_hdr->tcm_type = __cpu_to_be16(cmd);
+ tcm_hdr->tcm_flags = __cpu_to_be16(flags);
if (data_len && data) {
memcpy(TCM_DATA(msg), data, data_len);
memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index dc0d23a50e69..d847dd671d79 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -165,6 +165,8 @@ enum {
TIPC_NLA_NODE_UP, /* flag */
TIPC_NLA_NODE_ID, /* data */
TIPC_NLA_NODE_KEY, /* data */
+ TIPC_NLA_NODE_KEY_MASTER, /* flag */
+ TIPC_NLA_NODE_REKEYING, /* u32 */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index bcd2869ed472..b66a800389cc 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -39,6 +39,8 @@
/* TLS socket options */
#define TLS_TX 1 /* Set transmit parameters */
#define TLS_RX 2 /* Set receive parameters */
+#define TLS_TX_ZEROCOPY_RO 3 /* TX zerocopy (only sendfile now) */
+#define TLS_RX_EXPECT_NO_PAD 4 /* Attempt opportunistic zero-copy */
/* Supported versions */
#define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
@@ -77,6 +79,41 @@
#define TLS_CIPHER_AES_CCM_128_TAG_SIZE 16
#define TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE 8
+#define TLS_CIPHER_CHACHA20_POLY1305 54
+#define TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE 12
+#define TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE 32
+#define TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE 0
+#define TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE 16
+#define TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_SM4_GCM 55
+#define TLS_CIPHER_SM4_GCM_IV_SIZE 8
+#define TLS_CIPHER_SM4_GCM_KEY_SIZE 16
+#define TLS_CIPHER_SM4_GCM_SALT_SIZE 4
+#define TLS_CIPHER_SM4_GCM_TAG_SIZE 16
+#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_SM4_CCM 56
+#define TLS_CIPHER_SM4_CCM_IV_SIZE 8
+#define TLS_CIPHER_SM4_CCM_KEY_SIZE 16
+#define TLS_CIPHER_SM4_CCM_SALT_SIZE 4
+#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16
+#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_ARIA_GCM_128 57
+#define TLS_CIPHER_ARIA_GCM_128_IV_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_128_KEY_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_128_SALT_SIZE 4
+#define TLS_CIPHER_ARIA_GCM_128_TAG_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_ARIA_GCM_256 58
+#define TLS_CIPHER_ARIA_GCM_256_IV_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_256_KEY_SIZE 32
+#define TLS_CIPHER_ARIA_GCM_256_SALT_SIZE 4
+#define TLS_CIPHER_ARIA_GCM_256_TAG_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE 8
+
#define TLS_SET_RECORD_TYPE 1
#define TLS_GET_RECORD_TYPE 2
@@ -109,12 +146,54 @@ struct tls12_crypto_info_aes_ccm_128 {
unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE];
};
+struct tls12_crypto_info_chacha20_poly1305 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE];
+ unsigned char key[TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_sm4_gcm {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE];
+ unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_sm4_ccm {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE];
+ unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_aria_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_ARIA_GCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_ARIA_GCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_ARIA_GCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_aria_gcm_256 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_ARIA_GCM_256_IV_SIZE];
+ unsigned char key[TLS_CIPHER_ARIA_GCM_256_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_ARIA_GCM_256_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE];
+};
+
enum {
TLS_INFO_UNSPEC,
TLS_INFO_VERSION,
TLS_INFO_CIPHER,
TLS_INFO_TXCONF,
TLS_INFO_RXCONF,
+ TLS_INFO_ZC_RO_TX,
+ TLS_INFO_RX_NO_PAD,
__TLS_INFO_MAX,
};
#define TLS_INFO_MAX (__TLS_INFO_MAX - 1)
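As a sketch of how one of the new cipher structs is consumed, here is a hedged example enabling kTLS transmit with ChaCha20-Poly1305 on an established TCP socket; the zeroed key/iv material is a placeholder, as a real caller derives it from the TLS handshake:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* socket level for TLS, from the kernel */
#endif
#ifndef TCP_ULP
#define TCP_ULP 31	/* attach an upper layer protocol to the socket */
#endif

int enable_ktls_chacha_tx(int sk)
{
	struct tls12_crypto_info_chacha20_poly1305 crypto;

	/* Attach the TLS ULP first; required before TLS_TX can be set. */
	if (setsockopt(sk, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;

	memset(&crypto, 0, sizeof(crypto));	/* placeholder key/iv/seq */
	crypto.info.version = TLS_1_3_VERSION;
	crypto.info.cipher_type = TLS_CIPHER_CHACHA20_POLY1305;

	return setsockopt(sk, SOL_TLS, TLS_TX, &crypto, sizeof(crypto));
}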
diff --git a/include/uapi/linux/tps6594_pfsm.h b/include/uapi/linux/tps6594_pfsm.h
new file mode 100644
index 000000000000..c69569e0a7a2
--- /dev/null
+++ b/include/uapi/linux/tps6594_pfsm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace ABI for TPS6594 PMIC Pre-configurable Finite State Machine
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __TPS6594_PFSM_H
+#define __TPS6594_PFSM_H
+
+#include <linux/const.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct pmic_state_opt - PMIC state options
+ * @gpio_retention: if enabled, power rails associated with GPIO retention remain active
+ * @ddr_retention: if enabled, power rails associated with DDR retention remain active
+ * @mcu_only_startup_dest: if enabled, startup destination state is MCU_ONLY
+ */
+struct pmic_state_opt {
+ __u8 gpio_retention;
+ __u8 ddr_retention;
+ __u8 mcu_only_startup_dest;
+};
+
+/* Commands */
+#define PMIC_BASE 'P'
+
+#define PMIC_GOTO_STANDBY _IO(PMIC_BASE, 0)
+#define PMIC_GOTO_LP_STANDBY _IO(PMIC_BASE, 1)
+#define PMIC_UPDATE_PGM _IO(PMIC_BASE, 2)
+#define PMIC_SET_ACTIVE_STATE _IO(PMIC_BASE, 3)
+#define PMIC_SET_MCU_ONLY_STATE _IOW(PMIC_BASE, 4, struct pmic_state_opt)
+#define PMIC_SET_RETENTION_STATE _IOW(PMIC_BASE, 5, struct pmic_state_opt)
+
+#endif /* __TPS6594_PFSM_H */
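A hedged sketch of driving the PFSM from user space; the device node name is an assumption for the example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tps6594_pfsm.h>

int main(void)
{
	struct pmic_state_opt opt = {
		.gpio_retention = 1,	/* keep GPIO retention rails up */
		.ddr_retention = 1,	/* keep DDR retention rails up */
		.mcu_only_startup_dest = 0,
	};
	int fd = open("/dev/pfsm-0-0x48", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;

	if (ioctl(fd, PMIC_SET_RETENTION_STATE, &opt) < 0)
		perror("PMIC_SET_RETENTION_STATE");

	close(fd);
	return 0;
}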
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index 376cccf397be..68aeae2addec 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -6,8 +6,6 @@
* 'tty.h' defines some structures used by tty_io.c and some defines.
*/
-#define NR_LDISCS 30
-
/* line disciplines */
#define N_TTY 0
#define N_SLIP 1
@@ -38,5 +36,11 @@
#define N_NCI 25 /* NFC NCI UART */
#define N_SPEAKUP 26 /* Speakup communication with synths */
#define N_NULL 27 /* Null ldisc used for error handling */
+#define N_MCTP 28 /* MCTP-over-serial */
+#define N_DEVELOPMENT 29 /* Manual out-of-tree testing */
+#define N_CAN327 30 /* ELM327 based OBD-II interfaces */
+
+/* Always the newest line discipline + 1 */
+#define NR_LDISCS 31
#endif /* _UAPI_LINUX_TTY_H */
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index 900a32e63424..cf25056d4b27 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -39,7 +39,7 @@
* WARNING: These flags are no longer used and have been superseded by the
* TTY_PORT_ flags in the iflags field (and not userspace-visible)
*/
-#ifndef _KERNEL_
+#ifndef __KERNEL__
#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
#define ASYNCB_SUSPENDED 30 /* Serial port is suspended */
#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
@@ -73,15 +73,15 @@
#define ASYNC_MAGIC_MULTIPLIER (1U << ASYNCB_MAGIC_MULTIPLIER)
#define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1)
-#define ASYNC_DEPRECATED (ASYNC_SESSION_LOCKOUT | ASYNC_PGRP_LOCKOUT | \
- ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE)
+#define ASYNC_DEPRECATED (ASYNC_SPLIT_TERMIOS | ASYNC_SESSION_LOCKOUT | \
+ ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE)
#define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \
ASYNC_LOW_LATENCY)
#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
-#ifndef _KERNEL_
+#ifndef __KERNEL__
/* These flags are no longer used (and were always masked from userspace) */
#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index f6d2f83cbe29..6375a0684052 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -13,18 +13,25 @@
#include <linux/posix_types.h>
+#ifdef __SIZEOF_INT128__
+typedef __signed__ __int128 __s128 __attribute__((aligned(16)));
+typedef unsigned __int128 __u128 __attribute__((aligned(16)));
+#endif
/*
* Below are truly Linux-specific types that should never collide with
* any application/library that wants linux/types.h.
*/
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
-#define __bitwise__ __attribute__((bitwise))
+#define __bitwise __attribute__((bitwise))
#else
-#define __bitwise__
+#define __bitwise
#endif
-#define __bitwise __bitwise__
+
+/* The kernel doesn't use this legacy form, but user space does */
+#define __bitwise__ __bitwise
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
new file mode 100644
index 000000000000..c8dc5f8ea699
--- /dev/null
+++ b/include/uapi/linux/ublk_cmd.h
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef USER_BLK_DRV_CMD_INC_H
+#define USER_BLK_DRV_CMD_INC_H
+
+#include <linux/types.h>
+
+/* ublk server command definition */
+
+/*
+ * Admin commands, issued by ublk server, and handled by ublk driver.
+ *
+ * Legacy command definitions; don't use them in new applications, and
+ * don't add any more such definitions.
+ */
+#define UBLK_CMD_GET_QUEUE_AFFINITY 0x01
+#define UBLK_CMD_GET_DEV_INFO 0x02
+#define UBLK_CMD_ADD_DEV 0x04
+#define UBLK_CMD_DEL_DEV 0x05
+#define UBLK_CMD_START_DEV 0x06
+#define UBLK_CMD_STOP_DEV 0x07
+#define UBLK_CMD_SET_PARAMS 0x08
+#define UBLK_CMD_GET_PARAMS 0x09
+#define UBLK_CMD_START_USER_RECOVERY 0x10
+#define UBLK_CMD_END_USER_RECOVERY 0x11
+#define UBLK_CMD_GET_DEV_INFO2 0x12
+
+/* Any new ctrl command should be encoded with _IO*() */
+#define UBLK_U_CMD_GET_QUEUE_AFFINITY \
+ _IOR('u', UBLK_CMD_GET_QUEUE_AFFINITY, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_GET_DEV_INFO \
+ _IOR('u', UBLK_CMD_GET_DEV_INFO, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_ADD_DEV \
+ _IOWR('u', UBLK_CMD_ADD_DEV, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_DEL_DEV \
+ _IOWR('u', UBLK_CMD_DEL_DEV, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_START_DEV \
+ _IOWR('u', UBLK_CMD_START_DEV, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_STOP_DEV \
+ _IOWR('u', UBLK_CMD_STOP_DEV, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_SET_PARAMS \
+ _IOWR('u', UBLK_CMD_SET_PARAMS, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_GET_PARAMS \
+ _IOR('u', UBLK_CMD_GET_PARAMS, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_START_USER_RECOVERY \
+ _IOWR('u', UBLK_CMD_START_USER_RECOVERY, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_END_USER_RECOVERY \
+ _IOWR('u', UBLK_CMD_END_USER_RECOVERY, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_GET_DEV_INFO2 \
+ _IOR('u', UBLK_CMD_GET_DEV_INFO2, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_GET_FEATURES \
+ _IOR('u', 0x13, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_DEL_DEV_ASYNC \
+ _IOR('u', 0x14, struct ublksrv_ctrl_cmd)
+
+/*
+ * 64 bits are enough for now, and it should be easy to extend if we
+ * ever run out of feature flags
+ */
+#define UBLK_FEATURES_LEN 8
+
+/*
+ * IO commands, issued by ublk server, and handled by ublk driver.
+ *
+ * FETCH_REQ: issued via sqe(URING_CMD) beforehand for fetching an IO request
+ * from the ublk driver; should be issued only when starting the device. After
+ * the associated cqe is returned, the request's tag can be retrieved via
+ * cqe->userdata.
+ *
+ * COMMIT_AND_FETCH_REQ: issued via sqe(URING_CMD) after the ublk server has
+ * handled this IO request. The request's handling result is committed to the
+ * ublk driver; meanwhile a FETCH_REQ is piggybacked, and the FETCH_REQ has to
+ * be handled before completing the io request.
+ *
+ * NEED_GET_DATA: only used for write requests, to set the io addr and copy
+ * data. When NEED_GET_DATA is set, ublksrv has to issue the
+ * UBLK_IO_NEED_GET_DATA command after the ublk driver returns
+ * UBLK_IO_RES_NEED_GET_DATA.
+ *
+ * It is only used if ublksrv sets the UBLK_F_NEED_GET_DATA flag
+ * while starting a ublk device.
+ */
+
+/*
+ * Legacy IO command definitions; don't use them in new applications, and
+ * don't add any more such definitions.
+ */
+#define UBLK_IO_FETCH_REQ 0x20
+#define UBLK_IO_COMMIT_AND_FETCH_REQ 0x21
+#define UBLK_IO_NEED_GET_DATA 0x22
+
+/* Any new IO command should be encoded with _IOWR() */
+#define UBLK_U_IO_FETCH_REQ \
+ _IOWR('u', UBLK_IO_FETCH_REQ, struct ublksrv_io_cmd)
+#define UBLK_U_IO_COMMIT_AND_FETCH_REQ \
+ _IOWR('u', UBLK_IO_COMMIT_AND_FETCH_REQ, struct ublksrv_io_cmd)
+#define UBLK_U_IO_NEED_GET_DATA \
+ _IOWR('u', UBLK_IO_NEED_GET_DATA, struct ublksrv_io_cmd)
+
+/* only ABORT means no re-fetch */
+#define UBLK_IO_RES_OK 0
+#define UBLK_IO_RES_NEED_GET_DATA 1
+#define UBLK_IO_RES_ABORT (-ENODEV)
+
+#define UBLKSRV_CMD_BUF_OFFSET 0
+#define UBLKSRV_IO_BUF_OFFSET 0x80000000
+
+/* the tag field is 16 bits wide, but so far at most 4096 IOs are allowed per queue */
+#define UBLK_MAX_QUEUE_DEPTH 4096
+
+/* single IO buffer max size is 32MB */
+#define UBLK_IO_BUF_OFF 0
+#define UBLK_IO_BUF_BITS 25
+#define UBLK_IO_BUF_BITS_MASK ((1ULL << UBLK_IO_BUF_BITS) - 1)
+
+/* so at most 64K IOs for each queue */
+#define UBLK_TAG_OFF UBLK_IO_BUF_BITS
+#define UBLK_TAG_BITS 16
+#define UBLK_TAG_BITS_MASK ((1ULL << UBLK_TAG_BITS) - 1)
+
+/* max 4096 queues */
+#define UBLK_QID_OFF (UBLK_TAG_OFF + UBLK_TAG_BITS)
+#define UBLK_QID_BITS 12
+#define UBLK_QID_BITS_MASK ((1ULL << UBLK_QID_BITS) - 1)
+
+#define UBLK_MAX_NR_QUEUES (1U << UBLK_QID_BITS)
+
+#define UBLKSRV_IO_BUF_TOTAL_BITS (UBLK_QID_OFF + UBLK_QID_BITS)
+#define UBLKSRV_IO_BUF_TOTAL_SIZE (1ULL << UBLKSRV_IO_BUF_TOTAL_BITS)
+
+/*
+ * Zero copy requires a 4k block size, and can remap the ublk driver's io
+ * request into ublksrv's vm space
+ */
+#define UBLK_F_SUPPORT_ZERO_COPY (1ULL << 0)
+
+/*
+ * Force io cmds to complete via io_uring_cmd_complete_in_task() so that
+ * performance can easily be compared against using task_work_add()
+ */
+#define UBLK_F_URING_CMD_COMP_IN_TASK (1ULL << 1)
+
+/*
+ * User should issue io cmd again for write requests to
+ * set io buffer address and copy data from bio vectors
+ * to the userspace io buffer.
+ *
+ * In this mode, task_work is not used.
+ */
+#define UBLK_F_NEED_GET_DATA (1ULL << 2)
+
+#define UBLK_F_USER_RECOVERY (1ULL << 3)
+
+#define UBLK_F_USER_RECOVERY_REISSUE (1ULL << 4)
+
+/*
+ * Unprivileged users can create /dev/ublkcN and /dev/ublkbN.
+ *
+ * /dev/ublk-control needs to be available to unprivileged users, which
+ * can be arranged via a udev rule that makes all control commands
+ * available to them. Except for UBLK_CMD_ADD_DEV, all other commands are
+ * only allowed for the owner of the specified device.
+ *
+ * When userspace sends UBLK_CMD_ADD_DEV, the device pair's owner_uid and
+ * owner_gid are stored in ublksrv_ctrl_dev_info by the kernel. So far
+ * only the current user's uid/gid is stored, which means the owner of
+ * the created device is always the current user.
+ *
+ * A udev rule is still needed to apply OWNER/GROUP with the stored
+ * owner_uid and owner_gid.
+ *
+ * The ublk server can then run as an unprivileged user, and /dev/ublkbN
+ * can be accessed and managed by its owner, represented by
+ * owner_uid/owner_gid.
+ */
+#define UBLK_F_UNPRIVILEGED_DEV (1ULL << 5)
+
+/* use ioctl encoding for uring command */
+#define UBLK_F_CMD_IOCTL_ENCODE (1ULL << 6)
+
+/* Copy between request and user buffer by pread()/pwrite() */
+#define UBLK_F_USER_COPY (1ULL << 7)
+
+/*
+ * User space sets this flag when setting up the device to request zoned
+ * storage support. The kernel may deny the request by returning an error.
+ */
+#define UBLK_F_ZONED (1ULL << 8)
+
+/* device state */
+#define UBLK_S_DEV_DEAD 0
+#define UBLK_S_DEV_LIVE 1
+#define UBLK_S_DEV_QUIESCED 2
+
+/* shipped via sqe->cmd of io_uring command */
+struct ublksrv_ctrl_cmd {
+ /* sent to which device, must be valid */
+ __u32 dev_id;
+
+ /* sent to which queue, must be -1 if the cmd isn't for a queue */
+ __u16 queue_id;
+ /*
+ * cmd specific buffer, can be IN or OUT.
+ */
+ __u16 len;
+ __u64 addr;
+
+ /* inline data */
+ __u64 data[1];
+
+ /*
+ * Used for UBLK_F_UNPRIVILEGED_DEV and UBLK_CMD_GET_DEV_INFO2
+ * only; includes the terminating NUL character
+ */
+ __u16 dev_path_len;
+ __u16 pad;
+ __u32 reserved;
+};
+
+struct ublksrv_ctrl_dev_info {
+ __u16 nr_hw_queues;
+ __u16 queue_depth;
+ __u16 state;
+ __u16 pad0;
+
+ __u32 max_io_buf_bytes;
+ __u32 dev_id;
+
+ __s32 ublksrv_pid;
+ __u32 pad1;
+
+ __u64 flags;
+
+ /* For ublksrv internal use, invisible to ublk driver */
+ __u64 ublksrv_flags;
+
+ __u32 owner_uid; /* stored by kernel */
+ __u32 owner_gid; /* stored by kernel */
+ __u64 reserved1;
+ __u64 reserved2;
+};
+
+#define UBLK_IO_OP_READ 0
+#define UBLK_IO_OP_WRITE 1
+#define UBLK_IO_OP_FLUSH 2
+#define UBLK_IO_OP_DISCARD 3
+#define UBLK_IO_OP_WRITE_SAME 4
+#define UBLK_IO_OP_WRITE_ZEROES 5
+#define UBLK_IO_OP_ZONE_OPEN 10
+#define UBLK_IO_OP_ZONE_CLOSE 11
+#define UBLK_IO_OP_ZONE_FINISH 12
+#define UBLK_IO_OP_ZONE_APPEND 13
+#define UBLK_IO_OP_ZONE_RESET_ALL 14
+#define UBLK_IO_OP_ZONE_RESET 15
+/*
+ * Construct a zone report. The report request is carried in `struct
+ * ublksrv_io_desc`. The `start_sector` field must be the first sector of a zone
+ * and shall indicate the first zone of the report. The `nr_zones` shall
+ * indicate how many zones should be reported at most. The report shall be
+ * delivered as a `struct blk_zone` array. To report fewer zones than requested,
+ * zero the last entry of the returned array.
+ *
+ * Related definitions (blk_zone, blk_zone_cond, blk_zone_type, ...) in
+ * include/uapi/linux/blkzoned.h are part of ublk UAPI.
+ */
+#define UBLK_IO_OP_REPORT_ZONES 18
+
+#define UBLK_IO_F_FAILFAST_DEV (1U << 8)
+#define UBLK_IO_F_FAILFAST_TRANSPORT (1U << 9)
+#define UBLK_IO_F_FAILFAST_DRIVER (1U << 10)
+#define UBLK_IO_F_META (1U << 11)
+#define UBLK_IO_F_FUA (1U << 13)
+#define UBLK_IO_F_NOUNMAP (1U << 15)
+#define UBLK_IO_F_SWAP (1U << 16)
+
+/*
+ * An io cmd is described by this structure and stored in shared memory,
+ * indexed by request tag.
+ *
+ * The data is stored by the ublk driver, and read by ublksrv after a
+ * fetch command returns.
+ */
+struct ublksrv_io_desc {
+ /* op: bit 0-7, flags: bit 8-31 */
+ __u32 op_flags;
+
+ union {
+ __u32 nr_sectors;
+ __u32 nr_zones; /* for UBLK_IO_OP_REPORT_ZONES */
+ };
+
+ /* start sector for this io */
+ __u64 start_sector;
+
+ /* buffer address in ublksrv daemon vm space, from ublk driver */
+ __u64 addr;
+};
+
+static inline __u8 ublksrv_get_op(const struct ublksrv_io_desc *iod)
+{
+ return iod->op_flags & 0xff;
+}
+
+static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod)
+{
+ return iod->op_flags >> 8;
+}
+
+/* issued to ublk driver via /dev/ublkcN */
+struct ublksrv_io_cmd {
+ __u16 q_id;
+
+ /* identifies which io the fetch/commit applies to */
+ __u16 tag;
+
+ /* io result, valid for COMMIT* commands only */
+ __s32 result;
+
+ union {
+ /*
+ * userspace buffer address in ublksrv daemon process, valid for
+ * FETCH* commands only
+ *
+ * `addr` should not be used when UBLK_F_USER_COPY is enabled,
+ * because userspace handles data copy by pread()/pwrite() over
+ * /dev/ublkcN. But in case of UBLK_F_ZONED, this union is
+ * re-used to pass back the allocated LBA for
+ * UBLK_IO_OP_ZONE_APPEND, which actually depends on
+ * UBLK_F_USER_COPY
+ */
+ __u64 addr;
+ __u64 zone_append_lba;
+ };
+};
+
+struct ublk_param_basic {
+#define UBLK_ATTR_READ_ONLY (1 << 0)
+#define UBLK_ATTR_ROTATIONAL (1 << 1)
+#define UBLK_ATTR_VOLATILE_CACHE (1 << 2)
+#define UBLK_ATTR_FUA (1 << 3)
+ __u32 attrs;
+ __u8 logical_bs_shift;
+ __u8 physical_bs_shift;
+ __u8 io_opt_shift;
+ __u8 io_min_shift;
+
+ __u32 max_sectors;
+ __u32 chunk_sectors;
+
+ __u64 dev_sectors;
+ __u64 virt_boundary_mask;
+};
+
+struct ublk_param_discard {
+ __u32 discard_alignment;
+
+ __u32 discard_granularity;
+ __u32 max_discard_sectors;
+
+ __u32 max_write_zeroes_sectors;
+ __u16 max_discard_segments;
+ __u16 reserved0;
+};
+
+/*
+ * read-only, can't be set via UBLK_CMD_SET_PARAMS; disk_devt is available
+ * after the device is started
+ */
+struct ublk_param_devt {
+ __u32 char_major;
+ __u32 char_minor;
+ __u32 disk_major;
+ __u32 disk_minor;
+};
+
+struct ublk_param_zoned {
+ __u32 max_open_zones;
+ __u32 max_active_zones;
+ __u32 max_zone_append_sectors;
+ __u8 reserved[20];
+};
+
+struct ublk_params {
+ /*
+ * Total length of parameters. Userspace has to set 'len' for both the
+ * SET_PARAMS and GET_PARAMS commands, and the driver may update 'len'
+ * if the two sides use different versions of 'ublk_params'; the same
+ * applies to the 'types' field.
+ */
+ __u32 len;
+#define UBLK_PARAM_TYPE_BASIC (1 << 0)
+#define UBLK_PARAM_TYPE_DISCARD (1 << 1)
+#define UBLK_PARAM_TYPE_DEVT (1 << 2)
+#define UBLK_PARAM_TYPE_ZONED (1 << 3)
+ __u32 types; /* types of parameter included */
+
+ struct ublk_param_basic basic;
+ struct ublk_param_discard discard;
+ struct ublk_param_devt devt;
+ struct ublk_param_zoned zoned;
+};
+
+#endif
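The bit layout above determines where each request's data buffer lives in
the mmap space of /dev/ublkcN. A minimal sketch of the offset computation
under that layout; ublkc_fd, q_id, tag, and max_io_buf_bytes (the latter
from ublksrv_ctrl_dev_info) are assumed, and the helper name is
illustrative:

#include <stdint.h>
#include <sys/mman.h>

/* byte offset of the data buffer for (q_id, tag), following the
 * QID/TAG bit layout defined above */
static inline uint64_t ublk_io_buf_off(uint16_t q_id, uint16_t tag)
{
	return UBLKSRV_IO_BUF_OFFSET +
		(((uint64_t)q_id << UBLK_QID_OFF) |
		 ((uint64_t)tag << UBLK_TAG_OFF));
}

/* map one request's buffer (at most 32MB per the 25 buffer bits) */
void *buf = mmap(NULL, max_io_buf_bytes, PROT_READ | PROT_WRITE,
		 MAP_SHARED, ublkc_fd, (off_t)ublk_io_buf_off(q_id, tag));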
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 76b7c3f6cd0d..c917c53070d5 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -341,7 +341,7 @@ struct uac_feature_unit_descriptor {
__u8 bUnitID;
__u8 bSourceID;
__u8 bControlSize;
- __u8 bmaControls[0]; /* variable length */
+ __u8 bmaControls[]; /* variable length */
} __attribute__((packed));
static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc)
diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h
index 6d61550959ef..1924cf665448 100644
--- a/include/uapi/linux/usb/cdc.h
+++ b/include/uapi/linux/usb/cdc.h
@@ -171,7 +171,7 @@ struct usb_cdc_mdlm_detail_desc {
/* type is associated with mdlm_desc.bGUID */
__u8 bGuidDescriptorType;
- __u8 bDetailData[0];
+ __u8 bDetailData[];
} __attribute__ ((packed));
/* "OBEX Control Model Functional Descriptor" */
@@ -271,6 +271,10 @@ struct usb_cdc_line_coding {
__u8 bDataBits;
} __attribute__ ((packed));
+/* Control Signal Bitmap Values from 6.2.14 SetControlLineState */
+#define USB_CDC_CTRL_DTR (1 << 0)
+#define USB_CDC_CTRL_RTS (1 << 1)
+
/* table 62; bits in multicast filter */
#define USB_CDC_PACKET_TYPE_PROMISCUOUS (1 << 0)
#define USB_CDC_PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */
@@ -302,6 +306,15 @@ struct usb_cdc_notification {
__le16 wLength;
} __attribute__ ((packed));
+/* UART State Bitmap Values from 6.3.5 SerialState */
+#define USB_CDC_SERIAL_STATE_DCD (1 << 0)
+#define USB_CDC_SERIAL_STATE_DSR (1 << 1)
+#define USB_CDC_SERIAL_STATE_BREAK (1 << 2)
+#define USB_CDC_SERIAL_STATE_RING_SIGNAL (1 << 3)
+#define USB_CDC_SERIAL_STATE_FRAMING (1 << 4)
+#define USB_CDC_SERIAL_STATE_PARITY (1 << 5)
+#define USB_CDC_SERIAL_STATE_OVERRUN (1 << 6)
+
struct usb_cdc_speed_change {
__le32 DLBitRRate; /* contains the downlink bit rate (IN pipe) */
__le32 ULBitRate; /* contains the uplink bit rate (OUT pipe) */
@@ -379,7 +392,7 @@ struct usb_cdc_ncm_ndp16 {
__le32 dwSignature;
__le16 wLength;
__le16 wNextNdpIndex;
- struct usb_cdc_ncm_dpe16 dpe16[0];
+ struct usb_cdc_ncm_dpe16 dpe16[];
} __attribute__ ((packed));
/* 32-bit NCM Datagram Pointer Entry */
@@ -395,7 +408,7 @@ struct usb_cdc_ncm_ndp32 {
__le16 wReserved6;
__le32 dwNextNdpIndex;
__le32 dwReserved12;
- struct usb_cdc_ncm_dpe32 dpe32[0];
+ struct usb_cdc_ncm_dpe32 dpe32[];
} __attribute__ ((packed));
/* CDC NCM subclass 3.2.1 and 3.2.2 */
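As a usage sketch, the two control-line bits added above form the wValue
of the class-specific SET_CONTROL_LINE_STATE request (see
USB_CDC_REQ_SET_CONTROL_LINE_STATE elsewhere in this header); asserting
DTR and RTS on a CDC-ACM port is just:

/* wValue for SET_CONTROL_LINE_STATE: assert DTR and RTS */
__u16 line_state = USB_CDC_CTRL_DTR | USB_CDC_CTRL_RTS;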
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index fb0cd24c392c..ce4c83f2e66a 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -15,10 +15,8 @@
/* This is arbitrary.
* From USB 2.0 spec Table 11-13, offset 7, a hub can
* have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
+ * Upcoming hardware might raise that limit.
+ * Because the arrays need to add a bit for hub status data, we
* use 31, so plus one evens out to four bytes.
*/
#define USB_MAXCHILDREN 31
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 0f865ae4ba89..44d73ba8788d 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the master/host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB master/host or as a USB slave/device. That means
* the master and slave side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
@@ -376,7 +373,10 @@ struct usb_string_descriptor {
__u8 bLength;
__u8 bDescriptorType;
- __le16 wData[1]; /* UTF-16LE encoded */
+ union {
+ __le16 legacy_padding;
+ __DECLARE_FLEX_ARRAY(__le16, wData); /* UTF-16LE encoded */
+ };
} __attribute__ ((packed));
/* note that "string" zero is special, it holds language codes that
@@ -763,6 +763,8 @@ struct usb_otg20_descriptor {
#define USB_OTG_SRP (1 << 0)
#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
#define USB_OTG_ADP (1 << 2) /* support ADP */
+/* OTG 3.0 */
+#define USB_OTG_RSP (1 << 3) /* support RSP */
#define OTG_STS_SELECTOR 0xF000 /* OTG status selector */
/*-------------------------------------------------------------------------*/
@@ -818,7 +820,7 @@ struct usb_key_descriptor {
__u8 tTKID[3];
__u8 bReserved;
- __u8 bKeyData[0];
+ __u8 bKeyData[];
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
@@ -948,6 +950,22 @@ struct usb_ss_container_id_descriptor {
#define USB_DT_USB_SS_CONTN_ID_SIZE 20
/*
+ * Platform Device Capability descriptor: Defines platform specific device
+ * capabilities
+ */
+#define USB_PLAT_DEV_CAP_TYPE 5
+struct usb_plat_dev_cap_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u8 bReserved;
+ __u8 UUID[16];
+ __u8 CapabilityData[];
+} __attribute__((packed));
+
+#define USB_DT_USB_PLAT_DEV_CAP_SIZE(capability_data_size) (20 + capability_data_size)
+
+/*
* SuperSpeed Plus USB Capability descriptor: Defines the set of
* SuperSpeed Plus USB specific device level capabilities
*/
@@ -965,12 +983,29 @@ struct usb_ssp_cap_descriptor {
#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
__le16 wReserved;
- __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */
+ union {
+ __le32 legacy_padding;
+ /* list of sublink speed attrib entries */
+ __DECLARE_FLEX_ARRAY(__le32, bmSublinkSpeedAttr);
+ };
#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */
#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */
+#define USB_SSP_SUBLINK_SPEED_LSE_BPS 0
+#define USB_SSP_SUBLINK_SPEED_LSE_KBPS 1
+#define USB_SSP_SUBLINK_SPEED_LSE_MBPS 2
+#define USB_SSP_SUBLINK_SPEED_LSE_GBPS 3
+
#define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */
+#define USB_SSP_SUBLINK_SPEED_ST_SYM_RX 0
+#define USB_SSP_SUBLINK_SPEED_ST_ASYM_RX 1
+#define USB_SSP_SUBLINK_SPEED_ST_SYM_TX 2
+#define USB_SSP_SUBLINK_SPEED_ST_ASYM_TX 3
+
#define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */
#define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */
+#define USB_SSP_SUBLINK_SPEED_LP_SS 0
+#define USB_SSP_SUBLINK_SPEED_LP_SSP 1
+
#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
} __attribute__((packed));
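The new field-value defines pair with the existing masks; decoding one
bmSublinkSpeedAttr entry is a mask-and-shift per field, with the shift
counts implied by the masks above. A sketch, assuming attrs[] already
holds the entries in CPU byte order:

__u32 attr = attrs[i];                                    /* one entry */
unsigned ssid = attr & USB_SSP_SUBLINK_SPEED_SSID;
unsigned lse  = (attr & USB_SSP_SUBLINK_SPEED_LSE) >> 4;  /* *_LSE_BPS..GBPS */
unsigned st   = (attr & USB_SSP_SUBLINK_SPEED_ST)  >> 6;  /* *_ST_SYM_RX etc. */
unsigned lp   = (attr & USB_SSP_SUBLINK_SPEED_LP)  >> 14; /* *_LP_SS or _SSP */
unsigned lsm  = (attr & USB_SSP_SUBLINK_SPEED_LSM) >> 16; /* speed mantissa */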
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index d77ee6b65328..9f88de9c3d66 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -73,8 +73,10 @@ struct usb_os_desc_header {
struct usb_ext_compat_desc {
__u8 bFirstInterfaceNumber;
__u8 Reserved1;
- __u8 CompatibleID[8];
- __u8 SubCompatibleID[8];
+ __struct_group(/* no tag */, IDs, /* no attrs */,
+ __u8 CompatibleID[8];
+ __u8 SubCompatibleID[8];
+ );
__u8 Reserved2[6];
};
@@ -84,6 +86,22 @@ struct usb_ext_prop_desc {
__le16 wPropertyNameLength;
} __attribute__((packed));
+/* Flags for usb_ffs_dmabuf_transfer_req->flags (none for now) */
+#define USB_FFS_DMABUF_TRANSFER_MASK 0x0
+
+/**
+ * struct usb_ffs_dmabuf_transfer_req - Transfer request for a DMABUF object
+ * @fd: file descriptor of the DMABUF object
+ * @flags: one or more USB_FFS_DMABUF_TRANSFER_* flags
+ * @length: number of bytes used in this DMABUF for the data transfer.
+ * Should generally be set to the DMABUF's size.
+ */
+struct usb_ffs_dmabuf_transfer_req {
+ int fd;
+ __u32 flags;
+ __u64 length;
+} __attribute__((packed));
+
#ifndef __KERNEL__
/*
@@ -288,6 +306,31 @@ struct usb_functionfs_event {
#define FUNCTIONFS_ENDPOINT_DESC _IOR('g', 130, \
struct usb_endpoint_descriptor)
+/*
+ * Attach the DMABUF object, identified by its file descriptor, to the
+ * data endpoint. Returns zero on success, and a negative errno value
+ * on error.
+ */
+#define FUNCTIONFS_DMABUF_ATTACH _IOW('g', 131, int)
+
+
+/*
+ * Detach the given DMABUF object, identified by its file descriptor,
+ * from the data endpoint. Returns zero on success, and a negative
+ * errno value on error. Note that closing the endpoint's file
+ * descriptor will automatically detach all attached DMABUFs.
+ */
+#define FUNCTIONFS_DMABUF_DETACH _IOW('g', 132, int)
+/*
+ * Enqueue the previously attached DMABUF to the transfer queue.
+ * The argument is a structure that packs the DMABUF's file descriptor,
+ * the size in bytes to transfer (which should generally correspond to
+ * the size of the DMABUF), and a 'flags' field which is unused
+ * for now. Returns zero on success, and a negative errno value on
+ * error.
+ */
+#define FUNCTIONFS_DMABUF_TRANSFER _IOW('g', 133, \
+ struct usb_ffs_dmabuf_transfer_req)
#endif /* _UAPI__LINUX_FUNCTIONFS_H__ */
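Taken together, the three ioctls above form a DMABUF transfer lifecycle
on an endpoint file. A hedged sketch: ep_fd is an open endpoint file,
dmabuf_fd/dmabuf_size come from a DMABUF exporter (udmabuf, a DRM driver,
...), and ATTACH/DETACH are assumed to take the fd as the ioctl argument
value:

struct usb_ffs_dmabuf_transfer_req req = {
	.fd     = dmabuf_fd,
	.flags  = 0,           /* no flags defined yet */
	.length = dmabuf_size, /* usually the whole DMABUF */
};

if (ioctl(ep_fd, FUNCTIONFS_DMABUF_ATTACH, dmabuf_fd) == 0) {
	ioctl(ep_fd, FUNCTIONFS_DMABUF_TRANSFER, &req);
	/* ... wait for the DMABUF's fence to signal completion ... */
	ioctl(ep_fd, FUNCTIONFS_DMABUF_DETACH, dmabuf_fd);
}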
diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h
index 652f169a019e..8d7824dde1b2 100644
--- a/include/uapi/linux/usb/g_uvc.h
+++ b/include/uapi/linux/usb/g_uvc.h
@@ -21,6 +21,9 @@
#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
+#define UVC_STRING_CONTROL_IDX 0
+#define UVC_STRING_STREAMING_IDX 1
+
struct uvc_request_data {
__s32 length;
__u8 data[60];
diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h
index 0be685272eb1..f0224a8dc858 100644
--- a/include/uapi/linux/usb/raw_gadget.h
+++ b/include/uapi/linux/usb/raw_gadget.h
@@ -44,6 +44,16 @@ enum usb_raw_event_type {
/* This event is queued when a new control request arrived to ep0. */
USB_RAW_EVENT_CONTROL = 2,
+ /*
+ * These events are queued when the gadget driver is suspended,
+ * resumed, reset, or disconnected. Note that some UDCs (e.g. dwc2)
+ * report a disconnect event instead of a reset.
+ */
+ USB_RAW_EVENT_SUSPEND = 3,
+ USB_RAW_EVENT_RESUME = 4,
+ USB_RAW_EVENT_RESET = 5,
+ USB_RAW_EVENT_DISCONNECT = 6,
+
/* The list might grow in the future. */
};
@@ -54,13 +64,13 @@ enum usb_raw_event_type {
* actual length of the fetched event data.
* @data: A buffer to store the fetched event data.
*
- * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT,
- * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL.
+ * The fetched event data buffer contains struct usb_ctrlrequest for
+ * USB_RAW_EVENT_CONTROL and is empty for other events.
*/
struct usb_raw_event {
__u32 type;
__u32 length;
- __u8 data[0];
+ __u8 data[];
};
#define USB_RAW_IO_FLAGS_ZERO 0x0001
@@ -90,7 +100,7 @@ struct usb_raw_ep_io {
__u16 ep;
__u16 flags;
__u32 length;
- __u8 data[0];
+ __u8 data[];
};
/* Maximum number of non-control endpoints in struct usb_raw_eps_info. */
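A sketch of consuming the new lifecycle events, assuming
USB_RAW_IOCTL_EVENT_FETCH (defined elsewhere in this header) and an open
raw-gadget descriptor raw_fd; ev->length is an in/out size of the
trailing data:

char evbuf[sizeof(struct usb_raw_event) + sizeof(struct usb_ctrlrequest)];
struct usb_raw_event *ev = (struct usb_raw_event *)evbuf;

ev->length = sizeof(struct usb_ctrlrequest);
if (ioctl(raw_fd, USB_RAW_IOCTL_EVENT_FETCH, ev) == 0) {
	switch (ev->type) {
	case USB_RAW_EVENT_RESET:
	case USB_RAW_EVENT_DISCONNECT: /* e.g. dwc2 reports this instead */
		/* tear down endpoint state, await re-enumeration */
		break;
	case USB_RAW_EVENT_SUSPEND:
	case USB_RAW_EVENT_RESUME:
		break;
	}
}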
diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h
index fdd4d88a7b95..d791cc58a7f0 100644
--- a/include/uapi/linux/usb/tmc.h
+++ b/include/uapi/linux/usb/tmc.h
@@ -102,6 +102,9 @@ struct usbtmc_message {
#define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8)
#define USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8)
+#define USBTMC_IOCTL_GET_STB _IOR(USBTMC_IOC_NR, 26, __u8)
+#define USBTMC_IOCTL_GET_SRQ_STB _IOR(USBTMC_IOC_NR, 27, __u8)
+
/* Cancel and cleanup asynchronous calls */
#define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35)
#define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36)
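Both new ioctls read back a single status byte. A minimal sketch, with
tmc_fd assumed to be an open /dev/usbtmc* descriptor:

__u8 stb;

if (ioctl(tmc_fd, USBTMC_IOCTL_GET_STB, &stb) == 0)
	printf("STB = 0x%02x\n", stb);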
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index d854cb19c42c..2ff0e8a3a683 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -179,6 +179,36 @@
#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3)
#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4)
+/* 3.9.2.6 Color Matching Descriptor Values */
+enum uvc_color_primaries_values {
+ UVC_COLOR_PRIMARIES_UNSPECIFIED,
+ UVC_COLOR_PRIMARIES_BT_709_SRGB,
+ UVC_COLOR_PRIMARIES_BT_470_2_M,
+ UVC_COLOR_PRIMARIES_BT_470_2_B_G,
+ UVC_COLOR_PRIMARIES_SMPTE_170M,
+ UVC_COLOR_PRIMARIES_SMPTE_240M,
+};
+
+enum uvc_transfer_characteristics_values {
+ UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED,
+ UVC_TRANSFER_CHARACTERISTICS_BT_709,
+ UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M,
+ UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G,
+ UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M,
+ UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M,
+ UVC_TRANSFER_CHARACTERISTICS_LINEAR,
+ UVC_TRANSFER_CHARACTERISTICS_SRGB,
+};
+
+enum uvc_matrix_coefficients {
+ UVC_MATRIX_COEFFICIENTS_UNSPECIFIED,
+ UVC_MATRIX_COEFFICIENTS_BT_709,
+ UVC_MATRIX_COEFFICIENTS_FCC,
+ UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G,
+ UVC_MATRIX_COEFFICIENTS_SMPTE_170M,
+ UVC_MATRIX_COEFFICIENTS_SMPTE_240M,
+};
+
/* ------------------------------------------------------------------------
* UVC structures
*/
@@ -302,9 +332,10 @@ struct uvc_processing_unit_descriptor {
__u8 bControlSize;
__u8 bmControls[2];
__u8 iProcessing;
+ __u8 bmVideoStandards;
} __attribute__((__packed__));
-#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
+#define UVC_DT_PROCESSING_UNIT_SIZE(n) (10+(n))
/* 3.7.2.6. Extension Unit Descriptor */
struct uvc_extension_unit_descriptor {
@@ -465,7 +496,7 @@ struct uvc_format_uncompressed {
__u8 bDefaultFrameIndex;
__u8 bAspectRatioX;
__u8 bAspectRatioY;
- __u8 bmInterfaceFlags;
+ __u8 bmInterlaceFlags;
__u8 bCopyProtect;
} __attribute__((__packed__));
@@ -521,7 +552,7 @@ struct uvc_format_mjpeg {
__u8 bDefaultFrameIndex;
__u8 bAspectRatioX;
__u8 bAspectRatioY;
- __u8 bmInterfaceFlags;
+ __u8 bmInterlaceFlags;
__u8 bCopyProtect;
} __attribute__((__packed__));
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index cf525cddeb94..74a84e02422a 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -131,7 +131,7 @@ struct usbdevfs_urb {
unsigned int signr; /* signal to be sent on completion,
or 0 if none should be sent. */
void __user *usercontext;
- struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
};
/* ioctls for talking directly to drivers */
@@ -176,7 +176,7 @@ struct usbdevfs_disconnect_claim {
struct usbdevfs_streams {
unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
unsigned int num_eps;
- unsigned char eps[0];
+ unsigned char eps[];
};
/*
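With iso_frame_desc[] now a proper flexible array, allocating an
isochronous URB follows the usual sizeof-plus-trailing-array pattern. A
sketch, with the packet count n assumed:

struct usbdevfs_urb *urb;

urb = calloc(1, sizeof(*urb) +
		n * sizeof(struct usbdevfs_iso_packet_desc));
urb->type = USBDEVFS_URB_TYPE_ISO;
urb->number_of_packets = n;
/* fill urb->iso_frame_desc[0..n) before USBDEVFS_SUBMITURB */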
diff --git a/include/uapi/linux/usbip.h b/include/uapi/linux/usbip.h
index fd393d908d8a..e4421ad55b2e 100644
--- a/include/uapi/linux/usbip.h
+++ b/include/uapi/linux/usbip.h
@@ -24,4 +24,30 @@ enum usbip_device_status {
VDEV_ST_USED,
VDEV_ST_ERROR
};
+
+/* USB URB Transfer flags:
+ *
+ * The USBIP server and client (vhci) pack URBs in TCP packets. The
+ * following are the transfer flag defines used in the USBIP protocol.
+ */
+
+#define USBIP_URB_SHORT_NOT_OK 0x0001
+#define USBIP_URB_ISO_ASAP 0x0002
+#define USBIP_URB_NO_TRANSFER_DMA_MAP 0x0004
+#define USBIP_URB_ZERO_PACKET 0x0040
+#define USBIP_URB_NO_INTERRUPT 0x0080
+#define USBIP_URB_FREE_BUFFER 0x0100
+#define USBIP_URB_DIR_IN 0x0200
+#define USBIP_URB_DIR_OUT 0
+#define USBIP_URB_DIR_MASK USBIP_URB_DIR_IN
+
+#define USBIP_URB_DMA_MAP_SINGLE 0x00010000
+#define USBIP_URB_DMA_MAP_PAGE 0x00020000
+#define USBIP_URB_DMA_MAP_SG 0x00040000
+#define USBIP_URB_MAP_LOCAL 0x00080000
+#define USBIP_URB_SETUP_MAP_SINGLE 0x00100000
+#define USBIP_URB_SETUP_MAP_LOCAL 0x00200000
+#define USBIP_URB_DMA_SG_COMBINED 0x00400000
+#define USBIP_URB_ALIGNED_TEMP_BUFFER 0x00800000
+
#endif /* _UAPI_LINUX_USBIP_H */
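Since USBIP_URB_DIR_IN is the only bit in the direction mask, checking a
URB's direction in the wire flags reduces to a single AND:

/* transfer_flags as carried in the USBIP packet header */
int is_in = (transfer_flags & USBIP_URB_DIR_MASK) == USBIP_URB_DIR_IN;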
diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h
new file mode 100644
index 000000000000..a03de03dccbc
--- /dev/null
+++ b/include/uapi/linux/user_events.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2021-2022, Microsoft Corporation.
+ *
+ * Authors:
+ * Beau Belgrave <beaub@linux.microsoft.com>
+ */
+#ifndef _UAPI_LINUX_USER_EVENTS_H
+#define _UAPI_LINUX_USER_EVENTS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define USER_EVENTS_SYSTEM "user_events"
+#define USER_EVENTS_MULTI_SYSTEM "user_events_multi"
+#define USER_EVENTS_PREFIX "u:"
+
+/* Create dynamic location entry within a 32-bit value */
+#define DYN_LOC(offset, size) ((size) << 16 | (offset))
+
+/* List of supported registration flags */
+enum user_reg_flag {
+ /* Event will not delete upon last reference closing */
+ USER_EVENT_REG_PERSIST = 1U << 0,
+
+ /* Event will be allowed to have multiple formats */
+ USER_EVENT_REG_MULTI_FORMAT = 1U << 1,
+
+ /* This value or above is currently non-ABI */
+ USER_EVENT_REG_MAX = 1U << 2,
+};
+
+/*
+ * Describes an event registration and stores the results of the registration.
+ * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
+ * must set the size and name_args before invocation.
+ */
+struct user_reg {
+
+ /* Input: Size of the user_reg structure being used */
+ __u32 size;
+
+ /* Input: Bit in enable address to use */
+ __u8 enable_bit;
+
+ /* Input: Enable size in bytes at address */
+ __u8 enable_size;
+
+ /* Input: Flags to use, if any */
+ __u16 flags;
+
+ /* Input: Address to update when enabled */
+ __u64 enable_addr;
+
+ /* Input: Pointer to string with event name, description and flags */
+ __u64 name_args;
+
+ /* Output: Index of the event to use when writing data */
+ __u32 write_index;
+} __attribute__((__packed__));
+
+/*
+ * Describes an event unregistration; callers must set the size, address, and bit.
+ * This structure is passed to the DIAG_IOCSUNREG ioctl to disable bit updates.
+ */
+struct user_unreg {
+ /* Input: Size of the user_unreg structure being used */
+ __u32 size;
+
+ /* Input: Bit to unregister */
+ __u8 disable_bit;
+
+ /* Input: Reserved, set to 0 */
+ __u8 __reserved;
+
+ /* Input: Reserved, set to 0 */
+ __u16 __reserved2;
+
+ /* Input: Address to unregister */
+ __u64 disable_addr;
+} __attribute__((__packed__));
+
+#define DIAG_IOC_MAGIC '*'
+
+/* Request to register a user_event */
+#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg *)
+
+/* Request to delete a user_event */
+#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char *)
+
+/* Request to unregister a user_event */
+#define DIAG_IOCSUNREG _IOW(DIAG_IOC_MAGIC, 2, struct user_unreg *)
+
+#endif /* _UAPI_LINUX_USER_EVENTS_H */
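An end-to-end sketch of the register/emit flow: DIAG_IOCSREG fills in
write_index, the kernel flips enable_bit in the word at enable_addr when
a tracer attaches, and an event is emitted by writing write_index
followed by the payload. The tracefs path is the conventional mount
point, and error handling is omitted:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/user_events.h>

int main(void)
{
	__u32 enabled = 0;  /* kernel sets enable_bit here when enabled */
	__u32 count = 42;   /* example payload, matching "u32 count" */
	int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
	struct user_reg reg = {
		.size        = sizeof(reg),
		.enable_bit  = 0,
		.enable_size = sizeof(enabled),
		.enable_addr = (__u64)(uintptr_t)&enabled,
		.name_args   = (__u64)(uintptr_t)"test u32 count",
	};

	ioctl(fd, DIAG_IOCSREG, &reg);
	if (enabled & (1U << reg.enable_bit)) {
		struct iovec io[2] = {
			{ &reg.write_index, sizeof(reg.write_index) },
			{ &count, sizeof(count) },
		};
		writev(fd, io, 2); /* emits one "test" event */
	}
	return 0;
}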
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index e7e98bde221f..2841e4ea8f2c 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -12,6 +12,10 @@
#include <linux/types.h>
+/* ioctls for /dev/userfaultfd */
+#define USERFAULTFD_IOC 0xAA
+#define USERFAULTFD_IOC_NEW _IO(USERFAULTFD_IOC, 0x00)
+
/*
* If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and
* UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In
@@ -19,15 +23,26 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
+#define UFFD_API_REGISTER_MODES (UFFDIO_REGISTER_MODE_MISSING | \
+ UFFDIO_REGISTER_MODE_WP | \
+ UFFDIO_REGISTER_MODE_MINOR)
#define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
- UFFD_FEATURE_EVENT_REMOVE | \
+ UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
UFFD_FEATURE_MISSING_SHMEM | \
UFFD_FEATURE_SIGBUS | \
- UFFD_FEATURE_THREAD_ID)
+ UFFD_FEATURE_THREAD_ID | \
+ UFFD_FEATURE_MINOR_HUGETLBFS | \
+ UFFD_FEATURE_MINOR_SHMEM | \
+ UFFD_FEATURE_EXACT_ADDRESS | \
+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
+ UFFD_FEATURE_WP_UNPOPULATED | \
+ UFFD_FEATURE_POISON | \
+ UFFD_FEATURE_WP_ASYNC | \
+ UFFD_FEATURE_MOVE)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -36,10 +51,16 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
- (__u64)1 << _UFFDIO_WRITEPROTECT)
+ (__u64)1 << _UFFDIO_MOVE | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT | \
+ (__u64)1 << _UFFDIO_CONTINUE | \
+ (__u64)1 << _UFFDIO_POISON)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
- (__u64)1 << _UFFDIO_COPY)
+ (__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT | \
+ (__u64)1 << _UFFDIO_CONTINUE | \
+ (__u64)1 << _UFFDIO_POISON)
/*
* Valid ioctl command number range with this API is from 0x00 to
@@ -54,7 +75,10 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_MOVE (0x05)
#define _UFFDIO_WRITEPROTECT (0x06)
+#define _UFFDIO_CONTINUE (0x07)
+#define _UFFDIO_POISON (0x08)
#define _UFFDIO_API (0x3F)
/* userfaultfd ioctl ids */
@@ -71,8 +95,14 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \
+ struct uffdio_move)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
+#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
+ struct uffdio_continue)
+#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
+ struct uffdio_poison)
/* read() structure */
struct uffd_msg {
@@ -127,6 +157,7 @@ struct uffd_msg {
/* flags for UFFD_EVENT_PAGEFAULT */
#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
+#define UFFD_PAGEFAULT_FLAG_MINOR (1<<2) /* If reason is VM_UFFD_MINOR */
struct uffdio_api {
/* userland asks for an API number and the features to enable */
@@ -171,6 +202,34 @@ struct uffdio_api {
*
* UFFD_FEATURE_THREAD_ID pid of the page faulted task_struct will
* be returned, if feature is not requested 0 will be returned.
+ *
+ * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults
+ * can be intercepted (via REGISTER_MODE_MINOR) for
+ * hugetlbfs-backed pages.
+ *
+ * UFFD_FEATURE_MINOR_SHMEM indicates the same support as
+ * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead.
+ *
+ * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page
+ * faults will be provided and the offset within the page will not be
+ * masked.
+ *
+ * UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd
+ * write-protection mode is supported on both shmem and hugetlbfs.
+ *
+ * UFFD_FEATURE_WP_UNPOPULATED indicates that userfaultfd
+ * write-protection mode will always apply to unpopulated pages
+ * (i.e. empty ptes). This will be the default behavior for shmem
+ * & hugetlbfs, so this flag only affects anonymous memory behavior
+ * when userfault write-protection mode is registered.
+ *
+ * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection
+ * asynchronous mode is supported in which the write fault is
+ * automatically resolved and write-protection is un-set.
+ * It implies UFFD_FEATURE_WP_UNPOPULATED.
+ *
+ * UFFD_FEATURE_MOVE indicates that the kernel supports moving
+ * existing page contents from userspace.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -181,6 +240,14 @@ struct uffdio_api {
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
#define UFFD_FEATURE_SIGBUS (1<<7)
#define UFFD_FEATURE_THREAD_ID (1<<8)
+#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9)
+#define UFFD_FEATURE_MINOR_SHMEM (1<<10)
+#define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
+#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
+#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
+#define UFFD_FEATURE_POISON (1<<14)
+#define UFFD_FEATURE_WP_ASYNC (1<<15)
+#define UFFD_FEATURE_MOVE (1<<16)
__u64 features;
__u64 ioctls;
@@ -195,6 +262,7 @@ struct uffdio_register {
struct uffdio_range range;
#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
+#define UFFDIO_REGISTER_MODE_MINOR ((__u64)1<<2)
__u64 mode;
/*
@@ -257,4 +325,62 @@ struct uffdio_writeprotect {
__u64 mode;
};
+struct uffdio_continue {
+ struct uffdio_range range;
+#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0)
+ /*
+ * UFFDIO_CONTINUE_MODE_WP will map the page write protected on
+ * the fly. UFFDIO_CONTINUE_MODE_WP is available only if the
+ * write protected ioctl is implemented for the range
+ * according to the uffdio_register.ioctls.
+ */
+#define UFFDIO_CONTINUE_MODE_WP ((__u64)1<<1)
+ __u64 mode;
+
+ /*
+ * Fields below here are written by the ioctl and must be at the end:
+ * the copy_from_user will not read past here.
+ */
+ __s64 mapped;
+};
+
+struct uffdio_poison {
+ struct uffdio_range range;
+#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * Fields below here are written by the ioctl and must be at the end:
+ * the copy_from_user will not read past here.
+ */
+ __s64 updated;
+};
+
+struct uffdio_move {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * Especially if used to atomically remove memory from the
+ * address space, the wake on the dst range is not needed.
+ */
+#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
+ __u64 mode;
+ /*
+ * "move" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 move;
+};
+
+/*
+ * Flags for the userfaultfd(2) system call itself.
+ */
+
+/*
+ * Create a userfaultfd that can handle page faults only in user mode.
+ */
+#define UFFD_USER_MODE_ONLY 1
+
#endif /* _LINUX_USERFAULTFD_H */
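A hedged sketch of the newer creation path plus feature negotiation: open
the /dev/userfaultfd node, mint a uffd with USERFAULTFD_IOC_NEW (the
ioctl argument is assumed to carry the same O_CLOEXEC/O_NONBLOCK flags
the userfaultfd(2) syscall takes), then handshake via UFFDIO_API asking
for one of the new features:

int devfd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
int uffd  = ioctl(devfd, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);

struct uffdio_api api = {
	.api      = UFFD_API,
	.features = UFFD_FEATURE_MOVE,
};

if (uffd >= 0 && ioctl(uffd, UFFDIO_API, &api) == 0 &&
    (api.features & UFFD_FEATURE_MOVE)) {
	/* UFFDIO_MOVE may now be used on registered ranges */
}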
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index e5a7eecef7c3..8443738f4bb2 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -1,42 +1 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * UUID/GUID definition
- *
- * Copyright (C) 2010, Intel Corp.
- * Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _UAPI_LINUX_UUID_H_
-#define _UAPI_LINUX_UUID_H_
-
-#include <linux/types.h>
-
-typedef struct {
- __u8 b[16];
-} guid_t;
-
-#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
-((guid_t) \
-{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
- (b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, \
- (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
-
-/* backwards compatibility, don't use in new code */
-typedef guid_t uuid_le;
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
- GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
-#define NULL_UUID_LE \
- UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00)
-
-#endif /* _UAPI_LINUX_UUID_H_ */
+#include <linux/mei_uuid.h>
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index f80f05b3c423..f86185456dc5 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -36,9 +36,11 @@
UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | \
UVC_CTRL_FLAG_GET_DEF)
+#define UVC_MENU_NAME_LEN 32
+
struct uvc_menu_info {
__u32 value;
- __u8 name[32];
+ __u8 name[UVC_MENU_NAME_LEN];
};
struct uvc_xu_control_mapping {
@@ -76,17 +78,17 @@ struct uvc_xu_control_query {
/**
* struct uvc_meta_buf - metadata buffer building block
- * @ns - system timestamp of the payload in nanoseconds
- * @sof - USB Frame Number
- * @length - length of the payload header
- * @flags - payload header flags
- * @buf - optional device-specific header data
+ * @ns: system timestamp of the payload in nanoseconds
+ * @sof: USB Frame Number
+ * @length: length of the payload header
+ * @flags: payload header flags
+ * @buf: optional device-specific header data
*
* UVC metadata nodes fill buffers with possibly multiple instances of this
* struct. The first two fields are added by the driver, they can be used for
* clock synchronisation. The rest is an exact copy of a UVC payload header.
* Only complete objects with complete buffers are included. Therefore it's
- * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large.
+ * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large.
*/
struct uvc_meta_buf {
__u64 ns;
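Given the corrected size rule, walking the records of a filled metadata
buffer looks like this sketch (buf and bytesused are assumed to come from
the dequeued V4L2 buffer):

__u8 *p = buf;

while (p < buf + bytesused) {
	struct uvc_meta_buf *m = (struct uvc_meta_buf *)p;

	/* m->ns and m->sof, then the m->length-byte payload header copy */
	p += sizeof(m->ns) + sizeof(m->sof) + m->length;
}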
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 7d21c1634b4d..5a23a15d78ac 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -10,45 +10,6 @@
*
* Copyright (C) 2012 Nokia Corporation
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __V4L2_COMMON__
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 62271418c1be..99c3f5e99da7 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -4,44 +4,6 @@
*
* Copyright (C) 1999-2012 the contributors
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* The contents of this header was split off from videodev2.h. All control
* definitions should be added to this header, which is included by
* videodev2.h.
@@ -50,11 +12,12 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/const.h>
#include <linux/types.h>
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
-#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
+#define V4L2_CTRL_CLASS_CODEC 0x00990000 /* Stateful codec controls */
#define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */
#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator controls */
#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */
@@ -65,6 +28,8 @@
#define V4L2_CTRL_CLASS_FM_RX 0x00a10000 /* FM Receiver controls */
#define V4L2_CTRL_CLASS_RF_TUNER 0x00a20000 /* RF tuner controls */
#define V4L2_CTRL_CLASS_DETECT 0x00a30000 /* Detection controls */
+#define V4L2_CTRL_CLASS_CODEC_STATELESS 0x00a40000 /* Stateless codecs controls */
+#define V4L2_CTRL_CLASS_COLORIMETRY 0x00a50000 /* Colorimetry controls */
/* User-class control IDs */
@@ -125,6 +90,7 @@ enum v4l2_colorfx {
V4L2_COLORFX_SOLARIZATION = 13,
V4L2_COLORFX_ANTIQUE = 14,
V4L2_COLORFX_SET_CBCR = 15,
+ V4L2_COLORFX_SET_RGB = 16,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
@@ -142,15 +108,20 @@ enum v4l2_colorfx {
#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41)
#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42)
+#define V4L2_CID_COLORFX_RGB (V4L2_CID_BASE+43)
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+43)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+44)
/* USER-class private control IDs */
-/* The base for the meye driver controls. See linux/meye.h for the list
- * of controls. We reserve 16 controls for this driver. */
+#ifndef __KERNEL__
+/*
+ * The base for the meye driver controls. This driver was removed, but
+ * we keep this define in case any software still uses it.
+ */
#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000)
+#endif
/* The base for the bttv driver controls.
* We reserve 32 controls for this driver. */
@@ -198,15 +169,61 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_ATMEL_ISC_BASE (V4L2_CID_USER_BASE + 0x10c0)
+/*
+ * The base for the CODA driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_CODA_BASE (V4L2_CID_USER_BASE + 0x10e0)
+/*
+ * The base for MIPI CCS driver controls.
+ * We reserve 128 controls for this driver.
+ */
+#define V4L2_CID_USER_CCS_BASE (V4L2_CID_USER_BASE + 0x10f0)
+/*
+ * The base for Allegro driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170)
+
+/*
+ * The base for the isl7998x driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180)
+
+/*
+ * The base for DW100 driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_DW100_BASE (V4L2_CID_USER_BASE + 0x1190)
+
+/*
+ * The base for Aspeed driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
+
+/*
+ * The base for Nuvoton NPCM driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
+
+/*
+ * The base for THine THP7312 driver controls.
+ * We reserve 32 controls for this driver.
+ */
+#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
-#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
-#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
+#define V4L2_CID_CODEC_BASE (V4L2_CTRL_CLASS_CODEC | 0x900)
+#define V4L2_CID_CODEC_CLASS (V4L2_CTRL_CLASS_CODEC | 1)
/* MPEG streams, specific to multiplexed streams */
-#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0)
+#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_CODEC_BASE+0)
enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */
@@ -215,26 +232,26 @@ enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
};
-#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1)
-#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2)
-#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3)
-#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
-#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
-#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
-#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
+#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_CODEC_BASE+1)
+#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_CODEC_BASE+2)
+#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_CODEC_BASE+3)
+#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_CODEC_BASE+4)
+#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_CODEC_BASE+5)
+#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_CODEC_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_CODEC_BASE+7)
enum v4l2_mpeg_stream_vbi_fmt {
V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
};
/* MPEG audio controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
+#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_CODEC_BASE+100)
enum v4l2_mpeg_audio_sampling_freq {
V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
};
-#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101)
+#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_CODEC_BASE+101)
enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
@@ -242,7 +259,7 @@ enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_AAC = 3,
V4L2_MPEG_AUDIO_ENCODING_AC3 = 4,
};
-#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102)
+#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_CODEC_BASE+102)
enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1,
@@ -259,7 +276,7 @@ enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103)
+#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_CODEC_BASE+103)
enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1,
@@ -276,7 +293,7 @@ enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104)
+#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_CODEC_BASE+104)
enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1,
@@ -293,34 +310,34 @@ enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105)
+#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_CODEC_BASE+105)
enum v4l2_mpeg_audio_mode {
V4L2_MPEG_AUDIO_MODE_STEREO = 0,
V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
V4L2_MPEG_AUDIO_MODE_DUAL = 2,
V4L2_MPEG_AUDIO_MODE_MONO = 3,
};
-#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106)
+#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_CODEC_BASE+106)
enum v4l2_mpeg_audio_mode_extension {
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
};
-#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107)
+#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_CODEC_BASE+107)
enum v4l2_mpeg_audio_emphasis {
V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0,
V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2,
};
-#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108)
+#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_CODEC_BASE+108)
enum v4l2_mpeg_audio_crc {
V4L2_MPEG_AUDIO_CRC_NONE = 0,
V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
};
-#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109)
-#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110)
-#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111)
+#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_CODEC_BASE+109)
+#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_CODEC_BASE+110)
+#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_CODEC_BASE+111)
enum v4l2_mpeg_audio_ac3_bitrate {
V4L2_MPEG_AUDIO_AC3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_AC3_BITRATE_40K = 1,
@@ -342,7 +359,7 @@ enum v4l2_mpeg_audio_ac3_bitrate {
V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17,
V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18,
};
-#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_MPEG_BASE+112)
+#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_CODEC_BASE+112)
enum v4l2_mpeg_audio_dec_playback {
V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO = 0,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO = 1,
@@ -351,51 +368,52 @@ enum v4l2_mpeg_audio_dec_playback {
V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO = 4,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5,
};
-#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
+#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_CODEC_BASE+113)
/* MPEG video controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200)
+#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_CODEC_BASE+200)
enum v4l2_mpeg_video_encoding {
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0,
V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
};
-#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201)
+#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_CODEC_BASE+201)
enum v4l2_mpeg_video_aspect {
V4L2_MPEG_VIDEO_ASPECT_1x1 = 0,
V4L2_MPEG_VIDEO_ASPECT_4x3 = 1,
V4L2_MPEG_VIDEO_ASPECT_16x9 = 2,
V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
};
-#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202)
-#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203)
-#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204)
-#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206)
+#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_CODEC_BASE+202)
+#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_CODEC_BASE+203)
+#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_CODEC_BASE+204)
+#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_CODEC_BASE+205)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_CODEC_BASE+206)
enum v4l2_mpeg_video_bitrate_mode {
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
-};
-#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208)
-#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
-#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
-#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
-#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212)
-#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213)
-#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214)
-#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_MPEG_BASE+215)
-#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_MPEG_BASE+216)
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CQ = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_CODEC_BASE+207)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_CODEC_BASE+208)
+#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_CODEC_BASE+209)
+#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_CODEC_BASE+210)
+#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_CODEC_BASE+211)
+#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_CODEC_BASE+212)
+#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_CODEC_BASE+213)
+#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_CODEC_BASE+214)
+#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_CODEC_BASE+215)
+#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_CODEC_BASE+216)
enum v4l2_mpeg_video_header_mode {
V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE = 0,
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME = 1,
};
-#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_MPEG_BASE+217)
-#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_MPEG_BASE+218)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_MPEG_BASE+219)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_MPEG_BASE+220)
-#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_MPEG_BASE+221)
+#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_CODEC_BASE+217)
+#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_CODEC_BASE+218)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_CODEC_BASE+219)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_CODEC_BASE+220)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_CODEC_BASE+221)
enum v4l2_mpeg_video_multi_slice_mode {
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB = 1,
@@ -406,24 +424,36 @@ enum v4l2_mpeg_video_multi_slice_mode {
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2,
#endif
};
-#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_MPEG_BASE+222)
-#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_MPEG_BASE+223)
-#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_MPEG_BASE+224)
-#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_MPEG_BASE+225)
-#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_MPEG_BASE+226)
-#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_MPEG_BASE+227)
-#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
-#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
+#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_CODEC_BASE+222)
+#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_CODEC_BASE+223)
+#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_CODEC_BASE+224)
+#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_CODEC_BASE+225)
+#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_CODEC_BASE+226)
+#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_CODEC_BASE+227)
+#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_CODEC_BASE+228)
+#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_CODEC_BASE+229)
+#define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (V4L2_CID_CODEC_BASE+230)
+#define V4L2_CID_MPEG_VIDEO_AU_DELIMITER (V4L2_CID_CODEC_BASE+231)
+#define V4L2_CID_MPEG_VIDEO_LTR_COUNT (V4L2_CID_CODEC_BASE+232)
+#define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX (V4L2_CID_CODEC_BASE+233)
+#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234)
+#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235)
+#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236)
+#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (V4L2_CID_CODEC_BASE+237)
+enum v4l2_mpeg_video_intra_refresh_period_type {
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM = 0,
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC = 1,
+};
/* CIDs for the MPEG-2 Part 2 (H.262) codec */
-#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_MPEG_BASE+270)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270)
enum v4l2_mpeg_video_mpeg2_level {
V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW = 0,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN = 1,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 = 2,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH = 3,
};
-#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_MPEG_BASE+271)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_CODEC_BASE+271)
enum v4l2_mpeg_video_mpeg2_profile {
V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE = 0,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN = 1,
@@ -434,28 +464,28 @@ enum v4l2_mpeg_video_mpeg2_profile {
};
/* CIDs for the FWHT codec as used by the vicodec driver. */
-#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_MPEG_BASE + 290)
-#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_MPEG_BASE + 291)
-
-#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
-#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
-#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
-#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_MPEG_BASE+303)
-#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_MPEG_BASE+304)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_MPEG_BASE+350)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_MPEG_BASE+351)
-#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_MPEG_BASE+352)
-#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_MPEG_BASE+353)
-#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_MPEG_BASE+354)
-#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_MPEG_BASE+355)
-#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_MPEG_BASE+356)
-#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_MPEG_BASE+357)
+#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_CODEC_BASE + 290)
+#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_CODEC_BASE + 291)
+
+#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_CODEC_BASE+300)
+#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_CODEC_BASE+301)
+#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_CODEC_BASE+302)
+#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_CODEC_BASE+303)
+#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_CODEC_BASE+304)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_CODEC_BASE+350)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_CODEC_BASE+351)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_CODEC_BASE+352)
+#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_CODEC_BASE+353)
+#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_CODEC_BASE+354)
+#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_CODEC_BASE+355)
+#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_CODEC_BASE+356)
+#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_CODEC_BASE+357)
enum v4l2_mpeg_video_h264_entropy_mode {
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC = 0,
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC = 1,
};
-#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_MPEG_BASE+358)
-#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_MPEG_BASE+359)
+#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_CODEC_BASE+358)
+#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_CODEC_BASE+359)
enum v4l2_mpeg_video_h264_level {
V4L2_MPEG_VIDEO_H264_LEVEL_1_0 = 0,
V4L2_MPEG_VIDEO_H264_LEVEL_1B = 1,
@@ -478,15 +508,15 @@ enum v4l2_mpeg_video_h264_level {
V4L2_MPEG_VIDEO_H264_LEVEL_6_1 = 18,
V4L2_MPEG_VIDEO_H264_LEVEL_6_2 = 19,
};
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_MPEG_BASE+360)
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_MPEG_BASE+361)
-#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE+362)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_CODEC_BASE+360)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_CODEC_BASE+361)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE+362)
enum v4l2_mpeg_video_h264_loop_filter_mode {
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED = 0,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED = 1,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
};
-#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_MPEG_BASE+363)
+#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_CODEC_BASE+363)
enum v4l2_mpeg_video_h264_profile {
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE = 0,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE = 1,
@@ -507,10 +537,10 @@ enum v4l2_mpeg_video_h264_profile {
V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH = 17,
};
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+364)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+365)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_MPEG_BASE+366)
-#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_MPEG_BASE+367)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_CODEC_BASE+364)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_CODEC_BASE+365)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_CODEC_BASE+366)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_CODEC_BASE+367)
enum v4l2_mpeg_video_h264_vui_sar_idc {
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED = 0,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 = 1,
@@ -531,9 +561,9 @@ enum v4l2_mpeg_video_h264_vui_sar_idc {
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 = 16,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED = 17,
};
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_MPEG_BASE+368)
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_MPEG_BASE+369)
-#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_MPEG_BASE+370)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_CODEC_BASE+368)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_CODEC_BASE+369)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_CODEC_BASE+370)
enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD = 0,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN = 1,
@@ -542,8 +572,8 @@ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM = 4,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL = 5,
};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_MPEG_BASE+371)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_MPEG_BASE+372)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_CODEC_BASE+371)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_CODEC_BASE+372)
enum v4l2_mpeg_video_h264_fmo_map_type {
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES = 0,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES = 1,
@@ -553,36 +583,45 @@ enum v4l2_mpeg_video_h264_fmo_map_type {
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN = 5,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT = 6,
};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_MPEG_BASE+373)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_MPEG_BASE+374)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_CODEC_BASE+373)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_CODEC_BASE+374)
enum v4l2_mpeg_video_h264_fmo_change_dir {
V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT = 0,
V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT = 1,
};
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_MPEG_BASE+375)
-#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_MPEG_BASE+376)
-#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_MPEG_BASE+377)
-#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_MPEG_BASE+378)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_MPEG_BASE+379)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_MPEG_BASE+380)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_CODEC_BASE+375)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_CODEC_BASE+376)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_CODEC_BASE+377)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_CODEC_BASE+378)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_CODEC_BASE+379)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_CODEC_BASE+380)
enum v4l2_mpeg_video_h264_hierarchical_coding_type {
V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B = 0,
V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P = 1,
};
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_MPEG_BASE+381)
-#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_MPEG_BASE+382)
-#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_MPEG_BASE+383)
-#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_MPEG_BASE+384)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_MPEG_BASE+385)
-#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_MPEG_BASE+386)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_MPEG_BASE+387)
-#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_MPEG_BASE+388)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_MPEG_BASE+400)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_MPEG_BASE+401)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_MPEG_BASE+402)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_MPEG_BASE+403)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_MPEG_BASE+404)
-#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_MPEG_BASE+405)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_CODEC_BASE+381)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_CODEC_BASE+382)
+#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_CODEC_BASE+383)
+#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_CODEC_BASE+384)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+385)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+386)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+387)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+388)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+389)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+390)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE+391)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE+392)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE+393)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE+394)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE+395)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE+396)
+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE+397)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_CODEC_BASE+400)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_CODEC_BASE+401)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_CODEC_BASE+402)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_CODEC_BASE+403)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_CODEC_BASE+404)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_CODEC_BASE+405)
enum v4l2_mpeg_video_mpeg4_level {
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1,
@@ -593,7 +632,7 @@ enum v4l2_mpeg_video_mpeg4_level {
V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 = 6,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 = 7,
};
-#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_MPEG_BASE+406)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_CODEC_BASE+406)
enum v4l2_mpeg_video_mpeg4_profile {
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE = 0,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE = 1,
@@ -601,40 +640,40 @@ enum v4l2_mpeg_video_mpeg4_profile {
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE = 3,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY = 4,
};
-#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_MPEG_BASE+407)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_CODEC_BASE+407)
/* Control IDs for VP8 streams
 * Although VP8 is not part of MPEG, we add these controls to the MPEG class,
 * as that class already handles other video compression standards.
 */
-#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_MPEG_BASE+500)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_CODEC_BASE+500)
enum v4l2_vp8_num_partitions {
V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION = 0,
V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS = 1,
V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS = 2,
V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS = 3,
};
-#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_MPEG_BASE+501)
-#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_MPEG_BASE+502)
+#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_CODEC_BASE+501)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_CODEC_BASE+502)
enum v4l2_vp8_num_ref_frames {
V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME = 0,
V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME = 1,
V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME = 2,
};
-#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_MPEG_BASE+503)
-#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_MPEG_BASE+504)
-#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_MPEG_BASE+505)
-#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_MPEG_BASE+506)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_CODEC_BASE+503)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_CODEC_BASE+504)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_CODEC_BASE+505)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_CODEC_BASE+506)
enum v4l2_vp8_golden_frame_sel {
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV = 0,
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD = 1,
};
-#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_MPEG_BASE+507)
-#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
-#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
-#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
+#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_CODEC_BASE+507)
+#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_CODEC_BASE+508)
+#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_CODEC_BASE+509)
+#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_CODEC_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
+#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_CODEC_BASE+511)
enum v4l2_mpeg_video_vp8_profile {
V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
@@ -643,42 +682,59 @@ enum v4l2_mpeg_video_vp8_profile {
};
/* Deprecated alias for compatibility reasons. */
#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
-#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
+#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_CODEC_BASE+512)
enum v4l2_mpeg_video_vp9_profile {
V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
};
+#define V4L2_CID_MPEG_VIDEO_VP9_LEVEL (V4L2_CID_CODEC_BASE+513)
+enum v4l2_mpeg_video_vp9_level {
+ V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 = 0,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 = 1,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_2_0 = 2,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 = 3,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_3_0 = 4,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 = 5,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 = 6,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 = 7,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 = 8,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 = 9,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_5_2 = 10,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 = 11,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 = 12,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_2 = 13,
+};
/* CIDs for HEVC encoding. */
-#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_MPEG_BASE + 600)
-#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_MPEG_BASE + 601)
-#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_MPEG_BASE + 602)
-#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_MPEG_BASE + 603)
-#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_MPEG_BASE + 604)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_MPEG_BASE + 605)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_MPEG_BASE + 606)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_CODEC_BASE + 600)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_CODEC_BASE + 601)
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_CODEC_BASE + 602)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_CODEC_BASE + 603)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_CODEC_BASE + 604)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_CODEC_BASE + 605)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_CODEC_BASE + 606)
enum v4l2_mpeg_video_hevc_hier_coding_type {
V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B = 0,
V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P = 1,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_MPEG_BASE + 607)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_MPEG_BASE + 608)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_MPEG_BASE + 609)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_MPEG_BASE + 610)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_MPEG_BASE + 611)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_MPEG_BASE + 612)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_MPEG_BASE + 613)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_MPEG_BASE + 614)
-#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_MPEG_BASE + 615)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_CODEC_BASE + 607)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_CODEC_BASE + 608)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_CODEC_BASE + 609)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_CODEC_BASE + 610)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_CODEC_BASE + 611)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_CODEC_BASE + 612)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_CODEC_BASE + 613)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_CODEC_BASE + 614)
+#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_CODEC_BASE + 615)
enum v4l2_mpeg_video_hevc_profile {
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN = 0,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE = 1,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 = 2,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_MPEG_BASE + 616)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_CODEC_BASE + 616)
enum v4l2_mpeg_video_hevc_level {
V4L2_MPEG_VIDEO_HEVC_LEVEL_1 = 0,
V4L2_MPEG_VIDEO_HEVC_LEVEL_2 = 1,
@@ -694,64 +750,163 @@ enum v4l2_mpeg_video_hevc_level {
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 = 11,
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 = 12,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_MPEG_BASE + 617)
-#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_MPEG_BASE + 618)
+#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_CODEC_BASE + 617)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_CODEC_BASE + 618)
enum v4l2_mpeg_video_hevc_tier {
V4L2_MPEG_VIDEO_HEVC_TIER_MAIN = 0,
V4L2_MPEG_VIDEO_HEVC_TIER_HIGH = 1,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_MPEG_BASE + 619)
-#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE + 620)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_CODEC_BASE + 619)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE + 620)
enum v4l2_cid_mpeg_video_hevc_loop_filter_mode {
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED = 0,
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED = 1,
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_MPEG_BASE + 621)
-#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_MPEG_BASE + 622)
-#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_MPEG_BASE + 623)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 621)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 622)
+#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_CODEC_BASE + 623)
enum v4l2_cid_mpeg_video_hevc_refresh_type {
V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE = 0,
V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA = 1,
V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR = 2,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_MPEG_BASE + 624)
-#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_MPEG_BASE + 625)
-#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_MPEG_BASE + 626)
-#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_MPEG_BASE + 627)
-#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_MPEG_BASE + 628)
-#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_MPEG_BASE + 629)
-#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_MPEG_BASE + 630)
-#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_MPEG_BASE + 631)
-#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_MPEG_BASE + 632)
-#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_MPEG_BASE + 633)
-#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_MPEG_BASE + 634)
-#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_MPEG_BASE + 635)
+#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_CODEC_BASE + 624)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_CODEC_BASE + 625)
+#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_CODEC_BASE + 626)
+#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_CODEC_BASE + 627)
+#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_CODEC_BASE + 628)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_CODEC_BASE + 629)
+#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_CODEC_BASE + 630)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_CODEC_BASE + 631)
+#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_CODEC_BASE + 632)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_CODEC_BASE + 633)
+#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_CODEC_BASE + 634)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_CODEC_BASE + 635)
enum v4l2_cid_mpeg_video_hevc_size_of_length_field {
V4L2_MPEG_VIDEO_HEVC_SIZE_0 = 0,
V4L2_MPEG_VIDEO_HEVC_SIZE_1 = 1,
V4L2_MPEG_VIDEO_HEVC_SIZE_2 = 2,
V4L2_MPEG_VIDEO_HEVC_SIZE_4 = 3,
};
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_MPEG_BASE + 636)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_MPEG_BASE + 637)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_MPEG_BASE + 638)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_MPEG_BASE + 639)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_MPEG_BASE + 640)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_MPEG_BASE + 641)
-#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_MPEG_BASE + 642)
-#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_MPEG_BASE + 643)
-#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE + 636)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE + 637)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE + 638)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE + 639)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE + 640)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE + 641)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE + 642)
+#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_CODEC_BASE + 643)
+#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_CODEC_BASE + 644)
+#define V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY (V4L2_CID_CODEC_BASE + 645)
+#define V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_BASE + 646)
+enum v4l2_mpeg_video_frame_skip_mode {
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
+};
+
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 647)
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 648)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 649)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 650)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 651)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 652)
+
+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
+
+#define V4L2_CID_MPEG_VIDEO_AV1_PROFILE (V4L2_CID_CODEC_BASE + 655)
+/**
+ * enum v4l2_mpeg_video_av1_profile - AV1 profiles
+ *
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN: compliant decoders must be able to decode
+ * streams with seq_profile equal to 0.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH: compliant decoders must be able to decode
+ * streams with seq_profile less than or equal to 1.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL: compliant decoders must be able to
+ * decode streams with seq_profile less than or equal to 2.
+ *
+ * Conveys the highest profile a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_profile {
+ V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN = 0,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH = 1,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL = 2,
+};
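
The profile control above is a plain menu control, so applications can discover which profiles a decoder supports with the standard query ioctls. A minimal sketch, assuming fd is an open decoder node and recent kernel headers; list_av1_profiles() is a hypothetical helper, and the EINVAL that VIDIOC_QUERYMENU returns for menu indices the driver does not implement is what filters the list down to the supported profiles:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static void list_av1_profiles(int fd)
	{
		struct v4l2_queryctrl qc;
		struct v4l2_querymenu qm;
		__s32 i;

		memset(&qc, 0, sizeof(qc));
		qc.id = V4L2_CID_MPEG_VIDEO_AV1_PROFILE;
		if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) < 0)
			return;

		for (i = qc.minimum; i <= qc.maximum; i++) {
			memset(&qm, 0, sizeof(qm));
			qm.id = qc.id;
			qm.index = i;
			/* Unsupported menu entries fail with EINVAL. */
			if (!ioctl(fd, VIDIOC_QUERYMENU, &qm))
				printf("profile %d: %s\n", i, (const char *)qm.name);
		}
	}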
+
+#define V4L2_CID_MPEG_VIDEO_AV1_LEVEL (V4L2_CID_CODEC_BASE + 656)
+/**
+ * enum v4l2_mpeg_video_av1_level - AV1 levels
+ *
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_0: Level 2.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_1: Level 2.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_2: Level 2.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_3: Level 2.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_0: Level 3.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_1: Level 3.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_2: Level 3.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_3: Level 3.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_0: Level 4.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_1: Level 4.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_2: Level 4.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_3: Level 4.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_0: Level 5.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_1: Level 5.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_2: Level 5.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_3: Level 5.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_0: Level 6.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_1: Level 6.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_2: Level 6.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_3: Level 6.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_0: Level 7.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_1: Level 7.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_2: Level 7.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_3: Level 7.3.
+ *
+ * Conveys the highest level a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_level {
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_0 = 0,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_1 = 1,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_2 = 2,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_3 = 3,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_0 = 4,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_1 = 5,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_2 = 6,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_3 = 7,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_0 = 8,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_1 = 9,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_2 = 10,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_3 = 11,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_0 = 12,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_1 = 13,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_2 = 14,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_3 = 15,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_0 = 16,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_1 = 17,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_2 = 18,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_3 = 19,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_0 = 20,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_1 = 21,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_2 = 22,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23
+};
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
-#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
+#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+1)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+2)
enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
@@ -759,18 +914,18 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+3)
enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+4)
enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+5)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+6)
enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1,
@@ -778,38 +933,38 @@ enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10)
-#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+7)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+8)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+9)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+10)
+#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_CODEC_CX2341X_BASE+11)
/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
-#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100)
+#define V4L2_CID_CODEC_MFC51_BASE (V4L2_CTRL_CLASS_CODEC | 0x1100)
-#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_MPEG_MFC51_BASE+0)
-#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_MPEG_MFC51_BASE+1)
-#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_MPEG_MFC51_BASE+2)
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_CODEC_MFC51_BASE+0)
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_MFC51_BASE+1)
+#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_MFC51_BASE+2)
enum v4l2_mpeg_mfc51_video_frame_skip_mode {
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
};
-#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_MPEG_MFC51_BASE+3)
+#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_CODEC_MFC51_BASE+3)
enum v4l2_mpeg_mfc51_video_force_frame_type {
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED = 0,
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME = 1,
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED = 2,
};
-#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_MPEG_MFC51_BASE+4)
-#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_MPEG_MFC51_BASE+5)
-#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_MPEG_MFC51_BASE+6)
-#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_MPEG_MFC51_BASE+7)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_MPEG_MFC51_BASE+50)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_MPEG_MFC51_BASE+51)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_MPEG_MFC51_BASE+52)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53)
-#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54)
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_CODEC_MFC51_BASE+4)
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_CODEC_MFC51_BASE+5)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_CODEC_MFC51_BASE+6)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_CODEC_MFC51_BASE+7)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_CODEC_MFC51_BASE+50)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_CODEC_MFC51_BASE+51)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_CODEC_MFC51_BASE+52)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_CODEC_MFC51_BASE+53)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_CODEC_MFC51_BASE+54)
/* Camera class control IDs */
@@ -930,6 +1085,8 @@ enum v4l2_auto_focus_range {
#define V4L2_CID_CAMERA_SENSOR_ROTATION (V4L2_CID_CAMERA_CLASS_BASE+35)
+#define V4L2_CID_HDR_SENSOR_MODE (V4L2_CID_CAMERA_CLASS_BASE+36)
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
@@ -1053,6 +1210,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
+#define V4L2_CID_NOTIFY_GAINS (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 9)
/* Image processing controls */
@@ -1146,4 +1304,2200 @@ enum v4l2_detect_md_mode {
#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
+
+/* Stateless CODECs controls */
+#define V4L2_CID_CODEC_STATELESS_BASE (V4L2_CTRL_CLASS_CODEC_STATELESS | 0x900)
+#define V4L2_CID_CODEC_STATELESS_CLASS (V4L2_CTRL_CLASS_CODEC_STATELESS | 1)
+
+#define V4L2_CID_STATELESS_H264_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 0)
+/**
+ * enum v4l2_stateless_h264_decode_mode - Decoding mode
+ *
+ * @V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED: indicates that decoding
+ * is performed one slice at a time. In this mode,
+ * V4L2_CID_STATELESS_H264_SLICE_PARAMS must contain the parsed slice
+ * parameters and the OUTPUT buffer must contain a single slice.
+ * The V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF feature is used
+ * to support multi-slice frames.
+ * @V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED: indicates that
+ * decoding is performed per frame. The OUTPUT buffer must contain
+ * all slices and also both fields. This mode is typically supported
+ * by device drivers that are able to parse the slice headers
+ * in hardware. When this mode is selected,
+ * V4L2_CID_STATELESS_H264_SLICE_PARAMS is not used.
+ */
+enum v4l2_stateless_h264_decode_mode {
+ V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+};
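
A minimal sketch of how an application would select frame-based decoding through the extended-controls interface, assuming fd is an already-open stateless decoder node; set_frame_based_decoding() is a hypothetical helper name:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int set_frame_based_decoding(int fd)
	{
		struct v4l2_ext_control ctrl;
		struct v4l2_ext_controls ctrls;

		memset(&ctrl, 0, sizeof(ctrl));
		memset(&ctrls, 0, sizeof(ctrls));

		ctrl.id = V4L2_CID_STATELESS_H264_DECODE_MODE;
		ctrl.value = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED;

		ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
		ctrls.count = 1;
		ctrls.controls = &ctrl;

		return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	}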
+
+#define V4L2_CID_STATELESS_H264_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 1)
+/**
+ * enum v4l2_stateless_h264_start_code - Start code
+ *
+ * @V4L2_STATELESS_H264_START_CODE_NONE: slices are passed
+ * to the driver without any start code.
+ * @V4L2_STATELESS_H264_START_CODE_ANNEX_B: slices are passed
+ * to the driver with an Annex B start code prefix
+ * (legal start codes can be the 3-byte 0x000001 or the 4-byte 0x00000001).
+ * This mode is typically supported by device drivers that parse
+ * the start code in hardware.
+ */
+enum v4l2_stateless_h264_start_code {
+ V4L2_STATELESS_H264_START_CODE_NONE,
+ V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+};
+
+#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
+#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
+#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
+#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
+#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
+#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
+
+#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
+#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
+#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
+#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
+#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
+#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
+#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
+
+#define V4L2_H264_SPS_HAS_CHROMA_FORMAT(sps) \
+ ((sps)->profile_idc == 100 || (sps)->profile_idc == 110 || \
+ (sps)->profile_idc == 122 || (sps)->profile_idc == 244 || \
+ (sps)->profile_idc == 44 || (sps)->profile_idc == 83 || \
+ (sps)->profile_idc == 86 || (sps)->profile_idc == 118 || \
+ (sps)->profile_idc == 128 || (sps)->profile_idc == 138 || \
+ (sps)->profile_idc == 139 || (sps)->profile_idc == 134 || \
+ (sps)->profile_idc == 135)
+
+#define V4L2_CID_STATELESS_H264_SPS (V4L2_CID_CODEC_STATELESS_BASE + 2)
+/**
+ * struct v4l2_ctrl_h264_sps - H264 sequence parameter set
+ *
+ * All the members of this sequence parameter set structure match the
+ * sequence parameter set syntax as specified by the H264 specification.
+ *
+ * @profile_idc: see H264 specification.
+ * @constraint_set_flags: see H264 specification.
+ * @level_idc: see H264 specification.
+ * @seq_parameter_set_id: see H264 specification.
+ * @chroma_format_idc: see H264 specification.
+ * @bit_depth_luma_minus8: see H264 specification.
+ * @bit_depth_chroma_minus8: see H264 specification.
+ * @log2_max_frame_num_minus4: see H264 specification.
+ * @pic_order_cnt_type: see H264 specification.
+ * @log2_max_pic_order_cnt_lsb_minus4: see H264 specification.
+ * @max_num_ref_frames: see H264 specification.
+ * @num_ref_frames_in_pic_order_cnt_cycle: see H264 specification.
+ * @offset_for_ref_frame: see H264 specification.
+ * @offset_for_non_ref_pic: see H264 specification.
+ * @offset_for_top_to_bottom_field: see H264 specification.
+ * @pic_width_in_mbs_minus1: see H264 specification.
+ * @pic_height_in_map_units_minus1: see H264 specification.
+ * @flags: see V4L2_H264_SPS_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_sps {
+ __u8 profile_idc;
+ __u8 constraint_set_flags;
+ __u8 level_idc;
+ __u8 seq_parameter_set_id;
+ __u8 chroma_format_idc;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_frame_num_minus4;
+ __u8 pic_order_cnt_type;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 max_num_ref_frames;
+ __u8 num_ref_frames_in_pic_order_cnt_cycle;
+ __s32 offset_for_ref_frame[255];
+ __s32 offset_for_non_ref_pic;
+ __s32 offset_for_top_to_bottom_field;
+ __u16 pic_width_in_mbs_minus1;
+ __u16 pic_height_in_map_units_minus1;
+ __u32 flags;
+};
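
One practical consequence of the V4L2_H264_SPS_HAS_CHROMA_FORMAT() helper above: when chroma_format_idc is absent from the bitstream (profiles outside that list), the H264 specification infers it to be 1, i.e. 4:2:0. A small sketch; effective_chroma_format() is a hypothetical helper:

	#include <linux/videodev2.h>

	static unsigned int effective_chroma_format(const struct v4l2_ctrl_h264_sps *sps)
	{
		/* chroma_format_idc is only parsed for the high profiles;
		 * otherwise H.264 defines it to be 1 (4:2:0). */
		return V4L2_H264_SPS_HAS_CHROMA_FORMAT(sps) ?
		       sps->chroma_format_idc : 1;
	}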
+
+#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
+#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
+#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
+#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
+#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
+#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
+#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
+#define V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT 0x0080
+
+#define V4L2_CID_STATELESS_H264_PPS (V4L2_CID_CODEC_STATELESS_BASE + 3)
+/**
+ * struct v4l2_ctrl_h264_pps - H264 picture parameter set
+ *
+ * Except where noted, all the members of this picture parameter set
+ * structure match the picture parameter set syntax as specified
+ * by the H264 specification.
+ *
+ * In particular, the V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT flag
+ * has a specific meaning. This flag should be set if a non-flat
+ * scaling matrix applies to the picture. In this case, applications
+ * are expected to use V4L2_CID_STATELESS_H264_SCALING_MATRIX
+ * to pass the values of the non-flat matrices.
+ *
+ * @pic_parameter_set_id: see H264 specification.
+ * @seq_parameter_set_id: see H264 specification.
+ * @num_slice_groups_minus1: see H264 specification.
+ * @num_ref_idx_l0_default_active_minus1: see H264 specification.
+ * @num_ref_idx_l1_default_active_minus1: see H264 specification.
+ * @weighted_bipred_idc: see H264 specification.
+ * @pic_init_qp_minus26: see H264 specification.
+ * @pic_init_qs_minus26: see H264 specification.
+ * @chroma_qp_index_offset: see H264 specification.
+ * @second_chroma_qp_index_offset: see H264 specification.
+ * @flags: see V4L2_H264_PPS_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_pps {
+ __u8 pic_parameter_set_id;
+ __u8 seq_parameter_set_id;
+ __u8 num_slice_groups_minus1;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
+ __u8 weighted_bipred_idc;
+ __s8 pic_init_qp_minus26;
+ __s8 pic_init_qs_minus26;
+ __s8 chroma_qp_index_offset;
+ __s8 second_chroma_qp_index_offset;
+ __u16 flags;
+};
+
+#define V4L2_CID_STATELESS_H264_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 4)
+/**
+ * struct v4l2_ctrl_h264_scaling_matrix - H264 scaling matrices
+ *
+ * @scaling_list_4x4: scaling matrix after applying the inverse
+ * scanning process. Expected list order is Intra Y, Intra Cb,
+ * Intra Cr, Inter Y, Inter Cb, Inter Cr. The values on each
+ * scaling list are expected in raster scan order.
+ * @scaling_list_8x8: scaling matrix after applying the inverse
+ * scanning process. Expected list order is Intra Y, Inter Y,
+ * Intra Cb, Inter Cb, Intra Cr, Inter Cr. The values on each
+ * scaling list are expected in raster scan order.
+ *
+ * Note that the list order is different for the 4x4 and 8x8
+ * matrices as per the H264 specification, see table 7-2 "Assignment
+ * of mnemonic names to scaling list indices and specification of
+ * fall-back rule".
+ */
+struct v4l2_ctrl_h264_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+};
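
When V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT is not set, the flat default applies; in H264 the flat scaling lists have every entry equal to 16. A sketch of initializing the control accordingly; init_flat_scaling_matrix() is a hypothetical helper:

	#include <string.h>
	#include <linux/videodev2.h>

	static void init_flat_scaling_matrix(struct v4l2_ctrl_h264_scaling_matrix *m)
	{
		/* H.264 flat default lists (Flat_4x4_16, Flat_8x8_16):
		 * every coefficient is 16. */
		memset(m->scaling_list_4x4, 16, sizeof(m->scaling_list_4x4));
		memset(m->scaling_list_8x8, 16, sizeof(m->scaling_list_8x8));
	}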
+
+struct v4l2_h264_weight_factors {
+ __s16 luma_weight[32];
+ __s16 luma_offset[32];
+ __s16 chroma_weight[32][2];
+ __s16 chroma_offset[32][2];
+};
+
+#define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \
+ ((((pps)->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) && \
+ ((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \
+ (slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \
+ ((pps)->weighted_bipred_idc == 1 && \
+ (slice)->slice_type == V4L2_H264_SLICE_TYPE_B))
+
+#define V4L2_CID_STATELESS_H264_PRED_WEIGHTS (V4L2_CID_CODEC_STATELESS_BASE + 5)
+/**
+ * struct v4l2_ctrl_h264_pred_weights - Prediction weight table
+ *
+ * Prediction weight table, which matches the syntax specified
+ * by the H264 specification.
+ *
+ * @luma_log2_weight_denom: see H264 specification.
+ * @chroma_log2_weight_denom: see H264 specification.
+ * @weight_factors: luma and chroma weight factors.
+ */
+struct v4l2_ctrl_h264_pred_weights {
+ __u16 luma_log2_weight_denom;
+ __u16 chroma_log2_weight_denom;
+ struct v4l2_h264_weight_factors weight_factors[2];
+};
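
A sketch of passing this compound control in a media request, which is how stateless decoders are normally driven. The fd/request_fd pair and queue_pred_weights() are assumptions of the example, and a caller would first gate on the V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED() helper above:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int queue_pred_weights(int fd, int request_fd,
				      struct v4l2_ctrl_h264_pred_weights *weights)
	{
		struct v4l2_ext_control ctrl;
		struct v4l2_ext_controls ctrls;

		memset(&ctrl, 0, sizeof(ctrl));
		memset(&ctrls, 0, sizeof(ctrls));

		/* Compound controls are passed by pointer plus size. */
		ctrl.id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS;
		ctrl.size = sizeof(*weights);
		ctrl.ptr = weights;

		ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
		ctrls.request_fd = request_fd;
		ctrls.count = 1;
		ctrls.controls = &ctrl;

		return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	}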
+
+#define V4L2_H264_SLICE_TYPE_P 0
+#define V4L2_H264_SLICE_TYPE_B 1
+#define V4L2_H264_SLICE_TYPE_I 2
+#define V4L2_H264_SLICE_TYPE_SP 3
+#define V4L2_H264_SLICE_TYPE_SI 4
+
+#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01
+#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02
+
+#define V4L2_H264_TOP_FIELD_REF 0x1
+#define V4L2_H264_BOTTOM_FIELD_REF 0x2
+#define V4L2_H264_FRAME_REF 0x3
+
+/**
+ * struct v4l2_h264_reference - H264 picture reference
+ *
+ * @fields: indicates how the picture is referenced.
+ * Valid values are V4L2_H264_{}_REF.
+ * @index: index into v4l2_ctrl_h264_decode_params.dpb[].
+ */
+struct v4l2_h264_reference {
+ __u8 fields;
+ __u8 index;
+};
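
Two hypothetical one-liners illustrating the fields/index semantics documented above, assuming fields only ever holds one of the three V4L2_H264_*_REF values:

	#include <linux/videodev2.h>

	static int is_field_ref(const struct v4l2_h264_reference *ref)
	{
		return ref->fields != V4L2_H264_FRAME_REF;
	}

	static void set_frame_ref(struct v4l2_h264_reference *ref, __u8 dpb_index)
	{
		ref->fields = V4L2_H264_FRAME_REF;
		ref->index = dpb_index; /* indexes v4l2_ctrl_h264_decode_params.dpb[] */
	}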
+
+/*
+ * Maximum DPB size, as specified by section 'A.3.1 Level limits
+ * common to the Baseline, Main, and Extended profiles'.
+ */
+#define V4L2_H264_NUM_DPB_ENTRIES 16
+#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES)
+
+#define V4L2_CID_STATELESS_H264_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 6)
+/**
+ * struct v4l2_ctrl_h264_slice_params - H264 slice parameters
+ *
+ * This structure holds the H264 syntax elements that are specified
+ * as non-invariant for the slices in a given frame.
+ *
+ * Slice invariant syntax elements are contained in struct
+ * v4l2_ctrl_h264_decode_params. This is done to reduce the API surface
+ * on frame-based decoders, where slice header parsing is done by the
+ * hardware.
+ *
+ * Slice invariant syntax elements are specified in specification section
+ * "7.4.3 Slice header semantics".
+ *
+ * Except where noted, the members of this struct match the slice header syntax.
+ *
+ * @header_bit_size: offset in bits to slice_data() from the beginning of this slice.
+ * @first_mb_in_slice: see H264 specification.
+ * @slice_type: see H264 specification.
+ * @colour_plane_id: see H264 specification.
+ * @redundant_pic_cnt: see H264 specification.
+ * @cabac_init_idc: see H264 specification.
+ * @slice_qp_delta: see H264 specification.
+ * @slice_qs_delta: see H264 specification.
+ * @disable_deblocking_filter_idc: see H264 specification.
+ * @slice_alpha_c0_offset_div2: see H264 specification.
+ * @slice_beta_offset_div2: see H264 specification.
+ * @num_ref_idx_l0_active_minus1: see H264 specification.
+ * @num_ref_idx_l1_active_minus1: see H264 specification.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @ref_pic_list0: reference picture list 0 after applying the per-slice modifications.
+ * @ref_pic_list1: reference picture list 1 after applying the per-slice modifications.
+ * @flags: see V4L2_H264_SLICE_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_slice_params {
+ __u32 header_bit_size;
+ __u32 first_mb_in_slice;
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __u8 redundant_pic_cnt;
+ __u8 cabac_init_idc;
+ __s8 slice_qp_delta;
+ __s8 slice_qs_delta;
+ __u8 disable_deblocking_filter_idc;
+ __s8 slice_alpha_c0_offset_div2;
+ __s8 slice_beta_offset_div2;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+
+ __u8 reserved;
+
+ struct v4l2_h264_reference ref_pic_list0[V4L2_H264_REF_LIST_LEN];
+ struct v4l2_h264_reference ref_pic_list1[V4L2_H264_REF_LIST_LEN];
+
+ __u32 flags;
+};
+
+#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01
+#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02
+#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04
+#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08
+
+/**
+ * struct v4l2_h264_dpb_entry - H264 decoded picture buffer entry
+ *
+ * @reference_ts: timestamp of the V4L2 capture buffer to use as reference.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @pic_num: matches PicNum variable assigned during the reference
+ * picture lists construction process.
+ * @frame_num: frame identifier which matches frame_num syntax element.
+ * @fields: indicates how the DPB entry is referenced. Valid values are
+ * V4L2_H264_{}_REF.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @top_field_order_cnt: matches TopFieldOrderCnt picture value.
+ * @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value.
+ * Note that the picture field is indicated by v4l2_buffer.field.
+ * @flags: see V4L2_H264_DPB_ENTRY_FLAG_{}.
+ */
+struct v4l2_h264_dpb_entry {
+ __u64 reference_ts;
+ __u32 pic_num;
+ __u16 frame_num;
+ __u8 fields;
+ __u8 reserved[5];
+ __s32 top_field_order_cnt;
+ __s32 bottom_field_order_cnt;
+ __u32 flags;
+};
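
The reference_ts conversion mentioned above is simple enough to spell out: v4l2_timeval_to_ns() is kernel-internal, so userspace computes the same value directly from the struct timeval in v4l2_buffer. v4l2_buffer_timestamp_ns() is a hypothetical helper:

	#include <linux/videodev2.h>

	static __u64 v4l2_buffer_timestamp_ns(const struct v4l2_buffer *buf)
	{
		/* Same value v4l2_timeval_to_ns() produces in the kernel. */
		return buf->timestamp.tv_sec * 1000000000ULL +
		       buf->timestamp.tv_usec * 1000ULL;
	}

A DPB entry is then filled with dpb[i].reference_ts = v4l2_buffer_timestamp_ns(&ref_buf);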
+
+#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
+#define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02
+#define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04
+#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08
+#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10
+
+#define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7)
+/**
+ * struct v4l2_ctrl_h264_decode_params - H264 decoding parameters
+ *
+ * @dpb: decoded picture buffer.
+ * @nal_ref_idc: slice header syntax element.
+ * @frame_num: slice header syntax element.
+ * @top_field_order_cnt: matches TopFieldOrderCnt picture value.
+ * @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value.
+ * Note that the picture field is indicated by v4l2_buffer.field.
+ * @idr_pic_id: slice header syntax element.
+ * @pic_order_cnt_lsb: slice header syntax element.
+ * @delta_pic_order_cnt_bottom: slice header syntax element.
+ * @delta_pic_order_cnt0: slice header syntax element.
+ * @delta_pic_order_cnt1: slice header syntax element.
+ * @dec_ref_pic_marking_bit_size: size in bits of dec_ref_pic_marking()
+ * syntax element.
+ * @pic_order_cnt_bit_size: size in bits of pic order count syntax.
+ * @slice_group_change_cycle: slice header syntax element.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_H264_DECODE_PARAM_FLAG_{}.
+ */
+struct v4l2_ctrl_h264_decode_params {
+ struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES];
+ __u16 nal_ref_idc;
+ __u16 frame_num;
+ __s32 top_field_order_cnt;
+ __s32 bottom_field_order_cnt;
+ __u16 idr_pic_id;
+ __u16 pic_order_cnt_lsb;
+ __s32 delta_pic_order_cnt_bottom;
+ __s32 delta_pic_order_cnt0;
+ __s32 delta_pic_order_cnt1;
+ __u32 dec_ref_pic_marking_bit_size;
+ __u32 pic_order_cnt_bit_size;
+ __u32 slice_group_change_cycle;
+
+ __u32 reserved;
+ __u32 flags;
+};
+
+
+/* Stateless FWHT control, used by the vicodec driver */
+
+/* Current FWHT version */
+#define V4L2_FWHT_VERSION 3
+
+/* Set if this is an interlaced format */
+#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0)
+/* Set if this is a bottom-first (NTSC) interlaced format */
+#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1)
+/* Set if each 'frame' contains just one field */
+#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2)
+/*
+ * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this
+ * 'frame' is the bottom field, else it is the top field.
+ */
+#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3)
+/* Set if the Y' plane is uncompressed */
+#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4)
+/* Set if the Cb plane is uncompressed */
+#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5)
+/* Set if the Cr plane is uncompressed */
+#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6)
+/* Set if the chroma plane is full height; if cleared it is half height */
+#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7)
+/* Set if the chroma plane is full width; if cleared it is half width */
+#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8)
+/* Set if the alpha plane is uncompressed */
+#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9)
+/* Set if this is an I Frame */
+#define V4L2_FWHT_FL_I_FRAME _BITUL(10)
+
+/* A four-value field: the number of components minus 1 */
+#define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
+#define V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET 16
+
+/* A four-value field: the pixel encoding type */
+#define V4L2_FWHT_FL_PIXENC_MSK GENMASK(20, 19)
+#define V4L2_FWHT_FL_PIXENC_OFFSET 19
+#define V4L2_FWHT_FL_PIXENC_YUV (1 << V4L2_FWHT_FL_PIXENC_OFFSET)
+#define V4L2_FWHT_FL_PIXENC_RGB (2 << V4L2_FWHT_FL_PIXENC_OFFSET)
+#define V4L2_FWHT_FL_PIXENC_HSV (3 << V4L2_FWHT_FL_PIXENC_OFFSET)
+
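Hypothetical helpers showing how the two multi-bit fields unpack; note the V4L2_FWHT_FL_PIXENC_* values are defined already shifted, so the masked flags word compares against them directly:

	#include <linux/videodev2.h>

	static unsigned int fwht_num_components(__u32 flags)
	{
		/* Field stores the component count minus 1. */
		return ((flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK) >>
			V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET) + 1;
	}

	static __u32 fwht_pixenc(__u32 flags)
	{
		/* Compare the result against V4L2_FWHT_FL_PIXENC_YUV/RGB/HSV. */
		return flags & V4L2_FWHT_FL_PIXENC_MSK;
	}
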
+#define V4L2_CID_STATELESS_FWHT_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 100)
+/**
+ * struct v4l2_ctrl_fwht_params - FWHT parameters
+ *
+ * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as reference.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @version: must be V4L2_FWHT_VERSION.
+ * @width: width of frame.
+ * @height: height of frame.
+ * @flags: FWHT flags (see V4L2_FWHT_FL_*).
+ * @colorspace: the colorspace (enum v4l2_colorspace).
+ * @xfer_func: the transfer function (enum v4l2_xfer_func).
+ * @ycbcr_enc: the Y'CbCr encoding (enum v4l2_ycbcr_encoding).
+ * @quantization: the quantization (enum v4l2_quantization).
+ */
+struct v4l2_ctrl_fwht_params {
+ __u64 backward_ref_ts;
+ __u32 version;
+ __u32 width;
+ __u32 height;
+ __u32 flags;
+ __u32 colorspace;
+ __u32 xfer_func;
+ __u32 ycbcr_enc;
+ __u32 quantization;
+};
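
A sketch of filling the control for a progressive, 3-plane 4:2:0 YUV frame. init_fwht_params() is hypothetical, and the colorimetry fields are left at their zero (DEFAULT) values for brevity:

	#include <string.h>
	#include <linux/videodev2.h>

	static void init_fwht_params(struct v4l2_ctrl_fwht_params *p,
				     __u32 width, __u32 height, __u64 ref_ts)
	{
		memset(p, 0, sizeof(*p));
		p->version = V4L2_FWHT_VERSION;
		p->width = width;
		p->height = height;
		p->backward_ref_ts = ref_ts;
		/* 3 components (the field stores count minus 1), YUV pixel
		 * encoding; both CHROMA_FULL flags cleared means chroma at
		 * half width and half height, i.e. 4:2:0. */
		p->flags = V4L2_FWHT_FL_PIXENC_YUV |
			   (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
	}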
+
+/* Stateless VP8 control */
+
+#define V4L2_VP8_SEGMENT_FLAG_ENABLED 0x01
+#define V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP8_SEGMENT_FLAG_UPDATE_FEATURE_DATA 0x04
+#define V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE 0x08
+
+/**
+ * struct v4l2_vp8_segment - VP8 segment-based adjustments parameters
+ *
+ * @quant_update: update values for the segment quantizer.
+ * @lf_update: update values for the loop filter level.
+ * @segment_probs: branch probabilities of the segment_id decoding tree.
+ * @padding: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_VP8_SEGMENT_FLAG_{}.
+ *
+ * This structure contains parameters related to segment-based adjustments.
+ * See the 'update_segmentation()' part of the frame header syntax,
+ * and section '9.3. Segment-Based Adjustments' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_segment {
+ __s8 quant_update[4];
+ __s8 lf_update[4];
+ __u8 segment_probs[3];
+ __u8 padding;
+ __u32 flags;
+};
+
+#define V4L2_VP8_LF_ADJ_ENABLE 0x01
+#define V4L2_VP8_LF_DELTA_UPDATE 0x02
+#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04
+
+/**
+ * struct v4l2_vp8_loop_filter - VP8 loop filter parameters
+ *
+ * @ref_frm_delta: Reference frame signed delta values.
+ * @mb_mode_delta: MB prediction mode signed delta values.
+ * @sharpness_level: matches sharpness_level syntax element.
+ * @level: matches loop_filter_level syntax element.
+ * @padding: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_VP8_LF_{}.
+ *
+ * This structure contains loop filter related parameters.
+ * See the 'mb_lf_adjustments()' part of the frame header syntax,
+ * and section '9.4. Loop Filter Type and Levels' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_loop_filter {
+ __s8 ref_frm_delta[4];
+ __s8 mb_mode_delta[4];
+ __u8 sharpness_level;
+ __u8 level;
+ __u16 padding;
+ __u32 flags;
+};
+
+/**
+ * struct v4l2_vp8_quantization - VP8 quantization indices
+ *
+ * @y_ac_qi: luma AC coefficient table index.
+ * @y_dc_delta: luma DC delta value.
+ * @y2_dc_delta: y2 block DC delta value.
+ * @y2_ac_delta: y2 block AC delta value.
+ * @uv_dc_delta: chroma DC delta value.
+ * @uv_ac_delta: chroma AC delta value.
+ * @padding: padding field. Should be zeroed by applications.
+ *
+ * This structure contains the quantization indices present
+ * in the 'quant_indices()' part of the frame header syntax.
+ * See section '9.6. Dequantization Indices' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_quantization {
+ __u8 y_ac_qi;
+ __s8 y_dc_delta;
+ __s8 y2_dc_delta;
+ __s8 y2_ac_delta;
+ __s8 uv_dc_delta;
+ __s8 uv_ac_delta;
+ __u16 padding;
+};
+
+#define V4L2_VP8_COEFF_PROB_CNT 11
+#define V4L2_VP8_MV_PROB_CNT 19
+
+/**
+ * struct v4l2_vp8_entropy - VP8 update probabilities
+ *
+ * @coeff_probs: coefficient probability update values.
+ * @y_mode_probs: luma intra-prediction probabilities.
+ * @uv_mode_probs: chroma intra-prediction probabilities.
+ * @mv_probs: motion vector decoding probabilities.
+ * @padding: padding field. Should be zeroed by applications.
+ *
+ * This structure contains the update probabilities present in
+ * 'token_prob_update()' and 'mv_prob_update()' parts of the frame header.
+ * See section '17.2. Probability Updates' of the VP8 specification
+ * for more details.
+ */
+struct v4l2_vp8_entropy {
+ __u8 coeff_probs[4][8][3][V4L2_VP8_COEFF_PROB_CNT];
+ __u8 y_mode_probs[4];
+ __u8 uv_mode_probs[3];
+ __u8 mv_probs[2][V4L2_VP8_MV_PROB_CNT];
+ __u8 padding[3];
+};
+
+/**
+ * struct v4l2_vp8_entropy_coder_state - VP8 boolean coder state
+ *
+ * @range: coder state value for "Range"
+ * @value: coder state value for "Value"
+ * @bit_count: number of bits left in "Value".
+ * @padding: padding field. Should be zeroed by applications.
+ *
+ * This structure contains the state for the boolean coder, as
+ * explained in section '7. Boolean Entropy Decoder' of the VP8 specification.
+ */
+struct v4l2_vp8_entropy_coder_state {
+ __u8 range;
+ __u8 value;
+ __u8 bit_count;
+ __u8 padding;
+};
+
+#define V4L2_VP8_FRAME_FLAG_KEY_FRAME 0x01
+#define V4L2_VP8_FRAME_FLAG_EXPERIMENTAL 0x02
+#define V4L2_VP8_FRAME_FLAG_SHOW_FRAME 0x04
+#define V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF 0x08
+#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN 0x10
+#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT 0x20
+
+#define V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) \
+ (!!((hdr)->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME))
+
+#define V4L2_CID_STATELESS_VP8_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 200)
+/**
+ * struct v4l2_ctrl_vp8_frame - VP8 frame parameters
+ *
+ * @segment: segmentation parameters. See &v4l2_vp8_segment for more details
+ * @lf: loop filter parameters. See &v4l2_vp8_loop_filter for more details
+ * @quant: quantization parameters. See &v4l2_vp8_quantization for more details
+ * @entropy: update probabilities. See &v4l2_vp8_entropy for more details
+ * @coder_state: boolean coder state. See &v4l2_vp8_entropy_coder_state for more details
+ * @width: frame width.
+ * @height: frame height.
+ * @horizontal_scale: horizontal scaling factor.
+ * @vertical_scale: vertical scaling factor.
+ * @version: bitstream version.
+ * @prob_skip_false: frame header syntax element.
+ * @prob_intra: frame header syntax element.
+ * @prob_last: frame header syntax element.
+ * @prob_gf: frame header syntax element.
+ * @num_dct_parts: number of DCT coefficient partitions.
+ * @first_part_size: size of the first partition, i.e. the control partition.
+ * @first_part_header_bits: size in bits of the first partition header portion.
+ * @dct_part_sizes: sizes of the DCT coefficient partitions.
+ * @last_frame_ts: "last" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @golden_frame_ts: "golden" reference buffer timestamp.
+ * @alt_frame_ts: "alt" reference buffer timestamp.
+ * @flags: see V4L2_VP8_FRAME_FLAG_{}.
+ */
+struct v4l2_ctrl_vp8_frame {
+ struct v4l2_vp8_segment segment;
+ struct v4l2_vp8_loop_filter lf;
+ struct v4l2_vp8_quantization quant;
+ struct v4l2_vp8_entropy entropy;
+ struct v4l2_vp8_entropy_coder_state coder_state;
+
+ __u16 width;
+ __u16 height;
+
+ __u8 horizontal_scale;
+ __u8 vertical_scale;
+
+ __u8 version;
+ __u8 prob_skip_false;
+ __u8 prob_intra;
+ __u8 prob_last;
+ __u8 prob_gf;
+ __u8 num_dct_parts;
+
+ __u32 first_part_size;
+ __u32 first_part_header_bits;
+ __u32 dct_part_sizes[8];
+
+ __u64 last_frame_ts;
+ __u64 golden_frame_ts;
+ __u64 alt_frame_ts;
+
+ __u64 flags;
+};
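+
+/*
+ * Informative example (not part of the UAPI): the *_frame_ts fields above
+ * carry the struct v4l2_buffer timestamp of the matching reference CAPTURE
+ * buffer, converted to nanoseconds. A minimal userspace sketch, with a
+ * hypothetical helper name; the arithmetic is the same conversion that
+ * v4l2_timeval_to_ns() performs:
+ *
+ *    static __u64 buf_to_ref_ts(const struct v4l2_buffer *buf)
+ *    {
+ *            return buf->timestamp.tv_sec * 1000000000ULL +
+ *                   buf->timestamp.tv_usec * 1000ULL;
+ *    }
+ *
+ *    frame.last_frame_ts = buf_to_ref_ts(&last_ref_buf);
+ */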
+
+/* Stateless MPEG-2 controls */
+
+#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01
+
+#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220)
+/**
+ * struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header
+ *
+ * All the members on this structure match the sequence header and sequence
+ * extension syntaxes as specified by the MPEG-2 specification.
+ *
+ * Fields horizontal_size, vertical_size and vbv_buffer_size are a
+ * combination of respective _value and extension syntax elements,
+ * as described in section 6.3.3 "Sequence header".
+ *
+ * @horizontal_size: combination of elements horizontal_size_value and
+ * horizontal_size_extension.
+ * @vertical_size: combination of elements vertical_size_value and
+ * vertical_size_extension.
+ * @vbv_buffer_size: combination of elements vbv_buffer_size_value and
+ * vbv_buffer_size_extension.
+ * @profile_and_level_indication: see MPEG-2 specification.
+ * @chroma_format: see MPEG-2 specification.
+ * @flags: see V4L2_MPEG2_SEQ_FLAG_{}.
+ */
+struct v4l2_ctrl_mpeg2_sequence {
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+ __u16 profile_and_level_indication;
+ __u8 chroma_format;
+ __u8 flags;
+};
+
+#define V4L2_MPEG2_PIC_CODING_TYPE_I 1
+#define V4L2_MPEG2_PIC_CODING_TYPE_P 2
+#define V4L2_MPEG2_PIC_CODING_TYPE_B 3
+#define V4L2_MPEG2_PIC_CODING_TYPE_D 4
+
+#define V4L2_MPEG2_PIC_TOP_FIELD 0x1
+#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2
+#define V4L2_MPEG2_PIC_FRAME 0x3
+
+#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001
+#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002
+#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004
+#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008
+#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010
+#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020
+#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040
+#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080
+
+#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221)
+/**
+ * struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header
+ *
+ * All the members on this structure match the picture header and picture
+ * coding extension syntaxes as specified by the MPEG-2 specification.
+ *
+ * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as
+ * reference for backward prediction.
+ * @forward_ref_ts: timestamp of the V4L2 capture buffer to use as
+ * reference for forward prediction. These timestamps refer to the
+ * timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns()
+ * to convert the struct timeval to a __u64.
+ * @flags: see V4L2_MPEG2_PIC_FLAG_{}.
+ * @f_code: see MPEG-2 specification.
+ * @picture_coding_type: see MPEG-2 specification.
+ * @picture_structure: see V4L2_MPEG2_PIC_{}.
+ * @intra_dc_precision: see MPEG-2 specification.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_mpeg2_picture {
+ __u64 backward_ref_ts;
+ __u64 forward_ref_ts;
+ __u32 flags;
+ __u8 f_code[2][2];
+ __u8 picture_coding_type;
+ __u8 picture_structure;
+ __u8 intra_dc_precision;
+ __u8 reserved[5];
+};
+
+#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222)
+/**
+ * struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation
+ *
+ * Quantisation matrices as specified by section 6.3.7
+ * "Quant matrix extension".
+ *
+ * @intra_quantiser_matrix: The quantisation matrix coefficients
+ * for intra-coded frames, in zigzag scanning order. It is relevant
+ * for both luma and chroma components, although it can be superseded
+ * by the chroma-specific matrix for non-4:2:0 YUV formats.
+ * @non_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for non-intra-coded frames, in zigzag scanning order. It is relevant
+ * for both luma and chroma components, although it can be superseded
+ * by the chroma-specific matrix for non-4:2:0 YUV formats.
+ * @chroma_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for the chrominance component of intra-coded frames, in zigzag scanning
+ * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
+ * @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for the chrominance component of non-intra-coded frames, in zigzag scanning
+ * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
+ */
+struct v4l2_ctrl_mpeg2_quantisation {
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
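+
+/*
+ * Informative example (not part of the UAPI): the three stateless MPEG-2
+ * controls above are compound controls, typically set together through a
+ * media request. A sketch, assuming hypothetical variables seq, pic and
+ * quant filled from the parsed bitstream and a request fd req_fd:
+ *
+ *    struct v4l2_ext_control ctrls[] = {
+ *            { .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
+ *              .size = sizeof(seq), .ptr = &seq },
+ *            { .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
+ *              .size = sizeof(pic), .ptr = &pic },
+ *            { .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
+ *              .size = sizeof(quant), .ptr = &quant },
+ *    };
+ *    struct v4l2_ext_controls arg = {
+ *            .which = V4L2_CTRL_WHICH_REQUEST_VAL,
+ *            .request_fd = req_fd,
+ *            .count = 3,
+ *            .controls = ctrls,
+ *    };
+ *    ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &arg);
+ */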
+
+#define V4L2_CID_STATELESS_HEVC_SPS (V4L2_CID_CODEC_STATELESS_BASE + 400)
+#define V4L2_CID_STATELESS_HEVC_PPS (V4L2_CID_CODEC_STATELESS_BASE + 401)
+#define V4L2_CID_STATELESS_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 402)
+#define V4L2_CID_STATELESS_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 403)
+#define V4L2_CID_STATELESS_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 404)
+#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405)
+#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406)
+#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407)
+
+enum v4l2_stateless_hevc_decode_mode {
+ V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+};
+
+enum v4l2_stateless_hevc_start_code {
+ V4L2_STATELESS_HEVC_START_CODE_NONE,
+ V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+};
+
+#define V4L2_HEVC_SLICE_TYPE_B 0
+#define V4L2_HEVC_SLICE_TYPE_P 1
+#define V4L2_HEVC_SLICE_TYPE_I 2
+
+#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
+#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
+#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
+#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
+#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
+#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
+#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
+#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
+
+/**
+ * struct v4l2_ctrl_hevc_sps - ITU-T Rec. H.265: Sequence parameter set
+ *
+ * @video_parameter_set_id: specifies the value of the
+ * vps_video_parameter_set_id of the active VPS
+ * @seq_parameter_set_id: provides an identifier for the SPS for
+ * reference by other syntax elements
+ * @pic_width_in_luma_samples: specifies the width of each decoded picture
+ * in units of luma samples
+ * @pic_height_in_luma_samples: specifies the height of each decoded picture
+ * in units of luma samples
+ * @bit_depth_luma_minus8: this value plus 8 specifies the bit depth of the
+ * samples of the luma array
+ * @bit_depth_chroma_minus8: this value plus 8 specifies the bit depth of the
+ * samples of the chroma arrays
+ * @log2_max_pic_order_cnt_lsb_minus4: this value plus 4 specifies the value of
+ * the variable MaxPicOrderCntLsb
+ * @sps_max_dec_pic_buffering_minus1: this value plus 1 specifies the maximum
+ * required size of the decoded picture
+ * buffer for the codec video sequence
+ * @sps_max_num_reorder_pics: indicates the maximum allowed number of pictures
+ * that can precede any picture in decoding order
+ * and follow it in output order
+ * @sps_max_latency_increase_plus1: when not equal to 0, this value is used to
+ * compute the value of the SpsMaxLatencyPictures array
+ * @log2_min_luma_coding_block_size_minus3: this value plus 3 specifies the
+ * minimum luma coding block size
+ * @log2_diff_max_min_luma_coding_block_size: specifies the difference between
+ * the maximum and minimum luma
+ * coding block size
+ * @log2_min_luma_transform_block_size_minus2: this value plus 2 specifies the
+ * minimum luma transform block size
+ * @log2_diff_max_min_luma_transform_block_size: specifies the difference between
+ * the maximum and minimum luma
+ * transform block size
+ * @max_transform_hierarchy_depth_inter: specifies the maximum hierarchy
+ * depth for transform units of
+ * coding units coded in inter
+ * prediction mode
+ * @max_transform_hierarchy_depth_intra: specifies the maximum hierarchy
+ * depth for transform units of
+ * coding units coded in intra
+ * prediction mode
+ * @pcm_sample_bit_depth_luma_minus1: this value plus 1 specifies the number of
+ * bits used to represent each of the PCM
+ * sample values of the luma component
+ * @pcm_sample_bit_depth_chroma_minus1: this value plus 1 specifies the number
+ * of bits used to represent each of the PCM
+ * sample values of the chroma components
+ * @log2_min_pcm_luma_coding_block_size_minus3: this value plus 3 specifies the
+ * minimum size of coding blocks
+ * @log2_diff_max_min_pcm_luma_coding_block_size: specifies the difference between
+ * the maximum and minimum size of
+ * coding blocks
+ * @num_short_term_ref_pic_sets: specifies the number of st_ref_pic_set()
+ * syntax structures included in the SPS
+ * @num_long_term_ref_pics_sps: specifies the number of candidate long-term
+ * reference pictures that are specified in the SPS
+ * @chroma_format_idc: specifies the chroma sampling
+ * @sps_max_sub_layers_minus1: this value plus 1 specifies the maximum number
+ * of temporal sub-layers
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_sps {
+ __u8 video_parameter_set_id;
+ __u8 seq_parameter_set_id;
+ __u16 pic_width_in_luma_samples;
+ __u16 pic_height_in_luma_samples;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 sps_max_dec_pic_buffering_minus1;
+ __u8 sps_max_num_reorder_pics;
+ __u8 sps_max_latency_increase_plus1;
+ __u8 log2_min_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_luma_coding_block_size;
+ __u8 log2_min_luma_transform_block_size_minus2;
+ __u8 log2_diff_max_min_luma_transform_block_size;
+ __u8 max_transform_hierarchy_depth_inter;
+ __u8 max_transform_hierarchy_depth_intra;
+ __u8 pcm_sample_bit_depth_luma_minus1;
+ __u8 pcm_sample_bit_depth_chroma_minus1;
+ __u8 log2_min_pcm_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_pcm_luma_coding_block_size;
+ __u8 num_short_term_ref_pic_sets;
+ __u8 num_long_term_ref_pics_sps;
+ __u8 chroma_format_idc;
+ __u8 sps_max_sub_layers_minus1;
+
+ __u8 reserved[6];
+ __u64 flags;
+};
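+
+/*
+ * Informative example (not part of the UAPI): the _minus/_diff fields keep
+ * the H.265 syntax element encoding, so derived values are computed as in
+ * the specification. For instance, the coding tree block size follows from
+ * section 7.4.3.2:
+ *
+ *    unsigned int min_cb_log2 = sps->log2_min_luma_coding_block_size_minus3 + 3;
+ *    unsigned int ctb_log2 = min_cb_log2 +
+ *                            sps->log2_diff_max_min_luma_coding_block_size;
+ *    unsigned int ctb_size = 1U << ctb_log2; /* CtbSizeY, in luma samples */
+ */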
+
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
+#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
+#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
+#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
+#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
+#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
+#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
+#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
+#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
+#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
+#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
+#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
+#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
+#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
+#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19)
+#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20)
+
+/**
+ * struct v4l2_ctrl_hevc_pps - ITU-T Rec. H.265: Picture parameter set
+ *
+ * @pic_parameter_set_id: identifies the PPS for reference by other
+ * syntax elements
+ * @num_extra_slice_header_bits: specifies the number of extra slice header
+ * bits that are present in the slice header RBSP
+ * for coded pictures referring to the PPS.
+ * @num_ref_idx_l0_default_active_minus1: this value plus 1 specifies the
+ * inferred value of num_ref_idx_l0_active_minus1
+ * @num_ref_idx_l1_default_active_minus1: this value plus 1 specifies the
+ * inferred value of num_ref_idx_l1_active_minus1
+ * @init_qp_minus26: this value plus 26 specifies the initial value of SliceQpY for
+ * each slice referring to the PPS
+ * @diff_cu_qp_delta_depth: specifies the difference between the luma coding
+ * tree block size and the minimum luma coding block
+ * size of coding units that convey cu_qp_delta_abs
+ * and cu_qp_delta_sign_flag
+ * @pps_cb_qp_offset: specifies the offset to the luma quantization parameter
+ * used to derive the Cb quantization parameter
+ * @pps_cr_qp_offset: specifies the offset to the luma quantization parameter
+ * used to derive the Cr quantization parameter
+ * @num_tile_columns_minus1: this value plus 1 specifies the number of tile columns
+ * partitioning the picture
+ * @num_tile_rows_minus1: this value plus 1 specifies the number of tile rows partitioning
+ * the picture
+ * @column_width_minus1: this value plus 1 specifies the width of each tile
+ * column in units of coding tree blocks
+ * @row_height_minus1: this value plus 1 specifies the height of each tile
+ * row in units of coding tree blocks
+ * @pps_beta_offset_div2: specify the default deblocking parameter offsets for
+ * beta divided by 2
+ * @pps_tc_offset_div2: specify the default deblocking parameter offsets for tC
+ * divided by 2
+ * @log2_parallel_merge_level_minus2: this value plus 2 specifies the value of
+ * the variable Log2ParMrgLevel
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_PPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_pps {
+ __u8 pic_parameter_set_id;
+ __u8 num_extra_slice_header_bits;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
+ __s8 init_qp_minus26;
+ __u8 diff_cu_qp_delta_depth;
+ __s8 pps_cb_qp_offset;
+ __s8 pps_cr_qp_offset;
+ __u8 num_tile_columns_minus1;
+ __u8 num_tile_rows_minus1;
+ __u8 column_width_minus1[20];
+ __u8 row_height_minus1[22];
+ __s8 pps_beta_offset_div2;
+ __s8 pps_tc_offset_div2;
+ __u8 log2_parallel_merge_level_minus2;
+ __u8 reserved;
+ __u64 flags;
+};
+
+#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01
+
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME 0
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_FIELD 1
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_FIELD 2
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM 3
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP 4
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM_TOP 5
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM 6
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING 7
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING 8
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_PREVIOUS_BOTTOM 9
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_PREVIOUS_TOP 10
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_NEXT_BOTTOM 11
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_NEXT_TOP 12
+
+#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
+
+/**
+ * struct v4l2_hevc_dpb_entry - HEVC decoded picture buffer entry
+ *
+ * @timestamp: timestamp of the V4L2 capture buffer to use as reference.
+ * @flags: long term flag for the reference frame
+ * @field_pic: whether the reference is a field picture or a frame.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @pic_order_cnt_val: the picture order count of the current picture.
+ */
+struct v4l2_hevc_dpb_entry {
+ __u64 timestamp;
+ __u8 flags;
+ __u8 field_pic;
+ __u16 reserved;
+ __s32 pic_order_cnt_val;
+};
+
+/**
+ * struct v4l2_hevc_pred_weight_table - HEVC weighted prediction parameters
+ *
+ * @delta_luma_weight_l0: the difference of the weighting factor applied
+ * to the luma prediction value for list 0
+ * @luma_offset_l0: the additive offset applied to the luma prediction value
+ * for list 0
+ * @delta_chroma_weight_l0: the difference of the weighting factor applied
+ * to the chroma prediction values for list 0
+ * @chroma_offset_l0: the difference of the additive offset applied to
+ * the chroma prediction values for list 0
+ * @delta_luma_weight_l1: the difference of the weighting factor applied
+ * to the luma prediction value for list 1
+ * @luma_offset_l1: the additive offset applied to the luma prediction value
+ * for list 1
+ * @delta_chroma_weight_l1: the difference of the weighting factor applied
+ * to the chroma prediction values for list 1
+ * @chroma_offset_l1: the difference of the additive offset applied to
+ * the chroma prediction values for list 1
+ * @luma_log2_weight_denom: the base 2 logarithm of the denominator for
+ * all luma weighting factors
+ * @delta_chroma_log2_weight_denom: the difference of the base 2 logarithm
+ * of the denominator for all chroma
+ * weighting factors
+ */
+struct v4l2_hevc_pred_weight_table {
+ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __u8 luma_log2_weight_denom;
+ __s8 delta_chroma_log2_weight_denom;
+};
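+
+/*
+ * Informative example (not part of the UAPI): the weights are stored as
+ * deltas against the denominator, following the H.265 syntax. The effective
+ * list 0 luma weight for DPB entry i can be reconstructed as in section
+ * 7.4.7.3 of the specification (pwt being a hypothetical pointer to this
+ * structure):
+ *
+ *    int luma_weight_l0 = (1 << pwt->luma_log2_weight_denom) +
+ *                         pwt->delta_luma_weight_l0[i];
+ */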
+
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
+
+/**
+ * struct v4l2_ctrl_hevc_slice_params - HEVC slice parameters
+ *
+ * This control is a dynamically sized 1-dimensional array; the
+ * V4L2_CTRL_FLAG_DYNAMIC_ARRAY flag must be set when using it.
+ *
+ * @bit_size: size (in bits) of the current slice data
+ * @data_byte_offset: offset (in bytes) to the video data in the current slice data
+ * @num_entry_point_offsets: specifies the number of entry point offset syntax
+ * elements in the slice header.
+ * @nal_unit_type: specifies the coding type of the slice (B, P or I)
+ * @nuh_temporal_id_plus1: this value minus 1 specifies a temporal identifier for the NAL unit
+ * @slice_type: see V4L2_HEVC_SLICE_TYPE_{}
+ * @colour_plane_id: specifies the colour plane associated with the current slice
+ * @slice_pic_order_cnt: specifies the picture order count
+ * @num_ref_idx_l0_active_minus1: this value plus 1 specifies the maximum
+ * reference index for reference picture list 0
+ * that may be used to decode the slice
+ * @num_ref_idx_l1_active_minus1: this value plus 1 specifies the maximum
+ * reference index for reference picture list 1
+ * that may be used to decode the slice
+ * @collocated_ref_idx: specifies the reference index of the collocated picture used
+ * for temporal motion vector prediction
+ * @five_minus_max_num_merge_cand: specifies the maximum number of merging
+ * motion vector prediction candidates supported in
+ * the slice subtracted from 5
+ * @slice_qp_delta: specifies the initial value of QpY to be used for the coding
+ * blocks in the slice
+ * @slice_cb_qp_offset: specifies a difference to be added to the value of pps_cb_qp_offset
+ * @slice_cr_qp_offset: specifies a difference to be added to the value of pps_cr_qp_offset
+ * @slice_act_y_qp_offset: screen content extension parameters
+ * @slice_act_cb_qp_offset: screen content extension parameters
+ * @slice_act_cr_qp_offset: screen content extension parameters
+ * @slice_beta_offset_div2: specify the deblocking parameter offsets for beta divided by 2
+ * @slice_tc_offset_div2: specify the deblocking parameter offsets for tC divided by 2
+ * @pic_struct: indicates whether a picture should be displayed as a frame or as one or
+ * more fields
+ * @reserved0: padding field. Should be zeroed by applications.
+ * @slice_segment_addr: specifies the address of the first coding tree block in
+ * the slice segment
+ * @ref_idx_l0: the list of L0 reference elements as indices in the DPB
+ * @ref_idx_l1: the list of L1 reference elements as indices in the DPB
+ * @short_term_ref_pic_set_size: specifies the size of the short-term reference
+ * picture set included in the SPS
+ * @long_term_ref_pic_set_size: specifies the size of the long-term reference
+ * picture set included in the SPS
+ * @pred_weight_table: the prediction weight coefficients for inter-picture
+ * prediction
+ * @reserved1: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SLICE_PARAMS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_slice_params {
+ __u32 bit_size;
+ __u32 data_byte_offset;
+ __u32 num_entry_point_offsets;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
+ __u8 nal_unit_type;
+ __u8 nuh_temporal_id_plus1;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __s32 slice_pic_order_cnt;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ __u8 collocated_ref_idx;
+ __u8 five_minus_max_num_merge_cand;
+ __s8 slice_qp_delta;
+ __s8 slice_cb_qp_offset;
+ __s8 slice_cr_qp_offset;
+ __s8 slice_act_y_qp_offset;
+ __s8 slice_act_cb_qp_offset;
+ __s8 slice_act_cr_qp_offset;
+ __s8 slice_beta_offset_div2;
+ __s8 slice_tc_offset_div2;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
+ __u8 pic_struct;
+
+ __u8 reserved0[3];
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u32 slice_segment_addr;
+ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u16 short_term_ref_pic_set_size;
+ __u16 long_term_ref_pic_set_size;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
+ struct v4l2_hevc_pred_weight_table pred_weight_table;
+
+ __u8 reserved1[2];
+ __u64 flags;
+};
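+
+/*
+ * Informative example (not part of the UAPI): as this control is a dynamic
+ * array, the number of slices is conveyed through the control size. A
+ * sketch, assuming a hypothetical params[] array holding one element per
+ * slice of the frame:
+ *
+ *    struct v4l2_ext_control ctrl = {
+ *            .id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
+ *            .size = num_slices * sizeof(struct v4l2_ctrl_hevc_slice_params),
+ *            .ptr = params,
+ *    };
+ */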
+
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2
+#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4
+
+/**
+ * struct v4l2_ctrl_hevc_decode_params - HEVC decode parameters
+ *
+ * @pic_order_cnt_val: picture order count
+ * @short_term_ref_pic_set_size: specifies the size of the short-term reference
+ * picture set included in the SPS of the first slice
+ * @long_term_ref_pic_set_size: specifies the size of the long-term reference
+ * picture set included in the SPS of the first slice
+ * @num_active_dpb_entries: the number of entries in @dpb
+ * @num_poc_st_curr_before: the number of reference pictures in the short-term
+ * set that come before the current frame
+ * @num_poc_st_curr_after: the number of reference pictures in the short-term
+ * set that come after the current frame
+ * @num_poc_lt_curr: the number of reference pictures in the long-term set
+ * @poc_st_curr_before: provides the indices in @dpb of the short-term
+ * references that come before the current frame
+ * @poc_st_curr_after: provides the indices in @dpb of the short-term
+ * references that come after the current frame
+ * @poc_lt_curr: provides the indices in @dpb of the long-term references
+ * @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx],
+ * can be used to parse the RPS data in slice headers
+ * instead of skipping it with @short_term_ref_pic_set_size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @dpb: the decoded picture buffer, for meta-data about reference frames
+ * @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_decode_params {
+ __s32 pic_order_cnt_val;
+ __u16 short_term_ref_pic_set_size;
+ __u16 long_term_ref_pic_set_size;
+ __u8 num_active_dpb_entries;
+ __u8 num_poc_st_curr_before;
+ __u8 num_poc_st_curr_after;
+ __u8 num_poc_lt_curr;
+ __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 num_delta_pocs_of_ref_rps_idx;
+ __u8 reserved[3];
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u64 flags;
+};
+
+/**
+ * struct v4l2_ctrl_hevc_scaling_matrix - HEVC scaling lists parameters
+ *
+ * @scaling_list_4x4: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_8x8: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_16x16: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_32x32: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_dc_coef_16x16: scaling list is used for the scaling process
+ * for transform coefficients. The values on each
+ * scaling list are expected in raster scan order.
+ * @scaling_list_dc_coef_32x32: scaling list is used for the scaling process
+ * for transform coefficients. The values on each
+ * scaling list are expected in raster scan order.
+ */
+struct v4l2_ctrl_hevc_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+ __u8 scaling_list_16x16[6][64];
+ __u8 scaling_list_32x32[2][64];
+ __u8 scaling_list_dc_coef_16x16[6];
+ __u8 scaling_list_dc_coef_32x32[2];
+};
+
+#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
+#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
+
+#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0)
+
+struct v4l2_ctrl_hdr10_cll_info {
+ __u16 max_content_light_level;
+ __u16 max_pic_average_light_level;
+};
+
+#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1)
+
+#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5
+#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000
+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5
+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000
+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5
+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000
+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5
+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000
+#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000
+#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000
+#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1
+#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000
+
+struct v4l2_ctrl_hdr10_mastering_display {
+ __u16 display_primaries_x[3];
+ __u16 display_primaries_y[3];
+ __u16 white_point_x;
+ __u16 white_point_y;
+ __u32 max_display_mastering_luminance;
+ __u32 min_display_mastering_luminance;
+};
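+
+/*
+ * Informative example (not part of the UAPI): the *_LOW/*_HIGH defines
+ * above bound the valid ranges of the mastering display fields. A sketch
+ * of a driver-side validity check for the first display primary:
+ *
+ *    if (mdcv->display_primaries_x[0] < V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
+ *        mdcv->display_primaries_x[0] > V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH)
+ *            return -EINVAL;
+ */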
+
+/* Stateless VP9 controls */
+
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+
+/**
+ * struct v4l2_vp9_loop_filter - VP9 loop filter parameters
+ *
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present in the bitstream,
+ * users should pass its last value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on the
+ * chosen mode. If this syntax element is not present in the bitstream, users should
+ * pass its last value.
+ * @level: indicates the loop filter strength.
+ * @sharpness: indicates the sharpness level.
+ * @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * This structure contains all loop filter related parameters. See section
+ * '7.2.8 Loop filter semantics' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_loop_filter {
+ __s8 ref_deltas[4];
+ __s8 mode_deltas[2];
+ __u8 level;
+ __u8 sharpness;
+ __u8 flags;
+ __u8 reserved[7];
+};
+
+/**
+ * struct v4l2_vp9_quantization - VP9 quantization parameters
+ *
+ * @base_q_idx: indicates the base frame qindex.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx.
+ * @delta_q_uv_ac: indicates the UV AC quantizer relative to base_q_idx.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the quantization parameters. See section '7.2.9 Quantization params
+ * syntax' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_quantization {
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_uv_dc;
+ __s8 delta_q_uv_ac;
+ __u8 reserved[4];
+};
+
+#define V4L2_VP9_SEGMENTATION_FLAG_ENABLED 0x01
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x04
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA 0x08
+#define V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE 0x10
+
+#define V4L2_VP9_SEG_LVL_ALT_Q 0
+#define V4L2_VP9_SEG_LVL_ALT_L 1
+#define V4L2_VP9_SEG_LVL_REF_FRAME 2
+#define V4L2_VP9_SEG_LVL_SKIP 3
+#define V4L2_VP9_SEG_LVL_MAX 4
+
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK 0xf
+
+/**
+ * struct v4l2_vp9_segmentation - VP9 segmentation parameters
+ *
+ * @feature_data: data attached to each feature. Data entry is only valid if
+ * the feature is enabled. The array shall be indexed with segment number as
+ * the first dimension (0..7) and one of V4L2_VP9_SEG_{} as the second dimension.
+ * @feature_enabled: bitmask defining which features are enabled in each segment.
+ * The value for each segment is a combination of V4L2_VP9_SEGMENT_FEATURE_ENABLED(id)
+ * values where id is one of V4L2_VP9_SEG_LVL_{}.
+ * @tree_probs: specifies the probability values to be used when decoding a
+ * Segment-ID. See '5.15. Segmentation map' section of the VP9 specification
+ * for more details.
+ * @pred_probs: specifies the probability values to be used when decoding a
+ * Predicted-Segment-ID. See '6.4.14. Get segment id syntax' section of :ref:`vp9`
+ * for more details.
+ * @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the segmentation parameters. See section '7.2.10 Segmentation
+ * params syntax' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_segmentation {
+ __s16 feature_data[8][4];
+ __u8 feature_enabled[8];
+ __u8 tree_probs[7];
+ __u8 pred_probs[3];
+ __u8 flags;
+ __u8 reserved[5];
+};
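+
+/*
+ * Informative example (not part of the UAPI): checking whether the
+ * alternate quantizer feature is active for a hypothetical segment id sid,
+ * and fetching its data:
+ *
+ *    bool alt_q_enabled = seg->feature_enabled[sid] &
+ *                         V4L2_VP9_SEGMENT_FEATURE_ENABLED(V4L2_VP9_SEG_LVL_ALT_Q);
+ *    __s16 alt_q = alt_q_enabled ?
+ *                  seg->feature_data[sid][V4L2_VP9_SEG_LVL_ALT_Q] : 0;
+ */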
+
+#define V4L2_VP9_FRAME_FLAG_KEY_FRAME 0x001
+#define V4L2_VP9_FRAME_FLAG_SHOW_FRAME 0x002
+#define V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT 0x004
+#define V4L2_VP9_FRAME_FLAG_INTRA_ONLY 0x008
+#define V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV 0x010
+#define V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX 0x020
+#define V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE 0x040
+#define V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING 0x080
+#define V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING 0x100
+#define V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING 0x200
+
+#define V4L2_VP9_SIGN_BIAS_LAST 0x1
+#define V4L2_VP9_SIGN_BIAS_GOLDEN 0x2
+#define V4L2_VP9_SIGN_BIAS_ALT 0x4
+
+#define V4L2_VP9_RESET_FRAME_CTX_NONE 0
+#define V4L2_VP9_RESET_FRAME_CTX_SPEC 1
+#define V4L2_VP9_RESET_FRAME_CTX_ALL 2
+
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP 0
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH 1
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP 2
+#define V4L2_VP9_INTERP_FILTER_BILINEAR 3
+#define V4L2_VP9_INTERP_FILTER_SWITCHABLE 4
+
+#define V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE 0
+#define V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE 1
+#define V4L2_VP9_REFERENCE_MODE_SELECT 2
+
+#define V4L2_VP9_PROFILE_MAX 3
+
+#define V4L2_CID_STATELESS_VP9_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 300)
+/**
+ * struct v4l2_ctrl_vp9_frame - VP9 frame decoding control
+ *
+ * @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details.
+ * @quant: quantization parameters. See &v4l2_vp9_quantization for more details.
+ * @seg: segmentation parameters. See &v4l2_vp9_segmentation for more details.
+ * @flags: combination of V4L2_VP9_FRAME_FLAG_{} flags.
+ * @compressed_header_size: compressed header size in bytes.
+ * @uncompressed_header_size: uncompressed header size in bytes.
+ * @frame_width_minus_1: add 1 to it and you'll get the frame width expressed in pixels.
+ * @frame_height_minus_1: add 1 to it and you'll get the frame height expressed in pixels.
+ * @render_width_minus_1: add 1 to it and you'll get the expected render width expressed in
+ * pixels. This is not used during the decoding process but might be used by HW scalers
+ * to prepare a frame that's ready for scanout.
+ * @render_height_minus_1: add 1 to it and you'll get the expected render height expressed in
+ * pixels. This is not used during the decoding process but might be used by HW scalers
+ * to prepare a frame that's ready for scanout.
+ * @last_frame_ts: "last" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @golden_frame_ts: "golden" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @alt_frame_ts: "alt" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @ref_frame_sign_bias: a bitfield specifying whether the sign bias is set for a given
+ * reference frame. A combination of V4L2_VP9_SIGN_BIAS_{} flags.
+ * @reset_frame_context: specifies whether the frame context should be reset to default values.
+ * Either of V4L2_VP9_RESET_FRAME_CTX_{}.
+ * @frame_context_idx: frame context that should be used/updated.
+ * @profile: VP9 profile. Can be 0, 1, 2 or 3.
+ * @bit_depth: bits per components. Can be 8, 10 or 12. Note that not all profiles support
+ * 10 and/or 12 bits depths.
+ * @interpolation_filter: specifies the filter selection used for performing inter prediction.
+ * Set to one of V4L2_VP9_INTERP_FILTER_{}.
+ * @tile_cols_log2: specifies the base 2 logarithm of the width of each tile (where the width
+ * is measured in units of 8x8 blocks). Shall be less than or equal to 6.
+ * @tile_rows_log2: specifies the base 2 logarithm of the height of each tile (where the height
+ * is measured in units of 8x8 blocks).
+ * @reference_mode: specifies the type of inter prediction to be used.
+ * Set to one of V4L2_VP9_REFERENCE_MODE_{}.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_vp9_frame {
+ struct v4l2_vp9_loop_filter lf;
+ struct v4l2_vp9_quantization quant;
+ struct v4l2_vp9_segmentation seg;
+ __u32 flags;
+ __u16 compressed_header_size;
+ __u16 uncompressed_header_size;
+ __u16 frame_width_minus_1;
+ __u16 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+ __u64 last_frame_ts;
+ __u64 golden_frame_ts;
+ __u64 alt_frame_ts;
+ __u8 ref_frame_sign_bias;
+ __u8 reset_frame_context;
+ __u8 frame_context_idx;
+ __u8 profile;
+ __u8 bit_depth;
+ __u8 interpolation_filter;
+ __u8 tile_cols_log2;
+ __u8 tile_rows_log2;
+ __u8 reference_mode;
+ __u8 reserved[7];
+};
+
+#define V4L2_VP9_NUM_FRAME_CTX 4
+
+/**
+ * struct v4l2_vp9_mv_probs - VP9 Motion vector probability updates
+ * @joint: motion vector joint probability updates.
+ * @sign: motion vector sign probability updates.
+ * @classes: motion vector class probability updates.
+ * @class0_bit: motion vector class0 bit probability updates.
+ * @bits: motion vector bits probability updates.
+ * @class0_fr: motion vector class0 fractional bit probability updates.
+ * @fr: motion vector fractional bit probability updates.
+ * @class0_hp: motion vector class0 high precision fractional bit probability updates.
+ * @hp: motion vector high precision fractional bit probability updates.
+ *
+ * This structure contains new values of motion vector probabilities.
+ * A value of zero in an array element means there is no update of the relevant probability.
+ * See &v4l2_ctrl_vp9_compressed_hdr for details.
+ */
+struct v4l2_vp9_mv_probs {
+ __u8 joint[3];
+ __u8 sign[2];
+ __u8 classes[2][10];
+ __u8 class0_bit[2];
+ __u8 bits[2][10];
+ __u8 class0_fr[2][2][3];
+ __u8 fr[2][3];
+ __u8 class0_hp[2];
+ __u8 hp[2];
+};
+
+#define V4L2_CID_STATELESS_VP9_COMPRESSED_HDR (V4L2_CID_CODEC_STATELESS_BASE + 301)
+
+#define V4L2_VP9_TX_MODE_ONLY_4X4 0
+#define V4L2_VP9_TX_MODE_ALLOW_8X8 1
+#define V4L2_VP9_TX_MODE_ALLOW_16X16 2
+#define V4L2_VP9_TX_MODE_ALLOW_32X32 3
+#define V4L2_VP9_TX_MODE_SELECT 4
+
+/**
+ * struct v4l2_ctrl_vp9_compressed_hdr - VP9 probability updates control
+ * @tx_mode: specifies the TX mode. Set to one of V4L2_VP9_TX_MODE_{}.
+ * @tx8: TX 8x8 probability updates.
+ * @tx16: TX 16x16 probability updates.
+ * @tx32: TX 32x32 probability updates.
+ * @coef: coefficient probability updates.
+ * @skip: skip probability updates.
+ * @inter_mode: inter mode probability updates.
+ * @interp_filter: interpolation filter probability updates.
+ * @is_inter: is inter-block probability updates.
+ * @comp_mode: compound prediction mode probability updates.
+ * @single_ref: single ref probability updates.
+ * @comp_ref: compound ref probability updates.
+ * @y_mode: Y prediction mode probability updates.
+ * @uv_mode: UV prediction mode probability updates.
+ * @partition: partition probability updates.
+ * @mv: motion vector probability updates.
+ *
+ * This structure holds the probabilities update as parsed in the compressed
+ * header (Spec 6.3). These values represent the value of probability update after
+ * being translated with inv_map_table[] (see 6.3.5). A value of zero in an array element
+ * means that there is no update of the relevant probability.
+ *
+ * This control is optional and needs to be used when dealing with hardware
+ * which is not capable of parsing the compressed header itself. Only drivers
+ * which need it will implement it.
+ */
+struct v4l2_ctrl_vp9_compressed_hdr {
+ __u8 tx_mode;
+ __u8 tx8[2][1];
+ __u8 tx16[2][2];
+ __u8 tx32[2][3];
+ __u8 coef[4][2][2][6][6][3];
+ __u8 skip[3];
+ __u8 inter_mode[7][3];
+ __u8 interp_filter[4][2];
+ __u8 is_inter[4];
+ __u8 comp_mode[5];
+ __u8 single_ref[5][2];
+ __u8 comp_ref[5];
+ __u8 y_mode[4][9];
+ __u8 uv_mode[10][9];
+ __u8 partition[16][3];
+
+ struct v4l2_vp9_mv_probs mv;
+};
+
+/* Stateless AV1 controls */
+
+#define V4L2_AV1_TOTAL_REFS_PER_FRAME 8
+#define V4L2_AV1_CDEF_MAX 8
+#define V4L2_AV1_NUM_PLANES_MAX 3 /* 1 if monochrome, 3 otherwise */
+#define V4L2_AV1_MAX_SEGMENTS 8
+#define V4L2_AV1_MAX_OPERATING_POINTS (1 << 5) /* 5 bits to encode */
+#define V4L2_AV1_REFS_PER_FRAME 7
+#define V4L2_AV1_MAX_NUM_Y_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CB_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CR_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_AR_COEFFS_SIZE 25 /* (2 * 3 * (3 + 1)) + 1 */
+#define V4L2_AV1_MAX_NUM_PLANES 3
+#define V4L2_AV1_MAX_TILE_COLS 64
+#define V4L2_AV1_MAX_TILE_ROWS 64
+#define V4L2_AV1_MAX_TILE_COUNT 512
+
+#define V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE 0x00000001
+#define V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK 0x00000002
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA 0x00000004
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER 0x00000008
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND 0x00000010
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND 0x00000020
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION 0x00000040
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER 0x00000080
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT 0x00000100
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP 0x00000200
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES 0x00000800
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF 0x00001000
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION 0x00002000
+#define V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME 0x00004000
+#define V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE 0x00008000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X 0x00010000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y 0x00020000
+#define V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT 0x00040000
+#define V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE + 500)
+/**
+ * struct v4l2_ctrl_av1_sequence - AV1 Sequence
+ *
+ * Represents an AV1 Sequence OBU. See section 5.5 "Sequence header OBU syntax"
+ * for more details.
+ *
+ * @flags: See V4L2_AV1_SEQUENCE_FLAG_{}.
+ * @seq_profile: specifies the features that can be used in the coded video
+ * sequence.
+ * @order_hint_bits: specifies the number of bits used for the order_hint field
+ * at each frame.
+ * @bit_depth: the bitdepth to use for the sequence as described in section
+ * 5.5.2 "Color config syntax".
+ * @reserved: padding field. Should be zeroed by applications.
+ * @max_frame_width_minus_1: specifies the maximum frame width minus 1 for the
+ * frames represented by this sequence header.
+ * @max_frame_height_minus_1: specifies the maximum frame height minus 1 for the
+ * frames represented by this sequence header.
+ */
+struct v4l2_ctrl_av1_sequence {
+ __u32 flags;
+ __u8 seq_profile;
+ __u8 order_hint_bits;
+ __u8 bit_depth;
+ __u8 reserved;
+ __u16 max_frame_width_minus_1;
+ __u16 max_frame_height_minus_1;
+};
+
+#define V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (V4L2_CID_CODEC_STATELESS_BASE + 501)
+/**
+ * struct v4l2_ctrl_av1_tile_group_entry - AV1 Tile Group entry
+ *
+ * Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart,
+ * MiRowEnd, MiColStart and MiColEnd can be retrieved from struct
+ * v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and
+ * tile_col. See section 6.10.1 "General tile group OBU semantics" for more
+ * details.
+ *
+ * @tile_offset: offset from the OBU data, i.e. where the coded tile data
+ * actually starts.
+ * @tile_size: specifies the size in bytes of the coded tile. Equivalent to
+ * "TileSize" in the AV1 Specification.
+ * @tile_row: specifies the row of the current tile. Equivalent to "TileRow" in
+ * the AV1 Specification.
+ * @tile_col: specifies the col of the current tile. Equivalent to "TileCol" in
+ * the AV1 Specification.
+ */
+struct v4l2_ctrl_av1_tile_group_entry {
+ __u32 tile_offset;
+ __u32 tile_size;
+ __u32 tile_row;
+ __u32 tile_col;
+};
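+
+/*
+ * Informative example (not part of the UAPI): as noted above, the mi_*
+ * tile coordinates can be recovered from the struct v4l2_av1_tile_info
+ * embedded in struct v4l2_ctrl_av1_frame. A sketch, with tile_info and
+ * entry as hypothetical pointers:
+ *
+ *    __u32 mi_row_start = tile_info->mi_row_starts[entry->tile_row];
+ *    __u32 mi_row_end = tile_info->mi_row_starts[entry->tile_row + 1];
+ *    __u32 mi_col_start = tile_info->mi_col_starts[entry->tile_col];
+ *    __u32 mi_col_end = tile_info->mi_col_starts[entry->tile_col + 1];
+ */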
+
+/**
+ * enum v4l2_av1_warp_model - AV1 Warp Model as described in section 3
+ * "Symbols and abbreviated terms" of the AV1 Specification.
+ *
+ * @V4L2_AV1_WARP_MODEL_IDENTITY: Warp model is just an identity transform.
+ * @V4L2_AV1_WARP_MODEL_TRANSLATION: Warp model is a pure translation.
+ * @V4L2_AV1_WARP_MODEL_ROTZOOM: Warp model is a rotation + symmetric zoom +
+ * translation.
+ * @V4L2_AV1_WARP_MODEL_AFFINE: Warp model is a general affine transform.
+ */
+enum v4l2_av1_warp_model {
+ V4L2_AV1_WARP_MODEL_IDENTITY = 0,
+ V4L2_AV1_WARP_MODEL_TRANSLATION = 1,
+ V4L2_AV1_WARP_MODEL_ROTZOOM = 2,
+ V4L2_AV1_WARP_MODEL_AFFINE = 3,
+};
+
+/**
+ * enum v4l2_av1_reference_frame - AV1 reference frames
+ *
+ * @V4L2_AV1_REF_INTRA_FRAME: Intra Frame Reference
+ * @V4L2_AV1_REF_LAST_FRAME: Last Reference Frame
+ * @V4L2_AV1_REF_LAST2_FRAME: Last2 Reference Frame
+ * @V4L2_AV1_REF_LAST3_FRAME: Last3 Reference Frame
+ * @V4L2_AV1_REF_GOLDEN_FRAME: Golden Reference Frame
+ * @V4L2_AV1_REF_BWDREF_FRAME: BWD Reference Frame
+ * @V4L2_AV1_REF_ALTREF2_FRAME: Alternative2 Reference Frame
+ * @V4L2_AV1_REF_ALTREF_FRAME: Alternative Reference Frame
+ */
+enum v4l2_av1_reference_frame {
+ V4L2_AV1_REF_INTRA_FRAME = 0,
+ V4L2_AV1_REF_LAST_FRAME = 1,
+ V4L2_AV1_REF_LAST2_FRAME = 2,
+ V4L2_AV1_REF_LAST3_FRAME = 3,
+ V4L2_AV1_REF_GOLDEN_FRAME = 4,
+ V4L2_AV1_REF_BWDREF_FRAME = 5,
+ V4L2_AV1_REF_ALTREF2_FRAME = 6,
+ V4L2_AV1_REF_ALTREF_FRAME = 7,
+};
+
+#define V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) (1 << (ref))
+
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL 0x1
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM 0x2
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION 0x4
+/**
+ * struct v4l2_av1_global_motion - AV1 Global Motion parameters as described in
+ * section 6.8.17 "Global motion params semantics" of the AV1 specification.
+ *
+ * @flags: A bitfield containing the flags per reference frame. See
+ * V4L2_AV1_GLOBAL_MOTION_FLAG_{}
+ * @type: The type of global motion transform used.
+ * @params: this field has the same meaning as "gm_params" in the AV1
+ * specification.
+ * @invalid: bitfield indicating whether the global motion params are invalid
+ * for a given reference frame. See section 7.11.3.6 Setup shear process and
+ * the variable "warpValid". Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
+ * create a suitable mask.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_global_motion {
+ __u8 flags[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ enum v4l2_av1_warp_model type[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s32 params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6];
+ __u8 invalid;
+ __u8 reserved[3];
+};
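+
+/*
+ * Informative example (not part of the UAPI): testing whether the global
+ * motion parameters of a given reference frame are invalid, using the mask
+ * helper defined above (gm being a hypothetical pointer to this structure):
+ *
+ *    bool last_invalid = gm->invalid &
+ *                        V4L2_AV1_GLOBAL_MOTION_IS_INVALID(V4L2_AV1_REF_LAST_FRAME);
+ */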
+
+/**
+ * enum v4l2_av1_frame_restoration_type - AV1 Frame Restoration Type
+ * @V4L2_AV1_FRAME_RESTORE_NONE: no filtering is applied.
+ * @V4L2_AV1_FRAME_RESTORE_WIENER: Wiener filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SGRPROJ: self guided filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SWITCHABLE: restoration filter is switchable.
+ */
+enum v4l2_av1_frame_restoration_type {
+ V4L2_AV1_FRAME_RESTORE_NONE = 0,
+ V4L2_AV1_FRAME_RESTORE_WIENER = 1,
+ V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2,
+ V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3,
+};
+
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR 0x1
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR 0x2
+
+/**
+ * struct v4l2_av1_loop_restoration - AV1 Loop Restoration as described in
+ * section 6.10.15 "Loop restoration params semantics" of the AV1 specification.
+ *
+ * @flags: See V4L2_AV1_LOOP_RESTORATION_FLAG_{}.
+ * @lr_unit_shift: specifies if the luma restoration size should be halved.
+ * @lr_uv_shift: specifies if the chroma size should be half the luma size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @frame_restoration_type: specifies the type of restoration used for each
+ * plane. See enum v4l2_av1_frame_restoration_type.
+ * @loop_restoration_size: specifies the size of loop restoration units in units
+ * of samples in the current plane.
+ */
+struct v4l2_av1_loop_restoration {
+ __u8 flags;
+ __u8 lr_unit_shift;
+ __u8 lr_uv_shift;
+ __u8 reserved;
+ enum v4l2_av1_frame_restoration_type frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
+ __u32 loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES];
+};
+
+/**
+ * struct v4l2_av1_cdef - AV1 CDEF params semantics as described in section
+ * 6.10.14 "CDEF params semantics" of the AV1 specification
+ *
+ * @damping_minus_3: controls the amount of damping in the deringing filter.
+ * @bits: specifies the number of bits needed to specify which CDEF filter to
+ * apply.
+ * @y_pri_strength: specifies the strength of the primary filter.
+ * @y_sec_strength: specifies the strength of the secondary filter.
+ * @uv_pri_strength: specifies the strength of the primary filter.
+ * @uv_sec_strength: specifies the strength of the secondary filter.
+ */
+struct v4l2_av1_cdef {
+ __u8 damping_minus_3;
+ __u8 bits;
+ __u8 y_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 y_sec_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_sec_strength[V4L2_AV1_CDEF_MAX];
+};
+
+#define V4L2_AV1_SEGMENTATION_FLAG_ENABLED 0x1
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP 0x2
+#define V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x4
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA 0x8
+#define V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP 0x10
+
+/**
+ * enum v4l2_av1_segment_feature - AV1 segment features as described in section
+ * 3 "Symbols and abbreviated terms" of the AV1 specification.
+ *
+ * @V4L2_AV1_SEG_LVL_ALT_Q: Index for quantizer segment feature.
+ * @V4L2_AV1_SEG_LVL_ALT_LF_Y_V: Index for vertical luma loop filter segment
+ * feature.
+ * @V4L2_AV1_SEG_LVL_REF_FRAME: Index for reference frame segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_SKIP: Index for skip segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_GLOBALMV: Index for global mv feature.
+ * @V4L2_AV1_SEG_LVL_MAX: Number of segment features.
+ */
+enum v4l2_av1_segment_feature {
+ V4L2_AV1_SEG_LVL_ALT_Q = 0,
+ V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1,
+ V4L2_AV1_SEG_LVL_REF_FRAME = 5,
+ V4L2_AV1_SEG_LVL_REF_SKIP = 6,
+ V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7,
+ V4L2_AV1_SEG_LVL_MAX = 8
+};
+
+#define V4L2_AV1_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+
+/**
+ * struct v4l2_av1_segmentation - AV1 Segmentation params as defined in section
+ * 6.8.13 "Segmentation params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_SEGMENTATION_FLAG_{}.
+ * @last_active_seg_id: indicates the highest numbered segment id that has some
+ * enabled feature. This is used when decoding the segment id to only decode
+ * choices corresponding to used segments.
+ * @feature_enabled: bitmask defining which features are enabled in each
+ * segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
+ * @feature_data: data attached to each feature. Data entry is only valid if the
+ * feature is enabled
+ */
+struct v4l2_av1_segmentation {
+ __u8 flags;
+ __u8 last_active_seg_id;
+ __u8 feature_enabled[V4L2_AV1_MAX_SEGMENTS];
+ __s16 feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX];
+};
+
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT 0x4
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI 0x8
+
+/**
+ * struct v4l2_av1_loop_filter - AV1 Loop filter params as defined in section
+ * 6.8.10 "Loop filter semantics" and 6.8.16 "Loop filter delta parameters
+ * semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_LOOP_FILTER_FLAG_{}
+ * @level: an array containing loop filter strength values. Different loop
+ * filter strength values from the array are used depending on the image plane
+ * being filtered, and the edge direction (vertical or horizontal) being
+ * filtered.
+ * @sharpness: indicates the sharpness level. The loop_filter_level and
+ * loop_filter_sharpness together determine when a block edge is filtered, and
+ * by how much the filtering can change the sample values. The loop filter
+ * process is described in section 7.14 of the AV1 specification.
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present, it maintains
+ * its previous value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on
+ * the chosen mode. If this syntax element is not present, it maintains its
+ * previous value.
+ * @delta_lf_res: specifies the left shift which should be applied to decoded
+ * loop filter delta values.
+ */
+struct v4l2_av1_loop_filter {
+ __u8 flags;
+ __u8 level[4];
+ __u8 sharpness;
+ __s8 ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 mode_deltas[2];
+ __u8 delta_lf_res;
+};
+
+#define V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA 0x1
+#define V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX 0x2
+#define V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT 0x4
+
+/**
+ * struct v4l2_av1_quantization - AV1 Quantization params as defined in section
+ * 6.8.11 "Quantization params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_QUANTIZATION_FLAG_{}
+ * @base_q_idx: indicates the base frame qindex. This is used for Y AC
+ * coefficients and as the base value for the other quantizers.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_u_dc: indicates the U DC quantizer relative to base_q_idx.
+ * @delta_q_u_ac: indicates the U AC quantizer relative to base_q_idx.
+ * @delta_q_v_dc: indicates the V DC quantizer relative to base_q_idx.
+ * @delta_q_v_ac: indicates the V AC quantizer relative to base_q_idx.
+ * @qm_y: specifies the level in the quantizer matrix that should be used for
+ * luma plane decoding.
+ * @qm_u: specifies the level in the quantizer matrix that should be used for
+ * chroma U plane decoding.
+ * @qm_v: specifies the level in the quantizer matrix that should be used for
+ * chroma V plane decoding.
+ * @delta_q_res: specifies the left shift which should be applied to decoded
+ * quantizer index delta values.
+ */
+struct v4l2_av1_quantization {
+ __u8 flags;
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_u_dc;
+ __s8 delta_q_u_ac;
+ __s8 delta_q_v_dc;
+ __s8 delta_q_v_ac;
+ __u8 qm_y;
+ __u8 qm_u;
+ __u8 qm_v;
+ __u8 delta_q_res;
+};
+
+#define V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING 0x1
+
+/**
+ * struct v4l2_av1_tile_info - AV1 Tile info as defined in section 6.8.14 "Tile
+ * info semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_TILE_INFO_FLAG_{}
+ * @context_update_tile_id: specifies which tile to use for the CDF update.
+ * @tile_rows: specifies the number of tiles down the frame.
+ * @tile_cols: specifies the number of tiles across the frame.
+ * @mi_col_starts: an array specifying the start column (in units of 4x4 luma
+ * samples) for each tile across the image.
+ * @mi_row_starts: an array specifying the start row (in units of 4x4 luma
+ * samples) for each tile down the image.
+ * @width_in_sbs_minus_1: specifies the width of a tile minus 1 in units of
+ * superblocks.
+ * @height_in_sbs_minus_1: specifies the height of a tile minus 1 in units of
+ * superblocks.
+ * @tile_size_bytes: specifies the number of bytes needed to code each tile
+ * size.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_tile_info {
+ __u8 flags;
+ __u8 context_update_tile_id;
+ __u8 tile_cols;
+ __u8 tile_rows;
+ __u32 mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1];
+ __u32 mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1];
+ __u32 width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS];
+ __u32 height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS];
+ __u8 tile_size_bytes;
+ __u8 reserved[3];
+};
+
+/**
+ * enum v4l2_av1_frame_type - AV1 Frame Type
+ *
+ * @V4L2_AV1_KEY_FRAME: Key frame
+ * @V4L2_AV1_INTER_FRAME: Inter frame
+ * @V4L2_AV1_INTRA_ONLY_FRAME: Intra-only frame
+ * @V4L2_AV1_SWITCH_FRAME: Switch frame
+ */
+enum v4l2_av1_frame_type {
+ V4L2_AV1_KEY_FRAME = 0,
+ V4L2_AV1_INTER_FRAME = 1,
+ V4L2_AV1_INTRA_ONLY_FRAME = 2,
+ V4L2_AV1_SWITCH_FRAME = 3
+};
+
+/**
+ * enum v4l2_av1_interpolation_filter - AV1 interpolation filter types
+ *
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP: eight tap filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH: eight tap smooth filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP: eight tap sharp filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_BILINEAR: bilinear filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE: filter selection is signaled at
+ * the block level
+ *
+ * See section 6.8.9 "Interpolation filter semantics" of the AV1 specification
+ * for more details.
+ */
+enum v4l2_av1_interpolation_filter {
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2,
+ V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3,
+ V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
+};
+
+/**
+ * enum v4l2_av1_tx_mode - AV1 Tx mode as described in section 6.8.21 "TX mode
+ * semantics" of the AV1 specification.
+ * @V4L2_AV1_TX_MODE_ONLY_4X4: the inverse transform will use only 4x4
+ * transforms
+ * @V4L2_AV1_TX_MODE_LARGEST: the inverse transform will use the largest
+ * transform size that fits inside the block
+ * @V4L2_AV1_TX_MODE_SELECT: the choice of transform size is specified
+ * explicitly for each block.
+ */
+enum v4l2_av1_tx_mode {
+ V4L2_AV1_TX_MODE_ONLY_4X4 = 0,
+ V4L2_AV1_TX_MODE_LARGEST = 1,
+ V4L2_AV1_TX_MODE_SELECT = 2
+};
+
+#define V4L2_AV1_FRAME_FLAG_SHOW_FRAME 0x00000001
+#define V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME 0x00000002
+#define V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE 0x00000004
+#define V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE 0x00000008
+#define V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS 0x00000010
+#define V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV 0x00000020
+#define V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC 0x00000040
+#define V4L2_AV1_FRAME_FLAG_USE_SUPERRES 0x00000080
+#define V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV 0x00000100
+#define V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE 0x00000200
+#define V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF 0x00000800
+#define V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION 0x00001000
+#define V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT 0x00002000
+#define V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET 0x00004000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED 0x00008000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT 0x00010000
+#define V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE 0x00020000
+#define V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT 0x00040000
+#define V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 502)
+/**
+ * struct v4l2_ctrl_av1_frame - Represents an AV1 Frame Header OBU.
+ *
+ * @tile_info: tile info
+ * @quantization: quantization params
+ * @segmentation: segmentation params
+ * @superres_denom: the denominator for the upscaling ratio.
+ * @loop_filter: loop filter params
+ * @cdef: cdef params
+ * @skip_mode_frame: specifies the frames to use for compound prediction when
+ * skip_mode is equal to 1.
+ * @primary_ref_frame: specifies which reference frame contains the CDF values
+ * and other state that should be loaded at the start of the frame.
+ * @loop_restoration: loop restoration params
+ * @global_motion: global motion params
+ * @flags: see V4L2_AV1_FRAME_FLAG_{}
+ * @frame_type: specifies the AV1 frame type
+ * @order_hint: specifies OrderHintBits least significant bits of the expected
+ * output order for this frame.
+ * @upscaled_width: the upscaled width.
+ * @interpolation_filter: specifies the filter selection used for performing
+ * inter prediction.
+ * @tx_mode: specifies how the transform size is determined.
+ * @frame_width_minus_1: add 1 to get the frame's width.
+ * @frame_height_minus_1: add 1 to get the frame's height
+ * @render_width_minus_1: add 1 to get the render width of the frame in luma
+ * samples.
+ * @render_height_minus_1: add 1 to get the render height of the frame in luma
+ * samples.
+ * @current_frame_id: specifies the frame id number for the current frame. Frame
+ * id numbers are additional information that do not affect the decoding
+ * process, but provide decoders with a way of detecting missing reference
+ * frames so that appropriate action can be taken.
+ * @buffer_removal_time: specifies the frame removal time in units of DecCT clock
+ * ticks counted from the removal time of the last random access point for
+ * operating point opNum.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @order_hints: specifies the expected output order hint for each reference
+ * frame. This field corresponds to the OrderHints variable from the
+ * specification (section 5.9.2 "Uncompressed header syntax"). As such, this is
+ * only used for non-intra frames and ignored otherwise. order_hints[0] is
+ * always ignored.
+ * @reference_frame_ts: the V4L2 timestamp of the reference frame slots.
+ * @ref_frame_idx: used to index into @reference_frame_ts when decoding
+ * inter-frames. The meaning of this array is the same as in the specification.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer. Use
+ * v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @refresh_frame_flags: contains a bitmask that specifies which reference frame
+ * slots will be updated with the current frame after it is decoded.
+ */
+struct v4l2_ctrl_av1_frame {
+ struct v4l2_av1_tile_info tile_info;
+ struct v4l2_av1_quantization quantization;
+ __u8 superres_denom;
+ struct v4l2_av1_segmentation segmentation;
+ struct v4l2_av1_loop_filter loop_filter;
+ struct v4l2_av1_cdef cdef;
+ __u8 skip_mode_frame[2];
+ __u8 primary_ref_frame;
+ struct v4l2_av1_loop_restoration loop_restoration;
+ struct v4l2_av1_global_motion global_motion;
+ __u32 flags;
+ enum v4l2_av1_frame_type frame_type;
+ __u32 order_hint;
+ __u32 upscaled_width;
+ enum v4l2_av1_interpolation_filter interpolation_filter;
+ enum v4l2_av1_tx_mode tx_mode;
+ __u32 frame_width_minus_1;
+ __u32 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+
+ __u32 current_frame_id;
+ __u32 buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS];
+ __u8 reserved[4];
+ __u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __u64 reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 ref_frame_idx[V4L2_AV1_REFS_PER_FRAME];
+ __u8 refresh_frame_flags;
+};
+
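Usage illustration (not part of the patch): a stateless decoder passes this struct through the extended-control API before queueing the matching bitstream buffer. A minimal sketch, assuming the struct was already filled from the parsed frame header OBU; a real decoder would normally attach the control to a media request via V4L2_CTRL_WHICH_REQUEST_VAL and request_fd.

/* Hedged sketch: submit an AV1 frame header control. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_av1_frame(int video_fd, struct v4l2_ctrl_av1_frame *frame)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_STATELESS_AV1_FRAME;
	ctrl.size = sizeof(*frame);
	ctrl.ptr = frame;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL; /* request API in real use */
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	/* reference_frame_ts[] holds v4l2_buffer timestamps converted to
	 * nanoseconds (tv_sec * 1000000000ULL + tv_usec * 1000ULL), which
	 * is what v4l2_timeval_to_ns() computes. */
	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}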
+#define V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN 0x1
+#define V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN 0x2
+#define V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA 0x4
+#define V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP 0x8
+#define V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE 0x10
+
+#define V4L2_CID_STATELESS_AV1_FILM_GRAIN (V4L2_CID_CODEC_STATELESS_BASE + 505)
+/**
+ * struct v4l2_ctrl_av1_film_grain - AV1 Film Grain parameters.
+ *
+ * Film grain parameters as specified by section 6.8.20 of the AV1 Specification.
+ *
+ * @flags: see V4L2_AV1_FILM_GRAIN_{}.
+ * @cr_mult: represents a multiplier for the cr component used in derivation of
+ * the input index to the cr component scaling function.
+ * @grain_seed: specifies the starting value for the pseudo-random numbers used
+ * during film grain synthesis.
+ * @film_grain_params_ref_idx: indicates which reference frame contains the
+ * film grain parameters to be used for this frame.
+ * @num_y_points: specifies the number of points for the piece-wise linear
+ * scaling function of the luma component.
+ * @point_y_value: represents the x (luma value) coordinate for the i-th point
+ * of the piecewise linear scaling function for luma component. The values are
+ * signaled on the scale of 0..255. In case of 10 bit video, these values
+ * correspond to luma values divided by 4. In case of 12 bit video, these values
+ * correspond to luma values divided by 16.
+ * @point_y_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for luma component.
+ * @num_cb_points: specifies the number of points for the piece-wise linear
+ * scaling function of the cb component.
+ * @point_cb_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cb component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cb_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cb component.
+ * @num_cr_points: specifies the number of points for the piece-wise linear
+ * scaling function of the cr component.
+ * @point_cr_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cr component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cr_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cr component.
+ * @grain_scaling_minus_8: represents the shift - 8 applied to the values of the
+ * chroma component. The grain_scaling_minus_8 can take values of 0..3 and
+ * determines the range and quantization step of the standard deviation of film
+ * grain.
+ * @ar_coeff_lag: specifies the number of auto-regressive coefficients for luma
+ * and chroma.
+ * @ar_coeffs_y_plus_128: specifies auto-regressive coefficients used for the Y
+ * plane.
+ * @ar_coeffs_cb_plus_128: specifies auto-regressive coefficients used for the U
+ * plane.
+ * @ar_coeffs_cr_plus_128: specifies auto-regressive coefficients used for the V
+ * plane.
+ * @ar_coeff_shift_minus_6: specifies the range of the auto-regressive
+ * coefficients. Values of 0, 1, 2, and 3 correspond to the ranges for
+ * auto-regressive coefficients of [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25,
+ * 0.25) respectively.
+ * @grain_scale_shift: specifies how much the Gaussian random numbers should be
+ * scaled down during the grain synthesis process.
+ * @cb_mult: represents a multiplier for the cb component used in derivation of
+ * the input index to the cb component scaling function.
+ * @cb_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cb component scaling function.
+ * @cr_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cr component scaling function.
+ * @cb_offset: represents an offset used in derivation of the input index to the
+ * cb component scaling function.
+ * @cr_offset: represents an offset used in derivation of the input index to the
+ * cr component scaling function.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_av1_film_grain {
+ __u8 flags;
+ __u8 cr_mult;
+ __u16 grain_seed;
+ __u8 film_grain_params_ref_idx;
+ __u8 num_y_points;
+ __u8 point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 num_cb_points;
+ __u8 point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 num_cr_points;
+ __u8 point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 grain_scaling_minus_8;
+ __u8 ar_coeff_lag;
+ __u8 ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeff_shift_minus_6;
+ __u8 grain_scale_shift;
+ __u8 cb_mult;
+ __u8 cb_luma_mult;
+ __u8 cr_luma_mult;
+ __u16 cb_offset;
+ __u16 cr_offset;
+ __u8 reserved[4];
+};
+
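The (value, scaling) point arrays above describe piecewise linear functions. Purely to illustrate the data layout, and not the spec's exact fixed-point derivation, evaluating one such function could look like the sketch below.

/* Illustrative linear interpolation over one plane's scaling points.
 * The AV1 specification defines the exact fixed-point arithmetic. */
#include <linux/types.h>

static int av1_eval_scaling(const __u8 *point_value, const __u8 *point_scaling,
			    int num_points, int x)
{
	int i;

	if (num_points == 0)
		return 0;
	if (x <= point_value[0])
		return point_scaling[0];
	for (i = 0; i < num_points - 1; i++) {
		if (x < point_value[i + 1]) {
			int dx = point_value[i + 1] - point_value[i];
			int dy = point_scaling[i + 1] - point_scaling[i];

			return point_scaling[i] +
			       (x - point_value[i]) * dy / dx;
		}
	}
	return point_scaling[num_points - 1];
}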
+/* MPEG-compression definitions kept for backwards compatibility */
+#ifndef __KERNEL__
+#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC
+#define V4L2_CID_MPEG_CLASS V4L2_CID_CODEC_CLASS
+#define V4L2_CID_MPEG_BASE V4L2_CID_CODEC_BASE
+#define V4L2_CID_MPEG_CX2341X_BASE V4L2_CID_CODEC_CX2341X_BASE
+#define V4L2_CID_MPEG_MFC51_BASE V4L2_CID_CODEC_MFC51_BASE
+#endif
+
#endif
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index b52b67c62562..ef0128c7369c 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -3,15 +3,6 @@
* V4L2 DV timings header.
*
* Copyright (C) 2012-2016 Hans Verkuil <hans.verkuil@cisco.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _V4L2_DV_TIMINGS_H
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 123a231001a8..6b07b73473b5 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -3,10 +3,6 @@
* Media Bus API header
*
* Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_V4L2_MEDIABUS_H
@@ -16,6 +12,8 @@
#include <linux/types.h>
#include <linux/videodev2.h>
+#define V4L2_MBUS_FRAMEFMT_SET_CSC 0x0001
+
/**
* struct v4l2_mbus_framefmt - frame format on the media bus
* @width: image width
@@ -24,8 +22,11 @@
* @field: used interlacing type (from enum v4l2_field)
* @colorspace: colorspace of the data (from enum v4l2_colorspace)
* @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
+ * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding)
* @quantization: quantization of the data (from enum v4l2_quantization)
* @xfer_func: transfer function of the data (from enum v4l2_xfer_func)
+ * @flags: flags (V4L2_MBUS_FRAMEFMT_*)
+ * @reserved: reserved bytes that can be later used
*/
struct v4l2_mbus_framefmt {
__u32 width;
@@ -33,10 +34,16 @@ struct v4l2_mbus_framefmt {
__u32 code;
__u32 field;
__u32 colorspace;
- __u16 ycbcr_enc;
+ union {
+ /* enum v4l2_ycbcr_encoding */
+ __u16 ycbcr_enc;
+ /* enum v4l2_hsv_encoding */
+ __u16 hsv_enc;
+ };
__u16 quantization;
__u16 xfer_func;
- __u16 reserved[11];
+ __u16 flags;
+ __u16 reserved[10];
};
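Editor's illustration: with the new @flags field, an application can ask a subdev to perform colorspace conversion by setting V4L2_MBUS_FRAMEFMT_SET_CSC together with the requested colorimetry. A sketch, with the pad number and target colorspace as placeholders:

/* Hypothetical usage: ask a subdev source pad to produce BT.709. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static int request_csc(int subdev_fd)
{
	struct v4l2_subdev_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 1;				/* placeholder source pad */
	if (ioctl(subdev_fd, VIDIOC_SUBDEV_G_FMT, &fmt))
		return -1;

	fmt.format.colorspace = V4L2_COLORSPACE_REC709;
	fmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
	return ioctl(subdev_fd, VIDIOC_SUBDEV_S_FMT, &fmt);
}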
#ifndef __KERNEL__
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 5d2a1dab7911..7048c51581c6 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -6,24 +6,12 @@
*
* Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_V4L2_SUBDEV_H
#define __LINUX_V4L2_SUBDEV_H
+#include <linux/const.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/v4l2-common.h>
@@ -44,12 +32,15 @@ enum v4l2_subdev_format_whence {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @format: media bus format (format code and frame size)
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_format {
__u32 which;
__u32 pad;
struct v4l2_mbus_framefmt format;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -57,35 +48,55 @@ struct v4l2_subdev_format {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @rect: pad crop rectangle boundaries
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_crop {
__u32 which;
__u32 pad;
struct v4l2_rect rect;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
+#define V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE 0x00000001
+#define V4L2_SUBDEV_MBUS_CODE_CSC_XFER_FUNC 0x00000002
+#define V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC 0x00000004
+#define V4L2_SUBDEV_MBUS_CODE_CSC_HSV_ENC V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC
+#define V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION 0x00000008
+
/**
* struct v4l2_subdev_mbus_code_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @flags: flags set by the driver, (V4L2_SUBDEV_MBUS_CODE_*)
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
__u32 index;
__u32 code;
__u32 which;
- __u32 reserved[8];
+ __u32 flags;
+ __u32 stream;
+ __u32 reserved[6];
};
/**
* struct v4l2_subdev_frame_size_enum - Media bus format enumeration
- * @pad: pad number, as reported by the media API
* @index: format index during enumeration
+ * @pad: pad number, as reported by the media API
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @min_width: minimum frame width, in pixels
+ * @max_width: maximum frame width, in pixels
+ * @min_height: minimum frame height, in pixels
+ * @max_height: maximum frame height, in pixels
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -96,18 +107,24 @@ struct v4l2_subdev_frame_size_enum {
__u32 min_height;
__u32 max_height;
__u32 which;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
* struct v4l2_subdev_frame_interval - Pad-level frame rate
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
+ * @stream: stream number, defined in subdev routing
+ * @which: interval type (from enum v4l2_subdev_format_whence)
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
- __u32 reserved[9];
+ __u32 stream;
+ __u32 which;
+ __u32 reserved[7];
};
/**
@@ -118,7 +135,9 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
- * @which: format type (from enum v4l2_subdev_format_whence)
+ * @which: interval type (from enum v4l2_subdev_format_whence)
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval_enum {
__u32 index;
@@ -128,7 +147,8 @@ struct v4l2_subdev_frame_interval_enum {
__u32 height;
struct v4l2_fract interval;
__u32 which;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -140,6 +160,7 @@ struct v4l2_subdev_frame_interval_enum {
* defined in v4l2-common.h; V4L2_SEL_TGT_* .
* @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of the selection window
+ * @stream: stream number, defined in subdev routing
* @reserved: for future use, set to zero for now
*
* Hardware may use multiple helper windows to process a video stream.
@@ -152,7 +173,8 @@ struct v4l2_subdev_selection {
__u32 target;
__u32 flags;
struct v4l2_rect r;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -168,7 +190,75 @@ struct v4l2_subdev_capability {
};
/* The v4l2 sub-device video device node is registered in read-only mode. */
-#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0)
+#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
+
+/* The v4l2 sub-device supports routing and multiplexed streams. */
+#define V4L2_SUBDEV_CAP_STREAMS 0x00000002
+
+/*
+ * Is the route active? An active route will start when streaming is enabled
+ * on a video node.
+ */
+#define V4L2_SUBDEV_ROUTE_FL_ACTIVE (1U << 0)
+
+/**
+ * struct v4l2_subdev_route - A route inside a subdev
+ *
+ * @sink_pad: the sink pad index
+ * @sink_stream: the sink stream identifier
+ * @source_pad: the source pad index
+ * @source_stream: the source stream identifier
+ * @flags: route flags V4L2_SUBDEV_ROUTE_FL_*
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_route {
+ __u32 sink_pad;
+ __u32 sink_stream;
+ __u32 source_pad;
+ __u32 source_stream;
+ __u32 flags;
+ __u32 reserved[5];
+};
+
+/**
+ * struct v4l2_subdev_routing - Subdev routing information
+ *
+ * @which: configuration type (from enum v4l2_subdev_format_whence)
+ * @num_routes: the total number of routes in the routes array
+ * @routes: pointer to the routes array
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_routing {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
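@routes carries a userspace pointer in a __u64, so callers cast the array address. A minimal sketch of activating one route (pad and stream numbers are placeholders):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static int set_single_route(int subdev_fd)
{
	struct v4l2_subdev_route route;
	struct v4l2_subdev_routing routing;

	memset(&route, 0, sizeof(route));
	route.sink_pad = 0;			/* placeholder pads/streams */
	route.sink_stream = 0;
	route.source_pad = 1;
	route.source_stream = 0;
	route.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;

	memset(&routing, 0, sizeof(routing));
	routing.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	routing.num_routes = 1;
	routing.routes = (uintptr_t)&route;	/* user pointer in a __u64 */

	return ioctl(subdev_fd, VIDIOC_SUBDEV_S_ROUTING, &routing);
}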
+
+/*
+ * The client is aware of streams. Setting this flag enables the use of 'stream'
+ * fields (referring to the stream number) with various ioctls. If this is not
+ * set (which is the default), the 'stream' fields will be forced to 0 by the
+ * kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+/*
+ * The client is aware of the 'which' field in struct
+ * v4l2_subdev_frame_interval. If this is not set (which is the default), the
+ * 'which' field is forced to V4L2_SUBDEV_FORMAT_ACTIVE by the kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH (1ULL << 1)
+
+/**
+ * struct v4l2_subdev_client_capability - Capabilities of the client accessing
+ * the subdev
+ *
+ * @capabilities: A bitmask of V4L2_SUBDEV_CLIENT_CAP_* flags.
+ */
+struct v4l2_subdev_client_capability {
+ __u64 capabilities;
+};
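Per the comment above, a client must raise V4L2_SUBDEV_CLIENT_CAP_STREAMS before the kernel will honour the 'stream' fields; a minimal sketch:

#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static int enable_streams_api(int subdev_fd)
{
	struct v4l2_subdev_client_capability cap = {
		.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS,
	};

	/* Without this, the kernel forces 'stream' fields to 0. */
	return ioctl(subdev_fd, VIDIOC_SUBDEV_S_CLIENT_CAP, &cap);
}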
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
@@ -185,6 +275,11 @@ struct v4l2_subdev_capability {
#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop)
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
+#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_G_CLIENT_CAP _IOR('V', 101, struct v4l2_subdev_client_capability)
+#define VIDIOC_SUBDEV_S_CLIENT_CAP _IOWR('V', 102, struct v4l2_subdev_client_capability)
+
/* The following ioctls are identical to the ioctls in videodev2.h */
#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
new file mode 100644
index 000000000000..43c51698195c
--- /dev/null
+++ b/include/uapi/linux/vdpa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * vdpa device management interface
+ * Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_VDPA_H_
+#define _UAPI_LINUX_VDPA_H_
+
+#define VDPA_GENL_NAME "vdpa"
+#define VDPA_GENL_VERSION 0x1
+
+enum vdpa_command {
+ VDPA_CMD_UNSPEC,
+ VDPA_CMD_MGMTDEV_NEW,
+ VDPA_CMD_MGMTDEV_GET, /* can dump */
+ VDPA_CMD_DEV_NEW,
+ VDPA_CMD_DEV_DEL,
+ VDPA_CMD_DEV_GET, /* can dump */
+ VDPA_CMD_DEV_CONFIG_GET, /* can dump */
+ VDPA_CMD_DEV_VSTATS_GET,
+};
+
+enum vdpa_attr {
+ VDPA_ATTR_UNSPEC,
+
+ /* Pad attribute for 64b alignment */
+ VDPA_ATTR_PAD = VDPA_ATTR_UNSPEC,
+
+ /* bus name (optional) + dev name together make the parent device handle */
+ VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */
+ VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */
+ VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, /* u64 */
+
+ VDPA_ATTR_DEV_NAME, /* string */
+ VDPA_ATTR_DEV_ID, /* u32 */
+ VDPA_ATTR_DEV_VENDOR_ID, /* u32 */
+ VDPA_ATTR_DEV_MAX_VQS, /* u32 */
+ VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */
+ VDPA_ATTR_DEV_MIN_VQ_SIZE, /* u16 */
+
+ VDPA_ATTR_DEV_NET_CFG_MACADDR, /* binary */
+ VDPA_ATTR_DEV_NET_STATUS, /* u8 */
+ VDPA_ATTR_DEV_NET_CFG_MAX_VQP, /* u16 */
+ VDPA_ATTR_DEV_NET_CFG_MTU, /* u16 */
+
+ VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
+ VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
+ /* virtio features that are supported by the vDPA management device */
+ VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
+
+ VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
+ VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
+ VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
+
+ /* virtio features that are provisioned to the vDPA device */
+ VDPA_ATTR_DEV_FEATURES, /* u64 */
+
+ VDPA_ATTR_DEV_BLK_CFG_CAPACITY, /* u64 */
+ VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, /* u16 */
+ VDPA_ATTR_DEV_BLK_CFG_PHY_BLK_EXP, /* u8 */
+ VDPA_ATTR_DEV_BLK_CFG_ALIGN_OFFSET, /* u8 */
+ VDPA_ATTR_DEV_BLK_CFG_MIN_IO_SIZE, /* u16 */
+ VDPA_ATTR_DEV_BLK_CFG_OPT_IO_SIZE, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEC, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEG, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN,/* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, /* u8 */
+ VDPA_ATTR_DEV_BLK_CFG_FLUSH, /* u8 */
+
+ /* new attributes must be added above here */
+ VDPA_ATTR_MAX,
+};
+
+#endif
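These commands and attributes travel over generic netlink. As an editor's sketch only (assuming libnl-genl-3 is available; device and mgmtdev names are placeholders), creating a device with VDPA_CMD_DEV_NEW, roughly what the iproute2 `vdpa dev add` command does:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/vdpa.h>

static int vdpa_dev_new(const char *mgmtdev, const char *name)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk || genl_connect(sk))
		return -1;
	family = genl_ctrl_resolve(sk, VDPA_GENL_NAME);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    VDPA_CMD_DEV_NEW, VDPA_GENL_VERSION);
	nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, mgmtdev);
	nla_put_string(msg, VDPA_ATTR_DEV_NAME, name);

	err = nl_send_auto(sk, msg);	/* error handling trimmed */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? -1 : 0;
}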
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
new file mode 100644
index 000000000000..11bd48c72c6c
--- /dev/null
+++ b/include/uapi/linux/vduse.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_VDUSE_H_
+#define _UAPI_VDUSE_H_
+
+#include <linux/types.h>
+
+#define VDUSE_BASE 0x81
+
+/* The ioctls for control device (/dev/vduse/control) */
+
+#define VDUSE_API_VERSION 0
+
+/*
+ * Get the version of the VDUSE API supported by the kernel (VDUSE_API_VERSION).
+ * This is used for future extension.
+ */
+#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64)
+
+/* Set the version of the VDUSE API supported by userspace. */
+#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64)
+
+/**
+ * struct vduse_dev_config - basic configuration of a VDUSE device
+ * @name: VDUSE device name, needs to be NUL terminated
+ * @vendor_id: virtio vendor id
+ * @device_id: virtio device id
+ * @features: virtio features
+ * @vq_num: the number of virtqueues
+ * @vq_align: the allocation alignment of the virtqueue's metadata
+ * @reserved: for future use, needs to be initialized to zero
+ * @config_size: the size of the configuration space
+ * @config: the buffer of the configuration space
+ *
+ * Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device.
+ */
+struct vduse_dev_config {
+#define VDUSE_NAME_MAX 256
+ char name[VDUSE_NAME_MAX];
+ __u32 vendor_id;
+ __u32 device_id;
+ __u64 features;
+ __u32 vq_num;
+ __u32 vq_align;
+ __u32 reserved[13];
+ __u32 config_size;
+ __u8 config[];
+};
+
+/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */
+#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config)
+
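Because @config is a flexible array member, the VDUSE_CREATE_DEV argument must be allocated with config_size trailing bytes. A hedged sketch (the virtio-blk device id and all sizes are placeholders; a real caller negotiates the API version first via VDUSE_GET/SET_API_VERSION):

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

static int create_vduse_dev(const void *config, __u32 config_size)
{
	struct vduse_dev_config *cfg;
	int ctrl_fd, ret;

	ctrl_fd = open("/dev/vduse/control", O_RDWR);
	if (ctrl_fd < 0)
		return -1;

	cfg = calloc(1, sizeof(*cfg) + config_size);
	if (!cfg)
		return -1;
	strncpy(cfg->name, "vduse-blk0", VDUSE_NAME_MAX - 1);
	cfg->device_id = 2;		/* placeholder: virtio-blk */
	cfg->features = 0;		/* placeholder virtio feature bits */
	cfg->vq_num = 1;
	cfg->vq_align = 4096;
	cfg->config_size = config_size;
	memcpy(cfg->config, config, config_size);

	ret = ioctl(ctrl_fd, VDUSE_CREATE_DEV, cfg);
	free(cfg);
	return ret;
}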
+/*
+ * Destroy a VDUSE device. Make sure there are no more references
+ * to the char device (/dev/vduse/$NAME).
+ */
+#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX])
+
+/* The ioctls for VDUSE device (/dev/vduse/$NAME) */
+
+/**
+ * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
+ * @offset: the mmap offset on returned file descriptor
+ * @start: start of the IOVA region
+ * @last: last of the IOVA region
+ * @perm: access permission of the IOVA region
+ *
+ * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region.
+ */
+struct vduse_iotlb_entry {
+ __u64 offset;
+ __u64 start;
+ __u64 last;
+#define VDUSE_ACCESS_RO 0x1
+#define VDUSE_ACCESS_WO 0x2
+#define VDUSE_ACCESS_RW 0x3
+ __u8 perm;
+};
+
+/*
+ * Find the first IOVA region that overlaps with the range [start, last]
+ * and return the corresponding file descriptor. A return of -EINVAL means the
+ * IOVA region doesn't exist. The caller should set the start and last fields.
+ */
+#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry)
+
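Usage sketch: the caller fills @start and @last, receives a file descriptor as the ioctl return value, and mmaps it at @offset. Simplified; a real implementation would honour @perm and map the full returned region.

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vduse.h>

static void *map_iova(int dev_fd, __u64 iova, size_t len)
{
	struct vduse_iotlb_entry entry = {
		.start = iova,
		.last = iova + len - 1,
	};
	int fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, &entry);

	if (fd < 0)
		return NULL;
	/* entry.offset is the mmap offset on the returned fd. */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, entry.offset);
}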
+/*
+ * Get the negotiated virtio features. It's a subset of the features in
+ * struct vduse_dev_config that can be accepted by the virtio driver. It's
+ * only valid after the FEATURES_OK status bit is set.
+ */
+#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64)
+
+/**
+ * struct vduse_config_data - data used to update configuration space
+ * @offset: the offset from the beginning of configuration space
+ * @length: the length to write to configuration space
+ * @buffer: the buffer used to write from
+ *
+ * Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device
+ * configuration space.
+ */
+struct vduse_config_data {
+ __u32 offset;
+ __u32 length;
+ __u8 buffer[];
+};
+
+/* Set device configuration space */
+#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data)
+
+/*
+ * Inject a config interrupt. It's usually used to notify the virtio driver
+ * that the device configuration space has changed.
+ */
+#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13)
+
+/**
+ * struct vduse_vq_config - basic configuration of a virtqueue
+ * @index: virtqueue index
+ * @max_size: the max size of virtqueue
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue.
+ */
+struct vduse_vq_config {
+ __u32 index;
+ __u16 max_size;
+ __u16 reserved[13];
+};
+
+/*
+ * Set up the specified virtqueue. Make sure all virtqueues have been
+ * configured before the device is attached to the vDPA bus.
+ */
+#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config)
+
+/**
+ * struct vduse_vq_state_split - split virtqueue state
+ * @avail_index: available index
+ */
+struct vduse_vq_state_split {
+ __u16 avail_index;
+};
+
+/**
+ * struct vduse_vq_state_packed - packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vduse_vq_state_packed {
+ __u16 last_avail_counter;
+ __u16 last_avail_idx;
+ __u16 last_used_counter;
+ __u16 last_used_idx;
+};
+
+/**
+ * struct vduse_vq_info - information of a virtqueue
+ * @index: virtqueue index
+ * @num: the size of virtqueue
+ * @desc_addr: address of desc area
+ * @driver_addr: address of driver area
+ * @device_addr: address of device area
+ * @split: split virtqueue state
+ * @packed: packed virtqueue state
+ * @ready: ready status of virtqueue
+ *
+ * Structure used by the VDUSE_VQ_GET_INFO ioctl to get a virtqueue's information.
+ */
+struct vduse_vq_info {
+ __u32 index;
+ __u32 num;
+ __u64 desc_addr;
+ __u64 driver_addr;
+ __u64 device_addr;
+ union {
+ struct vduse_vq_state_split split;
+ struct vduse_vq_state_packed packed;
+ };
+ __u8 ready;
+};
+
+/* Get the specified virtqueue's information. Caller should set index field. */
+#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info)
+
+/**
+ * struct vduse_vq_eventfd - eventfd configuration for a virtqueue
+ * @index: virtqueue index
+ * @fd: eventfd, -1 means de-assigning the eventfd
+ *
+ * Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd.
+ */
+struct vduse_vq_eventfd {
+ __u32 index;
+#define VDUSE_EVENTFD_DEASSIGN -1
+ int fd;
+};
+
+/*
+ * Set up the kick eventfd for the specified virtqueue. The kick eventfd is
+ * used by the VDUSE kernel module to notify userspace to consume the avail vring.
+ */
+#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd)
+
+/*
+ * Inject an interrupt for a specific virtqueue. It's used to notify the
+ * virtio driver to consume the used vring.
+ */
+#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32)
+
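The kick eventfd (driver to device) and VDUSE_VQ_INJECT_IRQ (device to driver) form the two notification directions; a short sketch wiring up virtqueue 0:

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

static int setup_vq0_kick(int dev_fd)
{
	struct vduse_vq_eventfd cfg = {
		.index = 0,
		.fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK),
	};

	if (cfg.fd < 0 || ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &cfg))
		return -1;
	/* After consuming descriptors and updating the used ring,
	 * notify the virtio driver: */
	return ioctl(dev_fd, VDUSE_VQ_INJECT_IRQ, &cfg.index);
}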
+/**
+ * struct vduse_iova_umem - userspace memory configuration for one IOVA region
+ * @uaddr: start address of userspace memory, it must be aligned to page size
+ * @iova: start of the IOVA region
+ * @size: size of the IOVA region
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM
+ * ioctls to register/de-register userspace memory for IOVA regions
+ */
+struct vduse_iova_umem {
+ __u64 uaddr;
+ __u64 iova;
+ __u64 size;
+ __u64 reserved[3];
+};
+
+/* Register userspace memory for IOVA regions */
+#define VDUSE_IOTLB_REG_UMEM _IOW(VDUSE_BASE, 0x18, struct vduse_iova_umem)
+
+/* De-register the userspace memory. Caller should set iova and size field. */
+#define VDUSE_IOTLB_DEREG_UMEM _IOW(VDUSE_BASE, 0x19, struct vduse_iova_umem)
+
+/**
+ * struct vduse_iova_info - information of one IOVA region
+ * @start: start of the IOVA region
+ * @last: last of the IOVA region
+ * @capability: capability of the IOVA region
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of
+ * one IOVA region.
+ */
+struct vduse_iova_info {
+ __u64 start;
+ __u64 last;
+#define VDUSE_IOVA_CAP_UMEM (1 << 0)
+ __u64 capability;
+ __u64 reserved[3];
+};
+
+/*
+ * Find the first IOVA region that overlaps with the range [start, last]
+ * and return some information on it. Caller should set start and last fields.
+ */
+#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info)
+
+/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */
+
+/**
+ * enum vduse_req_type - request type
+ * @VDUSE_GET_VQ_STATE: get the state for the specified virtqueue from userspace
+ * @VDUSE_SET_STATUS: set the device status
+ * @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for
+ * specified IOVA range via VDUSE_IOTLB_GET_FD ioctl
+ */
+enum vduse_req_type {
+ VDUSE_GET_VQ_STATE,
+ VDUSE_SET_STATUS,
+ VDUSE_UPDATE_IOTLB,
+};
+
+/**
+ * struct vduse_vq_state - virtqueue state
+ * @index: virtqueue index
+ * @split: split virtqueue state
+ * @packed: packed virtqueue state
+ */
+struct vduse_vq_state {
+ __u32 index;
+ union {
+ struct vduse_vq_state_split split;
+ struct vduse_vq_state_packed packed;
+ };
+};
+
+/**
+ * struct vduse_dev_status - device status
+ * @status: device status
+ */
+struct vduse_dev_status {
+ __u8 status;
+};
+
+/**
+ * struct vduse_iova_range - IOVA range [start, last]
+ * @start: start of the IOVA range
+ * @last: last of the IOVA range
+ */
+struct vduse_iova_range {
+ __u64 start;
+ __u64 last;
+};
+
+/**
+ * struct vduse_dev_request - control request
+ * @type: request type
+ * @request_id: request id
+ * @reserved: for future use
+ * @vq_state: virtqueue state, only index field is available
+ * @s: device status
+ * @iova: IOVA range for updating
+ * @padding: padding
+ *
+ * Structure used by read(2) on /dev/vduse/$NAME.
+ */
+struct vduse_dev_request {
+ __u32 type;
+ __u32 request_id;
+ __u32 reserved[4];
+ union {
+ struct vduse_vq_state vq_state;
+ struct vduse_dev_status s;
+ struct vduse_iova_range iova;
+ __u32 padding[32];
+ };
+};
+
+/**
+ * struct vduse_dev_response - response to control request
+ * @request_id: corresponding request id
+ * @result: the result of request
+ * @reserved: for future use, needs to be initialized to zero
+ * @vq_state: virtqueue state
+ * @padding: padding
+ *
+ * Structure used by write(2) on /dev/vduse/$NAME.
+ */
+struct vduse_dev_response {
+ __u32 request_id;
+#define VDUSE_REQ_RESULT_OK 0x00
+#define VDUSE_REQ_RESULT_FAILED 0x01
+ __u32 result;
+ __u32 reserved[4];
+ union {
+ struct vduse_vq_state vq_state;
+ __u32 padding[32];
+ };
+};
+
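Putting the request/response pair together, a device implementation services this control channel with a read(2)/write(2) loop; a minimal sketch handling VDUSE_GET_VQ_STATE for a split virtqueue (the reported index is a placeholder):

#include <string.h>
#include <unistd.h>
#include <linux/vduse.h>

static void serve_one(int dev_fd)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp;

	if (read(dev_fd, &req, sizeof(req)) != sizeof(req))
		return;

	memset(&resp, 0, sizeof(resp));
	resp.request_id = req.request_id;
	resp.result = VDUSE_REQ_RESULT_OK;

	if (req.type == VDUSE_GET_VQ_STATE) {
		/* Placeholder: report avail index 0 for a split vq. */
		resp.vq_state.index = req.vq_state.index;
		resp.vq_state.split.avail_index = 0;
	}

	write(dev_fd, &resp, sizeof(resp));
}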
+#endif /* _UAPI_VDUSE_H_ */
diff --git a/include/uapi/linux/vesa.h b/include/uapi/linux/vesa.h
new file mode 100644
index 000000000000..81947f5088cd
--- /dev/null
+++ b/include/uapi/linux/vesa.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_VESA_H
+#define _UAPI_LINUX_VESA_H
+
+/* VESA Blanking Levels */
+enum vesa_blank_mode {
+ VESA_NO_BLANKING = 0,
+#define VESA_NO_BLANKING VESA_NO_BLANKING
+ VESA_VSYNC_SUSPEND = 1,
+#define VESA_VSYNC_SUSPEND VESA_VSYNC_SUSPEND
+ VESA_HSYNC_SUSPEND = 2,
+#define VESA_HSYNC_SUSPEND VESA_HSYNC_SUSPEND
+ VESA_POWERDOWN = VESA_VSYNC_SUSPEND | VESA_HSYNC_SUSPEND,
+#define VESA_POWERDOWN VESA_POWERDOWN
+ VESA_BLANK_MAX = VESA_POWERDOWN,
+};
+
+#endif
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 920470502329..2b68e6cdf190 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -46,6 +46,16 @@
*/
#define VFIO_NOIOMMU_IOMMU 8
+/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
+#define VFIO_UNMAP_ALL 9
+
+/*
+ * Supports the vaddr flag for DMA map and unmap. Not supported for mediated
+ * devices, so this capability is subject to change as groups are added or
+ * removed.
+ */
+#define VFIO_UPDATE_VADDR 10
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -201,8 +211,13 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
+#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */
+#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */
+#define VFIO_DEVICE_FLAGS_CDX (1 << 8) /* vfio-cdx device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
+ __u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
@@ -218,6 +233,29 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
+/*
+ * The following capabilities are unique to s390 zPCI devices. Their contents
+ * are further-defined in vfio_zdev.h
+ */
+#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE 1
+#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP 2
+#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3
+#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4
+
+/*
+ * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
+ * completion to the root bus with supported widths provided via flags.
+ */
+#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP 5
+struct vfio_device_info_cap_pci_atomic_comp {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+#define VFIO_PCI_ATOMIC_COMP32 (1 << 0)
+#define VFIO_PCI_ATOMIC_COMP64 (1 << 1)
+#define VFIO_PCI_ATOMIC_COMP128 (1 << 2)
+ __u32 reserved;
+};
+
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
* struct vfio_region_info)
@@ -239,8 +277,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 cap_offset; /* Offset within info struct of first cap */
- __u64 size; /* Region size (bytes) */
- __u64 offset; /* Region offset from start of device fd */
+ __aligned_u64 size; /* Region size (bytes) */
+ __aligned_u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
@@ -256,8 +294,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
- __u64 offset; /* Offset of mmap'able area within region */
- __u64 size; /* Size of mmap'able area */
+ __aligned_u64 offset; /* Offset of mmap'able area within region */
+ __aligned_u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
@@ -305,7 +343,7 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
#define VFIO_REGION_TYPE_GFX (1)
#define VFIO_REGION_TYPE_CCW (2)
-#define VFIO_REGION_TYPE_MIGRATION (3)
+#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3)
/* sub-types for VFIO_REGION_TYPE_PCI_* */
@@ -317,6 +355,8 @@ struct vfio_region_info_cap_type {
/* 10de vendor PCI sub-types */
/*
* NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ *
+ * Deprecated, region no longer provided
*/
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
@@ -324,6 +364,8 @@ struct vfio_region_info_cap_type {
/*
* IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
* to do TLB invalidation on a GPU.
+ *
+ * Deprecated, region no longer provided
*/
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
@@ -383,230 +425,34 @@ struct vfio_region_gfx_edid {
#define VFIO_REGION_SUBTYPE_CCW_CRW (3)
/* sub-types for VFIO_REGION_TYPE_MIGRATION */
-#define VFIO_REGION_SUBTYPE_MIGRATION (1)
-
-/*
- * The structure vfio_device_migration_info is placed at the 0th offset of
- * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related
- * migration information. Field accesses from this structure are only supported
- * at their native width and alignment. Otherwise, the result is undefined and
- * vendor drivers should return an error.
- *
- * device_state: (read/write)
- * - The user application writes to this field to inform the vendor driver
- * about the device state to be transitioned to.
- * - The vendor driver should take the necessary actions to change the
- * device state. After successful transition to a given state, the
- * vendor driver should return success on write(device_state, state)
- * system call. If the device state transition fails, the vendor driver
- * should return an appropriate -errno for the fault condition.
- * - On the user application side, if the device state transition fails,
- * that is, if write(device_state, state) returns an error, read
- * device_state again to determine the current state of the device from
- * the vendor driver.
- * - The vendor driver should return previous state of the device unless
- * the vendor driver has encountered an internal error, in which case
- * the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR.
- * - The user application must use the device reset ioctl to recover the
- * device from VFIO_DEVICE_STATE_ERROR state. If the device is
- * indicated to be in a valid device state by reading device_state, the
- * user application may attempt to transition the device to any valid
- * state reachable from the current state or terminate itself.
- *
- * device_state consists of 3 bits:
- * - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear,
- * it indicates the _STOP state. When the device state is changed to
- * _STOP, driver should stop the device before write() returns.
- * - If bit 1 is set, it indicates the _SAVING state, which means that the
- * driver should start gathering device state information that will be
- * provided to the VFIO user application to save the device's state.
- * - If bit 2 is set, it indicates the _RESUMING state, which means that
- * the driver should prepare to resume the device. Data provided through
- * the migration region should be used to resume the device.
- * Bits 3 - 31 are reserved for future use. To preserve them, the user
- * application should perform a read-modify-write operation on this
- * field when modifying the specified bits.
- *
- * +------- _RESUMING
- * |+------ _SAVING
- * ||+----- _RUNNING
- * |||
- * 000b => Device Stopped, not saving or resuming
- * 001b => Device running, which is the default state
- * 010b => Stop the device & save the device state, stop-and-copy state
- * 011b => Device running and save the device state, pre-copy state
- * 100b => Device stopped and the device state is resuming
- * 101b => Invalid state
- * 110b => Error state
- * 111b => Invalid state
- *
- * State transitions:
- *
- * _RESUMING _RUNNING Pre-copy Stop-and-copy _STOP
- * (100b) (001b) (011b) (010b) (000b)
- * 0. Running or default state
- * |
- *
- * 1. Normal Shutdown (optional)
- * |------------------------------------->|
- *
- * 2. Save the state or suspend
- * |------------------------->|---------->|
- *
- * 3. Save the state during live migration
- * |----------->|------------>|---------->|
- *
- * 4. Resuming
- * |<---------|
- *
- * 5. Resumed
- * |--------->|
- *
- * 0. Default state of VFIO device is _RUNNNG when the user application starts.
- * 1. During normal shutdown of the user application, the user application may
- * optionally change the VFIO device state from _RUNNING to _STOP. This
- * transition is optional. The vendor driver must support this transition but
- * must not require it.
- * 2. When the user application saves state or suspends the application, the
- * device state transitions from _RUNNING to stop-and-copy and then to _STOP.
- * On state transition from _RUNNING to stop-and-copy, driver must stop the
- * device, save the device state and send it to the application through the
- * migration region. The sequence to be followed for such transition is given
- * below.
- * 3. In live migration of user application, the state transitions from _RUNNING
- * to pre-copy, to stop-and-copy, and to _STOP.
- * On state transition from _RUNNING to pre-copy, the driver should start
- * gathering the device state while the application is still running and send
- * the device state data to application through the migration region.
- * On state transition from pre-copy to stop-and-copy, the driver must stop
- * the device, save the device state and send it to the user application
- * through the migration region.
- * Vendor drivers must support the pre-copy state even for implementations
- * where no data is provided to the user before the stop-and-copy state. The
- * user must not be required to consume all migration data before the device
- * transitions to a new state, including the stop-and-copy state.
- * The sequence to be followed for above two transitions is given below.
- * 4. To start the resuming phase, the device state should be transitioned from
- * the _RUNNING to the _RESUMING state.
- * In the _RESUMING state, the driver should use the device state data
- * received through the migration region to resume the device.
- * 5. After providing saved device data to the driver, the application should
- * change the state from _RESUMING to _RUNNING.
- *
- * reserved:
- * Reads on this field return zero and writes are ignored.
- *
- * pending_bytes: (read only)
- * The number of pending bytes still to be migrated from the vendor driver.
- *
- * data_offset: (read only)
- * The user application should read data_offset field from the migration
- * region. The user application should read the device data from this
- * offset within the migration region during the _SAVING state or write
- * the device data during the _RESUMING state. See below for details of
- * sequence to be followed.
- *
- * data_size: (read/write)
- * The user application should read data_size to get the size in bytes of
- * the data copied in the migration region during the _SAVING state and
- * write the size in bytes of the data copied in the migration region
- * during the _RESUMING state.
- *
- * The format of the migration region is as follows:
- * ------------------------------------------------------------------
- * |vfio_device_migration_info| data section |
- * | | /////////////////////////////// |
- * ------------------------------------------------------------------
- * ^ ^
- * offset 0-trapped part data_offset
- *
- * The structure vfio_device_migration_info is always followed by the data
- * section in the region, so data_offset will always be nonzero. The offset
- * from where the data is copied is decided by the kernel driver. The data
- * section can be trapped, mmapped, or partitioned, depending on how the kernel
- * driver defines the data section. The data section partition can be defined
- * as mapped by the sparse mmap capability. If mmapped, data_offset must be
- * page aligned, whereas initial section which contains the
- * vfio_device_migration_info structure, might not end at the offset, which is
- * page aligned. The user is not required to access through mmap regardless
- * of the capabilities of the region mmap.
- * The vendor driver should determine whether and how to partition the data
- * section. The vendor driver should return data_offset accordingly.
- *
- * The sequence to be followed while in pre-copy state and stop-and-copy state
- * is as follows:
- * a. Read pending_bytes, indicating the start of a new iteration to get device
- * data. Repeated read on pending_bytes at this stage should have no side
- * effects.
- * If pending_bytes == 0, the user application should not iterate to get data
- * for that device.
- * If pending_bytes > 0, perform the following steps.
- * b. Read data_offset, indicating that the vendor driver should make data
- * available through the data section. The vendor driver should return this
- * read operation only after data is available from (region + data_offset)
- * to (region + data_offset + data_size).
- * c. Read data_size, which is the amount of data in bytes available through
- * the migration region.
- * Read on data_offset and data_size should return the offset and size of
- * the current buffer if the user application reads data_offset and
- * data_size more than once here.
- * d. Read data_size bytes of data from (region + data_offset) from the
- * migration region.
- * e. Process the data.
- * f. Read pending_bytes, which indicates that the data from the previous
- * iteration has been read. If pending_bytes > 0, go to step b.
- *
- * The user application can transition from the _SAVING|_RUNNING
- * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
- * number of pending bytes. The user application should iterate in _SAVING
- * (stop-and-copy) until pending_bytes is 0.
- *
- * The sequence to be followed while _RESUMING device state is as follows:
- * While data for this device is available, repeat the following steps:
- * a. Read data_offset from where the user application should write data.
- * b. Write migration data starting at the migration region + data_offset for
- * the length determined by data_size from the migration source.
- * c. Write data_size, which indicates to the vendor driver that data is
- * written in the migration region. Vendor driver must return this write
- * operations on consuming data. Vendor driver should apply the
- * user-provided migration region data to the device resume state.
- *
- * If an error occurs during the above sequences, the vendor driver can return
- * an error code for next read() or write() operation, which will terminate the
- * loop. The user application should then take the next necessary action, for
- * example, failing migration or terminating the user application.
- *
- * For the user application, data is opaque. The user application should write
- * data in the same order as the data is received and the data should be of
- * same transaction size at the source.
- */
+#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
struct vfio_device_migration_info {
__u32 device_state; /* VFIO device state */
-#define VFIO_DEVICE_STATE_STOP (0)
-#define VFIO_DEVICE_STATE_RUNNING (1 << 0)
-#define VFIO_DEVICE_STATE_SAVING (1 << 1)
-#define VFIO_DEVICE_STATE_RESUMING (1 << 2)
-#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \
- VFIO_DEVICE_STATE_SAVING | \
- VFIO_DEVICE_STATE_RESUMING)
+#define VFIO_DEVICE_STATE_V1_STOP (0)
+#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0)
+#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1)
+#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2)
+#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \
+ VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING)
#define VFIO_DEVICE_STATE_VALID(state) \
- (state & VFIO_DEVICE_STATE_RESUMING ? \
- (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)
+ (state & VFIO_DEVICE_STATE_V1_RESUMING ? \
+ (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
#define VFIO_DEVICE_STATE_IS_ERROR(state) \
- ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
- VFIO_DEVICE_STATE_RESUMING))
+ ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING))
#define VFIO_DEVICE_STATE_SET_ERROR(state) \
- ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_SATE_SAVING | \
- VFIO_DEVICE_STATE_RESUMING)
+ ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING)
__u32 reserved;
- __u64 pending_bytes;
- __u64 data_offset;
- __u64 data_size;
+ __aligned_u64 pending_bytes;
+ __aligned_u64 data_offset;
+ __aligned_u64 data_size;
};
/*
@@ -623,12 +469,14 @@ struct vfio_device_migration_info {
* Capability with compressed real address (aka SSA - small system address)
* where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
* and by the userspace to associate a NVLink bridge with a GPU.
+ *
+ * Deprecated, capability no longer provided
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
struct vfio_region_info_cap_nvlink2_ssatgt {
struct vfio_info_cap_header header;
- __u64 tgt;
+ __aligned_u64 tgt;
};
/*
@@ -637,6 +485,8 @@ struct vfio_region_info_cap_nvlink2_ssatgt {
* property in the device tree. The value is fixed in the hardware
* and failing to provide the correct value results in the link
* not working with no indication from the driver why.
+ *
+ * Deprecated, capability no longer provided
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
@@ -677,6 +527,9 @@ struct vfio_region_info_cap_nvlink2_lnkspd {
* then add and unmask vectors, it's up to userspace to make the decision
* whether to allocate the maximum supported number of vectors or tear
* down setup and incrementally increase the vectors as each is enabled.
+ * Absence of the NORESIZE flag indicates that vectors can be enabled
+ * and disabled dynamically without impacting other vectors within the
+ * index.
*/
struct vfio_irq_info {
__u32 argsz;
@@ -808,18 +661,77 @@ enum {
enum {
VFIO_CCW_IO_IRQ_INDEX,
VFIO_CCW_CRW_IRQ_INDEX,
+ VFIO_CCW_REQ_IRQ_INDEX,
VFIO_CCW_NUM_IRQS
};
+/*
+ * The vfio-ap bus driver makes use of the following IRQ index mapping.
+ * Unimplemented IRQ types return a count of zero.
+ */
+enum {
+ VFIO_AP_REQ_IRQ_INDEX,
+ VFIO_AP_NUM_IRQS
+};
+
/**
- * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
+ * This command is used to query the affected devices in the hot reset for
+ * a given device.
+ *
+ * This command always reports the segment, bus, and devfn information for
+ * each affected device, and selectively reports either the group_id or the
+ * devid, depending on how the calling device is opened.
+ *
+ * - If the calling device is opened via the traditional group/container
+ * API, group_id is reported. The user should check that it owns all
+ * the affected devices and provide a set of group fds to prove
+ * ownership in the VFIO_DEVICE_PCI_HOT_RESET ioctl.
+ *
+ * - If the calling device is opened as a cdev, devid is reported.
+ * Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
+ * data type. All the affected devices should be represented in
+ * the dev_set, e.g. bound to a vfio driver, and also be owned by
+ * this interface, as determined by the following conditions:
+ * 1) Has a valid devid within the iommufd_ctx of the calling device.
+ * Ownership cannot be determined across separate iommufd_ctx and
+ * the cdev calling conventions do not support a proof-of-ownership
+ * model as provided in the legacy group interface. In this case
+ * a valid devid with a value greater than zero is provided in the return
+ * structure.
+ * 2) Does not have a valid devid within the iommufd_ctx of the calling
+ * device, but belongs to the same IOMMU group as the calling device
+ * or another opened device that has a valid devid within the
+ * iommufd_ctx of the calling device. This provides implicit ownership
+ * for devices within the same DMA isolation context. In this case
+ * the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
+ * structure.
+ *
+ * A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
+ * structure for affected devices that are NOT represented in the
+ * dev_set or whose ownership is not available. Such devices prevent
+ * the use of the VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the
+ * proof-of-ownership calling conventions (i.e. via legacy group
+ * accessed devices). Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED is set
+ * when all the affected devices are represented in the dev_set and
+ * also owned by the user. This flag is available only when flag
+ * VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set, otherwise it is reserved.
+ * When set, the user can invoke VFIO_DEVICE_PCI_HOT_RESET with a
+ * zero-length fd array on the calling device, as ownership is
+ * validated by the iommufd_ctx.
+ *
* Return: 0 on success, -errno on failure:
* -enospc = insufficient buffer, -enodev = unsupported for device.
*/
struct vfio_pci_dependent_device {
- __u32 group_id;
+ union {
+ __u32 group_id;
+ __u32 devid;
+#define VFIO_PCI_DEVID_OWNED 0
+#define VFIO_PCI_DEVID_NOT_OWNED -1
+ };
__u16 segment;
__u8 bus;
__u8 devfn; /* Use PCI_SLOT/PCI_FUNC */
@@ -828,6 +740,8 @@ struct vfio_pci_dependent_device {
struct vfio_pci_hot_reset_info {
__u32 argsz;
__u32 flags;
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID (1 << 0)
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED (1 << 1)
__u32 count;
struct vfio_pci_dependent_device devices[];
};
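
A minimal userspace sketch of the two-call pattern this ioctl expects; not part of the patch. device_fd is assumed to be an open vfio device fd, and the buffer size is derived from the count the kernel writes back on the first, deliberately undersized, call:

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_pci_hot_reset_info *get_hot_reset_info(int device_fd)
{
	struct vfio_pci_hot_reset_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_pci_hot_reset_info *info;
	size_t sz;

	/* First call: too small on purpose, the kernel fills in count. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &hdr) &&
	    errno != ENOSPC)
		return NULL;

	sz = sizeof(*info) +
	     hdr.count * sizeof(struct vfio_pci_dependent_device);
	info = calloc(1, sz);
	if (!info)
		return NULL;
	info->argsz = sz;

	/* Second call: retrieves the vfio_pci_dependent_device array. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
		free(info);
		return NULL;
	}
	return info;	/* inspect info->flags for ..._FLAG_DEV_ID[_OWNED] */
}

On a cdev-opened device, VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED in the returned flags indicates the reset ioctl below may be issued with a zero-length fd array.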
@@ -838,6 +752,24 @@ struct vfio_pci_hot_reset_info {
* VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
* struct vfio_pci_hot_reset)
*
+ * A PCI hot reset results in either a bus or slot reset which may affect
+ * other devices sharing the bus/slot. The calling user must have
+ * ownership of the full set of affected devices as determined by the
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
+ *
+ * When called on a device file descriptor acquired through the vfio
+ * group interface, the user is required to provide proof of ownership
+ * of those affected devices via the group_fds array in struct
+ * vfio_pci_hot_reset.
+ *
+ * When called on a direct cdev opened vfio device, the flags field of
+ * struct vfio_pci_hot_reset_info reports the ownership status of the
+ * affected devices and this ioctl must be called with an empty group_fds
+ * array. See above INFO ioctl definition for ownership requirements.
+ *
+ * Mixed usage of legacy groups and cdevs across the set of affected
+ * devices is not supported.
+ *
* Return: 0 on success, -errno on failure.
*/
struct vfio_pci_hot_reset {
@@ -884,7 +816,7 @@ struct vfio_device_gfx_plane_info {
__u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
/* out */
__u32 drm_format; /* drm format of plane */
- __u64 drm_format_mod; /* tiled mode */
+ __aligned_u64 drm_format_mod; /* tiled mode */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
@@ -897,6 +829,7 @@ struct vfio_device_gfx_plane_info {
__u32 region_index; /* region index */
__u32 dmabuf_id; /* dma-buf id */
};
+ __u32 reserved;
};
#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
@@ -931,15 +864,16 @@ struct vfio_device_ioeventfd {
#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
- __u64 offset; /* device fd offset of write */
- __u64 data; /* data to be written */
+ __aligned_u64 offset; /* device fd offset of write */
+ __aligned_u64 data; /* data to be written */
__s32 fd; /* -1 for de-assignment */
+ __u32 reserved;
};
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
/**
- * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17,
+ * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
* struct vfio_device_feature)
*
* Get, set, or probe feature data of the device. The feature is selected
@@ -967,6 +901,83 @@ struct vfio_device_feature {
#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
/*
+ * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
+ * struct vfio_device_bind_iommufd)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @iommufd: iommufd to bind.
+ * @out_devid: The device id generated by this bind. devid is a handle for
+ * this device/iommufd bond and can be used in IOMMUFD commands.
+ *
+ * Bind a vfio_device to the specified iommufd.
+ *
+ * The user is restricted from accessing the device until the binding
+ * operation completes. Only allowed on cdev fds.
+ *
+ * Unbind is automatically conducted when device fd is closed.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_bind_iommufd {
+ __u32 argsz;
+ __u32 flags;
+ __s32 iommufd;
+ __u32 out_devid;
+};
+
+#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/*
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
+ * struct vfio_device_attach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @pt_id: Input the target id which can represent an ioas or a hwpt
+ * allocated via iommufd subsystem.
+ * Output the input ioas id or the attached hwpt id which could
+ * be the specified hwpt itself or a hwpt automatically created
+ * for the specified ioas by kernel during the attachment.
+ *
+ * Associate the device with an address space within the bound iommufd.
+ * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
+ * allowed on cdev fds.
+ *
+ * If a vfio device is currently attached to a valid hw_pagetable, without doing
+ * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
+ * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
+ * as a hw_pagetable replacement, will replace the device's currently attached
+ * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_attach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+ __u32 pt_id;
+};
+
+#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
+
+/*
+ * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
+ * struct vfio_device_detach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ *
+ * Remove the association between the device and its currently attached
+ * address space. Afterwards, the device is left in a blocked DMA state.
+ * This is only allowed on cdev fds.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_detach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+};
+
+#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
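
A hedged sketch of the cdev lifecycle these three ioctls define. The path /dev/vfio/devices/vfio0 and the pre-existing iommufd/ioas_id (an open /dev/iommu fd and an IOAS allocated through the iommufd uAPI) are illustrative assumptions, not part of this header:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_cdev_setup(int iommufd, __u32 ioas_id)
{
	struct vfio_device_bind_iommufd bind = {
		.argsz = sizeof(bind),
		.iommufd = iommufd,
	};
	struct vfio_device_attach_iommufd_pt attach = {
		.argsz = sizeof(attach),
		.pt_id = ioas_id,	/* an ioas or hwpt id */
	};
	int fd = open("/dev/vfio/devices/vfio0", O_RDWR);

	if (fd < 0)
		return -1;
	/* Bind first; device access is restricted until this completes. */
	if (ioctl(fd, VFIO_DEVICE_BIND_IOMMUFD, &bind) ||
	    ioctl(fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach)) {
		close(fd);
		return -1;
	}
	/* bind.out_devid now identifies this bond in IOMMUFD commands. */
	return fd;	/* detach happens via ..._DETACH_IOMMUFD_PT or close */
}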
+
+/*
* Provide support for setting a PCI VF Token, which is used as a shared
* secret between PF and VF drivers. This feature may only be set on a
* PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
@@ -975,6 +986,478 @@ struct vfio_device_feature {
*/
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0)
+/*
+ * Indicates the device can support the migration API through
+ * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
+ * ERROR states are always supported. Support for additional states is
+ * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
+ * set.
+ *
+ * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
+ * RESUMING are supported.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
+ * is supported in addition to the STOP_COPY states.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
+ * PRE_COPY is supported in addition to the STOP_COPY states.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
+ * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
+ * in addition to the STOP_COPY states.
+ *
+ * Other combinations of flags have behavior to be defined in the future.
+ */
+struct vfio_device_feature_migration {
+ __aligned_u64 flags;
+#define VFIO_MIGRATION_STOP_COPY (1 << 0)
+#define VFIO_MIGRATION_P2P (1 << 1)
+#define VFIO_MIGRATION_PRE_COPY (1 << 2)
+};
+#define VFIO_DEVICE_FEATURE_MIGRATION 1
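
A sketch of discovering migration support through VFIO_DEVICE_FEATURE with the GET flag, assuming the argsz/flags/data[] layout of struct vfio_device_feature earlier in this header; the __u64 buffer is only there to guarantee alignment for the trailing payload:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int mig_flags(int device_fd, __u64 *flags)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_migration) + 7) / 8] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_migration *mig = (void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_MIGRATION;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;	/* migration not supported */
	*flags = mig->flags;	/* VFIO_MIGRATION_STOP_COPY etc. */
	return 0;
}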
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
+ * device. The new state is supplied in device_state, see enum
+ * vfio_device_mig_state for details
+ *
+ * The kernel migration driver must fully transition the device to the new state
+ * value before the operation returns to the user.
+ *
+ * The kernel migration driver must not generate asynchronous device state
+ * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
+ * ioctl as described above.
+ *
+ * If this function fails then the current device_state may be the original
+ * operating state or some other state along the combination transition path.
+ * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
+ * to return to the original state, or attempt to return to some other state
+ * such as RUNNING or STOP.
+ *
+ * If the new_state starts a new data transfer session then the FD associated
+ * with that session is returned in data_fd. The user is responsible for
+ * closing this FD when finished. The user must consider the migration data stream
+ * carried over the FD to be opaque and must preserve the byte order of the
+ * stream. The user is not required to preserve buffer segmentation when writing
+ * the data stream during the RESUMING operation.
+ *
+ * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
+ * device, data_fd will be -1.
+ */
+struct vfio_device_feature_mig_state {
+ __u32 device_state; /* From enum vfio_device_mig_state */
+ __s32 data_fd;
+};
+#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
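
A sketch of driving a state change from userspace, reusing the feature-buffer pattern above; new_state is a value from enum vfio_device_mig_state below:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int mig_set_state(int device_fd, __u32 new_state, int *data_fd)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_mig_state) + 7) / 8] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_mig_state *mig = (void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = new_state;	/* e.g. VFIO_DEVICE_STATE_STOP_COPY */

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;
	/* Only arcs that open a transfer session return a usable fd. */
	*data_fd = mig->data_fd;
	return 0;
}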
+
+/*
+ * The device migration Finite State Machine is described by the enum
+ * vfio_device_mig_state. Some of the FSM arcs will create a migration data
+ * transfer session by returning a FD, in this case the migration data will
+ * flow over the FD using read() and write() as discussed below.
+ *
+ * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
+ * RUNNING - The device is running normally
+ * STOP - The device does not change the internal or external state
+ * STOP_COPY - The device internal state can be read out
+ * RESUMING - The device is stopped and is loading a new internal state
+ * ERROR - The device has failed and must be reset
+ *
+ * And optional states to support VFIO_MIGRATION_P2P:
+ * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
+ * And VFIO_MIGRATION_PRE_COPY:
+ * PRE_COPY - The device is running normally but tracking internal state
+ * changes
+ * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
+ * PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
+ *
+ * The FSM takes actions on the arcs between FSM states. The driver implements
+ * the following behavior for the FSM arcs:
+ *
+ * RUNNING_P2P -> STOP
+ * STOP_COPY -> STOP
+ * While in STOP the device must cease operation. The device
+ * must not generate interrupts, DMA, or any other change to external state.
+ * It must not change its internal state. When stopped the device and kernel
+ * migration driver must accept and respond to interaction to support external
+ * subsystems in the STOP state, for example PCI MSI-X and PCI config space.
+ * Failure by the user to restrict device access while in STOP must not result
+ * in error conditions outside the user context (ex. host system faults).
+ *
+ * The STOP_COPY arc will terminate a data transfer session.
+ *
+ * RESUMING -> STOP
+ * Leaving RESUMING terminates a data transfer session and indicates the
+ * device should complete processing of the data delivered by write(). The
+ * kernel migration driver should complete the incorporation of data written
+ * to the data transfer FD into the device internal state and perform
+ * final validity and consistency checking of the new device state. If the
+ * user provided data is found to be incomplete, inconsistent, or otherwise
+ * invalid, the migration driver must fail the SET_STATE ioctl and
+ * optionally go to the ERROR state as described below.
+ *
+ * While in STOP the device has the same behavior as other STOP states
+ * described above.
+ *
+ * To abort a RESUMING session the device must be reset.
+ *
+ * PRE_COPY -> RUNNING
+ * RUNNING_P2P -> RUNNING
+ * While in RUNNING the device is fully operational, the device may generate
+ * interrupts, DMA, respond to MMIO, all vfio device regions are functional,
+ * and the device may advance its internal state.
+ *
+ * The PRE_COPY arc will terminate a data transfer session.
+ *
+ * PRE_COPY_P2P -> RUNNING_P2P
+ * RUNNING -> RUNNING_P2P
+ * STOP -> RUNNING_P2P
+ * While in RUNNING_P2P the device is partially running in the P2P quiescent
+ * state defined below.
+ *
+ * The PRE_COPY_P2P arc will terminate a data transfer session.
+ *
+ * RUNNING -> PRE_COPY
+ * RUNNING_P2P -> PRE_COPY_P2P
+ * STOP -> STOP_COPY
+ * PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
+ * which share a data transfer session. Moving between these states alters
+ * what is streamed in the session, but does not terminate or otherwise affect
+ * the associated fd.
+ *
+ * These arcs begin the process of saving the device state and will return a
+ * new data_fd. The migration driver may perform actions such as enabling
+ * dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
+ *
+ * Each arc does not change the device operation, the device remains
+ * RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
+ * in PRE_COPY_P2P -> STOP_COPY.
+ *
+ * PRE_COPY -> PRE_COPY_P2P
+ * Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
+ * However, while in the PRE_COPY_P2P state, the device is partially running
+ * in the P2P quiescent state defined below, like RUNNING_P2P.
+ *
+ * PRE_COPY_P2P -> PRE_COPY
+ * This arc allows returning the device to a full RUNNING behavior while
+ * continuing all the behaviors of PRE_COPY.
+ *
+ * PRE_COPY_P2P -> STOP_COPY
+ * While in the STOP_COPY state the device has the same behavior as STOP
+ * with the addition that the data transfer session continues to stream the
+ * migration state. End of stream on the FD indicates the entire device
+ * state has been transferred.
+ *
+ * The user should take steps to restrict access to vfio device regions while
+ * the device is in STOP_COPY or risk corruption of the device migration data
+ * stream.
+ *
+ * STOP -> RESUMING
+ * Entering the RESUMING state starts a process of restoring the device state
+ * and will return a new data_fd. The data stream fed into the data_fd should
+ * be taken from the data transfer output of a single FD during saving from
+ * a compatible device. The migration driver may alter/reset the internal
+ * device state for this arc if required to prepare the device to receive the
+ * migration data.
+ *
+ * STOP_COPY -> PRE_COPY
+ * STOP_COPY -> PRE_COPY_P2P
+ * These arcs are not permitted and return an error if requested. Future
+ * revisions of this API may define behaviors for these arcs, in this case
+ * support will be discoverable by a new flag in
+ * VFIO_DEVICE_FEATURE_MIGRATION.
+ *
+ * any -> ERROR
+ * ERROR cannot be specified as a device state, however any transition request
+ * can be failed with an errno return and may then move the device_state into
+ * ERROR. In this case the device was unable to execute the requested arc and
+ * was also unable to restore the device to any valid device_state.
+ * To recover from ERROR VFIO_DEVICE_RESET must be used to return the
+ * device_state back to RUNNING.
+ *
+ * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
+ * state for the device for the purposes of managing multiple devices within a
+ * user context where peer-to-peer DMA between devices may be active. The
+ * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
+ * any new P2P DMA transactions. If the device can identify P2P transactions
+ * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
+ * driver must complete any such outstanding operations prior to completing the
+ * FSM arc into a P2P state. For the purposes of this specification, the
+ * P2P states behave as though the device were fully running if they are
+ * not supported. As while in STOP or STOP_COPY, the user must not touch
+ * the device, otherwise the state can be exited.
+ *
+ * The remaining possible transitions are interpreted as combinations of the
+ * above FSM arcs. As there are multiple paths through the FSM arcs the path
+ * should be selected based on the following rules:
+ * - Select the shortest path.
+ * - The path cannot have saving group states as interior arcs, only
+ * starting/end states.
+ * Refer to vfio_mig_get_next_state() for the result of the algorithm.
+ *
+ * The automatic transit through the FSM arcs that make up the combination
+ * transition is invisible to the user. When working with combination arcs the
+ * user may see any step along the path in the device_state if SET_STATE
+ * fails. When handling these types of errors users should anticipate future
+ * revisions of this protocol using new states and those states becoming
+ * visible in this case.
+ *
+ * The optional states cannot be used with SET_STATE if the device does not
+ * support them. The user can discover if these states are supported by using
+ * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
+ * avoid knowing about these optional states if the kernel driver supports them.
+ *
+ * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
+ * is not present.
+ */
+enum vfio_device_mig_state {
+ VFIO_DEVICE_STATE_ERROR = 0,
+ VFIO_DEVICE_STATE_STOP = 1,
+ VFIO_DEVICE_STATE_RUNNING = 2,
+ VFIO_DEVICE_STATE_STOP_COPY = 3,
+ VFIO_DEVICE_STATE_RESUMING = 4,
+ VFIO_DEVICE_STATE_RUNNING_P2P = 5,
+ VFIO_DEVICE_STATE_PRE_COPY = 6,
+ VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
+ VFIO_DEVICE_STATE_NR,
+};
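
A sketch of draining a STOP_COPY session under the FSM rules above: read the opaque stream until EOF. Error handling is trimmed, and out_fd is a hypothetical destination (file or socket), not part of the uAPI:

#include <unistd.h>

static int save_stream(int data_fd, int out_fd)
{
	char chunk[65536];
	ssize_t n;

	/* read() == 0 marks the end of the device state in STOP_COPY. */
	while ((n = read(data_fd, chunk, sizeof(chunk))) > 0)
		if (write(out_fd, chunk, n) != n)
			return -1;
	return n < 0 ? -1 : 0;
}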
+
+/**
+ * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
+ *
+ * This ioctl is used on the migration data FD in the precopy phase of the
+ * migration data transfer. It returns an estimate of the current data sizes
+ * remaining to be transferred. It allows the user to judge when it is
+ * appropriate to leave PRE_COPY for STOP_COPY.
+ *
+ * This ioctl is valid only in the PRE_COPY states; the kernel driver
+ * should return -EINVAL in any other migration state.
+ *
+ * The vfio_precopy_info data structure returned by this ioctl provides
+ * estimates of data available from the device during the PRE_COPY states.
+ * This estimate is split into two categories, initial_bytes and
+ * dirty_bytes.
+ *
+ * The initial_bytes field indicates the amount of initial precopy
+ * data available from the device. This field should have a non-zero initial
+ * value and decrease as migration data is read from the device.
+ * It is recommended to leave PRE_COPY for STOP_COPY only after this field
+ * reaches zero. Leaving PRE_COPY earlier may leave more data to be
+ * transferred during STOP_COPY and therefore lengthen the downtime.
+ *
+ * The dirty_bytes field tracks device state changes relative to data
+ * previously retrieved. This field starts at zero and may increase as
+ * the internal device state is modified or decrease as that modified
+ * state is read from the device.
+ *
+ * Userspace may use the combination of these fields to estimate the
+ * potential data size available during the PRE_COPY phases, as well as
+ * trends relative to the rate the device is dirtying its internal
+ * state, but these fields are not required to have any bearing relative
+ * to the data size available during the STOP_COPY phase.
+ *
+ * Drivers have a lot of flexibility in when and what they transfer during the
+ * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
+ *
+ * During pre-copy the migration data FD has a temporary "end of stream" that is
+ * reached when both initial_bytes and dirty_bytes are zero. For instance, this
+ * may indicate that the device is idle and not currently dirtying any internal
+ * state. When read() is done on this temporary end of stream the kernel driver
+ * should return ENOMSG from read(). Userspace can wait for more data (which may
+ * never come) by using poll.
+ *
+ * Once in STOP_COPY the migration data FD has a permanent end of stream
+ * signaled in the usual way by read() always returning 0 and poll always
+ * returning readable. ENOMSG may not be returned in STOP_COPY.
+ * Support for this ioctl is mandatory if a driver claims to support
+ * VFIO_MIGRATION_PRE_COPY.
+ *
+ * Return: 0 on success, -1 and errno set on failure.
+ */
+struct vfio_precopy_info {
+ __u32 argsz;
+ __u32 flags;
+ __aligned_u64 initial_bytes;
+ __aligned_u64 dirty_bytes;
+};
+
+#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)
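
A sketch of the polling decision this ioctl enables; the dirty_threshold policy knob is an assumption, not part of the uAPI:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int precopy_settled(int data_fd, __u64 dirty_threshold)
{
	struct vfio_precopy_info info = { .argsz = sizeof(info) };

	if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
		return -1;	/* e.g. not in a PRE_COPY state */
	/* Move to STOP_COPY once the initial data is drained and the
	 * device is no longer dirtying much internal state. */
	return info.initial_bytes == 0 && info.dirty_bytes <= dirty_threshold;
}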
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
+ * state with the platform-based power management. Device use of lower power
+ * states depends on factors managed by the runtime power management core,
+ * including system level support and coordinating support among dependent
+ * devices. Enabling device low power entry does not guarantee lower power
+ * usage by the device, nor is a mechanism provided through this feature to
+ * know the current power state of the device. If any device access happens
+ * (either from the host or through the vfio uAPI) when the device is in the
+ * low power state, then the host will move the device out of the low power
+ * state as necessary prior to the access. Once the access is completed, the
+ * device may re-enter the low power state. For single shot low power support
+ * with wake-up notification, see
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below. Access to mmap'd
+ * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
+ * calling LOW_POWER_EXIT.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
+
+/*
+ * This device feature has the same behavior as
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
+ * provides an eventfd for wake-up notification. When the device moves out of
+ * the low power state for the wake-up, the host will not allow the device to
+ * re-enter a low power state without a subsequent user call to one of the low
+ * power entry device feature IOCTLs. Access to mmap'd device regions is
+ * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
+ * low power exit. The low power exit can happen either through LOW_POWER_EXIT
+ * or through any other access (where the wake-up notification has been
+ * generated). The access to mmap'd device regions will not trigger low power
+ * exit.
+ *
+ * The notification through the provided eventfd will be generated only when
+ * the device has entered and is resumed from a low power state after
+ * calling this device feature IOCTL. A device that has not entered low power
+ * state, as managed through the runtime power management core, will not
+ * generate a notification through the provided eventfd on access. Calling the
+ * LOW_POWER_EXIT feature is optional in the case where notification has been
+ * signaled on the provided eventfd that a resume from low power has occurred.
+ */
+struct vfio_device_low_power_entry_with_wakeup {
+ __s32 wakeup_eventfd;
+ __u32 reserved;
+};
+
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
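
A sketch of arming the wake-up path; eventfd(2) and the feature-buffer pattern from the earlier examples are the only ingredients:

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int enter_low_power_with_wakeup(int device_fd, int *event_fd)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_low_power_entry_with_wakeup) +
		   7) / 8] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_low_power_entry_with_wakeup *entry =
		(void *)feature->data;

	*event_fd = eventfd(0, EFD_CLOEXEC);
	if (*event_fd < 0)
		return -1;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
	entry->wakeup_eventfd = *event_fd;

	/* A read on *event_fd later signals a resume from low power. */
	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}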
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
+ * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
+ * This device feature IOCTL may itself generate a wakeup eventfd notification
+ * in the latter case if the device had previously entered a low power state.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
+ * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
+ * DMA logging.
+ *
+ * DMA logging allows a device to internally record what DMAs the device is
+ * initiating and report them back to userspace. It is part of the VFIO
+ * migration infrastructure that allows implementing dirty page tracking
+ * during the pre-copy phase of live migration. Only DMA WRITEs are logged,
+ * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
+ *
+ * When DMA logging is started a range of IOVAs to monitor is provided and the
+ * device can optimize its logging to cover only the IOVA range given. Each
+ * DMA that the device initiates inside the range will be logged by the device
+ * for later retrieval.
+ *
+ * page_size is an input that hints what tracking granularity the device
+ * should try to achieve. If the device cannot do the hinted page size then
+ * it is the driver's choice which page size to pick, based on what it supports.
+ * On output the device will return the page size it selected.
+ *
+ * ranges is a pointer to an array of
+ * struct vfio_device_feature_dma_logging_range.
+ *
+ * The core kernel code guarantees to support, at minimum, a num_ranges
+ * that fits into a single kernel page. User space can try higher values
+ * but should fall back if that cannot be achieved due to driver
+ * limitations.
+ *
+ * A single call to start device DMA logging can be issued and a matching stop
+ * should follow at the end. Another start is not allowed in the meantime.
+ */
+struct vfio_device_feature_dma_logging_control {
+ __aligned_u64 page_size;
+ __u32 num_ranges;
+ __u32 __reserved;
+ __aligned_u64 ranges;
+};
+
+struct vfio_device_feature_dma_logging_range {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
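
A sketch of starting dirty tracking over a single range; the 4096-byte page_size is only the hint described above:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int dma_log_start(int device_fd, __u64 iova, __u64 length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_dma_logging_control) +
		   7) / 8] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_dma_logging_control *ctrl =
		(void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	ctrl->page_size = 4096;	/* a hint; the driver may pick another */
	ctrl->num_ranges = 1;
	ctrl->ranges = (uintptr_t)&range;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}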
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
+ * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
+ */
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
+ *
+ * Query the device's DMA log for written pages within the given IOVA range.
+ * During querying the log is cleared for the IOVA range.
+ *
+ * bitmap is a pointer to an array of u64s that will hold the output bitmap
+ * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
+ * is given by:
+ * bitmap[((addr - iova) / page_size) / 64] &
+ * (1ULL << (((addr - iova) / page_size) % 64))
+ *
+ * The input page_size can be any power of two value and does not have to
+ * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
+ * will format its internal logging to match the reporting page size, possibly
+ * by replicating bits if the internal page size is lower than requested.
+ *
+ * The LOGGING_REPORT will only set bits in the bitmap and never clear or
+ * perform any initialization of the user provided bitmap.
+ *
+ * If any error is returned userspace should assume that the dirty log is
+ * corrupted. Error recovery is to consider all memory dirty and try to
+ * restart the dirty tracking, or to abort/restart the whole migration.
+ *
+ * If DMA logging is not enabled, an error will be returned.
+ */
+struct vfio_device_feature_dma_logging_report {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 bitmap;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
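
A sketch of the read-back, with bitmap sizing left to the caller: one bit per page_size unit, 64 units per __u64, zero-filled beforehand since REPORT only sets bits:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int dma_log_report(int device_fd, __u64 iova, __u64 length,
			  __u64 page_size, __u64 *bitmap)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_dma_logging_report) +
		   7) / 8] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_dma_logging_report *report =
		(void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
	report->iova = iova;
	report->length = length;
	report->page_size = page_size;
	report->bitmap = (uintptr_t)bitmap;	/* caller zero-fills */

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}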
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
+ * be required to complete stop copy.
+ *
+ * Note: Can be called in any device state.
+ */
+
+struct vfio_device_feature_mig_data_size {
+ __aligned_u64 stop_copy_length;
+};
+
+#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
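
The same GET pattern as the earlier feature examples fetches the estimate:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int mig_data_size(int device_fd, __u64 *stop_copy_length)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_mig_data_size) + 7) / 8] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_mig_data_size *size =
		(void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;
	*stop_copy_length = size->stop_copy_length;
	return 0;
}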
+
+/**
+ * Upon VFIO_DEVICE_FEATURE_SET, set or clear bus mastering for the device
+ * based on the operation specified in the op field.
+ *
+ * The functionality is incorporated for devices that need bus master
+ * control, but whose in-band device interface lacks the support.
+ * Consequently, it is not applicable to PCI devices, as bus master control
+ * for PCI devices is managed in-band through the configuration space. At
+ * present, this feature is supported only for CDX devices.
+ * Configuring the device's BUS MASTER setting as CLEAR blocks all incoming
+ * DMA requests from the device, while configuring it as SET (enable)
+ * grants the device the capability to perform DMA to host memory.
+ */
+struct vfio_device_feature_bus_master {
+ __u32 op;
+#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */
+#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */
+};
+#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@@ -990,8 +1473,9 @@ struct vfio_iommu_type1_info {
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
- __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+ __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */
__u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
/*
@@ -1039,6 +1523,21 @@ struct vfio_iommu_type1_info_cap_migration {
__u64 max_dirty_bitmap_size; /* in bytes */
};
+/*
+ * The DMA available capability reports the current number of
+ * simultaneously outstanding DMA mappings that are allowed.
+ *
+ * The structure below defines version 1 of this capability.
+ *
+ * avail: specifies the current number of outstanding DMA mappings allowed.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
+
+struct vfio_iommu_type1_info_dma_avail {
+ struct vfio_info_cap_header header;
+ __u32 avail;
+};
+
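
A sketch of locating this capability in the VFIO_IOMMU_GET_INFO output, assuming the usual id/next chaining of struct vfio_info_cap_header:

#include <linux/vfio.h>

static __u32 iommu_dma_avail(struct vfio_iommu_type1_info *info)
{
	__u32 offset = info->cap_offset;

	if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
		return 0;

	while (offset) {
		struct vfio_info_cap_header *hdr =
			(struct vfio_info_cap_header *)((__u8 *)info + offset);

		if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL)
			return ((struct vfio_iommu_type1_info_dma_avail *)
				hdr)->avail;
		offset = hdr->next;
	}
	return 0;	/* capability not present */
}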
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
/**
@@ -1046,12 +1545,21 @@ struct vfio_iommu_type1_info_cap_migration {
*
* Map process virtual addresses to IO virtual addresses using the
* provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+ *
+ * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
+ * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To
+ * maintain memory consistency within the user application, the updated vaddr
+ * must address the same memory object as originally mapped. Failure to do so
+ * will result in user memory corruption and/or device misbehavior. iova and
+ * size must match those in the original MAP_DMA call. Protection is not
+ * changed, and the READ & WRITE flags must be 0.
*/
struct vfio_iommu_type1_dma_map {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
+#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
__u64 vaddr; /* Process virtual address */
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
@@ -1074,6 +1582,7 @@ struct vfio_bitmap {
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
+ *
* VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
* before unmapping IO virtual addresses. When this flag is set, the user must
* provide a struct vfio_bitmap in data[]. User must provide zero-allocated
@@ -1083,11 +1592,21 @@ struct vfio_bitmap {
* indicates that the page at that offset from iova is dirty. A Bitmap of the
* pages in the range of unmapped size is returned in the user-provided
* vfio_bitmap.data.
+ *
+ * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses. iova and size
+ * must be 0. This cannot be combined with the get-dirty-bitmap flag.
+ *
+ * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
+ * virtual addresses in the iova range. DMA to already-mapped pages continues.
+ * Groups may not be added to the container while any addresses are invalid.
+ * This cannot be combined with the get-dirty-bitmap flag.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
+#define VFIO_DMA_UNMAP_FLAG_ALL (1 << 1)
+#define VFIO_DMA_UNMAP_FLAG_VADDR (1 << 2)
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
__u8 data[];
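
A sketch of the invalidate-then-update handshake the VADDR flags describe (e.g. across a userspace re-exec); new_vaddr must alias the originally mapped memory object, and the READ/WRITE flags stay 0 on the update:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int remap_vaddr(int container_fd, __u64 iova, __u64 size,
		       __u64 new_vaddr)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.flags = VFIO_DMA_UNMAP_FLAG_VADDR,
		.iova = iova,
		.size = size,
	};
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_VADDR,
		.vaddr = new_vaddr,	/* must alias the original memory */
		.iova = iova,		/* must match the original MAP_DMA */
		.size = size,
	};

	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;
	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}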
diff --git a/include/uapi/linux/vfio_zdev.h b/include/uapi/linux/vfio_zdev.h
new file mode 100644
index 000000000000..77f2aff1f27e
--- /dev/null
+++ b/include/uapi/linux/vfio_zdev.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * VFIO Region definitions for ZPCI devices
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ * Matthew Rosato <mjrosato@linux.ibm.com>
+ */
+
+#ifndef _VFIO_ZDEV_H_
+#define _VFIO_ZDEV_H_
+
+#include <linux/types.h>
+#include <linux/vfio.h>
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_BASE - Base PCI Function information
+ *
+ * This capability provides a set of descriptive information about the
+ * associated PCI function.
+ */
+struct vfio_device_info_cap_zpci_base {
+ struct vfio_info_cap_header header;
+ __u64 start_dma; /* Start of available DMA addresses */
+ __u64 end_dma; /* End of available DMA addresses */
+ __u16 pchid; /* Physical Channel ID */
+ __u16 vfn; /* Virtual function number */
+ __u16 fmb_length; /* Measurement Block Length (in bytes) */
+ __u8 pft; /* PCI Function Type */
+ __u8 gid; /* PCI function group ID */
+ /* End of version 1 */
+ __u32 fh; /* PCI function handle */
+ /* End of version 2 */
+};
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_GROUP - Base PCI Function Group information
+ *
+ * This capability provides a set of descriptive information about the group of
+ * PCI functions that the associated device belongs to.
+ */
+struct vfio_device_info_cap_zpci_group {
+ struct vfio_info_cap_header header;
+ __u64 dasm; /* DMA Address space mask */
+ __u64 msi_addr; /* MSI address */
+ __u64 flags;
+#define VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH 1 /* Program-specified TLB refresh */
+ __u16 mui; /* Measurement Block Update Interval */
+ __u16 noi; /* Maximum number of MSIs */
+ __u16 maxstbl; /* Maximum Store Block Length */
+ __u8 version; /* Supported PCI Version */
+ /* End of version 1 */
+ __u8 reserved;
+ __u16 imaxstbl; /* Maximum Interpreted Store Block Length */
+ /* End of version 2 */
+};
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_UTIL - Utility String
+ *
+ * This capability provides the utility string for the associated device, which
+ * is a device identifier string made up of EBCDIC characters. 'size' specifies
+ * the length of 'util_str'.
+ */
+struct vfio_device_info_cap_zpci_util {
+ struct vfio_info_cap_header header;
+ __u32 size;
+ __u8 util_str[];
+};
+
+/**
+ * VFIO_DEVICE_INFO_CAP_ZPCI_PFIP - PCI Function Path
+ *
+ * This capability provides the PCI function path string, which is an identifier
+ * that describes the internal hardware path of the device. 'size' specifies
+ * the length of 'pfip'.
+ */
+struct vfio_device_info_cap_zpci_pfip {
+ struct vfio_info_cap_header header;
+ __u32 size;
+ __u8 pfip[];
+};
+
+#endif
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 75232185324a..bea697390613 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -45,6 +45,25 @@
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
/* Specify an eventfd file descriptor to signal on log write. */
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+/* By default, a device gets one vhost_worker that its virtqueues share. This
+ * command allows the owner of the device to create an additional vhost_worker
+ * for the device. It can later be bound to one or more of its virtqueues using
+ * the VHOST_ATTACH_VRING_WORKER command.
+ *
+ * This must be called after VHOST_SET_OWNER and the caller must be the owner
+ * of the device. The new thread will inherit the caller's cgroups and namespaces,
+ * and will share the caller's memory space. The new thread will also be
+ * counted against the caller's RLIMIT_NPROC value.
+ *
+ * The worker's ID used in other commands will be returned in
+ * vhost_worker_state.
+ */
+#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state)
+/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any
+ * virtqueue. If userspace is not able to call this for workers it has created,
+ * the kernel will free all the device's workers when the device is closed.
+ */
+#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state)
/* Ring setup. */
/* Set number of descriptors in ring. This parameter can not
@@ -70,6 +89,18 @@
#define VHOST_VRING_BIG_ENDIAN 1
#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
+ * virtqueues.
+ *
+ * This will replace the virtqueue's existing worker. If the replaced worker
+ * is no longer attached to any virtqueues, it can be freed with
+ * VHOST_FREE_WORKER.
+ */
+#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15, \
+ struct vhost_vring_worker)
+/* Return the vring worker's ID */
+#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16, \
+ struct vhost_vring_worker)
/* The following ioctls use eventfd file descriptors to signal and poll
* for events. */
@@ -89,11 +120,6 @@
/* Set or get vhost backend capability */
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-/* IOTLB can accept batching hints */
-#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
-
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
@@ -146,4 +172,66 @@
/* Set event fd for config interrupt*/
#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)
+
+/* Get the valid iova range */
+#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
+ struct vhost_vdpa_iova_range)
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
+ struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state, the ASID associated with this
+ * group is stored in the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
+ struct vhost_vring_state)
+
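
A sketch of the index/num convention in practice: look up the group of virtqueue vq, then bind that group to asid:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_vq_asid(int vdpa_fd, unsigned int vq, unsigned int asid)
{
	struct vhost_vring_state s = { .index = vq };

	if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &s))
		return -1;
	s.index = s.num;	/* the group this virtqueue belongs to */
	s.num = asid;
	return ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &s);
}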
+/* Suspend a device so it does not process virtqueue requests anymore
+ *
+ * After the return of this ioctl the device must preserve all the necessary state
+ * (the virtqueue vring base plus the possible device specific states) that is
+ * required for restoring in the future. The device must not change its
+ * configuration after that point.
+ */
+#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
+
+/* Resume a device so it can resume processing virtqueue requests
+ *
+ * After the return of this ioctl the device will have restored all the
+ * necessary states and it is fully operational to continue processing the
+ * virtqueue descriptors.
+ */
+#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+
+/* Get the group for the descriptor table including driver & device areas
+ * of a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of vhost_vring_state.
+ * The group ID of the descriptor table for this specific virtqueue
+ * is returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
+ struct vhost_vring_state)
+
+/* Get the queue size of a specific virtqueue.
+ * Userspace sets the vring index in vhost_vring_state.index;
+ * the kernel returns the queue size in vhost_vring_state.num.
+ */
+#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
+ struct vhost_vring_state)
#endif
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index 9a269a88a6ff..d7656908f730 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -47,6 +47,22 @@ struct vhost_vring_addr {
__u64 log_guest_addr;
};
+struct vhost_worker_state {
+ /*
+ * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id.
+ * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker
+ * to free.
+ */
+ unsigned int worker_id;
+};
+
+struct vhost_vring_worker {
+ /* vring index */
+ unsigned int index;
+ /* The id of the vhost_worker returned from VHOST_NEW_WORKER */
+ unsigned int worker_id;
+};
+
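
A sketch tying the two worker ioctls together once these structs are in view; the device fd is assumed to have had VHOST_SET_OWNER called by this thread:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int add_worker_for_vq(int vhost_fd, unsigned int vq)
{
	struct vhost_worker_state state = {};
	struct vhost_vring_worker attach = { .index = vq };

	/* Create a second worker; the kernel fills in worker_id. */
	if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state))
		return -1;
	attach.worker_id = state.worker_id;
	/* Replace vq's current worker with the new one. */
	return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &attach);
}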
/* no alignment requirement */
struct vhost_iotlb_msg {
__u64 iova;
@@ -87,7 +103,7 @@ struct vhost_msg {
struct vhost_msg_v2 {
__u32 type;
- __u32 reserved;
+ __u32 asid;
union {
struct vhost_iotlb_msg iotlb;
__u8 padding[64];
@@ -107,7 +123,7 @@ struct vhost_memory_region {
struct vhost_memory {
__u32 nregions;
__u32 padding;
- struct vhost_memory_region regions[0];
+ struct vhost_memory_region regions[];
};
/* VHOST_SCSI specific definitions */
@@ -135,7 +151,16 @@ struct vhost_scsi_target {
struct vhost_vdpa_config {
__u32 off;
__u32 len;
- __u8 buf[0];
+ __u8 buf[];
+};
+
+/* vhost vdpa IOVA range
+ * @first: First address that can be mapped by vhost-vDPA
+ * @last: Last address that can be mapped by vhost-vDPA
+ */
+struct vhost_vdpa_iova_range {
+ __u64 first;
+ __u64 last;
};
/* Feature bits */
@@ -144,4 +169,28 @@ struct vhost_vdpa_config {
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
+/* IOTLB can accept an address space identifier through the V2 type of
+ * IOTLB message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+/* Device can be suspended */
+#define VHOST_BACKEND_F_SUSPEND 0x4
+/* Device can be resumed */
+#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID 0x7
+/* IOTLB doesn't flush memory mappings across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
+
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c7b70ff53bc1..a8015e5e7fa4 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -191,8 +191,6 @@ enum v4l2_memory {
V4L2_MEMORY_DMABUF = 4,
};
-#define V4L2_FLAG_MEMORY_NON_CONSISTENT (1 << 0)
-
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
/*
@@ -223,9 +221,7 @@ enum v4l2_colorspace {
V4L2_COLORSPACE_470_SYSTEM_M = 5,
/*
- * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
- * dealing with really old PAL/SECAM recordings. Superseded by
- * SMPTE 170M.
+ * EBU Tech 3213 PAL/SECAM colorspace.
*/
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
@@ -249,6 +245,14 @@ enum v4l2_colorspace {
/* DCI-P3 colorspace, used by cinema projectors */
V4L2_COLORSPACE_DCI_P3 = 12,
+
+#ifdef __KERNEL__
+ /*
+ * Largest supported colorspace value, assigned by the compiler, used
+ * by the framework to check for invalid values.
+ */
+ V4L2_COLORSPACE_LAST,
+#endif
};
/*
@@ -287,6 +291,13 @@ enum v4l2_xfer_func {
V4L2_XFER_FUNC_NONE = 5,
V4L2_XFER_FUNC_DCI_P3 = 6,
V4L2_XFER_FUNC_SMPTE2084 = 7,
+#ifdef __KERNEL__
+ /*
+ * Largest supported transfer function value, assigned by the compiler,
+ * used by the framework to check for invalid values.
+ */
+ V4L2_XFER_FUNC_LAST,
+#endif
};
/*
@@ -347,6 +358,13 @@ enum v4l2_ycbcr_encoding {
/* SMPTE 240M -- Obsolete HDTV */
V4L2_YCBCR_ENC_SMPTE240M = 8,
+#ifdef __KERNEL__
+ /*
+ * Largest supported encoding value, assigned by the compiler, used by
+ * the framework to check for invalid values.
+ */
+ V4L2_YCBCR_ENC_LAST,
+#endif
};
/*
@@ -375,9 +393,9 @@ enum v4l2_hsv_encoding {
enum v4l2_quantization {
/*
- * The default for R'G'B' quantization is always full range, except
- * for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG: this is full range.
+ * The default for R'G'B' quantization is always full range.
+ * For Y'CbCr the quantization is always limited range, except
+ * for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -386,14 +404,13 @@ enum v4l2_quantization {
/*
* Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
- * This depends on whether the image is RGB or not, the colorspace and the
- * Y'CbCr encoding.
+ * This depends on whether the image is RGB or not, and on the colorspace.
+ * The Y'CbCr encoding is no longer used, but is still there for backwards
+ * compatibility.
*/
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
- (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
- V4L2_QUANTIZATION_LIM_RANGE : \
- (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
- V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
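
A usage sketch of the simplified macro: the ycbcr_enc argument is accepted but ignored by the new definition.

#include <linux/videodev2.h>

/* An RGB buffer in the sRGB colorspace resolves to full range. */
enum v4l2_quantization q =
	V4L2_MAP_QUANTIZATION_DEFAULT(1 /* is_rgb_or_hsv */,
				      V4L2_COLORSPACE_SRGB,
				      V4L2_YCBCR_ENC_DEFAULT);
/* q == V4L2_QUANTIZATION_FULL_RANGE */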
/*
* Deprecated names for opRGB colorspace (IEC 61966-2-5)
@@ -485,7 +502,6 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
-#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
@@ -520,7 +536,7 @@ struct v4l2_pix_format {
/* Pixel format FOURCC depth Description */
-/* RGB formats */
+/* RGB formats (1 or 2 bytes per pixel) */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */
@@ -529,12 +545,6 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
-
-/*
- * Originally this had 'BA12' as fourcc, but this clashed with the older
- * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc.
- * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444.
- */
#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
@@ -551,6 +561,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
+
+/* RGB formats (3 or 4 bytes per pixel) */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
@@ -564,6 +576,13 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+#define V4L2_PIX_FMT_RGBX1010102 v4l2_fourcc('R', 'X', '3', '0') /* 32 RGBX-10-10-10-2 */
+#define V4L2_PIX_FMT_RGBA1010102 v4l2_fourcc('R', 'A', '3', '0') /* 32 RGBA-10-10-10-2 */
+#define V4L2_PIX_FMT_ARGB2101010 v4l2_fourcc('A', 'R', '3', '0') /* 32 ARGB-2-10-10-10 */
+
+/* RGB formats (6 or 8 bytes per pixel) */
+#define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */
+#define V4L2_PIX_FMT_ABGR64_12 v4l2_fourcc('B', '4', '1', '2') /* 64 BGRA 12-bit per component */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
@@ -571,6 +590,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_Y012 v4l2_fourcc('Y', '0', '1', '2') /* 12 Greyscale */
#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
@@ -578,6 +598,7 @@ struct v4l2_pix_format {
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
+#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -595,14 +616,24 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
+#define V4L2_PIX_FMT_YUV24 v4l2_fourcc('Y', 'U', 'V', '3') /* 24 YUV-8-8-8 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
#define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */
#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */
#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */
-#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
-#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
+#define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */
+#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+#define V4L2_PIX_FMT_YUV48_12 v4l2_fourcc('Y', '3', '1', '2') /* 48 YUV 4:4:4 12-bit per component */
+
+/*
+ * YCbCr packed format. For each Y2xx format, xx bits of valid data occupy the MSBs
+ * of the 16 bit components, and 16-xx bits of zero padding occupy the LSBs.
+ */
+#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') /* 32 YUYV 4:2:2 */
+#define V4L2_PIX_FMT_Y212 v4l2_fourcc('Y', '2', '1', '2') /* 32 YUYV 4:2:2 */
+#define V4L2_PIX_FMT_Y216 v4l2_fourcc('Y', '2', '1', '6') /* 32 YUYV 4:2:2 */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -611,14 +642,15 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */
+#define V4L2_PIX_FMT_P012 v4l2_fourcc('P', '0', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
-#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
-#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
+#define V4L2_PIX_FMT_P012M v4l2_fourcc('P', 'M', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */
/* three planes - Y Cb, Cr */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
@@ -636,6 +668,21 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */
#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */
+/* Tiled YUV formats */
+#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
+#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
+#define V4L2_PIX_FMT_NV15_4L4 v4l2_fourcc('V', 'T', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit 4x4 tiles */
+#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */
+#define V4L2_PIX_FMT_NV12_8L128 v4l2_fourcc('A', 'T', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12_10BE_8L128 v4l2_fourcc_be('A', 'X', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
+
+/* Tiled YUV formats, non contiguous planes */
+#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */
+#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12M_8L128 v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12M_10BE_8L128 v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
+
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
@@ -704,10 +751,18 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
+#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
+#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frame */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
+#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
+#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
+#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
+#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
+#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -740,11 +795,18 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
+#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
+#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */
+#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
-#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
+#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
+#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
+#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
+#define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
+#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
-/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
+/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
@@ -773,11 +835,16 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
+/* Vendor specific - used for RK_ISP1 camera sub-system */
+#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
+#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+
/* priv field value to indicate that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
/* Flags */
#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001
+#define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002
/*
* F O R M A T E N U M E R A T I O N
@@ -797,6 +864,11 @@ struct v4l2_fmtdesc {
#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004
#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008
#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010
+#define V4L2_FMT_FLAG_CSC_COLORSPACE 0x0020
+#define V4L2_FMT_FLAG_CSC_XFER_FUNC 0x0040
+#define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080
+#define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC
+#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100
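These enumeration flags pair with V4L2_PIX_FMT_FLAG_SET_CSC above: a driver advertises which colorimetry fields it can convert, and an application opts in per VIDIOC_S_FMT call. A minimal sketch, assuming fd is an open capture node whose driver reported V4L2_FMT_FLAG_CSC_COLORSPACE for the format:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int request_bt709(int fd)
    {
        struct v4l2_format fmt;

        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (ioctl(fd, VIDIOC_G_FMT, &fmt))
            return -1;
        /* Ask the driver to produce BT.709 instead of its default. */
        fmt.fmt.pix.flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
        fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
        return ioctl(fd, VIDIOC_S_FMT, &fmt);
    }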
/* Frame Size and frame rate enumeration */
/*
@@ -949,12 +1021,12 @@ struct v4l2_requestbuffers {
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 capabilities;
- union {
- __u32 flags;
- __u32 reserved[1];
- };
+ __u8 flags;
+ __u8 reserved[3];
};
+#define V4L2_MEMORY_FLAG_NON_COHERENT (1 << 0)
+
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
@@ -963,21 +1035,24 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
+#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7)
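The reshaped flags field and the capability bits work together: an application passes a memory hint into VIDIOC_REQBUFS and reads back from capabilities whether the queue can honor it. A sketch, assuming fd is an open capture node:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Returns 1 if the non-coherent hint is meaningful on this queue. */
    static int request_noncoherent(int fd, unsigned int count)
    {
        struct v4l2_requestbuffers req;

        memset(&req, 0, sizeof(req));
        req.count = count;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;
        req.flags = V4L2_MEMORY_FLAG_NON_COHERENT;
        if (ioctl(fd, VIDIOC_REQBUFS, &req))
            return -1;
        return !!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS);
    }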
/**
* struct v4l2_plane - plane info for multi-planar buffers
* @bytesused: number of bytes occupied by data in the plane (payload)
* @length: size of this plane (NOT the payload) in bytes
- * @mem_offset: when memory in the associated struct v4l2_buffer is
+ * @m.mem_offset: when memory in the associated struct v4l2_buffer is
* V4L2_MEMORY_MMAP, equals the offset from the start of
* the device memory for this plane (or is a "cookie" that
* should be passed to mmap() called on the video node)
- * @userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
+ * @m.userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
* pointing to this plane
- * @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
+ * @m.fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
* descriptor associated with this plane
+ * @m: union of @mem_offset, @userptr and @fd
* @data_offset: offset in the plane to the start of data; usually 0,
* unless there is a header in front of the data
+ * @reserved: drivers and applications must zero this array
*
* Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer
* with two planes can have one plane for Y, and another for interleaved CbCr
@@ -1010,19 +1085,23 @@ struct v4l2_plane {
* @sequence: sequence count of this frame
* @memory: enum v4l2_memory; the method, in which the actual video data is
* passed
- * @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
+ * @m.offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
* offset from the start of the device memory for this plane,
* (or a "cookie" that should be passed to mmap() as offset)
- * @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
+ * @m.userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
* a userspace pointer pointing to this buffer
- * @fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
+ * @m.fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
* a userspace file descriptor associated with this buffer
- * @planes: for multiplanar buffers; userspace pointer to the array of plane
+ * @m.planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
+ * @m: union of @offset, @userptr, @planes and @fd
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
+ * @reserved2: drivers and applications must zero this field
* @request_fd: fd of the request that this buffer should use
+ * @reserved: for backwards compatibility with applications that do not know
+ * about @request_fd
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
@@ -1060,7 +1139,7 @@ struct v4l2_buffer {
#ifndef __KERNEL__
/**
* v4l2_timeval_to_ns - Convert timeval to nanoseconds
- * @ts: pointer to the timeval variable to be converted
+ * @tv: pointer to the timeval variable to be converted
*
* Returns the scalar nanosecond representation of the timeval
* parameter.
@@ -1121,6 +1200,7 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
* @flags: flags for newly created file, currently only O_CLOEXEC is
 * supported; refer to the manual of the open() syscall for more details
* @fd: file descriptor associated with DMABUF (set by driver)
+ * @reserved: drivers and applications must zero this array
*
* Contains data used for exporting a video buffer as DMABUF file descriptor.
* The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF
@@ -1161,8 +1241,10 @@ struct v4l2_framebuffer {
/* Flags for the 'capability' field. Read only */
#define V4L2_FBUF_CAP_EXTERNOVERLAY 0x0001
#define V4L2_FBUF_CAP_CHROMAKEY 0x0002
+#ifndef __KERNEL__
#define V4L2_FBUF_CAP_LIST_CLIPPING 0x0004
#define V4L2_FBUF_CAP_BITMAP_CLIPPING 0x0008
+#endif
#define V4L2_FBUF_CAP_LOCAL_ALPHA 0x0010
#define V4L2_FBUF_CAP_GLOBAL_ALPHA 0x0020
#define V4L2_FBUF_CAP_LOCAL_INV_ALPHA 0x0040
@@ -1185,7 +1267,7 @@ struct v4l2_window {
struct v4l2_rect w;
__u32 field; /* enum v4l2_field */
__u32 chromakey;
- struct v4l2_clip __user *clips;
+ struct v4l2_clip *clips;
__u32 clipcount;
void __user *bitmap;
__u8 global_alpha;
@@ -1552,7 +1634,8 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
- (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+ ((bt)->interlaced ? \
+ ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
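The change above stops the il_* fields from polluting progressive timings, where they carry no meaning. A standalone check with illustrative interlaced values (1080i has 1125 total lines: 22 blanking lines in one field, 23 in the other):

    #include <stdio.h>

    /* Stand-in holding only the fields the macro reads. */
    struct bt {
        unsigned height, interlaced, vfrontporch, vsync, vbackporch,
                 il_vfrontporch, il_vsync, il_vbackporch;
    };

    #define BLANKING_HEIGHT(bt) \
        ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
         ((bt)->interlaced ? \
          ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))

    int main(void)
    {
        struct bt t = { 1080, 1, 2, 5, 15, 3, 5, 15 };
        printf("frame height = %u\n", t.height + BLANKING_HEIGHT(&t)); /* 1125 */
        return 0;
    }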
@@ -1643,7 +1726,7 @@ struct v4l2_input {
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
- __u32 tuner; /* enum v4l2_tuner_type */
+ __u32 tuner; /* Tuner index */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
@@ -1730,7 +1813,33 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
+ __s32 __user *p_s32;
+ __s64 __user *p_s64;
struct v4l2_area __user *p_area;
+ struct v4l2_ctrl_h264_sps __user *p_h264_sps;
+ struct v4l2_ctrl_h264_pps __user *p_h264_pps;
+ struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scaling_matrix;
+ struct v4l2_ctrl_h264_pred_weights __user *p_h264_pred_weights;
+ struct v4l2_ctrl_h264_slice_params __user *p_h264_slice_params;
+ struct v4l2_ctrl_h264_decode_params __user *p_h264_decode_params;
+ struct v4l2_ctrl_fwht_params __user *p_fwht_params;
+ struct v4l2_ctrl_vp8_frame __user *p_vp8_frame;
+ struct v4l2_ctrl_mpeg2_sequence __user *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture __user *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation __user *p_mpeg2_quantisation;
+ struct v4l2_ctrl_vp9_compressed_hdr __user *p_vp9_compressed_hdr_probs;
+ struct v4l2_ctrl_vp9_frame __user *p_vp9_frame;
+ struct v4l2_ctrl_hevc_sps __user *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps __user *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params __user *p_hevc_slice_params;
+ struct v4l2_ctrl_hevc_scaling_matrix __user *p_hevc_scaling_matrix;
+ struct v4l2_ctrl_hevc_decode_params __user *p_hevc_decode_params;
+ struct v4l2_ctrl_av1_sequence __user *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry __user *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame __user *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain __user *p_av1_film_grain;
+ struct v4l2_ctrl_hdr10_cll_info __user *p_hdr10_cll_info;
+ struct v4l2_ctrl_hdr10_mastering_display __user *p_hdr10_mastering_display;
void __user *ptr;
};
} __attribute__ ((packed));
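Each pointer above pairs with a control ID and one of the control types added below, and the payload travels through VIDIOC_S_EXT_CTRLS, usually bound to a media request. A hedged sketch for the FWHT case (V4L2_CID_STATELESS_FWHT_PARAMS is assumed from <linux/v4l2-controls.h>, request_fd an already-allocated request):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int queue_fwht_params(int fd, int request_fd,
                                 struct v4l2_ctrl_fwht_params *params)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_STATELESS_FWHT_PARAMS;
        ctrl.size = sizeof(*params);
        ctrl.p_fwht_params = params;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
        ctrls.request_fd = request_fd;
        ctrls.count = 1;
        ctrls.controls = &ctrl;
        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }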
@@ -1777,6 +1886,38 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
V4L2_CTRL_TYPE_AREA = 0x0106,
+
+ V4L2_CTRL_TYPE_HDR10_CLL_INFO = 0x0110,
+ V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 0x0111,
+
+ V4L2_CTRL_TYPE_H264_SPS = 0x0200,
+ V4L2_CTRL_TYPE_H264_PPS = 0x0201,
+ V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0202,
+ V4L2_CTRL_TYPE_H264_SLICE_PARAMS = 0x0203,
+ V4L2_CTRL_TYPE_H264_DECODE_PARAMS = 0x0204,
+ V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 0x0205,
+
+ V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220,
+
+ V4L2_CTRL_TYPE_VP8_FRAME = 0x0240,
+
+ V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250,
+ V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251,
+ V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252,
+
+ V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 0x0260,
+ V4L2_CTRL_TYPE_VP9_FRAME = 0x0261,
+
+ V4L2_CTRL_TYPE_HEVC_SPS = 0x0270,
+ V4L2_CTRL_TYPE_HEVC_PPS = 0x0271,
+ V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
+ V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
+ V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
+
+ V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
+ V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
+ V4L2_CTRL_TYPE_AV1_FRAME = 0x282,
+ V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 0x283,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1832,6 +1973,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
+#define V4L2_CTRL_FLAG_DYNAMIC_ARRAY 0x0800
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -2214,6 +2356,7 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
* this plane will be used
* @bytesperline: distance in bytes between the leftmost pixels in two
* adjacent lines
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
@@ -2232,8 +2375,10 @@ struct v4l2_plane_pix_format {
* @num_planes: number of planes for this format
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @hsv_enc: enum v4l2_hsv_encoding, HSV encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_pix_format_mplane {
__u32 width;
@@ -2258,6 +2403,7 @@ struct v4l2_pix_format_mplane {
* struct v4l2_sdr_format - SDR format definition
* @pixelformat: little endian four character code (fourcc)
* @buffersize: maximum size in bytes required for data
+ * @reserved: drivers and applications must zero this array
*/
struct v4l2_sdr_format {
__u32 pixelformat;
@@ -2277,13 +2423,15 @@ struct v4l2_meta_format {
/**
* struct v4l2_format - stream data format
- * @type: enum v4l2_buf_type; type of the data stream
- * @pix: definition of an image format
- * @pix_mp: definition of a multiplanar image format
- * @win: definition of an overlaid image
- * @vbi: raw VBI capture or output parameters
- * @sliced: sliced VBI capture or output parameters
- * @raw_data: placeholder for future extensions and custom formats
+ * @type: enum v4l2_buf_type; type of the data stream
+ * @fmt.pix: definition of an image format
+ * @fmt.pix_mp: definition of a multiplanar image format
+ * @fmt.win: definition of an overlaid image
+ * @fmt.vbi: raw VBI capture or output parameters
+ * @fmt.sliced: sliced VBI capture or output parameters
+ * @fmt.raw_data: placeholder for future extensions and custom formats
+ * @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr,
+ * @meta and @raw_data
*/
struct v4l2_format {
__u32 type;
@@ -2333,6 +2481,7 @@ struct v4l2_event_vsync {
#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
+#define V4L2_EVENT_CTRL_CH_DIMENSIONS (1 << 3)
struct v4l2_event_ctrl {
__u32 changes;
@@ -2459,6 +2608,9 @@ struct v4l2_dbg_chip_info {
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
* and configured for MMAP streaming I/O).
+ * @max_num_buffers: if the V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability
+ * flag is set, this field indicates the maximum possible number
+ * of buffers for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2468,7 +2620,8 @@ struct v4l2_create_buffers {
struct v4l2_format format;
__u32 capabilities;
__u32 flags;
- __u32 reserved[6];
+ __u32 max_num_buffers;
+ __u32 reserved[5];
};
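Since a zero-count VIDIOC_CREATE_BUFS allocates nothing, it doubles as a cheap probe for the new field. A sketch, assuming the driver accepts the count == 0 query form:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int query_max_buffers(int fd, unsigned int *max)
    {
        struct v4l2_create_buffers create;

        memset(&create, 0, sizeof(create));
        create.count = 0;
        create.memory = V4L2_MEMORY_MMAP;
        create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (ioctl(fd, VIDIOC_CREATE_BUFS, &create))
            return -1;
        if (create.capabilities & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS)
            *max = create.max_num_buffers;
        return 0;
    }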
/*
@@ -2576,4 +2729,15 @@ struct v4l2_create_buffers {
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
+/* Deprecated definitions kept for backwards compatibility */
+#ifndef __KERNEL__
+#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16
+#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32
+/*
+ * This capability was never implemented; anyone using this cap should drop it
+ * from their code.
+ */
+#define V4L2_CAP_ASYNCIO 0x02000000
+#endif
+
#endif /* _UAPI__LINUX_VIDEODEV2_H */
diff --git a/include/uapi/linux/virtio_9p.h b/include/uapi/linux/virtio_9p.h
index 441047432258..374b68f8ac6e 100644
--- a/include/uapi/linux/virtio_9p.h
+++ b/include/uapi/linux/virtio_9p.h
@@ -38,7 +38,7 @@ struct virtio_9p_config {
/* length of the tag name */
__virtio16 tag_len;
/* non-NUL-terminated tag name */
- __u8 tag[0];
+ __u8 tag[];
} __attribute__((packed));
#endif /* _LINUX_VIRTIO_9P_H */
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index d888f013d9ff..3744e4da1b2a 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -40,6 +40,8 @@
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
+#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */
+#define VIRTIO_BLK_F_ZONED 17 /* Zoned block device */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -121,6 +123,31 @@ struct virtio_blk_config {
__u8 write_zeroes_may_unmap;
__u8 unused1[3];
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_SECURE_ERASE */
+ /*
+ * The maximum secure erase sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __virtio32 max_secure_erase_sectors;
+ /*
+ * The maximum number of secure erase segments in a
+ * secure erase command.
+ */
+ __virtio32 max_secure_erase_seg;
+ /* Secure erase commands must be aligned to this number of sectors. */
+ __virtio32 secure_erase_sector_alignment;
+
+ /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
+ struct virtio_blk_zoned_characteristics {
+ __virtio32 zone_sectors;
+ __virtio32 max_open_zones;
+ __virtio32 max_active_zones;
+ __virtio32 max_append_sectors;
+ __virtio32 write_granularity;
+ __u8 model;
+ __u8 unused2[3];
+ } zoned;
} __attribute__((packed));
/*
@@ -155,6 +182,30 @@ struct virtio_blk_config {
/* Write zeroes command */
#define VIRTIO_BLK_T_WRITE_ZEROES 13
+/* Secure erase command */
+#define VIRTIO_BLK_T_SECURE_ERASE 14
+
+/* Zone append command */
+#define VIRTIO_BLK_T_ZONE_APPEND 15
+
+/* Report zones command */
+#define VIRTIO_BLK_T_ZONE_REPORT 16
+
+/* Open zone command */
+#define VIRTIO_BLK_T_ZONE_OPEN 18
+
+/* Close zone command */
+#define VIRTIO_BLK_T_ZONE_CLOSE 20
+
+/* Finish zone command */
+#define VIRTIO_BLK_T_ZONE_FINISH 22
+
+/* Reset zone command */
+#define VIRTIO_BLK_T_ZONE_RESET 24
+
+/* Reset All zones command */
+#define VIRTIO_BLK_T_ZONE_RESET_ALL 26
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
@@ -174,6 +225,72 @@ struct virtio_blk_outhdr {
__virtio64 sector;
};
+/*
+ * Supported zoned device models.
+ */
+
+/* Regular block device */
+#define VIRTIO_BLK_Z_NONE 0
+/* Host-managed zoned device */
+#define VIRTIO_BLK_Z_HM 1
+/* Host-aware zoned device */
+#define VIRTIO_BLK_Z_HA 2
+
+/*
+ * Zone descriptor. A part of VIRTIO_BLK_T_ZONE_REPORT command reply.
+ */
+struct virtio_blk_zone_descriptor {
+ /* Zone capacity */
+ __virtio64 z_cap;
+ /* The starting sector of the zone */
+ __virtio64 z_start;
+ /* Zone write pointer position in sectors */
+ __virtio64 z_wp;
+ /* Zone type */
+ __u8 z_type;
+ /* Zone state */
+ __u8 z_state;
+ __u8 reserved[38];
+};
+
+struct virtio_blk_zone_report {
+ __virtio64 nr_zones;
+ __u8 reserved[56];
+ struct virtio_blk_zone_descriptor zones[];
+};
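Each descriptor above is exactly 64 bytes (24 for the three __virtio64 fields, 2 type/state bytes, 38 reserved) and the report header is another 64, so a reply buffer for n zones is sized mechanically. A standalone sketch with fixed-width stand-ins for the __virtio types, assuming a little-endian host:

    #include <stdint.h>
    #include <stdlib.h>

    struct zone_desc   { uint64_t z_cap, z_start, z_wp;
                         uint8_t z_type, z_state, reserved[38]; };
    struct zone_report { uint64_t nr_zones; uint8_t reserved[56];
                         struct zone_desc zones[]; };

    static void *alloc_zone_report(uint64_t nr_zones)
    {
        /* 64 bytes of header + 64 bytes per zone descriptor. */
        return calloc(1, sizeof(struct zone_report) +
                         nr_zones * sizeof(struct zone_desc));
    }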
+
+/*
+ * Supported zone types.
+ */
+
+/* Conventional zone */
+#define VIRTIO_BLK_ZT_CONV 1
+/* Sequential Write Required zone */
+#define VIRTIO_BLK_ZT_SWR 2
+/* Sequential Write Preferred zone */
+#define VIRTIO_BLK_ZT_SWP 3
+
+/*
+ * Zone states that are available for zones of all types.
+ */
+
+/* Not a write pointer (conventional zones only) */
+#define VIRTIO_BLK_ZS_NOT_WP 0
+/* Empty */
+#define VIRTIO_BLK_ZS_EMPTY 1
+/* Implicitly Open */
+#define VIRTIO_BLK_ZS_IOPEN 2
+/* Explicitly Open */
+#define VIRTIO_BLK_ZS_EOPEN 3
+/* Closed */
+#define VIRTIO_BLK_ZS_CLOSED 4
+/* Read-Only */
+#define VIRTIO_BLK_ZS_RDONLY 13
+/* Full */
+#define VIRTIO_BLK_ZS_FULL 14
+/* Offline */
+#define VIRTIO_BLK_ZS_OFFLINE 15
+
/* Unmap this range (only valid for write zeroes command) */
#define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001
@@ -200,4 +317,11 @@ struct virtio_scsi_inhdr {
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
+
+/* Error codes that are specific to zoned block devices */
+#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3
+#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4
+#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5
+#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6
+
#endif /* _LINUX_VIRTIO_BLK_H */
diff --git a/include/uapi/linux/virtio_bt.h b/include/uapi/linux/virtio_bt.h
new file mode 100644
index 000000000000..af798f4c9680
--- /dev/null
+++ b/include/uapi/linux/virtio_bt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _UAPI_LINUX_VIRTIO_BT_H
+#define _UAPI_LINUX_VIRTIO_BT_H
+
+#include <linux/virtio_types.h>
+
+/* Feature bits */
+#define VIRTIO_BT_F_VND_HCI 0 /* Indicates vendor command support */
+#define VIRTIO_BT_F_MSFT_EXT 1 /* Indicates MSFT vendor support */
+#define VIRTIO_BT_F_AOSP_EXT 2 /* Indicates AOSP vendor support */
+#define VIRTIO_BT_F_CONFIG_V2 3 /* Use second version configuration */
+
+enum virtio_bt_config_type {
+ VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
+ VIRTIO_BT_CONFIG_TYPE_AMP = 1,
+};
+
+enum virtio_bt_config_vendor {
+ VIRTIO_BT_CONFIG_VENDOR_NONE = 0,
+ VIRTIO_BT_CONFIG_VENDOR_ZEPHYR = 1,
+ VIRTIO_BT_CONFIG_VENDOR_INTEL = 2,
+ VIRTIO_BT_CONFIG_VENDOR_REALTEK = 3,
+};
+
+struct virtio_bt_config {
+ __u8 type;
+ __u16 vendor;
+ __u16 msft_opcode;
+} __attribute__((packed));
+
+struct virtio_bt_config_v2 {
+ __u8 type;
+ __u8 alignment;
+ __u16 vendor;
+ __u16 msft_opcode;
+};
+
+#endif /* _UAPI_LINUX_VIRTIO_BT_H */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index b5eda06f0d57..2445f365bce7 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 38
+#define VIRTIO_TRANSPORT_F_END 42
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -83,6 +83,12 @@
#define VIRTIO_F_RING_PACKED 34
/*
+ * The in-order feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/*
* This feature indicates that memory accesses by the driver and the
* device are ordered in a way described by the platform.
*/
@@ -92,4 +98,26 @@
* Does the device support Single Root I/O Virtualization?
*/
#define VIRTIO_F_SR_IOV 37
+
+/*
+ * This feature indicates that the driver passes extra data (besides
+ * identifying the virtqueue) in its device notifications.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA 39
+
+/*
+ * This feature indicates that the driver can reset a queue individually.
+ */
+#define VIRTIO_F_RING_RESET 40
+
+/*
+ * This feature indicates that the device supports administration virtqueues.
+ */
+#define VIRTIO_F_ADMIN_VQ 41
+
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
index a03932f10565..71a54a6849ca 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -37,6 +37,7 @@
#define VIRTIO_CRYPTO_SERVICE_HASH 1
#define VIRTIO_CRYPTO_SERVICE_MAC 2
#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
__le32 opcode;
__le32 algo;
__le32 flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
__u8 padding[32];
};
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+ __le32 padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH 0
+#define VIRTIO_CRYPTO_RSA_MD2 1
+#define VIRTIO_CRYPTO_RSA_MD3 2
+#define VIRTIO_CRYPTO_RSA_MD4 3
+#define VIRTIO_CRYPTO_RSA_MD5 4
+#define VIRTIO_CRYPTO_RSA_SHA1 5
+#define VIRTIO_CRYPTO_RSA_SHA256 6
+#define VIRTIO_CRYPTO_RSA_SHA384 7
+#define VIRTIO_CRYPTO_RSA_SHA512 8
+#define VIRTIO_CRYPTO_RSA_SHA224 9
+ __le32 hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+ __le32 curve_id;
+ __le32 padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER 0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA 1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA 2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+ __le32 algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+ __le32 keytype;
+ __le32 keylen;
+
+ union {
+ struct virtio_crypto_rsa_session_para rsa;
+ struct virtio_crypto_ecdsa_session_para ecdsa;
+ } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+ struct virtio_crypto_akcipher_session_para para;
+ __u8 padding[36];
+};
+
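For orientation, these session and data-queue opcodes are plain arithmetic from the VIRTIO_CRYPTO_OPCODE macro defined earlier in this header (service in the high byte, op in the low byte):

    #include <assert.h>

    #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))

    int main(void)
    {
        /* VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION: service 4, op 0x04 */
        assert(VIRTIO_CRYPTO_OPCODE(4, 0x04) == 0x0404);
        /* VIRTIO_CRYPTO_AKCIPHER_SIGN: service 4, data-queue op 0x02 */
        assert(VIRTIO_CRYPTO_OPCODE(4, 0x02) == 0x0402);
        return 0;
    }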
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
mac_create_session;
struct virtio_crypto_aead_create_session_req
aead_create_session;
+ struct virtio_crypto_akcipher_create_session_req
+ akcipher_create_session;
struct virtio_crypto_destroy_session_req
destroy_session;
__u8 padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
__le32 opcode;
/* algo should be service-specific algorithms */
__le32 algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
__u8 padding[32];
};
+struct virtio_crypto_akcipher_para {
+ __le32 src_data_len;
+ __le32 dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+ struct virtio_crypto_akcipher_para para;
+ __u8 padding[40];
+};
+
/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
struct virtio_crypto_hash_data_req hash_req;
struct virtio_crypto_mac_data_req mac_req;
struct virtio_crypto_aead_data_req aead_req;
+ struct virtio_crypto_akcipher_data_req akcipher_req;
__u8 padding[48];
} u;
};
@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
@@ -438,7 +518,7 @@ struct virtio_crypto_config {
__le32 max_cipher_key_len;
/* Maximum length of authenticated key */
__le32 max_auth_key_len;
- __le32 reserve;
+ __le32 akcipher_algo;
/* Maximum size of each crypto request's content */
__le64 max_size;
};
diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h
index 3056b6e9f8ce..bea38291421b 100644
--- a/include/uapi/linux/virtio_fs.h
+++ b/include/uapi/linux/virtio_fs.h
@@ -16,4 +16,7 @@ struct virtio_fs_config {
__le32 num_request_queues;
} __attribute__((packed));
+/* For the id field in virtio_pci_shm_cap */
+#define VIRTIO_FS_SHMCAP_ID_CACHE 0
+
#endif /* _UAPI_LINUX_VIRTIO_FS_H */
diff --git a/include/uapi/linux/virtio_gpio.h b/include/uapi/linux/virtio_gpio.h
new file mode 100644
index 000000000000..d4b29d9a39dd
--- /dev/null
+++ b/include/uapi/linux/virtio_gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _LINUX_VIRTIO_GPIO_H
+#define _LINUX_VIRTIO_GPIO_H
+
+#include <linux/types.h>
+
+/* Virtio GPIO Feature bits */
+#define VIRTIO_GPIO_F_IRQ 0
+
+/* Virtio GPIO request types */
+#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001
+#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002
+#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003
+#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004
+#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005
+#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006
+
+/* Possible values of the status field */
+#define VIRTIO_GPIO_STATUS_OK 0x0
+#define VIRTIO_GPIO_STATUS_ERR 0x1
+
+/* Direction types */
+#define VIRTIO_GPIO_DIRECTION_NONE 0x00
+#define VIRTIO_GPIO_DIRECTION_OUT 0x01
+#define VIRTIO_GPIO_DIRECTION_IN 0x02
+
+/* Virtio GPIO IRQ types */
+#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+struct virtio_gpio_config {
+ __le16 ngpio;
+ __u8 padding[2];
+ __le32 gpio_names_size;
+};
+
+/* Virtio GPIO Request / Response */
+struct virtio_gpio_request {
+ __le16 type;
+ __le16 gpio;
+ __le32 value;
+};
+
+struct virtio_gpio_response {
+ __u8 status;
+ __u8 value;
+};
+
+struct virtio_gpio_response_get_names {
+ __u8 status;
+ __u8 value[];
+};
+
+/* Virtio GPIO IRQ Request / Response */
+struct virtio_gpio_irq_request {
+ __le16 gpio;
+};
+
+struct virtio_gpio_irq_response {
+ __u8 status;
+};
+
+/* Possible values of the interrupt status field */
+#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1
+
+#endif /* _LINUX_VIRTIO_GPIO_H */
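A request/response round trip through this interface is small enough to show whole. A sketch of driving a line high, assuming a little-endian host so the __le fields can be filled directly:

    #include <stdint.h>

    struct gpio_request { uint16_t type; uint16_t gpio; uint32_t value; };

    static struct gpio_request set_line_high(uint16_t line)
    {
        /* Placed on the request virtqueue; the device answers with a
         * virtio_gpio_response whose status should be
         * VIRTIO_GPIO_STATUS_OK (0x0). */
        return (struct gpio_request){
            .type  = 0x0005,   /* VIRTIO_GPIO_MSG_SET_VALUE */
            .gpio  = line,
            .value = 1,
        };
    }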
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index ccbd174ef321..0e21f3998108 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -50,6 +50,20 @@
* VIRTIO_GPU_CMD_GET_EDID
*/
#define VIRTIO_GPU_F_EDID 1
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
+ */
+#define VIRTIO_GPU_F_RESOURCE_UUID 2
+
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -66,6 +80,9 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
+ VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +93,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
+ VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -87,6 +106,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
+ VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -97,14 +118,29 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+enum virtio_gpu_shm_id {
+ VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+ /*
+ * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
+ * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
+ */
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to be used when creating the fence
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
__le32 type;
__le32 flags;
__le64 fence_id;
__le32 ctx_id;
- __le32 padding;
+ __u8 ring_idx;
+ __u8 padding[3];
};
/* data passed in the cursor vq */
@@ -244,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
__le32 nlen;
- __le32 padding;
+ __le32 context_init;
char debug_name[64];
};
@@ -272,6 +309,8 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
+/* 3 is reserved for gfxstream */
+#define VIRTIO_GPU_CAPSET_VENUS 4
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
@@ -340,4 +379,80 @@ enum virtio_gpu_formats {
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
+/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
+struct virtio_gpu_resource_assign_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */
+struct virtio_gpu_resp_resource_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u8 uuid[16];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob mem */
+ __le32 blob_mem;
+ __le32 blob_flags;
+ __le32 nr_entries;
+ __le64 blob_id;
+ __le64 size;
+ /*
+ * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 scanout_id;
+ __le32 resource_id;
+ __le32 width;
+ __le32 height;
+ __le32 format;
+ __le32 padding;
+ __le32 strides[4];
+ __le32 offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+ __le64 offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u32 map_info;
+ __u32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
#endif
diff --git a/include/uapi/linux/virtio_i2c.h b/include/uapi/linux/virtio_i2c.h
new file mode 100644
index 000000000000..acf3b6069136
--- /dev/null
+++ b/include/uapi/linux/virtio_i2c.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Definitions for virtio I2C Adapter
+ *
+ * Copyright (c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_I2C_H
+#define _UAPI_LINUX_VIRTIO_I2C_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+/* Virtio I2C Feature bits */
+#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0
+
+/* Bit 0 of @virtio_i2c_out_hdr.flags, used to group the requests */
+#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0)
+
+/* Bit 1 of @virtio_i2c_out_hdr.flags, used to mark a buffer as read */
+#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1)
+
+/**
+ * struct virtio_i2c_out_hdr - the virtio I2C message OUT header
+ * @addr: the controlled device address
+ * @padding: used to pad to full dword
+ * @flags: used for feature extensibility
+ */
+struct virtio_i2c_out_hdr {
+ __le16 addr;
+ __le16 padding;
+ __le32 flags;
+};
+
+/**
+ * struct virtio_i2c_in_hdr - the virtio I2C message IN header
+ * @status: the processing result from the backend
+ */
+struct virtio_i2c_in_hdr {
+ __u8 status;
+};
+
+/* The final status written by the device */
+#define VIRTIO_I2C_MSG_OK 0
+#define VIRTIO_I2C_MSG_ERR 1
+
+#endif /* _UAPI_LINUX_VIRTIO_I2C_H */
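As a sketch, a read transfer is one out header (driver to device), a device-writable data buffer, and the one-byte in header for status. The out header for reading from a device at 7-bit address 0x50, assuming the address sits in bits 1..7 as in the virtio-i2c spec and a little-endian host:

    #include <stdint.h>

    struct i2c_out_hdr { uint16_t addr; uint16_t padding; uint32_t flags; };

    static struct i2c_out_hdr read_request_hdr(void)
    {
        return (struct i2c_out_hdr){
            .addr  = 0x50 << 1,   /* 7-bit address, shifted */
            .flags = 1u << 1,     /* VIRTIO_I2C_FLAGS_M_RD */
        };
    }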
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index b052355ac7a3..7aa2eb766205 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -29,24 +29,56 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#define VIRTIO_ID_NET 1 /* virtio net */
-#define VIRTIO_ID_BLOCK 2 /* virtio block */
-#define VIRTIO_ID_CONSOLE 3 /* virtio console */
-#define VIRTIO_ID_RNG 4 /* virtio rng */
-#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
-#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
-#define VIRTIO_ID_SCSI 8 /* virtio scsi */
-#define VIRTIO_ID_9P 9 /* 9p virtio console */
-#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
-#define VIRTIO_ID_CAIF 12 /* Virtio caif */
-#define VIRTIO_ID_GPU 16 /* virtio GPU */
-#define VIRTIO_ID_INPUT 18 /* virtio input */
-#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
-#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
-#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
-#define VIRTIO_ID_MEM 24 /* virtio mem */
-#define VIRTIO_ID_FS 26 /* virtio filesystem */
-#define VIRTIO_ID_PMEM 27 /* virtio pmem */
-#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_NET 1 /* virtio net */
+#define VIRTIO_ID_BLOCK 2 /* virtio block */
+#define VIRTIO_ID_CONSOLE 3 /* virtio console */
+#define VIRTIO_ID_RNG 4 /* virtio rng */
+#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
+#define VIRTIO_ID_IOMEM 6 /* virtio ioMemory */
+#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
+#define VIRTIO_ID_SCSI 8 /* virtio scsi */
+#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_MAC80211_WLAN 10 /* virtio WLAN MAC */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
+#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_MEMORY_BALLOON 13 /* virtio memory balloon */
+#define VIRTIO_ID_GPU 16 /* virtio GPU */
+#define VIRTIO_ID_CLOCK 17 /* virtio clock/timer */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
+#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
+#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_SIGNAL_DIST 21 /* virtio signal distribution device */
+#define VIRTIO_ID_PSTORE 22 /* virtio pstore device */
+#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM 24 /* virtio mem */
+#define VIRTIO_ID_SOUND 25 /* virtio sound */
+#define VIRTIO_ID_FS 26 /* virtio filesystem */
+#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_RPMB 28 /* virtio rpmb */
+#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */
+#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */
+#define VIRTIO_ID_SCMI 32 /* virtio SCMI */
+#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module */
+#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */
+#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */
+#define VIRTIO_ID_CAN 36 /* virtio can */
+#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */
+#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */
+#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
+#define VIRTIO_ID_BT 40 /* virtio bluetooth */
+#define VIRTIO_ID_GPIO 41 /* virtio gpio */
+
+/*
+ * Virtio Transitional IDs
+ */
+
+#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index 237e36a280cb..1ff357f0d72e 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -16,6 +16,7 @@
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
#define VIRTIO_IOMMU_F_MMIO 5
+#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6
struct virtio_iommu_range_64 {
__le64 start;
@@ -36,6 +37,8 @@ struct virtio_iommu_config {
struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
__le32 probe_size;
+ __u8 bypass;
+ __u8 reserved[3];
};
/* Request types */
@@ -66,11 +69,14 @@ struct virtio_iommu_req_tail {
__u8 reserved[3];
};
+#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0)
+
struct virtio_iommu_req_attach {
struct virtio_iommu_req_head head;
__le32 domain;
__le32 endpoint;
- __u8 reserved[8];
+ __le32 flags;
+ __u8 reserved[4];
struct virtio_iommu_req_tail tail;
};
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
index 70e01c687d5e..e9122f1d0e0c 100644
--- a/include/uapi/linux/virtio_mem.h
+++ b/include/uapi/linux/virtio_mem.h
@@ -68,9 +68,10 @@
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
- * read/written. Such memory should, in general, not be touched. E.g.,
- * even writing might succeed, but the values will simply be discarded at
- * random points in time.
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
*
 * It can happen that the device cannot process a request because it is
* busy. The device driver has to retry later.
@@ -87,6 +88,8 @@
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* --- virtio-mem: guest -> host requests --- */
diff --git a/include/uapi/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h
index c4b09689ab64..0650f91bea6c 100644
--- a/include/uapi/linux/virtio_mmio.h
+++ b/include/uapi/linux/virtio_mmio.h
@@ -122,6 +122,17 @@
#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
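A driver selects a region id, then glues the two halves back together. A sketch, with mmio_read32()/mmio_write32() as assumed platform register accessors relative to the device's MMIO window:

    #include <stdint.h>

    extern uint32_t mmio_read32(unsigned long off);
    extern void mmio_write32(unsigned long off, uint32_t val);

    static uint64_t shm_base(uint32_t shm_id)
    {
        mmio_write32(0x0ac, shm_id);                 /* SHM_SEL */
        return (uint64_t)mmio_read32(0x0bc) << 32 |  /* SHM_BASE_HIGH */
               mmio_read32(0x0b8);                   /* SHM_BASE_LOW */
    }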
/* Configuration atomicity value */
#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 3f55a4215f11..cc65ef0f3c3e 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,8 +56,13 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-
+#define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */
+#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
+#define VIRTIO_NET_F_GUEST_USO4 54 /* Guest can handle USOv4 in. */
+#define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. */
+#define VIRTIO_NET_F_HOST_USO 56 /* Host can handle USO in. */
#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
+#define VIRTIO_NET_F_GUEST_HDRLEN 59 /* Guest provides the exact hdr_len value. */
#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
@@ -130,6 +135,7 @@ struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4 & IPv6 UDP (USO) */
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
__u8 gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
@@ -355,4 +361,49 @@ struct virtio_net_hash_config {
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+/*
+ * Control notifications coalescing.
+ *
+ * Request the device to change the notifications coalescing parameters.
+ *
+ * Available with the VIRTIO_NET_F_NOTF_COAL feature bit.
+ */
+#define VIRTIO_NET_CTRL_NOTF_COAL 6
+/*
+ * Set the tx-usecs/tx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_tx {
+ /* Maximum number of packets to send before a TX notification */
+ __le32 tx_max_packets;
+ /* Maximum number of usecs to delay a TX notification */
+ __le32 tx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0
+
+/*
+ * Set the rx-usecs/rx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_rx {
+ /* Maximum number of packets to receive before a RX notification */
+ __le32 rx_max_packets;
+ /* Maximum number of usecs to delay a RX notification */
+ __le32 rx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET 2
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET 3
+
+struct virtio_net_ctrl_coal {
+ __le32 max_packets;
+ __le32 max_usecs;
+};
+
+struct virtio_net_ctrl_coal_vq {
+ __le16 vqn;
+ __le16 reserved;
+ struct virtio_net_ctrl_coal coal;
+};
+
#endif /* _UAPI_LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 90007a1abcab..a8208492e822 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -113,6 +113,8 @@
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG 5
+/* Additional shared memory capability */
+#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
/* This is the PCI capability header: */
struct virtio_pci_cap {
@@ -121,11 +123,18 @@ struct virtio_pci_cap {
__u8 cap_len; /* Generic PCI field: capability length */
__u8 cfg_type; /* Identifies the structure. */
__u8 bar; /* Where to find it. */
- __u8 padding[3]; /* Pad to full dword. */
+ __u8 id; /* Multiple capabilities of the same type */
+ __u8 padding[2]; /* Pad to full dword. */
__le32 offset; /* Offset within bar. */
__le32 length; /* Length of the structure, in bytes. */
};
+struct virtio_pci_cap64 {
+ struct virtio_pci_cap cap;
+ __le32 offset_hi; /* Most sig 32 bits of offset */
+ __le32 length_hi; /* Most sig 32 bits of length */
+};
+
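Reassembly is the usual two-halves shift, shown here on values assumed to be already byte-swapped from the little-endian config space:

    #include <stdint.h>

    static uint64_t cap64_offset(uint32_t offset, uint32_t offset_hi)
    {
        return (uint64_t)offset_hi << 32 | offset;
    }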
struct virtio_pci_notify_cap {
struct virtio_pci_cap cap;
__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
@@ -157,6 +166,20 @@ struct virtio_pci_common_cfg {
__le32 queue_used_hi; /* read-write */
};
+/*
+ * Warning: do not use sizeof on this: use offsetofend for
+ * specific fields you need.
+ */
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ __le16 queue_notify_data; /* read-write */
+ __le16 queue_reset; /* read-write */
+
+ __le16 admin_queue_index; /* read-only */
+ __le16 admin_queue_num; /* read-only */
+};
+
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
struct virtio_pci_cap cap;
@@ -193,7 +216,74 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+#define VIRTIO_PCI_COMMON_Q_NDATA 56
+#define VIRTIO_PCI_COMMON_Q_RESET 58
+#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
+#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#endif /* VIRTIO_PCI_NO_MODERN */
+/* Admin command status. */
+#define VIRTIO_ADMIN_STATUS_OK 0
+
+/* Admin command opcode. */
+#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0
+#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
+
+/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
+
+/* Transitional device admin command. */
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
+#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
+
+struct virtio_admin_cmd_hdr {
+ __le16 opcode;
+ /*
+ * 1 - SR-IOV
+ * 2-65535 - reserved
+ */
+ __le16 group_type;
+ /* Unused, reserved for future extensions. */
+ __u8 reserved1[12];
+ __le64 group_member_id;
+};
+
+struct virtio_admin_cmd_status {
+ __le16 status;
+ __le16 status_qualifier;
+ /* Unused, reserved for future extensions. */
+ __u8 reserved2[4];
+};
+
+struct virtio_admin_cmd_legacy_wr_data {
+ __u8 offset; /* Starting offset of the register(s) to write. */
+ __u8 reserved[7];
+ __u8 registers[];
+};
+
+struct virtio_admin_cmd_legacy_rd_data {
+ __u8 offset; /* Starting offset of the register(s) to read. */
+};
+
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
+
+#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
+
+struct virtio_admin_cmd_notify_info_data {
+ __u8 flags; /* 0 = end of list, 1 = owner device, 2 = member device */
+ __u8 bar; /* BAR of the member or the owner device */
+ __u8 padding[6];
+ __le64 offset; /* Offset within bar. */
+};
+
+struct virtio_admin_cmd_notify_info_result {
+ struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
+};
+
#endif
diff --git a/include/uapi/linux/virtio_pcidev.h b/include/uapi/linux/virtio_pcidev.h
new file mode 100644
index 000000000000..668b07ce515b
--- /dev/null
+++ b/include/uapi/linux/virtio_pcidev.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef _UAPI_LINUX_VIRTIO_PCIDEV_H
+#define _UAPI_LINUX_VIRTIO_PCIDEV_H
+#include <linux/types.h>
+
+/**
+ * enum virtio_pcidev_ops - virtual PCI device operations
+ * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors
+ * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
+ * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)
+ * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for
+ * the number
+ * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports
+ * the 16- or 32-bit write that would otherwise be done into memory,
+ * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above
+ * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be
+ * all zeroes) to signal the PME# pin.
+ */
+enum virtio_pcidev_ops {
+ VIRTIO_PCIDEV_OP_RESERVED = 0,
+ VIRTIO_PCIDEV_OP_CFG_READ,
+ VIRTIO_PCIDEV_OP_CFG_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_READ,
+ VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_MEMSET,
+ VIRTIO_PCIDEV_OP_INT,
+ VIRTIO_PCIDEV_OP_MSI,
+ VIRTIO_PCIDEV_OP_PME,
+};
+
+/**
+ * struct virtio_pcidev_msg - virtio PCI device operation
+ * @op: the operation to do
+ * @bar: the bar (only with BAR read/write messages)
+ * @reserved: reserved
+ * @size: the size of the read/write (in bytes)
+ * @addr: the address to read/write
+ * @data: the data, normally @size long, but just one byte for
+ * %VIRTIO_PCIDEV_OP_MMIO_MEMSET
+ *
+ * Note: the fields are all in native (CPU) endian, however, the
+ * @data values will often be in little endian (see the ops above.)
+ */
+struct virtio_pcidev_msg {
+ __u8 op;
+ __u8 bar;
+ __u16 reserved;
+ __u32 size;
+ __u64 addr;
+ __u8 data[];
+};
+
+#endif /* _UAPI_LINUX_VIRTIO_PCIDEV_H */
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index d676b3620383..ede4f3564977 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -14,6 +14,13 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+/* Feature bits */
+/* guest physical address range will be indicated as shared memory region 0 */
+#define VIRTIO_PMEM_F_SHMEM_REGION 0
+
+/* shmid of the shared memory region corresponding to the pmem */
+#define VIRTIO_PMEM_SHMEM_REGION_ID 0
+
struct virtio_pmem_config {
__le64 start;
__le64 size;
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 476d3e5c0fe7..f8c20d3de8da 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -93,15 +93,21 @@
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
-/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+/**
+ * struct vring_desc - Virtio ring descriptors,
+ * 16 bytes long. These can chain together via @next.
+ *
+ * @addr: buffer address (guest-physical)
+ * @len: buffer length
+ * @flags: descriptor flags
+ * @next: index of the next descriptor in the chain,
+ * if the VRING_DESC_F_NEXT flag is set. We chain unused
+ * descriptors via this, too.
+ */
struct vring_desc {
- /* Address (guest-physical). */
__virtio64 addr;
- /* Length. */
__virtio32 len;
- /* The flags as indicated above. */
__virtio16 flags;
- /* We chain unused descriptors via this, too */
__virtio16 next;
};
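As a sketch, a two-descriptor chain (driver-readable buffer followed by a device-writable one) fills in as below, assuming a little-endian host and the spec's flag values VRING_DESC_F_NEXT = 1 and VRING_DESC_F_WRITE = 2:

    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };

    static void chain_out_in(struct desc *ring, uint64_t out, uint32_t out_len,
                             uint64_t in, uint32_t in_len)
    {
        ring[0] = (struct desc){ .addr = out, .len = out_len,
                                 .flags = 1 /* NEXT */,  .next = 1 };
        ring[1] = (struct desc){ .addr = in,  .len = in_len,
                                 .flags = 2 /* WRITE */, .next = 0 };
    }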
diff --git a/include/uapi/linux/virtio_scmi.h b/include/uapi/linux/virtio_scmi.h
new file mode 100644
index 000000000000..f8ddd04a3ace
--- /dev/null
+++ b/include/uapi/linux/virtio_scmi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_SCMI_H
+#define _UAPI_LINUX_VIRTIO_SCMI_H
+
+#include <linux/virtio_types.h>
+
+/* Device implements some SCMI notifications, or delayed responses. */
+#define VIRTIO_SCMI_F_P2A_CHANNELS 0
+
+/* Device implements any SCMI statistics shared memory region */
+#define VIRTIO_SCMI_F_SHARED_MEMORY 1
+
+/* Virtqueues */
+
+#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */
+#define VIRTIO_SCMI_VQ_RX 1 /* eventq */
+#define VIRTIO_SCMI_VQ_MAX_CNT 2
+
+#endif /* _UAPI_LINUX_VIRTIO_SCMI_H */
diff --git a/include/uapi/linux/virtio_snd.h b/include/uapi/linux/virtio_snd.h
new file mode 100644
index 000000000000..5f4100c2cf04
--- /dev/null
+++ b/include/uapi/linux/virtio_snd.h
@@ -0,0 +1,488 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2021 OpenSynergy GmbH
+ */
+#ifndef VIRTIO_SND_IF_H
+#define VIRTIO_SND_IF_H
+
+#include <linux/virtio_types.h>
+
+/*******************************************************************************
+ * FEATURE BITS
+ */
+enum {
+ /* device supports control elements */
+ VIRTIO_SND_F_CTLS = 0
+};
+
+/*******************************************************************************
+ * CONFIGURATION SPACE
+ */
+struct virtio_snd_config {
+ /* # of available physical jacks */
+ __le32 jacks;
+ /* # of available PCM streams */
+ __le32 streams;
+ /* # of available channel maps */
+ __le32 chmaps;
+ /* # of available control elements */
+ __le32 controls;
+};
+
+enum {
+ /* device virtqueue indexes */
+ VIRTIO_SND_VQ_CONTROL = 0,
+ VIRTIO_SND_VQ_EVENT,
+ VIRTIO_SND_VQ_TX,
+ VIRTIO_SND_VQ_RX,
+ /* # of device virtqueues */
+ VIRTIO_SND_VQ_MAX
+};
+
+/*******************************************************************************
+ * COMMON DEFINITIONS
+ */
+
+/* supported dataflow directions */
+enum {
+ VIRTIO_SND_D_OUTPUT = 0,
+ VIRTIO_SND_D_INPUT
+};
+
+enum {
+ /* jack control request types */
+ VIRTIO_SND_R_JACK_INFO = 1,
+ VIRTIO_SND_R_JACK_REMAP,
+
+ /* PCM control request types */
+ VIRTIO_SND_R_PCM_INFO = 0x0100,
+ VIRTIO_SND_R_PCM_SET_PARAMS,
+ VIRTIO_SND_R_PCM_PREPARE,
+ VIRTIO_SND_R_PCM_RELEASE,
+ VIRTIO_SND_R_PCM_START,
+ VIRTIO_SND_R_PCM_STOP,
+
+ /* channel map control request types */
+ VIRTIO_SND_R_CHMAP_INFO = 0x0200,
+
+ /* control element request types */
+ VIRTIO_SND_R_CTL_INFO = 0x0300,
+ VIRTIO_SND_R_CTL_ENUM_ITEMS,
+ VIRTIO_SND_R_CTL_READ,
+ VIRTIO_SND_R_CTL_WRITE,
+ VIRTIO_SND_R_CTL_TLV_READ,
+ VIRTIO_SND_R_CTL_TLV_WRITE,
+ VIRTIO_SND_R_CTL_TLV_COMMAND,
+
+ /* jack event types */
+ VIRTIO_SND_EVT_JACK_CONNECTED = 0x1000,
+ VIRTIO_SND_EVT_JACK_DISCONNECTED,
+
+ /* PCM event types */
+ VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED = 0x1100,
+ VIRTIO_SND_EVT_PCM_XRUN,
+
+ /* control element event types */
+ VIRTIO_SND_EVT_CTL_NOTIFY = 0x1200,
+
+ /* common status codes */
+ VIRTIO_SND_S_OK = 0x8000,
+ VIRTIO_SND_S_BAD_MSG,
+ VIRTIO_SND_S_NOT_SUPP,
+ VIRTIO_SND_S_IO_ERR
+};
+
+/* common header */
+struct virtio_snd_hdr {
+ __le32 code;
+};
+
+/* event notification */
+struct virtio_snd_event {
+ /* VIRTIO_SND_EVT_XXX */
+ struct virtio_snd_hdr hdr;
+ /* optional event data */
+ __le32 data;
+};
+
+/* common control request to query an item information */
+struct virtio_snd_query_info {
+ /* VIRTIO_SND_R_XXX_INFO */
+ struct virtio_snd_hdr hdr;
+ /* item start identifier */
+ __le32 start_id;
+ /* item count to query */
+ __le32 count;
+ /* item information size in bytes */
+ __le32 size;
+};
+
+/* common item information header */
+struct virtio_snd_info {
+ /* function group node id (High Definition Audio Specification 7.1.2) */
+ __le32 hda_fn_nid;
+};
+
+/*******************************************************************************
+ * JACK CONTROL MESSAGES
+ */
+struct virtio_snd_jack_hdr {
+ /* VIRTIO_SND_R_JACK_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::jacks - 1 */
+ __le32 jack_id;
+};
+
+/* supported jack features */
+enum {
+ VIRTIO_SND_JACK_F_REMAP = 0
+};
+
+struct virtio_snd_jack_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_JACK_F_XXX) */
+ __le32 features;
+ /* pin configuration (High Definition Audio Specification 7.3.3.31) */
+ __le32 hda_reg_defconf;
+ /* pin capabilities (High Definition Audio Specification 7.3.4.9) */
+ __le32 hda_reg_caps;
+ /* current jack connection status (0: disconnected, 1: connected) */
+ __u8 connected;
+
+ __u8 padding[7];
+};
+
+/* jack remapping control request */
+struct virtio_snd_jack_remap {
+ /* .code = VIRTIO_SND_R_JACK_REMAP */
+ struct virtio_snd_jack_hdr hdr;
+ /* selected association number */
+ __le32 association;
+ /* selected sequence number */
+ __le32 sequence;
+};
+
+/*******************************************************************************
+ * PCM CONTROL MESSAGES
+ */
+struct virtio_snd_pcm_hdr {
+ /* VIRTIO_SND_R_PCM_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::streams - 1 */
+ __le32 stream_id;
+};
+
+/* supported PCM stream features */
+enum {
+ VIRTIO_SND_PCM_F_SHMEM_HOST = 0,
+ VIRTIO_SND_PCM_F_SHMEM_GUEST,
+ VIRTIO_SND_PCM_F_MSG_POLLING,
+ VIRTIO_SND_PCM_F_EVT_SHMEM_PERIODS,
+ VIRTIO_SND_PCM_F_EVT_XRUNS
+};
+
+/* supported PCM sample formats */
+enum {
+ /* analog formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_IMA_ADPCM = 0, /* 4 / 4 bits */
+ VIRTIO_SND_PCM_FMT_MU_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_A_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_S18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT64, /* 64 / 64 bits */
+ /* digital formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_DSD_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME /* 32 / 32 bits */
+};
+
+/* supported PCM frame rates */
+enum {
+ VIRTIO_SND_PCM_RATE_5512 = 0,
+ VIRTIO_SND_PCM_RATE_8000,
+ VIRTIO_SND_PCM_RATE_11025,
+ VIRTIO_SND_PCM_RATE_16000,
+ VIRTIO_SND_PCM_RATE_22050,
+ VIRTIO_SND_PCM_RATE_32000,
+ VIRTIO_SND_PCM_RATE_44100,
+ VIRTIO_SND_PCM_RATE_48000,
+ VIRTIO_SND_PCM_RATE_64000,
+ VIRTIO_SND_PCM_RATE_88200,
+ VIRTIO_SND_PCM_RATE_96000,
+ VIRTIO_SND_PCM_RATE_176400,
+ VIRTIO_SND_PCM_RATE_192000,
+ VIRTIO_SND_PCM_RATE_384000
+};
+
+struct virtio_snd_pcm_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ __le32 features;
+ /* supported sample format bit map (1 << VIRTIO_SND_PCM_FMT_XXX) */
+ __le64 formats;
+ /* supported frame rate bit map (1 << VIRTIO_SND_PCM_RATE_XXX) */
+ __le64 rates;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ __u8 direction;
+ /* minimum # of supported channels */
+ __u8 channels_min;
+ /* maximum # of supported channels */
+ __u8 channels_max;
+
+ __u8 padding[5];
+};
+
+/* set PCM stream format */
+struct virtio_snd_pcm_set_params {
+ /* .code = VIRTIO_SND_R_PCM_SET_PARAMS */
+ struct virtio_snd_pcm_hdr hdr;
+ /* size of the hardware buffer */
+ __le32 buffer_bytes;
+ /* size of the hardware period */
+ __le32 period_bytes;
+ /* selected feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ __le32 features;
+ /* selected # of channels */
+ __u8 channels;
+ /* selected sample format (VIRTIO_SND_PCM_FMT_XXX) */
+ __u8 format;
+ /* selected frame rate (VIRTIO_SND_PCM_RATE_XXX) */
+ __u8 rate;
+
+ __u8 padding;
+};
+
+/*******************************************************************************
+ * PCM I/O MESSAGES
+ */
+
+/* I/O request header */
+struct virtio_snd_pcm_xfer {
+ /* 0 ... virtio_snd_config::streams - 1 */
+ __le32 stream_id;
+};
+
+/* I/O request status */
+struct virtio_snd_pcm_status {
+ /* VIRTIO_SND_S_XXX */
+ __le32 status;
+ /* current device latency */
+ __le32 latency_bytes;
+};
+
+/*******************************************************************************
+ * CHANNEL MAP CONTROL MESSAGES
+ */
+struct virtio_snd_chmap_hdr {
+ /* VIRTIO_SND_R_CHMAP_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::chmaps - 1 */
+ __le32 chmap_id;
+};
+
+/* standard channel position definition */
+enum {
+ VIRTIO_SND_CHMAP_NONE = 0, /* undefined */
+ VIRTIO_SND_CHMAP_NA, /* silent */
+ VIRTIO_SND_CHMAP_MONO, /* mono stream */
+ VIRTIO_SND_CHMAP_FL, /* front left */
+ VIRTIO_SND_CHMAP_FR, /* front right */
+ VIRTIO_SND_CHMAP_RL, /* rear left */
+ VIRTIO_SND_CHMAP_RR, /* rear right */
+ VIRTIO_SND_CHMAP_FC, /* front center */
+ VIRTIO_SND_CHMAP_LFE, /* low frequency (LFE) */
+ VIRTIO_SND_CHMAP_SL, /* side left */
+ VIRTIO_SND_CHMAP_SR, /* side right */
+ VIRTIO_SND_CHMAP_RC, /* rear center */
+ VIRTIO_SND_CHMAP_FLC, /* front left center */
+ VIRTIO_SND_CHMAP_FRC, /* front right center */
+ VIRTIO_SND_CHMAP_RLC, /* rear left center */
+ VIRTIO_SND_CHMAP_RRC, /* rear right center */
+ VIRTIO_SND_CHMAP_FLW, /* front left wide */
+ VIRTIO_SND_CHMAP_FRW, /* front right wide */
+ VIRTIO_SND_CHMAP_FLH, /* front left high */
+ VIRTIO_SND_CHMAP_FCH, /* front center high */
+ VIRTIO_SND_CHMAP_FRH, /* front right high */
+ VIRTIO_SND_CHMAP_TC, /* top center */
+ VIRTIO_SND_CHMAP_TFL, /* top front left */
+ VIRTIO_SND_CHMAP_TFR, /* top front right */
+ VIRTIO_SND_CHMAP_TFC, /* top front center */
+ VIRTIO_SND_CHMAP_TRL, /* top rear left */
+ VIRTIO_SND_CHMAP_TRR, /* top rear right */
+ VIRTIO_SND_CHMAP_TRC, /* top rear center */
+ VIRTIO_SND_CHMAP_TFLC, /* top front left center */
+ VIRTIO_SND_CHMAP_TFRC, /* top front right center */
+ VIRTIO_SND_CHMAP_TSL, /* top side left */
+ VIRTIO_SND_CHMAP_TSR, /* top side right */
+ VIRTIO_SND_CHMAP_LLFE, /* left LFE */
+ VIRTIO_SND_CHMAP_RLFE, /* right LFE */
+ VIRTIO_SND_CHMAP_BC, /* bottom center */
+ VIRTIO_SND_CHMAP_BLC, /* bottom left center */
+ VIRTIO_SND_CHMAP_BRC /* bottom right center */
+};
+
+/* maximum possible number of channels */
+#define VIRTIO_SND_CHMAP_MAX_SIZE 18
+
+struct virtio_snd_chmap_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ __u8 direction;
+ /* # of valid channel position values */
+ __u8 channels;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ __u8 positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+};
+
+/*******************************************************************************
+ * CONTROL ELEMENTS MESSAGES
+ */
+struct virtio_snd_ctl_hdr {
+ /* VIRTIO_SND_R_CTL_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::controls - 1 */
+ __le32 control_id;
+};
+
+/* supported roles for control elements */
+enum {
+ VIRTIO_SND_CTL_ROLE_UNDEFINED = 0,
+ VIRTIO_SND_CTL_ROLE_VOLUME,
+ VIRTIO_SND_CTL_ROLE_MUTE,
+ VIRTIO_SND_CTL_ROLE_GAIN
+};
+
+/* supported value types for control elements */
+enum {
+ VIRTIO_SND_CTL_TYPE_BOOLEAN = 0,
+ VIRTIO_SND_CTL_TYPE_INTEGER,
+ VIRTIO_SND_CTL_TYPE_INTEGER64,
+ VIRTIO_SND_CTL_TYPE_ENUMERATED,
+ VIRTIO_SND_CTL_TYPE_BYTES,
+ VIRTIO_SND_CTL_TYPE_IEC958
+};
+
+/* supported access rights for control elements */
+enum {
+ VIRTIO_SND_CTL_ACCESS_READ = 0,
+ VIRTIO_SND_CTL_ACCESS_WRITE,
+ VIRTIO_SND_CTL_ACCESS_VOLATILE,
+ VIRTIO_SND_CTL_ACCESS_INACTIVE,
+ VIRTIO_SND_CTL_ACCESS_TLV_READ,
+ VIRTIO_SND_CTL_ACCESS_TLV_WRITE,
+ VIRTIO_SND_CTL_ACCESS_TLV_COMMAND
+};
+
+struct virtio_snd_ctl_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* element role (VIRTIO_SND_CTL_ROLE_XXX) */
+ __le32 role;
+ /* element value type (VIRTIO_SND_CTL_TYPE_XXX) */
+ __le32 type;
+ /* element access right bit map (1 << VIRTIO_SND_CTL_ACCESS_XXX) */
+ __le32 access;
+ /* # of members in the element value */
+ __le32 count;
+ /* index for an element with a non-unique name */
+ __le32 index;
+ /* name identifier string for the element */
+ __u8 name[44];
+ /* additional information about the element's value */
+ union {
+ /* VIRTIO_SND_CTL_TYPE_INTEGER */
+ struct {
+ /* minimum supported value */
+ __le32 min;
+ /* maximum supported value */
+ __le32 max;
+ /* fixed step size for value (0 = variable size) */
+ __le32 step;
+ } integer;
+ /* VIRTIO_SND_CTL_TYPE_INTEGER64 */
+ struct {
+ /* minimum supported value */
+ __le64 min;
+ /* maximum supported value */
+ __le64 max;
+ /* fixed step size for value (0 = variable size) */
+ __le64 step;
+ } integer64;
+ /* VIRTIO_SND_CTL_TYPE_ENUMERATED */
+ struct {
+ /* # of options supported for value */
+ __le32 items;
+ } enumerated;
+ } value;
+};
+
+struct virtio_snd_ctl_enum_item {
+ /* option name */
+ __u8 item[64];
+};
+
+struct virtio_snd_ctl_iec958 {
+ /* AES/IEC958 channel status bits */
+ __u8 status[24];
+ /* AES/IEC958 subcode bits */
+ __u8 subcode[147];
+ /* nothing */
+ __u8 pad;
+ /* AES/IEC958 subframe bits */
+ __u8 dig_subframe[4];
+};
+
+struct virtio_snd_ctl_value {
+ union {
+ /* VIRTIO_SND_CTL_TYPE_BOOLEAN|INTEGER value */
+ __le32 integer[128];
+ /* VIRTIO_SND_CTL_TYPE_INTEGER64 value */
+ __le64 integer64[64];
+ /* VIRTIO_SND_CTL_TYPE_ENUMERATED value (option indexes) */
+ __le32 enumerated[128];
+ /* VIRTIO_SND_CTL_TYPE_BYTES value */
+ __u8 bytes[512];
+ /* VIRTIO_SND_CTL_TYPE_IEC958 value */
+ struct virtio_snd_ctl_iec958 iec958;
+ } value;
+};
+
+/* supported event reason types */
+enum {
+ /* element's value has changed */
+ VIRTIO_SND_CTL_EVT_MASK_VALUE = 0,
+ /* element's information has changed */
+ VIRTIO_SND_CTL_EVT_MASK_INFO,
+ /* element's metadata has changed */
+ VIRTIO_SND_CTL_EVT_MASK_TLV
+};
+
+struct virtio_snd_ctl_event {
+ /* VIRTIO_SND_EVT_CTL_NOTIFY */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::controls - 1 */
+ __le16 control_id;
+ /* event reason bit map (1 << VIRTIO_SND_CTL_EVT_MASK_XXX) */
+ __le16 mask;
+};
+
+#endif /* VIRTIO_SND_IF_H */
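
A hedged userspace-style sketch of the query protocol above: the device answers a VIRTIO_SND_R_PCM_INFO request with one status header followed by count entries of size bytes each. fill_pcm_info_query() is illustrative only and assumes the header above is included:

#include <endian.h>
#include <stddef.h>
#include <string.h>

static size_t fill_pcm_info_query(struct virtio_snd_query_info *q,
				  __u32 start_id, __u32 count)
{
	memset(q, 0, sizeof(*q));
	q->hdr.code = htole32(VIRTIO_SND_R_PCM_INFO);
	q->start_id = htole32(start_id);
	q->count    = htole32(count);
	q->size     = htole32(sizeof(struct virtio_snd_pcm_info));

	/* expected response: status header plus count info entries */
	return sizeof(struct virtio_snd_hdr) +
	       count * sizeof(struct virtio_snd_pcm_info);
}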
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
index 1d57ed3d84d2..64738838bee5 100644
--- a/include/uapi/linux/virtio_vsock.h
+++ b/include/uapi/linux/virtio_vsock.h
@@ -38,6 +38,9 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+/* The feature bitmap for virtio vsock */
+#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */
+
struct virtio_vsock_config {
__le64 guest_cid;
} __attribute__((packed));
@@ -65,6 +68,7 @@ struct virtio_vsock_hdr {
enum virtio_vsock_type {
VIRTIO_VSOCK_TYPE_STREAM = 1,
+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
};
enum virtio_vsock_op {
@@ -91,4 +95,10 @@ enum virtio_vsock_shutdown {
VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
};
+/* VIRTIO_VSOCK_OP_RW flags values */
+enum virtio_vsock_rw {
+ VIRTIO_VSOCK_SEQ_EOM = 1,
+ VIRTIO_VSOCK_SEQ_EOR = 2,
+};
+
#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
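
To illustrate the new VIRTIO_VSOCK_OP_RW flags: a SOCK_SEQPACKET transmitter marks the last fragment of each message with VIRTIO_VSOCK_SEQ_EOM, and optionally VIRTIO_VSOCK_SEQ_EOR for end of record. mark_last_fragment() is a hypothetical helper; hdr->flags is little endian:

#include <endian.h>
#include <linux/virtio_vsock.h>

static void mark_last_fragment(struct virtio_vsock_hdr *hdr,
			       int end_of_record)
{
	__u32 flags = le32toh(hdr->flags) | VIRTIO_VSOCK_SEQ_EOM;

	if (end_of_record)
		flags |= VIRTIO_VSOCK_SEQ_EOR;
	hdr->flags = htole32(flags);
}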
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index fd0ed7221645..ed07181d4eff 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -18,6 +18,7 @@
#define _UAPI_VM_SOCKETS_H
#include <linux/socket.h>
+#include <linux/types.h>
/* Option name for STREAM socket buffer size. Use as the option name in
* setsockopt(3) or getsockopt(3) to set or get an unsigned long long that
@@ -63,7 +64,7 @@
* timeout for a STREAM socket.
*/
-#define SO_VM_SOCKETS_CONNECT_TIMEOUT 6
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD 6
/* Option name for using non-blocking send/receive. Use as the option name
* for setsockopt(3) or getsockopt(3) to set or get the non-blocking
@@ -80,6 +81,17 @@
#define SO_VM_SOCKETS_NONBLOCK_TXRX 7
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW 8
+
+#if !defined(__KERNEL__)
+#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD
+#else
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT \
+ (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD : SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW)
+#endif
+#endif
+
/* The vSocket equivalent of INADDR_ANY. This works for the svm_cid field of
* sockaddr_vm and indicates the context ID of the current endpoint.
*/
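
A minimal userspace sketch, assuming the usual AF_VSOCK sockopt level: the SO_VM_SOCKETS_CONNECT_TIMEOUT macro above resolves to the _OLD or _NEW name depending on whether time_t matches __kernel_long_t, so callers keep passing a struct timeval either way:

#include <linux/vm_sockets.h>
#include <sys/socket.h>
#include <sys/time.h>

static int set_vsock_connect_timeout(int fd, time_t seconds)
{
	struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };

	return setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
			  &tv, sizeof(tv));
}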
@@ -114,6 +126,26 @@
#define VMADDR_CID_HOST 2
+/* The current default use case for the vsock channel is the following:
+ * local vsock communication between guest and host, and nested-VM setups.
+ * In addition to this, implicitly, the vsock packets are forwarded to the host
+ * if no host->guest vsock transport is set.
+ *
+ * Set this flag value in the corresponding sockaddr_vm field if the vsock
+ * packets always need to be forwarded to the host. Using this behavior,
+ * vsock communication between sibling VMs can be set up.
+ *
+ * This way one can explicitly distinguish between vsock channels created for
+ * different use cases, such as nested VMs (or local communication between
+ * guest and host) and sibling VMs.
+ *
+ * The flag can be set in the connect logic in the user space application flow.
+ * In the listen logic (from kernel space) the flag is set on the remote peer
+ * address. This happens for an incoming connection when it is routed from the
+ * host and comes from the guest (local CID and remote CID > VMADDR_CID_HOST).
+ */
+#define VMADDR_FLAG_TO_HOST 0x01
+
/* Invalid vSockets version. */
#define VM_SOCKETS_INVALID_VERSION -1U
@@ -148,12 +180,32 @@ struct sockaddr_vm {
unsigned short svm_reserved1;
unsigned int svm_port;
unsigned int svm_cid;
+ __u8 svm_flags;
unsigned char svm_zero[sizeof(struct sockaddr) -
sizeof(sa_family_t) -
sizeof(unsigned short) -
- sizeof(unsigned int) - sizeof(unsigned int)];
+ sizeof(unsigned int) -
+ sizeof(unsigned int) -
+ sizeof(__u8)];
};
#define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+/* MSG_ZEROCOPY notifications are encoded in the standard error format,
+ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
+ * kernel source tree for more details.
+ */
+
+/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
+ * when MSG_ZEROCOPY flag is used on transmissions.
+ */
+
+#define SOL_VSOCK 287
+
+/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
+ * when MSG_ZEROCOPY flag is used on transmissions.
+ */
+
+#define VSOCK_RECVERR 1
+
#endif /* _UAPI_VM_SOCKETS_H */
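
A hedged sketch of the new svm_flags field in use: connecting to a sibling VM by forcing the packets toward the host with VMADDR_FLAG_TO_HOST. connect_sibling() is illustrative only:

#include <linux/vm_sockets.h>
#include <string.h>
#include <sys/socket.h>

static int connect_sibling(int fd, unsigned int cid, unsigned int port)
{
	struct sockaddr_vm addr;

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = cid;		/* CID of the sibling VM */
	addr.svm_port = port;
	addr.svm_flags = VMADDR_FLAG_TO_HOST;

	return connect(fd, (struct sockaddr *)&addr, sizeof(addr));
}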
diff --git a/include/uapi/linux/wimax.h b/include/uapi/linux/wimax.h
deleted file mode 100644
index 9f6b77af2f6d..000000000000
--- a/include/uapi/linux/wimax.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Linux WiMax
- * API for user space
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Initial implementation
- *
- *
- * This file declares the user/kernel protocol that is spoken over
- * Generic Netlink, as well as any type declaration that is to be used
- * by kernel and user space.
- *
- * It is intended for user space to clone it verbatim to use it as a
- * primary reference for definitions.
- *
- * Stuff intended for kernel usage as well as full protocol and stack
- * documentation is rooted in include/net/wimax.h.
- */
-
-#ifndef __LINUX__WIMAX_H__
-#define __LINUX__WIMAX_H__
-
-#include <linux/types.h>
-
-enum {
- /**
- * Version of the interface (unsigned decimal, MMm, max 25.5)
- * M - Major: change if removing or modifying an existing call.
- * m - minor: change when adding a new call
- */
- WIMAX_GNL_VERSION = 01,
- /* Generic NetLink attributes */
- WIMAX_GNL_ATTR_INVALID = 0x00,
- WIMAX_GNL_ATTR_MAX = 10,
-};
-
-
-/*
- * Generic NetLink operations
- *
- * Most of these map to an API call; _OP_ stands for operation, _RP_
- * for reply and _RE_ for report (aka: signal).
- */
-enum {
- WIMAX_GNL_OP_MSG_FROM_USER, /* User to kernel message */
- WIMAX_GNL_OP_MSG_TO_USER, /* Kernel to user message */
- WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */
- WIMAX_GNL_OP_RESET, /* Run wimax_rfkill() */
- WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */
- WIMAX_GNL_OP_STATE_GET, /* Request for current state */
-};
-
-
-/* Message from user / to user */
-enum {
- WIMAX_GNL_MSG_IFIDX = 1,
- WIMAX_GNL_MSG_PIPE_NAME,
- WIMAX_GNL_MSG_DATA,
-};
-
-
-/*
- * wimax_rfkill()
- *
- * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's
- * switch state (DISABLED/ENABLED).
- */
-enum wimax_rf_state {
- WIMAX_RF_OFF = 0, /* Radio is off, rfkill on/enabled */
- WIMAX_RF_ON = 1, /* Radio is on, rfkill off/disabled */
- WIMAX_RF_QUERY = 2,
-};
-
-/* Attributes */
-enum {
- WIMAX_GNL_RFKILL_IFIDX = 1,
- WIMAX_GNL_RFKILL_STATE,
-};
-
-
-/* Attributes for wimax_reset() */
-enum {
- WIMAX_GNL_RESET_IFIDX = 1,
-};
-
-/* Attributes for wimax_state_get() */
-enum {
- WIMAX_GNL_STGET_IFIDX = 1,
-};
-
-/*
- * Attributes for the Report State Change
- *
- * For now we just have the old and new states; new attributes might
- * be added later on.
- */
-enum {
- WIMAX_GNL_STCH_IFIDX = 1,
- WIMAX_GNL_STCH_STATE_OLD,
- WIMAX_GNL_STCH_STATE_NEW,
-};
-
-
-/**
- * enum wimax_st - The different states of a WiMAX device
- * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed,
- * but still wimax_dev_add() hasn't been called. There is no state.
- *
- * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and
- * networking stacks, but it is not initialized (normally that is
- * done with 'ifconfig DEV up' [or equivalent], which can upload
- * firmware and enable communications with the device).
- * In this state, the device is powered down, using as little
- * power as possible.
- * This state is the default after a call to wimax_dev_add(). It
- * is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED
- * or %WIMAX_ST_RADIO_OFF in _probe() after the call to
- * wimax_dev_add().
- * It is recommended that the driver leaves this state when
- * calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV
- * down'.
- *
- * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API
- * operations are allowed to proceed except the ones needed to
- * complete the device clean up process.
- *
- * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device
- * is setup, but the device still requires some configuration
- * before being operational.
- * Some WiMAX API calls might work.
- *
- * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (whether
- * by hardware or software switches).
- * It is recommended to always leave the device in this state
- * after initialization.
- *
- * @WIMAX_ST_READY: The device is fully up and radio is on.
- *
- * @WIMAX_ST_SCANNING: [optional] The device has been instructed to
- * scan. In this state, the device cannot be actively connected to
- * a network.
- *
- * @WIMAX_ST_CONNECTING: The device is connecting to a network. This
- * state exists because in some devices, the connect process can
- * include a number of negotiations between user space, kernel
- * space and the device. User space needs to know what the device
- * is doing. If the connect sequence in a device is atomic and
- * fast, the device can transition directly to CONNECTED
- *
- * @WIMAX_ST_CONNECTED: The device is connected to a network.
- *
- * @__WIMAX_ST_INVALID: This is an invalid state used to mark the
- * maximum numeric value of states.
- *
- * Description:
- *
- * Transitions from one state to another one are atomic and can only
- * be caused in kernel space with wimax_state_change(). To read the
- * state, use wimax_state_get().
- *
- * States starting with __ are internal and shall not be used or
- * referred to by drivers or userspace. They look ugly, but that's the
- * point -- if any use is made non-internal to the stack, it is easier
- * to catch on review.
- *
- * All API operations [with well defined exceptions] will take the
- * device mutex before starting and then check the state. If the state
- * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or
- * %__WIMAX_ST_QUIESCING, it will drop the lock and quit with
- * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN.
- *
- * The order of the definitions is important, so we can do numerical
- * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready
- * to operate).
- */
-/*
- * The allowed state transitions are described in the table below
- * (states in rows can go to states in columns where there is an X):
- *
- * UNINI RADIO READY SCAN CONNEC CONNEC
- * NULL DOWN QUIESCING TIALIZED OFF NING TING TED
- * NULL - x
- * DOWN - x x x
- * QUIESCING x -
- * UNINITIALIZED x - x
- * RADIO_OFF x - x
- * READY x x - x x x
- * SCANNING x x x - x x
- * CONNECTING x x x x - x
- * CONNECTED x x x -
- *
- * This table not available in kernel-doc because the formatting messes it up.
- */
- enum wimax_st {
- __WIMAX_ST_NULL = 0,
- WIMAX_ST_DOWN,
- __WIMAX_ST_QUIESCING,
- WIMAX_ST_UNINITIALIZED,
- WIMAX_ST_RADIO_OFF,
- WIMAX_ST_READY,
- WIMAX_ST_SCANNING,
- WIMAX_ST_CONNECTING,
- WIMAX_ST_CONNECTED,
- __WIMAX_ST_INVALID /* Always keep last */
-};
-
-
-#endif /* #ifndef __LINUX__WIMAX_H__ */
diff --git a/include/uapi/linux/wimax/i2400m.h b/include/uapi/linux/wimax/i2400m.h
deleted file mode 100644
index fd198bc24a3c..000000000000
--- a/include/uapi/linux/wimax/i2400m.h
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * Intel Wireless WiMax Connection 2400m
- * Host-Device protocol interface definitions
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Initial implementation
- *
- *
- * This header defines the data structures and constants used to
- * communicate with the device.
- *
- * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL
- *
- * The firmware upload protocol is quite simple and only requires a
- * handful of commands. See drivers/net/wimax/i2400m/fw.c for more
- * details.
- *
- * The BCF data structure is for the firmware file header.
- *
- *
- * THE DATA / CONTROL PROTOCOL
- *
- * This is the normal protocol spoken with the device once the
- * firmware is uploaded. It transports data payloads and control
- * messages back and forth.
- *
- * It consists of 'messages' that pack one or more payloads each. The
- * format is described in detail in drivers/net/wimax/i2400m/rx.c and
- * tx.c.
- *
- *
- * THE L3L4 PROTOCOL
- *
- * The term L3L4 refers to Layer 3 (the device), Layer 4 (the
- * driver/host software).
- *
- * This is the control protocol used by the host to control the i2400m
- * device (scan, connect, disconnect...). This is sent to / received
- * as control frames. These frames consist of a header and zero or
- * more TLVs with information. We call each control frame a "message".
- *
- * Each message is composed of:
- *
- * HEADER
- * [TLV0 + PAYLOAD0]
- * [TLV1 + PAYLOAD1]
- * [...]
- * [TLVN + PAYLOADN]
- *
- * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are
- * defined by a TLV structure (Type Length Value) which is a 'header'
- * (struct i2400m_tlv_hdr) and then the payload.
- *
- * All integers are represented as Little Endian.
- *
- * - REQUESTS AND EVENTS
- *
- * The requests can be classified as follows:
- *
- * COMMAND: implies a request from the host to the device requesting
- * an action being performed. The device will reply with a
- * message (with the same type as the command), status and
- * no (TLV) payload. Execution of a command might cause
- * events (of different type) to be sent later on as
- * device's state changes.
- *
- * GET/SET: similar to COMMAND, but will not cause other
- * EVENTs. The reply, in the case of GET, will contain
- * TLVs with the requested information.
- *
- * EVENT: asynchronous messages sent from the device, maybe as a
- * consequence of previous COMMANDs but disassociated from
- * them.
- *
- * Only one request might be pending at the same time (ie: don't
- * parallelize nor post another GET request before the previous
- * COMMAND has been acknowledged with its corresponding reply by the
- * device).
- *
- * The different requests and their formats are described below:
- *
- * I2400M_MT_* Message types
- * I2400M_MS_* Message status (for replies, events)
- * i2400m_tlv_* TLVs
- *
- * data types are named 'struct i2400m_msg_OPNAME', OPNAME matching the
- * operation.
- */
-
-#ifndef __LINUX__WIMAX__I2400M_H__
-#define __LINUX__WIMAX__I2400M_H__
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/*
- * Host Device Interface (HDI) common to all busses
- */
-
-/* Boot-mode (firmware upload mode) commands */
-
-/* Header for the firmware file */
-struct i2400m_bcf_hdr {
- __le32 module_type;
- __le32 header_len;
- __le32 header_version;
- __le32 module_id;
- __le32 module_vendor;
- __le32 date; /* BCD YYYMMDD */
- __le32 size; /* in dwords */
- __le32 key_size; /* in dwords */
- __le32 modulus_size; /* in dwords */
- __le32 exponent_size; /* in dwords */
- __u8 reserved[88];
-} __attribute__ ((packed));
-
-/* Boot mode opcodes */
-enum i2400m_brh_opcode {
- I2400M_BRH_READ = 1,
- I2400M_BRH_WRITE = 2,
- I2400M_BRH_JUMP = 3,
- I2400M_BRH_SIGNED_JUMP = 8,
- I2400M_BRH_HASH_PAYLOAD_ONLY = 9,
-};
-
-/* Boot mode command masks and stuff */
-enum i2400m_brh {
- I2400M_BRH_SIGNATURE = 0xcbbc0000,
- I2400M_BRH_SIGNATURE_MASK = 0xffff0000,
- I2400M_BRH_SIGNATURE_SHIFT = 16,
- I2400M_BRH_OPCODE_MASK = 0x0000000f,
- I2400M_BRH_RESPONSE_MASK = 0x000000f0,
- I2400M_BRH_RESPONSE_SHIFT = 4,
- I2400M_BRH_DIRECT_ACCESS = 0x00000400,
- I2400M_BRH_RESPONSE_REQUIRED = 0x00000200,
- I2400M_BRH_USE_CHECKSUM = 0x00000100,
-};
-
-
-/**
- * i2400m_bootrom_header - Header for a boot-mode command
- *
- * @cmd: the above command descriptor
- * @target_addr: where on the device memory should the action be performed.
- * @data_size: for read/write, amount of data to be read/written
- * @block_checksum: checksum value (if applicable)
- * @payload: the beginning of data attached to this header
- */
-struct i2400m_bootrom_header {
- __le32 command; /* Compose with enum i2400_brh */
- __le32 target_addr;
- __le32 data_size;
- __le32 block_checksum;
- char payload[0];
-} __attribute__ ((packed));
-
-
-/*
- * Data / control protocol
- */
-
-/* Packet types for the host-device interface */
-enum i2400m_pt {
- I2400M_PT_DATA = 0,
- I2400M_PT_CTRL,
- I2400M_PT_TRACE, /* For device debug */
- I2400M_PT_RESET_WARM, /* device reset */
- I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */
- I2400M_PT_EDATA, /* Extended RX data */
- I2400M_PT_ILLEGAL
-};
-
-
-/*
- * Payload for a data packet
- *
- * This is prefixed to each and every outgoing DATA type.
- */
-struct i2400m_pl_data_hdr {
- __le32 reserved;
-} __attribute__((packed));
-
-
-/*
- * Payload for an extended data packet
- *
- * New in fw v1.4
- *
- * @reorder: if this payload has to be reordered or not (and how)
- * @cs: the type of data in the packet, as defined per (802.16e
- * T11.13.19.1). Currently only 2 (IPv4 packet) supported.
- *
- * This is prefixed to each and every INCOMING DATA packet.
- */
-struct i2400m_pl_edata_hdr {
- __le32 reorder; /* bits defined in i2400m_ro */
- __u8 cs;
- __u8 reserved[11];
-} __attribute__((packed));
-
-enum i2400m_cs {
- I2400M_CS_IPV4_0 = 0,
- I2400M_CS_IPV4 = 2,
-};
-
-enum i2400m_ro {
- I2400M_RO_NEEDED = 0x01,
- I2400M_RO_TYPE = 0x03,
- I2400M_RO_TYPE_SHIFT = 1,
- I2400M_RO_CIN = 0x0f,
- I2400M_RO_CIN_SHIFT = 4,
- I2400M_RO_FBN = 0x07ff,
- I2400M_RO_FBN_SHIFT = 8,
- I2400M_RO_SN = 0x07ff,
- I2400M_RO_SN_SHIFT = 21,
-};
-
-enum i2400m_ro_type {
- I2400M_RO_TYPE_RESET = 0,
- I2400M_RO_TYPE_PACKET,
- I2400M_RO_TYPE_WS,
- I2400M_RO_TYPE_PACKET_WS,
-};
-
-
-/* Misc constants */
-enum {
- I2400M_PL_ALIGN = 16, /* Payload data size alignment */
- I2400M_PL_SIZE_MAX = 0x3EFF,
- I2400M_MAX_PLS_IN_MSG = 60,
- /* protocol barkers: sync sequences; for notifications they
- * are sent in groups of four. */
- I2400M_H2D_PREVIEW_BARKER = 0xcafe900d,
- I2400M_COLD_RESET_BARKER = 0xc01dc01d,
- I2400M_WARM_RESET_BARKER = 0x50f750f7,
- I2400M_NBOOT_BARKER = 0xdeadbeef,
- I2400M_SBOOT_BARKER = 0x0ff1c1a1,
- I2400M_SBOOT_BARKER_6050 = 0x80000001,
- I2400M_ACK_BARKER = 0xfeedbabe,
- I2400M_D2H_MSG_BARKER = 0xbeefbabe,
-};
-
-
-/*
- * Hardware payload descriptor
- *
- * Bitfields encoded in a struct to enforce typing semantics.
- *
- * Look in rx.c and tx.c for a full description of the format.
- */
-struct i2400m_pld {
- __le32 val;
-} __attribute__ ((packed));
-
-#define I2400M_PLD_SIZE_MASK 0x00003fff
-#define I2400M_PLD_TYPE_SHIFT 16
-#define I2400M_PLD_TYPE_MASK 0x000f0000
-
-/*
- * Header for a TX message or RX message
- *
- * @barker: preamble
- * @size: used for management of the FIFO queue buffer; before
- * sending, this is converted to be a real preamble. This
- * indicates the real size of the TX message that starts at this
- * point. If the highest bit is set, then this message is to be
- * skipped.
- * @sequence: sequence number of this message
- * @offset: offset where the message itself starts -- see the comments
- * in the file header about message header and payload descriptor
- * alignment.
- * @num_pls: number of payloads in this message
- * @padding: amount of padding bytes at the end of the message to make
- * it be of block-size aligned
- *
- * Look in rx.c and tx.c for a full description of the format.
- */
-struct i2400m_msg_hdr {
- union {
- __le32 barker;
- __u32 size; /* same size type as barker!! */
- };
- union {
- __le32 sequence;
- __u32 offset; /* same size type as barker!! */
- };
- __le16 num_pls;
- __le16 rsv1;
- __le16 padding;
- __le16 rsv2;
- struct i2400m_pld pld[0];
-} __attribute__ ((packed));
-
-
-
-/*
- * L3/L4 control protocol
- */
-
-enum {
- /* Interface version */
- I2400M_L3L4_VERSION = 0x0100,
-};
-
-/* Message types */
-enum i2400m_mt {
- I2400M_MT_RESERVED = 0x0000,
- I2400M_MT_INVALID = 0xffff,
- I2400M_MT_REPORT_MASK = 0x8000,
-
- I2400M_MT_GET_SCAN_RESULT = 0x4202,
- I2400M_MT_SET_SCAN_PARAM = 0x4402,
- I2400M_MT_CMD_RF_CONTROL = 0x4602,
- I2400M_MT_CMD_SCAN = 0x4603,
- I2400M_MT_CMD_CONNECT = 0x4604,
- I2400M_MT_CMD_DISCONNECT = 0x4605,
- I2400M_MT_CMD_EXIT_IDLE = 0x4606,
- I2400M_MT_GET_LM_VERSION = 0x5201,
- I2400M_MT_GET_DEVICE_INFO = 0x5202,
- I2400M_MT_GET_LINK_STATUS = 0x5203,
- I2400M_MT_GET_STATISTICS = 0x5204,
- I2400M_MT_GET_STATE = 0x5205,
- I2400M_MT_GET_MEDIA_STATUS = 0x5206,
- I2400M_MT_SET_INIT_CONFIG = 0x5404,
- I2400M_MT_CMD_INIT = 0x5601,
- I2400M_MT_CMD_TERMINATE = 0x5602,
- I2400M_MT_CMD_MODE_OF_OP = 0x5603,
- I2400M_MT_CMD_RESET_DEVICE = 0x5604,
- I2400M_MT_CMD_MONITOR_CONTROL = 0x5605,
- I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606,
- I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201,
- I2400M_MT_SET_EAP_SUCCESS = 0x6402,
- I2400M_MT_SET_EAP_FAIL = 0x6403,
- I2400M_MT_SET_EAP_KEY = 0x6404,
- I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602,
- I2400M_MT_REPORT_SCAN_RESULT = 0xc002,
- I2400M_MT_REPORT_STATE = 0xd002,
- I2400M_MT_REPORT_POWERSAVE_READY = 0xd005,
- I2400M_MT_REPORT_EAP_REQUEST = 0xe002,
- I2400M_MT_REPORT_EAP_RESTART = 0xe003,
- I2400M_MT_REPORT_ALT_ACCEPT = 0xe004,
- I2400M_MT_REPORT_KEY_REQUEST = 0xe005,
-};
-
-
-/*
- * Message Ack Status codes
- *
- * When a message is replied-to, this status is reported.
- */
-enum i2400m_ms {
- I2400M_MS_DONE_OK = 0,
- I2400M_MS_DONE_IN_PROGRESS = 1,
- I2400M_MS_INVALID_OP = 2,
- I2400M_MS_BAD_STATE = 3,
- I2400M_MS_ILLEGAL_VALUE = 4,
- I2400M_MS_MISSING_PARAMS = 5,
- I2400M_MS_VERSION_ERROR = 6,
- I2400M_MS_ACCESSIBILITY_ERROR = 7,
- I2400M_MS_BUSY = 8,
- I2400M_MS_CORRUPTED_TLV = 9,
- I2400M_MS_UNINITIALIZED = 10,
- I2400M_MS_UNKNOWN_ERROR = 11,
- I2400M_MS_PRODUCTION_ERROR = 12,
- I2400M_MS_NO_RF = 13,
- I2400M_MS_NOT_READY_FOR_POWERSAVE = 14,
- I2400M_MS_THERMAL_CRITICAL = 15,
- I2400M_MS_MAX
-};
-
-
-/**
- * i2400m_tlv - enumeration of the different types of TLVs
- *
- * TLVs stand for type-length-value and are the header for a payload
- * composed of almost anything. Each payload has a type assigned
- * and a length.
- */
-enum i2400m_tlv {
- I2400M_TLV_L4_MESSAGE_VERSIONS = 129,
- I2400M_TLV_SYSTEM_STATE = 141,
- I2400M_TLV_MEDIA_STATUS = 161,
- I2400M_TLV_RF_OPERATION = 162,
- I2400M_TLV_RF_STATUS = 163,
- I2400M_TLV_DEVICE_RESET_TYPE = 132,
- I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
- I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
- I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
- I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
-};
-
-
-struct i2400m_tlv_hdr {
- __le16 type;
- __le16 length; /* payload's */
- __u8 pl[0];
-} __attribute__((packed));
-
-
-struct i2400m_l3l4_hdr {
- __le16 type;
- __le16 length; /* payload's */
- __le16 version;
- __le16 resv1;
- __le16 status;
- __le16 resv2;
- struct i2400m_tlv_hdr pl[0];
-} __attribute__((packed));
-
-
-/**
- * i2400m_system_state - different states of the device
- */
-enum i2400m_system_state {
- I2400M_SS_UNINITIALIZED = 1,
- I2400M_SS_INIT,
- I2400M_SS_READY,
- I2400M_SS_SCAN,
- I2400M_SS_STANDBY,
- I2400M_SS_CONNECTING,
- I2400M_SS_WIMAX_CONNECTED,
- I2400M_SS_DATA_PATH_CONNECTED,
- I2400M_SS_IDLE,
- I2400M_SS_DISCONNECTING,
- I2400M_SS_OUT_OF_ZONE,
- I2400M_SS_SLEEPACTIVE,
- I2400M_SS_PRODUCTION,
- I2400M_SS_CONFIG,
- I2400M_SS_RF_OFF,
- I2400M_SS_RF_SHUTDOWN,
- I2400M_SS_DEVICE_DISCONNECT,
- I2400M_SS_MAX,
-};
-
-
-/**
- * i2400m_tlv_system_state - report on the state of the system
- *
- * @state: see enum i2400m_system_state
- */
-struct i2400m_tlv_system_state {
- struct i2400m_tlv_hdr hdr;
- __le32 state;
-} __attribute__((packed));
-
-
-struct i2400m_tlv_l4_message_versions {
- struct i2400m_tlv_hdr hdr;
- __le16 major;
- __le16 minor;
- __le16 branch;
- __le16 reserved;
-} __attribute__((packed));
-
-
-struct i2400m_tlv_detailed_device_info {
- struct i2400m_tlv_hdr hdr;
- __u8 reserved1[400];
- __u8 mac_address[ETH_ALEN];
- __u8 reserved2[2];
-} __attribute__((packed));
-
-
-enum i2400m_rf_switch_status {
- I2400M_RF_SWITCH_ON = 1,
- I2400M_RF_SWITCH_OFF = 2,
-};
-
-struct i2400m_tlv_rf_switches_status {
- struct i2400m_tlv_hdr hdr;
- __u8 sw_rf_switch; /* 1 ON, 2 OFF */
- __u8 hw_rf_switch; /* 1 ON, 2 OFF */
- __u8 reserved[2];
-} __attribute__((packed));
-
-
-enum {
- i2400m_rf_operation_on = 1,
- i2400m_rf_operation_off = 2
-};
-
-struct i2400m_tlv_rf_operation {
- struct i2400m_tlv_hdr hdr;
- __le32 status; /* 1 ON, 2 OFF */
-} __attribute__((packed));
-
-
-enum i2400m_tlv_reset_type {
- I2400M_RESET_TYPE_COLD = 1,
- I2400M_RESET_TYPE_WARM
-};
-
-struct i2400m_tlv_device_reset_type {
- struct i2400m_tlv_hdr hdr;
- __le32 reset_type;
-} __attribute__((packed));
-
-
-struct i2400m_tlv_config_idle_parameters {
- struct i2400m_tlv_hdr hdr;
- __le32 idle_timeout; /* 100 to 300000 ms [5min], 100 increments
- * 0 disabled */
- __le32 idle_paging_interval; /* frames */
-} __attribute__((packed));
-
-
-enum i2400m_media_status {
- I2400M_MEDIA_STATUS_LINK_UP = 1,
- I2400M_MEDIA_STATUS_LINK_DOWN,
- I2400M_MEDIA_STATUS_LINK_RENEW,
-};
-
-struct i2400m_tlv_media_status {
- struct i2400m_tlv_hdr hdr;
- __le32 media_status;
-} __attribute__((packed));
-
-
-/* New in v1.4 */
-struct i2400m_tlv_config_idle_timeout {
- struct i2400m_tlv_hdr hdr;
- __le32 timeout; /* 100 to 300000 ms [5min], 100 increments
- * 0 disabled */
-} __attribute__((packed));
-
-/* New in v1.4 -- for backward compat, will be removed */
-struct i2400m_tlv_config_d2h_data_format {
- struct i2400m_tlv_hdr hdr;
- __u8 format; /* 0 old format, 1 enhanced */
- __u8 reserved[3];
-} __attribute__((packed));
-
-/* New in v1.4 */
-struct i2400m_tlv_config_dl_host_reorder {
- struct i2400m_tlv_hdr hdr;
- __u8 reorder; /* 0 disabled, 1 enabled */
- __u8 reserved[3];
-} __attribute__((packed));
-
-
-#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 08967b3f19c8..3c2ad5fae17f 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -835,7 +835,7 @@ struct iw_encode_ext {
* individual keys */
__u16 alg; /* IW_ENCODE_ALG_* */
__u16 key_len;
- __u8 key[0];
+ __u8 key[];
};
/* SIOCSIWMLME data */
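
The key[0] to key[] change is the usual conversion to a C99 flexible array member; it does not change the layout, but it lets compilers and fortify checks see the real bounds. A hedged allocation sketch (alloc_encode_ext is hypothetical):

#include <linux/wireless.h>
#include <stdlib.h>

static struct iw_encode_ext *alloc_encode_ext(__u16 key_len)
{
	/* header plus key_len trailing key bytes */
	struct iw_encode_ext *ext = calloc(1, sizeof(*ext) + key_len);

	if (ext)
		ext->key_len = key_len;
	return ext;
}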
diff --git a/include/uapi/linux/wwan.h b/include/uapi/linux/wwan.h
new file mode 100644
index 000000000000..32a2720b4d11
--- /dev/null
+++ b/include/uapi/linux/wwan.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 Intel Corporation.
+ */
+#ifndef _UAPI_WWAN_H_
+#define _UAPI_WWAN_H_
+
+enum {
+ IFLA_WWAN_UNSPEC,
+ IFLA_WWAN_LINK_ID, /* u32 */
+
+ __IFLA_WWAN_MAX
+};
+#define IFLA_WWAN_MAX (__IFLA_WWAN_MAX - 1)
+
+#endif /* _UAPI_WWAN_H_ */
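
A hedged sketch of the new attribute in use, assuming libmnl's mnl_attr_put_u32() for encoding: an RTM_NEWLINK request carries IFLA_WWAN_LINK_ID to select which WWAN channel the new link is bound to:

#include <libmnl/libmnl.h>
#include <linux/wwan.h>

static void put_wwan_linkid(struct nlmsghdr *nlh, uint32_t link_id)
{
	/* nlh is an RTM_NEWLINK message under construction */
	mnl_attr_put_u32(nlh, IFLA_WWAN_LINK_ID, link_id);
}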
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index ffc6a5391bb7..6a77328be114 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -4,6 +4,7 @@
#include <linux/in6.h>
#include <linux/types.h>
+#include <linux/stddef.h>
/* All of the structures in this file may not change size as they are
* passed into the kernel from userspace via netlink sockets.
@@ -33,7 +34,7 @@ struct xfrm_sec_ctx {
__u8 ctx_alg;
__u16 ctx_len;
__u32 ctx_sid;
- char ctx_str[0];
+ char ctx_str[] __counted_by(ctx_len);
};
/* Security Context Domains of Interpretation */
@@ -96,27 +97,27 @@ struct xfrm_replay_state_esn {
__u32 oseq_hi;
__u32 seq_hi;
__u32 replay_window;
- __u32 bmp[0];
+ __u32 bmp[];
};
struct xfrm_algo {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
- char alg_key[0];
+ char alg_key[];
};
struct xfrm_algo_auth {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
unsigned int alg_trunc_len; /* in bits */
- char alg_key[0];
+ char alg_key[];
};
struct xfrm_algo_aead {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
unsigned int alg_icv_len; /* in bits */
- char alg_key[0];
+ char alg_key[];
};
struct xfrm_stats {
@@ -215,6 +216,11 @@ enum {
XFRM_MSG_MAPPING,
#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
+
+ XFRM_MSG_SETDEFAULT,
+#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT
+ XFRM_MSG_GETDEFAULT,
+#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -291,7 +297,7 @@ enum xfrm_attr_type_t {
XFRMA_ETIMER_THRESH,
XFRMA_SRCADDR, /* xfrm_address_t */
XFRMA_COADDR, /* xfrm_address_t */
- XFRMA_LASTUSED, /* unsigned long */
+ XFRMA_LASTUSED, /* __u64 */
XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
XFRMA_MIGRATE,
XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */
@@ -308,6 +314,7 @@ enum xfrm_attr_type_t {
XFRMA_SET_MARK, /* __u32 */
XFRMA_SET_MARK_MASK, /* __u32 */
XFRMA_IF_ID, /* __u32 */
+ XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */
__XFRMA_MAX
#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
@@ -505,8 +512,29 @@ struct xfrm_user_offload {
int ifindex;
__u8 flags;
};
+/* This flag was exposed without any kernel code that supports it.
+ * Unfortunately, strongswan has the code that sets this flag,
+ * which makes it impossible to reuse this bit.
+ *
+ * So leave it here to make sure that it won't be reused by mistake.
+ */
#define XFRM_OFFLOAD_IPV6 1
#define XFRM_OFFLOAD_INBOUND 2
+/* Two bits above are relevant for state path only, while
+ * offload is used for both policy and state flows.
+ *
+ * In policy offload mode, they are free and can be safely reused.
+ */
+#define XFRM_OFFLOAD_PACKET 4
+
+struct xfrm_userpolicy_default {
+#define XFRM_USERPOLICY_UNSPEC 0
+#define XFRM_USERPOLICY_BLOCK 1
+#define XFRM_USERPOLICY_ACCEPT 2
+ __u8 in;
+ __u8 fwd;
+ __u8 out;
+};
#ifndef __KERNEL__
/* backwards compatibility for userspace */
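
As an illustrative sketch (not part of the patch), the payload of an XFRM_MSG_SETDEFAULT netlink message is simply the new struct; for example, blocking forwarded traffic while accepting input and output when no matching policy exists:

static const struct xfrm_userpolicy_default xfrm_default = {
	.in  = XFRM_USERPOLICY_ACCEPT,
	.fwd = XFRM_USERPOLICY_BLOCK,
	.out = XFRM_USERPOLICY_ACCEPT,
};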
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 07de2b7aac85..f33d914d8f46 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -10,14 +10,68 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
-#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
-#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
+#define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
+#define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static)
+#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
+#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)
+#define FASTRPC_IOCTL_GET_DSP_INFO _IOWR('R', 13, struct fastrpc_ioctl_capability)
+
+/**
+ * enum fastrpc_map_flags - control flags for mapping memory on a DSP user process
+ * @FASTRPC_MAP_STATIC: Map memory pages with RW- permission and CACHE WRITEBACK.
+ *                      The driver is responsible for cache maintenance when the
+ *                      buffer is passed to FastRPC calls. The same virtual address
+ *                      is assigned for subsequent FastRPC calls.
+ * @FASTRPC_MAP_RESERVED: Reserved
+ * @FASTRPC_MAP_FD: Map memory pages with RW- permission and CACHE WRITEBACK.
+ *                  The mapping is tagged with a file descriptor. The user is
+ *                  responsible for CPU and DSP cache maintenance for the buffer.
+ *                  Get the buffer's virtual address on the DSP using the
+ *                  HAP_mmap_get() and HAP_mmap_put() APIs.
+ * @FASTRPC_MAP_FD_DELAYED: Mapping is delayed until the user calls the HAP_mmap()
+ *                          and HAP_munmap() functions on the DSP. This is useful
+ *                          for mapping a buffer with cache modes other than the
+ *                          default. The user is responsible for CPU and DSP cache
+ *                          maintenance for the buffer.
+ * @FASTRPC_MAP_FD_NOMAP: Skip the CPU mapping; otherwise behaves like the
+ *                        FASTRPC_MAP_FD_DELAYED flag.
+ * @FASTRPC_MAP_MAX: max count for flags
+ */
+enum fastrpc_map_flags {
+ FASTRPC_MAP_STATIC = 0,
+ FASTRPC_MAP_RESERVED,
+ FASTRPC_MAP_FD = 2,
+ FASTRPC_MAP_FD_DELAYED,
+ FASTRPC_MAP_FD_NOMAP = 16,
+ FASTRPC_MAP_MAX,
+};
+
+enum fastrpc_proc_attr {
+ /* Macro for Debug attr */
+ FASTRPC_MODE_DEBUG = (1 << 0),
+ /* Macro for Ptrace */
+ FASTRPC_MODE_PTRACE = (1 << 1),
+ /* Macro for CRC Check */
+ FASTRPC_MODE_CRC = (1 << 2),
+ /* Macro for Unsigned PD */
+ FASTRPC_MODE_UNSIGNED_MODULE = (1 << 3),
+ /* Macro for Adaptive QoS */
+ FASTRPC_MODE_ADAPTIVE_QOS = (1 << 4),
+ /* Macro for System Process */
+ FASTRPC_MODE_SYSTEM_PROCESS = (1 << 5),
+	/* Macro for Privileged Process */
+ FASTRPC_MODE_PRIVILEGED = (1 << 6),
+};
+
+/* Fastrpc attribute for memory protection of buffers */
+#define FASTRPC_ATTR_SECUREMAP (1)
struct fastrpc_invoke_args {
__u64 ptr;
__u64 length;
__s32 fd;
- __u32 reserved;
+ __u32 attr;
};
struct fastrpc_invoke {
@@ -34,6 +88,12 @@ struct fastrpc_init_create {
__u64 file; /* pointer to elf file */
};
+struct fastrpc_init_create_static {
+ __u32 namelen; /* length of pd process name */
+ __u32 memlen;
+ __u64 name; /* pd process name */
+};
+
struct fastrpc_alloc_dma_buf {
__s32 fd; /* fd */
__u32 flags; /* flags to map with */
@@ -48,9 +108,36 @@ struct fastrpc_req_mmap {
__u64 vaddrout; /* dsp virtual address */
};
+struct fastrpc_mem_map {
+ __s32 version;
+ __s32 fd; /* fd */
+ __s32 offset; /* buffer offset */
+ __u32 flags; /* flags defined in enum fastrpc_map_flags */
+ __u64 vaddrin; /* buffer virtual address */
+ __u64 length; /* buffer length */
+ __u64 vaddrout; /* [out] remote virtual address */
+ __s32 attrs; /* buffer attributes used for SMMU mapping */
+ __s32 reserved[4];
+};
+
struct fastrpc_req_munmap {
__u64 vaddrout; /* address to unmap */
__u64 size; /* size */
};
+struct fastrpc_mem_unmap {
+	__s32 version;
+ __s32 fd; /* fd */
+ __u64 vaddr; /* remote process (dsp) virtual address */
+ __u64 length; /* buffer size */
+ __s32 reserved[5];
+};
+
+struct fastrpc_ioctl_capability {
+ __u32 domain;
+ __u32 attribute_id;
+ __u32 capability; /* dsp capability */
+ __u32 reserved[4];
+};
+
#endif /* __QCOM_FASTRPC_H__ */
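
A hedged userspace sketch of the new mapping ioctl: dsp_map(), a hypothetical helper, maps a dmabuf onto the DSP with FASTRPC_IOCTL_MEM_MAP and returns the remote address from vaddrout; it assumes the header above is included:

#include <string.h>
#include <sys/ioctl.h>

static int dsp_map(int rpc_fd, int buf_fd, __u64 length, __u64 *dsp_addr)
{
	struct fastrpc_mem_map req;
	int ret;

	memset(&req, 0, sizeof(req));
	req.fd = buf_fd;
	req.flags = FASTRPC_MAP_FD;	/* see enum fastrpc_map_flags */
	req.length = length;

	ret = ioctl(rpc_fd, FASTRPC_IOCTL_MEM_MAP, &req);
	if (!ret)
		*dsp_addr = req.vaddrout;	/* remote (DSP) address */
	return ret;
}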
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
deleted file mode 100644
index d5c4f983b7a8..000000000000
--- a/include/uapi/misc/habanalabs.h
+++ /dev/null
@@ -1,921 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
- *
- * Copyright 2016-2020 HabanaLabs, Ltd.
- * All Rights Reserved.
- *
- */
-
-#ifndef HABANALABS_H_
-#define HABANALABS_H_
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/*
- * Defines that are asic-specific but constitutes as ABI between kernel driver
- * and userspace
- */
-#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
-#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */
-
-#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 48
-#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 24
-/*
- * Goya queue Numbering
- *
- * The external queues (PCI DMA channels) MUST be before the internal queues
- * and each group (PCI DMA channels and internal) must be contiguous inside
- * itself but there can be a gap between the two groups (although not
- * recommended)
- */
-
-enum goya_queue_id {
- GOYA_QUEUE_ID_DMA_0 = 0,
- GOYA_QUEUE_ID_DMA_1 = 1,
- GOYA_QUEUE_ID_DMA_2 = 2,
- GOYA_QUEUE_ID_DMA_3 = 3,
- GOYA_QUEUE_ID_DMA_4 = 4,
- GOYA_QUEUE_ID_CPU_PQ = 5,
- GOYA_QUEUE_ID_MME = 6, /* Internal queues start here */
- GOYA_QUEUE_ID_TPC0 = 7,
- GOYA_QUEUE_ID_TPC1 = 8,
- GOYA_QUEUE_ID_TPC2 = 9,
- GOYA_QUEUE_ID_TPC3 = 10,
- GOYA_QUEUE_ID_TPC4 = 11,
- GOYA_QUEUE_ID_TPC5 = 12,
- GOYA_QUEUE_ID_TPC6 = 13,
- GOYA_QUEUE_ID_TPC7 = 14,
- GOYA_QUEUE_ID_SIZE
-};
-
-/*
- * Gaudi queue Numbering
- * External queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*.
- * Except one CPU queue, all the rest are internal queues.
- */
-
-enum gaudi_queue_id {
- GAUDI_QUEUE_ID_DMA_0_0 = 0, /* external */
- GAUDI_QUEUE_ID_DMA_0_1 = 1, /* external */
- GAUDI_QUEUE_ID_DMA_0_2 = 2, /* external */
- GAUDI_QUEUE_ID_DMA_0_3 = 3, /* external */
- GAUDI_QUEUE_ID_DMA_1_0 = 4, /* external */
- GAUDI_QUEUE_ID_DMA_1_1 = 5, /* external */
- GAUDI_QUEUE_ID_DMA_1_2 = 6, /* external */
- GAUDI_QUEUE_ID_DMA_1_3 = 7, /* external */
- GAUDI_QUEUE_ID_CPU_PQ = 8, /* CPU */
- GAUDI_QUEUE_ID_DMA_2_0 = 9, /* internal */
- GAUDI_QUEUE_ID_DMA_2_1 = 10, /* internal */
- GAUDI_QUEUE_ID_DMA_2_2 = 11, /* internal */
- GAUDI_QUEUE_ID_DMA_2_3 = 12, /* internal */
- GAUDI_QUEUE_ID_DMA_3_0 = 13, /* internal */
- GAUDI_QUEUE_ID_DMA_3_1 = 14, /* internal */
- GAUDI_QUEUE_ID_DMA_3_2 = 15, /* internal */
- GAUDI_QUEUE_ID_DMA_3_3 = 16, /* internal */
- GAUDI_QUEUE_ID_DMA_4_0 = 17, /* internal */
- GAUDI_QUEUE_ID_DMA_4_1 = 18, /* internal */
- GAUDI_QUEUE_ID_DMA_4_2 = 19, /* internal */
- GAUDI_QUEUE_ID_DMA_4_3 = 20, /* internal */
- GAUDI_QUEUE_ID_DMA_5_0 = 21, /* external */
- GAUDI_QUEUE_ID_DMA_5_1 = 22, /* external */
- GAUDI_QUEUE_ID_DMA_5_2 = 23, /* external */
- GAUDI_QUEUE_ID_DMA_5_3 = 24, /* external */
- GAUDI_QUEUE_ID_DMA_6_0 = 25, /* internal */
- GAUDI_QUEUE_ID_DMA_6_1 = 26, /* internal */
- GAUDI_QUEUE_ID_DMA_6_2 = 27, /* internal */
- GAUDI_QUEUE_ID_DMA_6_3 = 28, /* internal */
- GAUDI_QUEUE_ID_DMA_7_0 = 29, /* internal */
- GAUDI_QUEUE_ID_DMA_7_1 = 30, /* internal */
- GAUDI_QUEUE_ID_DMA_7_2 = 31, /* internal */
- GAUDI_QUEUE_ID_DMA_7_3 = 32, /* internal */
- GAUDI_QUEUE_ID_MME_0_0 = 33, /* internal */
- GAUDI_QUEUE_ID_MME_0_1 = 34, /* internal */
- GAUDI_QUEUE_ID_MME_0_2 = 35, /* internal */
- GAUDI_QUEUE_ID_MME_0_3 = 36, /* internal */
- GAUDI_QUEUE_ID_MME_1_0 = 37, /* internal */
- GAUDI_QUEUE_ID_MME_1_1 = 38, /* internal */
- GAUDI_QUEUE_ID_MME_1_2 = 39, /* internal */
- GAUDI_QUEUE_ID_MME_1_3 = 40, /* internal */
- GAUDI_QUEUE_ID_TPC_0_0 = 41, /* internal */
- GAUDI_QUEUE_ID_TPC_0_1 = 42, /* internal */
- GAUDI_QUEUE_ID_TPC_0_2 = 43, /* internal */
- GAUDI_QUEUE_ID_TPC_0_3 = 44, /* internal */
- GAUDI_QUEUE_ID_TPC_1_0 = 45, /* internal */
- GAUDI_QUEUE_ID_TPC_1_1 = 46, /* internal */
- GAUDI_QUEUE_ID_TPC_1_2 = 47, /* internal */
- GAUDI_QUEUE_ID_TPC_1_3 = 48, /* internal */
- GAUDI_QUEUE_ID_TPC_2_0 = 49, /* internal */
- GAUDI_QUEUE_ID_TPC_2_1 = 50, /* internal */
- GAUDI_QUEUE_ID_TPC_2_2 = 51, /* internal */
- GAUDI_QUEUE_ID_TPC_2_3 = 52, /* internal */
- GAUDI_QUEUE_ID_TPC_3_0 = 53, /* internal */
- GAUDI_QUEUE_ID_TPC_3_1 = 54, /* internal */
- GAUDI_QUEUE_ID_TPC_3_2 = 55, /* internal */
- GAUDI_QUEUE_ID_TPC_3_3 = 56, /* internal */
- GAUDI_QUEUE_ID_TPC_4_0 = 57, /* internal */
- GAUDI_QUEUE_ID_TPC_4_1 = 58, /* internal */
- GAUDI_QUEUE_ID_TPC_4_2 = 59, /* internal */
- GAUDI_QUEUE_ID_TPC_4_3 = 60, /* internal */
- GAUDI_QUEUE_ID_TPC_5_0 = 61, /* internal */
- GAUDI_QUEUE_ID_TPC_5_1 = 62, /* internal */
- GAUDI_QUEUE_ID_TPC_5_2 = 63, /* internal */
- GAUDI_QUEUE_ID_TPC_5_3 = 64, /* internal */
- GAUDI_QUEUE_ID_TPC_6_0 = 65, /* internal */
- GAUDI_QUEUE_ID_TPC_6_1 = 66, /* internal */
- GAUDI_QUEUE_ID_TPC_6_2 = 67, /* internal */
- GAUDI_QUEUE_ID_TPC_6_3 = 68, /* internal */
- GAUDI_QUEUE_ID_TPC_7_0 = 69, /* internal */
- GAUDI_QUEUE_ID_TPC_7_1 = 70, /* internal */
- GAUDI_QUEUE_ID_TPC_7_2 = 71, /* internal */
- GAUDI_QUEUE_ID_TPC_7_3 = 72, /* internal */
- GAUDI_QUEUE_ID_NIC_0_0 = 73, /* internal */
- GAUDI_QUEUE_ID_NIC_0_1 = 74, /* internal */
- GAUDI_QUEUE_ID_NIC_0_2 = 75, /* internal */
- GAUDI_QUEUE_ID_NIC_0_3 = 76, /* internal */
- GAUDI_QUEUE_ID_NIC_1_0 = 77, /* internal */
- GAUDI_QUEUE_ID_NIC_1_1 = 78, /* internal */
- GAUDI_QUEUE_ID_NIC_1_2 = 79, /* internal */
- GAUDI_QUEUE_ID_NIC_1_3 = 80, /* internal */
- GAUDI_QUEUE_ID_NIC_2_0 = 81, /* internal */
- GAUDI_QUEUE_ID_NIC_2_1 = 82, /* internal */
- GAUDI_QUEUE_ID_NIC_2_2 = 83, /* internal */
- GAUDI_QUEUE_ID_NIC_2_3 = 84, /* internal */
- GAUDI_QUEUE_ID_NIC_3_0 = 85, /* internal */
- GAUDI_QUEUE_ID_NIC_3_1 = 86, /* internal */
- GAUDI_QUEUE_ID_NIC_3_2 = 87, /* internal */
- GAUDI_QUEUE_ID_NIC_3_3 = 88, /* internal */
- GAUDI_QUEUE_ID_NIC_4_0 = 89, /* internal */
- GAUDI_QUEUE_ID_NIC_4_1 = 90, /* internal */
- GAUDI_QUEUE_ID_NIC_4_2 = 91, /* internal */
- GAUDI_QUEUE_ID_NIC_4_3 = 92, /* internal */
- GAUDI_QUEUE_ID_NIC_5_0 = 93, /* internal */
- GAUDI_QUEUE_ID_NIC_5_1 = 94, /* internal */
- GAUDI_QUEUE_ID_NIC_5_2 = 95, /* internal */
- GAUDI_QUEUE_ID_NIC_5_3 = 96, /* internal */
- GAUDI_QUEUE_ID_NIC_6_0 = 97, /* internal */
- GAUDI_QUEUE_ID_NIC_6_1 = 98, /* internal */
- GAUDI_QUEUE_ID_NIC_6_2 = 99, /* internal */
- GAUDI_QUEUE_ID_NIC_6_3 = 100, /* internal */
- GAUDI_QUEUE_ID_NIC_7_0 = 101, /* internal */
- GAUDI_QUEUE_ID_NIC_7_1 = 102, /* internal */
- GAUDI_QUEUE_ID_NIC_7_2 = 103, /* internal */
- GAUDI_QUEUE_ID_NIC_7_3 = 104, /* internal */
- GAUDI_QUEUE_ID_NIC_8_0 = 105, /* internal */
- GAUDI_QUEUE_ID_NIC_8_1 = 106, /* internal */
- GAUDI_QUEUE_ID_NIC_8_2 = 107, /* internal */
- GAUDI_QUEUE_ID_NIC_8_3 = 108, /* internal */
- GAUDI_QUEUE_ID_NIC_9_0 = 109, /* internal */
- GAUDI_QUEUE_ID_NIC_9_1 = 110, /* internal */
- GAUDI_QUEUE_ID_NIC_9_2 = 111, /* internal */
- GAUDI_QUEUE_ID_NIC_9_3 = 112, /* internal */
- GAUDI_QUEUE_ID_SIZE
-};
-
-/*
- * Engine Numbering
- *
- * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
- */
-
-enum goya_engine_id {
- GOYA_ENGINE_ID_DMA_0 = 0,
- GOYA_ENGINE_ID_DMA_1,
- GOYA_ENGINE_ID_DMA_2,
- GOYA_ENGINE_ID_DMA_3,
- GOYA_ENGINE_ID_DMA_4,
- GOYA_ENGINE_ID_MME_0,
- GOYA_ENGINE_ID_TPC_0,
- GOYA_ENGINE_ID_TPC_1,
- GOYA_ENGINE_ID_TPC_2,
- GOYA_ENGINE_ID_TPC_3,
- GOYA_ENGINE_ID_TPC_4,
- GOYA_ENGINE_ID_TPC_5,
- GOYA_ENGINE_ID_TPC_6,
- GOYA_ENGINE_ID_TPC_7,
- GOYA_ENGINE_ID_SIZE
-};
-
-enum gaudi_engine_id {
- GAUDI_ENGINE_ID_DMA_0 = 0,
- GAUDI_ENGINE_ID_DMA_1,
- GAUDI_ENGINE_ID_DMA_2,
- GAUDI_ENGINE_ID_DMA_3,
- GAUDI_ENGINE_ID_DMA_4,
- GAUDI_ENGINE_ID_DMA_5,
- GAUDI_ENGINE_ID_DMA_6,
- GAUDI_ENGINE_ID_DMA_7,
- GAUDI_ENGINE_ID_MME_0,
- GAUDI_ENGINE_ID_MME_1,
- GAUDI_ENGINE_ID_MME_2,
- GAUDI_ENGINE_ID_MME_3,
- GAUDI_ENGINE_ID_TPC_0,
- GAUDI_ENGINE_ID_TPC_1,
- GAUDI_ENGINE_ID_TPC_2,
- GAUDI_ENGINE_ID_TPC_3,
- GAUDI_ENGINE_ID_TPC_4,
- GAUDI_ENGINE_ID_TPC_5,
- GAUDI_ENGINE_ID_TPC_6,
- GAUDI_ENGINE_ID_TPC_7,
- GAUDI_ENGINE_ID_NIC_0,
- GAUDI_ENGINE_ID_NIC_1,
- GAUDI_ENGINE_ID_NIC_2,
- GAUDI_ENGINE_ID_NIC_3,
- GAUDI_ENGINE_ID_NIC_4,
- GAUDI_ENGINE_ID_NIC_5,
- GAUDI_ENGINE_ID_NIC_6,
- GAUDI_ENGINE_ID_NIC_7,
- GAUDI_ENGINE_ID_NIC_8,
- GAUDI_ENGINE_ID_NIC_9,
- GAUDI_ENGINE_ID_SIZE
-};
-
-enum hl_device_status {
- HL_DEVICE_STATUS_OPERATIONAL,
- HL_DEVICE_STATUS_IN_RESET,
- HL_DEVICE_STATUS_MALFUNCTION
-};
-
-/* Opcodes for the management ioctl
- *
- * HL_INFO_HW_IP_INFO - Receive information about different IP blocks in the
- * device.
- * HL_INFO_HW_EVENTS - Receive an array describing how many times each event
- * occurred since the last hard reset.
- * HL_INFO_DRAM_USAGE - Retrieve the DRAM usage inside the device and of the
- * specific context. This is relevant only for devices
- * where the DRAM is managed by the kernel driver.
- * HL_INFO_HW_IDLE - Retrieve information about the idle status of each
- * internal engine.
- * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
- * require an open context.
- * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
- * over the last period specified by the user.
- * The period can be between 100ms and 1s, at a
- * resolution of 100ms. The return value is a
- * percentage of the utilization rate.
- * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
- * event occurred since the driver was loaded.
- * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
- * of the device in MHz. The maximum clock rate is
- * configurable via a sysfs parameter.
- * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
- * operations performed on the device since the last
- * time the driver was loaded.
- * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
- * for synchronization.
- * HL_INFO_CS_COUNTERS - Retrieve command submission counters
- */
-#define HL_INFO_HW_IP_INFO 0
-#define HL_INFO_HW_EVENTS 1
-#define HL_INFO_DRAM_USAGE 2
-#define HL_INFO_HW_IDLE 3
-#define HL_INFO_DEVICE_STATUS 4
-#define HL_INFO_DEVICE_UTILIZATION 6
-#define HL_INFO_HW_EVENTS_AGGREGATE 7
-#define HL_INFO_CLK_RATE 8
-#define HL_INFO_RESET_COUNT 9
-#define HL_INFO_TIME_SYNC 10
-#define HL_INFO_CS_COUNTERS 11
-
-#define HL_INFO_VERSION_MAX_LEN 128
-#define HL_INFO_CARD_NAME_MAX_LEN 16
-
-struct hl_info_hw_ip_info {
- __u64 sram_base_address;
- __u64 dram_base_address;
- __u64 dram_size;
- __u32 sram_size;
- __u32 num_of_events;
- __u32 device_id; /* PCI Device ID */
- __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */
- __u32 reserved[2];
- __u32 armcp_cpld_version;
- __u32 psoc_pci_pll_nr;
- __u32 psoc_pci_pll_nf;
- __u32 psoc_pci_pll_od;
- __u32 psoc_pci_pll_div_factor;
- __u8 tpc_enabled_mask;
- __u8 dram_enabled;
- __u8 pad[2];
- __u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
- __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
-};
-
-struct hl_info_dram_usage {
- __u64 dram_free_mem;
- __u64 ctx_dram_mem;
-};
-
-struct hl_info_hw_idle {
- __u32 is_idle;
- /*
- * Bitmask of busy engines.
- * Bits definition is according to `enum <chip>_engine_id'.
- */
- __u32 busy_engines_mask;
-};
-
-struct hl_info_device_status {
- __u32 status;
- __u32 pad;
-};
-
-struct hl_info_device_utilization {
- __u32 utilization;
- __u32 pad;
-};
-
-struct hl_info_clk_rate {
- __u32 cur_clk_rate_mhz;
- __u32 max_clk_rate_mhz;
-};
-
-struct hl_info_reset_count {
- __u32 hard_reset_cnt;
- __u32 soft_reset_cnt;
-};
-
-struct hl_info_time_sync {
- __u64 device_time;
- __u64 host_time;
-};
-
-/**
- * struct hl_cs_counters - command submission counters
- * @out_of_mem_drop_cnt: dropped due to a memory allocation failure
- * @parsing_drop_cnt: dropped due to an error in packet parsing
- * @queue_full_drop_cnt: dropped because the queue was full
- * @device_in_reset_drop_cnt: dropped because the device was in reset
- */
-struct hl_cs_counters {
- __u64 out_of_mem_drop_cnt;
- __u64 parsing_drop_cnt;
- __u64 queue_full_drop_cnt;
- __u64 device_in_reset_drop_cnt;
-};
-
-struct hl_info_cs_counters {
- struct hl_cs_counters cs_counters;
- struct hl_cs_counters ctx_cs_counters;
-};
-
-struct hl_info_args {
- /* Location of relevant struct in userspace */
- __u64 return_pointer;
- /*
- * The size of the return value. Just like "size" in "snprintf",
- * it limits how many bytes the kernel can write.
- *
- * For the hw_events array, the size should be
- * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
- */
- __u32 return_size;
-
- /* HL_INFO_* */
- __u32 op;
-
- union {
- /* Context ID - Currently not in use */
- __u32 ctx_id;
- /* Period value for utilization rate (100ms - 1000ms, in 100ms
- * resolution)
- */
- __u32 period_ms;
- };
-
- __u32 pad;
-};
-
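
As a quick illustration of how this structure is filled, below is a minimal
sketch of a device-utilization query. It assumes an already-open device fd
(the node is typically /dev/hl<N>) and the uapi include path shown; both are
assumptions that may differ per system:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <misc/habanalabs.h>	/* assumed uapi include path */

	static int query_utilization(int fd)
	{
		struct hl_info_device_utilization util;
		struct hl_info_args args;
		int rc;

		memset(&args, 0, sizeof(args));
		args.return_pointer = (__u64)(uintptr_t)&util;	/* user buffer */
		args.return_size = sizeof(util);	/* write limit for the kernel */
		args.op = HL_INFO_DEVICE_UTILIZATION;
		args.period_ms = 500;	/* 100ms - 1000ms, in 100ms resolution */

		rc = ioctl(fd, HL_IOCTL_INFO, &args);
		if (!rc)
			printf("utilization: %u%%\n", util.utilization);

		return rc;
	}
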
-/* Opcode to create a new command buffer */
-#define HL_CB_OP_CREATE 0
-/* Opcode to destroy previously created command buffer */
-#define HL_CB_OP_DESTROY 1
-
-/* 2MB minus 32 bytes for 2xMSG_PROT */
-#define HL_MAX_CB_SIZE (0x200000 - 32)
-
-struct hl_cb_in {
- /* Handle of CB or 0 if we want to create one */
- __u64 cb_handle;
- /* HL_CB_OP_* */
- __u32 op;
- /* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
- * will be allocated, regardless of this parameter's value, is PAGE_SIZE.
- */
- __u32 cb_size;
- /* Context ID - Currently not in use */
- __u32 ctx_id;
- __u32 pad;
-};
-
-struct hl_cb_out {
- /* Handle of CB */
- __u64 cb_handle;
-};
-
-union hl_cb_args {
- struct hl_cb_in in;
- struct hl_cb_out out;
-};
-
-/*
- * This structure size must always be fixed to 64 bytes for backward
- * compatibility
- */
-struct hl_cs_chunk {
- union {
- /* For an external queue, this represents a handle of a CB on the
- * Host.
- * For an internal queue in Goya, this represents an SRAM or
- * a DRAM address of the internal CB. In Gaudi, this might also
- * represent a mapped host address of the CB.
- *
- * A mapped host address is in the device address space, after
- * a host address was mapped by the device MMU.
- */
- __u64 cb_handle;
-
- /* Relevant only when HL_CS_FLAGS_WAIT is set.
- * This holds the address of an array of u64 values that contain
- * signal CS sequence numbers. The wait described by this job
- * will listen on all those signals (wait event per signal)
- */
- __u64 signal_seq_arr;
- };
-
- /* Index of queue to put the CB on */
- __u32 queue_index;
-
- union {
- /*
- * Size of command buffer with valid packets
- * Can be smaller than the actual CB size
- */
- __u32 cb_size;
-
- /* Relevant only when HL_CS_FLAGS_WAIT is set.
- * Number of entries in signal_seq_arr
- */
- __u32 num_signal_seq_arr;
- };
-
- /* HL_CS_CHUNK_FLAGS_* */
- __u32 cs_chunk_flags;
-
- /* Align structure to 64 bytes */
- __u32 pad[11];
-};
-
-/* SIGNAL and WAIT flags are mutually exclusive */
-#define HL_CS_FLAGS_FORCE_RESTORE 0x1
-#define HL_CS_FLAGS_SIGNAL 0x2
-#define HL_CS_FLAGS_WAIT 0x4
-
-#define HL_CS_STATUS_SUCCESS 0
-
-#define HL_MAX_JOBS_PER_CS 512
-
-struct hl_cs_in {
-
- /* This holds the address of an array of hl_cs_chunk for the restore phase */
- __u64 chunks_restore;
-
- /* This holds the address of an array of hl_cs_chunk for the execution phase */
- __u64 chunks_execute;
-
- /* This holds the address of an array of hl_cs_chunk for the store phase -
- * Currently not in use
- */
- __u64 chunks_store;
-
- /* Number of chunks in restore phase array. Maximum number is
- * HL_MAX_JOBS_PER_CS
- */
- __u32 num_chunks_restore;
-
- /* Number of chunks in execution array. Maximum number is
- * HL_MAX_JOBS_PER_CS
- */
- __u32 num_chunks_execute;
-
- /* Number of chunks in store phase array - Currently not in use */
- __u32 num_chunks_store;
-
- /* HL_CS_FLAGS_* */
- __u32 cs_flags;
-
- /* Context ID - Currently not in use */
- __u32 ctx_id;
-};
-
-struct hl_cs_out {
- /*
- * seq holds the sequence number of the CS to pass to the wait ioctl. All
- * values are valid except for 0 and ULLONG_MAX
- */
- __u64 seq;
- /* HL_CS_STATUS_* */
- __u32 status;
- __u32 pad;
-};
-
-union hl_cs_args {
- struct hl_cs_in in;
- struct hl_cs_out out;
-};
-
-struct hl_wait_cs_in {
- /* Command submission sequence number */
- __u64 seq;
- /* Absolute timeout to wait in microseconds */
- __u64 timeout_us;
- /* Context ID - Currently not in use */
- __u32 ctx_id;
- __u32 pad;
-};
-
-#define HL_WAIT_CS_STATUS_COMPLETED 0
-#define HL_WAIT_CS_STATUS_BUSY 1
-#define HL_WAIT_CS_STATUS_TIMEDOUT 2
-#define HL_WAIT_CS_STATUS_ABORTED 3
-#define HL_WAIT_CS_STATUS_INTERRUPTED 4
-
-struct hl_wait_cs_out {
- /* HL_WAIT_CS_STATUS_* */
- __u32 status;
- __u32 pad;
-};
-
-union hl_wait_cs_args {
- struct hl_wait_cs_in in;
- struct hl_wait_cs_out out;
-};
-
-/* Opcode to allocate device memory */
-#define HL_MEM_OP_ALLOC 0
-/* Opcode to free previously allocated device memory */
-#define HL_MEM_OP_FREE 1
-/* Opcode to map host and device memory */
-#define HL_MEM_OP_MAP 2
-/* Opcode to unmap previously mapped host and device memory */
-#define HL_MEM_OP_UNMAP 3
-
-/* Memory flags */
-#define HL_MEM_CONTIGUOUS 0x1
-#define HL_MEM_SHARED 0x2
-#define HL_MEM_USERPTR 0x4
-
-struct hl_mem_in {
- union {
- /* HL_MEM_OP_ALLOC - allocate device memory */
- struct {
- /* Size to alloc */
- __u64 mem_size;
- } alloc;
-
- /* HL_MEM_OP_FREE - free device memory */
- struct {
- /* Handle returned from HL_MEM_OP_ALLOC */
- __u64 handle;
- } free;
-
- /* HL_MEM_OP_MAP - map device memory */
- struct {
- /*
- * Requested virtual address of mapped memory.
- * The driver will try to map the requested region to
- * this hint address, as long as the address is valid
- * and not already mapped. The user should check the
- * returned address of the IOCTL to make sure they got
- * the hint address. Passing 0 here means that the
- * driver will choose the address itself.
- */
- __u64 hint_addr;
- /* Handle returned from HL_MEM_OP_ALLOC */
- __u64 handle;
- } map_device;
-
- /* HL_MEM_OP_MAP - map host memory */
- struct {
- /* Address of allocated host memory */
- __u64 host_virt_addr;
- /*
- * Requested virtual address of mapped memory.
- * The driver will try to map the requested region to
- * this hint address, as long as the address is valid
- * and not already mapped. The user should check the
- * returned address of the IOCTL to make sure they got
- * the hint address. Passing 0 here means that the
- * driver will choose the address itself.
- */
- __u64 hint_addr;
- /* Size of allocated host memory */
- __u64 mem_size;
- } map_host;
-
- /* HL_MEM_OP_UNMAP - unmap host memory */
- struct {
- /* Virtual address returned from HL_MEM_OP_MAP */
- __u64 device_virt_addr;
- } unmap;
- };
-
- /* HL_MEM_OP_* */
- __u32 op;
- /* HL_MEM_* flags */
- __u32 flags;
- /* Context ID - Currently not in use */
- __u32 ctx_id;
- __u32 pad;
-};
-
-struct hl_mem_out {
- union {
- /*
- * Used for HL_MEM_OP_MAP as the virtual address that was
- * assigned in the device VA space.
- * A value of 0 means the requested operation failed.
- */
- __u64 device_virt_addr;
-
- /*
- * Used for HL_MEM_OP_ALLOC. This is the assigned
- * handle for the allocated memory
- */
- __u64 handle;
- };
-};
-
-union hl_mem_args {
- struct hl_mem_in in;
- struct hl_mem_out out;
-};
-
-#define HL_DEBUG_MAX_AUX_VALUES 10
-
-struct hl_debug_params_etr {
- /* Address in memory to allocate buffer */
- __u64 buffer_address;
-
- /* Size of buffer to allocate */
- __u64 buffer_size;
-
- /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
- __u32 sink_mode;
- __u32 pad;
-};
-
-struct hl_debug_params_etf {
- /* Address in memory to allocate buffer */
- __u64 buffer_address;
-
- /* Size of buffer to allocate */
- __u64 buffer_size;
-
- /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
- __u32 sink_mode;
- __u32 pad;
-};
-
-struct hl_debug_params_stm {
- /* Two bit masks for HW event and Stimulus Port */
- __u64 he_mask;
- __u64 sp_mask;
-
- /* Trace source ID */
- __u32 id;
-
- /* Frequency for the timestamp register */
- __u32 frequency;
-};
-
-struct hl_debug_params_bmon {
- /* Two address ranges that the user can request to filter */
- __u64 start_addr0;
- __u64 addr_mask0;
-
- __u64 start_addr1;
- __u64 addr_mask1;
-
- /* Capture window configuration */
- __u32 bw_win;
- __u32 win_capture;
-
- /* Trace source ID */
- __u32 id;
- __u32 pad;
-};
-
-struct hl_debug_params_spmu {
- /* Event types selection */
- __u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
-
- /* Number of selected event types */
- __u32 event_types_num;
- __u32 pad;
-};
-
-/* Opcode for ETR component */
-#define HL_DEBUG_OP_ETR 0
-/* Opcode for ETF component */
-#define HL_DEBUG_OP_ETF 1
-/* Opcode for STM component */
-#define HL_DEBUG_OP_STM 2
-/* Opcode for FUNNEL component */
-#define HL_DEBUG_OP_FUNNEL 3
-/* Opcode for BMON component */
-#define HL_DEBUG_OP_BMON 4
-/* Opcode for SPMU component */
-#define HL_DEBUG_OP_SPMU 5
-/* Opcode for timestamp (deprecated) */
-#define HL_DEBUG_OP_TIMESTAMP 6
-/* Opcode for setting the device into or out of debug mode. The enable
- * variable should be 1 for enabling debug mode and 0 for disabling it.
- */
-#define HL_DEBUG_OP_SET_MODE 7
-
-struct hl_debug_args {
- /*
- * Pointer to user input structure.
- * This field is relevant to specific opcodes.
- */
- __u64 input_ptr;
- /* Pointer to user output structure */
- __u64 output_ptr;
- /* Size of user input structure */
- __u32 input_size;
- /* Size of user output structure */
- __u32 output_size;
- /* HL_DEBUG_OP_* */
- __u32 op;
- /*
- * Register index in the component, taken from the debug_regs_index enum
- * in the various ASIC header files
- */
- __u32 reg_idx;
- /* Enable/disable */
- __u32 enable;
- /* Context ID - Currently not in use */
- __u32 ctx_id;
-};
-
-/*
- * Various information operations such as:
- * - H/W IP information
- * - Current DRAM usage
- *
- * The user calls this IOCTL with an opcode that describes the required
- * information. The user should supply a pointer to a user-allocated memory
- * chunk, which will be filled by the driver with the requested information.
- *
- * The user supplies the maximum size to copy into the user's memory, in
- * order to prevent data corruption in case of differences between the
- * definitions of structures in kernel and userspace, e.g. in case of an old
- * userspace and a new kernel driver.
- */
-#define HL_IOCTL_INFO \
- _IOWR('H', 0x01, struct hl_info_args)
-
-/*
- * Command Buffer
- * - Request a Command Buffer
- * - Destroy a Command Buffer
- *
- * The command buffers are memory blocks that reside in DMA-able address
- * space and are physically contiguous so they can be accessed by the device
- * directly. They are allocated using the coherent DMA API.
- *
- * When creating a new CB, the IOCTL returns a handle to it, and the user-space
- * process needs to use that handle to mmap the buffer so it can access it.
- *
- */
-#define HL_IOCTL_CB \
- _IOWR('H', 0x02, union hl_cb_args)
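
A minimal sketch of creating and mapping a CB, assuming the includes from the
earlier info sketch plus <sys/mman.h>, and assuming (as the driver of this
era expected) that the returned handle doubles as the mmap offset:

	static int create_and_map_cb(int fd, __u32 size, void **va, __u64 *handle)
	{
		union hl_cb_args cb = {0};

		cb.in.op = HL_CB_OP_CREATE;
		cb.in.cb_size = size;	/* at least PAGE_SIZE is allocated */

		if (ioctl(fd, HL_IOCTL_CB, &cb))
			return -1;

		/* Map the CB so the process can fill it with packets */
		*va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, cb.out.cb_handle);
		if (*va == MAP_FAILED)
			return -1;

		*handle = cb.out.cb_handle;
		return 0;
	}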
-
-/*
- * Command Submission
- *
- * To submit work to the device, the user needs to call this IOCTL with a set
- * of JOBS. That set of JOBS constitutes a CS object.
- * Each JOB will be enqueued on a specific queue, according to the user's input.
- * There can be more than one JOB per queue.
- *
- * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
- * a second set is for "execution" phase and a third set is for "store" phase.
- * The JOBS on the "restore" phase are enqueued only after context-switch
- * (or if its the first CS for this context). The user can also order the
- * driver to run the "restore" phase explicitly
- *
- * There are two types of queues - external and internal. External queues
- * are DMA queues which transfer data from/to the Host. All other queues are
- * internal. The driver will get completion notifications from the device only
- * on JOBS which are enqueued in the external queues.
- *
- * For jobs on external queues, the user needs to create command buffers
- * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
- * internal queues, the user needs to prepare a "command buffer" with packets
- * on either the device SRAM/DRAM or the host, and give the device address of
- * that buffer to the CS ioctl.
- *
- * This IOCTL is asynchronous in regard to the actual execution of the CS. This
- * means it returns immediately after ALL the JOBS were enqueued on their
- * relevant queues. Therefore, the user mustn't assume the CS has been completed
- * or has even started to execute.
- *
- * Upon successful enqueue, the IOCTL returns a sequence number which the user
- * can use with the "Wait for CS" IOCTL to check whether the handle's CS
- * external JOBS have been completed. Note that if the CS has internal JOBS
- * which can execute AFTER the external JOBS have finished, the driver might
- * report that the CS has finished executing BEFORE the internal JOBS have
- * actually finished executing.
- *
- * Even though the sequence number increments per CS, the user can NOT
- * automatically assume that if CS with sequence number N finished, then CS
- * with sequence number N-1 also finished. The user can make this assumption if
- * and only if CS N and CS N-1 are exactly the same (same CBs for the same
- * queues).
- */
-#define HL_IOCTL_CS \
- _IOWR('H', 0x03, union hl_cs_args)
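
A sketch of a single-job submission on an external queue; the queue selection
(GAUDI_QUEUE_ID_DMA_0_0) and the CB handle carried over from the CB sketch
above are illustrative assumptions:

	static int submit_one_job(int fd, __u64 cb_handle, __u32 used_size,
				  __u64 *seq)
	{
		struct hl_cs_chunk chunk = {0};
		union hl_cs_args cs = {0};

		chunk.cb_handle = cb_handle;	/* CB created via HL_IOCTL_CB */
		chunk.queue_index = GAUDI_QUEUE_ID_DMA_0_0;
		chunk.cb_size = used_size;	/* bytes holding valid packets */

		cs.in.chunks_execute = (__u64)(uintptr_t)&chunk;
		cs.in.num_chunks_execute = 1;

		if (ioctl(fd, HL_IOCTL_CS, &cs) ||
		    cs.out.status != HL_CS_STATUS_SUCCESS)
			return -1;

		*seq = cs.out.seq;	/* pass this to HL_IOCTL_WAIT_CS */
		return 0;
	}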
-
-/*
- * Wait for Command Submission
- *
- * The user can call this IOCTL with a handle it received from the CS IOCTL
- * to wait until the handle's CS has finished executing. The user will wait
- * inside the kernel until the CS has finished or until the user-requested
- * timeout has expired.
- *
- * The return value of the IOCTL is a standard Linux error code. The possible
- * values are:
- *
- * EINTR - Kernel waiting has been interrupted, e.g. due to an OS signal
- * that the user process received
- * ETIMEDOUT - The CS has caused a timeout on the device
- * EIO - The CS was aborted (usually because the device was reset)
- * ENODEV - The device wants to do a hard reset (so the user needs to close the FD)
- *
- * The driver also returns a custom define inside the IOCTL which can be:
- *
- * HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
- * HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
- * HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device
- * (ETIMEDOUT)
- * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
- * device was reset (EIO)
- * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
- *
- */
-
-#define HL_IOCTL_WAIT_CS \
- _IOWR('H', 0x04, union hl_wait_cs_args)
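
A matching wait sketch; the one-second timeout is an arbitrary assumption:

	static int wait_for_cs(int fd, __u64 seq)
	{
		union hl_wait_cs_args wait = {0};

		wait.in.seq = seq;
		wait.in.timeout_us = 1000000;	/* wait up to one second */

		if (ioctl(fd, HL_IOCTL_WAIT_CS, &wait))
			return -1;	/* errno is EINTR/ETIMEDOUT/EIO/ENODEV */

		/* On success, inspect the custom status, e.g. COMPLETED vs. BUSY */
		return wait.out.status == HL_WAIT_CS_STATUS_COMPLETED ? 0 : 1;
	}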
-
-/*
- * Memory
- * - Map host memory to device MMU
- * - Unmap host memory from device MMU
- *
- * This IOCTL allows the user to map host memory to the device MMU
- *
- * For host memory, the IOCTL doesn't allocate memory. The user is supposed
- * to allocate the memory in user-space (malloc/new). The driver pins the
- * physical pages (up to the limit allowed by the OS), assigns a virtual
- * address in the device VA space and initializes the device MMU.
- *
- * There is an option for the user to specify the requested virtual address.
- *
- */
-#define HL_IOCTL_MEMORY \
- _IOWR('H', 0x05, union hl_mem_args)
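
A sketch of mapping a user-allocated host buffer into the device VA space;
using HL_MEM_USERPTR for host memory follows the flag list above, and the
page-aligned allocation is left to the caller as an assumption:

	static __u64 map_host_buffer(int fd, void *buf, __u64 size)
	{
		union hl_mem_args mem = {0};

		mem.in.op = HL_MEM_OP_MAP;
		mem.in.flags = HL_MEM_USERPTR;	/* host memory, not device memory */
		mem.in.map_host.host_virt_addr = (__u64)(uintptr_t)buf;
		mem.in.map_host.mem_size = size;
		mem.in.map_host.hint_addr = 0;	/* let the driver pick the VA */

		if (ioctl(fd, HL_IOCTL_MEMORY, &mem))
			return 0;

		return mem.out.device_virt_addr;	/* 0 means failure */
	}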
-
-/*
- * Debug
- * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
- *
- * This IOCTL allows the user to get debug traces from the chip.
- *
- * Before the user can send configuration requests to the various
- * debug/profile engines, it needs to set the device into debug mode.
- * This is because the debug/profile infrastructure is a shared component in
- * the device and we can't allow multiple users to access it at the same time.
- *
- * Once a user sets the device into debug mode, the driver won't allow other
- * users to "work" with the device, i.e. open an FD. If multiple users have
- * the device open, the driver won't allow any user to debug the device.
- *
- * For each configuration request, the user needs to provide the register index
- * and essential data such as buffer address and size.
- *
- * Once the user has finished using the debug/profile engines, they should
- * set the device into non-debug mode, i.e. disable debug mode.
- *
- * The driver can decide to "kick out" the user if he abuses this interface.
- *
- */
-#define HL_IOCTL_DEBUG \
- _IOWR('H', 0x06, struct hl_debug_args)
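
Entering and leaving debug mode is the simplest use of this interface; a
minimal sketch:

	static int set_debug_mode(int fd, int on)
	{
		struct hl_debug_args dbg = {0};

		dbg.op = HL_DEBUG_OP_SET_MODE;
		dbg.enable = on ? 1 : 0;	/* 1 enters debug mode, 0 leaves it */

		return ioctl(fd, HL_IOCTL_DEBUG, &dbg);
	}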
-
-#define HL_COMMAND_START 0x01
-#define HL_COMMAND_END 0x07
-
-#endif /* HABANALABS_H_ */
diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h
index 6435f0bcb556..3e66dbc2f323 100644
--- a/include/uapi/misc/uacce/hisi_qm.h
+++ b/include/uapi/misc/uacce/hisi_qm.h
@@ -14,10 +14,26 @@ struct hisi_qp_ctx {
__u16 qc_type;
};
+/**
+ * struct hisi_qp_info - User data for hisi qp.
+ * @sqe_size: Submission queue element size
+ * @sq_depth: The number of SQEs
+ * @cq_depth: The number of CQEs
+ * @reserved: Reserved data
+ */
+struct hisi_qp_info {
+ __u32 sqe_size;
+ __u16 sq_depth;
+ __u16 cq_depth;
+ __u64 reserved;
+};
+
#define HISI_QM_API_VER_BASE "hisi_qm_v1"
#define HISI_QM_API_VER2_BASE "hisi_qm_v2"
+#define HISI_QM_API_VER3_BASE "hisi_qm_v3"
/* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */
#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx)
-
+/* UACCE_CMD_QM_SET_QP_INFO: Set qp depth and BD size */
+#define UACCE_CMD_QM_SET_QP_INFO _IOWR('H', 11, struct hisi_qp_info)
#endif
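
For the new UACCE_CMD_QM_SET_QP_INFO above, userspace fills a struct
hisi_qp_info and issues the ioctl on a queue fd obtained from uacce. The SQE
size and depths below are illustrative assumptions; since the command is
_IOWR, the kernel may write back the values it actually granted:

	struct hisi_qp_info qp_info = {
		.sqe_size = 128,	/* assumed BD size */
		.sq_depth = 1024,	/* assumed number of SQEs */
		.cq_depth = 1024,	/* assumed number of CQEs */
	};

	if (ioctl(queue_fd, UACCE_CMD_QM_SET_QP_INFO, &qp_info))
		perror("UACCE_CMD_QM_SET_QP_INFO");
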
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 65b9db936557..714d55b49d2a 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -55,9 +55,9 @@ struct mtd_oob_buf64 {
* @MTD_OPS_RAW: data are transferred as-is, with no error correction;
* this mode implies %MTD_OPS_PLACE_OOB
*
- * These modes can be passed to ioctl(MEMWRITE) and are also used internally.
- * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs.
- * %MTD_FILE_MODE_RAW.
+ * These modes can be passed to ioctl(MEMWRITE) and ioctl(MEMREAD); they are
+ * also used internally. See notes on "MTD file modes" for discussion on
+ * %MTD_OPS_RAW vs. %MTD_FILE_MODE_RAW.
*/
enum {
MTD_OPS_PLACE_OOB = 0,
@@ -69,8 +69,8 @@ enum {
* struct mtd_write_req - data structure for requesting a write operation
*
* @start: start address
- * @len: length of data buffer
- * @ooblen: length of OOB buffer
+ * @len: length of data buffer (only lower 32 bits are used)
+ * @ooblen: length of OOB buffer (only lower 32 bits are used)
* @usr_data: user-provided data buffer
* @usr_oob: user-provided OOB buffer
* @mode: MTD mode (see "MTD operation modes")
@@ -91,6 +91,53 @@ struct mtd_write_req {
__u8 padding[7];
};
+/**
+ * struct mtd_read_req_ecc_stats - ECC statistics for a read operation
+ *
+ * @uncorrectable_errors: the number of uncorrectable errors that happened
+ * during the read operation
+ * @corrected_bitflips: the number of bitflips corrected during the read
+ * operation
+ * @max_bitflips: the maximum number of bitflips detected in any single ECC
+ * step for the data read during the operation; this information
+ * can be used to decide whether the data stored in a specific
+ * region of the MTD device should be moved somewhere else to
+ * avoid data loss.
+ */
+struct mtd_read_req_ecc_stats {
+ __u32 uncorrectable_errors;
+ __u32 corrected_bitflips;
+ __u32 max_bitflips;
+};
+
+/**
+ * struct mtd_read_req - data structure for requesting a read operation
+ *
+ * @start: start address
+ * @len: length of data buffer (only lower 32 bits are used)
+ * @ooblen: length of OOB buffer (only lower 32 bits are used)
+ * @usr_data: user-provided data buffer
+ * @usr_oob: user-provided OOB buffer
+ * @mode: MTD mode (see "MTD operation modes")
+ * @padding: reserved, must be set to 0
+ * @ecc_stats: ECC statistics for the read operation
+ *
+ * This structure supports ioctl(MEMREAD) operations, allowing data and/or OOB
+ * reads in various modes. To read OOB only, set @usr_data == NULL; to read
+ * data only, set @usr_oob == NULL. However, setting both @usr_data and
+ * @usr_oob to NULL is not allowed.
+ */
+struct mtd_read_req {
+ __u64 start;
+ __u64 len;
+ __u64 ooblen;
+ __u64 usr_data;
+ __u64 usr_oob;
+ __u8 mode;
+ __u8 padding[7];
+ struct mtd_read_req_ecc_stats ecc_stats;
+};
+
#define MTD_ABSENT 0
#define MTD_RAM 1
#define MTD_ROM 2
@@ -205,6 +252,14 @@ struct otp_info {
* without OOB, e.g., NOR flash.
*/
#define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
+/* Erase a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */
+#define OTPERASE _IOW('M', 25, struct otp_info)
+/*
+ * Most generic read interface; can read in-band and/or out-of-band in various
+ * modes (see "struct mtd_read_req"). This ioctl is not supported for flashes
+ * without OOB, e.g., NOR flash.
+ */
+#define MEMREAD _IOWR('M', 26, struct mtd_read_req)
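
A sketch of a MEMREAD call that reads one page of data plus its autoplaced
OOB and then inspects the new ECC statistics; the 2048/64-byte page and OOB
sizes are assumptions for a typical SLC NAND:

	struct mtd_read_req req = {0};
	unsigned char data[2048], oob[64];

	req.start = 0;	/* page-aligned flash offset */
	req.len = sizeof(data);
	req.ooblen = sizeof(oob);
	req.usr_data = (__u64)(uintptr_t)data;
	req.usr_oob = (__u64)(uintptr_t)oob;
	req.mode = MTD_OPS_AUTO_OOB;

	if (ioctl(mtd_fd, MEMREAD, &req) == 0)
		printf("corrected %u bitflips, max %u per ECC step\n",
		       req.ecc_stats.corrected_bitflips,
		       req.ecc_stats.max_bitflips);
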
/*
* Obsolete legacy interface. Keep it in order not to break userspace
@@ -268,8 +323,9 @@ struct mtd_ecc_stats {
* Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
* raw access to the flash, without error correction or autoplacement schemes.
* Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode
- * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is
- * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)).
+ * (e.g., when using ioctl(MEMWRITE) or ioctl(MEMREAD)), but in some cases, the
+ * MTD_FILE_MODE is used out of necessity (e.g., `write()',
+ * ioctl(MEMWRITEOOB64)).
*/
enum mtd_file_modes {
MTD_FILE_MODE_NORMAL = MTD_OTP_OFF,
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index b69e9ba6742b..e1571603175e 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -247,6 +247,8 @@ enum {
* @vid_hdr_offset: VID header offset (use defaults if %0)
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @padding: reserved for future, not used, has to be zeroed
+ * @disable_fm: whether to disable fastmap
+ * @need_resv_pool: whether to reserve free PEBs for filling the pool/wl_pool
*
* This data structure is used to specify MTD device UBI has to attach and the
* parameters it has to use. The number which should be assigned to the new UBI
@@ -281,13 +283,19 @@ enum {
* eraseblocks for new bad eraseblocks, but attempts to use available
* eraseblocks (if any). The accepted range is 0-768. If 0 is given, the
* default kernel value of %CONFIG_MTD_UBI_BEB_LIMIT will be used.
+ *
+ * If @disable_fm is non-zero, UBI doesn't create a new fastmap even if the
+ * module parameter 'fm_autoconvert' is set, and the existing fastmap will be
+ * destroyed after a full scan.
*/
struct ubi_attach_req {
__s32 ubi_num;
__s32 mtd_num;
__s32 vid_hdr_offset;
__s16 max_beb_per1024;
- __s8 padding[10];
+ __s8 disable_fm;
+ __s8 need_resv_pool;
+ __s8 padding[8];
};
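
With the new fields, an attach request that forces a full scan might look
like the sketch below; it assumes the usual /dev/ubi_ctrl fd and the existing
UBI_IOCATT ioctl, and the MTD number is an arbitrary assumption:

	struct ubi_attach_req req = {0};

	req.ubi_num = UBI_DEV_NUM_AUTO;	/* let UBI pick a device number */
	req.mtd_num = 0;	/* MTD device to attach (assumed) */
	req.disable_fm = 1;	/* skip fastmap, force a full scan */

	if (ioctl(ctrl_fd, UBI_IOCATT, &req))
		perror("UBI_IOCATT");
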
/*
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index dc52e3cf574c..c0c34aca90ec 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -41,6 +41,7 @@
#define __BNXT_RE_UVERBS_ABI_H__
#include <linux/types.h>
+#include <rdma/ib_user_ioctl_cmds.h>
#define BNXT_RE_ABI_VERSION 1
@@ -49,7 +50,26 @@
#define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18
enum {
- BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL
+ BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
+ BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
+ BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
+ BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
+ BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
+ BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40,
+};
+
+enum bnxt_re_wqe_mode {
+ BNXT_QPLIB_WQE_MODE_STATIC = 0x00,
+ BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01,
+ BNXT_QPLIB_WQE_MODE_INVALID = 0x02,
+};
+
+enum {
+ BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+};
+
+struct bnxt_re_uctx_req {
+ __aligned_u64 comp_mask;
};
struct bnxt_re_uctx_resp {
@@ -62,6 +82,8 @@ struct bnxt_re_uctx_resp {
__aligned_u64 comp_mask;
__u32 chip_id0;
__u32 chip_id1;
+ __u32 mode;
+ __u32 rsvd1; /* padding */
};
/*
@@ -80,11 +102,20 @@ struct bnxt_re_cq_req {
__aligned_u64 cq_handle;
};
+enum bnxt_re_cq_mask {
+ BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
struct bnxt_re_cq_resp {
__u32 cqid;
__u32 tail;
__u32 phase;
__u32 rsvd;
+ __aligned_u64 comp_mask;
+};
+
+struct bnxt_re_resize_cq_req {
+ __aligned_u64 cq_va;
};
struct bnxt_re_qp_req {
@@ -114,4 +145,61 @@ enum bnxt_re_shpg_offt {
BNXT_RE_END_RESV_OFFT = 0xFF0
};
+enum bnxt_re_objects {
+ BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_OBJECT_NOTIFY_DRV,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+};
+
+enum bnxt_re_alloc_page_type {
+ BNXT_RE_ALLOC_WC_PAGE = 0,
+ BNXT_RE_ALLOC_DBR_BAR_PAGE,
+ BNXT_RE_ALLOC_DBR_PAGE,
+};
+
+enum bnxt_re_var_alloc_page_attrs {
+ BNXT_RE_ALLOC_PAGE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_ALLOC_PAGE_TYPE,
+ BNXT_RE_ALLOC_PAGE_DPI,
+ BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+ BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+};
+
+enum bnxt_re_alloc_page_attrs {
+ BNXT_RE_DESTROY_PAGE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum bnxt_re_alloc_page_methods {
+ BNXT_RE_METHOD_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_METHOD_DESTROY_PAGE,
+};
+
+enum bnxt_re_notify_drv_methods {
+ BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+/* Toggle mem */
+
+enum bnxt_re_get_toggle_mem_type {
+ BNXT_RE_CQ_TOGGLE_MEM = 0,
+ BNXT_RE_SRQ_TOGGLE_MEM,
+};
+
+enum bnxt_re_var_toggle_mem_attrs {
+ BNXT_RE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_TOGGLE_MEM_TYPE,
+ BNXT_RE_TOGGLE_MEM_RES_ID,
+ BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+};
+
+enum bnxt_re_toggle_mem_attrs {
+ BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum bnxt_re_toggle_mem_methods {
+ BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 507a2862bedb..701e2d567e41 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
#define EFA_ABI_USER_H
#include <linux/types.h>
+#include <rdma/ib_user_ioctl_cmds.h>
/*
* Increment this value if any changes that break userspace ABI
@@ -52,11 +53,21 @@ struct efa_ibv_alloc_pd_resp {
__u8 reserved_30[2];
};
+enum {
+ EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL = 1 << 0,
+ EFA_CREATE_CQ_WITH_SGID = 1 << 1,
+};
+
struct efa_ibv_create_cq {
__u32 comp_mask;
__u32 cq_entry_size;
__u16 num_sub_cqs;
- __u8 reserved_50[6];
+ __u8 flags;
+ __u8 reserved_58[5];
+};
+
+enum {
+ EFA_CREATE_CQ_RESP_DB_OFF = 1 << 0,
};
struct efa_ibv_create_cq_resp {
@@ -65,7 +76,9 @@ struct efa_ibv_create_cq_resp {
__aligned_u64 q_mmap_key;
__aligned_u64 q_mmap_size;
__u16 cq_idx;
- __u8 reserved_d0[6];
+ __u8 reserved_d0[2];
+ __u32 db_off;
+ __aligned_u64 db_mmap_key;
};
enum {
@@ -105,6 +118,11 @@ struct efa_ibv_create_ah_resp {
enum {
EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
+ EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1,
+ EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS = 1 << 2,
+ EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID = 1 << 3,
+ EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4,
+ EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5,
};
struct efa_ibv_ex_query_device_resp {
@@ -117,4 +135,22 @@ struct efa_ibv_ex_query_device_resp {
__u32 device_caps;
};
+enum {
+ EFA_QUERY_MR_VALIDITY_RECV_IC_ID = 1 << 0,
+ EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID = 1 << 1,
+ EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID = 1 << 2,
+};
+
+enum efa_query_mr_attrs {
+ EFA_IB_ATTR_QUERY_MR_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+};
+
+enum efa_mr_methods {
+ EFA_IB_METHOD_MR_QUERY = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#endif /* EFA_ABI_USER_H */
diff --git a/include/uapi/rdma/erdma-abi.h b/include/uapi/rdma/erdma-abi.h
new file mode 100644
index 000000000000..b7a0222f978f
--- /dev/null
+++ b/include/uapi/rdma/erdma-abi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2020-2022, Alibaba Group.
+ */
+
+#ifndef __ERDMA_USER_H__
+#define __ERDMA_USER_H__
+
+#include <linux/types.h>
+
+#define ERDMA_ABI_VERSION 1
+
+struct erdma_ureq_create_cq {
+ __aligned_u64 db_record_va;
+ __aligned_u64 qbuf_va;
+ __u32 qbuf_len;
+ __u32 rsvd0;
+};
+
+struct erdma_uresp_create_cq {
+ __u32 cq_id;
+ __u32 num_cqe;
+};
+
+struct erdma_ureq_create_qp {
+ __aligned_u64 db_record_va;
+ __aligned_u64 qbuf_va;
+ __u32 qbuf_len;
+ __u32 rsvd0;
+};
+
+struct erdma_uresp_create_qp {
+ __u32 qp_id;
+ __u32 num_sqe;
+ __u32 num_rqe;
+ __u32 rq_offset;
+};
+
+struct erdma_uresp_alloc_ctx {
+ __u32 dev_id;
+ __u32 pad;
+ __u32 sdb_type;
+ __u32 sdb_offset;
+ __aligned_u64 sdb;
+ __aligned_u64 rdb;
+ __aligned_u64 cdb;
+};
+
+#endif
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index d95ef9a2b032..1106a7c90b29 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -180,7 +180,7 @@ struct hfi1_sdma_comp_entry {
struct hfi1_status {
__aligned_u64 dev; /* device/hw status bits */
__aligned_u64 port; /* port state and status bits */
- char freezemsg[0];
+ char freezemsg[];
};
enum sdma_req_opcode {
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index eb76b38a00d4..158670da2b2a 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -39,6 +39,12 @@
struct hns_roce_ib_create_cq {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
+ __u32 cqe_size;
+ __u32 reserved;
+};
+
+enum hns_roce_cq_cap_flags {
+ HNS_ROCE_CQ_FLAG_RECORD_DB = 1 << 0,
};
struct hns_roce_ib_create_cq_resp {
@@ -46,15 +52,36 @@ struct hns_roce_ib_create_cq_resp {
__aligned_u64 cap_flags;
};
+enum hns_roce_srq_cap_flags {
+ HNS_ROCE_SRQ_CAP_RECORD_DB = 1 << 0,
+};
+
+enum hns_roce_srq_cap_flags_resp {
+ HNS_ROCE_RSP_SRQ_CAP_RECORD_DB = 1 << 0,
+};
+
struct hns_roce_ib_create_srq {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__aligned_u64 que_addr;
+ __u32 req_cap_flags; /* Use enum hns_roce_srq_cap_flags */
+ __u32 reserved;
};
struct hns_roce_ib_create_srq_resp {
__u32 srqn;
- __u32 reserved;
+ __u32 cap_flags; /* Use enum hns_roce_srq_cap_flags */
+};
+
+enum hns_roce_congest_type_flags {
+ HNS_ROCE_CREATE_QP_FLAGS_DCQCN,
+ HNS_ROCE_CREATE_QP_FLAGS_LDCP,
+ HNS_ROCE_CREATE_QP_FLAGS_HC3,
+ HNS_ROCE_CREATE_QP_FLAGS_DIP,
+};
+
+enum hns_roce_create_qp_comp_mask {
+ HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE = 1 << 0,
};
struct hns_roce_ib_create_qp {
@@ -65,19 +92,58 @@ struct hns_roce_ib_create_qp {
__u8 sq_no_prefetch;
__u8 reserved[5];
__aligned_u64 sdb_addr;
+ __aligned_u64 comp_mask; /* Use enum hns_roce_create_qp_comp_mask */
+ __aligned_u64 create_flags;
+ __aligned_u64 cong_type_flags;
+};
+
+enum hns_roce_qp_cap_flags {
+ HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
+ HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
+ HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+ HNS_ROCE_QP_CAP_DIRECT_WQE = 1 << 5,
};
struct hns_roce_ib_create_qp_resp {
__aligned_u64 cap_flags;
+ __aligned_u64 dwqe_mmap_key;
+};
+
+enum {
+ HNS_ROCE_EXSGE_FLAGS = 1 << 0,
+ HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1,
+ HNS_ROCE_CQE_INLINE_FLAGS = 1 << 2,
+};
+
+enum {
+ HNS_ROCE_RSP_EXSGE_FLAGS = 1 << 0,
+ HNS_ROCE_RSP_RQ_INLINE_FLAGS = 1 << 1,
+ HNS_ROCE_RSP_CQE_INLINE_FLAGS = 1 << 2,
};
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
+ __u32 cqe_size;
+ __u32 srq_tab_size;
__u32 reserved;
+ __u32 config;
+ __u32 max_inline_data;
+ __u8 congest_type;
+ __u8 reserved0[7];
+};
+
+struct hns_roce_ib_alloc_ucontext {
+ __u32 config;
+ __u32 reserved;
};
struct hns_roce_ib_alloc_pd_resp {
__u32 pdn;
};
+struct hns_roce_ib_create_ah_resp {
+ __u8 dmac[6];
+ __u8 reserved[2];
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h
deleted file mode 100644
index 79890baa6fdb..000000000000
--- a/include/uapi/rdma/i40iw-abi.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef I40IW_ABI_H
-#define I40IW_ABI_H
-
-#include <linux/types.h>
-
-#define I40IW_ABI_VER 5
-
-struct i40iw_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct i40iw_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 kernel_ver;
- __u8 reserved[3];
-};
-
-struct i40iw_alloc_pd_resp {
- __u32 pd_id;
- __u8 reserved[4];
-};
-
-struct i40iw_create_cq_req {
- __aligned_u64 user_cq_buffer;
- __aligned_u64 user_shadow_area;
-};
-
-struct i40iw_create_qp_req {
- __aligned_u64 user_wqe_buffers;
- __aligned_u64 user_compl_ctx;
-
- /* UDA QP PHB */
- __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
- __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
-};
-
-enum i40iw_memreg_type {
- IW_MEMREG_TYPE_MEM = 0x0000,
- IW_MEMREG_TYPE_QP = 0x0001,
- IW_MEMREG_TYPE_CQ = 0x0002,
-};
-
-struct i40iw_mem_reg_req {
- __u16 reg_type; /* Memory, QP or CQ */
- __u16 cq_pages;
- __u16 rq_pages;
- __u16 sq_pages;
-};
-
-struct i40iw_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct i40iw_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 i40iw_drv_opt;
- __u16 push_idx;
- __u8 lsmm;
- __u8 rsvd2;
-};
-
-#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 99dcabf61a71..dafc7ebe545b 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -70,6 +71,8 @@ enum uverbs_methods_device {
UVERBS_METHOD_QUERY_PORT,
UVERBS_METHOD_GET_CONTEXT,
UVERBS_METHOD_QUERY_CONTEXT,
+ UVERBS_METHOD_QUERY_GID_TABLE,
+ UVERBS_METHOD_QUERY_GID_ENTRY,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -249,6 +252,7 @@ enum uverbs_methods_mr {
UVERBS_METHOD_MR_DESTROY,
UVERBS_METHOD_ADVISE_MR,
UVERBS_METHOD_QUERY_MR,
+ UVERBS_METHOD_REG_DMABUF_MR,
};
enum uverbs_attrs_mr_destroy_ids {
@@ -270,6 +274,18 @@ enum uverbs_attrs_query_mr_cmd_attr_ids {
UVERBS_ATTR_QUERY_MR_RESP_IOVA,
};
+enum uverbs_attrs_reg_dmabuf_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_DMABUF_MR_HANDLE,
+ UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_DMABUF_MR_OFFSET,
+ UVERBS_ATTR_REG_DMABUF_MR_LENGTH,
+ UVERBS_ATTR_REG_DMABUF_MR_IOVA,
+ UVERBS_ATTR_REG_DMABUF_MR_FD,
+ UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+};
+
enum uverbs_attrs_create_counters_cmd_attr_ids {
UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
};
@@ -352,4 +368,18 @@ enum uverbs_attrs_async_event_create {
UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
};
+enum uverbs_attrs_query_gid_table_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE,
+ UVERBS_ATTR_QUERY_GID_TABLE_FLAGS,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
+};
+
+enum uverbs_attrs_query_gid_entry_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_GID_ENTRY_PORT,
+ UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX,
+ UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS,
+ UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 5debab45ebcb..fe15bc7e9f70 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -57,6 +57,8 @@ enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
+ IB_UVERBS_ACCESS_FLUSH_GLOBAL = 1 << 8,
+ IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 1 << 9,
IB_UVERBS_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_OPTIONAL_FIRST,
IB_UVERBS_ACCESS_OPTIONAL_RANGE =
@@ -208,6 +210,7 @@ enum ib_uverbs_read_counters_flags {
enum ib_uverbs_advise_mr_advice {
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH,
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
+ IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT,
};
enum ib_uverbs_advise_mr_flag {
@@ -217,7 +220,8 @@ enum ib_uverbs_advise_mr_flag {
struct ib_uverbs_query_port_resp_ex {
struct ib_uverbs_query_port_resp legacy_resp;
__u16 port_cap_flags2;
- __u8 reserved[6];
+ __u8 reserved[2];
+ __u32 active_speed_ex;
};
struct ib_uverbs_qp_cap {
@@ -239,6 +243,7 @@ enum rdma_driver_id {
RDMA_DRIVER_OCRDMA,
RDMA_DRIVER_NES,
RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW,
RDMA_DRIVER_VMW_PVRDMA,
RDMA_DRIVER_QEDR,
RDMA_DRIVER_HNS,
@@ -248,6 +253,22 @@ enum rdma_driver_id {
RDMA_DRIVER_QIB,
RDMA_DRIVER_EFA,
RDMA_DRIVER_SIW,
+ RDMA_DRIVER_ERDMA,
+ RDMA_DRIVER_MANA,
+};
+
+enum ib_uverbs_gid_type {
+ IB_UVERBS_GID_TYPE_IB,
+ IB_UVERBS_GID_TYPE_ROCE_V1,
+ IB_UVERBS_GID_TYPE_ROCE_V2,
+};
+
+struct ib_uverbs_gid_entry {
+ __aligned_u64 gid[2];
+ __u32 gid_index;
+ __u32 port_num;
+ __u32 gid_type;
+ __u32 netdev_ifindex; /* It is 0 if there is no netdev associated with it */
};
#endif
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index 90c0cf228020..10b5f6a4c677 100644
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
- __aligned_u64 data[0];
+ __aligned_u64 data[];
};
/*
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 0474c7400268..e16650f0c85d 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -105,6 +105,18 @@ enum {
IB_USER_VERBS_EX_CMD_MODIFY_CQ
};
+/* see IBA A19.4.1.1 Placement Types */
+enum ib_placement_type {
+ IB_FLUSH_GLOBAL = 1U << 0,
+ IB_FLUSH_PERSISTENT = 1U << 1,
+};
+
+/* see IBA A19.4.1.2 Selectivity Level */
+enum ib_selectivity_level {
+ IB_FLUSH_RANGE = 0,
+ IB_FLUSH_MR,
+};
+
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
@@ -158,18 +170,18 @@ struct ib_uverbs_ex_cmd_hdr {
struct ib_uverbs_get_context {
__aligned_u64 response;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_get_context_resp {
__u32 async_fd;
__u32 num_comp_vectors;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_device {
__aligned_u64 response;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_device_resp {
@@ -278,7 +290,7 @@ struct ib_uverbs_query_port {
__aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_port_resp {
@@ -308,12 +320,12 @@ struct ib_uverbs_query_port_resp {
struct ib_uverbs_alloc_pd {
__aligned_u64 response;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_alloc_pd_resp {
__u32 pd_handle;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_dealloc_pd {
@@ -324,12 +336,12 @@ struct ib_uverbs_open_xrcd {
__aligned_u64 response;
__u32 fd;
__u32 oflags;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_open_xrcd_resp {
__u32 xrcd_handle;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_close_xrcd {
@@ -343,14 +355,14 @@ struct ib_uverbs_reg_mr {
__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_reg_mr_resp {
__u32 mr_handle;
__u32 lkey;
__u32 rkey;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_rereg_mr {
@@ -362,13 +374,13 @@ struct ib_uverbs_rereg_mr {
__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_rereg_mr_resp {
__u32 lkey;
__u32 rkey;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_dereg_mr {
@@ -380,13 +392,13 @@ struct ib_uverbs_alloc_mw {
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_alloc_mw_resp {
__u32 mw_handle;
__u32 rkey;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_dealloc_mw {
@@ -408,7 +420,7 @@ struct ib_uverbs_create_cq {
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
enum ib_uverbs_ex_create_cq_flags {
@@ -442,13 +454,13 @@ struct ib_uverbs_resize_cq {
__aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_poll_cq {
@@ -457,6 +469,19 @@ struct ib_uverbs_poll_cq {
__u32 ne;
};
+enum ib_uverbs_wc_opcode {
+ IB_UVERBS_WC_SEND = 0,
+ IB_UVERBS_WC_RDMA_WRITE = 1,
+ IB_UVERBS_WC_RDMA_READ = 2,
+ IB_UVERBS_WC_COMP_SWAP = 3,
+ IB_UVERBS_WC_FETCH_ADD = 4,
+ IB_UVERBS_WC_BIND_MW = 5,
+ IB_UVERBS_WC_LOCAL_INV = 6,
+ IB_UVERBS_WC_TSO = 7,
+ IB_UVERBS_WC_FLUSH = 8,
+ IB_UVERBS_WC_ATOMIC_WRITE = 9,
+};
+
struct ib_uverbs_wc {
__aligned_u64 wr_id;
__u32 status;
@@ -481,7 +506,7 @@ struct ib_uverbs_wc {
struct ib_uverbs_poll_cq_resp {
__u32 count;
__u32 reserved;
- struct ib_uverbs_wc wc[0];
+ struct ib_uverbs_wc wc[];
};
struct ib_uverbs_req_notify_cq {
@@ -574,7 +599,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
enum ib_uverbs_create_qp_mask {
@@ -585,20 +610,6 @@ enum {
IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
};
-enum {
- /*
- * This value is equal to IB_QP_DEST_QPN.
- */
- IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
-};
-
-enum {
- /*
- * This value is equal to IB_QP_RATE_LIMIT.
- */
- IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
-};
-
struct ib_uverbs_ex_create_qp {
__aligned_u64 user_handle;
__u32 pd_handle;
@@ -627,7 +638,7 @@ struct ib_uverbs_open_qp {
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
/* also used for open response */
@@ -672,7 +683,7 @@ struct ib_uverbs_query_qp {
__aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_qp_resp {
@@ -706,7 +717,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_modify_qp {
@@ -787,6 +798,8 @@ enum ib_uverbs_wr_opcode {
IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
+ IB_UVERBS_WR_FLUSH = 14,
+ IB_UVERBS_WR_ATOMIC_WRITE = 15,
/* Review enum ib_wr_opcode before modifying this */
};
@@ -827,7 +840,7 @@ struct ib_uverbs_post_send {
__u32 wr_count;
__u32 sge_count;
__u32 wqe_size;
- struct ib_uverbs_send_wr send_wr[0];
+ struct ib_uverbs_send_wr send_wr[];
};
struct ib_uverbs_post_send_resp {
@@ -846,7 +859,7 @@ struct ib_uverbs_post_recv {
__u32 wr_count;
__u32 sge_count;
__u32 wqe_size;
- struct ib_uverbs_recv_wr recv_wr[0];
+ struct ib_uverbs_recv_wr recv_wr[];
};
struct ib_uverbs_post_recv_resp {
@@ -859,7 +872,7 @@ struct ib_uverbs_post_srq_recv {
__u32 wr_count;
__u32 sge_count;
__u32 wqe_size;
- struct ib_uverbs_recv_wr recv[0];
+ struct ib_uverbs_recv_wr recv[];
};
struct ib_uverbs_post_srq_recv_resp {
@@ -872,12 +885,12 @@ struct ib_uverbs_create_ah {
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_create_ah_resp {
__u32 ah_handle;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_destroy_ah {
@@ -889,7 +902,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_detach_mcast {
@@ -897,7 +910,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_flow_spec_hdr {
@@ -1138,7 +1151,7 @@ struct ib_uverbs_flow_attr {
* struct ib_flow_spec_xxx
* struct ib_flow_spec_yyy
*/
- struct ib_uverbs_flow_spec_hdr flow_specs[0];
+ struct ib_uverbs_flow_spec_hdr flow_specs[];
};
struct ib_uverbs_create_flow {
@@ -1164,7 +1177,7 @@ struct ib_uverbs_create_srq {
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_create_xsrq {
@@ -1178,7 +1191,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_create_srq_resp {
@@ -1186,7 +1199,7 @@ struct ib_uverbs_create_srq_resp {
__u32 max_wr;
__u32 max_sge;
__u32 srqn;
- __u32 driver_data[0];
+ __u32 driver_data[];
};
struct ib_uverbs_modify_srq {
@@ -1194,14 +1207,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_srq {
__aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
- __aligned_u64 driver_data[0];
+ __aligned_u64 driver_data[];
};
struct ib_uverbs_query_srq_resp {
@@ -1272,7 +1285,7 @@ struct ib_uverbs_ex_create_rwq_ind_table {
* wq_handle1
* wq_handle2
*/
- __u32 wq_handles[0];
+ __u32 wq_handles[];
};
struct ib_uverbs_ex_create_rwq_ind_table_resp {
@@ -1301,4 +1314,51 @@ struct ib_uverbs_ex_modify_cq {
#define IB_DEVICE_NAME_MAX 64
+/*
+ * bits 9, 15, 16, 19, 22, 27, 30, 31, 32, 33, 35 and 37 may be set by old
+ * kernels and should not be used.
+ */
+enum ib_uverbs_device_cap_flags {
+ IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1 << 0,
+ IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 1 << 1,
+ IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 1 << 2,
+ IB_UVERBS_DEVICE_RAW_MULTI = 1 << 3,
+ IB_UVERBS_DEVICE_AUTO_PATH_MIG = 1 << 4,
+ IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 1 << 5,
+ IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
+ IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
+ IB_UVERBS_DEVICE_SHUTDOWN_PORT = 1 << 8,
+ /* IB_UVERBS_DEVICE_INIT_TYPE = 1 << 9, (not in use) */
+ IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
+ IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 1 << 11,
+ IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
+ IB_UVERBS_DEVICE_SRQ_RESIZE = 1 << 13,
+ IB_UVERBS_DEVICE_N_NOTIFY_CQ = 1 << 14,
+ IB_UVERBS_DEVICE_MEM_WINDOW = 1 << 17,
+ IB_UVERBS_DEVICE_UD_IP_CSUM = 1 << 18,
+ IB_UVERBS_DEVICE_XRC = 1 << 20,
+ IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 1 << 21,
+ IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 1 << 23,
+ IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 1 << 24,
+ IB_UVERBS_DEVICE_RC_IP_CSUM = 1 << 25,
+ /* Deprecated. Please use IB_UVERBS_RAW_PACKET_CAP_IP_CSUM. */
+ IB_UVERBS_DEVICE_RAW_IP_CSUM = 1 << 26,
+ IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 1 << 29,
+ /* Deprecated. Please use IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS. */
+ IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 1ULL << 34,
+ IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 1ULL << 36,
+ /* Flush placement types */
+ IB_UVERBS_DEVICE_FLUSH_GLOBAL = 1ULL << 38,
+ IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 1ULL << 39,
+ /* Atomic write attributes */
+ IB_UVERBS_DEVICE_ATOMIC_WRITE = 1ULL << 40,
+};
+
+enum ib_uverbs_raw_packet_caps {
+ IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1 << 0,
+ IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 1 << 1,
+ IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 1 << 2,
+ IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 1 << 3,
+};
+
#endif /* IB_USER_VERBS_H */
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
new file mode 100644
index 000000000000..bb18f15489e3
--- /dev/null
+++ b/include/uapi/rdma/irdma-abi.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
+/*
+ * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ */
+
+#ifndef IRDMA_ABI_H
+#define IRDMA_ABI_H
+
+#include <linux/types.h>
+
+/* irdma must support the legacy GEN_1 i40iw kernel
+ * and user-space, whose last ABI version is 5
+ */
+#define IRDMA_ABI_VER 5
+
+enum irdma_memreg_type {
+ IRDMA_MEMREG_TYPE_MEM = 0,
+ IRDMA_MEMREG_TYPE_QP = 1,
+ IRDMA_MEMREG_TYPE_CQ = 2,
+};
+
+enum {
+ IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
+struct irdma_alloc_ucontext_req {
+ __u32 rsvd32;
+ __u8 userspace_ver;
+ __u8 rsvd8[3];
+ __aligned_u64 comp_mask;
+};
+
+struct irdma_alloc_ucontext_resp {
+ __u32 max_pds;
+ __u32 max_qps;
+ __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
+ __u8 kernel_ver;
+ __u8 rsvd[3];
+ __aligned_u64 feature_flags;
+ __aligned_u64 db_mmap_key;
+ __u32 max_hw_wq_frags;
+ __u32 max_hw_read_sges;
+ __u32 max_hw_inline;
+ __u32 max_hw_rq_quanta;
+ __u32 max_hw_wq_quanta;
+ __u32 min_hw_cq_size;
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+ __u8 hw_rev;
+ __u8 rsvd2;
+ __aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
+};
+
+struct irdma_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 rsvd[4];
+};
+
+struct irdma_resize_cq_req {
+ __aligned_u64 user_cq_buffer;
+};
+
+struct irdma_create_cq_req {
+ __aligned_u64 user_cq_buf;
+ __aligned_u64 user_shadow_area;
+};
+
+struct irdma_create_qp_req {
+ __aligned_u64 user_wqe_bufs;
+ __aligned_u64 user_compl_ctx;
+};
+
+struct irdma_mem_reg_req {
+ __u16 reg_type; /* enum irdma_memreg_type */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct irdma_modify_qp_req {
+ __u8 sq_flush;
+ __u8 rq_flush;
+ __u8 rsvd[6];
+};
+
+struct irdma_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+};
+
+struct irdma_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 irdma_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd;
+ __u32 qp_caps;
+};
+
+struct irdma_modify_qp_resp {
+ __aligned_u64 push_wqe_mmap_key;
+ __aligned_u64 push_db_mmap_key;
+ __u16 push_offset;
+ __u8 push_valid;
+ __u8 rsvd[5];
+};
+
+struct irdma_create_ah_resp {
+ __u32 ah_id;
+ __u8 rsvd[4];
+};
+#endif /* IRDMA_ABI_H */
diff --git a/include/uapi/rdma/mana-abi.h b/include/uapi/rdma/mana-abi.h
new file mode 100644
index 000000000000..5fcb31b37fb9
--- /dev/null
+++ b/include/uapi/rdma/mana-abi.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
+/*
+ * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */
+
+#ifndef MANA_ABI_USER_H
+#define MANA_ABI_USER_H
+
+#include <linux/types.h>
+#include <rdma/ib_user_ioctl_verbs.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+
+#define MANA_IB_UVERBS_ABI_VERSION 1
+
+struct mana_ib_create_cq {
+ __aligned_u64 buf_addr;
+};
+
+struct mana_ib_create_qp {
+ __aligned_u64 sq_buf_addr;
+ __u32 sq_buf_size;
+ __u32 port;
+};
+
+struct mana_ib_create_qp_resp {
+ __u32 sqid;
+ __u32 cqid;
+ __u32 tx_vp_offset;
+ __u32 reserved;
+};
+
+struct mana_ib_create_wq {
+ __aligned_u64 wq_buf_addr;
+ __u32 wq_buf_size;
+ __u32 reserved;
+};
+
+/* RX Hash function flags */
+enum mana_ib_rx_hash_function_flags {
+ MANA_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+};
+
+struct mana_ib_create_qp_rss {
+ __aligned_u64 rx_hash_fields_mask;
+ __u8 rx_hash_function;
+ __u8 reserved[7];
+ __u32 rx_hash_key_len;
+ __u8 rx_hash_key[40];
+ __u32 port;
+};
+
+struct rss_resp_entry {
+ __u32 cqid;
+ __u32 wqid;
+};
+
+struct mana_ib_create_qp_rss_resp {
+ __aligned_u64 num_entries;
+ struct rss_resp_entry entries[64];
+};
+
+#endif
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 27905a0268c9..d4f6a36dffb0 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/if_ether.h> /* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -50,6 +51,7 @@ enum {
MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
+ MLX5_QP_FLAG_DCI_STREAM = 1 << 11,
};
enum {
@@ -101,6 +103,9 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG = 1UL << 5,
};
enum mlx5_user_cmds_supp_uhw {
@@ -236,6 +241,11 @@ struct mlx5_ib_striding_rq_caps {
__u32 reserved;
};
+struct mlx5_ib_dci_streams_caps {
+ __u8 max_log_num_concurent;
+ __u8 max_log_num_errored;
+};
+
enum mlx5_ib_query_dev_resp_flags {
/* Support 128B CQE compression */
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
@@ -264,12 +274,15 @@ struct mlx5_ib_query_device_resp {
struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
struct mlx5_ib_striding_rq_caps striding_rq_caps;
__u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
- __u32 reserved;
+ struct mlx5_ib_dci_streams_caps dci_streams_caps;
+ __u16 reserved;
+ struct mlx5_ib_uapi_reg reg_c0;
};
enum mlx5_ib_create_cq_flags {
MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
+ MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS = 1 << 2,
};
struct mlx5_ib_create_cq {
@@ -310,6 +323,11 @@ struct mlx5_ib_create_srq_resp {
__u32 reserved;
};
+struct mlx5_ib_create_qp_dci_streams {
+ __u8 log_num_concurent;
+ __u8 log_num_errored;
+};
+
struct mlx5_ib_create_qp {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
@@ -324,7 +342,8 @@ struct mlx5_ib_create_qp {
__aligned_u64 access_key;
};
__u32 ece_options;
- __u32 reserved;
+ struct mlx5_ib_create_qp_dci_streams dci_streams;
+ __u16 reserved;
};
/* RX Hash function flags */
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index e24d66d278cf..595edad03dfe 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -41,6 +41,25 @@ enum mlx5_ib_create_flow_action_attrs {
MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
};
+enum mlx5_ib_dm_methods {
+ MLX5_IB_METHOD_DM_MAP_OP_ADDR = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_DM_QUERY,
+};
+
+enum mlx5_ib_dm_map_op_addr_attrs {
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+};
+
+enum mlx5_ib_query_dm_attrs {
+ MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+};
+
enum mlx5_ib_alloc_dm_attrs {
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
@@ -154,6 +173,8 @@ enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
};
enum mlx5_ib_devx_umem_dereg_attrs {
@@ -208,6 +229,7 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_VAR,
MLX5_IB_OBJECT_PP,
MLX5_IB_OBJECT_UAR,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
};
enum mlx5_ib_flow_matcher_create_attrs {
@@ -228,11 +250,27 @@ enum mlx5_ib_flow_matcher_methods {
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
};
+enum mlx5_ib_flow_steering_anchor_create_attrs {
+ MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+};
+
+enum mlx5_ib_flow_steering_anchor_destroy_attrs {
+ MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_steering_anchor_methods {
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+};
+
enum mlx5_ib_device_query_context_attrs {
MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
};
-#define MLX5_IB_DW_MATCH_PARAM 0x80
+#define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params {
__u32 match_params[MLX5_IB_DW_MATCH_PARAM];
@@ -300,4 +338,13 @@ enum mlx5_ib_pd_methods {
};
+enum mlx5_ib_device_methods {
+ MLX5_IB_METHOD_QUERY_PORT = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_query_port_attrs {
+ MLX5_IB_ATTR_QUERY_PORT_PORT_NUM = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_QUERY_PORT,
+};
+
#endif
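
The MLX5_IB_DW_MATCH_PARAM bump from 0x80 to 0xA0 is a size change for struct mlx5_ib_match_params: the buffer is counted in 32-bit dwords, so it grows from 128 dwords (512 bytes) to 160 dwords (640 bytes). A hedged compile-time guard for userspace, derived only from the define itself and not from any mlx5 documentation:

	#include <rdma/mlx5_user_ioctl_cmds.h>

	/* 0xA0 dwords * 4 bytes per dword = 640 bytes */
	_Static_assert(sizeof(struct mlx5_ib_match_params) == MLX5_IB_DW_MATCH_PARAM * 4,
		       "match_params must track MLX5_IB_DW_MATCH_PARAM");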
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 56b26eaea083..3189c7f08d17 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -63,6 +63,8 @@ enum mlx5_ib_uapi_dm_type {
MLX5_IB_UAPI_DM_TYPE_MEMIC,
MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM,
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM,
+ MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM,
+ MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM,
};
enum mlx5_ib_uapi_devx_create_event_channel_flags {
@@ -83,5 +85,30 @@ enum mlx5_ib_uapi_uar_alloc_type {
MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1,
};
+enum mlx5_ib_uapi_query_port_flags {
+ MLX5_IB_UAPI_QUERY_PORT_VPORT = 1 << 0,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID = 1 << 1,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX = 1 << 2,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX = 1 << 3,
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0 = 1 << 4,
+ MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID = 1 << 5,
+};
+
+struct mlx5_ib_uapi_reg {
+ __u32 value;
+ __u32 mask;
+};
+
+struct mlx5_ib_uapi_query_port {
+ __aligned_u64 flags;
+ __u16 vport;
+ __u16 vport_vhca_id;
+ __u16 esw_owner_vhca_id;
+ __u16 rsvd0;
+ __aligned_u64 vport_steering_icm_rx;
+ __aligned_u64 vport_steering_icm_tx;
+ struct mlx5_ib_uapi_reg reg_c0;
+};
+
#endif
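
In struct mlx5_ib_uapi_query_port, the flags bitmap (enum mlx5_ib_uapi_query_port_flags) reports which output fields the kernel actually filled in, so a caller should test each bit before consuming the matching field. A minimal consumer sketch, assuming the installed uapi header; the printing is illustrative only:

	#include <stdio.h>
	#include <rdma/mlx5_user_ioctl_verbs.h>

	static void dump_port(const struct mlx5_ib_uapi_query_port *p)
	{
		/* Each field is meaningful only if its flag bit is set. */
		if (p->flags & MLX5_IB_UAPI_QUERY_PORT_VPORT)
			printf("vport: %u\n", p->vport);
		if (p->flags & MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID)
			printf("vport vhca id: %u\n", p->vport_vhca_id);
		if (p->flags & MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX)
			printf("steering icm rx: 0x%llx\n",
			       (unsigned long long)p->vport_steering_icm_rx);
		if (p->flags & MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0)
			printf("reg_c0: value 0x%x mask 0x%x\n",
			       p->reg_c0.value, p->reg_c0.mask);
	}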
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index d2f5b8396243..723bbb0f7042 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -293,6 +293,14 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_RES_MR_GET_RAW,
+ RDMA_NLDEV_CMD_RES_CTX_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_SRQ_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_STAT_GET_STATUS,
+
+ RDMA_NLDEV_CMD_RES_SRQ_GET_RAW,
+
RDMA_NLDEV_NUM_OPS
};
@@ -533,6 +541,23 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_RES_RAW, /* binary */
+ RDMA_NLDEV_ATTR_RES_CTX, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CTX_ENTRY, /* nested table */
+
+ RDMA_NLDEV_ATTR_RES_SRQ, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SRQN, /* u32 */
+
+ RDMA_NLDEV_ATTR_MIN_RANGE, /* u32 */
+ RDMA_NLDEV_ATTR_MAX_RANGE, /* u32 */
+
+ RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, /* u8 */
+
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, /* u8 */
+
+ RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, /* u8 */
+
/*
* Always the end
*/
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index ed5a514305c1..7cea03581f79 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -184,7 +184,7 @@ struct rdma_ucm_query_addr_resp {
struct rdma_ucm_query_path_resp {
__u32 num_paths;
__u32 reserved;
- struct ib_path_rec_data path_data[0];
+ struct ib_path_rec_data path_data[];
};
struct rdma_ucm_conn_param {
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 38ab7accb7be..ab1aef17feb1 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -81,7 +81,7 @@ struct ib_uverbs_ioctl_hdr {
__aligned_u64 reserved1;
__u32 driver_id;
__u32 reserved2;
- struct ib_uverbs_attr attrs[0];
+ struct ib_uverbs_attr attrs[];
};
#endif
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index aae2e696bb38..bb092fccb813 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -39,6 +39,11 @@
#include <linux/in.h>
#include <linux/in6.h>
+enum {
+ RXE_NETWORK_TYPE_IPV4 = 1,
+ RXE_NETWORK_TYPE_IPV6 = 2,
+};
+
union rxe_gid {
__u8 raw[16];
struct {
@@ -57,6 +62,7 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
+ /* From RXE_NETWORK_TYPE_* */
__u8 network_type;
__u8 dmac[6];
struct rxe_global_route grh;
@@ -68,7 +74,7 @@ struct rxe_av {
struct rxe_send_wr {
__aligned_u64 wr_id;
- __u32 num_sge;
+ __u32 reserved;
__u32 opcode;
__u32 send_flags;
union {
@@ -78,6 +84,13 @@ struct rxe_send_wr {
union {
struct {
__aligned_u64 remote_addr;
+ __u32 length;
+ __u32 rkey;
+ __u8 type;
+ __u8 level;
+ } flush;
+ struct {
+ __aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
@@ -92,16 +105,30 @@ struct rxe_send_wr {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
+ __u16 reserved;
+ __u32 ah_num;
+ __u32 pad[4];
+ struct rxe_av av;
} ud;
+ struct {
+ __aligned_u64 addr;
+ __aligned_u64 length;
+ __u32 mr_lkey;
+ __u32 mw_rkey;
+ __u32 rkey;
+ __u32 access;
+ } mw;
/* reg is only used by the kernel and is not part of the uapi */
+#ifdef __KERNEL__
struct {
union {
struct ib_mr *mr;
__aligned_u64 reserved;
};
- __u32 key;
- __u32 access;
+ __u32 key;
+ __u32 access;
} reg;
+#endif
} wr;
};
@@ -112,7 +139,7 @@ struct rxe_sge {
};
struct mminfo {
- __aligned_u64 offset;
+ __aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -125,14 +152,14 @@ struct rxe_dma_info {
__u32 sge_offset;
__u32 reserved;
union {
- __u8 inline_data[0];
- struct rxe_sge sge[0];
+ __DECLARE_FLEX_ARRAY(__u8, inline_data);
+ __DECLARE_FLEX_ARRAY(__u8, atomic_wr);
+ __DECLARE_FLEX_ARRAY(struct rxe_sge, sge);
};
};
struct rxe_send_wqe {
struct rxe_send_wr wr;
- struct rxe_av av;
__u32 status;
__u32 state;
__aligned_u64 iova;
@@ -147,11 +174,16 @@ struct rxe_send_wqe {
struct rxe_recv_wqe {
__aligned_u64 wr_id;
- __u32 num_sge;
+ __u32 reserved;
__u32 padding;
struct rxe_dma_info dma;
};
+struct rxe_create_ah_resp {
+ __u32 ah_num;
+ __u32 reserved;
+};
+
struct rxe_create_cq_resp {
struct mminfo mi;
};
@@ -175,4 +207,25 @@ struct rxe_modify_srq_cmd {
__aligned_u64 mmap_info_addr;
};
+/* This data structure is stored at the base of work and
+ * completion queues shared between user space and kernel space.
+ * It contains the producer and consumer indices. It also
+ * contains a copy of the queue size parameters for user space
+ * to use, but the kernel must use the parameters in the
+ * rxe_queue struct. For performance reasons the producer and
+ * consumer indices are placed in separate cache lines.
+ * The kernel should always mask the indices to avoid accessing
+ * memory outside of the data area.
+ */
+struct rxe_queue_buf {
+ __u32 log2_elem_size;
+ __u32 index_mask;
+ __u32 pad_1[30];
+ __u32 producer_index;
+ __u32 pad_2[31];
+ __u32 consumer_index;
+ __u32 pad_3[31];
+ __u8 data[];
+};
+
#endif /* RDMA_USER_RXE_H */
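
The rxe_queue_buf comment above describes a shared-memory ring: producer_index and consumer_index sit in separate cache lines, and every access into data[] must mask the index with index_mask. A sketch of the consumer-side helpers under those rules; the memory-ordering barriers that the kernel and rdma-core add around index loads and stores are omitted here:

	#include <linux/types.h>
	#include <rdma/rdma_user_rxe.h>

	/* Address of the slot selected by 'index'; elements are
	 * 1 << log2_elem_size bytes each.
	 */
	static inline void *queue_addr(struct rxe_queue_buf *q, __u32 index)
	{
		return q->data + ((index & q->index_mask) << q->log2_elem_size);
	}

	static inline int queue_empty(const struct rxe_queue_buf *q)
	{
		/* Indices are kept pre-masked, so direct compare is valid. */
		return q->producer_index == q->consumer_index;
	}

	/* Consumer: after reading the current element, publish the new index. */
	static inline void queue_advance_consumer(struct rxe_queue_buf *q)
	{
		q->consumer_index = (q->consumer_index + 1) & q->index_mask;
	}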
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index af735f55b291..6df49724954f 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index f8b638c73371..901a4fd72c09 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
+enum pvrdma_network_type {
+ PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+ PVRDMA_NETWORK_IPV4,
+ PVRDMA_NETWORK_IPV6
+};
+
struct pvrdma_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 reserved;
diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h
new file mode 100644
index 000000000000..71bf71a22e7f
--- /dev/null
+++ b/include/uapi/regulator/regulator.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Regulator uapi header
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef _UAPI_REGULATOR_H
+#define _UAPI_REGULATOR_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * Regulator notifier events.
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
+ * DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled.
+ * ABORT_DISABLE Regulator disable failed for some reason.
+ *
+ * NOTE: These events can be OR'ed together when passed into a handler.
+ */
+
+#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
+#define REGULATOR_EVENT_OVER_CURRENT 0x02
+#define REGULATOR_EVENT_REGULATION_OUT 0x04
+#define REGULATOR_EVENT_FAIL 0x08
+#define REGULATOR_EVENT_OVER_TEMP 0x10
+#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but
+ * consumers should take recovery action to prevent problems from
+ * escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
+
+struct reg_genl_event {
+ char reg_name[32];
+ uint64_t event;
+};
+
+/* attributes of reg_genl_family */
+enum {
+ REG_GENL_ATTR_UNSPEC,
+ REG_GENL_ATTR_EVENT, /* reg event info needed by user space */
+ __REG_GENL_ATTR_MAX,
+};
+
+#define REG_GENL_ATTR_MAX (__REG_GENL_ATTR_MAX - 1)
+
+/* commands supported by the reg_genl_family */
+enum {
+ REG_GENL_CMD_UNSPEC,
+ REG_GENL_CMD_EVENT, /* kernel->user notifications for reg events */
+ __REG_GENL_CMD_MAX,
+};
+
+#define REG_GENL_CMD_MAX (__REG_GENL_CMD_MAX - 1)
+
+#define REG_GENL_FAMILY_NAME "reg_event"
+#define REG_GENL_VERSION 0x01
+#define REG_GENL_MCAST_GROUP_NAME "reg_mc_group"
+
+#endif /* _UAPI_REGULATOR_H */
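
These definitions describe a generic netlink interface: events arrive on multicast group "reg_mc_group" of family "reg_event", carried in a REG_GENL_ATTR_EVENT attribute that holds a struct reg_genl_event. A minimal listener sketch using libnl-3 (one possible library choice, not mandated by the header), with error handling omitted:

	#include <stdio.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <regulator/regulator.h>	/* installed uapi header */

	static int on_event(struct nl_msg *msg, void *arg)
	{
		struct nlattr *attrs[REG_GENL_ATTR_MAX + 1];

		if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs,
				  REG_GENL_ATTR_MAX, NULL) == 0 &&
		    attrs[REG_GENL_ATTR_EVENT]) {
			struct reg_genl_event *ev =
				nla_data(attrs[REG_GENL_ATTR_EVENT]);

			printf("%s: event mask 0x%llx\n", ev->reg_name,
			       (unsigned long long)ev->event);
		}
		return NL_OK;
	}

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		genl_connect(sk);
		grp = genl_ctrl_resolve_grp(sk, REG_GENL_FAMILY_NAME,
					    REG_GENL_MCAST_GROUP_NAME);
		nl_socket_add_membership(sk, grp);
		nl_socket_disable_seq_check(sk);	/* multicast events */
		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);

		for (;;)
			nl_recvmsgs_default(sk);
	}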
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 8c704e510e39..16782c360de3 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -41,6 +41,7 @@ enum fc_els_cmd {
ELS_REC = 0x13, /* read exchange concise */
ELS_SRR = 0x14, /* sequence retransmission request */
ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */
+ ELS_EDC = 0x17, /* Exchange Diagnostic Capabilities */
ELS_RDP = 0x18, /* Read Diagnostic Parameters */
ELS_RDF = 0x19, /* Register Diagnostic Functions */
ELS_PRLI = 0x20, /* process login */
@@ -111,6 +112,7 @@ enum fc_els_cmd {
[ELS_REC] = "REC", \
[ELS_SRR] = "SRR", \
[ELS_FPIN] = "FPIN", \
+ [ELS_EDC] = "EDC", \
[ELS_RDP] = "RDP", \
[ELS_RDF] = "RDF", \
[ELS_PRLI] = "PRLI", \
@@ -218,6 +220,10 @@ enum fc_els_rjt_explan {
enum fc_ls_tlv_dtag {
ELS_DTAG_LS_REQ_INFO = 0x00000001,
/* Link Service Request Information Descriptor */
+ ELS_DTAG_LNK_FAULT_CAP = 0x0001000D,
+ /* Link Fault Capability Descriptor */
+ ELS_DTAG_CG_SIGNAL_CAP = 0x0001000F,
+ /* Congestion Signaling Capability Descriptor */
ELS_DTAG_LNK_INTEGRITY = 0x00020001,
/* Link Integrity Notification Descriptor */
ELS_DTAG_DELIVERY = 0x00020002,
@@ -236,6 +242,8 @@ enum fc_ls_tlv_dtag {
*/
#define FC_LS_TLV_DTAG_INIT { \
{ ELS_DTAG_LS_REQ_INFO, "Link Service Request Information" }, \
+ { ELS_DTAG_LNK_FAULT_CAP, "Link Fault Capability" }, \
+ { ELS_DTAG_CG_SIGNAL_CAP, "Congestion Signaling Capability" }, \
{ ELS_DTAG_LNK_INTEGRITY, "Link Integrity Notification" }, \
{ ELS_DTAG_DELIVERY, "Delivery Notification Present" }, \
{ ELS_DTAG_PEER_CONGEST, "Peer Congestion Notification" }, \
@@ -256,7 +264,7 @@ struct fc_tlv_desc {
* Size of descriptor excluding
* desc_tag and desc_len fields.
*/
- __u8 desc_value[0]; /* Descriptor Value */
+ __u8 desc_value[]; /* Descriptor Value */
};
/* Descriptor tag and len fields are considered the mandatory header
@@ -916,7 +924,9 @@ enum fc_els_clid_ic {
ELS_CLID_IC_LIP = 8, /* receiving LIP */
};
-
+/*
+ * Link Integrity event types
+ */
enum fc_fpin_li_event_types {
FPIN_LI_UNKNOWN = 0x0,
FPIN_LI_LINK_FAILURE = 0x1,
@@ -943,6 +953,54 @@ enum fc_fpin_li_event_types {
{ FPIN_LI_DEVICE_SPEC, "Device Specific" }, \
}
+/*
+ * Delivery event types
+ */
+enum fc_fpin_deli_event_types {
+ FPIN_DELI_UNKNOWN = 0x0,
+ FPIN_DELI_TIMEOUT = 0x1,
+ FPIN_DELI_UNABLE_TO_ROUTE = 0x2,
+ FPIN_DELI_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_DELI_EVT_TYPES_INIT { \
+ { FPIN_DELI_UNKNOWN, "Unknown" }, \
+ { FPIN_DELI_TIMEOUT, "Timeout" }, \
+ { FPIN_DELI_UNABLE_TO_ROUTE, "Unable to Route" }, \
+ { FPIN_DELI_DEVICE_SPEC, "Device Specific" }, \
+}
+
+/*
+ * Congestion event types
+ */
+enum fc_fpin_congn_event_types {
+ FPIN_CONGN_CLEAR = 0x0,
+ FPIN_CONGN_LOST_CREDIT = 0x1,
+ FPIN_CONGN_CREDIT_STALL = 0x2,
+ FPIN_CONGN_OVERSUBSCRIPTION = 0x3,
+ FPIN_CONGN_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_CONGN_EVT_TYPES_INIT { \
+ { FPIN_CONGN_CLEAR, "Clear" }, \
+ { FPIN_CONGN_LOST_CREDIT, "Lost Credit" }, \
+ { FPIN_CONGN_CREDIT_STALL, "Credit Stall" }, \
+ { FPIN_CONGN_OVERSUBSCRIPTION, "Oversubscription" }, \
+ { FPIN_CONGN_DEVICE_SPEC, "Device Specific" }, \
+}
+
+enum fc_fpin_congn_severity_types {
+ FPIN_CONGN_SEVERITY_WARNING = 0xF1,
+ FPIN_CONGN_SEVERITY_ERROR = 0xF7,
+};
/*
* Link Integrity Notification Descriptor
@@ -969,12 +1027,74 @@ struct fc_fn_li_desc {
 * threshold to cause the LI event
*/
__be32 pname_count; /* number of portname_list elements */
- __be64 pname_list[0]; /* list of N_Port_Names accessible
+ __be64 pname_list[]; /* list of N_Port_Names accessible
* through the attached port
*/
};
/*
+ * Delivery Notification Descriptor
+ */
+struct fc_fn_deli_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020002) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be64 detecting_wwpn; /* Port Name that detected event */
+ __be64 attached_wwpn; /* Port Name of device attached to
+ * detecting Port Name
+ */
+ __be32 deli_reason_code;/* see enum fc_fpin_deli_event_types */
+};
+
+/*
+ * Peer Congestion Notification Descriptor
+ */
+struct fc_fn_peer_congn_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020003) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be64 detecting_wwpn; /* Port Name that detected event */
+ __be64 attached_wwpn; /* Port Name of device attached to
+ * detecting Port Name
+ */
+ __be16 event_type; /* see enum fc_fpin_congn_event_types */
+ __be16 event_modifier; /* Implementation specific value
+ * describing the event type
+ */
+ __be32 event_period; /* duration (ms) of the detected
+ * congestion event
+ */
+ __be32 pname_count; /* number of portname_list elements */
+ __be64 pname_list[]; /* list of N_Port_Names accessible
+ * through the attached port
+ */
+};
+
+/*
+ * Congestion Notification Descriptor
+ */
+struct fc_fn_congn_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020004) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be16 event_type; /* see enum fc_fpin_congn_event_types */
+ __be16 event_modifier; /* Implementation specific value
+ * describing the event type
+ */
+ __be32 event_period; /* duration (ms) of the detected
+ * congestion event
+ */
+ __u8 severity; /* see enum fc_fpin_congn_severity_types */
+ __u8 resv[3]; /* reserved - must be zero */
+};
+
+/*
* ELS_FPIN - Fabric Performance Impact Notification
*/
struct fc_els_fpin {
@@ -984,7 +1104,7 @@ struct fc_els_fpin {
* Size of ELS excluding fpin_cmd,
* fpin_zero and desc_len fields.
*/
- struct fc_tlv_desc fpin_desc[0]; /* Descriptor list */
+ struct fc_tlv_desc fpin_desc[]; /* Descriptor list */
};
/* Diagnostic Function Descriptor - FPIN Registration */
@@ -995,7 +1115,7 @@ struct fc_df_desc_fpin_reg {
* desc_tag and desc_len fields.
*/
__be32 count; /* Number of desc_tags elements */
- __be32 desc_tags[0]; /* Array of Descriptor Tags.
+ __be32 desc_tags[]; /* Array of Descriptor Tags.
* Each tag indicates a function
* supported by the N_Port (request)
* or by the N_Port and Fabric
@@ -1015,7 +1135,7 @@ struct fc_els_rdf {
* Size of ELS excluding fpin_cmd,
* fpin_zero and desc_len fields.
*/
- struct fc_tlv_desc desc[0]; /* Descriptor list */
+ struct fc_tlv_desc desc[]; /* Descriptor list */
};
/*
@@ -1028,7 +1148,105 @@ struct fc_els_rdf_resp {
* and desc_list_len fields.
*/
struct fc_els_lsri_desc lsri;
- struct fc_tlv_desc desc[0]; /* Supported Descriptor list */
+ struct fc_tlv_desc desc[]; /* Supported Descriptor list */
+};
+
+
+/*
+ * Diagnostic Capability Descriptors for EDC ELS
+ */
+
+/*
+ * Diagnostic: Link Fault Capability Descriptor
+ */
+struct fc_diag_lnkflt_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x0001000D) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ * 12 bytes
+ */
+ __be32 degrade_activate_threshold;
+ __be32 degrade_deactivate_threshold;
+ __be32 fec_degrade_interval;
+};
+
+enum fc_edc_cg_signal_cap_types {
+ /* Note: Capability: bits 31:4 Rsvd; bits 3:0 are capabilities */
+ EDC_CG_SIG_NOTSUPPORTED = 0x00, /* neither supported */
+ EDC_CG_SIG_WARN_ONLY = 0x01,
+ EDC_CG_SIG_WARN_ALARM = 0x02, /* both supported */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_EDC_CG_SIGNAL_CAP_TYPES_INIT { \
+ { EDC_CG_SIG_NOTSUPPORTED, "Signaling Not Supported" }, \
+ { EDC_CG_SIG_WARN_ONLY, "Warning Signal" }, \
+ { EDC_CG_SIG_WARN_ALARM, "Warning and Alarm Signals" }, \
+}
+
+enum fc_diag_cg_sig_freq_types {
+ EDC_CG_SIGFREQ_CNT_MIN = 1, /* Min Frequency Count */
+ EDC_CG_SIGFREQ_CNT_MAX = 999, /* Max Frequency Count */
+
+ EDC_CG_SIGFREQ_SEC = 0x1, /* Units: seconds */
+ EDC_CG_SIGFREQ_MSEC = 0x2, /* Units: milliseconds */
+};
+
+struct fc_diag_cg_sig_freq {
+ __be16 count; /* Time between signals
+ * note: upper 6 bits rsvd
+ */
+ __be16 units; /* Time unit for count
+ * note: upper 12 bits rsvd
+ */
+};
+
+/*
+ * Diagnostic: Congestion Signaling Capability Descriptor
+ */
+struct fc_diag_cg_sig_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x0001000F) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ * 16 bytes
+ */
+ __be32 xmt_signal_capability;
+ struct fc_diag_cg_sig_freq xmt_signal_frequency;
+ __be32 rcv_signal_capability;
+ struct fc_diag_cg_sig_freq rcv_signal_frequency;
+};
+
+/*
+ * ELS_EDC - Exchange Diagnostic Capabilities
+ */
+struct fc_els_edc {
+ __u8 edc_cmd; /* command (0x17) */
+ __u8 edc_zero[3]; /* specified as zero - part of cmd */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding edc_cmd,
+ * edc_zero and desc_len fields.
+ */
+ struct fc_tlv_desc desc[];
+ /* Diagnostic Descriptor list */
+};
+
+/*
+ * ELS EDC LS_ACC Response.
+ */
+struct fc_els_edc_resp {
+ struct fc_els_ls_acc acc_hdr;
+ __be32 desc_list_len; /* Length of response (in
+ * bytes). Excludes acc_hdr
+ * and desc_list_len fields.
+ */
+ struct fc_els_lsri_desc lsri;
+ struct fc_tlv_desc desc[];
+ /* Supported Diagnostic Descriptor list */
};
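
Both FPIN and EDC payloads are a list of struct fc_tlv_desc entries, and desc_len deliberately excludes the 8-byte tag/len header, so the next descriptor begins at the current one plus sizeof(struct fc_tlv_desc) plus desc_len. A hedged userspace walker sketch; it assumes well-formed descriptors, and a real parser must also bounds-check each desc_len before trusting it:

	#include <stddef.h>
	#include <arpa/inet.h>		/* ntohl */
	#include <scsi/fc/fc_els.h>

	/* 'len' is the total size of the fpin_desc[] area in bytes. */
	static void walk_fpin(const struct fc_els_fpin *fpin, size_t len)
	{
		const __u8 *p = (const __u8 *)fpin->fpin_desc;
		const __u8 *end = p + len;

		while (p + sizeof(struct fc_tlv_desc) <= end) {
			const struct fc_tlv_desc *tlv = (const void *)p;

			switch (ntohl(tlv->desc_tag)) {
			case ELS_DTAG_LNK_INTEGRITY:
				/* payload is struct fc_fn_li_desc */
				break;
			case ELS_DTAG_DELIVERY:
				/* payload is struct fc_fn_deli_desc */
				break;
			case ELS_DTAG_PEER_CONGEST:
				/* payload is struct fc_fn_peer_congn_desc */
				break;
			}
			/* desc_len excludes the tag/len header itself. */
			p += sizeof(struct fc_tlv_desc) + ntohl(tlv->desc_len);
		}
	}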
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 3ae65e93235c..5e46cf1054af 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -209,14 +209,14 @@ struct fc_bsg_host_vendor {
__u64 vendor_id;
/* start of vendor command area */
- __u32 vendor_cmd[0];
+ __u32 vendor_cmd[];
};
/* Response:
*/
struct fc_bsg_host_vendor_reply {
/* start of vendor response area */
- __u32 vendor_rsp[0];
+ __DECLARE_FLEX_ARRAY(__u32, vendor_rsp);
};
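
The switch from vendor_rsp[0] to __DECLARE_FLEX_ARRAY() is needed because a C99 flexible array member may not be the only member of a struct, nor sit directly inside a union. The uapi macro (from include/uapi/linux/stddef.h; shown here as a paraphrase, not a verbatim copy) wraps the array in an anonymous struct with an empty leading member so the layout stays identical to the old zero-length-array form:

	#define __DECLARE_FLEX_ARRAY(TYPE, NAME)	\
		struct {				\
			struct { } __empty_ ## NAME;	\
			TYPE NAME[];			\
		}

	/* fc_bsg_host_vendor_reply therefore remains layout-compatible:
	 * vendor_rsp[] still starts at offset 0 of the structure.
	 */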
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
new file mode 100644
index 000000000000..c72ce387286a
--- /dev/null
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef SCSI_BSG_MPI3MR_H_INCLUDED
+#define SCSI_BSG_MPI3MR_H_INCLUDED
+
+#include <linux/types.h>
+
+/* Definitions for BSG commands */
+#define MPI3MR_IOCTL_VERSION 0x06
+
+#define MPI3MR_APP_DEFAULT_TIMEOUT (60) /* seconds */
+
+#define MPI3MR_BSG_ADPTYPE_UNKNOWN 0
+#define MPI3MR_BSG_ADPTYPE_AVGFAMILY 1
+
+#define MPI3MR_BSG_ADPSTATE_UNKNOWN 0
+#define MPI3MR_BSG_ADPSTATE_OPERATIONAL 1
+#define MPI3MR_BSG_ADPSTATE_FAULT 2
+#define MPI3MR_BSG_ADPSTATE_IN_RESET 3
+#define MPI3MR_BSG_ADPSTATE_UNRECOVERABLE 4
+
+#define MPI3MR_BSG_ADPRESET_UNKNOWN 0
+#define MPI3MR_BSG_ADPRESET_SOFT 1
+#define MPI3MR_BSG_ADPRESET_DIAG_FAULT 2
+
+#define MPI3MR_BSG_LOGDATA_MAX_ENTRIES 400
+#define MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ 4
+
+#define MPI3MR_DRVBSG_OPCODE_UNKNOWN 0
+#define MPI3MR_DRVBSG_OPCODE_ADPINFO 1
+#define MPI3MR_DRVBSG_OPCODE_ADPRESET 2
+#define MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO 4
+#define MPI3MR_DRVBSG_OPCODE_GETCHGCNT 5
+#define MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE 6
+#define MPI3MR_DRVBSG_OPCODE_PELENABLE 7
+#define MPI3MR_DRVBSG_OPCODE_GETLOGDATA 8
+#define MPI3MR_DRVBSG_OPCODE_QUERY_HDB 9
+#define MPI3MR_DRVBSG_OPCODE_REPOST_HDB 10
+#define MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB 11
+#define MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS 12
+
+
+#define MPI3MR_BSG_BUFTYPE_UNKNOWN 0
+#define MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD 1
+#define MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP 2
+#define MPI3MR_BSG_BUFTYPE_DATA_IN 3
+#define MPI3MR_BSG_BUFTYPE_DATA_OUT 4
+#define MPI3MR_BSG_BUFTYPE_MPI_REPLY 5
+#define MPI3MR_BSG_BUFTYPE_ERR_RESPONSE 6
+#define MPI3MR_BSG_BUFTYPE_MPI_REQUEST 0xFE
+
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_UNKNOWN 0
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS 1
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS 2
+
+#define MPI3MR_HDB_BUFTYPE_UNKNOWN 0
+#define MPI3MR_HDB_BUFTYPE_TRACE 1
+#define MPI3MR_HDB_BUFTYPE_FIRMWARE 2
+#define MPI3MR_HDB_BUFTYPE_RESERVED 3
+
+#define MPI3MR_HDB_BUFSTATUS_UNKNOWN 0
+#define MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED 1
+#define MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED 2
+#define MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED 3
+#define MPI3MR_HDB_BUFSTATUS_RELEASED 4
+
+#define MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN 0
+#define MPI3MR_HDB_TRIGGER_TYPE_DIAGFAULT 1
+#define MPI3MR_HDB_TRIGGER_TYPE_ELEMENT 2
+#define MPI3MR_HDB_TRIGGER_TYPE_MASTER 3
+
+
+/* Supported BSG commands */
+enum command {
+ MPI3MR_DRV_CMD = 1,
+ MPI3MR_MPT_CMD = 2,
+};
+
+/**
+ * struct mpi3_driver_info_layout - Information about driver
+ *
+ * @information_length: Length of this structure in bytes
+ * @driver_signature: Driver Vendor name
+ * @os_name: Operating System Name
+ * @os_version: Operating System Version
+ * @driver_name: Driver name
+ * @driver_version: Driver version
+ * @driver_release_date: Driver release date
+ * @driver_capabilities: Driver capabilities
+ */
+struct mpi3_driver_info_layout {
+ __le32 information_length;
+ __u8 driver_signature[12];
+ __u8 os_name[16];
+ __u8 os_version[12];
+ __u8 driver_name[20];
+ __u8 driver_version[32];
+ __u8 driver_release_date[20];
+ __le32 driver_capabilities;
+};
+
+/**
+ * struct mpi3mr_bsg_in_adpinfo - Adapter information request
+ * data returned by the driver.
+ *
+ * @adp_type: Adapter type
+ * @rsvd1: Reserved
+ * @pci_dev_id: PCI device ID of the adapter
+ * @pci_dev_hw_rev: PCI revision of the adapter
+ * @pci_subsys_dev_id: PCI subsystem device ID of the adapter
+ * @pci_subsys_ven_id: PCI subsystem vendor ID of the adapter
+ * @pci_dev: PCI device
+ * @pci_func: PCI function
+ * @pci_bus: PCI bus
+ * @rsvd2: Reserved
+ * @pci_seg_id: PCI segment ID
+ * @app_intfc_ver: version of the application interface definition
+ * @adp_state: Adapter state
+ * @rsvd3: Reserved
+ * @rsvd4: Reserved
+ * @rsvd5: Reserved
+ * @driver_info: Driver Information (Version/Name)
+ */
+struct mpi3mr_bsg_in_adpinfo {
+ __u32 adp_type;
+ __u32 rsvd1;
+ __u32 pci_dev_id;
+ __u32 pci_dev_hw_rev;
+ __u32 pci_subsys_dev_id;
+ __u32 pci_subsys_ven_id;
+ __u32 pci_dev:5;
+ __u32 pci_func:3;
+ __u32 pci_bus:8;
+ __u16 rsvd2;
+ __u32 pci_seg_id;
+ __u32 app_intfc_ver;
+ __u8 adp_state;
+ __u8 rsvd3;
+ __u16 rsvd4;
+ __u32 rsvd5[2];
+ struct mpi3_driver_info_layout driver_info;
+};
+
+/**
+ * struct mpi3mr_bsg_adp_reset - Adapter reset request
+ * payload data to the driver.
+ *
+ * @reset_type: Reset type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_adp_reset {
+ __u8 reset_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+
+/**
+ * struct mpi3mr_change_count - Topology change count
+ * returned by the driver.
+ *
+ * @change_count: Topology change count
+ * @rsvd: Reserved
+ */
+struct mpi3mr_change_count {
+ __u16 change_count;
+ __u16 rsvd;
+};
+
+/**
+ * struct mpi3mr_device_map_info - Target device mapping
+ * information
+ *
+ * @handle: Firmware device handle
+ * @perst_id: Persistent ID assigned by the firmware
+ * @target_id: Target ID assigned by the driver
+ * @bus_id: Bus ID assigned by the driver
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_device_map_info {
+ __u16 handle;
+ __u16 perst_id;
+ __u32 target_id;
+ __u8 bus_id;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+
+/**
+ * struct mpi3mr_all_tgt_info - Target device mapping
+ * information returned by the driver
+ *
+ * @num_devices: The number of devices in driver's inventory
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @dmi: Variable length array of mapping information of targets
+ */
+struct mpi3mr_all_tgt_info {
+ __u16 num_devices;
+ __u16 rsvd1;
+ __u32 rsvd2;
+ struct mpi3mr_device_map_info dmi[1];
+};
+
+/**
+ * struct mpi3mr_logdata_enable - Number of log data
+ * entries saved by the driver, returned as payload data for
+ * the enable logdata BSG request.
+ *
+ * @max_entries: Number of log data entries cached by the driver
+ * @rsvd: Reserved
+ */
+struct mpi3mr_logdata_enable {
+ __u16 max_entries;
+ __u16 rsvd;
+};
+
+/**
+ * struct mpi3mr_bsg_out_pel_enable - PEL enable request payload
+ * data to the driver.
+ *
+ * @pel_locale: PEL locale to the firmware
+ * @pel_class: PEL class to the firmware
+ * @rsvd: Reserved
+ */
+struct mpi3mr_bsg_out_pel_enable {
+ __u16 pel_locale;
+ __u8 pel_class;
+ __u8 rsvd;
+};
+
+/**
+ * struct mpi3mr_logdata_entry - Log data entry cached by the
+ * driver.
+ *
+ * @valid_entry: Is the entry valid
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @data: Variable length Log entry data
+ */
+struct mpi3mr_logdata_entry {
+ __u8 valid_entry;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u8 data[1]; /* Variable length Array */
+};
+
+/**
+ * struct mpi3mr_bsg_in_log_data - Log data entries saved by
+ * the driver, returned as payload data for the get logdata
+ * request.
+ *
+ * @entry: Variable length Log data entry array
+ */
+struct mpi3mr_bsg_in_log_data {
+ struct mpi3mr_logdata_entry entry[1];
+};
+
+/**
+ * struct mpi3mr_hdb_entry - host diag buffer entry.
+ *
+ * @buf_type: Buffer type
+ * @status: Buffer status
+ * @trigger_type: Trigger type
+ * @rsvd1: Reserved
+ * @size: Buffer size
+ * @rsvd2: Reserved
+ * @trigger_data: Trigger specific data
+ * @rsvd3: Reserved
+ * @rsvd4: Reserved
+ */
+struct mpi3mr_hdb_entry {
+ __u8 buf_type;
+ __u8 status;
+ __u8 trigger_type;
+ __u8 rsvd1;
+ __u16 size;
+ __u16 rsvd2;
+ __u64 trigger_data;
+ __u32 rsvd3;
+ __u32 rsvd4;
+};
+
+
+/**
+ * struct mpi3mr_bsg_in_hdb_status - This structure contains
+ * return data for the BSG request to retrieve the number of host
+ * diagnostic buffers supported by the driver and their current
+ * status, and any additional status-specific data, in the form
+ * of multiple hdb entries.
+ *
+ * @num_hdb_types: Number of host diag buffer types supported
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @entry: Variable length Diag buffer status entry array
+ */
+struct mpi3mr_bsg_in_hdb_status {
+ __u8 num_hdb_types;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 rsvd3;
+ struct mpi3mr_hdb_entry entry[1];
+};
+
+/**
+ * struct mpi3mr_bsg_out_repost_hdb - Repost host diagnostic
+ * buffer request payload data to the driver.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_out_repost_hdb {
+ __u8 buf_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+
+/**
+ * struct mpi3mr_bsg_out_upload_hdb - Upload host diagnostic
+ * buffer request payload data to the driver.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @start_offset: Start offset of the buffer from where to copy
+ * @length: Length of the buffer to copy
+ */
+struct mpi3mr_bsg_out_upload_hdb {
+ __u8 buf_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 start_offset;
+ __u32 length;
+};
+
+/**
+ * struct mpi3mr_bsg_out_refresh_hdb_triggers - Refresh host
+ * diagnostic buffer triggers request payload data to the driver.
+ *
+ * @page_type: Page type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_out_refresh_hdb_triggers {
+ __u8 page_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+};
+/**
+ * struct mpi3mr_bsg_drv_cmd - Generic bsg data
+ * structure for all driver specific requests.
+ *
+ * @mrioc_id: Controller ID
+ * @opcode: Driver specific opcode
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_drv_cmd {
+ __u8 mrioc_id;
+ __u8 opcode;
+ __u16 rsvd1;
+ __u32 rsvd2[4];
+};
+/**
+ * struct mpi3mr_bsg_in_reply_buf - MPI reply buffer returned
+ * for an MPI Passthrough request.
+ *
+ * @mpi_reply_type: Type of MPI reply
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @reply_buf: Variable Length buffer based on mpirep type
+ */
+struct mpi3mr_bsg_in_reply_buf {
+ __u8 mpi_reply_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u8 reply_buf[1];
+};
+
+/**
+ * struct mpi3mr_buf_entry - User buffer descriptor for MPI
+ * Passthrough requests.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @buf_len: Buffer length
+ */
+struct mpi3mr_buf_entry {
+ __u8 buf_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 buf_len;
+};
+/**
+ * struct mpi3mr_bsg_buf_entry_list - list of user buffer
+ * descriptor for MPI Passthrough requests.
+ *
+ * @num_of_entries: Number of buffer descriptors
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @buf_entry: Variable length array of buffer descriptors
+ */
+struct mpi3mr_buf_entry_list {
+ __u8 num_of_entries;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 rsvd3;
+ struct mpi3mr_buf_entry buf_entry[1];
+};
+/**
+ * struct mpi3mr_bsg_mptcmd - Generic bsg data
+ * structure for all MPI Passthrough requests.
+ *
+ * @mrioc_id: Controller ID
+ * @rsvd1: Reserved
+ * @timeout: MPI request timeout
+ * @rsvd2: Reserved
+ * @buf_entry_list: Buffer descriptor list
+ */
+struct mpi3mr_bsg_mptcmd {
+ __u8 mrioc_id;
+ __u8 rsvd1;
+ __u16 timeout;
+ __u32 rsvd2;
+ struct mpi3mr_buf_entry_list buf_entry_list;
+};
+
+/**
+ * struct mpi3mr_bsg_packet - Generic bsg data
+ * structure for all supported requests.
+ *
+ * @cmd_type: represents drvrcmd or mptcmd
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @drvrcmd: driver request structure
+ * @mptcmd: mpt request structure
+ */
+struct mpi3mr_bsg_packet {
+ __u8 cmd_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+ __u32 rsvd3;
+ union {
+ struct mpi3mr_bsg_drv_cmd drvrcmd;
+ struct mpi3mr_bsg_mptcmd mptcmd;
+ } cmd;
+};
+
+struct mpi3_nvme_encapsulated_request {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 encapsulated_command_length;
+ __le16 flags;
+ __le32 data_length;
+ __le32 reserved14[3];
+ __le32 command[];
+};
+
+struct mpi3_nvme_encapsulated_error_reply {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 nvme_completion_entry[4];
+};
+
+#define MPI3MR_NVME_PRP_SIZE 8 /* PRP size */
+#define MPI3MR_NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
+#define MPI3MR_NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
+#define MPI3MR_NVME_CMD_SGL_OFFSET 24 /* SGL offset in NVMe cmd */
+#define MPI3MR_NVME_DATA_FORMAT_PRP 0
+#define MPI3MR_NVME_DATA_FORMAT_SGL1 1
+#define MPI3MR_NVME_DATA_FORMAT_SGL2 2
+#define MPI3MR_NVMESGL_DATA_SEGMENT 0x00
+#define MPI3MR_NVMESGL_LAST_SEGMENT 0x03
+
+/* MPI3: task management related definitions */
+struct mpi3_scsi_task_mgmt_request {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 task_host_tag;
+ __u8 task_type;
+ __u8 reserved0f;
+ __le16 task_request_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ __u8 lun[8];
+};
+
+#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
+#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
+#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
+struct mpi3_scsi_task_mgmt_reply {
+ __le16 host_tag;
+ __u8 ioc_use_only02;
+ __u8 function;
+ __le16 ioc_use_only04;
+ __u8 ioc_use_only06;
+ __u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 termination_count;
+ __le32 response_data;
+ __le32 reserved18;
+};
+
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
+#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
+
+/* MPI3: PEL related definitions */
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
+
+/* MPI3: Function definitions */
+#define MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH (0x0a)
+#define MPI3_BSG_FUNCTION_SCSI_IO (0x20)
+#define MPI3_BSG_FUNCTION_SCSI_TASK_MGMT (0x21)
+#define MPI3_BSG_FUNCTION_SMP_PASSTHROUGH (0x22)
+#define MPI3_BSG_FUNCTION_NVME_ENCAPSULATED (0x24)
+
+#endif
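
These structures travel over the driver's bsg node wrapped in the sg_io_v4 request format: the mpi3mr_bsg_packet goes in the request buffer, and driver-specific output comes back through the data-in buffer. A hedged sketch of fetching adapter info; the device path is an example (the real node name depends on the controller instance the kernel assigns), and error handling is minimal:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>			/* SG_IO */
	#include <scsi/scsi_bsg_mpi3mr.h>

	static int get_adpinfo(struct mpi3mr_bsg_in_adpinfo *info)
	{
		struct mpi3mr_bsg_packet pkt;
		struct sg_io_v4 io;
		int fd, ret;

		fd = open("/dev/bsg/mpi3mrctl0", O_RDWR); /* example node */
		if (fd < 0)
			return -1;

		memset(&pkt, 0, sizeof(pkt));
		pkt.cmd_type = MPI3MR_DRV_CMD;
		pkt.cmd.drvrcmd.mrioc_id = 0;
		pkt.cmd.drvrcmd.opcode = MPI3MR_DRVBSG_OPCODE_ADPINFO;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request_len = sizeof(pkt);
		io.request = (__u64)(uintptr_t)&pkt;
		io.din_xfer_len = sizeof(*info);
		io.din_xferp = (__u64)(uintptr_t)info;
		io.timeout = MPI3MR_APP_DEFAULT_TIMEOUT * 1000; /* ms */

		ret = ioctl(fd, SG_IO, &io);
		close(fd);
		return ret;
	}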
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index d55f2176dfd4..03f2beadf201 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -8,26 +8,89 @@
#ifndef SCSI_BSG_UFS_H
#define SCSI_BSG_UFS_H
+#include <asm/byteorder.h>
#include <linux/types.h>
/*
 * This file is intended to be included by both kernel and user space
*/
#define UFS_CDB_SIZE 16
-#define UPIU_TRANSACTION_UIC_CMD 0x1F
/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+enum ufs_bsg_msg_code {
+ UPIU_TRANSACTION_UIC_CMD = 0x1F,
+ UPIU_TRANSACTION_ARPMB_CMD,
+};
+
+/* UFS RPMB Request Message Types */
+enum ufs_rpmb_op_type {
+ UFS_RPMB_WRITE_KEY = 0x01,
+ UFS_RPMB_READ_CNT = 0x02,
+ UFS_RPMB_WRITE = 0x03,
+ UFS_RPMB_READ = 0x04,
+ UFS_RPMB_READ_RESP = 0x05,
+ UFS_RPMB_SEC_CONF_WRITE = 0x06,
+ UFS_RPMB_SEC_CONF_READ = 0x07,
+ UFS_RPMB_PURGE_ENABLE = 0x08,
+ UFS_RPMB_PURGE_STATUS_READ = 0x09,
+};
+
/**
* struct utp_upiu_header - UPIU header structure
* @dword_0: UPIU header DW-0
* @dword_1: UPIU header DW-1
* @dword_2: UPIU header DW-2
+ *
+ * @transaction_code: Type of request or response. See also enum
+ * upiu_request_transaction and enum upiu_response_transaction.
+ * @flags: UPIU flags. The meaning of individual flags depends on the
+ * transaction code.
+ * @lun: Logical unit number.
+ * @task_tag: Task tag.
+ * @iid: Initiator ID.
+ * @command_set_type: 0 for SCSI command set; 1 for UFS specific.
+ * @tm_function: Task management function in case of a task management request
+ * UPIU.
+ * @query_function: Query function in case of a query request UPIU.
+ * @response: 0 for success; 1 for failure.
+ * @status: SCSI status if this is the header of a response to a SCSI command.
+ * @ehs_length: EHS length in units of 32 bytes.
+ * @device_information: Device information.
+ * @data_segment_length: data segment length.
*/
struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
+ union {
+ struct {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+ };
+ struct {
+ __u8 transaction_code;
+ __u8 flags;
+ __u8 lun;
+ __u8 task_tag;
+#if defined(__BIG_ENDIAN)
+ __u8 iid: 4;
+ __u8 command_set_type: 4;
+#elif defined(__LITTLE_ENDIAN)
+ __u8 command_set_type: 4;
+ __u8 iid: 4;
+#else
+#error
+#endif
+ union {
+ __u8 tm_function;
+ __u8 query_function;
+ } __attribute__((packed));
+ __u8 response;
+ __u8 status;
+ __u8 ehs_length;
+ __u8 device_information;
+ __be16 data_segment_length;
+ };
+ };
};
/**
@@ -54,6 +117,31 @@ struct utp_upiu_query {
};
/**
+ * struct utp_upiu_query_v4_0 - upiu request buffer structure for
+ * query request >= UFS 4.0 spec.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @osf3: spec field B-4
+ * @osf4: spec field B-5
+ * @osf5: spec field B 6,7
+ * @osf6: spec field DW 8,9
+ * @osf7: spec field DW 10,11
+ * @reserved: reserved
+ */
+struct utp_upiu_query_v4_0 {
+ __u8 opcode;
+ __u8 idn;
+ __u8 index;
+ __u8 selector;
+ __u8 osf3;
+ __u8 osf4;
+ __be16 osf5;
+ __be32 osf6;
+ __be32 osf7;
+ __be32 reserved;
+};
+
+/**
* struct utp_upiu_cmd - Command UPIU structure
* @data_transfer_len: Data Transfer Length DW-3
* @cdb: Command Descriptor Block CDB DW-4 to DW-7
@@ -79,6 +167,23 @@ struct utp_upiu_req {
};
};
+struct ufs_arpmb_meta {
+ __be16 req_resp_type;
+ __u8 nonce[16];
+ __be32 write_counter;
+ __be16 addr_lun;
+ __be16 block_count;
+ __be16 result;
+} __attribute__((__packed__));
+
+struct ufs_ehs {
+ __u8 length;
+ __u8 ehs_type;
+ __be16 ehssub_type;
+ struct ufs_arpmb_meta meta;
+ __u8 mac_key[32];
+} __attribute__((__packed__));
+
/* request (CDB) structure of the sg_io_v4 */
struct ufs_bsg_request {
__u32 msgcode;
@@ -95,11 +200,21 @@ struct ufs_bsg_reply {
* msg and status fields. The per-msgcode reply structure
* will contain valid data.
*/
- __u32 result;
+ int result;
/* If there was reply_payload, how much was received? */
__u32 reply_payload_rcv_len;
struct utp_upiu_req upiu_rsp;
};
+
+struct ufs_rpmb_request {
+ struct ufs_bsg_request bsg_request;
+ struct ufs_ehs ehs_req;
+};
+
+struct ufs_rpmb_reply {
+ struct ufs_bsg_reply bsg_reply;
+ struct ufs_ehs ehs_rsp;
+};
#endif /* UFS_BSG_H */
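
The reworked utp_upiu_header union lets callers address the header either as the original three big-endian dwords or through named fields, with the endian-conditional bitfields preserving the on-wire byte layout. A hedged sketch of filling the header of a Query Request UPIU through the named-field view (0x16 is the Query Request transaction code and 0x01 the standard read query function in the UFS spec; the tag value is arbitrary):

	#include <string.h>
	#include <scsi/scsi_bsg_ufs.h>

	static void init_query_header(struct utp_upiu_header *h, __u8 tag)
	{
		memset(h, 0, sizeof(*h));
		h->transaction_code = 0x16;	/* Query Request UPIU */
		h->lun = 0;
		h->task_tag = tag;
		h->query_function = 0x01;	/* standard read request */
		/* Multi-byte fields stay big-endian on the wire: */
		h->data_segment_length = __cpu_to_be16(0);
	}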
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index 7535253f1a96..b46e9cbeb001 100644
--- a/include/uapi/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
@@ -35,7 +35,7 @@
* FC Transport Broadcast Event Message :
* FC_NL_ASYNC_EVENT
*
- * Note: if Vendor Unique message, &event_data will be start of
+ * Note: if Vendor Unique message, event_data_flex will be start of
* vendor unique payload, and the length of the payload is
* per event_datalen
*
@@ -50,7 +50,10 @@ struct fc_nl_event {
__u16 event_datalen;
__u32 event_num;
__u32 event_code;
- __u32 event_data;
+ union {
+ __u32 event_data;
+ __DECLARE_FLEX_ARRAY(__u8, event_data_flex);
+ };
} __attribute__((aligned(sizeof(__u64))));
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index a75e14edc957..c85fdd8895d8 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -3,22 +3,6 @@
* Main header file for the ALSA sequencer
* Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
* (c) 1998-1999 by Jaroslav Kysela <perex@perex.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASEQUENCER_H
#define _UAPI__SOUND_ASEQUENCER_H
@@ -26,7 +10,7 @@
#include <sound/asound.h>
/** version of the sequencer */
-#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 2)
+#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 3)
/**
* definition of sequencer event types
@@ -190,6 +174,7 @@ struct snd_seq_connect {
#define SNDRV_SEQ_PRIORITY_HIGH (1<<4) /* event should be processed before others */
#define SNDRV_SEQ_PRIORITY_MASK (1<<4)
+#define SNDRV_SEQ_EVENT_UMP (1<<5) /* event holds a UMP packet */
/* note event */
struct snd_seq_ev_note {
@@ -222,7 +207,7 @@ struct snd_seq_ev_raw32 {
struct snd_seq_ev_ext {
unsigned int len; /* length of data */
void *ptr; /* pointer to data (note: maybe 64-bit) */
-} __attribute__((packed));
+} __packed;
struct snd_seq_result {
int event; /* processed event type */
@@ -266,8 +251,21 @@ struct snd_seq_ev_quote {
struct snd_seq_addr origin; /* original sender */
unsigned short value; /* optional data */
struct snd_seq_event *event; /* quoted event */
-} __attribute__((packed));
-
+} __packed;
+
+union snd_seq_event_data { /* event data... */
+ struct snd_seq_ev_note note;
+ struct snd_seq_ev_ctrl control;
+ struct snd_seq_ev_raw8 raw8;
+ struct snd_seq_ev_raw32 raw32;
+ struct snd_seq_ev_ext ext;
+ struct snd_seq_ev_queue_control queue;
+ union snd_seq_timestamp time;
+ struct snd_seq_addr addr;
+ struct snd_seq_connect connect;
+ struct snd_seq_result result;
+ struct snd_seq_ev_quote quote;
+};
/* sequencer event */
struct snd_seq_event {
@@ -278,25 +276,27 @@ struct snd_seq_event {
unsigned char queue; /* schedule queue */
union snd_seq_timestamp time; /* schedule time */
-
struct snd_seq_addr source; /* source address */
struct snd_seq_addr dest; /* destination address */
- union { /* event data... */
- struct snd_seq_ev_note note;
- struct snd_seq_ev_ctrl control;
- struct snd_seq_ev_raw8 raw8;
- struct snd_seq_ev_raw32 raw32;
- struct snd_seq_ev_ext ext;
- struct snd_seq_ev_queue_control queue;
- union snd_seq_timestamp time;
- struct snd_seq_addr addr;
- struct snd_seq_connect connect;
- struct snd_seq_result result;
- struct snd_seq_ev_quote quote;
- } data;
+ union snd_seq_event_data data;
};
+ /* (compatible) event for UMP-capable clients */
+struct snd_seq_ump_event {
+ snd_seq_event_type_t type; /* event type */
+ unsigned char flags; /* event flags */
+ char tag;
+ unsigned char queue; /* schedule queue */
+ union snd_seq_timestamp time; /* schedule time */
+ struct snd_seq_addr source; /* source address */
+ struct snd_seq_addr dest; /* destination address */
+
+ union {
+ union snd_seq_event_data data;
+ unsigned int ump[4];
+ };
+};
/*
* bounce event - stored as variable size data
@@ -344,10 +344,11 @@ typedef int __bitwise snd_seq_client_type_t;
#define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2)
/* event filter flags */
-#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */
-#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */
-#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
-#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
+#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */
+#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */
+#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */
+#define SNDRV_SEQ_FILTER_NO_CONVERT (1U<<30) /* don't convert UMP events */
+#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */
struct snd_seq_client_info {
int client; /* client number to inquire */
@@ -360,9 +361,18 @@ struct snd_seq_client_info {
int event_lost; /* number of lost events */
int card; /* RO: card number[kernel] */
int pid; /* RO: pid[user] */
- char reserved[56]; /* for future use */
+ unsigned int midi_version; /* MIDI version */
+ unsigned int group_filter; /* UMP group filter bitmap
+ * (bit 0 = groupless messages,
+ * bit 1-16 = messages for groups 1-16)
+ */
+ char reserved[48]; /* for future use */
};
+/* MIDI version numbers in client info */
+#define SNDRV_SEQ_CLIENT_LEGACY_MIDI 0 /* Legacy client */
+#define SNDRV_SEQ_CLIENT_UMP_MIDI_1_0 1 /* UMP MIDI 1.0 */
+#define SNDRV_SEQ_CLIENT_UMP_MIDI_2_0 2 /* UMP MIDI 2.0 */
/* client pool size */
struct snd_seq_client_pool {
@@ -422,6 +432,8 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_CAP_SUBS_READ (1<<5) /* allow read subscription */
#define SNDRV_SEQ_PORT_CAP_SUBS_WRITE (1<<6) /* allow write subscription */
#define SNDRV_SEQ_PORT_CAP_NO_EXPORT (1<<7) /* routing not allowed */
+#define SNDRV_SEQ_PORT_CAP_INACTIVE (1<<8) /* inactive port */
+#define SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT (1<<9) /* MIDI 2.0 UMP Endpoint port */
/* port type */
#define SNDRV_SEQ_PORT_TYPE_SPECIFIC (1<<0) /* hardware specific */
@@ -431,6 +443,7 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_TYPE_MIDI_XG (1<<4) /* XG compatible device */
#define SNDRV_SEQ_PORT_TYPE_MIDI_MT32 (1<<5) /* MT-32 compatible device */
#define SNDRV_SEQ_PORT_TYPE_MIDI_GM2 (1<<6) /* General MIDI 2 compatible device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_UMP (1<<7) /* UMP */
/* other standards...*/
#define SNDRV_SEQ_PORT_TYPE_SYNTH (1<<10) /* Synth device (no MIDI compatible - direct wavetable) */
@@ -448,6 +461,12 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_FLG_TIMESTAMP (1<<1)
#define SNDRV_SEQ_PORT_FLG_TIME_REAL (1<<2)
+/* port direction */
+#define SNDRV_SEQ_PORT_DIR_UNKNOWN 0
+#define SNDRV_SEQ_PORT_DIR_INPUT 1
+#define SNDRV_SEQ_PORT_DIR_OUTPUT 2
+#define SNDRV_SEQ_PORT_DIR_BIDIRECTION 3
+
struct snd_seq_port_info {
struct snd_seq_addr addr; /* client/port numbers */
char name[64]; /* port name */
@@ -464,7 +483,9 @@ struct snd_seq_port_info {
void *kernel; /* reserved for kernel use (must be NULL) */
unsigned int flags; /* misc. conditioning */
unsigned char time_queue; /* queue # for timestamping */
- char reserved[59]; /* for future use */
+ unsigned char direction; /* port usage direction (r/w/bidir) */
+ unsigned char ump_group; /* 0 = UMP EP (no conversion), 1-16 = UMP group number */
+ char reserved[57]; /* for future use */
};
@@ -568,6 +589,18 @@ struct snd_seq_query_subs {
char reserved[64]; /* for future use */
};
+/*
+ * UMP-specific information
+ */
+/* type of UMP info query */
+#define SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT 0
+#define SNDRV_SEQ_CLIENT_UMP_INFO_BLOCK 1
+
+struct snd_seq_client_ump_info {
+ int client; /* client number to inquire/set */
+ int type; /* type to inquire/set */
+ unsigned char info[512]; /* info (either UMP ep or block info) */
+} __packed;
/*
* IOCTL commands
@@ -577,9 +610,12 @@ struct snd_seq_query_subs {
#define SNDRV_SEQ_IOCTL_CLIENT_ID _IOR ('S', 0x01, int)
#define SNDRV_SEQ_IOCTL_SYSTEM_INFO _IOWR('S', 0x02, struct snd_seq_system_info)
#define SNDRV_SEQ_IOCTL_RUNNING_MODE _IOWR('S', 0x03, struct snd_seq_running_info)
+#define SNDRV_SEQ_IOCTL_USER_PVERSION _IOW('S', 0x04, int)
#define SNDRV_SEQ_IOCTL_GET_CLIENT_INFO _IOWR('S', 0x10, struct snd_seq_client_info)
#define SNDRV_SEQ_IOCTL_SET_CLIENT_INFO _IOW ('S', 0x11, struct snd_seq_client_info)
+#define SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO _IOWR('S', 0x12, struct snd_seq_client_ump_info)
+#define SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO _IOWR('S', 0x13, struct snd_seq_client_ump_info)
#define SNDRV_SEQ_IOCTL_CREATE_PORT _IOWR('S', 0x20, struct snd_seq_port_info)
#define SNDRV_SEQ_IOCTL_DELETE_PORT _IOW ('S', 0x21, struct snd_seq_port_info)
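
A UMP-capable client first declares its protocol version with SNDRV_SEQ_IOCTL_USER_PVERSION and can then exchange struct snd_seq_ump_event records with the SNDRV_SEQ_EVENT_UMP flag set. A hedged sketch against an already-open /dev/snd/seq fd; queue, source and destination setup are omitted, and a full client would also set midi_version in snd_seq_client_info:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sound/asequencer.h>

	static int send_ump_noop(int seq_fd)
	{
		int ver = SNDRV_SEQ_VERSION;	/* 1.0.3: UMP-aware protocol */
		struct snd_seq_ump_event ev;

		/* Without this the kernel keeps treating us as legacy. */
		if (ioctl(seq_fd, SNDRV_SEQ_IOCTL_USER_PVERSION, &ver) < 0)
			return -1;

		memset(&ev, 0, sizeof(ev));
		ev.flags = SNDRV_SEQ_EVENT_UMP;	/* data holds a UMP packet */
		ev.ump[0] = 0;			/* UMP utility NOOP word */

		return write(seq_fd, &ev, sizeof(ev)) == sizeof(ev) ? 0 : -1;
	}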
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index a74ca232f1fc..10851bca7174 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -5,10 +5,6 @@
* Copyright (C) 2012 Texas Instruments Inc.
* Copyright (C) 2015 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
* algorithms, equalisers, DAIs, widgets etc.
*/
@@ -170,16 +166,22 @@
#define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3)
/* DAI topology BCLK parameter
- * For the backwards capability, by default codec is bclk master
+ * For backward compatibility, by default the codec is the bclk provider
*/
-#define SND_SOC_TPLG_BCLK_CM 0 /* codec is bclk master */
-#define SND_SOC_TPLG_BCLK_CS 1 /* codec is bclk slave */
+#define SND_SOC_TPLG_BCLK_CP 0 /* codec is bclk provider */
+#define SND_SOC_TPLG_BCLK_CC 1 /* codec is bclk consumer */
+/* keep previous definitions for compatibility */
+#define SND_SOC_TPLG_BCLK_CM SND_SOC_TPLG_BCLK_CP
+#define SND_SOC_TPLG_BCLK_CS SND_SOC_TPLG_BCLK_CC
/* DAI topology FSYNC parameter
- * For the backwards capability, by default codec is fsync master
+ * For backward compatibility, the codec is the fsync provider by default
*/
-#define SND_SOC_TPLG_FSYNC_CM 0 /* codec is fsync master */
-#define SND_SOC_TPLG_FSYNC_CS 1 /* codec is fsync slave */
+#define SND_SOC_TPLG_FSYNC_CP 0 /* codec is fsync provider */
+#define SND_SOC_TPLG_FSYNC_CC 1 /* codec is fsync consumer */
+/* keep previous definitions for compatibility */
+#define SND_SOC_TPLG_FSYNC_CM SND_SOC_TPLG_FSYNC_CP
+#define SND_SOC_TPLG_FSYNC_CS SND_SOC_TPLG_FSYNC_CC
/*
* Block Header.
@@ -220,9 +222,9 @@ struct snd_soc_tplg_vendor_array {
__le32 type; /* SND_SOC_TPLG_TUPLE_TYPE_ */
__le32 num_elems; /* number of elements in array */
union {
- struct snd_soc_tplg_vendor_uuid_elem uuid[0];
- struct snd_soc_tplg_vendor_value_elem value[0];
- struct snd_soc_tplg_vendor_string_elem string[0];
+ __DECLARE_FLEX_ARRAY(struct snd_soc_tplg_vendor_uuid_elem, uuid);
+ __DECLARE_FLEX_ARRAY(struct snd_soc_tplg_vendor_value_elem, value);
+ __DECLARE_FLEX_ARRAY(struct snd_soc_tplg_vendor_string_elem, string);
};
} __attribute__((packed));
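For reference, __DECLARE_FLEX_ARRAY() comes from <linux/stddef.h>: a standard C99 flexible array member may not appear directly inside a union, so the helper wraps it in an anonymous struct with an empty leading member. Its shape is roughly (abridged sketch, not copied from this patch):

#define __DECLARE_FLEX_ARRAY(TYPE, NAME)	\
	struct {				\
		struct { } __empty_ ## NAME;	\
		TYPE NAME[];			\
	}

Unlike the old zero-length [0] GNU extension, a true flexible array lets the compiler's bounds checking (e.g. FORTIFY_SOURCE, -fstrict-flex-arrays) see the trailing array for what it is.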
@@ -234,8 +236,8 @@ struct snd_soc_tplg_vendor_array {
struct snd_soc_tplg_private {
__le32 size; /* in bytes of private data */
union {
- char data[0];
- struct snd_soc_tplg_vendor_array array[0];
+ __DECLARE_FLEX_ARRAY(char, data);
+ __DECLARE_FLEX_ARRAY(struct snd_soc_tplg_vendor_array, array);
};
} __attribute__((packed));
@@ -336,8 +338,8 @@ struct snd_soc_tplg_hw_config {
__u8 clock_gated; /* SND_SOC_TPLG_DAI_CLK_GATE_ value */
__u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */
__u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */
- __u8 bclk_master; /* SND_SOC_TPLG_BCLK_ value */
- __u8 fsync_master; /* SND_SOC_TPLG_FSYNC_ value */
+ __u8 bclk_provider; /* SND_SOC_TPLG_BCLK_ value */
+ __u8 fsync_provider; /* SND_SOC_TPLG_FSYNC_ value */
__u8 mclk_direction; /* SND_SOC_TPLG_MCLK_ value */
__le16 reserved; /* for 32bit alignment */
__le32 mclk_rate; /* MCLK or SYSCLK frequency in Hz */
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 535a7229e1d9..628d46a0da92 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -3,22 +3,6 @@
* Advanced Linux Sound Architecture - ALSA - Driver
* Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
* Abramo Bagnara <abramo@alsa-project.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASOUND_H
@@ -56,8 +40,10 @@
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
@@ -156,7 +142,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 15)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -202,6 +188,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, the 'msbits' hardware parameter is often used to deliver information about
+ * the available bit count in the most significant bits. It covers the case of so-called
+ * 'left-justified' or 'right-padded' samples which have less width than 32 bits.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
@@ -276,13 +267,17 @@ typedef int __bitwise snd_pcm_format_t;
typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0)
-#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_STD
+#define SNDRV_PCM_SUBFORMAT_MSBITS_MAX ((__force snd_pcm_subformat_t) 1)
+#define SNDRV_PCM_SUBFORMAT_MSBITS_20 ((__force snd_pcm_subformat_t) 2)
+#define SNDRV_PCM_SUBFORMAT_MSBITS_24 ((__force snd_pcm_subformat_t) 3)
+#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_MSBITS_24
#define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */
#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */
#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */
#define SNDRV_PCM_INFO_SYNC_APPLPTR 0x00000020 /* need the explicit sync of appl_ptr update */
+#define SNDRV_PCM_INFO_PERFECT_DRAIN 0x00000040 /* silencing at the end of stream is not required */
#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */
#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */
#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */
@@ -299,7 +294,8 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
-
+#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
+#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
@@ -391,6 +387,9 @@ typedef int snd_pcm_hw_param_t;
#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */
#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */
#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */
+#define SNDRV_PCM_HW_PARAMS_NO_DRAIN_SILENCE (1<<3) /* suppress the filling of silence
+ * samples during drain
+ */
struct snd_interval {
unsigned int min, max;
@@ -417,7 +416,7 @@ struct snd_pcm_hw_params {
unsigned int rmask; /* W: requested masks */
unsigned int cmask; /* R: changed masks */
unsigned int info; /* R: Info flags for returned setup */
- unsigned int msbits; /* R: used most significant bits */
+ unsigned int msbits; /* R: used most significant bits (in sample bit-width) */
unsigned int rate_num; /* R: rate numerator */
unsigned int rate_den; /* R: rate denominator */
snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
@@ -437,9 +436,14 @@ struct snd_pcm_sw_params {
snd_pcm_uframes_t avail_min; /* min avail frames for wakeup */
snd_pcm_uframes_t xfer_align; /* obsolete: xfer size needs to be a multiple */
snd_pcm_uframes_t start_threshold; /* min hw_avail frames for automatic start */
- snd_pcm_uframes_t stop_threshold; /* min avail frames for automatic stop */
- snd_pcm_uframes_t silence_threshold; /* min distance from noise for silence filling */
- snd_pcm_uframes_t silence_size; /* silence block size */
+ /*
+ * The following two thresholds alleviate playback buffer underruns; when
+ * hw_avail drops below the threshold, the respective action is triggered:
+ */
+ snd_pcm_uframes_t stop_threshold; /* - stop playback */
+ snd_pcm_uframes_t silence_threshold; /* - pre-fill buffer with silence */
+ snd_pcm_uframes_t silence_size; /* max size of silence pre-fill; when >= boundary,
+ * fill played area with silence immediately */
snd_pcm_uframes_t boundary; /* pointers wrap point */
unsigned int proto; /* protocol version */
unsigned int tstamp_type; /* timestamp type (req. proto >= 2.0.12) */
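A sketch of how the thresholds interact for playback; the helper name and the one-period values are illustrative assumptions, not from the patch:

#include <string.h>
#include <sound/asound.h>

/* Hypothetical helper: pick underrun-related thresholds for a playback
 * stream. buffer_size and period_size are in frames. */
static void example_playback_sw_params(struct snd_pcm_sw_params *sw,
				       snd_pcm_uframes_t buffer_size,
				       snd_pcm_uframes_t period_size)
{
	memset(sw, 0, sizeof(*sw));
	sw->avail_min = period_size;		/* wake up once a period can be written */
	sw->start_threshold = buffer_size;	/* auto-start once the buffer is filled */
	sw->stop_threshold = buffer_size;	/* stop once the whole buffer has drained */
	sw->silence_threshold = period_size;	/* silence-fill when < one period is queued */
	sw->silence_size = period_size;		/* pre-fill at most one period of silence */
}

Setting silence_size >= boundary (with silence_threshold = 0) instead makes the driver silence the already-played area immediately, so an underrun replays silence rather than stale samples.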
@@ -578,7 +582,8 @@ struct __snd_pcm_mmap_status64 {
struct __snd_pcm_mmap_control64 {
__pad_before_uframe __pad1;
snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
- __pad_before_uframe __pad2;
+ __pad_before_uframe __pad2; // This should be __pad_after_uframe, but binary
+ // backwards compatibility constraints prevent a fix.
__pad_before_uframe __pad3;
snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
@@ -710,7 +715,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -721,6 +726,7 @@ enum {
#define SNDRV_RAWMIDI_INFO_OUTPUT 0x00000001
#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002
#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004
+#define SNDRV_RAWMIDI_INFO_UMP 0x00000008
struct snd_rawmidi_info {
unsigned int device; /* RO/WR (control): device number */
@@ -736,12 +742,38 @@ struct snd_rawmidi_info {
unsigned char reserved[64]; /* reserved for future use */
};
+#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0
+#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3
+#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3)
+
+#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16
+
+struct snd_rawmidi_framing_tstamp {
+ /* For now, frame_type is always 0. MIDI 2.0 is expected to add new
+ * types here. Applications are expected to skip unknown frame types.
+ */
+ __u8 frame_type;
+ __u8 length; /* number of valid bytes in data field */
+ __u8 reserved[2];
+ __u32 tv_nsec; /* nanoseconds */
+ __u64 tv_sec; /* seconds */
+ __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH];
+} __packed;
+
struct snd_rawmidi_params {
int stream;
size_t buffer_size; /* queue size in bytes */
size_t avail_min; /* minimum avail bytes for wakeup */
unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
- unsigned char reserved[16]; /* reserved for future use */
+ unsigned int mode; /* For input data only, frame incoming data */
+ unsigned char reserved[12]; /* reserved for future use */
};
#ifndef __KERNEL__
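With framing enabled, read() on the rawmidi device returns fixed 32-byte snd_rawmidi_framing_tstamp records instead of a raw byte stream. A sketch, where the device node and the elided error handling are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
	int fd = open("/dev/snd/midiC0D0", O_RDONLY);
	if (fd < 0)
		return 1;

	/* Framing requires declaring a recent protocol version first. */
	int ver = SNDRV_RAWMIDI_VERSION;
	ioctl(fd, SNDRV_RAWMIDI_IOCTL_USER_PVERSION, &ver);

	struct snd_rawmidi_params params;
	memset(&params, 0, sizeof(params));
	params.stream = SNDRV_RAWMIDI_STREAM_INPUT;
	params.buffer_size = 4096;
	params.avail_min = 1;
	params.mode = SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP |
		      SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC;
	if (ioctl(fd, SNDRV_RAWMIDI_IOCTL_PARAMS, &params) < 0)
		return 1;

	struct snd_rawmidi_framing_tstamp frame;
	while (read(fd, &frame, sizeof(frame)) == (ssize_t)sizeof(frame)) {
		if (frame.frame_type != 0)
			continue;	/* skip unknown frame types */
		printf("%llu.%09u: %u byte(s)\n",
		       (unsigned long long)frame.tv_sec, frame.tv_nsec,
		       frame.length);
	}
	close(fd);
	return 0;
}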
@@ -755,12 +787,82 @@ struct snd_rawmidi_status {
};
#endif
+/* UMP EP info flags */
+#define SNDRV_UMP_EP_INFO_STATIC_BLOCKS 0x01
+
+/* UMP EP Protocol / JRTS capability bits */
+#define SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK 0x0300
+#define SNDRV_UMP_EP_INFO_PROTO_MIDI1 0x0100 /* MIDI 1.0 */
+#define SNDRV_UMP_EP_INFO_PROTO_MIDI2 0x0200 /* MIDI 2.0 */
+#define SNDRV_UMP_EP_INFO_PROTO_JRTS_MASK 0x0003
+#define SNDRV_UMP_EP_INFO_PROTO_JRTS_TX 0x0001 /* JRTS Transmit */
+#define SNDRV_UMP_EP_INFO_PROTO_JRTS_RX 0x0002 /* JRTS Receive */
+
+/* UMP Endpoint information */
+struct snd_ump_endpoint_info {
+ int card; /* card number */
+ int device; /* device number */
+ unsigned int flags; /* additional info */
+ unsigned int protocol_caps; /* protocol capabilities */
+ unsigned int protocol; /* current protocol */
+ unsigned int num_blocks; /* # of function blocks */
+ unsigned short version; /* UMP major/minor version */
+ unsigned short family_id; /* MIDI device family ID */
+ unsigned short model_id; /* MIDI family model ID */
+ unsigned int manufacturer_id; /* MIDI manufacturer ID */
+ unsigned char sw_revision[4]; /* software revision */
+ unsigned short padding;
+ unsigned char name[128]; /* endpoint name string */
+ unsigned char product_id[128]; /* unique product id string */
+ unsigned char reserved[32];
+} __packed;
+
+/* UMP direction */
+#define SNDRV_UMP_DIR_INPUT 0x01
+#define SNDRV_UMP_DIR_OUTPUT 0x02
+#define SNDRV_UMP_DIR_BIDIRECTION 0x03
+
+/* UMP block info flags */
+#define SNDRV_UMP_BLOCK_IS_MIDI1 (1U << 0) /* MIDI 1.0 port w/o restrict */
+#define SNDRV_UMP_BLOCK_IS_LOWSPEED (1U << 1) /* 31.25Kbps B/W MIDI1 port */
+
+/* UMP block user-interface hint */
+#define SNDRV_UMP_BLOCK_UI_HINT_UNKNOWN 0x00
+#define SNDRV_UMP_BLOCK_UI_HINT_RECEIVER 0x01
+#define SNDRV_UMP_BLOCK_UI_HINT_SENDER 0x02
+#define SNDRV_UMP_BLOCK_UI_HINT_BOTH 0x03
+
+/* UMP groups and blocks */
+#define SNDRV_UMP_MAX_GROUPS 16
+#define SNDRV_UMP_MAX_BLOCKS 32
+
+/* UMP Block information */
+struct snd_ump_block_info {
+ int card; /* card number */
+ int device; /* device number */
+ unsigned char block_id; /* block ID (R/W) */
+ unsigned char direction; /* UMP direction */
+ unsigned char active; /* Activeness */
+ unsigned char first_group; /* first group ID */
+ unsigned char num_groups; /* number of groups */
+ unsigned char midi_ci_version; /* MIDI-CI support version */
+ unsigned char sysex8_streams; /* max number of sysex8 streams */
+ unsigned char ui_hint; /* user interface hint */
+ unsigned int flags; /* various info flags */
+ unsigned char name[128]; /* block name string */
+ unsigned char reserved[32];
+} __packed;
+
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
#define SNDRV_RAWMIDI_IOCTL_DRAIN _IOW('W', 0x31, int)
+/* Additional ioctls for UMP rawmidi devices */
+#define SNDRV_UMP_IOCTL_ENDPOINT_INFO _IOR('W', 0x40, struct snd_ump_endpoint_info)
+#define SNDRV_UMP_IOCTL_BLOCK_INFO _IOR('W', 0x41, struct snd_ump_block_info)
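A sketch of the query flow on a UMP-capable rawmidi character device; the device node and elided error handling are assumptions. block_id is the only input field of snd_ump_block_info here, the rest is filled in by the driver:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

static void dump_ump(const char *devnode)
{
	int fd = open(devnode, O_RDONLY);
	if (fd < 0)
		return;

	struct snd_ump_endpoint_info ep = {0};
	if (ioctl(fd, SNDRV_UMP_IOCTL_ENDPOINT_INFO, &ep) < 0)
		goto out;
	printf("endpoint: %s, %u block(s)\n", ep.name, ep.num_blocks);

	for (unsigned int i = 0; i < ep.num_blocks; i++) {
		struct snd_ump_block_info blk = {0};
		blk.block_id = i;	/* the R/W input field */
		if (ioctl(fd, SNDRV_UMP_IOCTL_BLOCK_INFO, &blk) < 0)
			continue;
		printf("  block %u: %s, groups %u-%u\n", i, blk.name,
		       blk.first_group + 1,
		       blk.first_group + blk.num_groups);
	}
out:
	close(fd);
}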
/*
* Timer section - /dev/snd/timer
@@ -936,7 +1038,7 @@ struct snd_timer_tread {
* *
****************************************************************************/
-#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 8)
+#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 9)
struct snd_ctl_card_info {
int card; /* card number */
@@ -974,7 +1076,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-// (1 << 3) is unused.
+/* (1 << 3) is unused. */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
@@ -1071,7 +1173,7 @@ struct snd_ctl_elem_value {
struct snd_ctl_tlv {
unsigned int numid; /* control element numeric identification */
unsigned int length; /* in bytes aligned to 4 */
- unsigned int tlv[0]; /* first TLV */
+ unsigned int tlv[]; /* first TLV */
};
#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
@@ -1097,6 +1199,9 @@ struct snd_ctl_tlv {
#define SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE _IOWR('U', 0x40, int)
#define SNDRV_CTL_IOCTL_RAWMIDI_INFO _IOWR('U', 0x41, struct snd_rawmidi_info)
#define SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE _IOW('U', 0x42, int)
+#define SNDRV_CTL_IOCTL_UMP_NEXT_DEVICE _IOWR('U', 0x43, int)
+#define SNDRV_CTL_IOCTL_UMP_ENDPOINT_INFO _IOWR('U', 0x44, struct snd_ump_endpoint_info)
+#define SNDRV_CTL_IOCTL_UMP_BLOCK_INFO _IOWR('U', 0x45, struct snd_ump_block_info)
#define SNDRV_CTL_IOCTL_POWER _IOWR('U', 0xd0, int)
#define SNDRV_CTL_IOCTL_POWER_STATE _IOR('U', 0xd1, int)
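These follow the established next-device enumeration pattern of SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE on the card control node. A sketch; the controlC0 node and elided error handling are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
	int fd = open("/dev/snd/controlC0", O_RDONLY);
	if (fd < 0)
		return 1;

	int device = -1;	/* -1 asks for the first device */
	for (;;) {
		if (ioctl(fd, SNDRV_CTL_IOCTL_UMP_NEXT_DEVICE, &device) < 0 ||
		    device < 0)
			break;	/* no more UMP devices on this card */

		struct snd_ump_endpoint_info ep = {0};
		ep.device = device;	/* input field for the control variant */
		if (!ioctl(fd, SNDRV_CTL_IOCTL_UMP_ENDPOINT_INFO, &ep))
			printf("ump device %d: %s\n", device, ep.name);
	}
	close(fd);
	return 0;
}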
diff --git a/include/uapi/sound/asound_fm.h b/include/uapi/sound/asound_fm.h
index 8471f404ff0b..25ec5e38af5c 100644
--- a/include/uapi/sound/asound_fm.h
+++ b/include/uapi/sound/asound_fm.h
@@ -10,21 +10,6 @@
* 4Front Technologies
*
* Direct FM control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define SNDRV_DM_FM_MODE_OPL2 0x00
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 7184265c0b0d..d185957f3fe0 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -5,23 +5,6 @@
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
*/
#ifndef __COMPRESS_OFFLOAD_H
#define __COMPRESS_OFFLOAD_H
@@ -123,7 +106,7 @@ struct snd_compr_codec_caps {
} __attribute__((packed, aligned(4)));
/**
- * enum sndrv_compress_encoder
+ * enum sndrv_compress_encoder - encoder metadata key
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
@@ -144,7 +127,7 @@ struct snd_compr_metadata {
__u32 value[8];
} __attribute__((packed, aligned(4)));
-/**
+/*
* compress path ioctl definitions
* SNDRV_COMPRESS_GET_CAPS: Query capability of DSP
* SNDRV_COMPRESS_GET_CODEC_CAPS: Query capability of a codec
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 79b14389ae41..ddc77322d571 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -7,47 +7,13 @@
* Authors: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
* Vinod Koul <vinod.koul@linux.intel.com>
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The definitions in this file are derived from the OpenMAX AL version 1.1
- * and OpenMAX IL v 1.1.2 header files which contain the copyright notice below.
+ * and OpenMAX IL v 1.1.2 header files which contain the copyright notice below
+ * and are licensed under the MIT license.
*
* Copyright (c) 2007-2010 The Khronos Group Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and/or associated documentation files (the
- * "Materials "), to deal in the Materials without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Materials, and to
- * permit persons to whom the Materials are furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Materials.
- *
- * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
- *
*/
#ifndef __SND_COMPRESS_PARAMS_H
#define __SND_COMPRESS_PARAMS_H
@@ -250,7 +216,7 @@ struct snd_enc_wma {
/**
- * struct snd_enc_vorbis
+ * struct snd_enc_vorbis - Vorbis encoder parameters
* @quality: Sets encoding quality to n, between -1 (low) and 10 (high).
* In the default mode of operation, the quality level is 3.
* Normal quality range is 0 - 10.
@@ -279,7 +245,7 @@ struct snd_enc_vorbis {
/**
- * struct snd_enc_real
+ * struct snd_enc_real - RealAudio encoder parameters
* @quant_bits: number of coupling quantization bits in the stream
* @start_region: coupling start region in the stream
* @num_regions: number of regions value
@@ -294,7 +260,7 @@ struct snd_enc_real {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_enc_flac
+ * struct snd_enc_flac - FLAC encoder parameters
* @num: serial number, valid only for OGG formats
* needs to be set by application
* @gain: Add replay gain tags
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index 88609cc0524c..4c32a116e7ad 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -3,22 +3,6 @@
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>,
* Creative Labs, Inc.
* Definitions for EMU10K1 (SB Live!) chips
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_EMU10K1_H
#define _UAPI__SOUND_EMU10K1_H
@@ -31,9 +15,6 @@
* ---- FX8010 ----
*/
-#define EMU10K1_CARD_CREATIVE 0x00000000
-#define EMU10K1_CARD_EMUAPS 0x00000001
-
#define EMU10K1_FX8010_PCM_COUNT 8
/*
@@ -62,6 +43,19 @@
#define iINTERP 0x0e /* R = A + (X * (Y - A) >> 31) ; saturation */
#define iSKIP 0x0f /* R = A (cc_reg), X (count), Y (cc_test) */
+#define LOWORD_OPX_MASK 0x000ffc00 /* Instruction operand X */
+#define LOWORD_OPY_MASK 0x000003ff /* Instruction operand Y */
+#define HIWORD_OPCODE_MASK 0x00f00000 /* Instruction opcode */
+#define HIWORD_RESULT_MASK 0x000ffc00 /* Instruction result */
+#define HIWORD_OPA_MASK 0x000003ff /* Instruction operand A */
+
+/* Audigy Soundcards have a different instruction format */
+#define A_LOWORD_OPX_MASK 0x007ff000
+#define A_LOWORD_OPY_MASK 0x000007ff
+#define A_HIWORD_OPCODE_MASK 0x0f000000
+#define A_HIWORD_RESULT_MASK 0x007ff000
+#define A_HIWORD_OPA_MASK 0x000007ff
+
/* GPRs */
#define FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x0f */
#define EXTIN(x) (0x10 + (x)) /* x = 0x00 - 0x0f */
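A sketch of how the masks compose an instruction word pair; the helpers are hypothetical, not part of the header, and the shifts follow directly from the mask positions above:

#include <linux/types.h>
#include <sound/emu10k1.h>

/* Pack an FX8010 (emu10k1) instruction: opcode in the high word together
 * with result and operand A, operands X and Y in the low word. */
static inline void emu10k1_pack_op(__u32 *hi, __u32 *lo, __u32 op,
				   __u32 r, __u32 a, __u32 x, __u32 y)
{
	*hi = ((op << 20) & HIWORD_OPCODE_MASK) |
	      ((r << 10) & HIWORD_RESULT_MASK) |
	      (a & HIWORD_OPA_MASK);
	*lo = ((x << 10) & LOWORD_OPX_MASK) |
	      (y & LOWORD_OPY_MASK);
}

/* Audigy (emu10k2) uses wider 11-bit operand fields at different offsets. */
static inline void audigy_pack_op(__u32 *hi, __u32 *lo, __u32 op,
				  __u32 r, __u32 a, __u32 x, __u32 y)
{
	*hi = ((op << 24) & A_HIWORD_OPCODE_MASK) |
	      ((r << 12) & A_HIWORD_RESULT_MASK) |
	      (a & A_HIWORD_OPA_MASK);
	*lo = ((x << 12) & A_LOWORD_OPX_MASK) |
	      (y & A_LOWORD_OPY_MASK);
}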
@@ -69,6 +63,16 @@
#define FXBUS2(x) (0x30 + (x)) /* x = 0x00 - 0x0f copies of fx buses for capture -> FXWC high 16 bits */
/* NB: 0x31 and 0x32 are shared with Center/LFE on SB live 5.1 */
+#define A_FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x3f FX buses */
+#define A_EXTIN(x) (0x40 + (x)) /* x = 0x00 - 0x0f physical ins */
+#define A_P16VIN(x) (0x50 + (x)) /* x = 0x00 - 0x0f p16v ins (A2 only) "EMU32 inputs" */
+#define A_EXTOUT(x) (0x60 + (x)) /* x = 0x00 - 0x1f physical outs -> A_FXWC1 0x79-7f unknown */
+#define A_FXBUS2(x) (0x80 + (x)) /* x = 0x00 - 0x1f extra outs used for EFX capture -> A_FXWC2 */
+#define A_EMU32OUTH(x) (0xa0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_10 - _1F" */
+#define A_EMU32OUTL(x) (0xb0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_01 - _0F" */
+#define A3_EMU32IN(x) (0x160 + (x)) /* x = 0x00 - 0x1f "EMU32_IN_00 - _1F" - Only when .device = 0x0008 */
+#define A3_EMU32OUT(x) (0x1E0 + (x)) /* x = 0x00 - 0x1f "EMU32_OUT_00 - _1F" - Only when .device = 0x0008 */
+
#define C_00000000 0x40
#define C_00000001 0x41
#define C_00000002 0x42
@@ -97,12 +101,66 @@
#define GPR_NOISE1 0x59 /* noise source */
#define GPR_IRQ 0x5a /* IRQ register */
#define GPR_DBAC 0x5b /* TRAM Delay Base Address Counter */
+
+/* Audigy constants */
+#define A_C_00000000 0xc0
+#define A_C_00000001 0xc1
+#define A_C_00000002 0xc2
+#define A_C_00000003 0xc3
+#define A_C_00000004 0xc4
+#define A_C_00000008 0xc5
+#define A_C_00000010 0xc6
+#define A_C_00000020 0xc7
+#define A_C_00000100 0xc8
+#define A_C_00010000 0xc9
+#define A_C_00000800 0xca
+#define A_C_10000000 0xcb
+#define A_C_20000000 0xcc
+#define A_C_40000000 0xcd
+#define A_C_80000000 0xce
+#define A_C_7fffffff 0xcf
+#define A_C_ffffffff 0xd0
+#define A_C_fffffffe 0xd1
+#define A_C_c0000000 0xd2
+#define A_C_4f1bbcdc 0xd3
+#define A_C_5a7ef9db 0xd4
+#define A_C_00100000 0xd5
+#define A_GPR_ACCU 0xd6 /* ACCUM, accumulator */
+#define A_GPR_COND 0xd7 /* CCR, condition register */
+#define A_GPR_NOISE0 0xd8 /* noise source */
+#define A_GPR_NOISE1 0xd9 /* noise source */
+#define A_GPR_IRQ 0xda /* IRQ register */
+#define A_GPR_DBAC 0xdb /* TRAM Delay Base Address Counter - internal */
+#define A_GPR_DBACE 0xde /* TRAM Delay Base Address Counter - external */
+
+/* Each FX general purpose register is 32 bits in length, all bits are used */
+#define FXGPREGBASE 0x100 /* FX general purpose registers base */
+#define A_FXGPREGBASE 0x400 /* Audigy GPRs, 0x400 to 0x5ff */
+
+#define A_TANKMEMCTLREGBASE 0x100 /* Tank memory control registers base - only for Audigy */
+#define A_TANKMEMCTLREG_MASK 0x1f /* only 5 bits used - only for Audigy */
+
+/* Tank audio data is logarithmically compressed down to 16 bits before writing to TRAM and is */
+/* decompressed back to 20 bits on a read. There are a total of 160 locations, the last 32 */
+/* locations are for external TRAM. */
+#define TANKMEMDATAREGBASE 0x200 /* Tank memory data registers base */
+#define TANKMEMDATAREG_MASK 0x000fffff /* 20 bit tank audio data field */
+
+/* Combined address field and memory opcode or flag field. 160 locations, last 32 are external */
+#define TANKMEMADDRREGBASE 0x300 /* Tank memory address registers base */
+#define TANKMEMADDRREG_ADDR_MASK 0x000fffff /* 20 bit tank address field */
+#define TANKMEMADDRREG_CLEAR 0x00800000 /* Clear tank memory */
+#define TANKMEMADDRREG_ALIGN 0x00400000 /* Align read or write relative to tank access */
+#define TANKMEMADDRREG_WRITE 0x00200000 /* Write to tank memory */
+#define TANKMEMADDRREG_READ 0x00100000 /* Read from tank memory */
+
#define GPR(x) (FXGPREGBASE + (x)) /* free GPRs: x = 0x00 - 0xff */
#define ITRAM_DATA(x) (TANKMEMDATAREGBASE + 0x00 + (x)) /* x = 0x00 - 0x7f */
#define ETRAM_DATA(x) (TANKMEMDATAREGBASE + 0x80 + (x)) /* x = 0x00 - 0x1f */
#define ITRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x00 + (x)) /* x = 0x00 - 0x7f */
#define ETRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x80 + (x)) /* x = 0x00 - 0x1f */
+#define A_GPR(x) (A_FXGPREGBASE + (x))
#define A_ITRAM_DATA(x) (TANKMEMDATAREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
#define A_ETRAM_DATA(x) (TANKMEMDATAREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
#define A_ITRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
@@ -110,17 +168,6 @@
#define A_ITRAM_CTL(x) (A_TANKMEMCTLREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
#define A_ETRAM_CTL(x) (A_TANKMEMCTLREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
-#define A_FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x3f FX buses */
-#define A_EXTIN(x) (0x40 + (x)) /* x = 0x00 - 0x0f physical ins */
-#define A_P16VIN(x) (0x50 + (x)) /* x = 0x00 - 0x0f p16v ins (A2 only) "EMU32 inputs" */
-#define A_EXTOUT(x) (0x60 + (x)) /* x = 0x00 - 0x1f physical outs -> A_FXWC1 0x79-7f unknown */
-#define A_FXBUS2(x) (0x80 + (x)) /* x = 0x00 - 0x1f extra outs used for EFX capture -> A_FXWC2 */
-#define A_EMU32OUTH(x) (0xa0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_10 - _1F" - ??? */
-#define A_EMU32OUTL(x) (0xb0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_1 - _F" - ??? */
-#define A3_EMU32IN(x) (0x160 + (x)) /* x = 0x00 - 0x3f "EMU32_IN_00 - _3F" - Only when .device = 0x0008 */
-#define A3_EMU32OUT(x) (0x1E0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_00 - _3F" - Only when .device = 0x0008 */
-#define A_GPR(x) (A_FXGPREGBASE + (x))
-
/* cc_reg constants */
#define CC_REG_NORMALIZED C_00000001
#define CC_REG_BORROW C_00000002
@@ -129,7 +176,17 @@
#define CC_REG_SATURATE C_00000010
#define CC_REG_NONZERO C_00000100
+#define A_CC_REG_NORMALIZED A_C_00000001
+#define A_CC_REG_BORROW A_C_00000002
+#define A_CC_REG_MINUS A_C_00000004
+#define A_CC_REG_ZERO A_C_00000008
+#define A_CC_REG_SATURATE A_C_00000010
+#define A_CC_REG_NONZERO A_C_00000100
+
/* FX buses */
+// These are arbitrary mappings; our DSP code simply expects
+// the config files to route the channels this way.
+// The numbers are documented in {audigy,sb-live}-mixer.rst.
#define FXBUS_PCM_LEFT 0x00
#define FXBUS_PCM_RIGHT 0x01
#define FXBUS_PCM_LEFT_REAR 0x02
@@ -219,38 +276,7 @@
#define A_EXTOUT_ADC_CAP_R 0x17 /* right */
#define A_EXTOUT_MIC_CAP 0x18 /* Mic capture buffer */
-/* Audigy constants */
-#define A_C_00000000 0xc0
-#define A_C_00000001 0xc1
-#define A_C_00000002 0xc2
-#define A_C_00000003 0xc3
-#define A_C_00000004 0xc4
-#define A_C_00000008 0xc5
-#define A_C_00000010 0xc6
-#define A_C_00000020 0xc7
-#define A_C_00000100 0xc8
-#define A_C_00010000 0xc9
-#define A_C_00000800 0xca
-#define A_C_10000000 0xcb
-#define A_C_20000000 0xcc
-#define A_C_40000000 0xcd
-#define A_C_80000000 0xce
-#define A_C_7fffffff 0xcf
-#define A_C_ffffffff 0xd0
-#define A_C_fffffffe 0xd1
-#define A_C_c0000000 0xd2
-#define A_C_4f1bbcdc 0xd3
-#define A_C_5a7ef9db 0xd4
-#define A_C_00100000 0xd5
-#define A_GPR_ACCU 0xd6 /* ACCUM, accumulator */
-#define A_GPR_COND 0xd7 /* CCR, condition register */
-#define A_GPR_NOISE0 0xd8 /* noise source */
-#define A_GPR_NOISE1 0xd9 /* noise source */
-#define A_GPR_IRQ 0xda /* IRQ register */
-#define A_GPR_DBAC 0xdb /* TRAM Delay Base Address Counter - internal */
-#define A_GPR_DBACE 0xde /* TRAM Delay Base Address Counter - external */
-
-/* definitions for debug register */
+/* Definitions for debug register. Note that these are for emu10k1 ONLY. */
#define EMU10K1_DBG_ZC 0x80000000 /* zero tram counter */
#define EMU10K1_DBG_SATURATION_OCCURED 0x02000000 /* saturation control */
#define EMU10K1_DBG_SATURATION_ADDR 0x01ff0000 /* saturation address */
@@ -259,14 +285,14 @@
#define EMU10K1_DBG_CONDITION_CODE 0x00003e00 /* condition code */
#define EMU10K1_DBG_SINGLE_STEP_ADDR 0x000001ff /* single step address */
-/* tank memory address line */
-#ifndef __KERNEL__
-#define TANKMEMADDRREG_ADDR_MASK 0x000fffff /* 20 bit tank address field */
-#define TANKMEMADDRREG_CLEAR 0x00800000 /* Clear tank memory */
-#define TANKMEMADDRREG_ALIGN 0x00400000 /* Align read or write relative to tank access */
-#define TANKMEMADDRREG_WRITE 0x00200000 /* Write to tank memory */
-#define TANKMEMADDRREG_READ 0x00100000 /* Read from tank memory */
-#endif
+/* Definitions for emu10k2 debug register. */
+#define A_DBG_ZC 0x40000000 /* zero tram counter */
+#define A_DBG_SATURATION_OCCURED 0x20000000
+#define A_DBG_SATURATION_ADDR 0x0ffc0000
+#define A_DBG_SINGLE_STEP 0x00020000 /* Set to zero to start dsp */
+#define A_DBG_STEP 0x00010000
+#define A_DBG_CONDITION_CODE 0x0000f800
+#define A_DBG_STEP_ADDR 0x000003ff
struct snd_emu10k1_fx8010_info {
unsigned int internal_tram_size; /* in samples */
@@ -282,6 +308,8 @@ struct snd_emu10k1_fx8010_info {
#define EMU10K1_GPR_TRANSLATION_BASS 2
#define EMU10K1_GPR_TRANSLATION_TREBLE 3
#define EMU10K1_GPR_TRANSLATION_ONOFF 4
+#define EMU10K1_GPR_TRANSLATION_NEGATE 5
+#define EMU10K1_GPR_TRANSLATION_NEG_TABLE100 6
enum emu10k1_ctl_elem_iface {
EMU10K1_CTL_ELEM_IFACE_MIXER = 2, /* virtual mixer device */
@@ -302,9 +330,9 @@ struct snd_emu10k1_fx8010_control_gpr {
unsigned int vcount; /* visible count */
unsigned int count; /* count of GPR (1..16) */
unsigned short gpr[32]; /* GPR number(s) */
- unsigned int value[32]; /* initial values */
- unsigned int min; /* minimum range */
- unsigned int max; /* maximum range */
+ int value[32]; /* initial values */
+ int min; /* minimum range */
+ int max; /* maximum range */
unsigned int translation; /* translation type (EMU10K1_GPR_TRANSLATION*) */
const unsigned int *tlv;
};
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index ae12826ed641..1e86872c151f 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -13,6 +13,8 @@
#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c
#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d
+#define SNDRV_FIREWIRE_EVENT_MOTU_REGISTER_DSP_CHANGE 0x4d545244
+#define SNDRV_FIREWIRE_EVENT_FF400_MESSAGE 0x4f6c6761
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -37,11 +39,11 @@ struct snd_efw_transaction {
__be32 category;
__be32 command;
__be32 status;
- __be32 params[0];
+ __be32 params[];
};
struct snd_firewire_event_efw_response {
unsigned int type;
- __be32 response[0]; /* some responses */
+ __be32 response[]; /* some responses */
};
struct snd_firewire_event_digi00x_message {
@@ -62,7 +64,37 @@ struct snd_firewire_tascam_change {
struct snd_firewire_event_tascam_control {
unsigned int type;
- struct snd_firewire_tascam_change changes[0];
+ struct snd_firewire_tascam_change changes[];
+};
+
+struct snd_firewire_event_motu_register_dsp_change {
+ unsigned int type;
+ __u32 count; /* The number of changes. */
+ __u32 changes[]; /* Encoded event for change of register DSP. */
+};
+
+/**
+ * struct snd_firewire_event_ff400_message - the container for messages from Fireface 400 when
+ * operating a hardware knob.
+ *
+ * @type: Fixed to SNDRV_FIREWIRE_EVENT_FF400_MESSAGE.
+ * @message_count: The number of messages.
+ * @messages.message: The messages expressing hardware knob operation.
+ * @messages.tstamp: The isochronous cycle at which the request subaction of the asynchronous
+ * transaction was sent to deliver the message. It is a 16 bit unsigned integer
+ * value. The higher 3 bits express the lower three bits of the second field in
+ * the format of CYCLE_TIME, up to 7. The remaining 13 bits express the cycle
+ * field, up to 7999.
+ *
+ * The structure expresses the messages transmitted by Fireface 400 when a hardware knob is operated.
+ */
+struct snd_firewire_event_ff400_message {
+ unsigned int type;
+ unsigned int message_count;
+ struct {
+ __u32 message;
+ __u32 tstamp;
+ } messages[];
};
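A hypothetical helper decoding the tstamp field per the description above:

#include <linux/types.h>

/* Split the 16 bit tstamp of an ff400 message into its CYCLE_TIME-style
 * fields: the lower 3 bits of the second field and the cycle count. */
static inline void ff400_tstamp_decode(__u32 tstamp,
				       unsigned int *sec_low3,
				       unsigned int *cycle)
{
	*sec_low3 = (tstamp >> 13) & 0x07;	/* seconds (low 3 bits), 0-7 */
	*cycle = tstamp & 0x1fff;		/* cycle field, 0-7999 */
}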
union snd_firewire_event {
@@ -73,6 +105,8 @@ union snd_firewire_event {
struct snd_firewire_event_digi00x_message digi00x_message;
struct snd_firewire_event_tascam_control tascam_control;
struct snd_firewire_event_motu_notification motu_notification;
+ struct snd_firewire_event_motu_register_dsp_change motu_register_dsp_change;
+ struct snd_firewire_event_ff400_message ff400_message;
};
@@ -80,6 +114,9 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9)
#define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa)
#define SNDRV_FIREWIRE_IOCTL_TASCAM_STATE _IOR('H', 0xfb, struct snd_firewire_tascam_state)
+#define SNDRV_FIREWIRE_IOCTL_MOTU_REGISTER_DSP_METER _IOR('H', 0xfc, struct snd_firewire_motu_register_dsp_meter)
+#define SNDRV_FIREWIRE_IOCTL_MOTU_COMMAND_DSP_METER _IOR('H', 0xfd, struct snd_firewire_motu_command_dsp_meter)
+#define SNDRV_FIREWIRE_IOCTL_MOTU_REGISTER_DSP_PARAMETER _IOR('H', 0xfe, struct snd_firewire_motu_register_dsp_parameter)
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
@@ -108,4 +145,143 @@ struct snd_firewire_tascam_state {
__be32 data[SNDRV_FIREWIRE_TASCAM_STATE_COUNT];
};
+/*
+ * In the MOTU models below, software is allowed to control the DSP by accessing registers.
+ * - 828mk2
+ * - 896hd
+ * - Traveler
+ * - 8 pre
+ * - Ultralite
+ * - 4 pre
+ * - Audio Express
+ *
+ * On the other hand, the status of the DSP is split into specific messages included in the
+ * sequence of isochronous packets. The ALSA firewire-motu driver gathers the messages and allows
+ * userspace applications to read them via ioctl. In 828mk2, 896hd, and Traveler, hardware meters
+ * for all physical inputs are put into the message, while one pair of physical outputs is
+ * selected. The selection is done by the LSB byte of an asynchronous quadlet write transaction to
+ * 0x'ffff'f000'0b2c.
+ *
+ * Note that V3HD/V4HD use asynchronous transactions for the purpose. The destination address is
+ * registered at 0x'ffff'f000'0b38 and '0b3c by asynchronous quadlet write requests. The size of
+ * the message varies between 23 and 51 quadlets. In that case, the number of mixer buses can be
+ * extended up to 12.
+ */
+
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_INPUT_COUNT 24
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_OUTPUT_COUNT 24
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_COUNT \
+ (SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_INPUT_COUNT + SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_OUTPUT_COUNT)
+
+/**
+ * struct snd_firewire_motu_register_dsp_meter - the container for meter information in DSP
+ * controlled by register access
+ * @data: Signal level meters. The mapping between position and input/output channel is
+ * model-dependent.
+ *
+ * The structure expresses the part of the DSP status for hardware meters. Each u8 element holds a
+ * linear value for the audio signal level, between 0x00 and 0x7f.
+ */
+struct snd_firewire_motu_register_dsp_meter {
+ __u8 data[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_COUNT];
+};
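A sketch reading the meters through the new hwdep ioctl; the hwC0D0 node (assumed to belong to the firewire-motu driver) and the elided error handling are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/firewire.h>

int main(void)
{
	int fd = open("/dev/snd/hwC0D0", O_RDONLY);
	if (fd < 0)
		return 1;

	struct snd_firewire_motu_register_dsp_meter meter = {0};
	if (ioctl(fd, SNDRV_FIREWIRE_IOCTL_MOTU_REGISTER_DSP_METER, &meter) < 0)
		return 1;

	for (int i = 0; i < SNDRV_FIREWIRE_MOTU_REGISTER_DSP_METER_COUNT; i++)
		printf("meter[%02d] = %u\n", i, meter.data[i]);	/* linear, 0x00-0x7f */
	close(fd);
	return 0;
}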
+
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT 4
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT 20
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_INPUT_COUNT 10
+#define SNDRV_FIREWIRE_MOTU_REGISTER_DSP_ALIGNED_INPUT_COUNT (SNDRV_FIREWIRE_MOTU_REGISTER_DSP_INPUT_COUNT + 2)
+
+/**
+ * struct snd_firewire_motu_register_dsp_parameter - the container for parameters of DSP controlled
+ * by register access.
+ * @mixer.source.gain: The gain of source to mixer.
+ * @mixer.source.pan: The L/R balance of source to mixer.
+ * @mixer.source.flag: The flag of source to mixer, including mute, solo.
+ * @mixer.source.paired_balance: The L/R balance of paired source to mixer, only for 4 pre and
+ * Audio Express.
+ * @mixer.source.paired_width: The width of paired source to mixer, only for 4 pre and
+ * Audio Express.
+ * @mixer.output.paired_volume: The volume of paired output from mixer.
+ * @mixer.output.paired_flag: The flag of paired output from mixer.
+ * @output.main_paired_volume: The volume of paired main output.
+ * @output.hp_paired_volume: The volume of paired hp output.
+ * @output.hp_paired_assignment: The source assigned to paired hp output.
+ * @output.reserved: Padding for 32 bit alignment for future extension.
+ * @line_input.boost_flag: The flags of boost for line inputs, only for 828mk2 and Traveler.
+ * @line_input.nominal_level_flag: The flags of nominal level for line inputs, only for 828mk2 and
+ * Traveler.
+ * @line_input.reserved: Padding for 32 bit alignment for future extension.
+ * @input.gain_and_invert: The value including gain and invert for input, only for Ultralite, 4 pre
+ * and Audio Express.
+ * @input.flag: The flag of input; e.g. jack detection, phantom power, and pad, only for Ultralite,
+ * 4 pre and Audio Express.
+ * @reserved: Padding to keep the structure size at 512 bytes, reserved for future extension.
+ *
+ * The structure expresses the set of parameters for DSP controlled by register access.
+ */
+struct snd_firewire_motu_register_dsp_parameter {
+ struct {
+ struct {
+ __u8 gain[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 pan[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 flag[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 paired_balance[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ __u8 paired_width[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_SRC_COUNT];
+ } source[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT];
+ struct {
+ __u8 paired_volume[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT];
+ __u8 paired_flag[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_MIXER_COUNT];
+ } output;
+ } mixer;
+ struct {
+ __u8 main_paired_volume;
+ __u8 hp_paired_volume;
+ __u8 hp_paired_assignment;
+ __u8 reserved[5];
+ } output;
+ struct {
+ __u8 boost_flag;
+ __u8 nominal_level_flag;
+ __u8 reserved[6];
+ } line_input;
+ struct {
+ __u8 gain_and_invert[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_ALIGNED_INPUT_COUNT];
+ __u8 flag[SNDRV_FIREWIRE_MOTU_REGISTER_DSP_ALIGNED_INPUT_COUNT];
+ } input;
+ __u8 reserved[64];
+};
+
+/*
+ * In the MOTU models below, software is allowed to control the DSP by commands in frames of
+ * asynchronous transactions to 0x'ffff'0001'0000:
+ *
+ * - 828 mk3 (FireWire only and Hybrid)
+ * - 896 mk3 (FireWire only and Hybrid)
+ * - Ultralite mk3 (FireWire only and Hybrid)
+ * - Traveler mk3
+ * - Track 16
+ *
+ * On the other hand, the state of the hardware meter is split into specific messages included in
+ * the sequence of isochronous packets. The ALSA firewire-motu driver gathers the messages and
+ * allows userspace applications to read them via ioctl.
+ */
+
+#define SNDRV_FIREWIRE_MOTU_COMMAND_DSP_METER_COUNT 400
+
+/**
+ * struct snd_firewire_motu_command_dsp_meter - the container for meter information in DSP
+ * controlled by command
+ * @data: Signal level meters. The mapping between position and signal channel is model-dependent.
+ *
+ * The structure expresses the part of the DSP status for hardware meters. Each 32 bit element is
+ * estimated to hold an IEEE 754 32 bit single precision floating point (binary32) value. It is
+ * expected to be a linear value (not logarithmic) for the audio signal level between 0.0 and +1.0.
+ */
+struct snd_firewire_motu_command_dsp_meter {
+#ifdef __KERNEL__
+ __u32 data[SNDRV_FIREWIRE_MOTU_COMMAND_DSP_METER_COUNT];
+#else
+ float data[SNDRV_FIREWIRE_MOTU_COMMAND_DSP_METER_COUNT];
+#endif
+};
+
#endif /* _UAPI_SOUND_FIREWIRE_H_INCLUDED */
diff --git a/include/uapi/sound/hdsp.h b/include/uapi/sound/hdsp.h
index b8df62b60f4d..0961954658d6 100644
--- a/include/uapi/sound/hdsp.h
+++ b/include/uapi/sound/hdsp.h
@@ -4,20 +4,6 @@
/*
* Copyright (C) 2003 Thomas Charbonnel (thomas@undata.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __linux__
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index 14af3d00ea3f..7043bb3d435a 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -4,21 +4,6 @@
/*
* Copyright (C) 2003 Winfried Ritsch (IEM)
* based on hdsp.h from Thomas Charbonnel (thomas@undata.org)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __linux__
diff --git a/include/uapi/sound/intel/avs/tokens.h b/include/uapi/sound/intel/avs/tokens.h
new file mode 100644
index 000000000000..4beca03405c0
--- /dev/null
+++ b/include/uapi/sound/intel/avs/tokens.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#ifndef __UAPI_SOUND_INTEL_AVS_TOKENS_H
+#define __UAPI_SOUND_INTEL_AVS_TOKENS_H
+
+enum avs_tplg_token {
+ /* struct avs_tplg */
+ AVS_TKN_MANIFEST_NAME_STRING = 1,
+ AVS_TKN_MANIFEST_VERSION_U32 = 2,
+ AVS_TKN_MANIFEST_NUM_LIBRARIES_U32 = 3,
+ AVS_TKN_MANIFEST_NUM_AFMTS_U32 = 4,
+ AVS_TKN_MANIFEST_NUM_MODCFGS_BASE_U32 = 5,
+ AVS_TKN_MANIFEST_NUM_MODCFGS_EXT_U32 = 6,
+ AVS_TKN_MANIFEST_NUM_PPLCFGS_U32 = 7,
+ AVS_TKN_MANIFEST_NUM_BINDINGS_U32 = 8,
+ AVS_TKN_MANIFEST_NUM_CONDPATH_TMPLS_U32 = 9,
+ AVS_TKN_MANIFEST_NUM_INIT_CONFIGS_U32 = 10,
+
+ /* struct avs_tplg_library */
+ AVS_TKN_LIBRARY_ID_U32 = 101,
+ AVS_TKN_LIBRARY_NAME_STRING = 102,
+
+ /* struct avs_audio_format */
+ AVS_TKN_AFMT_ID_U32 = 201,
+ AVS_TKN_AFMT_SAMPLE_RATE_U32 = 202,
+ AVS_TKN_AFMT_BIT_DEPTH_U32 = 203,
+ AVS_TKN_AFMT_CHANNEL_MAP_U32 = 204,
+ AVS_TKN_AFMT_CHANNEL_CFG_U32 = 205,
+ AVS_TKN_AFMT_INTERLEAVING_U32 = 206,
+ AVS_TKN_AFMT_NUM_CHANNELS_U32 = 207,
+ AVS_TKN_AFMT_VALID_BIT_DEPTH_U32 = 208,
+ AVS_TKN_AFMT_SAMPLE_TYPE_U32 = 209,
+
+ /* struct avs_tplg_modcfg_base */
+ AVS_TKN_MODCFG_BASE_ID_U32 = 301,
+ AVS_TKN_MODCFG_BASE_CPC_U32 = 302,
+ AVS_TKN_MODCFG_BASE_IBS_U32 = 303,
+ AVS_TKN_MODCFG_BASE_OBS_U32 = 304,
+ AVS_TKN_MODCFG_BASE_PAGES_U32 = 305,
+
+ /* struct avs_tplg_modcfg_ext */
+ AVS_TKN_MODCFG_EXT_ID_U32 = 401,
+ AVS_TKN_MODCFG_EXT_TYPE_UUID = 402,
+ AVS_TKN_MODCFG_CPR_OUT_AFMT_ID_U32 = 403,
+ AVS_TKN_MODCFG_CPR_FEATURE_MASK_U32 = 404,
+ AVS_TKN_MODCFG_CPR_DMA_TYPE_U32 = 405,
+ AVS_TKN_MODCFG_CPR_DMABUFF_SIZE_U32 = 406,
+ AVS_TKN_MODCFG_CPR_VINDEX_U8 = 407,
+ AVS_TKN_MODCFG_CPR_BLOB_FMT_ID_U32 = 408,
+ AVS_TKN_MODCFG_MICSEL_OUT_AFMT_ID_U32 = 409,
+ AVS_TKN_MODCFG_INTELWOV_CPC_LP_MODE_U32 = 410,
+ AVS_TKN_MODCFG_SRC_OUT_FREQ_U32 = 411,
+ AVS_TKN_MODCFG_MUX_REF_AFMT_ID_U32 = 412,
+ AVS_TKN_MODCFG_MUX_OUT_AFMT_ID_U32 = 413,
+ AVS_TKN_MODCFG_AEC_REF_AFMT_ID_U32 = 414,
+ AVS_TKN_MODCFG_AEC_OUT_AFMT_ID_U32 = 415,
+ AVS_TKN_MODCFG_AEC_CPC_LP_MODE_U32 = 416,
+ AVS_TKN_MODCFG_ASRC_OUT_FREQ_U32 = 417,
+ AVS_TKN_MODCFG_ASRC_MODE_U8 = 418,
+ AVS_TKN_MODCFG_ASRC_DISABLE_JITTER_U8 = 419,
+ AVS_TKN_MODCFG_UPDOWN_MIX_OUT_CHAN_CFG_U32 = 420,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_SELECT_U32 = 421,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_0_S32 = 422,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_1_S32 = 423,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_2_S32 = 424,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_3_S32 = 425,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_4_S32 = 426,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_5_S32 = 427,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_6_S32 = 428,
+ AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_7_S32 = 429,
+ AVS_TKN_MODCFG_UPDOWN_MIX_CHAN_MAP_U32 = 430,
+ AVS_TKN_MODCFG_EXT_NUM_INPUT_PINS_U16 = 431,
+ AVS_TKN_MODCFG_EXT_NUM_OUTPUT_PINS_U16 = 432,
+
+ /* struct avs_tplg_pplcfg */
+ AVS_TKN_PPLCFG_ID_U32 = 1401,
+ AVS_TKN_PPLCFG_REQ_SIZE_U16 = 1402,
+ AVS_TKN_PPLCFG_PRIORITY_U8 = 1403,
+ AVS_TKN_PPLCFG_LOW_POWER_BOOL = 1404,
+ AVS_TKN_PPLCFG_ATTRIBUTES_U16 = 1405,
+ AVS_TKN_PPLCFG_TRIGGER_U32 = 1406,
+
+ /* struct avs_tplg_binding */
+ AVS_TKN_BINDING_ID_U32 = 1501,
+ AVS_TKN_BINDING_TARGET_TPLG_NAME_STRING = 1502,
+ AVS_TKN_BINDING_TARGET_PATH_TMPL_ID_U32 = 1503,
+ AVS_TKN_BINDING_TARGET_PPL_ID_U32 = 1504,
+ AVS_TKN_BINDING_TARGET_MOD_ID_U32 = 1505,
+ AVS_TKN_BINDING_TARGET_MOD_PIN_U8 = 1506,
+ AVS_TKN_BINDING_MOD_ID_U32 = 1507,
+ AVS_TKN_BINDING_MOD_PIN_U8 = 1508,
+ AVS_TKN_BINDING_IS_SINK_U8 = 1509,
+
+ /* struct avs_tplg_pipeline */
+ AVS_TKN_PPL_ID_U32 = 1601,
+ AVS_TKN_PPL_PPLCFG_ID_U32 = 1602,
+ AVS_TKN_PPL_NUM_BINDING_IDS_U32 = 1603,
+ AVS_TKN_PPL_BINDING_ID_U32 = 1604,
+
+ /* struct avs_tplg_module */
+ AVS_TKN_MOD_ID_U32 = 1701,
+ AVS_TKN_MOD_MODCFG_BASE_ID_U32 = 1702,
+ AVS_TKN_MOD_IN_AFMT_ID_U32 = 1703,
+ AVS_TKN_MOD_CORE_ID_U8 = 1704,
+ AVS_TKN_MOD_PROC_DOMAIN_U8 = 1705,
+ AVS_TKN_MOD_MODCFG_EXT_ID_U32 = 1706,
+ AVS_TKN_MOD_KCONTROL_ID_U32 = 1707,
+ AVS_TKN_MOD_INIT_CONFIG_NUM_IDS_U32 = 1708,
+ AVS_TKN_MOD_INIT_CONFIG_ID_U32 = 1709,
+
+ /* struct avs_tplg_path_template */
+ AVS_TKN_PATH_TMPL_ID_U32 = 1801,
+
+ /* struct avs_tplg_path */
+ AVS_TKN_PATH_ID_U32 = 1901,
+ AVS_TKN_PATH_FE_FMT_ID_U32 = 1902,
+ AVS_TKN_PATH_BE_FMT_ID_U32 = 1903,
+
+ /* struct avs_tplg_pin_format */
+ AVS_TKN_PIN_FMT_INDEX_U32 = 2201,
+ AVS_TKN_PIN_FMT_IOBS_U32 = 2202,
+ AVS_TKN_PIN_FMT_AFMT_ID_U32 = 2203,
+
+ /* struct avs_tplg_kcontrol */
+ AVS_TKN_KCONTROL_ID_U32 = 2301,
+
+ /* struct avs_tplg_init_config */
+ AVS_TKN_INIT_CONFIG_ID_U32 = 2401,
+ AVS_TKN_INIT_CONFIG_PARAM_U8 = 2402,
+ AVS_TKN_INIT_CONFIG_LENGTH_U32 = 2403,
+};
+
+#endif
diff --git a/include/uapi/sound/sb16_csp.h b/include/uapi/sound/sb16_csp.h
index e64851481d88..5a80f5ec02ee 100644
--- a/include/uapi/sound/sb16_csp.h
+++ b/include/uapi/sound/sb16_csp.h
@@ -4,21 +4,6 @@
* Takashi Iwai <tiwai@suse.de>
*
* SB16ASP/AWE32 CSP control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_SB16_CSP_H
#define _UAPI__SOUND_SB16_CSP_H
diff --git a/include/uapi/sound/scarlett2.h b/include/uapi/sound/scarlett2.h
new file mode 100644
index 000000000000..91467ab0ed70
--- /dev/null
+++ b/include/uapi/sound/scarlett2.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Focusrite Scarlett 2 Protocol Driver for ALSA
+ * (including Scarlett 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, and
+ * Clarett+ series products)
+ *
+ * Copyright (c) 2023 by Geoffrey D. Bennett <g at b4.vu>
+ */
+#ifndef __UAPI_SOUND_SCARLETT2_H
+#define __UAPI_SOUND_SCARLETT2_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SCARLETT2_HWDEP_MAJOR 1
+#define SCARLETT2_HWDEP_MINOR 0
+#define SCARLETT2_HWDEP_SUBMINOR 0
+
+#define SCARLETT2_HWDEP_VERSION \
+ ((SCARLETT2_HWDEP_MAJOR << 16) | \
+ (SCARLETT2_HWDEP_MINOR << 8) | \
+ SCARLETT2_HWDEP_SUBMINOR)
+
+#define SCARLETT2_HWDEP_VERSION_MAJOR(v) (((v) >> 16) & 0xFF)
+#define SCARLETT2_HWDEP_VERSION_MINOR(v) (((v) >> 8) & 0xFF)
+#define SCARLETT2_HWDEP_VERSION_SUBMINOR(v) ((v) & 0xFF)
+
+/* Get protocol version */
+#define SCARLETT2_IOCTL_PVERSION _IOR('S', 0x60, int)
+
+/* Reboot */
+#define SCARLETT2_IOCTL_REBOOT _IO('S', 0x61)
+
+/* Select flash segment */
+#define SCARLETT2_SEGMENT_ID_SETTINGS 0
+#define SCARLETT2_SEGMENT_ID_FIRMWARE 1
+#define SCARLETT2_SEGMENT_ID_COUNT 2
+
+#define SCARLETT2_IOCTL_SELECT_FLASH_SEGMENT _IOW('S', 0x62, int)
+
+/* Erase selected flash segment */
+#define SCARLETT2_IOCTL_ERASE_FLASH_SEGMENT _IO('S', 0x63)
+
+/* Get the erase progress of the selected flash segment:
+ * progress runs from 1 through num_blocks, or 255 once complete
+ */
+struct scarlett2_flash_segment_erase_progress {
+ unsigned char progress;
+ unsigned char num_blocks;
+};
+#define SCARLETT2_IOCTL_GET_ERASE_PROGRESS \
+ _IOR('S', 0x64, struct scarlett2_flash_segment_erase_progress)
+
+#endif /* __UAPI_SOUND_SCARLETT2_H */
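A sketch of the intended erase sequence; the hwdep node, the 100ms poll interval, and the elided error handling are assumptions. Check the protocol version, select a segment, erase it, then poll progress until it reports 255:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/scarlett2.h>

int main(void)
{
	int fd = open("/dev/snd/hwC0D0", O_RDWR);
	if (fd < 0)
		return 1;

	int version;
	if (ioctl(fd, SCARLETT2_IOCTL_PVERSION, &version) < 0 ||
	    SCARLETT2_HWDEP_VERSION_MAJOR(version) != SCARLETT2_HWDEP_MAJOR)
		return 1;	/* protocol mismatch */

	int segment = SCARLETT2_SEGMENT_ID_SETTINGS;
	if (ioctl(fd, SCARLETT2_IOCTL_SELECT_FLASH_SEGMENT, &segment) < 0 ||
	    ioctl(fd, SCARLETT2_IOCTL_ERASE_FLASH_SEGMENT) < 0)
		return 1;

	struct scarlett2_flash_segment_erase_progress progress;
	do {
		usleep(100000);	/* poll every 100ms */
		if (ioctl(fd, SCARLETT2_IOCTL_GET_ERASE_PROGRESS, &progress) < 0)
			return 1;
		printf("erase progress: %u/%u\n", progress.progress,
		       progress.num_blocks);
	} while (progress.progress != 255);

	close(fd);
	return 0;
}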
diff --git a/include/uapi/sound/sfnt_info.h b/include/uapi/sound/sfnt_info.h
index c9a810a6ef48..f2b5e13fb5a7 100644
--- a/include/uapi/sound/sfnt_info.h
+++ b/include/uapi/sound/sfnt_info.h
@@ -6,21 +6,6 @@
* Patch record compatible with AWE driver on OSS
*
* Copyright (C) 1999-2000 Takashi Iwai
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <sound/asound.h>
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index a93c0decfdd5..4bf9c4f9add8 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -66,7 +66,8 @@ enum skl_ch_cfg {
SKL_CH_CFG_DUAL_MONO = 9,
SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
- SKL_CH_CFG_4_CHANNEL = 12,
+ SKL_CH_CFG_7_1 = 12,
+ SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
SKL_CH_CFG_INVALID
};
@@ -151,7 +152,7 @@ struct skl_dfw_algo_data {
__u32 rsvd:30;
__u32 param_id;
__u32 max;
- char params[0];
+ char params[];
} __packed;
enum skl_tkn_dir {
diff --git a/include/uapi/sound/snd_ar_tokens.h b/include/uapi/sound/snd_ar_tokens.h
new file mode 100644
index 000000000000..b9b9093b4396
--- /dev/null
+++ b/include/uapi/sound/snd_ar_tokens.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __SND_AR_TOKENS_H__
+#define __SND_AR_TOKENS_H__
+
+#define APM_SUB_GRAPH_PERF_MODE_LOW_POWER 0x1
+#define APM_SUB_GRAPH_PERF_MODE_LOW_LATENCY 0x2
+
+#define APM_SUB_GRAPH_DIRECTION_TX 0x1
+#define APM_SUB_GRAPH_DIRECTION_RX 0x2
+
+/* Scenario ID Audio Playback */
+#define APM_SUB_GRAPH_SID_AUDIO_PLAYBACK 0x1
+/* Scenario ID Audio Record */
+#define APM_SUB_GRAPH_SID_AUDIO_RECORD 0x2
+/* Scenario ID Voice Call */
+#define APM_SUB_GRAPH_SID_VOICE_CALL 0x3
+
+/* container capability ID Pre/Post Processing (PP) */
+#define APM_CONTAINER_CAP_ID_PP 0x1
+/* container capability ID Compression/Decompression (CD) */
+#define APM_CONTAINER_CAP_ID_CD 0x2
+/* container capability ID End Point(EP) */
+#define APM_CONTAINER_CAP_ID_EP 0x3
+/* container capability ID Offload (OLC) */
+#define APM_CONTAINER_CAP_ID_OLC 0x4
+
+/* container graph position Stream */
+#define APM_CONT_GRAPH_POS_STREAM 0x1
+/* container graph position Per Stream Per Device*/
+#define APM_CONT_GRAPH_POS_PER_STR_PER_DEV 0x2
+/* container graph position Stream-Device */
+#define APM_CONT_GRAPH_POS_STR_DEV 0x3
+/* container graph position Global Device */
+#define APM_CONT_GRAPH_POS_GLOBAL_DEV 0x4
+
+#define APM_PROC_DOMAIN_ID_MDSP 0x1
+#define APM_PROC_DOMAIN_ID_ADSP 0x2
+#define APM_PROC_DOMAIN_ID_SDSP 0x4
+#define APM_PROC_DOMAIN_ID_CDSP 0x5
+
+#define PCM_INTERLEAVED 1
+#define PCM_DEINTERLEAVED_PACKED 2
+#define PCM_DEINTERLEAVED_UNPACKED 3
+#define AR_I2S_WS_SRC_EXTERNAL 0
+#define AR_I2S_WS_SRC_INTERNAL 1
+
+enum ar_event_types {
+ AR_EVENT_NONE = 0,
+ AR_PGA_DAPM_EVENT
+};
+
+/*
+ * Kcontrol IDs
+ */
+#define SND_SOC_AR_TPLG_FE_BE_GRAPH_CTL_MIX 256
+#define SND_SOC_AR_TPLG_VOL_CTL 257
+
+/**
+ * %AR_TKN_U32_SUB_GRAPH_INSTANCE_ID: Sub Graph Instance Id
+ *
+ * %AR_TKN_U32_SUB_GRAPH_PERF_MODE: Performance mode of subgraph
+ * APM_SUB_GRAPH_PERF_MODE_LOW_POWER = 1,
+ * APM_SUB_GRAPH_PERF_MODE_LOW_LATENCY = 2
+ *
+ * %AR_TKN_U32_SUB_GRAPH_DIRECTION: Direction of subgraph
+ * APM_SUB_GRAPH_DIRECTION_TX = 1,
+ * APM_SUB_GRAPH_DIRECTION_RX = 2
+ *
+ * %AR_TKN_U32_SUB_GRAPH_SCENARIO_ID: Scenario ID for subgraph
+ * APM_SUB_GRAPH_SID_AUDIO_PLAYBACK = 1,
+ * APM_SUB_GRAPH_SID_AUDIO_RECORD = 2,
+ * APM_SUB_GRAPH_SID_VOICE_CALL = 3
+ *
+ * %AR_TKN_U32_CONTAINER_INSTANCE_ID: Container Instance ID
+ *
+ * %AR_TKN_U32_CONTAINER_CAPABILITY_ID: Container capability ID
+ * APM_CONTAINER_CAP_ID_PP = 1,
+ * APM_CONTAINER_CAP_ID_CD = 2,
+ * APM_CONTAINER_CAP_ID_EP = 3,
+ * APM_CONTAINER_CAP_ID_OLC = 4
+ *
+ * %AR_TKN_U32_CONTAINER_STACK_SIZE: Stack size in the container.
+ *
+ * %AR_TKN_U32_CONTAINER_GRAPH_POS: Graph Position
+ * APM_CONT_GRAPH_POS_STREAM = 1,
+ * APM_CONT_GRAPH_POS_PER_STR_PER_DEV = 2,
+ * APM_CONT_GRAPH_POS_STR_DEV = 3,
+ * APM_CONT_GRAPH_POS_GLOBAL_DEV = 4
+ *
+ * %AR_TKN_U32_CONTAINER_PROC_DOMAIN: Processor domain of container
+ * APM_PROC_DOMAIN_ID_MDSP = 1,
+ * APM_PROC_DOMAIN_ID_ADSP = 2,
+ * APM_PROC_DOMAIN_ID_SDSP = 4,
+ * APM_PROC_DOMAIN_ID_CDSP = 5
+ *
+ * %AR_TKN_U32_MODULE_ID: Module ID
+ *
+ * %AR_TKN_U32_MODULE_INSTANCE_ID: Module Instance ID.
+ *
+ * %AR_TKN_U32_MODULE_MAX_IP_PORTS: Module maximum input ports
+ *
+ * %AR_TKN_U32_MODULE_MAX_OP_PORTS: Module maximum output ports.
+ *
+ * %AR_TKN_U32_MODULE_IN_PORTS: Number of in ports
+ *
+ * %AR_TKN_U32_MODULE_OUT_PORTS: Number of out ports.
+ *
+ * %AR_TKN_U32_MODULE_SRC_OP_PORT_ID: Source module output port ID
+ *
+ * %AR_TKN_U32_MODULE_DST_IN_PORT_ID: Destination module input port ID
+ *
+ * %AR_TKN_U32_MODULE_HW_IF_IDX: Interface index types for I2S/LPAIF
+ *
+ * %AR_TKN_U32_MODULE_HW_IF_TYPE: Interface type
+ * LPAIF = 0,
+ * LPAIF_RXTX = 1,
+ * LPAIF_WSA = 2,
+ * LPAIF_VA = 3,
+ * LPAIF_AXI = 4
+ *
+ * %AR_TKN_U32_MODULE_FMT_INTERLEAVE: PCM Interleaving
+ * PCM_INTERLEAVED = 1,
+ * PCM_DEINTERLEAVED_PACKED = 2,
+ * PCM_DEINTERLEAVED_UNPACKED = 3
+ *
+ * %AR_TKN_U32_MODULE_FMT_DATA: data format
+ * FIXED POINT = 1,
+ * IEC60958 PACKETIZED = 3,
+ * IEC60958 PACKETIZED NON LINEAR = 8,
+ * COMPR OVER PCM PACKETIZED = 7,
+ * IEC61937 PACKETIZED = 2,
+ * GENERIC COMPRESSED = 5
+ *
+ * %AR_TKN_U32_MODULE_FMT_SAMPLE_RATE: sample rate
+ *
+ * %AR_TKN_U32_MODULE_FMT_BIT_DEPTH: bit depth
+ *
+ * %AR_TKN_U32_MODULE_SD_LINE_IDX: I2S serial data line idx
+ * I2S_SD0 = 1,
+ * I2S_SD1 = 2,
+ * I2S_SD2 = 3,
+ * I2S_SD3 = 4,
+ * I2S_QUAD01 = 5,
+ * I2S_QUAD23 = 6,
+ * I2S_6CHS = 7,
+ * I2S_8CHS = 8
+ *
+ * %AR_TKN_U32_MODULE_WS_SRC: Word Select Source
+ * AR_I2S_WS_SRC_EXTERNAL = 0,
+ * AR_I2S_WS_SRC_INTERNAL = 1
+ *
+ * %AR_TKN_U32_MODULE_FRAME_SZ_FACTOR: Frame size factor
+ *
+ * %AR_TKN_U32_MODULE_LOG_CODE: Log Module Code
+ *
+ * %AR_TKN_U32_MODULE_LOG_TAP_POINT_ID: logging tap point of this module
+ *
+ * %AR_TKN_U32_MODULE_LOG_MODE: logging mode
+ * LOG_WAIT = 0,
+ * LOG_IMMEDIATELY = 1
+ *
+ * %AR_TKN_DAI_INDEX: dai index
+ *
+ */
+
+/* DAI Tokens */
+#define AR_TKN_DAI_INDEX 1
+/* SUB GRAPH Tokens */
+#define AR_TKN_U32_SUB_GRAPH_INSTANCE_ID 2
+#define AR_TKN_U32_SUB_GRAPH_PERF_MODE 3
+#define AR_TKN_U32_SUB_GRAPH_DIRECTION 4
+#define AR_TKN_U32_SUB_GRAPH_SCENARIO_ID 5
+
+/* Container Tokens */
+#define AR_TKN_U32_CONTAINER_INSTANCE_ID 100
+#define AR_TKN_U32_CONTAINER_CAPABILITY_ID 101
+#define AR_TKN_U32_CONTAINER_STACK_SIZE 102
+#define AR_TKN_U32_CONTAINER_GRAPH_POS 103
+#define AR_TKN_U32_CONTAINER_PROC_DOMAIN 104
+
+/* Module Tokens */
+#define AR_TKN_U32_MODULE_ID 200
+#define AR_TKN_U32_MODULE_INSTANCE_ID 201
+#define AR_TKN_U32_MODULE_MAX_IP_PORTS 202
+#define AR_TKN_U32_MODULE_MAX_OP_PORTS 203
+#define AR_TKN_U32_MODULE_IN_PORTS 204
+#define AR_TKN_U32_MODULE_OUT_PORTS 205
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID 206
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID 207
+#define AR_TKN_U32_MODULE_SRC_INSTANCE_ID 208
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID 209
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID1 210
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID1 211
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID1 212
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID2 213
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID2 214
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID2 215
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID3 216
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID3 217
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID3 218
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID4 219
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID4 220
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID4 221
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID5 222
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID5 223
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID5 224
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID6 225
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID6 226
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID6 227
+
+#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID7 228
+#define AR_TKN_U32_MODULE_DST_IN_PORT_ID7 229
+#define AR_TKN_U32_MODULE_DST_INSTANCE_ID7 230
+
+#define AR_TKN_U32_MODULE_HW_IF_IDX 250
+#define AR_TKN_U32_MODULE_HW_IF_TYPE 251
+#define AR_TKN_U32_MODULE_FMT_INTERLEAVE 252
+#define AR_TKN_U32_MODULE_FMT_DATA 253
+#define AR_TKN_U32_MODULE_FMT_SAMPLE_RATE 254
+#define AR_TKN_U32_MODULE_FMT_BIT_DEPTH 255
+#define AR_TKN_U32_MODULE_SD_LINE_IDX 256
+#define AR_TKN_U32_MODULE_WS_SRC 257
+#define AR_TKN_U32_MODULE_FRAME_SZ_FACTOR 258
+#define AR_TKN_U32_MODULE_LOG_CODE 259
+#define AR_TKN_U32_MODULE_LOG_TAP_POINT_ID 260
+#define AR_TKN_U32_MODULE_LOG_MODE 261
+
+#endif /* __SND_AR_TOKENS_H__ */
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 8ba0112e5336..defeb0c6ed20 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -4,16 +4,6 @@
*
* Copyright (C) 2016 Intel Corp
* Author: Shreyas NC <shreyas.nc@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __SND_SST_TOKENS_H__
#define __SND_SST_TOKENS_H__
@@ -233,6 +223,8 @@
*
* %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
*
+ * %SKL_TKN_U32_FMT_CFG_IDX: Format config index
+ *
 * module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*
@@ -324,7 +316,9 @@ enum SKL_TKNS {
SKL_TKN_U32_ASTATE_COUNT,
SKL_TKN_U32_ASTATE_KCPS,
SKL_TKN_U32_ASTATE_CLK_SRC,
- SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
+
+ SKL_TKN_U32_FMT_CFG_IDX = 96,
+ SKL_TKN_MAX = SKL_TKN_U32_FMT_CFG_IDX,
};
#endif
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index d54be303090f..45c657c3919e 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -24,9 +24,11 @@
#ifndef __INCLUDE_UAPI_SOUND_SOF_ABI_H__
#define __INCLUDE_UAPI_SOUND_SOF_ABI_H__
+#include <linux/types.h>
+
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 16
+#define SOF_ABI_MINOR 23
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
@@ -58,5 +60,7 @@
/* SOF ABI magic number "SOF\0". */
#define SOF_ABI_MAGIC 0x00464F53
+/* SOF IPC4 ABI magic number "SOF4". */
+#define SOF_IPC4_ABI_MAGIC 0x34464F53
#endif
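
For reference, the MMmmmppp layout mentioned above packs the major version into the top byte and the minor and patch numbers into 12-bit fields. The shift values below are inferred from that comment; the header's own SOF_ABI_VER helper macros (not shown in this hunk) are authoritative.

/* Sketch only: pack an ABI version word per the MMmmmppp comment.
 * The shifts are assumptions based on that format description.
 */
static inline unsigned int sof_abi_pack(unsigned int major,
					unsigned int minor,
					unsigned int patch)
{
	return (major << 24) | (minor << 12) | patch;
}

/* SOF_ABI_MAJOR 3, SOF_ABI_MINOR 23, SOF_ABI_PATCH 0 -> 0x03017000 */
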
diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
index 5f4518e7a972..cb3c1ace69e3 100644
--- a/include/uapi/sound/sof/header.h
+++ b/include/uapi/sound/sof/header.h
@@ -11,19 +11,60 @@
#include <linux/types.h>
-/*
- * Header for all non IPC ABI data.
+/**
+ * struct sof_abi_hdr - Header for all non IPC ABI data.
+ * @magic: Magic number for validation
+ * for IPC3 data: 0x00464F53 ('S', 'O', 'F', '\0')
+ * for IPC4 data: 0x34464F53 ('S', 'O', 'F', '4')
+ * @type: module specific parameter
+ * for IPC3: Component specific type
+ * for IPC4: parameter ID (param_id) of the data
+ * @size: The size in bytes of the data, excluding this struct
+ * @abi: SOF ABI version. The version is valid in the scope of the 'magic';
+ * IPC3 and IPC4 ABI version numbers have no relationship.
+ * @reserved: Reserved for future use
+ * @data: Component data - opaque to core
*
* Identifies data type, size and ABI.
* Used by any bespoke component data structures or binary blobs.
*/
struct sof_abi_hdr {
- __u32 magic; /**< 'S', 'O', 'F', '\0' */
- __u32 type; /**< component specific type */
- __u32 size; /**< size in bytes of data excl. this struct */
- __u32 abi; /**< SOF ABI version */
- __u32 reserved[4]; /**< reserved for future use */
- __u32 data[0]; /**< Component data - opaque to core */
+ __u32 magic;
+ __u32 type;
+ __u32 size;
+ __u32 abi;
+ __u32 reserved[4];
+ __u32 data[];
} __packed;
+#define SOF_MANIFEST_DATA_TYPE_NHLT 1
+
+/**
+ * struct sof_manifest_tlv - SOF manifest TLV data
+ * @type: type of data
+ * @size: data size (not including the size of this struct)
+ * @data: payload data
+ */
+struct sof_manifest_tlv {
+ __le32 type;
+ __le32 size;
+ __u8 data[];
+};
+
+/**
+ * struct sof_manifest - SOF topology manifest
+ * @abi_major: Major ABI version
+ * @abi_minor: Minor ABI version
+ * @abi_patch: ABI patch
+ * @count: count of tlv items
+ * @items: consecutive variable size tlv items
+ */
+struct sof_manifest {
+ __le16 abi_major;
+ __le16 abi_minor;
+ __le16 abi_patch;
+ __le16 count;
+ struct sof_manifest_tlv items[];
+};
+
#endif
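
Because each struct sof_manifest_tlv carries its own @size, the @items array can only be walked sequentially. A hedged sketch of that walk; it assumes a little-endian host (the fields are __le16/__le32) and leaves bounds checking against the real buffer length to the caller.

#include <stdint.h>

static void sof_manifest_walk(const struct sof_manifest *m)
{
	const struct sof_manifest_tlv *tlv = m->items;
	uint16_t i;

	for (i = 0; i < m->count; i++) {
		/* tlv->type identifies the payload, e.g.
		 * SOF_MANIFEST_DATA_TYPE_NHLT; tlv->data holds
		 * tlv->size bytes. The next item starts right after.
		 */
		tlv = (const struct sof_manifest_tlv *)
			((const uint8_t *)tlv->data + tlv->size);
	}
}
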
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 5941e2eb1588..6bf00c09d30d 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -24,6 +24,9 @@
#define SOF_TPLG_KCTL_ENUM_ID 257
#define SOF_TPLG_KCTL_BYTES_ID 258
#define SOF_TPLG_KCTL_SWITCH_ID 259
+#define SOF_TPLG_KCTL_BYTES_VOLATILE_RO 260
+#define SOF_TPLG_KCTL_BYTES_VOLATILE_RW 261
+#define SOF_TPLG_KCTL_BYTES_WO_ID 262
/*
* Tokens - must match values in topology configurations
@@ -32,6 +35,7 @@
/* buffers */
#define SOF_TKN_BUF_SIZE 100
#define SOF_TKN_BUF_CAPS 101
+#define SOF_TKN_BUF_FLAGS 102
/* DAI */
/* Token retired with ABI 3.2, do not use for new capabilities
@@ -48,11 +52,19 @@
#define SOF_TKN_SCHED_CORE 203
#define SOF_TKN_SCHED_FRAMES 204
#define SOF_TKN_SCHED_TIME_DOMAIN 205
+#define SOF_TKN_SCHED_DYNAMIC_PIPELINE 206
+#define SOF_TKN_SCHED_LP_MODE 207
+#define SOF_TKN_SCHED_MEM_USAGE 208
+#define SOF_TKN_SCHED_USE_CHAIN_DMA 209
/* volume */
#define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250
#define SOF_TKN_VOLUME_RAMP_STEP_MS 251
+#define SOF_TKN_GAIN_RAMP_TYPE 260
+#define SOF_TKN_GAIN_RAMP_DURATION 261
+#define SOF_TKN_GAIN_VAL 262
+
/* SRC */
#define SOF_TKN_SRC_RATE_IN 300
#define SOF_TKN_SRC_RATE_OUT 301
@@ -73,6 +85,26 @@
/* Token retired with ABI 3.2, do not use for new capabilities
* #define SOF_TKN_COMP_PRELOAD_COUNT 403
*/
+#define SOF_TKN_COMP_CORE_ID 404
+#define SOF_TKN_COMP_UUID 405
+#define SOF_TKN_COMP_CPC 406
+#define SOF_TKN_COMP_IS_PAGES 409
+#define SOF_TKN_COMP_NUM_AUDIO_FORMATS 410
+#define SOF_TKN_COMP_NUM_INPUT_PINS 411
+#define SOF_TKN_COMP_NUM_OUTPUT_PINS 412
+/*
+ * The token for input/output pin binding; it specifies the widget
+ * name that the input/output pin is connected from/to.
+ */
+#define SOF_TKN_COMP_INPUT_PIN_BINDING_WNAME 413
+#define SOF_TKN_COMP_OUTPUT_PIN_BINDING_WNAME 414
+#define SOF_TKN_COMP_NUM_INPUT_AUDIO_FORMATS 415
+#define SOF_TKN_COMP_NUM_OUTPUT_AUDIO_FORMATS 416
+/*
+ * The token value is copied to the dapm_widget's
+ * no_wname_in_kcontrol_name.
+ */
+#define SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME 417
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
@@ -134,4 +166,60 @@
#define SOF_TKN_INTEL_HDA_RATE 1500
#define SOF_TKN_INTEL_HDA_CH 1501
+/* AFE */
+#define SOF_TKN_MEDIATEK_AFE_RATE 1600
+#define SOF_TKN_MEDIATEK_AFE_CH 1601
+#define SOF_TKN_MEDIATEK_AFE_FORMAT 1602
+
+/* MIXER */
+#define SOF_TKN_MIXER_TYPE 1700
+
+/* ACPDMIC */
+#define SOF_TKN_AMD_ACPDMIC_RATE 1800
+#define SOF_TKN_AMD_ACPDMIC_CH 1801
+
+/* CAVS AUDIO FORMAT */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_RATE 1900
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_BIT_DEPTH 1901
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_VALID_BIT_DEPTH 1902
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CHANNELS 1903
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_MAP 1904
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_CFG 1905
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE 1906
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG 1907
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_SAMPLE_TYPE 1908
+#define SOF_TKN_CAVS_AUDIO_FORMAT_INPUT_PIN_INDEX 1909
+/* intentional token numbering discontinuity, reserved for future use */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE 1930
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH 1931
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_VALID_BIT_DEPTH 1932
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CHANNELS 1933
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_MAP 1934
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_CFG 1935
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE 1936
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG 1937
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_SAMPLE_TYPE 1938
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUTPUT_PIN_INDEX 1939
+/* intentional token numbering discontinuity, reserved for future use */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IBS 1970
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OBS 1971
+#define SOF_TKN_CAVS_AUDIO_FORMAT_DMA_BUFFER_SIZE 1972
+
+/* COPIER */
+#define SOF_TKN_INTEL_COPIER_NODE_TYPE 1980
+#define SOF_TKN_INTEL_COPIER_DEEP_BUFFER_DMA_MS 1981
+
+/* ACP I2S */
+#define SOF_TKN_AMD_ACPI2S_RATE 1700
+#define SOF_TKN_AMD_ACPI2S_CH 1701
+#define SOF_TKN_AMD_ACPI2S_TDM_MODE 1702
+
+/* MICFIL PDM */
+#define SOF_TKN_IMX_MICFIL_RATE 2000
+#define SOF_TKN_IMX_MICFIL_CH 2001
+
+/* ACP SDW */
+#define SOF_TKN_AMD_ACP_SDW_RATE 2100
+#define SOF_TKN_AMD_ACP_SDW_CH 2101
+
#endif
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index 7d6d65f60a42..b99a2414b53d 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -1,15 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
#ifndef __UAPI_SOUND_TLV_H
#define __UAPI_SOUND_TLV_H
diff --git a/include/uapi/sound/usb_stream.h b/include/uapi/sound/usb_stream.h
index 95419d8bbc16..50609016185a 100644
--- a/include/uapi/sound/usb_stream.h
+++ b/include/uapi/sound/usb_stream.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI__SOUND_USB_STREAM_H
@@ -61,7 +47,7 @@ struct usb_stream {
unsigned inpacket_split_at;
unsigned next_inpacket_split;
unsigned next_inpacket_split_at;
- struct usb_stream_packet inpacket[0];
+ struct usb_stream_packet inpacket[];
};
enum usb_stream_state {
diff --git a/include/uapi/xen/evtchn.h b/include/uapi/xen/evtchn.h
index 7fbf732f168f..aef2b75f3413 100644
--- a/include/uapi/xen/evtchn.h
+++ b/include/uapi/xen/evtchn.h
@@ -101,4 +101,13 @@ struct ioctl_evtchn_restrict_domid {
domid_t domid;
};
+/*
+ * Bind statically allocated @port.
+ */
+#define IOCTL_EVTCHN_BIND_STATIC \
+ _IOC(_IOC_NONE, 'E', 7, sizeof(struct ioctl_evtchn_bind))
+struct ioctl_evtchn_bind {
+ unsigned int port;
+};
+
#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
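
A minimal sketch of the new ioctl in use. The /dev/xen/evtchn path is the conventional node for this interface, and the binding is assumed to live for as long as the returned file descriptor stays open.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/evtchn.h>

/* Sketch: bind a statically allocated event channel port. Returns the
 * open evtchn fd on success (the binding is tied to it), -1 on error.
 */
int evtchn_bind_static(unsigned int port)
{
	struct ioctl_evtchn_bind bind = { .port = port };
	int fd = open("/dev/xen/evtchn", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IOCTL_EVTCHN_BIND_STATIC, &bind) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
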
diff --git a/include/uapi/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 48d2790ef928..3109282672f3 100644
--- a/include/uapi/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
@@ -31,7 +31,10 @@ struct ioctl_gntalloc_alloc_gref {
__u64 index;
/* The grant references of the newly created grant, one per page */
/* Variable size, depending on count */
- __u32 gref_ids[1];
+ union {
+ __u32 gref_ids[1];
+ __DECLARE_FLEX_ARRAY(__u32, gref_ids_flex);
+ };
};
#define GNTALLOC_FLAG_WRITABLE 1
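
The union above keeps the historical gref_ids[1] layout, so existing userspace sizing math still works, while gref_ids_flex gives a proper flexible-array view of the same bytes. The two idioms size the allocation slightly differently; a hedged sketch of the flexible-array sizing, assuming the compiler resolves offsetof through anonymous members (GNU C does):

#include <stddef.h>
#include <stdlib.h>
#include <xen/gntalloc.h>

/* Sketch: allocate the ioctl argument with room for `count` grant ids
 * via the flexible-array view. The legacy idiom was
 * sizeof(struct ...) + (count - 1) * sizeof(__u32), which can
 * over-allocate slightly because sizeof() includes trailing padding.
 */
struct ioctl_gntalloc_alloc_gref *alloc_gref_arg(unsigned int count)
{
	size_t sz = offsetof(struct ioctl_gntalloc_alloc_gref, gref_ids_flex) +
		    count * sizeof(__u32);

	return calloc(1, sz);
}
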
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 9ac5515b9bc2..7a7145395c09 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -47,7 +47,13 @@ struct ioctl_gntdev_grant_ref {
/*
* Inserts the grant references into the mapping table of an instance
* of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
+ * until mmap() is called with @index as the offset. @index should be
+ * considered opaque to userspace, with one exception: if no grant
+ * references have ever been inserted into the mapping table of this
+ * instance, @index will be set to 0. This is necessary to use gntdev
+ * with userspace APIs that expect a file descriptor that can be
+ * mmap()'d at offset 0, such as Wayland. If @count is set to 0, this
+ * ioctl will fail.
*/
#define IOCTL_GNTDEV_MAP_GRANT_REF \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
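
The @index contract described above makes the flow two-step: insert the refs, then mmap() the returned index. A hypothetical sketch; the refs[] layout comes from the surrounding header rather than this hunk, and the helper name is illustrative.

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntdev.h>

/* Sketch: map a single grant from `domid`/`ref` through an open
 * /dev/xen/gntdev fd. `page_size` is the granularity of the mapping.
 */
void *gntdev_map_one(int fd, __u32 domid, __u32 ref, size_t page_size)
{
	struct ioctl_gntdev_map_grant_ref op = {
		.count = 1,
		.refs = { { .domid = domid, .ref = ref } },
	};

	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op))
		return MAP_FAILED;
	/* @index from the ioctl is the mmap() offset to use */
	return mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);
}
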
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index d2029556083e..8b8c5d1420fe 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -98,6 +98,34 @@ struct privcmd_mmap_resource {
__u64 addr;
};
+/* For privcmd_irqfd::flags */
+#define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+struct privcmd_irqfd {
+ __u64 dm_op;
+ __u32 size; /* Size of structure pointed by dm_op */
+ __u32 fd;
+ __u32 flags;
+ domid_t dom;
+ __u8 pad[2];
+};
+
+/* For privcmd_ioeventfd::flags */
+#define PRIVCMD_IOEVENTFD_FLAG_DEASSIGN (1 << 0)
+
+struct privcmd_ioeventfd {
+ __u64 ioreq;
+ __u64 ports;
+ __u64 addr;
+ __u32 addr_len;
+ __u32 event_fd;
+ __u32 vcpus;
+ __u32 vq;
+ __u32 flags;
+ domid_t dom;
+ __u8 pad[2];
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -125,5 +153,9 @@ struct privcmd_mmap_resource {
_IOC(_IOC_NONE, 'P', 6, sizeof(domid_t))
#define IOCTL_PRIVCMD_MMAP_RESOURCE \
_IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
+#define IOCTL_PRIVCMD_IRQFD \
+ _IOW('P', 8, struct privcmd_irqfd)
+#define IOCTL_PRIVCMD_IOEVENTFD \
+ _IOW('P', 9, struct privcmd_ioeventfd)
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
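
A hedged sketch of IOCTL_PRIVCMD_IRQFD in use: the caller supplies a prepared dm_op buffer and an eventfd, and the kernel is assumed to execute the dm_op each time the eventfd is signalled. The privcmd fd, the dm_op payload, and the helper name are assumptions here.

#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

/* Sketch: attach an eventfd-triggered dm_op for domain `dom`.
 * `dm_op_buf`/`size` describe a caller-prepared dm_op structure.
 */
int privcmd_assign_irqfd(int privcmd_fd, domid_t dom,
			 void *dm_op_buf, __u32 size)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	struct privcmd_irqfd irqfd = {
		.dm_op = (__u64)(uintptr_t)dm_op_buf,
		.size = size,
		.fd = efd,
		.dom = dom,
	};

	if (efd < 0)
		return -1;
	return ioctl(privcmd_fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
}
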
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
new file mode 100644
index 000000000000..b6003749bc83
--- /dev/null
+++ b/include/ufs/ufs.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal Flash Storage Host controller driver
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#ifndef _UFS_H
+#define _UFS_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <uapi/scsi/scsi_bsg_ufs.h>
+#include <linux/time64.h>
+
+/*
+ * Using static_assert() is not allowed in UAPI header files. Hence the size
+ * of struct utp_upiu_header is checked in this header file instead.
+ */
+static_assert(sizeof(struct utp_upiu_header) == 12);
+
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
+#define QUERY_DESC_MAX_SIZE 255
+#define QUERY_DESC_MIN_SIZE 2
+#define QUERY_DESC_HDR_SIZE 2
+#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
+ (sizeof(struct utp_upiu_header)))
+#define UFS_SENSE_SIZE 18
+
+/*
+ * A UFS device may have standard LUs, whose LUN ids can range from 0x00 to
+ * 0x7F. Standard LUs use the "Peripheral Device Addressing Format".
+ * A UFS device may also have Well Known LUs (also referred to as W-LUs),
+ * whose ids again range from 0x00 to 0x7F. For W-LUs, the device only uses
+ * the "Extended Addressing Format", which means the W-LUNs start
+ * from 0xc100 (SCSI_W_LUN_BASE) onwards.
+ * This means the max. LUN number reported by a UFS device could be 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
+#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
+#define UFS_UPIU_WLUN_ID (1 << 7)
+
+/* The WriteBooster buffer is available only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
+/*
+ * The WriteBooster buffer lifetime has a limit set by the vendor.
+ * If it is over the limit, the WriteBooster feature will be disabled.
+ */
+#define UFS_WB_EXCEED_LIFETIME 0x0B
+
+/*
+ * Per the UFS spec, the Extra Header Segment (EHS) starts at byte 32 in the UPIU request/response packet
+ */
+#define EHS_OFFSET_IN_RESPONSE 32
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+ UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
+ UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
+ UFS_UPIU_BOOT_WLUN = 0xB0,
+ UFS_UPIU_RPMB_WLUN = 0xC4,
+};
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum upiu_request_transaction {
+ UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UPIU_TRANSACTION_COMMAND = 0x01,
+ UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UPIU_TRANSACTION_QUERY_REQ = 0x16,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum upiu_response_transaction {
+ UPIU_TRANSACTION_NOP_IN = 0x20,
+ UPIU_TRANSACTION_RESPONSE = 0x21,
+ UPIU_TRANSACTION_DATA_IN = 0x22,
+ UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UPIU_TRANSACTION_READY_XFER = 0x31,
+ UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
+};
+
+/* UPIU Read/Write flags. See also table "UPIU Flags" in the UFS standard. */
+enum {
+ UPIU_CMD_FLAGS_NONE = 0x00,
+ UPIU_CMD_FLAGS_CP = 0x04,
+ UPIU_CMD_FLAGS_WRITE = 0x20,
+ UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU response flags */
+enum {
+ UPIU_RSP_FLAG_UNDERFLOW = 0x20,
+ UPIU_RSP_FLAG_OVERFLOW = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UPIU_TASK_ATTR_ORDERED = 0x01,
+ UPIU_TASK_ATTR_HEADQ = 0x02,
+ UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UPIU Query request function */
+enum {
+ UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+ QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
+ QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
+ QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+ QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
+ QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
+ QUERY_FLAG_IDN_RESERVED2 = 0x07,
+ QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
+ QUERY_FLAG_IDN_BUSY_RTC = 0x09,
+ QUERY_FLAG_IDN_RESERVED3 = 0x0A,
+ QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ QUERY_FLAG_IDN_WB_EN = 0x0E,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+ QUERY_FLAG_IDN_HPB_RESET = 0x11,
+ QUERY_FLAG_IDN_HPB_EN = 0x12,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
+ QUERY_ATTR_IDN_POWER_MODE = 0x02,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
+ QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
+ QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
+ QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
+ QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
+ QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
+ QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
+ QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
+ QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+ QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
+ QUERY_ATTR_IDN_CNTX_CONF = 0x10,
+ QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
+ QUERY_ATTR_IDN_RESERVED2 = 0x12,
+ QUERY_ATTR_IDN_RESERVED3 = 0x13,
+ QUERY_ATTR_IDN_FFU_STATUS = 0x14,
+ QUERY_ATTR_IDN_PSA_STATE = 0x15,
+ QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+ QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+ QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
+ QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
+ QUERY_ATTR_IDN_EXT_IID_EN = 0x2A,
+ QUERY_ATTR_IDN_TIMESTAMP = 0x30
+};
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+ QUERY_DESC_IDN_DEVICE = 0x0,
+ QUERY_DESC_IDN_CONFIGURATION = 0x1,
+ QUERY_DESC_IDN_UNIT = 0x2,
+ QUERY_DESC_IDN_RFU_0 = 0x3,
+ QUERY_DESC_IDN_INTERCONNECT = 0x4,
+ QUERY_DESC_IDN_STRING = 0x5,
+ QUERY_DESC_IDN_RFU_1 = 0x6,
+ QUERY_DESC_IDN_GEOMETRY = 0x7,
+ QUERY_DESC_IDN_POWER = 0x8,
+ QUERY_DESC_IDN_HEALTH = 0x9,
+ QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+ QUERY_DESC_LENGTH_OFFSET = 0x00,
+ QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
+/* Unit descriptor parameter offsets in bytes */
+enum unit_desc_param {
+ UNIT_DESC_PARAM_LEN = 0x0,
+ UNIT_DESC_PARAM_TYPE = 0x1,
+ UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
+ UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
+ UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+ UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
+ UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+ UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+ UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
+};
+
+/* RPMB Unit descriptor parameter offsets in bytes */
+enum rpmb_unit_desc_param {
+ RPMB_UNIT_DESC_PARAM_LEN = 0x0,
+ RPMB_UNIT_DESC_PARAM_TYPE = 0x1,
+ RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9,
+ RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13,
+ RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14,
+ RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15,
+ RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16,
+ RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+};
+
+/* Device descriptor parameter offsets in bytes */
+enum device_desc_param {
+ DEVICE_DESC_PARAM_LEN = 0x0,
+ DEVICE_DESC_PARAM_TYPE = 0x1,
+ DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+ DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+ DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+ DEVICE_DESC_PARAM_PRTCL = 0x5,
+ DEVICE_DESC_PARAM_NUM_LU = 0x6,
+ DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+ DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+ DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+ DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+ DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+ DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+ DEVICE_DESC_PARAM_SEC_LU = 0xD,
+ DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+ DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+ DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+ DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+ DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+ DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+ DEVICE_DESC_PARAM_SN = 0x16,
+ DEVICE_DESC_PARAM_OEM_ID = 0x17,
+ DEVICE_DESC_PARAM_MANF_ID = 0x18,
+ DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+ DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+ DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+ DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+ DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
+ DEVICE_DESC_PARAM_FFU_TMT = 0x20,
+ DEVICE_DESC_PARAM_Q_DPTH = 0x21,
+ DEVICE_DESC_PARAM_DEV_VER = 0x22,
+ DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
+ DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
+ DEVICE_DESC_PARAM_PSA_TMT = 0x29,
+ DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_HPB_VER = 0x40,
+ DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
+};
+
+/* Interconnect descriptor parameter offsets in bytes */
+enum interconnect_desc_param {
+ INTERCONNECT_DESC_PARAM_LEN = 0x0,
+ INTERCONNECT_DESC_PARAM_TYPE = 0x1,
+ INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
+ INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
+};
+
+/* Geometry descriptor parameter offsets in bytes */
+enum geometry_desc_param {
+ GEOMETRY_DESC_PARAM_LEN = 0x0,
+ GEOMETRY_DESC_PARAM_TYPE = 0x1,
+ GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
+ GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
+ GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
+ GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
+ GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
+ GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
+ GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
+ GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
+ GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
+ GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
+ GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
+ GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
+ GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
+ GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
+ GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
+ GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
+ GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
+ GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
+ GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
+ GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
+ GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
+ GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
+ GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
+ GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
+ GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
+ GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
+ GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
+ GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
+ GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
+ GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+ GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+ GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
+ GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
+};
+
+/* Health descriptor parameter offsets in bytes */
+enum health_desc_param {
+ HEALTH_DESC_PARAM_LEN = 0x0,
+ HEALTH_DESC_PARAM_TYPE = 0x1,
+ HEALTH_DESC_PARAM_EOL_INFO = 0x2,
+ HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
+ HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
+};
+
+/* WriteBooster buffer mode */
+enum {
+ WB_BUF_MODE_LU_DEDICATED = 0x0,
+ WB_BUF_MODE_SHARED = 0x1,
+};
+
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn = 1
+ * 02h: LU permanently write protected when fPermanentWPEn = 1
+ */
+enum ufs_lu_wp_type {
+ UFS_LU_NO_WP = 0x00,
+ UFS_LU_POWER_ON_WP = 0x01,
+ UFS_LU_PERM_WP = 0x02,
+};
+
+/* bActiveICCLevel parameter current units */
+enum {
+ UFSHCD_NANO_AMP = 0,
+ UFSHCD_MICRO_AMP = 1,
+ UFSHCD_MILI_AMP = 2,
+ UFSHCD_AMP = 3,
+};
+
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+ UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
+ UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
+ UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
+ UFS_DEV_HPB_SUPPORT = BIT(7),
+ UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+ UFS_DEV_EXT_IID_SUP = BIT(16),
+};
+#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
+
+#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
+
+/* Attribute bActiveICCLevel parameter bit masks definitions */
+#define ATTR_ICC_LVL_UNIT_OFFSET 14
+#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
+#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
+
+/* Power descriptor parameter offsets in bytes */
+enum power_desc_param_offset {
+ PWR_DESC_LEN = 0x0,
+ PWR_DESC_TYPE = 0x1,
+ PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
+ PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
+ PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
+};
+
+/* Exception event mask values */
+enum {
+ MASK_EE_STATUS = 0xFFFF,
+ MASK_EE_DYNCAP_EVENT = BIT(0),
+ MASK_EE_SYSPOOL_EVENT = BIT(1),
+ MASK_EE_URGENT_BKOPS = BIT(2),
+ MASK_EE_TOO_HIGH_TEMP = BIT(3),
+ MASK_EE_TOO_LOW_TEMP = BIT(4),
+ MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
+ MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
+};
+#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
+
+/* Background operation status */
+enum bkops_status {
+ BKOPS_STATUS_NO_OP = 0x0,
+ BKOPS_STATUS_NON_CRITICAL = 0x1,
+ BKOPS_STATUS_PERF_IMPACT = 0x2,
+ BKOPS_STATUS_CRITICAL = 0x3,
+ BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+ UPIU_QUERY_OPCODE_NOP = 0x0,
+ UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* bRefClkFreq attribute values */
+enum ufs_ref_clk_freq {
+ REF_CLK_FREQ_19_2_MHZ = 0,
+ REF_CLK_FREQ_26_MHZ = 1,
+ REF_CLK_FREQ_38_4_MHZ = 2,
+ REF_CLK_FREQ_52_MHZ = 3,
+ REF_CLK_FREQ_INVAL = -1,
+};
+
+/* Query response result code */
+enum {
+ QUERY_RESULT_SUCCESS = 0x00,
+ QUERY_RESULT_NOT_READABLE = 0xF6,
+ QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ QUERY_RESULT_INVALID_VALUE = 0xFA,
+ QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ QUERY_RESULT_INVALID_INDEX = 0xFC,
+ QUERY_RESULT_INVALID_IDN = 0xFD,
+ QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+};
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+ UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+/* Offset of the response code in the UPIU header */
+#define UPIU_RSP_CODE_OFFSET 8
+
+enum {
+ MASK_TM_SERVICE_RESP = 0xFF,
+};
+
+/* Task management service response */
+enum {
+ UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+ UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+ UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+ UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+ UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+ UFS_ACTIVE_PWR_MODE = 1,
+ UFS_SLEEP_PWR_MODE = 2,
+ UFS_POWERDOWN_PWR_MODE = 3,
+ UFS_DEEPSLEEP_PWR_MODE = 4,
+};
+
+#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
+
+/**
+ * struct utp_cmd_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_cmd_rsp {
+ __be32 residual_transfer_count;
+ __be32 reserved[4];
+ __be16 sense_data_len;
+ u8 sense_data[UFS_SENSE_SIZE];
+};
+
+/**
+ * struct utp_upiu_rsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ union {
+ struct utp_cmd_rsp sr;
+ struct utp_upiu_query qr;
+ };
+};
+
+/*
+ * VCCQ & VCCQ2 current requirement when the UFS device is in sleep state
+ * and the link is in Hibern8 state.
+ */
+#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
+
+struct ufs_vreg {
+ struct regulator *reg;
+ const char *name;
+ bool always_on;
+ bool enabled;
+ int max_uA;
+};
+
+struct ufs_vreg_info {
+ struct ufs_vreg *vcc;
+ struct ufs_vreg *vccq;
+ struct ufs_vreg *vccq2;
+ struct ufs_vreg *vdd_hba;
+};
+
+/* UFS device descriptor wPeriodicRTCUpdate bit9 defines RTC time baseline */
+#define UFS_RTC_TIME_BASELINE BIT(9)
+
+enum ufs_rtc_time {
+ UFS_RTC_RELATIVE,
+ UFS_RTC_ABSOLUTE
+};
+
+struct ufs_dev_info {
+ bool f_power_on_wp_en;
+ /* Keeps information if any of the LU is power on write protected */
+ bool is_lu_power_on_wp;
+ /* Maximum number of general LU supported by the UFS device */
+ u8 max_lu_supported;
+ u16 wmanufacturerid;
+ /* UFS device Product Name */
+ u8 *model;
+ u16 wspecversion;
+ u32 clk_gating_wait_us;
+ /* Stores the depth of queue in UFS device */
+ u8 bqueuedepth;
+
+ /* UFS WB related flags */
+ bool wb_enabled;
+ bool wb_buf_flush_enabled;
+ u8 wb_dedicated_lu;
+ u8 wb_buffer_type;
+
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
+
+ bool b_advanced_rpmb_en;
+
+ /* UFS EXT_IID Enable */
+ bool b_ext_iid_en;
+
+ /* UFS RTC */
+ enum ufs_rtc_time rtc_type;
+ time64_t rtc_time_baseline;
+ u32 rtc_update_period;
+};
+
+/*
+ * This enum is used in string mapping in include/trace/events/ufs.h.
+ */
+enum ufs_trace_str_t {
+ UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
+ UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
+ UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
+};
+
+/*
+ * Transaction Specific Fields (TSF) type in the UPIU package, this enum is
+ * used in include/trace/events/ufs.h for UFS command trace.
+ */
+enum ufs_trace_tsf_t {
+ UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
+};
+
+#endif /* _UFS_H */
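
As a worked example of the bActiveICCLevel bit masks defined above: each active ICC level entry in the power descriptor encodes a 2-bit unit field at bit 14 and a 10-bit current value. A small hedged sketch of decoding one entry (helper names are illustrative):

/* Sketch: split an active ICC level entry into unit and value.
 * e.g. unit == UFSHCD_MILI_AMP with value 100 means 100 mA.
 */
static inline u32 ufs_icc_unit(u32 entry)
{
	return (entry & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET;
}

static inline u32 ufs_icc_value(u32 entry)
{
	return entry & ATTR_ICC_LVL_VALUE_MASK;
}
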
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
new file mode 100644
index 000000000000..41ff44dfa1db
--- /dev/null
+++ b/include/ufs/ufs_quirks.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UFS_QUIRKS_H_
+#define _UFS_QUIRKS_H_
+
+/* return true if s1 is a prefix of s2 */
+#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
+
+#define UFS_ANY_VENDOR 0xFFFF
+#define UFS_ANY_MODEL "ANY_MODEL"
+
+#define UFS_VENDOR_MICRON 0x12C
+#define UFS_VENDOR_SAMSUNG 0x1CE
+#define UFS_VENDOR_SKHYNIX 0x1AD
+#define UFS_VENDOR_TOSHIBA 0x198
+#define UFS_VENDOR_WDC 0x145
+
+/**
+ * struct ufs_dev_quirk - ufs device quirk info
+ * @wmanufacturerid: UFS device manufacturer id
+ * @model: UFS device model name, or UFS_ANY_MODEL
+ * @quirk: device quirk flags
+ */
+struct ufs_dev_quirk {
+ u16 wmanufacturerid;
+ const u8 *model;
+ unsigned int quirk;
+};
+
+/*
+ * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
+ * causing the host controller to raise the DFES error status. Sometimes
+ * such UFS devices send back-to-back NACs without waiting for a new
+ * retransmitted DL frame from the host, and in such cases it is possible
+ * that the host UniPro goes into a bad state without raising the DFES error
+ * interrupt. If this happens then all the pending commands would time out
+ * only after the respective SW command timeout (which is generally too long).
+ *
+ * We can work around such device behaviour as follows:
+ * - As soon as SW sees the DL NAC error, it should schedule the error handler
+ * - The error handler would sleep for 50ms to see if there are any fatal errors
+ *   raised by the UFS controller.
+ * - If there are fatal errors then SW does normal error recovery.
+ * - If there are no fatal errors then SW sends the NOP command to the device
+ *   to check if the link is alive.
+ * - If the NOP command times out, SW does normal error recovery
+ * - If the NOP command succeeds, skip the error handling.
+ *
+ * If the DL NAC error is seen multiple times with some vendors' UFS devices,
+ * enable this quirk to initiate quick error recovery and also silence related
+ * error logs to reduce spamming of kernel logs.
+ */
+#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
+
+/*
+ * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
+ * 600us, which may not be enough for a reliable hibern8 exit hardware sequence
+ * on the UFS device.
+ * To work around this issue, the host should set its PA_TACTIVATE time to 1ms
+ * even if the device advertises a RX_MIN_ACTIVATETIME_CAPABILITY below 1ms.
+ */
+#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
+
+/*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from UFS rails (especially from the VCCQ rail).
+ * To avoid this situation, add a 2ms delay before putting these UFS
+ * rails into LPM mode.
+ */
+#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+
+/*
+ * Some UFS devices require the host PA_TACTIVATE to be lower than the device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+
+/*
+ * The max. value of PA_SaveConfigTime is 250 (10us), but this is not enough
+ * for some vendors.
+ * A gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime.
+ * A gear switch can be issued by the host controller as error recovery, and
+ * a software delay will not help in this case, so we need to increase
+ * PA_SaveConfigTime to >32us as per the vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
+
+/*
+ * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
+ * enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+
+/*
+ * Some pre-3.1 UFS devices can support extended features by upgrading
+ * the firmware. Enable this quirk to make the UFS core driver probe and enable
+ * supported features on such devices.
+ */
+#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
+
+/*
+ * Some UFS devices require a delay after the VCC power rail is turned off.
+ * Enable this quirk to introduce a 5ms delay after VCC power-off during
+ * the suspend flow.
+ */
+#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+
+#endif /* UFS_QUIRKS_H_ */
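
These quirk flags are typically collected into a table of struct ufs_dev_quirk entries that the core matches against a probed device's wmanufacturerid and model string (the model prefix match is what STR_PRFX_EQUAL above is for). A hedged illustration of such a table; the entries and the array name are illustrative, not the in-tree table:

/* Sketch: a quirk table; the vendor/quirk pairings here are examples only */
static const struct ufs_dev_quirk ufs_fixups[] = {
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{}	/* terminator */
};
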
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
new file mode 100644
index 000000000000..cb2afcebbdf5
--- /dev/null
+++ b/include/ufs/ufshcd.h
@@ -0,0 +1,1438 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal Flash Storage Host controller driver
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/bitfield.h>
+#include <linux/blk-crypto-profile.h>
+#include <linux/blk-mq.h>
+#include <linux/devfreq.h>
+#include <linux/fault-inject.h>
+#include <linux/msi.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-direction.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <ufs/unipro.h>
+#include <ufs/ufs.h>
+#include <ufs/ufs_quirks.h>
+#include <ufs/ufshci.h>
+
+#define UFSHCD "ufshcd"
+
+struct scsi_device;
+struct ufs_hba;
+
+enum dev_cmd_type {
+ DEV_CMD_TYPE_NOP = 0x0,
+ DEV_CMD_TYPE_QUERY = 0x1,
+ DEV_CMD_TYPE_RPMB = 0x2,
+};
+
+enum ufs_event_type {
+ /* uic specific errors */
+ UFS_EVT_PA_ERR = 0,
+ UFS_EVT_DL_ERR,
+ UFS_EVT_NL_ERR,
+ UFS_EVT_TL_ERR,
+ UFS_EVT_DME_ERR,
+
+ /* fatal errors */
+ UFS_EVT_AUTO_HIBERN8_ERR,
+ UFS_EVT_FATAL_ERR,
+ UFS_EVT_LINK_STARTUP_FAIL,
+ UFS_EVT_RESUME_ERR,
+ UFS_EVT_SUSPEND_ERR,
+ UFS_EVT_WL_SUSP_ERR,
+ UFS_EVT_WL_RES_ERR,
+
+ /* abnormal events */
+ UFS_EVT_DEV_RESET,
+ UFS_EVT_HOST_RESET,
+ UFS_EVT_ABORT,
+
+ UFS_EVT_CNT,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicates whether the UIC command is outstanding
+ * @done: UIC command completion
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ struct completion done;
+};
+
+/* Used to differentiate the power management options */
+enum ufs_pm_op {
+ UFS_RUNTIME_PM,
+ UFS_SYSTEM_PM,
+ UFS_SHUTDOWN_PM,
+};
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+ UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
+ UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
+ UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+ UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
+};
+
+#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
+#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
+ UIC_LINK_HIBERN8_STATE)
+#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
+ UIC_LINK_BROKEN_STATE)
+#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
+#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
+ UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
+ UIC_LINK_BROKEN_STATE)
+
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_set_ufs_dev_deepsleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_deepsleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
+
+/*
+ * UFS Power management levels.
+ * Each level is in increasing order of power savings, except DeepSleep,
+ * which draws less power than PowerDown with power kept on but not less
+ * than PowerDown with power off.
+ */
+enum ufs_pm_level {
+ UFS_PM_LVL_0,
+ UFS_PM_LVL_1,
+ UFS_PM_LVL_2,
+ UFS_PM_LVL_3,
+ UFS_PM_LVL_4,
+ UFS_PM_LVL_5,
+ UFS_PM_LVL_6,
+ UFS_PM_LVL_MAX
+};
+
+struct ufs_pm_lvl_states {
+ enum ufs_dev_pwr_mode dev_state;
+ enum uic_link_state link_state;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_req_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
+ * @cmd: pointer to SCSI command
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
+ * @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
+ * @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
+ * @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
+ * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
+ * @data_unit_num: the data unit number for the first block for inline crypto
+ * @req_abort_skip: skip request abort task flag
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_req *ucd_req_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ dma_addr_t utrd_dma_addr;
+ dma_addr_t ucd_req_dma_addr;
+ dma_addr_t ucd_rsp_dma_addr;
+ dma_addr_t ucd_prdt_dma_addr;
+
+ struct scsi_cmnd *cmd;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ u8 lun; /* UPIU LUN id field is only 8-bit wide */
+ bool intr_cmd;
+ ktime_t issue_time_stamp;
+ u64 issue_time_stamp_local_clock;
+ ktime_t compl_time_stamp;
+ u64 compl_time_stamp_local_clock;
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+ int crypto_key_slot;
+ u64 data_unit_num;
+#endif
+
+ bool req_abort_skip;
+};
+
+/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_res - UPIU QUERY response
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ struct utp_upiu_query upiu_res;
+};
+
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+ struct ufs_query_req request;
+ u8 *descriptor;
+ struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - all fields associated with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal commands completion
+ * @query: Device management query information
+ */
+struct ufs_dev_cmd {
+ enum dev_cmd_type type;
+ struct mutex lock;
+ struct completion *complete;
+ struct ufs_query query;
+};
+
+/**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+ * @clk: clock node
+ * @name: clock name
+ * @max_freq: maximum frequency supported by the clock
+ * @min_freq: min frequency that can be used for clock scaling
+ * @curr_freq: indicates the current frequency that it is set to
+ * @keep_link_active: indicates that the clk should not be disabled if
+ * link is active
+ * @enabled: variable to check against multiple enable/disable
+ */
+struct ufs_clk_info {
+ struct list_head list;
+ struct clk *clk;
+ const char *name;
+ u32 max_freq;
+ u32 min_freq;
+ u32 curr_freq;
+ bool keep_link_active;
+ bool enabled;
+};
+
+enum ufs_notify_change_status {
+ PRE_CHANGE,
+ POST_CHANGE,
+};
+
+struct ufs_pa_layer_attr {
+ u32 gear_rx;
+ u32 gear_tx;
+ u32 lane_rx;
+ u32 lane_tx;
+ u32 pwr_rx;
+ u32 pwr_tx;
+ u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+ bool is_valid;
+ struct ufs_pa_layer_attr info;
+};
+
+/**
+ * struct ufs_hba_variant_ops - variant specific callbacks
+ * @name: variant name
+ * @init: called when the driver is initialized
+ * @exit: called to cleanup everything done in init
+ * @get_ufs_hci_version: called to get UFS HCI version
+ * @clk_scale_notify: notifies that clks are scaled up/down
+ * @setup_clocks: called before touching any of the controller registers
+ * @hce_enable_notify: called before and after HCE enable bit is set to allow
+ * variant specific Uni-Pro initialization.
+ * @link_startup_notify: called before and after Link startup is carried out
+ * to allow variant specific Uni-Pro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ * is carried out to allow vendor specific capabilities
+ * to be set.
+ * @setup_xfer_req: called before any transfer request is issued
+ * to allow variant specific per-request setup
+ * @setup_task_mgmt: called before any task management request is issued
+ * to allow variant specific per-request setup
+ * @hibern8_notify: called around hibern8 enter/exit
+ * @apply_dev_quirks: called to apply device specific quirks
+ * @fixup_dev_quirks: called to modify device specific quirks
+ * @suspend: called during host controller PM callback
+ * @resume: called during host controller PM callback
+ * @dbg_register_dump: used to dump controller debug information
+ * @phy_initialization: used to initialize phys
+ * @device_reset: called to issue a reset pulse on the UFS device
+ * @config_scaling_param: called to configure clock scaling parameters
+ * @program_key: program or evict an inline encryption key
+ * @event_notify: called to notify important events
+ * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
+ * @mcq_config_resource: called to configure MCQ platform resources
+ * @get_hba_mac: called to get the vendor specific mac (max active commands)
+ * value, mandatory for mcq mode
+ * @op_runtime_config: called to config Operation and runtime regs Pointers
+ * @get_outstanding_cqs: called to get outstanding completion queues
+ * @config_esi: called to config Event Specific Interrupt
+ */
+struct ufs_hba_variant_ops {
+ const char *name;
+ int (*init)(struct ufs_hba *);
+ void (*exit)(struct ufs_hba *);
+ u32 (*get_ufs_hci_version)(struct ufs_hba *);
+ int (*clk_scale_notify)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
+ int (*hce_enable_notify)(struct ufs_hba *,
+ enum ufs_notify_change_status);
+ int (*link_startup_notify)(struct ufs_hba *,
+ enum ufs_notify_change_status);
+ int (*pwr_change_notify)(struct ufs_hba *,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *,
+ struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
+ int (*apply_dev_quirks)(struct ufs_hba *hba);
+ void (*fixup_dev_quirks)(struct ufs_hba *hba);
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op,
+ enum ufs_notify_change_status);
+ int (*resume)(struct ufs_hba *, enum ufs_pm_op);
+ void (*dbg_register_dump)(struct ufs_hba *hba);
+ int (*phy_initialization)(struct ufs_hba *);
+ int (*device_reset)(struct ufs_hba *hba);
+ void (*config_scaling_param)(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_simple_ondemand_data *data);
+ int (*program_key)(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot);
+ void (*event_notify)(struct ufs_hba *hba,
+ enum ufs_event_type evt, void *data);
+ void (*reinit_notify)(struct ufs_hba *);
+ int (*mcq_config_resource)(struct ufs_hba *hba);
+ int (*get_hba_mac)(struct ufs_hba *hba);
+ int (*op_runtime_config)(struct ufs_hba *hba);
+ int (*get_outstanding_cqs)(struct ufs_hba *hba,
+ unsigned long *ocqs);
+ int (*config_esi)(struct ufs_hba *hba);
+ void (*config_scsi_dev)(struct scsi_device *sdev);
+};
+
+/* clock gating state */
+enum clk_gating_state {
+ CLKS_OFF,
+ CLKS_ON,
+ REQ_CLKS_OFF,
+ REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks that will be used in case of
+ * interrupt context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: clk gating is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
+ * @is_initialized: Indicates whether clock gating is initialized or not
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ * @clk_gating_workq: workqueue for clock gating work.
+ */
+struct ufs_clk_gating {
+ struct delayed_work gate_work;
+ struct work_struct ungate_work;
+ enum clk_gating_state state;
+ unsigned long delay_ms;
+ bool is_suspended;
+ struct device_attribute delay_attr;
+ struct device_attribute enable_attr;
+ bool is_enabled;
+ bool is_initialized;
+ int active_reqs;
+ struct workqueue_struct *clk_gating_workq;
+};
+
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending. If this is zero when the
+ *	devfreq ->target() callback is invoked, "suspend_work" is scheduled to
+ *	suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: keeps track of the previous power mode, since the UFS
+ *	power mode may also be changed during scaling
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @target_freq: frequency requested by devfreq framework
+ * @min_gear: lowest HS gear to scale down to
+ * @is_enabled: tracks if scaling is currently enabled or not, controlled by
+ * clkscale_enable sysfs node
+ * @is_allowed: tracks if scaling is currently allowed or not, used to block
+ * clock scaling which is not invoked from devfreq governor
+ * @is_initialized: Indicates whether clock scaling is initialized or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ */
+struct ufs_clk_scaling {
+ int active_reqs;
+ unsigned long tot_busy_t;
+ ktime_t window_start_t;
+ ktime_t busy_start_t;
+ struct device_attribute enable_attr;
+ struct ufs_pa_layer_attr saved_pwr_info;
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+ unsigned long target_freq;
+ u32 min_gear;
+ bool is_enabled;
+ bool is_allowed;
+ bool is_initialized;
+ bool is_busy_started;
+ bool is_suspended;
+};
+
+#define UFS_EVENT_HIST_LENGTH 8
+/**
+ * struct ufs_event_hist - keeps history of errors
+ * @pos: index to indicate cyclic buffer position
+ * @val: cyclic buffer for registers value
+ * @tstamp: cyclic buffer for time stamp
+ * @cnt: error counter
+ */
+struct ufs_event_hist {
+ int pos;
+ u32 val[UFS_EVENT_HIST_LENGTH];
+ u64 tstamp[UFS_EVENT_HIST_LENGTH];
+ unsigned long long cnt;
+};
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @last_intr_status: record the last interrupt status.
+ * @last_intr_ts: record the last interrupt timestamp.
+ * @hibern8_exit_cnt: Counter to keep track of number of exits,
+ * reset this after link-startup.
+ * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
+ * Clear after the first successful command completion.
+ * @event: array with event history.
+ */
+struct ufs_stats {
+ u32 last_intr_status;
+ u64 last_intr_ts;
+
+ u32 hibern8_exit_cnt;
+ u64 last_hibern8_exit_tstamp;
+ struct ufs_event_hist event[UFS_EVT_CNT];
+};
+
+/**
+ * enum ufshcd_state - UFS host controller state
+ * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
+ * processing.
+ * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
+ * SCSI commands.
+ * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
+ * SCSI commands may be submitted to the controller.
+ * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
+ * newly submitted SCSI commands with error code DID_BAD_TARGET.
+ * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
+ * failed. Fail all SCSI commands with error code DID_ERROR.
+ */
+enum ufshcd_state {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
+ UFSHCD_STATE_ERROR,
+};
+
+enum ufshcd_quirks {
+ /* Interrupt aggregation support is broken */
+ UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
+
+ /*
+ * delay before each dme command is required as the unipro
+ * layer has shown instabilities
+ */
+ UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
+
+ /*
+	 * If the UFS host controller has trouble processing LCC (Line
+	 * Control Command) coming from the device, enable this quirk.
+	 * When this quirk is enabled, the host controller driver should
+	 * disable LCC transmission on the UFS device (by clearing the
+	 * device's TX_LCC_ENABLE attribute to 0).
+ */
+ UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
+
+ /*
+ * The attribute PA_RXHSUNTERMCAP specifies whether or not the
+ * inbound Link supports unterminated line in HS mode. Setting this
+ * attribute to 1 fixes moving to HS gear.
+ */
+ UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
+
+ /*
+ * This quirk needs to be enabled if the host controller only allows
+ * accessing the peer dme attributes in AUTO mode (FAST AUTO or
+ * SLOW AUTO).
+ */
+ UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
+
+ /*
+ * This quirk needs to be enabled if the host controller doesn't
+ * advertise the correct version in UFS_VER register. If this quirk
+ * is enabled, standard UFS host driver will call the vendor specific
+ * ops (get_ufs_hci_version) to get the correct version.
+ */
+ UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
+
+ /*
+ * Clear handling for transfer/task request list is just opposite.
+ */
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
+
+ /*
+	 * This quirk needs to be enabled if the host controller doesn't allow
+	 * the interrupt aggregation timer and counter to be reset by s/w.
+ */
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
+
+ /*
+	 * This quirk needs to be enabled if the host controller cannot be
+	 * enabled via the HCE register.
+ */
+ UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
+
+ /*
+	 * This quirk needs to be enabled if the host controller interprets
+	 * the values of PRDTO and PRDTL in the UTRD with byte granularity.
+ */
+ UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
+
+ /*
+ * This quirk needs to be enabled if the host controller reports
+ * OCS FATAL ERROR with device error through sense data
+ */
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it doesn't work.
+ */
+ UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
+
+ /*
+	 * Enable this quirk to skip manual flush control for WriteBooster
+ */
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+
+ /*
+	 * Enable this quirk to skip setting the default UniPro timeout
+	 * values before a power mode change
+ */
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
+
+ /*
+ * This quirk needs to be enabled if the host controller does not
+ * support UIC command
+ */
+ UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
+
+ /*
+ * This quirk needs to be enabled if the host controller cannot
+ * support physical host configuration.
+ */
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * 64-bit addressing supported capability but it doesn't work.
+ */
+ UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it's FASTAUTO only.
+ */
+ UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
+
+ /*
+ * This quirk needs to be enabled if the host controller needs
+ * to reinit the device after switching to maximum gear.
+ */
+ UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19,
+
+ /*
+	 * Some hosts raise an interrupt (per queue) in addition to
+	 * CQES (traditional) when ESI is disabled.
+	 * Enabling this quirk disables CQES and uses the per-queue interrupt.
+ */
+ UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20,
+
+ /*
+	 * Some hosts do not implement the SQ Run Time Command (SQRTC) register
+	 * and thus need this quirk to skip the related flow.
+ */
+ UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
+};
+
+enum ufshcd_caps {
+ /* Allow dynamic clk gating */
+ UFSHCD_CAP_CLK_GATING = 1 << 0,
+
+	/* Allow hibern8 with clk gating */
+ UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
+
+ /* Allow dynamic clk scaling */
+ UFSHCD_CAP_CLK_SCALING = 1 << 2,
+
+	/* Allow auto bkops to be enabled during runtime suspend */
+ UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
+
+ /*
+ * This capability allows host controller driver to use the UFS HCI's
+ * interrupt aggregation capability.
+ * CAUTION: Enabling this might reduce overall UFS throughput.
+ */
+ UFSHCD_CAP_INTR_AGGR = 1 << 4,
+
+ /*
+ * This capability allows the device auto-bkops to be always enabled
+	 * except during suspend (both runtime and system suspend).
+ * Enabling this capability means that device will always be allowed
+ * to do background operation when it's active but it might degrade
+ * the performance of ongoing read/write operations.
+ */
+ UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
+
+ /*
+ * This capability allows host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+ UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+
+ /*
+	 * This capability allows the host controller driver to turn on
+ * WriteBooster, if the underlying device supports it and is
+ * provisioned to be used. This would increase the write performance.
+ */
+ UFSHCD_CAP_WB_EN = 1 << 7,
+
+ /*
+ * This capability allows the host controller driver to use the
+ * inline crypto engine, if it is present
+ */
+ UFSHCD_CAP_CRYPTO = 1 << 8,
+
+ /*
+ * This capability allows the controller regulators to be put into
+ * lpm mode aggressively during clock gating.
+ * This would increase power savings.
+ */
+ UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
+
+ /*
+ * This capability allows the host controller driver to use DeepSleep,
+ * if it is supported by the UFS device. The host controller driver must
+ * support device hardware reset via the hba->device_reset() callback,
+ * in order to exit DeepSleep state.
+ */
+ UFSHCD_CAP_DEEPSLEEP = 1 << 10,
+
+ /*
+ * This capability allows the host controller driver to use temperature
+ * notification if it is supported by the UFS device.
+ */
+ UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
+
+ /*
+ * Enable WriteBooster when scaling up the clock and disable
+ * WriteBooster when scaling the clock down.
+ */
+ UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12,
+};
+
+struct ufs_hba_variant_params {
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_data;
+ u16 hba_enable_delay_us;
+ u32 wb_flush_threshold;
+};
+
+struct ufs_hba_monitor {
+ unsigned long chunk_size;
+
+ unsigned long nr_sec_rw[2];
+ ktime_t total_busy[2];
+
+ unsigned long nr_req[2];
+	/* latencies */
+ ktime_t lat_sum[2];
+ ktime_t lat_max[2];
+ ktime_t lat_min[2];
+
+ u32 nr_queued[2];
+ ktime_t busy_start_ts[2];
+
+ ktime_t enabled_ts;
+ bool enabled;
+};
+
+/**
+ * struct ufshcd_res_info - MCQ related resource regions
+ *
+ * @name: resource name
+ * @resource: pointer to resource region
+ * @base: register base address
+ */
+struct ufshcd_res_info {
+ const char *name;
+ struct resource *resource;
+ void __iomem *base;
+};
+
+enum ufshcd_res {
+ RES_UFS,
+ RES_MCQ,
+ RES_MCQ_SQD,
+ RES_MCQ_SQIS,
+ RES_MCQ_CQD,
+ RES_MCQ_CQIS,
+ RES_MCQ_VS,
+ RES_MAX,
+};
+
+/**
+ * struct ufshcd_mcq_opr_info_t - Operation and Runtime registers
+ *
+ * @offset: Doorbell Address Offset
+ * @stride: per-queue register stride; registers for queue i [0...31] live
+ *	at base + i * stride
+ * @base: base address
+ */
+struct ufshcd_mcq_opr_info_t {
+ unsigned long offset;
+ unsigned long stride;
+ void __iomem *base;
+};
+
+enum ufshcd_mcq_opr {
+ OPR_SQD,
+ OPR_SQIS,
+ OPR_CQD,
+ OPR_CQIS,
+ OPR_MAX,
+};
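As a sketch of how the offset/stride pair is meant to be consumed (illustrative, following the layout implied by the kernel-doc above): the register group for hardware queue i starts at base + i * stride, with base already including the doorbell offset.

static inline void __iomem *
my_mcq_opr_reg(const struct ufshcd_mcq_opr_info_t *opr, int i)
{
	/* Hypothetical helper: registers of hardware queue @i in this group. */
	return opr->base + opr->stride * i;
}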
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @ufs_device_wlun: WLUN that controls the entire UFS device.
+ * @hwmon_device: device instance registered with the hwmon core.
+ * @curr_dev_pwr_mode: active UFS device power mode.
+ * @uic_link_state: active state of the link to the UFS device.
+ * @rpm_lvl: desired UFS power management level during runtime PM.
+ * @spm_lvl: desired UFS power management level during system PM.
+ * @pm_op_in_progress: whether or not a PM operation is in progress.
+ * @ahit: value of Auto-Hibernate Idle Timer register.
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_lock: Protects @outstanding_reqs.
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @mcq_capabilities: UFS Multi Circular Queue capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
+ * @ufs_version: UFS Version to which controller complies
+ * @vops: pointer to variant specific operations
+ * @vps: pointer to variant specific parameters
+ * @priv: pointer to variant specific private data
+ * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
+ * @irq: Irq number of the controller
+ * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
+ * @dev_ref_clk_freq: reference clock frequency
+ * @quirks: bitmask with information about deviations from the UFSHCI standard.
+ * @dev_quirks: bitmask with information about deviations from the UFS standard.
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
+ * @tmf_rqs: array with pointers to TMF requests while these are in progress.
+ * @active_uic_cmd: handle of active UIC command
+ * @uic_cmd_mutex: mutex for UIC command
+ * @uic_async_done: completion used during UIC processing
+ * @ufshcd_state: UFSHCD state
+ * @eh_flags: Error handling flags
+ * @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
+ * @ee_drv_mask: Exception event mask for driver
+ * @ee_usr_mask: Exception event mask for user (set via debugfs)
+ * @ee_ctrl_mutex: Used to serialize exception event information.
+ * @is_powered: flag to check if HBA is powered
+ * @shutting_down: flag to check if shutdown has been invoked
+ * @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
+ * @eeh_work: Worker to handle exception events
+ * @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
+ * @ufs_stats: various error counters
+ * @force_reset: flag to force eh_work perform a full reset
+ * @force_pmc: flag to force a power mode change
+ * @silence_err_logs: flag to silence error logs
+ * @dev_cmd: ufs device management command information
+ * @last_dme_cmd_tstamp: time stamp of the last completed DME command
+ * @nop_out_timeout: NOP OUT timeout value
+ * @dev_info: information about the UFS device
+ * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @vreg_info: UFS device voltage regulator information
+ * @clk_list_head: UFS host controller clocks list node head
+ * @use_pm_opp: Indicates whether OPP based scaling is used or not
+ * @req_abort_count: number of times ufshcd_abort() has been called
+ * @lanes_per_direction: number of lanes per data direction between the UFS
+ * controller and the UFS device.
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the maximum valid power mode of the device
+ * @clk_gating: information related to clock gating
+ * @caps: bitmask with information about UFS controller capabilities
+ * @devfreq: frequency scaling information owned by the devfreq core
+ * @clk_scaling: frequency scaling information owned by the UFS driver
+ * @system_suspending: system suspend has been started and system resume has
+ * not yet finished.
+ * @is_sys_suspended: UFS device has been suspended because of system suspend
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
+ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
+ * @desc_size: descriptor sizes reported by device
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @bsg_dev: struct device associated with the BSG queue
+ * @bsg_queue: BSG queue associated with the UFS controller
+ * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
+ * management) after the UFS device has finished a WriteBooster buffer
+ * flush or auto BKOP.
+ * @monitor: statistics about UFS commands
+ * @crypto_capabilities: Content of crypto capabilities register (0x100)
+ * @crypto_cap_array: Array of crypto capabilities
+ * @crypto_cfg_register: Start of the crypto cfg array
+ * @crypto_profile: the crypto profile of this hba (if applicable)
+ * @debugfs_root: UFS controller debugfs root directory
+ * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
+ * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
+ * ee_ctrl_mask
+ * @luns_avail: number of regular and well known LUNs supported by the UFS
+ * device
+ * @nr_hw_queues: number of hardware queues configured
+ * @nr_queues: number of Queues of different queue types
+ * @complete_put: whether or not to call ufshcd_rpm_put() from inside
+ * ufshcd_resume_complete()
+ * @ext_iid_sup: whether EXT_IID is supported by the UFSHC
+ * @scsi_host_added: whether the SCSI host has been added
+ * @mcq_sup: whether MCQ is supported by the UFSHC
+ * @mcq_enabled: whether MCQ is ready to accept requests
+ * @res: array of resource info of MCQ registers
+ * @mcq_base: Multi circular queue registers base address
+ * @uhq: array of supported hardware queues
+ * @dev_cmd_queue: Queue for issuing device management commands
+ * @mcq_opr: MCQ operation and runtime registers
+ * @ufs_rtc_update_work: A work for UFS RTC periodic update
+ * @pm_qos_req: PM QoS request handle
+ * @pm_qos_enabled: flag to check if pm qos is enabled
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+ struct scsi_device *ufs_device_wlun;
+
+#ifdef CONFIG_SCSI_UFS_HWMON
+ struct device *hwmon_device;
+#endif
+
+ enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+ enum uic_link_state uic_link_state;
+ /* Desired UFS power management level during runtime PM */
+ enum ufs_pm_level rpm_lvl;
+ /* Desired UFS power management level during system PM */
+ enum ufs_pm_level spm_lvl;
+ int pm_op_in_progress;
+
+ /* Auto-Hibernate Idle Timer register value */
+ u32 ahit;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ spinlock_t outstanding_lock;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ u32 mcq_capabilities;
+ int nutmrs;
+ u32 reserved_slot;
+ u32 ufs_version;
+ const struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant_params *vps;
+ void *priv;
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ size_t sg_entry_size;
+#endif
+ unsigned int irq;
+ bool is_irq_enabled;
+ enum ufs_ref_clk_freq dev_ref_clk_freq;
+
+ unsigned int quirks; /* Deviations from standard UFSHCI spec. */
+
+ /* Device deviations from standard UFS device spec. */
+ unsigned int dev_quirks;
+
+ struct blk_mq_tag_set tmf_tag_set;
+ struct request_queue *tmf_queue;
+ struct request **tmf_rqs;
+
+ struct uic_command *active_uic_cmd;
+ struct mutex uic_cmd_mutex;
+ struct completion *uic_async_done;
+
+ enum ufshcd_state ufshcd_state;
+ u32 eh_flags;
+ u32 intr_mask;
+ u16 ee_ctrl_mask;
+ u16 ee_drv_mask;
+ u16 ee_usr_mask;
+ struct mutex ee_ctrl_mutex;
+ bool is_powered;
+ bool shutting_down;
+ struct semaphore host_sem;
+
+ /* Work Queues */
+ struct workqueue_struct *eh_wq;
+ struct work_struct eh_work;
+ struct work_struct eeh_work;
+
+ /* HBA Errors */
+ u32 errors;
+ u32 uic_error;
+ u32 saved_err;
+ u32 saved_uic_err;
+ struct ufs_stats ufs_stats;
+ bool force_reset;
+ bool force_pmc;
+ bool silence_err_logs;
+
+ /* Device management request data */
+ struct ufs_dev_cmd dev_cmd;
+ ktime_t last_dme_cmd_tstamp;
+ int nop_out_timeout;
+
+ /* Keeps information of the UFS device connected to this host */
+ struct ufs_dev_info dev_info;
+ bool auto_bkops_enabled;
+ struct ufs_vreg_info vreg_info;
+ struct list_head clk_list_head;
+ bool use_pm_opp;
+
+	/* Number of request aborts */
+ int req_abort_count;
+
+ /* Number of lanes available (1 or 2) for Rx/Tx */
+ u32 lanes_per_direction;
+ struct ufs_pa_layer_attr pwr_info;
+ struct ufs_pwr_mode_info max_pwr_info;
+
+ struct ufs_clk_gating clk_gating;
+ /* Control to enable/disable host capabilities */
+ u32 caps;
+
+ struct devfreq *devfreq;
+ struct ufs_clk_scaling clk_scaling;
+ bool system_suspending;
+ bool is_sys_suspended;
+
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
+
+ struct mutex wb_mutex;
+ struct rw_semaphore clk_scaling_lock;
+ atomic_t scsi_block_reqs_cnt;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
+ struct delayed_work rpm_dev_flush_recheck_work;
+
+ struct ufs_hba_monitor monitor;
+
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+ union ufs_crypto_capabilities crypto_capabilities;
+ union ufs_crypto_cap_entry *crypto_cap_array;
+ u32 crypto_cfg_register;
+ struct blk_crypto_profile crypto_profile;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ struct delayed_work debugfs_ee_work;
+ u32 debugfs_ee_rate_limit_ms;
+#endif
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+ struct fault_attr trigger_eh_attr;
+ struct fault_attr timeout_attr;
+#endif
+ u32 luns_avail;
+ unsigned int nr_hw_queues;
+ unsigned int nr_queues[HCTX_MAX_TYPES];
+ bool complete_put;
+ bool ext_iid_sup;
+ bool scsi_host_added;
+ bool mcq_sup;
+ bool mcq_enabled;
+ struct ufshcd_res_info res[RES_MAX];
+ void __iomem *mcq_base;
+ struct ufs_hw_queue *uhq;
+ struct ufs_hw_queue *dev_cmd_queue;
+ struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+
+ struct delayed_work ufs_rtc_update_work;
+ struct pm_qos_request pm_qos_req;
+ bool pm_qos_enabled;
+};
+
+/**
+ * struct ufs_hw_queue - per hardware queue structure
+ * @mcq_sq_head: base address of submission queue head pointer
+ * @mcq_sq_tail: base address of submission queue tail pointer
+ * @mcq_cq_head: base address of completion queue head pointer
+ * @mcq_cq_tail: base address of completion queue tail pointer
+ * @sqe_base_addr: submission queue entry base address
+ * @sqe_dma_addr: submission queue dma address
+ * @cqe_base_addr: completion queue base address
+ * @cqe_dma_addr: completion queue dma address
+ * @max_entries: max number of slots in this hardware queue
+ * @id: hardware queue ID
+ * @sq_tail_slot: current slot to which SQ tail pointer is pointing
+ * @sq_lock: serialize submission queue access
+ * @cq_tail_slot: current slot to which CQ tail pointer is pointing
+ * @cq_head_slot: current slot to which CQ head pointer is pointing
+ * @cq_lock: Synchronize between multiple polling instances
+ * @sq_mutex: prevent submission queue concurrent access
+ */
+struct ufs_hw_queue {
+ void __iomem *mcq_sq_head;
+ void __iomem *mcq_sq_tail;
+ void __iomem *mcq_cq_head;
+ void __iomem *mcq_cq_tail;
+
+ struct utp_transfer_req_desc *sqe_base_addr;
+ dma_addr_t sqe_dma_addr;
+ struct cq_entry *cqe_base_addr;
+ dma_addr_t cqe_dma_addr;
+ u32 max_entries;
+ u32 id;
+ u32 sq_tail_slot;
+ spinlock_t sq_lock;
+ u32 cq_tail_slot;
+ u32 cq_head_slot;
+ spinlock_t cq_lock;
+ /* prevent concurrent access to submission queue */
+ struct mutex sq_mutex;
+};
+
+static inline bool is_mcq_enabled(struct ufs_hba *hba)
+{
+ return hba->mcq_enabled;
+}
+
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+ return hba->sg_entry_size;
+}
+
+static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
+{
+ WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
+ hba->sg_entry_size = sg_entry_size;
+}
+#else
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+ return sizeof(struct ufshcd_sg_entry);
+}
+
+#define ufshcd_set_sg_entry_size(hba, sg_entry_size) \
+ ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
+#endif
+
+static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
+{
+ return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
+}
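A worked example of that arithmetic, assuming the usual SG_ALL of 128 and the default 16-byte struct ufshcd_sg_entry (no variant fields):

/*
 *   sizeof(struct utp_transfer_cmd_desc)  = 2 * 512 bytes of UPIU = 1024
 *   SG_ALL * ufshcd_sg_entry_size(hba)    = 128 * 16              = 2048
 *   ufshcd_get_ucd_size(hba)              = 1024 + 2048           = 3072
 */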
+
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
+}
+
+static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
+{
+ return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
+}
+
+static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
+{
+ return !!(ufshcd_is_link_hibern8(hba) &&
+ (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
+}
+
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
+}
+
+static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
+{
+ return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
+}
+
+static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_EN;
+}
+
+static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
+}
+
+#define ufsmcq_writel(hba, val, reg) \
+ writel((val), (hba)->mcq_base + (reg))
+#define ufsmcq_readl(hba, reg) \
+ readl((hba)->mcq_base + (reg))
+
+#define ufsmcq_writelx(hba, val, reg) \
+ writel_relaxed((val), (hba)->mcq_base + (reg))
+#define ufsmcq_readlx(hba, reg) \
+ readl_relaxed((hba)->mcq_base + (reg))
+
+#define ufshcd_writel(hba, val, reg) \
+ writel((val), (hba)->mmio_base + (reg))
+#define ufshcd_readl(hba, reg) \
+ readl((hba)->mmio_base + (reg))
+
+/**
+ * ufshcd_rmwl - perform read/modify/write for a controller register
+ * @hba: per adapter instance
+ * @mask: mask to apply on read value
+ * @val: actual value to write
+ * @reg: register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+ u32 tmp;
+
+ tmp = ufshcd_readl(hba, reg);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ ufshcd_writel(hba, tmp, reg);
+}
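For example (hypothetical fragment inside a driver that already holds hba; the register and mask are defined in ufshci.h further down this patch), updating only the timer field of the Auto-Hibernate register while leaving the scale bits alone:

	/* Mask in a new AH8 timer value, preserving the scale field. */
	ufshcd_rmwl(hba, UFSHCI_AHIBERN8_TIMER_MASK, new_timer,
		    REG_AUTO_HIBERNATE_IDLE_TIMER);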
+
+void ufshcd_enable_irq(struct ufs_hba *hba);
+void ufshcd_disable_irq(struct ufs_hba *hba);
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+void ufshcd_dealloc_host(struct ufs_hba *);
+int ufshcd_hba_enable(struct ufs_hba *hba);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
+int ufshcd_link_recovery(struct ufs_hba *hba);
+int ufshcd_make_hba_operational(struct ufs_hba *hba);
+void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
+void ufshcd_hba_stop(struct ufs_hba *hba);
+void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
+void ufshcd_mcq_enable(struct ufs_hba *hba);
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
+
+int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
+/**
+ * ufshcd_set_variant - set variant specific data to the hba
+ * @hba: per adapter instance
+ * @variant: pointer to variant specific data
+ */
+static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
+{
+ BUG_ON(!hba);
+ hba->priv = variant;
+}
+
+/**
+ * ufshcd_get_variant - get variant specific data from the hba
+ * @hba: per adapter instance
+ */
+static inline void *ufshcd_get_variant(struct ufs_hba *hba)
+{
+ BUG_ON(!hba);
+ return hba->priv;
+}
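These two are meant to bracket a vendor driver's lifetime: allocate private state in the ->init() hook, fetch it from any later callback. A hedged sketch, with struct my_ufs_host and both function names purely hypothetical:

struct my_ufs_host {
	void __iomem *vendor_base;	/* hypothetical vendor state */
};

static int my_ufs_variant_init(struct ufs_hba *hba)
{
	struct my_ufs_host *host;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	return 0;
}

static void my_ufs_variant_exit(struct ufs_hba *hba)
{
	struct my_ufs_host *host = ufshcd_get_variant(hba);

	/* tear down whatever init() set up; @host memory is devm-managed */
}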
+
+#ifdef CONFIG_PM
+extern int ufshcd_runtime_suspend(struct device *dev);
+extern int ufshcd_runtime_resume(struct device *dev);
+#endif
+#ifdef CONFIG_PM_SLEEP
+extern int ufshcd_system_suspend(struct device *dev);
+extern int ufshcd_system_resume(struct device *dev);
+extern int ufshcd_system_freeze(struct device *dev);
+extern int ufshcd_system_thaw(struct device *dev);
+extern int ufshcd_system_restore(struct device *dev);
+#endif
+
+extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+ int agreed_gear,
+ int adapt_val);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer);
+extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode);
+extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL 0
+#define DME_PEER 1
+#define ATTR_SET_NOR 0 /* NORMAL */
+#define ATTR_SET_ST 1 /* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
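Putting the wrappers to use (illustrative; the attribute IDs come from unipro.h later in this patch), a vendor hook could read the local UniPro version and then clear the local TX LCC enable:

static int my_check_local_ver(struct ufs_hba *hba)	/* hypothetical */
{
	u32 ver;
	int err;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (err)
		return err;

	/* Clearing the local TX LCC enable is exactly what
	 * ufshcd_disable_host_tx_lcc() below wraps. */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}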
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+ return (pwr_info->pwr_rx == FAST_MODE ||
+ pwr_info->pwr_rx == FASTAUTO_MODE) &&
+ (pwr_info->pwr_tx == FAST_MODE ||
+ pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+}
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
+ const struct ufs_dev_quirk *fixups);
+#define SD_ASCII_STD true
+#define SD_RAW false
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ u8 **buf, bool ascii);
+
+void ufshcd_hold(struct ufs_hba *hba);
+void ufshcd_release(struct ufs_hba *hba);
+
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+
+int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
+
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
+ struct ufs_ehs *ehs_rsp, int sg_cnt,
+ struct scatterlist *sg_list, enum dma_data_direction dir);
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
+int ufshcd_suspend_prepare(struct device *dev);
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
+void ufshcd_resume_complete(struct device *dev);
+bool ufshcd_is_hba_active(struct ufs_hba *hba);
+void ufshcd_pm_qos_init(struct ufs_hba *hba);
+void ufshcd_pm_qos_exit(struct ufs_hba *hba);
+
+/* Wrapper functions for safely calling variant operations */
+static inline int ufshcd_vops_init(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->init)
+ return hba->vops->init(hba);
+
+ return 0;
+}
+
+static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->phy_initialization)
+ return hba->vops->phy_initialization(hba);
+
+ return 0;
+}
+
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix);
+
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
+int ufshcd_write_ee_control(struct ufs_hba *hba);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr);
+
+#endif /* End of Header */
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
new file mode 100644
index 000000000000..a196e1c4c3bb
--- /dev/null
+++ b/include/ufs/ufshci.h
@@ -0,0 +1,615 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal Flash Storage Host controller driver
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#ifndef _UFSHCI_H
+#define _UFSHCI_H
+
+#include <linux/types.h>
+#include <ufs/ufs.h>
+
+enum {
+ TASK_REQ_UPIU_SIZE_DWORDS = 8,
+ TASK_RSP_UPIU_SIZE_DWORDS = 8,
+ ALIGNED_UPIU_SIZE = 512,
+};
+
+/* UFSHCI Registers */
+enum {
+ REG_CONTROLLER_CAPABILITIES = 0x00,
+ REG_MCQCAP = 0x04,
+ REG_UFS_VERSION = 0x08,
+ REG_CONTROLLER_DEV_ID = 0x10,
+ REG_CONTROLLER_PROD_ID = 0x14,
+ REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
+ REG_INTERRUPT_STATUS = 0x20,
+ REG_INTERRUPT_ENABLE = 0x24,
+ REG_CONTROLLER_STATUS = 0x30,
+ REG_CONTROLLER_ENABLE = 0x34,
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
+ REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
+ REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
+ REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
+ REG_UIC_ERROR_CODE_DME = 0x48,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
+ REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
+ REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
+ REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
+ REG_UIC_COMMAND = 0x90,
+ REG_UIC_COMMAND_ARG_1 = 0x94,
+ REG_UIC_COMMAND_ARG_2 = 0x98,
+ REG_UIC_COMMAND_ARG_3 = 0x9C,
+
+ UFSHCI_REG_SPACE_SIZE = 0xA0,
+
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ REG_UFS_MEM_CFG = 0x300,
+ REG_UFS_MCQ_CFG = 0x380,
+ REG_UFS_ESILBA = 0x384,
+ REG_UFS_ESIUBA = 0x388,
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
+};
+
+/* Controller capability masks */
+enum {
+ MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+ MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_EHSLUTRD_SUPPORTED = 0x00400000,
+ MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
+ MASK_64_ADDRESSING_SUPPORT = 0x01000000,
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+ MASK_CRYPTO_SUPPORT = 0x10000000,
+ MASK_MCQ_SUPPORT = 0x40000000,
+};
+
+/* MCQ capability mask */
+enum {
+ MASK_EXT_IID_SUPPORT = 0x00000400,
+};
+
+enum {
+ REG_SQATTR = 0x0,
+ REG_SQLBA = 0x4,
+ REG_SQUBA = 0x8,
+ REG_SQDAO = 0xC,
+ REG_SQISAO = 0x10,
+
+ REG_CQATTR = 0x20,
+ REG_CQLBA = 0x24,
+ REG_CQUBA = 0x28,
+ REG_CQDAO = 0x2C,
+ REG_CQISAO = 0x30,
+};
+
+enum {
+ REG_SQHP = 0x0,
+ REG_SQTP = 0x4,
+ REG_SQRTC = 0x8,
+ REG_SQCTI = 0xC,
+ REG_SQRTS = 0x10,
+};
+
+enum {
+ REG_CQHP = 0x0,
+ REG_CQTP = 0x4,
+};
+
+enum {
+ REG_CQIS = 0x0,
+ REG_CQIE = 0x4,
+};
+
+enum {
+ SQ_START = 0x0,
+ SQ_STOP = 0x1,
+ SQ_ICU = 0x2,
+};
+
+enum {
+ SQ_STS = 0x1,
+ SQ_CUS = 0x2,
+};
+
+#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4)
+#define UFS_MASK(mask, offset) ((mask) << (offset))
+
+/* UFS Version 08h */
+#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
+#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
+
+#define UFSHCD_NUM_RESERVED 1
+/*
+ * Controller UFSHCI version
+ * - 2.x and newer use the following scheme:
+ *	(major << 8) + (minor << 4)
+ * - 1.x has been converted to match this in
+ * ufshcd_get_ufs_version()
+ */
+static inline u32 ufshci_version(u32 major, u32 minor)
+{
+ return (major << 8) + (minor << 4);
+}
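Worked out: ufshci_version(2, 1) is 0x210 and ufshci_version(3, 0) is 0x300, so version checks reduce to plain integer comparison, e.g.:

	/* Illustrative gate on a UFSHCI 3.0+ controller */
	if (hba->ufs_version >= ufshci_version(3, 0))
		enable_new_feature(hba);	/* hypothetical helper */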
+
+/*
+ * HCDDID - Host Controller Identification Descriptor
+ * - Device ID and Device Class 10h
+ */
+#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
+#define DEVICE_ID UFS_MASK(0xFF, 24)
+
+/*
+ * HCPMID - Host Controller Identification Descriptor
+ * - Product/Manufacturer ID 14h
+ */
+#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
+#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
+
+/* AHIT - Auto-Hibernate Idle Timer */
+#define UFSHCI_AHIBERN8_TIMER_MASK GENMASK(9, 0)
+#define UFSHCI_AHIBERN8_SCALE_MASK GENMASK(12, 10)
+#define UFSHCI_AHIBERN8_SCALE_FACTOR 10
+#define UFSHCI_AHIBERN8_MAX (1023 * 100000)
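One plausible way to compose an AHIT value from these fields is FIELD_PREP() from <linux/bitfield.h>; assuming the spec's 10^scale-microsecond encoding (so scale code 3 selects 1 ms units — treat that encoding detail as an assumption here):

	/* Request a 150 ms Auto-Hibernate idle timeout (timer=150, scale=3). */
	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);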
+
+/*
+ * IS - Interrupt Status - 20h
+ */
+#define UTP_TRANSFER_REQ_COMPL 0x1
+#define UIC_DME_END_PT_RESET 0x2
+#define UIC_ERROR 0x4
+#define UIC_TEST_MODE 0x8
+#define UIC_POWER_MODE 0x10
+#define UIC_HIBERNATE_EXIT 0x20
+#define UIC_HIBERNATE_ENTER 0x40
+#define UIC_LINK_LOST 0x80
+#define UIC_LINK_STARTUP 0x100
+#define UTP_TASK_REQ_COMPL 0x200
+#define UIC_COMMAND_COMPL 0x400
+#define DEVICE_FATAL_ERROR 0x800
+#define CONTROLLER_FATAL_ERROR 0x10000
+#define SYSTEM_BUS_FATAL_ERROR 0x20000
+#define CRYPTO_ENGINE_FATAL_ERROR 0x40000
+#define MCQ_CQ_EVENT_STATUS 0x100000
+
+#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
+ UIC_HIBERNATE_EXIT)
+
+#define UFSHCD_UIC_PWR_MASK (UFSHCD_UIC_HIBERN8_MASK |\
+ UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
+
+#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS)
+
+#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR |\
+ CRYPTO_ENGINE_FATAL_ERROR |\
+ UIC_LINK_LOST)
+
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT 0x1
+#define UTP_TRANSFER_REQ_LIST_READY 0x2
+#define UTP_TASK_REQ_LIST_READY 0x4
+#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+
+#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
+ UTP_TASK_REQ_LIST_READY |\
+ UIC_COMMAND_READY)
+
+enum {
+ PWR_OK = 0x0,
+ PWR_LOCAL = 0x01,
+ PWR_REMOTE = 0x02,
+ PWR_BUSY = 0x03,
+ PWR_ERROR_CAP = 0x04,
+ PWR_FATAL_ERROR = 0x05,
+};
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE 0x1
+#define CONTROLLER_DISABLE 0x0
+#define CRYPTO_GENERAL_ENABLE 0x2
+
+/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
+#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
+#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10
+
+/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
+#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
+#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
+#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
+#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
+#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
+#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
+#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
+#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
+
+/* UECN - Host UIC Error Code Network Layer 40h */
+#define UIC_NETWORK_LAYER_ERROR 0x80000000
+#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
+
+/* UECT - Host UIC Error Code Transport Layer 44h */
+#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
+#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
+#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
+#define UIC_TRANSPORT_BAD_TC 0x10
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
+
+/* UECDME - Host UIC Error Code DME 48h */
+#define UIC_DME_ERROR 0x80000000
+#define UIC_DME_ERROR_CODE_MASK 0x1
+
+/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
+#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
+#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
+#define INT_AGGR_STATUS_BIT 0x100000
+#define INT_AGGR_PARAM_WRITE 0x1000000
+#define INT_AGGR_ENABLE 0x80000000
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
+
+/* REG_UFS_MEM_CFG - Global Config Registers 300h */
+#define MCQ_MODE_SELECT BIT(0)
+
+/* CQISy - CQ y Interrupt Status Register */
+#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK 0xFF
+#define GEN_SELECTOR_INDEX_MASK 0xFFFF
+
+#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL 0xFF
+
+#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK 0xFF
+#define GENERIC_ERROR_CODE_MASK 0xFF
+
+/* GenSelectorIndex calculation macros for M-PHY attributes */
+#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
+
+#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
+ ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
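Combined with the DME wrappers from ufshcd.h, a per-lane M-PHY attribute write might look like this (hypothetical call site; TX_LCC_ENABLE is defined in unipro.h below):

	/* Clear TX_LCC_ENABLE on TX lane 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					    UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 0);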
+
+/* Link Status */
+enum link_status {
+ UFSHCD_LINK_IS_DOWN = 1,
+ UFSHCD_LINK_IS_UP = 2,
+};
+
+/* UIC Commands */
+enum uic_cmd_dme {
+ UIC_CMD_DME_GET = 0x01,
+ UIC_CMD_DME_SET = 0x02,
+ UIC_CMD_DME_PEER_GET = 0x03,
+ UIC_CMD_DME_PEER_SET = 0x04,
+ UIC_CMD_DME_POWERON = 0x10,
+ UIC_CMD_DME_POWEROFF = 0x11,
+ UIC_CMD_DME_ENABLE = 0x12,
+ UIC_CMD_DME_RESET = 0x14,
+ UIC_CMD_DME_END_PT_RST = 0x15,
+ UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UIC_CMD_RESULT_SUCCESS = 0x00,
+ UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UIC_CMD_RESULT_FAILURE = 0x01,
+ UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UIC_CMD_RESULT_BUSY = 0x09,
+ UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT 0xFF
+
+#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
+
+/* Interrupt disable masks */
+enum {
+ /* Interrupt disable mask for UFSHCI v1.0 */
+ INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
+ INTERRUPT_MASK_RW_VER_10 = 0x30000,
+
+ /* Interrupt disable mask for UFSHCI v1.1 */
+ INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
+};
+
+/* CCAP - Crypto Capability 100h */
+union ufs_crypto_capabilities {
+ __le32 reg_val;
+ struct {
+ u8 num_crypto_cap;
+ u8 config_count;
+ u8 reserved;
+ u8 config_array_ptr;
+ };
+};
+
+enum ufs_crypto_key_size {
+ UFS_CRYPTO_KEY_SIZE_INVALID = 0x0,
+ UFS_CRYPTO_KEY_SIZE_128 = 0x1,
+ UFS_CRYPTO_KEY_SIZE_192 = 0x2,
+ UFS_CRYPTO_KEY_SIZE_256 = 0x3,
+ UFS_CRYPTO_KEY_SIZE_512 = 0x4,
+};
+
+enum ufs_crypto_alg {
+ UFS_CRYPTO_ALG_AES_XTS = 0x0,
+ UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
+ UFS_CRYPTO_ALG_AES_ECB = 0x2,
+ UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
+};
+
+/* x-CRYPTOCAP - Crypto Capability X */
+union ufs_crypto_cap_entry {
+ __le32 reg_val;
+ struct {
+ u8 algorithm_id;
+ u8 sdus_mask; /* Supported data unit size mask */
+ u8 key_size;
+ u8 reserved;
+ };
+};
+
+#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
+#define UFS_CRYPTO_KEY_MAX_SIZE 64
+/* x-CRYPTOCFG - Crypto Configuration X */
+union ufs_crypto_cfg_entry {
+ __le32 reg_val[32];
+ struct {
+ u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE];
+ u8 data_unit_size;
+ u8 crypto_cap_idx;
+ u8 reserved_1;
+ u8 config_enable;
+ u8 reserved_multi_host;
+ u8 reserved_2;
+ u8 vsb[2];
+ u8 reserved_3[56];
+ };
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UTP_CMD_TYPE_SCSI = 0x0,
+ UTP_CMD_TYPE_UFS = 0x1,
+ UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+/* To accommodate the command type required by UFS 2.0 */
+enum {
+ UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
+enum {
+ UTP_SCSI_COMMAND = 0x00000000,
+ UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum utp_data_direction {
+ UTP_NO_DATA_TRANSFER = 0,
+ UTP_HOST_TO_DEVICE = 1,
+ UTP_DEVICE_TO_HOST = 2,
+};
+
+/* Overall command status values */
+enum utp_ocs {
+ OCS_SUCCESS = 0x0,
+ OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ OCS_INVALID_PRDT_ATTR = 0x2,
+ OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ OCS_PEER_COMM_FAILURE = 0x5,
+ OCS_ABORTED = 0x6,
+ OCS_FATAL_ERROR = 0x7,
+ OCS_DEVICE_FATAL_ERROR = 0x8,
+ OCS_INVALID_CRYPTO_CONFIG = 0x9,
+ OCS_GENERAL_CRYPTO_ERROR = 0xA,
+ OCS_INVALID_COMMAND_STATUS = 0x0F,
+};
+
+enum {
+ MASK_OCS = 0x0F,
+};
+
+/* The maximum value the data byte count field in the PRDT can describe is 256KB */
+#define PRDT_DATA_BYTE_COUNT_MAX	SZ_256K
+/* The data byte count field in the PRDT has DWORD (4-byte) granularity */
+#define PRDT_DATA_BYTE_COUNT_PAD	4
+
+/**
+ * struct ufshcd_sg_entry - UFSHCI PRD Entry
+ * @addr: Physical address; DW-0 and DW-1.
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+struct ufshcd_sg_entry {
+ __le64 addr;
+ __le32 reserved;
+ __le32 size;
+ /*
+ * followed by variant-specific fields if
+ * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
+ */
+};
+
+/**
+ * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor: an array of SG_ALL struct
+ * ufshcd_sg_entry's. Variant-specific fields may be present after each.
+ */
+struct utp_transfer_cmd_desc {
+ u8 command_upiu[ALIGNED_UPIU_SIZE];
+ u8 response_upiu[ALIGNED_UPIU_SIZE];
+ u8 prd_table[];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ */
+struct request_desc_header {
+ u8 cci;
+ u8 ehs_length;
+#if defined(__BIG_ENDIAN)
+ u8 enable_crypto:1;
+ u8 reserved2:7;
+
+ u8 command_type:4;
+ u8 reserved1:1;
+ u8 data_direction:2;
+ u8 interrupt:1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2:7;
+ u8 enable_crypto:1;
+
+ u8 interrupt:1;
+ u8 data_direction:2;
+ u8 reserved1:1;
+ u8 command_type:4;
+#else
+#error "Unsupported byte order"
+#endif
+
+ __le32 dunl;
+ u8 ocs;
+ u8 cds;
+ __le16 ldbc;
+ __le32 dunu;
+};
+
+static_assert(sizeof(struct request_desc_header) == 16);
+
+/**
+ * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr: UCD base address DW 4-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-5*/
+ __le64 command_desc_base_addr;
+
+ /* DW 6 */
+ __le16 response_upiu_length;
+ __le16 response_upiu_offset;
+
+ /* DW 7 */
+ __le16 prd_table_length;
+ __le16 prd_table_offset;
+};
+
+/* MCQ Completion Queue Entry */
+struct cq_entry {
+ /* DW 0-1 */
+ __le64 command_desc_base_addr;
+
+ /* DW 2 */
+ __le16 response_upiu_length;
+ __le16 response_upiu_offset;
+
+ /* DW 3 */
+ __le16 prd_table_length;
+ __le16 prd_table_offset;
+
+ /* DW 4 */
+ __le32 status;
+
+ /* DW 5-7 */
+ __le32 reserved[3];
+};
+
+static_assert(sizeof(struct cq_entry) == 32);
+
+/*
+ * UTMRD structure.
+ */
+struct utp_task_req_desc {
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-11 - Task request UPIU structure */
+ struct {
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+ } upiu_req;
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct {
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
+ } upiu_rsp;
+};
+
+#endif /* End of Header */
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
new file mode 100644
index 000000000000..360e1245fb40
--- /dev/null
+++ b/include/ufs/unipro.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * M-TX Configuration Attributes
+ */
+#define TX_HIBERN8TIME_CAPABILITY 0x000F
+#define TX_MODE 0x0021
+#define TX_HSRATE_SERIES 0x0022
+#define TX_HSGEAR 0x0023
+#define TX_PWMGEAR 0x0024
+#define TX_AMPLITUDE 0x0025
+#define TX_HS_SLEWRATE 0x0026
+#define TX_SYNC_SOURCE 0x0027
+#define TX_HS_SYNC_LENGTH 0x0028
+#define TX_HS_PREPARE_LENGTH 0x0029
+#define TX_LS_PREPARE_LENGTH 0x002A
+#define TX_HIBERN8_CONTROL 0x002B
+#define TX_LCC_ENABLE 0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
+#define TX_BYPASS_8B10B_ENABLE 0x002E
+#define TX_DRIVER_POLARITY 0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
+#define TX_LCC_SEQUENCER 0x0032
+#define TX_MIN_ACTIVATETIME 0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
+#define TX_REFCLKFREQ 0x00EB
+#define TX_CFGCLKFREQVAL 0x00EC
+#define CFGEXTRATTR 0x00F0
+#define DITHERCTRL2 0x00F1
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B
+#define RX_HS_G1_PREP_LENGTH_CAP 0x008C
+#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
+#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094
+#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095
+#define RX_HS_G2_PREP_LENGTH_CAP 0x0096
+#define RX_HS_G3_PREP_LENGTH_CAP 0x0097
+#define RX_ADV_GRANULARITY_CAP 0x0098
+#define RX_HIBERN8TIME_CAP 0x0092
+#define RX_ADV_HIBERN8TIME_CAP 0x0099
+#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A
+#define RX_MODE 0x00A1
+#define RX_HSRATE_SERIES 0x00A2
+#define RX_HSGEAR 0x00A3
+#define RX_PWMGEAR 0x00A4
+#define RX_LS_TERMINATED_ENABLE 0x00A5
+#define RX_HS_UNTERMINATED_ENABLE 0x00A6
+#define RX_ENTER_HIBERN8 0x00A7
+#define RX_BYPASS_8B10B_ENABLE 0x00A8
+#define RX_TERMINATION_FORCE_ENABLE 0x00A9
+#define RXCALCTRL 0x00B4
+#define RXSQCTRL 0x00B5
+#define CFGRXCDR8 0x00BA
+#define CFGRXOVR8 0x00BD
+#define CFGRXOVR6 0x00BF
+#define RXDIRECTCTRL2 0x00C7
+#define CFGRXOVR4 0x00E9
+#define RX_REFCLKFREQ 0x00EB
+#define RX_CFGCLKFREQVAL 0x00EC
+#define CFGWIDEINLN 0x00F0
+#define ENARXDIRECTCFG4 0x00F2
+#define ENARXDIRECTCFG3 0x00F3
+#define ENARXDIRECTCFG2 0x00F4
+
+
+#define is_mphy_tx_attr(attr)			((attr) < RX_MODE)
+#define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1)
+#define SYNC_LEN_FINE(x) ((x) & 0x3F)
+#define SYNC_LEN_COARSE(x) ((1 << 6) | ((x) & 0x3F))
+#define PREP_LEN(x) ((x) & 0xF)
+
+#define RX_MIN_ACTIVATETIME_UNIT_US 100
+#define HIBERN8TIME_UNIT_US 100
+
+/*
+ * Common Block Attributes
+ */
+#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B)
+#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF)
+#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD)
+#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6)
+#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA)
+#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0)
+#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1)
+#define CBDCOCTRL5 UNIPRO_CB_OFFSET(0x00F3)
+#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8)
+#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB)
+
+#define UNIPRO_CB_OFFSET(x)		(0x8000 | (x))
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_PHY_TYPE 0x1500
+#define PA_AVAILTXDATALANES 0x1520
+#define PA_MAXTXSPEEDFAST 0x1521
+#define PA_MAXTXSPEEDSLOW 0x1522
+#define PA_MAXRXSPEEDFAST 0x1541
+#define PA_MAXRXSPEEDSLOW 0x1542
+#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_LOCAL_TX_LCC_ENABLE 0x155E
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_TXTRAILINGCLOCKS 0x1564
+#define PA_TXSPEEDFAST 0x1565
+#define PA_TXSPEEDSLOW 0x1566
+#define PA_TXPWRSTATUS 0x1567
+#define PA_TXGEAR 0x1568
+#define PA_TXTERMINATION 0x1569
+#define PA_HSSERIES 0x156A
+#define PA_LEGACYDPHYESCDL 0x1570
+#define PA_PWRMODE 0x1571
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_RXPWRSTATUS 0x1582
+#define PA_RXGEAR 0x1583
+#define PA_RXTERMINATION 0x1584
+#define PA_MAXRXPWMGEAR 0x1586
+#define PA_MAXRXHSGEAR 0x1587
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
+#define PA_RXHSUNTERMCAP 0x15A5
+#define PA_RXLSTERMCAP 0x15A6
+#define PA_HIBERN8TIME 0x15A7
+#define PA_LOCALVERINFO 0x15A9
+#define PA_GRANULARITY 0x15AA
+#define PA_TACTIVATE 0x15A8
+#define PA_PWRMODEUSERDATA0 0x15B0
+#define PA_PWRMODEUSERDATA1 0x15B1
+#define PA_PWRMODEUSERDATA2 0x15B2
+#define PA_PWRMODEUSERDATA3 0x15B3
+#define PA_PWRMODEUSERDATA4 0x15B4
+#define PA_PWRMODEUSERDATA5 0x15B5
+#define PA_PWRMODEUSERDATA6 0x15B6
+#define PA_PWRMODEUSERDATA7 0x15B7
+#define PA_PWRMODEUSERDATA8 0x15B8
+#define PA_PWRMODEUSERDATA9 0x15B9
+#define PA_PWRMODEUSERDATA10 0x15BA
+#define PA_PWRMODEUSERDATA11 0x15BB
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
+#define PA_TXHSADAPTTYPE 0x15D4
+
+/* Adapt type for PA_TXHSADAPTTYPE attribute */
+#define PA_REFRESH_ADAPT 0x00
+#define PA_INITIAL_ADAPT 0x01
+#define PA_NO_ADAPT 0x03
+
+#define PA_TACTIVATE_TIME_UNIT_US 10
+#define PA_HIBERN8_TIME_UNIT_US 100
+
+/* Other attributes */
+#define VS_POWERSTATE 0xD083
+#define VS_MPHYCFGUPDT 0xD085
+#define VS_DEBUGOMC 0xD09E
+
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES 4
+
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+#define DL_FC1ProtectionTimeOutVal_Default 8191
+#define DL_TC1ReplayTimeOutVal_Default 65535
+#define DL_AFC1ReqTimeOutVal_Default 32767
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
+/* PA power modes */
+enum ufs_pa_pwr_mode {
+ FAST_MODE = 1,
+ SLOW_MODE = 2,
+ FASTAUTO_MODE = 4,
+ SLOWAUTO_MODE = 5,
+ UNCHANGED = 7,
+};
+
+#define PWRMODE_MASK 0xF
+#define PWRMODE_RX_OFFSET 4
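These two defines describe how PA_PWRMODE packs both directions into a single value: RX mode in bits 7:4, TX mode in bits 3:0. A hedged sketch of a power mode change request (fragment from a hypothetical vendor path):

	/* Illustrative: FAST_MODE in both directions via PA_PWRMODE */
	u32 pwr = (FAST_MODE << PWRMODE_RX_OFFSET) | (FAST_MODE & PWRMODE_MASK);
	int err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODE), pwr);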
+
+/* PA TX/RX Frequency Series */
+enum ufs_hs_gear_rate {
+ PA_HS_MODE_A = 1,
+ PA_HS_MODE_B = 2,
+};
+
+enum ufs_pwm_gear_tag {
+ UFS_PWM_DONT_CHANGE, /* Don't change Gear */
+ UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
+ UFS_PWM_G2, /* PWM Gear 2 */
+ UFS_PWM_G3, /* PWM Gear 3 */
+ UFS_PWM_G4, /* PWM Gear 4 */
+ UFS_PWM_G5, /* PWM Gear 5 */
+ UFS_PWM_G6, /* PWM Gear 6 */
+ UFS_PWM_G7, /* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+ UFS_HS_DONT_CHANGE, /* Don't change Gear */
+ UFS_HS_G1, /* HS Gear 1 (default for reset) */
+ UFS_HS_G2, /* HS Gear 2 */
+ UFS_HS_G3, /* HS Gear 3 */
+ UFS_HS_G4, /* HS Gear 4 */
+ UFS_HS_G5 /* HS Gear 5 */
+};
+
+enum ufs_lanes {
+ UFS_LANE_DONT_CHANGE, /* Don't change Lane */
+ UFS_LANE_1, /* Lane 1 (default for reset) */
+ UFS_LANE_2, /* Lane 2 */
+};
+
+enum ufs_unipro_ver {
+ UFS_UNIPRO_VER_RESERVED = 0,
+ UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+ UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+ UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
+ UFS_UNIPRO_VER_1_61 = 4, /* UniPro version 1.61 */
+ UFS_UNIPRO_VER_1_8 = 5, /* UniPro version 1.8 */
+ UFS_UNIPRO_VER_MAX = 6, /* unsupported UniPro version */
+ /* UniPro version field mask in PA_LOCALVERINFO */
+ UFS_UNIPRO_VER_MASK = 0xF,
+};
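
Since the version sits in the low nibble of PA_LOCALVERINFO, a raw DME read has to be masked before it is compared against the enum. A minimal sketch:

    /* Sketch: classify a raw PA_LOCALVERINFO value; anything outside the
     * known range is treated as unknown.
     */
    static enum ufs_unipro_ver unipro_ver_from_raw(u32 raw)
    {
            u32 ver = raw & UFS_UNIPRO_VER_MASK;

            if (ver == UFS_UNIPRO_VER_RESERVED || ver >= UFS_UNIPRO_VER_MAX)
                    return UFS_UNIPRO_VER_RESERVED;
            return ver;
    }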
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_TC1TXBUFFERSIZE 0x2006
+#define DL_TC0TXFCTHRESHOLD 0x2040
+#define DL_FC0PROTTIMEOUTVAL 0x2041
+#define DL_TC0REPLAYTIMEOUTVAL 0x2042
+#define DL_AFC0REQTIMEOUTVAL 0x2043
+#define DL_AFC0CREDITTHRESHOLD 0x2044
+#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
+#define DL_TC1TXFCTHRESHOLD 0x2060
+#define DL_FC1PROTTIMEOUTVAL 0x2061
+#define DL_TC1REPLAYTIMEOUTVAL 0x2062
+#define DL_AFC1REQTIMEOUTVAL 0x2063
+#define DL_AFC1CREDITTHRESHOLD 0x2064
+#define DL_TC1OUTACKTHRESHOLD 0x2065
+#define DL_PEERTC1PRESENT 0x2066
+#define DL_PEERTC1RXINITCREVAL 0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID 0x3000
+#define N_DEVICEID_VALID 0x3001
+#define N_TC0TXMAXSDUSIZE 0x3020
+#define N_TC1TXMAXSDUSIZE 0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS 0x4000
+#define T_NUMTESTFEATURES 0x4001
+#define T_CONNECTIONSTATE 0x4020
+#define T_PEERDEVICEID 0x4021
+#define T_PEERCPORTID 0x4022
+#define T_TRAFFICCLASS 0x4023
+#define T_PROTOCOLID 0x4024
+#define T_CPORTFLAGS 0x4025
+#define T_TXTOKENVALUE 0x4026
+#define T_RXTOKENVALUE 0x4027
+#define T_LOCALBUFFERSPACE 0x4028
+#define T_PEERBUFFERSPACE 0x4029
+#define T_CREDITSTOSEND 0x402A
+#define T_CPORTMODE 0x402B
+#define T_TC0TXMAXSDUSIZE 0x4060
+#define T_TC1TXMAXSDUSIZE 0x4061
+
+/* CPort setting */
+#define E2EFC_ON (1 << 0)
+#define E2EFC_OFF (0 << 0)
+#define CSD_N_ON (0 << 1)
+#define CSD_N_OFF (1 << 1)
+#define CSV_N_ON (0 << 2)
+#define CSV_N_OFF (1 << 2)
+#define CPORT_DEF_FLAGS (CSV_N_OFF | CSD_N_OFF | E2EFC_OFF)
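
Note the inverted polarity of the CSD_N/CSV_N bits: the *_OFF variants are the ones that set a bit, so CPORT_DEF_FLAGS (E2E flow control, CSD and CSV all disabled) evaluates to 0x6. A small helper sketch for testing a flags word:

    /* Sketch: bit 0 set means end-to-end flow control is on. */
    static bool cport_e2efc_enabled(u32 cport_flags)
    {
            return cport_flags & E2EFC_ON;
    }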
+
+/* CPort connection state */
+enum {
+ CPORT_IDLE = 0,
+ CPORT_CONNECTED,
+};
+
+#endif /* _UNIPRO_H_ */
diff --git a/include/vdso/bits.h b/include/vdso/bits.h
index 6d005a1f5d94..388b212088ea 100644
--- a/include/vdso/bits.h
+++ b/include/vdso/bits.h
@@ -5,5 +5,6 @@
#include <vdso/const.h>
#define BIT(nr) (UL(1) << (nr))
+#define BIT_ULL(nr) (ULL(1) << (nr))
#endif /* __VDSO_BITS_H */
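
BIT() yields an unsigned long, which is only 32 bits wide on 32-bit builds, so the new BIT_ULL() is required whenever the bit index can reach 32 or more. For illustration (the MY_* names are hypothetical):

    #define MY_FLAG_LOW   BIT(5)        /* safe on every word size */
    #define MY_FLAG_HIGH  BIT_ULL(40)   /* BIT(40) would overflow on 32-bit */
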
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 73eb622e7663..5d5c0b8efff2 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,6 +19,12 @@
#include <vdso/time32.h>
#include <vdso/time64.h>
+#ifdef CONFIG_ARM64
+#include <asm/page-def.h>
+#else
+#include <asm/page.h>
+#endif
+
#ifdef CONFIG_ARCH_HAS_VDSO_DATA
#include <asm/vdso/data.h>
#else
@@ -121,6 +127,14 @@ struct vdso_data {
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
+/**
+ * union vdso_data_store - Generic vDSO data page
+ * @data: array of vdso_data structures, one per clock source base
+ * @page: page-sized raw storage backing @data
+ */
+union vdso_data_store {
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
+};
+
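The union guarantees the store is exactly one page while remaining addressable as vdso_data. A minimal sketch of how an architecture might instantiate it (the variable names and the __page_aligned_data placement are illustrative):

    static union vdso_data_store vdso_store __page_aligned_data;
    struct vdso_data *vdso_data = vdso_store.data;
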
/*
* The generic vDSO implementation requires that gettimeofday.h
* provides:
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
new file mode 100644
index 000000000000..c50d152e7b3e
--- /dev/null
+++ b/include/vdso/gettime.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VDSO_GETTIME_H
+#define _VDSO_GETTIME_H
+
+#include <linux/types.h>
+
+struct __kernel_timespec;
+struct timezone;
+
+#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+struct old_timespec32;
+int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+#else
+int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+#endif
+
+__kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+#endif
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 9a2af9fca45e..73501149439d 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -30,9 +30,9 @@ static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
static __always_inline void vdso_write_begin(struct vdso_data *vd)
{
/*
- * WRITE_ONCE it is required otherwise the compiler can validly tear
+ * WRITE_ONCE() is required, otherwise the compiler can validly tear
* updates to vd[x].seq and it is possible that the value seen by the
- * reader it is inconsistent.
+ * reader is inconsistent.
*/
WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
@@ -43,9 +43,9 @@ static __always_inline void vdso_write_end(struct vdso_data *vd)
{
smp_wmb();
/*
- * WRITE_ONCE it is required otherwise the compiler can validly tear
+ * WRITE_ONCE() is required, otherwise the compiler can validly tear
* updates to vd[x].seq and it is possible that the value seen by the
- * reader it is inconsistent.
+ * reader is inconsistent.
*/
WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
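
The two writer helpers bracket updates with odd/even sequence counts; readers spin until they observe an even, unchanged count. A sketch of the matching read side, using vdso_read_begin()/vdso_read_retry() from earlier in this header:

    u32 seq;
    u64 cycles;

    do {
            seq = vdso_read_begin(vd);      /* waits until seq is even */
            cycles = vd->cycle_last;        /* sample the protected data */
    } while (unlikely(vdso_read_retry(vd, seq)));
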
diff --git a/include/vdso/time64.h b/include/vdso/time64.h
index 9d43c3f5e89d..b40cfa2aa33c 100644
--- a/include/vdso/time64.h
+++ b/include/vdso/time64.h
@@ -9,6 +9,7 @@
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
+#define PSEC_PER_SEC 1000000000000LL
#define FSEC_PER_SEC 1000000000000000LL
#endif /* __VDSO_TIME64_H */
diff --git a/include/video/cmdline.h b/include/video/cmdline.h
new file mode 100644
index 000000000000..76649465bb08
--- /dev/null
+++ b/include/video/cmdline.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef VIDEO_CMDLINE_H
+#define VIDEO_CMDLINE_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+const char *video_get_options(const char *name);
+
+#if IS_ENABLED(CONFIG_FB_CORE)
+/* exported for compatibility with fbdev; don't use in new code */
+bool __video_get_options(const char *name, const char **option, bool is_of);
+#endif
+
+#endif
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 06b0b57e996c..c422a403c099 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/fb.h>
#include <linux/of.h>
+#include <drm/drm_color_mgmt.h>
#include <media/v4l2-mediabus.h>
#include <video/videomode.h>
@@ -330,6 +331,7 @@ int ipu_dp_enable_channel(struct ipu_dp *dp);
void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync);
void ipu_dp_disable(struct ipu_soc *ipu);
int ipu_dp_setup_channel(struct ipu_dp *dp,
+ enum drm_color_encoding ycbcr_enc, enum drm_color_range range,
enum ipu_color_space in, enum ipu_color_space out);
int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos);
int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
@@ -484,9 +486,6 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level);
enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code);
-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat);
-bool ipu_pixelformat_is_planar(u32 pixelformat);
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
bool hflip, bool vflip);
int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
diff --git a/include/video/kyro.h b/include/video/kyro.h
index b958c2e9c915..418eef6c5523 100644
--- a/include/video/kyro.h
+++ b/include/video/kyro.h
@@ -38,18 +38,6 @@ struct kyrofb_info {
int wc_cookie;
};
-extern int kyro_dev_init(void);
-extern void kyro_dev_reset(void);
-
-extern unsigned char *kyro_dev_physical_fb_ptr(void);
-extern unsigned char *kyro_dev_virtual_fb_ptr(void);
-extern void *kyro_dev_physical_regs_ptr(void);
-extern void *kyro_dev_virtual_regs_ptr(void);
-extern unsigned int kyro_dev_fb_size(void);
-extern unsigned int kyro_dev_regs_size(void);
-
-extern u32 kyro_dev_overlay_offset(void);
-
/*
* benedict.gaster@superh.com
* Added the follow IOCTLS for the creation of overlay services...
diff --git a/include/video/mbxfb.h b/include/video/mbxfb.h
deleted file mode 100644
index 35921cb6d1e5..000000000000
--- a/include/video/mbxfb.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MBX_FB_H
-#define __MBX_FB_H
-
-#include <asm/ioctl.h>
-#include <asm/types.h>
-
-struct mbxfb_val {
- unsigned int defval;
- unsigned int min;
- unsigned int max;
-};
-
-struct fb_info;
-
-struct mbxfb_platform_data {
- /* Screen info */
- struct mbxfb_val xres;
- struct mbxfb_val yres;
- struct mbxfb_val bpp;
-
- /* Memory info */
- unsigned long memsize; /* if 0 use ODFB? */
- unsigned long timings1;
- unsigned long timings2;
- unsigned long timings3;
-
- int (*probe)(struct fb_info *fb);
- int (*remove)(struct fb_info *fb);
-};
-
-/* planar */
-#define MBXFB_FMT_YUV16 0
-#define MBXFB_FMT_YUV12 1
-
-/* packed */
-#define MBXFB_FMT_UY0VY1 2
-#define MBXFB_FMT_VY0UY1 3
-#define MBXFB_FMT_Y0UY1V 4
-#define MBXFB_FMT_Y0VY1U 5
-struct mbxfb_overlaySetup {
- __u32 enable;
- __u32 x, y;
- __u32 width, height;
- __u32 fmt;
- __u32 mem_offset;
- __u32 scaled_width;
- __u32 scaled_height;
-
- /* Filled by the driver */
- __u32 U_offset;
- __u32 V_offset;
-
- __u16 Y_stride;
- __u16 UV_stride;
-};
-
-#define MBXFB_ALPHABLEND_NONE 0
-#define MBXFB_ALPHABLEND_GLOBAL 1
-#define MBXFB_ALPHABLEND_PIXEL 2
-
-#define MBXFB_COLORKEY_DISABLED 0
-#define MBXFB_COLORKEY_PREVIOUS 1
-#define MBXFB_COLORKEY_CURRENT 2
-struct mbxfb_alphaCtl {
- __u8 overlay_blend_mode;
- __u8 overlay_colorkey_mode;
- __u8 overlay_global_alpha;
- __u32 overlay_colorkey;
- __u32 overlay_colorkey_mask;
-
- __u8 graphics_blend_mode;
- __u8 graphics_colorkey_mode;
- __u8 graphics_global_alpha;
- __u32 graphics_colorkey;
- __u32 graphics_colorkey_mask;
-};
-
-#define MBXFB_PLANE_GRAPHICS 0
-#define MBXFB_PLANE_VIDEO 1
-struct mbxfb_planeorder {
- __u8 bottom;
- __u8 top;
-};
-
-struct mbxfb_reg {
- __u32 addr; /* offset from 0x03fe 0000 */
- __u32 val; /* value */
- __u32 mask; /* which bits to touch (for write) */
-};
-
-#define MBXFB_IOCX_OVERLAY _IOWR(0xF4, 0x00,struct mbxfb_overlaySetup)
-#define MBXFB_IOCG_ALPHA _IOR(0xF4, 0x01,struct mbxfb_alphaCtl)
-#define MBXFB_IOCS_ALPHA _IOW(0xF4, 0x02,struct mbxfb_alphaCtl)
-#define MBXFB_IOCS_PLANEORDER _IOR(0xF4, 0x03,struct mbxfb_planeorder)
-#define MBXFB_IOCS_REG _IOW(0xF4, 0x04,struct mbxfb_reg)
-#define MBXFB_IOCX_REG _IOWR(0xF4, 0x05,struct mbxfb_reg)
-
-#endif /* __MBX_FB_H */
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 77252cb46361..a722dcbf5073 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[];
+ struct mmp_overlay overlays[] __counted_by(overlay_num);
};
extern struct mmp_path *mmp_get_path(const char *name);
diff --git a/include/video/nomodeset.h b/include/video/nomodeset.h
new file mode 100644
index 000000000000..8f8688b8beab
--- /dev/null
+++ b/include/video/nomodeset.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef VIDEO_NOMODESET_H
+#define VIDEO_NOMODESET_H
+
+bool video_firmware_drivers_only(void);
+
+#endif
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index e1126a74882a..eff166fdd81b 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -8,6 +8,8 @@
#ifndef __LINUX_OF_DISPLAY_TIMING_H
#define __LINUX_OF_DISPLAY_TIMING_H
+#include <linux/errno.h>
+
struct device_node;
struct display_timing;
struct display_timings;
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
deleted file mode 100644
index 42b77249ee14..000000000000
--- a/include/video/omap-panel-data.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header containing platform_data structs for omap panels
- *
- * Copyright (C) 2013 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- * Archit Taneja <archit@ti.com>
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Mayuresh Janorkar <mayur@ti.com>
- *
- * Copyright (C) 2010 Canonical Ltd.
- * Author: Bryan Wu <bryan.wu@canonical.com>
- */
-
-#ifndef __OMAP_PANEL_DATA_H
-#define __OMAP_PANEL_DATA_H
-
-#include <video/display_timing.h>
-
-/**
- * connector_atv platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @invert_polarity: invert signal polarity
- */
-struct connector_atv_platform_data {
- const char *name;
- const char *source;
-
- bool invert_polarity;
-};
-
-/**
- * panel_dpi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @display_timing: timings for this panel
- * @backlight_gpio: gpio to enable/disable the backlight (or -1)
- * @enable_gpio: gpio to enable/disable the panel (or -1)
- */
-struct panel_dpi_platform_data {
- const char *name;
- const char *source;
-
- int data_lines;
-
- const struct display_timing *display_timing;
-
- int backlight_gpio;
- int enable_gpio;
-};
-
-/**
- * panel_acx565akm platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @reset_gpio: gpio to reset the panel (or -1)
- * @datapairs: number of SDI datapairs
- */
-struct panel_acx565akm_platform_data {
- const char *name;
- const char *source;
-
- int reset_gpio;
-
- int datapairs;
-};
-
-#endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 005eae19ec09..72f94ccfa725 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -750,7 +750,7 @@
#define WAIT_DMA_GUI_IDLE (1 << 9)
#define WAIT_2D_IDLECLEAN (1 << 16)
-/* SURFACE_CNTL bit consants */
+/* SURFACE_CNTL bit constants */
#define SURF_TRANSLATION_DIS (1 << 8)
#define NONSURF_AP0_SWP_16BPP (1 << 20)
#define NONSURF_AP0_SWP_32BPP (1 << 21)
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index c4a93ce1de48..e6966d187591 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -476,6 +476,10 @@
* 1111 -none- -none- -none- -none- -none-
*/
+#define WIN_RGB_ORDER(_win) (0x2020 + ((_win) * 4))
+#define WIN_RGB_ORDER_FORWARD (0 << 11)
+#define WIN_RGB_ORDER_REVERSE (1 << 11)
+
/* FIMD Version 8 register offset definitions */
#define FIMD_V8_VIDTCON0 0x20010
#define FIMD_V8_VIDTCON1 0x20014
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 28384f354773..d4a5e41d1173 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -23,7 +23,7 @@
# define SST_DEBUG_FUNC 1
# define SST_DEBUG_VAR 1
#else
-# define dprintk(X...)
+# define dprintk(X...) no_printk(X)
# define SST_DEBUG_REG 0
# define SST_DEBUG_FUNC 0
# define SST_DEBUG_VAR 0
@@ -48,7 +48,7 @@
#if (SST_DEBUG_FUNC > 1)
# define f_ddprintk(X...) dprintk(" " X)
#else
-# define f_ddprintk(X...)
+# define f_ddprintk(X...) no_printk(X)
#endif
#if (SST_DEBUG_FUNC > 2)
# define f_dddprintk(X...) dprintk(" " X)
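
Using no_printk() instead of an empty macro keeps printf-style format checking and marks the arguments as used in the disabled configuration, while still generating no code. The pattern in isolation, for a hypothetical debug macro:

    #ifdef DEBUG
    # define my_dbg(fmt, ...)  printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #else
    # define my_dbg(fmt, ...)  no_printk(fmt, ##__VA_ARGS__)
    #endif
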
diff --git a/include/video/sticore.h b/include/video/sticore.h
new file mode 100644
index 000000000000..823666af1871
--- /dev/null
+++ b/include/video/sticore.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef STICORE_H
+#define STICORE_H
+
+struct device;
+
+/* generic STI structures & functions */
+
+#define MAX_STI_ROMS 4 /* max no. of ROMs which this driver handles */
+
+#define STI_REGION_MAX 8 /* hardcoded STI constants */
+#define STI_DEV_NAME_LENGTH 32
+#define STI_MONITOR_MAX 256
+
+#define STI_FONT_HPROMAN8 1
+#define STI_FONT_KANA8 2
+
+#define ALT_CODE_TYPE_UNKNOWN 0x00 /* alt code type values */
+#define ALT_CODE_TYPE_PA_RISC_64 0x01
+
+/* The latency of the STI functions cannot really be reduced by setting
+ * this to 0; STI doesn't seem to be designed to allow calling a different
+ * function (or the same function with different arguments) after a
+ * function exited with 1 as return value.
+ *
+ * As all of the functions below could be called from interrupt context,
+ * we have to spin_lock_irqsave around the do { ret = bla(); } while(ret==1)
+ * block. Really bad latency there.
+ *
+ * Probably the best solution to all this is to have the generic code manage
+ * the screen buffer and a kernel thread to call STI occasionally.
+ *
+ * Luckily, the frame buffer guys have the same problem so we can just wait
+ * for them to fix it and steal their solution. prumpf
+ */
+
+#include <asm/io.h>
+
+#define STI_WAIT 1
+
+#define STI_PTR(p) ( virt_to_phys(p) )
+
+#define sti_onscreen_x(sti) (sti->glob_cfg->onscreen_x)
+#define sti_onscreen_y(sti) (sti->glob_cfg->onscreen_y)
+
+/* sti_font_xy() uses the native font ROM! */
+#define sti_font_x(sti) (PTR_STI(sti->font)->width)
+#define sti_font_y(sti) (PTR_STI(sti->font)->height)
+
+#ifdef CONFIG_64BIT
+#define STI_LOWMEM (GFP_KERNEL | GFP_DMA)
+#else
+#define STI_LOWMEM (GFP_KERNEL)
+#endif
+
+
+/* STI function configuration structs */
+
+typedef union region {
+ struct {
+ u32 offset : 14; /* offset in 4kbyte page */
+ u32 sys_only : 1; /* don't map to user space */
+ u32 cache : 1; /* map to data cache */
+ u32 btlb : 1; /* map to block tlb */
+ u32 last : 1; /* last region in list */
+ u32 length : 14; /* length in 4kbyte page */
+ } region_desc;
+
+ u32 region; /* complete region value */
+} region_t;
+
+#define REGION_OFFSET_TO_PHYS( rt, hpa ) \
+ (((rt).region_desc.offset << 12) + (hpa))
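
Region descriptors store page-granular offsets, so REGION_OFFSET_TO_PHYS() shifts the 4 kB page offset up by 12 bits and adds the device's base address. A usage sketch in the style of the sti_struct fields declared later in this header (sti, i and hpa are assumed context):

    /* Sketch: resolve region i of an STI ROM to a CPU physical address,
     * hpa being the host physical address reported by firmware.
     */
    sti->regions_phys[i] = REGION_OFFSET_TO_PHYS(sti->regions[i], hpa);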
+
+struct sti_glob_cfg_ext {
+ u8 curr_mon; /* current monitor configured */
+ u8 friendly_boot; /* in friendly boot mode */
+ s16 power; /* power calculation (in Watts) */
+ s32 freq_ref; /* frequency reference */
+ u32 *sti_mem_addr; /* pointer to global sti memory (size=sti_mem_request) */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_glob_cfg {
+ s32 text_planes; /* number of planes used for text */
+ s16 onscreen_x; /* screen width in pixels */
+ s16 onscreen_y; /* screen height in pixels */
+ s16 offscreen_x; /* offset width in pixels */
+ s16 offscreen_y; /* offset height in pixels */
+ s16 total_x; /* frame buffer width in pixels */
+ s16 total_y; /* frame buffer height in pixels */
+ u32 *region_ptrs[STI_REGION_MAX]; /* region pointers */
+ s32 reent_lvl; /* storage for reentry level value */
+ u32 *save_addr; /* where to save or restore reentrant state */
+ u32 *ext_ptr; /* pointer to extended glob_cfg data structure */
+};
+
+
+/* STI init function structs */
+
+struct sti_init_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 reset : 1; /* hard reset the device? */
+ u32 text : 1; /* turn on text display planes? */
+ u32 nontext : 1; /* turn on non-text display planes? */
+ u32 clear : 1; /* clear text display planes? */
+ u32 cmap_blk : 1; /* non-text planes cmap black? */
+ u32 enable_be_timer : 1; /* enable bus error timer */
+ u32 enable_be_int : 1; /* enable bus error timer interrupt */
+ u32 no_chg_tx : 1; /* don't change text settings */
+ u32 no_chg_ntx : 1; /* don't change non-text settings */
+ u32 no_chg_bet : 1; /* don't change berr timer settings */
+ u32 no_chg_bei : 1; /* don't change berr int settings */
+ u32 init_cmap_tx : 1; /* initialize cmap for text planes */
+ u32 cmt_chg : 1; /* change current monitor type */
+ u32 retain_ie : 1; /* don't allow reset to clear int enables */
+ u32 caller_bootrom : 1; /* set only by bootrom for each call */
+ u32 caller_kernel : 1; /* set only by kernel for each call */
+ u32 caller_other : 1; /* set only by non-[BR/K] caller */
+ u32 pad : 14; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_init_inptr_ext {
+ u8 config_mon_type; /* configure to monitor type */
+ u8 pad[1]; /* pad to word boundary */
+ u16 inflight_data; /* inflight data possible on PCI */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_init_inptr {
+ s32 text_planes; /* number of planes to use for text */
+ u32 *ext_ptr; /* pointer to extended init_graph inptr data structure*/
+};
+
+
+struct sti_init_outptr {
+ s32 errno; /* error number on failure */
+ s32 text_planes; /* number of planes used for text */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+
+
+/* STI configuration function structs */
+
+struct sti_conf_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 pad : 31; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_conf_inptr {
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_conf_outptr_ext {
+ u32 crt_config[3]; /* hardware specific X11/OGL information */
+ u32 crt_hdw[3];
+ u32 *future_ptr;
+};
+
+struct sti_conf_outptr {
+ s32 errno; /* error number on failure */
+ s16 onscreen_x; /* screen width in pixels */
+ s16 onscreen_y; /* screen height in pixels */
+ s16 offscreen_x; /* offscreen width in pixels */
+ s16 offscreen_y; /* offscreen height in pixels */
+ s16 total_x; /* frame buffer width in pixels */
+ s16 total_y; /* frame buffer height in pixels */
+ s32 bits_per_pixel; /* bits/pixel device has configured */
+ s32 bits_used; /* bits which can be accessed */
+ s32 planes; /* number of fb planes in system */
+ u8 dev_name[STI_DEV_NAME_LENGTH]; /* null terminated product name */
+ u32 attributes; /* flags denoting attributes */
+ u32 *ext_ptr; /* pointer to future data */
+};
+
+struct sti_rom {
+ u8 type[4];
+ u8 res004;
+ u8 num_mons;
+ u8 revno[2];
+ u32 graphics_id[2];
+
+ u32 font_start;
+ u32 statesize;
+ u32 last_addr;
+ u32 region_list;
+
+ u16 reentsize;
+ u16 maxtime;
+ u32 mon_tbl_addr;
+ u32 user_data_addr;
+ u32 sti_mem_req;
+
+ u32 user_data_size;
+ u16 power;
+ u8 bus_support;
+ u8 ext_bus_support;
+ u8 alt_code_type;
+ u8 ext_dd_struct[3];
+ u32 cfb_addr;
+
+ u32 init_graph;
+ u32 state_mgmt;
+ u32 font_unpmv;
+ u32 block_move;
+ u32 self_test;
+ u32 excep_hdlr;
+ u32 inq_conf;
+ u32 set_cm_entry;
+ u32 dma_ctrl;
+ u8 res040[7 * 4];
+
+ u32 init_graph_addr;
+ u32 state_mgmt_addr;
+ u32 font_unp_addr;
+ u32 block_move_addr;
+ u32 self_test_addr;
+ u32 excep_hdlr_addr;
+ u32 inq_conf_addr;
+ u32 set_cm_entry_addr;
+ u32 image_unpack_addr;
+ u32 pa_risx_addrs[7];
+};
+
+struct sti_rom_font {
+ u16 first_char;
+ u16 last_char;
+ u8 width;
+ u8 height;
+ u8 font_type; /* language type */
+ u8 bytes_per_char;
+ s32 next_font; /* note: signed int */
+ u8 underline_height;
+ u8 underline_pos;
+ u8 res008[2];
+};
+
+/* sticore internal font handling */
+
+struct sti_cooked_font {
+ struct sti_rom_font *raw; /* native ptr for STI functions */
+ void *raw_ptr; /* kmalloc'ed font data */
+ struct sti_cooked_font *next_font;
+ int height, width;
+ int refcount;
+ u32 crc;
+};
+
+struct sti_cooked_rom {
+ struct sti_rom *raw;
+ struct sti_cooked_font *font_start;
+};
+
+/* STI font printing function structs */
+
+struct sti_font_inptr {
+ u32 *font_start_addr; /* address of font start */
+ s16 index; /* index into font table of character */
+ u8 fg_color; /* foreground color of character */
+ u8 bg_color; /* background color of character */
+ s16 dest_x; /* X location of character upper left */
+ s16 dest_y; /* Y location of character upper left */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_font_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 non_text : 1; /* font unpack/move in non_text planes =1, text =0 */
+ u32 pad : 30; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_font_outptr {
+ s32 errno; /* error number on failure */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+/* STI blockmove structs */
+
+struct sti_blkmv_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 color : 1; /* change color during move? */
+ u32 clear : 1; /* clear during move? */
+ u32 non_text : 1; /* block move in non_text planes =1, text =0 */
+ u32 pad : 28; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_blkmv_inptr {
+ u8 fg_color; /* foreground color after move */
+ u8 bg_color; /* background color after move */
+ s16 src_x; /* source upper left pixel x location */
+ s16 src_y; /* source upper left pixel y location */
+ s16 dest_x; /* dest upper left pixel x location */
+ s16 dest_y; /* dest upper left pixel y location */
+ s16 width; /* block width in pixels */
+ s16 height; /* block height in pixels */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_blkmv_outptr {
+ s32 errno; /* error number on failure */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+
+/* sti_all_data is an internal struct which needs to be allocated in
+ * low memory (< 4GB) if a 32-bit STI ROM is used on a 64-bit kernel */
+
+struct sti_all_data {
+ struct sti_glob_cfg glob_cfg;
+ struct sti_glob_cfg_ext glob_cfg_ext;
+
+ struct sti_conf_inptr inq_inptr;
+ struct sti_conf_outptr inq_outptr; /* configuration */
+ struct sti_conf_outptr_ext inq_outptr_ext;
+
+ struct sti_init_inptr_ext init_inptr_ext;
+ struct sti_init_inptr init_inptr;
+ struct sti_init_outptr init_outptr;
+
+ struct sti_blkmv_inptr blkmv_inptr;
+ struct sti_blkmv_outptr blkmv_outptr;
+
+ struct sti_font_inptr font_inptr;
+ struct sti_font_outptr font_outptr;
+
+ /* leave as last entries */
+ unsigned long save_addr[1024 / sizeof(unsigned long)];
+ /* min 256 bytes which is STI default, max sti->sti_mem_request */
+ unsigned long sti_mem_addr[256 / sizeof(unsigned long)];
+ /* do not add anything below here! */
+};
+
+/* internal generic STI struct */
+
+struct sti_struct {
+ spinlock_t lock;
+
+ /* char **mon_strings; */
+ int sti_mem_request;
+ u32 graphics_id[2];
+
+ struct sti_cooked_rom *rom;
+
+ unsigned long font_unpmv;
+ unsigned long block_move;
+ unsigned long init_graph;
+ unsigned long inq_conf;
+ int do_call64; /* call 64-bit code */
+
+ /* all following fields are initialized by the generic routines */
+ int text_planes;
+ region_t regions[STI_REGION_MAX];
+ unsigned long regions_phys[STI_REGION_MAX];
+
+ struct sti_glob_cfg *glob_cfg; /* points into sti_all_data */
+
+ int wordmode;
+ struct sti_cooked_font *font; /* ptr to selected font (cooked) */
+
+ struct pci_dev *pd;
+
+ /* PCI data structures (pg. 17ff from sti.pdf) */
+ u8 rm_entry[16]; /* pci region mapper array == pci config space offset */
+
+ /* pointer to the parent device */
+ struct device *dev;
+
+ /* pointer to all internal data */
+ struct sti_all_data *sti_data;
+
+ /* pa_path of this device */
+ char pa_path[24];
+};
+
+
+/* sticore interface functions */
+
+struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */
+void sti_font_convert_bytemode(struct sti_struct *sti, struct sti_cooked_font *f);
+
+
+/* sticore main function to call STI firmware */
+
+int sti_call(const struct sti_struct *sti, unsigned long func,
+ const void *flags, void *inptr, void *outptr,
+ struct sti_glob_cfg *glob_cfg);
+
+
+/* functions to call the STI ROM directly */
+
+void sti_putc(struct sti_struct *sti, int c, int y, int x,
+ struct sti_cooked_font *font);
+void sti_set(struct sti_struct *sti, int src_y, int src_x,
+ int height, int width, u8 color);
+void sti_clear(struct sti_struct *sti, int src_y, int src_x,
+ int height, int width, int c, struct sti_cooked_font *font);
+void sti_bmove(struct sti_struct *sti, int src_y, int src_x,
+ int dst_y, int dst_x, int height, int width,
+ struct sti_cooked_font *font);
+
+#endif /* STICORE_H */
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 8d2a3bfc8dac..47d96e75e8ef 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -109,8 +109,6 @@ struct uvesafb_ktask {
u32 ack;
};
-static int uvesafb_exec(struct uvesafb_ktask *tsk);
-
#define UVESAFB_EXACT_RES 1
#define UVESAFB_EXACT_DEPTH 2
diff --git a/include/video/vga.h b/include/video/vga.h
index d334e64c1c19..947c0abd04ef 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -2,15 +2,15 @@
* linux/include/video/vga.h -- standard VGA chipset interaction
*
* Copyright 1999 Jeff Garzik <jgarzik@pobox.com>
- *
+ *
* Copyright history from vga16fb.c:
* Copyright 1999 Ben Pfaff and Petr Vandrovec
- * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
+ * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
* Based on VESA framebuffer (c) 1998 Gerd Knorr
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
- * archive for more details.
+ * archive for more details.
*
*/
@@ -22,6 +22,8 @@
#include <asm/vga.h>
#include <asm/byteorder.h>
+#define VGA_FB_PHYS_BASE 0xA0000 /* VGA framebuffer I/O base */
+#define VGA_FB_PHYS_SIZE 65536 /* VGA framebuffer I/O size */
/* Some of the code below is taken from SVGAlib. The original,
unmodified copyright notice for that code is below. */
@@ -190,7 +192,7 @@ struct vgastate {
__u32 num_gfx; /* number of gfx registers, 0 for default */
__u32 num_seq; /* number of seq registers, 0 for default */
void *vidstate;
-};
+};
extern int save_vga(struct vgastate *state);
extern int restore_vga(struct vgastate *state);
@@ -198,7 +200,7 @@ extern int restore_vga(struct vgastate *state);
/*
* generic VGA port read/write
*/
-
+
static inline unsigned char vga_io_r (unsigned short port)
{
return inb_p(port);
@@ -261,7 +263,7 @@ static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
/*
* VGA CRTC register read/write
*/
-
+
static inline unsigned char vga_rcrt (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_CRT_IC, reg);
@@ -314,7 +316,7 @@ static inline void vga_mm_wcrt (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA sequencer register read/write
*/
-
+
static inline unsigned char vga_rseq (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_SEQ_I, reg);
@@ -366,7 +368,7 @@ static inline void vga_mm_wseq (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA graphics controller register read/write
*/
-
+
static inline unsigned char vga_rgfx (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_GFX_I, reg);
@@ -419,7 +421,7 @@ static inline void vga_mm_wgfx (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA attribute controller register read/write
*/
-
+
static inline unsigned char vga_rattr (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_ATT_IW, reg);
diff --git a/include/video/w100fb.h b/include/video/w100fb.h
deleted file mode 100644
index a614654d8598..000000000000
--- a/include/video/w100fb.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Support for the w100 frame buffer.
- *
- * Copyright (c) 2004-2005 Richard Purdie
- * Copyright (c) 2005 Ian Molton
- */
-
-#define W100_GPIO_PORT_A 0
-#define W100_GPIO_PORT_B 1
-
-#define CLK_SRC_XTAL 0
-#define CLK_SRC_PLL 1
-
-struct w100fb_par;
-
-unsigned long w100fb_gpio_read(int port);
-void w100fb_gpio_write(int port, unsigned long value);
-unsigned long w100fb_get_hsynclen(struct device *dev);
-
-/* LCD Specific Routines and Config */
-struct w100_tg_info {
- void (*change)(struct w100fb_par*);
- void (*suspend)(struct w100fb_par*);
- void (*resume)(struct w100fb_par*);
-};
-
-/* General Platform Specific w100 Register Values */
-struct w100_gen_regs {
- unsigned long lcd_format;
- unsigned long lcdd_cntl1;
- unsigned long lcdd_cntl2;
- unsigned long genlcd_cntl1;
- unsigned long genlcd_cntl2;
- unsigned long genlcd_cntl3;
-};
-
-struct w100_gpio_regs {
- unsigned long init_data1;
- unsigned long init_data2;
- unsigned long gpio_dir1;
- unsigned long gpio_oe1;
- unsigned long gpio_dir2;
- unsigned long gpio_oe2;
-};
-
-/* Optional External Memory Configuration */
-struct w100_mem_info {
- unsigned long ext_cntl;
- unsigned long sdram_mode_reg;
- unsigned long ext_timing_cntl;
- unsigned long io_cntl;
- unsigned int size;
-};
-
-struct w100_bm_mem_info {
- unsigned long ext_mem_bw;
- unsigned long offset;
- unsigned long ext_timing_ctl;
- unsigned long ext_cntl;
- unsigned long mode_reg;
- unsigned long io_cntl;
- unsigned long config;
-};
-
-/* LCD Mode definition */
-struct w100_mode {
- unsigned int xres;
- unsigned int yres;
- unsigned short left_margin;
- unsigned short right_margin;
- unsigned short upper_margin;
- unsigned short lower_margin;
- unsigned long crtc_ss;
- unsigned long crtc_ls;
- unsigned long crtc_gs;
- unsigned long crtc_vpos_gs;
- unsigned long crtc_rev;
- unsigned long crtc_dclk;
- unsigned long crtc_gclk;
- unsigned long crtc_goe;
- unsigned long crtc_ps1_active;
- char pll_freq;
- char fast_pll_freq;
- int sysclk_src;
- int sysclk_divider;
- int pixclk_src;
- int pixclk_divider;
- int pixclk_divider_rotated;
-};
-
-struct w100_pll_info {
- uint16_t freq; /* desired Fout for PLL (Mhz) */
- uint8_t M; /* input divider */
- uint8_t N_int; /* VCO multiplier */
- uint8_t N_fac; /* VCO multiplier fractional part */
- uint8_t tfgoal;
- uint8_t lock_time;
-};
-
-/* Initial Video mode orientation flags */
-#define INIT_MODE_ROTATED 0x1
-#define INIT_MODE_FLIPPED 0x2
-
-/*
- * This structure describes the machine which we are running on.
- * It is set by machine specific code and used in the probe routine
- * of drivers/video/w100fb.c
- */
-struct w100fb_mach_info {
- /* General Platform Specific Registers */
- struct w100_gen_regs *regs;
- /* Table of modes the LCD is capable of */
- struct w100_mode *modelist;
- unsigned int num_modes;
- /* Hooks for any platform specific tg/lcd code (optional) */
- struct w100_tg_info *tg;
- /* External memory definition (if present) */
- struct w100_mem_info *mem;
- /* Additional External memory definition (if present) */
- struct w100_bm_mem_info *bm_mem;
- /* GPIO definitions (optional) */
- struct w100_gpio_regs *gpio;
- /* Initial Mode flags */
- unsigned int init_mode;
- /* Xtal Frequency */
- unsigned int xtal_freq;
- /* Enable Xtal input doubler (1 == enable) */
- unsigned int xtal_dbl;
-};
-
-/* General frame buffer data structure */
-struct w100fb_par {
- unsigned int chip_id;
- unsigned int xres;
- unsigned int yres;
- unsigned int extmem_active;
- unsigned int flip;
- unsigned int blanked;
- unsigned int fastpll_mode;
- unsigned long hsync_len;
- struct w100_mode *mode;
- struct w100_pll_info *pll_table;
- struct w100fb_mach_info *mach;
- uint32_t *saved_intmem;
- uint32_t *saved_extmem;
-};
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 4ddd7dc4a61e..b1e11863144d 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -40,41 +40,6 @@
#include <xen/xen.h>
#include <linux/acpi.h>
-#define ACPI_MEMORY_DEVICE_CLASS "memory"
-#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
-#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
-
-int xen_stub_memory_device_init(void);
-void xen_stub_memory_device_exit(void);
-
-#define ACPI_PROCESSOR_CLASS "processor"
-#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
-#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
-
-int xen_stub_processor_init(void);
-void xen_stub_processor_exit(void);
-
-void xen_pcpu_hotplug_sync(void);
-int xen_pcpu_id(uint32_t acpi_id);
-
-static inline int xen_acpi_get_pxm(acpi_handle h)
-{
- unsigned long long pxm;
- acpi_status status;
- acpi_handle handle;
- acpi_handle phandle = h;
-
- do {
- handle = phandle;
- status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
- if (ACPI_SUCCESS(status))
- return pxm;
- status = acpi_get_parent(handle, &phandle);
- } while (ACPI_SUCCESS(status));
-
- return -ENXIO;
-}
-
int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnd);
int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
index b40485e54d80..9d7dd1c65a21 100644
--- a/include/xen/arm/hypercall.h
+++ b/include/xen/arm/hypercall.h
@@ -53,7 +53,6 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg);
int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
int HYPERVISOR_physdev_op(int cmd, void *arg);
int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs,
struct xen_dm_op_buf *bufs);
@@ -74,18 +73,4 @@ HYPERVISOR_suspend(unsigned long start_info_mfn)
return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
}
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
- unsigned int new_val, unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- BUG();
-}
-
#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 43ef24dd030e..9995695204f5 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -7,18 +7,6 @@
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
deleted file mode 100644
index b9cc11e887ed..000000000000
--- a/include/xen/arm/page-coherent.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_ARM_PAGE_COHERENT_H
-#define _XEN_ARM_PAGE_COHERENT_H
-
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
- return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
- dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-#endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
index 39df751d0dc4..e5c84ff28c8b 100644
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -83,6 +83,9 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn)
})
#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
+#define percpu_to_gfn(v) \
+ (pfn_to_gfn(per_cpu_ptr_to_phys(v) >> XEN_PAGE_SHIFT))
+
/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
@@ -106,12 +109,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return __set_phys_to_machine(pfn, mfn);
}
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/include/xen/arm/swiotlb-xen.h b/include/xen/arm/swiotlb-xen.h
new file mode 100644
index 000000000000..33336ab58afc
--- /dev/null
+++ b/include/xen/arm/swiotlb-xen.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_SWIOTLB_XEN_H
+#define _ASM_ARM_SWIOTLB_XEN_H
+
+#include <xen/features.h>
+#include <xen/xen.h>
+
+static inline int xen_swiotlb_detect(void)
+{
+ if (!xen_domain())
+ return 0;
+ if (xen_feature(XENFEAT_direct_mapped))
+ return 1;
+ /* legacy case */
+ if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
+ return 1;
+ return 0;
+}
+
+#endif /* _ASM_ARM_SWIOTLB_XEN_H */
diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
new file mode 100644
index 000000000000..70073f5a2b54
--- /dev/null
+++ b/include/xen/arm/xen-ops.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_OPS_H
+#define _ASM_ARM_XEN_OPS_H
+
+#include <xen/swiotlb-xen.h>
+#include <xen/xen-ops.h>
+
+static inline void xen_setup_dma_ops(struct device *dev)
+{
+#ifdef CONFIG_XEN
+ if (xen_swiotlb_detect())
+ dev->dma_ops = &xen_swiotlb_dma_ops;
+#endif
+}
+
+#endif /* _ASM_ARM_XEN_OPS_H */
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 6fb95aa19405..f78a6cc94f1a 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -2,6 +2,8 @@
/******************************************************************************
* Xen balloon functionality
*/
+#ifndef _XEN_BALLOON_H
+#define _XEN_BALLOON_H
#define RETRY_UNLIMITED 0
@@ -24,8 +26,8 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page **pages);
-void free_xenballooned_pages(int nr_pages, struct page **pages);
+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
@@ -34,3 +36,5 @@ static inline void xen_balloon_init(void)
{
}
#endif
+
+#endif /* _XEN_BALLOON_H */
diff --git a/include/xen/events.h b/include/xen/events.h
index df1e6391f63f..3b07409f8032 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -12,13 +12,20 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/events.h>
+struct xenbus_device;
+
unsigned xen_evtchn_nr_channels(void);
int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
@@ -30,14 +37,14 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- evtchn_port_t remote_port);
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- evtchn_port_t remote_port,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname,
- void *dev_id);
+int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
+ evtchn_port_t remote_port);
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
+ evtchn_port_t remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
/*
* Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -46,6 +53,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
*/
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+/*
+ * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
+ * functions above.
+ */
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
+/* Signal an event was spurious, i.e. there was no action resulting from it. */
+#define XEN_EOI_FLAG_SPURIOUS 0x00000001
+
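With the *_lateeoi bindings the EOI is no longer implied by returning from the handler; the driver must signal completion itself, optionally flagging spurious wakeups so the core can throttle a misbehaving peer. A sketch of the intended handler shape (the helper and device names are hypothetical):

    static irqreturn_t my_backend_interrupt(int irq, void *dev_id)
    {
            unsigned int eoi_flags = 0;

            if (!process_pending_requests(dev_id))  /* hypothetical helper */
                    eoi_flags |= XEN_EOI_FLAG_SPURIOUS;

            xen_irq_lateeoi(irq, eoi_flags);
            return IRQ_HANDLED;
    }
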
#define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX
#define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
#define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN
@@ -54,13 +69,12 @@ int xen_set_irq_priority(unsigned irq, unsigned priority);
/*
* Allow extra references to event channels exposed to userspace by evtchn
*/
-int evtchn_make_refcounted(evtchn_port_t evtchn);
+int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static);
int evtchn_get(evtchn_port_t evtchn);
void evtchn_put(evtchn_port_t evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq);
-int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(evtchn_port_t port)
{
@@ -74,7 +88,6 @@ void xen_irq_resume(void);
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
-void xen_set_irq_pending(int irq);
bool xen_test_irq_pending(int irq);
/* Poll waiting for an irq to become pending. In the usual case, the
@@ -87,12 +100,11 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
/* Determine the IRQ which is bound to an event channel */
unsigned int irq_from_evtchn(evtchn_port_t evtchn);
-int irq_from_virq(unsigned int cpu, unsigned int virq);
-evtchn_port_t evtchn_from_irq(unsigned irq);
+int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
+ evtchn_port_t *evtchn);
int xen_set_callback_via(uint64_t via);
-void xen_evtchn_do_upcall(struct pt_regs *regs);
-void xen_hvm_evtchn_do_upcall(void);
+int xen_evtchn_do_upcall(void);
/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
@@ -109,9 +121,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
/* De-allocates the above mentioned physical interrupt. */
int xen_destroy_irq(int irq);
-/* Return irq from pirq */
-int xen_irq_from_pirq(unsigned pirq);
-
/* Return the pirq allocated to the irq. */
int xen_pirq_from_irq(unsigned irq);
@@ -123,4 +132,16 @@ int xen_test_irq_shared(int irq);
/* initialize Xen IRQ subsystem */
void xen_init_IRQ(void);
+
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
+
+static inline void xen_evtchn_close(evtchn_port_t port)
+{
+ struct evtchn_close close;
+
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+}
+
#endif /* _XEN_EVENTS_H */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 9bc5bc07d4d3..e279be353e3f 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -50,7 +50,12 @@
#include <linux/page-flags.h>
#include <linux/kernel.h>
-#define GNTTAB_RESERVED_XENSTORE 1
+/*
+ * Technically there's no reliably invalid grant reference or grant handle,
+ * so pick the value that is the most unlikely one to be observed valid.
+ */
+#define INVALID_GRANT_REF ((grant_ref_t)-1)
+#define INVALID_GRANT_HANDLE ((grant_handle_t)-1)
/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4
@@ -90,33 +95,46 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
* longer in use. Return 1 if the grant entry was freed, 0 if it is still in
* use.
*/
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+int gnttab_end_foreign_access_ref(grant_ref_t ref);
/*
* Eventually end access through the given grant reference, and once that
* access has been ended, free the given page too. Access will be ended
* immediately iff the grant entry is not in use, otherwise it will happen
- * some time later. page may be 0, in which case no freeing will occur.
+ * some time later. page may be NULL, in which case no freeing will occur.
+ * Note that the granted page might still be accessed (read or write) by the
+ * other side after gnttab_end_foreign_access() returns, so even if page was
+ * specified as NULL it is not allowed to just reuse the page for other
+ * purposes immediately. gnttab_end_foreign_access() will take an additional
+ * reference to the granted page in this case, which is dropped only after
+ * the grant is no longer in use.
+ * This requires that multi page allocations for areas subject to
+ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
+ * via free_pages_exact()) in order to avoid high order pages.
*/
-void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
- unsigned long page);
-
-int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
-
-unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
-unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
-int gnttab_query_foreign_access(grant_ref_t ref);
+/*
+ * End access through the given grant reference, iff the grant entry is
+ * no longer in use. In case of success ending foreign access, the
+ * grant reference is deallocated.
+ * Return 1 if the grant entry was freed, 0 if it is still in use.
+ */
+int gnttab_try_end_foreign_access(grant_ref_t ref);
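
Because the peer may still hold the grant when access is ended, buffers passed to gnttab_end_foreign_access() must come from alloc_pages_exact() rather than from high-order page allocations, as the comment above notes. A sketch of the lifecycle, with error handling elided and domid assumed from context:

    void *ring = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
    int ref = gnttab_grant_foreign_access(domid, virt_to_gfn(ring),
                                          0 /* read-write */);

    /* ... on success (ref >= 0), hand ref to the peer and use the ring ... */

    /* Frees the page only once the peer has really dropped the grant. */
    gnttab_end_foreign_access(ref, virt_to_page(ring));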
/*
* operations on reserved batches of grant references
*/
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
+int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);
+
void gnttab_free_grant_reference(grant_ref_t ref);
void gnttab_free_grant_references(grant_ref_t head);
+void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);
+
int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
@@ -140,9 +158,6 @@ static inline void gnttab_page_grant_foreign_access_ref_one(
readonly);
}
-void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
- unsigned long pfn);
-
static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
uint32_t flags, grant_ref_t ref, domid_t domid)
@@ -157,6 +172,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
map->flags = flags;
map->ref = ref;
map->dom = domid;
+ map->status = 1; /* arbitrary positive value */
}
static inline void
@@ -198,6 +214,23 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
+struct gnttab_page_cache {
+ spinlock_t lock;
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+ struct page *pages;
+#else
+ struct list_head pages;
+#endif
+ unsigned int num_pages;
+};
+
+void gnttab_page_cache_init(struct gnttab_page_cache *cache);
+int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
+void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
+ unsigned int num);
+void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
+ unsigned int num);
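
gnttab_page_cache lets backends recycle ballooned pages across map/unmap cycles instead of hitting gnttab_alloc_pages()/gnttab_free_pages() each time. A sketch of the intended call pattern (keeping sixteen spare pages is an arbitrary choice):

    struct gnttab_page_cache cache;
    struct page *page;

    gnttab_page_cache_init(&cache);
    if (gnttab_page_cache_get(&cache, &page))
            ;       /* cache empty: fall back to gnttab_alloc_pages() */
    /* ... use the page for a grant mapping ... */
    gnttab_page_cache_put(&cache, &page, 1);
    gnttab_page_cache_shrink(&cache, 16);   /* keep at most 16 spares */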
+
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
/* Device for which DMA memory will be/was allocated. */
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index b7fd7fc9ad41..8da7a6747058 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -60,4 +60,6 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
void xen_setup_callback_vector(void);
+int xen_set_upcall_vector(unsigned int cpu);
+
#endif /* XEN_HVM_H__ */
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
index dc3193f4b581..c67822a25ea6 100644
--- a/include/xen/interface/callback.h
+++ b/include/xen/interface/callback.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* callback.h
*
* Register guest OS callbacks with Xen.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2006, Ian Campbell
*/
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 9e9f9bf7c66d..38deb1214613 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* elfnote.h
*
* Definitions used for the Xen ELF notes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/
@@ -208,13 +191,3 @@
#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
index cf80e338fbb0..5f8da466e8a9 100644
--- a/include/xen/interface/event_channel.h
+++ b/include/xen/interface/event_channel.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* event_channel.h
*
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 6d1384abfbdf..53f760378e39 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* features.h
*
@@ -83,6 +83,20 @@
*/
#define XENFEAT_linux_rsdp_unrestricted 15
+/*
+ * A direct-mapped (or 1:1 mapped) domain is a domain for which its
+ * local pages have gfn == mfn. If a domain is direct-mapped,
+ * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped
+ * is set.
+ *
+ * If neither flag is set (e.g. older Xen releases) the assumptions are:
+ * - not auto_translated domains (x86 only) are always direct-mapped
+ * - on x86, auto_translated domains are not direct-mapped
+ * - on ARM, Dom0 is direct-mapped, DomUs are not
+ */
+#define XENFEAT_not_direct_mapped 16
+#define XENFEAT_direct_mapped 17
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index 7fb7112d667c..cebbd99f1f84 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -1,27 +1,10 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* grant_table.h
*
* Interface for granting foreign access to page frames, and receiving
* page-ownership transfers.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
@@ -36,7 +19,8 @@
/* Some rough guidelines on accessing and updating grant-table entries
* in a concurrency-safe manner. For more information, Linux contains a
- * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
+ * reference implementation for guest OSes (drivers/xen/grant-table.c, see
+ * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD).
*
* NB. WMB is a no-op on current-generation x86 processors. However, a
* compiler barrier will still be required.
@@ -97,8 +81,9 @@ typedef uint32_t grant_ref_t;
*/
/*
- * Version 1 of the grant table entry structure is maintained purely
- * for backwards compatibility. New guests should use version 2.
+ * Version 1 of the grant table entry structure is maintained largely for
+ * backwards compatibility. New guests should support using version 2 to
+ * overcome the limitations of version 1, but should default to version 1.
*/
struct grant_entry_v1 {
/* GTF_xxx: various type and flag information. [XEN,GST] */
@@ -106,12 +91,21 @@ struct grant_entry_v1 {
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
- * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
- * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
*/
uint32_t frame;
};
+/* The first few grant table entries will be preserved across grant table
+ * version changes and may be pre-populated at domain creation by tools.
+ */
+#define GNTTAB_NR_RESERVED_ENTRIES 8
+#define GNTTAB_RESERVED_CONSOLE 0
+#define GNTTAB_RESERVED_XENSTORE 1
+
/*
* Type of grant entry.
* GTF_invalid: This grant entry grants no privileges.
@@ -128,10 +122,13 @@ struct grant_entry_v1 {
#define GTF_type_mask (3U<<0)
/*
- * Subflags for GTF_permit_access.
+ * Subflags for GTF_permit_access and GTF_transitive.
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * Further subflags for GTF_permit_access only.
+ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for
+ * mappings of the grant. [GST]
* GTF_sub_page: Grant access to only a subrange of the page. @domid
* will only be allowed to copy from the grant, and not
* map it. [GST]
@@ -142,6 +139,12 @@ struct grant_entry_v1 {
#define GTF_reading (1U<<_GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_PWT (5)
+#define GTF_PWT (1U<<_GTF_PWT)
+#define _GTF_PCD (6)
+#define GTF_PCD (1U<<_GTF_PCD)
+#define _GTF_PAT (7)
+#define GTF_PAT (1U<<_GTF_PAT)
#define _GTF_sub_page (8)
#define GTF_sub_page (1U<<_GTF_sub_page)
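To make the v1 entry layout concrete, here is a minimal hedged sketch of publishing a read-only GTF_permit_access grant: @domid and @frame must be visible before the flags make the entry live, hence the write barrier (virt_wmb() assumed available; the helper itself is hypothetical).

	/*
	 * Sketch: publish a read-only v1 grant. 'entry' points into the
	 * shared grant-table page; 'gfn' is the frame shared with 'remote'.
	 */
	static void grant_v1_publish_readonly(struct grant_entry_v1 *entry,
					      domid_t remote, uint32_t gfn)
	{
		entry->domid = remote;
		entry->frame = gfn;
		virt_wmb();	/* frame/domid settle before entry goes live */
		entry->flags = GTF_permit_access | GTF_readonly;
	}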
@@ -181,8 +184,7 @@ struct grant_entry_header {
};
/*
- * Version 2 of the grant entry structure, here is a union because three
- * different types are suppotted: full_page, sub_page and transitive.
+ * Version 2 of the grant entry structure.
*/
union grant_entry_v2 {
struct grant_entry_header hdr;
@@ -197,9 +199,9 @@ union grant_entry_v2 {
* field of the same name in the V1 entry structure.
*/
struct {
- struct grant_entry_header hdr;
- uint32_t pad0;
- uint64_t frame;
+ struct grant_entry_header hdr;
+ uint32_t pad0;
+ uint64_t frame;
} full_page;
/*
@@ -208,10 +210,10 @@ union grant_entry_v2 {
* in frame @frame.
*/
struct {
- struct grant_entry_header hdr;
- uint16_t page_off;
- uint16_t length;
- uint64_t frame;
+ struct grant_entry_header hdr;
+ uint16_t page_off;
+ uint16_t length;
+ uint64_t frame;
} sub_page;
/*
@@ -219,12 +221,15 @@ union grant_entry_v2 {
* grant @gref in domain @trans_domid, as if it was the local
* domain. Obviously, the transitive access must be compatible
* with the original grant.
+ *
+ * The current version of Xen does not allow transitive grants
+ * to be mapped.
*/
struct {
- struct grant_entry_header hdr;
- domid_t trans_domid;
- uint16_t pad0;
- grant_ref_t gref;
+ struct grant_entry_header hdr;
+ domid_t trans_domid;
+ uint16_t pad0;
+ grant_ref_t gref;
} transitive;
uint32_t __spacer[4]; /* Pad to a power of two */
@@ -236,6 +241,21 @@ typedef uint16_t grant_status_t;
* GRANT TABLE QUERIES AND USES
*/
+#define GNTTABOP_map_grant_ref 0
+#define GNTTABOP_unmap_grant_ref 1
+#define GNTTABOP_setup_table 2
+#define GNTTABOP_dump_table 3
+#define GNTTABOP_transfer 4
+#define GNTTABOP_copy 5
+#define GNTTABOP_query_size 6
+#define GNTTABOP_unmap_and_replace 7
+#define GNTTABOP_set_version 8
+#define GNTTABOP_get_status_frames 9
+#define GNTTABOP_get_version 10
+#define GNTTABOP_swap_grant_ref 11
+#define GNTTABOP_cache_flush 12
+
/*
* Handle to track a mapping created via a grant reference.
*/
@@ -244,7 +264,7 @@ typedef uint32_t grant_handle_t;
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
- * that must be presented later to destroy the mapping(s). On error, <handle>
+ * that must be presented later to destroy the mapping(s). On error, <status>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
@@ -258,7 +278,6 @@ typedef uint32_t grant_handle_t;
* host mapping is destroyed by other means then it is *NOT* guaranteed
* to be accounted to the correct grant reference!
*/
-#define GNTTABOP_map_grant_ref 0
struct gnttab_map_grant_ref {
/* IN parameters. */
uint64_t host_addr;
@@ -283,7 +302,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
* 3. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
-#define GNTTABOP_unmap_grant_ref 1
struct gnttab_unmap_grant_ref {
/* IN parameters. */
uint64_t host_addr;
@@ -303,7 +321,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
* 3. Xen may not support more than a single grant-table page per domain.
*/
-#define GNTTABOP_setup_table 2
struct gnttab_setup_table {
/* IN parameters. */
domid_t dom;
@@ -318,7 +335,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
* GNTTABOP_dump_table: Dump the contents of the grant table to the
* xen console. Debugging use only.
*/
-#define GNTTABOP_dump_table 3
struct gnttab_dump_table {
/* IN parameters. */
domid_t dom;
@@ -328,17 +344,17 @@ struct gnttab_dump_table {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
/*
- * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
- * foreign domain has previously registered its interest in the transfer via
- * <domid, ref>.
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
+ * has previously registered its interest in the transfer via <domid, ref>.
*
* Note that, even if the transfer fails, the specified page no longer belongs
* to the calling domain *unless* the error is GNTST_bad_page.
+ *
+ * Note further that only PV guests can use this operation.
*/
-#define GNTTABOP_transfer 4
struct gnttab_transfer {
/* IN parameters. */
- xen_pfn_t mfn;
+ xen_pfn_t mfn;
domid_t domid;
grant_ref_t ref;
/* OUT parameters. */
@@ -369,21 +385,20 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
#define _GNTCOPY_dest_gref (1)
#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
-#define GNTTABOP_copy 5
struct gnttab_copy {
- /* IN parameters. */
- struct {
- union {
- grant_ref_t ref;
- xen_pfn_t gmfn;
- } u;
- domid_t domid;
- uint16_t offset;
- } source, dest;
- uint16_t len;
- uint16_t flags; /* GNTCOPY_* */
- /* OUT parameters. */
- int16_t status;
+ /* IN parameters. */
+ struct gnttab_copy_ptr {
+ union {
+ grant_ref_t ref;
+ xen_pfn_t gmfn;
+ } u;
+ domid_t domid;
+ uint16_t offset;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags; /* GNTCOPY_* */
+ /* OUT parameters. */
+ int16_t status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
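As a usage sketch, assuming Linux's gnttab_batch_copy() wrapper around GNTTABOP_copy, copying 512 bytes out of a foreign grant into a locally owned frame could look like the following (the helper and its parameters are hypothetical):

	/* Sketch: copy from a foreign grant into a frame we own. */
	static int copy_from_remote(grant_ref_t remote_gref,
				    domid_t remote_domid, xen_pfn_t local_gmfn)
	{
		struct gnttab_copy op = {
			.source.u.ref  = remote_gref,	/* source named by gref */
			.source.domid  = remote_domid,
			.source.offset = 0,
			.dest.u.gmfn   = local_gmfn,	/* dest is a local frame */
			.dest.domid    = DOMID_SELF,
			.dest.offset   = 0,
			.len   = 512,
			.flags = GNTCOPY_source_gref,	/* source is a gref */
		};

		gnttab_batch_copy(&op, 1);
		return op.status;	/* GNTST_* */
	}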
@@ -394,7 +409,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
-#define GNTTABOP_query_size 6
struct gnttab_query_size {
/* IN parameters. */
domid_t dom;
@@ -416,7 +430,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
* 2. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
-#define GNTTABOP_unmap_and_replace 7
struct gnttab_unmap_and_replace {
/* IN parameters. */
uint64_t host_addr;
@@ -429,14 +442,12 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
/*
* GNTTABOP_set_version: Request a particular version of the grant
- * table shared table structure. This operation can only be performed
- * once in any given domain. It must be performed before any grants
- * are activated; otherwise, the domain will be stuck with version 1.
- * The only defined versions are 1 and 2.
+ * table shared table structure. This operation may be used to toggle
+ * between different versions, but must be performed while no grants
+ * are active. The only defined versions are 1 and 2.
*/
-#define GNTTABOP_set_version 8
struct gnttab_set_version {
- /* IN parameters */
+ /* IN/OUT parameters */
uint32_t version;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
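A short sketch of the relaxed toggle described above, assuming the Linux HYPERVISOR_grant_table_op() hypercall wrapper (error handling elided):

	static int grant_switch_version(uint32_t version)
	{
		struct gnttab_set_version sv = { .version = version };

		/* Only legal while no grants are active; sv.version is
		 * IN/OUT and holds the version in effect on return. */
		return HYPERVISOR_grant_table_op(GNTTABOP_set_version, &sv, 1);
	}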
@@ -453,7 +464,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
-#define GNTTABOP_get_status_frames 9
struct gnttab_get_status_frames {
/* IN parameters. */
uint32_t nr_frames;
@@ -468,7 +478,6 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
* GNTTABOP_get_version: Get the grant table version which is in
* effect for domain <dom>.
*/
-#define GNTTABOP_get_version 10
struct gnttab_get_version {
/* IN parameters */
domid_t dom;
@@ -479,26 +488,37 @@ struct gnttab_get_version {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
/*
+ * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries.
+ */
+struct gnttab_swap_grant_ref {
+ /* IN parameters */
+ grant_ref_t ref_a;
+ grant_ref_t ref_b;
+ /* OUT parameters */
+ int16_t status; /* GNTST_* */
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_swap_grant_ref);
+
+/*
* Issue one or more cache maintenance operations on a portion of a
* page granted to the calling domain by a foreign domain.
*/
-#define GNTTABOP_cache_flush 12
struct gnttab_cache_flush {
union {
uint64_t dev_bus_addr;
grant_ref_t ref;
} a;
- uint16_t offset; /* offset from start of grant */
- uint16_t length; /* size within the grant */
-#define GNTTAB_CACHE_CLEAN (1<<0)
-#define GNTTAB_CACHE_INVAL (1<<1)
-#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1u<<0)
+#define GNTTAB_CACHE_INVAL (1u<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1u<<31)
uint32_t op;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
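For instance, a backend that has DMA'd into a granted buffer might clean and invalidate the covered cache lines before signalling completion. A hedged sketch, again assuming HYPERVISOR_grant_table_op():

	/* Sketch: clean+invalidate a whole granted page, named by gref. */
	static int flush_granted_page(grant_ref_t gref)
	{
		struct gnttab_cache_flush flush = {
			.a.ref  = gref,
			.offset = 0,
			.length = 4096,		/* whole page within the grant */
			.op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_INVAL |
				  GNTTAB_CACHE_SOURCE_GREF, /* 'a' holds a ref */
		};

		return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush,
						 &flush, 1);
	}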
/*
- * Bitfield values for update_pin_status.flags.
+ * Bitfield values for gnttab_map_grant_ref.flags.
*/
/* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map (0)
@@ -548,6 +568,7 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
#define GNTST_address_too_big (-11) /* transfer page address too large. */
#define GNTST_eagain (-12) /* Operation not done; try again. */
+#define GNTST_no_space (-13) /* Out of space (handles etc). */
#define GNTTABOP_error_msgs { \
"okay", \
@@ -562,7 +583,8 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
"bad page", \
"copy arguments cross page boundary", \
"page address size too large", \
- "operation not done; try again" \
+ "operation not done; try again", \
+ "out of space", \
}
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
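Since the GNTST_* codes are small negative integers whose negation indexes GNTTABOP_error_msgs, mapping a status to its message is a one-liner; a sketch assuming Linux's ARRAY_SIZE():

	static const char *const gnttab_errmsgs[] = GNTTABOP_error_msgs;

	/* Translate a GNTST_* status; out-of-range codes are caught. */
	static const char *gntst_to_str(int16_t status)
	{
		unsigned int idx = -status;	/* GNTST_okay (0) -> "okay" */

		return idx < ARRAY_SIZE(gnttab_errmsgs) ? gnttab_errmsgs[idx]
							: "(unknown GNTST code)";
	}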
diff --git a/include/xen/interface/hvm/dm_op.h b/include/xen/interface/hvm/dm_op.h
index ee9e480bc559..08d972f87c7b 100644
--- a/include/xen/interface/hvm/dm_op.h
+++ b/include/xen/interface/hvm/dm_op.h
@@ -1,23 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (c) 2016, Citrix Systems Inc
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
diff --git a/include/xen/interface/hvm/hvm_op.h b/include/xen/interface/hvm/hvm_op.h
index 25d945ef17de..03134bf3cec1 100644
--- a/include/xen/interface/hvm/hvm_op.h
+++ b/include/xen/interface/hvm/hvm_op.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__
@@ -64,4 +46,23 @@ struct xen_hvm_get_mem_type {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_get_mem_type);
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set,
+ * this vector will be used in preference to the
+ * domain global callback via (see
+ *                                 domain-global callback vector (see
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
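A registration sketch, assuming Linux's HYPERVISOR_hvm_op() wrapper and the x86 HYPERVISOR_CALLBACK_VECTOR (the helper and its cpu parameter, a Xen vCPU id, are hypothetical):

	static int set_percpu_upcall_vector(uint32_t cpu)
	{
		struct xen_hvm_evtchn_upcall_vector op = {
			.vcpu   = cpu,
			.vector = HYPERVISOR_CALLBACK_VECTOR,
		};

		/* Takes precedence over HVM_PARAM_CALLBACK_IRQ. */
		return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
	}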
diff --git a/include/xen/interface/hvm/hvm_vcpu.h b/include/xen/interface/hvm/hvm_vcpu.h
index 32ca83edd44d..cbf93493275c 100644
--- a/include/xen/interface/hvm/hvm_vcpu.h
+++ b/include/xen/interface/hvm/hvm_vcpu.h
@@ -1,22 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com>
*/
@@ -131,13 +114,3 @@ struct vcpu_hvm_context {
typedef struct vcpu_hvm_context vcpu_hvm_context_t;
#endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/include/xen/interface/hvm/ioreq.h b/include/xen/interface/hvm/ioreq.h
new file mode 100644
index 000000000000..b02cfeae7eb5
--- /dev/null
+++ b/include/xen/interface/hvm/ioreq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * ioreq.h: I/O request definitions for device models
+ * Copyright (c) 2004, Intel Corporation.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_IOREQ_H__
+#define __XEN_PUBLIC_HVM_IOREQ_H__
+
+#define IOREQ_READ 1
+#define IOREQ_WRITE 0
+
+#define STATE_IOREQ_NONE 0
+#define STATE_IOREQ_READY 1
+#define STATE_IOREQ_INPROCESS 2
+#define STATE_IORESP_READY 3
+
+#define IOREQ_TYPE_PIO 0 /* pio */
+#define IOREQ_TYPE_COPY 1 /* mmio ops */
+#define IOREQ_TYPE_PCI_CONFIG 2
+#define IOREQ_TYPE_TIMEOFFSET 7
+#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
+
+/*
+ * The VM-exit dispatcher should cooperate with the instruction decoder
+ * to prepare this structure and notify the service OS and the device
+ * model by sending a virq.
+ *
+ * For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted
+ * as follows:
+ *
+ * 63....48|47..40|39..35|34..32|31........0
+ * SEGMENT |BUS |DEV |FN |OFFSET
+ */
+struct ioreq {
+ uint64_t addr; /* physical address */
+ uint64_t data; /* data (or paddr of data) */
+ uint32_t count; /* for rep prefixes */
+ uint32_t size; /* size in bytes */
+ uint32_t vp_eport; /* evtchn for notifications to/from device model */
+ uint16_t _pad0;
+ uint8_t state:4;
+ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
+ * of the real data to use. */
+ uint8_t dir:1; /* 1=read, 0=write */
+ uint8_t df:1;
+ uint8_t _pad1:1;
+ uint8_t type; /* I/O type */
+};
+
+#endif /* __XEN_PUBLIC_HVM_IOREQ_H__ */
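To make the IOREQ_TYPE_PCI_CONFIG address encoding above concrete, a small decoding sketch (the helper is hypothetical; the field widths follow the 63..48 / 47..40 / 39..35 / 34..32 / 31..0 layout in the comment):

	/* Unpack a PCI_CONFIG ioreq address into segment/bus/dev/fn/offset. */
	static void ioreq_decode_pci_addr(uint64_t addr, uint16_t *segment,
					  uint8_t *bus, uint8_t *dev,
					  uint8_t *fn, uint32_t *offset)
	{
		*segment = addr >> 48;			/* bits 63..48 */
		*bus     = (addr >> 40) & 0xff;		/* bits 47..40 */
		*dev     = (addr >> 35) & 0x1f;		/* bits 39..35 */
		*fn      = (addr >> 32) & 0x07;		/* bits 34..32 */
		*offset  = addr & 0xffffffff;		/* bits 31..0  */
	}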
diff --git a/include/xen/interface/hvm/params.h b/include/xen/interface/hvm/params.h
index 4d61fc58d99d..4e2c94b3c466 100644
--- a/include/xen/interface/hvm/params.h
+++ b/include/xen/interface/hvm/params.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 50af9ea2ff1e..e33557c0b4e9 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -1,22 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2016, Citrix Systems, Inc.
*/
diff --git a/include/xen/interface/io/9pfs.h b/include/xen/interface/io/9pfs.h
index 5b6c19dae5e2..f1a4c5ad2fd1 100644
--- a/include/xen/interface/io/9pfs.h
+++ b/include/xen/interface/io/9pfs.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* 9pfs.h -- Xen 9PFS transport
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
*/
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 5e40041c7e95..ba1e9f5b630e 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* blkif.h
*
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
index 85ca8b02695a..cf17e89ed861 100644
--- a/include/xen/interface/io/console.h
+++ b/include/xen/interface/io/console.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* console.h
*
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index d43ca0361f86..60e42d3b760e 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* displif.h
*
* Unified display device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
* Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
@@ -554,7 +537,7 @@ struct xendispl_dbuf_create_req {
struct xendispl_page_directory {
grant_ref_t gref_dir_next_page;
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[];
};
/*
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
index 974a51ed9165..60ca808cef97 100644
--- a/include/xen/interface/io/fbif.h
+++ b/include/xen/interface/io/fbif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* fbif.h -- Xen virtual frame buffer device
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 5c7630d7376e..b8b08aa53325 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* kbdif.h -- Xen virtual keyboard/mouse
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 2194322c3c7f..cb0c1a25d5d4 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* xen_netif.h
*
* Unified network-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2003-2004, Keir Fraser
*/
diff --git a/include/xen/interface/io/pciif.h b/include/xen/interface/io/pciif.h
index d9922ae36eb5..d1a87b62daae 100644
--- a/include/xen/interface/io/pciif.h
+++ b/include/xen/interface/io/pciif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* PCI Backend/Frontend Common Data Structures & Macros
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __XEN_PCI_COMMON_H__
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 6a89dc1bf225..22099bb4079f 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__
diff --git a/include/xen/interface/io/pvcalls.h b/include/xen/interface/io/pvcalls.h
index ccf97b817e72..b6680fdbe2a8 100644
--- a/include/xen/interface/io/pvcalls.h
+++ b/include/xen/interface/io/pvcalls.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+
#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 2af7a1cd6658..4fef1efcdcab 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* ring.h
*
@@ -10,12 +10,27 @@
#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__
+/*
+ * When #include'ing this header, you need to provide the following
+ * declaration upfront:
+ * - standard integers types (uint8_t, uint16_t, etc)
+ * They are provided by stdint.h of the standard headers.
+ *
+ * In addition, if you intend to use the FLEX macros, you also need to
+ * provide the following, before invoking the FLEX macros:
+ * - size_t
+ * - memcpy
+ * - grant_ref_t
+ * These declarations are provided by string.h of the standard headers,
+ * and grant_table.h from the Xen public headers.
+ */
+
#include <xen/interface/grant_table.h>
typedef unsigned int RING_IDX;
/* Round a 32-bit unsigned constant down to the nearest power of two. */
-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
@@ -27,82 +42,78 @@ typedef unsigned int RING_IDX;
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
-#define __CONST_RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
- sizeof(((struct _s##_sring *)0)->ring[0])))
-
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
/*
* The same for passing in an actual pointer instead of a name tag.
*/
-#define __RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
* To make a new ring datatype, you need to have two message structures,
- * let's say struct request, and struct response already defined.
+ * let's say request_t, and response_t already defined.
*
* In a header where you want the ring datatype declared, you then do:
*
- * DEFINE_RING_TYPES(mytag, struct request, struct response);
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
*
- * struct mytag_sring - The shared ring.
- * struct mytag_front_ring - The 'front' half of the ring.
- * struct mytag_back_ring - The 'back' half of the ring.
+ * mytag_sring_t - The shared ring.
+ * mytag_front_ring_t - The 'front' half of the ring.
+ * mytag_back_ring_t - The 'back' half of the ring.
*
* To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
* the front half:
*
- * struct mytag_front_ring front_ring;
- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
- * PAGE_SIZE);
+ * mytag_front_ring_t ring;
+ * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly (note that only the front
* initializes the shared ring):
*
- * struct mytag_back_ring back_ring;
- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
- * PAGE_SIZE);
+ * mytag_back_ring_t back_ring;
+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*/
-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
- \
-/* Shared ring entry */ \
-union __name##_sring_entry { \
- __req_t req; \
- __rsp_t rsp; \
-}; \
- \
-/* Shared ring page */ \
-struct __name##_sring { \
- RING_IDX req_prod, req_event; \
- RING_IDX rsp_prod, rsp_event; \
- uint8_t pad[48]; \
- union __name##_sring_entry ring[1]; /* variable-length */ \
-}; \
- \
-/* "Front" end's private variables */ \
-struct __name##_front_ring { \
- RING_IDX req_prod_pvt; \
- RING_IDX rsp_cons; \
- unsigned int nr_ents; \
- struct __name##_sring *sring; \
-}; \
- \
-/* "Back" end's private variables */ \
-struct __name##_back_ring { \
- RING_IDX rsp_prod_pvt; \
- RING_IDX req_cons; \
- unsigned int nr_ents; \
- struct __name##_sring *sring; \
-};
-
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+ __req_t req; \
+ __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+ RING_IDX req_prod, req_event; \
+ RING_IDX rsp_prod, rsp_event; \
+ uint8_t __pad[48]; \
+ union __name##_sring_entry ring[]; \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+ RING_IDX req_prod_pvt; \
+ RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+ RING_IDX rsp_prod_pvt; \
+ RING_IDX req_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
/*
* Macros for manipulating rings.
*
@@ -119,94 +130,109 @@ struct __name##_back_ring { \
*/
/* Initialising empty rings */
-#define SHARED_RING_INIT(_s) do { \
- (_s)->req_prod = (_s)->rsp_prod = 0; \
- (_s)->req_event = (_s)->rsp_event = 1; \
- memset((_s)->pad, 0, sizeof((_s)->pad)); \
+#define SHARED_RING_INIT(_s) do { \
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
+ (_s)->req_event = (_s)->rsp_event = 1; \
+ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
-#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
- (_r)->req_prod_pvt = (_i); \
- (_r)->rsp_cons = (_i); \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->req_prod_pvt = (_i); \
+ (_r)->rsp_cons = (_i); \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
} while (0)
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
-#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
- (_r)->rsp_prod_pvt = (_i); \
- (_r)->req_cons = (_i); \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
+#define XEN_FRONT_RING_INIT(r, s, size) do { \
+ SHARED_RING_INIT(s); \
+ FRONT_RING_INIT(r, s, size); \
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->rsp_prod_pvt = (_i); \
+ (_r)->req_cons = (_i); \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
/* How big is this ring? */
-#define RING_SIZE(_r) \
+#define RING_SIZE(_r) \
((_r)->nr_ents)
/* Number of free requests (for use on front side only). */
-#define RING_FREE_REQUESTS(_r) \
+#define RING_FREE_REQUESTS(_r) \
(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
/* Test if there is an empty slot available on the front ring.
* (This is only meaningful from the front. )
*/
-#define RING_FULL(_r) \
+#define RING_FULL(_r) \
(RING_FREE_REQUESTS(_r) == 0)
/* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
- ({ \
- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
- unsigned int rsp = RING_SIZE(_r) - \
- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
- req < rsp ? req : rsp; \
- })
+#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+})
+
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r))
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r))
/* Direct access to individual ring elements, by index. */
-#define RING_GET_REQUEST(_r, _idx) \
+#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
/*
- * Get a local copy of a request.
+ * Get a local copy of a request/response.
*
- * Use this in preference to RING_GET_REQUEST() so all processing is
+ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
- * to be ineffective where _req is a struct which consists of only bitfields.
+ * to be ineffective where dest is a struct which consists of only bitfields.
*/
-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
- /* Use volatile to force the copy into _req. */ \
- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+#define RING_COPY_(type, r, idx, dest) do { \
+ /* Use volatile to force the copy into dest. */ \
+ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)
-#define RING_GET_RESPONSE(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
+#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
/* Loop termination condition: Would the specified index overflow the ring? */
-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
/* Ill-behaved frontend determination: Can there be this many requests? */
-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+/* Ill-behaved backend determination: Can there be this many responses? */
+#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
-#define RING_PUSH_REQUESTS(_r) do { \
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+#define RING_PUSH_REQUESTS(_r) do { \
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)
-#define RING_PUSH_RESPONSES(_r) do { \
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+#define RING_PUSH_RESPONSES(_r) do { \
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
/*
@@ -239,40 +265,40 @@ struct __name##_back_ring { \
* field appropriately.
*/
-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
- RING_IDX __old = (_r)->sring->req_prod; \
- RING_IDX __new = (_r)->req_prod_pvt; \
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
- (_r)->sring->req_prod = __new; \
- virt_mb(); /* back sees new requests /before/ we check req_event */ \
- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
- (RING_IDX)(__new - __old)); \
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->req_prod; \
+ RING_IDX __new = (_r)->req_prod_pvt; \
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
+ (_r)->sring->req_prod = __new; \
+ virt_mb(); /* back sees new requests /before/ we check req_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+ (RING_IDX)(__new - __old)); \
} while (0)
-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
- RING_IDX __old = (_r)->sring->rsp_prod; \
- RING_IDX __new = (_r)->rsp_prod_pvt; \
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
- (_r)->sring->rsp_prod = __new; \
- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
- (RING_IDX)(__new - __old)); \
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->rsp_prod; \
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = __new; \
+ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+ (RING_IDX)(__new - __old)); \
} while (0)
-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
- if (_work_to_do) break; \
- (_r)->sring->req_event = (_r)->req_cons + 1; \
- virt_mb(); \
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
+ virt_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)
-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
- if (_work_to_do) break; \
- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
- virt_mb(); \
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+ virt_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
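Tying the macros together, a hedged front-end sketch (the mytag tag and its request/response structs are hypothetical; notify_remote_via_irq() is the usual Linux event-channel kick): define the types once, initialise with XEN_FRONT_RING_INIT, then produce requests like this.

	DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response);

	static void mytag_submit(struct mytag_front_ring *ring,
				 const struct mytag_request *req, int irq)
	{
		int notify;
		struct mytag_request *slot =
			RING_GET_REQUEST(ring, ring->req_prod_pvt);

		*slot = *req;
		ring->req_prod_pvt++;

		/* Publish the slot; learn whether the backend needs a kick. */
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
		if (notify)
			notify_remote_via_irq(irq);
	}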
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 2aac8f73614c..b818517588b5 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* sndif.h
*
* Unified sound-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2013-2015 GlobalLogic Inc.
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
@@ -676,7 +659,7 @@ struct xensnd_open_req {
struct xensnd_page_directory {
grant_ref_t gref_dir_next_page;
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[];
};
/*
diff --git a/include/xen/interface/io/usbif.h b/include/xen/interface/io/usbif.h
new file mode 100644
index 000000000000..a70e0b93178b
--- /dev/null
+++ b/include/xen/interface/io/usbif.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * usbif.h
+ *
+ * USB I/O interface for Xen guest OSes.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_USBIF_H__
+#define __XEN_PUBLIC_IO_USBIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Detailed Interface Description
+ * ==============================
+ * The pvUSB interface is using a split driver design: a frontend driver in
+ * the guest and a backend driver in a driver domain (normally dom0) having
+ * access to the physical USB device(s) being passed to the guest.
+ *
+ * The frontend and backend drivers use XenStore to initiate the connection
+ * between them, the I/O activity is handled via two shared ring pages and an
+ * event channel. As the interface between frontend and backend is at the USB
+ * host connector level, multiple (up to 31) physical USB devices can be
+ * handled by a single connection.
+ *
+ * The Xen pvUSB device name is "qusb", so the frontend's XenStore entries are
+ * to be found under "device/qusb", while the backend's XenStore entries are
+ * under "backend/<guest-dom-id>/qusb".
+ *
+ * When a new pvUSB connection is established, the frontend needs to setup the
+ * two shared ring pages for communication and the event channel. The ring
+ * pages need to be made available to the backend via the grant table
+ * interface.
+ *
+ * One of the shared ring pages is used by the backend to inform the frontend
+ * about USB device plug events (device to be added or removed). This is the
+ * "conn-ring".
+ *
+ * The other ring page is used for USB I/O communication (requests and
+ * responses). This is the "urb-ring".
+ *
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * num-ports
+ * Values: unsigned [1...31]
+ *
+ * Number of ports for this (virtual) USB host connector.
+ *
+ * usb-ver
+ * Values: unsigned [1...2]
+ *
+ * USB version of this host connector: 1 = USB 1.1, 2 = USB 2.0.
+ *
+ * port/[1...31]
+ * Values: string
+ *
+ * Physical USB device connected to the given port, e.g. "3-1.5".
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * urb-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for urb requests.
+ *
+ * conn-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for connection/disconnection requests.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ *
+ * Protocol Description
+ * ====================
+ *
+ *-------------------------- USB device plug events --------------------------
+ *
+ * USB device plug events are sent via the "conn-ring" shared page. As only
+ * events are being sent, the respective requests from the frontend to the
+ * backend are just dummy ones.
+ * The events sent to the frontend have the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | portnum | speed | 4
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, event id (taken from the actual frontend dummy request)
+ * portnum - uint8_t, port number (1 ... 31)
+ * speed - uint8_t, device XENUSB_SPEED_*, XENUSB_SPEED_NONE == unplug
+ *
+ * The dummy request:
+ * 0 1 octet
+ * +----------------+----------------+
+ * | id | 2
+ * +----------------+----------------+
+ * id - uint16_t, guest-supplied value (need not be unique)
+ *
+ *-------------------------- USB I/O request ---------------------------------
+ *
+ * A single USB I/O request on the "urb-ring" has the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | nr_buffer_segs | 4
+ * +----------------+----------------+----------------+----------------+
+ * | pipe | 8
+ * +----------------+----------------+----------------+----------------+
+ * | transfer_flags | buffer_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | request type specific | 16
+ * | data | 20
+ * +----------------+----------------+----------------+----------------+
+ * | seg[0] | 24
+ * | data | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | seg[XENUSB_MAX_SEGMENTS_PER_REQUEST - 1] | 144
+ * | data | 148
+ * +----------------+----------------+----------------+----------------+
+ * Bit field bit number 0 is always least significant bit, undefined bits must
+ * be zero.
+ * id - uint16_t, guest-supplied value
+ * nr_buffer_segs - uint16_t, number of segment entries in seg[] array
+ * pipe - uint32_t, bit field with multiple information:
+ * bits 0-4: port request to send to
+ * bit 5: unlink request with specified id (cancel I/O) if set (see below)
+ * bit 7: direction (1 = read from device)
+ * bits 8-14: device number on port
+ * bits 15-18: endpoint of device
+ * bits 30-31: request type: 00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk
+ * transfer_flags - uint16_t, bit field with processing flags:
+ * bit 0: less data than specified allowed
+ * buffer_length - uint16_t, total length of data
+ * request type specific data - 8 bytes, see below
+ * seg[] - array with 8 byte elements, see below
+ *
+ * Request type specific data for isochronous request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | number_of_packets | nr_frame_desc_segs | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time interval in msecs between frames
+ * start_frame - uint16_t, start frame number
+ * number_of_packets - uint16_t, number of packets to transfer
+ * nr_frame_desc_segs - uint16_t, number of seg[] frame descriptor elements
+ *
+ * Request type specific data for interrupt request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time in msecs until interruption
+ *
+ * Request type specific data for control request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | data of setup packet | 4
+ * | | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for bulk request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 4
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for unlink request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | unlink_id | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * unlink_id - uint16_t, request id of request to terminate
+ *
+ * seg[] array element layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref | 4
+ * +----------------+----------------+----------------+----------------+
+ * | offset | length | 8
+ * +----------------+----------------+----------------+----------------+
+ * gref - uint32_t, grant reference of buffer page
+ * offset - uint16_t, offset of buffer start in page
+ * length - uint16_t, length of buffer in page
+ *
+ *-------------------------- USB I/O response --------------------------------
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | actual_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | error_count | 16
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, id of the request this response belongs to
+ * start_frame - uint16_t, start frame of this response (iso requests only)
+ * status - int32_t, XENUSB_STATUS_* (non-iso requests)
+ * actual_length - uint32_t, actual size of data transferred
+ * error_count - uint32_t, number of errors (iso requests)
+ */
+
+enum xenusb_spec_version {
+ XENUSB_VER_UNKNOWN = 0,
+ XENUSB_VER_USB11,
+ XENUSB_VER_USB20,
+ XENUSB_VER_USB30, /* not supported yet */
+};
+
+/*
+ * USB pipe in xenusb_urb_request
+ *
+ * - port number: bits 0-4
+ * (USB_MAXCHILDREN is 31)
+ *
+ * - operation flag: bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
+ *
+ * - direction: bit 7
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
+ *
+ * - device address: bits 8-14
+ *
+ * - endpoint: bits 15-18
+ *
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ */
+
+#define XENUSB_PIPE_PORT_MASK 0x0000001f
+#define XENUSB_PIPE_UNLINK 0x00000020
+#define XENUSB_PIPE_DIR 0x00000080
+#define XENUSB_PIPE_DEV_MASK 0x0000007f
+#define XENUSB_PIPE_DEV_SHIFT 8
+#define XENUSB_PIPE_EP_MASK 0x0000000f
+#define XENUSB_PIPE_EP_SHIFT 15
+#define XENUSB_PIPE_TYPE_MASK 0x00000003
+#define XENUSB_PIPE_TYPE_SHIFT 30
+#define XENUSB_PIPE_TYPE_ISOC 0
+#define XENUSB_PIPE_TYPE_INT 1
+#define XENUSB_PIPE_TYPE_CTRL 2
+#define XENUSB_PIPE_TYPE_BULK 3
+
+#define xenusb_pipeportnum(pipe) ((pipe) & XENUSB_PIPE_PORT_MASK)
+#define xenusb_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
+
+#define xenusb_pipeunlink(pipe) ((pipe) & XENUSB_PIPE_UNLINK)
+#define xenusb_pipesubmit(pipe) (!xenusb_pipeunlink(pipe))
+#define xenusb_setunlink_pipe(pipe) ((pipe) | XENUSB_PIPE_UNLINK)
+
+#define xenusb_pipein(pipe) ((pipe) & XENUSB_PIPE_DIR)
+#define xenusb_pipeout(pipe) (!xenusb_pipein(pipe))
+
+#define xenusb_pipedevice(pipe) \
+ (((pipe) >> XENUSB_PIPE_DEV_SHIFT) & XENUSB_PIPE_DEV_MASK)
+
+#define xenusb_pipeendpoint(pipe) \
+ (((pipe) >> XENUSB_PIPE_EP_SHIFT) & XENUSB_PIPE_EP_MASK)
+
+#define xenusb_pipetype(pipe) \
+ (((pipe) >> XENUSB_PIPE_TYPE_SHIFT) & XENUSB_PIPE_TYPE_MASK)
+#define xenusb_pipeisoc(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_ISOC)
+#define xenusb_pipeint(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_INT)
+#define xenusb_pipectrl(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_CTRL)
+#define xenusb_pipebulk(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_BULK)
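+
+/*
+ * Illustrative sketch (not part of the ABI; variable names are made up):
+ * composing a bulk IN pipe value for device 3, endpoint 1 on port 2 with
+ * the macros above:
+ *
+ *	uint32_t pipe = 0;
+ *
+ *	pipe = xenusb_setportnum_pipe(pipe, 2);
+ *	pipe |= 3 << XENUSB_PIPE_DEV_SHIFT;
+ *	pipe |= 1 << XENUSB_PIPE_EP_SHIFT;
+ *	pipe |= XENUSB_PIPE_DIR;
+ *	pipe |= (uint32_t)XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
+ *
+ * Afterwards xenusb_pipein(pipe) and xenusb_pipebulk(pipe) are non-zero,
+ * xenusb_pipeportnum(pipe) == 2, xenusb_pipedevice(pipe) == 3 and
+ * xenusb_pipeendpoint(pipe) == 1.
+ */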
+
+#define XENUSB_MAX_SEGMENTS_PER_REQUEST (16)
+#define XENUSB_MAX_PORTNR 31
+#define XENUSB_RING_SIZE 4096
+
+/*
+ * RING for transferring urbs.
+ */
+struct xenusb_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+struct xenusb_urb_request {
+ uint16_t id; /* request id */
+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+
+ /* basic urb parameter */
+ uint32_t pipe;
+ uint16_t transfer_flags;
+#define XENUSB_SHORT_NOT_OK 0x0001
+ uint16_t buffer_length;
+ union {
+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t start_frame; /* start frame */
+ uint16_t number_of_packets; /* number of ISO packet */
+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
+ } isoc;
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t pad[3];
+ } intr;
+
+ struct {
+ uint16_t unlink_id; /* unlink request id */
+ uint16_t pad[3];
+ } unlink;
+
+ } u;
+
+ /* urb data segments */
+ struct xenusb_request_segment seg[XENUSB_MAX_SEGMENTS_PER_REQUEST];
+};
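+
+/*
+ * Illustrative sketch (hypothetical frontend code; "front_ring", "id",
+ * "gref" and "len" are assumed to exist): queueing a bulk IN transfer of
+ * len bytes held in one granted page:
+ *
+ *	struct xenusb_urb_request *req =
+ *		RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
+ *
+ *	req->id = id;
+ *	req->pipe = pipe;
+ *	req->transfer_flags = 0;
+ *	req->buffer_length = len;
+ *	req->nr_buffer_segs = 1;
+ *	req->seg[0].gref = gref;
+ *	req->seg[0].offset = 0;
+ *	req->seg[0].length = len;
+ *
+ * with "pipe" composed as in the sketch following the pipe macros above.
+ */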
+
+struct xenusb_urb_response {
+ uint16_t id; /* request id */
+ uint16_t start_frame; /* start frame (ISO) */
+ int32_t status; /* status (non-ISO) */
+#define XENUSB_STATUS_OK 0
+#define XENUSB_STATUS_NODEV (-19)
+#define XENUSB_STATUS_INVAL (-22)
+#define XENUSB_STATUS_STALL (-32)
+#define XENUSB_STATUS_IOERROR (-71)
+#define XENUSB_STATUS_BABBLE (-75)
+#define XENUSB_STATUS_SHUTDOWN (-108)
+ int32_t actual_length; /* actual transfer length */
+ int32_t error_count; /* number of ISO errors */
+};
+
+DEFINE_RING_TYPES(xenusb_urb, struct xenusb_urb_request, struct xenusb_urb_response);
+#define XENUSB_URB_RING_SIZE __CONST_RING_SIZE(xenusb_urb, XENUSB_RING_SIZE)
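+/*
+ * Note (derived from the layout above, assuming the standard 64 byte
+ * sring header and power-of-two rounding in __CONST_RING_SIZE): with
+ * 148 byte requests this works out to 16 entries per 4096 byte page.
+ */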
+
+/*
+ * RING for notifying connect/disconnect events to frontend
+ */
+struct xenusb_conn_request {
+ uint16_t id;
+};
+
+struct xenusb_conn_response {
+ uint16_t id; /* request id */
+ uint8_t portnum; /* port number */
+ uint8_t speed; /* usb_device_speed */
+#define XENUSB_SPEED_NONE 0
+#define XENUSB_SPEED_LOW 1
+#define XENUSB_SPEED_FULL 2
+#define XENUSB_SPEED_HIGH 3
+};
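+
+/*
+ * Illustrative sketch (hypothetical backend code): answering a dummy
+ * request to report a high-speed device appearing on port 2; reporting
+ * XENUSB_SPEED_NONE for the port would signal an unplug instead:
+ *
+ *	rsp->id = req->id;
+ *	rsp->portnum = 2;
+ *	rsp->speed = XENUSB_SPEED_HIGH;
+ */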
+
+DEFINE_RING_TYPES(xenusb_conn, struct xenusb_conn_request, struct xenusb_conn_response);
+#define XENUSB_CONN_RING_SIZE __CONST_RING_SIZE(xenusb_conn, XENUSB_RING_SIZE)
+
+#endif /* __XEN_PUBLIC_IO_USBIF_H__ */
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
index d07d7aca8d1c..7ea4dc9611c4 100644
--- a/include/xen/interface/io/vscsiif.h
+++ b/include/xen/interface/io/vscsiif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vscsiif.h
*
* Based on the blkif.h code.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright(c) FUJITSU Limited 2008.
*/
@@ -60,7 +43,7 @@
*
* A string specifying the backend device: either a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers), or a WWN (e.g.
- * "naa.60014054ac780582").
+ * "naa.60014054ac780582:0").
*
* v-dev
* Values: string
@@ -104,6 +87,75 @@
* response structures.
*/
+/*
+ * Xenstore format in practice
+ * ===========================
+ *
+ * The backend driver uses a single_host:many_devices notation to manage domU
+ * devices. Everything is stored in /local/domain/<backend_domid>/backend/vscsi/.
+ * The xenstore layout looks like this (dom0 is assumed to be the backend_domid):
+ *
+ * <domid>/<vhost>/feature-host = "0"
+ * <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
+ * <domid>/<vhost>/frontend-id = "<domid>"
+ * <domid>/<vhost>/online = "1"
+ * <domid>/<vhost>/state = "4"
+ * <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1" or "naa.wwn:lun"
+ * <domid>/<vhost>/vscsi-devs/dev-0/state = "4"
+ * <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
+ * <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
+ * <domid>/<vhost>/vscsi-devs/dev-1/state = "4"
+ * <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
+ *
+ * The frontend driver maintains its state in
+ * /local/domain/<domid>/device/vscsi/.
+ *
+ * <vhost>/backend = "/local/domain/0/backend/vscsi/<domid>/<vhost>"
+ * <vhost>/backend-id = "0"
+ * <vhost>/event-channel = "20"
+ * <vhost>/ring-ref = "43"
+ * <vhost>/state = "4"
+ * <vhost>/vscsi-devs/dev-0/state = "4"
+ * <vhost>/vscsi-devs/dev-1/state = "4"
+ *
+ * In addition to the entries for backend and frontend, these flags are
+ * stored for the toolstack:
+ *
+ * <domid>/<vhost>/vscsi-devs/dev-1/p-devname = "/dev/$device"
+ * <domid>/<vhost>/libxl_ctrl_index = "0"
+ *
+ *
+ * Backend/frontend protocol
+ * =========================
+ *
+ * To create a vhost along with a device:
+ * <domid>/<vhost>/feature-host = "0"
+ * <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
+ * <domid>/<vhost>/frontend-id = "<domid>"
+ * <domid>/<vhost>/online = "1"
+ * <domid>/<vhost>/state = "1"
+ * <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1"
+ * <domid>/<vhost>/vscsi-devs/dev-0/state = "1"
+ * <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
+ * Wait for <domid>/<vhost>/state and <domid>/<vhost>/vscsi-devs/dev-0/state to become 4
+ *
+ * To add another device to a vhost:
+ * <domid>/<vhost>/state = "7"
+ * <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
+ * <domid>/<vhost>/vscsi-devs/dev-1/state = "1"
+ * <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
+ * Wait for <domid>/<vhost>/state and <domid>/<vhost>/vscsi-devs/dev-1/state to become 4
+ *
+ * To remove a device from a vhost:
+ * <domid>/<vhost>/state = "7"
+ * <domid>/<vhost>/vscsi-devs/dev-1/state = "5"
+ * Wait for <domid>/<vhost>/state to become 4
+ * Wait for <domid>/<vhost>/vscsi-devs/dev-1/state to become 6
+ * Remove <domid>/<vhost>/vscsi-devs/dev-1/{state,p-dev,v-dev,p-devname}
+ * Remove <domid>/<vhost>/vscsi-devs/dev-1/
+ *
+ */
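+
+/*
+ * Example (illustrative only, using the xenstore-write CLI; domain id,
+ * vhost number and device addresses are made up): the "add another
+ * device" sequence above for vhost 0 of domain 5 could be driven as:
+ *
+ *	xenstore-write /local/domain/0/backend/vscsi/5/0/state 7
+ *	xenstore-write /local/domain/0/backend/vscsi/5/0/vscsi-devs/dev-1/p-dev 8:0:2:2
+ *	xenstore-write /local/domain/0/backend/vscsi/5/0/vscsi-devs/dev-1/state 1
+ *	xenstore-write /local/domain/0/backend/vscsi/5/0/vscsi-devs/dev-1/v-dev 0:0:1:0
+ *
+ * followed by waiting for both state nodes to reach 4 again.
+ */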
+
/* Requests from the frontend to the backend */
/*
@@ -134,7 +186,8 @@
* (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
* elements referencing the target data buffers is calculated from the lengths
* of the seg[] elements (the sum of all valid seg[].length divided by the
- * size of one scsiif_request_segment structure).
+ * size of one scsiif_request_segment structure). The frontend may use a mix of
+ * direct and indirect requests.
*/
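+
+/*
+ * Worked example of the rule above: three seg[] entries of lengths 4096,
+ * 4096 and 1024 cover 9216 bytes of segment data, i.e. 9216 / 8 = 1152
+ * scsiif_request_segment elements in the referenced pages.
+ */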
#define VSCSIIF_ACT_SCSI_CDB 1
@@ -171,12 +224,14 @@
/*
* based on Linux kernel 2.6.18, still valid
+ *
* Changing these values requires support of multiple protocols via the rings
* as "old clients" will blindly use these values and the resulting structure
* sizes.
*/
#define VSCSIIF_MAX_COMMAND_SIZE 16
#define VSCSIIF_SENSE_BUFFERSIZE 96
+#define VSCSIIF_PAGE_SIZE 4096
struct scsiif_request_segment {
grant_ref_t gref;
@@ -184,7 +239,8 @@ struct scsiif_request_segment {
uint16_t length;
};
-#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
+#define VSCSIIF_SG_PER_PAGE (VSCSIIF_PAGE_SIZE / \
+ sizeof(struct scsiif_request_segment))
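+/* With the 8 byte scsiif_request_segment this evaluates to 512. */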
/* Size of one request is 252 bytes */
struct vscsiif_request {
@@ -224,6 +280,58 @@ struct vscsiif_response {
uint32_t reserved[36];
};
+/* SCSI I/O status from vscsiif_response->rslt */
+#define XEN_VSCSIIF_RSLT_STATUS(x) ((x) & 0x00ff)
+
+/* Host I/O status from vscsiif_response->rslt */
+#define XEN_VSCSIIF_RSLT_HOST(x) (((x) & 0x00ff0000) >> 16)
+#define XEN_VSCSIIF_RSLT_HOST_OK 0
+/* Couldn't connect before timeout */
+#define XEN_VSCSIIF_RSLT_HOST_NO_CONNECT 1
+/* Bus busy through timeout */
+#define XEN_VSCSIIF_RSLT_HOST_BUS_BUSY 2
+/* Timed out for other reason */
+#define XEN_VSCSIIF_RSLT_HOST_TIME_OUT 3
+/* Bad target */
+#define XEN_VSCSIIF_RSLT_HOST_BAD_TARGET 4
+/* Abort for some other reason */
+#define XEN_VSCSIIF_RSLT_HOST_ABORT 5
+/* Parity error */
+#define XEN_VSCSIIF_RSLT_HOST_PARITY 6
+/* Internal error */
+#define XEN_VSCSIIF_RSLT_HOST_ERROR 7
+/* Reset by somebody */
+#define XEN_VSCSIIF_RSLT_HOST_RESET 8
+/* Unexpected interrupt */
+#define XEN_VSCSIIF_RSLT_HOST_BAD_INTR 9
+/* Force command past mid-layer */
+#define XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH 10
+/* Retry requested */
+#define XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR 11
+/* Hidden retry requested */
+#define XEN_VSCSIIF_RSLT_HOST_IMM_RETRY 12
+/* Requeue command requested */
+#define XEN_VSCSIIF_RSLT_HOST_REQUEUE 13
+/* Transport error disrupted I/O */
+#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED 14
+/* Transport class fastfailed */
+#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST 15
+/* Permanent target failure */
+#define XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE 16
+/* Permanent nexus failure on path */
+#define XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE 17
+/* Space allocation on device failed */
+#define XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE 18
+/* Medium error */
+#define XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR 19
+/* Transport marginal errors */
+#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL 20
+
+/* Result values of reset operations */
+#define XEN_VSCSIIF_RSLT_RESET_SUCCESS 0x2002
+#define XEN_VSCSIIF_RSLT_RESET_FAILED 0x2003
+
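+/*
+ * Illustrative decode (hypothetical variable and function names): a
+ * frontend inspecting a response's rslt field might do:
+ *
+ *	if (XEN_VSCSIIF_RSLT_HOST(rsp->rslt) != XEN_VSCSIIF_RSLT_HOST_OK)
+ *		handle_host_error(XEN_VSCSIIF_RSLT_HOST(rsp->rslt));
+ *	else if (XEN_VSCSIIF_RSLT_STATUS(rsp->rslt))
+ *		handle_scsi_status(XEN_VSCSIIF_RSLT_STATUS(rsp->rslt));
+ */
+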
DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
-#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
index aaf2951b1cce..44456e2853fc 100644
--- a/include/xen/interface/io/xenbus.h
+++ b/include/xen/interface/io/xenbus.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*****************************************************************************
* xenbus.h
*
@@ -39,13 +39,3 @@ enum xenbus_state
};
#endif /* _XEN_PUBLIC_IO_XENBUS_H */
-
-/*
- * Local variables:
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 1517c7e93a3a..b62365478ac0 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Details of the "wire" protocol between Xen Store Daemon and client
* library or guest kernel.
@@ -10,7 +10,8 @@
enum xsd_sockmsg_type
{
- XS_DEBUG,
+ XS_CONTROL,
+#define XS_DEBUG XS_CONTROL
XS_DIRECTORY,
XS_READ,
XS_GET_PERMS,
@@ -30,8 +31,13 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT,
- XS_RESET_WATCHES,
+ /* XS_RESTRICT has been removed */
+ XS_RESET_WATCHES = XS_SET_TARGET + 2,
+ XS_DIRECTORY_PART,
+
+ XS_TYPE_COUNT, /* Number of valid types. */
+
+ XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
@@ -59,7 +65,8 @@ static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
XSD_ERROR(EROFS),
XSD_ERROR(EBUSY),
XSD_ERROR(EAGAIN),
- XSD_ERROR(EISCONN)
+ XSD_ERROR(EISCONN),
+ XSD_ERROR(E2BIG)
};
struct xsd_sockmsg
@@ -87,9 +94,31 @@ struct xenstore_domain_interface {
char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
XENSTORE_RING_IDX req_cons, req_prod;
XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ uint32_t server_features; /* Bitmap of features supported by the server */
+ uint32_t connection;
+ uint32_t error;
};
/* Violating this is very bad. See docs/misc/xenstore.txt. */
#define XENSTORE_PAYLOAD_MAX 4096
+/* Violating these just gets you an error back */
+#define XENSTORE_ABS_PATH_MAX 3072
+#define XENSTORE_REL_PATH_MAX 2048
+
+/* The ability to reconnect a ring */
+#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
+/* The presence of the "error" field in the ring page */
+#define XENSTORE_SERVER_FEATURE_ERROR 2
+
+/* Valid values for the connection field */
+#define XENSTORE_CONNECTED 0 /* the steady-state */
+#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */
+
+/* Valid values for the error field */
+#define XENSTORE_ERROR_NONE 0 /* No error */
+#define XENSTORE_ERROR_COMM 1 /* Communication problem */
+#define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */
+#define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */
+
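+/*
+ * Illustrative reconnect sketch (hypothetical guest-side code): a guest
+ * must only request a reconnect when the server advertises the feature:
+ *
+ *	if (intf->server_features & XENSTORE_SERVER_FEATURE_RECONNECTION) {
+ *		intf->connection = XENSTORE_RECONNECT;
+ *		... notify the server, then wait for connection to read
+ *		    XENSTORE_CONNECTED again before issuing requests ...
+ *	}
+ */
+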
#endif /* _XS_WIRE_H */
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 447004861f00..1a371a825c55 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* memory.h
*
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
index 73d9b0a2974e..b665fdbef1fd 100644
--- a/include/xen/interface/nmi.h
+++ b/include/xen/interface/nmi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* nmi.h
*
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 610dba9b620a..a237af867873 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -1,22 +1,4 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_PHYSDEV_H__
#define __XEN_PUBLIC_PHYSDEV_H__
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 732efb08c3e1..79a443c65ea9 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* platform.h
*
* Hardware platform operations. Intended for use by domain-0 kernel.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2002-2006, K Fraser
*/
@@ -500,6 +483,8 @@ struct xenpf_symdata {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+#define XENPF_get_dom0_console 64
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -523,6 +508,7 @@ struct xen_platform_op {
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
struct xenpf_symdata symdata;
+ struct dom0_vga_console_info dom0_console;
uint8_t pad[128];
} u;
};
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index a4c4d735d781..4dac0634ffff 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* sched.h
*
* Scheduler state interactions
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index 504c71601511..c7cc28ad8d63 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vcpu.h
*
* VCPU initialisation, query, and hotplug.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
index 8772b552c006..37d6588873d6 100644
--- a/include/xen/interface/version.h
+++ b/include/xen/interface/version.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* version.h
*
diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
index 7483a78d2425..464aa6b3a5f9 100644
--- a/include/xen/interface/xen-mca.h
+++ b/include/xen/interface/xen-mca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* arch-x86/mca.h
* Guest OS machine check interface to x86 Xen.
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 8bfb242f433e..0ca23eca2a9c 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* xen.h
*
* Guest OS interface to Xen.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
@@ -598,7 +581,9 @@ struct shared_info {
* their gettimeofday() syscall on this wallclock-base value.
*/
struct pvclock_wall_clock wc;
-
+#ifndef CONFIG_X86_32
+ uint32_t wc_sec_hi;
+#endif
struct arch_shared_info arch;
};
@@ -737,6 +722,9 @@ struct dom0_vga_console_info {
uint32_t gbl_caps;
/* Mode attributes (offset 0x0, VESA command 0x4f01). */
uint16_t mode_attrs;
+ uint16_t pad;
+ /* high 32 bits of lfb_base */
+ uint32_t ext_lfb_base;
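+		/* full address: ((uint64_t)ext_lfb_base << 32) | lfb_base */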
} vesa_lfb;
} u;
};
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
index ad603eab24b3..e2ee73d91bd6 100644
--- a/include/xen/interface/xenpmu.h
+++ b/include/xen/interface/xenpmu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_XENPMU_H__
#define __XEN_PUBLIC_XENPMU_H__
diff --git a/include/xen/pci.h b/include/xen/pci.h
new file mode 100644
index 000000000000..b8337cf85fd1
--- /dev/null
+++ b/include/xen/pci.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __XEN_PCI_H__
+#define __XEN_PCI_H__
+
+#if defined(CONFIG_XEN_DOM0)
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
+static inline int xen_find_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+
+static inline int xen_register_device_domain_owner(struct pci_dev *dev,
+ uint16_t domain)
+{
+ return -1;
+}
+
+static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+#endif
+
+#endif
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index d5eaf9d682b8..808d17ad8d57 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -3,13 +3,13 @@
#define __LINUX_SWIOTLB_XEN_H
#include <linux/swiotlb.h>
+#include <asm/xen/swiotlb-xen.h>
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir);
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir);
-extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 39a5580f8feb..47f11bec5e90 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
+#include <linux/virtio_anchor.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
@@ -42,34 +43,15 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle);
-
-void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-#else
-static inline int xen_create_contiguous_region(phys_addr_t pstart,
- unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle)
-{
- return 0;
-}
-
-static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
- unsigned int order) { }
-#endif
-
#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
+ unsigned int domid, bool no_translate);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
- bool no_translate, struct page **pages)
+ bool no_translate)
{
BUG();
return 0;
@@ -146,7 +128,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
*/
BUG_ON(err_ptr == NULL);
return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
- false, pages);
+ false);
}
/*
@@ -158,7 +140,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* @err_ptr: Returns per-MFN error status.
* @prot: page protection mask
* @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
*
* @mfn and @err_ptr may point to the same buffer, the MFNs will be
* overwritten by the error codes after they are mapped.
@@ -169,14 +150,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
unsigned long addr, xen_pfn_t *mfn,
int nr, int *err_ptr,
- pgprot_t prot, unsigned int domid,
- struct page **pages)
+ pgprot_t prot, unsigned int domid)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
- true, pages);
+ true);
}
/* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -200,8 +180,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
- return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
- pages);
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
@@ -236,4 +215,15 @@ static inline void xen_preemptible_hcall_end(void) { }
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
+#ifdef CONFIG_XEN_GRANT_DMA_OPS
+bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
+#else
+struct virtio_device;
+
+static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_XEN_GRANT_DMA_OPS */
+
#endif /* INCLUDE_XEN_OPS_H */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 19a72f591e2b..a1e5b3f18d69 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -2,6 +2,8 @@
#ifndef _XEN_XEN_H
#define _XEN_XEN_H
+#include <linux/types.h>
+
enum xen_domain_type {
XEN_NATIVE, /* running on bare hardware */
XEN_PV_DOMAIN, /* running in a PV domain */
@@ -25,12 +27,19 @@ extern bool xen_pvh;
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)
-#include <linux/types.h>
-
extern uint32_t xen_start_flags;
+#ifdef CONFIG_XEN_PV
+extern bool xen_pv_pci_possible;
+#else
+#define xen_pv_pci_possible 0
+#endif
+
#include <xen/interface/hvm/start_info.h>
extern struct hvm_start_info pvh_start_info;
+void xen_prepare_pvh(void);
+struct pt_regs;
+void xen_pv_evtchn_do_upcall(struct pt_regs *regs);
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
@@ -52,4 +61,34 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#include <linux/ioport.h>
+int arch_xen_unpopulated_init(struct resource **res);
+#else
+#include <xen/balloon.h>
+static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
+ struct page **pages)
+{
+ return xen_alloc_ballooned_pages(nr_pages, pages);
+}
+static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
+ struct page **pages)
+{
+ xen_free_ballooned_pages(nr_pages, pages);
+}
+#endif
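+/*
+ * Illustrative use (hypothetical caller): both configurations expose the
+ * same interface, with the fallback backed by ballooned pages:
+ *
+ *	struct page *pages[4];
+ *
+ *	if (!xen_alloc_unpopulated_pages(4, pages)) {
+ *		... use the pages, then:
+ *		xen_free_unpopulated_pages(4, pages);
+ *	}
+ */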
+
+#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI) && defined(CONFIG_X86)
+bool __init xen_processor_present(uint32_t acpi_id);
+#else
+#include <linux/bug.h>
+static inline bool xen_processor_present(uint32_t acpi_id)
+{
+ BUG();
+ return false;
+}
+#endif
+
#endif /* _XEN_XEN_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 5a8315e6d8a6..ac22cf08c09f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -51,7 +51,6 @@
#define XENBUS_MAX_RING_GRANT_ORDER 4
#define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER)
-#define INVALID_GRANT_HANDLE (~0U)
/* Register callback to watch this node. */
struct xenbus_watch
@@ -61,6 +60,15 @@ struct xenbus_watch
/* Path being watched. */
const char *node;
+ unsigned int nr_pending;
+
+ /*
+	 * Called just before enqueuing a new event while a spinlock is held.
+ * The event will be discarded if this callback returns false.
+ */
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *path, const char *token);
+
/* Callback (executed in a process context with no locks held). */
void (*callback)(struct xenbus_watch *,
const char *path, const char *token);
@@ -79,12 +87,16 @@ struct xenbus_device {
struct completion down;
struct work_struct work;
struct semaphore reclaim_sem;
+
+ /* Event channel based statistics and settings. */
+ atomic_t event_channels;
+ atomic_t events;
+ atomic_t spurious_events;
+ atomic_t jiffies_eoi_delayed;
+ unsigned int spurious_threshold;
};
-static inline struct xenbus_device *to_xenbus_device(struct device *dev)
-{
- return container_of(dev, struct xenbus_device, dev);
-}
+#define to_xenbus_device(__dev) container_of_const(__dev, struct xenbus_device, dev)
struct xenbus_device_id
{
@@ -97,14 +109,15 @@ struct xenbus_driver {
const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
bool allow_rebind; /* avoid setting xenstore closed during remove */
+ bool not_essential; /* is not mandatory for boot progress */
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
void (*otherend_changed)(struct xenbus_device *dev,
enum xenbus_state backend_state);
- int (*remove)(struct xenbus_device *dev);
+ void (*remove)(struct xenbus_device *dev);
int (*suspend)(struct xenbus_device *dev);
int (*resume)(struct xenbus_device *dev);
- int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
+ int (*uevent)(const struct xenbus_device *, struct kobj_uevent_env *);
struct device_driver driver;
int (*read_otherend_details)(struct xenbus_device *dev);
int (*is_ready)(struct xenbus_device *dev);
@@ -183,8 +196,6 @@ void xs_suspend_cancel(void);
struct work_struct;
-void xenbus_probe(struct work_struct *);
-
#define XENBUS_IS_ERR_READ(str) ({ \
if (!IS_ERR(str) && strlen(str) == 0) { \
kfree(str); \
@@ -197,17 +208,23 @@ void xenbus_probe(struct work_struct *);
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *));
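+/*
+ * Illustrative call (hypothetical watch and callback): will_handle may be
+ * NULL when no filtering of events is needed:
+ *
+ *	err = xenbus_watch_path(dev, path, &watch, NULL, my_callback);
+ */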
-__printf(4, 5)
+__printf(5, 6)
int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *),
const char *pathfmt, ...);
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned int nr_pages, grant_ref_t *grefs);
+void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
+ grant_ref_t *grefs);
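+
+/*
+ * Illustrative pairing (hypothetical frontend, single ring page):
+ *
+ *	void *ring;
+ *	grant_ref_t gref;
+ *
+ *	err = xenbus_setup_ring(dev, GFP_KERNEL, &ring, 1, &gref);
+ *
+ * and, when disconnecting:
+ *
+ *	xenbus_teardown_ring(&ring, 1, &gref);
+ */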
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr);
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index bbee8c6a349d..4dc45a51c044 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -1,6 +1,4 @@
/******************************************************************************
- * evtchn.h
- *
* Interface to /dev/xen/xenbus_backend.
*
* Copyright (c) 2011 Bastian Blank <waldi@debian.org>